1 /* 2 * ipmi_si.c 3 * 4 * The interface to the IPMI driver for the system interfaces (KCS, SMIC, 5 * BT). 6 * 7 * Author: MontaVista Software, Inc. 8 * Corey Minyard <minyard@mvista.com> 9 * source@mvista.com 10 * 11 * Copyright 2002 MontaVista Software Inc. 12 * 13 * This program is free software; you can redistribute it and/or modify it 14 * under the terms of the GNU General Public License as published by the 15 * Free Software Foundation; either version 2 of the License, or (at your 16 * option) any later version. 17 * 18 * 19 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED 20 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF 21 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 22 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 23 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, 24 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS 25 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND 26 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR 27 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE 28 * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 29 * 30 * You should have received a copy of the GNU General Public License along 31 * with this program; if not, write to the Free Software Foundation, Inc., 32 * 675 Mass Ave, Cambridge, MA 02139, USA. 33 */ 34 35 /* 36 * This file holds the "policy" for the interface to the SMI state 37 * machine. It does the configuration, handles timers and interrupts, 38 * and drives the real SMI state machine. 39 */ 40 41 #include <linux/config.h> 42 #include <linux/module.h> 43 #include <linux/moduleparam.h> 44 #include <asm/system.h> 45 #include <linux/sched.h> 46 #include <linux/timer.h> 47 #include <linux/errno.h> 48 #include <linux/spinlock.h> 49 #include <linux/slab.h> 50 #include <linux/delay.h> 51 #include <linux/list.h> 52 #include <linux/pci.h> 53 #include <linux/ioport.h> 54 #include <linux/notifier.h> 55 #include <linux/mutex.h> 56 #include <linux/kthread.h> 57 #include <asm/irq.h> 58 #ifdef CONFIG_HIGH_RES_TIMERS 59 #include <linux/hrtime.h> 60 # if defined(schedule_next_int) 61 /* Old high-res timer code, do translations. */ 62 # define get_arch_cycles(a) quick_update_jiffies_sub(a) 63 # define arch_cycles_per_jiffy cycles_per_jiffies 64 # endif 65 static inline void add_usec_to_timer(struct timer_list *t, long v) 66 { 67 t->arch_cycle_expires += nsec_to_arch_cycle(v * 1000); 68 while (t->arch_cycle_expires >= arch_cycles_per_jiffy) 69 { 70 t->expires++; 71 t->arch_cycle_expires -= arch_cycles_per_jiffy; 72 } 73 } 74 #endif 75 #include <linux/interrupt.h> 76 #include <linux/rcupdate.h> 77 #include <linux/ipmi_smi.h> 78 #include <asm/io.h> 79 #include "ipmi_si_sm.h" 80 #include <linux/init.h> 81 #include <linux/dmi.h> 82 83 /* Measure times between events in the driver. */ 84 #undef DEBUG_TIMING 85 86 /* Call every 10 ms. */ 87 #define SI_TIMEOUT_TIME_USEC 10000 88 #define SI_USEC_PER_JIFFY (1000000/HZ) 89 #define SI_TIMEOUT_JIFFIES (SI_TIMEOUT_TIME_USEC/SI_USEC_PER_JIFFY) 90 #define SI_SHORT_TIMEOUT_USEC 250 /* .25ms when the SM request a 91 short timeout */ 92 93 enum si_intf_state { 94 SI_NORMAL, 95 SI_GETTING_FLAGS, 96 SI_GETTING_EVENTS, 97 SI_CLEARING_FLAGS, 98 SI_CLEARING_FLAGS_THEN_SET_IRQ, 99 SI_GETTING_MESSAGES, 100 SI_ENABLE_INTERRUPTS1, 101 SI_ENABLE_INTERRUPTS2 102 /* FIXME - add watchdog stuff. */ 103 }; 104 105 /* Some BT-specific defines we need here. */ 106 #define IPMI_BT_INTMASK_REG 2 107 #define IPMI_BT_INTMASK_CLEAR_IRQ_BIT 2 108 #define IPMI_BT_INTMASK_ENABLE_IRQ_BIT 1 109 110 enum si_type { 111 SI_KCS, SI_SMIC, SI_BT 112 }; 113 static char *si_to_str[] = { "KCS", "SMIC", "BT" }; 114 115 #define DEVICE_NAME "ipmi_si" 116 117 static struct device_driver ipmi_driver = 118 { 119 .name = DEVICE_NAME, 120 .bus = &platform_bus_type 121 }; 122 123 struct smi_info 124 { 125 int intf_num; 126 ipmi_smi_t intf; 127 struct si_sm_data *si_sm; 128 struct si_sm_handlers *handlers; 129 enum si_type si_type; 130 spinlock_t si_lock; 131 spinlock_t msg_lock; 132 struct list_head xmit_msgs; 133 struct list_head hp_xmit_msgs; 134 struct ipmi_smi_msg *curr_msg; 135 enum si_intf_state si_state; 136 137 /* Used to handle the various types of I/O that can occur with 138 IPMI */ 139 struct si_sm_io io; 140 int (*io_setup)(struct smi_info *info); 141 void (*io_cleanup)(struct smi_info *info); 142 int (*irq_setup)(struct smi_info *info); 143 void (*irq_cleanup)(struct smi_info *info); 144 unsigned int io_size; 145 char *addr_source; /* ACPI, PCI, SMBIOS, hardcode, default. */ 146 void (*addr_source_cleanup)(struct smi_info *info); 147 void *addr_source_data; 148 149 /* Per-OEM handler, called from handle_flags(). 150 Returns 1 when handle_flags() needs to be re-run 151 or 0 indicating it set si_state itself. 152 */ 153 int (*oem_data_avail_handler)(struct smi_info *smi_info); 154 155 /* Flags from the last GET_MSG_FLAGS command, used when an ATTN 156 is set to hold the flags until we are done handling everything 157 from the flags. */ 158 #define RECEIVE_MSG_AVAIL 0x01 159 #define EVENT_MSG_BUFFER_FULL 0x02 160 #define WDT_PRE_TIMEOUT_INT 0x08 161 #define OEM0_DATA_AVAIL 0x20 162 #define OEM1_DATA_AVAIL 0x40 163 #define OEM2_DATA_AVAIL 0x80 164 #define OEM_DATA_AVAIL (OEM0_DATA_AVAIL | \ 165 OEM1_DATA_AVAIL | \ 166 OEM2_DATA_AVAIL) 167 unsigned char msg_flags; 168 169 /* If set to true, this will request events the next time the 170 state machine is idle. */ 171 atomic_t req_events; 172 173 /* If true, run the state machine to completion on every send 174 call. Generally used after a panic to make sure stuff goes 175 out. */ 176 int run_to_completion; 177 178 /* The I/O port of an SI interface. */ 179 int port; 180 181 /* The space between start addresses of the two ports. For 182 instance, if the first port is 0xca2 and the spacing is 4, then 183 the second port is 0xca6. */ 184 unsigned int spacing; 185 186 /* zero if no irq; */ 187 int irq; 188 189 /* The timer for this si. */ 190 struct timer_list si_timer; 191 192 /* The time (in jiffies) the last timeout occurred at. */ 193 unsigned long last_timeout_jiffies; 194 195 /* Used to gracefully stop the timer without race conditions. */ 196 atomic_t stop_operation; 197 198 /* The driver will disable interrupts when it gets into a 199 situation where it cannot handle messages due to lack of 200 memory. Once that situation clears up, it will re-enable 201 interrupts. */ 202 int interrupt_disabled; 203 204 /* From the get device id response... */ 205 struct ipmi_device_id device_id; 206 207 /* Driver model stuff. */ 208 struct device *dev; 209 struct platform_device *pdev; 210 211 /* True if we allocated the device, false if it came from 212 * someplace else (like PCI). */ 213 int dev_registered; 214 215 /* Slave address, could be reported from DMI. */ 216 unsigned char slave_addr; 217 218 /* Counters and things for the proc filesystem. */ 219 spinlock_t count_lock; 220 unsigned long short_timeouts; 221 unsigned long long_timeouts; 222 unsigned long timeout_restarts; 223 unsigned long idles; 224 unsigned long interrupts; 225 unsigned long attentions; 226 unsigned long flag_fetches; 227 unsigned long hosed_count; 228 unsigned long complete_transactions; 229 unsigned long events; 230 unsigned long watchdog_pretimeouts; 231 unsigned long incoming_messages; 232 233 struct task_struct *thread; 234 235 struct list_head link; 236 }; 237 238 static int try_smi_init(struct smi_info *smi); 239 240 static ATOMIC_NOTIFIER_HEAD(xaction_notifier_list); 241 static int register_xaction_notifier(struct notifier_block * nb) 242 { 243 return atomic_notifier_chain_register(&xaction_notifier_list, nb); 244 } 245 246 static void si_restart_short_timer(struct smi_info *smi_info); 247 248 static void deliver_recv_msg(struct smi_info *smi_info, 249 struct ipmi_smi_msg *msg) 250 { 251 /* Deliver the message to the upper layer with the lock 252 released. */ 253 spin_unlock(&(smi_info->si_lock)); 254 ipmi_smi_msg_received(smi_info->intf, msg); 255 spin_lock(&(smi_info->si_lock)); 256 } 257 258 static void return_hosed_msg(struct smi_info *smi_info) 259 { 260 struct ipmi_smi_msg *msg = smi_info->curr_msg; 261 262 /* Make it a reponse */ 263 msg->rsp[0] = msg->data[0] | 4; 264 msg->rsp[1] = msg->data[1]; 265 msg->rsp[2] = 0xFF; /* Unknown error. */ 266 msg->rsp_size = 3; 267 268 smi_info->curr_msg = NULL; 269 deliver_recv_msg(smi_info, msg); 270 } 271 272 static enum si_sm_result start_next_msg(struct smi_info *smi_info) 273 { 274 int rv; 275 struct list_head *entry = NULL; 276 #ifdef DEBUG_TIMING 277 struct timeval t; 278 #endif 279 280 /* No need to save flags, we aleady have interrupts off and we 281 already hold the SMI lock. */ 282 spin_lock(&(smi_info->msg_lock)); 283 284 /* Pick the high priority queue first. */ 285 if (!list_empty(&(smi_info->hp_xmit_msgs))) { 286 entry = smi_info->hp_xmit_msgs.next; 287 } else if (!list_empty(&(smi_info->xmit_msgs))) { 288 entry = smi_info->xmit_msgs.next; 289 } 290 291 if (!entry) { 292 smi_info->curr_msg = NULL; 293 rv = SI_SM_IDLE; 294 } else { 295 int err; 296 297 list_del(entry); 298 smi_info->curr_msg = list_entry(entry, 299 struct ipmi_smi_msg, 300 link); 301 #ifdef DEBUG_TIMING 302 do_gettimeofday(&t); 303 printk("**Start2: %d.%9.9d\n", t.tv_sec, t.tv_usec); 304 #endif 305 err = atomic_notifier_call_chain(&xaction_notifier_list, 306 0, smi_info); 307 if (err & NOTIFY_STOP_MASK) { 308 rv = SI_SM_CALL_WITHOUT_DELAY; 309 goto out; 310 } 311 err = smi_info->handlers->start_transaction( 312 smi_info->si_sm, 313 smi_info->curr_msg->data, 314 smi_info->curr_msg->data_size); 315 if (err) { 316 return_hosed_msg(smi_info); 317 } 318 319 rv = SI_SM_CALL_WITHOUT_DELAY; 320 } 321 out: 322 spin_unlock(&(smi_info->msg_lock)); 323 324 return rv; 325 } 326 327 static void start_enable_irq(struct smi_info *smi_info) 328 { 329 unsigned char msg[2]; 330 331 /* If we are enabling interrupts, we have to tell the 332 BMC to use them. */ 333 msg[0] = (IPMI_NETFN_APP_REQUEST << 2); 334 msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD; 335 336 smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2); 337 smi_info->si_state = SI_ENABLE_INTERRUPTS1; 338 } 339 340 static void start_clear_flags(struct smi_info *smi_info) 341 { 342 unsigned char msg[3]; 343 344 /* Make sure the watchdog pre-timeout flag is not set at startup. */ 345 msg[0] = (IPMI_NETFN_APP_REQUEST << 2); 346 msg[1] = IPMI_CLEAR_MSG_FLAGS_CMD; 347 msg[2] = WDT_PRE_TIMEOUT_INT; 348 349 smi_info->handlers->start_transaction(smi_info->si_sm, msg, 3); 350 smi_info->si_state = SI_CLEARING_FLAGS; 351 } 352 353 /* When we have a situtaion where we run out of memory and cannot 354 allocate messages, we just leave them in the BMC and run the system 355 polled until we can allocate some memory. Once we have some 356 memory, we will re-enable the interrupt. */ 357 static inline void disable_si_irq(struct smi_info *smi_info) 358 { 359 if ((smi_info->irq) && (!smi_info->interrupt_disabled)) { 360 disable_irq_nosync(smi_info->irq); 361 smi_info->interrupt_disabled = 1; 362 } 363 } 364 365 static inline void enable_si_irq(struct smi_info *smi_info) 366 { 367 if ((smi_info->irq) && (smi_info->interrupt_disabled)) { 368 enable_irq(smi_info->irq); 369 smi_info->interrupt_disabled = 0; 370 } 371 } 372 373 static void handle_flags(struct smi_info *smi_info) 374 { 375 retry: 376 if (smi_info->msg_flags & WDT_PRE_TIMEOUT_INT) { 377 /* Watchdog pre-timeout */ 378 spin_lock(&smi_info->count_lock); 379 smi_info->watchdog_pretimeouts++; 380 spin_unlock(&smi_info->count_lock); 381 382 start_clear_flags(smi_info); 383 smi_info->msg_flags &= ~WDT_PRE_TIMEOUT_INT; 384 spin_unlock(&(smi_info->si_lock)); 385 ipmi_smi_watchdog_pretimeout(smi_info->intf); 386 spin_lock(&(smi_info->si_lock)); 387 } else if (smi_info->msg_flags & RECEIVE_MSG_AVAIL) { 388 /* Messages available. */ 389 smi_info->curr_msg = ipmi_alloc_smi_msg(); 390 if (!smi_info->curr_msg) { 391 disable_si_irq(smi_info); 392 smi_info->si_state = SI_NORMAL; 393 return; 394 } 395 enable_si_irq(smi_info); 396 397 smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2); 398 smi_info->curr_msg->data[1] = IPMI_GET_MSG_CMD; 399 smi_info->curr_msg->data_size = 2; 400 401 smi_info->handlers->start_transaction( 402 smi_info->si_sm, 403 smi_info->curr_msg->data, 404 smi_info->curr_msg->data_size); 405 smi_info->si_state = SI_GETTING_MESSAGES; 406 } else if (smi_info->msg_flags & EVENT_MSG_BUFFER_FULL) { 407 /* Events available. */ 408 smi_info->curr_msg = ipmi_alloc_smi_msg(); 409 if (!smi_info->curr_msg) { 410 disable_si_irq(smi_info); 411 smi_info->si_state = SI_NORMAL; 412 return; 413 } 414 enable_si_irq(smi_info); 415 416 smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2); 417 smi_info->curr_msg->data[1] = IPMI_READ_EVENT_MSG_BUFFER_CMD; 418 smi_info->curr_msg->data_size = 2; 419 420 smi_info->handlers->start_transaction( 421 smi_info->si_sm, 422 smi_info->curr_msg->data, 423 smi_info->curr_msg->data_size); 424 smi_info->si_state = SI_GETTING_EVENTS; 425 } else if (smi_info->msg_flags & OEM_DATA_AVAIL) { 426 if (smi_info->oem_data_avail_handler) 427 if (smi_info->oem_data_avail_handler(smi_info)) 428 goto retry; 429 } else { 430 smi_info->si_state = SI_NORMAL; 431 } 432 } 433 434 static void handle_transaction_done(struct smi_info *smi_info) 435 { 436 struct ipmi_smi_msg *msg; 437 #ifdef DEBUG_TIMING 438 struct timeval t; 439 440 do_gettimeofday(&t); 441 printk("**Done: %d.%9.9d\n", t.tv_sec, t.tv_usec); 442 #endif 443 switch (smi_info->si_state) { 444 case SI_NORMAL: 445 if (!smi_info->curr_msg) 446 break; 447 448 smi_info->curr_msg->rsp_size 449 = smi_info->handlers->get_result( 450 smi_info->si_sm, 451 smi_info->curr_msg->rsp, 452 IPMI_MAX_MSG_LENGTH); 453 454 /* Do this here becase deliver_recv_msg() releases the 455 lock, and a new message can be put in during the 456 time the lock is released. */ 457 msg = smi_info->curr_msg; 458 smi_info->curr_msg = NULL; 459 deliver_recv_msg(smi_info, msg); 460 break; 461 462 case SI_GETTING_FLAGS: 463 { 464 unsigned char msg[4]; 465 unsigned int len; 466 467 /* We got the flags from the SMI, now handle them. */ 468 len = smi_info->handlers->get_result(smi_info->si_sm, msg, 4); 469 if (msg[2] != 0) { 470 /* Error fetching flags, just give up for 471 now. */ 472 smi_info->si_state = SI_NORMAL; 473 } else if (len < 4) { 474 /* Hmm, no flags. That's technically illegal, but 475 don't use uninitialized data. */ 476 smi_info->si_state = SI_NORMAL; 477 } else { 478 smi_info->msg_flags = msg[3]; 479 handle_flags(smi_info); 480 } 481 break; 482 } 483 484 case SI_CLEARING_FLAGS: 485 case SI_CLEARING_FLAGS_THEN_SET_IRQ: 486 { 487 unsigned char msg[3]; 488 489 /* We cleared the flags. */ 490 smi_info->handlers->get_result(smi_info->si_sm, msg, 3); 491 if (msg[2] != 0) { 492 /* Error clearing flags */ 493 printk(KERN_WARNING 494 "ipmi_si: Error clearing flags: %2.2x\n", 495 msg[2]); 496 } 497 if (smi_info->si_state == SI_CLEARING_FLAGS_THEN_SET_IRQ) 498 start_enable_irq(smi_info); 499 else 500 smi_info->si_state = SI_NORMAL; 501 break; 502 } 503 504 case SI_GETTING_EVENTS: 505 { 506 smi_info->curr_msg->rsp_size 507 = smi_info->handlers->get_result( 508 smi_info->si_sm, 509 smi_info->curr_msg->rsp, 510 IPMI_MAX_MSG_LENGTH); 511 512 /* Do this here becase deliver_recv_msg() releases the 513 lock, and a new message can be put in during the 514 time the lock is released. */ 515 msg = smi_info->curr_msg; 516 smi_info->curr_msg = NULL; 517 if (msg->rsp[2] != 0) { 518 /* Error getting event, probably done. */ 519 msg->done(msg); 520 521 /* Take off the event flag. */ 522 smi_info->msg_flags &= ~EVENT_MSG_BUFFER_FULL; 523 handle_flags(smi_info); 524 } else { 525 spin_lock(&smi_info->count_lock); 526 smi_info->events++; 527 spin_unlock(&smi_info->count_lock); 528 529 /* Do this before we deliver the message 530 because delivering the message releases the 531 lock and something else can mess with the 532 state. */ 533 handle_flags(smi_info); 534 535 deliver_recv_msg(smi_info, msg); 536 } 537 break; 538 } 539 540 case SI_GETTING_MESSAGES: 541 { 542 smi_info->curr_msg->rsp_size 543 = smi_info->handlers->get_result( 544 smi_info->si_sm, 545 smi_info->curr_msg->rsp, 546 IPMI_MAX_MSG_LENGTH); 547 548 /* Do this here becase deliver_recv_msg() releases the 549 lock, and a new message can be put in during the 550 time the lock is released. */ 551 msg = smi_info->curr_msg; 552 smi_info->curr_msg = NULL; 553 if (msg->rsp[2] != 0) { 554 /* Error getting event, probably done. */ 555 msg->done(msg); 556 557 /* Take off the msg flag. */ 558 smi_info->msg_flags &= ~RECEIVE_MSG_AVAIL; 559 handle_flags(smi_info); 560 } else { 561 spin_lock(&smi_info->count_lock); 562 smi_info->incoming_messages++; 563 spin_unlock(&smi_info->count_lock); 564 565 /* Do this before we deliver the message 566 because delivering the message releases the 567 lock and something else can mess with the 568 state. */ 569 handle_flags(smi_info); 570 571 deliver_recv_msg(smi_info, msg); 572 } 573 break; 574 } 575 576 case SI_ENABLE_INTERRUPTS1: 577 { 578 unsigned char msg[4]; 579 580 /* We got the flags from the SMI, now handle them. */ 581 smi_info->handlers->get_result(smi_info->si_sm, msg, 4); 582 if (msg[2] != 0) { 583 printk(KERN_WARNING 584 "ipmi_si: Could not enable interrupts" 585 ", failed get, using polled mode.\n"); 586 smi_info->si_state = SI_NORMAL; 587 } else { 588 msg[0] = (IPMI_NETFN_APP_REQUEST << 2); 589 msg[1] = IPMI_SET_BMC_GLOBAL_ENABLES_CMD; 590 msg[2] = msg[3] | 1; /* enable msg queue int */ 591 smi_info->handlers->start_transaction( 592 smi_info->si_sm, msg, 3); 593 smi_info->si_state = SI_ENABLE_INTERRUPTS2; 594 } 595 break; 596 } 597 598 case SI_ENABLE_INTERRUPTS2: 599 { 600 unsigned char msg[4]; 601 602 /* We got the flags from the SMI, now handle them. */ 603 smi_info->handlers->get_result(smi_info->si_sm, msg, 4); 604 if (msg[2] != 0) { 605 printk(KERN_WARNING 606 "ipmi_si: Could not enable interrupts" 607 ", failed set, using polled mode.\n"); 608 } 609 smi_info->si_state = SI_NORMAL; 610 break; 611 } 612 } 613 } 614 615 /* Called on timeouts and events. Timeouts should pass the elapsed 616 time, interrupts should pass in zero. */ 617 static enum si_sm_result smi_event_handler(struct smi_info *smi_info, 618 int time) 619 { 620 enum si_sm_result si_sm_result; 621 622 restart: 623 /* There used to be a loop here that waited a little while 624 (around 25us) before giving up. That turned out to be 625 pointless, the minimum delays I was seeing were in the 300us 626 range, which is far too long to wait in an interrupt. So 627 we just run until the state machine tells us something 628 happened or it needs a delay. */ 629 si_sm_result = smi_info->handlers->event(smi_info->si_sm, time); 630 time = 0; 631 while (si_sm_result == SI_SM_CALL_WITHOUT_DELAY) 632 { 633 si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0); 634 } 635 636 if (si_sm_result == SI_SM_TRANSACTION_COMPLETE) 637 { 638 spin_lock(&smi_info->count_lock); 639 smi_info->complete_transactions++; 640 spin_unlock(&smi_info->count_lock); 641 642 handle_transaction_done(smi_info); 643 si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0); 644 } 645 else if (si_sm_result == SI_SM_HOSED) 646 { 647 spin_lock(&smi_info->count_lock); 648 smi_info->hosed_count++; 649 spin_unlock(&smi_info->count_lock); 650 651 /* Do the before return_hosed_msg, because that 652 releases the lock. */ 653 smi_info->si_state = SI_NORMAL; 654 if (smi_info->curr_msg != NULL) { 655 /* If we were handling a user message, format 656 a response to send to the upper layer to 657 tell it about the error. */ 658 return_hosed_msg(smi_info); 659 } 660 si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0); 661 } 662 663 /* We prefer handling attn over new messages. */ 664 if (si_sm_result == SI_SM_ATTN) 665 { 666 unsigned char msg[2]; 667 668 spin_lock(&smi_info->count_lock); 669 smi_info->attentions++; 670 spin_unlock(&smi_info->count_lock); 671 672 /* Got a attn, send down a get message flags to see 673 what's causing it. It would be better to handle 674 this in the upper layer, but due to the way 675 interrupts work with the SMI, that's not really 676 possible. */ 677 msg[0] = (IPMI_NETFN_APP_REQUEST << 2); 678 msg[1] = IPMI_GET_MSG_FLAGS_CMD; 679 680 smi_info->handlers->start_transaction( 681 smi_info->si_sm, msg, 2); 682 smi_info->si_state = SI_GETTING_FLAGS; 683 goto restart; 684 } 685 686 /* If we are currently idle, try to start the next message. */ 687 if (si_sm_result == SI_SM_IDLE) { 688 spin_lock(&smi_info->count_lock); 689 smi_info->idles++; 690 spin_unlock(&smi_info->count_lock); 691 692 si_sm_result = start_next_msg(smi_info); 693 if (si_sm_result != SI_SM_IDLE) 694 goto restart; 695 } 696 697 if ((si_sm_result == SI_SM_IDLE) 698 && (atomic_read(&smi_info->req_events))) 699 { 700 /* We are idle and the upper layer requested that I fetch 701 events, so do so. */ 702 unsigned char msg[2]; 703 704 spin_lock(&smi_info->count_lock); 705 smi_info->flag_fetches++; 706 spin_unlock(&smi_info->count_lock); 707 708 atomic_set(&smi_info->req_events, 0); 709 msg[0] = (IPMI_NETFN_APP_REQUEST << 2); 710 msg[1] = IPMI_GET_MSG_FLAGS_CMD; 711 712 smi_info->handlers->start_transaction( 713 smi_info->si_sm, msg, 2); 714 smi_info->si_state = SI_GETTING_FLAGS; 715 goto restart; 716 } 717 718 return si_sm_result; 719 } 720 721 static void sender(void *send_info, 722 struct ipmi_smi_msg *msg, 723 int priority) 724 { 725 struct smi_info *smi_info = send_info; 726 enum si_sm_result result; 727 unsigned long flags; 728 #ifdef DEBUG_TIMING 729 struct timeval t; 730 #endif 731 732 spin_lock_irqsave(&(smi_info->msg_lock), flags); 733 #ifdef DEBUG_TIMING 734 do_gettimeofday(&t); 735 printk("**Enqueue: %d.%9.9d\n", t.tv_sec, t.tv_usec); 736 #endif 737 738 if (smi_info->run_to_completion) { 739 /* If we are running to completion, then throw it in 740 the list and run transactions until everything is 741 clear. Priority doesn't matter here. */ 742 list_add_tail(&(msg->link), &(smi_info->xmit_msgs)); 743 744 /* We have to release the msg lock and claim the smi 745 lock in this case, because of race conditions. */ 746 spin_unlock_irqrestore(&(smi_info->msg_lock), flags); 747 748 spin_lock_irqsave(&(smi_info->si_lock), flags); 749 result = smi_event_handler(smi_info, 0); 750 while (result != SI_SM_IDLE) { 751 udelay(SI_SHORT_TIMEOUT_USEC); 752 result = smi_event_handler(smi_info, 753 SI_SHORT_TIMEOUT_USEC); 754 } 755 spin_unlock_irqrestore(&(smi_info->si_lock), flags); 756 return; 757 } else { 758 if (priority > 0) { 759 list_add_tail(&(msg->link), &(smi_info->hp_xmit_msgs)); 760 } else { 761 list_add_tail(&(msg->link), &(smi_info->xmit_msgs)); 762 } 763 } 764 spin_unlock_irqrestore(&(smi_info->msg_lock), flags); 765 766 spin_lock_irqsave(&(smi_info->si_lock), flags); 767 if ((smi_info->si_state == SI_NORMAL) 768 && (smi_info->curr_msg == NULL)) 769 { 770 start_next_msg(smi_info); 771 si_restart_short_timer(smi_info); 772 } 773 spin_unlock_irqrestore(&(smi_info->si_lock), flags); 774 } 775 776 static void set_run_to_completion(void *send_info, int i_run_to_completion) 777 { 778 struct smi_info *smi_info = send_info; 779 enum si_sm_result result; 780 unsigned long flags; 781 782 spin_lock_irqsave(&(smi_info->si_lock), flags); 783 784 smi_info->run_to_completion = i_run_to_completion; 785 if (i_run_to_completion) { 786 result = smi_event_handler(smi_info, 0); 787 while (result != SI_SM_IDLE) { 788 udelay(SI_SHORT_TIMEOUT_USEC); 789 result = smi_event_handler(smi_info, 790 SI_SHORT_TIMEOUT_USEC); 791 } 792 } 793 794 spin_unlock_irqrestore(&(smi_info->si_lock), flags); 795 } 796 797 static int ipmi_thread(void *data) 798 { 799 struct smi_info *smi_info = data; 800 unsigned long flags; 801 enum si_sm_result smi_result; 802 803 set_user_nice(current, 19); 804 while (!kthread_should_stop()) { 805 spin_lock_irqsave(&(smi_info->si_lock), flags); 806 smi_result = smi_event_handler(smi_info, 0); 807 spin_unlock_irqrestore(&(smi_info->si_lock), flags); 808 if (smi_result == SI_SM_CALL_WITHOUT_DELAY) { 809 /* do nothing */ 810 } 811 else if (smi_result == SI_SM_CALL_WITH_DELAY) 812 udelay(1); 813 else 814 schedule_timeout_interruptible(1); 815 } 816 return 0; 817 } 818 819 820 static void poll(void *send_info) 821 { 822 struct smi_info *smi_info = send_info; 823 824 smi_event_handler(smi_info, 0); 825 } 826 827 static void request_events(void *send_info) 828 { 829 struct smi_info *smi_info = send_info; 830 831 atomic_set(&smi_info->req_events, 1); 832 } 833 834 static int initialized = 0; 835 836 /* Must be called with interrupts off and with the si_lock held. */ 837 static void si_restart_short_timer(struct smi_info *smi_info) 838 { 839 #if defined(CONFIG_HIGH_RES_TIMERS) 840 unsigned long flags; 841 unsigned long jiffies_now; 842 unsigned long seq; 843 844 if (del_timer(&(smi_info->si_timer))) { 845 /* If we don't delete the timer, then it will go off 846 immediately, anyway. So we only process if we 847 actually delete the timer. */ 848 849 do { 850 seq = read_seqbegin_irqsave(&xtime_lock, flags); 851 jiffies_now = jiffies; 852 smi_info->si_timer.expires = jiffies_now; 853 smi_info->si_timer.arch_cycle_expires 854 = get_arch_cycles(jiffies_now); 855 } while (read_seqretry_irqrestore(&xtime_lock, seq, flags)); 856 857 add_usec_to_timer(&smi_info->si_timer, SI_SHORT_TIMEOUT_USEC); 858 859 add_timer(&(smi_info->si_timer)); 860 spin_lock_irqsave(&smi_info->count_lock, flags); 861 smi_info->timeout_restarts++; 862 spin_unlock_irqrestore(&smi_info->count_lock, flags); 863 } 864 #endif 865 } 866 867 static void smi_timeout(unsigned long data) 868 { 869 struct smi_info *smi_info = (struct smi_info *) data; 870 enum si_sm_result smi_result; 871 unsigned long flags; 872 unsigned long jiffies_now; 873 long time_diff; 874 #ifdef DEBUG_TIMING 875 struct timeval t; 876 #endif 877 878 if (atomic_read(&smi_info->stop_operation)) 879 return; 880 881 spin_lock_irqsave(&(smi_info->si_lock), flags); 882 #ifdef DEBUG_TIMING 883 do_gettimeofday(&t); 884 printk("**Timer: %d.%9.9d\n", t.tv_sec, t.tv_usec); 885 #endif 886 jiffies_now = jiffies; 887 time_diff = (((long)jiffies_now - (long)smi_info->last_timeout_jiffies) 888 * SI_USEC_PER_JIFFY); 889 smi_result = smi_event_handler(smi_info, time_diff); 890 891 spin_unlock_irqrestore(&(smi_info->si_lock), flags); 892 893 smi_info->last_timeout_jiffies = jiffies_now; 894 895 if ((smi_info->irq) && (!smi_info->interrupt_disabled)) { 896 /* Running with interrupts, only do long timeouts. */ 897 smi_info->si_timer.expires = jiffies + SI_TIMEOUT_JIFFIES; 898 spin_lock_irqsave(&smi_info->count_lock, flags); 899 smi_info->long_timeouts++; 900 spin_unlock_irqrestore(&smi_info->count_lock, flags); 901 goto do_add_timer; 902 } 903 904 /* If the state machine asks for a short delay, then shorten 905 the timer timeout. */ 906 if (smi_result == SI_SM_CALL_WITH_DELAY) { 907 #if defined(CONFIG_HIGH_RES_TIMERS) 908 unsigned long seq; 909 #endif 910 spin_lock_irqsave(&smi_info->count_lock, flags); 911 smi_info->short_timeouts++; 912 spin_unlock_irqrestore(&smi_info->count_lock, flags); 913 #if defined(CONFIG_HIGH_RES_TIMERS) 914 do { 915 seq = read_seqbegin_irqsave(&xtime_lock, flags); 916 smi_info->si_timer.expires = jiffies; 917 smi_info->si_timer.arch_cycle_expires 918 = get_arch_cycles(smi_info->si_timer.expires); 919 } while (read_seqretry_irqrestore(&xtime_lock, seq, flags)); 920 add_usec_to_timer(&smi_info->si_timer, SI_SHORT_TIMEOUT_USEC); 921 #else 922 smi_info->si_timer.expires = jiffies + 1; 923 #endif 924 } else { 925 spin_lock_irqsave(&smi_info->count_lock, flags); 926 smi_info->long_timeouts++; 927 spin_unlock_irqrestore(&smi_info->count_lock, flags); 928 smi_info->si_timer.expires = jiffies + SI_TIMEOUT_JIFFIES; 929 #if defined(CONFIG_HIGH_RES_TIMERS) 930 smi_info->si_timer.arch_cycle_expires = 0; 931 #endif 932 } 933 934 do_add_timer: 935 add_timer(&(smi_info->si_timer)); 936 } 937 938 static irqreturn_t si_irq_handler(int irq, void *data, struct pt_regs *regs) 939 { 940 struct smi_info *smi_info = data; 941 unsigned long flags; 942 #ifdef DEBUG_TIMING 943 struct timeval t; 944 #endif 945 946 spin_lock_irqsave(&(smi_info->si_lock), flags); 947 948 spin_lock(&smi_info->count_lock); 949 smi_info->interrupts++; 950 spin_unlock(&smi_info->count_lock); 951 952 if (atomic_read(&smi_info->stop_operation)) 953 goto out; 954 955 #ifdef DEBUG_TIMING 956 do_gettimeofday(&t); 957 printk("**Interrupt: %d.%9.9d\n", t.tv_sec, t.tv_usec); 958 #endif 959 smi_event_handler(smi_info, 0); 960 out: 961 spin_unlock_irqrestore(&(smi_info->si_lock), flags); 962 return IRQ_HANDLED; 963 } 964 965 static irqreturn_t si_bt_irq_handler(int irq, void *data, struct pt_regs *regs) 966 { 967 struct smi_info *smi_info = data; 968 /* We need to clear the IRQ flag for the BT interface. */ 969 smi_info->io.outputb(&smi_info->io, IPMI_BT_INTMASK_REG, 970 IPMI_BT_INTMASK_CLEAR_IRQ_BIT 971 | IPMI_BT_INTMASK_ENABLE_IRQ_BIT); 972 return si_irq_handler(irq, data, regs); 973 } 974 975 static int smi_start_processing(void *send_info, 976 ipmi_smi_t intf) 977 { 978 struct smi_info *new_smi = send_info; 979 980 new_smi->intf = intf; 981 982 /* Set up the timer that drives the interface. */ 983 setup_timer(&new_smi->si_timer, smi_timeout, (long)new_smi); 984 new_smi->last_timeout_jiffies = jiffies; 985 mod_timer(&new_smi->si_timer, jiffies + SI_TIMEOUT_JIFFIES); 986 987 if (new_smi->si_type != SI_BT) { 988 new_smi->thread = kthread_run(ipmi_thread, new_smi, 989 "kipmi%d", new_smi->intf_num); 990 if (IS_ERR(new_smi->thread)) { 991 printk(KERN_NOTICE "ipmi_si_intf: Could not start" 992 " kernel thread due to error %ld, only using" 993 " timers to drive the interface\n", 994 PTR_ERR(new_smi->thread)); 995 new_smi->thread = NULL; 996 } 997 } 998 999 return 0; 1000 } 1001 1002 static struct ipmi_smi_handlers handlers = 1003 { 1004 .owner = THIS_MODULE, 1005 .start_processing = smi_start_processing, 1006 .sender = sender, 1007 .request_events = request_events, 1008 .set_run_to_completion = set_run_to_completion, 1009 .poll = poll, 1010 }; 1011 1012 /* There can be 4 IO ports passed in (with or without IRQs), 4 addresses, 1013 a default IO port, and 1 ACPI/SPMI address. That sets SI_MAX_DRIVERS */ 1014 1015 #define SI_MAX_PARMS 4 1016 static LIST_HEAD(smi_infos); 1017 static DEFINE_MUTEX(smi_infos_lock); 1018 static int smi_num; /* Used to sequence the SMIs */ 1019 1020 #define DEFAULT_REGSPACING 1 1021 1022 static int si_trydefaults = 1; 1023 static char *si_type[SI_MAX_PARMS]; 1024 #define MAX_SI_TYPE_STR 30 1025 static char si_type_str[MAX_SI_TYPE_STR]; 1026 static unsigned long addrs[SI_MAX_PARMS]; 1027 static int num_addrs; 1028 static unsigned int ports[SI_MAX_PARMS]; 1029 static int num_ports; 1030 static int irqs[SI_MAX_PARMS]; 1031 static int num_irqs; 1032 static int regspacings[SI_MAX_PARMS]; 1033 static int num_regspacings = 0; 1034 static int regsizes[SI_MAX_PARMS]; 1035 static int num_regsizes = 0; 1036 static int regshifts[SI_MAX_PARMS]; 1037 static int num_regshifts = 0; 1038 static int slave_addrs[SI_MAX_PARMS]; 1039 static int num_slave_addrs = 0; 1040 1041 1042 module_param_named(trydefaults, si_trydefaults, bool, 0); 1043 MODULE_PARM_DESC(trydefaults, "Setting this to 'false' will disable the" 1044 " default scan of the KCS and SMIC interface at the standard" 1045 " address"); 1046 module_param_string(type, si_type_str, MAX_SI_TYPE_STR, 0); 1047 MODULE_PARM_DESC(type, "Defines the type of each interface, each" 1048 " interface separated by commas. The types are 'kcs'," 1049 " 'smic', and 'bt'. For example si_type=kcs,bt will set" 1050 " the first interface to kcs and the second to bt"); 1051 module_param_array(addrs, long, &num_addrs, 0); 1052 MODULE_PARM_DESC(addrs, "Sets the memory address of each interface, the" 1053 " addresses separated by commas. Only use if an interface" 1054 " is in memory. Otherwise, set it to zero or leave" 1055 " it blank."); 1056 module_param_array(ports, int, &num_ports, 0); 1057 MODULE_PARM_DESC(ports, "Sets the port address of each interface, the" 1058 " addresses separated by commas. Only use if an interface" 1059 " is a port. Otherwise, set it to zero or leave" 1060 " it blank."); 1061 module_param_array(irqs, int, &num_irqs, 0); 1062 MODULE_PARM_DESC(irqs, "Sets the interrupt of each interface, the" 1063 " addresses separated by commas. Only use if an interface" 1064 " has an interrupt. Otherwise, set it to zero or leave" 1065 " it blank."); 1066 module_param_array(regspacings, int, &num_regspacings, 0); 1067 MODULE_PARM_DESC(regspacings, "The number of bytes between the start address" 1068 " and each successive register used by the interface. For" 1069 " instance, if the start address is 0xca2 and the spacing" 1070 " is 2, then the second address is at 0xca4. Defaults" 1071 " to 1."); 1072 module_param_array(regsizes, int, &num_regsizes, 0); 1073 MODULE_PARM_DESC(regsizes, "The size of the specific IPMI register in bytes." 1074 " This should generally be 1, 2, 4, or 8 for an 8-bit," 1075 " 16-bit, 32-bit, or 64-bit register. Use this if you" 1076 " the 8-bit IPMI register has to be read from a larger" 1077 " register."); 1078 module_param_array(regshifts, int, &num_regshifts, 0); 1079 MODULE_PARM_DESC(regshifts, "The amount to shift the data read from the." 1080 " IPMI register, in bits. For instance, if the data" 1081 " is read from a 32-bit word and the IPMI data is in" 1082 " bit 8-15, then the shift would be 8"); 1083 module_param_array(slave_addrs, int, &num_slave_addrs, 0); 1084 MODULE_PARM_DESC(slave_addrs, "Set the default IPMB slave address for" 1085 " the controller. Normally this is 0x20, but can be" 1086 " overridden by this parm. This is an array indexed" 1087 " by interface number."); 1088 1089 1090 #define IPMI_IO_ADDR_SPACE 0 1091 #define IPMI_MEM_ADDR_SPACE 1 1092 static char *addr_space_to_str[] = { "I/O", "memory" }; 1093 1094 static void std_irq_cleanup(struct smi_info *info) 1095 { 1096 if (info->si_type == SI_BT) 1097 /* Disable the interrupt in the BT interface. */ 1098 info->io.outputb(&info->io, IPMI_BT_INTMASK_REG, 0); 1099 free_irq(info->irq, info); 1100 } 1101 1102 static int std_irq_setup(struct smi_info *info) 1103 { 1104 int rv; 1105 1106 if (!info->irq) 1107 return 0; 1108 1109 if (info->si_type == SI_BT) { 1110 rv = request_irq(info->irq, 1111 si_bt_irq_handler, 1112 SA_INTERRUPT, 1113 DEVICE_NAME, 1114 info); 1115 if (!rv) 1116 /* Enable the interrupt in the BT interface. */ 1117 info->io.outputb(&info->io, IPMI_BT_INTMASK_REG, 1118 IPMI_BT_INTMASK_ENABLE_IRQ_BIT); 1119 } else 1120 rv = request_irq(info->irq, 1121 si_irq_handler, 1122 SA_INTERRUPT, 1123 DEVICE_NAME, 1124 info); 1125 if (rv) { 1126 printk(KERN_WARNING 1127 "ipmi_si: %s unable to claim interrupt %d," 1128 " running polled\n", 1129 DEVICE_NAME, info->irq); 1130 info->irq = 0; 1131 } else { 1132 info->irq_cleanup = std_irq_cleanup; 1133 printk(" Using irq %d\n", info->irq); 1134 } 1135 1136 return rv; 1137 } 1138 1139 static unsigned char port_inb(struct si_sm_io *io, unsigned int offset) 1140 { 1141 unsigned int addr = io->addr_data; 1142 1143 return inb(addr + (offset * io->regspacing)); 1144 } 1145 1146 static void port_outb(struct si_sm_io *io, unsigned int offset, 1147 unsigned char b) 1148 { 1149 unsigned int addr = io->addr_data; 1150 1151 outb(b, addr + (offset * io->regspacing)); 1152 } 1153 1154 static unsigned char port_inw(struct si_sm_io *io, unsigned int offset) 1155 { 1156 unsigned int addr = io->addr_data; 1157 1158 return (inw(addr + (offset * io->regspacing)) >> io->regshift) & 0xff; 1159 } 1160 1161 static void port_outw(struct si_sm_io *io, unsigned int offset, 1162 unsigned char b) 1163 { 1164 unsigned int addr = io->addr_data; 1165 1166 outw(b << io->regshift, addr + (offset * io->regspacing)); 1167 } 1168 1169 static unsigned char port_inl(struct si_sm_io *io, unsigned int offset) 1170 { 1171 unsigned int addr = io->addr_data; 1172 1173 return (inl(addr + (offset * io->regspacing)) >> io->regshift) & 0xff; 1174 } 1175 1176 static void port_outl(struct si_sm_io *io, unsigned int offset, 1177 unsigned char b) 1178 { 1179 unsigned int addr = io->addr_data; 1180 1181 outl(b << io->regshift, addr+(offset * io->regspacing)); 1182 } 1183 1184 static void port_cleanup(struct smi_info *info) 1185 { 1186 unsigned int addr = info->io.addr_data; 1187 int mapsize; 1188 1189 if (addr) { 1190 mapsize = ((info->io_size * info->io.regspacing) 1191 - (info->io.regspacing - info->io.regsize)); 1192 1193 release_region (addr, mapsize); 1194 } 1195 } 1196 1197 static int port_setup(struct smi_info *info) 1198 { 1199 unsigned int addr = info->io.addr_data; 1200 int mapsize; 1201 1202 if (!addr) 1203 return -ENODEV; 1204 1205 info->io_cleanup = port_cleanup; 1206 1207 /* Figure out the actual inb/inw/inl/etc routine to use based 1208 upon the register size. */ 1209 switch (info->io.regsize) { 1210 case 1: 1211 info->io.inputb = port_inb; 1212 info->io.outputb = port_outb; 1213 break; 1214 case 2: 1215 info->io.inputb = port_inw; 1216 info->io.outputb = port_outw; 1217 break; 1218 case 4: 1219 info->io.inputb = port_inl; 1220 info->io.outputb = port_outl; 1221 break; 1222 default: 1223 printk("ipmi_si: Invalid register size: %d\n", 1224 info->io.regsize); 1225 return -EINVAL; 1226 } 1227 1228 /* Calculate the total amount of memory to claim. This is an 1229 * unusual looking calculation, but it avoids claiming any 1230 * more memory than it has to. It will claim everything 1231 * between the first address to the end of the last full 1232 * register. */ 1233 mapsize = ((info->io_size * info->io.regspacing) 1234 - (info->io.regspacing - info->io.regsize)); 1235 1236 if (request_region(addr, mapsize, DEVICE_NAME) == NULL) 1237 return -EIO; 1238 return 0; 1239 } 1240 1241 static unsigned char intf_mem_inb(struct si_sm_io *io, unsigned int offset) 1242 { 1243 return readb((io->addr)+(offset * io->regspacing)); 1244 } 1245 1246 static void intf_mem_outb(struct si_sm_io *io, unsigned int offset, 1247 unsigned char b) 1248 { 1249 writeb(b, (io->addr)+(offset * io->regspacing)); 1250 } 1251 1252 static unsigned char intf_mem_inw(struct si_sm_io *io, unsigned int offset) 1253 { 1254 return (readw((io->addr)+(offset * io->regspacing)) >> io->regshift) 1255 && 0xff; 1256 } 1257 1258 static void intf_mem_outw(struct si_sm_io *io, unsigned int offset, 1259 unsigned char b) 1260 { 1261 writeb(b << io->regshift, (io->addr)+(offset * io->regspacing)); 1262 } 1263 1264 static unsigned char intf_mem_inl(struct si_sm_io *io, unsigned int offset) 1265 { 1266 return (readl((io->addr)+(offset * io->regspacing)) >> io->regshift) 1267 && 0xff; 1268 } 1269 1270 static void intf_mem_outl(struct si_sm_io *io, unsigned int offset, 1271 unsigned char b) 1272 { 1273 writel(b << io->regshift, (io->addr)+(offset * io->regspacing)); 1274 } 1275 1276 #ifdef readq 1277 static unsigned char mem_inq(struct si_sm_io *io, unsigned int offset) 1278 { 1279 return (readq((io->addr)+(offset * io->regspacing)) >> io->regshift) 1280 && 0xff; 1281 } 1282 1283 static void mem_outq(struct si_sm_io *io, unsigned int offset, 1284 unsigned char b) 1285 { 1286 writeq(b << io->regshift, (io->addr)+(offset * io->regspacing)); 1287 } 1288 #endif 1289 1290 static void mem_cleanup(struct smi_info *info) 1291 { 1292 unsigned long addr = info->io.addr_data; 1293 int mapsize; 1294 1295 if (info->io.addr) { 1296 iounmap(info->io.addr); 1297 1298 mapsize = ((info->io_size * info->io.regspacing) 1299 - (info->io.regspacing - info->io.regsize)); 1300 1301 release_mem_region(addr, mapsize); 1302 } 1303 } 1304 1305 static int mem_setup(struct smi_info *info) 1306 { 1307 unsigned long addr = info->io.addr_data; 1308 int mapsize; 1309 1310 if (!addr) 1311 return -ENODEV; 1312 1313 info->io_cleanup = mem_cleanup; 1314 1315 /* Figure out the actual readb/readw/readl/etc routine to use based 1316 upon the register size. */ 1317 switch (info->io.regsize) { 1318 case 1: 1319 info->io.inputb = intf_mem_inb; 1320 info->io.outputb = intf_mem_outb; 1321 break; 1322 case 2: 1323 info->io.inputb = intf_mem_inw; 1324 info->io.outputb = intf_mem_outw; 1325 break; 1326 case 4: 1327 info->io.inputb = intf_mem_inl; 1328 info->io.outputb = intf_mem_outl; 1329 break; 1330 #ifdef readq 1331 case 8: 1332 info->io.inputb = mem_inq; 1333 info->io.outputb = mem_outq; 1334 break; 1335 #endif 1336 default: 1337 printk("ipmi_si: Invalid register size: %d\n", 1338 info->io.regsize); 1339 return -EINVAL; 1340 } 1341 1342 /* Calculate the total amount of memory to claim. This is an 1343 * unusual looking calculation, but it avoids claiming any 1344 * more memory than it has to. It will claim everything 1345 * between the first address to the end of the last full 1346 * register. */ 1347 mapsize = ((info->io_size * info->io.regspacing) 1348 - (info->io.regspacing - info->io.regsize)); 1349 1350 if (request_mem_region(addr, mapsize, DEVICE_NAME) == NULL) 1351 return -EIO; 1352 1353 info->io.addr = ioremap(addr, mapsize); 1354 if (info->io.addr == NULL) { 1355 release_mem_region(addr, mapsize); 1356 return -EIO; 1357 } 1358 return 0; 1359 } 1360 1361 1362 static __devinit void hardcode_find_bmc(void) 1363 { 1364 int i; 1365 struct smi_info *info; 1366 1367 for (i = 0; i < SI_MAX_PARMS; i++) { 1368 if (!ports[i] && !addrs[i]) 1369 continue; 1370 1371 info = kzalloc(sizeof(*info), GFP_KERNEL); 1372 if (!info) 1373 return; 1374 1375 info->addr_source = "hardcoded"; 1376 1377 if (!si_type[i] || strcmp(si_type[i], "kcs") == 0) { 1378 info->si_type = SI_KCS; 1379 } else if (strcmp(si_type[i], "smic") == 0) { 1380 info->si_type = SI_SMIC; 1381 } else if (strcmp(si_type[i], "bt") == 0) { 1382 info->si_type = SI_BT; 1383 } else { 1384 printk(KERN_WARNING 1385 "ipmi_si: Interface type specified " 1386 "for interface %d, was invalid: %s\n", 1387 i, si_type[i]); 1388 kfree(info); 1389 continue; 1390 } 1391 1392 if (ports[i]) { 1393 /* An I/O port */ 1394 info->io_setup = port_setup; 1395 info->io.addr_data = ports[i]; 1396 info->io.addr_type = IPMI_IO_ADDR_SPACE; 1397 } else if (addrs[i]) { 1398 /* A memory port */ 1399 info->io_setup = mem_setup; 1400 info->io.addr_data = addrs[i]; 1401 info->io.addr_type = IPMI_MEM_ADDR_SPACE; 1402 } else { 1403 printk(KERN_WARNING 1404 "ipmi_si: Interface type specified " 1405 "for interface %d, " 1406 "but port and address were not set or " 1407 "set to zero.\n", i); 1408 kfree(info); 1409 continue; 1410 } 1411 1412 info->io.addr = NULL; 1413 info->io.regspacing = regspacings[i]; 1414 if (!info->io.regspacing) 1415 info->io.regspacing = DEFAULT_REGSPACING; 1416 info->io.regsize = regsizes[i]; 1417 if (!info->io.regsize) 1418 info->io.regsize = DEFAULT_REGSPACING; 1419 info->io.regshift = regshifts[i]; 1420 info->irq = irqs[i]; 1421 if (info->irq) 1422 info->irq_setup = std_irq_setup; 1423 1424 try_smi_init(info); 1425 } 1426 } 1427 1428 #ifdef CONFIG_ACPI 1429 1430 #include <linux/acpi.h> 1431 1432 /* Once we get an ACPI failure, we don't try any more, because we go 1433 through the tables sequentially. Once we don't find a table, there 1434 are no more. */ 1435 static int acpi_failure = 0; 1436 1437 /* For GPE-type interrupts. */ 1438 static u32 ipmi_acpi_gpe(void *context) 1439 { 1440 struct smi_info *smi_info = context; 1441 unsigned long flags; 1442 #ifdef DEBUG_TIMING 1443 struct timeval t; 1444 #endif 1445 1446 spin_lock_irqsave(&(smi_info->si_lock), flags); 1447 1448 spin_lock(&smi_info->count_lock); 1449 smi_info->interrupts++; 1450 spin_unlock(&smi_info->count_lock); 1451 1452 if (atomic_read(&smi_info->stop_operation)) 1453 goto out; 1454 1455 #ifdef DEBUG_TIMING 1456 do_gettimeofday(&t); 1457 printk("**ACPI_GPE: %d.%9.9d\n", t.tv_sec, t.tv_usec); 1458 #endif 1459 smi_event_handler(smi_info, 0); 1460 out: 1461 spin_unlock_irqrestore(&(smi_info->si_lock), flags); 1462 1463 return ACPI_INTERRUPT_HANDLED; 1464 } 1465 1466 static void acpi_gpe_irq_cleanup(struct smi_info *info) 1467 { 1468 if (!info->irq) 1469 return; 1470 1471 acpi_remove_gpe_handler(NULL, info->irq, &ipmi_acpi_gpe); 1472 } 1473 1474 static int acpi_gpe_irq_setup(struct smi_info *info) 1475 { 1476 acpi_status status; 1477 1478 if (!info->irq) 1479 return 0; 1480 1481 /* FIXME - is level triggered right? */ 1482 status = acpi_install_gpe_handler(NULL, 1483 info->irq, 1484 ACPI_GPE_LEVEL_TRIGGERED, 1485 &ipmi_acpi_gpe, 1486 info); 1487 if (status != AE_OK) { 1488 printk(KERN_WARNING 1489 "ipmi_si: %s unable to claim ACPI GPE %d," 1490 " running polled\n", 1491 DEVICE_NAME, info->irq); 1492 info->irq = 0; 1493 return -EINVAL; 1494 } else { 1495 info->irq_cleanup = acpi_gpe_irq_cleanup; 1496 printk(" Using ACPI GPE %d\n", info->irq); 1497 return 0; 1498 } 1499 } 1500 1501 /* 1502 * Defined at 1503 * http://h21007.www2.hp.com/dspp/files/unprotected/devresource/Docs/TechPapers/IA64/hpspmi.pdf 1504 */ 1505 struct SPMITable { 1506 s8 Signature[4]; 1507 u32 Length; 1508 u8 Revision; 1509 u8 Checksum; 1510 s8 OEMID[6]; 1511 s8 OEMTableID[8]; 1512 s8 OEMRevision[4]; 1513 s8 CreatorID[4]; 1514 s8 CreatorRevision[4]; 1515 u8 InterfaceType; 1516 u8 IPMIlegacy; 1517 s16 SpecificationRevision; 1518 1519 /* 1520 * Bit 0 - SCI interrupt supported 1521 * Bit 1 - I/O APIC/SAPIC 1522 */ 1523 u8 InterruptType; 1524 1525 /* If bit 0 of InterruptType is set, then this is the SCI 1526 interrupt in the GPEx_STS register. */ 1527 u8 GPE; 1528 1529 s16 Reserved; 1530 1531 /* If bit 1 of InterruptType is set, then this is the I/O 1532 APIC/SAPIC interrupt. */ 1533 u32 GlobalSystemInterrupt; 1534 1535 /* The actual register address. */ 1536 struct acpi_generic_address addr; 1537 1538 u8 UID[4]; 1539 1540 s8 spmi_id[1]; /* A '\0' terminated array starts here. */ 1541 }; 1542 1543 static __devinit int try_init_acpi(struct SPMITable *spmi) 1544 { 1545 struct smi_info *info; 1546 char *io_type; 1547 u8 addr_space; 1548 1549 if (spmi->IPMIlegacy != 1) { 1550 printk(KERN_INFO "IPMI: Bad SPMI legacy %d\n", spmi->IPMIlegacy); 1551 return -ENODEV; 1552 } 1553 1554 if (spmi->addr.address_space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) 1555 addr_space = IPMI_MEM_ADDR_SPACE; 1556 else 1557 addr_space = IPMI_IO_ADDR_SPACE; 1558 1559 info = kzalloc(sizeof(*info), GFP_KERNEL); 1560 if (!info) { 1561 printk(KERN_ERR "ipmi_si: Could not allocate SI data (3)\n"); 1562 return -ENOMEM; 1563 } 1564 1565 info->addr_source = "ACPI"; 1566 1567 /* Figure out the interface type. */ 1568 switch (spmi->InterfaceType) 1569 { 1570 case 1: /* KCS */ 1571 info->si_type = SI_KCS; 1572 break; 1573 case 2: /* SMIC */ 1574 info->si_type = SI_SMIC; 1575 break; 1576 case 3: /* BT */ 1577 info->si_type = SI_BT; 1578 break; 1579 default: 1580 printk(KERN_INFO "ipmi_si: Unknown ACPI/SPMI SI type %d\n", 1581 spmi->InterfaceType); 1582 kfree(info); 1583 return -EIO; 1584 } 1585 1586 if (spmi->InterruptType & 1) { 1587 /* We've got a GPE interrupt. */ 1588 info->irq = spmi->GPE; 1589 info->irq_setup = acpi_gpe_irq_setup; 1590 } else if (spmi->InterruptType & 2) { 1591 /* We've got an APIC/SAPIC interrupt. */ 1592 info->irq = spmi->GlobalSystemInterrupt; 1593 info->irq_setup = std_irq_setup; 1594 } else { 1595 /* Use the default interrupt setting. */ 1596 info->irq = 0; 1597 info->irq_setup = NULL; 1598 } 1599 1600 if (spmi->addr.register_bit_width) { 1601 /* A (hopefully) properly formed register bit width. */ 1602 info->io.regspacing = spmi->addr.register_bit_width / 8; 1603 } else { 1604 info->io.regspacing = DEFAULT_REGSPACING; 1605 } 1606 info->io.regsize = info->io.regspacing; 1607 info->io.regshift = spmi->addr.register_bit_offset; 1608 1609 if (spmi->addr.address_space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) { 1610 io_type = "memory"; 1611 info->io_setup = mem_setup; 1612 info->io.addr_type = IPMI_IO_ADDR_SPACE; 1613 } else if (spmi->addr.address_space_id == ACPI_ADR_SPACE_SYSTEM_IO) { 1614 io_type = "I/O"; 1615 info->io_setup = port_setup; 1616 info->io.addr_type = IPMI_MEM_ADDR_SPACE; 1617 } else { 1618 kfree(info); 1619 printk("ipmi_si: Unknown ACPI I/O Address type\n"); 1620 return -EIO; 1621 } 1622 info->io.addr_data = spmi->addr.address; 1623 1624 try_smi_init(info); 1625 1626 return 0; 1627 } 1628 1629 static __devinit void acpi_find_bmc(void) 1630 { 1631 acpi_status status; 1632 struct SPMITable *spmi; 1633 int i; 1634 1635 if (acpi_disabled) 1636 return; 1637 1638 if (acpi_failure) 1639 return; 1640 1641 for (i = 0; ; i++) { 1642 status = acpi_get_firmware_table("SPMI", i+1, 1643 ACPI_LOGICAL_ADDRESSING, 1644 (struct acpi_table_header **) 1645 &spmi); 1646 if (status != AE_OK) 1647 return; 1648 1649 try_init_acpi(spmi); 1650 } 1651 } 1652 #endif 1653 1654 #ifdef CONFIG_DMI 1655 struct dmi_ipmi_data 1656 { 1657 u8 type; 1658 u8 addr_space; 1659 unsigned long base_addr; 1660 u8 irq; 1661 u8 offset; 1662 u8 slave_addr; 1663 }; 1664 1665 static int __devinit decode_dmi(struct dmi_header *dm, 1666 struct dmi_ipmi_data *dmi) 1667 { 1668 u8 *data = (u8 *)dm; 1669 unsigned long base_addr; 1670 u8 reg_spacing; 1671 u8 len = dm->length; 1672 1673 dmi->type = data[4]; 1674 1675 memcpy(&base_addr, data+8, sizeof(unsigned long)); 1676 if (len >= 0x11) { 1677 if (base_addr & 1) { 1678 /* I/O */ 1679 base_addr &= 0xFFFE; 1680 dmi->addr_space = IPMI_IO_ADDR_SPACE; 1681 } 1682 else { 1683 /* Memory */ 1684 dmi->addr_space = IPMI_MEM_ADDR_SPACE; 1685 } 1686 /* If bit 4 of byte 0x10 is set, then the lsb for the address 1687 is odd. */ 1688 dmi->base_addr = base_addr | ((data[0x10] & 0x10) >> 4); 1689 1690 dmi->irq = data[0x11]; 1691 1692 /* The top two bits of byte 0x10 hold the register spacing. */ 1693 reg_spacing = (data[0x10] & 0xC0) >> 6; 1694 switch(reg_spacing){ 1695 case 0x00: /* Byte boundaries */ 1696 dmi->offset = 1; 1697 break; 1698 case 0x01: /* 32-bit boundaries */ 1699 dmi->offset = 4; 1700 break; 1701 case 0x02: /* 16-byte boundaries */ 1702 dmi->offset = 16; 1703 break; 1704 default: 1705 /* Some other interface, just ignore it. */ 1706 return -EIO; 1707 } 1708 } else { 1709 /* Old DMI spec. */ 1710 /* Note that technically, the lower bit of the base 1711 * address should be 1 if the address is I/O and 0 if 1712 * the address is in memory. So many systems get that 1713 * wrong (and all that I have seen are I/O) so we just 1714 * ignore that bit and assume I/O. Systems that use 1715 * memory should use the newer spec, anyway. */ 1716 dmi->base_addr = base_addr & 0xfffe; 1717 dmi->addr_space = IPMI_IO_ADDR_SPACE; 1718 dmi->offset = 1; 1719 } 1720 1721 dmi->slave_addr = data[6]; 1722 1723 return 0; 1724 } 1725 1726 static __devinit void try_init_dmi(struct dmi_ipmi_data *ipmi_data) 1727 { 1728 struct smi_info *info; 1729 1730 info = kzalloc(sizeof(*info), GFP_KERNEL); 1731 if (!info) { 1732 printk(KERN_ERR 1733 "ipmi_si: Could not allocate SI data\n"); 1734 return; 1735 } 1736 1737 info->addr_source = "SMBIOS"; 1738 1739 switch (ipmi_data->type) { 1740 case 0x01: /* KCS */ 1741 info->si_type = SI_KCS; 1742 break; 1743 case 0x02: /* SMIC */ 1744 info->si_type = SI_SMIC; 1745 break; 1746 case 0x03: /* BT */ 1747 info->si_type = SI_BT; 1748 break; 1749 default: 1750 return; 1751 } 1752 1753 switch (ipmi_data->addr_space) { 1754 case IPMI_MEM_ADDR_SPACE: 1755 info->io_setup = mem_setup; 1756 info->io.addr_type = IPMI_MEM_ADDR_SPACE; 1757 break; 1758 1759 case IPMI_IO_ADDR_SPACE: 1760 info->io_setup = port_setup; 1761 info->io.addr_type = IPMI_IO_ADDR_SPACE; 1762 break; 1763 1764 default: 1765 kfree(info); 1766 printk(KERN_WARNING 1767 "ipmi_si: Unknown SMBIOS I/O Address type: %d.\n", 1768 ipmi_data->addr_space); 1769 return; 1770 } 1771 info->io.addr_data = ipmi_data->base_addr; 1772 1773 info->io.regspacing = ipmi_data->offset; 1774 if (!info->io.regspacing) 1775 info->io.regspacing = DEFAULT_REGSPACING; 1776 info->io.regsize = DEFAULT_REGSPACING; 1777 info->io.regshift = 0; 1778 1779 info->slave_addr = ipmi_data->slave_addr; 1780 1781 info->irq = ipmi_data->irq; 1782 if (info->irq) 1783 info->irq_setup = std_irq_setup; 1784 1785 try_smi_init(info); 1786 } 1787 1788 static void __devinit dmi_find_bmc(void) 1789 { 1790 struct dmi_device *dev = NULL; 1791 struct dmi_ipmi_data data; 1792 int rv; 1793 1794 while ((dev = dmi_find_device(DMI_DEV_TYPE_IPMI, NULL, dev))) { 1795 rv = decode_dmi((struct dmi_header *) dev->device_data, &data); 1796 if (!rv) 1797 try_init_dmi(&data); 1798 } 1799 } 1800 #endif /* CONFIG_DMI */ 1801 1802 #ifdef CONFIG_PCI 1803 1804 #define PCI_ERMC_CLASSCODE 0x0C0700 1805 #define PCI_ERMC_CLASSCODE_MASK 0xffffff00 1806 #define PCI_ERMC_CLASSCODE_TYPE_MASK 0xff 1807 #define PCI_ERMC_CLASSCODE_TYPE_SMIC 0x00 1808 #define PCI_ERMC_CLASSCODE_TYPE_KCS 0x01 1809 #define PCI_ERMC_CLASSCODE_TYPE_BT 0x02 1810 1811 #define PCI_HP_VENDOR_ID 0x103C 1812 #define PCI_MMC_DEVICE_ID 0x121A 1813 #define PCI_MMC_ADDR_CW 0x10 1814 1815 static void ipmi_pci_cleanup(struct smi_info *info) 1816 { 1817 struct pci_dev *pdev = info->addr_source_data; 1818 1819 pci_disable_device(pdev); 1820 } 1821 1822 static int __devinit ipmi_pci_probe(struct pci_dev *pdev, 1823 const struct pci_device_id *ent) 1824 { 1825 int rv; 1826 int class_type = pdev->class & PCI_ERMC_CLASSCODE_TYPE_MASK; 1827 struct smi_info *info; 1828 int first_reg_offset = 0; 1829 1830 info = kzalloc(sizeof(*info), GFP_KERNEL); 1831 if (!info) 1832 return ENOMEM; 1833 1834 info->addr_source = "PCI"; 1835 1836 switch (class_type) { 1837 case PCI_ERMC_CLASSCODE_TYPE_SMIC: 1838 info->si_type = SI_SMIC; 1839 break; 1840 1841 case PCI_ERMC_CLASSCODE_TYPE_KCS: 1842 info->si_type = SI_KCS; 1843 break; 1844 1845 case PCI_ERMC_CLASSCODE_TYPE_BT: 1846 info->si_type = SI_BT; 1847 break; 1848 1849 default: 1850 kfree(info); 1851 printk(KERN_INFO "ipmi_si: %s: Unknown IPMI type: %d\n", 1852 pci_name(pdev), class_type); 1853 return ENOMEM; 1854 } 1855 1856 rv = pci_enable_device(pdev); 1857 if (rv) { 1858 printk(KERN_ERR "ipmi_si: %s: couldn't enable PCI device\n", 1859 pci_name(pdev)); 1860 kfree(info); 1861 return rv; 1862 } 1863 1864 info->addr_source_cleanup = ipmi_pci_cleanup; 1865 info->addr_source_data = pdev; 1866 1867 if (pdev->subsystem_vendor == PCI_HP_VENDOR_ID) 1868 first_reg_offset = 1; 1869 1870 if (pci_resource_flags(pdev, 0) & IORESOURCE_IO) { 1871 info->io_setup = port_setup; 1872 info->io.addr_type = IPMI_IO_ADDR_SPACE; 1873 } else { 1874 info->io_setup = mem_setup; 1875 info->io.addr_type = IPMI_MEM_ADDR_SPACE; 1876 } 1877 info->io.addr_data = pci_resource_start(pdev, 0); 1878 1879 info->io.regspacing = DEFAULT_REGSPACING; 1880 info->io.regsize = DEFAULT_REGSPACING; 1881 info->io.regshift = 0; 1882 1883 info->irq = pdev->irq; 1884 if (info->irq) 1885 info->irq_setup = std_irq_setup; 1886 1887 info->dev = &pdev->dev; 1888 1889 return try_smi_init(info); 1890 } 1891 1892 static void __devexit ipmi_pci_remove(struct pci_dev *pdev) 1893 { 1894 } 1895 1896 #ifdef CONFIG_PM 1897 static int ipmi_pci_suspend(struct pci_dev *pdev, pm_message_t state) 1898 { 1899 return 0; 1900 } 1901 1902 static int ipmi_pci_resume(struct pci_dev *pdev) 1903 { 1904 return 0; 1905 } 1906 #endif 1907 1908 static struct pci_device_id ipmi_pci_devices[] = { 1909 { PCI_DEVICE(PCI_HP_VENDOR_ID, PCI_MMC_DEVICE_ID) }, 1910 { PCI_DEVICE_CLASS(PCI_ERMC_CLASSCODE, PCI_ERMC_CLASSCODE) } 1911 }; 1912 MODULE_DEVICE_TABLE(pci, ipmi_pci_devices); 1913 1914 static struct pci_driver ipmi_pci_driver = { 1915 .name = DEVICE_NAME, 1916 .id_table = ipmi_pci_devices, 1917 .probe = ipmi_pci_probe, 1918 .remove = __devexit_p(ipmi_pci_remove), 1919 #ifdef CONFIG_PM 1920 .suspend = ipmi_pci_suspend, 1921 .resume = ipmi_pci_resume, 1922 #endif 1923 }; 1924 #endif /* CONFIG_PCI */ 1925 1926 1927 static int try_get_dev_id(struct smi_info *smi_info) 1928 { 1929 unsigned char msg[2]; 1930 unsigned char *resp; 1931 unsigned long resp_len; 1932 enum si_sm_result smi_result; 1933 int rv = 0; 1934 1935 resp = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL); 1936 if (!resp) 1937 return -ENOMEM; 1938 1939 /* Do a Get Device ID command, since it comes back with some 1940 useful info. */ 1941 msg[0] = IPMI_NETFN_APP_REQUEST << 2; 1942 msg[1] = IPMI_GET_DEVICE_ID_CMD; 1943 smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2); 1944 1945 smi_result = smi_info->handlers->event(smi_info->si_sm, 0); 1946 for (;;) 1947 { 1948 if (smi_result == SI_SM_CALL_WITH_DELAY || 1949 smi_result == SI_SM_CALL_WITH_TICK_DELAY) { 1950 schedule_timeout_uninterruptible(1); 1951 smi_result = smi_info->handlers->event( 1952 smi_info->si_sm, 100); 1953 } 1954 else if (smi_result == SI_SM_CALL_WITHOUT_DELAY) 1955 { 1956 smi_result = smi_info->handlers->event( 1957 smi_info->si_sm, 0); 1958 } 1959 else 1960 break; 1961 } 1962 if (smi_result == SI_SM_HOSED) { 1963 /* We couldn't get the state machine to run, so whatever's at 1964 the port is probably not an IPMI SMI interface. */ 1965 rv = -ENODEV; 1966 goto out; 1967 } 1968 1969 /* Otherwise, we got some data. */ 1970 resp_len = smi_info->handlers->get_result(smi_info->si_sm, 1971 resp, IPMI_MAX_MSG_LENGTH); 1972 if (resp_len < 14) { 1973 /* That's odd, it should be longer. */ 1974 rv = -EINVAL; 1975 goto out; 1976 } 1977 1978 if ((resp[1] != IPMI_GET_DEVICE_ID_CMD) || (resp[2] != 0)) { 1979 /* That's odd, it shouldn't be able to fail. */ 1980 rv = -EINVAL; 1981 goto out; 1982 } 1983 1984 /* Record info from the get device id, in case we need it. */ 1985 ipmi_demangle_device_id(resp+3, resp_len-3, &smi_info->device_id); 1986 1987 out: 1988 kfree(resp); 1989 return rv; 1990 } 1991 1992 static int type_file_read_proc(char *page, char **start, off_t off, 1993 int count, int *eof, void *data) 1994 { 1995 char *out = (char *) page; 1996 struct smi_info *smi = data; 1997 1998 switch (smi->si_type) { 1999 case SI_KCS: 2000 return sprintf(out, "kcs\n"); 2001 case SI_SMIC: 2002 return sprintf(out, "smic\n"); 2003 case SI_BT: 2004 return sprintf(out, "bt\n"); 2005 default: 2006 return 0; 2007 } 2008 } 2009 2010 static int stat_file_read_proc(char *page, char **start, off_t off, 2011 int count, int *eof, void *data) 2012 { 2013 char *out = (char *) page; 2014 struct smi_info *smi = data; 2015 2016 out += sprintf(out, "interrupts_enabled: %d\n", 2017 smi->irq && !smi->interrupt_disabled); 2018 out += sprintf(out, "short_timeouts: %ld\n", 2019 smi->short_timeouts); 2020 out += sprintf(out, "long_timeouts: %ld\n", 2021 smi->long_timeouts); 2022 out += sprintf(out, "timeout_restarts: %ld\n", 2023 smi->timeout_restarts); 2024 out += sprintf(out, "idles: %ld\n", 2025 smi->idles); 2026 out += sprintf(out, "interrupts: %ld\n", 2027 smi->interrupts); 2028 out += sprintf(out, "attentions: %ld\n", 2029 smi->attentions); 2030 out += sprintf(out, "flag_fetches: %ld\n", 2031 smi->flag_fetches); 2032 out += sprintf(out, "hosed_count: %ld\n", 2033 smi->hosed_count); 2034 out += sprintf(out, "complete_transactions: %ld\n", 2035 smi->complete_transactions); 2036 out += sprintf(out, "events: %ld\n", 2037 smi->events); 2038 out += sprintf(out, "watchdog_pretimeouts: %ld\n", 2039 smi->watchdog_pretimeouts); 2040 out += sprintf(out, "incoming_messages: %ld\n", 2041 smi->incoming_messages); 2042 2043 return (out - ((char *) page)); 2044 } 2045 2046 /* 2047 * oem_data_avail_to_receive_msg_avail 2048 * @info - smi_info structure with msg_flags set 2049 * 2050 * Converts flags from OEM_DATA_AVAIL to RECEIVE_MSG_AVAIL 2051 * Returns 1 indicating need to re-run handle_flags(). 2052 */ 2053 static int oem_data_avail_to_receive_msg_avail(struct smi_info *smi_info) 2054 { 2055 smi_info->msg_flags = ((smi_info->msg_flags & ~OEM_DATA_AVAIL) | 2056 RECEIVE_MSG_AVAIL); 2057 return 1; 2058 } 2059 2060 /* 2061 * setup_dell_poweredge_oem_data_handler 2062 * @info - smi_info.device_id must be populated 2063 * 2064 * Systems that match, but have firmware version < 1.40 may assert 2065 * OEM0_DATA_AVAIL on their own, without being told via Set Flags that 2066 * it's safe to do so. Such systems will de-assert OEM1_DATA_AVAIL 2067 * upon receipt of IPMI_GET_MSG_CMD, so we should treat these flags 2068 * as RECEIVE_MSG_AVAIL instead. 2069 * 2070 * As Dell has no plans to release IPMI 1.5 firmware that *ever* 2071 * assert the OEM[012] bits, and if it did, the driver would have to 2072 * change to handle that properly, we don't actually check for the 2073 * firmware version. 2074 * Device ID = 0x20 BMC on PowerEdge 8G servers 2075 * Device Revision = 0x80 2076 * Firmware Revision1 = 0x01 BMC version 1.40 2077 * Firmware Revision2 = 0x40 BCD encoded 2078 * IPMI Version = 0x51 IPMI 1.5 2079 * Manufacturer ID = A2 02 00 Dell IANA 2080 * 2081 * Additionally, PowerEdge systems with IPMI < 1.5 may also assert 2082 * OEM0_DATA_AVAIL and needs to be treated as RECEIVE_MSG_AVAIL. 2083 * 2084 */ 2085 #define DELL_POWEREDGE_8G_BMC_DEVICE_ID 0x20 2086 #define DELL_POWEREDGE_8G_BMC_DEVICE_REV 0x80 2087 #define DELL_POWEREDGE_8G_BMC_IPMI_VERSION 0x51 2088 #define DELL_IANA_MFR_ID 0x0002a2 2089 static void setup_dell_poweredge_oem_data_handler(struct smi_info *smi_info) 2090 { 2091 struct ipmi_device_id *id = &smi_info->device_id; 2092 if (id->manufacturer_id == DELL_IANA_MFR_ID) { 2093 if (id->device_id == DELL_POWEREDGE_8G_BMC_DEVICE_ID && 2094 id->device_revision == DELL_POWEREDGE_8G_BMC_DEVICE_REV && 2095 id->ipmi_version == DELL_POWEREDGE_8G_BMC_IPMI_VERSION) { 2096 smi_info->oem_data_avail_handler = 2097 oem_data_avail_to_receive_msg_avail; 2098 } 2099 else if (ipmi_version_major(id) < 1 || 2100 (ipmi_version_major(id) == 1 && 2101 ipmi_version_minor(id) < 5)) { 2102 smi_info->oem_data_avail_handler = 2103 oem_data_avail_to_receive_msg_avail; 2104 } 2105 } 2106 } 2107 2108 #define CANNOT_RETURN_REQUESTED_LENGTH 0xCA 2109 static void return_hosed_msg_badsize(struct smi_info *smi_info) 2110 { 2111 struct ipmi_smi_msg *msg = smi_info->curr_msg; 2112 2113 /* Make it a reponse */ 2114 msg->rsp[0] = msg->data[0] | 4; 2115 msg->rsp[1] = msg->data[1]; 2116 msg->rsp[2] = CANNOT_RETURN_REQUESTED_LENGTH; 2117 msg->rsp_size = 3; 2118 smi_info->curr_msg = NULL; 2119 deliver_recv_msg(smi_info, msg); 2120 } 2121 2122 /* 2123 * dell_poweredge_bt_xaction_handler 2124 * @info - smi_info.device_id must be populated 2125 * 2126 * Dell PowerEdge servers with the BT interface (x6xx and 1750) will 2127 * not respond to a Get SDR command if the length of the data 2128 * requested is exactly 0x3A, which leads to command timeouts and no 2129 * data returned. This intercepts such commands, and causes userspace 2130 * callers to try again with a different-sized buffer, which succeeds. 2131 */ 2132 2133 #define STORAGE_NETFN 0x0A 2134 #define STORAGE_CMD_GET_SDR 0x23 2135 static int dell_poweredge_bt_xaction_handler(struct notifier_block *self, 2136 unsigned long unused, 2137 void *in) 2138 { 2139 struct smi_info *smi_info = in; 2140 unsigned char *data = smi_info->curr_msg->data; 2141 unsigned int size = smi_info->curr_msg->data_size; 2142 if (size >= 8 && 2143 (data[0]>>2) == STORAGE_NETFN && 2144 data[1] == STORAGE_CMD_GET_SDR && 2145 data[7] == 0x3A) { 2146 return_hosed_msg_badsize(smi_info); 2147 return NOTIFY_STOP; 2148 } 2149 return NOTIFY_DONE; 2150 } 2151 2152 static struct notifier_block dell_poweredge_bt_xaction_notifier = { 2153 .notifier_call = dell_poweredge_bt_xaction_handler, 2154 }; 2155 2156 /* 2157 * setup_dell_poweredge_bt_xaction_handler 2158 * @info - smi_info.device_id must be filled in already 2159 * 2160 * Fills in smi_info.device_id.start_transaction_pre_hook 2161 * when we know what function to use there. 2162 */ 2163 static void 2164 setup_dell_poweredge_bt_xaction_handler(struct smi_info *smi_info) 2165 { 2166 struct ipmi_device_id *id = &smi_info->device_id; 2167 if (id->manufacturer_id == DELL_IANA_MFR_ID && 2168 smi_info->si_type == SI_BT) 2169 register_xaction_notifier(&dell_poweredge_bt_xaction_notifier); 2170 } 2171 2172 /* 2173 * setup_oem_data_handler 2174 * @info - smi_info.device_id must be filled in already 2175 * 2176 * Fills in smi_info.device_id.oem_data_available_handler 2177 * when we know what function to use there. 2178 */ 2179 2180 static void setup_oem_data_handler(struct smi_info *smi_info) 2181 { 2182 setup_dell_poweredge_oem_data_handler(smi_info); 2183 } 2184 2185 static void setup_xaction_handlers(struct smi_info *smi_info) 2186 { 2187 setup_dell_poweredge_bt_xaction_handler(smi_info); 2188 } 2189 2190 static inline void wait_for_timer_and_thread(struct smi_info *smi_info) 2191 { 2192 if (smi_info->intf) { 2193 /* The timer and thread are only running if the 2194 interface has been started up and registered. */ 2195 if (smi_info->thread != NULL) 2196 kthread_stop(smi_info->thread); 2197 del_timer_sync(&smi_info->si_timer); 2198 } 2199 } 2200 2201 static __devinitdata struct ipmi_default_vals 2202 { 2203 int type; 2204 int port; 2205 } ipmi_defaults[] = 2206 { 2207 { .type = SI_KCS, .port = 0xca2 }, 2208 { .type = SI_SMIC, .port = 0xca9 }, 2209 { .type = SI_BT, .port = 0xe4 }, 2210 { .port = 0 } 2211 }; 2212 2213 static __devinit void default_find_bmc(void) 2214 { 2215 struct smi_info *info; 2216 int i; 2217 2218 for (i = 0; ; i++) { 2219 if (!ipmi_defaults[i].port) 2220 break; 2221 2222 info = kzalloc(sizeof(*info), GFP_KERNEL); 2223 if (!info) 2224 return; 2225 2226 info->addr_source = NULL; 2227 2228 info->si_type = ipmi_defaults[i].type; 2229 info->io_setup = port_setup; 2230 info->io.addr_data = ipmi_defaults[i].port; 2231 info->io.addr_type = IPMI_IO_ADDR_SPACE; 2232 2233 info->io.addr = NULL; 2234 info->io.regspacing = DEFAULT_REGSPACING; 2235 info->io.regsize = DEFAULT_REGSPACING; 2236 info->io.regshift = 0; 2237 2238 if (try_smi_init(info) == 0) { 2239 /* Found one... */ 2240 printk(KERN_INFO "ipmi_si: Found default %s state" 2241 " machine at %s address 0x%lx\n", 2242 si_to_str[info->si_type], 2243 addr_space_to_str[info->io.addr_type], 2244 info->io.addr_data); 2245 return; 2246 } 2247 } 2248 } 2249 2250 static int is_new_interface(struct smi_info *info) 2251 { 2252 struct smi_info *e; 2253 2254 list_for_each_entry(e, &smi_infos, link) { 2255 if (e->io.addr_type != info->io.addr_type) 2256 continue; 2257 if (e->io.addr_data == info->io.addr_data) 2258 return 0; 2259 } 2260 2261 return 1; 2262 } 2263 2264 static int try_smi_init(struct smi_info *new_smi) 2265 { 2266 int rv; 2267 2268 if (new_smi->addr_source) { 2269 printk(KERN_INFO "ipmi_si: Trying %s-specified %s state" 2270 " machine at %s address 0x%lx, slave address 0x%x," 2271 " irq %d\n", 2272 new_smi->addr_source, 2273 si_to_str[new_smi->si_type], 2274 addr_space_to_str[new_smi->io.addr_type], 2275 new_smi->io.addr_data, 2276 new_smi->slave_addr, new_smi->irq); 2277 } 2278 2279 mutex_lock(&smi_infos_lock); 2280 if (!is_new_interface(new_smi)) { 2281 printk(KERN_WARNING "ipmi_si: duplicate interface\n"); 2282 rv = -EBUSY; 2283 goto out_err; 2284 } 2285 2286 /* So we know not to free it unless we have allocated one. */ 2287 new_smi->intf = NULL; 2288 new_smi->si_sm = NULL; 2289 new_smi->handlers = NULL; 2290 2291 switch (new_smi->si_type) { 2292 case SI_KCS: 2293 new_smi->handlers = &kcs_smi_handlers; 2294 break; 2295 2296 case SI_SMIC: 2297 new_smi->handlers = &smic_smi_handlers; 2298 break; 2299 2300 case SI_BT: 2301 new_smi->handlers = &bt_smi_handlers; 2302 break; 2303 2304 default: 2305 /* No support for anything else yet. */ 2306 rv = -EIO; 2307 goto out_err; 2308 } 2309 2310 /* Allocate the state machine's data and initialize it. */ 2311 new_smi->si_sm = kmalloc(new_smi->handlers->size(), GFP_KERNEL); 2312 if (!new_smi->si_sm) { 2313 printk(" Could not allocate state machine memory\n"); 2314 rv = -ENOMEM; 2315 goto out_err; 2316 } 2317 new_smi->io_size = new_smi->handlers->init_data(new_smi->si_sm, 2318 &new_smi->io); 2319 2320 /* Now that we know the I/O size, we can set up the I/O. */ 2321 rv = new_smi->io_setup(new_smi); 2322 if (rv) { 2323 printk(" Could not set up I/O space\n"); 2324 goto out_err; 2325 } 2326 2327 spin_lock_init(&(new_smi->si_lock)); 2328 spin_lock_init(&(new_smi->msg_lock)); 2329 spin_lock_init(&(new_smi->count_lock)); 2330 2331 /* Do low-level detection first. */ 2332 if (new_smi->handlers->detect(new_smi->si_sm)) { 2333 if (new_smi->addr_source) 2334 printk(KERN_INFO "ipmi_si: Interface detection" 2335 " failed\n"); 2336 rv = -ENODEV; 2337 goto out_err; 2338 } 2339 2340 /* Attempt a get device id command. If it fails, we probably 2341 don't have a BMC here. */ 2342 rv = try_get_dev_id(new_smi); 2343 if (rv) { 2344 if (new_smi->addr_source) 2345 printk(KERN_INFO "ipmi_si: There appears to be no BMC" 2346 " at this location\n"); 2347 goto out_err; 2348 } 2349 2350 setup_oem_data_handler(new_smi); 2351 setup_xaction_handlers(new_smi); 2352 2353 /* Try to claim any interrupts. */ 2354 if (new_smi->irq_setup) 2355 new_smi->irq_setup(new_smi); 2356 2357 INIT_LIST_HEAD(&(new_smi->xmit_msgs)); 2358 INIT_LIST_HEAD(&(new_smi->hp_xmit_msgs)); 2359 new_smi->curr_msg = NULL; 2360 atomic_set(&new_smi->req_events, 0); 2361 new_smi->run_to_completion = 0; 2362 2363 new_smi->interrupt_disabled = 0; 2364 atomic_set(&new_smi->stop_operation, 0); 2365 new_smi->intf_num = smi_num; 2366 smi_num++; 2367 2368 /* Start clearing the flags before we enable interrupts or the 2369 timer to avoid racing with the timer. */ 2370 start_clear_flags(new_smi); 2371 /* IRQ is defined to be set when non-zero. */ 2372 if (new_smi->irq) 2373 new_smi->si_state = SI_CLEARING_FLAGS_THEN_SET_IRQ; 2374 2375 if (!new_smi->dev) { 2376 /* If we don't already have a device from something 2377 * else (like PCI), then register a new one. */ 2378 new_smi->pdev = platform_device_alloc("ipmi_si", 2379 new_smi->intf_num); 2380 if (rv) { 2381 printk(KERN_ERR 2382 "ipmi_si_intf:" 2383 " Unable to allocate platform device\n"); 2384 goto out_err; 2385 } 2386 new_smi->dev = &new_smi->pdev->dev; 2387 new_smi->dev->driver = &ipmi_driver; 2388 2389 rv = platform_device_register(new_smi->pdev); 2390 if (rv) { 2391 printk(KERN_ERR 2392 "ipmi_si_intf:" 2393 " Unable to register system interface device:" 2394 " %d\n", 2395 rv); 2396 goto out_err; 2397 } 2398 new_smi->dev_registered = 1; 2399 } 2400 2401 rv = ipmi_register_smi(&handlers, 2402 new_smi, 2403 &new_smi->device_id, 2404 new_smi->dev, 2405 new_smi->slave_addr); 2406 if (rv) { 2407 printk(KERN_ERR 2408 "ipmi_si: Unable to register device: error %d\n", 2409 rv); 2410 goto out_err_stop_timer; 2411 } 2412 2413 rv = ipmi_smi_add_proc_entry(new_smi->intf, "type", 2414 type_file_read_proc, NULL, 2415 new_smi, THIS_MODULE); 2416 if (rv) { 2417 printk(KERN_ERR 2418 "ipmi_si: Unable to create proc entry: %d\n", 2419 rv); 2420 goto out_err_stop_timer; 2421 } 2422 2423 rv = ipmi_smi_add_proc_entry(new_smi->intf, "si_stats", 2424 stat_file_read_proc, NULL, 2425 new_smi, THIS_MODULE); 2426 if (rv) { 2427 printk(KERN_ERR 2428 "ipmi_si: Unable to create proc entry: %d\n", 2429 rv); 2430 goto out_err_stop_timer; 2431 } 2432 2433 list_add_tail(&new_smi->link, &smi_infos); 2434 2435 mutex_unlock(&smi_infos_lock); 2436 2437 printk(" IPMI %s interface initialized\n",si_to_str[new_smi->si_type]); 2438 2439 return 0; 2440 2441 out_err_stop_timer: 2442 atomic_inc(&new_smi->stop_operation); 2443 wait_for_timer_and_thread(new_smi); 2444 2445 out_err: 2446 if (new_smi->intf) 2447 ipmi_unregister_smi(new_smi->intf); 2448 2449 if (new_smi->irq_cleanup) 2450 new_smi->irq_cleanup(new_smi); 2451 2452 /* Wait until we know that we are out of any interrupt 2453 handlers might have been running before we freed the 2454 interrupt. */ 2455 synchronize_sched(); 2456 2457 if (new_smi->si_sm) { 2458 if (new_smi->handlers) 2459 new_smi->handlers->cleanup(new_smi->si_sm); 2460 kfree(new_smi->si_sm); 2461 } 2462 if (new_smi->addr_source_cleanup) 2463 new_smi->addr_source_cleanup(new_smi); 2464 if (new_smi->io_cleanup) 2465 new_smi->io_cleanup(new_smi); 2466 2467 if (new_smi->dev_registered) 2468 platform_device_unregister(new_smi->pdev); 2469 2470 kfree(new_smi); 2471 2472 mutex_unlock(&smi_infos_lock); 2473 2474 return rv; 2475 } 2476 2477 static __devinit int init_ipmi_si(void) 2478 { 2479 int i; 2480 char *str; 2481 int rv; 2482 2483 if (initialized) 2484 return 0; 2485 initialized = 1; 2486 2487 /* Register the device drivers. */ 2488 rv = driver_register(&ipmi_driver); 2489 if (rv) { 2490 printk(KERN_ERR 2491 "init_ipmi_si: Unable to register driver: %d\n", 2492 rv); 2493 return rv; 2494 } 2495 2496 2497 /* Parse out the si_type string into its components. */ 2498 str = si_type_str; 2499 if (*str != '\0') { 2500 for (i = 0; (i < SI_MAX_PARMS) && (*str != '\0'); i++) { 2501 si_type[i] = str; 2502 str = strchr(str, ','); 2503 if (str) { 2504 *str = '\0'; 2505 str++; 2506 } else { 2507 break; 2508 } 2509 } 2510 } 2511 2512 printk(KERN_INFO "IPMI System Interface driver.\n"); 2513 2514 hardcode_find_bmc(); 2515 2516 #ifdef CONFIG_DMI 2517 dmi_find_bmc(); 2518 #endif 2519 2520 #ifdef CONFIG_ACPI 2521 if (si_trydefaults) 2522 acpi_find_bmc(); 2523 #endif 2524 2525 #ifdef CONFIG_PCI 2526 pci_module_init(&ipmi_pci_driver); 2527 #endif 2528 2529 if (si_trydefaults) { 2530 mutex_lock(&smi_infos_lock); 2531 if (list_empty(&smi_infos)) { 2532 /* No BMC was found, try defaults. */ 2533 mutex_unlock(&smi_infos_lock); 2534 default_find_bmc(); 2535 } else { 2536 mutex_unlock(&smi_infos_lock); 2537 } 2538 } 2539 2540 mutex_lock(&smi_infos_lock); 2541 if (list_empty(&smi_infos)) { 2542 mutex_unlock(&smi_infos_lock); 2543 #ifdef CONFIG_PCI 2544 pci_unregister_driver(&ipmi_pci_driver); 2545 #endif 2546 printk("ipmi_si: Unable to find any System Interface(s)\n"); 2547 return -ENODEV; 2548 } else { 2549 mutex_unlock(&smi_infos_lock); 2550 return 0; 2551 } 2552 } 2553 module_init(init_ipmi_si); 2554 2555 static void __devexit cleanup_one_si(struct smi_info *to_clean) 2556 { 2557 int rv; 2558 unsigned long flags; 2559 2560 if (!to_clean) 2561 return; 2562 2563 list_del(&to_clean->link); 2564 2565 /* Tell the timer and interrupt handlers that we are shutting 2566 down. */ 2567 spin_lock_irqsave(&(to_clean->si_lock), flags); 2568 spin_lock(&(to_clean->msg_lock)); 2569 2570 atomic_inc(&to_clean->stop_operation); 2571 2572 if (to_clean->irq_cleanup) 2573 to_clean->irq_cleanup(to_clean); 2574 2575 spin_unlock(&(to_clean->msg_lock)); 2576 spin_unlock_irqrestore(&(to_clean->si_lock), flags); 2577 2578 /* Wait until we know that we are out of any interrupt 2579 handlers might have been running before we freed the 2580 interrupt. */ 2581 synchronize_sched(); 2582 2583 wait_for_timer_and_thread(to_clean); 2584 2585 /* Interrupts and timeouts are stopped, now make sure the 2586 interface is in a clean state. */ 2587 while (to_clean->curr_msg || (to_clean->si_state != SI_NORMAL)) { 2588 poll(to_clean); 2589 schedule_timeout_uninterruptible(1); 2590 } 2591 2592 rv = ipmi_unregister_smi(to_clean->intf); 2593 if (rv) { 2594 printk(KERN_ERR 2595 "ipmi_si: Unable to unregister device: errno=%d\n", 2596 rv); 2597 } 2598 2599 to_clean->handlers->cleanup(to_clean->si_sm); 2600 2601 kfree(to_clean->si_sm); 2602 2603 if (to_clean->addr_source_cleanup) 2604 to_clean->addr_source_cleanup(to_clean); 2605 if (to_clean->io_cleanup) 2606 to_clean->io_cleanup(to_clean); 2607 2608 if (to_clean->dev_registered) 2609 platform_device_unregister(to_clean->pdev); 2610 2611 kfree(to_clean); 2612 } 2613 2614 static __exit void cleanup_ipmi_si(void) 2615 { 2616 struct smi_info *e, *tmp_e; 2617 2618 if (!initialized) 2619 return; 2620 2621 #ifdef CONFIG_PCI 2622 pci_unregister_driver(&ipmi_pci_driver); 2623 #endif 2624 2625 mutex_lock(&smi_infos_lock); 2626 list_for_each_entry_safe(e, tmp_e, &smi_infos, link) 2627 cleanup_one_si(e); 2628 mutex_unlock(&smi_infos_lock); 2629 2630 driver_unregister(&ipmi_driver); 2631 } 2632 module_exit(cleanup_ipmi_si); 2633 2634 MODULE_LICENSE("GPL"); 2635 MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>"); 2636 MODULE_DESCRIPTION("Interface to the IPMI driver for the KCS, SMIC, and BT system interfaces."); 2637