1 // SPDX-License-Identifier: GPL-2.0+ 2 /* 3 * ipmi_msghandler.c 4 * 5 * Incoming and outgoing message routing for an IPMI interface. 6 * 7 * Author: MontaVista Software, Inc. 8 * Corey Minyard <minyard@mvista.com> 9 * source@mvista.com 10 * 11 * Copyright 2002 MontaVista Software Inc. 12 */ 13 14 #define pr_fmt(fmt) "IPMI message handler: " fmt 15 #define dev_fmt(fmt) pr_fmt(fmt) 16 17 #include <linux/module.h> 18 #include <linux/errno.h> 19 #include <linux/panic_notifier.h> 20 #include <linux/poll.h> 21 #include <linux/sched.h> 22 #include <linux/seq_file.h> 23 #include <linux/spinlock.h> 24 #include <linux/mutex.h> 25 #include <linux/slab.h> 26 #include <linux/ipmi.h> 27 #include <linux/ipmi_smi.h> 28 #include <linux/notifier.h> 29 #include <linux/init.h> 30 #include <linux/rcupdate.h> 31 #include <linux/interrupt.h> 32 #include <linux/moduleparam.h> 33 #include <linux/workqueue.h> 34 #include <linux/uuid.h> 35 #include <linux/nospec.h> 36 #include <linux/vmalloc.h> 37 #include <linux/delay.h> 38 39 #define IPMI_DRIVER_VERSION "39.2" 40 41 static struct ipmi_recv_msg *ipmi_alloc_recv_msg(struct ipmi_user *user); 42 static void ipmi_set_recv_msg_user(struct ipmi_recv_msg *msg, 43 struct ipmi_user *user); 44 static int ipmi_init_msghandler(void); 45 static void smi_work(struct work_struct *t); 46 static void handle_new_recv_msgs(struct ipmi_smi *intf); 47 static void need_waiter(struct ipmi_smi *intf); 48 static int handle_one_recv_msg(struct ipmi_smi *intf, 49 struct ipmi_smi_msg *msg); 50 static void intf_free(struct kref *ref); 51 52 static bool initialized; 53 static bool drvregistered; 54 55 static struct timer_list ipmi_timer; 56 57 /* Numbers in this enumerator should be mapped to ipmi_panic_event_str */ 58 enum ipmi_panic_event_op { 59 IPMI_SEND_PANIC_EVENT_NONE, 60 IPMI_SEND_PANIC_EVENT, 61 IPMI_SEND_PANIC_EVENT_STRING, 62 IPMI_SEND_PANIC_EVENT_MAX 63 }; 64 65 /* Indices in this array should be mapped to enum ipmi_panic_event_op */ 66 static const char *const ipmi_panic_event_str[] = { "none", "event", "string", NULL }; 67 68 #ifdef CONFIG_IPMI_PANIC_STRING 69 #define IPMI_PANIC_DEFAULT IPMI_SEND_PANIC_EVENT_STRING 70 #elif defined(CONFIG_IPMI_PANIC_EVENT) 71 #define IPMI_PANIC_DEFAULT IPMI_SEND_PANIC_EVENT 72 #else 73 #define IPMI_PANIC_DEFAULT IPMI_SEND_PANIC_EVENT_NONE 74 #endif 75 76 static enum ipmi_panic_event_op ipmi_send_panic_event = IPMI_PANIC_DEFAULT; 77 78 static int panic_op_write_handler(const char *val, 79 const struct kernel_param *kp) 80 { 81 char valcp[16]; 82 int e; 83 84 strscpy(valcp, val, sizeof(valcp)); 85 e = match_string(ipmi_panic_event_str, -1, strstrip(valcp)); 86 if (e < 0) 87 return e; 88 89 ipmi_send_panic_event = e; 90 return 0; 91 } 92 93 static int panic_op_read_handler(char *buffer, const struct kernel_param *kp) 94 { 95 const char *event_str; 96 97 if (ipmi_send_panic_event >= IPMI_SEND_PANIC_EVENT_MAX) 98 event_str = "???"; 99 else 100 event_str = ipmi_panic_event_str[ipmi_send_panic_event]; 101 102 return sprintf(buffer, "%s\n", event_str); 103 } 104 105 static const struct kernel_param_ops panic_op_ops = { 106 .set = panic_op_write_handler, 107 .get = panic_op_read_handler 108 }; 109 module_param_cb(panic_op, &panic_op_ops, NULL, 0600); 110 MODULE_PARM_DESC(panic_op, "Sets if the IPMI driver will attempt to store panic information in the event log in the event of a panic. 
Set to 'none' for no, 'event' for a single event, or 'string' for a generic event and the panic string in IPMI OEM events.");


#define MAX_EVENTS_IN_QUEUE	25

/* Remain in auto-maintenance mode for this amount of time (in ms). */
static unsigned long maintenance_mode_timeout_ms = 30000;
module_param(maintenance_mode_timeout_ms, ulong, 0644);
MODULE_PARM_DESC(maintenance_mode_timeout_ms,
		 "The time (milliseconds) after the last maintenance message that the connection stays in maintenance mode.");

/*
 * Don't let a message sit in a queue forever, always time it with at least
 * the max message timer.  This is in milliseconds.
 */
#define MAX_MSG_TIMEOUT		60000

/*
 * Timeout times below are in milliseconds, and are done off a 1
 * second timer.  So setting the value to 1000 would mean anything
 * between 0 and 1000ms.  So really the only reasonable minimum
 * setting is 2000ms, which is between 1 and 2 seconds.
 */

/* The default timeout for message retries. */
static unsigned long default_retry_ms = 2000;
module_param(default_retry_ms, ulong, 0644);
MODULE_PARM_DESC(default_retry_ms,
		 "The time (milliseconds) between retry sends");

/* The default timeout for maintenance mode message retries. */
static unsigned long default_maintenance_retry_ms = 3000;
module_param(default_maintenance_retry_ms, ulong, 0644);
MODULE_PARM_DESC(default_maintenance_retry_ms,
		 "The time (milliseconds) between retry sends in maintenance mode");

/* The default maximum number of retries */
static unsigned int default_max_retries = 4;
module_param(default_max_retries, uint, 0644);
MODULE_PARM_DESC(default_max_retries,
		 "The default maximum number of times a message send is retried");

/* The default maximum number of users that may register. */
static unsigned int max_users = 30;
module_param(max_users, uint, 0644);
MODULE_PARM_DESC(max_users,
		 "The most users that may use the IPMI stack at one time.");

/* The default maximum number of messages a user may have outstanding. */
static unsigned int max_msgs_per_user = 100;
module_param(max_msgs_per_user, uint, 0644);
MODULE_PARM_DESC(max_msgs_per_user,
		 "The most messages a user may have outstanding.");

/* Call every ~1000 ms. */
#define IPMI_TIMEOUT_TIME	1000

/* How many jiffies does it take to get to the timeout time. */
#define IPMI_TIMEOUT_JIFFIES	((IPMI_TIMEOUT_TIME * HZ) / 1000)

/*
 * Request events from the queue every second (this is the number of
 * IPMI_TIMEOUT_TIMES between event requests).  Hopefully, in the
 * future, IPMI will add a way to know immediately if an event is in
 * the queue and this silliness can go away.
 */
#define IPMI_REQUEST_EV_TIME	(1000 / (IPMI_TIMEOUT_TIME))

/* How long should we cache dynamic device IDs? */
#define IPMI_DYN_DEV_ID_EXPIRY	(10 * HZ)

/*
 * The main "user" data structure.
 */
struct ipmi_user {
	struct list_head link;

	struct kref refcount;
	refcount_t destroyed;

	/* The upper layer that handles receive messages. */
	const struct ipmi_user_hndl *handler;
	void *handler_data;

	/* The interface this user is bound to. */
	struct ipmi_smi *intf;

	/* Does this user receive IPMI events? */
	bool gets_events;

	atomic_t nr_msgs;
};
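/*
 * Illustrative sketch (not part of this driver): a kernel client binds
 * to an interface by filling in a struct ipmi_user_hndl and calling
 * ipmi_create_user(), exactly the path implemented further down in
 * this file.  The receive handler owns each delivered message and must
 * free it.  The names my_recv_handler, my_hndl and my_user below are
 * hypothetical; the field and function names come from
 * include/linux/ipmi.h and this file.
 *
 *	static void my_recv_handler(struct ipmi_recv_msg *msg,
 *				    void *handler_data)
 *	{
 *		pr_info("IPMI message, cmd=0x%x\n", msg->msg.cmd);
 *		ipmi_free_recv_msg(msg);
 *	}
 *
 *	static const struct ipmi_user_hndl my_hndl = {
 *		.ipmi_recv_hndl = my_recv_handler,
 *	};
 *
 *	static struct ipmi_user *my_user;
 *
 *	// In module init, bind to interface 0:
 *	//	rv = ipmi_create_user(0, &my_hndl, NULL, &my_user);
 *	// and release it with ipmi_destroy_user(my_user) on exit.
 */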
struct cmd_rcvr {
	struct list_head link;

	struct ipmi_user *user;
	unsigned char netfn;
	unsigned char cmd;
	unsigned int  chans;

	/*
	 * This is used to form a linked list during mass deletion.
	 * Since this is in an RCU list, we cannot use the link above
	 * or change any data until the RCU period completes.  So we
	 * use this next variable during mass deletion so we can have
	 * a list and don't have to wait and restart the search on
	 * every individual deletion of a command.
	 */
	struct cmd_rcvr *next;
};

struct seq_table {
	unsigned int inuse : 1;
	unsigned int broadcast : 1;

	unsigned long timeout;
	unsigned long orig_timeout;
	unsigned int  retries_left;

	/*
	 * To verify on an incoming send message response that this is
	 * the message that the response is for, we keep a sequence id
	 * and increment it every time we send a message.
	 */
	long seqid;

	/*
	 * This is held so we can properly respond to the message on a
	 * timeout, and it is used to hold the temporary data for
	 * retransmission, too.
	 */
	struct ipmi_recv_msg *recv_msg;
};

/*
 * Store the information in a msgid (long) to allow us to find a
 * sequence table entry from the msgid.
 */
#define STORE_SEQ_IN_MSGID(seq, seqid) \
	((((seq) & 0x3f) << 26) | ((seqid) & 0x3ffffff))

#define GET_SEQ_FROM_MSGID(msgid, seq, seqid) \
	do {								\
		seq = (((msgid) >> 26) & 0x3f);				\
		seqid = ((msgid) & 0x3ffffff);				\
	} while (0)

#define NEXT_SEQID(seqid) (((seqid) + 1) & 0x3ffffff)
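/*
 * Worked example of the msgid encoding above (illustrative only):
 * with seq = 5 and seqid = 0x123456,
 *
 *	STORE_SEQ_IN_MSGID(5, 0x123456) == (5 << 26) | 0x123456
 *					== 0x14123456
 *
 * and GET_SEQ_FROM_MSGID(0x14123456, seq, seqid) recovers seq = 5
 * (the top 6 bits) and seqid = 0x123456 (the low 26 bits).  So the
 * sequence table slot and its generation number round-trip through
 * the single long msgid that is handed to the lower layer and comes
 * back in the send message response.
 */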
#define IPMI_MAX_CHANNELS 16
struct ipmi_channel {
	unsigned char medium;
	unsigned char protocol;
};

struct ipmi_channel_set {
	struct ipmi_channel c[IPMI_MAX_CHANNELS];
};

struct ipmi_my_addrinfo {
	/*
	 * My slave address.  This is initialized to IPMI_BMC_SLAVE_ADDR,
	 * but may be changed by the user.
	 */
	unsigned char address;

	/*
	 * My LUN.  This should generally stay the SMS LUN, but just in
	 * case...
	 */
	unsigned char lun;
};

/*
 * Note that the product id, manufacturer id, guid, and device id are
 * immutable in this structure, so dyn_mutex is not required for
 * accessing those.  If those change on a BMC, a new BMC is allocated.
 */
struct bmc_device {
	struct platform_device pdev;
	struct list_head       intfs; /* Interfaces on this BMC. */
	struct ipmi_device_id  id;
	struct ipmi_device_id  fetch_id;
	int                    dyn_id_set;
	unsigned long          dyn_id_expiry;
	struct mutex           dyn_mutex; /* Protects id, intfs, & dyn* */
	guid_t                 guid;
	guid_t                 fetch_guid;
	int                    dyn_guid_set;
	struct kref            usecount;
	struct work_struct     remove_work;
	unsigned char          cc; /* completion code */
};
#define to_bmc_device(x) container_of((x), struct bmc_device, pdev.dev)

static struct workqueue_struct *bmc_remove_work_wq;

static int bmc_get_device_id(struct ipmi_smi *intf, struct bmc_device *bmc,
			     struct ipmi_device_id *id,
			     bool *guid_set, guid_t *guid);

/*
 * Various statistics for IPMI, these index stats[] in the ipmi_smi
 * structure.
 */
enum ipmi_stat_indexes {
	/* Commands we got from the user that were invalid. */
	IPMI_STAT_sent_invalid_commands = 0,

	/* Commands we sent to the MC. */
	IPMI_STAT_sent_local_commands,

	/* Responses from the MC that were delivered to a user. */
	IPMI_STAT_handled_local_responses,

	/* Responses from the MC that were not delivered to a user. */
	IPMI_STAT_unhandled_local_responses,

	/* Commands we sent out to the IPMB bus. */
	IPMI_STAT_sent_ipmb_commands,

	/* Commands sent on the IPMB that had errors on the SEND CMD */
	IPMI_STAT_sent_ipmb_command_errs,

	/* Each retransmit increments this count. */
	IPMI_STAT_retransmitted_ipmb_commands,

	/*
	 * When a message times out (runs out of retransmits) this is
	 * incremented.
	 */
	IPMI_STAT_timed_out_ipmb_commands,

	/*
	 * This is like above, but for broadcasts.  Broadcasts are
	 * *not* included in the above count (they are expected to
	 * time out).
	 */
	IPMI_STAT_timed_out_ipmb_broadcasts,

	/* Responses I have sent to the IPMB bus. */
	IPMI_STAT_sent_ipmb_responses,

	/* The response was delivered to the user. */
	IPMI_STAT_handled_ipmb_responses,

	/* The response had invalid data in it. */
	IPMI_STAT_invalid_ipmb_responses,

	/* The response didn't have anyone waiting for it. */
	IPMI_STAT_unhandled_ipmb_responses,

	/* Commands we sent out over the LAN. */
	IPMI_STAT_sent_lan_commands,

	/* Commands sent on the LAN that had errors on the SEND CMD */
	IPMI_STAT_sent_lan_command_errs,

	/* Each retransmit increments this count. */
	IPMI_STAT_retransmitted_lan_commands,

	/*
	 * When a message times out (runs out of retransmits) this is
	 * incremented.
	 */
	IPMI_STAT_timed_out_lan_commands,

	/* Responses I have sent over the LAN. */
	IPMI_STAT_sent_lan_responses,

	/* The response was delivered to the user. */
	IPMI_STAT_handled_lan_responses,

	/* The response had invalid data in it. */
	IPMI_STAT_invalid_lan_responses,

	/* The response didn't have anyone waiting for it. */
	IPMI_STAT_unhandled_lan_responses,

	/* The command was delivered to the user. */
	IPMI_STAT_handled_commands,

	/* The command had invalid data in it. */
	IPMI_STAT_invalid_commands,

	/* The command didn't have anyone waiting for it. */
	IPMI_STAT_unhandled_commands,

	/* Invalid data in an event. */
	IPMI_STAT_invalid_events,

	/* Events that were received with the proper format. */
	IPMI_STAT_events,

	/* Retransmissions on IPMB that failed. */
	IPMI_STAT_dropped_rexmit_ipmb_commands,

	/* Retransmissions on LAN that failed. */
	IPMI_STAT_dropped_rexmit_lan_commands,

	/* This *must* remain last, add new values above this. */
	IPMI_NUM_STATS
};
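/*
 * Illustrative note: these indexes are not normally used directly.
 * The ipmi_inc_stat()/ipmi_get_stat() macros defined further down in
 * this file paste the short stat name onto the IPMI_STAT_ prefix, so
 * for example
 *
 *	ipmi_inc_stat(intf, sent_ipmb_commands);
 *
 * expands to
 *
 *	atomic_inc(&(intf)->stats[IPMI_STAT_sent_ipmb_commands]);
 *
 * which is why the enumerators keep the lower-case stat names.
 */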

#define IPMI_IPMB_NUM_SEQ	64
struct ipmi_smi {
	struct module *owner;

	/* What interface number are we? */
	int intf_num;

	struct kref refcount;

	/* Set when the interface is being unregistered. */
	bool in_shutdown;

	/* Used for a list of interfaces. */
	struct list_head link;

	/*
	 * The list of upper layers that are using me.
	 */
	struct list_head users;
	struct mutex users_mutex;
	atomic_t nr_users;
	struct device_attribute nr_users_devattr;
	struct device_attribute nr_msgs_devattr;
	struct device_attribute maintenance_mode_devattr;

	/* Used for wake ups at startup. */
	wait_queue_head_t waitq;

	/*
	 * Prevents the interface from being unregistered when the
	 * interface is used by being looked up through the BMC
	 * structure.
	 */
	struct mutex bmc_reg_mutex;

	struct bmc_device tmp_bmc;
	struct bmc_device *bmc;
	bool bmc_registered;
	struct list_head bmc_link;
	char *my_dev_name;
	bool in_bmc_register;  /* Handle recursive situations.  Yuck. */
	struct work_struct bmc_reg_work;

	const struct ipmi_smi_handlers *handlers;
	void                           *send_info;

	/* Driver-model device for the system interface. */
	struct device *si_dev;

	/*
	 * A table of sequence numbers for this interface.  We use the
	 * sequence numbers for IPMB messages that go out of the
	 * interface to match them up with their responses.  A routine
	 * is called periodically to time the items in this list.
	 */
	struct mutex seq_lock;
	struct seq_table seq_table[IPMI_IPMB_NUM_SEQ];
	int curr_seq;

	/*
	 * Messages queued for delivery to the user.
	 */
	struct mutex user_msgs_mutex;
	struct list_head user_msgs;

	/*
	 * Messages queued for processing.  If processing fails (out
	 * of memory for instance), they will stay in here to be
	 * processed later in a periodic timer interrupt.  The
	 * workqueue is for handling received messages directly from
	 * the handler.
	 */
	spinlock_t       waiting_rcv_msgs_lock;
	struct list_head waiting_rcv_msgs;
	atomic_t         watchdog_pretimeouts_to_deliver;
	struct work_struct smi_work;

	spinlock_t          xmit_msgs_lock;
	struct list_head    xmit_msgs;
	struct ipmi_smi_msg *curr_msg;
	struct list_head    hp_xmit_msgs;

	/*
	 * The list of command receivers that are registered for commands
	 * on this interface.
	 */
	struct mutex     cmd_rcvrs_mutex;
	struct list_head cmd_rcvrs;

	/*
	 * Events that were queued because no one was there to receive
	 * them.
	 */
	struct mutex     events_mutex; /* For dealing with event stuff. */
	struct list_head waiting_events;
	unsigned int     waiting_events_count; /* How many events in queue? */
	char             event_msg_printed;

	/* How many users are waiting for events? */
	atomic_t event_waiters;
	unsigned int ticks_to_req_ev;

	spinlock_t watch_lock; /* For dealing with watch stuff below. */

	/* How many users are waiting for commands? */
	unsigned int command_waiters;

	/* How many users are waiting for watchdogs? */
	unsigned int watchdog_waiters;

	/* How many users are waiting for message responses? */
	unsigned int response_waiters;

	/*
	 * Tells what the lower layer has last been asked to watch for,
	 * messages and/or watchdogs.  Protected by watch_lock.
	 */
	unsigned int last_watch_mask;

	/*
	 * The event receiver for my BMC, only really used at panic
	 * shutdown as a place to store this.
	 */
	unsigned char event_receiver;
	unsigned char event_receiver_lun;
	unsigned char local_sel_device;
	unsigned char local_event_generator;

	/* For handling of maintenance mode. */
	int maintenance_mode;

#define IPMI_MAINTENANCE_MODE_STATE_OFF		0
#define IPMI_MAINTENANCE_MODE_STATE_FIRMWARE	1
#define IPMI_MAINTENANCE_MODE_STATE_RESET	2
	int maintenance_mode_state;
	int auto_maintenance_timeout;
	spinlock_t maintenance_mode_lock; /* Used in a timer...
*/ 554 555 /* 556 * If we are doing maintenance on something on IPMB, extend 557 * the timeout time to avoid timeouts writing firmware and 558 * such. 559 */ 560 int ipmb_maintenance_mode_timeout; 561 562 /* 563 * A cheap hack, if this is non-null and a message to an 564 * interface comes in with a NULL user, call this routine with 565 * it. Note that the message will still be freed by the 566 * caller. This only works on the system interface. 567 * 568 * Protected by bmc_reg_mutex. 569 */ 570 void (*null_user_handler)(struct ipmi_smi *intf, 571 struct ipmi_recv_msg *msg); 572 573 /* 574 * When we are scanning the channels for an SMI, this will 575 * tell which channel we are scanning. 576 */ 577 int curr_channel; 578 579 /* Channel information */ 580 struct ipmi_channel_set *channel_list; 581 unsigned int curr_working_cset; /* First index into the following. */ 582 struct ipmi_channel_set wchannels[2]; 583 struct ipmi_my_addrinfo addrinfo[IPMI_MAX_CHANNELS]; 584 bool channels_ready; 585 586 atomic_t stats[IPMI_NUM_STATS]; 587 588 /* 589 * run_to_completion duplicate of smb_info, smi_info 590 * and ipmi_serial_info structures. Used to decrease numbers of 591 * parameters passed by "low" level IPMI code. 592 */ 593 int run_to_completion; 594 }; 595 #define to_si_intf_from_dev(device) container_of(device, struct ipmi_smi, dev) 596 597 static void __get_guid(struct ipmi_smi *intf); 598 static void __ipmi_bmc_unregister(struct ipmi_smi *intf); 599 static int __ipmi_bmc_register(struct ipmi_smi *intf, 600 struct ipmi_device_id *id, 601 bool guid_set, guid_t *guid, int intf_num); 602 static int __scan_channels(struct ipmi_smi *intf, 603 struct ipmi_device_id *id, bool rescan); 604 605 static void ipmi_lock_xmit_msgs(struct ipmi_smi *intf, int run_to_completion, 606 unsigned long *flags) 607 { 608 if (run_to_completion) 609 return; 610 spin_lock_irqsave(&intf->xmit_msgs_lock, *flags); 611 } 612 613 static void ipmi_unlock_xmit_msgs(struct ipmi_smi *intf, int run_to_completion, 614 unsigned long *flags) 615 { 616 if (run_to_completion) 617 return; 618 spin_unlock_irqrestore(&intf->xmit_msgs_lock, *flags); 619 } 620 621 static void free_ipmi_user(struct kref *ref) 622 { 623 struct ipmi_user *user = container_of(ref, struct ipmi_user, refcount); 624 struct module *owner; 625 626 owner = user->intf->owner; 627 kref_put(&user->intf->refcount, intf_free); 628 module_put(owner); 629 vfree(user); 630 } 631 632 static void release_ipmi_user(struct ipmi_user *user) 633 { 634 kref_put(&user->refcount, free_ipmi_user); 635 } 636 637 static struct ipmi_user *acquire_ipmi_user(struct ipmi_user *user) 638 { 639 if (!kref_get_unless_zero(&user->refcount)) 640 return NULL; 641 return user; 642 } 643 644 /* 645 * The driver model view of the IPMI messaging driver. 646 */ 647 static struct platform_driver ipmidriver = { 648 .driver = { 649 .name = "ipmi", 650 .bus = &platform_bus_type 651 } 652 }; 653 /* 654 * This mutex keeps us from adding the same BMC twice. 655 */ 656 static DEFINE_MUTEX(ipmidriver_mutex); 657 658 static LIST_HEAD(ipmi_interfaces); 659 static DEFINE_MUTEX(ipmi_interfaces_mutex); 660 661 /* 662 * List of watchers that want to know when smi's are added and deleted. 
663 */ 664 static LIST_HEAD(smi_watchers); 665 static DEFINE_MUTEX(smi_watchers_mutex); 666 667 #define ipmi_inc_stat(intf, stat) \ 668 atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat]) 669 #define ipmi_get_stat(intf, stat) \ 670 ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat])) 671 672 static const char * const addr_src_to_str[] = { 673 "invalid", "hotmod", "hardcoded", "SPMI", "ACPI", "SMBIOS", "PCI", 674 "device-tree", "platform" 675 }; 676 677 const char *ipmi_addr_src_to_str(enum ipmi_addr_src src) 678 { 679 if (src >= SI_LAST) 680 src = 0; /* Invalid */ 681 return addr_src_to_str[src]; 682 } 683 EXPORT_SYMBOL(ipmi_addr_src_to_str); 684 685 static int is_lan_addr(struct ipmi_addr *addr) 686 { 687 return addr->addr_type == IPMI_LAN_ADDR_TYPE; 688 } 689 690 static int is_ipmb_addr(struct ipmi_addr *addr) 691 { 692 return addr->addr_type == IPMI_IPMB_ADDR_TYPE; 693 } 694 695 static int is_ipmb_bcast_addr(struct ipmi_addr *addr) 696 { 697 return addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE; 698 } 699 700 static int is_ipmb_direct_addr(struct ipmi_addr *addr) 701 { 702 return addr->addr_type == IPMI_IPMB_DIRECT_ADDR_TYPE; 703 } 704 705 static void free_recv_msg_list(struct list_head *q) 706 { 707 struct ipmi_recv_msg *msg, *msg2; 708 709 list_for_each_entry_safe(msg, msg2, q, link) { 710 list_del(&msg->link); 711 ipmi_free_recv_msg(msg); 712 } 713 } 714 715 static void free_smi_msg_list(struct list_head *q) 716 { 717 struct ipmi_smi_msg *msg, *msg2; 718 719 list_for_each_entry_safe(msg, msg2, q, link) { 720 list_del(&msg->link); 721 ipmi_free_smi_msg(msg); 722 } 723 } 724 725 static void intf_free(struct kref *ref) 726 { 727 struct ipmi_smi *intf = container_of(ref, struct ipmi_smi, refcount); 728 int i; 729 struct cmd_rcvr *rcvr, *rcvr2; 730 731 free_smi_msg_list(&intf->waiting_rcv_msgs); 732 free_recv_msg_list(&intf->waiting_events); 733 734 /* 735 * Wholesale remove all the entries from the list in the 736 * interface. No need for locks, this is single-threaded. 737 */ 738 list_for_each_entry_safe(rcvr, rcvr2, &intf->cmd_rcvrs, link) 739 kfree(rcvr); 740 741 for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) { 742 if ((intf->seq_table[i].inuse) 743 && (intf->seq_table[i].recv_msg)) 744 ipmi_free_recv_msg(intf->seq_table[i].recv_msg); 745 } 746 747 kfree(intf); 748 } 749 750 int ipmi_smi_watcher_register(struct ipmi_smi_watcher *watcher) 751 { 752 struct ipmi_smi *intf; 753 unsigned int count = 0, i; 754 int *interfaces = NULL; 755 struct device **devices = NULL; 756 int rv = 0; 757 758 /* 759 * Make sure the driver is actually initialized, this handles 760 * problems with initialization order. 761 */ 762 rv = ipmi_init_msghandler(); 763 if (rv) 764 return rv; 765 766 mutex_lock(&smi_watchers_mutex); 767 768 list_add(&watcher->link, &smi_watchers); 769 770 /* 771 * Build an array of ipmi interfaces and fill it in, and 772 * another array of the devices. We can't call the callback 773 * with ipmi_interfaces_mutex held. smi_watchers_mutex will 774 * keep things in order for the user. 
775 */ 776 mutex_lock(&ipmi_interfaces_mutex); 777 list_for_each_entry(intf, &ipmi_interfaces, link) 778 count++; 779 if (count > 0) { 780 interfaces = kmalloc_objs(*interfaces, count); 781 if (!interfaces) { 782 rv = -ENOMEM; 783 } else { 784 devices = kmalloc_objs(*devices, count); 785 if (!devices) { 786 kfree(interfaces); 787 interfaces = NULL; 788 rv = -ENOMEM; 789 } 790 } 791 count = 0; 792 } 793 if (interfaces) { 794 list_for_each_entry(intf, &ipmi_interfaces, link) { 795 int intf_num = READ_ONCE(intf->intf_num); 796 797 if (intf_num == -1) 798 continue; 799 devices[count] = intf->si_dev; 800 interfaces[count++] = intf_num; 801 } 802 } 803 mutex_unlock(&ipmi_interfaces_mutex); 804 805 if (interfaces) { 806 for (i = 0; i < count; i++) 807 watcher->new_smi(interfaces[i], devices[i]); 808 kfree(interfaces); 809 kfree(devices); 810 } 811 812 mutex_unlock(&smi_watchers_mutex); 813 814 return rv; 815 } 816 EXPORT_SYMBOL(ipmi_smi_watcher_register); 817 818 int ipmi_smi_watcher_unregister(struct ipmi_smi_watcher *watcher) 819 { 820 mutex_lock(&smi_watchers_mutex); 821 list_del(&watcher->link); 822 mutex_unlock(&smi_watchers_mutex); 823 return 0; 824 } 825 EXPORT_SYMBOL(ipmi_smi_watcher_unregister); 826 827 static void 828 call_smi_watchers(int i, struct device *dev) 829 { 830 struct ipmi_smi_watcher *w; 831 832 list_for_each_entry(w, &smi_watchers, link) { 833 if (try_module_get(w->owner)) { 834 w->new_smi(i, dev); 835 module_put(w->owner); 836 } 837 } 838 } 839 840 static int 841 ipmi_addr_equal(struct ipmi_addr *addr1, struct ipmi_addr *addr2) 842 { 843 if (addr1->addr_type != addr2->addr_type) 844 return 0; 845 846 if (addr1->channel != addr2->channel) 847 return 0; 848 849 if (addr1->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) { 850 struct ipmi_system_interface_addr *smi_addr1 851 = (struct ipmi_system_interface_addr *) addr1; 852 struct ipmi_system_interface_addr *smi_addr2 853 = (struct ipmi_system_interface_addr *) addr2; 854 return (smi_addr1->lun == smi_addr2->lun); 855 } 856 857 if (is_ipmb_addr(addr1) || is_ipmb_bcast_addr(addr1)) { 858 struct ipmi_ipmb_addr *ipmb_addr1 859 = (struct ipmi_ipmb_addr *) addr1; 860 struct ipmi_ipmb_addr *ipmb_addr2 861 = (struct ipmi_ipmb_addr *) addr2; 862 863 return ((ipmb_addr1->slave_addr == ipmb_addr2->slave_addr) 864 && (ipmb_addr1->lun == ipmb_addr2->lun)); 865 } 866 867 if (is_ipmb_direct_addr(addr1)) { 868 struct ipmi_ipmb_direct_addr *daddr1 869 = (struct ipmi_ipmb_direct_addr *) addr1; 870 struct ipmi_ipmb_direct_addr *daddr2 871 = (struct ipmi_ipmb_direct_addr *) addr2; 872 873 return daddr1->slave_addr == daddr2->slave_addr && 874 daddr1->rq_lun == daddr2->rq_lun && 875 daddr1->rs_lun == daddr2->rs_lun; 876 } 877 878 if (is_lan_addr(addr1)) { 879 struct ipmi_lan_addr *lan_addr1 880 = (struct ipmi_lan_addr *) addr1; 881 struct ipmi_lan_addr *lan_addr2 882 = (struct ipmi_lan_addr *) addr2; 883 884 return ((lan_addr1->remote_SWID == lan_addr2->remote_SWID) 885 && (lan_addr1->local_SWID == lan_addr2->local_SWID) 886 && (lan_addr1->session_handle 887 == lan_addr2->session_handle) 888 && (lan_addr1->lun == lan_addr2->lun)); 889 } 890 891 return 1; 892 } 893 894 int ipmi_validate_addr(struct ipmi_addr *addr, int len) 895 { 896 if (len < sizeof(struct ipmi_system_interface_addr)) 897 return -EINVAL; 898 899 if (addr->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) { 900 if (addr->channel != IPMI_BMC_CHANNEL) 901 return -EINVAL; 902 return 0; 903 } 904 905 if ((addr->channel == IPMI_BMC_CHANNEL) 906 || (addr->channel >= IPMI_MAX_CHANNELS) 907 || 
	    (addr->channel < 0))
		return -EINVAL;

	if (is_ipmb_addr(addr) || is_ipmb_bcast_addr(addr)) {
		if (len < sizeof(struct ipmi_ipmb_addr))
			return -EINVAL;
		return 0;
	}

	if (is_ipmb_direct_addr(addr)) {
		struct ipmi_ipmb_direct_addr *daddr = (void *) addr;

		if (addr->channel != 0)
			return -EINVAL;
		if (len < sizeof(struct ipmi_ipmb_direct_addr))
			return -EINVAL;

		if (daddr->slave_addr & 0x01)
			return -EINVAL;
		if (daddr->rq_lun >= 4)
			return -EINVAL;
		if (daddr->rs_lun >= 4)
			return -EINVAL;
		return 0;
	}

	if (is_lan_addr(addr)) {
		if (len < sizeof(struct ipmi_lan_addr))
			return -EINVAL;
		return 0;
	}

	return -EINVAL;
}
EXPORT_SYMBOL(ipmi_validate_addr);

unsigned int ipmi_addr_length(int addr_type)
{
	if (addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
		return sizeof(struct ipmi_system_interface_addr);

	if ((addr_type == IPMI_IPMB_ADDR_TYPE)
	    || (addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE))
		return sizeof(struct ipmi_ipmb_addr);

	if (addr_type == IPMI_IPMB_DIRECT_ADDR_TYPE)
		return sizeof(struct ipmi_ipmb_direct_addr);

	if (addr_type == IPMI_LAN_ADDR_TYPE)
		return sizeof(struct ipmi_lan_addr);

	return 0;
}
EXPORT_SYMBOL(ipmi_addr_length);

static int deliver_response(struct ipmi_smi *intf, struct ipmi_recv_msg *msg)
{
	int rv = 0;

	if (!msg->user) {
		/* Special handling for NULL users. */
		if (intf->null_user_handler) {
			intf->null_user_handler(intf, msg);
		} else {
			/* No handler, so give up. */
			rv = -EINVAL;
		}
		ipmi_free_recv_msg(msg);
	} else if (oops_in_progress) {
		/*
		 * If we are running in the panic context, calling the
		 * receive handler doesn't have much meaning and has a
		 * deadlock risk.  At this moment, simply skip it in
		 * that case.
		 */
		ipmi_free_recv_msg(msg);
	} else {
		/*
		 * Deliver it in smi_work.  The message will hold a
		 * refcount to the user.
		 */
		mutex_lock(&intf->user_msgs_mutex);
		list_add_tail(&msg->link, &intf->user_msgs);
		mutex_unlock(&intf->user_msgs_mutex);
		queue_work(system_wq, &intf->smi_work);
	}

	return rv;
}

static void deliver_local_response(struct ipmi_smi *intf,
				   struct ipmi_recv_msg *msg)
{
	if (deliver_response(intf, msg))
		ipmi_inc_stat(intf, unhandled_local_responses);
	else
		ipmi_inc_stat(intf, handled_local_responses);
}

static void deliver_err_response(struct ipmi_smi *intf,
				 struct ipmi_recv_msg *msg, int err)
{
	msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
	msg->msg_data[0] = err;
	msg->msg.netfn |= 1; /* Convert to a response.
*/ 1011 msg->msg.data_len = 1; 1012 msg->msg.data = msg->msg_data; 1013 deliver_local_response(intf, msg); 1014 } 1015 1016 static void smi_add_watch(struct ipmi_smi *intf, unsigned int flags) 1017 { 1018 unsigned long iflags; 1019 1020 if (!intf->handlers->set_need_watch) 1021 return; 1022 1023 spin_lock_irqsave(&intf->watch_lock, iflags); 1024 if (flags & IPMI_WATCH_MASK_CHECK_MESSAGES) 1025 intf->response_waiters++; 1026 1027 if (flags & IPMI_WATCH_MASK_CHECK_WATCHDOG) 1028 intf->watchdog_waiters++; 1029 1030 if (flags & IPMI_WATCH_MASK_CHECK_COMMANDS) 1031 intf->command_waiters++; 1032 1033 if ((intf->last_watch_mask & flags) != flags) { 1034 intf->last_watch_mask |= flags; 1035 intf->handlers->set_need_watch(intf->send_info, 1036 intf->last_watch_mask); 1037 } 1038 spin_unlock_irqrestore(&intf->watch_lock, iflags); 1039 } 1040 1041 static void smi_remove_watch(struct ipmi_smi *intf, unsigned int flags) 1042 { 1043 unsigned long iflags; 1044 1045 if (!intf->handlers->set_need_watch) 1046 return; 1047 1048 spin_lock_irqsave(&intf->watch_lock, iflags); 1049 if (flags & IPMI_WATCH_MASK_CHECK_MESSAGES) 1050 intf->response_waiters--; 1051 1052 if (flags & IPMI_WATCH_MASK_CHECK_WATCHDOG) 1053 intf->watchdog_waiters--; 1054 1055 if (flags & IPMI_WATCH_MASK_CHECK_COMMANDS) 1056 intf->command_waiters--; 1057 1058 flags = 0; 1059 if (intf->response_waiters) 1060 flags |= IPMI_WATCH_MASK_CHECK_MESSAGES; 1061 if (intf->watchdog_waiters) 1062 flags |= IPMI_WATCH_MASK_CHECK_WATCHDOG; 1063 if (intf->command_waiters) 1064 flags |= IPMI_WATCH_MASK_CHECK_COMMANDS; 1065 1066 if (intf->last_watch_mask != flags) { 1067 intf->last_watch_mask = flags; 1068 intf->handlers->set_need_watch(intf->send_info, 1069 intf->last_watch_mask); 1070 } 1071 spin_unlock_irqrestore(&intf->watch_lock, iflags); 1072 } 1073 1074 /* 1075 * Find the next sequence number not being used and add the given 1076 * message with the given timeout to the sequence table. This must be 1077 * called with the interface's seq_lock held. 1078 */ 1079 static int intf_next_seq(struct ipmi_smi *intf, 1080 struct ipmi_recv_msg *recv_msg, 1081 unsigned long timeout, 1082 int retries, 1083 int broadcast, 1084 unsigned char *seq, 1085 long *seqid) 1086 { 1087 int rv = 0; 1088 unsigned int i; 1089 1090 if (timeout == 0) 1091 timeout = default_retry_ms; 1092 if (retries < 0) 1093 retries = default_max_retries; 1094 1095 for (i = intf->curr_seq; (i+1)%IPMI_IPMB_NUM_SEQ != intf->curr_seq; 1096 i = (i+1)%IPMI_IPMB_NUM_SEQ) { 1097 if (!intf->seq_table[i].inuse) 1098 break; 1099 } 1100 1101 if (!intf->seq_table[i].inuse) { 1102 intf->seq_table[i].recv_msg = recv_msg; 1103 1104 /* 1105 * Start with the maximum timeout, when the send response 1106 * comes in we will start the real timer. 1107 */ 1108 intf->seq_table[i].timeout = MAX_MSG_TIMEOUT; 1109 intf->seq_table[i].orig_timeout = timeout; 1110 intf->seq_table[i].retries_left = retries; 1111 intf->seq_table[i].broadcast = broadcast; 1112 intf->seq_table[i].inuse = 1; 1113 intf->seq_table[i].seqid = NEXT_SEQID(intf->seq_table[i].seqid); 1114 *seq = i; 1115 *seqid = intf->seq_table[i].seqid; 1116 intf->curr_seq = (i+1)%IPMI_IPMB_NUM_SEQ; 1117 smi_add_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES); 1118 need_waiter(intf); 1119 } else { 1120 rv = -EAGAIN; 1121 } 1122 1123 return rv; 1124 } 1125 1126 /* 1127 * Return the receive message for the given sequence number and 1128 * release the sequence number so it can be reused. 
Some other data 1129 * is passed in to be sure the message matches up correctly (to help 1130 * guard against message coming in after their timeout and the 1131 * sequence number being reused). 1132 */ 1133 static int intf_find_seq(struct ipmi_smi *intf, 1134 unsigned char seq, 1135 short channel, 1136 unsigned char cmd, 1137 unsigned char netfn, 1138 struct ipmi_addr *addr, 1139 struct ipmi_recv_msg **recv_msg) 1140 { 1141 int rv = -ENODEV; 1142 1143 if (seq >= IPMI_IPMB_NUM_SEQ) 1144 return -EINVAL; 1145 1146 mutex_lock(&intf->seq_lock); 1147 if (intf->seq_table[seq].inuse) { 1148 struct ipmi_recv_msg *msg = intf->seq_table[seq].recv_msg; 1149 1150 if ((msg->addr.channel == channel) && (msg->msg.cmd == cmd) 1151 && (msg->msg.netfn == netfn) 1152 && (ipmi_addr_equal(addr, &msg->addr))) { 1153 *recv_msg = msg; 1154 intf->seq_table[seq].inuse = 0; 1155 smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES); 1156 rv = 0; 1157 } 1158 } 1159 mutex_unlock(&intf->seq_lock); 1160 1161 return rv; 1162 } 1163 1164 1165 /* Start the timer for a specific sequence table entry. */ 1166 static int intf_start_seq_timer(struct ipmi_smi *intf, 1167 long msgid) 1168 { 1169 int rv = -ENODEV; 1170 unsigned char seq; 1171 unsigned long seqid; 1172 1173 1174 GET_SEQ_FROM_MSGID(msgid, seq, seqid); 1175 1176 mutex_lock(&intf->seq_lock); 1177 /* 1178 * We do this verification because the user can be deleted 1179 * while a message is outstanding. 1180 */ 1181 if ((intf->seq_table[seq].inuse) 1182 && (intf->seq_table[seq].seqid == seqid)) { 1183 struct seq_table *ent = &intf->seq_table[seq]; 1184 ent->timeout = ent->orig_timeout; 1185 rv = 0; 1186 } 1187 mutex_unlock(&intf->seq_lock); 1188 1189 return rv; 1190 } 1191 1192 /* Got an error for the send message for a specific sequence number. */ 1193 static int intf_err_seq(struct ipmi_smi *intf, 1194 long msgid, 1195 unsigned int err) 1196 { 1197 int rv = -ENODEV; 1198 unsigned char seq; 1199 unsigned long seqid; 1200 struct ipmi_recv_msg *msg = NULL; 1201 1202 1203 GET_SEQ_FROM_MSGID(msgid, seq, seqid); 1204 1205 mutex_lock(&intf->seq_lock); 1206 /* 1207 * We do this verification because the user can be deleted 1208 * while a message is outstanding. 1209 */ 1210 if ((intf->seq_table[seq].inuse) 1211 && (intf->seq_table[seq].seqid == seqid)) { 1212 struct seq_table *ent = &intf->seq_table[seq]; 1213 1214 ent->inuse = 0; 1215 smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES); 1216 msg = ent->recv_msg; 1217 rv = 0; 1218 } 1219 mutex_unlock(&intf->seq_lock); 1220 1221 if (msg) 1222 deliver_err_response(intf, msg, err); 1223 1224 return rv; 1225 } 1226 1227 int ipmi_create_user(unsigned int if_num, 1228 const struct ipmi_user_hndl *handler, 1229 void *handler_data, 1230 struct ipmi_user **user) 1231 { 1232 struct ipmi_user *new_user = NULL; 1233 int rv = 0; 1234 struct ipmi_smi *intf; 1235 1236 /* 1237 * There is no module usecount here, because it's not 1238 * required. Since this can only be used by and called from 1239 * other modules, they will implicitly use this module, and 1240 * thus this can't be removed unless the other modules are 1241 * removed. 1242 */ 1243 1244 if (handler == NULL) 1245 return -EINVAL; 1246 1247 /* 1248 * Make sure the driver is actually initialized, this handles 1249 * problems with initialization order. 
1250 */ 1251 rv = ipmi_init_msghandler(); 1252 if (rv) 1253 return rv; 1254 1255 mutex_lock(&ipmi_interfaces_mutex); 1256 list_for_each_entry(intf, &ipmi_interfaces, link) { 1257 if (intf->intf_num == if_num) 1258 goto found; 1259 } 1260 /* Not found, return an error */ 1261 rv = -EINVAL; 1262 goto out_unlock; 1263 1264 found: 1265 if (intf->in_shutdown) { 1266 rv = -ENODEV; 1267 goto out_unlock; 1268 } 1269 1270 if (atomic_add_return(1, &intf->nr_users) > max_users) { 1271 rv = -EBUSY; 1272 goto out_kfree; 1273 } 1274 1275 new_user = vzalloc(sizeof(*new_user)); 1276 if (!new_user) { 1277 rv = -ENOMEM; 1278 goto out_kfree; 1279 } 1280 1281 if (!try_module_get(intf->owner)) { 1282 rv = -ENODEV; 1283 goto out_kfree; 1284 } 1285 1286 /* Note that each existing user holds a refcount to the interface. */ 1287 kref_get(&intf->refcount); 1288 1289 atomic_set(&new_user->nr_msgs, 0); 1290 kref_init(&new_user->refcount); 1291 refcount_set(&new_user->destroyed, 1); 1292 kref_get(&new_user->refcount); /* Destroy owns a refcount. */ 1293 new_user->handler = handler; 1294 new_user->handler_data = handler_data; 1295 new_user->intf = intf; 1296 new_user->gets_events = false; 1297 1298 mutex_lock(&intf->users_mutex); 1299 mutex_lock(&intf->seq_lock); 1300 list_add(&new_user->link, &intf->users); 1301 mutex_unlock(&intf->seq_lock); 1302 mutex_unlock(&intf->users_mutex); 1303 1304 if (handler->ipmi_watchdog_pretimeout) 1305 /* User wants pretimeouts, so make sure to watch for them. */ 1306 smi_add_watch(intf, IPMI_WATCH_MASK_CHECK_WATCHDOG); 1307 1308 out_kfree: 1309 if (rv) { 1310 atomic_dec(&intf->nr_users); 1311 vfree(new_user); 1312 } else { 1313 *user = new_user; 1314 } 1315 out_unlock: 1316 mutex_unlock(&ipmi_interfaces_mutex); 1317 return rv; 1318 } 1319 EXPORT_SYMBOL(ipmi_create_user); 1320 1321 int ipmi_get_smi_info(int if_num, struct ipmi_smi_info *data) 1322 { 1323 int rv = -EINVAL; 1324 struct ipmi_smi *intf; 1325 1326 mutex_lock(&ipmi_interfaces_mutex); 1327 list_for_each_entry(intf, &ipmi_interfaces, link) { 1328 if (intf->intf_num == if_num) { 1329 if (!intf->handlers->get_smi_info) 1330 rv = -ENOTTY; 1331 else 1332 rv = intf->handlers->get_smi_info(intf->send_info, data); 1333 break; 1334 } 1335 } 1336 mutex_unlock(&ipmi_interfaces_mutex); 1337 1338 return rv; 1339 } 1340 EXPORT_SYMBOL(ipmi_get_smi_info); 1341 1342 /* Must be called with intf->users_mutex held. */ 1343 static void _ipmi_destroy_user(struct ipmi_user *user) 1344 { 1345 struct ipmi_smi *intf = user->intf; 1346 int i; 1347 struct cmd_rcvr *rcvr; 1348 struct cmd_rcvr *rcvrs = NULL; 1349 struct ipmi_recv_msg *msg, *msg2; 1350 1351 if (!refcount_dec_if_one(&user->destroyed)) 1352 return; 1353 1354 if (user->handler->shutdown) 1355 user->handler->shutdown(user->handler_data); 1356 1357 if (user->handler->ipmi_watchdog_pretimeout) 1358 smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_WATCHDOG); 1359 1360 if (user->gets_events) 1361 atomic_dec(&intf->event_waiters); 1362 1363 /* Remove the user from the interface's list and sequence table. 
*/ 1364 list_del(&user->link); 1365 atomic_dec(&intf->nr_users); 1366 1367 mutex_lock(&intf->seq_lock); 1368 for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) { 1369 if (intf->seq_table[i].inuse 1370 && (intf->seq_table[i].recv_msg->user == user)) { 1371 intf->seq_table[i].inuse = 0; 1372 smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES); 1373 ipmi_free_recv_msg(intf->seq_table[i].recv_msg); 1374 } 1375 } 1376 mutex_unlock(&intf->seq_lock); 1377 1378 /* 1379 * Remove the user from the command receiver's table. First 1380 * we build a list of everything (not using the standard link, 1381 * since other things may be using it till we do 1382 * synchronize_rcu()) then free everything in that list. 1383 */ 1384 mutex_lock(&intf->cmd_rcvrs_mutex); 1385 list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link, 1386 lockdep_is_held(&intf->cmd_rcvrs_mutex)) { 1387 if (rcvr->user == user) { 1388 list_del_rcu(&rcvr->link); 1389 rcvr->next = rcvrs; 1390 rcvrs = rcvr; 1391 } 1392 } 1393 mutex_unlock(&intf->cmd_rcvrs_mutex); 1394 while (rcvrs) { 1395 rcvr = rcvrs; 1396 rcvrs = rcvr->next; 1397 kfree(rcvr); 1398 } 1399 1400 mutex_lock(&intf->user_msgs_mutex); 1401 list_for_each_entry_safe(msg, msg2, &intf->user_msgs, link) { 1402 if (msg->user != user) 1403 continue; 1404 list_del(&msg->link); 1405 ipmi_free_recv_msg(msg); 1406 } 1407 mutex_unlock(&intf->user_msgs_mutex); 1408 1409 release_ipmi_user(user); 1410 } 1411 1412 void ipmi_destroy_user(struct ipmi_user *user) 1413 { 1414 struct ipmi_smi *intf = user->intf; 1415 1416 mutex_lock(&intf->users_mutex); 1417 _ipmi_destroy_user(user); 1418 mutex_unlock(&intf->users_mutex); 1419 1420 kref_put(&user->refcount, free_ipmi_user); 1421 } 1422 EXPORT_SYMBOL(ipmi_destroy_user); 1423 1424 int ipmi_get_version(struct ipmi_user *user, 1425 unsigned char *major, 1426 unsigned char *minor) 1427 { 1428 struct ipmi_device_id id; 1429 int rv; 1430 1431 user = acquire_ipmi_user(user); 1432 if (!user) 1433 return -ENODEV; 1434 1435 rv = bmc_get_device_id(user->intf, NULL, &id, NULL, NULL); 1436 if (!rv) { 1437 *major = ipmi_version_major(&id); 1438 *minor = ipmi_version_minor(&id); 1439 } 1440 release_ipmi_user(user); 1441 1442 return rv; 1443 } 1444 EXPORT_SYMBOL(ipmi_get_version); 1445 1446 int ipmi_set_my_address(struct ipmi_user *user, 1447 unsigned int channel, 1448 unsigned char address) 1449 { 1450 int rv = 0; 1451 1452 user = acquire_ipmi_user(user); 1453 if (!user) 1454 return -ENODEV; 1455 1456 if (channel >= IPMI_MAX_CHANNELS) { 1457 rv = -EINVAL; 1458 } else { 1459 channel = array_index_nospec(channel, IPMI_MAX_CHANNELS); 1460 user->intf->addrinfo[channel].address = address; 1461 } 1462 release_ipmi_user(user); 1463 1464 return rv; 1465 } 1466 EXPORT_SYMBOL(ipmi_set_my_address); 1467 1468 int ipmi_get_my_address(struct ipmi_user *user, 1469 unsigned int channel, 1470 unsigned char *address) 1471 { 1472 int rv = 0; 1473 1474 user = acquire_ipmi_user(user); 1475 if (!user) 1476 return -ENODEV; 1477 1478 if (channel >= IPMI_MAX_CHANNELS) { 1479 rv = -EINVAL; 1480 } else { 1481 channel = array_index_nospec(channel, IPMI_MAX_CHANNELS); 1482 *address = user->intf->addrinfo[channel].address; 1483 } 1484 release_ipmi_user(user); 1485 1486 return rv; 1487 } 1488 EXPORT_SYMBOL(ipmi_get_my_address); 1489 1490 int ipmi_set_my_LUN(struct ipmi_user *user, 1491 unsigned int channel, 1492 unsigned char LUN) 1493 { 1494 int rv = 0; 1495 1496 user = acquire_ipmi_user(user); 1497 if (!user) 1498 return -ENODEV; 1499 1500 if (channel >= IPMI_MAX_CHANNELS) { 1501 rv = -EINVAL; 1502 } 
else { 1503 channel = array_index_nospec(channel, IPMI_MAX_CHANNELS); 1504 user->intf->addrinfo[channel].lun = LUN & 0x3; 1505 } 1506 release_ipmi_user(user); 1507 1508 return rv; 1509 } 1510 EXPORT_SYMBOL(ipmi_set_my_LUN); 1511 1512 int ipmi_get_my_LUN(struct ipmi_user *user, 1513 unsigned int channel, 1514 unsigned char *address) 1515 { 1516 int rv = 0; 1517 1518 user = acquire_ipmi_user(user); 1519 if (!user) 1520 return -ENODEV; 1521 1522 if (channel >= IPMI_MAX_CHANNELS) { 1523 rv = -EINVAL; 1524 } else { 1525 channel = array_index_nospec(channel, IPMI_MAX_CHANNELS); 1526 *address = user->intf->addrinfo[channel].lun; 1527 } 1528 release_ipmi_user(user); 1529 1530 return rv; 1531 } 1532 EXPORT_SYMBOL(ipmi_get_my_LUN); 1533 1534 int ipmi_get_maintenance_mode(struct ipmi_user *user) 1535 { 1536 int mode; 1537 unsigned long flags; 1538 1539 user = acquire_ipmi_user(user); 1540 if (!user) 1541 return -ENODEV; 1542 1543 spin_lock_irqsave(&user->intf->maintenance_mode_lock, flags); 1544 mode = user->intf->maintenance_mode; 1545 spin_unlock_irqrestore(&user->intf->maintenance_mode_lock, flags); 1546 release_ipmi_user(user); 1547 1548 return mode; 1549 } 1550 EXPORT_SYMBOL(ipmi_get_maintenance_mode); 1551 1552 static void maintenance_mode_update(struct ipmi_smi *intf) 1553 { 1554 if (intf->handlers->set_maintenance_mode) 1555 /* 1556 * Lower level drivers only care about firmware mode 1557 * as it affects their timing. They don't care about 1558 * reset, which disables all commands for a while. 1559 */ 1560 intf->handlers->set_maintenance_mode( 1561 intf->send_info, 1562 (intf->maintenance_mode_state == 1563 IPMI_MAINTENANCE_MODE_STATE_FIRMWARE)); 1564 } 1565 1566 int ipmi_set_maintenance_mode(struct ipmi_user *user, int mode) 1567 { 1568 int rv = 0; 1569 unsigned long flags; 1570 struct ipmi_smi *intf = user->intf; 1571 1572 user = acquire_ipmi_user(user); 1573 if (!user) 1574 return -ENODEV; 1575 1576 spin_lock_irqsave(&intf->maintenance_mode_lock, flags); 1577 if (intf->maintenance_mode != mode) { 1578 switch (mode) { 1579 case IPMI_MAINTENANCE_MODE_AUTO: 1580 /* Just leave it alone. */ 1581 break; 1582 1583 case IPMI_MAINTENANCE_MODE_OFF: 1584 intf->maintenance_mode_state = 1585 IPMI_MAINTENANCE_MODE_STATE_OFF; 1586 break; 1587 1588 case IPMI_MAINTENANCE_MODE_ON: 1589 intf->maintenance_mode_state = 1590 IPMI_MAINTENANCE_MODE_STATE_FIRMWARE; 1591 break; 1592 1593 default: 1594 rv = -EINVAL; 1595 goto out_unlock; 1596 } 1597 intf->maintenance_mode = mode; 1598 1599 maintenance_mode_update(intf); 1600 } 1601 out_unlock: 1602 spin_unlock_irqrestore(&intf->maintenance_mode_lock, flags); 1603 release_ipmi_user(user); 1604 1605 return rv; 1606 } 1607 EXPORT_SYMBOL(ipmi_set_maintenance_mode); 1608 1609 int ipmi_set_gets_events(struct ipmi_user *user, bool val) 1610 { 1611 struct ipmi_smi *intf = user->intf; 1612 struct ipmi_recv_msg *msg, *msg2; 1613 struct list_head msgs; 1614 1615 user = acquire_ipmi_user(user); 1616 if (!user) 1617 return -ENODEV; 1618 1619 INIT_LIST_HEAD(&msgs); 1620 1621 mutex_lock(&intf->events_mutex); 1622 if (user->gets_events == val) 1623 goto out; 1624 1625 user->gets_events = val; 1626 1627 if (val) { 1628 if (atomic_inc_return(&intf->event_waiters) == 1) 1629 need_waiter(intf); 1630 } else { 1631 atomic_dec(&intf->event_waiters); 1632 } 1633 1634 /* Deliver any queued events. 
*/ 1635 while (user->gets_events && !list_empty(&intf->waiting_events)) { 1636 list_for_each_entry_safe(msg, msg2, &intf->waiting_events, link) 1637 list_move_tail(&msg->link, &msgs); 1638 intf->waiting_events_count = 0; 1639 if (intf->event_msg_printed) { 1640 dev_warn(intf->si_dev, "Event queue no longer full\n"); 1641 intf->event_msg_printed = 0; 1642 } 1643 1644 list_for_each_entry_safe(msg, msg2, &msgs, link) { 1645 ipmi_set_recv_msg_user(msg, user); 1646 deliver_local_response(intf, msg); 1647 } 1648 } 1649 1650 out: 1651 mutex_unlock(&intf->events_mutex); 1652 release_ipmi_user(user); 1653 1654 return 0; 1655 } 1656 EXPORT_SYMBOL(ipmi_set_gets_events); 1657 1658 static struct cmd_rcvr *find_cmd_rcvr(struct ipmi_smi *intf, 1659 unsigned char netfn, 1660 unsigned char cmd, 1661 unsigned char chan) 1662 { 1663 struct cmd_rcvr *rcvr; 1664 1665 list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link, 1666 lockdep_is_held(&intf->cmd_rcvrs_mutex)) { 1667 if ((rcvr->netfn == netfn) && (rcvr->cmd == cmd) 1668 && (rcvr->chans & (1 << chan))) 1669 return rcvr; 1670 } 1671 return NULL; 1672 } 1673 1674 static int is_cmd_rcvr_exclusive(struct ipmi_smi *intf, 1675 unsigned char netfn, 1676 unsigned char cmd, 1677 unsigned int chans) 1678 { 1679 struct cmd_rcvr *rcvr; 1680 1681 list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link, 1682 lockdep_is_held(&intf->cmd_rcvrs_mutex)) { 1683 if ((rcvr->netfn == netfn) && (rcvr->cmd == cmd) 1684 && (rcvr->chans & chans)) 1685 return 0; 1686 } 1687 return 1; 1688 } 1689 1690 int ipmi_register_for_cmd(struct ipmi_user *user, 1691 unsigned char netfn, 1692 unsigned char cmd, 1693 unsigned int chans) 1694 { 1695 struct ipmi_smi *intf = user->intf; 1696 struct cmd_rcvr *rcvr; 1697 int rv = 0; 1698 1699 user = acquire_ipmi_user(user); 1700 if (!user) 1701 return -ENODEV; 1702 1703 rcvr = kmalloc_obj(*rcvr); 1704 if (!rcvr) { 1705 rv = -ENOMEM; 1706 goto out_release; 1707 } 1708 rcvr->cmd = cmd; 1709 rcvr->netfn = netfn; 1710 rcvr->chans = chans; 1711 rcvr->user = user; 1712 1713 mutex_lock(&intf->cmd_rcvrs_mutex); 1714 /* Make sure the command/netfn is not already registered. 
 */
	if (!is_cmd_rcvr_exclusive(intf, netfn, cmd, chans)) {
		rv = -EBUSY;
		goto out_unlock;
	}

	smi_add_watch(intf, IPMI_WATCH_MASK_CHECK_COMMANDS);

	list_add_rcu(&rcvr->link, &intf->cmd_rcvrs);

out_unlock:
	mutex_unlock(&intf->cmd_rcvrs_mutex);
	if (rv)
		kfree(rcvr);
out_release:
	release_ipmi_user(user);

	return rv;
}
EXPORT_SYMBOL(ipmi_register_for_cmd);

int ipmi_unregister_for_cmd(struct ipmi_user *user,
			    unsigned char netfn,
			    unsigned char cmd,
			    unsigned int  chans)
{
	struct ipmi_smi *intf = user->intf;
	struct cmd_rcvr *rcvr;
	struct cmd_rcvr *rcvrs = NULL;
	int i, rv = -ENOENT;

	user = acquire_ipmi_user(user);
	if (!user)
		return -ENODEV;

	mutex_lock(&intf->cmd_rcvrs_mutex);
	for (i = 0; i < IPMI_NUM_CHANNELS; i++) {
		if (((1 << i) & chans) == 0)
			continue;
		rcvr = find_cmd_rcvr(intf, netfn, cmd, i);
		if (rcvr == NULL)
			continue;
		if (rcvr->user == user) {
			rv = 0;
			rcvr->chans &= ~chans;
			if (rcvr->chans == 0) {
				list_del_rcu(&rcvr->link);
				rcvr->next = rcvrs;
				rcvrs = rcvr;
			}
		}
	}
	mutex_unlock(&intf->cmd_rcvrs_mutex);
	synchronize_rcu();
	release_ipmi_user(user);
	while (rcvrs) {
		smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_COMMANDS);
		rcvr = rcvrs;
		rcvrs = rcvr->next;
		kfree(rcvr);
	}

	return rv;
}
EXPORT_SYMBOL(ipmi_unregister_for_cmd);

unsigned char
ipmb_checksum(unsigned char *data, int size)
{
	unsigned char csum = 0;

	for (; size > 0; size--, data++)
		csum += *data;

	return -csum;
}
EXPORT_SYMBOL(ipmb_checksum);
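/*
 * Worked example (illustrative only): ipmb_checksum() returns the
 * two's complement of the 8-bit sum, so adding the checksum back to
 * the bytes it covers always yields 0 modulo 256.  For the connection
 * header of an IPMB request to slave address 0x20 with
 * netfn = IPMI_NETFN_APP_REQUEST (0x06) and LUN 0:
 *
 *	data[] = { 0x20, 0x06 << 2 }	sum = 0x20 + 0x18 = 0x38
 *	ipmb_checksum(data, 2)		returns (unsigned char)-0x38 = 0xc8
 *
 * and 0x20 + 0x18 + 0xc8 == 0x100, which truncates to 0 in an
 * unsigned char, which is exactly the check a receiver performs.
 */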
static inline void format_ipmb_msg(struct ipmi_smi_msg *smi_msg,
				   struct kernel_ipmi_msg *msg,
				   struct ipmi_ipmb_addr *ipmb_addr,
				   long msgid,
				   unsigned char ipmb_seq,
				   int broadcast,
				   unsigned char source_address,
				   unsigned char source_lun)
{
	int i = broadcast;

	/* Format the IPMB header data. */
	smi_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
	smi_msg->data[1] = IPMI_SEND_MSG_CMD;
	smi_msg->data[2] = ipmb_addr->channel;
	if (broadcast)
		smi_msg->data[3] = 0;
	smi_msg->data[i+3] = ipmb_addr->slave_addr;
	smi_msg->data[i+4] = (msg->netfn << 2) | (ipmb_addr->lun & 0x3);
	smi_msg->data[i+5] = ipmb_checksum(&smi_msg->data[i + 3], 2);
	smi_msg->data[i+6] = source_address;
	smi_msg->data[i+7] = (ipmb_seq << 2) | source_lun;
	smi_msg->data[i+8] = msg->cmd;

	/* Now tack on the data to the message. */
	if (msg->data_len > 0)
		memcpy(&smi_msg->data[i + 9], msg->data, msg->data_len);
	smi_msg->data_size = msg->data_len + 9;

	/* Now calculate the checksum and tack it on. */
	smi_msg->data[i+smi_msg->data_size]
		= ipmb_checksum(&smi_msg->data[i + 6], smi_msg->data_size - 6);

	/*
	 * Add on the checksum size and the offset from the
	 * broadcast.
	 */
	smi_msg->data_size += 1 + i;

	smi_msg->msgid = msgid;
}

static inline void format_lan_msg(struct ipmi_smi_msg *smi_msg,
				  struct kernel_ipmi_msg *msg,
				  struct ipmi_lan_addr *lan_addr,
				  long msgid,
				  unsigned char ipmb_seq,
				  unsigned char source_lun)
{
	/* Format the LAN header data. */
	smi_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
	smi_msg->data[1] = IPMI_SEND_MSG_CMD;
	smi_msg->data[2] = lan_addr->channel;
	smi_msg->data[3] = lan_addr->session_handle;
	smi_msg->data[4] = lan_addr->remote_SWID;
	smi_msg->data[5] = (msg->netfn << 2) | (lan_addr->lun & 0x3);
	smi_msg->data[6] = ipmb_checksum(&smi_msg->data[4], 2);
	smi_msg->data[7] = lan_addr->local_SWID;
	smi_msg->data[8] = (ipmb_seq << 2) | source_lun;
	smi_msg->data[9] = msg->cmd;

	/* Now tack on the data to the message. */
	if (msg->data_len > 0)
		memcpy(&smi_msg->data[10], msg->data, msg->data_len);
	smi_msg->data_size = msg->data_len + 10;

	/* Now calculate the checksum and tack it on. */
	smi_msg->data[smi_msg->data_size]
		= ipmb_checksum(&smi_msg->data[7], smi_msg->data_size - 7);

	/* Add on the checksum size. */
	smi_msg->data_size += 1;

	smi_msg->msgid = msgid;
}

static struct ipmi_smi_msg *smi_add_send_msg(struct ipmi_smi *intf,
					     struct ipmi_smi_msg *smi_msg,
					     int priority)
{
	if (intf->curr_msg) {
		if (priority > 0)
			list_add_tail(&smi_msg->link, &intf->hp_xmit_msgs);
		else
			list_add_tail(&smi_msg->link, &intf->xmit_msgs);
		smi_msg = NULL;
	} else {
		intf->curr_msg = smi_msg;
	}

	return smi_msg;
}

static int smi_send(struct ipmi_smi *intf,
		    const struct ipmi_smi_handlers *handlers,
		    struct ipmi_smi_msg *smi_msg, int priority)
{
	int run_to_completion = READ_ONCE(intf->run_to_completion);
	unsigned long flags = 0;
	int rv = 0;

	ipmi_lock_xmit_msgs(intf, run_to_completion, &flags);
	smi_msg = smi_add_send_msg(intf, smi_msg, priority);
	ipmi_unlock_xmit_msgs(intf, run_to_completion, &flags);

	if (smi_msg) {
		rv = handlers->sender(intf->send_info, smi_msg);
		if (rv) {
			ipmi_lock_xmit_msgs(intf, run_to_completion, &flags);
			intf->curr_msg = NULL;
			ipmi_unlock_xmit_msgs(intf, run_to_completion, &flags);
			/*
			 * Something may have been added to the transmit
			 * queue, so schedule a check for that.
			 */
			queue_work(system_wq, &intf->smi_work);
		}
	}
	return rv;
}

static bool is_maintenance_mode_cmd(struct kernel_ipmi_msg *msg)
{
	return (((msg->netfn == IPMI_NETFN_APP_REQUEST)
		 && ((msg->cmd == IPMI_COLD_RESET_CMD)
		     || (msg->cmd == IPMI_WARM_RESET_CMD)))
		|| (msg->netfn == IPMI_NETFN_FIRMWARE_REQUEST));
}

static int i_ipmi_req_sysintf(struct ipmi_smi *intf,
			      struct ipmi_addr *addr,
			      long msgid,
			      struct kernel_ipmi_msg *msg,
			      struct ipmi_smi_msg *smi_msg,
			      struct ipmi_recv_msg *recv_msg,
			      int retries,
			      unsigned int retry_time_ms)
{
	struct ipmi_system_interface_addr *smi_addr;

	if (msg->netfn & 1)
		/* Responses are not allowed to the SMI. */
		return -EINVAL;

	smi_addr = (struct ipmi_system_interface_addr *) addr;
	if (smi_addr->lun > 3) {
		ipmi_inc_stat(intf, sent_invalid_commands);
		return -EINVAL;
	}

	memcpy(&recv_msg->addr, smi_addr, sizeof(*smi_addr));

	if ((msg->netfn == IPMI_NETFN_APP_REQUEST)
	    && ((msg->cmd == IPMI_SEND_MSG_CMD)
		|| (msg->cmd == IPMI_GET_MSG_CMD)
		|| (msg->cmd == IPMI_READ_EVENT_MSG_BUFFER_CMD))) {
		/*
		 * We don't let the user do these, since we manage
		 * the sequence numbers.
1954 */ 1955 ipmi_inc_stat(intf, sent_invalid_commands); 1956 return -EINVAL; 1957 } 1958 1959 if (is_maintenance_mode_cmd(msg)) { 1960 unsigned long flags; 1961 int newst; 1962 1963 if (msg->netfn == IPMI_NETFN_FIRMWARE_REQUEST) 1964 newst = IPMI_MAINTENANCE_MODE_STATE_FIRMWARE; 1965 else 1966 newst = IPMI_MAINTENANCE_MODE_STATE_RESET; 1967 1968 spin_lock_irqsave(&intf->maintenance_mode_lock, flags); 1969 intf->auto_maintenance_timeout = maintenance_mode_timeout_ms; 1970 if (!intf->maintenance_mode 1971 && intf->maintenance_mode_state < newst) { 1972 intf->maintenance_mode_state = newst; 1973 maintenance_mode_update(intf); 1974 mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES); 1975 } 1976 spin_unlock_irqrestore(&intf->maintenance_mode_lock, 1977 flags); 1978 } 1979 1980 if (msg->data_len + 2 > IPMI_MAX_MSG_LENGTH) { 1981 ipmi_inc_stat(intf, sent_invalid_commands); 1982 return -EMSGSIZE; 1983 } 1984 1985 smi_msg->data[0] = (msg->netfn << 2) | (smi_addr->lun & 0x3); 1986 smi_msg->data[1] = msg->cmd; 1987 smi_msg->msgid = msgid; 1988 smi_msg->recv_msg = recv_msg; 1989 if (msg->data_len > 0) 1990 memcpy(&smi_msg->data[2], msg->data, msg->data_len); 1991 smi_msg->data_size = msg->data_len + 2; 1992 ipmi_inc_stat(intf, sent_local_commands); 1993 1994 return 0; 1995 } 1996 1997 static int i_ipmi_req_ipmb(struct ipmi_smi *intf, 1998 struct ipmi_addr *addr, 1999 long msgid, 2000 struct kernel_ipmi_msg *msg, 2001 struct ipmi_smi_msg *smi_msg, 2002 struct ipmi_recv_msg *recv_msg, 2003 unsigned char source_address, 2004 unsigned char source_lun, 2005 int retries, 2006 unsigned int retry_time_ms) 2007 { 2008 struct ipmi_ipmb_addr *ipmb_addr; 2009 unsigned char ipmb_seq; 2010 long seqid; 2011 int broadcast = 0; 2012 struct ipmi_channel *chans; 2013 int rv = 0; 2014 2015 if (addr->channel >= IPMI_MAX_CHANNELS) { 2016 ipmi_inc_stat(intf, sent_invalid_commands); 2017 return -EINVAL; 2018 } 2019 2020 chans = READ_ONCE(intf->channel_list)->c; 2021 2022 if (chans[addr->channel].medium != IPMI_CHANNEL_MEDIUM_IPMB) { 2023 ipmi_inc_stat(intf, sent_invalid_commands); 2024 return -EINVAL; 2025 } 2026 2027 if (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE) { 2028 /* 2029 * Broadcasts add a zero at the beginning of the 2030 * message, but otherwise is the same as an IPMB 2031 * address. 2032 */ 2033 addr->addr_type = IPMI_IPMB_ADDR_TYPE; 2034 broadcast = 1; 2035 retries = 0; /* Don't retry broadcasts. */ 2036 } 2037 2038 /* 2039 * 9 for the header and 1 for the checksum, plus 2040 * possibly one for the broadcast. 2041 */ 2042 if ((msg->data_len + 10 + broadcast) > IPMI_MAX_MSG_LENGTH) { 2043 ipmi_inc_stat(intf, sent_invalid_commands); 2044 return -EMSGSIZE; 2045 } 2046 2047 ipmb_addr = (struct ipmi_ipmb_addr *) addr; 2048 if (ipmb_addr->lun > 3) { 2049 ipmi_inc_stat(intf, sent_invalid_commands); 2050 return -EINVAL; 2051 } 2052 2053 memcpy(&recv_msg->addr, ipmb_addr, sizeof(*ipmb_addr)); 2054 2055 if (recv_msg->msg.netfn & 0x1) { 2056 /* 2057 * It's a response, so use the user's sequence 2058 * from msgid. 2059 */ 2060 ipmi_inc_stat(intf, sent_ipmb_responses); 2061 format_ipmb_msg(smi_msg, msg, ipmb_addr, msgid, 2062 msgid, broadcast, 2063 source_address, source_lun); 2064 2065 /* 2066 * Save the receive message so we can use it 2067 * to deliver the response. 
2068 */ 2069 smi_msg->recv_msg = recv_msg; 2070 } else { 2071 mutex_lock(&intf->seq_lock); 2072 2073 if (is_maintenance_mode_cmd(msg)) 2074 intf->ipmb_maintenance_mode_timeout = 2075 maintenance_mode_timeout_ms; 2076 2077 if (intf->ipmb_maintenance_mode_timeout && retry_time_ms == 0) 2078 /* Different default in maintenance mode */ 2079 retry_time_ms = default_maintenance_retry_ms; 2080 2081 /* 2082 * Create a sequence number with a 1 second 2083 * timeout and 4 retries. 2084 */ 2085 rv = intf_next_seq(intf, 2086 recv_msg, 2087 retry_time_ms, 2088 retries, 2089 broadcast, 2090 &ipmb_seq, 2091 &seqid); 2092 if (rv) 2093 /* 2094 * We have used up all the sequence numbers, 2095 * probably, so abort. 2096 */ 2097 goto out_err; 2098 2099 ipmi_inc_stat(intf, sent_ipmb_commands); 2100 2101 /* 2102 * Store the sequence number in the message, 2103 * so that when the send message response 2104 * comes back we can start the timer. 2105 */ 2106 format_ipmb_msg(smi_msg, msg, ipmb_addr, 2107 STORE_SEQ_IN_MSGID(ipmb_seq, seqid), 2108 ipmb_seq, broadcast, 2109 source_address, source_lun); 2110 2111 /* 2112 * Copy the message into the recv message data, so we 2113 * can retransmit it later if necessary. 2114 */ 2115 memcpy(recv_msg->msg_data, smi_msg->data, 2116 smi_msg->data_size); 2117 recv_msg->msg.data = recv_msg->msg_data; 2118 recv_msg->msg.data_len = smi_msg->data_size; 2119 2120 /* 2121 * We don't unlock until here, because we need 2122 * to copy the completed message into the 2123 * recv_msg before we release the lock. 2124 * Otherwise, race conditions may bite us. I 2125 * know that's pretty paranoid, but I prefer 2126 * to be correct. 2127 */ 2128 out_err: 2129 mutex_unlock(&intf->seq_lock); 2130 } 2131 2132 return rv; 2133 } 2134 2135 static int i_ipmi_req_ipmb_direct(struct ipmi_smi *intf, 2136 struct ipmi_addr *addr, 2137 long msgid, 2138 struct kernel_ipmi_msg *msg, 2139 struct ipmi_smi_msg *smi_msg, 2140 struct ipmi_recv_msg *recv_msg, 2141 unsigned char source_lun) 2142 { 2143 struct ipmi_ipmb_direct_addr *daddr; 2144 bool is_cmd = !(recv_msg->msg.netfn & 0x1); 2145 2146 if (!(intf->handlers->flags & IPMI_SMI_CAN_HANDLE_IPMB_DIRECT)) 2147 return -EAFNOSUPPORT; 2148 2149 /* Responses must have a completion code. 
*/ 2150 if (!is_cmd && msg->data_len < 1) { 2151 ipmi_inc_stat(intf, sent_invalid_commands); 2152 return -EINVAL; 2153 } 2154 2155 if ((msg->data_len + 4) > IPMI_MAX_MSG_LENGTH) { 2156 ipmi_inc_stat(intf, sent_invalid_commands); 2157 return -EMSGSIZE; 2158 } 2159 2160 daddr = (struct ipmi_ipmb_direct_addr *) addr; 2161 if (daddr->rq_lun > 3 || daddr->rs_lun > 3) { 2162 ipmi_inc_stat(intf, sent_invalid_commands); 2163 return -EINVAL; 2164 } 2165 2166 smi_msg->type = IPMI_SMI_MSG_TYPE_IPMB_DIRECT; 2167 smi_msg->msgid = msgid; 2168 2169 if (is_cmd) { 2170 smi_msg->data[0] = msg->netfn << 2 | daddr->rs_lun; 2171 smi_msg->data[2] = recv_msg->msgid << 2 | daddr->rq_lun; 2172 } else { 2173 smi_msg->data[0] = msg->netfn << 2 | daddr->rq_lun; 2174 smi_msg->data[2] = recv_msg->msgid << 2 | daddr->rs_lun; 2175 } 2176 smi_msg->data[1] = daddr->slave_addr; 2177 smi_msg->data[3] = msg->cmd; 2178 2179 memcpy(smi_msg->data + 4, msg->data, msg->data_len); 2180 smi_msg->data_size = msg->data_len + 4; 2181 2182 smi_msg->recv_msg = recv_msg; 2183 2184 return 0; 2185 } 2186 2187 static int i_ipmi_req_lan(struct ipmi_smi *intf, 2188 struct ipmi_addr *addr, 2189 long msgid, 2190 struct kernel_ipmi_msg *msg, 2191 struct ipmi_smi_msg *smi_msg, 2192 struct ipmi_recv_msg *recv_msg, 2193 unsigned char source_lun, 2194 int retries, 2195 unsigned int retry_time_ms) 2196 { 2197 struct ipmi_lan_addr *lan_addr; 2198 unsigned char ipmb_seq; 2199 long seqid; 2200 struct ipmi_channel *chans; 2201 int rv = 0; 2202 2203 if (addr->channel >= IPMI_MAX_CHANNELS) { 2204 ipmi_inc_stat(intf, sent_invalid_commands); 2205 return -EINVAL; 2206 } 2207 2208 chans = READ_ONCE(intf->channel_list)->c; 2209 2210 if ((chans[addr->channel].medium 2211 != IPMI_CHANNEL_MEDIUM_8023LAN) 2212 && (chans[addr->channel].medium 2213 != IPMI_CHANNEL_MEDIUM_ASYNC)) { 2214 ipmi_inc_stat(intf, sent_invalid_commands); 2215 return -EINVAL; 2216 } 2217 2218 /* 11 for the header and 1 for the checksum. */ 2219 if ((msg->data_len + 12) > IPMI_MAX_MSG_LENGTH) { 2220 ipmi_inc_stat(intf, sent_invalid_commands); 2221 return -EMSGSIZE; 2222 } 2223 2224 lan_addr = (struct ipmi_lan_addr *) addr; 2225 if (lan_addr->lun > 3) { 2226 ipmi_inc_stat(intf, sent_invalid_commands); 2227 return -EINVAL; 2228 } 2229 2230 memcpy(&recv_msg->addr, lan_addr, sizeof(*lan_addr)); 2231 2232 if (recv_msg->msg.netfn & 0x1) { 2233 /* 2234 * It's a response, so use the user's sequence 2235 * from msgid. 2236 */ 2237 ipmi_inc_stat(intf, sent_lan_responses); 2238 format_lan_msg(smi_msg, msg, lan_addr, msgid, 2239 msgid, source_lun); 2240 2241 /* 2242 * Save the receive message so we can use it 2243 * to deliver the response. 2244 */ 2245 smi_msg->recv_msg = recv_msg; 2246 } else { 2247 mutex_lock(&intf->seq_lock); 2248 2249 /* 2250 * Create a sequence number with a 1 second 2251 * timeout and 4 retries. 2252 */ 2253 rv = intf_next_seq(intf, 2254 recv_msg, 2255 retry_time_ms, 2256 retries, 2257 0, 2258 &ipmb_seq, 2259 &seqid); 2260 if (rv) 2261 /* 2262 * We have used up all the sequence numbers, 2263 * probably, so abort. 2264 */ 2265 goto out_err; 2266 2267 ipmi_inc_stat(intf, sent_lan_commands); 2268 2269 /* 2270 * Store the sequence number in the message, 2271 * so that when the send message response 2272 * comes back we can start the timer. 2273 */ 2274 format_lan_msg(smi_msg, msg, lan_addr, 2275 STORE_SEQ_IN_MSGID(ipmb_seq, seqid), 2276 ipmb_seq, source_lun); 2277 2278 /* 2279 * Copy the message into the recv message data, so we 2280 * can retransmit it later if necessary. 
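 * The timeout code rebuilds the retry from this saved copy if no
 * response arrives for the assigned sequence number.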
2281 */ 2282 memcpy(recv_msg->msg_data, smi_msg->data, 2283 smi_msg->data_size); 2284 recv_msg->msg.data = recv_msg->msg_data; 2285 recv_msg->msg.data_len = smi_msg->data_size; 2286 2287 /* 2288 * We don't unlock until here, because we need 2289 * to copy the completed message into the 2290 * recv_msg before we release the lock. 2291 * Otherwise, race conditions may bite us. I 2292 * know that's pretty paranoid, but I prefer 2293 * to be correct. 2294 */ 2295 out_err: 2296 mutex_unlock(&intf->seq_lock); 2297 } 2298 2299 return rv; 2300 } 2301 2302 /* 2303 * Separate from ipmi_request so that the user does not have to be 2304 * supplied in certain circumstances (mainly at panic time). If 2305 * messages are supplied, they will be freed, even if an error 2306 * occurs. 2307 */ 2308 static int i_ipmi_request(struct ipmi_user *user, 2309 struct ipmi_smi *intf, 2310 struct ipmi_addr *addr, 2311 long msgid, 2312 struct kernel_ipmi_msg *msg, 2313 void *user_msg_data, 2314 void *supplied_smi, 2315 struct ipmi_recv_msg *supplied_recv, 2316 int priority, 2317 unsigned char source_address, 2318 unsigned char source_lun, 2319 int retries, 2320 unsigned int retry_time_ms) 2321 { 2322 struct ipmi_smi_msg *smi_msg; 2323 struct ipmi_recv_msg *recv_msg; 2324 int run_to_completion = READ_ONCE(intf->run_to_completion); 2325 int rv = 0; 2326 bool in_seq_table = false; 2327 2328 if (supplied_recv) { 2329 recv_msg = supplied_recv; 2330 recv_msg->user = user; 2331 if (user) { 2332 atomic_inc(&user->nr_msgs); 2333 /* The put happens when the message is freed. */ 2334 kref_get(&user->refcount); 2335 } 2336 } else { 2337 recv_msg = ipmi_alloc_recv_msg(user); 2338 if (IS_ERR(recv_msg)) 2339 return PTR_ERR(recv_msg); 2340 } 2341 recv_msg->user_msg_data = user_msg_data; 2342 2343 if (supplied_smi) 2344 smi_msg = supplied_smi; 2345 else { 2346 smi_msg = ipmi_alloc_smi_msg(); 2347 if (smi_msg == NULL) { 2348 if (!supplied_recv) 2349 ipmi_free_recv_msg(recv_msg); 2350 return -ENOMEM; 2351 } 2352 } 2353 2354 if (!run_to_completion) 2355 mutex_lock(&intf->users_mutex); 2356 if (intf->maintenance_mode_state == IPMI_MAINTENANCE_MODE_STATE_RESET) { 2357 /* No messages while the BMC is in reset. */ 2358 rv = -EBUSY; 2359 goto out_err; 2360 } 2361 if (intf->in_shutdown) { 2362 rv = -ENODEV; 2363 goto out_err; 2364 } 2365 2366 recv_msg->msgid = msgid; 2367 /* 2368 * Store the message to send in the receive message so timeout 2369 * responses can get the proper response data. 2370 */ 2371 recv_msg->msg = *msg; 2372 2373 if (addr->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) { 2374 rv = i_ipmi_req_sysintf(intf, addr, msgid, msg, smi_msg, 2375 recv_msg, retries, retry_time_ms); 2376 } else if (is_ipmb_addr(addr) || is_ipmb_bcast_addr(addr)) { 2377 rv = i_ipmi_req_ipmb(intf, addr, msgid, msg, smi_msg, recv_msg, 2378 source_address, source_lun, 2379 retries, retry_time_ms); 2380 in_seq_table = true; 2381 } else if (is_ipmb_direct_addr(addr)) { 2382 rv = i_ipmi_req_ipmb_direct(intf, addr, msgid, msg, smi_msg, 2383 recv_msg, source_lun); 2384 } else if (is_lan_addr(addr)) { 2385 rv = i_ipmi_req_lan(intf, addr, msgid, msg, smi_msg, recv_msg, 2386 source_lun, retries, retry_time_ms); 2387 in_seq_table = true; 2388 } else { 2389 /* Unknown address type. 
*/ 2390 ipmi_inc_stat(intf, sent_invalid_commands); 2391 rv = -EINVAL; 2392 } 2393 2394 if (!rv) { 2395 dev_dbg(intf->si_dev, "Send: %*ph\n", 2396 smi_msg->data_size, smi_msg->data); 2397 2398 rv = smi_send(intf, intf->handlers, smi_msg, priority); 2399 if (rv != IPMI_CC_NO_ERROR) 2400 /* smi_send() returns an IPMI err, return a Linux one. */ 2401 rv = -EIO; 2402 if (rv && in_seq_table) { 2403 /* 2404 * If it's in the sequence table, it will be 2405 * retried later, so ignore errors. 2406 */ 2407 rv = 0; 2408 /* But we need to fix the timeout. */ 2409 intf_start_seq_timer(intf, smi_msg->msgid); 2410 ipmi_free_smi_msg(smi_msg); 2411 smi_msg = NULL; 2412 } 2413 } 2414 out_err: 2415 if (!run_to_completion) 2416 mutex_unlock(&intf->users_mutex); 2417 2418 if (rv) { 2419 if (!supplied_smi) 2420 ipmi_free_smi_msg(smi_msg); 2421 if (!supplied_recv) 2422 ipmi_free_recv_msg(recv_msg); 2423 } 2424 return rv; 2425 } 2426 2427 static int check_addr(struct ipmi_smi *intf, 2428 struct ipmi_addr *addr, 2429 unsigned char *saddr, 2430 unsigned char *lun) 2431 { 2432 if (addr->channel >= IPMI_MAX_CHANNELS) 2433 return -EINVAL; 2434 addr->channel = array_index_nospec(addr->channel, IPMI_MAX_CHANNELS); 2435 *lun = intf->addrinfo[addr->channel].lun; 2436 *saddr = intf->addrinfo[addr->channel].address; 2437 return 0; 2438 } 2439 2440 int ipmi_request_settime(struct ipmi_user *user, 2441 struct ipmi_addr *addr, 2442 long msgid, 2443 struct kernel_ipmi_msg *msg, 2444 void *user_msg_data, 2445 int priority, 2446 int retries, 2447 unsigned int retry_time_ms) 2448 { 2449 unsigned char saddr = 0, lun = 0; 2450 int rv; 2451 2452 if (!user) 2453 return -EINVAL; 2454 2455 user = acquire_ipmi_user(user); 2456 if (!user) 2457 return -ENODEV; 2458 2459 rv = check_addr(user->intf, addr, &saddr, &lun); 2460 if (!rv) 2461 rv = i_ipmi_request(user, 2462 user->intf, 2463 addr, 2464 msgid, 2465 msg, 2466 user_msg_data, 2467 NULL, NULL, 2468 priority, 2469 saddr, 2470 lun, 2471 retries, 2472 retry_time_ms); 2473 2474 release_ipmi_user(user); 2475 return rv; 2476 } 2477 EXPORT_SYMBOL(ipmi_request_settime); 2478 2479 int ipmi_request_supply_msgs(struct ipmi_user *user, 2480 struct ipmi_addr *addr, 2481 long msgid, 2482 struct kernel_ipmi_msg *msg, 2483 void *user_msg_data, 2484 void *supplied_smi, 2485 struct ipmi_recv_msg *supplied_recv, 2486 int priority) 2487 { 2488 unsigned char saddr = 0, lun = 0; 2489 int rv; 2490 2491 if (!user) 2492 return -EINVAL; 2493 2494 user = acquire_ipmi_user(user); 2495 if (!user) 2496 return -ENODEV; 2497 2498 rv = check_addr(user->intf, addr, &saddr, &lun); 2499 if (!rv) 2500 rv = i_ipmi_request(user, 2501 user->intf, 2502 addr, 2503 msgid, 2504 msg, 2505 user_msg_data, 2506 supplied_smi, 2507 supplied_recv, 2508 priority, 2509 saddr, 2510 lun, 2511 -1, 0); 2512 2513 release_ipmi_user(user); 2514 return rv; 2515 } 2516 EXPORT_SYMBOL(ipmi_request_supply_msgs); 2517 2518 static void bmc_device_id_handler(struct ipmi_smi *intf, 2519 struct ipmi_recv_msg *msg) 2520 { 2521 int rv; 2522 2523 if ((msg->addr.addr_type != IPMI_SYSTEM_INTERFACE_ADDR_TYPE) 2524 || (msg->msg.netfn != IPMI_NETFN_APP_RESPONSE) 2525 || (msg->msg.cmd != IPMI_GET_DEVICE_ID_CMD)) { 2526 dev_warn(intf->si_dev, 2527 "invalid device_id msg: addr_type=%d netfn=%x cmd=%x\n", 2528 msg->addr.addr_type, msg->msg.netfn, msg->msg.cmd); 2529 return; 2530 } 2531 2532 if (msg->msg.data[0]) { 2533 dev_warn(intf->si_dev, "device id fetch failed: 0x%2.2x\n", 2534 msg->msg.data[0]); 2535 intf->bmc->dyn_id_set = 0; 2536 goto out; 2537 } 2538 2539 
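	/*
	 * Decode the raw Get Device ID response into fetch_id;
	 * __bmc_get_device_id() compares it against the currently
	 * registered BMC data.
	 */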
rv = ipmi_demangle_device_id(msg->msg.netfn, msg->msg.cmd, 2540 msg->msg.data, msg->msg.data_len, &intf->bmc->fetch_id); 2541 if (rv) { 2542 dev_warn(intf->si_dev, "device id demangle failed: %d\n", rv); 2543 /* record completion code when error */ 2544 intf->bmc->cc = msg->msg.data[0]; 2545 intf->bmc->dyn_id_set = 0; 2546 } else { 2547 /* 2548 * Make sure the id data is available before setting 2549 * dyn_id_set. 2550 */ 2551 smp_wmb(); 2552 intf->bmc->dyn_id_set = 1; 2553 } 2554 out: 2555 wake_up(&intf->waitq); 2556 } 2557 2558 static int 2559 send_get_device_id_cmd(struct ipmi_smi *intf) 2560 { 2561 struct ipmi_system_interface_addr si; 2562 struct kernel_ipmi_msg msg; 2563 2564 si.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE; 2565 si.channel = IPMI_BMC_CHANNEL; 2566 si.lun = 0; 2567 2568 msg.netfn = IPMI_NETFN_APP_REQUEST; 2569 msg.cmd = IPMI_GET_DEVICE_ID_CMD; 2570 msg.data = NULL; 2571 msg.data_len = 0; 2572 2573 return i_ipmi_request(NULL, 2574 intf, 2575 (struct ipmi_addr *) &si, 2576 0, 2577 &msg, 2578 intf, 2579 NULL, 2580 NULL, 2581 0, 2582 intf->addrinfo[0].address, 2583 intf->addrinfo[0].lun, 2584 -1, 0); 2585 } 2586 2587 static int __get_device_id(struct ipmi_smi *intf, struct bmc_device *bmc) 2588 { 2589 int rv; 2590 unsigned int retry_count = 0; 2591 2592 intf->null_user_handler = bmc_device_id_handler; 2593 2594 retry: 2595 bmc->cc = 0; 2596 bmc->dyn_id_set = 2; 2597 2598 rv = send_get_device_id_cmd(intf); 2599 if (rv) 2600 goto out_reset_handler; 2601 2602 wait_event(intf->waitq, bmc->dyn_id_set != 2); 2603 2604 if (!bmc->dyn_id_set) { 2605 if (bmc->cc != IPMI_CC_NO_ERROR && 2606 ++retry_count <= GET_DEVICE_ID_MAX_RETRY) { 2607 msleep(500); 2608 dev_warn(intf->si_dev, 2609 "BMC returned 0x%2.2x, retry get bmc device id\n", 2610 bmc->cc); 2611 goto retry; 2612 } 2613 2614 rv = -EIO; /* Something went wrong in the fetch. */ 2615 } 2616 2617 /* dyn_id_set makes the id data available. */ 2618 smp_rmb(); 2619 2620 out_reset_handler: 2621 intf->null_user_handler = NULL; 2622 2623 return rv; 2624 } 2625 2626 /* 2627 * Fetch the device id for the bmc/interface. You must pass in either 2628 * bmc or intf, this code will get the other one. If the data has 2629 * been recently fetched, this will just use the cached data. Otherwise 2630 * it will run a new fetch. 2631 * 2632 * Except for the first time this is called (in ipmi_add_smi()), 2633 * this will always return good data; 2634 */ 2635 static int __bmc_get_device_id(struct ipmi_smi *intf, struct bmc_device *bmc, 2636 struct ipmi_device_id *id, 2637 bool *guid_set, guid_t *guid, int intf_num) 2638 { 2639 int rv = 0; 2640 int prev_dyn_id_set, prev_guid_set; 2641 bool intf_set = intf != NULL; 2642 2643 if (!intf) { 2644 mutex_lock(&bmc->dyn_mutex); 2645 retry_bmc_lock: 2646 if (list_empty(&bmc->intfs)) { 2647 mutex_unlock(&bmc->dyn_mutex); 2648 return -ENOENT; 2649 } 2650 intf = list_first_entry(&bmc->intfs, struct ipmi_smi, 2651 bmc_link); 2652 kref_get(&intf->refcount); 2653 mutex_unlock(&bmc->dyn_mutex); 2654 mutex_lock(&intf->bmc_reg_mutex); 2655 mutex_lock(&bmc->dyn_mutex); 2656 if (intf != list_first_entry(&bmc->intfs, struct ipmi_smi, 2657 bmc_link)) { 2658 mutex_unlock(&intf->bmc_reg_mutex); 2659 kref_put(&intf->refcount, intf_free); 2660 goto retry_bmc_lock; 2661 } 2662 } else { 2663 mutex_lock(&intf->bmc_reg_mutex); 2664 bmc = intf->bmc; 2665 mutex_lock(&bmc->dyn_mutex); 2666 kref_get(&intf->refcount); 2667 } 2668 2669 /* If we have a valid and current ID, just return that. 
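 * ("current" means fetched within the last IPMI_DYN_DEV_ID_EXPIRY
 * jiffies; dyn_id_expiry is refreshed on each successful fetch below.)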
*/ 2670 if (intf->in_bmc_register || 2671 (bmc->dyn_id_set && time_is_after_jiffies(bmc->dyn_id_expiry))) 2672 goto out_noprocessing; 2673 2674 /* Don't allow sysfs access when in maintenance mode. */ 2675 if (intf->maintenance_mode_state) { 2676 rv = -EBUSY; 2677 goto out_noprocessing; 2678 } 2679 2680 prev_guid_set = bmc->dyn_guid_set; 2681 __get_guid(intf); 2682 2683 prev_dyn_id_set = bmc->dyn_id_set; 2684 rv = __get_device_id(intf, bmc); 2685 if (rv) 2686 goto out; 2687 2688 /* 2689 * The guid, device id, manufacturer id, and product id should 2690 * not change on a BMC. If it does we have to do some dancing. 2691 */ 2692 if (!intf->bmc_registered 2693 || (!prev_guid_set && bmc->dyn_guid_set) 2694 || (!prev_dyn_id_set && bmc->dyn_id_set) 2695 || (prev_guid_set && bmc->dyn_guid_set 2696 && !guid_equal(&bmc->guid, &bmc->fetch_guid)) 2697 || bmc->id.device_id != bmc->fetch_id.device_id 2698 || bmc->id.manufacturer_id != bmc->fetch_id.manufacturer_id 2699 || bmc->id.product_id != bmc->fetch_id.product_id) { 2700 struct ipmi_device_id id = bmc->fetch_id; 2701 int guid_set = bmc->dyn_guid_set; 2702 guid_t guid; 2703 2704 guid = bmc->fetch_guid; 2705 mutex_unlock(&bmc->dyn_mutex); 2706 2707 __ipmi_bmc_unregister(intf); 2708 /* Fill in the temporary BMC for good measure. */ 2709 intf->bmc->id = id; 2710 intf->bmc->dyn_guid_set = guid_set; 2711 intf->bmc->guid = guid; 2712 if (__ipmi_bmc_register(intf, &id, guid_set, &guid, intf_num)) 2713 need_waiter(intf); /* Retry later on an error. */ 2714 else 2715 __scan_channels(intf, &id, false); 2716 2717 2718 if (!intf_set) { 2719 /* 2720 * We weren't given the interface on the 2721 * command line, so restart the operation on 2722 * the next interface for the BMC. 2723 */ 2724 mutex_unlock(&intf->bmc_reg_mutex); 2725 mutex_lock(&bmc->dyn_mutex); 2726 goto retry_bmc_lock; 2727 } 2728 2729 /* We have a new BMC, set it up. */ 2730 bmc = intf->bmc; 2731 mutex_lock(&bmc->dyn_mutex); 2732 goto out_noprocessing; 2733 } else if (memcmp(&bmc->fetch_id, &bmc->id, sizeof(bmc->id))) 2734 /* Version info changes, scan the channels again. */ 2735 __scan_channels(intf, &bmc->fetch_id, true); 2736 2737 bmc->dyn_id_expiry = jiffies + IPMI_DYN_DEV_ID_EXPIRY; 2738 2739 out: 2740 if (rv && prev_dyn_id_set) { 2741 rv = 0; /* Ignore failures if we have previous data. */ 2742 bmc->dyn_id_set = prev_dyn_id_set; 2743 } 2744 if (!rv) { 2745 bmc->id = bmc->fetch_id; 2746 if (bmc->dyn_guid_set) 2747 bmc->guid = bmc->fetch_guid; 2748 else if (prev_guid_set) 2749 /* 2750 * The guid used to be valid and it failed to fetch, 2751 * just use the cached value. 
2752 */ 2753 bmc->dyn_guid_set = prev_guid_set; 2754 } 2755 out_noprocessing: 2756 if (!rv) { 2757 if (id) 2758 *id = bmc->id; 2759 2760 if (guid_set) 2761 *guid_set = bmc->dyn_guid_set; 2762 2763 if (guid && bmc->dyn_guid_set) 2764 *guid = bmc->guid; 2765 } 2766 2767 mutex_unlock(&bmc->dyn_mutex); 2768 mutex_unlock(&intf->bmc_reg_mutex); 2769 2770 kref_put(&intf->refcount, intf_free); 2771 return rv; 2772 } 2773 2774 static int bmc_get_device_id(struct ipmi_smi *intf, struct bmc_device *bmc, 2775 struct ipmi_device_id *id, 2776 bool *guid_set, guid_t *guid) 2777 { 2778 return __bmc_get_device_id(intf, bmc, id, guid_set, guid, -1); 2779 } 2780 2781 static ssize_t device_id_show(struct device *dev, 2782 struct device_attribute *attr, 2783 char *buf) 2784 { 2785 struct bmc_device *bmc = to_bmc_device(dev); 2786 struct ipmi_device_id id; 2787 int rv; 2788 2789 rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL); 2790 if (rv) 2791 return rv; 2792 2793 return sysfs_emit(buf, "%u\n", id.device_id); 2794 } 2795 static DEVICE_ATTR_RO(device_id); 2796 2797 static ssize_t provides_device_sdrs_show(struct device *dev, 2798 struct device_attribute *attr, 2799 char *buf) 2800 { 2801 struct bmc_device *bmc = to_bmc_device(dev); 2802 struct ipmi_device_id id; 2803 int rv; 2804 2805 rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL); 2806 if (rv) 2807 return rv; 2808 2809 return sysfs_emit(buf, "%u\n", (id.device_revision & 0x80) >> 7); 2810 } 2811 static DEVICE_ATTR_RO(provides_device_sdrs); 2812 2813 static ssize_t revision_show(struct device *dev, struct device_attribute *attr, 2814 char *buf) 2815 { 2816 struct bmc_device *bmc = to_bmc_device(dev); 2817 struct ipmi_device_id id; 2818 int rv; 2819 2820 rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL); 2821 if (rv) 2822 return rv; 2823 2824 return sysfs_emit(buf, "%u\n", id.device_revision & 0x0F); 2825 } 2826 static DEVICE_ATTR_RO(revision); 2827 2828 static ssize_t firmware_revision_show(struct device *dev, 2829 struct device_attribute *attr, 2830 char *buf) 2831 { 2832 struct bmc_device *bmc = to_bmc_device(dev); 2833 struct ipmi_device_id id; 2834 int rv; 2835 2836 rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL); 2837 if (rv) 2838 return rv; 2839 2840 return sysfs_emit(buf, "%u.%x\n", id.firmware_revision_1, 2841 id.firmware_revision_2); 2842 } 2843 static DEVICE_ATTR_RO(firmware_revision); 2844 2845 static ssize_t ipmi_version_show(struct device *dev, 2846 struct device_attribute *attr, 2847 char *buf) 2848 { 2849 struct bmc_device *bmc = to_bmc_device(dev); 2850 struct ipmi_device_id id; 2851 int rv; 2852 2853 rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL); 2854 if (rv) 2855 return rv; 2856 2857 return sysfs_emit(buf, "%u.%u\n", 2858 ipmi_version_major(&id), 2859 ipmi_version_minor(&id)); 2860 } 2861 static DEVICE_ATTR_RO(ipmi_version); 2862 2863 static ssize_t add_dev_support_show(struct device *dev, 2864 struct device_attribute *attr, 2865 char *buf) 2866 { 2867 struct bmc_device *bmc = to_bmc_device(dev); 2868 struct ipmi_device_id id; 2869 int rv; 2870 2871 rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL); 2872 if (rv) 2873 return rv; 2874 2875 return sysfs_emit(buf, "0x%02x\n", id.additional_device_support); 2876 } 2877 static DEVICE_ATTR(additional_device_support, S_IRUGO, add_dev_support_show, 2878 NULL); 2879 2880 static ssize_t manufacturer_id_show(struct device *dev, 2881 struct device_attribute *attr, 2882 char *buf) 2883 { 2884 struct bmc_device *bmc = to_bmc_device(dev); 2885 struct ipmi_device_id id; 2886 int rv; 2887 2888 
rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL); 2889 if (rv) 2890 return rv; 2891 2892 return sysfs_emit(buf, "0x%6.6x\n", id.manufacturer_id); 2893 } 2894 static DEVICE_ATTR_RO(manufacturer_id); 2895 2896 static ssize_t product_id_show(struct device *dev, 2897 struct device_attribute *attr, 2898 char *buf) 2899 { 2900 struct bmc_device *bmc = to_bmc_device(dev); 2901 struct ipmi_device_id id; 2902 int rv; 2903 2904 rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL); 2905 if (rv) 2906 return rv; 2907 2908 return sysfs_emit(buf, "0x%4.4x\n", id.product_id); 2909 } 2910 static DEVICE_ATTR_RO(product_id); 2911 2912 static ssize_t aux_firmware_rev_show(struct device *dev, 2913 struct device_attribute *attr, 2914 char *buf) 2915 { 2916 struct bmc_device *bmc = to_bmc_device(dev); 2917 struct ipmi_device_id id; 2918 int rv; 2919 2920 rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL); 2921 if (rv) 2922 return rv; 2923 2924 return sysfs_emit(buf, "0x%02x 0x%02x 0x%02x 0x%02x\n", 2925 id.aux_firmware_revision[3], 2926 id.aux_firmware_revision[2], 2927 id.aux_firmware_revision[1], 2928 id.aux_firmware_revision[0]); 2929 } 2930 static DEVICE_ATTR(aux_firmware_revision, S_IRUGO, aux_firmware_rev_show, NULL); 2931 2932 static ssize_t guid_show(struct device *dev, struct device_attribute *attr, 2933 char *buf) 2934 { 2935 struct bmc_device *bmc = to_bmc_device(dev); 2936 bool guid_set; 2937 guid_t guid; 2938 int rv; 2939 2940 rv = bmc_get_device_id(NULL, bmc, NULL, &guid_set, &guid); 2941 if (rv) 2942 return rv; 2943 if (!guid_set) 2944 return -ENOENT; 2945 2946 return sysfs_emit(buf, "%pUl\n", &guid); 2947 } 2948 static DEVICE_ATTR_RO(guid); 2949 2950 static struct attribute *bmc_dev_attrs[] = { 2951 &dev_attr_device_id.attr, 2952 &dev_attr_provides_device_sdrs.attr, 2953 &dev_attr_revision.attr, 2954 &dev_attr_firmware_revision.attr, 2955 &dev_attr_ipmi_version.attr, 2956 &dev_attr_additional_device_support.attr, 2957 &dev_attr_manufacturer_id.attr, 2958 &dev_attr_product_id.attr, 2959 &dev_attr_aux_firmware_revision.attr, 2960 &dev_attr_guid.attr, 2961 NULL 2962 }; 2963 2964 static umode_t bmc_dev_attr_is_visible(struct kobject *kobj, 2965 struct attribute *attr, int idx) 2966 { 2967 struct device *dev = kobj_to_dev(kobj); 2968 struct bmc_device *bmc = to_bmc_device(dev); 2969 umode_t mode = attr->mode; 2970 int rv; 2971 2972 if (attr == &dev_attr_aux_firmware_revision.attr) { 2973 struct ipmi_device_id id; 2974 2975 rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL); 2976 return (!rv && id.aux_firmware_revision_set) ? mode : 0; 2977 } 2978 if (attr == &dev_attr_guid.attr) { 2979 bool guid_set; 2980 2981 rv = bmc_get_device_id(NULL, bmc, NULL, &guid_set, NULL); 2982 return (!rv && guid_set) ? 
mode : 0; 2983 } 2984 return mode; 2985 } 2986 2987 static const struct attribute_group bmc_dev_attr_group = { 2988 .attrs = bmc_dev_attrs, 2989 .is_visible = bmc_dev_attr_is_visible, 2990 }; 2991 2992 static const struct attribute_group *bmc_dev_attr_groups[] = { 2993 &bmc_dev_attr_group, 2994 NULL 2995 }; 2996 2997 static const struct device_type bmc_device_type = { 2998 .groups = bmc_dev_attr_groups, 2999 }; 3000 3001 static int __find_bmc_guid(struct device *dev, const void *data) 3002 { 3003 const guid_t *guid = data; 3004 struct bmc_device *bmc; 3005 int rv; 3006 3007 if (dev->type != &bmc_device_type) 3008 return 0; 3009 3010 bmc = to_bmc_device(dev); 3011 rv = bmc->dyn_guid_set && guid_equal(&bmc->guid, guid); 3012 if (rv) 3013 rv = kref_get_unless_zero(&bmc->usecount); 3014 return rv; 3015 } 3016 3017 /* 3018 * Returns with the bmc's usecount incremented, if it is non-NULL. 3019 */ 3020 static struct bmc_device *ipmi_find_bmc_guid(struct device_driver *drv, 3021 guid_t *guid) 3022 { 3023 struct device *dev; 3024 struct bmc_device *bmc = NULL; 3025 3026 dev = driver_find_device(drv, NULL, guid, __find_bmc_guid); 3027 if (dev) { 3028 bmc = to_bmc_device(dev); 3029 put_device(dev); 3030 } 3031 return bmc; 3032 } 3033 3034 struct prod_dev_id { 3035 unsigned int product_id; 3036 unsigned char device_id; 3037 }; 3038 3039 static int __find_bmc_prod_dev_id(struct device *dev, const void *data) 3040 { 3041 const struct prod_dev_id *cid = data; 3042 struct bmc_device *bmc; 3043 int rv; 3044 3045 if (dev->type != &bmc_device_type) 3046 return 0; 3047 3048 bmc = to_bmc_device(dev); 3049 rv = (bmc->id.product_id == cid->product_id 3050 && bmc->id.device_id == cid->device_id); 3051 if (rv) 3052 rv = kref_get_unless_zero(&bmc->usecount); 3053 return rv; 3054 } 3055 3056 /* 3057 * Returns with the bmc's usecount incremented, if it is non-NULL. 3058 */ 3059 static struct bmc_device *ipmi_find_bmc_prod_dev_id( 3060 struct device_driver *drv, 3061 unsigned int product_id, unsigned char device_id) 3062 { 3063 struct prod_dev_id id = { 3064 .product_id = product_id, 3065 .device_id = device_id, 3066 }; 3067 struct device *dev; 3068 struct bmc_device *bmc = NULL; 3069 3070 dev = driver_find_device(drv, NULL, &id, __find_bmc_prod_dev_id); 3071 if (dev) { 3072 bmc = to_bmc_device(dev); 3073 put_device(dev); 3074 } 3075 return bmc; 3076 } 3077 3078 static DEFINE_IDA(ipmi_bmc_ida); 3079 3080 static void 3081 release_bmc_device(struct device *dev) 3082 { 3083 kfree(to_bmc_device(dev)); 3084 } 3085 3086 static void cleanup_bmc_work(struct work_struct *work) 3087 { 3088 struct bmc_device *bmc = container_of(work, struct bmc_device, 3089 remove_work); 3090 int id = bmc->pdev.id; /* Unregister overwrites id */ 3091 3092 platform_device_unregister(&bmc->pdev); 3093 ida_free(&ipmi_bmc_ida, id); 3094 } 3095 3096 static void 3097 cleanup_bmc_device(struct kref *ref) 3098 { 3099 struct bmc_device *bmc = container_of(ref, struct bmc_device, usecount); 3100 3101 /* 3102 * Remove the platform device in a work queue to avoid issues 3103 * with removing the device attributes while reading a device 3104 * attribute. 3105 */ 3106 queue_work(bmc_remove_work_wq, &bmc->remove_work); 3107 } 3108 3109 /* 3110 * Must be called with intf->bmc_reg_mutex held. 
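 * This drops the interface's reference on the shared bmc_device and
 * points intf->bmc back at the interface's embedded tmp_bmc.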
3111 */ 3112 static void __ipmi_bmc_unregister(struct ipmi_smi *intf) 3113 { 3114 struct bmc_device *bmc = intf->bmc; 3115 3116 if (!intf->bmc_registered) 3117 return; 3118 3119 sysfs_remove_link(&intf->si_dev->kobj, "bmc"); 3120 sysfs_remove_link(&bmc->pdev.dev.kobj, intf->my_dev_name); 3121 kfree(intf->my_dev_name); 3122 intf->my_dev_name = NULL; 3123 3124 mutex_lock(&bmc->dyn_mutex); 3125 list_del(&intf->bmc_link); 3126 mutex_unlock(&bmc->dyn_mutex); 3127 intf->bmc = &intf->tmp_bmc; 3128 kref_put(&bmc->usecount, cleanup_bmc_device); 3129 intf->bmc_registered = false; 3130 } 3131 3132 static void ipmi_bmc_unregister(struct ipmi_smi *intf) 3133 { 3134 mutex_lock(&intf->bmc_reg_mutex); 3135 __ipmi_bmc_unregister(intf); 3136 mutex_unlock(&intf->bmc_reg_mutex); 3137 } 3138 3139 /* 3140 * Must be called with intf->bmc_reg_mutex held. 3141 */ 3142 static int __ipmi_bmc_register(struct ipmi_smi *intf, 3143 struct ipmi_device_id *id, 3144 bool guid_set, guid_t *guid, int intf_num) 3145 { 3146 int rv; 3147 struct bmc_device *bmc; 3148 struct bmc_device *old_bmc; 3149 3150 /* 3151 * platform_device_register() can cause bmc_reg_mutex to 3152 * be claimed because of the is_visible functions of 3153 * the attributes. Eliminate possible recursion and 3154 * release the lock. 3155 */ 3156 intf->in_bmc_register = true; 3157 mutex_unlock(&intf->bmc_reg_mutex); 3158 3159 /* 3160 * Try to find if there is an bmc_device struct 3161 * representing the interfaced BMC already 3162 */ 3163 mutex_lock(&ipmidriver_mutex); 3164 if (guid_set) 3165 old_bmc = ipmi_find_bmc_guid(&ipmidriver.driver, guid); 3166 else 3167 old_bmc = ipmi_find_bmc_prod_dev_id(&ipmidriver.driver, 3168 id->product_id, 3169 id->device_id); 3170 3171 /* 3172 * If there is already an bmc_device, free the new one, 3173 * otherwise register the new BMC device 3174 */ 3175 if (old_bmc) { 3176 bmc = old_bmc; 3177 /* 3178 * Note: old_bmc already has usecount incremented by 3179 * the BMC find functions. 
3180 */ 3181 intf->bmc = old_bmc; 3182 mutex_lock(&bmc->dyn_mutex); 3183 list_add_tail(&intf->bmc_link, &bmc->intfs); 3184 mutex_unlock(&bmc->dyn_mutex); 3185 3186 dev_info(intf->si_dev, 3187 "interfacing existing BMC (man_id: 0x%6.6x, prod_id: 0x%4.4x, dev_id: 0x%2.2x)\n", 3188 bmc->id.manufacturer_id, 3189 bmc->id.product_id, 3190 bmc->id.device_id); 3191 } else { 3192 bmc = kzalloc_obj(*bmc); 3193 if (!bmc) { 3194 rv = -ENOMEM; 3195 goto out; 3196 } 3197 INIT_LIST_HEAD(&bmc->intfs); 3198 mutex_init(&bmc->dyn_mutex); 3199 INIT_WORK(&bmc->remove_work, cleanup_bmc_work); 3200 3201 bmc->id = *id; 3202 bmc->dyn_id_set = 1; 3203 bmc->dyn_guid_set = guid_set; 3204 bmc->guid = *guid; 3205 bmc->dyn_id_expiry = jiffies + IPMI_DYN_DEV_ID_EXPIRY; 3206 3207 bmc->pdev.name = "ipmi_bmc"; 3208 3209 rv = ida_alloc(&ipmi_bmc_ida, GFP_KERNEL); 3210 if (rv < 0) { 3211 kfree(bmc); 3212 goto out; 3213 } 3214 3215 bmc->pdev.dev.driver = &ipmidriver.driver; 3216 bmc->pdev.id = rv; 3217 bmc->pdev.dev.release = release_bmc_device; 3218 bmc->pdev.dev.type = &bmc_device_type; 3219 kref_init(&bmc->usecount); 3220 3221 intf->bmc = bmc; 3222 mutex_lock(&bmc->dyn_mutex); 3223 list_add_tail(&intf->bmc_link, &bmc->intfs); 3224 mutex_unlock(&bmc->dyn_mutex); 3225 3226 rv = platform_device_register(&bmc->pdev); 3227 if (rv) { 3228 dev_err(intf->si_dev, 3229 "Unable to register bmc device: %d\n", 3230 rv); 3231 goto out_list_del; 3232 } 3233 3234 dev_info(intf->si_dev, 3235 "Found new BMC (man_id: 0x%6.6x, prod_id: 0x%4.4x, dev_id: 0x%2.2x)\n", 3236 bmc->id.manufacturer_id, 3237 bmc->id.product_id, 3238 bmc->id.device_id); 3239 } 3240 3241 /* 3242 * create symlink from system interface device to bmc device 3243 * and back. 3244 */ 3245 rv = sysfs_create_link(&intf->si_dev->kobj, &bmc->pdev.dev.kobj, "bmc"); 3246 if (rv) { 3247 dev_err(intf->si_dev, "Unable to create bmc symlink: %d\n", rv); 3248 goto out_put_bmc; 3249 } 3250 3251 if (intf_num == -1) 3252 intf_num = intf->intf_num; 3253 intf->my_dev_name = kasprintf(GFP_KERNEL, "ipmi%d", intf_num); 3254 if (!intf->my_dev_name) { 3255 rv = -ENOMEM; 3256 dev_err(intf->si_dev, "Unable to allocate link from BMC: %d\n", 3257 rv); 3258 goto out_unlink1; 3259 } 3260 3261 rv = sysfs_create_link(&bmc->pdev.dev.kobj, &intf->si_dev->kobj, 3262 intf->my_dev_name); 3263 if (rv) { 3264 dev_err(intf->si_dev, "Unable to create symlink to bmc: %d\n", 3265 rv); 3266 goto out_free_my_dev_name; 3267 } 3268 3269 intf->bmc_registered = true; 3270 3271 out: 3272 mutex_unlock(&ipmidriver_mutex); 3273 mutex_lock(&intf->bmc_reg_mutex); 3274 intf->in_bmc_register = false; 3275 return rv; 3276 3277 3278 out_free_my_dev_name: 3279 kfree(intf->my_dev_name); 3280 intf->my_dev_name = NULL; 3281 3282 out_unlink1: 3283 sysfs_remove_link(&intf->si_dev->kobj, "bmc"); 3284 3285 out_put_bmc: 3286 mutex_lock(&bmc->dyn_mutex); 3287 list_del(&intf->bmc_link); 3288 mutex_unlock(&bmc->dyn_mutex); 3289 intf->bmc = &intf->tmp_bmc; 3290 kref_put(&bmc->usecount, cleanup_bmc_device); 3291 goto out; 3292 3293 out_list_del: 3294 mutex_lock(&bmc->dyn_mutex); 3295 list_del(&intf->bmc_link); 3296 mutex_unlock(&bmc->dyn_mutex); 3297 intf->bmc = &intf->tmp_bmc; 3298 put_device(&bmc->pdev.dev); 3299 goto out; 3300 } 3301 3302 static int 3303 send_guid_cmd(struct ipmi_smi *intf, int chan) 3304 { 3305 struct kernel_ipmi_msg msg; 3306 struct ipmi_system_interface_addr si; 3307 3308 si.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE; 3309 si.channel = IPMI_BMC_CHANNEL; 3310 si.lun = 0; 3311 3312 msg.netfn = IPMI_NETFN_APP_REQUEST; 3313 
msg.cmd = IPMI_GET_DEVICE_GUID_CMD; 3314 msg.data = NULL; 3315 msg.data_len = 0; 3316 return i_ipmi_request(NULL, 3317 intf, 3318 (struct ipmi_addr *) &si, 3319 0, 3320 &msg, 3321 intf, 3322 NULL, 3323 NULL, 3324 0, 3325 intf->addrinfo[0].address, 3326 intf->addrinfo[0].lun, 3327 -1, 0); 3328 } 3329 3330 static void guid_handler(struct ipmi_smi *intf, struct ipmi_recv_msg *msg) 3331 { 3332 struct bmc_device *bmc = intf->bmc; 3333 3334 if ((msg->addr.addr_type != IPMI_SYSTEM_INTERFACE_ADDR_TYPE) 3335 || (msg->msg.netfn != IPMI_NETFN_APP_RESPONSE) 3336 || (msg->msg.cmd != IPMI_GET_DEVICE_GUID_CMD)) 3337 /* Not for me */ 3338 return; 3339 3340 if (msg->msg.data[0] != 0) { 3341 /* Error from getting the GUID, the BMC doesn't have one. */ 3342 bmc->dyn_guid_set = 0; 3343 goto out; 3344 } 3345 3346 if (msg->msg.data_len < UUID_SIZE + 1) { 3347 bmc->dyn_guid_set = 0; 3348 dev_warn(intf->si_dev, 3349 "The GUID response from the BMC was too short, it was %d but should have been %d. Assuming GUID is not available.\n", 3350 msg->msg.data_len, UUID_SIZE + 1); 3351 goto out; 3352 } 3353 3354 import_guid(&bmc->fetch_guid, msg->msg.data + 1); 3355 /* 3356 * Make sure the guid data is available before setting 3357 * dyn_guid_set. 3358 */ 3359 smp_wmb(); 3360 bmc->dyn_guid_set = 1; 3361 out: 3362 wake_up(&intf->waitq); 3363 } 3364 3365 static void __get_guid(struct ipmi_smi *intf) 3366 { 3367 int rv; 3368 struct bmc_device *bmc = intf->bmc; 3369 3370 bmc->dyn_guid_set = 2; 3371 intf->null_user_handler = guid_handler; 3372 rv = send_guid_cmd(intf, 0); 3373 if (rv) 3374 /* Send failed, no GUID available. */ 3375 bmc->dyn_guid_set = 0; 3376 else 3377 wait_event(intf->waitq, bmc->dyn_guid_set != 2); 3378 3379 /* dyn_guid_set makes the guid data available. */ 3380 smp_rmb(); 3381 3382 intf->null_user_handler = NULL; 3383 } 3384 3385 static int 3386 send_channel_info_cmd(struct ipmi_smi *intf, int chan) 3387 { 3388 struct kernel_ipmi_msg msg; 3389 unsigned char data[1]; 3390 struct ipmi_system_interface_addr si; 3391 3392 si.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE; 3393 si.channel = IPMI_BMC_CHANNEL; 3394 si.lun = 0; 3395 3396 msg.netfn = IPMI_NETFN_APP_REQUEST; 3397 msg.cmd = IPMI_GET_CHANNEL_INFO_CMD; 3398 msg.data = data; 3399 msg.data_len = 1; 3400 data[0] = chan; 3401 return i_ipmi_request(NULL, 3402 intf, 3403 (struct ipmi_addr *) &si, 3404 0, 3405 &msg, 3406 intf, 3407 NULL, 3408 NULL, 3409 0, 3410 intf->addrinfo[0].address, 3411 intf->addrinfo[0].lun, 3412 -1, 0); 3413 } 3414 3415 static void 3416 channel_handler(struct ipmi_smi *intf, struct ipmi_recv_msg *msg) 3417 { 3418 int rv = 0; 3419 int ch; 3420 unsigned int set = intf->curr_working_cset; 3421 struct ipmi_channel *chans; 3422 3423 if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) 3424 && (msg->msg.netfn == IPMI_NETFN_APP_RESPONSE) 3425 && (msg->msg.cmd == IPMI_GET_CHANNEL_INFO_CMD)) { 3426 /* It's the one we want */ 3427 if (msg->msg.data[0] != 0) { 3428 /* Got an error from the channel, just go on. */ 3429 if (msg->msg.data[0] == IPMI_INVALID_COMMAND_ERR) { 3430 /* 3431 * If the MC does not support this 3432 * command, that is legal. We just 3433 * assume it has one IPMB at channel 3434 * zero. 
3435 */ 3436 intf->wchannels[set].c[0].medium 3437 = IPMI_CHANNEL_MEDIUM_IPMB; 3438 intf->wchannels[set].c[0].protocol 3439 = IPMI_CHANNEL_PROTOCOL_IPMB; 3440 3441 intf->channel_list = intf->wchannels + set; 3442 intf->channels_ready = true; 3443 wake_up(&intf->waitq); 3444 goto out; 3445 } 3446 goto next_channel; 3447 } 3448 if (msg->msg.data_len < 4) { 3449 /* Message not big enough, just go on. */ 3450 goto next_channel; 3451 } 3452 ch = intf->curr_channel; 3453 chans = intf->wchannels[set].c; 3454 chans[ch].medium = msg->msg.data[2] & 0x7f; 3455 chans[ch].protocol = msg->msg.data[3] & 0x1f; 3456 3457 next_channel: 3458 intf->curr_channel++; 3459 if (intf->curr_channel >= IPMI_MAX_CHANNELS) { 3460 intf->channel_list = intf->wchannels + set; 3461 intf->channels_ready = true; 3462 wake_up(&intf->waitq); 3463 } else { 3464 rv = send_channel_info_cmd(intf, intf->curr_channel); 3465 } 3466 3467 if (rv) { 3468 /* Got an error somehow, just give up. */ 3469 dev_warn(intf->si_dev, 3470 "Error sending channel information for channel %d: %d\n", 3471 intf->curr_channel, rv); 3472 3473 intf->channel_list = intf->wchannels + set; 3474 intf->channels_ready = true; 3475 wake_up(&intf->waitq); 3476 } 3477 } 3478 out: 3479 return; 3480 } 3481 3482 /* 3483 * Must be holding intf->bmc_reg_mutex to call this. 3484 */ 3485 static int __scan_channels(struct ipmi_smi *intf, 3486 struct ipmi_device_id *id, 3487 bool rescan) 3488 { 3489 int rv; 3490 3491 if (rescan) { 3492 /* Clear channels_ready to force channels rescan. */ 3493 intf->channels_ready = false; 3494 } 3495 3496 /* Skip channel scan if channels are already marked ready */ 3497 if (intf->channels_ready) 3498 return 0; 3499 3500 if (ipmi_version_major(id) > 1 3501 || (ipmi_version_major(id) == 1 3502 && ipmi_version_minor(id) >= 5)) { 3503 unsigned int set; 3504 3505 /* 3506 * Start scanning the channels to see what is 3507 * available. 3508 */ 3509 set = !intf->curr_working_cset; 3510 intf->curr_working_cset = set; 3511 memset(&intf->wchannels[set], 0, 3512 sizeof(struct ipmi_channel_set)); 3513 3514 intf->null_user_handler = channel_handler; 3515 intf->curr_channel = 0; 3516 rv = send_channel_info_cmd(intf, 0); 3517 if (rv) { 3518 dev_warn(intf->si_dev, 3519 "Error sending channel information for channel 0, %d\n", 3520 rv); 3521 intf->null_user_handler = NULL; 3522 return -EIO; 3523 } 3524 3525 /* Wait for the channel info to be read. */ 3526 wait_event(intf->waitq, intf->channels_ready); 3527 intf->null_user_handler = NULL; 3528 } else { 3529 unsigned int set = intf->curr_working_cset; 3530 3531 /* Assume a single IPMB channel at zero. 
*/ 3532 intf->wchannels[set].c[0].medium = IPMI_CHANNEL_MEDIUM_IPMB; 3533 intf->wchannels[set].c[0].protocol = IPMI_CHANNEL_PROTOCOL_IPMB; 3534 intf->channel_list = intf->wchannels + set; 3535 intf->channels_ready = true; 3536 } 3537 3538 return 0; 3539 } 3540 3541 static void ipmi_poll(struct ipmi_smi *intf) 3542 { 3543 if (intf->handlers->poll) 3544 intf->handlers->poll(intf->send_info); 3545 /* In case something came in */ 3546 handle_new_recv_msgs(intf); 3547 } 3548 3549 void ipmi_poll_interface(struct ipmi_user *user) 3550 { 3551 ipmi_poll(user->intf); 3552 } 3553 EXPORT_SYMBOL(ipmi_poll_interface); 3554 3555 static ssize_t nr_users_show(struct device *dev, 3556 struct device_attribute *attr, 3557 char *buf) 3558 { 3559 struct ipmi_smi *intf = container_of(attr, 3560 struct ipmi_smi, nr_users_devattr); 3561 3562 return sysfs_emit(buf, "%d\n", atomic_read(&intf->nr_users)); 3563 } 3564 static DEVICE_ATTR_RO(nr_users); 3565 3566 static ssize_t nr_msgs_show(struct device *dev, 3567 struct device_attribute *attr, 3568 char *buf) 3569 { 3570 struct ipmi_smi *intf = container_of(attr, 3571 struct ipmi_smi, nr_msgs_devattr); 3572 struct ipmi_user *user; 3573 unsigned int count = 0; 3574 3575 mutex_lock(&intf->users_mutex); 3576 list_for_each_entry(user, &intf->users, link) 3577 count += atomic_read(&user->nr_msgs); 3578 mutex_unlock(&intf->users_mutex); 3579 3580 return sysfs_emit(buf, "%u\n", count); 3581 } 3582 static DEVICE_ATTR_RO(nr_msgs); 3583 3584 static ssize_t maintenance_mode_show(struct device *dev, 3585 struct device_attribute *attr, 3586 char *buf) 3587 { 3588 struct ipmi_smi *intf = container_of(attr, 3589 struct ipmi_smi, 3590 maintenance_mode_devattr); 3591 3592 return sysfs_emit(buf, "%u %d\n", intf->maintenance_mode_state, 3593 intf->auto_maintenance_timeout); 3594 } 3595 static DEVICE_ATTR_RO(maintenance_mode); 3596 3597 static void redo_bmc_reg(struct work_struct *work) 3598 { 3599 struct ipmi_smi *intf = container_of(work, struct ipmi_smi, 3600 bmc_reg_work); 3601 3602 if (!intf->in_shutdown) 3603 bmc_get_device_id(intf, NULL, NULL, NULL, NULL); 3604 3605 kref_put(&intf->refcount, intf_free); 3606 } 3607 3608 int ipmi_add_smi(struct module *owner, 3609 const struct ipmi_smi_handlers *handlers, 3610 void *send_info, 3611 struct device *si_dev, 3612 unsigned char slave_addr) 3613 { 3614 int i, j; 3615 int rv; 3616 struct ipmi_smi *intf, *tintf; 3617 struct list_head *link; 3618 struct ipmi_device_id id; 3619 3620 /* 3621 * Make sure the driver is actually initialized, this handles 3622 * problems with initialization order. 3623 */ 3624 rv = ipmi_init_msghandler(); 3625 if (rv) 3626 return rv; 3627 3628 intf = kzalloc_obj(*intf); 3629 if (!intf) 3630 return -ENOMEM; 3631 3632 intf->owner = owner; 3633 intf->bmc = &intf->tmp_bmc; 3634 INIT_LIST_HEAD(&intf->bmc->intfs); 3635 mutex_init(&intf->bmc->dyn_mutex); 3636 INIT_LIST_HEAD(&intf->bmc_link); 3637 mutex_init(&intf->bmc_reg_mutex); 3638 intf->intf_num = -1; /* Mark it invalid for now. 
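 * A real number is assigned once registration completes below.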
*/ 3639 kref_init(&intf->refcount); 3640 INIT_WORK(&intf->bmc_reg_work, redo_bmc_reg); 3641 intf->si_dev = si_dev; 3642 for (j = 0; j < IPMI_MAX_CHANNELS; j++) { 3643 intf->addrinfo[j].address = IPMI_BMC_SLAVE_ADDR; 3644 intf->addrinfo[j].lun = 2; 3645 } 3646 if (slave_addr != 0) 3647 intf->addrinfo[0].address = slave_addr; 3648 INIT_LIST_HEAD(&intf->user_msgs); 3649 mutex_init(&intf->user_msgs_mutex); 3650 INIT_LIST_HEAD(&intf->users); 3651 mutex_init(&intf->users_mutex); 3652 atomic_set(&intf->nr_users, 0); 3653 intf->handlers = handlers; 3654 intf->send_info = send_info; 3655 mutex_init(&intf->seq_lock); 3656 for (j = 0; j < IPMI_IPMB_NUM_SEQ; j++) { 3657 intf->seq_table[j].inuse = 0; 3658 intf->seq_table[j].seqid = 0; 3659 } 3660 intf->curr_seq = 0; 3661 spin_lock_init(&intf->waiting_rcv_msgs_lock); 3662 INIT_LIST_HEAD(&intf->waiting_rcv_msgs); 3663 INIT_WORK(&intf->smi_work, smi_work); 3664 atomic_set(&intf->watchdog_pretimeouts_to_deliver, 0); 3665 spin_lock_init(&intf->xmit_msgs_lock); 3666 INIT_LIST_HEAD(&intf->xmit_msgs); 3667 INIT_LIST_HEAD(&intf->hp_xmit_msgs); 3668 mutex_init(&intf->events_mutex); 3669 spin_lock_init(&intf->watch_lock); 3670 atomic_set(&intf->event_waiters, 0); 3671 intf->ticks_to_req_ev = IPMI_REQUEST_EV_TIME; 3672 INIT_LIST_HEAD(&intf->waiting_events); 3673 intf->waiting_events_count = 0; 3674 mutex_init(&intf->cmd_rcvrs_mutex); 3675 spin_lock_init(&intf->maintenance_mode_lock); 3676 INIT_LIST_HEAD(&intf->cmd_rcvrs); 3677 init_waitqueue_head(&intf->waitq); 3678 for (i = 0; i < IPMI_NUM_STATS; i++) 3679 atomic_set(&intf->stats[i], 0); 3680 3681 /* 3682 * Grab the watchers mutex so we can deliver the new interface 3683 * without races. 3684 */ 3685 mutex_lock(&smi_watchers_mutex); 3686 mutex_lock(&ipmi_interfaces_mutex); 3687 /* Look for a hole in the numbers. */ 3688 i = 0; 3689 link = &ipmi_interfaces; 3690 list_for_each_entry(tintf, &ipmi_interfaces, link) { 3691 if (tintf->intf_num != i) { 3692 link = &tintf->link; 3693 break; 3694 } 3695 i++; 3696 } 3697 /* Add the new interface in numeric order. 
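 * 'link' points at the entry occupying the first hole (or at the list
 * head if the numbering is contiguous), so list_add_tail() slots the
 * new interface in just before it.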
*/ 3698 if (i == 0) 3699 list_add(&intf->link, &ipmi_interfaces); 3700 else 3701 list_add_tail(&intf->link, link); 3702 3703 rv = handlers->start_processing(send_info, intf); 3704 if (rv) 3705 goto out_err; 3706 3707 rv = __bmc_get_device_id(intf, NULL, &id, NULL, NULL, i); 3708 if (rv) { 3709 dev_err(si_dev, "Unable to get the device id: %d\n", rv); 3710 goto out_err_started; 3711 } 3712 3713 mutex_lock(&intf->bmc_reg_mutex); 3714 rv = __scan_channels(intf, &id, false); 3715 mutex_unlock(&intf->bmc_reg_mutex); 3716 if (rv) 3717 goto out_err_bmc_reg; 3718 3719 intf->nr_users_devattr = dev_attr_nr_users; 3720 sysfs_attr_init(&intf->nr_users_devattr.attr); 3721 rv = device_create_file(intf->si_dev, &intf->nr_users_devattr); 3722 if (rv) 3723 goto out_err_bmc_reg; 3724 3725 intf->nr_msgs_devattr = dev_attr_nr_msgs; 3726 sysfs_attr_init(&intf->nr_msgs_devattr.attr); 3727 rv = device_create_file(intf->si_dev, &intf->nr_msgs_devattr); 3728 if (rv) { 3729 device_remove_file(intf->si_dev, &intf->nr_users_devattr); 3730 goto out_err_bmc_reg; 3731 } 3732 3733 intf->maintenance_mode_devattr = dev_attr_maintenance_mode; 3734 sysfs_attr_init(&intf->maintenance_mode_devattr.attr); 3735 rv = device_create_file(intf->si_dev, &intf->maintenance_mode_devattr); 3736 if (rv) { 3737 device_remove_file(intf->si_dev, &intf->nr_users_devattr); 3738 goto out_err_bmc_reg; 3739 } 3740 3741 intf->intf_num = i; 3742 mutex_unlock(&ipmi_interfaces_mutex); 3743 3744 /* After this point the interface is legal to use. */ 3745 call_smi_watchers(i, intf->si_dev); 3746 3747 mutex_unlock(&smi_watchers_mutex); 3748 3749 return 0; 3750 3751 out_err_bmc_reg: 3752 ipmi_bmc_unregister(intf); 3753 out_err_started: 3754 if (intf->handlers->shutdown) 3755 intf->handlers->shutdown(intf->send_info); 3756 out_err: 3757 list_del(&intf->link); 3758 mutex_unlock(&ipmi_interfaces_mutex); 3759 mutex_unlock(&smi_watchers_mutex); 3760 kref_put(&intf->refcount, intf_free); 3761 3762 return rv; 3763 } 3764 EXPORT_SYMBOL(ipmi_add_smi); 3765 3766 static void deliver_smi_err_response(struct ipmi_smi *intf, 3767 struct ipmi_smi_msg *msg, 3768 unsigned char err) 3769 { 3770 int rv; 3771 msg->rsp[0] = msg->data[0] | 4; 3772 msg->rsp[1] = msg->data[1]; 3773 msg->rsp[2] = err; 3774 msg->rsp_size = 3; 3775 3776 /* This will never requeue, but it may ask us to free the message. */ 3777 rv = handle_one_recv_msg(intf, msg); 3778 if (rv == 0) 3779 ipmi_free_smi_msg(msg); 3780 } 3781 3782 static void cleanup_smi_msgs(struct ipmi_smi *intf) 3783 { 3784 int i; 3785 struct seq_table *ent; 3786 struct ipmi_smi_msg *msg; 3787 struct list_head *entry; 3788 struct list_head tmplist; 3789 3790 /* Clear out our transmit queues and hold the messages. */ 3791 INIT_LIST_HEAD(&tmplist); 3792 list_splice_tail(&intf->hp_xmit_msgs, &tmplist); 3793 list_splice_tail(&intf->xmit_msgs, &tmplist); 3794 3795 /* Current message first, to preserve order */ 3796 while (intf->curr_msg && !list_empty(&intf->waiting_rcv_msgs)) { 3797 /* Wait for the message to clear out. */ 3798 schedule_timeout(1); 3799 } 3800 3801 /* No need for locks, the interface is down. */ 3802 3803 /* 3804 * Return errors for all pending messages in queue and in the 3805 * tables waiting for remote responses. 
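 * Each one is completed with IPMI_ERR_UNSPECIFIED so waiting users
 * are unblocked instead of being left to time out.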
3806 */ 3807 while (!list_empty(&tmplist)) { 3808 entry = tmplist.next; 3809 list_del(entry); 3810 msg = list_entry(entry, struct ipmi_smi_msg, link); 3811 deliver_smi_err_response(intf, msg, IPMI_ERR_UNSPECIFIED); 3812 } 3813 3814 for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) { 3815 ent = &intf->seq_table[i]; 3816 if (!ent->inuse) 3817 continue; 3818 deliver_err_response(intf, ent->recv_msg, IPMI_ERR_UNSPECIFIED); 3819 } 3820 } 3821 3822 void ipmi_unregister_smi(struct ipmi_smi *intf) 3823 { 3824 struct ipmi_smi_watcher *w; 3825 int intf_num; 3826 3827 if (!intf) 3828 return; 3829 3830 intf_num = intf->intf_num; 3831 mutex_lock(&ipmi_interfaces_mutex); 3832 cancel_work_sync(&intf->smi_work); 3833 /* smi_work() can no longer be in progress after this. */ 3834 3835 intf->intf_num = -1; 3836 intf->in_shutdown = true; 3837 list_del(&intf->link); 3838 mutex_unlock(&ipmi_interfaces_mutex); 3839 3840 /* 3841 * At this point no users can be added to the interface and no 3842 * new messages can be sent. 3843 */ 3844 3845 if (intf->handlers->shutdown) 3846 intf->handlers->shutdown(intf->send_info); 3847 3848 device_remove_file(intf->si_dev, &intf->maintenance_mode_devattr); 3849 device_remove_file(intf->si_dev, &intf->nr_msgs_devattr); 3850 device_remove_file(intf->si_dev, &intf->nr_users_devattr); 3851 3852 /* 3853 * Call all the watcher interfaces to tell them that 3854 * an interface is going away. 3855 */ 3856 mutex_lock(&smi_watchers_mutex); 3857 list_for_each_entry(w, &smi_watchers, link) 3858 w->smi_gone(intf_num); 3859 mutex_unlock(&smi_watchers_mutex); 3860 3861 mutex_lock(&intf->users_mutex); 3862 while (!list_empty(&intf->users)) { 3863 struct ipmi_user *user = list_first_entry(&intf->users, 3864 struct ipmi_user, link); 3865 3866 _ipmi_destroy_user(user); 3867 } 3868 mutex_unlock(&intf->users_mutex); 3869 3870 cleanup_smi_msgs(intf); 3871 3872 ipmi_bmc_unregister(intf); 3873 3874 kref_put(&intf->refcount, intf_free); 3875 } 3876 EXPORT_SYMBOL(ipmi_unregister_smi); 3877 3878 static int handle_ipmb_get_msg_rsp(struct ipmi_smi *intf, 3879 struct ipmi_smi_msg *msg) 3880 { 3881 struct ipmi_ipmb_addr ipmb_addr; 3882 struct ipmi_recv_msg *recv_msg; 3883 3884 /* 3885 * This is 11, not 10, because the response must contain a 3886 * completion code. 3887 */ 3888 if (msg->rsp_size < 11) { 3889 /* Message not big enough, just ignore it. */ 3890 ipmi_inc_stat(intf, invalid_ipmb_responses); 3891 return 0; 3892 } 3893 3894 if (msg->rsp[2] != 0) { 3895 /* An error getting the response, just ignore it. */ 3896 return 0; 3897 } 3898 3899 ipmb_addr.addr_type = IPMI_IPMB_ADDR_TYPE; 3900 ipmb_addr.slave_addr = msg->rsp[6]; 3901 ipmb_addr.channel = msg->rsp[3] & 0x0f; 3902 ipmb_addr.lun = msg->rsp[7] & 3; 3903 3904 /* 3905 * It's a response from a remote entity. Look up the sequence 3906 * number and handle the response. 3907 */ 3908 if (intf_find_seq(intf, 3909 msg->rsp[7] >> 2, 3910 msg->rsp[3] & 0x0f, 3911 msg->rsp[8], 3912 (msg->rsp[4] >> 2) & (~1), 3913 (struct ipmi_addr *) &ipmb_addr, 3914 &recv_msg)) { 3915 /* 3916 * We were unable to find the sequence number, 3917 * so just nuke the message. 3918 */ 3919 ipmi_inc_stat(intf, unhandled_ipmb_responses); 3920 return 0; 3921 } 3922 3923 memcpy(recv_msg->msg_data, &msg->rsp[9], msg->rsp_size - 9); 3924 /* 3925 * The other fields matched, so no need to set them, except 3926 * for netfn, which needs to be the response that was 3927 * returned, not the request value. 
3928 */ 3929 recv_msg->msg.netfn = msg->rsp[4] >> 2; 3930 recv_msg->msg.data = recv_msg->msg_data; 3931 recv_msg->msg.data_len = msg->rsp_size - 10; 3932 recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE; 3933 if (deliver_response(intf, recv_msg)) 3934 ipmi_inc_stat(intf, unhandled_ipmb_responses); 3935 else 3936 ipmi_inc_stat(intf, handled_ipmb_responses); 3937 3938 return 0; 3939 } 3940 3941 static int handle_ipmb_get_msg_cmd(struct ipmi_smi *intf, 3942 struct ipmi_smi_msg *msg) 3943 { 3944 struct cmd_rcvr *rcvr; 3945 int rv = 0; 3946 unsigned char netfn; 3947 unsigned char cmd; 3948 unsigned char chan; 3949 struct ipmi_user *user = NULL; 3950 struct ipmi_ipmb_addr *ipmb_addr; 3951 struct ipmi_recv_msg *recv_msg = NULL; 3952 3953 if (msg->rsp_size < 10) { 3954 /* Message not big enough, just ignore it. */ 3955 ipmi_inc_stat(intf, invalid_commands); 3956 return 0; 3957 } 3958 3959 if (msg->rsp[2] != 0) { 3960 /* An error getting the response, just ignore it. */ 3961 return 0; 3962 } 3963 3964 netfn = msg->rsp[4] >> 2; 3965 cmd = msg->rsp[8]; 3966 chan = msg->rsp[3] & 0xf; 3967 3968 rcu_read_lock(); 3969 rcvr = find_cmd_rcvr(intf, netfn, cmd, chan); 3970 if (rcvr) { 3971 user = rcvr->user; 3972 recv_msg = ipmi_alloc_recv_msg(user); 3973 } 3974 rcu_read_unlock(); 3975 3976 if (user == NULL) { 3977 /* We didn't find a user, deliver an error response. */ 3978 ipmi_inc_stat(intf, unhandled_commands); 3979 3980 msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2); 3981 msg->data[1] = IPMI_SEND_MSG_CMD; 3982 msg->data[2] = msg->rsp[3]; 3983 msg->data[3] = msg->rsp[6]; 3984 msg->data[4] = ((netfn + 1) << 2) | (msg->rsp[7] & 0x3); 3985 msg->data[5] = ipmb_checksum(&msg->data[3], 2); 3986 msg->data[6] = intf->addrinfo[msg->rsp[3] & 0xf].address; 3987 /* rqseq/lun */ 3988 msg->data[7] = (msg->rsp[7] & 0xfc) | (msg->rsp[4] & 0x3); 3989 msg->data[8] = msg->rsp[8]; /* cmd */ 3990 msg->data[9] = IPMI_INVALID_CMD_COMPLETION_CODE; 3991 msg->data[10] = ipmb_checksum(&msg->data[6], 4); 3992 msg->data_size = 11; 3993 3994 dev_dbg(intf->si_dev, "Invalid command: %*ph\n", 3995 msg->data_size, msg->data); 3996 3997 if (smi_send(intf, intf->handlers, msg, 0) == IPMI_CC_NO_ERROR) 3998 /* 3999 * We used the message, so return the value that 4000 * causes it to not be freed or queued. 4001 */ 4002 rv = -1; 4003 } else if (!IS_ERR(recv_msg)) { 4004 /* Extract the source address from the data. */ 4005 ipmb_addr = (struct ipmi_ipmb_addr *) &recv_msg->addr; 4006 ipmb_addr->addr_type = IPMI_IPMB_ADDR_TYPE; 4007 ipmb_addr->slave_addr = msg->rsp[6]; 4008 ipmb_addr->lun = msg->rsp[7] & 3; 4009 ipmb_addr->channel = msg->rsp[3] & 0xf; 4010 4011 /* 4012 * Extract the rest of the message information 4013 * from the IPMB header. 4014 */ 4015 recv_msg->recv_type = IPMI_CMD_RECV_TYPE; 4016 recv_msg->msgid = msg->rsp[7] >> 2; 4017 recv_msg->msg.netfn = msg->rsp[4] >> 2; 4018 recv_msg->msg.cmd = msg->rsp[8]; 4019 recv_msg->msg.data = recv_msg->msg_data; 4020 4021 /* 4022 * We chop off 10, not 9 bytes because the checksum 4023 * at the end also needs to be removed. 4024 */ 4025 recv_msg->msg.data_len = msg->rsp_size - 10; 4026 memcpy(recv_msg->msg_data, &msg->rsp[9], 4027 msg->rsp_size - 10); 4028 if (deliver_response(intf, recv_msg)) 4029 ipmi_inc_stat(intf, unhandled_commands); 4030 else 4031 ipmi_inc_stat(intf, handled_commands); 4032 } else { 4033 /* 4034 * We couldn't allocate memory for the message, so 4035 * requeue it for handling later. 
4036 */ 4037 rv = 1; 4038 } 4039 4040 return rv; 4041 } 4042 4043 static int handle_ipmb_direct_rcv_cmd(struct ipmi_smi *intf, 4044 struct ipmi_smi_msg *msg) 4045 { 4046 struct cmd_rcvr *rcvr; 4047 int rv = 0; 4048 struct ipmi_user *user = NULL; 4049 struct ipmi_ipmb_direct_addr *daddr; 4050 struct ipmi_recv_msg *recv_msg = NULL; 4051 unsigned char netfn = msg->rsp[0] >> 2; 4052 unsigned char cmd = msg->rsp[3]; 4053 4054 rcu_read_lock(); 4055 /* We always use channel 0 for direct messages. */ 4056 rcvr = find_cmd_rcvr(intf, netfn, cmd, 0); 4057 if (rcvr) { 4058 user = rcvr->user; 4059 recv_msg = ipmi_alloc_recv_msg(user); 4060 } 4061 rcu_read_unlock(); 4062 4063 if (user == NULL) { 4064 /* We didn't find a user, deliver an error response. */ 4065 ipmi_inc_stat(intf, unhandled_commands); 4066 4067 msg->data[0] = (netfn + 1) << 2; 4068 msg->data[0] |= msg->rsp[2] & 0x3; /* rqLUN */ 4069 msg->data[1] = msg->rsp[1]; /* Addr */ 4070 msg->data[2] = msg->rsp[2] & ~0x3; /* rqSeq */ 4071 msg->data[2] |= msg->rsp[0] & 0x3; /* rsLUN */ 4072 msg->data[3] = cmd; 4073 msg->data[4] = IPMI_INVALID_CMD_COMPLETION_CODE; 4074 msg->data_size = 5; 4075 4076 if (smi_send(intf, intf->handlers, msg, 0) == IPMI_CC_NO_ERROR) 4077 /* 4078 * We used the message, so return the value that 4079 * causes it to not be freed or queued. 4080 */ 4081 rv = -1; 4082 } else if (!IS_ERR(recv_msg)) { 4083 /* Extract the source address from the data. */ 4084 daddr = (struct ipmi_ipmb_direct_addr *)&recv_msg->addr; 4085 daddr->addr_type = IPMI_IPMB_DIRECT_ADDR_TYPE; 4086 daddr->channel = 0; 4087 daddr->slave_addr = msg->rsp[1]; 4088 daddr->rs_lun = msg->rsp[0] & 3; 4089 daddr->rq_lun = msg->rsp[2] & 3; 4090 4091 /* 4092 * Extract the rest of the message information 4093 * from the IPMB header. 4094 */ 4095 recv_msg->recv_type = IPMI_CMD_RECV_TYPE; 4096 recv_msg->msgid = (msg->rsp[2] >> 2); 4097 recv_msg->msg.netfn = msg->rsp[0] >> 2; 4098 recv_msg->msg.cmd = msg->rsp[3]; 4099 recv_msg->msg.data = recv_msg->msg_data; 4100 4101 recv_msg->msg.data_len = msg->rsp_size - 4; 4102 memcpy(recv_msg->msg_data, msg->rsp + 4, 4103 msg->rsp_size - 4); 4104 if (deliver_response(intf, recv_msg)) 4105 ipmi_inc_stat(intf, unhandled_commands); 4106 else 4107 ipmi_inc_stat(intf, handled_commands); 4108 } else { 4109 /* 4110 * We couldn't allocate memory for the message, so 4111 * requeue it for handling later. 4112 */ 4113 rv = 1; 4114 } 4115 4116 return rv; 4117 } 4118 4119 static int handle_ipmb_direct_rcv_rsp(struct ipmi_smi *intf, 4120 struct ipmi_smi_msg *msg) 4121 { 4122 struct ipmi_recv_msg *recv_msg; 4123 struct ipmi_ipmb_direct_addr *daddr; 4124 4125 recv_msg = msg->recv_msg; 4126 if (recv_msg == NULL) { 4127 dev_warn(intf->si_dev, 4128 "IPMI direct message received with no owner. This could be because of a malformed message, or because of a hardware error. 
Contact your hardware vendor for assistance.\n"); 4129 return 0; 4130 } 4131 4132 recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE; 4133 recv_msg->msgid = msg->msgid; 4134 daddr = (struct ipmi_ipmb_direct_addr *) &recv_msg->addr; 4135 daddr->addr_type = IPMI_IPMB_DIRECT_ADDR_TYPE; 4136 daddr->channel = 0; 4137 daddr->slave_addr = msg->rsp[1]; 4138 daddr->rq_lun = msg->rsp[0] & 3; 4139 daddr->rs_lun = msg->rsp[2] & 3; 4140 recv_msg->msg.netfn = msg->rsp[0] >> 2; 4141 recv_msg->msg.cmd = msg->rsp[3]; 4142 memcpy(recv_msg->msg_data, &msg->rsp[4], msg->rsp_size - 4); 4143 recv_msg->msg.data = recv_msg->msg_data; 4144 recv_msg->msg.data_len = msg->rsp_size - 4; 4145 deliver_local_response(intf, recv_msg); 4146 4147 return 0; 4148 } 4149 4150 static int handle_lan_get_msg_rsp(struct ipmi_smi *intf, 4151 struct ipmi_smi_msg *msg) 4152 { 4153 struct ipmi_lan_addr lan_addr; 4154 struct ipmi_recv_msg *recv_msg; 4155 4156 4157 /* 4158 * This is 13, not 12, because the response must contain a 4159 * completion code. 4160 */ 4161 if (msg->rsp_size < 13) { 4162 /* Message not big enough, just ignore it. */ 4163 ipmi_inc_stat(intf, invalid_lan_responses); 4164 return 0; 4165 } 4166 4167 if (msg->rsp[2] != 0) { 4168 /* An error getting the response, just ignore it. */ 4169 return 0; 4170 } 4171 4172 lan_addr.addr_type = IPMI_LAN_ADDR_TYPE; 4173 lan_addr.session_handle = msg->rsp[4]; 4174 lan_addr.remote_SWID = msg->rsp[8]; 4175 lan_addr.local_SWID = msg->rsp[5]; 4176 lan_addr.channel = msg->rsp[3] & 0x0f; 4177 lan_addr.privilege = msg->rsp[3] >> 4; 4178 lan_addr.lun = msg->rsp[9] & 3; 4179 4180 /* 4181 * It's a response from a remote entity. Look up the sequence 4182 * number and handle the response. 4183 */ 4184 if (intf_find_seq(intf, 4185 msg->rsp[9] >> 2, 4186 msg->rsp[3] & 0x0f, 4187 msg->rsp[10], 4188 (msg->rsp[6] >> 2) & (~1), 4189 (struct ipmi_addr *) &lan_addr, 4190 &recv_msg)) { 4191 /* 4192 * We were unable to find the sequence number, 4193 * so just nuke the message. 4194 */ 4195 ipmi_inc_stat(intf, unhandled_lan_responses); 4196 return 0; 4197 } 4198 4199 memcpy(recv_msg->msg_data, &msg->rsp[11], msg->rsp_size - 11); 4200 /* 4201 * The other fields matched, so no need to set them, except 4202 * for netfn, which needs to be the response that was 4203 * returned, not the request value. 4204 */ 4205 recv_msg->msg.netfn = msg->rsp[6] >> 2; 4206 recv_msg->msg.data = recv_msg->msg_data; 4207 recv_msg->msg.data_len = msg->rsp_size - 12; 4208 recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE; 4209 if (deliver_response(intf, recv_msg)) 4210 ipmi_inc_stat(intf, unhandled_lan_responses); 4211 else 4212 ipmi_inc_stat(intf, handled_lan_responses); 4213 4214 return 0; 4215 } 4216 4217 static int handle_lan_get_msg_cmd(struct ipmi_smi *intf, 4218 struct ipmi_smi_msg *msg) 4219 { 4220 struct cmd_rcvr *rcvr; 4221 int rv = 0; /* Free by default */ 4222 unsigned char netfn; 4223 unsigned char cmd; 4224 unsigned char chan; 4225 struct ipmi_user *user = NULL; 4226 struct ipmi_lan_addr *lan_addr; 4227 struct ipmi_recv_msg *recv_msg = NULL; 4228 4229 if (msg->rsp_size < 12) { 4230 /* Message not big enough, just ignore it. */ 4231 ipmi_inc_stat(intf, invalid_commands); 4232 return 0; 4233 } 4234 4235 if (msg->rsp[2] != 0) { 4236 /* An error getting the response, just ignore it. 
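* (rsp[2] is the completion code of the Get Message response; a nonzero
* value means the BMC could not return a queued message, so there is
* nothing to parse.)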
*/ 4237 return 0; 4238 } 4239 4240 netfn = msg->rsp[6] >> 2; 4241 cmd = msg->rsp[10]; 4242 chan = msg->rsp[3] & 0xf; 4243 4244 rcu_read_lock(); 4245 rcvr = find_cmd_rcvr(intf, netfn, cmd, chan); 4246 if (rcvr) { 4247 user = rcvr->user; 4248 recv_msg = ipmi_alloc_recv_msg(user); 4249 } 4250 rcu_read_unlock(); 4251 4252 if (user == NULL) { 4253 /* We didn't find a user, just give up and return an error. */ 4254 ipmi_inc_stat(intf, unhandled_commands); 4255 4256 msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2); 4257 msg->data[1] = IPMI_SEND_MSG_CMD; 4258 msg->data[2] = chan; 4259 msg->data[3] = msg->rsp[4]; /* handle */ 4260 msg->data[4] = msg->rsp[8]; /* rsSWID */ 4261 msg->data[5] = ((netfn + 1) << 2) | (msg->rsp[9] & 0x3); 4262 msg->data[6] = ipmb_checksum(&msg->data[3], 3); 4263 msg->data[7] = msg->rsp[5]; /* rqSWID */ 4264 /* rqseq/lun */ 4265 msg->data[8] = (msg->rsp[9] & 0xfc) | (msg->rsp[6] & 0x3); 4266 msg->data[9] = cmd; 4267 msg->data[10] = IPMI_INVALID_CMD_COMPLETION_CODE; 4268 msg->data[11] = ipmb_checksum(&msg->data[7], 4); 4269 msg->data_size = 12; 4270 4271 dev_dbg(intf->si_dev, "Invalid command: %*ph\n", 4272 msg->data_size, msg->data); 4273 4274 if (smi_send(intf, intf->handlers, msg, 0) == IPMI_CC_NO_ERROR) 4275 /* 4276 * We used the message, so return the value that 4277 * causes it to not be freed or queued. 4278 */ 4279 rv = -1; 4280 } else if (!IS_ERR(recv_msg)) { 4281 /* Extract the source address from the data. */ 4282 lan_addr = (struct ipmi_lan_addr *) &recv_msg->addr; 4283 lan_addr->addr_type = IPMI_LAN_ADDR_TYPE; 4284 lan_addr->session_handle = msg->rsp[4]; 4285 lan_addr->remote_SWID = msg->rsp[8]; 4286 lan_addr->local_SWID = msg->rsp[5]; 4287 lan_addr->lun = msg->rsp[9] & 3; 4288 lan_addr->channel = msg->rsp[3] & 0xf; 4289 lan_addr->privilege = msg->rsp[3] >> 4; 4290 4291 /* 4292 * Extract the rest of the message information 4293 * from the IPMB header. 4294 */ 4295 recv_msg->recv_type = IPMI_CMD_RECV_TYPE; 4296 recv_msg->msgid = msg->rsp[9] >> 2; 4297 recv_msg->msg.netfn = msg->rsp[6] >> 2; 4298 recv_msg->msg.cmd = msg->rsp[10]; 4299 recv_msg->msg.data = recv_msg->msg_data; 4300 4301 /* 4302 * We chop off 12, not 11 bytes because the checksum 4303 * at the end also needs to be removed. 4304 */ 4305 recv_msg->msg.data_len = msg->rsp_size - 12; 4306 memcpy(recv_msg->msg_data, &msg->rsp[11], 4307 msg->rsp_size - 12); 4308 if (deliver_response(intf, recv_msg)) 4309 ipmi_inc_stat(intf, unhandled_commands); 4310 else 4311 ipmi_inc_stat(intf, handled_commands); 4312 } else { 4313 /* 4314 * We couldn't allocate memory for the message, so 4315 * requeue it for handling later. 4316 */ 4317 rv = 1; 4318 } 4319 4320 return rv; 4321 } 4322 4323 /* 4324 * This routine will handle "Get Message" command responses with 4325 * channels that use an OEM Medium. The message format belongs to 4326 * the OEM. See IPMI 2.0 specification, Chapter 6 and 4327 * Chapter 22, sections 22.6 and 22.24 for more details. 4328 */ 4329 static int handle_oem_get_msg_cmd(struct ipmi_smi *intf, 4330 struct ipmi_smi_msg *msg) 4331 { 4332 struct cmd_rcvr *rcvr; 4333 int rv = 0; 4334 unsigned char netfn; 4335 unsigned char cmd; 4336 unsigned char chan; 4337 struct ipmi_user *user = NULL; 4338 struct ipmi_system_interface_addr *smi_addr; 4339 struct ipmi_recv_msg *recv_msg = NULL; 4340 4341 /* 4342 * We expect the OEM SW to perform error checking 4343 * so we just do some basic sanity checks 4344 */ 4345 if (msg->rsp_size < 4) { 4346 /* Message not big enough, just ignore it. 
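* (Four bytes is the bare minimum used below: netfn/LUN, command,
* completion code and the channel byte; everything past byte 3 is OEM
* payload that gets copied verbatim.)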
*/ 4347 ipmi_inc_stat(intf, invalid_commands); 4348 return 0; 4349 } 4350 4351 if (msg->rsp[2] != 0) { 4352 /* An error getting the response, just ignore it. */ 4353 return 0; 4354 } 4355 4356 /* 4357 * This is an OEM Message so the OEM needs to know how 4358 * handle the message. We do no interpretation. 4359 */ 4360 netfn = msg->rsp[0] >> 2; 4361 cmd = msg->rsp[1]; 4362 chan = msg->rsp[3] & 0xf; 4363 4364 rcu_read_lock(); 4365 rcvr = find_cmd_rcvr(intf, netfn, cmd, chan); 4366 if (rcvr) { 4367 user = rcvr->user; 4368 recv_msg = ipmi_alloc_recv_msg(user); 4369 } 4370 rcu_read_unlock(); 4371 4372 if (user == NULL) { 4373 /* We didn't find a user, just give up. */ 4374 ipmi_inc_stat(intf, unhandled_commands); 4375 4376 /* 4377 * Don't do anything with these messages, just allow 4378 * them to be freed. 4379 */ 4380 4381 rv = 0; 4382 } else if (!IS_ERR(recv_msg)) { 4383 /* 4384 * OEM Messages are expected to be delivered via 4385 * the system interface to SMS software. We might 4386 * need to visit this again depending on OEM 4387 * requirements 4388 */ 4389 smi_addr = ((struct ipmi_system_interface_addr *) 4390 &recv_msg->addr); 4391 smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE; 4392 smi_addr->channel = IPMI_BMC_CHANNEL; 4393 smi_addr->lun = msg->rsp[0] & 3; 4394 4395 recv_msg->user_msg_data = NULL; 4396 recv_msg->recv_type = IPMI_OEM_RECV_TYPE; 4397 recv_msg->msg.netfn = msg->rsp[0] >> 2; 4398 recv_msg->msg.cmd = msg->rsp[1]; 4399 recv_msg->msg.data = recv_msg->msg_data; 4400 4401 /* 4402 * The message starts at byte 4 which follows the 4403 * Channel Byte in the "GET MESSAGE" command 4404 */ 4405 recv_msg->msg.data_len = msg->rsp_size - 4; 4406 memcpy(recv_msg->msg_data, &msg->rsp[4], 4407 msg->rsp_size - 4); 4408 if (deliver_response(intf, recv_msg)) 4409 ipmi_inc_stat(intf, unhandled_commands); 4410 else 4411 ipmi_inc_stat(intf, handled_commands); 4412 } else { 4413 /* 4414 * We couldn't allocate memory for the message, so 4415 * requeue it for handling later. 4416 */ 4417 rv = 1; 4418 } 4419 4420 return rv; 4421 } 4422 4423 static void copy_event_into_recv_msg(struct ipmi_recv_msg *recv_msg, 4424 struct ipmi_smi_msg *msg) 4425 { 4426 struct ipmi_system_interface_addr *smi_addr; 4427 4428 recv_msg->msgid = 0; 4429 smi_addr = (struct ipmi_system_interface_addr *) &recv_msg->addr; 4430 smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE; 4431 smi_addr->channel = IPMI_BMC_CHANNEL; 4432 smi_addr->lun = msg->rsp[0] & 3; 4433 recv_msg->recv_type = IPMI_ASYNC_EVENT_RECV_TYPE; 4434 recv_msg->msg.netfn = msg->rsp[0] >> 2; 4435 recv_msg->msg.cmd = msg->rsp[1]; 4436 memcpy(recv_msg->msg_data, &msg->rsp[3], msg->rsp_size - 3); 4437 recv_msg->msg.data = recv_msg->msg_data; 4438 recv_msg->msg.data_len = msg->rsp_size - 3; 4439 } 4440 4441 static int handle_read_event_rsp(struct ipmi_smi *intf, 4442 struct ipmi_smi_msg *msg) 4443 { 4444 struct ipmi_recv_msg *recv_msg, *recv_msg2; 4445 struct list_head msgs; 4446 struct ipmi_user *user; 4447 int rv = 0, deliver_count = 0; 4448 4449 if (msg->rsp_size < 19) { 4450 /* Message is too small to be an IPMB event. */ 4451 ipmi_inc_stat(intf, invalid_events); 4452 return 0; 4453 } 4454 4455 if (msg->rsp[2] != 0) { 4456 /* An error getting the event, just ignore it. */ 4457 return 0; 4458 } 4459 4460 INIT_LIST_HEAD(&msgs); 4461 4462 mutex_lock(&intf->events_mutex); 4463 4464 ipmi_inc_stat(intf, events); 4465 4466 /* 4467 * Allocate and fill in one message for every user that is 4468 * getting events. 
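* Each user gets its own copy because every delivered message is freed
* independently by its consumer. If any allocation fails, the copies
* made so far are released and the event is requeued (rv = 1) so no
* user misses it.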
4469 */ 4470 mutex_lock(&intf->users_mutex); 4471 list_for_each_entry(user, &intf->users, link) { 4472 if (!user->gets_events) 4473 continue; 4474 4475 recv_msg = ipmi_alloc_recv_msg(user); 4476 if (IS_ERR(recv_msg)) { 4477 mutex_unlock(&intf->users_mutex); 4478 list_for_each_entry_safe(recv_msg, recv_msg2, &msgs, 4479 link) { 4480 user = recv_msg->user; 4481 list_del(&recv_msg->link); 4482 ipmi_free_recv_msg(recv_msg); 4483 kref_put(&user->refcount, free_ipmi_user); 4484 } 4485 /* 4486 * We couldn't allocate memory for the 4487 * message, so requeue it for handling 4488 * later. 4489 */ 4490 rv = 1; 4491 goto out; 4492 } 4493 4494 deliver_count++; 4495 4496 copy_event_into_recv_msg(recv_msg, msg); 4497 list_add_tail(&recv_msg->link, &msgs); 4498 } 4499 mutex_unlock(&intf->users_mutex); 4500 4501 if (deliver_count) { 4502 /* Now deliver all the messages. */ 4503 list_for_each_entry_safe(recv_msg, recv_msg2, &msgs, link) { 4504 list_del(&recv_msg->link); 4505 deliver_local_response(intf, recv_msg); 4506 } 4507 } else if (intf->waiting_events_count < MAX_EVENTS_IN_QUEUE) { 4508 /* 4509 * No one to receive the message, put it in queue if there's 4510 * not already too many things in the queue. 4511 */ 4512 recv_msg = ipmi_alloc_recv_msg(NULL); 4513 if (IS_ERR(recv_msg)) { 4514 /* 4515 * We couldn't allocate memory for the 4516 * message, so requeue it for handling 4517 * later. 4518 */ 4519 rv = 1; 4520 goto out; 4521 } 4522 4523 copy_event_into_recv_msg(recv_msg, msg); 4524 list_add_tail(&recv_msg->link, &intf->waiting_events); 4525 intf->waiting_events_count++; 4526 } else if (!intf->event_msg_printed) { 4527 /* 4528 * There's too many things in the queue, discard this 4529 * message. 4530 */ 4531 dev_warn(intf->si_dev, 4532 "Event queue full, discarding incoming events\n"); 4533 intf->event_msg_printed = 1; 4534 } 4535 4536 out: 4537 mutex_unlock(&intf->events_mutex); 4538 4539 return rv; 4540 } 4541 4542 static int handle_bmc_rsp(struct ipmi_smi *intf, 4543 struct ipmi_smi_msg *msg) 4544 { 4545 struct ipmi_recv_msg *recv_msg; 4546 struct ipmi_system_interface_addr *smi_addr; 4547 4548 recv_msg = msg->recv_msg; 4549 if (recv_msg == NULL) { 4550 dev_warn(intf->si_dev, 4551 "IPMI SMI message received with no owner. This could be because of a malformed message, or because of a hardware error. Contact your hardware vendor for assistance.\n"); 4552 return 0; 4553 } 4554 4555 recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE; 4556 recv_msg->msgid = msg->msgid; 4557 smi_addr = ((struct ipmi_system_interface_addr *) 4558 &recv_msg->addr); 4559 smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE; 4560 smi_addr->channel = IPMI_BMC_CHANNEL; 4561 smi_addr->lun = msg->rsp[0] & 3; 4562 recv_msg->msg.netfn = msg->rsp[0] >> 2; 4563 recv_msg->msg.cmd = msg->rsp[1]; 4564 memcpy(recv_msg->msg_data, &msg->rsp[2], msg->rsp_size - 2); 4565 recv_msg->msg.data = recv_msg->msg_data; 4566 recv_msg->msg.data_len = msg->rsp_size - 2; 4567 deliver_local_response(intf, recv_msg); 4568 4569 return 0; 4570 } 4571 4572 /* 4573 * Handle a received message. Return 1 if the message should be requeued, 4574 * 0 if the message should be freed, or -1 if the message should not 4575 * be freed or requeued. 
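* The -1 case is used when the SMI message buffer itself was reused to
* transmit an error response; handle_new_recv_msgs() then neither frees
* nor requeues it.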
4576 */ 4577 static int handle_one_recv_msg(struct ipmi_smi *intf, 4578 struct ipmi_smi_msg *msg) 4579 { 4580 int requeue = 0; 4581 int chan; 4582 unsigned char cc; 4583 bool is_cmd = !((msg->rsp[0] >> 2) & 1); 4584 4585 dev_dbg(intf->si_dev, "Recv: %*ph\n", msg->rsp_size, msg->rsp); 4586 4587 if (msg->rsp_size < 2) { 4588 /* Message is too small to be correct. */ 4589 dev_warn_ratelimited(intf->si_dev, 4590 "BMC returned too small a message for netfn %x cmd %x, got %d bytes\n", 4591 (msg->data[0] >> 2) | 1, 4592 msg->data[1], msg->rsp_size); 4593 4594 return_unspecified: 4595 /* Generate an error response for the message. */ 4596 msg->rsp[0] = msg->data[0] | (1 << 2); 4597 msg->rsp[1] = msg->data[1]; 4598 msg->rsp[2] = IPMI_ERR_UNSPECIFIED; 4599 msg->rsp_size = 3; 4600 } else if (msg->type == IPMI_SMI_MSG_TYPE_IPMB_DIRECT) { 4601 /* commands must have at least 4 bytes, responses 5. */ 4602 if (is_cmd && (msg->rsp_size < 4)) { 4603 ipmi_inc_stat(intf, invalid_commands); 4604 goto out; 4605 } 4606 if (!is_cmd && (msg->rsp_size < 5)) { 4607 ipmi_inc_stat(intf, invalid_ipmb_responses); 4608 /* Construct a valid error response. */ 4609 msg->rsp[0] = msg->data[0] & 0xfc; /* NetFN */ 4610 msg->rsp[0] |= (1 << 2); /* Make it a response */ 4611 msg->rsp[0] |= msg->data[2] & 3; /* rqLUN */ 4612 msg->rsp[1] = msg->data[1]; /* Addr */ 4613 msg->rsp[2] = msg->data[2] & 0xfc; /* rqSeq */ 4614 msg->rsp[2] |= msg->data[0] & 0x3; /* rsLUN */ 4615 msg->rsp[3] = msg->data[3]; /* Cmd */ 4616 msg->rsp[4] = IPMI_ERR_UNSPECIFIED; 4617 msg->rsp_size = 5; 4618 } 4619 } else if ((msg->data_size >= 2) 4620 && (msg->data[0] == (IPMI_NETFN_APP_REQUEST << 2)) 4621 && (msg->data[1] == IPMI_SEND_MSG_CMD) 4622 && (msg->recv_msg == NULL)) { 4623 4624 if (intf->in_shutdown || intf->run_to_completion) 4625 goto out; 4626 4627 /* 4628 * This is the local response to a command send, start 4629 * the timer for these. The recv_msg will not be 4630 * NULL if this is a response send, and we will let 4631 * response sends just go through. 4632 */ 4633 4634 /* 4635 * Check for errors, if we get certain errors (ones 4636 * that mean basically we can try again later), we 4637 * ignore them and start the timer. Otherwise we 4638 * report the error immediately. 4639 */ 4640 if ((msg->rsp_size >= 3) && (msg->rsp[2] != 0) 4641 && (msg->rsp[2] != IPMI_NODE_BUSY_ERR) 4642 && (msg->rsp[2] != IPMI_LOST_ARBITRATION_ERR) 4643 && (msg->rsp[2] != IPMI_BUS_ERR) 4644 && (msg->rsp[2] != IPMI_NAK_ON_WRITE_ERR)) { 4645 int ch = msg->rsp[3] & 0xf; 4646 struct ipmi_channel *chans; 4647 4648 /* Got an error sending the message, handle it. */ 4649 4650 chans = READ_ONCE(intf->channel_list)->c; 4651 if ((chans[ch].medium == IPMI_CHANNEL_MEDIUM_8023LAN) 4652 || (chans[ch].medium == IPMI_CHANNEL_MEDIUM_ASYNC)) 4653 ipmi_inc_stat(intf, sent_lan_command_errs); 4654 else 4655 ipmi_inc_stat(intf, sent_ipmb_command_errs); 4656 intf_err_seq(intf, msg->msgid, msg->rsp[2]); 4657 } else 4658 /* The message was sent, start the timer. */ 4659 intf_start_seq_timer(intf, msg->msgid); 4660 requeue = 0; 4661 goto out; 4662 } else if (((msg->rsp[0] >> 2) != ((msg->data[0] >> 2) | 1)) 4663 || (msg->rsp[1] != msg->data[1])) { 4664 /* 4665 * The NetFN and Command in the response is not even 4666 * marginally correct. 
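* A sane response must carry the request netfn with the response bit
* set ((data[0] >> 2) | 1) and echo the request command byte; anything
* else is reported and converted to an unspecified-error response via
* return_unspecified above.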
4667 */ 4668 dev_warn_ratelimited(intf->si_dev, 4669 "BMC returned incorrect response, expected netfn %x cmd %x, got netfn %x cmd %x\n", 4670 (msg->data[0] >> 2) | 1, msg->data[1], 4671 msg->rsp[0] >> 2, msg->rsp[1]); 4672 4673 goto return_unspecified; 4674 } 4675 4676 if (msg->type == IPMI_SMI_MSG_TYPE_IPMB_DIRECT) { 4677 if ((msg->data[0] >> 2) & 1) { 4678 /* It's a response to a sent response. */ 4679 chan = 0; 4680 cc = msg->rsp[4]; 4681 goto process_response_response; 4682 } 4683 if (is_cmd) 4684 requeue = handle_ipmb_direct_rcv_cmd(intf, msg); 4685 else 4686 requeue = handle_ipmb_direct_rcv_rsp(intf, msg); 4687 } else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2)) 4688 && (msg->rsp[1] == IPMI_SEND_MSG_CMD) 4689 && (msg->recv_msg != NULL)) { 4690 /* 4691 * It's a response to a response we sent. For this we 4692 * deliver a send message response to the user. 4693 */ 4694 struct ipmi_recv_msg *recv_msg; 4695 4696 if (intf->run_to_completion) 4697 goto out; 4698 4699 chan = msg->data[2] & 0x0f; 4700 if (chan >= IPMI_MAX_CHANNELS) 4701 /* Invalid channel number */ 4702 goto out; 4703 cc = msg->rsp[2]; 4704 4705 process_response_response: 4706 recv_msg = msg->recv_msg; 4707 4708 requeue = 0; 4709 if (!recv_msg) 4710 goto out; 4711 4712 recv_msg->recv_type = IPMI_RESPONSE_RESPONSE_TYPE; 4713 recv_msg->msg.data = recv_msg->msg_data; 4714 recv_msg->msg_data[0] = cc; 4715 recv_msg->msg.data_len = 1; 4716 deliver_local_response(intf, recv_msg); 4717 } else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2)) 4718 && (msg->rsp[1] == IPMI_GET_MSG_CMD)) { 4719 struct ipmi_channel *chans; 4720 4721 if (intf->run_to_completion) 4722 goto out; 4723 4724 /* It's from the receive queue. */ 4725 chan = msg->rsp[3] & 0xf; 4726 if (chan >= IPMI_MAX_CHANNELS) { 4727 /* Invalid channel number */ 4728 requeue = 0; 4729 goto out; 4730 } 4731 4732 /* 4733 * We need to make sure the channels have been initialized. 4734 * The channel_handler routine will set the "curr_channel" 4735 * equal to or greater than IPMI_MAX_CHANNELS when all the 4736 * channels for this interface have been initialized. 4737 */ 4738 if (!intf->channels_ready) { 4739 requeue = 0; /* Throw the message away */ 4740 goto out; 4741 } 4742 4743 chans = READ_ONCE(intf->channel_list)->c; 4744 4745 switch (chans[chan].medium) { 4746 case IPMI_CHANNEL_MEDIUM_IPMB: 4747 if (msg->rsp[4] & 0x04) { 4748 /* 4749 * It's a response, so find the 4750 * requesting message and send it up. 4751 */ 4752 requeue = handle_ipmb_get_msg_rsp(intf, msg); 4753 } else { 4754 /* 4755 * It's a command to the SMS from some other 4756 * entity. Handle that. 4757 */ 4758 requeue = handle_ipmb_get_msg_cmd(intf, msg); 4759 } 4760 break; 4761 4762 case IPMI_CHANNEL_MEDIUM_8023LAN: 4763 case IPMI_CHANNEL_MEDIUM_ASYNC: 4764 if (msg->rsp[6] & 0x04) { 4765 /* 4766 * It's a response, so find the 4767 * requesting message and send it up. 4768 */ 4769 requeue = handle_lan_get_msg_rsp(intf, msg); 4770 } else { 4771 /* 4772 * It's a command to the SMS from some other 4773 * entity. Handle that. 4774 */ 4775 requeue = handle_lan_get_msg_cmd(intf, msg); 4776 } 4777 break; 4778 4779 default: 4780 /* Check for OEM Channels. Clients had better 4781 register for these commands. */ 4782 if ((chans[chan].medium >= IPMI_CHANNEL_MEDIUM_OEM_MIN) 4783 && (chans[chan].medium 4784 <= IPMI_CHANNEL_MEDIUM_OEM_MAX)) { 4785 requeue = handle_oem_get_msg_cmd(intf, msg); 4786 } else { 4787 /* 4788 * We don't handle the channel type, so just 4789 * free the message. 
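* (No statistic is bumped for unknown media; requeue stays 0 so the
* caller simply frees the SMI message.)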
4790 */ 4791 requeue = 0; 4792 } 4793 } 4794 4795 } else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2)) 4796 && (msg->rsp[1] == IPMI_READ_EVENT_MSG_BUFFER_CMD)) { 4797 /* It's an asynchronous event. */ 4798 if (intf->run_to_completion) 4799 goto out; 4800 4801 requeue = handle_read_event_rsp(intf, msg); 4802 } else { 4803 /* It's a response from the local BMC. */ 4804 requeue = handle_bmc_rsp(intf, msg); 4805 } 4806 4807 out: 4808 return requeue; 4809 } 4810 4811 /* 4812 * If there are messages in the queue or pretimeouts, handle them. 4813 */ 4814 static void handle_new_recv_msgs(struct ipmi_smi *intf) 4815 { 4816 struct ipmi_smi_msg *smi_msg; 4817 unsigned long flags = 0; 4818 int rv; 4819 int run_to_completion = READ_ONCE(intf->run_to_completion); 4820 4821 /* See if any waiting messages need to be processed. */ 4822 if (!run_to_completion) 4823 spin_lock_irqsave(&intf->waiting_rcv_msgs_lock, flags); 4824 while (!list_empty(&intf->waiting_rcv_msgs)) { 4825 smi_msg = list_entry(intf->waiting_rcv_msgs.next, 4826 struct ipmi_smi_msg, link); 4827 list_del(&smi_msg->link); 4828 if (!run_to_completion) 4829 spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock, 4830 flags); 4831 rv = handle_one_recv_msg(intf, smi_msg); 4832 if (!run_to_completion) 4833 spin_lock_irqsave(&intf->waiting_rcv_msgs_lock, flags); 4834 if (rv > 0) { 4835 /* 4836 * To preserve message order, quit if we 4837 * can't handle a message. Add the message 4838 * back at the head, this is safe because this 4839 * workqueue is the only thing that pulls the 4840 * messages. 4841 */ 4842 list_add(&smi_msg->link, &intf->waiting_rcv_msgs); 4843 break; 4844 } else { 4845 if (rv == 0) 4846 /* Message handled */ 4847 ipmi_free_smi_msg(smi_msg); 4848 /* If rv < 0, fatal error, del but don't free. */ 4849 } 4850 } 4851 if (!run_to_completion) 4852 spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock, flags); 4853 } 4854 4855 static void smi_work(struct work_struct *t) 4856 { 4857 unsigned long flags = 0; /* keep us warning-free. */ 4858 struct ipmi_smi *intf = from_work(intf, t, smi_work); 4859 int run_to_completion = READ_ONCE(intf->run_to_completion); 4860 struct ipmi_smi_msg *newmsg = NULL; 4861 struct ipmi_recv_msg *msg, *msg2; 4862 int cc; 4863 4864 /* 4865 * Start the next message if available. 4866 * 4867 * Do this here, not in the actual receiver, because we may deadlock 4868 * because the lower layer is allowed to hold locks while calling 4869 * message delivery. 4870 */ 4871 restart: 4872 ipmi_lock_xmit_msgs(intf, run_to_completion, &flags); 4873 if (intf->curr_msg == NULL && !intf->in_shutdown) { 4874 struct list_head *entry = NULL; 4875 4876 /* Pick the high priority queue first. 
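* hp_xmit_msgs is drained before the normal xmit_msgs list so that
* high-priority traffic is not stuck behind queued bulk requests.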
*/ 4877 if (!list_empty(&intf->hp_xmit_msgs)) 4878 entry = intf->hp_xmit_msgs.next; 4879 else if (!list_empty(&intf->xmit_msgs)) 4880 entry = intf->xmit_msgs.next; 4881 4882 if (entry) { 4883 list_del(entry); 4884 newmsg = list_entry(entry, struct ipmi_smi_msg, link); 4885 intf->curr_msg = newmsg; 4886 } 4887 } 4888 ipmi_unlock_xmit_msgs(intf, run_to_completion, &flags); 4889 4890 if (newmsg) { 4891 cc = intf->handlers->sender(intf->send_info, newmsg); 4892 if (cc) { 4893 if (newmsg->recv_msg) 4894 deliver_err_response(intf, 4895 newmsg->recv_msg, cc); 4896 ipmi_lock_xmit_msgs(intf, run_to_completion, &flags); 4897 intf->curr_msg = NULL; 4898 ipmi_unlock_xmit_msgs(intf, run_to_completion, &flags); 4899 ipmi_free_smi_msg(newmsg); 4900 newmsg = NULL; 4901 goto restart; 4902 } 4903 } 4904 4905 handle_new_recv_msgs(intf); 4906 4907 /* Nothing below applies during panic time. */ 4908 if (run_to_completion) 4909 return; 4910 4911 /* 4912 * If the pretimout count is non-zero, decrement one from it and 4913 * deliver pretimeouts to all the users. 4914 */ 4915 if (atomic_add_unless(&intf->watchdog_pretimeouts_to_deliver, -1, 0)) { 4916 struct ipmi_user *user; 4917 4918 mutex_lock(&intf->users_mutex); 4919 list_for_each_entry(user, &intf->users, link) { 4920 if (user->handler->ipmi_watchdog_pretimeout) 4921 user->handler->ipmi_watchdog_pretimeout( 4922 user->handler_data); 4923 } 4924 mutex_unlock(&intf->users_mutex); 4925 } 4926 4927 /* 4928 * Freeing the message can cause a user to be released, which 4929 * can then cause the interface to be freed. Make sure that 4930 * doesn't happen until we are ready. 4931 */ 4932 kref_get(&intf->refcount); 4933 4934 mutex_lock(&intf->user_msgs_mutex); 4935 list_for_each_entry_safe(msg, msg2, &intf->user_msgs, link) { 4936 struct ipmi_user *user = msg->user; 4937 4938 list_del(&msg->link); 4939 4940 if (refcount_read(&user->destroyed) == 0) 4941 ipmi_free_recv_msg(msg); 4942 else 4943 user->handler->ipmi_recv_hndl(msg, user->handler_data); 4944 } 4945 mutex_unlock(&intf->user_msgs_mutex); 4946 4947 kref_put(&intf->refcount, intf_free); 4948 } 4949 4950 /* Handle a new message from the lower layer. */ 4951 void ipmi_smi_msg_received(struct ipmi_smi *intf, 4952 struct ipmi_smi_msg *msg) 4953 { 4954 unsigned long flags = 0; /* keep us warning-free. */ 4955 int run_to_completion = READ_ONCE(intf->run_to_completion); 4956 4957 /* 4958 * To preserve message order, we keep a queue and deliver from 4959 * a workqueue. 4960 */ 4961 if (!run_to_completion) 4962 spin_lock_irqsave(&intf->waiting_rcv_msgs_lock, flags); 4963 list_add_tail(&msg->link, &intf->waiting_rcv_msgs); 4964 if (!run_to_completion) 4965 spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock, 4966 flags); 4967 4968 ipmi_lock_xmit_msgs(intf, run_to_completion, &flags); 4969 /* 4970 * We can get an asynchronous event or receive message in addition 4971 * to commands we send. 
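* Only clear curr_msg when the message that just came back is the one
* we handed to the lower layer; an unrelated asynchronous message must
* not release the transmit slot.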
4972 */ 4973 if (msg == intf->curr_msg) 4974 intf->curr_msg = NULL; 4975 ipmi_unlock_xmit_msgs(intf, run_to_completion, &flags); 4976 4977 if (run_to_completion) 4978 smi_work(&intf->smi_work); 4979 else 4980 queue_work(system_wq, &intf->smi_work); 4981 } 4982 EXPORT_SYMBOL(ipmi_smi_msg_received); 4983 4984 void ipmi_smi_watchdog_pretimeout(struct ipmi_smi *intf) 4985 { 4986 if (intf->in_shutdown) 4987 return; 4988 4989 atomic_set(&intf->watchdog_pretimeouts_to_deliver, 1); 4990 queue_work(system_wq, &intf->smi_work); 4991 } 4992 EXPORT_SYMBOL(ipmi_smi_watchdog_pretimeout); 4993 4994 static struct ipmi_smi_msg * 4995 smi_from_recv_msg(struct ipmi_smi *intf, struct ipmi_recv_msg *recv_msg, 4996 unsigned char seq, long seqid) 4997 { 4998 struct ipmi_smi_msg *smi_msg = ipmi_alloc_smi_msg(); 4999 if (!smi_msg) 5000 /* 5001 * If we can't allocate the message, then just return, we 5002 * get 4 retries, so this should be ok. 5003 */ 5004 return NULL; 5005 5006 memcpy(smi_msg->data, recv_msg->msg.data, recv_msg->msg.data_len); 5007 smi_msg->data_size = recv_msg->msg.data_len; 5008 smi_msg->msgid = STORE_SEQ_IN_MSGID(seq, seqid); 5009 5010 dev_dbg(intf->si_dev, "Resend: %*ph\n", 5011 smi_msg->data_size, smi_msg->data); 5012 5013 return smi_msg; 5014 } 5015 5016 static void check_msg_timeout(struct ipmi_smi *intf, struct seq_table *ent, 5017 struct list_head *timeouts, 5018 unsigned long timeout_period, 5019 int slot, bool *need_timer) 5020 { 5021 struct ipmi_recv_msg *msg; 5022 5023 if (intf->in_shutdown) 5024 return; 5025 5026 if (!ent->inuse) 5027 return; 5028 5029 if (timeout_period < ent->timeout) { 5030 ent->timeout -= timeout_period; 5031 *need_timer = true; 5032 return; 5033 } 5034 5035 if (ent->retries_left == 0) { 5036 /* The message has used all its retries. */ 5037 ent->inuse = 0; 5038 smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES); 5039 msg = ent->recv_msg; 5040 list_add_tail(&msg->link, timeouts); 5041 if (ent->broadcast) 5042 ipmi_inc_stat(intf, timed_out_ipmb_broadcasts); 5043 else if (is_lan_addr(&ent->recv_msg->addr)) 5044 ipmi_inc_stat(intf, timed_out_lan_commands); 5045 else 5046 ipmi_inc_stat(intf, timed_out_ipmb_commands); 5047 } else { 5048 struct ipmi_smi_msg *smi_msg; 5049 /* More retries, send again. */ 5050 5051 *need_timer = true; 5052 5053 /* 5054 * Start with the max timer, set to normal timer after 5055 * the message is sent. 5056 */ 5057 ent->timeout = MAX_MSG_TIMEOUT; 5058 ent->retries_left--; 5059 smi_msg = smi_from_recv_msg(intf, ent->recv_msg, slot, 5060 ent->seqid); 5061 if (!smi_msg) { 5062 if (is_lan_addr(&ent->recv_msg->addr)) 5063 ipmi_inc_stat(intf, 5064 dropped_rexmit_lan_commands); 5065 else 5066 ipmi_inc_stat(intf, 5067 dropped_rexmit_ipmb_commands); 5068 return; 5069 } 5070 5071 mutex_unlock(&intf->seq_lock); 5072 5073 /* 5074 * Send the new message. We send with a zero 5075 * priority. It timed out, I doubt time is that 5076 * critical now, and high priority messages are really 5077 * only for messages to the local MC, which don't get 5078 * resent. 5079 */ 5080 if (intf->handlers) { 5081 if (is_lan_addr(&ent->recv_msg->addr)) 5082 ipmi_inc_stat(intf, 5083 retransmitted_lan_commands); 5084 else 5085 ipmi_inc_stat(intf, 5086 retransmitted_ipmb_commands); 5087 5088 /* If this fails we'll retry later or timeout. */ 5089 if (smi_send(intf, intf->handlers, smi_msg, 0) != IPMI_CC_NO_ERROR) { 5090 /* But fix the timeout. 
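* The timeout was bumped to MAX_MSG_TIMEOUT above in anticipation of
* the send; since the send failed, restart the normal per-message
* timer and drop the duplicate SMI message.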
*/ 5091 intf_start_seq_timer(intf, smi_msg->msgid); 5092 ipmi_free_smi_msg(smi_msg); 5093 } 5094 } else 5095 ipmi_free_smi_msg(smi_msg); 5096 5097 mutex_lock(&intf->seq_lock); 5098 } 5099 } 5100 5101 static bool ipmi_timeout_handler(struct ipmi_smi *intf, 5102 unsigned long timeout_period) 5103 { 5104 struct list_head timeouts; 5105 struct ipmi_recv_msg *msg, *msg2; 5106 unsigned long flags; 5107 int i; 5108 bool need_timer = false; 5109 5110 if (!intf->bmc_registered) { 5111 kref_get(&intf->refcount); 5112 if (!schedule_work(&intf->bmc_reg_work)) { 5113 kref_put(&intf->refcount, intf_free); 5114 need_timer = true; 5115 } 5116 } 5117 5118 /* 5119 * Go through the seq table and find any messages that 5120 * have timed out, putting them in the timeouts 5121 * list. 5122 */ 5123 INIT_LIST_HEAD(&timeouts); 5124 mutex_lock(&intf->seq_lock); 5125 if (intf->ipmb_maintenance_mode_timeout) { 5126 if (intf->ipmb_maintenance_mode_timeout <= timeout_period) 5127 intf->ipmb_maintenance_mode_timeout = 0; 5128 else 5129 intf->ipmb_maintenance_mode_timeout -= timeout_period; 5130 } 5131 for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) 5132 check_msg_timeout(intf, &intf->seq_table[i], 5133 &timeouts, timeout_period, i, 5134 &need_timer); 5135 mutex_unlock(&intf->seq_lock); 5136 5137 list_for_each_entry_safe(msg, msg2, &timeouts, link) 5138 deliver_err_response(intf, msg, IPMI_TIMEOUT_COMPLETION_CODE); 5139 5140 /* 5141 * Maintenance mode handling. Check the timeout 5142 * optimistically before we claim the lock. It may 5143 * mean a timeout gets missed occasionally, but that 5144 * only means the timeout gets extended by one period 5145 * in that case. No big deal, and it avoids the lock 5146 * most of the time. 5147 */ 5148 if (intf->auto_maintenance_timeout > 0) { 5149 spin_lock_irqsave(&intf->maintenance_mode_lock, flags); 5150 if (intf->auto_maintenance_timeout > 0) { 5151 intf->auto_maintenance_timeout 5152 -= timeout_period; 5153 if (!intf->maintenance_mode 5154 && (intf->auto_maintenance_timeout <= 0)) { 5155 intf->maintenance_mode_state = 5156 IPMI_MAINTENANCE_MODE_STATE_OFF; 5157 intf->auto_maintenance_timeout = 0; 5158 maintenance_mode_update(intf); 5159 } 5160 } 5161 spin_unlock_irqrestore(&intf->maintenance_mode_lock, 5162 flags); 5163 } 5164 5165 queue_work(system_wq, &intf->smi_work); 5166 5167 return need_timer; 5168 } 5169 5170 static void ipmi_request_event(struct ipmi_smi *intf) 5171 { 5172 /* No event requests when in maintenance mode. 
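* While maintenance mode is active the BMC may be busy with a firmware
* transaction, so avoid generating extra traffic just to poll for
* events.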
*/ 5173 if (intf->maintenance_mode_state) 5174 return; 5175 5176 if (!intf->in_shutdown) 5177 intf->handlers->request_events(intf->send_info); 5178 } 5179 5180 static atomic_t stop_operation; 5181
5182 static void ipmi_timeout_work(struct work_struct *work) 5183 { 5187 struct ipmi_smi *intf; 5188 bool need_timer = false; 5189 5190 if (atomic_read(&stop_operation)) 5191 return; 5192 5193 mutex_lock(&ipmi_interfaces_mutex); 5194 list_for_each_entry(intf, &ipmi_interfaces, link) { 5195 if (atomic_read(&intf->event_waiters)) { 5196 intf->ticks_to_req_ev--; 5197 if (intf->ticks_to_req_ev == 0) { 5198 ipmi_request_event(intf); 5199 intf->ticks_to_req_ev = IPMI_REQUEST_EV_TIME; 5200 } 5201 need_timer = true; 5202 } 5203 if (intf->maintenance_mode_state) 5204 need_timer = true; 5205 5206 need_timer |= ipmi_timeout_handler(intf, IPMI_TIMEOUT_TIME); 5207 } 5208 mutex_unlock(&ipmi_interfaces_mutex); 5209 5210 if (need_timer) 5211 mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES); 5212 } 5213
5214 static DECLARE_WORK(ipmi_timer_work, ipmi_timeout_work); 5215
5216 static void ipmi_timeout(struct timer_list *unused) 5217 { 5218 if (atomic_read(&stop_operation)) 5219 return; 5220 5221 queue_work(system_wq, &ipmi_timer_work); 5222 } 5223
5224 static void need_waiter(struct ipmi_smi *intf) 5225 { 5226 /* Racy, but worst case we start the timer twice. */ 5227 if (!timer_pending(&ipmi_timer)) 5228 mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES); 5229 } 5230
5231 static atomic_t smi_msg_inuse_count = ATOMIC_INIT(0); 5232 static atomic_t recv_msg_inuse_count = ATOMIC_INIT(0); 5233
5234 static void free_smi_msg(struct ipmi_smi_msg *msg) 5235 { 5236 atomic_dec(&smi_msg_inuse_count); 5237 /* Try to keep as much stuff out of the panic path as possible. */ 5238 if (!oops_in_progress) 5239 kfree(msg); 5240 } 5241
5242 struct ipmi_smi_msg *ipmi_alloc_smi_msg(void) 5243 { 5244 struct ipmi_smi_msg *rv; 5245 rv = kmalloc_obj(struct ipmi_smi_msg, GFP_ATOMIC); 5246 if (rv) { 5247 rv->done = free_smi_msg; 5248 rv->recv_msg = NULL; 5249 rv->type = IPMI_SMI_MSG_TYPE_NORMAL; 5250 atomic_inc(&smi_msg_inuse_count); 5251 } 5252 return rv; 5253 } 5254 EXPORT_SYMBOL(ipmi_alloc_smi_msg); 5255
5256 static void free_recv_msg(struct ipmi_recv_msg *msg) 5257 { 5258 atomic_dec(&recv_msg_inuse_count); 5259 /* Try to keep as much stuff out of the panic path as possible. */ 5260 if (!oops_in_progress) 5261 kfree(msg); 5262 } 5263
5264 static struct ipmi_recv_msg *ipmi_alloc_recv_msg(struct ipmi_user *user) 5265 { 5266 struct ipmi_recv_msg *rv; 5267 5268 if (user) { 5269 if (atomic_add_return(1, &user->nr_msgs) > max_msgs_per_user) { 5270 atomic_dec(&user->nr_msgs); 5271 return ERR_PTR(-EBUSY); 5272 } 5273 } 5274 5275 rv = kmalloc_obj(struct ipmi_recv_msg, GFP_ATOMIC); 5276 if (!rv) { 5277 if (user) 5278 atomic_dec(&user->nr_msgs); 5279 return ERR_PTR(-ENOMEM); 5280 } 5281 5282 rv->user = user; 5283 rv->done = free_recv_msg; 5284 if (user) 5285 kref_get(&user->refcount); 5286 atomic_inc(&recv_msg_inuse_count); 5287 return rv; 5288 } 5289
5290 void ipmi_free_recv_msg(struct ipmi_recv_msg *msg) 5291 { 5292 if (msg->user && !oops_in_progress) { 5293 atomic_dec(&msg->user->nr_msgs); 5294 kref_put(&msg->user->refcount, free_ipmi_user); 5295 } 5296 msg->done(msg); 5297 } 5298 EXPORT_SYMBOL(ipmi_free_recv_msg); 5299
5300 static void ipmi_set_recv_msg_user(struct ipmi_recv_msg *msg, 5301 struct ipmi_user *user) 5302 { 5303 WARN_ON_ONCE(msg->user); /* User should not be set.
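* This helper takes a new reference on the user and bumps its
* outstanding-message count; calling it on a message that already has
* an owner would leak the previous owner's reference and count.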
*/ 5304 msg->user = user; 5305 atomic_inc(&user->nr_msgs); 5306 kref_get(&user->refcount); 5307 } 5308 5309 static atomic_t panic_done_count = ATOMIC_INIT(0); 5310 5311 static void dummy_smi_done_handler(struct ipmi_smi_msg *msg) 5312 { 5313 atomic_dec(&panic_done_count); 5314 } 5315 5316 static void dummy_recv_done_handler(struct ipmi_recv_msg *msg) 5317 { 5318 atomic_dec(&panic_done_count); 5319 } 5320 5321 /* 5322 * Inside a panic, send a message and wait for a response. 5323 */ 5324 static void _ipmi_panic_request_and_wait(struct ipmi_smi *intf, 5325 struct ipmi_addr *addr, 5326 struct kernel_ipmi_msg *msg) 5327 { 5328 struct ipmi_smi_msg smi_msg; 5329 struct ipmi_recv_msg recv_msg; 5330 int rv; 5331 5332 smi_msg.done = dummy_smi_done_handler; 5333 recv_msg.done = dummy_recv_done_handler; 5334 atomic_add(2, &panic_done_count); 5335 rv = i_ipmi_request(NULL, 5336 intf, 5337 addr, 5338 0, 5339 msg, 5340 intf, 5341 &smi_msg, 5342 &recv_msg, 5343 0, 5344 intf->addrinfo[0].address, 5345 intf->addrinfo[0].lun, 5346 0, 1); /* Don't retry, and don't wait. */ 5347 if (rv) 5348 atomic_sub(2, &panic_done_count); 5349 else if (intf->handlers->flush_messages) 5350 intf->handlers->flush_messages(intf->send_info); 5351 5352 while (atomic_read(&panic_done_count) != 0) 5353 ipmi_poll(intf); 5354 } 5355 5356 void ipmi_panic_request_and_wait(struct ipmi_user *user, 5357 struct ipmi_addr *addr, 5358 struct kernel_ipmi_msg *msg) 5359 { 5360 user->intf->run_to_completion = 1; 5361 _ipmi_panic_request_and_wait(user->intf, addr, msg); 5362 } 5363 EXPORT_SYMBOL(ipmi_panic_request_and_wait); 5364 5365 static void event_receiver_fetcher(struct ipmi_smi *intf, 5366 struct ipmi_recv_msg *msg) 5367 { 5368 if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) 5369 && (msg->msg.netfn == IPMI_NETFN_SENSOR_EVENT_RESPONSE) 5370 && (msg->msg.cmd == IPMI_GET_EVENT_RECEIVER_CMD) 5371 && (msg->msg.data[0] == IPMI_CC_NO_ERROR)) { 5372 /* A get event receiver command, save it. */ 5373 intf->event_receiver = msg->msg.data[1]; 5374 intf->event_receiver_lun = msg->msg.data[2] & 0x3; 5375 } 5376 } 5377 5378 static void device_id_fetcher(struct ipmi_smi *intf, struct ipmi_recv_msg *msg) 5379 { 5380 if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) 5381 && (msg->msg.netfn == IPMI_NETFN_APP_RESPONSE) 5382 && (msg->msg.cmd == IPMI_GET_DEVICE_ID_CMD) 5383 && (msg->msg.data[0] == IPMI_CC_NO_ERROR)) { 5384 /* 5385 * A get device id command, save if we are an event 5386 * receiver or generator. 5387 */ 5388 intf->local_sel_device = (msg->msg.data[6] >> 2) & 1; 5389 intf->local_event_generator = (msg->msg.data[6] >> 5) & 1; 5390 } 5391 } 5392 5393 static void send_panic_events(struct ipmi_smi *intf, char *str) 5394 { 5395 struct kernel_ipmi_msg msg; 5396 unsigned char data[16]; 5397 struct ipmi_system_interface_addr *si; 5398 struct ipmi_addr addr; 5399 char *p = str; 5400 struct ipmi_ipmb_addr *ipmb; 5401 int j; 5402 5403 if (ipmi_send_panic_event == IPMI_SEND_PANIC_EVENT_NONE) 5404 return; 5405 5406 si = (struct ipmi_system_interface_addr *) &addr; 5407 si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE; 5408 si->channel = IPMI_BMC_CHANNEL; 5409 si->lun = 0; 5410 5411 /* Fill in an event telling that we have failed. */ 5412 msg.netfn = 0x04; /* Sensor or Event. */ 5413 msg.cmd = 2; /* Platform event command. */ 5414 msg.data = data; 5415 msg.data_len = 8; 5416 data[0] = 0x41; /* Kernel generator ID, IPMI table 5-4 */ 5417 data[1] = 0x03; /* This is for IPMI 1.0. 
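* 0x03 is the event message format revision (EvMRev) defined for IPMI
* 1.0 platform event requests; later IPMI revisions use 0x04.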
*/ 5418 data[2] = 0x20; /* OS Critical Stop, IPMI table 36-3 */ 5419 data[4] = 0x6f; /* Sensor specific, IPMI table 36-1 */ 5420 data[5] = 0xa1; /* Runtime stop OEM bytes 2 & 3. */ 5421
5422 /* 5423 * Put a few breadcrumbs in. Hopefully later we can add more things 5424 * to make the panic events more useful. 5425 */ 5426 if (str) { 5427 data[3] = str[0]; 5428 data[6] = str[1]; 5429 data[7] = str[2]; 5430 } 5431
5432 /* Send the event announcing the panic. */ 5433 _ipmi_panic_request_and_wait(intf, &addr, &msg); 5434
5435 /* 5436 * On every interface, dump a bunch of OEM events holding the 5437 * string. 5438 */ 5439 if (ipmi_send_panic_event != IPMI_SEND_PANIC_EVENT_STRING || !str) 5440 return; 5441
5442 /* 5443 * intf_num is used as a marker to tell if the 5444 * interface is valid. Thus we need a read barrier to 5445 * make sure data fetched before checking intf_num 5446 * won't be used. 5447 */ 5448 smp_rmb(); 5449
5450 /* 5451 * First job here is to figure out where to send the 5452 * OEM events. There's no way in IPMI to send OEM 5453 * events using an event send command, so we have to 5454 * find the SEL to put them in and stick them in 5455 * there. 5456 */ 5457
5458 /* Get capabilities from the get device id. */ 5459 intf->local_sel_device = 0; 5460 intf->local_event_generator = 0; 5461 intf->event_receiver = 0; 5462
5463 /* Request the device info from the local MC. */ 5464 msg.netfn = IPMI_NETFN_APP_REQUEST; 5465 msg.cmd = IPMI_GET_DEVICE_ID_CMD; 5466 msg.data = NULL; 5467 msg.data_len = 0; 5468 intf->null_user_handler = device_id_fetcher; 5469 _ipmi_panic_request_and_wait(intf, &addr, &msg); 5470
5471 if (intf->local_event_generator) { 5472 /* Request the event receiver from the local MC. */ 5473 msg.netfn = IPMI_NETFN_SENSOR_EVENT_REQUEST; 5474 msg.cmd = IPMI_GET_EVENT_RECEIVER_CMD; 5475 msg.data = NULL; 5476 msg.data_len = 0; 5477 intf->null_user_handler = event_receiver_fetcher; 5478 _ipmi_panic_request_and_wait(intf, &addr, &msg); 5479 } 5480 intf->null_user_handler = NULL; 5481
5482 /* 5483 * Validate the event receiver. The low bit must not 5484 * be 1 (it must be a valid IPMB address), it cannot 5485 * be zero, and it must not be my address. 5486 */ 5487 if (((intf->event_receiver & 1) == 0) 5488 && (intf->event_receiver != 0) 5489 && (intf->event_receiver != intf->addrinfo[0].address)) { 5490 /* 5491 * The event receiver is valid, send an IPMB 5492 * message. 5493 */ 5494 ipmb = (struct ipmi_ipmb_addr *) &addr; 5495 ipmb->addr_type = IPMI_IPMB_ADDR_TYPE; 5496 ipmb->channel = 0; /* FIXME - is this right? */ 5497 ipmb->lun = intf->event_receiver_lun; 5498 ipmb->slave_addr = intf->event_receiver; 5499 } else if (intf->local_sel_device) { 5500 /* 5501 * The event receiver was not valid (or was 5502 * me), but I am an SEL device, just dump it 5503 * in my SEL. 5504 */ 5505 si = (struct ipmi_system_interface_addr *) &addr; 5506 si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE; 5507 si->channel = IPMI_BMC_CHANNEL; 5508 si->lun = 0; 5509 } else 5510 return; /* Nowhere to send the event. */ 5511
5512 msg.netfn = IPMI_NETFN_STORAGE_REQUEST; /* Storage. */ 5513 msg.cmd = IPMI_ADD_SEL_ENTRY_CMD; 5514 msg.data = data; 5515 msg.data_len = 16; 5516
5517 j = 0; 5518 while (*p) { 5519 int size = strnlen(p, 11); 5520 5521 data[0] = 0; 5522 data[1] = 0; 5523 data[2] = 0xf0; /* OEM event without timestamp.
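* SEL record types 0xe0-0xff are OEM records with no timestamp; the
* bytes after the record type are free form, so each record carries
* our slave address, a sequence number, and up to 11 bytes of the
* panic string.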
*/ 5524 data[3] = intf->addrinfo[0].address; 5525 data[4] = j++; /* sequence # */ 5526 5527 memcpy_and_pad(data+5, 11, p, size, '\0'); 5528 p += size; 5529 5530 _ipmi_panic_request_and_wait(intf, &addr, &msg); 5531 } 5532 } 5533 5534 static int has_panicked; 5535 5536 static int panic_event(struct notifier_block *this, 5537 unsigned long event, 5538 void *ptr) 5539 { 5540 struct ipmi_smi *intf; 5541 struct ipmi_user *user; 5542 5543 if (has_panicked) 5544 return NOTIFY_DONE; 5545 has_panicked = 1; 5546 5547 /* For every registered interface, set it to run to completion. */ 5548 list_for_each_entry(intf, &ipmi_interfaces, link) { 5549 if (!intf->handlers || intf->intf_num == -1) 5550 /* Interface is not ready. */ 5551 continue; 5552 5553 if (!intf->handlers->poll) 5554 continue; 5555 5556 /* 5557 * If we were interrupted while locking xmit_msgs_lock or 5558 * waiting_rcv_msgs_lock, the corresponding list may be 5559 * corrupted. In this case, drop items on the list for 5560 * the safety. 5561 */ 5562 if (!spin_trylock(&intf->xmit_msgs_lock)) { 5563 INIT_LIST_HEAD(&intf->xmit_msgs); 5564 INIT_LIST_HEAD(&intf->hp_xmit_msgs); 5565 } else 5566 spin_unlock(&intf->xmit_msgs_lock); 5567 5568 if (!spin_trylock(&intf->waiting_rcv_msgs_lock)) 5569 INIT_LIST_HEAD(&intf->waiting_rcv_msgs); 5570 else 5571 spin_unlock(&intf->waiting_rcv_msgs_lock); 5572 5573 intf->run_to_completion = 1; 5574 if (intf->handlers->set_run_to_completion) 5575 intf->handlers->set_run_to_completion(intf->send_info, 5576 1); 5577 5578 list_for_each_entry(user, &intf->users, link) { 5579 if (user->handler->ipmi_panic_handler) 5580 user->handler->ipmi_panic_handler( 5581 user->handler_data); 5582 } 5583 5584 send_panic_events(intf, ptr); 5585 } 5586 5587 return NOTIFY_DONE; 5588 } 5589 5590 /* Must be called with ipmi_interfaces_mutex held. 
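* Its two callers below, ipmi_init_msghandler() and
* ipmi_init_msghandler_mod(), both take the mutex around the call.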
*/ 5591 static int ipmi_register_driver(void) 5592 { 5593 int rv; 5594 5595 if (drvregistered) 5596 return 0; 5597 5598 rv = driver_register(&ipmidriver.driver); 5599 if (rv) 5600 pr_err("Could not register IPMI driver\n"); 5601 else 5602 drvregistered = true; 5603 return rv; 5604 } 5605 5606 static struct notifier_block panic_block = { 5607 .notifier_call = panic_event, 5608 .next = NULL, 5609 .priority = 200 /* priority: INT_MAX >= x >= 0 */ 5610 }; 5611 5612 static int ipmi_init_msghandler(void) 5613 { 5614 int rv; 5615 5616 mutex_lock(&ipmi_interfaces_mutex); 5617 rv = ipmi_register_driver(); 5618 if (rv) 5619 goto out; 5620 if (initialized) 5621 goto out; 5622 5623 bmc_remove_work_wq = create_singlethread_workqueue("ipmi-msghandler-remove-wq"); 5624 if (!bmc_remove_work_wq) { 5625 pr_err("unable to create ipmi-msghandler-remove-wq workqueue"); 5626 rv = -ENOMEM; 5627 goto out; 5628 } 5629 5630 timer_setup(&ipmi_timer, ipmi_timeout, 0); 5631 mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES); 5632 5633 atomic_notifier_chain_register(&panic_notifier_list, &panic_block); 5634 5635 initialized = true; 5636 5637 out: 5638 mutex_unlock(&ipmi_interfaces_mutex); 5639 return rv; 5640 } 5641 5642 static int __init ipmi_init_msghandler_mod(void) 5643 { 5644 int rv; 5645 5646 pr_info("version " IPMI_DRIVER_VERSION "\n"); 5647 5648 mutex_lock(&ipmi_interfaces_mutex); 5649 rv = ipmi_register_driver(); 5650 mutex_unlock(&ipmi_interfaces_mutex); 5651 5652 return rv; 5653 } 5654 5655 static void __exit cleanup_ipmi(void) 5656 { 5657 int count; 5658 5659 if (initialized) { 5660 destroy_workqueue(bmc_remove_work_wq); 5661 5662 atomic_notifier_chain_unregister(&panic_notifier_list, 5663 &panic_block); 5664 5665 /* 5666 * This can't be called if any interfaces exist, so no worry 5667 * about shutting down the interfaces. 5668 */ 5669 5670 /* 5671 * Tell the timer to stop, then wait for it to stop. This 5672 * avoids problems with race conditions removing the timer 5673 * here. 5674 */ 5675 atomic_set(&stop_operation, 1); 5676 timer_delete_sync(&ipmi_timer); 5677 cancel_work_sync(&ipmi_timer_work); 5678 5679 initialized = false; 5680 5681 /* Check for buffer leaks. */ 5682 count = atomic_read(&smi_msg_inuse_count); 5683 if (count != 0) 5684 pr_warn("SMI message count %d at exit\n", count); 5685 count = atomic_read(&recv_msg_inuse_count); 5686 if (count != 0) 5687 pr_warn("recv message count %d at exit\n", count); 5688 } 5689 if (drvregistered) 5690 driver_unregister(&ipmidriver.driver); 5691 } 5692 module_exit(cleanup_ipmi); 5693 5694 module_init(ipmi_init_msghandler_mod); 5695 MODULE_LICENSE("GPL"); 5696 MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>"); 5697 MODULE_DESCRIPTION("Incoming and outgoing message routing for an IPMI interface."); 5698 MODULE_VERSION(IPMI_DRIVER_VERSION); 5699 MODULE_SOFTDEP("post: ipmi_devintf"); 5700