// SPDX-License-Identifier: GPL-2.0+
/*
 * ipmi_msghandler.c
 *
 * Incoming and outgoing message routing for an IPMI interface.
 *
 * Author: MontaVista Software, Inc.
 *         Corey Minyard <minyard@mvista.com>
 *         source@mvista.com
 *
 * Copyright 2002 MontaVista Software Inc.
 */

#define pr_fmt(fmt) "IPMI message handler: " fmt
#define dev_fmt(fmt) pr_fmt(fmt)

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/panic_notifier.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/ipmi.h>
#include <linux/ipmi_smi.h>
#include <linux/notifier.h>
#include <linux/init.h>
#include <linux/rcupdate.h>
#include <linux/interrupt.h>
#include <linux/moduleparam.h>
#include <linux/workqueue.h>
#include <linux/uuid.h>
#include <linux/nospec.h>
#include <linux/vmalloc.h>
#include <linux/delay.h>

#define IPMI_DRIVER_VERSION "39.2"

static struct ipmi_recv_msg *ipmi_alloc_recv_msg(struct ipmi_user *user);
static void ipmi_set_recv_msg_user(struct ipmi_recv_msg *msg,
				   struct ipmi_user *user);
static int ipmi_init_msghandler(void);
static void smi_work(struct work_struct *t);
static void handle_new_recv_msgs(struct ipmi_smi *intf);
static void need_waiter(struct ipmi_smi *intf);
static int handle_one_recv_msg(struct ipmi_smi *intf,
			       struct ipmi_smi_msg *msg);
static void intf_free(struct kref *ref);

static bool initialized;
static bool drvregistered;

static struct timer_list ipmi_timer;

/* Numbers in this enumerator should be mapped to ipmi_panic_event_str */
enum ipmi_panic_event_op {
	IPMI_SEND_PANIC_EVENT_NONE,
	IPMI_SEND_PANIC_EVENT,
	IPMI_SEND_PANIC_EVENT_STRING,
	IPMI_SEND_PANIC_EVENT_MAX
};

/* Indices in this array should be mapped to enum ipmi_panic_event_op */
static const char *const ipmi_panic_event_str[] = { "none", "event", "string", NULL };

#ifdef CONFIG_IPMI_PANIC_STRING
#define IPMI_PANIC_DEFAULT IPMI_SEND_PANIC_EVENT_STRING
#elif defined(CONFIG_IPMI_PANIC_EVENT)
#define IPMI_PANIC_DEFAULT IPMI_SEND_PANIC_EVENT
#else
#define IPMI_PANIC_DEFAULT IPMI_SEND_PANIC_EVENT_NONE
#endif

static enum ipmi_panic_event_op ipmi_send_panic_event = IPMI_PANIC_DEFAULT;

static int panic_op_write_handler(const char *val,
				  const struct kernel_param *kp)
{
	char valcp[16];
	int e;

	strscpy(valcp, val, sizeof(valcp));
	e = match_string(ipmi_panic_event_str, -1, strstrip(valcp));
	if (e < 0)
		return e;

	ipmi_send_panic_event = e;
	return 0;
}

static int panic_op_read_handler(char *buffer, const struct kernel_param *kp)
{
	const char *event_str;

	if (ipmi_send_panic_event >= IPMI_SEND_PANIC_EVENT_MAX)
		event_str = "???";
	else
		event_str = ipmi_panic_event_str[ipmi_send_panic_event];

	return sprintf(buffer, "%s\n", event_str);
}

static const struct kernel_param_ops panic_op_ops = {
	.set = panic_op_write_handler,
	.get = panic_op_read_handler
};
module_param_cb(panic_op, &panic_op_ops, NULL, 0600);
MODULE_PARM_DESC(panic_op, "Sets if the IPMI driver will attempt to store panic information in the event log in the event of a panic.  Set to 'none' for no, 'event' for a single event, or 'string' for a generic event and the panic string in IPMI OEM events.");

#define MAX_EVENTS_IN_QUEUE 25

/* Remain in auto-maintenance mode for this amount of time (in ms). */
static unsigned long maintenance_mode_timeout_ms = 30000;
module_param(maintenance_mode_timeout_ms, ulong, 0644);
MODULE_PARM_DESC(maintenance_mode_timeout_ms,
		 "The time (milliseconds) after the last maintenance message that the connection stays in maintenance mode.");

/*
 * Don't let a message sit in a queue forever, always time it with at
 * least the max message timer.  This is in milliseconds.
 */
#define MAX_MSG_TIMEOUT 60000

/*
 * Timeout times below are in milliseconds, and are done off a 1
 * second timer.  So setting the value to 1000 would mean anything
 * between 0 and 1000ms.  So really the only reasonable minimum
 * setting is 2000ms, which is between 1 and 2 seconds.
 */

/* The default timeout for message retries. */
static unsigned long default_retry_ms = 2000;
module_param(default_retry_ms, ulong, 0644);
MODULE_PARM_DESC(default_retry_ms,
		 "The time (milliseconds) between retry sends");

/* The default timeout for maintenance mode message retries. */
static unsigned long default_maintenance_retry_ms = 3000;
module_param(default_maintenance_retry_ms, ulong, 0644);
MODULE_PARM_DESC(default_maintenance_retry_ms,
		 "The time (milliseconds) between retry sends in maintenance mode");

/* The default maximum number of retries */
static unsigned int default_max_retries = 4;
module_param(default_max_retries, uint, 0644);
MODULE_PARM_DESC(default_max_retries,
		 "The maximum number of times to retry a message send before giving up");

/* The default maximum number of users that may register. */
static unsigned int max_users = 30;
module_param(max_users, uint, 0644);
MODULE_PARM_DESC(max_users,
		 "The most users that may use the IPMI stack at one time.");

/* The default maximum number of messages a user may have outstanding. */
static unsigned int max_msgs_per_user = 100;
module_param(max_msgs_per_user, uint, 0644);
MODULE_PARM_DESC(max_msgs_per_user,
		 "The most messages a user may have outstanding.");

/* Call every ~1000 ms. */
#define IPMI_TIMEOUT_TIME 1000

/* How many jiffies does it take to get to the timeout time. */
#define IPMI_TIMEOUT_JIFFIES ((IPMI_TIMEOUT_TIME * HZ) / 1000)

/*
 * Request events from the queue every second (this is the number of
 * IPMI_TIMEOUT_TIMES between event requests).  Hopefully, in the
 * future, IPMI will add a way to know immediately if an event is in
 * the queue and this silliness can go away.
 */
#define IPMI_REQUEST_EV_TIME (1000 / (IPMI_TIMEOUT_TIME))

/* How long should we cache dynamic device IDs? */
#define IPMI_DYN_DEV_ID_EXPIRY (10 * HZ)

/*
 * The main "user" data structure.
 */
struct ipmi_user {
	struct list_head link;

	struct kref refcount;
	refcount_t destroyed;

	/* The upper layer that handles receive messages. */
	const struct ipmi_user_hndl *handler;
	void *handler_data;

	/* The interface this user is bound to. */
	struct ipmi_smi *intf;

	/* Does this user receive IPMI events? */
	bool gets_events;

	atomic_t nr_msgs;
};

struct cmd_rcvr {
	struct list_head link;

	struct ipmi_user *user;
	unsigned char netfn;
	unsigned char cmd;
	unsigned int chans;

	/*
	 * This is used to form a linked list during mass deletion.
	 * Since this is in an RCU list, we cannot use the link above
	 * or change any data until the RCU period completes.  So we
	 * use this next variable during mass deletion so we can have
	 * a list and don't have to wait and restart the search on
	 * every individual deletion of a command.
	 */
	struct cmd_rcvr *next;
};

struct seq_table {
	unsigned int inuse : 1;
	unsigned int broadcast : 1;

	unsigned long timeout;
	unsigned long orig_timeout;
	unsigned int retries_left;

	/*
	 * To verify on an incoming send message response that this is
	 * the message that the response is for, we keep a sequence id
	 * and increment it every time we send a message.
	 */
	long seqid;

	/*
	 * This is held so we can properly respond to the message on a
	 * timeout, and it is used to hold the temporary data for
	 * retransmission, too.
	 */
	struct ipmi_recv_msg *recv_msg;
};

/*
 * Store the information in a msgid (long) to allow us to find a
 * sequence table entry from the msgid.
 */
#define STORE_SEQ_IN_MSGID(seq, seqid) \
	((((seq) & 0x3f) << 26) | ((seqid) & 0x3ffffff))

#define GET_SEQ_FROM_MSGID(msgid, seq, seqid)		\
	do {						\
		seq = (((msgid) >> 26) & 0x3f);		\
		seqid = ((msgid) & 0x3ffffff);		\
	} while (0)

#define NEXT_SEQID(seqid) (((seqid) + 1) & 0x3ffffff)
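
/*
 * Illustrative sketch, not part of the driver: how a sequence number
 * and sequence id round-trip through a msgid.  The helper name is
 * hypothetical.  The seq fits in 6 bits and the seqid in 26 bits, so
 * both values survive the pack/unpack.
 */
static inline bool __maybe_unused example_seq_msgid_roundtrip(void)
{
	unsigned char seq;
	unsigned long seqid;
	long msgid = STORE_SEQ_IN_MSGID(42, 0x123456);

	GET_SEQ_FROM_MSGID(msgid, seq, seqid);
	return seq == 42 && seqid == 0x123456;
}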

#define IPMI_MAX_CHANNELS 16
struct ipmi_channel {
	unsigned char medium;
	unsigned char protocol;
};

struct ipmi_channel_set {
	struct ipmi_channel c[IPMI_MAX_CHANNELS];
};

struct ipmi_my_addrinfo {
	/*
	 * My slave address.  This is initialized to IPMI_BMC_SLAVE_ADDR,
	 * but may be changed by the user.
	 */
	unsigned char address;

	/*
	 * My LUN.  This should generally stay the SMS LUN, but just in
	 * case...
	 */
	unsigned char lun;
};

/*
 * Note that the product id, manufacturer id, guid, and device id are
 * immutable in this structure, so dyn_mutex is not required for
 * accessing those.  If those change on a BMC, a new BMC is allocated.
 */
struct bmc_device {
	struct platform_device pdev;
	struct list_head intfs; /* Interfaces on this BMC. */
	struct ipmi_device_id id;
	struct ipmi_device_id fetch_id;
	int dyn_id_set;
	unsigned long dyn_id_expiry;
	struct mutex dyn_mutex; /* Protects id, intfs, & dyn* */
	guid_t guid;
	guid_t fetch_guid;
	int dyn_guid_set;
	struct kref usecount;
	struct work_struct remove_work;
	unsigned char cc; /* completion code */
};
#define to_bmc_device(x) container_of((x), struct bmc_device, pdev.dev)

static struct workqueue_struct *bmc_remove_work_wq;

static int bmc_get_device_id(struct ipmi_smi *intf, struct bmc_device *bmc,
			     struct ipmi_device_id *id,
			     bool *guid_set, guid_t *guid);

/*
 * Various statistics for IPMI, these index stats[] in the ipmi_smi
 * structure.
 */
enum ipmi_stat_indexes {
	/* Commands we got from the user that were invalid. */
	IPMI_STAT_sent_invalid_commands = 0,

	/* Commands we sent to the MC. */
	IPMI_STAT_sent_local_commands,

	/* Responses from the MC that were delivered to a user. */
	IPMI_STAT_handled_local_responses,

	/* Responses from the MC that were not delivered to a user. */
	IPMI_STAT_unhandled_local_responses,

	/* Commands we sent out to the IPMB bus. */
	IPMI_STAT_sent_ipmb_commands,

	/* Commands sent on the IPMB that had errors on the SEND CMD */
	IPMI_STAT_sent_ipmb_command_errs,

	/* Each retransmit increments this count. */
	IPMI_STAT_retransmitted_ipmb_commands,

	/*
	 * When a message times out (runs out of retransmits) this is
	 * incremented.
	 */
	IPMI_STAT_timed_out_ipmb_commands,

	/*
	 * This is like above, but for broadcasts.  Broadcasts are
	 * *not* included in the above count (they are expected to
	 * time out).
	 */
	IPMI_STAT_timed_out_ipmb_broadcasts,

	/* Responses I have sent to the IPMB bus. */
	IPMI_STAT_sent_ipmb_responses,

	/* The response was delivered to the user. */
	IPMI_STAT_handled_ipmb_responses,

	/* The response had invalid data in it. */
	IPMI_STAT_invalid_ipmb_responses,

	/* The response didn't have anyone waiting for it. */
	IPMI_STAT_unhandled_ipmb_responses,

	/* Commands we sent out over the LAN. */
	IPMI_STAT_sent_lan_commands,

	/* Commands sent on the LAN that had errors on the SEND CMD */
	IPMI_STAT_sent_lan_command_errs,

	/* Each retransmit increments this count. */
	IPMI_STAT_retransmitted_lan_commands,

	/*
	 * When a message times out (runs out of retransmits) this is
	 * incremented.
	 */
	IPMI_STAT_timed_out_lan_commands,

	/* Responses I have sent over the LAN. */
	IPMI_STAT_sent_lan_responses,

	/* The response was delivered to the user. */
	IPMI_STAT_handled_lan_responses,

	/* The response had invalid data in it. */
	IPMI_STAT_invalid_lan_responses,

	/* The response didn't have anyone waiting for it. */
	IPMI_STAT_unhandled_lan_responses,

	/* The command was delivered to the user. */
	IPMI_STAT_handled_commands,

	/* The command had invalid data in it. */
	IPMI_STAT_invalid_commands,

	/* The command didn't have anyone waiting for it. */
	IPMI_STAT_unhandled_commands,

	/* Invalid data in an event. */
	IPMI_STAT_invalid_events,

	/* Events that were received with the proper format. */
	IPMI_STAT_events,

	/* Retransmissions on IPMB that failed. */
	IPMI_STAT_dropped_rexmit_ipmb_commands,

	/* Retransmissions on LAN that failed. */
	IPMI_STAT_dropped_rexmit_lan_commands,

	/* This *must* remain last, add new values above this. */
	IPMI_NUM_STATS
};

#define IPMI_IPMB_NUM_SEQ 64
struct ipmi_smi {
	struct module *owner;

	/* What interface number are we? */
	int intf_num;

	struct kref refcount;

	/* Set when the interface is being unregistered. */
	bool in_shutdown;

	/* Used for a list of interfaces. */
	struct list_head link;

	/*
	 * The list of upper layers that are using me.
	 */
	struct list_head users;
	struct mutex users_mutex;
	atomic_t nr_users;
	struct device_attribute nr_users_devattr;
	struct device_attribute nr_msgs_devattr;
	struct device_attribute maintenance_mode_devattr;

	/* Used for wake ups at startup. */
	wait_queue_head_t waitq;

	/*
	 * Prevents the interface from being unregistered when the
	 * interface is used by being looked up through the BMC
	 * structure.
	 */
	struct mutex bmc_reg_mutex;

	struct bmc_device tmp_bmc;
	struct bmc_device *bmc;
	bool bmc_registered;
	struct list_head bmc_link;
	char *my_dev_name;
	bool in_bmc_register; /* Handle recursive situations.  Yuck. */
	struct work_struct bmc_reg_work;

	const struct ipmi_smi_handlers *handlers;
	void *send_info;

	/* Driver-model device for the system interface. */
	struct device *si_dev;

	/*
	 * A table of sequence numbers for this interface.  We use the
	 * sequence numbers for IPMB messages that go out of the
	 * interface to match them up with their responses.  A routine
	 * is called periodically to time the items in this list.
	 */
	struct mutex seq_lock;
	struct seq_table seq_table[IPMI_IPMB_NUM_SEQ];
	int curr_seq;

	/*
	 * Messages queued for delivery to the user.
	 */
	struct mutex user_msgs_mutex;
	struct list_head user_msgs;

	/*
	 * Messages queued for processing.  If processing fails (out
	 * of memory for instance), they will stay in here to be
	 * processed later in a periodic timer interrupt.  The
	 * workqueue is for handling received messages directly from
	 * the handler.
	 */
	spinlock_t waiting_rcv_msgs_lock;
	struct list_head waiting_rcv_msgs;
	atomic_t watchdog_pretimeouts_to_deliver;
	struct work_struct smi_work;

	spinlock_t xmit_msgs_lock;
	struct list_head xmit_msgs;
	struct ipmi_smi_msg *curr_msg;
	struct list_head hp_xmit_msgs;

	/*
	 * The list of command receivers that are registered for commands
	 * on this interface.
	 */
	struct mutex cmd_rcvrs_mutex;
	struct list_head cmd_rcvrs;

	/*
	 * Events that were queued because no one was there to receive
	 * them.
	 */
	struct mutex events_mutex; /* For dealing with event stuff. */
	struct list_head waiting_events;
	unsigned int waiting_events_count; /* How many events in queue? */
	char event_msg_printed;

	/* How many users are waiting for events? */
	atomic_t event_waiters;
	unsigned int ticks_to_req_ev;

	spinlock_t watch_lock; /* For dealing with watch stuff below. */

	/* How many users are waiting for commands? */
	unsigned int command_waiters;

	/* How many users are waiting for watchdogs? */
	unsigned int watchdog_waiters;

	/* How many users are waiting for message responses? */
	unsigned int response_waiters;

	/*
	 * Tells what the lower layer has last been asked to watch for,
	 * messages and/or watchdogs.  Protected by watch_lock.
	 */
	unsigned int last_watch_mask;

	/*
	 * The event receiver for my BMC, only really used at panic
	 * shutdown as a place to store this.
	 */
	unsigned char event_receiver;
	unsigned char event_receiver_lun;
	unsigned char local_sel_device;
	unsigned char local_event_generator;

	/* For handling of maintenance mode. */
	int maintenance_mode;

#define IPMI_MAINTENANCE_MODE_STATE_OFF 0
#define IPMI_MAINTENANCE_MODE_STATE_FIRMWARE 1
#define IPMI_MAINTENANCE_MODE_STATE_RESET 2
	int maintenance_mode_state;
	int auto_maintenance_timeout;
	spinlock_t maintenance_mode_lock; /* Used in a timer... */

	/*
	 * If we are doing maintenance on something on IPMB, extend
	 * the timeout time to avoid timeouts writing firmware and
	 * such.
	 */
	int ipmb_maintenance_mode_timeout;

	/*
	 * A cheap hack, if this is non-null and a message to an
	 * interface comes in with a NULL user, call this routine with
	 * it.  Note that the message will still be freed by the
	 * caller.  This only works on the system interface.
	 *
	 * Protected by bmc_reg_mutex.
	 */
	void (*null_user_handler)(struct ipmi_smi *intf,
				  struct ipmi_recv_msg *msg);

	/*
	 * When we are scanning the channels for an SMI, this will
	 * tell which channel we are scanning.
	 */
	int curr_channel;

	/* Channel information */
	struct ipmi_channel_set *channel_list;
	unsigned int curr_working_cset; /* First index into the following. */
	struct ipmi_channel_set wchannels[2];
	struct ipmi_my_addrinfo addrinfo[IPMI_MAX_CHANNELS];
	bool channels_ready;

	atomic_t stats[IPMI_NUM_STATS];

	/*
	 * run_to_completion duplicate of smb_info, smi_info
	 * and ipmi_serial_info structures.  Used to decrease the number
	 * of parameters passed by "low" level IPMI code.
	 */
	int run_to_completion;
};
#define to_si_intf_from_dev(device) container_of(device, struct ipmi_smi, dev)

static void __get_guid(struct ipmi_smi *intf);
static void __ipmi_bmc_unregister(struct ipmi_smi *intf);
static int __ipmi_bmc_register(struct ipmi_smi *intf,
			       struct ipmi_device_id *id,
			       bool guid_set, guid_t *guid, int intf_num);
static int __scan_channels(struct ipmi_smi *intf, struct ipmi_device_id *id);

static void free_ipmi_user(struct kref *ref)
{
	struct ipmi_user *user = container_of(ref, struct ipmi_user, refcount);
	struct module *owner;

	owner = user->intf->owner;
	kref_put(&user->intf->refcount, intf_free);
	module_put(owner);
	vfree(user);
}

static void release_ipmi_user(struct ipmi_user *user)
{
	kref_put(&user->refcount, free_ipmi_user);
}

static struct ipmi_user *acquire_ipmi_user(struct ipmi_user *user)
{
	if (!kref_get_unless_zero(&user->refcount))
		return NULL;
	return user;
}

/*
 * The driver model view of the IPMI messaging driver.
 */
static struct platform_driver ipmidriver = {
	.driver = {
		.name = "ipmi",
		.bus = &platform_bus_type
	}
};

/*
 * This mutex keeps us from adding the same BMC twice.
 */
static DEFINE_MUTEX(ipmidriver_mutex);

static LIST_HEAD(ipmi_interfaces);
static DEFINE_MUTEX(ipmi_interfaces_mutex);
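
/*
 * Illustrative note, not part of the driver: the exported entry
 * points below bracket their work with the acquire/release pair
 * above so the user cannot be freed out from under them:
 *
 *	user = acquire_ipmi_user(user);
 *	if (!user)
 *		return -ENODEV;
 *	...do the work...
 *	release_ipmi_user(user);
 */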

/*
 * List of watchers that want to know when SMIs are added and deleted.
 */
static LIST_HEAD(smi_watchers);
static DEFINE_MUTEX(smi_watchers_mutex);

#define ipmi_inc_stat(intf, stat) \
	atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
#define ipmi_get_stat(intf, stat) \
	((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))

static const char * const addr_src_to_str[] = {
	"invalid", "hotmod", "hardcoded", "SPMI", "ACPI", "SMBIOS", "PCI",
	"device-tree", "platform"
};

const char *ipmi_addr_src_to_str(enum ipmi_addr_src src)
{
	if (src >= SI_LAST)
		src = 0; /* Invalid */
	return addr_src_to_str[src];
}
EXPORT_SYMBOL(ipmi_addr_src_to_str);

static int is_lan_addr(struct ipmi_addr *addr)
{
	return addr->addr_type == IPMI_LAN_ADDR_TYPE;
}

static int is_ipmb_addr(struct ipmi_addr *addr)
{
	return addr->addr_type == IPMI_IPMB_ADDR_TYPE;
}

static int is_ipmb_bcast_addr(struct ipmi_addr *addr)
{
	return addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE;
}

static int is_ipmb_direct_addr(struct ipmi_addr *addr)
{
	return addr->addr_type == IPMI_IPMB_DIRECT_ADDR_TYPE;
}

static void free_recv_msg_list(struct list_head *q)
{
	struct ipmi_recv_msg *msg, *msg2;

	list_for_each_entry_safe(msg, msg2, q, link) {
		list_del(&msg->link);
		ipmi_free_recv_msg(msg);
	}
}

static void free_smi_msg_list(struct list_head *q)
{
	struct ipmi_smi_msg *msg, *msg2;

	list_for_each_entry_safe(msg, msg2, q, link) {
		list_del(&msg->link);
		ipmi_free_smi_msg(msg);
	}
}

static void intf_free(struct kref *ref)
{
	struct ipmi_smi *intf = container_of(ref, struct ipmi_smi, refcount);
	int i;
	struct cmd_rcvr *rcvr, *rcvr2;

	free_smi_msg_list(&intf->waiting_rcv_msgs);
	free_recv_msg_list(&intf->waiting_events);

	/*
	 * Wholesale remove all the entries from the list in the
	 * interface.  No need for locks, this is single-threaded.
	 */
	list_for_each_entry_safe(rcvr, rcvr2, &intf->cmd_rcvrs, link)
		kfree(rcvr);

	for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
		if ((intf->seq_table[i].inuse)
		    && (intf->seq_table[i].recv_msg))
			ipmi_free_recv_msg(intf->seq_table[i].recv_msg);
	}

	kfree(intf);
}

int ipmi_smi_watcher_register(struct ipmi_smi_watcher *watcher)
{
	struct ipmi_smi *intf;
	unsigned int count = 0, i;
	int *interfaces = NULL;
	struct device **devices = NULL;
	int rv = 0;

	/*
	 * Make sure the driver is actually initialized, this handles
	 * problems with initialization order.
	 */
	rv = ipmi_init_msghandler();
	if (rv)
		return rv;

	mutex_lock(&smi_watchers_mutex);

	list_add(&watcher->link, &smi_watchers);

	/*
	 * Build an array of ipmi interfaces and fill it in, and
	 * another array of the devices.  We can't call the callback
	 * with ipmi_interfaces_mutex held.  smi_watchers_mutex will
	 * keep things in order for the user.
	 */
	mutex_lock(&ipmi_interfaces_mutex);
	list_for_each_entry(intf, &ipmi_interfaces, link)
		count++;
	if (count > 0) {
		interfaces = kmalloc_array(count, sizeof(*interfaces),
					   GFP_KERNEL);
		if (!interfaces) {
			rv = -ENOMEM;
		} else {
			devices = kmalloc_array(count, sizeof(*devices),
						GFP_KERNEL);
			if (!devices) {
				kfree(interfaces);
				interfaces = NULL;
				rv = -ENOMEM;
			}
		}
		count = 0;
	}
	if (interfaces) {
		list_for_each_entry(intf, &ipmi_interfaces, link) {
			int intf_num = READ_ONCE(intf->intf_num);

			if (intf_num == -1)
				continue;
			devices[count] = intf->si_dev;
			interfaces[count++] = intf_num;
		}
	}
	mutex_unlock(&ipmi_interfaces_mutex);

	if (interfaces) {
		for (i = 0; i < count; i++)
			watcher->new_smi(interfaces[i], devices[i]);
		kfree(interfaces);
		kfree(devices);
	}

	mutex_unlock(&smi_watchers_mutex);

	return rv;
}
EXPORT_SYMBOL(ipmi_smi_watcher_register);
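
/*
 * Illustrative sketch, not part of the driver: how a client module
 * might watch interfaces come and go.  The callback and variable
 * names are hypothetical.
 */
static void example_new_smi(int if_num, struct device *dev)
{
	dev_info(dev, "IPMI interface %d appeared\n", if_num);
}

static void example_smi_gone(int if_num)
{
	pr_info("IPMI interface %d went away\n", if_num);
}

static struct ipmi_smi_watcher example_watcher __maybe_unused = {
	.owner = THIS_MODULE,
	.new_smi = example_new_smi,
	.smi_gone = example_smi_gone,
};

/* The client would call ipmi_smi_watcher_register(&example_watcher). */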

int ipmi_smi_watcher_unregister(struct ipmi_smi_watcher *watcher)
{
	mutex_lock(&smi_watchers_mutex);
	list_del(&watcher->link);
	mutex_unlock(&smi_watchers_mutex);
	return 0;
}
EXPORT_SYMBOL(ipmi_smi_watcher_unregister);

static void
call_smi_watchers(int i, struct device *dev)
{
	struct ipmi_smi_watcher *w;

	list_for_each_entry(w, &smi_watchers, link) {
		if (try_module_get(w->owner)) {
			w->new_smi(i, dev);
			module_put(w->owner);
		}
	}
}

static int
ipmi_addr_equal(struct ipmi_addr *addr1, struct ipmi_addr *addr2)
{
	if (addr1->addr_type != addr2->addr_type)
		return 0;

	if (addr1->channel != addr2->channel)
		return 0;

	if (addr1->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
		struct ipmi_system_interface_addr *smi_addr1
		    = (struct ipmi_system_interface_addr *) addr1;
		struct ipmi_system_interface_addr *smi_addr2
		    = (struct ipmi_system_interface_addr *) addr2;
		return (smi_addr1->lun == smi_addr2->lun);
	}

	if (is_ipmb_addr(addr1) || is_ipmb_bcast_addr(addr1)) {
		struct ipmi_ipmb_addr *ipmb_addr1
		    = (struct ipmi_ipmb_addr *) addr1;
		struct ipmi_ipmb_addr *ipmb_addr2
		    = (struct ipmi_ipmb_addr *) addr2;

		return ((ipmb_addr1->slave_addr == ipmb_addr2->slave_addr)
			&& (ipmb_addr1->lun == ipmb_addr2->lun));
	}

	if (is_ipmb_direct_addr(addr1)) {
		struct ipmi_ipmb_direct_addr *daddr1
			= (struct ipmi_ipmb_direct_addr *) addr1;
		struct ipmi_ipmb_direct_addr *daddr2
			= (struct ipmi_ipmb_direct_addr *) addr2;

		return daddr1->slave_addr == daddr2->slave_addr &&
			daddr1->rq_lun == daddr2->rq_lun &&
			daddr1->rs_lun == daddr2->rs_lun;
	}

	if (is_lan_addr(addr1)) {
		struct ipmi_lan_addr *lan_addr1
			= (struct ipmi_lan_addr *) addr1;
		struct ipmi_lan_addr *lan_addr2
			= (struct ipmi_lan_addr *) addr2;

		return ((lan_addr1->remote_SWID == lan_addr2->remote_SWID)
			&& (lan_addr1->local_SWID == lan_addr2->local_SWID)
			&& (lan_addr1->session_handle
			    == lan_addr2->session_handle)
			&& (lan_addr1->lun == lan_addr2->lun));
	}

	return 1;
}

int ipmi_validate_addr(struct ipmi_addr *addr, int len)
{
	if (len < sizeof(struct ipmi_system_interface_addr))
		return -EINVAL;

	if (addr->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
		if (addr->channel != IPMI_BMC_CHANNEL)
			return -EINVAL;
		return 0;
	}

	if ((addr->channel == IPMI_BMC_CHANNEL)
	    || (addr->channel >= IPMI_MAX_CHANNELS)
	    || (addr->channel < 0))
		return -EINVAL;

	if (is_ipmb_addr(addr) || is_ipmb_bcast_addr(addr)) {
		if (len < sizeof(struct ipmi_ipmb_addr))
			return -EINVAL;
		return 0;
	}

	if (is_ipmb_direct_addr(addr)) {
		struct ipmi_ipmb_direct_addr *daddr = (void *) addr;

		if (addr->channel != 0)
			return -EINVAL;
		if (len < sizeof(struct ipmi_ipmb_direct_addr))
			return -EINVAL;

		if (daddr->slave_addr & 0x01)
			return -EINVAL;
		if (daddr->rq_lun >= 4)
			return -EINVAL;
		if (daddr->rs_lun >= 4)
			return -EINVAL;
		return 0;
	}

	if (is_lan_addr(addr)) {
		if (len < sizeof(struct ipmi_lan_addr))
			return -EINVAL;
		return 0;
	}

	return -EINVAL;
}
EXPORT_SYMBOL(ipmi_validate_addr);
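
/*
 * Illustrative sketch, not part of the driver: building the
 * system-interface address most clients use and validating it.
 * The helper name is hypothetical.
 */
static int __maybe_unused example_check_si_addr(void)
{
	struct ipmi_system_interface_addr addr = {
		.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE,
		.channel = IPMI_BMC_CHANNEL,
		.lun = 0,
	};

	/* Returns 0: the type, channel, and length all check out. */
	return ipmi_validate_addr((struct ipmi_addr *) &addr, sizeof(addr));
}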

unsigned int ipmi_addr_length(int addr_type)
{
	if (addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
		return sizeof(struct ipmi_system_interface_addr);

	if ((addr_type == IPMI_IPMB_ADDR_TYPE)
	    || (addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE))
		return sizeof(struct ipmi_ipmb_addr);

	if (addr_type == IPMI_IPMB_DIRECT_ADDR_TYPE)
		return sizeof(struct ipmi_ipmb_direct_addr);

	if (addr_type == IPMI_LAN_ADDR_TYPE)
		return sizeof(struct ipmi_lan_addr);

	return 0;
}
EXPORT_SYMBOL(ipmi_addr_length);

static int deliver_response(struct ipmi_smi *intf, struct ipmi_recv_msg *msg)
{
	int rv = 0;

	if (!msg->user) {
		/* Special handling for NULL users. */
		if (intf->null_user_handler) {
			intf->null_user_handler(intf, msg);
		} else {
			/* No handler, so give up. */
			rv = -EINVAL;
		}
		ipmi_free_recv_msg(msg);
	} else if (oops_in_progress) {
		/*
		 * If we are running in the panic context, calling the
		 * receive handler doesn't have much meaning and has a
		 * deadlock risk, so simply skip it in that case.
		 */
		ipmi_free_recv_msg(msg);
	} else {
		/*
		 * Deliver it in smi_work.  The message will hold a
		 * refcount to the user.
		 */
		mutex_lock(&intf->user_msgs_mutex);
		list_add_tail(&msg->link, &intf->user_msgs);
		mutex_unlock(&intf->user_msgs_mutex);
		queue_work(system_wq, &intf->smi_work);
	}

	return rv;
}

static void deliver_local_response(struct ipmi_smi *intf,
				   struct ipmi_recv_msg *msg)
{
	if (deliver_response(intf, msg))
		ipmi_inc_stat(intf, unhandled_local_responses);
	else
		ipmi_inc_stat(intf, handled_local_responses);
}

static void deliver_err_response(struct ipmi_smi *intf,
				 struct ipmi_recv_msg *msg, int err)
{
	msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
	msg->msg_data[0] = err;
	msg->msg.netfn |= 1; /* Convert to a response. */
	msg->msg.data_len = 1;
	msg->msg.data = msg->msg_data;
	deliver_local_response(intf, msg);
}

static void smi_add_watch(struct ipmi_smi *intf, unsigned int flags)
{
	unsigned long iflags;

	if (!intf->handlers->set_need_watch)
		return;

	spin_lock_irqsave(&intf->watch_lock, iflags);
	if (flags & IPMI_WATCH_MASK_CHECK_MESSAGES)
		intf->response_waiters++;

	if (flags & IPMI_WATCH_MASK_CHECK_WATCHDOG)
		intf->watchdog_waiters++;

	if (flags & IPMI_WATCH_MASK_CHECK_COMMANDS)
		intf->command_waiters++;

	if ((intf->last_watch_mask & flags) != flags) {
		intf->last_watch_mask |= flags;
		intf->handlers->set_need_watch(intf->send_info,
					       intf->last_watch_mask);
	}
	spin_unlock_irqrestore(&intf->watch_lock, iflags);
}

static void smi_remove_watch(struct ipmi_smi *intf, unsigned int flags)
{
	unsigned long iflags;

	if (!intf->handlers->set_need_watch)
		return;

	spin_lock_irqsave(&intf->watch_lock, iflags);
	if (flags & IPMI_WATCH_MASK_CHECK_MESSAGES)
		intf->response_waiters--;

	if (flags & IPMI_WATCH_MASK_CHECK_WATCHDOG)
		intf->watchdog_waiters--;

	if (flags & IPMI_WATCH_MASK_CHECK_COMMANDS)
		intf->command_waiters--;

	flags = 0;
	if (intf->response_waiters)
		flags |= IPMI_WATCH_MASK_CHECK_MESSAGES;
	if (intf->watchdog_waiters)
		flags |= IPMI_WATCH_MASK_CHECK_WATCHDOG;
	if (intf->command_waiters)
		flags |= IPMI_WATCH_MASK_CHECK_COMMANDS;

	if (intf->last_watch_mask != flags) {
		intf->last_watch_mask = flags;
		intf->handlers->set_need_watch(intf->send_info,
					       intf->last_watch_mask);
	}
	spin_unlock_irqrestore(&intf->watch_lock, iflags);
}

/*
 * Find the next sequence number not being used and add the given
 * message with the given timeout to the sequence table.  This must be
 * called with the interface's seq_lock held.
 */
static int intf_next_seq(struct ipmi_smi *intf,
			 struct ipmi_recv_msg *recv_msg,
			 unsigned long timeout,
			 int retries,
			 int broadcast,
			 unsigned char *seq,
			 long *seqid)
{
	int rv = 0;
	unsigned int i;

	if (timeout == 0)
		timeout = default_retry_ms;
	if (retries < 0)
		retries = default_max_retries;

	for (i = intf->curr_seq; (i+1)%IPMI_IPMB_NUM_SEQ != intf->curr_seq;
	     i = (i+1)%IPMI_IPMB_NUM_SEQ) {
		if (!intf->seq_table[i].inuse)
			break;
	}

	if (!intf->seq_table[i].inuse) {
		intf->seq_table[i].recv_msg = recv_msg;

		/*
		 * Start with the maximum timeout, when the send response
		 * comes in we will start the real timer.
		 */
		intf->seq_table[i].timeout = MAX_MSG_TIMEOUT;
		intf->seq_table[i].orig_timeout = timeout;
		intf->seq_table[i].retries_left = retries;
		intf->seq_table[i].broadcast = broadcast;
		intf->seq_table[i].inuse = 1;
		intf->seq_table[i].seqid = NEXT_SEQID(intf->seq_table[i].seqid);
		*seq = i;
		*seqid = intf->seq_table[i].seqid;
		intf->curr_seq = (i+1)%IPMI_IPMB_NUM_SEQ;
		smi_add_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES);
		need_waiter(intf);
	} else {
		rv = -EAGAIN;
	}

	return rv;
}
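
/*
 * Illustrative note, not part of the driver: the IPMB and LAN send
 * paths allocate a slot and embed it in the msgid so the eventual
 * response can be matched back up, roughly:
 *
 *	mutex_lock(&intf->seq_lock);
 *	rv = intf_next_seq(intf, recv_msg, retry_time_ms, retries, 0,
 *			   &ipmb_seq, &seqid);
 *	if (!rv)
 *		msgid = STORE_SEQ_IN_MSGID(ipmb_seq, seqid);
 *	mutex_unlock(&intf->seq_lock);
 */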

/*
 * Return the receive message for the given sequence number and
 * release the sequence number so it can be reused.  Some other data
 * is passed in to be sure the message matches up correctly (to help
 * guard against messages coming in after their timeout and the
 * sequence number being reused).
 */
static int intf_find_seq(struct ipmi_smi *intf,
			 unsigned char seq,
			 short channel,
			 unsigned char cmd,
			 unsigned char netfn,
			 struct ipmi_addr *addr,
			 struct ipmi_recv_msg **recv_msg)
{
	int rv = -ENODEV;

	if (seq >= IPMI_IPMB_NUM_SEQ)
		return -EINVAL;

	mutex_lock(&intf->seq_lock);
	if (intf->seq_table[seq].inuse) {
		struct ipmi_recv_msg *msg = intf->seq_table[seq].recv_msg;

		if ((msg->addr.channel == channel) && (msg->msg.cmd == cmd)
		    && (msg->msg.netfn == netfn)
		    && (ipmi_addr_equal(addr, &msg->addr))) {
			*recv_msg = msg;
			intf->seq_table[seq].inuse = 0;
			smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES);
			rv = 0;
		}
	}
	mutex_unlock(&intf->seq_lock);

	return rv;
}


/* Start the timer for a specific sequence table entry. */
static int intf_start_seq_timer(struct ipmi_smi *intf,
				long msgid)
{
	int rv = -ENODEV;
	unsigned char seq;
	unsigned long seqid;


	GET_SEQ_FROM_MSGID(msgid, seq, seqid);

	mutex_lock(&intf->seq_lock);
	/*
	 * We do this verification because the user can be deleted
	 * while a message is outstanding.
	 */
	if ((intf->seq_table[seq].inuse)
	    && (intf->seq_table[seq].seqid == seqid)) {
		struct seq_table *ent = &intf->seq_table[seq];
		ent->timeout = ent->orig_timeout;
		rv = 0;
	}
	mutex_unlock(&intf->seq_lock);

	return rv;
}

/* Got an error for the send message for a specific sequence number. */
static int intf_err_seq(struct ipmi_smi *intf,
			long msgid,
			unsigned int err)
{
	int rv = -ENODEV;
	unsigned char seq;
	unsigned long seqid;
	struct ipmi_recv_msg *msg = NULL;


	GET_SEQ_FROM_MSGID(msgid, seq, seqid);

	mutex_lock(&intf->seq_lock);
	/*
	 * We do this verification because the user can be deleted
	 * while a message is outstanding.
	 */
	if ((intf->seq_table[seq].inuse)
	    && (intf->seq_table[seq].seqid == seqid)) {
		struct seq_table *ent = &intf->seq_table[seq];

		ent->inuse = 0;
		smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES);
		msg = ent->recv_msg;
		rv = 0;
	}
	mutex_unlock(&intf->seq_lock);

	if (msg)
		deliver_err_response(intf, msg, err);

	return rv;
}

int ipmi_create_user(unsigned int if_num,
		     const struct ipmi_user_hndl *handler,
		     void *handler_data,
		     struct ipmi_user **user)
{
	struct ipmi_user *new_user = NULL;
	int rv = 0;
	struct ipmi_smi *intf;

	/*
	 * There is no module usecount here, because it's not
	 * required.  Since this can only be used by and called from
	 * other modules, they will implicitly use this module, and
	 * thus this can't be removed unless the other modules are
	 * removed.
	 */

	if (handler == NULL)
		return -EINVAL;

	/*
	 * Make sure the driver is actually initialized, this handles
	 * problems with initialization order.
	 */
	rv = ipmi_init_msghandler();
	if (rv)
		return rv;

	mutex_lock(&ipmi_interfaces_mutex);
	list_for_each_entry(intf, &ipmi_interfaces, link) {
		if (intf->intf_num == if_num)
			goto found;
	}
	/* Not found, return an error */
	rv = -EINVAL;
	goto out_unlock;

found:
	if (intf->in_shutdown) {
		rv = -ENODEV;
		goto out_unlock;
	}

	if (atomic_add_return(1, &intf->nr_users) > max_users) {
		rv = -EBUSY;
		goto out_kfree;
	}

	new_user = vzalloc(sizeof(*new_user));
	if (!new_user) {
		rv = -ENOMEM;
		goto out_kfree;
	}

	if (!try_module_get(intf->owner)) {
		rv = -ENODEV;
		goto out_kfree;
	}

	/* Note that each existing user holds a refcount to the interface. */
	kref_get(&intf->refcount);

	atomic_set(&new_user->nr_msgs, 0);
	kref_init(&new_user->refcount);
	refcount_set(&new_user->destroyed, 1);
	kref_get(&new_user->refcount); /* Destroy owns a refcount. */
	new_user->handler = handler;
	new_user->handler_data = handler_data;
	new_user->intf = intf;
	new_user->gets_events = false;

	mutex_lock(&intf->users_mutex);
	mutex_lock(&intf->seq_lock);
	list_add(&new_user->link, &intf->users);
	mutex_unlock(&intf->seq_lock);
	mutex_unlock(&intf->users_mutex);

	if (handler->ipmi_watchdog_pretimeout)
		/* User wants pretimeouts, so make sure to watch for them. */
		smi_add_watch(intf, IPMI_WATCH_MASK_CHECK_WATCHDOG);

out_kfree:
	if (rv) {
		atomic_dec(&intf->nr_users);
		vfree(new_user);
	} else {
		*user = new_user;
	}
out_unlock:
	mutex_unlock(&ipmi_interfaces_mutex);
	return rv;
}
EXPORT_SYMBOL(ipmi_create_user);
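
/*
 * Illustrative sketch, not part of the driver: a minimal in-kernel
 * client of this interface.  The names are hypothetical, and a real
 * client would also check the return codes and provide a shutdown
 * handler if it needs ordered teardown.
 */
static void example_recv_hndl(struct ipmi_recv_msg *msg, void *handler_data)
{
	/* Inspect msg->recv_type / msg->msg here, then free the message. */
	ipmi_free_recv_msg(msg);
}

static const struct ipmi_user_hndl example_hndl = {
	.ipmi_recv_hndl = example_recv_hndl,
};

/*
 * In the client's init, against interface 0:
 *
 *	struct ipmi_user *user;
 *	int rv = ipmi_create_user(0, &example_hndl, NULL, &user);
 *
 * with ipmi_destroy_user(user) on teardown.
 */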

int ipmi_get_smi_info(int if_num, struct ipmi_smi_info *data)
{
	int rv = -EINVAL;
	struct ipmi_smi *intf;

	mutex_lock(&ipmi_interfaces_mutex);
	list_for_each_entry(intf, &ipmi_interfaces, link) {
		if (intf->intf_num == if_num) {
			if (!intf->handlers->get_smi_info)
				rv = -ENOTTY;
			else
				rv = intf->handlers->get_smi_info(intf->send_info, data);
			break;
		}
	}
	mutex_unlock(&ipmi_interfaces_mutex);

	return rv;
}
EXPORT_SYMBOL(ipmi_get_smi_info);

/* Must be called with intf->users_mutex held. */
static void _ipmi_destroy_user(struct ipmi_user *user)
{
	struct ipmi_smi *intf = user->intf;
	int i;
	struct cmd_rcvr *rcvr;
	struct cmd_rcvr *rcvrs = NULL;
	struct ipmi_recv_msg *msg, *msg2;

	if (!refcount_dec_if_one(&user->destroyed))
		return;

	if (user->handler->shutdown)
		user->handler->shutdown(user->handler_data);

	if (user->handler->ipmi_watchdog_pretimeout)
		smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_WATCHDOG);

	if (user->gets_events)
		atomic_dec(&intf->event_waiters);

	/* Remove the user from the interface's list and sequence table. */
	list_del(&user->link);
	atomic_dec(&intf->nr_users);

	mutex_lock(&intf->seq_lock);
	for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
		if (intf->seq_table[i].inuse
		    && (intf->seq_table[i].recv_msg->user == user)) {
			intf->seq_table[i].inuse = 0;
			smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES);
			ipmi_free_recv_msg(intf->seq_table[i].recv_msg);
		}
	}
	mutex_unlock(&intf->seq_lock);

	/*
	 * Remove the user from the command receiver's table.  First
	 * we build a list of everything (not using the standard link,
	 * since other things may be using it till we do
	 * synchronize_rcu()) then free everything in that list.
	 */
	mutex_lock(&intf->cmd_rcvrs_mutex);
	list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link,
				lockdep_is_held(&intf->cmd_rcvrs_mutex)) {
		if (rcvr->user == user) {
			list_del_rcu(&rcvr->link);
			rcvr->next = rcvrs;
			rcvrs = rcvr;
		}
	}
	mutex_unlock(&intf->cmd_rcvrs_mutex);
	while (rcvrs) {
		rcvr = rcvrs;
		rcvrs = rcvr->next;
		kfree(rcvr);
	}

	mutex_lock(&intf->user_msgs_mutex);
	list_for_each_entry_safe(msg, msg2, &intf->user_msgs, link) {
		if (msg->user != user)
			continue;
		list_del(&msg->link);
		ipmi_free_recv_msg(msg);
	}
	mutex_unlock(&intf->user_msgs_mutex);

	release_ipmi_user(user);
}

void ipmi_destroy_user(struct ipmi_user *user)
{
	struct ipmi_smi *intf = user->intf;

	mutex_lock(&intf->users_mutex);
	_ipmi_destroy_user(user);
	mutex_unlock(&intf->users_mutex);

	kref_put(&user->refcount, free_ipmi_user);
}
EXPORT_SYMBOL(ipmi_destroy_user);

int ipmi_get_version(struct ipmi_user *user,
		     unsigned char *major,
		     unsigned char *minor)
{
	struct ipmi_device_id id;
	int rv;

	user = acquire_ipmi_user(user);
	if (!user)
		return -ENODEV;

	rv = bmc_get_device_id(user->intf, NULL, &id, NULL, NULL);
	if (!rv) {
		*major = ipmi_version_major(&id);
		*minor = ipmi_version_minor(&id);
	}
	release_ipmi_user(user);

	return rv;
}
EXPORT_SYMBOL(ipmi_get_version);

int ipmi_set_my_address(struct ipmi_user *user,
			unsigned int channel,
			unsigned char address)
{
	int rv = 0;

	user = acquire_ipmi_user(user);
	if (!user)
		return -ENODEV;

	if (channel >= IPMI_MAX_CHANNELS) {
		rv = -EINVAL;
	} else {
		channel = array_index_nospec(channel, IPMI_MAX_CHANNELS);
		user->intf->addrinfo[channel].address = address;
	}
	release_ipmi_user(user);

	return rv;
}
EXPORT_SYMBOL(ipmi_set_my_address);

int ipmi_get_my_address(struct ipmi_user *user,
			unsigned int channel,
			unsigned char *address)
{
	int rv = 0;

	user = acquire_ipmi_user(user);
	if (!user)
		return -ENODEV;

	if (channel >= IPMI_MAX_CHANNELS) {
		rv = -EINVAL;
	} else {
		channel = array_index_nospec(channel, IPMI_MAX_CHANNELS);
		*address = user->intf->addrinfo[channel].address;
	}
	release_ipmi_user(user);

	return rv;
}
EXPORT_SYMBOL(ipmi_get_my_address);

int ipmi_set_my_LUN(struct ipmi_user *user,
		    unsigned int channel,
		    unsigned char LUN)
{
	int rv = 0;

	user = acquire_ipmi_user(user);
	if (!user)
		return -ENODEV;

	if (channel >= IPMI_MAX_CHANNELS) {
		rv = -EINVAL;
	} else {
		channel = array_index_nospec(channel, IPMI_MAX_CHANNELS);
		user->intf->addrinfo[channel].lun = LUN & 0x3;
	}
	release_ipmi_user(user);

	return rv;
}
EXPORT_SYMBOL(ipmi_set_my_LUN);

int ipmi_get_my_LUN(struct ipmi_user *user,
		    unsigned int channel,
		    unsigned char *address)
{
	int rv = 0;

	user = acquire_ipmi_user(user);
	if (!user)
		return -ENODEV;

	if (channel >= IPMI_MAX_CHANNELS) {
		rv = -EINVAL;
	} else {
		channel = array_index_nospec(channel, IPMI_MAX_CHANNELS);
		*address = user->intf->addrinfo[channel].lun;
	}
	release_ipmi_user(user);

	return rv;
}
EXPORT_SYMBOL(ipmi_get_my_LUN);

int ipmi_get_maintenance_mode(struct ipmi_user *user)
{
	int mode;
	unsigned long flags;

	user = acquire_ipmi_user(user);
	if (!user)
		return -ENODEV;

	spin_lock_irqsave(&user->intf->maintenance_mode_lock, flags);
	mode = user->intf->maintenance_mode;
	spin_unlock_irqrestore(&user->intf->maintenance_mode_lock, flags);
	release_ipmi_user(user);

	return mode;
}
EXPORT_SYMBOL(ipmi_get_maintenance_mode);

static void maintenance_mode_update(struct ipmi_smi *intf)
{
	if (intf->handlers->set_maintenance_mode)
		/*
		 * Lower level drivers only care about firmware mode
		 * as it affects their timing.  They don't care about
		 * reset, which disables all commands for a while.
		 */
		intf->handlers->set_maintenance_mode(
			intf->send_info,
			(intf->maintenance_mode_state ==
			 IPMI_MAINTENANCE_MODE_STATE_FIRMWARE));
}

int ipmi_set_maintenance_mode(struct ipmi_user *user, int mode)
{
	int rv = 0;
	unsigned long flags;
	struct ipmi_smi *intf = user->intf;

	user = acquire_ipmi_user(user);
	if (!user)
		return -ENODEV;

	spin_lock_irqsave(&intf->maintenance_mode_lock, flags);
	if (intf->maintenance_mode != mode) {
		switch (mode) {
		case IPMI_MAINTENANCE_MODE_AUTO:
			/* Just leave it alone. */
			break;

		case IPMI_MAINTENANCE_MODE_OFF:
			intf->maintenance_mode_state =
				IPMI_MAINTENANCE_MODE_STATE_OFF;
			break;

		case IPMI_MAINTENANCE_MODE_ON:
			intf->maintenance_mode_state =
				IPMI_MAINTENANCE_MODE_STATE_FIRMWARE;
			break;

		default:
			rv = -EINVAL;
			goto out_unlock;
		}
		intf->maintenance_mode = mode;

		maintenance_mode_update(intf);
	}
out_unlock:
	spin_unlock_irqrestore(&intf->maintenance_mode_lock, flags);
	release_ipmi_user(user);

	return rv;
}
EXPORT_SYMBOL(ipmi_set_maintenance_mode);

int ipmi_set_gets_events(struct ipmi_user *user, bool val)
{
	struct ipmi_smi *intf = user->intf;
	struct ipmi_recv_msg *msg, *msg2;
	struct list_head msgs;

	user = acquire_ipmi_user(user);
	if (!user)
		return -ENODEV;

	INIT_LIST_HEAD(&msgs);

	mutex_lock(&intf->events_mutex);
	if (user->gets_events == val)
		goto out;

	user->gets_events = val;

	if (val) {
		if (atomic_inc_return(&intf->event_waiters) == 1)
			need_waiter(intf);
	} else {
		atomic_dec(&intf->event_waiters);
	}

	/* Deliver any queued events. */
	while (user->gets_events && !list_empty(&intf->waiting_events)) {
		list_for_each_entry_safe(msg, msg2, &intf->waiting_events, link)
			list_move_tail(&msg->link, &msgs);
		intf->waiting_events_count = 0;
		if (intf->event_msg_printed) {
			dev_warn(intf->si_dev, "Event queue no longer full\n");
			intf->event_msg_printed = 0;
		}

		list_for_each_entry_safe(msg, msg2, &msgs, link) {
			ipmi_set_recv_msg_user(msg, user);
			deliver_local_response(intf, msg);
		}
	}

out:
	mutex_unlock(&intf->events_mutex);
	release_ipmi_user(user);

	return 0;
}
EXPORT_SYMBOL(ipmi_set_gets_events);
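
/*
 * Illustrative note, not part of the driver: a client that wants
 * asynchronous events calls this once after creating its user, e.g.
 *
 *	rv = ipmi_set_gets_events(user, true);
 *
 * and any events queued while no one was listening are delivered
 * immediately through the client's ipmi_recv_hndl.
 */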

static struct cmd_rcvr *find_cmd_rcvr(struct ipmi_smi *intf,
				      unsigned char netfn,
				      unsigned char cmd,
				      unsigned char chan)
{
	struct cmd_rcvr *rcvr;

	list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link,
				lockdep_is_held(&intf->cmd_rcvrs_mutex)) {
		if ((rcvr->netfn == netfn) && (rcvr->cmd == cmd)
		    && (rcvr->chans & (1 << chan)))
			return rcvr;
	}
	return NULL;
}

static int is_cmd_rcvr_exclusive(struct ipmi_smi *intf,
				 unsigned char netfn,
				 unsigned char cmd,
				 unsigned int chans)
{
	struct cmd_rcvr *rcvr;

	list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link,
				lockdep_is_held(&intf->cmd_rcvrs_mutex)) {
		if ((rcvr->netfn == netfn) && (rcvr->cmd == cmd)
		    && (rcvr->chans & chans))
			return 0;
	}
	return 1;
}

int ipmi_register_for_cmd(struct ipmi_user *user,
			  unsigned char netfn,
			  unsigned char cmd,
			  unsigned int chans)
{
	struct ipmi_smi *intf = user->intf;
	struct cmd_rcvr *rcvr;
	int rv = 0;

	user = acquire_ipmi_user(user);
	if (!user)
		return -ENODEV;

	rcvr = kmalloc(sizeof(*rcvr), GFP_KERNEL);
	if (!rcvr) {
		rv = -ENOMEM;
		goto out_release;
	}
	rcvr->cmd = cmd;
	rcvr->netfn = netfn;
	rcvr->chans = chans;
	rcvr->user = user;

	mutex_lock(&intf->cmd_rcvrs_mutex);
	/* Make sure the command/netfn is not already registered. */
	if (!is_cmd_rcvr_exclusive(intf, netfn, cmd, chans)) {
		rv = -EBUSY;
		goto out_unlock;
	}

	smi_add_watch(intf, IPMI_WATCH_MASK_CHECK_COMMANDS);

	list_add_rcu(&rcvr->link, &intf->cmd_rcvrs);

out_unlock:
	mutex_unlock(&intf->cmd_rcvrs_mutex);
	if (rv)
		kfree(rcvr);
out_release:
	release_ipmi_user(user);

	return rv;
}
EXPORT_SYMBOL(ipmi_register_for_cmd);
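
/*
 * Illustrative sketch, not part of the driver: a user that wants to
 * handle a particular incoming command registers for it on the
 * channels it cares about.  The netfn/cmd values and the helper name
 * are examples only.
 */
static int __maybe_unused example_register_cmd(struct ipmi_user *user)
{
	/* Example OEM netfn 0x30, command 0x01, channel 0 only. */
	return ipmi_register_for_cmd(user, 0x30, 0x01, 1 << 0);
}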

int ipmi_unregister_for_cmd(struct ipmi_user *user,
			    unsigned char netfn,
			    unsigned char cmd,
			    unsigned int chans)
{
	struct ipmi_smi *intf = user->intf;
	struct cmd_rcvr *rcvr;
	struct cmd_rcvr *rcvrs = NULL;
	int i, rv = -ENOENT;

	user = acquire_ipmi_user(user);
	if (!user)
		return -ENODEV;

	mutex_lock(&intf->cmd_rcvrs_mutex);
	for (i = 0; i < IPMI_NUM_CHANNELS; i++) {
		if (((1 << i) & chans) == 0)
			continue;
		rcvr = find_cmd_rcvr(intf, netfn, cmd, i);
		if (rcvr == NULL)
			continue;
		if (rcvr->user == user) {
			rv = 0;
			rcvr->chans &= ~chans;
			if (rcvr->chans == 0) {
				list_del_rcu(&rcvr->link);
				rcvr->next = rcvrs;
				rcvrs = rcvr;
			}
		}
	}
	mutex_unlock(&intf->cmd_rcvrs_mutex);
	synchronize_rcu();
	release_ipmi_user(user);
	while (rcvrs) {
		smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_COMMANDS);
		rcvr = rcvrs;
		rcvrs = rcvr->next;
		kfree(rcvr);
	}

	return rv;
}
EXPORT_SYMBOL(ipmi_unregister_for_cmd);

unsigned char
ipmb_checksum(unsigned char *data, int size)
{
	unsigned char csum = 0;

	for (; size > 0; size--, data++)
		csum += *data;

	return -csum;
}
EXPORT_SYMBOL(ipmb_checksum);
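
/*
 * Illustrative sketch, not part of the driver: the IPMB checksum is
 * the two's complement of the byte sum, so adding the protected
 * bytes and the checksum together yields zero (mod 256).  The
 * helper name and data are examples only.
 */
static bool __maybe_unused example_ipmb_checksum_ok(void)
{
	unsigned char data[2] = { 0x20, 0x18 }; /* example header bytes */
	unsigned char csum = ipmb_checksum(data, sizeof(data));

	return (unsigned char)(data[0] + data[1] + csum) == 0;
}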

static inline void format_ipmb_msg(struct ipmi_smi_msg *smi_msg,
				   struct kernel_ipmi_msg *msg,
				   struct ipmi_ipmb_addr *ipmb_addr,
				   long msgid,
				   unsigned char ipmb_seq,
				   int broadcast,
				   unsigned char source_address,
				   unsigned char source_lun)
{
	int i = broadcast;

	/* Format the IPMB header data. */
	smi_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
	smi_msg->data[1] = IPMI_SEND_MSG_CMD;
	smi_msg->data[2] = ipmb_addr->channel;
	if (broadcast)
		smi_msg->data[3] = 0;
	smi_msg->data[i+3] = ipmb_addr->slave_addr;
	smi_msg->data[i+4] = (msg->netfn << 2) | (ipmb_addr->lun & 0x3);
	smi_msg->data[i+5] = ipmb_checksum(&smi_msg->data[i + 3], 2);
	smi_msg->data[i+6] = source_address;
	smi_msg->data[i+7] = (ipmb_seq << 2) | source_lun;
	smi_msg->data[i+8] = msg->cmd;

	/* Now tack on the data to the message. */
	if (msg->data_len > 0)
		memcpy(&smi_msg->data[i + 9], msg->data, msg->data_len);
	smi_msg->data_size = msg->data_len + 9;

	/* Now calculate the checksum and tack it on. */
	smi_msg->data[i+smi_msg->data_size]
		= ipmb_checksum(&smi_msg->data[i + 6], smi_msg->data_size - 6);

	/*
	 * Add on the checksum size and the offset from the
	 * broadcast.
	 */
	smi_msg->data_size += 1 + i;

	smi_msg->msgid = msgid;
}

static inline void format_lan_msg(struct ipmi_smi_msg *smi_msg,
				  struct kernel_ipmi_msg *msg,
				  struct ipmi_lan_addr *lan_addr,
				  long msgid,
				  unsigned char ipmb_seq,
				  unsigned char source_lun)
{
	/* Format the LAN header data. */
	smi_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
	smi_msg->data[1] = IPMI_SEND_MSG_CMD;
	smi_msg->data[2] = lan_addr->channel;
	smi_msg->data[3] = lan_addr->session_handle;
	smi_msg->data[4] = lan_addr->remote_SWID;
	smi_msg->data[5] = (msg->netfn << 2) | (lan_addr->lun & 0x3);
	smi_msg->data[6] = ipmb_checksum(&smi_msg->data[4], 2);
	smi_msg->data[7] = lan_addr->local_SWID;
	smi_msg->data[8] = (ipmb_seq << 2) | source_lun;
	smi_msg->data[9] = msg->cmd;

	/* Now tack on the data to the message. */
	if (msg->data_len > 0)
		memcpy(&smi_msg->data[10], msg->data, msg->data_len);
	smi_msg->data_size = msg->data_len + 10;

	/* Now calculate the checksum and tack it on. */
	smi_msg->data[smi_msg->data_size]
		= ipmb_checksum(&smi_msg->data[7], smi_msg->data_size - 7);

	/* Add on the checksum size. */
	smi_msg->data_size += 1;

	smi_msg->msgid = msgid;
}

static struct ipmi_smi_msg *smi_add_send_msg(struct ipmi_smi *intf,
					     struct ipmi_smi_msg *smi_msg,
					     int priority)
{
	if (intf->curr_msg) {
		if (priority > 0)
			list_add_tail(&smi_msg->link, &intf->hp_xmit_msgs);
		else
			list_add_tail(&smi_msg->link, &intf->xmit_msgs);
		smi_msg = NULL;
	} else {
		intf->curr_msg = smi_msg;
	}

	return smi_msg;
}

static void smi_send(struct ipmi_smi *intf,
		     const struct ipmi_smi_handlers *handlers,
		     struct ipmi_smi_msg *smi_msg, int priority)
{
	int run_to_completion = READ_ONCE(intf->run_to_completion);
	unsigned long flags = 0;

	if (!run_to_completion)
		spin_lock_irqsave(&intf->xmit_msgs_lock, flags);
	smi_msg = smi_add_send_msg(intf, smi_msg, priority);
	if (!run_to_completion)
		spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);

	if (smi_msg)
		handlers->sender(intf->send_info, smi_msg);
}

static bool is_maintenance_mode_cmd(struct kernel_ipmi_msg *msg)
{
	return (((msg->netfn == IPMI_NETFN_APP_REQUEST)
		 && ((msg->cmd == IPMI_COLD_RESET_CMD)
		     || (msg->cmd == IPMI_WARM_RESET_CMD)))
		|| (msg->netfn == IPMI_NETFN_FIRMWARE_REQUEST));
}

static int i_ipmi_req_sysintf(struct ipmi_smi *intf,
			      struct ipmi_addr *addr,
			      long msgid,
			      struct kernel_ipmi_msg *msg,
			      struct ipmi_smi_msg *smi_msg,
			      struct ipmi_recv_msg *recv_msg,
			      int retries,
			      unsigned int retry_time_ms)
{
	struct ipmi_system_interface_addr *smi_addr;

	if (msg->netfn & 1)
		/* Responses are not allowed to the SMI. */
		return -EINVAL;

	smi_addr = (struct ipmi_system_interface_addr *) addr;
	if (smi_addr->lun > 3) {
		ipmi_inc_stat(intf, sent_invalid_commands);
		return -EINVAL;
	}

	memcpy(&recv_msg->addr, smi_addr, sizeof(*smi_addr));

	if ((msg->netfn == IPMI_NETFN_APP_REQUEST)
	    && ((msg->cmd == IPMI_SEND_MSG_CMD)
		|| (msg->cmd == IPMI_GET_MSG_CMD)
		|| (msg->cmd == IPMI_READ_EVENT_MSG_BUFFER_CMD))) {
		/*
		 * We don't let the user do these, since we manage
		 * the sequence numbers.
		 */
		ipmi_inc_stat(intf, sent_invalid_commands);
		return -EINVAL;
	}

	if (is_maintenance_mode_cmd(msg)) {
		unsigned long flags;
		int newst;

		if (msg->netfn == IPMI_NETFN_FIRMWARE_REQUEST)
			newst = IPMI_MAINTENANCE_MODE_STATE_FIRMWARE;
		else
			newst = IPMI_MAINTENANCE_MODE_STATE_RESET;

		spin_lock_irqsave(&intf->maintenance_mode_lock, flags);
		intf->auto_maintenance_timeout = maintenance_mode_timeout_ms;
		if (!intf->maintenance_mode
		    && intf->maintenance_mode_state < newst) {
			intf->maintenance_mode_state = newst;
			maintenance_mode_update(intf);
			mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
		}
		spin_unlock_irqrestore(&intf->maintenance_mode_lock,
				       flags);
	}

	if (msg->data_len + 2 > IPMI_MAX_MSG_LENGTH) {
		ipmi_inc_stat(intf, sent_invalid_commands);
		return -EMSGSIZE;
	}

	smi_msg->data[0] = (msg->netfn << 2) | (smi_addr->lun & 0x3);
	smi_msg->data[1] = msg->cmd;
	smi_msg->msgid = msgid;
	smi_msg->recv_msg = recv_msg;
	if (msg->data_len > 0)
		memcpy(&smi_msg->data[2], msg->data, msg->data_len);
	smi_msg->data_size = msg->data_len + 2;
	ipmi_inc_stat(intf, sent_local_commands);

	return 0;
}
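
/*
 * Illustrative note, not part of the driver: with the encoding above,
 * a Get Device ID request to the system interface (netfn 0x06, cmd
 * 0x01, LUN 0) is laid out as:
 *
 *	data[0] = (0x06 << 2) | 0 = 0x18
 *	data[1] = 0x01
 *
 * with any request data following from data[2].
 */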

static int i_ipmi_req_ipmb(struct ipmi_smi *intf,
			   struct ipmi_addr *addr,
			   long msgid,
			   struct kernel_ipmi_msg *msg,
			   struct ipmi_smi_msg *smi_msg,
			   struct ipmi_recv_msg *recv_msg,
			   unsigned char source_address,
			   unsigned char source_lun,
			   int retries,
			   unsigned int retry_time_ms)
{
	struct ipmi_ipmb_addr *ipmb_addr;
	unsigned char ipmb_seq;
	long seqid;
	int broadcast = 0;
	struct ipmi_channel *chans;
	int rv = 0;

	if (addr->channel >= IPMI_MAX_CHANNELS) {
		ipmi_inc_stat(intf, sent_invalid_commands);
		return -EINVAL;
	}

	chans = READ_ONCE(intf->channel_list)->c;

	if (chans[addr->channel].medium != IPMI_CHANNEL_MEDIUM_IPMB) {
		ipmi_inc_stat(intf, sent_invalid_commands);
		return -EINVAL;
	}

	if (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE) {
		/*
		 * Broadcasts add a zero at the beginning of the
		 * message, but otherwise are the same as an IPMB
		 * address.
		 */
		addr->addr_type = IPMI_IPMB_ADDR_TYPE;
		broadcast = 1;
		retries = 0; /* Don't retry broadcasts. */
	}

	/*
	 * 9 for the header and 1 for the checksum, plus
	 * possibly one for the broadcast.
	 */
	if ((msg->data_len + 10 + broadcast) > IPMI_MAX_MSG_LENGTH) {
		ipmi_inc_stat(intf, sent_invalid_commands);
		return -EMSGSIZE;
	}

	ipmb_addr = (struct ipmi_ipmb_addr *) addr;
	if (ipmb_addr->lun > 3) {
		ipmi_inc_stat(intf, sent_invalid_commands);
		return -EINVAL;
	}

	memcpy(&recv_msg->addr, ipmb_addr, sizeof(*ipmb_addr));

	if (recv_msg->msg.netfn & 0x1) {
		/*
		 * It's a response, so use the user's sequence
		 * from msgid.
		 */
		ipmi_inc_stat(intf, sent_ipmb_responses);
		format_ipmb_msg(smi_msg, msg, ipmb_addr, msgid,
				msgid, broadcast,
				source_address, source_lun);

		/*
		 * Save the receive message so we can use it
		 * to deliver the response.
		 */
		smi_msg->recv_msg = recv_msg;
	} else {
		mutex_lock(&intf->seq_lock);

		if (is_maintenance_mode_cmd(msg))
			intf->ipmb_maintenance_mode_timeout =
				maintenance_mode_timeout_ms;

		if (intf->ipmb_maintenance_mode_timeout && retry_time_ms == 0)
			/* Different default in maintenance mode */
			retry_time_ms = default_maintenance_retry_ms;

		/*
		 * Create an entry in the sequence table with the
		 * given (or default) timeout and retries.
		 */
		rv = intf_next_seq(intf,
				   recv_msg,
				   retry_time_ms,
				   retries,
				   broadcast,
				   &ipmb_seq,
				   &seqid);
		if (rv)
			/*
			 * We have used up all the sequence numbers,
			 * probably, so abort.
			 */
			goto out_err;

		ipmi_inc_stat(intf, sent_ipmb_commands);

		/*
		 * Store the sequence number in the message,
		 * so that when the send message response
		 * comes back we can start the timer.
		 */
		format_ipmb_msg(smi_msg, msg, ipmb_addr,
				STORE_SEQ_IN_MSGID(ipmb_seq, seqid),
				ipmb_seq, broadcast,
				source_address, source_lun);

		/*
		 * Copy the message into the recv message data, so we
		 * can retransmit it later if necessary.
		 */
		memcpy(recv_msg->msg_data, smi_msg->data,
		       smi_msg->data_size);
		recv_msg->msg.data = recv_msg->msg_data;
		recv_msg->msg.data_len = smi_msg->data_size;

		/*
		 * We don't unlock until here, because we need
		 * to copy the completed message into the
		 * recv_msg before we release the lock.
		 * Otherwise, race conditions may bite us.  I
		 * know that's pretty paranoid, but I prefer
		 * to be correct.
		 */
out_err:
		mutex_unlock(&intf->seq_lock);
	}

	return rv;
}
2042 */ 2043 smi_msg->recv_msg = recv_msg; 2044 } else { 2045 mutex_lock(&intf->seq_lock); 2046 2047 if (is_maintenance_mode_cmd(msg)) 2048 intf->ipmb_maintenance_mode_timeout = 2049 maintenance_mode_timeout_ms; 2050 2051 if (intf->ipmb_maintenance_mode_timeout && retry_time_ms == 0) 2052 /* Different default in maintenance mode */ 2053 retry_time_ms = default_maintenance_retry_ms; 2054 2055 /* 2056 * Create a sequence number with a 1 second 2057 * timeout and 4 retries. 2058 */ 2059 rv = intf_next_seq(intf, 2060 recv_msg, 2061 retry_time_ms, 2062 retries, 2063 broadcast, 2064 &ipmb_seq, 2065 &seqid); 2066 if (rv) 2067 /* 2068 * We have used up all the sequence numbers, 2069 * probably, so abort. 2070 */ 2071 goto out_err; 2072 2073 ipmi_inc_stat(intf, sent_ipmb_commands); 2074 2075 /* 2076 * Store the sequence number in the message, 2077 * so that when the send message response 2078 * comes back we can start the timer. 2079 */ 2080 format_ipmb_msg(smi_msg, msg, ipmb_addr, 2081 STORE_SEQ_IN_MSGID(ipmb_seq, seqid), 2082 ipmb_seq, broadcast, 2083 source_address, source_lun); 2084 2085 /* 2086 * Copy the message into the recv message data, so we 2087 * can retransmit it later if necessary. 2088 */ 2089 memcpy(recv_msg->msg_data, smi_msg->data, 2090 smi_msg->data_size); 2091 recv_msg->msg.data = recv_msg->msg_data; 2092 recv_msg->msg.data_len = smi_msg->data_size; 2093 2094 /* 2095 * We don't unlock until here, because we need 2096 * to copy the completed message into the 2097 * recv_msg before we release the lock. 2098 * Otherwise, race conditions may bite us. I 2099 * know that's pretty paranoid, but I prefer 2100 * to be correct. 2101 */ 2102 out_err: 2103 mutex_unlock(&intf->seq_lock); 2104 } 2105 2106 return rv; 2107 } 2108 2109 static int i_ipmi_req_ipmb_direct(struct ipmi_smi *intf, 2110 struct ipmi_addr *addr, 2111 long msgid, 2112 struct kernel_ipmi_msg *msg, 2113 struct ipmi_smi_msg *smi_msg, 2114 struct ipmi_recv_msg *recv_msg, 2115 unsigned char source_lun) 2116 { 2117 struct ipmi_ipmb_direct_addr *daddr; 2118 bool is_cmd = !(recv_msg->msg.netfn & 0x1); 2119 2120 if (!(intf->handlers->flags & IPMI_SMI_CAN_HANDLE_IPMB_DIRECT)) 2121 return -EAFNOSUPPORT; 2122 2123 /* Responses must have a completion code. 
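 * The completion code is the first data byte of any IPMI response
 * (0x00 is "no error"), so a response with an empty payload cannot
 * be well formed and is rejected just below.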
*/ 2124 if (!is_cmd && msg->data_len < 1) { 2125 ipmi_inc_stat(intf, sent_invalid_commands); 2126 return -EINVAL; 2127 } 2128 2129 if ((msg->data_len + 4) > IPMI_MAX_MSG_LENGTH) { 2130 ipmi_inc_stat(intf, sent_invalid_commands); 2131 return -EMSGSIZE; 2132 } 2133 2134 daddr = (struct ipmi_ipmb_direct_addr *) addr; 2135 if (daddr->rq_lun > 3 || daddr->rs_lun > 3) { 2136 ipmi_inc_stat(intf, sent_invalid_commands); 2137 return -EINVAL; 2138 } 2139 2140 smi_msg->type = IPMI_SMI_MSG_TYPE_IPMB_DIRECT; 2141 smi_msg->msgid = msgid; 2142 2143 if (is_cmd) { 2144 smi_msg->data[0] = msg->netfn << 2 | daddr->rs_lun; 2145 smi_msg->data[2] = recv_msg->msgid << 2 | daddr->rq_lun; 2146 } else { 2147 smi_msg->data[0] = msg->netfn << 2 | daddr->rq_lun; 2148 smi_msg->data[2] = recv_msg->msgid << 2 | daddr->rs_lun; 2149 } 2150 smi_msg->data[1] = daddr->slave_addr; 2151 smi_msg->data[3] = msg->cmd; 2152 2153 memcpy(smi_msg->data + 4, msg->data, msg->data_len); 2154 smi_msg->data_size = msg->data_len + 4; 2155 2156 smi_msg->recv_msg = recv_msg; 2157 2158 return 0; 2159 } 2160 2161 static int i_ipmi_req_lan(struct ipmi_smi *intf, 2162 struct ipmi_addr *addr, 2163 long msgid, 2164 struct kernel_ipmi_msg *msg, 2165 struct ipmi_smi_msg *smi_msg, 2166 struct ipmi_recv_msg *recv_msg, 2167 unsigned char source_lun, 2168 int retries, 2169 unsigned int retry_time_ms) 2170 { 2171 struct ipmi_lan_addr *lan_addr; 2172 unsigned char ipmb_seq; 2173 long seqid; 2174 struct ipmi_channel *chans; 2175 int rv = 0; 2176 2177 if (addr->channel >= IPMI_MAX_CHANNELS) { 2178 ipmi_inc_stat(intf, sent_invalid_commands); 2179 return -EINVAL; 2180 } 2181 2182 chans = READ_ONCE(intf->channel_list)->c; 2183 2184 if ((chans[addr->channel].medium 2185 != IPMI_CHANNEL_MEDIUM_8023LAN) 2186 && (chans[addr->channel].medium 2187 != IPMI_CHANNEL_MEDIUM_ASYNC)) { 2188 ipmi_inc_stat(intf, sent_invalid_commands); 2189 return -EINVAL; 2190 } 2191 2192 /* 11 for the header and 1 for the checksum. */ 2193 if ((msg->data_len + 12) > IPMI_MAX_MSG_LENGTH) { 2194 ipmi_inc_stat(intf, sent_invalid_commands); 2195 return -EMSGSIZE; 2196 } 2197 2198 lan_addr = (struct ipmi_lan_addr *) addr; 2199 if (lan_addr->lun > 3) { 2200 ipmi_inc_stat(intf, sent_invalid_commands); 2201 return -EINVAL; 2202 } 2203 2204 memcpy(&recv_msg->addr, lan_addr, sizeof(*lan_addr)); 2205 2206 if (recv_msg->msg.netfn & 0x1) { 2207 /* 2208 * It's a response, so use the user's sequence 2209 * from msgid. 2210 */ 2211 ipmi_inc_stat(intf, sent_lan_responses); 2212 format_lan_msg(smi_msg, msg, lan_addr, msgid, 2213 msgid, source_lun); 2214 2215 /* 2216 * Save the receive message so we can use it 2217 * to deliver the response. 2218 */ 2219 smi_msg->recv_msg = recv_msg; 2220 } else { 2221 mutex_lock(&intf->seq_lock); 2222 2223 /* 2224 * Create a sequence number with a 1 second 2225 * timeout and 4 retries. 2226 */ 2227 rv = intf_next_seq(intf, 2228 recv_msg, 2229 retry_time_ms, 2230 retries, 2231 0, 2232 &ipmb_seq, 2233 &seqid); 2234 if (rv) 2235 /* 2236 * We have used up all the sequence numbers, 2237 * probably, so abort. 2238 */ 2239 goto out_err; 2240 2241 ipmi_inc_stat(intf, sent_lan_commands); 2242 2243 /* 2244 * Store the sequence number in the message, 2245 * so that when the send message response 2246 * comes back we can start the timer. 2247 */ 2248 format_lan_msg(smi_msg, msg, lan_addr, 2249 STORE_SEQ_IN_MSGID(ipmb_seq, seqid), 2250 ipmb_seq, source_lun); 2251 2252 /* 2253 * Copy the message into the recv message data, so we 2254 * can retransmit it later if necessary. 
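 * (The timeout code elsewhere in this file resends from this saved
 * copy, since the original SMI message may already have been handed
 * to the low-level driver and freed by then.)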
2255 */ 2256 memcpy(recv_msg->msg_data, smi_msg->data, 2257 smi_msg->data_size); 2258 recv_msg->msg.data = recv_msg->msg_data; 2259 recv_msg->msg.data_len = smi_msg->data_size; 2260 2261 /* 2262 * We don't unlock until here, because we need 2263 * to copy the completed message into the 2264 * recv_msg before we release the lock. 2265 * Otherwise, race conditions may bite us. I 2266 * know that's pretty paranoid, but I prefer 2267 * to be correct. 2268 */ 2269 out_err: 2270 mutex_unlock(&intf->seq_lock); 2271 } 2272 2273 return rv; 2274 } 2275 2276 /* 2277 * Separate from ipmi_request so that the user does not have to be 2278 * supplied in certain circumstances (mainly at panic time). If 2279 * messages are supplied, they will be freed, even if an error 2280 * occurs. 2281 */ 2282 static int i_ipmi_request(struct ipmi_user *user, 2283 struct ipmi_smi *intf, 2284 struct ipmi_addr *addr, 2285 long msgid, 2286 struct kernel_ipmi_msg *msg, 2287 void *user_msg_data, 2288 void *supplied_smi, 2289 struct ipmi_recv_msg *supplied_recv, 2290 int priority, 2291 unsigned char source_address, 2292 unsigned char source_lun, 2293 int retries, 2294 unsigned int retry_time_ms) 2295 { 2296 struct ipmi_smi_msg *smi_msg; 2297 struct ipmi_recv_msg *recv_msg; 2298 int run_to_completion = READ_ONCE(intf->run_to_completion); 2299 int rv = 0; 2300 2301 if (supplied_recv) { 2302 recv_msg = supplied_recv; 2303 recv_msg->user = user; 2304 if (user) { 2305 atomic_inc(&user->nr_msgs); 2306 /* The put happens when the message is freed. */ 2307 kref_get(&user->refcount); 2308 } 2309 } else { 2310 recv_msg = ipmi_alloc_recv_msg(user); 2311 if (IS_ERR(recv_msg)) 2312 return PTR_ERR(recv_msg); 2313 } 2314 recv_msg->user_msg_data = user_msg_data; 2315 2316 if (supplied_smi) 2317 smi_msg = supplied_smi; 2318 else { 2319 smi_msg = ipmi_alloc_smi_msg(); 2320 if (smi_msg == NULL) { 2321 if (!supplied_recv) 2322 ipmi_free_recv_msg(recv_msg); 2323 return -ENOMEM; 2324 } 2325 } 2326 2327 if (!run_to_completion) 2328 mutex_lock(&intf->users_mutex); 2329 if (intf->maintenance_mode_state == IPMI_MAINTENANCE_MODE_STATE_RESET) { 2330 /* No messages while the BMC is in reset. */ 2331 rv = -EBUSY; 2332 goto out_err; 2333 } 2334 if (intf->in_shutdown) { 2335 rv = -ENODEV; 2336 goto out_err; 2337 } 2338 2339 recv_msg->msgid = msgid; 2340 /* 2341 * Store the message to send in the receive message so timeout 2342 * responses can get the proper response data. 2343 */ 2344 recv_msg->msg = *msg; 2345 2346 if (addr->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) { 2347 rv = i_ipmi_req_sysintf(intf, addr, msgid, msg, smi_msg, 2348 recv_msg, retries, retry_time_ms); 2349 } else if (is_ipmb_addr(addr) || is_ipmb_bcast_addr(addr)) { 2350 rv = i_ipmi_req_ipmb(intf, addr, msgid, msg, smi_msg, recv_msg, 2351 source_address, source_lun, 2352 retries, retry_time_ms); 2353 } else if (is_ipmb_direct_addr(addr)) { 2354 rv = i_ipmi_req_ipmb_direct(intf, addr, msgid, msg, smi_msg, 2355 recv_msg, source_lun); 2356 } else if (is_lan_addr(addr)) { 2357 rv = i_ipmi_req_lan(intf, addr, msgid, msg, smi_msg, recv_msg, 2358 source_lun, retries, retry_time_ms); 2359 } else { 2360 /* Unknown address type. 
*/ 2361 ipmi_inc_stat(intf, sent_invalid_commands); 2362 rv = -EINVAL; 2363 } 2364 2365 if (rv) { 2366 out_err: 2367 if (!supplied_smi) 2368 ipmi_free_smi_msg(smi_msg); 2369 if (!supplied_recv) 2370 ipmi_free_recv_msg(recv_msg); 2371 } else { 2372 dev_dbg(intf->si_dev, "Send: %*ph\n", 2373 smi_msg->data_size, smi_msg->data); 2374 2375 smi_send(intf, intf->handlers, smi_msg, priority); 2376 } 2377 if (!run_to_completion) 2378 mutex_unlock(&intf->users_mutex); 2379 2380 return rv; 2381 } 2382 2383 static int check_addr(struct ipmi_smi *intf, 2384 struct ipmi_addr *addr, 2385 unsigned char *saddr, 2386 unsigned char *lun) 2387 { 2388 if (addr->channel >= IPMI_MAX_CHANNELS) 2389 return -EINVAL; 2390 addr->channel = array_index_nospec(addr->channel, IPMI_MAX_CHANNELS); 2391 *lun = intf->addrinfo[addr->channel].lun; 2392 *saddr = intf->addrinfo[addr->channel].address; 2393 return 0; 2394 } 2395 2396 int ipmi_request_settime(struct ipmi_user *user, 2397 struct ipmi_addr *addr, 2398 long msgid, 2399 struct kernel_ipmi_msg *msg, 2400 void *user_msg_data, 2401 int priority, 2402 int retries, 2403 unsigned int retry_time_ms) 2404 { 2405 unsigned char saddr = 0, lun = 0; 2406 int rv; 2407 2408 if (!user) 2409 return -EINVAL; 2410 2411 user = acquire_ipmi_user(user); 2412 if (!user) 2413 return -ENODEV; 2414 2415 rv = check_addr(user->intf, addr, &saddr, &lun); 2416 if (!rv) 2417 rv = i_ipmi_request(user, 2418 user->intf, 2419 addr, 2420 msgid, 2421 msg, 2422 user_msg_data, 2423 NULL, NULL, 2424 priority, 2425 saddr, 2426 lun, 2427 retries, 2428 retry_time_ms); 2429 2430 release_ipmi_user(user); 2431 return rv; 2432 } 2433 EXPORT_SYMBOL(ipmi_request_settime); 2434 2435 int ipmi_request_supply_msgs(struct ipmi_user *user, 2436 struct ipmi_addr *addr, 2437 long msgid, 2438 struct kernel_ipmi_msg *msg, 2439 void *user_msg_data, 2440 void *supplied_smi, 2441 struct ipmi_recv_msg *supplied_recv, 2442 int priority) 2443 { 2444 unsigned char saddr = 0, lun = 0; 2445 int rv; 2446 2447 if (!user) 2448 return -EINVAL; 2449 2450 user = acquire_ipmi_user(user); 2451 if (!user) 2452 return -ENODEV; 2453 2454 rv = check_addr(user->intf, addr, &saddr, &lun); 2455 if (!rv) 2456 rv = i_ipmi_request(user, 2457 user->intf, 2458 addr, 2459 msgid, 2460 msg, 2461 user_msg_data, 2462 supplied_smi, 2463 supplied_recv, 2464 priority, 2465 saddr, 2466 lun, 2467 -1, 0); 2468 2469 release_ipmi_user(user); 2470 return rv; 2471 } 2472 EXPORT_SYMBOL(ipmi_request_supply_msgs); 2473 2474 static void bmc_device_id_handler(struct ipmi_smi *intf, 2475 struct ipmi_recv_msg *msg) 2476 { 2477 int rv; 2478 2479 if ((msg->addr.addr_type != IPMI_SYSTEM_INTERFACE_ADDR_TYPE) 2480 || (msg->msg.netfn != IPMI_NETFN_APP_RESPONSE) 2481 || (msg->msg.cmd != IPMI_GET_DEVICE_ID_CMD)) { 2482 dev_warn(intf->si_dev, 2483 "invalid device_id msg: addr_type=%d netfn=%x cmd=%x\n", 2484 msg->addr.addr_type, msg->msg.netfn, msg->msg.cmd); 2485 return; 2486 } 2487 2488 if (msg->msg.data[0]) { 2489 dev_warn(intf->si_dev, "device id fetch failed: 0x%2.2x\n", 2490 msg->msg.data[0]); 2491 intf->bmc->dyn_id_set = 0; 2492 goto out; 2493 } 2494 2495 rv = ipmi_demangle_device_id(msg->msg.netfn, msg->msg.cmd, 2496 msg->msg.data, msg->msg.data_len, &intf->bmc->fetch_id); 2497 if (rv) { 2498 dev_warn(intf->si_dev, "device id demangle failed: %d\n", rv); 2499 /* record completion code when error */ 2500 intf->bmc->cc = msg->msg.data[0]; 2501 intf->bmc->dyn_id_set = 0; 2502 } else { 2503 /* 2504 * Make sure the id data is available before setting 2505 * dyn_id_set. 
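 * This smp_wmb() pairs with the smp_rmb() in __get_device_id():
 * the writer fills intf->bmc->fetch_id, issues the write barrier,
 * then sets dyn_id_set to 1; the reader waits for dyn_id_set to
 * leave 2, issues the read barrier, and only then trusts fetch_id.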
2506 */ 2507 smp_wmb(); 2508 intf->bmc->dyn_id_set = 1; 2509 } 2510 out: 2511 wake_up(&intf->waitq); 2512 } 2513 2514 static int 2515 send_get_device_id_cmd(struct ipmi_smi *intf) 2516 { 2517 struct ipmi_system_interface_addr si; 2518 struct kernel_ipmi_msg msg; 2519 2520 si.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE; 2521 si.channel = IPMI_BMC_CHANNEL; 2522 si.lun = 0; 2523 2524 msg.netfn = IPMI_NETFN_APP_REQUEST; 2525 msg.cmd = IPMI_GET_DEVICE_ID_CMD; 2526 msg.data = NULL; 2527 msg.data_len = 0; 2528 2529 return i_ipmi_request(NULL, 2530 intf, 2531 (struct ipmi_addr *) &si, 2532 0, 2533 &msg, 2534 intf, 2535 NULL, 2536 NULL, 2537 0, 2538 intf->addrinfo[0].address, 2539 intf->addrinfo[0].lun, 2540 -1, 0); 2541 } 2542 2543 static int __get_device_id(struct ipmi_smi *intf, struct bmc_device *bmc) 2544 { 2545 int rv; 2546 unsigned int retry_count = 0; 2547 2548 intf->null_user_handler = bmc_device_id_handler; 2549 2550 retry: 2551 bmc->cc = 0; 2552 bmc->dyn_id_set = 2; 2553 2554 rv = send_get_device_id_cmd(intf); 2555 if (rv) 2556 goto out_reset_handler; 2557 2558 wait_event(intf->waitq, bmc->dyn_id_set != 2); 2559 2560 if (!bmc->dyn_id_set) { 2561 if (bmc->cc != IPMI_CC_NO_ERROR && 2562 ++retry_count <= GET_DEVICE_ID_MAX_RETRY) { 2563 msleep(500); 2564 dev_warn(intf->si_dev, 2565 "BMC returned 0x%2.2x, retry get bmc device id\n", 2566 bmc->cc); 2567 goto retry; 2568 } 2569 2570 rv = -EIO; /* Something went wrong in the fetch. */ 2571 } 2572 2573 /* dyn_id_set makes the id data available. */ 2574 smp_rmb(); 2575 2576 out_reset_handler: 2577 intf->null_user_handler = NULL; 2578 2579 return rv; 2580 } 2581 2582 /* 2583 * Fetch the device id for the bmc/interface. You must pass in either 2584 * bmc or intf, this code will get the other one. If the data has 2585 * been recently fetched, this will just use the cached data. Otherwise 2586 * it will run a new fetch. 2587 * 2588 * Except for the first time this is called (in ipmi_add_smi()), 2589 * this will always return good data; 2590 */ 2591 static int __bmc_get_device_id(struct ipmi_smi *intf, struct bmc_device *bmc, 2592 struct ipmi_device_id *id, 2593 bool *guid_set, guid_t *guid, int intf_num) 2594 { 2595 int rv = 0; 2596 int prev_dyn_id_set, prev_guid_set; 2597 bool intf_set = intf != NULL; 2598 2599 if (!intf) { 2600 mutex_lock(&bmc->dyn_mutex); 2601 retry_bmc_lock: 2602 if (list_empty(&bmc->intfs)) { 2603 mutex_unlock(&bmc->dyn_mutex); 2604 return -ENOENT; 2605 } 2606 intf = list_first_entry(&bmc->intfs, struct ipmi_smi, 2607 bmc_link); 2608 kref_get(&intf->refcount); 2609 mutex_unlock(&bmc->dyn_mutex); 2610 mutex_lock(&intf->bmc_reg_mutex); 2611 mutex_lock(&bmc->dyn_mutex); 2612 if (intf != list_first_entry(&bmc->intfs, struct ipmi_smi, 2613 bmc_link)) { 2614 mutex_unlock(&intf->bmc_reg_mutex); 2615 kref_put(&intf->refcount, intf_free); 2616 goto retry_bmc_lock; 2617 } 2618 } else { 2619 mutex_lock(&intf->bmc_reg_mutex); 2620 bmc = intf->bmc; 2621 mutex_lock(&bmc->dyn_mutex); 2622 kref_get(&intf->refcount); 2623 } 2624 2625 /* If we have a valid and current ID, just return that. */ 2626 if (intf->in_bmc_register || 2627 (bmc->dyn_id_set && time_is_after_jiffies(bmc->dyn_id_expiry))) 2628 goto out_noprocessing; 2629 2630 /* Don't allow sysfs access when in maintenance mode. 
*/
2631 if (intf->maintenance_mode_state) {
2632 rv = -EBUSY;
2633 goto out_noprocessing;
2634 }
2635
2636 prev_guid_set = bmc->dyn_guid_set;
2637 __get_guid(intf);
2638
2639 prev_dyn_id_set = bmc->dyn_id_set;
2640 rv = __get_device_id(intf, bmc);
2641 if (rv)
2642 goto out;
2643
2644 /*
2645 * The guid, device id, manufacturer id, and product id should
2646 * not change on a BMC. If one of them does, we have to do some dancing.
2647 */
2648 if (!intf->bmc_registered
2649 || (!prev_guid_set && bmc->dyn_guid_set)
2650 || (!prev_dyn_id_set && bmc->dyn_id_set)
2651 || (prev_guid_set && bmc->dyn_guid_set
2652 && !guid_equal(&bmc->guid, &bmc->fetch_guid))
2653 || bmc->id.device_id != bmc->fetch_id.device_id
2654 || bmc->id.manufacturer_id != bmc->fetch_id.manufacturer_id
2655 || bmc->id.product_id != bmc->fetch_id.product_id) {
2656 struct ipmi_device_id id = bmc->fetch_id;
2657 int guid_set = bmc->dyn_guid_set;
2658 guid_t guid;
2659
2660 guid = bmc->fetch_guid;
2661 mutex_unlock(&bmc->dyn_mutex);
2662
2663 __ipmi_bmc_unregister(intf);
2664 /* Fill in the temporary BMC for good measure. */
2665 intf->bmc->id = id;
2666 intf->bmc->dyn_guid_set = guid_set;
2667 intf->bmc->guid = guid;
2668 if (__ipmi_bmc_register(intf, &id, guid_set, &guid, intf_num))
2669 need_waiter(intf); /* Retry later on an error. */
2670 else
2671 __scan_channels(intf, &id);
2672
2673
2674 if (!intf_set) {
2675 /*
2676 * We weren't given the interface by the caller, so
2677 * restart the operation on the next interface for
2678 * the BMC.
2679 */
2680 mutex_unlock(&intf->bmc_reg_mutex);
2681 mutex_lock(&bmc->dyn_mutex);
2682 goto retry_bmc_lock;
2683 }
2684
2685 /* We have a new BMC, set it up. */
2686 bmc = intf->bmc;
2687 mutex_lock(&bmc->dyn_mutex);
2688 goto out_noprocessing;
2689 } else if (memcmp(&bmc->fetch_id, &bmc->id, sizeof(bmc->id)))
2690 /* Version info changed; scan the channels again. */
2691 __scan_channels(intf, &bmc->fetch_id);
2692
2693 bmc->dyn_id_expiry = jiffies + IPMI_DYN_DEV_ID_EXPIRY;
2694
2695 out:
2696 if (rv && prev_dyn_id_set) {
2697 rv = 0; /* Ignore failures if we have previous data. */
2698 bmc->dyn_id_set = prev_dyn_id_set;
2699 }
2700 if (!rv) {
2701 bmc->id = bmc->fetch_id;
2702 if (bmc->dyn_guid_set)
2703 bmc->guid = bmc->fetch_guid;
2704 else if (prev_guid_set)
2705 /*
2706 * The GUID used to be valid but this fetch failed;
2707 * just use the cached value.
2708 */ 2709 bmc->dyn_guid_set = prev_guid_set; 2710 } 2711 out_noprocessing: 2712 if (!rv) { 2713 if (id) 2714 *id = bmc->id; 2715 2716 if (guid_set) 2717 *guid_set = bmc->dyn_guid_set; 2718 2719 if (guid && bmc->dyn_guid_set) 2720 *guid = bmc->guid; 2721 } 2722 2723 mutex_unlock(&bmc->dyn_mutex); 2724 mutex_unlock(&intf->bmc_reg_mutex); 2725 2726 kref_put(&intf->refcount, intf_free); 2727 return rv; 2728 } 2729 2730 static int bmc_get_device_id(struct ipmi_smi *intf, struct bmc_device *bmc, 2731 struct ipmi_device_id *id, 2732 bool *guid_set, guid_t *guid) 2733 { 2734 return __bmc_get_device_id(intf, bmc, id, guid_set, guid, -1); 2735 } 2736 2737 static ssize_t device_id_show(struct device *dev, 2738 struct device_attribute *attr, 2739 char *buf) 2740 { 2741 struct bmc_device *bmc = to_bmc_device(dev); 2742 struct ipmi_device_id id; 2743 int rv; 2744 2745 rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL); 2746 if (rv) 2747 return rv; 2748 2749 return sysfs_emit(buf, "%u\n", id.device_id); 2750 } 2751 static DEVICE_ATTR_RO(device_id); 2752 2753 static ssize_t provides_device_sdrs_show(struct device *dev, 2754 struct device_attribute *attr, 2755 char *buf) 2756 { 2757 struct bmc_device *bmc = to_bmc_device(dev); 2758 struct ipmi_device_id id; 2759 int rv; 2760 2761 rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL); 2762 if (rv) 2763 return rv; 2764 2765 return sysfs_emit(buf, "%u\n", (id.device_revision & 0x80) >> 7); 2766 } 2767 static DEVICE_ATTR_RO(provides_device_sdrs); 2768 2769 static ssize_t revision_show(struct device *dev, struct device_attribute *attr, 2770 char *buf) 2771 { 2772 struct bmc_device *bmc = to_bmc_device(dev); 2773 struct ipmi_device_id id; 2774 int rv; 2775 2776 rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL); 2777 if (rv) 2778 return rv; 2779 2780 return sysfs_emit(buf, "%u\n", id.device_revision & 0x0F); 2781 } 2782 static DEVICE_ATTR_RO(revision); 2783 2784 static ssize_t firmware_revision_show(struct device *dev, 2785 struct device_attribute *attr, 2786 char *buf) 2787 { 2788 struct bmc_device *bmc = to_bmc_device(dev); 2789 struct ipmi_device_id id; 2790 int rv; 2791 2792 rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL); 2793 if (rv) 2794 return rv; 2795 2796 return sysfs_emit(buf, "%u.%x\n", id.firmware_revision_1, 2797 id.firmware_revision_2); 2798 } 2799 static DEVICE_ATTR_RO(firmware_revision); 2800 2801 static ssize_t ipmi_version_show(struct device *dev, 2802 struct device_attribute *attr, 2803 char *buf) 2804 { 2805 struct bmc_device *bmc = to_bmc_device(dev); 2806 struct ipmi_device_id id; 2807 int rv; 2808 2809 rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL); 2810 if (rv) 2811 return rv; 2812 2813 return sysfs_emit(buf, "%u.%u\n", 2814 ipmi_version_major(&id), 2815 ipmi_version_minor(&id)); 2816 } 2817 static DEVICE_ATTR_RO(ipmi_version); 2818 2819 static ssize_t add_dev_support_show(struct device *dev, 2820 struct device_attribute *attr, 2821 char *buf) 2822 { 2823 struct bmc_device *bmc = to_bmc_device(dev); 2824 struct ipmi_device_id id; 2825 int rv; 2826 2827 rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL); 2828 if (rv) 2829 return rv; 2830 2831 return sysfs_emit(buf, "0x%02x\n", id.additional_device_support); 2832 } 2833 static DEVICE_ATTR(additional_device_support, S_IRUGO, add_dev_support_show, 2834 NULL); 2835 2836 static ssize_t manufacturer_id_show(struct device *dev, 2837 struct device_attribute *attr, 2838 char *buf) 2839 { 2840 struct bmc_device *bmc = to_bmc_device(dev); 2841 struct ipmi_device_id id; 2842 int rv; 2843 2844 
rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL); 2845 if (rv) 2846 return rv; 2847 2848 return sysfs_emit(buf, "0x%6.6x\n", id.manufacturer_id); 2849 } 2850 static DEVICE_ATTR_RO(manufacturer_id); 2851 2852 static ssize_t product_id_show(struct device *dev, 2853 struct device_attribute *attr, 2854 char *buf) 2855 { 2856 struct bmc_device *bmc = to_bmc_device(dev); 2857 struct ipmi_device_id id; 2858 int rv; 2859 2860 rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL); 2861 if (rv) 2862 return rv; 2863 2864 return sysfs_emit(buf, "0x%4.4x\n", id.product_id); 2865 } 2866 static DEVICE_ATTR_RO(product_id); 2867 2868 static ssize_t aux_firmware_rev_show(struct device *dev, 2869 struct device_attribute *attr, 2870 char *buf) 2871 { 2872 struct bmc_device *bmc = to_bmc_device(dev); 2873 struct ipmi_device_id id; 2874 int rv; 2875 2876 rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL); 2877 if (rv) 2878 return rv; 2879 2880 return sysfs_emit(buf, "0x%02x 0x%02x 0x%02x 0x%02x\n", 2881 id.aux_firmware_revision[3], 2882 id.aux_firmware_revision[2], 2883 id.aux_firmware_revision[1], 2884 id.aux_firmware_revision[0]); 2885 } 2886 static DEVICE_ATTR(aux_firmware_revision, S_IRUGO, aux_firmware_rev_show, NULL); 2887 2888 static ssize_t guid_show(struct device *dev, struct device_attribute *attr, 2889 char *buf) 2890 { 2891 struct bmc_device *bmc = to_bmc_device(dev); 2892 bool guid_set; 2893 guid_t guid; 2894 int rv; 2895 2896 rv = bmc_get_device_id(NULL, bmc, NULL, &guid_set, &guid); 2897 if (rv) 2898 return rv; 2899 if (!guid_set) 2900 return -ENOENT; 2901 2902 return sysfs_emit(buf, "%pUl\n", &guid); 2903 } 2904 static DEVICE_ATTR_RO(guid); 2905 2906 static struct attribute *bmc_dev_attrs[] = { 2907 &dev_attr_device_id.attr, 2908 &dev_attr_provides_device_sdrs.attr, 2909 &dev_attr_revision.attr, 2910 &dev_attr_firmware_revision.attr, 2911 &dev_attr_ipmi_version.attr, 2912 &dev_attr_additional_device_support.attr, 2913 &dev_attr_manufacturer_id.attr, 2914 &dev_attr_product_id.attr, 2915 &dev_attr_aux_firmware_revision.attr, 2916 &dev_attr_guid.attr, 2917 NULL 2918 }; 2919 2920 static umode_t bmc_dev_attr_is_visible(struct kobject *kobj, 2921 struct attribute *attr, int idx) 2922 { 2923 struct device *dev = kobj_to_dev(kobj); 2924 struct bmc_device *bmc = to_bmc_device(dev); 2925 umode_t mode = attr->mode; 2926 int rv; 2927 2928 if (attr == &dev_attr_aux_firmware_revision.attr) { 2929 struct ipmi_device_id id; 2930 2931 rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL); 2932 return (!rv && id.aux_firmware_revision_set) ? mode : 0; 2933 } 2934 if (attr == &dev_attr_guid.attr) { 2935 bool guid_set; 2936 2937 rv = bmc_get_device_id(NULL, bmc, NULL, &guid_set, NULL); 2938 return (!rv && guid_set) ? 
mode : 0; 2939 } 2940 return mode; 2941 } 2942 2943 static const struct attribute_group bmc_dev_attr_group = { 2944 .attrs = bmc_dev_attrs, 2945 .is_visible = bmc_dev_attr_is_visible, 2946 }; 2947 2948 static const struct attribute_group *bmc_dev_attr_groups[] = { 2949 &bmc_dev_attr_group, 2950 NULL 2951 }; 2952 2953 static const struct device_type bmc_device_type = { 2954 .groups = bmc_dev_attr_groups, 2955 }; 2956 2957 static int __find_bmc_guid(struct device *dev, const void *data) 2958 { 2959 const guid_t *guid = data; 2960 struct bmc_device *bmc; 2961 int rv; 2962 2963 if (dev->type != &bmc_device_type) 2964 return 0; 2965 2966 bmc = to_bmc_device(dev); 2967 rv = bmc->dyn_guid_set && guid_equal(&bmc->guid, guid); 2968 if (rv) 2969 rv = kref_get_unless_zero(&bmc->usecount); 2970 return rv; 2971 } 2972 2973 /* 2974 * Returns with the bmc's usecount incremented, if it is non-NULL. 2975 */ 2976 static struct bmc_device *ipmi_find_bmc_guid(struct device_driver *drv, 2977 guid_t *guid) 2978 { 2979 struct device *dev; 2980 struct bmc_device *bmc = NULL; 2981 2982 dev = driver_find_device(drv, NULL, guid, __find_bmc_guid); 2983 if (dev) { 2984 bmc = to_bmc_device(dev); 2985 put_device(dev); 2986 } 2987 return bmc; 2988 } 2989 2990 struct prod_dev_id { 2991 unsigned int product_id; 2992 unsigned char device_id; 2993 }; 2994 2995 static int __find_bmc_prod_dev_id(struct device *dev, const void *data) 2996 { 2997 const struct prod_dev_id *cid = data; 2998 struct bmc_device *bmc; 2999 int rv; 3000 3001 if (dev->type != &bmc_device_type) 3002 return 0; 3003 3004 bmc = to_bmc_device(dev); 3005 rv = (bmc->id.product_id == cid->product_id 3006 && bmc->id.device_id == cid->device_id); 3007 if (rv) 3008 rv = kref_get_unless_zero(&bmc->usecount); 3009 return rv; 3010 } 3011 3012 /* 3013 * Returns with the bmc's usecount incremented, if it is non-NULL. 3014 */ 3015 static struct bmc_device *ipmi_find_bmc_prod_dev_id( 3016 struct device_driver *drv, 3017 unsigned int product_id, unsigned char device_id) 3018 { 3019 struct prod_dev_id id = { 3020 .product_id = product_id, 3021 .device_id = device_id, 3022 }; 3023 struct device *dev; 3024 struct bmc_device *bmc = NULL; 3025 3026 dev = driver_find_device(drv, NULL, &id, __find_bmc_prod_dev_id); 3027 if (dev) { 3028 bmc = to_bmc_device(dev); 3029 put_device(dev); 3030 } 3031 return bmc; 3032 } 3033 3034 static DEFINE_IDA(ipmi_bmc_ida); 3035 3036 static void 3037 release_bmc_device(struct device *dev) 3038 { 3039 kfree(to_bmc_device(dev)); 3040 } 3041 3042 static void cleanup_bmc_work(struct work_struct *work) 3043 { 3044 struct bmc_device *bmc = container_of(work, struct bmc_device, 3045 remove_work); 3046 int id = bmc->pdev.id; /* Unregister overwrites id */ 3047 3048 platform_device_unregister(&bmc->pdev); 3049 ida_free(&ipmi_bmc_ida, id); 3050 } 3051 3052 static void 3053 cleanup_bmc_device(struct kref *ref) 3054 { 3055 struct bmc_device *bmc = container_of(ref, struct bmc_device, usecount); 3056 3057 /* 3058 * Remove the platform device in a work queue to avoid issues 3059 * with removing the device attributes while reading a device 3060 * attribute. 3061 */ 3062 queue_work(bmc_remove_work_wq, &bmc->remove_work); 3063 } 3064 3065 /* 3066 * Must be called with intf->bmc_reg_mutex held. 
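 * (If this invariant ever needs to be checked at runtime, a
 * lockdep_assert_held(&intf->bmc_reg_mutex) at the top of the
 * function would be one way to express it.)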
3067 */
3068 static void __ipmi_bmc_unregister(struct ipmi_smi *intf)
3069 {
3070 struct bmc_device *bmc = intf->bmc;
3071
3072 if (!intf->bmc_registered)
3073 return;
3074
3075 sysfs_remove_link(&intf->si_dev->kobj, "bmc");
3076 sysfs_remove_link(&bmc->pdev.dev.kobj, intf->my_dev_name);
3077 kfree(intf->my_dev_name);
3078 intf->my_dev_name = NULL;
3079
3080 mutex_lock(&bmc->dyn_mutex);
3081 list_del(&intf->bmc_link);
3082 mutex_unlock(&bmc->dyn_mutex);
3083 intf->bmc = &intf->tmp_bmc;
3084 kref_put(&bmc->usecount, cleanup_bmc_device);
3085 intf->bmc_registered = false;
3086 }
3087
3088 static void ipmi_bmc_unregister(struct ipmi_smi *intf)
3089 {
3090 mutex_lock(&intf->bmc_reg_mutex);
3091 __ipmi_bmc_unregister(intf);
3092 mutex_unlock(&intf->bmc_reg_mutex);
3093 }
3094
3095 /*
3096 * Must be called with intf->bmc_reg_mutex held.
3097 */
3098 static int __ipmi_bmc_register(struct ipmi_smi *intf,
3099 struct ipmi_device_id *id,
3100 bool guid_set, guid_t *guid, int intf_num)
3101 {
3102 int rv;
3103 struct bmc_device *bmc;
3104 struct bmc_device *old_bmc;
3105
3106 /*
3107 * platform_device_register() can cause bmc_reg_mutex to
3108 * be claimed because of the is_visible functions of
3109 * the attributes. Eliminate possible recursion and
3110 * release the lock.
3111 */
3112 intf->in_bmc_register = true;
3113 mutex_unlock(&intf->bmc_reg_mutex);
3114
3115 /*
3116 * Check whether there is already a bmc_device struct
3117 * representing the interfaced BMC.
3118 */
3119 mutex_lock(&ipmidriver_mutex);
3120 if (guid_set)
3121 old_bmc = ipmi_find_bmc_guid(&ipmidriver.driver, guid);
3122 else
3123 old_bmc = ipmi_find_bmc_prod_dev_id(&ipmidriver.driver,
3124 id->product_id,
3125 id->device_id);
3126
3127 /*
3128 * If there is already a bmc_device, reuse it; otherwise
3129 * allocate and register a new BMC device.
3130 */
3131 if (old_bmc) {
3132 bmc = old_bmc;
3133 /*
3134 * Note: old_bmc already has usecount incremented by
3135 * the BMC find functions.
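 * (via kref_get_unless_zero() in __find_bmc_guid() and
 * __find_bmc_prod_dev_id(); the reference is dropped again with
 * kref_put(&bmc->usecount, cleanup_bmc_device) when the interface
 * unregisters or this registration is unwound.)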
3136 */ 3137 intf->bmc = old_bmc; 3138 mutex_lock(&bmc->dyn_mutex); 3139 list_add_tail(&intf->bmc_link, &bmc->intfs); 3140 mutex_unlock(&bmc->dyn_mutex); 3141 3142 dev_info(intf->si_dev, 3143 "interfacing existing BMC (man_id: 0x%6.6x, prod_id: 0x%4.4x, dev_id: 0x%2.2x)\n", 3144 bmc->id.manufacturer_id, 3145 bmc->id.product_id, 3146 bmc->id.device_id); 3147 } else { 3148 bmc = kzalloc(sizeof(*bmc), GFP_KERNEL); 3149 if (!bmc) { 3150 rv = -ENOMEM; 3151 goto out; 3152 } 3153 INIT_LIST_HEAD(&bmc->intfs); 3154 mutex_init(&bmc->dyn_mutex); 3155 INIT_WORK(&bmc->remove_work, cleanup_bmc_work); 3156 3157 bmc->id = *id; 3158 bmc->dyn_id_set = 1; 3159 bmc->dyn_guid_set = guid_set; 3160 bmc->guid = *guid; 3161 bmc->dyn_id_expiry = jiffies + IPMI_DYN_DEV_ID_EXPIRY; 3162 3163 bmc->pdev.name = "ipmi_bmc"; 3164 3165 rv = ida_alloc(&ipmi_bmc_ida, GFP_KERNEL); 3166 if (rv < 0) { 3167 kfree(bmc); 3168 goto out; 3169 } 3170 3171 bmc->pdev.dev.driver = &ipmidriver.driver; 3172 bmc->pdev.id = rv; 3173 bmc->pdev.dev.release = release_bmc_device; 3174 bmc->pdev.dev.type = &bmc_device_type; 3175 kref_init(&bmc->usecount); 3176 3177 intf->bmc = bmc; 3178 mutex_lock(&bmc->dyn_mutex); 3179 list_add_tail(&intf->bmc_link, &bmc->intfs); 3180 mutex_unlock(&bmc->dyn_mutex); 3181 3182 rv = platform_device_register(&bmc->pdev); 3183 if (rv) { 3184 dev_err(intf->si_dev, 3185 "Unable to register bmc device: %d\n", 3186 rv); 3187 goto out_list_del; 3188 } 3189 3190 dev_info(intf->si_dev, 3191 "Found new BMC (man_id: 0x%6.6x, prod_id: 0x%4.4x, dev_id: 0x%2.2x)\n", 3192 bmc->id.manufacturer_id, 3193 bmc->id.product_id, 3194 bmc->id.device_id); 3195 } 3196 3197 /* 3198 * create symlink from system interface device to bmc device 3199 * and back. 3200 */ 3201 rv = sysfs_create_link(&intf->si_dev->kobj, &bmc->pdev.dev.kobj, "bmc"); 3202 if (rv) { 3203 dev_err(intf->si_dev, "Unable to create bmc symlink: %d\n", rv); 3204 goto out_put_bmc; 3205 } 3206 3207 if (intf_num == -1) 3208 intf_num = intf->intf_num; 3209 intf->my_dev_name = kasprintf(GFP_KERNEL, "ipmi%d", intf_num); 3210 if (!intf->my_dev_name) { 3211 rv = -ENOMEM; 3212 dev_err(intf->si_dev, "Unable to allocate link from BMC: %d\n", 3213 rv); 3214 goto out_unlink1; 3215 } 3216 3217 rv = sysfs_create_link(&bmc->pdev.dev.kobj, &intf->si_dev->kobj, 3218 intf->my_dev_name); 3219 if (rv) { 3220 dev_err(intf->si_dev, "Unable to create symlink to bmc: %d\n", 3221 rv); 3222 goto out_free_my_dev_name; 3223 } 3224 3225 intf->bmc_registered = true; 3226 3227 out: 3228 mutex_unlock(&ipmidriver_mutex); 3229 mutex_lock(&intf->bmc_reg_mutex); 3230 intf->in_bmc_register = false; 3231 return rv; 3232 3233 3234 out_free_my_dev_name: 3235 kfree(intf->my_dev_name); 3236 intf->my_dev_name = NULL; 3237 3238 out_unlink1: 3239 sysfs_remove_link(&intf->si_dev->kobj, "bmc"); 3240 3241 out_put_bmc: 3242 mutex_lock(&bmc->dyn_mutex); 3243 list_del(&intf->bmc_link); 3244 mutex_unlock(&bmc->dyn_mutex); 3245 intf->bmc = &intf->tmp_bmc; 3246 kref_put(&bmc->usecount, cleanup_bmc_device); 3247 goto out; 3248 3249 out_list_del: 3250 mutex_lock(&bmc->dyn_mutex); 3251 list_del(&intf->bmc_link); 3252 mutex_unlock(&bmc->dyn_mutex); 3253 intf->bmc = &intf->tmp_bmc; 3254 put_device(&bmc->pdev.dev); 3255 goto out; 3256 } 3257 3258 static int 3259 send_guid_cmd(struct ipmi_smi *intf, int chan) 3260 { 3261 struct kernel_ipmi_msg msg; 3262 struct ipmi_system_interface_addr si; 3263 3264 si.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE; 3265 si.channel = IPMI_BMC_CHANNEL; 3266 si.lun = 0; 3267 3268 msg.netfn = 
IPMI_NETFN_APP_REQUEST; 3269 msg.cmd = IPMI_GET_DEVICE_GUID_CMD; 3270 msg.data = NULL; 3271 msg.data_len = 0; 3272 return i_ipmi_request(NULL, 3273 intf, 3274 (struct ipmi_addr *) &si, 3275 0, 3276 &msg, 3277 intf, 3278 NULL, 3279 NULL, 3280 0, 3281 intf->addrinfo[0].address, 3282 intf->addrinfo[0].lun, 3283 -1, 0); 3284 } 3285 3286 static void guid_handler(struct ipmi_smi *intf, struct ipmi_recv_msg *msg) 3287 { 3288 struct bmc_device *bmc = intf->bmc; 3289 3290 if ((msg->addr.addr_type != IPMI_SYSTEM_INTERFACE_ADDR_TYPE) 3291 || (msg->msg.netfn != IPMI_NETFN_APP_RESPONSE) 3292 || (msg->msg.cmd != IPMI_GET_DEVICE_GUID_CMD)) 3293 /* Not for me */ 3294 return; 3295 3296 if (msg->msg.data[0] != 0) { 3297 /* Error from getting the GUID, the BMC doesn't have one. */ 3298 bmc->dyn_guid_set = 0; 3299 goto out; 3300 } 3301 3302 if (msg->msg.data_len < UUID_SIZE + 1) { 3303 bmc->dyn_guid_set = 0; 3304 dev_warn(intf->si_dev, 3305 "The GUID response from the BMC was too short, it was %d but should have been %d. Assuming GUID is not available.\n", 3306 msg->msg.data_len, UUID_SIZE + 1); 3307 goto out; 3308 } 3309 3310 import_guid(&bmc->fetch_guid, msg->msg.data + 1); 3311 /* 3312 * Make sure the guid data is available before setting 3313 * dyn_guid_set. 3314 */ 3315 smp_wmb(); 3316 bmc->dyn_guid_set = 1; 3317 out: 3318 wake_up(&intf->waitq); 3319 } 3320 3321 static void __get_guid(struct ipmi_smi *intf) 3322 { 3323 int rv; 3324 struct bmc_device *bmc = intf->bmc; 3325 3326 bmc->dyn_guid_set = 2; 3327 intf->null_user_handler = guid_handler; 3328 rv = send_guid_cmd(intf, 0); 3329 if (rv) 3330 /* Send failed, no GUID available. */ 3331 bmc->dyn_guid_set = 0; 3332 else 3333 wait_event(intf->waitq, bmc->dyn_guid_set != 2); 3334 3335 /* dyn_guid_set makes the guid data available. */ 3336 smp_rmb(); 3337 3338 intf->null_user_handler = NULL; 3339 } 3340 3341 static int 3342 send_channel_info_cmd(struct ipmi_smi *intf, int chan) 3343 { 3344 struct kernel_ipmi_msg msg; 3345 unsigned char data[1]; 3346 struct ipmi_system_interface_addr si; 3347 3348 si.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE; 3349 si.channel = IPMI_BMC_CHANNEL; 3350 si.lun = 0; 3351 3352 msg.netfn = IPMI_NETFN_APP_REQUEST; 3353 msg.cmd = IPMI_GET_CHANNEL_INFO_CMD; 3354 msg.data = data; 3355 msg.data_len = 1; 3356 data[0] = chan; 3357 return i_ipmi_request(NULL, 3358 intf, 3359 (struct ipmi_addr *) &si, 3360 0, 3361 &msg, 3362 intf, 3363 NULL, 3364 NULL, 3365 0, 3366 intf->addrinfo[0].address, 3367 intf->addrinfo[0].lun, 3368 -1, 0); 3369 } 3370 3371 static void 3372 channel_handler(struct ipmi_smi *intf, struct ipmi_recv_msg *msg) 3373 { 3374 int rv = 0; 3375 int ch; 3376 unsigned int set = intf->curr_working_cset; 3377 struct ipmi_channel *chans; 3378 3379 if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) 3380 && (msg->msg.netfn == IPMI_NETFN_APP_RESPONSE) 3381 && (msg->msg.cmd == IPMI_GET_CHANNEL_INFO_CMD)) { 3382 /* It's the one we want */ 3383 if (msg->msg.data[0] != 0) { 3384 /* Got an error from the channel, just go on. */ 3385 if (msg->msg.data[0] == IPMI_INVALID_COMMAND_ERR) { 3386 /* 3387 * If the MC does not support this 3388 * command, that is legal. We just 3389 * assume it has one IPMB at channel 3390 * zero. 
3391 */ 3392 intf->wchannels[set].c[0].medium 3393 = IPMI_CHANNEL_MEDIUM_IPMB; 3394 intf->wchannels[set].c[0].protocol 3395 = IPMI_CHANNEL_PROTOCOL_IPMB; 3396 3397 intf->channel_list = intf->wchannels + set; 3398 intf->channels_ready = true; 3399 wake_up(&intf->waitq); 3400 goto out; 3401 } 3402 goto next_channel; 3403 } 3404 if (msg->msg.data_len < 4) { 3405 /* Message not big enough, just go on. */ 3406 goto next_channel; 3407 } 3408 ch = intf->curr_channel; 3409 chans = intf->wchannels[set].c; 3410 chans[ch].medium = msg->msg.data[2] & 0x7f; 3411 chans[ch].protocol = msg->msg.data[3] & 0x1f; 3412 3413 next_channel: 3414 intf->curr_channel++; 3415 if (intf->curr_channel >= IPMI_MAX_CHANNELS) { 3416 intf->channel_list = intf->wchannels + set; 3417 intf->channels_ready = true; 3418 wake_up(&intf->waitq); 3419 } else { 3420 intf->channel_list = intf->wchannels + set; 3421 intf->channels_ready = true; 3422 rv = send_channel_info_cmd(intf, intf->curr_channel); 3423 } 3424 3425 if (rv) { 3426 /* Got an error somehow, just give up. */ 3427 dev_warn(intf->si_dev, 3428 "Error sending channel information for channel %d: %d\n", 3429 intf->curr_channel, rv); 3430 3431 intf->channel_list = intf->wchannels + set; 3432 intf->channels_ready = true; 3433 wake_up(&intf->waitq); 3434 } 3435 } 3436 out: 3437 return; 3438 } 3439 3440 /* 3441 * Must be holding intf->bmc_reg_mutex to call this. 3442 */ 3443 static int __scan_channels(struct ipmi_smi *intf, struct ipmi_device_id *id) 3444 { 3445 int rv; 3446 3447 if (ipmi_version_major(id) > 1 3448 || (ipmi_version_major(id) == 1 3449 && ipmi_version_minor(id) >= 5)) { 3450 unsigned int set; 3451 3452 /* 3453 * Start scanning the channels to see what is 3454 * available. 3455 */ 3456 set = !intf->curr_working_cset; 3457 intf->curr_working_cset = set; 3458 memset(&intf->wchannels[set], 0, 3459 sizeof(struct ipmi_channel_set)); 3460 3461 intf->null_user_handler = channel_handler; 3462 intf->curr_channel = 0; 3463 rv = send_channel_info_cmd(intf, 0); 3464 if (rv) { 3465 dev_warn(intf->si_dev, 3466 "Error sending channel information for channel 0, %d\n", 3467 rv); 3468 intf->null_user_handler = NULL; 3469 return -EIO; 3470 } 3471 3472 /* Wait for the channel info to be read. */ 3473 wait_event(intf->waitq, intf->channels_ready); 3474 intf->null_user_handler = NULL; 3475 } else { 3476 unsigned int set = intf->curr_working_cset; 3477 3478 /* Assume a single IPMB channel at zero. 
*/ 3479 intf->wchannels[set].c[0].medium = IPMI_CHANNEL_MEDIUM_IPMB; 3480 intf->wchannels[set].c[0].protocol = IPMI_CHANNEL_PROTOCOL_IPMB; 3481 intf->channel_list = intf->wchannels + set; 3482 intf->channels_ready = true; 3483 } 3484 3485 return 0; 3486 } 3487 3488 static void ipmi_poll(struct ipmi_smi *intf) 3489 { 3490 if (intf->handlers->poll) 3491 intf->handlers->poll(intf->send_info); 3492 /* In case something came in */ 3493 handle_new_recv_msgs(intf); 3494 } 3495 3496 void ipmi_poll_interface(struct ipmi_user *user) 3497 { 3498 ipmi_poll(user->intf); 3499 } 3500 EXPORT_SYMBOL(ipmi_poll_interface); 3501 3502 static ssize_t nr_users_show(struct device *dev, 3503 struct device_attribute *attr, 3504 char *buf) 3505 { 3506 struct ipmi_smi *intf = container_of(attr, 3507 struct ipmi_smi, nr_users_devattr); 3508 3509 return sysfs_emit(buf, "%d\n", atomic_read(&intf->nr_users)); 3510 } 3511 static DEVICE_ATTR_RO(nr_users); 3512 3513 static ssize_t nr_msgs_show(struct device *dev, 3514 struct device_attribute *attr, 3515 char *buf) 3516 { 3517 struct ipmi_smi *intf = container_of(attr, 3518 struct ipmi_smi, nr_msgs_devattr); 3519 struct ipmi_user *user; 3520 unsigned int count = 0; 3521 3522 mutex_lock(&intf->users_mutex); 3523 list_for_each_entry(user, &intf->users, link) 3524 count += atomic_read(&user->nr_msgs); 3525 mutex_unlock(&intf->users_mutex); 3526 3527 return sysfs_emit(buf, "%u\n", count); 3528 } 3529 static DEVICE_ATTR_RO(nr_msgs); 3530 3531 static ssize_t maintenance_mode_show(struct device *dev, 3532 struct device_attribute *attr, 3533 char *buf) 3534 { 3535 struct ipmi_smi *intf = container_of(attr, 3536 struct ipmi_smi, 3537 maintenance_mode_devattr); 3538 3539 return sysfs_emit(buf, "%u %d\n", intf->maintenance_mode_state, 3540 intf->auto_maintenance_timeout); 3541 } 3542 static DEVICE_ATTR_RO(maintenance_mode); 3543 3544 static void redo_bmc_reg(struct work_struct *work) 3545 { 3546 struct ipmi_smi *intf = container_of(work, struct ipmi_smi, 3547 bmc_reg_work); 3548 3549 if (!intf->in_shutdown) 3550 bmc_get_device_id(intf, NULL, NULL, NULL, NULL); 3551 3552 kref_put(&intf->refcount, intf_free); 3553 } 3554 3555 int ipmi_add_smi(struct module *owner, 3556 const struct ipmi_smi_handlers *handlers, 3557 void *send_info, 3558 struct device *si_dev, 3559 unsigned char slave_addr) 3560 { 3561 int i, j; 3562 int rv; 3563 struct ipmi_smi *intf, *tintf; 3564 struct list_head *link; 3565 struct ipmi_device_id id; 3566 3567 /* 3568 * Make sure the driver is actually initialized, this handles 3569 * problems with initialization order. 3570 */ 3571 rv = ipmi_init_msghandler(); 3572 if (rv) 3573 return rv; 3574 3575 intf = kzalloc(sizeof(*intf), GFP_KERNEL); 3576 if (!intf) 3577 return -ENOMEM; 3578 3579 intf->owner = owner; 3580 intf->bmc = &intf->tmp_bmc; 3581 INIT_LIST_HEAD(&intf->bmc->intfs); 3582 mutex_init(&intf->bmc->dyn_mutex); 3583 INIT_LIST_HEAD(&intf->bmc_link); 3584 mutex_init(&intf->bmc_reg_mutex); 3585 intf->intf_num = -1; /* Mark it invalid for now. 
*/ 3586 kref_init(&intf->refcount); 3587 INIT_WORK(&intf->bmc_reg_work, redo_bmc_reg); 3588 intf->si_dev = si_dev; 3589 for (j = 0; j < IPMI_MAX_CHANNELS; j++) { 3590 intf->addrinfo[j].address = IPMI_BMC_SLAVE_ADDR; 3591 intf->addrinfo[j].lun = 2; 3592 } 3593 if (slave_addr != 0) 3594 intf->addrinfo[0].address = slave_addr; 3595 INIT_LIST_HEAD(&intf->user_msgs); 3596 mutex_init(&intf->user_msgs_mutex); 3597 INIT_LIST_HEAD(&intf->users); 3598 mutex_init(&intf->users_mutex); 3599 atomic_set(&intf->nr_users, 0); 3600 intf->handlers = handlers; 3601 intf->send_info = send_info; 3602 mutex_init(&intf->seq_lock); 3603 for (j = 0; j < IPMI_IPMB_NUM_SEQ; j++) { 3604 intf->seq_table[j].inuse = 0; 3605 intf->seq_table[j].seqid = 0; 3606 } 3607 intf->curr_seq = 0; 3608 spin_lock_init(&intf->waiting_rcv_msgs_lock); 3609 INIT_LIST_HEAD(&intf->waiting_rcv_msgs); 3610 INIT_WORK(&intf->smi_work, smi_work); 3611 atomic_set(&intf->watchdog_pretimeouts_to_deliver, 0); 3612 spin_lock_init(&intf->xmit_msgs_lock); 3613 INIT_LIST_HEAD(&intf->xmit_msgs); 3614 INIT_LIST_HEAD(&intf->hp_xmit_msgs); 3615 mutex_init(&intf->events_mutex); 3616 spin_lock_init(&intf->watch_lock); 3617 atomic_set(&intf->event_waiters, 0); 3618 intf->ticks_to_req_ev = IPMI_REQUEST_EV_TIME; 3619 INIT_LIST_HEAD(&intf->waiting_events); 3620 intf->waiting_events_count = 0; 3621 mutex_init(&intf->cmd_rcvrs_mutex); 3622 spin_lock_init(&intf->maintenance_mode_lock); 3623 INIT_LIST_HEAD(&intf->cmd_rcvrs); 3624 init_waitqueue_head(&intf->waitq); 3625 for (i = 0; i < IPMI_NUM_STATS; i++) 3626 atomic_set(&intf->stats[i], 0); 3627 3628 /* 3629 * Grab the watchers mutex so we can deliver the new interface 3630 * without races. 3631 */ 3632 mutex_lock(&smi_watchers_mutex); 3633 mutex_lock(&ipmi_interfaces_mutex); 3634 /* Look for a hole in the numbers. */ 3635 i = 0; 3636 link = &ipmi_interfaces; 3637 list_for_each_entry(tintf, &ipmi_interfaces, link) { 3638 if (tintf->intf_num != i) { 3639 link = &tintf->link; 3640 break; 3641 } 3642 i++; 3643 } 3644 /* Add the new interface in numeric order. 
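 * The loop above stopped either at the first gap in the numbering
 * or at the end of the list, so inserting before "link" keeps the
 * list sorted; e.g. with interfaces 0 and 2 present, the new one
 * becomes interface 1.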
*/ 3645 if (i == 0) 3646 list_add(&intf->link, &ipmi_interfaces); 3647 else 3648 list_add_tail(&intf->link, link); 3649 3650 rv = handlers->start_processing(send_info, intf); 3651 if (rv) 3652 goto out_err; 3653 3654 rv = __bmc_get_device_id(intf, NULL, &id, NULL, NULL, i); 3655 if (rv) { 3656 dev_err(si_dev, "Unable to get the device id: %d\n", rv); 3657 goto out_err_started; 3658 } 3659 3660 mutex_lock(&intf->bmc_reg_mutex); 3661 rv = __scan_channels(intf, &id); 3662 mutex_unlock(&intf->bmc_reg_mutex); 3663 if (rv) 3664 goto out_err_bmc_reg; 3665 3666 intf->nr_users_devattr = dev_attr_nr_users; 3667 sysfs_attr_init(&intf->nr_users_devattr.attr); 3668 rv = device_create_file(intf->si_dev, &intf->nr_users_devattr); 3669 if (rv) 3670 goto out_err_bmc_reg; 3671 3672 intf->nr_msgs_devattr = dev_attr_nr_msgs; 3673 sysfs_attr_init(&intf->nr_msgs_devattr.attr); 3674 rv = device_create_file(intf->si_dev, &intf->nr_msgs_devattr); 3675 if (rv) { 3676 device_remove_file(intf->si_dev, &intf->nr_users_devattr); 3677 goto out_err_bmc_reg; 3678 } 3679 3680 intf->maintenance_mode_devattr = dev_attr_maintenance_mode; 3681 sysfs_attr_init(&intf->maintenance_mode_devattr.attr); 3682 rv = device_create_file(intf->si_dev, &intf->maintenance_mode_devattr); 3683 if (rv) { 3684 device_remove_file(intf->si_dev, &intf->nr_users_devattr); 3685 goto out_err_bmc_reg; 3686 } 3687 3688 intf->intf_num = i; 3689 mutex_unlock(&ipmi_interfaces_mutex); 3690 3691 /* After this point the interface is legal to use. */ 3692 call_smi_watchers(i, intf->si_dev); 3693 3694 mutex_unlock(&smi_watchers_mutex); 3695 3696 return 0; 3697 3698 out_err_bmc_reg: 3699 ipmi_bmc_unregister(intf); 3700 out_err_started: 3701 if (intf->handlers->shutdown) 3702 intf->handlers->shutdown(intf->send_info); 3703 out_err: 3704 list_del(&intf->link); 3705 mutex_unlock(&ipmi_interfaces_mutex); 3706 mutex_unlock(&smi_watchers_mutex); 3707 kref_put(&intf->refcount, intf_free); 3708 3709 return rv; 3710 } 3711 EXPORT_SYMBOL(ipmi_add_smi); 3712 3713 static void deliver_smi_err_response(struct ipmi_smi *intf, 3714 struct ipmi_smi_msg *msg, 3715 unsigned char err) 3716 { 3717 int rv; 3718 msg->rsp[0] = msg->data[0] | 4; 3719 msg->rsp[1] = msg->data[1]; 3720 msg->rsp[2] = err; 3721 msg->rsp_size = 3; 3722 3723 /* This will never requeue, but it may ask us to free the message. */ 3724 rv = handle_one_recv_msg(intf, msg); 3725 if (rv == 0) 3726 ipmi_free_smi_msg(msg); 3727 } 3728 3729 static void cleanup_smi_msgs(struct ipmi_smi *intf) 3730 { 3731 int i; 3732 struct seq_table *ent; 3733 struct ipmi_smi_msg *msg; 3734 struct list_head *entry; 3735 struct list_head tmplist; 3736 3737 /* Clear out our transmit queues and hold the messages. */ 3738 INIT_LIST_HEAD(&tmplist); 3739 list_splice_tail(&intf->hp_xmit_msgs, &tmplist); 3740 list_splice_tail(&intf->xmit_msgs, &tmplist); 3741 3742 /* Current message first, to preserve order */ 3743 while (intf->curr_msg && !list_empty(&intf->waiting_rcv_msgs)) { 3744 /* Wait for the message to clear out. */ 3745 schedule_timeout(1); 3746 } 3747 3748 /* No need for locks, the interface is down. */ 3749 3750 /* 3751 * Return errors for all pending messages in queue and in the 3752 * tables waiting for remote responses. 
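 * Each of them is completed with IPMI_ERR_UNSPECIFIED, the generic
 * "unspecified error" completion code, so users see a normal
 * response instead of a silently dropped message.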
3753 */ 3754 while (!list_empty(&tmplist)) { 3755 entry = tmplist.next; 3756 list_del(entry); 3757 msg = list_entry(entry, struct ipmi_smi_msg, link); 3758 deliver_smi_err_response(intf, msg, IPMI_ERR_UNSPECIFIED); 3759 } 3760 3761 for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) { 3762 ent = &intf->seq_table[i]; 3763 if (!ent->inuse) 3764 continue; 3765 deliver_err_response(intf, ent->recv_msg, IPMI_ERR_UNSPECIFIED); 3766 } 3767 } 3768 3769 void ipmi_unregister_smi(struct ipmi_smi *intf) 3770 { 3771 struct ipmi_smi_watcher *w; 3772 int intf_num; 3773 3774 if (!intf) 3775 return; 3776 3777 intf_num = intf->intf_num; 3778 mutex_lock(&ipmi_interfaces_mutex); 3779 cancel_work_sync(&intf->smi_work); 3780 /* smi_work() can no longer be in progress after this. */ 3781 3782 intf->intf_num = -1; 3783 intf->in_shutdown = true; 3784 list_del(&intf->link); 3785 mutex_unlock(&ipmi_interfaces_mutex); 3786 3787 /* 3788 * At this point no users can be added to the interface and no 3789 * new messages can be sent. 3790 */ 3791 3792 if (intf->handlers->shutdown) 3793 intf->handlers->shutdown(intf->send_info); 3794 3795 device_remove_file(intf->si_dev, &intf->maintenance_mode_devattr); 3796 device_remove_file(intf->si_dev, &intf->nr_msgs_devattr); 3797 device_remove_file(intf->si_dev, &intf->nr_users_devattr); 3798 3799 /* 3800 * Call all the watcher interfaces to tell them that 3801 * an interface is going away. 3802 */ 3803 mutex_lock(&smi_watchers_mutex); 3804 list_for_each_entry(w, &smi_watchers, link) 3805 w->smi_gone(intf_num); 3806 mutex_unlock(&smi_watchers_mutex); 3807 3808 mutex_lock(&intf->users_mutex); 3809 while (!list_empty(&intf->users)) { 3810 struct ipmi_user *user = list_first_entry(&intf->users, 3811 struct ipmi_user, link); 3812 3813 _ipmi_destroy_user(user); 3814 } 3815 mutex_unlock(&intf->users_mutex); 3816 3817 cleanup_smi_msgs(intf); 3818 3819 ipmi_bmc_unregister(intf); 3820 3821 kref_put(&intf->refcount, intf_free); 3822 } 3823 EXPORT_SYMBOL(ipmi_unregister_smi); 3824 3825 static int handle_ipmb_get_msg_rsp(struct ipmi_smi *intf, 3826 struct ipmi_smi_msg *msg) 3827 { 3828 struct ipmi_ipmb_addr ipmb_addr; 3829 struct ipmi_recv_msg *recv_msg; 3830 3831 /* 3832 * This is 11, not 10, because the response must contain a 3833 * completion code. 3834 */ 3835 if (msg->rsp_size < 11) { 3836 /* Message not big enough, just ignore it. */ 3837 ipmi_inc_stat(intf, invalid_ipmb_responses); 3838 return 0; 3839 } 3840 3841 if (msg->rsp[2] != 0) { 3842 /* An error getting the response, just ignore it. */ 3843 return 0; 3844 } 3845 3846 ipmb_addr.addr_type = IPMI_IPMB_ADDR_TYPE; 3847 ipmb_addr.slave_addr = msg->rsp[6]; 3848 ipmb_addr.channel = msg->rsp[3] & 0x0f; 3849 ipmb_addr.lun = msg->rsp[7] & 3; 3850 3851 /* 3852 * It's a response from a remote entity. Look up the sequence 3853 * number and handle the response. 3854 */ 3855 if (intf_find_seq(intf, 3856 msg->rsp[7] >> 2, 3857 msg->rsp[3] & 0x0f, 3858 msg->rsp[8], 3859 (msg->rsp[4] >> 2) & (~1), 3860 (struct ipmi_addr *) &ipmb_addr, 3861 &recv_msg)) { 3862 /* 3863 * We were unable to find the sequence number, 3864 * so just nuke the message. 3865 */ 3866 ipmi_inc_stat(intf, unhandled_ipmb_responses); 3867 return 0; 3868 } 3869 3870 memcpy(recv_msg->msg_data, &msg->rsp[9], msg->rsp_size - 9); 3871 /* 3872 * The other fields matched, so no need to set them, except 3873 * for netfn, which needs to be the response that was 3874 * returned, not the request value. 
3875 */ 3876 recv_msg->msg.netfn = msg->rsp[4] >> 2; 3877 recv_msg->msg.data = recv_msg->msg_data; 3878 recv_msg->msg.data_len = msg->rsp_size - 10; 3879 recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE; 3880 if (deliver_response(intf, recv_msg)) 3881 ipmi_inc_stat(intf, unhandled_ipmb_responses); 3882 else 3883 ipmi_inc_stat(intf, handled_ipmb_responses); 3884 3885 return 0; 3886 } 3887 3888 static int handle_ipmb_get_msg_cmd(struct ipmi_smi *intf, 3889 struct ipmi_smi_msg *msg) 3890 { 3891 struct cmd_rcvr *rcvr; 3892 int rv = 0; 3893 unsigned char netfn; 3894 unsigned char cmd; 3895 unsigned char chan; 3896 struct ipmi_user *user = NULL; 3897 struct ipmi_ipmb_addr *ipmb_addr; 3898 struct ipmi_recv_msg *recv_msg = NULL; 3899 3900 if (msg->rsp_size < 10) { 3901 /* Message not big enough, just ignore it. */ 3902 ipmi_inc_stat(intf, invalid_commands); 3903 return 0; 3904 } 3905 3906 if (msg->rsp[2] != 0) { 3907 /* An error getting the response, just ignore it. */ 3908 return 0; 3909 } 3910 3911 netfn = msg->rsp[4] >> 2; 3912 cmd = msg->rsp[8]; 3913 chan = msg->rsp[3] & 0xf; 3914 3915 rcu_read_lock(); 3916 rcvr = find_cmd_rcvr(intf, netfn, cmd, chan); 3917 if (rcvr) { 3918 user = rcvr->user; 3919 recv_msg = ipmi_alloc_recv_msg(user); 3920 } 3921 rcu_read_unlock(); 3922 3923 if (user == NULL) { 3924 /* We didn't find a user, deliver an error response. */ 3925 ipmi_inc_stat(intf, unhandled_commands); 3926 3927 msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2); 3928 msg->data[1] = IPMI_SEND_MSG_CMD; 3929 msg->data[2] = msg->rsp[3]; 3930 msg->data[3] = msg->rsp[6]; 3931 msg->data[4] = ((netfn + 1) << 2) | (msg->rsp[7] & 0x3); 3932 msg->data[5] = ipmb_checksum(&msg->data[3], 2); 3933 msg->data[6] = intf->addrinfo[msg->rsp[3] & 0xf].address; 3934 /* rqseq/lun */ 3935 msg->data[7] = (msg->rsp[7] & 0xfc) | (msg->rsp[4] & 0x3); 3936 msg->data[8] = msg->rsp[8]; /* cmd */ 3937 msg->data[9] = IPMI_INVALID_CMD_COMPLETION_CODE; 3938 msg->data[10] = ipmb_checksum(&msg->data[6], 4); 3939 msg->data_size = 11; 3940 3941 dev_dbg(intf->si_dev, "Invalid command: %*ph\n", 3942 msg->data_size, msg->data); 3943 3944 smi_send(intf, intf->handlers, msg, 0); 3945 /* 3946 * We used the message, so return the value that 3947 * causes it to not be freed or queued. 3948 */ 3949 rv = -1; 3950 } else if (!IS_ERR(recv_msg)) { 3951 /* Extract the source address from the data. */ 3952 ipmb_addr = (struct ipmi_ipmb_addr *) &recv_msg->addr; 3953 ipmb_addr->addr_type = IPMI_IPMB_ADDR_TYPE; 3954 ipmb_addr->slave_addr = msg->rsp[6]; 3955 ipmb_addr->lun = msg->rsp[7] & 3; 3956 ipmb_addr->channel = msg->rsp[3] & 0xf; 3957 3958 /* 3959 * Extract the rest of the message information 3960 * from the IPMB header. 3961 */ 3962 recv_msg->recv_type = IPMI_CMD_RECV_TYPE; 3963 recv_msg->msgid = msg->rsp[7] >> 2; 3964 recv_msg->msg.netfn = msg->rsp[4] >> 2; 3965 recv_msg->msg.cmd = msg->rsp[8]; 3966 recv_msg->msg.data = recv_msg->msg_data; 3967 3968 /* 3969 * We chop off 10, not 9 bytes because the checksum 3970 * at the end also needs to be removed. 3971 */ 3972 recv_msg->msg.data_len = msg->rsp_size - 10; 3973 memcpy(recv_msg->msg_data, &msg->rsp[9], 3974 msg->rsp_size - 10); 3975 if (deliver_response(intf, recv_msg)) 3976 ipmi_inc_stat(intf, unhandled_commands); 3977 else 3978 ipmi_inc_stat(intf, handled_commands); 3979 } else { 3980 /* 3981 * We couldn't allocate memory for the message, so 3982 * requeue it for handling later. 
3983 */ 3984 rv = 1; 3985 } 3986 3987 return rv; 3988 } 3989 3990 static int handle_ipmb_direct_rcv_cmd(struct ipmi_smi *intf, 3991 struct ipmi_smi_msg *msg) 3992 { 3993 struct cmd_rcvr *rcvr; 3994 int rv = 0; 3995 struct ipmi_user *user = NULL; 3996 struct ipmi_ipmb_direct_addr *daddr; 3997 struct ipmi_recv_msg *recv_msg = NULL; 3998 unsigned char netfn = msg->rsp[0] >> 2; 3999 unsigned char cmd = msg->rsp[3]; 4000 4001 rcu_read_lock(); 4002 /* We always use channel 0 for direct messages. */ 4003 rcvr = find_cmd_rcvr(intf, netfn, cmd, 0); 4004 if (rcvr) { 4005 user = rcvr->user; 4006 recv_msg = ipmi_alloc_recv_msg(user); 4007 } 4008 rcu_read_unlock(); 4009 4010 if (user == NULL) { 4011 /* We didn't find a user, deliver an error response. */ 4012 ipmi_inc_stat(intf, unhandled_commands); 4013 4014 msg->data[0] = (netfn + 1) << 2; 4015 msg->data[0] |= msg->rsp[2] & 0x3; /* rqLUN */ 4016 msg->data[1] = msg->rsp[1]; /* Addr */ 4017 msg->data[2] = msg->rsp[2] & ~0x3; /* rqSeq */ 4018 msg->data[2] |= msg->rsp[0] & 0x3; /* rsLUN */ 4019 msg->data[3] = cmd; 4020 msg->data[4] = IPMI_INVALID_CMD_COMPLETION_CODE; 4021 msg->data_size = 5; 4022 4023 smi_send(intf, intf->handlers, msg, 0); 4024 /* 4025 * We used the message, so return the value that 4026 * causes it to not be freed or queued. 4027 */ 4028 rv = -1; 4029 } else if (!IS_ERR(recv_msg)) { 4030 /* Extract the source address from the data. */ 4031 daddr = (struct ipmi_ipmb_direct_addr *)&recv_msg->addr; 4032 daddr->addr_type = IPMI_IPMB_DIRECT_ADDR_TYPE; 4033 daddr->channel = 0; 4034 daddr->slave_addr = msg->rsp[1]; 4035 daddr->rs_lun = msg->rsp[0] & 3; 4036 daddr->rq_lun = msg->rsp[2] & 3; 4037 4038 /* 4039 * Extract the rest of the message information 4040 * from the IPMB header. 4041 */ 4042 recv_msg->recv_type = IPMI_CMD_RECV_TYPE; 4043 recv_msg->msgid = (msg->rsp[2] >> 2); 4044 recv_msg->msg.netfn = msg->rsp[0] >> 2; 4045 recv_msg->msg.cmd = msg->rsp[3]; 4046 recv_msg->msg.data = recv_msg->msg_data; 4047 4048 recv_msg->msg.data_len = msg->rsp_size - 4; 4049 memcpy(recv_msg->msg_data, msg->rsp + 4, 4050 msg->rsp_size - 4); 4051 if (deliver_response(intf, recv_msg)) 4052 ipmi_inc_stat(intf, unhandled_commands); 4053 else 4054 ipmi_inc_stat(intf, handled_commands); 4055 } else { 4056 /* 4057 * We couldn't allocate memory for the message, so 4058 * requeue it for handling later. 4059 */ 4060 rv = 1; 4061 } 4062 4063 return rv; 4064 } 4065 4066 static int handle_ipmb_direct_rcv_rsp(struct ipmi_smi *intf, 4067 struct ipmi_smi_msg *msg) 4068 { 4069 struct ipmi_recv_msg *recv_msg; 4070 struct ipmi_ipmb_direct_addr *daddr; 4071 4072 recv_msg = msg->recv_msg; 4073 if (recv_msg == NULL) { 4074 dev_warn(intf->si_dev, 4075 "IPMI direct message received with no owner. This could be because of a malformed message, or because of a hardware error. 
Contact your hardware vendor for assistance.\n"); 4076 return 0; 4077 } 4078 4079 recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE; 4080 recv_msg->msgid = msg->msgid; 4081 daddr = (struct ipmi_ipmb_direct_addr *) &recv_msg->addr; 4082 daddr->addr_type = IPMI_IPMB_DIRECT_ADDR_TYPE; 4083 daddr->channel = 0; 4084 daddr->slave_addr = msg->rsp[1]; 4085 daddr->rq_lun = msg->rsp[0] & 3; 4086 daddr->rs_lun = msg->rsp[2] & 3; 4087 recv_msg->msg.netfn = msg->rsp[0] >> 2; 4088 recv_msg->msg.cmd = msg->rsp[3]; 4089 memcpy(recv_msg->msg_data, &msg->rsp[4], msg->rsp_size - 4); 4090 recv_msg->msg.data = recv_msg->msg_data; 4091 recv_msg->msg.data_len = msg->rsp_size - 4; 4092 deliver_local_response(intf, recv_msg); 4093 4094 return 0; 4095 } 4096 4097 static int handle_lan_get_msg_rsp(struct ipmi_smi *intf, 4098 struct ipmi_smi_msg *msg) 4099 { 4100 struct ipmi_lan_addr lan_addr; 4101 struct ipmi_recv_msg *recv_msg; 4102 4103 4104 /* 4105 * This is 13, not 12, because the response must contain a 4106 * completion code. 4107 */ 4108 if (msg->rsp_size < 13) { 4109 /* Message not big enough, just ignore it. */ 4110 ipmi_inc_stat(intf, invalid_lan_responses); 4111 return 0; 4112 } 4113 4114 if (msg->rsp[2] != 0) { 4115 /* An error getting the response, just ignore it. */ 4116 return 0; 4117 } 4118 4119 lan_addr.addr_type = IPMI_LAN_ADDR_TYPE; 4120 lan_addr.session_handle = msg->rsp[4]; 4121 lan_addr.remote_SWID = msg->rsp[8]; 4122 lan_addr.local_SWID = msg->rsp[5]; 4123 lan_addr.channel = msg->rsp[3] & 0x0f; 4124 lan_addr.privilege = msg->rsp[3] >> 4; 4125 lan_addr.lun = msg->rsp[9] & 3; 4126 4127 /* 4128 * It's a response from a remote entity. Look up the sequence 4129 * number and handle the response. 4130 */ 4131 if (intf_find_seq(intf, 4132 msg->rsp[9] >> 2, 4133 msg->rsp[3] & 0x0f, 4134 msg->rsp[10], 4135 (msg->rsp[6] >> 2) & (~1), 4136 (struct ipmi_addr *) &lan_addr, 4137 &recv_msg)) { 4138 /* 4139 * We were unable to find the sequence number, 4140 * so just nuke the message. 4141 */ 4142 ipmi_inc_stat(intf, unhandled_lan_responses); 4143 return 0; 4144 } 4145 4146 memcpy(recv_msg->msg_data, &msg->rsp[11], msg->rsp_size - 11); 4147 /* 4148 * The other fields matched, so no need to set them, except 4149 * for netfn, which needs to be the response that was 4150 * returned, not the request value. 4151 */ 4152 recv_msg->msg.netfn = msg->rsp[6] >> 2; 4153 recv_msg->msg.data = recv_msg->msg_data; 4154 recv_msg->msg.data_len = msg->rsp_size - 12; 4155 recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE; 4156 if (deliver_response(intf, recv_msg)) 4157 ipmi_inc_stat(intf, unhandled_lan_responses); 4158 else 4159 ipmi_inc_stat(intf, handled_lan_responses); 4160 4161 return 0; 4162 } 4163 4164 static int handle_lan_get_msg_cmd(struct ipmi_smi *intf, 4165 struct ipmi_smi_msg *msg) 4166 { 4167 struct cmd_rcvr *rcvr; 4168 int rv = 0; 4169 unsigned char netfn; 4170 unsigned char cmd; 4171 unsigned char chan; 4172 struct ipmi_user *user = NULL; 4173 struct ipmi_lan_addr *lan_addr; 4174 struct ipmi_recv_msg *recv_msg = NULL; 4175 4176 if (msg->rsp_size < 12) { 4177 /* Message not big enough, just ignore it. */ 4178 ipmi_inc_stat(intf, invalid_commands); 4179 return 0; 4180 } 4181 4182 if (msg->rsp[2] != 0) { 4183 /* An error getting the response, just ignore it. 
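		 * (rsp[2] is the completion code of the Get Message
		 * command itself, so a nonzero value means the BMC had
		 * no valid message to return here.)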
*/ 4184 return 0; 4185 } 4186 4187 netfn = msg->rsp[6] >> 2; 4188 cmd = msg->rsp[10]; 4189 chan = msg->rsp[3] & 0xf; 4190 4191 rcu_read_lock(); 4192 rcvr = find_cmd_rcvr(intf, netfn, cmd, chan); 4193 if (rcvr) { 4194 user = rcvr->user; 4195 recv_msg = ipmi_alloc_recv_msg(user); 4196 } 4197 rcu_read_unlock(); 4198 4199 if (user == NULL) { 4200 /* We didn't find a user, just give up and return an error. */ 4201 ipmi_inc_stat(intf, unhandled_commands); 4202 4203 msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2); 4204 msg->data[1] = IPMI_SEND_MSG_CMD; 4205 msg->data[2] = chan; 4206 msg->data[3] = msg->rsp[4]; /* handle */ 4207 msg->data[4] = msg->rsp[8]; /* rsSWID */ 4208 msg->data[5] = ((netfn + 1) << 2) | (msg->rsp[9] & 0x3); 4209 msg->data[6] = ipmb_checksum(&msg->data[3], 3); 4210 msg->data[7] = msg->rsp[5]; /* rqSWID */ 4211 /* rqseq/lun */ 4212 msg->data[8] = (msg->rsp[9] & 0xfc) | (msg->rsp[6] & 0x3); 4213 msg->data[9] = cmd; 4214 msg->data[10] = IPMI_INVALID_CMD_COMPLETION_CODE; 4215 msg->data[11] = ipmb_checksum(&msg->data[7], 4); 4216 msg->data_size = 12; 4217 4218 dev_dbg(intf->si_dev, "Invalid command: %*ph\n", 4219 msg->data_size, msg->data); 4220 4221 smi_send(intf, intf->handlers, msg, 0); 4222 /* 4223 * We used the message, so return the value that 4224 * causes it to not be freed or queued. 4225 */ 4226 rv = -1; 4227 } else if (!IS_ERR(recv_msg)) { 4228 /* Extract the source address from the data. */ 4229 lan_addr = (struct ipmi_lan_addr *) &recv_msg->addr; 4230 lan_addr->addr_type = IPMI_LAN_ADDR_TYPE; 4231 lan_addr->session_handle = msg->rsp[4]; 4232 lan_addr->remote_SWID = msg->rsp[8]; 4233 lan_addr->local_SWID = msg->rsp[5]; 4234 lan_addr->lun = msg->rsp[9] & 3; 4235 lan_addr->channel = msg->rsp[3] & 0xf; 4236 lan_addr->privilege = msg->rsp[3] >> 4; 4237 4238 /* 4239 * Extract the rest of the message information 4240 * from the IPMB header. 4241 */ 4242 recv_msg->recv_type = IPMI_CMD_RECV_TYPE; 4243 recv_msg->msgid = msg->rsp[9] >> 2; 4244 recv_msg->msg.netfn = msg->rsp[6] >> 2; 4245 recv_msg->msg.cmd = msg->rsp[10]; 4246 recv_msg->msg.data = recv_msg->msg_data; 4247 4248 /* 4249 * We chop off 12, not 11 bytes because the checksum 4250 * at the end also needs to be removed. 4251 */ 4252 recv_msg->msg.data_len = msg->rsp_size - 12; 4253 memcpy(recv_msg->msg_data, &msg->rsp[11], 4254 msg->rsp_size - 12); 4255 if (deliver_response(intf, recv_msg)) 4256 ipmi_inc_stat(intf, unhandled_commands); 4257 else 4258 ipmi_inc_stat(intf, handled_commands); 4259 } else { 4260 /* 4261 * We couldn't allocate memory for the message, so 4262 * requeue it for handling later. 4263 */ 4264 rv = 1; 4265 } 4266 4267 return rv; 4268 } 4269 4270 /* 4271 * This routine will handle "Get Message" command responses with 4272 * channels that use an OEM Medium. The message format belongs to 4273 * the OEM. See IPMI 2.0 specification, Chapter 6 and 4274 * Chapter 22, sections 22.6 and 22.24 for more details. 4275 */ 4276 static int handle_oem_get_msg_cmd(struct ipmi_smi *intf, 4277 struct ipmi_smi_msg *msg) 4278 { 4279 struct cmd_rcvr *rcvr; 4280 int rv = 0; 4281 unsigned char netfn; 4282 unsigned char cmd; 4283 unsigned char chan; 4284 struct ipmi_user *user = NULL; 4285 struct ipmi_system_interface_addr *smi_addr; 4286 struct ipmi_recv_msg *recv_msg = NULL; 4287 4288 /* 4289 * We expect the OEM SW to perform error checking 4290 * so we just do some basic sanity checks 4291 */ 4292 if (msg->rsp_size < 4) { 4293 /* Message not big enough, just ignore it. 
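		 * (Four bytes is the minimum used below: the netfn/LUN
		 * byte, the command, the completion code, and the
		 * channel byte.)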
	 */
		ipmi_inc_stat(intf, invalid_commands);
		return 0;
	}

	if (msg->rsp[2] != 0) {
		/* An error getting the response, just ignore it. */
		return 0;
	}

	/*
	 * This is an OEM Message so the OEM needs to know how to
	 * handle the message. We do no interpretation.
	 */
	netfn = msg->rsp[0] >> 2;
	cmd = msg->rsp[1];
	chan = msg->rsp[3] & 0xf;

	rcu_read_lock();
	rcvr = find_cmd_rcvr(intf, netfn, cmd, chan);
	if (rcvr) {
		user = rcvr->user;
		recv_msg = ipmi_alloc_recv_msg(user);
	}
	rcu_read_unlock();

	if (user == NULL) {
		/* We didn't find a user, just give up. */
		ipmi_inc_stat(intf, unhandled_commands);

		/*
		 * Don't do anything with these messages, just allow
		 * them to be freed.
		 */
		rv = 0;
	} else if (!IS_ERR(recv_msg)) {
		/*
		 * OEM Messages are expected to be delivered via
		 * the system interface to SMS software. We might
		 * need to visit this again depending on OEM
		 * requirements.
		 */
		smi_addr = ((struct ipmi_system_interface_addr *)
			    &recv_msg->addr);
		smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
		smi_addr->channel = IPMI_BMC_CHANNEL;
		smi_addr->lun = msg->rsp[0] & 3;

		recv_msg->user_msg_data = NULL;
		recv_msg->recv_type = IPMI_OEM_RECV_TYPE;
		recv_msg->msg.netfn = msg->rsp[0] >> 2;
		recv_msg->msg.cmd = msg->rsp[1];
		recv_msg->msg.data = recv_msg->msg_data;

		/*
		 * The message starts at byte 4 which follows the
		 * Channel Byte in the "GET MESSAGE" command.
		 */
		recv_msg->msg.data_len = msg->rsp_size - 4;
		memcpy(recv_msg->msg_data, &msg->rsp[4],
		       msg->rsp_size - 4);
		if (deliver_response(intf, recv_msg))
			ipmi_inc_stat(intf, unhandled_commands);
		else
			ipmi_inc_stat(intf, handled_commands);
	} else {
		/*
		 * We couldn't allocate memory for the message, so
		 * requeue it for handling later.
		 */
		rv = 1;
	}

	return rv;
}

static void copy_event_into_recv_msg(struct ipmi_recv_msg *recv_msg,
				     struct ipmi_smi_msg *msg)
{
	struct ipmi_system_interface_addr *smi_addr;

	recv_msg->msgid = 0;
	smi_addr = (struct ipmi_system_interface_addr *) &recv_msg->addr;
	smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
	smi_addr->channel = IPMI_BMC_CHANNEL;
	smi_addr->lun = msg->rsp[0] & 3;
	recv_msg->recv_type = IPMI_ASYNC_EVENT_RECV_TYPE;
	recv_msg->msg.netfn = msg->rsp[0] >> 2;
	recv_msg->msg.cmd = msg->rsp[1];
	memcpy(recv_msg->msg_data, &msg->rsp[3], msg->rsp_size - 3);
	recv_msg->msg.data = recv_msg->msg_data;
	recv_msg->msg.data_len = msg->rsp_size - 3;
}

static int handle_read_event_rsp(struct ipmi_smi *intf,
				 struct ipmi_smi_msg *msg)
{
	struct ipmi_recv_msg *recv_msg, *recv_msg2;
	struct list_head msgs;
	struct ipmi_user *user;
	int rv = 0, deliver_count = 0;

	if (msg->rsp_size < 19) {
		/* Message is too small to be an IPMB event. */
		ipmi_inc_stat(intf, invalid_events);
		return 0;
	}

	if (msg->rsp[2] != 0) {
		/* An error getting the event, just ignore it. */
		return 0;
	}

	INIT_LIST_HEAD(&msgs);

	mutex_lock(&intf->events_mutex);

	ipmi_inc_stat(intf, events);

	/*
	 * Allocate and fill in one message for every user that is
	 * getting events.
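	 * The copies are collected on a local list while users_mutex is
	 * held and are only delivered after it is released.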
	 */
	mutex_lock(&intf->users_mutex);
	list_for_each_entry(user, &intf->users, link) {
		if (!user->gets_events)
			continue;

		recv_msg = ipmi_alloc_recv_msg(user);
		if (IS_ERR(recv_msg)) {
			mutex_unlock(&intf->users_mutex);
			list_for_each_entry_safe(recv_msg, recv_msg2, &msgs,
						 link) {
				list_del(&recv_msg->link);
				/*
				 * This also drops the user refcount
				 * taken by ipmi_alloc_recv_msg().
				 */
				ipmi_free_recv_msg(recv_msg);
			}
			/*
			 * We couldn't allocate memory for the
			 * message, so requeue it for handling
			 * later.
			 */
			rv = 1;
			goto out;
		}

		deliver_count++;

		copy_event_into_recv_msg(recv_msg, msg);
		list_add_tail(&recv_msg->link, &msgs);
	}
	mutex_unlock(&intf->users_mutex);

	if (deliver_count) {
		/* Now deliver all the messages. */
		list_for_each_entry_safe(recv_msg, recv_msg2, &msgs, link) {
			list_del(&recv_msg->link);
			deliver_local_response(intf, recv_msg);
		}
	} else if (intf->waiting_events_count < MAX_EVENTS_IN_QUEUE) {
		/*
		 * No one to receive the message, put it in the queue if
		 * there are not already too many things in the queue.
		 */
		recv_msg = ipmi_alloc_recv_msg(NULL);
		if (IS_ERR(recv_msg)) {
			/*
			 * We couldn't allocate memory for the
			 * message, so requeue it for handling
			 * later.
			 */
			rv = 1;
			goto out;
		}

		copy_event_into_recv_msg(recv_msg, msg);
		list_add_tail(&recv_msg->link, &intf->waiting_events);
		intf->waiting_events_count++;
	} else if (!intf->event_msg_printed) {
		/*
		 * There are too many things in the queue, discard this
		 * message.
		 */
		dev_warn(intf->si_dev,
			 "Event queue full, discarding incoming events\n");
		intf->event_msg_printed = 1;
	}

out:
	mutex_unlock(&intf->events_mutex);

	return rv;
}

static int handle_bmc_rsp(struct ipmi_smi *intf,
			  struct ipmi_smi_msg *msg)
{
	struct ipmi_recv_msg *recv_msg;
	struct ipmi_system_interface_addr *smi_addr;

	recv_msg = msg->recv_msg;
	if (recv_msg == NULL) {
		dev_warn(intf->si_dev,
			 "IPMI SMI message received with no owner. This could be because of a malformed message, or because of a hardware error. Contact your hardware vendor for assistance.\n");
		return 0;
	}

	recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
	recv_msg->msgid = msg->msgid;
	smi_addr = ((struct ipmi_system_interface_addr *)
		    &recv_msg->addr);
	smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
	smi_addr->channel = IPMI_BMC_CHANNEL;
	smi_addr->lun = msg->rsp[0] & 3;
	recv_msg->msg.netfn = msg->rsp[0] >> 2;
	recv_msg->msg.cmd = msg->rsp[1];
	memcpy(recv_msg->msg_data, &msg->rsp[2], msg->rsp_size - 2);
	recv_msg->msg.data = recv_msg->msg_data;
	recv_msg->msg.data_len = msg->rsp_size - 2;
	deliver_local_response(intf, recv_msg);

	return 0;
}

/*
 * Handle a received message. Return 1 if the message should be requeued,
 * 0 if the message should be freed, or -1 if the message should not
 * be freed or requeued.
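 * (-1 is returned when the SMI message's data buffer was reused to
 * send a response, so the send path now owns the message.)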
4523 */ 4524 static int handle_one_recv_msg(struct ipmi_smi *intf, 4525 struct ipmi_smi_msg *msg) 4526 { 4527 int requeue = 0; 4528 int chan; 4529 unsigned char cc; 4530 bool is_cmd = !((msg->rsp[0] >> 2) & 1); 4531 4532 dev_dbg(intf->si_dev, "Recv: %*ph\n", msg->rsp_size, msg->rsp); 4533 4534 if (msg->rsp_size < 2) { 4535 /* Message is too small to be correct. */ 4536 dev_warn_ratelimited(intf->si_dev, 4537 "BMC returned too small a message for netfn %x cmd %x, got %d bytes\n", 4538 (msg->data[0] >> 2) | 1, 4539 msg->data[1], msg->rsp_size); 4540 4541 return_unspecified: 4542 /* Generate an error response for the message. */ 4543 msg->rsp[0] = msg->data[0] | (1 << 2); 4544 msg->rsp[1] = msg->data[1]; 4545 msg->rsp[2] = IPMI_ERR_UNSPECIFIED; 4546 msg->rsp_size = 3; 4547 } else if (msg->type == IPMI_SMI_MSG_TYPE_IPMB_DIRECT) { 4548 /* commands must have at least 4 bytes, responses 5. */ 4549 if (is_cmd && (msg->rsp_size < 4)) { 4550 ipmi_inc_stat(intf, invalid_commands); 4551 goto out; 4552 } 4553 if (!is_cmd && (msg->rsp_size < 5)) { 4554 ipmi_inc_stat(intf, invalid_ipmb_responses); 4555 /* Construct a valid error response. */ 4556 msg->rsp[0] = msg->data[0] & 0xfc; /* NetFN */ 4557 msg->rsp[0] |= (1 << 2); /* Make it a response */ 4558 msg->rsp[0] |= msg->data[2] & 3; /* rqLUN */ 4559 msg->rsp[1] = msg->data[1]; /* Addr */ 4560 msg->rsp[2] = msg->data[2] & 0xfc; /* rqSeq */ 4561 msg->rsp[2] |= msg->data[0] & 0x3; /* rsLUN */ 4562 msg->rsp[3] = msg->data[3]; /* Cmd */ 4563 msg->rsp[4] = IPMI_ERR_UNSPECIFIED; 4564 msg->rsp_size = 5; 4565 } 4566 } else if ((msg->data_size >= 2) 4567 && (msg->data[0] == (IPMI_NETFN_APP_REQUEST << 2)) 4568 && (msg->data[1] == IPMI_SEND_MSG_CMD) 4569 && (msg->recv_msg == NULL)) { 4570 4571 if (intf->in_shutdown || intf->run_to_completion) 4572 goto out; 4573 4574 /* 4575 * This is the local response to a command send, start 4576 * the timer for these. The recv_msg will not be 4577 * NULL if this is a response send, and we will let 4578 * response sends just go through. 4579 */ 4580 4581 /* 4582 * Check for errors, if we get certain errors (ones 4583 * that mean basically we can try again later), we 4584 * ignore them and start the timer. Otherwise we 4585 * report the error immediately. 4586 */ 4587 if ((msg->rsp_size >= 3) && (msg->rsp[2] != 0) 4588 && (msg->rsp[2] != IPMI_NODE_BUSY_ERR) 4589 && (msg->rsp[2] != IPMI_LOST_ARBITRATION_ERR) 4590 && (msg->rsp[2] != IPMI_BUS_ERR) 4591 && (msg->rsp[2] != IPMI_NAK_ON_WRITE_ERR)) { 4592 int ch = msg->rsp[3] & 0xf; 4593 struct ipmi_channel *chans; 4594 4595 /* Got an error sending the message, handle it. */ 4596 4597 chans = READ_ONCE(intf->channel_list)->c; 4598 if ((chans[ch].medium == IPMI_CHANNEL_MEDIUM_8023LAN) 4599 || (chans[ch].medium == IPMI_CHANNEL_MEDIUM_ASYNC)) 4600 ipmi_inc_stat(intf, sent_lan_command_errs); 4601 else 4602 ipmi_inc_stat(intf, sent_ipmb_command_errs); 4603 intf_err_seq(intf, msg->msgid, msg->rsp[2]); 4604 } else 4605 /* The message was sent, start the timer. */ 4606 intf_start_seq_timer(intf, msg->msgid); 4607 requeue = 0; 4608 goto out; 4609 } else if (((msg->rsp[0] >> 2) != ((msg->data[0] >> 2) | 1)) 4610 || (msg->rsp[1] != msg->data[1])) { 4611 /* 4612 * The NetFN and Command in the response is not even 4613 * marginally correct. 
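	 * (A response NetFN must be the request NetFN with the low bit
	 * set, and the command byte must echo the request's command.)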
4614 */ 4615 dev_warn_ratelimited(intf->si_dev, 4616 "BMC returned incorrect response, expected netfn %x cmd %x, got netfn %x cmd %x\n", 4617 (msg->data[0] >> 2) | 1, msg->data[1], 4618 msg->rsp[0] >> 2, msg->rsp[1]); 4619 4620 goto return_unspecified; 4621 } 4622 4623 if (msg->type == IPMI_SMI_MSG_TYPE_IPMB_DIRECT) { 4624 if ((msg->data[0] >> 2) & 1) { 4625 /* It's a response to a sent response. */ 4626 chan = 0; 4627 cc = msg->rsp[4]; 4628 goto process_response_response; 4629 } 4630 if (is_cmd) 4631 requeue = handle_ipmb_direct_rcv_cmd(intf, msg); 4632 else 4633 requeue = handle_ipmb_direct_rcv_rsp(intf, msg); 4634 } else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2)) 4635 && (msg->rsp[1] == IPMI_SEND_MSG_CMD) 4636 && (msg->recv_msg != NULL)) { 4637 /* 4638 * It's a response to a response we sent. For this we 4639 * deliver a send message response to the user. 4640 */ 4641 struct ipmi_recv_msg *recv_msg; 4642 4643 if (intf->run_to_completion) 4644 goto out; 4645 4646 chan = msg->data[2] & 0x0f; 4647 if (chan >= IPMI_MAX_CHANNELS) 4648 /* Invalid channel number */ 4649 goto out; 4650 cc = msg->rsp[2]; 4651 4652 process_response_response: 4653 recv_msg = msg->recv_msg; 4654 4655 requeue = 0; 4656 if (!recv_msg) 4657 goto out; 4658 4659 recv_msg->recv_type = IPMI_RESPONSE_RESPONSE_TYPE; 4660 recv_msg->msg.data = recv_msg->msg_data; 4661 recv_msg->msg_data[0] = cc; 4662 recv_msg->msg.data_len = 1; 4663 deliver_local_response(intf, recv_msg); 4664 } else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2)) 4665 && (msg->rsp[1] == IPMI_GET_MSG_CMD)) { 4666 struct ipmi_channel *chans; 4667 4668 if (intf->run_to_completion) 4669 goto out; 4670 4671 /* It's from the receive queue. */ 4672 chan = msg->rsp[3] & 0xf; 4673 if (chan >= IPMI_MAX_CHANNELS) { 4674 /* Invalid channel number */ 4675 requeue = 0; 4676 goto out; 4677 } 4678 4679 /* 4680 * We need to make sure the channels have been initialized. 4681 * The channel_handler routine will set the "curr_channel" 4682 * equal to or greater than IPMI_MAX_CHANNELS when all the 4683 * channels for this interface have been initialized. 4684 */ 4685 if (!intf->channels_ready) { 4686 requeue = 0; /* Throw the message away */ 4687 goto out; 4688 } 4689 4690 chans = READ_ONCE(intf->channel_list)->c; 4691 4692 switch (chans[chan].medium) { 4693 case IPMI_CHANNEL_MEDIUM_IPMB: 4694 if (msg->rsp[4] & 0x04) { 4695 /* 4696 * It's a response, so find the 4697 * requesting message and send it up. 4698 */ 4699 requeue = handle_ipmb_get_msg_rsp(intf, msg); 4700 } else { 4701 /* 4702 * It's a command to the SMS from some other 4703 * entity. Handle that. 4704 */ 4705 requeue = handle_ipmb_get_msg_cmd(intf, msg); 4706 } 4707 break; 4708 4709 case IPMI_CHANNEL_MEDIUM_8023LAN: 4710 case IPMI_CHANNEL_MEDIUM_ASYNC: 4711 if (msg->rsp[6] & 0x04) { 4712 /* 4713 * It's a response, so find the 4714 * requesting message and send it up. 4715 */ 4716 requeue = handle_lan_get_msg_rsp(intf, msg); 4717 } else { 4718 /* 4719 * It's a command to the SMS from some other 4720 * entity. Handle that. 4721 */ 4722 requeue = handle_lan_get_msg_cmd(intf, msg); 4723 } 4724 break; 4725 4726 default: 4727 /* Check for OEM Channels. Clients had better 4728 register for these commands. */ 4729 if ((chans[chan].medium >= IPMI_CHANNEL_MEDIUM_OEM_MIN) 4730 && (chans[chan].medium 4731 <= IPMI_CHANNEL_MEDIUM_OEM_MAX)) { 4732 requeue = handle_oem_get_msg_cmd(intf, msg); 4733 } else { 4734 /* 4735 * We don't handle the channel type, so just 4736 * free the message. 
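			 * (Leaving requeue at 0 lets the caller free
			 * the SMI message.)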
4737 */ 4738 requeue = 0; 4739 } 4740 } 4741 4742 } else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2)) 4743 && (msg->rsp[1] == IPMI_READ_EVENT_MSG_BUFFER_CMD)) { 4744 /* It's an asynchronous event. */ 4745 if (intf->run_to_completion) 4746 goto out; 4747 4748 requeue = handle_read_event_rsp(intf, msg); 4749 } else { 4750 /* It's a response from the local BMC. */ 4751 requeue = handle_bmc_rsp(intf, msg); 4752 } 4753 4754 out: 4755 return requeue; 4756 } 4757 4758 /* 4759 * If there are messages in the queue or pretimeouts, handle them. 4760 */ 4761 static void handle_new_recv_msgs(struct ipmi_smi *intf) 4762 { 4763 struct ipmi_smi_msg *smi_msg; 4764 unsigned long flags = 0; 4765 int rv; 4766 int run_to_completion = READ_ONCE(intf->run_to_completion); 4767 4768 /* See if any waiting messages need to be processed. */ 4769 if (!run_to_completion) 4770 spin_lock_irqsave(&intf->waiting_rcv_msgs_lock, flags); 4771 while (!list_empty(&intf->waiting_rcv_msgs)) { 4772 smi_msg = list_entry(intf->waiting_rcv_msgs.next, 4773 struct ipmi_smi_msg, link); 4774 list_del(&smi_msg->link); 4775 if (!run_to_completion) 4776 spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock, 4777 flags); 4778 rv = handle_one_recv_msg(intf, smi_msg); 4779 if (!run_to_completion) 4780 spin_lock_irqsave(&intf->waiting_rcv_msgs_lock, flags); 4781 if (rv > 0) { 4782 /* 4783 * To preserve message order, quit if we 4784 * can't handle a message. Add the message 4785 * back at the head, this is safe because this 4786 * workqueue is the only thing that pulls the 4787 * messages. 4788 */ 4789 list_add(&smi_msg->link, &intf->waiting_rcv_msgs); 4790 break; 4791 } else { 4792 if (rv == 0) 4793 /* Message handled */ 4794 ipmi_free_smi_msg(smi_msg); 4795 /* If rv < 0, fatal error, del but don't free. */ 4796 } 4797 } 4798 if (!run_to_completion) 4799 spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock, flags); 4800 } 4801 4802 static void smi_work(struct work_struct *t) 4803 { 4804 unsigned long flags = 0; /* keep us warning-free. */ 4805 struct ipmi_smi *intf = from_work(intf, t, smi_work); 4806 int run_to_completion = READ_ONCE(intf->run_to_completion); 4807 struct ipmi_smi_msg *newmsg = NULL; 4808 struct ipmi_recv_msg *msg, *msg2; 4809 int cc; 4810 4811 /* 4812 * Start the next message if available. 4813 * 4814 * Do this here, not in the actual receiver, because we may deadlock 4815 * because the lower layer is allowed to hold locks while calling 4816 * message delivery. 4817 */ 4818 restart: 4819 if (!run_to_completion) 4820 spin_lock_irqsave(&intf->xmit_msgs_lock, flags); 4821 if (intf->curr_msg == NULL && !intf->in_shutdown) { 4822 struct list_head *entry = NULL; 4823 4824 /* Pick the high priority queue first. */ 4825 if (!list_empty(&intf->hp_xmit_msgs)) 4826 entry = intf->hp_xmit_msgs.next; 4827 else if (!list_empty(&intf->xmit_msgs)) 4828 entry = intf->xmit_msgs.next; 4829 4830 if (entry) { 4831 list_del(entry); 4832 newmsg = list_entry(entry, struct ipmi_smi_msg, link); 4833 intf->curr_msg = newmsg; 4834 } 4835 } 4836 if (!run_to_completion) 4837 spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags); 4838 4839 if (newmsg) { 4840 cc = intf->handlers->sender(intf->send_info, newmsg); 4841 if (cc) { 4842 if (newmsg->recv_msg) 4843 deliver_err_response(intf, 4844 newmsg->recv_msg, cc); 4845 else 4846 ipmi_free_smi_msg(newmsg); 4847 goto restart; 4848 } 4849 } 4850 4851 handle_new_recv_msgs(intf); 4852 4853 /* Nothing below applies during panic time. 
*/ 4854 if (run_to_completion) 4855 return; 4856 4857 /* 4858 * If the pretimout count is non-zero, decrement one from it and 4859 * deliver pretimeouts to all the users. 4860 */ 4861 if (atomic_add_unless(&intf->watchdog_pretimeouts_to_deliver, -1, 0)) { 4862 struct ipmi_user *user; 4863 4864 mutex_lock(&intf->users_mutex); 4865 list_for_each_entry(user, &intf->users, link) { 4866 if (user->handler->ipmi_watchdog_pretimeout) 4867 user->handler->ipmi_watchdog_pretimeout( 4868 user->handler_data); 4869 } 4870 mutex_unlock(&intf->users_mutex); 4871 } 4872 4873 /* 4874 * Freeing the message can cause a user to be released, which 4875 * can then cause the interface to be freed. Make sure that 4876 * doesn't happen until we are ready. 4877 */ 4878 kref_get(&intf->refcount); 4879 4880 mutex_lock(&intf->user_msgs_mutex); 4881 list_for_each_entry_safe(msg, msg2, &intf->user_msgs, link) { 4882 struct ipmi_user *user = msg->user; 4883 4884 list_del(&msg->link); 4885 4886 if (refcount_read(&user->destroyed) == 0) 4887 ipmi_free_recv_msg(msg); 4888 else 4889 user->handler->ipmi_recv_hndl(msg, user->handler_data); 4890 } 4891 mutex_unlock(&intf->user_msgs_mutex); 4892 4893 kref_put(&intf->refcount, intf_free); 4894 } 4895 4896 /* Handle a new message from the lower layer. */ 4897 void ipmi_smi_msg_received(struct ipmi_smi *intf, 4898 struct ipmi_smi_msg *msg) 4899 { 4900 unsigned long flags = 0; /* keep us warning-free. */ 4901 int run_to_completion = READ_ONCE(intf->run_to_completion); 4902 4903 /* 4904 * To preserve message order, we keep a queue and deliver from 4905 * a workqueue. 4906 */ 4907 if (!run_to_completion) 4908 spin_lock_irqsave(&intf->waiting_rcv_msgs_lock, flags); 4909 list_add_tail(&msg->link, &intf->waiting_rcv_msgs); 4910 if (!run_to_completion) 4911 spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock, 4912 flags); 4913 4914 if (!run_to_completion) 4915 spin_lock_irqsave(&intf->xmit_msgs_lock, flags); 4916 /* 4917 * We can get an asynchronous event or receive message in addition 4918 * to commands we send. 4919 */ 4920 if (msg == intf->curr_msg) 4921 intf->curr_msg = NULL; 4922 if (!run_to_completion) 4923 spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags); 4924 4925 if (run_to_completion) 4926 smi_work(&intf->smi_work); 4927 else 4928 queue_work(system_wq, &intf->smi_work); 4929 } 4930 EXPORT_SYMBOL(ipmi_smi_msg_received); 4931 4932 void ipmi_smi_watchdog_pretimeout(struct ipmi_smi *intf) 4933 { 4934 if (intf->in_shutdown) 4935 return; 4936 4937 atomic_set(&intf->watchdog_pretimeouts_to_deliver, 1); 4938 queue_work(system_wq, &intf->smi_work); 4939 } 4940 EXPORT_SYMBOL(ipmi_smi_watchdog_pretimeout); 4941 4942 static struct ipmi_smi_msg * 4943 smi_from_recv_msg(struct ipmi_smi *intf, struct ipmi_recv_msg *recv_msg, 4944 unsigned char seq, long seqid) 4945 { 4946 struct ipmi_smi_msg *smi_msg = ipmi_alloc_smi_msg(); 4947 if (!smi_msg) 4948 /* 4949 * If we can't allocate the message, then just return, we 4950 * get 4 retries, so this should be ok. 
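		 * (The retry budget comes from the message's retry count,
		 * default_max_retries by default; a failed allocation just
		 * costs one retry and the send is attempted again on a
		 * later timeout pass.)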
4951 */ 4952 return NULL; 4953 4954 memcpy(smi_msg->data, recv_msg->msg.data, recv_msg->msg.data_len); 4955 smi_msg->data_size = recv_msg->msg.data_len; 4956 smi_msg->msgid = STORE_SEQ_IN_MSGID(seq, seqid); 4957 4958 dev_dbg(intf->si_dev, "Resend: %*ph\n", 4959 smi_msg->data_size, smi_msg->data); 4960 4961 return smi_msg; 4962 } 4963 4964 static void check_msg_timeout(struct ipmi_smi *intf, struct seq_table *ent, 4965 struct list_head *timeouts, 4966 unsigned long timeout_period, 4967 int slot, bool *need_timer) 4968 { 4969 struct ipmi_recv_msg *msg; 4970 4971 if (intf->in_shutdown) 4972 return; 4973 4974 if (!ent->inuse) 4975 return; 4976 4977 if (timeout_period < ent->timeout) { 4978 ent->timeout -= timeout_period; 4979 *need_timer = true; 4980 return; 4981 } 4982 4983 if (ent->retries_left == 0) { 4984 /* The message has used all its retries. */ 4985 ent->inuse = 0; 4986 smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES); 4987 msg = ent->recv_msg; 4988 list_add_tail(&msg->link, timeouts); 4989 if (ent->broadcast) 4990 ipmi_inc_stat(intf, timed_out_ipmb_broadcasts); 4991 else if (is_lan_addr(&ent->recv_msg->addr)) 4992 ipmi_inc_stat(intf, timed_out_lan_commands); 4993 else 4994 ipmi_inc_stat(intf, timed_out_ipmb_commands); 4995 } else { 4996 struct ipmi_smi_msg *smi_msg; 4997 /* More retries, send again. */ 4998 4999 *need_timer = true; 5000 5001 /* 5002 * Start with the max timer, set to normal timer after 5003 * the message is sent. 5004 */ 5005 ent->timeout = MAX_MSG_TIMEOUT; 5006 ent->retries_left--; 5007 smi_msg = smi_from_recv_msg(intf, ent->recv_msg, slot, 5008 ent->seqid); 5009 if (!smi_msg) { 5010 if (is_lan_addr(&ent->recv_msg->addr)) 5011 ipmi_inc_stat(intf, 5012 dropped_rexmit_lan_commands); 5013 else 5014 ipmi_inc_stat(intf, 5015 dropped_rexmit_ipmb_commands); 5016 return; 5017 } 5018 5019 mutex_unlock(&intf->seq_lock); 5020 5021 /* 5022 * Send the new message. We send with a zero 5023 * priority. It timed out, I doubt time is that 5024 * critical now, and high priority messages are really 5025 * only for messages to the local MC, which don't get 5026 * resent. 5027 */ 5028 if (intf->handlers) { 5029 if (is_lan_addr(&ent->recv_msg->addr)) 5030 ipmi_inc_stat(intf, 5031 retransmitted_lan_commands); 5032 else 5033 ipmi_inc_stat(intf, 5034 retransmitted_ipmb_commands); 5035 5036 smi_send(intf, intf->handlers, smi_msg, 0); 5037 } else 5038 ipmi_free_smi_msg(smi_msg); 5039 5040 mutex_lock(&intf->seq_lock); 5041 } 5042 } 5043 5044 static bool ipmi_timeout_handler(struct ipmi_smi *intf, 5045 unsigned long timeout_period) 5046 { 5047 struct list_head timeouts; 5048 struct ipmi_recv_msg *msg, *msg2; 5049 unsigned long flags; 5050 int i; 5051 bool need_timer = false; 5052 5053 if (!intf->bmc_registered) { 5054 kref_get(&intf->refcount); 5055 if (!schedule_work(&intf->bmc_reg_work)) { 5056 kref_put(&intf->refcount, intf_free); 5057 need_timer = true; 5058 } 5059 } 5060 5061 /* 5062 * Go through the seq table and find any messages that 5063 * have timed out, putting them in the timeouts 5064 * list. 
	 */
	INIT_LIST_HEAD(&timeouts);
	mutex_lock(&intf->seq_lock);
	if (intf->ipmb_maintenance_mode_timeout) {
		if (intf->ipmb_maintenance_mode_timeout <= timeout_period)
			intf->ipmb_maintenance_mode_timeout = 0;
		else
			intf->ipmb_maintenance_mode_timeout -= timeout_period;
	}
	for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++)
		check_msg_timeout(intf, &intf->seq_table[i],
				  &timeouts, timeout_period, i,
				  &need_timer);
	mutex_unlock(&intf->seq_lock);

	list_for_each_entry_safe(msg, msg2, &timeouts, link)
		deliver_err_response(intf, msg, IPMI_TIMEOUT_COMPLETION_CODE);

	/*
	 * Maintenance mode handling. Check the timeout
	 * optimistically before we claim the lock. It may
	 * mean a timeout gets missed occasionally, but that
	 * only means the timeout gets extended by one period
	 * in that case. No big deal, and it avoids the lock
	 * most of the time.
	 */
	if (intf->auto_maintenance_timeout > 0) {
		spin_lock_irqsave(&intf->maintenance_mode_lock, flags);
		if (intf->auto_maintenance_timeout > 0) {
			intf->auto_maintenance_timeout
				-= timeout_period;
			if (!intf->maintenance_mode
			    && (intf->auto_maintenance_timeout <= 0)) {
				intf->maintenance_mode_state =
					IPMI_MAINTENANCE_MODE_STATE_OFF;
				intf->auto_maintenance_timeout = 0;
				maintenance_mode_update(intf);
			}
		}
		spin_unlock_irqrestore(&intf->maintenance_mode_lock,
				       flags);
	}

	queue_work(system_wq, &intf->smi_work);

	return need_timer;
}

static void ipmi_request_event(struct ipmi_smi *intf)
{
	/* No event requests when in maintenance mode. */
	if (intf->maintenance_mode_state)
		return;

	if (!intf->in_shutdown)
		intf->handlers->request_events(intf->send_info);
}

static atomic_t stop_operation;

static void ipmi_timeout_work(struct work_struct *work)
{
	if (atomic_read(&stop_operation))
		return;

	struct ipmi_smi *intf;
	bool need_timer = false;

	mutex_lock(&ipmi_interfaces_mutex);
	list_for_each_entry(intf, &ipmi_interfaces, link) {
		if (atomic_read(&intf->event_waiters)) {
			intf->ticks_to_req_ev--;
			if (intf->ticks_to_req_ev == 0) {
				ipmi_request_event(intf);
				intf->ticks_to_req_ev = IPMI_REQUEST_EV_TIME;
			}
			need_timer = true;
		}
		if (intf->maintenance_mode_state)
			need_timer = true;

		need_timer |= ipmi_timeout_handler(intf, IPMI_TIMEOUT_TIME);
	}
	mutex_unlock(&ipmi_interfaces_mutex);

	if (need_timer)
		mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
}

static DECLARE_WORK(ipmi_timer_work, ipmi_timeout_work);

static void ipmi_timeout(struct timer_list *unused)
{
	if (atomic_read(&stop_operation))
		return;

	queue_work(system_wq, &ipmi_timer_work);
}

static void need_waiter(struct ipmi_smi *intf)
{
	/* Racy, but worst case we start the timer twice. */
	if (!timer_pending(&ipmi_timer))
		mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
}

static atomic_t smi_msg_inuse_count = ATOMIC_INIT(0);
static atomic_t recv_msg_inuse_count = ATOMIC_INIT(0);

static void free_smi_msg(struct ipmi_smi_msg *msg)
{
	atomic_dec(&smi_msg_inuse_count);
	/*
	 * Try to keep as much stuff out of the panic path as possible.
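	 * If an oops is in progress the memory is deliberately leaked
	 * rather than risking a call into the allocator.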
*/ 5181 if (!oops_in_progress) 5182 kfree(msg); 5183 } 5184 5185 struct ipmi_smi_msg *ipmi_alloc_smi_msg(void) 5186 { 5187 struct ipmi_smi_msg *rv; 5188 rv = kmalloc(sizeof(struct ipmi_smi_msg), GFP_ATOMIC); 5189 if (rv) { 5190 rv->done = free_smi_msg; 5191 rv->recv_msg = NULL; 5192 rv->type = IPMI_SMI_MSG_TYPE_NORMAL; 5193 atomic_inc(&smi_msg_inuse_count); 5194 } 5195 return rv; 5196 } 5197 EXPORT_SYMBOL(ipmi_alloc_smi_msg); 5198 5199 static void free_recv_msg(struct ipmi_recv_msg *msg) 5200 { 5201 atomic_dec(&recv_msg_inuse_count); 5202 /* Try to keep as much stuff out of the panic path as possible. */ 5203 if (!oops_in_progress) 5204 kfree(msg); 5205 } 5206 5207 static struct ipmi_recv_msg *ipmi_alloc_recv_msg(struct ipmi_user *user) 5208 { 5209 struct ipmi_recv_msg *rv; 5210 5211 if (user) { 5212 if (atomic_add_return(1, &user->nr_msgs) > max_msgs_per_user) { 5213 atomic_dec(&user->nr_msgs); 5214 return ERR_PTR(-EBUSY); 5215 } 5216 } 5217 5218 rv = kmalloc(sizeof(struct ipmi_recv_msg), GFP_ATOMIC); 5219 if (!rv) { 5220 if (user) 5221 atomic_dec(&user->nr_msgs); 5222 return ERR_PTR(-ENOMEM); 5223 } 5224 5225 rv->user = user; 5226 rv->done = free_recv_msg; 5227 if (user) 5228 kref_get(&user->refcount); 5229 atomic_inc(&recv_msg_inuse_count); 5230 return rv; 5231 } 5232 5233 void ipmi_free_recv_msg(struct ipmi_recv_msg *msg) 5234 { 5235 if (msg->user && !oops_in_progress) { 5236 atomic_dec(&msg->user->nr_msgs); 5237 kref_put(&msg->user->refcount, free_ipmi_user); 5238 } 5239 msg->done(msg); 5240 } 5241 EXPORT_SYMBOL(ipmi_free_recv_msg); 5242 5243 static void ipmi_set_recv_msg_user(struct ipmi_recv_msg *msg, 5244 struct ipmi_user *user) 5245 { 5246 WARN_ON_ONCE(msg->user); /* User should not be set. */ 5247 msg->user = user; 5248 atomic_inc(&user->nr_msgs); 5249 kref_get(&user->refcount); 5250 } 5251 5252 static atomic_t panic_done_count = ATOMIC_INIT(0); 5253 5254 static void dummy_smi_done_handler(struct ipmi_smi_msg *msg) 5255 { 5256 atomic_dec(&panic_done_count); 5257 } 5258 5259 static void dummy_recv_done_handler(struct ipmi_recv_msg *msg) 5260 { 5261 atomic_dec(&panic_done_count); 5262 } 5263 5264 /* 5265 * Inside a panic, send a message and wait for a response. 5266 */ 5267 static void _ipmi_panic_request_and_wait(struct ipmi_smi *intf, 5268 struct ipmi_addr *addr, 5269 struct kernel_ipmi_msg *msg) 5270 { 5271 struct ipmi_smi_msg smi_msg; 5272 struct ipmi_recv_msg recv_msg; 5273 int rv; 5274 5275 smi_msg.done = dummy_smi_done_handler; 5276 recv_msg.done = dummy_recv_done_handler; 5277 atomic_add(2, &panic_done_count); 5278 rv = i_ipmi_request(NULL, 5279 intf, 5280 addr, 5281 0, 5282 msg, 5283 intf, 5284 &smi_msg, 5285 &recv_msg, 5286 0, 5287 intf->addrinfo[0].address, 5288 intf->addrinfo[0].lun, 5289 0, 1); /* Don't retry, and don't wait. 
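	 * No retries are made, and the supplied smi_msg and recv_msg
	 * live on the stack, so nothing has to be allocated on this
	 * path.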
	 */
	if (rv)
		atomic_sub(2, &panic_done_count);
	else if (intf->handlers->flush_messages)
		intf->handlers->flush_messages(intf->send_info);

	while (atomic_read(&panic_done_count) != 0)
		ipmi_poll(intf);
}

void ipmi_panic_request_and_wait(struct ipmi_user *user,
				 struct ipmi_addr *addr,
				 struct kernel_ipmi_msg *msg)
{
	user->intf->run_to_completion = 1;
	_ipmi_panic_request_and_wait(user->intf, addr, msg);
}
EXPORT_SYMBOL(ipmi_panic_request_and_wait);

static void event_receiver_fetcher(struct ipmi_smi *intf,
				   struct ipmi_recv_msg *msg)
{
	if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
	    && (msg->msg.netfn == IPMI_NETFN_SENSOR_EVENT_RESPONSE)
	    && (msg->msg.cmd == IPMI_GET_EVENT_RECEIVER_CMD)
	    && (msg->msg.data[0] == IPMI_CC_NO_ERROR)) {
		/* A get event receiver command, save it. */
		intf->event_receiver = msg->msg.data[1];
		intf->event_receiver_lun = msg->msg.data[2] & 0x3;
	}
}

static void device_id_fetcher(struct ipmi_smi *intf, struct ipmi_recv_msg *msg)
{
	if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
	    && (msg->msg.netfn == IPMI_NETFN_APP_RESPONSE)
	    && (msg->msg.cmd == IPMI_GET_DEVICE_ID_CMD)
	    && (msg->msg.data[0] == IPMI_CC_NO_ERROR)) {
		/*
		 * A get device id command, save if we are an event
		 * receiver or generator.
		 */
		intf->local_sel_device = (msg->msg.data[6] >> 2) & 1;
		intf->local_event_generator = (msg->msg.data[6] >> 5) & 1;
	}
}

static void send_panic_events(struct ipmi_smi *intf, char *str)
{
	struct kernel_ipmi_msg msg;
	unsigned char data[16];
	struct ipmi_system_interface_addr *si;
	struct ipmi_addr addr;
	char *p = str;
	struct ipmi_ipmb_addr *ipmb;
	int j;

	if (ipmi_send_panic_event == IPMI_SEND_PANIC_EVENT_NONE)
		return;

	si = (struct ipmi_system_interface_addr *) &addr;
	si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
	si->channel = IPMI_BMC_CHANNEL;
	si->lun = 0;

	/* Fill in an event telling that we have failed. */
	msg.netfn = 0x04; /* Sensor or Event. */
	msg.cmd = 2; /* Platform event command. */
	msg.data = data;
	msg.data_len = 8;
	data[0] = 0x41; /* Kernel generator ID, IPMI table 5-4 */
	data[1] = 0x03; /* This is for IPMI 1.0. */
	data[2] = 0x20; /* OS Critical Stop, IPMI table 36-3 */
	data[4] = 0x6f; /* Sensor specific, IPMI table 36-1 */
	data[5] = 0xa1; /* Runtime stop OEM bytes 2 & 3. */

	/*
	 * Put a few breadcrumbs in. Hopefully later we can add more things
	 * to make the panic events more useful.
	 */
	if (str) {
		data[3] = str[0];
		data[6] = str[1];
		data[7] = str[2];
	}

	/* Send the event announcing the panic. */
	_ipmi_panic_request_and_wait(intf, &addr, &msg);

	/*
	 * On every interface, dump a bunch of OEM events holding the
	 * string.
	 */
	if (ipmi_send_panic_event != IPMI_SEND_PANIC_EVENT_STRING || !str)
		return;

	/*
	 * intf_num is used as a marker to tell if the
	 * interface is valid. Thus we need a read barrier to
	 * make sure data fetched before checking intf_num
	 * won't be used.
	 */
	smp_rmb();

	/*
	 * First job here is to figure out where to send the
	 * OEM events. There's no way in IPMI to send OEM
	 * events using an event send command, so we have to
	 * find the SEL to put them in and stick them in
	 * there.
	 */

	/* Get capabilities from the get device id. */
	intf->local_sel_device = 0;
	intf->local_event_generator = 0;
	intf->event_receiver = 0;

	/* Request the device info from the local MC. */
	msg.netfn = IPMI_NETFN_APP_REQUEST;
	msg.cmd = IPMI_GET_DEVICE_ID_CMD;
	msg.data = NULL;
	msg.data_len = 0;
	intf->null_user_handler = device_id_fetcher;
	_ipmi_panic_request_and_wait(intf, &addr, &msg);

	if (intf->local_event_generator) {
		/* Request the event receiver from the local MC. */
		msg.netfn = IPMI_NETFN_SENSOR_EVENT_REQUEST;
		msg.cmd = IPMI_GET_EVENT_RECEIVER_CMD;
		msg.data = NULL;
		msg.data_len = 0;
		intf->null_user_handler = event_receiver_fetcher;
		_ipmi_panic_request_and_wait(intf, &addr, &msg);
	}
	intf->null_user_handler = NULL;

	/*
	 * Validate the event receiver. The low bit must not
	 * be 1 (it must be a valid IPMB address), it cannot
	 * be zero, and it must not be my address.
	 */
	if (((intf->event_receiver & 1) == 0)
	    && (intf->event_receiver != 0)
	    && (intf->event_receiver != intf->addrinfo[0].address)) {
		/*
		 * The event receiver is valid, send an IPMB
		 * message.
		 */
		ipmb = (struct ipmi_ipmb_addr *) &addr;
		ipmb->addr_type = IPMI_IPMB_ADDR_TYPE;
		ipmb->channel = 0; /* FIXME - is this right? */
		ipmb->lun = intf->event_receiver_lun;
		ipmb->slave_addr = intf->event_receiver;
	} else if (intf->local_sel_device) {
		/*
		 * The event receiver was not valid (or was
		 * me), but I am an SEL device, just dump it
		 * in my SEL.
		 */
		si = (struct ipmi_system_interface_addr *) &addr;
		si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
		si->channel = IPMI_BMC_CHANNEL;
		si->lun = 0;
	} else
		return; /* Nowhere to send the event. */

	msg.netfn = IPMI_NETFN_STORAGE_REQUEST; /* Storage. */
	msg.cmd = IPMI_ADD_SEL_ENTRY_CMD;
	msg.data = data;
	msg.data_len = 16;

	j = 0;
	while (*p) {
		int size = strnlen(p, 11);

		data[0] = 0;
		data[1] = 0;
		data[2] = 0xf0; /* OEM event without timestamp. */
		data[3] = intf->addrinfo[0].address;
		data[4] = j++; /* sequence # */

		memcpy_and_pad(data+5, 11, p, size, '\0');
		p += size;

		_ipmi_panic_request_and_wait(intf, &addr, &msg);
	}
}

static int has_panicked;

static int panic_event(struct notifier_block *this,
		       unsigned long event,
		       void *ptr)
{
	struct ipmi_smi *intf;
	struct ipmi_user *user;

	if (has_panicked)
		return NOTIFY_DONE;
	has_panicked = 1;

	/* For every registered interface, set it to run to completion. */
	list_for_each_entry(intf, &ipmi_interfaces, link) {
		if (!intf->handlers || intf->intf_num == -1)
			/* Interface is not ready. */
			continue;

		if (!intf->handlers->poll)
			continue;

		/*
		 * If we were interrupted while locking xmit_msgs_lock or
		 * waiting_rcv_msgs_lock, the corresponding list may be
		 * corrupted. In this case, drop items on the list for
		 * safety.
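		 * (Only spin_trylock() is used below; if it fails, the
		 * lock holder died mid-update, so the list is
		 * reinitialized rather than trusted.)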
5504 */ 5505 if (!spin_trylock(&intf->xmit_msgs_lock)) { 5506 INIT_LIST_HEAD(&intf->xmit_msgs); 5507 INIT_LIST_HEAD(&intf->hp_xmit_msgs); 5508 } else 5509 spin_unlock(&intf->xmit_msgs_lock); 5510 5511 if (!spin_trylock(&intf->waiting_rcv_msgs_lock)) 5512 INIT_LIST_HEAD(&intf->waiting_rcv_msgs); 5513 else 5514 spin_unlock(&intf->waiting_rcv_msgs_lock); 5515 5516 intf->run_to_completion = 1; 5517 if (intf->handlers->set_run_to_completion) 5518 intf->handlers->set_run_to_completion(intf->send_info, 5519 1); 5520 5521 list_for_each_entry(user, &intf->users, link) { 5522 if (user->handler->ipmi_panic_handler) 5523 user->handler->ipmi_panic_handler( 5524 user->handler_data); 5525 } 5526 5527 send_panic_events(intf, ptr); 5528 } 5529 5530 return NOTIFY_DONE; 5531 } 5532 5533 /* Must be called with ipmi_interfaces_mutex held. */ 5534 static int ipmi_register_driver(void) 5535 { 5536 int rv; 5537 5538 if (drvregistered) 5539 return 0; 5540 5541 rv = driver_register(&ipmidriver.driver); 5542 if (rv) 5543 pr_err("Could not register IPMI driver\n"); 5544 else 5545 drvregistered = true; 5546 return rv; 5547 } 5548 5549 static struct notifier_block panic_block = { 5550 .notifier_call = panic_event, 5551 .next = NULL, 5552 .priority = 200 /* priority: INT_MAX >= x >= 0 */ 5553 }; 5554 5555 static int ipmi_init_msghandler(void) 5556 { 5557 int rv; 5558 5559 mutex_lock(&ipmi_interfaces_mutex); 5560 rv = ipmi_register_driver(); 5561 if (rv) 5562 goto out; 5563 if (initialized) 5564 goto out; 5565 5566 bmc_remove_work_wq = create_singlethread_workqueue("ipmi-msghandler-remove-wq"); 5567 if (!bmc_remove_work_wq) { 5568 pr_err("unable to create ipmi-msghandler-remove-wq workqueue"); 5569 rv = -ENOMEM; 5570 goto out; 5571 } 5572 5573 timer_setup(&ipmi_timer, ipmi_timeout, 0); 5574 mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES); 5575 5576 atomic_notifier_chain_register(&panic_notifier_list, &panic_block); 5577 5578 initialized = true; 5579 5580 out: 5581 mutex_unlock(&ipmi_interfaces_mutex); 5582 return rv; 5583 } 5584 5585 static int __init ipmi_init_msghandler_mod(void) 5586 { 5587 int rv; 5588 5589 pr_info("version " IPMI_DRIVER_VERSION "\n"); 5590 5591 mutex_lock(&ipmi_interfaces_mutex); 5592 rv = ipmi_register_driver(); 5593 mutex_unlock(&ipmi_interfaces_mutex); 5594 5595 return rv; 5596 } 5597 5598 static void __exit cleanup_ipmi(void) 5599 { 5600 int count; 5601 5602 if (initialized) { 5603 destroy_workqueue(bmc_remove_work_wq); 5604 5605 atomic_notifier_chain_unregister(&panic_notifier_list, 5606 &panic_block); 5607 5608 /* 5609 * This can't be called if any interfaces exist, so no worry 5610 * about shutting down the interfaces. 5611 */ 5612 5613 /* 5614 * Tell the timer to stop, then wait for it to stop. This 5615 * avoids problems with race conditions removing the timer 5616 * here. 5617 */ 5618 atomic_set(&stop_operation, 1); 5619 timer_delete_sync(&ipmi_timer); 5620 cancel_work_sync(&ipmi_timer_work); 5621 5622 initialized = false; 5623 5624 /* Check for buffer leaks. 
*/ 5625 count = atomic_read(&smi_msg_inuse_count); 5626 if (count != 0) 5627 pr_warn("SMI message count %d at exit\n", count); 5628 count = atomic_read(&recv_msg_inuse_count); 5629 if (count != 0) 5630 pr_warn("recv message count %d at exit\n", count); 5631 } 5632 if (drvregistered) 5633 driver_unregister(&ipmidriver.driver); 5634 } 5635 module_exit(cleanup_ipmi); 5636 5637 module_init(ipmi_init_msghandler_mod); 5638 MODULE_LICENSE("GPL"); 5639 MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>"); 5640 MODULE_DESCRIPTION("Incoming and outgoing message routing for an IPMI interface."); 5641 MODULE_VERSION(IPMI_DRIVER_VERSION); 5642 MODULE_SOFTDEP("post: ipmi_devintf"); 5643