1 /*
2  * ipmi_msghandler.c
3  *
4  * Incoming and outgoing message routing for an IPMI interface.
5  *
6  * Author: MontaVista Software, Inc.
7  *         Corey Minyard <minyard@mvista.com>
8  *         source@mvista.com
9  *
10  * Copyright 2002 MontaVista Software Inc.
11  *
12  *  This program is free software; you can redistribute it and/or modify it
13  *  under the terms of the GNU General Public License as published by the
14  *  Free Software Foundation; either version 2 of the License, or (at your
15  *  option) any later version.
16  *
17  *
18  *  THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
19  *  WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
20  *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
21  *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
22  *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
23  *  BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
24  *  OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
25  *  ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
26  *  TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
27  *  USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28  *
29  *  You should have received a copy of the GNU General Public License along
30  *  with this program; if not, write to the Free Software Foundation, Inc.,
31  *  675 Mass Ave, Cambridge, MA 02139, USA.
32  */
33 
34 #include <linux/module.h>
35 #include <linux/errno.h>
36 #include <asm/system.h>
37 #include <linux/poll.h>
38 #include <linux/sched.h>
39 #include <linux/seq_file.h>
40 #include <linux/spinlock.h>
41 #include <linux/mutex.h>
42 #include <linux/slab.h>
43 #include <linux/ipmi.h>
44 #include <linux/ipmi_smi.h>
45 #include <linux/notifier.h>
46 #include <linux/init.h>
47 #include <linux/proc_fs.h>
48 #include <linux/rcupdate.h>
49 
50 #define PFX "IPMI message handler: "
51 
52 #define IPMI_DRIVER_VERSION "39.2"
53 
54 static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void);
55 static int ipmi_init_msghandler(void);
56 
57 static int initialized;
58 
59 #ifdef CONFIG_PROC_FS
60 static struct proc_dir_entry *proc_ipmi_root;
61 #endif /* CONFIG_PROC_FS */
62 
63 /* Remain in auto-maintenance mode for this amount of time (in ms). */
64 #define IPMI_MAINTENANCE_MODE_TIMEOUT 30000
65 
66 #define MAX_EVENTS_IN_QUEUE	25
67 
68 /*
69  * Don't let a message sit in a queue forever; always time it with at least
70  * the max message timeout.  This is in milliseconds.
71  */
72 #define MAX_MSG_TIMEOUT		60000
73 
74 /*
75  * The main "user" data structure.
76  */
77 struct ipmi_user {
78 	struct list_head link;
79 
80 	/* Set to "0" when the user is destroyed. */
81 	int valid;
82 
83 	struct kref refcount;
84 
85 	/* The upper layer that handles receive messages. */
86 	struct ipmi_user_hndl *handler;
87 	void             *handler_data;
88 
89 	/* The interface this user is bound to. */
90 	ipmi_smi_t intf;
91 
92 	/* Does this user receive IPMI events? */
93 	int gets_events;
94 };
95 
96 struct cmd_rcvr {
97 	struct list_head link;
98 
99 	ipmi_user_t   user;
100 	unsigned char netfn;
101 	unsigned char cmd;
102 	unsigned int  chans;
103 
104 	/*
105 	 * This is used to form a linked list during mass deletion.
106 	 * Since this is in an RCU list, we cannot use the link above
107 	 * or change any data until the RCU period completes.  So we
108 	 * use this next variable during mass deletion so we can have
109 	 * a list and don't have to wait and restart the search on
110 	 * every individual deletion of a command.
111 	 */
112 	struct cmd_rcvr *next;
113 };
114 
115 struct seq_table {
116 	unsigned int         inuse : 1;
117 	unsigned int         broadcast : 1;
118 
119 	unsigned long        timeout;
120 	unsigned long        orig_timeout;
121 	unsigned int         retries_left;
122 
123 	/*
124 	 * To verify that an incoming send message response really is the
125 	 * response for the message we sent, we keep a sequence id and
126 	 * increment it every time we send a message.
127 	 */
128 	long                 seqid;
129 
130 	/*
131 	 * This is held so we can properly respond to the message on a
132 	 * timeout, and it is used to hold the temporary data for
133 	 * retransmission, too.
134 	 */
135 	struct ipmi_recv_msg *recv_msg;
136 };
137 
138 /*
139  * Store the information in a msgid (long) to allow us to find a
140  * sequence table entry from the msgid.
141  */
142 #define STORE_SEQ_IN_MSGID(seq, seqid) (((seq&0xff)<<26) | (seqid&0x3ffffff))
143 
144 #define GET_SEQ_FROM_MSGID(msgid, seq, seqid) \
145 	do {								\
146 		seq = ((msgid >> 26) & 0x3f);				\
147 		seqid = (msgid & 0x3fffff);				\
148 	} while (0)
149 
150 #define NEXT_SEQID(seqid) (((seqid) + 1) & 0x3fffff)
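/*
 * Worked example (illustrative, not part of the original source):
 * packing seq = 5 and seqid = 0x1234 gives
 *
 *     msgid = STORE_SEQ_IN_MSGID(5, 0x1234)
 *           = ((5 & 0xff) << 26) | (0x1234 & 0x3ffffff)
 *           = 0x14001234
 *
 * and GET_SEQ_FROM_MSGID(0x14001234, seq, seqid) recovers seq = 5 and
 * seqid = 0x1234.  The extraction masks are narrower than the store
 * mask, but that is harmless: NEXT_SEQID() keeps seqid within 22 bits
 * and seq never exceeds IPMI_IPMB_NUM_SEQ - 1 (63), which fits in 6 bits.
 */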
151 
152 struct ipmi_channel {
153 	unsigned char medium;
154 	unsigned char protocol;
155 
156 	/*
157 	 * My slave address.  This is initialized to IPMI_BMC_SLAVE_ADDR,
158 	 * but may be changed by the user.
159 	 */
160 	unsigned char address;
161 
162 	/*
163 	 * My LUN.  This should generally stay the SMS LUN, but just in
164 	 * case...
165 	 */
166 	unsigned char lun;
167 };
168 
169 #ifdef CONFIG_PROC_FS
170 struct ipmi_proc_entry {
171 	char                   *name;
172 	struct ipmi_proc_entry *next;
173 };
174 #endif
175 
176 struct bmc_device {
177 	struct platform_device *dev;
178 	struct ipmi_device_id  id;
179 	unsigned char          guid[16];
180 	int                    guid_set;
181 
182 	struct kref	       refcount;
183 
184 	/* bmc device attributes */
185 	struct device_attribute device_id_attr;
186 	struct device_attribute provides_dev_sdrs_attr;
187 	struct device_attribute revision_attr;
188 	struct device_attribute firmware_rev_attr;
189 	struct device_attribute version_attr;
190 	struct device_attribute add_dev_support_attr;
191 	struct device_attribute manufacturer_id_attr;
192 	struct device_attribute product_id_attr;
193 	struct device_attribute guid_attr;
194 	struct device_attribute aux_firmware_rev_attr;
195 };
196 
197 /*
198  * Various statistics for IPMI; these index stats[] in the ipmi_smi
199  * structure.
200  */
201 enum ipmi_stat_indexes {
202 	/* Commands we got from the user that were invalid. */
203 	IPMI_STAT_sent_invalid_commands = 0,
204 
205 	/* Commands we sent to the MC. */
206 	IPMI_STAT_sent_local_commands,
207 
208 	/* Responses from the MC that were delivered to a user. */
209 	IPMI_STAT_handled_local_responses,
210 
211 	/* Responses from the MC that were not delivered to a user. */
212 	IPMI_STAT_unhandled_local_responses,
213 
214 	/* Commands we sent out to the IPMB bus. */
215 	IPMI_STAT_sent_ipmb_commands,
216 
217 	/* Commands sent on the IPMB that had errors on the SEND CMD */
218 	IPMI_STAT_sent_ipmb_command_errs,
219 
220 	/* Each retransmit increments this count. */
221 	IPMI_STAT_retransmitted_ipmb_commands,
222 
223 	/*
224 	 * When a message times out (runs out of retransmits) this is
225 	 * incremented.
226 	 */
227 	IPMI_STAT_timed_out_ipmb_commands,
228 
229 	/*
230 	 * This is like above, but for broadcasts.  Broadcasts are
231 	 * *not* included in the above count (they are expected to
232 	 * time out).
233 	 */
234 	IPMI_STAT_timed_out_ipmb_broadcasts,
235 
236 	/* Responses I have sent to the IPMB bus. */
237 	IPMI_STAT_sent_ipmb_responses,
238 
239 	/* The response was delivered to the user. */
240 	IPMI_STAT_handled_ipmb_responses,
241 
242 	/* The response had invalid data in it. */
243 	IPMI_STAT_invalid_ipmb_responses,
244 
245 	/* The response didn't have anyone waiting for it. */
246 	IPMI_STAT_unhandled_ipmb_responses,
247 
248 	/* Commands we sent out on the LAN. */
249 	IPMI_STAT_sent_lan_commands,
250 
251 	/* Commands sent on the LAN that had errors on the SEND CMD. */
252 	IPMI_STAT_sent_lan_command_errs,
253 
254 	/* Each retransmit increments this count. */
255 	IPMI_STAT_retransmitted_lan_commands,
256 
257 	/*
258 	 * When a message times out (runs out of retransmits) this is
259 	 * incremented.
260 	 */
261 	IPMI_STAT_timed_out_lan_commands,
262 
263 	/* Responses I have sent on the LAN. */
264 	IPMI_STAT_sent_lan_responses,
265 
266 	/* The response was delivered to the user. */
267 	IPMI_STAT_handled_lan_responses,
268 
269 	/* The response had invalid data in it. */
270 	IPMI_STAT_invalid_lan_responses,
271 
272 	/* The response didn't have anyone waiting for it. */
273 	IPMI_STAT_unhandled_lan_responses,
274 
275 	/* The command was delivered to the user. */
276 	IPMI_STAT_handled_commands,
277 
278 	/* The command had invalid data in it. */
279 	IPMI_STAT_invalid_commands,
280 
281 	/* The command didn't have anyone waiting for it. */
282 	IPMI_STAT_unhandled_commands,
283 
284 	/* Invalid data in an event. */
285 	IPMI_STAT_invalid_events,
286 
287 	/* Events that were received with the proper format. */
288 	IPMI_STAT_events,
289 
290 	/* Retransmissions on IPMB that failed. */
291 	IPMI_STAT_dropped_rexmit_ipmb_commands,
292 
293 	/* Retransmissions on LAN that failed. */
294 	IPMI_STAT_dropped_rexmit_lan_commands,
295 
296 	/* This *must* remain last, add new values above this. */
297 	IPMI_NUM_STATS
298 };
299 
300 
301 #define IPMI_IPMB_NUM_SEQ	64
302 #define IPMI_MAX_CHANNELS       16
303 struct ipmi_smi {
304 	/* What interface number are we? */
305 	int intf_num;
306 
307 	struct kref refcount;
308 
309 	/* Used for a list of interfaces. */
310 	struct list_head link;
311 
312 	/*
313 	 * The list of upper layers that are using me.  seq_lock
314 	 * protects this.
315 	 */
316 	struct list_head users;
317 
318 	/* Information to supply to users. */
319 	unsigned char ipmi_version_major;
320 	unsigned char ipmi_version_minor;
321 
322 	/* Used for wake ups at startup. */
323 	wait_queue_head_t waitq;
324 
325 	struct bmc_device *bmc;
326 	char *my_dev_name;
327 	char *sysfs_name;
328 
329 	/*
330 	 * This is the lower-layer's sender routine.  Note that you
331 	 * must either be holding the ipmi_interfaces_mutex or be in
332 	 * an unpreemptible region to use this.  You must fetch the
333 	 * value into a local variable and make sure it is not NULL.
334 	 */
335 	struct ipmi_smi_handlers *handlers;
336 	void                     *send_info;
337 
338 #ifdef CONFIG_PROC_FS
339 	/* A list of proc entries for this interface. */
340 	struct mutex           proc_entry_lock;
341 	struct ipmi_proc_entry *proc_entries;
342 #endif
343 
344 	/* Driver-model device for the system interface. */
345 	struct device          *si_dev;
346 
347 	/*
348 	 * A table of sequence numbers for this interface.  We use the
349 	 * sequence numbers for IPMB messages that go out of the
350 	 * interface to match them up with their responses.  A routine
351 	 * is called periodically to time the items in this list.
352 	 */
353 	spinlock_t       seq_lock;
354 	struct seq_table seq_table[IPMI_IPMB_NUM_SEQ];
355 	int curr_seq;
356 
357 	/*
358 	 * for instance) will go in here to be processed later in a
359 	 * for instance), will go in here to be processed later in a
360 	 * periodic timer interrupt.
361 	 */
362 	spinlock_t       waiting_msgs_lock;
363 	struct list_head waiting_msgs;
364 
365 	/*
366 	 * The list of command receivers that are registered for commands
367 	 * on this interface.
368 	 */
369 	struct mutex     cmd_rcvrs_mutex;
370 	struct list_head cmd_rcvrs;
371 
372 	/*
373 	 * Events that were queued because no one was there to receive
374 	 * them.
375 	 */
376 	spinlock_t       events_lock; /* For dealing with event stuff. */
377 	struct list_head waiting_events;
378 	unsigned int     waiting_events_count; /* How many events in queue? */
379 	char             delivering_events;
380 	char             event_msg_printed;
381 
382 	/*
383 	 * The event receiver for my BMC, only really used at panic
384 	 * shutdown as a place to store this.
385 	 */
386 	unsigned char event_receiver;
387 	unsigned char event_receiver_lun;
388 	unsigned char local_sel_device;
389 	unsigned char local_event_generator;
390 
391 	/* For handling of maintenance mode. */
392 	int maintenance_mode;
393 	int maintenance_mode_enable;
394 	int auto_maintenance_timeout;
395 	spinlock_t maintenance_mode_lock; /* Used in a timer... */
396 
397 	/*
398 	 * A cheap hack: if this is non-null and a message to an
399 	 * interface comes in with a NULL user, call this routine with
400 	 * it.  Note that the message will still be freed by the
401 	 * caller.  This only works on the system interface.
402 	 */
403 	void (*null_user_handler)(ipmi_smi_t intf, struct ipmi_recv_msg *msg);
404 
405 	/*
406 	 * When we are scanning the channels for an SMI, this will
407 	 * tell which channel we are scanning.
408 	 */
409 	int curr_channel;
410 
411 	/* Channel information */
412 	struct ipmi_channel channels[IPMI_MAX_CHANNELS];
413 
414 	/* Proc FS stuff. */
415 	struct proc_dir_entry *proc_dir;
416 	char                  proc_dir_name[10];
417 
418 	atomic_t stats[IPMI_NUM_STATS];
419 
420 	/*
421 	 * Duplicate of the run_to_completion flag in the smb_info,
422 	 * smi_info and ipmi_serial_info structures.  Used to decrease the
423 	 * number of parameters passed by the low-level IPMI code.
424 	 */
425 	int run_to_completion;
426 };
427 #define to_si_intf_from_dev(device) container_of(device, struct ipmi_smi, dev)
428 
429 /**
430  * The driver model view of the IPMI messaging driver.
431  */
432 static struct platform_driver ipmidriver = {
433 	.driver = {
434 		.name = "ipmi",
435 		.bus = &platform_bus_type
436 	}
437 };
438 static DEFINE_MUTEX(ipmidriver_mutex);
439 
440 static LIST_HEAD(ipmi_interfaces);
441 static DEFINE_MUTEX(ipmi_interfaces_mutex);
442 
443 /*
444  * List of watchers that want to know when SMIs are added and deleted.
445  */
446 static LIST_HEAD(smi_watchers);
447 static DEFINE_MUTEX(smi_watchers_mutex);
448 
449 
450 #define ipmi_inc_stat(intf, stat) \
451 	atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
452 #define ipmi_get_stat(intf, stat) \
453 	((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
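/*
 * Illustrative expansion (not in the original source): the token
 * pasting above turns, for example,
 *
 *     ipmi_inc_stat(intf, sent_ipmb_commands);
 *
 * into
 *
 *     atomic_inc(&intf->stats[IPMI_STAT_sent_ipmb_commands]);
 *
 * so callers name a statistic by its suffix in enum ipmi_stat_indexes.
 */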
454 
455 static int is_lan_addr(struct ipmi_addr *addr)
456 {
457 	return addr->addr_type == IPMI_LAN_ADDR_TYPE;
458 }
459 
460 static int is_ipmb_addr(struct ipmi_addr *addr)
461 {
462 	return addr->addr_type == IPMI_IPMB_ADDR_TYPE;
463 }
464 
465 static int is_ipmb_bcast_addr(struct ipmi_addr *addr)
466 {
467 	return addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE;
468 }
469 
470 static void free_recv_msg_list(struct list_head *q)
471 {
472 	struct ipmi_recv_msg *msg, *msg2;
473 
474 	list_for_each_entry_safe(msg, msg2, q, link) {
475 		list_del(&msg->link);
476 		ipmi_free_recv_msg(msg);
477 	}
478 }
479 
480 static void free_smi_msg_list(struct list_head *q)
481 {
482 	struct ipmi_smi_msg *msg, *msg2;
483 
484 	list_for_each_entry_safe(msg, msg2, q, link) {
485 		list_del(&msg->link);
486 		ipmi_free_smi_msg(msg);
487 	}
488 }
489 
490 static void clean_up_interface_data(ipmi_smi_t intf)
491 {
492 	int              i;
493 	struct cmd_rcvr  *rcvr, *rcvr2;
494 	struct list_head list;
495 
496 	free_smi_msg_list(&intf->waiting_msgs);
497 	free_recv_msg_list(&intf->waiting_events);
498 
499 	/*
500 	 * Remove all the entries from the interface's list wholesale,
501 	 * then wait for RCU so we know that none are still in use.
502 	 */
503 	mutex_lock(&intf->cmd_rcvrs_mutex);
504 	INIT_LIST_HEAD(&list);
505 	list_splice_init_rcu(&intf->cmd_rcvrs, &list, synchronize_rcu);
506 	mutex_unlock(&intf->cmd_rcvrs_mutex);
507 
508 	list_for_each_entry_safe(rcvr, rcvr2, &list, link)
509 		kfree(rcvr);
510 
511 	for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
512 		if ((intf->seq_table[i].inuse)
513 					&& (intf->seq_table[i].recv_msg))
514 			ipmi_free_recv_msg(intf->seq_table[i].recv_msg);
515 	}
516 }
517 
518 static void intf_free(struct kref *ref)
519 {
520 	ipmi_smi_t intf = container_of(ref, struct ipmi_smi, refcount);
521 
522 	clean_up_interface_data(intf);
523 	kfree(intf);
524 }
525 
526 struct watcher_entry {
527 	int              intf_num;
528 	ipmi_smi_t       intf;
529 	struct list_head link;
530 };
531 
532 int ipmi_smi_watcher_register(struct ipmi_smi_watcher *watcher)
533 {
534 	ipmi_smi_t intf;
535 	LIST_HEAD(to_deliver);
536 	struct watcher_entry *e, *e2;
537 
538 	mutex_lock(&smi_watchers_mutex);
539 
540 	mutex_lock(&ipmi_interfaces_mutex);
541 
542 	/* Build a list of things to deliver. */
543 	list_for_each_entry(intf, &ipmi_interfaces, link) {
544 		if (intf->intf_num == -1)
545 			continue;
546 		e = kmalloc(sizeof(*e), GFP_KERNEL);
547 		if (!e)
548 			goto out_err;
549 		kref_get(&intf->refcount);
550 		e->intf = intf;
551 		e->intf_num = intf->intf_num;
552 		list_add_tail(&e->link, &to_deliver);
553 	}
554 
555 	/* We will succeed, so add it to the list. */
556 	list_add(&watcher->link, &smi_watchers);
557 
558 	mutex_unlock(&ipmi_interfaces_mutex);
559 
560 	list_for_each_entry_safe(e, e2, &to_deliver, link) {
561 		list_del(&e->link);
562 		watcher->new_smi(e->intf_num, e->intf->si_dev);
563 		kref_put(&e->intf->refcount, intf_free);
564 		kfree(e);
565 	}
566 
567 	mutex_unlock(&smi_watchers_mutex);
568 
569 	return 0;
570 
571  out_err:
572 	mutex_unlock(&ipmi_interfaces_mutex);
573 	mutex_unlock(&smi_watchers_mutex);
574 	list_for_each_entry_safe(e, e2, &to_deliver, link) {
575 		list_del(&e->link);
576 		kref_put(&e->intf->refcount, intf_free);
577 		kfree(e);
578 	}
579 	return -ENOMEM;
580 }
581 EXPORT_SYMBOL(ipmi_smi_watcher_register);
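/*
 * Minimal usage sketch (illustrative only, not part of the original
 * source).  A client that wants to hear about interfaces as they come
 * and go fills in a watcher; the hypothetical my_new_smi() callback is
 * called once for every existing interface and again for each new one:
 *
 *     static void my_new_smi(int if_num, struct device *dev)
 *     {
 *             pr_info("IPMI interface %d appeared\n", if_num);
 *     }
 *
 *     static struct ipmi_smi_watcher my_watcher = {
 *             .owner   = THIS_MODULE,
 *             .new_smi = my_new_smi,
 *     };
 *
 *     rv = ipmi_smi_watcher_register(&my_watcher);
 *
 * A real watcher would also supply the companion removal callback in
 * struct ipmi_smi_watcher, which is omitted in this sketch.
 */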
582 
583 int ipmi_smi_watcher_unregister(struct ipmi_smi_watcher *watcher)
584 {
585 	mutex_lock(&smi_watchers_mutex);
586 	list_del(&(watcher->link));
587 	mutex_unlock(&smi_watchers_mutex);
588 	return 0;
589 }
590 EXPORT_SYMBOL(ipmi_smi_watcher_unregister);
591 
592 /*
593  * Must be called with smi_watchers_mutex held.
594  */
595 static void
596 call_smi_watchers(int i, struct device *dev)
597 {
598 	struct ipmi_smi_watcher *w;
599 
600 	list_for_each_entry(w, &smi_watchers, link) {
601 		if (try_module_get(w->owner)) {
602 			w->new_smi(i, dev);
603 			module_put(w->owner);
604 		}
605 	}
606 }
607 
608 static int
609 ipmi_addr_equal(struct ipmi_addr *addr1, struct ipmi_addr *addr2)
610 {
611 	if (addr1->addr_type != addr2->addr_type)
612 		return 0;
613 
614 	if (addr1->channel != addr2->channel)
615 		return 0;
616 
617 	if (addr1->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
618 		struct ipmi_system_interface_addr *smi_addr1
619 		    = (struct ipmi_system_interface_addr *) addr1;
620 		struct ipmi_system_interface_addr *smi_addr2
621 		    = (struct ipmi_system_interface_addr *) addr2;
622 		return (smi_addr1->lun == smi_addr2->lun);
623 	}
624 
625 	if (is_ipmb_addr(addr1) || is_ipmb_bcast_addr(addr1)) {
626 		struct ipmi_ipmb_addr *ipmb_addr1
627 		    = (struct ipmi_ipmb_addr *) addr1;
628 		struct ipmi_ipmb_addr *ipmb_addr2
629 		    = (struct ipmi_ipmb_addr *) addr2;
630 
631 		return ((ipmb_addr1->slave_addr == ipmb_addr2->slave_addr)
632 			&& (ipmb_addr1->lun == ipmb_addr2->lun));
633 	}
634 
635 	if (is_lan_addr(addr1)) {
636 		struct ipmi_lan_addr *lan_addr1
637 			= (struct ipmi_lan_addr *) addr1;
638 		struct ipmi_lan_addr *lan_addr2
639 		    = (struct ipmi_lan_addr *) addr2;
640 
641 		return ((lan_addr1->remote_SWID == lan_addr2->remote_SWID)
642 			&& (lan_addr1->local_SWID == lan_addr2->local_SWID)
643 			&& (lan_addr1->session_handle
644 			    == lan_addr2->session_handle)
645 			&& (lan_addr1->lun == lan_addr2->lun));
646 	}
647 
648 	return 1;
649 }
650 
651 int ipmi_validate_addr(struct ipmi_addr *addr, int len)
652 {
653 	if (len < sizeof(struct ipmi_system_interface_addr))
654 		return -EINVAL;
655 
656 	if (addr->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
657 		if (addr->channel != IPMI_BMC_CHANNEL)
658 			return -EINVAL;
659 		return 0;
660 	}
661 
662 	if ((addr->channel == IPMI_BMC_CHANNEL)
663 	    || (addr->channel >= IPMI_MAX_CHANNELS)
664 	    || (addr->channel < 0))
665 		return -EINVAL;
666 
667 	if (is_ipmb_addr(addr) || is_ipmb_bcast_addr(addr)) {
668 		if (len < sizeof(struct ipmi_ipmb_addr))
669 			return -EINVAL;
670 		return 0;
671 	}
672 
673 	if (is_lan_addr(addr)) {
674 		if (len < sizeof(struct ipmi_lan_addr))
675 			return -EINVAL;
676 		return 0;
677 	}
678 
679 	return -EINVAL;
680 }
681 EXPORT_SYMBOL(ipmi_validate_addr);
682 
683 unsigned int ipmi_addr_length(int addr_type)
684 {
685 	if (addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
686 		return sizeof(struct ipmi_system_interface_addr);
687 
688 	if ((addr_type == IPMI_IPMB_ADDR_TYPE)
689 			|| (addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE))
690 		return sizeof(struct ipmi_ipmb_addr);
691 
692 	if (addr_type == IPMI_LAN_ADDR_TYPE)
693 		return sizeof(struct ipmi_lan_addr);
694 
695 	return 0;
696 }
697 EXPORT_SYMBOL(ipmi_addr_length);
698 
699 static void deliver_response(struct ipmi_recv_msg *msg)
700 {
701 	if (!msg->user) {
702 		ipmi_smi_t    intf = msg->user_msg_data;
703 
704 		/* Special handling for NULL users. */
705 		if (intf->null_user_handler) {
706 			intf->null_user_handler(intf, msg);
707 			ipmi_inc_stat(intf, handled_local_responses);
708 		} else {
709 			/* No handler, so give up. */
710 			ipmi_inc_stat(intf, unhandled_local_responses);
711 		}
712 		ipmi_free_recv_msg(msg);
713 	} else {
714 		ipmi_user_t user = msg->user;
715 		user->handler->ipmi_recv_hndl(msg, user->handler_data);
716 	}
717 }
718 
719 static void
720 deliver_err_response(struct ipmi_recv_msg *msg, int err)
721 {
722 	msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
723 	msg->msg_data[0] = err;
724 	msg->msg.netfn |= 1; /* Convert to a response. */
725 	msg->msg.data_len = 1;
726 	msg->msg.data = msg->msg_data;
727 	deliver_response(msg);
728 }
729 
730 /*
731  * Find the next sequence number not being used and add the given
732  * message with the given timeout to the sequence table.  This must be
733  * called with the interface's seq_lock held.
734  */
735 static int intf_next_seq(ipmi_smi_t           intf,
736 			 struct ipmi_recv_msg *recv_msg,
737 			 unsigned long        timeout,
738 			 int                  retries,
739 			 int                  broadcast,
740 			 unsigned char        *seq,
741 			 long                 *seqid)
742 {
743 	int          rv = 0;
744 	unsigned int i;
745 
746 	for (i = intf->curr_seq; (i+1)%IPMI_IPMB_NUM_SEQ != intf->curr_seq;
747 					i = (i+1)%IPMI_IPMB_NUM_SEQ) {
748 		if (!intf->seq_table[i].inuse)
749 			break;
750 	}
751 
752 	if (!intf->seq_table[i].inuse) {
753 		intf->seq_table[i].recv_msg = recv_msg;
754 
755 		/*
756 		 * Start with the maximum timeout; when the send response
757 		 * comes in we will start the real timer.
758 		 */
759 		intf->seq_table[i].timeout = MAX_MSG_TIMEOUT;
760 		intf->seq_table[i].orig_timeout = timeout;
761 		intf->seq_table[i].retries_left = retries;
762 		intf->seq_table[i].broadcast = broadcast;
763 		intf->seq_table[i].inuse = 1;
764 		intf->seq_table[i].seqid = NEXT_SEQID(intf->seq_table[i].seqid);
765 		*seq = i;
766 		*seqid = intf->seq_table[i].seqid;
767 		intf->curr_seq = (i+1)%IPMI_IPMB_NUM_SEQ;
768 	} else {
769 		rv = -EAGAIN;
770 	}
771 
772 	return rv;
773 }
774 
775 /*
776  * Return the receive message for the given sequence number and
777  * release the sequence number so it can be reused.  Some other data
778  * is passed in to be sure the message matches up correctly (to help
779  * guard against messages coming in after their timeout and the
780  * sequence number being reused).
781  */
782 static int intf_find_seq(ipmi_smi_t           intf,
783 			 unsigned char        seq,
784 			 short                channel,
785 			 unsigned char        cmd,
786 			 unsigned char        netfn,
787 			 struct ipmi_addr     *addr,
788 			 struct ipmi_recv_msg **recv_msg)
789 {
790 	int           rv = -ENODEV;
791 	unsigned long flags;
792 
793 	if (seq >= IPMI_IPMB_NUM_SEQ)
794 		return -EINVAL;
795 
796 	spin_lock_irqsave(&(intf->seq_lock), flags);
797 	if (intf->seq_table[seq].inuse) {
798 		struct ipmi_recv_msg *msg = intf->seq_table[seq].recv_msg;
799 
800 		if ((msg->addr.channel == channel) && (msg->msg.cmd == cmd)
801 				&& (msg->msg.netfn == netfn)
802 				&& (ipmi_addr_equal(addr, &(msg->addr)))) {
803 			*recv_msg = msg;
804 			intf->seq_table[seq].inuse = 0;
805 			rv = 0;
806 		}
807 	}
808 	spin_unlock_irqrestore(&(intf->seq_lock), flags);
809 
810 	return rv;
811 }
812 
813 
814 /* Start the timer for a specific sequence table entry. */
815 static int intf_start_seq_timer(ipmi_smi_t intf,
816 				long       msgid)
817 {
818 	int           rv = -ENODEV;
819 	unsigned long flags;
820 	unsigned char seq;
821 	unsigned long seqid;
822 
823 
824 	GET_SEQ_FROM_MSGID(msgid, seq, seqid);
825 
826 	spin_lock_irqsave(&(intf->seq_lock), flags);
827 	/*
828 	 * We do this verification because the user can be deleted
829 	 * while a message is outstanding.
830 	 */
831 	if ((intf->seq_table[seq].inuse)
832 				&& (intf->seq_table[seq].seqid == seqid)) {
833 		struct seq_table *ent = &(intf->seq_table[seq]);
834 		ent->timeout = ent->orig_timeout;
835 		rv = 0;
836 	}
837 	spin_unlock_irqrestore(&(intf->seq_lock), flags);
838 
839 	return rv;
840 }
841 
842 /* Got an error for the send message for a specific sequence number. */
843 static int intf_err_seq(ipmi_smi_t   intf,
844 			long         msgid,
845 			unsigned int err)
846 {
847 	int                  rv = -ENODEV;
848 	unsigned long        flags;
849 	unsigned char        seq;
850 	unsigned long        seqid;
851 	struct ipmi_recv_msg *msg = NULL;
852 
853 
854 	GET_SEQ_FROM_MSGID(msgid, seq, seqid);
855 
856 	spin_lock_irqsave(&(intf->seq_lock), flags);
857 	/*
858 	 * We do this verification because the user can be deleted
859 	 * while a message is outstanding.
860 	 */
861 	if ((intf->seq_table[seq].inuse)
862 				&& (intf->seq_table[seq].seqid == seqid)) {
863 		struct seq_table *ent = &(intf->seq_table[seq]);
864 
865 		ent->inuse = 0;
866 		msg = ent->recv_msg;
867 		rv = 0;
868 	}
869 	spin_unlock_irqrestore(&(intf->seq_lock), flags);
870 
871 	if (msg)
872 		deliver_err_response(msg, err);
873 
874 	return rv;
875 }
876 
877 
878 int ipmi_create_user(unsigned int          if_num,
879 		     struct ipmi_user_hndl *handler,
880 		     void                  *handler_data,
881 		     ipmi_user_t           *user)
882 {
883 	unsigned long flags;
884 	ipmi_user_t   new_user;
885 	int           rv = 0;
886 	ipmi_smi_t    intf;
887 
888 	/*
889 	 * There is no module usecount here, because it's not
890 	 * required.  Since this can only be used by and called from
891 	 * other modules, they will implicitly use this module, and
892 	 * thus this can't be removed unless the other modules are
893 	 * removed.
894 	 */
895 
896 	if (handler == NULL)
897 		return -EINVAL;
898 
899 	/*
900 	 * Make sure the driver is actually initialized; this handles
901 	 * problems with initialization order.
902 	 */
903 	if (!initialized) {
904 		rv = ipmi_init_msghandler();
905 		if (rv)
906 			return rv;
907 
908 		/*
909 		 * The init code doesn't return an error if it was turned
910 		 * off, but it won't initialize.  Check that.
911 		 */
912 		if (!initialized)
913 			return -ENODEV;
914 	}
915 
916 	new_user = kmalloc(sizeof(*new_user), GFP_KERNEL);
917 	if (!new_user)
918 		return -ENOMEM;
919 
920 	mutex_lock(&ipmi_interfaces_mutex);
921 	list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
922 		if (intf->intf_num == if_num)
923 			goto found;
924 	}
925 	/* Not found, return an error */
926 	rv = -EINVAL;
927 	goto out_kfree;
928 
929  found:
930 	/* Note that each existing user holds a refcount to the interface. */
931 	kref_get(&intf->refcount);
932 
933 	kref_init(&new_user->refcount);
934 	new_user->handler = handler;
935 	new_user->handler_data = handler_data;
936 	new_user->intf = intf;
937 	new_user->gets_events = 0;
938 
939 	if (!try_module_get(intf->handlers->owner)) {
940 		rv = -ENODEV;
941 		goto out_kref;
942 	}
943 
944 	if (intf->handlers->inc_usecount) {
945 		rv = intf->handlers->inc_usecount(intf->send_info);
946 		if (rv) {
947 			module_put(intf->handlers->owner);
948 			goto out_kref;
949 		}
950 	}
951 
952 	/*
953 	 * Hold the lock so intf->handlers is guaranteed to be good
954 	 * until now
955 	 */
956 	mutex_unlock(&ipmi_interfaces_mutex);
957 
958 	new_user->valid = 1;
959 	spin_lock_irqsave(&intf->seq_lock, flags);
960 	list_add_rcu(&new_user->link, &intf->users);
961 	spin_unlock_irqrestore(&intf->seq_lock, flags);
962 	*user = new_user;
963 	return 0;
964 
965 out_kref:
966 	kref_put(&intf->refcount, intf_free);
967 out_kfree:
968 	mutex_unlock(&ipmi_interfaces_mutex);
969 	kfree(new_user);
970 	return rv;
971 }
972 EXPORT_SYMBOL(ipmi_create_user);
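/*
 * Minimal usage sketch (illustrative, not part of the original
 * source).  A kernel client supplies a receive handler and gets back
 * an ipmi_user_t bound to interface 0; the handler signature matches
 * the ipmi_recv_hndl call in deliver_response() above, and the handler
 * owns the delivered message, so it must eventually free it with
 * ipmi_free_recv_msg():
 *
 *     static void my_recv(struct ipmi_recv_msg *msg, void *handler_data)
 *     {
 *             pr_info("got response, recv_type %d\n", msg->recv_type);
 *             ipmi_free_recv_msg(msg);
 *     }
 *
 *     static struct ipmi_user_hndl my_hndl = {
 *             .ipmi_recv_hndl = my_recv,
 *     };
 *
 *     ipmi_user_t user;
 *     int rv = ipmi_create_user(0, &my_hndl, NULL, &user);
 *     if (rv)
 *             return rv;
 */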
973 
974 int ipmi_get_smi_info(int if_num, struct ipmi_smi_info *data)
975 {
976 	int           rv = 0;
977 	ipmi_smi_t    intf;
978 	struct ipmi_smi_handlers *handlers;
979 
980 	mutex_lock(&ipmi_interfaces_mutex);
981 	list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
982 		if (intf->intf_num == if_num)
983 			goto found;
984 	}
985 	/* Not found, return an error */
986 	rv = -EINVAL;
987 	mutex_unlock(&ipmi_interfaces_mutex);
988 	return rv;
989 
990 found:
991 	handlers = intf->handlers;
992 	rv = -ENOSYS;
993 	if (handlers->get_smi_info)
994 		rv = handlers->get_smi_info(intf->send_info, data);
995 	mutex_unlock(&ipmi_interfaces_mutex);
996 
997 	return rv;
998 }
999 EXPORT_SYMBOL(ipmi_get_smi_info);
1000 
1001 static void free_user(struct kref *ref)
1002 {
1003 	ipmi_user_t user = container_of(ref, struct ipmi_user, refcount);
1004 	kfree(user);
1005 }
1006 
1007 int ipmi_destroy_user(ipmi_user_t user)
1008 {
1009 	ipmi_smi_t       intf = user->intf;
1010 	int              i;
1011 	unsigned long    flags;
1012 	struct cmd_rcvr  *rcvr;
1013 	struct cmd_rcvr  *rcvrs = NULL;
1014 
1015 	user->valid = 0;
1016 
1017 	/* Remove the user from the interface's sequence table. */
1018 	spin_lock_irqsave(&intf->seq_lock, flags);
1019 	list_del_rcu(&user->link);
1020 
1021 	for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
1022 		if (intf->seq_table[i].inuse
1023 		    && (intf->seq_table[i].recv_msg->user == user)) {
1024 			intf->seq_table[i].inuse = 0;
1025 			ipmi_free_recv_msg(intf->seq_table[i].recv_msg);
1026 		}
1027 	}
1028 	spin_unlock_irqrestore(&intf->seq_lock, flags);
1029 
1030 	/*
1031 	 * Remove the user from the command receiver's table.  First
1032 	 * we build a list of everything (not using the standard link,
1033 	 * since other things may be using it till we do
1034 	 * synchronize_rcu()), then free everything in that list.
1035 	 */
1036 	mutex_lock(&intf->cmd_rcvrs_mutex);
1037 	list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link) {
1038 		if (rcvr->user == user) {
1039 			list_del_rcu(&rcvr->link);
1040 			rcvr->next = rcvrs;
1041 			rcvrs = rcvr;
1042 		}
1043 	}
1044 	mutex_unlock(&intf->cmd_rcvrs_mutex);
1045 	synchronize_rcu();
1046 	while (rcvrs) {
1047 		rcvr = rcvrs;
1048 		rcvrs = rcvr->next;
1049 		kfree(rcvr);
1050 	}
1051 
1052 	mutex_lock(&ipmi_interfaces_mutex);
1053 	if (intf->handlers) {
1054 		module_put(intf->handlers->owner);
1055 		if (intf->handlers->dec_usecount)
1056 			intf->handlers->dec_usecount(intf->send_info);
1057 	}
1058 	mutex_unlock(&ipmi_interfaces_mutex);
1059 
1060 	kref_put(&intf->refcount, intf_free);
1061 
1062 	kref_put(&user->refcount, free_user);
1063 
1064 	return 0;
1065 }
1066 EXPORT_SYMBOL(ipmi_destroy_user);
1067 
1068 void ipmi_get_version(ipmi_user_t   user,
1069 		      unsigned char *major,
1070 		      unsigned char *minor)
1071 {
1072 	*major = user->intf->ipmi_version_major;
1073 	*minor = user->intf->ipmi_version_minor;
1074 }
1075 EXPORT_SYMBOL(ipmi_get_version);
1076 
1077 int ipmi_set_my_address(ipmi_user_t   user,
1078 			unsigned int  channel,
1079 			unsigned char address)
1080 {
1081 	if (channel >= IPMI_MAX_CHANNELS)
1082 		return -EINVAL;
1083 	user->intf->channels[channel].address = address;
1084 	return 0;
1085 }
1086 EXPORT_SYMBOL(ipmi_set_my_address);
1087 
1088 int ipmi_get_my_address(ipmi_user_t   user,
1089 			unsigned int  channel,
1090 			unsigned char *address)
1091 {
1092 	if (channel >= IPMI_MAX_CHANNELS)
1093 		return -EINVAL;
1094 	*address = user->intf->channels[channel].address;
1095 	return 0;
1096 }
1097 EXPORT_SYMBOL(ipmi_get_my_address);
1098 
1099 int ipmi_set_my_LUN(ipmi_user_t   user,
1100 		    unsigned int  channel,
1101 		    unsigned char LUN)
1102 {
1103 	if (channel >= IPMI_MAX_CHANNELS)
1104 		return -EINVAL;
1105 	user->intf->channels[channel].lun = LUN & 0x3;
1106 	return 0;
1107 }
1108 EXPORT_SYMBOL(ipmi_set_my_LUN);
1109 
1110 int ipmi_get_my_LUN(ipmi_user_t   user,
1111 		    unsigned int  channel,
1112 		    unsigned char *address)
1113 {
1114 	if (channel >= IPMI_MAX_CHANNELS)
1115 		return -EINVAL;
1116 	*address = user->intf->channels[channel].lun;
1117 	return 0;
1118 }
1119 EXPORT_SYMBOL(ipmi_get_my_LUN);
1120 
1121 int ipmi_get_maintenance_mode(ipmi_user_t user)
1122 {
1123 	int           mode;
1124 	unsigned long flags;
1125 
1126 	spin_lock_irqsave(&user->intf->maintenance_mode_lock, flags);
1127 	mode = user->intf->maintenance_mode;
1128 	spin_unlock_irqrestore(&user->intf->maintenance_mode_lock, flags);
1129 
1130 	return mode;
1131 }
1132 EXPORT_SYMBOL(ipmi_get_maintenance_mode);
1133 
1134 static void maintenance_mode_update(ipmi_smi_t intf)
1135 {
1136 	if (intf->handlers->set_maintenance_mode)
1137 		intf->handlers->set_maintenance_mode(
1138 			intf->send_info, intf->maintenance_mode_enable);
1139 }
1140 
1141 int ipmi_set_maintenance_mode(ipmi_user_t user, int mode)
1142 {
1143 	int           rv = 0;
1144 	unsigned long flags;
1145 	ipmi_smi_t    intf = user->intf;
1146 
1147 	spin_lock_irqsave(&intf->maintenance_mode_lock, flags);
1148 	if (intf->maintenance_mode != mode) {
1149 		switch (mode) {
1150 		case IPMI_MAINTENANCE_MODE_AUTO:
1151 			intf->maintenance_mode = mode;
1152 			intf->maintenance_mode_enable
1153 				= (intf->auto_maintenance_timeout > 0);
1154 			break;
1155 
1156 		case IPMI_MAINTENANCE_MODE_OFF:
1157 			intf->maintenance_mode = mode;
1158 			intf->maintenance_mode_enable = 0;
1159 			break;
1160 
1161 		case IPMI_MAINTENANCE_MODE_ON:
1162 			intf->maintenance_mode = mode;
1163 			intf->maintenance_mode_enable = 1;
1164 			break;
1165 
1166 		default:
1167 			rv = -EINVAL;
1168 			goto out_unlock;
1169 		}
1170 
1171 		maintenance_mode_update(intf);
1172 	}
1173  out_unlock:
1174 	spin_unlock_irqrestore(&intf->maintenance_mode_lock, flags);
1175 
1176 	return rv;
1177 }
1178 EXPORT_SYMBOL(ipmi_set_maintenance_mode);
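/*
 * Illustrative call (not in the original source): a client doing a
 * long maintenance operation (a firmware update, say) would force
 * maintenance mode on for the duration and then restore automatic
 * handling:
 *
 *     ipmi_set_maintenance_mode(user, IPMI_MAINTENANCE_MODE_ON);
 *     ... do the maintenance work ...
 *     ipmi_set_maintenance_mode(user, IPMI_MAINTENANCE_MODE_AUTO);
 */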
1179 
1180 int ipmi_set_gets_events(ipmi_user_t user, int val)
1181 {
1182 	unsigned long        flags;
1183 	ipmi_smi_t           intf = user->intf;
1184 	struct ipmi_recv_msg *msg, *msg2;
1185 	struct list_head     msgs;
1186 
1187 	INIT_LIST_HEAD(&msgs);
1188 
1189 	spin_lock_irqsave(&intf->events_lock, flags);
1190 	user->gets_events = val;
1191 
1192 	if (intf->delivering_events)
1193 		/*
1194 		 * Another thread is delivering events for this, so
1195 		 * let it handle any new events.
1196 		 */
1197 		goto out;
1198 
1199 	/* Deliver any queued events. */
1200 	while (user->gets_events && !list_empty(&intf->waiting_events)) {
1201 		list_for_each_entry_safe(msg, msg2, &intf->waiting_events, link)
1202 			list_move_tail(&msg->link, &msgs);
1203 		intf->waiting_events_count = 0;
1204 		if (intf->event_msg_printed) {
1205 			printk(KERN_WARNING PFX "Event queue no longer"
1206 			       " full\n");
1207 			intf->event_msg_printed = 0;
1208 		}
1209 
1210 		intf->delivering_events = 1;
1211 		spin_unlock_irqrestore(&intf->events_lock, flags);
1212 
1213 		list_for_each_entry_safe(msg, msg2, &msgs, link) {
1214 			msg->user = user;
1215 			kref_get(&user->refcount);
1216 			deliver_response(msg);
1217 		}
1218 
1219 		spin_lock_irqsave(&intf->events_lock, flags);
1220 		intf->delivering_events = 0;
1221 	}
1222 
1223  out:
1224 	spin_unlock_irqrestore(&intf->events_lock, flags);
1225 
1226 	return 0;
1227 }
1228 EXPORT_SYMBOL(ipmi_set_gets_events);
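/*
 * Illustrative call (not in the original source): after creating a
 * user, a client that wants event traffic enables delivery with
 *
 *     ipmi_set_gets_events(user, 1);
 *
 * which, as implemented above, also flushes any events already queued
 * on waiting_events to that user.
 */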
1229 
1230 static struct cmd_rcvr *find_cmd_rcvr(ipmi_smi_t    intf,
1231 				      unsigned char netfn,
1232 				      unsigned char cmd,
1233 				      unsigned char chan)
1234 {
1235 	struct cmd_rcvr *rcvr;
1236 
1237 	list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link) {
1238 		if ((rcvr->netfn == netfn) && (rcvr->cmd == cmd)
1239 					&& (rcvr->chans & (1 << chan)))
1240 			return rcvr;
1241 	}
1242 	return NULL;
1243 }
1244 
1245 static int is_cmd_rcvr_exclusive(ipmi_smi_t    intf,
1246 				 unsigned char netfn,
1247 				 unsigned char cmd,
1248 				 unsigned int  chans)
1249 {
1250 	struct cmd_rcvr *rcvr;
1251 
1252 	list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link) {
1253 		if ((rcvr->netfn == netfn) && (rcvr->cmd == cmd)
1254 					&& (rcvr->chans & chans))
1255 			return 0;
1256 	}
1257 	return 1;
1258 }
1259 
1260 int ipmi_register_for_cmd(ipmi_user_t   user,
1261 			  unsigned char netfn,
1262 			  unsigned char cmd,
1263 			  unsigned int  chans)
1264 {
1265 	ipmi_smi_t      intf = user->intf;
1266 	struct cmd_rcvr *rcvr;
1267 	int             rv = 0;
1268 
1269 
1270 	rcvr = kmalloc(sizeof(*rcvr), GFP_KERNEL);
1271 	if (!rcvr)
1272 		return -ENOMEM;
1273 	rcvr->cmd = cmd;
1274 	rcvr->netfn = netfn;
1275 	rcvr->chans = chans;
1276 	rcvr->user = user;
1277 
1278 	mutex_lock(&intf->cmd_rcvrs_mutex);
1279 	/* Make sure the command/netfn is not already registered. */
1280 	if (!is_cmd_rcvr_exclusive(intf, netfn, cmd, chans)) {
1281 		rv = -EBUSY;
1282 		goto out_unlock;
1283 	}
1284 
1285 	list_add_rcu(&rcvr->link, &intf->cmd_rcvrs);
1286 
1287  out_unlock:
1288 	mutex_unlock(&intf->cmd_rcvrs_mutex);
1289 	if (rv)
1290 		kfree(rcvr);
1291 
1292 	return rv;
1293 }
1294 EXPORT_SYMBOL(ipmi_register_for_cmd);
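/*
 * Illustrative registration (not in the original source).  chans is a
 * bitmask of channel numbers, as the (1 << chan) test in
 * find_cmd_rcvr() shows; the netfn/cmd values here are arbitrary
 * examples:
 *
 *     rv = ipmi_register_for_cmd(user, IPMI_NETFN_APP_REQUEST, 0x01,
 *                                1 << 0);
 *
 * This claims that command on channel 0 only and fails with -EBUSY if
 * another user has already registered it on an overlapping channel.
 */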
1295 
1296 int ipmi_unregister_for_cmd(ipmi_user_t   user,
1297 			    unsigned char netfn,
1298 			    unsigned char cmd,
1299 			    unsigned int  chans)
1300 {
1301 	ipmi_smi_t      intf = user->intf;
1302 	struct cmd_rcvr *rcvr;
1303 	struct cmd_rcvr *rcvrs = NULL;
1304 	int i, rv = -ENOENT;
1305 
1306 	mutex_lock(&intf->cmd_rcvrs_mutex);
1307 	for (i = 0; i < IPMI_NUM_CHANNELS; i++) {
1308 		if (((1 << i) & chans) == 0)
1309 			continue;
1310 		rcvr = find_cmd_rcvr(intf, netfn, cmd, i);
1311 		if (rcvr == NULL)
1312 			continue;
1313 		if (rcvr->user == user) {
1314 			rv = 0;
1315 			rcvr->chans &= ~chans;
1316 			if (rcvr->chans == 0) {
1317 				list_del_rcu(&rcvr->link);
1318 				rcvr->next = rcvrs;
1319 				rcvrs = rcvr;
1320 			}
1321 		}
1322 	}
1323 	mutex_unlock(&intf->cmd_rcvrs_mutex);
1324 	synchronize_rcu();
1325 	while (rcvrs) {
1326 		rcvr = rcvrs;
1327 		rcvrs = rcvr->next;
1328 		kfree(rcvr);
1329 	}
1330 	return rv;
1331 }
1332 EXPORT_SYMBOL(ipmi_unregister_for_cmd);
1333 
1334 static unsigned char
1335 ipmb_checksum(unsigned char *data, int size)
1336 {
1337 	unsigned char csum = 0;
1338 
1339 	for (; size > 0; size--, data++)
1340 		csum += *data;
1341 
1342 	return -csum;
1343 }
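/*
 * Worked example (illustrative): for the two header bytes 0x20 and
 * 0x18 the running sum is 0x38, so ipmb_checksum() returns
 * (unsigned char)-0x38 = 0xc8.  Adding all three bytes back together
 * gives 0x20 + 0x18 + 0xc8 = 0x100, whose low byte is zero; that is
 * the 2's-complement checksum property the receiver verifies.
 */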
1344 
1345 static inline void format_ipmb_msg(struct ipmi_smi_msg   *smi_msg,
1346 				   struct kernel_ipmi_msg *msg,
1347 				   struct ipmi_ipmb_addr *ipmb_addr,
1348 				   long                  msgid,
1349 				   unsigned char         ipmb_seq,
1350 				   int                   broadcast,
1351 				   unsigned char         source_address,
1352 				   unsigned char         source_lun)
1353 {
1354 	int i = broadcast;
1355 
1356 	/* Format the IPMB header data. */
1357 	smi_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
1358 	smi_msg->data[1] = IPMI_SEND_MSG_CMD;
1359 	smi_msg->data[2] = ipmb_addr->channel;
1360 	if (broadcast)
1361 		smi_msg->data[3] = 0;
1362 	smi_msg->data[i+3] = ipmb_addr->slave_addr;
1363 	smi_msg->data[i+4] = (msg->netfn << 2) | (ipmb_addr->lun & 0x3);
1364 	smi_msg->data[i+5] = ipmb_checksum(&(smi_msg->data[i+3]), 2);
1365 	smi_msg->data[i+6] = source_address;
1366 	smi_msg->data[i+7] = (ipmb_seq << 2) | source_lun;
1367 	smi_msg->data[i+8] = msg->cmd;
1368 
1369 	/* Now tack on the data to the message. */
1370 	if (msg->data_len > 0)
1371 		memcpy(&(smi_msg->data[i+9]), msg->data,
1372 		       msg->data_len);
1373 	smi_msg->data_size = msg->data_len + 9;
1374 
1375 	/* Now calculate the checksum and tack it on. */
1376 	smi_msg->data[i+smi_msg->data_size]
1377 		= ipmb_checksum(&(smi_msg->data[i+6]),
1378 				smi_msg->data_size-6);
1379 
1380 	/*
1381 	 * Add on the checksum size and the offset from the
1382 	 * broadcast.
1383 	 */
1384 	smi_msg->data_size += 1 + i;
1385 
1386 	smi_msg->msgid = msgid;
1387 }
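/*
 * Resulting buffer layout (a summary of format_ipmb_msg() above, shown
 * for the non-broadcast case, i == 0):
 *
 *     data[0]   Send Message NetFn/LUN (IPMI_NETFN_APP_REQUEST << 2)
 *     data[1]   IPMI_SEND_MSG_CMD
 *     data[2]   channel
 *     data[3]   responder slave address
 *     data[4]   (netfn << 2) | responder LUN
 *     data[5]   checksum over data[3..4]
 *     data[6]   requester (source) address
 *     data[7]   (ipmb_seq << 2) | source LUN
 *     data[8]   command byte
 *     data[9..] payload, followed by a checksum over data[6] onward
 *
 * A broadcast inserts a zero at data[3] and shifts the rest of the
 * header and payload down by one byte.
 */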
1388 
1389 static inline void format_lan_msg(struct ipmi_smi_msg   *smi_msg,
1390 				  struct kernel_ipmi_msg *msg,
1391 				  struct ipmi_lan_addr  *lan_addr,
1392 				  long                  msgid,
1393 				  unsigned char         ipmb_seq,
1394 				  unsigned char         source_lun)
1395 {
1396 	/* Format the LAN header data. */
1397 	smi_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
1398 	smi_msg->data[1] = IPMI_SEND_MSG_CMD;
1399 	smi_msg->data[2] = lan_addr->channel;
1400 	smi_msg->data[3] = lan_addr->session_handle;
1401 	smi_msg->data[4] = lan_addr->remote_SWID;
1402 	smi_msg->data[5] = (msg->netfn << 2) | (lan_addr->lun & 0x3);
1403 	smi_msg->data[6] = ipmb_checksum(&(smi_msg->data[4]), 2);
1404 	smi_msg->data[7] = lan_addr->local_SWID;
1405 	smi_msg->data[8] = (ipmb_seq << 2) | source_lun;
1406 	smi_msg->data[9] = msg->cmd;
1407 
1408 	/* Now tack on the data to the message. */
1409 	if (msg->data_len > 0)
1410 		memcpy(&(smi_msg->data[10]), msg->data,
1411 		       msg->data_len);
1412 	smi_msg->data_size = msg->data_len + 10;
1413 
1414 	/* Now calculate the checksum and tack it on. */
1415 	smi_msg->data[smi_msg->data_size]
1416 		= ipmb_checksum(&(smi_msg->data[7]),
1417 				smi_msg->data_size-7);
1418 
1419 	/*
1420 	 * Add on the checksum size (unlike the IPMB variant, there is no
1421 	 * broadcast offset here).
1422 	 */
1423 	smi_msg->data_size += 1;
1424 
1425 	smi_msg->msgid = msgid;
1426 }
1427 
1428 /*
1429  * Separate from ipmi_request so that the user does not have to be
1430  * supplied in certain circumstances (mainly at panic time).  If
1431  * messages are supplied, they will be freed, even if an error
1432  * occurs.
1433  */
1434 static int i_ipmi_request(ipmi_user_t          user,
1435 			  ipmi_smi_t           intf,
1436 			  struct ipmi_addr     *addr,
1437 			  long                 msgid,
1438 			  struct kernel_ipmi_msg *msg,
1439 			  void                 *user_msg_data,
1440 			  void                 *supplied_smi,
1441 			  struct ipmi_recv_msg *supplied_recv,
1442 			  int                  priority,
1443 			  unsigned char        source_address,
1444 			  unsigned char        source_lun,
1445 			  int                  retries,
1446 			  unsigned int         retry_time_ms)
1447 {
1448 	int                      rv = 0;
1449 	struct ipmi_smi_msg      *smi_msg;
1450 	struct ipmi_recv_msg     *recv_msg;
1451 	unsigned long            flags;
1452 	struct ipmi_smi_handlers *handlers;
1453 
1454 
1455 	if (supplied_recv)
1456 		recv_msg = supplied_recv;
1457 	else {
1458 		recv_msg = ipmi_alloc_recv_msg();
1459 		if (recv_msg == NULL)
1460 			return -ENOMEM;
1461 	}
1462 	recv_msg->user_msg_data = user_msg_data;
1463 
1464 	if (supplied_smi)
1465 		smi_msg = (struct ipmi_smi_msg *) supplied_smi;
1466 	else {
1467 		smi_msg = ipmi_alloc_smi_msg();
1468 		if (smi_msg == NULL) {
1469 			ipmi_free_recv_msg(recv_msg);
1470 			return -ENOMEM;
1471 		}
1472 	}
1473 
1474 	rcu_read_lock();
1475 	handlers = intf->handlers;
1476 	if (!handlers) {
1477 		rv = -ENODEV;
1478 		goto out_err;
1479 	}
1480 
1481 	recv_msg->user = user;
1482 	if (user)
1483 		kref_get(&user->refcount);
1484 	recv_msg->msgid = msgid;
1485 	/*
1486 	 * Store the message to send in the receive message so timeout
1487 	 * responses can get the proper response data.
1488 	 */
1489 	recv_msg->msg = *msg;
1490 
1491 	if (addr->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
1492 		struct ipmi_system_interface_addr *smi_addr;
1493 
1494 		if (msg->netfn & 1) {
1495 			/* Responses are not allowed to the SMI. */
1496 			rv = -EINVAL;
1497 			goto out_err;
1498 		}
1499 
1500 		smi_addr = (struct ipmi_system_interface_addr *) addr;
1501 		if (smi_addr->lun > 3) {
1502 			ipmi_inc_stat(intf, sent_invalid_commands);
1503 			rv = -EINVAL;
1504 			goto out_err;
1505 		}
1506 
1507 		memcpy(&recv_msg->addr, smi_addr, sizeof(*smi_addr));
1508 
1509 		if ((msg->netfn == IPMI_NETFN_APP_REQUEST)
1510 		    && ((msg->cmd == IPMI_SEND_MSG_CMD)
1511 			|| (msg->cmd == IPMI_GET_MSG_CMD)
1512 			|| (msg->cmd == IPMI_READ_EVENT_MSG_BUFFER_CMD))) {
1513 			/*
1514 			 * We don't let the user do these, since we manage
1515 			 * the sequence numbers.
1516 			 */
1517 			ipmi_inc_stat(intf, sent_invalid_commands);
1518 			rv = -EINVAL;
1519 			goto out_err;
1520 		}
1521 
1522 		if (((msg->netfn == IPMI_NETFN_APP_REQUEST)
1523 		      && ((msg->cmd == IPMI_COLD_RESET_CMD)
1524 			  || (msg->cmd == IPMI_WARM_RESET_CMD)))
1525 		     || (msg->netfn == IPMI_NETFN_FIRMWARE_REQUEST)) {
1526 			spin_lock_irqsave(&intf->maintenance_mode_lock, flags);
1527 			intf->auto_maintenance_timeout
1528 				= IPMI_MAINTENANCE_MODE_TIMEOUT;
1529 			if (!intf->maintenance_mode
1530 			    && !intf->maintenance_mode_enable) {
1531 				intf->maintenance_mode_enable = 1;
1532 				maintenance_mode_update(intf);
1533 			}
1534 			spin_unlock_irqrestore(&intf->maintenance_mode_lock,
1535 					       flags);
1536 		}
1537 
1538 		if ((msg->data_len + 2) > IPMI_MAX_MSG_LENGTH) {
1539 			ipmi_inc_stat(intf, sent_invalid_commands);
1540 			rv = -EMSGSIZE;
1541 			goto out_err;
1542 		}
1543 
1544 		smi_msg->data[0] = (msg->netfn << 2) | (smi_addr->lun & 0x3);
1545 		smi_msg->data[1] = msg->cmd;
1546 		smi_msg->msgid = msgid;
1547 		smi_msg->user_data = recv_msg;
1548 		if (msg->data_len > 0)
1549 			memcpy(&(smi_msg->data[2]), msg->data, msg->data_len);
1550 		smi_msg->data_size = msg->data_len + 2;
1551 		ipmi_inc_stat(intf, sent_local_commands);
1552 	} else if (is_ipmb_addr(addr) || is_ipmb_bcast_addr(addr)) {
1553 		struct ipmi_ipmb_addr *ipmb_addr;
1554 		unsigned char         ipmb_seq;
1555 		long                  seqid;
1556 		int                   broadcast = 0;
1557 
1558 		if (addr->channel >= IPMI_MAX_CHANNELS) {
1559 			ipmi_inc_stat(intf, sent_invalid_commands);
1560 			rv = -EINVAL;
1561 			goto out_err;
1562 		}
1563 
1564 		if (intf->channels[addr->channel].medium
1565 					!= IPMI_CHANNEL_MEDIUM_IPMB) {
1566 			ipmi_inc_stat(intf, sent_invalid_commands);
1567 			rv = -EINVAL;
1568 			goto out_err;
1569 		}
1570 
1571 		if (retries < 0) {
1572 		    if (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE)
1573 			retries = 0; /* Don't retry broadcasts. */
1574 		    else
1575 			retries = 4;
1576 		}
1577 		if (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE) {
1578 		    /*
1579 		     * Broadcasts add a zero at the beginning of the
1580 		     * message, but are otherwise the same as an IPMB
1581 		     * address.
1582 		     */
1583 		    addr->addr_type = IPMI_IPMB_ADDR_TYPE;
1584 		    broadcast = 1;
1585 		}
1586 
1587 
1588 		/* Default to 1 second retries. */
1589 		if (retry_time_ms == 0)
1590 		    retry_time_ms = 1000;
1591 
1592 		/*
1593 		 * 9 for the header and 1 for the checksum, plus
1594 		 * possibly one for the broadcast.
1595 		 */
1596 		if ((msg->data_len + 10 + broadcast) > IPMI_MAX_MSG_LENGTH) {
1597 			ipmi_inc_stat(intf, sent_invalid_commands);
1598 			rv = -EMSGSIZE;
1599 			goto out_err;
1600 		}
1601 
1602 		ipmb_addr = (struct ipmi_ipmb_addr *) addr;
1603 		if (ipmb_addr->lun > 3) {
1604 			ipmi_inc_stat(intf, sent_invalid_commands);
1605 			rv = -EINVAL;
1606 			goto out_err;
1607 		}
1608 
1609 		memcpy(&recv_msg->addr, ipmb_addr, sizeof(*ipmb_addr));
1610 
1611 		if (recv_msg->msg.netfn & 0x1) {
1612 			/*
1613 			 * It's a response, so use the user's sequence
1614 			 * from msgid.
1615 			 */
1616 			ipmi_inc_stat(intf, sent_ipmb_responses);
1617 			format_ipmb_msg(smi_msg, msg, ipmb_addr, msgid,
1618 					msgid, broadcast,
1619 					source_address, source_lun);
1620 
1621 			/*
1622 			 * Save the receive message so we can use it
1623 			 * to deliver the response.
1624 			 */
1625 			smi_msg->user_data = recv_msg;
1626 		} else {
1627 			/* It's a command, so get a sequence for it. */
1628 
1629 			spin_lock_irqsave(&(intf->seq_lock), flags);
1630 
1631 			/*
1632 			 * Create a sequence number with a 1 second
1633 			 * timeout and 4 retries.
1634 			 */
1635 			rv = intf_next_seq(intf,
1636 					   recv_msg,
1637 					   retry_time_ms,
1638 					   retries,
1639 					   broadcast,
1640 					   &ipmb_seq,
1641 					   &seqid);
1642 			if (rv) {
1643 				/*
1644 				 * We have used up all the sequence numbers,
1645 				 * probably, so abort.
1646 				 */
1647 				spin_unlock_irqrestore(&(intf->seq_lock),
1648 						       flags);
1649 				goto out_err;
1650 			}
1651 
1652 			ipmi_inc_stat(intf, sent_ipmb_commands);
1653 
1654 			/*
1655 			 * Store the sequence number in the message,
1656 			 * so that when the send message response
1657 			 * comes back we can start the timer.
1658 			 */
1659 			format_ipmb_msg(smi_msg, msg, ipmb_addr,
1660 					STORE_SEQ_IN_MSGID(ipmb_seq, seqid),
1661 					ipmb_seq, broadcast,
1662 					source_address, source_lun);
1663 
1664 			/*
1665 			 * Copy the message into the recv message data, so we
1666 			 * can retransmit it later if necessary.
1667 			 */
1668 			memcpy(recv_msg->msg_data, smi_msg->data,
1669 			       smi_msg->data_size);
1670 			recv_msg->msg.data = recv_msg->msg_data;
1671 			recv_msg->msg.data_len = smi_msg->data_size;
1672 
1673 			/*
1674 			 * We don't unlock until here, because we need
1675 			 * to copy the completed message into the
1676 			 * recv_msg before we release the lock.
1677 			 * Otherwise, race conditions may bite us.  I
1678 			 * know that's pretty paranoid, but I prefer
1679 			 * to be correct.
1680 			 */
1681 			spin_unlock_irqrestore(&(intf->seq_lock), flags);
1682 		}
1683 	} else if (is_lan_addr(addr)) {
1684 		struct ipmi_lan_addr  *lan_addr;
1685 		unsigned char         ipmb_seq;
1686 		long                  seqid;
1687 
1688 		if (addr->channel >= IPMI_MAX_CHANNELS) {
1689 			ipmi_inc_stat(intf, sent_invalid_commands);
1690 			rv = -EINVAL;
1691 			goto out_err;
1692 		}
1693 
1694 		if ((intf->channels[addr->channel].medium
1695 				!= IPMI_CHANNEL_MEDIUM_8023LAN)
1696 		    && (intf->channels[addr->channel].medium
1697 				!= IPMI_CHANNEL_MEDIUM_ASYNC)) {
1698 			ipmi_inc_stat(intf, sent_invalid_commands);
1699 			rv = -EINVAL;
1700 			goto out_err;
1701 		}
1702 
1703 		retries = 4;
1704 
1705 		/* Default to 1 second retries. */
1706 		if (retry_time_ms == 0)
1707 		    retry_time_ms = 1000;
1708 
1709 		/* 11 for the header and 1 for the checksum. */
1710 		if ((msg->data_len + 12) > IPMI_MAX_MSG_LENGTH) {
1711 			ipmi_inc_stat(intf, sent_invalid_commands);
1712 			rv = -EMSGSIZE;
1713 			goto out_err;
1714 		}
1715 
1716 		lan_addr = (struct ipmi_lan_addr *) addr;
1717 		if (lan_addr->lun > 3) {
1718 			ipmi_inc_stat(intf, sent_invalid_commands);
1719 			rv = -EINVAL;
1720 			goto out_err;
1721 		}
1722 
1723 		memcpy(&recv_msg->addr, lan_addr, sizeof(*lan_addr));
1724 
1725 		if (recv_msg->msg.netfn & 0x1) {
1726 			/*
1727 			 * It's a response, so use the user's sequence
1728 			 * from msgid.
1729 			 */
1730 			ipmi_inc_stat(intf, sent_lan_responses);
1731 			format_lan_msg(smi_msg, msg, lan_addr, msgid,
1732 				       msgid, source_lun);
1733 
1734 			/*
1735 			 * Save the receive message so we can use it
1736 			 * to deliver the response.
1737 			 */
1738 			smi_msg->user_data = recv_msg;
1739 		} else {
1740 			/* It's a command, so get a sequence for it. */
1741 
1742 			spin_lock_irqsave(&(intf->seq_lock), flags);
1743 
1744 			/*
1745 			 * Create a sequence number with a 1 second
1746 			 * timeout and 4 retries.
1747 			 */
1748 			rv = intf_next_seq(intf,
1749 					   recv_msg,
1750 					   retry_time_ms,
1751 					   retries,
1752 					   0,
1753 					   &ipmb_seq,
1754 					   &seqid);
1755 			if (rv) {
1756 				/*
1757 				 * We have used up all the sequence numbers,
1758 				 * probably, so abort.
1759 				 */
1760 				spin_unlock_irqrestore(&(intf->seq_lock),
1761 						       flags);
1762 				goto out_err;
1763 			}
1764 
1765 			ipmi_inc_stat(intf, sent_lan_commands);
1766 
1767 			/*
1768 			 * Store the sequence number in the message,
1769 			 * so that when the send message response
1770 			 * comes back we can start the timer.
1771 			 */
1772 			format_lan_msg(smi_msg, msg, lan_addr,
1773 				       STORE_SEQ_IN_MSGID(ipmb_seq, seqid),
1774 				       ipmb_seq, source_lun);
1775 
1776 			/*
1777 			 * Copy the message into the recv message data, so we
1778 			 * can retransmit it later if necessary.
1779 			 */
1780 			memcpy(recv_msg->msg_data, smi_msg->data,
1781 			       smi_msg->data_size);
1782 			recv_msg->msg.data = recv_msg->msg_data;
1783 			recv_msg->msg.data_len = smi_msg->data_size;
1784 
1785 			/*
1786 			 * We don't unlock until here, because we need
1787 			 * to copy the completed message into the
1788 			 * recv_msg before we release the lock.
1789 			 * Otherwise, race conditions may bite us.  I
1790 			 * know that's pretty paranoid, but I prefer
1791 			 * to be correct.
1792 			 */
1793 			spin_unlock_irqrestore(&(intf->seq_lock), flags);
1794 		}
1795 	} else {
1796 	    /* Unknown address type. */
1797 		ipmi_inc_stat(intf, sent_invalid_commands);
1798 		rv = -EINVAL;
1799 		goto out_err;
1800 	}
1801 
1802 #ifdef DEBUG_MSGING
1803 	{
1804 		int m;
1805 		for (m = 0; m < smi_msg->data_size; m++)
1806 			printk(" %2.2x", smi_msg->data[m]);
1807 		printk("\n");
1808 	}
1809 #endif
1810 
1811 	handlers->sender(intf->send_info, smi_msg, priority);
1812 	rcu_read_unlock();
1813 
1814 	return 0;
1815 
1816  out_err:
1817 	rcu_read_unlock();
1818 	ipmi_free_smi_msg(smi_msg);
1819 	ipmi_free_recv_msg(recv_msg);
1820 	return rv;
1821 }
1822 
1823 static int check_addr(ipmi_smi_t       intf,
1824 		      struct ipmi_addr *addr,
1825 		      unsigned char    *saddr,
1826 		      unsigned char    *lun)
1827 {
1828 	if (addr->channel >= IPMI_MAX_CHANNELS)
1829 		return -EINVAL;
1830 	*lun = intf->channels[addr->channel].lun;
1831 	*saddr = intf->channels[addr->channel].address;
1832 	return 0;
1833 }
1834 
1835 int ipmi_request_settime(ipmi_user_t      user,
1836 			 struct ipmi_addr *addr,
1837 			 long             msgid,
1838 			 struct kernel_ipmi_msg  *msg,
1839 			 void             *user_msg_data,
1840 			 int              priority,
1841 			 int              retries,
1842 			 unsigned int     retry_time_ms)
1843 {
1844 	unsigned char saddr, lun;
1845 	int           rv;
1846 
1847 	if (!user)
1848 		return -EINVAL;
1849 	rv = check_addr(user->intf, addr, &saddr, &lun);
1850 	if (rv)
1851 		return rv;
1852 	return i_ipmi_request(user,
1853 			      user->intf,
1854 			      addr,
1855 			      msgid,
1856 			      msg,
1857 			      user_msg_data,
1858 			      NULL, NULL,
1859 			      priority,
1860 			      saddr,
1861 			      lun,
1862 			      retries,
1863 			      retry_time_ms);
1864 }
1865 EXPORT_SYMBOL(ipmi_request_settime);
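/*
 * Minimal usage sketch (illustrative, not part of the original
 * source): sending a request to the local BMC over the system
 * interface.  The command byte below (0x01, Get Device ID) is only an
 * example; retries = -1 and retry_time_ms = 0 let i_ipmi_request()
 * pick its defaults where they apply:
 *
 *     struct ipmi_system_interface_addr addr = {
 *             .addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE,
 *             .channel   = IPMI_BMC_CHANNEL,
 *             .lun       = 0,
 *     };
 *     struct kernel_ipmi_msg msg = {
 *             .netfn    = IPMI_NETFN_APP_REQUEST,
 *             .cmd      = 0x01,
 *             .data     = NULL,
 *             .data_len = 0,
 *     };
 *
 *     rv = ipmi_request_settime(user, (struct ipmi_addr *) &addr, 0,
 *                               &msg, NULL, 0, -1, 0);
 *
 * The response comes back later through the user's ipmi_recv_hndl
 * callback.
 */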
1866 
1867 int ipmi_request_supply_msgs(ipmi_user_t          user,
1868 			     struct ipmi_addr     *addr,
1869 			     long                 msgid,
1870 			     struct kernel_ipmi_msg *msg,
1871 			     void                 *user_msg_data,
1872 			     void                 *supplied_smi,
1873 			     struct ipmi_recv_msg *supplied_recv,
1874 			     int                  priority)
1875 {
1876 	unsigned char saddr, lun;
1877 	int           rv;
1878 
1879 	if (!user)
1880 		return -EINVAL;
1881 	rv = check_addr(user->intf, addr, &saddr, &lun);
1882 	if (rv)
1883 		return rv;
1884 	return i_ipmi_request(user,
1885 			      user->intf,
1886 			      addr,
1887 			      msgid,
1888 			      msg,
1889 			      user_msg_data,
1890 			      supplied_smi,
1891 			      supplied_recv,
1892 			      priority,
1893 			      saddr,
1894 			      lun,
1895 			      -1, 0);
1896 }
1897 EXPORT_SYMBOL(ipmi_request_supply_msgs);
1898 
1899 #ifdef CONFIG_PROC_FS
1900 static int smi_ipmb_proc_show(struct seq_file *m, void *v)
1901 {
1902 	ipmi_smi_t intf = m->private;
1903 	int        i;
1904 
1905 	seq_printf(m, "%x", intf->channels[0].address);
1906 	for (i = 1; i < IPMI_MAX_CHANNELS; i++)
1907 		seq_printf(m, " %x", intf->channels[i].address);
1908 	return seq_putc(m, '\n');
1909 }
1910 
1911 static int smi_ipmb_proc_open(struct inode *inode, struct file *file)
1912 {
1913 	return single_open(file, smi_ipmb_proc_show, PDE(inode)->data);
1914 }
1915 
1916 static const struct file_operations smi_ipmb_proc_ops = {
1917 	.open		= smi_ipmb_proc_open,
1918 	.read		= seq_read,
1919 	.llseek		= seq_lseek,
1920 	.release	= single_release,
1921 };
1922 
1923 static int smi_version_proc_show(struct seq_file *m, void *v)
1924 {
1925 	ipmi_smi_t intf = m->private;
1926 
1927 	return seq_printf(m, "%u.%u\n",
1928 		       ipmi_version_major(&intf->bmc->id),
1929 		       ipmi_version_minor(&intf->bmc->id));
1930 }
1931 
1932 static int smi_version_proc_open(struct inode *inode, struct file *file)
1933 {
1934 	return single_open(file, smi_version_proc_show, PDE(inode)->data);
1935 }
1936 
1937 static const struct file_operations smi_version_proc_ops = {
1938 	.open		= smi_version_proc_open,
1939 	.read		= seq_read,
1940 	.llseek		= seq_lseek,
1941 	.release	= single_release,
1942 };
1943 
1944 static int smi_stats_proc_show(struct seq_file *m, void *v)
1945 {
1946 	ipmi_smi_t intf = m->private;
1947 
1948 	seq_printf(m, "sent_invalid_commands:       %u\n",
1949 		       ipmi_get_stat(intf, sent_invalid_commands));
1950 	seq_printf(m, "sent_local_commands:         %u\n",
1951 		       ipmi_get_stat(intf, sent_local_commands));
1952 	seq_printf(m, "handled_local_responses:     %u\n",
1953 		       ipmi_get_stat(intf, handled_local_responses));
1954 	seq_printf(m, "unhandled_local_responses:   %u\n",
1955 		       ipmi_get_stat(intf, unhandled_local_responses));
1956 	seq_printf(m, "sent_ipmb_commands:          %u\n",
1957 		       ipmi_get_stat(intf, sent_ipmb_commands));
1958 	seq_printf(m, "sent_ipmb_command_errs:      %u\n",
1959 		       ipmi_get_stat(intf, sent_ipmb_command_errs));
1960 	seq_printf(m, "retransmitted_ipmb_commands: %u\n",
1961 		       ipmi_get_stat(intf, retransmitted_ipmb_commands));
1962 	seq_printf(m, "timed_out_ipmb_commands:     %u\n",
1963 		       ipmi_get_stat(intf, timed_out_ipmb_commands));
1964 	seq_printf(m, "timed_out_ipmb_broadcasts:   %u\n",
1965 		       ipmi_get_stat(intf, timed_out_ipmb_broadcasts));
1966 	seq_printf(m, "sent_ipmb_responses:         %u\n",
1967 		       ipmi_get_stat(intf, sent_ipmb_responses));
1968 	seq_printf(m, "handled_ipmb_responses:      %u\n",
1969 		       ipmi_get_stat(intf, handled_ipmb_responses));
1970 	seq_printf(m, "invalid_ipmb_responses:      %u\n",
1971 		       ipmi_get_stat(intf, invalid_ipmb_responses));
1972 	seq_printf(m, "unhandled_ipmb_responses:    %u\n",
1973 		       ipmi_get_stat(intf, unhandled_ipmb_responses));
1974 	seq_printf(m, "sent_lan_commands:           %u\n",
1975 		       ipmi_get_stat(intf, sent_lan_commands));
1976 	seq_printf(m, "sent_lan_command_errs:       %u\n",
1977 		       ipmi_get_stat(intf, sent_lan_command_errs));
1978 	seq_printf(m, "retransmitted_lan_commands:  %u\n",
1979 		       ipmi_get_stat(intf, retransmitted_lan_commands));
1980 	seq_printf(m, "timed_out_lan_commands:      %u\n",
1981 		       ipmi_get_stat(intf, timed_out_lan_commands));
1982 	seq_printf(m, "sent_lan_responses:          %u\n",
1983 		       ipmi_get_stat(intf, sent_lan_responses));
1984 	seq_printf(m, "handled_lan_responses:       %u\n",
1985 		       ipmi_get_stat(intf, handled_lan_responses));
1986 	seq_printf(m, "invalid_lan_responses:       %u\n",
1987 		       ipmi_get_stat(intf, invalid_lan_responses));
1988 	seq_printf(m, "unhandled_lan_responses:     %u\n",
1989 		       ipmi_get_stat(intf, unhandled_lan_responses));
1990 	seq_printf(m, "handled_commands:            %u\n",
1991 		       ipmi_get_stat(intf, handled_commands));
1992 	seq_printf(m, "invalid_commands:            %u\n",
1993 		       ipmi_get_stat(intf, invalid_commands));
1994 	seq_printf(m, "unhandled_commands:          %u\n",
1995 		       ipmi_get_stat(intf, unhandled_commands));
1996 	seq_printf(m, "invalid_events:              %u\n",
1997 		       ipmi_get_stat(intf, invalid_events));
1998 	seq_printf(m, "events:                      %u\n",
1999 		       ipmi_get_stat(intf, events));
2000 	seq_printf(m, "failed rexmit LAN msgs:      %u\n",
2001 		       ipmi_get_stat(intf, dropped_rexmit_lan_commands));
2002 	seq_printf(m, "failed rexmit IPMB msgs:     %u\n",
2003 		       ipmi_get_stat(intf, dropped_rexmit_ipmb_commands));
2004 	return 0;
2005 }
2006 
2007 static int smi_stats_proc_open(struct inode *inode, struct file *file)
2008 {
2009 	return single_open(file, smi_stats_proc_show, PDE(inode)->data);
2010 }
2011 
2012 static const struct file_operations smi_stats_proc_ops = {
2013 	.open		= smi_stats_proc_open,
2014 	.read		= seq_read,
2015 	.llseek		= seq_lseek,
2016 	.release	= single_release,
2017 };
2018 #endif /* CONFIG_PROC_FS */
2019 
2020 int ipmi_smi_add_proc_entry(ipmi_smi_t smi, char *name,
2021 			    const struct file_operations *proc_ops,
2022 			    void *data)
2023 {
2024 	int                    rv = 0;
2025 #ifdef CONFIG_PROC_FS
2026 	struct proc_dir_entry  *file;
2027 	struct ipmi_proc_entry *entry;
2028 
2029 	/* Create a list element. */
2030 	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
2031 	if (!entry)
2032 		return -ENOMEM;
2033 	entry->name = kmalloc(strlen(name)+1, GFP_KERNEL);
2034 	if (!entry->name) {
2035 		kfree(entry);
2036 		return -ENOMEM;
2037 	}
2038 	strcpy(entry->name, name);
2039 
2040 	file = proc_create_data(name, 0, smi->proc_dir, proc_ops, data);
2041 	if (!file) {
2042 		kfree(entry->name);
2043 		kfree(entry);
2044 		rv = -ENOMEM;
2045 	} else {
2046 		mutex_lock(&smi->proc_entry_lock);
2047 		/* Stick it on the list. */
2048 		entry->next = smi->proc_entries;
2049 		smi->proc_entries = entry;
2050 		mutex_unlock(&smi->proc_entry_lock);
2051 	}
2052 #endif /* CONFIG_PROC_FS */
2053 
2054 	return rv;
2055 }
2056 EXPORT_SYMBOL(ipmi_smi_add_proc_entry);
2057 
2058 static int add_proc_entries(ipmi_smi_t smi, int num)
2059 {
2060 	int rv = 0;
2061 
2062 #ifdef CONFIG_PROC_FS
2063 	sprintf(smi->proc_dir_name, "%d", num);
2064 	smi->proc_dir = proc_mkdir(smi->proc_dir_name, proc_ipmi_root);
2065 	if (!smi->proc_dir)
2066 		rv = -ENOMEM;
2067 
2068 	if (rv == 0)
2069 		rv = ipmi_smi_add_proc_entry(smi, "stats",
2070 					     &smi_stats_proc_ops,
2071 					     smi);
2072 
2073 	if (rv == 0)
2074 		rv = ipmi_smi_add_proc_entry(smi, "ipmb",
2075 					     &smi_ipmb_proc_ops,
2076 					     smi);
2077 
2078 	if (rv == 0)
2079 		rv = ipmi_smi_add_proc_entry(smi, "version",
2080 					     &smi_version_proc_ops,
2081 					     smi);
2082 #endif /* CONFIG_PROC_FS */
2083 
2084 	return rv;
2085 }
2086 
2087 static void remove_proc_entries(ipmi_smi_t smi)
2088 {
2089 #ifdef CONFIG_PROC_FS
2090 	struct ipmi_proc_entry *entry;
2091 
2092 	mutex_lock(&smi->proc_entry_lock);
2093 	while (smi->proc_entries) {
2094 		entry = smi->proc_entries;
2095 		smi->proc_entries = entry->next;
2096 
2097 		remove_proc_entry(entry->name, smi->proc_dir);
2098 		kfree(entry->name);
2099 		kfree(entry);
2100 	}
2101 	mutex_unlock(&smi->proc_entry_lock);
2102 	remove_proc_entry(smi->proc_dir_name, proc_ipmi_root);
2103 #endif /* CONFIG_PROC_FS */
2104 }
2105 
2106 static int __find_bmc_guid(struct device *dev, void *data)
2107 {
2108 	unsigned char *id = data;
2109 	struct bmc_device *bmc = dev_get_drvdata(dev);
2110 	return memcmp(bmc->guid, id, 16) == 0;
2111 }
2112 
2113 static struct bmc_device *ipmi_find_bmc_guid(struct device_driver *drv,
2114 					     unsigned char *guid)
2115 {
2116 	struct device *dev;
2117 
2118 	dev = driver_find_device(drv, NULL, guid, __find_bmc_guid);
2119 	if (dev)
2120 		return dev_get_drvdata(dev);
2121 	else
2122 		return NULL;
2123 }
2124 
2125 struct prod_dev_id {
2126 	unsigned int  product_id;
2127 	unsigned char device_id;
2128 };
2129 
2130 static int __find_bmc_prod_dev_id(struct device *dev, void *data)
2131 {
2132 	struct prod_dev_id *id = data;
2133 	struct bmc_device *bmc = dev_get_drvdata(dev);
2134 
2135 	return (bmc->id.product_id == id->product_id
2136 		&& bmc->id.device_id == id->device_id);
2137 }
2138 
2139 static struct bmc_device *ipmi_find_bmc_prod_dev_id(
2140 	struct device_driver *drv,
2141 	unsigned int product_id, unsigned char device_id)
2142 {
2143 	struct prod_dev_id id = {
2144 		.product_id = product_id,
2145 		.device_id = device_id,
2146 	};
2147 	struct device *dev;
2148 
2149 	dev = driver_find_device(drv, NULL, &id, __find_bmc_prod_dev_id);
2150 	if (dev)
2151 		return dev_get_drvdata(dev);
2152 	else
2153 		return NULL;
2154 }
2155 
2156 static ssize_t device_id_show(struct device *dev,
2157 			      struct device_attribute *attr,
2158 			      char *buf)
2159 {
2160 	struct bmc_device *bmc = dev_get_drvdata(dev);
2161 
2162 	return snprintf(buf, 10, "%u\n", bmc->id.device_id);
2163 }
2164 
2165 static ssize_t provides_dev_sdrs_show(struct device *dev,
2166 				      struct device_attribute *attr,
2167 				      char *buf)
2168 {
2169 	struct bmc_device *bmc = dev_get_drvdata(dev);
2170 
2171 	return snprintf(buf, 10, "%u\n",
2172 			(bmc->id.device_revision & 0x80) >> 7);
2173 }
2174 
2175 static ssize_t revision_show(struct device *dev, struct device_attribute *attr,
2176 			     char *buf)
2177 {
2178 	struct bmc_device *bmc = dev_get_drvdata(dev);
2179 
2180 	return snprintf(buf, 20, "%u\n",
2181 			bmc->id.device_revision & 0x0F);
2182 }
2183 
2184 static ssize_t firmware_rev_show(struct device *dev,
2185 				 struct device_attribute *attr,
2186 				 char *buf)
2187 {
2188 	struct bmc_device *bmc = dev_get_drvdata(dev);
2189 
2190 	return snprintf(buf, 20, "%u.%x\n", bmc->id.firmware_revision_1,
2191 			bmc->id.firmware_revision_2);
2192 }
2193 
2194 static ssize_t ipmi_version_show(struct device *dev,
2195 				 struct device_attribute *attr,
2196 				 char *buf)
2197 {
2198 	struct bmc_device *bmc = dev_get_drvdata(dev);
2199 
2200 	return snprintf(buf, 20, "%u.%u\n",
2201 			ipmi_version_major(&bmc->id),
2202 			ipmi_version_minor(&bmc->id));
2203 }
2204 
2205 static ssize_t add_dev_support_show(struct device *dev,
2206 				    struct device_attribute *attr,
2207 				    char *buf)
2208 {
2209 	struct bmc_device *bmc = dev_get_drvdata(dev);
2210 
2211 	return snprintf(buf, 10, "0x%02x\n",
2212 			bmc->id.additional_device_support);
2213 }
2214 
2215 static ssize_t manufacturer_id_show(struct device *dev,
2216 				    struct device_attribute *attr,
2217 				    char *buf)
2218 {
2219 	struct bmc_device *bmc = dev_get_drvdata(dev);
2220 
2221 	return snprintf(buf, 20, "0x%6.6x\n", bmc->id.manufacturer_id);
2222 }
2223 
2224 static ssize_t product_id_show(struct device *dev,
2225 			       struct device_attribute *attr,
2226 			       char *buf)
2227 {
2228 	struct bmc_device *bmc = dev_get_drvdata(dev);
2229 
2230 	return snprintf(buf, 10, "0x%4.4x\n", bmc->id.product_id);
2231 }
2232 
2233 static ssize_t aux_firmware_rev_show(struct device *dev,
2234 				     struct device_attribute *attr,
2235 				     char *buf)
2236 {
2237 	struct bmc_device *bmc = dev_get_drvdata(dev);
2238 
2239 	return snprintf(buf, 21, "0x%02x 0x%02x 0x%02x 0x%02x\n",
2240 			bmc->id.aux_firmware_revision[3],
2241 			bmc->id.aux_firmware_revision[2],
2242 			bmc->id.aux_firmware_revision[1],
2243 			bmc->id.aux_firmware_revision[0]);
2244 }
2245 
2246 static ssize_t guid_show(struct device *dev, struct device_attribute *attr,
2247 			 char *buf)
2248 {
2249 	struct bmc_device *bmc = dev_get_drvdata(dev);
2250 
2251 	return snprintf(buf, 100, "%Lx%Lx\n",
2252 			(long long) bmc->guid[0],
2253 			(long long) bmc->guid[8]);
2254 }
2255 
2256 static void remove_files(struct bmc_device *bmc)
2257 {
2258 	if (!bmc->dev)
2259 		return;
2260 
2261 	device_remove_file(&bmc->dev->dev,
2262 			   &bmc->device_id_attr);
2263 	device_remove_file(&bmc->dev->dev,
2264 			   &bmc->provides_dev_sdrs_attr);
2265 	device_remove_file(&bmc->dev->dev,
2266 			   &bmc->revision_attr);
2267 	device_remove_file(&bmc->dev->dev,
2268 			   &bmc->firmware_rev_attr);
2269 	device_remove_file(&bmc->dev->dev,
2270 			   &bmc->version_attr);
2271 	device_remove_file(&bmc->dev->dev,
2272 			   &bmc->add_dev_support_attr);
2273 	device_remove_file(&bmc->dev->dev,
2274 			   &bmc->manufacturer_id_attr);
2275 	device_remove_file(&bmc->dev->dev,
2276 			   &bmc->product_id_attr);
2277 
2278 	if (bmc->id.aux_firmware_revision_set)
2279 		device_remove_file(&bmc->dev->dev,
2280 				   &bmc->aux_firmware_rev_attr);
2281 	if (bmc->guid_set)
2282 		device_remove_file(&bmc->dev->dev,
2283 				   &bmc->guid_attr);
2284 }
2285 
2286 static void
2287 cleanup_bmc_device(struct kref *ref)
2288 {
2289 	struct bmc_device *bmc;
2290 
2291 	bmc = container_of(ref, struct bmc_device, refcount);
2292 
2293 	remove_files(bmc);
2294 	platform_device_unregister(bmc->dev);
2295 	kfree(bmc);
2296 }
2297 
2298 static void ipmi_bmc_unregister(ipmi_smi_t intf)
2299 {
2300 	struct bmc_device *bmc = intf->bmc;
2301 
2302 	if (intf->sysfs_name) {
2303 		sysfs_remove_link(&intf->si_dev->kobj, intf->sysfs_name);
2304 		kfree(intf->sysfs_name);
2305 		intf->sysfs_name = NULL;
2306 	}
2307 	if (intf->my_dev_name) {
2308 		sysfs_remove_link(&bmc->dev->dev.kobj, intf->my_dev_name);
2309 		kfree(intf->my_dev_name);
2310 		intf->my_dev_name = NULL;
2311 	}
2312 
2313 	mutex_lock(&ipmidriver_mutex);
2314 	kref_put(&bmc->refcount, cleanup_bmc_device);
2315 	intf->bmc = NULL;
2316 	mutex_unlock(&ipmidriver_mutex);
2317 }
2318 
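/*
 * Create the sysfs attribute files for the BMC device.  The aux firmware
 * revision and GUID files are only created when the BMC reports them.  On
 * failure, the labels below unwind, in reverse order, every file created
 * before the error.
 */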
2319 static int create_files(struct bmc_device *bmc)
2320 {
2321 	int err;
2322 
2323 	bmc->device_id_attr.attr.name = "device_id";
2324 	bmc->device_id_attr.attr.mode = S_IRUGO;
2325 	bmc->device_id_attr.show = device_id_show;
2326 	sysfs_attr_init(&bmc->device_id_attr.attr);
2327 
2328 	bmc->provides_dev_sdrs_attr.attr.name = "provides_device_sdrs";
2329 	bmc->provides_dev_sdrs_attr.attr.mode = S_IRUGO;
2330 	bmc->provides_dev_sdrs_attr.show = provides_dev_sdrs_show;
2331 	sysfs_attr_init(&bmc->provides_dev_sdrs_attr.attr);
2332 
2333 	bmc->revision_attr.attr.name = "revision";
2334 	bmc->revision_attr.attr.mode = S_IRUGO;
2335 	bmc->revision_attr.show = revision_show;
2336 	sysfs_attr_init(&bmc->revision_attr.attr);
2337 
2338 	bmc->firmware_rev_attr.attr.name = "firmware_revision";
2339 	bmc->firmware_rev_attr.attr.mode = S_IRUGO;
2340 	bmc->firmware_rev_attr.show = firmware_rev_show;
2341 	sysfs_attr_init(&bmc->firmware_rev_attr.attr);
2342 
2343 	bmc->version_attr.attr.name = "ipmi_version";
2344 	bmc->version_attr.attr.mode = S_IRUGO;
2345 	bmc->version_attr.show = ipmi_version_show;
2346 	sysfs_attr_init(&bmc->version_attr.attr);
2347 
2348 	bmc->add_dev_support_attr.attr.name = "additional_device_support";
2349 	bmc->add_dev_support_attr.attr.mode = S_IRUGO;
2350 	bmc->add_dev_support_attr.show = add_dev_support_show;
2351 	sysfs_attr_init(&bmc->add_dev_support_attr.attr);
2352 
2353 	bmc->manufacturer_id_attr.attr.name = "manufacturer_id";
2354 	bmc->manufacturer_id_attr.attr.mode = S_IRUGO;
2355 	bmc->manufacturer_id_attr.show = manufacturer_id_show;
2356 	sysfs_attr_init(&bmc->manufacturer_id_attr.attr);
2357 
2358 	bmc->product_id_attr.attr.name = "product_id";
2359 	bmc->product_id_attr.attr.mode = S_IRUGO;
2360 	bmc->product_id_attr.show = product_id_show;
2361 	sysfs_attr_init(&bmc->product_id_attr.attr);
2362 
2363 	bmc->guid_attr.attr.name = "guid";
2364 	bmc->guid_attr.attr.mode = S_IRUGO;
2365 	bmc->guid_attr.show = guid_show;
2366 	sysfs_attr_init(&bmc->guid_attr.attr);
2367 
2368 	bmc->aux_firmware_rev_attr.attr.name = "aux_firmware_revision";
2369 	bmc->aux_firmware_rev_attr.attr.mode = S_IRUGO;
2370 	bmc->aux_firmware_rev_attr.show = aux_firmware_rev_show;
2371 	sysfs_attr_init(&bmc->aux_firmware_rev_attr.attr);
2372 
2373 	err = device_create_file(&bmc->dev->dev,
2374 			   &bmc->device_id_attr);
2375 	if (err)
2376 		goto out;
2377 	err = device_create_file(&bmc->dev->dev,
2378 			   &bmc->provides_dev_sdrs_attr);
2379 	if (err)
2380 		goto out_devid;
2381 	err = device_create_file(&bmc->dev->dev,
2382 			   &bmc->revision_attr);
2383 	if (err)
2384 		goto out_sdrs;
2385 	err = device_create_file(&bmc->dev->dev,
2386 			   &bmc->firmware_rev_attr);
2387 	if (err)
2388 		goto out_rev;
2389 	err = device_create_file(&bmc->dev->dev,
2390 			   &bmc->version_attr);
2391 	if (err)
2392 		goto out_firm;
2393 	err = device_create_file(&bmc->dev->dev,
2394 			   &bmc->add_dev_support_attr);
2395 	if (err)
2396 		goto out_version;
2397 	err = device_create_file(&bmc->dev->dev,
2398 			   &bmc->manufacturer_id_attr);
2399 	if (err)
2400 		goto out_add_dev;
2401 	err = device_create_file(&bmc->dev->dev,
2402 			   &bmc->product_id_attr);
2403 	if (err)
2404 		goto out_manu;
2405 	if (bmc->id.aux_firmware_revision_set) {
2406 		err = device_create_file(&bmc->dev->dev,
2407 				   &bmc->aux_firmware_rev_attr);
2408 		if (err)
2409 			goto out_prod_id;
2410 	}
2411 	if (bmc->guid_set) {
2412 		err = device_create_file(&bmc->dev->dev,
2413 				   &bmc->guid_attr);
2414 		if (err)
2415 			goto out_aux_firm;
2416 	}
2417 
2418 	return 0;
2419 
2420 out_aux_firm:
2421 	if (bmc->id.aux_firmware_revision_set)
2422 		device_remove_file(&bmc->dev->dev,
2423 				   &bmc->aux_firmware_rev_attr);
2424 out_prod_id:
2425 	device_remove_file(&bmc->dev->dev,
2426 			   &bmc->product_id_attr);
2427 out_manu:
2428 	device_remove_file(&bmc->dev->dev,
2429 			   &bmc->manufacturer_id_attr);
2430 out_add_dev:
2431 	device_remove_file(&bmc->dev->dev,
2432 			   &bmc->add_dev_support_attr);
2433 out_version:
2434 	device_remove_file(&bmc->dev->dev,
2435 			   &bmc->version_attr);
2436 out_firm:
2437 	device_remove_file(&bmc->dev->dev,
2438 			   &bmc->firmware_rev_attr);
2439 out_rev:
2440 	device_remove_file(&bmc->dev->dev,
2441 			   &bmc->revision_attr);
2442 out_sdrs:
2443 	device_remove_file(&bmc->dev->dev,
2444 			   &bmc->provides_dev_sdrs_attr);
2445 out_devid:
2446 	device_remove_file(&bmc->dev->dev,
2447 			   &bmc->device_id_attr);
2448 out:
2449 	return err;
2450 }
2451 
2452 static int ipmi_bmc_register(ipmi_smi_t intf, int ifnum,
2453 			     const char *sysfs_name)
2454 {
2455 	int               rv;
2456 	struct bmc_device *bmc = intf->bmc;
2457 	struct bmc_device *old_bmc;
2458 	int               size;
2459 	char              dummy[1];
2460 
2461 	mutex_lock(&ipmidriver_mutex);
2462 
2463 	/*
2464 	 * Try to find if there is already a bmc_device struct
2465 	 * representing the interfaced BMC.
2466 	 */
2467 	if (bmc->guid_set)
2468 		old_bmc = ipmi_find_bmc_guid(&ipmidriver.driver, bmc->guid);
2469 	else
2470 		old_bmc = ipmi_find_bmc_prod_dev_id(&ipmidriver.driver,
2471 						    bmc->id.product_id,
2472 						    bmc->id.device_id);
2473 
2474 	/*
2475 	 * If there is already a bmc_device, free the new one;
2476 	 * otherwise register the new BMC device.
2477 	 */
2478 	if (old_bmc) {
2479 		kfree(bmc);
2480 		intf->bmc = old_bmc;
2481 		bmc = old_bmc;
2482 
2483 		kref_get(&bmc->refcount);
2484 		mutex_unlock(&ipmidriver_mutex);
2485 
2486 		printk(KERN_INFO
2487 		       "ipmi: interfacing existing BMC (man_id: 0x%6.6x,"
2488 		       " prod_id: 0x%4.4x, dev_id: 0x%2.2x)\n",
2489 		       bmc->id.manufacturer_id,
2490 		       bmc->id.product_id,
2491 		       bmc->id.device_id);
2492 	} else {
2493 		char name[14];
2494 		unsigned char orig_dev_id = bmc->id.device_id;
2495 		int warn_printed = 0;
2496 
2497 		snprintf(name, sizeof(name),
2498 			 "ipmi_bmc.%4.4x", bmc->id.product_id);
2499 
2500 		while (ipmi_find_bmc_prod_dev_id(&ipmidriver.driver,
2501 						 bmc->id.product_id,
2502 						 bmc->id.device_id)) {
2503 			if (!warn_printed) {
2504 				printk(KERN_WARNING PFX
2505 				       "This machine has two different BMCs"
2506 				       " with the same product id and device"
2507 				       " id.  This is an error in the"
2508 				       " firmware, so the device id is being"
2509 				       " incremented to work around the problem."
2510 				       " Prod ID = 0x%x, Dev ID = 0x%x\n",
2511 				       bmc->id.product_id, bmc->id.device_id);
2512 				warn_printed = 1;
2513 			}
2514 			bmc->id.device_id++; /* Wraps at 255 */
2515 			if (bmc->id.device_id == orig_dev_id) {
2516 				printk(KERN_ERR PFX
2517 				       "Out of device ids!\n");
2518 				break;
2519 			}
2520 		}
2521 
2522 		bmc->dev = platform_device_alloc(name, bmc->id.device_id);
2523 		if (!bmc->dev) {
2524 			mutex_unlock(&ipmidriver_mutex);
2525 			printk(KERN_ERR
2526 			       "ipmi_msghandler:"
2527 			       " Unable to allocate platform device\n");
2528 			return -ENOMEM;
2529 		}
2530 		bmc->dev->dev.driver = &ipmidriver.driver;
2531 		dev_set_drvdata(&bmc->dev->dev, bmc);
2532 		kref_init(&bmc->refcount);
2533 
2534 		rv = platform_device_add(bmc->dev);
2535 		mutex_unlock(&ipmidriver_mutex);
2536 		if (rv) {
2537 			platform_device_put(bmc->dev);
2538 			bmc->dev = NULL;
2539 			printk(KERN_ERR
2540 			       "ipmi_msghandler:"
2541 			       " Unable to register bmc device: %d\n",
2542 			       rv);
2543 			/*
2544 			 * Don't go to out_err; you can only do that if
2545 			 * the device is already registered.
2546 			 */
2547 			return rv;
2548 		}
2549 
2550 		rv = create_files(bmc);
2551 		if (rv) {
2552 			mutex_lock(&ipmidriver_mutex);
2553 			platform_device_unregister(bmc->dev);
2554 			mutex_unlock(&ipmidriver_mutex);
2555 
2556 			return rv;
2557 		}
2558 
2559 		dev_info(intf->si_dev, "Found new BMC (man_id: 0x%6.6x, "
2560 			 "prod_id: 0x%4.4x, dev_id: 0x%2.2x)\n",
2561 			 bmc->id.manufacturer_id,
2562 			 bmc->id.product_id,
2563 			 bmc->id.device_id);
2564 	}
2565 
2566 	/*
2567 	 * Create symlinks from the system interface device to the bmc
2568 	 * device and back.
2569 	 */
2570 	intf->sysfs_name = kstrdup(sysfs_name, GFP_KERNEL);
2571 	if (!intf->sysfs_name) {
2572 		rv = -ENOMEM;
2573 		printk(KERN_ERR
2574 		       "ipmi_msghandler: Unable to allocate link to BMC: %d\n",
2575 		       rv);
2576 		goto out_err;
2577 	}
2578 
2579 	rv = sysfs_create_link(&intf->si_dev->kobj,
2580 			       &bmc->dev->dev.kobj, intf->sysfs_name);
2581 	if (rv) {
2582 		kfree(intf->sysfs_name);
2583 		intf->sysfs_name = NULL;
2584 		printk(KERN_ERR
2585 		       "ipmi_msghandler: Unable to create bmc symlink: %d\n",
2586 		       rv);
2587 		goto out_err;
2588 	}
2589 
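	/*
	 * snprintf() with a zero-size buffer just computes the length
	 * needed for the "ipmi%d" name; the name is then allocated and
	 * formatted for real below.
	 */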
2590 	size = snprintf(dummy, 0, "ipmi%d", ifnum);
2591 	intf->my_dev_name = kmalloc(size+1, GFP_KERNEL);
2592 	if (!intf->my_dev_name) {
2593 		kfree(intf->sysfs_name);
2594 		intf->sysfs_name = NULL;
2595 		rv = -ENOMEM;
2596 		printk(KERN_ERR
2597 		       "ipmi_msghandler: Unable to allocate link from BMC: %d\n",
2598 		       rv);
2599 		goto out_err;
2600 	}
2601 	snprintf(intf->my_dev_name, size+1, "ipmi%d", ifnum);
2602 
2603 	rv = sysfs_create_link(&bmc->dev->dev.kobj, &intf->si_dev->kobj,
2604 			       intf->my_dev_name);
2605 	if (rv) {
2606 		kfree(intf->sysfs_name);
2607 		intf->sysfs_name = NULL;
2608 		kfree(intf->my_dev_name);
2609 		intf->my_dev_name = NULL;
2610 		printk(KERN_ERR
2611 		       "ipmi_msghandler:"
2612 		       " Unable to create symlink to bmc: %d\n",
2613 		       rv);
2614 		goto out_err;
2615 	}
2616 
2617 	return 0;
2618 
2619 out_err:
2620 	ipmi_bmc_unregister(intf);
2621 	return rv;
2622 }
2623 
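/*
 * Ask the BMC for its GUID.  The request is issued with no user;
 * get_guid() installs guid_handler() as intf->null_user_handler so the
 * response is routed there, and intf itself is passed as the user
 * message data.
 */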
2624 static int
2625 send_guid_cmd(ipmi_smi_t intf, int chan)
2626 {
2627 	struct kernel_ipmi_msg            msg;
2628 	struct ipmi_system_interface_addr si;
2629 
2630 	si.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
2631 	si.channel = IPMI_BMC_CHANNEL;
2632 	si.lun = 0;
2633 
2634 	msg.netfn = IPMI_NETFN_APP_REQUEST;
2635 	msg.cmd = IPMI_GET_DEVICE_GUID_CMD;
2636 	msg.data = NULL;
2637 	msg.data_len = 0;
2638 	return i_ipmi_request(NULL,
2639 			      intf,
2640 			      (struct ipmi_addr *) &si,
2641 			      0,
2642 			      &msg,
2643 			      intf,
2644 			      NULL,
2645 			      NULL,
2646 			      0,
2647 			      intf->channels[0].address,
2648 			      intf->channels[0].lun,
2649 			      -1, 0);
2650 }
2651 
2652 static void
2653 guid_handler(ipmi_smi_t intf, struct ipmi_recv_msg *msg)
2654 {
2655 	if ((msg->addr.addr_type != IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
2656 	    || (msg->msg.netfn != IPMI_NETFN_APP_RESPONSE)
2657 	    || (msg->msg.cmd != IPMI_GET_DEVICE_GUID_CMD))
2658 		/* Not for me */
2659 		return;
2660 
2661 	if (msg->msg.data[0] != 0) {
2662 		/* Error getting the GUID; the BMC doesn't have one. */
2663 		intf->bmc->guid_set = 0;
2664 		goto out;
2665 	}
2666 
2667 	if (msg->msg.data_len < 17) {
2668 		intf->bmc->guid_set = 0;
2669 		printk(KERN_WARNING PFX
2670 		       "guid_handler: The GUID response from the BMC was too"
2671 		       " short; it was %d bytes but should have been 17."
2672 		       "  Assuming the GUID is not available.\n",
2673 		       msg->msg.data_len);
2674 		goto out;
2675 	}
2676 
2677 	memcpy(intf->bmc->guid, msg->msg.data, 16);
2678 	intf->bmc->guid_set = 1;
2679  out:
2680 	wake_up(&intf->waitq);
2681 }
2682 
2683 static void
2684 get_guid(ipmi_smi_t intf)
2685 {
2686 	int rv;
2687 
2688 	intf->bmc->guid_set = 0x2;
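	/*
	 * A value of 2 marks the GUID fetch as "in progress";
	 * guid_handler() sets guid_set to 0 or 1 and wakes waitq,
	 * which ends the wait_event() below.
	 */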
2689 	intf->null_user_handler = guid_handler;
2690 	rv = send_guid_cmd(intf, 0);
2691 	if (rv)
2692 		/* Send failed, no GUID available. */
2693 		intf->bmc->guid_set = 0;
2694 	wait_event(intf->waitq, intf->bmc->guid_set != 2);
2695 	intf->null_user_handler = NULL;
2696 }
2697 
2698 static int
2699 send_channel_info_cmd(ipmi_smi_t intf, int chan)
2700 {
2701 	struct kernel_ipmi_msg            msg;
2702 	unsigned char                     data[1];
2703 	struct ipmi_system_interface_addr si;
2704 
2705 	si.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
2706 	si.channel = IPMI_BMC_CHANNEL;
2707 	si.lun = 0;
2708 
2709 	msg.netfn = IPMI_NETFN_APP_REQUEST;
2710 	msg.cmd = IPMI_GET_CHANNEL_INFO_CMD;
2711 	msg.data = data;
2712 	msg.data_len = 1;
2713 	data[0] = chan;
2714 	return i_ipmi_request(NULL,
2715 			      intf,
2716 			      (struct ipmi_addr *) &si,
2717 			      0,
2718 			      &msg,
2719 			      intf,
2720 			      NULL,
2721 			      NULL,
2722 			      0,
2723 			      intf->channels[0].address,
2724 			      intf->channels[0].lun,
2725 			      -1, 0);
2726 }
2727 
2728 static void
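/*
 * Handle a Get Channel Info response during the channel scan done at
 * interface registration time (installed as intf->null_user_handler).
 * Record the medium and protocol for the current channel, then request
 * the next channel until curr_channel reaches IPMI_MAX_CHANNELS and the
 * waiter in ipmi_register_smi() is woken up.
 */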
2729 channel_handler(ipmi_smi_t intf, struct ipmi_recv_msg *msg)
2730 {
2731 	int rv = 0;
2732 	int chan;
2733 
2734 	if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
2735 	    && (msg->msg.netfn == IPMI_NETFN_APP_RESPONSE)
2736 	    && (msg->msg.cmd == IPMI_GET_CHANNEL_INFO_CMD)) {
2737 		/* It's the one we want */
2738 		if (msg->msg.data[0] != 0) {
2739 			/* Got an error from the channel, just go on. */
2740 
2741 			if (msg->msg.data[0] == IPMI_INVALID_COMMAND_ERR) {
2742 				/*
2743 				 * If the MC does not support this
2744 				 * command, that is legal.  We just
2745 				 * assume it has one IPMB at channel
2746 				 * zero.
2747 				 */
2748 				intf->channels[0].medium
2749 					= IPMI_CHANNEL_MEDIUM_IPMB;
2750 				intf->channels[0].protocol
2751 					= IPMI_CHANNEL_PROTOCOL_IPMB;
2752 				rv = -ENOSYS;
2753 
2754 				intf->curr_channel = IPMI_MAX_CHANNELS;
2755 				wake_up(&intf->waitq);
2756 				goto out;
2757 			}
2758 			goto next_channel;
2759 		}
2760 		if (msg->msg.data_len < 4) {
2761 			/* Message not big enough, just go on. */
2762 			goto next_channel;
2763 		}
2764 		chan = intf->curr_channel;
2765 		intf->channels[chan].medium = msg->msg.data[2] & 0x7f;
2766 		intf->channels[chan].protocol = msg->msg.data[3] & 0x1f;
2767 
2768  next_channel:
2769 		intf->curr_channel++;
2770 		if (intf->curr_channel >= IPMI_MAX_CHANNELS)
2771 			wake_up(&intf->waitq);
2772 		else
2773 			rv = send_channel_info_cmd(intf, intf->curr_channel);
2774 
2775 		if (rv) {
2776 			/* Got an error somehow, just give up. */
2777 			intf->curr_channel = IPMI_MAX_CHANNELS;
2778 			wake_up(&intf->waitq);
2779 
2780 			printk(KERN_WARNING PFX
2781 			       "Error sending channel information: %d\n",
2782 			       rv);
2783 		}
2784 	}
2785  out:
2786 	return;
2787 }
2788 
2789 void ipmi_poll_interface(ipmi_user_t user)
2790 {
2791 	ipmi_smi_t intf = user->intf;
2792 
2793 	if (intf->handlers->poll)
2794 		intf->handlers->poll(intf->send_info);
2795 }
2796 EXPORT_SYMBOL(ipmi_poll_interface);
2797 
2798 int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
2799 		      void		       *send_info,
2800 		      struct ipmi_device_id    *device_id,
2801 		      struct device            *si_dev,
2802 		      const char               *sysfs_name,
2803 		      unsigned char            slave_addr)
2804 {
2805 	int              i, j;
2806 	int              rv;
2807 	ipmi_smi_t       intf;
2808 	ipmi_smi_t       tintf;
2809 	struct list_head *link;
2810 
2811 	/*
2812 	 * Make sure the driver is actually initialized; this handles
2813 	 * problems with initialization order.
2814 	 */
2815 	if (!initialized) {
2816 		rv = ipmi_init_msghandler();
2817 		if (rv)
2818 			return rv;
2819 		/*
2820 		 * The init code doesn't return an error if it was turned
2821 		 * off, but it won't initialize.  Check that.
2822 		 */
2823 		if (!initialized)
2824 			return -ENODEV;
2825 	}
2826 
2827 	intf = kzalloc(sizeof(*intf), GFP_KERNEL);
2828 	if (!intf)
2829 		return -ENOMEM;
2830 
2831 	intf->ipmi_version_major = ipmi_version_major(device_id);
2832 	intf->ipmi_version_minor = ipmi_version_minor(device_id);
2833 
2834 	intf->bmc = kzalloc(sizeof(*intf->bmc), GFP_KERNEL);
2835 	if (!intf->bmc) {
2836 		kfree(intf);
2837 		return -ENOMEM;
2838 	}
2839 	intf->intf_num = -1; /* Mark it invalid for now. */
2840 	kref_init(&intf->refcount);
2841 	intf->bmc->id = *device_id;
2842 	intf->si_dev = si_dev;
2843 	for (j = 0; j < IPMI_MAX_CHANNELS; j++) {
2844 		intf->channels[j].address = IPMI_BMC_SLAVE_ADDR;
2845 		intf->channels[j].lun = 2;
2846 	}
2847 	if (slave_addr != 0)
2848 		intf->channels[0].address = slave_addr;
2849 	INIT_LIST_HEAD(&intf->users);
2850 	intf->handlers = handlers;
2851 	intf->send_info = send_info;
2852 	spin_lock_init(&intf->seq_lock);
2853 	for (j = 0; j < IPMI_IPMB_NUM_SEQ; j++) {
2854 		intf->seq_table[j].inuse = 0;
2855 		intf->seq_table[j].seqid = 0;
2856 	}
2857 	intf->curr_seq = 0;
2858 #ifdef CONFIG_PROC_FS
2859 	mutex_init(&intf->proc_entry_lock);
2860 #endif
2861 	spin_lock_init(&intf->waiting_msgs_lock);
2862 	INIT_LIST_HEAD(&intf->waiting_msgs);
2863 	spin_lock_init(&intf->events_lock);
2864 	INIT_LIST_HEAD(&intf->waiting_events);
2865 	intf->waiting_events_count = 0;
2866 	mutex_init(&intf->cmd_rcvrs_mutex);
2867 	spin_lock_init(&intf->maintenance_mode_lock);
2868 	INIT_LIST_HEAD(&intf->cmd_rcvrs);
2869 	init_waitqueue_head(&intf->waitq);
2870 	for (i = 0; i < IPMI_NUM_STATS; i++)
2871 		atomic_set(&intf->stats[i], 0);
2872 
2873 	intf->proc_dir = NULL;
2874 
2875 	mutex_lock(&smi_watchers_mutex);
2876 	mutex_lock(&ipmi_interfaces_mutex);
2877 	/* Look for a hole in the numbers. */
2878 	i = 0;
2879 	link = &ipmi_interfaces;
2880 	list_for_each_entry_rcu(tintf, &ipmi_interfaces, link) {
2881 		if (tintf->intf_num != i) {
2882 			link = &tintf->link;
2883 			break;
2884 		}
2885 		i++;
2886 	}
2887 	/* Add the new interface in numeric order. */
2888 	if (i == 0)
2889 		list_add_rcu(&intf->link, &ipmi_interfaces);
2890 	else
2891 		list_add_tail_rcu(&intf->link, link);
2892 
2893 	rv = handlers->start_processing(send_info, intf);
2894 	if (rv)
2895 		goto out;
2896 
2897 	get_guid(intf);
2898 
2899 	if ((intf->ipmi_version_major > 1)
2900 			|| ((intf->ipmi_version_major == 1)
2901 			    && (intf->ipmi_version_minor >= 5))) {
2902 		/*
2903 		 * Start scanning the channels to see what is
2904 		 * available.
2905 		 */
2906 		intf->null_user_handler = channel_handler;
2907 		intf->curr_channel = 0;
2908 		rv = send_channel_info_cmd(intf, 0);
2909 		if (rv)
2910 			goto out;
2911 
2912 		/* Wait for the channel info to be read. */
2913 		wait_event(intf->waitq,
2914 			   intf->curr_channel >= IPMI_MAX_CHANNELS);
2915 		intf->null_user_handler = NULL;
2916 	} else {
2917 		/* Assume a single IPMB channel at zero. */
2918 		intf->channels[0].medium = IPMI_CHANNEL_MEDIUM_IPMB;
2919 		intf->channels[0].protocol = IPMI_CHANNEL_PROTOCOL_IPMB;
2920 		intf->curr_channel = IPMI_MAX_CHANNELS;
2921 	}
2922 
2923 	if (rv == 0)
2924 		rv = add_proc_entries(intf, i);
2925 
2926 	rv = ipmi_bmc_register(intf, i, sysfs_name);
2927 
2928  out:
2929 	if (rv) {
2930 		if (intf->proc_dir)
2931 			remove_proc_entries(intf);
2932 		intf->handlers = NULL;
2933 		list_del_rcu(&intf->link);
2934 		mutex_unlock(&ipmi_interfaces_mutex);
2935 		mutex_unlock(&smi_watchers_mutex);
2936 		synchronize_rcu();
2937 		kref_put(&intf->refcount, intf_free);
2938 	} else {
2939 		/*
2940 		 * Keep memory order straight for RCU readers.  Make
2941 		 * sure everything else is committed to memory before
2942 		 * setting intf_num to mark the interface valid.
2943 		 */
2944 		smp_wmb();
2945 		intf->intf_num = i;
2946 		mutex_unlock(&ipmi_interfaces_mutex);
2947 		/* After this point the interface is legal to use. */
2948 		call_smi_watchers(i, intf->si_dev);
2949 		mutex_unlock(&smi_watchers_mutex);
2950 	}
2951 
2952 	return rv;
2953 }
2954 EXPORT_SYMBOL(ipmi_register_smi);
2955 
2956 static void cleanup_smi_msgs(ipmi_smi_t intf)
2957 {
2958 	int              i;
2959 	struct seq_table *ent;
2960 
2961 	/* No need for locks, the interface is down. */
2962 	for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
2963 		ent = &(intf->seq_table[i]);
2964 		if (!ent->inuse)
2965 			continue;
2966 		deliver_err_response(ent->recv_msg, IPMI_ERR_UNSPECIFIED);
2967 	}
2968 }
2969 
2970 int ipmi_unregister_smi(ipmi_smi_t intf)
2971 {
2972 	struct ipmi_smi_watcher *w;
2973 	int    intf_num = intf->intf_num;
2974 
2975 	ipmi_bmc_unregister(intf);
2976 
2977 	mutex_lock(&smi_watchers_mutex);
2978 	mutex_lock(&ipmi_interfaces_mutex);
2979 	intf->intf_num = -1;
2980 	intf->handlers = NULL;
2981 	list_del_rcu(&intf->link);
2982 	mutex_unlock(&ipmi_interfaces_mutex);
2983 	synchronize_rcu();
2984 
2985 	cleanup_smi_msgs(intf);
2986 
2987 	remove_proc_entries(intf);
2988 
2989 	/*
2990 	 * Call all the watcher interfaces to tell them that
2991 	 * an interface is gone.
2992 	 */
2993 	list_for_each_entry(w, &smi_watchers, link)
2994 		w->smi_gone(intf_num);
2995 	mutex_unlock(&smi_watchers_mutex);
2996 
2997 	kref_put(&intf->refcount, intf_free);
2998 	return 0;
2999 }
3000 EXPORT_SYMBOL(ipmi_unregister_smi);
3001 
3002 static int handle_ipmb_get_msg_rsp(ipmi_smi_t          intf,
3003 				   struct ipmi_smi_msg *msg)
3004 {
3005 	struct ipmi_ipmb_addr ipmb_addr;
3006 	struct ipmi_recv_msg  *recv_msg;
3007 
3008 	/*
3009 	 * This is 11, not 10, because the response must contain a
3010 	 * completion code.
3011 	 */
3012 	if (msg->rsp_size < 11) {
3013 		/* Message not big enough, just ignore it. */
3014 		ipmi_inc_stat(intf, invalid_ipmb_responses);
3015 		return 0;
3016 	}
3017 
3018 	if (msg->rsp[2] != 0) {
3019 		/* An error getting the response, just ignore it. */
3020 		return 0;
3021 	}
3022 
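	/*
	 * Pull the source address out of the Get Message response.  Per
	 * the parsing below: rsp[3] holds the channel, rsp[6] the
	 * responder's slave address, rsp[7] the rqSeq/LUN byte, rsp[8]
	 * the command, and the response data starts at rsp[9].
	 */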
3023 	ipmb_addr.addr_type = IPMI_IPMB_ADDR_TYPE;
3024 	ipmb_addr.slave_addr = msg->rsp[6];
3025 	ipmb_addr.channel = msg->rsp[3] & 0x0f;
3026 	ipmb_addr.lun = msg->rsp[7] & 3;
3027 
3028 	/*
3029 	 * It's a response from a remote entity.  Look up the sequence
3030 	 * number and handle the response.
3031 	 */
3032 	if (intf_find_seq(intf,
3033 			  msg->rsp[7] >> 2,
3034 			  msg->rsp[3] & 0x0f,
3035 			  msg->rsp[8],
3036 			  (msg->rsp[4] >> 2) & (~1),
3037 			  (struct ipmi_addr *) &(ipmb_addr),
3038 			  &recv_msg)) {
3039 		/*
3040 		 * We were unable to find the sequence number,
3041 		 * so just nuke the message.
3042 		 */
3043 		ipmi_inc_stat(intf, unhandled_ipmb_responses);
3044 		return 0;
3045 	}
3046 
3047 	memcpy(recv_msg->msg_data,
3048 	       &(msg->rsp[9]),
3049 	       msg->rsp_size - 9);
3050 	/*
3051 	 * The other fields matched, so no need to set them, except
3052 	 * for netfn, which needs to be the response that was
3053 	 * returned, not the request value.
3054 	 */
3055 	recv_msg->msg.netfn = msg->rsp[4] >> 2;
3056 	recv_msg->msg.data = recv_msg->msg_data;
3057 	recv_msg->msg.data_len = msg->rsp_size - 10;
3058 	recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
3059 	ipmi_inc_stat(intf, handled_ipmb_responses);
3060 	deliver_response(recv_msg);
3061 
3062 	return 0;
3063 }
3064 
3065 static int handle_ipmb_get_msg_cmd(ipmi_smi_t          intf,
3066 				   struct ipmi_smi_msg *msg)
3067 {
3068 	struct cmd_rcvr          *rcvr;
3069 	int                      rv = 0;
3070 	unsigned char            netfn;
3071 	unsigned char            cmd;
3072 	unsigned char            chan;
3073 	ipmi_user_t              user = NULL;
3074 	struct ipmi_ipmb_addr    *ipmb_addr;
3075 	struct ipmi_recv_msg     *recv_msg;
3076 	struct ipmi_smi_handlers *handlers;
3077 
3078 	if (msg->rsp_size < 10) {
3079 		/* Message not big enough, just ignore it. */
3080 		ipmi_inc_stat(intf, invalid_commands);
3081 		return 0;
3082 	}
3083 
3084 	if (msg->rsp[2] != 0) {
3085 		/* An error getting the response, just ignore it. */
3086 		return 0;
3087 	}
3088 
3089 	netfn = msg->rsp[4] >> 2;
3090 	cmd = msg->rsp[8];
3091 	chan = msg->rsp[3] & 0xf;
3092 
3093 	rcu_read_lock();
3094 	rcvr = find_cmd_rcvr(intf, netfn, cmd, chan);
3095 	if (rcvr) {
3096 		user = rcvr->user;
3097 		kref_get(&user->refcount);
3098 	} else
3099 		user = NULL;
3100 	rcu_read_unlock();
3101 
3102 	if (user == NULL) {
3103 		/* We didn't find a user, deliver an error response. */
3104 		ipmi_inc_stat(intf, unhandled_commands);
3105 
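		/*
		 * Build a Send Message request that bounces an "invalid
		 * command" completion code back to the originator on the
		 * same channel: destination address, swapped netfn/LUN,
		 * rqSeq, command and the two IPMB checksums are filled in
		 * from the incoming request below.
		 */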
3106 		msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
3107 		msg->data[1] = IPMI_SEND_MSG_CMD;
3108 		msg->data[2] = msg->rsp[3];
3109 		msg->data[3] = msg->rsp[6];
3110 		msg->data[4] = ((netfn + 1) << 2) | (msg->rsp[7] & 0x3);
3111 		msg->data[5] = ipmb_checksum(&(msg->data[3]), 2);
3112 		msg->data[6] = intf->channels[msg->rsp[3] & 0xf].address;
3113 		/* rqseq/lun */
3114 		msg->data[7] = (msg->rsp[7] & 0xfc) | (msg->rsp[4] & 0x3);
3115 		msg->data[8] = msg->rsp[8]; /* cmd */
3116 		msg->data[9] = IPMI_INVALID_CMD_COMPLETION_CODE;
3117 		msg->data[10] = ipmb_checksum(&(msg->data[6]), 4);
3118 		msg->data_size = 11;
3119 
3120 #ifdef DEBUG_MSGING
3121 	{
3122 		int m;
3123 		printk("Invalid command:");
3124 		for (m = 0; m < msg->data_size; m++)
3125 			printk(" %2.2x", msg->data[m]);
3126 		printk("\n");
3127 	}
3128 #endif
3129 		rcu_read_lock();
3130 		handlers = intf->handlers;
3131 		if (handlers) {
3132 			handlers->sender(intf->send_info, msg, 0);
3133 			/*
3134 			 * We used the message, so return the value
3135 			 * that causes it to not be freed or
3136 			 * queued.
3137 			 */
3138 			rv = -1;
3139 		}
3140 		rcu_read_unlock();
3141 	} else {
3142 		/* Deliver the message to the user. */
3143 		ipmi_inc_stat(intf, handled_commands);
3144 
3145 		recv_msg = ipmi_alloc_recv_msg();
3146 		if (!recv_msg) {
3147 			/*
3148 			 * We couldn't allocate memory for the
3149 			 * message, so requeue it for handling
3150 			 * later.
3151 			 */
3152 			rv = 1;
3153 			kref_put(&user->refcount, free_user);
3154 		} else {
3155 			/* Extract the source address from the data. */
3156 			ipmb_addr = (struct ipmi_ipmb_addr *) &recv_msg->addr;
3157 			ipmb_addr->addr_type = IPMI_IPMB_ADDR_TYPE;
3158 			ipmb_addr->slave_addr = msg->rsp[6];
3159 			ipmb_addr->lun = msg->rsp[7] & 3;
3160 			ipmb_addr->channel = msg->rsp[3] & 0xf;
3161 
3162 			/*
3163 			 * Extract the rest of the message information
3164 			 * from the IPMB header.
3165 			 */
3166 			recv_msg->user = user;
3167 			recv_msg->recv_type = IPMI_CMD_RECV_TYPE;
3168 			recv_msg->msgid = msg->rsp[7] >> 2;
3169 			recv_msg->msg.netfn = msg->rsp[4] >> 2;
3170 			recv_msg->msg.cmd = msg->rsp[8];
3171 			recv_msg->msg.data = recv_msg->msg_data;
3172 
3173 			/*
3174 			 * We chop off 10, not 9, bytes because the checksum
3175 			 * at the end also needs to be removed.
3176 			 */
3177 			recv_msg->msg.data_len = msg->rsp_size - 10;
3178 			memcpy(recv_msg->msg_data,
3179 			       &(msg->rsp[9]),
3180 			       msg->rsp_size - 10);
3181 			deliver_response(recv_msg);
3182 		}
3183 	}
3184 
3185 	return rv;
3186 }
3187 
3188 static int handle_lan_get_msg_rsp(ipmi_smi_t          intf,
3189 				  struct ipmi_smi_msg *msg)
3190 {
3191 	struct ipmi_lan_addr  lan_addr;
3192 	struct ipmi_recv_msg  *recv_msg;
3193 
3194 
3195 	/*
3196 	 * This is 13, not 12, because the response must contain a
3197 	 * completion code.
3198 	 */
3199 	if (msg->rsp_size < 13) {
3200 		/* Message not big enough, just ignore it. */
3201 		ipmi_inc_stat(intf, invalid_lan_responses);
3202 		return 0;
3203 	}
3204 
3205 	if (msg->rsp[2] != 0) {
3206 		/* An error getting the response, just ignore it. */
3207 		return 0;
3208 	}
3209 
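	/*
	 * Pull the source address out of the LAN Get Message response.
	 * Per the parsing below: rsp[3] holds the channel and privilege,
	 * rsp[4] the session handle, rsp[5] and rsp[8] the local and
	 * remote SWIDs, rsp[6] the netfn, rsp[9] the rqSeq/LUN byte,
	 * rsp[10] the command, and the response data starts at rsp[11].
	 */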
3210 	lan_addr.addr_type = IPMI_LAN_ADDR_TYPE;
3211 	lan_addr.session_handle = msg->rsp[4];
3212 	lan_addr.remote_SWID = msg->rsp[8];
3213 	lan_addr.local_SWID = msg->rsp[5];
3214 	lan_addr.channel = msg->rsp[3] & 0x0f;
3215 	lan_addr.privilege = msg->rsp[3] >> 4;
3216 	lan_addr.lun = msg->rsp[9] & 3;
3217 
3218 	/*
3219 	 * It's a response from a remote entity.  Look up the sequence
3220 	 * number and handle the response.
3221 	 */
3222 	if (intf_find_seq(intf,
3223 			  msg->rsp[9] >> 2,
3224 			  msg->rsp[3] & 0x0f,
3225 			  msg->rsp[10],
3226 			  (msg->rsp[6] >> 2) & (~1),
3227 			  (struct ipmi_addr *) &(lan_addr),
3228 			  &recv_msg)) {
3229 		/*
3230 		 * We were unable to find the sequence number,
3231 		 * so just nuke the message.
3232 		 */
3233 		ipmi_inc_stat(intf, unhandled_lan_responses);
3234 		return 0;
3235 	}
3236 
3237 	memcpy(recv_msg->msg_data,
3238 	       &(msg->rsp[11]),
3239 	       msg->rsp_size - 11);
3240 	/*
3241 	 * The other fields matched, so no need to set them, except
3242 	 * for netfn, which needs to be the response that was
3243 	 * returned, not the request value.
3244 	 */
3245 	recv_msg->msg.netfn = msg->rsp[6] >> 2;
3246 	recv_msg->msg.data = recv_msg->msg_data;
3247 	recv_msg->msg.data_len = msg->rsp_size - 12;
3248 	recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
3249 	ipmi_inc_stat(intf, handled_lan_responses);
3250 	deliver_response(recv_msg);
3251 
3252 	return 0;
3253 }
3254 
3255 static int handle_lan_get_msg_cmd(ipmi_smi_t          intf,
3256 				  struct ipmi_smi_msg *msg)
3257 {
3258 	struct cmd_rcvr          *rcvr;
3259 	int                      rv = 0;
3260 	unsigned char            netfn;
3261 	unsigned char            cmd;
3262 	unsigned char            chan;
3263 	ipmi_user_t              user = NULL;
3264 	struct ipmi_lan_addr     *lan_addr;
3265 	struct ipmi_recv_msg     *recv_msg;
3266 
3267 	if (msg->rsp_size < 12) {
3268 		/* Message not big enough, just ignore it. */
3269 		ipmi_inc_stat(intf, invalid_commands);
3270 		return 0;
3271 	}
3272 
3273 	if (msg->rsp[2] != 0) {
3274 		/* An error getting the response, just ignore it. */
3275 		return 0;
3276 	}
3277 
3278 	netfn = msg->rsp[6] >> 2;
3279 	cmd = msg->rsp[10];
3280 	chan = msg->rsp[3] & 0xf;
3281 
3282 	rcu_read_lock();
3283 	rcvr = find_cmd_rcvr(intf, netfn, cmd, chan);
3284 	if (rcvr) {
3285 		user = rcvr->user;
3286 		kref_get(&user->refcount);
3287 	} else
3288 		user = NULL;
3289 	rcu_read_unlock();
3290 
3291 	if (user == NULL) {
3292 		/* We didn't find a user, just give up. */
3293 		ipmi_inc_stat(intf, unhandled_commands);
3294 
3295 		/*
3296 		 * Don't do anything with these messages, just allow
3297 		 * them to be freed.
3298 		 */
3299 		rv = 0;
3300 	} else {
3301 		/* Deliver the message to the user. */
3302 		ipmi_inc_stat(intf, handled_commands);
3303 
3304 		recv_msg = ipmi_alloc_recv_msg();
3305 		if (!recv_msg) {
3306 			/*
3307 			 * We couldn't allocate memory for the
3308 			 * message, so requeue it for handling later.
3309 			 */
3310 			rv = 1;
3311 			kref_put(&user->refcount, free_user);
3312 		} else {
3313 			/* Extract the source address from the data. */
3314 			lan_addr = (struct ipmi_lan_addr *) &recv_msg->addr;
3315 			lan_addr->addr_type = IPMI_LAN_ADDR_TYPE;
3316 			lan_addr->session_handle = msg->rsp[4];
3317 			lan_addr->remote_SWID = msg->rsp[8];
3318 			lan_addr->local_SWID = msg->rsp[5];
3319 			lan_addr->lun = msg->rsp[9] & 3;
3320 			lan_addr->channel = msg->rsp[3] & 0xf;
3321 			lan_addr->privilege = msg->rsp[3] >> 4;
3322 
3323 			/*
3324 			 * Extract the rest of the message information
3325 			 * from the LAN message header.
3326 			 */
3327 			recv_msg->user = user;
3328 			recv_msg->recv_type = IPMI_CMD_RECV_TYPE;
3329 			recv_msg->msgid = msg->rsp[9] >> 2;
3330 			recv_msg->msg.netfn = msg->rsp[6] >> 2;
3331 			recv_msg->msg.cmd = msg->rsp[10];
3332 			recv_msg->msg.data = recv_msg->msg_data;
3333 
3334 			/*
3335 			 * We chop off 12, not 11, bytes because the checksum
3336 			 * at the end also needs to be removed.
3337 			 */
3338 			recv_msg->msg.data_len = msg->rsp_size - 12;
3339 			memcpy(recv_msg->msg_data,
3340 			       &(msg->rsp[11]),
3341 			       msg->rsp_size - 12);
3342 			deliver_response(recv_msg);
3343 		}
3344 	}
3345 
3346 	return rv;
3347 }
3348 
3349 /*
3350  * This routine will handle "Get Message" command responses with
3351  * channels that use an OEM Medium. The message format belongs to
3352  * the OEM.  See IPMI 2.0 specification, Chapter 6 and
3353  * Chapter 22, sections 22.6 and 22.24 for more details.
3354  */
3355 static int handle_oem_get_msg_cmd(ipmi_smi_t          intf,
3356 				  struct ipmi_smi_msg *msg)
3357 {
3358 	struct cmd_rcvr       *rcvr;
3359 	int                   rv = 0;
3360 	unsigned char         netfn;
3361 	unsigned char         cmd;
3362 	unsigned char         chan;
3363 	ipmi_user_t           user = NULL;
3364 	struct ipmi_system_interface_addr *smi_addr;
3365 	struct ipmi_recv_msg  *recv_msg;
3366 
3367 	/*
3368 	 * We expect the OEM SW to perform error checking,
3369 	 * so we just do some basic sanity checks here.
3370 	 */
3371 	if (msg->rsp_size < 4) {
3372 		/* Message not big enough, just ignore it. */
3373 		ipmi_inc_stat(intf, invalid_commands);
3374 		return 0;
3375 	}
3376 
3377 	if (msg->rsp[2] != 0) {
3378 		/* An error getting the response, just ignore it. */
3379 		return 0;
3380 	}
3381 
3382 	/*
3383 	 * This is an OEM message, so the OEM needs to know how to
3384 	 * handle the message.  We do no interpretation.
3385 	 */
3386 	netfn = msg->rsp[0] >> 2;
3387 	cmd = msg->rsp[1];
3388 	chan = msg->rsp[3] & 0xf;
3389 
3390 	rcu_read_lock();
3391 	rcvr = find_cmd_rcvr(intf, netfn, cmd, chan);
3392 	if (rcvr) {
3393 		user = rcvr->user;
3394 		kref_get(&user->refcount);
3395 	} else
3396 		user = NULL;
3397 	rcu_read_unlock();
3398 
3399 	if (user == NULL) {
3400 		/* We didn't find a user, just give up. */
3401 		ipmi_inc_stat(intf, unhandled_commands);
3402 
3403 		/*
3404 		 * Don't do anything with these messages, just allow
3405 		 * them to be freed.
3406 		 */
3407 
3408 		rv = 0;
3409 	} else {
3410 		/* Deliver the message to the user. */
3411 		ipmi_inc_stat(intf, handled_commands);
3412 
3413 		recv_msg = ipmi_alloc_recv_msg();
3414 		if (!recv_msg) {
3415 			/*
3416 			 * We couldn't allocate memory for the
3417 			 * message, so requeue it for handling
3418 			 * later.
3419 			 */
3420 			rv = 1;
3421 			kref_put(&user->refcount, free_user);
3422 		} else {
3423 			/*
3424 			 * OEM Messages are expected to be delivered via
3425 			 * the system interface to SMS software.  We might
3426 			 * need to visit this again depending on OEM
3427 			 * requirements.
3428 			 */
3429 			smi_addr = ((struct ipmi_system_interface_addr *)
3430 				    &(recv_msg->addr));
3431 			smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
3432 			smi_addr->channel = IPMI_BMC_CHANNEL;
3433 			smi_addr->lun = msg->rsp[0] & 3;
3434 
3435 			recv_msg->user = user;
3436 			recv_msg->user_msg_data = NULL;
3437 			recv_msg->recv_type = IPMI_OEM_RECV_TYPE;
3438 			recv_msg->msg.netfn = msg->rsp[0] >> 2;
3439 			recv_msg->msg.cmd = msg->rsp[1];
3440 			recv_msg->msg.data = recv_msg->msg_data;
3441 
3442 			/*
3443 			 * The message starts at byte 4, which follows the
3444 			 * Channel Byte in the "GET MESSAGE" command.
3445 			 */
3446 			recv_msg->msg.data_len = msg->rsp_size - 4;
3447 			memcpy(recv_msg->msg_data,
3448 			       &(msg->rsp[4]),
3449 			       msg->rsp_size - 4);
3450 			deliver_response(recv_msg);
3451 		}
3452 	}
3453 
3454 	return rv;
3455 }
3456 
3457 static void copy_event_into_recv_msg(struct ipmi_recv_msg *recv_msg,
3458 				     struct ipmi_smi_msg  *msg)
3459 {
3460 	struct ipmi_system_interface_addr *smi_addr;
3461 
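	/*
	 * Events arrive on the system interface: rsp[0] carries the
	 * netfn and LUN, rsp[1] the command, and the event data itself
	 * starts at rsp[3].  Repackage it as an async event for the user.
	 */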
3462 	recv_msg->msgid = 0;
3463 	smi_addr = (struct ipmi_system_interface_addr *) &(recv_msg->addr);
3464 	smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
3465 	smi_addr->channel = IPMI_BMC_CHANNEL;
3466 	smi_addr->lun = msg->rsp[0] & 3;
3467 	recv_msg->recv_type = IPMI_ASYNC_EVENT_RECV_TYPE;
3468 	recv_msg->msg.netfn = msg->rsp[0] >> 2;
3469 	recv_msg->msg.cmd = msg->rsp[1];
3470 	memcpy(recv_msg->msg_data, &(msg->rsp[3]), msg->rsp_size - 3);
3471 	recv_msg->msg.data = recv_msg->msg_data;
3472 	recv_msg->msg.data_len = msg->rsp_size - 3;
3473 }
3474 
3475 static int handle_read_event_rsp(ipmi_smi_t          intf,
3476 				 struct ipmi_smi_msg *msg)
3477 {
3478 	struct ipmi_recv_msg *recv_msg, *recv_msg2;
3479 	struct list_head     msgs;
3480 	ipmi_user_t          user;
3481 	int                  rv = 0;
3482 	int                  deliver_count = 0;
3483 	unsigned long        flags;
3484 
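	/*
	 * A valid event response needs at least 19 bytes: netfn/LUN,
	 * command and completion code, followed by at least the 16-byte
	 * event record that copy_event_into_recv_msg() copies out.
	 */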
3485 	if (msg->rsp_size < 19) {
3486 		/* Message is too small to be an IPMB event. */
3487 		ipmi_inc_stat(intf, invalid_events);
3488 		return 0;
3489 	}
3490 
3491 	if (msg->rsp[2] != 0) {
3492 		/* An error getting the event, just ignore it. */
3493 		return 0;
3494 	}
3495 
3496 	INIT_LIST_HEAD(&msgs);
3497 
3498 	spin_lock_irqsave(&intf->events_lock, flags);
3499 
3500 	ipmi_inc_stat(intf, events);
3501 
3502 	/*
3503 	 * Allocate and fill in one message for every user that is
3504 	 * getting events.
3505 	 */
3506 	rcu_read_lock();
3507 	list_for_each_entry_rcu(user, &intf->users, link) {
3508 		if (!user->gets_events)
3509 			continue;
3510 
3511 		recv_msg = ipmi_alloc_recv_msg();
3512 		if (!recv_msg) {
3513 			rcu_read_unlock();
3514 			list_for_each_entry_safe(recv_msg, recv_msg2, &msgs,
3515 						 link) {
3516 				list_del(&recv_msg->link);
3517 				ipmi_free_recv_msg(recv_msg);
3518 			}
3519 			/*
3520 			 * We couldn't allocate memory for the
3521 			 * message, so requeue it for handling
3522 			 * later.
3523 			 */
3524 			rv = 1;
3525 			goto out;
3526 		}
3527 
3528 		deliver_count++;
3529 
3530 		copy_event_into_recv_msg(recv_msg, msg);
3531 		recv_msg->user = user;
3532 		kref_get(&user->refcount);
3533 		list_add_tail(&(recv_msg->link), &msgs);
3534 	}
3535 	rcu_read_unlock();
3536 
3537 	if (deliver_count) {
3538 		/* Now deliver all the messages. */
3539 		list_for_each_entry_safe(recv_msg, recv_msg2, &msgs, link) {
3540 			list_del(&recv_msg->link);
3541 			deliver_response(recv_msg);
3542 		}
3543 	} else if (intf->waiting_events_count < MAX_EVENTS_IN_QUEUE) {
3544 		/*
3545 		 * No one to receive the message; put it in the queue if
3546 		 * there aren't already too many things in the queue.
3547 		 */
3548 		recv_msg = ipmi_alloc_recv_msg();
3549 		if (!recv_msg) {
3550 			/*
3551 			 * We couldn't allocate memory for the
3552 			 * message, so requeue it for handling
3553 			 * later.
3554 			 */
3555 			rv = 1;
3556 			goto out;
3557 		}
3558 
3559 		copy_event_into_recv_msg(recv_msg, msg);
3560 		list_add_tail(&(recv_msg->link), &(intf->waiting_events));
3561 		intf->waiting_events_count++;
3562 	} else if (!intf->event_msg_printed) {
3563 		/*
3564 		 * There are too many things in the queue; discard this
3565 		 * message.
3566 		 */
3567 		printk(KERN_WARNING PFX "Event queue full, discarding"
3568 		       " incoming events\n");
3569 		intf->event_msg_printed = 1;
3570 	}
3571 
3572  out:
3573 	spin_unlock_irqrestore(&(intf->events_lock), flags);
3574 
3575 	return rv;
3576 }
3577 
3578 static int handle_bmc_rsp(ipmi_smi_t          intf,
3579 			  struct ipmi_smi_msg *msg)
3580 {
3581 	struct ipmi_recv_msg *recv_msg;
3582 	struct ipmi_user     *user;
3583 
3584 	recv_msg = (struct ipmi_recv_msg *) msg->user_data;
3585 	if (recv_msg == NULL) {
3586 		printk(KERN_WARNING
3587 		       "IPMI message received with no owner. This\n"
3588 		       "could be because of a malformed message, or\n"
3589 		       "because of a hardware error.  Contact your\n"
3590 		       "hardware vendor for assistance.\n");
3591 		return 0;
3592 	}
3593 
3594 	user = recv_msg->user;
3595 	/* Make sure the user still exists. */
3596 	if (user && !user->valid) {
3597 		/* The user for the message went away, so give up. */
3598 		ipmi_inc_stat(intf, unhandled_local_responses);
3599 		ipmi_free_recv_msg(recv_msg);
3600 	} else {
3601 		struct ipmi_system_interface_addr *smi_addr;
3602 
3603 		ipmi_inc_stat(intf, handled_local_responses);
3604 		recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
3605 		recv_msg->msgid = msg->msgid;
3606 		smi_addr = ((struct ipmi_system_interface_addr *)
3607 			    &(recv_msg->addr));
3608 		smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
3609 		smi_addr->channel = IPMI_BMC_CHANNEL;
3610 		smi_addr->lun = msg->rsp[0] & 3;
3611 		recv_msg->msg.netfn = msg->rsp[0] >> 2;
3612 		recv_msg->msg.cmd = msg->rsp[1];
3613 		memcpy(recv_msg->msg_data,
3614 		       &(msg->rsp[2]),
3615 		       msg->rsp_size - 2);
3616 		recv_msg->msg.data = recv_msg->msg_data;
3617 		recv_msg->msg.data_len = msg->rsp_size - 2;
3618 		deliver_response(recv_msg);
3619 	}
3620 
3621 	return 0;
3622 }
3623 
3624 /*
3625  * Handle a new message.  Return 1 if the message should be requeued,
3626  * 0 if the message should be freed, or -1 if the message should not
3627  * be freed or requeued.
3628  */
3629 static int handle_new_recv_msg(ipmi_smi_t          intf,
3630 			       struct ipmi_smi_msg *msg)
3631 {
3632 	int requeue;
3633 	int chan;
3634 
3635 #ifdef DEBUG_MSGING
3636 	int m;
3637 	printk("Recv:");
3638 	for (m = 0; m < msg->rsp_size; m++)
3639 		printk(" %2.2x", msg->rsp[m]);
3640 	printk("\n");
3641 #endif
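	/*
	 * Sanity-check the response against the request that generated it:
	 * a response NetFN is the request NetFN with the low bit set, and
	 * the command byte must match.  If either check fails, synthesize
	 * an IPMI_ERR_UNSPECIFIED response so the sender still gets an
	 * answer.
	 */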
3642 	if (msg->rsp_size < 2) {
3643 		/* Message is too small to be correct. */
3644 		printk(KERN_WARNING PFX "BMC returned too small a message"
3645 		       " for netfn %x cmd %x, got %d bytes\n",
3646 		       (msg->data[0] >> 2) | 1, msg->data[1], msg->rsp_size);
3647 
3648 		/* Generate an error response for the message. */
3649 		msg->rsp[0] = msg->data[0] | (1 << 2);
3650 		msg->rsp[1] = msg->data[1];
3651 		msg->rsp[2] = IPMI_ERR_UNSPECIFIED;
3652 		msg->rsp_size = 3;
3653 	} else if (((msg->rsp[0] >> 2) != ((msg->data[0] >> 2) | 1))
3654 		   || (msg->rsp[1] != msg->data[1])) {
3655 		/*
3656 		 * The NetFN and Command in the response are not even
3657 		 * marginally correct.
3658 		 */
3659 		printk(KERN_WARNING PFX "BMC returned incorrect response,"
3660 		       " expected netfn %x cmd %x, got netfn %x cmd %x\n",
3661 		       (msg->data[0] >> 2) | 1, msg->data[1],
3662 		       msg->rsp[0] >> 2, msg->rsp[1]);
3663 
3664 		/* Generate an error response for the message. */
3665 		msg->rsp[0] = msg->data[0] | (1 << 2);
3666 		msg->rsp[1] = msg->data[1];
3667 		msg->rsp[2] = IPMI_ERR_UNSPECIFIED;
3668 		msg->rsp_size = 3;
3669 	}
3670 
3671 	if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
3672 	    && (msg->rsp[1] == IPMI_SEND_MSG_CMD)
3673 	    && (msg->user_data != NULL)) {
3674 		/*
3675 		 * It's a response to a response we sent.  For this we
3676 		 * deliver a send message response to the user.
3677 		 */
3678 		struct ipmi_recv_msg     *recv_msg = msg->user_data;
3679 
3680 		requeue = 0;
3681 		if (msg->rsp_size < 2)
3682 			/* Message is too small to be correct. */
3683 			goto out;
3684 
3685 		chan = msg->data[2] & 0x0f;
3686 		if (chan >= IPMI_MAX_CHANNELS)
3687 			/* Invalid channel number */
3688 			goto out;
3689 
3690 		if (!recv_msg)
3691 			goto out;
3692 
3693 		/* Make sure the user still exists. */
3694 		if (!recv_msg->user || !recv_msg->user->valid)
3695 			goto out;
3696 
3697 		recv_msg->recv_type = IPMI_RESPONSE_RESPONSE_TYPE;
3698 		recv_msg->msg.data = recv_msg->msg_data;
3699 		recv_msg->msg.data_len = 1;
3700 		recv_msg->msg_data[0] = msg->rsp[2];
3701 		deliver_response(recv_msg);
3702 	} else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
3703 		   && (msg->rsp[1] == IPMI_GET_MSG_CMD)) {
3704 		/* It's from the receive queue. */
3705 		chan = msg->rsp[3] & 0xf;
3706 		if (chan >= IPMI_MAX_CHANNELS) {
3707 			/* Invalid channel number */
3708 			requeue = 0;
3709 			goto out;
3710 		}
3711 
3712 		/*
3713 		 * We need to make sure the channels have been initialized.
3714 		 * The channel_handler routine will set "curr_channel" to
3715 		 * a value at or above IPMI_MAX_CHANNELS when all the
3716 		 * channels for this interface have been initialized.
3717 		 */
3718 		if (intf->curr_channel < IPMI_MAX_CHANNELS) {
3719 			requeue = 0; /* Throw the message away */
3720 			goto out;
3721 		}
3722 
3723 		switch (intf->channels[chan].medium) {
3724 		case IPMI_CHANNEL_MEDIUM_IPMB:
3725 			if (msg->rsp[4] & 0x04) {
3726 				/*
3727 				 * It's a response, so find the
3728 				 * requesting message and send it up.
3729 				 */
3730 				requeue = handle_ipmb_get_msg_rsp(intf, msg);
3731 			} else {
3732 				/*
3733 				 * It's a command to the SMS from some other
3734 				 * entity.  Handle that.
3735 				 */
3736 				requeue = handle_ipmb_get_msg_cmd(intf, msg);
3737 			}
3738 			break;
3739 
3740 		case IPMI_CHANNEL_MEDIUM_8023LAN:
3741 		case IPMI_CHANNEL_MEDIUM_ASYNC:
3742 			if (msg->rsp[6] & 0x04) {
3743 				/*
3744 				 * It's a response, so find the
3745 				 * requesting message and send it up.
3746 				 */
3747 				requeue = handle_lan_get_msg_rsp(intf, msg);
3748 			} else {
3749 				/*
3750 				 * It's a command to the SMS from some other
3751 				 * entity.  Handle that.
3752 				 */
3753 				requeue = handle_lan_get_msg_cmd(intf, msg);
3754 			}
3755 			break;
3756 
3757 		default:
3758 			/* Check for OEM Channels.  Clients had better
3759 			   register for these commands. */
3760 			if ((intf->channels[chan].medium
3761 			     >= IPMI_CHANNEL_MEDIUM_OEM_MIN)
3762 			    && (intf->channels[chan].medium
3763 				<= IPMI_CHANNEL_MEDIUM_OEM_MAX)) {
3764 				requeue = handle_oem_get_msg_cmd(intf, msg);
3765 			} else {
3766 				/*
3767 				 * We don't handle the channel type, so just
3768 				 * free the message.
3769 				 */
3770 				requeue = 0;
3771 			}
3772 		}
3773 
3774 	} else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
3775 		   && (msg->rsp[1] == IPMI_READ_EVENT_MSG_BUFFER_CMD)) {
3776 		/* It's an asynchronous event. */
3777 		requeue = handle_read_event_rsp(intf, msg);
3778 	} else {
3779 		/* It's a response from the local BMC. */
3780 		requeue = handle_bmc_rsp(intf, msg);
3781 	}
3782 
3783  out:
3784 	return requeue;
3785 }
3786 
3787 /* Handle a new message from the lower layer. */
3788 void ipmi_smi_msg_received(ipmi_smi_t          intf,
3789 			   struct ipmi_smi_msg *msg)
3790 {
3791 	unsigned long flags = 0; /* keep us warning-free. */
3792 	int           rv;
3793 	int           run_to_completion;
3794 
3795 
3796 	if ((msg->data_size >= 2)
3797 	    && (msg->data[0] == (IPMI_NETFN_APP_REQUEST << 2))
3798 	    && (msg->data[1] == IPMI_SEND_MSG_CMD)
3799 	    && (msg->user_data == NULL)) {
3800 		/*
3801 		 * This is the local response to a command send; start
3802 		 * the timer for it.  The user_data will not be NULL
3803 		 * if this is a response send, and response sends are
3804 		 * simply allowed through.
3805 		 */
3806 
3807 		/*
3808 		 * Check for errors.  If we get certain errors (ones
3809 		 * that basically mean we can try again later), we
3810 		 * ignore them and start the timer.  Otherwise we
3811 		 * report the error immediately.
3812 		 */
3813 		if ((msg->rsp_size >= 3) && (msg->rsp[2] != 0)
3814 		    && (msg->rsp[2] != IPMI_NODE_BUSY_ERR)
3815 		    && (msg->rsp[2] != IPMI_LOST_ARBITRATION_ERR)
3816 		    && (msg->rsp[2] != IPMI_BUS_ERR)
3817 		    && (msg->rsp[2] != IPMI_NAK_ON_WRITE_ERR)) {
3818 			int chan = msg->rsp[3] & 0xf;
3819 
3820 			/* Got an error sending the message, handle it. */
3821 			if (chan >= IPMI_MAX_CHANNELS)
3822 				; /* This shouldn't happen */
3823 			else if ((intf->channels[chan].medium
3824 				  == IPMI_CHANNEL_MEDIUM_8023LAN)
3825 				 || (intf->channels[chan].medium
3826 				     == IPMI_CHANNEL_MEDIUM_ASYNC))
3827 				ipmi_inc_stat(intf, sent_lan_command_errs);
3828 			else
3829 				ipmi_inc_stat(intf, sent_ipmb_command_errs);
3830 			intf_err_seq(intf, msg->msgid, msg->rsp[2]);
3831 		} else
3832 			/* The message was sent, start the timer. */
3833 			intf_start_seq_timer(intf, msg->msgid);
3834 
3835 		ipmi_free_smi_msg(msg);
3836 		goto out;
3837 	}
3838 
3839 	/*
3840 	 * To preserve message order, if the list is not empty, we
3841 	 * tack this message onto the end of the list.
3842 	 */
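	/* Queued messages are drained later from ipmi_timeout_handler(). */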
3843 	run_to_completion = intf->run_to_completion;
3844 	if (!run_to_completion)
3845 		spin_lock_irqsave(&intf->waiting_msgs_lock, flags);
3846 	if (!list_empty(&intf->waiting_msgs)) {
3847 		list_add_tail(&msg->link, &intf->waiting_msgs);
3848 		if (!run_to_completion)
3849 			spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);
3850 		goto out;
3851 	}
3852 	if (!run_to_completion)
3853 		spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);
3854 
3855 	rv = handle_new_recv_msg(intf, msg);
3856 	if (rv > 0) {
3857 		/*
3858 		 * Could not handle the message now, just add it to a
3859 		 * list to handle later.
3860 		 */
3861 		run_to_completion = intf->run_to_completion;
3862 		if (!run_to_completion)
3863 			spin_lock_irqsave(&intf->waiting_msgs_lock, flags);
3864 		list_add_tail(&msg->link, &intf->waiting_msgs);
3865 		if (!run_to_completion)
3866 			spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);
3867 	} else if (rv == 0) {
3868 		ipmi_free_smi_msg(msg);
3869 	}
3870 
3871  out:
3872 	return;
3873 }
3874 EXPORT_SYMBOL(ipmi_smi_msg_received);
3875 
3876 void ipmi_smi_watchdog_pretimeout(ipmi_smi_t intf)
3877 {
3878 	ipmi_user_t user;
3879 
3880 	rcu_read_lock();
3881 	list_for_each_entry_rcu(user, &intf->users, link) {
3882 		if (!user->handler->ipmi_watchdog_pretimeout)
3883 			continue;
3884 
3885 		user->handler->ipmi_watchdog_pretimeout(user->handler_data);
3886 	}
3887 	rcu_read_unlock();
3888 }
3889 EXPORT_SYMBOL(ipmi_smi_watchdog_pretimeout);
3890 
3891 static struct ipmi_smi_msg *
3892 smi_from_recv_msg(ipmi_smi_t intf, struct ipmi_recv_msg *recv_msg,
3893 		  unsigned char seq, long seqid)
3894 {
3895 	struct ipmi_smi_msg *smi_msg = ipmi_alloc_smi_msg();
3896 	if (!smi_msg)
3897 		/*
3898 		 * If we can't allocate the message, just return; we get
3899 		 * 4 retries, so this should be OK.
3900 		 */
3901 		return NULL;
3902 
3903 	memcpy(smi_msg->data, recv_msg->msg.data, recv_msg->msg.data_len);
3904 	smi_msg->data_size = recv_msg->msg.data_len;
3905 	smi_msg->msgid = STORE_SEQ_IN_MSGID(seq, seqid);
3906 
3907 #ifdef DEBUG_MSGING
3908 	{
3909 		int m;
3910 		printk("Resend: ");
3911 		for (m = 0; m < smi_msg->data_size; m++)
3912 			printk(" %2.2x", smi_msg->data[m]);
3913 		printk("\n");
3914 	}
3915 #endif
3916 	return smi_msg;
3917 }
3918 
3919 static void check_msg_timeout(ipmi_smi_t intf, struct seq_table *ent,
3920 			      struct list_head *timeouts, long timeout_period,
3921 			      int slot, unsigned long *flags)
3922 {
3923 	struct ipmi_recv_msg     *msg;
3924 	struct ipmi_smi_handlers *handlers;
3925 
3926 	if (intf->intf_num == -1)
3927 		return;
3928 
3929 	if (!ent->inuse)
3930 		return;
3931 
3932 	ent->timeout -= timeout_period;
3933 	if (ent->timeout > 0)
3934 		return;
3935 
3936 	if (ent->retries_left == 0) {
3937 		/* The message has used all its retries. */
3938 		ent->inuse = 0;
3939 		msg = ent->recv_msg;
3940 		list_add_tail(&msg->link, timeouts);
3941 		if (ent->broadcast)
3942 			ipmi_inc_stat(intf, timed_out_ipmb_broadcasts);
3943 		else if (is_lan_addr(&ent->recv_msg->addr))
3944 			ipmi_inc_stat(intf, timed_out_lan_commands);
3945 		else
3946 			ipmi_inc_stat(intf, timed_out_ipmb_commands);
3947 	} else {
3948 		struct ipmi_smi_msg *smi_msg;
3949 		/* More retries, send again. */
3950 
3951 		/*
3952 		 * Start with the max timer, set to normal timer after
3953 		 * the message is sent.
3954 		 */
3955 		ent->timeout = MAX_MSG_TIMEOUT;
3956 		ent->retries_left--;
3957 		smi_msg = smi_from_recv_msg(intf, ent->recv_msg, slot,
3958 					    ent->seqid);
3959 		if (!smi_msg) {
3960 			if (is_lan_addr(&ent->recv_msg->addr))
3961 				ipmi_inc_stat(intf,
3962 					      dropped_rexmit_lan_commands);
3963 			else
3964 				ipmi_inc_stat(intf,
3965 					      dropped_rexmit_ipmb_commands);
3966 			return;
3967 		}
3968 
3969 		spin_unlock_irqrestore(&intf->seq_lock, *flags);
3970 
3971 		/*
3972 		 * Send the new message.  We send with a zero
3973 		 * priority.  It timed out, I doubt time is that
3974 		 * critical now, and high priority messages are really
3975 		 * only for messages to the local MC, which don't get
3976 		 * resent.
3977 		 */
3978 		handlers = intf->handlers;
3979 		if (handlers) {
3980 			if (is_lan_addr(&ent->recv_msg->addr))
3981 				ipmi_inc_stat(intf,
3982 					      retransmitted_lan_commands);
3983 			else
3984 				ipmi_inc_stat(intf,
3985 					      retransmitted_ipmb_commands);
3986 
3987 			intf->handlers->sender(intf->send_info,
3988 					       smi_msg, 0);
3989 		} else
3990 			ipmi_free_smi_msg(smi_msg);
3991 
3992 		spin_lock_irqsave(&intf->seq_lock, *flags);
3993 	}
3994 }
3995 
3996 static void ipmi_timeout_handler(long timeout_period)
3997 {
3998 	ipmi_smi_t           intf;
3999 	struct list_head     timeouts;
4000 	struct ipmi_recv_msg *msg, *msg2;
4001 	struct ipmi_smi_msg  *smi_msg, *smi_msg2;
4002 	unsigned long        flags;
4003 	int                  i;
4004 
4005 	rcu_read_lock();
4006 	list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
4007 		/* See if any waiting messages need to be processed. */
4008 		spin_lock_irqsave(&intf->waiting_msgs_lock, flags);
4009 		list_for_each_entry_safe(smi_msg, smi_msg2,
4010 					 &intf->waiting_msgs, link) {
4011 			if (!handle_new_recv_msg(intf, smi_msg)) {
4012 				list_del(&smi_msg->link);
4013 				ipmi_free_smi_msg(smi_msg);
4014 			} else {
4015 				/*
4016 				 * To preserve message order, quit if we
4017 				 * can't handle a message.
4018 				 */
4019 				break;
4020 			}
4021 		}
4022 		spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);
4023 
4024 		/*
4025 		 * Go through the seq table and find any messages that
4026 		 * have timed out, putting them in the timeouts
4027 		 * list.
4028 		 */
4029 		INIT_LIST_HEAD(&timeouts);
4030 		spin_lock_irqsave(&intf->seq_lock, flags);
4031 		for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++)
4032 			check_msg_timeout(intf, &(intf->seq_table[i]),
4033 					  &timeouts, timeout_period, i,
4034 					  &flags);
4035 		spin_unlock_irqrestore(&intf->seq_lock, flags);
4036 
4037 		list_for_each_entry_safe(msg, msg2, &timeouts, link)
4038 			deliver_err_response(msg, IPMI_TIMEOUT_COMPLETION_CODE);
4039 
4040 		/*
4041 		 * Maintenance mode handling.  Check the timeout
4042 		 * optimistically before we claim the lock.  It may
4043 		 * mean a timeout gets missed occasionally, but that
4044 		 * only means the timeout gets extended by one period
4045 		 * in that case.  No big deal, and it avoids the lock
4046 		 * most of the time.
4047 		 */
4048 		if (intf->auto_maintenance_timeout > 0) {
4049 			spin_lock_irqsave(&intf->maintenance_mode_lock, flags);
4050 			if (intf->auto_maintenance_timeout > 0) {
4051 				intf->auto_maintenance_timeout
4052 					-= timeout_period;
4053 				if (!intf->maintenance_mode
4054 				    && (intf->auto_maintenance_timeout <= 0)) {
4055 					intf->maintenance_mode_enable = 0;
4056 					maintenance_mode_update(intf);
4057 				}
4058 			}
4059 			spin_unlock_irqrestore(&intf->maintenance_mode_lock,
4060 					       flags);
4061 		}
4062 	}
4063 	rcu_read_unlock();
4064 }
4065 
4066 static void ipmi_request_event(void)
4067 {
4068 	ipmi_smi_t               intf;
4069 	struct ipmi_smi_handlers *handlers;
4070 
4071 	rcu_read_lock();
4072 	/*
4073 	 * Called from the timer, no need to check if handlers is
4074 	 * valid.
4075 	 */
4076 	list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
4077 		/* No event requests when in maintenance mode. */
4078 		if (intf->maintenance_mode_enable)
4079 			continue;
4080 
4081 		handlers = intf->handlers;
4082 		if (handlers)
4083 			handlers->request_events(intf->send_info);
4084 	}
4085 	rcu_read_unlock();
4086 }
4087 
4088 static struct timer_list ipmi_timer;
4089 
4090 /* Call every ~1000 ms. */
4091 #define IPMI_TIMEOUT_TIME	1000
4092 
4093 /* The number of jiffies it takes to get to the timeout time. */
4094 #define IPMI_TIMEOUT_JIFFIES	((IPMI_TIMEOUT_TIME * HZ) / 1000)
4095 
4096 /*
4097  * Request events from the queue every second (this is the number of
4098  * IPMI_TIMEOUT_TIME periods between event requests).  Hopefully, in the
4099  * future, IPMI will add a way to know immediately if an event is in
4100  * the queue and this silliness can go away.
4101  */
4102 #define IPMI_REQUEST_EV_TIME	(1000 / (IPMI_TIMEOUT_TIME))
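/*
 * With the values above this works out to 1000 / 1000 = 1, i.e. events
 * are requested on every timer tick, roughly once per second.
 */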
4103 
4104 static atomic_t stop_operation;
4105 static unsigned int ticks_to_req_ev = IPMI_REQUEST_EV_TIME;
4106 
4107 static void ipmi_timeout(unsigned long data)
4108 {
4109 	if (atomic_read(&stop_operation))
4110 		return;
4111 
4112 	ticks_to_req_ev--;
4113 	if (ticks_to_req_ev == 0) {
4114 		ipmi_request_event();
4115 		ticks_to_req_ev = IPMI_REQUEST_EV_TIME;
4116 	}
4117 
4118 	ipmi_timeout_handler(IPMI_TIMEOUT_TIME);
4119 
4120 	mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
4121 }
4122 
4123 
4124 static atomic_t smi_msg_inuse_count = ATOMIC_INIT(0);
4125 static atomic_t recv_msg_inuse_count = ATOMIC_INIT(0);
4126 
4127 /* FIXME - convert these to slabs. */
4128 static void free_smi_msg(struct ipmi_smi_msg *msg)
4129 {
4130 	atomic_dec(&smi_msg_inuse_count);
4131 	kfree(msg);
4132 }
4133 
4134 struct ipmi_smi_msg *ipmi_alloc_smi_msg(void)
4135 {
4136 	struct ipmi_smi_msg *rv;
4137 	rv = kmalloc(sizeof(struct ipmi_smi_msg), GFP_ATOMIC);
4138 	if (rv) {
4139 		rv->done = free_smi_msg;
4140 		rv->user_data = NULL;
4141 		atomic_inc(&smi_msg_inuse_count);
4142 	}
4143 	return rv;
4144 }
4145 EXPORT_SYMBOL(ipmi_alloc_smi_msg);
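/*
 * Illustrative sketch only (not taken from any in-tree SMI driver): it
 * shows the pairing of the allocator with the done() callback.  The
 * request bytes below are just an example.
 *
 *	struct ipmi_smi_msg *m = ipmi_alloc_smi_msg();
 *
 *	if (!m)
 *		return -ENOMEM;
 *	m->data[0] = IPMI_NETFN_APP_REQUEST << 2;
 *	m->data[1] = IPMI_GET_DEVICE_ID_CMD;
 *	m->data_size = 2;
 *	...
 *	ipmi_free_smi_msg(m);	(calls m->done(), here free_smi_msg())
 */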
4146 
4147 static void free_recv_msg(struct ipmi_recv_msg *msg)
4148 {
4149 	atomic_dec(&recv_msg_inuse_count);
4150 	kfree(msg);
4151 }
4152 
4153 static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void)
4154 {
4155 	struct ipmi_recv_msg *rv;
4156 
4157 	rv = kmalloc(sizeof(struct ipmi_recv_msg), GFP_ATOMIC);
4158 	if (rv) {
4159 		rv->user = NULL;
4160 		rv->done = free_recv_msg;
4161 		atomic_inc(&recv_msg_inuse_count);
4162 	}
4163 	return rv;
4164 }
4165 
4166 void ipmi_free_recv_msg(struct ipmi_recv_msg *msg)
4167 {
4168 	if (msg->user)
4169 		kref_put(&msg->user->refcount, free_user);
4170 	msg->done(msg);
4171 }
4172 EXPORT_SYMBOL(ipmi_free_recv_msg);
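/*
 * Note that freeing a receive message also drops the reference held on
 * the message's user, so every ipmi_recv_msg handed to a user's handler
 * must eventually be released with ipmi_free_recv_msg().
 */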
4173 
4174 #ifdef CONFIG_IPMI_PANIC_EVENT
4175 
4176 static void dummy_smi_done_handler(struct ipmi_smi_msg *msg)
4177 {
4178 }
4179 
4180 static void dummy_recv_done_handler(struct ipmi_recv_msg *msg)
4181 {
4182 }
4183 
4184 #ifdef CONFIG_IPMI_PANIC_STRING
4185 static void event_receiver_fetcher(ipmi_smi_t intf, struct ipmi_recv_msg *msg)
4186 {
4187 	if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
4188 	    && (msg->msg.netfn == IPMI_NETFN_SENSOR_EVENT_RESPONSE)
4189 	    && (msg->msg.cmd == IPMI_GET_EVENT_RECEIVER_CMD)
4190 	    && (msg->msg.data[0] == IPMI_CC_NO_ERROR)) {
4191 		/* A get event receiver command, save it. */
4192 		intf->event_receiver = msg->msg.data[1];
4193 		intf->event_receiver_lun = msg->msg.data[2] & 0x3;
4194 	}
4195 }
4196 
4197 static void device_id_fetcher(ipmi_smi_t intf, struct ipmi_recv_msg *msg)
4198 {
4199 	if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
4200 	    && (msg->msg.netfn == IPMI_NETFN_APP_RESPONSE)
4201 	    && (msg->msg.cmd == IPMI_GET_DEVICE_ID_CMD)
4202 	    && (msg->msg.data[0] == IPMI_CC_NO_ERROR)) {
4203 		/*
4204 		 * A get device id command; save whether we are an event
4205 		 * receiver or generator.
4206 		 */
4207 		intf->local_sel_device = (msg->msg.data[6] >> 2) & 1;
4208 		intf->local_event_generator = (msg->msg.data[6] >> 5) & 1;
4209 	}
4210 }
4211 #endif
4212 
4213 static void send_panic_events(char *str)
4214 {
4215 	struct kernel_ipmi_msg            msg;
4216 	ipmi_smi_t                        intf;
4217 	unsigned char                     data[16];
4218 	struct ipmi_system_interface_addr *si;
4219 	struct ipmi_addr                  addr;
4220 	struct ipmi_smi_msg               smi_msg;
4221 	struct ipmi_recv_msg              recv_msg;
4222 
4223 	si = (struct ipmi_system_interface_addr *) &addr;
4224 	si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
4225 	si->channel = IPMI_BMC_CHANNEL;
4226 	si->lun = 0;
4227 
4228 	/* Fill in an event saying that we have failed. */
4229 	msg.netfn = 0x04; /* Sensor or Event. */
4230 	msg.cmd = 2; /* Platform event command. */
4231 	msg.data = data;
4232 	msg.data_len = 8;
4233 	data[0] = 0x41; /* Kernel generator ID, IPMI table 5-4 */
4234 	data[1] = 0x03; /* This is for IPMI 1.0. */
4235 	data[2] = 0x20; /* OS Critical Stop, IPMI table 36-3 */
4236 	data[4] = 0x6f; /* Sensor specific, IPMI table 36-1 */
4237 	data[5] = 0xa1; /* Runtime stop OEM bytes 2 & 3. */
4238 
4239 	/*
4240 	 * Put a few breadcrumbs in.  Hopefully later we can add more things
4241 	 * to make the panic events more useful.
4242 	 */
4243 	if (str) {
4244 		data[3] = str[0];
4245 		data[6] = str[1];
4246 		data[7] = str[2];
4247 	}
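	/*
	 * The eight data bytes built above form a standard Platform Event
	 * request: generator ID (0x41), event message revision (0x03),
	 * sensor type (0x20, OS Critical Stop), sensor number (first
	 * breadcrumb byte), event dir/type (0x6f, sensor-specific), and
	 * event data 1-3 (0xa1 plus two more breadcrumb bytes).
	 */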
4248 
4249 	smi_msg.done = dummy_smi_done_handler;
4250 	recv_msg.done = dummy_recv_done_handler;
4251 
4252 	/* For every registered interface, send the event. */
4253 	list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
4254 		if (!intf->handlers)
4255 			/* Interface is not ready. */
4256 			continue;
4257 
4258 		intf->run_to_completion = 1;
4259 		/* Send the event announcing the panic. */
4260 		intf->handlers->set_run_to_completion(intf->send_info, 1);
4261 		i_ipmi_request(NULL,
4262 			       intf,
4263 			       &addr,
4264 			       0,
4265 			       &msg,
4266 			       intf,
4267 			       &smi_msg,
4268 			       &recv_msg,
4269 			       0,
4270 			       intf->channels[0].address,
4271 			       intf->channels[0].lun,
4272 			       0, 1); /* Don't retry, and don't wait. */
4273 	}
4274 
4275 #ifdef CONFIG_IPMI_PANIC_STRING
4276 	/*
4277 	 * On every interface, dump a bunch of OEM events holding the
4278 	 * string.
4279 	 */
4280 	if (!str)
4281 		return;
4282 
4283 	/* For every registered interface, send the event. */
4284 	list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
4285 		char                  *p = str;
4286 		struct ipmi_ipmb_addr *ipmb;
4287 		int                   j;
4288 
4289 		if (intf->intf_num == -1)
4290 			/* Interface was not ready yet. */
4291 			continue;
4292 
4293 		/*
4294 		 * intf_num is used as a marker to tell if the
4295 		 * interface is valid.  Thus we need a read barrier to
4296 		 * make sure data fetched before checking intf_num
4297 		 * won't be used.
4298 		 */
4299 		smp_rmb();
4300 
4301 		/*
4302 		 * First job here is to figure out where to send the
4303 		 * OEM events.  There's no way in IPMI to send OEM
4304 		 * events using an event send command, so we have to
4305 		 * find the SEL to put them in and stick them in
4306 		 * there.
4307 		 */
4308 
4309 		/* Get capabilities from the get device id. */
4310 		intf->local_sel_device = 0;
4311 		intf->local_event_generator = 0;
4312 		intf->event_receiver = 0;
4313 
4314 		/* Request the device info from the local MC. */
4315 		msg.netfn = IPMI_NETFN_APP_REQUEST;
4316 		msg.cmd = IPMI_GET_DEVICE_ID_CMD;
4317 		msg.data = NULL;
4318 		msg.data_len = 0;
4319 		intf->null_user_handler = device_id_fetcher;
4320 		i_ipmi_request(NULL,
4321 			       intf,
4322 			       &addr,
4323 			       0,
4324 			       &msg,
4325 			       intf,
4326 			       &smi_msg,
4327 			       &recv_msg,
4328 			       0,
4329 			       intf->channels[0].address,
4330 			       intf->channels[0].lun,
4331 			       0, 1); /* Don't retry, and don't wait. */
4332 
4333 		if (intf->local_event_generator) {
4334 			/* Request the event receiver from the local MC. */
4335 			msg.netfn = IPMI_NETFN_SENSOR_EVENT_REQUEST;
4336 			msg.cmd = IPMI_GET_EVENT_RECEIVER_CMD;
4337 			msg.data = NULL;
4338 			msg.data_len = 0;
4339 			intf->null_user_handler = event_receiver_fetcher;
4340 			i_ipmi_request(NULL,
4341 				       intf,
4342 				       &addr,
4343 				       0,
4344 				       &msg,
4345 				       intf,
4346 				       &smi_msg,
4347 				       &recv_msg,
4348 				       0,
4349 				       intf->channels[0].address,
4350 				       intf->channels[0].lun,
4351 				       0, 1); /* no retry, and no wait. */
4352 		}
4353 		intf->null_user_handler = NULL;
4354 
4355 		/*
4356 		 * Validate the event receiver.  The low bit must not
4357 		 * be 1 (it must be a valid IPMB address), it cannot
4358 		 * be zero, and it must not be my address.
4359 		 */
4360 		if (((intf->event_receiver & 1) == 0)
4361 		    && (intf->event_receiver != 0)
4362 		    && (intf->event_receiver != intf->channels[0].address)) {
4363 			/*
4364 			 * The event receiver is valid, send an IPMB
4365 			 * message.
4366 			 */
4367 			ipmb = (struct ipmi_ipmb_addr *) &addr;
4368 			ipmb->addr_type = IPMI_IPMB_ADDR_TYPE;
4369 			ipmb->channel = 0; /* FIXME - is this right? */
4370 			ipmb->lun = intf->event_receiver_lun;
4371 			ipmb->slave_addr = intf->event_receiver;
4372 		} else if (intf->local_sel_device) {
4373 			/*
4374 			 * The event receiver was not valid (or was
4375 			 * me), but I am an SEL device, just dump it
4376 			 * in my SEL.
4377 			 */
4378 			si = (struct ipmi_system_interface_addr *) &addr;
4379 			si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
4380 			si->channel = IPMI_BMC_CHANNEL;
4381 			si->lun = 0;
4382 		} else
4383 			continue; /* Nowhere to send the event. */
4384 
4385 		msg.netfn = IPMI_NETFN_STORAGE_REQUEST; /* Storage. */
4386 		msg.cmd = IPMI_ADD_SEL_ENTRY_CMD;
4387 		msg.data = data;
4388 		msg.data_len = 16;
4389 
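		/*
		 * The string is split into 11-byte chunks, one OEM SEL
		 * record per chunk, with data[4] carrying an incrementing
		 * sequence number.  For example, a (hypothetical) panic
		 * string "machine check" would go out as two records
		 * holding "machine che" and "ck".
		 */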
4390 		j = 0;
4391 		while (*p) {
4392 			int size = strlen(p);
4393 
4394 			if (size > 11)
4395 				size = 11;
4396 			data[0] = 0;
4397 			data[1] = 0;
4398 			data[2] = 0xf0; /* OEM event without timestamp. */
4399 			data[3] = intf->channels[0].address;
4400 			data[4] = j++; /* sequence # */
4401 			/*
4402 			 * Always give 11 bytes, so strncpy will fill
4403 			 * it with zeroes for me.
4404 			 */
4405 			strncpy(data+5, p, 11);
4406 			p += size;
4407 
4408 			i_ipmi_request(NULL,
4409 				       intf,
4410 				       &addr,
4411 				       0,
4412 				       &msg,
4413 				       intf,
4414 				       &smi_msg,
4415 				       &recv_msg,
4416 				       0,
4417 				       intf->channels[0].address,
4418 				       intf->channels[0].lun,
4419 				       0, 1); /* no retry, and no wait. */
4420 		}
4421 	}
4422 #endif /* CONFIG_IPMI_PANIC_STRING */
4423 }
4424 #endif /* CONFIG_IPMI_PANIC_EVENT */
4425 
4426 static int has_panicked;
4427 
4428 static int panic_event(struct notifier_block *this,
4429 		       unsigned long         event,
4430 		       void                  *ptr)
4431 {
4432 	ipmi_smi_t intf;
4433 
4434 	if (has_panicked)
4435 		return NOTIFY_DONE;
4436 	has_panicked = 1;
4437 
4438 	/* For every registered interface, set it to run to completion. */
4439 	list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
4440 		if (!intf->handlers)
4441 			/* Interface is not ready. */
4442 			continue;
4443 
4444 		intf->run_to_completion = 1;
4445 		intf->handlers->set_run_to_completion(intf->send_info, 1);
4446 	}
4447 
4448 #ifdef CONFIG_IPMI_PANIC_EVENT
4449 	send_panic_events(ptr);
4450 #endif
4451 
4452 	return NOTIFY_DONE;
4453 }
4454 
4455 static struct notifier_block panic_block = {
4456 	.notifier_call	= panic_event,
4457 	.next		= NULL,
4458 	.priority	= 200	/* priority: INT_MAX >= x >= 0 */
4459 };
4460 
4461 static int ipmi_init_msghandler(void)
4462 {
4463 	int rv;
4464 
4465 	if (initialized)
4466 		return 0;
4467 
4468 	rv = driver_register(&ipmidriver.driver);
4469 	if (rv) {
4470 		printk(KERN_ERR PFX "Could not register IPMI driver\n");
4471 		return rv;
4472 	}
4473 
4474 	printk(KERN_INFO "ipmi message handler version "
4475 	       IPMI_DRIVER_VERSION "\n");
4476 
4477 #ifdef CONFIG_PROC_FS
4478 	proc_ipmi_root = proc_mkdir("ipmi", NULL);
4479 	if (!proc_ipmi_root) {
4480 		printk(KERN_ERR PFX "Unable to create IPMI proc dir\n");
		driver_unregister(&ipmidriver.driver);
4481 		return -ENOMEM;
4482 	}
4483 
4484 #endif /* CONFIG_PROC_FS */
4485 
4486 	setup_timer(&ipmi_timer, ipmi_timeout, 0);
4487 	mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
4488 
4489 	atomic_notifier_chain_register(&panic_notifier_list, &panic_block);
4490 
4491 	initialized = 1;
4492 
4493 	return 0;
4494 }
4495 
4496 static int __init ipmi_init_msghandler_mod(void)
4497 {
4498 	ipmi_init_msghandler();
4499 	return 0;
4500 }
4501 
4502 static void __exit cleanup_ipmi(void)
4503 {
4504 	int count;
4505 
4506 	if (!initialized)
4507 		return;
4508 
4509 	atomic_notifier_chain_unregister(&panic_notifier_list, &panic_block);
4510 
4511 	/*
4512 	 * This can't be called if any interfaces exist, so no worry
4513 	 * about shutting down the interfaces.
4514 	 */
4515 
4516 	/*
4517 	 * Tell the timer to stop, then wait for it to stop.  This
4518 	 * avoids problems with race conditions removing the timer
4519 	 * here.
4520 	 */
4521 	atomic_inc(&stop_operation);
4522 	del_timer_sync(&ipmi_timer);
4523 
4524 #ifdef CONFIG_PROC_FS
4525 	remove_proc_entry(proc_ipmi_root->name, NULL);
4526 #endif /* CONFIG_PROC_FS */
4527 
4528 	driver_unregister(&ipmidriver.driver);
4529 
4530 	initialized = 0;
4531 
4532 	/* Check for buffer leaks. */
4533 	count = atomic_read(&smi_msg_inuse_count);
4534 	if (count != 0)
4535 		printk(KERN_WARNING PFX "SMI message count %d at exit\n",
4536 		       count);
4537 	count = atomic_read(&recv_msg_inuse_count);
4538 	if (count != 0)
4539 		printk(KERN_WARNING PFX "recv message count %d at exit\n",
4540 		       count);
4541 }
4542 module_exit(cleanup_ipmi);
4543 
4544 module_init(ipmi_init_msghandler_mod);
4545 MODULE_LICENSE("GPL");
4546 MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>");
4547 MODULE_DESCRIPTION("Incoming and outgoing message routing for an IPMI"
4548 		   " interface.");
4549 MODULE_VERSION(IPMI_DRIVER_VERSION);
4550