xref: /freebsd/sys/dev/ipmi/ipmi.c (revision 3de0952fba9607fbcad3009366bc9c4d9c899b27)
1 /*-
2  * Copyright (c) 2006 IronPort Systems Inc. <ambrisko@ironport.com>
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  *
14  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24  * SUCH DAMAGE.
25  */
26 
27 #include <sys/cdefs.h>
28 __FBSDID("$FreeBSD$");
29 
30 #include <sys/param.h>
31 #include <sys/systm.h>
32 #include <sys/bus.h>
33 #include <sys/condvar.h>
34 #include <sys/conf.h>
35 #include <sys/kernel.h>
36 #include <sys/malloc.h>
37 #include <sys/module.h>
38 #include <sys/poll.h>
39 #include <sys/reboot.h>
40 #include <sys/rman.h>
41 #include <sys/selinfo.h>
42 #include <sys/sysctl.h>
43 #include <sys/watchdog.h>
44 
45 #ifdef LOCAL_MODULE
46 #include <ipmi.h>
47 #include <ipmivars.h>
48 #else
49 #include <sys/ipmi.h>
50 #include <dev/ipmi/ipmivars.h>
51 #endif
52 
53 /*
54  * Driver request structures are allocated on the stack via alloca() to
55  * avoid calling malloc(), especially for the watchdog handler.
56  * To avoid too much stack growth, a previously allocated structure can
57  * be reused via IPMI_INIT_DRIVER_REQUEST(), but the caller should ensure
58  * that there is adequate reply/request space in the original allocation.
59  */
/*
 * Re-initialize an already-allocated driver request for a new command.
 * NOTE(review): multi-statement macro that is not wrapped in
 * do { } while (0); callers must not use it as the body of an
 * unbraced if/else.
 */
#define	IPMI_INIT_DRIVER_REQUEST(req, addr, cmd, reqlen, replylen)	\
	bzero((req), sizeof(struct ipmi_request));			\
	ipmi_init_request((req), NULL, 0, (addr), (cmd), (reqlen), (replylen))

/*
 * Allocate a driver request plus its request/reply buffers on the
 * caller's stack via alloca() and initialize it.  The storage is
 * reclaimed when the calling function returns.
 */
#define	IPMI_ALLOC_DRIVER_REQUEST(req, addr, cmd, reqlen, replylen)	\
	(req) = __builtin_alloca(sizeof(struct ipmi_request) +		\
	    (reqlen) + (replylen));					\
	IPMI_INIT_DRIVER_REQUEST((req), (addr), (cmd), (reqlen),	\
	    (replylen))
69 
#ifdef IPMB
/*
 * Forward declarations for the (experimental) IPMB support.
 * Fixes: the send_message prototype was missing its terminating
 * semicolon, and both prototypes disagreed with the definitions
 * below, which take a byte buffer (u_char *), not a single u_char.
 */
static int ipmi_ipmb_checksum(u_char *, int);
static int ipmi_ipmb_send_message(device_t, u_char, u_char, u_char,
     u_char, u_char *, int);
#endif
75 
/* Character-device entry points implemented below. */
static d_ioctl_t ipmi_ioctl;
static d_poll_t ipmi_poll;
static d_open_t ipmi_open;
static void ipmi_dtor(void *arg);

/* Non-zero once at least one IPMI interface has attached. */
int ipmi_attached = 0;

/* hw.ipmi.on: administrative enable; when 0, open() fails with ENOENT. */
static int on = 1;
static SYSCTL_NODE(_hw, OID_AUTO, ipmi, CTLFLAG_RD, 0,
    "IPMI driver parameters");
SYSCTL_INT(_hw_ipmi, OID_AUTO, on, CTLFLAG_RW,
	&on, 0, "");

/* /dev/ipmiN switch table. */
static struct cdevsw ipmi_cdevsw = {
	.d_version =    D_VERSION,
	.d_open =	ipmi_open,
	.d_ioctl =	ipmi_ioctl,
	.d_poll =	ipmi_poll,
	.d_name =	"ipmi",
};

static MALLOC_DEFINE(M_IPMI, "ipmi", "ipmi");
98 
99 static int
100 ipmi_open(struct cdev *cdev, int flags, int fmt, struct thread *td)
101 {
102 	struct ipmi_device *dev;
103 	struct ipmi_softc *sc;
104 	int error;
105 
106 	if (!on)
107 		return (ENOENT);
108 
109 	/* Initialize the per file descriptor data. */
110 	dev = malloc(sizeof(struct ipmi_device), M_IPMI, M_WAITOK | M_ZERO);
111 	error = devfs_set_cdevpriv(dev, ipmi_dtor);
112 	if (error) {
113 		free(dev, M_IPMI);
114 		return (error);
115 	}
116 
117 	sc = cdev->si_drv1;
118 	TAILQ_INIT(&dev->ipmi_completed_requests);
119 	dev->ipmi_address = IPMI_BMC_SLAVE_ADDR;
120 	dev->ipmi_lun = IPMI_BMC_SMS_LUN;
121 	dev->ipmi_softc = sc;
122 	IPMI_LOCK(sc);
123 	sc->ipmi_opened++;
124 	IPMI_UNLOCK(sc);
125 
126 	return (0);
127 }
128 
129 static int
130 ipmi_poll(struct cdev *cdev, int poll_events, struct thread *td)
131 {
132 	struct ipmi_device *dev;
133 	struct ipmi_softc *sc;
134 	int revents = 0;
135 
136 	if (devfs_get_cdevpriv((void **)&dev))
137 		return (0);
138 
139 	sc = cdev->si_drv1;
140 	IPMI_LOCK(sc);
141 	if (poll_events & (POLLIN | POLLRDNORM)) {
142 		if (!TAILQ_EMPTY(&dev->ipmi_completed_requests))
143 		    revents |= poll_events & (POLLIN | POLLRDNORM);
144 		if (dev->ipmi_requests == 0)
145 		    revents |= POLLERR;
146 	}
147 
148 	if (revents == 0) {
149 		if (poll_events & (POLLIN | POLLRDNORM))
150 			selrecord(td, &dev->ipmi_select);
151 	}
152 	IPMI_UNLOCK(sc);
153 
154 	return (revents);
155 }
156 
157 static void
158 ipmi_purge_completed_requests(struct ipmi_device *dev)
159 {
160 	struct ipmi_request *req;
161 
162 	while (!TAILQ_EMPTY(&dev->ipmi_completed_requests)) {
163 		req = TAILQ_FIRST(&dev->ipmi_completed_requests);
164 		TAILQ_REMOVE(&dev->ipmi_completed_requests, req, ir_link);
165 		dev->ipmi_requests--;
166 		ipmi_free_request(req);
167 	}
168 }
169 
/*
 * cdevpriv destructor: runs on last close of a descriptor.  Cancels
 * this descriptor's pending requests, drains any that an interface
 * driver is still processing, then releases the per-descriptor state.
 */
static void
ipmi_dtor(void *arg)
{
	struct ipmi_request *req, *nreq;
	struct ipmi_device *dev;
	struct ipmi_softc *sc;

	dev = arg;
	sc = dev->ipmi_softc;

	IPMI_LOCK(sc);
	if (dev->ipmi_requests) {
		/* Throw away any pending requests for this device. */
		TAILQ_FOREACH_SAFE(req, &sc->ipmi_pending_requests, ir_link,
		    nreq) {
			if (req->ir_owner == dev) {
				TAILQ_REMOVE(&sc->ipmi_pending_requests, req,
				    ir_link);
				dev->ipmi_requests--;
				ipmi_free_request(req);
			}
		}

		/* Throw away any pending completed requests for this device. */
		ipmi_purge_completed_requests(dev);

		/*
		 * If we still have outstanding requests, they must be stuck
		 * in an interface driver, so wait for those to drain.
		 * ipmi_complete_request() wakes us (ipmi_closing is set)
		 * each time one finishes.
		 */
		dev->ipmi_closing = 1;
		while (dev->ipmi_requests > 0) {
			/* Sleeps on the requests lock held by IPMI_LOCK. */
			msleep(&dev->ipmi_requests, &sc->ipmi_requests_lock,
			    PWAIT, "ipmidrain", 0);
			ipmi_purge_completed_requests(dev);
		}
	}
	sc->ipmi_opened--;
	IPMI_UNLOCK(sc);

	/* Cleanup. */
	free(dev, M_IPMI);
}
213 
214 #ifdef IPMB
215 static int
216 ipmi_ipmb_checksum(u_char *data, int len)
217 {
218 	u_char sum = 0;
219 
220 	for (; len; len--) {
221 		sum += *data++;
222 	}
223 	return (-sum);
224 }
225 
226 /* XXX: Needs work */
227 static int
228 ipmi_ipmb_send_message(device_t dev, u_char channel, u_char netfn,
229     u_char command, u_char seq, u_char *data, int data_len)
230 {
231 	struct ipmi_softc *sc = device_get_softc(dev);
232 	struct ipmi_request *req;
233 	u_char slave_addr = 0x52;
234 	int error;
235 
236 	IPMI_ALLOC_DRIVER_REQUEST(req, IPMI_ADDR(IPMI_APP_REQUEST, 0),
237 	    IPMI_SEND_MSG, data_len + 8, 0);
238 	req->ir_request[0] = channel;
239 	req->ir_request[1] = slave_addr;
240 	req->ir_request[2] = IPMI_ADDR(netfn, 0);
241 	req->ir_request[3] = ipmi_ipmb_checksum(&req->ir_request[1], 2);
242 	req->ir_request[4] = sc->ipmi_address;
243 	req->ir_request[5] = IPMI_ADDR(seq, sc->ipmi_lun);
244 	req->ir_request[6] = command;
245 
246 	bcopy(data, &req->ir_request[7], data_len);
247 	temp[data_len + 7] = ipmi_ipmb_checksum(&req->ir_request[4],
248 	    data_len + 3);
249 
250 	ipmi_submit_driver_request(sc, req);
251 	error = req->ir_error;
252 
253 	return (error);
254 }
255 
256 static int
257 ipmi_handle_attn(struct ipmi_softc *sc)
258 {
259 	struct ipmi_request *req;
260 	int error;
261 
262 	device_printf(sc->ipmi_dev, "BMC has a message\n");
263 	IPMI_ALLOC_DRIVER_REQUEST(req, IPMI_ADDR(IPMI_APP_REQUEST, 0),
264 	    IPMI_GET_MSG_FLAGS, 0, 1);
265 
266 	ipmi_submit_driver_request(sc, req);
267 
268 	if (req->ir_error == 0 && req->ir_compcode == 0) {
269 		if (req->ir_reply[0] & IPMI_MSG_BUFFER_FULL) {
270 			device_printf(sc->ipmi_dev, "message buffer full");
271 		}
272 		if (req->ir_reply[0] & IPMI_WDT_PRE_TIMEOUT) {
273 			device_printf(sc->ipmi_dev,
274 			    "watchdog about to go off");
275 		}
276 		if (req->ir_reply[0] & IPMI_MSG_AVAILABLE) {
277 			IPMI_ALLOC_DRIVER_REQUEST(req,
278 			    IPMI_ADDR(IPMI_APP_REQUEST, 0), IPMI_GET_MSG, 0,
279 			    16);
280 
281 			device_printf(sc->ipmi_dev, "throw out message ");
282 			dump_buf(temp, 16);
283 		}
284 	}
285 	error = req->ir_error;
286 
287 	return (error);
288 }
289 #endif
290 
#ifdef IPMICTL_SEND_COMMAND_32
/* Widen/narrow user pointers for the 32-bit compat ioctl structures. */
#define	PTRIN(p)	((void *)(uintptr_t)(p))
#define	PTROUT(p)	((uintptr_t)(p))
#endif
295 
/*
 * ioctl handler for /dev/ipmiN.  Implements the OpenIPMI-compatible
 * command set: submit a message to the BMC, fetch a completed reply,
 * and get/set the per-descriptor slave address and LUN.  32-bit
 * requests are thunked to the native layout first and the modified
 * fields copied back out at the end.
 */
static int
ipmi_ioctl(struct cdev *cdev, u_long cmd, caddr_t data,
    int flags, struct thread *td)
{
	struct ipmi_softc *sc;
	struct ipmi_device *dev;
	struct ipmi_request *kreq;
	struct ipmi_req *req = (struct ipmi_req *)data;
	struct ipmi_recv *recv = (struct ipmi_recv *)data;
	struct ipmi_addr addr;
#ifdef IPMICTL_SEND_COMMAND_32
	struct ipmi_req32 *req32 = (struct ipmi_req32 *)data;
	struct ipmi_recv32 *recv32 = (struct ipmi_recv32 *)data;
	union {
		struct ipmi_req req;
		struct ipmi_recv recv;
	} thunk32;
#endif
	int error, len;

	error = devfs_get_cdevpriv((void **)&dev);
	if (error)
		return (error);

	sc = cdev->si_drv1;

#ifdef IPMICTL_SEND_COMMAND_32
	/* Convert 32-bit structures to native. */
	switch (cmd) {
	case IPMICTL_SEND_COMMAND_32:
		req = &thunk32.req;
		req->addr = PTRIN(req32->addr);
		req->addr_len = req32->addr_len;
		req->msgid = req32->msgid;
		req->msg.netfn = req32->msg.netfn;
		req->msg.cmd = req32->msg.cmd;
		req->msg.data_len = req32->msg.data_len;
		req->msg.data = PTRIN(req32->msg.data);
		break;
	case IPMICTL_RECEIVE_MSG_TRUNC_32:
	case IPMICTL_RECEIVE_MSG_32:
		recv = &thunk32.recv;
		recv->addr = PTRIN(recv32->addr);
		recv->addr_len = recv32->addr_len;
		recv->msg.data_len = recv32->msg.data_len;
		recv->msg.data = PTRIN(recv32->msg.data);
		break;
	}
#endif

	switch (cmd) {
#ifdef IPMICTL_SEND_COMMAND_32
	case IPMICTL_SEND_COMMAND_32:
#endif
	case IPMICTL_SEND_COMMAND:
		/*
		 * XXX: Need to add proper handling of this.
		 */
		/* addr is copied in but otherwise unused on the send path. */
		error = copyin(req->addr, &addr, sizeof(addr));
		if (error)
			return (error);

		IPMI_LOCK(sc);
		/* clear out old stuff in queue of stuff done */
		/* XXX: This seems odd. */
		while ((kreq = TAILQ_FIRST(&dev->ipmi_completed_requests))) {
			TAILQ_REMOVE(&dev->ipmi_completed_requests, kreq,
			    ir_link);
			dev->ipmi_requests--;
			ipmi_free_request(kreq);
		}
		IPMI_UNLOCK(sc);

		/* Build a kernel request from the user message. */
		kreq = ipmi_alloc_request(dev, req->msgid,
		    IPMI_ADDR(req->msg.netfn, 0), req->msg.cmd,
		    req->msg.data_len, IPMI_MAX_RX);
		error = copyin(req->msg.data, kreq->ir_request,
		    req->msg.data_len);
		if (error) {
			ipmi_free_request(kreq);
			return (error);
		}
		IPMI_LOCK(sc);
		dev->ipmi_requests++;
		/*
		 * NOTE(review): if enqueue fails, kreq is not freed and
		 * ipmi_requests is not decremented — apparent leak; confirm
		 * whether any backend's enqueue can actually fail.
		 */
		error = sc->ipmi_enqueue_request(sc, kreq);
		IPMI_UNLOCK(sc);
		if (error)
			return (error);
		break;
#ifdef IPMICTL_SEND_COMMAND_32
	case IPMICTL_RECEIVE_MSG_TRUNC_32:
	case IPMICTL_RECEIVE_MSG_32:
#endif
	case IPMICTL_RECEIVE_MSG_TRUNC:
	case IPMICTL_RECEIVE_MSG:
		error = copyin(recv->addr, &addr, sizeof(addr));
		if (error)
			return (error);

		IPMI_LOCK(sc);
		kreq = TAILQ_FIRST(&dev->ipmi_completed_requests);
		if (kreq == NULL) {
			IPMI_UNLOCK(sc);
			return (EAGAIN);
		}
		addr.channel = IPMI_BMC_CHANNEL;
		/* XXX */
		recv->recv_type = IPMI_RESPONSE_RECV_TYPE;
		recv->msgid = kreq->ir_msgid;
		recv->msg.netfn = IPMI_REPLY_ADDR(kreq->ir_addr) >> 2;
		recv->msg.cmd = kreq->ir_command;
		error = kreq->ir_error;
		if (error) {
			TAILQ_REMOVE(&dev->ipmi_completed_requests, kreq,
			    ir_link);
			dev->ipmi_requests--;
			IPMI_UNLOCK(sc);
			ipmi_free_request(kreq);
			return (error);
		}
		/* Reply plus one leading completion-code byte. */
		len = kreq->ir_replylen + 1;
		/* Non-TRUNC variants refuse to deliver a short reply. */
		if (recv->msg.data_len < len &&
		    (cmd == IPMICTL_RECEIVE_MSG
#ifdef IPMICTL_RECEIVE_MSG_32
		     || cmd == IPMICTL_RECEIVE_MSG_32
#endif
		    )) {
			IPMI_UNLOCK(sc);
			return (EMSGSIZE);
		}
		TAILQ_REMOVE(&dev->ipmi_completed_requests, kreq, ir_link);
		dev->ipmi_requests--;
		IPMI_UNLOCK(sc);
		len = min(recv->msg.data_len, len);
		recv->msg.data_len = len;
		error = copyout(&addr, recv->addr,sizeof(addr));
		if (error == 0)
			error = copyout(&kreq->ir_compcode, recv->msg.data, 1);
		if (error == 0)
			error = copyout(kreq->ir_reply, recv->msg.data + 1,
			    len - 1);
		ipmi_free_request(kreq);
		if (error)
			return (error);
		break;
	case IPMICTL_SET_MY_ADDRESS_CMD:
		IPMI_LOCK(sc);
		dev->ipmi_address = *(int*)data;
		IPMI_UNLOCK(sc);
		break;
	case IPMICTL_GET_MY_ADDRESS_CMD:
		IPMI_LOCK(sc);
		*(int*)data = dev->ipmi_address;
		IPMI_UNLOCK(sc);
		break;
	case IPMICTL_SET_MY_LUN_CMD:
		IPMI_LOCK(sc);
		dev->ipmi_lun = *(int*)data & 0x3;
		IPMI_UNLOCK(sc);
		break;
	case IPMICTL_GET_MY_LUN_CMD:
		IPMI_LOCK(sc);
		*(int*)data = dev->ipmi_lun;
		IPMI_UNLOCK(sc);
		break;
	case IPMICTL_SET_GETS_EVENTS_CMD:
		/*
		device_printf(sc->ipmi_dev,
		    "IPMICTL_SET_GETS_EVENTS_CMD NA\n");
		*/
		break;
	case IPMICTL_REGISTER_FOR_CMD:
	case IPMICTL_UNREGISTER_FOR_CMD:
		return (EOPNOTSUPP);
	default:
		device_printf(sc->ipmi_dev, "Unknown IOCTL %lX\n", cmd);
		return (ENOIOCTL);
	}

#ifdef IPMICTL_SEND_COMMAND_32
	/* Update changed fields in 32-bit structures. */
	switch (cmd) {
	case IPMICTL_RECEIVE_MSG_TRUNC_32:
	case IPMICTL_RECEIVE_MSG_32:
		recv32->recv_type = recv->recv_type;
		recv32->msgid = recv->msgid;
		recv32->msg.netfn = recv->msg.netfn;
		recv32->msg.cmd = recv->msg.cmd;
		recv32->msg.data_len = recv->msg.data_len;
		break;
	}
#endif
	return (0);
}
490 
491 /*
492  * Request management.
493  */
494 
495 static __inline void
496 ipmi_init_request(struct ipmi_request *req, struct ipmi_device *dev, long msgid,
497     uint8_t addr, uint8_t command, size_t requestlen, size_t replylen)
498 {
499 
500 	req->ir_owner = dev;
501 	req->ir_msgid = msgid;
502 	req->ir_addr = addr;
503 	req->ir_command = command;
504 	if (requestlen) {
505 		req->ir_request = (char *)&req[1];
506 		req->ir_requestlen = requestlen;
507 	}
508 	if (replylen) {
509 		req->ir_reply = (char *)&req[1] + requestlen;
510 		req->ir_replybuflen = replylen;
511 	}
512 }
513 
514 /* Allocate a new request with request and reply buffers. */
515 struct ipmi_request *
516 ipmi_alloc_request(struct ipmi_device *dev, long msgid, uint8_t addr,
517     uint8_t command, size_t requestlen, size_t replylen)
518 {
519 	struct ipmi_request *req;
520 
521 	req = malloc(sizeof(struct ipmi_request) + requestlen + replylen,
522 	    M_IPMI, M_WAITOK | M_ZERO);
523 	ipmi_init_request(req, dev, msgid, addr, command, requestlen, replylen);
524 	return (req);
525 }
526 
/*
 * Free a request no longer in use.  The embedded request/reply buffers
 * are part of the same allocation, so a single free releases them too.
 */
void
ipmi_free_request(struct ipmi_request *req)
{

	free(req, M_IPMI);
}
534 
535 /* Store a processed request on the appropriate completion queue. */
536 void
537 ipmi_complete_request(struct ipmi_softc *sc, struct ipmi_request *req)
538 {
539 	struct ipmi_device *dev;
540 
541 	IPMI_LOCK_ASSERT(sc);
542 
543 	/*
544 	 * Anonymous requests (from inside the driver) always have a
545 	 * waiter that we awaken.
546 	 */
547 	if (req->ir_owner == NULL)
548 		wakeup(req);
549 	else {
550 		dev = req->ir_owner;
551 		TAILQ_INSERT_TAIL(&dev->ipmi_completed_requests, req, ir_link);
552 		selwakeup(&dev->ipmi_select);
553 		if (dev->ipmi_closing)
554 			wakeup(&dev->ipmi_requests);
555 	}
556 }
557 
/*
 * Perform an internal driver request synchronously via the backend,
 * waiting up to 'timo' ticks (0 = backend default/forever — confirm
 * against each backend's ipmi_driver_request implementation).
 */
int
ipmi_submit_driver_request(struct ipmi_softc *sc, struct ipmi_request *req,
    int timo)
{

	return (sc->ipmi_driver_request(sc, req, timo));
}
566 
567 /*
568  * Helper routine for polled system interfaces that use
569  * ipmi_polled_enqueue_request() to queue requests.  This request
570  * waits until there is a pending request and then returns the first
571  * request.  If the driver is shutting down, it returns NULL.
572  */
573 struct ipmi_request *
574 ipmi_dequeue_request(struct ipmi_softc *sc)
575 {
576 	struct ipmi_request *req;
577 
578 	IPMI_LOCK_ASSERT(sc);
579 
580 	while (!sc->ipmi_detaching && TAILQ_EMPTY(&sc->ipmi_pending_requests))
581 		cv_wait(&sc->ipmi_request_added, &sc->ipmi_requests_lock);
582 	if (sc->ipmi_detaching)
583 		return (NULL);
584 
585 	req = TAILQ_FIRST(&sc->ipmi_pending_requests);
586 	TAILQ_REMOVE(&sc->ipmi_pending_requests, req, ir_link);
587 	return (req);
588 }
589 
/*
 * Default implementation of ipmi_enqueue_request() for polled
 * interfaces: append the request and wake one worker sleeping in
 * ipmi_dequeue_request().  Caller holds the softc lock.
 */
int
ipmi_polled_enqueue_request(struct ipmi_softc *sc, struct ipmi_request *req)
{

	IPMI_LOCK_ASSERT(sc);

	TAILQ_INSERT_TAIL(&sc->ipmi_pending_requests, req, ir_link);
	cv_signal(&sc->ipmi_request_added);
	return (0);
}
601 
602 /*
603  * Watchdog event handler.
604  */
605 
/*
 * Pet the BMC watchdog (IPMI Reset Watchdog Timer command).
 * Returns the submission error; failure is logged but leaves the
 * timer state to the caller to sort out.
 */
static int
ipmi_reset_watchdog(struct ipmi_softc *sc)
{
	struct ipmi_request *req;
	int error;

	IPMI_ALLOC_DRIVER_REQUEST(req, IPMI_ADDR(IPMI_APP_REQUEST, 0),
	    IPMI_RESET_WDOG, 0, 0);
	error = ipmi_submit_driver_request(sc, req, 0);
	if (error)
		device_printf(sc->ipmi_dev, "Failed to reset watchdog\n");
	return (error);
}
619 
/*
 * Arm (sec > 0) or disarm (sec == 0) the BMC watchdog via the IPMI
 * Set Watchdog Timer command.  The BMC counts in 100 ms ticks held in
 * a 16-bit field, hence the 0xffff / 10 ceiling on seconds.
 */
static int
ipmi_set_watchdog(struct ipmi_softc *sc, unsigned int sec)
{
	struct ipmi_request *req;
	int error;

	if (sec > 0xffff / 10)
		return (EINVAL);

	IPMI_ALLOC_DRIVER_REQUEST(req, IPMI_ADDR(IPMI_APP_REQUEST, 0),
	    IPMI_SET_WDOG, 6, 0);
	if (sec) {
		/* Arm: keep running across Set commands, hard reset action. */
		req->ir_request[0] = IPMI_SET_WD_TIMER_DONT_STOP
		    | IPMI_SET_WD_TIMER_SMS_OS;
		req->ir_request[1] = IPMI_SET_WD_ACTION_RESET;
		req->ir_request[2] = 0;
		req->ir_request[3] = 0;	/* Timer use */
		/* Timeout in 100 ms ticks, little-endian. */
		req->ir_request[4] = (sec * 10) & 0xff;
		req->ir_request[5] = (sec * 10) >> 8;
	} else {
		/* Disarm: no action, zero timeout. */
		req->ir_request[0] = IPMI_SET_WD_TIMER_SMS_OS;
		req->ir_request[1] = 0;
		req->ir_request[2] = 0;
		req->ir_request[3] = 0;	/* Timer use */
		req->ir_request[4] = 0;
		req->ir_request[5] = 0;
	}
	error = ipmi_submit_driver_request(sc, req, 0);
	if (error)
		device_printf(sc->ipmi_dev, "Failed to set watchdog\n");
	return (error);
}
652 
/*
 * watchdog(9) event handler.  cmd encodes the requested interval as
 * log2(nanoseconds) in WD_INTERVAL; 0 means disarm.  *error is left
 * untouched (i.e. still failed) unless the hardware watchdog was
 * successfully serviced.
 */
static void
ipmi_wd_event(void *arg, unsigned int cmd, int *error)
{
	struct ipmi_softc *sc = arg;
	unsigned int timeout;
	int e;

	/* Ignore requests while dumping core. */
	if (dumping)
		return;

	cmd &= WD_INTERVAL;
	if (cmd > 0 && cmd <= 63) {
		/* Convert 2^cmd nanoseconds to whole seconds, minimum 1. */
		timeout = ((uint64_t)1 << cmd) / 1000000000;
		if (timeout == 0)
			timeout = 1;
		/* Reprogram the BMC only when the interval changed. */
		if (timeout != sc->ipmi_watchdog_active) {
			e = ipmi_set_watchdog(sc, timeout);
			if (e == 0) {
				sc->ipmi_watchdog_active = timeout;
			} else {
				/* Failed to arm: make sure it is off. */
				(void)ipmi_set_watchdog(sc, 0);
				sc->ipmi_watchdog_active = 0;
			}
		}
		if (sc->ipmi_watchdog_active != 0) {
			/* Pet the (already armed) timer. */
			e = ipmi_reset_watchdog(sc);
			if (e == 0) {
				*error = 0;
			} else {
				(void)ipmi_set_watchdog(sc, 0);
				sc->ipmi_watchdog_active = 0;
			}
		}
	} else if (atomic_readandclear_int(&sc->ipmi_watchdog_active) != 0) {
		/* Disarm request (or out-of-range interval) while armed. */
		e = ipmi_set_watchdog(sc, 0);
		if (e != 0 && cmd == 0)
			*error = EOPNOTSUPP;
	}
}
692 
/*
 * shutdown_final handler: ask the BMC to power-cycle the chassis.
 * Only acts on RB_POWERCYCLE reboots; on success the system loses
 * power before the trailing DELAY() expires.
 */
static void
ipmi_power_cycle(void *arg, int howto)
{
	struct ipmi_softc *sc = arg;
	struct ipmi_request *req;

	/*
	 * Ignore everything except power cycling requests
	 */
	if ((howto & RB_POWERCYCLE) == 0)
		return;

	device_printf(sc->ipmi_dev, "Power cycling using IPMI\n");

	/*
	 * Send a CHASSIS_CONTROL command to the CHASSIS device, subcommand 2
	 * as described in IPMI v2.0 spec section 28.3.
	 */
	IPMI_ALLOC_DRIVER_REQUEST(req, IPMI_ADDR(IPMI_CHASSIS_REQUEST, 0),
	    IPMI_CHASSIS_CONTROL, 1, 0);
	req->ir_request[0] = IPMI_CC_POWER_CYCLE;

	ipmi_submit_driver_request(sc, req, MAX_TIMEOUT);

	if (req->ir_error != 0 || req->ir_compcode != 0) {
		device_printf(sc->ipmi_dev, "Power cycling via IPMI failed code %#x %#x\n",
		    req->ir_error, req->ir_compcode);
		return;
	}

	/*
	 * BMCs are notoriously slow, give it up to 10s to effect the power
	 * down leg of the power cycle. If that fails, fallback to the next
	 * handler in the shutdown_final chain and/or the platform failsafe.
	 */
	DELAY(10 * 1000 * 1000);
	device_printf(sc->ipmi_dev, "Power cycling via IPMI timed out\n");
}
731 
/*
 * Deferred attach, run from the config intrhook once interrupts work:
 * initialize locks/queues, start the backend, identify the BMC, count
 * channels, probe for a watchdog, create /dev/ipmiN, and register the
 * power-cycle shutdown handler when the BMC has a Chassis device.
 * Early-return paths leave the device attached but without a cdev.
 */
static void
ipmi_startup(void *arg)
{
	struct ipmi_softc *sc = arg;
	struct ipmi_request *req;
	device_t dev;
	int error, i;

	config_intrhook_disestablish(&sc->ipmi_ich);
	dev = sc->ipmi_dev;

	/* Initialize interface-independent state. */
	mtx_init(&sc->ipmi_requests_lock, "ipmi requests", NULL, MTX_DEF);
	mtx_init(&sc->ipmi_io_lock, "ipmi io", NULL, MTX_DEF);
	cv_init(&sc->ipmi_request_added, "ipmireq");
	TAILQ_INIT(&sc->ipmi_pending_requests);

	/* Initialize interface-dependent state. */
	error = sc->ipmi_startup(sc);
	if (error) {
		device_printf(dev, "Failed to initialize interface: %d\n",
		    error);
		return;
	}

	/* Send a GET_DEVICE_ID request. */
	IPMI_ALLOC_DRIVER_REQUEST(req, IPMI_ADDR(IPMI_APP_REQUEST, 0),
	    IPMI_GET_DEVICE_ID, 0, 15);

	error = ipmi_submit_driver_request(sc, req, MAX_TIMEOUT);
	if (error == EWOULDBLOCK) {
		device_printf(dev, "Timed out waiting for GET_DEVICE_ID\n");
		return;
	} else if (error) {
		device_printf(dev, "Failed GET_DEVICE_ID: %d\n", error);
		return;
	} else if (req->ir_compcode != 0) {
		device_printf(dev,
		    "Bad completion code for GET_DEVICE_ID: %d\n",
		    req->ir_compcode);
		return;
	} else if (req->ir_replylen < 5) {
		device_printf(dev, "Short reply for GET_DEVICE_ID: %d\n",
		    req->ir_replylen);
		return;
	}

	/* Decode the Get Device ID reply fields for the boot banner. */
	device_printf(dev, "IPMI device rev. %d, firmware rev. %d.%d%d, "
	    "version %d.%d, device support mask %#x\n",
	    req->ir_reply[1] & 0x0f,
	    req->ir_reply[2] & 0x7f, req->ir_reply[3] >> 4, req->ir_reply[3] & 0x0f,
	    req->ir_reply[4] & 0x0f, req->ir_reply[4] >> 4, req->ir_reply[5]);

	sc->ipmi_dev_support = req->ir_reply[5];

	/* Clear any stale message flags left over from before boot. */
	IPMI_INIT_DRIVER_REQUEST(req, IPMI_ADDR(IPMI_APP_REQUEST, 0),
	    IPMI_CLEAR_FLAGS, 1, 0);

	ipmi_submit_driver_request(sc, req, 0);

	/* XXX: Magic numbers */
	if (req->ir_compcode == 0xc0) {
		device_printf(dev, "Clear flags is busy\n");
	}
	if (req->ir_compcode == 0xc1) {
		device_printf(dev, "Clear flags illegal\n");
	}

	/* Count channels by probing until Get Channel Info fails. */
	for (i = 0; i < 8; i++) {
		IPMI_INIT_DRIVER_REQUEST(req, IPMI_ADDR(IPMI_APP_REQUEST, 0),
		    IPMI_GET_CHANNEL_INFO, 1, 0);
		req->ir_request[0] = i;

		ipmi_submit_driver_request(sc, req, 0);

		if (req->ir_compcode != 0)
			break;
	}
	device_printf(dev, "Number of channels %d\n", i);

	/*
	 * Probe for watchdog, but only for backends which support
	 * polled driver requests.
	 */
	if (sc->ipmi_driver_requests_polled) {
		IPMI_INIT_DRIVER_REQUEST(req, IPMI_ADDR(IPMI_APP_REQUEST, 0),
		    IPMI_GET_WDOG, 0, 0);

		ipmi_submit_driver_request(sc, req, 0);

		if (req->ir_compcode == 0x00) {
			device_printf(dev, "Attached watchdog\n");
			/* register the watchdog event handler */
			sc->ipmi_watchdog_tag = EVENTHANDLER_REGISTER(
			    watchdog_list, ipmi_wd_event, sc, 0);
		}
	}

	sc->ipmi_cdev = make_dev(&ipmi_cdevsw, device_get_unit(dev),
	    UID_ROOT, GID_OPERATOR, 0660, "ipmi%d", device_get_unit(dev));
	if (sc->ipmi_cdev == NULL) {
		device_printf(dev, "Failed to create cdev\n");
		return;
	}
	sc->ipmi_cdev->si_drv1 = sc;

	/*
	 * Power cycle the system off using IPMI. We use last - 1 since we don't
	 * handle all the other kinds of reboots. We'll let others handle them.
	 * We only try to do this if the BMC supports the Chassis device.
	 */
	if (sc->ipmi_dev_support & IPMI_ADS_CHASSIS) {
		device_printf(dev, "Establishing power cycle handler\n");
		sc->ipmi_power_cycle_tag = EVENTHANDLER_REGISTER(shutdown_final,
		    ipmi_power_cycle, sc, SHUTDOWN_PRI_LAST - 1);
	}
}
849 
/*
 * Common attach entry used by the bus-specific front ends: hook up the
 * backend's interrupt (when it has one) and defer the rest of the
 * bring-up to ipmi_startup() via a config intrhook.
 */
int
ipmi_attach(device_t dev)
{
	struct ipmi_softc *sc = device_get_softc(dev);
	int error;

	if (sc->ipmi_irq_res != NULL && sc->ipmi_intr != NULL) {
		error = bus_setup_intr(dev, sc->ipmi_irq_res, INTR_TYPE_MISC,
		    NULL, sc->ipmi_intr, sc, &sc->ipmi_irq);
		if (error) {
			device_printf(dev, "can't set up interrupt\n");
			return (error);
		}
	}

	/* Run ipmi_startup() once interrupts are enabled. */
	bzero(&sc->ipmi_ich, sizeof(struct intr_config_hook));
	sc->ipmi_ich.ich_func = ipmi_startup;
	sc->ipmi_ich.ich_arg = sc;
	if (config_intrhook_establish(&sc->ipmi_ich) != 0) {
		device_printf(dev, "can't establish configuration hook\n");
		return (ENOMEM);
	}

	ipmi_attached = 1;
	return (0);
}
876 
877 int
878 ipmi_detach(device_t dev)
879 {
880 	struct ipmi_softc *sc;
881 
882 	sc = device_get_softc(dev);
883 
884 	/* Fail if there are any open handles. */
885 	IPMI_LOCK(sc);
886 	if (sc->ipmi_opened) {
887 		IPMI_UNLOCK(sc);
888 		return (EBUSY);
889 	}
890 	IPMI_UNLOCK(sc);
891 	if (sc->ipmi_cdev)
892 		destroy_dev(sc->ipmi_cdev);
893 
894 	/* Detach from watchdog handling and turn off watchdog. */
895 	if (sc->ipmi_watchdog_tag) {
896 		EVENTHANDLER_DEREGISTER(watchdog_list, sc->ipmi_watchdog_tag);
897 		ipmi_set_watchdog(sc, 0);
898 	}
899 
900 	/* Detach from shutdown handling for power cycle reboot */
901 	if (sc->ipmi_power_cycle_tag)
902 		EVENTHANDLER_DEREGISTER(shutdown_final, sc->ipmi_power_cycle_tag);
903 
904 	/* XXX: should use shutdown callout I think. */
905 	/* If the backend uses a kthread, shut it down. */
906 	IPMI_LOCK(sc);
907 	sc->ipmi_detaching = 1;
908 	if (sc->ipmi_kthread) {
909 		cv_broadcast(&sc->ipmi_request_added);
910 		msleep(sc->ipmi_kthread, &sc->ipmi_requests_lock, 0,
911 		    "ipmi_wait", 0);
912 	}
913 	IPMI_UNLOCK(sc);
914 	if (sc->ipmi_irq)
915 		bus_teardown_intr(dev, sc->ipmi_irq_res, sc->ipmi_irq);
916 
917 	ipmi_release_resources(dev);
918 	mtx_destroy(&sc->ipmi_io_lock);
919 	mtx_destroy(&sc->ipmi_requests_lock);
920 	return (0);
921 }
922 
/*
 * Release everything the bus front end allocated: interrupt handler,
 * IRQ resource, and each of the I/O resources (port or memory,
 * depending on sc->ipmi_io_type).  Safe to call with fields unset.
 */
void
ipmi_release_resources(device_t dev)
{
	struct ipmi_softc *sc;
	int i;

	sc = device_get_softc(dev);
	if (sc->ipmi_irq)
		bus_teardown_intr(dev, sc->ipmi_irq_res, sc->ipmi_irq);
	if (sc->ipmi_irq_res)
		bus_release_resource(dev, SYS_RES_IRQ, sc->ipmi_irq_rid,
		    sc->ipmi_irq_res);
	for (i = 0; i < MAX_RES; i++)
		if (sc->ipmi_io_res[i])
			bus_release_resource(dev, sc->ipmi_io_type,
			    sc->ipmi_io_rid + i, sc->ipmi_io_res[i]);
}
940 
941 devclass_t ipmi_devclass;
942 
/* XXX: Why? */
/*
 * Module unload hook: delete every device instance in the ipmi
 * devclass (which detaches them) so the module can be removed cleanly.
 */
static void
ipmi_unload(void *arg)
{
	device_t *	devs;
	int		count;
	int		i;

	if (devclass_get_devices(ipmi_devclass, &devs, &count) != 0)
		return;
	for (i = 0; i < count; i++)
		device_delete_child(device_get_parent(devs[i]), devs[i]);
	/* devclass_get_devices() allocated the array from M_TEMP. */
	free(devs, M_TEMP);
}
SYSUNINIT(ipmi_unload, SI_SUB_DRIVERS, SI_ORDER_FIRST, ipmi_unload, NULL);
958 
959 #ifdef IMPI_DEBUG
960 static void
961 dump_buf(u_char *data, int len)
962 {
963 	char buf[20];
964 	char line[1024];
965 	char temp[30];
966 	int count = 0;
967 	int i=0;
968 
969 	printf("Address %p len %d\n", data, len);
970 	if (len > 256)
971 		len = 256;
972 	line[0] = '\000';
973 	for (; len > 0; len--, data++) {
974 		sprintf(temp, "%02x ", *data);
975 		strcat(line, temp);
976 		if (*data >= ' ' && *data <= '~')
977 			buf[count] = *data;
978 		else if (*data >= 'A' && *data <= 'Z')
979 			buf[count] = *data;
980 		else
981 			buf[count] = '.';
982 		if (++count == 16) {
983 			buf[count] = '\000';
984 			count = 0;
985 			printf("  %3x  %s %s\n", i, line, buf);
986 			i+=16;
987 			line[0] = '\000';
988 		}
989 	}
990 	buf[count] = '\000';
991 
992 	for (; count != 16; count++) {
993 		strcat(line, "   ");
994 	}
995 	printf("  %3x  %s %s\n", i, line, buf);
996 }
997 #endif
998