/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2006 IronPort Systems Inc. <ambrisko@ironport.com>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/condvar.h>
#include <sys/conf.h>
#include <sys/eventhandler.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/poll.h>
#include <sys/reboot.h>
#include <sys/rman.h>
#include <sys/selinfo.h>
#include <sys/sysctl.h>
#include <sys/watchdog.h>

#ifdef LOCAL_MODULE
#include <ipmi.h>
#include <ipmivars.h>
#else
#include <sys/ipmi.h>
#include <dev/ipmi/ipmivars.h>
#endif

#ifdef IPMICTL_SEND_COMMAND_32
#include <sys/abi_compat.h>
#endif

/*
 * Driver request structures are allocated on the stack via alloca() to
 * avoid calling malloc(), especially for the watchdog handler.
 * To avoid too much stack growth, a previously allocated structure can
 * be reused via IPMI_INIT_DRIVER_REQUEST(), but the caller should ensure
 * that there is adequate reply/request space in the original allocation.
 */
#define	IPMI_INIT_DRIVER_REQUEST(req, addr, cmd, reqlen, replylen)	\
	bzero((req), sizeof(struct ipmi_request));			\
	ipmi_init_request((req), NULL, 0, (addr), (cmd), (reqlen), (replylen))

#define	IPMI_ALLOC_DRIVER_REQUEST(req, addr, cmd, reqlen, replylen)	\
	(req) = __builtin_alloca(sizeof(struct ipmi_request) +		\
	    (reqlen) + (replylen));					\
	IPMI_INIT_DRIVER_REQUEST((req), (addr), (cmd), (reqlen),	\
	    (replylen))

static d_ioctl_t ipmi_ioctl;
static d_poll_t ipmi_poll;
static d_open_t ipmi_open;
static void ipmi_dtor(void *arg);

int ipmi_attached = 0;

static int on = 1;
static bool wd_in_shutdown = false;
static int wd_timer_actions = IPMI_SET_WD_ACTION_POWER_CYCLE;
static int wd_shutdown_countdown = 0; /* sec */
static int wd_startup_countdown = 0; /* sec */
static int wd_pretimeout_countdown = 120; /* sec */
static int cycle_wait = 10; /* sec */
static int wd_init_enable = 1;

static SYSCTL_NODE(_hw, OID_AUTO, ipmi, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "IPMI driver parameters");
SYSCTL_INT(_hw_ipmi, OID_AUTO, on, CTLFLAG_RWTUN,
	&on, 0, "Enable the IPMI driver");
SYSCTL_INT(_hw_ipmi, OID_AUTO, wd_init_enable, CTLFLAG_RWTUN,
	&wd_init_enable, 1, "Enable watchdog initialization");
SYSCTL_INT(_hw_ipmi, OID_AUTO, wd_timer_actions, CTLFLAG_RWTUN,
	&wd_timer_actions, 0,
	"IPMI watchdog timer actions (including pre-timeout interrupt)");
SYSCTL_INT(_hw_ipmi, OID_AUTO, wd_shutdown_countdown, CTLFLAG_RWTUN,
	&wd_shutdown_countdown, 0,
	"IPMI watchdog countdown for shutdown (seconds)");
SYSCTL_INT(_hw_ipmi, OID_AUTO, wd_startup_countdown, CTLFLAG_RDTUN,
	&wd_startup_countdown, 0,
	"IPMI watchdog countdown initialized during startup (seconds)");
SYSCTL_INT(_hw_ipmi, OID_AUTO, wd_pretimeout_countdown, CTLFLAG_RWTUN,
	&wd_pretimeout_countdown, 0,
	"IPMI watchdog pre-timeout countdown (seconds)");
SYSCTL_INT(_hw_ipmi, OID_AUTO, cycle_wait, CTLFLAG_RWTUN,
	&cycle_wait, 0,
	"IPMI power cycle on reboot delay time (seconds)");

static struct cdevsw ipmi_cdevsw = {
	.d_version =    D_VERSION,
	.d_open =	ipmi_open,
	.d_ioctl =	ipmi_ioctl,
	.d_poll =	ipmi_poll,
	.d_name =	"ipmi",
};

static MALLOC_DEFINE(M_IPMI, "ipmi", "ipmi");

static int
ipmi_open(struct cdev *cdev, int flags, int fmt, struct thread *td)
{
	struct ipmi_device *dev;
	struct ipmi_softc *sc;
	int error;

	if (!on)
		return (ENOENT);

	/* Initialize the per file descriptor data. */
	dev = malloc(sizeof(struct ipmi_device), M_IPMI, M_WAITOK | M_ZERO);
	error = devfs_set_cdevpriv(dev, ipmi_dtor);
	if (error) {
		free(dev, M_IPMI);
		return (error);
	}

	sc = cdev->si_drv1;
	TAILQ_INIT(&dev->ipmi_completed_requests);
	dev->ipmi_address = IPMI_BMC_SLAVE_ADDR;
	dev->ipmi_lun = IPMI_BMC_SMS_LUN;
	dev->ipmi_softc = sc;
	IPMI_LOCK(sc);
	sc->ipmi_opened++;
	IPMI_UNLOCK(sc);

	return (0);
}

static int
ipmi_poll(struct cdev *cdev, int poll_events, struct thread *td)
{
	struct ipmi_device *dev;
	struct ipmi_softc *sc;
	int revents = 0;

	if (devfs_get_cdevpriv((void **)&dev))
		return (0);

	sc = cdev->si_drv1;
	IPMI_LOCK(sc);
	if (poll_events & (POLLIN | POLLRDNORM)) {
		if (!TAILQ_EMPTY(&dev->ipmi_completed_requests))
		    revents |= poll_events & (POLLIN | POLLRDNORM);
		if (dev->ipmi_requests == 0)
		    revents |= POLLERR;
	}

	if (revents == 0) {
		if (poll_events & (POLLIN | POLLRDNORM))
			selrecord(td, &dev->ipmi_select);
	}
	IPMI_UNLOCK(sc);

	return (revents);
}

static void
ipmi_purge_completed_requests(struct ipmi_device *dev)
{
	struct ipmi_request *req;

	while (!TAILQ_EMPTY(&dev->ipmi_completed_requests)) {
		req = TAILQ_FIRST(&dev->ipmi_completed_requests);
		TAILQ_REMOVE(&dev->ipmi_completed_requests, req, ir_link);
		dev->ipmi_requests--;
		ipmi_free_request(req);
	}
}

static void
ipmi_dtor(void *arg)
{
	struct ipmi_request *req, *nreq;
	struct ipmi_device *dev;
	struct ipmi_softc *sc;

	dev = arg;
	sc = dev->ipmi_softc;

	IPMI_LOCK(sc);
	if (dev->ipmi_requests) {
		/* Throw away any pending requests for this device. */
		TAILQ_FOREACH_SAFE(req, &sc->ipmi_pending_requests, ir_link,
		    nreq) {
			if (req->ir_owner == dev) {
				TAILQ_REMOVE(&sc->ipmi_pending_requests, req,
				    ir_link);
				dev->ipmi_requests--;
				ipmi_free_request(req);
			}
		}

		/* Throw away any pending completed requests for this device. */
		ipmi_purge_completed_requests(dev);

		/*
		 * If we still have outstanding requests, they must be stuck
		 * in an interface driver, so wait for those to drain.
		 */
		dev->ipmi_closing = 1;
		while (dev->ipmi_requests > 0) {
			msleep(&dev->ipmi_requests, &sc->ipmi_requests_lock,
			    PWAIT, "ipmidrain", 0);
			ipmi_purge_completed_requests(dev);
		}
	}
	sc->ipmi_opened--;
	IPMI_UNLOCK(sc);

	/* Cleanup. */
	free(dev, M_IPMI);
}

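/*
 * Compute the IPMB checksum: a two's complement sum, so that adding
 * all covered bytes plus the checksum yields zero modulo 256.
 */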
static u_char
ipmi_ipmb_checksum(u_char *data, int len)
{
	u_char sum = 0;

	for (; len; len--)
		sum += *data++;
	return (-sum);
}

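/*
 * Character device ioctl handler, compatible with the OpenIPMI ioctl
 * interface.  As a rough, non-authoritative userland sketch (structures
 * per sys/ipmi.h, error handling omitted):
 *
 *	struct ipmi_system_interface_addr bmc;
 *	struct ipmi_req req;
 *
 *	memset(&bmc, 0, sizeof(bmc));
 *	bmc.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
 *	bmc.channel = IPMI_BMC_CHANNEL;
 *	memset(&req, 0, sizeof(req));
 *	req.addr = (unsigned char *)&bmc;
 *	req.addr_len = sizeof(bmc);
 *	req.msg.netfn = 0x06;		// App netFn
 *	req.msg.cmd = 0x01;		// Get Device ID
 *	ioctl(fd, IPMICTL_SEND_COMMAND, &req);
 *	// ...poll(2) for POLLIN, then IPMICTL_RECEIVE_MSG...
 */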
static int
ipmi_ioctl(struct cdev *cdev, u_long cmd, caddr_t data,
    int flags, struct thread *td)
{
	struct ipmi_softc *sc;
	struct ipmi_device *dev;
	struct ipmi_request *kreq;
	struct ipmi_req *req = (struct ipmi_req *)data;
	struct ipmi_recv *recv = (struct ipmi_recv *)data;
	struct ipmi_addr addr;
#ifdef IPMICTL_SEND_COMMAND_32
	struct ipmi_req32 *req32 = (struct ipmi_req32 *)data;
	struct ipmi_recv32 *recv32 = (struct ipmi_recv32 *)data;
	union {
		struct ipmi_req req;
		struct ipmi_recv recv;
	} thunk32;
#endif
	int error, len;

	error = devfs_get_cdevpriv((void **)&dev);
	if (error)
		return (error);

	sc = cdev->si_drv1;

#ifdef IPMICTL_SEND_COMMAND_32
	/* Convert 32-bit structures to native. */
	switch (cmd) {
	case IPMICTL_SEND_COMMAND_32:
		req = &thunk32.req;
		req->addr = PTRIN(req32->addr);
		req->addr_len = req32->addr_len;
		req->msgid = req32->msgid;
		req->msg.netfn = req32->msg.netfn;
		req->msg.cmd = req32->msg.cmd;
		req->msg.data_len = req32->msg.data_len;
		req->msg.data = PTRIN(req32->msg.data);
		break;
	case IPMICTL_RECEIVE_MSG_TRUNC_32:
	case IPMICTL_RECEIVE_MSG_32:
		recv = &thunk32.recv;
		recv->addr = PTRIN(recv32->addr);
		recv->addr_len = recv32->addr_len;
		recv->msg.data_len = recv32->msg.data_len;
		recv->msg.data = PTRIN(recv32->msg.data);
		break;
	}
#endif

	switch (cmd) {
#ifdef IPMICTL_SEND_COMMAND_32
	case IPMICTL_SEND_COMMAND_32:
#endif
	case IPMICTL_SEND_COMMAND:
		error = copyin(req->addr, &addr, sizeof(addr));
		if (error)
			return (error);

		if (addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
			struct ipmi_system_interface_addr *saddr =
			    (struct ipmi_system_interface_addr *)&addr;

			kreq = ipmi_alloc_request(dev, req->msgid,
			    IPMI_ADDR(req->msg.netfn, saddr->lun & 0x3),
			    req->msg.cmd, req->msg.data_len, IPMI_MAX_RX);
			error = copyin(req->msg.data, kreq->ir_request,
			    req->msg.data_len);
			if (error) {
				ipmi_free_request(kreq);
				return (error);
			}
			IPMI_LOCK(sc);
			dev->ipmi_requests++;
			error = sc->ipmi_enqueue_request(sc, kreq);
			IPMI_UNLOCK(sc);
			if (error)
				return (error);
			break;
		}

		/* Special processing for IPMB commands */
		struct ipmi_ipmb_addr *iaddr = (struct ipmi_ipmb_addr *)&addr;

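		/*
		 * Embed the user's message in a Send Message request.  The
		 * body (per the IPMI Send Message format) is: channel, then
		 * the IPMB message itself: rsSA, netFn/rsLUN, header
		 * checksum, rqSA, rqSeq/rqLUN, cmd, data, and a trailing
		 * checksum covering everything from rqSA on.
		 */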
		IPMI_ALLOC_DRIVER_REQUEST(kreq, IPMI_ADDR(IPMI_APP_REQUEST, 0),
		    IPMI_SEND_MSG, req->msg.data_len + 8, IPMI_MAX_RX);
		/* Construct the SEND MSG header */
		kreq->ir_request[0] = iaddr->channel;
		kreq->ir_request[1] = iaddr->slave_addr;
		kreq->ir_request[2] = IPMI_ADDR(req->msg.netfn, iaddr->lun);
		kreq->ir_request[3] =
		    ipmi_ipmb_checksum(&kreq->ir_request[1], 2);
		kreq->ir_request[4] = dev->ipmi_address;
		kreq->ir_request[5] = IPMI_ADDR(0, dev->ipmi_lun);
		kreq->ir_request[6] = req->msg.cmd;
		/* Copy the message data */
		if (req->msg.data_len > 0) {
			error = copyin(req->msg.data, &kreq->ir_request[7],
			    req->msg.data_len);
			if (error != 0)
				return (error);
		}
		kreq->ir_request[req->msg.data_len + 7] =
		    ipmi_ipmb_checksum(&kreq->ir_request[4],
		    req->msg.data_len + 3);
		error = ipmi_submit_driver_request(sc, kreq, MAX_TIMEOUT);
		if (error != 0)
			return (error);

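		/*
		 * Queue a Get Message request so the IPMB response can be
		 * read back; record the expected netFn and command so the
		 * reply can be matched to this request.
		 */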
		kreq = ipmi_alloc_request(dev, req->msgid,
		    IPMI_ADDR(IPMI_APP_REQUEST, 0), IPMI_GET_MSG,
		    0, IPMI_MAX_RX);
		kreq->ir_ipmb = true;
		kreq->ir_ipmb_addr = IPMI_ADDR(req->msg.netfn, 0);
		kreq->ir_ipmb_command = req->msg.cmd;
		IPMI_LOCK(sc);
		dev->ipmi_requests++;
		error = sc->ipmi_enqueue_request(sc, kreq);
		IPMI_UNLOCK(sc);
		if (error != 0)
			return (error);
		break;
#ifdef IPMICTL_SEND_COMMAND_32
	case IPMICTL_RECEIVE_MSG_TRUNC_32:
	case IPMICTL_RECEIVE_MSG_32:
#endif
	case IPMICTL_RECEIVE_MSG_TRUNC:
	case IPMICTL_RECEIVE_MSG:
		error = copyin(recv->addr, &addr, sizeof(addr));
		if (error)
			return (error);

		IPMI_LOCK(sc);
		kreq = TAILQ_FIRST(&dev->ipmi_completed_requests);
		if (kreq == NULL) {
			IPMI_UNLOCK(sc);
			return (EAGAIN);
		}
		if (kreq->ir_error != 0) {
			error = kreq->ir_error;
			TAILQ_REMOVE(&dev->ipmi_completed_requests, kreq,
			    ir_link);
			dev->ipmi_requests--;
			IPMI_UNLOCK(sc);
			ipmi_free_request(kreq);
			return (error);
		}

		recv->recv_type = IPMI_RESPONSE_RECV_TYPE;
		recv->msgid = kreq->ir_msgid;
		if (kreq->ir_ipmb) {
			addr.channel = IPMI_IPMB_CHANNEL;
			recv->msg.netfn =
			    IPMI_REPLY_ADDR(kreq->ir_ipmb_addr) >> 2;
			recv->msg.cmd = kreq->ir_ipmb_command;
			/* Pull the completion code out of the embedded response. */
			kreq->ir_compcode = kreq->ir_reply[6];
			/* Advance the reply pointer past the response header. */
			kreq->ir_reply += 7;
			len = kreq->ir_replylen - 7;
		} else {
			addr.channel = IPMI_BMC_CHANNEL;
			recv->msg.netfn = IPMI_REPLY_ADDR(kreq->ir_addr) >> 2;
			recv->msg.cmd = kreq->ir_command;
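			/* One extra byte for the completion code copied out below. */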
			len = kreq->ir_replylen + 1;
		}

		if (recv->msg.data_len < len &&
		    (cmd == IPMICTL_RECEIVE_MSG
#ifdef IPMICTL_RECEIVE_MSG_32
		    || cmd == IPMICTL_RECEIVE_MSG_32
#endif
		    )) {
			IPMI_UNLOCK(sc);
			return (EMSGSIZE);
		}
		TAILQ_REMOVE(&dev->ipmi_completed_requests, kreq, ir_link);
		dev->ipmi_requests--;
		IPMI_UNLOCK(sc);
		len = min(recv->msg.data_len, len);
		recv->msg.data_len = len;
		error = copyout(&addr, recv->addr, sizeof(addr));
		if (error == 0)
			error = copyout(&kreq->ir_compcode, recv->msg.data, 1);
		if (error == 0)
			error = copyout(kreq->ir_reply, recv->msg.data + 1,
			    len - 1);
		ipmi_free_request(kreq);
		if (error)
			return (error);
		break;
	case IPMICTL_SET_MY_ADDRESS_CMD:
		IPMI_LOCK(sc);
		dev->ipmi_address = *(int*)data;
		IPMI_UNLOCK(sc);
		break;
	case IPMICTL_GET_MY_ADDRESS_CMD:
		IPMI_LOCK(sc);
		*(int*)data = dev->ipmi_address;
		IPMI_UNLOCK(sc);
		break;
	case IPMICTL_SET_MY_LUN_CMD:
		IPMI_LOCK(sc);
		dev->ipmi_lun = *(int*)data & 0x3;
		IPMI_UNLOCK(sc);
		break;
	case IPMICTL_GET_MY_LUN_CMD:
		IPMI_LOCK(sc);
		*(int*)data = dev->ipmi_lun;
		IPMI_UNLOCK(sc);
		break;
	case IPMICTL_SET_GETS_EVENTS_CMD:
		/*
		device_printf(sc->ipmi_dev,
		    "IPMICTL_SET_GETS_EVENTS_CMD NA\n");
		*/
		break;
	case IPMICTL_REGISTER_FOR_CMD:
	case IPMICTL_UNREGISTER_FOR_CMD:
		return (EOPNOTSUPP);
	default:
		device_printf(sc->ipmi_dev, "Unknown IOCTL %lX\n", cmd);
		return (ENOIOCTL);
	}

#ifdef IPMICTL_SEND_COMMAND_32
	/* Update changed fields in 32-bit structures. */
	switch (cmd) {
	case IPMICTL_RECEIVE_MSG_TRUNC_32:
	case IPMICTL_RECEIVE_MSG_32:
		recv32->recv_type = recv->recv_type;
		recv32->msgid = recv->msgid;
		recv32->msg.netfn = recv->msg.netfn;
		recv32->msg.cmd = recv->msg.cmd;
		recv32->msg.data_len = recv->msg.data_len;
		break;
	}
#endif
	return (0);
}

/*
 * Request management.
 */

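/*
 * A request is allocated with its request and reply buffers placed
 * immediately after the structure in a single allocation; ir_request
 * and ir_reply point into that trailing storage.
 */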
__inline void
ipmi_init_request(struct ipmi_request *req, struct ipmi_device *dev, long msgid,
    uint8_t addr, uint8_t command, size_t requestlen, size_t replylen)
{

	req->ir_owner = dev;
	req->ir_msgid = msgid;
	req->ir_addr = addr;
	req->ir_command = command;
	if (requestlen) {
		req->ir_request = (char *)&req[1];
		req->ir_requestlen = requestlen;
	}
	if (replylen) {
		req->ir_reply = (char *)&req[1] + requestlen;
		req->ir_replybuflen = replylen;
	}
}

/* Allocate a new request with request and reply buffers. */
struct ipmi_request *
ipmi_alloc_request(struct ipmi_device *dev, long msgid, uint8_t addr,
    uint8_t command, size_t requestlen, size_t replylen)
{
	struct ipmi_request *req;

	req = malloc(sizeof(struct ipmi_request) + requestlen + replylen,
	    M_IPMI, M_WAITOK | M_ZERO);
	ipmi_init_request(req, dev, msgid, addr, command, requestlen, replylen);
	return (req);
}

/* Free a request no longer in use. */
void
ipmi_free_request(struct ipmi_request *req)
{

	free(req, M_IPMI);
}

/* Store a processed request on the appropriate completion queue. */
void
ipmi_complete_request(struct ipmi_softc *sc, struct ipmi_request *req)
{
	struct ipmi_device *dev;

	IPMI_LOCK_ASSERT(sc);

	/*
	 * Anonymous requests (from inside the driver) always have a
	 * waiter that we awaken.
	 */
	if (req->ir_owner == NULL)
		wakeup(req);
	else {
		dev = req->ir_owner;
		TAILQ_INSERT_TAIL(&dev->ipmi_completed_requests, req, ir_link);
		selwakeup(&dev->ipmi_select);
		if (dev->ipmi_closing)
			wakeup(&dev->ipmi_requests);
	}
}

/* Perform an internal driver request. */
int
ipmi_submit_driver_request(struct ipmi_softc *sc, struct ipmi_request *req,
    int timo)
{

	return (sc->ipmi_driver_request(sc, req, timo));
}

/*
 * Helper routine for polled system interfaces that use
 * ipmi_polled_enqueue_request() to queue requests.  This routine
 * waits until there is a pending request and then returns the first
 * request.  If the driver is shutting down, it returns NULL.
 */
struct ipmi_request *
ipmi_dequeue_request(struct ipmi_softc *sc)
{
	struct ipmi_request *req;

	IPMI_LOCK_ASSERT(sc);

	while (!sc->ipmi_detaching && TAILQ_EMPTY(&sc->ipmi_pending_requests))
		cv_wait(&sc->ipmi_request_added, &sc->ipmi_requests_lock);
	if (sc->ipmi_detaching)
		return (NULL);

	req = TAILQ_FIRST(&sc->ipmi_pending_requests);
	TAILQ_REMOVE(&sc->ipmi_pending_requests, req, ir_link);
	return (req);
}

/* Default implementation of ipmi_enqueue_request() for polled interfaces. */
int
ipmi_polled_enqueue_request(struct ipmi_softc *sc, struct ipmi_request *req)
{

	IPMI_LOCK_ASSERT(sc);

	TAILQ_INSERT_TAIL(&sc->ipmi_pending_requests, req, ir_link);
	cv_signal(&sc->ipmi_request_added);
	return (0);
}

/*
 * Watchdog event handler.
 */

static int
ipmi_reset_watchdog(struct ipmi_softc *sc)
{
	struct ipmi_request *req;
	int error;

	IPMI_ALLOC_DRIVER_REQUEST(req, IPMI_ADDR(IPMI_APP_REQUEST, 0),
	    IPMI_RESET_WDOG, 0, 0);
	error = ipmi_submit_driver_request(sc, req, 0);
	if (error) {
		device_printf(sc->ipmi_dev, "Failed to reset watchdog\n");
	} else if (req->ir_compcode == 0x80) {
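		/*
		 * Per the IPMI spec, completion code 0x80 means the
		 * watchdog timer has not been initialized.
		 */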
		error = ENOENT;
	} else if (req->ir_compcode != 0) {
		device_printf(sc->ipmi_dev, "Watchdog reset returned 0x%x\n",
		    req->ir_compcode);
		error = EINVAL;
	}
	return (error);
}

static int
ipmi_set_watchdog(struct ipmi_softc *sc, unsigned int sec)
{
	struct ipmi_request *req;
	int error;

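	/*
	 * The watchdog countdown is a 16-bit value in 100 ms units (per the
	 * IPMI Set Watchdog Timer command), so 0xffff / 10 seconds is the
	 * longest timeout we can program.
	 */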
	if (sec > 0xffff / 10)
		return (EINVAL);

	IPMI_ALLOC_DRIVER_REQUEST(req, IPMI_ADDR(IPMI_APP_REQUEST, 0),
	    IPMI_SET_WDOG, 6, 0);
	if (sec) {
		req->ir_request[0] = IPMI_SET_WD_TIMER_DONT_STOP
		    | IPMI_SET_WD_TIMER_SMS_OS;
		req->ir_request[1] = (wd_timer_actions & 0xff);
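		/*
		 * The pre-timeout interrupt interval is a single byte in
		 * seconds; clamp it to roughly a quarter of the timeout.
		 */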
		req->ir_request[2] = min(0xff,
		    min(wd_pretimeout_countdown, (sec + 2) / 4));
		req->ir_request[3] = 0;	/* Timer use */
		req->ir_request[4] = (sec * 10) & 0xff;
		req->ir_request[5] = (sec * 10) >> 8;
	} else {
		req->ir_request[0] = IPMI_SET_WD_TIMER_SMS_OS;
		req->ir_request[1] = 0;
		req->ir_request[2] = 0;
		req->ir_request[3] = 0;	/* Timer use */
		req->ir_request[4] = 0;
		req->ir_request[5] = 0;
	}
	error = ipmi_submit_driver_request(sc, req, 0);
	if (error) {
		device_printf(sc->ipmi_dev, "Failed to set watchdog\n");
	} else if (req->ir_compcode != 0) {
		device_printf(sc->ipmi_dev, "Watchdog set returned 0x%x\n",
		    req->ir_compcode);
		error = EINVAL;
	}
	return (error);
}

static void
ipmi_wd_event(void *arg, unsigned int cmd, int *error)
{
	struct ipmi_softc *sc = arg;
	unsigned int timeout;
	int e;

	/* Ignore requests while disabled. */
	if (!on)
		return;

	/*
	 * To prevent infinite hangs, we don't let anyone pat or change
	 * the watchdog when we're shutting down. (See ipmi_shutdown_event().)
	 * However, we do want to keep patting the watchdog while we are doing
	 * a coredump.
	 */
	if (wd_in_shutdown) {
		if (dumping && sc->ipmi_watchdog_active)
			ipmi_reset_watchdog(sc);
		return;
	}

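	/*
	 * The watchdog(9) interval encodes the timeout as a power of two
	 * in nanoseconds, so convert it to whole seconds for the BMC.
	 */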
	cmd &= WD_INTERVAL;
	if (cmd > 0 && cmd <= 63) {
		timeout = ((uint64_t)1 << cmd) / 1000000000;
		if (timeout == 0)
			timeout = 1;
		if (timeout != sc->ipmi_watchdog_active ||
		    wd_timer_actions != sc->ipmi_watchdog_actions ||
		    wd_pretimeout_countdown != sc->ipmi_watchdog_pretimeout) {
			e = ipmi_set_watchdog(sc, timeout);
			if (e == 0) {
				sc->ipmi_watchdog_active = timeout;
				sc->ipmi_watchdog_actions = wd_timer_actions;
				sc->ipmi_watchdog_pretimeout = wd_pretimeout_countdown;
			} else {
				(void)ipmi_set_watchdog(sc, 0);
				sc->ipmi_watchdog_active = 0;
				sc->ipmi_watchdog_actions = 0;
				sc->ipmi_watchdog_pretimeout = 0;
			}
		}
		if (sc->ipmi_watchdog_active != 0) {
			e = ipmi_reset_watchdog(sc);
			if (e == 0) {
				*error = 0;
			} else {
				(void)ipmi_set_watchdog(sc, 0);
				sc->ipmi_watchdog_active = 0;
				sc->ipmi_watchdog_actions = 0;
				sc->ipmi_watchdog_pretimeout = 0;
			}
		}
	} else if (atomic_readandclear_int(&sc->ipmi_watchdog_active) != 0) {
		sc->ipmi_watchdog_actions = 0;
		sc->ipmi_watchdog_pretimeout = 0;

		e = ipmi_set_watchdog(sc, 0);
		if (e != 0 && cmd == 0)
			*error = EOPNOTSUPP;
	}
}

static void
ipmi_shutdown_event(void *arg, unsigned int cmd, int *error)
{
	struct ipmi_softc *sc = arg;

	/* Ignore event if disabled. */
	if (!on)
		return;

	/*
	 * A positive wd_shutdown_countdown re-arms the watchdog; zero
	 * disables it; a negative value leaves the existing state alone.
	 *
	 * Revert to using a power cycle to ensure that the watchdog will
	 * do something useful here.  Having the watchdog send an NMI
	 * instead is useless during shutdown, and might be ignored if an
	 * NMI already triggered.
	 */

	wd_in_shutdown = true;
	if (wd_shutdown_countdown == 0) {
		/* disable watchdog */
		ipmi_set_watchdog(sc, 0);
		sc->ipmi_watchdog_active = 0;
	} else if (wd_shutdown_countdown > 0) {
		/* Set the desired action and timeout, then reset the watchdog. */
		wd_timer_actions = IPMI_SET_WD_ACTION_POWER_CYCLE;
		ipmi_set_watchdog(sc, wd_shutdown_countdown);
		sc->ipmi_watchdog_active = wd_shutdown_countdown;
		ipmi_reset_watchdog(sc);
	}
}

static void
ipmi_power_cycle(void *arg, int howto)
{
	struct ipmi_softc *sc = arg;
	struct ipmi_request *req;

	/*
	 * Ignore everything except power cycling requests
	 */
	if ((howto & RB_POWERCYCLE) == 0)
		return;

	device_printf(sc->ipmi_dev, "Power cycling using IPMI\n");

	/*
	 * Send a CHASSIS_CONTROL command to the CHASSIS device, subcommand 2
	 * as described in IPMI v2.0 spec section 28.3.
	 */
	IPMI_ALLOC_DRIVER_REQUEST(req, IPMI_ADDR(IPMI_CHASSIS_REQUEST, 0),
	    IPMI_CHASSIS_CONTROL, 1, 0);
	req->ir_request[0] = IPMI_CC_POWER_CYCLE;

	ipmi_submit_driver_request(sc, req, MAX_TIMEOUT);

	if (req->ir_error != 0 || req->ir_compcode != 0) {
		device_printf(sc->ipmi_dev, "Power cycling via IPMI failed code %#x %#x\n",
		    req->ir_error, req->ir_compcode);
		return;
	}

	/*
	 * BMCs are notoriously slow; give the BMC cycle_wait seconds for
	 * the power-down leg of the power cycle.  If that fails, fall back
	 * to the next handler in the shutdown_final chain and/or the
	 * platform failsafe.
	 */
	DELAY(cycle_wait * 1000 * 1000);
	device_printf(sc->ipmi_dev, "Power cycling via IPMI timed out\n");
}

static void
ipmi_startup(void *arg)
{
	struct ipmi_softc *sc = arg;
	struct ipmi_request *req;
	device_t dev;
	int error, i;

	config_intrhook_disestablish(&sc->ipmi_ich);
	dev = sc->ipmi_dev;

	/* Initialize interface-independent state. */
	mtx_init(&sc->ipmi_requests_lock, "ipmi requests", NULL, MTX_DEF);
	mtx_init(&sc->ipmi_io_lock, "ipmi io", NULL, MTX_DEF);
	cv_init(&sc->ipmi_request_added, "ipmireq");
	TAILQ_INIT(&sc->ipmi_pending_requests);

	/* Initialize interface-dependent state. */
	error = sc->ipmi_startup(sc);
	if (error) {
		device_printf(dev, "Failed to initialize interface: %d\n",
		    error);
		return;
	}

	/* Send a GET_DEVICE_ID request. */
	IPMI_ALLOC_DRIVER_REQUEST(req, IPMI_ADDR(IPMI_APP_REQUEST, 0),
	    IPMI_GET_DEVICE_ID, 0, 15);

	error = ipmi_submit_driver_request(sc, req, MAX_TIMEOUT);
	if (error == EWOULDBLOCK) {
		device_printf(dev, "Timed out waiting for GET_DEVICE_ID\n");
		return;
	} else if (error) {
		device_printf(dev, "Failed GET_DEVICE_ID: %d\n", error);
		return;
	} else if (req->ir_compcode != 0) {
		device_printf(dev,
		    "Bad completion code for GET_DEVICE_ID: %d\n",
		    req->ir_compcode);
		return;
	} else if (req->ir_replylen < 5) {
		device_printf(dev, "Short reply for GET_DEVICE_ID: %d\n",
		    req->ir_replylen);
		return;
	}

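	/*
	 * Get Device ID reply bytes (the completion code is kept separately
	 * in ir_compcode): [0] device ID, [1] device revision, [2] firmware
	 * major, [3] firmware minor (BCD), [4] IPMI version (BCD), and
	 * [5] the additional device support mask.
	 */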
	device_printf(dev, "IPMI device rev. %d, firmware rev. %d.%d%d, "
	    "version %d.%d, device support mask %#x\n",
	    req->ir_reply[1] & 0x0f,
	    req->ir_reply[2] & 0x7f, req->ir_reply[3] >> 4, req->ir_reply[3] & 0x0f,
	    req->ir_reply[4] & 0x0f, req->ir_reply[4] >> 4, req->ir_reply[5]);

	sc->ipmi_dev_support = req->ir_reply[5];

	IPMI_INIT_DRIVER_REQUEST(req, IPMI_ADDR(IPMI_APP_REQUEST, 0),
	    IPMI_CLEAR_FLAGS, 1, 0);

	ipmi_submit_driver_request(sc, req, 0);

	/* XXX: Magic completion codes: 0xc0 is "node busy", 0xc1 "invalid command". */
	if (req->ir_compcode == 0xc0) {
		device_printf(dev, "Clear flags is busy\n");
	}
	if (req->ir_compcode == 0xc1) {
		device_printf(dev, "Clear flags illegal\n");
	}

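	/* Count channels by probing Get Channel Info until it fails. */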
	for (i = 0; i < 8; i++) {
		IPMI_INIT_DRIVER_REQUEST(req, IPMI_ADDR(IPMI_APP_REQUEST, 0),
		    IPMI_GET_CHANNEL_INFO, 1, 0);
		req->ir_request[0] = i;

		error = ipmi_submit_driver_request(sc, req, 0);

		if (error != 0 || req->ir_compcode != 0)
			break;
	}
	device_printf(dev, "Number of channels %d\n", i);

	/*
	 * Probe for watchdog, but only for backends which support
	 * polled driver requests.
	 */
	if (wd_init_enable && sc->ipmi_driver_requests_polled) {
		IPMI_INIT_DRIVER_REQUEST(req, IPMI_ADDR(IPMI_APP_REQUEST, 0),
		    IPMI_GET_WDOG, 0, 0);

		error = ipmi_submit_driver_request(sc, req, 0);

		if (error == 0 && req->ir_compcode == 0x00) {
			device_printf(dev, "Attached watchdog\n");
			/* register the watchdog event handler */
			sc->ipmi_watchdog_tag = EVENTHANDLER_REGISTER(
				watchdog_list, ipmi_wd_event, sc, 0);
			sc->ipmi_shutdown_tag = EVENTHANDLER_REGISTER(
				shutdown_pre_sync, ipmi_shutdown_event,
				sc, 0);
		}
	}

	sc->ipmi_cdev = make_dev(&ipmi_cdevsw, device_get_unit(dev),
	    UID_ROOT, GID_OPERATOR, 0660, "ipmi%d", device_get_unit(dev));
	if (sc->ipmi_cdev == NULL) {
		device_printf(dev, "Failed to create cdev\n");
		return;
	}
	sc->ipmi_cdev->si_drv1 = sc;

	/*
	 * Set initial watchdog state. If desired, set an initial
	 * watchdog on startup. Or, if the watchdog device is
	 * disabled, clear any existing watchdog.
	 */
	if (on && wd_startup_countdown > 0) {
		if (ipmi_set_watchdog(sc, wd_startup_countdown) == 0 &&
		    ipmi_reset_watchdog(sc) == 0) {
			sc->ipmi_watchdog_active = wd_startup_countdown;
			sc->ipmi_watchdog_actions = wd_timer_actions;
			sc->ipmi_watchdog_pretimeout = wd_pretimeout_countdown;
		} else
			(void)ipmi_set_watchdog(sc, 0);
		ipmi_reset_watchdog(sc);
	} else if (!on)
		(void)ipmi_set_watchdog(sc, 0);
	/*
	 * Register a handler to power cycle the system via IPMI.  We use
	 * SHUTDOWN_PRI_LAST - 2 since we only handle power cycling and
	 * let other handlers deal with the remaining kinds of reboots.
	 * We only do this if the BMC supports the Chassis device.
	 */
	if (sc->ipmi_dev_support & IPMI_ADS_CHASSIS) {
		device_printf(dev, "Establishing power cycle handler\n");
		sc->ipmi_power_cycle_tag = EVENTHANDLER_REGISTER(shutdown_final,
		    ipmi_power_cycle, sc, SHUTDOWN_PRI_LAST - 2);
	}
}

int
ipmi_attach(device_t dev)
{
	struct ipmi_softc *sc = device_get_softc(dev);
	int error;

	if (sc->ipmi_irq_res != NULL && sc->ipmi_intr != NULL) {
		error = bus_setup_intr(dev, sc->ipmi_irq_res, INTR_TYPE_MISC,
		    NULL, sc->ipmi_intr, sc, &sc->ipmi_irq);
		if (error) {
			device_printf(dev, "can't set up interrupt\n");
			return (error);
		}
	}

	bzero(&sc->ipmi_ich, sizeof(struct intr_config_hook));
	sc->ipmi_ich.ich_func = ipmi_startup;
	sc->ipmi_ich.ich_arg = sc;
	if (config_intrhook_establish(&sc->ipmi_ich) != 0) {
		device_printf(dev, "can't establish configuration hook\n");
		return (ENOMEM);
	}

	ipmi_attached = 1;
	return (0);
}

int
ipmi_detach(device_t dev)
{
	struct ipmi_softc *sc;

	sc = device_get_softc(dev);

	/* Fail if there are any open handles. */
	IPMI_LOCK(sc);
	if (sc->ipmi_opened) {
		IPMI_UNLOCK(sc);
		return (EBUSY);
	}
	IPMI_UNLOCK(sc);
	if (sc->ipmi_cdev)
		destroy_dev(sc->ipmi_cdev);

	/* Detach from watchdog handling and turn off watchdog. */
	if (sc->ipmi_shutdown_tag)
		EVENTHANDLER_DEREGISTER(shutdown_pre_sync,
		    sc->ipmi_shutdown_tag);
	if (sc->ipmi_watchdog_tag) {
		EVENTHANDLER_DEREGISTER(watchdog_list, sc->ipmi_watchdog_tag);
		ipmi_set_watchdog(sc, 0);
	}

	/* Detach from shutdown handling for power cycle reboot */
	if (sc->ipmi_power_cycle_tag)
		EVENTHANDLER_DEREGISTER(shutdown_final, sc->ipmi_power_cycle_tag);

	/* XXX: should use shutdown callout I think. */
	/* If the backend uses a kthread, shut it down. */
	IPMI_LOCK(sc);
	sc->ipmi_detaching = 1;
	if (sc->ipmi_kthread) {
		cv_broadcast(&sc->ipmi_request_added);
		msleep(sc->ipmi_kthread, &sc->ipmi_requests_lock, 0,
		    "ipmi_wait", 0);
	}
	IPMI_UNLOCK(sc);
	if (sc->ipmi_irq)
		bus_teardown_intr(dev, sc->ipmi_irq_res, sc->ipmi_irq);

	ipmi_release_resources(dev);
	mtx_destroy(&sc->ipmi_io_lock);
	mtx_destroy(&sc->ipmi_requests_lock);
	return (0);
}

void
ipmi_release_resources(device_t dev)
{
	struct ipmi_softc *sc;
	int i;

	sc = device_get_softc(dev);
	if (sc->ipmi_irq)
		bus_teardown_intr(dev, sc->ipmi_irq_res, sc->ipmi_irq);
	if (sc->ipmi_irq_res)
		bus_release_resource(dev, SYS_RES_IRQ, sc->ipmi_irq_rid,
		    sc->ipmi_irq_res);
	for (i = 0; i < MAX_RES; i++)
		if (sc->ipmi_io_res[i])
			bus_release_resource(dev, sc->ipmi_io_type,
			    sc->ipmi_io_rid + i, sc->ipmi_io_res[i]);
}

/* XXX: Why? */
static void
ipmi_unload(void *arg)
{
	device_t *	devs;
	int		count;
	int		i;

	if (devclass_get_devices(devclass_find("ipmi"), &devs, &count) != 0)
		return;
	for (i = 0; i < count; i++)
		device_delete_child(device_get_parent(devs[i]), devs[i]);
	free(devs, M_TEMP);
}
SYSUNINIT(ipmi_unload, SI_SUB_DRIVERS, SI_ORDER_FIRST, ipmi_unload, NULL);

#ifdef IPMI_DEBUG
static void
dump_buf(u_char *data, int len)
{
	char buf[20];
	char line[1024];
	char temp[30];
	int count = 0;
	int i = 0;

	printf("Address %p len %d\n", data, len);
	if (len > 256)
		len = 256;
	line[0] = '\000';
	for (; len > 0; len--, data++) {
		sprintf(temp, "%02x ", *data);
		strcat(line, temp);
		if (*data >= ' ' && *data <= '~')
			buf[count] = *data;
		else
			buf[count] = '.';
		if (++count == 16) {
			buf[count] = '\000';
			count = 0;
			printf("  %3x  %s %s\n", i, line, buf);
			i += 16;
			line[0] = '\000';
		}
	}
	buf[count] = '\000';

	for (; count != 16; count++) {
		strcat(line, "   ");
	}
	printf("  %3x  %s %s\n", i, line, buf);
}
#endif