/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2006 IronPort Systems Inc. <ambrisko@ironport.com>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/condvar.h>
#include <sys/conf.h>
#include <sys/eventhandler.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/poll.h>
#include <sys/reboot.h>
#include <sys/rman.h>
#include <sys/selinfo.h>
#include <sys/sysctl.h>
#include <sys/watchdog.h>

#ifdef LOCAL_MODULE
#include <ipmi.h>
#include <ipmivars.h>
#else
#include <sys/ipmi.h>
#include <dev/ipmi/ipmivars.h>
#endif

#ifdef IPMICTL_SEND_COMMAND_32
#include <sys/abi_compat.h>
#endif

/*
 * Driver request structures are allocated on the stack via alloca() to
 * avoid calling malloc(), especially for the watchdog handler.
 * To avoid too much stack growth, a previously allocated structure can
 * be reused via IPMI_INIT_DRIVER_REQUEST(), but the caller should ensure
 * that there is adequate reply/request space in the original allocation.
 */
#define IPMI_INIT_DRIVER_REQUEST(req, addr, cmd, reqlen, replylen)      \
        bzero((req), sizeof(struct ipmi_request));                      \
        ipmi_init_request((req), NULL, 0, (addr), (cmd), (reqlen), (replylen))

#define IPMI_ALLOC_DRIVER_REQUEST(req, addr, cmd, reqlen, replylen)     \
        (req) = __builtin_alloca(sizeof(struct ipmi_request) +          \
            (reqlen) + (replylen));                                     \
        IPMI_INIT_DRIVER_REQUEST((req), (addr), (cmd), (reqlen),        \
            (replylen))
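
/*
 * Example (a sketch mirroring ipmi_reset_watchdog() below): build a
 * zero-length APP request on the stack and submit it synchronously:
 *
 *      struct ipmi_request *req;
 *      int error;
 *
 *      IPMI_ALLOC_DRIVER_REQUEST(req, IPMI_ADDR(IPMI_APP_REQUEST, 0),
 *          IPMI_RESET_WDOG, 0, 0);
 *      error = ipmi_submit_driver_request(sc, req);
 */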

static d_ioctl_t ipmi_ioctl;
static d_poll_t ipmi_poll;
static d_open_t ipmi_open;
static void ipmi_dtor(void *arg);

int ipmi_attached = 0;

static int on = 1;
static bool wd_in_shutdown = false;
static int wd_timer_actions = IPMI_SET_WD_ACTION_POWER_CYCLE;
static int wd_shutdown_countdown = 0;   /* sec */
static int wd_startup_countdown = 0;    /* sec */
static int wd_pretimeout_countdown = 120;       /* sec */
static int cycle_wait = 10;     /* sec */
static int wd_init_enable = 1;

static SYSCTL_NODE(_hw, OID_AUTO, ipmi, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "IPMI driver parameters");
SYSCTL_INT(_hw_ipmi, OID_AUTO, on, CTLFLAG_RWTUN,
    &on, 0, "");
SYSCTL_INT(_hw_ipmi, OID_AUTO, wd_init_enable, CTLFLAG_RWTUN,
    &wd_init_enable, 1, "Enable watchdog initialization");
SYSCTL_INT(_hw_ipmi, OID_AUTO, wd_timer_actions, CTLFLAG_RWTUN,
    &wd_timer_actions, 0,
    "IPMI watchdog timer actions (including pre-timeout interrupt)");
SYSCTL_INT(_hw_ipmi, OID_AUTO, wd_shutdown_countdown, CTLFLAG_RWTUN,
    &wd_shutdown_countdown, 0,
    "IPMI watchdog countdown for shutdown (seconds)");
SYSCTL_INT(_hw_ipmi, OID_AUTO, wd_startup_countdown, CTLFLAG_RDTUN,
    &wd_startup_countdown, 0,
    "IPMI watchdog countdown initialized during startup (seconds)");
SYSCTL_INT(_hw_ipmi, OID_AUTO, wd_pretimeout_countdown, CTLFLAG_RWTUN,
    &wd_pretimeout_countdown, 0,
    "IPMI watchdog pre-timeout countdown (seconds)");
SYSCTL_INT(_hw_ipmi, OID_AUTO, cycle_wait, CTLFLAG_RWTUN,
    &cycle_wait, 0,
    "IPMI power cycle on reboot delay time (seconds)");

static struct cdevsw ipmi_cdevsw = {
        .d_version =    D_VERSION,
        .d_open =       ipmi_open,
        .d_ioctl =      ipmi_ioctl,
        .d_poll =       ipmi_poll,
        .d_name =       "ipmi",
};

static MALLOC_DEFINE(M_IPMI, "ipmi", "ipmi");

static int
ipmi_open(struct cdev *cdev, int flags, int fmt, struct thread *td)
{
        struct ipmi_device *dev;
        struct ipmi_softc *sc;
        int error;

        if (!on)
                return (ENOENT);

        /* Initialize the per file descriptor data. */
        dev = malloc(sizeof(struct ipmi_device), M_IPMI, M_WAITOK | M_ZERO);
        error = devfs_set_cdevpriv(dev, ipmi_dtor);
        if (error) {
                free(dev, M_IPMI);
                return (error);
        }

        sc = cdev->si_drv1;
        TAILQ_INIT(&dev->ipmi_completed_requests);
        dev->ipmi_address = IPMI_BMC_SLAVE_ADDR;
        dev->ipmi_lun = IPMI_BMC_SMS_LUN;
        dev->ipmi_softc = sc;
        IPMI_LOCK(sc);
        sc->ipmi_opened++;
        IPMI_UNLOCK(sc);

        return (0);
}

static int
ipmi_poll(struct cdev *cdev, int poll_events, struct thread *td)
{
        struct ipmi_device *dev;
        struct ipmi_softc *sc;
        int revents = 0;

        if (devfs_get_cdevpriv((void **)&dev))
                return (0);

        sc = cdev->si_drv1;
        IPMI_LOCK(sc);
        if (poll_events & (POLLIN | POLLRDNORM)) {
                if (!TAILQ_EMPTY(&dev->ipmi_completed_requests))
                        revents |= poll_events & (POLLIN | POLLRDNORM);
                if (dev->ipmi_requests == 0)
                        revents |= POLLERR;
        }

        if (revents == 0) {
                if (poll_events & (POLLIN | POLLRDNORM))
                        selrecord(td, &dev->ipmi_select);
        }
        IPMI_UNLOCK(sc);

        return (revents);
}

static void
ipmi_purge_completed_requests(struct ipmi_device *dev)
{
        struct ipmi_request *req;

        while (!TAILQ_EMPTY(&dev->ipmi_completed_requests)) {
                req = TAILQ_FIRST(&dev->ipmi_completed_requests);
                TAILQ_REMOVE(&dev->ipmi_completed_requests, req, ir_link);
                dev->ipmi_requests--;
                ipmi_free_request(req);
        }
}

static void
ipmi_dtor(void *arg)
{
        struct ipmi_request *req, *nreq;
        struct ipmi_device *dev;
        struct ipmi_softc *sc;

        dev = arg;
        sc = dev->ipmi_softc;

        IPMI_LOCK(sc);
        if (dev->ipmi_requests) {
                /* Throw away any pending requests for this device. */
                TAILQ_FOREACH_SAFE(req, &sc->ipmi_pending_requests_highpri,
                    ir_link, nreq) {
                        if (req->ir_owner == dev) {
                                TAILQ_REMOVE(
                                    &sc->ipmi_pending_requests_highpri, req,
                                    ir_link);
                                dev->ipmi_requests--;
                                ipmi_free_request(req);
                        }
                }
                TAILQ_FOREACH_SAFE(req, &sc->ipmi_pending_requests, ir_link,
                    nreq) {
                        if (req->ir_owner == dev) {
                                TAILQ_REMOVE(&sc->ipmi_pending_requests, req,
                                    ir_link);
                                dev->ipmi_requests--;
                                ipmi_free_request(req);
                        }
                }

                /* Throw away any pending completed requests for this device. */
                ipmi_purge_completed_requests(dev);

                /*
                 * If we still have outstanding requests, they must be stuck
                 * in an interface driver, so wait for those to drain.
                 */
                dev->ipmi_closing = 1;
                while (dev->ipmi_requests > 0) {
                        msleep(&dev->ipmi_requests, &sc->ipmi_requests_lock,
                            PWAIT, "ipmidrain", 0);
                        ipmi_purge_completed_requests(dev);
                }
        }
        sc->ipmi_opened--;
        IPMI_UNLOCK(sc);

        /* Cleanup. */
        free(dev, M_IPMI);
}

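/*
 * Compute the two's-complement checksum used by IPMB framing: the 8-bit
 * sum of all covered bytes plus the checksum byte is zero.  For example,
 * for the bytes { 0x20, 0x18 } the checksum is -(0x20 + 0x18) = 0xc8,
 * and (0x20 + 0x18 + 0xc8) & 0xff == 0.
 */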
static u_char
ipmi_ipmb_checksum(u_char *data, int len)
{
        u_char sum = 0;

        for (; len; len--)
                sum += *data++;
        return (-sum);
}

static int
ipmi_ioctl(struct cdev *cdev, u_long cmd, caddr_t data,
    int flags, struct thread *td)
{
        struct ipmi_softc *sc;
        struct ipmi_device *dev;
        struct ipmi_request *kreq;
        struct ipmi_req *req = (struct ipmi_req *)data;
        struct ipmi_recv *recv = (struct ipmi_recv *)data;
        struct ipmi_addr addr;
#ifdef IPMICTL_SEND_COMMAND_32
        struct ipmi_req32 *req32 = (struct ipmi_req32 *)data;
        struct ipmi_recv32 *recv32 = (struct ipmi_recv32 *)data;
        union {
                struct ipmi_req req;
                struct ipmi_recv recv;
        } thunk32;
#endif
        int error, len;

        error = devfs_get_cdevpriv((void **)&dev);
        if (error)
                return (error);

        sc = cdev->si_drv1;

#ifdef IPMICTL_SEND_COMMAND_32
        /* Convert 32-bit structures to native. */
        switch (cmd) {
        case IPMICTL_SEND_COMMAND_32:
                req = &thunk32.req;
                req->addr = PTRIN(req32->addr);
                req->addr_len = req32->addr_len;
                req->msgid = req32->msgid;
                req->msg.netfn = req32->msg.netfn;
                req->msg.cmd = req32->msg.cmd;
                req->msg.data_len = req32->msg.data_len;
                req->msg.data = PTRIN(req32->msg.data);
                break;
        case IPMICTL_RECEIVE_MSG_TRUNC_32:
        case IPMICTL_RECEIVE_MSG_32:
                recv = &thunk32.recv;
                recv->addr = PTRIN(recv32->addr);
                recv->addr_len = recv32->addr_len;
                recv->msg.data_len = recv32->msg.data_len;
                recv->msg.data = PTRIN(recv32->msg.data);
                break;
        }
#endif

        switch (cmd) {
#ifdef IPMICTL_SEND_COMMAND_32
        case IPMICTL_SEND_COMMAND_32:
#endif
        case IPMICTL_SEND_COMMAND:
                error = copyin(req->addr, &addr, sizeof(addr));
                if (error)
                        return (error);

                if (addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
                        struct ipmi_system_interface_addr *saddr =
                            (struct ipmi_system_interface_addr *)&addr;

                        kreq = ipmi_alloc_request(dev, req->msgid,
                            IPMI_ADDR(req->msg.netfn, saddr->lun & 0x3),
                            req->msg.cmd, req->msg.data_len, IPMI_MAX_RX);
                        error = copyin(req->msg.data, kreq->ir_request,
                            req->msg.data_len);
                        if (error) {
                                ipmi_free_request(kreq);
                                return (error);
                        }
                        IPMI_LOCK(sc);
                        dev->ipmi_requests++;
                        error = sc->ipmi_enqueue_request(sc, kreq);
                        IPMI_UNLOCK(sc);
                        if (error)
                                return (error);
                        break;
                }

                /* Special processing for IPMB commands */
                struct ipmi_ipmb_addr *iaddr = (struct ipmi_ipmb_addr *)&addr;

                IPMI_ALLOC_DRIVER_REQUEST(kreq, IPMI_ADDR(IPMI_APP_REQUEST, 0),
                    IPMI_SEND_MSG, req->msg.data_len + 8, IPMI_MAX_RX);
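                /*
                 * The SEND_MSG body is the channel number followed by an
                 * IPMB-framed message (IPMI v2.0 spec, Send Message and the
                 * IPMB protocol): rsSA, netFn/rsLUN, a checksum over those
                 * two bytes, then rqSA, rqSeq/rqLUN, the command, the data,
                 * and a trailing checksum over everything from rqSA on.
                 */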
                /* Construct the SEND MSG header */
                kreq->ir_request[0] = iaddr->channel;
                kreq->ir_request[1] = iaddr->slave_addr;
                kreq->ir_request[2] = IPMI_ADDR(req->msg.netfn, iaddr->lun);
                kreq->ir_request[3] =
                    ipmi_ipmb_checksum(&kreq->ir_request[1], 2);
                kreq->ir_request[4] = dev->ipmi_address;
                kreq->ir_request[5] = IPMI_ADDR(0, dev->ipmi_lun);
                kreq->ir_request[6] = req->msg.cmd;
                /* Copy the message data */
                if (req->msg.data_len > 0) {
                        error = copyin(req->msg.data, &kreq->ir_request[7],
                            req->msg.data_len);
                        if (error != 0)
                                return (error);
                }
                kreq->ir_request[req->msg.data_len + 7] =
                    ipmi_ipmb_checksum(&kreq->ir_request[4],
                    req->msg.data_len + 3);
                error = ipmi_submit_driver_request(sc, kreq);
                if (error != 0)
                        return (error);

                kreq = ipmi_alloc_request(dev, req->msgid,
                    IPMI_ADDR(IPMI_APP_REQUEST, 0), IPMI_GET_MSG,
                    0, IPMI_MAX_RX);
                kreq->ir_ipmb = true;
                kreq->ir_ipmb_addr = IPMI_ADDR(req->msg.netfn, 0);
                kreq->ir_ipmb_command = req->msg.cmd;
                IPMI_LOCK(sc);
                dev->ipmi_requests++;
                error = sc->ipmi_enqueue_request(sc, kreq);
                IPMI_UNLOCK(sc);
                if (error != 0)
                        return (error);
                break;
#ifdef IPMICTL_SEND_COMMAND_32
        case IPMICTL_RECEIVE_MSG_TRUNC_32:
        case IPMICTL_RECEIVE_MSG_32:
#endif
        case IPMICTL_RECEIVE_MSG_TRUNC:
        case IPMICTL_RECEIVE_MSG:
                error = copyin(recv->addr, &addr, sizeof(addr));
                if (error)
                        return (error);

                IPMI_LOCK(sc);
                kreq = TAILQ_FIRST(&dev->ipmi_completed_requests);
                if (kreq == NULL) {
                        IPMI_UNLOCK(sc);
                        return (EAGAIN);
                }
                if (kreq->ir_error != 0) {
                        error = kreq->ir_error;
                        TAILQ_REMOVE(&dev->ipmi_completed_requests, kreq,
                            ir_link);
                        dev->ipmi_requests--;
                        IPMI_UNLOCK(sc);
                        ipmi_free_request(kreq);
                        return (error);
                }

                recv->recv_type = IPMI_RESPONSE_RECV_TYPE;
                recv->msgid = kreq->ir_msgid;
                if (kreq->ir_ipmb) {
                        addr.channel = IPMI_IPMB_CHANNEL;
                        recv->msg.netfn =
                            IPMI_REPLY_ADDR(kreq->ir_ipmb_addr) >> 2;
                        recv->msg.cmd = kreq->ir_ipmb_command;
                        /* Get the compcode of response */
                        kreq->ir_compcode = kreq->ir_reply[6];
                        /* Move the reply head past response header */
                        kreq->ir_reply += 7;
                        len = kreq->ir_replylen - 7;
                } else {
                        addr.channel = IPMI_BMC_CHANNEL;
                        recv->msg.netfn =
                            IPMI_REPLY_ADDR(kreq->ir_addr) >> 2;
                        recv->msg.cmd = kreq->ir_command;
                        len = kreq->ir_replylen + 1;
                }

                if (recv->msg.data_len < len &&
                    (cmd == IPMICTL_RECEIVE_MSG
#ifdef IPMICTL_RECEIVE_MSG_32
                    || cmd == IPMICTL_RECEIVE_MSG_32
#endif
                    )) {
                        IPMI_UNLOCK(sc);
                        return (EMSGSIZE);
                }
                TAILQ_REMOVE(&dev->ipmi_completed_requests, kreq, ir_link);
                dev->ipmi_requests--;
                IPMI_UNLOCK(sc);
                len = min(recv->msg.data_len, len);
                recv->msg.data_len = len;
                error = copyout(&addr, recv->addr, sizeof(addr));
                if (error == 0)
                        error = copyout(&kreq->ir_compcode, recv->msg.data, 1);
                if (error == 0)
                        error = copyout(kreq->ir_reply, recv->msg.data + 1,
                            len - 1);
                ipmi_free_request(kreq);
                if (error)
                        return (error);
                break;
        case IPMICTL_SET_MY_ADDRESS_CMD:
                IPMI_LOCK(sc);
                dev->ipmi_address = *(int *)data;
                IPMI_UNLOCK(sc);
                break;
        case IPMICTL_GET_MY_ADDRESS_CMD:
                IPMI_LOCK(sc);
                *(int *)data = dev->ipmi_address;
                IPMI_UNLOCK(sc);
                break;
        case IPMICTL_SET_MY_LUN_CMD:
                IPMI_LOCK(sc);
                dev->ipmi_lun = *(int *)data & 0x3;
                IPMI_UNLOCK(sc);
                break;
        case IPMICTL_GET_MY_LUN_CMD:
                IPMI_LOCK(sc);
                *(int *)data = dev->ipmi_lun;
                IPMI_UNLOCK(sc);
                break;
        case IPMICTL_SET_GETS_EVENTS_CMD:
                /*
                device_printf(sc->ipmi_dev,
                    "IPMICTL_SET_GETS_EVENTS_CMD NA\n");
                */
                break;
        case IPMICTL_REGISTER_FOR_CMD:
        case IPMICTL_UNREGISTER_FOR_CMD:
                return (EOPNOTSUPP);
        default:
                device_printf(sc->ipmi_dev, "Unknown IOCTL %lX\n", cmd);
                return (ENOIOCTL);
        }

#ifdef IPMICTL_SEND_COMMAND_32
        /* Update changed fields in 32-bit structures. */
        switch (cmd) {
        case IPMICTL_RECEIVE_MSG_TRUNC_32:
        case IPMICTL_RECEIVE_MSG_32:
                recv32->recv_type = recv->recv_type;
                recv32->msgid = recv->msgid;
                recv32->msg.netfn = recv->msg.netfn;
                recv32->msg.cmd = recv->msg.cmd;
                recv32->msg.data_len = recv->msg.data_len;
                break;
        }
#endif
        return (0);
}

/*
 * Request management.
 */

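/*
 * Initialize a request.  When present, the request and reply buffers are
 * carved out of the memory immediately following the ipmi_request
 * structure itself ([struct ipmi_request][request buf][reply buf]), so
 * callers such as ipmi_alloc_request() and IPMI_ALLOC_DRIVER_REQUEST()
 * allocate sizeof(struct ipmi_request) + requestlen + replylen bytes.
 */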
__inline void
ipmi_init_request(struct ipmi_request *req, struct ipmi_device *dev,
    long msgid, uint8_t addr, uint8_t command, size_t requestlen,
    size_t replylen)
{

        req->ir_owner = dev;
        req->ir_msgid = msgid;
        req->ir_addr = addr;
        req->ir_command = command;
        if (requestlen) {
                req->ir_request = (char *)&req[1];
                req->ir_requestlen = requestlen;
        }
        if (replylen) {
                req->ir_reply = (char *)&req[1] + requestlen;
                req->ir_replybuflen = replylen;
        }
}

/* Allocate a new request with request and reply buffers. */
struct ipmi_request *
ipmi_alloc_request(struct ipmi_device *dev, long msgid, uint8_t addr,
    uint8_t command, size_t requestlen, size_t replylen)
{
        struct ipmi_request *req;

        req = malloc(sizeof(struct ipmi_request) + requestlen + replylen,
            M_IPMI, M_WAITOK | M_ZERO);
        ipmi_init_request(req, dev, msgid, addr, command, requestlen,
            replylen);
        return (req);
}

/* Free a request no longer in use. */
void
ipmi_free_request(struct ipmi_request *req)
{

        free(req, M_IPMI);
}

/* Store a processed request on the appropriate completion queue. */
void
ipmi_complete_request(struct ipmi_softc *sc, struct ipmi_request *req)
{
        struct ipmi_device *dev;

        IPMI_LOCK_ASSERT(sc);

        /*
         * Anonymous requests (from inside the driver) always have a
         * waiter that we awaken.
         */
        if (req->ir_owner == NULL)
                wakeup(req);
        else {
                dev = req->ir_owner;
                TAILQ_INSERT_TAIL(&dev->ipmi_completed_requests, req, ir_link);
                selwakeup(&dev->ipmi_select);
                if (dev->ipmi_closing)
                        wakeup(&dev->ipmi_requests);
        }
}

/* Perform an internal driver request. */
int
ipmi_submit_driver_request(struct ipmi_softc *sc, struct ipmi_request *req)
{

        return (sc->ipmi_driver_request(sc, req));
}

/*
 * Helper routine for polled system interfaces that use
 * ipmi_polled_enqueue_request() to queue requests.  This routine
 * waits until there is a pending request and then returns the first
 * request.  If the driver is shutting down, it returns NULL.
 */
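/*
 * A sketch of the expected consumer (illustrative only; the exact loop
 * lives in the backend drivers): a polled backend's worker thread holds
 * the IPMI lock and drains requests until ipmi_dequeue_request() returns
 * NULL at detach time:
 *
 *      IPMI_LOCK(sc);
 *      while ((req = ipmi_dequeue_request(sc)) != NULL) {
 *              ... perform the I/O for req ...
 *              ipmi_complete_request(sc, req);
 *      }
 *      IPMI_UNLOCK(sc);
 */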
struct ipmi_request *
ipmi_dequeue_request(struct ipmi_softc *sc)
{
        struct ipmi_request *req;

        IPMI_LOCK_ASSERT(sc);

        while (!sc->ipmi_detaching &&
            TAILQ_EMPTY(&sc->ipmi_pending_requests) &&
            TAILQ_EMPTY(&sc->ipmi_pending_requests_highpri))
                cv_wait(&sc->ipmi_request_added, &sc->ipmi_requests_lock);
        if (sc->ipmi_detaching)
                return (NULL);

        req = TAILQ_FIRST(&sc->ipmi_pending_requests_highpri);
        if (req != NULL)
                TAILQ_REMOVE(&sc->ipmi_pending_requests_highpri, req, ir_link);
        else {
                req = TAILQ_FIRST(&sc->ipmi_pending_requests);
                TAILQ_REMOVE(&sc->ipmi_pending_requests, req, ir_link);
        }
        return (req);
}

/* Default implementation of ipmi_enqueue_request() for polled interfaces. */
int
ipmi_polled_enqueue_request(struct ipmi_softc *sc, struct ipmi_request *req)
{

        IPMI_LOCK_ASSERT(sc);

        TAILQ_INSERT_TAIL(&sc->ipmi_pending_requests, req, ir_link);
        cv_signal(&sc->ipmi_request_added);
        return (0);
}

int
ipmi_polled_enqueue_request_highpri(struct ipmi_softc *sc,
    struct ipmi_request *req)
{

        IPMI_LOCK_ASSERT(sc);

        TAILQ_INSERT_TAIL(&sc->ipmi_pending_requests_highpri, req, ir_link);
        cv_signal(&sc->ipmi_request_added);
        return (0);
}

/*
 * Watchdog event handler.
 */

static int
ipmi_reset_watchdog(struct ipmi_softc *sc)
{
        struct ipmi_request *req;
        int error;

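        /*
         * Completion code 0x80 from Reset Watchdog Timer means the timer
         * has not been initialized (IPMI v2.0 spec section 27.5); map it
         * to ENOENT quietly rather than logging an error.
         */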
        IPMI_ALLOC_DRIVER_REQUEST(req, IPMI_ADDR(IPMI_APP_REQUEST, 0),
            IPMI_RESET_WDOG, 0, 0);
        error = ipmi_submit_driver_request(sc, req);
        if (error) {
                device_printf(sc->ipmi_dev, "Failed to reset watchdog\n");
        } else if (req->ir_compcode == 0x80) {
                error = ENOENT;
        } else if (req->ir_compcode != 0) {
                device_printf(sc->ipmi_dev, "Watchdog reset returned 0x%x\n",
                    req->ir_compcode);
                error = EINVAL;
        }
        return (error);
}

static int
ipmi_set_watchdog(struct ipmi_softc *sc, unsigned int sec)
{
        struct ipmi_request *req;
        int error;

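        /*
         * The BMC's initial countdown is a 16-bit value in 100 ms units,
         * so sec * 10 must fit in 16 bits: the longest programmable
         * timeout is 0xffff / 10 = 6553 seconds.
         */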
        if (sec > 0xffff / 10)
                return (EINVAL);

        IPMI_ALLOC_DRIVER_REQUEST(req, IPMI_ADDR(IPMI_APP_REQUEST, 0),
            IPMI_SET_WDOG, 6, 0);
        if (sec) {
                req->ir_request[0] = IPMI_SET_WD_TIMER_DONT_STOP
                    | IPMI_SET_WD_TIMER_SMS_OS;
                req->ir_request[1] = (wd_timer_actions & 0xff);
                req->ir_request[2] = min(0xff,
                    min(wd_pretimeout_countdown, (sec + 2) / 4));
                req->ir_request[3] = 0; /* Timer use expiration flags clear */
                req->ir_request[4] = (sec * 10) & 0xff;
                req->ir_request[5] = (sec * 10) >> 8;
        } else {
                req->ir_request[0] = IPMI_SET_WD_TIMER_SMS_OS;
                req->ir_request[1] = 0;
                req->ir_request[2] = 0;
                req->ir_request[3] = 0; /* Timer use expiration flags clear */
                req->ir_request[4] = 0;
                req->ir_request[5] = 0;
        }
        error = ipmi_submit_driver_request(sc, req);
        if (error) {
                device_printf(sc->ipmi_dev, "Failed to set watchdog\n");
        } else if (req->ir_compcode != 0) {
                device_printf(sc->ipmi_dev, "Watchdog set returned 0x%x\n",
                    req->ir_compcode);
                error = EINVAL;
        }
        return (error);
}

static void
ipmi_wd_event(void *arg, unsigned int cmd, int *error)
{
        struct ipmi_softc *sc = arg;
        unsigned int timeout;
        int e;

        /* Ignore requests while disabled. */
        if (!on)
                return;

        /*
         * To prevent infinite hangs, we don't let anyone pat or change
         * the watchdog when we're shutting down.  (See
         * ipmi_shutdown_event().)  However, we do want to keep patting
         * the watchdog while we are doing a coredump.
         */
        if (wd_in_shutdown) {
                if (dumping && sc->ipmi_watchdog_active)
                        ipmi_reset_watchdog(sc);
                return;
        }

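        /*
         * watchdog(9) encodes the timeout as log2 nanoseconds in the
         * WD_INTERVAL bits of cmd; convert it to whole seconds, rounding
         * sub-second timeouts up to one second.
         */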
        cmd &= WD_INTERVAL;
        if (cmd > 0 && cmd <= 63) {
                timeout = ((uint64_t)1 << cmd) / 1000000000;
                if (timeout == 0)
                        timeout = 1;
                if (timeout != sc->ipmi_watchdog_active ||
                    wd_timer_actions != sc->ipmi_watchdog_actions ||
                    wd_pretimeout_countdown != sc->ipmi_watchdog_pretimeout) {
                        e = ipmi_set_watchdog(sc, timeout);
                        if (e == 0) {
                                sc->ipmi_watchdog_active = timeout;
                                sc->ipmi_watchdog_actions = wd_timer_actions;
                                sc->ipmi_watchdog_pretimeout =
                                    wd_pretimeout_countdown;
                        } else {
                                (void)ipmi_set_watchdog(sc, 0);
                                sc->ipmi_watchdog_active = 0;
                                sc->ipmi_watchdog_actions = 0;
                                sc->ipmi_watchdog_pretimeout = 0;
                        }
                }
                if (sc->ipmi_watchdog_active != 0) {
                        e = ipmi_reset_watchdog(sc);
                        if (e == 0) {
                                *error = 0;
                        } else {
                                (void)ipmi_set_watchdog(sc, 0);
                                sc->ipmi_watchdog_active = 0;
                                sc->ipmi_watchdog_actions = 0;
                                sc->ipmi_watchdog_pretimeout = 0;
                        }
                }
        } else if (atomic_readandclear_int(&sc->ipmi_watchdog_active) != 0) {
                sc->ipmi_watchdog_actions = 0;
                sc->ipmi_watchdog_pretimeout = 0;

                e = ipmi_set_watchdog(sc, 0);
                if (e != 0 && cmd == 0)
                        *error = EOPNOTSUPP;
        }
}

static void
ipmi_shutdown_event(void *arg, int howto)
{
        struct ipmi_softc *sc = arg;

        /* Ignore event if disabled. */
        if (!on)
                return;

        /*
         * A positive wd_shutdown_countdown re-arms the watchdog; zero
         * disables it; a negative value leaves the existing state alone.
         *
         * A system halt is a special case of shutdown where
         * wd_shutdown_countdown is ignored and the watchdog is disabled
         * to ensure that the system remains halted as requested.
         *
         * Revert to using a power cycle so that the watchdog does
         * something useful here.  Having the watchdog send an NMI
         * instead is useless during shutdown, and might be ignored if an
         * NMI was already triggered.
         */

        wd_in_shutdown = true;
        if (wd_shutdown_countdown == 0 || (howto & RB_HALT) != 0) {
                /* Disable the watchdog. */
                ipmi_set_watchdog(sc, 0);
                sc->ipmi_watchdog_active = 0;
        } else if (wd_shutdown_countdown > 0) {
                /* Set the desired action and time, and reset the watchdog. */
                wd_timer_actions = IPMI_SET_WD_ACTION_POWER_CYCLE;
                ipmi_set_watchdog(sc, wd_shutdown_countdown);
                sc->ipmi_watchdog_active = wd_shutdown_countdown;
                ipmi_reset_watchdog(sc);
        }
}

static void
ipmi_power_cycle(void *arg, int howto)
{
        struct ipmi_softc *sc = arg;
        struct ipmi_request *req;

        /*
         * Ignore everything except power cycling requests.
         */
        if ((howto & RB_POWERCYCLE) == 0)
                return;

        device_printf(sc->ipmi_dev, "Power cycling using IPMI\n");

        /*
         * Send a CHASSIS_CONTROL command to the CHASSIS device, subcommand 2
         * as described in IPMI v2.0 spec section 28.3.
         */
        IPMI_ALLOC_DRIVER_REQUEST(req, IPMI_ADDR(IPMI_CHASSIS_REQUEST, 0),
            IPMI_CHASSIS_CONTROL, 1, 0);
        req->ir_request[0] = IPMI_CC_POWER_CYCLE;

        ipmi_submit_driver_request(sc, req);

        if (req->ir_error != 0 || req->ir_compcode != 0) {
                device_printf(sc->ipmi_dev,
                    "Power cycling via IPMI failed code %#x %#x\n",
                    req->ir_error, req->ir_compcode);
                return;
        }

        /*
         * BMCs are notoriously slow, so give the BMC cycle_wait seconds
         * for the power-down leg of the power cycle.  If that fails, fall
         * back to the next handler in the shutdown_final chain and/or the
         * platform failsafe.
         */
        DELAY(cycle_wait * 1000 * 1000);
        device_printf(sc->ipmi_dev, "Power cycling via IPMI timed out\n");
}

static void
ipmi_startup(void *arg)
{
        struct ipmi_softc *sc = arg;
        struct ipmi_request *req;
        device_t dev;
        int error, i;

        config_intrhook_disestablish(&sc->ipmi_ich);
        dev = sc->ipmi_dev;

        /* Initialize interface-independent state. */
        mtx_init(&sc->ipmi_requests_lock, "ipmi requests", NULL, MTX_DEF);
        mtx_init(&sc->ipmi_io_lock, "ipmi io", NULL, MTX_DEF);
        cv_init(&sc->ipmi_request_added, "ipmireq");
        TAILQ_INIT(&sc->ipmi_pending_requests_highpri);
        TAILQ_INIT(&sc->ipmi_pending_requests);

        /* Initialize interface-dependent state. */
        error = sc->ipmi_startup(sc);
        if (error) {
                device_printf(dev, "Failed to initialize interface: %d\n",
                    error);
                return;
        }

        /* Send a GET_DEVICE_ID request. */
        IPMI_ALLOC_DRIVER_REQUEST(req, IPMI_ADDR(IPMI_APP_REQUEST, 0),
            IPMI_GET_DEVICE_ID, 0, 15);

        error = ipmi_submit_driver_request(sc, req);
        if (error == EWOULDBLOCK) {
                device_printf(dev, "Timed out waiting for GET_DEVICE_ID\n");
                return;
        } else if (error) {
                device_printf(dev, "Failed GET_DEVICE_ID: %d\n", error);
                return;
        } else if (req->ir_compcode != 0) {
                device_printf(dev,
                    "Bad completion code for GET_DEVICE_ID: %d\n",
                    req->ir_compcode);
                return;
        } else if (req->ir_replylen < 5) {
                device_printf(dev, "Short reply for GET_DEVICE_ID: %d\n",
                    req->ir_replylen);
                return;
        }

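        /*
         * Decode the Get Device ID reply (IPMI v2.0 spec section 20.1):
         * ir_reply[0] is the device ID, [1] the device revision, [2] the
         * major firmware revision, [3] the BCD-encoded minor firmware
         * revision, [4] the BCD-encoded IPMI version (low nibble first),
         * and [5] the additional device support mask.
         */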
        device_printf(dev, "IPMI device rev. %d, firmware rev. %d.%d%d, "
            "version %d.%d, device support mask %#x\n",
            req->ir_reply[1] & 0x0f,
            req->ir_reply[2] & 0x7f, req->ir_reply[3] >> 4,
            req->ir_reply[3] & 0x0f,
            req->ir_reply[4] & 0x0f, req->ir_reply[4] >> 4, req->ir_reply[5]);

        sc->ipmi_dev_support = req->ir_reply[5];

        IPMI_INIT_DRIVER_REQUEST(req, IPMI_ADDR(IPMI_APP_REQUEST, 0),
            IPMI_CLEAR_FLAGS, 1, 0);

        ipmi_submit_driver_request(sc, req);

        /* XXX: Magic numbers */
        if (req->ir_compcode == 0xc0) {
                device_printf(dev, "Clear flags is busy\n");
        }
        if (req->ir_compcode == 0xc1) {
                device_printf(dev, "Clear flags illegal\n");
        }

        for (i = 0; i < 8; i++) {
                IPMI_INIT_DRIVER_REQUEST(req, IPMI_ADDR(IPMI_APP_REQUEST, 0),
                    IPMI_GET_CHANNEL_INFO, 1, 0);
                req->ir_request[0] = i;

                error = ipmi_submit_driver_request(sc, req);

                if (error != 0 || req->ir_compcode != 0)
                        break;
        }
        device_printf(dev, "Number of channels %d\n", i);

        /*
         * Probe for watchdog, but only for backends which support
         * polled driver requests.
         */
        if (wd_init_enable && sc->ipmi_driver_requests_polled) {
                IPMI_INIT_DRIVER_REQUEST(req, IPMI_ADDR(IPMI_APP_REQUEST, 0),
                    IPMI_GET_WDOG, 0, 0);

                error = ipmi_submit_driver_request(sc, req);

                if (error == 0 && req->ir_compcode == 0x00) {
                        device_printf(dev, "Attached watchdog\n");
                        /* Register the watchdog event handler. */
                        sc->ipmi_watchdog_tag = EVENTHANDLER_REGISTER(
                            watchdog_list, ipmi_wd_event, sc, 0);
                        sc->ipmi_shutdown_tag = EVENTHANDLER_REGISTER(
                            shutdown_pre_sync, ipmi_shutdown_event,
                            sc, 0);
                }
        }

        sc->ipmi_cdev = make_dev(&ipmi_cdevsw, device_get_unit(dev),
            UID_ROOT, GID_OPERATOR, 0660, "ipmi%d", device_get_unit(dev));
        if (sc->ipmi_cdev == NULL) {
                device_printf(dev, "Failed to create cdev\n");
                return;
        }
        sc->ipmi_cdev->si_drv1 = sc;

        /*
         * Set the initial watchdog state: if desired, arm a watchdog on
         * startup; if instead the watchdog device is disabled, clear any
         * existing watchdog.
         */
        if (on && wd_startup_countdown > 0) {
                if (ipmi_set_watchdog(sc, wd_startup_countdown) == 0 &&
                    ipmi_reset_watchdog(sc) == 0) {
                        sc->ipmi_watchdog_active = wd_startup_countdown;
                        sc->ipmi_watchdog_actions = wd_timer_actions;
                        sc->ipmi_watchdog_pretimeout = wd_pretimeout_countdown;
                } else
                        (void)ipmi_set_watchdog(sc, 0);
                ipmi_reset_watchdog(sc);
        } else if (!on)
                (void)ipmi_set_watchdog(sc, 0);
        /*
         * Register a shutdown_final handler to power cycle the system via
         * IPMI.  Use SHUTDOWN_PRI_LAST - 2 since we don't handle the other
         * kinds of reboot and let later handlers take them.  Only do this
         * if the BMC supports the Chassis device.
         */
        if (sc->ipmi_dev_support & IPMI_ADS_CHASSIS) {
                device_printf(dev, "Establishing power cycle handler\n");
                sc->ipmi_power_cycle_tag = EVENTHANDLER_REGISTER(
                    shutdown_final, ipmi_power_cycle, sc,
                    SHUTDOWN_PRI_LAST - 2);
        }
}

int
ipmi_attach(device_t dev)
{
        struct ipmi_softc *sc = device_get_softc(dev);
        int error;

        if (sc->ipmi_irq_res != NULL && sc->ipmi_intr != NULL) {
                error = bus_setup_intr(dev, sc->ipmi_irq_res, INTR_TYPE_MISC,
                    NULL, sc->ipmi_intr, sc, &sc->ipmi_irq);
                if (error) {
                        device_printf(dev, "can't set up interrupt\n");
                        return (error);
                }
        }

        bzero(&sc->ipmi_ich, sizeof(struct intr_config_hook));
        sc->ipmi_ich.ich_func = ipmi_startup;
        sc->ipmi_ich.ich_arg = sc;
        if (config_intrhook_establish(&sc->ipmi_ich) != 0) {
                device_printf(dev, "can't establish configuration hook\n");
                return (ENOMEM);
        }

        ipmi_attached = 1;
        return (0);
}

int
ipmi_detach(device_t dev)
{
        struct ipmi_softc *sc;

        sc = device_get_softc(dev);

        /* Fail if there are any open handles. */
        IPMI_LOCK(sc);
        if (sc->ipmi_opened) {
                IPMI_UNLOCK(sc);
                return (EBUSY);
        }
        IPMI_UNLOCK(sc);
        if (sc->ipmi_cdev)
                destroy_dev(sc->ipmi_cdev);

        /* Detach from watchdog handling and turn off watchdog. */
        if (sc->ipmi_shutdown_tag)
                EVENTHANDLER_DEREGISTER(shutdown_pre_sync,
                    sc->ipmi_shutdown_tag);
        if (sc->ipmi_watchdog_tag) {
                EVENTHANDLER_DEREGISTER(watchdog_list, sc->ipmi_watchdog_tag);
                ipmi_set_watchdog(sc, 0);
        }

        /* Detach from shutdown handling for power cycle reboot. */
        if (sc->ipmi_power_cycle_tag)
                EVENTHANDLER_DEREGISTER(shutdown_final,
                    sc->ipmi_power_cycle_tag);

        /* XXX: should use shutdown callout I think. */
        /* If the backend uses a kthread, shut it down. */
        IPMI_LOCK(sc);
        sc->ipmi_detaching = 1;
        if (sc->ipmi_kthread) {
                cv_broadcast(&sc->ipmi_request_added);
                msleep(sc->ipmi_kthread, &sc->ipmi_requests_lock, 0,
                    "ipmi_wait", 0);
        }
        IPMI_UNLOCK(sc);
        if (sc->ipmi_irq)
                bus_teardown_intr(dev, sc->ipmi_irq_res, sc->ipmi_irq);

        ipmi_release_resources(dev);
        mtx_destroy(&sc->ipmi_io_lock);
        mtx_destroy(&sc->ipmi_requests_lock);
        return (0);
}

void
ipmi_release_resources(device_t dev)
{
        struct ipmi_softc *sc;
        int i;

        sc = device_get_softc(dev);
        if (sc->ipmi_irq)
                bus_teardown_intr(dev, sc->ipmi_irq_res, sc->ipmi_irq);
        if (sc->ipmi_irq_res)
                bus_release_resource(dev, SYS_RES_IRQ, sc->ipmi_irq_rid,
                    sc->ipmi_irq_res);
        for (i = 0; i < MAX_RES; i++)
                if (sc->ipmi_io_res[i])
                        bus_release_resource(dev, sc->ipmi_io_type,
                            sc->ipmi_io_rid + i, sc->ipmi_io_res[i]);
}

/* XXX: Why? */
static void
ipmi_unload(void *arg)
{
        device_t *devs;
        int count;
        int i;

        if (devclass_get_devices(devclass_find("ipmi"), &devs, &count) != 0)
                return;
        for (i = 0; i < count; i++)
                device_delete_child(device_get_parent(devs[i]), devs[i]);
        free(devs, M_TEMP);
}
SYSUNINIT(ipmi_unload, SI_SUB_DRIVERS, SI_ORDER_FIRST, ipmi_unload, NULL);

#ifdef IPMI_DEBUG
static void
dump_buf(u_char *data, int len)
{
        char buf[20];
        char line[1024];
        char temp[30];
        int count = 0;
        int i = 0;

        printf("Address %p len %d\n", data, len);
        if (len > 256)
                len = 256;
        line[0] = '\000';
        for (; len > 0; len--, data++) {
                sprintf(temp, "%02x ", *data);
                strcat(line, temp);
                if (*data >= ' ' && *data <= '~')
                        buf[count] = *data;
                else
                        buf[count] = '.';
                if (++count == 16) {
                        buf[count] = '\000';
                        count = 0;
                        printf(" %3x %s %s\n", i, line, buf);
                        i += 16;
                        line[0] = '\000';
                }
        }
        buf[count] = '\000';

        for (; count != 16; count++) {
                strcat(line, "   ");
        }
        printf(" %3x %s %s\n", i, line, buf);
}
#endif