/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2006 IronPort Systems Inc. <ambrisko@ironport.com>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/condvar.h>
#include <sys/conf.h>
#include <sys/eventhandler.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/poll.h>
#include <sys/reboot.h>
#include <sys/rman.h>
#include <sys/selinfo.h>
#include <sys/sysctl.h>
#include <sys/watchdog.h>

#ifdef LOCAL_MODULE
#include <ipmi.h>
#include <ipmivars.h>
#else
#include <sys/ipmi.h>
#include <dev/ipmi/ipmivars.h>
#endif

#ifdef IPMICTL_SEND_COMMAND_32
#include <sys/abi_compat.h>
#endif

/*
 * Driver request structures are allocated on the stack via alloca() to
 * avoid calling malloc(), especially for the watchdog handler.
 * To avoid too much stack growth, a previously allocated structure can
 * be reused via IPMI_INIT_DRIVER_REQUEST(), but the caller should ensure
 * that there is adequate reply/request space in the original allocation.
 */
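/*
 * Illustrative sketch (not compiled in): a typical driver-internal
 * exchange using these macros, mirroring the GET_DEVICE_ID probe in
 * ipmi_startup() below:
 *
 *	struct ipmi_request *req;
 *	int error;
 *
 *	IPMI_ALLOC_DRIVER_REQUEST(req, IPMI_ADDR(IPMI_APP_REQUEST, 0),
 *	    IPMI_GET_DEVICE_ID, 0, 15);
 *	error = ipmi_submit_driver_request(sc, req, MAX_TIMEOUT);
 *
 * The request header, request buffer, and reply buffer share a single
 * alloca()'d block, so nothing needs to be freed when the caller returns.
 */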
#define	IPMI_INIT_DRIVER_REQUEST(req, addr, cmd, reqlen, replylen)	\
	bzero((req), sizeof(struct ipmi_request));			\
	ipmi_init_request((req), NULL, 0, (addr), (cmd), (reqlen), (replylen))

#define	IPMI_ALLOC_DRIVER_REQUEST(req, addr, cmd, reqlen, replylen)	\
	(req) = __builtin_alloca(sizeof(struct ipmi_request) +		\
	    (reqlen) + (replylen));					\
	IPMI_INIT_DRIVER_REQUEST((req), (addr), (cmd), (reqlen),	\
	    (replylen))

#ifdef IPMB
static int ipmi_ipmb_checksum(u_char *, int);
static int ipmi_ipmb_send_message(device_t, u_char, u_char, u_char,
    u_char, u_char *, int);
#endif

#ifdef IPMI_DEBUG
static void dump_buf(u_char *, int);
#endif

static d_ioctl_t ipmi_ioctl;
static d_poll_t ipmi_poll;
static d_open_t ipmi_open;
static void ipmi_dtor(void *arg);

int ipmi_attached = 0;

static int on = 1;
static bool wd_in_shutdown = false;
static int wd_timer_actions = IPMI_SET_WD_ACTION_POWER_CYCLE;
static int wd_shutdown_countdown = 0;	/* sec */
static int wd_startup_countdown = 0;	/* sec */
static int wd_pretimeout_countdown = 120; /* sec */
static int cycle_wait = 10;		/* sec */

static SYSCTL_NODE(_hw, OID_AUTO, ipmi, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "IPMI driver parameters");
SYSCTL_INT(_hw_ipmi, OID_AUTO, on, CTLFLAG_RWTUN,
    &on, 0, "Enable the IPMI driver");
SYSCTL_INT(_hw_ipmi, OID_AUTO, wd_timer_actions, CTLFLAG_RW,
    &wd_timer_actions, 0,
    "IPMI watchdog timer actions (including pre-timeout interrupt)");
SYSCTL_INT(_hw_ipmi, OID_AUTO, wd_shutdown_countdown, CTLFLAG_RW,
    &wd_shutdown_countdown, 0,
    "IPMI watchdog countdown for shutdown (seconds)");
SYSCTL_INT(_hw_ipmi, OID_AUTO, wd_startup_countdown, CTLFLAG_RDTUN,
    &wd_startup_countdown, 0,
    "IPMI watchdog countdown initialized during startup (seconds)");
SYSCTL_INT(_hw_ipmi, OID_AUTO, wd_pretimeout_countdown, CTLFLAG_RW,
    &wd_pretimeout_countdown, 0,
    "IPMI watchdog pre-timeout countdown (seconds)");
SYSCTL_INT(_hw_ipmi, OID_AUTO, cycle_wait, CTLFLAG_RWTUN,
    &cycle_wait, 0,
    "IPMI power cycle on reboot delay time (seconds)");

static struct cdevsw ipmi_cdevsw = {
	.d_version =	D_VERSION,
	.d_open =	ipmi_open,
	.d_ioctl =	ipmi_ioctl,
	.d_poll =	ipmi_poll,
	.d_name =	"ipmi",
};

static MALLOC_DEFINE(M_IPMI, "ipmi", "ipmi");
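/*
 * Illustrative usage (sketch): the knobs above are ordinary sysctls and
 * loader tunables, e.g.:
 *
 *	# sysctl hw.ipmi.wd_shutdown_countdown=300
 *	# echo 'hw.ipmi.wd_startup_countdown="120"' >> /boot/loader.conf
 *
 * "on" and "cycle_wait" are RWTUN (settable at boot or runtime),
 * "wd_startup_countdown" is RDTUN (boot-time only), and the remaining
 * watchdog knobs may be changed at runtime.
 */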
static int
ipmi_open(struct cdev *cdev, int flags, int fmt, struct thread *td)
{
	struct ipmi_device *dev;
	struct ipmi_softc *sc;
	int error;

	if (!on)
		return (ENOENT);

	/* Initialize the per file descriptor data. */
	dev = malloc(sizeof(struct ipmi_device), M_IPMI, M_WAITOK | M_ZERO);
	error = devfs_set_cdevpriv(dev, ipmi_dtor);
	if (error) {
		free(dev, M_IPMI);
		return (error);
	}

	sc = cdev->si_drv1;
	TAILQ_INIT(&dev->ipmi_completed_requests);
	dev->ipmi_address = IPMI_BMC_SLAVE_ADDR;
	dev->ipmi_lun = IPMI_BMC_SMS_LUN;
	dev->ipmi_softc = sc;
	IPMI_LOCK(sc);
	sc->ipmi_opened++;
	IPMI_UNLOCK(sc);

	return (0);
}

static int
ipmi_poll(struct cdev *cdev, int poll_events, struct thread *td)
{
	struct ipmi_device *dev;
	struct ipmi_softc *sc;
	int revents = 0;

	if (devfs_get_cdevpriv((void **)&dev))
		return (0);

	sc = cdev->si_drv1;
	IPMI_LOCK(sc);
	if (poll_events & (POLLIN | POLLRDNORM)) {
		if (!TAILQ_EMPTY(&dev->ipmi_completed_requests))
			revents |= poll_events & (POLLIN | POLLRDNORM);
		if (dev->ipmi_requests == 0)
			revents |= POLLERR;
	}

	if (revents == 0) {
		if (poll_events & (POLLIN | POLLRDNORM))
			selrecord(td, &dev->ipmi_select);
	}
	IPMI_UNLOCK(sc);

	return (revents);
}

static void
ipmi_purge_completed_requests(struct ipmi_device *dev)
{
	struct ipmi_request *req;

	while (!TAILQ_EMPTY(&dev->ipmi_completed_requests)) {
		req = TAILQ_FIRST(&dev->ipmi_completed_requests);
		TAILQ_REMOVE(&dev->ipmi_completed_requests, req, ir_link);
		dev->ipmi_requests--;
		ipmi_free_request(req);
	}
}

static void
ipmi_dtor(void *arg)
{
	struct ipmi_request *req, *nreq;
	struct ipmi_device *dev;
	struct ipmi_softc *sc;

	dev = arg;
	sc = dev->ipmi_softc;

	IPMI_LOCK(sc);
	if (dev->ipmi_requests) {
		/* Throw away any pending requests for this device. */
		TAILQ_FOREACH_SAFE(req, &sc->ipmi_pending_requests, ir_link,
		    nreq) {
			if (req->ir_owner == dev) {
				TAILQ_REMOVE(&sc->ipmi_pending_requests, req,
				    ir_link);
				dev->ipmi_requests--;
				ipmi_free_request(req);
			}
		}

		/* Throw away any pending completed requests for this device. */
		ipmi_purge_completed_requests(dev);

		/*
		 * If we still have outstanding requests, they must be stuck
		 * in an interface driver, so wait for those to drain.
		 */
		dev->ipmi_closing = 1;
		while (dev->ipmi_requests > 0) {
			msleep(&dev->ipmi_requests, &sc->ipmi_requests_lock,
			    PWAIT, "ipmidrain", 0);
			ipmi_purge_completed_requests(dev);
		}
	}
	sc->ipmi_opened--;
	IPMI_UNLOCK(sc);

	/* Cleanup. */
	free(dev, M_IPMI);
}

#ifdef IPMB
static int
ipmi_ipmb_checksum(u_char *data, int len)
{
	u_char sum = 0;

	for (; len; len--) {
		sum += *data++;
	}
	return (-sum);
}
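/*
 * Worked example (illustrative): for the two header bytes { 0x52, 0x18 },
 * the sum is 0x6a and the checksum is -(0x6a) = 0x96 (mod 256), so that
 * 0x52 + 0x18 + 0x96 == 0x00 (mod 256).  This is the IPMB 2's-complement
 * checksum rule from the IPMI spec: data plus checksum sums to zero.
 */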
/* XXX: Needs work */
static int
ipmi_ipmb_send_message(device_t dev, u_char channel, u_char netfn,
    u_char command, u_char seq, u_char *data, int data_len)
{
	struct ipmi_softc *sc = device_get_softc(dev);
	struct ipmi_request *req;
	u_char slave_addr = 0x52;
	int error;

	IPMI_ALLOC_DRIVER_REQUEST(req, IPMI_ADDR(IPMI_APP_REQUEST, 0),
	    IPMI_SEND_MSG, data_len + 8, 0);
	req->ir_request[0] = channel;
	req->ir_request[1] = slave_addr;
	req->ir_request[2] = IPMI_ADDR(netfn, 0);
	req->ir_request[3] = ipmi_ipmb_checksum(&req->ir_request[1], 2);
	req->ir_request[4] = sc->ipmi_address;
	req->ir_request[5] = IPMI_ADDR(seq, sc->ipmi_lun);
	req->ir_request[6] = command;

	bcopy(data, &req->ir_request[7], data_len);
	req->ir_request[data_len + 7] = ipmi_ipmb_checksum(&req->ir_request[4],
	    data_len + 3);

	ipmi_submit_driver_request(sc, req, 0);
	error = req->ir_error;

	return (error);
}

static int
ipmi_handle_attn(struct ipmi_softc *sc)
{
	struct ipmi_request *req;
	int error;

	device_printf(sc->ipmi_dev, "BMC has a message\n");
	IPMI_ALLOC_DRIVER_REQUEST(req, IPMI_ADDR(IPMI_APP_REQUEST, 0),
	    IPMI_GET_MSG_FLAGS, 0, 1);

	ipmi_submit_driver_request(sc, req, 0);

	if (req->ir_error == 0 && req->ir_compcode == 0) {
		if (req->ir_reply[0] & IPMI_MSG_BUFFER_FULL) {
			device_printf(sc->ipmi_dev, "message buffer full\n");
		}
		if (req->ir_reply[0] & IPMI_WDT_PRE_TIMEOUT) {
			device_printf(sc->ipmi_dev,
			    "watchdog about to go off\n");
		}
		if (req->ir_reply[0] & IPMI_MSG_AVAILABLE) {
			IPMI_ALLOC_DRIVER_REQUEST(req,
			    IPMI_ADDR(IPMI_APP_REQUEST, 0), IPMI_GET_MSG, 0,
			    16);

			/* Fetch and discard the pending message. */
			ipmi_submit_driver_request(sc, req, 0);

			device_printf(sc->ipmi_dev, "throw out message ");
#ifdef IPMI_DEBUG
			dump_buf(req->ir_reply, 16);
#endif
		}
	}
	error = req->ir_error;

	return (error);
}
#endif
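/*
 * Layout sketch (from the IPMI spec's Send Message/IPMB framing; for
 * reference only).  The request built in ipmi_ipmb_send_message() above is:
 *
 *	ir_request[0]		channel
 *	ir_request[1]		rsSA (responder slave address)
 *	ir_request[2]		netFn << 2 | rsLUN
 *	ir_request[3]		checksum over bytes 1-2
 *	ir_request[4]		rqSA (our address)
 *	ir_request[5]		rqSeq << 2 | rqLUN
 *	ir_request[6]		command
 *	ir_request[7..7+n-1]	data (n == data_len)
 *	ir_request[7+n]		checksum over bytes 4..7+n-1
 */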
static int
ipmi_ioctl(struct cdev *cdev, u_long cmd, caddr_t data,
    int flags, struct thread *td)
{
	struct ipmi_softc *sc;
	struct ipmi_device *dev;
	struct ipmi_request *kreq;
	struct ipmi_req *req = (struct ipmi_req *)data;
	struct ipmi_recv *recv = (struct ipmi_recv *)data;
	struct ipmi_addr addr;
#ifdef IPMICTL_SEND_COMMAND_32
	struct ipmi_req32 *req32 = (struct ipmi_req32 *)data;
	struct ipmi_recv32 *recv32 = (struct ipmi_recv32 *)data;
	union {
		struct ipmi_req req;
		struct ipmi_recv recv;
	} thunk32;
#endif
	int error, len;

	error = devfs_get_cdevpriv((void **)&dev);
	if (error)
		return (error);

	sc = cdev->si_drv1;

#ifdef IPMICTL_SEND_COMMAND_32
	/* Convert 32-bit structures to native. */
	switch (cmd) {
	case IPMICTL_SEND_COMMAND_32:
		req = &thunk32.req;
		req->addr = PTRIN(req32->addr);
		req->addr_len = req32->addr_len;
		req->msgid = req32->msgid;
		req->msg.netfn = req32->msg.netfn;
		req->msg.cmd = req32->msg.cmd;
		req->msg.data_len = req32->msg.data_len;
		req->msg.data = PTRIN(req32->msg.data);
		break;
	case IPMICTL_RECEIVE_MSG_TRUNC_32:
	case IPMICTL_RECEIVE_MSG_32:
		recv = &thunk32.recv;
		recv->addr = PTRIN(recv32->addr);
		recv->addr_len = recv32->addr_len;
		recv->msg.data_len = recv32->msg.data_len;
		recv->msg.data = PTRIN(recv32->msg.data);
		break;
	}
#endif

	switch (cmd) {
#ifdef IPMICTL_SEND_COMMAND_32
	case IPMICTL_SEND_COMMAND_32:
#endif
	case IPMICTL_SEND_COMMAND:
		/*
		 * XXX: Need to add proper handling of this.
		 */
		error = copyin(req->addr, &addr, sizeof(addr));
		if (error)
			return (error);

		IPMI_LOCK(sc);
		/* clear out old stuff in queue of stuff done */
		/* XXX: This seems odd. */
		while ((kreq = TAILQ_FIRST(&dev->ipmi_completed_requests))) {
			TAILQ_REMOVE(&dev->ipmi_completed_requests, kreq,
			    ir_link);
			dev->ipmi_requests--;
			ipmi_free_request(kreq);
		}
		IPMI_UNLOCK(sc);

		kreq = ipmi_alloc_request(dev, req->msgid,
		    IPMI_ADDR(req->msg.netfn, 0), req->msg.cmd,
		    req->msg.data_len, IPMI_MAX_RX);
		error = copyin(req->msg.data, kreq->ir_request,
		    req->msg.data_len);
		if (error) {
			ipmi_free_request(kreq);
			return (error);
		}
		IPMI_LOCK(sc);
		dev->ipmi_requests++;
		error = sc->ipmi_enqueue_request(sc, kreq);
		IPMI_UNLOCK(sc);
		if (error)
			return (error);
		break;
#ifdef IPMICTL_SEND_COMMAND_32
	case IPMICTL_RECEIVE_MSG_TRUNC_32:
	case IPMICTL_RECEIVE_MSG_32:
#endif
	case IPMICTL_RECEIVE_MSG_TRUNC:
	case IPMICTL_RECEIVE_MSG:
		error = copyin(recv->addr, &addr, sizeof(addr));
		if (error)
			return (error);

		IPMI_LOCK(sc);
		kreq = TAILQ_FIRST(&dev->ipmi_completed_requests);
		if (kreq == NULL) {
			IPMI_UNLOCK(sc);
			return (EAGAIN);
		}
		addr.channel = IPMI_BMC_CHANNEL;
		/* XXX */
		recv->recv_type = IPMI_RESPONSE_RECV_TYPE;
		recv->msgid = kreq->ir_msgid;
		recv->msg.netfn = IPMI_REPLY_ADDR(kreq->ir_addr) >> 2;
		recv->msg.cmd = kreq->ir_command;
		error = kreq->ir_error;
		if (error) {
			TAILQ_REMOVE(&dev->ipmi_completed_requests, kreq,
			    ir_link);
			dev->ipmi_requests--;
			IPMI_UNLOCK(sc);
			ipmi_free_request(kreq);
			return (error);
		}
		len = kreq->ir_replylen + 1;
		if (recv->msg.data_len < len &&
		    (cmd == IPMICTL_RECEIVE_MSG
#ifdef IPMICTL_RECEIVE_MSG_32
		    || cmd == IPMICTL_RECEIVE_MSG_32
#endif
		    )) {
			IPMI_UNLOCK(sc);
			return (EMSGSIZE);
		}
		TAILQ_REMOVE(&dev->ipmi_completed_requests, kreq, ir_link);
		dev->ipmi_requests--;
		IPMI_UNLOCK(sc);
		len = min(recv->msg.data_len, len);
		recv->msg.data_len = len;
		error = copyout(&addr, recv->addr, sizeof(addr));
		if (error == 0)
			error = copyout(&kreq->ir_compcode, recv->msg.data, 1);
		if (error == 0)
			error = copyout(kreq->ir_reply, recv->msg.data + 1,
			    len - 1);
		ipmi_free_request(kreq);
		if (error)
			return (error);
		break;
	case IPMICTL_SET_MY_ADDRESS_CMD:
		IPMI_LOCK(sc);
		dev->ipmi_address = *(int *)data;
		IPMI_UNLOCK(sc);
		break;
	case IPMICTL_GET_MY_ADDRESS_CMD:
		IPMI_LOCK(sc);
		*(int *)data = dev->ipmi_address;
		IPMI_UNLOCK(sc);
		break;
	case IPMICTL_SET_MY_LUN_CMD:
		IPMI_LOCK(sc);
		dev->ipmi_lun = *(int *)data & 0x3;
		IPMI_UNLOCK(sc);
		break;
	case IPMICTL_GET_MY_LUN_CMD:
		IPMI_LOCK(sc);
		*(int *)data = dev->ipmi_lun;
		IPMI_UNLOCK(sc);
		break;
	case IPMICTL_SET_GETS_EVENTS_CMD:
		/*
		device_printf(sc->ipmi_dev,
		    "IPMICTL_SET_GETS_EVENTS_CMD NA\n");
		*/
		break;
	case IPMICTL_REGISTER_FOR_CMD:
	case IPMICTL_UNREGISTER_FOR_CMD:
		return (EOPNOTSUPP);
	default:
		device_printf(sc->ipmi_dev, "Unknown IOCTL %lX\n", cmd);
		return (ENOIOCTL);
	}

#ifdef IPMICTL_SEND_COMMAND_32
	/* Update changed fields in 32-bit structures. */
	switch (cmd) {
	case IPMICTL_RECEIVE_MSG_TRUNC_32:
	case IPMICTL_RECEIVE_MSG_32:
		recv32->recv_type = recv->recv_type;
		recv32->msgid = recv->msgid;
		recv32->msg.netfn = recv->msg.netfn;
		recv32->msg.cmd = recv->msg.cmd;
		recv32->msg.data_len = recv->msg.data_len;
		break;
	}
#endif
	return (0);
}
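/*
 * Illustrative userland sketch (assumes the OpenIPMI-compatible API in
 * <sys/ipmi.h>; error handling and buffer setup abbreviated):
 *
 *	int fd = open("/dev/ipmi0", O_RDWR);
 *	struct ipmi_system_interface_addr bmc = {
 *		.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE,
 *		.channel = IPMI_BMC_CHANNEL,
 *	};
 *	struct ipmi_req req = {
 *		.addr = (char *)&bmc, .addr_len = sizeof(bmc), .msgid = 1,
 *		.msg = { .netfn = IPMI_APP_REQUEST,
 *			 .cmd = IPMI_GET_DEVICE_ID },
 *	};
 *	ioctl(fd, IPMICTL_SEND_COMMAND, &req);
 *
 * then wait for POLLIN via poll(2) and issue IPMICTL_RECEIVE_MSG with a
 * struct ipmi_recv whose addr and msg.data point at caller buffers; the
 * first reply byte is the completion code.
 */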
/*
 * Request management.
 */

static __inline void
ipmi_init_request(struct ipmi_request *req, struct ipmi_device *dev, long msgid,
    uint8_t addr, uint8_t command, size_t requestlen, size_t replylen)
{

	req->ir_owner = dev;
	req->ir_msgid = msgid;
	req->ir_addr = addr;
	req->ir_command = command;
	if (requestlen) {
		req->ir_request = (char *)&req[1];
		req->ir_requestlen = requestlen;
	}
	if (replylen) {
		req->ir_reply = (char *)&req[1] + requestlen;
		req->ir_replybuflen = replylen;
	}
}

/* Allocate a new request with request and reply buffers. */
struct ipmi_request *
ipmi_alloc_request(struct ipmi_device *dev, long msgid, uint8_t addr,
    uint8_t command, size_t requestlen, size_t replylen)
{
	struct ipmi_request *req;

	req = malloc(sizeof(struct ipmi_request) + requestlen + replylen,
	    M_IPMI, M_WAITOK | M_ZERO);
	ipmi_init_request(req, dev, msgid, addr, command, requestlen, replylen);
	return (req);
}

/* Free a request no longer in use. */
void
ipmi_free_request(struct ipmi_request *req)
{

	free(req, M_IPMI);
}

/* Store a processed request on the appropriate completion queue. */
void
ipmi_complete_request(struct ipmi_softc *sc, struct ipmi_request *req)
{
	struct ipmi_device *dev;

	IPMI_LOCK_ASSERT(sc);

	/*
	 * Anonymous requests (from inside the driver) always have a
	 * waiter that we awaken.
	 */
	if (req->ir_owner == NULL)
		wakeup(req);
	else {
		dev = req->ir_owner;
		TAILQ_INSERT_TAIL(&dev->ipmi_completed_requests, req, ir_link);
		selwakeup(&dev->ipmi_select);
		if (dev->ipmi_closing)
			wakeup(&dev->ipmi_requests);
	}
}

/* Perform an internal driver request. */
int
ipmi_submit_driver_request(struct ipmi_softc *sc, struct ipmi_request *req,
    int timo)
{

	return (sc->ipmi_driver_request(sc, req, timo));
}

/*
 * Helper routine for polled system interfaces that use
 * ipmi_polled_enqueue_request() to queue requests.  This routine waits
 * until there is a pending request and then returns the first
 * request.  If the driver is shutting down, it returns NULL.
 */
struct ipmi_request *
ipmi_dequeue_request(struct ipmi_softc *sc)
{
	struct ipmi_request *req;

	IPMI_LOCK_ASSERT(sc);

	while (!sc->ipmi_detaching && TAILQ_EMPTY(&sc->ipmi_pending_requests))
		cv_wait(&sc->ipmi_request_added, &sc->ipmi_requests_lock);
	if (sc->ipmi_detaching)
		return (NULL);

	req = TAILQ_FIRST(&sc->ipmi_pending_requests);
	TAILQ_REMOVE(&sc->ipmi_pending_requests, req, ir_link);
	return (req);
}

/* Default implementation of ipmi_enqueue_request() for polled interfaces. */
int
ipmi_polled_enqueue_request(struct ipmi_softc *sc, struct ipmi_request *req)
{

	IPMI_LOCK_ASSERT(sc);

	TAILQ_INSERT_TAIL(&sc->ipmi_pending_requests, req, ir_link);
	cv_signal(&sc->ipmi_request_added);
	return (0);
}
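/*
 * Illustrative sketch (not the verbatim backend code): a polled backend
 * kthread consumes the queue with the two helpers above, roughly:
 *
 *	IPMI_LOCK(sc);
 *	while ((req = ipmi_dequeue_request(sc)) != NULL) {
 *		... talk to the hardware, possibly dropping the lock ...
 *		ipmi_complete_request(sc, req);
 *	}
 *	IPMI_UNLOCK(sc);
 *
 * ipmi_dequeue_request() returning NULL signals detach; see
 * ipmi_detach() below, which sets ipmi_detaching and broadcasts
 * ipmi_request_added to wake the loop.
 */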
/*
 * Watchdog event handler.
 */

static int
ipmi_reset_watchdog(struct ipmi_softc *sc)
{
	struct ipmi_request *req;
	int error;

	IPMI_ALLOC_DRIVER_REQUEST(req, IPMI_ADDR(IPMI_APP_REQUEST, 0),
	    IPMI_RESET_WDOG, 0, 0);
	error = ipmi_submit_driver_request(sc, req, 0);
	if (error)
		device_printf(sc->ipmi_dev, "Failed to reset watchdog\n");
	return (error);
}

static int
ipmi_set_watchdog(struct ipmi_softc *sc, unsigned int sec)
{
	struct ipmi_request *req;
	int error;

	/* The BMC counts the timeout in 100 ms ticks in a 16-bit field. */
	if (sec > 0xffff / 10)
		return (EINVAL);

	IPMI_ALLOC_DRIVER_REQUEST(req, IPMI_ADDR(IPMI_APP_REQUEST, 0),
	    IPMI_SET_WDOG, 6, 0);
	if (sec) {
		req->ir_request[0] = IPMI_SET_WD_TIMER_DONT_STOP
		    | IPMI_SET_WD_TIMER_SMS_OS;
		req->ir_request[1] = (wd_timer_actions & 0xff);
		req->ir_request[2] = (wd_pretimeout_countdown & 0xff);
		req->ir_request[3] = 0;	/* Timer use */
		req->ir_request[4] = (sec * 10) & 0xff;
		req->ir_request[5] = (sec * 10) >> 8;
	} else {
		req->ir_request[0] = IPMI_SET_WD_TIMER_SMS_OS;
		req->ir_request[1] = 0;
		req->ir_request[2] = 0;
		req->ir_request[3] = 0;	/* Timer use */
		req->ir_request[4] = 0;
		req->ir_request[5] = 0;
	}
	error = ipmi_submit_driver_request(sc, req, 0);
	if (error)
		device_printf(sc->ipmi_dev, "Failed to set watchdog\n");
	return (error);
}

static void
ipmi_wd_event(void *arg, unsigned int cmd, int *error)
{
	struct ipmi_softc *sc = arg;
	unsigned int timeout;
	int e;

	/* Ignore requests while disabled. */
	if (!on)
		return;

	/*
	 * To prevent infinite hangs, we don't let anyone pat or change
	 * the watchdog when we're shutting down.  (See ipmi_shutdown_event().)
	 * However, we do want to keep patting the watchdog while we are doing
	 * a coredump.
	 */
	if (wd_in_shutdown) {
		if (dumping && sc->ipmi_watchdog_active)
			ipmi_reset_watchdog(sc);
		return;
	}

	cmd &= WD_INTERVAL;
	if (cmd > 0 && cmd <= 63) {
		timeout = ((uint64_t)1 << cmd) / 1000000000;
		if (timeout == 0)
			timeout = 1;
		if (timeout != sc->ipmi_watchdog_active ||
		    wd_timer_actions != sc->ipmi_watchdog_actions ||
		    wd_pretimeout_countdown != sc->ipmi_watchdog_pretimeout) {
			e = ipmi_set_watchdog(sc, timeout);
			if (e == 0) {
				sc->ipmi_watchdog_active = timeout;
				sc->ipmi_watchdog_actions = wd_timer_actions;
				sc->ipmi_watchdog_pretimeout =
				    wd_pretimeout_countdown;
			} else {
				(void)ipmi_set_watchdog(sc, 0);
				sc->ipmi_watchdog_active = 0;
				sc->ipmi_watchdog_actions = 0;
				sc->ipmi_watchdog_pretimeout = 0;
			}
		}
		if (sc->ipmi_watchdog_active != 0) {
			e = ipmi_reset_watchdog(sc);
			if (e == 0) {
				*error = 0;
			} else {
				(void)ipmi_set_watchdog(sc, 0);
				sc->ipmi_watchdog_active = 0;
				sc->ipmi_watchdog_actions = 0;
				sc->ipmi_watchdog_pretimeout = 0;
			}
		}
	} else if (atomic_readandclear_int(&sc->ipmi_watchdog_active) != 0) {
		sc->ipmi_watchdog_actions = 0;
		sc->ipmi_watchdog_pretimeout = 0;

		e = ipmi_set_watchdog(sc, 0);
		if (e != 0 && cmd == 0)
			*error = EOPNOTSUPP;
	}
}
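/*
 * Worked example (illustrative): watchdog(9) encodes the timeout as a
 * power of two in nanoseconds in the low WD_INTERVAL bits, so e.g.
 * cmd = 35 (a WD_TO_32SEC-style interval) gives
 *
 *	timeout = (1ULL << 35) / 1000000000 = 34 seconds,
 *
 * and any interval shorter than one second (cmd <= 29) is rounded up
 * to 1 second above.
 */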
static void
ipmi_shutdown_event(void *arg, unsigned int cmd, int *error)
{
	struct ipmi_softc *sc = arg;

	/* Ignore event if disabled. */
	if (!on)
		return;

	/*
	 * A positive wd_shutdown_countdown re-arms the watchdog; zero
	 * disables it; a negative value leaves the existing state alone.
	 *
	 * Revert to using a power cycle to ensure that the watchdog will
	 * do something useful here.  Having the watchdog send an NMI
	 * instead is useless during shutdown, and might be ignored if an
	 * NMI already triggered.
	 */

	wd_in_shutdown = true;
	if (wd_shutdown_countdown == 0) {
		/* disable watchdog */
		ipmi_set_watchdog(sc, 0);
		sc->ipmi_watchdog_active = 0;
	} else if (wd_shutdown_countdown > 0) {
		/* set desired action and time, and reset the watchdog */
		wd_timer_actions = IPMI_SET_WD_ACTION_POWER_CYCLE;
		ipmi_set_watchdog(sc, wd_shutdown_countdown);
		sc->ipmi_watchdog_active = wd_shutdown_countdown;
		ipmi_reset_watchdog(sc);
	}
}

static void
ipmi_power_cycle(void *arg, int howto)
{
	struct ipmi_softc *sc = arg;
	struct ipmi_request *req;

	/*
	 * Ignore everything except power cycling requests.
	 */
	if ((howto & RB_POWERCYCLE) == 0)
		return;

	device_printf(sc->ipmi_dev, "Power cycling using IPMI\n");

	/*
	 * Send a CHASSIS_CONTROL command to the CHASSIS device, subcommand 2
	 * (power cycle) as described in IPMI v2.0 spec section 28.3.
	 */
	IPMI_ALLOC_DRIVER_REQUEST(req, IPMI_ADDR(IPMI_CHASSIS_REQUEST, 0),
	    IPMI_CHASSIS_CONTROL, 1, 0);
	req->ir_request[0] = IPMI_CC_POWER_CYCLE;

	ipmi_submit_driver_request(sc, req, MAX_TIMEOUT);

	if (req->ir_error != 0 || req->ir_compcode != 0) {
		device_printf(sc->ipmi_dev,
		    "Power cycling via IPMI failed code %#x %#x\n",
		    req->ir_error, req->ir_compcode);
		return;
	}

	/*
	 * BMCs are notoriously slow, so give the BMC cycle_wait seconds
	 * for the power-down leg of the power cycle.  If that fails, fall
	 * back to the next handler in the shutdown_final chain and/or the
	 * platform failsafe.
	 */
	DELAY(cycle_wait * 1000 * 1000);
	device_printf(sc->ipmi_dev, "Power cycling via IPMI timed out\n");
}
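/*
 * For reference (IPMI v2.0 spec, Chassis Control, section 28.3), the
 * one-byte subcommand sent above selects among:
 *
 *	0x0 power down		0x1 power up	0x2 power cycle
 *	0x3 hard reset		0x4 pulse diagnostic interrupt
 *	0x5 soft shutdown via ACPI
 *
 * IPMI_CC_POWER_CYCLE corresponds to subcommand 0x2.
 */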
static void
ipmi_startup(void *arg)
{
	struct ipmi_softc *sc = arg;
	struct ipmi_request *req;
	device_t dev;
	int error, i;

	config_intrhook_disestablish(&sc->ipmi_ich);
	dev = sc->ipmi_dev;

	/* Initialize interface-independent state. */
	mtx_init(&sc->ipmi_requests_lock, "ipmi requests", NULL, MTX_DEF);
	mtx_init(&sc->ipmi_io_lock, "ipmi io", NULL, MTX_DEF);
	cv_init(&sc->ipmi_request_added, "ipmireq");
	TAILQ_INIT(&sc->ipmi_pending_requests);

	/* Initialize interface-dependent state. */
	error = sc->ipmi_startup(sc);
	if (error) {
		device_printf(dev, "Failed to initialize interface: %d\n",
		    error);
		return;
	}

	/* Send a GET_DEVICE_ID request. */
	IPMI_ALLOC_DRIVER_REQUEST(req, IPMI_ADDR(IPMI_APP_REQUEST, 0),
	    IPMI_GET_DEVICE_ID, 0, 15);

	error = ipmi_submit_driver_request(sc, req, MAX_TIMEOUT);
	if (error == EWOULDBLOCK) {
		device_printf(dev, "Timed out waiting for GET_DEVICE_ID\n");
		return;
	} else if (error) {
		device_printf(dev, "Failed GET_DEVICE_ID: %d\n", error);
		return;
	} else if (req->ir_compcode != 0) {
		device_printf(dev,
		    "Bad completion code for GET_DEVICE_ID: %d\n",
		    req->ir_compcode);
		return;
	} else if (req->ir_replylen < 5) {
		device_printf(dev, "Short reply for GET_DEVICE_ID: %d\n",
		    req->ir_replylen);
		return;
	}

	device_printf(dev, "IPMI device rev. %d, firmware rev. %d.%d%d, "
	    "version %d.%d, device support mask %#x\n",
	    req->ir_reply[1] & 0x0f,
	    req->ir_reply[2] & 0x7f, req->ir_reply[3] >> 4,
	    req->ir_reply[3] & 0x0f,
	    req->ir_reply[4] & 0x0f, req->ir_reply[4] >> 4, req->ir_reply[5]);

	sc->ipmi_dev_support = req->ir_reply[5];
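	/*
	 * Reply layout sketch (IPMI Get Device ID, for reference; byte 0
	 * is the first data byte after the completion code):
	 *
	 *	ir_reply[0]  device ID
	 *	ir_reply[1]  device revision (low nibble)
	 *	ir_reply[2]  firmware rev. major (bit 7 = device available)
	 *	ir_reply[3]  firmware rev. minor (BCD)
	 *	ir_reply[4]  IPMI version (BCD, nibbles swapped)
	 *	ir_reply[5]  additional device support mask (IPMI_ADS_*)
	 */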
	IPMI_INIT_DRIVER_REQUEST(req, IPMI_ADDR(IPMI_APP_REQUEST, 0),
	    IPMI_CLEAR_FLAGS, 1, 0);

	ipmi_submit_driver_request(sc, req, 0);

	/* XXX: Magic numbers */
	if (req->ir_compcode == 0xc0) {
		device_printf(dev, "Clear flags is busy\n");
	}
	if (req->ir_compcode == 0xc1) {
		device_printf(dev, "Clear flags illegal\n");
	}

	for (i = 0; i < 8; i++) {
		IPMI_INIT_DRIVER_REQUEST(req, IPMI_ADDR(IPMI_APP_REQUEST, 0),
		    IPMI_GET_CHANNEL_INFO, 1, 0);
		req->ir_request[0] = i;

		ipmi_submit_driver_request(sc, req, 0);

		if (req->ir_compcode != 0)
			break;
	}
	device_printf(dev, "Number of channels %d\n", i);

	/*
	 * Probe for watchdog, but only for backends which support
	 * polled driver requests.
	 */
	if (sc->ipmi_driver_requests_polled) {
		IPMI_INIT_DRIVER_REQUEST(req, IPMI_ADDR(IPMI_APP_REQUEST, 0),
		    IPMI_GET_WDOG, 0, 0);

		ipmi_submit_driver_request(sc, req, 0);

		if (req->ir_compcode == 0x00) {
			device_printf(dev, "Attached watchdog\n");
			/* register the watchdog event handler */
			sc->ipmi_watchdog_tag = EVENTHANDLER_REGISTER(
			    watchdog_list, ipmi_wd_event, sc, 0);
			sc->ipmi_shutdown_tag = EVENTHANDLER_REGISTER(
			    shutdown_pre_sync, ipmi_shutdown_event,
			    sc, 0);
		}
	}

	sc->ipmi_cdev = make_dev(&ipmi_cdevsw, device_get_unit(dev),
	    UID_ROOT, GID_OPERATOR, 0660, "ipmi%d", device_get_unit(dev));
	if (sc->ipmi_cdev == NULL) {
		device_printf(dev, "Failed to create cdev\n");
		return;
	}
	sc->ipmi_cdev->si_drv1 = sc;

	/*
	 * Set initial watchdog state.  If desired, set an initial
	 * watchdog on startup.  Or, if the watchdog device is
	 * disabled, clear any existing watchdog.
	 */
	if (on && wd_startup_countdown > 0) {
		wd_timer_actions = IPMI_SET_WD_ACTION_POWER_CYCLE;
		if (ipmi_set_watchdog(sc, wd_startup_countdown) == 0 &&
		    ipmi_reset_watchdog(sc) == 0) {
			sc->ipmi_watchdog_active = wd_startup_countdown;
			sc->ipmi_watchdog_actions = wd_timer_actions;
			sc->ipmi_watchdog_pretimeout = wd_pretimeout_countdown;
		} else
			(void)ipmi_set_watchdog(sc, 0);
		ipmi_reset_watchdog(sc);
	} else if (!on)
		(void)ipmi_set_watchdog(sc, 0);
	/*
	 * Power cycle the system off using IPMI.  We use SHUTDOWN_PRI_LAST
	 * - 2 since we don't handle all the other kinds of reboots.  We'll
	 * let others handle them.  We only try to do this if the BMC
	 * supports the Chassis device.
	 */
	if (sc->ipmi_dev_support & IPMI_ADS_CHASSIS) {
		device_printf(dev, "Establishing power cycle handler\n");
		sc->ipmi_power_cycle_tag = EVENTHANDLER_REGISTER(shutdown_final,
		    ipmi_power_cycle, sc, SHUTDOWN_PRI_LAST - 2);
	}
}
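/*
 * Illustrative note: once the handlers above are registered, the
 * watchdog is driven through the generic watchdog(4) interface, e.g.
 * (assuming a standard watchdogd(8) setup):
 *
 *	# service watchdogd onestart
 *	# watchdog -t 34
 *
 * which reaches ipmi_wd_event() with the interval encoded in the low
 * WD_INTERVAL bits of cmd.
 */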
int
ipmi_attach(device_t dev)
{
	struct ipmi_softc *sc = device_get_softc(dev);
	int error;

	if (sc->ipmi_irq_res != NULL && sc->ipmi_intr != NULL) {
		error = bus_setup_intr(dev, sc->ipmi_irq_res, INTR_TYPE_MISC,
		    NULL, sc->ipmi_intr, sc, &sc->ipmi_irq);
		if (error) {
			device_printf(dev, "can't set up interrupt\n");
			return (error);
		}
	}

	bzero(&sc->ipmi_ich, sizeof(struct intr_config_hook));
	sc->ipmi_ich.ich_func = ipmi_startup;
	sc->ipmi_ich.ich_arg = sc;
	if (config_intrhook_establish(&sc->ipmi_ich) != 0) {
		device_printf(dev, "can't establish configuration hook\n");
		return (ENOMEM);
	}

	ipmi_attached = 1;
	return (0);
}

int
ipmi_detach(device_t dev)
{
	struct ipmi_softc *sc;

	sc = device_get_softc(dev);

	/* Fail if there are any open handles. */
	IPMI_LOCK(sc);
	if (sc->ipmi_opened) {
		IPMI_UNLOCK(sc);
		return (EBUSY);
	}
	IPMI_UNLOCK(sc);
	if (sc->ipmi_cdev)
		destroy_dev(sc->ipmi_cdev);

	/* Detach from watchdog handling and turn off watchdog. */
	if (sc->ipmi_shutdown_tag)
		EVENTHANDLER_DEREGISTER(shutdown_pre_sync,
		    sc->ipmi_shutdown_tag);
	if (sc->ipmi_watchdog_tag) {
		EVENTHANDLER_DEREGISTER(watchdog_list, sc->ipmi_watchdog_tag);
		ipmi_set_watchdog(sc, 0);
	}

	/* Detach from shutdown handling for power cycle reboot. */
	if (sc->ipmi_power_cycle_tag)
		EVENTHANDLER_DEREGISTER(shutdown_final,
		    sc->ipmi_power_cycle_tag);

	/* XXX: should use shutdown callout I think. */
	/* If the backend uses a kthread, shut it down. */
	IPMI_LOCK(sc);
	sc->ipmi_detaching = 1;
	if (sc->ipmi_kthread) {
		cv_broadcast(&sc->ipmi_request_added);
		msleep(sc->ipmi_kthread, &sc->ipmi_requests_lock, 0,
		    "ipmi_wait", 0);
	}
	IPMI_UNLOCK(sc);
	if (sc->ipmi_irq)
		bus_teardown_intr(dev, sc->ipmi_irq_res, sc->ipmi_irq);

	ipmi_release_resources(dev);
	mtx_destroy(&sc->ipmi_io_lock);
	mtx_destroy(&sc->ipmi_requests_lock);
	return (0);
}

void
ipmi_release_resources(device_t dev)
{
	struct ipmi_softc *sc;
	int i;

	sc = device_get_softc(dev);
	if (sc->ipmi_irq)
		bus_teardown_intr(dev, sc->ipmi_irq_res, sc->ipmi_irq);
	if (sc->ipmi_irq_res)
		bus_release_resource(dev, SYS_RES_IRQ, sc->ipmi_irq_rid,
		    sc->ipmi_irq_res);
	for (i = 0; i < MAX_RES; i++)
		if (sc->ipmi_io_res[i])
			bus_release_resource(dev, sc->ipmi_io_type,
			    sc->ipmi_io_rid + i, sc->ipmi_io_res[i]);
}

devclass_t ipmi_devclass;

/* XXX: Why? */
static void
ipmi_unload(void *arg)
{
	device_t *devs;
	int count;
	int i;

	if (ipmi_devclass == NULL)
		return;
	if (devclass_get_devices(ipmi_devclass, &devs, &count) != 0)
		return;
	for (i = 0; i < count; i++)
		device_delete_child(device_get_parent(devs[i]), devs[i]);
	free(devs, M_TEMP);
}
SYSUNINIT(ipmi_unload, SI_SUB_DRIVERS, SI_ORDER_FIRST, ipmi_unload, NULL);

#ifdef IPMI_DEBUG
static void
dump_buf(u_char *data, int len)
{
	char buf[20];
	char line[1024];
	char temp[30];
	int count = 0;
	int i = 0;

	printf("Address %p len %d\n", data, len);
	if (len > 256)
		len = 256;
	line[0] = '\000';
	for (; len > 0; len--, data++) {
		sprintf(temp, "%02x ", *data);
		strcat(line, temp);
		/* Printable ASCII is shown as-is; everything else as '.'. */
		if (*data >= ' ' && *data <= '~')
			buf[count] = *data;
		else
			buf[count] = '.';
		if (++count == 16) {
			buf[count] = '\000';
			count = 0;
			printf(" %3x %s %s\n", i, line, buf);
			i += 16;
			line[0] = '\000';
		}
	}
	buf[count] = '\000';

	for (; count != 16; count++) {
		strcat(line, "   ");
	}
	printf(" %3x %s %s\n", i, line, buf);
}
#endif