/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2006 IronPort Systems Inc. <ambrisko@ironport.com>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/condvar.h>
#include <sys/conf.h>
#include <sys/eventhandler.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/poll.h>
#include <sys/reboot.h>
#include <sys/rman.h>
#include <sys/selinfo.h>
#include <sys/sysctl.h>
#include <sys/watchdog.h>

#ifdef LOCAL_MODULE
#include <ipmi.h>
#include <ipmivars.h>
#else
#include <sys/ipmi.h>
#include <dev/ipmi/ipmivars.h>
#endif

/*
 * Driver request structures are allocated on the stack via alloca() to
 * avoid calling malloc(), especially for the watchdog handler.
 * To avoid too much stack growth, a previously allocated structure can
 * be reused via IPMI_INIT_DRIVER_REQUEST(), but the caller should ensure
 * that there is adequate reply/request space in the original allocation.
 */
#define	IPMI_INIT_DRIVER_REQUEST(req, addr, cmd, reqlen, replylen)	\
	bzero((req), sizeof(struct ipmi_request));			\
	ipmi_init_request((req), NULL, 0, (addr), (cmd), (reqlen), (replylen))

#define	IPMI_ALLOC_DRIVER_REQUEST(req, addr, cmd, reqlen, replylen)	\
	(req) = __builtin_alloca(sizeof(struct ipmi_request) +		\
	    (reqlen) + (replylen));					\
	IPMI_INIT_DRIVER_REQUEST((req), (addr), (cmd), (reqlen),	\
	    (replylen))
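/*
 * Example (illustrative only; mirrors ipmi_startup() below): a typical
 * driver-internal command places the request on the stack and submits
 * it synchronously:
 *
 *	struct ipmi_request *req;
 *
 *	IPMI_ALLOC_DRIVER_REQUEST(req, IPMI_ADDR(IPMI_APP_REQUEST, 0),
 *	    IPMI_GET_DEVICE_ID, 0, 15);
 *	error = ipmi_submit_driver_request(sc, req, MAX_TIMEOUT);
 *
 * On success (error == 0 && req->ir_compcode == 0) the reply bytes are
 * in req->ir_reply; being stack-allocated, nothing is freed afterwards.
 */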
#ifdef IPMB
static int ipmi_ipmb_checksum(u_char *, int);
static int ipmi_ipmb_send_message(device_t, u_char, u_char, u_char,
	u_char, u_char *, int);
#endif

static d_ioctl_t ipmi_ioctl;
static d_poll_t ipmi_poll;
static d_open_t ipmi_open;
static void ipmi_dtor(void *arg);

int ipmi_attached = 0;

static int on = 1;
static bool wd_in_shutdown = false;
static int wd_timer_actions = IPMI_SET_WD_ACTION_POWER_CYCLE;
static int wd_shutdown_countdown = 0;	/* sec */
static int wd_startup_countdown = 0;	/* sec */
static int wd_pretimeout_countdown = 120;	/* sec */
static int cycle_wait = 10;		/* sec */

static SYSCTL_NODE(_hw, OID_AUTO, ipmi, CTLFLAG_RD, 0,
    "IPMI driver parameters");
SYSCTL_INT(_hw_ipmi, OID_AUTO, on, CTLFLAG_RWTUN,
    &on, 0, "Enable the IPMI driver");
SYSCTL_INT(_hw_ipmi, OID_AUTO, wd_timer_actions, CTLFLAG_RW,
    &wd_timer_actions, 0,
    "IPMI watchdog timer actions (including pre-timeout interrupt)");
SYSCTL_INT(_hw_ipmi, OID_AUTO, wd_shutdown_countdown, CTLFLAG_RW,
    &wd_shutdown_countdown, 0,
    "IPMI watchdog countdown for shutdown (seconds)");
SYSCTL_INT(_hw_ipmi, OID_AUTO, wd_startup_countdown, CTLFLAG_RDTUN,
    &wd_startup_countdown, 0,
    "IPMI watchdog countdown initialized during startup (seconds)");
SYSCTL_INT(_hw_ipmi, OID_AUTO, wd_pretimeout_countdown, CTLFLAG_RW,
    &wd_pretimeout_countdown, 0,
    "IPMI watchdog pre-timeout countdown (seconds)");
SYSCTL_INT(_hw_ipmi, OID_AUTO, cycle_wait, CTLFLAG_RWTUN,
    &cycle_wait, 0,
    "IPMI power cycle on reboot delay time (seconds)");
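/*
 * Example (illustrative): the knobs above are set with sysctl(8) at
 * runtime, and the CTLFLAG_*TUN ones may also be seeded from
 * loader.conf(5), e.g.:
 *
 *	sysctl hw.ipmi.wd_shutdown_countdown=60
 *	hw.ipmi.on="0"		(in loader.conf)
 */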
static struct cdevsw ipmi_cdevsw = {
	.d_version =	D_VERSION,
	.d_open =	ipmi_open,
	.d_ioctl =	ipmi_ioctl,
	.d_poll =	ipmi_poll,
	.d_name =	"ipmi",
};

static MALLOC_DEFINE(M_IPMI, "ipmi", "ipmi");

static int
ipmi_open(struct cdev *cdev, int flags, int fmt, struct thread *td)
{
	struct ipmi_device *dev;
	struct ipmi_softc *sc;
	int error;

	if (!on)
		return (ENOENT);

	/* Initialize the per file descriptor data. */
	dev = malloc(sizeof(struct ipmi_device), M_IPMI, M_WAITOK | M_ZERO);
	error = devfs_set_cdevpriv(dev, ipmi_dtor);
	if (error) {
		free(dev, M_IPMI);
		return (error);
	}

	sc = cdev->si_drv1;
	TAILQ_INIT(&dev->ipmi_completed_requests);
	dev->ipmi_address = IPMI_BMC_SLAVE_ADDR;
	dev->ipmi_lun = IPMI_BMC_SMS_LUN;
	dev->ipmi_softc = sc;
	IPMI_LOCK(sc);
	sc->ipmi_opened++;
	IPMI_UNLOCK(sc);

	return (0);
}

static int
ipmi_poll(struct cdev *cdev, int poll_events, struct thread *td)
{
	struct ipmi_device *dev;
	struct ipmi_softc *sc;
	int revents = 0;

	if (devfs_get_cdevpriv((void **)&dev))
		return (0);

	sc = cdev->si_drv1;
	IPMI_LOCK(sc);
	if (poll_events & (POLLIN | POLLRDNORM)) {
		if (!TAILQ_EMPTY(&dev->ipmi_completed_requests))
			revents |= poll_events & (POLLIN | POLLRDNORM);
		if (dev->ipmi_requests == 0)
			revents |= POLLERR;
	}

	if (revents == 0) {
		if (poll_events & (POLLIN | POLLRDNORM))
			selrecord(td, &dev->ipmi_select);
	}
	IPMI_UNLOCK(sc);

	return (revents);
}

static void
ipmi_purge_completed_requests(struct ipmi_device *dev)
{
	struct ipmi_request *req;

	while (!TAILQ_EMPTY(&dev->ipmi_completed_requests)) {
		req = TAILQ_FIRST(&dev->ipmi_completed_requests);
		TAILQ_REMOVE(&dev->ipmi_completed_requests, req, ir_link);
		dev->ipmi_requests--;
		ipmi_free_request(req);
	}
}

static void
ipmi_dtor(void *arg)
{
	struct ipmi_request *req, *nreq;
	struct ipmi_device *dev;
	struct ipmi_softc *sc;

	dev = arg;
	sc = dev->ipmi_softc;

	IPMI_LOCK(sc);
	if (dev->ipmi_requests) {
		/* Throw away any pending requests for this device. */
		TAILQ_FOREACH_SAFE(req, &sc->ipmi_pending_requests, ir_link,
		    nreq) {
			if (req->ir_owner == dev) {
				TAILQ_REMOVE(&sc->ipmi_pending_requests, req,
				    ir_link);
				dev->ipmi_requests--;
				ipmi_free_request(req);
			}
		}

		/* Throw away any pending completed requests for this device. */
		ipmi_purge_completed_requests(dev);

		/*
		 * If we still have outstanding requests, they must be stuck
		 * in an interface driver, so wait for those to drain.
		 */
		dev->ipmi_closing = 1;
		while (dev->ipmi_requests > 0) {
			msleep(&dev->ipmi_requests, &sc->ipmi_requests_lock,
			    PWAIT, "ipmidrain", 0);
			ipmi_purge_completed_requests(dev);
		}
	}
	sc->ipmi_opened--;
	IPMI_UNLOCK(sc);

	/* Cleanup. */
	free(dev, M_IPMI);
}

#ifdef IPMB
static int
ipmi_ipmb_checksum(u_char *data, int len)
{
	u_char sum = 0;

	for (; len; len--) {
		sum += *data++;
	}
	return (-sum);
}
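/*
 * Worked example (informational): the IPMB checksum is the two's
 * complement of the byte sum. For a connection header of rsSA 0x20 and
 * netFn/rsLUN 0x18, the checksum is (-(0x20 + 0x18)) & 0xff == 0xc8, so
 * the three bytes sum to zero modulo 256, which is how receivers
 * validate the header.
 */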
/* XXX: Needs work */
static int
ipmi_ipmb_send_message(device_t dev, u_char channel, u_char netfn,
    u_char command, u_char seq, u_char *data, int data_len)
{
	struct ipmi_softc *sc = device_get_softc(dev);
	struct ipmi_request *req;
	u_char slave_addr = 0x52;
	int error;

	IPMI_ALLOC_DRIVER_REQUEST(req, IPMI_ADDR(IPMI_APP_REQUEST, 0),
	    IPMI_SEND_MSG, data_len + 8, 0);
	req->ir_request[0] = channel;
	req->ir_request[1] = slave_addr;
	req->ir_request[2] = IPMI_ADDR(netfn, 0);
	req->ir_request[3] = ipmi_ipmb_checksum(&req->ir_request[1], 2);
	req->ir_request[4] = sc->ipmi_address;
	req->ir_request[5] = IPMI_ADDR(seq, sc->ipmi_lun);
	req->ir_request[6] = command;

	bcopy(data, &req->ir_request[7], data_len);
	req->ir_request[data_len + 7] = ipmi_ipmb_checksum(&req->ir_request[4],
	    data_len + 3);

	ipmi_submit_driver_request(sc, req, 0);
	error = req->ir_error;

	return (error);
}

static int
ipmi_handle_attn(struct ipmi_softc *sc)
{
	struct ipmi_request *req;
	int error;

	device_printf(sc->ipmi_dev, "BMC has a message\n");
	IPMI_ALLOC_DRIVER_REQUEST(req, IPMI_ADDR(IPMI_APP_REQUEST, 0),
	    IPMI_GET_MSG_FLAGS, 0, 1);

	ipmi_submit_driver_request(sc, req, 0);

	if (req->ir_error == 0 && req->ir_compcode == 0) {
		if (req->ir_reply[0] & IPMI_MSG_BUFFER_FULL) {
			device_printf(sc->ipmi_dev, "message buffer full\n");
		}
		if (req->ir_reply[0] & IPMI_WDT_PRE_TIMEOUT) {
			device_printf(sc->ipmi_dev,
			    "watchdog about to go off\n");
		}
		if (req->ir_reply[0] & IPMI_MSG_AVAILABLE) {
			IPMI_ALLOC_DRIVER_REQUEST(req,
			    IPMI_ADDR(IPMI_APP_REQUEST, 0), IPMI_GET_MSG, 0,
			    16);

			/* Fetch the message so it can be discarded. */
			ipmi_submit_driver_request(sc, req, 0);

			device_printf(sc->ipmi_dev, "throw out message ");
			dump_buf(req->ir_reply, 16);
		}
	}
	error = req->ir_error;

	return (error);
}
#endif

#ifdef IPMICTL_SEND_COMMAND_32
#define	PTRIN(p)	((void *)(uintptr_t)(p))
#define	PTROUT(p)	((uintptr_t)(p))
#endif
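/*
 * Compat note (informational): a 32-bit ipmitool(8) on a 64-bit kernel
 * hands in struct ipmi_req32/ipmi_recv32, whose pointer fields are
 * 32-bit integers. PTRIN() widens those user pointers back to void *
 * so ipmi_ioctl() below can thunk the 32-bit layouts onto the native
 * structures and share one code path.
 */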
static int
ipmi_ioctl(struct cdev *cdev, u_long cmd, caddr_t data,
    int flags, struct thread *td)
{
	struct ipmi_softc *sc;
	struct ipmi_device *dev;
	struct ipmi_request *kreq;
	struct ipmi_req *req = (struct ipmi_req *)data;
	struct ipmi_recv *recv = (struct ipmi_recv *)data;
	struct ipmi_addr addr;
#ifdef IPMICTL_SEND_COMMAND_32
	struct ipmi_req32 *req32 = (struct ipmi_req32 *)data;
	struct ipmi_recv32 *recv32 = (struct ipmi_recv32 *)data;
	union {
		struct ipmi_req req;
		struct ipmi_recv recv;
	} thunk32;
#endif
	int error, len;

	error = devfs_get_cdevpriv((void **)&dev);
	if (error)
		return (error);

	sc = cdev->si_drv1;

#ifdef IPMICTL_SEND_COMMAND_32
	/* Convert 32-bit structures to native. */
	switch (cmd) {
	case IPMICTL_SEND_COMMAND_32:
		req = &thunk32.req;
		req->addr = PTRIN(req32->addr);
		req->addr_len = req32->addr_len;
		req->msgid = req32->msgid;
		req->msg.netfn = req32->msg.netfn;
		req->msg.cmd = req32->msg.cmd;
		req->msg.data_len = req32->msg.data_len;
		req->msg.data = PTRIN(req32->msg.data);
		break;
	case IPMICTL_RECEIVE_MSG_TRUNC_32:
	case IPMICTL_RECEIVE_MSG_32:
		recv = &thunk32.recv;
		recv->addr = PTRIN(recv32->addr);
		recv->addr_len = recv32->addr_len;
		recv->msg.data_len = recv32->msg.data_len;
		recv->msg.data = PTRIN(recv32->msg.data);
		break;
	}
#endif

	switch (cmd) {
#ifdef IPMICTL_SEND_COMMAND_32
	case IPMICTL_SEND_COMMAND_32:
#endif
	case IPMICTL_SEND_COMMAND:
		/*
		 * XXX: Need to add proper handling of this.
		 */
		error = copyin(req->addr, &addr, sizeof(addr));
		if (error)
			return (error);

		IPMI_LOCK(sc);
		/* clear out old stuff in queue of stuff done */
		/* XXX: This seems odd. */
		while ((kreq = TAILQ_FIRST(&dev->ipmi_completed_requests))) {
			TAILQ_REMOVE(&dev->ipmi_completed_requests, kreq,
			    ir_link);
			dev->ipmi_requests--;
			ipmi_free_request(kreq);
		}
		IPMI_UNLOCK(sc);

		kreq = ipmi_alloc_request(dev, req->msgid,
		    IPMI_ADDR(req->msg.netfn, 0), req->msg.cmd,
		    req->msg.data_len, IPMI_MAX_RX);
		error = copyin(req->msg.data, kreq->ir_request,
		    req->msg.data_len);
		if (error) {
			ipmi_free_request(kreq);
			return (error);
		}
		IPMI_LOCK(sc);
		dev->ipmi_requests++;
		error = sc->ipmi_enqueue_request(sc, kreq);
		IPMI_UNLOCK(sc);
		if (error)
			return (error);
		break;
#ifdef IPMICTL_SEND_COMMAND_32
	case IPMICTL_RECEIVE_MSG_TRUNC_32:
	case IPMICTL_RECEIVE_MSG_32:
#endif
	case IPMICTL_RECEIVE_MSG_TRUNC:
	case IPMICTL_RECEIVE_MSG:
		error = copyin(recv->addr, &addr, sizeof(addr));
		if (error)
			return (error);

		IPMI_LOCK(sc);
		kreq = TAILQ_FIRST(&dev->ipmi_completed_requests);
		if (kreq == NULL) {
			IPMI_UNLOCK(sc);
			return (EAGAIN);
		}
		addr.channel = IPMI_BMC_CHANNEL;
		/* XXX */
		recv->recv_type = IPMI_RESPONSE_RECV_TYPE;
		recv->msgid = kreq->ir_msgid;
		recv->msg.netfn = IPMI_REPLY_ADDR(kreq->ir_addr) >> 2;
		recv->msg.cmd = kreq->ir_command;
		error = kreq->ir_error;
		if (error) {
			TAILQ_REMOVE(&dev->ipmi_completed_requests, kreq,
			    ir_link);
			dev->ipmi_requests--;
			IPMI_UNLOCK(sc);
			ipmi_free_request(kreq);
			return (error);
		}
		len = kreq->ir_replylen + 1;
		if (recv->msg.data_len < len &&
		    (cmd == IPMICTL_RECEIVE_MSG
#ifdef IPMICTL_RECEIVE_MSG_32
		    || cmd == IPMICTL_RECEIVE_MSG_32
#endif
		    )) {
			IPMI_UNLOCK(sc);
			return (EMSGSIZE);
		}
		TAILQ_REMOVE(&dev->ipmi_completed_requests, kreq, ir_link);
		dev->ipmi_requests--;
		IPMI_UNLOCK(sc);
		len = min(recv->msg.data_len, len);
		recv->msg.data_len = len;
		error = copyout(&addr, recv->addr, sizeof(addr));
		if (error == 0)
			error = copyout(&kreq->ir_compcode, recv->msg.data, 1);
		if (error == 0)
			error = copyout(kreq->ir_reply, recv->msg.data + 1,
			    len - 1);
		ipmi_free_request(kreq);
		if (error)
			return (error);
		break;
	case IPMICTL_SET_MY_ADDRESS_CMD:
		IPMI_LOCK(sc);
		dev->ipmi_address = *(int *)data;
		IPMI_UNLOCK(sc);
		break;
	case IPMICTL_GET_MY_ADDRESS_CMD:
		IPMI_LOCK(sc);
		*(int *)data = dev->ipmi_address;
		IPMI_UNLOCK(sc);
		break;
	case IPMICTL_SET_MY_LUN_CMD:
		IPMI_LOCK(sc);
		dev->ipmi_lun = *(int *)data & 0x3;
		IPMI_UNLOCK(sc);
		break;
	case IPMICTL_GET_MY_LUN_CMD:
		IPMI_LOCK(sc);
		*(int *)data = dev->ipmi_lun;
		IPMI_UNLOCK(sc);
		break;
	case IPMICTL_SET_GETS_EVENTS_CMD:
		/*
		device_printf(sc->ipmi_dev,
		    "IPMICTL_SET_GETS_EVENTS_CMD NA\n");
		*/
		break;
	case IPMICTL_REGISTER_FOR_CMD:
	case IPMICTL_UNREGISTER_FOR_CMD:
		return (EOPNOTSUPP);
	default:
		device_printf(sc->ipmi_dev, "Unknown IOCTL %lX\n", cmd);
		return (ENOIOCTL);
	}

#ifdef IPMICTL_SEND_COMMAND_32
	/* Update changed fields in 32-bit structures. */
	switch (cmd) {
	case IPMICTL_RECEIVE_MSG_TRUNC_32:
	case IPMICTL_RECEIVE_MSG_32:
		recv32->recv_type = recv->recv_type;
		recv32->msgid = recv->msgid;
		recv32->msg.netfn = recv->msg.netfn;
		recv32->msg.cmd = recv->msg.cmd;
		recv32->msg.data_len = recv->msg.data_len;
		break;
	}
#endif
	return (0);
}
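/*
 * Example (userland sketch, not part of the driver; assumes an fd open
 * on /dev/ipmi0 and field names from <sys/ipmi.h>): issuing a command
 * and collecting the reply looks roughly like:
 *
 *	struct ipmi_req req = { ... };
 *	struct ipmi_recv recv = { ... };
 *
 *	ioctl(fd, IPMICTL_SEND_COMMAND, &req);
 *	poll(&pfd, 1, INFTIM);			// wait for completion
 *	ioctl(fd, IPMICTL_RECEIVE_MSG, &recv);	// EAGAIN if none pending
 */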
/*
 * Request management.
 */

static __inline void
ipmi_init_request(struct ipmi_request *req, struct ipmi_device *dev, long msgid,
    uint8_t addr, uint8_t command, size_t requestlen, size_t replylen)
{

	req->ir_owner = dev;
	req->ir_msgid = msgid;
	req->ir_addr = addr;
	req->ir_command = command;
	if (requestlen) {
		req->ir_request = (char *)&req[1];
		req->ir_requestlen = requestlen;
	}
	if (replylen) {
		req->ir_reply = (char *)&req[1] + requestlen;
		req->ir_replybuflen = replylen;
	}
}

/* Allocate a new request with request and reply buffers. */
struct ipmi_request *
ipmi_alloc_request(struct ipmi_device *dev, long msgid, uint8_t addr,
    uint8_t command, size_t requestlen, size_t replylen)
{
	struct ipmi_request *req;

	req = malloc(sizeof(struct ipmi_request) + requestlen + replylen,
	    M_IPMI, M_WAITOK | M_ZERO);
	ipmi_init_request(req, dev, msgid, addr, command, requestlen, replylen);
	return (req);
}

/* Free a request no longer in use. */
void
ipmi_free_request(struct ipmi_request *req)
{

	free(req, M_IPMI);
}

/* Store a processed request on the appropriate completion queue. */
void
ipmi_complete_request(struct ipmi_softc *sc, struct ipmi_request *req)
{
	struct ipmi_device *dev;

	IPMI_LOCK_ASSERT(sc);

	/*
	 * Anonymous requests (from inside the driver) always have a
	 * waiter that we awaken.
	 */
	if (req->ir_owner == NULL)
		wakeup(req);
	else {
		dev = req->ir_owner;
		TAILQ_INSERT_TAIL(&dev->ipmi_completed_requests, req, ir_link);
		selwakeup(&dev->ipmi_select);
		if (dev->ipmi_closing)
			wakeup(&dev->ipmi_requests);
	}
}

/* Perform an internal driver request. */
int
ipmi_submit_driver_request(struct ipmi_softc *sc, struct ipmi_request *req,
    int timo)
{

	return (sc->ipmi_driver_request(sc, req, timo));
}
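/*
 * Request flow overview (informational): ipmi_ioctl() and driver-
 * internal callers place requests on sc->ipmi_pending_requests; a
 * backend (e.g. the KCS/SMIC kthread) pulls them off with
 * ipmi_dequeue_request(), exchanges them with the BMC, and passes the
 * result to ipmi_complete_request(), which wakes the anonymous waiter
 * or queues the reply on the owner's completed list for ioctl/poll.
 */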
/*
 * Helper routine for polled system interfaces that use
 * ipmi_polled_enqueue_request() to queue requests. This routine waits
 * until there is a pending request and then returns the first request.
 * If the driver is shutting down, it returns NULL.
 */
struct ipmi_request *
ipmi_dequeue_request(struct ipmi_softc *sc)
{
	struct ipmi_request *req;

	IPMI_LOCK_ASSERT(sc);

	while (!sc->ipmi_detaching && TAILQ_EMPTY(&sc->ipmi_pending_requests))
		cv_wait(&sc->ipmi_request_added, &sc->ipmi_requests_lock);
	if (sc->ipmi_detaching)
		return (NULL);

	req = TAILQ_FIRST(&sc->ipmi_pending_requests);
	TAILQ_REMOVE(&sc->ipmi_pending_requests, req, ir_link);
	return (req);
}

/* Default implementation of ipmi_enqueue_request() for polled interfaces. */
int
ipmi_polled_enqueue_request(struct ipmi_softc *sc, struct ipmi_request *req)
{

	IPMI_LOCK_ASSERT(sc);

	TAILQ_INSERT_TAIL(&sc->ipmi_pending_requests, req, ir_link);
	cv_signal(&sc->ipmi_request_added);
	return (0);
}

/*
 * Watchdog event handler.
 */

static int
ipmi_reset_watchdog(struct ipmi_softc *sc)
{
	struct ipmi_request *req;
	int error;

	IPMI_ALLOC_DRIVER_REQUEST(req, IPMI_ADDR(IPMI_APP_REQUEST, 0),
	    IPMI_RESET_WDOG, 0, 0);
	error = ipmi_submit_driver_request(sc, req, 0);
	if (error)
		device_printf(sc->ipmi_dev, "Failed to reset watchdog\n");
	return (error);
}

static int
ipmi_set_watchdog(struct ipmi_softc *sc, unsigned int sec)
{
	struct ipmi_request *req;
	int error;

	if (sec > 0xffff / 10)
		return (EINVAL);

	IPMI_ALLOC_DRIVER_REQUEST(req, IPMI_ADDR(IPMI_APP_REQUEST, 0),
	    IPMI_SET_WDOG, 6, 0);
	if (sec) {
		req->ir_request[0] = IPMI_SET_WD_TIMER_DONT_STOP
		    | IPMI_SET_WD_TIMER_SMS_OS;
		req->ir_request[1] = (wd_timer_actions & 0xff);
		req->ir_request[2] = (wd_pretimeout_countdown & 0xff);
		req->ir_request[3] = 0;	/* Timer use */
		req->ir_request[4] = (sec * 10) & 0xff;
		req->ir_request[5] = (sec * 10) >> 8;
	} else {
		req->ir_request[0] = IPMI_SET_WD_TIMER_SMS_OS;
		req->ir_request[1] = 0;
		req->ir_request[2] = 0;
		req->ir_request[3] = 0;	/* Timer use */
		req->ir_request[4] = 0;
		req->ir_request[5] = 0;
	}
	error = ipmi_submit_driver_request(sc, req, 0);
	if (error)
		device_printf(sc->ipmi_dev, "Failed to set watchdog\n");
	return (error);
}
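/*
 * Worked example (informational): the countdown in the Set Watchdog
 * Timer request is expressed in 100 ms ticks, so sec = 10 becomes 100
 * ticks with ir_request[4] = 0x64 and ir_request[5] = 0x00. The largest
 * programmable timeout is therefore 0xffff / 10 == 6553 seconds, which
 * is the bound ipmi_set_watchdog() enforces above.
 */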
static void
ipmi_wd_event(void *arg, unsigned int cmd, int *error)
{
	struct ipmi_softc *sc = arg;
	unsigned int timeout;
	int e;

	/* Ignore requests while disabled. */
	if (!on)
		return;

	/*
	 * To prevent infinite hangs, we don't let anyone pat or change
	 * the watchdog when we're shutting down. (See ipmi_shutdown_event().)
	 * However, we do want to keep patting the watchdog while we are doing
	 * a coredump.
	 */
	if (wd_in_shutdown) {
		if (dumping && sc->ipmi_watchdog_active)
			ipmi_reset_watchdog(sc);
		return;
	}

	cmd &= WD_INTERVAL;
	if (cmd > 0 && cmd <= 63) {
		timeout = ((uint64_t)1 << cmd) / 1000000000;
		if (timeout == 0)
			timeout = 1;
		if (timeout != sc->ipmi_watchdog_active ||
		    wd_timer_actions != sc->ipmi_watchdog_actions ||
		    wd_pretimeout_countdown != sc->ipmi_watchdog_pretimeout) {
			e = ipmi_set_watchdog(sc, timeout);
			if (e == 0) {
				sc->ipmi_watchdog_active = timeout;
				sc->ipmi_watchdog_actions = wd_timer_actions;
				sc->ipmi_watchdog_pretimeout =
				    wd_pretimeout_countdown;
			} else {
				(void)ipmi_set_watchdog(sc, 0);
				sc->ipmi_watchdog_active = 0;
				sc->ipmi_watchdog_actions = 0;
				sc->ipmi_watchdog_pretimeout = 0;
			}
		}
		if (sc->ipmi_watchdog_active != 0) {
			e = ipmi_reset_watchdog(sc);
			if (e == 0) {
				*error = 0;
			} else {
				(void)ipmi_set_watchdog(sc, 0);
				sc->ipmi_watchdog_active = 0;
				sc->ipmi_watchdog_actions = 0;
				sc->ipmi_watchdog_pretimeout = 0;
			}
		}
	} else if (atomic_readandclear_int(&sc->ipmi_watchdog_active) != 0) {
		sc->ipmi_watchdog_actions = 0;
		sc->ipmi_watchdog_pretimeout = 0;

		e = ipmi_set_watchdog(sc, 0);
		if (e != 0 && cmd == 0)
			*error = EOPNOTSUPP;
	}
}
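/*
 * Worked example (informational): watchdog(9) encodes the requested
 * timeout in the low bits of cmd as a power of two in nanoseconds, so
 * cmd == 35 asks for 2^35 ns (about 34.4 s) and the division in
 * ipmi_wd_event() above programs a 34 second BMC countdown; requests
 * shorter than one second are rounded up to 1 s.
 */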
static void
ipmi_shutdown_event(void *arg, unsigned int cmd, int *error)
{
	struct ipmi_softc *sc = arg;

	/* Ignore event if disabled. */
	if (!on)
		return;

	/*
	 * A positive wd_shutdown_countdown value will re-arm the watchdog;
	 * a zero value will disable it; a negative value will leave the
	 * existing state alone.
	 *
	 * Revert to using a power cycle to ensure that the watchdog will
	 * do something useful here. Having the watchdog send an NMI
	 * instead is useless during shutdown, and might be ignored if an
	 * NMI already triggered.
	 */

	wd_in_shutdown = true;
	if (wd_shutdown_countdown == 0) {
		/* disable watchdog */
		ipmi_set_watchdog(sc, 0);
		sc->ipmi_watchdog_active = 0;
	} else if (wd_shutdown_countdown > 0) {
		/* set desired action and time, and reset the watchdog */
		wd_timer_actions = IPMI_SET_WD_ACTION_POWER_CYCLE;
		ipmi_set_watchdog(sc, wd_shutdown_countdown);
		sc->ipmi_watchdog_active = wd_shutdown_countdown;
		ipmi_reset_watchdog(sc);
	}
}

static void
ipmi_power_cycle(void *arg, int howto)
{
	struct ipmi_softc *sc = arg;
	struct ipmi_request *req;

	/*
	 * Ignore everything except power cycling requests.
	 */
	if ((howto & RB_POWERCYCLE) == 0)
		return;

	device_printf(sc->ipmi_dev, "Power cycling using IPMI\n");

	/*
	 * Send a CHASSIS_CONTROL command to the CHASSIS device, subcommand 2
	 * as described in IPMI v2.0 spec section 28.3.
	 */
	IPMI_ALLOC_DRIVER_REQUEST(req, IPMI_ADDR(IPMI_CHASSIS_REQUEST, 0),
	    IPMI_CHASSIS_CONTROL, 1, 0);
	req->ir_request[0] = IPMI_CC_POWER_CYCLE;

	ipmi_submit_driver_request(sc, req, MAX_TIMEOUT);

	if (req->ir_error != 0 || req->ir_compcode != 0) {
		device_printf(sc->ipmi_dev,
		    "Power cycling via IPMI failed code %#x %#x\n",
		    req->ir_error, req->ir_compcode);
		return;
	}

	/*
	 * BMCs are notoriously slow, so give the BMC cycle_wait seconds
	 * for the power-down leg of the power cycle. If that fails, fall
	 * back to the next handler in the shutdown_final chain and/or the
	 * platform failsafe.
	 */
	DELAY(cycle_wait * 1000 * 1000);
	device_printf(sc->ipmi_dev, "Power cycling via IPMI timed out\n");
}
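/*
 * Ordering note (informational): ipmi_power_cycle() runs late in the
 * shutdown_final chain (it is registered at SHUTDOWN_PRI_LAST - 2 in
 * ipmi_startup() below) and only when RB_POWERCYCLE is set. If the BMC
 * has not cut power within cycle_wait seconds, the function returns so
 * any remaining shutdown_final handlers can provide the fallback path.
 */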
static void
ipmi_startup(void *arg)
{
	struct ipmi_softc *sc = arg;
	struct ipmi_request *req;
	device_t dev;
	int error, i;

	config_intrhook_disestablish(&sc->ipmi_ich);
	dev = sc->ipmi_dev;

	/* Initialize interface-independent state. */
	mtx_init(&sc->ipmi_requests_lock, "ipmi requests", NULL, MTX_DEF);
	mtx_init(&sc->ipmi_io_lock, "ipmi io", NULL, MTX_DEF);
	cv_init(&sc->ipmi_request_added, "ipmireq");
	TAILQ_INIT(&sc->ipmi_pending_requests);

	/* Initialize interface-dependent state. */
	error = sc->ipmi_startup(sc);
	if (error) {
		device_printf(dev, "Failed to initialize interface: %d\n",
		    error);
		return;
	}

	/* Send a GET_DEVICE_ID request. */
	IPMI_ALLOC_DRIVER_REQUEST(req, IPMI_ADDR(IPMI_APP_REQUEST, 0),
	    IPMI_GET_DEVICE_ID, 0, 15);

	error = ipmi_submit_driver_request(sc, req, MAX_TIMEOUT);
	if (error == EWOULDBLOCK) {
		device_printf(dev, "Timed out waiting for GET_DEVICE_ID\n");
		return;
	} else if (error) {
		device_printf(dev, "Failed GET_DEVICE_ID: %d\n", error);
		return;
	} else if (req->ir_compcode != 0) {
		device_printf(dev,
		    "Bad completion code for GET_DEVICE_ID: %d\n",
		    req->ir_compcode);
		return;
	} else if (req->ir_replylen < 5) {
		device_printf(dev, "Short reply for GET_DEVICE_ID: %d\n",
		    req->ir_replylen);
		return;
	}

	device_printf(dev, "IPMI device rev. %d, firmware rev. %d.%d%d, "
	    "version %d.%d, device support mask %#x\n",
	    req->ir_reply[1] & 0x0f,
	    req->ir_reply[2] & 0x7f, req->ir_reply[3] >> 4,
	    req->ir_reply[3] & 0x0f,
	    req->ir_reply[4] & 0x0f, req->ir_reply[4] >> 4, req->ir_reply[5]);

	sc->ipmi_dev_support = req->ir_reply[5];

	IPMI_INIT_DRIVER_REQUEST(req, IPMI_ADDR(IPMI_APP_REQUEST, 0),
	    IPMI_CLEAR_FLAGS, 1, 0);

	ipmi_submit_driver_request(sc, req, 0);

	/* XXX: Magic numbers */
	if (req->ir_compcode == 0xc0) {
		device_printf(dev, "Clear flags is busy\n");
	}
	if (req->ir_compcode == 0xc1) {
		device_printf(dev, "Clear flags illegal\n");
	}

	for (i = 0; i < 8; i++) {
		IPMI_INIT_DRIVER_REQUEST(req, IPMI_ADDR(IPMI_APP_REQUEST, 0),
		    IPMI_GET_CHANNEL_INFO, 1, 0);
		req->ir_request[0] = i;

		ipmi_submit_driver_request(sc, req, 0);

		if (req->ir_compcode != 0)
			break;
	}
	device_printf(dev, "Number of channels %d\n", i);

	/*
	 * Probe for watchdog, but only for backends which support
	 * polled driver requests.
	 */
	if (sc->ipmi_driver_requests_polled) {
		IPMI_INIT_DRIVER_REQUEST(req, IPMI_ADDR(IPMI_APP_REQUEST, 0),
		    IPMI_GET_WDOG, 0, 0);

		ipmi_submit_driver_request(sc, req, 0);

		if (req->ir_compcode == 0x00) {
			device_printf(dev, "Attached watchdog\n");
			/* register the watchdog event handler */
			sc->ipmi_watchdog_tag = EVENTHANDLER_REGISTER(
			    watchdog_list, ipmi_wd_event, sc, 0);
			sc->ipmi_shutdown_tag = EVENTHANDLER_REGISTER(
			    shutdown_pre_sync, ipmi_shutdown_event,
			    sc, 0);
		}
	}

	sc->ipmi_cdev = make_dev(&ipmi_cdevsw, device_get_unit(dev),
	    UID_ROOT, GID_OPERATOR, 0660, "ipmi%d", device_get_unit(dev));
	if (sc->ipmi_cdev == NULL) {
		device_printf(dev, "Failed to create cdev\n");
		return;
	}
	sc->ipmi_cdev->si_drv1 = sc;

	/*
	 * Set initial watchdog state. If desired, set an initial
	 * watchdog on startup. Or, if the watchdog device is
	 * disabled, clear any existing watchdog.
	 */
	if (on && wd_startup_countdown > 0) {
		wd_timer_actions = IPMI_SET_WD_ACTION_POWER_CYCLE;
		if (ipmi_set_watchdog(sc, wd_startup_countdown) == 0 &&
		    ipmi_reset_watchdog(sc) == 0) {
			sc->ipmi_watchdog_active = wd_startup_countdown;
			sc->ipmi_watchdog_actions = wd_timer_actions;
			sc->ipmi_watchdog_pretimeout = wd_pretimeout_countdown;
		} else
			(void)ipmi_set_watchdog(sc, 0);
		ipmi_reset_watchdog(sc);
	} else if (!on)
		(void)ipmi_set_watchdog(sc, 0);

	/*
	 * Power cycle the system off using IPMI. We use SHUTDOWN_PRI_LAST - 2
	 * since we don't handle all the other kinds of reboots. We'll let
	 * others handle them. We only try to do this if the BMC supports the
	 * Chassis device.
	 */
	if (sc->ipmi_dev_support & IPMI_ADS_CHASSIS) {
		device_printf(dev, "Establishing power cycle handler\n");
		sc->ipmi_power_cycle_tag = EVENTHANDLER_REGISTER(shutdown_final,
		    ipmi_power_cycle, sc, SHUTDOWN_PRI_LAST - 2);
	}
}

int
ipmi_attach(device_t dev)
{
	struct ipmi_softc *sc = device_get_softc(dev);
	int error;

	if (sc->ipmi_irq_res != NULL && sc->ipmi_intr != NULL) {
		error = bus_setup_intr(dev, sc->ipmi_irq_res, INTR_TYPE_MISC,
		    NULL, sc->ipmi_intr, sc, &sc->ipmi_irq);
		if (error) {
			device_printf(dev, "can't set up interrupt\n");
			return (error);
		}
	}

	bzero(&sc->ipmi_ich, sizeof(struct intr_config_hook));
	sc->ipmi_ich.ich_func = ipmi_startup;
	sc->ipmi_ich.ich_arg = sc;
	if (config_intrhook_establish(&sc->ipmi_ich) != 0) {
		device_printf(dev, "can't establish configuration hook\n");
		return (ENOMEM);
	}

	ipmi_attached = 1;
	return (0);
}
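/*
 * Note (informational): ipmi_attach() defers all BMC traffic to
 * ipmi_startup() through a config_intrhook, so the first commands are
 * issued only once interrupts are available; ipmi_startup() immediately
 * disestablishes the hook when it runs.
 */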
int
ipmi_detach(device_t dev)
{
	struct ipmi_softc *sc;

	sc = device_get_softc(dev);

	/* Fail if there are any open handles. */
	IPMI_LOCK(sc);
	if (sc->ipmi_opened) {
		IPMI_UNLOCK(sc);
		return (EBUSY);
	}
	IPMI_UNLOCK(sc);
	if (sc->ipmi_cdev)
		destroy_dev(sc->ipmi_cdev);

	/* Detach from watchdog handling and turn off watchdog. */
	if (sc->ipmi_shutdown_tag)
		EVENTHANDLER_DEREGISTER(shutdown_pre_sync,
		    sc->ipmi_shutdown_tag);
	if (sc->ipmi_watchdog_tag) {
		EVENTHANDLER_DEREGISTER(watchdog_list, sc->ipmi_watchdog_tag);
		ipmi_set_watchdog(sc, 0);
	}

	/* Detach from shutdown handling for power cycle reboot. */
	if (sc->ipmi_power_cycle_tag)
		EVENTHANDLER_DEREGISTER(shutdown_final,
		    sc->ipmi_power_cycle_tag);

	/* XXX: should use shutdown callout I think. */
	/* If the backend uses a kthread, shut it down. */
	IPMI_LOCK(sc);
	sc->ipmi_detaching = 1;
	if (sc->ipmi_kthread) {
		cv_broadcast(&sc->ipmi_request_added);
		msleep(sc->ipmi_kthread, &sc->ipmi_requests_lock, 0,
		    "ipmi_wait", 0);
	}
	IPMI_UNLOCK(sc);

	/* ipmi_release_resources() also tears down the interrupt handler. */
	ipmi_release_resources(dev);
	mtx_destroy(&sc->ipmi_io_lock);
	mtx_destroy(&sc->ipmi_requests_lock);
	return (0);
}

void
ipmi_release_resources(device_t dev)
{
	struct ipmi_softc *sc;
	int i;

	sc = device_get_softc(dev);
	if (sc->ipmi_irq)
		bus_teardown_intr(dev, sc->ipmi_irq_res, sc->ipmi_irq);
	if (sc->ipmi_irq_res)
		bus_release_resource(dev, SYS_RES_IRQ, sc->ipmi_irq_rid,
		    sc->ipmi_irq_res);
	for (i = 0; i < MAX_RES; i++)
		if (sc->ipmi_io_res[i])
			bus_release_resource(dev, sc->ipmi_io_type,
			    sc->ipmi_io_rid + i, sc->ipmi_io_res[i]);
}

devclass_t ipmi_devclass;

/* XXX: Why? */
static void
ipmi_unload(void *arg)
{
	device_t *devs;
	int count;
	int i;

	if (ipmi_devclass == NULL)
		return;
	if (devclass_get_devices(ipmi_devclass, &devs, &count) != 0)
		return;
	for (i = 0; i < count; i++)
		device_delete_child(device_get_parent(devs[i]), devs[i]);
	free(devs, M_TEMP);
}
SYSUNINIT(ipmi_unload, SI_SUB_DRIVERS, SI_ORDER_FIRST, ipmi_unload, NULL);

#ifdef IPMI_DEBUG
static void
dump_buf(u_char *data, int len)
{
	char buf[20];
	char line[1024];
	char temp[30];
	int count = 0;
	int i = 0;

	printf("Address %p len %d\n", data, len);
	if (len > 256)
		len = 256;
	line[0] = '\000';
	for (; len > 0; len--, data++) {
		sprintf(temp, "%02x ", *data);
		strcat(line, temp);
		/* Printable ASCII is shown as-is; everything else as '.'. */
		if (*data >= ' ' && *data <= '~')
			buf[count] = *data;
		else
			buf[count] = '.';
		if (++count == 16) {
			buf[count] = '\000';
			count = 0;
			printf(" %3x %s %s\n", i, line, buf);
			i += 16;
			line[0] = '\000';
		}
	}
	buf[count] = '\000';

	/* Pad the final partial line (three columns per hex byte). */
	for (; count != 16; count++) {
		strcat(line, "   ");
	}
	printf(" %3x %s %s\n", i, line, buf);
}
#endif