/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2006 IronPort Systems Inc. <ambrisko@ironport.com>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/condvar.h>
#include <sys/conf.h>
#include <sys/eventhandler.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/poll.h>
#include <sys/reboot.h>
#include <sys/rman.h>
#include <sys/selinfo.h>
#include <sys/sysctl.h>
#include <sys/watchdog.h>

#ifdef LOCAL_MODULE
#include <ipmi.h>
#include <ipmivars.h>
#else
#include <sys/ipmi.h>
#include <dev/ipmi/ipmivars.h>
#endif

#ifdef IPMICTL_SEND_COMMAND_32
#include <sys/abi_compat.h>
#endif

/*
 * Driver request structures are allocated on the stack via alloca() to
 * avoid calling malloc(), especially for the watchdog handler.
 * To avoid too much stack growth, a previously allocated structure can
 * be reused via IPMI_INIT_DRIVER_REQUEST(), but the caller should ensure
 * that there is adequate reply/request space in the original allocation.
 */
#define	IPMI_INIT_DRIVER_REQUEST(req, addr, cmd, reqlen, replylen)	\
	bzero((req), sizeof(struct ipmi_request));			\
	ipmi_init_request((req), NULL, 0, (addr), (cmd), (reqlen), (replylen))

#define	IPMI_ALLOC_DRIVER_REQUEST(req, addr, cmd, reqlen, replylen)	\
	(req) = __builtin_alloca(sizeof(struct ipmi_request) +		\
	    (reqlen) + (replylen));					\
	IPMI_INIT_DRIVER_REQUEST((req), (addr), (cmd), (reqlen),	\
	    (replylen))
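
/*
 * A minimal usage sketch (it mirrors ipmi_reset_watchdog() below and is
 * shown here only to illustrate the macros): the request structure and
 * its request/reply buffers come from a single alloca()'d block, so
 * nothing needs to be freed afterwards.
 *
 *	struct ipmi_request *req;
 *
 *	IPMI_ALLOC_DRIVER_REQUEST(req, IPMI_ADDR(IPMI_APP_REQUEST, 0),
 *	    IPMI_RESET_WDOG, 0, 0);
 *	error = ipmi_submit_driver_request(sc, req, 0);
 */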

static d_ioctl_t ipmi_ioctl;
static d_poll_t ipmi_poll;
static d_open_t ipmi_open;
static void ipmi_dtor(void *arg);

int ipmi_attached = 0;

static int on = 1;
static bool wd_in_shutdown = false;
static int wd_timer_actions = IPMI_SET_WD_ACTION_POWER_CYCLE;
static int wd_shutdown_countdown = 0;	/* sec */
static int wd_startup_countdown = 0;	/* sec */
static int wd_pretimeout_countdown = 120;	/* sec */
static int cycle_wait = 10;		/* sec */
static int wd_init_enable = 1;

static SYSCTL_NODE(_hw, OID_AUTO, ipmi, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "IPMI driver parameters");
SYSCTL_INT(_hw_ipmi, OID_AUTO, on, CTLFLAG_RWTUN,
    &on, 0, "Enable the IPMI driver");
SYSCTL_INT(_hw_ipmi, OID_AUTO, wd_init_enable, CTLFLAG_RWTUN,
    &wd_init_enable, 1, "Enable watchdog initialization");
SYSCTL_INT(_hw_ipmi, OID_AUTO, wd_timer_actions, CTLFLAG_RWTUN,
    &wd_timer_actions, 0,
    "IPMI watchdog timer actions (including pre-timeout interrupt)");
SYSCTL_INT(_hw_ipmi, OID_AUTO, wd_shutdown_countdown, CTLFLAG_RWTUN,
    &wd_shutdown_countdown, 0,
    "IPMI watchdog countdown for shutdown (seconds)");
SYSCTL_INT(_hw_ipmi, OID_AUTO, wd_startup_countdown, CTLFLAG_RDTUN,
    &wd_startup_countdown, 0,
    "IPMI watchdog countdown initialized during startup (seconds)");
SYSCTL_INT(_hw_ipmi, OID_AUTO, wd_pretimeout_countdown, CTLFLAG_RWTUN,
    &wd_pretimeout_countdown, 0,
    "IPMI watchdog pre-timeout countdown (seconds)");
SYSCTL_INT(_hw_ipmi, OID_AUTO, cycle_wait, CTLFLAG_RWTUN,
    &cycle_wait, 0,
    "IPMI power cycle on reboot delay time (seconds)");

static struct cdevsw ipmi_cdevsw = {
	.d_version =	D_VERSION,
	.d_open =	ipmi_open,
	.d_ioctl =	ipmi_ioctl,
	.d_poll =	ipmi_poll,
	.d_name =	"ipmi",
};

static MALLOC_DEFINE(M_IPMI, "ipmi", "ipmi");

static int
ipmi_open(struct cdev *cdev, int flags, int fmt, struct thread *td)
{
	struct ipmi_device *dev;
	struct ipmi_softc *sc;
	int error;

	if (!on)
		return (ENOENT);

	/* Initialize the per file descriptor data. */
	dev = malloc(sizeof(struct ipmi_device), M_IPMI, M_WAITOK | M_ZERO);
	error = devfs_set_cdevpriv(dev, ipmi_dtor);
	if (error) {
		free(dev, M_IPMI);
		return (error);
	}

	sc = cdev->si_drv1;
	TAILQ_INIT(&dev->ipmi_completed_requests);
	dev->ipmi_address = IPMI_BMC_SLAVE_ADDR;
	dev->ipmi_lun = IPMI_BMC_SMS_LUN;
	dev->ipmi_softc = sc;
	IPMI_LOCK(sc);
	sc->ipmi_opened++;
	IPMI_UNLOCK(sc);

	return (0);
}

static int
ipmi_poll(struct cdev *cdev, int poll_events, struct thread *td)
{
	struct ipmi_device *dev;
	struct ipmi_softc *sc;
	int revents = 0;

	if (devfs_get_cdevpriv((void **)&dev))
		return (0);

	sc = cdev->si_drv1;
	IPMI_LOCK(sc);
	if (poll_events & (POLLIN | POLLRDNORM)) {
		if (!TAILQ_EMPTY(&dev->ipmi_completed_requests))
			revents |= poll_events & (POLLIN | POLLRDNORM);
		if (dev->ipmi_requests == 0)
			revents |= POLLERR;
	}

	if (revents == 0) {
		if (poll_events & (POLLIN | POLLRDNORM))
			selrecord(td, &dev->ipmi_select);
	}
	IPMI_UNLOCK(sc);

	return (revents);
}

static void
ipmi_purge_completed_requests(struct ipmi_device *dev)
{
	struct ipmi_request *req;

	while (!TAILQ_EMPTY(&dev->ipmi_completed_requests)) {
		req = TAILQ_FIRST(&dev->ipmi_completed_requests);
		TAILQ_REMOVE(&dev->ipmi_completed_requests, req, ir_link);
		dev->ipmi_requests--;
		ipmi_free_request(req);
	}
}

static void
ipmi_dtor(void *arg)
{
	struct ipmi_request *req, *nreq;
	struct ipmi_device *dev;
	struct ipmi_softc *sc;

	dev = arg;
	sc = dev->ipmi_softc;

	IPMI_LOCK(sc);
	if (dev->ipmi_requests) {
		/* Throw away any pending requests for this device. */
		TAILQ_FOREACH_SAFE(req, &sc->ipmi_pending_requests_highpri,
		    ir_link, nreq) {
			if (req->ir_owner == dev) {
				TAILQ_REMOVE(&sc->ipmi_pending_requests_highpri,
				    req, ir_link);
				dev->ipmi_requests--;
				ipmi_free_request(req);
			}
		}
		TAILQ_FOREACH_SAFE(req, &sc->ipmi_pending_requests, ir_link,
		    nreq) {
			if (req->ir_owner == dev) {
				TAILQ_REMOVE(&sc->ipmi_pending_requests, req,
				    ir_link);
				dev->ipmi_requests--;
				ipmi_free_request(req);
			}
		}

		/* Throw away any pending completed requests for this device. */
		ipmi_purge_completed_requests(dev);

		/*
		 * If we still have outstanding requests, they must be stuck
		 * in an interface driver, so wait for those to drain.
		 */
		dev->ipmi_closing = 1;
		while (dev->ipmi_requests > 0) {
			msleep(&dev->ipmi_requests, &sc->ipmi_requests_lock,
			    PWAIT, "ipmidrain", 0);
			ipmi_purge_completed_requests(dev);
		}
	}
	sc->ipmi_opened--;
	IPMI_UNLOCK(sc);

	/* Cleanup. */
	free(dev, M_IPMI);
}

static u_char
ipmi_ipmb_checksum(u_char *data, int len)
{
	u_char sum = 0;

	for (; len; len--)
		sum += *data++;
	return (-sum);
}
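
/*
 * Illustration of the 2's-complement checksum above: the checksum byte is
 * chosen so that the covered bytes plus the checksum sum to zero mod 256.
 * E.g. for the two connection header bytes 0x20 and 0x18:
 *
 *	ipmi_ipmb_checksum((u_char[]){ 0x20, 0x18 }, 2) == 0xc8
 *	(0x20 + 0x18 + 0xc8) & 0xff == 0
 */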

static int
ipmi_ioctl(struct cdev *cdev, u_long cmd, caddr_t data,
    int flags, struct thread *td)
{
	struct ipmi_softc *sc;
	struct ipmi_device *dev;
	struct ipmi_request *kreq;
	struct ipmi_req *req = (struct ipmi_req *)data;
	struct ipmi_recv *recv = (struct ipmi_recv *)data;
	struct ipmi_addr addr;
#ifdef IPMICTL_SEND_COMMAND_32
	struct ipmi_req32 *req32 = (struct ipmi_req32 *)data;
	struct ipmi_recv32 *recv32 = (struct ipmi_recv32 *)data;
	union {
		struct ipmi_req req;
		struct ipmi_recv recv;
	} thunk32;
#endif
	int error, len;

	error = devfs_get_cdevpriv((void **)&dev);
	if (error)
		return (error);

	sc = cdev->si_drv1;

#ifdef IPMICTL_SEND_COMMAND_32
	/* Convert 32-bit structures to native. */
	switch (cmd) {
	case IPMICTL_SEND_COMMAND_32:
		req = &thunk32.req;
		req->addr = PTRIN(req32->addr);
		req->addr_len = req32->addr_len;
		req->msgid = req32->msgid;
		req->msg.netfn = req32->msg.netfn;
		req->msg.cmd = req32->msg.cmd;
		req->msg.data_len = req32->msg.data_len;
		req->msg.data = PTRIN(req32->msg.data);
		break;
	case IPMICTL_RECEIVE_MSG_TRUNC_32:
	case IPMICTL_RECEIVE_MSG_32:
		recv = &thunk32.recv;
		recv->addr = PTRIN(recv32->addr);
		recv->addr_len = recv32->addr_len;
		recv->msg.data_len = recv32->msg.data_len;
		recv->msg.data = PTRIN(recv32->msg.data);
		break;
	}
#endif

	switch (cmd) {
#ifdef IPMICTL_SEND_COMMAND_32
	case IPMICTL_SEND_COMMAND_32:
#endif
	case IPMICTL_SEND_COMMAND:
		error = copyin(req->addr, &addr, sizeof(addr));
		if (error)
			return (error);

		if (addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
			struct ipmi_system_interface_addr *saddr =
			    (struct ipmi_system_interface_addr *)&addr;

			kreq = ipmi_alloc_request(dev, req->msgid,
			    IPMI_ADDR(req->msg.netfn, saddr->lun & 0x3),
			    req->msg.cmd, req->msg.data_len, IPMI_MAX_RX);
			error = copyin(req->msg.data, kreq->ir_request,
			    req->msg.data_len);
			if (error) {
				ipmi_free_request(kreq);
				return (error);
			}
			IPMI_LOCK(sc);
			dev->ipmi_requests++;
			error = sc->ipmi_enqueue_request(sc, kreq);
			IPMI_UNLOCK(sc);
			if (error)
				return (error);
			break;
		}

		/* Special processing for IPMB commands */
		struct ipmi_ipmb_addr *iaddr = (struct ipmi_ipmb_addr *)&addr;

		IPMI_ALLOC_DRIVER_REQUEST(kreq, IPMI_ADDR(IPMI_APP_REQUEST, 0),
		    IPMI_SEND_MSG, req->msg.data_len + 8, IPMI_MAX_RX);
		/* Construct the SEND MSG header */
		kreq->ir_request[0] = iaddr->channel;
		kreq->ir_request[1] = iaddr->slave_addr;
		kreq->ir_request[2] = IPMI_ADDR(req->msg.netfn, iaddr->lun);
		kreq->ir_request[3] =
		    ipmi_ipmb_checksum(&kreq->ir_request[1], 2);
		kreq->ir_request[4] = dev->ipmi_address;
		kreq->ir_request[5] = IPMI_ADDR(0, dev->ipmi_lun);
		kreq->ir_request[6] = req->msg.cmd;
		/* Copy the message data */
		if (req->msg.data_len > 0) {
			error = copyin(req->msg.data, &kreq->ir_request[7],
			    req->msg.data_len);
			if (error != 0)
				return (error);
		}
		kreq->ir_request[req->msg.data_len + 7] =
		    ipmi_ipmb_checksum(&kreq->ir_request[4],
		    req->msg.data_len + 3);
		error = ipmi_submit_driver_request(sc, kreq, MAX_TIMEOUT);
		if (error != 0)
			return (error);

		kreq = ipmi_alloc_request(dev, req->msgid,
		    IPMI_ADDR(IPMI_APP_REQUEST, 0), IPMI_GET_MSG,
		    0, IPMI_MAX_RX);
		kreq->ir_ipmb = true;
		kreq->ir_ipmb_addr = IPMI_ADDR(req->msg.netfn, 0);
		kreq->ir_ipmb_command = req->msg.cmd;
		IPMI_LOCK(sc);
		dev->ipmi_requests++;
		error = sc->ipmi_enqueue_request(sc, kreq);
		IPMI_UNLOCK(sc);
		if (error != 0)
			return (error);
		break;
#ifdef IPMICTL_SEND_COMMAND_32
	case IPMICTL_RECEIVE_MSG_TRUNC_32:
	case IPMICTL_RECEIVE_MSG_32:
#endif
	case IPMICTL_RECEIVE_MSG_TRUNC:
	case IPMICTL_RECEIVE_MSG:
		error = copyin(recv->addr, &addr, sizeof(addr));
		if (error)
			return (error);

		IPMI_LOCK(sc);
		kreq = TAILQ_FIRST(&dev->ipmi_completed_requests);
		if (kreq == NULL) {
			IPMI_UNLOCK(sc);
			return (EAGAIN);
		}
		if (kreq->ir_error != 0) {
			error = kreq->ir_error;
			TAILQ_REMOVE(&dev->ipmi_completed_requests, kreq,
			    ir_link);
			dev->ipmi_requests--;
			IPMI_UNLOCK(sc);
			ipmi_free_request(kreq);
			return (error);
		}

		recv->recv_type = IPMI_RESPONSE_RECV_TYPE;
		recv->msgid = kreq->ir_msgid;
		if (kreq->ir_ipmb) {
			addr.channel = IPMI_IPMB_CHANNEL;
			recv->msg.netfn =
			    IPMI_REPLY_ADDR(kreq->ir_ipmb_addr) >> 2;
			recv->msg.cmd = kreq->ir_ipmb_command;
			/* Get the compcode of response */
			kreq->ir_compcode = kreq->ir_reply[6];
			/* Move the reply head past response header */
			kreq->ir_reply += 7;
			len = kreq->ir_replylen - 7;
		} else {
			addr.channel = IPMI_BMC_CHANNEL;
			recv->msg.netfn = IPMI_REPLY_ADDR(kreq->ir_addr) >> 2;
			recv->msg.cmd = kreq->ir_command;
			len = kreq->ir_replylen + 1;
		}

		if (recv->msg.data_len < len &&
		    (cmd == IPMICTL_RECEIVE_MSG
#ifdef IPMICTL_RECEIVE_MSG_32
		    || cmd == IPMICTL_RECEIVE_MSG_32
#endif
		    )) {
			IPMI_UNLOCK(sc);
			return (EMSGSIZE);
		}
		TAILQ_REMOVE(&dev->ipmi_completed_requests, kreq, ir_link);
		dev->ipmi_requests--;
		IPMI_UNLOCK(sc);
		len = min(recv->msg.data_len, len);
		recv->msg.data_len = len;
		error = copyout(&addr, recv->addr, sizeof(addr));
		if (error == 0)
			error = copyout(&kreq->ir_compcode, recv->msg.data, 1);
		if (error == 0)
			error = copyout(kreq->ir_reply, recv->msg.data + 1,
			    len - 1);
		ipmi_free_request(kreq);
		if (error)
			return (error);
		break;
	case IPMICTL_SET_MY_ADDRESS_CMD:
		IPMI_LOCK(sc);
		dev->ipmi_address = *(int *)data;
		IPMI_UNLOCK(sc);
		break;
	case IPMICTL_GET_MY_ADDRESS_CMD:
		IPMI_LOCK(sc);
		*(int *)data = dev->ipmi_address;
		IPMI_UNLOCK(sc);
		break;
	case IPMICTL_SET_MY_LUN_CMD:
		IPMI_LOCK(sc);
		dev->ipmi_lun = *(int *)data & 0x3;
		IPMI_UNLOCK(sc);
		break;
	case IPMICTL_GET_MY_LUN_CMD:
		IPMI_LOCK(sc);
		*(int *)data = dev->ipmi_lun;
		IPMI_UNLOCK(sc);
		break;
	case IPMICTL_SET_GETS_EVENTS_CMD:
		/*
		device_printf(sc->ipmi_dev,
		    "IPMICTL_SET_GETS_EVENTS_CMD NA\n");
		*/
		break;
	case IPMICTL_REGISTER_FOR_CMD:
	case IPMICTL_UNREGISTER_FOR_CMD:
		return (EOPNOTSUPP);
	default:
		device_printf(sc->ipmi_dev, "Unknown IOCTL %lX\n", cmd);
		return (ENOIOCTL);
	}

#ifdef IPMICTL_SEND_COMMAND_32
	/* Update changed fields in 32-bit structures. */
	switch (cmd) {
	case IPMICTL_RECEIVE_MSG_TRUNC_32:
	case IPMICTL_RECEIVE_MSG_32:
		recv32->recv_type = recv->recv_type;
		recv32->msgid = recv->msgid;
		recv32->msg.netfn = recv->msg.netfn;
		recv32->msg.cmd = recv->msg.cmd;
		recv32->msg.data_len = recv->msg.data_len;
		break;
	}
#endif
	return (0);
}
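
/*
 * Sketch of the userland side of the ioctl flow above (illustrative only;
 * error handling elided, constants per <sys/ipmi.h>).  Note that msg.netfn
 * is the unshifted network function (0x06 = Application):
 *
 *	int fd = open("/dev/ipmi0", O_RDWR);
 *	struct ipmi_system_interface_addr addr = {
 *		.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE,
 *		.channel = IPMI_BMC_CHANNEL,
 *	};
 *	struct ipmi_req req = {
 *		.addr = (unsigned char *)&addr,
 *		.addr_len = sizeof(addr),
 *		.msg.netfn = 0x06,	// App
 *		.msg.cmd = 0x01,	// Get Device ID
 *	};
 *	ioctl(fd, IPMICTL_SEND_COMMAND, &req);
 *
 * The caller then poll(2)s for POLLIN and issues IPMICTL_RECEIVE_MSG with
 * a reply buffer whose first byte receives the completion code.
 */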

/*
 * Request management.
 */

__inline void
ipmi_init_request(struct ipmi_request *req, struct ipmi_device *dev, long msgid,
    uint8_t addr, uint8_t command, size_t requestlen, size_t replylen)
{

	req->ir_owner = dev;
	req->ir_msgid = msgid;
	req->ir_addr = addr;
	req->ir_command = command;
	if (requestlen) {
		req->ir_request = (char *)&req[1];
		req->ir_requestlen = requestlen;
	}
	if (replylen) {
		req->ir_reply = (char *)&req[1] + requestlen;
		req->ir_replybuflen = replylen;
	}
}

/* Allocate a new request with request and reply buffers. */
struct ipmi_request *
ipmi_alloc_request(struct ipmi_device *dev, long msgid, uint8_t addr,
    uint8_t command, size_t requestlen, size_t replylen)
{
	struct ipmi_request *req;

	req = malloc(sizeof(struct ipmi_request) + requestlen + replylen,
	    M_IPMI, M_WAITOK | M_ZERO);
	ipmi_init_request(req, dev, msgid, addr, command, requestlen, replylen);
	return (req);
}

/* Free a request no longer in use. */
void
ipmi_free_request(struct ipmi_request *req)
{

	free(req, M_IPMI);
}
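
/*
 * Layout note: ipmi_alloc_request() makes a single allocation and
 * ipmi_init_request() carves it up, so a request looks like this in
 * memory:
 *
 *	struct ipmi_request | request buffer | reply buffer
 *	                      (requestlen)     (replylen)
 *
 * ir_request and ir_reply simply point into the tail of the allocation,
 * which is why ipmi_free_request() is a single free().
 */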

/* Store a processed request on the appropriate completion queue. */
void
ipmi_complete_request(struct ipmi_softc *sc, struct ipmi_request *req)
{
	struct ipmi_device *dev;

	IPMI_LOCK_ASSERT(sc);

	/*
	 * Anonymous requests (from inside the driver) always have a
	 * waiter that we awaken.
	 */
	if (req->ir_owner == NULL)
		wakeup(req);
	else {
		dev = req->ir_owner;
		TAILQ_INSERT_TAIL(&dev->ipmi_completed_requests, req, ir_link);
		selwakeup(&dev->ipmi_select);
		if (dev->ipmi_closing)
			wakeup(&dev->ipmi_requests);
	}
}

/* Perform an internal driver request. */
int
ipmi_submit_driver_request(struct ipmi_softc *sc, struct ipmi_request *req,
    int timo)
{

	return (sc->ipmi_driver_request(sc, req, timo));
}

/*
 * Helper routine for polled system interfaces that use
 * ipmi_polled_enqueue_request() to queue requests.  This routine
 * waits until there is a pending request and then returns the first
 * request.  If the driver is shutting down, it returns NULL.
 */
struct ipmi_request *
ipmi_dequeue_request(struct ipmi_softc *sc)
{
	struct ipmi_request *req;

	IPMI_LOCK_ASSERT(sc);

	while (!sc->ipmi_detaching && TAILQ_EMPTY(&sc->ipmi_pending_requests) &&
	    TAILQ_EMPTY(&sc->ipmi_pending_requests_highpri))
		cv_wait(&sc->ipmi_request_added, &sc->ipmi_requests_lock);
	if (sc->ipmi_detaching)
		return (NULL);

	req = TAILQ_FIRST(&sc->ipmi_pending_requests_highpri);
	if (req != NULL)
		TAILQ_REMOVE(&sc->ipmi_pending_requests_highpri, req, ir_link);
	else {
		req = TAILQ_FIRST(&sc->ipmi_pending_requests);
		TAILQ_REMOVE(&sc->ipmi_pending_requests, req, ir_link);
	}
	return (req);
}

/* Default implementation of ipmi_enqueue_request() for polled interfaces. */
int
ipmi_polled_enqueue_request(struct ipmi_softc *sc, struct ipmi_request *req)
{

	IPMI_LOCK_ASSERT(sc);

	TAILQ_INSERT_TAIL(&sc->ipmi_pending_requests, req, ir_link);
	cv_signal(&sc->ipmi_request_added);
	return (0);
}

int
ipmi_polled_enqueue_request_highpri(struct ipmi_softc *sc,
    struct ipmi_request *req)
{

	IPMI_LOCK_ASSERT(sc);

	TAILQ_INSERT_TAIL(&sc->ipmi_pending_requests_highpri, req, ir_link);
	cv_signal(&sc->ipmi_request_added);
	return (0);
}

/*
 * Watchdog event handler.
 */

static int
ipmi_reset_watchdog(struct ipmi_softc *sc)
{
	struct ipmi_request *req;
	int error;

	IPMI_ALLOC_DRIVER_REQUEST(req, IPMI_ADDR(IPMI_APP_REQUEST, 0),
	    IPMI_RESET_WDOG, 0, 0);
	error = ipmi_submit_driver_request(sc, req, 0);
	if (error) {
		device_printf(sc->ipmi_dev, "Failed to reset watchdog\n");
	} else if (req->ir_compcode == 0x80) {
		error = ENOENT;
	} else if (req->ir_compcode != 0) {
		device_printf(sc->ipmi_dev, "Watchdog reset returned 0x%x\n",
		    req->ir_compcode);
		error = EINVAL;
	}
	return (error);
}

static int
ipmi_set_watchdog(struct ipmi_softc *sc, unsigned int sec)
{
	struct ipmi_request *req;
	int error;

	if (sec > 0xffff / 10)
		return (EINVAL);

	IPMI_ALLOC_DRIVER_REQUEST(req, IPMI_ADDR(IPMI_APP_REQUEST, 0),
	    IPMI_SET_WDOG, 6, 0);
	if (sec) {
		req->ir_request[0] = IPMI_SET_WD_TIMER_DONT_STOP
		    | IPMI_SET_WD_TIMER_SMS_OS;
		req->ir_request[1] = (wd_timer_actions & 0xff);
		req->ir_request[2] = min(0xff,
		    min(wd_pretimeout_countdown, (sec + 2) / 4));
		req->ir_request[3] = 0;	/* Timer use */
		req->ir_request[4] = (sec * 10) & 0xff;
		req->ir_request[5] = (sec * 10) >> 8;
	} else {
		req->ir_request[0] = IPMI_SET_WD_TIMER_SMS_OS;
		req->ir_request[1] = 0;
		req->ir_request[2] = 0;
		req->ir_request[3] = 0;	/* Timer use */
		req->ir_request[4] = 0;
		req->ir_request[5] = 0;
	}
	error = ipmi_submit_driver_request(sc, req, 0);
	if (error) {
		device_printf(sc->ipmi_dev, "Failed to set watchdog\n");
	} else if (req->ir_compcode != 0) {
		device_printf(sc->ipmi_dev, "Watchdog set returned 0x%x\n",
		    req->ir_compcode);
		error = EINVAL;
	}
	return (error);
}
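
/*
 * Worked example of the Set Watchdog Timer encoding above: the countdown
 * in bytes 4-5 is in 100 ms ticks, least-significant byte first, so
 * sec = 120 gives 1200 ticks = 0x04b0:
 *
 *	req->ir_request[4] == 0xb0
 *	req->ir_request[5] == 0x04
 *
 * The 16-bit tick count is also where the (sec > 0xffff / 10) limit
 * checked on entry comes from.
 */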

static void
ipmi_wd_event(void *arg, unsigned int cmd, int *error)
{
	struct ipmi_softc *sc = arg;
	unsigned int timeout;
	int e;

	/* Ignore requests while disabled. */
	if (!on)
		return;

	/*
	 * To prevent infinite hangs, we don't let anyone pat or change
	 * the watchdog when we're shutting down.  (See ipmi_shutdown_event().)
	 * However, we do want to keep patting the watchdog while we are doing
	 * a coredump.
	 */
	if (wd_in_shutdown) {
		if (dumping && sc->ipmi_watchdog_active)
			ipmi_reset_watchdog(sc);
		return;
	}

	cmd &= WD_INTERVAL;
	if (cmd > 0 && cmd <= 63) {
		timeout = ((uint64_t)1 << cmd) / 1000000000;
		if (timeout == 0)
			timeout = 1;
		if (timeout != sc->ipmi_watchdog_active ||
		    wd_timer_actions != sc->ipmi_watchdog_actions ||
		    wd_pretimeout_countdown != sc->ipmi_watchdog_pretimeout) {
			e = ipmi_set_watchdog(sc, timeout);
			if (e == 0) {
				sc->ipmi_watchdog_active = timeout;
				sc->ipmi_watchdog_actions = wd_timer_actions;
				sc->ipmi_watchdog_pretimeout =
				    wd_pretimeout_countdown;
			} else {
				(void)ipmi_set_watchdog(sc, 0);
				sc->ipmi_watchdog_active = 0;
				sc->ipmi_watchdog_actions = 0;
				sc->ipmi_watchdog_pretimeout = 0;
			}
		}
		if (sc->ipmi_watchdog_active != 0) {
			e = ipmi_reset_watchdog(sc);
			if (e == 0) {
				*error = 0;
			} else {
				(void)ipmi_set_watchdog(sc, 0);
				sc->ipmi_watchdog_active = 0;
				sc->ipmi_watchdog_actions = 0;
				sc->ipmi_watchdog_pretimeout = 0;
			}
		}
	} else if (atomic_readandclear_int(&sc->ipmi_watchdog_active) != 0) {
		sc->ipmi_watchdog_actions = 0;
		sc->ipmi_watchdog_pretimeout = 0;

		e = ipmi_set_watchdog(sc, 0);
		if (e != 0 && cmd == 0)
			*error = EOPNOTSUPP;
	}
}
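
/*
 * Note on the interval math in ipmi_wd_event() above: watchdog(9) passes
 * the timeout as a power of two in nanoseconds, with the exponent in the
 * WD_INTERVAL bits of cmd, hence
 *
 *	timeout = (1ULL << cmd) / 1000000000;
 *
 * e.g. cmd == 37 yields 2^37 ns ~= 137 seconds, while exponents of 29 or
 * less (2^29 ns ~= 0.54 s) truncate to zero and are clamped to one second.
 */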

static void
ipmi_shutdown_event(void *arg, unsigned int cmd, int *error)
{
	struct ipmi_softc *sc = arg;

	/* Ignore event if disabled. */
	if (!on)
		return;

	/*
	 * A positive wd_shutdown_countdown re-arms the watchdog; zero
	 * disables it; a negative value leaves the existing state unchanged.
	 *
	 * Revert to using a power cycle to ensure that the watchdog will
	 * do something useful here.  Having the watchdog send an NMI
	 * instead is useless during shutdown, and might be ignored if an
	 * NMI already triggered.
	 */

	wd_in_shutdown = true;
	if (wd_shutdown_countdown == 0) {
		/* disable watchdog */
		ipmi_set_watchdog(sc, 0);
		sc->ipmi_watchdog_active = 0;
	} else if (wd_shutdown_countdown > 0) {
		/* set desired action and time, and, reset watchdog */
		wd_timer_actions = IPMI_SET_WD_ACTION_POWER_CYCLE;
		ipmi_set_watchdog(sc, wd_shutdown_countdown);
		sc->ipmi_watchdog_active = wd_shutdown_countdown;
		ipmi_reset_watchdog(sc);
	}
}

static void
ipmi_power_cycle(void *arg, int howto)
{
	struct ipmi_softc *sc = arg;
	struct ipmi_request *req;

	/*
	 * Ignore everything except power cycling requests.
	 */
	if ((howto & RB_POWERCYCLE) == 0)
		return;

	device_printf(sc->ipmi_dev, "Power cycling using IPMI\n");

	/*
	 * Send a CHASSIS_CONTROL command to the CHASSIS device, subcommand 2
	 * as described in IPMI v2.0 spec section 28.3.
	 */
	IPMI_ALLOC_DRIVER_REQUEST(req, IPMI_ADDR(IPMI_CHASSIS_REQUEST, 0),
	    IPMI_CHASSIS_CONTROL, 1, 0);
	req->ir_request[0] = IPMI_CC_POWER_CYCLE;

	ipmi_submit_driver_request(sc, req, MAX_TIMEOUT);

	if (req->ir_error != 0 || req->ir_compcode != 0) {
		device_printf(sc->ipmi_dev,
		    "Power cycling via IPMI failed code %#x %#x\n",
		    req->ir_error, req->ir_compcode);
		return;
	}

	/*
	 * BMCs are notoriously slow; give the power-down leg of the power
	 * cycle cycle_wait seconds.  If that expires, fall back to the next
	 * handler in the shutdown_final chain and/or the platform failsafe.
	 */
	DELAY(cycle_wait * 1000 * 1000);
	device_printf(sc->ipmi_dev, "Power cycling via IPMI timed out\n");
}
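
/*
 * The Get Device ID response parsed in ipmi_startup() below (IPMI v2.0
 * spec section 20.1); the completion code has already been stripped into
 * ir_compcode, so the data bytes are:
 *
 *	ir_reply[0]	Device ID
 *	ir_reply[1]	Device Revision (low nibble)
 *	ir_reply[2]	Firmware Revision 1 (major, bits 6:0)
 *	ir_reply[3]	Firmware Revision 2 (minor, BCD)
 *	ir_reply[4]	IPMI Version (BCD; low nibble major, high nibble minor)
 *	ir_reply[5]	Additional Device Support mask (IPMI_ADS_* bits)
 */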

static void
ipmi_startup(void *arg)
{
	struct ipmi_softc *sc = arg;
	struct ipmi_request *req;
	device_t dev;
	int error, i;

	config_intrhook_disestablish(&sc->ipmi_ich);
	dev = sc->ipmi_dev;

	/* Initialize interface-independent state. */
	mtx_init(&sc->ipmi_requests_lock, "ipmi requests", NULL, MTX_DEF);
	mtx_init(&sc->ipmi_io_lock, "ipmi io", NULL, MTX_DEF);
	cv_init(&sc->ipmi_request_added, "ipmireq");
	TAILQ_INIT(&sc->ipmi_pending_requests_highpri);
	TAILQ_INIT(&sc->ipmi_pending_requests);

	/* Initialize interface-dependent state. */
	error = sc->ipmi_startup(sc);
	if (error) {
		device_printf(dev, "Failed to initialize interface: %d\n",
		    error);
		return;
	}

	/* Send a GET_DEVICE_ID request. */
	IPMI_ALLOC_DRIVER_REQUEST(req, IPMI_ADDR(IPMI_APP_REQUEST, 0),
	    IPMI_GET_DEVICE_ID, 0, 15);

	error = ipmi_submit_driver_request(sc, req, MAX_TIMEOUT);
	if (error == EWOULDBLOCK) {
		device_printf(dev, "Timed out waiting for GET_DEVICE_ID\n");
		return;
	} else if (error) {
		device_printf(dev, "Failed GET_DEVICE_ID: %d\n", error);
		return;
	} else if (req->ir_compcode != 0) {
		device_printf(dev,
		    "Bad completion code for GET_DEVICE_ID: %d\n",
		    req->ir_compcode);
		return;
	} else if (req->ir_replylen < 6) {
		device_printf(dev, "Short reply for GET_DEVICE_ID: %d\n",
		    req->ir_replylen);
		return;
	}

	device_printf(dev, "IPMI device rev. %d, firmware rev. %d.%d%d, "
	    "version %d.%d, device support mask %#x\n",
	    req->ir_reply[1] & 0x0f,
	    req->ir_reply[2] & 0x7f, req->ir_reply[3] >> 4,
	    req->ir_reply[3] & 0x0f,
	    req->ir_reply[4] & 0x0f, req->ir_reply[4] >> 4, req->ir_reply[5]);

	sc->ipmi_dev_support = req->ir_reply[5];

	IPMI_INIT_DRIVER_REQUEST(req, IPMI_ADDR(IPMI_APP_REQUEST, 0),
	    IPMI_CLEAR_FLAGS, 1, 0);

	ipmi_submit_driver_request(sc, req, 0);

	/* XXX: Magic numbers */
	if (req->ir_compcode == 0xc0) {
		device_printf(dev, "Clear flags is busy\n");
	}
	if (req->ir_compcode == 0xc1) {
		device_printf(dev, "Clear flags illegal\n");
	}

	for (i = 0; i < 8; i++) {
		IPMI_INIT_DRIVER_REQUEST(req, IPMI_ADDR(IPMI_APP_REQUEST, 0),
		    IPMI_GET_CHANNEL_INFO, 1, 0);
		req->ir_request[0] = i;

		error = ipmi_submit_driver_request(sc, req, 0);

		if (error != 0 || req->ir_compcode != 0)
			break;
	}
	device_printf(dev, "Number of channels %d\n", i);

	/*
	 * Probe for watchdog, but only for backends which support
	 * polled driver requests.
	 */
	if (wd_init_enable && sc->ipmi_driver_requests_polled) {
		IPMI_INIT_DRIVER_REQUEST(req, IPMI_ADDR(IPMI_APP_REQUEST, 0),
		    IPMI_GET_WDOG, 0, 0);

		error = ipmi_submit_driver_request(sc, req, 0);

		if (error == 0 && req->ir_compcode == 0x00) {
			device_printf(dev, "Attached watchdog\n");
			/* register the watchdog event handler */
			sc->ipmi_watchdog_tag = EVENTHANDLER_REGISTER(
			    watchdog_list, ipmi_wd_event, sc, 0);
			sc->ipmi_shutdown_tag = EVENTHANDLER_REGISTER(
			    shutdown_pre_sync, ipmi_shutdown_event,
			    sc, 0);
		}
	}

	sc->ipmi_cdev = make_dev(&ipmi_cdevsw, device_get_unit(dev),
	    UID_ROOT, GID_OPERATOR, 0660, "ipmi%d", device_get_unit(dev));
	if (sc->ipmi_cdev == NULL) {
		device_printf(dev, "Failed to create cdev\n");
		return;
	}
	sc->ipmi_cdev->si_drv1 = sc;

	/*
	 * Set initial watchdog state.  If desired, set an initial
	 * watchdog on startup.  Or, if the watchdog device is
	 * disabled, clear any existing watchdog.
	 */
	if (on && wd_startup_countdown > 0) {
		if (ipmi_set_watchdog(sc, wd_startup_countdown) == 0 &&
		    ipmi_reset_watchdog(sc) == 0) {
			sc->ipmi_watchdog_active = wd_startup_countdown;
			sc->ipmi_watchdog_actions = wd_timer_actions;
			sc->ipmi_watchdog_pretimeout = wd_pretimeout_countdown;
		} else
			(void)ipmi_set_watchdog(sc, 0);
		ipmi_reset_watchdog(sc);
	} else if (!on)
		(void)ipmi_set_watchdog(sc, 0);
	/*
	 * Power cycle the system off using IPMI.  We register at
	 * SHUTDOWN_PRI_LAST - 2 since we don't handle all the other kinds
	 * of reboots; we'll let other handlers deal with them.  We only
	 * try to do this if the BMC supports the Chassis device.
	 */
	if (sc->ipmi_dev_support & IPMI_ADS_CHASSIS) {
		device_printf(dev, "Establishing power cycle handler\n");
		sc->ipmi_power_cycle_tag = EVENTHANDLER_REGISTER(shutdown_final,
		    ipmi_power_cycle, sc, SHUTDOWN_PRI_LAST - 2);
	}
}

int
ipmi_attach(device_t dev)
{
	struct ipmi_softc *sc = device_get_softc(dev);
	int error;

	if (sc->ipmi_irq_res != NULL && sc->ipmi_intr != NULL) {
		error = bus_setup_intr(dev, sc->ipmi_irq_res, INTR_TYPE_MISC,
		    NULL, sc->ipmi_intr, sc, &sc->ipmi_irq);
		if (error) {
			device_printf(dev, "can't set up interrupt\n");
			return (error);
		}
	}

	bzero(&sc->ipmi_ich, sizeof(struct intr_config_hook));
	sc->ipmi_ich.ich_func = ipmi_startup;
	sc->ipmi_ich.ich_arg = sc;
	if (config_intrhook_establish(&sc->ipmi_ich) != 0) {
		device_printf(dev, "can't establish configuration hook\n");
		return (ENOMEM);
	}

	ipmi_attached = 1;
	return (0);
}
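
/*
 * Note that ipmi_attach() above defers all BMC traffic to ipmi_startup()
 * via a config_intrhook: the hook runs once interrupts are enabled, so
 * the GET_DEVICE_ID probe and friends are not issued from attach context.
 */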

int
ipmi_detach(device_t dev)
{
	struct ipmi_softc *sc;

	sc = device_get_softc(dev);

	/* Fail if there are any open handles. */
	IPMI_LOCK(sc);
	if (sc->ipmi_opened) {
		IPMI_UNLOCK(sc);
		return (EBUSY);
	}
	IPMI_UNLOCK(sc);
	if (sc->ipmi_cdev)
		destroy_dev(sc->ipmi_cdev);

	/* Detach from watchdog handling and turn off watchdog. */
	if (sc->ipmi_shutdown_tag)
		EVENTHANDLER_DEREGISTER(shutdown_pre_sync,
		    sc->ipmi_shutdown_tag);
	if (sc->ipmi_watchdog_tag) {
		EVENTHANDLER_DEREGISTER(watchdog_list, sc->ipmi_watchdog_tag);
		ipmi_set_watchdog(sc, 0);
	}

	/* Detach from shutdown handling for power cycle reboot. */
	if (sc->ipmi_power_cycle_tag)
		EVENTHANDLER_DEREGISTER(shutdown_final,
		    sc->ipmi_power_cycle_tag);

	/* XXX: should use shutdown callout I think. */
	/* If the backend uses a kthread, shut it down. */
	IPMI_LOCK(sc);
	sc->ipmi_detaching = 1;
	if (sc->ipmi_kthread) {
		cv_broadcast(&sc->ipmi_request_added);
		msleep(sc->ipmi_kthread, &sc->ipmi_requests_lock, 0,
		    "ipmi_wait", 0);
	}
	IPMI_UNLOCK(sc);

	/* ipmi_release_resources() also tears down the interrupt handler. */
	ipmi_release_resources(dev);
	mtx_destroy(&sc->ipmi_io_lock);
	mtx_destroy(&sc->ipmi_requests_lock);
	return (0);
}

void
ipmi_release_resources(device_t dev)
{
	struct ipmi_softc *sc;
	int i;

	sc = device_get_softc(dev);
	if (sc->ipmi_irq)
		bus_teardown_intr(dev, sc->ipmi_irq_res, sc->ipmi_irq);
	if (sc->ipmi_irq_res)
		bus_release_resource(dev, SYS_RES_IRQ, sc->ipmi_irq_rid,
		    sc->ipmi_irq_res);
	for (i = 0; i < MAX_RES; i++)
		if (sc->ipmi_io_res[i])
			bus_release_resource(dev, sc->ipmi_io_type,
			    sc->ipmi_io_rid + i, sc->ipmi_io_res[i]);
}

/* XXX: Why? */
static void
ipmi_unload(void *arg)
{
	device_t *devs;
	int count;
	int i;

	if (devclass_get_devices(devclass_find("ipmi"), &devs, &count) != 0)
		return;
	for (i = 0; i < count; i++)
		device_delete_child(device_get_parent(devs[i]), devs[i]);
	free(devs, M_TEMP);
}
SYSUNINIT(ipmi_unload, SI_SUB_DRIVERS, SI_ORDER_FIRST, ipmi_unload, NULL);

#ifdef IPMI_DEBUG
static void
dump_buf(u_char *data, int len)
{
	char buf[20];
	char line[1024];
	char temp[30];
	int count = 0;
	int i = 0;

	printf("Address %p len %d\n", data, len);
	if (len > 256)
		len = 256;
	line[0] = '\000';
	for (; len > 0; len--, data++) {
		sprintf(temp, "%02x ", *data);
		strcat(line, temp);
		if (*data >= ' ' && *data <= '~')
			buf[count] = *data;
		else
			buf[count] = '.';
		if (++count == 16) {
			buf[count] = '\000';
			count = 0;
			printf(" %3x %s %s\n", i, line, buf);
			i += 16;
			line[0] = '\000';
		}
	}
	buf[count] = '\000';

	/* Pad the final, partial line out to align the ASCII column. */
	for (; count != 16; count++)
		strcat(line, "   ");
	printf(" %3x %s %s\n", i, line, buf);
}
#endif