1 /*- 2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD 3 * 4 * Copyright (c) 2003-2009 Silicon Graphics International Corp. 5 * Copyright (c) 2012 The FreeBSD Foundation 6 * Copyright (c) 2015 Alexander Motin <mav@FreeBSD.org> 7 * Copyright (c) 2017 Jakub Wojciech Klama <jceel@FreeBSD.org> 8 * All rights reserved. 9 * 10 * Redistribution and use in source and binary forms, with or without 11 * modification, are permitted provided that the following conditions 12 * are met: 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions and the following disclaimer, 15 * without modification, immediately at the beginning of the file. 16 * 2. Redistributions in binary form must reproduce the above copyright 17 * notice, this list of conditions and the following disclaimer in the 18 * documentation and/or other materials provided with the distribution. 19 * 20 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 21 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 22 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 23 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 24 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 25 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 26 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 27 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 28 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 29 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 30 */ 31 32 #include <sys/cdefs.h> 33 __FBSDID("$FreeBSD$"); 34 35 #include <sys/param.h> 36 #include <sys/systm.h> 37 #include <sys/kernel.h> 38 #include <sys/types.h> 39 #include <sys/lock.h> 40 #include <sys/module.h> 41 #include <sys/mutex.h> 42 #include <sys/condvar.h> 43 #include <sys/malloc.h> 44 #include <sys/conf.h> 45 #include <sys/queue.h> 46 #include <sys/sysctl.h> 47 #include <sys/nv.h> 48 #include <sys/dnv.h> 49 50 #include <cam/cam.h> 51 #include <cam/scsi/scsi_all.h> 52 #include <cam/scsi/scsi_da.h> 53 #include <cam/ctl/ctl_io.h> 54 #include <cam/ctl/ctl.h> 55 #include <cam/ctl/ctl_frontend.h> 56 #include <cam/ctl/ctl_util.h> 57 #include <cam/ctl/ctl_backend.h> 58 #include <cam/ctl/ctl_ioctl.h> 59 #include <cam/ctl/ctl_ha.h> 60 #include <cam/ctl/ctl_private.h> 61 #include <cam/ctl/ctl_debug.h> 62 #include <cam/ctl/ctl_error.h> 63 64 typedef enum { 65 CTL_IOCTL_INPROG, 66 CTL_IOCTL_DATAMOVE, 67 CTL_IOCTL_DONE 68 } ctl_fe_ioctl_state; 69 70 struct ctl_fe_ioctl_params { 71 struct cv sem; 72 struct mtx ioctl_mtx; 73 ctl_fe_ioctl_state state; 74 }; 75 76 struct cfi_port { 77 TAILQ_ENTRY(cfi_port) link; 78 uint32_t cur_tag_num; 79 struct cdev * dev; 80 struct ctl_port port; 81 }; 82 83 struct cfi_softc { 84 TAILQ_HEAD(, cfi_port) ports; 85 }; 86 87 88 static struct cfi_softc cfi_softc; 89 90 91 static int cfi_init(void); 92 static int cfi_shutdown(void); 93 static void cfi_datamove(union ctl_io *io); 94 static void cfi_done(union ctl_io *io); 95 static int cfi_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, 96 struct thread *td); 97 static void cfi_ioctl_port_create(struct ctl_req *req); 98 static void cfi_ioctl_port_remove(struct ctl_req *req); 99 100 static struct cdevsw cfi_cdevsw = { 101 .d_version = D_VERSION, 102 .d_flags = 0, 103 .d_ioctl = ctl_ioctl_io 104 }; 105 106 static struct ctl_frontend cfi_frontend = 107 { 108 .name = "ioctl", 109 .init = cfi_init, 110 .ioctl = cfi_ioctl, 111 .shutdown = cfi_shutdown, 112 }; 113 CTL_FRONTEND_DECLARE(ctlioctl, cfi_frontend); 114 115 static int 116 cfi_init(void) 117 { 118 struct cfi_softc *isoftc = &cfi_softc; 119 struct cfi_port *cfi; 120 struct ctl_port *port; 121 int error = 0; 122 123 memset(isoftc, 0, sizeof(*isoftc)); 124 TAILQ_INIT(&isoftc->ports); 125 126 cfi = malloc(sizeof(*cfi), M_CTL, M_WAITOK | M_ZERO); 127 port = &cfi->port; 128 port->frontend = &cfi_frontend; 129 port->port_type = CTL_PORT_IOCTL; 130 port->num_requested_ctl_io = 100; 131 port->port_name = "ioctl"; 132 port->fe_datamove = cfi_datamove; 133 port->fe_done = cfi_done; 134 port->physical_port = 0; 135 port->targ_port = -1; 136 137 if ((error = ctl_port_register(port)) != 0) { 138 printf("%s: ioctl port registration failed\n", __func__); 139 return (error); 140 } 141 142 ctl_port_online(port); 143 TAILQ_INSERT_TAIL(&isoftc->ports, cfi, link); 144 return (0); 145 } 146 147 static int 148 cfi_shutdown(void) 149 { 150 struct cfi_softc *isoftc = &cfi_softc; 151 struct cfi_port *cfi, *temp; 152 struct ctl_port *port; 153 int error; 154 155 TAILQ_FOREACH_SAFE(cfi, &isoftc->ports, link, temp) { 156 port = &cfi->port; 157 ctl_port_offline(port); 158 error = ctl_port_deregister(port); 159 if (error != 0) { 160 printf("%s: ctl_frontend_deregister() failed\n", 161 __func__); 162 return (error); 163 } 164 165 TAILQ_REMOVE(&isoftc->ports, cfi, link); 166 free(cfi, M_CTL); 167 } 168 169 return (0); 170 } 171 172 static void 173 cfi_ioctl_port_create(struct ctl_req *req) 174 { 175 struct cfi_softc *isoftc = &cfi_softc; 176 struct cfi_port *cfi; 177 struct ctl_port *port; 178 struct make_dev_args args; 179 const char *val; 180 int retval; 181 int pp = -1, vp = 0; 182 183 val = dnvlist_get_string(req->args_nvl, "pp", NULL); 184 if (val != NULL) 185 pp = strtol(val, NULL, 10); 186 187 val = dnvlist_get_string(req->args_nvl, "vp", NULL); 188 if (val != NULL) 189 vp = strtol(val, NULL, 10); 190 191 if (pp != -1) { 192 /* Check for duplicates */ 193 TAILQ_FOREACH(cfi, &isoftc->ports, link) { 194 if (pp == cfi->port.physical_port && 195 vp == cfi->port.virtual_port) { 196 req->status = CTL_LUN_ERROR; 197 snprintf(req->error_str, sizeof(req->error_str), 198 "port %d already exists", pp); 199 200 return; 201 } 202 } 203 } else { 204 /* Find free port number */ 205 TAILQ_FOREACH(cfi, &isoftc->ports, link) { 206 pp = MAX(pp, cfi->port.physical_port); 207 } 208 209 pp++; 210 } 211 212 cfi = malloc(sizeof(*cfi), M_CTL, M_WAITOK | M_ZERO); 213 port = &cfi->port; 214 port->frontend = &cfi_frontend; 215 port->port_type = CTL_PORT_IOCTL; 216 port->num_requested_ctl_io = 100; 217 port->port_name = "ioctl"; 218 port->fe_datamove = cfi_datamove; 219 port->fe_done = cfi_done; 220 port->physical_port = pp; 221 port->virtual_port = vp; 222 port->targ_port = -1; 223 224 retval = ctl_port_register(port); 225 if (retval != 0) { 226 req->status = CTL_LUN_ERROR; 227 snprintf(req->error_str, sizeof(req->error_str), 228 "ctl_port_register() failed with error %d", retval); 229 free(port, M_CTL); 230 return; 231 } 232 233 req->result_nvl = nvlist_create(0); 234 nvlist_add_number(req->result_nvl, "port_id", port->targ_port); 235 ctl_port_online(port); 236 237 make_dev_args_init(&args); 238 args.mda_devsw = &cfi_cdevsw; 239 args.mda_uid = UID_ROOT; 240 args.mda_gid = GID_OPERATOR; 241 args.mda_mode = 0600; 242 args.mda_si_drv1 = NULL; 243 args.mda_si_drv2 = cfi; 244 245 retval = make_dev_s(&args, &cfi->dev, "cam/ctl%d.%d", pp, vp); 246 if (retval != 0) { 247 req->status = CTL_LUN_ERROR; 248 snprintf(req->error_str, sizeof(req->error_str), 249 "make_dev_s() failed with error %d", retval); 250 free(port, M_CTL); 251 return; 252 } 253 254 req->status = CTL_LUN_OK; 255 TAILQ_INSERT_TAIL(&isoftc->ports, cfi, link); 256 } 257 258 static void 259 cfi_ioctl_port_remove(struct ctl_req *req) 260 { 261 struct cfi_softc *isoftc = &cfi_softc; 262 struct cfi_port *cfi = NULL; 263 const char *val; 264 int port_id = -1; 265 266 val = dnvlist_get_string(req->args_nvl, "port_id", NULL); 267 if (val != NULL) 268 port_id = strtol(val, NULL, 10); 269 270 if (port_id == -1) { 271 req->status = CTL_LUN_ERROR; 272 snprintf(req->error_str, sizeof(req->error_str), 273 "port_id not provided"); 274 return; 275 } 276 277 TAILQ_FOREACH(cfi, &isoftc->ports, link) { 278 if (cfi->port.targ_port == port_id) 279 break; 280 } 281 282 if (cfi == NULL) { 283 req->status = CTL_LUN_ERROR; 284 snprintf(req->error_str, sizeof(req->error_str), 285 "cannot find port %d", port_id); 286 287 return; 288 } 289 290 if (cfi->port.physical_port == 0 && cfi->port.virtual_port == 0) { 291 req->status = CTL_LUN_ERROR; 292 snprintf(req->error_str, sizeof(req->error_str), 293 "cannot destroy default ioctl port"); 294 295 return; 296 } 297 298 ctl_port_offline(&cfi->port); 299 ctl_port_deregister(&cfi->port); 300 TAILQ_REMOVE(&isoftc->ports, cfi, link); 301 destroy_dev(cfi->dev); 302 free(cfi, M_CTL); 303 req->status = CTL_LUN_OK; 304 } 305 306 static int 307 cfi_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, 308 struct thread *td) 309 { 310 struct ctl_req *req; 311 312 if (cmd == CTL_PORT_REQ) { 313 req = (struct ctl_req *)addr; 314 switch (req->reqtype) { 315 case CTL_REQ_CREATE: 316 cfi_ioctl_port_create(req); 317 break; 318 case CTL_REQ_REMOVE: 319 cfi_ioctl_port_remove(req); 320 break; 321 default: 322 req->status = CTL_LUN_ERROR; 323 snprintf(req->error_str, sizeof(req->error_str), 324 "Unsupported request type %d", req->reqtype); 325 } 326 return (0); 327 } 328 329 return (ENOTTY); 330 } 331 332 /* 333 * Data movement routine for the CTL ioctl frontend port. 334 */ 335 static int 336 ctl_ioctl_do_datamove(struct ctl_scsiio *ctsio) 337 { 338 struct ctl_sg_entry *ext_sglist, *kern_sglist; 339 struct ctl_sg_entry ext_entry, kern_entry; 340 int ext_sglen, ext_sg_entries, kern_sg_entries; 341 int ext_sg_start, ext_offset; 342 int len_to_copy; 343 int kern_watermark, ext_watermark; 344 int ext_sglist_malloced; 345 int i, j; 346 347 CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove\n")); 348 349 /* 350 * If this flag is set, fake the data transfer. 351 */ 352 if (ctsio->io_hdr.flags & CTL_FLAG_NO_DATAMOVE) { 353 ext_sglist_malloced = 0; 354 ctsio->ext_data_filled += ctsio->kern_data_len; 355 ctsio->kern_data_resid = 0; 356 goto bailout; 357 } 358 359 /* 360 * To simplify things here, if we have a single buffer, stick it in 361 * a S/G entry and just make it a single entry S/G list. 362 */ 363 if (ctsio->ext_sg_entries > 0) { 364 int len_seen; 365 366 ext_sglen = ctsio->ext_sg_entries * sizeof(*ext_sglist); 367 ext_sglist = (struct ctl_sg_entry *)malloc(ext_sglen, M_CTL, 368 M_WAITOK); 369 ext_sglist_malloced = 1; 370 if (copyin(ctsio->ext_data_ptr, ext_sglist, ext_sglen) != 0) { 371 ctsio->io_hdr.port_status = 31343; 372 goto bailout; 373 } 374 ext_sg_entries = ctsio->ext_sg_entries; 375 ext_sg_start = ext_sg_entries; 376 ext_offset = 0; 377 len_seen = 0; 378 for (i = 0; i < ext_sg_entries; i++) { 379 if ((len_seen + ext_sglist[i].len) >= 380 ctsio->ext_data_filled) { 381 ext_sg_start = i; 382 ext_offset = ctsio->ext_data_filled - len_seen; 383 break; 384 } 385 len_seen += ext_sglist[i].len; 386 } 387 } else { 388 ext_sglist = &ext_entry; 389 ext_sglist_malloced = 0; 390 ext_sglist->addr = ctsio->ext_data_ptr; 391 ext_sglist->len = ctsio->ext_data_len; 392 ext_sg_entries = 1; 393 ext_sg_start = 0; 394 ext_offset = ctsio->ext_data_filled; 395 } 396 397 if (ctsio->kern_sg_entries > 0) { 398 kern_sglist = (struct ctl_sg_entry *)ctsio->kern_data_ptr; 399 kern_sg_entries = ctsio->kern_sg_entries; 400 } else { 401 kern_sglist = &kern_entry; 402 kern_sglist->addr = ctsio->kern_data_ptr; 403 kern_sglist->len = ctsio->kern_data_len; 404 kern_sg_entries = 1; 405 } 406 407 kern_watermark = 0; 408 ext_watermark = ext_offset; 409 for (i = ext_sg_start, j = 0; 410 i < ext_sg_entries && j < kern_sg_entries;) { 411 uint8_t *ext_ptr, *kern_ptr; 412 413 len_to_copy = MIN(ext_sglist[i].len - ext_watermark, 414 kern_sglist[j].len - kern_watermark); 415 416 ext_ptr = (uint8_t *)ext_sglist[i].addr; 417 ext_ptr = ext_ptr + ext_watermark; 418 if (ctsio->io_hdr.flags & CTL_FLAG_BUS_ADDR) { 419 /* 420 * XXX KDM fix this! 421 */ 422 panic("need to implement bus address support"); 423 #if 0 424 kern_ptr = bus_to_virt(kern_sglist[j].addr); 425 #endif 426 } else 427 kern_ptr = (uint8_t *)kern_sglist[j].addr; 428 kern_ptr = kern_ptr + kern_watermark; 429 430 if ((ctsio->io_hdr.flags & CTL_FLAG_DATA_MASK) == 431 CTL_FLAG_DATA_IN) { 432 CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove: copying %d " 433 "bytes to user\n", len_to_copy)); 434 CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove: from %p " 435 "to %p\n", kern_ptr, ext_ptr)); 436 if (copyout(kern_ptr, ext_ptr, len_to_copy) != 0) { 437 ctsio->io_hdr.port_status = 31344; 438 goto bailout; 439 } 440 } else { 441 CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove: copying %d " 442 "bytes from user\n", len_to_copy)); 443 CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove: from %p " 444 "to %p\n", ext_ptr, kern_ptr)); 445 if (copyin(ext_ptr, kern_ptr, len_to_copy)!= 0){ 446 ctsio->io_hdr.port_status = 31345; 447 goto bailout; 448 } 449 } 450 451 ctsio->ext_data_filled += len_to_copy; 452 ctsio->kern_data_resid -= len_to_copy; 453 454 ext_watermark += len_to_copy; 455 if (ext_sglist[i].len == ext_watermark) { 456 i++; 457 ext_watermark = 0; 458 } 459 460 kern_watermark += len_to_copy; 461 if (kern_sglist[j].len == kern_watermark) { 462 j++; 463 kern_watermark = 0; 464 } 465 } 466 467 CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove: ext_sg_entries: %d, " 468 "kern_sg_entries: %d\n", ext_sg_entries, 469 kern_sg_entries)); 470 CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove: ext_data_len = %d, " 471 "kern_data_len = %d\n", ctsio->ext_data_len, 472 ctsio->kern_data_len)); 473 474 bailout: 475 if (ext_sglist_malloced != 0) 476 free(ext_sglist, M_CTL); 477 478 return (CTL_RETVAL_COMPLETE); 479 } 480 481 static void 482 cfi_datamove(union ctl_io *io) 483 { 484 struct ctl_fe_ioctl_params *params; 485 486 params = (struct ctl_fe_ioctl_params *) 487 io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr; 488 489 mtx_lock(¶ms->ioctl_mtx); 490 params->state = CTL_IOCTL_DATAMOVE; 491 cv_broadcast(¶ms->sem); 492 mtx_unlock(¶ms->ioctl_mtx); 493 } 494 495 static void 496 cfi_done(union ctl_io *io) 497 { 498 struct ctl_fe_ioctl_params *params; 499 500 params = (struct ctl_fe_ioctl_params *) 501 io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr; 502 503 mtx_lock(¶ms->ioctl_mtx); 504 params->state = CTL_IOCTL_DONE; 505 cv_broadcast(¶ms->sem); 506 mtx_unlock(¶ms->ioctl_mtx); 507 } 508 509 static int 510 cfi_submit_wait(union ctl_io *io) 511 { 512 struct ctl_fe_ioctl_params params; 513 ctl_fe_ioctl_state last_state; 514 int done, retval; 515 516 bzero(¶ms, sizeof(params)); 517 mtx_init(¶ms.ioctl_mtx, "ctliocmtx", NULL, MTX_DEF); 518 cv_init(¶ms.sem, "ctlioccv"); 519 params.state = CTL_IOCTL_INPROG; 520 last_state = params.state; 521 522 io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = ¶ms; 523 524 CTL_DEBUG_PRINT(("cfi_submit_wait\n")); 525 526 /* This shouldn't happen */ 527 if ((retval = ctl_queue(io)) != CTL_RETVAL_COMPLETE) 528 return (retval); 529 530 done = 0; 531 532 do { 533 mtx_lock(¶ms.ioctl_mtx); 534 /* 535 * Check the state here, and don't sleep if the state has 536 * already changed (i.e. wakeup has already occurred, but we 537 * weren't waiting yet). 538 */ 539 if (params.state == last_state) { 540 /* XXX KDM cv_wait_sig instead? */ 541 cv_wait(¶ms.sem, ¶ms.ioctl_mtx); 542 } 543 last_state = params.state; 544 545 switch (params.state) { 546 case CTL_IOCTL_INPROG: 547 /* Why did we wake up? */ 548 /* XXX KDM error here? */ 549 mtx_unlock(¶ms.ioctl_mtx); 550 break; 551 case CTL_IOCTL_DATAMOVE: 552 CTL_DEBUG_PRINT(("got CTL_IOCTL_DATAMOVE\n")); 553 554 /* 555 * change last_state back to INPROG to avoid 556 * deadlock on subsequent data moves. 557 */ 558 params.state = last_state = CTL_IOCTL_INPROG; 559 560 mtx_unlock(¶ms.ioctl_mtx); 561 ctl_ioctl_do_datamove(&io->scsiio); 562 /* 563 * Note that in some cases, most notably writes, 564 * this will queue the I/O and call us back later. 565 * In other cases, generally reads, this routine 566 * will immediately call back and wake us up, 567 * probably using our own context. 568 */ 569 io->scsiio.be_move_done(io); 570 break; 571 case CTL_IOCTL_DONE: 572 mtx_unlock(¶ms.ioctl_mtx); 573 CTL_DEBUG_PRINT(("got CTL_IOCTL_DONE\n")); 574 done = 1; 575 break; 576 default: 577 mtx_unlock(¶ms.ioctl_mtx); 578 /* XXX KDM error here? */ 579 break; 580 } 581 } while (done == 0); 582 583 mtx_destroy(¶ms.ioctl_mtx); 584 cv_destroy(¶ms.sem); 585 586 return (CTL_RETVAL_COMPLETE); 587 } 588 589 int 590 ctl_ioctl_io(struct cdev *dev, u_long cmd, caddr_t addr, int flag, 591 struct thread *td) 592 { 593 struct cfi_port *cfi; 594 union ctl_io *io; 595 void *pool_tmp, *sc_tmp; 596 int retval = 0; 597 598 if (cmd != CTL_IO) 599 return (ENOTTY); 600 601 cfi = dev->si_drv2 == NULL 602 ? TAILQ_FIRST(&cfi_softc.ports) 603 : dev->si_drv2; 604 605 /* 606 * If we haven't been "enabled", don't allow any SCSI I/O 607 * to this FETD. 608 */ 609 if ((cfi->port.status & CTL_PORT_STATUS_ONLINE) == 0) 610 return (EPERM); 611 612 io = ctl_alloc_io(cfi->port.ctl_pool_ref); 613 614 /* 615 * Need to save the pool reference so it doesn't get 616 * spammed by the user's ctl_io. 617 */ 618 pool_tmp = io->io_hdr.pool; 619 sc_tmp = CTL_SOFTC(io); 620 memcpy(io, (void *)addr, sizeof(*io)); 621 io->io_hdr.pool = pool_tmp; 622 CTL_SOFTC(io) = sc_tmp; 623 TAILQ_INIT(&io->io_hdr.blocked_queue); 624 625 /* 626 * No status yet, so make sure the status is set properly. 627 */ 628 io->io_hdr.status = CTL_STATUS_NONE; 629 630 /* 631 * The user sets the initiator ID, target and LUN IDs. 632 */ 633 io->io_hdr.nexus.targ_port = cfi->port.targ_port; 634 io->io_hdr.flags |= CTL_FLAG_USER_REQ; 635 if ((io->io_hdr.io_type == CTL_IO_SCSI) && 636 (io->scsiio.tag_type != CTL_TAG_UNTAGGED)) 637 io->scsiio.tag_num = cfi->cur_tag_num++; 638 639 retval = cfi_submit_wait(io); 640 if (retval == 0) 641 memcpy((void *)addr, io, sizeof(*io)); 642 643 ctl_free_io(io); 644 return (retval); 645 } 646