/*-
 * Copyright (c) 2003-2009 Silicon Graphics International Corp.
 * Copyright (c) 2012 The FreeBSD Foundation
 * Copyright (c) 2015 Alexander Motin <mav@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/types.h>
#include <sys/lock.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/condvar.h>
#include <sys/malloc.h>
#include <sys/conf.h>
#include <sys/queue.h>
#include <sys/sysctl.h>

#include <cam/cam.h>
#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_da.h>
#include <cam/ctl/ctl_io.h>
#include <cam/ctl/ctl.h>
#include <cam/ctl/ctl_frontend.h>
#include <cam/ctl/ctl_util.h>
#include <cam/ctl/ctl_backend.h>
#include <cam/ctl/ctl_ioctl.h>
#include <cam/ctl/ctl_ha.h>
#include <cam/ctl/ctl_private.h>
#include <cam/ctl/ctl_debug.h>
#include <cam/ctl/ctl_error.h>

struct cfi_softc {
	uint32_t		cur_tag_num;
	struct ctl_port		port;
};

static struct cfi_softc cfi_softc;

static int cfi_init(void);
static void cfi_shutdown(void);
static void cfi_online(void *arg);
static void cfi_offline(void *arg);
static int cfi_lun_enable(void *arg, int lun_id);
static int cfi_lun_disable(void *arg, int lun_id);
static void cfi_datamove(union ctl_io *io);
static void cfi_done(union ctl_io *io);

static struct ctl_frontend cfi_frontend =
{
	.name = "ioctl",
	.init = cfi_init,
	.shutdown = cfi_shutdown,
};
CTL_FRONTEND_DECLARE(ctlioctl, cfi_frontend);

static int
cfi_init(void)
{
	struct cfi_softc *isoftc = &cfi_softc;
	struct ctl_port *port;

	memset(isoftc, 0, sizeof(*isoftc));

	port = &isoftc->port;
	port->frontend = &cfi_frontend;
	port->port_type = CTL_PORT_IOCTL;
	port->num_requested_ctl_io = 100;
	port->port_name = "ioctl";
	port->port_online = cfi_online;
	port->port_offline = cfi_offline;
	port->onoff_arg = &isoftc;
	port->lun_enable = cfi_lun_enable;
	port->lun_disable = cfi_lun_disable;
	port->targ_lun_arg = &isoftc;
	port->fe_datamove = cfi_datamove;
	port->fe_done = cfi_done;
	port->max_targets = 1;
	port->max_target_id = 0;
	port->max_initiators = 1;

	if (ctl_port_register(port) != 0) {
		printf("%s: ioctl port registration failed\n", __func__);
		return (0);
	}
	ctl_port_online(port);
	return (0);
}

static void
cfi_shutdown(void)
{
	struct cfi_softc *isoftc = &cfi_softc;
	struct ctl_port *port;

	port = &isoftc->port;
	ctl_port_offline(port);
	if (ctl_port_deregister(&isoftc->port) != 0)
		printf("%s: ctl_port_deregister() failed\n", __func__);
}

/*
 * The ioctl port has no port-specific work to do on online/offline or
 * LUN enable/disable events.
 */
static void
cfi_online(void *arg)
{
}

static void
cfi_offline(void *arg)
{
}

static int
cfi_lun_enable(void *arg, int lun_id)
{

	return (0);
}

static int
cfi_lun_disable(void *arg, int lun_id)
{

	return (0);
}

/*
 * Data movement routine for the CTL ioctl frontend port.
 */
static int
ctl_ioctl_do_datamove(struct ctl_scsiio *ctsio)
{
	struct ctl_sg_entry *ext_sglist, *kern_sglist;
	struct ctl_sg_entry ext_entry, kern_entry;
	int ext_sglen, ext_sg_entries, kern_sg_entries;
	int ext_sg_start, ext_offset;
	int len_to_copy, len_copied;
	int kern_watermark, ext_watermark;
	int ext_sglist_malloced;
	int i, j;

	ext_sglist_malloced = 0;
	ext_sg_start = 0;
	ext_offset = 0;

	CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove\n"));

	/*
	 * If this flag is set, fake the data transfer.
	 */
	if (ctsio->io_hdr.flags & CTL_FLAG_NO_DATAMOVE) {
		ctsio->ext_data_filled = ctsio->ext_data_len;
		goto bailout;
	}

	/*
	 * To simplify things here, if we have a single buffer, stick it in
	 * an S/G entry and just make it a single entry S/G list.
	 */
	if (ctsio->io_hdr.flags & CTL_FLAG_EDPTR_SGLIST) {
		int len_seen;

		ext_sglen = ctsio->ext_sg_entries * sizeof(*ext_sglist);

		ext_sglist = (struct ctl_sg_entry *)malloc(ext_sglen, M_CTL,
		    M_WAITOK);
		ext_sglist_malloced = 1;
		if (copyin(ctsio->ext_data_ptr, ext_sglist, ext_sglen) != 0) {
			ctl_set_internal_failure(ctsio,
			    /*sks_valid*/ 0, /*retry_count*/ 0);
			goto bailout;
		}
		ext_sg_entries = ctsio->ext_sg_entries;
		len_seen = 0;
		for (i = 0; i < ext_sg_entries; i++) {
			if ((len_seen + ext_sglist[i].len) >=
			    ctsio->ext_data_filled) {
				ext_sg_start = i;
				ext_offset = ctsio->ext_data_filled - len_seen;
				break;
			}
			len_seen += ext_sglist[i].len;
		}
	} else {
		ext_sglist = &ext_entry;
		ext_sglist->addr = ctsio->ext_data_ptr;
		ext_sglist->len = ctsio->ext_data_len;
		ext_sg_entries = 1;
		ext_sg_start = 0;
		ext_offset = ctsio->ext_data_filled;
	}

	if (ctsio->kern_sg_entries > 0) {
		kern_sglist = (struct ctl_sg_entry *)ctsio->kern_data_ptr;
		kern_sg_entries = ctsio->kern_sg_entries;
	} else {
		kern_sglist = &kern_entry;
		kern_sglist->addr = ctsio->kern_data_ptr;
		kern_sglist->len = ctsio->kern_data_len;
		kern_sg_entries = 1;
	}

	kern_watermark = 0;
	ext_watermark = ext_offset;
	len_copied = 0;
	for (i = ext_sg_start, j = 0;
	     i < ext_sg_entries && j < kern_sg_entries;) {
		uint8_t *ext_ptr, *kern_ptr;

		len_to_copy = MIN(ext_sglist[i].len - ext_watermark,
				  kern_sglist[j].len - kern_watermark);

		ext_ptr = (uint8_t *)ext_sglist[i].addr;
		ext_ptr = ext_ptr + ext_watermark;
		if (ctsio->io_hdr.flags & CTL_FLAG_BUS_ADDR) {
			/*
			 * XXX KDM fix this!
			 */
			panic("need to implement bus address support");
#if 0
			kern_ptr = bus_to_virt(kern_sglist[j].addr);
#endif
		} else
			kern_ptr = (uint8_t *)kern_sglist[j].addr;
		kern_ptr = kern_ptr + kern_watermark;

		kern_watermark += len_to_copy;
		ext_watermark += len_to_copy;

		if ((ctsio->io_hdr.flags & CTL_FLAG_DATA_MASK) ==
		     CTL_FLAG_DATA_IN) {
			CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove: copying %d "
					 "bytes to user\n", len_to_copy));
			CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove: from %p "
					 "to %p\n", kern_ptr, ext_ptr));
			if (copyout(kern_ptr, ext_ptr, len_to_copy) != 0) {
				ctl_set_internal_failure(ctsio,
				    /*sks_valid*/ 0, /*retry_count*/ 0);
				goto bailout;
			}
		} else {
			CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove: copying %d "
					 "bytes from user\n", len_to_copy));
			CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove: from %p "
					 "to %p\n", ext_ptr, kern_ptr));
			if (copyin(ext_ptr, kern_ptr, len_to_copy) != 0) {
				ctl_set_internal_failure(ctsio,
				    /*sks_valid*/ 0, /*retry_count*/ 0);
				goto bailout;
			}
		}

		len_copied += len_to_copy;

		if (ext_sglist[i].len == ext_watermark) {
			i++;
			ext_watermark = 0;
		}

		if (kern_sglist[j].len == kern_watermark) {
			j++;
			kern_watermark = 0;
		}
	}

	ctsio->ext_data_filled += len_copied;

	CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove: ext_sg_entries: %d, "
			 "kern_sg_entries: %d\n", ext_sg_entries,
			 kern_sg_entries));
	CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove: ext_data_len = %d, "
			 "kern_data_len = %d\n", ctsio->ext_data_len,
			 ctsio->kern_data_len));

	/* XXX KDM set residual?? */
bailout:
	if (ext_sglist_malloced != 0)
		free(ext_sglist, M_CTL);

	return (CTL_RETVAL_COMPLETE);
}

/*
 * Data movement callback: wake up the thread sleeping in cfi_submit_wait()
 * so that it can service the data move request.
 */
static void
cfi_datamove(union ctl_io *io)
{
	struct ctl_fe_ioctl_params *params;

	params = (struct ctl_fe_ioctl_params *)
		io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr;

	mtx_lock(&params->ioctl_mtx);
	params->state = CTL_IOCTL_DATAMOVE;
	cv_broadcast(&params->sem);
	mtx_unlock(&params->ioctl_mtx);
}

/*
 * Completion callback: wake up the thread sleeping in cfi_submit_wait()
 * and tell it the I/O is done.
 */
static void
cfi_done(union ctl_io *io)
{
	struct ctl_fe_ioctl_params *params;

	params = (struct ctl_fe_ioctl_params *)
		io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr;

	mtx_lock(&params->ioctl_mtx);
	params->state = CTL_IOCTL_DONE;
	cv_broadcast(&params->sem);
	mtx_unlock(&params->ioctl_mtx);
}

/*
 * Queue the I/O to CTL and sleep until it completes, servicing any data
 * move requests that arrive in the meantime.
 */
static int
cfi_submit_wait(union ctl_io *io)
{
	struct ctl_fe_ioctl_params params;
	ctl_fe_ioctl_state last_state;
	int done, retval;

	retval = 0;

	bzero(&params, sizeof(params));

	mtx_init(&params.ioctl_mtx, "ctliocmtx", NULL, MTX_DEF);
	cv_init(&params.sem, "ctlioccv");
	params.state = CTL_IOCTL_INPROG;
	last_state = params.state;

	io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = &params;

	CTL_DEBUG_PRINT(("cfi_submit_wait\n"));

	/* This shouldn't happen */
	if ((retval = ctl_queue(io)) != CTL_RETVAL_COMPLETE)
		return (retval);

	done = 0;

	do {
		mtx_lock(&params.ioctl_mtx);
		/*
		 * Check the state here, and don't sleep if the state has
		 * already changed (i.e. wakeup has already occurred, but we
		 * weren't waiting yet).
		 */
		if (params.state == last_state) {
			/* XXX KDM cv_wait_sig instead? */
			cv_wait(&params.sem, &params.ioctl_mtx);
		}
		last_state = params.state;

		switch (params.state) {
		case CTL_IOCTL_INPROG:
			/* Why did we wake up? */
			/* XXX KDM error here? */
			mtx_unlock(&params.ioctl_mtx);
			break;
		case CTL_IOCTL_DATAMOVE:
			CTL_DEBUG_PRINT(("got CTL_IOCTL_DATAMOVE\n"));

			/*
			 * Change last_state back to INPROG to avoid
			 * deadlock on subsequent data moves.
			 */
			params.state = last_state = CTL_IOCTL_INPROG;

			mtx_unlock(&params.ioctl_mtx);
			ctl_ioctl_do_datamove(&io->scsiio);
			/*
			 * Note that in some cases, most notably writes,
			 * this will queue the I/O and call us back later.
			 * In other cases, generally reads, this routine
			 * will immediately call back and wake us up,
			 * probably using our own context.
			 */
			io->scsiio.be_move_done(io);
			break;
		case CTL_IOCTL_DONE:
			mtx_unlock(&params.ioctl_mtx);
			CTL_DEBUG_PRINT(("got CTL_IOCTL_DONE\n"));
			done = 1;
			break;
		default:
			mtx_unlock(&params.ioctl_mtx);
			/* XXX KDM error here? */
			break;
		}
	} while (done == 0);

	mtx_destroy(&params.ioctl_mtx);
	cv_destroy(&params.sem);

	return (CTL_RETVAL_COMPLETE);
}

/*
 * ioctl handler: run a user-supplied ctl_io through CTL and copy the
 * completed I/O back out to the user.
 */
int
ctl_ioctl_io(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
	     struct thread *td)
{
	union ctl_io *io;
	void *pool_tmp;
	int retval = 0;

	/*
	 * If we haven't been "enabled", don't allow any SCSI I/O
	 * to this FETD.
	 */
	if ((cfi_softc.port.status & CTL_PORT_STATUS_ONLINE) == 0)
		return (EPERM);

	io = ctl_alloc_io(cfi_softc.port.ctl_pool_ref);

	/*
	 * Need to save the pool reference so it doesn't get
	 * spammed by the user's ctl_io.
446 */ 447 pool_tmp = io->io_hdr.pool; 448 memcpy(io, (void *)addr, sizeof(*io)); 449 io->io_hdr.pool = pool_tmp; 450 451 /* 452 * No status yet, so make sure the status is set properly. 453 */ 454 io->io_hdr.status = CTL_STATUS_NONE; 455 456 /* 457 * The user sets the initiator ID, target and LUN IDs. 458 */ 459 io->io_hdr.nexus.targ_port = cfi_softc.port.targ_port; 460 io->io_hdr.flags |= CTL_FLAG_USER_REQ; 461 if ((io->io_hdr.io_type == CTL_IO_SCSI) && 462 (io->scsiio.tag_type != CTL_TAG_UNTAGGED)) 463 io->scsiio.tag_num = cfi_softc.cur_tag_num++; 464 465 retval = cfi_submit_wait(io); 466 if (retval == 0) 467 memcpy((void *)addr, io, sizeof(*io)); 468 ctl_free_io(io); 469 return (retval); 470 } 471