1 /*- 2 * Copyright (c) 2003-2009 Silicon Graphics International Corp. 3 * Copyright (c) 2012 The FreeBSD Foundation 4 * All rights reserved. 5 * 6 * Portions of this software were developed by Edward Tomasz Napierala 7 * under sponsorship from the FreeBSD Foundation. 8 * 9 * Redistribution and use in source and binary forms, with or without 10 * modification, are permitted provided that the following conditions 11 * are met: 12 * 1. Redistributions of source code must retain the above copyright 13 * notice, this list of conditions, and the following disclaimer, 14 * without modification. 15 * 2. Redistributions in binary form must reproduce at minimum a disclaimer 16 * substantially similar to the "NO WARRANTY" disclaimer below 17 * ("Disclaimer") and any redistribution must be conditioned upon 18 * including a substantially similar Disclaimer requirement for further 19 * binary redistribution. 20 * 21 * NO WARRANTY 22 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 23 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 24 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR 25 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 26 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, 30 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING 31 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 32 * POSSIBILITY OF SUCH DAMAGES. 33 * 34 * $Id: //depot/users/kenm/FreeBSD-test2/sys/cam/ctl/ctl.c#8 $ 35 */ 36 /* 37 * CAM Target Layer, a SCSI device emulation subsystem. 38 * 39 * Author: Ken Merry <ken@FreeBSD.org> 40 */ 41 42 #define _CTL_C 43 44 #include <sys/cdefs.h> 45 __FBSDID("$FreeBSD$"); 46 47 #include <sys/param.h> 48 #include <sys/systm.h> 49 #include <sys/kernel.h> 50 #include <sys/types.h> 51 #include <sys/kthread.h> 52 #include <sys/bio.h> 53 #include <sys/fcntl.h> 54 #include <sys/lock.h> 55 #include <sys/mutex.h> 56 #include <sys/condvar.h> 57 #include <sys/malloc.h> 58 #include <sys/conf.h> 59 #include <sys/ioccom.h> 60 #include <sys/queue.h> 61 #include <sys/sbuf.h> 62 #include <sys/endian.h> 63 #include <sys/sysctl.h> 64 65 #include <cam/cam.h> 66 #include <cam/scsi/scsi_all.h> 67 #include <cam/scsi/scsi_da.h> 68 #include <cam/ctl/ctl_io.h> 69 #include <cam/ctl/ctl.h> 70 #include <cam/ctl/ctl_frontend.h> 71 #include <cam/ctl/ctl_frontend_internal.h> 72 #include <cam/ctl/ctl_util.h> 73 #include <cam/ctl/ctl_backend.h> 74 #include <cam/ctl/ctl_ioctl.h> 75 #include <cam/ctl/ctl_ha.h> 76 #include <cam/ctl/ctl_private.h> 77 #include <cam/ctl/ctl_debug.h> 78 #include <cam/ctl/ctl_scsi_all.h> 79 #include <cam/ctl/ctl_error.h> 80 81 struct ctl_softc *control_softc = NULL; 82 83 /* 84 * The default is to run with CTL_DONE_THREAD turned on. Completed 85 * transactions are queued for processing by the CTL work thread. When 86 * CTL_DONE_THREAD is not defined, completed transactions are processed in 87 * the caller's context. 88 */ 89 #define CTL_DONE_THREAD 90 91 /* 92 * * Use the serial number and device ID provided by the backend, rather than 93 * * making up our own. 94 * */ 95 #define CTL_USE_BACKEND_SN 96 97 /* 98 * Size and alignment macros needed for Copan-specific HA hardware. 
These 99 * can go away when the HA code is re-written, and uses busdma for any 100 * hardware. 101 */ 102 #define CTL_ALIGN_8B(target, source, type) \ 103 if (((uint32_t)source & 0x7) != 0) \ 104 target = (type)(source + (0x8 - ((uint32_t)source & 0x7)));\ 105 else \ 106 target = (type)source; 107 108 #define CTL_SIZE_8B(target, size) \ 109 if ((size & 0x7) != 0) \ 110 target = size + (0x8 - (size & 0x7)); \ 111 else \ 112 target = size; 113 114 #define CTL_ALIGN_8B_MARGIN 16 115 116 /* 117 * Template mode pages. 118 */ 119 120 /* 121 * Note that these are default values only. The actual values will be 122 * filled in when the user does a mode sense. 123 */ 124 static struct copan_power_subpage power_page_default = { 125 /*page_code*/ PWR_PAGE_CODE | SMPH_SPF, 126 /*subpage*/ PWR_SUBPAGE_CODE, 127 /*page_length*/ {(sizeof(struct copan_power_subpage) - 4) & 0xff00, 128 (sizeof(struct copan_power_subpage) - 4) & 0x00ff}, 129 /*page_version*/ PWR_VERSION, 130 /* total_luns */ 26, 131 /* max_active_luns*/ PWR_DFLT_MAX_LUNS, 132 /*reserved*/ {0, 0, 0, 0, 0, 0, 0, 0, 0, 133 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 134 0, 0, 0, 0, 0, 0} 135 }; 136 137 static struct copan_power_subpage power_page_changeable = { 138 /*page_code*/ PWR_PAGE_CODE | SMPH_SPF, 139 /*subpage*/ PWR_SUBPAGE_CODE, 140 /*page_length*/ {(sizeof(struct copan_power_subpage) - 4) & 0xff00, 141 (sizeof(struct copan_power_subpage) - 4) & 0x00ff}, 142 /*page_version*/ 0, 143 /* total_luns */ 0, 144 /* max_active_luns*/ 0, 145 /*reserved*/ {0, 0, 0, 0, 0, 0, 0, 0, 0, 146 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 147 0, 0, 0, 0, 0, 0} 148 }; 149 150 static struct copan_aps_subpage aps_page_default = { 151 APS_PAGE_CODE | SMPH_SPF, //page_code 152 APS_SUBPAGE_CODE, //subpage 153 {(sizeof(struct copan_aps_subpage) - 4) & 0xff00, 154 (sizeof(struct copan_aps_subpage) - 4) & 0x00ff}, //page_length 155 APS_VERSION, //page_version 156 0, //lock_active 157 {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 158 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 159 0, 0, 0, 0, 0} //reserved 160 }; 161 162 static struct copan_aps_subpage aps_page_changeable = { 163 APS_PAGE_CODE | SMPH_SPF, //page_code 164 APS_SUBPAGE_CODE, //subpage 165 {(sizeof(struct copan_aps_subpage) - 4) & 0xff00, 166 (sizeof(struct copan_aps_subpage) - 4) & 0x00ff}, //page_length 167 0, //page_version 168 0, //lock_active 169 {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 170 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 171 0, 0, 0, 0, 0} //reserved 172 }; 173 174 static struct copan_debugconf_subpage debugconf_page_default = { 175 DBGCNF_PAGE_CODE | SMPH_SPF, /* page_code */ 176 DBGCNF_SUBPAGE_CODE, /* subpage */ 177 {(sizeof(struct copan_debugconf_subpage) - 4) >> 8, 178 (sizeof(struct copan_debugconf_subpage) - 4) >> 0}, /* page_length */ 179 DBGCNF_VERSION, /* page_version */ 180 {CTL_TIME_IO_DEFAULT_SECS>>8, 181 CTL_TIME_IO_DEFAULT_SECS>>0}, /* ctl_time_io_secs */ 182 }; 183 184 static struct copan_debugconf_subpage debugconf_page_changeable = { 185 DBGCNF_PAGE_CODE | SMPH_SPF, /* page_code */ 186 DBGCNF_SUBPAGE_CODE, /* subpage */ 187 {(sizeof(struct copan_debugconf_subpage) - 4) >> 8, 188 (sizeof(struct copan_debugconf_subpage) - 4) >> 0}, /* page_length */ 189 0, /* page_version */ 190 {0xff,0xff}, /* ctl_time_io_secs */ 191 }; 192 193 static struct scsi_format_page format_page_default = { 194 /*page_code*/SMS_FORMAT_DEVICE_PAGE, 195 /*page_length*/sizeof(struct scsi_format_page) - 2, 196 /*tracks_per_zone*/ {0, 0}, 197 /*alt_sectors_per_zone*/ {0, 0}, 198 /*alt_tracks_per_zone*/ {0, 0}, 199 /*alt_tracks_per_lun*/ {0, 0}, 200 /*sectors_per_track*/ 
{(CTL_DEFAULT_SECTORS_PER_TRACK >> 8) & 0xff, 201 CTL_DEFAULT_SECTORS_PER_TRACK & 0xff}, 202 /*bytes_per_sector*/ {0, 0}, 203 /*interleave*/ {0, 0}, 204 /*track_skew*/ {0, 0}, 205 /*cylinder_skew*/ {0, 0}, 206 /*flags*/ SFP_HSEC, 207 /*reserved*/ {0, 0, 0} 208 }; 209 210 static struct scsi_format_page format_page_changeable = { 211 /*page_code*/SMS_FORMAT_DEVICE_PAGE, 212 /*page_length*/sizeof(struct scsi_format_page) - 2, 213 /*tracks_per_zone*/ {0, 0}, 214 /*alt_sectors_per_zone*/ {0, 0}, 215 /*alt_tracks_per_zone*/ {0, 0}, 216 /*alt_tracks_per_lun*/ {0, 0}, 217 /*sectors_per_track*/ {0, 0}, 218 /*bytes_per_sector*/ {0, 0}, 219 /*interleave*/ {0, 0}, 220 /*track_skew*/ {0, 0}, 221 /*cylinder_skew*/ {0, 0}, 222 /*flags*/ 0, 223 /*reserved*/ {0, 0, 0} 224 }; 225 226 static struct scsi_rigid_disk_page rigid_disk_page_default = { 227 /*page_code*/SMS_RIGID_DISK_PAGE, 228 /*page_length*/sizeof(struct scsi_rigid_disk_page) - 2, 229 /*cylinders*/ {0, 0, 0}, 230 /*heads*/ CTL_DEFAULT_HEADS, 231 /*start_write_precomp*/ {0, 0, 0}, 232 /*start_reduced_current*/ {0, 0, 0}, 233 /*step_rate*/ {0, 0}, 234 /*landing_zone_cylinder*/ {0, 0, 0}, 235 /*rpl*/ SRDP_RPL_DISABLED, 236 /*rotational_offset*/ 0, 237 /*reserved1*/ 0, 238 /*rotation_rate*/ {(CTL_DEFAULT_ROTATION_RATE >> 8) & 0xff, 239 CTL_DEFAULT_ROTATION_RATE & 0xff}, 240 /*reserved2*/ {0, 0} 241 }; 242 243 static struct scsi_rigid_disk_page rigid_disk_page_changeable = { 244 /*page_code*/SMS_RIGID_DISK_PAGE, 245 /*page_length*/sizeof(struct scsi_rigid_disk_page) - 2, 246 /*cylinders*/ {0, 0, 0}, 247 /*heads*/ 0, 248 /*start_write_precomp*/ {0, 0, 0}, 249 /*start_reduced_current*/ {0, 0, 0}, 250 /*step_rate*/ {0, 0}, 251 /*landing_zone_cylinder*/ {0, 0, 0}, 252 /*rpl*/ 0, 253 /*rotational_offset*/ 0, 254 /*reserved1*/ 0, 255 /*rotation_rate*/ {0, 0}, 256 /*reserved2*/ {0, 0} 257 }; 258 259 static struct scsi_caching_page caching_page_default = { 260 /*page_code*/SMS_CACHING_PAGE, 261 /*page_length*/sizeof(struct scsi_caching_page) - 2, 262 /*flags1*/ SCP_DISC | SCP_WCE, 263 /*ret_priority*/ 0, 264 /*disable_pf_transfer_len*/ {0xff, 0xff}, 265 /*min_prefetch*/ {0, 0}, 266 /*max_prefetch*/ {0xff, 0xff}, 267 /*max_pf_ceiling*/ {0xff, 0xff}, 268 /*flags2*/ 0, 269 /*cache_segments*/ 0, 270 /*cache_seg_size*/ {0, 0}, 271 /*reserved*/ 0, 272 /*non_cache_seg_size*/ {0, 0, 0} 273 }; 274 275 static struct scsi_caching_page caching_page_changeable = { 276 /*page_code*/SMS_CACHING_PAGE, 277 /*page_length*/sizeof(struct scsi_caching_page) - 2, 278 /*flags1*/ 0, 279 /*ret_priority*/ 0, 280 /*disable_pf_transfer_len*/ {0, 0}, 281 /*min_prefetch*/ {0, 0}, 282 /*max_prefetch*/ {0, 0}, 283 /*max_pf_ceiling*/ {0, 0}, 284 /*flags2*/ 0, 285 /*cache_segments*/ 0, 286 /*cache_seg_size*/ {0, 0}, 287 /*reserved*/ 0, 288 /*non_cache_seg_size*/ {0, 0, 0} 289 }; 290 291 static struct scsi_control_page control_page_default = { 292 /*page_code*/SMS_CONTROL_MODE_PAGE, 293 /*page_length*/sizeof(struct scsi_control_page) - 2, 294 /*rlec*/0, 295 /*queue_flags*/0, 296 /*eca_and_aen*/0, 297 /*reserved*/0, 298 /*aen_holdoff_period*/{0, 0} 299 }; 300 301 static struct scsi_control_page control_page_changeable = { 302 /*page_code*/SMS_CONTROL_MODE_PAGE, 303 /*page_length*/sizeof(struct scsi_control_page) - 2, 304 /*rlec*/SCP_DSENSE, 305 /*queue_flags*/0, 306 /*eca_and_aen*/0, 307 /*reserved*/0, 308 /*aen_holdoff_period*/{0, 0} 309 }; 310 311 312 /* 313 * XXX KDM move these into the softc. 
314 */ 315 static int rcv_sync_msg; 316 static int persis_offset; 317 static uint8_t ctl_pause_rtr; 318 static int ctl_is_single; 319 static int index_to_aps_page; 320 int ctl_disable = 0; 321 322 SYSCTL_NODE(_kern_cam, OID_AUTO, ctl, CTLFLAG_RD, 0, "CAM Target Layer"); 323 SYSCTL_INT(_kern_cam_ctl, OID_AUTO, disable, CTLFLAG_RDTUN, &ctl_disable, 0, 324 "Disable CTL"); 325 TUNABLE_INT("kern.cam.ctl.disable", &ctl_disable); 326 327 /* 328 * Serial number (0x80), device id (0x83), and supported pages (0x00) 329 */ 330 #define SCSI_EVPD_NUM_SUPPORTED_PAGES 3 331 332 static void ctl_isc_event_handler(ctl_ha_channel chanel, ctl_ha_event event, 333 int param); 334 static void ctl_copy_sense_data(union ctl_ha_msg *src, union ctl_io *dest); 335 static void ctl_init(void); 336 void ctl_shutdown(void); 337 static int ctl_open(struct cdev *dev, int flags, int fmt, struct thread *td); 338 static int ctl_close(struct cdev *dev, int flags, int fmt, struct thread *td); 339 static void ctl_ioctl_online(void *arg); 340 static void ctl_ioctl_offline(void *arg); 341 static int ctl_ioctl_targ_enable(void *arg, struct ctl_id targ_id); 342 static int ctl_ioctl_targ_disable(void *arg, struct ctl_id targ_id); 343 static int ctl_ioctl_lun_enable(void *arg, struct ctl_id targ_id, int lun_id); 344 static int ctl_ioctl_lun_disable(void *arg, struct ctl_id targ_id, int lun_id); 345 static int ctl_ioctl_do_datamove(struct ctl_scsiio *ctsio); 346 static int ctl_serialize_other_sc_cmd(struct ctl_scsiio *ctsio, int have_lock); 347 static int ctl_ioctl_submit_wait(union ctl_io *io); 348 static void ctl_ioctl_datamove(union ctl_io *io); 349 static void ctl_ioctl_done(union ctl_io *io); 350 static void ctl_ioctl_hard_startstop_callback(void *arg, 351 struct cfi_metatask *metatask); 352 static void ctl_ioctl_bbrread_callback(void *arg,struct cfi_metatask *metatask); 353 static int ctl_ioctl_fill_ooa(struct ctl_lun *lun, uint32_t *cur_fill_num, 354 struct ctl_ooa *ooa_hdr); 355 static int ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, 356 struct thread *td); 357 uint32_t ctl_get_resindex(struct ctl_nexus *nexus); 358 uint32_t ctl_port_idx(int port_num); 359 #ifdef unused 360 static union ctl_io *ctl_malloc_io(ctl_io_type io_type, uint32_t targ_port, 361 uint32_t targ_target, uint32_t targ_lun, 362 int can_wait); 363 static void ctl_kfree_io(union ctl_io *io); 364 #endif /* unused */ 365 static void ctl_free_io_internal(union ctl_io *io, int have_lock); 366 static int ctl_alloc_lun(struct ctl_softc *ctl_softc, struct ctl_lun *lun, 367 struct ctl_be_lun *be_lun, struct ctl_id target_id); 368 static int ctl_free_lun(struct ctl_lun *lun); 369 static void ctl_create_lun(struct ctl_be_lun *be_lun); 370 /** 371 static void ctl_failover_change_pages(struct ctl_softc *softc, 372 struct ctl_scsiio *ctsio, int master); 373 **/ 374 375 static int ctl_do_mode_select(union ctl_io *io); 376 static int ctl_pro_preempt(struct ctl_softc *softc, struct ctl_lun *lun, 377 uint64_t res_key, uint64_t sa_res_key, 378 uint8_t type, uint32_t residx, 379 struct ctl_scsiio *ctsio, 380 struct scsi_per_res_out *cdb, 381 struct scsi_per_res_out_parms* param); 382 static void ctl_pro_preempt_other(struct ctl_lun *lun, 383 union ctl_ha_msg *msg); 384 static void ctl_hndl_per_res_out_on_other_sc(union ctl_ha_msg *msg); 385 static int ctl_inquiry_evpd_supported(struct ctl_scsiio *ctsio, int alloc_len); 386 static int ctl_inquiry_evpd_serial(struct ctl_scsiio *ctsio, int alloc_len); 387 static int ctl_inquiry_evpd_devid(struct ctl_scsiio *ctsio, 
int alloc_len); 388 static int ctl_inquiry_evpd(struct ctl_scsiio *ctsio); 389 static int ctl_inquiry_std(struct ctl_scsiio *ctsio); 390 static int ctl_get_lba_len(union ctl_io *io, uint64_t *lba, uint32_t *len); 391 static ctl_action ctl_extent_check(union ctl_io *io1, union ctl_io *io2); 392 static ctl_action ctl_check_for_blockage(union ctl_io *pending_io, 393 union ctl_io *ooa_io); 394 static ctl_action ctl_check_ooa(struct ctl_lun *lun, union ctl_io *pending_io, 395 union ctl_io *starting_io); 396 static int ctl_check_blocked(struct ctl_lun *lun); 397 static int ctl_scsiio_lun_check(struct ctl_softc *ctl_softc, 398 struct ctl_lun *lun, 399 struct ctl_cmd_entry *entry, 400 struct ctl_scsiio *ctsio); 401 //static int ctl_check_rtr(union ctl_io *pending_io, struct ctl_softc *softc); 402 static void ctl_failover(void); 403 static int ctl_scsiio_precheck(struct ctl_softc *ctl_softc, 404 struct ctl_scsiio *ctsio); 405 static int ctl_scsiio(struct ctl_scsiio *ctsio); 406 407 static int ctl_bus_reset(struct ctl_softc *ctl_softc, union ctl_io *io); 408 static int ctl_target_reset(struct ctl_softc *ctl_softc, union ctl_io *io, 409 ctl_ua_type ua_type); 410 static int ctl_lun_reset(struct ctl_lun *lun, union ctl_io *io, 411 ctl_ua_type ua_type); 412 static int ctl_abort_task(union ctl_io *io); 413 static void ctl_run_task_queue(struct ctl_softc *ctl_softc); 414 #ifdef CTL_IO_DELAY 415 static void ctl_datamove_timer_wakeup(void *arg); 416 static void ctl_done_timer_wakeup(void *arg); 417 #endif /* CTL_IO_DELAY */ 418 419 static void ctl_send_datamove_done(union ctl_io *io, int have_lock); 420 static void ctl_datamove_remote_write_cb(struct ctl_ha_dt_req *rq); 421 static int ctl_datamove_remote_dm_write_cb(union ctl_io *io); 422 static void ctl_datamove_remote_write(union ctl_io *io); 423 static int ctl_datamove_remote_dm_read_cb(union ctl_io *io); 424 static void ctl_datamove_remote_read_cb(struct ctl_ha_dt_req *rq); 425 static int ctl_datamove_remote_sgl_setup(union ctl_io *io); 426 static int ctl_datamove_remote_xfer(union ctl_io *io, unsigned command, 427 ctl_ha_dt_cb callback); 428 static void ctl_datamove_remote_read(union ctl_io *io); 429 static void ctl_datamove_remote(union ctl_io *io); 430 static int ctl_process_done(union ctl_io *io, int have_lock); 431 static void ctl_work_thread(void *arg); 432 433 /* 434 * Load the serialization table. This isn't very pretty, but is probably 435 * the easiest way to do it. 436 */ 437 #include "ctl_ser_table.c" 438 439 /* 440 * We only need to define open, close and ioctl routines for this driver. 441 */ 442 static struct cdevsw ctl_cdevsw = { 443 .d_version = D_VERSION, 444 .d_flags = 0, 445 .d_open = ctl_open, 446 .d_close = ctl_close, 447 .d_ioctl = ctl_ioctl, 448 .d_name = "ctl", 449 }; 450 451 452 MALLOC_DEFINE(M_CTL, "ctlmem", "Memory used for CTL"); 453 454 /* 455 * If we have the CAM SIM, we may or may not have another SIM that will 456 * cause CTL to get initialized. If not, we need to initialize it. 457 */ 458 SYSINIT(ctl_init, SI_SUB_CONFIGURE, SI_ORDER_THIRD, ctl_init, NULL); 459 460 static void 461 ctl_isc_handler_finish_xfer(struct ctl_softc *ctl_softc, 462 union ctl_ha_msg *msg_info) 463 { 464 struct ctl_scsiio *ctsio; 465 466 if (msg_info->hdr.original_sc == NULL) { 467 printf("%s: original_sc == NULL!\n", __func__); 468 /* XXX KDM now what? 
*/ 469 return; 470 } 471 472 ctsio = &msg_info->hdr.original_sc->scsiio; 473 ctsio->io_hdr.flags |= CTL_FLAG_IO_ACTIVE; 474 ctsio->io_hdr.msg_type = CTL_MSG_FINISH_IO; 475 ctsio->io_hdr.status = msg_info->hdr.status; 476 ctsio->scsi_status = msg_info->scsi.scsi_status; 477 ctsio->sense_len = msg_info->scsi.sense_len; 478 ctsio->sense_residual = msg_info->scsi.sense_residual; 479 ctsio->residual = msg_info->scsi.residual; 480 memcpy(&ctsio->sense_data, &msg_info->scsi.sense_data, 481 sizeof(ctsio->sense_data)); 482 memcpy(&ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN].bytes, 483 &msg_info->scsi.lbalen, sizeof(msg_info->scsi.lbalen)); 484 STAILQ_INSERT_TAIL(&ctl_softc->isc_queue, &ctsio->io_hdr, links); 485 ctl_wakeup_thread(); 486 } 487 488 static void 489 ctl_isc_handler_finish_ser_only(struct ctl_softc *ctl_softc, 490 union ctl_ha_msg *msg_info) 491 { 492 struct ctl_scsiio *ctsio; 493 494 if (msg_info->hdr.serializing_sc == NULL) { 495 printf("%s: serializing_sc == NULL!\n", __func__); 496 /* XXX KDM now what? */ 497 return; 498 } 499 500 ctsio = &msg_info->hdr.serializing_sc->scsiio; 501 #if 0 502 /* 503 * Attempt to catch the situation where an I/O has 504 * been freed, and we're using it again. 505 */ 506 if (ctsio->io_hdr.io_type == 0xff) { 507 union ctl_io *tmp_io; 508 tmp_io = (union ctl_io *)ctsio; 509 printf("%s: %p use after free!\n", __func__, 510 ctsio); 511 printf("%s: type %d msg %d cdb %x iptl: " 512 "%d:%d:%d:%d tag 0x%04x " 513 "flag %#x status %x\n", 514 __func__, 515 tmp_io->io_hdr.io_type, 516 tmp_io->io_hdr.msg_type, 517 tmp_io->scsiio.cdb[0], 518 tmp_io->io_hdr.nexus.initid.id, 519 tmp_io->io_hdr.nexus.targ_port, 520 tmp_io->io_hdr.nexus.targ_target.id, 521 tmp_io->io_hdr.nexus.targ_lun, 522 (tmp_io->io_hdr.io_type == 523 CTL_IO_TASK) ? 524 tmp_io->taskio.tag_num : 525 tmp_io->scsiio.tag_num, 526 tmp_io->io_hdr.flags, 527 tmp_io->io_hdr.status); 528 } 529 #endif 530 ctsio->io_hdr.msg_type = CTL_MSG_FINISH_IO; 531 STAILQ_INSERT_TAIL(&ctl_softc->isc_queue, &ctsio->io_hdr, links); 532 ctl_wakeup_thread(); 533 } 534 535 /* 536 * ISC (Inter Shelf Communication) event handler. Events from the HA 537 * subsystem come in here. 
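 * Received messages are dispatched on msg_type: most are turned into ctl_io
 * requests and queued for the CTL work thread, while a few (CTL_MSG_SYNC_FE,
 * CTL_MSG_APS_LOCK) are handled directly in this context.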
538 */ 539 static void 540 ctl_isc_event_handler(ctl_ha_channel channel, ctl_ha_event event, int param) 541 { 542 struct ctl_softc *ctl_softc; 543 union ctl_io *io; 544 struct ctl_prio *presio; 545 ctl_ha_status isc_status; 546 547 ctl_softc = control_softc; 548 io = NULL; 549 550 551 #if 0 552 printf("CTL: Isc Msg event %d\n", event); 553 #endif 554 if (event == CTL_HA_EVT_MSG_RECV) { 555 union ctl_ha_msg msg_info; 556 557 isc_status = ctl_ha_msg_recv(CTL_HA_CHAN_CTL, &msg_info, 558 sizeof(msg_info), /*wait*/ 0); 559 #if 0 560 printf("CTL: msg_type %d\n", msg_info.msg_type); 561 #endif 562 if (isc_status != 0) { 563 printf("Error receiving message, status = %d\n", 564 isc_status); 565 return; 566 } 567 mtx_lock(&ctl_softc->ctl_lock); 568 569 switch (msg_info.hdr.msg_type) { 570 case CTL_MSG_SERIALIZE: 571 #if 0 572 printf("Serialize\n"); 573 #endif 574 io = ctl_alloc_io((void *)ctl_softc->othersc_pool); 575 if (io == NULL) { 576 printf("ctl_isc_event_handler: can't allocate " 577 "ctl_io!\n"); 578 /* Bad Juju */ 579 /* Need to set busy and send msg back */ 580 mtx_unlock(&ctl_softc->ctl_lock); 581 msg_info.hdr.msg_type = CTL_MSG_BAD_JUJU; 582 msg_info.hdr.status = CTL_SCSI_ERROR; 583 msg_info.scsi.scsi_status = SCSI_STATUS_BUSY; 584 msg_info.scsi.sense_len = 0; 585 if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 586 sizeof(msg_info), 0) > CTL_HA_STATUS_SUCCESS){ 587 } 588 goto bailout; 589 } 590 ctl_zero_io(io); 591 // populate ctsio from msg_info 592 io->io_hdr.io_type = CTL_IO_SCSI; 593 io->io_hdr.msg_type = CTL_MSG_SERIALIZE; 594 io->io_hdr.original_sc = msg_info.hdr.original_sc; 595 #if 0 596 printf("pOrig %x\n", (int)msg_info.original_sc); 597 #endif 598 io->io_hdr.flags |= CTL_FLAG_FROM_OTHER_SC | 599 CTL_FLAG_IO_ACTIVE; 600 /* 601 * If we're in serialization-only mode, we don't 602 * want to go through full done processing. Thus 603 * the COPY flag. 604 * 605 * XXX KDM add another flag that is more specific. 606 */ 607 if (ctl_softc->ha_mode == CTL_HA_MODE_SER_ONLY) 608 io->io_hdr.flags |= CTL_FLAG_INT_COPY; 609 io->io_hdr.nexus = msg_info.hdr.nexus; 610 #if 0 611 printf("targ %d, port %d, iid %d, lun %d\n", 612 io->io_hdr.nexus.targ_target.id, 613 io->io_hdr.nexus.targ_port, 614 io->io_hdr.nexus.initid.id, 615 io->io_hdr.nexus.targ_lun); 616 #endif 617 io->scsiio.tag_num = msg_info.scsi.tag_num; 618 io->scsiio.tag_type = msg_info.scsi.tag_type; 619 memcpy(io->scsiio.cdb, msg_info.scsi.cdb, 620 CTL_MAX_CDBLEN); 621 if (ctl_softc->ha_mode == CTL_HA_MODE_XFER) { 622 struct ctl_cmd_entry *entry; 623 uint8_t opcode; 624 625 opcode = io->scsiio.cdb[0]; 626 entry = &ctl_cmd_table[opcode]; 627 io->io_hdr.flags &= ~CTL_FLAG_DATA_MASK; 628 io->io_hdr.flags |= 629 entry->flags & CTL_FLAG_DATA_MASK; 630 } 631 STAILQ_INSERT_TAIL(&ctl_softc->isc_queue, 632 &io->io_hdr, links); 633 ctl_wakeup_thread(); 634 break; 635 636 /* Performed on the Originating SC, XFER mode only */ 637 case CTL_MSG_DATAMOVE: { 638 struct ctl_sg_entry *sgl; 639 int i, j; 640 641 io = msg_info.hdr.original_sc; 642 if (io == NULL) { 643 printf("%s: original_sc == NULL!\n", __func__); 644 /* XXX KDM do something here */ 645 break; 646 } 647 io->io_hdr.msg_type = CTL_MSG_DATAMOVE; 648 io->io_hdr.flags |= CTL_FLAG_IO_ACTIVE; 649 /* 650 * Keep track of this, we need to send it back over 651 * when the datamove is complete. 
652 */ 653 io->io_hdr.serializing_sc = msg_info.hdr.serializing_sc; 654 655 if (msg_info.dt.sg_sequence == 0) { 656 /* 657 * XXX KDM we use the preallocated S/G list 658 * here, but we'll need to change this to 659 * dynamic allocation if we need larger S/G 660 * lists. 661 */ 662 if (msg_info.dt.kern_sg_entries > 663 sizeof(io->io_hdr.remote_sglist) / 664 sizeof(io->io_hdr.remote_sglist[0])) { 665 printf("%s: number of S/G entries " 666 "needed %u > allocated num %zd\n", 667 __func__, 668 msg_info.dt.kern_sg_entries, 669 sizeof(io->io_hdr.remote_sglist)/ 670 sizeof(io->io_hdr.remote_sglist[0])); 671 672 /* 673 * XXX KDM send a message back to 674 * the other side to shut down the 675 * DMA. The error will come back 676 * through via the normal channel. 677 */ 678 break; 679 } 680 sgl = io->io_hdr.remote_sglist; 681 memset(sgl, 0, 682 sizeof(io->io_hdr.remote_sglist)); 683 684 io->scsiio.kern_data_ptr = (uint8_t *)sgl; 685 686 io->scsiio.kern_sg_entries = 687 msg_info.dt.kern_sg_entries; 688 io->scsiio.rem_sg_entries = 689 msg_info.dt.kern_sg_entries; 690 io->scsiio.kern_data_len = 691 msg_info.dt.kern_data_len; 692 io->scsiio.kern_total_len = 693 msg_info.dt.kern_total_len; 694 io->scsiio.kern_data_resid = 695 msg_info.dt.kern_data_resid; 696 io->scsiio.kern_rel_offset = 697 msg_info.dt.kern_rel_offset; 698 /* 699 * Clear out per-DMA flags. 700 */ 701 io->io_hdr.flags &= ~CTL_FLAG_RDMA_MASK; 702 /* 703 * Add per-DMA flags that are set for this 704 * particular DMA request. 705 */ 706 io->io_hdr.flags |= msg_info.dt.flags & 707 CTL_FLAG_RDMA_MASK; 708 } else 709 sgl = (struct ctl_sg_entry *) 710 io->scsiio.kern_data_ptr; 711 712 for (i = msg_info.dt.sent_sg_entries, j = 0; 713 i < (msg_info.dt.sent_sg_entries + 714 msg_info.dt.cur_sg_entries); i++, j++) { 715 sgl[i].addr = msg_info.dt.sg_list[j].addr; 716 sgl[i].len = msg_info.dt.sg_list[j].len; 717 718 #if 0 719 printf("%s: L: %p,%d -> %p,%d j=%d, i=%d\n", 720 __func__, 721 msg_info.dt.sg_list[j].addr, 722 msg_info.dt.sg_list[j].len, 723 sgl[i].addr, sgl[i].len, j, i); 724 #endif 725 } 726 #if 0 727 memcpy(&sgl[msg_info.dt.sent_sg_entries], 728 msg_info.dt.sg_list, 729 sizeof(*sgl) * msg_info.dt.cur_sg_entries); 730 #endif 731 732 /* 733 * If this is the last piece of the I/O, we've got 734 * the full S/G list. Queue processing in the thread. 735 * Otherwise wait for the next piece. 736 */ 737 if (msg_info.dt.sg_last != 0) { 738 STAILQ_INSERT_TAIL(&ctl_softc->isc_queue, 739 &io->io_hdr, links); 740 ctl_wakeup_thread(); 741 } 742 break; 743 } 744 /* Performed on the Serializing (primary) SC, XFER mode only */ 745 case CTL_MSG_DATAMOVE_DONE: { 746 if (msg_info.hdr.serializing_sc == NULL) { 747 printf("%s: serializing_sc == NULL!\n", 748 __func__); 749 /* XXX KDM now what? */ 750 break; 751 } 752 /* 753 * We grab the sense information here in case 754 * there was a failure, so we can return status 755 * back to the initiator. 
756 */ 757 io = msg_info.hdr.serializing_sc; 758 io->io_hdr.msg_type = CTL_MSG_DATAMOVE_DONE; 759 io->io_hdr.status = msg_info.hdr.status; 760 io->scsiio.scsi_status = msg_info.scsi.scsi_status; 761 io->scsiio.sense_len = msg_info.scsi.sense_len; 762 io->scsiio.sense_residual =msg_info.scsi.sense_residual; 763 io->io_hdr.port_status = msg_info.scsi.fetd_status; 764 io->scsiio.residual = msg_info.scsi.residual; 765 memcpy(&io->scsiio.sense_data,&msg_info.scsi.sense_data, 766 sizeof(io->scsiio.sense_data)); 767 768 STAILQ_INSERT_TAIL(&ctl_softc->isc_queue, 769 &io->io_hdr, links); 770 ctl_wakeup_thread(); 771 break; 772 } 773 774 /* Preformed on Originating SC, SER_ONLY mode */ 775 case CTL_MSG_R2R: 776 io = msg_info.hdr.original_sc; 777 if (io == NULL) { 778 printf("%s: Major Bummer\n", __func__); 779 mtx_unlock(&ctl_softc->ctl_lock); 780 return; 781 } else { 782 #if 0 783 printf("pOrig %x\n",(int) ctsio); 784 #endif 785 } 786 io->io_hdr.msg_type = CTL_MSG_R2R; 787 io->io_hdr.serializing_sc = msg_info.hdr.serializing_sc; 788 STAILQ_INSERT_TAIL(&ctl_softc->isc_queue, 789 &io->io_hdr, links); 790 ctl_wakeup_thread(); 791 break; 792 793 /* 794 * Performed on Serializing(i.e. primary SC) SC in SER_ONLY 795 * mode. 796 * Performed on the Originating (i.e. secondary) SC in XFER 797 * mode 798 */ 799 case CTL_MSG_FINISH_IO: 800 if (ctl_softc->ha_mode == CTL_HA_MODE_XFER) 801 ctl_isc_handler_finish_xfer(ctl_softc, 802 &msg_info); 803 else 804 ctl_isc_handler_finish_ser_only(ctl_softc, 805 &msg_info); 806 break; 807 808 /* Preformed on Originating SC */ 809 case CTL_MSG_BAD_JUJU: 810 io = msg_info.hdr.original_sc; 811 if (io == NULL) { 812 printf("%s: Bad JUJU!, original_sc is NULL!\n", 813 __func__); 814 break; 815 } 816 ctl_copy_sense_data(&msg_info, io); 817 /* 818 * IO should have already been cleaned up on other 819 * SC so clear this flag so we won't send a message 820 * back to finish the IO there. 821 */ 822 io->io_hdr.flags &= ~CTL_FLAG_SENT_2OTHER_SC; 823 io->io_hdr.flags |= CTL_FLAG_IO_ACTIVE; 824 825 /* io = msg_info.hdr.serializing_sc; */ 826 io->io_hdr.msg_type = CTL_MSG_BAD_JUJU; 827 STAILQ_INSERT_TAIL(&ctl_softc->isc_queue, 828 &io->io_hdr, links); 829 ctl_wakeup_thread(); 830 break; 831 832 /* Handle resets sent from the other side */ 833 case CTL_MSG_MANAGE_TASKS: { 834 struct ctl_taskio *taskio; 835 taskio = (struct ctl_taskio *)ctl_alloc_io( 836 (void *)ctl_softc->othersc_pool); 837 if (taskio == NULL) { 838 printf("ctl_isc_event_handler: can't allocate " 839 "ctl_io!\n"); 840 /* Bad Juju */ 841 /* should I just call the proper reset func 842 here??? 
				 */
				mtx_unlock(&ctl_softc->ctl_lock);
				goto bailout;
			}
			ctl_zero_io((union ctl_io *)taskio);
			taskio->io_hdr.io_type = CTL_IO_TASK;
			taskio->io_hdr.flags |= CTL_FLAG_FROM_OTHER_SC;
			taskio->io_hdr.nexus = msg_info.hdr.nexus;
			taskio->task_action = msg_info.task.task_action;
			taskio->tag_num = msg_info.task.tag_num;
			taskio->tag_type = msg_info.task.tag_type;
#ifdef CTL_TIME_IO
			taskio->io_hdr.start_time = time_uptime;
			getbintime(&taskio->io_hdr.start_bt);
#if 0
			cs_prof_gettime(&taskio->io_hdr.start_ticks);
#endif
#endif /* CTL_TIME_IO */
			STAILQ_INSERT_TAIL(&ctl_softc->task_queue,
					   &taskio->io_hdr, links);
			ctl_softc->flags |= CTL_FLAG_TASK_PENDING;
			ctl_wakeup_thread();
			break;
		}
		/* Persistent Reserve action which needs attention */
		case CTL_MSG_PERS_ACTION:
			presio = (struct ctl_prio *)ctl_alloc_io(
				(void *)ctl_softc->othersc_pool);
			if (presio == NULL) {
				printf("ctl_isc_event_handler: can't allocate "
				       "ctl_io!\n");
				/* Bad Juju */
				/* Need to set busy and send msg back */
				mtx_unlock(&ctl_softc->ctl_lock);
				goto bailout;
			}
			ctl_zero_io((union ctl_io *)presio);
			presio->io_hdr.msg_type = CTL_MSG_PERS_ACTION;
			presio->pr_msg = msg_info.pr;
			STAILQ_INSERT_TAIL(&ctl_softc->isc_queue,
					   &presio->io_hdr, links);
			ctl_wakeup_thread();
			break;
		case CTL_MSG_SYNC_FE:
			rcv_sync_msg = 1;
			break;
		case CTL_MSG_APS_LOCK: {
			// It's quicker to execute this than to
			// queue it.
			struct ctl_lun *lun;
			struct ctl_page_index *page_index;
			struct copan_aps_subpage *current_sp;

			lun = ctl_softc->ctl_luns[msg_info.hdr.nexus.targ_lun];
			page_index = &lun->mode_pages.index[index_to_aps_page];
			current_sp = (struct copan_aps_subpage *)
				(page_index->page_data +
				(page_index->page_len * CTL_PAGE_CURRENT));

			current_sp->lock_active = msg_info.aps.lock_flag;
			break;
		}
		default:
			printf("How did I get here?\n");
		}
		mtx_unlock(&ctl_softc->ctl_lock);
	} else if (event == CTL_HA_EVT_MSG_SENT) {
		if (param != CTL_HA_STATUS_SUCCESS) {
			printf("Bad status from ctl_ha_msg_send status %d\n",
			       param);
		}
		return;
	} else if (event == CTL_HA_EVT_DISCONNECT) {
		printf("CTL: Got a disconnect from Isc\n");
		return;
	} else {
		printf("ctl_isc_event_handler: Unknown event %d\n", event);
		return;
	}

bailout:
	return;
}

static void
ctl_copy_sense_data(union ctl_ha_msg *src, union ctl_io *dest)
{
	struct scsi_sense_data *sense;

	sense = &dest->scsiio.sense_data;
	bcopy(&src->scsi.sense_data, sense, sizeof(*sense));
	dest->scsiio.scsi_status = src->scsi.scsi_status;
	dest->scsiio.sense_len = src->scsi.sense_len;
	dest->io_hdr.status = src->hdr.status;
}

static void
ctl_init(void)
{
	struct ctl_softc *softc;
	struct ctl_io_pool *internal_pool, *emergency_pool, *other_pool;
	struct ctl_frontend *fe;
	struct ctl_lun *lun;
	uint8_t sc_id = 0;
#if 0
	int i;
#endif
	int retval;
	//int isc_retval;

	retval = 0;
	ctl_pause_rtr = 0;
	rcv_sync_msg = 0;

	/* If we're disabled, don't initialize.
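	 * ctl_disable comes from the kern.cam.ctl.disable tunable declared
	 * near the top of this file.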
*/ 957 if (ctl_disable != 0) 958 return; 959 960 control_softc = malloc(sizeof(*control_softc), M_DEVBUF, 961 M_WAITOK | M_ZERO); 962 softc = control_softc; 963 964 softc->dev = make_dev(&ctl_cdevsw, 0, UID_ROOT, GID_OPERATOR, 0600, 965 "cam/ctl"); 966 967 softc->dev->si_drv1 = softc; 968 969 /* 970 * By default, return a "bad LUN" peripheral qualifier for unknown 971 * LUNs. The user can override this default using the tunable or 972 * sysctl. See the comment in ctl_inquiry_std() for more details. 973 */ 974 softc->inquiry_pq_no_lun = 1; 975 TUNABLE_INT_FETCH("kern.cam.ctl.inquiry_pq_no_lun", 976 &softc->inquiry_pq_no_lun); 977 sysctl_ctx_init(&softc->sysctl_ctx); 978 softc->sysctl_tree = SYSCTL_ADD_NODE(&softc->sysctl_ctx, 979 SYSCTL_STATIC_CHILDREN(_kern_cam), OID_AUTO, "ctl", 980 CTLFLAG_RD, 0, "CAM Target Layer"); 981 982 if (softc->sysctl_tree == NULL) { 983 printf("%s: unable to allocate sysctl tree\n", __func__); 984 destroy_dev(softc->dev); 985 free(control_softc, M_DEVBUF); 986 control_softc = NULL; 987 return; 988 } 989 990 SYSCTL_ADD_INT(&softc->sysctl_ctx, 991 SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, 992 "inquiry_pq_no_lun", CTLFLAG_RW, 993 &softc->inquiry_pq_no_lun, 0, 994 "Report no lun possible for invalid LUNs"); 995 996 mtx_init(&softc->ctl_lock, "CTL mutex", NULL, MTX_DEF); 997 softc->open_count = 0; 998 999 /* 1000 * Default to actually sending a SYNCHRONIZE CACHE command down to 1001 * the drive. 1002 */ 1003 softc->flags = CTL_FLAG_REAL_SYNC; 1004 1005 /* 1006 * In Copan's HA scheme, the "master" and "slave" roles are 1007 * figured out through the slot the controller is in. Although it 1008 * is an active/active system, someone has to be in charge. 1009 */ 1010 #ifdef NEEDTOPORT 1011 scmicro_rw(SCMICRO_GET_SHELF_ID, &sc_id); 1012 #endif 1013 1014 if (sc_id == 0) { 1015 softc->flags |= CTL_FLAG_MASTER_SHELF; 1016 persis_offset = 0; 1017 } else 1018 persis_offset = CTL_MAX_INITIATORS; 1019 1020 /* 1021 * XXX KDM need to figure out where we want to get our target ID 1022 * and WWID. Is it different on each port? 1023 */ 1024 softc->target.id = 0; 1025 softc->target.wwid[0] = 0x12345678; 1026 softc->target.wwid[1] = 0x87654321; 1027 STAILQ_INIT(&softc->lun_list); 1028 STAILQ_INIT(&softc->pending_lun_queue); 1029 STAILQ_INIT(&softc->task_queue); 1030 STAILQ_INIT(&softc->incoming_queue); 1031 STAILQ_INIT(&softc->rtr_queue); 1032 STAILQ_INIT(&softc->done_queue); 1033 STAILQ_INIT(&softc->isc_queue); 1034 STAILQ_INIT(&softc->fe_list); 1035 STAILQ_INIT(&softc->be_list); 1036 STAILQ_INIT(&softc->io_pools); 1037 1038 lun = &softc->lun; 1039 1040 /* 1041 * We don't bother calling these with ctl_lock held here, because, 1042 * in theory, no one else can try to do anything while we're in our 1043 * module init routine. 
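	 * Each pool created below is freed again if a later setup step fails.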
1044 */ 1045 if (ctl_pool_create(softc, CTL_POOL_INTERNAL, CTL_POOL_ENTRIES_INTERNAL, 1046 &internal_pool)!= 0){ 1047 printf("ctl: can't allocate %d entry internal pool, " 1048 "exiting\n", CTL_POOL_ENTRIES_INTERNAL); 1049 return; 1050 } 1051 1052 if (ctl_pool_create(softc, CTL_POOL_EMERGENCY, 1053 CTL_POOL_ENTRIES_EMERGENCY, &emergency_pool) != 0) { 1054 printf("ctl: can't allocate %d entry emergency pool, " 1055 "exiting\n", CTL_POOL_ENTRIES_EMERGENCY); 1056 ctl_pool_free(softc, internal_pool); 1057 return; 1058 } 1059 1060 if (ctl_pool_create(softc, CTL_POOL_4OTHERSC, CTL_POOL_ENTRIES_OTHER_SC, 1061 &other_pool) != 0) 1062 { 1063 printf("ctl: can't allocate %d entry other SC pool, " 1064 "exiting\n", CTL_POOL_ENTRIES_OTHER_SC); 1065 ctl_pool_free(softc, internal_pool); 1066 ctl_pool_free(softc, emergency_pool); 1067 return; 1068 } 1069 1070 softc->internal_pool = internal_pool; 1071 softc->emergency_pool = emergency_pool; 1072 softc->othersc_pool = other_pool; 1073 1074 ctl_pool_acquire(internal_pool); 1075 ctl_pool_acquire(emergency_pool); 1076 ctl_pool_acquire(other_pool); 1077 1078 /* 1079 * We used to allocate a processor LUN here. The new scheme is to 1080 * just let the user allocate LUNs as he sees fit. 1081 */ 1082 #if 0 1083 mtx_lock(&softc->ctl_lock); 1084 ctl_alloc_lun(softc, lun, /*be_lun*/NULL, /*target*/softc->target); 1085 mtx_unlock(&softc->ctl_lock); 1086 #endif 1087 1088 if (kproc_create(ctl_work_thread, softc, &softc->work_thread, 0, 0, 1089 "ctl_thrd") != 0) { 1090 printf("error creating CTL work thread!\n"); 1091 ctl_free_lun(lun); 1092 ctl_pool_free(softc, internal_pool); 1093 ctl_pool_free(softc, emergency_pool); 1094 ctl_pool_free(softc, other_pool); 1095 return; 1096 } 1097 printf("ctl: CAM Target Layer loaded\n"); 1098 1099 /* 1100 * Initialize the initiator and portname mappings 1101 */ 1102 memset(softc->wwpn_iid, 0, sizeof(softc->wwpn_iid)); 1103 1104 /* 1105 * Initialize the ioctl front end. 1106 */ 1107 fe = &softc->ioctl_info.fe; 1108 sprintf(softc->ioctl_info.port_name, "CTL ioctl"); 1109 fe->port_type = CTL_PORT_IOCTL; 1110 fe->num_requested_ctl_io = 100; 1111 fe->port_name = softc->ioctl_info.port_name; 1112 fe->port_online = ctl_ioctl_online; 1113 fe->port_offline = ctl_ioctl_offline; 1114 fe->onoff_arg = &softc->ioctl_info; 1115 fe->targ_enable = ctl_ioctl_targ_enable; 1116 fe->targ_disable = ctl_ioctl_targ_disable; 1117 fe->lun_enable = ctl_ioctl_lun_enable; 1118 fe->lun_disable = ctl_ioctl_lun_disable; 1119 fe->targ_lun_arg = &softc->ioctl_info; 1120 fe->fe_datamove = ctl_ioctl_datamove; 1121 fe->fe_done = ctl_ioctl_done; 1122 fe->max_targets = 15; 1123 fe->max_target_id = 15; 1124 1125 if (ctl_frontend_register(&softc->ioctl_info.fe, 1126 (softc->flags & CTL_FLAG_MASTER_SHELF)) != 0) { 1127 printf("ctl: ioctl front end registration failed, will " 1128 "continue anyway\n"); 1129 } 1130 1131 #ifdef CTL_IO_DELAY 1132 if (sizeof(struct callout) > CTL_TIMER_BYTES) { 1133 printf("sizeof(struct callout) %zd > CTL_TIMER_BYTES %zd\n", 1134 sizeof(struct callout), CTL_TIMER_BYTES); 1135 return; 1136 } 1137 #endif /* CTL_IO_DELAY */ 1138 1139 } 1140 1141 void 1142 ctl_shutdown(void) 1143 { 1144 struct ctl_softc *softc; 1145 struct ctl_lun *lun, *next_lun; 1146 struct ctl_io_pool *pool, *next_pool; 1147 1148 softc = (struct ctl_softc *)control_softc; 1149 1150 if (ctl_frontend_deregister(&softc->ioctl_info.fe) != 0) 1151 printf("ctl: ioctl front end deregistration failed\n"); 1152 1153 mtx_lock(&softc->ctl_lock); 1154 1155 /* 1156 * Free up each LUN. 
1157 */ 1158 for (lun = STAILQ_FIRST(&softc->lun_list); lun != NULL; lun = next_lun){ 1159 next_lun = STAILQ_NEXT(lun, links); 1160 ctl_free_lun(lun); 1161 } 1162 1163 /* 1164 * This will rip the rug out from under any FETDs or anyone else 1165 * that has a pool allocated. Since we increment our module 1166 * refcount any time someone outside the main CTL module allocates 1167 * a pool, we shouldn't have any problems here. The user won't be 1168 * able to unload the CTL module until client modules have 1169 * successfully unloaded. 1170 */ 1171 for (pool = STAILQ_FIRST(&softc->io_pools); pool != NULL; 1172 pool = next_pool) { 1173 next_pool = STAILQ_NEXT(pool, links); 1174 ctl_pool_free(softc, pool); 1175 } 1176 1177 mtx_unlock(&softc->ctl_lock); 1178 1179 #if 0 1180 ctl_shutdown_thread(softc->work_thread); 1181 #endif 1182 1183 mtx_destroy(&softc->ctl_lock); 1184 1185 destroy_dev(softc->dev); 1186 1187 sysctl_ctx_free(&softc->sysctl_ctx); 1188 1189 free(control_softc, M_DEVBUF); 1190 control_softc = NULL; 1191 1192 printf("ctl: CAM Target Layer unloaded\n"); 1193 } 1194 1195 /* 1196 * XXX KDM should we do some access checks here? Bump a reference count to 1197 * prevent a CTL module from being unloaded while someone has it open? 1198 */ 1199 static int 1200 ctl_open(struct cdev *dev, int flags, int fmt, struct thread *td) 1201 { 1202 return (0); 1203 } 1204 1205 static int 1206 ctl_close(struct cdev *dev, int flags, int fmt, struct thread *td) 1207 { 1208 return (0); 1209 } 1210 1211 int 1212 ctl_port_enable(ctl_port_type port_type) 1213 { 1214 struct ctl_softc *softc; 1215 struct ctl_frontend *fe; 1216 1217 if (ctl_is_single == 0) { 1218 union ctl_ha_msg msg_info; 1219 int isc_retval; 1220 1221 #if 0 1222 printf("%s: HA mode, synchronizing frontend enable\n", 1223 __func__); 1224 #endif 1225 msg_info.hdr.msg_type = CTL_MSG_SYNC_FE; 1226 if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 1227 sizeof(msg_info), 1 )) > CTL_HA_STATUS_SUCCESS) { 1228 printf("Sync msg send error retval %d\n", isc_retval); 1229 } 1230 if (!rcv_sync_msg) { 1231 isc_retval=ctl_ha_msg_recv(CTL_HA_CHAN_CTL, &msg_info, 1232 sizeof(msg_info), 1); 1233 } 1234 #if 0 1235 printf("CTL:Frontend Enable\n"); 1236 } else { 1237 printf("%s: single mode, skipping frontend synchronization\n", 1238 __func__); 1239 #endif 1240 } 1241 1242 softc = control_softc; 1243 1244 STAILQ_FOREACH(fe, &softc->fe_list, links) { 1245 if (port_type & fe->port_type) 1246 { 1247 #if 0 1248 printf("port %d\n", fe->targ_port); 1249 #endif 1250 ctl_frontend_online(fe); 1251 } 1252 } 1253 1254 return (0); 1255 } 1256 1257 int 1258 ctl_port_disable(ctl_port_type port_type) 1259 { 1260 struct ctl_softc *softc; 1261 struct ctl_frontend *fe; 1262 1263 softc = control_softc; 1264 1265 STAILQ_FOREACH(fe, &softc->fe_list, links) { 1266 if (port_type & fe->port_type) 1267 ctl_frontend_offline(fe); 1268 } 1269 1270 return (0); 1271 } 1272 1273 /* 1274 * Returns 0 for success, 1 for failure. 1275 * Currently the only failure mode is if there aren't enough entries 1276 * allocated. So, in case of a failure, look at num_entries_dropped, 1277 * reallocate and try again. 
1278 */ 1279 int 1280 ctl_port_list(struct ctl_port_entry *entries, int num_entries_alloced, 1281 int *num_entries_filled, int *num_entries_dropped, 1282 ctl_port_type port_type, int no_virtual) 1283 { 1284 struct ctl_softc *softc; 1285 struct ctl_frontend *fe; 1286 int entries_dropped, entries_filled; 1287 int retval; 1288 int i; 1289 1290 softc = control_softc; 1291 1292 retval = 0; 1293 entries_filled = 0; 1294 entries_dropped = 0; 1295 1296 i = 0; 1297 mtx_lock(&softc->ctl_lock); 1298 STAILQ_FOREACH(fe, &softc->fe_list, links) { 1299 struct ctl_port_entry *entry; 1300 1301 if ((fe->port_type & port_type) == 0) 1302 continue; 1303 1304 if ((no_virtual != 0) 1305 && (fe->virtual_port != 0)) 1306 continue; 1307 1308 if (entries_filled >= num_entries_alloced) { 1309 entries_dropped++; 1310 continue; 1311 } 1312 entry = &entries[i]; 1313 1314 entry->port_type = fe->port_type; 1315 strlcpy(entry->port_name, fe->port_name, 1316 sizeof(entry->port_name)); 1317 entry->physical_port = fe->physical_port; 1318 entry->virtual_port = fe->virtual_port; 1319 entry->wwnn = fe->wwnn; 1320 entry->wwpn = fe->wwpn; 1321 1322 i++; 1323 entries_filled++; 1324 } 1325 1326 mtx_unlock(&softc->ctl_lock); 1327 1328 if (entries_dropped > 0) 1329 retval = 1; 1330 1331 *num_entries_dropped = entries_dropped; 1332 *num_entries_filled = entries_filled; 1333 1334 return (retval); 1335 } 1336 1337 static void 1338 ctl_ioctl_online(void *arg) 1339 { 1340 struct ctl_ioctl_info *ioctl_info; 1341 1342 ioctl_info = (struct ctl_ioctl_info *)arg; 1343 1344 ioctl_info->flags |= CTL_IOCTL_FLAG_ENABLED; 1345 } 1346 1347 static void 1348 ctl_ioctl_offline(void *arg) 1349 { 1350 struct ctl_ioctl_info *ioctl_info; 1351 1352 ioctl_info = (struct ctl_ioctl_info *)arg; 1353 1354 ioctl_info->flags &= ~CTL_IOCTL_FLAG_ENABLED; 1355 } 1356 1357 /* 1358 * Remove an initiator by port number and initiator ID. 1359 * Returns 0 for success, 1 for failure. 1360 * Assumes the caller does NOT hold the CTL lock. 1361 */ 1362 int 1363 ctl_remove_initiator(int32_t targ_port, uint32_t iid) 1364 { 1365 struct ctl_softc *softc; 1366 1367 softc = control_softc; 1368 1369 if ((targ_port < 0) 1370 || (targ_port > CTL_MAX_PORTS)) { 1371 printf("%s: invalid port number %d\n", __func__, targ_port); 1372 return (1); 1373 } 1374 if (iid > CTL_MAX_INIT_PER_PORT) { 1375 printf("%s: initiator ID %u > maximun %u!\n", 1376 __func__, iid, CTL_MAX_INIT_PER_PORT); 1377 return (1); 1378 } 1379 1380 mtx_lock(&softc->ctl_lock); 1381 1382 softc->wwpn_iid[targ_port][iid].in_use = 0; 1383 1384 mtx_unlock(&softc->ctl_lock); 1385 1386 return (0); 1387 } 1388 1389 /* 1390 * Add an initiator to the initiator map. 1391 * Returns 0 for success, 1 for failure. 1392 * Assumes the caller does NOT hold the CTL lock. 1393 */ 1394 int 1395 ctl_add_initiator(uint64_t wwpn, int32_t targ_port, uint32_t iid) 1396 { 1397 struct ctl_softc *softc; 1398 int retval; 1399 1400 softc = control_softc; 1401 1402 retval = 0; 1403 1404 if ((targ_port < 0) 1405 || (targ_port > CTL_MAX_PORTS)) { 1406 printf("%s: invalid port number %d\n", __func__, targ_port); 1407 return (1); 1408 } 1409 if (iid > CTL_MAX_INIT_PER_PORT) { 1410 printf("%s: WWPN %#jx initiator ID %u > maximun %u!\n", 1411 __func__, wwpn, iid, CTL_MAX_INIT_PER_PORT); 1412 return (1); 1413 } 1414 1415 mtx_lock(&softc->ctl_lock); 1416 1417 if (softc->wwpn_iid[targ_port][iid].in_use != 0) { 1418 /* 1419 * We don't treat this as an error. 
1420 */ 1421 if (softc->wwpn_iid[targ_port][iid].wwpn == wwpn) { 1422 printf("%s: port %d iid %u WWPN %#jx arrived again?\n", 1423 __func__, targ_port, iid, (uintmax_t)wwpn); 1424 goto bailout; 1425 } 1426 1427 /* 1428 * This is an error, but what do we do about it? The 1429 * driver is telling us we have a new WWPN for this 1430 * initiator ID, so we pretty much need to use it. 1431 */ 1432 printf("%s: port %d iid %u WWPN %#jx arrived, WWPN %#jx is " 1433 "still at that address\n", __func__, targ_port, iid, 1434 (uintmax_t)wwpn, 1435 (uintmax_t)softc->wwpn_iid[targ_port][iid].wwpn); 1436 1437 /* 1438 * XXX KDM clear have_ca and ua_pending on each LUN for 1439 * this initiator. 1440 */ 1441 } 1442 softc->wwpn_iid[targ_port][iid].in_use = 1; 1443 softc->wwpn_iid[targ_port][iid].iid = iid; 1444 softc->wwpn_iid[targ_port][iid].wwpn = wwpn; 1445 softc->wwpn_iid[targ_port][iid].port = targ_port; 1446 1447 bailout: 1448 1449 mtx_unlock(&softc->ctl_lock); 1450 1451 return (retval); 1452 } 1453 1454 /* 1455 * XXX KDM should we pretend to do something in the target/lun 1456 * enable/disable functions? 1457 */ 1458 static int 1459 ctl_ioctl_targ_enable(void *arg, struct ctl_id targ_id) 1460 { 1461 return (0); 1462 } 1463 1464 static int 1465 ctl_ioctl_targ_disable(void *arg, struct ctl_id targ_id) 1466 { 1467 return (0); 1468 } 1469 1470 static int 1471 ctl_ioctl_lun_enable(void *arg, struct ctl_id targ_id, int lun_id) 1472 { 1473 return (0); 1474 } 1475 1476 static int 1477 ctl_ioctl_lun_disable(void *arg, struct ctl_id targ_id, int lun_id) 1478 { 1479 return (0); 1480 } 1481 1482 /* 1483 * Data movement routine for the CTL ioctl frontend port. 1484 */ 1485 static int 1486 ctl_ioctl_do_datamove(struct ctl_scsiio *ctsio) 1487 { 1488 struct ctl_sg_entry *ext_sglist, *kern_sglist; 1489 struct ctl_sg_entry ext_entry, kern_entry; 1490 int ext_sglen, ext_sg_entries, kern_sg_entries; 1491 int ext_sg_start, ext_offset; 1492 int len_to_copy, len_copied; 1493 int kern_watermark, ext_watermark; 1494 int ext_sglist_malloced; 1495 int i, j; 1496 1497 ext_sglist_malloced = 0; 1498 ext_sg_start = 0; 1499 ext_offset = 0; 1500 1501 CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove\n")); 1502 1503 /* 1504 * If this flag is set, fake the data transfer. 1505 */ 1506 if (ctsio->io_hdr.flags & CTL_FLAG_NO_DATAMOVE) { 1507 ctsio->ext_data_filled = ctsio->ext_data_len; 1508 goto bailout; 1509 } 1510 1511 /* 1512 * To simplify things here, if we have a single buffer, stick it in 1513 * a S/G entry and just make it a single entry S/G list. 
1514 */ 1515 if (ctsio->io_hdr.flags & CTL_FLAG_EDPTR_SGLIST) { 1516 int len_seen; 1517 1518 ext_sglen = ctsio->ext_sg_entries * sizeof(*ext_sglist); 1519 1520 ext_sglist = (struct ctl_sg_entry *)malloc(ext_sglen, M_CTL, 1521 M_WAITOK); 1522 ext_sglist_malloced = 1; 1523 if (copyin(ctsio->ext_data_ptr, ext_sglist, 1524 ext_sglen) != 0) { 1525 ctl_set_internal_failure(ctsio, 1526 /*sks_valid*/ 0, 1527 /*retry_count*/ 0); 1528 goto bailout; 1529 } 1530 ext_sg_entries = ctsio->ext_sg_entries; 1531 len_seen = 0; 1532 for (i = 0; i < ext_sg_entries; i++) { 1533 if ((len_seen + ext_sglist[i].len) >= 1534 ctsio->ext_data_filled) { 1535 ext_sg_start = i; 1536 ext_offset = ctsio->ext_data_filled - len_seen; 1537 break; 1538 } 1539 len_seen += ext_sglist[i].len; 1540 } 1541 } else { 1542 ext_sglist = &ext_entry; 1543 ext_sglist->addr = ctsio->ext_data_ptr; 1544 ext_sglist->len = ctsio->ext_data_len; 1545 ext_sg_entries = 1; 1546 ext_sg_start = 0; 1547 ext_offset = ctsio->ext_data_filled; 1548 } 1549 1550 if (ctsio->kern_sg_entries > 0) { 1551 kern_sglist = (struct ctl_sg_entry *)ctsio->kern_data_ptr; 1552 kern_sg_entries = ctsio->kern_sg_entries; 1553 } else { 1554 kern_sglist = &kern_entry; 1555 kern_sglist->addr = ctsio->kern_data_ptr; 1556 kern_sglist->len = ctsio->kern_data_len; 1557 kern_sg_entries = 1; 1558 } 1559 1560 1561 kern_watermark = 0; 1562 ext_watermark = ext_offset; 1563 len_copied = 0; 1564 for (i = ext_sg_start, j = 0; 1565 i < ext_sg_entries && j < kern_sg_entries;) { 1566 uint8_t *ext_ptr, *kern_ptr; 1567 1568 len_to_copy = ctl_min(ext_sglist[i].len - ext_watermark, 1569 kern_sglist[j].len - kern_watermark); 1570 1571 ext_ptr = (uint8_t *)ext_sglist[i].addr; 1572 ext_ptr = ext_ptr + ext_watermark; 1573 if (ctsio->io_hdr.flags & CTL_FLAG_BUS_ADDR) { 1574 /* 1575 * XXX KDM fix this! 
1576 */ 1577 panic("need to implement bus address support"); 1578 #if 0 1579 kern_ptr = bus_to_virt(kern_sglist[j].addr); 1580 #endif 1581 } else 1582 kern_ptr = (uint8_t *)kern_sglist[j].addr; 1583 kern_ptr = kern_ptr + kern_watermark; 1584 1585 kern_watermark += len_to_copy; 1586 ext_watermark += len_to_copy; 1587 1588 if ((ctsio->io_hdr.flags & CTL_FLAG_DATA_MASK) == 1589 CTL_FLAG_DATA_IN) { 1590 CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove: copying %d " 1591 "bytes to user\n", len_to_copy)); 1592 CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove: from %p " 1593 "to %p\n", kern_ptr, ext_ptr)); 1594 if (copyout(kern_ptr, ext_ptr, len_to_copy) != 0) { 1595 ctl_set_internal_failure(ctsio, 1596 /*sks_valid*/ 0, 1597 /*retry_count*/ 0); 1598 goto bailout; 1599 } 1600 } else { 1601 CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove: copying %d " 1602 "bytes from user\n", len_to_copy)); 1603 CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove: from %p " 1604 "to %p\n", ext_ptr, kern_ptr)); 1605 if (copyin(ext_ptr, kern_ptr, len_to_copy)!= 0){ 1606 ctl_set_internal_failure(ctsio, 1607 /*sks_valid*/ 0, 1608 /*retry_count*/0); 1609 goto bailout; 1610 } 1611 } 1612 1613 len_copied += len_to_copy; 1614 1615 if (ext_sglist[i].len == ext_watermark) { 1616 i++; 1617 ext_watermark = 0; 1618 } 1619 1620 if (kern_sglist[j].len == kern_watermark) { 1621 j++; 1622 kern_watermark = 0; 1623 } 1624 } 1625 1626 ctsio->ext_data_filled += len_copied; 1627 1628 CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove: ext_sg_entries: %d, " 1629 "kern_sg_entries: %d\n", ext_sg_entries, 1630 kern_sg_entries)); 1631 CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove: ext_data_len = %d, " 1632 "kern_data_len = %d\n", ctsio->ext_data_len, 1633 ctsio->kern_data_len)); 1634 1635 1636 /* XXX KDM set residual?? */ 1637 bailout: 1638 1639 if (ext_sglist_malloced != 0) 1640 free(ext_sglist, M_CTL); 1641 1642 return (CTL_RETVAL_COMPLETE); 1643 } 1644 1645 /* 1646 * Serialize a command that went down the "wrong" side, and so was sent to 1647 * this controller for execution. The logic is a little different than the 1648 * standard case in ctl_scsiio_precheck(). Errors in this case need to get 1649 * sent back to the other side, but in the success case, we execute the 1650 * command on this side (XFER mode) or tell the other side to execute it 1651 * (SER_ONLY mode). 1652 */ 1653 static int 1654 ctl_serialize_other_sc_cmd(struct ctl_scsiio *ctsio, int have_lock) 1655 { 1656 struct ctl_softc *ctl_softc; 1657 union ctl_ha_msg msg_info; 1658 struct ctl_lun *lun; 1659 int retval = 0; 1660 1661 ctl_softc = control_softc; 1662 if (have_lock == 0) 1663 mtx_lock(&ctl_softc->ctl_lock); 1664 1665 lun = ctl_softc->ctl_luns[ctsio->io_hdr.nexus.targ_lun]; 1666 if (lun==NULL) 1667 { 1668 /* 1669 * Why isn't LUN defined? The other side wouldn't 1670 * send a cmd if the LUN is undefined. 
1671 */ 1672 printf("%s: Bad JUJU!, LUN is NULL!\n", __func__); 1673 1674 /* "Logical unit not supported" */ 1675 ctl_set_sense_data(&msg_info.scsi.sense_data, 1676 lun, 1677 /*sense_format*/SSD_TYPE_NONE, 1678 /*current_error*/ 1, 1679 /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST, 1680 /*asc*/ 0x25, 1681 /*ascq*/ 0x00, 1682 SSD_ELEM_NONE); 1683 1684 msg_info.scsi.sense_len = SSD_FULL_SIZE; 1685 msg_info.scsi.scsi_status = SCSI_STATUS_CHECK_COND; 1686 msg_info.hdr.status = CTL_SCSI_ERROR | CTL_AUTOSENSE; 1687 msg_info.hdr.original_sc = ctsio->io_hdr.original_sc; 1688 msg_info.hdr.serializing_sc = NULL; 1689 msg_info.hdr.msg_type = CTL_MSG_BAD_JUJU; 1690 if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 1691 sizeof(msg_info), 0 ) > CTL_HA_STATUS_SUCCESS) { 1692 } 1693 if (have_lock == 0) 1694 mtx_unlock(&ctl_softc->ctl_lock); 1695 return(1); 1696 1697 } 1698 1699 TAILQ_INSERT_TAIL(&lun->ooa_queue, &ctsio->io_hdr, ooa_links); 1700 1701 switch (ctl_check_ooa(lun, (union ctl_io *)ctsio, 1702 (union ctl_io *)TAILQ_PREV(&ctsio->io_hdr, ctl_ooaq, 1703 ooa_links))) { 1704 case CTL_ACTION_BLOCK: 1705 ctsio->io_hdr.flags |= CTL_FLAG_BLOCKED; 1706 TAILQ_INSERT_TAIL(&lun->blocked_queue, &ctsio->io_hdr, 1707 blocked_links); 1708 break; 1709 case CTL_ACTION_PASS: 1710 case CTL_ACTION_SKIP: 1711 if (ctl_softc->ha_mode == CTL_HA_MODE_XFER) { 1712 ctsio->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR; 1713 STAILQ_INSERT_TAIL(&ctl_softc->rtr_queue, 1714 &ctsio->io_hdr, links); 1715 } else { 1716 1717 /* send msg back to other side */ 1718 msg_info.hdr.original_sc = ctsio->io_hdr.original_sc; 1719 msg_info.hdr.serializing_sc = (union ctl_io *)ctsio; 1720 msg_info.hdr.msg_type = CTL_MSG_R2R; 1721 #if 0 1722 printf("2. pOrig %x\n", (int)msg_info.hdr.original_sc); 1723 #endif 1724 if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 1725 sizeof(msg_info), 0 ) > CTL_HA_STATUS_SUCCESS) { 1726 } 1727 } 1728 break; 1729 case CTL_ACTION_OVERLAP: 1730 /* OVERLAPPED COMMANDS ATTEMPTED */ 1731 ctl_set_sense_data(&msg_info.scsi.sense_data, 1732 lun, 1733 /*sense_format*/SSD_TYPE_NONE, 1734 /*current_error*/ 1, 1735 /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST, 1736 /*asc*/ 0x4E, 1737 /*ascq*/ 0x00, 1738 SSD_ELEM_NONE); 1739 1740 msg_info.scsi.sense_len = SSD_FULL_SIZE; 1741 msg_info.scsi.scsi_status = SCSI_STATUS_CHECK_COND; 1742 msg_info.hdr.status = CTL_SCSI_ERROR | CTL_AUTOSENSE; 1743 msg_info.hdr.original_sc = ctsio->io_hdr.original_sc; 1744 msg_info.hdr.serializing_sc = NULL; 1745 msg_info.hdr.msg_type = CTL_MSG_BAD_JUJU; 1746 #if 0 1747 printf("BAD JUJU:Major Bummer Overlap\n"); 1748 #endif 1749 TAILQ_REMOVE(&lun->ooa_queue, &ctsio->io_hdr, ooa_links); 1750 retval = 1; 1751 if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 1752 sizeof(msg_info), 0 ) > CTL_HA_STATUS_SUCCESS) { 1753 } 1754 break; 1755 case CTL_ACTION_OVERLAP_TAG: 1756 /* TAGGED OVERLAPPED COMMANDS (NN = QUEUE TAG) */ 1757 ctl_set_sense_data(&msg_info.scsi.sense_data, 1758 lun, 1759 /*sense_format*/SSD_TYPE_NONE, 1760 /*current_error*/ 1, 1761 /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST, 1762 /*asc*/ 0x4D, 1763 /*ascq*/ ctsio->tag_num & 0xff, 1764 SSD_ELEM_NONE); 1765 1766 msg_info.scsi.sense_len = SSD_FULL_SIZE; 1767 msg_info.scsi.scsi_status = SCSI_STATUS_CHECK_COND; 1768 msg_info.hdr.status = CTL_SCSI_ERROR | CTL_AUTOSENSE; 1769 msg_info.hdr.original_sc = ctsio->io_hdr.original_sc; 1770 msg_info.hdr.serializing_sc = NULL; 1771 msg_info.hdr.msg_type = CTL_MSG_BAD_JUJU; 1772 #if 0 1773 printf("BAD JUJU:Major Bummer Overlap Tag\n"); 1774 #endif 1775 TAILQ_REMOVE(&lun->ooa_queue, 
		    &ctsio->io_hdr, ooa_links);
		retval = 1;
		if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
		    sizeof(msg_info), 0) > CTL_HA_STATUS_SUCCESS) {
		}
		break;
	case CTL_ACTION_ERROR:
	default:
		/* "Internal target failure" */
		ctl_set_sense_data(&msg_info.scsi.sense_data,
				   lun,
				   /*sense_format*/SSD_TYPE_NONE,
				   /*current_error*/ 1,
				   /*sense_key*/ SSD_KEY_HARDWARE_ERROR,
				   /*asc*/ 0x44,
				   /*ascq*/ 0x00,
				   SSD_ELEM_NONE);

		msg_info.scsi.sense_len = SSD_FULL_SIZE;
		msg_info.scsi.scsi_status = SCSI_STATUS_CHECK_COND;
		msg_info.hdr.status = CTL_SCSI_ERROR | CTL_AUTOSENSE;
		msg_info.hdr.original_sc = ctsio->io_hdr.original_sc;
		msg_info.hdr.serializing_sc = NULL;
		msg_info.hdr.msg_type = CTL_MSG_BAD_JUJU;
#if 0
		printf("BAD JUJU:Major Bummer HW Error\n");
#endif
		TAILQ_REMOVE(&lun->ooa_queue, &ctsio->io_hdr, ooa_links);
		retval = 1;
		if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
		    sizeof(msg_info), 0) > CTL_HA_STATUS_SUCCESS) {
		}
		break;
	}
	if (have_lock == 0)
		mtx_unlock(&ctl_softc->ctl_lock);
	return (retval);
}

static int
ctl_ioctl_submit_wait(union ctl_io *io)
{
	struct ctl_fe_ioctl_params params;
	ctl_fe_ioctl_state last_state;
	int done, retval;

	retval = 0;

	bzero(&params, sizeof(params));

	mtx_init(&params.ioctl_mtx, "ctliocmtx", NULL, MTX_DEF);
	cv_init(&params.sem, "ctlioccv");
	params.state = CTL_IOCTL_INPROG;
	last_state = params.state;

	io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = &params;

	CTL_DEBUG_PRINT(("ctl_ioctl_submit_wait\n"));

	/* This shouldn't happen */
	if ((retval = ctl_queue(io)) != CTL_RETVAL_COMPLETE)
		return (retval);

	done = 0;

	do {
		mtx_lock(&params.ioctl_mtx);
		/*
		 * Check the state here, and don't sleep if the state has
		 * already changed (i.e. wakeup has already occurred, but we
		 * weren't waiting yet).
		 */
		if (params.state == last_state) {
			/* XXX KDM cv_wait_sig instead? */
			cv_wait(&params.sem, &params.ioctl_mtx);
		}
		last_state = params.state;

		switch (params.state) {
		case CTL_IOCTL_INPROG:
			/* Why did we wake up? */
			/* XXX KDM error here? */
			mtx_unlock(&params.ioctl_mtx);
			break;
		case CTL_IOCTL_DATAMOVE:
			CTL_DEBUG_PRINT(("got CTL_IOCTL_DATAMOVE\n"));

			/*
			 * change last_state back to INPROG to avoid
			 * deadlock on subsequent data moves.
			 */
			params.state = last_state = CTL_IOCTL_INPROG;

			mtx_unlock(&params.ioctl_mtx);
			ctl_ioctl_do_datamove(&io->scsiio);
			/*
			 * Note that in some cases, most notably writes,
			 * this will queue the I/O and call us back later.
			 * In other cases, generally reads, this routine
			 * will immediately call back and wake us up,
			 * probably using our own context.
			 */
			io->scsiio.be_move_done(io);
			break;
		case CTL_IOCTL_DONE:
			mtx_unlock(&params.ioctl_mtx);
			CTL_DEBUG_PRINT(("got CTL_IOCTL_DONE\n"));
			done = 1;
			break;
		default:
			mtx_unlock(&params.ioctl_mtx);
			/* XXX KDM error here?
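			   An unexpected state just falls out of the switch and
			   we go back to waiting.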
			 */
			break;
		}
	} while (done == 0);

	mtx_destroy(&params.ioctl_mtx);
	cv_destroy(&params.sem);

	return (CTL_RETVAL_COMPLETE);
}

static void
ctl_ioctl_datamove(union ctl_io *io)
{
	struct ctl_fe_ioctl_params *params;

	params = (struct ctl_fe_ioctl_params *)
		io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr;

	mtx_lock(&params->ioctl_mtx);
	params->state = CTL_IOCTL_DATAMOVE;
	cv_broadcast(&params->sem);
	mtx_unlock(&params->ioctl_mtx);
}

static void
ctl_ioctl_done(union ctl_io *io)
{
	struct ctl_fe_ioctl_params *params;

	params = (struct ctl_fe_ioctl_params *)
		io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr;

	mtx_lock(&params->ioctl_mtx);
	params->state = CTL_IOCTL_DONE;
	cv_broadcast(&params->sem);
	mtx_unlock(&params->ioctl_mtx);
}

static void
ctl_ioctl_hard_startstop_callback(void *arg, struct cfi_metatask *metatask)
{
	struct ctl_fe_ioctl_startstop_info *sd_info;

	sd_info = (struct ctl_fe_ioctl_startstop_info *)arg;

	sd_info->hs_info.status = metatask->status;
	sd_info->hs_info.total_luns = metatask->taskinfo.startstop.total_luns;
	sd_info->hs_info.luns_complete =
		metatask->taskinfo.startstop.luns_complete;
	sd_info->hs_info.luns_failed = metatask->taskinfo.startstop.luns_failed;

	cv_broadcast(&sd_info->sem);
}

static void
ctl_ioctl_bbrread_callback(void *arg, struct cfi_metatask *metatask)
{
	struct ctl_fe_ioctl_bbrread_info *fe_bbr_info;

	fe_bbr_info = (struct ctl_fe_ioctl_bbrread_info *)arg;

	mtx_lock(fe_bbr_info->lock);
	fe_bbr_info->bbr_info->status = metatask->status;
	fe_bbr_info->bbr_info->bbr_status = metatask->taskinfo.bbrread.status;
	fe_bbr_info->wakeup_done = 1;
	mtx_unlock(fe_bbr_info->lock);

	cv_broadcast(&fe_bbr_info->sem);
}

/*
 * Must be called with the ctl_lock held.
 * Returns 0 for success, errno for failure.
 */
static int
ctl_ioctl_fill_ooa(struct ctl_lun *lun, uint32_t *cur_fill_num,
		   struct ctl_ooa *ooa_hdr)
{
	union ctl_io *io;
	int retval;

	retval = 0;

	for (io = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); (io != NULL);
	     (*cur_fill_num)++, io = (union ctl_io *)TAILQ_NEXT(&io->io_hdr,
	     ooa_links)) {
		struct ctl_ooa_entry *cur_entry, entry;

		/*
		 * If we've got more than we can fit, just count the
		 * remaining entries.
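		 * The caller uses the final count to report how many
		 * entries were dropped.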
1978 */ 1979 if (*cur_fill_num >= ooa_hdr->alloc_num) 1980 continue; 1981 1982 cur_entry = &ooa_hdr->entries[*cur_fill_num]; 1983 1984 bzero(&entry, sizeof(entry)); 1985 1986 entry.tag_num = io->scsiio.tag_num; 1987 entry.lun_num = lun->lun; 1988 #ifdef CTL_TIME_IO 1989 entry.start_bt = io->io_hdr.start_bt; 1990 #endif 1991 bcopy(io->scsiio.cdb, entry.cdb, io->scsiio.cdb_len); 1992 entry.cdb_len = io->scsiio.cdb_len; 1993 if (io->io_hdr.flags & CTL_FLAG_BLOCKED) 1994 entry.cmd_flags |= CTL_OOACMD_FLAG_BLOCKED; 1995 1996 if (io->io_hdr.flags & CTL_FLAG_DMA_INPROG) 1997 entry.cmd_flags |= CTL_OOACMD_FLAG_DMA; 1998 1999 if (io->io_hdr.flags & CTL_FLAG_ABORT) 2000 entry.cmd_flags |= CTL_OOACMD_FLAG_ABORT; 2001 2002 if (io->io_hdr.flags & CTL_FLAG_IS_WAS_ON_RTR) 2003 entry.cmd_flags |= CTL_OOACMD_FLAG_RTR; 2004 2005 if (io->io_hdr.flags & CTL_FLAG_DMA_QUEUED) 2006 entry.cmd_flags |= CTL_OOACMD_FLAG_DMA_QUEUED; 2007 2008 retval = copyout(&entry, cur_entry, sizeof(entry)); 2009 2010 if (retval != 0) 2011 break; 2012 } 2013 2014 return (retval); 2015 } 2016 2017 static void * 2018 ctl_copyin_alloc(void *user_addr, int len, char *error_str, 2019 size_t error_str_len) 2020 { 2021 void *kptr; 2022 2023 kptr = malloc(len, M_CTL, M_WAITOK | M_ZERO); 2024 2025 if (copyin(user_addr, kptr, len) != 0) { 2026 snprintf(error_str, error_str_len, "Error copying %d bytes " 2027 "from user address %p to kernel address %p", len, 2028 user_addr, kptr); 2029 free(kptr, M_CTL); 2030 return (NULL); 2031 } 2032 2033 return (kptr); 2034 } 2035 2036 static void 2037 ctl_free_args(int num_be_args, struct ctl_be_arg *be_args) 2038 { 2039 int i; 2040 2041 if (be_args == NULL) 2042 return; 2043 2044 for (i = 0; i < num_be_args; i++) { 2045 free(be_args[i].kname, M_CTL); 2046 free(be_args[i].kvalue, M_CTL); 2047 } 2048 2049 free(be_args, M_CTL); 2050 } 2051 2052 static struct ctl_be_arg * 2053 ctl_copyin_args(int num_be_args, struct ctl_be_arg *be_args, 2054 char *error_str, size_t error_str_len) 2055 { 2056 struct ctl_be_arg *args; 2057 int i; 2058 2059 args = ctl_copyin_alloc(be_args, num_be_args * sizeof(*be_args), 2060 error_str, error_str_len); 2061 2062 if (args == NULL) 2063 goto bailout; 2064 2065 for (i = 0; i < num_be_args; i++) { 2066 args[i].kname = NULL; 2067 args[i].kvalue = NULL; 2068 } 2069 2070 for (i = 0; i < num_be_args; i++) { 2071 uint8_t *tmpptr; 2072 2073 args[i].kname = ctl_copyin_alloc(args[i].name, 2074 args[i].namelen, error_str, error_str_len); 2075 if (args[i].kname == NULL) 2076 goto bailout; 2077 2078 if (args[i].kname[args[i].namelen - 1] != '\0') { 2079 snprintf(error_str, error_str_len, "Argument %d " 2080 "name is not NUL-terminated", i); 2081 goto bailout; 2082 } 2083 2084 args[i].kvalue = NULL; 2085 2086 tmpptr = ctl_copyin_alloc(args[i].value, 2087 args[i].vallen, error_str, error_str_len); 2088 if (tmpptr == NULL) 2089 goto bailout; 2090 2091 args[i].kvalue = tmpptr; 2092 2093 if ((args[i].flags & CTL_BEARG_ASCII) 2094 && (tmpptr[args[i].vallen - 1] != '\0')) { 2095 snprintf(error_str, error_str_len, "Argument %d " 2096 "value is not NUL-terminated", i); 2097 goto bailout; 2098 } 2099 } 2100 2101 return (args); 2102 bailout: 2103 2104 ctl_free_args(num_be_args, args); 2105 2106 return (NULL); 2107 } 2108 2109 /* 2110 * Escape characters that are illegal or not recommended in XML. 
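 * Only '&', '<' and '>' are rewritten here; all other characters are
 * passed through unchanged.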
 */
int
ctl_sbuf_printf_esc(struct sbuf *sb, char *str)
{
	int retval;

	retval = 0;

	for (; *str; str++) {
		switch (*str) {
		case '&':
			retval = sbuf_printf(sb, "&amp;");
			break;
		case '>':
			retval = sbuf_printf(sb, "&gt;");
			break;
		case '<':
			retval = sbuf_printf(sb, "&lt;");
			break;
		default:
			retval = sbuf_putc(sb, *str);
			break;
		}

		if (retval != 0)
			break;

	}

	return (retval);
}

static int
ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
	  struct thread *td)
{
	struct ctl_softc *softc;
	int retval;

	softc = control_softc;

	retval = 0;

	switch (cmd) {
	case CTL_IO: {
		union ctl_io *io;
		void *pool_tmp;

		/*
		 * If we haven't been "enabled", don't allow any SCSI I/O
		 * to this FETD.
		 */
		if ((softc->ioctl_info.flags & CTL_IOCTL_FLAG_ENABLED) == 0) {
			retval = -EPERM;
			break;
		}

		io = ctl_alloc_io(softc->ioctl_info.fe.ctl_pool_ref);
		if (io == NULL) {
			printf("ctl_ioctl: can't allocate ctl_io!\n");
			retval = -ENOSPC;
			break;
		}

		/*
		 * Need to save the pool reference so it doesn't get
		 * spammed by the user's ctl_io.
		 */
		pool_tmp = io->io_hdr.pool;

		memcpy(io, (void *)addr, sizeof(*io));

		io->io_hdr.pool = pool_tmp;
		/*
		 * No status yet, so make sure the status is set properly.
		 */
		io->io_hdr.status = CTL_STATUS_NONE;

		/*
		 * The user sets the initiator ID, target and LUN IDs.
		 */
		io->io_hdr.nexus.targ_port = softc->ioctl_info.fe.targ_port;
		io->io_hdr.flags |= CTL_FLAG_USER_REQ;
		if ((io->io_hdr.io_type == CTL_IO_SCSI)
		 && (io->scsiio.tag_type != CTL_TAG_UNTAGGED))
			io->scsiio.tag_num = softc->ioctl_info.cur_tag_num++;

		retval = ctl_ioctl_submit_wait(io);

		if (retval != 0) {
			ctl_free_io(io);
			break;
		}

		memcpy((void *)addr, io, sizeof(*io));

		/* return this to our pool */
		ctl_free_io(io);

		break;
	}
	case CTL_ENABLE_PORT:
	case CTL_DISABLE_PORT:
	case CTL_SET_PORT_WWNS: {
		struct ctl_frontend *fe;
		struct ctl_port_entry *entry;

		entry = (struct ctl_port_entry *)addr;

		mtx_lock(&softc->ctl_lock);
		STAILQ_FOREACH(fe, &softc->fe_list, links) {
			int action, done;

			action = 0;
			done = 0;

			if ((entry->port_type == CTL_PORT_NONE)
			 && (entry->targ_port == fe->targ_port)) {
				/*
				 * If the user only wants to enable or
				 * disable or set WWNs on a specific port,
				 * do the operation and we're done.
				 */
				action = 1;
				done = 1;
			} else if (entry->port_type & fe->port_type) {
				/*
				 * Compare the user's type mask with the
				 * particular frontend type to see if we
				 * have a match.
				 */
				action = 1;
				done = 0;

				/*
				 * Make sure the user isn't trying to set
				 * WWNs on multiple ports at the same time.
				 */
				if (cmd == CTL_SET_PORT_WWNS) {
					printf("%s: Can't set WWNs on "
					       "multiple ports\n", __func__);
					retval = EINVAL;
					break;
				}
			}
			if (action != 0) {
				/*
				 * XXX KDM we have to drop the lock here,
				 * because the online/offline operations
				 * can potentially block.
We need to 2261 * reference count the frontends so they 2262 * can't go away, 2263 */ 2264 mtx_unlock(&softc->ctl_lock); 2265 2266 if (cmd == CTL_ENABLE_PORT) 2267 ctl_frontend_online(fe); 2268 else if (cmd == CTL_DISABLE_PORT) 2269 ctl_frontend_offline(fe); 2270 2271 mtx_lock(&softc->ctl_lock); 2272 2273 if (cmd == CTL_SET_PORT_WWNS) 2274 ctl_frontend_set_wwns(fe, 2275 (entry->flags & CTL_PORT_WWNN_VALID) ? 2276 1 : 0, entry->wwnn, 2277 (entry->flags & CTL_PORT_WWPN_VALID) ? 2278 1 : 0, entry->wwpn); 2279 } 2280 if (done != 0) 2281 break; 2282 } 2283 mtx_unlock(&softc->ctl_lock); 2284 break; 2285 } 2286 case CTL_GET_PORT_LIST: { 2287 struct ctl_frontend *fe; 2288 struct ctl_port_list *list; 2289 int i; 2290 2291 list = (struct ctl_port_list *)addr; 2292 2293 if (list->alloc_len != (list->alloc_num * 2294 sizeof(struct ctl_port_entry))) { 2295 printf("%s: CTL_GET_PORT_LIST: alloc_len %u != " 2296 "alloc_num %u * sizeof(struct ctl_port_entry) " 2297 "%zu\n", __func__, list->alloc_len, 2298 list->alloc_num, sizeof(struct ctl_port_entry)); 2299 retval = EINVAL; 2300 break; 2301 } 2302 list->fill_len = 0; 2303 list->fill_num = 0; 2304 list->dropped_num = 0; 2305 i = 0; 2306 mtx_lock(&softc->ctl_lock); 2307 STAILQ_FOREACH(fe, &softc->fe_list, links) { 2308 struct ctl_port_entry entry, *list_entry; 2309 2310 if (list->fill_num >= list->alloc_num) { 2311 list->dropped_num++; 2312 continue; 2313 } 2314 2315 entry.port_type = fe->port_type; 2316 strlcpy(entry.port_name, fe->port_name, 2317 sizeof(entry.port_name)); 2318 entry.targ_port = fe->targ_port; 2319 entry.physical_port = fe->physical_port; 2320 entry.virtual_port = fe->virtual_port; 2321 entry.wwnn = fe->wwnn; 2322 entry.wwpn = fe->wwpn; 2323 if (fe->status & CTL_PORT_STATUS_ONLINE) 2324 entry.online = 1; 2325 else 2326 entry.online = 0; 2327 2328 list_entry = &list->entries[i]; 2329 2330 retval = copyout(&entry, list_entry, sizeof(entry)); 2331 if (retval != 0) { 2332 printf("%s: CTL_GET_PORT_LIST: copyout " 2333 "returned %d\n", __func__, retval); 2334 break; 2335 } 2336 i++; 2337 list->fill_num++; 2338 list->fill_len += sizeof(entry); 2339 } 2340 mtx_unlock(&softc->ctl_lock); 2341 2342 /* 2343 * If this is non-zero, we had a copyout fault, so there's 2344 * probably no point in attempting to set the status inside 2345 * the structure. 2346 */ 2347 if (retval != 0) 2348 break; 2349 2350 if (list->dropped_num > 0) 2351 list->status = CTL_PORT_LIST_NEED_MORE_SPACE; 2352 else 2353 list->status = CTL_PORT_LIST_OK; 2354 break; 2355 } 2356 case CTL_DUMP_OOA: { 2357 struct ctl_lun *lun; 2358 union ctl_io *io; 2359 char printbuf[128]; 2360 struct sbuf sb; 2361 2362 mtx_lock(&softc->ctl_lock); 2363 printf("Dumping OOA queues:\n"); 2364 STAILQ_FOREACH(lun, &softc->lun_list, links) { 2365 for (io = (union ctl_io *)TAILQ_FIRST( 2366 &lun->ooa_queue); io != NULL; 2367 io = (union ctl_io *)TAILQ_NEXT(&io->io_hdr, 2368 ooa_links)) { 2369 sbuf_new(&sb, printbuf, sizeof(printbuf), 2370 SBUF_FIXEDLEN); 2371 sbuf_printf(&sb, "LUN %jd tag 0x%04x%s%s%s%s: ", 2372 (intmax_t)lun->lun, 2373 io->scsiio.tag_num, 2374 (io->io_hdr.flags & 2375 CTL_FLAG_BLOCKED) ? "" : " BLOCKED", 2376 (io->io_hdr.flags & 2377 CTL_FLAG_DMA_INPROG) ? " DMA" : "", 2378 (io->io_hdr.flags & 2379 CTL_FLAG_ABORT) ? " ABORT" : "", 2380 (io->io_hdr.flags & 2381 CTL_FLAG_IS_WAS_ON_RTR) ? 
" RTR" : ""); 2382 ctl_scsi_command_string(&io->scsiio, NULL, &sb); 2383 sbuf_finish(&sb); 2384 printf("%s\n", sbuf_data(&sb)); 2385 } 2386 } 2387 printf("OOA queues dump done\n"); 2388 mtx_unlock(&softc->ctl_lock); 2389 break; 2390 } 2391 case CTL_GET_OOA: { 2392 struct ctl_lun *lun; 2393 struct ctl_ooa *ooa_hdr; 2394 uint32_t cur_fill_num; 2395 2396 ooa_hdr = (struct ctl_ooa *)addr; 2397 2398 if ((ooa_hdr->alloc_len == 0) 2399 || (ooa_hdr->alloc_num == 0)) { 2400 printf("%s: CTL_GET_OOA: alloc len %u and alloc num %u " 2401 "must be non-zero\n", __func__, 2402 ooa_hdr->alloc_len, ooa_hdr->alloc_num); 2403 retval = EINVAL; 2404 break; 2405 } 2406 2407 if (ooa_hdr->alloc_len != (ooa_hdr->alloc_num * 2408 sizeof(struct ctl_ooa_entry))) { 2409 printf("%s: CTL_GET_OOA: alloc len %u must be alloc " 2410 "num %d * sizeof(struct ctl_ooa_entry) %zd\n", 2411 __func__, ooa_hdr->alloc_len, 2412 ooa_hdr->alloc_num,sizeof(struct ctl_ooa_entry)); 2413 retval = EINVAL; 2414 break; 2415 } 2416 2417 mtx_lock(&softc->ctl_lock); 2418 if (((ooa_hdr->flags & CTL_OOA_FLAG_ALL_LUNS) == 0) 2419 && ((ooa_hdr->lun_num > CTL_MAX_LUNS) 2420 || (softc->ctl_luns[ooa_hdr->lun_num] == NULL))) { 2421 mtx_unlock(&softc->ctl_lock); 2422 printf("%s: CTL_GET_OOA: invalid LUN %ju\n", 2423 __func__, (uintmax_t)ooa_hdr->lun_num); 2424 retval = EINVAL; 2425 break; 2426 } 2427 2428 cur_fill_num = 0; 2429 2430 if (ooa_hdr->flags & CTL_OOA_FLAG_ALL_LUNS) { 2431 STAILQ_FOREACH(lun, &softc->lun_list, links) { 2432 retval = ctl_ioctl_fill_ooa(lun, &cur_fill_num, 2433 ooa_hdr); 2434 if (retval != 0) 2435 break; 2436 } 2437 if (retval != 0) { 2438 mtx_unlock(&softc->ctl_lock); 2439 break; 2440 } 2441 } else { 2442 lun = softc->ctl_luns[ooa_hdr->lun_num]; 2443 2444 retval = ctl_ioctl_fill_ooa(lun, &cur_fill_num,ooa_hdr); 2445 } 2446 mtx_unlock(&softc->ctl_lock); 2447 2448 ooa_hdr->fill_num = min(cur_fill_num, ooa_hdr->alloc_num); 2449 ooa_hdr->fill_len = ooa_hdr->fill_num * 2450 sizeof(struct ctl_ooa_entry); 2451 2452 getbintime(&ooa_hdr->cur_bt); 2453 2454 if (cur_fill_num > ooa_hdr->alloc_num) { 2455 ooa_hdr->dropped_num = cur_fill_num -ooa_hdr->alloc_num; 2456 ooa_hdr->status = CTL_OOA_NEED_MORE_SPACE; 2457 } else { 2458 ooa_hdr->dropped_num = 0; 2459 ooa_hdr->status = CTL_OOA_OK; 2460 } 2461 break; 2462 } 2463 case CTL_CHECK_OOA: { 2464 union ctl_io *io; 2465 struct ctl_lun *lun; 2466 struct ctl_ooa_info *ooa_info; 2467 2468 2469 ooa_info = (struct ctl_ooa_info *)addr; 2470 2471 if (ooa_info->lun_id >= CTL_MAX_LUNS) { 2472 ooa_info->status = CTL_OOA_INVALID_LUN; 2473 break; 2474 } 2475 mtx_lock(&softc->ctl_lock); 2476 lun = softc->ctl_luns[ooa_info->lun_id]; 2477 if (lun == NULL) { 2478 mtx_unlock(&softc->ctl_lock); 2479 ooa_info->status = CTL_OOA_INVALID_LUN; 2480 break; 2481 } 2482 2483 ooa_info->num_entries = 0; 2484 for (io = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); 2485 io != NULL; io = (union ctl_io *)TAILQ_NEXT( 2486 &io->io_hdr, ooa_links)) { 2487 ooa_info->num_entries++; 2488 } 2489 2490 mtx_unlock(&softc->ctl_lock); 2491 ooa_info->status = CTL_OOA_SUCCESS; 2492 2493 break; 2494 } 2495 case CTL_HARD_START: 2496 case CTL_HARD_STOP: { 2497 struct ctl_fe_ioctl_startstop_info ss_info; 2498 struct cfi_metatask *metatask; 2499 struct mtx hs_mtx; 2500 2501 mtx_init(&hs_mtx, "HS Mutex", NULL, MTX_DEF); 2502 2503 cv_init(&ss_info.sem, "hard start/stop cv" ); 2504 2505 metatask = cfi_alloc_metatask(/*can_wait*/ 1); 2506 if (metatask == NULL) { 2507 retval = ENOMEM; 2508 mtx_destroy(&hs_mtx); 2509 break; 2510 } 2511 2512 if (cmd 
== CTL_HARD_START) 2513 metatask->tasktype = CFI_TASK_STARTUP; 2514 else 2515 metatask->tasktype = CFI_TASK_SHUTDOWN; 2516 2517 metatask->callback = ctl_ioctl_hard_startstop_callback; 2518 metatask->callback_arg = &ss_info; 2519 2520 cfi_action(metatask); 2521 2522 /* Wait for the callback */ 2523 mtx_lock(&hs_mtx); 2524 cv_wait_sig(&ss_info.sem, &hs_mtx); 2525 mtx_unlock(&hs_mtx); 2526 2527 /* 2528 * All information has been copied from the metatask by the 2529 * time cv_broadcast() is called, so we free the metatask here. 2530 */ 2531 cfi_free_metatask(metatask); 2532 2533 memcpy((void *)addr, &ss_info.hs_info, sizeof(ss_info.hs_info)); 2534 2535 mtx_destroy(&hs_mtx); 2536 break; 2537 } 2538 case CTL_BBRREAD: { 2539 struct ctl_bbrread_info *bbr_info; 2540 struct ctl_fe_ioctl_bbrread_info fe_bbr_info; 2541 struct mtx bbr_mtx; 2542 struct cfi_metatask *metatask; 2543 2544 bbr_info = (struct ctl_bbrread_info *)addr; 2545 2546 bzero(&fe_bbr_info, sizeof(fe_bbr_info)); 2547 2548 bzero(&bbr_mtx, sizeof(bbr_mtx)); 2549 mtx_init(&bbr_mtx, "BBR Mutex", NULL, MTX_DEF); 2550 2551 fe_bbr_info.bbr_info = bbr_info; 2552 fe_bbr_info.lock = &bbr_mtx; 2553 2554 cv_init(&fe_bbr_info.sem, "BBR read cv"); 2555 metatask = cfi_alloc_metatask(/*can_wait*/ 1); 2556 2557 if (metatask == NULL) { 2558 mtx_destroy(&bbr_mtx); 2559 cv_destroy(&fe_bbr_info.sem); 2560 retval = ENOMEM; 2561 break; 2562 } 2563 metatask->tasktype = CFI_TASK_BBRREAD; 2564 metatask->callback = ctl_ioctl_bbrread_callback; 2565 metatask->callback_arg = &fe_bbr_info; 2566 metatask->taskinfo.bbrread.lun_num = bbr_info->lun_num; 2567 metatask->taskinfo.bbrread.lba = bbr_info->lba; 2568 metatask->taskinfo.bbrread.len = bbr_info->len; 2569 2570 cfi_action(metatask); 2571 2572 mtx_lock(&bbr_mtx); 2573 while (fe_bbr_info.wakeup_done == 0) 2574 cv_wait_sig(&fe_bbr_info.sem, &bbr_mtx); 2575 mtx_unlock(&bbr_mtx); 2576 2577 bbr_info->status = metatask->status; 2578 bbr_info->bbr_status = metatask->taskinfo.bbrread.status; 2579 bbr_info->scsi_status = metatask->taskinfo.bbrread.scsi_status; 2580 memcpy(&bbr_info->sense_data, 2581 &metatask->taskinfo.bbrread.sense_data, 2582 ctl_min(sizeof(bbr_info->sense_data), 2583 sizeof(metatask->taskinfo.bbrread.sense_data))); 2584 2585 cfi_free_metatask(metatask); 2586 2587 mtx_destroy(&bbr_mtx); 2588 cv_destroy(&fe_bbr_info.sem); 2589 2590 break; 2591 } 2592 case CTL_DELAY_IO: { 2593 struct ctl_io_delay_info *delay_info; 2594 #ifdef CTL_IO_DELAY 2595 struct ctl_lun *lun; 2596 #endif /* CTL_IO_DELAY */ 2597 2598 delay_info = (struct ctl_io_delay_info *)addr; 2599 2600 #ifdef CTL_IO_DELAY 2601 mtx_lock(&softc->ctl_lock); 2602 2603 if ((delay_info->lun_id > CTL_MAX_LUNS) 2604 || (softc->ctl_luns[delay_info->lun_id] == NULL)) { 2605 delay_info->status = CTL_DELAY_STATUS_INVALID_LUN; 2606 } else { 2607 lun = softc->ctl_luns[delay_info->lun_id]; 2608 2609 delay_info->status = CTL_DELAY_STATUS_OK; 2610 2611 switch (delay_info->delay_type) { 2612 case CTL_DELAY_TYPE_CONT: 2613 break; 2614 case CTL_DELAY_TYPE_ONESHOT: 2615 break; 2616 default: 2617 delay_info->status = 2618 CTL_DELAY_STATUS_INVALID_TYPE; 2619 break; 2620 } 2621 2622 switch (delay_info->delay_loc) { 2623 case CTL_DELAY_LOC_DATAMOVE: 2624 lun->delay_info.datamove_type = 2625 delay_info->delay_type; 2626 lun->delay_info.datamove_delay = 2627 delay_info->delay_secs; 2628 break; 2629 case CTL_DELAY_LOC_DONE: 2630 lun->delay_info.done_type = 2631 delay_info->delay_type; 2632 lun->delay_info.done_delay = 2633 delay_info->delay_secs; 2634 break; 2635 default: 2636 
delay_info->status = 2637 CTL_DELAY_STATUS_INVALID_LOC; 2638 break; 2639 } 2640 } 2641 2642 mtx_unlock(&softc->ctl_lock); 2643 #else 2644 delay_info->status = CTL_DELAY_STATUS_NOT_IMPLEMENTED; 2645 #endif /* CTL_IO_DELAY */ 2646 break; 2647 } 2648 case CTL_REALSYNC_SET: { 2649 int *syncstate; 2650 2651 syncstate = (int *)addr; 2652 2653 mtx_lock(&softc->ctl_lock); 2654 switch (*syncstate) { 2655 case 0: 2656 softc->flags &= ~CTL_FLAG_REAL_SYNC; 2657 break; 2658 case 1: 2659 softc->flags |= CTL_FLAG_REAL_SYNC; 2660 break; 2661 default: 2662 retval = -EINVAL; 2663 break; 2664 } 2665 mtx_unlock(&softc->ctl_lock); 2666 break; 2667 } 2668 case CTL_REALSYNC_GET: { 2669 int *syncstate; 2670 2671 syncstate = (int*)addr; 2672 2673 mtx_lock(&softc->ctl_lock); 2674 if (softc->flags & CTL_FLAG_REAL_SYNC) 2675 *syncstate = 1; 2676 else 2677 *syncstate = 0; 2678 mtx_unlock(&softc->ctl_lock); 2679 2680 break; 2681 } 2682 case CTL_SETSYNC: 2683 case CTL_GETSYNC: { 2684 struct ctl_sync_info *sync_info; 2685 struct ctl_lun *lun; 2686 2687 sync_info = (struct ctl_sync_info *)addr; 2688 2689 mtx_lock(&softc->ctl_lock); 2690 lun = softc->ctl_luns[sync_info->lun_id]; 2691 if (lun == NULL) { 2692 mtx_unlock(&softc->ctl_lock); 2693 sync_info->status = CTL_GS_SYNC_NO_LUN; 2694 } 2695 /* 2696 * Get or set the sync interval. We're not bounds checking 2697 * in the set case, hopefully the user won't do something 2698 * silly. 2699 */ 2700 if (cmd == CTL_GETSYNC) 2701 sync_info->sync_interval = lun->sync_interval; 2702 else 2703 lun->sync_interval = sync_info->sync_interval; 2704 2705 mtx_unlock(&softc->ctl_lock); 2706 2707 sync_info->status = CTL_GS_SYNC_OK; 2708 2709 break; 2710 } 2711 case CTL_GETSTATS: { 2712 struct ctl_stats *stats; 2713 struct ctl_lun *lun; 2714 int i; 2715 2716 stats = (struct ctl_stats *)addr; 2717 2718 if ((sizeof(struct ctl_lun_io_stats) * softc->num_luns) > 2719 stats->alloc_len) { 2720 stats->status = CTL_SS_NEED_MORE_SPACE; 2721 stats->num_luns = softc->num_luns; 2722 break; 2723 } 2724 /* 2725 * XXX KDM no locking here. If the LUN list changes, 2726 * things can blow up. 2727 */ 2728 for (i = 0, lun = STAILQ_FIRST(&softc->lun_list); lun != NULL; 2729 i++, lun = STAILQ_NEXT(lun, links)) { 2730 retval = copyout(&lun->stats, &stats->lun_stats[i], 2731 sizeof(lun->stats)); 2732 if (retval != 0) 2733 break; 2734 } 2735 stats->num_luns = softc->num_luns; 2736 stats->fill_len = sizeof(struct ctl_lun_io_stats) * 2737 softc->num_luns; 2738 stats->status = CTL_SS_OK; 2739 #ifdef CTL_TIME_IO 2740 stats->flags = CTL_STATS_FLAG_TIME_VALID; 2741 #else 2742 stats->flags = CTL_STATS_FLAG_NONE; 2743 #endif 2744 getnanouptime(&stats->timestamp); 2745 break; 2746 } 2747 case CTL_ERROR_INJECT: { 2748 struct ctl_error_desc *err_desc, *new_err_desc; 2749 struct ctl_lun *lun; 2750 2751 err_desc = (struct ctl_error_desc *)addr; 2752 2753 new_err_desc = malloc(sizeof(*new_err_desc), M_CTL, 2754 M_WAITOK | M_ZERO); 2755 bcopy(err_desc, new_err_desc, sizeof(*new_err_desc)); 2756 2757 mtx_lock(&softc->ctl_lock); 2758 lun = softc->ctl_luns[err_desc->lun_id]; 2759 if (lun == NULL) { 2760 mtx_unlock(&softc->ctl_lock); 2761 printf("%s: CTL_ERROR_INJECT: invalid LUN %ju\n", 2762 __func__, (uintmax_t)err_desc->lun_id); 2763 retval = EINVAL; 2764 break; 2765 } 2766 2767 /* 2768 * We could do some checking here to verify the validity 2769 * of the request, but given the complexity of error 2770 * injection requests, the checking logic would be fairly 2771 * complex. 
2772 * 2773 * For now, if the request is invalid, it just won't get 2774 * executed and might get deleted. 2775 */ 2776 STAILQ_INSERT_TAIL(&lun->error_list, new_err_desc, links); 2777 2778 /* 2779 * XXX KDM check to make sure the serial number is unique, 2780 * in case we somehow manage to wrap. That shouldn't 2781 * happen for a very long time, but it's the right thing to 2782 * do. 2783 */ 2784 new_err_desc->serial = lun->error_serial; 2785 err_desc->serial = lun->error_serial; 2786 lun->error_serial++; 2787 2788 mtx_unlock(&softc->ctl_lock); 2789 break; 2790 } 2791 case CTL_ERROR_INJECT_DELETE: { 2792 struct ctl_error_desc *delete_desc, *desc, *desc2; 2793 struct ctl_lun *lun; 2794 int delete_done; 2795 2796 delete_desc = (struct ctl_error_desc *)addr; 2797 delete_done = 0; 2798 2799 mtx_lock(&softc->ctl_lock); 2800 lun = softc->ctl_luns[delete_desc->lun_id]; 2801 if (lun == NULL) { 2802 mtx_unlock(&softc->ctl_lock); 2803 printf("%s: CTL_ERROR_INJECT_DELETE: invalid LUN %ju\n", 2804 __func__, (uintmax_t)delete_desc->lun_id); 2805 retval = EINVAL; 2806 break; 2807 } 2808 STAILQ_FOREACH_SAFE(desc, &lun->error_list, links, desc2) { 2809 if (desc->serial != delete_desc->serial) 2810 continue; 2811 2812 STAILQ_REMOVE(&lun->error_list, desc, ctl_error_desc, 2813 links); 2814 free(desc, M_CTL); 2815 delete_done = 1; 2816 } 2817 mtx_unlock(&softc->ctl_lock); 2818 if (delete_done == 0) { 2819 printf("%s: CTL_ERROR_INJECT_DELETE: can't find " 2820 "error serial %ju on LUN %u\n", __func__, 2821 delete_desc->serial, delete_desc->lun_id); 2822 retval = EINVAL; 2823 break; 2824 } 2825 break; 2826 } 2827 case CTL_DUMP_STRUCTS: { 2828 int i, j, k; 2829 struct ctl_frontend *fe; 2830 2831 printf("CTL IID to WWPN map start:\n"); 2832 for (i = 0; i < CTL_MAX_PORTS; i++) { 2833 for (j = 0; j < CTL_MAX_INIT_PER_PORT; j++) { 2834 if (softc->wwpn_iid[i][j].in_use == 0) 2835 continue; 2836 2837 printf("port %d iid %u WWPN %#jx\n", 2838 softc->wwpn_iid[i][j].port, 2839 softc->wwpn_iid[i][j].iid, 2840 (uintmax_t)softc->wwpn_iid[i][j].wwpn); 2841 } 2842 } 2843 printf("CTL IID to WWPN map end\n"); 2844 printf("CTL Persistent Reservation information start:\n"); 2845 for (i = 0; i < CTL_MAX_LUNS; i++) { 2846 struct ctl_lun *lun; 2847 2848 lun = softc->ctl_luns[i]; 2849 2850 if ((lun == NULL) 2851 || ((lun->flags & CTL_LUN_DISABLED) != 0)) 2852 continue; 2853 2854 for (j = 0; j < (CTL_MAX_PORTS * 2); j++) { 2855 for (k = 0; k < CTL_MAX_INIT_PER_PORT; k++){ 2856 if (lun->per_res[j+k].registered == 0) 2857 continue; 2858 printf("LUN %d port %d iid %d key " 2859 "%#jx\n", i, j, k, 2860 (uintmax_t)scsi_8btou64( 2861 lun->per_res[j+k].res_key.key)); 2862 } 2863 } 2864 } 2865 printf("CTL Persistent Reservation information end\n"); 2866 printf("CTL Frontends:\n"); 2867 /* 2868 * XXX KDM calling this without a lock. We'd likely want 2869 * to drop the lock before calling the frontend's dump 2870 * routine anyway. 2871 */ 2872 STAILQ_FOREACH(fe, &softc->fe_list, links) { 2873 printf("Frontend %s Type %u pport %d vport %d WWNN " 2874 "%#jx WWPN %#jx\n", fe->port_name, fe->port_type, 2875 fe->physical_port, fe->virtual_port, 2876 (uintmax_t)fe->wwnn, (uintmax_t)fe->wwpn); 2877 2878 /* 2879 * Frontends are not required to support the dump 2880 * routine. 
2881 */ 2882 if (fe->fe_dump == NULL) 2883 continue; 2884 2885 fe->fe_dump(); 2886 } 2887 printf("CTL Frontend information end\n"); 2888 break; 2889 } 2890 case CTL_LUN_REQ: { 2891 struct ctl_lun_req *lun_req; 2892 struct ctl_backend_driver *backend; 2893 2894 lun_req = (struct ctl_lun_req *)addr; 2895 2896 backend = ctl_backend_find(lun_req->backend); 2897 if (backend == NULL) { 2898 lun_req->status = CTL_LUN_ERROR; 2899 snprintf(lun_req->error_str, 2900 sizeof(lun_req->error_str), 2901 "Backend \"%s\" not found.", 2902 lun_req->backend); 2903 break; 2904 } 2905 if (lun_req->num_be_args > 0) { 2906 lun_req->kern_be_args = ctl_copyin_args( 2907 lun_req->num_be_args, 2908 lun_req->be_args, 2909 lun_req->error_str, 2910 sizeof(lun_req->error_str)); 2911 if (lun_req->kern_be_args == NULL) { 2912 lun_req->status = CTL_LUN_ERROR; 2913 break; 2914 } 2915 } 2916 2917 retval = backend->ioctl(dev, cmd, addr, flag, td); 2918 2919 if (lun_req->num_be_args > 0) { 2920 ctl_free_args(lun_req->num_be_args, 2921 lun_req->kern_be_args); 2922 } 2923 break; 2924 } 2925 case CTL_LUN_LIST: { 2926 struct sbuf *sb; 2927 struct ctl_lun *lun; 2928 struct ctl_lun_list *list; 2929 2930 list = (struct ctl_lun_list *)addr; 2931 2932 /* 2933 * Allocate a fixed length sbuf here, based on the length 2934 * of the user's buffer. We could allocate an auto-extending 2935 * buffer, and then tell the user how much larger our 2936 * amount of data is than his buffer, but that presents 2937 * some problems: 2938 * 2939 * 1. The sbuf(9) routines use a blocking malloc, and so 2940 * we can't hold a lock while calling them with an 2941 * auto-extending buffer. 2942 * 2943 * 2. There is not currently a LUN reference counting 2944 * mechanism, outside of outstanding transactions on 2945 * the LUN's OOA queue. So a LUN could go away on us 2946 * while we're getting the LUN number, backend-specific 2947 * information, etc. Thus, given the way things 2948 * currently work, we need to hold the CTL lock while 2949 * grabbing LUN information. 2950 * 2951 * So, from the user's standpoint, the best thing to do is 2952 * allocate what he thinks is a reasonable buffer length, 2953 * and then if he gets a CTL_LUN_LIST_NEED_MORE_SPACE error, 2954 * double the buffer length and try again. (And repeat 2955 * that until he succeeds.) 2956 */ 2957 sb = sbuf_new(NULL, NULL, list->alloc_len, SBUF_FIXEDLEN); 2958 if (sb == NULL) { 2959 list->status = CTL_LUN_LIST_ERROR; 2960 snprintf(list->error_str, sizeof(list->error_str), 2961 "Unable to allocate %d bytes for LUN list", 2962 list->alloc_len); 2963 break; 2964 } 2965 2966 sbuf_printf(sb, "<ctllunlist>\n"); 2967 2968 mtx_lock(&softc->ctl_lock); 2969 2970 STAILQ_FOREACH(lun, &softc->lun_list, links) { 2971 retval = sbuf_printf(sb, "<lun id=\"%ju\">\n", 2972 (uintmax_t)lun->lun); 2973 2974 /* 2975 * Bail out as soon as we see that we've overfilled 2976 * the buffer. 2977 */ 2978 if (retval != 0) 2979 break; 2980 2981 retval = sbuf_printf(sb, "<backend_type>%s" 2982 "</backend_type>\n", 2983 (lun->backend == NULL) ? "none" : 2984 lun->backend->name); 2985 2986 if (retval != 0) 2987 break; 2988 2989 retval = sbuf_printf(sb, "<lun_type>%d</lun_type>\n", 2990 lun->be_lun->lun_type); 2991 2992 if (retval != 0) 2993 break; 2994 2995 if (lun->backend == NULL) { 2996 retval = sbuf_printf(sb, "</lun>\n"); 2997 if (retval != 0) 2998 break; 2999 continue; 3000 } 3001 3002 retval = sbuf_printf(sb, "<size>%ju</size>\n", 3003 (lun->be_lun->maxlba > 0) ? 
3004 lun->be_lun->maxlba + 1 : 0); 3005 3006 if (retval != 0) 3007 break; 3008 3009 retval = sbuf_printf(sb, "<blocksize>%u</blocksize>\n", 3010 lun->be_lun->blocksize); 3011 3012 if (retval != 0) 3013 break; 3014 3015 retval = sbuf_printf(sb, "<serial_number>"); 3016 3017 if (retval != 0) 3018 break; 3019 3020 retval = ctl_sbuf_printf_esc(sb, 3021 lun->be_lun->serial_num); 3022 3023 if (retval != 0) 3024 break; 3025 3026 retval = sbuf_printf(sb, "</serial_number>\n"); 3027 3028 if (retval != 0) 3029 break; 3030 3031 retval = sbuf_printf(sb, "<device_id>"); 3032 3033 if (retval != 0) 3034 break; 3035 3036 retval = ctl_sbuf_printf_esc(sb,lun->be_lun->device_id); 3037 3038 if (retval != 0) 3039 break; 3040 3041 retval = sbuf_printf(sb, "</device_id>\n"); 3042 3043 if (retval != 0) 3044 break; 3045 3046 if (lun->backend->lun_info == NULL) { 3047 retval = sbuf_printf(sb, "</lun>\n"); 3048 if (retval != 0) 3049 break; 3050 continue; 3051 } 3052 3053 retval =lun->backend->lun_info(lun->be_lun->be_lun, sb); 3054 3055 if (retval != 0) 3056 break; 3057 3058 retval = sbuf_printf(sb, "</lun>\n"); 3059 3060 if (retval != 0) 3061 break; 3062 } 3063 mtx_unlock(&softc->ctl_lock); 3064 3065 if ((retval != 0) 3066 || ((retval = sbuf_printf(sb, "</ctllunlist>\n")) != 0)) { 3067 retval = 0; 3068 sbuf_delete(sb); 3069 list->status = CTL_LUN_LIST_NEED_MORE_SPACE; 3070 snprintf(list->error_str, sizeof(list->error_str), 3071 "Out of space, %d bytes is too small", 3072 list->alloc_len); 3073 break; 3074 } 3075 3076 sbuf_finish(sb); 3077 3078 retval = copyout(sbuf_data(sb), list->lun_xml, 3079 sbuf_len(sb) + 1); 3080 3081 list->fill_len = sbuf_len(sb) + 1; 3082 list->status = CTL_LUN_LIST_OK; 3083 sbuf_delete(sb); 3084 break; 3085 } 3086 default: { 3087 /* XXX KDM should we fix this? */ 3088 #if 0 3089 struct ctl_backend_driver *backend; 3090 unsigned int type; 3091 int found; 3092 3093 found = 0; 3094 3095 /* 3096 * We encode the backend type as the ioctl type for backend 3097 * ioctls. So parse it out here, and then search for a 3098 * backend of this type. 3099 */ 3100 type = _IOC_TYPE(cmd); 3101 3102 STAILQ_FOREACH(backend, &softc->be_list, links) { 3103 if (backend->type == type) { 3104 found = 1; 3105 break; 3106 } 3107 } 3108 if (found == 0) { 3109 printf("ctl: unknown ioctl command %#lx or backend " 3110 "%d\n", cmd, type); 3111 retval = -EINVAL; 3112 break; 3113 } 3114 retval = backend->ioctl(dev, cmd, addr, flag, td); 3115 #endif 3116 retval = ENOTTY; 3117 break; 3118 } 3119 } 3120 return (retval); 3121 } 3122 3123 uint32_t 3124 ctl_get_initindex(struct ctl_nexus *nexus) 3125 { 3126 if (nexus->targ_port < CTL_MAX_PORTS) 3127 return (nexus->initid.id + 3128 (nexus->targ_port * CTL_MAX_INIT_PER_PORT)); 3129 else 3130 return (nexus->initid.id + 3131 ((nexus->targ_port - CTL_MAX_PORTS) * 3132 CTL_MAX_INIT_PER_PORT)); 3133 } 3134 3135 uint32_t 3136 ctl_get_resindex(struct ctl_nexus *nexus) 3137 { 3138 return (nexus->initid.id + (nexus->targ_port * CTL_MAX_INIT_PER_PORT)); 3139 } 3140 3141 uint32_t 3142 ctl_port_idx(int port_num) 3143 { 3144 if (port_num < CTL_MAX_PORTS) 3145 return(port_num); 3146 else 3147 return(port_num - CTL_MAX_PORTS); 3148 } 3149 3150 /* 3151 * Note: This only works for bitmask sizes that are at least 32 bits, and 3152 * that are a power of 2. 
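 * For example, with size 32 and mask[0] == 0x0000000f, bits 0-3 are in
 * use, so ctl_ffz() returns 4, the first clear bit.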
3153 */ 3154 int 3155 ctl_ffz(uint32_t *mask, uint32_t size) 3156 { 3157 uint32_t num_chunks, num_pieces; 3158 int i, j; 3159 3160 num_chunks = (size >> 5); 3161 if (num_chunks == 0) 3162 num_chunks++; 3163 num_pieces = ctl_min((sizeof(uint32_t) * 8), size); 3164 3165 for (i = 0; i < num_chunks; i++) { 3166 for (j = 0; j < num_pieces; j++) { 3167 if ((mask[i] & (1 << j)) == 0) 3168 return ((i << 5) + j); 3169 } 3170 } 3171 3172 return (-1); 3173 } 3174 3175 int 3176 ctl_set_mask(uint32_t *mask, uint32_t bit) 3177 { 3178 uint32_t chunk, piece; 3179 3180 chunk = bit >> 5; 3181 piece = bit % (sizeof(uint32_t) * 8); 3182 3183 if ((mask[chunk] & (1 << piece)) != 0) 3184 return (-1); 3185 else 3186 mask[chunk] |= (1 << piece); 3187 3188 return (0); 3189 } 3190 3191 int 3192 ctl_clear_mask(uint32_t *mask, uint32_t bit) 3193 { 3194 uint32_t chunk, piece; 3195 3196 chunk = bit >> 5; 3197 piece = bit % (sizeof(uint32_t) * 8); 3198 3199 if ((mask[chunk] & (1 << piece)) == 0) 3200 return (-1); 3201 else 3202 mask[chunk] &= ~(1 << piece); 3203 3204 return (0); 3205 } 3206 3207 int 3208 ctl_is_set(uint32_t *mask, uint32_t bit) 3209 { 3210 uint32_t chunk, piece; 3211 3212 chunk = bit >> 5; 3213 piece = bit % (sizeof(uint32_t) * 8); 3214 3215 if ((mask[chunk] & (1 << piece)) == 0) 3216 return (0); 3217 else 3218 return (1); 3219 } 3220 3221 #ifdef unused 3222 /* 3223 * The bus, target and lun are optional, they can be filled in later. 3224 * can_wait is used to determine whether we can wait on the malloc or not. 3225 */ 3226 union ctl_io* 3227 ctl_malloc_io(ctl_io_type io_type, uint32_t targ_port, uint32_t targ_target, 3228 uint32_t targ_lun, int can_wait) 3229 { 3230 union ctl_io *io; 3231 3232 if (can_wait) 3233 io = (union ctl_io *)malloc(sizeof(*io), M_CTL, M_WAITOK); 3234 else 3235 io = (union ctl_io *)malloc(sizeof(*io), M_CTL, M_NOWAIT); 3236 3237 if (io != NULL) { 3238 io->io_hdr.io_type = io_type; 3239 io->io_hdr.targ_port = targ_port; 3240 /* 3241 * XXX KDM this needs to change/go away. We need to move 3242 * to a preallocated pool of ctl_scsiio structures. 3243 */ 3244 io->io_hdr.nexus.targ_target.id = targ_target; 3245 io->io_hdr.nexus.targ_lun = targ_lun; 3246 } 3247 3248 return (io); 3249 } 3250 3251 void 3252 ctl_kfree_io(union ctl_io *io) 3253 { 3254 free(io, M_CTL); 3255 } 3256 #endif /* unused */ 3257 3258 /* 3259 * ctl_softc, pool_type, total_ctl_io are passed in. 3260 * npool is passed out. 3261 */ 3262 int 3263 ctl_pool_create(struct ctl_softc *ctl_softc, ctl_pool_type pool_type, 3264 uint32_t total_ctl_io, struct ctl_io_pool **npool) 3265 { 3266 uint32_t i; 3267 union ctl_io *cur_io, *next_io; 3268 struct ctl_io_pool *pool; 3269 int retval; 3270 3271 retval = 0; 3272 3273 pool = (struct ctl_io_pool *)malloc(sizeof(*pool), M_CTL, 3274 M_NOWAIT | M_ZERO); 3275 if (pool == NULL) { 3276 retval = -ENOMEM; 3277 goto bailout; 3278 } 3279 3280 pool->type = pool_type; 3281 pool->ctl_softc = ctl_softc; 3282 3283 mtx_lock(&ctl_softc->ctl_lock); 3284 pool->id = ctl_softc->cur_pool_id++; 3285 mtx_unlock(&ctl_softc->ctl_lock); 3286 3287 pool->flags = CTL_POOL_FLAG_NONE; 3288 STAILQ_INIT(&pool->free_queue); 3289 3290 /* 3291 * XXX KDM other options here: 3292 * - allocate a page at a time 3293 * - allocate one big chunk of memory. 3294 * Page allocation might work well, but would take a little more 3295 * tracking. 
3296 */ 3297 for (i = 0; i < total_ctl_io; i++) { 3298 cur_io = (union ctl_io *)malloc(sizeof(*cur_io), M_CTL, 3299 M_NOWAIT); 3300 if (cur_io == NULL) { 3301 retval = ENOMEM; 3302 break; 3303 } 3304 cur_io->io_hdr.pool = pool; 3305 STAILQ_INSERT_TAIL(&pool->free_queue, &cur_io->io_hdr, links); 3306 pool->total_ctl_io++; 3307 pool->free_ctl_io++; 3308 } 3309 3310 if (retval != 0) { 3311 for (cur_io = (union ctl_io *)STAILQ_FIRST(&pool->free_queue); 3312 cur_io != NULL; cur_io = next_io) { 3313 next_io = (union ctl_io *)STAILQ_NEXT(&cur_io->io_hdr, 3314 links); 3315 STAILQ_REMOVE(&pool->free_queue, &cur_io->io_hdr, 3316 ctl_io_hdr, links); 3317 free(cur_io, M_CTL); 3318 } 3319 3320 free(pool, M_CTL); 3321 goto bailout; 3322 } 3323 mtx_lock(&ctl_softc->ctl_lock); 3324 ctl_softc->num_pools++; 3325 STAILQ_INSERT_TAIL(&ctl_softc->io_pools, pool, links); 3326 /* 3327 * Increment our usage count if this is an external consumer, so we 3328 * can't get unloaded until the external consumer (most likely a 3329 * FETD) unloads and frees his pool. 3330 * 3331 * XXX KDM will this increment the caller's module use count, or 3332 * mine? 3333 */ 3334 #if 0 3335 if ((pool_type != CTL_POOL_EMERGENCY) 3336 && (pool_type != CTL_POOL_INTERNAL) 3337 && (pool_type != CTL_POOL_IOCTL) 3338 && (pool_type != CTL_POOL_4OTHERSC)) 3339 MOD_INC_USE_COUNT; 3340 #endif 3341 3342 mtx_unlock(&ctl_softc->ctl_lock); 3343 3344 *npool = pool; 3345 3346 bailout: 3347 3348 return (retval); 3349 } 3350 3351 /* 3352 * Caller must hold ctl_softc->ctl_lock. 3353 */ 3354 int 3355 ctl_pool_acquire(struct ctl_io_pool *pool) 3356 { 3357 if (pool == NULL) 3358 return (-EINVAL); 3359 3360 if (pool->flags & CTL_POOL_FLAG_INVALID) 3361 return (-EINVAL); 3362 3363 pool->refcount++; 3364 3365 return (0); 3366 } 3367 3368 /* 3369 * Caller must hold ctl_softc->ctl_lock. 3370 */ 3371 int 3372 ctl_pool_invalidate(struct ctl_io_pool *pool) 3373 { 3374 if (pool == NULL) 3375 return (-EINVAL); 3376 3377 pool->flags |= CTL_POOL_FLAG_INVALID; 3378 3379 return (0); 3380 } 3381 3382 /* 3383 * Caller must hold ctl_softc->ctl_lock. 3384 */ 3385 int 3386 ctl_pool_release(struct ctl_io_pool *pool) 3387 { 3388 if (pool == NULL) 3389 return (-EINVAL); 3390 3391 if ((--pool->refcount == 0) 3392 && (pool->flags & CTL_POOL_FLAG_INVALID)) { 3393 ctl_pool_free(pool->ctl_softc, pool); 3394 } 3395 3396 return (0); 3397 } 3398 3399 /* 3400 * Must be called with ctl_softc->ctl_lock held. 3401 */ 3402 void 3403 ctl_pool_free(struct ctl_softc *ctl_softc, struct ctl_io_pool *pool) 3404 { 3405 union ctl_io *cur_io, *next_io; 3406 3407 for (cur_io = (union ctl_io *)STAILQ_FIRST(&pool->free_queue); 3408 cur_io != NULL; cur_io = next_io) { 3409 next_io = (union ctl_io *)STAILQ_NEXT(&cur_io->io_hdr, 3410 links); 3411 STAILQ_REMOVE(&pool->free_queue, &cur_io->io_hdr, ctl_io_hdr, 3412 links); 3413 free(cur_io, M_CTL); 3414 } 3415 3416 STAILQ_REMOVE(&ctl_softc->io_pools, pool, ctl_io_pool, links); 3417 ctl_softc->num_pools--; 3418 3419 /* 3420 * XXX KDM will this decrement the caller's usage count or mine? 3421 */ 3422 #if 0 3423 if ((pool->type != CTL_POOL_EMERGENCY) 3424 && (pool->type != CTL_POOL_INTERNAL) 3425 && (pool->type != CTL_POOL_IOCTL)) 3426 MOD_DEC_USE_COUNT; 3427 #endif 3428 3429 free(pool, M_CTL); 3430 } 3431 3432 /* 3433 * This routine does not block (except for spinlocks of course). 3434 * It tries to allocate a ctl_io union from the caller's pool as quickly as 3435 * possible. 
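 * If the caller's pool is empty, it falls back to an emergency pool, and
 * failing that to a non-blocking malloc.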
3436 */ 3437 union ctl_io * 3438 ctl_alloc_io(void *pool_ref) 3439 { 3440 union ctl_io *io; 3441 struct ctl_softc *ctl_softc; 3442 struct ctl_io_pool *pool, *npool; 3443 struct ctl_io_pool *emergency_pool; 3444 3445 pool = (struct ctl_io_pool *)pool_ref; 3446 3447 if (pool == NULL) { 3448 printf("%s: pool is NULL\n", __func__); 3449 return (NULL); 3450 } 3451 3452 emergency_pool = NULL; 3453 3454 ctl_softc = pool->ctl_softc; 3455 3456 mtx_lock(&ctl_softc->ctl_lock); 3457 /* 3458 * First, try to get the io structure from the user's pool. 3459 */ 3460 if (ctl_pool_acquire(pool) == 0) { 3461 io = (union ctl_io *)STAILQ_FIRST(&pool->free_queue); 3462 if (io != NULL) { 3463 STAILQ_REMOVE_HEAD(&pool->free_queue, links); 3464 pool->total_allocated++; 3465 pool->free_ctl_io--; 3466 mtx_unlock(&ctl_softc->ctl_lock); 3467 return (io); 3468 } else 3469 ctl_pool_release(pool); 3470 } 3471 /* 3472 * If he doesn't have any io structures left, search for an 3473 * emergency pool and grab one from there. 3474 */ 3475 STAILQ_FOREACH(npool, &ctl_softc->io_pools, links) { 3476 if (npool->type != CTL_POOL_EMERGENCY) 3477 continue; 3478 3479 if (ctl_pool_acquire(npool) != 0) 3480 continue; 3481 3482 emergency_pool = npool; 3483 3484 io = (union ctl_io *)STAILQ_FIRST(&npool->free_queue); 3485 if (io != NULL) { 3486 STAILQ_REMOVE_HEAD(&npool->free_queue, links); 3487 npool->total_allocated++; 3488 npool->free_ctl_io--; 3489 mtx_unlock(&ctl_softc->ctl_lock); 3490 return (io); 3491 } else 3492 ctl_pool_release(npool); 3493 } 3494 3495 /* Drop the spinlock before we malloc */ 3496 mtx_unlock(&ctl_softc->ctl_lock); 3497 3498 /* 3499 * The emergency pool (if it exists) didn't have one, so try an 3500 * atomic (i.e. nonblocking) malloc and see if we get lucky. 3501 */ 3502 io = (union ctl_io *)malloc(sizeof(*io), M_CTL, M_NOWAIT); 3503 if (io != NULL) { 3504 /* 3505 * If the emergency pool exists but is empty, add this 3506 * ctl_io to its list when it gets freed. 3507 */ 3508 if (emergency_pool != NULL) { 3509 mtx_lock(&ctl_softc->ctl_lock); 3510 if (ctl_pool_acquire(emergency_pool) == 0) { 3511 io->io_hdr.pool = emergency_pool; 3512 emergency_pool->total_ctl_io++; 3513 /* 3514 * Need to bump this, otherwise 3515 * total_allocated and total_freed won't 3516 * match when we no longer have anything 3517 * outstanding. 3518 */ 3519 emergency_pool->total_allocated++; 3520 } 3521 mtx_unlock(&ctl_softc->ctl_lock); 3522 } else 3523 io->io_hdr.pool = NULL; 3524 } 3525 3526 return (io); 3527 } 3528 3529 static void 3530 ctl_free_io_internal(union ctl_io *io, int have_lock) 3531 { 3532 if (io == NULL) 3533 return; 3534 3535 /* 3536 * If this ctl_io has a pool, return it to that pool. 
3537 */ 3538 if (io->io_hdr.pool != NULL) { 3539 struct ctl_io_pool *pool; 3540 #if 0 3541 struct ctl_softc *ctl_softc; 3542 union ctl_io *tmp_io; 3543 unsigned long xflags; 3544 int i; 3545 3546 ctl_softc = control_softc; 3547 #endif 3548 3549 pool = (struct ctl_io_pool *)io->io_hdr.pool; 3550 3551 if (have_lock == 0) 3552 mtx_lock(&pool->ctl_softc->ctl_lock); 3553 #if 0 3554 save_flags(xflags); 3555 3556 for (i = 0, tmp_io = (union ctl_io *)STAILQ_FIRST( 3557 &ctl_softc->task_queue); tmp_io != NULL; i++, 3558 tmp_io = (union ctl_io *)STAILQ_NEXT(&tmp_io->io_hdr, 3559 links)) { 3560 if (tmp_io == io) { 3561 printf("%s: %p is still on the task queue!\n", 3562 __func__, tmp_io); 3563 printf("%s: (%d): type %d " 3564 "msg %d cdb %x iptl: " 3565 "%d:%d:%d:%d tag 0x%04x " 3566 "flg %#lx\n", 3567 __func__, i, 3568 tmp_io->io_hdr.io_type, 3569 tmp_io->io_hdr.msg_type, 3570 tmp_io->scsiio.cdb[0], 3571 tmp_io->io_hdr.nexus.initid.id, 3572 tmp_io->io_hdr.nexus.targ_port, 3573 tmp_io->io_hdr.nexus.targ_target.id, 3574 tmp_io->io_hdr.nexus.targ_lun, 3575 (tmp_io->io_hdr.io_type == 3576 CTL_IO_TASK) ? 3577 tmp_io->taskio.tag_num : 3578 tmp_io->scsiio.tag_num, 3579 xflags); 3580 panic("I/O still on the task queue!"); 3581 } 3582 } 3583 #endif 3584 io->io_hdr.io_type = 0xff; 3585 STAILQ_INSERT_TAIL(&pool->free_queue, &io->io_hdr, links); 3586 pool->total_freed++; 3587 pool->free_ctl_io++; 3588 ctl_pool_release(pool); 3589 if (have_lock == 0) 3590 mtx_unlock(&pool->ctl_softc->ctl_lock); 3591 } else { 3592 /* 3593 * Otherwise, just free it. We probably malloced it and 3594 * the emergency pool wasn't available. 3595 */ 3596 free(io, M_CTL); 3597 } 3598 3599 } 3600 3601 void 3602 ctl_free_io(union ctl_io *io) 3603 { 3604 ctl_free_io_internal(io, /*have_lock*/ 0); 3605 } 3606 3607 void 3608 ctl_zero_io(union ctl_io *io) 3609 { 3610 void *pool_ref; 3611 3612 if (io == NULL) 3613 return; 3614 3615 /* 3616 * May need to preserve linked list pointers at some point too. 3617 */ 3618 pool_ref = io->io_hdr.pool; 3619 3620 memset(io, 0, sizeof(*io)); 3621 3622 io->io_hdr.pool = pool_ref; 3623 } 3624 3625 /* 3626 * This routine is currently used for internal copies of ctl_ios that need 3627 * to persist for some reason after we've already returned status to the 3628 * FETD. (Thus the flag set.) 3629 * 3630 * XXX XXX 3631 * Note that this makes a blind copy of all fields in the ctl_io, except 3632 * for the pool reference. This includes any memory that has been 3633 * allocated! That memory will no longer be valid after done has been 3634 * called, so this would be VERY DANGEROUS for command that actually does 3635 * any reads or writes. Right now (11/7/2005), this is only used for immediate 3636 * start and stop commands, which don't transfer any data, so this is not a 3637 * problem. If it is used for anything else, the caller would also need to 3638 * allocate data buffer space and this routine would need to be modified to 3639 * copy the data buffer(s) as well. 3640 */ 3641 void 3642 ctl_copy_io(union ctl_io *src, union ctl_io *dest) 3643 { 3644 void *pool_ref; 3645 3646 if ((src == NULL) 3647 || (dest == NULL)) 3648 return; 3649 3650 /* 3651 * May need to preserve linked list pointers at some point too. 3652 */ 3653 pool_ref = dest->io_hdr.pool; 3654 3655 memcpy(dest, src, ctl_min(sizeof(*src), sizeof(*dest))); 3656 3657 dest->io_hdr.pool = pool_ref; 3658 /* 3659 * We need to know that this is an internal copy, and doesn't need 3660 * to get passed back to the FETD that allocated it. 
3661 */ 3662 dest->io_hdr.flags |= CTL_FLAG_INT_COPY; 3663 } 3664 3665 #ifdef NEEDTOPORT 3666 static void 3667 ctl_update_power_subpage(struct copan_power_subpage *page) 3668 { 3669 int num_luns, num_partitions, config_type; 3670 struct ctl_softc *softc; 3671 cs_BOOL_t aor_present, shelf_50pct_power; 3672 cs_raidset_personality_t rs_type; 3673 int max_active_luns; 3674 3675 softc = control_softc; 3676 3677 /* subtract out the processor LUN */ 3678 num_luns = softc->num_luns - 1; 3679 /* 3680 * Default to 7 LUNs active, which was the only number we allowed 3681 * in the past. 3682 */ 3683 max_active_luns = 7; 3684 3685 num_partitions = config_GetRsPartitionInfo(); 3686 config_type = config_GetConfigType(); 3687 shelf_50pct_power = config_GetShelfPowerMode(); 3688 aor_present = config_IsAorRsPresent(); 3689 3690 rs_type = ddb_GetRsRaidType(1); 3691 if ((rs_type != CS_RAIDSET_PERSONALITY_RAID5) 3692 && (rs_type != CS_RAIDSET_PERSONALITY_RAID1)) { 3693 EPRINT(0, "Unsupported RS type %d!", rs_type); 3694 } 3695 3696 3697 page->total_luns = num_luns; 3698 3699 switch (config_type) { 3700 case 40: 3701 /* 3702 * In a 40 drive configuration, it doesn't matter what DC 3703 * cards we have, whether we have AOR enabled or not, 3704 * partitioning or not, or what type of RAIDset we have. 3705 * In that scenario, we can power up every LUN we present 3706 * to the user. 3707 */ 3708 max_active_luns = num_luns; 3709 3710 break; 3711 case 64: 3712 if (shelf_50pct_power == CS_FALSE) { 3713 /* 25% power */ 3714 if (aor_present == CS_TRUE) { 3715 if (rs_type == 3716 CS_RAIDSET_PERSONALITY_RAID5) { 3717 max_active_luns = 7; 3718 } else if (rs_type == 3719 CS_RAIDSET_PERSONALITY_RAID1){ 3720 max_active_luns = 14; 3721 } else { 3722 /* XXX KDM now what?? */ 3723 } 3724 } else { 3725 if (rs_type == 3726 CS_RAIDSET_PERSONALITY_RAID5) { 3727 max_active_luns = 8; 3728 } else if (rs_type == 3729 CS_RAIDSET_PERSONALITY_RAID1){ 3730 max_active_luns = 16; 3731 } else { 3732 /* XXX KDM now what?? */ 3733 } 3734 } 3735 } else { 3736 /* 50% power */ 3737 /* 3738 * With 50% power in a 64 drive configuration, we 3739 * can power all LUNs we present. 3740 */ 3741 max_active_luns = num_luns; 3742 } 3743 break; 3744 case 112: 3745 if (shelf_50pct_power == CS_FALSE) { 3746 /* 25% power */ 3747 if (aor_present == CS_TRUE) { 3748 if (rs_type == 3749 CS_RAIDSET_PERSONALITY_RAID5) { 3750 max_active_luns = 7; 3751 } else if (rs_type == 3752 CS_RAIDSET_PERSONALITY_RAID1){ 3753 max_active_luns = 14; 3754 } else { 3755 /* XXX KDM now what?? */ 3756 } 3757 } else { 3758 if (rs_type == 3759 CS_RAIDSET_PERSONALITY_RAID5) { 3760 max_active_luns = 8; 3761 } else if (rs_type == 3762 CS_RAIDSET_PERSONALITY_RAID1){ 3763 max_active_luns = 16; 3764 } else { 3765 /* XXX KDM now what?? */ 3766 } 3767 } 3768 } else { 3769 /* 50% power */ 3770 if (aor_present == CS_TRUE) { 3771 if (rs_type == 3772 CS_RAIDSET_PERSONALITY_RAID5) { 3773 max_active_luns = 14; 3774 } else if (rs_type == 3775 CS_RAIDSET_PERSONALITY_RAID1){ 3776 /* 3777 * We're assuming here that disk 3778 * caching is enabled, and so we're 3779 * able to power up half of each 3780 * LUN, and cache all writes. 3781 */ 3782 max_active_luns = num_luns; 3783 } else { 3784 /* XXX KDM now what?? */ 3785 } 3786 } else { 3787 if (rs_type == 3788 CS_RAIDSET_PERSONALITY_RAID5) { 3789 max_active_luns = 15; 3790 } else if (rs_type == 3791 CS_RAIDSET_PERSONALITY_RAID1){ 3792 max_active_luns = 30; 3793 } else { 3794 /* XXX KDM now what?? 
*/ 3795 } 3796 } 3797 } 3798 break; 3799 default: 3800 /* 3801 * In this case, we have an unknown configuration, so we 3802 * just use the default from above. 3803 */ 3804 break; 3805 } 3806 3807 page->max_active_luns = max_active_luns; 3808 #if 0 3809 printk("%s: total_luns = %d, max_active_luns = %d\n", __func__, 3810 page->total_luns, page->max_active_luns); 3811 #endif 3812 } 3813 #endif /* NEEDTOPORT */ 3814 3815 /* 3816 * This routine could be used in the future to load default and/or saved 3817 * mode page parameters for a particuar lun. 3818 */ 3819 static int 3820 ctl_init_page_index(struct ctl_lun *lun) 3821 { 3822 int i; 3823 struct ctl_page_index *page_index; 3824 struct ctl_softc *softc; 3825 3826 memcpy(&lun->mode_pages.index, page_index_template, 3827 sizeof(page_index_template)); 3828 3829 softc = lun->ctl_softc; 3830 3831 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { 3832 3833 page_index = &lun->mode_pages.index[i]; 3834 /* 3835 * If this is a disk-only mode page, there's no point in 3836 * setting it up. For some pages, we have to have some 3837 * basic information about the disk in order to calculate the 3838 * mode page data. 3839 */ 3840 if ((lun->be_lun->lun_type != T_DIRECT) 3841 && (page_index->page_flags & CTL_PAGE_FLAG_DISK_ONLY)) 3842 continue; 3843 3844 switch (page_index->page_code & SMPH_PC_MASK) { 3845 case SMS_FORMAT_DEVICE_PAGE: { 3846 struct scsi_format_page *format_page; 3847 3848 if (page_index->subpage != SMS_SUBPAGE_PAGE_0) 3849 panic("subpage is incorrect!"); 3850 3851 /* 3852 * Sectors per track are set above. Bytes per 3853 * sector need to be set here on a per-LUN basis. 3854 */ 3855 memcpy(&lun->mode_pages.format_page[CTL_PAGE_CURRENT], 3856 &format_page_default, 3857 sizeof(format_page_default)); 3858 memcpy(&lun->mode_pages.format_page[ 3859 CTL_PAGE_CHANGEABLE], &format_page_changeable, 3860 sizeof(format_page_changeable)); 3861 memcpy(&lun->mode_pages.format_page[CTL_PAGE_DEFAULT], 3862 &format_page_default, 3863 sizeof(format_page_default)); 3864 memcpy(&lun->mode_pages.format_page[CTL_PAGE_SAVED], 3865 &format_page_default, 3866 sizeof(format_page_default)); 3867 3868 format_page = &lun->mode_pages.format_page[ 3869 CTL_PAGE_CURRENT]; 3870 scsi_ulto2b(lun->be_lun->blocksize, 3871 format_page->bytes_per_sector); 3872 3873 format_page = &lun->mode_pages.format_page[ 3874 CTL_PAGE_DEFAULT]; 3875 scsi_ulto2b(lun->be_lun->blocksize, 3876 format_page->bytes_per_sector); 3877 3878 format_page = &lun->mode_pages.format_page[ 3879 CTL_PAGE_SAVED]; 3880 scsi_ulto2b(lun->be_lun->blocksize, 3881 format_page->bytes_per_sector); 3882 3883 page_index->page_data = 3884 (uint8_t *)lun->mode_pages.format_page; 3885 break; 3886 } 3887 case SMS_RIGID_DISK_PAGE: { 3888 struct scsi_rigid_disk_page *rigid_disk_page; 3889 uint32_t sectors_per_cylinder; 3890 uint64_t cylinders; 3891 #ifndef __XSCALE__ 3892 int shift; 3893 #endif /* !__XSCALE__ */ 3894 3895 if (page_index->subpage != SMS_SUBPAGE_PAGE_0) 3896 panic("invalid subpage value %d", 3897 page_index->subpage); 3898 3899 /* 3900 * Rotation rate and sectors per track are set 3901 * above. We calculate the cylinders here based on 3902 * capacity. Due to the number of heads and 3903 * sectors per track we're using, smaller arrays 3904 * may turn out to have 0 cylinders. Linux and 3905 * FreeBSD don't pay attention to these mode pages 3906 * to figure out capacity, but Solaris does. It 3907 * seems to deal with 0 cylinders just fine, and 3908 * works out a fake geometry based on the capacity. 
3909 */ 3910 memcpy(&lun->mode_pages.rigid_disk_page[ 3911 CTL_PAGE_CURRENT], &rigid_disk_page_default, 3912 sizeof(rigid_disk_page_default)); 3913 memcpy(&lun->mode_pages.rigid_disk_page[ 3914 CTL_PAGE_CHANGEABLE],&rigid_disk_page_changeable, 3915 sizeof(rigid_disk_page_changeable)); 3916 memcpy(&lun->mode_pages.rigid_disk_page[ 3917 CTL_PAGE_DEFAULT], &rigid_disk_page_default, 3918 sizeof(rigid_disk_page_default)); 3919 memcpy(&lun->mode_pages.rigid_disk_page[ 3920 CTL_PAGE_SAVED], &rigid_disk_page_default, 3921 sizeof(rigid_disk_page_default)); 3922 3923 sectors_per_cylinder = CTL_DEFAULT_SECTORS_PER_TRACK * 3924 CTL_DEFAULT_HEADS; 3925 3926 /* 3927 * The divide method here will be more accurate, 3928 * probably, but results in floating point being 3929 * used in the kernel on i386 (__udivdi3()). On the 3930 * XScale, though, __udivdi3() is implemented in 3931 * software. 3932 * 3933 * The shift method for cylinder calculation is 3934 * accurate if sectors_per_cylinder is a power of 3935 * 2. Otherwise it might be slightly off -- you 3936 * might have a bit of a truncation problem. 3937 */ 3938 #ifdef __XSCALE__ 3939 cylinders = (lun->be_lun->maxlba + 1) / 3940 sectors_per_cylinder; 3941 #else 3942 for (shift = 31; shift > 0; shift--) { 3943 if (sectors_per_cylinder & (1 << shift)) 3944 break; 3945 } 3946 cylinders = (lun->be_lun->maxlba + 1) >> shift; 3947 #endif 3948 3949 /* 3950 * We've basically got 3 bytes, or 24 bits for the 3951 * cylinder size in the mode page. If we're over, 3952 * just round down to 2^24. 3953 */ 3954 if (cylinders > 0xffffff) 3955 cylinders = 0xffffff; 3956 3957 rigid_disk_page = &lun->mode_pages.rigid_disk_page[ 3958 CTL_PAGE_CURRENT]; 3959 scsi_ulto3b(cylinders, rigid_disk_page->cylinders); 3960 3961 rigid_disk_page = &lun->mode_pages.rigid_disk_page[ 3962 CTL_PAGE_DEFAULT]; 3963 scsi_ulto3b(cylinders, rigid_disk_page->cylinders); 3964 3965 rigid_disk_page = &lun->mode_pages.rigid_disk_page[ 3966 CTL_PAGE_SAVED]; 3967 scsi_ulto3b(cylinders, rigid_disk_page->cylinders); 3968 3969 page_index->page_data = 3970 (uint8_t *)lun->mode_pages.rigid_disk_page; 3971 break; 3972 } 3973 case SMS_CACHING_PAGE: { 3974 3975 if (page_index->subpage != SMS_SUBPAGE_PAGE_0) 3976 panic("invalid subpage value %d", 3977 page_index->subpage); 3978 /* 3979 * Defaults should be okay here, no calculations 3980 * needed. 3981 */ 3982 memcpy(&lun->mode_pages.caching_page[CTL_PAGE_CURRENT], 3983 &caching_page_default, 3984 sizeof(caching_page_default)); 3985 memcpy(&lun->mode_pages.caching_page[ 3986 CTL_PAGE_CHANGEABLE], &caching_page_changeable, 3987 sizeof(caching_page_changeable)); 3988 memcpy(&lun->mode_pages.caching_page[CTL_PAGE_DEFAULT], 3989 &caching_page_default, 3990 sizeof(caching_page_default)); 3991 memcpy(&lun->mode_pages.caching_page[CTL_PAGE_SAVED], 3992 &caching_page_default, 3993 sizeof(caching_page_default)); 3994 page_index->page_data = 3995 (uint8_t *)lun->mode_pages.caching_page; 3996 break; 3997 } 3998 case SMS_CONTROL_MODE_PAGE: { 3999 4000 if (page_index->subpage != SMS_SUBPAGE_PAGE_0) 4001 panic("invalid subpage value %d", 4002 page_index->subpage); 4003 4004 /* 4005 * Defaults should be okay here, no calculations 4006 * needed. 
4007 */ 4008 memcpy(&lun->mode_pages.control_page[CTL_PAGE_CURRENT], 4009 &control_page_default, 4010 sizeof(control_page_default)); 4011 memcpy(&lun->mode_pages.control_page[ 4012 CTL_PAGE_CHANGEABLE], &control_page_changeable, 4013 sizeof(control_page_changeable)); 4014 memcpy(&lun->mode_pages.control_page[CTL_PAGE_DEFAULT], 4015 &control_page_default, 4016 sizeof(control_page_default)); 4017 memcpy(&lun->mode_pages.control_page[CTL_PAGE_SAVED], 4018 &control_page_default, 4019 sizeof(control_page_default)); 4020 page_index->page_data = 4021 (uint8_t *)lun->mode_pages.control_page; 4022 break; 4023 4024 } 4025 case SMS_VENDOR_SPECIFIC_PAGE:{ 4026 switch (page_index->subpage) { 4027 case PWR_SUBPAGE_CODE: { 4028 struct copan_power_subpage *current_page, 4029 *saved_page; 4030 4031 memcpy(&lun->mode_pages.power_subpage[ 4032 CTL_PAGE_CURRENT], 4033 &power_page_default, 4034 sizeof(power_page_default)); 4035 memcpy(&lun->mode_pages.power_subpage[ 4036 CTL_PAGE_CHANGEABLE], 4037 &power_page_changeable, 4038 sizeof(power_page_changeable)); 4039 memcpy(&lun->mode_pages.power_subpage[ 4040 CTL_PAGE_DEFAULT], 4041 &power_page_default, 4042 sizeof(power_page_default)); 4043 memcpy(&lun->mode_pages.power_subpage[ 4044 CTL_PAGE_SAVED], 4045 &power_page_default, 4046 sizeof(power_page_default)); 4047 page_index->page_data = 4048 (uint8_t *)lun->mode_pages.power_subpage; 4049 4050 current_page = (struct copan_power_subpage *) 4051 (page_index->page_data + 4052 (page_index->page_len * 4053 CTL_PAGE_CURRENT)); 4054 saved_page = (struct copan_power_subpage *) 4055 (page_index->page_data + 4056 (page_index->page_len * 4057 CTL_PAGE_SAVED)); 4058 break; 4059 } 4060 case APS_SUBPAGE_CODE: { 4061 struct copan_aps_subpage *current_page, 4062 *saved_page; 4063 4064 // This gets set multiple times but 4065 // it should always be the same. It's 4066 // only done during init so who cares. 
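				// Remember which mode page index entry
				// holds the APS subpage.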
4067 index_to_aps_page = i; 4068 4069 memcpy(&lun->mode_pages.aps_subpage[ 4070 CTL_PAGE_CURRENT], 4071 &aps_page_default, 4072 sizeof(aps_page_default)); 4073 memcpy(&lun->mode_pages.aps_subpage[ 4074 CTL_PAGE_CHANGEABLE], 4075 &aps_page_changeable, 4076 sizeof(aps_page_changeable)); 4077 memcpy(&lun->mode_pages.aps_subpage[ 4078 CTL_PAGE_DEFAULT], 4079 &aps_page_default, 4080 sizeof(aps_page_default)); 4081 memcpy(&lun->mode_pages.aps_subpage[ 4082 CTL_PAGE_SAVED], 4083 &aps_page_default, 4084 sizeof(aps_page_default)); 4085 page_index->page_data = 4086 (uint8_t *)lun->mode_pages.aps_subpage; 4087 4088 current_page = (struct copan_aps_subpage *) 4089 (page_index->page_data + 4090 (page_index->page_len * 4091 CTL_PAGE_CURRENT)); 4092 saved_page = (struct copan_aps_subpage *) 4093 (page_index->page_data + 4094 (page_index->page_len * 4095 CTL_PAGE_SAVED)); 4096 break; 4097 } 4098 case DBGCNF_SUBPAGE_CODE: { 4099 struct copan_debugconf_subpage *current_page, 4100 *saved_page; 4101 4102 memcpy(&lun->mode_pages.debugconf_subpage[ 4103 CTL_PAGE_CURRENT], 4104 &debugconf_page_default, 4105 sizeof(debugconf_page_default)); 4106 memcpy(&lun->mode_pages.debugconf_subpage[ 4107 CTL_PAGE_CHANGEABLE], 4108 &debugconf_page_changeable, 4109 sizeof(debugconf_page_changeable)); 4110 memcpy(&lun->mode_pages.debugconf_subpage[ 4111 CTL_PAGE_DEFAULT], 4112 &debugconf_page_default, 4113 sizeof(debugconf_page_default)); 4114 memcpy(&lun->mode_pages.debugconf_subpage[ 4115 CTL_PAGE_SAVED], 4116 &debugconf_page_default, 4117 sizeof(debugconf_page_default)); 4118 page_index->page_data = 4119 (uint8_t *)lun->mode_pages.debugconf_subpage; 4120 4121 current_page = (struct copan_debugconf_subpage *) 4122 (page_index->page_data + 4123 (page_index->page_len * 4124 CTL_PAGE_CURRENT)); 4125 saved_page = (struct copan_debugconf_subpage *) 4126 (page_index->page_data + 4127 (page_index->page_len * 4128 CTL_PAGE_SAVED)); 4129 break; 4130 } 4131 default: 4132 panic("invalid subpage value %d", 4133 page_index->subpage); 4134 break; 4135 } 4136 break; 4137 } 4138 default: 4139 panic("invalid page value %d", 4140 page_index->page_code & SMPH_PC_MASK); 4141 break; 4142 } 4143 } 4144 4145 return (CTL_RETVAL_COMPLETE); 4146 } 4147 4148 /* 4149 * LUN allocation. 4150 * 4151 * Requirements: 4152 * - caller allocates and zeros LUN storage, or passes in a NULL LUN if he 4153 * wants us to allocate the LUN and he can block. 4154 * - ctl_softc is always set 4155 * - be_lun is set if the LUN has a backend (needed for disk LUNs) 4156 * 4157 * Returns 0 for success, non-zero (errno) for failure. 4158 */ 4159 static int 4160 ctl_alloc_lun(struct ctl_softc *ctl_softc, struct ctl_lun *ctl_lun, 4161 struct ctl_be_lun *const be_lun, struct ctl_id target_id) 4162 { 4163 struct ctl_lun *nlun, *lun; 4164 struct ctl_frontend *fe; 4165 int lun_number, i; 4166 4167 if (be_lun == NULL) 4168 return (EINVAL); 4169 4170 /* 4171 * We currently only support Direct Access or Processor LUN types. 4172 */ 4173 switch (be_lun->lun_type) { 4174 case T_DIRECT: 4175 break; 4176 case T_PROCESSOR: 4177 break; 4178 case T_SEQUENTIAL: 4179 case T_CHANGER: 4180 default: 4181 be_lun->lun_config_status(be_lun->be_lun, 4182 CTL_LUN_CONFIG_FAILURE); 4183 break; 4184 } 4185 if (ctl_lun == NULL) { 4186 lun = malloc(sizeof(*lun), M_CTL, M_WAITOK); 4187 lun->flags = CTL_LUN_MALLOCED; 4188 } else 4189 lun = ctl_lun; 4190 4191 memset(lun, 0, sizeof(*lun)); 4192 4193 mtx_lock(&ctl_softc->ctl_lock); 4194 /* 4195 * See if the caller requested a particular LUN number. 
If so, see 4196 * if it is available. Otherwise, allocate the first available LUN. 4197 */ 4198 if (be_lun->flags & CTL_LUN_FLAG_ID_REQ) { 4199 if ((be_lun->req_lun_id > (CTL_MAX_LUNS - 1)) 4200 || (ctl_is_set(ctl_softc->ctl_lun_mask, be_lun->req_lun_id))) { 4201 mtx_unlock(&ctl_softc->ctl_lock); 4202 if (be_lun->req_lun_id > (CTL_MAX_LUNS - 1)) { 4203 printf("ctl: requested LUN ID %d is higher " 4204 "than CTL_MAX_LUNS - 1 (%d)\n", 4205 be_lun->req_lun_id, CTL_MAX_LUNS - 1); 4206 } else { 4207 /* 4208 * XXX KDM return an error, or just assign 4209 * another LUN ID in this case?? 4210 */ 4211 printf("ctl: requested LUN ID %d is already " 4212 "in use\n", be_lun->req_lun_id); 4213 } 4214 if (lun->flags & CTL_LUN_MALLOCED) 4215 free(lun, M_CTL); 4216 be_lun->lun_config_status(be_lun->be_lun, 4217 CTL_LUN_CONFIG_FAILURE); 4218 return (ENOSPC); 4219 } 4220 lun_number = be_lun->req_lun_id; 4221 } else { 4222 lun_number = ctl_ffz(ctl_softc->ctl_lun_mask, CTL_MAX_LUNS); 4223 if (lun_number == -1) { 4224 mtx_unlock(&ctl_softc->ctl_lock); 4225 printf("ctl: can't allocate LUN on target %ju, out of " 4226 "LUNs\n", (uintmax_t)target_id.id); 4227 if (lun->flags & CTL_LUN_MALLOCED) 4228 free(lun, M_CTL); 4229 be_lun->lun_config_status(be_lun->be_lun, 4230 CTL_LUN_CONFIG_FAILURE); 4231 return (ENOSPC); 4232 } 4233 } 4234 ctl_set_mask(ctl_softc->ctl_lun_mask, lun_number); 4235 4236 lun->target = target_id; 4237 lun->lun = lun_number; 4238 lun->be_lun = be_lun; 4239 /* 4240 * The processor LUN is always enabled. Disk LUNs come on line 4241 * disabled, and must be enabled by the backend. 4242 */ 4243 lun->flags = CTL_LUN_DISABLED; 4244 lun->backend = be_lun->be; 4245 be_lun->ctl_lun = lun; 4246 be_lun->lun_id = lun_number; 4247 atomic_add_int(&be_lun->be->num_luns, 1); 4248 if (be_lun->flags & CTL_LUN_FLAG_POWERED_OFF) 4249 lun->flags |= CTL_LUN_STOPPED; 4250 4251 if (be_lun->flags & CTL_LUN_FLAG_INOPERABLE) 4252 lun->flags |= CTL_LUN_INOPERABLE; 4253 4254 if (be_lun->flags & CTL_LUN_FLAG_PRIMARY) 4255 lun->flags |= CTL_LUN_PRIMARY_SC; 4256 4257 lun->ctl_softc = ctl_softc; 4258 TAILQ_INIT(&lun->ooa_queue); 4259 TAILQ_INIT(&lun->blocked_queue); 4260 STAILQ_INIT(&lun->error_list); 4261 4262 /* 4263 * Initialize the mode page index. 4264 */ 4265 ctl_init_page_index(lun); 4266 4267 /* 4268 * Set the poweron UA for all initiators on this LUN only. 4269 */ 4270 for (i = 0; i < CTL_MAX_INITIATORS; i++) 4271 lun->pending_sense[i].ua_pending = CTL_UA_POWERON; 4272 4273 /* 4274 * Now, before we insert this lun on the lun list, set the lun 4275 * inventory changed UA for all other luns. 4276 */ 4277 STAILQ_FOREACH(nlun, &ctl_softc->lun_list, links) { 4278 for (i = 0; i < CTL_MAX_INITIATORS; i++) { 4279 nlun->pending_sense[i].ua_pending |= CTL_UA_LUN_CHANGE; 4280 } 4281 } 4282 4283 STAILQ_INSERT_TAIL(&ctl_softc->lun_list, lun, links); 4284 4285 ctl_softc->ctl_luns[lun_number] = lun; 4286 4287 ctl_softc->num_luns++; 4288 4289 /* Setup statistics gathering */ 4290 lun->stats.device_type = be_lun->lun_type; 4291 lun->stats.lun_number = lun_number; 4292 if (lun->stats.device_type == T_DIRECT) 4293 lun->stats.blocksize = be_lun->blocksize; 4294 else 4295 lun->stats.flags = CTL_LUN_STATS_NO_BLOCKSIZE; 4296 for (i = 0;i < CTL_MAX_PORTS;i++) 4297 lun->stats.ports[i].targ_port = i; 4298 4299 mtx_unlock(&ctl_softc->ctl_lock); 4300 4301 lun->be_lun->lun_config_status(lun->be_lun->be_lun, CTL_LUN_CONFIG_OK); 4302 4303 /* 4304 * Run through each registered FETD and bring it online if it isn't 4305 * already. 
Enable the target ID if it hasn't been enabled, and 4306 * enable this particular LUN. 4307 */ 4308 STAILQ_FOREACH(fe, &ctl_softc->fe_list, links) { 4309 int retval; 4310 4311 /* 4312 * XXX KDM this only works for ONE TARGET ID. We'll need 4313 * to do things differently if we go to a multiple target 4314 * ID scheme. 4315 */ 4316 if ((fe->status & CTL_PORT_STATUS_TARG_ONLINE) == 0) { 4317 4318 retval = fe->targ_enable(fe->targ_lun_arg, target_id); 4319 if (retval != 0) { 4320 printf("ctl_alloc_lun: FETD %s port %d " 4321 "returned error %d for targ_enable on " 4322 "target %ju\n", fe->port_name, 4323 fe->targ_port, retval, 4324 (uintmax_t)target_id.id); 4325 } else 4326 fe->status |= CTL_PORT_STATUS_TARG_ONLINE; 4327 } 4328 4329 retval = fe->lun_enable(fe->targ_lun_arg, target_id,lun_number); 4330 if (retval != 0) { 4331 printf("ctl_alloc_lun: FETD %s port %d returned error " 4332 "%d for lun_enable on target %ju lun %d\n", 4333 fe->port_name, fe->targ_port, retval, 4334 (uintmax_t)target_id.id, lun_number); 4335 } else 4336 fe->status |= CTL_PORT_STATUS_LUN_ONLINE; 4337 } 4338 return (0); 4339 } 4340 4341 /* 4342 * Delete a LUN. 4343 * Assumptions: 4344 * - caller holds ctl_softc->ctl_lock. 4345 * - LUN has already been marked invalid and any pending I/O has been taken 4346 * care of. 4347 */ 4348 static int 4349 ctl_free_lun(struct ctl_lun *lun) 4350 { 4351 struct ctl_softc *softc; 4352 #if 0 4353 struct ctl_frontend *fe; 4354 #endif 4355 struct ctl_lun *nlun; 4356 union ctl_io *io, *next_io; 4357 int i; 4358 4359 softc = lun->ctl_softc; 4360 4361 STAILQ_REMOVE(&softc->lun_list, lun, ctl_lun, links); 4362 4363 ctl_clear_mask(softc->ctl_lun_mask, lun->lun); 4364 4365 softc->ctl_luns[lun->lun] = NULL; 4366 4367 if (TAILQ_FIRST(&lun->ooa_queue) != NULL) { 4368 printf("ctl_free_lun: aieee!! freeing a LUN with " 4369 "outstanding I/O!!\n"); 4370 } 4371 4372 /* 4373 * If we have anything pending on the RtR queue, remove it. 4374 */ 4375 for (io = (union ctl_io *)STAILQ_FIRST(&softc->rtr_queue); io != NULL; 4376 io = next_io) { 4377 next_io = (union ctl_io *)STAILQ_NEXT(&io->io_hdr, links); 4378 if ((io->io_hdr.nexus.targ_target.id == lun->target.id) 4379 && (io->io_hdr.nexus.targ_lun == lun->lun)) 4380 STAILQ_REMOVE(&softc->rtr_queue, &io->io_hdr, 4381 ctl_io_hdr, links); 4382 } 4383 4384 /* 4385 * Then remove everything from the blocked queue. 4386 */ 4387 for (io = (union ctl_io *)TAILQ_FIRST(&lun->blocked_queue); io != NULL; 4388 io = next_io) { 4389 next_io = (union ctl_io *)TAILQ_NEXT(&io->io_hdr,blocked_links); 4390 TAILQ_REMOVE(&lun->blocked_queue, &io->io_hdr, blocked_links); 4391 io->io_hdr.flags &= ~CTL_FLAG_BLOCKED; 4392 } 4393 4394 /* 4395 * Now clear out the OOA queue, and free all the I/O. 4396 * XXX KDM should we notify the FETD here? We probably need to 4397 * quiesce the LUN before deleting it. 4398 */ 4399 for (io = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); io != NULL; 4400 io = next_io) { 4401 next_io = (union ctl_io *)TAILQ_NEXT(&io->io_hdr, ooa_links); 4402 TAILQ_REMOVE(&lun->ooa_queue, &io->io_hdr, ooa_links); 4403 ctl_free_io_internal(io, /*have_lock*/ 1); 4404 } 4405 4406 softc->num_luns--; 4407 4408 /* 4409 * XXX KDM this scheme only works for a single target/multiple LUN 4410 * setup. It needs to be revamped for a multiple target scheme. 4411 * 4412 * XXX KDM this results in fe->lun_disable() getting called twice, 4413 * once when ctl_disable_lun() is called, and a second time here. 4414 * We really need to re-think the LUN disable semantics. 
There 4415 * should probably be several steps/levels to LUN removal: 4416 * - disable 4417 * - invalidate 4418 * - free 4419 * 4420 * Right now we only have a disable method when communicating to 4421 * the front end ports, at least for individual LUNs. 4422 */ 4423 #if 0 4424 STAILQ_FOREACH(fe, &softc->fe_list, links) { 4425 int retval; 4426 4427 retval = fe->lun_disable(fe->targ_lun_arg, lun->target, 4428 lun->lun); 4429 if (retval != 0) { 4430 printf("ctl_free_lun: FETD %s port %d returned error " 4431 "%d for lun_disable on target %ju lun %jd\n", 4432 fe->port_name, fe->targ_port, retval, 4433 (uintmax_t)lun->target.id, (intmax_t)lun->lun); 4434 } 4435 4436 if (STAILQ_FIRST(&softc->lun_list) == NULL) { 4437 fe->status &= ~CTL_PORT_STATUS_LUN_ONLINE; 4438 4439 retval = fe->targ_disable(fe->targ_lun_arg,lun->target); 4440 if (retval != 0) { 4441 printf("ctl_free_lun: FETD %s port %d " 4442 "returned error %d for targ_disable on " 4443 "target %ju\n", fe->port_name, 4444 fe->targ_port, retval, 4445 (uintmax_t)lun->target.id); 4446 } else 4447 fe->status &= ~CTL_PORT_STATUS_TARG_ONLINE; 4448 4449 if ((fe->status & CTL_PORT_STATUS_TARG_ONLINE) != 0) 4450 continue; 4451 4452 #if 0 4453 fe->port_offline(fe->onoff_arg); 4454 fe->status &= ~CTL_PORT_STATUS_ONLINE; 4455 #endif 4456 } 4457 } 4458 #endif 4459 4460 /* 4461 * Tell the backend to free resources, if this LUN has a backend. 4462 */ 4463 atomic_subtract_int(&lun->be_lun->be->num_luns, 1); 4464 lun->be_lun->lun_shutdown(lun->be_lun->be_lun); 4465 4466 if (lun->flags & CTL_LUN_MALLOCED) 4467 free(lun, M_CTL); 4468 4469 STAILQ_FOREACH(nlun, &softc->lun_list, links) { 4470 for (i = 0; i < CTL_MAX_INITIATORS; i++) { 4471 nlun->pending_sense[i].ua_pending |= CTL_UA_LUN_CHANGE; 4472 } 4473 } 4474 4475 return (0); 4476 } 4477 4478 static void 4479 ctl_create_lun(struct ctl_be_lun *be_lun) 4480 { 4481 struct ctl_softc *ctl_softc; 4482 4483 ctl_softc = control_softc; 4484 4485 /* 4486 * ctl_alloc_lun() should handle all potential failure cases. 4487 */ 4488 ctl_alloc_lun(ctl_softc, NULL, be_lun, ctl_softc->target); 4489 } 4490 4491 int 4492 ctl_add_lun(struct ctl_be_lun *be_lun) 4493 { 4494 struct ctl_softc *ctl_softc; 4495 4496 ctl_softc = control_softc; 4497 4498 mtx_lock(&ctl_softc->ctl_lock); 4499 STAILQ_INSERT_TAIL(&ctl_softc->pending_lun_queue, be_lun, links); 4500 mtx_unlock(&ctl_softc->ctl_lock); 4501 4502 ctl_wakeup_thread(); 4503 4504 return (0); 4505 } 4506 4507 int 4508 ctl_enable_lun(struct ctl_be_lun *be_lun) 4509 { 4510 struct ctl_softc *ctl_softc; 4511 struct ctl_frontend *fe, *nfe; 4512 struct ctl_lun *lun; 4513 int retval; 4514 4515 ctl_softc = control_softc; 4516 4517 lun = (struct ctl_lun *)be_lun->ctl_lun; 4518 4519 mtx_lock(&ctl_softc->ctl_lock); 4520 if ((lun->flags & CTL_LUN_DISABLED) == 0) { 4521 /* 4522 * eh? Why did we get called if the LUN is already 4523 * enabled? 4524 */ 4525 mtx_unlock(&ctl_softc->ctl_lock); 4526 return (0); 4527 } 4528 lun->flags &= ~CTL_LUN_DISABLED; 4529 4530 for (fe = STAILQ_FIRST(&ctl_softc->fe_list); fe != NULL; fe = nfe) { 4531 nfe = STAILQ_NEXT(fe, links); 4532 4533 /* 4534 * Drop the lock while we call the FETD's enable routine. 4535 * This can lead to a callback into CTL (at least in the 4536 * case of the internal initiator frontend. 
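 *
 * Grabbing nfe before the call is presumably what keeps this walk
 * safe if fe itself is removed from the list while the lock is
 * dropped; a frontend registered during that window may simply be
 * missed.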
4537 */ 4538 mtx_unlock(&ctl_softc->ctl_lock); 4539 retval = fe->lun_enable(fe->targ_lun_arg, lun->target,lun->lun); 4540 mtx_lock(&ctl_softc->ctl_lock); 4541 if (retval != 0) { 4542 printf("%s: FETD %s port %d returned error " 4543 "%d for lun_enable on target %ju lun %jd\n", 4544 __func__, fe->port_name, fe->targ_port, retval, 4545 (uintmax_t)lun->target.id, (intmax_t)lun->lun); 4546 } 4547 #if 0 4548 else { 4549 /* NOTE: TODO: why does lun enable affect port status? */ 4550 fe->status |= CTL_PORT_STATUS_LUN_ONLINE; 4551 } 4552 #endif 4553 } 4554 4555 mtx_unlock(&ctl_softc->ctl_lock); 4556 4557 return (0); 4558 } 4559 4560 int 4561 ctl_disable_lun(struct ctl_be_lun *be_lun) 4562 { 4563 struct ctl_softc *ctl_softc; 4564 struct ctl_frontend *fe; 4565 struct ctl_lun *lun; 4566 int retval; 4567 4568 ctl_softc = control_softc; 4569 4570 lun = (struct ctl_lun *)be_lun->ctl_lun; 4571 4572 mtx_lock(&ctl_softc->ctl_lock); 4573 4574 if (lun->flags & CTL_LUN_DISABLED) { 4575 mtx_unlock(&ctl_softc->ctl_lock); 4576 return (0); 4577 } 4578 lun->flags |= CTL_LUN_DISABLED; 4579 4580 STAILQ_FOREACH(fe, &ctl_softc->fe_list, links) { 4581 mtx_unlock(&ctl_softc->ctl_lock); 4582 /* 4583 * Drop the lock before we call the frontend's disable 4584 * routine, to avoid lock order reversals. 4585 * 4586 * XXX KDM what happens if the frontend list changes while 4587 * we're traversing it? It's unlikely, but should be handled. 4588 */ 4589 retval = fe->lun_disable(fe->targ_lun_arg, lun->target, 4590 lun->lun); 4591 mtx_lock(&ctl_softc->ctl_lock); 4592 if (retval != 0) { 4593 printf("ctl_alloc_lun: FETD %s port %d returned error " 4594 "%d for lun_disable on target %ju lun %jd\n", 4595 fe->port_name, fe->targ_port, retval, 4596 (uintmax_t)lun->target.id, (intmax_t)lun->lun); 4597 } 4598 } 4599 4600 mtx_unlock(&ctl_softc->ctl_lock); 4601 4602 return (0); 4603 } 4604 4605 int 4606 ctl_start_lun(struct ctl_be_lun *be_lun) 4607 { 4608 struct ctl_softc *ctl_softc; 4609 struct ctl_lun *lun; 4610 4611 ctl_softc = control_softc; 4612 4613 lun = (struct ctl_lun *)be_lun->ctl_lun; 4614 4615 mtx_lock(&ctl_softc->ctl_lock); 4616 lun->flags &= ~CTL_LUN_STOPPED; 4617 mtx_unlock(&ctl_softc->ctl_lock); 4618 4619 return (0); 4620 } 4621 4622 int 4623 ctl_stop_lun(struct ctl_be_lun *be_lun) 4624 { 4625 struct ctl_softc *ctl_softc; 4626 struct ctl_lun *lun; 4627 4628 ctl_softc = control_softc; 4629 4630 lun = (struct ctl_lun *)be_lun->ctl_lun; 4631 4632 mtx_lock(&ctl_softc->ctl_lock); 4633 lun->flags |= CTL_LUN_STOPPED; 4634 mtx_unlock(&ctl_softc->ctl_lock); 4635 4636 return (0); 4637 } 4638 4639 int 4640 ctl_lun_offline(struct ctl_be_lun *be_lun) 4641 { 4642 struct ctl_softc *ctl_softc; 4643 struct ctl_lun *lun; 4644 4645 ctl_softc = control_softc; 4646 4647 lun = (struct ctl_lun *)be_lun->ctl_lun; 4648 4649 mtx_lock(&ctl_softc->ctl_lock); 4650 lun->flags |= CTL_LUN_OFFLINE; 4651 mtx_unlock(&ctl_softc->ctl_lock); 4652 4653 return (0); 4654 } 4655 4656 int 4657 ctl_lun_online(struct ctl_be_lun *be_lun) 4658 { 4659 struct ctl_softc *ctl_softc; 4660 struct ctl_lun *lun; 4661 4662 ctl_softc = control_softc; 4663 4664 lun = (struct ctl_lun *)be_lun->ctl_lun; 4665 4666 mtx_lock(&ctl_softc->ctl_lock); 4667 lun->flags &= ~CTL_LUN_OFFLINE; 4668 mtx_unlock(&ctl_softc->ctl_lock); 4669 4670 return (0); 4671 } 4672 4673 int 4674 ctl_invalidate_lun(struct ctl_be_lun *be_lun) 4675 { 4676 struct ctl_softc *ctl_softc; 4677 struct ctl_lun *lun; 4678 4679 ctl_softc = control_softc; 4680 4681 lun = (struct ctl_lun *)be_lun->ctl_lun; 4682 4683 
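
	/*
	 * In rough outline, a backend tearing down a LUN is expected to do
	 * something like the following (illustrative sketch only):
	 *
	 *	ctl_disable_lun(be_lun);
	 *	ctl_invalidate_lun(be_lun);
	 *	(wait for the lun_shutdown() callback before tearing down
	 *	 the ctl_be_lun itself)
	 *
	 * since the free below only happens once the LUN has been disabled
	 * and its OOA queue has drained.
	 */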
mtx_lock(&ctl_softc->ctl_lock); 4684 4685 /* 4686 * The LUN needs to be disabled before it can be marked invalid. 4687 */ 4688 if ((lun->flags & CTL_LUN_DISABLED) == 0) { 4689 mtx_unlock(&ctl_softc->ctl_lock); 4690 return (-1); 4691 } 4692 /* 4693 * Mark the LUN invalid. 4694 */ 4695 lun->flags |= CTL_LUN_INVALID; 4696 4697 /* 4698 * If there is nothing in the OOA queue, go ahead and free the LUN. 4699 * If we have something in the OOA queue, we'll free it when the 4700 * last I/O completes. 4701 */ 4702 if (TAILQ_FIRST(&lun->ooa_queue) == NULL) 4703 ctl_free_lun(lun); 4704 mtx_unlock(&ctl_softc->ctl_lock); 4705 4706 return (0); 4707 } 4708 4709 int 4710 ctl_lun_inoperable(struct ctl_be_lun *be_lun) 4711 { 4712 struct ctl_softc *ctl_softc; 4713 struct ctl_lun *lun; 4714 4715 ctl_softc = control_softc; 4716 lun = (struct ctl_lun *)be_lun->ctl_lun; 4717 4718 mtx_lock(&ctl_softc->ctl_lock); 4719 lun->flags |= CTL_LUN_INOPERABLE; 4720 mtx_unlock(&ctl_softc->ctl_lock); 4721 4722 return (0); 4723 } 4724 4725 int 4726 ctl_lun_operable(struct ctl_be_lun *be_lun) 4727 { 4728 struct ctl_softc *ctl_softc; 4729 struct ctl_lun *lun; 4730 4731 ctl_softc = control_softc; 4732 lun = (struct ctl_lun *)be_lun->ctl_lun; 4733 4734 mtx_lock(&ctl_softc->ctl_lock); 4735 lun->flags &= ~CTL_LUN_INOPERABLE; 4736 mtx_unlock(&ctl_softc->ctl_lock); 4737 4738 return (0); 4739 } 4740 4741 int 4742 ctl_lun_power_lock(struct ctl_be_lun *be_lun, struct ctl_nexus *nexus, 4743 int lock) 4744 { 4745 struct ctl_softc *softc; 4746 struct ctl_lun *lun; 4747 struct copan_aps_subpage *current_sp; 4748 struct ctl_page_index *page_index; 4749 int i; 4750 4751 softc = control_softc; 4752 4753 mtx_lock(&softc->ctl_lock); 4754 4755 lun = (struct ctl_lun *)be_lun->ctl_lun; 4756 4757 page_index = NULL; 4758 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { 4759 if ((lun->mode_pages.index[i].page_code & SMPH_PC_MASK) != 4760 APS_PAGE_CODE) 4761 continue; 4762 4763 if (lun->mode_pages.index[i].subpage != APS_SUBPAGE_CODE) 4764 continue; 4765 page_index = &lun->mode_pages.index[i]; 4766 } 4767 4768 if (page_index == NULL) { 4769 mtx_unlock(&softc->ctl_lock); 4770 printf("%s: APS subpage not found for lun %ju!\n", __func__, 4771 (uintmax_t)lun->lun); 4772 return (1); 4773 } 4774 #if 0 4775 if ((softc->aps_locked_lun != 0) 4776 && (softc->aps_locked_lun != lun->lun)) { 4777 printf("%s: attempt to lock LUN %llu when %llu is already " 4778 "locked\n"); 4779 mtx_unlock(&softc->ctl_lock); 4780 return (1); 4781 } 4782 #endif 4783 4784 current_sp = (struct copan_aps_subpage *)(page_index->page_data + 4785 (page_index->page_len * CTL_PAGE_CURRENT)); 4786 4787 if (lock != 0) { 4788 current_sp->lock_active = APS_LOCK_ACTIVE; 4789 softc->aps_locked_lun = lun->lun; 4790 } else { 4791 current_sp->lock_active = 0; 4792 softc->aps_locked_lun = 0; 4793 } 4794 4795 4796 /* 4797 * If we're in HA mode, try to send the lock message to the other 4798 * side. 
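 *
 * The message only carries the nexus and a single lock/unlock flag;
 * any status above CTL_HA_STATUS_SUCCESS from ctl_ha_msg_send() below
 * is treated as a failure of the whole lock request.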
4799 */ 4800 if (ctl_is_single == 0) { 4801 int isc_retval; 4802 union ctl_ha_msg lock_msg; 4803 4804 lock_msg.hdr.nexus = *nexus; 4805 lock_msg.hdr.msg_type = CTL_MSG_APS_LOCK; 4806 if (lock != 0) 4807 lock_msg.aps.lock_flag = 1; 4808 else 4809 lock_msg.aps.lock_flag = 0; 4810 isc_retval = ctl_ha_msg_send(CTL_HA_CHAN_CTL, &lock_msg, 4811 sizeof(lock_msg), 0); 4812 if (isc_retval > CTL_HA_STATUS_SUCCESS) { 4813 printf("%s: APS (lock=%d) error returned from " 4814 "ctl_ha_msg_send: %d\n", __func__, lock, isc_retval); 4815 mtx_unlock(&softc->ctl_lock); 4816 return (1); 4817 } 4818 } 4819 4820 mtx_unlock(&softc->ctl_lock); 4821 4822 return (0); 4823 } 4824 4825 void 4826 ctl_lun_capacity_changed(struct ctl_be_lun *be_lun) 4827 { 4828 struct ctl_lun *lun; 4829 struct ctl_softc *softc; 4830 int i; 4831 4832 softc = control_softc; 4833 4834 mtx_lock(&softc->ctl_lock); 4835 4836 lun = (struct ctl_lun *)be_lun->ctl_lun; 4837 4838 for (i = 0; i < CTL_MAX_INITIATORS; i++) 4839 lun->pending_sense[i].ua_pending |= CTL_UA_CAPACITY_CHANGED; 4840 4841 mtx_unlock(&softc->ctl_lock); 4842 } 4843 4844 /* 4845 * Backend "memory move is complete" callback for requests that never 4846 * make it down to say RAIDCore's configuration code. 4847 */ 4848 int 4849 ctl_config_move_done(union ctl_io *io) 4850 { 4851 int retval; 4852 4853 retval = CTL_RETVAL_COMPLETE; 4854 4855 4856 CTL_DEBUG_PRINT(("ctl_config_move_done\n")); 4857 /* 4858 * XXX KDM this shouldn't happen, but what if it does? 4859 */ 4860 if (io->io_hdr.io_type != CTL_IO_SCSI) 4861 panic("I/O type isn't CTL_IO_SCSI!"); 4862 4863 if ((io->io_hdr.port_status == 0) 4864 && ((io->io_hdr.flags & CTL_FLAG_ABORT) == 0) 4865 && ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE)) 4866 io->io_hdr.status = CTL_SUCCESS; 4867 else if ((io->io_hdr.port_status != 0) 4868 && ((io->io_hdr.flags & CTL_FLAG_ABORT) == 0) 4869 && ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE)){ 4870 /* 4871 * For hardware error sense keys, the sense key 4872 * specific value is defined to be a retry count, 4873 * but we use it to pass back an internal FETD 4874 * error code. XXX KDM Hopefully the FETD is only 4875 * using 16 bits for an error code, since that's 4876 * all the space we have in the sks field. 4877 */ 4878 ctl_set_internal_failure(&io->scsiio, 4879 /*sks_valid*/ 1, 4880 /*retry_count*/ 4881 io->io_hdr.port_status); 4882 free(io->scsiio.kern_data_ptr, M_CTL); 4883 ctl_done(io); 4884 goto bailout; 4885 } 4886 4887 if (((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_IN) 4888 || ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS) 4889 || ((io->io_hdr.flags & CTL_FLAG_ABORT) != 0)) { 4890 /* 4891 * XXX KDM just assuming a single pointer here, and not a 4892 * S/G list. If we start using S/G lists for config data, 4893 * we'll need to know how to clean them up here as well. 4894 */ 4895 free(io->scsiio.kern_data_ptr, M_CTL); 4896 /* Hopefully the user has already set the status... */ 4897 ctl_done(io); 4898 } else { 4899 /* 4900 * XXX KDM now we need to continue data movement. Some 4901 * options: 4902 * - call ctl_scsiio() again? We don't do this for data 4903 * writes, because for those at least we know ahead of 4904 * time where the write will go and how long it is. For 4905 * config writes, though, that information is largely 4906 * contained within the write itself, thus we need to 4907 * parse out the data again. 4908 * 4909 * - Call some other function once the data is in? 
4910 */ 4911 4912 /* 4913 * XXX KDM call ctl_scsiio() again for now, and check flag 4914 * bits to see whether we're allocated or not. 4915 */ 4916 retval = ctl_scsiio(&io->scsiio); 4917 } 4918 bailout: 4919 return (retval); 4920 } 4921 4922 /* 4923 * This gets called by a backend driver when it is done with a 4924 * configuration write. 4925 */ 4926 void 4927 ctl_config_write_done(union ctl_io *io) 4928 { 4929 /* 4930 * If the IO_CONT flag is set, we need to call the supplied 4931 * function to continue processing the I/O, instead of completing 4932 * the I/O just yet. 4933 * 4934 * If there is an error, though, we don't want to keep processing. 4935 * Instead, just send status back to the initiator. 4936 */ 4937 if ((io->io_hdr.flags & CTL_FLAG_IO_CONT) 4938 && (((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE) 4939 || ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS))) { 4940 io->scsiio.io_cont(io); 4941 return; 4942 } 4943 /* 4944 * Since a configuration write can be done for commands that actually 4945 * have data allocated, like write buffer, and commands that have 4946 * no data, like start/stop unit, we need to check here. 4947 */ 4948 if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_OUT) 4949 free(io->scsiio.kern_data_ptr, M_CTL); 4950 ctl_done(io); 4951 } 4952 4953 /* 4954 * SCSI release command. 4955 */ 4956 int 4957 ctl_scsi_release(struct ctl_scsiio *ctsio) 4958 { 4959 int length, longid, thirdparty_id, resv_id; 4960 struct ctl_softc *ctl_softc; 4961 struct ctl_lun *lun; 4962 4963 length = 0; 4964 resv_id = 0; 4965 4966 CTL_DEBUG_PRINT(("ctl_scsi_release\n")); 4967 4968 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 4969 ctl_softc = control_softc; 4970 4971 switch (ctsio->cdb[0]) { 4972 case RELEASE: { 4973 struct scsi_release *cdb; 4974 4975 cdb = (struct scsi_release *)ctsio->cdb; 4976 if ((cdb->byte2 & 0x1f) != 0) { 4977 ctl_set_invalid_field(ctsio, 4978 /*sks_valid*/ 1, 4979 /*command*/ 1, 4980 /*field*/ 1, 4981 /*bit_valid*/ 0, 4982 /*bit*/ 0); 4983 ctl_done((union ctl_io *)ctsio); 4984 return (CTL_RETVAL_COMPLETE); 4985 } 4986 break; 4987 } 4988 case RELEASE_10: { 4989 struct scsi_release_10 *cdb; 4990 4991 cdb = (struct scsi_release_10 *)ctsio->cdb; 4992 4993 if ((cdb->byte2 & SR10_EXTENT) != 0) { 4994 ctl_set_invalid_field(ctsio, 4995 /*sks_valid*/ 1, 4996 /*command*/ 1, 4997 /*field*/ 1, 4998 /*bit_valid*/ 1, 4999 /*bit*/ 0); 5000 ctl_done((union ctl_io *)ctsio); 5001 return (CTL_RETVAL_COMPLETE); 5002 5003 } 5004 5005 if ((cdb->byte2 & SR10_3RDPTY) != 0) { 5006 ctl_set_invalid_field(ctsio, 5007 /*sks_valid*/ 1, 5008 /*command*/ 1, 5009 /*field*/ 1, 5010 /*bit_valid*/ 1, 5011 /*bit*/ 4); 5012 ctl_done((union ctl_io *)ctsio); 5013 return (CTL_RETVAL_COMPLETE); 5014 } 5015 5016 if (cdb->byte2 & SR10_LONGID) 5017 longid = 1; 5018 else 5019 thirdparty_id = cdb->thirdparty_id; 5020 5021 resv_id = cdb->resv_id; 5022 length = scsi_2btoul(cdb->length); 5023 break; 5024 } 5025 } 5026 5027 5028 /* 5029 * XXX KDM right now, we only support LUN reservation. We don't 5030 * support 3rd party reservations, or extent reservations, which 5031 * might actually need the parameter list. If we've gotten this 5032 * far, we've got a LUN reservation. Anything else got kicked out 5033 * above. So, according to SPC, ignore the length. 
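 *
 * With the length forced to zero below, no parameter data is ever
 * fetched from the initiator; we go straight to checking whether the
 * nexus issuing the release matches the one holding the reservation.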
5034 */ 5035 length = 0; 5036 5037 if (((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) 5038 && (length > 0)) { 5039 ctsio->kern_data_ptr = malloc(length, M_CTL, M_WAITOK); 5040 ctsio->kern_data_len = length; 5041 ctsio->kern_total_len = length; 5042 ctsio->kern_data_resid = 0; 5043 ctsio->kern_rel_offset = 0; 5044 ctsio->kern_sg_entries = 0; 5045 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 5046 ctsio->be_move_done = ctl_config_move_done; 5047 ctl_datamove((union ctl_io *)ctsio); 5048 5049 return (CTL_RETVAL_COMPLETE); 5050 } 5051 5052 if (length > 0) 5053 thirdparty_id = scsi_8btou64(ctsio->kern_data_ptr); 5054 5055 mtx_lock(&ctl_softc->ctl_lock); 5056 5057 /* 5058 * According to SPC, it is not an error for an intiator to attempt 5059 * to release a reservation on a LUN that isn't reserved, or that 5060 * is reserved by another initiator. The reservation can only be 5061 * released, though, by the initiator who made it or by one of 5062 * several reset type events. 5063 */ 5064 if (lun->flags & CTL_LUN_RESERVED) { 5065 if ((ctsio->io_hdr.nexus.initid.id == lun->rsv_nexus.initid.id) 5066 && (ctsio->io_hdr.nexus.targ_port == lun->rsv_nexus.targ_port) 5067 && (ctsio->io_hdr.nexus.targ_target.id == 5068 lun->rsv_nexus.targ_target.id)) { 5069 lun->flags &= ~CTL_LUN_RESERVED; 5070 } 5071 } 5072 5073 ctsio->scsi_status = SCSI_STATUS_OK; 5074 ctsio->io_hdr.status = CTL_SUCCESS; 5075 5076 if (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) { 5077 free(ctsio->kern_data_ptr, M_CTL); 5078 ctsio->io_hdr.flags &= ~CTL_FLAG_ALLOCATED; 5079 } 5080 5081 mtx_unlock(&ctl_softc->ctl_lock); 5082 5083 ctl_done((union ctl_io *)ctsio); 5084 return (CTL_RETVAL_COMPLETE); 5085 } 5086 5087 int 5088 ctl_scsi_reserve(struct ctl_scsiio *ctsio) 5089 { 5090 int extent, thirdparty, longid; 5091 int resv_id, length; 5092 uint64_t thirdparty_id; 5093 struct ctl_softc *ctl_softc; 5094 struct ctl_lun *lun; 5095 5096 extent = 0; 5097 thirdparty = 0; 5098 longid = 0; 5099 resv_id = 0; 5100 length = 0; 5101 thirdparty_id = 0; 5102 5103 CTL_DEBUG_PRINT(("ctl_reserve\n")); 5104 5105 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 5106 ctl_softc = control_softc; 5107 5108 switch (ctsio->cdb[0]) { 5109 case RESERVE: { 5110 struct scsi_reserve *cdb; 5111 5112 cdb = (struct scsi_reserve *)ctsio->cdb; 5113 if ((cdb->byte2 & 0x1f) != 0) { 5114 ctl_set_invalid_field(ctsio, 5115 /*sks_valid*/ 1, 5116 /*command*/ 1, 5117 /*field*/ 1, 5118 /*bit_valid*/ 0, 5119 /*bit*/ 0); 5120 ctl_done((union ctl_io *)ctsio); 5121 return (CTL_RETVAL_COMPLETE); 5122 } 5123 resv_id = cdb->resv_id; 5124 length = scsi_2btoul(cdb->length); 5125 break; 5126 } 5127 case RESERVE_10: { 5128 struct scsi_reserve_10 *cdb; 5129 5130 cdb = (struct scsi_reserve_10 *)ctsio->cdb; 5131 5132 if ((cdb->byte2 & SR10_EXTENT) != 0) { 5133 ctl_set_invalid_field(ctsio, 5134 /*sks_valid*/ 1, 5135 /*command*/ 1, 5136 /*field*/ 1, 5137 /*bit_valid*/ 1, 5138 /*bit*/ 0); 5139 ctl_done((union ctl_io *)ctsio); 5140 return (CTL_RETVAL_COMPLETE); 5141 } 5142 if ((cdb->byte2 & SR10_3RDPTY) != 0) { 5143 ctl_set_invalid_field(ctsio, 5144 /*sks_valid*/ 1, 5145 /*command*/ 1, 5146 /*field*/ 1, 5147 /*bit_valid*/ 1, 5148 /*bit*/ 4); 5149 ctl_done((union ctl_io *)ctsio); 5150 return (CTL_RETVAL_COMPLETE); 5151 } 5152 if (cdb->byte2 & SR10_LONGID) 5153 longid = 1; 5154 else 5155 thirdparty_id = cdb->thirdparty_id; 5156 5157 resv_id = cdb->resv_id; 5158 length = scsi_2btoul(cdb->length); 5159 break; 5160 } 5161 } 5162 5163 /* 5164 * XXX KDM right now, we only support LUN 
reservation. We don't 5165 * support 3rd party reservations, or extent reservations, which 5166 * might actually need the parameter list. If we've gotten this 5167 * far, we've got a LUN reservation. Anything else got kicked out 5168 * above. So, according to SPC, ignore the length. 5169 */ 5170 length = 0; 5171 5172 if (((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) 5173 && (length > 0)) { 5174 ctsio->kern_data_ptr = malloc(length, M_CTL, M_WAITOK); 5175 ctsio->kern_data_len = length; 5176 ctsio->kern_total_len = length; 5177 ctsio->kern_data_resid = 0; 5178 ctsio->kern_rel_offset = 0; 5179 ctsio->kern_sg_entries = 0; 5180 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 5181 ctsio->be_move_done = ctl_config_move_done; 5182 ctl_datamove((union ctl_io *)ctsio); 5183 5184 return (CTL_RETVAL_COMPLETE); 5185 } 5186 5187 if (length > 0) 5188 thirdparty_id = scsi_8btou64(ctsio->kern_data_ptr); 5189 5190 mtx_lock(&ctl_softc->ctl_lock); 5191 if (lun->flags & CTL_LUN_RESERVED) { 5192 if ((ctsio->io_hdr.nexus.initid.id != lun->rsv_nexus.initid.id) 5193 || (ctsio->io_hdr.nexus.targ_port != lun->rsv_nexus.targ_port) 5194 || (ctsio->io_hdr.nexus.targ_target.id != 5195 lun->rsv_nexus.targ_target.id)) { 5196 ctsio->scsi_status = SCSI_STATUS_RESERV_CONFLICT; 5197 ctsio->io_hdr.status = CTL_SCSI_ERROR; 5198 goto bailout; 5199 } 5200 } 5201 5202 lun->flags |= CTL_LUN_RESERVED; 5203 lun->rsv_nexus = ctsio->io_hdr.nexus; 5204 5205 ctsio->scsi_status = SCSI_STATUS_OK; 5206 ctsio->io_hdr.status = CTL_SUCCESS; 5207 5208 bailout: 5209 if (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) { 5210 free(ctsio->kern_data_ptr, M_CTL); 5211 ctsio->io_hdr.flags &= ~CTL_FLAG_ALLOCATED; 5212 } 5213 5214 mtx_unlock(&ctl_softc->ctl_lock); 5215 5216 ctl_done((union ctl_io *)ctsio); 5217 return (CTL_RETVAL_COMPLETE); 5218 } 5219 5220 int 5221 ctl_start_stop(struct ctl_scsiio *ctsio) 5222 { 5223 struct scsi_start_stop_unit *cdb; 5224 struct ctl_lun *lun; 5225 struct ctl_softc *ctl_softc; 5226 int retval; 5227 5228 CTL_DEBUG_PRINT(("ctl_start_stop\n")); 5229 5230 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 5231 ctl_softc = control_softc; 5232 retval = 0; 5233 5234 cdb = (struct scsi_start_stop_unit *)ctsio->cdb; 5235 5236 /* 5237 * XXX KDM 5238 * We don't support the immediate bit on a stop unit. In order to 5239 * do that, we would need to code up a way to know that a stop is 5240 * pending, and hold off any new commands until it completes, one 5241 * way or another. Then we could accept or reject those commands 5242 * depending on its status. We would almost need to do the reverse 5243 * of what we do below for an immediate start -- return the copy of 5244 * the ctl_io to the FETD with status to send to the host (and to 5245 * free the copy!) and then free the original I/O once the stop 5246 * actually completes. That way, the OOA queue mechanism can work 5247 * to block commands that shouldn't proceed. Another alternative 5248 * would be to put the copy in the queue in place of the original, 5249 * and return the original back to the caller. That could be 5250 * slightly safer.. 5251 */ 5252 if ((cdb->byte2 & SSS_IMMED) 5253 && ((cdb->how & SSS_START) == 0)) { 5254 ctl_set_invalid_field(ctsio, 5255 /*sks_valid*/ 1, 5256 /*command*/ 1, 5257 /*field*/ 1, 5258 /*bit_valid*/ 1, 5259 /*bit*/ 0); 5260 ctl_done((union ctl_io *)ctsio); 5261 return (CTL_RETVAL_COMPLETE); 5262 } 5263 5264 /* 5265 * We don't support the power conditions field. 
We need to check 5266 * this prior to checking the load/eject and start/stop bits. 5267 */ 5268 if ((cdb->how & SSS_PC_MASK) != SSS_PC_START_VALID) { 5269 ctl_set_invalid_field(ctsio, 5270 /*sks_valid*/ 1, 5271 /*command*/ 1, 5272 /*field*/ 4, 5273 /*bit_valid*/ 1, 5274 /*bit*/ 4); 5275 ctl_done((union ctl_io *)ctsio); 5276 return (CTL_RETVAL_COMPLETE); 5277 } 5278 5279 /* 5280 * Media isn't removable, so we can't load or eject it. 5281 */ 5282 if ((cdb->how & SSS_LOEJ) != 0) { 5283 ctl_set_invalid_field(ctsio, 5284 /*sks_valid*/ 1, 5285 /*command*/ 1, 5286 /*field*/ 4, 5287 /*bit_valid*/ 1, 5288 /*bit*/ 1); 5289 ctl_done((union ctl_io *)ctsio); 5290 return (CTL_RETVAL_COMPLETE); 5291 } 5292 5293 if ((lun->flags & CTL_LUN_PR_RESERVED) 5294 && ((cdb->how & SSS_START)==0)) { 5295 uint32_t residx; 5296 5297 residx = ctl_get_resindex(&ctsio->io_hdr.nexus); 5298 if (!lun->per_res[residx].registered 5299 || (lun->pr_res_idx!=residx && lun->res_type < 4)) { 5300 5301 ctl_set_reservation_conflict(ctsio); 5302 ctl_done((union ctl_io *)ctsio); 5303 return (CTL_RETVAL_COMPLETE); 5304 } 5305 } 5306 5307 /* 5308 * If there is no backend on this device, we can't start or stop 5309 * it. In theory we shouldn't get any start/stop commands in the 5310 * first place at this level if the LUN doesn't have a backend. 5311 * That should get stopped by the command decode code. 5312 */ 5313 if (lun->backend == NULL) { 5314 ctl_set_invalid_opcode(ctsio); 5315 ctl_done((union ctl_io *)ctsio); 5316 return (CTL_RETVAL_COMPLETE); 5317 } 5318 5319 /* 5320 * XXX KDM Copan-specific offline behavior. 5321 * Figure out a reasonable way to port this? 5322 */ 5323 #ifdef NEEDTOPORT 5324 mtx_lock(&ctl_softc->ctl_lock); 5325 5326 if (((cdb->byte2 & SSS_ONOFFLINE) == 0) 5327 && (lun->flags & CTL_LUN_OFFLINE)) { 5328 /* 5329 * If the LUN is offline, and the on/offline bit isn't set, 5330 * reject the start or stop. Otherwise, let it through. 5331 */ 5332 mtx_unlock(&ctl_softc->ctl_lock); 5333 ctl_set_lun_not_ready(ctsio); 5334 ctl_done((union ctl_io *)ctsio); 5335 } else { 5336 mtx_unlock(&ctl_softc->ctl_lock); 5337 #endif /* NEEDTOPORT */ 5338 /* 5339 * This could be a start or a stop when we're online, 5340 * or a stop/offline or start/online. A start or stop when 5341 * we're offline is covered in the case above. 5342 */ 5343 /* 5344 * In the non-immediate case, we send the request to 5345 * the backend and return status to the user when 5346 * it is done. 5347 * 5348 * In the immediate case, we allocate a new ctl_io 5349 * to hold a copy of the request, and send that to 5350 * the backend. We then set good status on the 5351 * user's request and return it immediately. 5352 */ 5353 if (cdb->byte2 & SSS_IMMED) { 5354 union ctl_io *new_io; 5355 5356 new_io = ctl_alloc_io(ctsio->io_hdr.pool); 5357 if (new_io == NULL) { 5358 ctl_set_busy(ctsio); 5359 ctl_done((union ctl_io *)ctsio); 5360 } else { 5361 ctl_copy_io((union ctl_io *)ctsio, 5362 new_io); 5363 retval = lun->backend->config_write(new_io); 5364 ctl_set_success(ctsio); 5365 ctl_done((union ctl_io *)ctsio); 5366 } 5367 } else { 5368 retval = lun->backend->config_write( 5369 (union ctl_io *)ctsio); 5370 } 5371 #ifdef NEEDTOPORT 5372 } 5373 #endif 5374 return (retval); 5375 } 5376 5377 /* 5378 * We support the SYNCHRONIZE CACHE command (10 and 16 byte versions), but 5379 * we don't really do anything with the LBA and length fields if the user 5380 * passes them in. Instead we'll just flush out the cache for the entire 5381 * LUN. 
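 *
 * For example, on a LUN whose maxlba is 999 (1000 blocks), a starting
 * LBA of 990 with a block count of 20 fails the range check below
 * (990 + 20 > 1000) and is rejected as LBA out of range, while a
 * count of 10 passes; either way, any flush that does go down to the
 * backend covers the whole LUN rather than just that range.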
5382 */ 5383 int 5384 ctl_sync_cache(struct ctl_scsiio *ctsio) 5385 { 5386 struct ctl_lun *lun; 5387 struct ctl_softc *ctl_softc; 5388 uint64_t starting_lba; 5389 uint32_t block_count; 5390 int reladr, immed; 5391 int retval; 5392 5393 CTL_DEBUG_PRINT(("ctl_sync_cache\n")); 5394 5395 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 5396 ctl_softc = control_softc; 5397 retval = 0; 5398 reladr = 0; 5399 immed = 0; 5400 5401 switch (ctsio->cdb[0]) { 5402 case SYNCHRONIZE_CACHE: { 5403 struct scsi_sync_cache *cdb; 5404 cdb = (struct scsi_sync_cache *)ctsio->cdb; 5405 5406 if (cdb->byte2 & SSC_RELADR) 5407 reladr = 1; 5408 5409 if (cdb->byte2 & SSC_IMMED) 5410 immed = 1; 5411 5412 starting_lba = scsi_4btoul(cdb->begin_lba); 5413 block_count = scsi_2btoul(cdb->lb_count); 5414 break; 5415 } 5416 case SYNCHRONIZE_CACHE_16: { 5417 struct scsi_sync_cache_16 *cdb; 5418 cdb = (struct scsi_sync_cache_16 *)ctsio->cdb; 5419 5420 if (cdb->byte2 & SSC_RELADR) 5421 reladr = 1; 5422 5423 if (cdb->byte2 & SSC_IMMED) 5424 immed = 1; 5425 5426 starting_lba = scsi_8btou64(cdb->begin_lba); 5427 block_count = scsi_4btoul(cdb->lb_count); 5428 break; 5429 } 5430 default: 5431 ctl_set_invalid_opcode(ctsio); 5432 ctl_done((union ctl_io *)ctsio); 5433 goto bailout; 5434 break; /* NOTREACHED */ 5435 } 5436 5437 if (immed) { 5438 /* 5439 * We don't support the immediate bit. Since it's in the 5440 * same place for the 10 and 16 byte SYNCHRONIZE CACHE 5441 * commands, we can just return the same error in either 5442 * case. 5443 */ 5444 ctl_set_invalid_field(ctsio, 5445 /*sks_valid*/ 1, 5446 /*command*/ 1, 5447 /*field*/ 1, 5448 /*bit_valid*/ 1, 5449 /*bit*/ 1); 5450 ctl_done((union ctl_io *)ctsio); 5451 goto bailout; 5452 } 5453 5454 if (reladr) { 5455 /* 5456 * We don't support the reladr bit either. It can only be 5457 * used with linked commands, and we don't support linked 5458 * commands. Since the bit is in the same place for the 5459 * 10 and 16 byte SYNCHRONIZE CACHE * commands, we can 5460 * just return the same error in either case. 5461 */ 5462 ctl_set_invalid_field(ctsio, 5463 /*sks_valid*/ 1, 5464 /*command*/ 1, 5465 /*field*/ 1, 5466 /*bit_valid*/ 1, 5467 /*bit*/ 0); 5468 ctl_done((union ctl_io *)ctsio); 5469 goto bailout; 5470 } 5471 5472 /* 5473 * We check the LBA and length, but don't do anything with them. 5474 * A SYNCHRONIZE CACHE will cause the entire cache for this lun to 5475 * get flushed. This check will just help satisfy anyone who wants 5476 * to see an error for an out of range LBA. 5477 */ 5478 if ((starting_lba + block_count) > (lun->be_lun->maxlba + 1)) { 5479 ctl_set_lba_out_of_range(ctsio); 5480 ctl_done((union ctl_io *)ctsio); 5481 goto bailout; 5482 } 5483 5484 /* 5485 * If this LUN has no backend, we can't flush the cache anyway. 5486 */ 5487 if (lun->backend == NULL) { 5488 ctl_set_invalid_opcode(ctsio); 5489 ctl_done((union ctl_io *)ctsio); 5490 goto bailout; 5491 } 5492 5493 /* 5494 * Check to see whether we're configured to send the SYNCHRONIZE 5495 * CACHE command directly to the back end. 
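 *
 * When CTL_FLAG_REAL_SYNC is set, only one in every lun->sync_interval
 * flushes actually reaches the backend: with a sync_interval of 4, for
 * example, three out of four SYNCHRONIZE CACHE commands complete
 * immediately with good status and the fourth is sent down.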
5496 */ 5497 mtx_lock(&ctl_softc->ctl_lock); 5498 if ((ctl_softc->flags & CTL_FLAG_REAL_SYNC) 5499 && (++(lun->sync_count) >= lun->sync_interval)) { 5500 lun->sync_count = 0; 5501 mtx_unlock(&ctl_softc->ctl_lock); 5502 retval = lun->backend->config_write((union ctl_io *)ctsio); 5503 } else { 5504 mtx_unlock(&ctl_softc->ctl_lock); 5505 ctl_set_success(ctsio); 5506 ctl_done((union ctl_io *)ctsio); 5507 } 5508 5509 bailout: 5510 5511 return (retval); 5512 } 5513 5514 int 5515 ctl_format(struct ctl_scsiio *ctsio) 5516 { 5517 struct scsi_format *cdb; 5518 struct ctl_lun *lun; 5519 struct ctl_softc *ctl_softc; 5520 int length, defect_list_len; 5521 5522 CTL_DEBUG_PRINT(("ctl_format\n")); 5523 5524 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 5525 ctl_softc = control_softc; 5526 5527 cdb = (struct scsi_format *)ctsio->cdb; 5528 5529 length = 0; 5530 if (cdb->byte2 & SF_FMTDATA) { 5531 if (cdb->byte2 & SF_LONGLIST) 5532 length = sizeof(struct scsi_format_header_long); 5533 else 5534 length = sizeof(struct scsi_format_header_short); 5535 } 5536 5537 if (((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) 5538 && (length > 0)) { 5539 ctsio->kern_data_ptr = malloc(length, M_CTL, M_WAITOK); 5540 ctsio->kern_data_len = length; 5541 ctsio->kern_total_len = length; 5542 ctsio->kern_data_resid = 0; 5543 ctsio->kern_rel_offset = 0; 5544 ctsio->kern_sg_entries = 0; 5545 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 5546 ctsio->be_move_done = ctl_config_move_done; 5547 ctl_datamove((union ctl_io *)ctsio); 5548 5549 return (CTL_RETVAL_COMPLETE); 5550 } 5551 5552 defect_list_len = 0; 5553 5554 if (cdb->byte2 & SF_FMTDATA) { 5555 if (cdb->byte2 & SF_LONGLIST) { 5556 struct scsi_format_header_long *header; 5557 5558 header = (struct scsi_format_header_long *) 5559 ctsio->kern_data_ptr; 5560 5561 defect_list_len = scsi_4btoul(header->defect_list_len); 5562 if (defect_list_len != 0) { 5563 ctl_set_invalid_field(ctsio, 5564 /*sks_valid*/ 1, 5565 /*command*/ 0, 5566 /*field*/ 2, 5567 /*bit_valid*/ 0, 5568 /*bit*/ 0); 5569 goto bailout; 5570 } 5571 } else { 5572 struct scsi_format_header_short *header; 5573 5574 header = (struct scsi_format_header_short *) 5575 ctsio->kern_data_ptr; 5576 5577 defect_list_len = scsi_2btoul(header->defect_list_len); 5578 if (defect_list_len != 0) { 5579 ctl_set_invalid_field(ctsio, 5580 /*sks_valid*/ 1, 5581 /*command*/ 0, 5582 /*field*/ 2, 5583 /*bit_valid*/ 0, 5584 /*bit*/ 0); 5585 goto bailout; 5586 } 5587 } 5588 } 5589 5590 /* 5591 * The format command will clear out the "Medium format corrupted" 5592 * status if set by the configuration code. That status is really 5593 * just a way to notify the host that we have lost the media, and 5594 * get them to issue a command that will basically make them think 5595 * they're blowing away the media. 
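 *
 * Concretely, all this path does at this point is clear
 * CTL_LUN_INOPERABLE below; any defect list supplied with the command
 * has already been rejected above.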
5596 */ 5597 mtx_lock(&ctl_softc->ctl_lock); 5598 lun->flags &= ~CTL_LUN_INOPERABLE; 5599 mtx_unlock(&ctl_softc->ctl_lock); 5600 5601 ctsio->scsi_status = SCSI_STATUS_OK; 5602 ctsio->io_hdr.status = CTL_SUCCESS; 5603 bailout: 5604 5605 if (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) { 5606 free(ctsio->kern_data_ptr, M_CTL); 5607 ctsio->io_hdr.flags &= ~CTL_FLAG_ALLOCATED; 5608 } 5609 5610 ctl_done((union ctl_io *)ctsio); 5611 return (CTL_RETVAL_COMPLETE); 5612 } 5613 5614 int 5615 ctl_write_buffer(struct ctl_scsiio *ctsio) 5616 { 5617 struct scsi_write_buffer *cdb; 5618 struct copan_page_header *header; 5619 struct ctl_lun *lun; 5620 struct ctl_softc *ctl_softc; 5621 int buffer_offset, len; 5622 int retval; 5623 5624 header = NULL; 5625 5626 retval = CTL_RETVAL_COMPLETE; 5627 5628 CTL_DEBUG_PRINT(("ctl_write_buffer\n")); 5629 5630 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 5631 ctl_softc = control_softc; 5632 cdb = (struct scsi_write_buffer *)ctsio->cdb; 5633 5634 if ((cdb->byte2 & RWB_MODE) != RWB_MODE_DATA) { 5635 ctl_set_invalid_field(ctsio, 5636 /*sks_valid*/ 1, 5637 /*command*/ 1, 5638 /*field*/ 1, 5639 /*bit_valid*/ 1, 5640 /*bit*/ 4); 5641 ctl_done((union ctl_io *)ctsio); 5642 return (CTL_RETVAL_COMPLETE); 5643 } 5644 if (cdb->buffer_id != 0) { 5645 ctl_set_invalid_field(ctsio, 5646 /*sks_valid*/ 1, 5647 /*command*/ 1, 5648 /*field*/ 2, 5649 /*bit_valid*/ 0, 5650 /*bit*/ 0); 5651 ctl_done((union ctl_io *)ctsio); 5652 return (CTL_RETVAL_COMPLETE); 5653 } 5654 5655 len = scsi_3btoul(cdb->length); 5656 buffer_offset = scsi_3btoul(cdb->offset); 5657 5658 if (len > sizeof(lun->write_buffer)) { 5659 ctl_set_invalid_field(ctsio, 5660 /*sks_valid*/ 1, 5661 /*command*/ 1, 5662 /*field*/ 6, 5663 /*bit_valid*/ 0, 5664 /*bit*/ 0); 5665 ctl_done((union ctl_io *)ctsio); 5666 return (CTL_RETVAL_COMPLETE); 5667 } 5668 5669 if (buffer_offset != 0) { 5670 ctl_set_invalid_field(ctsio, 5671 /*sks_valid*/ 1, 5672 /*command*/ 1, 5673 /*field*/ 3, 5674 /*bit_valid*/ 0, 5675 /*bit*/ 0); 5676 ctl_done((union ctl_io *)ctsio); 5677 return (CTL_RETVAL_COMPLETE); 5678 } 5679 5680 /* 5681 * If we've got a kernel request that hasn't been malloced yet, 5682 * malloc it and tell the caller the data buffer is here. 5683 */ 5684 if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) { 5685 ctsio->kern_data_ptr = lun->write_buffer; 5686 ctsio->kern_data_len = len; 5687 ctsio->kern_total_len = len; 5688 ctsio->kern_data_resid = 0; 5689 ctsio->kern_rel_offset = 0; 5690 ctsio->kern_sg_entries = 0; 5691 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 5692 ctsio->be_move_done = ctl_config_move_done; 5693 ctl_datamove((union ctl_io *)ctsio); 5694 5695 return (CTL_RETVAL_COMPLETE); 5696 } 5697 5698 ctl_done((union ctl_io *)ctsio); 5699 5700 return (CTL_RETVAL_COMPLETE); 5701 } 5702 5703 /* 5704 * Note that this function currently doesn't actually do anything inside 5705 * CTL to enforce things if the DQue bit is turned on. 5706 * 5707 * Also note that this function can't be used in the default case, because 5708 * the DQue bit isn't set in the changeable mask for the control mode page 5709 * anyway. This is just here as an example for how to implement a page 5710 * handler, and a placeholder in case we want to allow the user to turn 5711 * tagged queueing on and off. 5712 * 5713 * The D_SENSE bit handling is functional, however, and will turn 5714 * descriptor sense on and off for a given LUN. 
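 *
 * For example, a MODE SELECT that turns on SCP_DSENSE in the control
 * page updates both the current and saved copies of the page, sets
 * CTL_LUN_SENSE_DESC on the LUN, and queues a CTL_UA_MODE_CHANGE unit
 * attention for every other initiator.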
5715 */ 5716 int 5717 ctl_control_page_handler(struct ctl_scsiio *ctsio, 5718 struct ctl_page_index *page_index, uint8_t *page_ptr) 5719 { 5720 struct scsi_control_page *current_cp, *saved_cp, *user_cp; 5721 struct ctl_lun *lun; 5722 struct ctl_softc *softc; 5723 int set_ua; 5724 uint32_t initidx; 5725 5726 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 5727 initidx = ctl_get_initindex(&ctsio->io_hdr.nexus); 5728 set_ua = 0; 5729 5730 user_cp = (struct scsi_control_page *)page_ptr; 5731 current_cp = (struct scsi_control_page *) 5732 (page_index->page_data + (page_index->page_len * 5733 CTL_PAGE_CURRENT)); 5734 saved_cp = (struct scsi_control_page *) 5735 (page_index->page_data + (page_index->page_len * 5736 CTL_PAGE_SAVED)); 5737 5738 softc = control_softc; 5739 5740 mtx_lock(&softc->ctl_lock); 5741 if (((current_cp->rlec & SCP_DSENSE) == 0) 5742 && ((user_cp->rlec & SCP_DSENSE) != 0)) { 5743 /* 5744 * Descriptor sense is currently turned off and the user 5745 * wants to turn it on. 5746 */ 5747 current_cp->rlec |= SCP_DSENSE; 5748 saved_cp->rlec |= SCP_DSENSE; 5749 lun->flags |= CTL_LUN_SENSE_DESC; 5750 set_ua = 1; 5751 } else if (((current_cp->rlec & SCP_DSENSE) != 0) 5752 && ((user_cp->rlec & SCP_DSENSE) == 0)) { 5753 /* 5754 * Descriptor sense is currently turned on, and the user 5755 * wants to turn it off. 5756 */ 5757 current_cp->rlec &= ~SCP_DSENSE; 5758 saved_cp->rlec &= ~SCP_DSENSE; 5759 lun->flags &= ~CTL_LUN_SENSE_DESC; 5760 set_ua = 1; 5761 } 5762 if (current_cp->queue_flags & SCP_QUEUE_DQUE) { 5763 if (user_cp->queue_flags & SCP_QUEUE_DQUE) { 5764 #ifdef NEEDTOPORT 5765 csevent_log(CSC_CTL | CSC_SHELF_SW | 5766 CTL_UNTAG_TO_UNTAG, 5767 csevent_LogType_Trace, 5768 csevent_Severity_Information, 5769 csevent_AlertLevel_Green, 5770 csevent_FRU_Firmware, 5771 csevent_FRU_Unknown, 5772 "Received untagged to untagged transition"); 5773 #endif /* NEEDTOPORT */ 5774 } else { 5775 #ifdef NEEDTOPORT 5776 csevent_log(CSC_CTL | CSC_SHELF_SW | 5777 CTL_UNTAG_TO_TAG, 5778 csevent_LogType_ConfigChange, 5779 csevent_Severity_Information, 5780 csevent_AlertLevel_Green, 5781 csevent_FRU_Firmware, 5782 csevent_FRU_Unknown, 5783 "Received untagged to tagged " 5784 "queueing transition"); 5785 #endif /* NEEDTOPORT */ 5786 5787 current_cp->queue_flags &= ~SCP_QUEUE_DQUE; 5788 saved_cp->queue_flags &= ~SCP_QUEUE_DQUE; 5789 set_ua = 1; 5790 } 5791 } else { 5792 if (user_cp->queue_flags & SCP_QUEUE_DQUE) { 5793 #ifdef NEEDTOPORT 5794 csevent_log(CSC_CTL | CSC_SHELF_SW | 5795 CTL_TAG_TO_UNTAG, 5796 csevent_LogType_ConfigChange, 5797 csevent_Severity_Warning, 5798 csevent_AlertLevel_Yellow, 5799 csevent_FRU_Firmware, 5800 csevent_FRU_Unknown, 5801 "Received tagged queueing to untagged " 5802 "transition"); 5803 #endif /* NEEDTOPORT */ 5804 5805 current_cp->queue_flags |= SCP_QUEUE_DQUE; 5806 saved_cp->queue_flags |= SCP_QUEUE_DQUE; 5807 set_ua = 1; 5808 } else { 5809 #ifdef NEEDTOPORT 5810 csevent_log(CSC_CTL | CSC_SHELF_SW | 5811 CTL_TAG_TO_TAG, 5812 csevent_LogType_Trace, 5813 csevent_Severity_Information, 5814 csevent_AlertLevel_Green, 5815 csevent_FRU_Firmware, 5816 csevent_FRU_Unknown, 5817 "Received tagged queueing to tagged " 5818 "queueing transition"); 5819 #endif /* NEEDTOPORT */ 5820 } 5821 } 5822 if (set_ua != 0) { 5823 int i; 5824 /* 5825 * Let other initiators know that the mode 5826 * parameters for this LUN have changed. 
		 */
		for (i = 0; i < CTL_MAX_INITIATORS; i++) {
			if (i == initidx)
				continue;

			lun->pending_sense[i].ua_pending |=
				CTL_UA_MODE_CHANGE;
		}
	}
	mtx_unlock(&softc->ctl_lock);

	return (0);
}

int
ctl_power_sp_handler(struct ctl_scsiio *ctsio,
		     struct ctl_page_index *page_index, uint8_t *page_ptr)
{
	return (0);
}

int
ctl_power_sp_sense_handler(struct ctl_scsiio *ctsio,
			   struct ctl_page_index *page_index, int pc)
{
	struct copan_power_subpage *page;

	page = (struct copan_power_subpage *)(page_index->page_data +
	    (page_index->page_len * pc));

	switch (pc) {
	case SMS_PAGE_CTRL_CHANGEABLE >> 6:
		/*
		 * We don't update the changeable bits for this page.
		 */
		break;
	case SMS_PAGE_CTRL_CURRENT >> 6:
	case SMS_PAGE_CTRL_DEFAULT >> 6:
	case SMS_PAGE_CTRL_SAVED >> 6:
#ifdef NEEDTOPORT
		ctl_update_power_subpage(page);
#endif
		break;
	default:
#ifdef NEEDTOPORT
		EPRINT(0, "Invalid PC %d!!", pc);
#endif
		break;
	}
	return (0);
}


int
ctl_aps_sp_handler(struct ctl_scsiio *ctsio,
		   struct ctl_page_index *page_index, uint8_t *page_ptr)
{
	struct copan_aps_subpage *user_sp;
	struct copan_aps_subpage *current_sp;
	union ctl_modepage_info *modepage_info;
	struct ctl_softc *softc;
	struct ctl_lun *lun;
	int retval;

	retval = CTL_RETVAL_COMPLETE;
	current_sp = (struct copan_aps_subpage *)(page_index->page_data +
	    (page_index->page_len * CTL_PAGE_CURRENT));
	softc = control_softc;
	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;

	user_sp = (struct copan_aps_subpage *)page_ptr;

	modepage_info = (union ctl_modepage_info *)
		ctsio->io_hdr.ctl_private[CTL_PRIV_MODEPAGE].bytes;

	modepage_info->header.page_code = page_index->page_code & SMPH_PC_MASK;
	modepage_info->header.subpage = page_index->subpage;
	modepage_info->aps.lock_active = user_sp->lock_active;

	mtx_lock(&softc->ctl_lock);

	/*
	 * If there is a request to lock the LUN and another LUN is locked
	 * this is an error. If the requested LUN is already locked ignore
	 * the request. If no LUN is locked attempt to lock it.
	 * If there is a request to unlock the LUN and the LUN is currently
	 * locked attempt to unlock it. Otherwise ignore the request, i.e.
	 * if another LUN is locked or no LUN is locked.
	 */
	if (user_sp->lock_active & APS_LOCK_ACTIVE) {
		if (softc->aps_locked_lun == lun->lun) {
			/*
			 * This LUN is already locked, so we're done.
			 */
			retval = CTL_RETVAL_COMPLETE;
		} else if (softc->aps_locked_lun == 0) {
			/*
			 * No one has the lock, pass the request to the
			 * backend.
			 */
			retval = lun->backend->config_write(
				(union ctl_io *)ctsio);
		} else {
			/*
			 * Someone else has the lock, throw out the request.
			 */
			ctl_set_already_locked(ctsio);
			free(ctsio->kern_data_ptr, M_CTL);
			ctl_done((union ctl_io *)ctsio);

			/*
			 * Set the return value so that ctl_do_mode_select()
			 * won't try to complete the command. We already
			 * completed it here.
			 */
			retval = CTL_RETVAL_ERROR;
		}
	} else if (softc->aps_locked_lun == lun->lun) {
		/*
		 * This LUN is locked, so pass the unlock request to the
		 * backend.
5948 */ 5949 retval = lun->backend->config_write((union ctl_io *)ctsio); 5950 } 5951 mtx_unlock(&softc->ctl_lock); 5952 5953 return (retval); 5954 } 5955 5956 int 5957 ctl_debugconf_sp_select_handler(struct ctl_scsiio *ctsio, 5958 struct ctl_page_index *page_index, 5959 uint8_t *page_ptr) 5960 { 5961 uint8_t *c; 5962 int i; 5963 5964 c = ((struct copan_debugconf_subpage *)page_ptr)->ctl_time_io_secs; 5965 ctl_time_io_secs = 5966 (c[0] << 8) | 5967 (c[1] << 0) | 5968 0; 5969 CTL_DEBUG_PRINT(("set ctl_time_io_secs to %d\n", ctl_time_io_secs)); 5970 printf("set ctl_time_io_secs to %d\n", ctl_time_io_secs); 5971 printf("page data:"); 5972 for (i=0; i<8; i++) 5973 printf(" %.2x",page_ptr[i]); 5974 printf("\n"); 5975 return (0); 5976 } 5977 5978 int 5979 ctl_debugconf_sp_sense_handler(struct ctl_scsiio *ctsio, 5980 struct ctl_page_index *page_index, 5981 int pc) 5982 { 5983 struct copan_debugconf_subpage *page; 5984 5985 page = (struct copan_debugconf_subpage *)page_index->page_data + 5986 (page_index->page_len * pc); 5987 5988 switch (pc) { 5989 case SMS_PAGE_CTRL_CHANGEABLE >> 6: 5990 case SMS_PAGE_CTRL_DEFAULT >> 6: 5991 case SMS_PAGE_CTRL_SAVED >> 6: 5992 /* 5993 * We don't update the changable or default bits for this page. 5994 */ 5995 break; 5996 case SMS_PAGE_CTRL_CURRENT >> 6: 5997 page->ctl_time_io_secs[0] = ctl_time_io_secs >> 8; 5998 page->ctl_time_io_secs[1] = ctl_time_io_secs >> 0; 5999 break; 6000 default: 6001 #ifdef NEEDTOPORT 6002 EPRINT(0, "Invalid PC %d!!", pc); 6003 #endif /* NEEDTOPORT */ 6004 break; 6005 } 6006 return (0); 6007 } 6008 6009 6010 static int 6011 ctl_do_mode_select(union ctl_io *io) 6012 { 6013 struct scsi_mode_page_header *page_header; 6014 struct ctl_page_index *page_index; 6015 struct ctl_scsiio *ctsio; 6016 int control_dev, page_len; 6017 int page_len_offset, page_len_size; 6018 union ctl_modepage_info *modepage_info; 6019 struct ctl_lun *lun; 6020 int *len_left, *len_used; 6021 int retval, i; 6022 6023 ctsio = &io->scsiio; 6024 page_index = NULL; 6025 page_len = 0; 6026 retval = CTL_RETVAL_COMPLETE; 6027 6028 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 6029 6030 if (lun->be_lun->lun_type != T_DIRECT) 6031 control_dev = 1; 6032 else 6033 control_dev = 0; 6034 6035 modepage_info = (union ctl_modepage_info *) 6036 ctsio->io_hdr.ctl_private[CTL_PRIV_MODEPAGE].bytes; 6037 len_left = &modepage_info->header.len_left; 6038 len_used = &modepage_info->header.len_used; 6039 6040 do_next_page: 6041 6042 page_header = (struct scsi_mode_page_header *) 6043 (ctsio->kern_data_ptr + *len_used); 6044 6045 if (*len_left == 0) { 6046 free(ctsio->kern_data_ptr, M_CTL); 6047 ctl_set_success(ctsio); 6048 ctl_done((union ctl_io *)ctsio); 6049 return (CTL_RETVAL_COMPLETE); 6050 } else if (*len_left < sizeof(struct scsi_mode_page_header)) { 6051 6052 free(ctsio->kern_data_ptr, M_CTL); 6053 ctl_set_param_len_error(ctsio); 6054 ctl_done((union ctl_io *)ctsio); 6055 return (CTL_RETVAL_COMPLETE); 6056 6057 } else if ((page_header->page_code & SMPH_SPF) 6058 && (*len_left < sizeof(struct scsi_mode_page_header_sp))) { 6059 6060 free(ctsio->kern_data_ptr, M_CTL); 6061 ctl_set_param_len_error(ctsio); 6062 ctl_done((union ctl_io *)ctsio); 6063 return (CTL_RETVAL_COMPLETE); 6064 } 6065 6066 6067 /* 6068 * XXX KDM should we do something with the block descriptor? 
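 *
 * The matching loop below compares the page code from each parameter
 * list header (masked with SMPH_PC_MASK) against our mode page index,
 * and, when SMPH_SPF is set in both, the subpage numbers as well. A
 * header carrying page code 0x0A with no subpage, for instance,
 * selects the control mode page entry, while an unknown page code
 * falls through and is rejected as an invalid field at offset
 * *len_used.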
6069 */ 6070 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { 6071 6072 if ((control_dev != 0) 6073 && (lun->mode_pages.index[i].page_flags & 6074 CTL_PAGE_FLAG_DISK_ONLY)) 6075 continue; 6076 6077 if ((lun->mode_pages.index[i].page_code & SMPH_PC_MASK) != 6078 (page_header->page_code & SMPH_PC_MASK)) 6079 continue; 6080 6081 /* 6082 * If neither page has a subpage code, then we've got a 6083 * match. 6084 */ 6085 if (((lun->mode_pages.index[i].page_code & SMPH_SPF) == 0) 6086 && ((page_header->page_code & SMPH_SPF) == 0)) { 6087 page_index = &lun->mode_pages.index[i]; 6088 page_len = page_header->page_length; 6089 break; 6090 } 6091 6092 /* 6093 * If both pages have subpages, then the subpage numbers 6094 * have to match. 6095 */ 6096 if ((lun->mode_pages.index[i].page_code & SMPH_SPF) 6097 && (page_header->page_code & SMPH_SPF)) { 6098 struct scsi_mode_page_header_sp *sph; 6099 6100 sph = (struct scsi_mode_page_header_sp *)page_header; 6101 6102 if (lun->mode_pages.index[i].subpage == 6103 sph->subpage) { 6104 page_index = &lun->mode_pages.index[i]; 6105 page_len = scsi_2btoul(sph->page_length); 6106 break; 6107 } 6108 } 6109 } 6110 6111 /* 6112 * If we couldn't find the page, or if we don't have a mode select 6113 * handler for it, send back an error to the user. 6114 */ 6115 if ((page_index == NULL) 6116 || (page_index->select_handler == NULL)) { 6117 ctl_set_invalid_field(ctsio, 6118 /*sks_valid*/ 1, 6119 /*command*/ 0, 6120 /*field*/ *len_used, 6121 /*bit_valid*/ 0, 6122 /*bit*/ 0); 6123 free(ctsio->kern_data_ptr, M_CTL); 6124 ctl_done((union ctl_io *)ctsio); 6125 return (CTL_RETVAL_COMPLETE); 6126 } 6127 6128 if (page_index->page_code & SMPH_SPF) { 6129 page_len_offset = 2; 6130 page_len_size = 2; 6131 } else { 6132 page_len_size = 1; 6133 page_len_offset = 1; 6134 } 6135 6136 /* 6137 * If the length the initiator gives us isn't the one we specify in 6138 * the mode page header, or if they didn't specify enough data in 6139 * the CDB to avoid truncating this page, kick out the request. 6140 */ 6141 if ((page_len != (page_index->page_len - page_len_offset - 6142 page_len_size)) 6143 || (*len_left < page_index->page_len)) { 6144 6145 6146 ctl_set_invalid_field(ctsio, 6147 /*sks_valid*/ 1, 6148 /*command*/ 0, 6149 /*field*/ *len_used + page_len_offset, 6150 /*bit_valid*/ 0, 6151 /*bit*/ 0); 6152 free(ctsio->kern_data_ptr, M_CTL); 6153 ctl_done((union ctl_io *)ctsio); 6154 return (CTL_RETVAL_COMPLETE); 6155 } 6156 6157 /* 6158 * Run through the mode page, checking to make sure that the bits 6159 * the user changed are actually legal for him to change. 6160 */ 6161 for (i = 0; i < page_index->page_len; i++) { 6162 uint8_t *user_byte, *change_mask, *current_byte; 6163 int bad_bit; 6164 int j; 6165 6166 user_byte = (uint8_t *)page_header + i; 6167 change_mask = page_index->page_data + 6168 (page_index->page_len * CTL_PAGE_CHANGEABLE) + i; 6169 current_byte = page_index->page_data + 6170 (page_index->page_len * CTL_PAGE_CURRENT) + i; 6171 6172 /* 6173 * Check to see whether the user set any bits in this byte 6174 * that he is not allowed to set. 6175 */ 6176 if ((*user_byte & ~(*change_mask)) == 6177 (*current_byte & ~(*change_mask))) 6178 continue; 6179 6180 /* 6181 * Go through bit by bit to determine which one is illegal. 
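		 * (A byte survived the check above, so at least one bit that
		 *  the CHANGEABLE mask marks as fixed differs from the
		 *  CURRENT value; find the highest such bit to report.)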
		 */
		bad_bit = 0;
		for (j = 7; j >= 0; j--) {
			if ((((1 << j) & ~(*change_mask)) & *user_byte) !=
			    (((1 << j) & ~(*change_mask)) & *current_byte)) {
				bad_bit = j;
				break;
			}
		}
		ctl_set_invalid_field(ctsio,
				      /*sks_valid*/ 1,
				      /*command*/ 0,
				      /*field*/ *len_used + i,
				      /*bit_valid*/ 1,
				      /*bit*/ bad_bit);
		free(ctsio->kern_data_ptr, M_CTL);
		ctl_done((union ctl_io *)ctsio);
		return (CTL_RETVAL_COMPLETE);
	}

	/*
	 * Decrement these before we call the page handler, since we may
	 * end up getting called back one way or another before the handler
	 * returns to this context.
	 */
	*len_left -= page_index->page_len;
	*len_used += page_index->page_len;

	retval = page_index->select_handler(ctsio, page_index,
					    (uint8_t *)page_header);

	/*
	 * If the page handler returns CTL_RETVAL_QUEUED, then we need to
	 * wait until this queued command completes to finish processing
	 * the mode page.  If it returns anything other than
	 * CTL_RETVAL_COMPLETE (e.g. CTL_RETVAL_ERROR), then it should have
	 * already set the sense information, freed the data pointer, and
	 * completed the io for us.
	 */
	if (retval != CTL_RETVAL_COMPLETE)
		goto bailout_no_done;

	/*
	 * If the initiator sent us more than one page, parse the next one.
	 */
	if (*len_left > 0)
		goto do_next_page;

	ctl_set_success(ctsio);
	free(ctsio->kern_data_ptr, M_CTL);
	ctl_done((union ctl_io *)ctsio);

bailout_no_done:

	return (CTL_RETVAL_COMPLETE);

}

int
ctl_mode_select(struct ctl_scsiio *ctsio)
{
	int param_len, pf, sp;
	int header_size, bd_len;
	int len_left, len_used;
	struct ctl_page_index *page_index;
	struct ctl_lun *lun;
	int control_dev, page_len;
	union ctl_modepage_info *modepage_info;
	int retval;

	pf = 0;
	sp = 0;
	page_len = 0;
	len_used = 0;
	len_left = 0;
	retval = 0;
	bd_len = 0;
	page_index = NULL;

	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;

	if (lun->be_lun->lun_type != T_DIRECT)
		control_dev = 1;
	else
		control_dev = 0;

	switch (ctsio->cdb[0]) {
	case MODE_SELECT_6: {
		struct scsi_mode_select_6 *cdb;

		cdb = (struct scsi_mode_select_6 *)ctsio->cdb;

		pf = (cdb->byte2 & SMS_PF) ? 1 : 0;
		sp = (cdb->byte2 & SMS_SP) ? 1 : 0;

		param_len = cdb->length;
		header_size = sizeof(struct scsi_mode_header_6);
		break;
	}
	case MODE_SELECT_10: {
		struct scsi_mode_select_10 *cdb;

		cdb = (struct scsi_mode_select_10 *)ctsio->cdb;

		pf = (cdb->byte2 & SMS_PF) ? 1 : 0;
		sp = (cdb->byte2 & SMS_SP) ? 1 : 0;

		param_len = scsi_2btoul(cdb->length);
		header_size = sizeof(struct scsi_mode_header_10);
		break;
	}
	default:
		ctl_set_invalid_opcode(ctsio);
		ctl_done((union ctl_io *)ctsio);
		return (CTL_RETVAL_COMPLETE);
		break; /* NOTREACHED */
	}

	/*
	 * From SPC-3:
	 * "A parameter list length of zero indicates that the Data-Out Buffer
	 *  shall be empty. This condition shall not be considered as an error."
	 */
	if (param_len == 0) {
		ctl_set_success(ctsio);
		ctl_done((union ctl_io *)ctsio);
		return (CTL_RETVAL_COMPLETE);
	}

	/*
	 * Since we'll hit this the first time through, prior to
	 * allocation, we don't need to free a data buffer here.
	 */
	if (param_len < header_size) {
		ctl_set_param_len_error(ctsio);
		ctl_done((union ctl_io *)ctsio);
		return (CTL_RETVAL_COMPLETE);
	}

	/*
	 * Allocate the data buffer and grab the user's data.  In theory,
	 * we shouldn't have to sanity check the parameter list length here
	 * because the maximum size is 64K.  We should be able to malloc
	 * that much without too many problems.
	 */
	if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) {
		ctsio->kern_data_ptr = malloc(param_len, M_CTL, M_WAITOK);
		ctsio->kern_data_len = param_len;
		ctsio->kern_total_len = param_len;
		ctsio->kern_data_resid = 0;
		ctsio->kern_rel_offset = 0;
		ctsio->kern_sg_entries = 0;
		ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
		ctsio->be_move_done = ctl_config_move_done;
		ctl_datamove((union ctl_io *)ctsio);

		return (CTL_RETVAL_COMPLETE);
	}

	switch (ctsio->cdb[0]) {
	case MODE_SELECT_6: {
		struct scsi_mode_header_6 *mh6;

		mh6 = (struct scsi_mode_header_6 *)ctsio->kern_data_ptr;
		bd_len = mh6->blk_desc_len;
		break;
	}
	case MODE_SELECT_10: {
		struct scsi_mode_header_10 *mh10;

		mh10 = (struct scsi_mode_header_10 *)ctsio->kern_data_ptr;
		bd_len = scsi_2btoul(mh10->blk_desc_len);
		break;
	}
	default:
		panic("Invalid CDB type %#x", ctsio->cdb[0]);
		break;
	}

	if (param_len < (header_size + bd_len)) {
		free(ctsio->kern_data_ptr, M_CTL);
		ctl_set_param_len_error(ctsio);
		ctl_done((union ctl_io *)ctsio);
		return (CTL_RETVAL_COMPLETE);
	}

	/*
	 * Set the IO_CONT flag, so that if this I/O gets passed to
	 * ctl_config_write_done(), it'll get passed back to
	 * ctl_do_mode_select() for further processing, or completion if
	 * we're all done.
6373 */ 6374 ctsio->io_hdr.flags |= CTL_FLAG_IO_CONT; 6375 ctsio->io_cont = ctl_do_mode_select; 6376 6377 modepage_info = (union ctl_modepage_info *) 6378 ctsio->io_hdr.ctl_private[CTL_PRIV_MODEPAGE].bytes; 6379 6380 memset(modepage_info, 0, sizeof(*modepage_info)); 6381 6382 len_left = param_len - header_size - bd_len; 6383 len_used = header_size + bd_len; 6384 6385 modepage_info->header.len_left = len_left; 6386 modepage_info->header.len_used = len_used; 6387 6388 return (ctl_do_mode_select((union ctl_io *)ctsio)); 6389 } 6390 6391 int 6392 ctl_mode_sense(struct ctl_scsiio *ctsio) 6393 { 6394 struct ctl_lun *lun; 6395 int pc, page_code, dbd, llba, subpage; 6396 int alloc_len, page_len, header_len, total_len; 6397 struct scsi_mode_block_descr *block_desc; 6398 struct ctl_page_index *page_index; 6399 int control_dev; 6400 6401 dbd = 0; 6402 llba = 0; 6403 block_desc = NULL; 6404 page_index = NULL; 6405 6406 CTL_DEBUG_PRINT(("ctl_mode_sense\n")); 6407 6408 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 6409 6410 if (lun->be_lun->lun_type != T_DIRECT) 6411 control_dev = 1; 6412 else 6413 control_dev = 0; 6414 6415 switch (ctsio->cdb[0]) { 6416 case MODE_SENSE_6: { 6417 struct scsi_mode_sense_6 *cdb; 6418 6419 cdb = (struct scsi_mode_sense_6 *)ctsio->cdb; 6420 6421 header_len = sizeof(struct scsi_mode_hdr_6); 6422 if (cdb->byte2 & SMS_DBD) 6423 dbd = 1; 6424 else 6425 header_len += sizeof(struct scsi_mode_block_descr); 6426 6427 pc = (cdb->page & SMS_PAGE_CTRL_MASK) >> 6; 6428 page_code = cdb->page & SMS_PAGE_CODE; 6429 subpage = cdb->subpage; 6430 alloc_len = cdb->length; 6431 break; 6432 } 6433 case MODE_SENSE_10: { 6434 struct scsi_mode_sense_10 *cdb; 6435 6436 cdb = (struct scsi_mode_sense_10 *)ctsio->cdb; 6437 6438 header_len = sizeof(struct scsi_mode_hdr_10); 6439 6440 if (cdb->byte2 & SMS_DBD) 6441 dbd = 1; 6442 else 6443 header_len += sizeof(struct scsi_mode_block_descr); 6444 if (cdb->byte2 & SMS10_LLBAA) 6445 llba = 1; 6446 pc = (cdb->page & SMS_PAGE_CTRL_MASK) >> 6; 6447 page_code = cdb->page & SMS_PAGE_CODE; 6448 subpage = cdb->subpage; 6449 alloc_len = scsi_2btoul(cdb->length); 6450 break; 6451 } 6452 default: 6453 ctl_set_invalid_opcode(ctsio); 6454 ctl_done((union ctl_io *)ctsio); 6455 return (CTL_RETVAL_COMPLETE); 6456 break; /* NOTREACHED */ 6457 } 6458 6459 /* 6460 * We have to make a first pass through to calculate the size of 6461 * the pages that match the user's query. Then we allocate enough 6462 * memory to hold it, and actually copy the data into the buffer. 6463 */ 6464 switch (page_code) { 6465 case SMS_ALL_PAGES_PAGE: { 6466 int i; 6467 6468 page_len = 0; 6469 6470 /* 6471 * At the moment, values other than 0 and 0xff here are 6472 * reserved according to SPC-3. 6473 */ 6474 if ((subpage != SMS_SUBPAGE_PAGE_0) 6475 && (subpage != SMS_SUBPAGE_ALL)) { 6476 ctl_set_invalid_field(ctsio, 6477 /*sks_valid*/ 1, 6478 /*command*/ 1, 6479 /*field*/ 3, 6480 /*bit_valid*/ 0, 6481 /*bit*/ 0); 6482 ctl_done((union ctl_io *)ctsio); 6483 return (CTL_RETVAL_COMPLETE); 6484 } 6485 6486 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { 6487 if ((control_dev != 0) 6488 && (lun->mode_pages.index[i].page_flags & 6489 CTL_PAGE_FLAG_DISK_ONLY)) 6490 continue; 6491 6492 /* 6493 * We don't use this subpage if the user didn't 6494 * request all subpages. 
6495 */ 6496 if ((lun->mode_pages.index[i].subpage != 0) 6497 && (subpage == SMS_SUBPAGE_PAGE_0)) 6498 continue; 6499 6500 #if 0 6501 printf("found page %#x len %d\n", 6502 lun->mode_pages.index[i].page_code & 6503 SMPH_PC_MASK, 6504 lun->mode_pages.index[i].page_len); 6505 #endif 6506 page_len += lun->mode_pages.index[i].page_len; 6507 } 6508 break; 6509 } 6510 default: { 6511 int i; 6512 6513 page_len = 0; 6514 6515 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { 6516 /* Look for the right page code */ 6517 if ((lun->mode_pages.index[i].page_code & 6518 SMPH_PC_MASK) != page_code) 6519 continue; 6520 6521 /* Look for the right subpage or the subpage wildcard*/ 6522 if ((lun->mode_pages.index[i].subpage != subpage) 6523 && (subpage != SMS_SUBPAGE_ALL)) 6524 continue; 6525 6526 /* Make sure the page is supported for this dev type */ 6527 if ((control_dev != 0) 6528 && (lun->mode_pages.index[i].page_flags & 6529 CTL_PAGE_FLAG_DISK_ONLY)) 6530 continue; 6531 6532 #if 0 6533 printf("found page %#x len %d\n", 6534 lun->mode_pages.index[i].page_code & 6535 SMPH_PC_MASK, 6536 lun->mode_pages.index[i].page_len); 6537 #endif 6538 6539 page_len += lun->mode_pages.index[i].page_len; 6540 } 6541 6542 if (page_len == 0) { 6543 ctl_set_invalid_field(ctsio, 6544 /*sks_valid*/ 1, 6545 /*command*/ 1, 6546 /*field*/ 2, 6547 /*bit_valid*/ 1, 6548 /*bit*/ 5); 6549 ctl_done((union ctl_io *)ctsio); 6550 return (CTL_RETVAL_COMPLETE); 6551 } 6552 break; 6553 } 6554 } 6555 6556 total_len = header_len + page_len; 6557 #if 0 6558 printf("header_len = %d, page_len = %d, total_len = %d\n", 6559 header_len, page_len, total_len); 6560 #endif 6561 6562 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 6563 ctsio->kern_sg_entries = 0; 6564 ctsio->kern_data_resid = 0; 6565 ctsio->kern_rel_offset = 0; 6566 if (total_len < alloc_len) { 6567 ctsio->residual = alloc_len - total_len; 6568 ctsio->kern_data_len = total_len; 6569 ctsio->kern_total_len = total_len; 6570 } else { 6571 ctsio->residual = 0; 6572 ctsio->kern_data_len = alloc_len; 6573 ctsio->kern_total_len = alloc_len; 6574 } 6575 6576 switch (ctsio->cdb[0]) { 6577 case MODE_SENSE_6: { 6578 struct scsi_mode_hdr_6 *header; 6579 6580 header = (struct scsi_mode_hdr_6 *)ctsio->kern_data_ptr; 6581 6582 header->datalen = ctl_min(total_len - 1, 254); 6583 6584 if (dbd) 6585 header->block_descr_len = 0; 6586 else 6587 header->block_descr_len = 6588 sizeof(struct scsi_mode_block_descr); 6589 block_desc = (struct scsi_mode_block_descr *)&header[1]; 6590 break; 6591 } 6592 case MODE_SENSE_10: { 6593 struct scsi_mode_hdr_10 *header; 6594 int datalen; 6595 6596 header = (struct scsi_mode_hdr_10 *)ctsio->kern_data_ptr; 6597 6598 datalen = ctl_min(total_len - 2, 65533); 6599 scsi_ulto2b(datalen, header->datalen); 6600 if (dbd) 6601 scsi_ulto2b(0, header->block_descr_len); 6602 else 6603 scsi_ulto2b(sizeof(struct scsi_mode_block_descr), 6604 header->block_descr_len); 6605 block_desc = (struct scsi_mode_block_descr *)&header[1]; 6606 break; 6607 } 6608 default: 6609 panic("invalid CDB type %#x", ctsio->cdb[0]); 6610 break; /* NOTREACHED */ 6611 } 6612 6613 /* 6614 * If we've got a disk, use its blocksize in the block 6615 * descriptor. Otherwise, just set it to 0. 
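	 * (The descriptor filled in here is the standard 8-byte mode
	 *  parameter block descriptor: density code, number of blocks
	 *  and block length.)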
	 */
	if (dbd == 0) {
		if (control_dev == 0)
			scsi_ulto3b(lun->be_lun->blocksize,
				    block_desc->block_len);
		else
			scsi_ulto3b(0, block_desc->block_len);
	}

	switch (page_code) {
	case SMS_ALL_PAGES_PAGE: {
		int i, data_used;

		data_used = header_len;
		for (i = 0; i < CTL_NUM_MODE_PAGES; i++) {
			struct ctl_page_index *page_index;

			page_index = &lun->mode_pages.index[i];

			if ((control_dev != 0)
			 && (page_index->page_flags &
			     CTL_PAGE_FLAG_DISK_ONLY))
				continue;

			/*
			 * We don't use this subpage if the user didn't
			 * request all subpages.  We already checked (above)
			 * to make sure the user only specified a subpage
			 * of 0 or 0xff in the SMS_ALL_PAGES_PAGE case.
			 */
			if ((page_index->subpage != 0)
			 && (subpage == SMS_SUBPAGE_PAGE_0))
				continue;

			/*
			 * Call the handler, if it exists, to update the
			 * page to the latest values.
			 */
			if (page_index->sense_handler != NULL)
				page_index->sense_handler(ctsio, page_index, pc);

			memcpy(ctsio->kern_data_ptr + data_used,
			       page_index->page_data +
			       (page_index->page_len * pc),
			       page_index->page_len);
			data_used += page_index->page_len;
		}
		break;
	}
	default: {
		int i, data_used;

		data_used = header_len;

		for (i = 0; i < CTL_NUM_MODE_PAGES; i++) {
			struct ctl_page_index *page_index;

			page_index = &lun->mode_pages.index[i];

			/* Look for the right page code */
			if ((page_index->page_code & SMPH_PC_MASK) != page_code)
				continue;

			/* Look for the right subpage or the subpage wildcard */
			if ((page_index->subpage != subpage)
			 && (subpage != SMS_SUBPAGE_ALL))
				continue;

			/* Make sure the page is supported for this dev type */
			if ((control_dev != 0)
			 && (page_index->page_flags &
			     CTL_PAGE_FLAG_DISK_ONLY))
				continue;

			/*
			 * Call the handler, if it exists, to update the
			 * page to the latest values.
			 */
			if (page_index->sense_handler != NULL)
				page_index->sense_handler(ctsio, page_index, pc);

			memcpy(ctsio->kern_data_ptr + data_used,
			       page_index->page_data +
			       (page_index->page_len * pc),
			       page_index->page_len);
			data_used += page_index->page_len;
		}
		break;
	}
	}

	ctsio->scsi_status = SCSI_STATUS_OK;

	ctsio->be_move_done = ctl_config_move_done;
	ctl_datamove((union ctl_io *)ctsio);

	return (CTL_RETVAL_COMPLETE);
}

int
ctl_read_capacity(struct ctl_scsiio *ctsio)
{
	struct scsi_read_capacity *cdb;
	struct scsi_read_capacity_data *data;
	struct ctl_lun *lun;
	uint32_t lba;

	CTL_DEBUG_PRINT(("ctl_read_capacity\n"));

	cdb = (struct scsi_read_capacity *)ctsio->cdb;

	lba = scsi_4btoul(cdb->addr);
	if (((cdb->pmi & SRC_PMI) == 0)
	 && (lba != 0)) {
		ctl_set_invalid_field(/*ctsio*/ ctsio,
				      /*sks_valid*/ 1,
				      /*command*/ 1,
				      /*field*/ 2,
				      /*bit_valid*/ 0,
				      /*bit*/ 0);
		ctl_done((union ctl_io *)ctsio);
		return (CTL_RETVAL_COMPLETE);
	}

	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;

	ctsio->kern_data_ptr = malloc(sizeof(*data), M_CTL, M_WAITOK | M_ZERO);
	data = (struct scsi_read_capacity_data *)ctsio->kern_data_ptr;
	ctsio->residual = 0;
	ctsio->kern_data_len = sizeof(*data);
	ctsio->kern_total_len = sizeof(*data);
	ctsio->kern_data_resid = 0;
	ctsio->kern_rel_offset = 0;
	ctsio->kern_sg_entries = 0;

	/*
	 * If the maximum LBA is greater than 0xfffffffe, the user must
	 * issue a SERVICE ACTION IN (16) command, with the read capacity
	 * service action set.
	 */
	if (lun->be_lun->maxlba > 0xfffffffe)
		scsi_ulto4b(0xffffffff, data->addr);
	else
		scsi_ulto4b(lun->be_lun->maxlba, data->addr);

	/*
	 * XXX KDM this may not be 512 bytes...
6763 */ 6764 scsi_ulto4b(lun->be_lun->blocksize, data->length); 6765 6766 ctsio->scsi_status = SCSI_STATUS_OK; 6767 6768 ctsio->be_move_done = ctl_config_move_done; 6769 ctl_datamove((union ctl_io *)ctsio); 6770 6771 return (CTL_RETVAL_COMPLETE); 6772 } 6773 6774 static int 6775 ctl_read_capacity_16(struct ctl_scsiio *ctsio) 6776 { 6777 struct scsi_read_capacity_16 *cdb; 6778 struct scsi_read_capacity_data_long *data; 6779 struct ctl_lun *lun; 6780 uint64_t lba; 6781 uint32_t alloc_len; 6782 6783 CTL_DEBUG_PRINT(("ctl_read_capacity_16\n")); 6784 6785 cdb = (struct scsi_read_capacity_16 *)ctsio->cdb; 6786 6787 alloc_len = scsi_4btoul(cdb->alloc_len); 6788 lba = scsi_8btou64(cdb->addr); 6789 6790 if ((cdb->reladr & SRC16_PMI) 6791 && (lba != 0)) { 6792 ctl_set_invalid_field(/*ctsio*/ ctsio, 6793 /*sks_valid*/ 1, 6794 /*command*/ 1, 6795 /*field*/ 2, 6796 /*bit_valid*/ 0, 6797 /*bit*/ 0); 6798 ctl_done((union ctl_io *)ctsio); 6799 return (CTL_RETVAL_COMPLETE); 6800 } 6801 6802 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 6803 6804 ctsio->kern_data_ptr = malloc(sizeof(*data), M_CTL, M_WAITOK | M_ZERO); 6805 data = (struct scsi_read_capacity_data_long *)ctsio->kern_data_ptr; 6806 6807 if (sizeof(*data) < alloc_len) { 6808 ctsio->residual = alloc_len - sizeof(*data); 6809 ctsio->kern_data_len = sizeof(*data); 6810 ctsio->kern_total_len = sizeof(*data); 6811 } else { 6812 ctsio->residual = 0; 6813 ctsio->kern_data_len = alloc_len; 6814 ctsio->kern_total_len = alloc_len; 6815 } 6816 ctsio->kern_data_resid = 0; 6817 ctsio->kern_rel_offset = 0; 6818 ctsio->kern_sg_entries = 0; 6819 6820 scsi_u64to8b(lun->be_lun->maxlba, data->addr); 6821 /* XXX KDM this may not be 512 bytes... */ 6822 scsi_ulto4b(lun->be_lun->blocksize, data->length); 6823 6824 ctsio->scsi_status = SCSI_STATUS_OK; 6825 6826 ctsio->be_move_done = ctl_config_move_done; 6827 ctl_datamove((union ctl_io *)ctsio); 6828 6829 return (CTL_RETVAL_COMPLETE); 6830 } 6831 6832 int 6833 ctl_service_action_in(struct ctl_scsiio *ctsio) 6834 { 6835 struct scsi_service_action_in *cdb; 6836 int retval; 6837 6838 CTL_DEBUG_PRINT(("ctl_service_action_in\n")); 6839 6840 cdb = (struct scsi_service_action_in *)ctsio->cdb; 6841 6842 retval = CTL_RETVAL_COMPLETE; 6843 6844 switch (cdb->service_action) { 6845 case SRC16_SERVICE_ACTION: 6846 retval = ctl_read_capacity_16(ctsio); 6847 break; 6848 default: 6849 ctl_set_invalid_field(/*ctsio*/ ctsio, 6850 /*sks_valid*/ 1, 6851 /*command*/ 1, 6852 /*field*/ 1, 6853 /*bit_valid*/ 1, 6854 /*bit*/ 4); 6855 ctl_done((union ctl_io *)ctsio); 6856 break; 6857 } 6858 6859 return (retval); 6860 } 6861 6862 int 6863 ctl_maintenance_in(struct ctl_scsiio *ctsio) 6864 { 6865 struct scsi_maintenance_in *cdb; 6866 int retval; 6867 int alloc_len, total_len = 0; 6868 int num_target_port_groups; 6869 struct ctl_lun *lun; 6870 struct ctl_softc *softc; 6871 struct scsi_target_group_data *rtg_ptr; 6872 struct scsi_target_port_group_descriptor *tpg_desc_ptr1, *tpg_desc_ptr2; 6873 struct scsi_target_port_descriptor *tp_desc_ptr1_1, *tp_desc_ptr1_2, 6874 *tp_desc_ptr2_1, *tp_desc_ptr2_2; 6875 6876 CTL_DEBUG_PRINT(("ctl_maintenance_in\n")); 6877 6878 cdb = (struct scsi_maintenance_in *)ctsio->cdb; 6879 softc = control_softc; 6880 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 6881 6882 retval = CTL_RETVAL_COMPLETE; 6883 mtx_lock(&softc->ctl_lock); 6884 6885 if ((cdb->byte2 & SERVICE_ACTION_MASK) != SA_RPRT_TRGT_GRP) { 6886 ctl_set_invalid_field(/*ctsio*/ ctsio, 6887 /*sks_valid*/ 1, 6888 
				      /*command*/ 1,
				      /*field*/ 1,
				      /*bit_valid*/ 1,
				      /*bit*/ 4);
		mtx_unlock(&softc->ctl_lock);
		ctl_done((union ctl_io *)ctsio);
		return (retval);
	}

	if (ctl_is_single)
		num_target_port_groups = NUM_TARGET_PORT_GROUPS - 1;
	else
		num_target_port_groups = NUM_TARGET_PORT_GROUPS;

	total_len = sizeof(struct scsi_target_group_data) +
		sizeof(struct scsi_target_port_group_descriptor) *
		num_target_port_groups +
		sizeof(struct scsi_target_port_descriptor) *
		NUM_PORTS_PER_GRP * num_target_port_groups;

	alloc_len = scsi_4btoul(cdb->length);

	ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);

	ctsio->kern_sg_entries = 0;

	if (total_len < alloc_len) {
		ctsio->residual = alloc_len - total_len;
		ctsio->kern_data_len = total_len;
		ctsio->kern_total_len = total_len;
	} else {
		ctsio->residual = 0;
		ctsio->kern_data_len = alloc_len;
		ctsio->kern_total_len = alloc_len;
	}
	ctsio->kern_data_resid = 0;
	ctsio->kern_rel_offset = 0;

	rtg_ptr = (struct scsi_target_group_data *)ctsio->kern_data_ptr;

	tpg_desc_ptr1 = &rtg_ptr->groups[0];
	tp_desc_ptr1_1 = &tpg_desc_ptr1->descriptors[0];
	tp_desc_ptr1_2 = (struct scsi_target_port_descriptor *)
		&tp_desc_ptr1_1->desc_list[0];

	if (ctl_is_single == 0) {
		tpg_desc_ptr2 = (struct scsi_target_port_group_descriptor *)
			&tp_desc_ptr1_2->desc_list[0];
		tp_desc_ptr2_1 = &tpg_desc_ptr2->descriptors[0];
		tp_desc_ptr2_2 = (struct scsi_target_port_descriptor *)
			&tp_desc_ptr2_1->desc_list[0];
	} else {
		tpg_desc_ptr2 = NULL;
		tp_desc_ptr2_1 = NULL;
		tp_desc_ptr2_2 = NULL;
	}

	scsi_ulto4b(total_len - 4, rtg_ptr->length);
	if (ctl_is_single == 0) {
		if (ctsio->io_hdr.nexus.targ_port < CTL_MAX_PORTS) {
			if (lun->flags & CTL_LUN_PRIMARY_SC) {
				tpg_desc_ptr1->pref_state = TPG_PRIMARY;
				tpg_desc_ptr2->pref_state =
					TPG_ASYMMETRIC_ACCESS_NONOPTIMIZED;
			} else {
				tpg_desc_ptr1->pref_state =
					TPG_ASYMMETRIC_ACCESS_NONOPTIMIZED;
				tpg_desc_ptr2->pref_state = TPG_PRIMARY;
			}
		} else {
			if (lun->flags & CTL_LUN_PRIMARY_SC) {
				tpg_desc_ptr1->pref_state =
					TPG_ASYMMETRIC_ACCESS_NONOPTIMIZED;
				tpg_desc_ptr2->pref_state = TPG_PRIMARY;
			} else {
				tpg_desc_ptr1->pref_state = TPG_PRIMARY;
				tpg_desc_ptr2->pref_state =
					TPG_ASYMMETRIC_ACCESS_NONOPTIMIZED;
			}
		}
	} else {
		tpg_desc_ptr1->pref_state = TPG_PRIMARY;
	}
	tpg_desc_ptr1->support = 0;
	tpg_desc_ptr1->target_port_group[1] = 1;
	tpg_desc_ptr1->status = TPG_IMPLICIT;
	tpg_desc_ptr1->target_port_count = NUM_PORTS_PER_GRP;

	if (ctl_is_single == 0) {
		tpg_desc_ptr2->support = 0;
		tpg_desc_ptr2->target_port_group[1] = 2;
		tpg_desc_ptr2->status = TPG_IMPLICIT;
		tpg_desc_ptr2->target_port_count = NUM_PORTS_PER_GRP;

		tp_desc_ptr1_1->relative_target_port_identifier[1] = 1;
		tp_desc_ptr1_2->relative_target_port_identifier[1] = 2;

		tp_desc_ptr2_1->relative_target_port_identifier[1] = 9;
		tp_desc_ptr2_2->relative_target_port_identifier[1] = 10;
	} else {
		if (ctsio->io_hdr.nexus.targ_port < CTL_MAX_PORTS) {
			tp_desc_ptr1_1->relative_target_port_identifier[1] = 1;
			tp_desc_ptr1_2->relative_target_port_identifier[1] = 2;
		} else {
			tp_desc_ptr1_1->relative_target_port_identifier[1] = 9;
			tp_desc_ptr1_2->relative_target_port_identifier[1] =
10; 6995 } 6996 } 6997 6998 mtx_unlock(&softc->ctl_lock); 6999 7000 ctsio->be_move_done = ctl_config_move_done; 7001 7002 CTL_DEBUG_PRINT(("buf = %x %x %x %x %x %x %x %x\n", 7003 ctsio->kern_data_ptr[0], ctsio->kern_data_ptr[1], 7004 ctsio->kern_data_ptr[2], ctsio->kern_data_ptr[3], 7005 ctsio->kern_data_ptr[4], ctsio->kern_data_ptr[5], 7006 ctsio->kern_data_ptr[6], ctsio->kern_data_ptr[7])); 7007 7008 ctl_datamove((union ctl_io *)ctsio); 7009 return(retval); 7010 } 7011 7012 int 7013 ctl_persistent_reserve_in(struct ctl_scsiio *ctsio) 7014 { 7015 struct scsi_per_res_in *cdb; 7016 int alloc_len, total_len = 0; 7017 /* struct scsi_per_res_in_rsrv in_data; */ 7018 struct ctl_lun *lun; 7019 struct ctl_softc *softc; 7020 7021 CTL_DEBUG_PRINT(("ctl_persistent_reserve_in\n")); 7022 7023 softc = control_softc; 7024 7025 cdb = (struct scsi_per_res_in *)ctsio->cdb; 7026 7027 alloc_len = scsi_2btoul(cdb->length); 7028 7029 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 7030 7031 retry: 7032 mtx_lock(&softc->ctl_lock); 7033 switch (cdb->action) { 7034 case SPRI_RK: /* read keys */ 7035 total_len = sizeof(struct scsi_per_res_in_keys) + 7036 lun->pr_key_count * 7037 sizeof(struct scsi_per_res_key); 7038 break; 7039 case SPRI_RR: /* read reservation */ 7040 if (lun->flags & CTL_LUN_PR_RESERVED) 7041 total_len = sizeof(struct scsi_per_res_in_rsrv); 7042 else 7043 total_len = sizeof(struct scsi_per_res_in_header); 7044 break; 7045 case SPRI_RC: /* report capabilities */ 7046 total_len = sizeof(struct scsi_per_res_cap); 7047 break; 7048 case SPRI_RS: /* read full status */ 7049 default: 7050 mtx_unlock(&softc->ctl_lock); 7051 ctl_set_invalid_field(ctsio, 7052 /*sks_valid*/ 1, 7053 /*command*/ 1, 7054 /*field*/ 1, 7055 /*bit_valid*/ 1, 7056 /*bit*/ 0); 7057 ctl_done((union ctl_io *)ctsio); 7058 return (CTL_RETVAL_COMPLETE); 7059 break; /* NOTREACHED */ 7060 } 7061 mtx_unlock(&softc->ctl_lock); 7062 7063 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 7064 7065 if (total_len < alloc_len) { 7066 ctsio->residual = alloc_len - total_len; 7067 ctsio->kern_data_len = total_len; 7068 ctsio->kern_total_len = total_len; 7069 } else { 7070 ctsio->residual = 0; 7071 ctsio->kern_data_len = alloc_len; 7072 ctsio->kern_total_len = alloc_len; 7073 } 7074 7075 ctsio->kern_data_resid = 0; 7076 ctsio->kern_rel_offset = 0; 7077 ctsio->kern_sg_entries = 0; 7078 7079 mtx_lock(&softc->ctl_lock); 7080 switch (cdb->action) { 7081 case SPRI_RK: { // read keys 7082 struct scsi_per_res_in_keys *res_keys; 7083 int i, key_count; 7084 7085 res_keys = (struct scsi_per_res_in_keys*)ctsio->kern_data_ptr; 7086 7087 /* 7088 * We had to drop the lock to allocate our buffer, which 7089 * leaves time for someone to come in with another 7090 * persistent reservation. (That is unlikely, though, 7091 * since this should be the only persistent reservation 7092 * command active right now.) 
7093 */ 7094 if (total_len != (sizeof(struct scsi_per_res_in_keys) + 7095 (lun->pr_key_count * 7096 sizeof(struct scsi_per_res_key)))){ 7097 mtx_unlock(&softc->ctl_lock); 7098 free(ctsio->kern_data_ptr, M_CTL); 7099 printf("%s: reservation length changed, retrying\n", 7100 __func__); 7101 goto retry; 7102 } 7103 7104 scsi_ulto4b(lun->PRGeneration, res_keys->header.generation); 7105 7106 scsi_ulto4b(sizeof(struct scsi_per_res_key) * 7107 lun->pr_key_count, res_keys->header.length); 7108 7109 for (i = 0, key_count = 0; i < 2*CTL_MAX_INITIATORS; i++) { 7110 if (!lun->per_res[i].registered) 7111 continue; 7112 7113 /* 7114 * We used lun->pr_key_count to calculate the 7115 * size to allocate. If it turns out the number of 7116 * initiators with the registered flag set is 7117 * larger than that (i.e. they haven't been kept in 7118 * sync), we've got a problem. 7119 */ 7120 if (key_count >= lun->pr_key_count) { 7121 #ifdef NEEDTOPORT 7122 csevent_log(CSC_CTL | CSC_SHELF_SW | 7123 CTL_PR_ERROR, 7124 csevent_LogType_Fault, 7125 csevent_AlertLevel_Yellow, 7126 csevent_FRU_ShelfController, 7127 csevent_FRU_Firmware, 7128 csevent_FRU_Unknown, 7129 "registered keys %d >= key " 7130 "count %d", key_count, 7131 lun->pr_key_count); 7132 #endif 7133 key_count++; 7134 continue; 7135 } 7136 memcpy(res_keys->keys[key_count].key, 7137 lun->per_res[i].res_key.key, 7138 ctl_min(sizeof(res_keys->keys[key_count].key), 7139 sizeof(lun->per_res[i].res_key))); 7140 key_count++; 7141 } 7142 break; 7143 } 7144 case SPRI_RR: { // read reservation 7145 struct scsi_per_res_in_rsrv *res; 7146 int tmp_len, header_only; 7147 7148 res = (struct scsi_per_res_in_rsrv *)ctsio->kern_data_ptr; 7149 7150 scsi_ulto4b(lun->PRGeneration, res->header.generation); 7151 7152 if (lun->flags & CTL_LUN_PR_RESERVED) 7153 { 7154 tmp_len = sizeof(struct scsi_per_res_in_rsrv); 7155 scsi_ulto4b(sizeof(struct scsi_per_res_in_rsrv_data), 7156 res->header.length); 7157 header_only = 0; 7158 } else { 7159 tmp_len = sizeof(struct scsi_per_res_in_header); 7160 scsi_ulto4b(0, res->header.length); 7161 header_only = 1; 7162 } 7163 7164 /* 7165 * We had to drop the lock to allocate our buffer, which 7166 * leaves time for someone to come in with another 7167 * persistent reservation. (That is unlikely, though, 7168 * since this should be the only persistent reservation 7169 * command active right now.) 7170 */ 7171 if (tmp_len != total_len) { 7172 mtx_unlock(&softc->ctl_lock); 7173 free(ctsio->kern_data_ptr, M_CTL); 7174 printf("%s: reservation status changed, retrying\n", 7175 __func__); 7176 goto retry; 7177 } 7178 7179 /* 7180 * No reservation held, so we're done. 7181 */ 7182 if (header_only != 0) 7183 break; 7184 7185 /* 7186 * If the registration is an All Registrants type, the key 7187 * is 0, since it doesn't really matter. 
7188 */ 7189 if (lun->pr_res_idx != CTL_PR_ALL_REGISTRANTS) { 7190 memcpy(res->data.reservation, 7191 &lun->per_res[lun->pr_res_idx].res_key, 7192 sizeof(struct scsi_per_res_key)); 7193 } 7194 res->data.scopetype = lun->res_type; 7195 break; 7196 } 7197 case SPRI_RC: //report capabilities 7198 { 7199 struct scsi_per_res_cap *res_cap; 7200 uint16_t type_mask; 7201 7202 res_cap = (struct scsi_per_res_cap *)ctsio->kern_data_ptr; 7203 scsi_ulto2b(sizeof(*res_cap), res_cap->length); 7204 res_cap->flags2 |= SPRI_TMV; 7205 type_mask = SPRI_TM_WR_EX_AR | 7206 SPRI_TM_EX_AC_RO | 7207 SPRI_TM_WR_EX_RO | 7208 SPRI_TM_EX_AC | 7209 SPRI_TM_WR_EX | 7210 SPRI_TM_EX_AC_AR; 7211 scsi_ulto2b(type_mask, res_cap->type_mask); 7212 break; 7213 } 7214 case SPRI_RS: //read full status 7215 default: 7216 /* 7217 * This is a bug, because we just checked for this above, 7218 * and should have returned an error. 7219 */ 7220 panic("Invalid PR type %x", cdb->action); 7221 break; /* NOTREACHED */ 7222 } 7223 mtx_unlock(&softc->ctl_lock); 7224 7225 ctsio->be_move_done = ctl_config_move_done; 7226 7227 CTL_DEBUG_PRINT(("buf = %x %x %x %x %x %x %x %x\n", 7228 ctsio->kern_data_ptr[0], ctsio->kern_data_ptr[1], 7229 ctsio->kern_data_ptr[2], ctsio->kern_data_ptr[3], 7230 ctsio->kern_data_ptr[4], ctsio->kern_data_ptr[5], 7231 ctsio->kern_data_ptr[6], ctsio->kern_data_ptr[7])); 7232 7233 ctl_datamove((union ctl_io *)ctsio); 7234 7235 return (CTL_RETVAL_COMPLETE); 7236 } 7237 7238 /* 7239 * Returns 0 if ctl_persistent_reserve_out() should continue, non-zero if 7240 * it should return. 7241 */ 7242 static int 7243 ctl_pro_preempt(struct ctl_softc *softc, struct ctl_lun *lun, uint64_t res_key, 7244 uint64_t sa_res_key, uint8_t type, uint32_t residx, 7245 struct ctl_scsiio *ctsio, struct scsi_per_res_out *cdb, 7246 struct scsi_per_res_out_parms* param) 7247 { 7248 union ctl_ha_msg persis_io; 7249 int retval, i; 7250 int isc_retval; 7251 7252 retval = 0; 7253 7254 if (sa_res_key == 0) { 7255 mtx_lock(&softc->ctl_lock); 7256 if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS) { 7257 /* validate scope and type */ 7258 if ((cdb->scope_type & SPR_SCOPE_MASK) != 7259 SPR_LU_SCOPE) { 7260 mtx_unlock(&softc->ctl_lock); 7261 ctl_set_invalid_field(/*ctsio*/ ctsio, 7262 /*sks_valid*/ 1, 7263 /*command*/ 1, 7264 /*field*/ 2, 7265 /*bit_valid*/ 1, 7266 /*bit*/ 4); 7267 ctl_done((union ctl_io *)ctsio); 7268 return (1); 7269 } 7270 7271 if (type>8 || type==2 || type==4 || type==0) { 7272 mtx_unlock(&softc->ctl_lock); 7273 ctl_set_invalid_field(/*ctsio*/ ctsio, 7274 /*sks_valid*/ 1, 7275 /*command*/ 1, 7276 /*field*/ 2, 7277 /*bit_valid*/ 1, 7278 /*bit*/ 0); 7279 ctl_done((union ctl_io *)ctsio); 7280 return (1); 7281 } 7282 7283 /* temporarily unregister this nexus */ 7284 lun->per_res[residx].registered = 0; 7285 7286 /* 7287 * Unregister everybody else and build UA for 7288 * them 7289 */ 7290 for(i=0; i < 2*CTL_MAX_INITIATORS; i++) { 7291 if (lun->per_res[i].registered == 0) 7292 continue; 7293 7294 if (!persis_offset 7295 && i <CTL_MAX_INITIATORS) 7296 lun->pending_sense[i].ua_pending |= 7297 CTL_UA_REG_PREEMPT; 7298 else if (persis_offset 7299 && i >= persis_offset) 7300 lun->pending_sense[i-persis_offset 7301 ].ua_pending |= 7302 CTL_UA_REG_PREEMPT; 7303 lun->per_res[i].registered = 0; 7304 memset(&lun->per_res[i].res_key, 0, 7305 sizeof(struct scsi_per_res_key)); 7306 } 7307 lun->per_res[residx].registered = 1; 7308 lun->pr_key_count = 1; 7309 lun->res_type = type; 7310 if (lun->res_type != SPR_TYPE_WR_EX_AR 7311 && lun->res_type != 
SPR_TYPE_EX_AC_AR) 7312 lun->pr_res_idx = residx; 7313 7314 mtx_unlock(&softc->ctl_lock); 7315 /* send msg to other side */ 7316 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 7317 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 7318 persis_io.pr.pr_info.action = CTL_PR_PREEMPT; 7319 persis_io.pr.pr_info.residx = lun->pr_res_idx; 7320 persis_io.pr.pr_info.res_type = type; 7321 memcpy(persis_io.pr.pr_info.sa_res_key, 7322 param->serv_act_res_key, 7323 sizeof(param->serv_act_res_key)); 7324 if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL, 7325 &persis_io, sizeof(persis_io), 0)) > 7326 CTL_HA_STATUS_SUCCESS) { 7327 printf("CTL:Persis Out error returned " 7328 "from ctl_ha_msg_send %d\n", 7329 isc_retval); 7330 } 7331 } else { 7332 /* not all registrants */ 7333 mtx_unlock(&softc->ctl_lock); 7334 free(ctsio->kern_data_ptr, M_CTL); 7335 ctl_set_invalid_field(ctsio, 7336 /*sks_valid*/ 1, 7337 /*command*/ 0, 7338 /*field*/ 8, 7339 /*bit_valid*/ 0, 7340 /*bit*/ 0); 7341 ctl_done((union ctl_io *)ctsio); 7342 return (1); 7343 } 7344 } else if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS 7345 || !(lun->flags & CTL_LUN_PR_RESERVED)) { 7346 int found = 0; 7347 7348 mtx_lock(&softc->ctl_lock); 7349 if (res_key == sa_res_key) { 7350 /* special case */ 7351 /* 7352 * The spec implies this is not good but doesn't 7353 * say what to do. There are two choices either 7354 * generate a res conflict or check condition 7355 * with illegal field in parameter data. Since 7356 * that is what is done when the sa_res_key is 7357 * zero I'll take that approach since this has 7358 * to do with the sa_res_key. 7359 */ 7360 mtx_unlock(&softc->ctl_lock); 7361 free(ctsio->kern_data_ptr, M_CTL); 7362 ctl_set_invalid_field(ctsio, 7363 /*sks_valid*/ 1, 7364 /*command*/ 0, 7365 /*field*/ 8, 7366 /*bit_valid*/ 0, 7367 /*bit*/ 0); 7368 ctl_done((union ctl_io *)ctsio); 7369 return (1); 7370 } 7371 7372 for (i=0; i < 2*CTL_MAX_INITIATORS; i++) { 7373 if (lun->per_res[i].registered 7374 && memcmp(param->serv_act_res_key, 7375 lun->per_res[i].res_key.key, 7376 sizeof(struct scsi_per_res_key)) != 0) 7377 continue; 7378 7379 found = 1; 7380 lun->per_res[i].registered = 0; 7381 memset(&lun->per_res[i].res_key, 0, 7382 sizeof(struct scsi_per_res_key)); 7383 lun->pr_key_count--; 7384 7385 if (!persis_offset 7386 && i < CTL_MAX_INITIATORS) 7387 lun->pending_sense[i].ua_pending |= 7388 CTL_UA_REG_PREEMPT; 7389 else if (persis_offset 7390 && i >= persis_offset) 7391 lun->pending_sense[i-persis_offset].ua_pending|= 7392 CTL_UA_REG_PREEMPT; 7393 } 7394 mtx_unlock(&softc->ctl_lock); 7395 if (!found) { 7396 free(ctsio->kern_data_ptr, M_CTL); 7397 ctl_set_reservation_conflict(ctsio); 7398 ctl_done((union ctl_io *)ctsio); 7399 return (CTL_RETVAL_COMPLETE); 7400 } 7401 /* send msg to other side */ 7402 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 7403 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 7404 persis_io.pr.pr_info.action = CTL_PR_PREEMPT; 7405 persis_io.pr.pr_info.residx = lun->pr_res_idx; 7406 persis_io.pr.pr_info.res_type = type; 7407 memcpy(persis_io.pr.pr_info.sa_res_key, 7408 param->serv_act_res_key, 7409 sizeof(param->serv_act_res_key)); 7410 if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL, 7411 &persis_io, sizeof(persis_io), 0)) > 7412 CTL_HA_STATUS_SUCCESS) { 7413 printf("CTL:Persis Out error returned from " 7414 "ctl_ha_msg_send %d\n", isc_retval); 7415 } 7416 } else { 7417 /* Reserved but not all registrants */ 7418 /* sa_res_key is res holder */ 7419 if (memcmp(param->serv_act_res_key, 7420 lun->per_res[lun->pr_res_idx].res_key.key, 7421 
sizeof(struct scsi_per_res_key)) == 0) { 7422 /* validate scope and type */ 7423 if ((cdb->scope_type & SPR_SCOPE_MASK) != 7424 SPR_LU_SCOPE) { 7425 ctl_set_invalid_field(/*ctsio*/ ctsio, 7426 /*sks_valid*/ 1, 7427 /*command*/ 1, 7428 /*field*/ 2, 7429 /*bit_valid*/ 1, 7430 /*bit*/ 4); 7431 ctl_done((union ctl_io *)ctsio); 7432 return (1); 7433 } 7434 7435 if (type>8 || type==2 || type==4 || type==0) { 7436 ctl_set_invalid_field(/*ctsio*/ ctsio, 7437 /*sks_valid*/ 1, 7438 /*command*/ 1, 7439 /*field*/ 2, 7440 /*bit_valid*/ 1, 7441 /*bit*/ 0); 7442 ctl_done((union ctl_io *)ctsio); 7443 return (1); 7444 } 7445 7446 /* 7447 * Do the following: 7448 * if sa_res_key != res_key remove all 7449 * registrants w/sa_res_key and generate UA 7450 * for these registrants(Registrations 7451 * Preempted) if it wasn't an exclusive 7452 * reservation generate UA(Reservations 7453 * Preempted) for all other registered nexuses 7454 * if the type has changed. Establish the new 7455 * reservation and holder. If res_key and 7456 * sa_res_key are the same do the above 7457 * except don't unregister the res holder. 7458 */ 7459 7460 /* 7461 * Temporarily unregister so it won't get 7462 * removed or UA generated 7463 */ 7464 lun->per_res[residx].registered = 0; 7465 for(i=0; i < 2*CTL_MAX_INITIATORS; i++) { 7466 if (lun->per_res[i].registered == 0) 7467 continue; 7468 7469 if (memcmp(param->serv_act_res_key, 7470 lun->per_res[i].res_key.key, 7471 sizeof(struct scsi_per_res_key)) == 0) { 7472 lun->per_res[i].registered = 0; 7473 memset(&lun->per_res[i].res_key, 7474 0, 7475 sizeof(struct scsi_per_res_key)); 7476 lun->pr_key_count--; 7477 7478 if (!persis_offset 7479 && i < CTL_MAX_INITIATORS) 7480 lun->pending_sense[i 7481 ].ua_pending |= 7482 CTL_UA_REG_PREEMPT; 7483 else if (persis_offset 7484 && i >= persis_offset) 7485 lun->pending_sense[ 7486 i-persis_offset].ua_pending |= 7487 CTL_UA_REG_PREEMPT; 7488 } else if (type != lun->res_type 7489 && (lun->res_type == SPR_TYPE_WR_EX_RO 7490 || lun->res_type ==SPR_TYPE_EX_AC_RO)){ 7491 if (!persis_offset 7492 && i < CTL_MAX_INITIATORS) 7493 lun->pending_sense[i 7494 ].ua_pending |= 7495 CTL_UA_RES_RELEASE; 7496 else if (persis_offset 7497 && i >= persis_offset) 7498 lun->pending_sense[ 7499 i-persis_offset 7500 ].ua_pending |= 7501 CTL_UA_RES_RELEASE; 7502 } 7503 } 7504 lun->per_res[residx].registered = 1; 7505 lun->res_type = type; 7506 if (lun->res_type != SPR_TYPE_WR_EX_AR 7507 && lun->res_type != SPR_TYPE_EX_AC_AR) 7508 lun->pr_res_idx = residx; 7509 else 7510 lun->pr_res_idx = 7511 CTL_PR_ALL_REGISTRANTS; 7512 7513 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 7514 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 7515 persis_io.pr.pr_info.action = CTL_PR_PREEMPT; 7516 persis_io.pr.pr_info.residx = lun->pr_res_idx; 7517 persis_io.pr.pr_info.res_type = type; 7518 memcpy(persis_io.pr.pr_info.sa_res_key, 7519 param->serv_act_res_key, 7520 sizeof(param->serv_act_res_key)); 7521 if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL, 7522 &persis_io, sizeof(persis_io), 0)) > 7523 CTL_HA_STATUS_SUCCESS) { 7524 printf("CTL:Persis Out error returned " 7525 "from ctl_ha_msg_send %d\n", 7526 isc_retval); 7527 } 7528 } else { 7529 /* 7530 * sa_res_key is not the res holder just 7531 * remove registrants 7532 */ 7533 int found=0; 7534 mtx_lock(&softc->ctl_lock); 7535 7536 for (i=0; i < 2*CTL_MAX_INITIATORS; i++) { 7537 if (memcmp(param->serv_act_res_key, 7538 lun->per_res[i].res_key.key, 7539 sizeof(struct scsi_per_res_key)) != 0) 7540 continue; 7541 7542 found = 1; 7543 
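				/*
				 * This registrant's key matches the service
				 * action key: drop its registration, clear the
				 * key and queue a Registrations Preempted UA
				 * for it below.
				 */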
lun->per_res[i].registered = 0; 7544 memset(&lun->per_res[i].res_key, 0, 7545 sizeof(struct scsi_per_res_key)); 7546 lun->pr_key_count--; 7547 7548 if (!persis_offset 7549 && i < CTL_MAX_INITIATORS) 7550 lun->pending_sense[i].ua_pending |= 7551 CTL_UA_REG_PREEMPT; 7552 else if (persis_offset 7553 && i >= persis_offset) 7554 lun->pending_sense[ 7555 i-persis_offset].ua_pending |= 7556 CTL_UA_REG_PREEMPT; 7557 } 7558 7559 if (!found) { 7560 mtx_unlock(&softc->ctl_lock); 7561 free(ctsio->kern_data_ptr, M_CTL); 7562 ctl_set_reservation_conflict(ctsio); 7563 ctl_done((union ctl_io *)ctsio); 7564 return (1); 7565 } 7566 mtx_unlock(&softc->ctl_lock); 7567 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 7568 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 7569 persis_io.pr.pr_info.action = CTL_PR_PREEMPT; 7570 persis_io.pr.pr_info.residx = lun->pr_res_idx; 7571 persis_io.pr.pr_info.res_type = type; 7572 memcpy(persis_io.pr.pr_info.sa_res_key, 7573 param->serv_act_res_key, 7574 sizeof(param->serv_act_res_key)); 7575 if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL, 7576 &persis_io, sizeof(persis_io), 0)) > 7577 CTL_HA_STATUS_SUCCESS) { 7578 printf("CTL:Persis Out error returned " 7579 "from ctl_ha_msg_send %d\n", 7580 isc_retval); 7581 } 7582 } 7583 } 7584 7585 lun->PRGeneration++; 7586 7587 return (retval); 7588 } 7589 7590 static void 7591 ctl_pro_preempt_other(struct ctl_lun *lun, union ctl_ha_msg *msg) 7592 { 7593 int i; 7594 7595 if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS 7596 || lun->pr_res_idx == CTL_PR_NO_RESERVATION 7597 || memcmp(&lun->per_res[lun->pr_res_idx].res_key, 7598 msg->pr.pr_info.sa_res_key, 7599 sizeof(struct scsi_per_res_key)) != 0) { 7600 uint64_t sa_res_key; 7601 sa_res_key = scsi_8btou64(msg->pr.pr_info.sa_res_key); 7602 7603 if (sa_res_key == 0) { 7604 /* temporarily unregister this nexus */ 7605 lun->per_res[msg->pr.pr_info.residx].registered = 0; 7606 7607 /* 7608 * Unregister everybody else and build UA for 7609 * them 7610 */ 7611 for(i=0; i < 2*CTL_MAX_INITIATORS; i++) { 7612 if (lun->per_res[i].registered == 0) 7613 continue; 7614 7615 if (!persis_offset 7616 && i < CTL_MAX_INITIATORS) 7617 lun->pending_sense[i].ua_pending |= 7618 CTL_UA_REG_PREEMPT; 7619 else if (persis_offset && i >= persis_offset) 7620 lun->pending_sense[i - 7621 persis_offset].ua_pending |= 7622 CTL_UA_REG_PREEMPT; 7623 lun->per_res[i].registered = 0; 7624 memset(&lun->per_res[i].res_key, 0, 7625 sizeof(struct scsi_per_res_key)); 7626 } 7627 7628 lun->per_res[msg->pr.pr_info.residx].registered = 1; 7629 lun->pr_key_count = 1; 7630 lun->res_type = msg->pr.pr_info.res_type; 7631 if (lun->res_type != SPR_TYPE_WR_EX_AR 7632 && lun->res_type != SPR_TYPE_EX_AC_AR) 7633 lun->pr_res_idx = msg->pr.pr_info.residx; 7634 } else { 7635 for (i=0; i < 2*CTL_MAX_INITIATORS; i++) { 7636 if (memcmp(msg->pr.pr_info.sa_res_key, 7637 lun->per_res[i].res_key.key, 7638 sizeof(struct scsi_per_res_key)) != 0) 7639 continue; 7640 7641 lun->per_res[i].registered = 0; 7642 memset(&lun->per_res[i].res_key, 0, 7643 sizeof(struct scsi_per_res_key)); 7644 lun->pr_key_count--; 7645 7646 if (!persis_offset 7647 && i < persis_offset) 7648 lun->pending_sense[i].ua_pending |= 7649 CTL_UA_REG_PREEMPT; 7650 else if (persis_offset 7651 && i >= persis_offset) 7652 lun->pending_sense[i - 7653 persis_offset].ua_pending |= 7654 CTL_UA_REG_PREEMPT; 7655 } 7656 } 7657 } else { 7658 /* 7659 * Temporarily unregister so it won't get removed 7660 * or UA generated 7661 */ 7662 lun->per_res[msg->pr.pr_info.residx].registered = 0; 7663 for (i=0; i < 
2*CTL_MAX_INITIATORS; i++) {
			if (lun->per_res[i].registered == 0)
				continue;

			if (memcmp(msg->pr.pr_info.sa_res_key,
			    lun->per_res[i].res_key.key,
			    sizeof(struct scsi_per_res_key)) == 0) {
				lun->per_res[i].registered = 0;
				memset(&lun->per_res[i].res_key, 0,
				       sizeof(struct scsi_per_res_key));
				lun->pr_key_count--;
				if (!persis_offset
				 && i < CTL_MAX_INITIATORS)
					lun->pending_sense[i].ua_pending |=
						CTL_UA_REG_PREEMPT;
				else if (persis_offset
				      && i >= persis_offset)
					lun->pending_sense[i -
						persis_offset].ua_pending |=
						CTL_UA_REG_PREEMPT;
			} else if (msg->pr.pr_info.res_type != lun->res_type
				&& (lun->res_type == SPR_TYPE_WR_EX_RO
				 || lun->res_type == SPR_TYPE_EX_AC_RO)) {
				if (!persis_offset
				 && i < CTL_MAX_INITIATORS)
					lun->pending_sense[i].ua_pending |=
						CTL_UA_RES_RELEASE;
				else if (persis_offset
				      && i >= persis_offset)
					lun->pending_sense[i -
						persis_offset].ua_pending |=
						CTL_UA_RES_RELEASE;
			}
		}
		lun->per_res[msg->pr.pr_info.residx].registered = 1;
		lun->res_type = msg->pr.pr_info.res_type;
		if (lun->res_type != SPR_TYPE_WR_EX_AR
		 && lun->res_type != SPR_TYPE_EX_AC_AR)
			lun->pr_res_idx = msg->pr.pr_info.residx;
		else
			lun->pr_res_idx = CTL_PR_ALL_REGISTRANTS;
	}
	lun->PRGeneration++;

}


int
ctl_persistent_reserve_out(struct ctl_scsiio *ctsio)
{
	int retval;
	int isc_retval;
	u_int32_t param_len;
	struct scsi_per_res_out *cdb;
	struct ctl_lun *lun;
	struct scsi_per_res_out_parms* param;
	struct ctl_softc *softc;
	uint32_t residx;
	uint64_t res_key, sa_res_key;
	uint8_t type;
	union ctl_ha_msg persis_io;
	int i;

	CTL_DEBUG_PRINT(("ctl_persistent_reserve_out\n"));

	retval = CTL_RETVAL_COMPLETE;

	softc = control_softc;

	cdb = (struct scsi_per_res_out *)ctsio->cdb;
	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;

	/*
	 * We only support whole-LUN scope.  The scope & type are ignored for
	 * register, register and ignore existing key and clear.
	 * We sometimes ignore scope and type on preempts too!!
	 * Verify reservation type here as well.
7741 */ 7742 type = cdb->scope_type & SPR_TYPE_MASK; 7743 if ((cdb->action == SPRO_RESERVE) 7744 || (cdb->action == SPRO_RELEASE)) { 7745 if ((cdb->scope_type & SPR_SCOPE_MASK) != SPR_LU_SCOPE) { 7746 ctl_set_invalid_field(/*ctsio*/ ctsio, 7747 /*sks_valid*/ 1, 7748 /*command*/ 1, 7749 /*field*/ 2, 7750 /*bit_valid*/ 1, 7751 /*bit*/ 4); 7752 ctl_done((union ctl_io *)ctsio); 7753 return (CTL_RETVAL_COMPLETE); 7754 } 7755 7756 if (type>8 || type==2 || type==4 || type==0) { 7757 ctl_set_invalid_field(/*ctsio*/ ctsio, 7758 /*sks_valid*/ 1, 7759 /*command*/ 1, 7760 /*field*/ 2, 7761 /*bit_valid*/ 1, 7762 /*bit*/ 0); 7763 ctl_done((union ctl_io *)ctsio); 7764 return (CTL_RETVAL_COMPLETE); 7765 } 7766 } 7767 7768 switch (cdb->action & SPRO_ACTION_MASK) { 7769 case SPRO_REGISTER: 7770 case SPRO_RESERVE: 7771 case SPRO_RELEASE: 7772 case SPRO_CLEAR: 7773 case SPRO_PREEMPT: 7774 case SPRO_REG_IGNO: 7775 break; 7776 case SPRO_REG_MOVE: 7777 case SPRO_PRE_ABO: 7778 default: 7779 ctl_set_invalid_field(/*ctsio*/ ctsio, 7780 /*sks_valid*/ 1, 7781 /*command*/ 1, 7782 /*field*/ 1, 7783 /*bit_valid*/ 1, 7784 /*bit*/ 0); 7785 ctl_done((union ctl_io *)ctsio); 7786 return (CTL_RETVAL_COMPLETE); 7787 break; /* NOTREACHED */ 7788 } 7789 7790 param_len = scsi_4btoul(cdb->length); 7791 7792 if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) { 7793 ctsio->kern_data_ptr = malloc(param_len, M_CTL, M_WAITOK); 7794 ctsio->kern_data_len = param_len; 7795 ctsio->kern_total_len = param_len; 7796 ctsio->kern_data_resid = 0; 7797 ctsio->kern_rel_offset = 0; 7798 ctsio->kern_sg_entries = 0; 7799 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 7800 ctsio->be_move_done = ctl_config_move_done; 7801 ctl_datamove((union ctl_io *)ctsio); 7802 7803 return (CTL_RETVAL_COMPLETE); 7804 } 7805 7806 param = (struct scsi_per_res_out_parms *)ctsio->kern_data_ptr; 7807 7808 residx = ctl_get_resindex(&ctsio->io_hdr.nexus); 7809 res_key = scsi_8btou64(param->res_key.key); 7810 sa_res_key = scsi_8btou64(param->serv_act_res_key); 7811 7812 /* 7813 * Validate the reservation key here except for SPRO_REG_IGNO 7814 * This must be done for all other service actions 7815 */ 7816 if ((cdb->action & SPRO_ACTION_MASK) != SPRO_REG_IGNO) { 7817 mtx_lock(&softc->ctl_lock); 7818 if (lun->per_res[residx].registered) { 7819 if (memcmp(param->res_key.key, 7820 lun->per_res[residx].res_key.key, 7821 ctl_min(sizeof(param->res_key), 7822 sizeof(lun->per_res[residx].res_key))) != 0) { 7823 /* 7824 * The current key passed in doesn't match 7825 * the one the initiator previously 7826 * registered. 7827 */ 7828 mtx_unlock(&softc->ctl_lock); 7829 free(ctsio->kern_data_ptr, M_CTL); 7830 ctl_set_reservation_conflict(ctsio); 7831 ctl_done((union ctl_io *)ctsio); 7832 return (CTL_RETVAL_COMPLETE); 7833 } 7834 } else if ((cdb->action & SPRO_ACTION_MASK) != SPRO_REGISTER) { 7835 /* 7836 * We are not registered 7837 */ 7838 mtx_unlock(&softc->ctl_lock); 7839 free(ctsio->kern_data_ptr, M_CTL); 7840 ctl_set_reservation_conflict(ctsio); 7841 ctl_done((union ctl_io *)ctsio); 7842 return (CTL_RETVAL_COMPLETE); 7843 } else if (res_key != 0) { 7844 /* 7845 * We are not registered and trying to register but 7846 * the register key isn't zero. 
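			 * (For a plain REGISTER, SPC-3 calls for a
			 *  RESERVATION CONFLICT here, which is what we
			 *  return below.)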
7847 */ 7848 mtx_unlock(&softc->ctl_lock); 7849 free(ctsio->kern_data_ptr, M_CTL); 7850 ctl_set_reservation_conflict(ctsio); 7851 ctl_done((union ctl_io *)ctsio); 7852 return (CTL_RETVAL_COMPLETE); 7853 } 7854 mtx_unlock(&softc->ctl_lock); 7855 } 7856 7857 switch (cdb->action & SPRO_ACTION_MASK) { 7858 case SPRO_REGISTER: 7859 case SPRO_REG_IGNO: { 7860 7861 #if 0 7862 printf("Registration received\n"); 7863 #endif 7864 7865 /* 7866 * We don't support any of these options, as we report in 7867 * the read capabilities request (see 7868 * ctl_persistent_reserve_in(), above). 7869 */ 7870 if ((param->flags & SPR_SPEC_I_PT) 7871 || (param->flags & SPR_ALL_TG_PT) 7872 || (param->flags & SPR_APTPL)) { 7873 int bit_ptr; 7874 7875 if (param->flags & SPR_APTPL) 7876 bit_ptr = 0; 7877 else if (param->flags & SPR_ALL_TG_PT) 7878 bit_ptr = 2; 7879 else /* SPR_SPEC_I_PT */ 7880 bit_ptr = 3; 7881 7882 free(ctsio->kern_data_ptr, M_CTL); 7883 ctl_set_invalid_field(ctsio, 7884 /*sks_valid*/ 1, 7885 /*command*/ 0, 7886 /*field*/ 20, 7887 /*bit_valid*/ 1, 7888 /*bit*/ bit_ptr); 7889 ctl_done((union ctl_io *)ctsio); 7890 return (CTL_RETVAL_COMPLETE); 7891 } 7892 7893 mtx_lock(&softc->ctl_lock); 7894 7895 /* 7896 * The initiator wants to clear the 7897 * key/unregister. 7898 */ 7899 if (sa_res_key == 0) { 7900 if ((res_key == 0 7901 && (cdb->action & SPRO_ACTION_MASK) == SPRO_REGISTER) 7902 || ((cdb->action & SPRO_ACTION_MASK) == SPRO_REG_IGNO 7903 && !lun->per_res[residx].registered)) { 7904 mtx_unlock(&softc->ctl_lock); 7905 goto done; 7906 } 7907 7908 lun->per_res[residx].registered = 0; 7909 memset(&lun->per_res[residx].res_key, 7910 0, sizeof(lun->per_res[residx].res_key)); 7911 lun->pr_key_count--; 7912 7913 if (residx == lun->pr_res_idx) { 7914 lun->flags &= ~CTL_LUN_PR_RESERVED; 7915 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 7916 7917 if ((lun->res_type == SPR_TYPE_WR_EX_RO 7918 || lun->res_type == SPR_TYPE_EX_AC_RO) 7919 && lun->pr_key_count) { 7920 /* 7921 * If the reservation is a registrants 7922 * only type we need to generate a UA 7923 * for other registered inits. The 7924 * sense code should be RESERVATIONS 7925 * RELEASED 7926 */ 7927 7928 for (i = 0; i < CTL_MAX_INITIATORS;i++){ 7929 if (lun->per_res[ 7930 i+persis_offset].registered 7931 == 0) 7932 continue; 7933 lun->pending_sense[i 7934 ].ua_pending |= 7935 CTL_UA_RES_RELEASE; 7936 } 7937 } 7938 lun->res_type = 0; 7939 } else if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS) { 7940 if (lun->pr_key_count==0) { 7941 lun->flags &= ~CTL_LUN_PR_RESERVED; 7942 lun->res_type = 0; 7943 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 7944 } 7945 } 7946 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 7947 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 7948 persis_io.pr.pr_info.action = CTL_PR_UNREG_KEY; 7949 persis_io.pr.pr_info.residx = residx; 7950 if ((isc_retval = ctl_ha_msg_send(CTL_HA_CHAN_CTL, 7951 &persis_io, sizeof(persis_io), 0 )) > 7952 CTL_HA_STATUS_SUCCESS) { 7953 printf("CTL:Persis Out error returned from " 7954 "ctl_ha_msg_send %d\n", isc_retval); 7955 } 7956 mtx_unlock(&softc->ctl_lock); 7957 } else /* sa_res_key != 0 */ { 7958 7959 /* 7960 * If we aren't registered currently then increment 7961 * the key count and set the registered flag. 
7962 */ 7963 if (!lun->per_res[residx].registered) { 7964 lun->pr_key_count++; 7965 lun->per_res[residx].registered = 1; 7966 } 7967 7968 memcpy(&lun->per_res[residx].res_key, 7969 param->serv_act_res_key, 7970 ctl_min(sizeof(param->serv_act_res_key), 7971 sizeof(lun->per_res[residx].res_key))); 7972 7973 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 7974 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 7975 persis_io.pr.pr_info.action = CTL_PR_REG_KEY; 7976 persis_io.pr.pr_info.residx = residx; 7977 memcpy(persis_io.pr.pr_info.sa_res_key, 7978 param->serv_act_res_key, 7979 sizeof(param->serv_act_res_key)); 7980 mtx_unlock(&softc->ctl_lock); 7981 if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL, 7982 &persis_io, sizeof(persis_io), 0)) > 7983 CTL_HA_STATUS_SUCCESS) { 7984 printf("CTL:Persis Out error returned from " 7985 "ctl_ha_msg_send %d\n", isc_retval); 7986 } 7987 } 7988 lun->PRGeneration++; 7989 7990 break; 7991 } 7992 case SPRO_RESERVE: 7993 #if 0 7994 printf("Reserve executed type %d\n", type); 7995 #endif 7996 mtx_lock(&softc->ctl_lock); 7997 if (lun->flags & CTL_LUN_PR_RESERVED) { 7998 /* 7999 * if this isn't the reservation holder and it's 8000 * not a "all registrants" type or if the type is 8001 * different then we have a conflict 8002 */ 8003 if ((lun->pr_res_idx != residx 8004 && lun->pr_res_idx != CTL_PR_ALL_REGISTRANTS) 8005 || lun->res_type != type) { 8006 mtx_unlock(&softc->ctl_lock); 8007 free(ctsio->kern_data_ptr, M_CTL); 8008 ctl_set_reservation_conflict(ctsio); 8009 ctl_done((union ctl_io *)ctsio); 8010 return (CTL_RETVAL_COMPLETE); 8011 } 8012 } else /* create a reservation */ { 8013 /* 8014 * If it's not an "all registrants" type record 8015 * reservation holder 8016 */ 8017 if (type != SPR_TYPE_WR_EX_AR 8018 && type != SPR_TYPE_EX_AC_AR) 8019 lun->pr_res_idx = residx; /* Res holder */ 8020 else 8021 lun->pr_res_idx = CTL_PR_ALL_REGISTRANTS; 8022 8023 lun->flags |= CTL_LUN_PR_RESERVED; 8024 lun->res_type = type; 8025 8026 mtx_unlock(&softc->ctl_lock); 8027 8028 /* send msg to other side */ 8029 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 8030 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 8031 persis_io.pr.pr_info.action = CTL_PR_RESERVE; 8032 persis_io.pr.pr_info.residx = lun->pr_res_idx; 8033 persis_io.pr.pr_info.res_type = type; 8034 if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL, 8035 &persis_io, sizeof(persis_io), 0)) > 8036 CTL_HA_STATUS_SUCCESS) { 8037 printf("CTL:Persis Out error returned from " 8038 "ctl_ha_msg_send %d\n", isc_retval); 8039 } 8040 } 8041 break; 8042 8043 case SPRO_RELEASE: 8044 mtx_lock(&softc->ctl_lock); 8045 if ((lun->flags & CTL_LUN_PR_RESERVED) == 0) { 8046 /* No reservation exists return good status */ 8047 mtx_unlock(&softc->ctl_lock); 8048 goto done; 8049 } 8050 /* 8051 * Is this nexus a reservation holder? 8052 */ 8053 if (lun->pr_res_idx != residx 8054 && lun->pr_res_idx != CTL_PR_ALL_REGISTRANTS) { 8055 /* 8056 * not a res holder return good status but 8057 * do nothing 8058 */ 8059 mtx_unlock(&softc->ctl_lock); 8060 goto done; 8061 } 8062 8063 if (lun->res_type != type) { 8064 mtx_unlock(&softc->ctl_lock); 8065 free(ctsio->kern_data_ptr, M_CTL); 8066 ctl_set_illegal_pr_release(ctsio); 8067 ctl_done((union ctl_io *)ctsio); 8068 return (CTL_RETVAL_COMPLETE); 8069 } 8070 8071 /* okay to release */ 8072 lun->flags &= ~CTL_LUN_PR_RESERVED; 8073 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8074 lun->res_type = 0; 8075 8076 /* 8077 * if this isn't an exclusive access 8078 * res generate UA for all other 8079 * registrants. 
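		 * (SPC-3 calls for a RESERVATIONS RELEASED unit attention
		 *  on release of a registrants-only or all-registrants
		 *  type reservation, which is what the loop below queues.)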
8080 */ 8081 if (type != SPR_TYPE_EX_AC 8082 && type != SPR_TYPE_WR_EX) { 8083 /* 8084 * temporarily unregister so we don't generate UA 8085 */ 8086 lun->per_res[residx].registered = 0; 8087 8088 for (i = 0; i < CTL_MAX_INITIATORS; i++) { 8089 if (lun->per_res[i+persis_offset].registered 8090 == 0) 8091 continue; 8092 lun->pending_sense[i].ua_pending |= 8093 CTL_UA_RES_RELEASE; 8094 } 8095 8096 lun->per_res[residx].registered = 1; 8097 } 8098 mtx_unlock(&softc->ctl_lock); 8099 /* Send msg to other side */ 8100 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 8101 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 8102 persis_io.pr.pr_info.action = CTL_PR_RELEASE; 8103 if ((isc_retval=ctl_ha_msg_send( CTL_HA_CHAN_CTL, &persis_io, 8104 sizeof(persis_io), 0)) > CTL_HA_STATUS_SUCCESS) { 8105 printf("CTL:Persis Out error returned from " 8106 "ctl_ha_msg_send %d\n", isc_retval); 8107 } 8108 break; 8109 8110 case SPRO_CLEAR: 8111 /* send msg to other side */ 8112 8113 mtx_lock(&softc->ctl_lock); 8114 lun->flags &= ~CTL_LUN_PR_RESERVED; 8115 lun->res_type = 0; 8116 lun->pr_key_count = 0; 8117 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8118 8119 8120 memset(&lun->per_res[residx].res_key, 8121 0, sizeof(lun->per_res[residx].res_key)); 8122 lun->per_res[residx].registered = 0; 8123 8124 for (i=0; i < 2*CTL_MAX_INITIATORS; i++) 8125 if (lun->per_res[i].registered) { 8126 if (!persis_offset && i < CTL_MAX_INITIATORS) 8127 lun->pending_sense[i].ua_pending |= 8128 CTL_UA_RES_PREEMPT; 8129 else if (persis_offset && i >= persis_offset) 8130 lun->pending_sense[i-persis_offset 8131 ].ua_pending |= CTL_UA_RES_PREEMPT; 8132 8133 memset(&lun->per_res[i].res_key, 8134 0, sizeof(struct scsi_per_res_key)); 8135 lun->per_res[i].registered = 0; 8136 } 8137 lun->PRGeneration++; 8138 mtx_unlock(&softc->ctl_lock); 8139 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 8140 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 8141 persis_io.pr.pr_info.action = CTL_PR_CLEAR; 8142 if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, 8143 sizeof(persis_io), 0)) > CTL_HA_STATUS_SUCCESS) { 8144 printf("CTL:Persis Out error returned from " 8145 "ctl_ha_msg_send %d\n", isc_retval); 8146 } 8147 break; 8148 8149 case SPRO_PREEMPT: { 8150 int nretval; 8151 8152 nretval = ctl_pro_preempt(softc, lun, res_key, sa_res_key, type, 8153 residx, ctsio, cdb, param); 8154 if (nretval != 0) 8155 return (CTL_RETVAL_COMPLETE); 8156 break; 8157 } 8158 case SPRO_REG_MOVE: 8159 case SPRO_PRE_ABO: 8160 default: 8161 free(ctsio->kern_data_ptr, M_CTL); 8162 ctl_set_invalid_field(/*ctsio*/ ctsio, 8163 /*sks_valid*/ 1, 8164 /*command*/ 1, 8165 /*field*/ 1, 8166 /*bit_valid*/ 1, 8167 /*bit*/ 0); 8168 ctl_done((union ctl_io *)ctsio); 8169 return (CTL_RETVAL_COMPLETE); 8170 break; /* NOTREACHED */ 8171 } 8172 8173 done: 8174 free(ctsio->kern_data_ptr, M_CTL); 8175 ctl_set_success(ctsio); 8176 ctl_done((union ctl_io *)ctsio); 8177 8178 return (retval); 8179 } 8180 8181 /* 8182 * This routine is for handling a message from the other SC pertaining to 8183 * persistent reserve out. All the error checking will have been done 8184 * so only perorming the action need be done here to keep the two 8185 * in sync. 
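 * These are the CTL_MSG_PERS_ACTION messages sent via ctl_ha_msg_send()
 * by the persistent reserve out handler above.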
8186 */ 8187 static void 8188 ctl_hndl_per_res_out_on_other_sc(union ctl_ha_msg *msg) 8189 { 8190 struct ctl_lun *lun; 8191 struct ctl_softc *softc; 8192 int i; 8193 8194 softc = control_softc; 8195 8196 mtx_lock(&softc->ctl_lock); 8197 8198 lun = softc->ctl_luns[msg->hdr.nexus.targ_lun]; 8199 switch(msg->pr.pr_info.action) { 8200 case CTL_PR_REG_KEY: 8201 if (!lun->per_res[msg->pr.pr_info.residx].registered) { 8202 lun->per_res[msg->pr.pr_info.residx].registered = 1; 8203 lun->pr_key_count++; 8204 } 8205 lun->PRGeneration++; 8206 memcpy(&lun->per_res[msg->pr.pr_info.residx].res_key, 8207 msg->pr.pr_info.sa_res_key, 8208 sizeof(struct scsi_per_res_key)); 8209 break; 8210 8211 case CTL_PR_UNREG_KEY: 8212 lun->per_res[msg->pr.pr_info.residx].registered = 0; 8213 memset(&lun->per_res[msg->pr.pr_info.residx].res_key, 8214 0, sizeof(struct scsi_per_res_key)); 8215 lun->pr_key_count--; 8216 8217 /* XXX Need to see if the reservation has been released */ 8218 /* if so do we need to generate UA? */ 8219 if (msg->pr.pr_info.residx == lun->pr_res_idx) { 8220 lun->flags &= ~CTL_LUN_PR_RESERVED; 8221 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8222 8223 if ((lun->res_type == SPR_TYPE_WR_EX_RO 8224 || lun->res_type == SPR_TYPE_EX_AC_RO) 8225 && lun->pr_key_count) { 8226 /* 8227 * If the reservation is a registrants 8228 * only type we need to generate a UA 8229 * for other registered inits. The 8230 * sense code should be RESERVATIONS 8231 * RELEASED 8232 */ 8233 8234 for (i = 0; i < CTL_MAX_INITIATORS; i++) { 8235 if (lun->per_res[i+ 8236 persis_offset].registered == 0) 8237 continue; 8238 8239 lun->pending_sense[i 8240 ].ua_pending |= 8241 CTL_UA_RES_RELEASE; 8242 } 8243 } 8244 lun->res_type = 0; 8245 } else if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS) { 8246 if (lun->pr_key_count==0) { 8247 lun->flags &= ~CTL_LUN_PR_RESERVED; 8248 lun->res_type = 0; 8249 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8250 } 8251 } 8252 lun->PRGeneration++; 8253 break; 8254 8255 case CTL_PR_RESERVE: 8256 lun->flags |= CTL_LUN_PR_RESERVED; 8257 lun->res_type = msg->pr.pr_info.res_type; 8258 lun->pr_res_idx = msg->pr.pr_info.residx; 8259 8260 break; 8261 8262 case CTL_PR_RELEASE: 8263 /* 8264 * if this isn't an exclusive access res generate UA for all 8265 * other registrants. 
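 * This mirrors the unit attention handling done locally in the
 * SPRO_RELEASE case above.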
8266 */ 8267 if (lun->res_type != SPR_TYPE_EX_AC 8268 && lun->res_type != SPR_TYPE_WR_EX) { 8269 for (i = 0; i < CTL_MAX_INITIATORS; i++) 8270 if (lun->per_res[i+persis_offset].registered) 8271 lun->pending_sense[i].ua_pending |= 8272 CTL_UA_RES_RELEASE; 8273 } 8274 8275 lun->flags &= ~CTL_LUN_PR_RESERVED; 8276 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8277 lun->res_type = 0; 8278 break; 8279 8280 case CTL_PR_PREEMPT: 8281 ctl_pro_preempt_other(lun, msg); 8282 break; 8283 case CTL_PR_CLEAR: 8284 lun->flags &= ~CTL_LUN_PR_RESERVED; 8285 lun->res_type = 0; 8286 lun->pr_key_count = 0; 8287 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8288 8289 for (i=0; i < 2*CTL_MAX_INITIATORS; i++) { 8290 if (lun->per_res[i].registered == 0) 8291 continue; 8292 if (!persis_offset 8293 && i < CTL_MAX_INITIATORS) 8294 lun->pending_sense[i].ua_pending |= 8295 CTL_UA_RES_PREEMPT; 8296 else if (persis_offset 8297 && i >= persis_offset) 8298 lun->pending_sense[i-persis_offset].ua_pending|= 8299 CTL_UA_RES_PREEMPT; 8300 memset(&lun->per_res[i].res_key, 0, 8301 sizeof(struct scsi_per_res_key)); 8302 lun->per_res[i].registered = 0; 8303 } 8304 lun->PRGeneration++; 8305 break; 8306 } 8307 8308 mtx_unlock(&softc->ctl_lock); 8309 } 8310 8311 int 8312 ctl_read_write(struct ctl_scsiio *ctsio) 8313 { 8314 struct ctl_lun *lun; 8315 struct ctl_lba_len lbalen; 8316 uint64_t lba; 8317 uint32_t num_blocks; 8318 int reladdr, fua, dpo, ebp; 8319 int retval; 8320 int isread; 8321 8322 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 8323 8324 CTL_DEBUG_PRINT(("ctl_read_write: command: %#x\n", ctsio->cdb[0])); 8325 8326 reladdr = 0; 8327 fua = 0; 8328 dpo = 0; 8329 ebp = 0; 8330 8331 retval = CTL_RETVAL_COMPLETE; 8332 8333 isread = ctsio->cdb[0] == READ_6 || ctsio->cdb[0] == READ_10 8334 || ctsio->cdb[0] == READ_12 || ctsio->cdb[0] == READ_16; 8335 if (lun->flags & CTL_LUN_PR_RESERVED && isread) { 8336 uint32_t residx; 8337 8338 /* 8339 * XXX KDM need a lock here. 8340 */ 8341 residx = ctl_get_resindex(&ctsio->io_hdr.nexus); 8342 if ((lun->res_type == SPR_TYPE_EX_AC 8343 && residx != lun->pr_res_idx) 8344 || ((lun->res_type == SPR_TYPE_EX_AC_RO 8345 || lun->res_type == SPR_TYPE_EX_AC_AR) 8346 && !lun->per_res[residx].registered)) { 8347 ctl_set_reservation_conflict(ctsio); 8348 ctl_done((union ctl_io *)ctsio); 8349 return (CTL_RETVAL_COMPLETE); 8350 } 8351 } 8352 8353 switch (ctsio->cdb[0]) { 8354 case READ_6: 8355 case WRITE_6: { 8356 struct scsi_rw_6 *cdb; 8357 8358 cdb = (struct scsi_rw_6 *)ctsio->cdb; 8359 8360 lba = scsi_3btoul(cdb->addr); 8361 /* only 5 bits are valid in the most significant address byte */ 8362 lba &= 0x1fffff; 8363 num_blocks = cdb->length; 8364 /* 8365 * This is correct according to SBC-2. 8366 */ 8367 if (num_blocks == 0) 8368 num_blocks = 256; 8369 break; 8370 } 8371 case READ_10: 8372 case WRITE_10: { 8373 struct scsi_rw_10 *cdb; 8374 8375 cdb = (struct scsi_rw_10 *)ctsio->cdb; 8376 8377 if (cdb->byte2 & SRW10_RELADDR) 8378 reladdr = 1; 8379 if (cdb->byte2 & SRW10_FUA) 8380 fua = 1; 8381 if (cdb->byte2 & SRW10_DPO) 8382 dpo = 1; 8383 8384 if ((cdb->opcode == WRITE_10) 8385 && (cdb->byte2 & SRW10_EBP)) 8386 ebp = 1; 8387 8388 lba = scsi_4btoul(cdb->addr); 8389 num_blocks = scsi_2btoul(cdb->length); 8390 break; 8391 } 8392 case WRITE_VERIFY_10: { 8393 struct scsi_write_verify_10 *cdb; 8394 8395 cdb = (struct scsi_write_verify_10 *)ctsio->cdb; 8396 8397 /* 8398 * XXX KDM we should do actual write verify support at some 8399 * point. 
This is obviously fake, we're just translating 8400 * things to a write. So we don't even bother checking the 8401 * BYTCHK field, since we don't do any verification. If 8402 * the user asks for it, we'll just pretend we did it. 8403 */ 8404 if (cdb->byte2 & SWV_DPO) 8405 dpo = 1; 8406 8407 lba = scsi_4btoul(cdb->addr); 8408 num_blocks = scsi_2btoul(cdb->length); 8409 break; 8410 } 8411 case READ_12: 8412 case WRITE_12: { 8413 struct scsi_rw_12 *cdb; 8414 8415 cdb = (struct scsi_rw_12 *)ctsio->cdb; 8416 8417 if (cdb->byte2 & SRW12_RELADDR) 8418 reladdr = 1; 8419 if (cdb->byte2 & SRW12_FUA) 8420 fua = 1; 8421 if (cdb->byte2 & SRW12_DPO) 8422 dpo = 1; 8423 lba = scsi_4btoul(cdb->addr); 8424 num_blocks = scsi_4btoul(cdb->length); 8425 break; 8426 } 8427 case WRITE_VERIFY_12: { 8428 struct scsi_write_verify_12 *cdb; 8429 8430 cdb = (struct scsi_write_verify_12 *)ctsio->cdb; 8431 8432 if (cdb->byte2 & SWV_DPO) 8433 dpo = 1; 8434 8435 lba = scsi_4btoul(cdb->addr); 8436 num_blocks = scsi_4btoul(cdb->length); 8437 8438 break; 8439 } 8440 case READ_16: 8441 case WRITE_16: { 8442 struct scsi_rw_16 *cdb; 8443 8444 cdb = (struct scsi_rw_16 *)ctsio->cdb; 8445 8446 if (cdb->byte2 & SRW12_RELADDR) 8447 reladdr = 1; 8448 if (cdb->byte2 & SRW12_FUA) 8449 fua = 1; 8450 if (cdb->byte2 & SRW12_DPO) 8451 dpo = 1; 8452 8453 lba = scsi_8btou64(cdb->addr); 8454 num_blocks = scsi_4btoul(cdb->length); 8455 break; 8456 } 8457 case WRITE_VERIFY_16: { 8458 struct scsi_write_verify_16 *cdb; 8459 8460 cdb = (struct scsi_write_verify_16 *)ctsio->cdb; 8461 8462 if (cdb->byte2 & SWV_DPO) 8463 dpo = 1; 8464 8465 lba = scsi_8btou64(cdb->addr); 8466 num_blocks = scsi_4btoul(cdb->length); 8467 break; 8468 } 8469 default: 8470 /* 8471 * We got a command we don't support. This shouldn't 8472 * happen, commands should be filtered out above us. 8473 */ 8474 ctl_set_invalid_opcode(ctsio); 8475 ctl_done((union ctl_io *)ctsio); 8476 8477 return (CTL_RETVAL_COMPLETE); 8478 break; /* NOTREACHED */ 8479 } 8480 8481 /* 8482 * XXX KDM what do we do with the DPO and FUA bits? FUA might be 8483 * interesting for us, but if RAIDCore is in write-back mode, 8484 * getting it to do write-through for a particular transaction may 8485 * not be possible. 8486 */ 8487 /* 8488 * We don't support relative addressing. That also requires 8489 * supporting linked commands, which we don't do. 8490 */ 8491 if (reladdr != 0) { 8492 ctl_set_invalid_field(ctsio, 8493 /*sks_valid*/ 1, 8494 /*command*/ 1, 8495 /*field*/ 1, 8496 /*bit_valid*/ 1, 8497 /*bit*/ 0); 8498 ctl_done((union ctl_io *)ctsio); 8499 return (CTL_RETVAL_COMPLETE); 8500 } 8501 8502 /* 8503 * The first check is to make sure we're in bounds, the second 8504 * check is to catch wrap-around problems. If the lba + num blocks 8505 * is less than the lba, then we've wrapped around and the block 8506 * range is invalid anyway. 8507 */ 8508 if (((lba + num_blocks) > (lun->be_lun->maxlba + 1)) 8509 || ((lba + num_blocks) < lba)) { 8510 ctl_set_lba_out_of_range(ctsio); 8511 ctl_done((union ctl_io *)ctsio); 8512 return (CTL_RETVAL_COMPLETE); 8513 } 8514 8515 /* 8516 * According to SBC-3, a transfer length of 0 is not an error. 8517 * Note that this cannot happen with WRITE(6) or READ(6), since 0 8518 * translates to 256 blocks for those commands. 
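 * For example, a READ(10) with a TRANSFER LENGTH of 0 completes with GOOD
 * status and moves no data, while a READ(6) with a length byte of 0 was
 * already converted to a 256-block transfer by the CDB decoding above.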
8519 */ 8520 if (num_blocks == 0) { 8521 ctl_set_success(ctsio); 8522 ctl_done((union ctl_io *)ctsio); 8523 return (CTL_RETVAL_COMPLETE); 8524 } 8525 8526 lbalen.lba = lba; 8527 lbalen.len = num_blocks; 8528 memcpy(ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN].bytes, &lbalen, 8529 sizeof(lbalen)); 8530 8531 CTL_DEBUG_PRINT(("ctl_read_write: calling data_submit()\n")); 8532 8533 retval = lun->backend->data_submit((union ctl_io *)ctsio); 8534 8535 return (retval); 8536 } 8537 8538 int 8539 ctl_report_luns(struct ctl_scsiio *ctsio) 8540 { 8541 struct scsi_report_luns *cdb; 8542 struct scsi_report_luns_data *lun_data; 8543 struct ctl_lun *lun, *request_lun; 8544 int num_luns, retval; 8545 uint32_t alloc_len, lun_datalen; 8546 int num_filled, well_known; 8547 uint32_t initidx; 8548 8549 retval = CTL_RETVAL_COMPLETE; 8550 well_known = 0; 8551 8552 cdb = (struct scsi_report_luns *)ctsio->cdb; 8553 8554 CTL_DEBUG_PRINT(("ctl_report_luns\n")); 8555 8556 mtx_lock(&control_softc->ctl_lock); 8557 num_luns = control_softc->num_luns; 8558 mtx_unlock(&control_softc->ctl_lock); 8559 8560 switch (cdb->select_report) { 8561 case RPL_REPORT_DEFAULT: 8562 case RPL_REPORT_ALL: 8563 break; 8564 case RPL_REPORT_WELLKNOWN: 8565 well_known = 1; 8566 num_luns = 0; 8567 break; 8568 default: 8569 ctl_set_invalid_field(ctsio, 8570 /*sks_valid*/ 1, 8571 /*command*/ 1, 8572 /*field*/ 2, 8573 /*bit_valid*/ 0, 8574 /*bit*/ 0); 8575 ctl_done((union ctl_io *)ctsio); 8576 return (retval); 8577 break; /* NOTREACHED */ 8578 } 8579 8580 alloc_len = scsi_4btoul(cdb->length); 8581 /* 8582 * The initiator has to allocate at least 16 bytes for this request, 8583 * so he can at least get the header and the first LUN. Otherwise 8584 * we reject the request (per SPC-3 rev 14, section 6.21). 8585 */ 8586 if (alloc_len < (sizeof(struct scsi_report_luns_data) + 8587 sizeof(struct scsi_report_luns_lundata))) { 8588 ctl_set_invalid_field(ctsio, 8589 /*sks_valid*/ 1, 8590 /*command*/ 1, 8591 /*field*/ 6, 8592 /*bit_valid*/ 0, 8593 /*bit*/ 0); 8594 ctl_done((union ctl_io *)ctsio); 8595 return (retval); 8596 } 8597 8598 request_lun = (struct ctl_lun *) 8599 ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 8600 8601 lun_datalen = sizeof(*lun_data) + 8602 (num_luns * sizeof(struct scsi_report_luns_lundata)); 8603 8604 ctsio->kern_data_ptr = malloc(lun_datalen, M_CTL, M_WAITOK | M_ZERO); 8605 lun_data = (struct scsi_report_luns_data *)ctsio->kern_data_ptr; 8606 ctsio->kern_sg_entries = 0; 8607 8608 if (lun_datalen < alloc_len) { 8609 ctsio->residual = alloc_len - lun_datalen; 8610 ctsio->kern_data_len = lun_datalen; 8611 ctsio->kern_total_len = lun_datalen; 8612 } else { 8613 ctsio->residual = 0; 8614 ctsio->kern_data_len = alloc_len; 8615 ctsio->kern_total_len = alloc_len; 8616 } 8617 ctsio->kern_data_resid = 0; 8618 ctsio->kern_rel_offset = 0; 8619 ctsio->kern_sg_entries = 0; 8620 8621 initidx = ctl_get_initindex(&ctsio->io_hdr.nexus); 8622 8623 /* 8624 * We set this to the actual data length, regardless of how much 8625 * space we actually have to return results. If the user looks at 8626 * this value, he'll know whether or not he allocated enough space 8627 * and reissue the command if necessary. We don't support well 8628 * known logical units, so if the user asks for that, return none. 
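 * The LUN LIST LENGTH field counts only the 8-byte LUN entries, hence the
 * "- 8" below to drop the list header; e.g. four LUNs yield a value of 32,
 * even if the allocation length only let us return part of the list.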
8629 */ 8630 scsi_ulto4b(lun_datalen - 8, lun_data->length); 8631 8632 mtx_lock(&control_softc->ctl_lock); 8633 for (num_filled = 0, lun = STAILQ_FIRST(&control_softc->lun_list); 8634 (lun != NULL) && (num_filled < num_luns); 8635 lun = STAILQ_NEXT(lun, links)) { 8636 8637 if (lun->lun <= 0xff) { 8638 /* 8639 * Peripheral addressing method, bus number 0. 8640 */ 8641 lun_data->luns[num_filled].lundata[0] = 8642 RPL_LUNDATA_ATYP_PERIPH; 8643 lun_data->luns[num_filled].lundata[1] = lun->lun; 8644 num_filled++; 8645 } else if (lun->lun <= 0x3fff) { 8646 /* 8647 * Flat addressing method. 8648 */ 8649 lun_data->luns[num_filled].lundata[0] = 8650 RPL_LUNDATA_ATYP_FLAT | 8651 (lun->lun & RPL_LUNDATA_FLAT_LUN_MASK); 8652 #ifdef OLDCTLHEADERS 8653 (SRLD_ADDR_FLAT << SRLD_ADDR_SHIFT) | 8654 (lun->lun & SRLD_BUS_LUN_MASK); 8655 #endif 8656 lun_data->luns[num_filled].lundata[1] = 8657 #ifdef OLDCTLHEADERS 8658 lun->lun >> SRLD_BUS_LUN_BITS; 8659 #endif 8660 lun->lun >> RPL_LUNDATA_FLAT_LUN_BITS; 8661 num_filled++; 8662 } else { 8663 printf("ctl_report_luns: bogus LUN number %jd, " 8664 "skipping\n", (intmax_t)lun->lun); 8665 } 8666 /* 8667 * According to SPC-3, rev 14 section 6.21: 8668 * 8669 * "The execution of a REPORT LUNS command to any valid and 8670 * installed logical unit shall clear the REPORTED LUNS DATA 8671 * HAS CHANGED unit attention condition for all logical 8672 * units of that target with respect to the requesting 8673 * initiator. A valid and installed logical unit is one 8674 * having a PERIPHERAL QUALIFIER of 000b in the standard 8675 * INQUIRY data (see 6.4.2)." 8676 * 8677 * If request_lun is NULL, the LUN this report luns command 8678 * was issued to is either disabled or doesn't exist. In that 8679 * case, we shouldn't clear any pending lun change unit 8680 * attention. 8681 */ 8682 if (request_lun != NULL) 8683 lun->pending_sense[initidx].ua_pending &= 8684 ~CTL_UA_LUN_CHANGE; 8685 } 8686 mtx_unlock(&control_softc->ctl_lock); 8687 8688 /* 8689 * We can only return SCSI_STATUS_CHECK_COND when we can't satisfy 8690 * this request. 8691 */ 8692 ctsio->scsi_status = SCSI_STATUS_OK; 8693 8694 ctsio->be_move_done = ctl_config_move_done; 8695 ctl_datamove((union ctl_io *)ctsio); 8696 8697 return (retval); 8698 } 8699 8700 int 8701 ctl_request_sense(struct ctl_scsiio *ctsio) 8702 { 8703 struct scsi_request_sense *cdb; 8704 struct scsi_sense_data *sense_ptr; 8705 struct ctl_lun *lun; 8706 uint32_t initidx; 8707 int have_error; 8708 scsi_sense_data_type sense_format; 8709 8710 cdb = (struct scsi_request_sense *)ctsio->cdb; 8711 8712 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 8713 8714 CTL_DEBUG_PRINT(("ctl_request_sense\n")); 8715 8716 /* 8717 * Determine which sense format the user wants. 8718 */ 8719 if (cdb->byte2 & SRS_DESC) 8720 sense_format = SSD_TYPE_DESC; 8721 else 8722 sense_format = SSD_TYPE_FIXED; 8723 8724 ctsio->kern_data_ptr = malloc(sizeof(*sense_ptr), M_CTL, M_WAITOK); 8725 sense_ptr = (struct scsi_sense_data *)ctsio->kern_data_ptr; 8726 ctsio->kern_sg_entries = 0; 8727 8728 /* 8729 * struct scsi_sense_data, which is currently set to 256 bytes, is 8730 * larger than the largest allowed value for the length field in the 8731 * REQUEST SENSE CDB, which is 252 bytes as of SPC-4. 
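 * Any truncation is therefore governed by the CDB allocation length used
 * below; even the maximum of 252 simply leaves the last 4 bytes of the
 * structure unreturned.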
8732 */ 8733 ctsio->residual = 0; 8734 ctsio->kern_data_len = cdb->length; 8735 ctsio->kern_total_len = cdb->length; 8736 8737 ctsio->kern_data_resid = 0; 8738 ctsio->kern_rel_offset = 0; 8739 ctsio->kern_sg_entries = 0; 8740 8741 /* 8742 * If we don't have a LUN, we don't have any pending sense. 8743 */ 8744 if (lun == NULL) 8745 goto no_sense; 8746 8747 have_error = 0; 8748 initidx = ctl_get_initindex(&ctsio->io_hdr.nexus); 8749 /* 8750 * Check for pending sense, and then for pending unit attentions. 8751 * Pending sense gets returned first, then pending unit attentions. 8752 */ 8753 mtx_lock(&lun->ctl_softc->ctl_lock); 8754 if (ctl_is_set(lun->have_ca, initidx)) { 8755 scsi_sense_data_type stored_format; 8756 8757 /* 8758 * Check to see which sense format was used for the stored 8759 * sense data. 8760 */ 8761 stored_format = scsi_sense_type( 8762 &lun->pending_sense[initidx].sense); 8763 8764 /* 8765 * If the user requested a different sense format than the 8766 * one we stored, then we need to convert it to the other 8767 * format. If we're going from descriptor to fixed format 8768 * sense data, we may lose things in translation, depending 8769 * on what options were used. 8770 * 8771 * If the stored format is SSD_TYPE_NONE (i.e. invalid), 8772 * for some reason we'll just copy it out as-is. 8773 */ 8774 if ((stored_format == SSD_TYPE_FIXED) 8775 && (sense_format == SSD_TYPE_DESC)) 8776 ctl_sense_to_desc((struct scsi_sense_data_fixed *) 8777 &lun->pending_sense[initidx].sense, 8778 (struct scsi_sense_data_desc *)sense_ptr); 8779 else if ((stored_format == SSD_TYPE_DESC) 8780 && (sense_format == SSD_TYPE_FIXED)) 8781 ctl_sense_to_fixed((struct scsi_sense_data_desc *) 8782 &lun->pending_sense[initidx].sense, 8783 (struct scsi_sense_data_fixed *)sense_ptr); 8784 else 8785 memcpy(sense_ptr, &lun->pending_sense[initidx].sense, 8786 ctl_min(sizeof(*sense_ptr), 8787 sizeof(lun->pending_sense[initidx].sense))); 8788 8789 ctl_clear_mask(lun->have_ca, initidx); 8790 have_error = 1; 8791 } else if (lun->pending_sense[initidx].ua_pending != CTL_UA_NONE) { 8792 ctl_ua_type ua_type; 8793 8794 ua_type = ctl_build_ua(lun->pending_sense[initidx].ua_pending, 8795 sense_ptr, sense_format); 8796 if (ua_type != CTL_UA_NONE) { 8797 have_error = 1; 8798 /* We're reporting this UA, so clear it */ 8799 lun->pending_sense[initidx].ua_pending &= ~ua_type; 8800 } 8801 } 8802 mtx_unlock(&lun->ctl_softc->ctl_lock); 8803 8804 /* 8805 * We already have a pending error, return it. 8806 */ 8807 if (have_error != 0) { 8808 /* 8809 * We report the SCSI status as OK, since the status of the 8810 * request sense command itself is OK. 8811 */ 8812 ctsio->scsi_status = SCSI_STATUS_OK; 8813 8814 /* 8815 * We report 0 for the sense length, because we aren't doing 8816 * autosense in this case. We're reporting sense as 8817 * parameter data. 8818 */ 8819 ctsio->sense_len = 0; 8820 8821 ctsio->be_move_done = ctl_config_move_done; 8822 ctl_datamove((union ctl_io *)ctsio); 8823 8824 return (CTL_RETVAL_COMPLETE); 8825 } 8826 8827 no_sense: 8828 8829 /* 8830 * No sense information to report, so we report that everything is 8831 * okay. 8832 */ 8833 ctl_set_sense_data(sense_ptr, 8834 lun, 8835 sense_format, 8836 /*current_error*/ 1, 8837 /*sense_key*/ SSD_KEY_NO_SENSE, 8838 /*asc*/ 0x00, 8839 /*ascq*/ 0x00, 8840 SSD_ELEM_NONE); 8841 8842 ctsio->scsi_status = SCSI_STATUS_OK; 8843 8844 /* 8845 * We report 0 for the sense length, because we aren't doing 8846 * autosense in this case. We're reporting sense as parameter data. 
8847 */ 8848 ctsio->sense_len = 0; 8849 ctsio->be_move_done = ctl_config_move_done; 8850 ctl_datamove((union ctl_io *)ctsio); 8851 8852 return (CTL_RETVAL_COMPLETE); 8853 } 8854 8855 int 8856 ctl_tur(struct ctl_scsiio *ctsio) 8857 { 8858 struct ctl_lun *lun; 8859 8860 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 8861 8862 CTL_DEBUG_PRINT(("ctl_tur\n")); 8863 8864 if (lun == NULL) 8865 return (-EINVAL); 8866 8867 ctsio->scsi_status = SCSI_STATUS_OK; 8868 ctsio->io_hdr.status = CTL_SUCCESS; 8869 8870 ctl_done((union ctl_io *)ctsio); 8871 8872 return (CTL_RETVAL_COMPLETE); 8873 } 8874 8875 #ifdef notyet 8876 static int 8877 ctl_cmddt_inquiry(struct ctl_scsiio *ctsio) 8878 { 8879 8880 } 8881 #endif 8882 8883 static int 8884 ctl_inquiry_evpd_supported(struct ctl_scsiio *ctsio, int alloc_len) 8885 { 8886 struct scsi_vpd_supported_pages *pages; 8887 int sup_page_size; 8888 struct ctl_lun *lun; 8889 8890 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 8891 8892 sup_page_size = sizeof(struct scsi_vpd_supported_pages) + 8893 SCSI_EVPD_NUM_SUPPORTED_PAGES; 8894 /* 8895 * XXX KDM GFP_??? We probably don't want to wait here, 8896 * unless we end up having a process/thread context. 8897 */ 8898 ctsio->kern_data_ptr = malloc(sup_page_size, M_CTL, M_WAITOK | M_ZERO); 8899 if (ctsio->kern_data_ptr == NULL) { 8900 ctsio->io_hdr.status = CTL_SCSI_ERROR; 8901 ctsio->scsi_status = SCSI_STATUS_BUSY; 8902 ctl_done((union ctl_io *)ctsio); 8903 return (CTL_RETVAL_COMPLETE); 8904 } 8905 pages = (struct scsi_vpd_supported_pages *)ctsio->kern_data_ptr; 8906 ctsio->kern_sg_entries = 0; 8907 8908 if (sup_page_size < alloc_len) { 8909 ctsio->residual = alloc_len - sup_page_size; 8910 ctsio->kern_data_len = sup_page_size; 8911 ctsio->kern_total_len = sup_page_size; 8912 } else { 8913 ctsio->residual = 0; 8914 ctsio->kern_data_len = alloc_len; 8915 ctsio->kern_total_len = alloc_len; 8916 } 8917 ctsio->kern_data_resid = 0; 8918 ctsio->kern_rel_offset = 0; 8919 ctsio->kern_sg_entries = 0; 8920 8921 /* 8922 * The control device is always connected. The disk device, on the 8923 * other hand, may not be online all the time. Need to change this 8924 * to figure out whether the disk device is actually online or not. 8925 */ 8926 if (lun != NULL) 8927 pages->device = (SID_QUAL_LU_CONNECTED << 5) | 8928 lun->be_lun->lun_type; 8929 else 8930 pages->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 8931 8932 pages->length = SCSI_EVPD_NUM_SUPPORTED_PAGES; 8933 /* Supported VPD pages */ 8934 pages->page_list[0] = SVPD_SUPPORTED_PAGES; 8935 /* Serial Number */ 8936 pages->page_list[1] = SVPD_UNIT_SERIAL_NUMBER; 8937 /* Device Identification */ 8938 pages->page_list[2] = SVPD_DEVICE_ID; 8939 8940 ctsio->scsi_status = SCSI_STATUS_OK; 8941 8942 ctsio->be_move_done = ctl_config_move_done; 8943 ctl_datamove((union ctl_io *)ctsio); 8944 8945 return (CTL_RETVAL_COMPLETE); 8946 } 8947 8948 static int 8949 ctl_inquiry_evpd_serial(struct ctl_scsiio *ctsio, int alloc_len) 8950 { 8951 struct scsi_vpd_unit_serial_number *sn_ptr; 8952 struct ctl_lun *lun; 8953 #ifndef CTL_USE_BACKEND_SN 8954 char tmpstr[32]; 8955 #endif 8956 8957 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 8958 8959 /* XXX KDM which malloc flags here?? 
*/ 8960 ctsio->kern_data_ptr = malloc(sizeof(*sn_ptr), M_CTL, M_WAITOK | M_ZERO); 8961 if (ctsio->kern_data_ptr == NULL) { 8962 ctsio->io_hdr.status = CTL_SCSI_ERROR; 8963 ctsio->scsi_status = SCSI_STATUS_BUSY; 8964 ctl_done((union ctl_io *)ctsio); 8965 return (CTL_RETVAL_COMPLETE); 8966 } 8967 sn_ptr = (struct scsi_vpd_unit_serial_number *)ctsio->kern_data_ptr; 8968 ctsio->kern_sg_entries = 0; 8969 8970 if (sizeof(*sn_ptr) < alloc_len) { 8971 ctsio->residual = alloc_len - sizeof(*sn_ptr); 8972 ctsio->kern_data_len = sizeof(*sn_ptr); 8973 ctsio->kern_total_len = sizeof(*sn_ptr); 8974 } else { 8975 ctsio->residual = 0; 8976 ctsio->kern_data_len = alloc_len; 8977 ctsio->kern_total_len = alloc_len; 8978 } 8979 ctsio->kern_data_resid = 0; 8980 ctsio->kern_rel_offset = 0; 8981 ctsio->kern_sg_entries = 0; 8982 8983 /* 8984 * The control device is always connected. The disk device, on the 8985 * other hand, may not be online all the time. Need to change this 8986 * to figure out whether the disk device is actually online or not. 8987 */ 8988 if (lun != NULL) 8989 sn_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 8990 lun->be_lun->lun_type; 8991 else 8992 sn_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 8993 8994 sn_ptr->page_code = SVPD_UNIT_SERIAL_NUMBER; 8995 sn_ptr->length = ctl_min(sizeof(*sn_ptr) - 4, CTL_SN_LEN); 8996 #ifdef CTL_USE_BACKEND_SN 8997 /* 8998 * If we don't have a LUN, we just leave the serial number as 8999 * all spaces. 9000 */ 9001 memset(sn_ptr->serial_num, 0x20, sizeof(sn_ptr->serial_num)); 9002 if (lun != NULL) { 9003 strncpy((char *)sn_ptr->serial_num, 9004 (char *)lun->be_lun->serial_num, CTL_SN_LEN); 9005 } 9006 #else 9007 /* 9008 * Note that we're using a non-unique serial number here, 9009 */ 9010 snprintf(tmpstr, sizeof(tmpstr), "MYSERIALNUMIS000"); 9011 memset(sn_ptr->serial_num, 0x20, sizeof(sn_ptr->serial_num)); 9012 strncpy(sn_ptr->serial_num, tmpstr, ctl_min(CTL_SN_LEN, 9013 ctl_min(sizeof(tmpstr), sizeof(*sn_ptr) - 4))); 9014 #endif 9015 ctsio->scsi_status = SCSI_STATUS_OK; 9016 9017 ctsio->be_move_done = ctl_config_move_done; 9018 ctl_datamove((union ctl_io *)ctsio); 9019 9020 return (CTL_RETVAL_COMPLETE); 9021 } 9022 9023 9024 static int 9025 ctl_inquiry_evpd_devid(struct ctl_scsiio *ctsio, int alloc_len) 9026 { 9027 struct scsi_vpd_device_id *devid_ptr; 9028 struct scsi_vpd_id_descriptor *desc, *desc1; 9029 struct scsi_vpd_id_descriptor *desc2, *desc3; /* for types 4h and 5h */ 9030 struct scsi_vpd_id_t10 *t10id; 9031 struct ctl_softc *ctl_softc; 9032 struct ctl_lun *lun; 9033 struct ctl_frontend *fe; 9034 #ifndef CTL_USE_BACKEND_SN 9035 char tmpstr[32]; 9036 #endif /* CTL_USE_BACKEND_SN */ 9037 int devid_len; 9038 9039 ctl_softc = control_softc; 9040 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 9041 9042 devid_len = sizeof(struct scsi_vpd_device_id) + 9043 sizeof(struct scsi_vpd_id_descriptor) + 9044 sizeof(struct scsi_vpd_id_t10) + CTL_DEVID_LEN + 9045 sizeof(struct scsi_vpd_id_descriptor) + CTL_WWPN_LEN + 9046 sizeof(struct scsi_vpd_id_descriptor) + 9047 sizeof(struct scsi_vpd_id_rel_trgt_port_id) + 9048 sizeof(struct scsi_vpd_id_descriptor) + 9049 sizeof(struct scsi_vpd_id_trgt_port_grp_id); 9050 9051 /* XXX KDM which malloc flags here ?? 
*/ 9052 ctsio->kern_data_ptr = malloc(devid_len, M_CTL, M_WAITOK | M_ZERO); 9053 if (ctsio->kern_data_ptr == NULL) { 9054 ctsio->io_hdr.status = CTL_SCSI_ERROR; 9055 ctsio->scsi_status = SCSI_STATUS_BUSY; 9056 ctl_done((union ctl_io *)ctsio); 9057 return (CTL_RETVAL_COMPLETE); 9058 } 9059 devid_ptr = (struct scsi_vpd_device_id *)ctsio->kern_data_ptr; 9060 ctsio->kern_sg_entries = 0; 9061 9062 if (devid_len < alloc_len) { 9063 ctsio->residual = alloc_len - devid_len; 9064 ctsio->kern_data_len = devid_len; 9065 ctsio->kern_total_len = devid_len; 9066 } else { 9067 ctsio->residual = 0; 9068 ctsio->kern_data_len = alloc_len; 9069 ctsio->kern_total_len = alloc_len; 9070 } 9071 ctsio->kern_data_resid = 0; 9072 ctsio->kern_rel_offset = 0; 9073 ctsio->kern_sg_entries = 0; 9074 9075 desc = (struct scsi_vpd_id_descriptor *)devid_ptr->desc_list; 9076 t10id = (struct scsi_vpd_id_t10 *)&desc->identifier[0]; 9077 desc1 = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] + 9078 sizeof(struct scsi_vpd_id_t10) + CTL_DEVID_LEN); 9079 desc2 = (struct scsi_vpd_id_descriptor *)(&desc1->identifier[0] + 9080 CTL_WWPN_LEN); 9081 desc3 = (struct scsi_vpd_id_descriptor *)(&desc2->identifier[0] + 9082 sizeof(struct scsi_vpd_id_rel_trgt_port_id)); 9083 9084 /* 9085 * The control device is always connected. The disk device, on the 9086 * other hand, may not be online all the time. 9087 */ 9088 if (lun != NULL) 9089 devid_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 9090 lun->be_lun->lun_type; 9091 else 9092 devid_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 9093 9094 devid_ptr->page_code = SVPD_DEVICE_ID; 9095 9096 scsi_ulto2b(devid_len - 4, devid_ptr->length); 9097 9098 mtx_lock(&ctl_softc->ctl_lock); 9099 9100 fe = ctl_softc->ctl_ports[ctl_port_idx(ctsio->io_hdr.nexus.targ_port)]; 9101 9102 /* 9103 * For Fibre channel, 9104 */ 9105 if (fe->port_type == CTL_PORT_FC) 9106 { 9107 desc->proto_codeset = (SCSI_PROTO_FC << 4) | 9108 SVPD_ID_CODESET_ASCII; 9109 desc1->proto_codeset = (SCSI_PROTO_FC << 4) | 9110 SVPD_ID_CODESET_BINARY; 9111 } 9112 else 9113 { 9114 desc->proto_codeset = (SCSI_PROTO_SPI << 4) | 9115 SVPD_ID_CODESET_ASCII; 9116 desc1->proto_codeset = (SCSI_PROTO_SPI << 4) | 9117 SVPD_ID_CODESET_BINARY; 9118 } 9119 desc2->proto_codeset = desc3->proto_codeset = desc1->proto_codeset; 9120 mtx_unlock(&ctl_softc->ctl_lock); 9121 9122 /* 9123 * We're using a LUN association here. i.e., this device ID is a 9124 * per-LUN identifier. 9125 */ 9126 desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_LUN | SVPD_ID_TYPE_T10; 9127 desc->length = sizeof(*t10id) + CTL_DEVID_LEN; 9128 strncpy((char *)t10id->vendor, CTL_VENDOR, sizeof(t10id->vendor)); 9129 9130 /* 9131 * desc1 is for the WWPN which is a port asscociation. 
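 * (A port association means the identifier describes the target port the
 * command was received on, rather than the logical unit itself.)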
9132 */ 9133 desc1->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_PORT | SVPD_ID_TYPE_NAA; 9134 desc1->length = CTL_WWPN_LEN; 9135 /* XXX Call Reggie's get_WWNN func here then add port # to the end */ 9136 /* For testing just create the WWPN */ 9137 #if 0 9138 ddb_GetWWNN((char *)desc1->identifier); 9139 9140 /* NOTE: if the port is 0 or 8 we don't want to subtract 1 */ 9141 /* This is so Copancontrol will return something sane */ 9142 if (ctsio->io_hdr.nexus.targ_port!=0 && 9143 ctsio->io_hdr.nexus.targ_port!=8) 9144 desc1->identifier[7] += ctsio->io_hdr.nexus.targ_port-1; 9145 else 9146 desc1->identifier[7] += ctsio->io_hdr.nexus.targ_port; 9147 #endif 9148 9149 be64enc(desc1->identifier, fe->wwpn); 9150 9151 /* 9152 * desc2 is for the Relative Target Port(type 4h) identifier 9153 */ 9154 desc2->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_PORT 9155 | SVPD_ID_TYPE_RELTARG; 9156 desc2->length = 4; 9157 //#if 0 9158 /* NOTE: if the port is 0 or 8 we don't want to subtract 1 */ 9159 /* This is so Copancontrol will return something sane */ 9160 if (ctsio->io_hdr.nexus.targ_port!=0 && 9161 ctsio->io_hdr.nexus.targ_port!=8) 9162 desc2->identifier[3] = ctsio->io_hdr.nexus.targ_port - 1; 9163 else 9164 desc2->identifier[3] = ctsio->io_hdr.nexus.targ_port; 9165 //#endif 9166 9167 /* 9168 * desc3 is for the Target Port Group(type 5h) identifier 9169 */ 9170 desc3->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_PORT 9171 | SVPD_ID_TYPE_TPORTGRP; 9172 desc3->length = 4; 9173 if (ctsio->io_hdr.nexus.targ_port < CTL_MAX_PORTS || ctl_is_single) 9174 desc3->identifier[3] = 1; 9175 else 9176 desc3->identifier[3] = 2; 9177 9178 #ifdef CTL_USE_BACKEND_SN 9179 /* 9180 * If we've actually got a backend, copy the device id from the 9181 * per-LUN data. Otherwise, set it to all spaces. 9182 */ 9183 if (lun != NULL) { 9184 /* 9185 * Copy the backend's LUN ID. 9186 */ 9187 strncpy((char *)t10id->vendor_spec_id, 9188 (char *)lun->be_lun->device_id, CTL_DEVID_LEN); 9189 } else { 9190 /* 9191 * No backend, set this to spaces. 9192 */ 9193 memset(t10id->vendor_spec_id, 0x20, CTL_DEVID_LEN); 9194 } 9195 #else 9196 snprintf(tmpstr, sizeof(tmpstr), "MYDEVICEIDIS%4d", 9197 (lun != NULL) ? 
(int)lun->lun : 0); 9198 strncpy(t10id->vendor_spec_id, tmpstr, ctl_min(CTL_DEVID_LEN, 9199 sizeof(tmpstr))); 9200 #endif 9201 9202 ctsio->scsi_status = SCSI_STATUS_OK; 9203 9204 ctsio->be_move_done = ctl_config_move_done; 9205 ctl_datamove((union ctl_io *)ctsio); 9206 9207 return (CTL_RETVAL_COMPLETE); 9208 } 9209 9210 static int 9211 ctl_inquiry_evpd(struct ctl_scsiio *ctsio) 9212 { 9213 struct scsi_inquiry *cdb; 9214 int alloc_len, retval; 9215 9216 cdb = (struct scsi_inquiry *)ctsio->cdb; 9217 9218 retval = CTL_RETVAL_COMPLETE; 9219 9220 alloc_len = scsi_2btoul(cdb->length); 9221 9222 switch (cdb->page_code) { 9223 case SVPD_SUPPORTED_PAGES: 9224 retval = ctl_inquiry_evpd_supported(ctsio, alloc_len); 9225 break; 9226 case SVPD_UNIT_SERIAL_NUMBER: 9227 retval = ctl_inquiry_evpd_serial(ctsio, alloc_len); 9228 break; 9229 case SVPD_DEVICE_ID: 9230 retval = ctl_inquiry_evpd_devid(ctsio, alloc_len); 9231 break; 9232 default: 9233 ctl_set_invalid_field(ctsio, 9234 /*sks_valid*/ 1, 9235 /*command*/ 1, 9236 /*field*/ 2, 9237 /*bit_valid*/ 0, 9238 /*bit*/ 0); 9239 ctl_done((union ctl_io *)ctsio); 9240 retval = CTL_RETVAL_COMPLETE; 9241 break; 9242 } 9243 9244 return (retval); 9245 } 9246 9247 static int 9248 ctl_inquiry_std(struct ctl_scsiio *ctsio) 9249 { 9250 struct scsi_inquiry_data *inq_ptr; 9251 struct scsi_inquiry *cdb; 9252 struct ctl_softc *ctl_softc; 9253 struct ctl_lun *lun; 9254 uint32_t alloc_len; 9255 int is_fc; 9256 9257 ctl_softc = control_softc; 9258 9259 /* 9260 * Figure out whether we're talking to a Fibre Channel port or not. 9261 * We treat the ioctl front end, and any SCSI adapters, as packetized 9262 * SCSI front ends. 9263 */ 9264 mtx_lock(&ctl_softc->ctl_lock); 9265 if (ctl_softc->ctl_ports[ctl_port_idx(ctsio->io_hdr.nexus.targ_port)]->port_type != 9266 CTL_PORT_FC) 9267 is_fc = 0; 9268 else 9269 is_fc = 1; 9270 mtx_unlock(&ctl_softc->ctl_lock); 9271 9272 lun = ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 9273 cdb = (struct scsi_inquiry *)ctsio->cdb; 9274 alloc_len = scsi_2btoul(cdb->length); 9275 9276 /* 9277 * We malloc the full inquiry data size here and fill it 9278 * in. If the user only asks for less, we'll give him 9279 * that much. 9280 */ 9281 /* XXX KDM what malloc flags should we use here?? */ 9282 ctsio->kern_data_ptr = malloc(sizeof(*inq_ptr), M_CTL, M_WAITOK | M_ZERO); 9283 if (ctsio->kern_data_ptr == NULL) { 9284 ctsio->io_hdr.status = CTL_SCSI_ERROR; 9285 ctsio->scsi_status = SCSI_STATUS_BUSY; 9286 ctl_done((union ctl_io *)ctsio); 9287 return (CTL_RETVAL_COMPLETE); 9288 } 9289 inq_ptr = (struct scsi_inquiry_data *)ctsio->kern_data_ptr; 9290 ctsio->kern_sg_entries = 0; 9291 ctsio->kern_data_resid = 0; 9292 ctsio->kern_rel_offset = 0; 9293 9294 if (sizeof(*inq_ptr) < alloc_len) { 9295 ctsio->residual = alloc_len - sizeof(*inq_ptr); 9296 ctsio->kern_data_len = sizeof(*inq_ptr); 9297 ctsio->kern_total_len = sizeof(*inq_ptr); 9298 } else { 9299 ctsio->residual = 0; 9300 ctsio->kern_data_len = alloc_len; 9301 ctsio->kern_total_len = alloc_len; 9302 } 9303 9304 /* 9305 * If we have a LUN configured, report it as connected. Otherwise, 9306 * report that it is offline or no device is supported, depending 9307 * on the value of inquiry_pq_no_lun. 9308 * 9309 * According to the spec (SPC-4 r34), the peripheral qualifier 9310 * SID_QUAL_LU_OFFLINE (001b) is used in the following scenario: 9311 * 9312 * "A peripheral device having the specified peripheral device type 9313 * is not connected to this logical unit. 
However, the device 9314 * server is capable of supporting the specified peripheral device 9315 * type on this logical unit." 9316 * 9317 * According to the same spec, the peripheral qualifier 9318 * SID_QUAL_BAD_LU (011b) is used in this scenario: 9319 * 9320 * "The device server is not capable of supporting a peripheral 9321 * device on this logical unit. For this peripheral qualifier the 9322 * peripheral device type shall be set to 1Fh. All other peripheral 9323 * device type values are reserved for this peripheral qualifier." 9324 * 9325 * Given the text, it would seem that we probably want to report that 9326 * the LUN is offline here. There is no LUN connected, but we can 9327 * support a LUN at the given LUN number. 9328 * 9329 * In the real world, though, it sounds like things are a little 9330 * different: 9331 * 9332 * - Linux, when presented with a LUN with the offline peripheral 9333 * qualifier, will create an sg driver instance for it. So when 9334 * you attach it to CTL, you wind up with a ton of sg driver 9335 * instances. (One for every LUN that Linux bothered to probe.) 9336 * Linux does this despite the fact that it issues a REPORT LUNs 9337 * to LUN 0 to get the inventory of supported LUNs. 9338 * 9339 * - There is other anecdotal evidence (from Emulex folks) about 9340 * arrays that use the offline peripheral qualifier for LUNs that 9341 * are on the "passive" path in an active/passive array. 9342 * 9343 * So the solution is to provide a hopefully reasonable default 9344 * (return bad/no LUN) and allow the user to change the behavior 9345 * with a tunable/sysctl variable. 9346 */ 9347 if (lun != NULL) 9348 inq_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 9349 lun->be_lun->lun_type; 9350 else if (ctl_softc->inquiry_pq_no_lun == 0) 9351 inq_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 9352 else 9353 inq_ptr->device = (SID_QUAL_BAD_LU << 5) | T_NODEVICE; 9354 9355 /* RMB in byte 2 is 0 */ 9356 inq_ptr->version = SCSI_REV_SPC3; 9357 9358 /* 9359 * According to SAM-3, even if a device only supports a single 9360 * level of LUN addressing, it should still set the HISUP bit: 9361 * 9362 * 4.9.1 Logical unit numbers overview 9363 * 9364 * All logical unit number formats described in this standard are 9365 * hierarchical in structure even when only a single level in that 9366 * hierarchy is used. The HISUP bit shall be set to one in the 9367 * standard INQUIRY data (see SPC-2) when any logical unit number 9368 * format described in this standard is used. Non-hierarchical 9369 * formats are outside the scope of this standard. 9370 * 9371 * Therefore we set the HiSup bit here. 9372 * 9373 * The response format is 2, per SPC-3. 9374 */ 9375 inq_ptr->response_format = SID_HiSup | 2; 9376 9377 inq_ptr->additional_length = sizeof(*inq_ptr) - 4; 9378 CTL_DEBUG_PRINT(("additional_length = %d\n", 9379 inq_ptr->additional_length)); 9380 9381 inq_ptr->spc3_flags = SPC3_SID_TPGS_IMPLICIT; 9382 /* 16 bit addressing */ 9383 if (is_fc == 0) 9384 inq_ptr->spc2_flags = SPC2_SID_ADDR16; 9385 /* XXX set the SID_MultiP bit here if we're actually going to 9386 respond on multiple ports */ 9387 inq_ptr->spc2_flags |= SPC2_SID_MultiP; 9388 9389 /* 16 bit data bus, synchronous transfers */ 9390 /* XXX these flags don't apply for FC */ 9391 if (is_fc == 0) 9392 inq_ptr->flags = SID_WBus16 | SID_Sync; 9393 /* 9394 * XXX KDM do we want to support tagged queueing on the control 9395 * device at all?
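 * (As the code below stands, the CmdQue bit is set for every LUN type
 * except processor LUNs.)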
9396 */ 9397 if ((lun == NULL) 9398 || (lun->be_lun->lun_type != T_PROCESSOR)) 9399 inq_ptr->flags |= SID_CmdQue; 9400 /* 9401 * Per SPC-3, unused bytes in ASCII strings are filled with spaces. 9402 * We have 8 bytes for the vendor name, and 16 bytes for the device 9403 * name and 4 bytes for the revision. 9404 */ 9405 strncpy(inq_ptr->vendor, CTL_VENDOR, sizeof(inq_ptr->vendor)); 9406 if (lun == NULL) { 9407 strcpy(inq_ptr->product, CTL_DIRECT_PRODUCT); 9408 } else { 9409 switch (lun->be_lun->lun_type) { 9410 case T_DIRECT: 9411 strcpy(inq_ptr->product, CTL_DIRECT_PRODUCT); 9412 break; 9413 case T_PROCESSOR: 9414 strcpy(inq_ptr->product, CTL_PROCESSOR_PRODUCT); 9415 break; 9416 default: 9417 strcpy(inq_ptr->product, CTL_UNKNOWN_PRODUCT); 9418 break; 9419 } 9420 } 9421 9422 /* 9423 * XXX make this a macro somewhere so it automatically gets 9424 * incremented when we make changes. 9425 */ 9426 strncpy(inq_ptr->revision, "0001", sizeof(inq_ptr->revision)); 9427 9428 /* 9429 * For parallel SCSI, we support double transition and single 9430 * transition clocking. We also support QAS (Quick Arbitration 9431 * and Selection) and Information Unit transfers on both the 9432 * control and array devices. 9433 */ 9434 if (is_fc == 0) 9435 inq_ptr->spi3data = SID_SPI_CLOCK_DT_ST | SID_SPI_QAS | 9436 SID_SPI_IUS; 9437 9438 /* SAM-3 */ 9439 scsi_ulto2b(0x0060, inq_ptr->version1); 9440 /* SPC-3 (no version claimed) XXX should we claim a version? */ 9441 scsi_ulto2b(0x0300, inq_ptr->version2); 9442 if (is_fc) { 9443 /* FCP-2 ANSI INCITS.350:2003 */ 9444 scsi_ulto2b(0x0917, inq_ptr->version3); 9445 } else { 9446 /* SPI-4 ANSI INCITS.362:200x */ 9447 scsi_ulto2b(0x0B56, inq_ptr->version3); 9448 } 9449 9450 if (lun == NULL) { 9451 /* SBC-2 (no version claimed) XXX should we claim a version? */ 9452 scsi_ulto2b(0x0320, inq_ptr->version4); 9453 } else { 9454 switch (lun->be_lun->lun_type) { 9455 case T_DIRECT: 9456 /* 9457 * SBC-2 (no version claimed) XXX should we claim a 9458 * version? 9459 */ 9460 scsi_ulto2b(0x0320, inq_ptr->version4); 9461 break; 9462 case T_PROCESSOR: 9463 default: 9464 break; 9465 } 9466 } 9467 9468 ctsio->scsi_status = SCSI_STATUS_OK; 9469 if (ctsio->kern_data_len > 0) { 9470 ctsio->be_move_done = ctl_config_move_done; 9471 ctl_datamove((union ctl_io *)ctsio); 9472 } else { 9473 ctsio->io_hdr.status = CTL_SUCCESS; 9474 ctl_done((union ctl_io *)ctsio); 9475 } 9476 9477 return (CTL_RETVAL_COMPLETE); 9478 } 9479 9480 int 9481 ctl_inquiry(struct ctl_scsiio *ctsio) 9482 { 9483 struct scsi_inquiry *cdb; 9484 int retval; 9485 9486 cdb = (struct scsi_inquiry *)ctsio->cdb; 9487 9488 retval = 0; 9489 9490 CTL_DEBUG_PRINT(("ctl_inquiry\n")); 9491 9492 /* 9493 * Right now, we don't support the CmdDt inquiry information. 9494 * This would be nice to support in the future. When we do 9495 * support it, we should change this test so that it checks to make 9496 * sure SI_EVPD and SI_CMDDT aren't both set at the same time. 9497 */ 9498 #ifdef notyet 9499 if (((cdb->byte2 & SI_EVPD) 9500 && (cdb->byte2 & SI_CMDDT))) 9501 #endif 9502 if (cdb->byte2 & SI_CMDDT) { 9503 /* 9504 * Point to the SI_CMDDT bit. We might change this 9505 * when we support SI_CMDDT, but since both bits would be 9506 * "wrong", this should probably just stay as-is then. 
9507 */ 9508 ctl_set_invalid_field(ctsio, 9509 /*sks_valid*/ 1, 9510 /*command*/ 1, 9511 /*field*/ 1, 9512 /*bit_valid*/ 1, 9513 /*bit*/ 1); 9514 ctl_done((union ctl_io *)ctsio); 9515 return (CTL_RETVAL_COMPLETE); 9516 } 9517 if (cdb->byte2 & SI_EVPD) 9518 retval = ctl_inquiry_evpd(ctsio); 9519 #ifdef notyet 9520 else if (cdb->byte2 & SI_CMDDT) 9521 retval = ctl_inquiry_cmddt(ctsio); 9522 #endif 9523 else 9524 retval = ctl_inquiry_std(ctsio); 9525 9526 return (retval); 9527 } 9528 9529 /* 9530 * For known CDB types, parse the LBA and length. 9531 */ 9532 static int 9533 ctl_get_lba_len(union ctl_io *io, uint64_t *lba, uint32_t *len) 9534 { 9535 if (io->io_hdr.io_type != CTL_IO_SCSI) 9536 return (1); 9537 9538 switch (io->scsiio.cdb[0]) { 9539 case READ_6: 9540 case WRITE_6: { 9541 struct scsi_rw_6 *cdb; 9542 9543 cdb = (struct scsi_rw_6 *)io->scsiio.cdb; 9544 9545 *lba = scsi_3btoul(cdb->addr); 9546 /* only 5 bits are valid in the most significant address byte */ 9547 *lba &= 0x1fffff; 9548 *len = cdb->length; 9549 break; 9550 } 9551 case READ_10: 9552 case WRITE_10: { 9553 struct scsi_rw_10 *cdb; 9554 9555 cdb = (struct scsi_rw_10 *)io->scsiio.cdb; 9556 9557 *lba = scsi_4btoul(cdb->addr); 9558 *len = scsi_2btoul(cdb->length); 9559 break; 9560 } 9561 case WRITE_VERIFY_10: { 9562 struct scsi_write_verify_10 *cdb; 9563 9564 cdb = (struct scsi_write_verify_10 *)io->scsiio.cdb; 9565 9566 *lba = scsi_4btoul(cdb->addr); 9567 *len = scsi_2btoul(cdb->length); 9568 break; 9569 } 9570 case READ_12: 9571 case WRITE_12: { 9572 struct scsi_rw_12 *cdb; 9573 9574 cdb = (struct scsi_rw_12 *)io->scsiio.cdb; 9575 9576 *lba = scsi_4btoul(cdb->addr); 9577 *len = scsi_4btoul(cdb->length); 9578 break; 9579 } 9580 case WRITE_VERIFY_12: { 9581 struct scsi_write_verify_12 *cdb; 9582 9583 cdb = (struct scsi_write_verify_12 *)io->scsiio.cdb; 9584 9585 *lba = scsi_4btoul(cdb->addr); 9586 *len = scsi_4btoul(cdb->length); 9587 break; 9588 } 9589 case READ_16: 9590 case WRITE_16: { 9591 struct scsi_rw_16 *cdb; 9592 9593 cdb = (struct scsi_rw_16 *)io->scsiio.cdb; 9594 9595 *lba = scsi_8btou64(cdb->addr); 9596 *len = scsi_4btoul(cdb->length); 9597 break; 9598 } 9599 case WRITE_VERIFY_16: { 9600 struct scsi_write_verify_16 *cdb; 9601 9602 cdb = (struct scsi_write_verify_16 *)io->scsiio.cdb; 9603 9604 9605 *lba = scsi_8btou64(cdb->addr); 9606 *len = scsi_4btoul(cdb->length); 9607 break; 9608 } 9609 default: 9610 return (1); 9611 break; /* NOTREACHED */ 9612 } 9613 9614 return (0); 9615 } 9616 9617 static ctl_action 9618 ctl_extent_check_lba(uint64_t lba1, uint32_t len1, uint64_t lba2, uint32_t len2) 9619 { 9620 uint64_t endlba1, endlba2; 9621 9622 endlba1 = lba1 + len1 - 1; 9623 endlba2 = lba2 + len2 - 1; 9624 9625 if ((endlba1 < lba2) 9626 || (endlba2 < lba1)) 9627 return (CTL_ACTION_PASS); 9628 else 9629 return (CTL_ACTION_BLOCK); 9630 } 9631 9632 static ctl_action 9633 ctl_extent_check(union ctl_io *io1, union ctl_io *io2) 9634 { 9635 uint64_t lba1, lba2; 9636 uint32_t len1, len2; 9637 int retval; 9638 9639 retval = ctl_get_lba_len(io1, &lba1, &len1); 9640 if (retval != 0) 9641 return (CTL_ACTION_ERROR); 9642 9643 retval = ctl_get_lba_len(io2, &lba2, &len2); 9644 if (retval != 0) 9645 return (CTL_ACTION_ERROR); 9646 9647 return (ctl_extent_check_lba(lba1, len1, lba2, len2)); 9648 } 9649 9650 static ctl_action 9651 ctl_check_for_blockage(union ctl_io *pending_io, union ctl_io *ooa_io) 9652 { 9653 struct ctl_cmd_entry *pending_entry, *ooa_entry; 9654 ctl_serialize_action *serialize_row; 9655 9656 /* 9657 * The initiator 
attempted multiple untagged commands at the same 9658 * time. Can't do that. 9659 */ 9660 if ((pending_io->scsiio.tag_type == CTL_TAG_UNTAGGED) 9661 && (ooa_io->scsiio.tag_type == CTL_TAG_UNTAGGED) 9662 && ((pending_io->io_hdr.nexus.targ_port == 9663 ooa_io->io_hdr.nexus.targ_port) 9664 && (pending_io->io_hdr.nexus.initid.id == 9665 ooa_io->io_hdr.nexus.initid.id)) 9666 && ((ooa_io->io_hdr.flags & CTL_FLAG_ABORT) == 0)) 9667 return (CTL_ACTION_OVERLAP); 9668 9669 /* 9670 * The initiator attempted to send multiple tagged commands with 9671 * the same ID. (It's fine if different initiators have the same 9672 * tag ID.) 9673 * 9674 * Even if all of those conditions are true, we don't kill the I/O 9675 * if the command ahead of us has been aborted. We won't end up 9676 * sending it to the FETD, and it's perfectly legal to resend a 9677 * command with the same tag number as long as the previous 9678 * instance of this tag number has been aborted somehow. 9679 */ 9680 if ((pending_io->scsiio.tag_type != CTL_TAG_UNTAGGED) 9681 && (ooa_io->scsiio.tag_type != CTL_TAG_UNTAGGED) 9682 && (pending_io->scsiio.tag_num == ooa_io->scsiio.tag_num) 9683 && ((pending_io->io_hdr.nexus.targ_port == 9684 ooa_io->io_hdr.nexus.targ_port) 9685 && (pending_io->io_hdr.nexus.initid.id == 9686 ooa_io->io_hdr.nexus.initid.id)) 9687 && ((ooa_io->io_hdr.flags & CTL_FLAG_ABORT) == 0)) 9688 return (CTL_ACTION_OVERLAP_TAG); 9689 9690 /* 9691 * If we get a head of queue tag, SAM-3 says that we should 9692 * immediately execute it. 9693 * 9694 * What happens if this command would normally block for some other 9695 * reason? e.g. a request sense with a head of queue tag 9696 * immediately after a write. Normally that would block, but this 9697 * will result in its getting executed immediately... 9698 * 9699 * We currently return "pass" instead of "skip", so we'll end up 9700 * going through the rest of the queue to check for overlapped tags. 9701 * 9702 * XXX KDM check for other types of blockage first?? 9703 */ 9704 if (pending_io->scsiio.tag_type == CTL_TAG_HEAD_OF_QUEUE) 9705 return (CTL_ACTION_PASS); 9706 9707 /* 9708 * Ordered tags have to block until all items ahead of them 9709 * have completed. If we get called with an ordered tag, we always 9710 * block, if something else is ahead of us in the queue. 9711 */ 9712 if (pending_io->scsiio.tag_type == CTL_TAG_ORDERED) 9713 return (CTL_ACTION_BLOCK); 9714 9715 /* 9716 * Simple tags get blocked until all head of queue and ordered tags 9717 * ahead of them have completed. I'm lumping untagged commands in 9718 * with simple tags here. XXX KDM is that the right thing to do? 
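 * Once the tag-level checks pass, the per-opcode serialization table below
 * decides whether the pending command blocks, needs an extent check,
 * passes, or skips the command ahead of it.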
9719 */ 9720 if (((pending_io->scsiio.tag_type == CTL_TAG_UNTAGGED) 9721 || (pending_io->scsiio.tag_type == CTL_TAG_SIMPLE)) 9722 && ((ooa_io->scsiio.tag_type == CTL_TAG_HEAD_OF_QUEUE) 9723 || (ooa_io->scsiio.tag_type == CTL_TAG_ORDERED))) 9724 return (CTL_ACTION_BLOCK); 9725 9726 pending_entry = &ctl_cmd_table[pending_io->scsiio.cdb[0]]; 9727 ooa_entry = &ctl_cmd_table[ooa_io->scsiio.cdb[0]]; 9728 9729 serialize_row = ctl_serialize_table[ooa_entry->seridx]; 9730 9731 switch (serialize_row[pending_entry->seridx]) { 9732 case CTL_SER_BLOCK: 9733 return (CTL_ACTION_BLOCK); 9734 break; /* NOTREACHED */ 9735 case CTL_SER_EXTENT: 9736 return (ctl_extent_check(pending_io, ooa_io)); 9737 break; /* NOTREACHED */ 9738 case CTL_SER_PASS: 9739 return (CTL_ACTION_PASS); 9740 break; /* NOTREACHED */ 9741 case CTL_SER_SKIP: 9742 return (CTL_ACTION_SKIP); 9743 break; 9744 default: 9745 panic("invalid serialization value %d", 9746 serialize_row[pending_entry->seridx]); 9747 break; /* NOTREACHED */ 9748 } 9749 9750 return (CTL_ACTION_ERROR); 9751 } 9752 9753 /* 9754 * Check for blockage or overlaps against the OOA (Order Of Arrival) queue. 9755 * Assumptions: 9756 * - caller holds ctl_lock 9757 * - pending_io is generally either incoming, or on the blocked queue 9758 * - starting I/O is the I/O we want to start the check with. 9759 */ 9760 static ctl_action 9761 ctl_check_ooa(struct ctl_lun *lun, union ctl_io *pending_io, 9762 union ctl_io *starting_io) 9763 { 9764 union ctl_io *ooa_io; 9765 ctl_action action; 9766 9767 /* 9768 * Run back along the OOA queue, starting with the current 9769 * blocked I/O and going through every I/O before it on the 9770 * queue. If starting_io is NULL, we'll just end up returning 9771 * CTL_ACTION_PASS. 9772 */ 9773 for (ooa_io = starting_io; ooa_io != NULL; 9774 ooa_io = (union ctl_io *)TAILQ_PREV(&ooa_io->io_hdr, ctl_ooaq, 9775 ooa_links)){ 9776 9777 /* 9778 * This routine just checks to see whether 9779 * cur_blocked is blocked by ooa_io, which is ahead 9780 * of it in the queue. It doesn't queue/dequeue 9781 * cur_blocked. 9782 */ 9783 action = ctl_check_for_blockage(pending_io, ooa_io); 9784 switch (action) { 9785 case CTL_ACTION_BLOCK: 9786 case CTL_ACTION_OVERLAP: 9787 case CTL_ACTION_OVERLAP_TAG: 9788 case CTL_ACTION_SKIP: 9789 case CTL_ACTION_ERROR: 9790 return (action); 9791 break; /* NOTREACHED */ 9792 case CTL_ACTION_PASS: 9793 break; 9794 default: 9795 panic("invalid action %d", action); 9796 break; /* NOTREACHED */ 9797 } 9798 } 9799 9800 return (CTL_ACTION_PASS); 9801 } 9802 9803 /* 9804 * Assumptions: 9805 * - An I/O has just completed, and has been removed from the per-LUN OOA 9806 * queue, so some items on the blocked queue may now be unblocked. 9807 * - The caller holds ctl_softc->ctl_lock 9808 */ 9809 static int 9810 ctl_check_blocked(struct ctl_lun *lun) 9811 { 9812 union ctl_io *cur_blocked, *next_blocked; 9813 9814 /* 9815 * Run forward from the head of the blocked queue, checking each 9816 * entry against the I/Os prior to it on the OOA queue to see if 9817 * there is still any blockage. 9818 * 9819 * We cannot use the TAILQ_FOREACH() macro, because it can't deal 9820 * with our removing a variable on it while it is traversing the 9821 * list. 
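 * Hence the explicit next_blocked pointer below, which is sampled before
 * cur_blocked can be removed from the blocked queue.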
9822 */ 9823 for (cur_blocked = (union ctl_io *)TAILQ_FIRST(&lun->blocked_queue); 9824 cur_blocked != NULL; cur_blocked = next_blocked) { 9825 union ctl_io *prev_ooa; 9826 ctl_action action; 9827 9828 next_blocked = (union ctl_io *)TAILQ_NEXT(&cur_blocked->io_hdr, 9829 blocked_links); 9830 9831 prev_ooa = (union ctl_io *)TAILQ_PREV(&cur_blocked->io_hdr, 9832 ctl_ooaq, ooa_links); 9833 9834 /* 9835 * If cur_blocked happens to be the first item in the OOA 9836 * queue now, prev_ooa will be NULL, and the action 9837 * returned will just be CTL_ACTION_PASS. 9838 */ 9839 action = ctl_check_ooa(lun, cur_blocked, prev_ooa); 9840 9841 switch (action) { 9842 case CTL_ACTION_BLOCK: 9843 /* Nothing to do here, still blocked */ 9844 break; 9845 case CTL_ACTION_OVERLAP: 9846 case CTL_ACTION_OVERLAP_TAG: 9847 /* 9848 * This shouldn't happen! In theory we've already 9849 * checked this command for overlap... 9850 */ 9851 break; 9852 case CTL_ACTION_PASS: 9853 case CTL_ACTION_SKIP: { 9854 struct ctl_softc *softc; 9855 struct ctl_cmd_entry *entry; 9856 uint32_t initidx; 9857 uint8_t opcode; 9858 int isc_retval; 9859 9860 /* 9861 * The skip case shouldn't happen, this transaction 9862 * should have never made it onto the blocked queue. 9863 */ 9864 /* 9865 * This I/O is no longer blocked, we can remove it 9866 * from the blocked queue. Since this is a TAILQ 9867 * (doubly linked list), we can do O(1) removals 9868 * from any place on the list. 9869 */ 9870 TAILQ_REMOVE(&lun->blocked_queue, &cur_blocked->io_hdr, 9871 blocked_links); 9872 cur_blocked->io_hdr.flags &= ~CTL_FLAG_BLOCKED; 9873 9874 if (cur_blocked->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC){ 9875 /* 9876 * Need to send IO back to original side to 9877 * run 9878 */ 9879 union ctl_ha_msg msg_info; 9880 9881 msg_info.hdr.original_sc = 9882 cur_blocked->io_hdr.original_sc; 9883 msg_info.hdr.serializing_sc = cur_blocked; 9884 msg_info.hdr.msg_type = CTL_MSG_R2R; 9885 if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL, 9886 &msg_info, sizeof(msg_info), 0)) > 9887 CTL_HA_STATUS_SUCCESS) { 9888 printf("CTL:Check Blocked error from " 9889 "ctl_ha_msg_send %d\n", 9890 isc_retval); 9891 } 9892 break; 9893 } 9894 opcode = cur_blocked->scsiio.cdb[0]; 9895 entry = &ctl_cmd_table[opcode]; 9896 softc = control_softc; 9897 9898 initidx = ctl_get_initindex(&cur_blocked->io_hdr.nexus); 9899 9900 /* 9901 * Check this I/O for LUN state changes that may 9902 * have happened while this command was blocked. 9903 * The LUN state may have been changed by a command 9904 * ahead of us in the queue, so we need to re-check 9905 * for any states that can be caused by SCSI 9906 * commands. 9907 */ 9908 if (ctl_scsiio_lun_check(softc, lun, entry, 9909 &cur_blocked->scsiio) == 0) { 9910 cur_blocked->io_hdr.flags |= 9911 CTL_FLAG_IS_WAS_ON_RTR; 9912 STAILQ_INSERT_TAIL(&lun->ctl_softc->rtr_queue, 9913 &cur_blocked->io_hdr, links); 9914 /* 9915 * In the non CTL_DONE_THREAD case, we need 9916 * to wake up the work thread here. When 9917 * we're processing completed requests from 9918 * the work thread context, we'll pop back 9919 * around and end up pulling things off the 9920 * RtR queue. When we aren't processing 9921 * things from the work thread context, 9922 * though, we won't ever check the RtR queue. 9923 * So we need to wake up the thread to clear 9924 * things off the queue. Otherwise this 9925 * transaction will just sit on the RtR queue 9926 * until a new I/O comes in. (Which may or 9927 * may not happen...) 
9928 */ 9929 #ifndef CTL_DONE_THREAD 9930 ctl_wakeup_thread(); 9931 #endif 9932 } else 9933 ctl_done_lock(cur_blocked, /*have_lock*/ 1); 9934 break; 9935 } 9936 default: 9937 /* 9938 * This probably shouldn't happen -- we shouldn't 9939 * get CTL_ACTION_ERROR, or anything else. 9940 */ 9941 break; 9942 } 9943 } 9944 9945 return (CTL_RETVAL_COMPLETE); 9946 } 9947 9948 /* 9949 * This routine (with one exception) checks LUN flags that can be set by 9950 * commands ahead of us in the OOA queue. These flags have to be checked 9951 * when a command initially comes in, and when we pull a command off the 9952 * blocked queue and are preparing to execute it. The reason we have to 9953 * check these flags for commands on the blocked queue is that the LUN 9954 * state may have been changed by a command ahead of us while we're on the 9955 * blocked queue. 9956 * 9957 * Ordering is somewhat important with these checks, so please pay 9958 * careful attention to the placement of any new checks. 9959 */ 9960 static int 9961 ctl_scsiio_lun_check(struct ctl_softc *ctl_softc, struct ctl_lun *lun, 9962 struct ctl_cmd_entry *entry, struct ctl_scsiio *ctsio) 9963 { 9964 int retval; 9965 9966 retval = 0; 9967 9968 /* 9969 * If this shelf is a secondary shelf controller, we have to reject 9970 * any media access commands. 9971 */ 9972 #if 0 9973 /* No longer needed for HA */ 9974 if (((ctl_softc->flags & CTL_FLAG_MASTER_SHELF) == 0) 9975 && ((entry->flags & CTL_CMD_FLAG_OK_ON_SECONDARY) == 0)) { 9976 ctl_set_lun_standby(ctsio); 9977 retval = 1; 9978 goto bailout; 9979 } 9980 #endif 9981 9982 /* 9983 * Check for a reservation conflict. If this command isn't allowed 9984 * even on reserved LUNs, and if this initiator isn't the one who 9985 * reserved us, reject the command with a reservation conflict. 9986 */ 9987 if ((lun->flags & CTL_LUN_RESERVED) 9988 && ((entry->flags & CTL_CMD_FLAG_ALLOW_ON_RESV) == 0)) { 9989 if ((ctsio->io_hdr.nexus.initid.id != lun->rsv_nexus.initid.id) 9990 || (ctsio->io_hdr.nexus.targ_port != lun->rsv_nexus.targ_port) 9991 || (ctsio->io_hdr.nexus.targ_target.id != 9992 lun->rsv_nexus.targ_target.id)) { 9993 ctsio->scsi_status = SCSI_STATUS_RESERV_CONFLICT; 9994 ctsio->io_hdr.status = CTL_SCSI_ERROR; 9995 retval = 1; 9996 goto bailout; 9997 } 9998 } 9999 10000 if ( (lun->flags & CTL_LUN_PR_RESERVED) 10001 && ((entry->flags & CTL_CMD_FLAG_ALLOW_ON_PR_RESV) == 0)) { 10002 uint32_t residx; 10003 10004 residx = ctl_get_resindex(&ctsio->io_hdr.nexus); 10005 /* 10006 * if we aren't registered or it's a res holder type 10007 * reservation and this isn't the res holder then set a 10008 * conflict. 10009 * NOTE: Commands which might be allowed on write exclusive 10010 * type reservations are checked in the particular command 10011 * for a conflict. Read and SSU are the only ones. 10012 */ 10013 if (!lun->per_res[residx].registered 10014 || (residx != lun->pr_res_idx && lun->res_type < 4)) { 10015 ctsio->scsi_status = SCSI_STATUS_RESERV_CONFLICT; 10016 ctsio->io_hdr.status = CTL_SCSI_ERROR; 10017 retval = 1; 10018 goto bailout; 10019 } 10020 10021 } 10022 10023 if ((lun->flags & CTL_LUN_OFFLINE) 10024 && ((entry->flags & CTL_CMD_FLAG_OK_ON_OFFLINE) == 0)) { 10025 ctl_set_lun_not_ready(ctsio); 10026 retval = 1; 10027 goto bailout; 10028 } 10029 10030 /* 10031 * If the LUN is stopped, see if this particular command is allowed 10032 * for a stopped lun. Otherwise, reject it with 0x04,0x02. 
10033 */ 10034 if ((lun->flags & CTL_LUN_STOPPED) 10035 && ((entry->flags & CTL_CMD_FLAG_OK_ON_STOPPED) == 0)) { 10036 /* "Logical unit not ready, initializing cmd. required" */ 10037 ctl_set_lun_stopped(ctsio); 10038 retval = 1; 10039 goto bailout; 10040 } 10041 10042 if ((lun->flags & CTL_LUN_INOPERABLE) 10043 && ((entry->flags & CTL_CMD_FLAG_OK_ON_INOPERABLE) == 0)) { 10044 /* "Medium format corrupted" */ 10045 ctl_set_medium_format_corrupted(ctsio); 10046 retval = 1; 10047 goto bailout; 10048 } 10049 10050 bailout: 10051 return (retval); 10052 10053 } 10054 10055 static void 10056 ctl_failover_io(union ctl_io *io, int have_lock) 10057 { 10058 ctl_set_busy(&io->scsiio); 10059 ctl_done_lock(io, have_lock); 10060 } 10061 10062 static void 10063 ctl_failover(void) 10064 { 10065 struct ctl_lun *lun; 10066 struct ctl_softc *ctl_softc; 10067 union ctl_io *next_io, *pending_io; 10068 union ctl_io *io; 10069 int lun_idx; 10070 int i; 10071 10072 ctl_softc = control_softc; 10073 10074 mtx_lock(&ctl_softc->ctl_lock); 10075 /* 10076 * Remove any cmds from the other SC from the rtr queue. These 10077 * will obviously only be for LUNs for which we're the primary. 10078 * We can't send status or get/send data for these commands. 10079 * Since they haven't been executed yet, we can just remove them. 10080 * We'll either abort them or delete them below, depending on 10081 * which HA mode we're in. 10082 */ 10083 for (io = (union ctl_io *)STAILQ_FIRST(&ctl_softc->rtr_queue); 10084 io != NULL; io = next_io) { 10085 next_io = (union ctl_io *)STAILQ_NEXT(&io->io_hdr, links); 10086 if (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) 10087 STAILQ_REMOVE(&ctl_softc->rtr_queue, &io->io_hdr, 10088 ctl_io_hdr, links); 10089 } 10090 10091 for (lun_idx=0; lun_idx < ctl_softc->num_luns; lun_idx++) { 10092 lun = ctl_softc->ctl_luns[lun_idx]; 10093 if (lun==NULL) 10094 continue; 10095 10096 /* 10097 * Processor LUNs are primary on both sides. 10098 * XXX will this always be true? 10099 */ 10100 if (lun->be_lun->lun_type == T_PROCESSOR) 10101 continue; 10102 10103 if ((lun->flags & CTL_LUN_PRIMARY_SC) 10104 && (ctl_softc->ha_mode == CTL_HA_MODE_SER_ONLY)) { 10105 printf("FAILOVER: primary lun %d\n", lun_idx); 10106 /* 10107 * Remove all commands from the other SC. First from the 10108 * blocked queue then from the ooa queue. Once we have 10109 * removed them. Call ctl_check_blocked to see if there 10110 * is anything that can run. 
10111 */ 10112 for (io = (union ctl_io *)TAILQ_FIRST( 10113 &lun->blocked_queue); io != NULL; io = next_io) { 10114 10115 next_io = (union ctl_io *)TAILQ_NEXT( 10116 &io->io_hdr, blocked_links); 10117 10118 if (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) { 10119 TAILQ_REMOVE(&lun->blocked_queue, 10120 &io->io_hdr,blocked_links); 10121 io->io_hdr.flags &= ~CTL_FLAG_BLOCKED; 10122 TAILQ_REMOVE(&lun->ooa_queue, 10123 &io->io_hdr, ooa_links); 10124 10125 ctl_free_io_internal(io, 1); 10126 } 10127 } 10128 10129 for (io = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); 10130 io != NULL; io = next_io) { 10131 10132 next_io = (union ctl_io *)TAILQ_NEXT( 10133 &io->io_hdr, ooa_links); 10134 10135 if (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) { 10136 10137 TAILQ_REMOVE(&lun->ooa_queue, 10138 &io->io_hdr, 10139 ooa_links); 10140 10141 ctl_free_io_internal(io, 1); 10142 } 10143 } 10144 ctl_check_blocked(lun); 10145 } else if ((lun->flags & CTL_LUN_PRIMARY_SC) 10146 && (ctl_softc->ha_mode == CTL_HA_MODE_XFER)) { 10147 10148 printf("FAILOVER: primary lun %d\n", lun_idx); 10149 /* 10150 * Abort all commands from the other SC. We can't 10151 * send status back for them now. These should get 10152 * cleaned up when they are completed or come out 10153 * for a datamove operation. 10154 */ 10155 for (io = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); 10156 io != NULL; io = next_io) { 10157 next_io = (union ctl_io *)TAILQ_NEXT( 10158 &io->io_hdr, ooa_links); 10159 10160 if (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) 10161 io->io_hdr.flags |= CTL_FLAG_ABORT; 10162 } 10163 } else if (((lun->flags & CTL_LUN_PRIMARY_SC) == 0) 10164 && (ctl_softc->ha_mode == CTL_HA_MODE_XFER)) { 10165 10166 printf("FAILOVER: secondary lun %d\n", lun_idx); 10167 10168 lun->flags |= CTL_LUN_PRIMARY_SC; 10169 10170 /* 10171 * We send all I/O that was sent to this controller 10172 * and redirected to the other side back with 10173 * busy status, and have the initiator retry it. 10174 * Figuring out how much data has been transferred, 10175 * etc. and picking up where we left off would be 10176 * very tricky. 10177 * 10178 * XXX KDM need to remove I/O from the blocked 10179 * queue as well! 10180 */ 10181 for (pending_io = (union ctl_io *)TAILQ_FIRST( 10182 &lun->ooa_queue); pending_io != NULL; 10183 pending_io = next_io) { 10184 10185 next_io = (union ctl_io *)TAILQ_NEXT( 10186 &pending_io->io_hdr, ooa_links); 10187 10188 pending_io->io_hdr.flags &= 10189 ~CTL_FLAG_SENT_2OTHER_SC; 10190 10191 if (pending_io->io_hdr.flags & 10192 CTL_FLAG_IO_ACTIVE) { 10193 pending_io->io_hdr.flags |= 10194 CTL_FLAG_FAILOVER; 10195 } else { 10196 ctl_set_busy(&pending_io->scsiio); 10197 ctl_done_lock(pending_io, 10198 /*have_lock*/1); 10199 } 10200 } 10201 10202 /* 10203 * Build Unit Attention 10204 */ 10205 for (i = 0; i < CTL_MAX_INITIATORS; i++) { 10206 lun->pending_sense[i].ua_pending |= 10207 CTL_UA_ASYM_ACC_CHANGE; 10208 } 10209 } else if (((lun->flags & CTL_LUN_PRIMARY_SC) == 0) 10210 && (ctl_softc->ha_mode == CTL_HA_MODE_SER_ONLY)) { 10211 printf("FAILOVER: secondary lun %d\n", lun_idx); 10212 /* 10213 * if the first io on the OOA is not on the RtR queue 10214 * add it. 
10215 */ 10216 lun->flags |= CTL_LUN_PRIMARY_SC; 10217 10218 pending_io = (union ctl_io *)TAILQ_FIRST( 10219 &lun->ooa_queue); 10220 if (pending_io==NULL) { 10221 printf("Nothing on OOA queue\n"); 10222 continue; 10223 } 10224 10225 pending_io->io_hdr.flags &= ~CTL_FLAG_SENT_2OTHER_SC; 10226 if ((pending_io->io_hdr.flags & 10227 CTL_FLAG_IS_WAS_ON_RTR) == 0) { 10228 pending_io->io_hdr.flags |= 10229 CTL_FLAG_IS_WAS_ON_RTR; 10230 STAILQ_INSERT_TAIL(&ctl_softc->rtr_queue, 10231 &pending_io->io_hdr, links); 10232 } 10233 #if 0 10234 else 10235 { 10236 printf("Tag 0x%04x is running\n", 10237 pending_io->scsiio.tag_num); 10238 } 10239 #endif 10240 10241 next_io = (union ctl_io *)TAILQ_NEXT( 10242 &pending_io->io_hdr, ooa_links); 10243 for (pending_io=next_io; pending_io != NULL; 10244 pending_io = next_io) { 10245 pending_io->io_hdr.flags &= 10246 ~CTL_FLAG_SENT_2OTHER_SC; 10247 next_io = (union ctl_io *)TAILQ_NEXT( 10248 &pending_io->io_hdr, ooa_links); 10249 if (pending_io->io_hdr.flags & 10250 CTL_FLAG_IS_WAS_ON_RTR) { 10251 #if 0 10252 printf("Tag 0x%04x is running\n", 10253 pending_io->scsiio.tag_num); 10254 #endif 10255 continue; 10256 } 10257 10258 switch (ctl_check_ooa(lun, pending_io, 10259 (union ctl_io *)TAILQ_PREV( 10260 &pending_io->io_hdr, ctl_ooaq, 10261 ooa_links))) { 10262 10263 case CTL_ACTION_BLOCK: 10264 TAILQ_INSERT_TAIL(&lun->blocked_queue, 10265 &pending_io->io_hdr, 10266 blocked_links); 10267 pending_io->io_hdr.flags |= 10268 CTL_FLAG_BLOCKED; 10269 break; 10270 case CTL_ACTION_PASS: 10271 case CTL_ACTION_SKIP: 10272 pending_io->io_hdr.flags |= 10273 CTL_FLAG_IS_WAS_ON_RTR; 10274 STAILQ_INSERT_TAIL( 10275 &ctl_softc->rtr_queue, 10276 &pending_io->io_hdr, links); 10277 break; 10278 case CTL_ACTION_OVERLAP: 10279 ctl_set_overlapped_cmd( 10280 (struct ctl_scsiio *)pending_io); 10281 ctl_done_lock(pending_io, 10282 /*have_lock*/ 1); 10283 break; 10284 case CTL_ACTION_OVERLAP_TAG: 10285 ctl_set_overlapped_tag( 10286 (struct ctl_scsiio *)pending_io, 10287 pending_io->scsiio.tag_num & 0xff); 10288 ctl_done_lock(pending_io, 10289 /*have_lock*/ 1); 10290 break; 10291 case CTL_ACTION_ERROR: 10292 default: 10293 ctl_set_internal_failure( 10294 (struct ctl_scsiio *)pending_io, 10295 0, // sks_valid 10296 0); //retry count 10297 ctl_done_lock(pending_io, 10298 /*have_lock*/ 1); 10299 break; 10300 } 10301 } 10302 10303 /* 10304 * Build Unit Attention 10305 */ 10306 for (i = 0; i < CTL_MAX_INITIATORS; i++) { 10307 lun->pending_sense[i].ua_pending |= 10308 CTL_UA_ASYM_ACC_CHANGE; 10309 } 10310 } else { 10311 panic("Unhandled HA mode failover, LUN flags = %#x, " 10312 "ha_mode = #%x", lun->flags, ctl_softc->ha_mode); 10313 } 10314 } 10315 ctl_pause_rtr = 0; 10316 mtx_unlock(&ctl_softc->ctl_lock); 10317 } 10318 10319 static int 10320 ctl_scsiio_precheck(struct ctl_softc *ctl_softc, struct ctl_scsiio *ctsio) 10321 { 10322 struct ctl_lun *lun; 10323 struct ctl_cmd_entry *entry; 10324 uint8_t opcode; 10325 uint32_t initidx; 10326 int retval; 10327 10328 retval = 0; 10329 10330 lun = NULL; 10331 10332 opcode = ctsio->cdb[0]; 10333 10334 mtx_lock(&ctl_softc->ctl_lock); 10335 10336 if ((ctsio->io_hdr.nexus.targ_lun < CTL_MAX_LUNS) 10337 && (ctl_softc->ctl_luns[ctsio->io_hdr.nexus.targ_lun] != NULL)) { 10338 lun = ctl_softc->ctl_luns[ctsio->io_hdr.nexus.targ_lun]; 10339 /* 10340 * If the LUN is invalid, pretend that it doesn't exist. 10341 * It will go away as soon as all pending I/O has been 10342 * completed. 
10343 */ 10344 if (lun->flags & CTL_LUN_DISABLED) { 10345 lun = NULL; 10346 } else { 10347 ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr = lun; 10348 ctsio->io_hdr.ctl_private[CTL_PRIV_BACKEND_LUN].ptr = 10349 lun->be_lun; 10350 if (lun->be_lun->lun_type == T_PROCESSOR) { 10351 ctsio->io_hdr.flags |= CTL_FLAG_CONTROL_DEV; 10352 } 10353 } 10354 } else { 10355 ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr = NULL; 10356 ctsio->io_hdr.ctl_private[CTL_PRIV_BACKEND_LUN].ptr = NULL; 10357 } 10358 10359 entry = &ctl_cmd_table[opcode]; 10360 10361 ctsio->io_hdr.flags &= ~CTL_FLAG_DATA_MASK; 10362 ctsio->io_hdr.flags |= entry->flags & CTL_FLAG_DATA_MASK; 10363 10364 /* 10365 * Check to see whether we can send this command to LUNs that don't 10366 * exist. This should pretty much only be the case for inquiry 10367 * and request sense. Further checks, below, really require having 10368 * a LUN, so we can't really check the command anymore. Just put 10369 * it on the rtr queue. 10370 */ 10371 if (lun == NULL) { 10372 if (entry->flags & CTL_CMD_FLAG_OK_ON_ALL_LUNS) 10373 goto queue_rtr; 10374 10375 ctl_set_unsupported_lun(ctsio); 10376 mtx_unlock(&ctl_softc->ctl_lock); 10377 ctl_done((union ctl_io *)ctsio); 10378 goto bailout; 10379 } else { 10380 /* 10381 * Every I/O goes into the OOA queue for a particular LUN, and 10382 * stays there until completion. 10383 */ 10384 TAILQ_INSERT_TAIL(&lun->ooa_queue, &ctsio->io_hdr, ooa_links); 10385 10386 /* 10387 * Make sure we support this particular command on this LUN. 10388 * e.g., we don't support writes to the control LUN. 10389 */ 10390 switch (lun->be_lun->lun_type) { 10391 case T_PROCESSOR: 10392 if (((entry->flags & CTL_CMD_FLAG_OK_ON_PROC) == 0) 10393 && ((entry->flags & CTL_CMD_FLAG_OK_ON_ALL_LUNS) 10394 == 0)) { 10395 ctl_set_invalid_opcode(ctsio); 10396 mtx_unlock(&ctl_softc->ctl_lock); 10397 ctl_done((union ctl_io *)ctsio); 10398 goto bailout; 10399 } 10400 break; 10401 case T_DIRECT: 10402 if (((entry->flags & CTL_CMD_FLAG_OK_ON_SLUN) == 0) 10403 && ((entry->flags & CTL_CMD_FLAG_OK_ON_ALL_LUNS) 10404 == 0)){ 10405 ctl_set_invalid_opcode(ctsio); 10406 mtx_unlock(&ctl_softc->ctl_lock); 10407 ctl_done((union ctl_io *)ctsio); 10408 goto bailout; 10409 } 10410 break; 10411 default: 10412 printf("Unsupported CTL LUN type %d\n", 10413 lun->be_lun->lun_type); 10414 panic("Unsupported CTL LUN type %d\n", 10415 lun->be_lun->lun_type); 10416 break; /* NOTREACHED */ 10417 } 10418 } 10419 10420 initidx = ctl_get_initindex(&ctsio->io_hdr.nexus); 10421 10422 /* 10423 * If we've got a request sense, it'll clear the contingent 10424 * allegiance condition. Otherwise, if we have a CA condition for 10425 * this initiator, clear it, because it sent down a command other 10426 * than request sense. 10427 */ 10428 if ((opcode != REQUEST_SENSE) 10429 && (ctl_is_set(lun->have_ca, initidx))) 10430 ctl_clear_mask(lun->have_ca, initidx); 10431 10432 /* 10433 * If the command has this flag set, it handles its own unit 10434 * attention reporting, we shouldn't do anything. Otherwise we 10435 * check for any pending unit attentions, and send them back to the 10436 * initiator. We only do this when a command initially comes in, 10437 * not when we pull it off the blocked queue. 10438 * 10439 * According to SAM-3, section 5.3.2, the order that things get 10440 * presented back to the host is basically unit attentions caused 10441 * by some sort of reset event, busy status, reservation conflicts 10442 * or task set full, and finally any other status. 
10443 * 10444 * One issue here is that some of the unit attentions we report 10445 * don't fall into the "reset" category (e.g. "reported luns data 10446 * has changed"). So reporting it here, before the reservation 10447 * check, may be technically wrong. I guess the only thing to do 10448 * would be to check for and report the reset events here, and then 10449 * check for the other unit attention types after we check for a 10450 * reservation conflict. 10451 * 10452 * XXX KDM need to fix this 10453 */ 10454 if ((entry->flags & CTL_CMD_FLAG_NO_SENSE) == 0) { 10455 ctl_ua_type ua_type; 10456 10457 ua_type = lun->pending_sense[initidx].ua_pending; 10458 if (ua_type != CTL_UA_NONE) { 10459 scsi_sense_data_type sense_format; 10460 10461 if (lun != NULL) 10462 sense_format = (lun->flags & 10463 CTL_LUN_SENSE_DESC) ? SSD_TYPE_DESC : 10464 SSD_TYPE_FIXED; 10465 else 10466 sense_format = SSD_TYPE_FIXED; 10467 10468 ua_type = ctl_build_ua(ua_type, &ctsio->sense_data, 10469 sense_format); 10470 if (ua_type != CTL_UA_NONE) { 10471 ctsio->scsi_status = SCSI_STATUS_CHECK_COND; 10472 ctsio->io_hdr.status = CTL_SCSI_ERROR | 10473 CTL_AUTOSENSE; 10474 ctsio->sense_len = SSD_FULL_SIZE; 10475 lun->pending_sense[initidx].ua_pending &= 10476 ~ua_type; 10477 mtx_unlock(&ctl_softc->ctl_lock); 10478 ctl_done((union ctl_io *)ctsio); 10479 goto bailout; 10480 } 10481 } 10482 } 10483 10484 10485 if (ctl_scsiio_lun_check(ctl_softc, lun, entry, ctsio) != 0) { 10486 mtx_unlock(&ctl_softc->ctl_lock); 10487 ctl_done((union ctl_io *)ctsio); 10488 goto bailout; 10489 } 10490 10491 /* 10492 * XXX CHD this is where we want to send IO to other side if 10493 * this LUN is secondary on this SC. We will need to make a copy 10494 * of the IO and flag the IO on this side as SENT_2OTHER and the flag 10495 * the copy we send as FROM_OTHER. 10496 * We also need to stuff the address of the original IO so we can 10497 * find it easily. Something similar will need be done on the other 10498 * side so when we are done we can find the copy. 10499 */ 10500 if ((lun->flags & CTL_LUN_PRIMARY_SC) == 0) { 10501 union ctl_ha_msg msg_info; 10502 int isc_retval; 10503 10504 ctsio->io_hdr.flags |= CTL_FLAG_SENT_2OTHER_SC; 10505 10506 msg_info.hdr.msg_type = CTL_MSG_SERIALIZE; 10507 msg_info.hdr.original_sc = (union ctl_io *)ctsio; 10508 #if 0 10509 printf("1. ctsio %p\n", ctsio); 10510 #endif 10511 msg_info.hdr.serializing_sc = NULL; 10512 msg_info.hdr.nexus = ctsio->io_hdr.nexus; 10513 msg_info.scsi.tag_num = ctsio->tag_num; 10514 msg_info.scsi.tag_type = ctsio->tag_type; 10515 memcpy(msg_info.scsi.cdb, ctsio->cdb, CTL_MAX_CDBLEN); 10516 10517 ctsio->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE; 10518 10519 if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL, 10520 (void *)&msg_info, sizeof(msg_info), 0)) > 10521 CTL_HA_STATUS_SUCCESS) { 10522 printf("CTL:precheck, ctl_ha_msg_send returned %d\n", 10523 isc_retval); 10524 printf("CTL:opcode is %x\n",opcode); 10525 } else { 10526 #if 0 10527 printf("CTL:Precheck sent msg, opcode is %x\n",opcode); 10528 #endif 10529 } 10530 10531 /* 10532 * XXX KDM this I/O is off the incoming queue, but hasn't 10533 * been inserted on any other queue. We may need to come 10534 * up with a holding queue while we wait for serialization 10535 * so that we have an idea of what we're waiting for from 10536 * the other side. 
10537 */ 10538 goto bailout_unlock; 10539 } 10540 10541 switch (ctl_check_ooa(lun, (union ctl_io *)ctsio, 10542 (union ctl_io *)TAILQ_PREV(&ctsio->io_hdr, 10543 ctl_ooaq, ooa_links))) { 10544 case CTL_ACTION_BLOCK: 10545 ctsio->io_hdr.flags |= CTL_FLAG_BLOCKED; 10546 TAILQ_INSERT_TAIL(&lun->blocked_queue, &ctsio->io_hdr, 10547 blocked_links); 10548 goto bailout_unlock; 10549 break; /* NOTREACHED */ 10550 case CTL_ACTION_PASS: 10551 case CTL_ACTION_SKIP: 10552 goto queue_rtr; 10553 break; /* NOTREACHED */ 10554 case CTL_ACTION_OVERLAP: 10555 ctl_set_overlapped_cmd(ctsio); 10556 mtx_unlock(&ctl_softc->ctl_lock); 10557 ctl_done((union ctl_io *)ctsio); 10558 goto bailout; 10559 break; /* NOTREACHED */ 10560 case CTL_ACTION_OVERLAP_TAG: 10561 ctl_set_overlapped_tag(ctsio, ctsio->tag_num & 0xff); 10562 mtx_unlock(&ctl_softc->ctl_lock); 10563 ctl_done((union ctl_io *)ctsio); 10564 goto bailout; 10565 break; /* NOTREACHED */ 10566 case CTL_ACTION_ERROR: 10567 default: 10568 ctl_set_internal_failure(ctsio, 10569 /*sks_valid*/ 0, 10570 /*retry_count*/ 0); 10571 mtx_unlock(&ctl_softc->ctl_lock); 10572 ctl_done((union ctl_io *)ctsio); 10573 goto bailout; 10574 break; /* NOTREACHED */ 10575 } 10576 10577 goto bailout_unlock; 10578 10579 queue_rtr: 10580 ctsio->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR; 10581 STAILQ_INSERT_TAIL(&ctl_softc->rtr_queue, &ctsio->io_hdr, links); 10582 10583 bailout_unlock: 10584 mtx_unlock(&ctl_softc->ctl_lock); 10585 10586 bailout: 10587 return (retval); 10588 } 10589 10590 static int 10591 ctl_scsiio(struct ctl_scsiio *ctsio) 10592 { 10593 int retval; 10594 struct ctl_cmd_entry *entry; 10595 10596 retval = CTL_RETVAL_COMPLETE; 10597 10598 CTL_DEBUG_PRINT(("ctl_scsiio cdb[0]=%02X\n", ctsio->cdb[0])); 10599 10600 entry = &ctl_cmd_table[ctsio->cdb[0]]; 10601 10602 /* 10603 * If this I/O has been aborted, just send it straight to 10604 * ctl_done() without executing it. 10605 */ 10606 if (ctsio->io_hdr.flags & CTL_FLAG_ABORT) { 10607 ctl_done((union ctl_io *)ctsio); 10608 goto bailout; 10609 } 10610 10611 /* 10612 * All the checks should have been handled by ctl_scsiio_precheck(). 10613 * We should be clear now to just execute the I/O. 10614 */ 10615 retval = entry->execute(ctsio); 10616 10617 bailout: 10618 return (retval); 10619 } 10620 10621 /* 10622 * Since we only implement one target right now, a bus reset simply resets 10623 * our single target. 
10624 */ 10625 static int 10626 ctl_bus_reset(struct ctl_softc *ctl_softc, union ctl_io *io) 10627 { 10628 return(ctl_target_reset(ctl_softc, io, CTL_UA_BUS_RESET)); 10629 } 10630 10631 static int 10632 ctl_target_reset(struct ctl_softc *ctl_softc, union ctl_io *io, 10633 ctl_ua_type ua_type) 10634 { 10635 struct ctl_lun *lun; 10636 int retval; 10637 10638 if (!(io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC)) { 10639 union ctl_ha_msg msg_info; 10640 10641 io->io_hdr.flags |= CTL_FLAG_SENT_2OTHER_SC; 10642 msg_info.hdr.nexus = io->io_hdr.nexus; 10643 if (ua_type==CTL_UA_TARG_RESET) 10644 msg_info.task.task_action = CTL_TASK_TARGET_RESET; 10645 else 10646 msg_info.task.task_action = CTL_TASK_BUS_RESET; 10647 msg_info.hdr.msg_type = CTL_MSG_MANAGE_TASKS; 10648 msg_info.hdr.original_sc = NULL; 10649 msg_info.hdr.serializing_sc = NULL; 10650 if (CTL_HA_STATUS_SUCCESS != ctl_ha_msg_send(CTL_HA_CHAN_CTL, 10651 (void *)&msg_info, sizeof(msg_info), 0)) { 10652 } 10653 } 10654 retval = 0; 10655 10656 STAILQ_FOREACH(lun, &ctl_softc->lun_list, links) 10657 retval += ctl_lun_reset(lun, io, ua_type); 10658 10659 return (retval); 10660 } 10661 10662 /* 10663 * The LUN should always be set. The I/O is optional, and is used to 10664 * distinguish between I/Os sent by this initiator, and by other 10665 * initiators. We set unit attention for initiators other than this one. 10666 * SAM-3 is vague on this point. It does say that a unit attention should 10667 * be established for other initiators when a LUN is reset (see section 10668 * 5.7.3), but it doesn't specifically say that the unit attention should 10669 * be established for this particular initiator when a LUN is reset. Here 10670 * is the relevant text, from SAM-3 rev 8: 10671 * 10672 * 5.7.2 When a SCSI initiator port aborts its own tasks 10673 * 10674 * When a SCSI initiator port causes its own task(s) to be aborted, no 10675 * notification that the task(s) have been aborted shall be returned to 10676 * the SCSI initiator port other than the completion response for the 10677 * command or task management function action that caused the task(s) to 10678 * be aborted and notification(s) associated with related effects of the 10679 * action (e.g., a reset unit attention condition). 10680 * 10681 * XXX KDM for now, we're setting unit attention for all initiators. 10682 */ 10683 static int 10684 ctl_lun_reset(struct ctl_lun *lun, union ctl_io *io, ctl_ua_type ua_type) 10685 { 10686 union ctl_io *xio; 10687 #if 0 10688 uint32_t initindex; 10689 #endif 10690 int i; 10691 10692 /* 10693 * Run through the OOA queue and abort each I/O. 10694 */ 10695 #if 0 10696 TAILQ_FOREACH((struct ctl_io_hdr *)xio, &lun->ooa_queue, ooa_links) { 10697 #endif 10698 for (xio = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); xio != NULL; 10699 xio = (union ctl_io *)TAILQ_NEXT(&xio->io_hdr, ooa_links)) { 10700 xio->io_hdr.flags |= CTL_FLAG_ABORT; 10701 } 10702 10703 /* 10704 * This version sets unit attention for every 10705 */ 10706 #if 0 10707 initindex = ctl_get_initindex(&io->io_hdr.nexus); 10708 for (i = 0; i < CTL_MAX_INITIATORS; i++) { 10709 if (initindex == i) 10710 continue; 10711 lun->pending_sense[i].ua_pending |= ua_type; 10712 } 10713 #endif 10714 10715 /* 10716 * A reset (any kind, really) clears reservations established with 10717 * RESERVE/RELEASE. It does not clear reservations established 10718 * with PERSISTENT RESERVE OUT, but we don't support that at the 10719 * moment anyway. See SPC-2, section 5.6. 
SPC-3 doesn't address 10720 * reservations made with the RESERVE/RELEASE commands, because 10721 * those commands are obsolete in SPC-3. 10722 */ 10723 lun->flags &= ~CTL_LUN_RESERVED; 10724 10725 for (i = 0; i < CTL_MAX_INITIATORS; i++) { 10726 ctl_clear_mask(lun->have_ca, i); 10727 lun->pending_sense[i].ua_pending |= ua_type; 10728 } 10729 10730 return (0); 10731 } 10732 10733 static int 10734 ctl_abort_task(union ctl_io *io) 10735 { 10736 union ctl_io *xio; 10737 struct ctl_lun *lun; 10738 struct ctl_softc *ctl_softc; 10739 #if 0 10740 struct sbuf sb; 10741 char printbuf[128]; 10742 #endif 10743 int found; 10744 10745 ctl_softc = control_softc; 10746 found = 0; 10747 10748 /* 10749 * Look up the LUN. 10750 */ 10751 if ((io->io_hdr.nexus.targ_lun < CTL_MAX_LUNS) 10752 && (ctl_softc->ctl_luns[io->io_hdr.nexus.targ_lun] != NULL)) 10753 lun = ctl_softc->ctl_luns[io->io_hdr.nexus.targ_lun]; 10754 else 10755 goto bailout; 10756 10757 #if 0 10758 printf("ctl_abort_task: called for lun %lld, tag %d type %d\n", 10759 lun->lun, io->taskio.tag_num, io->taskio.tag_type); 10760 #endif 10761 10762 /* 10763 * Run through the OOA queue and attempt to find the given I/O. 10764 * The target port, initiator ID, tag type and tag number have to 10765 * match the values that we got from the initiator. If we have an 10766 * untagged command to abort, simply abort the first untagged command 10767 * we come to. We only allow one untagged command at a time, of course. 10768 */ 10769 #if 0 10770 TAILQ_FOREACH((struct ctl_io_hdr *)xio, &lun->ooa_queue, ooa_links) { 10771 #endif 10772 for (xio = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); xio != NULL; 10773 xio = (union ctl_io *)TAILQ_NEXT(&xio->io_hdr, ooa_links)) { 10774 #if 0 10775 sbuf_new(&sb, printbuf, sizeof(printbuf), SBUF_FIXEDLEN); 10776 10777 sbuf_printf(&sb, "LUN %lld tag %d type %d%s%s%s%s: ", 10778 lun->lun, xio->scsiio.tag_num, 10779 xio->scsiio.tag_type, 10780 (xio->io_hdr.blocked_links.tqe_prev 10781 == NULL) ? "" : " BLOCKED", 10782 (xio->io_hdr.flags & 10783 CTL_FLAG_DMA_INPROG) ? " DMA" : "", 10784 (xio->io_hdr.flags & 10785 CTL_FLAG_ABORT) ? " ABORT" : "", 10786 (xio->io_hdr.flags & 10787 CTL_FLAG_IS_WAS_ON_RTR) ? " RTR" : ""); 10788 ctl_scsi_command_string(&xio->scsiio, NULL, &sb); 10789 sbuf_finish(&sb); 10790 printf("%s\n", sbuf_data(&sb)); 10791 #endif 10792 10793 if ((xio->io_hdr.nexus.targ_port == io->io_hdr.nexus.targ_port) 10794 && (xio->io_hdr.nexus.initid.id == 10795 io->io_hdr.nexus.initid.id)) { 10796 /* 10797 * If the abort says that the task is untagged, the 10798 * task in the queue must be untagged. Otherwise, 10799 * we just check to see whether the tag numbers 10800 * match. This is because the QLogic firmware 10801 * doesn't pass back the tag type in an abort 10802 * request. 10803 */ 10804 #if 0 10805 if (((xio->scsiio.tag_type == CTL_TAG_UNTAGGED) 10806 && (io->taskio.tag_type == CTL_TAG_UNTAGGED)) 10807 || (xio->scsiio.tag_num == io->taskio.tag_num)) { 10808 #endif 10809 /* 10810 * XXX KDM we've got problems with FC, because it 10811 * doesn't send down a tag type with aborts. So we 10812 * can only really go by the tag number... 10813 * This may cause problems with parallel SCSI. 10814 * Need to figure that out!!
10815 */ 10816 if (xio->scsiio.tag_num == io->taskio.tag_num) { 10817 xio->io_hdr.flags |= CTL_FLAG_ABORT; 10818 found = 1; 10819 if ((io->io_hdr.flags & 10820 CTL_FLAG_FROM_OTHER_SC) == 0 && 10821 !(lun->flags & CTL_LUN_PRIMARY_SC)) { 10822 union ctl_ha_msg msg_info; 10823 10824 io->io_hdr.flags |= 10825 CTL_FLAG_SENT_2OTHER_SC; 10826 msg_info.hdr.nexus = io->io_hdr.nexus; 10827 msg_info.task.task_action = 10828 CTL_TASK_ABORT_TASK; 10829 msg_info.task.tag_num = 10830 io->taskio.tag_num; 10831 msg_info.task.tag_type = 10832 io->taskio.tag_type; 10833 msg_info.hdr.msg_type = 10834 CTL_MSG_MANAGE_TASKS; 10835 msg_info.hdr.original_sc = NULL; 10836 msg_info.hdr.serializing_sc = NULL; 10837 #if 0 10838 printf("Sent Abort to other side\n"); 10839 #endif 10840 if (CTL_HA_STATUS_SUCCESS != 10841 ctl_ha_msg_send(CTL_HA_CHAN_CTL, 10842 (void *)&msg_info, 10843 sizeof(msg_info), 0)) { 10844 } 10845 } 10846 #if 0 10847 printf("ctl_abort_task: found I/O to abort\n"); 10848 #endif 10849 break; 10850 } 10851 } 10852 } 10853 10854 bailout: 10855 10856 if (found == 0) { 10857 /* 10858 * This isn't really an error. It's entirely possible for 10859 * the abort and command completion to cross on the wire. 10860 * This is more of an informative/diagnostic error. 10861 */ 10862 #if 0 10863 printf("ctl_abort_task: ABORT sent for nonexistent I/O: " 10864 "%d:%d:%d:%d tag %d type %d\n", 10865 io->io_hdr.nexus.initid.id, 10866 io->io_hdr.nexus.targ_port, 10867 io->io_hdr.nexus.targ_target.id, 10868 io->io_hdr.nexus.targ_lun, io->taskio.tag_num, 10869 io->taskio.tag_type); 10870 #endif 10871 return (1); 10872 } else 10873 return (0); 10874 } 10875 10876 /* 10877 * Assumptions: caller holds ctl_softc->ctl_lock 10878 * 10879 * This routine cannot block! It must be callable from an interrupt 10880 * handler as well as from the work thread. 
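 *
 * (For example, ctl_datamove() below calls this routine with ctl_lock
 * held to flush pending task management commands before starting a
 * data transfer, so sleeping or dropping the lock here would break
 * that caller.)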
10881 */ 10882 static void 10883 ctl_run_task_queue(struct ctl_softc *ctl_softc) 10884 { 10885 union ctl_io *io, *next_io; 10886 10887 CTL_DEBUG_PRINT(("ctl_run_task_queue\n")); 10888 10889 for (io = (union ctl_io *)STAILQ_FIRST(&ctl_softc->task_queue); 10890 io != NULL; io = next_io) { 10891 int retval; 10892 const char *task_desc; 10893 10894 next_io = (union ctl_io *)STAILQ_NEXT(&io->io_hdr, links); 10895 10896 retval = 0; 10897 10898 switch (io->io_hdr.io_type) { 10899 case CTL_IO_TASK: { 10900 task_desc = ctl_scsi_task_string(&io->taskio); 10901 if (task_desc != NULL) { 10902 #ifdef NEEDTOPORT 10903 csevent_log(CSC_CTL | CSC_SHELF_SW | 10904 CTL_TASK_REPORT, 10905 csevent_LogType_Trace, 10906 csevent_Severity_Information, 10907 csevent_AlertLevel_Green, 10908 csevent_FRU_Firmware, 10909 csevent_FRU_Unknown, 10910 "CTL: received task: %s",task_desc); 10911 #endif 10912 } else { 10913 #ifdef NEEDTOPORT 10914 csevent_log(CSC_CTL | CSC_SHELF_SW | 10915 CTL_TASK_REPORT, 10916 csevent_LogType_Trace, 10917 csevent_Severity_Information, 10918 csevent_AlertLevel_Green, 10919 csevent_FRU_Firmware, 10920 csevent_FRU_Unknown, 10921 "CTL: received unknown task " 10922 "type: %d (%#x)", 10923 io->taskio.task_action, 10924 io->taskio.task_action); 10925 #endif 10926 } 10927 switch (io->taskio.task_action) { 10928 case CTL_TASK_ABORT_TASK: 10929 retval = ctl_abort_task(io); 10930 break; 10931 case CTL_TASK_ABORT_TASK_SET: 10932 break; 10933 case CTL_TASK_CLEAR_ACA: 10934 break; 10935 case CTL_TASK_CLEAR_TASK_SET: 10936 break; 10937 case CTL_TASK_LUN_RESET: { 10938 struct ctl_lun *lun; 10939 uint32_t targ_lun; 10940 int retval; 10941 10942 targ_lun = io->io_hdr.nexus.targ_lun; 10943 10944 if ((targ_lun < CTL_MAX_LUNS) 10945 && (ctl_softc->ctl_luns[targ_lun] != NULL)) 10946 lun = ctl_softc->ctl_luns[targ_lun]; 10947 else { 10948 retval = 1; 10949 break; 10950 } 10951 10952 if (!(io->io_hdr.flags & 10953 CTL_FLAG_FROM_OTHER_SC)) { 10954 union ctl_ha_msg msg_info; 10955 10956 io->io_hdr.flags |= 10957 CTL_FLAG_SENT_2OTHER_SC; 10958 msg_info.hdr.msg_type = 10959 CTL_MSG_MANAGE_TASKS; 10960 msg_info.hdr.nexus = io->io_hdr.nexus; 10961 msg_info.task.task_action = 10962 CTL_TASK_LUN_RESET; 10963 msg_info.hdr.original_sc = NULL; 10964 msg_info.hdr.serializing_sc = NULL; 10965 if (CTL_HA_STATUS_SUCCESS != 10966 ctl_ha_msg_send(CTL_HA_CHAN_CTL, 10967 (void *)&msg_info, 10968 sizeof(msg_info), 0)) { 10969 } 10970 } 10971 10972 retval = ctl_lun_reset(lun, io, 10973 CTL_UA_LUN_RESET); 10974 break; 10975 } 10976 case CTL_TASK_TARGET_RESET: 10977 retval = ctl_target_reset(ctl_softc, io, 10978 CTL_UA_TARG_RESET); 10979 break; 10980 case CTL_TASK_BUS_RESET: 10981 retval = ctl_bus_reset(ctl_softc, io); 10982 break; 10983 case CTL_TASK_PORT_LOGIN: 10984 break; 10985 case CTL_TASK_PORT_LOGOUT: 10986 break; 10987 default: 10988 printf("ctl_run_task_queue: got unknown task " 10989 "management event %d\n", 10990 io->taskio.task_action); 10991 break; 10992 } 10993 if (retval == 0) 10994 io->io_hdr.status = CTL_SUCCESS; 10995 else 10996 io->io_hdr.status = CTL_ERROR; 10997 10998 STAILQ_REMOVE(&ctl_softc->task_queue, &io->io_hdr, 10999 ctl_io_hdr, links); 11000 /* 11001 * This will queue this I/O to the done queue, but the 11002 * work thread won't be able to process it until we 11003 * return and the lock is released. 
11004 */ 11005 ctl_done_lock(io, /*have_lock*/ 1); 11006 break; 11007 } 11008 default: { 11009 11010 printf("%s: invalid I/O type %d msg %d cdb %x" 11011 " iptl: %ju:%d:%ju:%d tag 0x%04x\n", 11012 __func__, io->io_hdr.io_type, 11013 io->io_hdr.msg_type, io->scsiio.cdb[0], 11014 (uintmax_t)io->io_hdr.nexus.initid.id, 11015 io->io_hdr.nexus.targ_port, 11016 (uintmax_t)io->io_hdr.nexus.targ_target.id, 11017 io->io_hdr.nexus.targ_lun, 11018 (io->io_hdr.io_type == CTL_IO_TASK) ? 11019 io->taskio.tag_num : io->scsiio.tag_num); 11020 STAILQ_REMOVE(&ctl_softc->task_queue, &io->io_hdr, 11021 ctl_io_hdr, links); 11022 ctl_free_io_internal(io, 1); 11023 break; 11024 } 11025 } 11026 } 11027 11028 ctl_softc->flags &= ~CTL_FLAG_TASK_PENDING; 11029 } 11030 11031 /* 11032 * For HA operation. Handle commands that come in from the other 11033 * controller. 11034 */ 11035 static void 11036 ctl_handle_isc(union ctl_io *io) 11037 { 11038 int free_io; 11039 struct ctl_lun *lun; 11040 struct ctl_softc *ctl_softc; 11041 11042 ctl_softc = control_softc; 11043 11044 lun = ctl_softc->ctl_luns[io->io_hdr.nexus.targ_lun]; 11045 11046 switch (io->io_hdr.msg_type) { 11047 case CTL_MSG_SERIALIZE: 11048 free_io = ctl_serialize_other_sc_cmd(&io->scsiio, 11049 /*have_lock*/ 0); 11050 break; 11051 case CTL_MSG_R2R: { 11052 uint8_t opcode; 11053 struct ctl_cmd_entry *entry; 11054 11055 /* 11056 * This is only used in SER_ONLY mode. 11057 */ 11058 free_io = 0; 11059 opcode = io->scsiio.cdb[0]; 11060 entry = &ctl_cmd_table[opcode]; 11061 mtx_lock(&ctl_softc->ctl_lock); 11062 if (ctl_scsiio_lun_check(ctl_softc, lun, 11063 entry, (struct ctl_scsiio *)io) != 0) { 11064 ctl_done_lock(io, /*have_lock*/ 1); 11065 mtx_unlock(&ctl_softc->ctl_lock); 11066 break; 11067 } 11068 io->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR; 11069 STAILQ_INSERT_TAIL(&ctl_softc->rtr_queue, 11070 &io->io_hdr, links); 11071 mtx_unlock(&ctl_softc->ctl_lock); 11072 break; 11073 } 11074 case CTL_MSG_FINISH_IO: 11075 if (ctl_softc->ha_mode == CTL_HA_MODE_XFER) { 11076 free_io = 0; 11077 ctl_done_lock(io, /*have_lock*/ 0); 11078 } else { 11079 free_io = 1; 11080 mtx_lock(&ctl_softc->ctl_lock); 11081 TAILQ_REMOVE(&lun->ooa_queue, &io->io_hdr, 11082 ooa_links); 11083 STAILQ_REMOVE(&ctl_softc->task_queue, 11084 &io->io_hdr, ctl_io_hdr, links); 11085 ctl_check_blocked(lun); 11086 mtx_unlock(&ctl_softc->ctl_lock); 11087 } 11088 break; 11089 case CTL_MSG_PERS_ACTION: 11090 ctl_hndl_per_res_out_on_other_sc( 11091 (union ctl_ha_msg *)&io->presio.pr_msg); 11092 free_io = 1; 11093 break; 11094 case CTL_MSG_BAD_JUJU: 11095 free_io = 0; 11096 ctl_done_lock(io, /*have_lock*/ 0); 11097 break; 11098 case CTL_MSG_DATAMOVE: 11099 /* Only used in XFER mode */ 11100 free_io = 0; 11101 ctl_datamove_remote(io); 11102 break; 11103 case CTL_MSG_DATAMOVE_DONE: 11104 /* Only used in XFER mode */ 11105 free_io = 0; 11106 io->scsiio.be_move_done(io); 11107 break; 11108 default: 11109 free_io = 1; 11110 printf("%s: Invalid message type %d\n", 11111 __func__, io->io_hdr.msg_type); 11112 break; 11113 } 11114 if (free_io) 11115 ctl_free_io_internal(io, 0); 11116 11117 } 11118 11119 11120 /* 11121 * Returns the match type in the case of a match, or CTL_LUN_PAT_NONE if 11122 * there is no match. 
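 *
 * Roughly, for example: a descriptor whose pattern is CTL_LUN_PAT_ANY
 * matches every command, while one that also asks for
 * CTL_LUN_PAT_RANGE only matches commands whose LBA range (from
 * ctl_get_lba_len()) overlaps desc->lba_range.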
11123 */ 11124 static ctl_lun_error_pattern 11125 ctl_cmd_pattern_match(struct ctl_scsiio *ctsio, struct ctl_error_desc *desc) 11126 { 11127 struct ctl_cmd_entry *entry; 11128 ctl_lun_error_pattern filtered_pattern, pattern; 11129 uint8_t opcode; 11130 11131 pattern = desc->error_pattern; 11132 11133 /* 11134 * XXX KDM we need more data passed into this function to match a 11135 * custom pattern, and we actually need to implement custom pattern 11136 * matching. 11137 */ 11138 if (pattern & CTL_LUN_PAT_CMD) 11139 return (CTL_LUN_PAT_CMD); 11140 11141 if ((pattern & CTL_LUN_PAT_MASK) == CTL_LUN_PAT_ANY) 11142 return (CTL_LUN_PAT_ANY); 11143 11144 opcode = ctsio->cdb[0]; 11145 entry = &ctl_cmd_table[opcode]; 11146 11147 filtered_pattern = entry->pattern & pattern; 11148 11149 /* 11150 * If the user requested specific flags in the pattern (e.g. 11151 * CTL_LUN_PAT_RANGE), make sure the command supports all of those 11152 * flags. 11153 * 11154 * If the user did not specify any flags, it doesn't matter whether 11155 * or not the command supports the flags. 11156 */ 11157 if ((filtered_pattern & ~CTL_LUN_PAT_MASK) != 11158 (pattern & ~CTL_LUN_PAT_MASK)) 11159 return (CTL_LUN_PAT_NONE); 11160 11161 /* 11162 * If the user asked for a range check, see if the requested LBA 11163 * range overlaps with this command's LBA range. 11164 */ 11165 if (filtered_pattern & CTL_LUN_PAT_RANGE) { 11166 uint64_t lba1; 11167 uint32_t len1; 11168 ctl_action action; 11169 int retval; 11170 11171 retval = ctl_get_lba_len((union ctl_io *)ctsio, &lba1, &len1); 11172 if (retval != 0) 11173 return (CTL_LUN_PAT_NONE); 11174 11175 action = ctl_extent_check_lba(lba1, len1, desc->lba_range.lba, 11176 desc->lba_range.len); 11177 /* 11178 * A "pass" means that the LBA ranges don't overlap, so 11179 * this doesn't match the user's range criteria. 11180 */ 11181 if (action == CTL_ACTION_PASS) 11182 return (CTL_LUN_PAT_NONE); 11183 } 11184 11185 return (filtered_pattern); 11186 } 11187 11188 /* 11189 * Called with the CTL lock held. 11190 */ 11191 static void 11192 ctl_inject_error(struct ctl_lun *lun, union ctl_io *io) 11193 { 11194 struct ctl_error_desc *desc, *desc2; 11195 11196 STAILQ_FOREACH_SAFE(desc, &lun->error_list, links, desc2) { 11197 ctl_lun_error_pattern pattern; 11198 /* 11199 * Check to see whether this particular command matches 11200 * the pattern in the descriptor. 11201 */ 11202 pattern = ctl_cmd_pattern_match(&io->scsiio, desc); 11203 if ((pattern & CTL_LUN_PAT_MASK) == CTL_LUN_PAT_NONE) 11204 continue; 11205 11206 switch (desc->lun_error & CTL_LUN_INJ_TYPE) { 11207 case CTL_LUN_INJ_ABORTED: 11208 ctl_set_aborted(&io->scsiio); 11209 break; 11210 case CTL_LUN_INJ_MEDIUM_ERR: 11211 ctl_set_medium_error(&io->scsiio); 11212 break; 11213 case CTL_LUN_INJ_UA: 11214 /* 29h/00h POWER ON, RESET, OR BUS DEVICE RESET 11215 * OCCURRED */ 11216 ctl_set_ua(&io->scsiio, 0x29, 0x00); 11217 break; 11218 case CTL_LUN_INJ_CUSTOM: 11219 /* 11220 * We're assuming the user knows what he is doing. 11221 * Just copy the sense information without doing 11222 * checks. 
11223 */ 11224 bcopy(&desc->custom_sense, &io->scsiio.sense_data, 11225 ctl_min(sizeof(desc->custom_sense), 11226 sizeof(io->scsiio.sense_data))); 11227 io->scsiio.scsi_status = SCSI_STATUS_CHECK_COND; 11228 io->scsiio.sense_len = SSD_FULL_SIZE; 11229 io->io_hdr.status = CTL_SCSI_ERROR | CTL_AUTOSENSE; 11230 break; 11231 case CTL_LUN_INJ_NONE: 11232 default: 11233 /* 11234 * If this is an error injection type we don't know 11235 * about, clear the continuous flag (if it is set) 11236 * so it will get deleted below. 11237 */ 11238 desc->lun_error &= ~CTL_LUN_INJ_CONTINUOUS; 11239 break; 11240 } 11241 /* 11242 * By default, each error injection action is a one-shot 11243 */ 11244 if (desc->lun_error & CTL_LUN_INJ_CONTINUOUS) 11245 continue; 11246 11247 STAILQ_REMOVE(&lun->error_list, desc, ctl_error_desc, links); 11248 11249 free(desc, M_CTL); 11250 } 11251 } 11252 11253 #ifdef CTL_IO_DELAY 11254 static void 11255 ctl_datamove_timer_wakeup(void *arg) 11256 { 11257 union ctl_io *io; 11258 11259 io = (union ctl_io *)arg; 11260 11261 ctl_datamove(io); 11262 } 11263 #endif /* CTL_IO_DELAY */ 11264 11265 /* 11266 * Assumption: caller does NOT hold ctl_lock 11267 */ 11268 void 11269 ctl_datamove(union ctl_io *io) 11270 { 11271 void (*fe_datamove)(union ctl_io *io); 11272 11273 CTL_DEBUG_PRINT(("ctl_datamove\n")); 11274 11275 #ifdef CTL_TIME_IO 11276 if ((time_uptime - io->io_hdr.start_time) > ctl_time_io_secs) { 11277 char str[256]; 11278 char path_str[64]; 11279 struct sbuf sb; 11280 11281 ctl_scsi_path_string(io, path_str, sizeof(path_str)); 11282 sbuf_new(&sb, str, sizeof(str), SBUF_FIXEDLEN); 11283 11284 sbuf_cat(&sb, path_str); 11285 switch (io->io_hdr.io_type) { 11286 case CTL_IO_SCSI: 11287 ctl_scsi_command_string(&io->scsiio, NULL, &sb); 11288 sbuf_printf(&sb, "\n"); 11289 sbuf_cat(&sb, path_str); 11290 sbuf_printf(&sb, "Tag: 0x%04x, type %d\n", 11291 io->scsiio.tag_num, io->scsiio.tag_type); 11292 break; 11293 case CTL_IO_TASK: 11294 sbuf_printf(&sb, "Task I/O type: %d, Tag: 0x%04x, " 11295 "Tag Type: %d\n", io->taskio.task_action, 11296 io->taskio.tag_num, io->taskio.tag_type); 11297 break; 11298 default: 11299 printf("Invalid CTL I/O type %d\n", io->io_hdr.io_type); 11300 panic("Invalid CTL I/O type %d\n", io->io_hdr.io_type); 11301 break; 11302 } 11303 sbuf_cat(&sb, path_str); 11304 sbuf_printf(&sb, "ctl_datamove: %jd seconds\n", 11305 (intmax_t)time_uptime - io->io_hdr.start_time); 11306 sbuf_finish(&sb); 11307 printf("%s", sbuf_data(&sb)); 11308 } 11309 #endif /* CTL_TIME_IO */ 11310 11311 mtx_lock(&control_softc->ctl_lock); 11312 #ifdef CTL_IO_DELAY 11313 if (io->io_hdr.flags & CTL_FLAG_DELAY_DONE) { 11314 struct ctl_lun *lun; 11315 11316 lun =(struct ctl_lun *)io->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 11317 11318 io->io_hdr.flags &= ~CTL_FLAG_DELAY_DONE; 11319 } else { 11320 struct ctl_lun *lun; 11321 11322 lun =(struct ctl_lun *)io->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 11323 if ((lun != NULL) 11324 && (lun->delay_info.datamove_delay > 0)) { 11325 struct callout *callout; 11326 11327 callout = (struct callout *)&io->io_hdr.timer_bytes; 11328 callout_init(callout, /*mpsafe*/ 1); 11329 io->io_hdr.flags |= CTL_FLAG_DELAY_DONE; 11330 callout_reset(callout, 11331 lun->delay_info.datamove_delay * hz, 11332 ctl_datamove_timer_wakeup, io); 11333 if (lun->delay_info.datamove_type == 11334 CTL_DELAY_TYPE_ONESHOT) 11335 lun->delay_info.datamove_delay = 0; 11336 mtx_unlock(&control_softc->ctl_lock); 11337 return; 11338 } 11339 } 11340 #endif 11341 /* 11342 * If we have any pending task 
management commands, process them 11343 * first. This is necessary to eliminate a race condition with the 11344 * FETD: 11345 * 11346 * - FETD submits a task management command, like an abort. 11347 * - Back end calls fe_datamove() to move the data for the aborted 11348 * command. The FETD can't really accept it, but if it did, it 11349 * would end up transmitting data for a command that the initiator 11350 * told us to abort. 11351 * 11352 * We close the race by processing all pending task management 11353 * commands here (we can't block!), and then check this I/O to see 11354 * if it has been aborted. If so, return it to the back end with 11355 * bad status, so the back end will return an error for the command; 11356 * once the back end returns that error, we can return the 11357 * aborted command to the FETD so that it can clean up its resources. 11358 */ 11359 if (control_softc->flags & CTL_FLAG_TASK_PENDING) 11360 ctl_run_task_queue(control_softc); 11361 11362 /* 11363 * This command has been aborted. Set the port status, so we fail 11364 * the data move. 11365 */ 11366 if (io->io_hdr.flags & CTL_FLAG_ABORT) { 11367 printf("ctl_datamove: tag 0x%04x on (%ju:%d:%ju:%d) aborted\n", 11368 io->scsiio.tag_num,(uintmax_t)io->io_hdr.nexus.initid.id, 11369 io->io_hdr.nexus.targ_port, 11370 (uintmax_t)io->io_hdr.nexus.targ_target.id, 11371 io->io_hdr.nexus.targ_lun); 11372 io->io_hdr.status = CTL_CMD_ABORTED; 11373 io->io_hdr.port_status = 31337; 11374 mtx_unlock(&control_softc->ctl_lock); 11375 /* 11376 * Note that the backend, in this case, will get the 11377 * callback in its context. In other cases it may get 11378 * called in the frontend's interrupt thread context. 11379 */ 11380 io->scsiio.be_move_done(io); 11381 return; 11382 } 11383 11384 /* 11385 * If we're in XFER mode and this I/O is from the other shelf 11386 * controller, we need to send the DMA to the other side to 11387 * actually transfer the data to/from the host. In serialize only 11388 * mode the transfer happens below CTL and ctl_datamove() is only 11389 * called on the machine that originally received the I/O. 11390 */ 11391 if ((control_softc->ha_mode == CTL_HA_MODE_XFER) 11392 && (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC)) { 11393 union ctl_ha_msg msg; 11394 uint32_t sg_entries_sent; 11395 int do_sg_copy; 11396 int i; 11397 11398 memset(&msg, 0, sizeof(msg)); 11399 msg.hdr.msg_type = CTL_MSG_DATAMOVE; 11400 msg.hdr.original_sc = io->io_hdr.original_sc; 11401 msg.hdr.serializing_sc = io; 11402 msg.hdr.nexus = io->io_hdr.nexus; 11403 msg.dt.flags = io->io_hdr.flags; 11404 /* 11405 * We convert everything into a S/G list here. We can't 11406 * pass by reference, only by value between controllers. 11407 * So we can't pass a pointer to the S/G list, only as many 11408 * S/G entries as we can fit in here. If it's possible for 11409 * us to get more than CTL_HA_MAX_SG_ENTRIES S/G entries, 11410 * then we need to break this up into multiple transfers. 11411 */ 11412 if (io->scsiio.kern_sg_entries == 0) { 11413 msg.dt.kern_sg_entries = 1; 11414 /* 11415 * If this is in cached memory, flush the cache 11416 * before we send the DMA request to the other 11417 * controller. We want to do this in either the 11418 * read or the write case. The read case is 11419 * straightforward. In the write case, we want to 11420 * make sure nothing is in the local cache that 11421 * could overwrite the DMAed data. 11422 */ 11423 if ((io->io_hdr.flags & CTL_FLAG_NO_DATASYNC) == 0) { 11424 /* 11425 * XXX KDM use bus_dmamap_sync() here.
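 *
 * (When this path moves to busdma, the flush would presumably
 * look something like bus_dmamap_sync(dmat, map,
 * BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); no tag or map
 * exists for this buffer yet, so those names are placeholders.)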
11426 */ 11427 } 11428 11429 /* 11430 * Convert to a physical address if this is a 11431 * virtual address. 11432 */ 11433 if (io->io_hdr.flags & CTL_FLAG_BUS_ADDR) { 11434 msg.dt.sg_list[0].addr = 11435 io->scsiio.kern_data_ptr; 11436 } else { 11437 /* 11438 * XXX KDM use busdma here! 11439 */ 11440 #if 0 11441 msg.dt.sg_list[0].addr = (void *) 11442 vtophys(io->scsiio.kern_data_ptr); 11443 #endif 11444 } 11445 11446 msg.dt.sg_list[0].len = io->scsiio.kern_data_len; 11447 do_sg_copy = 0; 11448 } else { 11449 struct ctl_sg_entry *sgl; 11450 11451 do_sg_copy = 1; 11452 msg.dt.kern_sg_entries = io->scsiio.kern_sg_entries; 11453 sgl = (struct ctl_sg_entry *)io->scsiio.kern_data_ptr; 11454 if ((io->io_hdr.flags & CTL_FLAG_NO_DATASYNC) == 0) { 11455 /* 11456 * XXX KDM use bus_dmamap_sync() here. 11457 */ 11458 } 11459 } 11460 11461 msg.dt.kern_data_len = io->scsiio.kern_data_len; 11462 msg.dt.kern_total_len = io->scsiio.kern_total_len; 11463 msg.dt.kern_data_resid = io->scsiio.kern_data_resid; 11464 msg.dt.kern_rel_offset = io->scsiio.kern_rel_offset; 11465 msg.dt.sg_sequence = 0; 11466 11467 /* 11468 * Loop until we've sent all of the S/G entries. On the 11469 * other end, we'll recompose these S/G entries into one 11470 * contiguous list before passing it to the 11471 */ 11472 for (sg_entries_sent = 0; sg_entries_sent < 11473 msg.dt.kern_sg_entries; msg.dt.sg_sequence++) { 11474 msg.dt.cur_sg_entries = ctl_min((sizeof(msg.dt.sg_list)/ 11475 sizeof(msg.dt.sg_list[0])), 11476 msg.dt.kern_sg_entries - sg_entries_sent); 11477 11478 if (do_sg_copy != 0) { 11479 struct ctl_sg_entry *sgl; 11480 int j; 11481 11482 sgl = (struct ctl_sg_entry *) 11483 io->scsiio.kern_data_ptr; 11484 /* 11485 * If this is in cached memory, flush the cache 11486 * before we send the DMA request to the other 11487 * controller. We want to do this in either 11488 * the * read or the write case. The read 11489 * case is straightforward. In the write 11490 * case, we want to make sure nothing is 11491 * in the local cache that could overwrite 11492 * the DMAed data. 11493 */ 11494 11495 for (i = sg_entries_sent, j = 0; 11496 i < msg.dt.cur_sg_entries; i++, j++) { 11497 if ((io->io_hdr.flags & 11498 CTL_FLAG_NO_DATASYNC) == 0) { 11499 /* 11500 * XXX KDM use bus_dmamap_sync() 11501 */ 11502 } 11503 if ((io->io_hdr.flags & 11504 CTL_FLAG_BUS_ADDR) == 0) { 11505 /* 11506 * XXX KDM use busdma. 11507 */ 11508 #if 0 11509 msg.dt.sg_list[j].addr =(void *) 11510 vtophys(sgl[i].addr); 11511 #endif 11512 } else { 11513 msg.dt.sg_list[j].addr = 11514 sgl[i].addr; 11515 } 11516 msg.dt.sg_list[j].len = sgl[i].len; 11517 } 11518 } 11519 11520 sg_entries_sent += msg.dt.cur_sg_entries; 11521 if (sg_entries_sent >= msg.dt.kern_sg_entries) 11522 msg.dt.sg_last = 1; 11523 else 11524 msg.dt.sg_last = 0; 11525 11526 /* 11527 * XXX KDM drop and reacquire the lock here? 11528 */ 11529 if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, 11530 sizeof(msg), 0) > CTL_HA_STATUS_SUCCESS) { 11531 /* 11532 * XXX do something here. 11533 */ 11534 } 11535 11536 msg.dt.sent_sg_entries = sg_entries_sent; 11537 } 11538 io->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE; 11539 if (io->io_hdr.flags & CTL_FLAG_FAILOVER) 11540 ctl_failover_io(io, /*have_lock*/ 1); 11541 11542 } else { 11543 11544 /* 11545 * Lookup the fe_datamove() function for this particular 11546 * front end. 
*/ 11548 fe_datamove = 11549 control_softc->ctl_ports[ctl_port_idx(io->io_hdr.nexus.targ_port)]->fe_datamove; 11550 mtx_unlock(&control_softc->ctl_lock); 11551 11552 fe_datamove(io); 11553 } 11554 } 11555 11556 static void 11557 ctl_send_datamove_done(union ctl_io *io, int have_lock) 11558 { 11559 union ctl_ha_msg msg; 11560 int isc_status; 11561 11562 memset(&msg, 0, sizeof(msg)); 11563 11564 msg.hdr.msg_type = CTL_MSG_DATAMOVE_DONE; 11565 msg.hdr.original_sc = io; 11566 msg.hdr.serializing_sc = io->io_hdr.serializing_sc; 11567 msg.hdr.nexus = io->io_hdr.nexus; 11568 msg.hdr.status = io->io_hdr.status; 11569 msg.scsi.tag_num = io->scsiio.tag_num; 11570 msg.scsi.tag_type = io->scsiio.tag_type; 11571 msg.scsi.scsi_status = io->scsiio.scsi_status; 11572 memcpy(&msg.scsi.sense_data, &io->scsiio.sense_data, 11573 sizeof(io->scsiio.sense_data)); 11574 msg.scsi.sense_len = io->scsiio.sense_len; 11575 msg.scsi.sense_residual = io->scsiio.sense_residual; 11576 msg.scsi.fetd_status = io->io_hdr.port_status; 11577 msg.scsi.residual = io->scsiio.residual; 11578 io->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE; 11579 11580 if (io->io_hdr.flags & CTL_FLAG_FAILOVER) { 11581 ctl_failover_io(io, /*have_lock*/ have_lock); 11582 return; 11583 } 11584 11585 isc_status = ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, sizeof(msg), 0); 11586 if (isc_status > CTL_HA_STATUS_SUCCESS) { 11587 /* XXX do something if this fails */ 11588 } 11589 11590 } 11591 11592 /* 11593 * The DMA to the remote side is done; now we need to tell the other side 11594 * we're done so it can continue with its data movement. 11595 */ 11596 static void 11597 ctl_datamove_remote_write_cb(struct ctl_ha_dt_req *rq) 11598 { 11599 union ctl_io *io; 11600 11601 io = rq->context; 11602 11603 if (rq->ret != CTL_HA_STATUS_SUCCESS) { 11604 printf("%s: ISC DMA write failed with error %d\n", __func__, 11605 rq->ret); 11606 ctl_set_internal_failure(&io->scsiio, 11607 /*sks_valid*/ 1, 11608 /*retry_count*/ rq->ret); 11609 } 11610 11611 ctl_dt_req_free(rq); 11612 11613 /* 11614 * In this case, we had to malloc the memory locally. Free it. 11615 */ 11616 if ((io->io_hdr.flags & CTL_FLAG_AUTO_MIRROR) == 0) { 11617 int i; 11618 for (i = 0; i < io->scsiio.kern_sg_entries; i++) 11619 free(io->io_hdr.local_sglist[i].addr, M_CTL); 11620 } 11621 /* 11622 * The data is in local and remote memory, so now we need to send 11623 * status (good or bad) back to the other side. 11624 */ 11625 ctl_send_datamove_done(io, /*have_lock*/ 0); 11626 } 11627 11628 /* 11629 * We've moved the data from the host/controller into local memory. Now we 11630 * need to push it over to the remote controller's memory. 11631 */ 11632 static int 11633 ctl_datamove_remote_dm_write_cb(union ctl_io *io) 11634 { 11635 int retval; 11636 11637 retval = 0; 11638 11639 retval = ctl_datamove_remote_xfer(io, CTL_HA_DT_CMD_WRITE, 11640 ctl_datamove_remote_write_cb); 11641 11642 return (retval); 11643 } 11644 11645 static void 11646 ctl_datamove_remote_write(union ctl_io *io) 11647 { 11648 int retval; 11649 void (*fe_datamove)(union ctl_io *io); 11650 11651 /* 11652 * - Get the data from the host/HBA into local memory. 11653 * - DMA memory from the local controller to the remote controller. 11654 * - Send status back to the remote controller.
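 *
 * (The first step goes through the front end's fe_datamove() callback
 * below; the second and third are driven by
 * ctl_datamove_remote_dm_write_cb() and then
 * ctl_datamove_remote_write_cb()/ctl_send_datamove_done() once the
 * local data move completes.)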
11655 */ 11656 11657 retval = ctl_datamove_remote_sgl_setup(io); 11658 if (retval != 0) 11659 return; 11660 11661 /* Switch the pointer over so the FETD knows what to do */ 11662 io->scsiio.kern_data_ptr = (uint8_t *)io->io_hdr.local_sglist; 11663 11664 /* 11665 * Use a custom move done callback, since we need to send completion 11666 * back to the other controller, not to the backend on this side. 11667 */ 11668 io->scsiio.be_move_done = ctl_datamove_remote_dm_write_cb; 11669 11670 fe_datamove = control_softc->ctl_ports[ctl_port_idx(io->io_hdr.nexus.targ_port)]->fe_datamove; 11671 11672 fe_datamove(io); 11673 11674 return; 11675 11676 } 11677 11678 static int 11679 ctl_datamove_remote_dm_read_cb(union ctl_io *io) 11680 { 11681 #if 0 11682 char str[256]; 11683 char path_str[64]; 11684 struct sbuf sb; 11685 #endif 11686 11687 /* 11688 * In this case, we had to malloc the memory locally. Free it. 11689 */ 11690 if ((io->io_hdr.flags & CTL_FLAG_AUTO_MIRROR) == 0) { 11691 int i; 11692 for (i = 0; i < io->scsiio.kern_sg_entries; i++) 11693 free(io->io_hdr.local_sglist[i].addr, M_CTL); 11694 } 11695 11696 #if 0 11697 scsi_path_string(io, path_str, sizeof(path_str)); 11698 sbuf_new(&sb, str, sizeof(str), SBUF_FIXEDLEN); 11699 sbuf_cat(&sb, path_str); 11700 scsi_command_string(&io->scsiio, NULL, &sb); 11701 sbuf_printf(&sb, "\n"); 11702 sbuf_cat(&sb, path_str); 11703 sbuf_printf(&sb, "Tag: 0x%04x, type %d\n", 11704 io->scsiio.tag_num, io->scsiio.tag_type); 11705 sbuf_cat(&sb, path_str); 11706 sbuf_printf(&sb, "%s: flags %#x, status %#x\n", __func__, 11707 io->io_hdr.flags, io->io_hdr.status); 11708 sbuf_finish(&sb); 11709 printk("%s", sbuf_data(&sb)); 11710 #endif 11711 11712 11713 /* 11714 * The read is done, now we need to send status (good or bad) back 11715 * to the other side. 11716 */ 11717 ctl_send_datamove_done(io, /*have_lock*/ 0); 11718 11719 return (0); 11720 } 11721 11722 static void 11723 ctl_datamove_remote_read_cb(struct ctl_ha_dt_req *rq) 11724 { 11725 union ctl_io *io; 11726 void (*fe_datamove)(union ctl_io *io); 11727 11728 io = rq->context; 11729 11730 if (rq->ret != CTL_HA_STATUS_SUCCESS) { 11731 printf("%s: ISC DMA read failed with error %d", __func__, 11732 rq->ret); 11733 ctl_set_internal_failure(&io->scsiio, 11734 /*sks_valid*/ 1, 11735 /*retry_count*/ rq->ret); 11736 } 11737 11738 ctl_dt_req_free(rq); 11739 11740 /* Switch the pointer over so the FETD knows what to do */ 11741 io->scsiio.kern_data_ptr = (uint8_t *)io->io_hdr.local_sglist; 11742 11743 /* 11744 * Use a custom move done callback, since we need to send completion 11745 * back to the other controller, not to the backend on this side. 11746 */ 11747 io->scsiio.be_move_done = ctl_datamove_remote_dm_read_cb; 11748 11749 /* XXX KDM add checks like the ones in ctl_datamove? 
*/ 11750 11751 fe_datamove = control_softc->ctl_ports[ctl_port_idx(io->io_hdr.nexus.targ_port)]->fe_datamove; 11752 11753 fe_datamove(io); 11754 } 11755 11756 static int 11757 ctl_datamove_remote_sgl_setup(union ctl_io *io) 11758 { 11759 struct ctl_sg_entry *local_sglist, *remote_sglist; 11760 struct ctl_sg_entry *local_dma_sglist, *remote_dma_sglist; 11761 struct ctl_softc *softc; 11762 int retval; 11763 int i; 11764 11765 retval = 0; 11766 softc = control_softc; 11767 11768 local_sglist = io->io_hdr.local_sglist; 11769 local_dma_sglist = io->io_hdr.local_dma_sglist; 11770 remote_sglist = io->io_hdr.remote_sglist; 11771 remote_dma_sglist = io->io_hdr.remote_dma_sglist; 11772 11773 if (io->io_hdr.flags & CTL_FLAG_AUTO_MIRROR) { 11774 for (i = 0; i < io->scsiio.kern_sg_entries; i++) { 11775 local_sglist[i].len = remote_sglist[i].len; 11776 11777 /* 11778 * XXX Detect the situation where the RS-level I/O 11779 * redirector on the other side has already read the 11780 * data off of the AOR RS on this side, and 11781 * transferred it to remote (mirror) memory on the 11782 * other side. Since we already have the data in 11783 * memory here, we just need to use it. 11784 * 11785 * XXX KDM this can probably be removed once we 11786 * get the cache device code in and take the 11787 * current AOR implementation out. 11788 */ 11789 #ifdef NEEDTOPORT 11790 if ((remote_sglist[i].addr >= 11791 (void *)vtophys(softc->mirr->addr)) 11792 && (remote_sglist[i].addr < 11793 ((void *)vtophys(softc->mirr->addr) + 11794 CacheMirrorOffset))) { 11795 local_sglist[i].addr = remote_sglist[i].addr - 11796 CacheMirrorOffset; 11797 if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == 11798 CTL_FLAG_DATA_IN) 11799 io->io_hdr.flags |= CTL_FLAG_REDIR_DONE; 11800 } else { 11801 local_sglist[i].addr = remote_sglist[i].addr + 11802 CacheMirrorOffset; 11803 } 11804 #endif 11805 #if 0 11806 printf("%s: local %p, remote %p, len %d\n", 11807 __func__, local_sglist[i].addr, 11808 remote_sglist[i].addr, local_sglist[i].len); 11809 #endif 11810 } 11811 } else { 11812 uint32_t len_to_go; 11813 11814 /* 11815 * In this case, we don't have automatically allocated 11816 * memory for this I/O on this controller. This typically 11817 * happens with internal CTL I/O -- e.g. inquiry, mode 11818 * sense, etc. Anything coming from RAIDCore will have 11819 * a mirror area available. 11820 */ 11821 len_to_go = io->scsiio.kern_data_len; 11822 11823 /* 11824 * Clear the no datasync flag, we have to use malloced 11825 * buffers. 11826 */ 11827 io->io_hdr.flags &= ~CTL_FLAG_NO_DATASYNC; 11828 11829 /* 11830 * The difficult thing here is that the size of the various 11831 * S/G segments may be different than the size from the 11832 * remote controller. That'll make it harder when DMAing 11833 * the data back to the other side. 
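 *
 * (A purely illustrative example: a 192KB transfer could be allocated
 * here as two local segments of 128KB + 64KB while the remote side
 * describes it with three 64KB segments; ctl_datamove_remote_xfer()
 * below handles that by walking the local and remote lists with
 * independent indices and per-segment offsets.)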
11834 */ 11835 for (i = 0; (i < sizeof(io->io_hdr.remote_sglist) / 11836 sizeof(io->io_hdr.remote_sglist[0])) && 11837 (len_to_go > 0); i++) { 11838 local_sglist[i].len = ctl_min(len_to_go, 131072); 11839 CTL_SIZE_8B(local_dma_sglist[i].len, 11840 local_sglist[i].len); 11841 local_sglist[i].addr = 11842 malloc(local_dma_sglist[i].len, M_CTL,M_WAITOK); 11843 11844 local_dma_sglist[i].addr = local_sglist[i].addr; 11845 11846 if (local_sglist[i].addr == NULL) { 11847 int j; 11848 11849 printf("malloc failed for %zd bytes!", 11850 local_dma_sglist[i].len); 11851 for (j = 0; j < i; j++) { 11852 free(local_sglist[j].addr, M_CTL); 11853 } 11854 ctl_set_internal_failure(&io->scsiio, 11855 /*sks_valid*/ 1, 11856 /*retry_count*/ 4857); 11857 retval = 1; 11858 goto bailout_error; 11859 11860 } 11861 /* XXX KDM do we need a sync here? */ 11862 11863 len_to_go -= local_sglist[i].len; 11864 } 11865 /* 11866 * Reset the number of S/G entries accordingly. The 11867 * original number of S/G entries is available in 11868 * rem_sg_entries. 11869 */ 11870 io->scsiio.kern_sg_entries = i; 11871 11872 #if 0 11873 printf("%s: kern_sg_entries = %d\n", __func__, 11874 io->scsiio.kern_sg_entries); 11875 for (i = 0; i < io->scsiio.kern_sg_entries; i++) 11876 printf("%s: sg[%d] = %p, %d (DMA: %d)\n", __func__, i, 11877 local_sglist[i].addr, local_sglist[i].len, 11878 local_dma_sglist[i].len); 11879 #endif 11880 } 11881 11882 11883 return (retval); 11884 11885 bailout_error: 11886 11887 ctl_send_datamove_done(io, /*have_lock*/ 0); 11888 11889 return (retval); 11890 } 11891 11892 static int 11893 ctl_datamove_remote_xfer(union ctl_io *io, unsigned command, 11894 ctl_ha_dt_cb callback) 11895 { 11896 struct ctl_ha_dt_req *rq; 11897 struct ctl_sg_entry *remote_sglist, *local_sglist; 11898 struct ctl_sg_entry *remote_dma_sglist, *local_dma_sglist; 11899 uint32_t local_used, remote_used, total_used; 11900 int retval; 11901 int i, j; 11902 11903 retval = 0; 11904 11905 rq = ctl_dt_req_alloc(); 11906 11907 /* 11908 * If we failed to allocate the request, and if the DMA didn't fail 11909 * anyway, set busy status. This is just a resource allocation 11910 * failure. 11911 */ 11912 if ((rq == NULL) 11913 && ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE)) 11914 ctl_set_busy(&io->scsiio); 11915 11916 if ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE) { 11917 11918 if (rq != NULL) 11919 ctl_dt_req_free(rq); 11920 11921 /* 11922 * The data move failed. We need to return status back 11923 * to the other controller. No point in trying to DMA 11924 * data to the remote controller. 11925 */ 11926 11927 ctl_send_datamove_done(io, /*have_lock*/ 0); 11928 11929 retval = 1; 11930 11931 goto bailout; 11932 } 11933 11934 local_sglist = io->io_hdr.local_sglist; 11935 local_dma_sglist = io->io_hdr.local_dma_sglist; 11936 remote_sglist = io->io_hdr.remote_sglist; 11937 remote_dma_sglist = io->io_hdr.remote_dma_sglist; 11938 local_used = 0; 11939 remote_used = 0; 11940 total_used = 0; 11941 11942 if (io->io_hdr.flags & CTL_FLAG_REDIR_DONE) { 11943 rq->ret = CTL_HA_STATUS_SUCCESS; 11944 rq->context = io; 11945 callback(rq); 11946 goto bailout; 11947 } 11948 11949 /* 11950 * Pull/push the data over the wire from/to the other controller. 11951 * This takes into account the possibility that the local and 11952 * remote sglists may not be identical in terms of the size of 11953 * the elements and the number of elements. 
11954 * 11955 * One fundamental assumption here is that the length allocated for 11956 * both the local and remote sglists is identical. Otherwise, we've 11957 * essentially got a coding error of some sort. 11958 */ 11959 for (i = 0, j = 0; total_used < io->scsiio.kern_data_len; ) { 11960 int isc_ret; 11961 uint32_t cur_len, dma_length; 11962 uint8_t *tmp_ptr; 11963 11964 rq->id = CTL_HA_DATA_CTL; 11965 rq->command = command; 11966 rq->context = io; 11967 11968 /* 11969 * Both pointers should be aligned. But it is possible 11970 * that the allocation length is not. They should both 11971 * also have enough slack left over at the end, though, 11972 * to round up to the next 8 byte boundary. 11973 */ 11974 cur_len = ctl_min(local_sglist[i].len - local_used, 11975 remote_sglist[j].len - remote_used); 11976 11977 /* 11978 * If the current length is not a multiple of 8, round it 11979 * down, except in the case where we actually have less 11980 * than 8 bytes left. In that case, we need to round the 11981 * DMA length up to get the last bit. 11982 */ 11983 if ((cur_len & 0x7) != 0) { 11984 if (cur_len > 0x7) { 11985 cur_len = cur_len - (cur_len & 0x7); 11986 dma_length = cur_len; 11987 } else { 11988 CTL_SIZE_8B(dma_length, cur_len); 11989 } 11990 11991 } else 11992 dma_length = cur_len; 11993 11994 /* 11995 * If we had to allocate memory for this I/O, instead of using 11996 * the non-cached mirror memory, we'll need to flush the cache 11997 * before trying to DMA to the other controller. 11998 * 11999 * We could end up doing this multiple times for the same 12000 * segment if we have a larger local segment than remote 12001 * segment. That shouldn't be an issue. 12002 */ 12003 if ((io->io_hdr.flags & CTL_FLAG_NO_DATASYNC) == 0) { 12004 /* 12005 * XXX KDM use bus_dmamap_sync() here. 12006 */ 12007 } 12008 12009 rq->size = dma_length; 12010 12011 tmp_ptr = (uint8_t *)local_sglist[i].addr; 12012 tmp_ptr += local_used; 12013 12014 /* Use physical addresses when talking to ISC hardware */ 12015 if ((io->io_hdr.flags & CTL_FLAG_BUS_ADDR) == 0) { 12016 /* XXX KDM use busdma */ 12017 #if 0 12018 rq->local = vtophys(tmp_ptr); 12019 #endif 12020 } else 12021 rq->local = tmp_ptr; 12022 12023 tmp_ptr = (uint8_t *)remote_sglist[j].addr; 12024 tmp_ptr += remote_used; 12025 rq->remote = tmp_ptr; 12026 12027 rq->callback = NULL; 12028 12029 local_used += cur_len; 12030 if (local_used >= local_sglist[i].len) { 12031 i++; 12032 local_used = 0; 12033 } 12034 12035 remote_used += cur_len; 12036 if (remote_used >= remote_sglist[j].len) { 12037 j++; 12038 remote_used = 0; 12039 } 12040 total_used += cur_len; 12041 12042 if (total_used >= io->scsiio.kern_data_len) 12043 rq->callback = callback; 12044 12045 if ((rq->size & 0x7) != 0) { 12046 printf("%s: warning: size %d is not on 8b boundary\n", 12047 __func__, rq->size); 12048 } 12049 if (((uintptr_t)rq->local & 0x7) != 0) { 12050 printf("%s: warning: local %p not on 8b boundary\n", 12051 __func__, rq->local); 12052 } 12053 if (((uintptr_t)rq->remote & 0x7) != 0) { 12054 printf("%s: warning: remote %p not on 8b boundary\n", 12055 __func__, rq->remote); 12056 } 12057 #if 0 12058 printf("%s: %s: local %#x remote %#x size %d\n", __func__, 12059 (command == CTL_HA_DT_CMD_WRITE) ?
"WRITE" : "READ", 12060 rq->local, rq->remote, rq->size); 12061 #endif 12062 12063 isc_ret = ctl_dt_single(rq); 12064 if (isc_ret == CTL_HA_STATUS_WAIT) 12065 continue; 12066 12067 if (isc_ret == CTL_HA_STATUS_DISCONNECT) { 12068 rq->ret = CTL_HA_STATUS_SUCCESS; 12069 } else { 12070 rq->ret = isc_ret; 12071 } 12072 callback(rq); 12073 goto bailout; 12074 } 12075 12076 bailout: 12077 return (retval); 12078 12079 } 12080 12081 static void 12082 ctl_datamove_remote_read(union ctl_io *io) 12083 { 12084 int retval; 12085 int i; 12086 12087 /* 12088 * This will send an error to the other controller in the case of a 12089 * failure. 12090 */ 12091 retval = ctl_datamove_remote_sgl_setup(io); 12092 if (retval != 0) 12093 return; 12094 12095 retval = ctl_datamove_remote_xfer(io, CTL_HA_DT_CMD_READ, 12096 ctl_datamove_remote_read_cb); 12097 if ((retval != 0) 12098 && ((io->io_hdr.flags & CTL_FLAG_AUTO_MIRROR) == 0)) { 12099 /* 12100 * Make sure we free memory if there was an error.. The 12101 * ctl_datamove_remote_xfer() function will send the 12102 * datamove done message, or call the callback with an 12103 * error if there is a problem. 12104 */ 12105 for (i = 0; i < io->scsiio.kern_sg_entries; i++) 12106 free(io->io_hdr.local_sglist[i].addr, M_CTL); 12107 } 12108 12109 return; 12110 } 12111 12112 /* 12113 * Process a datamove request from the other controller. This is used for 12114 * XFER mode only, not SER_ONLY mode. For writes, we DMA into local memory 12115 * first. Once that is complete, the data gets DMAed into the remote 12116 * controller's memory. For reads, we DMA from the remote controller's 12117 * memory into our memory first, and then move it out to the FETD. 12118 * 12119 * Should be called without the ctl_lock held. 12120 */ 12121 static void 12122 ctl_datamove_remote(union ctl_io *io) 12123 { 12124 struct ctl_softc *softc; 12125 12126 softc = control_softc; 12127 12128 /* 12129 * Note that we look for an aborted I/O here, but don't do some of 12130 * the other checks that ctl_datamove() normally does. We don't 12131 * need to run the task queue, because this I/O is on the ISC 12132 * queue, which is executed by the work thread after the task queue. 12133 * We don't need to run the datamove delay code, since that should 12134 * have been done if need be on the other controller. 
12135 */ 12136 mtx_lock(&softc->ctl_lock); 12137 12138 if (io->io_hdr.flags & CTL_FLAG_ABORT) { 12139 12140 printf("%s: tag 0x%04x on (%d:%d:%d:%d) aborted\n", __func__, 12141 io->scsiio.tag_num, io->io_hdr.nexus.initid.id, 12142 io->io_hdr.nexus.targ_port, 12143 io->io_hdr.nexus.targ_target.id, 12144 io->io_hdr.nexus.targ_lun); 12145 io->io_hdr.status = CTL_CMD_ABORTED; 12146 io->io_hdr.port_status = 31338; 12147 12148 mtx_unlock(&softc->ctl_lock); 12149 12150 ctl_send_datamove_done(io, /*have_lock*/ 0); 12151 12152 return; 12153 } 12154 12155 if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_OUT) { 12156 mtx_unlock(&softc->ctl_lock); 12157 ctl_datamove_remote_write(io); 12158 } else if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_IN){ 12159 mtx_unlock(&softc->ctl_lock); 12160 ctl_datamove_remote_read(io); 12161 } else { 12162 union ctl_ha_msg msg; 12163 struct scsi_sense_data *sense; 12164 uint8_t sks[3]; 12165 int retry_count; 12166 12167 memset(&msg, 0, sizeof(msg)); 12168 12169 msg.hdr.msg_type = CTL_MSG_BAD_JUJU; 12170 msg.hdr.status = CTL_SCSI_ERROR; 12171 msg.scsi.scsi_status = SCSI_STATUS_CHECK_COND; 12172 12173 retry_count = 4243; 12174 12175 sense = &msg.scsi.sense_data; 12176 sks[0] = SSD_SCS_VALID; 12177 sks[1] = (retry_count >> 8) & 0xff; 12178 sks[2] = retry_count & 0xff; 12179 12180 /* "Internal target failure" */ 12181 scsi_set_sense_data(sense, 12182 /*sense_format*/ SSD_TYPE_NONE, 12183 /*current_error*/ 1, 12184 /*sense_key*/ SSD_KEY_HARDWARE_ERROR, 12185 /*asc*/ 0x44, 12186 /*ascq*/ 0x00, 12187 /*type*/ SSD_ELEM_SKS, 12188 /*size*/ sizeof(sks), 12189 /*data*/ sks, 12190 SSD_ELEM_NONE); 12191 12192 io->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE; 12193 if (io->io_hdr.flags & CTL_FLAG_FAILOVER) { 12194 ctl_failover_io(io, /*have_lock*/ 1); 12195 mtx_unlock(&softc->ctl_lock); 12196 return; 12197 } 12198 12199 mtx_unlock(&softc->ctl_lock); 12200 12201 if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, sizeof(msg), 0) > 12202 CTL_HA_STATUS_SUCCESS) { 12203 /* XXX KDM what to do if this fails? 
*/ 12204 } 12205 return; 12206 } 12207 12208 } 12209 12210 static int 12211 ctl_process_done(union ctl_io *io, int have_lock) 12212 { 12213 struct ctl_lun *lun; 12214 struct ctl_softc *ctl_softc; 12215 void (*fe_done)(union ctl_io *io); 12216 uint32_t targ_port = ctl_port_idx(io->io_hdr.nexus.targ_port); 12217 12218 CTL_DEBUG_PRINT(("ctl_process_done\n")); 12219 12220 fe_done = 12221 control_softc->ctl_ports[targ_port]->fe_done; 12222 12223 #ifdef CTL_TIME_IO 12224 if ((time_uptime - io->io_hdr.start_time) > ctl_time_io_secs) { 12225 char str[256]; 12226 char path_str[64]; 12227 struct sbuf sb; 12228 12229 ctl_scsi_path_string(io, path_str, sizeof(path_str)); 12230 sbuf_new(&sb, str, sizeof(str), SBUF_FIXEDLEN); 12231 12232 sbuf_cat(&sb, path_str); 12233 switch (io->io_hdr.io_type) { 12234 case CTL_IO_SCSI: 12235 ctl_scsi_command_string(&io->scsiio, NULL, &sb); 12236 sbuf_printf(&sb, "\n"); 12237 sbuf_cat(&sb, path_str); 12238 sbuf_printf(&sb, "Tag: 0x%04x, type %d\n", 12239 io->scsiio.tag_num, io->scsiio.tag_type); 12240 break; 12241 case CTL_IO_TASK: 12242 sbuf_printf(&sb, "Task I/O type: %d, Tag: 0x%04x, " 12243 "Tag Type: %d\n", io->taskio.task_action, 12244 io->taskio.tag_num, io->taskio.tag_type); 12245 break; 12246 default: 12247 printf("Invalid CTL I/O type %d\n", io->io_hdr.io_type); 12248 panic("Invalid CTL I/O type %d\n", io->io_hdr.io_type); 12249 break; 12250 } 12251 sbuf_cat(&sb, path_str); 12252 sbuf_printf(&sb, "ctl_process_done: %jd seconds\n", 12253 (intmax_t)time_uptime - io->io_hdr.start_time); 12254 sbuf_finish(&sb); 12255 printf("%s", sbuf_data(&sb)); 12256 } 12257 #endif /* CTL_TIME_IO */ 12258 12259 switch (io->io_hdr.io_type) { 12260 case CTL_IO_SCSI: 12261 break; 12262 case CTL_IO_TASK: 12263 ctl_io_error_print(io, NULL); 12264 if (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) 12265 ctl_free_io_internal(io, /*have_lock*/ 0); 12266 else 12267 fe_done(io); 12268 return (CTL_RETVAL_COMPLETE); 12269 break; 12270 default: 12271 printf("ctl_process_done: invalid io type %d\n", 12272 io->io_hdr.io_type); 12273 panic("ctl_process_done: invalid io type %d\n", 12274 io->io_hdr.io_type); 12275 break; /* NOTREACHED */ 12276 } 12277 12278 lun = (struct ctl_lun *)io->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 12279 if (lun == NULL) { 12280 CTL_DEBUG_PRINT(("NULL LUN for lun %d\n", 12281 io->io_hdr.nexus.targ_lun)); 12282 fe_done(io); 12283 goto bailout; 12284 } 12285 ctl_softc = lun->ctl_softc; 12286 12287 /* 12288 * Remove this from the OOA queue. 12289 */ 12290 if (have_lock == 0) 12291 mtx_lock(&ctl_softc->ctl_lock); 12292 12293 /* 12294 * Check to see if we have any errors to inject here. We only 12295 * inject errors for commands that don't already have errors set. 12296 */ 12297 if ((STAILQ_FIRST(&lun->error_list) != NULL) 12298 && ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) 12299 ctl_inject_error(lun, io); 12300 12301 /* 12302 * XXX KDM how do we treat commands that aren't completed 12303 * successfully? 12304 * 12305 * XXX KDM should we also track I/O latency? 
12306 */ 12307 if ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS) { 12308 uint32_t blocksize; 12309 #ifdef CTL_TIME_IO 12310 struct bintime cur_bt; 12311 #endif 12312 12313 if ((lun->be_lun != NULL) 12314 && (lun->be_lun->blocksize != 0)) 12315 blocksize = lun->be_lun->blocksize; 12316 else 12317 blocksize = 512; 12318 12319 switch (io->io_hdr.io_type) { 12320 case CTL_IO_SCSI: { 12321 int isread; 12322 struct ctl_lba_len lbalen; 12323 12324 isread = 0; 12325 switch (io->scsiio.cdb[0]) { 12326 case READ_6: 12327 case READ_10: 12328 case READ_12: 12329 case READ_16: 12330 isread = 1; 12331 /* FALLTHROUGH */ 12332 case WRITE_6: 12333 case WRITE_10: 12334 case WRITE_12: 12335 case WRITE_16: 12336 case WRITE_VERIFY_10: 12337 case WRITE_VERIFY_12: 12338 case WRITE_VERIFY_16: 12339 memcpy(&lbalen, io->io_hdr.ctl_private[ 12340 CTL_PRIV_LBA_LEN].bytes, sizeof(lbalen)); 12341 12342 if (isread) { 12343 lun->stats.ports[targ_port].bytes[CTL_STATS_READ] += 12344 lbalen.len * blocksize; 12345 lun->stats.ports[targ_port].operations[CTL_STATS_READ]++; 12346 12347 #ifdef CTL_TIME_IO 12348 bintime_add( 12349 &lun->stats.ports[targ_port].dma_time[CTL_STATS_READ], 12350 &io->io_hdr.dma_bt); 12351 lun->stats.ports[targ_port].num_dmas[CTL_STATS_READ] += 12352 io->io_hdr.num_dmas; 12353 getbintime(&cur_bt); 12354 bintime_sub(&cur_bt, 12355 &io->io_hdr.start_bt); 12356 12357 bintime_add( 12358 &lun->stats.ports[targ_port].time[CTL_STATS_READ], 12359 &cur_bt); 12360 12361 #if 0 12362 cs_prof_gettime(&cur_ticks); 12363 lun->stats.time[CTL_STATS_READ] += 12364 cur_ticks - 12365 io->io_hdr.start_ticks; 12366 #endif 12367 #if 0 12368 lun->stats.time[CTL_STATS_READ] += 12369 jiffies - io->io_hdr.start_time; 12370 #endif 12371 #endif /* CTL_TIME_IO */ 12372 } else { 12373 lun->stats.ports[targ_port].bytes[CTL_STATS_WRITE] += 12374 lbalen.len * blocksize; 12375 lun->stats.ports[targ_port].operations[ 12376 CTL_STATS_WRITE]++; 12377 12378 #ifdef CTL_TIME_IO 12379 bintime_add( 12380 &lun->stats.ports[targ_port].dma_time[CTL_STATS_WRITE], 12381 &io->io_hdr.dma_bt); 12382 lun->stats.ports[targ_port].num_dmas[CTL_STATS_WRITE] += 12383 io->io_hdr.num_dmas; 12384 getbintime(&cur_bt); 12385 bintime_sub(&cur_bt, 12386 &io->io_hdr.start_bt); 12387 12388 bintime_add( 12389 &lun->stats.ports[targ_port].time[CTL_STATS_WRITE], 12390 &cur_bt); 12391 #if 0 12392 cs_prof_gettime(&cur_ticks); 12393 lun->stats.ports[targ_port].time[CTL_STATS_WRITE] += 12394 cur_ticks - 12395 io->io_hdr.start_ticks; 12396 lun->stats.ports[targ_port].time[CTL_STATS_WRITE] += 12397 jiffies - io->io_hdr.start_time; 12398 #endif 12399 #endif /* CTL_TIME_IO */ 12400 } 12401 break; 12402 default: 12403 lun->stats.ports[targ_port].operations[CTL_STATS_NO_IO]++; 12404 12405 #ifdef CTL_TIME_IO 12406 bintime_add( 12407 &lun->stats.ports[targ_port].dma_time[CTL_STATS_NO_IO], 12408 &io->io_hdr.dma_bt); 12409 lun->stats.ports[targ_port].num_dmas[CTL_STATS_NO_IO] += 12410 io->io_hdr.num_dmas; 12411 getbintime(&cur_bt); 12412 bintime_sub(&cur_bt, &io->io_hdr.start_bt); 12413 12414 bintime_add(&lun->stats.ports[targ_port].time[CTL_STATS_NO_IO], 12415 &cur_bt); 12416 12417 #if 0 12418 cs_prof_gettime(&cur_ticks); 12419 lun->stats.ports[targ_port].time[CTL_STATS_NO_IO] += 12420 cur_ticks - 12421 io->io_hdr.start_ticks; 12422 lun->stats.ports[targ_port].time[CTL_STATS_NO_IO] += 12423 jiffies - io->io_hdr.start_time; 12424 #endif 12425 #endif /* CTL_TIME_IO */ 12426 break; 12427 } 12428 break; 12429 } 12430 default: 12431 break; 12432 } 12433 } 12434 12435 
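#if 0
	/*
	 * Hedged sketch, not part of the original flow: one way the per-port
	 * counters accumulated above could be used to answer the "should we
	 * also track I/O latency?" question, by deriving a rough average
	 * time per completed read.  Assumes CTL_TIME_IO is defined so the
	 * time[] bintimes are actually populated.
	 */
	{
		uint64_t nreads;

		nreads = lun->stats.ports[targ_port].operations[CTL_STATS_READ];
		if (nreads > 0)
			printf("%s: ~%jd sec/read over %ju reads on port %u\n",
			       __func__, (intmax_t)
			       (lun->stats.ports[targ_port].time[CTL_STATS_READ].sec /
			       nreads), (uintmax_t)nreads, targ_port);
	}
#endif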
TAILQ_REMOVE(&lun->ooa_queue, &io->io_hdr, ooa_links); 12436 12437 /* 12438 * Run through the blocked queue on this LUN and see if anything 12439 * has become unblocked, now that this transaction is done. 12440 */ 12441 ctl_check_blocked(lun); 12442 12443 /* 12444 * If the LUN has been invalidated, free it if there is nothing 12445 * left on its OOA queue. 12446 */ 12447 if ((lun->flags & CTL_LUN_INVALID) 12448 && (TAILQ_FIRST(&lun->ooa_queue) == NULL)) 12449 ctl_free_lun(lun); 12450 12451 /* 12452 * If this command has been aborted, make sure we set the status 12453 * properly. The FETD is responsible for freeing the I/O and doing 12454 * whatever it needs to do to clean up its state. 12455 */ 12456 if (io->io_hdr.flags & CTL_FLAG_ABORT) 12457 io->io_hdr.status = CTL_CMD_ABORTED; 12458 12459 /* 12460 * We print out status for every task management command. For SCSI 12461 * commands, we filter out any unit attention errors; they happen 12462 * on every boot, and would clutter up the log. Note: task 12463 * management commands aren't printed here, they are printed above, 12464 * since they should never even make it down here. 12465 */ 12466 switch (io->io_hdr.io_type) { 12467 case CTL_IO_SCSI: { 12468 int error_code, sense_key, asc, ascq; 12469 12470 sense_key = 0; 12471 12472 if (((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SCSI_ERROR) 12473 && (io->scsiio.scsi_status == SCSI_STATUS_CHECK_COND)) { 12474 /* 12475 * Since this is just for printing, no need to 12476 * show errors here. 12477 */ 12478 scsi_extract_sense_len(&io->scsiio.sense_data, 12479 io->scsiio.sense_len, 12480 &error_code, 12481 &sense_key, 12482 &asc, 12483 &ascq, 12484 /*show_errors*/ 0); 12485 } 12486 12487 if (((io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS) 12488 && (((io->io_hdr.status & CTL_STATUS_MASK) != CTL_SCSI_ERROR) 12489 || (io->scsiio.scsi_status != SCSI_STATUS_CHECK_COND) 12490 || (sense_key != SSD_KEY_UNIT_ATTENTION))) { 12491 12492 if ((time_uptime - ctl_softc->last_print_jiffies) <= 0){ 12493 ctl_softc->skipped_prints++; 12494 if (have_lock == 0) 12495 mtx_unlock(&ctl_softc->ctl_lock); 12496 } else { 12497 uint32_t skipped_prints; 12498 12499 skipped_prints = ctl_softc->skipped_prints; 12500 12501 ctl_softc->skipped_prints = 0; 12502 ctl_softc->last_print_jiffies = time_uptime; 12503 12504 if (have_lock == 0) 12505 mtx_unlock(&ctl_softc->ctl_lock); 12506 if (skipped_prints > 0) { 12507 #ifdef NEEDTOPORT 12508 csevent_log(CSC_CTL | CSC_SHELF_SW | 12509 CTL_ERROR_REPORT, 12510 csevent_LogType_Trace, 12511 csevent_Severity_Information, 12512 csevent_AlertLevel_Green, 12513 csevent_FRU_Firmware, 12514 csevent_FRU_Unknown, 12515 "High CTL error volume, %d prints " 12516 "skipped", skipped_prints); 12517 #endif 12518 } 12519 ctl_io_error_print(io, NULL); 12520 } 12521 } else { 12522 if (have_lock == 0) 12523 mtx_unlock(&ctl_softc->ctl_lock); 12524 } 12525 break; 12526 } 12527 case CTL_IO_TASK: 12528 if (have_lock == 0) 12529 mtx_unlock(&ctl_softc->ctl_lock); 12530 ctl_io_error_print(io, NULL); 12531 break; 12532 default: 12533 if (have_lock == 0) 12534 mtx_unlock(&ctl_softc->ctl_lock); 12535 break; 12536 } 12537 12538 /* 12539 * Tell the FETD or the other shelf controller we're done with this 12540 * command. Note that only SCSI commands get to this point. Task 12541 * management commands are completed above. 12542 * 12543 * We only send status to the other controller if we're in XFER 12544 * mode. 
In SER_ONLY mode, the I/O is done on the controller that 12545 * received the I/O (from CTL's perspective), and so the status is 12546 * generated there. 12547 * 12548 * XXX KDM if we hold the lock here, we could cause a deadlock 12549 * if the frontend comes back in in this context to queue 12550 * something. 12551 */ 12552 if ((ctl_softc->ha_mode == CTL_HA_MODE_XFER) 12553 && (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC)) { 12554 union ctl_ha_msg msg; 12555 12556 memset(&msg, 0, sizeof(msg)); 12557 msg.hdr.msg_type = CTL_MSG_FINISH_IO; 12558 msg.hdr.original_sc = io->io_hdr.original_sc; 12559 msg.hdr.nexus = io->io_hdr.nexus; 12560 msg.hdr.status = io->io_hdr.status; 12561 msg.scsi.scsi_status = io->scsiio.scsi_status; 12562 msg.scsi.tag_num = io->scsiio.tag_num; 12563 msg.scsi.tag_type = io->scsiio.tag_type; 12564 msg.scsi.sense_len = io->scsiio.sense_len; 12565 msg.scsi.sense_residual = io->scsiio.sense_residual; 12566 msg.scsi.residual = io->scsiio.residual; 12567 memcpy(&msg.scsi.sense_data, &io->scsiio.sense_data, 12568 sizeof(io->scsiio.sense_data)); 12569 /* 12570 * We copy this whether or not this is an I/O-related 12571 * command. Otherwise, we'd have to go and check to see 12572 * whether it's a read/write command, and it really isn't 12573 * worth it. 12574 */ 12575 memcpy(&msg.scsi.lbalen, 12576 &io->io_hdr.ctl_private[CTL_PRIV_LBA_LEN].bytes, 12577 sizeof(msg.scsi.lbalen)); 12578 12579 if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, 12580 sizeof(msg), 0) > CTL_HA_STATUS_SUCCESS) { 12581 /* XXX do something here */ 12582 } 12583 12584 ctl_free_io_internal(io, /*have_lock*/ 0); 12585 } else 12586 fe_done(io); 12587 12588 bailout: 12589 12590 return (CTL_RETVAL_COMPLETE); 12591 } 12592 12593 /* 12594 * Front end should call this if it doesn't do autosense. When the request 12595 * sense comes back in from the initiator, we'll dequeue this and send it. 12596 */ 12597 int 12598 ctl_queue_sense(union ctl_io *io) 12599 { 12600 struct ctl_lun *lun; 12601 struct ctl_softc *ctl_softc; 12602 uint32_t initidx; 12603 12604 ctl_softc = control_softc; 12605 12606 CTL_DEBUG_PRINT(("ctl_queue_sense\n")); 12607 12608 /* 12609 * LUN lookup will likely move to the ctl_work_thread() once we 12610 * have our new queueing infrastructure (that doesn't put things on 12611 * a per-LUN queue initially). That is so that we can handle 12612 * things like an INQUIRY to a LUN that we don't have enabled. We 12613 * can't deal with that right now. 12614 */ 12615 mtx_lock(&ctl_softc->ctl_lock); 12616 12617 /* 12618 * If we don't have a LUN for this, just toss the sense 12619 * information. 12620 */ 12621 if ((io->io_hdr.nexus.targ_lun < CTL_MAX_LUNS) 12622 && (ctl_softc->ctl_luns[io->io_hdr.nexus.targ_lun] != NULL)) 12623 lun = ctl_softc->ctl_luns[io->io_hdr.nexus.targ_lun]; 12624 else 12625 goto bailout; 12626 12627 initidx = ctl_get_initindex(&io->io_hdr.nexus); 12628 12629 /* 12630 * Already have CA set for this LUN...toss the sense information. 12631 */ 12632 if (ctl_is_set(lun->have_ca, initidx)) 12633 goto bailout; 12634 12635 memcpy(&lun->pending_sense[initidx].sense, &io->scsiio.sense_data, 12636 ctl_min(sizeof(lun->pending_sense[initidx].sense), 12637 sizeof(io->scsiio.sense_data))); 12638 ctl_set_mask(lun->have_ca, initidx); 12639 12640 bailout: 12641 mtx_unlock(&ctl_softc->ctl_lock); 12642 12643 ctl_free_io(io); 12644 12645 return (CTL_RETVAL_COMPLETE); 12646 } 12647 12648 /* 12649 * Primary command inlet from frontend ports. All SCSI and task I/O 12650 * requests must go through this function. 
12651 */ 12652 int 12653 ctl_queue(union ctl_io *io) 12654 { 12655 struct ctl_softc *ctl_softc; 12656 12657 CTL_DEBUG_PRINT(("ctl_queue cdb[0]=%02X\n", io->scsiio.cdb[0])); 12658 12659 ctl_softc = control_softc; 12660 12661 #ifdef CTL_TIME_IO 12662 io->io_hdr.start_time = time_uptime; 12663 getbintime(&io->io_hdr.start_bt); 12664 #endif /* CTL_TIME_IO */ 12665 12666 mtx_lock(&ctl_softc->ctl_lock); 12667 12668 switch (io->io_hdr.io_type) { 12669 case CTL_IO_SCSI: 12670 STAILQ_INSERT_TAIL(&ctl_softc->incoming_queue, &io->io_hdr, 12671 links); 12672 break; 12673 case CTL_IO_TASK: 12674 STAILQ_INSERT_TAIL(&ctl_softc->task_queue, &io->io_hdr, links); 12675 /* 12676 * Set the task pending flag. This is necessary to close a 12677 * race condition with the FETD: 12678 * 12679 * - FETD submits a task management command, like an abort. 12680 * - Back end calls fe_datamove() to move the data for the 12681 * aborted command. The FETD can't really accept it, but 12682 * if it did, it would end up transmitting data for a 12683 * command that the initiator told us to abort. 12684 * 12685 * We close the race condition by setting the flag here, 12686 * and checking it in ctl_datamove(), before calling the 12687 * FETD's fe_datamove routine. If we've got a task 12688 * pending, we run the task queue and then check to see 12689 * whether our particular I/O has been aborted. 12690 */ 12691 ctl_softc->flags |= CTL_FLAG_TASK_PENDING; 12692 break; 12693 default: 12694 mtx_unlock(&ctl_softc->ctl_lock); 12695 printf("ctl_queue: unknown I/O type %d\n", io->io_hdr.io_type); 12696 return (-EINVAL); 12697 break; /* NOTREACHED */ 12698 } 12699 mtx_unlock(&ctl_softc->ctl_lock); 12700 12701 ctl_wakeup_thread(); 12702 12703 return (CTL_RETVAL_COMPLETE); 12704 } 12705 12706 #ifdef CTL_IO_DELAY 12707 static void 12708 ctl_done_timer_wakeup(void *arg) 12709 { 12710 union ctl_io *io; 12711 12712 io = (union ctl_io *)arg; 12713 ctl_done_lock(io, /*have_lock*/ 0); 12714 } 12715 #endif /* CTL_IO_DELAY */ 12716 12717 void 12718 ctl_done_lock(union ctl_io *io, int have_lock) 12719 { 12720 struct ctl_softc *ctl_softc; 12721 #ifndef CTL_DONE_THREAD 12722 union ctl_io *xio; 12723 #endif /* !CTL_DONE_THREAD */ 12724 12725 ctl_softc = control_softc; 12726 12727 if (have_lock == 0) 12728 mtx_lock(&ctl_softc->ctl_lock); 12729 12730 /* 12731 * Enable this to catch duplicate completion issues. 12732 */ 12733 #if 0 12734 if (io->io_hdr.flags & CTL_FLAG_ALREADY_DONE) { 12735 printf("%s: type %d msg %d cdb %x iptl: " 12736 "%d:%d:%d:%d tag 0x%04x " 12737 "flag %#x status %x\n", 12738 __func__, 12739 io->io_hdr.io_type, 12740 io->io_hdr.msg_type, 12741 io->scsiio.cdb[0], 12742 io->io_hdr.nexus.initid.id, 12743 io->io_hdr.nexus.targ_port, 12744 io->io_hdr.nexus.targ_target.id, 12745 io->io_hdr.nexus.targ_lun, 12746 (io->io_hdr.io_type == 12747 CTL_IO_TASK) ? 12748 io->taskio.tag_num : 12749 io->scsiio.tag_num, 12750 io->io_hdr.flags, 12751 io->io_hdr.status); 12752 } else 12753 io->io_hdr.flags |= CTL_FLAG_ALREADY_DONE; 12754 #endif 12755 12756 /* 12757 * This is an internal copy of an I/O, and should not go through 12758 * the normal done processing logic. 12759 */ 12760 if (io->io_hdr.flags & CTL_FLAG_INT_COPY) { 12761 if (have_lock == 0) 12762 mtx_unlock(&ctl_softc->ctl_lock); 12763 return; 12764 } 12765 12766 /* 12767 * We need to send a msg to the serializing shelf to finish the IO 12768 * as well. We don't send a finish message to the other shelf if 12769 * this is a task management command. 
Task management commands 12770 * aren't serialized in the OOA queue, but rather just executed on 12771 * both shelf controllers for commands that originated on that 12772 * controller. 12773 */ 12774 if ((io->io_hdr.flags & CTL_FLAG_SENT_2OTHER_SC) 12775 && (io->io_hdr.io_type != CTL_IO_TASK)) { 12776 union ctl_ha_msg msg_io; 12777 12778 msg_io.hdr.msg_type = CTL_MSG_FINISH_IO; 12779 msg_io.hdr.serializing_sc = io->io_hdr.serializing_sc; 12780 if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_io, 12781 sizeof(msg_io), 0 ) != CTL_HA_STATUS_SUCCESS) { 12782 } 12783 /* continue on to finish IO */ 12784 } 12785 #ifdef CTL_IO_DELAY 12786 if (io->io_hdr.flags & CTL_FLAG_DELAY_DONE) { 12787 struct ctl_lun *lun; 12788 12789 lun =(struct ctl_lun *)io->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 12790 12791 io->io_hdr.flags &= ~CTL_FLAG_DELAY_DONE; 12792 } else { 12793 struct ctl_lun *lun; 12794 12795 lun =(struct ctl_lun *)io->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 12796 12797 if ((lun != NULL) 12798 && (lun->delay_info.done_delay > 0)) { 12799 struct callout *callout; 12800 12801 callout = (struct callout *)&io->io_hdr.timer_bytes; 12802 callout_init(callout, /*mpsafe*/ 1); 12803 io->io_hdr.flags |= CTL_FLAG_DELAY_DONE; 12804 callout_reset(callout, 12805 lun->delay_info.done_delay * hz, 12806 ctl_done_timer_wakeup, io); 12807 if (lun->delay_info.done_type == CTL_DELAY_TYPE_ONESHOT) 12808 lun->delay_info.done_delay = 0; 12809 if (have_lock == 0) 12810 mtx_unlock(&ctl_softc->ctl_lock); 12811 return; 12812 } 12813 } 12814 #endif /* CTL_IO_DELAY */ 12815 12816 STAILQ_INSERT_TAIL(&ctl_softc->done_queue, &io->io_hdr, links); 12817 12818 #ifdef CTL_DONE_THREAD 12819 if (have_lock == 0) 12820 mtx_unlock(&ctl_softc->ctl_lock); 12821 12822 ctl_wakeup_thread(); 12823 #else /* CTL_DONE_THREAD */ 12824 for (xio = (union ctl_io *)STAILQ_FIRST(&ctl_softc->done_queue); 12825 xio != NULL; 12826 xio =(union ctl_io *)STAILQ_FIRST(&ctl_softc->done_queue)) { 12827 12828 STAILQ_REMOVE_HEAD(&ctl_softc->done_queue, links); 12829 12830 ctl_process_done(xio, /*have_lock*/ 1); 12831 } 12832 if (have_lock == 0) 12833 mtx_unlock(&ctl_softc->ctl_lock); 12834 #endif /* CTL_DONE_THREAD */ 12835 } 12836 12837 void 12838 ctl_done(union ctl_io *io) 12839 { 12840 ctl_done_lock(io, /*have_lock*/ 0); 12841 } 12842 12843 int 12844 ctl_isc(struct ctl_scsiio *ctsio) 12845 { 12846 struct ctl_lun *lun; 12847 int retval; 12848 12849 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 12850 12851 CTL_DEBUG_PRINT(("ctl_isc: command: %02x\n", ctsio->cdb[0])); 12852 12853 CTL_DEBUG_PRINT(("ctl_isc: calling data_submit()\n")); 12854 12855 retval = lun->backend->data_submit((union ctl_io *)ctsio); 12856 12857 return (retval); 12858 } 12859 12860 12861 static void 12862 ctl_work_thread(void *arg) 12863 { 12864 struct ctl_softc *softc; 12865 union ctl_io *io; 12866 struct ctl_be_lun *be_lun; 12867 int retval; 12868 12869 CTL_DEBUG_PRINT(("ctl_work_thread starting\n")); 12870 12871 softc = (struct ctl_softc *)arg; 12872 if (softc == NULL) 12873 return; 12874 12875 mtx_lock(&softc->ctl_lock); 12876 for (;;) { 12877 retval = 0; 12878 12879 /* 12880 * We handle the queues in this order: 12881 * - task management 12882 * - ISC 12883 * - done queue (to free up resources, unblock other commands) 12884 * - RtR queue 12885 * - incoming queue 12886 * 12887 * If those queues are empty, we break out of the loop and 12888 * go to sleep. 
12889 */ 12890 io = (union ctl_io *)STAILQ_FIRST(&softc->task_queue); 12891 if (io != NULL) { 12892 ctl_run_task_queue(softc); 12893 continue; 12894 } 12895 io = (union ctl_io *)STAILQ_FIRST(&softc->isc_queue); 12896 if (io != NULL) { 12897 STAILQ_REMOVE_HEAD(&softc->isc_queue, links); 12898 ctl_handle_isc(io); 12899 continue; 12900 } 12901 io = (union ctl_io *)STAILQ_FIRST(&softc->done_queue); 12902 if (io != NULL) { 12903 STAILQ_REMOVE_HEAD(&softc->done_queue, links); 12904 /* clear any blocked commands, call fe_done */ 12905 mtx_unlock(&softc->ctl_lock); 12906 /* 12907 * XXX KDM 12908 * Call this without a lock for now. This will 12909 * depend on whether there is any way the FETD can 12910 * sleep or deadlock if called with the CTL lock 12911 * held. 12912 */ 12913 retval = ctl_process_done(io, /*have_lock*/ 0); 12914 mtx_lock(&softc->ctl_lock); 12915 continue; 12916 } 12917 if (!ctl_pause_rtr) { 12918 io = (union ctl_io *)STAILQ_FIRST(&softc->rtr_queue); 12919 if (io != NULL) { 12920 STAILQ_REMOVE_HEAD(&softc->rtr_queue, links); 12921 mtx_unlock(&softc->ctl_lock); 12922 goto execute; 12923 } 12924 } 12925 io = (union ctl_io *)STAILQ_FIRST(&softc->incoming_queue); 12926 if (io != NULL) { 12927 STAILQ_REMOVE_HEAD(&softc->incoming_queue, links); 12928 mtx_unlock(&softc->ctl_lock); 12929 ctl_scsiio_precheck(softc, &io->scsiio); 12930 mtx_lock(&softc->ctl_lock); 12931 continue; 12932 } 12933 /* 12934 * We might want to move this to a separate thread, so that 12935 * configuration requests (in this case LUN creations) 12936 * won't impact the I/O path. 12937 */ 12938 be_lun = STAILQ_FIRST(&softc->pending_lun_queue); 12939 if (be_lun != NULL) { 12940 STAILQ_REMOVE_HEAD(&softc->pending_lun_queue, links); 12941 mtx_unlock(&softc->ctl_lock); 12942 ctl_create_lun(be_lun); 12943 mtx_lock(&softc->ctl_lock); 12944 continue; 12945 } 12946 12947 /* XXX KDM use the PDROP flag?? */ 12948 /* Sleep until we have something to do. */ 12949 mtx_sleep(softc, &softc->ctl_lock, PRIBIO, "ctl_work", 0); 12950 12951 /* Back to the top of the loop to see what woke us up. */ 12952 continue; 12953 12954 execute: 12955 retval = ctl_scsiio(&io->scsiio); 12956 switch (retval) { 12957 case CTL_RETVAL_COMPLETE: 12958 break; 12959 default: 12960 /* 12961 * Probably need to make sure this doesn't happen. 12962 */ 12963 break; 12964 } 12965 mtx_lock(&softc->ctl_lock); 12966 } 12967 } 12968 12969 void 12970 ctl_wakeup_thread() 12971 { 12972 struct ctl_softc *softc; 12973 12974 softc = control_softc; 12975 12976 wakeup(softc); 12977 } 12978 12979 /* Initialization and failover */ 12980 12981 void 12982 ctl_init_isc_msg(void) 12983 { 12984 printf("CTL: Still calling this thing\n"); 12985 } 12986 12987 /* 12988 * Init component 12989 * Initializes component into configuration defined by bootMode 12990 * (see hasc-sv.c) 12991 * returns hasc_Status: 12992 * OK 12993 * ERROR - fatal error 12994 */ 12995 static ctl_ha_comp_status 12996 ctl_isc_init(struct ctl_ha_component *c) 12997 { 12998 ctl_ha_comp_status ret = CTL_HA_COMP_STATUS_OK; 12999 13000 c->status = ret; 13001 return ret; 13002 } 13003 13004 /* Start component 13005 * Starts component in state requested. If component starts successfully, 13006 * it must set its own state to the requestrd state 13007 * When requested state is HASC_STATE_HA, the component may refine it 13008 * by adding _SLAVE or _MASTER flags. 
Currently allowed state transitions are: 13010 * UNKNOWN->HA - initial startup 13011 * UNKNOWN->SINGLE - initial startup when no partner detected 13012 * HA->SINGLE - failover 13013 * returns ctl_ha_comp_status: 13014 * OK - component successfully started in requested state 13015 * FAILED - could not start the requested state, failover may 13016 * be possible 13017 * ERROR - fatal error detected, no future startup possible 13018 */ 13019 static ctl_ha_comp_status 13020 ctl_isc_start(struct ctl_ha_component *c, ctl_ha_state state) 13021 { 13022 ctl_ha_comp_status ret = CTL_HA_COMP_STATUS_OK; 13023 13024 // UNKNOWN->HA or UNKNOWN->SINGLE (bootstrap) 13025 if (c->state == CTL_HA_STATE_UNKNOWN) { 13026 ctl_is_single = 0; 13027 if (ctl_ha_msg_create(CTL_HA_CHAN_CTL, ctl_isc_event_handler) 13028 != CTL_HA_STATUS_SUCCESS) { 13029 printf("ctl_isc_start: ctl_ha_msg_create failed.\n"); 13030 ret = CTL_HA_COMP_STATUS_ERROR; 13031 } 13032 } else if (CTL_HA_STATE_IS_HA(c->state) 13033 && CTL_HA_STATE_IS_SINGLE(state)) { 13034 // HA->SINGLE transition 13035 ctl_failover(); 13036 ctl_is_single = 1; 13037 } else { 13038 printf("ctl_isc_start: Invalid state transition %X->%X\n", 13039 c->state, state); 13040 ret = CTL_HA_COMP_STATUS_ERROR; 13041 } 13042 if (CTL_HA_STATE_IS_SINGLE(state)) 13043 ctl_is_single = 1; 13044 13045 c->state = state; 13046 c->status = ret; 13047 return ret; 13048 } 13049 13050 /* 13051 * Quiesce component 13052 * The component must clear any error conditions (set status to OK) and 13053 * prepare itself for another Start call 13054 * returns ctl_ha_comp_status: 13055 * OK 13056 * ERROR 13057 */ 13058 static ctl_ha_comp_status 13059 ctl_isc_quiesce(struct ctl_ha_component *c) 13060 { 13061 int ret = CTL_HA_COMP_STATUS_OK; 13062 13063 ctl_pause_rtr = 1; 13064 c->status = ret; 13065 return ret; 13066 } 13067 13068 struct ctl_ha_component ctl_ha_component_ctlisc = 13069 { 13070 .name = "CTL ISC", 13071 .state = CTL_HA_STATE_UNKNOWN, 13072 .init = ctl_isc_init, 13073 .start = ctl_isc_start, 13074 .quiesce = ctl_isc_quiesce 13075 }; 13076 13077 /* 13078 * vim: ts=8 13079 */ 13080
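#if 0
/*
 * Illustrative sketch only, not part of CTL itself: roughly how the HA
 * support code (hasc-sv.c, referenced in the comments above) might drive
 * the ctl_ha_component_ctlisc callbacks.  The function below is
 * hypothetical; the real caller and the exact ctl_ha_state values it
 * passes live in the HA support code, not here.
 */
static ctl_ha_comp_status
example_ctl_isc_bringup(struct ctl_ha_component *c, ctl_ha_state boot_state,
			ctl_ha_state failover_state)
{
	ctl_ha_comp_status status;

	/* Init once, then start in the state chosen at boot (HA or SINGLE). */
	status = c->init(c);
	if (status != CTL_HA_COMP_STATUS_OK)
		return (status);

	status = c->start(c, boot_state);
	if (status != CTL_HA_COMP_STATUS_OK)
		return (status);

	/*
	 * On failover the component is quiesced (which pauses the RtR queue
	 * via ctl_pause_rtr) and then restarted, e.g. with an HA->SINGLE
	 * transition.
	 */
	c->quiesce(c);
	return (c->start(c, failover_state));
}
#endif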