/*-
 * Copyright (c) 2003-2009 Silicon Graphics International Corp.
 * Copyright (c) 2012 The FreeBSD Foundation
 * All rights reserved.
 *
 * Portions of this software were developed by Edward Tomasz Napierala
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon
 *    including a substantially similar Disclaimer requirement for further
 *    binary redistribution.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGES.
 *
 * $Id: //depot/users/kenm/FreeBSD-test2/sys/cam/ctl/ctl.c#8 $
 */
/*
 * CAM Target Layer, a SCSI device emulation subsystem.
 *
 * Author: Ken Merry <ken@FreeBSD.org>
 */

#define _CTL_C

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/types.h>
#include <sys/kthread.h>
#include <sys/bio.h>
#include <sys/fcntl.h>
#include <sys/lock.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/condvar.h>
#include <sys/malloc.h>
#include <sys/conf.h>
#include <sys/ioccom.h>
#include <sys/queue.h>
#include <sys/sbuf.h>
#include <sys/endian.h>
#include <sys/sysctl.h>

#include <cam/cam.h>
#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_da.h>
#include <cam/ctl/ctl_io.h>
#include <cam/ctl/ctl.h>
#include <cam/ctl/ctl_frontend.h>
#include <cam/ctl/ctl_frontend_internal.h>
#include <cam/ctl/ctl_util.h>
#include <cam/ctl/ctl_backend.h>
#include <cam/ctl/ctl_ioctl.h>
#include <cam/ctl/ctl_ha.h>
#include <cam/ctl/ctl_private.h>
#include <cam/ctl/ctl_debug.h>
#include <cam/ctl/ctl_scsi_all.h>
#include <cam/ctl/ctl_error.h>

struct ctl_softc *control_softc = NULL;

/*
 * The default is to run with CTL_DONE_THREAD turned on.  Completed
 * transactions are queued for processing by the CTL work thread.  When
 * CTL_DONE_THREAD is not defined, completed transactions are processed in
 * the caller's context.
 */
#define CTL_DONE_THREAD

/*
 * Use the serial number and device ID provided by the backend, rather than
 * making up our own.
 */
#define CTL_USE_BACKEND_SN

/*
 * Size and alignment macros needed for Copan-specific HA hardware.  These
 * can go away when the HA code is re-written, and uses busdma for any
 * hardware.
 */
#define CTL_ALIGN_8B(target, source, type)				\
	if (((uint32_t)source & 0x7) != 0)				\
		target = (type)(source + (0x8 - ((uint32_t)source & 0x7)));\
	else								\
		target = (type)source;

#define CTL_SIZE_8B(target, size)					\
	if ((size & 0x7) != 0)						\
		target = size + (0x8 - (size & 0x7));			\
	else								\
		target = size;

#define CTL_ALIGN_8B_MARGIN	16
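
/*
 * Illustrative sketch only (kept out of the build with #if 0): how the
 * 8-byte rounding macros above behave.  The helper name and values are
 * made up for the example.
 */
#if 0
static void
ctl_align_8b_example(void)
{
	uint32_t padded_len;
	uint8_t *buf = NULL, *aligned_buf;

	/* A 13-byte size rounds up to the next multiple of 8, i.e. 16. */
	CTL_SIZE_8B(padded_len, 13);

	/* Round a (possibly unaligned) pointer up to an 8-byte boundary. */
	CTL_ALIGN_8B(aligned_buf, buf, uint8_t *);
}
#endif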

/*
 * Template mode pages.
 */

/*
 * Note that these are default values only.  The actual values will be
 * filled in when the user does a mode sense.
 */
static struct copan_power_subpage power_page_default = {
	/*page_code*/ PWR_PAGE_CODE | SMPH_SPF,
	/*subpage*/ PWR_SUBPAGE_CODE,
	/*page_length*/ {(sizeof(struct copan_power_subpage) - 4) & 0xff00,
			 (sizeof(struct copan_power_subpage) - 4) & 0x00ff},
	/*page_version*/ PWR_VERSION,
	/* total_luns */ 26,
	/* max_active_luns*/ PWR_DFLT_MAX_LUNS,
	/*reserved*/ {0, 0, 0, 0, 0, 0, 0, 0, 0,
		      0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
		      0, 0, 0, 0, 0, 0}
};

static struct copan_power_subpage power_page_changeable = {
	/*page_code*/ PWR_PAGE_CODE | SMPH_SPF,
	/*subpage*/ PWR_SUBPAGE_CODE,
	/*page_length*/ {(sizeof(struct copan_power_subpage) - 4) & 0xff00,
			 (sizeof(struct copan_power_subpage) - 4) & 0x00ff},
	/*page_version*/ 0,
	/* total_luns */ 0,
	/* max_active_luns*/ 0,
	/*reserved*/ {0, 0, 0, 0, 0, 0, 0, 0, 0,
		      0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
		      0, 0, 0, 0, 0, 0}
};

static struct copan_aps_subpage aps_page_default = {
	APS_PAGE_CODE | SMPH_SPF, //page_code
	APS_SUBPAGE_CODE, //subpage
	{(sizeof(struct copan_aps_subpage) - 4) & 0xff00,
	 (sizeof(struct copan_aps_subpage) - 4) & 0x00ff}, //page_length
	APS_VERSION, //page_version
	0, //lock_active
	{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	 0, 0, 0, 0, 0} //reserved
};

static struct copan_aps_subpage aps_page_changeable = {
	APS_PAGE_CODE | SMPH_SPF, //page_code
	APS_SUBPAGE_CODE, //subpage
	{(sizeof(struct copan_aps_subpage) - 4) & 0xff00,
	 (sizeof(struct copan_aps_subpage) - 4) & 0x00ff}, //page_length
	0, //page_version
	0, //lock_active
	{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	 0, 0, 0, 0, 0} //reserved
};

static struct copan_debugconf_subpage debugconf_page_default = {
	DBGCNF_PAGE_CODE | SMPH_SPF,	/* page_code */
	DBGCNF_SUBPAGE_CODE,		/* subpage */
	{(sizeof(struct copan_debugconf_subpage) - 4) >> 8,
	 (sizeof(struct copan_debugconf_subpage) - 4) >> 0},	/* page_length */
	DBGCNF_VERSION,			/* page_version */
	{CTL_TIME_IO_DEFAULT_SECS>>8,
	 CTL_TIME_IO_DEFAULT_SECS>>0},	/* ctl_time_io_secs */
};

static struct copan_debugconf_subpage debugconf_page_changeable = {
	DBGCNF_PAGE_CODE | SMPH_SPF,	/* page_code */
	DBGCNF_SUBPAGE_CODE,		/* subpage */
	{(sizeof(struct copan_debugconf_subpage) - 4) >> 8,
	 (sizeof(struct copan_debugconf_subpage) - 4) >> 0},	/* page_length */
	0,				/* page_version */
	{0xff,0xff},			/* ctl_time_io_secs */
};

static struct scsi_format_page format_page_default = {
	/*page_code*/SMS_FORMAT_DEVICE_PAGE,
	/*page_length*/sizeof(struct scsi_format_page) - 2,
	/*tracks_per_zone*/ {0, 0},
	/*alt_sectors_per_zone*/ {0, 0},
	/*alt_tracks_per_zone*/ {0, 0},
	/*alt_tracks_per_lun*/ {0, 0},
	/*sectors_per_track*/ {(CTL_DEFAULT_SECTORS_PER_TRACK >> 8) & 0xff,
			       CTL_DEFAULT_SECTORS_PER_TRACK & 0xff},
	/*bytes_per_sector*/ {0, 0},
	/*interleave*/ {0, 0},
	/*track_skew*/ {0, 0},
	/*cylinder_skew*/ {0, 0},
	/*flags*/ SFP_HSEC,
	/*reserved*/ {0, 0, 0}
};

static struct scsi_format_page format_page_changeable = {
	/*page_code*/SMS_FORMAT_DEVICE_PAGE,
	/*page_length*/sizeof(struct scsi_format_page) - 2,
	/*tracks_per_zone*/ {0, 0},
	/*alt_sectors_per_zone*/ {0, 0},
	/*alt_tracks_per_zone*/ {0, 0},
	/*alt_tracks_per_lun*/ {0, 0},
	/*sectors_per_track*/ {0, 0},
	/*bytes_per_sector*/ {0, 0},
	/*interleave*/ {0, 0},
	/*track_skew*/ {0, 0},
	/*cylinder_skew*/ {0, 0},
	/*flags*/ 0,
	/*reserved*/ {0, 0, 0}
};

static struct scsi_rigid_disk_page rigid_disk_page_default = {
	/*page_code*/SMS_RIGID_DISK_PAGE,
	/*page_length*/sizeof(struct scsi_rigid_disk_page) - 2,
	/*cylinders*/ {0, 0, 0},
	/*heads*/ CTL_DEFAULT_HEADS,
	/*start_write_precomp*/ {0, 0, 0},
	/*start_reduced_current*/ {0, 0, 0},
	/*step_rate*/ {0, 0},
	/*landing_zone_cylinder*/ {0, 0, 0},
	/*rpl*/ SRDP_RPL_DISABLED,
	/*rotational_offset*/ 0,
	/*reserved1*/ 0,
	/*rotation_rate*/ {(CTL_DEFAULT_ROTATION_RATE >> 8) & 0xff,
			   CTL_DEFAULT_ROTATION_RATE & 0xff},
	/*reserved2*/ {0, 0}
};

static struct scsi_rigid_disk_page rigid_disk_page_changeable = {
	/*page_code*/SMS_RIGID_DISK_PAGE,
	/*page_length*/sizeof(struct scsi_rigid_disk_page) - 2,
	/*cylinders*/ {0, 0, 0},
	/*heads*/ 0,
	/*start_write_precomp*/ {0, 0, 0},
	/*start_reduced_current*/ {0, 0, 0},
	/*step_rate*/ {0, 0},
	/*landing_zone_cylinder*/ {0, 0, 0},
	/*rpl*/ 0,
	/*rotational_offset*/ 0,
	/*reserved1*/ 0,
	/*rotation_rate*/ {0, 0},
	/*reserved2*/ {0, 0}
};

static struct scsi_caching_page caching_page_default = {
	/*page_code*/SMS_CACHING_PAGE,
	/*page_length*/sizeof(struct scsi_caching_page) - 2,
	/*flags1*/ SCP_DISC | SCP_WCE,
	/*ret_priority*/ 0,
	/*disable_pf_transfer_len*/ {0xff, 0xff},
	/*min_prefetch*/ {0, 0},
	/*max_prefetch*/ {0xff, 0xff},
	/*max_pf_ceiling*/ {0xff, 0xff},
	/*flags2*/ 0,
	/*cache_segments*/ 0,
	/*cache_seg_size*/ {0, 0},
	/*reserved*/ 0,
	/*non_cache_seg_size*/ {0, 0, 0}
};

static struct scsi_caching_page caching_page_changeable = {
	/*page_code*/SMS_CACHING_PAGE,
	/*page_length*/sizeof(struct scsi_caching_page) - 2,
	/*flags1*/ 0,
	/*ret_priority*/ 0,
	/*disable_pf_transfer_len*/ {0, 0},
	/*min_prefetch*/ {0, 0},
	/*max_prefetch*/ {0, 0},
	/*max_pf_ceiling*/ {0, 0},
	/*flags2*/ 0,
	/*cache_segments*/ 0,
	/*cache_seg_size*/ {0, 0},
	/*reserved*/ 0,
	/*non_cache_seg_size*/ {0, 0, 0}
};

static struct scsi_control_page control_page_default = {
	/*page_code*/SMS_CONTROL_MODE_PAGE,
	/*page_length*/sizeof(struct scsi_control_page) - 2,
	/*rlec*/0,
	/*queue_flags*/0,
	/*eca_and_aen*/0,
	/*reserved*/0,
	/*aen_holdoff_period*/{0, 0}
};

static struct scsi_control_page control_page_changeable = {
	/*page_code*/SMS_CONTROL_MODE_PAGE,
	/*page_length*/sizeof(struct scsi_control_page) - 2,
	/*rlec*/SCP_DSENSE,
	/*queue_flags*/0,
	/*eca_and_aen*/0,
	/*reserved*/0,
	/*aen_holdoff_period*/{0, 0}
};
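
/*
 * Aside, illustrative only (excluded from the build): the "_changeable"
 * templates above mark which bits a MODE SELECT is allowed to modify.  A
 * common way to apply such a mask, sketched here with made-up names, is to
 * keep the unchangeable bits and take the rest from the initiator's data.
 */
#if 0
static uint8_t
ctl_apply_changeable_mask_example(uint8_t current_byte, uint8_t requested_byte,
				  uint8_t changeable_mask)
{
	/* Keep bits that may not change; take the rest from the request. */
	return ((current_byte & ~changeable_mask) |
	    (requested_byte & changeable_mask));
}
#endif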

/*
 * XXX KDM move these into the softc.
 */
static int rcv_sync_msg;
static int persis_offset;
static uint8_t ctl_pause_rtr;
static int ctl_is_single = 1;
static int index_to_aps_page;

SYSCTL_NODE(_kern_cam, OID_AUTO, ctl, CTLFLAG_RD, 0, "CAM Target Layer");

/*
 * Serial number (0x80), device id (0x83), and supported pages (0x00)
 */
#define SCSI_EVPD_NUM_SUPPORTED_PAGES	3

static void ctl_isc_event_handler(ctl_ha_channel channel, ctl_ha_event event,
				  int param);
static void ctl_copy_sense_data(union ctl_ha_msg *src, union ctl_io *dest);
static int ctl_init(void);
void ctl_shutdown(void);
static int ctl_open(struct cdev *dev, int flags, int fmt, struct thread *td);
static int ctl_close(struct cdev *dev, int flags, int fmt, struct thread *td);
static void ctl_ioctl_online(void *arg);
static void ctl_ioctl_offline(void *arg);
static int ctl_ioctl_targ_enable(void *arg, struct ctl_id targ_id);
static int ctl_ioctl_targ_disable(void *arg, struct ctl_id targ_id);
static int ctl_ioctl_lun_enable(void *arg, struct ctl_id targ_id, int lun_id);
static int ctl_ioctl_lun_disable(void *arg, struct ctl_id targ_id, int lun_id);
static int ctl_ioctl_do_datamove(struct ctl_scsiio *ctsio);
static int ctl_serialize_other_sc_cmd(struct ctl_scsiio *ctsio, int have_lock);
static int ctl_ioctl_submit_wait(union ctl_io *io);
static void ctl_ioctl_datamove(union ctl_io *io);
static void ctl_ioctl_done(union ctl_io *io);
static void ctl_ioctl_hard_startstop_callback(void *arg,
					      struct cfi_metatask *metatask);
static void ctl_ioctl_bbrread_callback(void *arg,struct cfi_metatask *metatask);
static int ctl_ioctl_fill_ooa(struct ctl_lun *lun, uint32_t *cur_fill_num,
			      struct ctl_ooa *ooa_hdr,
			      struct ctl_ooa_entry *kern_entries);
static int ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
		     struct thread *td);
uint32_t ctl_get_resindex(struct ctl_nexus *nexus);
uint32_t ctl_port_idx(int port_num);
#ifdef unused
static union ctl_io *ctl_malloc_io(ctl_io_type io_type, uint32_t targ_port,
				   uint32_t targ_target, uint32_t targ_lun,
				   int can_wait);
static void ctl_kfree_io(union ctl_io *io);
#endif /* unused */
static void ctl_free_io_internal(union ctl_io *io, int have_lock);
static int ctl_alloc_lun(struct ctl_softc *ctl_softc, struct ctl_lun *lun,
			 struct ctl_be_lun *be_lun, struct ctl_id target_id);
static int ctl_free_lun(struct ctl_lun *lun);
static void ctl_create_lun(struct ctl_be_lun *be_lun);
/**
static void ctl_failover_change_pages(struct ctl_softc *softc,
				       struct ctl_scsiio *ctsio, int master);
**/

static int ctl_do_mode_select(union ctl_io *io);
static int ctl_pro_preempt(struct ctl_softc *softc, struct ctl_lun *lun,
			   uint64_t res_key, uint64_t sa_res_key,
			   uint8_t type, uint32_t residx,
			   struct ctl_scsiio *ctsio,
			   struct scsi_per_res_out *cdb,
			   struct scsi_per_res_out_parms* param);
static void ctl_pro_preempt_other(struct ctl_lun *lun,
				  union ctl_ha_msg *msg);
static void ctl_hndl_per_res_out_on_other_sc(union ctl_ha_msg *msg);
static int ctl_inquiry_evpd_supported(struct ctl_scsiio *ctsio, int alloc_len);
static int ctl_inquiry_evpd_serial(struct ctl_scsiio *ctsio, int alloc_len);
static int ctl_inquiry_evpd_devid(struct ctl_scsiio *ctsio, int alloc_len);
static int ctl_inquiry_evpd(struct ctl_scsiio *ctsio);
static int ctl_inquiry_std(struct ctl_scsiio *ctsio);
static int ctl_get_lba_len(union ctl_io *io, uint64_t *lba, uint32_t *len);
static ctl_action ctl_extent_check(union ctl_io *io1, union ctl_io *io2);
static ctl_action ctl_check_for_blockage(union ctl_io *pending_io,
					 union ctl_io *ooa_io);
static ctl_action ctl_check_ooa(struct ctl_lun *lun, union ctl_io *pending_io,
				union ctl_io *starting_io);
static int ctl_check_blocked(struct ctl_lun *lun);
static int ctl_scsiio_lun_check(struct ctl_softc *ctl_softc,
				struct ctl_lun *lun,
				struct ctl_cmd_entry *entry,
				struct ctl_scsiio *ctsio);
//static int ctl_check_rtr(union ctl_io *pending_io, struct ctl_softc *softc);
static void ctl_failover(void);
static int ctl_scsiio_precheck(struct ctl_softc *ctl_softc,
			       struct ctl_scsiio *ctsio);
static int ctl_scsiio(struct ctl_scsiio *ctsio);

static int ctl_bus_reset(struct ctl_softc *ctl_softc, union ctl_io *io);
static int ctl_target_reset(struct ctl_softc *ctl_softc, union ctl_io *io,
			    ctl_ua_type ua_type);
static int ctl_lun_reset(struct ctl_lun *lun, union ctl_io *io,
			 ctl_ua_type ua_type);
static int ctl_abort_task(union ctl_io *io);
static void ctl_run_task_queue(struct ctl_softc *ctl_softc);
#ifdef CTL_IO_DELAY
static void ctl_datamove_timer_wakeup(void *arg);
static void ctl_done_timer_wakeup(void *arg);
#endif /* CTL_IO_DELAY */

static void ctl_send_datamove_done(union ctl_io *io, int have_lock);
static void ctl_datamove_remote_write_cb(struct ctl_ha_dt_req *rq);
static int ctl_datamove_remote_dm_write_cb(union ctl_io *io);
static void ctl_datamove_remote_write(union ctl_io *io);
static int ctl_datamove_remote_dm_read_cb(union ctl_io *io);
static void ctl_datamove_remote_read_cb(struct ctl_ha_dt_req *rq);
static int ctl_datamove_remote_sgl_setup(union ctl_io *io);
static int ctl_datamove_remote_xfer(union ctl_io *io, unsigned command,
				    ctl_ha_dt_cb callback);
static void ctl_datamove_remote_read(union ctl_io *io);
static void ctl_datamove_remote(union ctl_io *io);
static int ctl_process_done(union ctl_io *io, int have_lock);
static void ctl_work_thread(void *arg);

/*
 * Load the serialization table.  This isn't very pretty, but is probably
 * the easiest way to do it.
 */
#include "ctl_ser_table.c"

/*
 * We only need to define open, close and ioctl routines for this driver.
 */
static struct cdevsw ctl_cdevsw = {
	.d_version =	D_VERSION,
	.d_flags =	0,
	.d_open =	ctl_open,
	.d_close =	ctl_close,
	.d_ioctl =	ctl_ioctl,
	.d_name =	"ctl",
};


MALLOC_DEFINE(M_CTL, "ctlmem", "Memory used for CTL");

static int ctl_module_event_handler(module_t, int /*modeventtype_t*/, void *);

static moduledata_t ctl_moduledata = {
	"ctl",
	ctl_module_event_handler,
	NULL
};

DECLARE_MODULE(ctl, ctl_moduledata, SI_SUB_CONFIGURE, SI_ORDER_THIRD);
MODULE_VERSION(ctl, 1);

static void
ctl_isc_handler_finish_xfer(struct ctl_softc *ctl_softc,
			    union ctl_ha_msg *msg_info)
{
	struct ctl_scsiio *ctsio;

	if (msg_info->hdr.original_sc == NULL) {
		printf("%s: original_sc == NULL!\n", __func__);
		/* XXX KDM now what? */
		return;
	}

	ctsio = &msg_info->hdr.original_sc->scsiio;
	ctsio->io_hdr.flags |= CTL_FLAG_IO_ACTIVE;
	ctsio->io_hdr.msg_type = CTL_MSG_FINISH_IO;
	ctsio->io_hdr.status = msg_info->hdr.status;
	ctsio->scsi_status = msg_info->scsi.scsi_status;
	ctsio->sense_len = msg_info->scsi.sense_len;
	ctsio->sense_residual = msg_info->scsi.sense_residual;
	ctsio->residual = msg_info->scsi.residual;
	memcpy(&ctsio->sense_data, &msg_info->scsi.sense_data,
	       sizeof(ctsio->sense_data));
	memcpy(&ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN].bytes,
	       &msg_info->scsi.lbalen, sizeof(msg_info->scsi.lbalen));
	STAILQ_INSERT_TAIL(&ctl_softc->isc_queue, &ctsio->io_hdr, links);
	ctl_wakeup_thread();
}

static void
ctl_isc_handler_finish_ser_only(struct ctl_softc *ctl_softc,
				union ctl_ha_msg *msg_info)
{
	struct ctl_scsiio *ctsio;

	if (msg_info->hdr.serializing_sc == NULL) {
		printf("%s: serializing_sc == NULL!\n", __func__);
		/* XXX KDM now what? */
		return;
	}

	ctsio = &msg_info->hdr.serializing_sc->scsiio;
#if 0
	/*
	 * Attempt to catch the situation where an I/O has
	 * been freed, and we're using it again.
	 */
	if (ctsio->io_hdr.io_type == 0xff) {
		union ctl_io *tmp_io;
		tmp_io = (union ctl_io *)ctsio;
		printf("%s: %p use after free!\n", __func__,
		       ctsio);
		printf("%s: type %d msg %d cdb %x iptl: "
		       "%d:%d:%d:%d tag 0x%04x "
		       "flag %#x status %x\n",
		       __func__,
		       tmp_io->io_hdr.io_type,
		       tmp_io->io_hdr.msg_type,
		       tmp_io->scsiio.cdb[0],
		       tmp_io->io_hdr.nexus.initid.id,
		       tmp_io->io_hdr.nexus.targ_port,
		       tmp_io->io_hdr.nexus.targ_target.id,
		       tmp_io->io_hdr.nexus.targ_lun,
		       (tmp_io->io_hdr.io_type ==
		       CTL_IO_TASK) ?
		       tmp_io->taskio.tag_num :
		       tmp_io->scsiio.tag_num,
		       tmp_io->io_hdr.flags,
		       tmp_io->io_hdr.status);
	}
#endif
	ctsio->io_hdr.msg_type = CTL_MSG_FINISH_IO;
	STAILQ_INSERT_TAIL(&ctl_softc->isc_queue, &ctsio->io_hdr, links);
	ctl_wakeup_thread();
}

/*
 * ISC (Inter Shelf Communication) event handler.  Events from the HA
 * subsystem come in here.
 */
static void
ctl_isc_event_handler(ctl_ha_channel channel, ctl_ha_event event, int param)
{
	struct ctl_softc *ctl_softc;
	union ctl_io *io;
	struct ctl_prio *presio;
	ctl_ha_status isc_status;

	ctl_softc = control_softc;
	io = NULL;


#if 0
	printf("CTL: Isc Msg event %d\n", event);
#endif
	if (event == CTL_HA_EVT_MSG_RECV) {
		union ctl_ha_msg msg_info;

		isc_status = ctl_ha_msg_recv(CTL_HA_CHAN_CTL, &msg_info,
					     sizeof(msg_info), /*wait*/ 0);
#if 0
		printf("CTL: msg_type %d\n", msg_info.msg_type);
#endif
		if (isc_status != 0) {
			printf("Error receiving message, status = %d\n",
			       isc_status);
			return;
		}
		mtx_lock(&ctl_softc->ctl_lock);

		switch (msg_info.hdr.msg_type) {
		case CTL_MSG_SERIALIZE:
#if 0
			printf("Serialize\n");
#endif
			io = ctl_alloc_io((void *)ctl_softc->othersc_pool);
			if (io == NULL) {
				printf("ctl_isc_event_handler: can't allocate "
				       "ctl_io!\n");
				/* Bad Juju */
				/* Need to set busy and send msg back */
				mtx_unlock(&ctl_softc->ctl_lock);
				msg_info.hdr.msg_type = CTL_MSG_BAD_JUJU;
				msg_info.hdr.status = CTL_SCSI_ERROR;
				msg_info.scsi.scsi_status = SCSI_STATUS_BUSY;
				msg_info.scsi.sense_len = 0;
				if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
				    sizeof(msg_info), 0) > CTL_HA_STATUS_SUCCESS){
				}
				goto bailout;
			}
			ctl_zero_io(io);
			// populate ctsio from msg_info
			io->io_hdr.io_type = CTL_IO_SCSI;
			io->io_hdr.msg_type = CTL_MSG_SERIALIZE;
			io->io_hdr.original_sc = msg_info.hdr.original_sc;
#if 0
			printf("pOrig %x\n", (int)msg_info.original_sc);
#endif
			io->io_hdr.flags |= CTL_FLAG_FROM_OTHER_SC |
					    CTL_FLAG_IO_ACTIVE;
			/*
			 * If we're in serialization-only mode, we don't
			 * want to go through full done processing.  Thus
			 * the COPY flag.
			 *
			 * XXX KDM add another flag that is more specific.
			 */
			if (ctl_softc->ha_mode == CTL_HA_MODE_SER_ONLY)
				io->io_hdr.flags |= CTL_FLAG_INT_COPY;
			io->io_hdr.nexus = msg_info.hdr.nexus;
#if 0
			printf("targ %d, port %d, iid %d, lun %d\n",
			       io->io_hdr.nexus.targ_target.id,
			       io->io_hdr.nexus.targ_port,
			       io->io_hdr.nexus.initid.id,
			       io->io_hdr.nexus.targ_lun);
#endif
			io->scsiio.tag_num = msg_info.scsi.tag_num;
			io->scsiio.tag_type = msg_info.scsi.tag_type;
			memcpy(io->scsiio.cdb, msg_info.scsi.cdb,
			       CTL_MAX_CDBLEN);
			if (ctl_softc->ha_mode == CTL_HA_MODE_XFER) {
				struct ctl_cmd_entry *entry;
				uint8_t opcode;

				opcode = io->scsiio.cdb[0];
				entry = &ctl_cmd_table[opcode];
				io->io_hdr.flags &= ~CTL_FLAG_DATA_MASK;
				io->io_hdr.flags |=
					entry->flags & CTL_FLAG_DATA_MASK;
			}
			STAILQ_INSERT_TAIL(&ctl_softc->isc_queue,
					   &io->io_hdr, links);
			ctl_wakeup_thread();
			break;

		/* Performed on the Originating SC, XFER mode only */
		case CTL_MSG_DATAMOVE: {
			struct ctl_sg_entry *sgl;
			int i, j;

			io = msg_info.hdr.original_sc;
			if (io == NULL) {
				printf("%s: original_sc == NULL!\n", __func__);
				/* XXX KDM do something here */
				break;
			}
			io->io_hdr.msg_type = CTL_MSG_DATAMOVE;
			io->io_hdr.flags |= CTL_FLAG_IO_ACTIVE;
			/*
			 * Keep track of this, we need to send it back over
			 * when the datamove is complete.
			 */
			io->io_hdr.serializing_sc = msg_info.hdr.serializing_sc;

			if (msg_info.dt.sg_sequence == 0) {
				/*
				 * XXX KDM we use the preallocated S/G list
				 * here, but we'll need to change this to
				 * dynamic allocation if we need larger S/G
				 * lists.
				 */
				if (msg_info.dt.kern_sg_entries >
				    sizeof(io->io_hdr.remote_sglist) /
				    sizeof(io->io_hdr.remote_sglist[0])) {
					printf("%s: number of S/G entries "
					    "needed %u > allocated num %zd\n",
					    __func__,
					    msg_info.dt.kern_sg_entries,
					    sizeof(io->io_hdr.remote_sglist)/
					    sizeof(io->io_hdr.remote_sglist[0]));

					/*
					 * XXX KDM send a message back to
					 * the other side to shut down the
					 * DMA.  The error will come back
					 * through via the normal channel.
					 */
					break;
				}
				sgl = io->io_hdr.remote_sglist;
				memset(sgl, 0,
				       sizeof(io->io_hdr.remote_sglist));

				io->scsiio.kern_data_ptr = (uint8_t *)sgl;

				io->scsiio.kern_sg_entries =
					msg_info.dt.kern_sg_entries;
				io->scsiio.rem_sg_entries =
					msg_info.dt.kern_sg_entries;
				io->scsiio.kern_data_len =
					msg_info.dt.kern_data_len;
				io->scsiio.kern_total_len =
					msg_info.dt.kern_total_len;
				io->scsiio.kern_data_resid =
					msg_info.dt.kern_data_resid;
				io->scsiio.kern_rel_offset =
					msg_info.dt.kern_rel_offset;
				/*
				 * Clear out per-DMA flags.
				 */
				io->io_hdr.flags &= ~CTL_FLAG_RDMA_MASK;
				/*
				 * Add per-DMA flags that are set for this
				 * particular DMA request.
				 */
				io->io_hdr.flags |= msg_info.dt.flags &
						    CTL_FLAG_RDMA_MASK;
			} else
				sgl = (struct ctl_sg_entry *)
					io->scsiio.kern_data_ptr;

			for (i = msg_info.dt.sent_sg_entries, j = 0;
			     i < (msg_info.dt.sent_sg_entries +
			     msg_info.dt.cur_sg_entries); i++, j++) {
				sgl[i].addr = msg_info.dt.sg_list[j].addr;
				sgl[i].len = msg_info.dt.sg_list[j].len;

#if 0
				printf("%s: L: %p,%d -> %p,%d j=%d, i=%d\n",
				       __func__,
				       msg_info.dt.sg_list[j].addr,
				       msg_info.dt.sg_list[j].len,
				       sgl[i].addr, sgl[i].len, j, i);
#endif
			}
#if 0
			memcpy(&sgl[msg_info.dt.sent_sg_entries],
			       msg_info.dt.sg_list,
			       sizeof(*sgl) * msg_info.dt.cur_sg_entries);
#endif

			/*
			 * If this is the last piece of the I/O, we've got
			 * the full S/G list.  Queue processing in the thread.
			 * Otherwise wait for the next piece.
			 */
			if (msg_info.dt.sg_last != 0) {
				STAILQ_INSERT_TAIL(&ctl_softc->isc_queue,
						   &io->io_hdr, links);
				ctl_wakeup_thread();
			}
			break;
		}
		/* Performed on the Serializing (primary) SC, XFER mode only */
		case CTL_MSG_DATAMOVE_DONE: {
			if (msg_info.hdr.serializing_sc == NULL) {
				printf("%s: serializing_sc == NULL!\n",
				       __func__);
				/* XXX KDM now what? */
				break;
			}
			/*
			 * We grab the sense information here in case
			 * there was a failure, so we can return status
			 * back to the initiator.
			 */
			io = msg_info.hdr.serializing_sc;
			io->io_hdr.msg_type = CTL_MSG_DATAMOVE_DONE;
			io->io_hdr.status = msg_info.hdr.status;
			io->scsiio.scsi_status = msg_info.scsi.scsi_status;
			io->scsiio.sense_len = msg_info.scsi.sense_len;
			io->scsiio.sense_residual =msg_info.scsi.sense_residual;
			io->io_hdr.port_status = msg_info.scsi.fetd_status;
			io->scsiio.residual = msg_info.scsi.residual;
			memcpy(&io->scsiio.sense_data,&msg_info.scsi.sense_data,
			       sizeof(io->scsiio.sense_data));

			STAILQ_INSERT_TAIL(&ctl_softc->isc_queue,
					   &io->io_hdr, links);
			ctl_wakeup_thread();
			break;
		}

		/* Performed on the Originating SC, SER_ONLY mode */
		case CTL_MSG_R2R:
			io = msg_info.hdr.original_sc;
			if (io == NULL) {
				printf("%s: Major Bummer\n", __func__);
				mtx_unlock(&ctl_softc->ctl_lock);
				return;
			} else {
#if 0
				printf("pOrig %x\n",(int) ctsio);
#endif
			}
			io->io_hdr.msg_type = CTL_MSG_R2R;
			io->io_hdr.serializing_sc = msg_info.hdr.serializing_sc;
			STAILQ_INSERT_TAIL(&ctl_softc->isc_queue,
					   &io->io_hdr, links);
			ctl_wakeup_thread();
			break;

		/*
		 * Performed on the Serializing (i.e. primary) SC in SER_ONLY
		 * mode.
		 * Performed on the Originating (i.e. secondary) SC in XFER
		 * mode.
		 */
		case CTL_MSG_FINISH_IO:
			if (ctl_softc->ha_mode == CTL_HA_MODE_XFER)
				ctl_isc_handler_finish_xfer(ctl_softc,
							    &msg_info);
			else
				ctl_isc_handler_finish_ser_only(ctl_softc,
								&msg_info);
			break;

		/* Performed on the Originating SC */
		case CTL_MSG_BAD_JUJU:
			io = msg_info.hdr.original_sc;
			if (io == NULL) {
				printf("%s: Bad JUJU!, original_sc is NULL!\n",
				       __func__);
				break;
			}
			ctl_copy_sense_data(&msg_info, io);
			/*
			 * IO should have already been cleaned up on other
			 * SC so clear this flag so we won't send a message
			 * back to finish the IO there.
			 */
			io->io_hdr.flags &= ~CTL_FLAG_SENT_2OTHER_SC;
			io->io_hdr.flags |= CTL_FLAG_IO_ACTIVE;

			/* io = msg_info.hdr.serializing_sc; */
			io->io_hdr.msg_type = CTL_MSG_BAD_JUJU;
			STAILQ_INSERT_TAIL(&ctl_softc->isc_queue,
					   &io->io_hdr, links);
			ctl_wakeup_thread();
			break;

		/* Handle resets sent from the other side */
		case CTL_MSG_MANAGE_TASKS: {
			struct ctl_taskio *taskio;
			taskio = (struct ctl_taskio *)ctl_alloc_io(
				(void *)ctl_softc->othersc_pool);
			if (taskio == NULL) {
				printf("ctl_isc_event_handler: can't allocate "
				       "ctl_io!\n");
				/* Bad Juju */
				/* should I just call the proper reset func
				   here??? */
				mtx_unlock(&ctl_softc->ctl_lock);
				goto bailout;
			}
			ctl_zero_io((union ctl_io *)taskio);
			taskio->io_hdr.io_type = CTL_IO_TASK;
			taskio->io_hdr.flags |= CTL_FLAG_FROM_OTHER_SC;
			taskio->io_hdr.nexus = msg_info.hdr.nexus;
			taskio->task_action = msg_info.task.task_action;
			taskio->tag_num = msg_info.task.tag_num;
			taskio->tag_type = msg_info.task.tag_type;
#ifdef CTL_TIME_IO
			taskio->io_hdr.start_time = time_uptime;
			getbintime(&taskio->io_hdr.start_bt);
#if 0
			cs_prof_gettime(&taskio->io_hdr.start_ticks);
#endif
#endif /* CTL_TIME_IO */
			STAILQ_INSERT_TAIL(&ctl_softc->task_queue,
					   &taskio->io_hdr, links);
			ctl_softc->flags |= CTL_FLAG_TASK_PENDING;
			ctl_wakeup_thread();
			break;
		}
		/* Persistent Reserve action which needs attention */
		case CTL_MSG_PERS_ACTION:
			presio = (struct ctl_prio *)ctl_alloc_io(
				(void *)ctl_softc->othersc_pool);
			if (presio == NULL) {
				printf("ctl_isc_event_handler: can't allocate "
				       "ctl_io!\n");
				/* Bad Juju */
				/* Need to set busy and send msg back */
				mtx_unlock(&ctl_softc->ctl_lock);
				goto bailout;
			}
			ctl_zero_io((union ctl_io *)presio);
			presio->io_hdr.msg_type = CTL_MSG_PERS_ACTION;
			presio->pr_msg = msg_info.pr;
			STAILQ_INSERT_TAIL(&ctl_softc->isc_queue,
					   &presio->io_hdr, links);
			ctl_wakeup_thread();
			break;
		case CTL_MSG_SYNC_FE:
			rcv_sync_msg = 1;
			break;
		case CTL_MSG_APS_LOCK: {
			// It's quicker to execute this than to
			// queue it.
			struct ctl_lun *lun;
			struct ctl_page_index *page_index;
			struct copan_aps_subpage *current_sp;

			lun = ctl_softc->ctl_luns[msg_info.hdr.nexus.targ_lun];
			page_index = &lun->mode_pages.index[index_to_aps_page];
			current_sp = (struct copan_aps_subpage *)
				     (page_index->page_data +
				     (page_index->page_len * CTL_PAGE_CURRENT));

			current_sp->lock_active = msg_info.aps.lock_flag;
			break;
		}
		default:
			printf("How did I get here?\n");
		}
		mtx_unlock(&ctl_softc->ctl_lock);
	} else if (event == CTL_HA_EVT_MSG_SENT) {
		if (param != CTL_HA_STATUS_SUCCESS) {
			printf("Bad status from ctl_ha_msg_send status %d\n",
			       param);
		}
		return;
	} else if (event == CTL_HA_EVT_DISCONNECT) {
		printf("CTL: Got a disconnect from Isc\n");
		return;
	} else {
		printf("ctl_isc_event_handler: Unknown event %d\n", event);
		return;
	}

bailout:
	return;
}

static void
ctl_copy_sense_data(union ctl_ha_msg *src, union ctl_io *dest)
{
	struct scsi_sense_data *sense;

	sense = &dest->scsiio.sense_data;
	bcopy(&src->scsi.sense_data, sense, sizeof(*sense));
	dest->scsiio.scsi_status = src->scsi.scsi_status;
	dest->scsiio.sense_len = src->scsi.sense_len;
	dest->io_hdr.status = src->hdr.status;
}

static int
ctl_init(void)
{
	struct ctl_softc *softc;
	struct ctl_io_pool *internal_pool, *emergency_pool, *other_pool;
	struct ctl_frontend *fe;
	struct ctl_lun *lun;
	uint8_t sc_id =0;
#if 0
	int i;
#endif
	int error, retval;
	//int isc_retval;

	retval = 0;
	ctl_pause_rtr = 0;
	rcv_sync_msg = 0;

	control_softc = malloc(sizeof(*control_softc), M_DEVBUF,
			       M_WAITOK | M_ZERO);
	softc = control_softc;

	softc->dev = make_dev(&ctl_cdevsw, 0, UID_ROOT, GID_OPERATOR, 0600,
			      "cam/ctl");

	softc->dev->si_drv1 = softc;

	/*
	 * By default, return a "bad LUN" peripheral qualifier for unknown
	 * LUNs.  The user can override this default using the tunable or
	 * sysctl.  See the comment in ctl_inquiry_std() for more details.
	 */
	softc->inquiry_pq_no_lun = 1;
	TUNABLE_INT_FETCH("kern.cam.ctl.inquiry_pq_no_lun",
			  &softc->inquiry_pq_no_lun);
	sysctl_ctx_init(&softc->sysctl_ctx);
	softc->sysctl_tree = SYSCTL_ADD_NODE(&softc->sysctl_ctx,
		SYSCTL_STATIC_CHILDREN(_kern_cam), OID_AUTO, "ctl",
		CTLFLAG_RD, 0, "CAM Target Layer");

	if (softc->sysctl_tree == NULL) {
		printf("%s: unable to allocate sysctl tree\n", __func__);
		destroy_dev(softc->dev);
		free(control_softc, M_DEVBUF);
		control_softc = NULL;
		return (ENOMEM);
	}

	SYSCTL_ADD_INT(&softc->sysctl_ctx,
		       SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO,
		       "inquiry_pq_no_lun", CTLFLAG_RW,
		       &softc->inquiry_pq_no_lun, 0,
		       "Report no lun possible for invalid LUNs");

	mtx_init(&softc->ctl_lock, "CTL mutex", NULL, MTX_DEF);
	softc->open_count = 0;

	/*
	 * Default to actually sending a SYNCHRONIZE CACHE command down to
	 * the drive.
	 */
	softc->flags = CTL_FLAG_REAL_SYNC;

	/*
	 * In Copan's HA scheme, the "master" and "slave" roles are
	 * figured out through the slot the controller is in.  Although it
	 * is an active/active system, someone has to be in charge.
	 */
#ifdef NEEDTOPORT
	scmicro_rw(SCMICRO_GET_SHELF_ID, &sc_id);
#endif

	if (sc_id == 0) {
		softc->flags |= CTL_FLAG_MASTER_SHELF;
		persis_offset = 0;
	} else
		persis_offset = CTL_MAX_INITIATORS;

	/*
	 * XXX KDM need to figure out where we want to get our target ID
	 * and WWID.  Is it different on each port?
	 */
	softc->target.id = 0;
	softc->target.wwid[0] = 0x12345678;
	softc->target.wwid[1] = 0x87654321;
	STAILQ_INIT(&softc->lun_list);
	STAILQ_INIT(&softc->pending_lun_queue);
	STAILQ_INIT(&softc->task_queue);
	STAILQ_INIT(&softc->incoming_queue);
	STAILQ_INIT(&softc->rtr_queue);
	STAILQ_INIT(&softc->done_queue);
	STAILQ_INIT(&softc->isc_queue);
	STAILQ_INIT(&softc->fe_list);
	STAILQ_INIT(&softc->be_list);
	STAILQ_INIT(&softc->io_pools);

	lun = &softc->lun;

	/*
	 * We don't bother calling these with ctl_lock held here, because,
	 * in theory, no one else can try to do anything while we're in our
	 * module init routine.
	 */
	if (ctl_pool_create(softc, CTL_POOL_INTERNAL, CTL_POOL_ENTRIES_INTERNAL,
			    &internal_pool)!= 0){
		printf("ctl: can't allocate %d entry internal pool, "
		       "exiting\n", CTL_POOL_ENTRIES_INTERNAL);
		return (ENOMEM);
	}

	if (ctl_pool_create(softc, CTL_POOL_EMERGENCY,
			    CTL_POOL_ENTRIES_EMERGENCY, &emergency_pool) != 0) {
		printf("ctl: can't allocate %d entry emergency pool, "
		       "exiting\n", CTL_POOL_ENTRIES_EMERGENCY);
		ctl_pool_free(softc, internal_pool);
		return (ENOMEM);
	}

	if (ctl_pool_create(softc, CTL_POOL_4OTHERSC, CTL_POOL_ENTRIES_OTHER_SC,
			    &other_pool) != 0)
	{
		printf("ctl: can't allocate %d entry other SC pool, "
		       "exiting\n", CTL_POOL_ENTRIES_OTHER_SC);
		ctl_pool_free(softc, internal_pool);
		ctl_pool_free(softc, emergency_pool);
		return (ENOMEM);
	}

	softc->internal_pool = internal_pool;
	softc->emergency_pool = emergency_pool;
	softc->othersc_pool = other_pool;

	mtx_lock(&softc->ctl_lock);
	ctl_pool_acquire(internal_pool);
	ctl_pool_acquire(emergency_pool);
	ctl_pool_acquire(other_pool);
	mtx_unlock(&softc->ctl_lock);

	/*
	 * We used to allocate a processor LUN here.  The new scheme is to
	 * just let the user allocate LUNs as he sees fit.
	 */
#if 0
	mtx_lock(&softc->ctl_lock);
	ctl_alloc_lun(softc, lun, /*be_lun*/NULL, /*target*/softc->target);
	mtx_unlock(&softc->ctl_lock);
#endif

	error = kproc_create(ctl_work_thread, softc, &softc->work_thread, 0, 0,
			     "ctl_thrd");
	if (error != 0) {
		printf("error creating CTL work thread!\n");
		mtx_lock(&softc->ctl_lock);
		ctl_free_lun(lun);
		ctl_pool_free(softc, internal_pool);
		ctl_pool_free(softc, emergency_pool);
		ctl_pool_free(softc, other_pool);
		mtx_unlock(&softc->ctl_lock);
		return (error);
	}
	printf("ctl: CAM Target Layer loaded\n");

	/*
	 * Initialize the initiator and portname mappings
	 */
	memset(softc->wwpn_iid, 0, sizeof(softc->wwpn_iid));

	/*
	 * Initialize the ioctl front end.
	 */
	fe = &softc->ioctl_info.fe;
	sprintf(softc->ioctl_info.port_name, "CTL ioctl");
	fe->port_type = CTL_PORT_IOCTL;
	fe->num_requested_ctl_io = 100;
	fe->port_name = softc->ioctl_info.port_name;
	fe->port_online = ctl_ioctl_online;
	fe->port_offline = ctl_ioctl_offline;
	fe->onoff_arg = &softc->ioctl_info;
	fe->targ_enable = ctl_ioctl_targ_enable;
	fe->targ_disable = ctl_ioctl_targ_disable;
	fe->lun_enable = ctl_ioctl_lun_enable;
	fe->lun_disable = ctl_ioctl_lun_disable;
	fe->targ_lun_arg = &softc->ioctl_info;
	fe->fe_datamove = ctl_ioctl_datamove;
	fe->fe_done = ctl_ioctl_done;
	fe->max_targets = 15;
	fe->max_target_id = 15;

	if (ctl_frontend_register(&softc->ioctl_info.fe,
				  (softc->flags & CTL_FLAG_MASTER_SHELF)) != 0) {
		printf("ctl: ioctl front end registration failed, will "
		       "continue anyway\n");
	}

#ifdef CTL_IO_DELAY
	if (sizeof(struct callout) > CTL_TIMER_BYTES) {
		printf("sizeof(struct callout) %zd > CTL_TIMER_BYTES %zd\n",
		       sizeof(struct callout), CTL_TIMER_BYTES);
		return (EINVAL);
	}
#endif /* CTL_IO_DELAY */

	return (0);
}

void
ctl_shutdown(void)
{
	struct ctl_softc *softc;
	struct ctl_lun *lun, *next_lun;
	struct ctl_io_pool *pool, *next_pool;

	softc = (struct ctl_softc *)control_softc;

	if (ctl_frontend_deregister(&softc->ioctl_info.fe) != 0)
		printf("ctl: ioctl front end deregistration failed\n");

	mtx_lock(&softc->ctl_lock);

	/*
	 * Free up each LUN.
	 */
	for (lun = STAILQ_FIRST(&softc->lun_list); lun != NULL; lun = next_lun){
		next_lun = STAILQ_NEXT(lun, links);
		ctl_free_lun(lun);
	}

	/*
	 * This will rip the rug out from under any FETDs or anyone else
	 * that has a pool allocated.  Since we increment our module
	 * refcount any time someone outside the main CTL module allocates
	 * a pool, we shouldn't have any problems here.  The user won't be
	 * able to unload the CTL module until client modules have
	 * successfully unloaded.
	 */
	for (pool = STAILQ_FIRST(&softc->io_pools); pool != NULL;
	     pool = next_pool) {
		next_pool = STAILQ_NEXT(pool, links);
		ctl_pool_free(softc, pool);
	}

	mtx_unlock(&softc->ctl_lock);

#if 0
	ctl_shutdown_thread(softc->work_thread);
#endif

	mtx_destroy(&softc->ctl_lock);

	destroy_dev(softc->dev);

	sysctl_ctx_free(&softc->sysctl_ctx);

	free(control_softc, M_DEVBUF);
	control_softc = NULL;

	printf("ctl: CAM Target Layer unloaded\n");
}

static int
ctl_module_event_handler(module_t mod, int what, void *arg)
{

	switch (what) {
	case MOD_LOAD:
		return (ctl_init());
	case MOD_UNLOAD:
		return (EBUSY);
	default:
		return (EOPNOTSUPP);
	}
}

/*
 * XXX KDM should we do some access checks here?  Bump a reference count to
 * prevent a CTL module from being unloaded while someone has it open?
 */
static int
ctl_open(struct cdev *dev, int flags, int fmt, struct thread *td)
{
	return (0);
}

static int
ctl_close(struct cdev *dev, int flags, int fmt, struct thread *td)
{
	return (0);
}

int
ctl_port_enable(ctl_port_type port_type)
{
	struct ctl_softc *softc;
	struct ctl_frontend *fe;

	if (ctl_is_single == 0) {
		union ctl_ha_msg msg_info;
		int isc_retval;

#if 0
		printf("%s: HA mode, synchronizing frontend enable\n",
		       __func__);
#endif
		msg_info.hdr.msg_type = CTL_MSG_SYNC_FE;
		if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
		    sizeof(msg_info), 1 )) > CTL_HA_STATUS_SUCCESS) {
			printf("Sync msg send error retval %d\n", isc_retval);
		}
		if (!rcv_sync_msg) {
			isc_retval=ctl_ha_msg_recv(CTL_HA_CHAN_CTL, &msg_info,
			    sizeof(msg_info), 1);
		}
#if 0
        	printf("CTL:Frontend Enable\n");
	} else {
		printf("%s: single mode, skipping frontend synchronization\n",
		       __func__);
#endif
	}

	softc = control_softc;

	STAILQ_FOREACH(fe, &softc->fe_list, links) {
		if (port_type & fe->port_type)
		{
#if 0
			printf("port %d\n", fe->targ_port);
#endif
			ctl_frontend_online(fe);
		}
	}

	return (0);
}

int
ctl_port_disable(ctl_port_type port_type)
{
	struct ctl_softc *softc;
	struct ctl_frontend *fe;

	softc = control_softc;

	STAILQ_FOREACH(fe, &softc->fe_list, links) {
		if (port_type & fe->port_type)
			ctl_frontend_offline(fe);
	}

	return (0);
}
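
/*
 * Illustrative sketch only (kept out of the build with #if 0): one way a
 * caller might use ctl_port_list() below, reallocating and retrying when the
 * first array turns out to be too small, as described in the comment that
 * follows.  The helper name and sizes are made up for the example.
 */
#if 0
static int
ctl_port_list_example(ctl_port_type port_type)
{
	struct ctl_port_entry *entries;
	int alloced, filled, dropped;

	alloced = 16;
	for (;;) {
		entries = malloc(alloced * sizeof(*entries), M_CTL, M_WAITOK);
		if (ctl_port_list(entries, alloced, &filled, &dropped,
		    port_type, /*no_virtual*/ 0) == 0)
			break;
		/* Not enough room; grow by the number of dropped entries. */
		alloced += dropped;
		free(entries, M_CTL);
	}
	/* ... use the 'filled' entries here ... */
	free(entries, M_CTL);
	return (0);
}
#endif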

/*
 * Returns 0 for success, 1 for failure.
 * Currently the only failure mode is if there aren't enough entries
 * allocated.  So, in case of a failure, look at num_entries_dropped,
 * reallocate and try again.
 */
int
ctl_port_list(struct ctl_port_entry *entries, int num_entries_alloced,
	      int *num_entries_filled, int *num_entries_dropped,
	      ctl_port_type port_type, int no_virtual)
{
	struct ctl_softc *softc;
	struct ctl_frontend *fe;
	int entries_dropped, entries_filled;
	int retval;
	int i;

	softc = control_softc;

	retval = 0;
	entries_filled = 0;
	entries_dropped = 0;

	i = 0;
	mtx_lock(&softc->ctl_lock);
	STAILQ_FOREACH(fe, &softc->fe_list, links) {
		struct ctl_port_entry *entry;

		if ((fe->port_type & port_type) == 0)
			continue;

		if ((no_virtual != 0)
		 && (fe->virtual_port != 0))
			continue;

		if (entries_filled >= num_entries_alloced) {
			entries_dropped++;
			continue;
		}
		entry = &entries[i];

		entry->port_type = fe->port_type;
		strlcpy(entry->port_name, fe->port_name,
			sizeof(entry->port_name));
		entry->physical_port = fe->physical_port;
		entry->virtual_port = fe->virtual_port;
		entry->wwnn = fe->wwnn;
		entry->wwpn = fe->wwpn;

		i++;
		entries_filled++;
	}

	mtx_unlock(&softc->ctl_lock);

	if (entries_dropped > 0)
		retval = 1;

	*num_entries_dropped = entries_dropped;
	*num_entries_filled = entries_filled;

	return (retval);
}

static void
ctl_ioctl_online(void *arg)
{
	struct ctl_ioctl_info *ioctl_info;

	ioctl_info = (struct ctl_ioctl_info *)arg;

	ioctl_info->flags |= CTL_IOCTL_FLAG_ENABLED;
}

static void
ctl_ioctl_offline(void *arg)
{
	struct ctl_ioctl_info *ioctl_info;

	ioctl_info = (struct ctl_ioctl_info *)arg;

	ioctl_info->flags &= ~CTL_IOCTL_FLAG_ENABLED;
}

/*
 * Remove an initiator by port number and initiator ID.
 * Returns 0 for success, 1 for failure.
 */
int
ctl_remove_initiator(int32_t targ_port, uint32_t iid)
{
	struct ctl_softc *softc;

	softc = control_softc;

	mtx_assert(&softc->ctl_lock, MA_NOTOWNED);

	if ((targ_port < 0)
	 || (targ_port > CTL_MAX_PORTS)) {
		printf("%s: invalid port number %d\n", __func__, targ_port);
		return (1);
	}
	if (iid > CTL_MAX_INIT_PER_PORT) {
		printf("%s: initiator ID %u > maximum %u!\n",
		       __func__, iid, CTL_MAX_INIT_PER_PORT);
		return (1);
	}

	mtx_lock(&softc->ctl_lock);

	softc->wwpn_iid[targ_port][iid].in_use = 0;

	mtx_unlock(&softc->ctl_lock);

	return (0);
}

/*
 * Add an initiator to the initiator map.
 * Returns 0 for success, 1 for failure.
 */
int
ctl_add_initiator(uint64_t wwpn, int32_t targ_port, uint32_t iid)
{
	struct ctl_softc *softc;
	int retval;

	softc = control_softc;

	mtx_assert(&softc->ctl_lock, MA_NOTOWNED);

	retval = 0;

	if ((targ_port < 0)
	 || (targ_port > CTL_MAX_PORTS)) {
		printf("%s: invalid port number %d\n", __func__, targ_port);
		return (1);
	}
	if (iid > CTL_MAX_INIT_PER_PORT) {
		printf("%s: WWPN %#jx initiator ID %u > maximum %u!\n",
		       __func__, wwpn, iid, CTL_MAX_INIT_PER_PORT);
		return (1);
	}

	mtx_lock(&softc->ctl_lock);

	if (softc->wwpn_iid[targ_port][iid].in_use != 0) {
		/*
		 * We don't treat this as an error.
		 */
		if (softc->wwpn_iid[targ_port][iid].wwpn == wwpn) {
			printf("%s: port %d iid %u WWPN %#jx arrived again?\n",
			       __func__, targ_port, iid, (uintmax_t)wwpn);
			goto bailout;
		}

		/*
		 * This is an error, but what do we do about it?  The
		 * driver is telling us we have a new WWPN for this
		 * initiator ID, so we pretty much need to use it.
		 */
		printf("%s: port %d iid %u WWPN %#jx arrived, WWPN %#jx is "
		       "still at that address\n", __func__, targ_port, iid,
		       (uintmax_t)wwpn,
		       (uintmax_t)softc->wwpn_iid[targ_port][iid].wwpn);

		/*
		 * XXX KDM clear have_ca and ua_pending on each LUN for
		 * this initiator.
		 */
	}
	softc->wwpn_iid[targ_port][iid].in_use = 1;
	softc->wwpn_iid[targ_port][iid].iid = iid;
	softc->wwpn_iid[targ_port][iid].wwpn = wwpn;
	softc->wwpn_iid[targ_port][iid].port = targ_port;

bailout:

	mtx_unlock(&softc->ctl_lock);

	return (retval);
}

/*
 * XXX KDM should we pretend to do something in the target/lun
 * enable/disable functions?
 */
static int
ctl_ioctl_targ_enable(void *arg, struct ctl_id targ_id)
{
	return (0);
}

static int
ctl_ioctl_targ_disable(void *arg, struct ctl_id targ_id)
{
	return (0);
}

static int
ctl_ioctl_lun_enable(void *arg, struct ctl_id targ_id, int lun_id)
{
	return (0);
}

static int
ctl_ioctl_lun_disable(void *arg, struct ctl_id targ_id, int lun_id)
{
	return (0);
}

/*
 * Data movement routine for the CTL ioctl frontend port.
 */
static int
ctl_ioctl_do_datamove(struct ctl_scsiio *ctsio)
{
	struct ctl_sg_entry *ext_sglist, *kern_sglist;
	struct ctl_sg_entry ext_entry, kern_entry;
	int ext_sglen, ext_sg_entries, kern_sg_entries;
	int ext_sg_start, ext_offset;
	int len_to_copy, len_copied;
	int kern_watermark, ext_watermark;
	int ext_sglist_malloced;
	int i, j;

	ext_sglist_malloced = 0;
	ext_sg_start = 0;
	ext_offset = 0;

	CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove\n"));

	/*
	 * If this flag is set, fake the data transfer.
	 */
	if (ctsio->io_hdr.flags & CTL_FLAG_NO_DATAMOVE) {
		ctsio->ext_data_filled = ctsio->ext_data_len;
		goto bailout;
	}

	/*
	 * To simplify things here, if we have a single buffer, stick it in
	 * a S/G entry and just make it a single entry S/G list.
	 */
	if (ctsio->io_hdr.flags & CTL_FLAG_EDPTR_SGLIST) {
		int len_seen;

		ext_sglen = ctsio->ext_sg_entries * sizeof(*ext_sglist);

		ext_sglist = (struct ctl_sg_entry *)malloc(ext_sglen, M_CTL,
							   M_WAITOK);
		ext_sglist_malloced = 1;
		if (copyin(ctsio->ext_data_ptr, ext_sglist,
			   ext_sglen) != 0) {
			ctl_set_internal_failure(ctsio,
						 /*sks_valid*/ 0,
						 /*retry_count*/ 0);
			goto bailout;
		}
		ext_sg_entries = ctsio->ext_sg_entries;
		len_seen = 0;
		for (i = 0; i < ext_sg_entries; i++) {
			if ((len_seen + ext_sglist[i].len) >=
			     ctsio->ext_data_filled) {
				ext_sg_start = i;
				ext_offset = ctsio->ext_data_filled - len_seen;
				break;
			}
			len_seen += ext_sglist[i].len;
		}
	} else {
		ext_sglist = &ext_entry;
		ext_sglist->addr = ctsio->ext_data_ptr;
		ext_sglist->len = ctsio->ext_data_len;
		ext_sg_entries = 1;
		ext_sg_start = 0;
		ext_offset = ctsio->ext_data_filled;
	}

	if (ctsio->kern_sg_entries > 0) {
		kern_sglist = (struct ctl_sg_entry *)ctsio->kern_data_ptr;
		kern_sg_entries = ctsio->kern_sg_entries;
	} else {
		kern_sglist = &kern_entry;
		kern_sglist->addr = ctsio->kern_data_ptr;
		kern_sglist->len = ctsio->kern_data_len;
		kern_sg_entries = 1;
	}


	kern_watermark = 0;
	ext_watermark = ext_offset;
	len_copied = 0;
	for (i = ext_sg_start, j = 0;
	     i < ext_sg_entries && j < kern_sg_entries;) {
		uint8_t *ext_ptr, *kern_ptr;

		len_to_copy = ctl_min(ext_sglist[i].len - ext_watermark,
				      kern_sglist[j].len - kern_watermark);

		ext_ptr = (uint8_t *)ext_sglist[i].addr;
		ext_ptr = ext_ptr + ext_watermark;
		if (ctsio->io_hdr.flags & CTL_FLAG_BUS_ADDR) {
			/*
			 * XXX KDM fix this!
			 */
			panic("need to implement bus address support");
#if 0
			kern_ptr = bus_to_virt(kern_sglist[j].addr);
#endif
		} else
			kern_ptr = (uint8_t *)kern_sglist[j].addr;
		kern_ptr = kern_ptr + kern_watermark;

		kern_watermark += len_to_copy;
		ext_watermark += len_to_copy;

		if ((ctsio->io_hdr.flags & CTL_FLAG_DATA_MASK) ==
		     CTL_FLAG_DATA_IN) {
			CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove: copying %d "
					 "bytes to user\n", len_to_copy));
			CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove: from %p "
					 "to %p\n", kern_ptr, ext_ptr));
			if (copyout(kern_ptr, ext_ptr, len_to_copy) != 0) {
				ctl_set_internal_failure(ctsio,
							 /*sks_valid*/ 0,
							 /*retry_count*/ 0);
				goto bailout;
			}
		} else {
			CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove: copying %d "
					 "bytes from user\n", len_to_copy));
			CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove: from %p "
					 "to %p\n", ext_ptr, kern_ptr));
			if (copyin(ext_ptr, kern_ptr, len_to_copy)!= 0){
				ctl_set_internal_failure(ctsio,
							 /*sks_valid*/ 0,
							 /*retry_count*/0);
				goto bailout;
			}
		}

		len_copied += len_to_copy;

		if (ext_sglist[i].len == ext_watermark) {
			i++;
			ext_watermark = 0;
		}

		if (kern_sglist[j].len == kern_watermark) {
			j++;
			kern_watermark = 0;
		}
	}

	ctsio->ext_data_filled += len_copied;

	CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove: ext_sg_entries: %d, "
			 "kern_sg_entries: %d\n", ext_sg_entries,
			 kern_sg_entries));
	CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove: ext_data_len = %d, "
			 "kern_data_len = %d\n", ctsio->ext_data_len,
			 ctsio->kern_data_len));


	/* XXX KDM set residual?? */
bailout:

	if (ext_sglist_malloced != 0)
		free(ext_sglist, M_CTL);

	return (CTL_RETVAL_COMPLETE);
}

/*
 * Serialize a command that went down the "wrong" side, and so was sent to
 * this controller for execution.  The logic is a little different than the
 * standard case in ctl_scsiio_precheck().  Errors in this case need to get
 * sent back to the other side, but in the success case, we execute the
 * command on this side (XFER mode) or tell the other side to execute it
 * (SER_ONLY mode).
 */
static int
ctl_serialize_other_sc_cmd(struct ctl_scsiio *ctsio, int have_lock)
{
	struct ctl_softc *ctl_softc;
	union ctl_ha_msg msg_info;
	struct ctl_lun *lun;
	int retval = 0;

	ctl_softc = control_softc;
	if (have_lock == 0)
		mtx_lock(&ctl_softc->ctl_lock);

	lun = ctl_softc->ctl_luns[ctsio->io_hdr.nexus.targ_lun];
	if (lun==NULL)
	{
		/*
		 * Why isn't LUN defined? The other side wouldn't
		 * send a cmd if the LUN is undefined.
		 */
		printf("%s: Bad JUJU!, LUN is NULL!\n", __func__);

		/* "Logical unit not supported" */
		ctl_set_sense_data(&msg_info.scsi.sense_data,
				   lun,
				   /*sense_format*/SSD_TYPE_NONE,
				   /*current_error*/ 1,
				   /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
				   /*asc*/ 0x25,
				   /*ascq*/ 0x00,
				   SSD_ELEM_NONE);

		msg_info.scsi.sense_len = SSD_FULL_SIZE;
		msg_info.scsi.scsi_status = SCSI_STATUS_CHECK_COND;
		msg_info.hdr.status = CTL_SCSI_ERROR | CTL_AUTOSENSE;
		msg_info.hdr.original_sc = ctsio->io_hdr.original_sc;
		msg_info.hdr.serializing_sc = NULL;
		msg_info.hdr.msg_type = CTL_MSG_BAD_JUJU;
		if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
				sizeof(msg_info), 0 ) > CTL_HA_STATUS_SUCCESS) {
		}
		if (have_lock == 0)
			mtx_unlock(&ctl_softc->ctl_lock);
		return(1);

	}

	TAILQ_INSERT_TAIL(&lun->ooa_queue, &ctsio->io_hdr, ooa_links);

	switch (ctl_check_ooa(lun, (union ctl_io *)ctsio,
		(union ctl_io *)TAILQ_PREV(&ctsio->io_hdr, ctl_ooaq,
		 ooa_links))) {
	case CTL_ACTION_BLOCK:
		ctsio->io_hdr.flags |= CTL_FLAG_BLOCKED;
		TAILQ_INSERT_TAIL(&lun->blocked_queue, &ctsio->io_hdr,
				  blocked_links);
		break;
	case CTL_ACTION_PASS:
	case CTL_ACTION_SKIP:
		if (ctl_softc->ha_mode == CTL_HA_MODE_XFER) {
			ctsio->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR;
			STAILQ_INSERT_TAIL(&ctl_softc->rtr_queue,
					   &ctsio->io_hdr, links);
		} else {

			/* send msg back to other side */
			msg_info.hdr.original_sc = ctsio->io_hdr.original_sc;
			msg_info.hdr.serializing_sc = (union ctl_io *)ctsio;
			msg_info.hdr.msg_type = CTL_MSG_R2R;
#if 0
			printf("2. pOrig %x\n", (int)msg_info.hdr.original_sc);
#endif
			if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
			    sizeof(msg_info), 0 ) > CTL_HA_STATUS_SUCCESS) {
			}
		}
		break;
	case CTL_ACTION_OVERLAP:
		/* OVERLAPPED COMMANDS ATTEMPTED */
		ctl_set_sense_data(&msg_info.scsi.sense_data,
				   lun,
				   /*sense_format*/SSD_TYPE_NONE,
				   /*current_error*/ 1,
				   /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
				   /*asc*/ 0x4E,
				   /*ascq*/ 0x00,
				   SSD_ELEM_NONE);

		msg_info.scsi.sense_len = SSD_FULL_SIZE;
		msg_info.scsi.scsi_status = SCSI_STATUS_CHECK_COND;
		msg_info.hdr.status = CTL_SCSI_ERROR | CTL_AUTOSENSE;
		msg_info.hdr.original_sc = ctsio->io_hdr.original_sc;
		msg_info.hdr.serializing_sc = NULL;
		msg_info.hdr.msg_type = CTL_MSG_BAD_JUJU;
#if 0
		printf("BAD JUJU:Major Bummer Overlap\n");
#endif
		TAILQ_REMOVE(&lun->ooa_queue, &ctsio->io_hdr, ooa_links);
		retval = 1;
		if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
		    sizeof(msg_info), 0 ) > CTL_HA_STATUS_SUCCESS) {
		}
		break;
	case CTL_ACTION_OVERLAP_TAG:
		/* TAGGED OVERLAPPED COMMANDS (NN = QUEUE TAG) */
		ctl_set_sense_data(&msg_info.scsi.sense_data,
				   lun,
				   /*sense_format*/SSD_TYPE_NONE,
				   /*current_error*/ 1,
				   /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
				   /*asc*/ 0x4D,
				   /*ascq*/ ctsio->tag_num & 0xff,
				   SSD_ELEM_NONE);

		msg_info.scsi.sense_len = SSD_FULL_SIZE;
		msg_info.scsi.scsi_status = SCSI_STATUS_CHECK_COND;
		msg_info.hdr.status = CTL_SCSI_ERROR | CTL_AUTOSENSE;
		msg_info.hdr.original_sc = ctsio->io_hdr.original_sc;
		msg_info.hdr.serializing_sc = NULL;
		msg_info.hdr.msg_type = CTL_MSG_BAD_JUJU;
#if 0
		printf("BAD JUJU:Major Bummer Overlap Tag\n");
#endif
		TAILQ_REMOVE(&lun->ooa_queue, &ctsio->io_hdr, ooa_links);
		retval = 1;
		if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
		    sizeof(msg_info), 0 ) > CTL_HA_STATUS_SUCCESS) {
		}
		break;
	case CTL_ACTION_ERROR:
	default:
		/* "Internal target failure" */
		ctl_set_sense_data(&msg_info.scsi.sense_data,
				   lun,
				   /*sense_format*/SSD_TYPE_NONE,
				   /*current_error*/ 1,
				   /*sense_key*/ SSD_KEY_HARDWARE_ERROR,
				   /*asc*/ 0x44,
				   /*ascq*/ 0x00,
				   SSD_ELEM_NONE);

		msg_info.scsi.sense_len = SSD_FULL_SIZE;
		msg_info.scsi.scsi_status = SCSI_STATUS_CHECK_COND;
		msg_info.hdr.status = CTL_SCSI_ERROR | CTL_AUTOSENSE;
		msg_info.hdr.original_sc = ctsio->io_hdr.original_sc;
		msg_info.hdr.serializing_sc = NULL;
		msg_info.hdr.msg_type = CTL_MSG_BAD_JUJU;
#if 0
		printf("BAD JUJU:Major Bummer HW Error\n");
#endif
		TAILQ_REMOVE(&lun->ooa_queue, &ctsio->io_hdr, ooa_links);
		retval = 1;
		if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
		    sizeof(msg_info), 0 ) > CTL_HA_STATUS_SUCCESS) {
		}
		break;
	}
	if (have_lock == 0)
		mtx_unlock(&ctl_softc->ctl_lock);
	return (retval);
}

static int
ctl_ioctl_submit_wait(union ctl_io *io)
{
	struct ctl_fe_ioctl_params params;
	ctl_fe_ioctl_state last_state;
	int done, retval;

	retval = 0;

	bzero(&params, sizeof(params));

	mtx_init(&params.ioctl_mtx, "ctliocmtx", NULL, MTX_DEF);
	cv_init(&params.sem, "ctlioccv");
	params.state = CTL_IOCTL_INPROG;
	last_state = params.state;

	io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = &params;

	CTL_DEBUG_PRINT(("ctl_ioctl_submit_wait\n"));

	/* This shouldn't happen */
	if ((retval = ctl_queue(io)) != CTL_RETVAL_COMPLETE)
		return (retval);

	done = 0;

	do {
		mtx_lock(&params.ioctl_mtx);
		/*
		 * Check the state here, and don't sleep if the state has
		 * already changed (i.e. wakeup has already occurred, but we
		 * weren't waiting yet).
		 */
		if (params.state == last_state) {
			/* XXX KDM cv_wait_sig instead? */
			cv_wait(&params.sem, &params.ioctl_mtx);
		}
		last_state = params.state;

		switch (params.state) {
		case CTL_IOCTL_INPROG:
			/* Why did we wake up? */
			/* XXX KDM error here? */
			mtx_unlock(&params.ioctl_mtx);
			break;
		case CTL_IOCTL_DATAMOVE:
			CTL_DEBUG_PRINT(("got CTL_IOCTL_DATAMOVE\n"));

			/*
			 * change last_state back to INPROG to avoid
			 * deadlock on subsequent data moves.
			 */
			params.state = last_state = CTL_IOCTL_INPROG;

			mtx_unlock(&params.ioctl_mtx);
			ctl_ioctl_do_datamove(&io->scsiio);
			/*
			 * Note that in some cases, most notably writes,
			 * this will queue the I/O and call us back later.
			 * In other cases, generally reads, this routine
			 * will immediately call back and wake us up,
			 * probably using our own context.
			 */
			io->scsiio.be_move_done(io);
			break;
		case CTL_IOCTL_DONE:
			mtx_unlock(&params.ioctl_mtx);
			CTL_DEBUG_PRINT(("got CTL_IOCTL_DONE\n"));
			done = 1;
			break;
		default:
			mtx_unlock(&params.ioctl_mtx);
			/* XXX KDM error here? */
*/ 1908 break; 1909 } 1910 } while (done == 0); 1911 1912 mtx_destroy(¶ms.ioctl_mtx); 1913 cv_destroy(¶ms.sem); 1914 1915 return (CTL_RETVAL_COMPLETE); 1916 } 1917 1918 static void 1919 ctl_ioctl_datamove(union ctl_io *io) 1920 { 1921 struct ctl_fe_ioctl_params *params; 1922 1923 params = (struct ctl_fe_ioctl_params *) 1924 io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr; 1925 1926 mtx_lock(¶ms->ioctl_mtx); 1927 params->state = CTL_IOCTL_DATAMOVE; 1928 cv_broadcast(¶ms->sem); 1929 mtx_unlock(¶ms->ioctl_mtx); 1930 } 1931 1932 static void 1933 ctl_ioctl_done(union ctl_io *io) 1934 { 1935 struct ctl_fe_ioctl_params *params; 1936 1937 params = (struct ctl_fe_ioctl_params *) 1938 io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr; 1939 1940 mtx_lock(¶ms->ioctl_mtx); 1941 params->state = CTL_IOCTL_DONE; 1942 cv_broadcast(¶ms->sem); 1943 mtx_unlock(¶ms->ioctl_mtx); 1944 } 1945 1946 static void 1947 ctl_ioctl_hard_startstop_callback(void *arg, struct cfi_metatask *metatask) 1948 { 1949 struct ctl_fe_ioctl_startstop_info *sd_info; 1950 1951 sd_info = (struct ctl_fe_ioctl_startstop_info *)arg; 1952 1953 sd_info->hs_info.status = metatask->status; 1954 sd_info->hs_info.total_luns = metatask->taskinfo.startstop.total_luns; 1955 sd_info->hs_info.luns_complete = 1956 metatask->taskinfo.startstop.luns_complete; 1957 sd_info->hs_info.luns_failed = metatask->taskinfo.startstop.luns_failed; 1958 1959 cv_broadcast(&sd_info->sem); 1960 } 1961 1962 static void 1963 ctl_ioctl_bbrread_callback(void *arg, struct cfi_metatask *metatask) 1964 { 1965 struct ctl_fe_ioctl_bbrread_info *fe_bbr_info; 1966 1967 fe_bbr_info = (struct ctl_fe_ioctl_bbrread_info *)arg; 1968 1969 mtx_lock(fe_bbr_info->lock); 1970 fe_bbr_info->bbr_info->status = metatask->status; 1971 fe_bbr_info->bbr_info->bbr_status = metatask->taskinfo.bbrread.status; 1972 fe_bbr_info->wakeup_done = 1; 1973 mtx_unlock(fe_bbr_info->lock); 1974 1975 cv_broadcast(&fe_bbr_info->sem); 1976 } 1977 1978 /* 1979 * Returns 0 for success, errno for failure. 1980 */ 1981 static int 1982 ctl_ioctl_fill_ooa(struct ctl_lun *lun, uint32_t *cur_fill_num, 1983 struct ctl_ooa *ooa_hdr, struct ctl_ooa_entry *kern_entries) 1984 { 1985 union ctl_io *io; 1986 int retval; 1987 1988 retval = 0; 1989 1990 mtx_assert(&control_softc->ctl_lock, MA_OWNED); 1991 1992 for (io = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); (io != NULL); 1993 (*cur_fill_num)++, io = (union ctl_io *)TAILQ_NEXT(&io->io_hdr, 1994 ooa_links)) { 1995 struct ctl_ooa_entry *entry; 1996 1997 /* 1998 * If we've got more than we can fit, just count the 1999 * remaining entries. 
2000 */ 2001 if (*cur_fill_num >= ooa_hdr->alloc_num) 2002 continue; 2003 2004 entry = &kern_entries[*cur_fill_num]; 2005 2006 entry->tag_num = io->scsiio.tag_num; 2007 entry->lun_num = lun->lun; 2008 #ifdef CTL_TIME_IO 2009 entry->start_bt = io->io_hdr.start_bt; 2010 #endif 2011 bcopy(io->scsiio.cdb, entry->cdb, io->scsiio.cdb_len); 2012 entry->cdb_len = io->scsiio.cdb_len; 2013 if (io->io_hdr.flags & CTL_FLAG_BLOCKED) 2014 entry->cmd_flags |= CTL_OOACMD_FLAG_BLOCKED; 2015 2016 if (io->io_hdr.flags & CTL_FLAG_DMA_INPROG) 2017 entry->cmd_flags |= CTL_OOACMD_FLAG_DMA; 2018 2019 if (io->io_hdr.flags & CTL_FLAG_ABORT) 2020 entry->cmd_flags |= CTL_OOACMD_FLAG_ABORT; 2021 2022 if (io->io_hdr.flags & CTL_FLAG_IS_WAS_ON_RTR) 2023 entry->cmd_flags |= CTL_OOACMD_FLAG_RTR; 2024 2025 if (io->io_hdr.flags & CTL_FLAG_DMA_QUEUED) 2026 entry->cmd_flags |= CTL_OOACMD_FLAG_DMA_QUEUED; 2027 } 2028 2029 return (retval); 2030 } 2031 2032 static void * 2033 ctl_copyin_alloc(void *user_addr, int len, char *error_str, 2034 size_t error_str_len) 2035 { 2036 void *kptr; 2037 2038 kptr = malloc(len, M_CTL, M_WAITOK | M_ZERO); 2039 2040 if (copyin(user_addr, kptr, len) != 0) { 2041 snprintf(error_str, error_str_len, "Error copying %d bytes " 2042 "from user address %p to kernel address %p", len, 2043 user_addr, kptr); 2044 free(kptr, M_CTL); 2045 return (NULL); 2046 } 2047 2048 return (kptr); 2049 } 2050 2051 static void 2052 ctl_free_args(int num_be_args, struct ctl_be_arg *be_args) 2053 { 2054 int i; 2055 2056 if (be_args == NULL) 2057 return; 2058 2059 for (i = 0; i < num_be_args; i++) { 2060 free(be_args[i].kname, M_CTL); 2061 free(be_args[i].kvalue, M_CTL); 2062 } 2063 2064 free(be_args, M_CTL); 2065 } 2066 2067 static struct ctl_be_arg * 2068 ctl_copyin_args(int num_be_args, struct ctl_be_arg *be_args, 2069 char *error_str, size_t error_str_len) 2070 { 2071 struct ctl_be_arg *args; 2072 int i; 2073 2074 args = ctl_copyin_alloc(be_args, num_be_args * sizeof(*be_args), 2075 error_str, error_str_len); 2076 2077 if (args == NULL) 2078 goto bailout; 2079 2080 for (i = 0; i < num_be_args; i++) { 2081 args[i].kname = NULL; 2082 args[i].kvalue = NULL; 2083 } 2084 2085 for (i = 0; i < num_be_args; i++) { 2086 uint8_t *tmpptr; 2087 2088 args[i].kname = ctl_copyin_alloc(args[i].name, 2089 args[i].namelen, error_str, error_str_len); 2090 if (args[i].kname == NULL) 2091 goto bailout; 2092 2093 if (args[i].kname[args[i].namelen - 1] != '\0') { 2094 snprintf(error_str, error_str_len, "Argument %d " 2095 "name is not NUL-terminated", i); 2096 goto bailout; 2097 } 2098 2099 args[i].kvalue = NULL; 2100 2101 tmpptr = ctl_copyin_alloc(args[i].value, 2102 args[i].vallen, error_str, error_str_len); 2103 if (tmpptr == NULL) 2104 goto bailout; 2105 2106 args[i].kvalue = tmpptr; 2107 2108 if ((args[i].flags & CTL_BEARG_ASCII) 2109 && (tmpptr[args[i].vallen - 1] != '\0')) { 2110 snprintf(error_str, error_str_len, "Argument %d " 2111 "value is not NUL-terminated", i); 2112 goto bailout; 2113 } 2114 } 2115 2116 return (args); 2117 bailout: 2118 2119 ctl_free_args(num_be_args, args); 2120 2121 return (NULL); 2122 } 2123 2124 /* 2125 * Escape characters that are illegal or not recommended in XML. 
 */
int
ctl_sbuf_printf_esc(struct sbuf *sb, char *str)
{
	int retval;

	retval = 0;

	for (; *str; str++) {
		switch (*str) {
		case '&':
			retval = sbuf_printf(sb, "&amp;");
			break;
		case '>':
			retval = sbuf_printf(sb, "&gt;");
			break;
		case '<':
			retval = sbuf_printf(sb, "&lt;");
			break;
		default:
			retval = sbuf_putc(sb, *str);
			break;
		}

		if (retval != 0)
			break;

	}

	return (retval);
}

static int
ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
	  struct thread *td)
{
	struct ctl_softc *softc;
	int retval;

	softc = control_softc;

	retval = 0;

	switch (cmd) {
	case CTL_IO: {
		union ctl_io *io;
		void *pool_tmp;

		/*
		 * If we haven't been "enabled", don't allow any SCSI I/O
		 * to this FETD.
		 */
		if ((softc->ioctl_info.flags & CTL_IOCTL_FLAG_ENABLED) == 0) {
			retval = -EPERM;
			break;
		}

		io = ctl_alloc_io(softc->ioctl_info.fe.ctl_pool_ref);
		if (io == NULL) {
			printf("ctl_ioctl: can't allocate ctl_io!\n");
			retval = -ENOSPC;
			break;
		}

		/*
		 * Need to save the pool reference so it doesn't get
		 * spammed by the user's ctl_io.
		 */
		pool_tmp = io->io_hdr.pool;

		memcpy(io, (void *)addr, sizeof(*io));

		io->io_hdr.pool = pool_tmp;

		/*
		 * No status yet, so make sure the status is set properly.
		 */
		io->io_hdr.status = CTL_STATUS_NONE;

		/*
		 * The user sets the initiator ID, target and LUN IDs.
		 */
		io->io_hdr.nexus.targ_port = softc->ioctl_info.fe.targ_port;
		io->io_hdr.flags |= CTL_FLAG_USER_REQ;
		if ((io->io_hdr.io_type == CTL_IO_SCSI)
		 && (io->scsiio.tag_type != CTL_TAG_UNTAGGED))
			io->scsiio.tag_num = softc->ioctl_info.cur_tag_num++;

		retval = ctl_ioctl_submit_wait(io);

		if (retval != 0) {
			ctl_free_io(io);
			break;
		}

		memcpy((void *)addr, io, sizeof(*io));

		/* return this to our pool */
		ctl_free_io(io);

		break;
	}
	case CTL_ENABLE_PORT:
	case CTL_DISABLE_PORT:
	case CTL_SET_PORT_WWNS: {
		struct ctl_frontend *fe;
		struct ctl_port_entry *entry;

		entry = (struct ctl_port_entry *)addr;

		mtx_lock(&softc->ctl_lock);
		STAILQ_FOREACH(fe, &softc->fe_list, links) {
			int action, done;

			action = 0;
			done = 0;

			if ((entry->port_type == CTL_PORT_NONE)
			 && (entry->targ_port == fe->targ_port)) {
				/*
				 * If the user only wants to enable or
				 * disable or set WWNs on a specific port,
				 * do the operation and we're done.
				 */
				action = 1;
				done = 1;
			} else if (entry->port_type & fe->port_type) {
				/*
				 * Compare the user's type mask with the
				 * particular frontend type to see if we
				 * have a match.
				 */
				action = 1;
				done = 0;

				/*
				 * Make sure the user isn't trying to set
				 * WWNs on multiple ports at the same time.
				 */
				if (cmd == CTL_SET_PORT_WWNS) {
					printf("%s: Can't set WWNs on "
					       "multiple ports\n", __func__);
					retval = EINVAL;
					break;
				}
			}
			if (action != 0) {
				/*
				 * XXX KDM we have to drop the lock here,
				 * because the online/offline operations
				 * can potentially block.
We need to 2276 * reference count the frontends so they 2277 * can't go away, 2278 */ 2279 mtx_unlock(&softc->ctl_lock); 2280 2281 if (cmd == CTL_ENABLE_PORT) { 2282 struct ctl_lun *lun; 2283 2284 STAILQ_FOREACH(lun, &softc->lun_list, 2285 links) { 2286 fe->lun_enable(fe->targ_lun_arg, 2287 lun->target, 2288 lun->lun); 2289 } 2290 2291 ctl_frontend_online(fe); 2292 } else if (cmd == CTL_DISABLE_PORT) { 2293 struct ctl_lun *lun; 2294 2295 ctl_frontend_offline(fe); 2296 2297 STAILQ_FOREACH(lun, &softc->lun_list, 2298 links) { 2299 fe->lun_disable( 2300 fe->targ_lun_arg, 2301 lun->target, 2302 lun->lun); 2303 } 2304 } 2305 2306 mtx_lock(&softc->ctl_lock); 2307 2308 if (cmd == CTL_SET_PORT_WWNS) 2309 ctl_frontend_set_wwns(fe, 2310 (entry->flags & CTL_PORT_WWNN_VALID) ? 2311 1 : 0, entry->wwnn, 2312 (entry->flags & CTL_PORT_WWPN_VALID) ? 2313 1 : 0, entry->wwpn); 2314 } 2315 if (done != 0) 2316 break; 2317 } 2318 mtx_unlock(&softc->ctl_lock); 2319 break; 2320 } 2321 case CTL_GET_PORT_LIST: { 2322 struct ctl_frontend *fe; 2323 struct ctl_port_list *list; 2324 int i; 2325 2326 list = (struct ctl_port_list *)addr; 2327 2328 if (list->alloc_len != (list->alloc_num * 2329 sizeof(struct ctl_port_entry))) { 2330 printf("%s: CTL_GET_PORT_LIST: alloc_len %u != " 2331 "alloc_num %u * sizeof(struct ctl_port_entry) " 2332 "%zu\n", __func__, list->alloc_len, 2333 list->alloc_num, sizeof(struct ctl_port_entry)); 2334 retval = EINVAL; 2335 break; 2336 } 2337 list->fill_len = 0; 2338 list->fill_num = 0; 2339 list->dropped_num = 0; 2340 i = 0; 2341 mtx_lock(&softc->ctl_lock); 2342 STAILQ_FOREACH(fe, &softc->fe_list, links) { 2343 struct ctl_port_entry entry, *list_entry; 2344 2345 if (list->fill_num >= list->alloc_num) { 2346 list->dropped_num++; 2347 continue; 2348 } 2349 2350 entry.port_type = fe->port_type; 2351 strlcpy(entry.port_name, fe->port_name, 2352 sizeof(entry.port_name)); 2353 entry.targ_port = fe->targ_port; 2354 entry.physical_port = fe->physical_port; 2355 entry.virtual_port = fe->virtual_port; 2356 entry.wwnn = fe->wwnn; 2357 entry.wwpn = fe->wwpn; 2358 if (fe->status & CTL_PORT_STATUS_ONLINE) 2359 entry.online = 1; 2360 else 2361 entry.online = 0; 2362 2363 list_entry = &list->entries[i]; 2364 2365 retval = copyout(&entry, list_entry, sizeof(entry)); 2366 if (retval != 0) { 2367 printf("%s: CTL_GET_PORT_LIST: copyout " 2368 "returned %d\n", __func__, retval); 2369 break; 2370 } 2371 i++; 2372 list->fill_num++; 2373 list->fill_len += sizeof(entry); 2374 } 2375 mtx_unlock(&softc->ctl_lock); 2376 2377 /* 2378 * If this is non-zero, we had a copyout fault, so there's 2379 * probably no point in attempting to set the status inside 2380 * the structure. 2381 */ 2382 if (retval != 0) 2383 break; 2384 2385 if (list->dropped_num > 0) 2386 list->status = CTL_PORT_LIST_NEED_MORE_SPACE; 2387 else 2388 list->status = CTL_PORT_LIST_OK; 2389 break; 2390 } 2391 case CTL_DUMP_OOA: { 2392 struct ctl_lun *lun; 2393 union ctl_io *io; 2394 char printbuf[128]; 2395 struct sbuf sb; 2396 2397 mtx_lock(&softc->ctl_lock); 2398 printf("Dumping OOA queues:\n"); 2399 STAILQ_FOREACH(lun, &softc->lun_list, links) { 2400 for (io = (union ctl_io *)TAILQ_FIRST( 2401 &lun->ooa_queue); io != NULL; 2402 io = (union ctl_io *)TAILQ_NEXT(&io->io_hdr, 2403 ooa_links)) { 2404 sbuf_new(&sb, printbuf, sizeof(printbuf), 2405 SBUF_FIXEDLEN); 2406 sbuf_printf(&sb, "LUN %jd tag 0x%04x%s%s%s%s: ", 2407 (intmax_t)lun->lun, 2408 io->scsiio.tag_num, 2409 (io->io_hdr.flags & 2410 CTL_FLAG_BLOCKED) ? 
"" : " BLOCKED", 2411 (io->io_hdr.flags & 2412 CTL_FLAG_DMA_INPROG) ? " DMA" : "", 2413 (io->io_hdr.flags & 2414 CTL_FLAG_ABORT) ? " ABORT" : "", 2415 (io->io_hdr.flags & 2416 CTL_FLAG_IS_WAS_ON_RTR) ? " RTR" : ""); 2417 ctl_scsi_command_string(&io->scsiio, NULL, &sb); 2418 sbuf_finish(&sb); 2419 printf("%s\n", sbuf_data(&sb)); 2420 } 2421 } 2422 printf("OOA queues dump done\n"); 2423 mtx_unlock(&softc->ctl_lock); 2424 break; 2425 } 2426 case CTL_GET_OOA: { 2427 struct ctl_lun *lun; 2428 struct ctl_ooa *ooa_hdr; 2429 struct ctl_ooa_entry *entries; 2430 uint32_t cur_fill_num; 2431 2432 ooa_hdr = (struct ctl_ooa *)addr; 2433 2434 if ((ooa_hdr->alloc_len == 0) 2435 || (ooa_hdr->alloc_num == 0)) { 2436 printf("%s: CTL_GET_OOA: alloc len %u and alloc num %u " 2437 "must be non-zero\n", __func__, 2438 ooa_hdr->alloc_len, ooa_hdr->alloc_num); 2439 retval = EINVAL; 2440 break; 2441 } 2442 2443 if (ooa_hdr->alloc_len != (ooa_hdr->alloc_num * 2444 sizeof(struct ctl_ooa_entry))) { 2445 printf("%s: CTL_GET_OOA: alloc len %u must be alloc " 2446 "num %d * sizeof(struct ctl_ooa_entry) %zd\n", 2447 __func__, ooa_hdr->alloc_len, 2448 ooa_hdr->alloc_num,sizeof(struct ctl_ooa_entry)); 2449 retval = EINVAL; 2450 break; 2451 } 2452 2453 entries = malloc(ooa_hdr->alloc_len, M_CTL, M_WAITOK | M_ZERO); 2454 if (entries == NULL) { 2455 printf("%s: could not allocate %d bytes for OOA " 2456 "dump\n", __func__, ooa_hdr->alloc_len); 2457 retval = ENOMEM; 2458 break; 2459 } 2460 2461 mtx_lock(&softc->ctl_lock); 2462 if (((ooa_hdr->flags & CTL_OOA_FLAG_ALL_LUNS) == 0) 2463 && ((ooa_hdr->lun_num > CTL_MAX_LUNS) 2464 || (softc->ctl_luns[ooa_hdr->lun_num] == NULL))) { 2465 mtx_unlock(&softc->ctl_lock); 2466 free(entries, M_CTL); 2467 printf("%s: CTL_GET_OOA: invalid LUN %ju\n", 2468 __func__, (uintmax_t)ooa_hdr->lun_num); 2469 retval = EINVAL; 2470 break; 2471 } 2472 2473 cur_fill_num = 0; 2474 2475 if (ooa_hdr->flags & CTL_OOA_FLAG_ALL_LUNS) { 2476 STAILQ_FOREACH(lun, &softc->lun_list, links) { 2477 retval = ctl_ioctl_fill_ooa(lun, &cur_fill_num, 2478 ooa_hdr, entries); 2479 if (retval != 0) 2480 break; 2481 } 2482 if (retval != 0) { 2483 mtx_unlock(&softc->ctl_lock); 2484 free(entries, M_CTL); 2485 break; 2486 } 2487 } else { 2488 lun = softc->ctl_luns[ooa_hdr->lun_num]; 2489 2490 retval = ctl_ioctl_fill_ooa(lun, &cur_fill_num,ooa_hdr, 2491 entries); 2492 } 2493 mtx_unlock(&softc->ctl_lock); 2494 2495 ooa_hdr->fill_num = min(cur_fill_num, ooa_hdr->alloc_num); 2496 ooa_hdr->fill_len = ooa_hdr->fill_num * 2497 sizeof(struct ctl_ooa_entry); 2498 retval = copyout(entries, ooa_hdr->entries, ooa_hdr->fill_len); 2499 if (retval != 0) { 2500 printf("%s: error copying out %d bytes for OOA dump\n", 2501 __func__, ooa_hdr->fill_len); 2502 } 2503 2504 getbintime(&ooa_hdr->cur_bt); 2505 2506 if (cur_fill_num > ooa_hdr->alloc_num) { 2507 ooa_hdr->dropped_num = cur_fill_num -ooa_hdr->alloc_num; 2508 ooa_hdr->status = CTL_OOA_NEED_MORE_SPACE; 2509 } else { 2510 ooa_hdr->dropped_num = 0; 2511 ooa_hdr->status = CTL_OOA_OK; 2512 } 2513 2514 free(entries, M_CTL); 2515 break; 2516 } 2517 case CTL_CHECK_OOA: { 2518 union ctl_io *io; 2519 struct ctl_lun *lun; 2520 struct ctl_ooa_info *ooa_info; 2521 2522 2523 ooa_info = (struct ctl_ooa_info *)addr; 2524 2525 if (ooa_info->lun_id >= CTL_MAX_LUNS) { 2526 ooa_info->status = CTL_OOA_INVALID_LUN; 2527 break; 2528 } 2529 mtx_lock(&softc->ctl_lock); 2530 lun = softc->ctl_luns[ooa_info->lun_id]; 2531 if (lun == NULL) { 2532 mtx_unlock(&softc->ctl_lock); 2533 ooa_info->status = CTL_OOA_INVALID_LUN; 
2534 break; 2535 } 2536 2537 ooa_info->num_entries = 0; 2538 for (io = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); 2539 io != NULL; io = (union ctl_io *)TAILQ_NEXT( 2540 &io->io_hdr, ooa_links)) { 2541 ooa_info->num_entries++; 2542 } 2543 2544 mtx_unlock(&softc->ctl_lock); 2545 ooa_info->status = CTL_OOA_SUCCESS; 2546 2547 break; 2548 } 2549 case CTL_HARD_START: 2550 case CTL_HARD_STOP: { 2551 struct ctl_fe_ioctl_startstop_info ss_info; 2552 struct cfi_metatask *metatask; 2553 struct mtx hs_mtx; 2554 2555 mtx_init(&hs_mtx, "HS Mutex", NULL, MTX_DEF); 2556 2557 cv_init(&ss_info.sem, "hard start/stop cv" ); 2558 2559 metatask = cfi_alloc_metatask(/*can_wait*/ 1); 2560 if (metatask == NULL) { 2561 retval = ENOMEM; 2562 mtx_destroy(&hs_mtx); 2563 break; 2564 } 2565 2566 if (cmd == CTL_HARD_START) 2567 metatask->tasktype = CFI_TASK_STARTUP; 2568 else 2569 metatask->tasktype = CFI_TASK_SHUTDOWN; 2570 2571 metatask->callback = ctl_ioctl_hard_startstop_callback; 2572 metatask->callback_arg = &ss_info; 2573 2574 cfi_action(metatask); 2575 2576 /* Wait for the callback */ 2577 mtx_lock(&hs_mtx); 2578 cv_wait_sig(&ss_info.sem, &hs_mtx); 2579 mtx_unlock(&hs_mtx); 2580 2581 /* 2582 * All information has been copied from the metatask by the 2583 * time cv_broadcast() is called, so we free the metatask here. 2584 */ 2585 cfi_free_metatask(metatask); 2586 2587 memcpy((void *)addr, &ss_info.hs_info, sizeof(ss_info.hs_info)); 2588 2589 mtx_destroy(&hs_mtx); 2590 break; 2591 } 2592 case CTL_BBRREAD: { 2593 struct ctl_bbrread_info *bbr_info; 2594 struct ctl_fe_ioctl_bbrread_info fe_bbr_info; 2595 struct mtx bbr_mtx; 2596 struct cfi_metatask *metatask; 2597 2598 bbr_info = (struct ctl_bbrread_info *)addr; 2599 2600 bzero(&fe_bbr_info, sizeof(fe_bbr_info)); 2601 2602 bzero(&bbr_mtx, sizeof(bbr_mtx)); 2603 mtx_init(&bbr_mtx, "BBR Mutex", NULL, MTX_DEF); 2604 2605 fe_bbr_info.bbr_info = bbr_info; 2606 fe_bbr_info.lock = &bbr_mtx; 2607 2608 cv_init(&fe_bbr_info.sem, "BBR read cv"); 2609 metatask = cfi_alloc_metatask(/*can_wait*/ 1); 2610 2611 if (metatask == NULL) { 2612 mtx_destroy(&bbr_mtx); 2613 cv_destroy(&fe_bbr_info.sem); 2614 retval = ENOMEM; 2615 break; 2616 } 2617 metatask->tasktype = CFI_TASK_BBRREAD; 2618 metatask->callback = ctl_ioctl_bbrread_callback; 2619 metatask->callback_arg = &fe_bbr_info; 2620 metatask->taskinfo.bbrread.lun_num = bbr_info->lun_num; 2621 metatask->taskinfo.bbrread.lba = bbr_info->lba; 2622 metatask->taskinfo.bbrread.len = bbr_info->len; 2623 2624 cfi_action(metatask); 2625 2626 mtx_lock(&bbr_mtx); 2627 while (fe_bbr_info.wakeup_done == 0) 2628 cv_wait_sig(&fe_bbr_info.sem, &bbr_mtx); 2629 mtx_unlock(&bbr_mtx); 2630 2631 bbr_info->status = metatask->status; 2632 bbr_info->bbr_status = metatask->taskinfo.bbrread.status; 2633 bbr_info->scsi_status = metatask->taskinfo.bbrread.scsi_status; 2634 memcpy(&bbr_info->sense_data, 2635 &metatask->taskinfo.bbrread.sense_data, 2636 ctl_min(sizeof(bbr_info->sense_data), 2637 sizeof(metatask->taskinfo.bbrread.sense_data))); 2638 2639 cfi_free_metatask(metatask); 2640 2641 mtx_destroy(&bbr_mtx); 2642 cv_destroy(&fe_bbr_info.sem); 2643 2644 break; 2645 } 2646 case CTL_DELAY_IO: { 2647 struct ctl_io_delay_info *delay_info; 2648 #ifdef CTL_IO_DELAY 2649 struct ctl_lun *lun; 2650 #endif /* CTL_IO_DELAY */ 2651 2652 delay_info = (struct ctl_io_delay_info *)addr; 2653 2654 #ifdef CTL_IO_DELAY 2655 mtx_lock(&softc->ctl_lock); 2656 2657 if ((delay_info->lun_id > CTL_MAX_LUNS) 2658 || (softc->ctl_luns[delay_info->lun_id] == NULL)) { 2659 
delay_info->status = CTL_DELAY_STATUS_INVALID_LUN; 2660 } else { 2661 lun = softc->ctl_luns[delay_info->lun_id]; 2662 2663 delay_info->status = CTL_DELAY_STATUS_OK; 2664 2665 switch (delay_info->delay_type) { 2666 case CTL_DELAY_TYPE_CONT: 2667 break; 2668 case CTL_DELAY_TYPE_ONESHOT: 2669 break; 2670 default: 2671 delay_info->status = 2672 CTL_DELAY_STATUS_INVALID_TYPE; 2673 break; 2674 } 2675 2676 switch (delay_info->delay_loc) { 2677 case CTL_DELAY_LOC_DATAMOVE: 2678 lun->delay_info.datamove_type = 2679 delay_info->delay_type; 2680 lun->delay_info.datamove_delay = 2681 delay_info->delay_secs; 2682 break; 2683 case CTL_DELAY_LOC_DONE: 2684 lun->delay_info.done_type = 2685 delay_info->delay_type; 2686 lun->delay_info.done_delay = 2687 delay_info->delay_secs; 2688 break; 2689 default: 2690 delay_info->status = 2691 CTL_DELAY_STATUS_INVALID_LOC; 2692 break; 2693 } 2694 } 2695 2696 mtx_unlock(&softc->ctl_lock); 2697 #else 2698 delay_info->status = CTL_DELAY_STATUS_NOT_IMPLEMENTED; 2699 #endif /* CTL_IO_DELAY */ 2700 break; 2701 } 2702 case CTL_REALSYNC_SET: { 2703 int *syncstate; 2704 2705 syncstate = (int *)addr; 2706 2707 mtx_lock(&softc->ctl_lock); 2708 switch (*syncstate) { 2709 case 0: 2710 softc->flags &= ~CTL_FLAG_REAL_SYNC; 2711 break; 2712 case 1: 2713 softc->flags |= CTL_FLAG_REAL_SYNC; 2714 break; 2715 default: 2716 retval = -EINVAL; 2717 break; 2718 } 2719 mtx_unlock(&softc->ctl_lock); 2720 break; 2721 } 2722 case CTL_REALSYNC_GET: { 2723 int *syncstate; 2724 2725 syncstate = (int*)addr; 2726 2727 mtx_lock(&softc->ctl_lock); 2728 if (softc->flags & CTL_FLAG_REAL_SYNC) 2729 *syncstate = 1; 2730 else 2731 *syncstate = 0; 2732 mtx_unlock(&softc->ctl_lock); 2733 2734 break; 2735 } 2736 case CTL_SETSYNC: 2737 case CTL_GETSYNC: { 2738 struct ctl_sync_info *sync_info; 2739 struct ctl_lun *lun; 2740 2741 sync_info = (struct ctl_sync_info *)addr; 2742 2743 mtx_lock(&softc->ctl_lock); 2744 lun = softc->ctl_luns[sync_info->lun_id]; 2745 if (lun == NULL) { 2746 mtx_unlock(&softc->ctl_lock); 2747 sync_info->status = CTL_GS_SYNC_NO_LUN; 2748 } 2749 /* 2750 * Get or set the sync interval. We're not bounds checking 2751 * in the set case, hopefully the user won't do something 2752 * silly. 2753 */ 2754 if (cmd == CTL_GETSYNC) 2755 sync_info->sync_interval = lun->sync_interval; 2756 else 2757 lun->sync_interval = sync_info->sync_interval; 2758 2759 mtx_unlock(&softc->ctl_lock); 2760 2761 sync_info->status = CTL_GS_SYNC_OK; 2762 2763 break; 2764 } 2765 case CTL_GETSTATS: { 2766 struct ctl_stats *stats; 2767 struct ctl_lun *lun; 2768 int i; 2769 2770 stats = (struct ctl_stats *)addr; 2771 2772 if ((sizeof(struct ctl_lun_io_stats) * softc->num_luns) > 2773 stats->alloc_len) { 2774 stats->status = CTL_SS_NEED_MORE_SPACE; 2775 stats->num_luns = softc->num_luns; 2776 break; 2777 } 2778 /* 2779 * XXX KDM no locking here. If the LUN list changes, 2780 * things can blow up. 
2781 */ 2782 for (i = 0, lun = STAILQ_FIRST(&softc->lun_list); lun != NULL; 2783 i++, lun = STAILQ_NEXT(lun, links)) { 2784 retval = copyout(&lun->stats, &stats->lun_stats[i], 2785 sizeof(lun->stats)); 2786 if (retval != 0) 2787 break; 2788 } 2789 stats->num_luns = softc->num_luns; 2790 stats->fill_len = sizeof(struct ctl_lun_io_stats) * 2791 softc->num_luns; 2792 stats->status = CTL_SS_OK; 2793 #ifdef CTL_TIME_IO 2794 stats->flags = CTL_STATS_FLAG_TIME_VALID; 2795 #else 2796 stats->flags = CTL_STATS_FLAG_NONE; 2797 #endif 2798 getnanouptime(&stats->timestamp); 2799 break; 2800 } 2801 case CTL_ERROR_INJECT: { 2802 struct ctl_error_desc *err_desc, *new_err_desc; 2803 struct ctl_lun *lun; 2804 2805 err_desc = (struct ctl_error_desc *)addr; 2806 2807 new_err_desc = malloc(sizeof(*new_err_desc), M_CTL, 2808 M_WAITOK | M_ZERO); 2809 bcopy(err_desc, new_err_desc, sizeof(*new_err_desc)); 2810 2811 mtx_lock(&softc->ctl_lock); 2812 lun = softc->ctl_luns[err_desc->lun_id]; 2813 if (lun == NULL) { 2814 mtx_unlock(&softc->ctl_lock); 2815 printf("%s: CTL_ERROR_INJECT: invalid LUN %ju\n", 2816 __func__, (uintmax_t)err_desc->lun_id); 2817 retval = EINVAL; 2818 break; 2819 } 2820 2821 /* 2822 * We could do some checking here to verify the validity 2823 * of the request, but given the complexity of error 2824 * injection requests, the checking logic would be fairly 2825 * complex. 2826 * 2827 * For now, if the request is invalid, it just won't get 2828 * executed and might get deleted. 2829 */ 2830 STAILQ_INSERT_TAIL(&lun->error_list, new_err_desc, links); 2831 2832 /* 2833 * XXX KDM check to make sure the serial number is unique, 2834 * in case we somehow manage to wrap. That shouldn't 2835 * happen for a very long time, but it's the right thing to 2836 * do. 2837 */ 2838 new_err_desc->serial = lun->error_serial; 2839 err_desc->serial = lun->error_serial; 2840 lun->error_serial++; 2841 2842 mtx_unlock(&softc->ctl_lock); 2843 break; 2844 } 2845 case CTL_ERROR_INJECT_DELETE: { 2846 struct ctl_error_desc *delete_desc, *desc, *desc2; 2847 struct ctl_lun *lun; 2848 int delete_done; 2849 2850 delete_desc = (struct ctl_error_desc *)addr; 2851 delete_done = 0; 2852 2853 mtx_lock(&softc->ctl_lock); 2854 lun = softc->ctl_luns[delete_desc->lun_id]; 2855 if (lun == NULL) { 2856 mtx_unlock(&softc->ctl_lock); 2857 printf("%s: CTL_ERROR_INJECT_DELETE: invalid LUN %ju\n", 2858 __func__, (uintmax_t)delete_desc->lun_id); 2859 retval = EINVAL; 2860 break; 2861 } 2862 STAILQ_FOREACH_SAFE(desc, &lun->error_list, links, desc2) { 2863 if (desc->serial != delete_desc->serial) 2864 continue; 2865 2866 STAILQ_REMOVE(&lun->error_list, desc, ctl_error_desc, 2867 links); 2868 free(desc, M_CTL); 2869 delete_done = 1; 2870 } 2871 mtx_unlock(&softc->ctl_lock); 2872 if (delete_done == 0) { 2873 printf("%s: CTL_ERROR_INJECT_DELETE: can't find " 2874 "error serial %ju on LUN %u\n", __func__, 2875 delete_desc->serial, delete_desc->lun_id); 2876 retval = EINVAL; 2877 break; 2878 } 2879 break; 2880 } 2881 case CTL_DUMP_STRUCTS: { 2882 int i, j, k; 2883 struct ctl_frontend *fe; 2884 2885 printf("CTL IID to WWPN map start:\n"); 2886 for (i = 0; i < CTL_MAX_PORTS; i++) { 2887 for (j = 0; j < CTL_MAX_INIT_PER_PORT; j++) { 2888 if (softc->wwpn_iid[i][j].in_use == 0) 2889 continue; 2890 2891 printf("port %d iid %u WWPN %#jx\n", 2892 softc->wwpn_iid[i][j].port, 2893 softc->wwpn_iid[i][j].iid, 2894 (uintmax_t)softc->wwpn_iid[i][j].wwpn); 2895 } 2896 } 2897 printf("CTL IID to WWPN map end\n"); 2898 printf("CTL Persistent Reservation information 
start:\n"); 2899 for (i = 0; i < CTL_MAX_LUNS; i++) { 2900 struct ctl_lun *lun; 2901 2902 lun = softc->ctl_luns[i]; 2903 2904 if ((lun == NULL) 2905 || ((lun->flags & CTL_LUN_DISABLED) != 0)) 2906 continue; 2907 2908 for (j = 0; j < (CTL_MAX_PORTS * 2); j++) { 2909 for (k = 0; k < CTL_MAX_INIT_PER_PORT; k++){ 2910 if (lun->per_res[j+k].registered == 0) 2911 continue; 2912 printf("LUN %d port %d iid %d key " 2913 "%#jx\n", i, j, k, 2914 (uintmax_t)scsi_8btou64( 2915 lun->per_res[j+k].res_key.key)); 2916 } 2917 } 2918 } 2919 printf("CTL Persistent Reservation information end\n"); 2920 printf("CTL Frontends:\n"); 2921 /* 2922 * XXX KDM calling this without a lock. We'd likely want 2923 * to drop the lock before calling the frontend's dump 2924 * routine anyway. 2925 */ 2926 STAILQ_FOREACH(fe, &softc->fe_list, links) { 2927 printf("Frontend %s Type %u pport %d vport %d WWNN " 2928 "%#jx WWPN %#jx\n", fe->port_name, fe->port_type, 2929 fe->physical_port, fe->virtual_port, 2930 (uintmax_t)fe->wwnn, (uintmax_t)fe->wwpn); 2931 2932 /* 2933 * Frontends are not required to support the dump 2934 * routine. 2935 */ 2936 if (fe->fe_dump == NULL) 2937 continue; 2938 2939 fe->fe_dump(); 2940 } 2941 printf("CTL Frontend information end\n"); 2942 break; 2943 } 2944 case CTL_LUN_REQ: { 2945 struct ctl_lun_req *lun_req; 2946 struct ctl_backend_driver *backend; 2947 2948 lun_req = (struct ctl_lun_req *)addr; 2949 2950 backend = ctl_backend_find(lun_req->backend); 2951 if (backend == NULL) { 2952 lun_req->status = CTL_LUN_ERROR; 2953 snprintf(lun_req->error_str, 2954 sizeof(lun_req->error_str), 2955 "Backend \"%s\" not found.", 2956 lun_req->backend); 2957 break; 2958 } 2959 if (lun_req->num_be_args > 0) { 2960 lun_req->kern_be_args = ctl_copyin_args( 2961 lun_req->num_be_args, 2962 lun_req->be_args, 2963 lun_req->error_str, 2964 sizeof(lun_req->error_str)); 2965 if (lun_req->kern_be_args == NULL) { 2966 lun_req->status = CTL_LUN_ERROR; 2967 break; 2968 } 2969 } 2970 2971 retval = backend->ioctl(dev, cmd, addr, flag, td); 2972 2973 if (lun_req->num_be_args > 0) { 2974 ctl_free_args(lun_req->num_be_args, 2975 lun_req->kern_be_args); 2976 } 2977 break; 2978 } 2979 case CTL_LUN_LIST: { 2980 struct sbuf *sb; 2981 struct ctl_lun *lun; 2982 struct ctl_lun_list *list; 2983 2984 list = (struct ctl_lun_list *)addr; 2985 2986 /* 2987 * Allocate a fixed length sbuf here, based on the length 2988 * of the user's buffer. We could allocate an auto-extending 2989 * buffer, and then tell the user how much larger our 2990 * amount of data is than his buffer, but that presents 2991 * some problems: 2992 * 2993 * 1. The sbuf(9) routines use a blocking malloc, and so 2994 * we can't hold a lock while calling them with an 2995 * auto-extending buffer. 2996 * 2997 * 2. There is not currently a LUN reference counting 2998 * mechanism, outside of outstanding transactions on 2999 * the LUN's OOA queue. So a LUN could go away on us 3000 * while we're getting the LUN number, backend-specific 3001 * information, etc. Thus, given the way things 3002 * currently work, we need to hold the CTL lock while 3003 * grabbing LUN information. 3004 * 3005 * So, from the user's standpoint, the best thing to do is 3006 * allocate what he thinks is a reasonable buffer length, 3007 * and then if he gets a CTL_LUN_LIST_NEED_MORE_SPACE error, 3008 * double the buffer length and try again. (And repeat 3009 * that until he succeeds.) 
3010 */ 3011 sb = sbuf_new(NULL, NULL, list->alloc_len, SBUF_FIXEDLEN); 3012 if (sb == NULL) { 3013 list->status = CTL_LUN_LIST_ERROR; 3014 snprintf(list->error_str, sizeof(list->error_str), 3015 "Unable to allocate %d bytes for LUN list", 3016 list->alloc_len); 3017 break; 3018 } 3019 3020 sbuf_printf(sb, "<ctllunlist>\n"); 3021 3022 mtx_lock(&softc->ctl_lock); 3023 3024 STAILQ_FOREACH(lun, &softc->lun_list, links) { 3025 retval = sbuf_printf(sb, "<lun id=\"%ju\">\n", 3026 (uintmax_t)lun->lun); 3027 3028 /* 3029 * Bail out as soon as we see that we've overfilled 3030 * the buffer. 3031 */ 3032 if (retval != 0) 3033 break; 3034 3035 retval = sbuf_printf(sb, "<backend_type>%s" 3036 "</backend_type>\n", 3037 (lun->backend == NULL) ? "none" : 3038 lun->backend->name); 3039 3040 if (retval != 0) 3041 break; 3042 3043 retval = sbuf_printf(sb, "<lun_type>%d</lun_type>\n", 3044 lun->be_lun->lun_type); 3045 3046 if (retval != 0) 3047 break; 3048 3049 if (lun->backend == NULL) { 3050 retval = sbuf_printf(sb, "</lun>\n"); 3051 if (retval != 0) 3052 break; 3053 continue; 3054 } 3055 3056 retval = sbuf_printf(sb, "<size>%ju</size>\n", 3057 (lun->be_lun->maxlba > 0) ? 3058 lun->be_lun->maxlba + 1 : 0); 3059 3060 if (retval != 0) 3061 break; 3062 3063 retval = sbuf_printf(sb, "<blocksize>%u</blocksize>\n", 3064 lun->be_lun->blocksize); 3065 3066 if (retval != 0) 3067 break; 3068 3069 retval = sbuf_printf(sb, "<serial_number>"); 3070 3071 if (retval != 0) 3072 break; 3073 3074 retval = ctl_sbuf_printf_esc(sb, 3075 lun->be_lun->serial_num); 3076 3077 if (retval != 0) 3078 break; 3079 3080 retval = sbuf_printf(sb, "</serial_number>\n"); 3081 3082 if (retval != 0) 3083 break; 3084 3085 retval = sbuf_printf(sb, "<device_id>"); 3086 3087 if (retval != 0) 3088 break; 3089 3090 retval = ctl_sbuf_printf_esc(sb,lun->be_lun->device_id); 3091 3092 if (retval != 0) 3093 break; 3094 3095 retval = sbuf_printf(sb, "</device_id>\n"); 3096 3097 if (retval != 0) 3098 break; 3099 3100 if (lun->backend->lun_info == NULL) { 3101 retval = sbuf_printf(sb, "</lun>\n"); 3102 if (retval != 0) 3103 break; 3104 continue; 3105 } 3106 3107 retval =lun->backend->lun_info(lun->be_lun->be_lun, sb); 3108 3109 if (retval != 0) 3110 break; 3111 3112 retval = sbuf_printf(sb, "</lun>\n"); 3113 3114 if (retval != 0) 3115 break; 3116 } 3117 mtx_unlock(&softc->ctl_lock); 3118 3119 if ((retval != 0) 3120 || ((retval = sbuf_printf(sb, "</ctllunlist>\n")) != 0)) { 3121 retval = 0; 3122 sbuf_delete(sb); 3123 list->status = CTL_LUN_LIST_NEED_MORE_SPACE; 3124 snprintf(list->error_str, sizeof(list->error_str), 3125 "Out of space, %d bytes is too small", 3126 list->alloc_len); 3127 break; 3128 } 3129 3130 sbuf_finish(sb); 3131 3132 retval = copyout(sbuf_data(sb), list->lun_xml, 3133 sbuf_len(sb) + 1); 3134 3135 list->fill_len = sbuf_len(sb) + 1; 3136 list->status = CTL_LUN_LIST_OK; 3137 sbuf_delete(sb); 3138 break; 3139 } 3140 default: { 3141 /* XXX KDM should we fix this? */ 3142 #if 0 3143 struct ctl_backend_driver *backend; 3144 unsigned int type; 3145 int found; 3146 3147 found = 0; 3148 3149 /* 3150 * We encode the backend type as the ioctl type for backend 3151 * ioctls. So parse it out here, and then search for a 3152 * backend of this type. 
3153 */ 3154 type = _IOC_TYPE(cmd); 3155 3156 STAILQ_FOREACH(backend, &softc->be_list, links) { 3157 if (backend->type == type) { 3158 found = 1; 3159 break; 3160 } 3161 } 3162 if (found == 0) { 3163 printf("ctl: unknown ioctl command %#lx or backend " 3164 "%d\n", cmd, type); 3165 retval = -EINVAL; 3166 break; 3167 } 3168 retval = backend->ioctl(dev, cmd, addr, flag, td); 3169 #endif 3170 retval = ENOTTY; 3171 break; 3172 } 3173 } 3174 return (retval); 3175 } 3176 3177 uint32_t 3178 ctl_get_initindex(struct ctl_nexus *nexus) 3179 { 3180 if (nexus->targ_port < CTL_MAX_PORTS) 3181 return (nexus->initid.id + 3182 (nexus->targ_port * CTL_MAX_INIT_PER_PORT)); 3183 else 3184 return (nexus->initid.id + 3185 ((nexus->targ_port - CTL_MAX_PORTS) * 3186 CTL_MAX_INIT_PER_PORT)); 3187 } 3188 3189 uint32_t 3190 ctl_get_resindex(struct ctl_nexus *nexus) 3191 { 3192 return (nexus->initid.id + (nexus->targ_port * CTL_MAX_INIT_PER_PORT)); 3193 } 3194 3195 uint32_t 3196 ctl_port_idx(int port_num) 3197 { 3198 if (port_num < CTL_MAX_PORTS) 3199 return(port_num); 3200 else 3201 return(port_num - CTL_MAX_PORTS); 3202 } 3203 3204 /* 3205 * Note: This only works for bitmask sizes that are at least 32 bits, and 3206 * that are a power of 2. 3207 */ 3208 int 3209 ctl_ffz(uint32_t *mask, uint32_t size) 3210 { 3211 uint32_t num_chunks, num_pieces; 3212 int i, j; 3213 3214 num_chunks = (size >> 5); 3215 if (num_chunks == 0) 3216 num_chunks++; 3217 num_pieces = ctl_min((sizeof(uint32_t) * 8), size); 3218 3219 for (i = 0; i < num_chunks; i++) { 3220 for (j = 0; j < num_pieces; j++) { 3221 if ((mask[i] & (1 << j)) == 0) 3222 return ((i << 5) + j); 3223 } 3224 } 3225 3226 return (-1); 3227 } 3228 3229 int 3230 ctl_set_mask(uint32_t *mask, uint32_t bit) 3231 { 3232 uint32_t chunk, piece; 3233 3234 chunk = bit >> 5; 3235 piece = bit % (sizeof(uint32_t) * 8); 3236 3237 if ((mask[chunk] & (1 << piece)) != 0) 3238 return (-1); 3239 else 3240 mask[chunk] |= (1 << piece); 3241 3242 return (0); 3243 } 3244 3245 int 3246 ctl_clear_mask(uint32_t *mask, uint32_t bit) 3247 { 3248 uint32_t chunk, piece; 3249 3250 chunk = bit >> 5; 3251 piece = bit % (sizeof(uint32_t) * 8); 3252 3253 if ((mask[chunk] & (1 << piece)) == 0) 3254 return (-1); 3255 else 3256 mask[chunk] &= ~(1 << piece); 3257 3258 return (0); 3259 } 3260 3261 int 3262 ctl_is_set(uint32_t *mask, uint32_t bit) 3263 { 3264 uint32_t chunk, piece; 3265 3266 chunk = bit >> 5; 3267 piece = bit % (sizeof(uint32_t) * 8); 3268 3269 if ((mask[chunk] & (1 << piece)) == 0) 3270 return (0); 3271 else 3272 return (1); 3273 } 3274 3275 #ifdef unused 3276 /* 3277 * The bus, target and lun are optional, they can be filled in later. 3278 * can_wait is used to determine whether we can wait on the malloc or not. 3279 */ 3280 union ctl_io* 3281 ctl_malloc_io(ctl_io_type io_type, uint32_t targ_port, uint32_t targ_target, 3282 uint32_t targ_lun, int can_wait) 3283 { 3284 union ctl_io *io; 3285 3286 if (can_wait) 3287 io = (union ctl_io *)malloc(sizeof(*io), M_CTL, M_WAITOK); 3288 else 3289 io = (union ctl_io *)malloc(sizeof(*io), M_CTL, M_NOWAIT); 3290 3291 if (io != NULL) { 3292 io->io_hdr.io_type = io_type; 3293 io->io_hdr.targ_port = targ_port; 3294 /* 3295 * XXX KDM this needs to change/go away. We need to move 3296 * to a preallocated pool of ctl_scsiio structures. 
3297 */ 3298 io->io_hdr.nexus.targ_target.id = targ_target; 3299 io->io_hdr.nexus.targ_lun = targ_lun; 3300 } 3301 3302 return (io); 3303 } 3304 3305 void 3306 ctl_kfree_io(union ctl_io *io) 3307 { 3308 free(io, M_CTL); 3309 } 3310 #endif /* unused */ 3311 3312 /* 3313 * ctl_softc, pool_type, total_ctl_io are passed in. 3314 * npool is passed out. 3315 */ 3316 int 3317 ctl_pool_create(struct ctl_softc *ctl_softc, ctl_pool_type pool_type, 3318 uint32_t total_ctl_io, struct ctl_io_pool **npool) 3319 { 3320 uint32_t i; 3321 union ctl_io *cur_io, *next_io; 3322 struct ctl_io_pool *pool; 3323 int retval; 3324 3325 retval = 0; 3326 3327 pool = (struct ctl_io_pool *)malloc(sizeof(*pool), M_CTL, 3328 M_NOWAIT | M_ZERO); 3329 if (pool == NULL) { 3330 retval = -ENOMEM; 3331 goto bailout; 3332 } 3333 3334 pool->type = pool_type; 3335 pool->ctl_softc = ctl_softc; 3336 3337 mtx_lock(&ctl_softc->ctl_lock); 3338 pool->id = ctl_softc->cur_pool_id++; 3339 mtx_unlock(&ctl_softc->ctl_lock); 3340 3341 pool->flags = CTL_POOL_FLAG_NONE; 3342 STAILQ_INIT(&pool->free_queue); 3343 3344 /* 3345 * XXX KDM other options here: 3346 * - allocate a page at a time 3347 * - allocate one big chunk of memory. 3348 * Page allocation might work well, but would take a little more 3349 * tracking. 3350 */ 3351 for (i = 0; i < total_ctl_io; i++) { 3352 cur_io = (union ctl_io *)malloc(sizeof(*cur_io), M_CTL, 3353 M_NOWAIT); 3354 if (cur_io == NULL) { 3355 retval = ENOMEM; 3356 break; 3357 } 3358 cur_io->io_hdr.pool = pool; 3359 STAILQ_INSERT_TAIL(&pool->free_queue, &cur_io->io_hdr, links); 3360 pool->total_ctl_io++; 3361 pool->free_ctl_io++; 3362 } 3363 3364 if (retval != 0) { 3365 for (cur_io = (union ctl_io *)STAILQ_FIRST(&pool->free_queue); 3366 cur_io != NULL; cur_io = next_io) { 3367 next_io = (union ctl_io *)STAILQ_NEXT(&cur_io->io_hdr, 3368 links); 3369 STAILQ_REMOVE(&pool->free_queue, &cur_io->io_hdr, 3370 ctl_io_hdr, links); 3371 free(cur_io, M_CTL); 3372 } 3373 3374 free(pool, M_CTL); 3375 goto bailout; 3376 } 3377 mtx_lock(&ctl_softc->ctl_lock); 3378 ctl_softc->num_pools++; 3379 STAILQ_INSERT_TAIL(&ctl_softc->io_pools, pool, links); 3380 /* 3381 * Increment our usage count if this is an external consumer, so we 3382 * can't get unloaded until the external consumer (most likely a 3383 * FETD) unloads and frees his pool. 3384 * 3385 * XXX KDM will this increment the caller's module use count, or 3386 * mine? 
3387 */ 3388 #if 0 3389 if ((pool_type != CTL_POOL_EMERGENCY) 3390 && (pool_type != CTL_POOL_INTERNAL) 3391 && (pool_type != CTL_POOL_IOCTL) 3392 && (pool_type != CTL_POOL_4OTHERSC)) 3393 MOD_INC_USE_COUNT; 3394 #endif 3395 3396 mtx_unlock(&ctl_softc->ctl_lock); 3397 3398 *npool = pool; 3399 3400 bailout: 3401 3402 return (retval); 3403 } 3404 3405 int 3406 ctl_pool_acquire(struct ctl_io_pool *pool) 3407 { 3408 3409 mtx_assert(&control_softc->ctl_lock, MA_OWNED); 3410 3411 if (pool == NULL) 3412 return (-EINVAL); 3413 3414 if (pool->flags & CTL_POOL_FLAG_INVALID) 3415 return (-EINVAL); 3416 3417 pool->refcount++; 3418 3419 return (0); 3420 } 3421 3422 int 3423 ctl_pool_invalidate(struct ctl_io_pool *pool) 3424 { 3425 3426 mtx_assert(&control_softc->ctl_lock, MA_OWNED); 3427 3428 if (pool == NULL) 3429 return (-EINVAL); 3430 3431 pool->flags |= CTL_POOL_FLAG_INVALID; 3432 3433 return (0); 3434 } 3435 3436 int 3437 ctl_pool_release(struct ctl_io_pool *pool) 3438 { 3439 3440 mtx_assert(&control_softc->ctl_lock, MA_OWNED); 3441 3442 if (pool == NULL) 3443 return (-EINVAL); 3444 3445 if ((--pool->refcount == 0) 3446 && (pool->flags & CTL_POOL_FLAG_INVALID)) { 3447 ctl_pool_free(pool->ctl_softc, pool); 3448 } 3449 3450 return (0); 3451 } 3452 3453 void 3454 ctl_pool_free(struct ctl_softc *ctl_softc, struct ctl_io_pool *pool) 3455 { 3456 union ctl_io *cur_io, *next_io; 3457 3458 mtx_assert(&ctl_softc->ctl_lock, MA_OWNED); 3459 3460 for (cur_io = (union ctl_io *)STAILQ_FIRST(&pool->free_queue); 3461 cur_io != NULL; cur_io = next_io) { 3462 next_io = (union ctl_io *)STAILQ_NEXT(&cur_io->io_hdr, 3463 links); 3464 STAILQ_REMOVE(&pool->free_queue, &cur_io->io_hdr, ctl_io_hdr, 3465 links); 3466 free(cur_io, M_CTL); 3467 } 3468 3469 STAILQ_REMOVE(&ctl_softc->io_pools, pool, ctl_io_pool, links); 3470 ctl_softc->num_pools--; 3471 3472 /* 3473 * XXX KDM will this decrement the caller's usage count or mine? 3474 */ 3475 #if 0 3476 if ((pool->type != CTL_POOL_EMERGENCY) 3477 && (pool->type != CTL_POOL_INTERNAL) 3478 && (pool->type != CTL_POOL_IOCTL)) 3479 MOD_DEC_USE_COUNT; 3480 #endif 3481 3482 free(pool, M_CTL); 3483 } 3484 3485 /* 3486 * This routine does not block (except for spinlocks of course). 3487 * It tries to allocate a ctl_io union from the caller's pool as quickly as 3488 * possible. 3489 */ 3490 union ctl_io * 3491 ctl_alloc_io(void *pool_ref) 3492 { 3493 union ctl_io *io; 3494 struct ctl_softc *ctl_softc; 3495 struct ctl_io_pool *pool, *npool; 3496 struct ctl_io_pool *emergency_pool; 3497 3498 pool = (struct ctl_io_pool *)pool_ref; 3499 3500 if (pool == NULL) { 3501 printf("%s: pool is NULL\n", __func__); 3502 return (NULL); 3503 } 3504 3505 emergency_pool = NULL; 3506 3507 ctl_softc = pool->ctl_softc; 3508 3509 mtx_lock(&ctl_softc->ctl_lock); 3510 /* 3511 * First, try to get the io structure from the user's pool. 3512 */ 3513 if (ctl_pool_acquire(pool) == 0) { 3514 io = (union ctl_io *)STAILQ_FIRST(&pool->free_queue); 3515 if (io != NULL) { 3516 STAILQ_REMOVE_HEAD(&pool->free_queue, links); 3517 pool->total_allocated++; 3518 pool->free_ctl_io--; 3519 mtx_unlock(&ctl_softc->ctl_lock); 3520 return (io); 3521 } else 3522 ctl_pool_release(pool); 3523 } 3524 /* 3525 * If he doesn't have any io structures left, search for an 3526 * emergency pool and grab one from there. 
3527 */ 3528 STAILQ_FOREACH(npool, &ctl_softc->io_pools, links) { 3529 if (npool->type != CTL_POOL_EMERGENCY) 3530 continue; 3531 3532 if (ctl_pool_acquire(npool) != 0) 3533 continue; 3534 3535 emergency_pool = npool; 3536 3537 io = (union ctl_io *)STAILQ_FIRST(&npool->free_queue); 3538 if (io != NULL) { 3539 STAILQ_REMOVE_HEAD(&npool->free_queue, links); 3540 npool->total_allocated++; 3541 npool->free_ctl_io--; 3542 mtx_unlock(&ctl_softc->ctl_lock); 3543 return (io); 3544 } else 3545 ctl_pool_release(npool); 3546 } 3547 3548 /* Drop the spinlock before we malloc */ 3549 mtx_unlock(&ctl_softc->ctl_lock); 3550 3551 /* 3552 * The emergency pool (if it exists) didn't have one, so try an 3553 * atomic (i.e. nonblocking) malloc and see if we get lucky. 3554 */ 3555 io = (union ctl_io *)malloc(sizeof(*io), M_CTL, M_NOWAIT); 3556 if (io != NULL) { 3557 /* 3558 * If the emergency pool exists but is empty, add this 3559 * ctl_io to its list when it gets freed. 3560 */ 3561 if (emergency_pool != NULL) { 3562 mtx_lock(&ctl_softc->ctl_lock); 3563 if (ctl_pool_acquire(emergency_pool) == 0) { 3564 io->io_hdr.pool = emergency_pool; 3565 emergency_pool->total_ctl_io++; 3566 /* 3567 * Need to bump this, otherwise 3568 * total_allocated and total_freed won't 3569 * match when we no longer have anything 3570 * outstanding. 3571 */ 3572 emergency_pool->total_allocated++; 3573 } 3574 mtx_unlock(&ctl_softc->ctl_lock); 3575 } else 3576 io->io_hdr.pool = NULL; 3577 } 3578 3579 return (io); 3580 } 3581 3582 static void 3583 ctl_free_io_internal(union ctl_io *io, int have_lock) 3584 { 3585 if (io == NULL) 3586 return; 3587 3588 /* 3589 * If this ctl_io has a pool, return it to that pool. 3590 */ 3591 if (io->io_hdr.pool != NULL) { 3592 struct ctl_io_pool *pool; 3593 #if 0 3594 struct ctl_softc *ctl_softc; 3595 union ctl_io *tmp_io; 3596 unsigned long xflags; 3597 int i; 3598 3599 ctl_softc = control_softc; 3600 #endif 3601 3602 pool = (struct ctl_io_pool *)io->io_hdr.pool; 3603 3604 if (have_lock == 0) 3605 mtx_lock(&pool->ctl_softc->ctl_lock); 3606 #if 0 3607 save_flags(xflags); 3608 3609 for (i = 0, tmp_io = (union ctl_io *)STAILQ_FIRST( 3610 &ctl_softc->task_queue); tmp_io != NULL; i++, 3611 tmp_io = (union ctl_io *)STAILQ_NEXT(&tmp_io->io_hdr, 3612 links)) { 3613 if (tmp_io == io) { 3614 printf("%s: %p is still on the task queue!\n", 3615 __func__, tmp_io); 3616 printf("%s: (%d): type %d " 3617 "msg %d cdb %x iptl: " 3618 "%d:%d:%d:%d tag 0x%04x " 3619 "flg %#lx\n", 3620 __func__, i, 3621 tmp_io->io_hdr.io_type, 3622 tmp_io->io_hdr.msg_type, 3623 tmp_io->scsiio.cdb[0], 3624 tmp_io->io_hdr.nexus.initid.id, 3625 tmp_io->io_hdr.nexus.targ_port, 3626 tmp_io->io_hdr.nexus.targ_target.id, 3627 tmp_io->io_hdr.nexus.targ_lun, 3628 (tmp_io->io_hdr.io_type == 3629 CTL_IO_TASK) ? 3630 tmp_io->taskio.tag_num : 3631 tmp_io->scsiio.tag_num, 3632 xflags); 3633 panic("I/O still on the task queue!"); 3634 } 3635 } 3636 #endif 3637 io->io_hdr.io_type = 0xff; 3638 STAILQ_INSERT_TAIL(&pool->free_queue, &io->io_hdr, links); 3639 pool->total_freed++; 3640 pool->free_ctl_io++; 3641 ctl_pool_release(pool); 3642 if (have_lock == 0) 3643 mtx_unlock(&pool->ctl_softc->ctl_lock); 3644 } else { 3645 /* 3646 * Otherwise, just free it. We probably malloced it and 3647 * the emergency pool wasn't available. 
3648 */ 3649 free(io, M_CTL); 3650 } 3651 3652 } 3653 3654 void 3655 ctl_free_io(union ctl_io *io) 3656 { 3657 ctl_free_io_internal(io, /*have_lock*/ 0); 3658 } 3659 3660 void 3661 ctl_zero_io(union ctl_io *io) 3662 { 3663 void *pool_ref; 3664 3665 if (io == NULL) 3666 return; 3667 3668 /* 3669 * May need to preserve linked list pointers at some point too. 3670 */ 3671 pool_ref = io->io_hdr.pool; 3672 3673 memset(io, 0, sizeof(*io)); 3674 3675 io->io_hdr.pool = pool_ref; 3676 } 3677 3678 /* 3679 * This routine is currently used for internal copies of ctl_ios that need 3680 * to persist for some reason after we've already returned status to the 3681 * FETD. (Thus the flag set.) 3682 * 3683 * XXX XXX 3684 * Note that this makes a blind copy of all fields in the ctl_io, except 3685 * for the pool reference. This includes any memory that has been 3686 * allocated! That memory will no longer be valid after done has been 3687 * called, so this would be VERY DANGEROUS for command that actually does 3688 * any reads or writes. Right now (11/7/2005), this is only used for immediate 3689 * start and stop commands, which don't transfer any data, so this is not a 3690 * problem. If it is used for anything else, the caller would also need to 3691 * allocate data buffer space and this routine would need to be modified to 3692 * copy the data buffer(s) as well. 3693 */ 3694 void 3695 ctl_copy_io(union ctl_io *src, union ctl_io *dest) 3696 { 3697 void *pool_ref; 3698 3699 if ((src == NULL) 3700 || (dest == NULL)) 3701 return; 3702 3703 /* 3704 * May need to preserve linked list pointers at some point too. 3705 */ 3706 pool_ref = dest->io_hdr.pool; 3707 3708 memcpy(dest, src, ctl_min(sizeof(*src), sizeof(*dest))); 3709 3710 dest->io_hdr.pool = pool_ref; 3711 /* 3712 * We need to know that this is an internal copy, and doesn't need 3713 * to get passed back to the FETD that allocated it. 3714 */ 3715 dest->io_hdr.flags |= CTL_FLAG_INT_COPY; 3716 } 3717 3718 #ifdef NEEDTOPORT 3719 static void 3720 ctl_update_power_subpage(struct copan_power_subpage *page) 3721 { 3722 int num_luns, num_partitions, config_type; 3723 struct ctl_softc *softc; 3724 cs_BOOL_t aor_present, shelf_50pct_power; 3725 cs_raidset_personality_t rs_type; 3726 int max_active_luns; 3727 3728 softc = control_softc; 3729 3730 /* subtract out the processor LUN */ 3731 num_luns = softc->num_luns - 1; 3732 /* 3733 * Default to 7 LUNs active, which was the only number we allowed 3734 * in the past. 3735 */ 3736 max_active_luns = 7; 3737 3738 num_partitions = config_GetRsPartitionInfo(); 3739 config_type = config_GetConfigType(); 3740 shelf_50pct_power = config_GetShelfPowerMode(); 3741 aor_present = config_IsAorRsPresent(); 3742 3743 rs_type = ddb_GetRsRaidType(1); 3744 if ((rs_type != CS_RAIDSET_PERSONALITY_RAID5) 3745 && (rs_type != CS_RAIDSET_PERSONALITY_RAID1)) { 3746 EPRINT(0, "Unsupported RS type %d!", rs_type); 3747 } 3748 3749 3750 page->total_luns = num_luns; 3751 3752 switch (config_type) { 3753 case 40: 3754 /* 3755 * In a 40 drive configuration, it doesn't matter what DC 3756 * cards we have, whether we have AOR enabled or not, 3757 * partitioning or not, or what type of RAIDset we have. 3758 * In that scenario, we can power up every LUN we present 3759 * to the user. 
3760 */ 3761 max_active_luns = num_luns; 3762 3763 break; 3764 case 64: 3765 if (shelf_50pct_power == CS_FALSE) { 3766 /* 25% power */ 3767 if (aor_present == CS_TRUE) { 3768 if (rs_type == 3769 CS_RAIDSET_PERSONALITY_RAID5) { 3770 max_active_luns = 7; 3771 } else if (rs_type == 3772 CS_RAIDSET_PERSONALITY_RAID1){ 3773 max_active_luns = 14; 3774 } else { 3775 /* XXX KDM now what?? */ 3776 } 3777 } else { 3778 if (rs_type == 3779 CS_RAIDSET_PERSONALITY_RAID5) { 3780 max_active_luns = 8; 3781 } else if (rs_type == 3782 CS_RAIDSET_PERSONALITY_RAID1){ 3783 max_active_luns = 16; 3784 } else { 3785 /* XXX KDM now what?? */ 3786 } 3787 } 3788 } else { 3789 /* 50% power */ 3790 /* 3791 * With 50% power in a 64 drive configuration, we 3792 * can power all LUNs we present. 3793 */ 3794 max_active_luns = num_luns; 3795 } 3796 break; 3797 case 112: 3798 if (shelf_50pct_power == CS_FALSE) { 3799 /* 25% power */ 3800 if (aor_present == CS_TRUE) { 3801 if (rs_type == 3802 CS_RAIDSET_PERSONALITY_RAID5) { 3803 max_active_luns = 7; 3804 } else if (rs_type == 3805 CS_RAIDSET_PERSONALITY_RAID1){ 3806 max_active_luns = 14; 3807 } else { 3808 /* XXX KDM now what?? */ 3809 } 3810 } else { 3811 if (rs_type == 3812 CS_RAIDSET_PERSONALITY_RAID5) { 3813 max_active_luns = 8; 3814 } else if (rs_type == 3815 CS_RAIDSET_PERSONALITY_RAID1){ 3816 max_active_luns = 16; 3817 } else { 3818 /* XXX KDM now what?? */ 3819 } 3820 } 3821 } else { 3822 /* 50% power */ 3823 if (aor_present == CS_TRUE) { 3824 if (rs_type == 3825 CS_RAIDSET_PERSONALITY_RAID5) { 3826 max_active_luns = 14; 3827 } else if (rs_type == 3828 CS_RAIDSET_PERSONALITY_RAID1){ 3829 /* 3830 * We're assuming here that disk 3831 * caching is enabled, and so we're 3832 * able to power up half of each 3833 * LUN, and cache all writes. 3834 */ 3835 max_active_luns = num_luns; 3836 } else { 3837 /* XXX KDM now what?? */ 3838 } 3839 } else { 3840 if (rs_type == 3841 CS_RAIDSET_PERSONALITY_RAID5) { 3842 max_active_luns = 15; 3843 } else if (rs_type == 3844 CS_RAIDSET_PERSONALITY_RAID1){ 3845 max_active_luns = 30; 3846 } else { 3847 /* XXX KDM now what?? */ 3848 } 3849 } 3850 } 3851 break; 3852 default: 3853 /* 3854 * In this case, we have an unknown configuration, so we 3855 * just use the default from above. 3856 */ 3857 break; 3858 } 3859 3860 page->max_active_luns = max_active_luns; 3861 #if 0 3862 printk("%s: total_luns = %d, max_active_luns = %d\n", __func__, 3863 page->total_luns, page->max_active_luns); 3864 #endif 3865 } 3866 #endif /* NEEDTOPORT */ 3867 3868 /* 3869 * This routine could be used in the future to load default and/or saved 3870 * mode page parameters for a particuar lun. 3871 */ 3872 static int 3873 ctl_init_page_index(struct ctl_lun *lun) 3874 { 3875 int i; 3876 struct ctl_page_index *page_index; 3877 struct ctl_softc *softc; 3878 3879 memcpy(&lun->mode_pages.index, page_index_template, 3880 sizeof(page_index_template)); 3881 3882 softc = lun->ctl_softc; 3883 3884 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { 3885 3886 page_index = &lun->mode_pages.index[i]; 3887 /* 3888 * If this is a disk-only mode page, there's no point in 3889 * setting it up. For some pages, we have to have some 3890 * basic information about the disk in order to calculate the 3891 * mode page data. 
3892 */ 3893 if ((lun->be_lun->lun_type != T_DIRECT) 3894 && (page_index->page_flags & CTL_PAGE_FLAG_DISK_ONLY)) 3895 continue; 3896 3897 switch (page_index->page_code & SMPH_PC_MASK) { 3898 case SMS_FORMAT_DEVICE_PAGE: { 3899 struct scsi_format_page *format_page; 3900 3901 if (page_index->subpage != SMS_SUBPAGE_PAGE_0) 3902 panic("subpage is incorrect!"); 3903 3904 /* 3905 * Sectors per track are set above. Bytes per 3906 * sector need to be set here on a per-LUN basis. 3907 */ 3908 memcpy(&lun->mode_pages.format_page[CTL_PAGE_CURRENT], 3909 &format_page_default, 3910 sizeof(format_page_default)); 3911 memcpy(&lun->mode_pages.format_page[ 3912 CTL_PAGE_CHANGEABLE], &format_page_changeable, 3913 sizeof(format_page_changeable)); 3914 memcpy(&lun->mode_pages.format_page[CTL_PAGE_DEFAULT], 3915 &format_page_default, 3916 sizeof(format_page_default)); 3917 memcpy(&lun->mode_pages.format_page[CTL_PAGE_SAVED], 3918 &format_page_default, 3919 sizeof(format_page_default)); 3920 3921 format_page = &lun->mode_pages.format_page[ 3922 CTL_PAGE_CURRENT]; 3923 scsi_ulto2b(lun->be_lun->blocksize, 3924 format_page->bytes_per_sector); 3925 3926 format_page = &lun->mode_pages.format_page[ 3927 CTL_PAGE_DEFAULT]; 3928 scsi_ulto2b(lun->be_lun->blocksize, 3929 format_page->bytes_per_sector); 3930 3931 format_page = &lun->mode_pages.format_page[ 3932 CTL_PAGE_SAVED]; 3933 scsi_ulto2b(lun->be_lun->blocksize, 3934 format_page->bytes_per_sector); 3935 3936 page_index->page_data = 3937 (uint8_t *)lun->mode_pages.format_page; 3938 break; 3939 } 3940 case SMS_RIGID_DISK_PAGE: { 3941 struct scsi_rigid_disk_page *rigid_disk_page; 3942 uint32_t sectors_per_cylinder; 3943 uint64_t cylinders; 3944 #ifndef __XSCALE__ 3945 int shift; 3946 #endif /* !__XSCALE__ */ 3947 3948 if (page_index->subpage != SMS_SUBPAGE_PAGE_0) 3949 panic("invalid subpage value %d", 3950 page_index->subpage); 3951 3952 /* 3953 * Rotation rate and sectors per track are set 3954 * above. We calculate the cylinders here based on 3955 * capacity. Due to the number of heads and 3956 * sectors per track we're using, smaller arrays 3957 * may turn out to have 0 cylinders. Linux and 3958 * FreeBSD don't pay attention to these mode pages 3959 * to figure out capacity, but Solaris does. It 3960 * seems to deal with 0 cylinders just fine, and 3961 * works out a fake geometry based on the capacity. 3962 */ 3963 memcpy(&lun->mode_pages.rigid_disk_page[ 3964 CTL_PAGE_CURRENT], &rigid_disk_page_default, 3965 sizeof(rigid_disk_page_default)); 3966 memcpy(&lun->mode_pages.rigid_disk_page[ 3967 CTL_PAGE_CHANGEABLE],&rigid_disk_page_changeable, 3968 sizeof(rigid_disk_page_changeable)); 3969 memcpy(&lun->mode_pages.rigid_disk_page[ 3970 CTL_PAGE_DEFAULT], &rigid_disk_page_default, 3971 sizeof(rigid_disk_page_default)); 3972 memcpy(&lun->mode_pages.rigid_disk_page[ 3973 CTL_PAGE_SAVED], &rigid_disk_page_default, 3974 sizeof(rigid_disk_page_default)); 3975 3976 sectors_per_cylinder = CTL_DEFAULT_SECTORS_PER_TRACK * 3977 CTL_DEFAULT_HEADS; 3978 3979 /* 3980 * The divide method here will be more accurate, 3981 * probably, but results in floating point being 3982 * used in the kernel on i386 (__udivdi3()). On the 3983 * XScale, though, __udivdi3() is implemented in 3984 * software. 3985 * 3986 * The shift method for cylinder calculation is 3987 * accurate if sectors_per_cylinder is a power of 3988 * 2. Otherwise it might be slightly off -- you 3989 * might have a bit of a truncation problem. 
3990 */ 3991 #ifdef __XSCALE__ 3992 cylinders = (lun->be_lun->maxlba + 1) / 3993 sectors_per_cylinder; 3994 #else 3995 for (shift = 31; shift > 0; shift--) { 3996 if (sectors_per_cylinder & (1 << shift)) 3997 break; 3998 } 3999 cylinders = (lun->be_lun->maxlba + 1) >> shift; 4000 #endif 4001 4002 /* 4003 * We've basically got 3 bytes, or 24 bits for the 4004 * cylinder size in the mode page. If we're over, 4005 * just round down to 2^24. 4006 */ 4007 if (cylinders > 0xffffff) 4008 cylinders = 0xffffff; 4009 4010 rigid_disk_page = &lun->mode_pages.rigid_disk_page[ 4011 CTL_PAGE_CURRENT]; 4012 scsi_ulto3b(cylinders, rigid_disk_page->cylinders); 4013 4014 rigid_disk_page = &lun->mode_pages.rigid_disk_page[ 4015 CTL_PAGE_DEFAULT]; 4016 scsi_ulto3b(cylinders, rigid_disk_page->cylinders); 4017 4018 rigid_disk_page = &lun->mode_pages.rigid_disk_page[ 4019 CTL_PAGE_SAVED]; 4020 scsi_ulto3b(cylinders, rigid_disk_page->cylinders); 4021 4022 page_index->page_data = 4023 (uint8_t *)lun->mode_pages.rigid_disk_page; 4024 break; 4025 } 4026 case SMS_CACHING_PAGE: { 4027 4028 if (page_index->subpage != SMS_SUBPAGE_PAGE_0) 4029 panic("invalid subpage value %d", 4030 page_index->subpage); 4031 /* 4032 * Defaults should be okay here, no calculations 4033 * needed. 4034 */ 4035 memcpy(&lun->mode_pages.caching_page[CTL_PAGE_CURRENT], 4036 &caching_page_default, 4037 sizeof(caching_page_default)); 4038 memcpy(&lun->mode_pages.caching_page[ 4039 CTL_PAGE_CHANGEABLE], &caching_page_changeable, 4040 sizeof(caching_page_changeable)); 4041 memcpy(&lun->mode_pages.caching_page[CTL_PAGE_DEFAULT], 4042 &caching_page_default, 4043 sizeof(caching_page_default)); 4044 memcpy(&lun->mode_pages.caching_page[CTL_PAGE_SAVED], 4045 &caching_page_default, 4046 sizeof(caching_page_default)); 4047 page_index->page_data = 4048 (uint8_t *)lun->mode_pages.caching_page; 4049 break; 4050 } 4051 case SMS_CONTROL_MODE_PAGE: { 4052 4053 if (page_index->subpage != SMS_SUBPAGE_PAGE_0) 4054 panic("invalid subpage value %d", 4055 page_index->subpage); 4056 4057 /* 4058 * Defaults should be okay here, no calculations 4059 * needed. 
4060 */ 4061 memcpy(&lun->mode_pages.control_page[CTL_PAGE_CURRENT], 4062 &control_page_default, 4063 sizeof(control_page_default)); 4064 memcpy(&lun->mode_pages.control_page[ 4065 CTL_PAGE_CHANGEABLE], &control_page_changeable, 4066 sizeof(control_page_changeable)); 4067 memcpy(&lun->mode_pages.control_page[CTL_PAGE_DEFAULT], 4068 &control_page_default, 4069 sizeof(control_page_default)); 4070 memcpy(&lun->mode_pages.control_page[CTL_PAGE_SAVED], 4071 &control_page_default, 4072 sizeof(control_page_default)); 4073 page_index->page_data = 4074 (uint8_t *)lun->mode_pages.control_page; 4075 break; 4076 4077 } 4078 case SMS_VENDOR_SPECIFIC_PAGE:{ 4079 switch (page_index->subpage) { 4080 case PWR_SUBPAGE_CODE: { 4081 struct copan_power_subpage *current_page, 4082 *saved_page; 4083 4084 memcpy(&lun->mode_pages.power_subpage[ 4085 CTL_PAGE_CURRENT], 4086 &power_page_default, 4087 sizeof(power_page_default)); 4088 memcpy(&lun->mode_pages.power_subpage[ 4089 CTL_PAGE_CHANGEABLE], 4090 &power_page_changeable, 4091 sizeof(power_page_changeable)); 4092 memcpy(&lun->mode_pages.power_subpage[ 4093 CTL_PAGE_DEFAULT], 4094 &power_page_default, 4095 sizeof(power_page_default)); 4096 memcpy(&lun->mode_pages.power_subpage[ 4097 CTL_PAGE_SAVED], 4098 &power_page_default, 4099 sizeof(power_page_default)); 4100 page_index->page_data = 4101 (uint8_t *)lun->mode_pages.power_subpage; 4102 4103 current_page = (struct copan_power_subpage *) 4104 (page_index->page_data + 4105 (page_index->page_len * 4106 CTL_PAGE_CURRENT)); 4107 saved_page = (struct copan_power_subpage *) 4108 (page_index->page_data + 4109 (page_index->page_len * 4110 CTL_PAGE_SAVED)); 4111 break; 4112 } 4113 case APS_SUBPAGE_CODE: { 4114 struct copan_aps_subpage *current_page, 4115 *saved_page; 4116 4117 // This gets set multiple times but 4118 // it should always be the same. It's 4119 // only done during init so who cares. 
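				// (index_to_aps_page is presumably
				// consulted elsewhere, e.g. when handling
				// APS lock requests, so that this subpage
				// can be located again without rescanning
				// the whole page index.)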
4120 index_to_aps_page = i; 4121 4122 memcpy(&lun->mode_pages.aps_subpage[ 4123 CTL_PAGE_CURRENT], 4124 &aps_page_default, 4125 sizeof(aps_page_default)); 4126 memcpy(&lun->mode_pages.aps_subpage[ 4127 CTL_PAGE_CHANGEABLE], 4128 &aps_page_changeable, 4129 sizeof(aps_page_changeable)); 4130 memcpy(&lun->mode_pages.aps_subpage[ 4131 CTL_PAGE_DEFAULT], 4132 &aps_page_default, 4133 sizeof(aps_page_default)); 4134 memcpy(&lun->mode_pages.aps_subpage[ 4135 CTL_PAGE_SAVED], 4136 &aps_page_default, 4137 sizeof(aps_page_default)); 4138 page_index->page_data = 4139 (uint8_t *)lun->mode_pages.aps_subpage; 4140 4141 current_page = (struct copan_aps_subpage *) 4142 (page_index->page_data + 4143 (page_index->page_len * 4144 CTL_PAGE_CURRENT)); 4145 saved_page = (struct copan_aps_subpage *) 4146 (page_index->page_data + 4147 (page_index->page_len * 4148 CTL_PAGE_SAVED)); 4149 break; 4150 } 4151 case DBGCNF_SUBPAGE_CODE: { 4152 struct copan_debugconf_subpage *current_page, 4153 *saved_page; 4154 4155 memcpy(&lun->mode_pages.debugconf_subpage[ 4156 CTL_PAGE_CURRENT], 4157 &debugconf_page_default, 4158 sizeof(debugconf_page_default)); 4159 memcpy(&lun->mode_pages.debugconf_subpage[ 4160 CTL_PAGE_CHANGEABLE], 4161 &debugconf_page_changeable, 4162 sizeof(debugconf_page_changeable)); 4163 memcpy(&lun->mode_pages.debugconf_subpage[ 4164 CTL_PAGE_DEFAULT], 4165 &debugconf_page_default, 4166 sizeof(debugconf_page_default)); 4167 memcpy(&lun->mode_pages.debugconf_subpage[ 4168 CTL_PAGE_SAVED], 4169 &debugconf_page_default, 4170 sizeof(debugconf_page_default)); 4171 page_index->page_data = 4172 (uint8_t *)lun->mode_pages.debugconf_subpage; 4173 4174 current_page = (struct copan_debugconf_subpage *) 4175 (page_index->page_data + 4176 (page_index->page_len * 4177 CTL_PAGE_CURRENT)); 4178 saved_page = (struct copan_debugconf_subpage *) 4179 (page_index->page_data + 4180 (page_index->page_len * 4181 CTL_PAGE_SAVED)); 4182 break; 4183 } 4184 default: 4185 panic("invalid subpage value %d", 4186 page_index->subpage); 4187 break; 4188 } 4189 break; 4190 } 4191 default: 4192 panic("invalid page value %d", 4193 page_index->page_code & SMPH_PC_MASK); 4194 break; 4195 } 4196 } 4197 4198 return (CTL_RETVAL_COMPLETE); 4199 } 4200 4201 /* 4202 * LUN allocation. 4203 * 4204 * Requirements: 4205 * - caller allocates and zeros LUN storage, or passes in a NULL LUN if he 4206 * wants us to allocate the LUN and he can block. 4207 * - ctl_softc is always set 4208 * - be_lun is set if the LUN has a backend (needed for disk LUNs) 4209 * 4210 * Returns 0 for success, non-zero (errno) for failure. 4211 */ 4212 static int 4213 ctl_alloc_lun(struct ctl_softc *ctl_softc, struct ctl_lun *ctl_lun, 4214 struct ctl_be_lun *const be_lun, struct ctl_id target_id) 4215 { 4216 struct ctl_lun *nlun, *lun; 4217 struct ctl_frontend *fe; 4218 int lun_number, i, lun_malloced; 4219 4220 if (be_lun == NULL) 4221 return (EINVAL); 4222 4223 /* 4224 * We currently only support Direct Access or Processor LUN types. 
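 *
 * XXX Note that for the unsupported types the switch below reports
 * CTL_LUN_CONFIG_FAILURE to the backend but then falls through rather
 * than returning, so allocation proceeds anyway.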
4225 */ 4226 switch (be_lun->lun_type) { 4227 case T_DIRECT: 4228 break; 4229 case T_PROCESSOR: 4230 break; 4231 case T_SEQUENTIAL: 4232 case T_CHANGER: 4233 default: 4234 be_lun->lun_config_status(be_lun->be_lun, 4235 CTL_LUN_CONFIG_FAILURE); 4236 break; 4237 } 4238 if (ctl_lun == NULL) { 4239 lun = malloc(sizeof(*lun), M_CTL, M_WAITOK); 4240 lun_malloced = 1; 4241 } else { 4242 lun_malloced = 0; 4243 lun = ctl_lun; 4244 } 4245 4246 memset(lun, 0, sizeof(*lun)); 4247 if (lun_malloced) 4248 lun->flags = CTL_LUN_MALLOCED; 4249 4250 mtx_lock(&ctl_softc->ctl_lock); 4251 /* 4252 * See if the caller requested a particular LUN number. If so, see 4253 * if it is available. Otherwise, allocate the first available LUN. 4254 */ 4255 if (be_lun->flags & CTL_LUN_FLAG_ID_REQ) { 4256 if ((be_lun->req_lun_id > (CTL_MAX_LUNS - 1)) 4257 || (ctl_is_set(ctl_softc->ctl_lun_mask, be_lun->req_lun_id))) { 4258 mtx_unlock(&ctl_softc->ctl_lock); 4259 if (be_lun->req_lun_id > (CTL_MAX_LUNS - 1)) { 4260 printf("ctl: requested LUN ID %d is higher " 4261 "than CTL_MAX_LUNS - 1 (%d)\n", 4262 be_lun->req_lun_id, CTL_MAX_LUNS - 1); 4263 } else { 4264 /* 4265 * XXX KDM return an error, or just assign 4266 * another LUN ID in this case?? 4267 */ 4268 printf("ctl: requested LUN ID %d is already " 4269 "in use\n", be_lun->req_lun_id); 4270 } 4271 if (lun->flags & CTL_LUN_MALLOCED) 4272 free(lun, M_CTL); 4273 be_lun->lun_config_status(be_lun->be_lun, 4274 CTL_LUN_CONFIG_FAILURE); 4275 return (ENOSPC); 4276 } 4277 lun_number = be_lun->req_lun_id; 4278 } else { 4279 lun_number = ctl_ffz(ctl_softc->ctl_lun_mask, CTL_MAX_LUNS); 4280 if (lun_number == -1) { 4281 mtx_unlock(&ctl_softc->ctl_lock); 4282 printf("ctl: can't allocate LUN on target %ju, out of " 4283 "LUNs\n", (uintmax_t)target_id.id); 4284 if (lun->flags & CTL_LUN_MALLOCED) 4285 free(lun, M_CTL); 4286 be_lun->lun_config_status(be_lun->be_lun, 4287 CTL_LUN_CONFIG_FAILURE); 4288 return (ENOSPC); 4289 } 4290 } 4291 ctl_set_mask(ctl_softc->ctl_lun_mask, lun_number); 4292 4293 lun->target = target_id; 4294 lun->lun = lun_number; 4295 lun->be_lun = be_lun; 4296 /* 4297 * The processor LUN is always enabled. Disk LUNs come on line 4298 * disabled, and must be enabled by the backend. 4299 */ 4300 lun->flags |= CTL_LUN_DISABLED; 4301 lun->backend = be_lun->be; 4302 be_lun->ctl_lun = lun; 4303 be_lun->lun_id = lun_number; 4304 atomic_add_int(&be_lun->be->num_luns, 1); 4305 if (be_lun->flags & CTL_LUN_FLAG_POWERED_OFF) 4306 lun->flags |= CTL_LUN_STOPPED; 4307 4308 if (be_lun->flags & CTL_LUN_FLAG_INOPERABLE) 4309 lun->flags |= CTL_LUN_INOPERABLE; 4310 4311 if (be_lun->flags & CTL_LUN_FLAG_PRIMARY) 4312 lun->flags |= CTL_LUN_PRIMARY_SC; 4313 4314 lun->ctl_softc = ctl_softc; 4315 TAILQ_INIT(&lun->ooa_queue); 4316 TAILQ_INIT(&lun->blocked_queue); 4317 STAILQ_INIT(&lun->error_list); 4318 4319 /* 4320 * Initialize the mode page index. 4321 */ 4322 ctl_init_page_index(lun); 4323 4324 /* 4325 * Set the poweron UA for all initiators on this LUN only. 4326 */ 4327 for (i = 0; i < CTL_MAX_INITIATORS; i++) 4328 lun->pending_sense[i].ua_pending = CTL_UA_POWERON; 4329 4330 /* 4331 * Now, before we insert this lun on the lun list, set the lun 4332 * inventory changed UA for all other luns. 
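 * (CTL_UA_LUN_CHANGE is eventually surfaced to each such initiator as
 * a unit attention, nominally "REPORTED LUNS DATA HAS CHANGED", so
 * that it knows to re-issue REPORT LUNS and pick up the new LUN.)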
4333 */ 4334 STAILQ_FOREACH(nlun, &ctl_softc->lun_list, links) { 4335 for (i = 0; i < CTL_MAX_INITIATORS; i++) { 4336 nlun->pending_sense[i].ua_pending |= CTL_UA_LUN_CHANGE; 4337 } 4338 } 4339 4340 STAILQ_INSERT_TAIL(&ctl_softc->lun_list, lun, links); 4341 4342 ctl_softc->ctl_luns[lun_number] = lun; 4343 4344 ctl_softc->num_luns++; 4345 4346 /* Setup statistics gathering */ 4347 lun->stats.device_type = be_lun->lun_type; 4348 lun->stats.lun_number = lun_number; 4349 if (lun->stats.device_type == T_DIRECT) 4350 lun->stats.blocksize = be_lun->blocksize; 4351 else 4352 lun->stats.flags = CTL_LUN_STATS_NO_BLOCKSIZE; 4353 for (i = 0;i < CTL_MAX_PORTS;i++) 4354 lun->stats.ports[i].targ_port = i; 4355 4356 mtx_unlock(&ctl_softc->ctl_lock); 4357 4358 lun->be_lun->lun_config_status(lun->be_lun->be_lun, CTL_LUN_CONFIG_OK); 4359 4360 /* 4361 * Run through each registered FETD and bring it online if it isn't 4362 * already. Enable the target ID if it hasn't been enabled, and 4363 * enable this particular LUN. 4364 */ 4365 STAILQ_FOREACH(fe, &ctl_softc->fe_list, links) { 4366 int retval; 4367 4368 /* 4369 * XXX KDM this only works for ONE TARGET ID. We'll need 4370 * to do things differently if we go to a multiple target 4371 * ID scheme. 4372 */ 4373 if ((fe->status & CTL_PORT_STATUS_TARG_ONLINE) == 0) { 4374 4375 retval = fe->targ_enable(fe->targ_lun_arg, target_id); 4376 if (retval != 0) { 4377 printf("ctl_alloc_lun: FETD %s port %d " 4378 "returned error %d for targ_enable on " 4379 "target %ju\n", fe->port_name, 4380 fe->targ_port, retval, 4381 (uintmax_t)target_id.id); 4382 } else 4383 fe->status |= CTL_PORT_STATUS_TARG_ONLINE; 4384 } 4385 4386 retval = fe->lun_enable(fe->targ_lun_arg, target_id,lun_number); 4387 if (retval != 0) { 4388 printf("ctl_alloc_lun: FETD %s port %d returned error " 4389 "%d for lun_enable on target %ju lun %d\n", 4390 fe->port_name, fe->targ_port, retval, 4391 (uintmax_t)target_id.id, lun_number); 4392 } else 4393 fe->status |= CTL_PORT_STATUS_LUN_ONLINE; 4394 } 4395 return (0); 4396 } 4397 4398 /* 4399 * Delete a LUN. 4400 * Assumptions: 4401 * - LUN has already been marked invalid and any pending I/O has been taken 4402 * care of. 4403 */ 4404 static int 4405 ctl_free_lun(struct ctl_lun *lun) 4406 { 4407 struct ctl_softc *softc; 4408 #if 0 4409 struct ctl_frontend *fe; 4410 #endif 4411 struct ctl_lun *nlun; 4412 union ctl_io *io, *next_io; 4413 int i; 4414 4415 softc = lun->ctl_softc; 4416 4417 mtx_assert(&softc->ctl_lock, MA_OWNED); 4418 4419 STAILQ_REMOVE(&softc->lun_list, lun, ctl_lun, links); 4420 4421 ctl_clear_mask(softc->ctl_lun_mask, lun->lun); 4422 4423 softc->ctl_luns[lun->lun] = NULL; 4424 4425 if (TAILQ_FIRST(&lun->ooa_queue) != NULL) { 4426 printf("ctl_free_lun: aieee!! freeing a LUN with " 4427 "outstanding I/O!!\n"); 4428 } 4429 4430 /* 4431 * If we have anything pending on the RtR queue, remove it. 4432 */ 4433 for (io = (union ctl_io *)STAILQ_FIRST(&softc->rtr_queue); io != NULL; 4434 io = next_io) { 4435 next_io = (union ctl_io *)STAILQ_NEXT(&io->io_hdr, links); 4436 if ((io->io_hdr.nexus.targ_target.id == lun->target.id) 4437 && (io->io_hdr.nexus.targ_lun == lun->lun)) 4438 STAILQ_REMOVE(&softc->rtr_queue, &io->io_hdr, 4439 ctl_io_hdr, links); 4440 } 4441 4442 /* 4443 * Then remove everything from the blocked queue. 
4444 */ 4445 for (io = (union ctl_io *)TAILQ_FIRST(&lun->blocked_queue); io != NULL; 4446 io = next_io) { 4447 next_io = (union ctl_io *)TAILQ_NEXT(&io->io_hdr,blocked_links); 4448 TAILQ_REMOVE(&lun->blocked_queue, &io->io_hdr, blocked_links); 4449 io->io_hdr.flags &= ~CTL_FLAG_BLOCKED; 4450 } 4451 4452 /* 4453 * Now clear out the OOA queue, and free all the I/O. 4454 * XXX KDM should we notify the FETD here? We probably need to 4455 * quiesce the LUN before deleting it. 4456 */ 4457 for (io = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); io != NULL; 4458 io = next_io) { 4459 next_io = (union ctl_io *)TAILQ_NEXT(&io->io_hdr, ooa_links); 4460 TAILQ_REMOVE(&lun->ooa_queue, &io->io_hdr, ooa_links); 4461 ctl_free_io_internal(io, /*have_lock*/ 1); 4462 } 4463 4464 softc->num_luns--; 4465 4466 /* 4467 * XXX KDM this scheme only works for a single target/multiple LUN 4468 * setup. It needs to be revamped for a multiple target scheme. 4469 * 4470 * XXX KDM this results in fe->lun_disable() getting called twice, 4471 * once when ctl_disable_lun() is called, and a second time here. 4472 * We really need to re-think the LUN disable semantics. There 4473 * should probably be several steps/levels to LUN removal: 4474 * - disable 4475 * - invalidate 4476 * - free 4477 * 4478 * Right now we only have a disable method when communicating to 4479 * the front end ports, at least for individual LUNs. 4480 */ 4481 #if 0 4482 STAILQ_FOREACH(fe, &softc->fe_list, links) { 4483 int retval; 4484 4485 retval = fe->lun_disable(fe->targ_lun_arg, lun->target, 4486 lun->lun); 4487 if (retval != 0) { 4488 printf("ctl_free_lun: FETD %s port %d returned error " 4489 "%d for lun_disable on target %ju lun %jd\n", 4490 fe->port_name, fe->targ_port, retval, 4491 (uintmax_t)lun->target.id, (intmax_t)lun->lun); 4492 } 4493 4494 if (STAILQ_FIRST(&softc->lun_list) == NULL) { 4495 fe->status &= ~CTL_PORT_STATUS_LUN_ONLINE; 4496 4497 retval = fe->targ_disable(fe->targ_lun_arg,lun->target); 4498 if (retval != 0) { 4499 printf("ctl_free_lun: FETD %s port %d " 4500 "returned error %d for targ_disable on " 4501 "target %ju\n", fe->port_name, 4502 fe->targ_port, retval, 4503 (uintmax_t)lun->target.id); 4504 } else 4505 fe->status &= ~CTL_PORT_STATUS_TARG_ONLINE; 4506 4507 if ((fe->status & CTL_PORT_STATUS_TARG_ONLINE) != 0) 4508 continue; 4509 4510 #if 0 4511 fe->port_offline(fe->onoff_arg); 4512 fe->status &= ~CTL_PORT_STATUS_ONLINE; 4513 #endif 4514 } 4515 } 4516 #endif 4517 4518 /* 4519 * Tell the backend to free resources, if this LUN has a backend. 4520 */ 4521 atomic_subtract_int(&lun->be_lun->be->num_luns, 1); 4522 lun->be_lun->lun_shutdown(lun->be_lun->be_lun); 4523 4524 if (lun->flags & CTL_LUN_MALLOCED) 4525 free(lun, M_CTL); 4526 4527 STAILQ_FOREACH(nlun, &softc->lun_list, links) { 4528 for (i = 0; i < CTL_MAX_INITIATORS; i++) { 4529 nlun->pending_sense[i].ua_pending |= CTL_UA_LUN_CHANGE; 4530 } 4531 } 4532 4533 return (0); 4534 } 4535 4536 static void 4537 ctl_create_lun(struct ctl_be_lun *be_lun) 4538 { 4539 struct ctl_softc *ctl_softc; 4540 4541 ctl_softc = control_softc; 4542 4543 /* 4544 * ctl_alloc_lun() should handle all potential failure cases. 
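 * Success or failure is reported back to the backend through
 * be_lun->lun_config_status() (CTL_LUN_CONFIG_OK or
 * CTL_LUN_CONFIG_FAILURE), which is why the return value is not
 * checked here.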
4545 */ 4546 ctl_alloc_lun(ctl_softc, NULL, be_lun, ctl_softc->target); 4547 } 4548 4549 int 4550 ctl_add_lun(struct ctl_be_lun *be_lun) 4551 { 4552 struct ctl_softc *ctl_softc; 4553 4554 ctl_softc = control_softc; 4555 4556 mtx_lock(&ctl_softc->ctl_lock); 4557 STAILQ_INSERT_TAIL(&ctl_softc->pending_lun_queue, be_lun, links); 4558 mtx_unlock(&ctl_softc->ctl_lock); 4559 4560 ctl_wakeup_thread(); 4561 4562 return (0); 4563 } 4564 4565 int 4566 ctl_enable_lun(struct ctl_be_lun *be_lun) 4567 { 4568 struct ctl_softc *ctl_softc; 4569 struct ctl_frontend *fe, *nfe; 4570 struct ctl_lun *lun; 4571 int retval; 4572 4573 ctl_softc = control_softc; 4574 4575 lun = (struct ctl_lun *)be_lun->ctl_lun; 4576 4577 mtx_lock(&ctl_softc->ctl_lock); 4578 if ((lun->flags & CTL_LUN_DISABLED) == 0) { 4579 /* 4580 * eh? Why did we get called if the LUN is already 4581 * enabled? 4582 */ 4583 mtx_unlock(&ctl_softc->ctl_lock); 4584 return (0); 4585 } 4586 lun->flags &= ~CTL_LUN_DISABLED; 4587 4588 for (fe = STAILQ_FIRST(&ctl_softc->fe_list); fe != NULL; fe = nfe) { 4589 nfe = STAILQ_NEXT(fe, links); 4590 4591 /* 4592 * Drop the lock while we call the FETD's enable routine. 4593 * This can lead to a callback into CTL (at least in the 4594 * case of the internal initiator frontend. 4595 */ 4596 mtx_unlock(&ctl_softc->ctl_lock); 4597 retval = fe->lun_enable(fe->targ_lun_arg, lun->target,lun->lun); 4598 mtx_lock(&ctl_softc->ctl_lock); 4599 if (retval != 0) { 4600 printf("%s: FETD %s port %d returned error " 4601 "%d for lun_enable on target %ju lun %jd\n", 4602 __func__, fe->port_name, fe->targ_port, retval, 4603 (uintmax_t)lun->target.id, (intmax_t)lun->lun); 4604 } 4605 #if 0 4606 else { 4607 /* NOTE: TODO: why does lun enable affect port status? */ 4608 fe->status |= CTL_PORT_STATUS_LUN_ONLINE; 4609 } 4610 #endif 4611 } 4612 4613 mtx_unlock(&ctl_softc->ctl_lock); 4614 4615 return (0); 4616 } 4617 4618 int 4619 ctl_disable_lun(struct ctl_be_lun *be_lun) 4620 { 4621 struct ctl_softc *ctl_softc; 4622 struct ctl_frontend *fe; 4623 struct ctl_lun *lun; 4624 int retval; 4625 4626 ctl_softc = control_softc; 4627 4628 lun = (struct ctl_lun *)be_lun->ctl_lun; 4629 4630 mtx_lock(&ctl_softc->ctl_lock); 4631 4632 if (lun->flags & CTL_LUN_DISABLED) { 4633 mtx_unlock(&ctl_softc->ctl_lock); 4634 return (0); 4635 } 4636 lun->flags |= CTL_LUN_DISABLED; 4637 4638 STAILQ_FOREACH(fe, &ctl_softc->fe_list, links) { 4639 mtx_unlock(&ctl_softc->ctl_lock); 4640 /* 4641 * Drop the lock before we call the frontend's disable 4642 * routine, to avoid lock order reversals. 4643 * 4644 * XXX KDM what happens if the frontend list changes while 4645 * we're traversing it? It's unlikely, but should be handled. 
4646 */ 4647 retval = fe->lun_disable(fe->targ_lun_arg, lun->target, 4648 lun->lun); 4649 mtx_lock(&ctl_softc->ctl_lock); 4650 if (retval != 0) { 4651 printf("ctl_alloc_lun: FETD %s port %d returned error " 4652 "%d for lun_disable on target %ju lun %jd\n", 4653 fe->port_name, fe->targ_port, retval, 4654 (uintmax_t)lun->target.id, (intmax_t)lun->lun); 4655 } 4656 } 4657 4658 mtx_unlock(&ctl_softc->ctl_lock); 4659 4660 return (0); 4661 } 4662 4663 int 4664 ctl_start_lun(struct ctl_be_lun *be_lun) 4665 { 4666 struct ctl_softc *ctl_softc; 4667 struct ctl_lun *lun; 4668 4669 ctl_softc = control_softc; 4670 4671 lun = (struct ctl_lun *)be_lun->ctl_lun; 4672 4673 mtx_lock(&ctl_softc->ctl_lock); 4674 lun->flags &= ~CTL_LUN_STOPPED; 4675 mtx_unlock(&ctl_softc->ctl_lock); 4676 4677 return (0); 4678 } 4679 4680 int 4681 ctl_stop_lun(struct ctl_be_lun *be_lun) 4682 { 4683 struct ctl_softc *ctl_softc; 4684 struct ctl_lun *lun; 4685 4686 ctl_softc = control_softc; 4687 4688 lun = (struct ctl_lun *)be_lun->ctl_lun; 4689 4690 mtx_lock(&ctl_softc->ctl_lock); 4691 lun->flags |= CTL_LUN_STOPPED; 4692 mtx_unlock(&ctl_softc->ctl_lock); 4693 4694 return (0); 4695 } 4696 4697 int 4698 ctl_lun_offline(struct ctl_be_lun *be_lun) 4699 { 4700 struct ctl_softc *ctl_softc; 4701 struct ctl_lun *lun; 4702 4703 ctl_softc = control_softc; 4704 4705 lun = (struct ctl_lun *)be_lun->ctl_lun; 4706 4707 mtx_lock(&ctl_softc->ctl_lock); 4708 lun->flags |= CTL_LUN_OFFLINE; 4709 mtx_unlock(&ctl_softc->ctl_lock); 4710 4711 return (0); 4712 } 4713 4714 int 4715 ctl_lun_online(struct ctl_be_lun *be_lun) 4716 { 4717 struct ctl_softc *ctl_softc; 4718 struct ctl_lun *lun; 4719 4720 ctl_softc = control_softc; 4721 4722 lun = (struct ctl_lun *)be_lun->ctl_lun; 4723 4724 mtx_lock(&ctl_softc->ctl_lock); 4725 lun->flags &= ~CTL_LUN_OFFLINE; 4726 mtx_unlock(&ctl_softc->ctl_lock); 4727 4728 return (0); 4729 } 4730 4731 int 4732 ctl_invalidate_lun(struct ctl_be_lun *be_lun) 4733 { 4734 struct ctl_softc *ctl_softc; 4735 struct ctl_lun *lun; 4736 4737 ctl_softc = control_softc; 4738 4739 lun = (struct ctl_lun *)be_lun->ctl_lun; 4740 4741 mtx_lock(&ctl_softc->ctl_lock); 4742 4743 /* 4744 * The LUN needs to be disabled before it can be marked invalid. 4745 */ 4746 if ((lun->flags & CTL_LUN_DISABLED) == 0) { 4747 mtx_unlock(&ctl_softc->ctl_lock); 4748 return (-1); 4749 } 4750 /* 4751 * Mark the LUN invalid. 4752 */ 4753 lun->flags |= CTL_LUN_INVALID; 4754 4755 /* 4756 * If there is nothing in the OOA queue, go ahead and free the LUN. 4757 * If we have something in the OOA queue, we'll free it when the 4758 * last I/O completes. 
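 * (The deferred free is presumably driven by the CTL_LUN_INVALID flag
 * set above: when the last I/O on the OOA queue completes, the
 * completion path can notice the flag and call ctl_free_lun() then.)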
4759 */ 4760 if (TAILQ_FIRST(&lun->ooa_queue) == NULL) 4761 ctl_free_lun(lun); 4762 mtx_unlock(&ctl_softc->ctl_lock); 4763 4764 return (0); 4765 } 4766 4767 int 4768 ctl_lun_inoperable(struct ctl_be_lun *be_lun) 4769 { 4770 struct ctl_softc *ctl_softc; 4771 struct ctl_lun *lun; 4772 4773 ctl_softc = control_softc; 4774 lun = (struct ctl_lun *)be_lun->ctl_lun; 4775 4776 mtx_lock(&ctl_softc->ctl_lock); 4777 lun->flags |= CTL_LUN_INOPERABLE; 4778 mtx_unlock(&ctl_softc->ctl_lock); 4779 4780 return (0); 4781 } 4782 4783 int 4784 ctl_lun_operable(struct ctl_be_lun *be_lun) 4785 { 4786 struct ctl_softc *ctl_softc; 4787 struct ctl_lun *lun; 4788 4789 ctl_softc = control_softc; 4790 lun = (struct ctl_lun *)be_lun->ctl_lun; 4791 4792 mtx_lock(&ctl_softc->ctl_lock); 4793 lun->flags &= ~CTL_LUN_INOPERABLE; 4794 mtx_unlock(&ctl_softc->ctl_lock); 4795 4796 return (0); 4797 } 4798 4799 int 4800 ctl_lun_power_lock(struct ctl_be_lun *be_lun, struct ctl_nexus *nexus, 4801 int lock) 4802 { 4803 struct ctl_softc *softc; 4804 struct ctl_lun *lun; 4805 struct copan_aps_subpage *current_sp; 4806 struct ctl_page_index *page_index; 4807 int i; 4808 4809 softc = control_softc; 4810 4811 mtx_lock(&softc->ctl_lock); 4812 4813 lun = (struct ctl_lun *)be_lun->ctl_lun; 4814 4815 page_index = NULL; 4816 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { 4817 if ((lun->mode_pages.index[i].page_code & SMPH_PC_MASK) != 4818 APS_PAGE_CODE) 4819 continue; 4820 4821 if (lun->mode_pages.index[i].subpage != APS_SUBPAGE_CODE) 4822 continue; 4823 page_index = &lun->mode_pages.index[i]; 4824 } 4825 4826 if (page_index == NULL) { 4827 mtx_unlock(&softc->ctl_lock); 4828 printf("%s: APS subpage not found for lun %ju!\n", __func__, 4829 (uintmax_t)lun->lun); 4830 return (1); 4831 } 4832 #if 0 4833 if ((softc->aps_locked_lun != 0) 4834 && (softc->aps_locked_lun != lun->lun)) { 4835 printf("%s: attempt to lock LUN %llu when %llu is already " 4836 "locked\n"); 4837 mtx_unlock(&softc->ctl_lock); 4838 return (1); 4839 } 4840 #endif 4841 4842 current_sp = (struct copan_aps_subpage *)(page_index->page_data + 4843 (page_index->page_len * CTL_PAGE_CURRENT)); 4844 4845 if (lock != 0) { 4846 current_sp->lock_active = APS_LOCK_ACTIVE; 4847 softc->aps_locked_lun = lun->lun; 4848 } else { 4849 current_sp->lock_active = 0; 4850 softc->aps_locked_lun = 0; 4851 } 4852 4853 4854 /* 4855 * If we're in HA mode, try to send the lock message to the other 4856 * side. 
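 * XXX Note that the local lock state (lock_active and
 * softc->aps_locked_lun) has already been updated above; if the
 * message to the peer fails we return an error without rolling that
 * state back.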
4857 */ 4858 if (ctl_is_single == 0) { 4859 int isc_retval; 4860 union ctl_ha_msg lock_msg; 4861 4862 lock_msg.hdr.nexus = *nexus; 4863 lock_msg.hdr.msg_type = CTL_MSG_APS_LOCK; 4864 if (lock != 0) 4865 lock_msg.aps.lock_flag = 1; 4866 else 4867 lock_msg.aps.lock_flag = 0; 4868 isc_retval = ctl_ha_msg_send(CTL_HA_CHAN_CTL, &lock_msg, 4869 sizeof(lock_msg), 0); 4870 if (isc_retval > CTL_HA_STATUS_SUCCESS) { 4871 printf("%s: APS (lock=%d) error returned from " 4872 "ctl_ha_msg_send: %d\n", __func__, lock, isc_retval); 4873 mtx_unlock(&softc->ctl_lock); 4874 return (1); 4875 } 4876 } 4877 4878 mtx_unlock(&softc->ctl_lock); 4879 4880 return (0); 4881 } 4882 4883 void 4884 ctl_lun_capacity_changed(struct ctl_be_lun *be_lun) 4885 { 4886 struct ctl_lun *lun; 4887 struct ctl_softc *softc; 4888 int i; 4889 4890 softc = control_softc; 4891 4892 mtx_lock(&softc->ctl_lock); 4893 4894 lun = (struct ctl_lun *)be_lun->ctl_lun; 4895 4896 for (i = 0; i < CTL_MAX_INITIATORS; i++) 4897 lun->pending_sense[i].ua_pending |= CTL_UA_CAPACITY_CHANGED; 4898 4899 mtx_unlock(&softc->ctl_lock); 4900 } 4901 4902 /* 4903 * Backend "memory move is complete" callback for requests that never 4904 * make it down to say RAIDCore's configuration code. 4905 */ 4906 int 4907 ctl_config_move_done(union ctl_io *io) 4908 { 4909 int retval; 4910 4911 retval = CTL_RETVAL_COMPLETE; 4912 4913 4914 CTL_DEBUG_PRINT(("ctl_config_move_done\n")); 4915 /* 4916 * XXX KDM this shouldn't happen, but what if it does? 4917 */ 4918 if (io->io_hdr.io_type != CTL_IO_SCSI) 4919 panic("I/O type isn't CTL_IO_SCSI!"); 4920 4921 if ((io->io_hdr.port_status == 0) 4922 && ((io->io_hdr.flags & CTL_FLAG_ABORT) == 0) 4923 && ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE)) 4924 io->io_hdr.status = CTL_SUCCESS; 4925 else if ((io->io_hdr.port_status != 0) 4926 && ((io->io_hdr.flags & CTL_FLAG_ABORT) == 0) 4927 && ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE)){ 4928 /* 4929 * For hardware error sense keys, the sense key 4930 * specific value is defined to be a retry count, 4931 * but we use it to pass back an internal FETD 4932 * error code. XXX KDM Hopefully the FETD is only 4933 * using 16 bits for an error code, since that's 4934 * all the space we have in the sks field. 4935 */ 4936 ctl_set_internal_failure(&io->scsiio, 4937 /*sks_valid*/ 1, 4938 /*retry_count*/ 4939 io->io_hdr.port_status); 4940 free(io->scsiio.kern_data_ptr, M_CTL); 4941 ctl_done(io); 4942 goto bailout; 4943 } 4944 4945 if (((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_IN) 4946 || ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS) 4947 || ((io->io_hdr.flags & CTL_FLAG_ABORT) != 0)) { 4948 /* 4949 * XXX KDM just assuming a single pointer here, and not a 4950 * S/G list. If we start using S/G lists for config data, 4951 * we'll need to know how to clean them up here as well. 4952 */ 4953 free(io->scsiio.kern_data_ptr, M_CTL); 4954 /* Hopefully the user has already set the status... */ 4955 ctl_done(io); 4956 } else { 4957 /* 4958 * XXX KDM now we need to continue data movement. Some 4959 * options: 4960 * - call ctl_scsiio() again? We don't do this for data 4961 * writes, because for those at least we know ahead of 4962 * time where the write will go and how long it is. For 4963 * config writes, though, that information is largely 4964 * contained within the write itself, thus we need to 4965 * parse out the data again. 4966 * 4967 * - Call some other function once the data is in? 
4968 */ 4969 4970 /* 4971 * XXX KDM call ctl_scsiio() again for now, and check flag 4972 * bits to see whether we're allocated or not. 4973 */ 4974 retval = ctl_scsiio(&io->scsiio); 4975 } 4976 bailout: 4977 return (retval); 4978 } 4979 4980 /* 4981 * This gets called by a backend driver when it is done with a 4982 * configuration write. 4983 */ 4984 void 4985 ctl_config_write_done(union ctl_io *io) 4986 { 4987 /* 4988 * If the IO_CONT flag is set, we need to call the supplied 4989 * function to continue processing the I/O, instead of completing 4990 * the I/O just yet. 4991 * 4992 * If there is an error, though, we don't want to keep processing. 4993 * Instead, just send status back to the initiator. 4994 */ 4995 if ((io->io_hdr.flags & CTL_FLAG_IO_CONT) 4996 && (((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE) 4997 || ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS))) { 4998 io->scsiio.io_cont(io); 4999 return; 5000 } 5001 /* 5002 * Since a configuration write can be done for commands that actually 5003 * have data allocated, like write buffer, and commands that have 5004 * no data, like start/stop unit, we need to check here. 5005 */ 5006 if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_OUT) 5007 free(io->scsiio.kern_data_ptr, M_CTL); 5008 ctl_done(io); 5009 } 5010 5011 /* 5012 * SCSI release command. 5013 */ 5014 int 5015 ctl_scsi_release(struct ctl_scsiio *ctsio) 5016 { 5017 int length, longid, thirdparty_id, resv_id; 5018 struct ctl_softc *ctl_softc; 5019 struct ctl_lun *lun; 5020 5021 length = 0; 5022 resv_id = 0; 5023 5024 CTL_DEBUG_PRINT(("ctl_scsi_release\n")); 5025 5026 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 5027 ctl_softc = control_softc; 5028 5029 switch (ctsio->cdb[0]) { 5030 case RELEASE: { 5031 struct scsi_release *cdb; 5032 5033 cdb = (struct scsi_release *)ctsio->cdb; 5034 if ((cdb->byte2 & 0x1f) != 0) { 5035 ctl_set_invalid_field(ctsio, 5036 /*sks_valid*/ 1, 5037 /*command*/ 1, 5038 /*field*/ 1, 5039 /*bit_valid*/ 0, 5040 /*bit*/ 0); 5041 ctl_done((union ctl_io *)ctsio); 5042 return (CTL_RETVAL_COMPLETE); 5043 } 5044 break; 5045 } 5046 case RELEASE_10: { 5047 struct scsi_release_10 *cdb; 5048 5049 cdb = (struct scsi_release_10 *)ctsio->cdb; 5050 5051 if ((cdb->byte2 & SR10_EXTENT) != 0) { 5052 ctl_set_invalid_field(ctsio, 5053 /*sks_valid*/ 1, 5054 /*command*/ 1, 5055 /*field*/ 1, 5056 /*bit_valid*/ 1, 5057 /*bit*/ 0); 5058 ctl_done((union ctl_io *)ctsio); 5059 return (CTL_RETVAL_COMPLETE); 5060 5061 } 5062 5063 if ((cdb->byte2 & SR10_3RDPTY) != 0) { 5064 ctl_set_invalid_field(ctsio, 5065 /*sks_valid*/ 1, 5066 /*command*/ 1, 5067 /*field*/ 1, 5068 /*bit_valid*/ 1, 5069 /*bit*/ 4); 5070 ctl_done((union ctl_io *)ctsio); 5071 return (CTL_RETVAL_COMPLETE); 5072 } 5073 5074 if (cdb->byte2 & SR10_LONGID) 5075 longid = 1; 5076 else 5077 thirdparty_id = cdb->thirdparty_id; 5078 5079 resv_id = cdb->resv_id; 5080 length = scsi_2btoul(cdb->length); 5081 break; 5082 } 5083 } 5084 5085 5086 /* 5087 * XXX KDM right now, we only support LUN reservation. We don't 5088 * support 3rd party reservations, or extent reservations, which 5089 * might actually need the parameter list. If we've gotten this 5090 * far, we've got a LUN reservation. Anything else got kicked out 5091 * above. So, according to SPC, ignore the length. 
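 * (Because length is forced to zero just below, the parameter list
 * allocation and the thirdparty_id extraction that follow are
 * effectively dead code, presumably left in place for a future
 * extent/third-party implementation.)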
5092 */ 5093 length = 0; 5094 5095 if (((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) 5096 && (length > 0)) { 5097 ctsio->kern_data_ptr = malloc(length, M_CTL, M_WAITOK); 5098 ctsio->kern_data_len = length; 5099 ctsio->kern_total_len = length; 5100 ctsio->kern_data_resid = 0; 5101 ctsio->kern_rel_offset = 0; 5102 ctsio->kern_sg_entries = 0; 5103 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 5104 ctsio->be_move_done = ctl_config_move_done; 5105 ctl_datamove((union ctl_io *)ctsio); 5106 5107 return (CTL_RETVAL_COMPLETE); 5108 } 5109 5110 if (length > 0) 5111 thirdparty_id = scsi_8btou64(ctsio->kern_data_ptr); 5112 5113 mtx_lock(&ctl_softc->ctl_lock); 5114 5115 /* 5116 * According to SPC, it is not an error for an intiator to attempt 5117 * to release a reservation on a LUN that isn't reserved, or that 5118 * is reserved by another initiator. The reservation can only be 5119 * released, though, by the initiator who made it or by one of 5120 * several reset type events. 5121 */ 5122 if (lun->flags & CTL_LUN_RESERVED) { 5123 if ((ctsio->io_hdr.nexus.initid.id == lun->rsv_nexus.initid.id) 5124 && (ctsio->io_hdr.nexus.targ_port == lun->rsv_nexus.targ_port) 5125 && (ctsio->io_hdr.nexus.targ_target.id == 5126 lun->rsv_nexus.targ_target.id)) { 5127 lun->flags &= ~CTL_LUN_RESERVED; 5128 } 5129 } 5130 5131 ctsio->scsi_status = SCSI_STATUS_OK; 5132 ctsio->io_hdr.status = CTL_SUCCESS; 5133 5134 if (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) { 5135 free(ctsio->kern_data_ptr, M_CTL); 5136 ctsio->io_hdr.flags &= ~CTL_FLAG_ALLOCATED; 5137 } 5138 5139 mtx_unlock(&ctl_softc->ctl_lock); 5140 5141 ctl_done((union ctl_io *)ctsio); 5142 return (CTL_RETVAL_COMPLETE); 5143 } 5144 5145 int 5146 ctl_scsi_reserve(struct ctl_scsiio *ctsio) 5147 { 5148 int extent, thirdparty, longid; 5149 int resv_id, length; 5150 uint64_t thirdparty_id; 5151 struct ctl_softc *ctl_softc; 5152 struct ctl_lun *lun; 5153 5154 extent = 0; 5155 thirdparty = 0; 5156 longid = 0; 5157 resv_id = 0; 5158 length = 0; 5159 thirdparty_id = 0; 5160 5161 CTL_DEBUG_PRINT(("ctl_reserve\n")); 5162 5163 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 5164 ctl_softc = control_softc; 5165 5166 switch (ctsio->cdb[0]) { 5167 case RESERVE: { 5168 struct scsi_reserve *cdb; 5169 5170 cdb = (struct scsi_reserve *)ctsio->cdb; 5171 if ((cdb->byte2 & 0x1f) != 0) { 5172 ctl_set_invalid_field(ctsio, 5173 /*sks_valid*/ 1, 5174 /*command*/ 1, 5175 /*field*/ 1, 5176 /*bit_valid*/ 0, 5177 /*bit*/ 0); 5178 ctl_done((union ctl_io *)ctsio); 5179 return (CTL_RETVAL_COMPLETE); 5180 } 5181 resv_id = cdb->resv_id; 5182 length = scsi_2btoul(cdb->length); 5183 break; 5184 } 5185 case RESERVE_10: { 5186 struct scsi_reserve_10 *cdb; 5187 5188 cdb = (struct scsi_reserve_10 *)ctsio->cdb; 5189 5190 if ((cdb->byte2 & SR10_EXTENT) != 0) { 5191 ctl_set_invalid_field(ctsio, 5192 /*sks_valid*/ 1, 5193 /*command*/ 1, 5194 /*field*/ 1, 5195 /*bit_valid*/ 1, 5196 /*bit*/ 0); 5197 ctl_done((union ctl_io *)ctsio); 5198 return (CTL_RETVAL_COMPLETE); 5199 } 5200 if ((cdb->byte2 & SR10_3RDPTY) != 0) { 5201 ctl_set_invalid_field(ctsio, 5202 /*sks_valid*/ 1, 5203 /*command*/ 1, 5204 /*field*/ 1, 5205 /*bit_valid*/ 1, 5206 /*bit*/ 4); 5207 ctl_done((union ctl_io *)ctsio); 5208 return (CTL_RETVAL_COMPLETE); 5209 } 5210 if (cdb->byte2 & SR10_LONGID) 5211 longid = 1; 5212 else 5213 thirdparty_id = cdb->thirdparty_id; 5214 5215 resv_id = cdb->resv_id; 5216 length = scsi_2btoul(cdb->length); 5217 break; 5218 } 5219 } 5220 5221 /* 5222 * XXX KDM right now, we only support LUN 
reservation. We don't 5223 * support 3rd party reservations, or extent reservations, which 5224 * might actually need the parameter list. If we've gotten this 5225 * far, we've got a LUN reservation. Anything else got kicked out 5226 * above. So, according to SPC, ignore the length. 5227 */ 5228 length = 0; 5229 5230 if (((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) 5231 && (length > 0)) { 5232 ctsio->kern_data_ptr = malloc(length, M_CTL, M_WAITOK); 5233 ctsio->kern_data_len = length; 5234 ctsio->kern_total_len = length; 5235 ctsio->kern_data_resid = 0; 5236 ctsio->kern_rel_offset = 0; 5237 ctsio->kern_sg_entries = 0; 5238 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 5239 ctsio->be_move_done = ctl_config_move_done; 5240 ctl_datamove((union ctl_io *)ctsio); 5241 5242 return (CTL_RETVAL_COMPLETE); 5243 } 5244 5245 if (length > 0) 5246 thirdparty_id = scsi_8btou64(ctsio->kern_data_ptr); 5247 5248 mtx_lock(&ctl_softc->ctl_lock); 5249 if (lun->flags & CTL_LUN_RESERVED) { 5250 if ((ctsio->io_hdr.nexus.initid.id != lun->rsv_nexus.initid.id) 5251 || (ctsio->io_hdr.nexus.targ_port != lun->rsv_nexus.targ_port) 5252 || (ctsio->io_hdr.nexus.targ_target.id != 5253 lun->rsv_nexus.targ_target.id)) { 5254 ctsio->scsi_status = SCSI_STATUS_RESERV_CONFLICT; 5255 ctsio->io_hdr.status = CTL_SCSI_ERROR; 5256 goto bailout; 5257 } 5258 } 5259 5260 lun->flags |= CTL_LUN_RESERVED; 5261 lun->rsv_nexus = ctsio->io_hdr.nexus; 5262 5263 ctsio->scsi_status = SCSI_STATUS_OK; 5264 ctsio->io_hdr.status = CTL_SUCCESS; 5265 5266 bailout: 5267 if (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) { 5268 free(ctsio->kern_data_ptr, M_CTL); 5269 ctsio->io_hdr.flags &= ~CTL_FLAG_ALLOCATED; 5270 } 5271 5272 mtx_unlock(&ctl_softc->ctl_lock); 5273 5274 ctl_done((union ctl_io *)ctsio); 5275 return (CTL_RETVAL_COMPLETE); 5276 } 5277 5278 int 5279 ctl_start_stop(struct ctl_scsiio *ctsio) 5280 { 5281 struct scsi_start_stop_unit *cdb; 5282 struct ctl_lun *lun; 5283 struct ctl_softc *ctl_softc; 5284 int retval; 5285 5286 CTL_DEBUG_PRINT(("ctl_start_stop\n")); 5287 5288 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 5289 ctl_softc = control_softc; 5290 retval = 0; 5291 5292 cdb = (struct scsi_start_stop_unit *)ctsio->cdb; 5293 5294 /* 5295 * XXX KDM 5296 * We don't support the immediate bit on a stop unit. In order to 5297 * do that, we would need to code up a way to know that a stop is 5298 * pending, and hold off any new commands until it completes, one 5299 * way or another. Then we could accept or reject those commands 5300 * depending on its status. We would almost need to do the reverse 5301 * of what we do below for an immediate start -- return the copy of 5302 * the ctl_io to the FETD with status to send to the host (and to 5303 * free the copy!) and then free the original I/O once the stop 5304 * actually completes. That way, the OOA queue mechanism can work 5305 * to block commands that shouldn't proceed. Another alternative 5306 * would be to put the copy in the queue in place of the original, 5307 * and return the original back to the caller. That could be 5308 * slightly safer.. 5309 */ 5310 if ((cdb->byte2 & SSS_IMMED) 5311 && ((cdb->how & SSS_START) == 0)) { 5312 ctl_set_invalid_field(ctsio, 5313 /*sks_valid*/ 1, 5314 /*command*/ 1, 5315 /*field*/ 1, 5316 /*bit_valid*/ 1, 5317 /*bit*/ 0); 5318 ctl_done((union ctl_io *)ctsio); 5319 return (CTL_RETVAL_COMPLETE); 5320 } 5321 5322 /* 5323 * We don't support the power conditions field. 
We need to check 5324 * this prior to checking the load/eject and start/stop bits. 5325 */ 5326 if ((cdb->how & SSS_PC_MASK) != SSS_PC_START_VALID) { 5327 ctl_set_invalid_field(ctsio, 5328 /*sks_valid*/ 1, 5329 /*command*/ 1, 5330 /*field*/ 4, 5331 /*bit_valid*/ 1, 5332 /*bit*/ 4); 5333 ctl_done((union ctl_io *)ctsio); 5334 return (CTL_RETVAL_COMPLETE); 5335 } 5336 5337 /* 5338 * Media isn't removable, so we can't load or eject it. 5339 */ 5340 if ((cdb->how & SSS_LOEJ) != 0) { 5341 ctl_set_invalid_field(ctsio, 5342 /*sks_valid*/ 1, 5343 /*command*/ 1, 5344 /*field*/ 4, 5345 /*bit_valid*/ 1, 5346 /*bit*/ 1); 5347 ctl_done((union ctl_io *)ctsio); 5348 return (CTL_RETVAL_COMPLETE); 5349 } 5350 5351 if ((lun->flags & CTL_LUN_PR_RESERVED) 5352 && ((cdb->how & SSS_START)==0)) { 5353 uint32_t residx; 5354 5355 residx = ctl_get_resindex(&ctsio->io_hdr.nexus); 5356 if (!lun->per_res[residx].registered 5357 || (lun->pr_res_idx!=residx && lun->res_type < 4)) { 5358 5359 ctl_set_reservation_conflict(ctsio); 5360 ctl_done((union ctl_io *)ctsio); 5361 return (CTL_RETVAL_COMPLETE); 5362 } 5363 } 5364 5365 /* 5366 * If there is no backend on this device, we can't start or stop 5367 * it. In theory we shouldn't get any start/stop commands in the 5368 * first place at this level if the LUN doesn't have a backend. 5369 * That should get stopped by the command decode code. 5370 */ 5371 if (lun->backend == NULL) { 5372 ctl_set_invalid_opcode(ctsio); 5373 ctl_done((union ctl_io *)ctsio); 5374 return (CTL_RETVAL_COMPLETE); 5375 } 5376 5377 /* 5378 * XXX KDM Copan-specific offline behavior. 5379 * Figure out a reasonable way to port this? 5380 */ 5381 #ifdef NEEDTOPORT 5382 mtx_lock(&ctl_softc->ctl_lock); 5383 5384 if (((cdb->byte2 & SSS_ONOFFLINE) == 0) 5385 && (lun->flags & CTL_LUN_OFFLINE)) { 5386 /* 5387 * If the LUN is offline, and the on/offline bit isn't set, 5388 * reject the start or stop. Otherwise, let it through. 5389 */ 5390 mtx_unlock(&ctl_softc->ctl_lock); 5391 ctl_set_lun_not_ready(ctsio); 5392 ctl_done((union ctl_io *)ctsio); 5393 } else { 5394 mtx_unlock(&ctl_softc->ctl_lock); 5395 #endif /* NEEDTOPORT */ 5396 /* 5397 * This could be a start or a stop when we're online, 5398 * or a stop/offline or start/online. A start or stop when 5399 * we're offline is covered in the case above. 5400 */ 5401 /* 5402 * In the non-immediate case, we send the request to 5403 * the backend and return status to the user when 5404 * it is done. 5405 * 5406 * In the immediate case, we allocate a new ctl_io 5407 * to hold a copy of the request, and send that to 5408 * the backend. We then set good status on the 5409 * user's request and return it immediately. 5410 */ 5411 if (cdb->byte2 & SSS_IMMED) { 5412 union ctl_io *new_io; 5413 5414 new_io = ctl_alloc_io(ctsio->io_hdr.pool); 5415 if (new_io == NULL) { 5416 ctl_set_busy(ctsio); 5417 ctl_done((union ctl_io *)ctsio); 5418 } else { 5419 ctl_copy_io((union ctl_io *)ctsio, 5420 new_io); 5421 retval = lun->backend->config_write(new_io); 5422 ctl_set_success(ctsio); 5423 ctl_done((union ctl_io *)ctsio); 5424 } 5425 } else { 5426 retval = lun->backend->config_write( 5427 (union ctl_io *)ctsio); 5428 } 5429 #ifdef NEEDTOPORT 5430 } 5431 #endif 5432 return (retval); 5433 } 5434 5435 /* 5436 * We support the SYNCHRONIZE CACHE command (10 and 16 byte versions), but 5437 * we don't really do anything with the LBA and length fields if the user 5438 * passes them in. Instead we'll just flush out the cache for the entire 5439 * LUN. 
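 * (The two CDB formats differ only in field widths: the 10-byte form
 * carries a 32-bit LBA and 16-bit block count, the 16-byte form a
 * 64-bit LBA and 32-bit count, which is all the parsing below cares
 * about.)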
5440 */ 5441 int 5442 ctl_sync_cache(struct ctl_scsiio *ctsio) 5443 { 5444 struct ctl_lun *lun; 5445 struct ctl_softc *ctl_softc; 5446 uint64_t starting_lba; 5447 uint32_t block_count; 5448 int reladr, immed; 5449 int retval; 5450 5451 CTL_DEBUG_PRINT(("ctl_sync_cache\n")); 5452 5453 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 5454 ctl_softc = control_softc; 5455 retval = 0; 5456 reladr = 0; 5457 immed = 0; 5458 5459 switch (ctsio->cdb[0]) { 5460 case SYNCHRONIZE_CACHE: { 5461 struct scsi_sync_cache *cdb; 5462 cdb = (struct scsi_sync_cache *)ctsio->cdb; 5463 5464 if (cdb->byte2 & SSC_RELADR) 5465 reladr = 1; 5466 5467 if (cdb->byte2 & SSC_IMMED) 5468 immed = 1; 5469 5470 starting_lba = scsi_4btoul(cdb->begin_lba); 5471 block_count = scsi_2btoul(cdb->lb_count); 5472 break; 5473 } 5474 case SYNCHRONIZE_CACHE_16: { 5475 struct scsi_sync_cache_16 *cdb; 5476 cdb = (struct scsi_sync_cache_16 *)ctsio->cdb; 5477 5478 if (cdb->byte2 & SSC_RELADR) 5479 reladr = 1; 5480 5481 if (cdb->byte2 & SSC_IMMED) 5482 immed = 1; 5483 5484 starting_lba = scsi_8btou64(cdb->begin_lba); 5485 block_count = scsi_4btoul(cdb->lb_count); 5486 break; 5487 } 5488 default: 5489 ctl_set_invalid_opcode(ctsio); 5490 ctl_done((union ctl_io *)ctsio); 5491 goto bailout; 5492 break; /* NOTREACHED */ 5493 } 5494 5495 if (immed) { 5496 /* 5497 * We don't support the immediate bit. Since it's in the 5498 * same place for the 10 and 16 byte SYNCHRONIZE CACHE 5499 * commands, we can just return the same error in either 5500 * case. 5501 */ 5502 ctl_set_invalid_field(ctsio, 5503 /*sks_valid*/ 1, 5504 /*command*/ 1, 5505 /*field*/ 1, 5506 /*bit_valid*/ 1, 5507 /*bit*/ 1); 5508 ctl_done((union ctl_io *)ctsio); 5509 goto bailout; 5510 } 5511 5512 if (reladr) { 5513 /* 5514 * We don't support the reladr bit either. It can only be 5515 * used with linked commands, and we don't support linked 5516 * commands. Since the bit is in the same place for the 5517 * 10 and 16 byte SYNCHRONIZE CACHE * commands, we can 5518 * just return the same error in either case. 5519 */ 5520 ctl_set_invalid_field(ctsio, 5521 /*sks_valid*/ 1, 5522 /*command*/ 1, 5523 /*field*/ 1, 5524 /*bit_valid*/ 1, 5525 /*bit*/ 0); 5526 ctl_done((union ctl_io *)ctsio); 5527 goto bailout; 5528 } 5529 5530 /* 5531 * We check the LBA and length, but don't do anything with them. 5532 * A SYNCHRONIZE CACHE will cause the entire cache for this lun to 5533 * get flushed. This check will just help satisfy anyone who wants 5534 * to see an error for an out of range LBA. 5535 */ 5536 if ((starting_lba + block_count) > (lun->be_lun->maxlba + 1)) { 5537 ctl_set_lba_out_of_range(ctsio); 5538 ctl_done((union ctl_io *)ctsio); 5539 goto bailout; 5540 } 5541 5542 /* 5543 * If this LUN has no backend, we can't flush the cache anyway. 5544 */ 5545 if (lun->backend == NULL) { 5546 ctl_set_invalid_opcode(ctsio); 5547 ctl_done((union ctl_io *)ctsio); 5548 goto bailout; 5549 } 5550 5551 /* 5552 * Check to see whether we're configured to send the SYNCHRONIZE 5553 * CACHE command directly to the back end. 
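 * When CTL_FLAG_REAL_SYNC is set, only every lun->sync_interval-th
 * SYNCHRONIZE CACHE is actually forwarded to the backend (the counter
 * resets each time one is forwarded).  Otherwise, and for the
 * intervening commands, we just complete the command with good status
 * without flushing anything.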
5554 */ 5555 mtx_lock(&ctl_softc->ctl_lock); 5556 if ((ctl_softc->flags & CTL_FLAG_REAL_SYNC) 5557 && (++(lun->sync_count) >= lun->sync_interval)) { 5558 lun->sync_count = 0; 5559 mtx_unlock(&ctl_softc->ctl_lock); 5560 retval = lun->backend->config_write((union ctl_io *)ctsio); 5561 } else { 5562 mtx_unlock(&ctl_softc->ctl_lock); 5563 ctl_set_success(ctsio); 5564 ctl_done((union ctl_io *)ctsio); 5565 } 5566 5567 bailout: 5568 5569 return (retval); 5570 } 5571 5572 int 5573 ctl_format(struct ctl_scsiio *ctsio) 5574 { 5575 struct scsi_format *cdb; 5576 struct ctl_lun *lun; 5577 struct ctl_softc *ctl_softc; 5578 int length, defect_list_len; 5579 5580 CTL_DEBUG_PRINT(("ctl_format\n")); 5581 5582 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 5583 ctl_softc = control_softc; 5584 5585 cdb = (struct scsi_format *)ctsio->cdb; 5586 5587 length = 0; 5588 if (cdb->byte2 & SF_FMTDATA) { 5589 if (cdb->byte2 & SF_LONGLIST) 5590 length = sizeof(struct scsi_format_header_long); 5591 else 5592 length = sizeof(struct scsi_format_header_short); 5593 } 5594 5595 if (((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) 5596 && (length > 0)) { 5597 ctsio->kern_data_ptr = malloc(length, M_CTL, M_WAITOK); 5598 ctsio->kern_data_len = length; 5599 ctsio->kern_total_len = length; 5600 ctsio->kern_data_resid = 0; 5601 ctsio->kern_rel_offset = 0; 5602 ctsio->kern_sg_entries = 0; 5603 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 5604 ctsio->be_move_done = ctl_config_move_done; 5605 ctl_datamove((union ctl_io *)ctsio); 5606 5607 return (CTL_RETVAL_COMPLETE); 5608 } 5609 5610 defect_list_len = 0; 5611 5612 if (cdb->byte2 & SF_FMTDATA) { 5613 if (cdb->byte2 & SF_LONGLIST) { 5614 struct scsi_format_header_long *header; 5615 5616 header = (struct scsi_format_header_long *) 5617 ctsio->kern_data_ptr; 5618 5619 defect_list_len = scsi_4btoul(header->defect_list_len); 5620 if (defect_list_len != 0) { 5621 ctl_set_invalid_field(ctsio, 5622 /*sks_valid*/ 1, 5623 /*command*/ 0, 5624 /*field*/ 2, 5625 /*bit_valid*/ 0, 5626 /*bit*/ 0); 5627 goto bailout; 5628 } 5629 } else { 5630 struct scsi_format_header_short *header; 5631 5632 header = (struct scsi_format_header_short *) 5633 ctsio->kern_data_ptr; 5634 5635 defect_list_len = scsi_2btoul(header->defect_list_len); 5636 if (defect_list_len != 0) { 5637 ctl_set_invalid_field(ctsio, 5638 /*sks_valid*/ 1, 5639 /*command*/ 0, 5640 /*field*/ 2, 5641 /*bit_valid*/ 0, 5642 /*bit*/ 0); 5643 goto bailout; 5644 } 5645 } 5646 } 5647 5648 /* 5649 * The format command will clear out the "Medium format corrupted" 5650 * status if set by the configuration code. That status is really 5651 * just a way to notify the host that we have lost the media, and 5652 * get them to issue a command that will basically make them think 5653 * they're blowing away the media. 
5654 */ 5655 mtx_lock(&ctl_softc->ctl_lock); 5656 lun->flags &= ~CTL_LUN_INOPERABLE; 5657 mtx_unlock(&ctl_softc->ctl_lock); 5658 5659 ctsio->scsi_status = SCSI_STATUS_OK; 5660 ctsio->io_hdr.status = CTL_SUCCESS; 5661 bailout: 5662 5663 if (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) { 5664 free(ctsio->kern_data_ptr, M_CTL); 5665 ctsio->io_hdr.flags &= ~CTL_FLAG_ALLOCATED; 5666 } 5667 5668 ctl_done((union ctl_io *)ctsio); 5669 return (CTL_RETVAL_COMPLETE); 5670 } 5671 5672 int 5673 ctl_write_buffer(struct ctl_scsiio *ctsio) 5674 { 5675 struct scsi_write_buffer *cdb; 5676 struct copan_page_header *header; 5677 struct ctl_lun *lun; 5678 struct ctl_softc *ctl_softc; 5679 int buffer_offset, len; 5680 int retval; 5681 5682 header = NULL; 5683 5684 retval = CTL_RETVAL_COMPLETE; 5685 5686 CTL_DEBUG_PRINT(("ctl_write_buffer\n")); 5687 5688 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 5689 ctl_softc = control_softc; 5690 cdb = (struct scsi_write_buffer *)ctsio->cdb; 5691 5692 if ((cdb->byte2 & RWB_MODE) != RWB_MODE_DATA) { 5693 ctl_set_invalid_field(ctsio, 5694 /*sks_valid*/ 1, 5695 /*command*/ 1, 5696 /*field*/ 1, 5697 /*bit_valid*/ 1, 5698 /*bit*/ 4); 5699 ctl_done((union ctl_io *)ctsio); 5700 return (CTL_RETVAL_COMPLETE); 5701 } 5702 if (cdb->buffer_id != 0) { 5703 ctl_set_invalid_field(ctsio, 5704 /*sks_valid*/ 1, 5705 /*command*/ 1, 5706 /*field*/ 2, 5707 /*bit_valid*/ 0, 5708 /*bit*/ 0); 5709 ctl_done((union ctl_io *)ctsio); 5710 return (CTL_RETVAL_COMPLETE); 5711 } 5712 5713 len = scsi_3btoul(cdb->length); 5714 buffer_offset = scsi_3btoul(cdb->offset); 5715 5716 if (len > sizeof(lun->write_buffer)) { 5717 ctl_set_invalid_field(ctsio, 5718 /*sks_valid*/ 1, 5719 /*command*/ 1, 5720 /*field*/ 6, 5721 /*bit_valid*/ 0, 5722 /*bit*/ 0); 5723 ctl_done((union ctl_io *)ctsio); 5724 return (CTL_RETVAL_COMPLETE); 5725 } 5726 5727 if (buffer_offset != 0) { 5728 ctl_set_invalid_field(ctsio, 5729 /*sks_valid*/ 1, 5730 /*command*/ 1, 5731 /*field*/ 3, 5732 /*bit_valid*/ 0, 5733 /*bit*/ 0); 5734 ctl_done((union ctl_io *)ctsio); 5735 return (CTL_RETVAL_COMPLETE); 5736 } 5737 5738 /* 5739 * If we've got a kernel request that hasn't been malloced yet, 5740 * malloc it and tell the caller the data buffer is here. 5741 */ 5742 if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) { 5743 ctsio->kern_data_ptr = lun->write_buffer; 5744 ctsio->kern_data_len = len; 5745 ctsio->kern_total_len = len; 5746 ctsio->kern_data_resid = 0; 5747 ctsio->kern_rel_offset = 0; 5748 ctsio->kern_sg_entries = 0; 5749 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 5750 ctsio->be_move_done = ctl_config_move_done; 5751 ctl_datamove((union ctl_io *)ctsio); 5752 5753 return (CTL_RETVAL_COMPLETE); 5754 } 5755 5756 ctl_done((union ctl_io *)ctsio); 5757 5758 return (CTL_RETVAL_COMPLETE); 5759 } 5760 5761 /* 5762 * Note that this function currently doesn't actually do anything inside 5763 * CTL to enforce things if the DQue bit is turned on. 5764 * 5765 * Also note that this function can't be used in the default case, because 5766 * the DQue bit isn't set in the changeable mask for the control mode page 5767 * anyway. This is just here as an example for how to implement a page 5768 * handler, and a placeholder in case we want to allow the user to turn 5769 * tagged queueing on and off. 5770 * 5771 * The D_SENSE bit handling is functional, however, and will turn 5772 * descriptor sense on and off for a given LUN. 
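 *
 * As far as the handler contract goes: by the time a select handler
 * like this one is called, ctl_do_mode_select() has already verified
 * that the initiator only modified bits marked changeable for the
 * page.  What remains is to apply the new values to the CURRENT and
 * SAVED copies of the page, update any LUN flags that mirror them,
 * and note a mode-parameters-changed unit attention for the other
 * initiators, which is what the D_SENSE and DQue handling below does.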
5773 */ 5774 int 5775 ctl_control_page_handler(struct ctl_scsiio *ctsio, 5776 struct ctl_page_index *page_index, uint8_t *page_ptr) 5777 { 5778 struct scsi_control_page *current_cp, *saved_cp, *user_cp; 5779 struct ctl_lun *lun; 5780 struct ctl_softc *softc; 5781 int set_ua; 5782 uint32_t initidx; 5783 5784 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 5785 initidx = ctl_get_initindex(&ctsio->io_hdr.nexus); 5786 set_ua = 0; 5787 5788 user_cp = (struct scsi_control_page *)page_ptr; 5789 current_cp = (struct scsi_control_page *) 5790 (page_index->page_data + (page_index->page_len * 5791 CTL_PAGE_CURRENT)); 5792 saved_cp = (struct scsi_control_page *) 5793 (page_index->page_data + (page_index->page_len * 5794 CTL_PAGE_SAVED)); 5795 5796 softc = control_softc; 5797 5798 mtx_lock(&softc->ctl_lock); 5799 if (((current_cp->rlec & SCP_DSENSE) == 0) 5800 && ((user_cp->rlec & SCP_DSENSE) != 0)) { 5801 /* 5802 * Descriptor sense is currently turned off and the user 5803 * wants to turn it on. 5804 */ 5805 current_cp->rlec |= SCP_DSENSE; 5806 saved_cp->rlec |= SCP_DSENSE; 5807 lun->flags |= CTL_LUN_SENSE_DESC; 5808 set_ua = 1; 5809 } else if (((current_cp->rlec & SCP_DSENSE) != 0) 5810 && ((user_cp->rlec & SCP_DSENSE) == 0)) { 5811 /* 5812 * Descriptor sense is currently turned on, and the user 5813 * wants to turn it off. 5814 */ 5815 current_cp->rlec &= ~SCP_DSENSE; 5816 saved_cp->rlec &= ~SCP_DSENSE; 5817 lun->flags &= ~CTL_LUN_SENSE_DESC; 5818 set_ua = 1; 5819 } 5820 if (current_cp->queue_flags & SCP_QUEUE_DQUE) { 5821 if (user_cp->queue_flags & SCP_QUEUE_DQUE) { 5822 #ifdef NEEDTOPORT 5823 csevent_log(CSC_CTL | CSC_SHELF_SW | 5824 CTL_UNTAG_TO_UNTAG, 5825 csevent_LogType_Trace, 5826 csevent_Severity_Information, 5827 csevent_AlertLevel_Green, 5828 csevent_FRU_Firmware, 5829 csevent_FRU_Unknown, 5830 "Received untagged to untagged transition"); 5831 #endif /* NEEDTOPORT */ 5832 } else { 5833 #ifdef NEEDTOPORT 5834 csevent_log(CSC_CTL | CSC_SHELF_SW | 5835 CTL_UNTAG_TO_TAG, 5836 csevent_LogType_ConfigChange, 5837 csevent_Severity_Information, 5838 csevent_AlertLevel_Green, 5839 csevent_FRU_Firmware, 5840 csevent_FRU_Unknown, 5841 "Received untagged to tagged " 5842 "queueing transition"); 5843 #endif /* NEEDTOPORT */ 5844 5845 current_cp->queue_flags &= ~SCP_QUEUE_DQUE; 5846 saved_cp->queue_flags &= ~SCP_QUEUE_DQUE; 5847 set_ua = 1; 5848 } 5849 } else { 5850 if (user_cp->queue_flags & SCP_QUEUE_DQUE) { 5851 #ifdef NEEDTOPORT 5852 csevent_log(CSC_CTL | CSC_SHELF_SW | 5853 CTL_TAG_TO_UNTAG, 5854 csevent_LogType_ConfigChange, 5855 csevent_Severity_Warning, 5856 csevent_AlertLevel_Yellow, 5857 csevent_FRU_Firmware, 5858 csevent_FRU_Unknown, 5859 "Received tagged queueing to untagged " 5860 "transition"); 5861 #endif /* NEEDTOPORT */ 5862 5863 current_cp->queue_flags |= SCP_QUEUE_DQUE; 5864 saved_cp->queue_flags |= SCP_QUEUE_DQUE; 5865 set_ua = 1; 5866 } else { 5867 #ifdef NEEDTOPORT 5868 csevent_log(CSC_CTL | CSC_SHELF_SW | 5869 CTL_TAG_TO_TAG, 5870 csevent_LogType_Trace, 5871 csevent_Severity_Information, 5872 csevent_AlertLevel_Green, 5873 csevent_FRU_Firmware, 5874 csevent_FRU_Unknown, 5875 "Received tagged queueing to tagged " 5876 "queueing transition"); 5877 #endif /* NEEDTOPORT */ 5878 } 5879 } 5880 if (set_ua != 0) { 5881 int i; 5882 /* 5883 * Let other initiators know that the mode 5884 * parameters for this LUN have changed. 
5885 */ 5886 for (i = 0; i < CTL_MAX_INITIATORS; i++) { 5887 if (i == initidx) 5888 continue; 5889 5890 lun->pending_sense[i].ua_pending |= 5891 CTL_UA_MODE_CHANGE; 5892 } 5893 } 5894 mtx_unlock(&softc->ctl_lock); 5895 5896 return (0); 5897 } 5898 5899 int 5900 ctl_power_sp_handler(struct ctl_scsiio *ctsio, 5901 struct ctl_page_index *page_index, uint8_t *page_ptr) 5902 { 5903 return (0); 5904 } 5905 5906 int 5907 ctl_power_sp_sense_handler(struct ctl_scsiio *ctsio, 5908 struct ctl_page_index *page_index, int pc) 5909 { 5910 struct copan_power_subpage *page; 5911 5912 page = (struct copan_power_subpage *)page_index->page_data + 5913 (page_index->page_len * pc); 5914 5915 switch (pc) { 5916 case SMS_PAGE_CTRL_CHANGEABLE >> 6: 5917 /* 5918 * We don't update the changable bits for this page. 5919 */ 5920 break; 5921 case SMS_PAGE_CTRL_CURRENT >> 6: 5922 case SMS_PAGE_CTRL_DEFAULT >> 6: 5923 case SMS_PAGE_CTRL_SAVED >> 6: 5924 #ifdef NEEDTOPORT 5925 ctl_update_power_subpage(page); 5926 #endif 5927 break; 5928 default: 5929 #ifdef NEEDTOPORT 5930 EPRINT(0, "Invalid PC %d!!", pc); 5931 #endif 5932 break; 5933 } 5934 return (0); 5935 } 5936 5937 5938 int 5939 ctl_aps_sp_handler(struct ctl_scsiio *ctsio, 5940 struct ctl_page_index *page_index, uint8_t *page_ptr) 5941 { 5942 struct copan_aps_subpage *user_sp; 5943 struct copan_aps_subpage *current_sp; 5944 union ctl_modepage_info *modepage_info; 5945 struct ctl_softc *softc; 5946 struct ctl_lun *lun; 5947 int retval; 5948 5949 retval = CTL_RETVAL_COMPLETE; 5950 current_sp = (struct copan_aps_subpage *)(page_index->page_data + 5951 (page_index->page_len * CTL_PAGE_CURRENT)); 5952 softc = control_softc; 5953 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 5954 5955 user_sp = (struct copan_aps_subpage *)page_ptr; 5956 5957 modepage_info = (union ctl_modepage_info *) 5958 ctsio->io_hdr.ctl_private[CTL_PRIV_MODEPAGE].bytes; 5959 5960 modepage_info->header.page_code = page_index->page_code & SMPH_PC_MASK; 5961 modepage_info->header.subpage = page_index->subpage; 5962 modepage_info->aps.lock_active = user_sp->lock_active; 5963 5964 mtx_lock(&softc->ctl_lock); 5965 5966 /* 5967 * If there is a request to lock the LUN and another LUN is locked 5968 * this is an error. If the requested LUN is already locked ignore 5969 * the request. If no LUN is locked attempt to lock it. 5970 * if there is a request to unlock the LUN and the LUN is currently 5971 * locked attempt to unlock it. Otherwise ignore the request. i.e. 5972 * if another LUN is locked or no LUN is locked. 5973 */ 5974 if (user_sp->lock_active & APS_LOCK_ACTIVE) { 5975 if (softc->aps_locked_lun == lun->lun) { 5976 /* 5977 * This LUN is already locked, so we're done. 5978 */ 5979 retval = CTL_RETVAL_COMPLETE; 5980 } else if (softc->aps_locked_lun == 0) { 5981 /* 5982 * No one has the lock, pass the request to the 5983 * backend. 5984 */ 5985 retval = lun->backend->config_write( 5986 (union ctl_io *)ctsio); 5987 } else { 5988 /* 5989 * Someone else has the lock, throw out the request. 5990 */ 5991 ctl_set_already_locked(ctsio); 5992 free(ctsio->kern_data_ptr, M_CTL); 5993 ctl_done((union ctl_io *)ctsio); 5994 5995 /* 5996 * Set the return value so that ctl_do_mode_select() 5997 * won't try to complete the command. We already 5998 * completed it here. 5999 */ 6000 retval = CTL_RETVAL_ERROR; 6001 } 6002 } else if (softc->aps_locked_lun == lun->lun) { 6003 /* 6004 * This LUN is locked, so pass the unlock request to the 6005 * backend. 
6006 */ 6007 retval = lun->backend->config_write((union ctl_io *)ctsio); 6008 } 6009 mtx_unlock(&softc->ctl_lock); 6010 6011 return (retval); 6012 } 6013 6014 int 6015 ctl_debugconf_sp_select_handler(struct ctl_scsiio *ctsio, 6016 struct ctl_page_index *page_index, 6017 uint8_t *page_ptr) 6018 { 6019 uint8_t *c; 6020 int i; 6021 6022 c = ((struct copan_debugconf_subpage *)page_ptr)->ctl_time_io_secs; 6023 ctl_time_io_secs = 6024 (c[0] << 8) | 6025 (c[1] << 0) | 6026 0; 6027 CTL_DEBUG_PRINT(("set ctl_time_io_secs to %d\n", ctl_time_io_secs)); 6028 printf("set ctl_time_io_secs to %d\n", ctl_time_io_secs); 6029 printf("page data:"); 6030 for (i=0; i<8; i++) 6031 printf(" %.2x",page_ptr[i]); 6032 printf("\n"); 6033 return (0); 6034 } 6035 6036 int 6037 ctl_debugconf_sp_sense_handler(struct ctl_scsiio *ctsio, 6038 struct ctl_page_index *page_index, 6039 int pc) 6040 { 6041 struct copan_debugconf_subpage *page; 6042 6043 page = (struct copan_debugconf_subpage *)page_index->page_data + 6044 (page_index->page_len * pc); 6045 6046 switch (pc) { 6047 case SMS_PAGE_CTRL_CHANGEABLE >> 6: 6048 case SMS_PAGE_CTRL_DEFAULT >> 6: 6049 case SMS_PAGE_CTRL_SAVED >> 6: 6050 /* 6051 * We don't update the changable or default bits for this page. 6052 */ 6053 break; 6054 case SMS_PAGE_CTRL_CURRENT >> 6: 6055 page->ctl_time_io_secs[0] = ctl_time_io_secs >> 8; 6056 page->ctl_time_io_secs[1] = ctl_time_io_secs >> 0; 6057 break; 6058 default: 6059 #ifdef NEEDTOPORT 6060 EPRINT(0, "Invalid PC %d!!", pc); 6061 #endif /* NEEDTOPORT */ 6062 break; 6063 } 6064 return (0); 6065 } 6066 6067 6068 static int 6069 ctl_do_mode_select(union ctl_io *io) 6070 { 6071 struct scsi_mode_page_header *page_header; 6072 struct ctl_page_index *page_index; 6073 struct ctl_scsiio *ctsio; 6074 int control_dev, page_len; 6075 int page_len_offset, page_len_size; 6076 union ctl_modepage_info *modepage_info; 6077 struct ctl_lun *lun; 6078 int *len_left, *len_used; 6079 int retval, i; 6080 6081 ctsio = &io->scsiio; 6082 page_index = NULL; 6083 page_len = 0; 6084 retval = CTL_RETVAL_COMPLETE; 6085 6086 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 6087 6088 if (lun->be_lun->lun_type != T_DIRECT) 6089 control_dev = 1; 6090 else 6091 control_dev = 0; 6092 6093 modepage_info = (union ctl_modepage_info *) 6094 ctsio->io_hdr.ctl_private[CTL_PRIV_MODEPAGE].bytes; 6095 len_left = &modepage_info->header.len_left; 6096 len_used = &modepage_info->header.len_used; 6097 6098 do_next_page: 6099 6100 page_header = (struct scsi_mode_page_header *) 6101 (ctsio->kern_data_ptr + *len_used); 6102 6103 if (*len_left == 0) { 6104 free(ctsio->kern_data_ptr, M_CTL); 6105 ctl_set_success(ctsio); 6106 ctl_done((union ctl_io *)ctsio); 6107 return (CTL_RETVAL_COMPLETE); 6108 } else if (*len_left < sizeof(struct scsi_mode_page_header)) { 6109 6110 free(ctsio->kern_data_ptr, M_CTL); 6111 ctl_set_param_len_error(ctsio); 6112 ctl_done((union ctl_io *)ctsio); 6113 return (CTL_RETVAL_COMPLETE); 6114 6115 } else if ((page_header->page_code & SMPH_SPF) 6116 && (*len_left < sizeof(struct scsi_mode_page_header_sp))) { 6117 6118 free(ctsio->kern_data_ptr, M_CTL); 6119 ctl_set_param_len_error(ctsio); 6120 ctl_done((union ctl_io *)ctsio); 6121 return (CTL_RETVAL_COMPLETE); 6122 } 6123 6124 6125 /* 6126 * XXX KDM should we do something with the block descriptor? 
6127 */ 6128 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { 6129 6130 if ((control_dev != 0) 6131 && (lun->mode_pages.index[i].page_flags & 6132 CTL_PAGE_FLAG_DISK_ONLY)) 6133 continue; 6134 6135 if ((lun->mode_pages.index[i].page_code & SMPH_PC_MASK) != 6136 (page_header->page_code & SMPH_PC_MASK)) 6137 continue; 6138 6139 /* 6140 * If neither page has a subpage code, then we've got a 6141 * match. 6142 */ 6143 if (((lun->mode_pages.index[i].page_code & SMPH_SPF) == 0) 6144 && ((page_header->page_code & SMPH_SPF) == 0)) { 6145 page_index = &lun->mode_pages.index[i]; 6146 page_len = page_header->page_length; 6147 break; 6148 } 6149 6150 /* 6151 * If both pages have subpages, then the subpage numbers 6152 * have to match. 6153 */ 6154 if ((lun->mode_pages.index[i].page_code & SMPH_SPF) 6155 && (page_header->page_code & SMPH_SPF)) { 6156 struct scsi_mode_page_header_sp *sph; 6157 6158 sph = (struct scsi_mode_page_header_sp *)page_header; 6159 6160 if (lun->mode_pages.index[i].subpage == 6161 sph->subpage) { 6162 page_index = &lun->mode_pages.index[i]; 6163 page_len = scsi_2btoul(sph->page_length); 6164 break; 6165 } 6166 } 6167 } 6168 6169 /* 6170 * If we couldn't find the page, or if we don't have a mode select 6171 * handler for it, send back an error to the user. 6172 */ 6173 if ((page_index == NULL) 6174 || (page_index->select_handler == NULL)) { 6175 ctl_set_invalid_field(ctsio, 6176 /*sks_valid*/ 1, 6177 /*command*/ 0, 6178 /*field*/ *len_used, 6179 /*bit_valid*/ 0, 6180 /*bit*/ 0); 6181 free(ctsio->kern_data_ptr, M_CTL); 6182 ctl_done((union ctl_io *)ctsio); 6183 return (CTL_RETVAL_COMPLETE); 6184 } 6185 6186 if (page_index->page_code & SMPH_SPF) { 6187 page_len_offset = 2; 6188 page_len_size = 2; 6189 } else { 6190 page_len_size = 1; 6191 page_len_offset = 1; 6192 } 6193 6194 /* 6195 * If the length the initiator gives us isn't the one we specify in 6196 * the mode page header, or if they didn't specify enough data in 6197 * the CDB to avoid truncating this page, kick out the request. 6198 */ 6199 if ((page_len != (page_index->page_len - page_len_offset - 6200 page_len_size)) 6201 || (*len_left < page_index->page_len)) { 6202 6203 6204 ctl_set_invalid_field(ctsio, 6205 /*sks_valid*/ 1, 6206 /*command*/ 0, 6207 /*field*/ *len_used + page_len_offset, 6208 /*bit_valid*/ 0, 6209 /*bit*/ 0); 6210 free(ctsio->kern_data_ptr, M_CTL); 6211 ctl_done((union ctl_io *)ctsio); 6212 return (CTL_RETVAL_COMPLETE); 6213 } 6214 6215 /* 6216 * Run through the mode page, checking to make sure that the bits 6217 * the user changed are actually legal for him to change. 6218 */ 6219 for (i = 0; i < page_index->page_len; i++) { 6220 uint8_t *user_byte, *change_mask, *current_byte; 6221 int bad_bit; 6222 int j; 6223 6224 user_byte = (uint8_t *)page_header + i; 6225 change_mask = page_index->page_data + 6226 (page_index->page_len * CTL_PAGE_CHANGEABLE) + i; 6227 current_byte = page_index->page_data + 6228 (page_index->page_len * CTL_PAGE_CURRENT) + i; 6229 6230 /* 6231 * Check to see whether the user set any bits in this byte 6232 * that he is not allowed to set. 6233 */ 6234 if ((*user_byte & ~(*change_mask)) == 6235 (*current_byte & ~(*change_mask))) 6236 continue; 6237 6238 /* 6239 * Go through bit by bit to determine which one is illegal. 
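 * Only the read-only bits (those clear in the changeable mask) are
 * compared; the first mismatch found, scanning from bit 7 down to bit 0,
 * is reported in the sense-key-specific bit pointer. For example, with a
 * changeable mask of 0x3f, a user byte of 0x80 and a current byte of
 * 0x40, bit 7 differs within the read-only bits and is flagged.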
6240 */ 6241 bad_bit = 0; 6242 for (j = 7; j >= 0; j--) { 6243 if ((((1 << j) & ~(*change_mask)) & *user_byte) != 6244 (((1 << j) & ~(*change_mask)) & *current_byte)) { 6245 bad_bit = j; 6246 break; 6247 } 6248 } 6249 ctl_set_invalid_field(ctsio, 6250 /*sks_valid*/ 1, 6251 /*command*/ 0, 6252 /*field*/ *len_used + i, 6253 /*bit_valid*/ 1, 6254 /*bit*/ bad_bit); 6255 free(ctsio->kern_data_ptr, M_CTL); 6256 ctl_done((union ctl_io *)ctsio); 6257 return (CTL_RETVAL_COMPLETE); 6258 } 6259 6260 /* 6261 * Decrement these before we call the page handler, since we may 6262 * end up getting called back one way or another before the handler 6263 * returns to this context. 6264 */ 6265 *len_left -= page_index->page_len; 6266 *len_used += page_index->page_len; 6267 6268 retval = page_index->select_handler(ctsio, page_index, 6269 (uint8_t *)page_header); 6270 6271 /* 6272 * If the page handler returns CTL_RETVAL_QUEUED, then we need to 6273 * wait until this queued command completes to finish processing 6274 * the mode page. If it returns anything other than 6275 * CTL_RETVAL_COMPLETE (e.g. CTL_RETVAL_ERROR), then it should have 6276 * already set the sense information, freed the data pointer, and 6277 * completed the io for us. 6278 */ 6279 if (retval != CTL_RETVAL_COMPLETE) 6280 goto bailout_no_done; 6281 6282 /* 6283 * If the initiator sent us more than one page, parse the next one. 6284 */ 6285 if (*len_left > 0) 6286 goto do_next_page; 6287 6288 ctl_set_success(ctsio); 6289 free(ctsio->kern_data_ptr, M_CTL); 6290 ctl_done((union ctl_io *)ctsio); 6291 6292 bailout_no_done: 6293 6294 return (CTL_RETVAL_COMPLETE); 6295 6296 } 6297 6298 int 6299 ctl_mode_select(struct ctl_scsiio *ctsio) 6300 { 6301 int param_len, pf, sp; 6302 int header_size, bd_len; 6303 int len_left, len_used; 6304 struct ctl_page_index *page_index; 6305 struct ctl_lun *lun; 6306 int control_dev, page_len; 6307 union ctl_modepage_info *modepage_info; 6308 int retval; 6309 6310 pf = 0; 6311 sp = 0; 6312 page_len = 0; 6313 len_used = 0; 6314 len_left = 0; 6315 retval = 0; 6316 bd_len = 0; 6317 page_index = NULL; 6318 6319 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 6320 6321 if (lun->be_lun->lun_type != T_DIRECT) 6322 control_dev = 1; 6323 else 6324 control_dev = 0; 6325 6326 switch (ctsio->cdb[0]) { 6327 case MODE_SELECT_6: { 6328 struct scsi_mode_select_6 *cdb; 6329 6330 cdb = (struct scsi_mode_select_6 *)ctsio->cdb; 6331 6332 pf = (cdb->byte2 & SMS_PF) ? 1 : 0; 6333 sp = (cdb->byte2 & SMS_SP) ? 1 : 0; 6334 6335 param_len = cdb->length; 6336 header_size = sizeof(struct scsi_mode_header_6); 6337 break; 6338 } 6339 case MODE_SELECT_10: { 6340 struct scsi_mode_select_10 *cdb; 6341 6342 cdb = (struct scsi_mode_select_10 *)ctsio->cdb; 6343 6344 pf = (cdb->byte2 & SMS_PF) ? 1 : 0; 6345 sp = (cdb->byte2 & SMS_SP) ? 1 : 0; 6346 6347 param_len = scsi_2btoul(cdb->length); 6348 header_size = sizeof(struct scsi_mode_header_10); 6349 break; 6350 } 6351 default: 6352 ctl_set_invalid_opcode(ctsio); 6353 ctl_done((union ctl_io *)ctsio); 6354 return (CTL_RETVAL_COMPLETE); 6355 break; /* NOTREACHED */ 6356 } 6357 6358 /* 6359 * From SPC-3: 6360 * "A parameter list length of zero indicates that the Data-Out Buffer 6361 * shall be empty. This condition shall not be considered as an error."
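 * So a zero-length MODE SELECT is completed below with GOOD status
 * without ever starting a data transfer.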
6362 */ 6363 if (param_len == 0) { 6364 ctl_set_success(ctsio); 6365 ctl_done((union ctl_io *)ctsio); 6366 return (CTL_RETVAL_COMPLETE); 6367 } 6368 6369 /* 6370 * Since we'll hit this the first time through, prior to 6371 * allocation, we don't need to free a data buffer here. 6372 */ 6373 if (param_len < header_size) { 6374 ctl_set_param_len_error(ctsio); 6375 ctl_done((union ctl_io *)ctsio); 6376 return (CTL_RETVAL_COMPLETE); 6377 } 6378 6379 /* 6380 * Allocate the data buffer and grab the user's data. In theory, 6381 * we shouldn't have to sanity check the parameter list length here 6382 * because the maximum size is 64K. We should be able to malloc 6383 * that much without too many problems. 6384 */ 6385 if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) { 6386 ctsio->kern_data_ptr = malloc(param_len, M_CTL, M_WAITOK); 6387 ctsio->kern_data_len = param_len; 6388 ctsio->kern_total_len = param_len; 6389 ctsio->kern_data_resid = 0; 6390 ctsio->kern_rel_offset = 0; 6391 ctsio->kern_sg_entries = 0; 6392 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 6393 ctsio->be_move_done = ctl_config_move_done; 6394 ctl_datamove((union ctl_io *)ctsio); 6395 6396 return (CTL_RETVAL_COMPLETE); 6397 } 6398 6399 switch (ctsio->cdb[0]) { 6400 case MODE_SELECT_6: { 6401 struct scsi_mode_header_6 *mh6; 6402 6403 mh6 = (struct scsi_mode_header_6 *)ctsio->kern_data_ptr; 6404 bd_len = mh6->blk_desc_len; 6405 break; 6406 } 6407 case MODE_SELECT_10: { 6408 struct scsi_mode_header_10 *mh10; 6409 6410 mh10 = (struct scsi_mode_header_10 *)ctsio->kern_data_ptr; 6411 bd_len = scsi_2btoul(mh10->blk_desc_len); 6412 break; 6413 } 6414 default: 6415 panic("Invalid CDB type %#x", ctsio->cdb[0]); 6416 break; 6417 } 6418 6419 if (param_len < (header_size + bd_len)) { 6420 free(ctsio->kern_data_ptr, M_CTL); 6421 ctl_set_param_len_error(ctsio); 6422 ctl_done((union ctl_io *)ctsio); 6423 return (CTL_RETVAL_COMPLETE); 6424 } 6425 6426 /* 6427 * Set the IO_CONT flag, so that if this I/O gets passed to 6428 * ctl_config_write_done(), it'll get passed back to 6429 * ctl_do_mode_select() for further processing, or completion if 6430 * we're all done. 
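 * The modepage_info scratch area in the I/O header carries the running
 * len_left/len_used counts across those callbacks.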
6431 */ 6432 ctsio->io_hdr.flags |= CTL_FLAG_IO_CONT; 6433 ctsio->io_cont = ctl_do_mode_select; 6434 6435 modepage_info = (union ctl_modepage_info *) 6436 ctsio->io_hdr.ctl_private[CTL_PRIV_MODEPAGE].bytes; 6437 6438 memset(modepage_info, 0, sizeof(*modepage_info)); 6439 6440 len_left = param_len - header_size - bd_len; 6441 len_used = header_size + bd_len; 6442 6443 modepage_info->header.len_left = len_left; 6444 modepage_info->header.len_used = len_used; 6445 6446 return (ctl_do_mode_select((union ctl_io *)ctsio)); 6447 } 6448 6449 int 6450 ctl_mode_sense(struct ctl_scsiio *ctsio) 6451 { 6452 struct ctl_lun *lun; 6453 int pc, page_code, dbd, llba, subpage; 6454 int alloc_len, page_len, header_len, total_len; 6455 struct scsi_mode_block_descr *block_desc; 6456 struct ctl_page_index *page_index; 6457 int control_dev; 6458 6459 dbd = 0; 6460 llba = 0; 6461 block_desc = NULL; 6462 page_index = NULL; 6463 6464 CTL_DEBUG_PRINT(("ctl_mode_sense\n")); 6465 6466 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 6467 6468 if (lun->be_lun->lun_type != T_DIRECT) 6469 control_dev = 1; 6470 else 6471 control_dev = 0; 6472 6473 switch (ctsio->cdb[0]) { 6474 case MODE_SENSE_6: { 6475 struct scsi_mode_sense_6 *cdb; 6476 6477 cdb = (struct scsi_mode_sense_6 *)ctsio->cdb; 6478 6479 header_len = sizeof(struct scsi_mode_hdr_6); 6480 if (cdb->byte2 & SMS_DBD) 6481 dbd = 1; 6482 else 6483 header_len += sizeof(struct scsi_mode_block_descr); 6484 6485 pc = (cdb->page & SMS_PAGE_CTRL_MASK) >> 6; 6486 page_code = cdb->page & SMS_PAGE_CODE; 6487 subpage = cdb->subpage; 6488 alloc_len = cdb->length; 6489 break; 6490 } 6491 case MODE_SENSE_10: { 6492 struct scsi_mode_sense_10 *cdb; 6493 6494 cdb = (struct scsi_mode_sense_10 *)ctsio->cdb; 6495 6496 header_len = sizeof(struct scsi_mode_hdr_10); 6497 6498 if (cdb->byte2 & SMS_DBD) 6499 dbd = 1; 6500 else 6501 header_len += sizeof(struct scsi_mode_block_descr); 6502 if (cdb->byte2 & SMS10_LLBAA) 6503 llba = 1; 6504 pc = (cdb->page & SMS_PAGE_CTRL_MASK) >> 6; 6505 page_code = cdb->page & SMS_PAGE_CODE; 6506 subpage = cdb->subpage; 6507 alloc_len = scsi_2btoul(cdb->length); 6508 break; 6509 } 6510 default: 6511 ctl_set_invalid_opcode(ctsio); 6512 ctl_done((union ctl_io *)ctsio); 6513 return (CTL_RETVAL_COMPLETE); 6514 break; /* NOTREACHED */ 6515 } 6516 6517 /* 6518 * We have to make a first pass through to calculate the size of 6519 * the pages that match the user's query. Then we allocate enough 6520 * memory to hold it, and actually copy the data into the buffer. 6521 */ 6522 switch (page_code) { 6523 case SMS_ALL_PAGES_PAGE: { 6524 int i; 6525 6526 page_len = 0; 6527 6528 /* 6529 * At the moment, values other than 0 and 0xff here are 6530 * reserved according to SPC-3. 6531 */ 6532 if ((subpage != SMS_SUBPAGE_PAGE_0) 6533 && (subpage != SMS_SUBPAGE_ALL)) { 6534 ctl_set_invalid_field(ctsio, 6535 /*sks_valid*/ 1, 6536 /*command*/ 1, 6537 /*field*/ 3, 6538 /*bit_valid*/ 0, 6539 /*bit*/ 0); 6540 ctl_done((union ctl_io *)ctsio); 6541 return (CTL_RETVAL_COMPLETE); 6542 } 6543 6544 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { 6545 if ((control_dev != 0) 6546 && (lun->mode_pages.index[i].page_flags & 6547 CTL_PAGE_FLAG_DISK_ONLY)) 6548 continue; 6549 6550 /* 6551 * We don't use this subpage if the user didn't 6552 * request all subpages. 
6553 */ 6554 if ((lun->mode_pages.index[i].subpage != 0) 6555 && (subpage == SMS_SUBPAGE_PAGE_0)) 6556 continue; 6557 6558 #if 0 6559 printf("found page %#x len %d\n", 6560 lun->mode_pages.index[i].page_code & 6561 SMPH_PC_MASK, 6562 lun->mode_pages.index[i].page_len); 6563 #endif 6564 page_len += lun->mode_pages.index[i].page_len; 6565 } 6566 break; 6567 } 6568 default: { 6569 int i; 6570 6571 page_len = 0; 6572 6573 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { 6574 /* Look for the right page code */ 6575 if ((lun->mode_pages.index[i].page_code & 6576 SMPH_PC_MASK) != page_code) 6577 continue; 6578 6579 /* Look for the right subpage or the subpage wildcard*/ 6580 if ((lun->mode_pages.index[i].subpage != subpage) 6581 && (subpage != SMS_SUBPAGE_ALL)) 6582 continue; 6583 6584 /* Make sure the page is supported for this dev type */ 6585 if ((control_dev != 0) 6586 && (lun->mode_pages.index[i].page_flags & 6587 CTL_PAGE_FLAG_DISK_ONLY)) 6588 continue; 6589 6590 #if 0 6591 printf("found page %#x len %d\n", 6592 lun->mode_pages.index[i].page_code & 6593 SMPH_PC_MASK, 6594 lun->mode_pages.index[i].page_len); 6595 #endif 6596 6597 page_len += lun->mode_pages.index[i].page_len; 6598 } 6599 6600 if (page_len == 0) { 6601 ctl_set_invalid_field(ctsio, 6602 /*sks_valid*/ 1, 6603 /*command*/ 1, 6604 /*field*/ 2, 6605 /*bit_valid*/ 1, 6606 /*bit*/ 5); 6607 ctl_done((union ctl_io *)ctsio); 6608 return (CTL_RETVAL_COMPLETE); 6609 } 6610 break; 6611 } 6612 } 6613 6614 total_len = header_len + page_len; 6615 #if 0 6616 printf("header_len = %d, page_len = %d, total_len = %d\n", 6617 header_len, page_len, total_len); 6618 #endif 6619 6620 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 6621 ctsio->kern_sg_entries = 0; 6622 ctsio->kern_data_resid = 0; 6623 ctsio->kern_rel_offset = 0; 6624 if (total_len < alloc_len) { 6625 ctsio->residual = alloc_len - total_len; 6626 ctsio->kern_data_len = total_len; 6627 ctsio->kern_total_len = total_len; 6628 } else { 6629 ctsio->residual = 0; 6630 ctsio->kern_data_len = alloc_len; 6631 ctsio->kern_total_len = alloc_len; 6632 } 6633 6634 switch (ctsio->cdb[0]) { 6635 case MODE_SENSE_6: { 6636 struct scsi_mode_hdr_6 *header; 6637 6638 header = (struct scsi_mode_hdr_6 *)ctsio->kern_data_ptr; 6639 6640 header->datalen = ctl_min(total_len - 1, 254); 6641 6642 if (dbd) 6643 header->block_descr_len = 0; 6644 else 6645 header->block_descr_len = 6646 sizeof(struct scsi_mode_block_descr); 6647 block_desc = (struct scsi_mode_block_descr *)&header[1]; 6648 break; 6649 } 6650 case MODE_SENSE_10: { 6651 struct scsi_mode_hdr_10 *header; 6652 int datalen; 6653 6654 header = (struct scsi_mode_hdr_10 *)ctsio->kern_data_ptr; 6655 6656 datalen = ctl_min(total_len - 2, 65533); 6657 scsi_ulto2b(datalen, header->datalen); 6658 if (dbd) 6659 scsi_ulto2b(0, header->block_descr_len); 6660 else 6661 scsi_ulto2b(sizeof(struct scsi_mode_block_descr), 6662 header->block_descr_len); 6663 block_desc = (struct scsi_mode_block_descr *)&header[1]; 6664 break; 6665 } 6666 default: 6667 panic("invalid CDB type %#x", ctsio->cdb[0]); 6668 break; /* NOTREACHED */ 6669 } 6670 6671 /* 6672 * If we've got a disk, use its blocksize in the block 6673 * descriptor. Otherwise, just set it to 0. 
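 * (control_dev is zero exactly when the backend LUN is a direct access
 * device, i.e. T_DIRECT, so that is the case that gets the real
 * blocksize.)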
6674 */ 6675 if (dbd == 0) { 6676 if (control_dev == 0) 6677 scsi_ulto3b(lun->be_lun->blocksize, 6678 block_desc->block_len); 6679 else 6680 scsi_ulto3b(0, block_desc->block_len); 6681 } 6682 6683 switch (page_code) { 6684 case SMS_ALL_PAGES_PAGE: { 6685 int i, data_used; 6686 6687 data_used = header_len; 6688 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { 6689 struct ctl_page_index *page_index; 6690 6691 page_index = &lun->mode_pages.index[i]; 6692 6693 if ((control_dev != 0) 6694 && (page_index->page_flags & 6695 CTL_PAGE_FLAG_DISK_ONLY)) 6696 continue; 6697 6698 /* 6699 * We don't use this subpage if the user didn't 6700 * request all subpages. We already checked (above) 6701 * to make sure the user only specified a subpage 6702 * of 0 or 0xff in the SMS_ALL_PAGES_PAGE case. 6703 */ 6704 if ((page_index->subpage != 0) 6705 && (subpage == SMS_SUBPAGE_PAGE_0)) 6706 continue; 6707 6708 /* 6709 * Call the handler, if it exists, to update the 6710 * page to the latest values. 6711 */ 6712 if (page_index->sense_handler != NULL) 6713 page_index->sense_handler(ctsio, page_index,pc); 6714 6715 memcpy(ctsio->kern_data_ptr + data_used, 6716 page_index->page_data + 6717 (page_index->page_len * pc), 6718 page_index->page_len); 6719 data_used += page_index->page_len; 6720 } 6721 break; 6722 } 6723 default: { 6724 int i, data_used; 6725 6726 data_used = header_len; 6727 6728 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { 6729 struct ctl_page_index *page_index; 6730 6731 page_index = &lun->mode_pages.index[i]; 6732 6733 /* Look for the right page code */ 6734 if ((page_index->page_code & SMPH_PC_MASK) != page_code) 6735 continue; 6736 6737 /* Look for the right subpage or the subpage wildcard*/ 6738 if ((page_index->subpage != subpage) 6739 && (subpage != SMS_SUBPAGE_ALL)) 6740 continue; 6741 6742 /* Make sure the page is supported for this dev type */ 6743 if ((control_dev != 0) 6744 && (page_index->page_flags & 6745 CTL_PAGE_FLAG_DISK_ONLY)) 6746 continue; 6747 6748 /* 6749 * Call the handler, if it exists, to update the 6750 * page to the latest values.
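 * The handler refreshes the copy selected by pc within page_data; that
 * same copy is then copied into the response buffer below.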
6751 */ 6752 if (page_index->sense_handler != NULL) 6753 page_index->sense_handler(ctsio, page_index,pc); 6754 6755 memcpy(ctsio->kern_data_ptr + data_used, 6756 page_index->page_data + 6757 (page_index->page_len * pc), 6758 page_index->page_len); 6759 data_used += page_index->page_len; 6760 } 6761 break; 6762 } 6763 } 6764 6765 ctsio->scsi_status = SCSI_STATUS_OK; 6766 6767 ctsio->be_move_done = ctl_config_move_done; 6768 ctl_datamove((union ctl_io *)ctsio); 6769 6770 return (CTL_RETVAL_COMPLETE); 6771 } 6772 6773 int 6774 ctl_read_capacity(struct ctl_scsiio *ctsio) 6775 { 6776 struct scsi_read_capacity *cdb; 6777 struct scsi_read_capacity_data *data; 6778 struct ctl_lun *lun; 6779 uint32_t lba; 6780 6781 CTL_DEBUG_PRINT(("ctl_read_capacity\n")); 6782 6783 cdb = (struct scsi_read_capacity *)ctsio->cdb; 6784 6785 lba = scsi_4btoul(cdb->addr); 6786 if (((cdb->pmi & SRC_PMI) == 0) 6787 && (lba != 0)) { 6788 ctl_set_invalid_field(/*ctsio*/ ctsio, 6789 /*sks_valid*/ 1, 6790 /*command*/ 1, 6791 /*field*/ 2, 6792 /*bit_valid*/ 0, 6793 /*bit*/ 0); 6794 ctl_done((union ctl_io *)ctsio); 6795 return (CTL_RETVAL_COMPLETE); 6796 } 6797 6798 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 6799 6800 ctsio->kern_data_ptr = malloc(sizeof(*data), M_CTL, M_WAITOK | M_ZERO); 6801 data = (struct scsi_read_capacity_data *)ctsio->kern_data_ptr; 6802 ctsio->residual = 0; 6803 ctsio->kern_data_len = sizeof(*data); 6804 ctsio->kern_total_len = sizeof(*data); 6805 ctsio->kern_data_resid = 0; 6806 ctsio->kern_rel_offset = 0; 6807 ctsio->kern_sg_entries = 0; 6808 6809 /* 6810 * If the maximum LBA is greater than 0xfffffffe, the user must 6811 * issue a SERVICE ACTION IN (16) command, with the read capacity 6812 * serivce action set. 6813 */ 6814 if (lun->be_lun->maxlba > 0xfffffffe) 6815 scsi_ulto4b(0xffffffff, data->addr); 6816 else 6817 scsi_ulto4b(lun->be_lun->maxlba, data->addr); 6818 6819 /* 6820 * XXX KDM this may not be 512 bytes... 
6821 */ 6822 scsi_ulto4b(lun->be_lun->blocksize, data->length); 6823 6824 ctsio->scsi_status = SCSI_STATUS_OK; 6825 6826 ctsio->be_move_done = ctl_config_move_done; 6827 ctl_datamove((union ctl_io *)ctsio); 6828 6829 return (CTL_RETVAL_COMPLETE); 6830 } 6831 6832 static int 6833 ctl_read_capacity_16(struct ctl_scsiio *ctsio) 6834 { 6835 struct scsi_read_capacity_16 *cdb; 6836 struct scsi_read_capacity_data_long *data; 6837 struct ctl_lun *lun; 6838 uint64_t lba; 6839 uint32_t alloc_len; 6840 6841 CTL_DEBUG_PRINT(("ctl_read_capacity_16\n")); 6842 6843 cdb = (struct scsi_read_capacity_16 *)ctsio->cdb; 6844 6845 alloc_len = scsi_4btoul(cdb->alloc_len); 6846 lba = scsi_8btou64(cdb->addr); 6847 6848 if ((cdb->reladr & SRC16_PMI) 6849 && (lba != 0)) { 6850 ctl_set_invalid_field(/*ctsio*/ ctsio, 6851 /*sks_valid*/ 1, 6852 /*command*/ 1, 6853 /*field*/ 2, 6854 /*bit_valid*/ 0, 6855 /*bit*/ 0); 6856 ctl_done((union ctl_io *)ctsio); 6857 return (CTL_RETVAL_COMPLETE); 6858 } 6859 6860 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 6861 6862 ctsio->kern_data_ptr = malloc(sizeof(*data), M_CTL, M_WAITOK | M_ZERO); 6863 data = (struct scsi_read_capacity_data_long *)ctsio->kern_data_ptr; 6864 6865 if (sizeof(*data) < alloc_len) { 6866 ctsio->residual = alloc_len - sizeof(*data); 6867 ctsio->kern_data_len = sizeof(*data); 6868 ctsio->kern_total_len = sizeof(*data); 6869 } else { 6870 ctsio->residual = 0; 6871 ctsio->kern_data_len = alloc_len; 6872 ctsio->kern_total_len = alloc_len; 6873 } 6874 ctsio->kern_data_resid = 0; 6875 ctsio->kern_rel_offset = 0; 6876 ctsio->kern_sg_entries = 0; 6877 6878 scsi_u64to8b(lun->be_lun->maxlba, data->addr); 6879 /* XXX KDM this may not be 512 bytes... */ 6880 scsi_ulto4b(lun->be_lun->blocksize, data->length); 6881 6882 ctsio->scsi_status = SCSI_STATUS_OK; 6883 6884 ctsio->be_move_done = ctl_config_move_done; 6885 ctl_datamove((union ctl_io *)ctsio); 6886 6887 return (CTL_RETVAL_COMPLETE); 6888 } 6889 6890 int 6891 ctl_service_action_in(struct ctl_scsiio *ctsio) 6892 { 6893 struct scsi_service_action_in *cdb; 6894 int retval; 6895 6896 CTL_DEBUG_PRINT(("ctl_service_action_in\n")); 6897 6898 cdb = (struct scsi_service_action_in *)ctsio->cdb; 6899 6900 retval = CTL_RETVAL_COMPLETE; 6901 6902 switch (cdb->service_action) { 6903 case SRC16_SERVICE_ACTION: 6904 retval = ctl_read_capacity_16(ctsio); 6905 break; 6906 default: 6907 ctl_set_invalid_field(/*ctsio*/ ctsio, 6908 /*sks_valid*/ 1, 6909 /*command*/ 1, 6910 /*field*/ 1, 6911 /*bit_valid*/ 1, 6912 /*bit*/ 4); 6913 ctl_done((union ctl_io *)ctsio); 6914 break; 6915 } 6916 6917 return (retval); 6918 } 6919 6920 int 6921 ctl_maintenance_in(struct ctl_scsiio *ctsio) 6922 { 6923 struct scsi_maintenance_in *cdb; 6924 int retval; 6925 int alloc_len, total_len = 0; 6926 int num_target_port_groups, single; 6927 struct ctl_lun *lun; 6928 struct ctl_softc *softc; 6929 struct scsi_target_group_data *rtg_ptr; 6930 struct scsi_target_port_group_descriptor *tpg_desc_ptr1, *tpg_desc_ptr2; 6931 struct scsi_target_port_descriptor *tp_desc_ptr1_1, *tp_desc_ptr1_2, 6932 *tp_desc_ptr2_1, *tp_desc_ptr2_2; 6933 6934 CTL_DEBUG_PRINT(("ctl_maintenance_in\n")); 6935 6936 cdb = (struct scsi_maintenance_in *)ctsio->cdb; 6937 softc = control_softc; 6938 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 6939 6940 retval = CTL_RETVAL_COMPLETE; 6941 6942 if ((cdb->byte2 & SERVICE_ACTION_MASK) != SA_RPRT_TRGT_GRP) { 6943 ctl_set_invalid_field(/*ctsio*/ ctsio, 6944 /*sks_valid*/ 1, 6945 /*command*/ 1, 6946 /*field*/ 
1, 6947 /*bit_valid*/ 1, 6948 /*bit*/ 4); 6949 ctl_done((union ctl_io *)ctsio); 6950 return(retval); 6951 } 6952 6953 mtx_lock(&softc->ctl_lock); 6954 single = ctl_is_single; 6955 mtx_unlock(&softc->ctl_lock); 6956 6957 if (single) 6958 num_target_port_groups = NUM_TARGET_PORT_GROUPS - 1; 6959 else 6960 num_target_port_groups = NUM_TARGET_PORT_GROUPS; 6961 6962 total_len = sizeof(struct scsi_target_group_data) + 6963 sizeof(struct scsi_target_port_group_descriptor) * 6964 num_target_port_groups + 6965 sizeof(struct scsi_target_port_descriptor) * 6966 NUM_PORTS_PER_GRP * num_target_port_groups; 6967 6968 alloc_len = scsi_4btoul(cdb->length); 6969 6970 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 6971 6972 ctsio->kern_sg_entries = 0; 6973 6974 if (total_len < alloc_len) { 6975 ctsio->residual = alloc_len - total_len; 6976 ctsio->kern_data_len = total_len; 6977 ctsio->kern_total_len = total_len; 6978 } else { 6979 ctsio->residual = 0; 6980 ctsio->kern_data_len = alloc_len; 6981 ctsio->kern_total_len = alloc_len; 6982 } 6983 ctsio->kern_data_resid = 0; 6984 ctsio->kern_rel_offset = 0; 6985 6986 rtg_ptr = (struct scsi_target_group_data *)ctsio->kern_data_ptr; 6987 6988 tpg_desc_ptr1 = &rtg_ptr->groups[0]; 6989 tp_desc_ptr1_1 = &tpg_desc_ptr1->descriptors[0]; 6990 tp_desc_ptr1_2 = (struct scsi_target_port_descriptor *) 6991 &tp_desc_ptr1_1->desc_list[0]; 6992 6993 if (single == 0) { 6994 tpg_desc_ptr2 = (struct scsi_target_port_group_descriptor *) 6995 &tp_desc_ptr1_2->desc_list[0]; 6996 tp_desc_ptr2_1 = &tpg_desc_ptr2->descriptors[0]; 6997 tp_desc_ptr2_2 = (struct scsi_target_port_descriptor *) 6998 &tp_desc_ptr2_1->desc_list[0]; 6999 } else { 7000 tpg_desc_ptr2 = NULL; 7001 tp_desc_ptr2_1 = NULL; 7002 tp_desc_ptr2_2 = NULL; 7003 } 7004 7005 scsi_ulto4b(total_len - 4, rtg_ptr->length); 7006 if (single == 0) { 7007 if (ctsio->io_hdr.nexus.targ_port < CTL_MAX_PORTS) { 7008 if (lun->flags & CTL_LUN_PRIMARY_SC) { 7009 tpg_desc_ptr1->pref_state = TPG_PRIMARY; 7010 tpg_desc_ptr2->pref_state = 7011 TPG_ASYMMETRIC_ACCESS_NONOPTIMIZED; 7012 } else { 7013 tpg_desc_ptr1->pref_state = 7014 TPG_ASYMMETRIC_ACCESS_NONOPTIMIZED; 7015 tpg_desc_ptr2->pref_state = TPG_PRIMARY; 7016 } 7017 } else { 7018 if (lun->flags & CTL_LUN_PRIMARY_SC) { 7019 tpg_desc_ptr1->pref_state = 7020 TPG_ASYMMETRIC_ACCESS_NONOPTIMIZED; 7021 tpg_desc_ptr2->pref_state = TPG_PRIMARY; 7022 } else { 7023 tpg_desc_ptr1->pref_state = TPG_PRIMARY; 7024 tpg_desc_ptr2->pref_state = 7025 TPG_ASYMMETRIC_ACCESS_NONOPTIMIZED; 7026 } 7027 } 7028 } else { 7029 tpg_desc_ptr1->pref_state = TPG_PRIMARY; 7030 } 7031 tpg_desc_ptr1->support = 0; 7032 tpg_desc_ptr1->target_port_group[1] = 1; 7033 tpg_desc_ptr1->status = TPG_IMPLICIT; 7034 tpg_desc_ptr1->target_port_count= NUM_PORTS_PER_GRP; 7035 7036 if (single == 0) { 7037 tpg_desc_ptr2->support = 0; 7038 tpg_desc_ptr2->target_port_group[1] = 2; 7039 tpg_desc_ptr2->status = TPG_IMPLICIT; 7040 tpg_desc_ptr2->target_port_count = NUM_PORTS_PER_GRP; 7041 7042 tp_desc_ptr1_1->relative_target_port_identifier[1] = 1; 7043 tp_desc_ptr1_2->relative_target_port_identifier[1] = 2; 7044 7045 tp_desc_ptr2_1->relative_target_port_identifier[1] = 9; 7046 tp_desc_ptr2_2->relative_target_port_identifier[1] = 10; 7047 } else { 7048 if (ctsio->io_hdr.nexus.targ_port < CTL_MAX_PORTS) { 7049 tp_desc_ptr1_1->relative_target_port_identifier[1] = 1; 7050 tp_desc_ptr1_2->relative_target_port_identifier[1] = 2; 7051 } else { 7052 tp_desc_ptr1_1->relative_target_port_identifier[1] = 9; 7053 
tp_desc_ptr1_2->relative_target_port_identifier[1] = 10; 7054 } 7055 } 7056 7057 ctsio->be_move_done = ctl_config_move_done; 7058 7059 CTL_DEBUG_PRINT(("buf = %x %x %x %x %x %x %x %x\n", 7060 ctsio->kern_data_ptr[0], ctsio->kern_data_ptr[1], 7061 ctsio->kern_data_ptr[2], ctsio->kern_data_ptr[3], 7062 ctsio->kern_data_ptr[4], ctsio->kern_data_ptr[5], 7063 ctsio->kern_data_ptr[6], ctsio->kern_data_ptr[7])); 7064 7065 ctl_datamove((union ctl_io *)ctsio); 7066 return(retval); 7067 } 7068 7069 int 7070 ctl_persistent_reserve_in(struct ctl_scsiio *ctsio) 7071 { 7072 struct scsi_per_res_in *cdb; 7073 int alloc_len, total_len = 0; 7074 /* struct scsi_per_res_in_rsrv in_data; */ 7075 struct ctl_lun *lun; 7076 struct ctl_softc *softc; 7077 7078 CTL_DEBUG_PRINT(("ctl_persistent_reserve_in\n")); 7079 7080 softc = control_softc; 7081 7082 cdb = (struct scsi_per_res_in *)ctsio->cdb; 7083 7084 alloc_len = scsi_2btoul(cdb->length); 7085 7086 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 7087 7088 retry: 7089 mtx_lock(&softc->ctl_lock); 7090 switch (cdb->action) { 7091 case SPRI_RK: /* read keys */ 7092 total_len = sizeof(struct scsi_per_res_in_keys) + 7093 lun->pr_key_count * 7094 sizeof(struct scsi_per_res_key); 7095 break; 7096 case SPRI_RR: /* read reservation */ 7097 if (lun->flags & CTL_LUN_PR_RESERVED) 7098 total_len = sizeof(struct scsi_per_res_in_rsrv); 7099 else 7100 total_len = sizeof(struct scsi_per_res_in_header); 7101 break; 7102 case SPRI_RC: /* report capabilities */ 7103 total_len = sizeof(struct scsi_per_res_cap); 7104 break; 7105 case SPRI_RS: /* read full status */ 7106 default: 7107 mtx_unlock(&softc->ctl_lock); 7108 ctl_set_invalid_field(ctsio, 7109 /*sks_valid*/ 1, 7110 /*command*/ 1, 7111 /*field*/ 1, 7112 /*bit_valid*/ 1, 7113 /*bit*/ 0); 7114 ctl_done((union ctl_io *)ctsio); 7115 return (CTL_RETVAL_COMPLETE); 7116 break; /* NOTREACHED */ 7117 } 7118 mtx_unlock(&softc->ctl_lock); 7119 7120 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 7121 7122 if (total_len < alloc_len) { 7123 ctsio->residual = alloc_len - total_len; 7124 ctsio->kern_data_len = total_len; 7125 ctsio->kern_total_len = total_len; 7126 } else { 7127 ctsio->residual = 0; 7128 ctsio->kern_data_len = alloc_len; 7129 ctsio->kern_total_len = alloc_len; 7130 } 7131 7132 ctsio->kern_data_resid = 0; 7133 ctsio->kern_rel_offset = 0; 7134 ctsio->kern_sg_entries = 0; 7135 7136 mtx_lock(&softc->ctl_lock); 7137 switch (cdb->action) { 7138 case SPRI_RK: { // read keys 7139 struct scsi_per_res_in_keys *res_keys; 7140 int i, key_count; 7141 7142 res_keys = (struct scsi_per_res_in_keys*)ctsio->kern_data_ptr; 7143 7144 /* 7145 * We had to drop the lock to allocate our buffer, which 7146 * leaves time for someone to come in with another 7147 * persistent reservation. (That is unlikely, though, 7148 * since this should be the only persistent reservation 7149 * command active right now.) 
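 * If the key count did change, we free the buffer and go back to
 * recompute the allocation size under the lock.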
7150 */ 7151 if (total_len != (sizeof(struct scsi_per_res_in_keys) + 7152 (lun->pr_key_count * 7153 sizeof(struct scsi_per_res_key)))){ 7154 mtx_unlock(&softc->ctl_lock); 7155 free(ctsio->kern_data_ptr, M_CTL); 7156 printf("%s: reservation length changed, retrying\n", 7157 __func__); 7158 goto retry; 7159 } 7160 7161 scsi_ulto4b(lun->PRGeneration, res_keys->header.generation); 7162 7163 scsi_ulto4b(sizeof(struct scsi_per_res_key) * 7164 lun->pr_key_count, res_keys->header.length); 7165 7166 for (i = 0, key_count = 0; i < 2*CTL_MAX_INITIATORS; i++) { 7167 if (!lun->per_res[i].registered) 7168 continue; 7169 7170 /* 7171 * We used lun->pr_key_count to calculate the 7172 * size to allocate. If it turns out the number of 7173 * initiators with the registered flag set is 7174 * larger than that (i.e. they haven't been kept in 7175 * sync), we've got a problem. 7176 */ 7177 if (key_count >= lun->pr_key_count) { 7178 #ifdef NEEDTOPORT 7179 csevent_log(CSC_CTL | CSC_SHELF_SW | 7180 CTL_PR_ERROR, 7181 csevent_LogType_Fault, 7182 csevent_AlertLevel_Yellow, 7183 csevent_FRU_ShelfController, 7184 csevent_FRU_Firmware, 7185 csevent_FRU_Unknown, 7186 "registered keys %d >= key " 7187 "count %d", key_count, 7188 lun->pr_key_count); 7189 #endif 7190 key_count++; 7191 continue; 7192 } 7193 memcpy(res_keys->keys[key_count].key, 7194 lun->per_res[i].res_key.key, 7195 ctl_min(sizeof(res_keys->keys[key_count].key), 7196 sizeof(lun->per_res[i].res_key))); 7197 key_count++; 7198 } 7199 break; 7200 } 7201 case SPRI_RR: { // read reservation 7202 struct scsi_per_res_in_rsrv *res; 7203 int tmp_len, header_only; 7204 7205 res = (struct scsi_per_res_in_rsrv *)ctsio->kern_data_ptr; 7206 7207 scsi_ulto4b(lun->PRGeneration, res->header.generation); 7208 7209 if (lun->flags & CTL_LUN_PR_RESERVED) 7210 { 7211 tmp_len = sizeof(struct scsi_per_res_in_rsrv); 7212 scsi_ulto4b(sizeof(struct scsi_per_res_in_rsrv_data), 7213 res->header.length); 7214 header_only = 0; 7215 } else { 7216 tmp_len = sizeof(struct scsi_per_res_in_header); 7217 scsi_ulto4b(0, res->header.length); 7218 header_only = 1; 7219 } 7220 7221 /* 7222 * We had to drop the lock to allocate our buffer, which 7223 * leaves time for someone to come in with another 7224 * persistent reservation. (That is unlikely, though, 7225 * since this should be the only persistent reservation 7226 * command active right now.) 7227 */ 7228 if (tmp_len != total_len) { 7229 mtx_unlock(&softc->ctl_lock); 7230 free(ctsio->kern_data_ptr, M_CTL); 7231 printf("%s: reservation status changed, retrying\n", 7232 __func__); 7233 goto retry; 7234 } 7235 7236 /* 7237 * No reservation held, so we're done. 7238 */ 7239 if (header_only != 0) 7240 break; 7241 7242 /* 7243 * If the registration is an All Registrants type, the key 7244 * is 0, since it doesn't really matter. 
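 * The buffer was allocated with M_ZERO, so simply not copying a key here
 * leaves the reservation key field as zero.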
7245 */ 7246 if (lun->pr_res_idx != CTL_PR_ALL_REGISTRANTS) { 7247 memcpy(res->data.reservation, 7248 &lun->per_res[lun->pr_res_idx].res_key, 7249 sizeof(struct scsi_per_res_key)); 7250 } 7251 res->data.scopetype = lun->res_type; 7252 break; 7253 } 7254 case SPRI_RC: //report capabilities 7255 { 7256 struct scsi_per_res_cap *res_cap; 7257 uint16_t type_mask; 7258 7259 res_cap = (struct scsi_per_res_cap *)ctsio->kern_data_ptr; 7260 scsi_ulto2b(sizeof(*res_cap), res_cap->length); 7261 res_cap->flags2 |= SPRI_TMV; 7262 type_mask = SPRI_TM_WR_EX_AR | 7263 SPRI_TM_EX_AC_RO | 7264 SPRI_TM_WR_EX_RO | 7265 SPRI_TM_EX_AC | 7266 SPRI_TM_WR_EX | 7267 SPRI_TM_EX_AC_AR; 7268 scsi_ulto2b(type_mask, res_cap->type_mask); 7269 break; 7270 } 7271 case SPRI_RS: //read full status 7272 default: 7273 /* 7274 * This is a bug, because we just checked for this above, 7275 * and should have returned an error. 7276 */ 7277 panic("Invalid PR type %x", cdb->action); 7278 break; /* NOTREACHED */ 7279 } 7280 mtx_unlock(&softc->ctl_lock); 7281 7282 ctsio->be_move_done = ctl_config_move_done; 7283 7284 CTL_DEBUG_PRINT(("buf = %x %x %x %x %x %x %x %x\n", 7285 ctsio->kern_data_ptr[0], ctsio->kern_data_ptr[1], 7286 ctsio->kern_data_ptr[2], ctsio->kern_data_ptr[3], 7287 ctsio->kern_data_ptr[4], ctsio->kern_data_ptr[5], 7288 ctsio->kern_data_ptr[6], ctsio->kern_data_ptr[7])); 7289 7290 ctl_datamove((union ctl_io *)ctsio); 7291 7292 return (CTL_RETVAL_COMPLETE); 7293 } 7294 7295 /* 7296 * Returns 0 if ctl_persistent_reserve_out() should continue, non-zero if 7297 * it should return. 7298 */ 7299 static int 7300 ctl_pro_preempt(struct ctl_softc *softc, struct ctl_lun *lun, uint64_t res_key, 7301 uint64_t sa_res_key, uint8_t type, uint32_t residx, 7302 struct ctl_scsiio *ctsio, struct scsi_per_res_out *cdb, 7303 struct scsi_per_res_out_parms* param) 7304 { 7305 union ctl_ha_msg persis_io; 7306 int retval, i; 7307 int isc_retval; 7308 7309 retval = 0; 7310 7311 if (sa_res_key == 0) { 7312 mtx_lock(&softc->ctl_lock); 7313 if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS) { 7314 /* validate scope and type */ 7315 if ((cdb->scope_type & SPR_SCOPE_MASK) != 7316 SPR_LU_SCOPE) { 7317 mtx_unlock(&softc->ctl_lock); 7318 ctl_set_invalid_field(/*ctsio*/ ctsio, 7319 /*sks_valid*/ 1, 7320 /*command*/ 1, 7321 /*field*/ 2, 7322 /*bit_valid*/ 1, 7323 /*bit*/ 4); 7324 ctl_done((union ctl_io *)ctsio); 7325 return (1); 7326 } 7327 7328 if (type>8 || type==2 || type==4 || type==0) { 7329 mtx_unlock(&softc->ctl_lock); 7330 ctl_set_invalid_field(/*ctsio*/ ctsio, 7331 /*sks_valid*/ 1, 7332 /*command*/ 1, 7333 /*field*/ 2, 7334 /*bit_valid*/ 1, 7335 /*bit*/ 0); 7336 ctl_done((union ctl_io *)ctsio); 7337 return (1); 7338 } 7339 7340 /* temporarily unregister this nexus */ 7341 lun->per_res[residx].registered = 0; 7342 7343 /* 7344 * Unregister everybody else and build UA for 7345 * them 7346 */ 7347 for(i=0; i < 2*CTL_MAX_INITIATORS; i++) { 7348 if (lun->per_res[i].registered == 0) 7349 continue; 7350 7351 if (!persis_offset 7352 && i <CTL_MAX_INITIATORS) 7353 lun->pending_sense[i].ua_pending |= 7354 CTL_UA_REG_PREEMPT; 7355 else if (persis_offset 7356 && i >= persis_offset) 7357 lun->pending_sense[i-persis_offset 7358 ].ua_pending |= 7359 CTL_UA_REG_PREEMPT; 7360 lun->per_res[i].registered = 0; 7361 memset(&lun->per_res[i].res_key, 0, 7362 sizeof(struct scsi_per_res_key)); 7363 } 7364 lun->per_res[residx].registered = 1; 7365 lun->pr_key_count = 1; 7366 lun->res_type = type; 7367 if (lun->res_type != SPR_TYPE_WR_EX_AR 7368 && lun->res_type != 
SPR_TYPE_EX_AC_AR) 7369 lun->pr_res_idx = residx; 7370 7371 mtx_unlock(&softc->ctl_lock); 7372 /* send msg to other side */ 7373 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 7374 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 7375 persis_io.pr.pr_info.action = CTL_PR_PREEMPT; 7376 persis_io.pr.pr_info.residx = lun->pr_res_idx; 7377 persis_io.pr.pr_info.res_type = type; 7378 memcpy(persis_io.pr.pr_info.sa_res_key, 7379 param->serv_act_res_key, 7380 sizeof(param->serv_act_res_key)); 7381 if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL, 7382 &persis_io, sizeof(persis_io), 0)) > 7383 CTL_HA_STATUS_SUCCESS) { 7384 printf("CTL:Persis Out error returned " 7385 "from ctl_ha_msg_send %d\n", 7386 isc_retval); 7387 } 7388 } else { 7389 /* not all registrants */ 7390 mtx_unlock(&softc->ctl_lock); 7391 free(ctsio->kern_data_ptr, M_CTL); 7392 ctl_set_invalid_field(ctsio, 7393 /*sks_valid*/ 1, 7394 /*command*/ 0, 7395 /*field*/ 8, 7396 /*bit_valid*/ 0, 7397 /*bit*/ 0); 7398 ctl_done((union ctl_io *)ctsio); 7399 return (1); 7400 } 7401 } else if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS 7402 || !(lun->flags & CTL_LUN_PR_RESERVED)) { 7403 int found = 0; 7404 7405 mtx_lock(&softc->ctl_lock); 7406 if (res_key == sa_res_key) { 7407 /* special case */ 7408 /* 7409 * The spec implies this is not good but doesn't 7410 * say what to do. There are two choices either 7411 * generate a res conflict or check condition 7412 * with illegal field in parameter data. Since 7413 * that is what is done when the sa_res_key is 7414 * zero I'll take that approach since this has 7415 * to do with the sa_res_key. 7416 */ 7417 mtx_unlock(&softc->ctl_lock); 7418 free(ctsio->kern_data_ptr, M_CTL); 7419 ctl_set_invalid_field(ctsio, 7420 /*sks_valid*/ 1, 7421 /*command*/ 0, 7422 /*field*/ 8, 7423 /*bit_valid*/ 0, 7424 /*bit*/ 0); 7425 ctl_done((union ctl_io *)ctsio); 7426 return (1); 7427 } 7428 7429 for (i=0; i < 2*CTL_MAX_INITIATORS; i++) { 7430 if (lun->per_res[i].registered 7431 && memcmp(param->serv_act_res_key, 7432 lun->per_res[i].res_key.key, 7433 sizeof(struct scsi_per_res_key)) != 0) 7434 continue; 7435 7436 found = 1; 7437 lun->per_res[i].registered = 0; 7438 memset(&lun->per_res[i].res_key, 0, 7439 sizeof(struct scsi_per_res_key)); 7440 lun->pr_key_count--; 7441 7442 if (!persis_offset 7443 && i < CTL_MAX_INITIATORS) 7444 lun->pending_sense[i].ua_pending |= 7445 CTL_UA_REG_PREEMPT; 7446 else if (persis_offset 7447 && i >= persis_offset) 7448 lun->pending_sense[i-persis_offset].ua_pending|= 7449 CTL_UA_REG_PREEMPT; 7450 } 7451 mtx_unlock(&softc->ctl_lock); 7452 if (!found) { 7453 free(ctsio->kern_data_ptr, M_CTL); 7454 ctl_set_reservation_conflict(ctsio); 7455 ctl_done((union ctl_io *)ctsio); 7456 return (CTL_RETVAL_COMPLETE); 7457 } 7458 /* send msg to other side */ 7459 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 7460 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 7461 persis_io.pr.pr_info.action = CTL_PR_PREEMPT; 7462 persis_io.pr.pr_info.residx = lun->pr_res_idx; 7463 persis_io.pr.pr_info.res_type = type; 7464 memcpy(persis_io.pr.pr_info.sa_res_key, 7465 param->serv_act_res_key, 7466 sizeof(param->serv_act_res_key)); 7467 if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL, 7468 &persis_io, sizeof(persis_io), 0)) > 7469 CTL_HA_STATUS_SUCCESS) { 7470 printf("CTL:Persis Out error returned from " 7471 "ctl_ha_msg_send %d\n", isc_retval); 7472 } 7473 } else { 7474 /* Reserved but not all registrants */ 7475 /* sa_res_key is res holder */ 7476 if (memcmp(param->serv_act_res_key, 7477 lun->per_res[lun->pr_res_idx].res_key.key, 7478 
sizeof(struct scsi_per_res_key)) == 0) { 7479 /* validate scope and type */ 7480 if ((cdb->scope_type & SPR_SCOPE_MASK) != 7481 SPR_LU_SCOPE) { 7482 ctl_set_invalid_field(/*ctsio*/ ctsio, 7483 /*sks_valid*/ 1, 7484 /*command*/ 1, 7485 /*field*/ 2, 7486 /*bit_valid*/ 1, 7487 /*bit*/ 4); 7488 ctl_done((union ctl_io *)ctsio); 7489 return (1); 7490 } 7491 7492 if (type>8 || type==2 || type==4 || type==0) { 7493 ctl_set_invalid_field(/*ctsio*/ ctsio, 7494 /*sks_valid*/ 1, 7495 /*command*/ 1, 7496 /*field*/ 2, 7497 /*bit_valid*/ 1, 7498 /*bit*/ 0); 7499 ctl_done((union ctl_io *)ctsio); 7500 return (1); 7501 } 7502 7503 /* 7504 * Do the following: 7505 * if sa_res_key != res_key remove all 7506 * registrants w/sa_res_key and generate UA 7507 * for these registrants(Registrations 7508 * Preempted) if it wasn't an exclusive 7509 * reservation generate UA(Reservations 7510 * Preempted) for all other registered nexuses 7511 * if the type has changed. Establish the new 7512 * reservation and holder. If res_key and 7513 * sa_res_key are the same do the above 7514 * except don't unregister the res holder. 7515 */ 7516 7517 /* 7518 * Temporarily unregister so it won't get 7519 * removed or UA generated 7520 */ 7521 lun->per_res[residx].registered = 0; 7522 for(i=0; i < 2*CTL_MAX_INITIATORS; i++) { 7523 if (lun->per_res[i].registered == 0) 7524 continue; 7525 7526 if (memcmp(param->serv_act_res_key, 7527 lun->per_res[i].res_key.key, 7528 sizeof(struct scsi_per_res_key)) == 0) { 7529 lun->per_res[i].registered = 0; 7530 memset(&lun->per_res[i].res_key, 7531 0, 7532 sizeof(struct scsi_per_res_key)); 7533 lun->pr_key_count--; 7534 7535 if (!persis_offset 7536 && i < CTL_MAX_INITIATORS) 7537 lun->pending_sense[i 7538 ].ua_pending |= 7539 CTL_UA_REG_PREEMPT; 7540 else if (persis_offset 7541 && i >= persis_offset) 7542 lun->pending_sense[ 7543 i-persis_offset].ua_pending |= 7544 CTL_UA_REG_PREEMPT; 7545 } else if (type != lun->res_type 7546 && (lun->res_type == SPR_TYPE_WR_EX_RO 7547 || lun->res_type ==SPR_TYPE_EX_AC_RO)){ 7548 if (!persis_offset 7549 && i < CTL_MAX_INITIATORS) 7550 lun->pending_sense[i 7551 ].ua_pending |= 7552 CTL_UA_RES_RELEASE; 7553 else if (persis_offset 7554 && i >= persis_offset) 7555 lun->pending_sense[ 7556 i-persis_offset 7557 ].ua_pending |= 7558 CTL_UA_RES_RELEASE; 7559 } 7560 } 7561 lun->per_res[residx].registered = 1; 7562 lun->res_type = type; 7563 if (lun->res_type != SPR_TYPE_WR_EX_AR 7564 && lun->res_type != SPR_TYPE_EX_AC_AR) 7565 lun->pr_res_idx = residx; 7566 else 7567 lun->pr_res_idx = 7568 CTL_PR_ALL_REGISTRANTS; 7569 7570 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 7571 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 7572 persis_io.pr.pr_info.action = CTL_PR_PREEMPT; 7573 persis_io.pr.pr_info.residx = lun->pr_res_idx; 7574 persis_io.pr.pr_info.res_type = type; 7575 memcpy(persis_io.pr.pr_info.sa_res_key, 7576 param->serv_act_res_key, 7577 sizeof(param->serv_act_res_key)); 7578 if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL, 7579 &persis_io, sizeof(persis_io), 0)) > 7580 CTL_HA_STATUS_SUCCESS) { 7581 printf("CTL:Persis Out error returned " 7582 "from ctl_ha_msg_send %d\n", 7583 isc_retval); 7584 } 7585 } else { 7586 /* 7587 * sa_res_key is not the res holder just 7588 * remove registrants 7589 */ 7590 int found=0; 7591 mtx_lock(&softc->ctl_lock); 7592 7593 for (i=0; i < 2*CTL_MAX_INITIATORS; i++) { 7594 if (memcmp(param->serv_act_res_key, 7595 lun->per_res[i].res_key.key, 7596 sizeof(struct scsi_per_res_key)) != 0) 7597 continue; 7598 7599 found = 1; 7600 
lun->per_res[i].registered = 0; 7601 memset(&lun->per_res[i].res_key, 0, 7602 sizeof(struct scsi_per_res_key)); 7603 lun->pr_key_count--; 7604 7605 if (!persis_offset 7606 && i < CTL_MAX_INITIATORS) 7607 lun->pending_sense[i].ua_pending |= 7608 CTL_UA_REG_PREEMPT; 7609 else if (persis_offset 7610 && i >= persis_offset) 7611 lun->pending_sense[ 7612 i-persis_offset].ua_pending |= 7613 CTL_UA_REG_PREEMPT; 7614 } 7615 7616 if (!found) { 7617 mtx_unlock(&softc->ctl_lock); 7618 free(ctsio->kern_data_ptr, M_CTL); 7619 ctl_set_reservation_conflict(ctsio); 7620 ctl_done((union ctl_io *)ctsio); 7621 return (1); 7622 } 7623 mtx_unlock(&softc->ctl_lock); 7624 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 7625 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 7626 persis_io.pr.pr_info.action = CTL_PR_PREEMPT; 7627 persis_io.pr.pr_info.residx = lun->pr_res_idx; 7628 persis_io.pr.pr_info.res_type = type; 7629 memcpy(persis_io.pr.pr_info.sa_res_key, 7630 param->serv_act_res_key, 7631 sizeof(param->serv_act_res_key)); 7632 if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL, 7633 &persis_io, sizeof(persis_io), 0)) > 7634 CTL_HA_STATUS_SUCCESS) { 7635 printf("CTL:Persis Out error returned " 7636 "from ctl_ha_msg_send %d\n", 7637 isc_retval); 7638 } 7639 } 7640 } 7641 7642 lun->PRGeneration++; 7643 7644 return (retval); 7645 } 7646 7647 static void 7648 ctl_pro_preempt_other(struct ctl_lun *lun, union ctl_ha_msg *msg) 7649 { 7650 int i; 7651 7652 if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS 7653 || lun->pr_res_idx == CTL_PR_NO_RESERVATION 7654 || memcmp(&lun->per_res[lun->pr_res_idx].res_key, 7655 msg->pr.pr_info.sa_res_key, 7656 sizeof(struct scsi_per_res_key)) != 0) { 7657 uint64_t sa_res_key; 7658 sa_res_key = scsi_8btou64(msg->pr.pr_info.sa_res_key); 7659 7660 if (sa_res_key == 0) { 7661 /* temporarily unregister this nexus */ 7662 lun->per_res[msg->pr.pr_info.residx].registered = 0; 7663 7664 /* 7665 * Unregister everybody else and build UA for 7666 * them 7667 */ 7668 for(i=0; i < 2*CTL_MAX_INITIATORS; i++) { 7669 if (lun->per_res[i].registered == 0) 7670 continue; 7671 7672 if (!persis_offset 7673 && i < CTL_MAX_INITIATORS) 7674 lun->pending_sense[i].ua_pending |= 7675 CTL_UA_REG_PREEMPT; 7676 else if (persis_offset && i >= persis_offset) 7677 lun->pending_sense[i - 7678 persis_offset].ua_pending |= 7679 CTL_UA_REG_PREEMPT; 7680 lun->per_res[i].registered = 0; 7681 memset(&lun->per_res[i].res_key, 0, 7682 sizeof(struct scsi_per_res_key)); 7683 } 7684 7685 lun->per_res[msg->pr.pr_info.residx].registered = 1; 7686 lun->pr_key_count = 1; 7687 lun->res_type = msg->pr.pr_info.res_type; 7688 if (lun->res_type != SPR_TYPE_WR_EX_AR 7689 && lun->res_type != SPR_TYPE_EX_AC_AR) 7690 lun->pr_res_idx = msg->pr.pr_info.residx; 7691 } else { 7692 for (i=0; i < 2*CTL_MAX_INITIATORS; i++) { 7693 if (memcmp(msg->pr.pr_info.sa_res_key, 7694 lun->per_res[i].res_key.key, 7695 sizeof(struct scsi_per_res_key)) != 0) 7696 continue; 7697 7698 lun->per_res[i].registered = 0; 7699 memset(&lun->per_res[i].res_key, 0, 7700 sizeof(struct scsi_per_res_key)); 7701 lun->pr_key_count--; 7702 7703 if (!persis_offset 7704 && i < persis_offset) 7705 lun->pending_sense[i].ua_pending |= 7706 CTL_UA_REG_PREEMPT; 7707 else if (persis_offset 7708 && i >= persis_offset) 7709 lun->pending_sense[i - 7710 persis_offset].ua_pending |= 7711 CTL_UA_REG_PREEMPT; 7712 } 7713 } 7714 } else { 7715 /* 7716 * Temporarily unregister so it won't get removed 7717 * or UA generated 7718 */ 7719 lun->per_res[msg->pr.pr_info.residx].registered = 0; 7720 for (i=0; i < 
2*CTL_MAX_INITIATORS; i++) { 7721 if (lun->per_res[i].registered == 0) 7722 continue; 7723 7724 if (memcmp(msg->pr.pr_info.sa_res_key, 7725 lun->per_res[i].res_key.key, 7726 sizeof(struct scsi_per_res_key)) == 0) { 7727 lun->per_res[i].registered = 0; 7728 memset(&lun->per_res[i].res_key, 0, 7729 sizeof(struct scsi_per_res_key)); 7730 lun->pr_key_count--; 7731 if (!persis_offset 7732 && i < CTL_MAX_INITIATORS) 7733 lun->pending_sense[i].ua_pending |= 7734 CTL_UA_REG_PREEMPT; 7735 else if (persis_offset 7736 && i >= persis_offset) 7737 lun->pending_sense[i - 7738 persis_offset].ua_pending |= 7739 CTL_UA_REG_PREEMPT; 7740 } else if (msg->pr.pr_info.res_type != lun->res_type 7741 && (lun->res_type == SPR_TYPE_WR_EX_RO 7742 || lun->res_type == SPR_TYPE_EX_AC_RO)) { 7743 if (!persis_offset 7744 && i < persis_offset) 7745 lun->pending_sense[i 7746 ].ua_pending |= 7747 CTL_UA_RES_RELEASE; 7748 else if (persis_offset 7749 && i >= persis_offset) 7750 lun->pending_sense[i - 7751 persis_offset].ua_pending |= 7752 CTL_UA_RES_RELEASE; 7753 } 7754 } 7755 lun->per_res[msg->pr.pr_info.residx].registered = 1; 7756 lun->res_type = msg->pr.pr_info.res_type; 7757 if (lun->res_type != SPR_TYPE_WR_EX_AR 7758 && lun->res_type != SPR_TYPE_EX_AC_AR) 7759 lun->pr_res_idx = msg->pr.pr_info.residx; 7760 else 7761 lun->pr_res_idx = CTL_PR_ALL_REGISTRANTS; 7762 } 7763 lun->PRGeneration++; 7764 7765 } 7766 7767 7768 int 7769 ctl_persistent_reserve_out(struct ctl_scsiio *ctsio) 7770 { 7771 int retval; 7772 int isc_retval; 7773 u_int32_t param_len; 7774 struct scsi_per_res_out *cdb; 7775 struct ctl_lun *lun; 7776 struct scsi_per_res_out_parms* param; 7777 struct ctl_softc *softc; 7778 uint32_t residx; 7779 uint64_t res_key, sa_res_key; 7780 uint8_t type; 7781 union ctl_ha_msg persis_io; 7782 int i; 7783 7784 CTL_DEBUG_PRINT(("ctl_persistent_reserve_out\n")); 7785 7786 retval = CTL_RETVAL_COMPLETE; 7787 7788 softc = control_softc; 7789 7790 cdb = (struct scsi_per_res_out *)ctsio->cdb; 7791 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 7792 7793 /* 7794 * We only support whole-LUN scope. The scope & type are ignored for 7795 * register, register and ignore existing key and clear. 7796 * We sometimes ignore scope and type on preempts too!! 7797 * Verify reservation type here as well. 
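 * The scope is carried in the upper nibble and the type in the lower
 * nibble of the scope_type byte; only LU_SCOPE is accepted for RESERVE
 * and RELEASE, and the type must be one of the six values defined in
 * SPC-3.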
7798 */ 7799 type = cdb->scope_type & SPR_TYPE_MASK; 7800 if ((cdb->action == SPRO_RESERVE) 7801 || (cdb->action == SPRO_RELEASE)) { 7802 if ((cdb->scope_type & SPR_SCOPE_MASK) != SPR_LU_SCOPE) { 7803 ctl_set_invalid_field(/*ctsio*/ ctsio, 7804 /*sks_valid*/ 1, 7805 /*command*/ 1, 7806 /*field*/ 2, 7807 /*bit_valid*/ 1, 7808 /*bit*/ 4); 7809 ctl_done((union ctl_io *)ctsio); 7810 return (CTL_RETVAL_COMPLETE); 7811 } 7812 7813 if (type>8 || type==2 || type==4 || type==0) { 7814 ctl_set_invalid_field(/*ctsio*/ ctsio, 7815 /*sks_valid*/ 1, 7816 /*command*/ 1, 7817 /*field*/ 2, 7818 /*bit_valid*/ 1, 7819 /*bit*/ 0); 7820 ctl_done((union ctl_io *)ctsio); 7821 return (CTL_RETVAL_COMPLETE); 7822 } 7823 } 7824 7825 switch (cdb->action & SPRO_ACTION_MASK) { 7826 case SPRO_REGISTER: 7827 case SPRO_RESERVE: 7828 case SPRO_RELEASE: 7829 case SPRO_CLEAR: 7830 case SPRO_PREEMPT: 7831 case SPRO_REG_IGNO: 7832 break; 7833 case SPRO_REG_MOVE: 7834 case SPRO_PRE_ABO: 7835 default: 7836 ctl_set_invalid_field(/*ctsio*/ ctsio, 7837 /*sks_valid*/ 1, 7838 /*command*/ 1, 7839 /*field*/ 1, 7840 /*bit_valid*/ 1, 7841 /*bit*/ 0); 7842 ctl_done((union ctl_io *)ctsio); 7843 return (CTL_RETVAL_COMPLETE); 7844 break; /* NOTREACHED */ 7845 } 7846 7847 param_len = scsi_4btoul(cdb->length); 7848 7849 if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) { 7850 ctsio->kern_data_ptr = malloc(param_len, M_CTL, M_WAITOK); 7851 ctsio->kern_data_len = param_len; 7852 ctsio->kern_total_len = param_len; 7853 ctsio->kern_data_resid = 0; 7854 ctsio->kern_rel_offset = 0; 7855 ctsio->kern_sg_entries = 0; 7856 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 7857 ctsio->be_move_done = ctl_config_move_done; 7858 ctl_datamove((union ctl_io *)ctsio); 7859 7860 return (CTL_RETVAL_COMPLETE); 7861 } 7862 7863 param = (struct scsi_per_res_out_parms *)ctsio->kern_data_ptr; 7864 7865 residx = ctl_get_resindex(&ctsio->io_hdr.nexus); 7866 res_key = scsi_8btou64(param->res_key.key); 7867 sa_res_key = scsi_8btou64(param->serv_act_res_key); 7868 7869 /* 7870 * Validate the reservation key here except for SPRO_REG_IGNO 7871 * This must be done for all other service actions 7872 */ 7873 if ((cdb->action & SPRO_ACTION_MASK) != SPRO_REG_IGNO) { 7874 mtx_lock(&softc->ctl_lock); 7875 if (lun->per_res[residx].registered) { 7876 if (memcmp(param->res_key.key, 7877 lun->per_res[residx].res_key.key, 7878 ctl_min(sizeof(param->res_key), 7879 sizeof(lun->per_res[residx].res_key))) != 0) { 7880 /* 7881 * The current key passed in doesn't match 7882 * the one the initiator previously 7883 * registered. 7884 */ 7885 mtx_unlock(&softc->ctl_lock); 7886 free(ctsio->kern_data_ptr, M_CTL); 7887 ctl_set_reservation_conflict(ctsio); 7888 ctl_done((union ctl_io *)ctsio); 7889 return (CTL_RETVAL_COMPLETE); 7890 } 7891 } else if ((cdb->action & SPRO_ACTION_MASK) != SPRO_REGISTER) { 7892 /* 7893 * We are not registered 7894 */ 7895 mtx_unlock(&softc->ctl_lock); 7896 free(ctsio->kern_data_ptr, M_CTL); 7897 ctl_set_reservation_conflict(ctsio); 7898 ctl_done((union ctl_io *)ctsio); 7899 return (CTL_RETVAL_COMPLETE); 7900 } else if (res_key != 0) { 7901 /* 7902 * We are not registered and trying to register but 7903 * the register key isn't zero. 
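 * SPC-3 requires the reservation key to be zero when an unregistered
 * nexus issues REGISTER, so treat this as a reservation conflict.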
7904 */ 7905 mtx_unlock(&softc->ctl_lock); 7906 free(ctsio->kern_data_ptr, M_CTL); 7907 ctl_set_reservation_conflict(ctsio); 7908 ctl_done((union ctl_io *)ctsio); 7909 return (CTL_RETVAL_COMPLETE); 7910 } 7911 mtx_unlock(&softc->ctl_lock); 7912 } 7913 7914 switch (cdb->action & SPRO_ACTION_MASK) { 7915 case SPRO_REGISTER: 7916 case SPRO_REG_IGNO: { 7917 7918 #if 0 7919 printf("Registration received\n"); 7920 #endif 7921 7922 /* 7923 * We don't support any of these options, as we report in 7924 * the read capabilities request (see 7925 * ctl_persistent_reserve_in(), above). 7926 */ 7927 if ((param->flags & SPR_SPEC_I_PT) 7928 || (param->flags & SPR_ALL_TG_PT) 7929 || (param->flags & SPR_APTPL)) { 7930 int bit_ptr; 7931 7932 if (param->flags & SPR_APTPL) 7933 bit_ptr = 0; 7934 else if (param->flags & SPR_ALL_TG_PT) 7935 bit_ptr = 2; 7936 else /* SPR_SPEC_I_PT */ 7937 bit_ptr = 3; 7938 7939 free(ctsio->kern_data_ptr, M_CTL); 7940 ctl_set_invalid_field(ctsio, 7941 /*sks_valid*/ 1, 7942 /*command*/ 0, 7943 /*field*/ 20, 7944 /*bit_valid*/ 1, 7945 /*bit*/ bit_ptr); 7946 ctl_done((union ctl_io *)ctsio); 7947 return (CTL_RETVAL_COMPLETE); 7948 } 7949 7950 mtx_lock(&softc->ctl_lock); 7951 7952 /* 7953 * The initiator wants to clear the 7954 * key/unregister. 7955 */ 7956 if (sa_res_key == 0) { 7957 if ((res_key == 0 7958 && (cdb->action & SPRO_ACTION_MASK) == SPRO_REGISTER) 7959 || ((cdb->action & SPRO_ACTION_MASK) == SPRO_REG_IGNO 7960 && !lun->per_res[residx].registered)) { 7961 mtx_unlock(&softc->ctl_lock); 7962 goto done; 7963 } 7964 7965 lun->per_res[residx].registered = 0; 7966 memset(&lun->per_res[residx].res_key, 7967 0, sizeof(lun->per_res[residx].res_key)); 7968 lun->pr_key_count--; 7969 7970 if (residx == lun->pr_res_idx) { 7971 lun->flags &= ~CTL_LUN_PR_RESERVED; 7972 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 7973 7974 if ((lun->res_type == SPR_TYPE_WR_EX_RO 7975 || lun->res_type == SPR_TYPE_EX_AC_RO) 7976 && lun->pr_key_count) { 7977 /* 7978 * If the reservation is a registrants 7979 * only type we need to generate a UA 7980 * for other registered inits. The 7981 * sense code should be RESERVATIONS 7982 * RELEASED 7983 */ 7984 7985 for (i = 0; i < CTL_MAX_INITIATORS;i++){ 7986 if (lun->per_res[ 7987 i+persis_offset].registered 7988 == 0) 7989 continue; 7990 lun->pending_sense[i 7991 ].ua_pending |= 7992 CTL_UA_RES_RELEASE; 7993 } 7994 } 7995 lun->res_type = 0; 7996 } else if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS) { 7997 if (lun->pr_key_count==0) { 7998 lun->flags &= ~CTL_LUN_PR_RESERVED; 7999 lun->res_type = 0; 8000 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8001 } 8002 } 8003 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 8004 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 8005 persis_io.pr.pr_info.action = CTL_PR_UNREG_KEY; 8006 persis_io.pr.pr_info.residx = residx; 8007 if ((isc_retval = ctl_ha_msg_send(CTL_HA_CHAN_CTL, 8008 &persis_io, sizeof(persis_io), 0 )) > 8009 CTL_HA_STATUS_SUCCESS) { 8010 printf("CTL:Persis Out error returned from " 8011 "ctl_ha_msg_send %d\n", isc_retval); 8012 } 8013 mtx_unlock(&softc->ctl_lock); 8014 } else /* sa_res_key != 0 */ { 8015 8016 /* 8017 * If we aren't registered currently then increment 8018 * the key count and set the registered flag. 
8019 */ 8020 if (!lun->per_res[residx].registered) { 8021 lun->pr_key_count++; 8022 lun->per_res[residx].registered = 1; 8023 } 8024 8025 memcpy(&lun->per_res[residx].res_key, 8026 param->serv_act_res_key, 8027 ctl_min(sizeof(param->serv_act_res_key), 8028 sizeof(lun->per_res[residx].res_key))); 8029 8030 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 8031 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 8032 persis_io.pr.pr_info.action = CTL_PR_REG_KEY; 8033 persis_io.pr.pr_info.residx = residx; 8034 memcpy(persis_io.pr.pr_info.sa_res_key, 8035 param->serv_act_res_key, 8036 sizeof(param->serv_act_res_key)); 8037 mtx_unlock(&softc->ctl_lock); 8038 if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL, 8039 &persis_io, sizeof(persis_io), 0)) > 8040 CTL_HA_STATUS_SUCCESS) { 8041 printf("CTL:Persis Out error returned from " 8042 "ctl_ha_msg_send %d\n", isc_retval); 8043 } 8044 } 8045 lun->PRGeneration++; 8046 8047 break; 8048 } 8049 case SPRO_RESERVE: 8050 #if 0 8051 printf("Reserve executed type %d\n", type); 8052 #endif 8053 mtx_lock(&softc->ctl_lock); 8054 if (lun->flags & CTL_LUN_PR_RESERVED) { 8055 /* 8056 * if this isn't the reservation holder and it's 8057 * not a "all registrants" type or if the type is 8058 * different then we have a conflict 8059 */ 8060 if ((lun->pr_res_idx != residx 8061 && lun->pr_res_idx != CTL_PR_ALL_REGISTRANTS) 8062 || lun->res_type != type) { 8063 mtx_unlock(&softc->ctl_lock); 8064 free(ctsio->kern_data_ptr, M_CTL); 8065 ctl_set_reservation_conflict(ctsio); 8066 ctl_done((union ctl_io *)ctsio); 8067 return (CTL_RETVAL_COMPLETE); 8068 } 8069 } else /* create a reservation */ { 8070 /* 8071 * If it's not an "all registrants" type record 8072 * reservation holder 8073 */ 8074 if (type != SPR_TYPE_WR_EX_AR 8075 && type != SPR_TYPE_EX_AC_AR) 8076 lun->pr_res_idx = residx; /* Res holder */ 8077 else 8078 lun->pr_res_idx = CTL_PR_ALL_REGISTRANTS; 8079 8080 lun->flags |= CTL_LUN_PR_RESERVED; 8081 lun->res_type = type; 8082 8083 mtx_unlock(&softc->ctl_lock); 8084 8085 /* send msg to other side */ 8086 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 8087 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 8088 persis_io.pr.pr_info.action = CTL_PR_RESERVE; 8089 persis_io.pr.pr_info.residx = lun->pr_res_idx; 8090 persis_io.pr.pr_info.res_type = type; 8091 if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL, 8092 &persis_io, sizeof(persis_io), 0)) > 8093 CTL_HA_STATUS_SUCCESS) { 8094 printf("CTL:Persis Out error returned from " 8095 "ctl_ha_msg_send %d\n", isc_retval); 8096 } 8097 } 8098 break; 8099 8100 case SPRO_RELEASE: 8101 mtx_lock(&softc->ctl_lock); 8102 if ((lun->flags & CTL_LUN_PR_RESERVED) == 0) { 8103 /* No reservation exists return good status */ 8104 mtx_unlock(&softc->ctl_lock); 8105 goto done; 8106 } 8107 /* 8108 * Is this nexus a reservation holder? 8109 */ 8110 if (lun->pr_res_idx != residx 8111 && lun->pr_res_idx != CTL_PR_ALL_REGISTRANTS) { 8112 /* 8113 * not a res holder return good status but 8114 * do nothing 8115 */ 8116 mtx_unlock(&softc->ctl_lock); 8117 goto done; 8118 } 8119 8120 if (lun->res_type != type) { 8121 mtx_unlock(&softc->ctl_lock); 8122 free(ctsio->kern_data_ptr, M_CTL); 8123 ctl_set_illegal_pr_release(ctsio); 8124 ctl_done((union ctl_io *)ctsio); 8125 return (CTL_RETVAL_COMPLETE); 8126 } 8127 8128 /* okay to release */ 8129 lun->flags &= ~CTL_LUN_PR_RESERVED; 8130 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8131 lun->res_type = 0; 8132 8133 /* 8134 * if this isn't an exclusive access 8135 * res generate UA for all other 8136 * registrants. 
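 * The unit attention established for them is RESERVATIONS RELEASED, as
 * required for the registrants-only and all-registrants types.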
8137 */ 8138 if (type != SPR_TYPE_EX_AC 8139 && type != SPR_TYPE_WR_EX) { 8140 /* 8141 * temporarily unregister so we don't generate UA 8142 */ 8143 lun->per_res[residx].registered = 0; 8144 8145 for (i = 0; i < CTL_MAX_INITIATORS; i++) { 8146 if (lun->per_res[i+persis_offset].registered 8147 == 0) 8148 continue; 8149 lun->pending_sense[i].ua_pending |= 8150 CTL_UA_RES_RELEASE; 8151 } 8152 8153 lun->per_res[residx].registered = 1; 8154 } 8155 mtx_unlock(&softc->ctl_lock); 8156 /* Send msg to other side */ 8157 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 8158 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 8159 persis_io.pr.pr_info.action = CTL_PR_RELEASE; 8160 if ((isc_retval=ctl_ha_msg_send( CTL_HA_CHAN_CTL, &persis_io, 8161 sizeof(persis_io), 0)) > CTL_HA_STATUS_SUCCESS) { 8162 printf("CTL:Persis Out error returned from " 8163 "ctl_ha_msg_send %d\n", isc_retval); 8164 } 8165 break; 8166 8167 case SPRO_CLEAR: 8168 /* send msg to other side */ 8169 8170 mtx_lock(&softc->ctl_lock); 8171 lun->flags &= ~CTL_LUN_PR_RESERVED; 8172 lun->res_type = 0; 8173 lun->pr_key_count = 0; 8174 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8175 8176 8177 memset(&lun->per_res[residx].res_key, 8178 0, sizeof(lun->per_res[residx].res_key)); 8179 lun->per_res[residx].registered = 0; 8180 8181 for (i=0; i < 2*CTL_MAX_INITIATORS; i++) 8182 if (lun->per_res[i].registered) { 8183 if (!persis_offset && i < CTL_MAX_INITIATORS) 8184 lun->pending_sense[i].ua_pending |= 8185 CTL_UA_RES_PREEMPT; 8186 else if (persis_offset && i >= persis_offset) 8187 lun->pending_sense[i-persis_offset 8188 ].ua_pending |= CTL_UA_RES_PREEMPT; 8189 8190 memset(&lun->per_res[i].res_key, 8191 0, sizeof(struct scsi_per_res_key)); 8192 lun->per_res[i].registered = 0; 8193 } 8194 lun->PRGeneration++; 8195 mtx_unlock(&softc->ctl_lock); 8196 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 8197 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 8198 persis_io.pr.pr_info.action = CTL_PR_CLEAR; 8199 if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, 8200 sizeof(persis_io), 0)) > CTL_HA_STATUS_SUCCESS) { 8201 printf("CTL:Persis Out error returned from " 8202 "ctl_ha_msg_send %d\n", isc_retval); 8203 } 8204 break; 8205 8206 case SPRO_PREEMPT: { 8207 int nretval; 8208 8209 nretval = ctl_pro_preempt(softc, lun, res_key, sa_res_key, type, 8210 residx, ctsio, cdb, param); 8211 if (nretval != 0) 8212 return (CTL_RETVAL_COMPLETE); 8213 break; 8214 } 8215 case SPRO_REG_MOVE: 8216 case SPRO_PRE_ABO: 8217 default: 8218 free(ctsio->kern_data_ptr, M_CTL); 8219 ctl_set_invalid_field(/*ctsio*/ ctsio, 8220 /*sks_valid*/ 1, 8221 /*command*/ 1, 8222 /*field*/ 1, 8223 /*bit_valid*/ 1, 8224 /*bit*/ 0); 8225 ctl_done((union ctl_io *)ctsio); 8226 return (CTL_RETVAL_COMPLETE); 8227 break; /* NOTREACHED */ 8228 } 8229 8230 done: 8231 free(ctsio->kern_data_ptr, M_CTL); 8232 ctl_set_success(ctsio); 8233 ctl_done((union ctl_io *)ctsio); 8234 8235 return (retval); 8236 } 8237 8238 /* 8239 * This routine is for handling a message from the other SC pertaining to 8240 * persistent reserve out. All the error checking will have been done 8241 * so only perorming the action need be done here to keep the two 8242 * in sync. 
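 * The messages handled here are the CTL_MSG_PERS_ACTION messages
 * sent by the persistent reserve out handling above (register,
 * unregister, reserve, release, preempt and clear); this side only
 * mirrors the bookkeeping: registrations, the key count and the
 * reservation state.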
8243 */ 8244 static void 8245 ctl_hndl_per_res_out_on_other_sc(union ctl_ha_msg *msg) 8246 { 8247 struct ctl_lun *lun; 8248 struct ctl_softc *softc; 8249 int i; 8250 8251 softc = control_softc; 8252 8253 mtx_lock(&softc->ctl_lock); 8254 8255 lun = softc->ctl_luns[msg->hdr.nexus.targ_lun]; 8256 switch(msg->pr.pr_info.action) { 8257 case CTL_PR_REG_KEY: 8258 if (!lun->per_res[msg->pr.pr_info.residx].registered) { 8259 lun->per_res[msg->pr.pr_info.residx].registered = 1; 8260 lun->pr_key_count++; 8261 } 8262 lun->PRGeneration++; 8263 memcpy(&lun->per_res[msg->pr.pr_info.residx].res_key, 8264 msg->pr.pr_info.sa_res_key, 8265 sizeof(struct scsi_per_res_key)); 8266 break; 8267 8268 case CTL_PR_UNREG_KEY: 8269 lun->per_res[msg->pr.pr_info.residx].registered = 0; 8270 memset(&lun->per_res[msg->pr.pr_info.residx].res_key, 8271 0, sizeof(struct scsi_per_res_key)); 8272 lun->pr_key_count--; 8273 8274 /* XXX Need to see if the reservation has been released */ 8275 /* if so do we need to generate UA? */ 8276 if (msg->pr.pr_info.residx == lun->pr_res_idx) { 8277 lun->flags &= ~CTL_LUN_PR_RESERVED; 8278 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8279 8280 if ((lun->res_type == SPR_TYPE_WR_EX_RO 8281 || lun->res_type == SPR_TYPE_EX_AC_RO) 8282 && lun->pr_key_count) { 8283 /* 8284 * If the reservation is a registrants 8285 * only type we need to generate a UA 8286 * for other registered inits. The 8287 * sense code should be RESERVATIONS 8288 * RELEASED 8289 */ 8290 8291 for (i = 0; i < CTL_MAX_INITIATORS; i++) { 8292 if (lun->per_res[i+ 8293 persis_offset].registered == 0) 8294 continue; 8295 8296 lun->pending_sense[i 8297 ].ua_pending |= 8298 CTL_UA_RES_RELEASE; 8299 } 8300 } 8301 lun->res_type = 0; 8302 } else if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS) { 8303 if (lun->pr_key_count==0) { 8304 lun->flags &= ~CTL_LUN_PR_RESERVED; 8305 lun->res_type = 0; 8306 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8307 } 8308 } 8309 lun->PRGeneration++; 8310 break; 8311 8312 case CTL_PR_RESERVE: 8313 lun->flags |= CTL_LUN_PR_RESERVED; 8314 lun->res_type = msg->pr.pr_info.res_type; 8315 lun->pr_res_idx = msg->pr.pr_info.residx; 8316 8317 break; 8318 8319 case CTL_PR_RELEASE: 8320 /* 8321 * if this isn't an exclusive access res generate UA for all 8322 * other registrants. 
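 * In other words, for the registrants-only and all-registrants
 * reservation types every other registered initiator gets a
 * RESERVATIONS RELEASED unit attention; for plain write exclusive
 * and exclusive access reservations nobody else was sharing the
 * reservation, so no notification is needed.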
8323 */ 8324 if (lun->res_type != SPR_TYPE_EX_AC 8325 && lun->res_type != SPR_TYPE_WR_EX) { 8326 for (i = 0; i < CTL_MAX_INITIATORS; i++) 8327 if (lun->per_res[i+persis_offset].registered) 8328 lun->pending_sense[i].ua_pending |= 8329 CTL_UA_RES_RELEASE; 8330 } 8331 8332 lun->flags &= ~CTL_LUN_PR_RESERVED; 8333 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8334 lun->res_type = 0; 8335 break; 8336 8337 case CTL_PR_PREEMPT: 8338 ctl_pro_preempt_other(lun, msg); 8339 break; 8340 case CTL_PR_CLEAR: 8341 lun->flags &= ~CTL_LUN_PR_RESERVED; 8342 lun->res_type = 0; 8343 lun->pr_key_count = 0; 8344 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8345 8346 for (i=0; i < 2*CTL_MAX_INITIATORS; i++) { 8347 if (lun->per_res[i].registered == 0) 8348 continue; 8349 if (!persis_offset 8350 && i < CTL_MAX_INITIATORS) 8351 lun->pending_sense[i].ua_pending |= 8352 CTL_UA_RES_PREEMPT; 8353 else if (persis_offset 8354 && i >= persis_offset) 8355 lun->pending_sense[i-persis_offset].ua_pending|= 8356 CTL_UA_RES_PREEMPT; 8357 memset(&lun->per_res[i].res_key, 0, 8358 sizeof(struct scsi_per_res_key)); 8359 lun->per_res[i].registered = 0; 8360 } 8361 lun->PRGeneration++; 8362 break; 8363 } 8364 8365 mtx_unlock(&softc->ctl_lock); 8366 } 8367 8368 int 8369 ctl_read_write(struct ctl_scsiio *ctsio) 8370 { 8371 struct ctl_lun *lun; 8372 struct ctl_lba_len lbalen; 8373 uint64_t lba; 8374 uint32_t num_blocks; 8375 int reladdr, fua, dpo, ebp; 8376 int retval; 8377 int isread; 8378 8379 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 8380 8381 CTL_DEBUG_PRINT(("ctl_read_write: command: %#x\n", ctsio->cdb[0])); 8382 8383 reladdr = 0; 8384 fua = 0; 8385 dpo = 0; 8386 ebp = 0; 8387 8388 retval = CTL_RETVAL_COMPLETE; 8389 8390 isread = ctsio->cdb[0] == READ_6 || ctsio->cdb[0] == READ_10 8391 || ctsio->cdb[0] == READ_12 || ctsio->cdb[0] == READ_16; 8392 if (lun->flags & CTL_LUN_PR_RESERVED && isread) { 8393 uint32_t residx; 8394 8395 /* 8396 * XXX KDM need a lock here. 8397 */ 8398 residx = ctl_get_resindex(&ctsio->io_hdr.nexus); 8399 if ((lun->res_type == SPR_TYPE_EX_AC 8400 && residx != lun->pr_res_idx) 8401 || ((lun->res_type == SPR_TYPE_EX_AC_RO 8402 || lun->res_type == SPR_TYPE_EX_AC_AR) 8403 && !lun->per_res[residx].registered)) { 8404 ctl_set_reservation_conflict(ctsio); 8405 ctl_done((union ctl_io *)ctsio); 8406 return (CTL_RETVAL_COMPLETE); 8407 } 8408 } 8409 8410 switch (ctsio->cdb[0]) { 8411 case READ_6: 8412 case WRITE_6: { 8413 struct scsi_rw_6 *cdb; 8414 8415 cdb = (struct scsi_rw_6 *)ctsio->cdb; 8416 8417 lba = scsi_3btoul(cdb->addr); 8418 /* only 5 bits are valid in the most significant address byte */ 8419 lba &= 0x1fffff; 8420 num_blocks = cdb->length; 8421 /* 8422 * This is correct according to SBC-2. 8423 */ 8424 if (num_blocks == 0) 8425 num_blocks = 256; 8426 break; 8427 } 8428 case READ_10: 8429 case WRITE_10: { 8430 struct scsi_rw_10 *cdb; 8431 8432 cdb = (struct scsi_rw_10 *)ctsio->cdb; 8433 8434 if (cdb->byte2 & SRW10_RELADDR) 8435 reladdr = 1; 8436 if (cdb->byte2 & SRW10_FUA) 8437 fua = 1; 8438 if (cdb->byte2 & SRW10_DPO) 8439 dpo = 1; 8440 8441 if ((cdb->opcode == WRITE_10) 8442 && (cdb->byte2 & SRW10_EBP)) 8443 ebp = 1; 8444 8445 lba = scsi_4btoul(cdb->addr); 8446 num_blocks = scsi_2btoul(cdb->length); 8447 break; 8448 } 8449 case WRITE_VERIFY_10: { 8450 struct scsi_write_verify_10 *cdb; 8451 8452 cdb = (struct scsi_write_verify_10 *)ctsio->cdb; 8453 8454 /* 8455 * XXX KDM we should do actual write verify support at some 8456 * point. 
This is obviously fake, we're just translating 8457 * things to a write. So we don't even bother checking the 8458 * BYTCHK field, since we don't do any verification. If 8459 * the user asks for it, we'll just pretend we did it. 8460 */ 8461 if (cdb->byte2 & SWV_DPO) 8462 dpo = 1; 8463 8464 lba = scsi_4btoul(cdb->addr); 8465 num_blocks = scsi_2btoul(cdb->length); 8466 break; 8467 } 8468 case READ_12: 8469 case WRITE_12: { 8470 struct scsi_rw_12 *cdb; 8471 8472 cdb = (struct scsi_rw_12 *)ctsio->cdb; 8473 8474 if (cdb->byte2 & SRW12_RELADDR) 8475 reladdr = 1; 8476 if (cdb->byte2 & SRW12_FUA) 8477 fua = 1; 8478 if (cdb->byte2 & SRW12_DPO) 8479 dpo = 1; 8480 lba = scsi_4btoul(cdb->addr); 8481 num_blocks = scsi_4btoul(cdb->length); 8482 break; 8483 } 8484 case WRITE_VERIFY_12: { 8485 struct scsi_write_verify_12 *cdb; 8486 8487 cdb = (struct scsi_write_verify_12 *)ctsio->cdb; 8488 8489 if (cdb->byte2 & SWV_DPO) 8490 dpo = 1; 8491 8492 lba = scsi_4btoul(cdb->addr); 8493 num_blocks = scsi_4btoul(cdb->length); 8494 8495 break; 8496 } 8497 case READ_16: 8498 case WRITE_16: { 8499 struct scsi_rw_16 *cdb; 8500 8501 cdb = (struct scsi_rw_16 *)ctsio->cdb; 8502 8503 if (cdb->byte2 & SRW12_RELADDR) 8504 reladdr = 1; 8505 if (cdb->byte2 & SRW12_FUA) 8506 fua = 1; 8507 if (cdb->byte2 & SRW12_DPO) 8508 dpo = 1; 8509 8510 lba = scsi_8btou64(cdb->addr); 8511 num_blocks = scsi_4btoul(cdb->length); 8512 break; 8513 } 8514 case WRITE_VERIFY_16: { 8515 struct scsi_write_verify_16 *cdb; 8516 8517 cdb = (struct scsi_write_verify_16 *)ctsio->cdb; 8518 8519 if (cdb->byte2 & SWV_DPO) 8520 dpo = 1; 8521 8522 lba = scsi_8btou64(cdb->addr); 8523 num_blocks = scsi_4btoul(cdb->length); 8524 break; 8525 } 8526 default: 8527 /* 8528 * We got a command we don't support. This shouldn't 8529 * happen, commands should be filtered out above us. 8530 */ 8531 ctl_set_invalid_opcode(ctsio); 8532 ctl_done((union ctl_io *)ctsio); 8533 8534 return (CTL_RETVAL_COMPLETE); 8535 break; /* NOTREACHED */ 8536 } 8537 8538 /* 8539 * XXX KDM what do we do with the DPO and FUA bits? FUA might be 8540 * interesting for us, but if RAIDCore is in write-back mode, 8541 * getting it to do write-through for a particular transaction may 8542 * not be possible. 8543 */ 8544 /* 8545 * We don't support relative addressing. That also requires 8546 * supporting linked commands, which we don't do. 8547 */ 8548 if (reladdr != 0) { 8549 ctl_set_invalid_field(ctsio, 8550 /*sks_valid*/ 1, 8551 /*command*/ 1, 8552 /*field*/ 1, 8553 /*bit_valid*/ 1, 8554 /*bit*/ 0); 8555 ctl_done((union ctl_io *)ctsio); 8556 return (CTL_RETVAL_COMPLETE); 8557 } 8558 8559 /* 8560 * The first check is to make sure we're in bounds, the second 8561 * check is to catch wrap-around problems. If the lba + num blocks 8562 * is less than the lba, then we've wrapped around and the block 8563 * range is invalid anyway. 8564 */ 8565 if (((lba + num_blocks) > (lun->be_lun->maxlba + 1)) 8566 || ((lba + num_blocks) < lba)) { 8567 ctl_set_lba_out_of_range(ctsio); 8568 ctl_done((union ctl_io *)ctsio); 8569 return (CTL_RETVAL_COMPLETE); 8570 } 8571 8572 /* 8573 * According to SBC-3, a transfer length of 0 is not an error. 8574 * Note that this cannot happen with WRITE(6) or READ(6), since 0 8575 * translates to 256 blocks for those commands. 
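 * (The 6-byte CDBs were already converted above, so a num_blocks of
 * zero here can only have come from one of the 10/12/16-byte
 * variants, where it legitimately means "transfer nothing".)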
8576 */ 8577 if (num_blocks == 0) { 8578 ctl_set_success(ctsio); 8579 ctl_done((union ctl_io *)ctsio); 8580 return (CTL_RETVAL_COMPLETE); 8581 } 8582 8583 lbalen.lba = lba; 8584 lbalen.len = num_blocks; 8585 memcpy(ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN].bytes, &lbalen, 8586 sizeof(lbalen)); 8587 8588 CTL_DEBUG_PRINT(("ctl_read_write: calling data_submit()\n")); 8589 8590 retval = lun->backend->data_submit((union ctl_io *)ctsio); 8591 8592 return (retval); 8593 } 8594 8595 int 8596 ctl_report_luns(struct ctl_scsiio *ctsio) 8597 { 8598 struct scsi_report_luns *cdb; 8599 struct scsi_report_luns_data *lun_data; 8600 struct ctl_lun *lun, *request_lun; 8601 int num_luns, retval; 8602 uint32_t alloc_len, lun_datalen; 8603 int num_filled, well_known; 8604 uint32_t initidx; 8605 8606 retval = CTL_RETVAL_COMPLETE; 8607 well_known = 0; 8608 8609 cdb = (struct scsi_report_luns *)ctsio->cdb; 8610 8611 CTL_DEBUG_PRINT(("ctl_report_luns\n")); 8612 8613 mtx_lock(&control_softc->ctl_lock); 8614 num_luns = control_softc->num_luns; 8615 mtx_unlock(&control_softc->ctl_lock); 8616 8617 switch (cdb->select_report) { 8618 case RPL_REPORT_DEFAULT: 8619 case RPL_REPORT_ALL: 8620 break; 8621 case RPL_REPORT_WELLKNOWN: 8622 well_known = 1; 8623 num_luns = 0; 8624 break; 8625 default: 8626 ctl_set_invalid_field(ctsio, 8627 /*sks_valid*/ 1, 8628 /*command*/ 1, 8629 /*field*/ 2, 8630 /*bit_valid*/ 0, 8631 /*bit*/ 0); 8632 ctl_done((union ctl_io *)ctsio); 8633 return (retval); 8634 break; /* NOTREACHED */ 8635 } 8636 8637 alloc_len = scsi_4btoul(cdb->length); 8638 /* 8639 * The initiator has to allocate at least 16 bytes for this request, 8640 * so he can at least get the header and the first LUN. Otherwise 8641 * we reject the request (per SPC-3 rev 14, section 6.21). 8642 */ 8643 if (alloc_len < (sizeof(struct scsi_report_luns_data) + 8644 sizeof(struct scsi_report_luns_lundata))) { 8645 ctl_set_invalid_field(ctsio, 8646 /*sks_valid*/ 1, 8647 /*command*/ 1, 8648 /*field*/ 6, 8649 /*bit_valid*/ 0, 8650 /*bit*/ 0); 8651 ctl_done((union ctl_io *)ctsio); 8652 return (retval); 8653 } 8654 8655 request_lun = (struct ctl_lun *) 8656 ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 8657 8658 lun_datalen = sizeof(*lun_data) + 8659 (num_luns * sizeof(struct scsi_report_luns_lundata)); 8660 8661 ctsio->kern_data_ptr = malloc(lun_datalen, M_CTL, M_WAITOK | M_ZERO); 8662 lun_data = (struct scsi_report_luns_data *)ctsio->kern_data_ptr; 8663 ctsio->kern_sg_entries = 0; 8664 8665 if (lun_datalen < alloc_len) { 8666 ctsio->residual = alloc_len - lun_datalen; 8667 ctsio->kern_data_len = lun_datalen; 8668 ctsio->kern_total_len = lun_datalen; 8669 } else { 8670 ctsio->residual = 0; 8671 ctsio->kern_data_len = alloc_len; 8672 ctsio->kern_total_len = alloc_len; 8673 } 8674 ctsio->kern_data_resid = 0; 8675 ctsio->kern_rel_offset = 0; 8676 ctsio->kern_sg_entries = 0; 8677 8678 initidx = ctl_get_initindex(&ctsio->io_hdr.nexus); 8679 8680 /* 8681 * We set this to the actual data length, regardless of how much 8682 * space we actually have to return results. If the user looks at 8683 * this value, he'll know whether or not he allocated enough space 8684 * and reissue the command if necessary. We don't support well 8685 * known logical units, so if the user asks for that, return none. 
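 * The LUN LIST LENGTH field counts only the 8-byte LUN entries and
 * not the 8-byte parameter data header, which is why 8 is
 * subtracted from the overall data length below.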
8686 */ 8687 scsi_ulto4b(lun_datalen - 8, lun_data->length); 8688 8689 mtx_lock(&control_softc->ctl_lock); 8690 for (num_filled = 0, lun = STAILQ_FIRST(&control_softc->lun_list); 8691 (lun != NULL) && (num_filled < num_luns); 8692 lun = STAILQ_NEXT(lun, links)) { 8693 8694 if (lun->lun <= 0xff) { 8695 /* 8696 * Peripheral addressing method, bus number 0. 8697 */ 8698 lun_data->luns[num_filled].lundata[0] = 8699 RPL_LUNDATA_ATYP_PERIPH; 8700 lun_data->luns[num_filled].lundata[1] = lun->lun; 8701 num_filled++; 8702 } else if (lun->lun <= 0x3fff) { 8703 /* 8704 * Flat addressing method. 8705 */ 8706 lun_data->luns[num_filled].lundata[0] = 8707 RPL_LUNDATA_ATYP_FLAT | 8708 (lun->lun & RPL_LUNDATA_FLAT_LUN_MASK); 8709 #ifdef OLDCTLHEADERS 8710 (SRLD_ADDR_FLAT << SRLD_ADDR_SHIFT) | 8711 (lun->lun & SRLD_BUS_LUN_MASK); 8712 #endif 8713 lun_data->luns[num_filled].lundata[1] = 8714 #ifdef OLDCTLHEADERS 8715 lun->lun >> SRLD_BUS_LUN_BITS; 8716 #endif 8717 lun->lun >> RPL_LUNDATA_FLAT_LUN_BITS; 8718 num_filled++; 8719 } else { 8720 printf("ctl_report_luns: bogus LUN number %jd, " 8721 "skipping\n", (intmax_t)lun->lun); 8722 } 8723 /* 8724 * According to SPC-3, rev 14 section 6.21: 8725 * 8726 * "The execution of a REPORT LUNS command to any valid and 8727 * installed logical unit shall clear the REPORTED LUNS DATA 8728 * HAS CHANGED unit attention condition for all logical 8729 * units of that target with respect to the requesting 8730 * initiator. A valid and installed logical unit is one 8731 * having a PERIPHERAL QUALIFIER of 000b in the standard 8732 * INQUIRY data (see 6.4.2)." 8733 * 8734 * If request_lun is NULL, the LUN this report luns command 8735 * was issued to is either disabled or doesn't exist. In that 8736 * case, we shouldn't clear any pending lun change unit 8737 * attention. 8738 */ 8739 if (request_lun != NULL) 8740 lun->pending_sense[initidx].ua_pending &= 8741 ~CTL_UA_LUN_CHANGE; 8742 } 8743 mtx_unlock(&control_softc->ctl_lock); 8744 8745 /* 8746 * We can only return SCSI_STATUS_CHECK_COND when we can't satisfy 8747 * this request. 8748 */ 8749 ctsio->scsi_status = SCSI_STATUS_OK; 8750 8751 ctsio->be_move_done = ctl_config_move_done; 8752 ctl_datamove((union ctl_io *)ctsio); 8753 8754 return (retval); 8755 } 8756 8757 int 8758 ctl_request_sense(struct ctl_scsiio *ctsio) 8759 { 8760 struct scsi_request_sense *cdb; 8761 struct scsi_sense_data *sense_ptr; 8762 struct ctl_lun *lun; 8763 uint32_t initidx; 8764 int have_error; 8765 scsi_sense_data_type sense_format; 8766 8767 cdb = (struct scsi_request_sense *)ctsio->cdb; 8768 8769 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 8770 8771 CTL_DEBUG_PRINT(("ctl_request_sense\n")); 8772 8773 /* 8774 * Determine which sense format the user wants. 8775 */ 8776 if (cdb->byte2 & SRS_DESC) 8777 sense_format = SSD_TYPE_DESC; 8778 else 8779 sense_format = SSD_TYPE_FIXED; 8780 8781 ctsio->kern_data_ptr = malloc(sizeof(*sense_ptr), M_CTL, M_WAITOK); 8782 sense_ptr = (struct scsi_sense_data *)ctsio->kern_data_ptr; 8783 ctsio->kern_sg_entries = 0; 8784 8785 /* 8786 * struct scsi_sense_data, which is currently set to 256 bytes, is 8787 * larger than the largest allowed value for the length field in the 8788 * REQUEST SENSE CDB, which is 252 bytes as of SPC-4. 
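 * Since the allocation length in the REQUEST SENSE CDB is a single
 * byte, it can never ask for more than fits in *sense_ptr, so it is
 * safe to use it directly for the data length below.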
8789 */ 8790 ctsio->residual = 0; 8791 ctsio->kern_data_len = cdb->length; 8792 ctsio->kern_total_len = cdb->length; 8793 8794 ctsio->kern_data_resid = 0; 8795 ctsio->kern_rel_offset = 0; 8796 ctsio->kern_sg_entries = 0; 8797 8798 /* 8799 * If we don't have a LUN, we don't have any pending sense. 8800 */ 8801 if (lun == NULL) 8802 goto no_sense; 8803 8804 have_error = 0; 8805 initidx = ctl_get_initindex(&ctsio->io_hdr.nexus); 8806 /* 8807 * Check for pending sense, and then for pending unit attentions. 8808 * Pending sense gets returned first, then pending unit attentions. 8809 */ 8810 mtx_lock(&lun->ctl_softc->ctl_lock); 8811 if (ctl_is_set(lun->have_ca, initidx)) { 8812 scsi_sense_data_type stored_format; 8813 8814 /* 8815 * Check to see which sense format was used for the stored 8816 * sense data. 8817 */ 8818 stored_format = scsi_sense_type( 8819 &lun->pending_sense[initidx].sense); 8820 8821 /* 8822 * If the user requested a different sense format than the 8823 * one we stored, then we need to convert it to the other 8824 * format. If we're going from descriptor to fixed format 8825 * sense data, we may lose things in translation, depending 8826 * on what options were used. 8827 * 8828 * If the stored format is SSD_TYPE_NONE (i.e. invalid), 8829 * for some reason we'll just copy it out as-is. 8830 */ 8831 if ((stored_format == SSD_TYPE_FIXED) 8832 && (sense_format == SSD_TYPE_DESC)) 8833 ctl_sense_to_desc((struct scsi_sense_data_fixed *) 8834 &lun->pending_sense[initidx].sense, 8835 (struct scsi_sense_data_desc *)sense_ptr); 8836 else if ((stored_format == SSD_TYPE_DESC) 8837 && (sense_format == SSD_TYPE_FIXED)) 8838 ctl_sense_to_fixed((struct scsi_sense_data_desc *) 8839 &lun->pending_sense[initidx].sense, 8840 (struct scsi_sense_data_fixed *)sense_ptr); 8841 else 8842 memcpy(sense_ptr, &lun->pending_sense[initidx].sense, 8843 ctl_min(sizeof(*sense_ptr), 8844 sizeof(lun->pending_sense[initidx].sense))); 8845 8846 ctl_clear_mask(lun->have_ca, initidx); 8847 have_error = 1; 8848 } else if (lun->pending_sense[initidx].ua_pending != CTL_UA_NONE) { 8849 ctl_ua_type ua_type; 8850 8851 ua_type = ctl_build_ua(lun->pending_sense[initidx].ua_pending, 8852 sense_ptr, sense_format); 8853 if (ua_type != CTL_UA_NONE) { 8854 have_error = 1; 8855 /* We're reporting this UA, so clear it */ 8856 lun->pending_sense[initidx].ua_pending &= ~ua_type; 8857 } 8858 } 8859 mtx_unlock(&lun->ctl_softc->ctl_lock); 8860 8861 /* 8862 * We already have a pending error, return it. 8863 */ 8864 if (have_error != 0) { 8865 /* 8866 * We report the SCSI status as OK, since the status of the 8867 * request sense command itself is OK. 8868 */ 8869 ctsio->scsi_status = SCSI_STATUS_OK; 8870 8871 /* 8872 * We report 0 for the sense length, because we aren't doing 8873 * autosense in this case. We're reporting sense as 8874 * parameter data. 8875 */ 8876 ctsio->sense_len = 0; 8877 8878 ctsio->be_move_done = ctl_config_move_done; 8879 ctl_datamove((union ctl_io *)ctsio); 8880 8881 return (CTL_RETVAL_COMPLETE); 8882 } 8883 8884 no_sense: 8885 8886 /* 8887 * No sense information to report, so we report that everything is 8888 * okay. 8889 */ 8890 ctl_set_sense_data(sense_ptr, 8891 lun, 8892 sense_format, 8893 /*current_error*/ 1, 8894 /*sense_key*/ SSD_KEY_NO_SENSE, 8895 /*asc*/ 0x00, 8896 /*ascq*/ 0x00, 8897 SSD_ELEM_NONE); 8898 8899 ctsio->scsi_status = SCSI_STATUS_OK; 8900 8901 /* 8902 * We report 0 for the sense length, because we aren't doing 8903 * autosense in this case. We're reporting sense as parameter data. 
8904 */ 8905 ctsio->sense_len = 0; 8906 ctsio->be_move_done = ctl_config_move_done; 8907 ctl_datamove((union ctl_io *)ctsio); 8908 8909 return (CTL_RETVAL_COMPLETE); 8910 } 8911 8912 int 8913 ctl_tur(struct ctl_scsiio *ctsio) 8914 { 8915 struct ctl_lun *lun; 8916 8917 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 8918 8919 CTL_DEBUG_PRINT(("ctl_tur\n")); 8920 8921 if (lun == NULL) 8922 return (-EINVAL); 8923 8924 ctsio->scsi_status = SCSI_STATUS_OK; 8925 ctsio->io_hdr.status = CTL_SUCCESS; 8926 8927 ctl_done((union ctl_io *)ctsio); 8928 8929 return (CTL_RETVAL_COMPLETE); 8930 } 8931 8932 #ifdef notyet 8933 static int 8934 ctl_cmddt_inquiry(struct ctl_scsiio *ctsio) 8935 { 8936 8937 } 8938 #endif 8939 8940 static int 8941 ctl_inquiry_evpd_supported(struct ctl_scsiio *ctsio, int alloc_len) 8942 { 8943 struct scsi_vpd_supported_pages *pages; 8944 int sup_page_size; 8945 struct ctl_lun *lun; 8946 8947 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 8948 8949 sup_page_size = sizeof(struct scsi_vpd_supported_pages) + 8950 SCSI_EVPD_NUM_SUPPORTED_PAGES; 8951 ctsio->kern_data_ptr = malloc(sup_page_size, M_CTL, M_WAITOK | M_ZERO); 8952 pages = (struct scsi_vpd_supported_pages *)ctsio->kern_data_ptr; 8953 ctsio->kern_sg_entries = 0; 8954 8955 if (sup_page_size < alloc_len) { 8956 ctsio->residual = alloc_len - sup_page_size; 8957 ctsio->kern_data_len = sup_page_size; 8958 ctsio->kern_total_len = sup_page_size; 8959 } else { 8960 ctsio->residual = 0; 8961 ctsio->kern_data_len = alloc_len; 8962 ctsio->kern_total_len = alloc_len; 8963 } 8964 ctsio->kern_data_resid = 0; 8965 ctsio->kern_rel_offset = 0; 8966 ctsio->kern_sg_entries = 0; 8967 8968 /* 8969 * The control device is always connected. The disk device, on the 8970 * other hand, may not be online all the time. Need to change this 8971 * to figure out whether the disk device is actually online or not. 
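 * The byte built below packs the peripheral qualifier into the top
 * three bits and the peripheral device type into the low five bits,
 * e.g. (SID_QUAL_LU_CONNECTED << 5) | T_DIRECT for an attached
 * disk LUN.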
8972 */ 8973 if (lun != NULL) 8974 pages->device = (SID_QUAL_LU_CONNECTED << 5) | 8975 lun->be_lun->lun_type; 8976 else 8977 pages->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 8978 8979 pages->length = SCSI_EVPD_NUM_SUPPORTED_PAGES; 8980 /* Supported VPD pages */ 8981 pages->page_list[0] = SVPD_SUPPORTED_PAGES; 8982 /* Serial Number */ 8983 pages->page_list[1] = SVPD_UNIT_SERIAL_NUMBER; 8984 /* Device Identification */ 8985 pages->page_list[2] = SVPD_DEVICE_ID; 8986 8987 ctsio->scsi_status = SCSI_STATUS_OK; 8988 8989 ctsio->be_move_done = ctl_config_move_done; 8990 ctl_datamove((union ctl_io *)ctsio); 8991 8992 return (CTL_RETVAL_COMPLETE); 8993 } 8994 8995 static int 8996 ctl_inquiry_evpd_serial(struct ctl_scsiio *ctsio, int alloc_len) 8997 { 8998 struct scsi_vpd_unit_serial_number *sn_ptr; 8999 struct ctl_lun *lun; 9000 #ifndef CTL_USE_BACKEND_SN 9001 char tmpstr[32]; 9002 #endif 9003 9004 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 9005 9006 ctsio->kern_data_ptr = malloc(sizeof(*sn_ptr), M_CTL, M_WAITOK | M_ZERO); 9007 sn_ptr = (struct scsi_vpd_unit_serial_number *)ctsio->kern_data_ptr; 9008 ctsio->kern_sg_entries = 0; 9009 9010 if (sizeof(*sn_ptr) < alloc_len) { 9011 ctsio->residual = alloc_len - sizeof(*sn_ptr); 9012 ctsio->kern_data_len = sizeof(*sn_ptr); 9013 ctsio->kern_total_len = sizeof(*sn_ptr); 9014 } else { 9015 ctsio->residual = 0; 9016 ctsio->kern_data_len = alloc_len; 9017 ctsio->kern_total_len = alloc_len; 9018 } 9019 ctsio->kern_data_resid = 0; 9020 ctsio->kern_rel_offset = 0; 9021 ctsio->kern_sg_entries = 0; 9022 9023 /* 9024 * The control device is always connected. The disk device, on the 9025 * other hand, may not be online all the time. Need to change this 9026 * to figure out whether the disk device is actually online or not. 9027 */ 9028 if (lun != NULL) 9029 sn_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 9030 lun->be_lun->lun_type; 9031 else 9032 sn_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 9033 9034 sn_ptr->page_code = SVPD_UNIT_SERIAL_NUMBER; 9035 sn_ptr->length = ctl_min(sizeof(*sn_ptr) - 4, CTL_SN_LEN); 9036 #ifdef CTL_USE_BACKEND_SN 9037 /* 9038 * If we don't have a LUN, we just leave the serial number as 9039 * all spaces. 
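 * (0x20 is the ASCII space character; the memset below blank-fills
 * the field before any backend-supplied serial number is copied
 * over it, so short serial numbers end up space padded.)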
9040 */ 9041 memset(sn_ptr->serial_num, 0x20, sizeof(sn_ptr->serial_num)); 9042 if (lun != NULL) { 9043 strncpy((char *)sn_ptr->serial_num, 9044 (char *)lun->be_lun->serial_num, CTL_SN_LEN); 9045 } 9046 #else 9047 /* 9048 * Note that we're using a non-unique serial number here, 9049 */ 9050 snprintf(tmpstr, sizeof(tmpstr), "MYSERIALNUMIS000"); 9051 memset(sn_ptr->serial_num, 0x20, sizeof(sn_ptr->serial_num)); 9052 strncpy(sn_ptr->serial_num, tmpstr, ctl_min(CTL_SN_LEN, 9053 ctl_min(sizeof(tmpstr), sizeof(*sn_ptr) - 4))); 9054 #endif 9055 ctsio->scsi_status = SCSI_STATUS_OK; 9056 9057 ctsio->be_move_done = ctl_config_move_done; 9058 ctl_datamove((union ctl_io *)ctsio); 9059 9060 return (CTL_RETVAL_COMPLETE); 9061 } 9062 9063 9064 static int 9065 ctl_inquiry_evpd_devid(struct ctl_scsiio *ctsio, int alloc_len) 9066 { 9067 struct scsi_vpd_device_id *devid_ptr; 9068 struct scsi_vpd_id_descriptor *desc, *desc1; 9069 struct scsi_vpd_id_descriptor *desc2, *desc3; /* for types 4h and 5h */ 9070 struct scsi_vpd_id_t10 *t10id; 9071 struct ctl_softc *ctl_softc; 9072 struct ctl_lun *lun; 9073 struct ctl_frontend *fe; 9074 #ifndef CTL_USE_BACKEND_SN 9075 char tmpstr[32]; 9076 #endif /* CTL_USE_BACKEND_SN */ 9077 int devid_len; 9078 9079 ctl_softc = control_softc; 9080 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 9081 9082 devid_len = sizeof(struct scsi_vpd_device_id) + 9083 sizeof(struct scsi_vpd_id_descriptor) + 9084 sizeof(struct scsi_vpd_id_t10) + CTL_DEVID_LEN + 9085 sizeof(struct scsi_vpd_id_descriptor) + CTL_WWPN_LEN + 9086 sizeof(struct scsi_vpd_id_descriptor) + 9087 sizeof(struct scsi_vpd_id_rel_trgt_port_id) + 9088 sizeof(struct scsi_vpd_id_descriptor) + 9089 sizeof(struct scsi_vpd_id_trgt_port_grp_id); 9090 9091 ctsio->kern_data_ptr = malloc(devid_len, M_CTL, M_WAITOK | M_ZERO); 9092 devid_ptr = (struct scsi_vpd_device_id *)ctsio->kern_data_ptr; 9093 ctsio->kern_sg_entries = 0; 9094 9095 if (devid_len < alloc_len) { 9096 ctsio->residual = alloc_len - devid_len; 9097 ctsio->kern_data_len = devid_len; 9098 ctsio->kern_total_len = devid_len; 9099 } else { 9100 ctsio->residual = 0; 9101 ctsio->kern_data_len = alloc_len; 9102 ctsio->kern_total_len = alloc_len; 9103 } 9104 ctsio->kern_data_resid = 0; 9105 ctsio->kern_rel_offset = 0; 9106 ctsio->kern_sg_entries = 0; 9107 9108 desc = (struct scsi_vpd_id_descriptor *)devid_ptr->desc_list; 9109 t10id = (struct scsi_vpd_id_t10 *)&desc->identifier[0]; 9110 desc1 = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] + 9111 sizeof(struct scsi_vpd_id_t10) + CTL_DEVID_LEN); 9112 desc2 = (struct scsi_vpd_id_descriptor *)(&desc1->identifier[0] + 9113 CTL_WWPN_LEN); 9114 desc3 = (struct scsi_vpd_id_descriptor *)(&desc2->identifier[0] + 9115 sizeof(struct scsi_vpd_id_rel_trgt_port_id)); 9116 9117 /* 9118 * The control device is always connected. The disk device, on the 9119 * other hand, may not be online all the time. 
9120 */ 9121 if (lun != NULL) 9122 devid_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 9123 lun->be_lun->lun_type; 9124 else 9125 devid_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 9126 9127 devid_ptr->page_code = SVPD_DEVICE_ID; 9128 9129 scsi_ulto2b(devid_len - 4, devid_ptr->length); 9130 9131 mtx_lock(&ctl_softc->ctl_lock); 9132 9133 fe = ctl_softc->ctl_ports[ctl_port_idx(ctsio->io_hdr.nexus.targ_port)]; 9134 9135 /* 9136 * For Fibre channel, 9137 */ 9138 if (fe->port_type == CTL_PORT_FC) 9139 { 9140 desc->proto_codeset = (SCSI_PROTO_FC << 4) | 9141 SVPD_ID_CODESET_ASCII; 9142 desc1->proto_codeset = (SCSI_PROTO_FC << 4) | 9143 SVPD_ID_CODESET_BINARY; 9144 } 9145 else 9146 { 9147 desc->proto_codeset = (SCSI_PROTO_SPI << 4) | 9148 SVPD_ID_CODESET_ASCII; 9149 desc1->proto_codeset = (SCSI_PROTO_SPI << 4) | 9150 SVPD_ID_CODESET_BINARY; 9151 } 9152 desc2->proto_codeset = desc3->proto_codeset = desc1->proto_codeset; 9153 mtx_unlock(&ctl_softc->ctl_lock); 9154 9155 /* 9156 * We're using a LUN association here. i.e., this device ID is a 9157 * per-LUN identifier. 9158 */ 9159 desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_LUN | SVPD_ID_TYPE_T10; 9160 desc->length = sizeof(*t10id) + CTL_DEVID_LEN; 9161 strncpy((char *)t10id->vendor, CTL_VENDOR, sizeof(t10id->vendor)); 9162 9163 /* 9164 * desc1 is for the WWPN which is a port asscociation. 9165 */ 9166 desc1->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_PORT | SVPD_ID_TYPE_NAA; 9167 desc1->length = CTL_WWPN_LEN; 9168 /* XXX Call Reggie's get_WWNN func here then add port # to the end */ 9169 /* For testing just create the WWPN */ 9170 #if 0 9171 ddb_GetWWNN((char *)desc1->identifier); 9172 9173 /* NOTE: if the port is 0 or 8 we don't want to subtract 1 */ 9174 /* This is so Copancontrol will return something sane */ 9175 if (ctsio->io_hdr.nexus.targ_port!=0 && 9176 ctsio->io_hdr.nexus.targ_port!=8) 9177 desc1->identifier[7] += ctsio->io_hdr.nexus.targ_port-1; 9178 else 9179 desc1->identifier[7] += ctsio->io_hdr.nexus.targ_port; 9180 #endif 9181 9182 be64enc(desc1->identifier, fe->wwpn); 9183 9184 /* 9185 * desc2 is for the Relative Target Port(type 4h) identifier 9186 */ 9187 desc2->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_PORT 9188 | SVPD_ID_TYPE_RELTARG; 9189 desc2->length = 4; 9190 //#if 0 9191 /* NOTE: if the port is 0 or 8 we don't want to subtract 1 */ 9192 /* This is so Copancontrol will return something sane */ 9193 if (ctsio->io_hdr.nexus.targ_port!=0 && 9194 ctsio->io_hdr.nexus.targ_port!=8) 9195 desc2->identifier[3] = ctsio->io_hdr.nexus.targ_port - 1; 9196 else 9197 desc2->identifier[3] = ctsio->io_hdr.nexus.targ_port; 9198 //#endif 9199 9200 /* 9201 * desc3 is for the Target Port Group(type 5h) identifier 9202 */ 9203 desc3->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_PORT 9204 | SVPD_ID_TYPE_TPORTGRP; 9205 desc3->length = 4; 9206 if (ctsio->io_hdr.nexus.targ_port < CTL_MAX_PORTS || ctl_is_single) 9207 desc3->identifier[3] = 1; 9208 else 9209 desc3->identifier[3] = 2; 9210 9211 #ifdef CTL_USE_BACKEND_SN 9212 /* 9213 * If we've actually got a backend, copy the device id from the 9214 * per-LUN data. Otherwise, set it to all spaces. 9215 */ 9216 if (lun != NULL) { 9217 /* 9218 * Copy the backend's LUN ID. 9219 */ 9220 strncpy((char *)t10id->vendor_spec_id, 9221 (char *)lun->be_lun->device_id, CTL_DEVID_LEN); 9222 } else { 9223 /* 9224 * No backend, set this to spaces. 9225 */ 9226 memset(t10id->vendor_spec_id, 0x20, CTL_DEVID_LEN); 9227 } 9228 #else 9229 snprintf(tmpstr, sizeof(tmpstr), "MYDEVICEIDIS%4d", 9230 (lun != NULL) ? 
(int)lun->lun : 0); 9231 strncpy(t10id->vendor_spec_id, tmpstr, ctl_min(CTL_DEVID_LEN, 9232 sizeof(tmpstr))); 9233 #endif 9234 9235 ctsio->scsi_status = SCSI_STATUS_OK; 9236 9237 ctsio->be_move_done = ctl_config_move_done; 9238 ctl_datamove((union ctl_io *)ctsio); 9239 9240 return (CTL_RETVAL_COMPLETE); 9241 } 9242 9243 static int 9244 ctl_inquiry_evpd(struct ctl_scsiio *ctsio) 9245 { 9246 struct scsi_inquiry *cdb; 9247 int alloc_len, retval; 9248 9249 cdb = (struct scsi_inquiry *)ctsio->cdb; 9250 9251 retval = CTL_RETVAL_COMPLETE; 9252 9253 alloc_len = scsi_2btoul(cdb->length); 9254 9255 switch (cdb->page_code) { 9256 case SVPD_SUPPORTED_PAGES: 9257 retval = ctl_inquiry_evpd_supported(ctsio, alloc_len); 9258 break; 9259 case SVPD_UNIT_SERIAL_NUMBER: 9260 retval = ctl_inquiry_evpd_serial(ctsio, alloc_len); 9261 break; 9262 case SVPD_DEVICE_ID: 9263 retval = ctl_inquiry_evpd_devid(ctsio, alloc_len); 9264 break; 9265 default: 9266 ctl_set_invalid_field(ctsio, 9267 /*sks_valid*/ 1, 9268 /*command*/ 1, 9269 /*field*/ 2, 9270 /*bit_valid*/ 0, 9271 /*bit*/ 0); 9272 ctl_done((union ctl_io *)ctsio); 9273 retval = CTL_RETVAL_COMPLETE; 9274 break; 9275 } 9276 9277 return (retval); 9278 } 9279 9280 static int 9281 ctl_inquiry_std(struct ctl_scsiio *ctsio) 9282 { 9283 struct scsi_inquiry_data *inq_ptr; 9284 struct scsi_inquiry *cdb; 9285 struct ctl_softc *ctl_softc; 9286 struct ctl_lun *lun; 9287 uint32_t alloc_len; 9288 int is_fc; 9289 9290 ctl_softc = control_softc; 9291 9292 /* 9293 * Figure out whether we're talking to a Fibre Channel port or not. 9294 * We treat the ioctl front end, and any SCSI adapters, as packetized 9295 * SCSI front ends. 9296 */ 9297 mtx_lock(&ctl_softc->ctl_lock); 9298 if (ctl_softc->ctl_ports[ctl_port_idx(ctsio->io_hdr.nexus.targ_port)]->port_type != 9299 CTL_PORT_FC) 9300 is_fc = 0; 9301 else 9302 is_fc = 1; 9303 mtx_unlock(&ctl_softc->ctl_lock); 9304 9305 lun = ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 9306 cdb = (struct scsi_inquiry *)ctsio->cdb; 9307 alloc_len = scsi_2btoul(cdb->length); 9308 9309 /* 9310 * We malloc the full inquiry data size here and fill it 9311 * in. If the user only asks for less, we'll give him 9312 * that much. 9313 */ 9314 ctsio->kern_data_ptr = malloc(sizeof(*inq_ptr), M_CTL, M_WAITOK | M_ZERO); 9315 inq_ptr = (struct scsi_inquiry_data *)ctsio->kern_data_ptr; 9316 ctsio->kern_sg_entries = 0; 9317 ctsio->kern_data_resid = 0; 9318 ctsio->kern_rel_offset = 0; 9319 9320 if (sizeof(*inq_ptr) < alloc_len) { 9321 ctsio->residual = alloc_len - sizeof(*inq_ptr); 9322 ctsio->kern_data_len = sizeof(*inq_ptr); 9323 ctsio->kern_total_len = sizeof(*inq_ptr); 9324 } else { 9325 ctsio->residual = 0; 9326 ctsio->kern_data_len = alloc_len; 9327 ctsio->kern_total_len = alloc_len; 9328 } 9329 9330 /* 9331 * If we have a LUN configured, report it as connected. Otherwise, 9332 * report that it is offline or no device is supported, depending 9333 * on the value of inquiry_pq_no_lun. 9334 * 9335 * According to the spec (SPC-4 r34), the peripheral qualifier 9336 * SID_QUAL_LU_OFFLINE (001b) is used in the following scenario: 9337 * 9338 * "A peripheral device having the specified peripheral device type 9339 * is not connected to this logical unit. However, the device 9340 * server is capable of supporting the specified peripheral device 9341 * type on this logical unit." 
9342 * 9343 * According to the same spec, the peripheral qualifier 9344 * SID_QUAL_BAD_LU (011b) is used in this scenario: 9345 * 9346 * "The device server is not capable of supporting a peripheral 9347 * device on this logical unit. For this peripheral qualifier the 9348 * peripheral device type shall be set to 1Fh. All other peripheral 9349 * device type values are reserved for this peripheral qualifier." 9350 * 9351 * Given the text, it would seem that we probably want to report that 9352 * the LUN is offline here. There is no LUN connected, but we can 9353 * support a LUN at the given LUN number. 9354 * 9355 * In the real world, though, it sounds like things are a little 9356 * different: 9357 * 9358 * - Linux, when presented with a LUN with the offline peripheral 9359 * qualifier, will create an sg driver instance for it. So when 9360 * you attach it to CTL, you wind up with a ton of sg driver 9361 * instances. (One for every LUN that Linux bothered to probe.) 9362 * Linux does this despite the fact that it issues a REPORT LUNs 9363 * to LUN 0 to get the inventory of supported LUNs. 9364 * 9365 * - There is other anecdotal evidence (from Emulex folks) about 9366 * arrays that use the offline peripheral qualifier for LUNs that 9367 * are on the "passive" path in an active/passive array. 9368 * 9369 * So the solution is provide a hopefully reasonable default 9370 * (return bad/no LUN) and allow the user to change the behavior 9371 * with a tunable/sysctl variable. 9372 */ 9373 if (lun != NULL) 9374 inq_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 9375 lun->be_lun->lun_type; 9376 else if (ctl_softc->inquiry_pq_no_lun == 0) 9377 inq_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 9378 else 9379 inq_ptr->device = (SID_QUAL_BAD_LU << 5) | T_NODEVICE; 9380 9381 /* RMB in byte 2 is 0 */ 9382 inq_ptr->version = SCSI_REV_SPC3; 9383 9384 /* 9385 * According to SAM-3, even if a device only supports a single 9386 * level of LUN addressing, it should still set the HISUP bit: 9387 * 9388 * 4.9.1 Logical unit numbers overview 9389 * 9390 * All logical unit number formats described in this standard are 9391 * hierarchical in structure even when only a single level in that 9392 * hierarchy is used. The HISUP bit shall be set to one in the 9393 * standard INQUIRY data (see SPC-2) when any logical unit number 9394 * format described in this standard is used. Non-hierarchical 9395 * formats are outside the scope of this standard. 9396 * 9397 * Therefore we set the HiSup bit here. 9398 * 9399 * The reponse format is 2, per SPC-3. 9400 */ 9401 inq_ptr->response_format = SID_HiSup | 2; 9402 9403 inq_ptr->additional_length = sizeof(*inq_ptr) - 4; 9404 CTL_DEBUG_PRINT(("additional_length = %d\n", 9405 inq_ptr->additional_length)); 9406 9407 inq_ptr->spc3_flags = SPC3_SID_TPGS_IMPLICIT; 9408 /* 16 bit addressing */ 9409 if (is_fc == 0) 9410 inq_ptr->spc2_flags = SPC2_SID_ADDR16; 9411 /* XXX set the SID_MultiP bit here if we're actually going to 9412 respond on multiple ports */ 9413 inq_ptr->spc2_flags |= SPC2_SID_MultiP; 9414 9415 /* 16 bit data bus, synchronous transfers */ 9416 /* XXX these flags don't apply for FC */ 9417 if (is_fc == 0) 9418 inq_ptr->flags = SID_WBus16 | SID_Sync; 9419 /* 9420 * XXX KDM do we want to support tagged queueing on the control 9421 * device at all? 9422 */ 9423 if ((lun == NULL) 9424 || (lun->be_lun->lun_type != T_PROCESSOR)) 9425 inq_ptr->flags |= SID_CmdQue; 9426 /* 9427 * Per SPC-3, unused bytes in ASCII strings are filled with spaces. 
9428 * We have 8 bytes for the vendor name, and 16 bytes for the device 9429 * name and 4 bytes for the revision. 9430 */ 9431 strncpy(inq_ptr->vendor, CTL_VENDOR, sizeof(inq_ptr->vendor)); 9432 if (lun == NULL) { 9433 strcpy(inq_ptr->product, CTL_DIRECT_PRODUCT); 9434 } else { 9435 switch (lun->be_lun->lun_type) { 9436 case T_DIRECT: 9437 strcpy(inq_ptr->product, CTL_DIRECT_PRODUCT); 9438 break; 9439 case T_PROCESSOR: 9440 strcpy(inq_ptr->product, CTL_PROCESSOR_PRODUCT); 9441 break; 9442 default: 9443 strcpy(inq_ptr->product, CTL_UNKNOWN_PRODUCT); 9444 break; 9445 } 9446 } 9447 9448 /* 9449 * XXX make this a macro somewhere so it automatically gets 9450 * incremented when we make changes. 9451 */ 9452 strncpy(inq_ptr->revision, "0001", sizeof(inq_ptr->revision)); 9453 9454 /* 9455 * For parallel SCSI, we support double transition and single 9456 * transition clocking. We also support QAS (Quick Arbitration 9457 * and Selection) and Information Unit transfers on both the 9458 * control and array devices. 9459 */ 9460 if (is_fc == 0) 9461 inq_ptr->spi3data = SID_SPI_CLOCK_DT_ST | SID_SPI_QAS | 9462 SID_SPI_IUS; 9463 9464 /* SAM-3 */ 9465 scsi_ulto2b(0x0060, inq_ptr->version1); 9466 /* SPC-3 (no version claimed) XXX should we claim a version? */ 9467 scsi_ulto2b(0x0300, inq_ptr->version2); 9468 if (is_fc) { 9469 /* FCP-2 ANSI INCITS.350:2003 */ 9470 scsi_ulto2b(0x0917, inq_ptr->version3); 9471 } else { 9472 /* SPI-4 ANSI INCITS.362:200x */ 9473 scsi_ulto2b(0x0B56, inq_ptr->version3); 9474 } 9475 9476 if (lun == NULL) { 9477 /* SBC-2 (no version claimed) XXX should we claim a version? */ 9478 scsi_ulto2b(0x0320, inq_ptr->version4); 9479 } else { 9480 switch (lun->be_lun->lun_type) { 9481 case T_DIRECT: 9482 /* 9483 * SBC-2 (no version claimed) XXX should we claim a 9484 * version? 9485 */ 9486 scsi_ulto2b(0x0320, inq_ptr->version4); 9487 break; 9488 case T_PROCESSOR: 9489 default: 9490 break; 9491 } 9492 } 9493 9494 ctsio->scsi_status = SCSI_STATUS_OK; 9495 if (ctsio->kern_data_len > 0) { 9496 ctsio->be_move_done = ctl_config_move_done; 9497 ctl_datamove((union ctl_io *)ctsio); 9498 } else { 9499 ctsio->io_hdr.status = CTL_SUCCESS; 9500 ctl_done((union ctl_io *)ctsio); 9501 } 9502 9503 return (CTL_RETVAL_COMPLETE); 9504 } 9505 9506 int 9507 ctl_inquiry(struct ctl_scsiio *ctsio) 9508 { 9509 struct scsi_inquiry *cdb; 9510 int retval; 9511 9512 cdb = (struct scsi_inquiry *)ctsio->cdb; 9513 9514 retval = 0; 9515 9516 CTL_DEBUG_PRINT(("ctl_inquiry\n")); 9517 9518 /* 9519 * Right now, we don't support the CmdDt inquiry information. 9520 * This would be nice to support in the future. When we do 9521 * support it, we should change this test so that it checks to make 9522 * sure SI_EVPD and SI_CMDDT aren't both set at the same time. 9523 */ 9524 #ifdef notyet 9525 if (((cdb->byte2 & SI_EVPD) 9526 && (cdb->byte2 & SI_CMDDT))) 9527 #endif 9528 if (cdb->byte2 & SI_CMDDT) { 9529 /* 9530 * Point to the SI_CMDDT bit. We might change this 9531 * when we support SI_CMDDT, but since both bits would be 9532 * "wrong", this should probably just stay as-is then. 
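 * The sense-key-specific information below points at CDB byte 1,
 * bit 1, which is where the CmdDt bit lives (EVPD is bit 0 of the
 * same byte).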
9533 */ 9534 ctl_set_invalid_field(ctsio, 9535 /*sks_valid*/ 1, 9536 /*command*/ 1, 9537 /*field*/ 1, 9538 /*bit_valid*/ 1, 9539 /*bit*/ 1); 9540 ctl_done((union ctl_io *)ctsio); 9541 return (CTL_RETVAL_COMPLETE); 9542 } 9543 if (cdb->byte2 & SI_EVPD) 9544 retval = ctl_inquiry_evpd(ctsio); 9545 #ifdef notyet 9546 else if (cdb->byte2 & SI_CMDDT) 9547 retval = ctl_inquiry_cmddt(ctsio); 9548 #endif 9549 else 9550 retval = ctl_inquiry_std(ctsio); 9551 9552 return (retval); 9553 } 9554 9555 /* 9556 * For known CDB types, parse the LBA and length. 9557 */ 9558 static int 9559 ctl_get_lba_len(union ctl_io *io, uint64_t *lba, uint32_t *len) 9560 { 9561 if (io->io_hdr.io_type != CTL_IO_SCSI) 9562 return (1); 9563 9564 switch (io->scsiio.cdb[0]) { 9565 case READ_6: 9566 case WRITE_6: { 9567 struct scsi_rw_6 *cdb; 9568 9569 cdb = (struct scsi_rw_6 *)io->scsiio.cdb; 9570 9571 *lba = scsi_3btoul(cdb->addr); 9572 /* only 5 bits are valid in the most significant address byte */ 9573 *lba &= 0x1fffff; 9574 *len = cdb->length; 9575 break; 9576 } 9577 case READ_10: 9578 case WRITE_10: { 9579 struct scsi_rw_10 *cdb; 9580 9581 cdb = (struct scsi_rw_10 *)io->scsiio.cdb; 9582 9583 *lba = scsi_4btoul(cdb->addr); 9584 *len = scsi_2btoul(cdb->length); 9585 break; 9586 } 9587 case WRITE_VERIFY_10: { 9588 struct scsi_write_verify_10 *cdb; 9589 9590 cdb = (struct scsi_write_verify_10 *)io->scsiio.cdb; 9591 9592 *lba = scsi_4btoul(cdb->addr); 9593 *len = scsi_2btoul(cdb->length); 9594 break; 9595 } 9596 case READ_12: 9597 case WRITE_12: { 9598 struct scsi_rw_12 *cdb; 9599 9600 cdb = (struct scsi_rw_12 *)io->scsiio.cdb; 9601 9602 *lba = scsi_4btoul(cdb->addr); 9603 *len = scsi_4btoul(cdb->length); 9604 break; 9605 } 9606 case WRITE_VERIFY_12: { 9607 struct scsi_write_verify_12 *cdb; 9608 9609 cdb = (struct scsi_write_verify_12 *)io->scsiio.cdb; 9610 9611 *lba = scsi_4btoul(cdb->addr); 9612 *len = scsi_4btoul(cdb->length); 9613 break; 9614 } 9615 case READ_16: 9616 case WRITE_16: { 9617 struct scsi_rw_16 *cdb; 9618 9619 cdb = (struct scsi_rw_16 *)io->scsiio.cdb; 9620 9621 *lba = scsi_8btou64(cdb->addr); 9622 *len = scsi_4btoul(cdb->length); 9623 break; 9624 } 9625 case WRITE_VERIFY_16: { 9626 struct scsi_write_verify_16 *cdb; 9627 9628 cdb = (struct scsi_write_verify_16 *)io->scsiio.cdb; 9629 9630 9631 *lba = scsi_8btou64(cdb->addr); 9632 *len = scsi_4btoul(cdb->length); 9633 break; 9634 } 9635 default: 9636 return (1); 9637 break; /* NOTREACHED */ 9638 } 9639 9640 return (0); 9641 } 9642 9643 static ctl_action 9644 ctl_extent_check_lba(uint64_t lba1, uint32_t len1, uint64_t lba2, uint32_t len2) 9645 { 9646 uint64_t endlba1, endlba2; 9647 9648 endlba1 = lba1 + len1 - 1; 9649 endlba2 = lba2 + len2 - 1; 9650 9651 if ((endlba1 < lba2) 9652 || (endlba2 < lba1)) 9653 return (CTL_ACTION_PASS); 9654 else 9655 return (CTL_ACTION_BLOCK); 9656 } 9657 9658 static ctl_action 9659 ctl_extent_check(union ctl_io *io1, union ctl_io *io2) 9660 { 9661 uint64_t lba1, lba2; 9662 uint32_t len1, len2; 9663 int retval; 9664 9665 retval = ctl_get_lba_len(io1, &lba1, &len1); 9666 if (retval != 0) 9667 return (CTL_ACTION_ERROR); 9668 9669 retval = ctl_get_lba_len(io2, &lba2, &len2); 9670 if (retval != 0) 9671 return (CTL_ACTION_ERROR); 9672 9673 return (ctl_extent_check_lba(lba1, len1, lba2, len2)); 9674 } 9675 9676 static ctl_action 9677 ctl_check_for_blockage(union ctl_io *pending_io, union ctl_io *ooa_io) 9678 { 9679 struct ctl_cmd_entry *pending_entry, *ooa_entry; 9680 ctl_serialize_action *serialize_row; 9681 9682 /* 9683 * The initiator 
attempted multiple untagged commands at the same 9684 * time. Can't do that. 9685 */ 9686 if ((pending_io->scsiio.tag_type == CTL_TAG_UNTAGGED) 9687 && (ooa_io->scsiio.tag_type == CTL_TAG_UNTAGGED) 9688 && ((pending_io->io_hdr.nexus.targ_port == 9689 ooa_io->io_hdr.nexus.targ_port) 9690 && (pending_io->io_hdr.nexus.initid.id == 9691 ooa_io->io_hdr.nexus.initid.id)) 9692 && ((ooa_io->io_hdr.flags & CTL_FLAG_ABORT) == 0)) 9693 return (CTL_ACTION_OVERLAP); 9694 9695 /* 9696 * The initiator attempted to send multiple tagged commands with 9697 * the same ID. (It's fine if different initiators have the same 9698 * tag ID.) 9699 * 9700 * Even if all of those conditions are true, we don't kill the I/O 9701 * if the command ahead of us has been aborted. We won't end up 9702 * sending it to the FETD, and it's perfectly legal to resend a 9703 * command with the same tag number as long as the previous 9704 * instance of this tag number has been aborted somehow. 9705 */ 9706 if ((pending_io->scsiio.tag_type != CTL_TAG_UNTAGGED) 9707 && (ooa_io->scsiio.tag_type != CTL_TAG_UNTAGGED) 9708 && (pending_io->scsiio.tag_num == ooa_io->scsiio.tag_num) 9709 && ((pending_io->io_hdr.nexus.targ_port == 9710 ooa_io->io_hdr.nexus.targ_port) 9711 && (pending_io->io_hdr.nexus.initid.id == 9712 ooa_io->io_hdr.nexus.initid.id)) 9713 && ((ooa_io->io_hdr.flags & CTL_FLAG_ABORT) == 0)) 9714 return (CTL_ACTION_OVERLAP_TAG); 9715 9716 /* 9717 * If we get a head of queue tag, SAM-3 says that we should 9718 * immediately execute it. 9719 * 9720 * What happens if this command would normally block for some other 9721 * reason? e.g. a request sense with a head of queue tag 9722 * immediately after a write. Normally that would block, but this 9723 * will result in its getting executed immediately... 9724 * 9725 * We currently return "pass" instead of "skip", so we'll end up 9726 * going through the rest of the queue to check for overlapped tags. 9727 * 9728 * XXX KDM check for other types of blockage first?? 9729 */ 9730 if (pending_io->scsiio.tag_type == CTL_TAG_HEAD_OF_QUEUE) 9731 return (CTL_ACTION_PASS); 9732 9733 /* 9734 * Ordered tags have to block until all items ahead of them 9735 * have completed. If we get called with an ordered tag, we always 9736 * block, if something else is ahead of us in the queue. 9737 */ 9738 if (pending_io->scsiio.tag_type == CTL_TAG_ORDERED) 9739 return (CTL_ACTION_BLOCK); 9740 9741 /* 9742 * Simple tags get blocked until all head of queue and ordered tags 9743 * ahead of them have completed. I'm lumping untagged commands in 9744 * with simple tags here. XXX KDM is that the right thing to do? 
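 * To recap the tag handling in this function: two untagged commands
 * from the same nexus overlap, a duplicate tag number from the same
 * nexus overlaps, HEAD OF QUEUE passes, ORDERED blocks, and SIMPLE
 * or untagged commands block behind any HEAD OF QUEUE or ORDERED
 * command ahead of them.  Anything that falls through is decided by
 * the per-opcode serialization table lookup below.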
9745 */ 9746 if (((pending_io->scsiio.tag_type == CTL_TAG_UNTAGGED) 9747 || (pending_io->scsiio.tag_type == CTL_TAG_SIMPLE)) 9748 && ((ooa_io->scsiio.tag_type == CTL_TAG_HEAD_OF_QUEUE) 9749 || (ooa_io->scsiio.tag_type == CTL_TAG_ORDERED))) 9750 return (CTL_ACTION_BLOCK); 9751 9752 pending_entry = &ctl_cmd_table[pending_io->scsiio.cdb[0]]; 9753 ooa_entry = &ctl_cmd_table[ooa_io->scsiio.cdb[0]]; 9754 9755 serialize_row = ctl_serialize_table[ooa_entry->seridx]; 9756 9757 switch (serialize_row[pending_entry->seridx]) { 9758 case CTL_SER_BLOCK: 9759 return (CTL_ACTION_BLOCK); 9760 break; /* NOTREACHED */ 9761 case CTL_SER_EXTENT: 9762 return (ctl_extent_check(pending_io, ooa_io)); 9763 break; /* NOTREACHED */ 9764 case CTL_SER_PASS: 9765 return (CTL_ACTION_PASS); 9766 break; /* NOTREACHED */ 9767 case CTL_SER_SKIP: 9768 return (CTL_ACTION_SKIP); 9769 break; 9770 default: 9771 panic("invalid serialization value %d", 9772 serialize_row[pending_entry->seridx]); 9773 break; /* NOTREACHED */ 9774 } 9775 9776 return (CTL_ACTION_ERROR); 9777 } 9778 9779 /* 9780 * Check for blockage or overlaps against the OOA (Order Of Arrival) queue. 9781 * Assumptions: 9782 * - pending_io is generally either incoming, or on the blocked queue 9783 * - starting I/O is the I/O we want to start the check with. 9784 */ 9785 static ctl_action 9786 ctl_check_ooa(struct ctl_lun *lun, union ctl_io *pending_io, 9787 union ctl_io *starting_io) 9788 { 9789 union ctl_io *ooa_io; 9790 ctl_action action; 9791 9792 mtx_assert(&control_softc->ctl_lock, MA_OWNED); 9793 9794 /* 9795 * Run back along the OOA queue, starting with the current 9796 * blocked I/O and going through every I/O before it on the 9797 * queue. If starting_io is NULL, we'll just end up returning 9798 * CTL_ACTION_PASS. 9799 */ 9800 for (ooa_io = starting_io; ooa_io != NULL; 9801 ooa_io = (union ctl_io *)TAILQ_PREV(&ooa_io->io_hdr, ctl_ooaq, 9802 ooa_links)){ 9803 9804 /* 9805 * This routine just checks to see whether 9806 * cur_blocked is blocked by ooa_io, which is ahead 9807 * of it in the queue. It doesn't queue/dequeue 9808 * cur_blocked. 9809 */ 9810 action = ctl_check_for_blockage(pending_io, ooa_io); 9811 switch (action) { 9812 case CTL_ACTION_BLOCK: 9813 case CTL_ACTION_OVERLAP: 9814 case CTL_ACTION_OVERLAP_TAG: 9815 case CTL_ACTION_SKIP: 9816 case CTL_ACTION_ERROR: 9817 return (action); 9818 break; /* NOTREACHED */ 9819 case CTL_ACTION_PASS: 9820 break; 9821 default: 9822 panic("invalid action %d", action); 9823 break; /* NOTREACHED */ 9824 } 9825 } 9826 9827 return (CTL_ACTION_PASS); 9828 } 9829 9830 /* 9831 * Assumptions: 9832 * - An I/O has just completed, and has been removed from the per-LUN OOA 9833 * queue, so some items on the blocked queue may now be unblocked. 9834 */ 9835 static int 9836 ctl_check_blocked(struct ctl_lun *lun) 9837 { 9838 union ctl_io *cur_blocked, *next_blocked; 9839 9840 mtx_assert(&control_softc->ctl_lock, MA_OWNED); 9841 9842 /* 9843 * Run forward from the head of the blocked queue, checking each 9844 * entry against the I/Os prior to it on the OOA queue to see if 9845 * there is still any blockage. 9846 * 9847 * We cannot use the TAILQ_FOREACH() macro, because it can't deal 9848 * with our removing a variable on it while it is traversing the 9849 * list. 
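 * Instead, the loop below uses the usual "grab the next pointer
 * before the body can remove the current element" idiom, roughly:
 *
 *	for (cur = TAILQ_FIRST(head); cur != NULL; cur = next) {
 *		next = TAILQ_NEXT(cur, links);
 *		... body may TAILQ_REMOVE(head, cur, links) ...
 *	}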
9850 */ 9851 for (cur_blocked = (union ctl_io *)TAILQ_FIRST(&lun->blocked_queue); 9852 cur_blocked != NULL; cur_blocked = next_blocked) { 9853 union ctl_io *prev_ooa; 9854 ctl_action action; 9855 9856 next_blocked = (union ctl_io *)TAILQ_NEXT(&cur_blocked->io_hdr, 9857 blocked_links); 9858 9859 prev_ooa = (union ctl_io *)TAILQ_PREV(&cur_blocked->io_hdr, 9860 ctl_ooaq, ooa_links); 9861 9862 /* 9863 * If cur_blocked happens to be the first item in the OOA 9864 * queue now, prev_ooa will be NULL, and the action 9865 * returned will just be CTL_ACTION_PASS. 9866 */ 9867 action = ctl_check_ooa(lun, cur_blocked, prev_ooa); 9868 9869 switch (action) { 9870 case CTL_ACTION_BLOCK: 9871 /* Nothing to do here, still blocked */ 9872 break; 9873 case CTL_ACTION_OVERLAP: 9874 case CTL_ACTION_OVERLAP_TAG: 9875 /* 9876 * This shouldn't happen! In theory we've already 9877 * checked this command for overlap... 9878 */ 9879 break; 9880 case CTL_ACTION_PASS: 9881 case CTL_ACTION_SKIP: { 9882 struct ctl_softc *softc; 9883 struct ctl_cmd_entry *entry; 9884 uint32_t initidx; 9885 uint8_t opcode; 9886 int isc_retval; 9887 9888 /* 9889 * The skip case shouldn't happen, this transaction 9890 * should have never made it onto the blocked queue. 9891 */ 9892 /* 9893 * This I/O is no longer blocked, we can remove it 9894 * from the blocked queue. Since this is a TAILQ 9895 * (doubly linked list), we can do O(1) removals 9896 * from any place on the list. 9897 */ 9898 TAILQ_REMOVE(&lun->blocked_queue, &cur_blocked->io_hdr, 9899 blocked_links); 9900 cur_blocked->io_hdr.flags &= ~CTL_FLAG_BLOCKED; 9901 9902 if (cur_blocked->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC){ 9903 /* 9904 * Need to send IO back to original side to 9905 * run 9906 */ 9907 union ctl_ha_msg msg_info; 9908 9909 msg_info.hdr.original_sc = 9910 cur_blocked->io_hdr.original_sc; 9911 msg_info.hdr.serializing_sc = cur_blocked; 9912 msg_info.hdr.msg_type = CTL_MSG_R2R; 9913 if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL, 9914 &msg_info, sizeof(msg_info), 0)) > 9915 CTL_HA_STATUS_SUCCESS) { 9916 printf("CTL:Check Blocked error from " 9917 "ctl_ha_msg_send %d\n", 9918 isc_retval); 9919 } 9920 break; 9921 } 9922 opcode = cur_blocked->scsiio.cdb[0]; 9923 entry = &ctl_cmd_table[opcode]; 9924 softc = control_softc; 9925 9926 initidx = ctl_get_initindex(&cur_blocked->io_hdr.nexus); 9927 9928 /* 9929 * Check this I/O for LUN state changes that may 9930 * have happened while this command was blocked. 9931 * The LUN state may have been changed by a command 9932 * ahead of us in the queue, so we need to re-check 9933 * for any states that can be caused by SCSI 9934 * commands. 9935 */ 9936 if (ctl_scsiio_lun_check(softc, lun, entry, 9937 &cur_blocked->scsiio) == 0) { 9938 cur_blocked->io_hdr.flags |= 9939 CTL_FLAG_IS_WAS_ON_RTR; 9940 STAILQ_INSERT_TAIL(&lun->ctl_softc->rtr_queue, 9941 &cur_blocked->io_hdr, links); 9942 /* 9943 * In the non CTL_DONE_THREAD case, we need 9944 * to wake up the work thread here. When 9945 * we're processing completed requests from 9946 * the work thread context, we'll pop back 9947 * around and end up pulling things off the 9948 * RtR queue. When we aren't processing 9949 * things from the work thread context, 9950 * though, we won't ever check the RtR queue. 9951 * So we need to wake up the thread to clear 9952 * things off the queue. Otherwise this 9953 * transaction will just sit on the RtR queue 9954 * until a new I/O comes in. (Which may or 9955 * may not happen...) 
9956 */ 9957 #ifndef CTL_DONE_THREAD 9958 ctl_wakeup_thread(); 9959 #endif 9960 } else 9961 ctl_done_lock(cur_blocked, /*have_lock*/ 1); 9962 break; 9963 } 9964 default: 9965 /* 9966 * This probably shouldn't happen -- we shouldn't 9967 * get CTL_ACTION_ERROR, or anything else. 9968 */ 9969 break; 9970 } 9971 } 9972 9973 return (CTL_RETVAL_COMPLETE); 9974 } 9975 9976 /* 9977 * This routine (with one exception) checks LUN flags that can be set by 9978 * commands ahead of us in the OOA queue. These flags have to be checked 9979 * when a command initially comes in, and when we pull a command off the 9980 * blocked queue and are preparing to execute it. The reason we have to 9981 * check these flags for commands on the blocked queue is that the LUN 9982 * state may have been changed by a command ahead of us while we're on the 9983 * blocked queue. 9984 * 9985 * Ordering is somewhat important with these checks, so please pay 9986 * careful attention to the placement of any new checks. 9987 */ 9988 static int 9989 ctl_scsiio_lun_check(struct ctl_softc *ctl_softc, struct ctl_lun *lun, 9990 struct ctl_cmd_entry *entry, struct ctl_scsiio *ctsio) 9991 { 9992 int retval; 9993 9994 retval = 0; 9995 9996 /* 9997 * If this shelf is a secondary shelf controller, we have to reject 9998 * any media access commands. 9999 */ 10000 #if 0 10001 /* No longer needed for HA */ 10002 if (((ctl_softc->flags & CTL_FLAG_MASTER_SHELF) == 0) 10003 && ((entry->flags & CTL_CMD_FLAG_OK_ON_SECONDARY) == 0)) { 10004 ctl_set_lun_standby(ctsio); 10005 retval = 1; 10006 goto bailout; 10007 } 10008 #endif 10009 10010 /* 10011 * Check for a reservation conflict. If this command isn't allowed 10012 * even on reserved LUNs, and if this initiator isn't the one who 10013 * reserved us, reject the command with a reservation conflict. 10014 */ 10015 if ((lun->flags & CTL_LUN_RESERVED) 10016 && ((entry->flags & CTL_CMD_FLAG_ALLOW_ON_RESV) == 0)) { 10017 if ((ctsio->io_hdr.nexus.initid.id != lun->rsv_nexus.initid.id) 10018 || (ctsio->io_hdr.nexus.targ_port != lun->rsv_nexus.targ_port) 10019 || (ctsio->io_hdr.nexus.targ_target.id != 10020 lun->rsv_nexus.targ_target.id)) { 10021 ctsio->scsi_status = SCSI_STATUS_RESERV_CONFLICT; 10022 ctsio->io_hdr.status = CTL_SCSI_ERROR; 10023 retval = 1; 10024 goto bailout; 10025 } 10026 } 10027 10028 if ( (lun->flags & CTL_LUN_PR_RESERVED) 10029 && ((entry->flags & CTL_CMD_FLAG_ALLOW_ON_PR_RESV) == 0)) { 10030 uint32_t residx; 10031 10032 residx = ctl_get_resindex(&ctsio->io_hdr.nexus); 10033 /* 10034 * if we aren't registered or it's a res holder type 10035 * reservation and this isn't the res holder then set a 10036 * conflict. 10037 * NOTE: Commands which might be allowed on write exclusive 10038 * type reservations are checked in the particular command 10039 * for a conflict. Read and SSU are the only ones. 10040 */ 10041 if (!lun->per_res[residx].registered 10042 || (residx != lun->pr_res_idx && lun->res_type < 4)) { 10043 ctsio->scsi_status = SCSI_STATUS_RESERV_CONFLICT; 10044 ctsio->io_hdr.status = CTL_SCSI_ERROR; 10045 retval = 1; 10046 goto bailout; 10047 } 10048 10049 } 10050 10051 if ((lun->flags & CTL_LUN_OFFLINE) 10052 && ((entry->flags & CTL_CMD_FLAG_OK_ON_OFFLINE) == 0)) { 10053 ctl_set_lun_not_ready(ctsio); 10054 retval = 1; 10055 goto bailout; 10056 } 10057 10058 /* 10059 * If the LUN is stopped, see if this particular command is allowed 10060 * for a stopped lun. Otherwise, reject it with 0x04,0x02. 
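 * (0x04,0x02 is the ASC/ASCQ pair for "Logical unit not ready,
 * initializing command required", which is what ctl_set_lun_stopped()
 * fills in.)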
10061 */ 10062 if ((lun->flags & CTL_LUN_STOPPED) 10063 && ((entry->flags & CTL_CMD_FLAG_OK_ON_STOPPED) == 0)) { 10064 /* "Logical unit not ready, initializing cmd. required" */ 10065 ctl_set_lun_stopped(ctsio); 10066 retval = 1; 10067 goto bailout; 10068 } 10069 10070 if ((lun->flags & CTL_LUN_INOPERABLE) 10071 && ((entry->flags & CTL_CMD_FLAG_OK_ON_INOPERABLE) == 0)) { 10072 /* "Medium format corrupted" */ 10073 ctl_set_medium_format_corrupted(ctsio); 10074 retval = 1; 10075 goto bailout; 10076 } 10077 10078 bailout: 10079 return (retval); 10080 10081 } 10082 10083 static void 10084 ctl_failover_io(union ctl_io *io, int have_lock) 10085 { 10086 ctl_set_busy(&io->scsiio); 10087 ctl_done_lock(io, have_lock); 10088 } 10089 10090 static void 10091 ctl_failover(void) 10092 { 10093 struct ctl_lun *lun; 10094 struct ctl_softc *ctl_softc; 10095 union ctl_io *next_io, *pending_io; 10096 union ctl_io *io; 10097 int lun_idx; 10098 int i; 10099 10100 ctl_softc = control_softc; 10101 10102 mtx_lock(&ctl_softc->ctl_lock); 10103 /* 10104 * Remove any cmds from the other SC from the rtr queue. These 10105 * will obviously only be for LUNs for which we're the primary. 10106 * We can't send status or get/send data for these commands. 10107 * Since they haven't been executed yet, we can just remove them. 10108 * We'll either abort them or delete them below, depending on 10109 * which HA mode we're in. 10110 */ 10111 for (io = (union ctl_io *)STAILQ_FIRST(&ctl_softc->rtr_queue); 10112 io != NULL; io = next_io) { 10113 next_io = (union ctl_io *)STAILQ_NEXT(&io->io_hdr, links); 10114 if (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) 10115 STAILQ_REMOVE(&ctl_softc->rtr_queue, &io->io_hdr, 10116 ctl_io_hdr, links); 10117 } 10118 10119 for (lun_idx=0; lun_idx < ctl_softc->num_luns; lun_idx++) { 10120 lun = ctl_softc->ctl_luns[lun_idx]; 10121 if (lun==NULL) 10122 continue; 10123 10124 /* 10125 * Processor LUNs are primary on both sides. 10126 * XXX will this always be true? 10127 */ 10128 if (lun->be_lun->lun_type == T_PROCESSOR) 10129 continue; 10130 10131 if ((lun->flags & CTL_LUN_PRIMARY_SC) 10132 && (ctl_softc->ha_mode == CTL_HA_MODE_SER_ONLY)) { 10133 printf("FAILOVER: primary lun %d\n", lun_idx); 10134 /* 10135 * Remove all commands from the other SC. First from the 10136 * blocked queue then from the ooa queue. Once we have 10137 * removed them. Call ctl_check_blocked to see if there 10138 * is anything that can run. 
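 * Note that an I/O on the blocked queue is also linked on the OOA
 * queue, so the first loop below unlinks it from both queues before
 * freeing it.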
10139 */ 10140 for (io = (union ctl_io *)TAILQ_FIRST( 10141 &lun->blocked_queue); io != NULL; io = next_io) { 10142 10143 next_io = (union ctl_io *)TAILQ_NEXT( 10144 &io->io_hdr, blocked_links); 10145 10146 if (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) { 10147 TAILQ_REMOVE(&lun->blocked_queue, 10148 &io->io_hdr,blocked_links); 10149 io->io_hdr.flags &= ~CTL_FLAG_BLOCKED; 10150 TAILQ_REMOVE(&lun->ooa_queue, 10151 &io->io_hdr, ooa_links); 10152 10153 ctl_free_io_internal(io, 1); 10154 } 10155 } 10156 10157 for (io = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); 10158 io != NULL; io = next_io) { 10159 10160 next_io = (union ctl_io *)TAILQ_NEXT( 10161 &io->io_hdr, ooa_links); 10162 10163 if (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) { 10164 10165 TAILQ_REMOVE(&lun->ooa_queue, 10166 &io->io_hdr, 10167 ooa_links); 10168 10169 ctl_free_io_internal(io, 1); 10170 } 10171 } 10172 ctl_check_blocked(lun); 10173 } else if ((lun->flags & CTL_LUN_PRIMARY_SC) 10174 && (ctl_softc->ha_mode == CTL_HA_MODE_XFER)) { 10175 10176 printf("FAILOVER: primary lun %d\n", lun_idx); 10177 /* 10178 * Abort all commands from the other SC. We can't 10179 * send status back for them now. These should get 10180 * cleaned up when they are completed or come out 10181 * for a datamove operation. 10182 */ 10183 for (io = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); 10184 io != NULL; io = next_io) { 10185 next_io = (union ctl_io *)TAILQ_NEXT( 10186 &io->io_hdr, ooa_links); 10187 10188 if (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) 10189 io->io_hdr.flags |= CTL_FLAG_ABORT; 10190 } 10191 } else if (((lun->flags & CTL_LUN_PRIMARY_SC) == 0) 10192 && (ctl_softc->ha_mode == CTL_HA_MODE_XFER)) { 10193 10194 printf("FAILOVER: secondary lun %d\n", lun_idx); 10195 10196 lun->flags |= CTL_LUN_PRIMARY_SC; 10197 10198 /* 10199 * We send all I/O that was sent to this controller 10200 * and redirected to the other side back with 10201 * busy status, and have the initiator retry it. 10202 * Figuring out how much data has been transferred, 10203 * etc. and picking up where we left off would be 10204 * very tricky. 10205 * 10206 * XXX KDM need to remove I/O from the blocked 10207 * queue as well! 10208 */ 10209 for (pending_io = (union ctl_io *)TAILQ_FIRST( 10210 &lun->ooa_queue); pending_io != NULL; 10211 pending_io = next_io) { 10212 10213 next_io = (union ctl_io *)TAILQ_NEXT( 10214 &pending_io->io_hdr, ooa_links); 10215 10216 pending_io->io_hdr.flags &= 10217 ~CTL_FLAG_SENT_2OTHER_SC; 10218 10219 if (pending_io->io_hdr.flags & 10220 CTL_FLAG_IO_ACTIVE) { 10221 pending_io->io_hdr.flags |= 10222 CTL_FLAG_FAILOVER; 10223 } else { 10224 ctl_set_busy(&pending_io->scsiio); 10225 ctl_done_lock(pending_io, 10226 /*have_lock*/1); 10227 } 10228 } 10229 10230 /* 10231 * Build Unit Attention 10232 */ 10233 for (i = 0; i < CTL_MAX_INITIATORS; i++) { 10234 lun->pending_sense[i].ua_pending |= 10235 CTL_UA_ASYM_ACC_CHANGE; 10236 } 10237 } else if (((lun->flags & CTL_LUN_PRIMARY_SC) == 0) 10238 && (ctl_softc->ha_mode == CTL_HA_MODE_SER_ONLY)) { 10239 printf("FAILOVER: secondary lun %d\n", lun_idx); 10240 /* 10241 * if the first io on the OOA is not on the RtR queue 10242 * add it. 
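 * Every I/O behind it on the OOA queue is then re-run through
 * ctl_check_ooa() against the commands ahead of it, and either put
 * back on the blocked queue, queued to the RtR queue, or completed
 * with an error, much as if it had been submitted locally.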
10243 */ 10244 lun->flags |= CTL_LUN_PRIMARY_SC; 10245 10246 pending_io = (union ctl_io *)TAILQ_FIRST( 10247 &lun->ooa_queue); 10248 if (pending_io==NULL) { 10249 printf("Nothing on OOA queue\n"); 10250 continue; 10251 } 10252 10253 pending_io->io_hdr.flags &= ~CTL_FLAG_SENT_2OTHER_SC; 10254 if ((pending_io->io_hdr.flags & 10255 CTL_FLAG_IS_WAS_ON_RTR) == 0) { 10256 pending_io->io_hdr.flags |= 10257 CTL_FLAG_IS_WAS_ON_RTR; 10258 STAILQ_INSERT_TAIL(&ctl_softc->rtr_queue, 10259 &pending_io->io_hdr, links); 10260 } 10261 #if 0 10262 else 10263 { 10264 printf("Tag 0x%04x is running\n", 10265 pending_io->scsiio.tag_num); 10266 } 10267 #endif 10268 10269 next_io = (union ctl_io *)TAILQ_NEXT( 10270 &pending_io->io_hdr, ooa_links); 10271 for (pending_io=next_io; pending_io != NULL; 10272 pending_io = next_io) { 10273 pending_io->io_hdr.flags &= 10274 ~CTL_FLAG_SENT_2OTHER_SC; 10275 next_io = (union ctl_io *)TAILQ_NEXT( 10276 &pending_io->io_hdr, ooa_links); 10277 if (pending_io->io_hdr.flags & 10278 CTL_FLAG_IS_WAS_ON_RTR) { 10279 #if 0 10280 printf("Tag 0x%04x is running\n", 10281 pending_io->scsiio.tag_num); 10282 #endif 10283 continue; 10284 } 10285 10286 switch (ctl_check_ooa(lun, pending_io, 10287 (union ctl_io *)TAILQ_PREV( 10288 &pending_io->io_hdr, ctl_ooaq, 10289 ooa_links))) { 10290 10291 case CTL_ACTION_BLOCK: 10292 TAILQ_INSERT_TAIL(&lun->blocked_queue, 10293 &pending_io->io_hdr, 10294 blocked_links); 10295 pending_io->io_hdr.flags |= 10296 CTL_FLAG_BLOCKED; 10297 break; 10298 case CTL_ACTION_PASS: 10299 case CTL_ACTION_SKIP: 10300 pending_io->io_hdr.flags |= 10301 CTL_FLAG_IS_WAS_ON_RTR; 10302 STAILQ_INSERT_TAIL( 10303 &ctl_softc->rtr_queue, 10304 &pending_io->io_hdr, links); 10305 break; 10306 case CTL_ACTION_OVERLAP: 10307 ctl_set_overlapped_cmd( 10308 (struct ctl_scsiio *)pending_io); 10309 ctl_done_lock(pending_io, 10310 /*have_lock*/ 1); 10311 break; 10312 case CTL_ACTION_OVERLAP_TAG: 10313 ctl_set_overlapped_tag( 10314 (struct ctl_scsiio *)pending_io, 10315 pending_io->scsiio.tag_num & 0xff); 10316 ctl_done_lock(pending_io, 10317 /*have_lock*/ 1); 10318 break; 10319 case CTL_ACTION_ERROR: 10320 default: 10321 ctl_set_internal_failure( 10322 (struct ctl_scsiio *)pending_io, 10323 0, // sks_valid 10324 0); //retry count 10325 ctl_done_lock(pending_io, 10326 /*have_lock*/ 1); 10327 break; 10328 } 10329 } 10330 10331 /* 10332 * Build Unit Attention 10333 */ 10334 for (i = 0; i < CTL_MAX_INITIATORS; i++) { 10335 lun->pending_sense[i].ua_pending |= 10336 CTL_UA_ASYM_ACC_CHANGE; 10337 } 10338 } else { 10339 panic("Unhandled HA mode failover, LUN flags = %#x, " 10340 "ha_mode = #%x", lun->flags, ctl_softc->ha_mode); 10341 } 10342 } 10343 ctl_pause_rtr = 0; 10344 mtx_unlock(&ctl_softc->ctl_lock); 10345 } 10346 10347 static int 10348 ctl_scsiio_precheck(struct ctl_softc *ctl_softc, struct ctl_scsiio *ctsio) 10349 { 10350 struct ctl_lun *lun; 10351 struct ctl_cmd_entry *entry; 10352 uint8_t opcode; 10353 uint32_t initidx; 10354 int retval; 10355 10356 retval = 0; 10357 10358 lun = NULL; 10359 10360 opcode = ctsio->cdb[0]; 10361 10362 mtx_lock(&ctl_softc->ctl_lock); 10363 10364 if ((ctsio->io_hdr.nexus.targ_lun < CTL_MAX_LUNS) 10365 && (ctl_softc->ctl_luns[ctsio->io_hdr.nexus.targ_lun] != NULL)) { 10366 lun = ctl_softc->ctl_luns[ctsio->io_hdr.nexus.targ_lun]; 10367 /* 10368 * If the LUN is invalid, pretend that it doesn't exist. 10369 * It will go away as soon as all pending I/O has been 10370 * completed. 
10371 */ 10372 if (lun->flags & CTL_LUN_DISABLED) { 10373 lun = NULL; 10374 } else { 10375 ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr = lun; 10376 ctsio->io_hdr.ctl_private[CTL_PRIV_BACKEND_LUN].ptr = 10377 lun->be_lun; 10378 if (lun->be_lun->lun_type == T_PROCESSOR) { 10379 ctsio->io_hdr.flags |= CTL_FLAG_CONTROL_DEV; 10380 } 10381 } 10382 } else { 10383 ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr = NULL; 10384 ctsio->io_hdr.ctl_private[CTL_PRIV_BACKEND_LUN].ptr = NULL; 10385 } 10386 10387 entry = &ctl_cmd_table[opcode]; 10388 10389 ctsio->io_hdr.flags &= ~CTL_FLAG_DATA_MASK; 10390 ctsio->io_hdr.flags |= entry->flags & CTL_FLAG_DATA_MASK; 10391 10392 /* 10393 * Check to see whether we can send this command to LUNs that don't 10394 * exist. This should pretty much only be the case for inquiry 10395 * and request sense. Further checks, below, really require having 10396 * a LUN, so we can't really check the command anymore. Just put 10397 * it on the rtr queue. 10398 */ 10399 if (lun == NULL) { 10400 if (entry->flags & CTL_CMD_FLAG_OK_ON_ALL_LUNS) 10401 goto queue_rtr; 10402 10403 ctl_set_unsupported_lun(ctsio); 10404 mtx_unlock(&ctl_softc->ctl_lock); 10405 ctl_done((union ctl_io *)ctsio); 10406 goto bailout; 10407 } else { 10408 /* 10409 * Every I/O goes into the OOA queue for a particular LUN, and 10410 * stays there until completion. 10411 */ 10412 TAILQ_INSERT_TAIL(&lun->ooa_queue, &ctsio->io_hdr, ooa_links); 10413 10414 /* 10415 * Make sure we support this particular command on this LUN. 10416 * e.g., we don't support writes to the control LUN. 10417 */ 10418 switch (lun->be_lun->lun_type) { 10419 case T_PROCESSOR: 10420 if (((entry->flags & CTL_CMD_FLAG_OK_ON_PROC) == 0) 10421 && ((entry->flags & CTL_CMD_FLAG_OK_ON_ALL_LUNS) 10422 == 0)) { 10423 ctl_set_invalid_opcode(ctsio); 10424 mtx_unlock(&ctl_softc->ctl_lock); 10425 ctl_done((union ctl_io *)ctsio); 10426 goto bailout; 10427 } 10428 break; 10429 case T_DIRECT: 10430 if (((entry->flags & CTL_CMD_FLAG_OK_ON_SLUN) == 0) 10431 && ((entry->flags & CTL_CMD_FLAG_OK_ON_ALL_LUNS) 10432 == 0)){ 10433 ctl_set_invalid_opcode(ctsio); 10434 mtx_unlock(&ctl_softc->ctl_lock); 10435 ctl_done((union ctl_io *)ctsio); 10436 goto bailout; 10437 } 10438 break; 10439 default: 10440 printf("Unsupported CTL LUN type %d\n", 10441 lun->be_lun->lun_type); 10442 panic("Unsupported CTL LUN type %d\n", 10443 lun->be_lun->lun_type); 10444 break; /* NOTREACHED */ 10445 } 10446 } 10447 10448 initidx = ctl_get_initindex(&ctsio->io_hdr.nexus); 10449 10450 /* 10451 * If we've got a request sense, it'll clear the contingent 10452 * allegiance condition. Otherwise, if we have a CA condition for 10453 * this initiator, clear it, because it sent down a command other 10454 * than request sense. 10455 */ 10456 if ((opcode != REQUEST_SENSE) 10457 && (ctl_is_set(lun->have_ca, initidx))) 10458 ctl_clear_mask(lun->have_ca, initidx); 10459 10460 /* 10461 * If the command has this flag set, it handles its own unit 10462 * attention reporting, we shouldn't do anything. Otherwise we 10463 * check for any pending unit attentions, and send them back to the 10464 * initiator. We only do this when a command initially comes in, 10465 * not when we pull it off the blocked queue. 10466 * 10467 * According to SAM-3, section 5.3.2, the order that things get 10468 * presented back to the host is basically unit attentions caused 10469 * by some sort of reset event, busy status, reservation conflicts 10470 * or task set full, and finally any other status. 
10471 * 10472 * One issue here is that some of the unit attentions we report 10473 * don't fall into the "reset" category (e.g. "reported luns data 10474 * has changed"). So reporting it here, before the reservation 10475 * check, may be technically wrong. I guess the only thing to do 10476 * would be to check for and report the reset events here, and then 10477 * check for the other unit attention types after we check for a 10478 * reservation conflict. 10479 * 10480 * XXX KDM need to fix this 10481 */ 10482 if ((entry->flags & CTL_CMD_FLAG_NO_SENSE) == 0) { 10483 ctl_ua_type ua_type; 10484 10485 ua_type = lun->pending_sense[initidx].ua_pending; 10486 if (ua_type != CTL_UA_NONE) { 10487 scsi_sense_data_type sense_format; 10488 10489 if (lun != NULL) 10490 sense_format = (lun->flags & 10491 CTL_LUN_SENSE_DESC) ? SSD_TYPE_DESC : 10492 SSD_TYPE_FIXED; 10493 else 10494 sense_format = SSD_TYPE_FIXED; 10495 10496 ua_type = ctl_build_ua(ua_type, &ctsio->sense_data, 10497 sense_format); 10498 if (ua_type != CTL_UA_NONE) { 10499 ctsio->scsi_status = SCSI_STATUS_CHECK_COND; 10500 ctsio->io_hdr.status = CTL_SCSI_ERROR | 10501 CTL_AUTOSENSE; 10502 ctsio->sense_len = SSD_FULL_SIZE; 10503 lun->pending_sense[initidx].ua_pending &= 10504 ~ua_type; 10505 mtx_unlock(&ctl_softc->ctl_lock); 10506 ctl_done((union ctl_io *)ctsio); 10507 goto bailout; 10508 } 10509 } 10510 } 10511 10512 10513 if (ctl_scsiio_lun_check(ctl_softc, lun, entry, ctsio) != 0) { 10514 mtx_unlock(&ctl_softc->ctl_lock); 10515 ctl_done((union ctl_io *)ctsio); 10516 goto bailout; 10517 } 10518 10519 /* 10520 * XXX CHD this is where we want to send IO to other side if 10521 * this LUN is secondary on this SC. We will need to make a copy 10522 * of the IO and flag the IO on this side as SENT_2OTHER and the flag 10523 * the copy we send as FROM_OTHER. 10524 * We also need to stuff the address of the original IO so we can 10525 * find it easily. Something similar will need be done on the other 10526 * side so when we are done we can find the copy. 10527 */ 10528 if ((lun->flags & CTL_LUN_PRIMARY_SC) == 0) { 10529 union ctl_ha_msg msg_info; 10530 int isc_retval; 10531 10532 ctsio->io_hdr.flags |= CTL_FLAG_SENT_2OTHER_SC; 10533 10534 msg_info.hdr.msg_type = CTL_MSG_SERIALIZE; 10535 msg_info.hdr.original_sc = (union ctl_io *)ctsio; 10536 #if 0 10537 printf("1. ctsio %p\n", ctsio); 10538 #endif 10539 msg_info.hdr.serializing_sc = NULL; 10540 msg_info.hdr.nexus = ctsio->io_hdr.nexus; 10541 msg_info.scsi.tag_num = ctsio->tag_num; 10542 msg_info.scsi.tag_type = ctsio->tag_type; 10543 memcpy(msg_info.scsi.cdb, ctsio->cdb, CTL_MAX_CDBLEN); 10544 10545 ctsio->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE; 10546 10547 if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL, 10548 (void *)&msg_info, sizeof(msg_info), 0)) > 10549 CTL_HA_STATUS_SUCCESS) { 10550 printf("CTL:precheck, ctl_ha_msg_send returned %d\n", 10551 isc_retval); 10552 printf("CTL:opcode is %x\n",opcode); 10553 } else { 10554 #if 0 10555 printf("CTL:Precheck sent msg, opcode is %x\n",opcode); 10556 #endif 10557 } 10558 10559 /* 10560 * XXX KDM this I/O is off the incoming queue, but hasn't 10561 * been inserted on any other queue. We may need to come 10562 * up with a holding queue while we wait for serialization 10563 * so that we have an idea of what we're waiting for from 10564 * the other side. 
10565 */ 10566 goto bailout_unlock; 10567 } 10568 10569 switch (ctl_check_ooa(lun, (union ctl_io *)ctsio, 10570 (union ctl_io *)TAILQ_PREV(&ctsio->io_hdr, 10571 ctl_ooaq, ooa_links))) { 10572 case CTL_ACTION_BLOCK: 10573 ctsio->io_hdr.flags |= CTL_FLAG_BLOCKED; 10574 TAILQ_INSERT_TAIL(&lun->blocked_queue, &ctsio->io_hdr, 10575 blocked_links); 10576 goto bailout_unlock; 10577 break; /* NOTREACHED */ 10578 case CTL_ACTION_PASS: 10579 case CTL_ACTION_SKIP: 10580 goto queue_rtr; 10581 break; /* NOTREACHED */ 10582 case CTL_ACTION_OVERLAP: 10583 ctl_set_overlapped_cmd(ctsio); 10584 mtx_unlock(&ctl_softc->ctl_lock); 10585 ctl_done((union ctl_io *)ctsio); 10586 goto bailout; 10587 break; /* NOTREACHED */ 10588 case CTL_ACTION_OVERLAP_TAG: 10589 ctl_set_overlapped_tag(ctsio, ctsio->tag_num & 0xff); 10590 mtx_unlock(&ctl_softc->ctl_lock); 10591 ctl_done((union ctl_io *)ctsio); 10592 goto bailout; 10593 break; /* NOTREACHED */ 10594 case CTL_ACTION_ERROR: 10595 default: 10596 ctl_set_internal_failure(ctsio, 10597 /*sks_valid*/ 0, 10598 /*retry_count*/ 0); 10599 mtx_unlock(&ctl_softc->ctl_lock); 10600 ctl_done((union ctl_io *)ctsio); 10601 goto bailout; 10602 break; /* NOTREACHED */ 10603 } 10604 10605 goto bailout_unlock; 10606 10607 queue_rtr: 10608 ctsio->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR; 10609 STAILQ_INSERT_TAIL(&ctl_softc->rtr_queue, &ctsio->io_hdr, links); 10610 10611 bailout_unlock: 10612 mtx_unlock(&ctl_softc->ctl_lock); 10613 10614 bailout: 10615 return (retval); 10616 } 10617 10618 static int 10619 ctl_scsiio(struct ctl_scsiio *ctsio) 10620 { 10621 int retval; 10622 struct ctl_cmd_entry *entry; 10623 10624 retval = CTL_RETVAL_COMPLETE; 10625 10626 CTL_DEBUG_PRINT(("ctl_scsiio cdb[0]=%02X\n", ctsio->cdb[0])); 10627 10628 entry = &ctl_cmd_table[ctsio->cdb[0]]; 10629 10630 /* 10631 * If this I/O has been aborted, just send it straight to 10632 * ctl_done() without executing it. 10633 */ 10634 if (ctsio->io_hdr.flags & CTL_FLAG_ABORT) { 10635 ctl_done((union ctl_io *)ctsio); 10636 goto bailout; 10637 } 10638 10639 /* 10640 * All the checks should have been handled by ctl_scsiio_precheck(). 10641 * We should be clear now to just execute the I/O. 10642 */ 10643 retval = entry->execute(ctsio); 10644 10645 bailout: 10646 return (retval); 10647 } 10648 10649 /* 10650 * Since we only implement one target right now, a bus reset simply resets 10651 * our single target. 
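 * The only difference from a target reset is the unit attention type
 * passed down to ctl_target_reset(): CTL_UA_BUS_RESET instead of
 * CTL_UA_TARG_RESET.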
10652 */ 10653 static int 10654 ctl_bus_reset(struct ctl_softc *ctl_softc, union ctl_io *io) 10655 { 10656 return(ctl_target_reset(ctl_softc, io, CTL_UA_BUS_RESET)); 10657 } 10658 10659 static int 10660 ctl_target_reset(struct ctl_softc *ctl_softc, union ctl_io *io, 10661 ctl_ua_type ua_type) 10662 { 10663 struct ctl_lun *lun; 10664 int retval; 10665 10666 if (!(io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC)) { 10667 union ctl_ha_msg msg_info; 10668 10669 io->io_hdr.flags |= CTL_FLAG_SENT_2OTHER_SC; 10670 msg_info.hdr.nexus = io->io_hdr.nexus; 10671 if (ua_type==CTL_UA_TARG_RESET) 10672 msg_info.task.task_action = CTL_TASK_TARGET_RESET; 10673 else 10674 msg_info.task.task_action = CTL_TASK_BUS_RESET; 10675 msg_info.hdr.msg_type = CTL_MSG_MANAGE_TASKS; 10676 msg_info.hdr.original_sc = NULL; 10677 msg_info.hdr.serializing_sc = NULL; 10678 if (CTL_HA_STATUS_SUCCESS != ctl_ha_msg_send(CTL_HA_CHAN_CTL, 10679 (void *)&msg_info, sizeof(msg_info), 0)) { 10680 } 10681 } 10682 retval = 0; 10683 10684 STAILQ_FOREACH(lun, &ctl_softc->lun_list, links) 10685 retval += ctl_lun_reset(lun, io, ua_type); 10686 10687 return (retval); 10688 } 10689 10690 /* 10691 * The LUN should always be set. The I/O is optional, and is used to 10692 * distinguish between I/Os sent by this initiator, and by other 10693 * initiators. We set unit attention for initiators other than this one. 10694 * SAM-3 is vague on this point. It does say that a unit attention should 10695 * be established for other initiators when a LUN is reset (see section 10696 * 5.7.3), but it doesn't specifically say that the unit attention should 10697 * be established for this particular initiator when a LUN is reset. Here 10698 * is the relevant text, from SAM-3 rev 8: 10699 * 10700 * 5.7.2 When a SCSI initiator port aborts its own tasks 10701 * 10702 * When a SCSI initiator port causes its own task(s) to be aborted, no 10703 * notification that the task(s) have been aborted shall be returned to 10704 * the SCSI initiator port other than the completion response for the 10705 * command or task management function action that caused the task(s) to 10706 * be aborted and notification(s) associated with related effects of the 10707 * action (e.g., a reset unit attention condition). 10708 * 10709 * XXX KDM for now, we're setting unit attention for all initiators. 10710 */ 10711 static int 10712 ctl_lun_reset(struct ctl_lun *lun, union ctl_io *io, ctl_ua_type ua_type) 10713 { 10714 union ctl_io *xio; 10715 #if 0 10716 uint32_t initindex; 10717 #endif 10718 int i; 10719 10720 /* 10721 * Run through the OOA queue and abort each I/O. 10722 */ 10723 #if 0 10724 TAILQ_FOREACH((struct ctl_io_hdr *)xio, &lun->ooa_queue, ooa_links) { 10725 #endif 10726 for (xio = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); xio != NULL; 10727 xio = (union ctl_io *)TAILQ_NEXT(&xio->io_hdr, ooa_links)) { 10728 xio->io_hdr.flags |= CTL_FLAG_ABORT; 10729 } 10730 10731 /* 10732 * This version sets unit attention for every 10733 */ 10734 #if 0 10735 initindex = ctl_get_initindex(&io->io_hdr.nexus); 10736 for (i = 0; i < CTL_MAX_INITIATORS; i++) { 10737 if (initindex == i) 10738 continue; 10739 lun->pending_sense[i].ua_pending |= ua_type; 10740 } 10741 #endif 10742 10743 /* 10744 * A reset (any kind, really) clears reservations established with 10745 * RESERVE/RELEASE. It does not clear reservations established 10746 * with PERSISTENT RESERVE OUT, but we don't support that at the 10747 * moment anyway. See SPC-2, section 5.6. 
SPC-3 doesn't address 10748 * reservations made with the RESERVE/RELEASE commands, because 10749 * those commands are obsolete in SPC-3. 10750 */ 10751 lun->flags &= ~CTL_LUN_RESERVED; 10752 10753 for (i = 0; i < CTL_MAX_INITIATORS; i++) { 10754 ctl_clear_mask(lun->have_ca, i); 10755 lun->pending_sense[i].ua_pending |= ua_type; 10756 } 10757 10758 return (0); 10759 } 10760 10761 static int 10762 ctl_abort_task(union ctl_io *io) 10763 { 10764 union ctl_io *xio; 10765 struct ctl_lun *lun; 10766 struct ctl_softc *ctl_softc; 10767 #if 0 10768 struct sbuf sb; 10769 char printbuf[128]; 10770 #endif 10771 int found; 10772 10773 ctl_softc = control_softc; 10774 found = 0; 10775 10776 /* 10777 * Look up the LUN. 10778 */ 10779 if ((io->io_hdr.nexus.targ_lun < CTL_MAX_LUNS) 10780 && (ctl_softc->ctl_luns[io->io_hdr.nexus.targ_lun] != NULL)) 10781 lun = ctl_softc->ctl_luns[io->io_hdr.nexus.targ_lun]; 10782 else 10783 goto bailout; 10784 10785 #if 0 10786 printf("ctl_abort_task: called for lun %lld, tag %d type %d\n", 10787 lun->lun, io->taskio.tag_num, io->taskio.tag_type); 10788 #endif 10789 10790 /* 10791 * Run through the OOA queue and attempt to find the given I/O. 10792 * The target port, initiator ID, tag type and tag number have to 10793 * match the values that we got from the initiator. If we have an 10794 * untagged command to abort, simply abort the first untagged command 10795 * we come to. We only allow one untagged command at a time of course. 10796 */ 10797 #if 0 10798 TAILQ_FOREACH((struct ctl_io_hdr *)xio, &lun->ooa_queue, ooa_links) { 10799 #endif 10800 for (xio = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); xio != NULL; 10801 xio = (union ctl_io *)TAILQ_NEXT(&xio->io_hdr, ooa_links)) { 10802 #if 0 10803 sbuf_new(&sb, printbuf, sizeof(printbuf), SBUF_FIXEDLEN); 10804 10805 sbuf_printf(&sb, "LUN %lld tag %d type %d%s%s%s%s: ", 10806 lun->lun, xio->scsiio.tag_num, 10807 xio->scsiio.tag_type, 10808 (xio->io_hdr.blocked_links.tqe_prev 10809 == NULL) ? "" : " BLOCKED", 10810 (xio->io_hdr.flags & 10811 CTL_FLAG_DMA_INPROG) ? " DMA" : "", 10812 (xio->io_hdr.flags & 10813 CTL_FLAG_ABORT) ? " ABORT" : "", 10814 (xio->io_hdr.flags & 10815 CTL_FLAG_IS_WAS_ON_RTR ? " RTR" : "")); 10816 ctl_scsi_command_string(&xio->scsiio, NULL, &sb); 10817 sbuf_finish(&sb); 10818 printf("%s\n", sbuf_data(&sb)); 10819 #endif 10820 10821 if ((xio->io_hdr.nexus.targ_port == io->io_hdr.nexus.targ_port) 10822 && (xio->io_hdr.nexus.initid.id == 10823 io->io_hdr.nexus.initid.id)) { 10824 /* 10825 * If the abort says that the task is untagged, the 10826 * task in the queue must be untagged. Otherwise, 10827 * we just check to see whether the tag numbers 10828 * match. This is because the QLogic firmware 10829 * doesn't pass back the tag type in an abort 10830 * request. 10831 */ 10832 #if 0 10833 if (((xio->scsiio.tag_type == CTL_TAG_UNTAGGED) 10834 && (io->taskio.tag_type == CTL_TAG_UNTAGGED)) 10835 || (xio->scsiio.tag_num == io->taskio.tag_num)) { 10836 #endif 10837 /* 10838 * XXX KDM we've got problems with FC, because it 10839 * doesn't send down a tag type with aborts. So we 10840 * can only really go by the tag number... 10841 * This may cause problems with parallel SCSI. 10842 * Need to figure that out!! 
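 * The stricter check (tag type plus tag number, with untagged matching
 * untagged) is preserved in the #if 0 block above; only the tag number
 * comparison below is actually used.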
10843 */ 10844 if (xio->scsiio.tag_num == io->taskio.tag_num) { 10845 xio->io_hdr.flags |= CTL_FLAG_ABORT; 10846 found = 1; 10847 if ((io->io_hdr.flags & 10848 CTL_FLAG_FROM_OTHER_SC) == 0 && 10849 !(lun->flags & CTL_LUN_PRIMARY_SC)) { 10850 union ctl_ha_msg msg_info; 10851 10852 io->io_hdr.flags |= 10853 CTL_FLAG_SENT_2OTHER_SC; 10854 msg_info.hdr.nexus = io->io_hdr.nexus; 10855 msg_info.task.task_action = 10856 CTL_TASK_ABORT_TASK; 10857 msg_info.task.tag_num = 10858 io->taskio.tag_num; 10859 msg_info.task.tag_type = 10860 io->taskio.tag_type; 10861 msg_info.hdr.msg_type = 10862 CTL_MSG_MANAGE_TASKS; 10863 msg_info.hdr.original_sc = NULL; 10864 msg_info.hdr.serializing_sc = NULL; 10865 #if 0 10866 printf("Sent Abort to other side\n"); 10867 #endif 10868 if (CTL_HA_STATUS_SUCCESS != 10869 ctl_ha_msg_send(CTL_HA_CHAN_CTL, 10870 (void *)&msg_info, 10871 sizeof(msg_info), 0)) { 10872 } 10873 } 10874 #if 0 10875 printf("ctl_abort_task: found I/O to abort\n"); 10876 #endif 10877 break; 10878 } 10879 } 10880 } 10881 10882 bailout: 10883 10884 if (found == 0) { 10885 /* 10886 * This isn't really an error. It's entirely possible for 10887 * the abort and command completion to cross on the wire. 10888 * This is more of an informative/diagnostic error. 10889 */ 10890 #if 0 10891 printf("ctl_abort_task: ABORT sent for nonexistent I/O: " 10892 "%d:%d:%d:%d tag %d type %d\n", 10893 io->io_hdr.nexus.initid.id, 10894 io->io_hdr.nexus.targ_port, 10895 io->io_hdr.nexus.targ_target.id, 10896 io->io_hdr.nexus.targ_lun, io->taskio.tag_num, 10897 io->taskio.tag_type); 10898 #endif 10899 return (1); 10900 } else 10901 return (0); 10902 } 10903 10904 /* 10905 * This routine cannot block! It must be callable from an interrupt 10906 * handler as well as from the work thread. 
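 * It runs with ctl_lock held (see the mtx_assert() below), so nothing
 * it calls is allowed to sleep.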
10907 */ 10908 static void 10909 ctl_run_task_queue(struct ctl_softc *ctl_softc) 10910 { 10911 union ctl_io *io, *next_io; 10912 10913 mtx_assert(&ctl_softc->ctl_lock, MA_OWNED); 10914 10915 CTL_DEBUG_PRINT(("ctl_run_task_queue\n")); 10916 10917 for (io = (union ctl_io *)STAILQ_FIRST(&ctl_softc->task_queue); 10918 io != NULL; io = next_io) { 10919 int retval; 10920 const char *task_desc; 10921 10922 next_io = (union ctl_io *)STAILQ_NEXT(&io->io_hdr, links); 10923 10924 retval = 0; 10925 10926 switch (io->io_hdr.io_type) { 10927 case CTL_IO_TASK: { 10928 task_desc = ctl_scsi_task_string(&io->taskio); 10929 if (task_desc != NULL) { 10930 #ifdef NEEDTOPORT 10931 csevent_log(CSC_CTL | CSC_SHELF_SW | 10932 CTL_TASK_REPORT, 10933 csevent_LogType_Trace, 10934 csevent_Severity_Information, 10935 csevent_AlertLevel_Green, 10936 csevent_FRU_Firmware, 10937 csevent_FRU_Unknown, 10938 "CTL: received task: %s",task_desc); 10939 #endif 10940 } else { 10941 #ifdef NEEDTOPORT 10942 csevent_log(CSC_CTL | CSC_SHELF_SW | 10943 CTL_TASK_REPORT, 10944 csevent_LogType_Trace, 10945 csevent_Severity_Information, 10946 csevent_AlertLevel_Green, 10947 csevent_FRU_Firmware, 10948 csevent_FRU_Unknown, 10949 "CTL: received unknown task " 10950 "type: %d (%#x)", 10951 io->taskio.task_action, 10952 io->taskio.task_action); 10953 #endif 10954 } 10955 switch (io->taskio.task_action) { 10956 case CTL_TASK_ABORT_TASK: 10957 retval = ctl_abort_task(io); 10958 break; 10959 case CTL_TASK_ABORT_TASK_SET: 10960 break; 10961 case CTL_TASK_CLEAR_ACA: 10962 break; 10963 case CTL_TASK_CLEAR_TASK_SET: 10964 break; 10965 case CTL_TASK_LUN_RESET: { 10966 struct ctl_lun *lun; 10967 uint32_t targ_lun; 10968 int retval; 10969 10970 targ_lun = io->io_hdr.nexus.targ_lun; 10971 10972 if ((targ_lun < CTL_MAX_LUNS) 10973 && (ctl_softc->ctl_luns[targ_lun] != NULL)) 10974 lun = ctl_softc->ctl_luns[targ_lun]; 10975 else { 10976 retval = 1; 10977 break; 10978 } 10979 10980 if (!(io->io_hdr.flags & 10981 CTL_FLAG_FROM_OTHER_SC)) { 10982 union ctl_ha_msg msg_info; 10983 10984 io->io_hdr.flags |= 10985 CTL_FLAG_SENT_2OTHER_SC; 10986 msg_info.hdr.msg_type = 10987 CTL_MSG_MANAGE_TASKS; 10988 msg_info.hdr.nexus = io->io_hdr.nexus; 10989 msg_info.task.task_action = 10990 CTL_TASK_LUN_RESET; 10991 msg_info.hdr.original_sc = NULL; 10992 msg_info.hdr.serializing_sc = NULL; 10993 if (CTL_HA_STATUS_SUCCESS != 10994 ctl_ha_msg_send(CTL_HA_CHAN_CTL, 10995 (void *)&msg_info, 10996 sizeof(msg_info), 0)) { 10997 } 10998 } 10999 11000 retval = ctl_lun_reset(lun, io, 11001 CTL_UA_LUN_RESET); 11002 break; 11003 } 11004 case CTL_TASK_TARGET_RESET: 11005 retval = ctl_target_reset(ctl_softc, io, 11006 CTL_UA_TARG_RESET); 11007 break; 11008 case CTL_TASK_BUS_RESET: 11009 retval = ctl_bus_reset(ctl_softc, io); 11010 break; 11011 case CTL_TASK_PORT_LOGIN: 11012 break; 11013 case CTL_TASK_PORT_LOGOUT: 11014 break; 11015 default: 11016 printf("ctl_run_task_queue: got unknown task " 11017 "management event %d\n", 11018 io->taskio.task_action); 11019 break; 11020 } 11021 if (retval == 0) 11022 io->io_hdr.status = CTL_SUCCESS; 11023 else 11024 io->io_hdr.status = CTL_ERROR; 11025 11026 STAILQ_REMOVE(&ctl_softc->task_queue, &io->io_hdr, 11027 ctl_io_hdr, links); 11028 /* 11029 * This will queue this I/O to the done queue, but the 11030 * work thread won't be able to process it until we 11031 * return and the lock is released. 
11032 */ 11033 ctl_done_lock(io, /*have_lock*/ 1); 11034 break; 11035 } 11036 default: { 11037 11038 printf("%s: invalid I/O type %d msg %d cdb %x" 11039 " iptl: %ju:%d:%ju:%d tag 0x%04x\n", 11040 __func__, io->io_hdr.io_type, 11041 io->io_hdr.msg_type, io->scsiio.cdb[0], 11042 (uintmax_t)io->io_hdr.nexus.initid.id, 11043 io->io_hdr.nexus.targ_port, 11044 (uintmax_t)io->io_hdr.nexus.targ_target.id, 11045 io->io_hdr.nexus.targ_lun, 11046 (io->io_hdr.io_type == CTL_IO_TASK) ? 11047 io->taskio.tag_num : io->scsiio.tag_num); 11048 STAILQ_REMOVE(&ctl_softc->task_queue, &io->io_hdr, 11049 ctl_io_hdr, links); 11050 ctl_free_io_internal(io, 1); 11051 break; 11052 } 11053 } 11054 } 11055 11056 ctl_softc->flags &= ~CTL_FLAG_TASK_PENDING; 11057 } 11058 11059 /* 11060 * For HA operation. Handle commands that come in from the other 11061 * controller. 11062 */ 11063 static void 11064 ctl_handle_isc(union ctl_io *io) 11065 { 11066 int free_io; 11067 struct ctl_lun *lun; 11068 struct ctl_softc *ctl_softc; 11069 11070 ctl_softc = control_softc; 11071 11072 lun = ctl_softc->ctl_luns[io->io_hdr.nexus.targ_lun]; 11073 11074 switch (io->io_hdr.msg_type) { 11075 case CTL_MSG_SERIALIZE: 11076 free_io = ctl_serialize_other_sc_cmd(&io->scsiio, 11077 /*have_lock*/ 0); 11078 break; 11079 case CTL_MSG_R2R: { 11080 uint8_t opcode; 11081 struct ctl_cmd_entry *entry; 11082 11083 /* 11084 * This is only used in SER_ONLY mode. 11085 */ 11086 free_io = 0; 11087 opcode = io->scsiio.cdb[0]; 11088 entry = &ctl_cmd_table[opcode]; 11089 mtx_lock(&ctl_softc->ctl_lock); 11090 if (ctl_scsiio_lun_check(ctl_softc, lun, 11091 entry, (struct ctl_scsiio *)io) != 0) { 11092 ctl_done_lock(io, /*have_lock*/ 1); 11093 mtx_unlock(&ctl_softc->ctl_lock); 11094 break; 11095 } 11096 io->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR; 11097 STAILQ_INSERT_TAIL(&ctl_softc->rtr_queue, 11098 &io->io_hdr, links); 11099 mtx_unlock(&ctl_softc->ctl_lock); 11100 break; 11101 } 11102 case CTL_MSG_FINISH_IO: 11103 if (ctl_softc->ha_mode == CTL_HA_MODE_XFER) { 11104 free_io = 0; 11105 ctl_done_lock(io, /*have_lock*/ 0); 11106 } else { 11107 free_io = 1; 11108 mtx_lock(&ctl_softc->ctl_lock); 11109 TAILQ_REMOVE(&lun->ooa_queue, &io->io_hdr, 11110 ooa_links); 11111 STAILQ_REMOVE(&ctl_softc->task_queue, 11112 &io->io_hdr, ctl_io_hdr, links); 11113 ctl_check_blocked(lun); 11114 mtx_unlock(&ctl_softc->ctl_lock); 11115 } 11116 break; 11117 case CTL_MSG_PERS_ACTION: 11118 ctl_hndl_per_res_out_on_other_sc( 11119 (union ctl_ha_msg *)&io->presio.pr_msg); 11120 free_io = 1; 11121 break; 11122 case CTL_MSG_BAD_JUJU: 11123 free_io = 0; 11124 ctl_done_lock(io, /*have_lock*/ 0); 11125 break; 11126 case CTL_MSG_DATAMOVE: 11127 /* Only used in XFER mode */ 11128 free_io = 0; 11129 ctl_datamove_remote(io); 11130 break; 11131 case CTL_MSG_DATAMOVE_DONE: 11132 /* Only used in XFER mode */ 11133 free_io = 0; 11134 io->scsiio.be_move_done(io); 11135 break; 11136 default: 11137 free_io = 1; 11138 printf("%s: Invalid message type %d\n", 11139 __func__, io->io_hdr.msg_type); 11140 break; 11141 } 11142 if (free_io) 11143 ctl_free_io_internal(io, 0); 11144 11145 } 11146 11147 11148 /* 11149 * Returns the match type in the case of a match, or CTL_LUN_PAT_NONE if 11150 * there is no match. 
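 *
 * For illustration only: a descriptor whose pattern is CTL_LUN_PAT_ANY
 * matches every command outright, while one that names a particular
 * command class and also sets CTL_LUN_PAT_RANGE only matches commands
 * of that class whose LBA range (from ctl_get_lba_len()) overlaps the
 * descriptor's lba_range.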
11151 */ 11152 static ctl_lun_error_pattern 11153 ctl_cmd_pattern_match(struct ctl_scsiio *ctsio, struct ctl_error_desc *desc) 11154 { 11155 struct ctl_cmd_entry *entry; 11156 ctl_lun_error_pattern filtered_pattern, pattern; 11157 uint8_t opcode; 11158 11159 pattern = desc->error_pattern; 11160 11161 /* 11162 * XXX KDM we need more data passed into this function to match a 11163 * custom pattern, and we actually need to implement custom pattern 11164 * matching. 11165 */ 11166 if (pattern & CTL_LUN_PAT_CMD) 11167 return (CTL_LUN_PAT_CMD); 11168 11169 if ((pattern & CTL_LUN_PAT_MASK) == CTL_LUN_PAT_ANY) 11170 return (CTL_LUN_PAT_ANY); 11171 11172 opcode = ctsio->cdb[0]; 11173 entry = &ctl_cmd_table[opcode]; 11174 11175 filtered_pattern = entry->pattern & pattern; 11176 11177 /* 11178 * If the user requested specific flags in the pattern (e.g. 11179 * CTL_LUN_PAT_RANGE), make sure the command supports all of those 11180 * flags. 11181 * 11182 * If the user did not specify any flags, it doesn't matter whether 11183 * or not the command supports the flags. 11184 */ 11185 if ((filtered_pattern & ~CTL_LUN_PAT_MASK) != 11186 (pattern & ~CTL_LUN_PAT_MASK)) 11187 return (CTL_LUN_PAT_NONE); 11188 11189 /* 11190 * If the user asked for a range check, see if the requested LBA 11191 * range overlaps with this command's LBA range. 11192 */ 11193 if (filtered_pattern & CTL_LUN_PAT_RANGE) { 11194 uint64_t lba1; 11195 uint32_t len1; 11196 ctl_action action; 11197 int retval; 11198 11199 retval = ctl_get_lba_len((union ctl_io *)ctsio, &lba1, &len1); 11200 if (retval != 0) 11201 return (CTL_LUN_PAT_NONE); 11202 11203 action = ctl_extent_check_lba(lba1, len1, desc->lba_range.lba, 11204 desc->lba_range.len); 11205 /* 11206 * A "pass" means that the LBA ranges don't overlap, so 11207 * this doesn't match the user's range criteria. 11208 */ 11209 if (action == CTL_ACTION_PASS) 11210 return (CTL_LUN_PAT_NONE); 11211 } 11212 11213 return (filtered_pattern); 11214 } 11215 11216 static void 11217 ctl_inject_error(struct ctl_lun *lun, union ctl_io *io) 11218 { 11219 struct ctl_error_desc *desc, *desc2; 11220 11221 mtx_assert(&control_softc->ctl_lock, MA_OWNED); 11222 11223 STAILQ_FOREACH_SAFE(desc, &lun->error_list, links, desc2) { 11224 ctl_lun_error_pattern pattern; 11225 /* 11226 * Check to see whether this particular command matches 11227 * the pattern in the descriptor. 11228 */ 11229 pattern = ctl_cmd_pattern_match(&io->scsiio, desc); 11230 if ((pattern & CTL_LUN_PAT_MASK) == CTL_LUN_PAT_NONE) 11231 continue; 11232 11233 switch (desc->lun_error & CTL_LUN_INJ_TYPE) { 11234 case CTL_LUN_INJ_ABORTED: 11235 ctl_set_aborted(&io->scsiio); 11236 break; 11237 case CTL_LUN_INJ_MEDIUM_ERR: 11238 ctl_set_medium_error(&io->scsiio); 11239 break; 11240 case CTL_LUN_INJ_UA: 11241 /* 29h/00h POWER ON, RESET, OR BUS DEVICE RESET 11242 * OCCURRED */ 11243 ctl_set_ua(&io->scsiio, 0x29, 0x00); 11244 break; 11245 case CTL_LUN_INJ_CUSTOM: 11246 /* 11247 * We're assuming the user knows what he is doing. 11248 * Just copy the sense information without doing 11249 * checks. 
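 * The net effect is that the command completes with CHECK CONDITION
 * status and the user-supplied sense data is returned via autosense.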
11250 */ 11251 bcopy(&desc->custom_sense, &io->scsiio.sense_data, 11252 ctl_min(sizeof(desc->custom_sense), 11253 sizeof(io->scsiio.sense_data))); 11254 io->scsiio.scsi_status = SCSI_STATUS_CHECK_COND; 11255 io->scsiio.sense_len = SSD_FULL_SIZE; 11256 io->io_hdr.status = CTL_SCSI_ERROR | CTL_AUTOSENSE; 11257 break; 11258 case CTL_LUN_INJ_NONE: 11259 default: 11260 /* 11261 * If this is an error injection type we don't know 11262 * about, clear the continuous flag (if it is set) 11263 * so it will get deleted below. 11264 */ 11265 desc->lun_error &= ~CTL_LUN_INJ_CONTINUOUS; 11266 break; 11267 } 11268 /* 11269 * By default, each error injection action is a one-shot 11270 */ 11271 if (desc->lun_error & CTL_LUN_INJ_CONTINUOUS) 11272 continue; 11273 11274 STAILQ_REMOVE(&lun->error_list, desc, ctl_error_desc, links); 11275 11276 free(desc, M_CTL); 11277 } 11278 } 11279 11280 #ifdef CTL_IO_DELAY 11281 static void 11282 ctl_datamove_timer_wakeup(void *arg) 11283 { 11284 union ctl_io *io; 11285 11286 io = (union ctl_io *)arg; 11287 11288 ctl_datamove(io); 11289 } 11290 #endif /* CTL_IO_DELAY */ 11291 11292 void 11293 ctl_datamove(union ctl_io *io) 11294 { 11295 void (*fe_datamove)(union ctl_io *io); 11296 11297 mtx_assert(&control_softc->ctl_lock, MA_NOTOWNED); 11298 11299 CTL_DEBUG_PRINT(("ctl_datamove\n")); 11300 11301 #ifdef CTL_TIME_IO 11302 if ((time_uptime - io->io_hdr.start_time) > ctl_time_io_secs) { 11303 char str[256]; 11304 char path_str[64]; 11305 struct sbuf sb; 11306 11307 ctl_scsi_path_string(io, path_str, sizeof(path_str)); 11308 sbuf_new(&sb, str, sizeof(str), SBUF_FIXEDLEN); 11309 11310 sbuf_cat(&sb, path_str); 11311 switch (io->io_hdr.io_type) { 11312 case CTL_IO_SCSI: 11313 ctl_scsi_command_string(&io->scsiio, NULL, &sb); 11314 sbuf_printf(&sb, "\n"); 11315 sbuf_cat(&sb, path_str); 11316 sbuf_printf(&sb, "Tag: 0x%04x, type %d\n", 11317 io->scsiio.tag_num, io->scsiio.tag_type); 11318 break; 11319 case CTL_IO_TASK: 11320 sbuf_printf(&sb, "Task I/O type: %d, Tag: 0x%04x, " 11321 "Tag Type: %d\n", io->taskio.task_action, 11322 io->taskio.tag_num, io->taskio.tag_type); 11323 break; 11324 default: 11325 printf("Invalid CTL I/O type %d\n", io->io_hdr.io_type); 11326 panic("Invalid CTL I/O type %d\n", io->io_hdr.io_type); 11327 break; 11328 } 11329 sbuf_cat(&sb, path_str); 11330 sbuf_printf(&sb, "ctl_datamove: %jd seconds\n", 11331 (intmax_t)time_uptime - io->io_hdr.start_time); 11332 sbuf_finish(&sb); 11333 printf("%s", sbuf_data(&sb)); 11334 } 11335 #endif /* CTL_TIME_IO */ 11336 11337 mtx_lock(&control_softc->ctl_lock); 11338 #ifdef CTL_IO_DELAY 11339 if (io->io_hdr.flags & CTL_FLAG_DELAY_DONE) { 11340 struct ctl_lun *lun; 11341 11342 lun =(struct ctl_lun *)io->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 11343 11344 io->io_hdr.flags &= ~CTL_FLAG_DELAY_DONE; 11345 } else { 11346 struct ctl_lun *lun; 11347 11348 lun =(struct ctl_lun *)io->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 11349 if ((lun != NULL) 11350 && (lun->delay_info.datamove_delay > 0)) { 11351 struct callout *callout; 11352 11353 callout = (struct callout *)&io->io_hdr.timer_bytes; 11354 callout_init(callout, /*mpsafe*/ 1); 11355 io->io_hdr.flags |= CTL_FLAG_DELAY_DONE; 11356 callout_reset(callout, 11357 lun->delay_info.datamove_delay * hz, 11358 ctl_datamove_timer_wakeup, io); 11359 if (lun->delay_info.datamove_type == 11360 CTL_DELAY_TYPE_ONESHOT) 11361 lun->delay_info.datamove_delay = 0; 11362 mtx_unlock(&control_softc->ctl_lock); 11363 return; 11364 } 11365 } 11366 #endif 11367 /* 11368 * If we have any pending task management 
commands, process them 11369 * first. This is necessary to eliminate a race condition with the 11370 * FETD: 11371 * 11372 * - FETD submits a task management command, like an abort. 11373 * - Back end calls fe_datamove() to move the data for the aborted 11374 * command. The FETD can't really accept it, but if it did, it 11375 * would end up transmitting data for a command that the initiator 11376 * told us to abort. 11377 * 11378 * We close the race by processing all pending task management 11379 * commands here (we can't block!), and then check this I/O to see 11380 * if it has been aborted. If so, return it to the back end with 11381 * bad status, so the back end can say return an error to the back end 11382 * and then when the back end returns an error, we can return the 11383 * aborted command to the FETD, so it can clean up its resources. 11384 */ 11385 if (control_softc->flags & CTL_FLAG_TASK_PENDING) 11386 ctl_run_task_queue(control_softc); 11387 11388 /* 11389 * This command has been aborted. Set the port status, so we fail 11390 * the data move. 11391 */ 11392 if (io->io_hdr.flags & CTL_FLAG_ABORT) { 11393 printf("ctl_datamove: tag 0x%04x on (%ju:%d:%ju:%d) aborted\n", 11394 io->scsiio.tag_num,(uintmax_t)io->io_hdr.nexus.initid.id, 11395 io->io_hdr.nexus.targ_port, 11396 (uintmax_t)io->io_hdr.nexus.targ_target.id, 11397 io->io_hdr.nexus.targ_lun); 11398 io->io_hdr.status = CTL_CMD_ABORTED; 11399 io->io_hdr.port_status = 31337; 11400 mtx_unlock(&control_softc->ctl_lock); 11401 /* 11402 * Note that the backend, in this case, will get the 11403 * callback in its context. In other cases it may get 11404 * called in the frontend's interrupt thread context. 11405 */ 11406 io->scsiio.be_move_done(io); 11407 return; 11408 } 11409 11410 /* 11411 * If we're in XFER mode and this I/O is from the other shelf 11412 * controller, we need to send the DMA to the other side to 11413 * actually transfer the data to/from the host. In serialize only 11414 * mode the transfer happens below CTL and ctl_datamove() is only 11415 * called on the machine that originally received the I/O. 11416 */ 11417 if ((control_softc->ha_mode == CTL_HA_MODE_XFER) 11418 && (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC)) { 11419 union ctl_ha_msg msg; 11420 uint32_t sg_entries_sent; 11421 int do_sg_copy; 11422 int i; 11423 11424 memset(&msg, 0, sizeof(msg)); 11425 msg.hdr.msg_type = CTL_MSG_DATAMOVE; 11426 msg.hdr.original_sc = io->io_hdr.original_sc; 11427 msg.hdr.serializing_sc = io; 11428 msg.hdr.nexus = io->io_hdr.nexus; 11429 msg.dt.flags = io->io_hdr.flags; 11430 /* 11431 * We convert everything into a S/G list here. We can't 11432 * pass by reference, only by value between controllers. 11433 * So we can't pass a pointer to the S/G list, only as many 11434 * S/G entries as we can fit in here. If it's possible for 11435 * us to get more than CTL_HA_MAX_SG_ENTRIES S/G entries, 11436 * then we need to break this up into multiple transfers. 11437 */ 11438 if (io->scsiio.kern_sg_entries == 0) { 11439 msg.dt.kern_sg_entries = 1; 11440 /* 11441 * If this is in cached memory, flush the cache 11442 * before we send the DMA request to the other 11443 * controller. We want to do this in either the 11444 * read or the write case. The read case is 11445 * straightforward. In the write case, we want to 11446 * make sure nothing is in the local cache that 11447 * could overwrite the DMAed data. 11448 */ 11449 if ((io->io_hdr.flags & CTL_FLAG_NO_DATASYNC) == 0) { 11450 /* 11451 * XXX KDM use bus_dmamap_sync() here. 
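 * A busdma-based version would look roughly like the sketch below.
 * This is purely illustrative; "tag" and "map" stand for the bus_dma
 * tag and map used to load this buffer, which the current code does
 * not keep around:
 *
 *	bus_dmamap_sync(tag, map,
 *	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);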
11452 */ 11453 } 11454 11455 /* 11456 * Convert to a physical address if this is a 11457 * virtual address. 11458 */ 11459 if (io->io_hdr.flags & CTL_FLAG_BUS_ADDR) { 11460 msg.dt.sg_list[0].addr = 11461 io->scsiio.kern_data_ptr; 11462 } else { 11463 /* 11464 * XXX KDM use busdma here! 11465 */ 11466 #if 0 11467 msg.dt.sg_list[0].addr = (void *) 11468 vtophys(io->scsiio.kern_data_ptr); 11469 #endif 11470 } 11471 11472 msg.dt.sg_list[0].len = io->scsiio.kern_data_len; 11473 do_sg_copy = 0; 11474 } else { 11475 struct ctl_sg_entry *sgl; 11476 11477 do_sg_copy = 1; 11478 msg.dt.kern_sg_entries = io->scsiio.kern_sg_entries; 11479 sgl = (struct ctl_sg_entry *)io->scsiio.kern_data_ptr; 11480 if ((io->io_hdr.flags & CTL_FLAG_NO_DATASYNC) == 0) { 11481 /* 11482 * XXX KDM use bus_dmamap_sync() here. 11483 */ 11484 } 11485 } 11486 11487 msg.dt.kern_data_len = io->scsiio.kern_data_len; 11488 msg.dt.kern_total_len = io->scsiio.kern_total_len; 11489 msg.dt.kern_data_resid = io->scsiio.kern_data_resid; 11490 msg.dt.kern_rel_offset = io->scsiio.kern_rel_offset; 11491 msg.dt.sg_sequence = 0; 11492 11493 /* 11494 * Loop until we've sent all of the S/G entries. On the 11495 * other end, we'll recompose these S/G entries into one 11496 * contiguous list before passing it to the 11497 */ 11498 for (sg_entries_sent = 0; sg_entries_sent < 11499 msg.dt.kern_sg_entries; msg.dt.sg_sequence++) { 11500 msg.dt.cur_sg_entries = ctl_min((sizeof(msg.dt.sg_list)/ 11501 sizeof(msg.dt.sg_list[0])), 11502 msg.dt.kern_sg_entries - sg_entries_sent); 11503 11504 if (do_sg_copy != 0) { 11505 struct ctl_sg_entry *sgl; 11506 int j; 11507 11508 sgl = (struct ctl_sg_entry *) 11509 io->scsiio.kern_data_ptr; 11510 /* 11511 * If this is in cached memory, flush the cache 11512 * before we send the DMA request to the other 11513 * controller. We want to do this in either 11514 * the * read or the write case. The read 11515 * case is straightforward. In the write 11516 * case, we want to make sure nothing is 11517 * in the local cache that could overwrite 11518 * the DMAed data. 11519 */ 11520 11521 for (i = sg_entries_sent, j = 0; 11522 i < msg.dt.cur_sg_entries; i++, j++) { 11523 if ((io->io_hdr.flags & 11524 CTL_FLAG_NO_DATASYNC) == 0) { 11525 /* 11526 * XXX KDM use bus_dmamap_sync() 11527 */ 11528 } 11529 if ((io->io_hdr.flags & 11530 CTL_FLAG_BUS_ADDR) == 0) { 11531 /* 11532 * XXX KDM use busdma. 11533 */ 11534 #if 0 11535 msg.dt.sg_list[j].addr =(void *) 11536 vtophys(sgl[i].addr); 11537 #endif 11538 } else { 11539 msg.dt.sg_list[j].addr = 11540 sgl[i].addr; 11541 } 11542 msg.dt.sg_list[j].len = sgl[i].len; 11543 } 11544 } 11545 11546 sg_entries_sent += msg.dt.cur_sg_entries; 11547 if (sg_entries_sent >= msg.dt.kern_sg_entries) 11548 msg.dt.sg_last = 1; 11549 else 11550 msg.dt.sg_last = 0; 11551 11552 /* 11553 * XXX KDM drop and reacquire the lock here? 11554 */ 11555 if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, 11556 sizeof(msg), 0) > CTL_HA_STATUS_SUCCESS) { 11557 /* 11558 * XXX do something here. 11559 */ 11560 } 11561 11562 msg.dt.sent_sg_entries = sg_entries_sent; 11563 } 11564 io->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE; 11565 if (io->io_hdr.flags & CTL_FLAG_FAILOVER) 11566 ctl_failover_io(io, /*have_lock*/ 1); 11567 11568 } else { 11569 11570 /* 11571 * Lookup the fe_datamove() function for this particular 11572 * front end. 
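 * The port index is derived from the nexus' targ_port, so the callback
 * we get is the one registered by the front end this command arrived
 * on.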
11573 */ 11574 fe_datamove = 11575 control_softc->ctl_ports[ctl_port_idx(io->io_hdr.nexus.targ_port)]->fe_datamove; 11576 mtx_unlock(&control_softc->ctl_lock); 11577 11578 fe_datamove(io); 11579 } 11580 } 11581 11582 static void 11583 ctl_send_datamove_done(union ctl_io *io, int have_lock) 11584 { 11585 union ctl_ha_msg msg; 11586 int isc_status; 11587 11588 memset(&msg, 0, sizeof(msg)); 11589 11590 msg.hdr.msg_type = CTL_MSG_DATAMOVE_DONE; 11591 msg.hdr.original_sc = io; 11592 msg.hdr.serializing_sc = io->io_hdr.serializing_sc; 11593 msg.hdr.nexus = io->io_hdr.nexus; 11594 msg.hdr.status = io->io_hdr.status; 11595 msg.scsi.tag_num = io->scsiio.tag_num; 11596 msg.scsi.tag_type = io->scsiio.tag_type; 11597 msg.scsi.scsi_status = io->scsiio.scsi_status; 11598 memcpy(&msg.scsi.sense_data, &io->scsiio.sense_data, 11599 sizeof(io->scsiio.sense_data)); 11600 msg.scsi.sense_len = io->scsiio.sense_len; 11601 msg.scsi.sense_residual = io->scsiio.sense_residual; 11602 msg.scsi.fetd_status = io->io_hdr.port_status; 11603 msg.scsi.residual = io->scsiio.residual; 11604 io->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE; 11605 11606 if (io->io_hdr.flags & CTL_FLAG_FAILOVER) { 11607 ctl_failover_io(io, /*have_lock*/ have_lock); 11608 return; 11609 } 11610 11611 isc_status = ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, sizeof(msg), 0); 11612 if (isc_status > CTL_HA_STATUS_SUCCESS) { 11613 /* XXX do something if this fails */ 11614 } 11615 11616 } 11617 11618 /* 11619 * The DMA to the remote side is done, now we need to tell the other side 11620 * we're done so it can continue with its data movement. 11621 */ 11622 static void 11623 ctl_datamove_remote_write_cb(struct ctl_ha_dt_req *rq) 11624 { 11625 union ctl_io *io; 11626 11627 io = rq->context; 11628 11629 if (rq->ret != CTL_HA_STATUS_SUCCESS) { 11630 printf("%s: ISC DMA write failed with error %d", __func__, 11631 rq->ret); 11632 ctl_set_internal_failure(&io->scsiio, 11633 /*sks_valid*/ 1, 11634 /*retry_count*/ rq->ret); 11635 } 11636 11637 ctl_dt_req_free(rq); 11638 11639 /* 11640 * In this case, we had to malloc the memory locally. Free it. 11641 */ 11642 if ((io->io_hdr.flags & CTL_FLAG_AUTO_MIRROR) == 0) { 11643 int i; 11644 for (i = 0; i < io->scsiio.kern_sg_entries; i++) 11645 free(io->io_hdr.local_sglist[i].addr, M_CTL); 11646 } 11647 /* 11648 * The data is in local and remote memory, so now we need to send 11649 * status (good or back) back to the other side. 11650 */ 11651 ctl_send_datamove_done(io, /*have_lock*/ 0); 11652 } 11653 11654 /* 11655 * We've moved the data from the host/controller into local memory. Now we 11656 * need to push it over to the remote controller's memory. 11657 */ 11658 static int 11659 ctl_datamove_remote_dm_write_cb(union ctl_io *io) 11660 { 11661 int retval; 11662 11663 retval = 0; 11664 11665 retval = ctl_datamove_remote_xfer(io, CTL_HA_DT_CMD_WRITE, 11666 ctl_datamove_remote_write_cb); 11667 11668 return (retval); 11669 } 11670 11671 static void 11672 ctl_datamove_remote_write(union ctl_io *io) 11673 { 11674 int retval; 11675 void (*fe_datamove)(union ctl_io *io); 11676 11677 /* 11678 * - Get the data from the host/HBA into local memory. 11679 * - DMA memory from the local controller to the remote controller. 11680 * - Send status back to the remote controller. 
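 *
 * The first step is driven by fe_datamove() below, with be_move_done
 * temporarily pointed at ctl_datamove_remote_dm_write_cb(); that
 * callback starts the remote DMA, and ctl_datamove_remote_write_cb()
 * sends the final status once the transfer completes.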
11681 */ 11682 11683 retval = ctl_datamove_remote_sgl_setup(io); 11684 if (retval != 0) 11685 return; 11686 11687 /* Switch the pointer over so the FETD knows what to do */ 11688 io->scsiio.kern_data_ptr = (uint8_t *)io->io_hdr.local_sglist; 11689 11690 /* 11691 * Use a custom move done callback, since we need to send completion 11692 * back to the other controller, not to the backend on this side. 11693 */ 11694 io->scsiio.be_move_done = ctl_datamove_remote_dm_write_cb; 11695 11696 fe_datamove = control_softc->ctl_ports[ctl_port_idx(io->io_hdr.nexus.targ_port)]->fe_datamove; 11697 11698 fe_datamove(io); 11699 11700 return; 11701 11702 } 11703 11704 static int 11705 ctl_datamove_remote_dm_read_cb(union ctl_io *io) 11706 { 11707 #if 0 11708 char str[256]; 11709 char path_str[64]; 11710 struct sbuf sb; 11711 #endif 11712 11713 /* 11714 * In this case, we had to malloc the memory locally. Free it. 11715 */ 11716 if ((io->io_hdr.flags & CTL_FLAG_AUTO_MIRROR) == 0) { 11717 int i; 11718 for (i = 0; i < io->scsiio.kern_sg_entries; i++) 11719 free(io->io_hdr.local_sglist[i].addr, M_CTL); 11720 } 11721 11722 #if 0 11723 scsi_path_string(io, path_str, sizeof(path_str)); 11724 sbuf_new(&sb, str, sizeof(str), SBUF_FIXEDLEN); 11725 sbuf_cat(&sb, path_str); 11726 scsi_command_string(&io->scsiio, NULL, &sb); 11727 sbuf_printf(&sb, "\n"); 11728 sbuf_cat(&sb, path_str); 11729 sbuf_printf(&sb, "Tag: 0x%04x, type %d\n", 11730 io->scsiio.tag_num, io->scsiio.tag_type); 11731 sbuf_cat(&sb, path_str); 11732 sbuf_printf(&sb, "%s: flags %#x, status %#x\n", __func__, 11733 io->io_hdr.flags, io->io_hdr.status); 11734 sbuf_finish(&sb); 11735 printk("%s", sbuf_data(&sb)); 11736 #endif 11737 11738 11739 /* 11740 * The read is done, now we need to send status (good or bad) back 11741 * to the other side. 11742 */ 11743 ctl_send_datamove_done(io, /*have_lock*/ 0); 11744 11745 return (0); 11746 } 11747 11748 static void 11749 ctl_datamove_remote_read_cb(struct ctl_ha_dt_req *rq) 11750 { 11751 union ctl_io *io; 11752 void (*fe_datamove)(union ctl_io *io); 11753 11754 io = rq->context; 11755 11756 if (rq->ret != CTL_HA_STATUS_SUCCESS) { 11757 printf("%s: ISC DMA read failed with error %d", __func__, 11758 rq->ret); 11759 ctl_set_internal_failure(&io->scsiio, 11760 /*sks_valid*/ 1, 11761 /*retry_count*/ rq->ret); 11762 } 11763 11764 ctl_dt_req_free(rq); 11765 11766 /* Switch the pointer over so the FETD knows what to do */ 11767 io->scsiio.kern_data_ptr = (uint8_t *)io->io_hdr.local_sglist; 11768 11769 /* 11770 * Use a custom move done callback, since we need to send completion 11771 * back to the other controller, not to the backend on this side. 11772 */ 11773 io->scsiio.be_move_done = ctl_datamove_remote_dm_read_cb; 11774 11775 /* XXX KDM add checks like the ones in ctl_datamove? 
*/ 11776 11777 fe_datamove = control_softc->ctl_ports[ctl_port_idx(io->io_hdr.nexus.targ_port)]->fe_datamove; 11778 11779 fe_datamove(io); 11780 } 11781 11782 static int 11783 ctl_datamove_remote_sgl_setup(union ctl_io *io) 11784 { 11785 struct ctl_sg_entry *local_sglist, *remote_sglist; 11786 struct ctl_sg_entry *local_dma_sglist, *remote_dma_sglist; 11787 struct ctl_softc *softc; 11788 int retval; 11789 int i; 11790 11791 retval = 0; 11792 softc = control_softc; 11793 11794 local_sglist = io->io_hdr.local_sglist; 11795 local_dma_sglist = io->io_hdr.local_dma_sglist; 11796 remote_sglist = io->io_hdr.remote_sglist; 11797 remote_dma_sglist = io->io_hdr.remote_dma_sglist; 11798 11799 if (io->io_hdr.flags & CTL_FLAG_AUTO_MIRROR) { 11800 for (i = 0; i < io->scsiio.kern_sg_entries; i++) { 11801 local_sglist[i].len = remote_sglist[i].len; 11802 11803 /* 11804 * XXX Detect the situation where the RS-level I/O 11805 * redirector on the other side has already read the 11806 * data off of the AOR RS on this side, and 11807 * transferred it to remote (mirror) memory on the 11808 * other side. Since we already have the data in 11809 * memory here, we just need to use it. 11810 * 11811 * XXX KDM this can probably be removed once we 11812 * get the cache device code in and take the 11813 * current AOR implementation out. 11814 */ 11815 #ifdef NEEDTOPORT 11816 if ((remote_sglist[i].addr >= 11817 (void *)vtophys(softc->mirr->addr)) 11818 && (remote_sglist[i].addr < 11819 ((void *)vtophys(softc->mirr->addr) + 11820 CacheMirrorOffset))) { 11821 local_sglist[i].addr = remote_sglist[i].addr - 11822 CacheMirrorOffset; 11823 if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == 11824 CTL_FLAG_DATA_IN) 11825 io->io_hdr.flags |= CTL_FLAG_REDIR_DONE; 11826 } else { 11827 local_sglist[i].addr = remote_sglist[i].addr + 11828 CacheMirrorOffset; 11829 } 11830 #endif 11831 #if 0 11832 printf("%s: local %p, remote %p, len %d\n", 11833 __func__, local_sglist[i].addr, 11834 remote_sglist[i].addr, local_sglist[i].len); 11835 #endif 11836 } 11837 } else { 11838 uint32_t len_to_go; 11839 11840 /* 11841 * In this case, we don't have automatically allocated 11842 * memory for this I/O on this controller. This typically 11843 * happens with internal CTL I/O -- e.g. inquiry, mode 11844 * sense, etc. Anything coming from RAIDCore will have 11845 * a mirror area available. 11846 */ 11847 len_to_go = io->scsiio.kern_data_len; 11848 11849 /* 11850 * Clear the no datasync flag, we have to use malloced 11851 * buffers. 11852 */ 11853 io->io_hdr.flags &= ~CTL_FLAG_NO_DATASYNC; 11854 11855 /* 11856 * The difficult thing here is that the size of the various 11857 * S/G segments may be different than the size from the 11858 * remote controller. That'll make it harder when DMAing 11859 * the data back to the other side. 
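 * To keep that manageable, the loop below builds the local list in
 * chunks of at most 128KB (131072 bytes) and rounds each DMA length up
 * to an 8 byte boundary with CTL_SIZE_8B(); ctl_datamove_remote_xfer()
 * later walks both lists and splits each transfer at whichever segment
 * runs out first.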
11860 */ 11861 for (i = 0; (i < sizeof(io->io_hdr.remote_sglist) / 11862 sizeof(io->io_hdr.remote_sglist[0])) && 11863 (len_to_go > 0); i++) { 11864 local_sglist[i].len = ctl_min(len_to_go, 131072); 11865 CTL_SIZE_8B(local_dma_sglist[i].len, 11866 local_sglist[i].len); 11867 local_sglist[i].addr = 11868 malloc(local_dma_sglist[i].len, M_CTL,M_WAITOK); 11869 11870 local_dma_sglist[i].addr = local_sglist[i].addr; 11871 11872 if (local_sglist[i].addr == NULL) { 11873 int j; 11874 11875 printf("malloc failed for %zd bytes!", 11876 local_dma_sglist[i].len); 11877 for (j = 0; j < i; j++) { 11878 free(local_sglist[j].addr, M_CTL); 11879 } 11880 ctl_set_internal_failure(&io->scsiio, 11881 /*sks_valid*/ 1, 11882 /*retry_count*/ 4857); 11883 retval = 1; 11884 goto bailout_error; 11885 11886 } 11887 /* XXX KDM do we need a sync here? */ 11888 11889 len_to_go -= local_sglist[i].len; 11890 } 11891 /* 11892 * Reset the number of S/G entries accordingly. The 11893 * original number of S/G entries is available in 11894 * rem_sg_entries. 11895 */ 11896 io->scsiio.kern_sg_entries = i; 11897 11898 #if 0 11899 printf("%s: kern_sg_entries = %d\n", __func__, 11900 io->scsiio.kern_sg_entries); 11901 for (i = 0; i < io->scsiio.kern_sg_entries; i++) 11902 printf("%s: sg[%d] = %p, %d (DMA: %d)\n", __func__, i, 11903 local_sglist[i].addr, local_sglist[i].len, 11904 local_dma_sglist[i].len); 11905 #endif 11906 } 11907 11908 11909 return (retval); 11910 11911 bailout_error: 11912 11913 ctl_send_datamove_done(io, /*have_lock*/ 0); 11914 11915 return (retval); 11916 } 11917 11918 static int 11919 ctl_datamove_remote_xfer(union ctl_io *io, unsigned command, 11920 ctl_ha_dt_cb callback) 11921 { 11922 struct ctl_ha_dt_req *rq; 11923 struct ctl_sg_entry *remote_sglist, *local_sglist; 11924 struct ctl_sg_entry *remote_dma_sglist, *local_dma_sglist; 11925 uint32_t local_used, remote_used, total_used; 11926 int retval; 11927 int i, j; 11928 11929 retval = 0; 11930 11931 rq = ctl_dt_req_alloc(); 11932 11933 /* 11934 * If we failed to allocate the request, and if the DMA didn't fail 11935 * anyway, set busy status. This is just a resource allocation 11936 * failure. 11937 */ 11938 if ((rq == NULL) 11939 && ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE)) 11940 ctl_set_busy(&io->scsiio); 11941 11942 if ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE) { 11943 11944 if (rq != NULL) 11945 ctl_dt_req_free(rq); 11946 11947 /* 11948 * The data move failed. We need to return status back 11949 * to the other controller. No point in trying to DMA 11950 * data to the remote controller. 11951 */ 11952 11953 ctl_send_datamove_done(io, /*have_lock*/ 0); 11954 11955 retval = 1; 11956 11957 goto bailout; 11958 } 11959 11960 local_sglist = io->io_hdr.local_sglist; 11961 local_dma_sglist = io->io_hdr.local_dma_sglist; 11962 remote_sglist = io->io_hdr.remote_sglist; 11963 remote_dma_sglist = io->io_hdr.remote_dma_sglist; 11964 local_used = 0; 11965 remote_used = 0; 11966 total_used = 0; 11967 11968 if (io->io_hdr.flags & CTL_FLAG_REDIR_DONE) { 11969 rq->ret = CTL_HA_STATUS_SUCCESS; 11970 rq->context = io; 11971 callback(rq); 11972 goto bailout; 11973 } 11974 11975 /* 11976 * Pull/push the data over the wire from/to the other controller. 11977 * This takes into account the possibility that the local and 11978 * remote sglists may not be identical in terms of the size of 11979 * the elements and the number of elements. 
	 *
	 * One fundamental assumption here is that the length allocated for
	 * both the local and remote sglists is identical.  Otherwise, we've
	 * essentially got a coding error of some sort.
	 */
	for (i = 0, j = 0; total_used < io->scsiio.kern_data_len; ) {
		int isc_ret;
		uint32_t cur_len, dma_length;
		uint8_t *tmp_ptr;

		rq->id = CTL_HA_DATA_CTL;
		rq->command = command;
		rq->context = io;

		/*
		 * Both pointers should be aligned.  But it is possible
		 * that the allocation length is not.  They should both
		 * also have enough slack left over at the end, though,
		 * to round up to the next 8-byte boundary.
		 */
		cur_len = ctl_min(local_sglist[i].len - local_used,
				  remote_sglist[j].len - remote_used);

		/*
		 * If the current length is not a multiple of 8, round it
		 * down to the nearest multiple; but if fewer than 8 bytes
		 * are left, round the DMA length up instead so we still
		 * pick up the last bit.
		 */
		if ((cur_len & 0x7) != 0) {
			if (cur_len > 0x7) {
				cur_len = cur_len - (cur_len & 0x7);
				dma_length = cur_len;
			} else {
				CTL_SIZE_8B(dma_length, cur_len);
			}
		} else
			dma_length = cur_len;

		/*
		 * If we had to allocate memory for this I/O, instead of using
		 * the non-cached mirror memory, we'll need to flush the cache
		 * before trying to DMA to the other controller.
		 *
		 * We could end up doing this multiple times for the same
		 * segment if we have a larger local segment than remote
		 * segment.  That shouldn't be an issue.
		 */
		if ((io->io_hdr.flags & CTL_FLAG_NO_DATASYNC) == 0) {
			/*
			 * XXX KDM use bus_dmamap_sync() here.
			 */
		}

		rq->size = dma_length;

		tmp_ptr = (uint8_t *)local_sglist[i].addr;
		tmp_ptr += local_used;

		/* Use physical addresses when talking to ISC hardware */
		if ((io->io_hdr.flags & CTL_FLAG_BUS_ADDR) == 0) {
			/* XXX KDM use busdma */
#if 0
			rq->local = vtophys(tmp_ptr);
#endif
		} else
			rq->local = tmp_ptr;

		tmp_ptr = (uint8_t *)remote_sglist[j].addr;
		tmp_ptr += remote_used;
		rq->remote = tmp_ptr;

		rq->callback = NULL;

		local_used += cur_len;
		if (local_used >= local_sglist[i].len) {
			i++;
			local_used = 0;
		}

		remote_used += cur_len;
		if (remote_used >= remote_sglist[j].len) {
			j++;
			remote_used = 0;
		}
		total_used += cur_len;

		if (total_used >= io->scsiio.kern_data_len)
			rq->callback = callback;

		if ((rq->size & 0x7) != 0) {
			printf("%s: warning: size %d is not on 8b boundary\n",
			       __func__, rq->size);
		}
		if (((uintptr_t)rq->local & 0x7) != 0) {
			printf("%s: warning: local %p not on 8b boundary\n",
			       __func__, rq->local);
		}
		if (((uintptr_t)rq->remote & 0x7) != 0) {
			printf("%s: warning: remote %p not on 8b boundary\n",
			       __func__, rq->remote);
		}
#if 0
		printf("%s: %s: local %#x remote %#x size %d\n", __func__,
		    (command == CTL_HA_DT_CMD_WRITE) ?
"WRITE" : "READ", 12086 rq->local, rq->remote, rq->size); 12087 #endif 12088 12089 isc_ret = ctl_dt_single(rq); 12090 if (isc_ret == CTL_HA_STATUS_WAIT) 12091 continue; 12092 12093 if (isc_ret == CTL_HA_STATUS_DISCONNECT) { 12094 rq->ret = CTL_HA_STATUS_SUCCESS; 12095 } else { 12096 rq->ret = isc_ret; 12097 } 12098 callback(rq); 12099 goto bailout; 12100 } 12101 12102 bailout: 12103 return (retval); 12104 12105 } 12106 12107 static void 12108 ctl_datamove_remote_read(union ctl_io *io) 12109 { 12110 int retval; 12111 int i; 12112 12113 /* 12114 * This will send an error to the other controller in the case of a 12115 * failure. 12116 */ 12117 retval = ctl_datamove_remote_sgl_setup(io); 12118 if (retval != 0) 12119 return; 12120 12121 retval = ctl_datamove_remote_xfer(io, CTL_HA_DT_CMD_READ, 12122 ctl_datamove_remote_read_cb); 12123 if ((retval != 0) 12124 && ((io->io_hdr.flags & CTL_FLAG_AUTO_MIRROR) == 0)) { 12125 /* 12126 * Make sure we free memory if there was an error.. The 12127 * ctl_datamove_remote_xfer() function will send the 12128 * datamove done message, or call the callback with an 12129 * error if there is a problem. 12130 */ 12131 for (i = 0; i < io->scsiio.kern_sg_entries; i++) 12132 free(io->io_hdr.local_sglist[i].addr, M_CTL); 12133 } 12134 12135 return; 12136 } 12137 12138 /* 12139 * Process a datamove request from the other controller. This is used for 12140 * XFER mode only, not SER_ONLY mode. For writes, we DMA into local memory 12141 * first. Once that is complete, the data gets DMAed into the remote 12142 * controller's memory. For reads, we DMA from the remote controller's 12143 * memory into our memory first, and then move it out to the FETD. 12144 */ 12145 static void 12146 ctl_datamove_remote(union ctl_io *io) 12147 { 12148 struct ctl_softc *softc; 12149 12150 softc = control_softc; 12151 12152 mtx_assert(&softc->ctl_lock, MA_NOTOWNED); 12153 12154 /* 12155 * Note that we look for an aborted I/O here, but don't do some of 12156 * the other checks that ctl_datamove() normally does. We don't 12157 * need to run the task queue, because this I/O is on the ISC 12158 * queue, which is executed by the work thread after the task queue. 12159 * We don't need to run the datamove delay code, since that should 12160 * have been done if need be on the other controller. 
12161 */ 12162 mtx_lock(&softc->ctl_lock); 12163 12164 if (io->io_hdr.flags & CTL_FLAG_ABORT) { 12165 12166 printf("%s: tag 0x%04x on (%d:%d:%d:%d) aborted\n", __func__, 12167 io->scsiio.tag_num, io->io_hdr.nexus.initid.id, 12168 io->io_hdr.nexus.targ_port, 12169 io->io_hdr.nexus.targ_target.id, 12170 io->io_hdr.nexus.targ_lun); 12171 io->io_hdr.status = CTL_CMD_ABORTED; 12172 io->io_hdr.port_status = 31338; 12173 12174 mtx_unlock(&softc->ctl_lock); 12175 12176 ctl_send_datamove_done(io, /*have_lock*/ 0); 12177 12178 return; 12179 } 12180 12181 if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_OUT) { 12182 mtx_unlock(&softc->ctl_lock); 12183 ctl_datamove_remote_write(io); 12184 } else if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_IN){ 12185 mtx_unlock(&softc->ctl_lock); 12186 ctl_datamove_remote_read(io); 12187 } else { 12188 union ctl_ha_msg msg; 12189 struct scsi_sense_data *sense; 12190 uint8_t sks[3]; 12191 int retry_count; 12192 12193 memset(&msg, 0, sizeof(msg)); 12194 12195 msg.hdr.msg_type = CTL_MSG_BAD_JUJU; 12196 msg.hdr.status = CTL_SCSI_ERROR; 12197 msg.scsi.scsi_status = SCSI_STATUS_CHECK_COND; 12198 12199 retry_count = 4243; 12200 12201 sense = &msg.scsi.sense_data; 12202 sks[0] = SSD_SCS_VALID; 12203 sks[1] = (retry_count >> 8) & 0xff; 12204 sks[2] = retry_count & 0xff; 12205 12206 /* "Internal target failure" */ 12207 scsi_set_sense_data(sense, 12208 /*sense_format*/ SSD_TYPE_NONE, 12209 /*current_error*/ 1, 12210 /*sense_key*/ SSD_KEY_HARDWARE_ERROR, 12211 /*asc*/ 0x44, 12212 /*ascq*/ 0x00, 12213 /*type*/ SSD_ELEM_SKS, 12214 /*size*/ sizeof(sks), 12215 /*data*/ sks, 12216 SSD_ELEM_NONE); 12217 12218 io->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE; 12219 if (io->io_hdr.flags & CTL_FLAG_FAILOVER) { 12220 ctl_failover_io(io, /*have_lock*/ 1); 12221 mtx_unlock(&softc->ctl_lock); 12222 return; 12223 } 12224 12225 mtx_unlock(&softc->ctl_lock); 12226 12227 if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, sizeof(msg), 0) > 12228 CTL_HA_STATUS_SUCCESS) { 12229 /* XXX KDM what to do if this fails? 
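			 * As written, a send failure is silently ignored;
			 * CTL_FLAG_IO_ACTIVE has already been cleared above,
			 * so the other controller simply never sees this
			 * BAD_JUJU status message.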
*/ 12230 } 12231 return; 12232 } 12233 12234 } 12235 12236 static int 12237 ctl_process_done(union ctl_io *io, int have_lock) 12238 { 12239 struct ctl_lun *lun; 12240 struct ctl_softc *ctl_softc; 12241 void (*fe_done)(union ctl_io *io); 12242 uint32_t targ_port = ctl_port_idx(io->io_hdr.nexus.targ_port); 12243 12244 CTL_DEBUG_PRINT(("ctl_process_done\n")); 12245 12246 fe_done = 12247 control_softc->ctl_ports[targ_port]->fe_done; 12248 12249 #ifdef CTL_TIME_IO 12250 if ((time_uptime - io->io_hdr.start_time) > ctl_time_io_secs) { 12251 char str[256]; 12252 char path_str[64]; 12253 struct sbuf sb; 12254 12255 ctl_scsi_path_string(io, path_str, sizeof(path_str)); 12256 sbuf_new(&sb, str, sizeof(str), SBUF_FIXEDLEN); 12257 12258 sbuf_cat(&sb, path_str); 12259 switch (io->io_hdr.io_type) { 12260 case CTL_IO_SCSI: 12261 ctl_scsi_command_string(&io->scsiio, NULL, &sb); 12262 sbuf_printf(&sb, "\n"); 12263 sbuf_cat(&sb, path_str); 12264 sbuf_printf(&sb, "Tag: 0x%04x, type %d\n", 12265 io->scsiio.tag_num, io->scsiio.tag_type); 12266 break; 12267 case CTL_IO_TASK: 12268 sbuf_printf(&sb, "Task I/O type: %d, Tag: 0x%04x, " 12269 "Tag Type: %d\n", io->taskio.task_action, 12270 io->taskio.tag_num, io->taskio.tag_type); 12271 break; 12272 default: 12273 printf("Invalid CTL I/O type %d\n", io->io_hdr.io_type); 12274 panic("Invalid CTL I/O type %d\n", io->io_hdr.io_type); 12275 break; 12276 } 12277 sbuf_cat(&sb, path_str); 12278 sbuf_printf(&sb, "ctl_process_done: %jd seconds\n", 12279 (intmax_t)time_uptime - io->io_hdr.start_time); 12280 sbuf_finish(&sb); 12281 printf("%s", sbuf_data(&sb)); 12282 } 12283 #endif /* CTL_TIME_IO */ 12284 12285 switch (io->io_hdr.io_type) { 12286 case CTL_IO_SCSI: 12287 break; 12288 case CTL_IO_TASK: 12289 ctl_io_error_print(io, NULL); 12290 if (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) 12291 ctl_free_io_internal(io, /*have_lock*/ 0); 12292 else 12293 fe_done(io); 12294 return (CTL_RETVAL_COMPLETE); 12295 break; 12296 default: 12297 printf("ctl_process_done: invalid io type %d\n", 12298 io->io_hdr.io_type); 12299 panic("ctl_process_done: invalid io type %d\n", 12300 io->io_hdr.io_type); 12301 break; /* NOTREACHED */ 12302 } 12303 12304 lun = (struct ctl_lun *)io->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 12305 if (lun == NULL) { 12306 CTL_DEBUG_PRINT(("NULL LUN for lun %d\n", 12307 io->io_hdr.nexus.targ_lun)); 12308 fe_done(io); 12309 goto bailout; 12310 } 12311 ctl_softc = lun->ctl_softc; 12312 12313 /* 12314 * Remove this from the OOA queue. 12315 */ 12316 if (have_lock == 0) 12317 mtx_lock(&ctl_softc->ctl_lock); 12318 12319 /* 12320 * Check to see if we have any errors to inject here. We only 12321 * inject errors for commands that don't already have errors set. 12322 */ 12323 if ((STAILQ_FIRST(&lun->error_list) != NULL) 12324 && ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) 12325 ctl_inject_error(lun, io); 12326 12327 /* 12328 * XXX KDM how do we treat commands that aren't completed 12329 * successfully? 12330 * 12331 * XXX KDM should we also track I/O latency? 
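	 *
	 * (As the code stands, only commands that complete with CTL_SUCCESS
	 * update the per-port byte/operation counters below; anything else
	 * falls through without being counted.)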
12332 */ 12333 if ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS) { 12334 uint32_t blocksize; 12335 #ifdef CTL_TIME_IO 12336 struct bintime cur_bt; 12337 #endif 12338 12339 if ((lun->be_lun != NULL) 12340 && (lun->be_lun->blocksize != 0)) 12341 blocksize = lun->be_lun->blocksize; 12342 else 12343 blocksize = 512; 12344 12345 switch (io->io_hdr.io_type) { 12346 case CTL_IO_SCSI: { 12347 int isread; 12348 struct ctl_lba_len lbalen; 12349 12350 isread = 0; 12351 switch (io->scsiio.cdb[0]) { 12352 case READ_6: 12353 case READ_10: 12354 case READ_12: 12355 case READ_16: 12356 isread = 1; 12357 /* FALLTHROUGH */ 12358 case WRITE_6: 12359 case WRITE_10: 12360 case WRITE_12: 12361 case WRITE_16: 12362 case WRITE_VERIFY_10: 12363 case WRITE_VERIFY_12: 12364 case WRITE_VERIFY_16: 12365 memcpy(&lbalen, io->io_hdr.ctl_private[ 12366 CTL_PRIV_LBA_LEN].bytes, sizeof(lbalen)); 12367 12368 if (isread) { 12369 lun->stats.ports[targ_port].bytes[CTL_STATS_READ] += 12370 lbalen.len * blocksize; 12371 lun->stats.ports[targ_port].operations[CTL_STATS_READ]++; 12372 12373 #ifdef CTL_TIME_IO 12374 bintime_add( 12375 &lun->stats.ports[targ_port].dma_time[CTL_STATS_READ], 12376 &io->io_hdr.dma_bt); 12377 lun->stats.ports[targ_port].num_dmas[CTL_STATS_READ] += 12378 io->io_hdr.num_dmas; 12379 getbintime(&cur_bt); 12380 bintime_sub(&cur_bt, 12381 &io->io_hdr.start_bt); 12382 12383 bintime_add( 12384 &lun->stats.ports[targ_port].time[CTL_STATS_READ], 12385 &cur_bt); 12386 12387 #if 0 12388 cs_prof_gettime(&cur_ticks); 12389 lun->stats.time[CTL_STATS_READ] += 12390 cur_ticks - 12391 io->io_hdr.start_ticks; 12392 #endif 12393 #if 0 12394 lun->stats.time[CTL_STATS_READ] += 12395 jiffies - io->io_hdr.start_time; 12396 #endif 12397 #endif /* CTL_TIME_IO */ 12398 } else { 12399 lun->stats.ports[targ_port].bytes[CTL_STATS_WRITE] += 12400 lbalen.len * blocksize; 12401 lun->stats.ports[targ_port].operations[ 12402 CTL_STATS_WRITE]++; 12403 12404 #ifdef CTL_TIME_IO 12405 bintime_add( 12406 &lun->stats.ports[targ_port].dma_time[CTL_STATS_WRITE], 12407 &io->io_hdr.dma_bt); 12408 lun->stats.ports[targ_port].num_dmas[CTL_STATS_WRITE] += 12409 io->io_hdr.num_dmas; 12410 getbintime(&cur_bt); 12411 bintime_sub(&cur_bt, 12412 &io->io_hdr.start_bt); 12413 12414 bintime_add( 12415 &lun->stats.ports[targ_port].time[CTL_STATS_WRITE], 12416 &cur_bt); 12417 #if 0 12418 cs_prof_gettime(&cur_ticks); 12419 lun->stats.ports[targ_port].time[CTL_STATS_WRITE] += 12420 cur_ticks - 12421 io->io_hdr.start_ticks; 12422 lun->stats.ports[targ_port].time[CTL_STATS_WRITE] += 12423 jiffies - io->io_hdr.start_time; 12424 #endif 12425 #endif /* CTL_TIME_IO */ 12426 } 12427 break; 12428 default: 12429 lun->stats.ports[targ_port].operations[CTL_STATS_NO_IO]++; 12430 12431 #ifdef CTL_TIME_IO 12432 bintime_add( 12433 &lun->stats.ports[targ_port].dma_time[CTL_STATS_NO_IO], 12434 &io->io_hdr.dma_bt); 12435 lun->stats.ports[targ_port].num_dmas[CTL_STATS_NO_IO] += 12436 io->io_hdr.num_dmas; 12437 getbintime(&cur_bt); 12438 bintime_sub(&cur_bt, &io->io_hdr.start_bt); 12439 12440 bintime_add(&lun->stats.ports[targ_port].time[CTL_STATS_NO_IO], 12441 &cur_bt); 12442 12443 #if 0 12444 cs_prof_gettime(&cur_ticks); 12445 lun->stats.ports[targ_port].time[CTL_STATS_NO_IO] += 12446 cur_ticks - 12447 io->io_hdr.start_ticks; 12448 lun->stats.ports[targ_port].time[CTL_STATS_NO_IO] += 12449 jiffies - io->io_hdr.start_time; 12450 #endif 12451 #endif /* CTL_TIME_IO */ 12452 break; 12453 } 12454 break; 12455 } 12456 default: 12457 break; 12458 } 12459 } 12460 12461 
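	/*
	 * Example only (disabled, in the spirit of the other #if 0 debug
	 * printfs in this file): dump the per-port counters updated above.
	 */
#if 0
	printf("%s: port %u R/W bytes %ju/%ju ops %ju/%ju\n", __func__,
	       targ_port,
	       (uintmax_t)lun->stats.ports[targ_port].bytes[CTL_STATS_READ],
	       (uintmax_t)lun->stats.ports[targ_port].bytes[CTL_STATS_WRITE],
	       (uintmax_t)lun->stats.ports[targ_port].operations[CTL_STATS_READ],
	       (uintmax_t)lun->stats.ports[targ_port].operations[CTL_STATS_WRITE]);
#endif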
TAILQ_REMOVE(&lun->ooa_queue, &io->io_hdr, ooa_links); 12462 12463 /* 12464 * Run through the blocked queue on this LUN and see if anything 12465 * has become unblocked, now that this transaction is done. 12466 */ 12467 ctl_check_blocked(lun); 12468 12469 /* 12470 * If the LUN has been invalidated, free it if there is nothing 12471 * left on its OOA queue. 12472 */ 12473 if ((lun->flags & CTL_LUN_INVALID) 12474 && (TAILQ_FIRST(&lun->ooa_queue) == NULL)) 12475 ctl_free_lun(lun); 12476 12477 /* 12478 * If this command has been aborted, make sure we set the status 12479 * properly. The FETD is responsible for freeing the I/O and doing 12480 * whatever it needs to do to clean up its state. 12481 */ 12482 if (io->io_hdr.flags & CTL_FLAG_ABORT) 12483 io->io_hdr.status = CTL_CMD_ABORTED; 12484 12485 /* 12486 * We print out status for every task management command. For SCSI 12487 * commands, we filter out any unit attention errors; they happen 12488 * on every boot, and would clutter up the log. Note: task 12489 * management commands aren't printed here, they are printed above, 12490 * since they should never even make it down here. 12491 */ 12492 switch (io->io_hdr.io_type) { 12493 case CTL_IO_SCSI: { 12494 int error_code, sense_key, asc, ascq; 12495 12496 sense_key = 0; 12497 12498 if (((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SCSI_ERROR) 12499 && (io->scsiio.scsi_status == SCSI_STATUS_CHECK_COND)) { 12500 /* 12501 * Since this is just for printing, no need to 12502 * show errors here. 12503 */ 12504 scsi_extract_sense_len(&io->scsiio.sense_data, 12505 io->scsiio.sense_len, 12506 &error_code, 12507 &sense_key, 12508 &asc, 12509 &ascq, 12510 /*show_errors*/ 0); 12511 } 12512 12513 if (((io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS) 12514 && (((io->io_hdr.status & CTL_STATUS_MASK) != CTL_SCSI_ERROR) 12515 || (io->scsiio.scsi_status != SCSI_STATUS_CHECK_COND) 12516 || (sense_key != SSD_KEY_UNIT_ATTENTION))) { 12517 12518 if ((time_uptime - ctl_softc->last_print_jiffies) <= 0){ 12519 ctl_softc->skipped_prints++; 12520 if (have_lock == 0) 12521 mtx_unlock(&ctl_softc->ctl_lock); 12522 } else { 12523 uint32_t skipped_prints; 12524 12525 skipped_prints = ctl_softc->skipped_prints; 12526 12527 ctl_softc->skipped_prints = 0; 12528 ctl_softc->last_print_jiffies = time_uptime; 12529 12530 if (have_lock == 0) 12531 mtx_unlock(&ctl_softc->ctl_lock); 12532 if (skipped_prints > 0) { 12533 #ifdef NEEDTOPORT 12534 csevent_log(CSC_CTL | CSC_SHELF_SW | 12535 CTL_ERROR_REPORT, 12536 csevent_LogType_Trace, 12537 csevent_Severity_Information, 12538 csevent_AlertLevel_Green, 12539 csevent_FRU_Firmware, 12540 csevent_FRU_Unknown, 12541 "High CTL error volume, %d prints " 12542 "skipped", skipped_prints); 12543 #endif 12544 } 12545 ctl_io_error_print(io, NULL); 12546 } 12547 } else { 12548 if (have_lock == 0) 12549 mtx_unlock(&ctl_softc->ctl_lock); 12550 } 12551 break; 12552 } 12553 case CTL_IO_TASK: 12554 if (have_lock == 0) 12555 mtx_unlock(&ctl_softc->ctl_lock); 12556 ctl_io_error_print(io, NULL); 12557 break; 12558 default: 12559 if (have_lock == 0) 12560 mtx_unlock(&ctl_softc->ctl_lock); 12561 break; 12562 } 12563 12564 /* 12565 * Tell the FETD or the other shelf controller we're done with this 12566 * command. Note that only SCSI commands get to this point. Task 12567 * management commands are completed above. 12568 * 12569 * We only send status to the other controller if we're in XFER 12570 * mode. 
In SER_ONLY mode, the I/O is done on the controller that 12571 * received the I/O (from CTL's perspective), and so the status is 12572 * generated there. 12573 * 12574 * XXX KDM if we hold the lock here, we could cause a deadlock 12575 * if the frontend comes back in in this context to queue 12576 * something. 12577 */ 12578 if ((ctl_softc->ha_mode == CTL_HA_MODE_XFER) 12579 && (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC)) { 12580 union ctl_ha_msg msg; 12581 12582 memset(&msg, 0, sizeof(msg)); 12583 msg.hdr.msg_type = CTL_MSG_FINISH_IO; 12584 msg.hdr.original_sc = io->io_hdr.original_sc; 12585 msg.hdr.nexus = io->io_hdr.nexus; 12586 msg.hdr.status = io->io_hdr.status; 12587 msg.scsi.scsi_status = io->scsiio.scsi_status; 12588 msg.scsi.tag_num = io->scsiio.tag_num; 12589 msg.scsi.tag_type = io->scsiio.tag_type; 12590 msg.scsi.sense_len = io->scsiio.sense_len; 12591 msg.scsi.sense_residual = io->scsiio.sense_residual; 12592 msg.scsi.residual = io->scsiio.residual; 12593 memcpy(&msg.scsi.sense_data, &io->scsiio.sense_data, 12594 sizeof(io->scsiio.sense_data)); 12595 /* 12596 * We copy this whether or not this is an I/O-related 12597 * command. Otherwise, we'd have to go and check to see 12598 * whether it's a read/write command, and it really isn't 12599 * worth it. 12600 */ 12601 memcpy(&msg.scsi.lbalen, 12602 &io->io_hdr.ctl_private[CTL_PRIV_LBA_LEN].bytes, 12603 sizeof(msg.scsi.lbalen)); 12604 12605 if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, 12606 sizeof(msg), 0) > CTL_HA_STATUS_SUCCESS) { 12607 /* XXX do something here */ 12608 } 12609 12610 ctl_free_io_internal(io, /*have_lock*/ 0); 12611 } else 12612 fe_done(io); 12613 12614 bailout: 12615 12616 return (CTL_RETVAL_COMPLETE); 12617 } 12618 12619 /* 12620 * Front end should call this if it doesn't do autosense. When the request 12621 * sense comes back in from the initiator, we'll dequeue this and send it. 12622 */ 12623 int 12624 ctl_queue_sense(union ctl_io *io) 12625 { 12626 struct ctl_lun *lun; 12627 struct ctl_softc *ctl_softc; 12628 uint32_t initidx; 12629 12630 ctl_softc = control_softc; 12631 12632 CTL_DEBUG_PRINT(("ctl_queue_sense\n")); 12633 12634 /* 12635 * LUN lookup will likely move to the ctl_work_thread() once we 12636 * have our new queueing infrastructure (that doesn't put things on 12637 * a per-LUN queue initially). That is so that we can handle 12638 * things like an INQUIRY to a LUN that we don't have enabled. We 12639 * can't deal with that right now. 12640 */ 12641 mtx_lock(&ctl_softc->ctl_lock); 12642 12643 /* 12644 * If we don't have a LUN for this, just toss the sense 12645 * information. 12646 */ 12647 if ((io->io_hdr.nexus.targ_lun < CTL_MAX_LUNS) 12648 && (ctl_softc->ctl_luns[io->io_hdr.nexus.targ_lun] != NULL)) 12649 lun = ctl_softc->ctl_luns[io->io_hdr.nexus.targ_lun]; 12650 else 12651 goto bailout; 12652 12653 initidx = ctl_get_initindex(&io->io_hdr.nexus); 12654 12655 /* 12656 * Already have CA set for this LUN...toss the sense information. 12657 */ 12658 if (ctl_is_set(lun->have_ca, initidx)) 12659 goto bailout; 12660 12661 memcpy(&lun->pending_sense[initidx].sense, &io->scsiio.sense_data, 12662 ctl_min(sizeof(lun->pending_sense[initidx].sense), 12663 sizeof(io->scsiio.sense_data))); 12664 ctl_set_mask(lun->have_ca, initidx); 12665 12666 bailout: 12667 mtx_unlock(&ctl_softc->ctl_lock); 12668 12669 ctl_free_io(io); 12670 12671 return (CTL_RETVAL_COMPLETE); 12672 } 12673 12674 /* 12675 * Primary command inlet from frontend ports. All SCSI and task I/O 12676 * requests must go through this function. 
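 *
 * A typical frontend call sequence looks roughly like the sketch below.
 * The helpers named here are used elsewhere in CTL, but exactly which
 * fields a frontend fills in depends on the port type, so treat this as
 * an illustration only:
 *
 *	io = ctl_alloc_io(fe->ctl_pool_ref);
 *	ctl_zero_io(io);
 *	io->io_hdr.io_type = CTL_IO_SCSI;
 *	io->io_hdr.nexus = ...;		(initiator, port, target and LUN)
 *	(fill in io->scsiio.cdb, cdb_len, tag_num and tag_type)
 *	ctl_queue(io);
 *
 * Status is eventually reported back through the port's fe_done callback
 * once ctl_process_done() runs.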
12677 */ 12678 int 12679 ctl_queue(union ctl_io *io) 12680 { 12681 struct ctl_softc *ctl_softc; 12682 12683 CTL_DEBUG_PRINT(("ctl_queue cdb[0]=%02X\n", io->scsiio.cdb[0])); 12684 12685 ctl_softc = control_softc; 12686 12687 #ifdef CTL_TIME_IO 12688 io->io_hdr.start_time = time_uptime; 12689 getbintime(&io->io_hdr.start_bt); 12690 #endif /* CTL_TIME_IO */ 12691 12692 mtx_lock(&ctl_softc->ctl_lock); 12693 12694 switch (io->io_hdr.io_type) { 12695 case CTL_IO_SCSI: 12696 STAILQ_INSERT_TAIL(&ctl_softc->incoming_queue, &io->io_hdr, 12697 links); 12698 break; 12699 case CTL_IO_TASK: 12700 STAILQ_INSERT_TAIL(&ctl_softc->task_queue, &io->io_hdr, links); 12701 /* 12702 * Set the task pending flag. This is necessary to close a 12703 * race condition with the FETD: 12704 * 12705 * - FETD submits a task management command, like an abort. 12706 * - Back end calls fe_datamove() to move the data for the 12707 * aborted command. The FETD can't really accept it, but 12708 * if it did, it would end up transmitting data for a 12709 * command that the initiator told us to abort. 12710 * 12711 * We close the race condition by setting the flag here, 12712 * and checking it in ctl_datamove(), before calling the 12713 * FETD's fe_datamove routine. If we've got a task 12714 * pending, we run the task queue and then check to see 12715 * whether our particular I/O has been aborted. 12716 */ 12717 ctl_softc->flags |= CTL_FLAG_TASK_PENDING; 12718 break; 12719 default: 12720 mtx_unlock(&ctl_softc->ctl_lock); 12721 printf("ctl_queue: unknown I/O type %d\n", io->io_hdr.io_type); 12722 return (-EINVAL); 12723 break; /* NOTREACHED */ 12724 } 12725 mtx_unlock(&ctl_softc->ctl_lock); 12726 12727 ctl_wakeup_thread(); 12728 12729 return (CTL_RETVAL_COMPLETE); 12730 } 12731 12732 #ifdef CTL_IO_DELAY 12733 static void 12734 ctl_done_timer_wakeup(void *arg) 12735 { 12736 union ctl_io *io; 12737 12738 io = (union ctl_io *)arg; 12739 ctl_done_lock(io, /*have_lock*/ 0); 12740 } 12741 #endif /* CTL_IO_DELAY */ 12742 12743 void 12744 ctl_done_lock(union ctl_io *io, int have_lock) 12745 { 12746 struct ctl_softc *ctl_softc; 12747 #ifndef CTL_DONE_THREAD 12748 union ctl_io *xio; 12749 #endif /* !CTL_DONE_THREAD */ 12750 12751 ctl_softc = control_softc; 12752 12753 if (have_lock == 0) 12754 mtx_lock(&ctl_softc->ctl_lock); 12755 12756 /* 12757 * Enable this to catch duplicate completion issues. 12758 */ 12759 #if 0 12760 if (io->io_hdr.flags & CTL_FLAG_ALREADY_DONE) { 12761 printf("%s: type %d msg %d cdb %x iptl: " 12762 "%d:%d:%d:%d tag 0x%04x " 12763 "flag %#x status %x\n", 12764 __func__, 12765 io->io_hdr.io_type, 12766 io->io_hdr.msg_type, 12767 io->scsiio.cdb[0], 12768 io->io_hdr.nexus.initid.id, 12769 io->io_hdr.nexus.targ_port, 12770 io->io_hdr.nexus.targ_target.id, 12771 io->io_hdr.nexus.targ_lun, 12772 (io->io_hdr.io_type == 12773 CTL_IO_TASK) ? 12774 io->taskio.tag_num : 12775 io->scsiio.tag_num, 12776 io->io_hdr.flags, 12777 io->io_hdr.status); 12778 } else 12779 io->io_hdr.flags |= CTL_FLAG_ALREADY_DONE; 12780 #endif 12781 12782 /* 12783 * This is an internal copy of an I/O, and should not go through 12784 * the normal done processing logic. 12785 */ 12786 if (io->io_hdr.flags & CTL_FLAG_INT_COPY) { 12787 if (have_lock == 0) 12788 mtx_unlock(&ctl_softc->ctl_lock); 12789 return; 12790 } 12791 12792 /* 12793 * We need to send a msg to the serializing shelf to finish the IO 12794 * as well. We don't send a finish message to the other shelf if 12795 * this is a task management command. 
Task management commands 12796 * aren't serialized in the OOA queue, but rather just executed on 12797 * both shelf controllers for commands that originated on that 12798 * controller. 12799 */ 12800 if ((io->io_hdr.flags & CTL_FLAG_SENT_2OTHER_SC) 12801 && (io->io_hdr.io_type != CTL_IO_TASK)) { 12802 union ctl_ha_msg msg_io; 12803 12804 msg_io.hdr.msg_type = CTL_MSG_FINISH_IO; 12805 msg_io.hdr.serializing_sc = io->io_hdr.serializing_sc; 12806 if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_io, 12807 sizeof(msg_io), 0 ) != CTL_HA_STATUS_SUCCESS) { 12808 } 12809 /* continue on to finish IO */ 12810 } 12811 #ifdef CTL_IO_DELAY 12812 if (io->io_hdr.flags & CTL_FLAG_DELAY_DONE) { 12813 struct ctl_lun *lun; 12814 12815 lun =(struct ctl_lun *)io->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 12816 12817 io->io_hdr.flags &= ~CTL_FLAG_DELAY_DONE; 12818 } else { 12819 struct ctl_lun *lun; 12820 12821 lun =(struct ctl_lun *)io->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 12822 12823 if ((lun != NULL) 12824 && (lun->delay_info.done_delay > 0)) { 12825 struct callout *callout; 12826 12827 callout = (struct callout *)&io->io_hdr.timer_bytes; 12828 callout_init(callout, /*mpsafe*/ 1); 12829 io->io_hdr.flags |= CTL_FLAG_DELAY_DONE; 12830 callout_reset(callout, 12831 lun->delay_info.done_delay * hz, 12832 ctl_done_timer_wakeup, io); 12833 if (lun->delay_info.done_type == CTL_DELAY_TYPE_ONESHOT) 12834 lun->delay_info.done_delay = 0; 12835 if (have_lock == 0) 12836 mtx_unlock(&ctl_softc->ctl_lock); 12837 return; 12838 } 12839 } 12840 #endif /* CTL_IO_DELAY */ 12841 12842 STAILQ_INSERT_TAIL(&ctl_softc->done_queue, &io->io_hdr, links); 12843 12844 #ifdef CTL_DONE_THREAD 12845 if (have_lock == 0) 12846 mtx_unlock(&ctl_softc->ctl_lock); 12847 12848 ctl_wakeup_thread(); 12849 #else /* CTL_DONE_THREAD */ 12850 for (xio = (union ctl_io *)STAILQ_FIRST(&ctl_softc->done_queue); 12851 xio != NULL; 12852 xio =(union ctl_io *)STAILQ_FIRST(&ctl_softc->done_queue)) { 12853 12854 STAILQ_REMOVE_HEAD(&ctl_softc->done_queue, links); 12855 12856 ctl_process_done(xio, /*have_lock*/ 1); 12857 } 12858 if (have_lock == 0) 12859 mtx_unlock(&ctl_softc->ctl_lock); 12860 #endif /* CTL_DONE_THREAD */ 12861 } 12862 12863 void 12864 ctl_done(union ctl_io *io) 12865 { 12866 ctl_done_lock(io, /*have_lock*/ 0); 12867 } 12868 12869 int 12870 ctl_isc(struct ctl_scsiio *ctsio) 12871 { 12872 struct ctl_lun *lun; 12873 int retval; 12874 12875 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 12876 12877 CTL_DEBUG_PRINT(("ctl_isc: command: %02x\n", ctsio->cdb[0])); 12878 12879 CTL_DEBUG_PRINT(("ctl_isc: calling data_submit()\n")); 12880 12881 retval = lun->backend->data_submit((union ctl_io *)ctsio); 12882 12883 return (retval); 12884 } 12885 12886 12887 static void 12888 ctl_work_thread(void *arg) 12889 { 12890 struct ctl_softc *softc; 12891 union ctl_io *io; 12892 struct ctl_be_lun *be_lun; 12893 int retval; 12894 12895 CTL_DEBUG_PRINT(("ctl_work_thread starting\n")); 12896 12897 softc = (struct ctl_softc *)arg; 12898 if (softc == NULL) 12899 return; 12900 12901 mtx_lock(&softc->ctl_lock); 12902 for (;;) { 12903 retval = 0; 12904 12905 /* 12906 * We handle the queues in this order: 12907 * - task management 12908 * - ISC 12909 * - done queue (to free up resources, unblock other commands) 12910 * - RtR queue 12911 * - incoming queue 12912 * 12913 * If those queues are empty, we break out of the loop and 12914 * go to sleep. 
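		 *
		 * (Strictly speaking the loop never exits: when everything is
		 * empty we mtx_sleep() on the softc, and the wakeup() in
		 * ctl_wakeup_thread() sends us back to the top to re-check
		 * each queue.)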
12915 */ 12916 io = (union ctl_io *)STAILQ_FIRST(&softc->task_queue); 12917 if (io != NULL) { 12918 ctl_run_task_queue(softc); 12919 continue; 12920 } 12921 io = (union ctl_io *)STAILQ_FIRST(&softc->isc_queue); 12922 if (io != NULL) { 12923 STAILQ_REMOVE_HEAD(&softc->isc_queue, links); 12924 ctl_handle_isc(io); 12925 continue; 12926 } 12927 io = (union ctl_io *)STAILQ_FIRST(&softc->done_queue); 12928 if (io != NULL) { 12929 STAILQ_REMOVE_HEAD(&softc->done_queue, links); 12930 /* clear any blocked commands, call fe_done */ 12931 mtx_unlock(&softc->ctl_lock); 12932 /* 12933 * XXX KDM 12934 * Call this without a lock for now. This will 12935 * depend on whether there is any way the FETD can 12936 * sleep or deadlock if called with the CTL lock 12937 * held. 12938 */ 12939 retval = ctl_process_done(io, /*have_lock*/ 0); 12940 mtx_lock(&softc->ctl_lock); 12941 continue; 12942 } 12943 if (!ctl_pause_rtr) { 12944 io = (union ctl_io *)STAILQ_FIRST(&softc->rtr_queue); 12945 if (io != NULL) { 12946 STAILQ_REMOVE_HEAD(&softc->rtr_queue, links); 12947 mtx_unlock(&softc->ctl_lock); 12948 goto execute; 12949 } 12950 } 12951 io = (union ctl_io *)STAILQ_FIRST(&softc->incoming_queue); 12952 if (io != NULL) { 12953 STAILQ_REMOVE_HEAD(&softc->incoming_queue, links); 12954 mtx_unlock(&softc->ctl_lock); 12955 ctl_scsiio_precheck(softc, &io->scsiio); 12956 mtx_lock(&softc->ctl_lock); 12957 continue; 12958 } 12959 /* 12960 * We might want to move this to a separate thread, so that 12961 * configuration requests (in this case LUN creations) 12962 * won't impact the I/O path. 12963 */ 12964 be_lun = STAILQ_FIRST(&softc->pending_lun_queue); 12965 if (be_lun != NULL) { 12966 STAILQ_REMOVE_HEAD(&softc->pending_lun_queue, links); 12967 mtx_unlock(&softc->ctl_lock); 12968 ctl_create_lun(be_lun); 12969 mtx_lock(&softc->ctl_lock); 12970 continue; 12971 } 12972 12973 /* XXX KDM use the PDROP flag?? */ 12974 /* Sleep until we have something to do. */ 12975 mtx_sleep(softc, &softc->ctl_lock, PRIBIO, "ctl_work", 0); 12976 12977 /* Back to the top of the loop to see what woke us up. */ 12978 continue; 12979 12980 execute: 12981 retval = ctl_scsiio(&io->scsiio); 12982 switch (retval) { 12983 case CTL_RETVAL_COMPLETE: 12984 break; 12985 default: 12986 /* 12987 * Probably need to make sure this doesn't happen. 12988 */ 12989 break; 12990 } 12991 mtx_lock(&softc->ctl_lock); 12992 } 12993 } 12994 12995 void 12996 ctl_wakeup_thread() 12997 { 12998 struct ctl_softc *softc; 12999 13000 softc = control_softc; 13001 13002 wakeup(softc); 13003 } 13004 13005 /* Initialization and failover */ 13006 13007 void 13008 ctl_init_isc_msg(void) 13009 { 13010 printf("CTL: Still calling this thing\n"); 13011 } 13012 13013 /* 13014 * Init component 13015 * Initializes component into configuration defined by bootMode 13016 * (see hasc-sv.c) 13017 * returns hasc_Status: 13018 * OK 13019 * ERROR - fatal error 13020 */ 13021 static ctl_ha_comp_status 13022 ctl_isc_init(struct ctl_ha_component *c) 13023 { 13024 ctl_ha_comp_status ret = CTL_HA_COMP_STATUS_OK; 13025 13026 c->status = ret; 13027 return ret; 13028 } 13029 13030 /* Start component 13031 * Starts component in state requested. If component starts successfully, 13032 * it must set its own state to the requestrd state 13033 * When requested state is HASC_STATE_HA, the component may refine it 13034 * by adding _SLAVE or _MASTER flags. 
 * Currently allowed state transitions are:
 *    UNKNOWN->HA		- initial startup
 *    UNKNOWN->SINGLE	- initial startup when no partner detected
 *    HA->SINGLE		- failover
 * returns ctl_ha_comp_status:
 * 		OK	- component successfully started in requested state
 *		FAILED	- could not start the requested state, failover may
 *			  be possible
 *		ERROR	- fatal error detected, no future startup possible
 */
static ctl_ha_comp_status
ctl_isc_start(struct ctl_ha_component *c, ctl_ha_state state)
{
	ctl_ha_comp_status ret = CTL_HA_COMP_STATUS_OK;

	/* UNKNOWN->HA or UNKNOWN->SINGLE (bootstrap) */
	if (c->state == CTL_HA_STATE_UNKNOWN) {
		ctl_is_single = 0;
		if (ctl_ha_msg_create(CTL_HA_CHAN_CTL, ctl_isc_event_handler)
		    != CTL_HA_STATUS_SUCCESS) {
			printf("ctl_isc_start: ctl_ha_msg_create failed.\n");
			ret = CTL_HA_COMP_STATUS_ERROR;
		}
	} else if (CTL_HA_STATE_IS_HA(c->state)
		&& CTL_HA_STATE_IS_SINGLE(state)) {
		/* HA->SINGLE transition */
		ctl_failover();
		ctl_is_single = 1;
	} else {
		printf("ctl_isc_start: Invalid state transition %X->%X\n",
			c->state, state);
		ret = CTL_HA_COMP_STATUS_ERROR;
	}
	if (CTL_HA_STATE_IS_SINGLE(state))
		ctl_is_single = 1;

	c->state = state;
	c->status = ret;
	return ret;
}

/*
 * Quiesce component
 * The component must clear any error conditions (set status to OK) and
 * prepare itself for another Start call.
 * returns ctl_ha_comp_status:
 * 		OK
 *		ERROR
 */
static ctl_ha_comp_status
ctl_isc_quiesce(struct ctl_ha_component *c)
{
	int ret = CTL_HA_COMP_STATUS_OK;

	ctl_pause_rtr = 1;
	c->status = ret;
	return ret;
}

struct ctl_ha_component ctl_ha_component_ctlisc =
{
	.name = "CTL ISC",
	.state = CTL_HA_STATE_UNKNOWN,
	.init = ctl_isc_init,
	.start = ctl_isc_start,
	.quiesce = ctl_isc_quiesce
};

/*
 * vim: ts=8
 */