/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */
#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * sf - Solaris Fibre Channel driver
 *
 * This module implements some of the Fibre Channel FC-4 layer, converting
 * from FC frames to SCSI and back.  (Note: no sequence management is done
 * here, though.)
 */

#if defined(lint) && !defined(DEBUG)
#define	DEBUG	1
#endif

/*
 * XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
 * Need to use the ugly RAID LUN mappings in FCP Annex D
 * to prevent SCSA from barfing.  This *REALLY* needs to
 * be addressed by the standards committee.
 */
#define	RAID_LUNS	1

#ifdef DEBUG
static int sfdebug = 0;
#include <sys/debug.h>

#define	SF_DEBUG(level, args) \
    if (sfdebug >= (level)) sf_log args
#else
#define	SF_DEBUG(level, args)
#endif

static int sf_bus_config_debug = 0;
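/*
 * Example of the SF_DEBUG macro above, as used throughout this file
 * (this one appears in sf_attach() below); the extra parentheses pass
 * a complete sf_log() argument list through the single macro parameter:
 *
 *	SF_DEBUG(2, (sf, CE_CONT,
 *	    "sf_attach: DDI_RESUME for sf%d\n", instance));
 *
 * which compiles to nothing at all unless DEBUG is defined.
 */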
/* Why do I have to do this? */
#define	offsetof(s, m)	(size_t)(&(((s *)0)->m))

#include <sys/scsi/scsi.h>
#include <sys/fc4/fcal.h>
#include <sys/fc4/fcp.h>
#include <sys/fc4/fcal_linkapp.h>
#include <sys/socal_cq_defs.h>
#include <sys/fc4/fcal_transport.h>
#include <sys/fc4/fcio.h>
#include <sys/scsi/adapters/sfvar.h>
#include <sys/scsi/impl/scsi_reset_notify.h>
#include <sys/stat.h>
#include <sys/varargs.h>
#include <sys/var.h>
#include <sys/thread.h>
#include <sys/proc.h>
#include <sys/kstat.h>
#include <sys/devctl.h>
#include <sys/scsi/targets/ses.h>
#include <sys/callb.h>

static int sf_info(dev_info_t *, ddi_info_cmd_t, void *, void **);
static int sf_attach(dev_info_t *, ddi_attach_cmd_t);
static int sf_detach(dev_info_t *, ddi_detach_cmd_t);
static void sf_softstate_unlink(struct sf *);
static int sf_scsi_bus_config(dev_info_t *parent, uint_t flag,
    ddi_bus_config_op_t op, void *arg, dev_info_t **childp);
static int sf_scsi_bus_unconfig(dev_info_t *parent, uint_t flag,
    ddi_bus_config_op_t op, void *arg);
static int sf_scsi_tgt_init(dev_info_t *, dev_info_t *,
    scsi_hba_tran_t *, struct scsi_device *);
static void sf_scsi_tgt_free(dev_info_t *, dev_info_t *,
    scsi_hba_tran_t *, struct scsi_device *);
static int sf_pkt_alloc_extern(struct sf *, struct sf_pkt *,
    int, int, int);
static void sf_pkt_destroy_extern(struct sf *, struct sf_pkt *);
static struct scsi_pkt *sf_scsi_init_pkt(struct scsi_address *,
    struct scsi_pkt *, struct buf *, int, int, int, int, int (*)(), caddr_t);
static void sf_scsi_destroy_pkt(struct scsi_address *, struct scsi_pkt *);
static void sf_scsi_dmafree(struct scsi_address *, struct scsi_pkt *);
static void sf_scsi_sync_pkt(struct scsi_address *, struct scsi_pkt *);
static int sf_scsi_reset_notify(struct scsi_address *, int,
    void (*)(caddr_t), caddr_t);
static int sf_scsi_get_name(struct scsi_device *, char *, int);
static int sf_scsi_get_bus_addr(struct scsi_device *, char *, int);
static int sf_add_cr_pool(struct sf *);
static int sf_cr_alloc(struct sf *, struct sf_pkt *, int (*)());
static void sf_cr_free(struct sf_cr_pool *, struct sf_pkt *);
static void sf_crpool_free(struct sf *);
static int sf_kmem_cache_constructor(void *, void *, int);
static void sf_kmem_cache_destructor(void *, void *);
static void sf_statec_callback(void *, int);
static int sf_login(struct sf *, uchar_t, uchar_t, uint_t, int);
static int sf_els_transport(struct sf *, struct sf_els_hdr *);
static void sf_els_callback(struct fcal_packet *);
static int sf_do_prli(struct sf *, struct sf_els_hdr *, struct la_els_logi *);
static int sf_do_adisc(struct sf *, struct sf_els_hdr *);
static int sf_do_reportlun(struct sf *, struct sf_els_hdr *,
    struct sf_target *);
static void sf_reportlun_callback(struct fcal_packet *);
static int sf_do_inquiry(struct sf *, struct sf_els_hdr *,
    struct sf_target *);
static void sf_inq_callback(struct fcal_packet *);
static struct fcal_packet *sf_els_alloc(struct sf *, uchar_t, int, int,
    int, caddr_t *, caddr_t *);
static void sf_els_free(struct fcal_packet *);
static struct sf_target *sf_create_target(struct sf *,
    struct sf_els_hdr *, int, int64_t);
#ifdef RAID_LUNS
static struct sf_target *sf_lookup_target(struct sf *, uchar_t *, int);
#else
static struct sf_target *sf_lookup_target(struct sf *, uchar_t *, int64_t);
#endif
static void sf_finish_init(struct sf *, int);
static void sf_offline_target(struct sf *, struct sf_target *);
static void sf_create_devinfo(struct sf *, struct sf_target *, int);
static int sf_create_props(dev_info_t *, struct sf_target *, int);
static int sf_commoncap(struct scsi_address *, char *, int, int, int);
static int sf_getcap(struct scsi_address *, char *, int);
static int sf_setcap(struct scsi_address *, char *, int, int);
static int sf_abort(struct scsi_address *, struct scsi_pkt *);
static int sf_reset(struct scsi_address *, int);
static void sf_abort_all(struct sf *, struct sf_target *, int, int, int);
static int sf_start(struct scsi_address *, struct scsi_pkt *);
static int sf_start_internal(struct sf *, struct sf_pkt *);
static void sf_fill_ids(struct sf *, struct sf_pkt *, struct sf_target *);
static int sf_prepare_pkt(struct sf *, struct sf_pkt *, struct sf_target *);
static int sf_dopoll(struct sf *, struct sf_pkt *);
static void sf_cmd_callback(struct fcal_packet *);
static void sf_throttle(struct sf *);
static void sf_watch(void *);
static void sf_throttle_start(struct sf *);
static void sf_check_targets(struct sf *);
static void sf_check_reset_delay(void *);
static int sf_target_timeout(struct sf *, struct sf_pkt *);
static void sf_force_lip(struct sf *);
static void sf_unsol_els_callback(void *, soc_response_t *, caddr_t);
static struct sf_els_hdr *sf_els_timeout(struct sf *, struct sf_els_hdr *);
/*PRINTFLIKE3*/
static void sf_log(struct sf *, int, const char *, ...);
static int sf_kstat_update(kstat_t *, int);
static int sf_open(dev_t *, int, int, cred_t *);
static int sf_close(dev_t, int, int, cred_t *);
static int sf_ioctl(dev_t, int, intptr_t, int, cred_t *, int *);
static struct sf_target *sf_get_target_from_dip(struct sf *, dev_info_t *);
static int sf_bus_get_eventcookie(dev_info_t *, dev_info_t *, char *,
    ddi_eventcookie_t *);
static int sf_bus_add_eventcall(dev_info_t *, dev_info_t *,
    ddi_eventcookie_t, void (*)(), void *, ddi_callback_id_t *cb_id);
static int sf_bus_remove_eventcall(dev_info_t *devi, ddi_callback_id_t cb_id);
static int sf_bus_post_event(dev_info_t *, dev_info_t *,
    ddi_eventcookie_t, void *);

static void sf_hp_daemon(void *);

/*
 * this is required to be able to supply a control node
 * where ioctls can be executed
 */
struct cb_ops sf_cb_ops = {
    sf_open,			/* open */
    sf_close,			/* close */
    nodev,			/* strategy */
    nodev,			/* print */
    nodev,			/* dump */
    nodev,			/* read */
    nodev,			/* write */
    sf_ioctl,			/* ioctl */
    nodev,			/* devmap */
    nodev,			/* mmap */
    nodev,			/* segmap */
    nochpoll,			/* poll */
    ddi_prop_op,		/* cb_prop_op */
    0,				/* streamtab */
    D_MP | D_NEW | D_HOTPLUG	/* driver flags */
};

/*
 * autoconfiguration routines.
 */
static struct dev_ops sf_ops = {
    DEVO_REV,			/* devo_rev, */
    0,				/* refcnt */
    sf_info,			/* info */
    nulldev,			/* identify */
    nulldev,			/* probe */
    sf_attach,			/* attach */
    sf_detach,			/* detach */
    nodev,			/* reset */
    &sf_cb_ops,			/* driver operations */
    NULL,			/* bus operations */
    NULL			/* power management */
};

/* to ensure this module gets loaded in memory when we do */
char _depends_on[] = "misc/scsi";

#define	SF_NAME	"FC-AL FCP Nexus Driver"	/* Name of the module. */
static char sf_version[] = "%I% %E%";		/* version of the module */

static struct modldrv modldrv = {
    &mod_driverops,		/* Type of module. This one is a driver */
    SF_NAME "%I%",
    &sf_ops,			/* driver ops */
};

static struct modlinkage modlinkage = {
    MODREV_1, (void *)&modldrv, NULL
};

/* XXXXXX The following is here to handle broken targets -- remove it later */
static int sf_reportlun_forever = 0;
/* XXXXXX */
static int sf_lip_on_plogo = 0;
static int sf_els_retries = SF_ELS_RETRIES;
static struct sf *sf_head = NULL;
static int sf_target_scan_cnt = 4;
static int sf_pkt_scan_cnt = 5;
static int sf_pool_scan_cnt = 1800;
static void *sf_state = NULL;
static int sf_watchdog_init = 0;
static int sf_watchdog_time = 0;
static int sf_watchdog_timeout = 1;
static int sf_watchdog_tick;
static int sf_watch_running = 0;
static timeout_id_t sf_watchdog_id;
static timeout_id_t sf_reset_timeout_id;
static int sf_max_targets = SF_MAX_TARGETS;
static kmutex_t sf_global_mutex;
static int sf_core = 0;
int *sf_token = NULL;		/* Must not be static or lint complains. */
static kcondvar_t sf_watch_cv;
extern pri_t minclsyspri;
static ddi_eventcookie_t sf_insert_eid;
static ddi_eventcookie_t sf_remove_eid;

static ndi_event_definition_t sf_event_defs[] = {
    { SF_EVENT_TAG_INSERT, FCAL_INSERT_EVENT, EPL_KERNEL, 0 },
    { SF_EVENT_TAG_REMOVE, FCAL_REMOVE_EVENT, EPL_INTERRUPT, 0 }
};

#define	SF_N_NDI_EVENTS	\
    (sizeof (sf_event_defs) / sizeof (ndi_event_definition_t))

#ifdef DEBUG
static int sf_lip_flag = 1;	/* bool: to allow LIPs */
static int sf_reset_flag = 1;	/* bool: to allow reset after LIP */
static int sf_abort_flag = 0;	/* bool: to do just one abort */
#endif

extern volatile int64_t lbolt64;

/*
 * for converting between target number (switch) and hard address/AL_PA
 */
static uchar_t sf_switch_to_alpa[] = {
    0xef, 0xe8, 0xe4, 0xe2, 0xe1, 0xe0, 0xdc, 0xda, 0xd9, 0xd6,
    0xd5, 0xd4, 0xd3, 0xd2, 0xd1, 0xce, 0xcd, 0xcc, 0xcb, 0xca,
    0xc9, 0xc7, 0xc6, 0xc5, 0xc3, 0xbc, 0xba, 0xb9, 0xb6, 0xb5,
    0xb4, 0xb3, 0xb2, 0xb1, 0xae, 0xad, 0xac, 0xab, 0xaa, 0xa9,
    0xa7, 0xa6, 0xa5, 0xa3, 0x9f, 0x9e, 0x9d, 0x9b, 0x98, 0x97,
    0x90, 0x8f, 0x88, 0x84, 0x82, 0x81, 0x80, 0x7c, 0x7a, 0x79,
    0x76, 0x75, 0x74, 0x73, 0x72, 0x71, 0x6e, 0x6d, 0x6c, 0x6b,
    0x6a, 0x69, 0x67, 0x66, 0x65, 0x63, 0x5c, 0x5a, 0x59, 0x56,
    0x55, 0x54, 0x53, 0x52, 0x51, 0x4e, 0x4d, 0x4c, 0x4b, 0x4a,
    0x49, 0x47, 0x46, 0x45, 0x43, 0x3c, 0x3a, 0x39, 0x36, 0x35,
    0x34, 0x33, 0x32, 0x31, 0x2e, 0x2d, 0x2c, 0x2b, 0x2a, 0x29,
    0x27, 0x26, 0x25, 0x23, 0x1f, 0x1e, 0x1d, 0x1b, 0x18, 0x17,
    0x10, 0x0f, 0x08, 0x04, 0x02, 0x01
};
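/*
 * The table below is the inverse of sf_switch_to_alpa above, indexed by
 * AL_PA: for a valid target number i, sf_alpa_to_switch[
 * sf_switch_to_alpa[i]] == i (e.g. sf_switch_to_alpa[1] == 0xe8 and
 * sf_alpa_to_switch[0xe8] == 0x01); AL_PA values with no target map to 0.
 */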
static uchar_t sf_alpa_to_switch[] = {
    0x00, 0x7d, 0x7c, 0x00, 0x7b, 0x00, 0x00, 0x00, 0x7a, 0x00,
    0x00, 0x00, 0x00, 0x00, 0x00, 0x79, 0x78, 0x00, 0x00, 0x00,
    0x00, 0x00, 0x00, 0x77, 0x76, 0x00, 0x00, 0x75, 0x00, 0x74,
    0x73, 0x72, 0x00, 0x00, 0x00, 0x71, 0x00, 0x70, 0x6f, 0x6e,
    0x00, 0x6d, 0x6c, 0x6b, 0x6a, 0x69, 0x68, 0x00, 0x00, 0x67,
    0x66, 0x65, 0x64, 0x63, 0x62, 0x00, 0x00, 0x61, 0x60, 0x00,
    0x5f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x5e, 0x00, 0x5d,
    0x5c, 0x5b, 0x00, 0x5a, 0x59, 0x58, 0x57, 0x56, 0x55, 0x00,
    0x00, 0x54, 0x53, 0x52, 0x51, 0x50, 0x4f, 0x00, 0x00, 0x4e,
    0x4d, 0x00, 0x4c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x4b,
    0x00, 0x4a, 0x49, 0x48, 0x00, 0x47, 0x46, 0x45, 0x44, 0x43,
    0x42, 0x00, 0x00, 0x41, 0x40, 0x3f, 0x3e, 0x3d, 0x3c, 0x00,
    0x00, 0x3b, 0x3a, 0x00, 0x39, 0x00, 0x00, 0x00, 0x38, 0x37,
    0x36, 0x00, 0x35, 0x00, 0x00, 0x00, 0x34, 0x00, 0x00, 0x00,
    0x00, 0x00, 0x00, 0x33, 0x32, 0x00, 0x00, 0x00, 0x00, 0x00,
    0x00, 0x31, 0x30, 0x00, 0x00, 0x2f, 0x00, 0x2e, 0x2d, 0x2c,
    0x00, 0x00, 0x00, 0x2b, 0x00, 0x2a, 0x29, 0x28, 0x00, 0x27,
    0x26, 0x25, 0x24, 0x23, 0x22, 0x00, 0x00, 0x21, 0x20, 0x1f,
    0x1e, 0x1d, 0x1c, 0x00, 0x00, 0x1b, 0x1a, 0x00, 0x19, 0x00,
    0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x00, 0x17, 0x16, 0x15,
    0x00, 0x14, 0x13, 0x12, 0x11, 0x10, 0x0f, 0x00, 0x00, 0x0e,
    0x0d, 0x0c, 0x0b, 0x0a, 0x09, 0x00, 0x00, 0x08, 0x07, 0x00,
    0x06, 0x00, 0x00, 0x00, 0x05, 0x04, 0x03, 0x00, 0x02, 0x00,
    0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};

/*
 * these macros call the proper transport-layer function given
 * a particular transport
 */
#define	soc_transport(a, b, c, d) (*a->fcal_ops->fcal_transport)(b, c, d)
#define	soc_transport_poll(a, b, c, d)\
    (*a->fcal_ops->fcal_transport_poll)(b, c, d)
#define	soc_get_lilp_map(a, b, c, d, e)\
    (*a->fcal_ops->fcal_lilp_map)(b, c, d, e)
#define	soc_force_lip(a, b, c, d, e)\
    (*a->fcal_ops->fcal_force_lip)(b, c, d, e)
#define	soc_abort(a, b, c, d, e)\
    (*a->fcal_ops->fcal_abort_cmd)(b, c, d, e)
#define	soc_force_reset(a, b, c, d)\
    (*a->fcal_ops->fcal_force_reset)(b, c, d)
#define	soc_add_ulp(a, b, c, d, e, f, g, h)\
    (*a->fcal_ops->fcal_add_ulp)(b, c, d, e, f, g, h)
#define	soc_remove_ulp(a, b, c, d, e)\
    (*a->fcal_ops->fcal_remove_ulp)(b, c, d, e)
#define	soc_take_core(a, b) (*a->fcal_ops->fcal_take_core)(b)
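/*
 * In each macro above the first argument supplies only the fcal_ops
 * vector, so, for example, the call made in sf_attach() below:
 *
 *	soc_force_lip(sf->sf_sochandle, sf->sf_socp,
 *	    sf->sf_sochandle->fcal_portno, 0, FCAL_NO_LIP);
 *
 * expands to
 *
 *	(*sf->sf_sochandle->fcal_ops->fcal_force_lip)(sf->sf_socp,
 *	    sf->sf_sochandle->fcal_portno, 0, FCAL_NO_LIP);
 */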
/* power management property defines (should be in a common include file?) */
#define	PM_HARDWARE_STATE_PROP		"pm-hardware-state"
#define	PM_NEEDS_SUSPEND_RESUME		"needs-suspend-resume"


/* node properties */
#define	NODE_WWN_PROP			"node-wwn"
#define	PORT_WWN_PROP			"port-wwn"
#define	LIP_CNT_PROP			"lip-count"
#define	TARGET_PROP			"target"
#define	LUN_PROP			"lun"


/*
 * initialize this driver and install this module
 */
int
_init(void)
{
    int i;

    i = ddi_soft_state_init(&sf_state, sizeof (struct sf),
	SF_INIT_ITEMS);
    if (i != 0)
	return (i);

    if ((i = scsi_hba_init(&modlinkage)) != 0) {
	ddi_soft_state_fini(&sf_state);
	return (i);
    }

    mutex_init(&sf_global_mutex, NULL, MUTEX_DRIVER, NULL);
    sf_watch_running = 0;
    cv_init(&sf_watch_cv, NULL, CV_DRIVER, NULL);

    if ((i = mod_install(&modlinkage)) != 0) {
	mutex_destroy(&sf_global_mutex);
	cv_destroy(&sf_watch_cv);
	scsi_hba_fini(&modlinkage);
	ddi_soft_state_fini(&sf_state);
	return (i);
    }

    return (i);
}


/*
 * remove this driver module from the system
 */
int
_fini(void)
{
    int i;

    if ((i = mod_remove(&modlinkage)) == 0) {
	scsi_hba_fini(&modlinkage);
	mutex_destroy(&sf_global_mutex);
	cv_destroy(&sf_watch_cv);
	ddi_soft_state_fini(&sf_state);
    }
    return (i);
}


int
_info(struct modinfo *modinfop)
{
    return (mod_info(&modlinkage, modinfop));
}

/*
 * Given the device number return the devinfo pointer or instance
 */
/*ARGSUSED*/
static int
sf_info(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
{
    int instance = SF_MINOR2INST(getminor((dev_t)arg));
    struct sf *sf;

    switch (infocmd) {
    case DDI_INFO_DEVT2DEVINFO:
	sf = ddi_get_soft_state(sf_state, instance);
	if (sf != NULL)
	    *result = sf->sf_dip;
	else {
	    *result = NULL;
	    return (DDI_FAILURE);
	}
	break;

    case DDI_INFO_DEVT2INSTANCE:
	*result = (void *)(uintptr_t)instance;
	break;
    default:
	return (DDI_FAILURE);
    }
    return (DDI_SUCCESS);
}

/*
 * either attach or resume this driver
 */
static int
sf_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
    int instance;
    int mutex_initted = FALSE;
    uint_t ccount;
    size_t i, real_size;
    struct fcal_transport *handle;
    char buf[64];
    struct sf *sf, *tsf;
    scsi_hba_tran_t *tran = NULL;
    int handle_bound = FALSE;
    kthread_t *tp;


    switch ((int)cmd) {

    case DDI_RESUME:

	/*
	 * we've previously been SF_STATE_OFFLINEd by a DDI_SUSPEND,
	 * so time to undo that and get going again by forcing a
	 * lip
	 */

	instance = ddi_get_instance(dip);

	sf = ddi_get_soft_state(sf_state, instance);
	SF_DEBUG(2, (sf, CE_CONT,
	    "sf_attach: DDI_RESUME for sf%d\n", instance));
	if (sf == NULL) {
	    cmn_err(CE_WARN, "sf%d: bad soft state", instance);
	    return (DDI_FAILURE);
	}

	/*
	 * clear suspended flag so that normal operations can resume
	 */
	mutex_enter(&sf->sf_mutex);
	sf->sf_state &= ~SF_STATE_SUSPENDED;
	mutex_exit(&sf->sf_mutex);

	/*
	 * force a login by setting our state to offline
	 */
	sf->sf_timer = sf_watchdog_time + SF_OFFLINE_TIMEOUT;
	sf->sf_state = SF_STATE_OFFLINE;

	/*
	 * call transport routine to register state change and
	 * ELS callback routines (to register us as a ULP)
	 */
	soc_add_ulp(sf->sf_sochandle, sf->sf_socp,
	    sf->sf_sochandle->fcal_portno, TYPE_SCSI_FCP,
	    sf_statec_callback, sf_unsol_els_callback, NULL, sf);

	/*
	 * call transport routine to force loop initialization
	 */
	(void) soc_force_lip(sf->sf_sochandle, sf->sf_socp,
	    sf->sf_sochandle->fcal_portno, 0, FCAL_NO_LIP);

	/*
	 * increment watchdog init flag, setting watchdog timeout
	 * if we are the first (since somebody has to do it)
	 */
	mutex_enter(&sf_global_mutex);
	if (!sf_watchdog_init++) {
	    mutex_exit(&sf_global_mutex);
	    sf_watchdog_id = timeout(sf_watch,
		(caddr_t)0, sf_watchdog_tick);
	} else {
	    mutex_exit(&sf_global_mutex);
	}

	return (DDI_SUCCESS);

    case DDI_ATTACH:

	/*
	 * this instance attaching for the first time
	 */

	instance = ddi_get_instance(dip);

	if (ddi_soft_state_zalloc(sf_state, instance) !=
	    DDI_SUCCESS) {
	    cmn_err(CE_WARN, "sf%d: failed to allocate soft state",
		instance);
	    return (DDI_FAILURE);
	}

	sf = ddi_get_soft_state(sf_state, instance);
	SF_DEBUG(4, (sf, CE_CONT,
	    "sf_attach: DDI_ATTACH for sf%d\n", instance));
	if (sf == NULL) {
	    /* this shouldn't happen since we just allocated it */
	    cmn_err(CE_WARN, "sf%d: bad soft state", instance);
	    return (DDI_FAILURE);
	}

	/*
	 * from this point on, if there's an error, we must de-allocate
	 * soft state before returning DDI_FAILURE
	 */

	if ((handle = ddi_get_parent_data(dip)) == NULL) {
	    cmn_err(CE_WARN,
		"sf%d: failed to obtain transport handle",
		instance);
	    goto fail;
	}

	/* fill in our soft state structure */
	sf->sf_dip = dip;
	sf->sf_state = SF_STATE_INIT;
	sf->sf_throttle = handle->fcal_cmdmax;
	sf->sf_sochandle = handle;
	sf->sf_socp = handle->fcal_handle;
	sf->sf_check_n_close = 0;

	/* create a command/response buffer pool for this instance */
	if (sf_add_cr_pool(sf) != DDI_SUCCESS) {
	    cmn_err(CE_WARN,
		"sf%d: failed to allocate command/response pool",
		instance);
	    goto fail;
	}

	/* create a cache for this instance */
	(void) sprintf(buf, "sf%d_cache", instance);
	sf->sf_pkt_cache = kmem_cache_create(buf,
	    sizeof (fcal_packet_t) + sizeof (struct sf_pkt) +
	    scsi_pkt_size(), 8,
	    sf_kmem_cache_constructor, sf_kmem_cache_destructor,
	    NULL, NULL, NULL, 0);
	if (sf->sf_pkt_cache == NULL) {
	    cmn_err(CE_WARN, "sf%d: failed to allocate kmem cache",
		instance);
	    goto fail;
	}

	/* set up a handle and allocate memory for DMA */
	if (ddi_dma_alloc_handle(sf->sf_dip, sf->sf_sochandle->
	    fcal_dmaattr, DDI_DMA_DONTWAIT, NULL, &sf->
	    sf_lilp_dmahandle) != DDI_SUCCESS) {
	    cmn_err(CE_WARN,
		"sf%d: failed to allocate dma handle for lilp map",
		instance);
	    goto fail;
	}
	i = sizeof (struct fcal_lilp_map) + 1;
	if (ddi_dma_mem_alloc(sf->sf_lilp_dmahandle,
	    i, sf->sf_sochandle->
	    fcal_accattr, DDI_DMA_CONSISTENT, DDI_DMA_DONTWAIT, NULL,
	    (caddr_t *)&sf->sf_lilp_map, &real_size,
	    &sf->sf_lilp_acchandle) != DDI_SUCCESS) {
	    cmn_err(CE_WARN, "sf%d: failed to allocate lilp map",
		instance);
	    goto fail;
	}
	if (real_size < i) {
	    /* no error message ??? */
	    goto fail;		/* trouble allocating memory */
	}
	/*
	 * set up the address for the DMA transfers (getting a cookie)
	 */
	if (ddi_dma_addr_bind_handle(sf->sf_lilp_dmahandle, NULL,
	    (caddr_t)sf->sf_lilp_map, real_size,
	    DDI_DMA_READ | DDI_DMA_CONSISTENT, DDI_DMA_DONTWAIT, NULL,
	    &sf->sf_lilp_dmacookie, &ccount) != DDI_DMA_MAPPED) {
	    cmn_err(CE_WARN,
		"sf%d: failed to bind dma handle for lilp map",
		instance);
	    goto fail;
	}
	handle_bound = TRUE;
	/* ensure only one cookie was allocated */
	if (ccount != 1) {
	    goto fail;
	}

	/* ensure LILP map and DMA cookie addresses are even?? */
	sf->sf_lilp_map = (struct fcal_lilp_map *)(((uintptr_t)sf->
	    sf_lilp_map + 1) & ~1);
	sf->sf_lilp_dmacookie.dmac_address = (sf->
	    sf_lilp_dmacookie.dmac_address + 1) & ~1;

	/* set up all of our mutexes and condition variables */
	mutex_init(&sf->sf_mutex, NULL, MUTEX_DRIVER, NULL);
	mutex_init(&sf->sf_cmd_mutex, NULL, MUTEX_DRIVER, NULL);
	mutex_init(&sf->sf_cr_mutex, NULL, MUTEX_DRIVER, NULL);
	mutex_init(&sf->sf_hp_daemon_mutex, NULL, MUTEX_DRIVER, NULL);
	cv_init(&sf->sf_cr_cv, NULL, CV_DRIVER, NULL);
	cv_init(&sf->sf_hp_daemon_cv, NULL, CV_DRIVER, NULL);

	mutex_initted = TRUE;

	/* create our devctl minor node */
	if (ddi_create_minor_node(dip, "devctl", S_IFCHR,
	    SF_INST2DEVCTL_MINOR(instance),
	    DDI_NT_NEXUS, 0) != DDI_SUCCESS) {
	    cmn_err(CE_WARN, "sf%d: ddi_create_minor_node failed"
		" for devctl", instance);
	    goto fail;
	}

	/* create fc minor node */
	if (ddi_create_minor_node(dip, "fc", S_IFCHR,
	    SF_INST2FC_MINOR(instance), DDI_NT_FC_ATTACHMENT_POINT,
	    0) != DDI_SUCCESS) {
	    cmn_err(CE_WARN, "sf%d: ddi_create_minor_node failed"
		" for fc", instance);
	    goto fail;
	}
	/* allocate a SCSI transport structure */
	tran = scsi_hba_tran_alloc(dip, 0);
	if (tran == NULL) {
	    /* remove all minor nodes created */
	    ddi_remove_minor_node(dip, NULL);
	    cmn_err(CE_WARN, "sf%d: scsi_hba_tran_alloc failed",
		instance);
	    goto fail;
	}

	/* Indicate that we are 'sizeof (scsi_*(9S))' clean. */
	scsi_size_clean(dip);		/* SCSI_SIZE_CLEAN_VERIFY ok */

	/* save ptr to new transport structure and fill it in */
	sf->sf_tran = tran;

	tran->tran_hba_private = sf;
	tran->tran_tgt_private = NULL;
	tran->tran_tgt_init = sf_scsi_tgt_init;
	tran->tran_tgt_probe = NULL;
	tran->tran_tgt_free = sf_scsi_tgt_free;

	tran->tran_start = sf_start;
	tran->tran_abort = sf_abort;
	tran->tran_reset = sf_reset;
	tran->tran_getcap = sf_getcap;
	tran->tran_setcap = sf_setcap;
	tran->tran_init_pkt = sf_scsi_init_pkt;
	tran->tran_destroy_pkt = sf_scsi_destroy_pkt;
	tran->tran_dmafree = sf_scsi_dmafree;
	tran->tran_sync_pkt = sf_scsi_sync_pkt;
	tran->tran_reset_notify = sf_scsi_reset_notify;

	/*
	 * register event notification routines with scsa
	 */
	tran->tran_get_eventcookie = sf_bus_get_eventcookie;
	tran->tran_add_eventcall = sf_bus_add_eventcall;
	tran->tran_remove_eventcall = sf_bus_remove_eventcall;
	tran->tran_post_event = sf_bus_post_event;

	/*
	 * register bus configure/unconfigure
	 */
	tran->tran_bus_config = sf_scsi_bus_config;
	tran->tran_bus_unconfig = sf_scsi_bus_unconfig;

	/*
	 * allocate an ndi event handle
	 */
	sf->sf_event_defs = (ndi_event_definition_t *)
	    kmem_zalloc(sizeof (sf_event_defs), KM_SLEEP);

	bcopy(sf_event_defs, sf->sf_event_defs,
	    sizeof (sf_event_defs));

	(void) ndi_event_alloc_hdl(dip, NULL,
	    &sf->sf_event_hdl, NDI_SLEEP);

	sf->sf_events.ndi_events_version = NDI_EVENTS_REV1;
	sf->sf_events.ndi_n_events = SF_N_NDI_EVENTS;
	sf->sf_events.ndi_event_defs = sf->sf_event_defs;

	if (ndi_event_bind_set(sf->sf_event_hdl,
	    &sf->sf_events, NDI_SLEEP) != NDI_SUCCESS) {
	    goto fail;
	}

	tran->tran_get_name = sf_scsi_get_name;
	tran->tran_get_bus_addr = sf_scsi_get_bus_addr;

	/* setup and attach SCSI hba transport */
	if (scsi_hba_attach_setup(dip, sf->sf_sochandle->
	    fcal_dmaattr, tran, SCSI_HBA_TRAN_CLONE) != DDI_SUCCESS) {
	    cmn_err(CE_WARN, "sf%d: scsi_hba_attach_setup failed",
		instance);
	    goto fail;
	}

	/* set up kstats */
	if ((sf->sf_ksp = kstat_create("sf", instance, "statistics",
	    "controller", KSTAT_TYPE_RAW, sizeof (struct sf_stats),
	    KSTAT_FLAG_VIRTUAL)) == NULL) {
	    cmn_err(CE_WARN, "sf%d: failed to create kstat",
		instance);
	} else {
	    sf->sf_stats.version = 2;
	    (void) sprintf(sf->sf_stats.drvr_name,
		"%s: %s", SF_NAME, sf_version);
	    sf->sf_ksp->ks_data = (void *)&sf->sf_stats;
	    sf->sf_ksp->ks_private = sf;
	    sf->sf_ksp->ks_update = sf_kstat_update;
	    kstat_install(sf->sf_ksp);
	}

	/* create the hotplug thread */
	mutex_enter(&sf->sf_hp_daemon_mutex);
	tp = thread_create(NULL, 0,
	    (void (*)())sf_hp_daemon, sf, 0, &p0, TS_RUN, minclsyspri);
	sf->sf_hp_tid = tp->t_did;
	mutex_exit(&sf->sf_hp_daemon_mutex);

	/* add this soft state instance to the head of the list */
	mutex_enter(&sf_global_mutex);
	sf->sf_next = sf_head;
	tsf = sf_head;
	sf_head = sf;

	/*
	 * find entry in list that has the same FC-AL handle (if any)
	 */
	while (tsf != NULL) {
	    if (tsf->sf_socp == sf->sf_socp) {
		break;		/* found matching entry */
	    }
	    tsf = tsf->sf_next;
	}

	if (tsf != NULL) {
	    /* if we found a matching entry keep track of it */
	    sf->sf_sibling = tsf;
	}
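	/*
	 * Note for the code below: drv_usectohz(1000000) is one
	 * second's worth of clock ticks, so sf_watchdog_tick works
	 * out to sf_watchdog_timeout seconds (1 by default, per the
	 * tunables above).
	 */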
	/*
	 * increment watchdog init flag, setting watchdog timeout
	 * if we are the first (since somebody has to do it)
	 */
	if (!sf_watchdog_init++) {
	    mutex_exit(&sf_global_mutex);
	    sf_watchdog_tick = sf_watchdog_timeout *
		drv_usectohz(1000000);
	    sf_watchdog_id = timeout(sf_watch,
		NULL, sf_watchdog_tick);
	} else {
	    mutex_exit(&sf_global_mutex);
	}

	if (tsf != NULL) {
	    /*
	     * set up matching entry to be our sibling
	     */
	    mutex_enter(&tsf->sf_mutex);
	    tsf->sf_sibling = sf;
	    mutex_exit(&tsf->sf_mutex);
	}

	/*
	 * create this property so that PM code knows we want
	 * to be suspended at PM time
	 */
	(void) ddi_prop_update_string(DDI_DEV_T_NONE, dip,
	    PM_HARDWARE_STATE_PROP, PM_NEEDS_SUSPEND_RESUME);

	/* log the fact that we have a new device */
	ddi_report_dev(dip);

	/*
	 * force a login by setting our state to offline
	 */
	sf->sf_timer = sf_watchdog_time + SF_OFFLINE_TIMEOUT;
	sf->sf_state = SF_STATE_OFFLINE;

	/*
	 * call transport routine to register state change and
	 * ELS callback routines (to register us as a ULP)
	 */
	soc_add_ulp(sf->sf_sochandle, sf->sf_socp,
	    sf->sf_sochandle->fcal_portno, TYPE_SCSI_FCP,
	    sf_statec_callback, sf_unsol_els_callback, NULL, sf);

	/*
	 * call transport routine to force loop initialization
	 */
	(void) soc_force_lip(sf->sf_sochandle, sf->sf_socp,
	    sf->sf_sochandle->fcal_portno, 0, FCAL_NO_LIP);
	sf->sf_reset_time = lbolt64;
	return (DDI_SUCCESS);

    default:
	return (DDI_FAILURE);
    }

fail:
    cmn_err(CE_WARN, "sf%d: failed to attach", instance);

    /*
     * Unbind and free event set
     */
    if (sf->sf_event_hdl) {
	(void) ndi_event_unbind_set(sf->sf_event_hdl,
	    &sf->sf_events, NDI_SLEEP);
	(void) ndi_event_free_hdl(sf->sf_event_hdl);
    }

    if (sf->sf_event_defs) {
	kmem_free(sf->sf_event_defs, sizeof (sf_event_defs));
    }

    if (sf->sf_tran != NULL) {
	scsi_hba_tran_free(sf->sf_tran);
    }
    while (sf->sf_cr_pool != NULL) {
	sf_crpool_free(sf);
    }
    if (sf->sf_lilp_dmahandle != NULL) {
	if (handle_bound) {
	    (void) ddi_dma_unbind_handle(sf->sf_lilp_dmahandle);
	}
	ddi_dma_free_handle(&sf->sf_lilp_dmahandle);
    }
    if (sf->sf_pkt_cache != NULL) {
	kmem_cache_destroy(sf->sf_pkt_cache);
    }
    if (sf->sf_lilp_map != NULL) {
	ddi_dma_mem_free(&sf->sf_lilp_acchandle);
    }
    if (sf->sf_ksp != NULL) {
	kstat_delete(sf->sf_ksp);
    }
    if (mutex_initted) {
	mutex_destroy(&sf->sf_mutex);
	mutex_destroy(&sf->sf_cmd_mutex);
	mutex_destroy(&sf->sf_cr_mutex);
	mutex_destroy(&sf->sf_hp_daemon_mutex);
	cv_destroy(&sf->sf_cr_cv);
	cv_destroy(&sf->sf_hp_daemon_cv);
    }
    mutex_enter(&sf_global_mutex);

    /*
     * kill off the watchdog if we are the last instance
     */
    if (!--sf_watchdog_init) {
	timeout_id_t tid = sf_watchdog_id;
	mutex_exit(&sf_global_mutex);
	(void) untimeout(tid);
    } else {
	mutex_exit(&sf_global_mutex);
    }

    ddi_soft_state_free(sf_state, instance);

    if (tran != NULL) {
	/* remove all minor nodes */
	ddi_remove_minor_node(dip, NULL);
    }

    return (DDI_FAILURE);
}


/* ARGSUSED */
static int
sf_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
    struct sf *sf;
    int instance;
    int i;
    struct sf_target *target;
    timeout_id_t tid;



    /* NO OTHER THREADS ARE RUNNING */

    instance = ddi_get_instance(dip);

    if ((sf = ddi_get_soft_state(sf_state, instance)) == NULL) {
	cmn_err(CE_WARN, "sf_detach, sf%d: bad soft state", instance);
	return (DDI_FAILURE);
    }

    switch (cmd) {

    case DDI_SUSPEND:
	/*
	 * suspend our instance
	 */

	SF_DEBUG(2, (sf, CE_CONT,
	    "sf_detach: DDI_SUSPEND for sf%d\n", instance));
	/*
	 * There is a race condition in socal where, while doing
	 * callbacks, if a ULP removes itself from the callback list
	 * the for loop in socal may panic as cblist is junk and
	 * while trying to get cblist->next the system will panic.
	 */

	/* call transport to remove and unregister our callbacks */
	soc_remove_ulp(sf->sf_sochandle, sf->sf_socp,
	    sf->sf_sochandle->fcal_portno, TYPE_SCSI_FCP, sf);

	/*
	 * begin process of clearing outstanding commands
	 * by issuing a lip
	 */
	sf_force_lip(sf);

	/*
	 * toggle the device OFFLINE in order to cause
	 * outstanding commands to drain
	 */
	mutex_enter(&sf->sf_mutex);
	sf->sf_lip_cnt++;
	sf->sf_timer = sf_watchdog_time + SF_OFFLINE_TIMEOUT;
	sf->sf_state = (SF_STATE_OFFLINE | SF_STATE_SUSPENDED);
	for (i = 0; i < sf_max_targets; i++) {
	    target = sf->sf_targets[i];
	    if (target != NULL) {
		struct sf_target *ntarget;

		mutex_enter(&target->sft_mutex);
		if (!(target->sft_state & SF_TARGET_OFFLINE)) {
		    target->sft_state |=
			(SF_TARGET_BUSY | SF_TARGET_MARK);
		}
		/* do this for all LUNs as well */
		for (ntarget = target->sft_next_lun;
		    ntarget;
		    ntarget = ntarget->sft_next_lun) {
		    mutex_enter(&ntarget->sft_mutex);
		    if (!(ntarget->sft_state &
			SF_TARGET_OFFLINE)) {
			ntarget->sft_state |=
			    (SF_TARGET_BUSY |
			    SF_TARGET_MARK);
		    }
		    mutex_exit(&ntarget->sft_mutex);
		}
		mutex_exit(&target->sft_mutex);
	    }
	}
	mutex_exit(&sf->sf_mutex);
	mutex_enter(&sf_global_mutex);

	/*
	 * kill off the watchdog if we are the last instance
	 */
	if (!--sf_watchdog_init) {
	    tid = sf_watchdog_id;
	    mutex_exit(&sf_global_mutex);
	    (void) untimeout(tid);
	} else {
	    mutex_exit(&sf_global_mutex);
	}

	return (DDI_SUCCESS);

    case DDI_DETACH:
	/*
	 * detach this instance
	 */

	SF_DEBUG(2, (sf, CE_CONT,
	    "sf_detach: DDI_DETACH for sf%d\n", instance));

	/* remove this "sf" from the list of sf softstates */
	sf_softstate_unlink(sf);

	/*
	 * prior to taking any DDI_DETACH actions, toggle the
	 * device OFFLINE in order to cause outstanding
	 * commands to drain
	 */
	mutex_enter(&sf->sf_mutex);
	sf->sf_lip_cnt++;
	sf->sf_timer = sf_watchdog_time + SF_OFFLINE_TIMEOUT;
	sf->sf_state = SF_STATE_OFFLINE;
	for (i = 0; i < sf_max_targets; i++) {
	    target = sf->sf_targets[i];
	    if (target != NULL) {
		struct sf_target *ntarget;

		mutex_enter(&target->sft_mutex);
		if (!(target->sft_state & SF_TARGET_OFFLINE)) {
		    target->sft_state |=
			(SF_TARGET_BUSY | SF_TARGET_MARK);
		}
		for (ntarget = target->sft_next_lun;
		    ntarget;
		    ntarget = ntarget->sft_next_lun) {
		    mutex_enter(&ntarget->sft_mutex);
		    if (!(ntarget->sft_state &
			SF_TARGET_OFFLINE)) {
			ntarget->sft_state |=
			    (SF_TARGET_BUSY |
			    SF_TARGET_MARK);
		    }
		    mutex_exit(&ntarget->sft_mutex);
		}
		mutex_exit(&target->sft_mutex);
	    }
	}
	mutex_exit(&sf->sf_mutex);

	/* call transport to remove and unregister our callbacks */
	soc_remove_ulp(sf->sf_sochandle, sf->sf_socp,
	    sf->sf_sochandle->fcal_portno, TYPE_SCSI_FCP, sf);

	/*
	 * kill off the watchdog if we are the last instance
	 */
	mutex_enter(&sf_global_mutex);
	if (!--sf_watchdog_init) {
	    tid = sf_watchdog_id;
	    mutex_exit(&sf_global_mutex);
	    (void) untimeout(tid);
	} else {
	    mutex_exit(&sf_global_mutex);
	}

	/* signal sf_hp_daemon() to exit and wait for exit */
	mutex_enter(&sf->sf_hp_daemon_mutex);
	ASSERT(sf->sf_hp_tid);
	sf->sf_hp_exit = 1;		/* flag exit */
	cv_signal(&sf->sf_hp_daemon_cv);
	mutex_exit(&sf->sf_hp_daemon_mutex);
	thread_join(sf->sf_hp_tid);	/* wait for hotplug to exit */

	/*
	 * Unbind and free event set
	 */
	if (sf->sf_event_hdl) {
	    (void) ndi_event_unbind_set(sf->sf_event_hdl,
		&sf->sf_events, NDI_SLEEP);
	    (void) ndi_event_free_hdl(sf->sf_event_hdl);
	}

	if (sf->sf_event_defs) {
	    kmem_free(sf->sf_event_defs, sizeof (sf_event_defs));
	}

	/* detach this instance of the HBA driver */
	scsi_hba_detach(dip);
	scsi_hba_tran_free(sf->sf_tran);

	/* deallocate/unbind DMA handle for lilp map */
	if (sf->sf_lilp_map != NULL) {
	    (void) ddi_dma_unbind_handle(sf->sf_lilp_dmahandle);
	    if (sf->sf_lilp_dmahandle != NULL) {
		ddi_dma_free_handle(&sf->sf_lilp_dmahandle);
	    }
	    ddi_dma_mem_free(&sf->sf_lilp_acchandle);
	}

	/*
	 * the kmem cache must be destroyed before free'ing
	 * up the crpools
	 *
	 * our finagle of "ntot" and "nfree"
	 * causes an ASSERT failure in "sf_cr_free()"
	 * if the kmem cache is free'd after invoking
	 * "sf_crpool_free()".
	 */
	kmem_cache_destroy(sf->sf_pkt_cache);

	SF_DEBUG(2, (sf, CE_CONT,
	    "sf_detach: sf_crpool_free() for instance 0x%x\n",
	    instance));
	while (sf->sf_cr_pool != NULL) {
	    /*
	     * set ntot to nfree for this particular entry
	     *
	     * this causes sf_crpool_free() to update
	     * the cr_pool list when deallocating this entry
	     */
	    sf->sf_cr_pool->ntot = sf->sf_cr_pool->nfree;
	    sf_crpool_free(sf);
	}

	/*
	 * now that the cr_pool's are gone it's safe
	 * to destroy all softstate mutex's and cv's
	 */
	mutex_destroy(&sf->sf_mutex);
	mutex_destroy(&sf->sf_cmd_mutex);
	mutex_destroy(&sf->sf_cr_mutex);
	mutex_destroy(&sf->sf_hp_daemon_mutex);
	cv_destroy(&sf->sf_cr_cv);
	cv_destroy(&sf->sf_hp_daemon_cv);

	/* remove all minor nodes from the device tree */
	ddi_remove_minor_node(dip, NULL);

	/* remove properties created during attach() */
	ddi_prop_remove_all(dip);

	/* remove kstat's if present */
	if (sf->sf_ksp != NULL) {
	    kstat_delete(sf->sf_ksp);
	}

	SF_DEBUG(2, (sf, CE_CONT,
	    "sf_detach: ddi_soft_state_free() for instance 0x%x\n",
	    instance));
	ddi_soft_state_free(sf_state, instance);
	return (DDI_SUCCESS);

    default:
	SF_DEBUG(2, (sf, CE_CONT, "sf_detach: sf%d unknown cmd %x\n",
	    instance, (int)cmd));
	return (DDI_FAILURE);
    }
}


/*
 * sf_softstate_unlink() - remove an sf instance from the list of softstates
 */
static void
sf_softstate_unlink(struct sf *sf)
{
    struct sf *sf_ptr;
    struct sf *sf_found_sibling;
    struct sf *sf_reposition = NULL;


    mutex_enter(&sf_global_mutex);
    while (sf_watch_running) {
	/* Busy working the list -- wait */
	cv_wait(&sf_watch_cv, &sf_global_mutex);
    }
    if ((sf_found_sibling = sf->sf_sibling) != NULL) {
	/*
	 * we have a sibling so NULL out its reference to us
	 */
	mutex_enter(&sf_found_sibling->sf_mutex);
	sf_found_sibling->sf_sibling = NULL;
	mutex_exit(&sf_found_sibling->sf_mutex);
    }

    /* remove our instance from the global list */
    if (sf == sf_head) {
	/* we were at the head of the list */
	sf_head = sf->sf_next;
    } else {
	/* find us in the list */
	for (sf_ptr = sf_head;
	    sf_ptr != NULL;
	    sf_ptr = sf_ptr->sf_next) {
	    if (sf_ptr == sf) {
		break;
	    }
	    /* remember this place */
	    sf_reposition = sf_ptr;
	}
	ASSERT(sf_ptr == sf);
	ASSERT(sf_reposition != NULL);

	sf_reposition->sf_next = sf_ptr->sf_next;
    }
    mutex_exit(&sf_global_mutex);
}


static int
sf_scsi_bus_config(dev_info_t *parent, uint_t flag,
    ddi_bus_config_op_t op, void *arg, dev_info_t **childp)
{
    int64_t reset_delay;
    struct sf *sf;

    sf = ddi_get_soft_state(sf_state, ddi_get_instance(parent));
    ASSERT(sf);

    reset_delay = (int64_t)(USEC_TO_TICK(SF_INIT_WAIT_TIMEOUT)) -
	(lbolt64 - sf->sf_reset_time);
    if (reset_delay < 0)
	reset_delay = 0;

    if (sf_bus_config_debug)
	flag |= NDI_DEVI_DEBUG;

    return (ndi_busop_bus_config(parent, flag, op,
	arg, childp, (clock_t)reset_delay));
}

static int
sf_scsi_bus_unconfig(dev_info_t *parent, uint_t flag,
    ddi_bus_config_op_t op, void *arg)
{
    if (sf_bus_config_debug)
	flag |= NDI_DEVI_DEBUG;

    return (ndi_busop_bus_unconfig(parent, flag, op, arg));
}


/*
 * called by transport to initialize a SCSI target
 */
/* ARGSUSED */
static int
sf_scsi_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
    scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
{
#ifdef RAID_LUNS
    int lun;
#else
    int64_t lun;
#endif
    struct sf_target *target;
    struct sf *sf = (struct sf *)hba_tran->tran_hba_private;
    int i, t_len;
    unsigned int lip_cnt;
    unsigned char wwn[FC_WWN_SIZE];


    /* get and validate our SCSI target ID */
    i = sd->sd_address.a_target;
    if (i >= sf_max_targets) {
	return (DDI_NOT_WELL_FORMED);
    }

    /* get our port WWN property */
    t_len = sizeof (wwn);
    if (ddi_prop_op(DDI_DEV_T_ANY, tgt_dip, PROP_LEN_AND_VAL_BUF,
	DDI_PROP_DONTPASS | DDI_PROP_CANSLEEP, PORT_WWN_PROP,
	(caddr_t)&wwn, &t_len) != DDI_SUCCESS) {
	/* no port WWN property - ignore the OBP stub node */
	return (DDI_NOT_WELL_FORMED);
    }

    /* get our LIP count property */
    t_len = sizeof (lip_cnt);
    if (ddi_prop_op(DDI_DEV_T_ANY, tgt_dip, PROP_LEN_AND_VAL_BUF,
	DDI_PROP_DONTPASS | DDI_PROP_CANSLEEP, LIP_CNT_PROP,
	(caddr_t)&lip_cnt, &t_len) != DDI_SUCCESS) {
	return (DDI_FAILURE);
    }
    /* and our LUN property */
    t_len = sizeof (lun);
    if (ddi_prop_op(DDI_DEV_T_ANY, tgt_dip, PROP_LEN_AND_VAL_BUF,
	DDI_PROP_DONTPASS | DDI_PROP_CANSLEEP, "lun",
	(caddr_t)&lun, &t_len) != DDI_SUCCESS) {
	return (DDI_FAILURE);
    }

    /* find the target structure for this instance */
    mutex_enter(&sf->sf_mutex);
    if ((target = sf_lookup_target(sf, wwn, lun)) == NULL) {
	mutex_exit(&sf->sf_mutex);
	return (DDI_FAILURE);
    }

    mutex_enter(&target->sft_mutex);
    if ((sf->sf_lip_cnt == lip_cnt) &&
	!(target->sft_state & SF_TARGET_INIT_DONE)) {
	/*
	 * set links between HBA transport and target structures
	 * and set done flag
	 */
	hba_tran->tran_tgt_private = target;
	target->sft_tran = hba_tran;
	target->sft_state |= SF_TARGET_INIT_DONE;
    } else {
	/* already initialized ?? */
	mutex_exit(&target->sft_mutex);
	mutex_exit(&sf->sf_mutex);
	return (DDI_FAILURE);
    }
    mutex_exit(&target->sft_mutex);
    mutex_exit(&sf->sf_mutex);

    return (DDI_SUCCESS);
}


/*
 * called by transport to free a target
 */
/* ARGSUSED */
static void
sf_scsi_tgt_free(dev_info_t *hba_dip, dev_info_t *tgt_dip,
    scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
{
    struct sf_target *target = hba_tran->tran_tgt_private;

    if (target != NULL) {
	mutex_enter(&target->sft_mutex);
	target->sft_tran = NULL;
	target->sft_state &= ~SF_TARGET_INIT_DONE;
	mutex_exit(&target->sft_mutex);
    }
}


/*
 * allocator for non-std size cdb/pkt_private/status -- returns TRUE iff
 * an allocation failed, else FALSE (i.e. the return value is a failure
 * flag, as its callers expect)
 */
/*ARGSUSED*/
static int
sf_pkt_alloc_extern(struct sf *sf, struct sf_pkt *cmd,
    int tgtlen, int statuslen, int kf)
{
    caddr_t scbp, tgt;
    int failure = FALSE;
    struct scsi_pkt *pkt = CMD2PKT(cmd);


    tgt = scbp = NULL;

    if (tgtlen > PKT_PRIV_LEN) {
	if ((tgt = kmem_zalloc(tgtlen, kf)) == NULL) {
	    failure = TRUE;
	} else {
	    cmd->cmd_flags |= CFLAG_PRIVEXTERN;
	    pkt->pkt_private = tgt;
	}
    }
    if (statuslen > EXTCMDS_STATUS_SIZE) {
	if ((scbp = kmem_zalloc((size_t)statuslen, kf)) == NULL) {
	    failure = TRUE;
	} else {
	    cmd->cmd_flags |= CFLAG_SCBEXTERN;
	    pkt->pkt_scbp = (opaque_t)scbp;
	}
    }
    if (failure) {
	sf_pkt_destroy_extern(sf, cmd);
    }
    return (failure);
}


/*
 * deallocator for non-std size cdb/pkt_private/status
 */
static void
sf_pkt_destroy_extern(struct sf *sf, struct sf_pkt *cmd)
{
    struct scsi_pkt *pkt = CMD2PKT(cmd);

    if (cmd->cmd_flags & CFLAG_FREE) {
	cmn_err(CE_PANIC,
	    "sf_scsi_impl_pktfree: freeing free packet");
	_NOTE(NOT_REACHED)
	/* NOTREACHED */
    }
    if (cmd->cmd_flags & CFLAG_SCBEXTERN) {
	kmem_free((caddr_t)pkt->pkt_scbp,
	    (size_t)cmd->cmd_scblen);
    }
    if (cmd->cmd_flags & CFLAG_PRIVEXTERN) {
	kmem_free((caddr_t)pkt->pkt_private,
	    (size_t)cmd->cmd_privlen);
    }

    cmd->cmd_flags = CFLAG_FREE;
    kmem_cache_free(sf->sf_pkt_cache, (void *)cmd);
}


/*
 * create or initialize a SCSI packet -- called internally and
 * by the transport
 */
static struct scsi_pkt *
sf_scsi_init_pkt(struct scsi_address *ap, struct scsi_pkt *pkt,
    struct buf *bp, int cmdlen, int statuslen, int tgtlen,
    int flags, int (*callback)(), caddr_t arg)
{
    int kf;
    int failure = FALSE;
    struct sf_pkt *cmd;
    struct sf *sf = ADDR2SF(ap);
    struct sf_target *target = ADDR2TARGET(ap);
    struct sf_pkt *new_cmd = NULL;
    struct fcal_packet *fpkt;
    fc_frame_header_t *hp;
    struct fcp_cmd *fcmd;


    /*
     * If we've already allocated a pkt once,
     * this request is for dma allocation only.
     */
    if (pkt == NULL) {

	/*
	 * First step of sf_scsi_init_pkt:  pkt allocation
	 */
	if (cmdlen > FCP_CDB_SIZE) {
	    return (NULL);
	}

	kf = (callback == SLEEP_FUNC)? KM_SLEEP: KM_NOSLEEP;

	if ((cmd = kmem_cache_alloc(sf->sf_pkt_cache, kf)) != NULL) {
	    /*
	     * Selective zeroing of the pkt.
	     */

	    cmd->cmd_flags = 0;
	    cmd->cmd_forw = 0;
	    cmd->cmd_back = 0;
	    cmd->cmd_next = 0;
	    cmd->cmd_pkt = (struct scsi_pkt *)((char *)cmd +
		sizeof (struct sf_pkt) + sizeof (struct
		fcal_packet));
	    cmd->cmd_fp_pkt = (struct fcal_packet *)((char *)cmd +
		sizeof (struct sf_pkt));
	    cmd->cmd_fp_pkt->fcal_pkt_private = (opaque_t)cmd;
	    cmd->cmd_state = SF_STATE_IDLE;
	    cmd->cmd_pkt->pkt_ha_private = (opaque_t)cmd;
	    cmd->cmd_pkt->pkt_scbp = (opaque_t)cmd->cmd_scsi_scb;
	    cmd->cmd_pkt->pkt_comp = NULL;
	    cmd->cmd_pkt->pkt_flags = 0;
	    cmd->cmd_pkt->pkt_time = 0;
	    cmd->cmd_pkt->pkt_resid = 0;
	    cmd->cmd_pkt->pkt_reason = 0;
	    cmd->cmd_cdblen = (uchar_t)cmdlen;
	    cmd->cmd_scblen = statuslen;
	    cmd->cmd_privlen = tgtlen;
	    cmd->cmd_pkt->pkt_address = *ap;

	    /* zero pkt_private */
	    (int *)(cmd->cmd_pkt->pkt_private =
		cmd->cmd_pkt_private);
	    bzero((caddr_t)cmd->cmd_pkt->pkt_private,
		PKT_PRIV_LEN);
	} else {
	    failure = TRUE;
	}

	if (failure ||
	    (tgtlen > PKT_PRIV_LEN) ||
	    (statuslen > EXTCMDS_STATUS_SIZE)) {
	    if (!failure) {
		/* need to allocate more space */
		failure = sf_pkt_alloc_extern(sf, cmd,
		    tgtlen, statuslen, kf);
	    }
	    if (failure) {
		return (NULL);
	    }
	}

	fpkt = cmd->cmd_fp_pkt;
	if (cmd->cmd_block == NULL) {

	    /* allocate cmd/response pool buffers */
	    if (sf_cr_alloc(sf, cmd, callback) == DDI_FAILURE) {
		sf_pkt_destroy_extern(sf, cmd);
		return (NULL);
	    }

	    /* fill in the FC-AL packet */
	    fpkt->fcal_pkt_cookie = sf->sf_socp;
	    fpkt->fcal_pkt_comp = sf_cmd_callback;
	    fpkt->fcal_pkt_flags = 0;
	    fpkt->fcal_magic = FCALP_MAGIC;
	    fpkt->fcal_socal_request.sr_soc_hdr.sh_flags =
		(ushort_t)(SOC_FC_HEADER |
		sf->sf_sochandle->fcal_portno);
	    fpkt->fcal_socal_request.sr_soc_hdr.sh_class = 3;
	    fpkt->fcal_socal_request.sr_cqhdr.cq_hdr_count = 1;
	    fpkt->fcal_socal_request.sr_cqhdr.cq_hdr_flags = 0;
	    fpkt->fcal_socal_request.sr_cqhdr.cq_hdr_seqno = 0;
	    fpkt->fcal_socal_request.sr_dataseg[0].fc_base =
		(uint32_t)cmd->cmd_dmac;
	    fpkt->fcal_socal_request.sr_dataseg[0].fc_count =
		sizeof (struct fcp_cmd);
	    fpkt->fcal_socal_request.sr_dataseg[1].fc_base =
		(uint32_t)cmd->cmd_rsp_dmac;
	    fpkt->fcal_socal_request.sr_dataseg[1].fc_count =
		FCP_MAX_RSP_IU_SIZE;

	    /* Fill in the Fibre Channel frame header */
	    hp = &fpkt->fcal_socal_request.sr_fc_frame_hdr;
	    hp->r_ctl = R_CTL_COMMAND;
	    hp->type = TYPE_SCSI_FCP;
	    hp->f_ctl = F_CTL_SEQ_INITIATIVE | F_CTL_FIRST_SEQ;
	    hp->reserved1 = 0;
	    hp->seq_id = 0;
	    hp->df_ctl = 0;
	    hp->seq_cnt = 0;
	    hp->ox_id = 0xffff;
	    hp->rx_id = 0xffff;
	    hp->ro = 0;

	    /* Establish the LUN */
	    bcopy((caddr_t)&target->sft_lun.b,
		(caddr_t)&cmd->cmd_block->fcp_ent_addr,
		FCP_LUN_SIZE);
	    *((int32_t *)&cmd->cmd_block->fcp_cntl) = 0;
	}
	cmd->cmd_pkt->pkt_cdbp = cmd->cmd_block->fcp_cdb;

	mutex_enter(&target->sft_pkt_mutex);

	target->sft_pkt_tail->cmd_forw = cmd;
	cmd->cmd_back = target->sft_pkt_tail;
	cmd->cmd_forw = (struct sf_pkt *)&target->sft_pkt_head;
	target->sft_pkt_tail = cmd;

	mutex_exit(&target->sft_pkt_mutex);
	new_cmd = cmd;		/* for later cleanup if needed */
    } else {
	/* pkt already exists -- just a request for DMA allocation */
	cmd = PKT2CMD(pkt);
	fpkt = cmd->cmd_fp_pkt;
    }

    /* zero cdb (bzero is too slow) */
    bzero((caddr_t)cmd->cmd_pkt->pkt_cdbp, cmdlen);

    /*
     * Second step of sf_scsi_init_pkt:  dma allocation
     * Set up dma info
     */
    if ((bp != NULL) && (bp->b_bcount != 0)) {
	int cmd_flags, dma_flags;
	int rval = 0;
	uint_t dmacookie_count;

	/* there is a buffer and some data to transfer */

	/* set up command and DMA flags */
	cmd_flags = cmd->cmd_flags;
	if (bp->b_flags & B_READ) {
	    /* a read */
	    cmd_flags &= ~CFLAG_DMASEND;
	    dma_flags = DDI_DMA_READ;
	} else {
	    /* a write */
	    cmd_flags |= CFLAG_DMASEND;
	    dma_flags = DDI_DMA_WRITE;
	}
	if (flags & PKT_CONSISTENT) {
	    cmd_flags |= CFLAG_CMDIOPB;
	    dma_flags |= DDI_DMA_CONSISTENT;
	}

	/* ensure we have a DMA handle */
	if (cmd->cmd_dmahandle == NULL) {
	    rval = ddi_dma_alloc_handle(sf->sf_dip,
		sf->sf_sochandle->fcal_dmaattr, callback, arg,
		&cmd->cmd_dmahandle);
	}

	if (rval == 0) {
	    /* bind our DMA handle to our buffer */
	    rval = ddi_dma_buf_bind_handle(cmd->cmd_dmahandle, bp,
		dma_flags, callback, arg, &cmd->cmd_dmacookie,
		&dmacookie_count);
	}

	if (rval != 0) {
	    /* DMA failure */
	    SF_DEBUG(2, (sf, CE_CONT, "ddi_dma_buf.. failed\n"));
	    switch (rval) {
	    case DDI_DMA_NORESOURCES:
		bioerror(bp, 0);
		break;
	    case DDI_DMA_BADATTR:
	    case DDI_DMA_NOMAPPING:
		bioerror(bp, EFAULT);
		break;
	    case DDI_DMA_TOOBIG:
	    default:
		bioerror(bp, EINVAL);
		break;
	    }
	    /* clear valid flag */
	    cmd->cmd_flags = cmd_flags & ~CFLAG_DMAVALID;
	    if (new_cmd != NULL) {
		/* destroy packet if we just created it */
		sf_scsi_destroy_pkt(ap, new_cmd->cmd_pkt);
	    }
	    return (NULL);
	}

	ASSERT(dmacookie_count == 1);
	/* set up amt to transfer and set valid flag */
	cmd->cmd_dmacount = bp->b_bcount;
	cmd->cmd_flags = cmd_flags | CFLAG_DMAVALID;

	ASSERT(cmd->cmd_dmahandle != NULL);
    }

    /* set up FC-AL packet */
    fcmd = cmd->cmd_block;

    if (cmd->cmd_flags & CFLAG_DMAVALID) {
	if (cmd->cmd_flags & CFLAG_DMASEND) {
	    /* DMA write */
	    fcmd->fcp_cntl.cntl_read_data = 0;
	    fcmd->fcp_cntl.cntl_write_data = 1;
	    fpkt->fcal_socal_request.sr_cqhdr.cq_hdr_type =
		CQ_TYPE_IO_WRITE;
	} else {
	    /* DMA read */
	    fcmd->fcp_cntl.cntl_read_data = 1;
	    fcmd->fcp_cntl.cntl_write_data = 0;
	    fpkt->fcal_socal_request.sr_cqhdr.cq_hdr_type =
		CQ_TYPE_IO_READ;
	}
	fpkt->fcal_socal_request.sr_dataseg[2].fc_base =
	    (uint32_t)cmd->cmd_dmacookie.dmac_address;
	fpkt->fcal_socal_request.sr_dataseg[2].fc_count =
	    cmd->cmd_dmacookie.dmac_size;
	fpkt->fcal_socal_request.sr_soc_hdr.sh_seg_cnt = 3;
	fpkt->fcal_socal_request.sr_soc_hdr.sh_byte_cnt =
	    cmd->cmd_dmacookie.dmac_size;
	fcmd->fcp_data_len = cmd->cmd_dmacookie.dmac_size;
    } else {
	/* not a read or write */
	fcmd->fcp_cntl.cntl_read_data = 0;
	fcmd->fcp_cntl.cntl_write_data = 0;
	fpkt->fcal_socal_request.sr_cqhdr.cq_hdr_type = CQ_TYPE_SIMPLE;
	fpkt->fcal_socal_request.sr_soc_hdr.sh_seg_cnt = 2;
	fpkt->fcal_socal_request.sr_soc_hdr.sh_byte_cnt =
	    sizeof (struct fcp_cmd);
	fcmd->fcp_data_len = 0;
    }
    fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_SIMPLE;

    return (cmd->cmd_pkt);
}


/*
 * destroy a SCSI packet -- called internally and by the transport
 */
static void
sf_scsi_destroy_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
{
    struct sf_pkt *cmd = PKT2CMD(pkt);
    struct sf *sf = ADDR2SF(ap);
    struct sf_target *target = ADDR2TARGET(ap);
    struct fcal_packet *fpkt = cmd->cmd_fp_pkt;


    if (cmd->cmd_flags & CFLAG_DMAVALID) {
	/* DMA was set up -- clean up */
	(void) ddi_dma_unbind_handle(cmd->cmd_dmahandle);
	cmd->cmd_flags ^= CFLAG_DMAVALID;
    }

    /* take this packet off the doubly-linked list */
    mutex_enter(&target->sft_pkt_mutex);
    cmd->cmd_back->cmd_forw = cmd->cmd_forw;
    cmd->cmd_forw->cmd_back = cmd->cmd_back;
    mutex_exit(&target->sft_pkt_mutex);

    fpkt->fcal_pkt_flags = 0;
    /* free the packet */
    if ((cmd->cmd_flags &
	(CFLAG_FREE | CFLAG_PRIVEXTERN | CFLAG_SCBEXTERN)) == 0) {
	/* just a regular packet */
	ASSERT(cmd->cmd_state != SF_STATE_ISSUED);
	cmd->cmd_flags = CFLAG_FREE;
	kmem_cache_free(sf->sf_pkt_cache, (void *)cmd);
    } else {
	/* a packet with extra memory */
	sf_pkt_destroy_extern(sf, cmd);
    }
}


/*
 * called by transport to unbind DMA handle
 */
/* ARGSUSED */
static void
sf_scsi_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt)
{
    struct sf_pkt *cmd = PKT2CMD(pkt);


    if (cmd->cmd_flags & CFLAG_DMAVALID) {
	(void) ddi_dma_unbind_handle(cmd->cmd_dmahandle);
	cmd->cmd_flags ^= CFLAG_DMAVALID;
    }

}


/*
 * called by transport to synchronize CPU and I/O views of memory
 */
/* ARGSUSED */
static void
sf_scsi_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
{
    struct sf_pkt *cmd = PKT2CMD(pkt);


    if (cmd->cmd_flags & CFLAG_DMAVALID) {
	if (ddi_dma_sync(cmd->cmd_dmahandle, (off_t)0, (size_t)0,
	    (cmd->cmd_flags & CFLAG_DMASEND) ?
	    DDI_DMA_SYNC_FORDEV : DDI_DMA_SYNC_FORCPU) !=
	    DDI_SUCCESS) {
	    cmn_err(CE_WARN, "sf: sync pkt failed");
	}
    }
}


/*
 * routine for reset notification setup, to register or cancel -- called
 * by transport
 */
static int
sf_scsi_reset_notify(struct scsi_address *ap, int flag,
    void (*callback)(caddr_t), caddr_t arg)
{
    struct sf *sf = ADDR2SF(ap);

    return (scsi_hba_reset_notify_setup(ap, flag, callback, arg,
	&sf->sf_mutex, &sf->sf_reset_notify_listf));
}


/*
 * called by transport to get port WWN property (except sun4u)
 */
/* ARGSUSED */
static int
sf_scsi_get_name(struct scsi_device *sd, char *name, int len)
{
    char tbuf[(FC_WWN_SIZE*2)+1];
    unsigned char wwn[FC_WWN_SIZE];
    int i, lun;
    dev_info_t *tgt_dip;

    tgt_dip = sd->sd_dev;
    i = sizeof (wwn);
    if (ddi_prop_op(DDI_DEV_T_ANY, tgt_dip, PROP_LEN_AND_VAL_BUF,
	DDI_PROP_DONTPASS | DDI_PROP_CANSLEEP, PORT_WWN_PROP,
	(caddr_t)&wwn, &i) != DDI_SUCCESS) {
	name[0] = '\0';
	return (0);
    }
    i = sizeof (lun);
    if (ddi_prop_op(DDI_DEV_T_ANY, tgt_dip, PROP_LEN_AND_VAL_BUF,
	DDI_PROP_DONTPASS | DDI_PROP_CANSLEEP, "lun",
	(caddr_t)&lun, &i) != DDI_SUCCESS) {
	name[0] = '\0';
	return (0);
    }
    for (i = 0; i < FC_WWN_SIZE; i++)
	(void) sprintf(&tbuf[i << 1], "%02x", wwn[i]);
    (void) sprintf(name, "w%s,%x", tbuf, lun);
    return (1);
}


/*
 * called by transport to get target soft AL-PA (except sun4u)
 */
/* ARGSUSED */
static int
sf_scsi_get_bus_addr(struct scsi_device *sd, char *name, int len)
{
    struct sf_target *target = ADDR2TARGET(&sd->sd_address);

    if (target == NULL)
	return (0);

    (void) sprintf(name, "%x", target->sft_al_pa);
    return (1);
}


/*
 * add to the command/response buffer pool for this sf instance
 */
static int
sf_add_cr_pool(struct sf *sf)
{
    int cmd_buf_size;
    size_t real_cmd_buf_size;
    int rsp_buf_size;
    size_t real_rsp_buf_size;
    uint_t i, ccount;
    struct sf_cr_pool *ptr;
    struct sf_cr_free_elem *cptr;
    caddr_t dptr, eptr;
    ddi_dma_cookie_t cmd_cookie;
    ddi_dma_cookie_t rsp_cookie;
    int cmd_bound = FALSE, rsp_bound = FALSE;


    /* allocate room for the pool */
    if ((ptr = kmem_zalloc(sizeof (struct sf_cr_pool), KM_NOSLEEP)) ==
	NULL) {
	return (DDI_FAILURE);
    }

    /* allocate a DMA handle for the command pool */
    if (ddi_dma_alloc_handle(sf->sf_dip, sf->sf_sochandle->fcal_dmaattr,
	DDI_DMA_DONTWAIT, NULL, &ptr->cmd_dma_handle) != DDI_SUCCESS) {
	goto fail;
    }

    /*
     * Get a piece of memory in which to put commands
     */
    cmd_buf_size = (sizeof (struct fcp_cmd) * SF_ELEMS_IN_POOL + 7) & ~7;
    if (ddi_dma_mem_alloc(ptr->cmd_dma_handle, cmd_buf_size,
	sf->sf_sochandle->fcal_accattr, DDI_DMA_CONSISTENT,
	DDI_DMA_DONTWAIT, NULL, (caddr_t *)&ptr->cmd_base,
	&real_cmd_buf_size, &ptr->cmd_acc_handle) != DDI_SUCCESS) {
	goto fail;
    }

    /* bind the DMA handle to an address */
    if (ddi_dma_addr_bind_handle(ptr->cmd_dma_handle, NULL,
	ptr->cmd_base, real_cmd_buf_size,
	DDI_DMA_WRITE | DDI_DMA_CONSISTENT, DDI_DMA_DONTWAIT,
	NULL, &cmd_cookie, &ccount) != DDI_DMA_MAPPED) {
	goto fail;
    }
    cmd_bound = TRUE;
    /* ensure only one cookie was allocated */
    if (ccount != 1) {
	goto fail;
    }
DDI_DMA_DONTWAIT, NULL, &ptr->rsp_dma_handle) != DDI_SUCCESS) { 1887 goto fail; 1888 } 1889 1890 /* 1891 * Get a piece of memory in which to put responses 1892 */ 1893 rsp_buf_size = FCP_MAX_RSP_IU_SIZE * SF_ELEMS_IN_POOL; 1894 if (ddi_dma_mem_alloc(ptr->rsp_dma_handle, rsp_buf_size, 1895 sf->sf_sochandle->fcal_accattr, DDI_DMA_CONSISTENT, 1896 DDI_DMA_DONTWAIT, NULL, (caddr_t *)&ptr->rsp_base, 1897 &real_rsp_buf_size, &ptr->rsp_acc_handle) != DDI_SUCCESS) { 1898 goto fail; 1899 } 1900 1901 /* bind the DMA handle to an address */ 1902 if (ddi_dma_addr_bind_handle(ptr->rsp_dma_handle, NULL, 1903 ptr->rsp_base, real_rsp_buf_size, 1904 DDI_DMA_READ | DDI_DMA_CONSISTENT, DDI_DMA_DONTWAIT, 1905 NULL, &rsp_cookie, &ccount) != DDI_DMA_MAPPED) { 1906 goto fail; 1907 } 1908 rsp_bound = TRUE; 1909 /* ensure only one cookie was allocated */ 1910 if (ccount != 1) { 1911 goto fail; 1912 } 1913 1914 /* 1915 * Generate a (cmd/rsp structure) free list 1916 */ 1917 /* ensure ptr points to start of long word (8-byte block) */ 1918 dptr = (caddr_t)((uintptr_t)(ptr->cmd_base) + 7 & ~7); 1919 /* keep track of actual size after moving pointer */ 1920 real_cmd_buf_size -= (dptr - ptr->cmd_base); 1921 eptr = ptr->rsp_base; 1922 1923 /* set actual total number of entries */ 1924 ptr->ntot = min((real_cmd_buf_size / sizeof (struct fcp_cmd)), 1925 (real_rsp_buf_size / FCP_MAX_RSP_IU_SIZE)); 1926 ptr->nfree = ptr->ntot; 1927 ptr->free = (struct sf_cr_free_elem *)ptr->cmd_base; 1928 ptr->sf = sf; 1929 1930 /* set up DMA for each pair of entries */ 1931 i = 0; 1932 while (i < ptr->ntot) { 1933 cptr = (struct sf_cr_free_elem *)dptr; 1934 dptr += sizeof (struct fcp_cmd); 1935 1936 cptr->next = (struct sf_cr_free_elem *)dptr; 1937 cptr->rsp = eptr; 1938 1939 cptr->cmd_dmac = cmd_cookie.dmac_address + 1940 (uint32_t)((caddr_t)cptr - ptr->cmd_base); 1941 1942 cptr->rsp_dmac = rsp_cookie.dmac_address + 1943 (uint32_t)((caddr_t)eptr - ptr->rsp_base); 1944 1945 eptr += FCP_MAX_RSP_IU_SIZE; 1946 i++; 1947 } 1948 1949 /* terminate the list */ 1950 cptr->next = NULL; 1951 1952 /* add this list at front of current one */ 1953 mutex_enter(&sf->sf_cr_mutex); 1954 ptr->next = sf->sf_cr_pool; 1955 sf->sf_cr_pool = ptr; 1956 sf->sf_cr_pool_cnt++; 1957 mutex_exit(&sf->sf_cr_mutex); 1958 1959 return (DDI_SUCCESS); 1960 1961 fail: 1962 /* we failed so clean up */ 1963 if (ptr->cmd_dma_handle != NULL) { 1964 if (cmd_bound) { 1965 (void) ddi_dma_unbind_handle(ptr->cmd_dma_handle); 1966 } 1967 ddi_dma_free_handle(&ptr->cmd_dma_handle); 1968 } 1969 1970 if (ptr->rsp_dma_handle != NULL) { 1971 if (rsp_bound) { 1972 (void) ddi_dma_unbind_handle(ptr->rsp_dma_handle); 1973 } 1974 ddi_dma_free_handle(&ptr->rsp_dma_handle); 1975 } 1976 1977 if (ptr->cmd_base != NULL) { 1978 ddi_dma_mem_free(&ptr->cmd_acc_handle); 1979 } 1980 1981 if (ptr->rsp_base != NULL) { 1982 ddi_dma_mem_free(&ptr->rsp_acc_handle); 1983 } 1984 1985 kmem_free((caddr_t)ptr, sizeof (struct sf_cr_pool)); 1986 return (DDI_FAILURE); 1987 } 1988 1989 1990 /* 1991 * allocate a command/response buffer from the pool, allocating more 1992 * in the pool as needed 1993 */ 1994 static int 1995 sf_cr_alloc(struct sf *sf, struct sf_pkt *cmd, int (*func)()) 1996 { 1997 struct sf_cr_pool *ptr; 1998 struct sf_cr_free_elem *cptr; 1999 2000 2001 mutex_enter(&sf->sf_cr_mutex); 2002 2003 try_again: 2004 2005 /* find a free buffer in the existing pool */ 2006 ptr = sf->sf_cr_pool; 2007 while (ptr != NULL) { 2008 if (ptr->nfree != 0) { 2009 ptr->nfree--; 2010 break; 2011 } else { 2012 ptr = ptr->next; 
2013 		}
2014 	}
2015 
2016 	/* did we find a free buffer ? */
2017 	if (ptr != NULL) {
2018 		/* we found a free buffer -- take it off the free list */
2019 		cptr = ptr->free;
2020 		ptr->free = cptr->next;
2021 		mutex_exit(&sf->sf_cr_mutex);
2022 		/* set up the command to use the buffer pair */
2023 		cmd->cmd_block = (struct fcp_cmd *)cptr;
2024 		cmd->cmd_dmac = cptr->cmd_dmac;
2025 		cmd->cmd_rsp_dmac = cptr->rsp_dmac;
2026 		cmd->cmd_rsp_block = (struct fcp_rsp *)cptr->rsp;
2027 		cmd->cmd_cr_pool = ptr;
2028 		return (DDI_SUCCESS); /* success */
2029 	}
2030 
2031 	/* no free buffer available -- can we allocate more ? */
2032 	if (sf->sf_cr_pool_cnt < SF_CR_POOL_MAX) {
2033 		/* we need to allocate more buffer pairs */
2034 		if (sf->sf_cr_flag) {
2035 			/* somebody already allocating for this instance */
2036 			if (func == SLEEP_FUNC) {
2037 				/* user wants to wait */
2038 				cv_wait(&sf->sf_cr_cv, &sf->sf_cr_mutex);
2039 				/* we've been woken so go try again */
2040 				goto try_again;
2041 			}
2042 			/* user does not want to wait */
2043 			mutex_exit(&sf->sf_cr_mutex);
2044 			sf->sf_stats.cralloc_failures++;
2045 			return (DDI_FAILURE); /* give up */
2046 		}
2047 		/* set flag saying we're allocating */
2048 		sf->sf_cr_flag = 1;
2049 		mutex_exit(&sf->sf_cr_mutex);
2050 		/* add to our pool */
2051 		if (sf_add_cr_pool(sf) != DDI_SUCCESS) {
2052 			/* couldn't add to our pool for some reason */
2053 			mutex_enter(&sf->sf_cr_mutex);
2054 			sf->sf_cr_flag = 0;
2055 			cv_broadcast(&sf->sf_cr_cv);
2056 			mutex_exit(&sf->sf_cr_mutex);
2057 			sf->sf_stats.cralloc_failures++;
2058 			return (DDI_FAILURE); /* give up */
2059 		}
2060 		/*
2061 		 * clear flag saying we're allocating and tell all others
2062 		 * that care
2063 		 */
2064 		mutex_enter(&sf->sf_cr_mutex);
2065 		sf->sf_cr_flag = 0;
2066 		cv_broadcast(&sf->sf_cr_cv);
2067 		/* now that we have more buffers try again */
2068 		goto try_again;
2069 	}
2070 
2071 	/* we don't have room to allocate any more buffers */
2072 	mutex_exit(&sf->sf_cr_mutex);
2073 	sf->sf_stats.cralloc_failures++;
2074 	return (DDI_FAILURE); /* give up */
2075 }
2076 
2077 
2078 /*
2079  * free a cmd/response buffer pair in our pool
2080  */
2081 static void
2082 sf_cr_free(struct sf_cr_pool *cp, struct sf_pkt *cmd)
2083 {
2084 	struct sf *sf = cp->sf;
2085 	struct sf_cr_free_elem *elem;
2086 
2087 	elem = (struct sf_cr_free_elem *)cmd->cmd_block;
2088 	elem->rsp = (caddr_t)cmd->cmd_rsp_block;
2089 	elem->cmd_dmac = cmd->cmd_dmac;
2090 	elem->rsp_dmac = cmd->cmd_rsp_dmac;
2091 
2092 	mutex_enter(&sf->sf_cr_mutex);
2093 	cp->nfree++;
2094 	ASSERT(cp->nfree <= cp->ntot);
2095 
2096 	elem->next = cp->free;
2097 	cp->free = elem;
2098 	mutex_exit(&sf->sf_cr_mutex);
2099 }
2100 
2101 
2102 /*
2103  * free our pool of cmd/response buffers
2104  */
2105 static void
2106 sf_crpool_free(struct sf *sf)
2107 {
2108 	struct sf_cr_pool *cp, *prev;
2109 
2110 	prev = NULL;
2111 	mutex_enter(&sf->sf_cr_mutex);
2112 	cp = sf->sf_cr_pool;
2113 	while (cp != NULL) {
2114 		if (cp->nfree == cp->ntot) {
2115 			if (prev != NULL) {
2116 				prev->next = cp->next;
2117 			} else {
2118 				sf->sf_cr_pool = cp->next;
2119 			}
2120 			sf->sf_cr_pool_cnt--;
2121 			mutex_exit(&sf->sf_cr_mutex);
2122 
2123 			(void) ddi_dma_unbind_handle(cp->cmd_dma_handle);
2124 			ddi_dma_free_handle(&cp->cmd_dma_handle);
2125 			(void) ddi_dma_unbind_handle(cp->rsp_dma_handle);
2126 			ddi_dma_free_handle(&cp->rsp_dma_handle);
2127 			ddi_dma_mem_free(&cp->cmd_acc_handle);
2128 			ddi_dma_mem_free(&cp->rsp_acc_handle);
2129 			kmem_free((caddr_t)cp, sizeof (struct sf_cr_pool));
2130 			return;
2131 		}
2132 		prev = cp;
2133 		cp = cp->next;
2134 	}
2135 	mutex_exit(&sf->sf_cr_mutex);
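	/*
	 * NB: at most one fully-idle pool is freed per call, so a caller
	 * that wants to drain the whole list must call this repeatedly
	 */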
2136 } 2137 2138 2139 /* ARGSUSED */ 2140 static int 2141 sf_kmem_cache_constructor(void *buf, void *arg, int size) 2142 { 2143 struct sf_pkt *cmd = buf; 2144 2145 mutex_init(&cmd->cmd_abort_mutex, NULL, MUTEX_DRIVER, NULL); 2146 cmd->cmd_block = NULL; 2147 cmd->cmd_dmahandle = NULL; 2148 return (0); 2149 } 2150 2151 2152 /* ARGSUSED */ 2153 static void 2154 sf_kmem_cache_destructor(void *buf, void *size) 2155 { 2156 struct sf_pkt *cmd = buf; 2157 2158 if (cmd->cmd_dmahandle != NULL) { 2159 ddi_dma_free_handle(&cmd->cmd_dmahandle); 2160 } 2161 2162 if (cmd->cmd_block != NULL) { 2163 sf_cr_free(cmd->cmd_cr_pool, cmd); 2164 } 2165 mutex_destroy(&cmd->cmd_abort_mutex); 2166 } 2167 2168 2169 /* 2170 * called by transport when a state change occurs 2171 */ 2172 static void 2173 sf_statec_callback(void *arg, int msg) 2174 { 2175 struct sf *sf = (struct sf *)arg; 2176 struct sf_target *target; 2177 int i; 2178 struct sf_pkt *cmd; 2179 struct scsi_pkt *pkt; 2180 2181 2182 2183 switch (msg) { 2184 2185 case FCAL_STATUS_LOOP_ONLINE: { 2186 uchar_t al_pa; /* to save AL-PA */ 2187 int ret; /* ret value from getmap */ 2188 int lip_cnt; /* to save current count */ 2189 int cnt; /* map length */ 2190 2191 /* 2192 * the loop has gone online 2193 */ 2194 SF_DEBUG(1, (sf, CE_CONT, "sf%d: loop online\n", 2195 ddi_get_instance(sf->sf_dip))); 2196 mutex_enter(&sf->sf_mutex); 2197 sf->sf_lip_cnt++; 2198 sf->sf_state = SF_STATE_ONLINING; 2199 mutex_exit(&sf->sf_mutex); 2200 2201 /* scan each target hash queue */ 2202 for (i = 0; i < SF_NUM_HASH_QUEUES; i++) { 2203 target = sf->sf_wwn_lists[i]; 2204 while (target != NULL) { 2205 /* 2206 * foreach target, if it's not offline then 2207 * mark it as busy 2208 */ 2209 mutex_enter(&target->sft_mutex); 2210 if (!(target->sft_state & SF_TARGET_OFFLINE)) 2211 target->sft_state |= (SF_TARGET_BUSY 2212 | SF_TARGET_MARK); 2213 #ifdef DEBUG 2214 /* 2215 * for debugging, print out info on any 2216 * pending commands (left hanging) 2217 */ 2218 cmd = target->sft_pkt_head; 2219 while (cmd != (struct sf_pkt *)&target-> 2220 sft_pkt_head) { 2221 if (cmd->cmd_state == 2222 SF_STATE_ISSUED) { 2223 SF_DEBUG(1, (sf, CE_CONT, 2224 "cmd 0x%p pending " 2225 "after lip\n", 2226 (void *)cmd->cmd_fp_pkt)); 2227 } 2228 cmd = cmd->cmd_forw; 2229 } 2230 #endif 2231 mutex_exit(&target->sft_mutex); 2232 target = target->sft_next; 2233 } 2234 } 2235 2236 /* 2237 * since the loop has just gone online get a new map from 2238 * the transport 2239 */ 2240 if ((ret = soc_get_lilp_map(sf->sf_sochandle, sf->sf_socp, 2241 sf->sf_sochandle->fcal_portno, (uint32_t)sf-> 2242 sf_lilp_dmacookie.dmac_address, 1)) != FCAL_SUCCESS) { 2243 if (sf_core && (sf_core & SF_CORE_LILP_FAILED)) { 2244 (void) soc_take_core(sf->sf_sochandle, 2245 sf->sf_socp); 2246 sf_core = 0; 2247 } 2248 sf_log(sf, CE_WARN, 2249 "!soc lilp map failed status=0x%x\n", ret); 2250 mutex_enter(&sf->sf_mutex); 2251 sf->sf_timer = sf_watchdog_time + SF_OFFLINE_TIMEOUT; 2252 sf->sf_lip_cnt++; 2253 sf->sf_state = SF_STATE_OFFLINE; 2254 mutex_exit(&sf->sf_mutex); 2255 return; 2256 } 2257 2258 /* ensure consistent view of DMA memory */ 2259 (void) ddi_dma_sync(sf->sf_lilp_dmahandle, (off_t)0, (size_t)0, 2260 DDI_DMA_SYNC_FORKERNEL); 2261 2262 /* how many entries in map ? 
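		 * (lilp_length counts every AL-PA on the loop, including our
		 * own, which is why sf_device_count is set to cnt - 1 below)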
*/ 2263 cnt = sf->sf_lilp_map->lilp_length; 2264 if (cnt >= SF_MAX_LILP_ENTRIES) { 2265 sf_log(sf, CE_WARN, "invalid lilp map\n"); 2266 return; 2267 } 2268 2269 mutex_enter(&sf->sf_mutex); 2270 sf->sf_device_count = cnt - 1; 2271 sf->sf_al_pa = sf->sf_lilp_map->lilp_myalpa; 2272 lip_cnt = sf->sf_lip_cnt; 2273 al_pa = sf->sf_al_pa; 2274 2275 SF_DEBUG(1, (sf, CE_CONT, 2276 "!lilp map has %d entries, al_pa is %x\n", cnt, al_pa)); 2277 2278 /* 2279 * since the last entry of the map may be mine (common) check 2280 * for that, and if it is we have one less entry to look at 2281 */ 2282 if (sf->sf_lilp_map->lilp_alpalist[cnt-1] == al_pa) { 2283 cnt--; 2284 } 2285 /* If we didn't get a valid loop map enable all targets */ 2286 if (sf->sf_lilp_map->lilp_magic == FCAL_BADLILP_MAGIC) { 2287 for (i = 0; i < sizeof (sf_switch_to_alpa); i++) 2288 sf->sf_lilp_map->lilp_alpalist[i] = 2289 sf_switch_to_alpa[i]; 2290 cnt = i; 2291 sf->sf_device_count = cnt - 1; 2292 } 2293 if (sf->sf_device_count == 0) { 2294 sf_finish_init(sf, lip_cnt); 2295 mutex_exit(&sf->sf_mutex); 2296 break; 2297 } 2298 mutex_exit(&sf->sf_mutex); 2299 2300 SF_DEBUG(2, (sf, CE_WARN, 2301 "!statec_callback: starting with %d targets\n", 2302 sf->sf_device_count)); 2303 2304 /* scan loop map, logging into all ports (except mine) */ 2305 for (i = 0; i < cnt; i++) { 2306 SF_DEBUG(1, (sf, CE_CONT, 2307 "!lilp map entry %d = %x,%x\n", i, 2308 sf->sf_lilp_map->lilp_alpalist[i], 2309 sf_alpa_to_switch[ 2310 sf->sf_lilp_map->lilp_alpalist[i]])); 2311 /* is this entry for somebody else ? */ 2312 if (sf->sf_lilp_map->lilp_alpalist[i] != al_pa) { 2313 /* do a PLOGI to this port */ 2314 if (!sf_login(sf, LA_ELS_PLOGI, 2315 sf->sf_lilp_map->lilp_alpalist[i], 2316 sf->sf_lilp_map->lilp_alpalist[cnt-1], 2317 lip_cnt)) { 2318 /* a problem logging in */ 2319 mutex_enter(&sf->sf_mutex); 2320 if (lip_cnt == sf->sf_lip_cnt) { 2321 /* 2322 * problem not from a new LIP 2323 */ 2324 sf->sf_device_count--; 2325 ASSERT(sf->sf_device_count 2326 >= 0); 2327 if (sf->sf_device_count == 0) { 2328 sf_finish_init(sf, 2329 lip_cnt); 2330 } 2331 } 2332 mutex_exit(&sf->sf_mutex); 2333 } 2334 } 2335 } 2336 break; 2337 } 2338 2339 case FCAL_STATUS_ERR_OFFLINE: 2340 /* 2341 * loop has gone offline due to an error 2342 */ 2343 SF_DEBUG(1, (sf, CE_CONT, "sf%d: loop offline\n", 2344 ddi_get_instance(sf->sf_dip))); 2345 mutex_enter(&sf->sf_mutex); 2346 sf->sf_lip_cnt++; 2347 sf->sf_timer = sf_watchdog_time + SF_OFFLINE_TIMEOUT; 2348 if (!sf->sf_online_timer) { 2349 sf->sf_online_timer = sf_watchdog_time + 2350 SF_ONLINE_TIMEOUT; 2351 } 2352 /* 2353 * if we are suspended, preserve the SF_STATE_SUSPENDED flag, 2354 * since throttling logic in sf_watch() depends on 2355 * preservation of this flag while device is suspended 2356 */ 2357 if (sf->sf_state & SF_STATE_SUSPENDED) { 2358 sf->sf_state |= SF_STATE_OFFLINE; 2359 SF_DEBUG(1, (sf, CE_CONT, 2360 "sf_statec_callback, sf%d: " 2361 "got FCAL_STATE_OFFLINE during DDI_SUSPEND\n", 2362 ddi_get_instance(sf->sf_dip))); 2363 } else { 2364 sf->sf_state = SF_STATE_OFFLINE; 2365 } 2366 2367 /* scan each possible target on the loop */ 2368 for (i = 0; i < sf_max_targets; i++) { 2369 target = sf->sf_targets[i]; 2370 while (target != NULL) { 2371 mutex_enter(&target->sft_mutex); 2372 if (!(target->sft_state & SF_TARGET_OFFLINE)) 2373 target->sft_state |= (SF_TARGET_BUSY 2374 | SF_TARGET_MARK); 2375 mutex_exit(&target->sft_mutex); 2376 target = target->sft_next_lun; 2377 } 2378 } 2379 mutex_exit(&sf->sf_mutex); 2380 break; 2381 2382 case 
FCAL_STATE_RESET: {
2383 		struct sf_els_hdr *privp;	/* ptr to private list */
2384 		struct sf_els_hdr *tmpp1;	/* tmp prev hdr ptr */
2385 		struct sf_els_hdr *tmpp2;	/* tmp next hdr ptr */
2386 		struct sf_els_hdr *head;	/* to save our private list */
2387 		struct fcal_packet *fpkt;	/* ptr to pkt in hdr */
2388 
2389 		/*
2390 		 * a transport reset
2391 		 */
2392 		SF_DEBUG(1, (sf, CE_CONT, "!sf%d: soc reset\n",
2393 		    ddi_get_instance(sf->sf_dip)));
2394 		tmpp1 = head = NULL;
2395 		mutex_enter(&sf->sf_mutex);
2396 		sf->sf_lip_cnt++;
2397 		sf->sf_timer = sf_watchdog_time + SF_RESET_TIMEOUT;
2398 		/*
2399 		 * if we are suspended, preserve the SF_STATE_SUSPENDED flag,
2400 		 * since throttling logic in sf_watch() depends on
2401 		 * preservation of this flag while device is suspended
2402 		 */
2403 		if (sf->sf_state & SF_STATE_SUSPENDED) {
2404 			sf->sf_state |= SF_STATE_OFFLINE;
2405 			SF_DEBUG(1, (sf, CE_CONT,
2406 			    "sf_statec_callback, sf%d: "
2407 			    "got FCAL_STATE_RESET during DDI_SUSPEND\n",
2408 			    ddi_get_instance(sf->sf_dip)));
2409 		} else {
2410 			sf->sf_state = SF_STATE_OFFLINE;
2411 		}
2412 
2413 		/*
2414 		 * scan each possible target on the loop, looking for targets
2415 		 * that need callbacks run
2416 		 */
2417 		for (i = 0; i < sf_max_targets; i++) {
2418 			target = sf->sf_targets[i];
2419 			while (target != NULL) {
2420 				if (!(target->sft_state & SF_TARGET_OFFLINE)) {
2421 					target->sft_state |= (SF_TARGET_BUSY
2422 					    | SF_TARGET_MARK);
2423 					mutex_exit(&sf->sf_mutex);
2424 					/*
2425 					 * run remove event callbacks for lun
2426 					 *
2427 					 * We have a nasty race condition here
2428 					 * 'cause we're dropping this mutex to
2429 					 * run the callback and expect the
2430 					 * linked list to be the same.
2431 					 */
2432 					(void) ndi_event_retrieve_cookie(
2433 					    sf->sf_event_hdl, target->sft_dip,
2434 					    FCAL_REMOVE_EVENT, &sf_remove_eid,
2435 					    NDI_EVENT_NOPASS);
2436 					(void) ndi_event_run_callbacks(
2437 					    sf->sf_event_hdl,
2438 					    target->sft_dip,
2439 					    sf_remove_eid, NULL);
2440 					mutex_enter(&sf->sf_mutex);
2441 				}
2442 				target = target->sft_next_lun;
2443 			}
2444 		}
2445 
2446 		/*
2447 		 * scan for ELS commands that are in transport, not complete,
2448 		 * and have a valid timeout, building a private list
2449 		 */
2450 		privp = sf->sf_els_list;
2451 		while (privp != NULL) {
2452 			fpkt = privp->fpkt;
2453 			if ((fpkt->fcal_cmd_state & FCAL_CMD_IN_TRANSPORT) &&
2454 			    (!(fpkt->fcal_cmd_state & FCAL_CMD_COMPLETE)) &&
2455 			    (privp->timeout != SF_INVALID_TIMEOUT)) {
2456 				/*
2457 				 * cmd in transport && not complete &&
2458 				 * timeout valid
2459 				 *
2460 				 * move this entry from ELS input list to our
2461 				 * private list
2462 				 */
2463 
2464 				tmpp2 = privp->next; /* save ptr to next */
2465 
2466 				/* push this on private list head */
2467 				privp->next = head;
2468 				head = privp;
2469 
2470 				/* remove this entry from input list */
2471 				if (tmpp1 != NULL) {
2472 					/*
2473 					 * remove this entry from somewhere in
2474 					 * the middle of the list
2475 					 */
2476 					tmpp1->next = tmpp2;
2477 					if (tmpp2 != NULL) {
2478 						tmpp2->prev = tmpp1;
2479 					}
2480 				} else {
2481 					/*
2482 					 * remove this entry from the head
2483 					 * of the list
2484 					 */
2485 					sf->sf_els_list = tmpp2;
2486 					if (tmpp2 != NULL) {
2487 						tmpp2->prev = NULL;
2488 					}
2489 				}
2490 				privp = tmpp2; /* skip to next entry */
2491 			} else {
2492 				tmpp1 = privp; /* save ptr to prev entry */
2493 				privp = privp->next; /* skip to next entry */
2494 			}
2495 		}
2496 
2497 		mutex_exit(&sf->sf_mutex);
2498 
2499 		/*
2500 		 * for each cmd in our list, free the ELS packet associated
2501 		 * with it
2502 		 */
2503 		privp = head;
2504 		while (privp != NULL) {
2505 			fpkt = privp->fpkt;
2506 			privp = privp->next;
2507 
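			/*
			 * note: privp is advanced before the free because
			 * sf_els_free() releases the private header (privp)
			 * along with the packet itself
			 */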
sf_els_free(fpkt); 2508 } 2509 2510 /* 2511 * scan for commands from each possible target 2512 */ 2513 for (i = 0; i < sf_max_targets; i++) { 2514 target = sf->sf_targets[i]; 2515 while (target != NULL) { 2516 /* 2517 * scan all active commands for this target, 2518 * looking for commands that have been issued, 2519 * are in transport, and are not yet complete 2520 * (so we can terminate them because of the 2521 * reset) 2522 */ 2523 mutex_enter(&target->sft_pkt_mutex); 2524 cmd = target->sft_pkt_head; 2525 while (cmd != (struct sf_pkt *)&target-> 2526 sft_pkt_head) { 2527 fpkt = cmd->cmd_fp_pkt; 2528 mutex_enter(&cmd->cmd_abort_mutex); 2529 if ((cmd->cmd_state == 2530 SF_STATE_ISSUED) && 2531 (fpkt->fcal_cmd_state & 2532 FCAL_CMD_IN_TRANSPORT) && 2533 (!(fpkt->fcal_cmd_state & 2534 FCAL_CMD_COMPLETE))) { 2535 /* a command to be reset */ 2536 pkt = cmd->cmd_pkt; 2537 pkt->pkt_reason = CMD_RESET; 2538 pkt->pkt_statistics |= 2539 STAT_BUS_RESET; 2540 cmd->cmd_state = SF_STATE_IDLE; 2541 mutex_exit(&cmd-> 2542 cmd_abort_mutex); 2543 mutex_exit(&target-> 2544 sft_pkt_mutex); 2545 if (pkt->pkt_comp != NULL) { 2546 (*pkt->pkt_comp)(pkt); 2547 } 2548 mutex_enter(&target-> 2549 sft_pkt_mutex); 2550 cmd = target->sft_pkt_head; 2551 } else { 2552 mutex_exit(&cmd-> 2553 cmd_abort_mutex); 2554 /* get next command */ 2555 cmd = cmd->cmd_forw; 2556 } 2557 } 2558 mutex_exit(&target->sft_pkt_mutex); 2559 target = target->sft_next_lun; 2560 } 2561 } 2562 2563 /* 2564 * get packet queue for this target, resetting all remaining 2565 * commands 2566 */ 2567 mutex_enter(&sf->sf_mutex); 2568 cmd = sf->sf_pkt_head; 2569 sf->sf_pkt_head = NULL; 2570 mutex_exit(&sf->sf_mutex); 2571 2572 while (cmd != NULL) { 2573 pkt = cmd->cmd_pkt; 2574 cmd = cmd->cmd_next; 2575 pkt->pkt_reason = CMD_RESET; 2576 pkt->pkt_statistics |= STAT_BUS_RESET; 2577 if (pkt->pkt_comp != NULL) { 2578 (*pkt->pkt_comp)(pkt); 2579 } 2580 } 2581 break; 2582 } 2583 2584 default: 2585 break; 2586 } 2587 } 2588 2589 2590 /* 2591 * called to send a PLOGI (N_port login) ELS request to a destination ID, 2592 * returning TRUE upon success, else returning FALSE 2593 */ 2594 static int 2595 sf_login(struct sf *sf, uchar_t els_code, uchar_t dest_id, uint_t arg1, 2596 int lip_cnt) 2597 { 2598 struct la_els_logi *logi; 2599 struct sf_els_hdr *privp; 2600 2601 2602 if (sf_els_alloc(sf, dest_id, sizeof (struct sf_els_hdr), 2603 sizeof (union sf_els_cmd), sizeof (union sf_els_rsp), 2604 (caddr_t *)&privp, (caddr_t *)&logi) == NULL) { 2605 sf_log(sf, CE_WARN, "Cannot allocate PLOGI for target %x " 2606 "due to DVMA shortage.\n", sf_alpa_to_switch[dest_id]); 2607 return (FALSE); 2608 } 2609 2610 privp->lip_cnt = lip_cnt; 2611 if (els_code == LA_ELS_PLOGI) { 2612 bcopy((caddr_t)sf->sf_sochandle->fcal_loginparms, 2613 (caddr_t)&logi->common_service, sizeof (struct la_els_logi) 2614 - 4); 2615 bcopy((caddr_t)&sf->sf_sochandle->fcal_p_wwn, 2616 (caddr_t)&logi->nport_ww_name, sizeof (la_wwn_t)); 2617 bcopy((caddr_t)&sf->sf_sochandle->fcal_n_wwn, 2618 (caddr_t)&logi->node_ww_name, sizeof (la_wwn_t)); 2619 bzero((caddr_t)&logi->reserved, 16); 2620 } else if (els_code == LA_ELS_LOGO) { 2621 bcopy((caddr_t)&sf->sf_sochandle->fcal_p_wwn, 2622 (caddr_t)&(((struct la_els_logo *)logi)->nport_ww_name), 8); 2623 ((struct la_els_logo *)logi)->reserved = 0; 2624 ((struct la_els_logo *)logi)->nport_id[0] = 0; 2625 ((struct la_els_logo *)logi)->nport_id[1] = 0; 2626 ((struct la_els_logo *)logi)->nport_id[2] = arg1; 2627 } 2628 2629 privp->els_code = els_code; 2630 logi->ls_code = 
els_code; 2631 logi->mbz[0] = 0; 2632 logi->mbz[1] = 0; 2633 logi->mbz[2] = 0; 2634 2635 privp->timeout = sf_watchdog_time + SF_ELS_TIMEOUT; 2636 return (sf_els_transport(sf, privp)); 2637 } 2638 2639 2640 /* 2641 * send an ELS IU via the transport, 2642 * returning TRUE upon success, else returning FALSE 2643 */ 2644 static int 2645 sf_els_transport(struct sf *sf, struct sf_els_hdr *privp) 2646 { 2647 struct fcal_packet *fpkt = privp->fpkt; 2648 2649 2650 (void) ddi_dma_sync(privp->cmd_dma_handle, (off_t)0, (size_t)0, 2651 DDI_DMA_SYNC_FORDEV); 2652 privp->prev = NULL; 2653 mutex_enter(&sf->sf_mutex); 2654 privp->next = sf->sf_els_list; 2655 if (sf->sf_els_list != NULL) { 2656 sf->sf_els_list->prev = privp; 2657 } 2658 sf->sf_els_list = privp; 2659 mutex_exit(&sf->sf_mutex); 2660 2661 /* call the transport to send a packet */ 2662 if (soc_transport(sf->sf_sochandle, fpkt, FCAL_NOSLEEP, 2663 CQ_REQUEST_1) != FCAL_TRANSPORT_SUCCESS) { 2664 mutex_enter(&sf->sf_mutex); 2665 if (privp->prev != NULL) { 2666 privp->prev->next = privp->next; 2667 } 2668 if (privp->next != NULL) { 2669 privp->next->prev = privp->prev; 2670 } 2671 if (sf->sf_els_list == privp) { 2672 sf->sf_els_list = privp->next; 2673 } 2674 mutex_exit(&sf->sf_mutex); 2675 sf_els_free(fpkt); 2676 return (FALSE); /* failure */ 2677 } 2678 return (TRUE); /* success */ 2679 } 2680 2681 2682 /* 2683 * called as the pkt_comp routine for ELS FC packets 2684 */ 2685 static void 2686 sf_els_callback(struct fcal_packet *fpkt) 2687 { 2688 struct sf_els_hdr *privp = fpkt->fcal_pkt_private; 2689 struct sf *sf = privp->sf; 2690 struct sf *tsf; 2691 int tgt_id; 2692 struct la_els_logi *ptr = (struct la_els_logi *)privp->rsp; 2693 struct la_els_adisc *adisc = (struct la_els_adisc *)ptr; 2694 struct sf_target *target; 2695 short ncmds; 2696 short free_pkt = TRUE; 2697 2698 2699 /* 2700 * we've received an ELS callback, i.e. an ELS packet has arrived 2701 */ 2702 2703 /* take the current packet off of the queue */ 2704 mutex_enter(&sf->sf_mutex); 2705 if (privp->timeout == SF_INVALID_TIMEOUT) { 2706 mutex_exit(&sf->sf_mutex); 2707 return; 2708 } 2709 if (privp->prev != NULL) { 2710 privp->prev->next = privp->next; 2711 } 2712 if (privp->next != NULL) { 2713 privp->next->prev = privp->prev; 2714 } 2715 if (sf->sf_els_list == privp) { 2716 sf->sf_els_list = privp->next; 2717 } 2718 privp->prev = privp->next = NULL; 2719 mutex_exit(&sf->sf_mutex); 2720 2721 /* get # pkts in this callback */ 2722 ncmds = fpkt->fcal_ncmds; 2723 ASSERT(ncmds >= 0); 2724 mutex_enter(&sf->sf_cmd_mutex); 2725 sf->sf_ncmds = ncmds; 2726 mutex_exit(&sf->sf_cmd_mutex); 2727 2728 /* sync idea of memory */ 2729 (void) ddi_dma_sync(privp->rsp_dma_handle, (off_t)0, (size_t)0, 2730 DDI_DMA_SYNC_FORKERNEL); 2731 2732 /* was this an OK ACC msg ?? 
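	 * (i.e. the transport status is FCAL_STATUS_OK and the ELS reply
	 * code is LA_ELS_ACC -- anything else is handled as a failure below)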
*/
2733 	if ((fpkt->fcal_pkt_status == FCAL_STATUS_OK) &&
2734 	    (ptr->ls_code == LA_ELS_ACC)) {
2735 
2736 		/*
2737 		 * this was an OK ACC pkt
2738 		 */
2739 
2740 		switch (privp->els_code) {
2741 		case LA_ELS_PLOGI:
2742 			/*
2743 			 * was able to do an N_port login
2744 			 */
2745 			SF_DEBUG(2, (sf, CE_CONT,
2746 			    "!PLOGI to al_pa %x succeeded, wwn %x%x\n",
2747 			    privp->dest_nport_id,
2748 			    *((int *)&ptr->nport_ww_name.raw_wwn[0]),
2749 			    *((int *)&ptr->nport_ww_name.raw_wwn[4])));
2750 			/* try to do a process login */
2751 			if (!sf_do_prli(sf, privp, ptr)) {
2752 				free_pkt = FALSE;
2753 				goto fail; /* PRLI failed */
2754 			}
2755 			break;
2756 		case LA_ELS_PRLI:
2757 			/*
2758 			 * was able to do a process login
2759 			 */
2760 			SF_DEBUG(2, (sf, CE_CONT,
2761 			    "!PRLI to al_pa %x succeeded\n",
2762 			    privp->dest_nport_id));
2763 			/* try to do address discovery */
2764 			if (sf_do_adisc(sf, privp) != 1) {
2765 				free_pkt = FALSE;
2766 				goto fail; /* ADISC failed */
2767 			}
2768 			break;
2769 		case LA_ELS_ADISC:
2770 			/*
2771 			 * found a target via ADISC
2772 			 */
2773 
2774 			SF_DEBUG(2, (sf, CE_CONT,
2775 			    "!ADISC to al_pa %x succeeded\n",
2776 			    privp->dest_nport_id));
2777 
2778 			/* create the target info */
2779 			if ((target = sf_create_target(sf, privp,
2780 			    sf_alpa_to_switch[(uchar_t)adisc->hard_address],
2781 			    (int64_t)0))
2782 			    == NULL) {
2783 				goto fail; /* can't create target */
2784 			}
2785 
2786 			/*
2787 			 * ensure address discovered matches what we thought
2788 			 * it would be
2789 			 */
2790 			if ((uchar_t)adisc->hard_address !=
2791 			    privp->dest_nport_id) {
2792 				sf_log(sf, CE_WARN,
2793 				    "target 0x%x, AL-PA 0x%x and "
2794 				    "hard address 0x%x don't match\n",
2795 				    sf_alpa_to_switch[
2796 				    (uchar_t)privp->dest_nport_id],
2797 				    privp->dest_nport_id,
2798 				    (uchar_t)adisc->hard_address);
2799 				mutex_enter(&sf->sf_mutex);
2800 				sf_offline_target(sf, target);
2801 				mutex_exit(&sf->sf_mutex);
2802 				goto fail; /* addr doesn't match */
2803 			}
2804 			/*
2805 			 * get the LUN list from the target via REPORT_LUNS
2806 			 */
2807 			if (!sf_do_reportlun(sf, privp, target)) {
2808 				mutex_enter(&sf->sf_mutex);
2809 				sf_offline_target(sf, target);
2810 				mutex_exit(&sf->sf_mutex);
2811 				free_pkt = FALSE;
2812 				goto fail; /* REPORT_LUNS failed */
2813 			}
2814 			break;
2815 		default:
2816 			SF_DEBUG(2, (sf, CE_CONT,
2817 			    "!ELS %x to al_pa %x succeeded\n",
2818 			    privp->els_code, privp->dest_nport_id));
2819 			sf_els_free(fpkt);
2820 			break;
2821 		}
2822 
2823 	} else {
2824 
2825 		/*
2826 		 * oh oh -- this was not an OK ACC packet
2827 		 */
2828 
2829 		/* get target ID from dest loop address */
2830 		tgt_id = sf_alpa_to_switch[(uchar_t)privp->dest_nport_id];
2831 
2832 		/* keep track of failures */
2833 		sf->sf_stats.tstats[tgt_id].els_failures++;
2834 		if (++(privp->retries) < sf_els_retries &&
2835 		    fpkt->fcal_pkt_status != FCAL_STATUS_OPEN_FAIL) {
2836 			if (fpkt->fcal_pkt_status ==
2837 			    FCAL_STATUS_MAX_XCHG_EXCEEDED) {
2838 				tsf = sf->sf_sibling;
2839 				if (tsf != NULL) {
2840 					mutex_enter(&tsf->sf_cmd_mutex);
2841 					tsf->sf_flag = 1;
2842 					tsf->sf_throttle = SF_DECR_DELTA;
2843 					mutex_exit(&tsf->sf_cmd_mutex);
2844 				}
2845 			}
2846 			privp->timeout = sf_watchdog_time + SF_ELS_TIMEOUT;
2847 			privp->prev = NULL;
2848 
2849 			mutex_enter(&sf->sf_mutex);
2850 
2851 			if (privp->lip_cnt == sf->sf_lip_cnt) {
2852 				SF_DEBUG(1, (sf, CE_WARN,
2853 				    "!ELS %x to al_pa %x failed, retrying",
2854 				    privp->els_code, privp->dest_nport_id));
2855 				privp->next = sf->sf_els_list;
2856 				if (sf->sf_els_list != NULL) {
2857 					sf->sf_els_list->prev = privp;
2858 				}
2859 
2860 				sf->sf_els_list = privp;
2861 
2862 				mutex_exit(&sf->sf_mutex);
2863 				/* device busy? wait a bit ...
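				 * (MAX_XCHG_EXCEEDED marks the request for a
				 * delayed retry; the watchdog path resends it
				 * later rather than pounding a target that
				 * has no free exchanges)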
*/ 2864 if (fpkt->fcal_pkt_status == 2865 FCAL_STATUS_MAX_XCHG_EXCEEDED) { 2866 privp->delayed_retry = 1; 2867 return; 2868 } 2869 /* call the transport to send a pkt */ 2870 if (soc_transport(sf->sf_sochandle, fpkt, 2871 FCAL_NOSLEEP, CQ_REQUEST_1) != 2872 FCAL_TRANSPORT_SUCCESS) { 2873 mutex_enter(&sf->sf_mutex); 2874 if (privp->prev != NULL) { 2875 privp->prev->next = 2876 privp->next; 2877 } 2878 if (privp->next != NULL) { 2879 privp->next->prev = 2880 privp->prev; 2881 } 2882 if (sf->sf_els_list == privp) { 2883 sf->sf_els_list = privp->next; 2884 } 2885 mutex_exit(&sf->sf_mutex); 2886 goto fail; 2887 } else 2888 return; 2889 } else { 2890 mutex_exit(&sf->sf_mutex); 2891 goto fail; 2892 } 2893 } else { 2894 #ifdef DEBUG 2895 if (fpkt->fcal_pkt_status != 0x36 || sfdebug > 4) { 2896 SF_DEBUG(2, (sf, CE_NOTE, "ELS %x to al_pa %x failed", 2897 privp->els_code, privp->dest_nport_id)); 2898 if (fpkt->fcal_pkt_status == FCAL_STATUS_OK) { 2899 SF_DEBUG(2, (sf, CE_NOTE, 2900 "els reply code = %x", ptr->ls_code)); 2901 if (ptr->ls_code == LA_ELS_RJT) 2902 SF_DEBUG(1, (sf, CE_CONT, 2903 "LS_RJT reason = %x\n", 2904 *(((uint_t *)ptr) + 1))); 2905 } else 2906 SF_DEBUG(2, (sf, CE_NOTE, 2907 "fc packet status = %x", 2908 fpkt->fcal_pkt_status)); 2909 } 2910 #endif 2911 goto fail; 2912 } 2913 } 2914 return; /* success */ 2915 fail: 2916 mutex_enter(&sf->sf_mutex); 2917 if (sf->sf_lip_cnt == privp->lip_cnt) { 2918 sf->sf_device_count--; 2919 ASSERT(sf->sf_device_count >= 0); 2920 if (sf->sf_device_count == 0) { 2921 sf_finish_init(sf, privp->lip_cnt); 2922 } 2923 } 2924 mutex_exit(&sf->sf_mutex); 2925 if (free_pkt) { 2926 sf_els_free(fpkt); 2927 } 2928 } 2929 2930 2931 /* 2932 * send a PRLI (process login) ELS IU via the transport, 2933 * returning TRUE upon success, else returning FALSE 2934 */ 2935 static int 2936 sf_do_prli(struct sf *sf, struct sf_els_hdr *privp, struct la_els_logi *ptr) 2937 { 2938 struct la_els_prli *prli = (struct la_els_prli *)privp->cmd; 2939 struct fcp_prli *fprli; 2940 struct fcal_packet *fpkt = privp->fpkt; 2941 2942 2943 fpkt->fcal_socal_request.sr_dataseg[0].fc_count = 2944 sizeof (struct la_els_prli); 2945 privp->els_code = LA_ELS_PRLI; 2946 fprli = (struct fcp_prli *)prli->service_params; 2947 prli->ls_code = LA_ELS_PRLI; 2948 prli->page_length = 0x10; 2949 prli->payload_length = sizeof (struct la_els_prli); 2950 fprli->type = 0x08; /* no define here? 
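-- 0x08 is the FC-4 type code for SCSI-FCP (TYPE_SCSI_FCP)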
*/ 2951 fprli->resvd1 = 0; 2952 fprli->orig_process_assoc_valid = 0; 2953 fprli->resp_process_assoc_valid = 0; 2954 fprli->establish_image_pair = 1; 2955 fprli->resvd2 = 0; 2956 fprli->resvd3 = 0; 2957 fprli->data_overlay_allowed = 0; 2958 fprli->initiator_fn = 1; 2959 fprli->target_fn = 0; 2960 fprli->cmd_data_mixed = 0; 2961 fprli->data_resp_mixed = 0; 2962 fprli->read_xfer_rdy_disabled = 1; 2963 fprli->write_xfer_rdy_disabled = 0; 2964 2965 bcopy((caddr_t)&ptr->nport_ww_name, (caddr_t)&privp->port_wwn, 2966 sizeof (privp->port_wwn)); 2967 bcopy((caddr_t)&ptr->node_ww_name, (caddr_t)&privp->node_wwn, 2968 sizeof (privp->node_wwn)); 2969 2970 privp->timeout = sf_watchdog_time + SF_ELS_TIMEOUT; 2971 return (sf_els_transport(sf, privp)); 2972 } 2973 2974 2975 /* 2976 * send an ADISC (address discovery) ELS IU via the transport, 2977 * returning TRUE upon success, else returning FALSE 2978 */ 2979 static int 2980 sf_do_adisc(struct sf *sf, struct sf_els_hdr *privp) 2981 { 2982 struct la_els_adisc *adisc = (struct la_els_adisc *)privp->cmd; 2983 struct fcal_packet *fpkt = privp->fpkt; 2984 2985 privp->els_code = LA_ELS_ADISC; 2986 adisc->ls_code = LA_ELS_ADISC; 2987 adisc->mbz[0] = 0; 2988 adisc->mbz[1] = 0; 2989 adisc->mbz[2] = 0; 2990 adisc->hard_address = 0; /* ??? */ 2991 fpkt->fcal_socal_request.sr_dataseg[0].fc_count = 2992 sizeof (struct la_els_adisc); 2993 bcopy((caddr_t)&sf->sf_sochandle->fcal_p_wwn, 2994 (caddr_t)&adisc->port_wwn, sizeof (adisc->port_wwn)); 2995 bcopy((caddr_t)&sf->sf_sochandle->fcal_n_wwn, 2996 (caddr_t)&adisc->node_wwn, sizeof (adisc->node_wwn)); 2997 adisc->nport_id = sf->sf_al_pa; 2998 2999 privp->timeout = sf_watchdog_time + SF_ELS_TIMEOUT; 3000 return (sf_els_transport(sf, privp)); 3001 } 3002 3003 3004 static struct fcal_packet * 3005 sf_els_alloc(struct sf *sf, uchar_t dest_id, int priv_size, int cmd_size, 3006 int rsp_size, caddr_t *rprivp, caddr_t *cmd_buf) 3007 { 3008 struct fcal_packet *fpkt; 3009 ddi_dma_cookie_t pcookie; 3010 ddi_dma_cookie_t rcookie; 3011 struct sf_els_hdr *privp; 3012 ddi_dma_handle_t cmd_dma_handle = NULL; 3013 ddi_dma_handle_t rsp_dma_handle = NULL; 3014 ddi_acc_handle_t cmd_acc_handle = NULL; 3015 ddi_acc_handle_t rsp_acc_handle = NULL; 3016 size_t real_size; 3017 uint_t ccount; 3018 fc_frame_header_t *hp; 3019 int cmd_bound = FALSE, rsp_bound = FALSE; 3020 caddr_t cmd = NULL; 3021 caddr_t rsp = NULL; 3022 3023 if ((fpkt = (struct fcal_packet *)kmem_zalloc( 3024 sizeof (struct fcal_packet), KM_NOSLEEP)) == NULL) { 3025 SF_DEBUG(1, (sf, CE_WARN, 3026 "Could not allocate fcal_packet for ELS\n")); 3027 return (NULL); 3028 } 3029 3030 if ((privp = (struct sf_els_hdr *)kmem_zalloc(priv_size, 3031 KM_NOSLEEP)) == NULL) { 3032 SF_DEBUG(1, (sf, CE_WARN, 3033 "Could not allocate sf_els_hdr for ELS\n")); 3034 goto fail; 3035 } 3036 3037 privp->size = priv_size; 3038 fpkt->fcal_pkt_private = (caddr_t)privp; 3039 3040 if (ddi_dma_alloc_handle(sf->sf_dip, sf->sf_sochandle->fcal_dmaattr, 3041 DDI_DMA_DONTWAIT, NULL, &cmd_dma_handle) != DDI_SUCCESS) { 3042 SF_DEBUG(1, (sf, CE_WARN, 3043 "Could not allocate DMA handle for ELS\n")); 3044 goto fail; 3045 } 3046 3047 if (ddi_dma_mem_alloc(cmd_dma_handle, cmd_size, 3048 sf->sf_sochandle->fcal_accattr, DDI_DMA_CONSISTENT, 3049 DDI_DMA_DONTWAIT, NULL, &cmd, 3050 &real_size, &cmd_acc_handle) != DDI_SUCCESS) { 3051 SF_DEBUG(1, (sf, CE_WARN, 3052 "Could not allocate DMA memory for ELS\n")); 3053 goto fail; 3054 } 3055 3056 if (real_size < cmd_size) { 3057 SF_DEBUG(1, (sf, CE_WARN, 3058 "DMA memory too 
small for ELS\n")); 3059 goto fail; 3060 } 3061 3062 if (ddi_dma_addr_bind_handle(cmd_dma_handle, NULL, 3063 cmd, real_size, DDI_DMA_WRITE | DDI_DMA_CONSISTENT, 3064 DDI_DMA_DONTWAIT, NULL, &pcookie, &ccount) != DDI_DMA_MAPPED) { 3065 SF_DEBUG(1, (sf, CE_WARN, 3066 "Could not bind DMA memory for ELS\n")); 3067 goto fail; 3068 } 3069 cmd_bound = TRUE; 3070 3071 if (ccount != 1) { 3072 SF_DEBUG(1, (sf, CE_WARN, 3073 "Wrong cookie count for ELS\n")); 3074 goto fail; 3075 } 3076 3077 if (ddi_dma_alloc_handle(sf->sf_dip, sf->sf_sochandle->fcal_dmaattr, 3078 DDI_DMA_DONTWAIT, NULL, &rsp_dma_handle) != DDI_SUCCESS) { 3079 SF_DEBUG(1, (sf, CE_WARN, 3080 "Could not allocate DMA handle for ELS rsp\n")); 3081 goto fail; 3082 } 3083 if (ddi_dma_mem_alloc(rsp_dma_handle, rsp_size, 3084 sf->sf_sochandle->fcal_accattr, DDI_DMA_CONSISTENT, 3085 DDI_DMA_DONTWAIT, NULL, &rsp, 3086 &real_size, &rsp_acc_handle) != DDI_SUCCESS) { 3087 SF_DEBUG(1, (sf, CE_WARN, 3088 "Could not allocate DMA memory for ELS rsp\n")); 3089 goto fail; 3090 } 3091 3092 if (real_size < rsp_size) { 3093 SF_DEBUG(1, (sf, CE_WARN, 3094 "DMA memory too small for ELS rsp\n")); 3095 goto fail; 3096 } 3097 3098 if (ddi_dma_addr_bind_handle(rsp_dma_handle, NULL, 3099 rsp, real_size, DDI_DMA_READ | DDI_DMA_CONSISTENT, 3100 DDI_DMA_DONTWAIT, NULL, &rcookie, &ccount) != DDI_DMA_MAPPED) { 3101 SF_DEBUG(1, (sf, CE_WARN, 3102 "Could not bind DMA memory for ELS rsp\n")); 3103 goto fail; 3104 } 3105 rsp_bound = TRUE; 3106 3107 if (ccount != 1) { 3108 SF_DEBUG(1, (sf, CE_WARN, 3109 "Wrong cookie count for ELS rsp\n")); 3110 goto fail; 3111 } 3112 3113 privp->cmd = cmd; 3114 privp->sf = sf; 3115 privp->cmd_dma_handle = cmd_dma_handle; 3116 privp->cmd_acc_handle = cmd_acc_handle; 3117 privp->rsp = rsp; 3118 privp->rsp_dma_handle = rsp_dma_handle; 3119 privp->rsp_acc_handle = rsp_acc_handle; 3120 privp->dest_nport_id = dest_id; 3121 privp->fpkt = fpkt; 3122 3123 fpkt->fcal_pkt_cookie = sf->sf_socp; 3124 fpkt->fcal_pkt_comp = sf_els_callback; 3125 fpkt->fcal_magic = FCALP_MAGIC; 3126 fpkt->fcal_pkt_flags = 0; 3127 fpkt->fcal_socal_request.sr_soc_hdr.sh_flags = 3128 (ushort_t)(SOC_FC_HEADER | sf->sf_sochandle->fcal_portno); 3129 fpkt->fcal_socal_request.sr_soc_hdr.sh_class = 3; 3130 fpkt->fcal_socal_request.sr_soc_hdr.sh_seg_cnt = 2; 3131 fpkt->fcal_socal_request.sr_soc_hdr.sh_byte_cnt = cmd_size; 3132 fpkt->fcal_socal_request.sr_cqhdr.cq_hdr_count = 1; 3133 fpkt->fcal_socal_request.sr_cqhdr.cq_hdr_flags = 0; 3134 fpkt->fcal_socal_request.sr_cqhdr.cq_hdr_seqno = 0; 3135 fpkt->fcal_socal_request.sr_cqhdr.cq_hdr_type = CQ_TYPE_SIMPLE; 3136 fpkt->fcal_socal_request.sr_dataseg[0].fc_base = (uint32_t) 3137 pcookie.dmac_address; 3138 fpkt->fcal_socal_request.sr_dataseg[0].fc_count = cmd_size; 3139 fpkt->fcal_socal_request.sr_dataseg[1].fc_base = (uint32_t) 3140 rcookie.dmac_address; 3141 fpkt->fcal_socal_request.sr_dataseg[1].fc_count = rsp_size; 3142 3143 /* Fill in the Fabric Channel Header */ 3144 hp = &fpkt->fcal_socal_request.sr_fc_frame_hdr; 3145 hp->r_ctl = R_CTL_ELS_REQ; 3146 hp->d_id = dest_id; 3147 hp->s_id = sf->sf_al_pa; 3148 hp->type = TYPE_EXTENDED_LS; 3149 hp->reserved1 = 0; 3150 hp->f_ctl = F_CTL_SEQ_INITIATIVE | F_CTL_FIRST_SEQ; 3151 hp->seq_id = 0; 3152 hp->df_ctl = 0; 3153 hp->seq_cnt = 0; 3154 hp->ox_id = 0xffff; 3155 hp->rx_id = 0xffff; 3156 hp->ro = 0; 3157 3158 *rprivp = (caddr_t)privp; 3159 *cmd_buf = cmd; 3160 return (fpkt); 3161 3162 fail: 3163 if (cmd_dma_handle != NULL) { 3164 if (cmd_bound) { 3165 (void) 
ddi_dma_unbind_handle(cmd_dma_handle); 3166 } 3167 ddi_dma_free_handle(&cmd_dma_handle); 3168 privp->cmd_dma_handle = NULL; 3169 } 3170 if (rsp_dma_handle != NULL) { 3171 if (rsp_bound) { 3172 (void) ddi_dma_unbind_handle(rsp_dma_handle); 3173 } 3174 ddi_dma_free_handle(&rsp_dma_handle); 3175 privp->rsp_dma_handle = NULL; 3176 } 3177 sf_els_free(fpkt); 3178 return (NULL); 3179 } 3180 3181 3182 static void 3183 sf_els_free(struct fcal_packet *fpkt) 3184 { 3185 struct sf_els_hdr *privp = fpkt->fcal_pkt_private; 3186 3187 if (privp != NULL) { 3188 if (privp->cmd_dma_handle != NULL) { 3189 (void) ddi_dma_unbind_handle(privp->cmd_dma_handle); 3190 ddi_dma_free_handle(&privp->cmd_dma_handle); 3191 } 3192 if (privp->cmd != NULL) { 3193 ddi_dma_mem_free(&privp->cmd_acc_handle); 3194 } 3195 3196 if (privp->rsp_dma_handle != NULL) { 3197 (void) ddi_dma_unbind_handle(privp->rsp_dma_handle); 3198 ddi_dma_free_handle(&privp->rsp_dma_handle); 3199 } 3200 3201 if (privp->rsp != NULL) { 3202 ddi_dma_mem_free(&privp->rsp_acc_handle); 3203 } 3204 if (privp->data_dma_handle) { 3205 (void) ddi_dma_unbind_handle(privp->data_dma_handle); 3206 ddi_dma_free_handle(&privp->data_dma_handle); 3207 } 3208 if (privp->data_buf) { 3209 ddi_dma_mem_free(&privp->data_acc_handle); 3210 } 3211 kmem_free(privp, privp->size); 3212 } 3213 kmem_free(fpkt, sizeof (struct fcal_packet)); 3214 } 3215 3216 3217 static struct sf_target * 3218 sf_create_target(struct sf *sf, struct sf_els_hdr *privp, int tnum, int64_t lun) 3219 { 3220 struct sf_target *target, *ntarget, *otarget, *ptarget; 3221 int hash; 3222 #ifdef RAID_LUNS 3223 int64_t orig_lun = lun; 3224 3225 /* XXXX Work around SCSA limitations. */ 3226 lun = *((short *)&lun); 3227 #endif 3228 ntarget = kmem_zalloc(sizeof (struct sf_target), KM_NOSLEEP); 3229 mutex_enter(&sf->sf_mutex); 3230 if (sf->sf_lip_cnt != privp->lip_cnt) { 3231 mutex_exit(&sf->sf_mutex); 3232 if (ntarget != NULL) 3233 kmem_free(ntarget, sizeof (struct sf_target)); 3234 return (NULL); 3235 } 3236 3237 target = sf_lookup_target(sf, privp->port_wwn, lun); 3238 if (lun != 0) { 3239 /* 3240 * Since LUNs != 0 are queued up after LUN == 0, find LUN == 0 3241 * and enqueue the new LUN. 3242 */ 3243 if ((ptarget = sf_lookup_target(sf, privp->port_wwn, 3244 (int64_t)0)) == NULL) { 3245 /* 3246 * Yeep -- no LUN 0? 3247 */ 3248 mutex_exit(&sf->sf_mutex); 3249 sf_log(sf, CE_WARN, "target 0x%x " 3250 "lun %" PRIx64 ": No LUN 0\n", tnum, lun); 3251 if (ntarget != NULL) 3252 kmem_free(ntarget, sizeof (struct sf_target)); 3253 return (NULL); 3254 } 3255 mutex_enter(&ptarget->sft_mutex); 3256 if (target != NULL && ptarget->sft_lip_cnt == sf->sf_lip_cnt && 3257 ptarget->sft_state&SF_TARGET_OFFLINE) { 3258 /* LUN 0 already finished, duplicate its state */ 3259 mutex_exit(&ptarget->sft_mutex); 3260 sf_offline_target(sf, target); 3261 mutex_exit(&sf->sf_mutex); 3262 if (ntarget != NULL) 3263 kmem_free(ntarget, sizeof (struct sf_target)); 3264 return (target); 3265 } else if (target != NULL) { 3266 /* 3267 * LUN 0 online or not examined yet. 
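			 * (ptarget is the LUN 0 entry; `target' is the
			 * existing entry for this LUN.)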
3268 			 * Try to bring the LUN back online
3269 			 */
3270 			mutex_exit(&ptarget->sft_mutex);
3271 			mutex_enter(&target->sft_mutex);
3272 			target->sft_lip_cnt = privp->lip_cnt;
3273 			target->sft_state |= SF_TARGET_BUSY;
3274 			target->sft_state &= ~(SF_TARGET_OFFLINE|
3275 			    SF_TARGET_MARK);
3276 			target->sft_al_pa = (uchar_t)privp->dest_nport_id;
3277 			target->sft_hard_address = sf_switch_to_alpa[tnum];
3278 			mutex_exit(&target->sft_mutex);
3279 			mutex_exit(&sf->sf_mutex);
3280 			if (ntarget != NULL)
3281 				kmem_free(ntarget, sizeof (struct sf_target));
3282 			return (target);
3283 		}
3284 		mutex_exit(&ptarget->sft_mutex);
3285 		if (ntarget == NULL) {
3286 			mutex_exit(&sf->sf_mutex);
3287 			return (NULL);
3288 		}
3289 		/* Initialize new target structure */
3290 		bcopy((caddr_t)&privp->node_wwn,
3291 		    (caddr_t)&ntarget->sft_node_wwn, sizeof (privp->node_wwn));
3292 		bcopy((caddr_t)&privp->port_wwn,
3293 		    (caddr_t)&ntarget->sft_port_wwn, sizeof (privp->port_wwn));
3294 		ntarget->sft_lun.l = lun;
3295 #ifdef RAID_LUNS
3296 		ntarget->sft_lun.l = orig_lun;
3297 		ntarget->sft_raid_lun = (uint_t)lun;
3298 #endif
3299 		mutex_init(&ntarget->sft_mutex, NULL, MUTEX_DRIVER, NULL);
3300 		mutex_init(&ntarget->sft_pkt_mutex, NULL, MUTEX_DRIVER, NULL);
3301 		/* Don't let anyone use this till we finish up init. */
3302 		mutex_enter(&ntarget->sft_mutex);
3303 		mutex_enter(&ntarget->sft_pkt_mutex);
3304 
3305 		hash = SF_HASH(privp->port_wwn, lun);
3306 		ntarget->sft_next = sf->sf_wwn_lists[hash];
3307 		sf->sf_wwn_lists[hash] = ntarget;
3308 
3309 		ntarget->sft_lip_cnt = privp->lip_cnt;
3310 		ntarget->sft_al_pa = (uchar_t)privp->dest_nport_id;
3311 		ntarget->sft_hard_address = sf_switch_to_alpa[tnum];
3312 		ntarget->sft_device_type = DTYPE_UNKNOWN;
3313 		ntarget->sft_state = SF_TARGET_BUSY;
3314 		ntarget->sft_pkt_head = (struct sf_pkt *)&ntarget->
3315 		    sft_pkt_head;
3316 		ntarget->sft_pkt_tail = (struct sf_pkt *)&ntarget->
3317 		    sft_pkt_head;
3318 
3319 		mutex_enter(&ptarget->sft_mutex);
3320 		/* walk to the tail of this target's LUN chain */
3321 		for (target = ptarget; target->sft_next_lun;
3322 		    target = target->sft_next_lun) {
3323 			otarget = target->sft_next_lun;
3324 		}
3325 		ntarget->sft_next_lun = target->sft_next_lun;
3326 		target->sft_next_lun = ntarget;
3327 		mutex_exit(&ptarget->sft_mutex);
3328 		mutex_exit(&ntarget->sft_pkt_mutex);
3329 		mutex_exit(&ntarget->sft_mutex);
3330 		mutex_exit(&sf->sf_mutex);
3331 		return (ntarget);
3332 
3333 	}
3334 	if (target != NULL && target->sft_lip_cnt == sf->sf_lip_cnt) {
3335 		/* It's been touched this LIP -- duplicate WWNs */
3336 		sf_offline_target(sf, target); /* And all the baby targets */
3337 		mutex_exit(&sf->sf_mutex);
3338 		sf_log(sf, CE_WARN, "target 0x%x, duplicate port wwns\n",
3339 		    tnum);
3340 		if (ntarget != NULL) {
3341 			kmem_free(ntarget, sizeof (struct sf_target));
3342 		}
3343 		return (NULL);
3344 	}
3345 
3346 	if ((otarget = sf->sf_targets[tnum]) != NULL) {
3347 		/* Someone else is in our slot */
3348 		mutex_enter(&otarget->sft_mutex);
3349 		if (otarget->sft_lip_cnt == sf->sf_lip_cnt) {
3350 			mutex_exit(&otarget->sft_mutex);
3351 			sf_offline_target(sf, otarget);
3352 			if (target != NULL)
3353 				sf_offline_target(sf, target);
3354 			mutex_exit(&sf->sf_mutex);
3355 			sf_log(sf, CE_WARN,
3356 			    "target 0x%x, duplicate switch settings\n", tnum);
3357 			if (ntarget != NULL)
3358 				kmem_free(ntarget, sizeof (struct sf_target));
3359 			return (NULL);
3360 		}
3361 		mutex_exit(&otarget->sft_mutex);
3362 		if (bcmp((caddr_t)&privp->port_wwn, (caddr_t)&otarget->
3363 		    sft_port_wwn, sizeof (privp->port_wwn))) {
3364 			sf_offline_target(sf, otarget);
3365 
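			/*
			 * the port WWN behind this switch setting changed:
			 * the stale target was just taken offline, and the
			 * slot's statistics are cleared below before reuse
			 */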
mutex_exit(&sf->sf_mutex); 3366 sf_log(sf, CE_WARN, "wwn changed on target 0x%x\n", 3367 tnum); 3368 bzero((caddr_t)&sf->sf_stats.tstats[tnum], 3369 sizeof (struct sf_target_stats)); 3370 mutex_enter(&sf->sf_mutex); 3371 } 3372 } 3373 3374 sf->sf_targets[tnum] = target; 3375 if ((target = sf->sf_targets[tnum]) == NULL) { 3376 if (ntarget == NULL) { 3377 mutex_exit(&sf->sf_mutex); 3378 return (NULL); 3379 } 3380 bcopy((caddr_t)&privp->node_wwn, 3381 (caddr_t)&ntarget->sft_node_wwn, sizeof (privp->node_wwn)); 3382 bcopy((caddr_t)&privp->port_wwn, 3383 (caddr_t)&ntarget->sft_port_wwn, sizeof (privp->port_wwn)); 3384 ntarget->sft_lun.l = lun; 3385 #ifdef RAID_LUNS 3386 ntarget->sft_lun.l = orig_lun; 3387 ntarget->sft_raid_lun = (uint_t)lun; 3388 #endif 3389 mutex_init(&ntarget->sft_mutex, NULL, MUTEX_DRIVER, NULL); 3390 mutex_init(&ntarget->sft_pkt_mutex, NULL, MUTEX_DRIVER, NULL); 3391 mutex_enter(&ntarget->sft_mutex); 3392 mutex_enter(&ntarget->sft_pkt_mutex); 3393 hash = SF_HASH(privp->port_wwn, lun); /* lun 0 */ 3394 ntarget->sft_next = sf->sf_wwn_lists[hash]; 3395 sf->sf_wwn_lists[hash] = ntarget; 3396 3397 target = ntarget; 3398 target->sft_lip_cnt = privp->lip_cnt; 3399 target->sft_al_pa = (uchar_t)privp->dest_nport_id; 3400 target->sft_hard_address = sf_switch_to_alpa[tnum]; 3401 target->sft_device_type = DTYPE_UNKNOWN; 3402 target->sft_state = SF_TARGET_BUSY; 3403 target->sft_pkt_head = (struct sf_pkt *)&target-> 3404 sft_pkt_head; 3405 target->sft_pkt_tail = (struct sf_pkt *)&target-> 3406 sft_pkt_head; 3407 sf->sf_targets[tnum] = target; 3408 mutex_exit(&ntarget->sft_mutex); 3409 mutex_exit(&ntarget->sft_pkt_mutex); 3410 mutex_exit(&sf->sf_mutex); 3411 } else { 3412 mutex_enter(&target->sft_mutex); 3413 target->sft_lip_cnt = privp->lip_cnt; 3414 target->sft_state |= SF_TARGET_BUSY; 3415 target->sft_state &= ~(SF_TARGET_OFFLINE|SF_TARGET_MARK); 3416 target->sft_al_pa = (uchar_t)privp->dest_nport_id; 3417 target->sft_hard_address = sf_switch_to_alpa[tnum]; 3418 mutex_exit(&target->sft_mutex); 3419 mutex_exit(&sf->sf_mutex); 3420 if (ntarget != NULL) 3421 kmem_free(ntarget, sizeof (struct sf_target)); 3422 } 3423 return (target); 3424 } 3425 3426 3427 /* 3428 * find the target for a given sf instance 3429 */ 3430 /* ARGSUSED */ 3431 static struct sf_target * 3432 #ifdef RAID_LUNS 3433 sf_lookup_target(struct sf *sf, uchar_t *wwn, int lun) 3434 #else 3435 sf_lookup_target(struct sf *sf, uchar_t *wwn, int64_t lun) 3436 #endif 3437 { 3438 int hash; 3439 struct sf_target *target; 3440 3441 ASSERT(mutex_owned(&sf->sf_mutex)); 3442 hash = SF_HASH(wwn, lun); 3443 3444 target = sf->sf_wwn_lists[hash]; 3445 while (target != NULL) { 3446 3447 #ifndef RAID_LUNS 3448 if (bcmp((caddr_t)wwn, (caddr_t)&target->sft_port_wwn, 3449 sizeof (target->sft_port_wwn)) == 0 && 3450 target->sft_lun.l == lun) 3451 break; 3452 #else 3453 if (bcmp((caddr_t)wwn, (caddr_t)&target->sft_port_wwn, 3454 sizeof (target->sft_port_wwn)) == 0 && 3455 target->sft_raid_lun == lun) 3456 break; 3457 #endif 3458 target = target->sft_next; 3459 } 3460 3461 return (target); 3462 } 3463 3464 3465 /* 3466 * Send out a REPORT_LUNS command. 
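 * The LUN-list buffer is DMA-mapped for read here, and the completion
 * work is done in sf_reportlun_callback().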
3467 */ 3468 static int 3469 sf_do_reportlun(struct sf *sf, struct sf_els_hdr *privp, 3470 struct sf_target *target) 3471 { 3472 struct fcal_packet *fpkt = privp->fpkt; 3473 ddi_dma_cookie_t pcookie; 3474 ddi_dma_handle_t lun_dma_handle = NULL; 3475 ddi_acc_handle_t lun_acc_handle; 3476 uint_t ccount; 3477 size_t real_size; 3478 caddr_t lun_buf = NULL; 3479 int handle_bound = 0; 3480 fc_frame_header_t *hp = &fpkt->fcal_socal_request.sr_fc_frame_hdr; 3481 struct fcp_cmd *reportlun = (struct fcp_cmd *)privp->cmd; 3482 char *msg = "Transport"; 3483 3484 if (ddi_dma_alloc_handle(sf->sf_dip, sf->sf_sochandle->fcal_dmaattr, 3485 DDI_DMA_DONTWAIT, NULL, &lun_dma_handle) != DDI_SUCCESS) { 3486 msg = "ddi_dma_alloc_handle()"; 3487 goto fail; 3488 } 3489 3490 if (ddi_dma_mem_alloc(lun_dma_handle, REPORT_LUNS_SIZE, 3491 sf->sf_sochandle->fcal_accattr, DDI_DMA_CONSISTENT, 3492 DDI_DMA_DONTWAIT, NULL, &lun_buf, 3493 &real_size, &lun_acc_handle) != DDI_SUCCESS) { 3494 msg = "ddi_dma_mem_alloc()"; 3495 goto fail; 3496 } 3497 3498 if (real_size < REPORT_LUNS_SIZE) { 3499 msg = "DMA mem < REPORT_LUNS_SIZE"; 3500 goto fail; 3501 } 3502 3503 if (ddi_dma_addr_bind_handle(lun_dma_handle, NULL, 3504 lun_buf, real_size, DDI_DMA_READ | 3505 DDI_DMA_CONSISTENT, DDI_DMA_DONTWAIT, 3506 NULL, &pcookie, &ccount) != DDI_DMA_MAPPED) { 3507 msg = "ddi_dma_addr_bind_handle()"; 3508 goto fail; 3509 } 3510 handle_bound = 1; 3511 3512 if (ccount != 1) { 3513 msg = "ccount != 1"; 3514 goto fail; 3515 } 3516 privp->els_code = 0; 3517 privp->target = target; 3518 privp->data_dma_handle = lun_dma_handle; 3519 privp->data_acc_handle = lun_acc_handle; 3520 privp->data_buf = lun_buf; 3521 3522 fpkt->fcal_pkt_comp = sf_reportlun_callback; 3523 fpkt->fcal_socal_request.sr_soc_hdr.sh_seg_cnt = 3; 3524 fpkt->fcal_socal_request.sr_cqhdr.cq_hdr_type = CQ_TYPE_IO_READ; 3525 fpkt->fcal_socal_request.sr_dataseg[0].fc_count = 3526 sizeof (struct fcp_cmd); 3527 fpkt->fcal_socal_request.sr_dataseg[2].fc_base = 3528 (uint32_t)pcookie.dmac_address; 3529 fpkt->fcal_socal_request.sr_dataseg[2].fc_count = pcookie.dmac_size; 3530 fpkt->fcal_socal_request.sr_soc_hdr.sh_byte_cnt = pcookie.dmac_size; 3531 hp->r_ctl = R_CTL_COMMAND; 3532 hp->type = TYPE_SCSI_FCP; 3533 bzero((caddr_t)reportlun, sizeof (struct fcp_cmd)); 3534 ((union scsi_cdb *)reportlun->fcp_cdb)->scc_cmd = SCMD_REPORT_LUNS; 3535 /* Now set the buffer size. If DDI gave us extra, that's O.K. 
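	 * (the allocation length is a four-byte, big-endian CDB field,
	 * filled in below one byte at a time from real_size)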
*/ 3536 ((union scsi_cdb *)reportlun->fcp_cdb)->scc5_count0 = 3537 (real_size&0x0ff); 3538 ((union scsi_cdb *)reportlun->fcp_cdb)->scc5_count1 = 3539 (real_size>>8)&0x0ff; 3540 ((union scsi_cdb *)reportlun->fcp_cdb)->scc5_count2 = 3541 (real_size>>16)&0x0ff; 3542 ((union scsi_cdb *)reportlun->fcp_cdb)->scc5_count3 = 3543 (real_size>>24)&0x0ff; 3544 reportlun->fcp_cntl.cntl_read_data = 1; 3545 reportlun->fcp_cntl.cntl_write_data = 0; 3546 reportlun->fcp_data_len = pcookie.dmac_size; 3547 reportlun->fcp_cntl.cntl_qtype = FCP_QTYPE_SIMPLE; 3548 3549 (void) ddi_dma_sync(lun_dma_handle, 0, 0, DDI_DMA_SYNC_FORDEV); 3550 /* We know he's there, so this should be fast */ 3551 privp->timeout = sf_watchdog_time + SF_FCP_TIMEOUT; 3552 if (sf_els_transport(sf, privp) == 1) 3553 return (1); 3554 3555 fail: 3556 sf_log(sf, CE_WARN, 3557 "%s failure for REPORTLUN to target 0x%x\n", 3558 msg, sf_alpa_to_switch[privp->dest_nport_id]); 3559 sf_els_free(fpkt); 3560 if (lun_dma_handle != NULL) { 3561 if (handle_bound) 3562 (void) ddi_dma_unbind_handle(lun_dma_handle); 3563 ddi_dma_free_handle(&lun_dma_handle); 3564 } 3565 if (lun_buf != NULL) { 3566 ddi_dma_mem_free(&lun_acc_handle); 3567 } 3568 return (0); 3569 } 3570 3571 /* 3572 * Handle the results of a REPORT_LUNS command: 3573 * Create additional targets if necessary 3574 * Initiate INQUIRYs on all LUNs. 3575 */ 3576 static void 3577 sf_reportlun_callback(struct fcal_packet *fpkt) 3578 { 3579 struct sf_els_hdr *privp = (struct sf_els_hdr *)fpkt-> 3580 fcal_pkt_private; 3581 struct scsi_report_luns *ptr = 3582 (struct scsi_report_luns *)privp->data_buf; 3583 struct sf *sf = privp->sf; 3584 struct sf_target *target = privp->target; 3585 struct fcp_rsp *rsp = NULL; 3586 int delayed_retry = 0; 3587 int tid = sf_alpa_to_switch[target->sft_hard_address]; 3588 int i, free_pkt = 1; 3589 short ncmds; 3590 3591 mutex_enter(&sf->sf_mutex); 3592 /* use as temporary state variable */ 3593 if (privp->timeout == SF_INVALID_TIMEOUT) { 3594 mutex_exit(&sf->sf_mutex); 3595 return; 3596 } 3597 if (privp->prev) 3598 privp->prev->next = privp->next; 3599 if (privp->next) 3600 privp->next->prev = privp->prev; 3601 if (sf->sf_els_list == privp) 3602 sf->sf_els_list = privp->next; 3603 privp->prev = privp->next = NULL; 3604 mutex_exit(&sf->sf_mutex); 3605 ncmds = fpkt->fcal_ncmds; 3606 ASSERT(ncmds >= 0); 3607 mutex_enter(&sf->sf_cmd_mutex); 3608 sf->sf_ncmds = ncmds; 3609 mutex_exit(&sf->sf_cmd_mutex); 3610 3611 if (fpkt->fcal_pkt_status == FCAL_STATUS_OK) { 3612 (void) ddi_dma_sync(privp->rsp_dma_handle, 0, 3613 0, DDI_DMA_SYNC_FORKERNEL); 3614 3615 rsp = (struct fcp_rsp *)privp->rsp; 3616 } 3617 SF_DEBUG(1, (sf, CE_CONT, 3618 "!REPORTLUN to al_pa %x pkt status %x scsi status %x\n", 3619 privp->dest_nport_id, 3620 fpkt->fcal_pkt_status, 3621 rsp?rsp->fcp_u.fcp_status.scsi_status:0)); 3622 3623 /* See if target simply does not support REPORT_LUNS. 
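	 * An ILLEGAL REQUEST sense with ASC 0x20 (invalid command opcode)
	 * is turned into a faked-up good completion that reports LUN 0 only.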
*/
3624 	if (rsp && rsp->fcp_u.fcp_status.scsi_status == STATUS_CHECK &&
3625 	    rsp->fcp_u.fcp_status.sense_len_set &&
3626 	    rsp->fcp_sense_len >=
3627 	    offsetof(struct scsi_extended_sense, es_qual_code)) {
3628 		struct scsi_extended_sense *sense;
3629 		sense = (struct scsi_extended_sense *)
3630 		    ((caddr_t)rsp + sizeof (struct fcp_rsp)
3631 		    + rsp->fcp_response_len);
3632 		if (sense->es_key == KEY_ILLEGAL_REQUEST) {
3633 			if (sense->es_add_code == 0x20) {
3634 				/* Fake LUN 0 */
3635 				SF_DEBUG(1, (sf, CE_CONT,
3636 				    "!REPORTLUN Faking good "
3637 				    "completion for alpa %x\n",
3638 				    privp->dest_nport_id));
3639 				ptr->lun_list_len = FCP_LUN_SIZE;
3640 				ptr->lun[0] = 0;
3641 				rsp->fcp_u.fcp_status.scsi_status =
3642 				    STATUS_GOOD;
3643 			} else if (sense->es_add_code == 0x25) {
3644 				SF_DEBUG(1, (sf, CE_CONT,
3645 				    "!REPORTLUN device alpa %x "
3646 				    "key %x code %x\n",
3647 				    privp->dest_nport_id,
3648 				    sense->es_key, sense->es_add_code));
3649 				goto fail;
3650 			}
3651 		} else if (sense->es_key ==
3652 		    KEY_UNIT_ATTENTION &&
3653 		    sense->es_add_code == 0x29) {
3654 			SF_DEBUG(1, (sf, CE_CONT,
3655 			    "!REPORTLUN device alpa %x was reset\n",
3656 			    privp->dest_nport_id));
3657 		} else {
3658 			SF_DEBUG(1, (sf, CE_CONT,
3659 			    "!REPORTLUN device alpa %x "
3660 			    "key %x code %x\n",
3661 			    privp->dest_nport_id,
3662 			    sense->es_key, sense->es_add_code));
3663 			/* XXXXXX The following is here to handle broken targets -- remove it later */
3664 			if (sf_reportlun_forever &&
3665 			    sense->es_key == KEY_UNIT_ATTENTION)
3666 				goto retry;
3667 			/* XXXXXX */
3668 			if (sense->es_key == KEY_NOT_READY)
3669 				delayed_retry = 1;
3670 		}
3671 	}
3672 
3673 	if (rsp && rsp->fcp_u.fcp_status.scsi_status == STATUS_GOOD) {
3674 		struct fcp_rsp_info *bep;
3675 
3676 		bep = (struct fcp_rsp_info *)(&rsp->
3677 		    fcp_response_len + 1);
3678 		if (!rsp->fcp_u.fcp_status.rsp_len_set ||
3679 		    bep->rsp_code == FCP_NO_FAILURE) {
3680 			(void) ddi_dma_sync(privp->data_dma_handle,
3681 			    0, 0, DDI_DMA_SYNC_FORKERNEL);
3682 
3683 			/* convert from #bytes to #LUNs (8 bytes per LUN) */
3684 			ptr->lun_list_len = ptr->lun_list_len >> 3;
3685 			SF_DEBUG(2, (sf, CE_CONT,
3686 			    "!REPORTLUN to al_pa %x succeeded: %d LUNs\n",
3687 			    privp->dest_nport_id, ptr->lun_list_len));
3688 			if (!ptr->lun_list_len) {
3689 				/* No LUNs? Ya gotta be kidding...
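				 * -- targets are expected to support at
				 * least LUN 0, so fall back to a single
				 * LUN 0 entry after logging the violation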
*/ 3690 sf_log(sf, CE_WARN, 3691 "SCSI violation -- " 3692 "target 0x%x reports no LUNs\n", 3693 sf_alpa_to_switch[ 3694 privp->dest_nport_id]); 3695 ptr->lun_list_len = 1; 3696 ptr->lun[0] = 0; 3697 } 3698 3699 mutex_enter(&sf->sf_mutex); 3700 if (sf->sf_lip_cnt == privp->lip_cnt) { 3701 sf->sf_device_count += ptr->lun_list_len - 1; 3702 } 3703 3704 mutex_exit(&sf->sf_mutex); 3705 for (i = 0; i < ptr->lun_list_len && privp->lip_cnt == 3706 sf->sf_lip_cnt; i++) { 3707 struct sf_els_hdr *nprivp; 3708 struct fcal_packet *nfpkt; 3709 3710 /* LUN 0 is already in `target' */ 3711 if (ptr->lun[i] != 0) { 3712 target = sf_create_target(sf, 3713 privp, tid, ptr->lun[i]); 3714 } 3715 nprivp = NULL; 3716 nfpkt = NULL; 3717 if (target) { 3718 nfpkt = sf_els_alloc(sf, 3719 target->sft_al_pa, 3720 sizeof (struct sf_els_hdr), 3721 sizeof (union sf_els_cmd), 3722 sizeof (union sf_els_rsp), 3723 (caddr_t *)&nprivp, 3724 (caddr_t *)&rsp); 3725 if (nprivp) 3726 nprivp->lip_cnt = 3727 privp->lip_cnt; 3728 } 3729 if (nfpkt && nprivp && 3730 (sf_do_inquiry(sf, nprivp, target) == 3731 0)) { 3732 mutex_enter(&sf->sf_mutex); 3733 if (sf->sf_lip_cnt == privp-> 3734 lip_cnt) { 3735 sf->sf_device_count --; 3736 } 3737 sf_offline_target(sf, target); 3738 mutex_exit(&sf->sf_mutex); 3739 } 3740 } 3741 sf_els_free(fpkt); 3742 return; 3743 } else { 3744 SF_DEBUG(1, (sf, CE_CONT, 3745 "!REPORTLUN al_pa %x fcp failure, " 3746 "fcp_rsp_code %x scsi status %x\n", 3747 privp->dest_nport_id, bep->rsp_code, 3748 rsp ? rsp->fcp_u.fcp_status.scsi_status:0)); 3749 goto fail; 3750 } 3751 } 3752 if (rsp && ((rsp->fcp_u.fcp_status.scsi_status == STATUS_BUSY) || 3753 (rsp->fcp_u.fcp_status.scsi_status == STATUS_QFULL))) { 3754 delayed_retry = 1; 3755 } 3756 3757 if (++(privp->retries) < sf_els_retries || 3758 (delayed_retry && privp->retries < SF_BSY_RETRIES)) { 3759 /* XXXXXX The following is here to handle broken targets -- remove it later */ 3760 retry: 3761 /* XXXXXX */ 3762 if (delayed_retry) { 3763 privp->retries--; 3764 privp->timeout = sf_watchdog_time + SF_BSY_TIMEOUT; 3765 privp->delayed_retry = 1; 3766 } else { 3767 privp->timeout = sf_watchdog_time + SF_FCP_TIMEOUT; 3768 } 3769 3770 privp->prev = NULL; 3771 mutex_enter(&sf->sf_mutex); 3772 if (privp->lip_cnt == sf->sf_lip_cnt) { 3773 if (!delayed_retry) 3774 SF_DEBUG(1, (sf, CE_WARN, 3775 "!REPORTLUN to al_pa %x failed, retrying\n", 3776 privp->dest_nport_id)); 3777 privp->next = sf->sf_els_list; 3778 if (sf->sf_els_list != NULL) 3779 sf->sf_els_list->prev = privp; 3780 sf->sf_els_list = privp; 3781 mutex_exit(&sf->sf_mutex); 3782 if (!delayed_retry && soc_transport(sf->sf_sochandle, 3783 fpkt, FCAL_NOSLEEP, CQ_REQUEST_1) != 3784 FCAL_TRANSPORT_SUCCESS) { 3785 mutex_enter(&sf->sf_mutex); 3786 if (privp->prev) 3787 privp->prev->next = privp->next; 3788 if (privp->next) 3789 privp->next->prev = privp->prev; 3790 if (sf->sf_els_list == privp) 3791 sf->sf_els_list = privp->next; 3792 mutex_exit(&sf->sf_mutex); 3793 goto fail; 3794 } else 3795 return; 3796 } else { 3797 mutex_exit(&sf->sf_mutex); 3798 } 3799 } else { 3800 fail: 3801 3802 /* REPORT_LUN failed -- try inquiry */ 3803 if (sf_do_inquiry(sf, privp, target) != 0) { 3804 return; 3805 } else { 3806 free_pkt = 0; 3807 } 3808 mutex_enter(&sf->sf_mutex); 3809 if (sf->sf_lip_cnt == privp->lip_cnt) { 3810 sf_log(sf, CE_WARN, "!REPORTLUN to target 0x%x failed\n", 3811 sf_alpa_to_switch[privp->dest_nport_id]); 3812 sf_offline_target(sf, target); 3813 sf->sf_device_count--; 3814 ASSERT(sf->sf_device_count >= 0); 3815 if 
(sf->sf_device_count == 0) 3816 sf_finish_init(sf, privp->lip_cnt); 3817 } 3818 mutex_exit(&sf->sf_mutex); 3819 } 3820 if (free_pkt) { 3821 sf_els_free(fpkt); 3822 } 3823 } 3824 3825 static int 3826 sf_do_inquiry(struct sf *sf, struct sf_els_hdr *privp, 3827 struct sf_target *target) 3828 { 3829 struct fcal_packet *fpkt = privp->fpkt; 3830 ddi_dma_cookie_t pcookie; 3831 ddi_dma_handle_t inq_dma_handle = NULL; 3832 ddi_acc_handle_t inq_acc_handle; 3833 uint_t ccount; 3834 size_t real_size; 3835 caddr_t inq_buf = NULL; 3836 int handle_bound = FALSE; 3837 fc_frame_header_t *hp = &fpkt->fcal_socal_request.sr_fc_frame_hdr; 3838 struct fcp_cmd *inq = (struct fcp_cmd *)privp->cmd; 3839 char *msg = "Transport"; 3840 3841 3842 if (ddi_dma_alloc_handle(sf->sf_dip, sf->sf_sochandle->fcal_dmaattr, 3843 DDI_DMA_DONTWAIT, NULL, &inq_dma_handle) != DDI_SUCCESS) { 3844 msg = "ddi_dma_alloc_handle()"; 3845 goto fail; 3846 } 3847 3848 if (ddi_dma_mem_alloc(inq_dma_handle, SUN_INQSIZE, 3849 sf->sf_sochandle->fcal_accattr, DDI_DMA_CONSISTENT, 3850 DDI_DMA_DONTWAIT, NULL, &inq_buf, 3851 &real_size, &inq_acc_handle) != DDI_SUCCESS) { 3852 msg = "ddi_dma_mem_alloc()"; 3853 goto fail; 3854 } 3855 3856 if (real_size < SUN_INQSIZE) { 3857 msg = "DMA mem < inquiry size"; 3858 goto fail; 3859 } 3860 3861 if (ddi_dma_addr_bind_handle(inq_dma_handle, NULL, 3862 inq_buf, real_size, DDI_DMA_READ | DDI_DMA_CONSISTENT, 3863 DDI_DMA_DONTWAIT, NULL, &pcookie, &ccount) != DDI_DMA_MAPPED) { 3864 msg = "ddi_dma_addr_bind_handle()"; 3865 goto fail; 3866 } 3867 handle_bound = TRUE; 3868 3869 if (ccount != 1) { 3870 msg = "ccount != 1"; 3871 goto fail; 3872 } 3873 privp->els_code = 0; /* not an ELS command */ 3874 privp->target = target; 3875 privp->data_dma_handle = inq_dma_handle; 3876 privp->data_acc_handle = inq_acc_handle; 3877 privp->data_buf = inq_buf; 3878 fpkt->fcal_pkt_comp = sf_inq_callback; 3879 fpkt->fcal_socal_request.sr_soc_hdr.sh_seg_cnt = 3; 3880 fpkt->fcal_socal_request.sr_cqhdr.cq_hdr_type = CQ_TYPE_IO_READ; 3881 fpkt->fcal_socal_request.sr_dataseg[0].fc_count = 3882 sizeof (struct fcp_cmd); 3883 fpkt->fcal_socal_request.sr_dataseg[2].fc_base = 3884 (uint32_t)pcookie.dmac_address; 3885 fpkt->fcal_socal_request.sr_dataseg[2].fc_count = pcookie.dmac_size; 3886 fpkt->fcal_socal_request.sr_soc_hdr.sh_byte_cnt = pcookie.dmac_size; 3887 hp->r_ctl = R_CTL_COMMAND; 3888 hp->type = TYPE_SCSI_FCP; 3889 bzero((caddr_t)inq, sizeof (struct fcp_cmd)); 3890 ((union scsi_cdb *)inq->fcp_cdb)->scc_cmd = SCMD_INQUIRY; 3891 ((union scsi_cdb *)inq->fcp_cdb)->g0_count0 = SUN_INQSIZE; 3892 bcopy((caddr_t)&target->sft_lun.b, (caddr_t)&inq->fcp_ent_addr, 3893 FCP_LUN_SIZE); 3894 inq->fcp_cntl.cntl_read_data = 1; 3895 inq->fcp_cntl.cntl_write_data = 0; 3896 inq->fcp_data_len = pcookie.dmac_size; 3897 inq->fcp_cntl.cntl_qtype = FCP_QTYPE_SIMPLE; 3898 3899 (void) ddi_dma_sync(inq_dma_handle, (off_t)0, (size_t)0, 3900 DDI_DMA_SYNC_FORDEV); 3901 privp->timeout = sf_watchdog_time + SF_FCP_TIMEOUT; 3902 SF_DEBUG(5, (sf, CE_WARN, 3903 "!Sending INQUIRY to al_pa %x lun %" PRIx64 "\n", 3904 privp->dest_nport_id, 3905 SCSA_LUN(target))); 3906 return (sf_els_transport(sf, privp)); 3907 3908 fail: 3909 sf_log(sf, CE_WARN, 3910 "%s failure for INQUIRY to target 0x%x\n", 3911 msg, sf_alpa_to_switch[privp->dest_nport_id]); 3912 sf_els_free(fpkt); 3913 if (inq_dma_handle != NULL) { 3914 if (handle_bound) { 3915 (void) ddi_dma_unbind_handle(inq_dma_handle); 3916 } 3917 ddi_dma_free_handle(&inq_dma_handle); 3918 } 3919 if (inq_buf != NULL) { 3920 
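/*
 * Annotation (editor's sketch, not part of the original driver): this
 * fail path unwinds the standard three-step DDI DMA setup performed
 * above -- ddi_dma_alloc_handle(), ddi_dma_mem_alloc(),
 * ddi_dma_addr_bind_handle() -- undoing only the steps that actually
 * succeeded.  Stripped of driver detail, the unwind looks like:
 *
 *	if (dma_handle != NULL) {
 *		if (handle_bound)
 *			(void) ddi_dma_unbind_handle(dma_handle);
 *		ddi_dma_free_handle(&dma_handle);
 *	}
 *	if (buf != NULL)
 *		ddi_dma_mem_free(&acc_handle);
 *
 * handle_bound is tracked because ddi_dma_unbind_handle() may only be
 * called on a handle that was successfully bound, and the buffer is
 * released through its access handle, independent of the DMA handle.
 */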
ddi_dma_mem_free(&inq_acc_handle); 3921 } 3922 return (FALSE); 3923 } 3924 3925 3926 /* 3927 * called as the pkt_comp routine for INQ packets 3928 */ 3929 static void 3930 sf_inq_callback(struct fcal_packet *fpkt) 3931 { 3932 struct sf_els_hdr *privp = (struct sf_els_hdr *)fpkt-> 3933 fcal_pkt_private; 3934 struct scsi_inquiry *prt = (struct scsi_inquiry *)privp->data_buf; 3935 struct sf *sf = privp->sf; 3936 struct sf *tsf; 3937 struct sf_target *target = privp->target; 3938 struct fcp_rsp *rsp; 3939 int delayed_retry = FALSE; 3940 short ncmds; 3941 3942 3943 mutex_enter(&sf->sf_mutex); 3944 /* use as temporary state variable */ 3945 if (privp->timeout == SF_INVALID_TIMEOUT) { 3946 mutex_exit(&sf->sf_mutex); 3947 return; 3948 } 3949 if (privp->prev != NULL) { 3950 privp->prev->next = privp->next; 3951 } 3952 if (privp->next != NULL) { 3953 privp->next->prev = privp->prev; 3954 } 3955 if (sf->sf_els_list == privp) { 3956 sf->sf_els_list = privp->next; 3957 } 3958 privp->prev = privp->next = NULL; 3959 mutex_exit(&sf->sf_mutex); 3960 ncmds = fpkt->fcal_ncmds; 3961 ASSERT(ncmds >= 0); 3962 mutex_enter(&sf->sf_cmd_mutex); 3963 sf->sf_ncmds = ncmds; 3964 mutex_exit(&sf->sf_cmd_mutex); 3965 3966 if (fpkt->fcal_pkt_status == FCAL_STATUS_OK) { 3967 3968 (void) ddi_dma_sync(privp->rsp_dma_handle, (off_t)0, 3969 (size_t)0, DDI_DMA_SYNC_FORKERNEL); 3970 3971 rsp = (struct fcp_rsp *)privp->rsp; 3972 SF_DEBUG(2, (sf, CE_CONT, 3973 "!INQUIRY to al_pa %x scsi status %x", 3974 privp->dest_nport_id, rsp->fcp_u.fcp_status.scsi_status)); 3975 3976 if ((rsp->fcp_u.fcp_status.scsi_status == STATUS_GOOD) && 3977 !rsp->fcp_u.fcp_status.resid_over && 3978 (!rsp->fcp_u.fcp_status.resid_under || 3979 ((SUN_INQSIZE - rsp->fcp_resid) >= SUN_MIN_INQLEN))) { 3980 struct fcp_rsp_info *bep; 3981 3982 bep = (struct fcp_rsp_info *)(&rsp-> 3983 fcp_response_len + 1); 3984 3985 if (!rsp->fcp_u.fcp_status.rsp_len_set || 3986 (bep->rsp_code == FCP_NO_FAILURE)) { 3987 3988 SF_DEBUG(2, (sf, CE_CONT, 3989 "!INQUIRY to al_pa %x lun %" PRIx64 3990 " succeeded\n", 3991 privp->dest_nport_id, SCSA_LUN(target))); 3992 3993 (void) ddi_dma_sync(privp->data_dma_handle, 3994 (off_t)0, (size_t)0, 3995 DDI_DMA_SYNC_FORKERNEL); 3996 3997 mutex_enter(&sf->sf_mutex); 3998 3999 if (sf->sf_lip_cnt == privp->lip_cnt) { 4000 mutex_enter(&target->sft_mutex); 4001 target->sft_device_type = 4002 prt->inq_dtype; 4003 bcopy(prt, &target->sft_inq, 4004 sizeof (*prt)); 4005 mutex_exit(&target->sft_mutex); 4006 sf->sf_device_count--; 4007 ASSERT(sf->sf_device_count >= 0); 4008 if (sf->sf_device_count == 0) { 4009 sf_finish_init(sf, 4010 privp->lip_cnt); 4011 } 4012 } 4013 mutex_exit(&sf->sf_mutex); 4014 sf_els_free(fpkt); 4015 return; 4016 } 4017 } else if ((rsp->fcp_u.fcp_status.scsi_status == 4018 STATUS_BUSY) || 4019 (rsp->fcp_u.fcp_status.scsi_status == STATUS_QFULL) || 4020 (rsp->fcp_u.fcp_status.scsi_status == STATUS_CHECK)) { 4021 delayed_retry = TRUE; 4022 } 4023 } else { 4024 SF_DEBUG(2, (sf, CE_CONT, "!INQUIRY to al_pa %x fc status %x", 4025 privp->dest_nport_id, fpkt->fcal_pkt_status)); 4026 } 4027 4028 if (++(privp->retries) < sf_els_retries || 4029 (delayed_retry && privp->retries < SF_BSY_RETRIES)) { 4030 if (fpkt->fcal_pkt_status == FCAL_STATUS_MAX_XCHG_EXCEEDED) { 4031 tsf = sf->sf_sibling; 4032 if (tsf != NULL) { 4033 mutex_enter(&tsf->sf_cmd_mutex); 4034 tsf->sf_flag = 1; 4035 tsf->sf_throttle = SF_DECR_DELTA; 4036 mutex_exit(&tsf->sf_cmd_mutex); 4037 } 4038 delayed_retry = 1; 4039 } 4040 if (delayed_retry) { 4041 privp->retries--; 4042 
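/*
 * Annotation (editor's note, not part of the original driver):
 * privp->timeout is an absolute deadline in sf_watchdog_time units,
 * not a relative interval.  The watchdog (sf_watch) advances
 * sf_watchdog_time on every tick and is expected to resend or fail
 * any ELS/FCP probe whose deadline has passed, roughly:
 *
 *	if (privp->timeout <= sf_watchdog_time)
 *		... resend (delayed_retry) or declare failure ...
 *
 * so a delayed retry parks the request for SF_BSY_TIMEOUT ticks,
 * while the retries-- above keeps BUSY/QFULL responses from being
 * charged against the ordinary sf_els_retries budget (they are
 * bounded by SF_BSY_RETRIES instead).
 */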
privp->timeout = sf_watchdog_time + SF_BSY_TIMEOUT; 4043 privp->delayed_retry = TRUE; 4044 } else { 4045 privp->timeout = sf_watchdog_time + SF_FCP_TIMEOUT; 4046 } 4047 4048 privp->prev = NULL; 4049 mutex_enter(&sf->sf_mutex); 4050 if (privp->lip_cnt == sf->sf_lip_cnt) { 4051 if (!delayed_retry) { 4052 SF_DEBUG(1, (sf, CE_WARN, 4053 "INQUIRY to al_pa %x failed, retrying", 4054 privp->dest_nport_id)); 4055 } 4056 privp->next = sf->sf_els_list; 4057 if (sf->sf_els_list != NULL) { 4058 sf->sf_els_list->prev = privp; 4059 } 4060 sf->sf_els_list = privp; 4061 mutex_exit(&sf->sf_mutex); 4062 /* if not delayed call transport to send a pkt */ 4063 if (!delayed_retry && 4064 (soc_transport(sf->sf_sochandle, fpkt, 4065 FCAL_NOSLEEP, CQ_REQUEST_1) != 4066 FCAL_TRANSPORT_SUCCESS)) { 4067 mutex_enter(&sf->sf_mutex); 4068 if (privp->prev != NULL) { 4069 privp->prev->next = privp->next; 4070 } 4071 if (privp->next != NULL) { 4072 privp->next->prev = privp->prev; 4073 } 4074 if (sf->sf_els_list == privp) { 4075 sf->sf_els_list = privp->next; 4076 } 4077 mutex_exit(&sf->sf_mutex); 4078 goto fail; 4079 } 4080 return; 4081 } 4082 mutex_exit(&sf->sf_mutex); 4083 } else { 4084 fail: 4085 mutex_enter(&sf->sf_mutex); 4086 if (sf->sf_lip_cnt == privp->lip_cnt) { 4087 sf_offline_target(sf, target); 4088 sf_log(sf, CE_NOTE, 4089 "INQUIRY to target 0x%x lun %" PRIx64 " failed. " 4090 "Retry Count: %d\n", 4091 sf_alpa_to_switch[privp->dest_nport_id], 4092 SCSA_LUN(target), 4093 privp->retries); 4094 sf->sf_device_count--; 4095 ASSERT(sf->sf_device_count >= 0); 4096 if (sf->sf_device_count == 0) { 4097 sf_finish_init(sf, privp->lip_cnt); 4098 } 4099 } 4100 mutex_exit(&sf->sf_mutex); 4101 } 4102 sf_els_free(fpkt); 4103 } 4104 4105 4106 static void 4107 sf_finish_init(struct sf *sf, int lip_cnt) 4108 { 4109 int i; /* loop index */ 4110 int cflag; 4111 struct sf_target *target; /* current target */ 4112 dev_info_t *dip; 4113 struct sf_hp_elem *elem; /* hotplug element created */ 4114 4115 SF_DEBUG(1, (sf, CE_WARN, "!sf_finish_init\n")); 4116 ASSERT(mutex_owned(&sf->sf_mutex)); 4117 4118 /* scan all hash queues */ 4119 for (i = 0; i < SF_NUM_HASH_QUEUES; i++) { 4120 target = sf->sf_wwn_lists[i]; 4121 while (target != NULL) { 4122 mutex_enter(&target->sft_mutex); 4123 4124 /* see if target is not offline */ 4125 if ((target->sft_state & SF_TARGET_OFFLINE)) { 4126 /* 4127 * target already offline 4128 */ 4129 mutex_exit(&target->sft_mutex); 4130 goto next_entry; 4131 } 4132 4133 /* 4134 * target is not already offline -- see if it has 4135 * already been marked as ready to go offline 4136 */ 4137 if (target->sft_state & SF_TARGET_MARK) { 4138 /* 4139 * target already marked, so take it offline 4140 */ 4141 mutex_exit(&target->sft_mutex); 4142 sf_offline_target(sf, target); 4143 goto next_entry; 4144 } 4145 4146 /* clear target busy flag */ 4147 target->sft_state &= ~SF_TARGET_BUSY; 4148 4149 /* is target init not yet done ?? 
*/ 4150 cflag = !(target->sft_state & SF_TARGET_INIT_DONE); 4151 4152 /* get pointer to target dip */ 4153 dip = target->sft_dip; 4154 4155 mutex_exit(&target->sft_mutex); 4156 mutex_exit(&sf->sf_mutex); 4157 4158 if (cflag && (dip == NULL)) { 4159 /* 4160 * target init not yet done && 4161 * devinfo not yet created 4162 */ 4163 sf_create_devinfo(sf, target, lip_cnt); 4164 mutex_enter(&sf->sf_mutex); 4165 goto next_entry; 4166 } 4167 4168 /* 4169 * target init already done || devinfo already created 4170 */ 4171 ASSERT(dip != NULL); 4172 if (!sf_create_props(dip, target, lip_cnt)) { 4173 /* a problem creating properties */ 4174 mutex_enter(&sf->sf_mutex); 4175 goto next_entry; 4176 } 4177 4178 /* create a new element for the hotplug list */ 4179 if ((elem = kmem_zalloc(sizeof (struct sf_hp_elem), 4180 KM_NOSLEEP)) != NULL) { 4181 4182 /* fill in the new element */ 4183 elem->dip = dip; 4184 elem->target = target; 4185 elem->what = SF_ONLINE; 4186 4187 /* add the new element into the hotplug list */ 4188 mutex_enter(&sf->sf_hp_daemon_mutex); 4189 if (sf->sf_hp_elem_tail != NULL) { 4190 sf->sf_hp_elem_tail->next = elem; 4191 sf->sf_hp_elem_tail = elem; 4192 } else { 4193 /* this is the first element in list */ 4194 sf->sf_hp_elem_head = 4195 sf->sf_hp_elem_tail = 4196 elem; 4197 } 4198 cv_signal(&sf->sf_hp_daemon_cv); 4199 mutex_exit(&sf->sf_hp_daemon_mutex); 4200 } else { 4201 /* could not allocate memory for element ?? */ 4202 (void) ndi_devi_online_async(dip, 0); 4203 } 4204 4205 mutex_enter(&sf->sf_mutex); 4206 4207 next_entry: 4208 /* ensure no new LIPs have occurred */ 4209 if (sf->sf_lip_cnt != lip_cnt) { 4210 return; 4211 } 4212 target = target->sft_next; 4213 } 4214 4215 /* done scanning all targets in this queue */ 4216 } 4217 4218 /* done with all hash queues */ 4219 4220 sf->sf_state = SF_STATE_ONLINE; 4221 sf->sf_online_timer = 0; 4222 } 4223 4224 4225 /* 4226 * create devinfo node 4227 */ 4228 static void 4229 sf_create_devinfo(struct sf *sf, struct sf_target *target, int lip_cnt) 4230 { 4231 dev_info_t *cdip = NULL; 4232 char *nname = NULL; 4233 char **compatible = NULL; 4234 int ncompatible; 4235 struct scsi_inquiry *inq = &target->sft_inq; 4236 char *scsi_binding_set; 4237 4238 /* get the 'scsi-binding-set' property */ 4239 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, sf->sf_dip, 4240 DDI_PROP_NOTPROM | DDI_PROP_DONTPASS, "scsi-binding-set", 4241 &scsi_binding_set) != DDI_PROP_SUCCESS) 4242 scsi_binding_set = NULL; 4243 4244 /* determine the node name and compatible */ 4245 scsi_hba_nodename_compatible_get(inq, scsi_binding_set, 4246 inq->inq_dtype, NULL, &nname, &compatible, &ncompatible); 4247 if (scsi_binding_set) 4248 ddi_prop_free(scsi_binding_set); 4249 4250 /* if nodename can't be determined then print a message and skip it */ 4251 if (nname == NULL) { 4252 #ifndef RAID_LUNS 4253 sf_log(sf, CE_WARN, "%s%d: no driver for device " 4254 "@w%02x%02x%02x%02x%02x%02x%02x%02x,%x\n" 4255 " compatible: %s", 4256 ddi_driver_name(sf->sf_dip), ddi_get_instance(sf->sf_dip), 4257 target->sft_port_wwn[0], target->sft_port_wwn[1], 4258 target->sft_port_wwn[2], target->sft_port_wwn[3], 4259 target->sft_port_wwn[4], target->sft_port_wwn[5], 4260 target->sft_port_wwn[6], target->sft_port_wwn[7], 4261 target->sft_lun.l, *compatible); 4262 #else 4263 sf_log(sf, CE_WARN, "%s%d: no driver for device " 4264 "@w%02x%02x%02x%02x%02x%02x%02x%02x,%x\n" 4265 " compatible: %s", 4266 ddi_driver_name(sf->sf_dip), ddi_get_instance(sf->sf_dip), 4267 target->sft_port_wwn[0], target->sft_port_wwn[1], 4268 
target->sft_port_wwn[2], target->sft_port_wwn[3], 4269 target->sft_port_wwn[4], target->sft_port_wwn[5], 4270 target->sft_port_wwn[6], target->sft_port_wwn[7], 4271 target->sft_raid_lun, *compatible); 4272 #endif 4273 goto fail; 4274 } 4275 4276 /* allocate the node */ 4277 if (ndi_devi_alloc(sf->sf_dip, nname, 4278 DEVI_SID_NODEID, &cdip) != NDI_SUCCESS) { 4279 goto fail; 4280 } 4281 4282 /* decorate the node with compatible */ 4283 if (ndi_prop_update_string_array(DDI_DEV_T_NONE, cdip, 4284 "compatible", compatible, ncompatible) != DDI_PROP_SUCCESS) { 4285 goto fail; 4286 } 4287 4288 /* add addressing properties to the node */ 4289 if (sf_create_props(cdip, target, lip_cnt) != 1) { 4290 goto fail; 4291 } 4292 4293 mutex_enter(&target->sft_mutex); 4294 if (target->sft_dip != NULL) { 4295 mutex_exit(&target->sft_mutex); 4296 goto fail; 4297 } 4298 target->sft_dip = cdip; 4299 mutex_exit(&target->sft_mutex); 4300 4301 if (ndi_devi_online_async(cdip, 0) != DDI_SUCCESS) { 4302 goto fail; 4303 } 4304 4305 scsi_hba_nodename_compatible_free(nname, compatible); 4306 return; 4307 4308 fail: 4309 scsi_hba_nodename_compatible_free(nname, compatible); 4310 if (cdip != NULL) { 4311 (void) ndi_prop_remove(DDI_DEV_T_NONE, cdip, NODE_WWN_PROP); 4312 (void) ndi_prop_remove(DDI_DEV_T_NONE, cdip, PORT_WWN_PROP); 4313 (void) ndi_prop_remove(DDI_DEV_T_NONE, cdip, LIP_CNT_PROP); 4314 (void) ndi_prop_remove(DDI_DEV_T_NONE, cdip, TARGET_PROP); 4315 (void) ndi_prop_remove(DDI_DEV_T_NONE, cdip, LUN_PROP); 4316 if (ndi_devi_free(cdip) != NDI_SUCCESS) { 4317 sf_log(sf, CE_WARN, "ndi_devi_free failed\n"); 4318 } else { 4319 mutex_enter(&target->sft_mutex); 4320 if (cdip == target->sft_dip) { 4321 target->sft_dip = NULL; 4322 } 4323 mutex_exit(&target->sft_mutex); 4324 } 4325 } 4326 } 4327 4328 /* 4329 * create required properties, returning TRUE iff we succeed, else 4330 * returning FALSE 4331 */ 4332 static int 4333 sf_create_props(dev_info_t *cdip, struct sf_target *target, int lip_cnt) 4334 { 4335 int tgt_id = sf_alpa_to_switch[target->sft_al_pa]; 4336 4337 4338 if (ndi_prop_update_byte_array(DDI_DEV_T_NONE, 4339 cdip, NODE_WWN_PROP, target->sft_node_wwn, FC_WWN_SIZE) != 4340 DDI_PROP_SUCCESS) { 4341 return (FALSE); 4342 } 4343 4344 if (ndi_prop_update_byte_array(DDI_DEV_T_NONE, 4345 cdip, PORT_WWN_PROP, target->sft_port_wwn, FC_WWN_SIZE) != 4346 DDI_PROP_SUCCESS) { 4347 return (FALSE); 4348 } 4349 4350 if (ndi_prop_update_int(DDI_DEV_T_NONE, 4351 cdip, LIP_CNT_PROP, lip_cnt) != DDI_PROP_SUCCESS) { 4352 return (FALSE); 4353 } 4354 4355 if (ndi_prop_update_int(DDI_DEV_T_NONE, 4356 cdip, TARGET_PROP, tgt_id) != DDI_PROP_SUCCESS) { 4357 return (FALSE); 4358 } 4359 4360 #ifndef RAID_LUNS 4361 if (ndi_prop_update_int(DDI_DEV_T_NONE, 4362 cdip, LUN_PROP, target->sft_lun.l) != DDI_PROP_SUCCESS) { 4363 return (0); 4364 } 4365 #else 4366 if (ndi_prop_update_int(DDI_DEV_T_NONE, 4367 cdip, LUN_PROP, target->sft_raid_lun) != DDI_PROP_SUCCESS) { 4368 return (0); 4369 } 4370 #endif 4371 4372 return (TRUE); 4373 } 4374 4375 4376 /* 4377 * called by the transport to offline a target 4378 */ 4379 /* ARGSUSED */ 4380 static void 4381 sf_offline_target(struct sf *sf, struct sf_target *target) 4382 { 4383 dev_info_t *dip; 4384 struct sf_target *next_target = NULL; 4385 struct sf_hp_elem *elem; 4386 4387 ASSERT(mutex_owned(&sf->sf_mutex)); 4388 4389 if (sf_core && (sf_core & SF_CORE_OFFLINE_TARGET)) { 4390 (void) soc_take_core(sf->sf_sochandle, sf->sf_socp); 4391 sf_core = 0; 4392 } 4393 4394 while (target != NULL) { 4395 sf_log(sf, 
CE_NOTE, 4396 "!target 0x%x al_pa 0x%x lun %" PRIx64 " offlined\n", 4397 sf_alpa_to_switch[target->sft_al_pa], 4398 target->sft_al_pa, SCSA_LUN(target)); 4399 mutex_enter(&target->sft_mutex); 4400 target->sft_state &= ~(SF_TARGET_BUSY|SF_TARGET_MARK); 4401 target->sft_state |= SF_TARGET_OFFLINE; 4402 mutex_exit(&target->sft_mutex); 4403 mutex_exit(&sf->sf_mutex); 4404 4405 /* XXXX if this is LUN 0, offline all other LUNs */ 4406 if (next_target || target->sft_lun.l == 0) 4407 next_target = target->sft_next_lun; 4408 4409 /* abort all cmds for this target */ 4410 sf_abort_all(sf, target, FALSE, sf->sf_lip_cnt, FALSE); 4411 4412 mutex_enter(&sf->sf_mutex); 4413 mutex_enter(&target->sft_mutex); 4414 if (target->sft_state & SF_TARGET_INIT_DONE) { 4415 dip = target->sft_dip; 4416 mutex_exit(&target->sft_mutex); 4417 mutex_exit(&sf->sf_mutex); 4418 (void) ndi_prop_remove(DDI_DEV_T_NONE, dip, 4419 TARGET_PROP); 4420 (void) ndi_event_retrieve_cookie(sf->sf_event_hdl, 4421 dip, FCAL_REMOVE_EVENT, &sf_remove_eid, 4422 NDI_EVENT_NOPASS); 4423 (void) ndi_event_run_callbacks(sf->sf_event_hdl, 4424 target->sft_dip, sf_remove_eid, NULL); 4425 4426 elem = kmem_zalloc(sizeof (struct sf_hp_elem), 4427 KM_NOSLEEP); 4428 if (elem != NULL) { 4429 elem->dip = dip; 4430 elem->target = target; 4431 elem->what = SF_OFFLINE; 4432 mutex_enter(&sf->sf_hp_daemon_mutex); 4433 if (sf->sf_hp_elem_tail != NULL) { 4434 sf->sf_hp_elem_tail->next = elem; 4435 sf->sf_hp_elem_tail = elem; 4436 } else { 4437 sf->sf_hp_elem_head = 4438 sf->sf_hp_elem_tail = 4439 elem; 4440 } 4441 cv_signal(&sf->sf_hp_daemon_cv); 4442 mutex_exit(&sf->sf_hp_daemon_mutex); 4443 } else { 4444 /* don't do NDI_DEVI_REMOVE for now */ 4445 if (ndi_devi_offline(dip, 0) != NDI_SUCCESS) { 4446 SF_DEBUG(1, (sf, CE_WARN, 4447 "target %x lun %" PRIx64 ", " 4448 "device offline failed", 4449 sf_alpa_to_switch[target-> 4450 sft_al_pa], 4451 SCSA_LUN(target))); 4452 } else { 4453 SF_DEBUG(1, (sf, CE_NOTE, 4454 "target %x, lun %" PRIx64 ", " 4455 "device offline succeeded\n", 4456 sf_alpa_to_switch[target-> 4457 sft_al_pa], 4458 SCSA_LUN(target))); 4459 } 4460 } 4461 mutex_enter(&sf->sf_mutex); 4462 } else { 4463 mutex_exit(&target->sft_mutex); 4464 } 4465 target = next_target; 4466 } 4467 } 4468 4469 4470 /* 4471 * routine to get/set a capability 4472 * 4473 * returning: 4474 * 1 (TRUE) boolean capability is true (on get) 4475 * 0 (FALSE) invalid capability, can't set capability (on set), 4476 * or boolean capability is false (on get) 4477 * -1 (UNDEFINED) can't find capability (SCSA) or unsupported capability 4478 * 3 when getting SCSI version number 4479 * AL_PA when getting port initiator ID 4480 */ 4481 static int 4482 sf_commoncap(struct scsi_address *ap, char *cap, 4483 int val, int tgtonly, int doset) 4484 { 4485 struct sf *sf = ADDR2SF(ap); 4486 int cidx; 4487 int rval = FALSE; 4488 4489 4490 if (cap == NULL) { 4491 SF_DEBUG(3, (sf, CE_WARN, "sf_commoncap: invalid arg")); 4492 return (rval); 4493 } 4494 4495 /* get index of capability string */ 4496 if ((cidx = scsi_hba_lookup_capstr(cap)) == -1) { 4497 /* can't find capability */ 4498 return (UNDEFINED); 4499 } 4500 4501 if (doset) { 4502 /* 4503 * Process setcap request. 
4504 */ 4505 4506 /* 4507 * At present, we can only set binary (0/1) values 4508 */ 4509 switch (cidx) { 4510 case SCSI_CAP_ARQ: /* can't set this capability */ 4511 break; 4512 default: 4513 SF_DEBUG(3, (sf, CE_WARN, 4514 "sf_setcap: unsupported %d", cidx)); 4515 rval = UNDEFINED; 4516 break; 4517 } 4518 4519 SF_DEBUG(4, (sf, CE_NOTE, 4520 "set cap: cap=%s,val=0x%x,tgtonly=0x%x" 4521 ",doset=0x%x,rval=%d\n", 4522 cap, val, tgtonly, doset, rval)); 4523 4524 } else { 4525 /* 4526 * Process getcap request. 4527 */ 4528 switch (cidx) { 4529 case SCSI_CAP_DMA_MAX: 4530 break; /* don't have this capability */ 4531 case SCSI_CAP_INITIATOR_ID: 4532 rval = sf->sf_al_pa; 4533 break; 4534 case SCSI_CAP_ARQ: 4535 rval = TRUE; /* do have this capability */ 4536 break; 4537 case SCSI_CAP_RESET_NOTIFICATION: 4538 case SCSI_CAP_TAGGED_QING: 4539 rval = TRUE; /* do have this capability */ 4540 break; 4541 case SCSI_CAP_SCSI_VERSION: 4542 rval = 3; 4543 break; 4544 case SCSI_CAP_INTERCONNECT_TYPE: 4545 rval = INTERCONNECT_FIBRE; 4546 break; 4547 default: 4548 SF_DEBUG(4, (sf, CE_WARN, 4549 "sf_scsi_getcap: unsupported")); 4550 rval = UNDEFINED; 4551 break; 4552 } 4553 SF_DEBUG(4, (sf, CE_NOTE, 4554 "get cap: cap=%s,val=0x%x,tgtonly=0x%x," 4555 "doset=0x%x,rval=%d\n", 4556 cap, val, tgtonly, doset, rval)); 4557 } 4558 4559 return (rval); 4560 } 4561 4562 4563 /* 4564 * called by the transport to get a capability 4565 */ 4566 static int 4567 sf_getcap(struct scsi_address *ap, char *cap, int whom) 4568 { 4569 return (sf_commoncap(ap, cap, 0, whom, FALSE)); 4570 } 4571 4572 4573 /* 4574 * called by the transport to set a capability 4575 */ 4576 static int 4577 sf_setcap(struct scsi_address *ap, char *cap, int value, int whom) 4578 { 4579 return (sf_commoncap(ap, cap, value, whom, TRUE)); 4580 } 4581 4582 4583 /* 4584 * called by the transport to abort a target 4585 */ 4586 static int 4587 sf_abort(struct scsi_address *ap, struct scsi_pkt *pkt) 4588 { 4589 struct sf *sf = ADDR2SF(ap); 4590 struct sf_target *target = ADDR2TARGET(ap); 4591 struct sf_pkt *cmd, *ncmd, *pcmd; 4592 struct fcal_packet *fpkt; 4593 int rval = 0, t, my_rval = FALSE; 4594 int old_target_state; 4595 int lip_cnt; 4596 int tgt_id; 4597 fc_frame_header_t *hp; 4598 int deferred_destroy; 4599 4600 deferred_destroy = 0; 4601 4602 if (pkt != NULL) { 4603 cmd = PKT2CMD(pkt); 4604 fpkt = cmd->cmd_fp_pkt; 4605 SF_DEBUG(2, (sf, CE_NOTE, "sf_abort packet %p\n", 4606 (void *)fpkt)); 4607 pcmd = NULL; 4608 mutex_enter(&sf->sf_cmd_mutex); 4609 ncmd = sf->sf_pkt_head; 4610 while (ncmd != NULL) { 4611 if (ncmd == cmd) { 4612 if (pcmd != NULL) { 4613 pcmd->cmd_next = cmd->cmd_next; 4614 } else { 4615 sf->sf_pkt_head = cmd->cmd_next; 4616 } 4617 cmd->cmd_flags &= ~CFLAG_IN_QUEUE; 4618 cmd->cmd_state = SF_STATE_IDLE; 4619 pkt->pkt_reason = CMD_ABORTED; 4620 pkt->pkt_statistics |= STAT_ABORTED; 4621 my_rval = TRUE; 4622 break; 4623 } else { 4624 pcmd = ncmd; 4625 ncmd = ncmd->cmd_next; 4626 } 4627 } 4628 mutex_exit(&sf->sf_cmd_mutex); 4629 if (ncmd == NULL) { 4630 mutex_enter(&cmd->cmd_abort_mutex); 4631 if (cmd->cmd_state == SF_STATE_ISSUED) { 4632 cmd->cmd_state = SF_STATE_ABORTING; 4633 cmd->cmd_timeout = sf_watchdog_time + 20; 4634 mutex_exit(&cmd->cmd_abort_mutex); 4635 /* call transport to abort command */ 4636 if (((rval = soc_abort(sf->sf_sochandle, 4637 sf->sf_socp, sf->sf_sochandle->fcal_portno, 4638 fpkt, 1)) == FCAL_ABORTED) || 4639 (rval == FCAL_ABORT_FAILED)) { 4640 my_rval = TRUE; 4641 pkt->pkt_reason = CMD_ABORTED; 4642 pkt->pkt_statistics |=
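/*
 * Annotation (editor's note, not part of the original driver): both
 * FCAL_ABORTED and FCAL_ABORT_FAILED appear to be treated as final --
 * either the abort took effect or the exchange was already gone -- so
 * the packet can safely be declared aborted here.  FCAL_BAD_ABORT, by
 * contrast, leaves ownership ambiguous (the microcode may still
 * complete the packet), so that branch below only pushes cmd_timeout
 * out by ~20 seconds and lets the sf_watch thread reap the command if
 * nothing ever comes back.
 */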
STAT_ABORTED; 4643 cmd->cmd_state = SF_STATE_IDLE; 4644 } else if (rval == FCAL_BAD_ABORT) { 4645 cmd->cmd_timeout = sf_watchdog_time 4646 + 20; 4647 my_rval = FALSE; 4648 } else { 4649 SF_DEBUG(1, (sf, CE_NOTE, 4650 "Command Abort failed\n")); 4651 } 4652 } else { 4653 mutex_exit(&cmd->cmd_abort_mutex); 4654 } 4655 } 4656 } else { 4657 SF_DEBUG(2, (sf, CE_NOTE, "sf_abort target\n")); 4658 mutex_enter(&sf->sf_mutex); 4659 lip_cnt = sf->sf_lip_cnt; 4660 mutex_enter(&target->sft_mutex); 4661 if (target->sft_state & (SF_TARGET_BUSY | 4662 SF_TARGET_OFFLINE)) { 4663 mutex_exit(&target->sft_mutex); 4664 return (rval); 4665 } 4666 old_target_state = target->sft_state; 4667 target->sft_state |= SF_TARGET_BUSY; 4668 mutex_exit(&target->sft_mutex); 4669 mutex_exit(&sf->sf_mutex); 4670 4671 if ((pkt = sf_scsi_init_pkt(ap, NULL, NULL, 0, 4672 0, 0, 0, NULL, 0)) != NULL) { 4673 4674 cmd = PKT2CMD(pkt); 4675 cmd->cmd_block->fcp_cntl.cntl_abort_tsk = 1; 4676 cmd->cmd_fp_pkt->fcal_pkt_comp = NULL; 4677 cmd->cmd_pkt->pkt_flags |= FLAG_NOINTR; 4678 4679 /* prepare the packet for transport */ 4680 if (sf_prepare_pkt(sf, cmd, target) == TRAN_ACCEPT) { 4681 4682 cmd->cmd_state = SF_STATE_ISSUED; 4683 /* 4684 * call transport to send a pkt polled 4685 * 4686 * if that fails call the transport to abort it 4687 */ 4688 if (soc_transport_poll(sf->sf_sochandle, 4689 cmd->cmd_fp_pkt, SF_ABORT_TIMEOUT, 4690 CQ_REQUEST_1) == FCAL_TRANSPORT_SUCCESS) { 4691 (void) ddi_dma_sync( 4692 cmd->cmd_cr_pool->rsp_dma_handle, 4693 (off_t) 4694 ((caddr_t)cmd->cmd_rsp_block - 4695 cmd->cmd_cr_pool->rsp_base), 4696 FCP_MAX_RSP_IU_SIZE, 4697 DDI_DMA_SYNC_FORKERNEL); 4698 if (((struct fcp_rsp_info *) 4699 (&cmd->cmd_rsp_block-> 4700 fcp_response_len + 1))-> 4701 rsp_code == FCP_NO_FAILURE) { 4702 /* abort cmds for this targ */ 4703 sf_abort_all(sf, target, TRUE, 4704 lip_cnt, TRUE); 4705 } else { 4706 hp = &cmd->cmd_fp_pkt-> 4707 fcal_socal_request. 4708 sr_fc_frame_hdr; 4709 tgt_id = sf_alpa_to_switch[ 4710 (uchar_t)hp->d_id]; 4711 sf->sf_stats.tstats[tgt_id]. 
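/*
 * Annotation (editor's note, not part of the original driver): AL_PAs
 * are sparse -- only 126 of the 256 8-bit values are valid arbitrated
 * loop physical addresses -- so the destination ID from the frame
 * header cannot index tstats[] directly.  sf_alpa_to_switch[] maps a
 * valid AL_PA to its dense loop/target number:
 *
 *	tgt_id = sf_alpa_to_switch[(uchar_t)hp->d_id];
 *	sf->sf_stats.tstats[tgt_id].task_mgmt_failures++;
 *
 * the same mapping used for TARGET_PROP and in log messages
 * throughout this driver.
 */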
4712 task_mgmt_failures++; 4713 SF_DEBUG(1, (sf, CE_NOTE, 4714 "Target %d Abort Task " 4715 "Set failed\n", hp->d_id)); 4716 } 4717 } else { 4718 mutex_enter(&cmd->cmd_abort_mutex); 4719 if (cmd->cmd_state == SF_STATE_ISSUED) { 4720 cmd->cmd_state = SF_STATE_ABORTING; 4721 cmd->cmd_timeout = sf_watchdog_time 4722 + 20; 4723 mutex_exit(&cmd->cmd_abort_mutex); 4724 if ((t = soc_abort(sf->sf_sochandle, 4725 sf->sf_socp, sf->sf_sochandle-> 4726 fcal_portno, cmd->cmd_fp_pkt, 1)) != 4727 FCAL_ABORTED && 4728 (t != FCAL_ABORT_FAILED)) { 4729 sf_log(sf, CE_NOTE, 4730 "sf_abort failed, " 4731 "initiating LIP\n"); 4732 sf_force_lip(sf); 4733 deferred_destroy = 1; 4734 } 4735 } else { 4736 mutex_exit(&cmd->cmd_abort_mutex); 4737 } 4738 } 4739 } 4740 if (!deferred_destroy) { 4741 cmd->cmd_fp_pkt->fcal_pkt_comp = 4742 sf_cmd_callback; 4743 cmd->cmd_block->fcp_cntl.cntl_abort_tsk = 0; 4744 sf_scsi_destroy_pkt(ap, pkt); 4745 my_rval = TRUE; 4746 } 4747 } 4748 mutex_enter(&sf->sf_mutex); 4749 if (lip_cnt == sf->sf_lip_cnt) { 4750 mutex_enter(&target->sft_mutex); 4751 target->sft_state = old_target_state; 4752 mutex_exit(&target->sft_mutex); 4753 } 4754 mutex_exit(&sf->sf_mutex); 4755 } 4756 return (my_rval); 4757 } 4758 4759 4760 /* 4761 * called by the transport and internally to reset a target 4762 */ 4763 static int 4764 sf_reset(struct scsi_address *ap, int level) 4765 { 4766 struct scsi_pkt *pkt; 4767 struct fcal_packet *fpkt; 4768 struct sf *sf = ADDR2SF(ap); 4769 struct sf_target *target = ADDR2TARGET(ap), *ntarget; 4770 struct sf_pkt *cmd; 4771 int rval = FALSE, t; 4772 int lip_cnt; 4773 int tgt_id, ret; 4774 fc_frame_header_t *hp; 4775 int deferred_destroy; 4776 4777 /* We don't support RESET_LUN yet. */ 4778 if (level == RESET_TARGET) { 4779 struct sf_reset_list *p; 4780 4781 if ((p = kmem_alloc(sizeof (struct sf_reset_list), KM_NOSLEEP)) 4782 == NULL) 4783 return (rval); 4784 4785 SF_DEBUG(2, (sf, CE_NOTE, "sf_reset target\n")); 4786 mutex_enter(&sf->sf_mutex); 4787 /* All target resets go to LUN 0 */ 4788 if (target->sft_lun.l) { 4789 target = sf_lookup_target(sf, target->sft_port_wwn, 0); 4790 } 4791 mutex_enter(&target->sft_mutex); 4792 if (target->sft_state & (SF_TARGET_BUSY | 4793 SF_TARGET_OFFLINE)) { 4794 mutex_exit(&target->sft_mutex); 4795 mutex_exit(&sf->sf_mutex); 4796 kmem_free(p, sizeof (struct sf_reset_list)); 4797 return (rval); 4798 } 4799 lip_cnt = sf->sf_lip_cnt; 4800 target->sft_state |= SF_TARGET_BUSY; 4801 for (ntarget = target->sft_next_lun; 4802 ntarget; 4803 ntarget = ntarget->sft_next_lun) { 4804 mutex_enter(&ntarget->sft_mutex); 4805 /* 4806 * XXXX If we supported RESET_LUN we should check here 4807 * to see if any LUN were being reset and somehow fail 4808 * that operation. 
4809 */ 4810 ntarget->sft_state |= SF_TARGET_BUSY; 4811 mutex_exit(&ntarget->sft_mutex); 4812 } 4813 mutex_exit(&target->sft_mutex); 4814 mutex_exit(&sf->sf_mutex); 4815 4816 deferred_destroy = 0; 4817 if ((pkt = sf_scsi_init_pkt(ap, NULL, NULL, 0, 4818 0, 0, 0, NULL, 0)) != NULL) { 4819 cmd = PKT2CMD(pkt); 4820 cmd->cmd_block->fcp_cntl.cntl_reset = 1; 4821 cmd->cmd_fp_pkt->fcal_pkt_comp = NULL; 4822 cmd->cmd_pkt->pkt_flags |= FLAG_NOINTR; 4823 4824 /* prepare the packet for transport */ 4825 if (sf_prepare_pkt(sf, cmd, target) == TRAN_ACCEPT) { 4826 /* call transport to send a pkt polled */ 4827 cmd->cmd_state = SF_STATE_ISSUED; 4828 if ((ret = soc_transport_poll(sf->sf_sochandle, 4829 cmd->cmd_fp_pkt, SF_ABORT_TIMEOUT, 4830 CQ_REQUEST_1)) == FCAL_TRANSPORT_SUCCESS) { 4831 (void) ddi_dma_sync(cmd->cmd_cr_pool-> 4832 rsp_dma_handle, (caddr_t)cmd-> 4833 cmd_rsp_block - cmd->cmd_cr_pool-> 4834 rsp_base, FCP_MAX_RSP_IU_SIZE, 4835 DDI_DMA_SYNC_FORKERNEL); 4836 fpkt = cmd->cmd_fp_pkt; 4837 if ((fpkt->fcal_pkt_status == 4838 FCAL_STATUS_OK) && 4839 (((struct fcp_rsp_info *) 4840 (&cmd->cmd_rsp_block-> 4841 fcp_response_len + 1))-> 4842 rsp_code == FCP_NO_FAILURE)) { 4843 sf_log(sf, CE_NOTE, 4844 "!sf%d: Target 0x%x Reset " 4845 "successful\n", 4846 ddi_get_instance(\ 4847 sf->sf_dip), 4848 sf_alpa_to_switch[ 4849 target->sft_al_pa]); 4850 rval = TRUE; 4851 } else { 4852 hp = &cmd->cmd_fp_pkt-> 4853 fcal_socal_request. 4854 sr_fc_frame_hdr; 4855 tgt_id = sf_alpa_to_switch[ 4856 (uchar_t)hp->d_id]; 4857 sf->sf_stats.tstats[tgt_id]. 4858 task_mgmt_failures++; 4859 sf_log(sf, CE_NOTE, 4860 "!sf%d: Target 0x%x " 4861 "Reset failed." 4862 "Status code 0x%x " 4863 "Resp code 0x%x\n", 4864 ddi_get_instance(\ 4865 sf->sf_dip), 4866 tgt_id, 4867 fpkt->fcal_pkt_status, 4868 ((struct fcp_rsp_info *) 4869 (&cmd->cmd_rsp_block-> 4870 fcp_response_len + 1))-> 4871 rsp_code); 4872 } 4873 } else { 4874 sf_log(sf, CE_NOTE, "!sf%d: Target " 4875 "0x%x Reset Failed. Ret=%x\n", 4876 ddi_get_instance(sf->sf_dip), 4877 sf_alpa_to_switch[ 4878 target->sft_al_pa], ret); 4879 mutex_enter(&cmd->cmd_abort_mutex); 4880 if (cmd->cmd_state == SF_STATE_ISSUED) { 4881 /* call the transport to abort a cmd */ 4882 cmd->cmd_timeout = sf_watchdog_time 4883 + 20; 4884 cmd->cmd_state = SF_STATE_ABORTING; 4885 mutex_exit(&cmd->cmd_abort_mutex); 4886 if (((t = soc_abort(sf->sf_sochandle, 4887 sf->sf_socp, 4888 sf->sf_sochandle->fcal_portno, 4889 cmd->cmd_fp_pkt, 1)) != 4890 FCAL_ABORTED) && 4891 (t != FCAL_ABORT_FAILED)) { 4892 sf_log(sf, CE_NOTE, 4893 "!sf%d: Target 0x%x Reset " 4894 "failed. Abort Failed, " 4895 "forcing LIP\n", 4896 ddi_get_instance( 4897 sf->sf_dip), 4898 sf_alpa_to_switch[ 4899 target->sft_al_pa]); 4900 sf_force_lip(sf); 4901 rval = TRUE; 4902 deferred_destroy = 1; 4903 } 4904 } else { 4905 mutex_exit(&cmd->cmd_abort_mutex); 4906 } 4907 } 4908 } 4909 /* 4910 * Defer releasing the packet if we abort returned with 4911 * a BAD_ABORT or timed out, because there is a 4912 * possibility that the ucode might return it. 4913 * We wait for at least 20s and let it be released 4914 * by the sf_watch thread 4915 */ 4916 if (!deferred_destroy) { 4917 cmd->cmd_block->fcp_cntl.cntl_reset = 0; 4918 cmd->cmd_fp_pkt->fcal_pkt_comp = 4919 sf_cmd_callback; 4920 cmd->cmd_state = SF_STATE_IDLE; 4921 /* for cache */ 4922 sf_scsi_destroy_pkt(ap, pkt); 4923 } 4924 } else { 4925 cmn_err(CE_WARN, "!sf%d: Target 0x%x Reset Failed. 
" 4926 "Resource allocation error.\n", 4927 ddi_get_instance(sf->sf_dip), 4928 sf_alpa_to_switch[target->sft_al_pa]); 4929 } 4930 mutex_enter(&sf->sf_mutex); 4931 if ((rval == TRUE) && (lip_cnt == sf->sf_lip_cnt)) { 4932 p->target = target; 4933 p->lip_cnt = lip_cnt; 4934 p->timeout = ddi_get_lbolt() + 4935 drv_usectohz(SF_TARGET_RESET_DELAY); 4936 p->next = sf->sf_reset_list; 4937 sf->sf_reset_list = p; 4938 mutex_exit(&sf->sf_mutex); 4939 mutex_enter(&sf_global_mutex); 4940 if (sf_reset_timeout_id == 0) { 4941 sf_reset_timeout_id = timeout( 4942 sf_check_reset_delay, NULL, 4943 drv_usectohz(SF_TARGET_RESET_DELAY)); 4944 } 4945 mutex_exit(&sf_global_mutex); 4946 } else { 4947 if (lip_cnt == sf->sf_lip_cnt) { 4948 mutex_enter(&target->sft_mutex); 4949 target->sft_state &= ~SF_TARGET_BUSY; 4950 for (ntarget = target->sft_next_lun; 4951 ntarget; 4952 ntarget = ntarget->sft_next_lun) { 4953 mutex_enter(&ntarget->sft_mutex); 4954 ntarget->sft_state &= ~SF_TARGET_BUSY; 4955 mutex_exit(&ntarget->sft_mutex); 4956 } 4957 mutex_exit(&target->sft_mutex); 4958 } 4959 mutex_exit(&sf->sf_mutex); 4960 kmem_free(p, sizeof (struct sf_reset_list)); 4961 } 4962 } else { 4963 mutex_enter(&sf->sf_mutex); 4964 if ((sf->sf_state == SF_STATE_OFFLINE) && 4965 (sf_watchdog_time < sf->sf_timer)) { 4966 /* 4967 * We are currently in a lip, so let this one 4968 * finish before forcing another one. 4969 */ 4970 mutex_exit(&sf->sf_mutex); 4971 return (TRUE); 4972 } 4973 mutex_exit(&sf->sf_mutex); 4974 sf_log(sf, CE_NOTE, "!sf:Target driver initiated lip\n"); 4975 sf_force_lip(sf); 4976 rval = TRUE; 4977 } 4978 return (rval); 4979 } 4980 4981 4982 /* 4983 * abort all commands for a target 4984 * 4985 * if try_abort is set then send an abort 4986 * if abort is set then this is abort, else this is a reset 4987 */ 4988 static void 4989 sf_abort_all(struct sf *sf, struct sf_target *target, int abort, int 4990 lip_cnt, int try_abort) 4991 { 4992 struct sf_target *ntarget; 4993 struct sf_pkt *cmd, *head = NULL, *tail = NULL, *pcmd = NULL, *tcmd; 4994 struct fcal_packet *fpkt; 4995 struct scsi_pkt *pkt; 4996 int rval = FCAL_ABORTED; 4997 4998 /* 4999 * First pull all commands for all LUNs on this target out of the 5000 * overflow list. We can tell it's the same target by comparing 5001 * the node WWN. 5002 */ 5003 mutex_enter(&sf->sf_mutex); 5004 if (lip_cnt == sf->sf_lip_cnt) { 5005 mutex_enter(&sf->sf_cmd_mutex); 5006 cmd = sf->sf_pkt_head; 5007 while (cmd != NULL) { 5008 ntarget = ADDR2TARGET(&cmd->cmd_pkt-> 5009 pkt_address); 5010 if (ntarget == target) { 5011 if (pcmd != NULL) 5012 pcmd->cmd_next = cmd->cmd_next; 5013 else 5014 sf->sf_pkt_head = cmd->cmd_next; 5015 if (sf->sf_pkt_tail == cmd) { 5016 sf->sf_pkt_tail = pcmd; 5017 if (pcmd != NULL) 5018 pcmd->cmd_next = NULL; 5019 } 5020 tcmd = cmd->cmd_next; 5021 if (head == NULL) { 5022 head = cmd; 5023 tail = cmd; 5024 } else { 5025 tail->cmd_next = cmd; 5026 tail = cmd; 5027 } 5028 cmd->cmd_next = NULL; 5029 cmd = tcmd; 5030 } else { 5031 pcmd = cmd; 5032 cmd = cmd->cmd_next; 5033 } 5034 } 5035 mutex_exit(&sf->sf_cmd_mutex); 5036 } 5037 mutex_exit(&sf->sf_mutex); 5038 5039 /* 5040 * Now complete all the commands on our list. In the process, 5041 * the completion routine may take the commands off the target 5042 * lists. 
5043 */ 5044 cmd = head; 5045 while (cmd != NULL) { 5046 pkt = cmd->cmd_pkt; 5047 if (abort) { 5048 pkt->pkt_reason = CMD_ABORTED; 5049 pkt->pkt_statistics |= STAT_ABORTED; 5050 } else { 5051 pkt->pkt_reason = CMD_RESET; 5052 pkt->pkt_statistics |= STAT_DEV_RESET; 5053 } 5054 cmd->cmd_flags &= ~CFLAG_IN_QUEUE; 5055 cmd->cmd_state = SF_STATE_IDLE; 5056 cmd = cmd->cmd_next; 5057 /* 5058 * call the packet completion routine only for 5059 * non-polled commands. Ignore the polled commands as 5060 * they time out and will be handled differently 5061 */ 5062 if ((pkt->pkt_comp) && !(pkt->pkt_flags & FLAG_NOINTR)) 5063 (*pkt->pkt_comp)(pkt); 5064 5065 } 5066 5067 /* 5068 * Finally get all outstanding commands for each LUN, and abort them if 5069 * they've been issued, and call the completion routine. 5070 * For the case where sf_offline_target is called from sf_watch 5071 * due to an Offline Timeout, it is quite possible that the soc+ 5072 * ucode is hosed and therefore cannot return the commands. 5073 * Clean up all the issued commands as well. 5074 * Try_abort will be false only if sf_abort_all is coming from 5075 * sf_offline_target. 5076 */ 5077 5078 if (try_abort || sf->sf_state == SF_STATE_OFFLINE) { 5079 mutex_enter(&target->sft_pkt_mutex); 5080 cmd = tcmd = target->sft_pkt_head; 5081 while (cmd != (struct sf_pkt *)&target->sft_pkt_head) { 5082 fpkt = cmd->cmd_fp_pkt; 5083 pkt = cmd->cmd_pkt; 5084 mutex_enter(&cmd->cmd_abort_mutex); 5085 if ((cmd->cmd_state == SF_STATE_ISSUED) && 5086 (fpkt->fcal_cmd_state & 5087 FCAL_CMD_IN_TRANSPORT) && 5088 ((fpkt->fcal_cmd_state & FCAL_CMD_COMPLETE) == 5089 0) && !(pkt->pkt_flags & FLAG_NOINTR)) { 5090 cmd->cmd_state = SF_STATE_ABORTING; 5091 cmd->cmd_timeout = sf_watchdog_time + 5092 cmd->cmd_pkt->pkt_time + 20; 5093 mutex_exit(&cmd->cmd_abort_mutex); 5094 mutex_exit(&target->sft_pkt_mutex); 5095 if (try_abort) { 5096 /* call the transport to abort a pkt */ 5097 rval = soc_abort(sf->sf_sochandle, 5098 sf->sf_socp, 5099 sf->sf_sochandle->fcal_portno, 5100 fpkt, 1); 5101 } 5102 if ((rval == FCAL_ABORTED) || 5103 (rval == FCAL_ABORT_FAILED)) { 5104 if (abort) { 5105 pkt->pkt_reason = CMD_ABORTED; 5106 pkt->pkt_statistics |= 5107 STAT_ABORTED; 5108 } else { 5109 pkt->pkt_reason = CMD_RESET; 5110 pkt->pkt_statistics |= 5111 STAT_DEV_RESET; 5112 } 5113 cmd->cmd_state = SF_STATE_IDLE; 5114 if (pkt->pkt_comp) 5115 (*pkt->pkt_comp)(pkt); 5116 } 5117 mutex_enter(&sf->sf_mutex); 5118 if (lip_cnt != sf->sf_lip_cnt) { 5119 mutex_exit(&sf->sf_mutex); 5120 return; 5121 } 5122 mutex_exit(&sf->sf_mutex); 5123 mutex_enter(&target->sft_pkt_mutex); 5124 cmd = target->sft_pkt_head; 5125 } else { 5126 mutex_exit(&cmd->cmd_abort_mutex); 5127 cmd = cmd->cmd_forw; 5128 } 5129 } 5130 mutex_exit(&target->sft_pkt_mutex); 5131 } 5132 } 5133 5134 5135 /* 5136 * called by the transport to start a packet 5137 */ 5138 static int 5139 sf_start(struct scsi_address *ap, struct scsi_pkt *pkt) 5140 { 5141 struct sf *sf = ADDR2SF(ap); 5142 struct sf_target *target = ADDR2TARGET(ap); 5143 struct sf_pkt *cmd = PKT2CMD(pkt); 5144 int rval; 5145 5146 5147 SF_DEBUG(6, (sf, CE_NOTE, "sf_start\n")); 5148 5149 if (cmd->cmd_state == SF_STATE_ISSUED) { 5150 cmn_err(CE_PANIC, "sf: issuing packet twice 0x%p\n", 5151 (void *)cmd); 5152 } 5153 5154 /* prepare the packet for transport */ 5155 if ((rval = sf_prepare_pkt(sf, cmd, target)) != TRAN_ACCEPT) { 5156 return (rval); 5157 } 5158 5159 if (target->sft_state & (SF_TARGET_BUSY|SF_TARGET_OFFLINE)) { 5160 if (target->sft_state & SF_TARGET_OFFLINE) { 5161
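/*
 * Annotation (editor's note, not part of the original driver): the
 * return values of sf_start() follow the SCSA tran_start contract:
 * TRAN_FATAL_ERROR means the device is gone and retrying is
 * pointless, TRAN_BUSY asks the target driver to retry later,
 * TRAN_BADPKT rejects the packet itself, and TRAN_ACCEPT means the
 * HBA now owns the packet until its completion routine is called (or
 * until polled I/O returns).  An offline target gets the fatal
 * answer immediately:
 */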
return (TRAN_FATAL_ERROR); 5162 } 5163 if (pkt->pkt_flags & FLAG_NOINTR) { 5164 return (TRAN_BUSY); 5165 } 5166 mutex_enter(&sf->sf_cmd_mutex); 5167 sf->sf_use_lock = TRUE; 5168 goto enque; 5169 } 5170 5171 5172 /* if no interrupts then do polled I/O */ 5173 if (pkt->pkt_flags & FLAG_NOINTR) { 5174 return (sf_dopoll(sf, cmd)); 5175 } 5176 5177 /* regular interrupt-driven I/O */ 5178 5179 if (!sf->sf_use_lock) { 5180 5181 /* locking not needed */ 5182 5183 cmd->cmd_timeout = cmd->cmd_pkt->pkt_time ? 5184 sf_watchdog_time + cmd->cmd_pkt->pkt_time : 0; 5185 cmd->cmd_state = SF_STATE_ISSUED; 5186 5187 /* call the transport to send a pkt */ 5188 if (soc_transport(sf->sf_sochandle, cmd->cmd_fp_pkt, 5189 FCAL_NOSLEEP, CQ_REQUEST_1) != FCAL_TRANSPORT_SUCCESS) { 5190 cmd->cmd_state = SF_STATE_IDLE; 5191 return (TRAN_BADPKT); 5192 } 5193 return (TRAN_ACCEPT); 5194 } 5195 5196 /* regular I/O using locking */ 5197 5198 mutex_enter(&sf->sf_cmd_mutex); 5199 if ((sf->sf_ncmds >= sf->sf_throttle) || 5200 (sf->sf_pkt_head != NULL)) { 5201 enque: 5202 /* 5203 * either we're throttling back or there are already commands 5204 * on the queue, so enqueue this one for later 5205 */ 5206 cmd->cmd_flags |= CFLAG_IN_QUEUE; 5207 if (sf->sf_pkt_head != NULL) { 5208 /* add to the queue */ 5209 sf->sf_pkt_tail->cmd_next = cmd; 5210 cmd->cmd_next = NULL; 5211 sf->sf_pkt_tail = cmd; 5212 } else { 5213 /* this is the first entry in the queue */ 5214 sf->sf_pkt_head = sf->sf_pkt_tail = cmd; 5215 cmd->cmd_next = NULL; 5216 } 5217 mutex_exit(&sf->sf_cmd_mutex); 5218 return (TRAN_ACCEPT); 5219 } 5220 5221 /* 5222 * start this packet now 5223 */ 5224 5225 /* still have cmd mutex */ 5226 return (sf_start_internal(sf, cmd)); 5227 } 5228 5229 5230 /* 5231 * internal routine to start a packet from the queue now 5232 * 5233 * enter with cmd mutex held and leave with it released 5234 */ 5235 static int 5236 sf_start_internal(struct sf *sf, struct sf_pkt *cmd) 5237 { 5238 /* we have the cmd mutex */ 5239 sf->sf_ncmds++; 5240 mutex_exit(&sf->sf_cmd_mutex); 5241 5242 ASSERT(cmd->cmd_state != SF_STATE_ISSUED); 5243 SF_DEBUG(6, (sf, CE_NOTE, "sf_start_internal\n")); 5244 5245 cmd->cmd_timeout = cmd->cmd_pkt->pkt_time ? sf_watchdog_time + 5246 cmd->cmd_pkt->pkt_time : 0; 5247 cmd->cmd_state = SF_STATE_ISSUED; 5248 5249 /* call transport to send the pkt */ 5250 if (soc_transport(sf->sf_sochandle, cmd->cmd_fp_pkt, FCAL_NOSLEEP, 5251 CQ_REQUEST_1) != FCAL_TRANSPORT_SUCCESS) { 5252 cmd->cmd_state = SF_STATE_IDLE; 5253 mutex_enter(&sf->sf_cmd_mutex); 5254 sf->sf_ncmds--; 5255 mutex_exit(&sf->sf_cmd_mutex); 5256 return (TRAN_BADPKT); 5257 } 5258 return (TRAN_ACCEPT); 5259 } 5260 5261 5262 /* 5263 * prepare a packet for transport 5264 */ 5265 static int 5266 sf_prepare_pkt(struct sf *sf, struct sf_pkt *cmd, struct sf_target *target) 5267 { 5268 struct fcp_cmd *fcmd = cmd->cmd_block; 5269 5270 /* XXXX Need to set the LUN ?
*/ 5271 bcopy((caddr_t)&target->sft_lun.b, 5272 (caddr_t)&fcmd->fcp_ent_addr, 5273 FCP_LUN_SIZE); 5274 cmd->cmd_pkt->pkt_reason = CMD_CMPLT; 5275 cmd->cmd_pkt->pkt_state = 0; 5276 cmd->cmd_pkt->pkt_statistics = 0; 5277 5278 5279 if ((cmd->cmd_pkt->pkt_comp == NULL) && 5280 ((cmd->cmd_pkt->pkt_flags & FLAG_NOINTR) == 0)) { 5281 return (TRAN_BADPKT); 5282 } 5283 5284 /* invalidate imp field(s) of rsp block */ 5285 cmd->cmd_rsp_block->fcp_u.i_fcp_status = SF_BAD_DMA_MAGIC; 5286 5287 /* set up amt of I/O to do */ 5288 if (cmd->cmd_flags & CFLAG_DMAVALID) { 5289 cmd->cmd_pkt->pkt_resid = cmd->cmd_dmacount; 5290 if (cmd->cmd_flags & CFLAG_CMDIOPB) { 5291 (void) ddi_dma_sync(cmd->cmd_dmahandle, 0, 0, 5292 DDI_DMA_SYNC_FORDEV); 5293 } 5294 } else { 5295 cmd->cmd_pkt->pkt_resid = 0; 5296 } 5297 5298 /* set up the Tagged Queuing type */ 5299 if (cmd->cmd_pkt->pkt_flags & FLAG_HTAG) { 5300 fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_HEAD_OF_Q; 5301 } else if (cmd->cmd_pkt->pkt_flags & FLAG_OTAG) { 5302 fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_ORDERED; 5303 } 5304 5305 /* 5306 * Sync the cmd segment 5307 */ 5308 (void) ddi_dma_sync(cmd->cmd_cr_pool->cmd_dma_handle, 5309 (caddr_t)fcmd - cmd->cmd_cr_pool->cmd_base, 5310 sizeof (struct fcp_cmd), DDI_DMA_SYNC_FORDEV); 5311 5312 sf_fill_ids(sf, cmd, target); 5313 return (TRAN_ACCEPT); 5314 } 5315 5316 5317 /* 5318 * fill in packet hdr source and destination IDs and hdr byte count 5319 */ 5320 static void 5321 sf_fill_ids(struct sf *sf, struct sf_pkt *cmd, struct sf_target *target) 5322 { 5323 struct fcal_packet *fpkt = cmd->cmd_fp_pkt; 5324 fc_frame_header_t *hp; 5325 5326 5327 hp = &fpkt->fcal_socal_request.sr_fc_frame_hdr; 5328 hp->d_id = target->sft_al_pa; 5329 hp->s_id = sf->sf_al_pa; 5330 fpkt->fcal_socal_request.sr_soc_hdr.sh_byte_cnt = 5331 cmd->cmd_dmacookie.dmac_size; 5332 } 5333 5334 5335 /* 5336 * do polled I/O using transport 5337 */ 5338 static int 5339 sf_dopoll(struct sf *sf, struct sf_pkt *cmd) 5340 { 5341 int timeout; 5342 int rval; 5343 5344 5345 mutex_enter(&sf->sf_cmd_mutex); 5346 sf->sf_ncmds++; 5347 mutex_exit(&sf->sf_cmd_mutex); 5348 5349 timeout = cmd->cmd_pkt->pkt_time ? 
cmd->cmd_pkt->pkt_time 5350 : SF_POLL_TIMEOUT; 5351 cmd->cmd_timeout = 0; 5352 cmd->cmd_fp_pkt->fcal_pkt_comp = NULL; 5353 cmd->cmd_state = SF_STATE_ISSUED; 5354 5355 /* call transport to send a pkt polled */ 5356 rval = soc_transport_poll(sf->sf_sochandle, cmd->cmd_fp_pkt, 5357 timeout*1000000, CQ_REQUEST_1); 5358 mutex_enter(&cmd->cmd_abort_mutex); 5359 cmd->cmd_fp_pkt->fcal_pkt_comp = sf_cmd_callback; 5360 if (rval != FCAL_TRANSPORT_SUCCESS) { 5361 if (rval == FCAL_TRANSPORT_TIMEOUT) { 5362 cmd->cmd_state = SF_STATE_ABORTING; 5363 mutex_exit(&cmd->cmd_abort_mutex); 5364 (void) sf_target_timeout(sf, cmd); 5365 } else { 5366 mutex_exit(&cmd->cmd_abort_mutex); 5367 } 5368 cmd->cmd_state = SF_STATE_IDLE; 5369 cmd->cmd_fp_pkt->fcal_pkt_comp = sf_cmd_callback; 5370 mutex_enter(&sf->sf_cmd_mutex); 5371 sf->sf_ncmds--; 5372 mutex_exit(&sf->sf_cmd_mutex); 5373 return (TRAN_BADPKT); 5374 } 5375 mutex_exit(&cmd->cmd_abort_mutex); 5376 cmd->cmd_fp_pkt->fcal_pkt_comp = sf_cmd_callback; 5377 sf_cmd_callback(cmd->cmd_fp_pkt); 5378 return (TRAN_ACCEPT); 5379 } 5380 5381 5382 /* a shortcut for defining debug messages below */ 5383 #ifdef DEBUG 5384 #define SF_DMSG1(s) msg1 = s 5385 #else 5386 #define SF_DMSG1(s) /* do nothing */ 5387 #endif 5388 5389 5390 /* 5391 * the pkt_comp callback for command packets 5392 */ 5393 static void 5394 sf_cmd_callback(struct fcal_packet *fpkt) 5395 { 5396 struct sf_pkt *cmd = (struct sf_pkt *)fpkt->fcal_pkt_private; 5397 struct scsi_pkt *pkt = cmd->cmd_pkt; 5398 struct sf *sf = ADDR2SF(&pkt->pkt_address); 5399 struct sf_target *target = ADDR2TARGET(&pkt->pkt_address); 5400 struct fcp_rsp *rsp; 5401 char *msg1 = NULL; 5402 char *msg2 = NULL; 5403 short ncmds; 5404 int tgt_id; 5405 int good_scsi_status = TRUE; 5406 5407 5408 5409 if (cmd->cmd_state == SF_STATE_IDLE) { 5410 cmn_err(CE_PANIC, "sf: completing idle packet 0x%p\n", 5411 (void *)cmd); 5412 } 5413 5414 mutex_enter(&cmd->cmd_abort_mutex); 5415 if (cmd->cmd_state == SF_STATE_ABORTING) { 5416 /* cmd already being aborted -- nothing to do */ 5417 mutex_exit(&cmd->cmd_abort_mutex); 5418 return; 5419 } 5420 5421 cmd->cmd_state = SF_STATE_IDLE; 5422 mutex_exit(&cmd->cmd_abort_mutex); 5423 5424 if (fpkt->fcal_pkt_status == FCAL_STATUS_OK) { 5425 5426 (void) ddi_dma_sync(cmd->cmd_cr_pool->rsp_dma_handle, 5427 (caddr_t)cmd->cmd_rsp_block - cmd->cmd_cr_pool->rsp_base, 5428 FCP_MAX_RSP_IU_SIZE, DDI_DMA_SYNC_FORKERNEL); 5429 5430 rsp = (struct fcp_rsp *)cmd->cmd_rsp_block; 5431 5432 if (rsp->fcp_u.i_fcp_status == SF_BAD_DMA_MAGIC) { 5433 5434 if (sf_core && (sf_core & SF_CORE_BAD_DMA)) { 5435 sf_token = (int *)(uintptr_t) 5436 fpkt->fcal_socal_request.\ 5437 sr_soc_hdr.sh_request_token; 5438 (void) soc_take_core(sf->sf_sochandle, 5439 sf->sf_socp); 5440 } 5441 5442 pkt->pkt_reason = CMD_INCOMPLETE; 5443 pkt->pkt_state = STATE_GOT_BUS; 5444 pkt->pkt_statistics |= STAT_ABORTED; 5445 5446 } else { 5447 5448 pkt->pkt_state = STATE_GOT_BUS | STATE_GOT_TARGET | 5449 STATE_SENT_CMD | STATE_GOT_STATUS; 5450 pkt->pkt_resid = 0; 5451 if (cmd->cmd_flags & CFLAG_DMAVALID) { 5452 pkt->pkt_state |= STATE_XFERRED_DATA; 5453 } 5454 5455 if ((pkt->pkt_scbp != NULL) && 5456 ((*(pkt->pkt_scbp) = 5457 rsp->fcp_u.fcp_status.scsi_status) 5458 != STATUS_GOOD)) { 5459 good_scsi_status = FALSE; 5460 /* 5461 * The next two checks make sure that if there 5462 * is no sense data or a valid response and 5463 * the command came back with check condition, 5464 * the command should be retried 5465 */ 5466 if (!rsp->fcp_u.fcp_status.rsp_len_set && 5467 
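/*
 * Annotation (editor's note, not part of the original driver): an
 * FCP_RSP IU is the fixed-size struct fcp_rsp, optionally followed by
 * fcp_response_len bytes of response information (valid only when
 * rsp_len_set) and then fcp_sense_len bytes of sense data (valid only
 * when sense_len_set).  That layout is why sense data is located
 * further down as:
 *
 *	sense = (caddr_t)rsp + sizeof (struct fcp_rsp) +
 *	    rsp->fcp_response_len;
 *
 * Here, a CHECK CONDITION with neither length bit set carries no
 * usable explanation, so the transfer is marked unperformed
 * (pkt_resid = cmd_dmacount) to force a full retry.
 */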
!rsp->fcp_u.fcp_status.sense_len_set) { 5468 pkt->pkt_state &= ~STATE_XFERRED_DATA; 5469 pkt->pkt_resid = cmd->cmd_dmacount; 5470 } 5471 } 5472 5473 if ((cmd->cmd_flags & CFLAG_CMDIOPB) && 5474 (pkt->pkt_state & STATE_XFERRED_DATA)) { 5475 (void) ddi_dma_sync(cmd->cmd_dmahandle, 0, 5476 (uint_t)0, DDI_DMA_SYNC_FORCPU); 5477 } 5478 /* 5479 * Update the transfer resid, if appropriate 5480 */ 5481 if (rsp->fcp_u.fcp_status.resid_over || 5482 rsp->fcp_u.fcp_status.resid_under) 5483 pkt->pkt_resid = rsp->fcp_resid; 5484 5485 /* 5486 * Check to see if the SCSI command failed. 5487 * 5488 */ 5489 5490 /* 5491 * First see if we got an FCP protocol error. 5492 */ 5493 if (rsp->fcp_u.fcp_status.rsp_len_set) { 5494 struct fcp_rsp_info *bep; 5495 5496 bep = 5497 (struct fcp_rsp_info *)(&rsp->fcp_response_len + 1); 5498 if (bep->rsp_code != FCP_NO_FAILURE) { 5499 pkt->pkt_reason = CMD_TRAN_ERR; 5500 tgt_id = pkt->pkt_address.a_target; 5501 switch (bep->rsp_code) { 5502 case FCP_CMND_INVALID: 5503 SF_DMSG1("FCP_RSP FCP_CMND " 5504 "fields invalid"); 5505 break; 5506 case FCP_TASK_MGMT_NOT_SUPPTD: 5507 SF_DMSG1("FCP_RSP Task " 5508 "Management Function " 5509 "Not Supported"); 5510 break; 5511 case FCP_TASK_MGMT_FAILED: 5512 SF_DMSG1("FCP_RSP Task " 5513 "Management Function " 5514 "Failed"); 5515 sf->sf_stats.tstats[tgt_id]. 5516 task_mgmt_failures++; 5517 break; 5518 case FCP_DATA_RO_MISMATCH: 5519 SF_DMSG1("FCP_RSP FCP_DATA RO " 5520 "mismatch with " 5521 "FCP_XFER_RDY DATA_RO"); 5522 sf->sf_stats.tstats[tgt_id]. 5523 data_ro_mismatches++; 5524 break; 5525 case FCP_DL_LEN_MISMATCH: 5526 SF_DMSG1("FCP_RSP FCP_DATA length " 5527 "different than BURST_LEN"); 5528 sf->sf_stats.tstats[tgt_id]. 5529 dl_len_mismatches++; 5530 break; 5531 default: 5532 SF_DMSG1("FCP_RSP invalid RSP_CODE"); 5533 break; 5534 } 5535 } 5536 } 5537 5538 /* 5539 * See if we got a SCSI error with sense data 5540 */ 5541 if (rsp->fcp_u.fcp_status.sense_len_set) { 5542 uchar_t rqlen = min(rsp->fcp_sense_len, 5543 sizeof (struct scsi_extended_sense)); 5544 caddr_t sense = (caddr_t)rsp + 5545 sizeof (struct fcp_rsp) + rsp->fcp_response_len; 5546 struct scsi_arq_status *arq; 5547 struct scsi_extended_sense *sensep = 5548 (struct scsi_extended_sense *)sense; 5549 5550 if (rsp->fcp_u.fcp_status.scsi_status != 5551 STATUS_GOOD) { 5552 if (rsp->fcp_u.fcp_status.scsi_status 5553 == STATUS_CHECK) { 5554 if (sensep->es_key == 5555 KEY_RECOVERABLE_ERROR) 5556 good_scsi_status = 1; 5557 if (sensep->es_key == 5558 KEY_UNIT_ATTENTION && 5559 sensep->es_add_code == 0x3f && 5560 sensep->es_qual_code == 0x0e) { 5561 /* REPORT_LUNS_HAS_CHANGED */ 5562 sf_log(sf, CE_NOTE, 5563 "!REPORT_LUNS_HAS_CHANGED\n"); 5564 sf_force_lip(sf); 5565 } 5566 } 5567 } 5568 5569 if ((pkt->pkt_scbp != NULL) && 5570 (cmd->cmd_scblen >= 5571 sizeof (struct scsi_arq_status))) { 5572 5573 pkt->pkt_state |= STATE_ARQ_DONE; 5574 5575 arq = (struct scsi_arq_status *)pkt->pkt_scbp; 5576 /* 5577 * copy out sense information 5578 */ 5579 bcopy(sense, (caddr_t)&arq->sts_sensedata, 5580 rqlen); 5581 arq->sts_rqpkt_resid = 5582 sizeof (struct scsi_extended_sense) - 5583 rqlen; 5584 *((uchar_t *)&arq->sts_rqpkt_status) = 5585 STATUS_GOOD; 5586 arq->sts_rqpkt_reason = 0; 5587 arq->sts_rqpkt_statistics = 0; 5588 arq->sts_rqpkt_state = STATE_GOT_BUS | 5589 STATE_GOT_TARGET | STATE_SENT_CMD | 5590 STATE_GOT_STATUS | STATE_ARQ_DONE | 5591 STATE_XFERRED_DATA; 5592 } 5593 target->sft_alive = TRUE; 5594 } 5595 5596 /* 5597 * The firmware returns the number of bytes actually 5598 * xfered into/out
of host. Compare this with what 5599 * we asked and if it is different, we lost frames ? 5600 */ 5601 if ((pkt->pkt_reason == 0) && (pkt->pkt_resid == 0) && 5602 (good_scsi_status) && 5603 (pkt->pkt_state & STATE_XFERRED_DATA) && 5604 (!(cmd->cmd_flags & CFLAG_CMDIOPB)) && 5605 (target->sft_device_type != DTYPE_ESI)) { 5606 int byte_cnt = 5607 fpkt->fcal_socal_request.sr_soc_hdr.sh_byte_cnt; 5608 if (cmd->cmd_flags & CFLAG_DMASEND) { 5609 if (byte_cnt != 0) { 5610 sf_log(sf, CE_NOTE, 5611 "!sf_cmd_callback: Lost Frame: " 5612 "(write) received 0x%x expected" 5613 " 0x%x target 0x%x\n", 5614 byte_cnt, cmd->cmd_dmacount, 5615 sf_alpa_to_switch[ 5616 target->sft_al_pa]); 5617 pkt->pkt_reason = CMD_INCOMPLETE; 5618 pkt->pkt_statistics |= STAT_ABORTED; 5619 } 5620 } else if (byte_cnt < cmd->cmd_dmacount) { 5621 sf_log(sf, CE_NOTE, 5622 "!sf_cmd_callback: Lost Frame: (read) " 5623 "received 0x%x expected 0x%x " 5624 "target 0x%x\n", byte_cnt, 5625 cmd->cmd_dmacount, sf_alpa_to_switch[ 5626 target->sft_al_pa]); 5627 pkt->pkt_reason = CMD_INCOMPLETE; 5628 pkt->pkt_statistics |= STAT_ABORTED; 5629 } 5630 } 5631 } 5632 5633 } else { 5634 5635 /* pkt status was not ok */ 5636 5637 switch (fpkt->fcal_pkt_status) { 5638 5639 case FCAL_STATUS_ERR_OFFLINE: 5640 SF_DMSG1("Fibre Channel Offline"); 5641 mutex_enter(&target->sft_mutex); 5642 if (!(target->sft_state & SF_TARGET_OFFLINE)) { 5643 target->sft_state |= (SF_TARGET_BUSY 5644 | SF_TARGET_MARK); 5645 } 5646 mutex_exit(&target->sft_mutex); 5647 (void) ndi_event_retrieve_cookie(sf->sf_event_hdl, 5648 target->sft_dip, FCAL_REMOVE_EVENT, 5649 &sf_remove_eid, NDI_EVENT_NOPASS); 5650 (void) ndi_event_run_callbacks(sf->sf_event_hdl, 5651 target->sft_dip, sf_remove_eid, NULL); 5652 pkt->pkt_reason = CMD_TRAN_ERR; 5653 pkt->pkt_statistics |= STAT_BUS_RESET; 5654 break; 5655 5656 case FCAL_STATUS_MAX_XCHG_EXCEEDED: 5657 sf_throttle(sf); 5658 sf->sf_use_lock = TRUE; 5659 pkt->pkt_reason = CMD_TRAN_ERR; 5660 pkt->pkt_state = STATE_GOT_BUS; 5661 pkt->pkt_statistics |= STAT_ABORTED; 5662 break; 5663 5664 case FCAL_STATUS_TIMEOUT: 5665 SF_DMSG1("Fibre Channel Timeout"); 5666 pkt->pkt_reason = CMD_TIMEOUT; 5667 break; 5668 5669 case FCAL_STATUS_ERR_OVERRUN: 5670 SF_DMSG1("CMD_DATA_OVR"); 5671 pkt->pkt_reason = CMD_DATA_OVR; 5672 break; 5673 5674 case FCAL_STATUS_UNKNOWN_CQ_TYPE: 5675 SF_DMSG1("Unknown CQ type"); 5676 pkt->pkt_reason = CMD_TRAN_ERR; 5677 break; 5678 5679 case FCAL_STATUS_BAD_SEG_CNT: 5680 SF_DMSG1("Bad SEG CNT"); 5681 pkt->pkt_reason = CMD_TRAN_ERR; 5682 break; 5683 5684 case FCAL_STATUS_BAD_XID: 5685 SF_DMSG1("Fibre Channel Invalid X_ID"); 5686 pkt->pkt_reason = CMD_TRAN_ERR; 5687 break; 5688 5689 case FCAL_STATUS_XCHG_BUSY: 5690 SF_DMSG1("Fibre Channel Exchange Busy"); 5691 pkt->pkt_reason = CMD_TRAN_ERR; 5692 break; 5693 5694 case FCAL_STATUS_INSUFFICIENT_CQES: 5695 SF_DMSG1("Insufficient CQEs"); 5696 pkt->pkt_reason = CMD_TRAN_ERR; 5697 break; 5698 5699 case FCAL_STATUS_ALLOC_FAIL: 5700 SF_DMSG1("ALLOC FAIL"); 5701 pkt->pkt_reason = CMD_TRAN_ERR; 5702 break; 5703 5704 case FCAL_STATUS_BAD_SID: 5705 SF_DMSG1("Fibre Channel Invalid S_ID"); 5706 pkt->pkt_reason = CMD_TRAN_ERR; 5707 break; 5708 5709 case FCAL_STATUS_INCOMPLETE_DMA_ERR: 5710 if (sf_core && (sf_core & SF_CORE_INCOMPLETE_DMA)) { 5711 sf_token = (int *)(uintptr_t) 5712 fpkt->fcal_socal_request.\ 5713 sr_soc_hdr.sh_request_token; 5714 (void) soc_take_core(sf->sf_sochandle, 5715 sf->sf_socp); 5716 sf_core = 0; 5717 } 5718 msg2 = 5719 "INCOMPLETE DMA XFER due to bad SOC+ card, replace HBA"; 
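/*
 * Annotation (editor's note, not part of the original driver): the
 * mapping in this switch is deliberate.  Statuses that imply lost or
 * corrupted data (FCAL_STATUS_INCOMPLETE_DMA_ERR, FCAL_STATUS_CRC_ERR)
 * become CMD_INCOMPLETE with STAT_ABORTED so the target driver can
 * retry the I/O, while protocol and resource errors become
 * CMD_TRAN_ERR.  msg2, unlike the DEBUG-only msg1, is logged
 * unconditionally (see the sf_log() call near the end of this
 * routine) because it indicates hardware that needs attention.
 */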
5720 pkt->pkt_reason = CMD_INCOMPLETE; 5721 pkt->pkt_state = STATE_GOT_BUS; 5722 pkt->pkt_statistics |= STAT_ABORTED; 5723 break; 5724 5725 case FCAL_STATUS_CRC_ERR: 5726 msg2 = "Fibre Channel CRC Error on frames"; 5727 pkt->pkt_reason = CMD_INCOMPLETE; 5728 pkt->pkt_state = STATE_GOT_BUS; 5729 pkt->pkt_statistics |= STAT_ABORTED; 5730 break; 5731 5732 case FCAL_STATUS_NO_SEQ_INIT: 5733 SF_DMSG1("Fibre Channel Seq Init Error"); 5734 pkt->pkt_reason = CMD_TRAN_ERR; 5735 break; 5736 5737 case FCAL_STATUS_OPEN_FAIL: 5738 pkt->pkt_reason = CMD_TRAN_ERR; 5739 SF_DMSG1("Fibre Channel Open Failure"); 5740 if ((target->sft_state & (SF_TARGET_BUSY | 5741 SF_TARGET_MARK | SF_TARGET_OFFLINE)) == 0) { 5742 sf_log(sf, CE_NOTE, "!Open failure to target 0x%x " 5743 "forcing LIP\n", 5744 sf_alpa_to_switch[target->sft_al_pa]); 5745 sf_force_lip(sf); 5746 } 5747 break; 5748 5749 5750 case FCAL_STATUS_ONLINE_TIMEOUT: 5751 SF_DMSG1("Fibre Channel Online Timeout"); 5752 pkt->pkt_reason = CMD_TRAN_ERR; 5753 break; 5754 5755 default: 5756 SF_DMSG1("Unknown FC Status"); 5757 pkt->pkt_reason = CMD_TRAN_ERR; 5758 break; 5759 } 5760 } 5761 5762 #ifdef DEBUG 5763 /* 5764 * msg1 will be non-NULL if we've detected some sort of error 5765 */ 5766 if (msg1 != NULL && sfdebug >= 4) { 5767 sf_log(sf, CE_WARN, 5768 "!Transport error on cmd=0x%p target=0x%x: %s\n", 5769 (void *)fpkt, pkt->pkt_address.a_target, msg1); 5770 } 5771 #endif 5772 5773 if (msg2 != NULL) { 5774 sf_log(sf, CE_WARN, "!Transport error on target=0x%x: %s\n", 5775 pkt->pkt_address.a_target, msg2); 5776 } 5777 5778 ncmds = fpkt->fcal_ncmds; 5779 ASSERT(ncmds >= 0); 5780 if (ncmds >= (sf->sf_throttle - SF_HI_CMD_DELTA)) { 5781 #ifdef DEBUG 5782 if (!sf->sf_use_lock) { 5783 SF_DEBUG(4, (sf, CE_NOTE, "use lock flag on\n")); 5784 } 5785 #endif 5786 sf->sf_use_lock = TRUE; 5787 } 5788 5789 mutex_enter(&sf->sf_cmd_mutex); 5790 sf->sf_ncmds = ncmds; 5791 sf_throttle_start(sf); 5792 mutex_exit(&sf->sf_cmd_mutex); 5793 5794 if (!msg1 && !msg2) 5795 SF_DEBUG(6, (sf, CE_NOTE, "Completing pkt 0x%p\n", 5796 (void *)pkt)); 5797 if (pkt->pkt_comp != NULL) { 5798 (*pkt->pkt_comp)(pkt); 5799 } 5800 } 5801 5802 #undef SF_DMSG1 5803 5804 5805 5806 /* 5807 * start throttling for this instance 5808 */ 5809 static void 5810 sf_throttle_start(struct sf *sf) 5811 { 5812 struct sf_pkt *cmd, *prev_cmd = NULL; 5813 struct scsi_pkt *pkt; 5814 struct sf_target *target; 5815 5816 5817 ASSERT(mutex_owned(&sf->sf_cmd_mutex)); 5818 5819 cmd = sf->sf_pkt_head; 5820 while ((cmd != NULL) && 5821 (sf->sf_state == SF_STATE_ONLINE) && 5822 (sf->sf_ncmds < sf->sf_throttle)) { 5823 5824 pkt = CMD2PKT(cmd); 5825 5826 target = ADDR2TARGET(&pkt->pkt_address); 5827 if (target->sft_state & SF_TARGET_BUSY) { 5828 /* this command is busy -- go to next */ 5829 ASSERT(cmd->cmd_state != SF_STATE_ISSUED); 5830 prev_cmd = cmd; 5831 cmd = cmd->cmd_next; 5832 continue; 5833 } 5834 5835 ASSERT(cmd->cmd_state != SF_STATE_ISSUED); 5836 5837 /* this cmd not busy and not issued */ 5838 5839 /* remove this packet from the queue */ 5840 if (sf->sf_pkt_head == cmd) { 5841 /* this was the first packet */ 5842 sf->sf_pkt_head = cmd->cmd_next; 5843 } else if (sf->sf_pkt_tail == cmd) { 5844 /* this was the last packet */ 5845 sf->sf_pkt_tail = prev_cmd; 5846 if (prev_cmd != NULL) { 5847 prev_cmd->cmd_next = NULL; 5848 } 5849 } else { 5850 /* some packet in the middle of the queue */ 5851 ASSERT(prev_cmd != NULL); 5852 prev_cmd->cmd_next = cmd->cmd_next; 5853 } 5854 cmd->cmd_flags &= ~CFLAG_IN_QUEUE; 5855 5856 if 
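/*
 * Annotation (editor's note, not part of the original driver):
 * sf_throttle_start() is entered with sf_cmd_mutex held, but both
 * branches below drop the mutex before completing or transporting the
 * command, since a completion routine may re-enter sf_start() and
 * would otherwise deadlock on sf_cmd_mutex.  Because the queue can
 * change while the lock is released, each iteration reacquires the
 * mutex and restarts the scan from sf_pkt_head instead of trusting a
 * saved cmd_next pointer.
 */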
(target->sft_state & SF_TARGET_OFFLINE) { 5857 mutex_exit(&sf->sf_cmd_mutex); 5858 pkt->pkt_reason = CMD_TRAN_ERR; 5859 if (pkt->pkt_comp != NULL) { 5860 (*pkt->pkt_comp)(cmd->cmd_pkt); 5861 } 5862 } else { 5863 sf_fill_ids(sf, cmd, target); 5864 if (sf_start_internal(sf, cmd) != TRAN_ACCEPT) { 5865 pkt->pkt_reason = CMD_TRAN_ERR; 5866 if (pkt->pkt_comp != NULL) { 5867 (*pkt->pkt_comp)(cmd->cmd_pkt); 5868 } 5869 } 5870 } 5871 mutex_enter(&sf->sf_cmd_mutex); 5872 cmd = sf->sf_pkt_head; 5873 prev_cmd = NULL; 5874 } 5875 } 5876 5877 5878 /* 5879 * called when the max exchange value is exceeded to throttle back commands 5880 */ 5881 static void 5882 sf_throttle(struct sf *sf) 5883 { 5884 int cmdmax = sf->sf_sochandle->fcal_cmdmax; 5885 5886 5887 mutex_enter(&sf->sf_cmd_mutex); 5888 5889 sf->sf_flag = TRUE; 5890 5891 if (sf->sf_ncmds > (cmdmax / 2)) { 5892 sf->sf_throttle = cmdmax / 2; 5893 } else { 5894 if (sf->sf_ncmds > SF_DECR_DELTA) { 5895 sf->sf_throttle = sf->sf_ncmds - SF_DECR_DELTA; 5896 } else { 5897 /* 5898 * This case is just a safeguard; it should not really 5899 * happen (ncmds < SF_DECR_DELTA and MAX_EXCHG exceeded). 5900 */ 5901 sf->sf_throttle = SF_DECR_DELTA; 5902 } 5903 } 5904 mutex_exit(&sf->sf_cmd_mutex); 5905 5906 sf = sf->sf_sibling; 5907 if (sf != NULL) { 5908 mutex_enter(&sf->sf_cmd_mutex); 5909 sf->sf_flag = TRUE; 5910 if (sf->sf_ncmds >= (cmdmax / 2)) { 5911 sf->sf_throttle = cmdmax / 2; 5912 } else { 5913 if (sf->sf_ncmds > SF_DECR_DELTA) { 5914 sf->sf_throttle = sf->sf_ncmds - SF_DECR_DELTA; 5915 } else { 5916 sf->sf_throttle = SF_DECR_DELTA; 5917 } 5918 } 5919 5920 mutex_exit(&sf->sf_cmd_mutex); 5921 } 5922 } 5923 5924 5925 /* 5926 * sf watchdog routine, called for a timeout 5927 */ 5928 /*ARGSUSED*/ 5929 static void 5930 sf_watch(void *arg) 5931 { 5932 struct sf *sf; 5933 struct sf_els_hdr *privp; 5934 static int count = 0, pscan_count = 0; 5935 int cmdmax, i, mescount = 0; 5936 struct sf_target *target; 5937 5938 5939 sf_watchdog_time += sf_watchdog_timeout; 5940 count++; 5941 pscan_count++; 5942 5943 mutex_enter(&sf_global_mutex); 5944 sf_watch_running = 1; 5945 for (sf = sf_head; sf != NULL; sf = sf->sf_next) { 5946 5947 mutex_exit(&sf_global_mutex); 5948 5949 /* disable throttling while we're suspended */ 5950 mutex_enter(&sf->sf_mutex); 5951 if (sf->sf_state & SF_STATE_SUSPENDED) { 5952 mutex_exit(&sf->sf_mutex); 5953 SF_DEBUG(1, (sf, CE_CONT, 5954 "sf_watch, sf%d:throttle disabled " 5955 "due to DDI_SUSPEND\n", 5956 ddi_get_instance(sf->sf_dip))); 5957 mutex_enter(&sf_global_mutex); 5958 continue; 5959 } 5960 mutex_exit(&sf->sf_mutex); 5961 5962 cmdmax = sf->sf_sochandle->fcal_cmdmax; 5963 5964 if (sf->sf_take_core) { 5965 (void) soc_take_core(sf->sf_sochandle, sf->sf_socp); 5966 } 5967 5968 mutex_enter(&sf->sf_cmd_mutex); 5969 5970 if (!sf->sf_flag) { 5971 if (sf->sf_throttle < (cmdmax / 2)) { 5972 sf->sf_throttle = cmdmax / 2; 5973 } else if ((sf->sf_throttle += SF_INCR_DELTA) > 5974 cmdmax) { 5975 sf->sf_throttle = cmdmax; 5976 } 5977 } else { 5978 sf->sf_flag = FALSE; 5979 } 5980 5981 sf->sf_ncmds_exp_avg = (sf->sf_ncmds + sf->sf_ncmds_exp_avg) 5982 >> 2; 5983 if ((sf->sf_ncmds <= (sf->sf_throttle - SF_LO_CMD_DELTA)) && 5984 (sf->sf_pkt_head == NULL)) { 5985 #ifdef DEBUG 5986 if (sf->sf_use_lock) { 5987 SF_DEBUG(4, (sf, CE_NOTE, 5988 "use lock flag off\n")); 5989 } 5990 #endif 5991 sf->sf_use_lock = FALSE; 5992 } 5993 5994 if (sf->sf_state == SF_STATE_ONLINE && sf->sf_pkt_head && 5995 sf->sf_ncmds < sf->sf_throttle) { 5996 sf_throttle_start(sf); 5997 } 5998 5999
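/*
 * Throttle management in one place: sf_throttle() above cuts the
 * limit when the SOC+ runs out of exchanges, and the watchdog code
 * above raises it again once per tick while things stay quiet. A
 * minimal sketch of the combined policy (hypothetical helper, not
 * part of this driver):
 *
 *	static int
 *	next_throttle(int exchanges_exhausted, int throttle, int ncmds,
 *	    int cmdmax)
 *	{
 *		if (exchanges_exhausted) {
 *			if (ncmds > cmdmax / 2)
 *				return (cmdmax / 2);
 *			return (ncmds > SF_DECR_DELTA ?
 *			    ncmds - SF_DECR_DELTA : SF_DECR_DELTA);
 *		}
 *		if (throttle < cmdmax / 2)
 *			return (cmdmax / 2);
 *		return (throttle + SF_INCR_DELTA > cmdmax ?
 *		    cmdmax : throttle + SF_INCR_DELTA);
 *	}
 *
 * The smoothed load in sf_ncmds_exp_avg follows the recurrence
 * avg' = (ncmds + avg) / 4, which settles near ncmds / 3 under a
 * constant load; it is consulted below when deciding whether
 * command/response pool entries can be freed.
 */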
mutex_exit(&sf->sf_cmd_mutex); 6000 6001 if (pscan_count >= sf_pool_scan_cnt) { 6002 if (sf->sf_ncmds_exp_avg < (sf->sf_cr_pool_cnt << 6003 SF_LOG2_ELEMS_IN_POOL) - SF_FREE_CR_EPSILON) { 6004 sf_crpool_free(sf); 6005 } 6006 } 6007 mutex_enter(&sf->sf_mutex); 6008 6009 privp = sf->sf_els_list; 6010 while (privp != NULL) { 6011 if (privp->timeout < sf_watchdog_time) { 6012 /* timeout this command */ 6013 privp = sf_els_timeout(sf, privp); 6014 } else if ((privp->timeout == SF_INVALID_TIMEOUT) && 6015 (privp->lip_cnt != sf->sf_lip_cnt)) { 6016 if (privp->prev != NULL) { 6017 privp->prev->next = privp->next; 6018 } 6019 if (sf->sf_els_list == privp) { 6020 sf->sf_els_list = privp->next; 6021 } 6022 if (privp->next != NULL) { 6023 privp->next->prev = privp->prev; 6024 } 6025 mutex_exit(&sf->sf_mutex); 6026 sf_els_free(privp->fpkt); 6027 mutex_enter(&sf->sf_mutex); 6028 privp = sf->sf_els_list; 6029 } else { 6030 privp = privp->next; 6031 } 6032 } 6033 6034 if (sf->sf_online_timer && sf->sf_online_timer < 6035 sf_watchdog_time) { 6036 for (i = 0; i < sf_max_targets; i++) { 6037 target = sf->sf_targets[i]; 6038 if (target != NULL) { 6039 if (!mescount && target->sft_state & 6040 SF_TARGET_BUSY) { 6041 sf_log(sf, CE_WARN, "!Loop " 6042 "Unstable: Failed to bring " 6043 "Loop Online\n"); 6044 mescount = 1; 6045 } 6046 target->sft_state |= SF_TARGET_MARK; 6047 } 6048 } 6049 sf_finish_init(sf, sf->sf_lip_cnt); 6050 sf->sf_state = SF_STATE_INIT; 6051 sf->sf_online_timer = 0; 6052 } 6053 6054 if (sf->sf_state == SF_STATE_ONLINE) { 6055 mutex_exit(&sf->sf_mutex); 6056 if (count >= sf_pkt_scan_cnt) { 6057 sf_check_targets(sf); 6058 } 6059 } else if ((sf->sf_state == SF_STATE_OFFLINE) && 6060 (sf->sf_timer < sf_watchdog_time)) { 6061 for (i = 0; i < sf_max_targets; i++) { 6062 target = sf->sf_targets[i]; 6063 if ((target != NULL) && 6064 (target->sft_state & 6065 SF_TARGET_BUSY)) { 6066 sf_log(sf, CE_WARN, 6067 "!Offline Timeout\n"); 6068 if (sf_core && (sf_core & 6069 SF_CORE_OFFLINE_TIMEOUT)) { 6070 (void) soc_take_core( 6071 sf->sf_sochandle, 6072 sf->sf_socp); 6073 sf_core = 0; 6074 } 6075 break; 6076 } 6077 } 6078 sf_finish_init(sf, sf->sf_lip_cnt); 6079 sf->sf_state = SF_STATE_INIT; 6080 mutex_exit(&sf->sf_mutex); 6081 } else { 6082 mutex_exit(&sf->sf_mutex); 6083 } 6084 mutex_enter(&sf_global_mutex); 6085 } 6086 mutex_exit(&sf_global_mutex); 6087 if (count >= sf_pkt_scan_cnt) { 6088 count = 0; 6089 } 6090 if (pscan_count >= sf_pool_scan_cnt) { 6091 pscan_count = 0; 6092 } 6093 6094 /* reset timeout */ 6095 sf_watchdog_id = timeout(sf_watch, (caddr_t)0, sf_watchdog_tick); 6096 6097 /* signal waiting thread */ 6098 mutex_enter(&sf_global_mutex); 6099 sf_watch_running = 0; 6100 cv_broadcast(&sf_watch_cv); 6101 mutex_exit(&sf_global_mutex); 6102 } 6103 6104 6105 /* 6106 * called during a timeout to check targets 6107 */ 6108 static void 6109 sf_check_targets(struct sf *sf) 6110 { 6111 struct sf_target *target; 6112 int i; 6113 struct sf_pkt *cmd; 6114 struct scsi_pkt *pkt; 6115 int lip_cnt; 6116 6117 mutex_enter(&sf->sf_mutex); 6118 lip_cnt = sf->sf_lip_cnt; 6119 mutex_exit(&sf->sf_mutex); 6120 6121 /* scan all possible targets */ 6122 for (i = 0; i < sf_max_targets; i++) { 6123 target = sf->sf_targets[i]; 6124 while (target != NULL) { 6125 mutex_enter(&target->sft_pkt_mutex); 6126 if (target->sft_alive && target->sft_scan_count != 6127 sf_target_scan_cnt) { 6128 target->sft_alive = 0; 6129 target->sft_scan_count++; 6130 mutex_exit(&target->sft_pkt_mutex); 6131 return; 6132 } 6133
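/*
 * The gate above makes the full per-packet scan cheap in the common
 * case: a target that has completed I/O recently (sft_alive set,
 * presumably by the command completion path) cuts the scan short for
 * up to sf_target_scan_cnt passes. Once a target falls through,
 * every issued command is tested for expiry. A minimal sketch of
 * that test (hypothetical helper, not part of this driver):
 *
 *	static int
 *	cmd_timed_out(uint_t cmd_timeout, uint_t watchdog_time)
 *	{
 *		return (cmd_timeout != 0 && watchdog_time > cmd_timeout);
 *	}
 *
 * sf_watchdog_time advances by sf_watchdog_timeout on every tick, so
 * a cmd_timeout of 0 means no deadline is armed.
 */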
target->sft_alive = 0; 6134 target->sft_scan_count = 0; 6135 cmd = target->sft_pkt_head; 6136 while (cmd != (struct sf_pkt *)&target->sft_pkt_head) { 6137 mutex_enter(&cmd->cmd_abort_mutex); 6138 if (cmd->cmd_state == SF_STATE_ISSUED && 6139 ((cmd->cmd_timeout && sf_watchdog_time > 6140 #ifdef DEBUG 6141 cmd->cmd_timeout) || sf_abort_flag)) { 6142 sf_abort_flag = 0; 6143 #else 6144 cmd->cmd_timeout))) { 6145 #endif 6146 cmd->cmd_timeout = 0; 6147 /* prevent reset from getting at this packet */ 6148 cmd->cmd_state = SF_STATE_ABORTING; 6149 mutex_exit(&cmd->cmd_abort_mutex); 6150 mutex_exit(&target->sft_pkt_mutex); 6151 sf->sf_stats.tstats[i].timeouts++; 6152 if (sf_target_timeout(sf, cmd)) 6153 return; 6154 else { 6155 if (lip_cnt != sf->sf_lip_cnt) { 6156 return; 6157 } else { 6158 mutex_enter(&target-> 6159 sft_pkt_mutex); 6160 cmd = target-> 6161 sft_pkt_head; 6162 } 6163 } 6164 /* 6165 * if the abort and lip fail, a reset will be carried out. 6166 * But the reset will ignore this packet. We have waited at least 6167 * 20 seconds after the initial timeout. Now, complete it here. 6168 * This also takes care of spurious bad aborts. 6169 */ 6170 } else if ((cmd->cmd_state == 6171 SF_STATE_ABORTING) && (cmd->cmd_timeout 6172 <= sf_watchdog_time)) { 6173 cmd->cmd_state = SF_STATE_IDLE; 6174 mutex_exit(&cmd->cmd_abort_mutex); 6175 mutex_exit(&target->sft_pkt_mutex); 6176 SF_DEBUG(1, (sf, CE_NOTE, "Command 0x%p to sft 0x%p delayed release\n", 6177 (void *)cmd, (void *)target)); 6178 pkt = cmd->cmd_pkt; 6179 pkt->pkt_statistics |= 6180 (STAT_TIMEOUT|STAT_ABORTED); 6181 pkt->pkt_reason = CMD_TIMEOUT; 6182 if (pkt->pkt_comp) { 6183 (*pkt->pkt_comp)(pkt); 6184 /* handle deferred_destroy case */ 6185 } else { 6186 if ((cmd->cmd_block->fcp_cntl. 6187 cntl_reset == 1) || 6188 (cmd->cmd_block-> 6189 fcp_cntl.cntl_abort_tsk == 1)) { 6190 cmd->cmd_block->fcp_cntl. 6191 cntl_reset = 0; 6192 cmd->cmd_block->fcp_cntl. 
6193 cntl_abort_tsk = 0; 6194 cmd->cmd_fp_pkt->fcal_pkt_comp = 6195 sf_cmd_callback; 6196 /* for cache */ 6197 sf_scsi_destroy_pkt 6198 (&pkt->pkt_address, pkt); 6199 } 6200 } 6201 mutex_enter(&target->sft_pkt_mutex); 6202 cmd = target->sft_pkt_head; 6203 } else { 6204 mutex_exit(&cmd->cmd_abort_mutex); 6205 cmd = cmd->cmd_forw; 6206 } 6207 } 6208 mutex_exit(&target->sft_pkt_mutex); 6209 target = target->sft_next_lun; 6210 } 6211 } 6212 } 6213 6214 6215 /* 6216 * a command to a target has timed out 6217 * return TRUE iff cmd abort failed or timed out, else return FALSE 6218 */ 6219 static int 6220 sf_target_timeout(struct sf *sf, struct sf_pkt *cmd) 6221 { 6222 int rval; 6223 struct scsi_pkt *pkt; 6224 struct fcal_packet *fpkt; 6225 int tgt_id; 6226 int retval = FALSE; 6227 6228 6229 SF_DEBUG(1, (sf, CE_NOTE, "Command 0x%p to target %x timed out\n", 6230 (void *)cmd->cmd_fp_pkt, cmd->cmd_pkt->pkt_address.a_target)); 6231 6232 fpkt = cmd->cmd_fp_pkt; 6233 6234 if (sf_core && (sf_core & SF_CORE_CMD_TIMEOUT)) { 6235 sf_token = (int *)(uintptr_t) 6236 fpkt->fcal_socal_request.sr_soc_hdr.\ 6237 sh_request_token; 6238 (void) soc_take_core(sf->sf_sochandle, sf->sf_socp); 6239 sf_core = 0; 6240 } 6241 6242 /* call the transport to abort a command */ 6243 rval = soc_abort(sf->sf_sochandle, sf->sf_socp, 6244 sf->sf_sochandle->fcal_portno, fpkt, 1); 6245 6246 switch (rval) { 6247 case FCAL_ABORTED: 6248 SF_DEBUG(1, (sf, CE_NOTE, "Command Abort succeeded\n")); 6249 pkt = cmd->cmd_pkt; 6250 cmd->cmd_state = SF_STATE_IDLE; 6251 pkt->pkt_statistics |= (STAT_TIMEOUT|STAT_ABORTED); 6252 pkt->pkt_reason = CMD_TIMEOUT; 6253 if (pkt->pkt_comp != NULL) { 6254 (*pkt->pkt_comp)(pkt); 6255 } 6256 break; /* success */ 6257 6258 case FCAL_ABORT_FAILED: 6259 SF_DEBUG(1, (sf, CE_NOTE, "Command Abort failed at target\n")); 6260 pkt = cmd->cmd_pkt; 6261 cmd->cmd_state = SF_STATE_IDLE; 6262 pkt->pkt_reason = CMD_TIMEOUT; 6263 pkt->pkt_statistics |= STAT_TIMEOUT; 6264 tgt_id = pkt->pkt_address.a_target; 6265 sf->sf_stats.tstats[tgt_id].abts_failures++; 6266 if (pkt->pkt_comp != NULL) { 6267 (*pkt->pkt_comp)(pkt); 6268 } 6269 break; 6270 6271 case FCAL_BAD_ABORT: 6272 if (sf_core && (sf_core & SF_CORE_BAD_ABORT)) { 6273 sf_token = (int *)(uintptr_t)fpkt->fcal_socal_request.\ 6274 sr_soc_hdr.sh_request_token; 6275 (void) soc_take_core(sf->sf_sochandle, sf->sf_socp); 6276 sf_core = 0; 6277 } 6278 SF_DEBUG(1, (sf, CE_NOTE, "Command Abort bad abort\n")); 6279 cmd->cmd_timeout = sf_watchdog_time + cmd->cmd_pkt->pkt_time 6280 + 20; 6281 break; 6282 6283 case FCAL_TIMEOUT: 6284 retval = TRUE; 6285 break; 6286 6287 default: 6288 pkt = cmd->cmd_pkt; 6289 tgt_id = pkt->pkt_address.a_target; 6290 sf_log(sf, CE_WARN, 6291 "Command Abort failed target 0x%x, forcing a LIP\n", tgt_id); 6292 if (sf_core && (sf_core & SF_CORE_ABORT_TIMEOUT)) { 6293 sf_token = (int *)(uintptr_t)fpkt->fcal_socal_request.\ 6294 sr_soc_hdr.sh_request_token; 6295 (void) soc_take_core(sf->sf_sochandle, sf->sf_socp); 6296 sf_core = 0; 6297 } 6298 sf_force_lip(sf); 6299 retval = TRUE; 6300 break; 6301 } 6302 6303 return (retval); 6304 } 6305 6306 6307 /* 6308 * an ELS command has timed out 6309 * return the next entry in the ELS list to examine (possibly NULL)
6310 */ 6311 static struct sf_els_hdr * 6312 sf_els_timeout(struct sf *sf, struct sf_els_hdr *privp) 6313 { 6314 struct fcal_packet *fpkt; 6315 int rval, dflag, timeout = SF_ELS_TIMEOUT; 6316 uint_t lip_cnt = privp->lip_cnt; 6317 uchar_t els_code = privp->els_code; 6318 struct sf_target *target = privp->target; 6319 char what[64]; 6320 6321 fpkt = privp->fpkt; 6322 dflag = privp->delayed_retry; 6323 /* use as temporary state variable */ 6324 privp->timeout = SF_INVALID_TIMEOUT; 6325 mutex_exit(&sf->sf_mutex); 6326 6327 if (privp->fpkt->fcal_pkt_comp == sf_els_callback) { 6328 /* 6329 * take socal core if required. Timeouts for IB and hosts 6330 * are not very interesting, so we take socal core only 6331 * if the timeout is *not* for an IB or host. 6332 */ 6333 if (sf_core && (sf_core & SF_CORE_ELS_TIMEOUT) && 6334 ((sf_alpa_to_switch[privp->dest_nport_id] & 0x0d) != 0x0d) && 6335 ((privp->dest_nport_id != 1) && (privp->dest_nport_id != 2) && 6336 (privp->dest_nport_id != 4) && (privp->dest_nport_id != 8) && 6337 (privp->dest_nport_id != 0xf))) { 6338 sf_token = (int *)(uintptr_t)fpkt->fcal_socal_request.\ 6339 sr_soc_hdr.sh_request_token; 6340 (void) soc_take_core(sf->sf_sochandle, sf->sf_socp); 6341 sf_core = 0; 6342 } 6343 (void) sprintf(what, "ELS 0x%x", privp->els_code); 6344 } else if (privp->fpkt->fcal_pkt_comp == sf_reportlun_callback) { 6345 if (sf_core && (sf_core & SF_CORE_REPORTLUN_TIMEOUT)) { 6346 sf_token = (int *)(uintptr_t)fpkt->fcal_socal_request.\ 6347 sr_soc_hdr.sh_request_token; 6348 (void) soc_take_core(sf->sf_sochandle, sf->sf_socp); 6349 sf_core = 0; 6350 } 6351 timeout = SF_FCP_TIMEOUT; 6352 (void) sprintf(what, "REPORT_LUNS"); 6353 } else if (privp->fpkt->fcal_pkt_comp == sf_inq_callback) { 6354 if (sf_core && (sf_core & SF_CORE_INQUIRY_TIMEOUT)) { 6355 sf_token = (int *)(uintptr_t) 6356 fpkt->fcal_socal_request.\ 6357 sr_soc_hdr.sh_request_token; 6358 (void) soc_take_core(sf->sf_sochandle, sf->sf_socp); 6359 sf_core = 0; 6360 } 6361 timeout = SF_FCP_TIMEOUT; 6362 (void) sprintf(what, "INQUIRY to LUN 0x%lx", 6363 (long)SCSA_LUN(target)); 6364 } else { 6365 (void) sprintf(what, "UNKNOWN OPERATION"); 6366 } 6367 6368 if (dflag) { 6369 /* delayed retry */ 6370 SF_DEBUG(2, (sf, CE_CONT, 6371 "!sf%d: %s to target %x delayed retry\n", 6372 ddi_get_instance(sf->sf_dip), what, 6373 sf_alpa_to_switch[privp->dest_nport_id])); 6374 privp->delayed_retry = FALSE; 6375 goto try_again; 6376 } 6377 6378 sf_log(sf, CE_NOTE, "!%s to target 0x%x alpa 0x%x timed out\n", 6379 what, sf_alpa_to_switch[privp->dest_nport_id], 6380 privp->dest_nport_id); 6381 6382 rval = soc_abort(sf->sf_sochandle, sf->sf_socp, sf->sf_sochandle 6383 ->fcal_portno, fpkt, 1); 6384 if (rval == FCAL_ABORTED || rval == FCAL_ABORT_FAILED) { 6385 SF_DEBUG(1, (sf, CE_NOTE, "!%s abort to al_pa %x succeeded\n", 6386 what, privp->dest_nport_id)); 6387 try_again: 6388 6389 mutex_enter(&sf->sf_mutex); 6390 if (privp->prev != NULL) { 6391 privp->prev->next = privp->next; 6392 } 6393 if (sf->sf_els_list == privp) { 6394 sf->sf_els_list = privp->next; 6395 } 6396 if (privp->next != NULL) { 6397 privp->next->prev = privp->prev; 6398 } 6399 privp->prev = privp->next = NULL; 6400 if (lip_cnt == sf->sf_lip_cnt) { 6401 privp->timeout = sf_watchdog_time + timeout; 6402 if ((++(privp->retries) < sf_els_retries) || 6403 (dflag && (privp->retries < SF_BSY_RETRIES))) { 6404 mutex_exit(&sf->sf_mutex); 6405 sf_log(sf, CE_NOTE, 6406 "!%s to target 0x%x retrying\n", 6407 what, 6408 sf_alpa_to_switch[privp->dest_nport_id]); 6409 if
(sf_els_transport(sf, privp) == 1) { 6410 mutex_enter(&sf->sf_mutex); 6411 return (sf->sf_els_list); /* success */ 6412 } 6413 mutex_enter(&sf->sf_mutex); 6414 fpkt = NULL; 6415 } 6416 if ((lip_cnt == sf->sf_lip_cnt) && 6417 (els_code != LA_ELS_LOGO)) { 6418 if (target != NULL) { 6419 sf_offline_target(sf, target); 6420 } 6421 if (sf->sf_lip_cnt == lip_cnt) { 6422 sf->sf_device_count--; 6423 ASSERT(sf->sf_device_count >= 0); 6424 if (sf->sf_device_count == 0) { 6425 sf_finish_init(sf, 6426 sf->sf_lip_cnt); 6427 } 6428 } 6429 } 6430 privp = sf->sf_els_list; 6431 mutex_exit(&sf->sf_mutex); 6432 if (fpkt != NULL) { 6433 sf_els_free(fpkt); 6434 } 6435 } else { 6436 mutex_exit(&sf->sf_mutex); 6437 sf_els_free(privp->fpkt); 6438 privp = NULL; 6439 } 6440 } else { 6441 if (sf_core && (sf_core & SF_CORE_ELS_FAILED)) { 6442 sf_token = (int *)(uintptr_t) 6443 fpkt->fcal_socal_request.\ 6444 sr_soc_hdr.sh_request_token; 6445 (void) soc_take_core(sf->sf_sochandle, sf->sf_socp); 6446 sf_core = 0; 6447 } 6448 sf_log(sf, CE_NOTE, "%s abort to target 0x%x failed. " 6449 "status=0x%x, forcing LIP\n", what, 6450 sf_alpa_to_switch[privp->dest_nport_id], rval); 6451 privp = NULL; 6452 if (sf->sf_lip_cnt == lip_cnt) { 6453 sf_force_lip(sf); 6454 } 6455 } 6456 6457 mutex_enter(&sf->sf_mutex); 6458 return (privp); 6459 } 6460 6461 6462 /* 6463 * called by timeout when a reset times out 6464 */ 6465 /*ARGSUSED*/ 6466 static void 6467 sf_check_reset_delay(void *arg) 6468 { 6469 struct sf *sf; 6470 struct sf_target *target; 6471 struct sf_reset_list *rp, *tp; 6472 uint_t lip_cnt, reset_timeout_flag = FALSE; 6473 clock_t lb; 6474 6475 6476 lb = ddi_get_lbolt(); 6477 6478 mutex_enter(&sf_global_mutex); 6479 6480 sf_reset_timeout_id = 0; 6481 6482 for (sf = sf_head; sf != NULL; sf = sf->sf_next) { 6483 6484 mutex_exit(&sf_global_mutex); 6485 mutex_enter(&sf->sf_mutex); 6486 6487 /* is this type cast needed? */ 6488 tp = (struct sf_reset_list *)&sf->sf_reset_list; 6489 6490 rp = sf->sf_reset_list; 6491 while (rp != NULL) { 6492 if (((rp->timeout - lb) < 0) && 6493 (rp->lip_cnt == sf->sf_lip_cnt)) { 6494 tp->next = rp->next; 6495 mutex_exit(&sf->sf_mutex); 6496 target = rp->target; 6497 lip_cnt = rp->lip_cnt; 6498 kmem_free(rp, sizeof (struct sf_reset_list)); 6499 /* abort all cmds for this target */ 6500 while (target) { 6501 sf_abort_all(sf, target, FALSE, 6502 lip_cnt, TRUE); 6503 mutex_enter(&target->sft_mutex); 6504 if (lip_cnt == sf->sf_lip_cnt) { 6505 target->sft_state &= 6506 ~SF_TARGET_BUSY; 6507 } 6508 mutex_exit(&target->sft_mutex); 6509 target = target->sft_next_lun; 6510 } 6511 mutex_enter(&sf->sf_mutex); 6512 tp = (struct sf_reset_list *) 6513 &sf->sf_reset_list; 6514 rp = sf->sf_reset_list; 6515 lb = ddi_get_lbolt(); 6516 } else if (rp->lip_cnt != sf->sf_lip_cnt) { 6517 tp->next = rp->next; 6518 kmem_free(rp, sizeof (struct sf_reset_list)); 6519 rp = tp->next; 6520 } else { 6521 reset_timeout_flag = TRUE; 6522 tp = rp; 6523 rp = rp->next; 6524 } 6525 } 6526 mutex_exit(&sf->sf_mutex); 6527 mutex_enter(&sf_global_mutex); 6528 } 6529 6530 if (reset_timeout_flag && (sf_reset_timeout_id == 0)) { 6531 sf_reset_timeout_id = timeout(sf_check_reset_delay, 6532 NULL, drv_usectohz(SF_TARGET_RESET_DELAY)); 6533 } 6534 6535 mutex_exit(&sf_global_mutex); 6536 } 6537 6538 6539 /* 6540 * called to "reset the bus", i.e. 
force loop initialization (and address 6541 * re-negotiation) 6542 */ 6543 static void 6544 sf_force_lip(struct sf *sf) 6545 { 6546 int i; 6547 struct sf_target *target; 6548 6549 6550 /* disable restart of lip if we're suspended */ 6551 mutex_enter(&sf->sf_mutex); 6552 if (sf->sf_state & SF_STATE_SUSPENDED) { 6553 mutex_exit(&sf->sf_mutex); 6554 SF_DEBUG(1, (sf, CE_CONT, 6555 "sf_force_lip, sf%d: lip restart disabled " 6556 "due to DDI_SUSPEND\n", 6557 ddi_get_instance(sf->sf_dip))); 6558 return; 6559 } 6560 6561 sf_log(sf, CE_NOTE, "Forcing lip\n"); 6562 6563 for (i = 0; i < sf_max_targets; i++) { 6564 target = sf->sf_targets[i]; 6565 while (target != NULL) { 6566 mutex_enter(&target->sft_mutex); 6567 if (!(target->sft_state & SF_TARGET_OFFLINE)) 6568 target->sft_state |= SF_TARGET_BUSY; 6569 mutex_exit(&target->sft_mutex); 6570 target = target->sft_next_lun; 6571 } 6572 } 6573 6574 sf->sf_lip_cnt++; 6575 sf->sf_timer = sf_watchdog_time + SF_OFFLINE_TIMEOUT; 6576 sf->sf_state = SF_STATE_OFFLINE; 6577 mutex_exit(&sf->sf_mutex); 6578 sf->sf_stats.lip_count++; /* no mutex for this? */ 6579 6580 #ifdef DEBUG 6581 /* are we allowing LIPs ?? */ 6582 if (sf_lip_flag != 0) { 6583 #endif 6584 /* call the transport to force loop initialization */ 6585 if (((i = soc_force_lip(sf->sf_sochandle, sf->sf_socp, 6586 sf->sf_sochandle->fcal_portno, 1, 6587 FCAL_FORCE_LIP)) != FCAL_SUCCESS) && 6588 (i != FCAL_TIMEOUT)) { 6589 /* force LIP failed */ 6590 if (sf_core && (sf_core & SF_CORE_LIP_FAILED)) { 6591 (void) soc_take_core(sf->sf_sochandle, 6592 sf->sf_socp); 6593 sf_core = 0; 6594 } 6595 #ifdef DEBUG 6596 /* are we allowing reset after LIP failed ?? */ 6597 if (sf_reset_flag != 0) { 6598 #endif 6599 /* restart socal after resetting it */ 6600 sf_log(sf, CE_NOTE, 6601 "!Force lip failed, status code 0x%x. Resetting\n", 6602 i); 6603 /* call transport to force a reset */ 6604 soc_force_reset(sf->sf_sochandle, sf->sf_socp, 6605 sf->sf_sochandle->fcal_portno, 1); 6606 #ifdef DEBUG 6607 } 6608 #endif 6609 } 6610 #ifdef DEBUG 6611 } 6612 #endif 6613 } 6614 6615 6616 /* 6617 * called by the transport when an unsolicited ELS is received 6618 */ 6619 static void 6620 sf_unsol_els_callback(void *arg, soc_response_t *srp, caddr_t payload) 6621 { 6622 struct sf *sf = (struct sf *)arg; 6623 els_payload_t *els = (els_payload_t *)payload; 6624 struct la_els_rjt *rsp; 6625 int i, tgt_id; 6626 uchar_t dest_id; 6627 struct fcal_packet *fpkt; 6628 fc_frame_header_t *hp; 6629 struct sf_els_hdr *privp; 6630 6631 6632 if ((els == NULL) || ((i = srp->sr_soc_hdr.sh_byte_cnt) == 0)) { 6633 return; 6634 } 6635 6636 if (i > SOC_CQE_PAYLOAD) { 6637 i = SOC_CQE_PAYLOAD; 6638 } 6639 6640 dest_id = (uchar_t)srp->sr_fc_frame_hdr.s_id; 6641 tgt_id = sf_alpa_to_switch[dest_id]; 6642 6643 switch (els->els_cmd.c.ls_command) { 6644 6645 case LA_ELS_LOGO: 6646 /* 6647 * logout received -- log the fact 6648 */ 6649 sf->sf_stats.tstats[tgt_id].logouts_recvd++; 6650 sf_log(sf, CE_NOTE, "!LOGO recvd from target %x, %s\n", 6651 tgt_id, 6652 sf_lip_on_plogo ? "Forcing LIP...."
: ""); 6653 if (sf_lip_on_plogo) { 6654 sf_force_lip(sf); 6655 } 6656 break; 6657 6658 default: /* includes LA_ELS_PLOGI */ 6659 /* 6660 * something besides a logout received -- we don't handle 6661 * this so send back a reject saying its unsupported 6662 */ 6663 6664 sf_log(sf, CE_NOTE, "!ELS 0x%x recvd from target 0x%x\n", 6665 els->els_cmd.c.ls_command, tgt_id); 6666 6667 6668 /* allocate room for a response */ 6669 if (sf_els_alloc(sf, dest_id, sizeof (struct sf_els_hdr), 6670 sizeof (struct la_els_rjt), sizeof (union sf_els_rsp), 6671 (caddr_t *)&privp, (caddr_t *)&rsp) == NULL) { 6672 break; 6673 } 6674 6675 fpkt = privp->fpkt; 6676 6677 /* fill in pkt header */ 6678 hp = &fpkt->fcal_socal_request.sr_fc_frame_hdr; 6679 hp->r_ctl = R_CTL_ELS_RSP; 6680 hp->f_ctl = F_CTL_LAST_SEQ | F_CTL_XCHG_CONTEXT; 6681 hp->ox_id = srp->sr_fc_frame_hdr.ox_id; 6682 hp->rx_id = srp->sr_fc_frame_hdr.rx_id; 6683 fpkt->fcal_socal_request.sr_cqhdr.cq_hdr_type = 6684 CQ_TYPE_OUTBOUND; 6685 6686 fpkt->fcal_socal_request.sr_soc_hdr.sh_seg_cnt = 1; 6687 6688 /* fill in response */ 6689 rsp->ls_code = LA_ELS_RJT; /* reject this ELS */ 6690 rsp->mbz[0] = 0; 6691 rsp->mbz[1] = 0; 6692 rsp->mbz[2] = 0; 6693 ((struct la_els_logi *)privp->rsp)->ls_code = LA_ELS_ACC; 6694 *((int *)&rsp->reserved) = 0; 6695 rsp->reason_code = RJT_UNSUPPORTED; 6696 privp->retries = sf_els_retries; 6697 privp->els_code = LA_ELS_RJT; 6698 privp->timeout = (unsigned)0xffffffff; 6699 (void) sf_els_transport(sf, privp); 6700 break; 6701 } 6702 } 6703 6704 6705 /* 6706 * Error logging, printing, and debug print routines 6707 */ 6708 6709 /*PRINTFLIKE3*/ 6710 static void 6711 sf_log(struct sf *sf, int level, const char *fmt, ...) 6712 { 6713 char buf[256]; 6714 dev_info_t *dip; 6715 va_list ap; 6716 6717 if (sf != NULL) { 6718 dip = sf->sf_dip; 6719 } else { 6720 dip = NULL; 6721 } 6722 6723 va_start(ap, fmt); 6724 (void) vsprintf(buf, fmt, ap); 6725 va_end(ap); 6726 scsi_log(dip, "sf", level, buf); 6727 } 6728 6729 6730 /* 6731 * called to get some sf kstats -- return 0 on success else return errno 6732 */ 6733 static int 6734 sf_kstat_update(kstat_t *ksp, int rw) 6735 { 6736 struct sf *sf; 6737 6738 if (rw == KSTAT_WRITE) { 6739 /* can't write */ 6740 return (EACCES); 6741 } 6742 6743 sf = ksp->ks_private; 6744 sf->sf_stats.ncmds = sf->sf_ncmds; 6745 sf->sf_stats.throttle_limit = sf->sf_throttle; 6746 sf->sf_stats.cr_pool_size = sf->sf_cr_pool_cnt; 6747 6748 return (0); /* success */ 6749 } 6750 6751 6752 /* 6753 * Unix Entry Points 6754 */ 6755 6756 /* 6757 * driver entry point for opens on control device 6758 */ 6759 /* ARGSUSED */ 6760 static int 6761 sf_open(dev_t *dev_p, int flag, int otyp, cred_t *cred_p) 6762 { 6763 dev_t dev = *dev_p; 6764 struct sf *sf; 6765 6766 6767 /* just ensure soft state exists for this device */ 6768 sf = ddi_get_soft_state(sf_state, SF_MINOR2INST(getminor(dev))); 6769 if (sf == NULL) { 6770 return (ENXIO); 6771 } 6772 6773 ++(sf->sf_check_n_close); 6774 6775 return (0); 6776 } 6777 6778 6779 /* 6780 * driver entry point for last close on control device 6781 */ 6782 /* ARGSUSED */ 6783 static int 6784 sf_close(dev_t dev, int flag, int otyp, cred_t *cred_p) 6785 { 6786 struct sf *sf; 6787 6788 sf = ddi_get_soft_state(sf_state, SF_MINOR2INST(getminor(dev))); 6789 if (sf == NULL) { 6790 return (ENXIO); 6791 } 6792 6793 if (!sf->sf_check_n_close) { /* if this flag is zero */ 6794 cmn_err(CE_WARN, "sf%d: trying to close unopened instance", 6795 SF_MINOR2INST(getminor(dev))); 6796 return (ENODEV); 6797 } else { 
6798 --(sf->sf_check_n_close); 6799 } 6800 return (0); 6801 } 6802 6803 6804 /* 6805 * driver entry point for sf ioctl commands 6806 */ 6807 /* ARGSUSED */ 6808 static int 6809 sf_ioctl(dev_t dev, 6810 int cmd, intptr_t arg, int mode, cred_t *cred_p, int *rval_p) 6811 { 6812 struct sf *sf; 6813 struct sf_target *target; 6814 uchar_t al_pa; 6815 struct sf_al_map map; 6816 int cnt, i; 6817 int retval; /* return value */ 6818 struct devctl_iocdata *dcp; 6819 dev_info_t *cdip; 6820 struct scsi_address ap; 6821 scsi_hba_tran_t *tran; 6822 6823 6824 sf = ddi_get_soft_state(sf_state, SF_MINOR2INST(getminor(dev))); 6825 if (sf == NULL) { 6826 return (ENXIO); 6827 } 6828 6829 /* handle all ioctls */ 6830 switch (cmd) { 6831 6832 /* 6833 * We can use the generic implementation for these ioctls 6834 */ 6835 case DEVCTL_DEVICE_GETSTATE: 6836 case DEVCTL_DEVICE_ONLINE: 6837 case DEVCTL_DEVICE_OFFLINE: 6838 case DEVCTL_BUS_GETSTATE: 6839 return (ndi_devctl_ioctl(sf->sf_dip, cmd, arg, mode, 0)); 6840 6841 /* 6842 * return FC map 6843 */ 6844 case SFIOCGMAP: 6845 if ((sf->sf_lilp_map->lilp_magic != FCAL_LILP_MAGIC && 6846 sf->sf_lilp_map->lilp_magic != FCAL_BADLILP_MAGIC) || 6847 sf->sf_state != SF_STATE_ONLINE) { 6848 retval = ENOENT; 6849 goto dun; 6850 } 6851 mutex_enter(&sf->sf_mutex); 6852 if (sf->sf_lilp_map->lilp_magic == FCAL_BADLILP_MAGIC) { 6853 int i, j = 0; 6854 6855 /* Need to generate a fake lilp map */ 6856 for (i = 0; i < sf_max_targets; i++) { 6857 if (sf->sf_targets[i]) 6858 sf->sf_lilp_map->lilp_alpalist[j++] = 6859 sf->sf_targets[i]-> 6860 sft_hard_address; 6861 } 6862 sf->sf_lilp_map->lilp_length = (uchar_t)j; 6863 } 6864 cnt = sf->sf_lilp_map->lilp_length; 6865 map.sf_count = (short)cnt; 6866 bcopy((caddr_t)&sf->sf_sochandle->fcal_n_wwn, 6867 (caddr_t)&map.sf_hba_addr.sf_node_wwn, 6868 sizeof (la_wwn_t)); 6869 bcopy((caddr_t)&sf->sf_sochandle->fcal_p_wwn, 6870 (caddr_t)&map.sf_hba_addr.sf_port_wwn, 6871 sizeof (la_wwn_t)); 6872 map.sf_hba_addr.sf_al_pa = sf->sf_al_pa; 6873 map.sf_hba_addr.sf_hard_address = 0; 6874 map.sf_hba_addr.sf_inq_dtype = DTYPE_UNKNOWN; 6875 for (i = 0; i < cnt; i++) { 6876 al_pa = sf->sf_lilp_map->lilp_alpalist[i]; 6877 map.sf_addr_pair[i].sf_al_pa = al_pa; 6878 if (al_pa == sf->sf_al_pa) { 6879 (void) bcopy((caddr_t)&sf->sf_sochandle 6880 ->fcal_n_wwn, (caddr_t)&map. 6881 sf_addr_pair[i].sf_node_wwn, 6882 sizeof (la_wwn_t)); 6883 (void) bcopy((caddr_t)&sf->sf_sochandle 6884 ->fcal_p_wwn, (caddr_t)&map. 6885 sf_addr_pair[i].sf_port_wwn, 6886 sizeof (la_wwn_t)); 6887 map.sf_addr_pair[i].sf_hard_address = 6888 al_pa; 6889 map.sf_addr_pair[i].sf_inq_dtype = 6890 DTYPE_PROCESSOR; 6891 continue; 6892 } 6893 target = sf->sf_targets[sf_alpa_to_switch[ 6894 al_pa]]; 6895 if (target != NULL) { 6896 mutex_enter(&target->sft_mutex); 6897 if (!(target->sft_state & 6898 (SF_TARGET_OFFLINE | 6899 SF_TARGET_BUSY))) { 6900 bcopy((caddr_t)&target-> 6901 sft_node_wwn, 6902 (caddr_t)&map.sf_addr_pair 6903 [i].sf_node_wwn, 6904 sizeof (la_wwn_t)); 6905 bcopy((caddr_t)&target-> 6906 sft_port_wwn, 6907 (caddr_t)&map.sf_addr_pair 6908 [i].sf_port_wwn, 6909 sizeof (la_wwn_t)); 6910 map.sf_addr_pair[i]. 6911 sf_hard_address 6912 = target->sft_hard_address; 6913 map.sf_addr_pair[i]. 6914 sf_inq_dtype 6915 = target->sft_device_type; 6916 mutex_exit(&target->sft_mutex); 6917 continue; 6918 } 6919 mutex_exit(&target->sft_mutex); 6920 } 6921 bzero((caddr_t)&map.sf_addr_pair[i]. 6922 sf_node_wwn, sizeof (la_wwn_t)); 6923 bzero((caddr_t)&map.sf_addr_pair[i]. 
6924 sf_port_wwn, sizeof (la_wwn_t)); 6925 map.sf_addr_pair[i].sf_inq_dtype = 6926 DTYPE_UNKNOWN; 6927 } 6928 mutex_exit(&sf->sf_mutex); 6929 if (ddi_copyout((caddr_t)&map, (caddr_t)arg, 6930 sizeof (struct sf_al_map), mode) != 0) { 6931 retval = EFAULT; 6932 goto dun; 6933 } 6934 break; 6935 6936 /* 6937 * handle device control ioctls 6938 */ 6939 case DEVCTL_DEVICE_RESET: 6940 if (ndi_dc_allochdl((void *)arg, &dcp) != NDI_SUCCESS) { 6941 retval = EFAULT; 6942 goto dun; 6943 } 6944 if ((ndi_dc_getname(dcp) == NULL) || 6945 (ndi_dc_getaddr(dcp) == NULL)) { 6946 ndi_dc_freehdl(dcp); 6947 retval = EINVAL; 6948 goto dun; 6949 } 6950 cdip = ndi_devi_find(sf->sf_dip, 6951 ndi_dc_getname(dcp), ndi_dc_getaddr(dcp)); 6952 ndi_dc_freehdl(dcp); 6953 6954 if (cdip == NULL) { 6955 retval = ENXIO; 6956 goto dun; 6957 } 6958 6959 if ((target = sf_get_target_from_dip(sf, cdip)) == NULL) { 6960 retval = ENXIO; 6961 goto dun; 6962 } 6963 mutex_enter(&target->sft_mutex); 6964 if (!(target->sft_state & SF_TARGET_INIT_DONE)) { 6965 mutex_exit(&target->sft_mutex); 6966 retval = ENXIO; 6967 goto dun; 6968 } 6969 6970 /* This is ugly */ 6971 tran = kmem_zalloc(scsi_hba_tran_size(), KM_SLEEP); 6972 bcopy(target->sft_tran, tran, scsi_hba_tran_size()); 6973 mutex_exit(&target->sft_mutex); 6974 ap.a_hba_tran = tran; 6975 ap.a_target = sf_alpa_to_switch[target->sft_al_pa]; 6976 if (sf_reset(&ap, RESET_TARGET) == FALSE) { 6977 retval = EIO; 6978 } else { 6979 retval = 0; 6980 } 6981 kmem_free(tran, scsi_hba_tran_size()); 6982 goto dun; 6983 6984 case DEVCTL_BUS_QUIESCE: 6985 case DEVCTL_BUS_UNQUIESCE: 6986 retval = ENOTSUP; 6987 goto dun; 6988 6989 case DEVCTL_BUS_RESET: 6990 case DEVCTL_BUS_RESETALL: 6991 sf_force_lip(sf); 6992 break; 6993 6994 default: 6995 retval = ENOTTY; 6996 goto dun; 6997 } 6998 6999 retval = 0; /* success */ 7000 7001 dun: 7002 return (retval); 7003 } 7004 7005 7006 /* 7007 * get the target given a DIP 7008 */ 7009 static struct sf_target * 7010 sf_get_target_from_dip(struct sf *sf, dev_info_t *dip) 7011 { 7012 int i; 7013 struct sf_target *target; 7014 7015 7016 /* scan each hash queue for the DIP in question */ 7017 for (i = 0; i < SF_NUM_HASH_QUEUES; i++) { 7018 target = sf->sf_wwn_lists[i]; 7019 while (target != NULL) { 7020 if (target->sft_dip == dip) { 7021 return (target); /* success: target found */ 7022 } 7023 target = target->sft_next; 7024 } 7025 } 7026 return (NULL); /* failure: target not found */ 7027 } 7028 7029 7030 /* 7031 * called by the transport to get an event cookie 7032 */ 7033 static int 7034 sf_bus_get_eventcookie(dev_info_t *dip, dev_info_t *rdip, char *name, 7035 ddi_eventcookie_t *event_cookiep) 7036 { 7037 struct sf *sf; 7038 7039 sf = ddi_get_soft_state(sf_state, ddi_get_instance(dip)); 7040 if (sf == NULL) { 7041 /* can't find instance for this device */ 7042 return (DDI_FAILURE); 7043 } 7044 7045 return (ndi_event_retrieve_cookie(sf->sf_event_hdl, rdip, name, 7046 event_cookiep, NDI_EVENT_NOPASS)); 7047 7048 } 7049 7050 7051 /* 7052 * called by the transport to add an event callback 7053 */ 7054 static int 7055 sf_bus_add_eventcall(dev_info_t *dip, dev_info_t *rdip, 7056 ddi_eventcookie_t eventid, void (*callback)(dev_info_t *dip, 7057 ddi_eventcookie_t event, void *arg, void *impl_data), void *arg, 7058 ddi_callback_id_t *cb_id) 7059 { 7060 struct sf *sf; 7061 7062 sf = ddi_get_soft_state(sf_state, ddi_get_instance(dip)); 7063 if (sf == NULL) { 7064 /* can't find instance for this device */ 7065 return (DDI_FAILURE); 7066 } 7067 7068 return 
(ndi_event_add_callback(sf->sf_event_hdl, rdip, 7069 eventid, callback, arg, NDI_SLEEP, cb_id)); 7070 7071 } 7072 7073 7074 /* 7075 * called by the transport to remove an event callback 7076 */ 7077 static int 7078 sf_bus_remove_eventcall(dev_info_t *devi, ddi_callback_id_t cb_id) 7079 { 7080 struct sf *sf; 7081 7082 sf = ddi_get_soft_state(sf_state, ddi_get_instance(devi)); 7083 if (sf == NULL) { 7084 /* can't find instance for this device */ 7085 return (DDI_FAILURE); 7086 } 7087 7088 return (ndi_event_remove_callback(sf->sf_event_hdl, cb_id)); 7089 } 7090 7091 7092 /* 7093 * called by the transport to post an event 7094 */ 7095 static int 7096 sf_bus_post_event(dev_info_t *dip, dev_info_t *rdip, 7097 ddi_eventcookie_t eventid, void *impldata) 7098 { 7099 ddi_eventcookie_t remove_cookie, cookie; 7100 7101 /* is this a remove event ?? */ 7102 struct sf *sf = ddi_get_soft_state(sf_state, ddi_get_instance(dip)); 7103 remove_cookie = ndi_event_tag_to_cookie(sf->sf_event_hdl, 7104 SF_EVENT_TAG_REMOVE); 7105 7106 if (remove_cookie == eventid) { 7107 struct sf_target *target; 7108 7109 /* handle remove event */ 7110 7111 if (sf == NULL) { 7112 /* no sf instance for this device */ 7113 return (NDI_FAILURE); 7114 } 7115 7116 /* get the target for this event */ 7117 if ((target = sf_get_target_from_dip(sf, rdip)) != NULL) { 7118 /* 7119 * clear device info for this target and mark as 7120 * not done 7121 */ 7122 mutex_enter(&target->sft_mutex); 7123 target->sft_dip = NULL; 7124 target->sft_state &= ~SF_TARGET_INIT_DONE; 7125 mutex_exit(&target->sft_mutex); 7126 return (NDI_SUCCESS); /* event handled */ 7127 } 7128 7129 /* no target for this event */ 7130 return (NDI_FAILURE); 7131 } 7132 7133 /* an insertion event */ 7134 if (ndi_busop_get_eventcookie(dip, rdip, FCAL_INSERT_EVENT, &cookie) 7135 != NDI_SUCCESS) { 7136 return (NDI_FAILURE); 7137 } 7138 7139 return (ndi_post_event(dip, rdip, cookie, impldata)); 7140 } 7141 7142 7143 /* 7144 * the sf hotplug daemon, one thread per sf instance 7145 */ 7146 static void 7147 sf_hp_daemon(void *arg) 7148 { 7149 struct sf *sf = (struct sf *)arg; 7150 struct sf_hp_elem *elem; 7151 struct sf_target *target; 7152 int tgt_id; 7153 callb_cpr_t cprinfo; 7154 7155 CALLB_CPR_INIT(&cprinfo, &sf->sf_hp_daemon_mutex, 7156 callb_generic_cpr, "sf_hp_daemon"); 7157 7158 mutex_enter(&sf->sf_hp_daemon_mutex); 7159 7160 do { 7161 while (sf->sf_hp_elem_head != NULL) { 7162 7163 /* save ptr to head of list */ 7164 elem = sf->sf_hp_elem_head; 7165 7166 /* take element off of list */ 7167 if (sf->sf_hp_elem_head == sf->sf_hp_elem_tail) { 7168 /* element only one in list -- list now empty */ 7169 sf->sf_hp_elem_head = NULL; 7170 sf->sf_hp_elem_tail = NULL; 7171 } else { 7172 /* remove element from head of list */ 7173 sf->sf_hp_elem_head = sf->sf_hp_elem_head->next; 7174 } 7175 7176 mutex_exit(&sf->sf_hp_daemon_mutex); 7177 7178 switch (elem->what) { 7179 case SF_ONLINE: 7180 /* online this target */ 7181 target = elem->target; 7182 (void) ndi_devi_online(elem->dip, 0); 7183 (void) ndi_event_retrieve_cookie( 7184 sf->sf_event_hdl, 7185 target->sft_dip, FCAL_INSERT_EVENT, 7186 &sf_insert_eid, NDI_EVENT_NOPASS); 7187 (void) ndi_event_run_callbacks(sf->sf_event_hdl, 7188 target->sft_dip, sf_insert_eid, NULL); 7189 break; 7190 case SF_OFFLINE: 7191 /* offline this target */ 7192 target = elem->target; 7193 tgt_id = sf_alpa_to_switch[target->sft_al_pa]; 7194 /* don't do NDI_DEVI_REMOVE for now */ 7195 if (ndi_devi_offline(elem->dip, 0) != 7196 NDI_SUCCESS) { 7197 SF_DEBUG(1, (sf, 
CE_WARN, "target %x, " 7198 "device offline failed", tgt_id)); 7199 } else { 7200 SF_DEBUG(1, (sf, CE_NOTE, "target %x, " 7201 "device offline succeeded\n", 7202 tgt_id)); 7203 } 7204 break; 7205 } 7206 kmem_free(elem, sizeof (struct sf_hp_elem)); 7207 mutex_enter(&sf->sf_hp_daemon_mutex); 7208 } 7209 7210 /* if exit is not already signaled */ 7211 if (sf->sf_hp_exit == 0) { 7212 /* wait to be signaled by work or exit */ 7213 CALLB_CPR_SAFE_BEGIN(&cprinfo); 7214 cv_wait(&sf->sf_hp_daemon_cv, &sf->sf_hp_daemon_mutex); 7215 CALLB_CPR_SAFE_END(&cprinfo, &sf->sf_hp_daemon_mutex); 7216 } 7217 } while (sf->sf_hp_exit == 0); 7218 7219 /* sf_hp_daemon_mutex is dropped by CALLB_CPR_EXIT */ 7220 CALLB_CPR_EXIT(&cprinfo); 7221 thread_exit(); /* no more hotplug thread */ 7222 /* NOTREACHED */ 7223 } 7224