/*-
 * Platform (FreeBSD) dependent common attachment code for Qlogic adapters.
 *
 * Copyright (c) 1997-2006 by Matthew Jacob
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <dev/isp/isp_freebsd.h>
#include <sys/unistd.h>
#include <sys/kthread.h>
#include <machine/stdarg.h>	/* for use by isp_prt below */
#include <sys/conf.h>
#include <sys/module.h>
#include <sys/ioccom.h>
#include <dev/isp/isp_ioctl.h>


MODULE_VERSION(isp, 1);
MODULE_DEPEND(isp, cam, 1, 1, 1);
int isp_announced = 0;

static d_ioctl_t ispioctl;
static void isp_intr_enable(void *);
static void isp_cam_async(void *, uint32_t, struct cam_path *, void *);
static void isp_poll(struct cam_sim *);
static timeout_t isp_watchdog;
static void isp_kthread(void *);
static void isp_action(struct cam_sim *, union ccb *);


#if __FreeBSD_version < 500000
#define	ISP_CDEV_MAJOR	248
static struct cdevsw isp_cdevsw = {
	/* open */	nullopen,
	/* close */	nullclose,
	/* read */	noread,
	/* write */	nowrite,
	/* ioctl */	ispioctl,
	/* poll */	nopoll,
	/* mmap */	nommap,
	/* strategy */	nostrategy,
	/* name */	"isp",
	/* maj */	ISP_CDEV_MAJOR,
	/* dump */	nodump,
	/* psize */	nopsize,
	/* flags */	D_TAPE,
};
#else
static struct cdevsw isp_cdevsw = {
	.d_version =	D_VERSION,
	.d_flags =	D_NEEDGIANT,
	.d_ioctl =	ispioctl,
	.d_name =	"isp",
};
#endif

static ispsoftc_t *isplist = NULL;

void
isp_attach(ispsoftc_t *isp)
{
	int primary, secondary;
	struct ccb_setasync csa;
	struct cam_devq *devq;
	struct cam_sim *sim;
	struct cam_path *path;

	/*
	 * Establish (in case of 12X0) which bus is the primary.
	 */

	primary = 0;
	secondary = 1;

	/*
	 * Create the device queue for our SIM(s).
	 */
	devq = cam_simq_alloc(isp->isp_maxcmds);
	if (devq == NULL) {
		return;
	}

	/*
	 * Construct our SIM entry.
	 */
	ISPLOCK_2_CAMLOCK(isp);
	sim = cam_sim_alloc(isp_action, isp_poll, "isp", isp,
	    device_get_unit(isp->isp_dev), 1, isp->isp_maxcmds, devq);
	if (sim == NULL) {
		cam_simq_free(devq);
		CAMLOCK_2_ISPLOCK(isp);
		return;
	}
	CAMLOCK_2_ISPLOCK(isp);

	isp->isp_osinfo.ehook.ich_func = isp_intr_enable;
	isp->isp_osinfo.ehook.ich_arg = isp;
	ISPLOCK_2_CAMLOCK(isp);
	if (config_intrhook_establish(&isp->isp_osinfo.ehook) != 0) {
		cam_sim_free(sim, TRUE);
		CAMLOCK_2_ISPLOCK(isp);
		isp_prt(isp, ISP_LOGERR,
		    "could not establish interrupt enable hook");
		return;
	}

	if (xpt_bus_register(sim, primary) != CAM_SUCCESS) {
		cam_sim_free(sim, TRUE);
		CAMLOCK_2_ISPLOCK(isp);
		return;
	}

	if (xpt_create_path(&path, NULL, cam_sim_path(sim),
	    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
		xpt_bus_deregister(cam_sim_path(sim));
		cam_sim_free(sim, TRUE);
		config_intrhook_disestablish(&isp->isp_osinfo.ehook);
		CAMLOCK_2_ISPLOCK(isp);
		return;
	}

	xpt_setup_ccb(&csa.ccb_h, path, 5);
	csa.ccb_h.func_code = XPT_SASYNC_CB;
	csa.event_enable = AC_LOST_DEVICE;
	csa.callback = isp_cam_async;
	csa.callback_arg = sim;
	xpt_action((union ccb *)&csa);
	CAMLOCK_2_ISPLOCK(isp);
	isp->isp_sim = sim;
	isp->isp_path = path;
	/*
	 * Create a kernel thread for fibre channel instances. We
	 * don't have dual channel FC cards.
	 */
	if (IS_FC(isp)) {
		ISPLOCK_2_CAMLOCK(isp);
#if __FreeBSD_version >= 500000
		/* XXX: LOCK VIOLATION */
		cv_init(&isp->isp_osinfo.kthread_cv, "isp_kthread_cv");
		if (kthread_create(isp_kthread, isp, &isp->isp_osinfo.kproc,
		    RFHIGHPID, 0, "%s: fc_thrd",
		    device_get_nameunit(isp->isp_dev)))
#else
		if (kthread_create(isp_kthread, isp, &isp->isp_osinfo.kproc,
		    "%s: fc_thrd", device_get_nameunit(isp->isp_dev)))
#endif
		{
			xpt_bus_deregister(cam_sim_path(sim));
			cam_sim_free(sim, TRUE);
			config_intrhook_disestablish(&isp->isp_osinfo.ehook);
			CAMLOCK_2_ISPLOCK(isp);
			isp_prt(isp, ISP_LOGERR, "could not create kthread");
			return;
		}
		CAMLOCK_2_ISPLOCK(isp);
	}


	/*
	 * If we have a second channel, construct SIM entry for that.
	 */
	if (IS_DUALBUS(isp)) {
		ISPLOCK_2_CAMLOCK(isp);
		sim = cam_sim_alloc(isp_action, isp_poll, "isp", isp,
		    device_get_unit(isp->isp_dev), 1, isp->isp_maxcmds, devq);
		if (sim == NULL) {
			xpt_bus_deregister(cam_sim_path(isp->isp_sim));
			xpt_free_path(isp->isp_path);
			cam_simq_free(devq);
			config_intrhook_disestablish(&isp->isp_osinfo.ehook);
			return;
		}
		if (xpt_bus_register(sim, secondary) != CAM_SUCCESS) {
			xpt_bus_deregister(cam_sim_path(isp->isp_sim));
			xpt_free_path(isp->isp_path);
			cam_sim_free(sim, TRUE);
			config_intrhook_disestablish(&isp->isp_osinfo.ehook);
			CAMLOCK_2_ISPLOCK(isp);
			return;
		}

		if (xpt_create_path(&path, NULL, cam_sim_path(sim),
		    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
			xpt_bus_deregister(cam_sim_path(isp->isp_sim));
			xpt_free_path(isp->isp_path);
			xpt_bus_deregister(cam_sim_path(sim));
			cam_sim_free(sim, TRUE);
			config_intrhook_disestablish(&isp->isp_osinfo.ehook);
			CAMLOCK_2_ISPLOCK(isp);
			return;
		}

		xpt_setup_ccb(&csa.ccb_h, path, 5);
		csa.ccb_h.func_code = XPT_SASYNC_CB;
		csa.event_enable = AC_LOST_DEVICE;
		csa.callback = isp_cam_async;
		csa.callback_arg = sim;
		xpt_action((union ccb *)&csa);
		CAMLOCK_2_ISPLOCK(isp);
		isp->isp_sim2 = sim;
		isp->isp_path2 = path;
	}

	/*
	 * Create device nodes
	 */
	(void) make_dev(&isp_cdevsw, device_get_unit(isp->isp_dev), UID_ROOT,
	    GID_OPERATOR, 0600, "%s", device_get_nameunit(isp->isp_dev));

	if (isp->isp_role != ISP_ROLE_NONE) {
		isp->isp_state = ISP_RUNSTATE;
		ENABLE_INTS(isp);
	}
	if (isplist == NULL) {
		isplist = isp;
	} else {
		ispsoftc_t *tmp = isplist;
		while (tmp->isp_osinfo.next) {
			tmp = tmp->isp_osinfo.next;
		}
		tmp->isp_osinfo.next = isp;
	}

}

static __inline void
isp_freeze_loopdown(ispsoftc_t *isp, char *msg)
{
	if (isp->isp_osinfo.simqfrozen == 0) {
		isp_prt(isp, ISP_LOGDEBUG0, "%s: freeze simq (loopdown)", msg);
		isp->isp_osinfo.simqfrozen |= SIMQFRZ_LOOPDOWN;
		ISPLOCK_2_CAMLOCK(isp);
		xpt_freeze_simq(isp->isp_sim, 1);
		CAMLOCK_2_ISPLOCK(isp);
	} else {
		isp_prt(isp, ISP_LOGDEBUG0, "%s: mark frozen (loopdown)", msg);
		isp->isp_osinfo.simqfrozen |= SIMQFRZ_LOOPDOWN;
	}
}


#if __FreeBSD_version < 500000
#define	_DEV	dev_t
#define	_IOP	struct proc
#else
#define	_IOP	struct thread
#define	_DEV	struct cdev *
#endif

static int
ispioctl(_DEV dev, u_long c, caddr_t addr, int flags, _IOP *td)
{
	ispsoftc_t *isp;
	int nr, retval = ENOTTY;

	isp = isplist;
	while (isp) {
		if (minor(dev) == device_get_unit(isp->isp_dev)) {
			break;
		}
		isp = isp->isp_osinfo.next;
	}
	if (isp == NULL)
		return (ENXIO);

	switch (c) {
#ifdef ISP_FW_CRASH_DUMP
	case ISP_GET_FW_CRASH_DUMP:
		if (IS_FC(isp)) {
			uint16_t *ptr = FCPARAM(isp)->isp_dump_data;
			size_t sz;

			retval = 0;
			if (IS_2200(isp)) {
				sz = QLA2200_RISC_IMAGE_DUMP_SIZE;
			} else {
				sz = QLA2300_RISC_IMAGE_DUMP_SIZE;
			}
			ISP_LOCK(isp);
			if (ptr && *ptr) {
				void *uaddr = *((void **) addr);
				if (copyout(ptr, uaddr, sz)) {
					retval = EFAULT;
				} else {
					*ptr = 0;
				}
			} else {
				retval = ENXIO;
			}
			ISP_UNLOCK(isp);
		}
		break;
	case ISP_FORCE_CRASH_DUMP:
		if (IS_FC(isp)) {
			ISP_LOCK(isp);
			isp_freeze_loopdown(isp,
			    "ispioctl(ISP_FORCE_CRASH_DUMP)");
			isp_fw_dump(isp);
			isp_reinit(isp);
			ISP_UNLOCK(isp);
			retval = 0;
		}
		break;
#endif
	case ISP_SDBLEV:
	{
		int olddblev = isp->isp_dblev;
		isp->isp_dblev = *(int *)addr;
		*(int *)addr = olddblev;
		retval = 0;
		break;
	}
	case ISP_GETROLE:
		*(int *)addr = isp->isp_role;
		retval = 0;
		break;
	case ISP_SETROLE:
		nr = *(int *)addr;
		if (nr & ~(ISP_ROLE_INITIATOR|ISP_ROLE_TARGET)) {
			retval = EINVAL;
			break;
		}
		*(int *)addr = isp->isp_role;
		isp->isp_role = nr;
		/* FALLTHROUGH */
	case ISP_RESETHBA:
		ISP_LOCK(isp);
		isp_reinit(isp);
		ISP_UNLOCK(isp);
		retval = 0;
		break;
	case ISP_RESCAN:
		if (IS_FC(isp)) {
			ISP_LOCK(isp);
			if (isp_fc_runstate(isp, 5 * 1000000)) {
				retval = EIO;
			} else {
				retval = 0;
			}
			ISP_UNLOCK(isp);
		}
		break;
	case ISP_FC_LIP:
		if (IS_FC(isp)) {
			ISP_LOCK(isp);
			if (isp_control(isp, ISPCTL_SEND_LIP, 0)) {
				retval = EIO;
			} else {
				retval = 0;
			}
			ISP_UNLOCK(isp);
		}
		break;
	case ISP_FC_GETDINFO:
	{
		struct isp_fc_device *ifc = (struct isp_fc_device *) addr;
		struct lportdb *lp;

		if (IS_SCSI(isp)) {
			break;
		}
		if (ifc->loopid < 0 || ifc->loopid >= MAX_FC_TARG) {
			retval = EINVAL;
			break;
		}
		ISP_LOCK(isp);
		lp = &FCPARAM(isp)->portdb[ifc->loopid];
		if (lp->valid) {
			ifc->role = lp->roles;
			ifc->loopid = lp->loopid;
			ifc->portid = lp->portid;
			ifc->node_wwn = lp->node_wwn;
			ifc->port_wwn = lp->port_wwn;
			retval = 0;
		} else {
			retval = ENODEV;
		}
		ISP_UNLOCK(isp);
		break;
	}
	case ISP_GET_STATS:
	{
		isp_stats_t *sp = (isp_stats_t *) addr;

		MEMZERO(sp, sizeof (*sp));
		sp->isp_stat_version = ISP_STATS_VERSION;
		sp->isp_type = isp->isp_type;
		sp->isp_revision = isp->isp_revision;
		ISP_LOCK(isp);
		sp->isp_stats[ISP_INTCNT] = isp->isp_intcnt;
		sp->isp_stats[ISP_INTBOGUS] = isp->isp_intbogus;
		sp->isp_stats[ISP_INTMBOXC] = isp->isp_intmboxc;
		sp->isp_stats[ISP_INGOASYNC] = isp->isp_intoasync;
		sp->isp_stats[ISP_RSLTCCMPLT] = isp->isp_rsltccmplt;
		sp->isp_stats[ISP_FPHCCMCPLT] = isp->isp_fphccmplt;
		sp->isp_stats[ISP_RSCCHIWAT] = isp->isp_rscchiwater;
		sp->isp_stats[ISP_FPCCHIWAT] = isp->isp_fpcchiwater;
		ISP_UNLOCK(isp);
		retval = 0;
		break;
	}
	case ISP_CLR_STATS:
		ISP_LOCK(isp);
		isp->isp_intcnt = 0;
		isp->isp_intbogus = 0;
		isp->isp_intmboxc = 0;
		isp->isp_intoasync = 0;
		isp->isp_rsltccmplt = 0;
		isp->isp_fphccmplt = 0;
		isp->isp_rscchiwater = 0;
		isp->isp_fpcchiwater = 0;
		ISP_UNLOCK(isp);
		retval = 0;
		break;
	case ISP_FC_GETHINFO:
	{
		struct isp_hba_device *hba = (struct isp_hba_device *) addr;
		MEMZERO(hba, sizeof (*hba));

		hba->fc_fw_major = ISP_FW_MAJORX(isp->isp_fwrev);
		hba->fc_fw_minor = ISP_FW_MINORX(isp->isp_fwrev);
		hba->fc_fw_micro = ISP_FW_MICROX(isp->isp_fwrev);
		if (IS_FC(isp)) {
			hba->fc_speed = FCPARAM(isp)->isp_gbspeed;
			hba->fc_scsi_supported = 1;
			hba->fc_topology = FCPARAM(isp)->isp_topo + 1;
			hba->fc_loopid = FCPARAM(isp)->isp_loopid;
			hba->nvram_node_wwn = FCPARAM(isp)->isp_nodewwn;
			hba->nvram_port_wwn = FCPARAM(isp)->isp_portwwn;
			hba->active_node_wwn = ISP_NODEWWN(isp);
			hba->active_port_wwn = ISP_PORTWWN(isp);
		}
		retval = 0;
		break;
	}
	case ISP_GET_FC_PARAM:
	{
		struct isp_fc_param *f =
		    (struct isp_fc_param *) addr;

		if (IS_SCSI(isp)) {
			break;
		}
		f->parameter = 0;
		if (strcmp(f->param_name, "framelength") == 0) {
			f->parameter = FCPARAM(isp)->isp_maxfrmlen;
			retval = 0;
			break;
		}
		if (strcmp(f->param_name, "exec_throttle") == 0) {
			f->parameter = FCPARAM(isp)->isp_execthrottle;
			retval = 0;
			break;
		}
		if (strcmp(f->param_name, "fullduplex") == 0) {
			if (FCPARAM(isp)->isp_fwoptions & ICBOPT_FULL_DUPLEX)
				f->parameter = 1;
			retval = 0;
			break;
		}
		if (strcmp(f->param_name, "loopid") == 0) {
			f->parameter = FCPARAM(isp)->isp_loopid;
			retval = 0;
			break;
		}
		retval = EINVAL;
		break;
	}
	case ISP_SET_FC_PARAM:
	{
		struct isp_fc_param *f = (struct isp_fc_param *) addr;
		uint32_t param = f->parameter;

		if (IS_SCSI(isp)) {
			break;
		}
		f->parameter = 0;
		if (strcmp(f->param_name, "framelength") == 0) {
			if (param != 512 && param != 1024 && param != 2048) {
				retval = EINVAL;
				break;
			}
			FCPARAM(isp)->isp_maxfrmlen = param;
			retval = 0;
			break;
		}
		if (strcmp(f->param_name, "exec_throttle") == 0) {
			if (param < 16 || param > 255) {
				retval = EINVAL;
				break;
			}
			FCPARAM(isp)->isp_execthrottle = param;
			retval = 0;
			break;
		}
		if (strcmp(f->param_name, "fullduplex") == 0) {
			if (param != 0 && param != 1) {
				retval = EINVAL;
				break;
			}
			if (param) {
				FCPARAM(isp)->isp_fwoptions |=
				    ICBOPT_FULL_DUPLEX;
			} else {
				FCPARAM(isp)->isp_fwoptions &=
				    ~ICBOPT_FULL_DUPLEX;
			}
			retval = 0;
			break;
		}
		if (strcmp(f->param_name, "loopid") == 0) {
			if (param < 0 || param > 125) {
				retval = EINVAL;
				break;
			}
			FCPARAM(isp)->isp_loopid = param;
			retval = 0;
			break;
		}
		retval = EINVAL;
		break;
	}
	case ISP_TSK_MGMT:
	{
		int needmarker;
		struct isp_fc_tsk_mgmt *fct = (struct isp_fc_tsk_mgmt *) addr;
		uint16_t loopid;
		mbreg_t mbs;

		if (IS_SCSI(isp)) {
			break;
		}

		memset(&mbs, 0, sizeof (mbs));
		needmarker = retval = 0;
		loopid = fct->loopid;
		if (IS_2KLOGIN(isp) == 0) {
			loopid <<= 8;
		}
		switch (fct->action) {
		case CLEAR_ACA:
			mbs.param[0] = MBOX_CLEAR_ACA;
			mbs.param[1] = loopid;
			mbs.param[2] = fct->lun;
			break;
		case TARGET_RESET:
			mbs.param[0] = MBOX_TARGET_RESET;
			mbs.param[1] = loopid;
			needmarker = 1;
			break;
		case LUN_RESET:
			mbs.param[0] = MBOX_LUN_RESET;
			mbs.param[1] = loopid;
			mbs.param[2] = fct->lun;
			needmarker = 1;
			break;
		case CLEAR_TASK_SET:
			mbs.param[0] = MBOX_CLEAR_TASK_SET;
			mbs.param[1] = loopid;
			mbs.param[2] = fct->lun;
			needmarker = 1;
			break;
		case ABORT_TASK_SET:
			mbs.param[0] = MBOX_ABORT_TASK_SET;
			mbs.param[1] = loopid;
			mbs.param[2] = fct->lun;
			needmarker = 1;
			break;
		default:
			retval = EINVAL;
			break;
		}
		if (retval == 0) {
			ISP_LOCK(isp);
			if (needmarker) {
				isp->isp_sendmarker |= 1;
			}
			retval = isp_control(isp, ISPCTL_RUN_MBOXCMD, &mbs);
			ISP_UNLOCK(isp);
			if (retval)
				retval = EIO;
		}
		break;
	}
	default:
		break;
	}
	return (retval);
}

static void
isp_intr_enable(void *arg)
{
	ispsoftc_t *isp = arg;
	if (isp->isp_role != ISP_ROLE_NONE) {
		ENABLE_INTS(isp);
#if 0
		isp->isp_osinfo.intsok = 1;
#endif
	}
	/* Release our hook so that the boot can continue. */
	config_intrhook_disestablish(&isp->isp_osinfo.ehook);
}

/*
 * Put the target mode functions here, because some are inlines
 */

#ifdef ISP_TARGET_MODE

static __inline int is_lun_enabled(ispsoftc_t *, int, lun_id_t);
static __inline int are_any_luns_enabled(ispsoftc_t *, int);
static __inline tstate_t *get_lun_statep(ispsoftc_t *, int, lun_id_t);
static __inline void rls_lun_statep(ispsoftc_t *, tstate_t *);
static __inline atio_private_data_t *isp_get_atpd(ispsoftc_t *, int);
static cam_status
create_lun_state(ispsoftc_t *, int, struct cam_path *, tstate_t **);
static void destroy_lun_state(ispsoftc_t *, tstate_t *);
static int isp_en_lun(ispsoftc_t *, union ccb *);
static void isp_ledone(ispsoftc_t *, lun_entry_t *);
static cam_status isp_abort_tgt_ccb(ispsoftc_t *, union ccb *);
static timeout_t isp_refire_putback_atio;
static void isp_complete_ctio(union ccb *);
static void isp_target_putback_atio(union ccb *);
static void isp_target_start_ctio(ispsoftc_t *, union ccb *);
static int isp_handle_platform_atio(ispsoftc_t *, at_entry_t *);
static int isp_handle_platform_atio2(ispsoftc_t *, at2_entry_t *);
static int isp_handle_platform_ctio(ispsoftc_t *, void *);
static int isp_handle_platform_notify_scsi(ispsoftc_t *, in_entry_t *);
static int isp_handle_platform_notify_fc(ispsoftc_t *, in_fcentry_t *);

static __inline int
is_lun_enabled(ispsoftc_t *isp, int bus, lun_id_t lun)
{
	tstate_t *tptr;
	tptr = isp->isp_osinfo.lun_hash[LUN_HASH_FUNC(isp, bus, lun)];
	if (tptr == NULL) {
		return (0);
	}
	do {
		if (tptr->lun == (lun_id_t) lun && tptr->bus == bus) {
			return (1);
		}
	} while ((tptr = tptr->next) != NULL);
	return (0);
}

static __inline int
are_any_luns_enabled(ispsoftc_t *isp, int port)
{
	int lo, hi;
	if (IS_DUALBUS(isp)) {
		lo = (port * (LUN_HASH_SIZE >> 1));
		hi = lo + (LUN_HASH_SIZE >> 1);
	} else {
		lo = 0;
		hi = LUN_HASH_SIZE;
	}
	for (lo = 0; lo < hi; lo++) {
		if (isp->isp_osinfo.lun_hash[lo]) {
			return (1);
		}
	}
	return (0);
}

static __inline tstate_t *
get_lun_statep(ispsoftc_t *isp, int bus, lun_id_t lun)
{
	tstate_t *tptr = NULL;

	if (lun == CAM_LUN_WILDCARD) {
		if (isp->isp_osinfo.tmflags[bus] & TM_WILDCARD_ENABLED) {
			tptr = &isp->isp_osinfo.tsdflt[bus];
			tptr->hold++;
			return (tptr);
		}
		return (NULL);
	} else {
		tptr = isp->isp_osinfo.lun_hash[LUN_HASH_FUNC(isp, bus, lun)];
		if (tptr == NULL) {
			return (NULL);
		}
	}

	do {
		if (tptr->lun == lun && tptr->bus == bus) {
			tptr->hold++;
			return (tptr);
		}
	} while ((tptr = tptr->next) != NULL);
	return (tptr);
}

static __inline void
rls_lun_statep(ispsoftc_t *isp, tstate_t *tptr)
{
	if (tptr->hold)
		tptr->hold--;
}

static __inline atio_private_data_t *
isp_get_atpd(ispsoftc_t *isp, int tag)
{
	atio_private_data_t *atp;
	for (atp = isp->isp_osinfo.atpdp;
	    atp < &isp->isp_osinfo.atpdp[ATPDPSIZE]; atp++) {
		if (atp->tag == tag)
			return (atp);
	}
	return (NULL);
}

static cam_status
create_lun_state(ispsoftc_t *isp, int bus,
    struct cam_path *path, tstate_t **rslt)
{
	cam_status status;
	lun_id_t lun;
	int hfx;
	tstate_t *tptr, *new;

	lun = xpt_path_lun_id(path);
	if (lun < 0) {
		return (CAM_LUN_INVALID);
	}
	if (is_lun_enabled(isp, bus, lun)) {
		return (CAM_LUN_ALRDY_ENA);
	}
	new = (tstate_t *) malloc(sizeof (tstate_t), M_DEVBUF, M_NOWAIT|M_ZERO);
	if (new == NULL) {
		return (CAM_RESRC_UNAVAIL);
	}

	status = xpt_create_path(&new->owner, NULL, xpt_path_path_id(path),
	    xpt_path_target_id(path), xpt_path_lun_id(path));
	if (status != CAM_REQ_CMP) {
		free(new, M_DEVBUF);
		return (status);
	}
	new->bus = bus;
	new->lun = lun;
	SLIST_INIT(&new->atios);
	SLIST_INIT(&new->inots);
	new->hold = 1;

	hfx = LUN_HASH_FUNC(isp, new->bus, new->lun);
	tptr = isp->isp_osinfo.lun_hash[hfx];
	if (tptr == NULL) {
		isp->isp_osinfo.lun_hash[hfx] = new;
	} else {
		while (tptr->next)
			tptr = tptr->next;
		tptr->next = new;
	}
	*rslt = new;
	return (CAM_REQ_CMP);
}

static __inline void
destroy_lun_state(ispsoftc_t *isp, tstate_t *tptr)
{
	int hfx;
	tstate_t *lw, *pw;

	if (tptr->hold) {
		return;
	}
	hfx = LUN_HASH_FUNC(isp, tptr->bus, tptr->lun);
	pw = isp->isp_osinfo.lun_hash[hfx];
	if (pw == NULL) {
		return;
	} else if (pw->lun == tptr->lun && pw->bus == tptr->bus) {
		isp->isp_osinfo.lun_hash[hfx] = pw->next;
	} else {
		lw = pw;
		pw = lw->next;
		while (pw) {
			if (pw->lun == tptr->lun && pw->bus == tptr->bus) {
				lw->next = pw->next;
				break;
			}
			lw = pw;
			pw = pw->next;
		}
		if (pw == NULL) {
			return;
		}
	}
	free(tptr, M_DEVBUF);
}

/*
 * Enable luns.
 */
static int
isp_en_lun(ispsoftc_t *isp, union ccb *ccb)
{
	struct ccb_en_lun *cel = &ccb->cel;
	tstate_t *tptr;
	uint32_t seq;
	int bus, cmd, av, wildcard, tm_on;
	lun_id_t lun;
	target_id_t tgt;

	bus = XS_CHANNEL(ccb);
	if (bus > 1) {
		xpt_print_path(ccb->ccb_h.path);
		printf("illegal bus %d\n", bus);
		ccb->ccb_h.status = CAM_PATH_INVALID;
		return (-1);
	}
	tgt = ccb->ccb_h.target_id;
	lun = ccb->ccb_h.target_lun;

	isp_prt(isp, ISP_LOGTDEBUG0,
	    "isp_en_lun: %sabling lun 0x%x on channel %d",
	    cel->enable? "en" : "dis", lun, bus);


	if ((lun != CAM_LUN_WILDCARD) &&
	    (lun < 0 || lun >= (lun_id_t) isp->isp_maxluns)) {
		ccb->ccb_h.status = CAM_LUN_INVALID;
		return (-1);
	}

	if (IS_SCSI(isp)) {
		sdparam *sdp = isp->isp_param;
		sdp += bus;
		if (tgt != CAM_TARGET_WILDCARD &&
		    tgt != sdp->isp_initiator_id) {
			ccb->ccb_h.status = CAM_TID_INVALID;
			return (-1);
		}
	} else {
		/*
		 * There's really no point in doing this yet w/o multi-tid
		 * capability. Even then, it's problematic.
		 */
#if 0
		if (tgt != CAM_TARGET_WILDCARD &&
		    tgt != FCPARAM(isp)->isp_iid) {
			ccb->ccb_h.status = CAM_TID_INVALID;
			return (-1);
		}
#endif
		/*
		 * This is as good a place as any to check f/w capabilities.
		 */
		if ((FCPARAM(isp)->isp_fwattr & ISP_FW_ATTR_TMODE) == 0) {
			isp_prt(isp, ISP_LOGERR,
			    "firmware does not support target mode");
			ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
			return (-1);
		}
		/*
		 * XXX: We *could* handle non-SCCLUN f/w, but we'd have to
		 * XXX: dork with our already fragile enable/disable code.
882 */ 883 if ((FCPARAM(isp)->isp_fwattr & ISP_FW_ATTR_SCCLUN) == 0) { 884 isp_prt(isp, ISP_LOGERR, 885 "firmware not SCCLUN capable"); 886 ccb->ccb_h.status = CAM_FUNC_NOTAVAIL; 887 return (-1); 888 } 889 } 890 891 if (tgt == CAM_TARGET_WILDCARD) { 892 if (lun == CAM_LUN_WILDCARD) { 893 wildcard = 1; 894 } else { 895 ccb->ccb_h.status = CAM_LUN_INVALID; 896 return (-1); 897 } 898 } else { 899 wildcard = 0; 900 } 901 902 tm_on = (isp->isp_osinfo.tmflags[bus] & TM_TMODE_ENABLED) != 0; 903 904 /* 905 * Next check to see whether this is a target/lun wildcard action. 906 * 907 * If so, we know that we can accept commands for luns that haven't 908 * been enabled yet and send them upstream. Otherwise, we have to 909 * handle them locally (if we see them at all). 910 */ 911 912 if (wildcard) { 913 tptr = &isp->isp_osinfo.tsdflt[bus]; 914 if (cel->enable) { 915 if (tm_on) { 916 ccb->ccb_h.status = CAM_LUN_ALRDY_ENA; 917 return (-1); 918 } 919 ccb->ccb_h.status = 920 xpt_create_path(&tptr->owner, NULL, 921 xpt_path_path_id(ccb->ccb_h.path), 922 xpt_path_target_id(ccb->ccb_h.path), 923 xpt_path_lun_id(ccb->ccb_h.path)); 924 if (ccb->ccb_h.status != CAM_REQ_CMP) { 925 return (-1); 926 } 927 SLIST_INIT(&tptr->atios); 928 SLIST_INIT(&tptr->inots); 929 isp->isp_osinfo.tmflags[bus] |= TM_WILDCARD_ENABLED; 930 } else { 931 if (tm_on == 0) { 932 ccb->ccb_h.status = CAM_REQ_CMP; 933 return (-1); 934 } 935 if (tptr->hold) { 936 ccb->ccb_h.status = CAM_SCSI_BUSY; 937 return (-1); 938 } 939 xpt_free_path(tptr->owner); 940 isp->isp_osinfo.tmflags[bus] &= ~TM_WILDCARD_ENABLED; 941 } 942 } 943 944 /* 945 * Now check to see whether this bus needs to be 946 * enabled/disabled with respect to target mode. 947 */ 948 av = bus << 31; 949 if (cel->enable && tm_on == 0) { 950 av |= ENABLE_TARGET_FLAG; 951 av = isp_control(isp, ISPCTL_TOGGLE_TMODE, &av); 952 if (av) { 953 ccb->ccb_h.status = CAM_FUNC_NOTAVAIL; 954 if (wildcard) { 955 isp->isp_osinfo.tmflags[bus] &= 956 ~TM_WILDCARD_ENABLED; 957 xpt_free_path(tptr->owner); 958 } 959 return (-1); 960 } 961 isp->isp_osinfo.tmflags[bus] |= TM_TMODE_ENABLED; 962 isp_prt(isp, ISP_LOGINFO, 963 "Target Mode enabled on channel %d", bus); 964 } else if (cel->enable == 0 && tm_on && wildcard) { 965 if (are_any_luns_enabled(isp, bus)) { 966 ccb->ccb_h.status = CAM_SCSI_BUSY; 967 return (-1); 968 } 969 av = isp_control(isp, ISPCTL_TOGGLE_TMODE, &av); 970 if (av) { 971 ccb->ccb_h.status = CAM_FUNC_NOTAVAIL; 972 return (-1); 973 } 974 isp->isp_osinfo.tmflags[bus] &= ~TM_TMODE_ENABLED; 975 isp_prt(isp, ISP_LOGINFO, 976 "Target Mode disabled on channel %d", bus); 977 } 978 979 if (wildcard) { 980 ccb->ccb_h.status = CAM_REQ_CMP; 981 return (-1); 982 } 983 984 /* 985 * Find an empty slot 986 */ 987 for (seq = 0; seq < NLEACT; seq++) { 988 if (isp->isp_osinfo.leact[seq] == 0) { 989 break; 990 } 991 } 992 if (seq >= NLEACT) { 993 ccb->ccb_h.status = CAM_RESRC_UNAVAIL; 994 return (-1); 995 996 } 997 isp->isp_osinfo.leact[seq] = ccb; 998 999 if (cel->enable) { 1000 ccb->ccb_h.status = 1001 create_lun_state(isp, bus, ccb->ccb_h.path, &tptr); 1002 if (ccb->ccb_h.status != CAM_REQ_CMP) { 1003 isp->isp_osinfo.leact[seq] = 0; 1004 return (-1); 1005 } 1006 } else { 1007 tptr = get_lun_statep(isp, bus, lun); 1008 if (tptr == NULL) { 1009 ccb->ccb_h.status = CAM_LUN_INVALID; 1010 return (-1); 1011 } 1012 } 1013 1014 if (cel->enable) { 1015 int c, n, ulun = lun; 1016 1017 cmd = RQSTYPE_ENABLE_LUN; 1018 c = DFLT_CMND_CNT; 1019 n = DFLT_INOT_CNT; 1020 if (IS_FC(isp) && lun != 0) { 1021 cmd = 
			cmd = RQSTYPE_MODIFY_LUN;
			n = 0;
			/*
			 * For SCC firmware, we only deal with setting
			 * (enabling or modifying) lun 0.
			 */
			ulun = 0;
		}
		if (isp_lun_cmd(isp, cmd, bus, tgt, ulun, c, n, seq+1) == 0) {
			rls_lun_statep(isp, tptr);
			ccb->ccb_h.status = CAM_REQ_INPROG;
			return (seq);
		}
	} else {
		int c, n, ulun = lun;

		cmd = -RQSTYPE_MODIFY_LUN;
		c = DFLT_CMND_CNT;
		n = DFLT_INOT_CNT;
		if (IS_FC(isp) && lun != 0) {
			n = 0;
			/*
			 * For SCC firmware, we only deal with setting
			 * (enabling or modifying) lun 0.
			 */
			ulun = 0;
		}
		if (isp_lun_cmd(isp, cmd, bus, tgt, ulun, c, n, seq+1) == 0) {
			rls_lun_statep(isp, tptr);
			ccb->ccb_h.status = CAM_REQ_INPROG;
			return (seq);
		}
	}
	rls_lun_statep(isp, tptr);
	xpt_print_path(ccb->ccb_h.path);
	printf("isp_lun_cmd failed\n");
	isp->isp_osinfo.leact[seq] = 0;
	ccb->ccb_h.status = CAM_REQ_CMP_ERR;
	return (-1);
}

static void
isp_ledone(ispsoftc_t *isp, lun_entry_t *lep)
{
	const char lfmt[] = "lun %d now %sabled for target mode on channel %d";
	union ccb *ccb;
	uint32_t seq;
	tstate_t *tptr;
	int av;
	struct ccb_en_lun *cel;

	seq = lep->le_reserved - 1;
	if (seq >= NLEACT) {
		isp_prt(isp, ISP_LOGERR,
		    "seq out of range (%u) in isp_ledone", seq);
		return;
	}
	ccb = isp->isp_osinfo.leact[seq];
	if (ccb == 0) {
		isp_prt(isp, ISP_LOGERR,
		    "no ccb for seq %u in isp_ledone", seq);
		return;
	}
	cel = &ccb->cel;
	tptr = get_lun_statep(isp, XS_CHANNEL(ccb), XS_LUN(ccb));
	if (tptr == NULL) {
		xpt_print_path(ccb->ccb_h.path);
		printf("null tptr in isp_ledone\n");
		isp->isp_osinfo.leact[seq] = 0;
		return;
	}

	if (lep->le_status != LUN_OK) {
		xpt_print_path(ccb->ccb_h.path);
		printf("ENABLE/MODIFY LUN returned 0x%x\n", lep->le_status);
err:
		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
		xpt_print_path(ccb->ccb_h.path);
		rls_lun_statep(isp, tptr);
		isp->isp_osinfo.leact[seq] = 0;
		ISPLOCK_2_CAMLOCK(isp);
		xpt_done(ccb);
		CAMLOCK_2_ISPLOCK(isp);
		return;
	} else {
		isp_prt(isp, ISP_LOGTDEBUG0,
		    "isp_ledone: ENABLE/MODIFY done okay");
	}


	if (cel->enable) {
		ccb->ccb_h.status = CAM_REQ_CMP;
		isp_prt(isp, ISP_LOGINFO, lfmt,
		    XS_LUN(ccb), "en", XS_CHANNEL(ccb));
		rls_lun_statep(isp, tptr);
		isp->isp_osinfo.leact[seq] = 0;
		ISPLOCK_2_CAMLOCK(isp);
		xpt_done(ccb);
		CAMLOCK_2_ISPLOCK(isp);
		return;
	}

	if (lep->le_header.rqs_entry_type == RQSTYPE_MODIFY_LUN) {
		if (isp_lun_cmd(isp, -RQSTYPE_ENABLE_LUN, XS_CHANNEL(ccb),
		    XS_TGT(ccb), XS_LUN(ccb), 0, 0, seq+1)) {
			xpt_print_path(ccb->ccb_h.path);
			printf("isp_ledone: isp_lun_cmd failed\n");
			goto err;
		}
		rls_lun_statep(isp, tptr);
		return;
	}

	isp_prt(isp, ISP_LOGINFO, lfmt, XS_LUN(ccb), "dis", XS_CHANNEL(ccb));
	rls_lun_statep(isp, tptr);
	destroy_lun_state(isp, tptr);
	ccb->ccb_h.status = CAM_REQ_CMP;
	isp->isp_osinfo.leact[seq] = 0;
	ISPLOCK_2_CAMLOCK(isp);
	xpt_done(ccb);
	CAMLOCK_2_ISPLOCK(isp);
	if (are_any_luns_enabled(isp, XS_CHANNEL(ccb)) == 0) {
		int bus = XS_CHANNEL(ccb);
		av = bus << 31;
		av = isp_control(isp, ISPCTL_TOGGLE_TMODE, &av);
		if (av) {
			isp_prt(isp, ISP_LOGWARN,
			    "disable target mode on channel %d failed",
			    bus);
		} else {
			isp_prt(isp, ISP_LOGINFO,
			    "Target Mode disabled on channel %d", bus);
		}
		isp->isp_osinfo.tmflags[bus] &= ~TM_TMODE_ENABLED;
	}
}


static cam_status
isp_abort_tgt_ccb(ispsoftc_t *isp, union ccb *ccb)
{
	tstate_t *tptr;
	struct ccb_hdr_slist *lp;
	struct ccb_hdr *curelm;
	int found, *ctr;
	union ccb *accb = ccb->cab.abort_ccb;

	isp_prt(isp, ISP_LOGTDEBUG0, "aborting ccb %p", accb);
	if (accb->ccb_h.target_id != CAM_TARGET_WILDCARD) {
		int badpath = 0;
		if (IS_FC(isp) && (accb->ccb_h.target_id !=
		    ((fcparam *) isp->isp_param)->isp_loopid)) {
			badpath = 1;
		} else if (IS_SCSI(isp) && (accb->ccb_h.target_id !=
		    ((sdparam *) isp->isp_param)->isp_initiator_id)) {
			badpath = 1;
		}
		if (badpath) {
			/*
			 * Being restrictive about target ids is really about
			 * making sure we're aborting for the right multi-tid
			 * path. This doesn't really make much sense at present.
			 */
#if 0
			return (CAM_PATH_INVALID);
#endif
		}
	}
	tptr = get_lun_statep(isp, XS_CHANNEL(ccb), accb->ccb_h.target_lun);
	if (tptr == NULL) {
		isp_prt(isp, ISP_LOGTDEBUG0,
		    "isp_abort_tgt_ccb: can't get statep");
		return (CAM_PATH_INVALID);
	}
	if (accb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) {
		lp = &tptr->atios;
		ctr = &tptr->atio_count;
	} else if (accb->ccb_h.func_code == XPT_IMMED_NOTIFY) {
		lp = &tptr->inots;
		ctr = &tptr->inot_count;
	} else {
		rls_lun_statep(isp, tptr);
		isp_prt(isp, ISP_LOGTDEBUG0,
		    "isp_abort_tgt_ccb: bad func %d\n", accb->ccb_h.func_code);
		return (CAM_UA_ABORT);
	}
	curelm = SLIST_FIRST(lp);
	found = 0;
	if (curelm == &accb->ccb_h) {
		found = 1;
		SLIST_REMOVE_HEAD(lp, sim_links.sle);
	} else {
		while (curelm != NULL) {
			struct ccb_hdr *nextelm;

			nextelm = SLIST_NEXT(curelm, sim_links.sle);
			if (nextelm == &accb->ccb_h) {
				found = 1;
				SLIST_NEXT(curelm, sim_links.sle) =
				    SLIST_NEXT(nextelm, sim_links.sle);
				break;
			}
			curelm = nextelm;
		}
	}
	rls_lun_statep(isp, tptr);
	if (found) {
		(*ctr)--;
		accb->ccb_h.status = CAM_REQ_ABORTED;
		xpt_done(accb);
		return (CAM_REQ_CMP);
	}
	isp_prt(isp, ISP_LOGTDEBUG0,
	    "isp_abort_tgt_ccb: CCB %p not found\n", ccb);
	return (CAM_PATH_INVALID);
}

static void
isp_target_start_ctio(ispsoftc_t *isp, union ccb *ccb)
{
	void *qe;
	struct ccb_scsiio *cso = &ccb->csio;
	uint16_t *hp, save_handle;
	uint16_t nxti, optr;
	uint8_t local[QENTRY_LEN];


	if (isp_getrqentry(isp, &nxti, &optr, &qe)) {
		xpt_print_path(ccb->ccb_h.path);
		printf("Request Queue Overflow in isp_target_start_ctio\n");
		XS_SETERR(ccb, CAM_REQUEUE_REQ);
		goto out;
	}
	memset(local, 0, QENTRY_LEN);

	/*
	 * We're either moving data or completing a command here.
	 */

	if (IS_FC(isp)) {
		atio_private_data_t *atp;
		ct2_entry_t *cto = (ct2_entry_t *) local;

		cto->ct_header.rqs_entry_type = RQSTYPE_CTIO2;
		cto->ct_header.rqs_entry_count = 1;
		if (IS_2KLOGIN(isp)) {
			((ct2e_entry_t *)cto)->ct_iid = cso->init_id;
		} else {
			cto->ct_iid = cso->init_id;
			if (!(FCPARAM(isp)->isp_fwattr & ISP_FW_ATTR_SCCLUN)) {
				cto->ct_lun = ccb->ccb_h.target_lun;
			}
		}

		atp = isp_get_atpd(isp, cso->tag_id);
		if (atp == NULL) {
			isp_prt(isp, ISP_LOGERR,
			    "cannot find private data adjunct for tag %x",
			    cso->tag_id);
			XS_SETERR(ccb, CAM_REQ_CMP_ERR);
			goto out;
		}

		cto->ct_rxid = cso->tag_id;
		if (cso->dxfer_len == 0) {
			cto->ct_flags |= CT2_FLAG_MODE1 | CT2_NO_DATA;
			if (ccb->ccb_h.flags & CAM_SEND_STATUS) {
				cto->ct_flags |= CT2_SENDSTATUS;
				cto->rsp.m1.ct_scsi_status = cso->scsi_status;
				cto->ct_resid =
				    atp->orig_datalen - atp->bytes_xfered;
				if (cto->ct_resid < 0) {
					cto->rsp.m1.ct_scsi_status |=
					    CT2_DATA_OVER;
				} else if (cto->ct_resid > 0) {
					cto->rsp.m1.ct_scsi_status |=
					    CT2_DATA_UNDER;
				}
			}
			if ((ccb->ccb_h.flags & CAM_SEND_SENSE) != 0) {
				int m = min(cso->sense_len, MAXRESPLEN);
				memcpy(cto->rsp.m1.ct_resp,
				    &cso->sense_data, m);
				cto->rsp.m1.ct_senselen = m;
				cto->rsp.m1.ct_scsi_status |= CT2_SNSLEN_VALID;
			}
		} else {
			cto->ct_flags |= CT2_FLAG_MODE0;
			if ((cso->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
				cto->ct_flags |= CT2_DATA_IN;
			} else {
				cto->ct_flags |= CT2_DATA_OUT;
			}
			cto->ct_reloff = atp->bytes_xfered;
			if ((ccb->ccb_h.flags & CAM_SEND_STATUS) != 0) {
				cto->ct_flags |= CT2_SENDSTATUS;
				cto->rsp.m0.ct_scsi_status = cso->scsi_status;
				cto->ct_resid =
				    atp->orig_datalen -
				    (atp->bytes_xfered + cso->dxfer_len);
				if (cto->ct_resid < 0) {
					cto->rsp.m0.ct_scsi_status |=
					    CT2_DATA_OVER;
				} else if (cto->ct_resid > 0) {
					cto->rsp.m0.ct_scsi_status |=
					    CT2_DATA_UNDER;
				}
			} else {
				atp->last_xframt = cso->dxfer_len;
			}
			/*
			 * If we're sending data and status back together,
			 * we can't also send back sense data as well.
			 */
			ccb->ccb_h.flags &= ~CAM_SEND_SENSE;
		}

		if (cto->ct_flags & CT2_SENDSTATUS) {
			isp_prt(isp, ISP_LOGTDEBUG0,
			    "CTIO2[%x] STATUS %x origd %u curd %u resid %u",
			    cto->ct_rxid, cso->scsi_status, atp->orig_datalen,
			    cso->dxfer_len, cto->ct_resid);
			cto->ct_flags |= CT2_CCINCR;
			atp->state = ATPD_STATE_LAST_CTIO;
		} else {
			atp->state = ATPD_STATE_CTIO;
		}
		cto->ct_timeout = 10;
		hp = &cto->ct_syshandle;
	} else {
		ct_entry_t *cto = (ct_entry_t *) local;

		cto->ct_header.rqs_entry_type = RQSTYPE_CTIO;
		cto->ct_header.rqs_entry_count = 1;
		cto->ct_iid = cso->init_id;
		cto->ct_iid |= XS_CHANNEL(ccb) << 7;
		cto->ct_tgt = ccb->ccb_h.target_id;
		cto->ct_lun = ccb->ccb_h.target_lun;
		cto->ct_fwhandle = AT_GET_HANDLE(cso->tag_id);
		if (AT_HAS_TAG(cso->tag_id)) {
			cto->ct_tag_val = (uint8_t) AT_GET_TAG(cso->tag_id);
			cto->ct_flags |= CT_TQAE;
		}
		if (ccb->ccb_h.flags & CAM_DIS_DISCONNECT) {
			cto->ct_flags |= CT_NODISC;
		}
		if (cso->dxfer_len == 0) {
			cto->ct_flags |= CT_NO_DATA;
		} else if ((cso->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
			cto->ct_flags |= CT_DATA_IN;
		} else {
			cto->ct_flags |= CT_DATA_OUT;
		}
		if (ccb->ccb_h.flags & CAM_SEND_STATUS) {
			cto->ct_flags |= CT_SENDSTATUS|CT_CCINCR;
			cto->ct_scsi_status = cso->scsi_status;
			cto->ct_resid = cso->resid;
			isp_prt(isp, ISP_LOGTDEBUG0,
			    "CTIO[%x] SCSI STATUS 0x%x resid %d tag_id %x",
			    cto->ct_fwhandle, cso->scsi_status, cso->resid,
			    cso->tag_id);
		}
		ccb->ccb_h.flags &= ~CAM_SEND_SENSE;
		cto->ct_timeout = 10;
		hp = &cto->ct_syshandle;
	}

	if (isp_save_xs_tgt(isp, ccb, hp)) {
		xpt_print_path(ccb->ccb_h.path);
		printf("No XFLIST pointers for isp_target_start_ctio\n");
		XS_SETERR(ccb, CAM_REQUEUE_REQ);
		goto out;
	}


	/*
	 * Call the dma setup routines for this entry (and any subsequent
	 * CTIOs) if there's data to move, and then tell the f/w it's got
	 * new things to play with. As with isp_start's usage of DMA setup,
	 * any swizzling is done in the machine dependent layer. Because
	 * of this, we put the request onto the queue area first in native
	 * format.
	 */

	save_handle = *hp;

	switch (ISP_DMASETUP(isp, cso, (ispreq_t *) local, &nxti, optr)) {
	case CMD_QUEUED:
		ISP_ADD_REQUEST(isp, nxti);
		ccb->ccb_h.status |= CAM_SIM_QUEUED;
		return;

	case CMD_EAGAIN:
		XS_SETERR(ccb, CAM_REQUEUE_REQ);
		break;

	default:
		break;
	}
	isp_destroy_tgt_handle(isp, save_handle);

out:
	ISPLOCK_2_CAMLOCK(isp);
	xpt_done(ccb);
	CAMLOCK_2_ISPLOCK(isp);
}

static void
isp_refire_putback_atio(void *arg)
{
	int s = splcam();
	isp_target_putback_atio(arg);
	splx(s);
}

static void
isp_target_putback_atio(union ccb *ccb)
{
	ispsoftc_t *isp;
	struct ccb_scsiio *cso;
	uint16_t nxti, optr;
	void *qe;

	isp = XS_ISP(ccb);

	if (isp_getrqentry(isp, &nxti, &optr, &qe)) {
		(void) timeout(isp_refire_putback_atio, ccb, 10);
		isp_prt(isp, ISP_LOGWARN,
		    "isp_target_putback_atio: Request Queue Overflow");
		return;
	}
	memset(qe, 0, QENTRY_LEN);
	cso = &ccb->csio;
	if (IS_FC(isp)) {
		at2_entry_t local, *at = &local;
		MEMZERO(at, sizeof (at2_entry_t));
		at->at_header.rqs_entry_type = RQSTYPE_ATIO2;
		at->at_header.rqs_entry_count = 1;
		if ((FCPARAM(isp)->isp_fwattr & ISP_FW_ATTR_SCCLUN) != 0) {
			at->at_scclun = (uint16_t) ccb->ccb_h.target_lun;
		} else {
			at->at_lun = (uint8_t) ccb->ccb_h.target_lun;
		}
		at->at_status = CT_OK;
		at->at_rxid = cso->tag_id;
		at->at_iid = cso->ccb_h.target_id;
		isp_put_atio2(isp, at, qe);
	} else {
		at_entry_t local, *at = &local;
		MEMZERO(at, sizeof (at_entry_t));
		at->at_header.rqs_entry_type = RQSTYPE_ATIO;
		at->at_header.rqs_entry_count = 1;
		at->at_iid = cso->init_id;
		at->at_iid |= XS_CHANNEL(ccb) << 7;
		at->at_tgt = cso->ccb_h.target_id;
		at->at_lun = cso->ccb_h.target_lun;
		at->at_status = CT_OK;
		at->at_tag_val = AT_GET_TAG(cso->tag_id);
		at->at_handle = AT_GET_HANDLE(cso->tag_id);
		isp_put_atio(isp, at, qe);
	}
	ISP_TDQE(isp, "isp_target_putback_atio", (int) optr, qe);
	ISP_ADD_REQUEST(isp, nxti);
	isp_complete_ctio(ccb);
}

static void
isp_complete_ctio(union ccb *ccb)
{
	ISPLOCK_2_CAMLOCK(isp);
	if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
		ccb->ccb_h.status |= CAM_REQ_CMP;
	}
	ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
	xpt_done(ccb);
	CAMLOCK_2_ISPLOCK(isp);
}

/*
 * Handle ATIO stuff that the generic code can't.
 * This means handling CDBs.
 */

static int
isp_handle_platform_atio(ispsoftc_t *isp, at_entry_t *aep)
{
	tstate_t *tptr;
	int status, bus, iswildcard;
	struct ccb_accept_tio *atiop;

	/*
	 * The firmware status (except for the QLTM_SVALID bit)
	 * indicates why this ATIO was sent to us.
	 *
	 * If QLTM_SVALID is set, the firmware has recommended Sense Data.
	 *
	 * If the DISCONNECTS DISABLED bit is set in the flags field,
	 * we're still connected on the SCSI bus.
	 */
	status = aep->at_status;
	if ((status & ~QLTM_SVALID) == AT_PHASE_ERROR) {
		/*
		 * Bus Phase Sequence error. We should have sense data
		 * suggested by the f/w. I'm not sure quite yet what
		 * to do about this for CAM.
		 */
		isp_prt(isp, ISP_LOGWARN, "PHASE ERROR");
		isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
		return (0);
	}
	if ((status & ~QLTM_SVALID) != AT_CDB) {
		isp_prt(isp, ISP_LOGWARN, "bad atio (0x%x) leaked to platform",
		    status);
		isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
		return (0);
	}

	bus = GET_BUS_VAL(aep->at_iid);
	tptr = get_lun_statep(isp, bus, aep->at_lun);
	if (tptr == NULL) {
		tptr = get_lun_statep(isp, bus, CAM_LUN_WILDCARD);
		if (tptr == NULL) {
			/*
			 * Because we can't autofeed sense data back with
			 * a command for parallel SCSI, we can't give back
			 * a CHECK CONDITION. We'll give back a BUSY status
			 * instead. This works out okay because the only
			 * time we should, in fact, get this, is in the
			 * case that somebody configured us without the
			 * blackhole driver, so they get what they deserve.
			 */
			isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
			return (0);
		}
		iswildcard = 1;
	} else {
		iswildcard = 0;
	}

	atiop = (struct ccb_accept_tio *) SLIST_FIRST(&tptr->atios);
	if (atiop == NULL) {
		/*
		 * Because we can't autofeed sense data back with
		 * a command for parallel SCSI, we can't give back
		 * a CHECK CONDITION. We'll give back a QUEUE FULL status
		 * instead. This works out okay because the only time we
		 * should, in fact, get this, is in the case that we've
		 * run out of ATIOS.
		 */
		xpt_print_path(tptr->owner);
		isp_prt(isp, ISP_LOGWARN,
		    "no ATIOS for lun %d from initiator %d on channel %d",
		    aep->at_lun, GET_IID_VAL(aep->at_iid), bus);
		if (aep->at_flags & AT_TQAE)
			isp_endcmd(isp, aep, SCSI_STATUS_QUEUE_FULL, 0);
		else
			isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
		rls_lun_statep(isp, tptr);
		return (0);
	}
	SLIST_REMOVE_HEAD(&tptr->atios, sim_links.sle);
	tptr->atio_count--;
	isp_prt(isp, ISP_LOGTDEBUG0, "Take FREE ATIO lun %d, count now %d",
	    aep->at_lun, tptr->atio_count);
	if (iswildcard) {
		atiop->ccb_h.target_id = aep->at_tgt;
		atiop->ccb_h.target_lun = aep->at_lun;
	}
	if (aep->at_flags & AT_NODISC) {
		atiop->ccb_h.flags = CAM_DIS_DISCONNECT;
	} else {
		atiop->ccb_h.flags = 0;
	}

	if (status & QLTM_SVALID) {
		size_t amt = imin(QLTM_SENSELEN, sizeof (atiop->sense_data));
		atiop->sense_len = amt;
		MEMCPY(&atiop->sense_data, aep->at_sense, amt);
	} else {
		atiop->sense_len = 0;
	}

	atiop->init_id = GET_IID_VAL(aep->at_iid);
	atiop->cdb_len = aep->at_cdblen;
	MEMCPY(atiop->cdb_io.cdb_bytes, aep->at_cdb, aep->at_cdblen);
	atiop->ccb_h.status = CAM_CDB_RECVD;
	/*
	 * Construct a tag 'id' based upon tag value (which may be 0..255)
	 * and the handle (which we have to preserve).
	 */
	AT_MAKE_TAGID(atiop->tag_id, device_get_unit(isp->isp_dev), aep);
	if (aep->at_flags & AT_TQAE) {
		atiop->tag_action = aep->at_tag_type;
		atiop->ccb_h.status |= CAM_TAG_ACTION_VALID;
	}
	xpt_done((union ccb*)atiop);
	isp_prt(isp, ISP_LOGTDEBUG0,
	    "ATIO[%x] CDB=0x%x bus %d iid%d->lun%d tag 0x%x ttype 0x%x %s",
	    aep->at_handle, aep->at_cdb[0] & 0xff, GET_BUS_VAL(aep->at_iid),
	    GET_IID_VAL(aep->at_iid), aep->at_lun, aep->at_tag_val & 0xff,
	    aep->at_tag_type, (aep->at_flags & AT_NODISC)?
	    "nondisc" : "disconnecting");
	rls_lun_statep(isp, tptr);
	return (0);
}

static int
isp_handle_platform_atio2(ispsoftc_t *isp, at2_entry_t *aep)
{
	lun_id_t lun;
	tstate_t *tptr;
	struct ccb_accept_tio *atiop;
	atio_private_data_t *atp;

	/*
	 * The firmware status (except for the QLTM_SVALID bit)
	 * indicates why this ATIO was sent to us.
	 *
	 * If QLTM_SVALID is set, the firmware has recommended Sense Data.
	 */
	if ((aep->at_status & ~QLTM_SVALID) != AT_CDB) {
		isp_prt(isp, ISP_LOGWARN,
		    "bogus atio (0x%x) leaked to platform", aep->at_status);
		isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
		return (0);
	}

	if ((FCPARAM(isp)->isp_fwattr & ISP_FW_ATTR_SCCLUN) != 0) {
		lun = aep->at_scclun;
	} else {
		lun = aep->at_lun;
	}
	tptr = get_lun_statep(isp, 0, lun);
	if (tptr == NULL) {
		isp_prt(isp, ISP_LOGTDEBUG0,
		    "[0x%x] no state pointer for lun %d", aep->at_rxid, lun);
		tptr = get_lun_statep(isp, 0, CAM_LUN_WILDCARD);
		if (tptr == NULL) {
			isp_endcmd(isp, aep,
			    SCSI_STATUS_CHECK_COND | ECMD_SVALID |
			    (0x5 << 12) | (0x25 << 16), 0);
			return (0);
		}
	}

	atp = isp_get_atpd(isp, 0);
	atiop = (struct ccb_accept_tio *) SLIST_FIRST(&tptr->atios);
	if (atiop == NULL || atp == NULL) {

		/*
		 * Because we can't autofeed sense data back with
		 * a command for parallel SCSI, we can't give back
		 * a CHECK CONDITION. We'll give back a QUEUE FULL status
		 * instead. This works out okay because the only time we
		 * should, in fact, get this, is in the case that we've
		 * run out of ATIOS.
		 */
		xpt_print_path(tptr->owner);
		isp_prt(isp, ISP_LOGWARN,
		    "no %s for lun %d from initiator %d",
		    (atp == NULL && atiop == NULL)? "ATIO2s *or* ATPS" :
		    ((atp == NULL)? "ATPs" : "ATIO2s"), lun, aep->at_iid);
		rls_lun_statep(isp, tptr);
		isp_endcmd(isp, aep, SCSI_STATUS_QUEUE_FULL, 0);
		return (0);
	}
	atp->state = ATPD_STATE_ATIO;
	SLIST_REMOVE_HEAD(&tptr->atios, sim_links.sle);
	tptr->atio_count--;
	isp_prt(isp, ISP_LOGTDEBUG0, "Take FREE ATIO lun %d, count now %d",
	    lun, tptr->atio_count);

	if (tptr == &isp->isp_osinfo.tsdflt[0]) {
		atiop->ccb_h.target_id =
		    ((fcparam *)isp->isp_param)->isp_loopid;
		atiop->ccb_h.target_lun = lun;
	}
	/*
	 * We don't get 'suggested' sense data as we do with SCSI cards.
	 */
	atiop->sense_len = 0;

	atiop->init_id = aep->at_iid;
	atiop->cdb_len = ATIO2_CDBLEN;
	MEMCPY(atiop->cdb_io.cdb_bytes, aep->at_cdb, ATIO2_CDBLEN);
	atiop->ccb_h.status = CAM_CDB_RECVD;
	atiop->tag_id = aep->at_rxid;
	switch (aep->at_taskflags & ATIO2_TC_ATTR_MASK) {
	case ATIO2_TC_ATTR_SIMPLEQ:
		atiop->tag_action = MSG_SIMPLE_Q_TAG;
		break;
	case ATIO2_TC_ATTR_HEADOFQ:
		atiop->tag_action = MSG_HEAD_OF_Q_TAG;
		break;
	case ATIO2_TC_ATTR_ORDERED:
		atiop->tag_action = MSG_ORDERED_Q_TAG;
		break;
	case ATIO2_TC_ATTR_ACAQ:		/* ?? */
	case ATIO2_TC_ATTR_UNTAGGED:
	default:
		atiop->tag_action = 0;
		break;
	}
	atiop->ccb_h.flags = CAM_TAG_ACTION_VALID;

	atp->tag = atiop->tag_id;
	atp->lun = lun;
	atp->orig_datalen = aep->at_datalen;
	atp->last_xframt = 0;
	atp->bytes_xfered = 0;
	atp->state = ATPD_STATE_CAM;
	ISPLOCK_2_CAMLOCK(isp);
	xpt_done((union ccb*)atiop);

	isp_prt(isp, ISP_LOGTDEBUG0,
	    "ATIO2[%x] CDB=0x%x iid%d->lun%d tattr 0x%x datalen %u",
	    aep->at_rxid, aep->at_cdb[0] & 0xff, aep->at_iid,
	    lun, aep->at_taskflags, aep->at_datalen);
	rls_lun_statep(isp, tptr);
	return (0);
}

static int
isp_handle_platform_ctio(ispsoftc_t *isp, void *arg)
{
	union ccb *ccb;
	int sentstatus, ok, notify_cam, resid = 0;
	uint16_t tval;

	/*
	 * CTIO and CTIO2 are close enough....
	 */

	ccb = isp_find_xs_tgt(isp, ((ct_entry_t *)arg)->ct_syshandle);
	KASSERT((ccb != NULL), ("null ccb in isp_handle_platform_ctio"));
	isp_destroy_tgt_handle(isp, ((ct_entry_t *)arg)->ct_syshandle);

	if (IS_FC(isp)) {
		ct2_entry_t *ct = arg;
		atio_private_data_t *atp = isp_get_atpd(isp, ct->ct_rxid);
		if (atp == NULL) {
			isp_prt(isp, ISP_LOGERR,
			    "cannot find adjunct for %x after I/O",
			    ct->ct_rxid);
			return (0);
		}
		sentstatus = ct->ct_flags & CT2_SENDSTATUS;
		ok = (ct->ct_status & ~QLTM_SVALID) == CT_OK;
		if (ok && sentstatus && (ccb->ccb_h.flags & CAM_SEND_SENSE)) {
			ccb->ccb_h.status |= CAM_SENT_SENSE;
		}
		notify_cam = ct->ct_header.rqs_seqno & 0x1;
		if ((ct->ct_flags & CT2_DATAMASK) != CT2_NO_DATA) {
			resid = ct->ct_resid;
			atp->bytes_xfered += (atp->last_xframt - resid);
			atp->last_xframt = 0;
		}
		if (sentstatus || !ok) {
			atp->tag = 0;
		}
		isp_prt(isp, ok? ISP_LOGTDEBUG0 : ISP_LOGWARN,
		    "CTIO2[%x] sts 0x%x flg 0x%x sns %d resid %d %s",
		    ct->ct_rxid, ct->ct_status, ct->ct_flags,
		    (ccb->ccb_h.status & CAM_SENT_SENSE) != 0,
		    resid, sentstatus? "FIN" : "MID");
		tval = ct->ct_rxid;

		/* XXX: should really come after isp_complete_ctio */
		atp->state = ATPD_STATE_PDON;
	} else {
		ct_entry_t *ct = arg;
		sentstatus = ct->ct_flags & CT_SENDSTATUS;
		ok = (ct->ct_status & ~QLTM_SVALID) == CT_OK;
		/*
		 * We *ought* to be able to get back to the original ATIO
		 * here, but for some reason this gets lost. It's just as
		 * well because it's squirrelled away as part of periph
		 * private data.
		 *
		 * We can live without it as long as we continue to use
		 * the auto-replenish feature for CTIOs.
		 */
		notify_cam = ct->ct_header.rqs_seqno & 0x1;
		if (ct->ct_status & QLTM_SVALID) {
			char *sp = (char *)ct;
			sp += CTIO_SENSE_OFFSET;
			ccb->csio.sense_len =
			    min(sizeof (ccb->csio.sense_data), QLTM_SENSELEN);
			MEMCPY(&ccb->csio.sense_data, sp, ccb->csio.sense_len);
			ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
		}
		if ((ct->ct_flags & CT_DATAMASK) != CT_NO_DATA) {
			resid = ct->ct_resid;
		}
		isp_prt(isp, ISP_LOGTDEBUG0,
		    "CTIO[%x] tag %x iid %d lun %d sts %x flg %x resid %d %s",
		    ct->ct_fwhandle, ct->ct_tag_val, ct->ct_iid, ct->ct_lun,
		    ct->ct_status, ct->ct_flags, resid,
		    sentstatus? "FIN" : "MID");
		tval = ct->ct_fwhandle;
	}
	ccb->csio.resid += resid;

	/*
	 * We're here either because intermediate data transfers are done
	 * and/or the final status CTIO (which may have joined with a
	 * Data Transfer) is done.
	 *
	 * In any case, for this platform, the upper layers figure out
	 * what to do next, so all we do here is collect status and
	 * pass information along. Any DMA handles have already been
	 * freed.
	 */
	if (notify_cam == 0) {
		isp_prt(isp, ISP_LOGTDEBUG0, " INTER CTIO[0x%x] done", tval);
		return (0);
	}

	isp_prt(isp, ISP_LOGTDEBUG0, "%s CTIO[0x%x] done",
	    (sentstatus)? " FINAL " : "MIDTERM ", tval);

	if (!ok) {
		isp_target_putback_atio(ccb);
	} else {
		isp_complete_ctio(ccb);

	}
	return (0);
}

static int
isp_handle_platform_notify_scsi(ispsoftc_t *isp, in_entry_t *inp)
{
	return (0);	/* XXXX */
}

static int
isp_handle_platform_notify_fc(ispsoftc_t *isp, in_fcentry_t *inp)
{

	switch (inp->in_status) {
	case IN_PORT_LOGOUT:
		isp_prt(isp, ISP_LOGWARN, "port logout of iid %d",
		    inp->in_iid);
		break;
	case IN_PORT_CHANGED:
		isp_prt(isp, ISP_LOGWARN, "port changed for iid %d",
		    inp->in_iid);
		break;
	case IN_GLOBAL_LOGO:
		isp_prt(isp, ISP_LOGINFO, "all ports logged out");
		break;
	case IN_ABORT_TASK:
	{
		atio_private_data_t *atp = isp_get_atpd(isp, inp->in_seqid);
		struct ccb_immed_notify *inot = NULL;

		if (atp) {
			tstate_t *tptr = get_lun_statep(isp, 0, atp->lun);
			if (tptr) {
				inot = (struct ccb_immed_notify *)
				    SLIST_FIRST(&tptr->inots);
				if (inot) {
					tptr->inot_count--;
					SLIST_REMOVE_HEAD(&tptr->inots,
					    sim_links.sle);
					isp_prt(isp, ISP_LOGTDEBUG0,
					    "Take FREE INOT count now %d",
					    tptr->inot_count);
				}
			}
			isp_prt(isp, ISP_LOGWARN,
			    "abort task RX_ID %x IID %d state %d",
			    inp->in_seqid, inp->in_iid, atp->state);
		} else {
			isp_prt(isp, ISP_LOGWARN,
			    "abort task RX_ID %x from iid %d, state unknown",
			    inp->in_seqid, inp->in_iid);
		}
		if (inot) {
			inot->initiator_id = inp->in_iid;
			inot->sense_len = 0;
			inot->message_args[0] = MSG_ABORT_TAG;
			inot->message_args[1] = inp->in_seqid & 0xff;
			inot->message_args[2] = (inp->in_seqid >> 8) & 0xff;
			inot->ccb_h.status = CAM_MESSAGE_RECV;
			xpt_done((union ccb *)inot);
		}
		break;
	}
	default:
		break;
	}
	return (0);
}
#endif

static void
isp_cam_async(void *cbarg, uint32_t code, struct cam_path *path, void *arg)
{
	struct cam_sim *sim;
	ispsoftc_t *isp;

	sim = (struct cam_sim *)cbarg;
	isp = (ispsoftc_t *) cam_sim_softc(sim);
	switch (code) {
	case AC_LOST_DEVICE:
		if (IS_SCSI(isp)) {
			uint16_t oflags, nflags;
			sdparam *sdp = isp->isp_param;
			int tgt;

			tgt = xpt_path_target_id(path);
			if (tgt >= 0) {
				sdp += cam_sim_bus(sim);
				ISP_LOCK(isp);
				nflags = sdp->isp_devparam[tgt].nvrm_flags;
#ifndef	ISP_TARGET_MODE
				nflags &= DPARM_SAFE_DFLT;
				if (isp->isp_loaded_fw) {
					nflags |= DPARM_NARROW | DPARM_ASYNC;
				}
#else
				nflags = DPARM_DEFAULT;
#endif
				oflags = sdp->isp_devparam[tgt].goal_flags;
				sdp->isp_devparam[tgt].goal_flags = nflags;
				sdp->isp_devparam[tgt].dev_update = 1;
				isp->isp_update |= (1 << cam_sim_bus(sim));
				(void) isp_control(isp,
				    ISPCTL_UPDATE_PARAMS, NULL);
				sdp->isp_devparam[tgt].goal_flags = oflags;
				ISP_UNLOCK(isp);
			}
		}
		break;
	default:
		isp_prt(isp, ISP_LOGWARN, "isp_cam_async: Code 0x%x", code);
		break;
	}
}

static void
isp_poll(struct cam_sim *sim)
{
	ispsoftc_t *isp = cam_sim_softc(sim);
	uint16_t isr, sema, mbox;

	ISP_LOCK(isp);
	if (ISP_READ_ISR(isp, &isr, &sema, &mbox)) {
		isp_intr(isp, isr, sema, mbox);
	}
	ISP_UNLOCK(isp);
}


static void
isp_watchdog(void *arg)
{
	XS_T *xs = arg;
	ispsoftc_t *isp = XS_ISP(xs);
	uint32_t handle;
	int iok;

	/*
	 * We've decided this command is dead. Make sure we're not trying
	 * to kill a command that's already dead by getting its handle and
	 * seeing whether it's still alive.
	 */
	ISP_LOCK(isp);
	iok = isp->isp_osinfo.intsok;
	isp->isp_osinfo.intsok = 0;
	handle = isp_find_handle(isp, xs);
	if (handle) {
		uint16_t isr, sema, mbox;

		if (XS_CMD_DONE_P(xs)) {
			isp_prt(isp, ISP_LOGDEBUG1,
			    "watchdog found done cmd (handle 0x%x)", handle);
			ISP_UNLOCK(isp);
			return;
		}

		if (XS_CMD_WDOG_P(xs)) {
			isp_prt(isp, ISP_LOGDEBUG2,
			    "recursive watchdog (handle 0x%x)", handle);
			ISP_UNLOCK(isp);
			return;
		}

		XS_CMD_S_WDOG(xs);
		if (ISP_READ_ISR(isp, &isr, &sema, &mbox)) {
			isp_intr(isp, isr, sema, mbox);
		}
		if (XS_CMD_DONE_P(xs)) {
			isp_prt(isp, ISP_LOGDEBUG2,
			    "watchdog cleanup for handle 0x%x", handle);
			xpt_done((union ccb *) xs);
		} else if (XS_CMD_GRACE_P(xs)) {
			/*
			 * Make sure the command is *really* dead before we
			 * release the handle (and DMA resources) for reuse.
			 */
			(void) isp_control(isp, ISPCTL_ABORT_CMD, arg);

			/*
			 * After this point, the command is really dead.
			 */
			if (XS_XFRLEN(xs)) {
				ISP_DMAFREE(isp, xs, handle);
			}
			isp_destroy_handle(isp, handle);
			xpt_print_path(xs->ccb_h.path);
			isp_prt(isp, ISP_LOGWARN,
			    "watchdog timeout for handle 0x%x", handle);
			XS_SETERR(xs, CAM_CMD_TIMEOUT);
			XS_CMD_C_WDOG(xs);
			isp_done(xs);
		} else {
			uint16_t nxti, optr;
			ispreq_t local, *mp = &local, *qe;

			XS_CMD_C_WDOG(xs);
			xs->ccb_h.timeout_ch = timeout(isp_watchdog, xs, hz);
			if (isp_getrqentry(isp, &nxti, &optr, (void **) &qe)) {
				ISP_UNLOCK(isp);
				return;
			}
			XS_CMD_S_GRACE(xs);
			MEMZERO((void *) mp, sizeof (*mp));
			mp->req_header.rqs_entry_count = 1;
			mp->req_header.rqs_entry_type = RQSTYPE_MARKER;
			mp->req_modifier = SYNC_ALL;
			mp->req_target = XS_CHANNEL(xs) << 7;
			isp_put_request(isp, mp, qe);
			ISP_ADD_REQUEST(isp, nxti);
		}
	} else {
		isp_prt(isp, ISP_LOGDEBUG2, "watchdog with no command");
	}
	isp->isp_osinfo.intsok = iok;
	ISP_UNLOCK(isp);
}

static void
isp_kthread(void *arg)
{
	ispsoftc_t *isp = arg;


#if __FreeBSD_version < 500000
	int s;

	s = splcam();
	isp->isp_osinfo.intsok = 1;
#else
#ifdef ISP_SMPLOCK
	mtx_lock(&isp->isp_lock);
#else
	mtx_lock(&Giant);
#endif
#endif
	/*
	 * The first loop is for our usage where we have yet to have
	 * gotten good fibre channel state.
	 */
	for (;;) {
		int wasfrozen;

		isp_prt(isp, ISP_LOGDEBUG0, "kthread: checking FC state");
		while (isp_fc_runstate(isp, 2 * 1000000) != 0) {
			isp_prt(isp, ISP_LOGDEBUG0, "kthread: FC state ungood");
			if (FCPARAM(isp)->isp_fwstate != FW_READY ||
			    FCPARAM(isp)->isp_loopstate < LOOP_PDB_RCVD) {
				if (FCPARAM(isp)->loop_seen_once == 0 ||
				    isp->isp_osinfo.ktmature == 0) {
					break;
				}
			}
#ifdef ISP_SMPLOCK
			msleep(isp_kthread, &isp->isp_lock,
			    PRIBIO, "isp_fcthrd", hz);
#else
			(void) tsleep(isp_kthread, PRIBIO, "isp_fcthrd", hz);
#endif
		}

		/*
		 * Even if we didn't get good loop state we may be
		 * unfreezing the SIMQ so that we can kill off
		 * commands (if we've never seen loop before, for example).
		 */
		isp->isp_osinfo.ktmature = 1;
		wasfrozen = isp->isp_osinfo.simqfrozen & SIMQFRZ_LOOPDOWN;
		isp->isp_osinfo.simqfrozen &= ~SIMQFRZ_LOOPDOWN;
		if (wasfrozen && isp->isp_osinfo.simqfrozen == 0) {
			isp_prt(isp, ISP_LOGDEBUG0, "kthread: releasing simq");
			ISPLOCK_2_CAMLOCK(isp);
			xpt_release_simq(isp->isp_sim, 1);
			CAMLOCK_2_ISPLOCK(isp);
		}
		isp_prt(isp, ISP_LOGDEBUG0, "kthread: waiting until called");
#if __FreeBSD_version < 500000
		tsleep(&isp->isp_osinfo.kproc, PRIBIO, "isp_fc_worker", 0);
#else
#ifdef ISP_SMPLOCK
		cv_wait(&isp->isp_osinfo.kthread_cv, &isp->isp_lock);
#else
		(void) tsleep(&isp->isp_osinfo.kthread_cv, PRIBIO, "fc_cv", 0);
#endif
#endif
	}
}

static void
isp_action(struct cam_sim *sim, union ccb *ccb)
{
	int bus, tgt, error;
	ispsoftc_t *isp;
	struct ccb_trans_settings *cts;

	CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("isp_action\n"));

	isp = (ispsoftc_t *)cam_sim_softc(sim);
	ccb->ccb_h.sim_priv.entries[0].field = 0;
	ccb->ccb_h.sim_priv.entries[1].ptr = isp;
	if (isp->isp_state != ISP_RUNSTATE &&
	    ccb->ccb_h.func_code == XPT_SCSI_IO) {
		CAMLOCK_2_ISPLOCK(isp);
		isp_init(isp);
		if (isp->isp_state != ISP_INITSTATE) {
			ISP_UNLOCK(isp);
			/*
			 * Lie. Say it was a selection timeout.
			 */
			ccb->ccb_h.status = CAM_SEL_TIMEOUT | CAM_DEV_QFRZN;
			xpt_freeze_devq(ccb->ccb_h.path, 1);
			xpt_done(ccb);
			return;
		}
		isp->isp_state = ISP_RUNSTATE;
		ISPLOCK_2_CAMLOCK(isp);
	}
	isp_prt(isp, ISP_LOGDEBUG2, "isp_action code %x", ccb->ccb_h.func_code);


	switch (ccb->ccb_h.func_code) {
	case XPT_SCSI_IO:	/* Execute the requested I/O operation */
		/*
		 * Do a couple of preliminary checks...
		 */
		if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) {
			if ((ccb->ccb_h.flags & CAM_CDB_PHYS) != 0) {
				ccb->ccb_h.status = CAM_REQ_INVALID;
				xpt_done(ccb);
				break;
			}
		}
#ifdef	DIAGNOSTIC
		if (ccb->ccb_h.target_id > (ISP_MAX_TARGETS(isp) - 1)) {
			ccb->ccb_h.status = CAM_PATH_INVALID;
		} else if (ccb->ccb_h.target_lun > (ISP_MAX_LUNS(isp) - 1)) {
			ccb->ccb_h.status = CAM_PATH_INVALID;
		}
		if (ccb->ccb_h.status == CAM_PATH_INVALID) {
			isp_prt(isp, ISP_LOGERR,
			    "invalid tgt/lun (%d.%d) in XPT_SCSI_IO",
			    ccb->ccb_h.target_id, ccb->ccb_h.target_lun);
			xpt_done(ccb);
			break;
		}
#endif
		((struct ccb_scsiio *) ccb)->scsi_status = SCSI_STATUS_OK;
		CAMLOCK_2_ISPLOCK(isp);
		error = isp_start((XS_T *) ccb);
		switch (error) {
		case CMD_QUEUED:
			ccb->ccb_h.status |= CAM_SIM_QUEUED;
			if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
				uint64_t ticks = (uint64_t) hz;
				if (ccb->ccb_h.timeout == CAM_TIME_DEFAULT)
					ticks = 60 * 1000 * ticks;
				else
					ticks = ccb->ccb_h.timeout * hz;
				ticks = ((ticks + 999) / 1000) + hz + hz;
				if (ticks >= 0x80000000) {
					isp_prt(isp, ISP_LOGERR,
					    "timeout overflow");
					ticks = 0x7fffffff;
				}
				ccb->ccb_h.timeout_ch = timeout(isp_watchdog,
				    (caddr_t)ccb, (int)ticks);
			} else {
				callout_handle_init(&ccb->ccb_h.timeout_ch);
			}
			ISPLOCK_2_CAMLOCK(isp);
			break;
		case CMD_RQLATER:
			/*
			 * This can only happen for Fibre Channel
			 */
			KASSERT((IS_FC(isp)), ("CMD_RQLATER for FC only"));
			if (FCPARAM(isp)->loop_seen_once == 0 &&
			    isp->isp_osinfo.ktmature) {
				ISPLOCK_2_CAMLOCK(isp);
				XS_SETERR(ccb, CAM_SEL_TIMEOUT);
				xpt_done(ccb);
				break;
			}
#if __FreeBSD_version < 500000
			wakeup(&isp->isp_osinfo.kproc);
#else
#ifdef ISP_SMPLOCK
			cv_signal(&isp->isp_osinfo.kthread_cv);
#else
			wakeup(&isp->isp_osinfo.kthread_cv);
#endif
#endif
			isp_freeze_loopdown(isp, "isp_action(RQLATER)");
			XS_SETERR(ccb, CAM_REQUEUE_REQ);
			ISPLOCK_2_CAMLOCK(isp);
			xpt_done(ccb);
			break;
		case CMD_EAGAIN:
			XS_SETERR(ccb, CAM_REQUEUE_REQ);
			ISPLOCK_2_CAMLOCK(isp);
			xpt_done(ccb);
			break;
		case CMD_COMPLETE:
			isp_done((struct ccb_scsiio *) ccb);
			ISPLOCK_2_CAMLOCK(isp);
			break;
		default:
			isp_prt(isp, ISP_LOGERR,
			    "What's this?
0x%x at %d in file %s", 2255 error, __LINE__, __FILE__); 2256 XS_SETERR(ccb, CAM_REQ_CMP_ERR); 2257 xpt_done(ccb); 2258 ISPLOCK_2_CAMLOCK(isp); 2259 } 2260 break; 2261 2262 #ifdef ISP_TARGET_MODE 2263 case XPT_EN_LUN: /* Enable LUN as a target */ 2264 { 2265 int seq, iok, i; 2266 CAMLOCK_2_ISPLOCK(isp); 2267 iok = isp->isp_osinfo.intsok; 2268 isp->isp_osinfo.intsok = 0; 2269 seq = isp_en_lun(isp, ccb); 2270 if (seq < 0) { 2271 isp->isp_osinfo.intsok = iok; 2272 ISPLOCK_2_CAMLOCK(isp); 2273 xpt_done(ccb); 2274 break; 2275 } 2276 for (i = 0; isp->isp_osinfo.leact[seq] && i < 30 * 1000; i++) { 2277 uint16_t isr, sema, mbox; 2278 if (ISP_READ_ISR(isp, &isr, &sema, &mbox)) { 2279 isp_intr(isp, isr, sema, mbox); 2280 } 2281 DELAY(1000); 2282 } 2283 isp->isp_osinfo.intsok = iok; 2284 ISPLOCK_2_CAMLOCK(isp); 2285 break; 2286 } 2287 case XPT_NOTIFY_ACK: /* recycle notify ack */ 2288 case XPT_IMMED_NOTIFY: /* Add Immediate Notify Resource */ 2289 case XPT_ACCEPT_TARGET_IO: /* Add Accept Target IO Resource */ 2290 { 2291 tstate_t *tptr = 2292 get_lun_statep(isp, XS_CHANNEL(ccb), ccb->ccb_h.target_lun); 2293 if (tptr == NULL) { 2294 ccb->ccb_h.status = CAM_LUN_INVALID; 2295 xpt_done(ccb); 2296 break; 2297 } 2298 ccb->ccb_h.sim_priv.entries[0].field = 0; 2299 ccb->ccb_h.sim_priv.entries[1].ptr = isp; 2300 ccb->ccb_h.flags = 0; 2301 2302 CAMLOCK_2_ISPLOCK(isp); 2303 if (ccb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) { 2304 /* 2305 * Note that the command itself may not be done- 2306 * it may not even have had the first CTIO sent. 2307 */ 2308 tptr->atio_count++; 2309 isp_prt(isp, ISP_LOGTDEBUG0, 2310 "Put FREE ATIO, lun %d, count now %d", 2311 ccb->ccb_h.target_lun, tptr->atio_count); 2312 SLIST_INSERT_HEAD(&tptr->atios, &ccb->ccb_h, 2313 sim_links.sle); 2314 } else if (ccb->ccb_h.func_code == XPT_IMMED_NOTIFY) { 2315 tptr->inot_count++; 2316 isp_prt(isp, ISP_LOGTDEBUG0, 2317 "Put FREE INOT, lun %d, count now %d", 2318 ccb->ccb_h.target_lun, tptr->inot_count); 2319 SLIST_INSERT_HEAD(&tptr->inots, &ccb->ccb_h, 2320 sim_links.sle); 2321 } else { 2322 isp_prt(isp, ISP_LOGWARN, "Got Notify ACK");; 2323 } 2324 rls_lun_statep(isp, tptr); 2325 ccb->ccb_h.status = CAM_REQ_INPROG; 2326 ISPLOCK_2_CAMLOCK(isp); 2327 break; 2328 } 2329 case XPT_CONT_TARGET_IO: 2330 { 2331 CAMLOCK_2_ISPLOCK(isp); 2332 isp_target_start_ctio(isp, ccb); 2333 ISPLOCK_2_CAMLOCK(isp); 2334 break; 2335 } 2336 #endif 2337 case XPT_RESET_DEV: /* BDR the specified SCSI device */ 2338 2339 bus = cam_sim_bus(xpt_path_sim(ccb->ccb_h.path)); 2340 tgt = ccb->ccb_h.target_id; 2341 tgt |= (bus << 16); 2342 2343 CAMLOCK_2_ISPLOCK(isp); 2344 error = isp_control(isp, ISPCTL_RESET_DEV, &tgt); 2345 ISPLOCK_2_CAMLOCK(isp); 2346 if (error) { 2347 ccb->ccb_h.status = CAM_REQ_CMP_ERR; 2348 } else { 2349 ccb->ccb_h.status = CAM_REQ_CMP; 2350 } 2351 xpt_done(ccb); 2352 break; 2353 case XPT_ABORT: /* Abort the specified CCB */ 2354 { 2355 union ccb *accb = ccb->cab.abort_ccb; 2356 CAMLOCK_2_ISPLOCK(isp); 2357 switch (accb->ccb_h.func_code) { 2358 #ifdef ISP_TARGET_MODE 2359 case XPT_ACCEPT_TARGET_IO: 2360 case XPT_IMMED_NOTIFY: 2361 ccb->ccb_h.status = isp_abort_tgt_ccb(isp, ccb); 2362 break; 2363 case XPT_CONT_TARGET_IO: 2364 isp_prt(isp, ISP_LOGERR, "cannot abort CTIOs yet"); 2365 ccb->ccb_h.status = CAM_UA_ABORT; 2366 break; 2367 #endif 2368 case XPT_SCSI_IO: 2369 error = isp_control(isp, ISPCTL_ABORT_CMD, ccb); 2370 if (error) { 2371 ccb->ccb_h.status = CAM_UA_ABORT; 2372 } else { 2373 ccb->ccb_h.status = CAM_REQ_CMP; 2374 } 2375 break; 2376 default: 2377 
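/* Aborting any other kind of CCB is not supported here, so reject the request. */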
ccb->ccb_h.status = CAM_REQ_INVALID; 2378 break; 2379 } 2380 ISPLOCK_2_CAMLOCK(isp); 2381 xpt_done(ccb); 2382 break; 2383 } 2384 #ifdef CAM_NEW_TRAN_CODE 2385 #define IS_CURRENT_SETTINGS(c) (c->type == CTS_TYPE_CURRENT_SETTINGS) 2386 #else 2387 #define IS_CURRENT_SETTINGS(c) (c->flags & CCB_TRANS_CURRENT_SETTINGS) 2388 #endif 2389 case XPT_SET_TRAN_SETTINGS: /* Nexus Settings */ 2390 cts = &ccb->cts; 2391 if (!IS_CURRENT_SETTINGS(cts)) { 2392 ccb->ccb_h.status = CAM_REQ_INVALID; 2393 xpt_done(ccb); 2394 break; 2395 } 2396 tgt = cts->ccb_h.target_id; 2397 CAMLOCK_2_ISPLOCK(isp); 2398 if (IS_SCSI(isp)) { 2399 #ifndef CAM_NEW_TRAN_CODE 2400 sdparam *sdp = isp->isp_param; 2401 uint16_t *dptr; 2402 2403 bus = cam_sim_bus(xpt_path_sim(cts->ccb_h.path)); 2404 2405 sdp += bus; 2406 /* 2407 * We always update (internally) from goal_flags 2408 * so any request to change settings just gets 2409 * vectored to that location. 2410 */ 2411 dptr = &sdp->isp_devparam[tgt].goal_flags; 2412 2413 /* 2414 * Note that these operations affect the 2415 * goal flags (goal_flags), not 2416 * the current state flags. Then we mark 2417 * things so that the next operation to 2418 * this HBA will cause the update to occur. 2419 */ 2420 if (cts->valid & CCB_TRANS_DISC_VALID) { 2421 if ((cts->flags & CCB_TRANS_DISC_ENB) != 0) { 2422 *dptr |= DPARM_DISC; 2423 } else { 2424 *dptr &= ~DPARM_DISC; 2425 } 2426 } 2427 if (cts->valid & CCB_TRANS_TQ_VALID) { 2428 if ((cts->flags & CCB_TRANS_TAG_ENB) != 0) { 2429 *dptr |= DPARM_TQING; 2430 } else { 2431 *dptr &= ~DPARM_TQING; 2432 } 2433 } 2434 if (cts->valid & CCB_TRANS_BUS_WIDTH_VALID) { 2435 switch (cts->bus_width) { 2436 case MSG_EXT_WDTR_BUS_16_BIT: 2437 *dptr |= DPARM_WIDE; 2438 break; 2439 default: 2440 *dptr &= ~DPARM_WIDE; 2441 } 2442 } 2443 /* 2444 * Any SYNC RATE of nonzero and SYNC_OFFSET 2445 * of nonzero will cause us to go to the 2446 * selected (from NVRAM) maximum value for 2447 * this device. At a later point, we'll 2448 * allow finer control. 2449 */ 2450 if ((cts->valid & CCB_TRANS_SYNC_RATE_VALID) && 2451 (cts->valid & CCB_TRANS_SYNC_OFFSET_VALID) && 2452 (cts->sync_offset > 0)) { 2453 *dptr |= DPARM_SYNC; 2454 } else { 2455 *dptr &= ~DPARM_SYNC; 2456 } 2457 *dptr |= DPARM_SAFE_DFLT; 2458 #else 2459 struct ccb_trans_settings_scsi *scsi = 2460 &cts->proto_specific.scsi; 2461 struct ccb_trans_settings_spi *spi = 2462 &cts->xport_specific.spi; 2463 sdparam *sdp = isp->isp_param; 2464 uint16_t *dptr; 2465 2466 bus = cam_sim_bus(xpt_path_sim(cts->ccb_h.path)); 2467 sdp += bus; 2468 /* 2469 * We always update (internally) from goal_flags 2470 * so any request to change settings just gets 2471 * vectored to that location. 
2472 */ 2473 dptr = &sdp->isp_devparam[tgt].goal_flags; 2474 2475 if ((spi->valid & CTS_SPI_VALID_DISC) != 0) { 2476 if ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) != 0) 2477 *dptr |= DPARM_DISC; 2478 else 2479 *dptr &= ~DPARM_DISC; 2480 } 2481 2482 if ((scsi->valid & CTS_SCSI_VALID_TQ) != 0) { 2483 if ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0) 2484 *dptr |= DPARM_TQING; 2485 else 2486 *dptr &= ~DPARM_TQING; 2487 } 2488 2489 if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0) { 2490 if (spi->bus_width == MSG_EXT_WDTR_BUS_16_BIT) 2491 *dptr |= DPARM_WIDE; 2492 else 2493 *dptr &= ~DPARM_WIDE; 2494 } 2495 2496 /* 2497 * XXX: FIX ME 2498 */ 2499 if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) && 2500 (spi->valid & CTS_SPI_VALID_SYNC_RATE) && 2501 (spi->sync_period && spi->sync_offset)) { 2502 *dptr |= DPARM_SYNC; 2503 /* 2504 * XXX: CHECK FOR LEGALITY 2505 */ 2506 sdp->isp_devparam[tgt].goal_period = 2507 spi->sync_period; 2508 sdp->isp_devparam[tgt].goal_offset = 2509 spi->sync_offset; 2510 } else { 2511 *dptr &= ~DPARM_SYNC; 2512 } 2513 #endif 2514 isp_prt(isp, ISP_LOGDEBUG0, 2515 "SET bus %d targ %d to flags %x off %x per %x", 2516 bus, tgt, sdp->isp_devparam[tgt].goal_flags, 2517 sdp->isp_devparam[tgt].goal_offset, 2518 sdp->isp_devparam[tgt].goal_period); 2519 sdp->isp_devparam[tgt].dev_update = 1; 2520 isp->isp_update |= (1 << bus); 2521 } 2522 ISPLOCK_2_CAMLOCK(isp); 2523 ccb->ccb_h.status = CAM_REQ_CMP; 2524 xpt_done(ccb); 2525 break; 2526 case XPT_GET_TRAN_SETTINGS: 2527 cts = &ccb->cts; 2528 tgt = cts->ccb_h.target_id; 2529 CAMLOCK_2_ISPLOCK(isp); 2530 if (IS_FC(isp)) { 2531 #ifndef CAM_NEW_TRAN_CODE 2532 /* 2533 * a lot of normal SCSI things don't make sense. 2534 */ 2535 cts->flags = CCB_TRANS_TAG_ENB | CCB_TRANS_DISC_ENB; 2536 cts->valid = CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID; 2537 /* 2538 * How do you measure the width of a high 2539 * speed serial bus? Well, in bytes. 2540 * 2541 * Offset and period make no sense, though, so we set 2542 * (above) a 'base' transfer speed to be gigabit. 
2543 */ 2544 cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT; 2545 #else 2546 fcparam *fcp = isp->isp_param; 2547 struct ccb_trans_settings_fc *fc = 2548 &cts->xport_specific.fc; 2549 2550 cts->protocol = PROTO_SCSI; 2551 cts->protocol_version = SCSI_REV_2; 2552 cts->transport = XPORT_FC; 2553 cts->transport_version = 0; 2554 2555 fc->valid = CTS_FC_VALID_SPEED; 2556 if (fcp->isp_gbspeed == 2) 2557 fc->bitrate = 200000; 2558 else 2559 fc->bitrate = 100000; 2560 if (tgt > 0 && tgt < MAX_FC_TARG) { 2561 struct lportdb *lp = &fcp->portdb[tgt]; 2562 fc->wwnn = lp->node_wwn; 2563 fc->wwpn = lp->port_wwn; 2564 fc->port = lp->portid; 2565 fc->valid |= CTS_FC_VALID_WWNN | 2566 CTS_FC_VALID_WWPN | CTS_FC_VALID_PORT; 2567 } 2568 #endif 2569 } else { 2570 #ifdef CAM_NEW_TRAN_CODE 2571 struct ccb_trans_settings_scsi *scsi = 2572 &cts->proto_specific.scsi; 2573 struct ccb_trans_settings_spi *spi = 2574 &cts->xport_specific.spi; 2575 #endif 2576 sdparam *sdp = isp->isp_param; 2577 int bus = cam_sim_bus(xpt_path_sim(cts->ccb_h.path)); 2578 uint16_t dval, pval, oval; 2579 2580 sdp += bus; 2581 2582 if (IS_CURRENT_SETTINGS(cts)) { 2583 sdp->isp_devparam[tgt].dev_refresh = 1; 2584 isp->isp_update |= (1 << bus); 2585 (void) isp_control(isp, ISPCTL_UPDATE_PARAMS, 2586 NULL); 2587 dval = sdp->isp_devparam[tgt].actv_flags; 2588 oval = sdp->isp_devparam[tgt].actv_offset; 2589 pval = sdp->isp_devparam[tgt].actv_period; 2590 } else { 2591 dval = sdp->isp_devparam[tgt].nvrm_flags; 2592 oval = sdp->isp_devparam[tgt].nvrm_offset; 2593 pval = sdp->isp_devparam[tgt].nvrm_period; 2594 } 2595 2596 #ifndef CAM_NEW_TRAN_CODE 2597 cts->flags &= ~(CCB_TRANS_DISC_ENB|CCB_TRANS_TAG_ENB); 2598 2599 if (dval & DPARM_DISC) { 2600 cts->flags |= CCB_TRANS_DISC_ENB; 2601 } 2602 if (dval & DPARM_TQING) { 2603 cts->flags |= CCB_TRANS_TAG_ENB; 2604 } 2605 if (dval & DPARM_WIDE) { 2606 cts->bus_width = MSG_EXT_WDTR_BUS_16_BIT; 2607 } else { 2608 cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT; 2609 } 2610 cts->valid = CCB_TRANS_BUS_WIDTH_VALID | 2611 CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID; 2612 2613 if ((dval & DPARM_SYNC) && oval != 0) { 2614 cts->sync_period = pval; 2615 cts->sync_offset = oval; 2616 cts->valid |= 2617 CCB_TRANS_SYNC_RATE_VALID | 2618 CCB_TRANS_SYNC_OFFSET_VALID; 2619 } 2620 #else 2621 cts->protocol = PROTO_SCSI; 2622 cts->protocol_version = SCSI_REV_2; 2623 cts->transport = XPORT_SPI; 2624 cts->transport_version = 2; 2625 2626 scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB; 2627 spi->flags &= ~CTS_SPI_FLAGS_DISC_ENB; 2628 if (dval & DPARM_DISC) { 2629 spi->flags |= CTS_SPI_FLAGS_DISC_ENB; 2630 } 2631 if (dval & DPARM_TQING) { 2632 scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB; 2633 } 2634 if ((dval & DPARM_SYNC) && oval && pval) { 2635 spi->sync_offset = oval; 2636 spi->sync_period = pval; 2637 spi->valid |= CTS_SPI_VALID_SYNC_OFFSET; 2638 spi->valid |= CTS_SPI_VALID_SYNC_RATE; 2639 } 2640 spi->valid |= CTS_SPI_VALID_BUS_WIDTH; 2641 if (dval & DPARM_WIDE) { 2642 spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT; 2643 } else { 2644 spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT; 2645 } 2646 if (cts->ccb_h.target_lun != CAM_LUN_WILDCARD) { 2647 scsi->valid = CTS_SCSI_VALID_TQ; 2648 spi->valid |= CTS_SPI_VALID_DISC; 2649 } else { 2650 scsi->valid = 0; 2651 } 2652 #endif 2653 isp_prt(isp, ISP_LOGDEBUG0, 2654 "GET %s bus %d targ %d to flags %x off %x per %x", 2655 IS_CURRENT_SETTINGS(cts)? 
"ACTIVE" : "NVRAM", 2656 bus, tgt, dval, oval, pval); 2657 } 2658 ISPLOCK_2_CAMLOCK(isp); 2659 ccb->ccb_h.status = CAM_REQ_CMP; 2660 xpt_done(ccb); 2661 break; 2662 2663 case XPT_CALC_GEOMETRY: 2664 #if __FreeBSD_version < 500000 2665 { 2666 struct ccb_calc_geometry *ccg; 2667 u_int32_t secs_per_cylinder; 2668 u_int32_t size_mb; 2669 2670 ccg = &ccb->ccg; 2671 if (ccg->block_size == 0) { 2672 ccb->ccb_h.status = CAM_REQ_INVALID; 2673 xpt_done(ccb); 2674 break; 2675 } 2676 size_mb = ccg->volume_size /((1024L * 1024L) / ccg->block_size); 2677 if (size_mb > 1024) { 2678 ccg->heads = 255; 2679 ccg->secs_per_track = 63; 2680 } else { 2681 ccg->heads = 64; 2682 ccg->secs_per_track = 32; 2683 } 2684 secs_per_cylinder = ccg->heads * ccg->secs_per_track; 2685 ccg->cylinders = ccg->volume_size / secs_per_cylinder; 2686 ccb->ccb_h.status = CAM_REQ_CMP; 2687 xpt_done(ccb); 2688 break; 2689 } 2690 #else 2691 { 2692 cam_calc_geometry(&ccb->ccg, /*extended*/1); 2693 xpt_done(ccb); 2694 break; 2695 } 2696 #endif 2697 case XPT_RESET_BUS: /* Reset the specified bus */ 2698 bus = cam_sim_bus(sim); 2699 CAMLOCK_2_ISPLOCK(isp); 2700 error = isp_control(isp, ISPCTL_RESET_BUS, &bus); 2701 ISPLOCK_2_CAMLOCK(isp); 2702 if (error) 2703 ccb->ccb_h.status = CAM_REQ_CMP_ERR; 2704 else { 2705 if (cam_sim_bus(sim) && isp->isp_path2 != NULL) 2706 xpt_async(AC_BUS_RESET, isp->isp_path2, NULL); 2707 else if (isp->isp_path != NULL) 2708 xpt_async(AC_BUS_RESET, isp->isp_path, NULL); 2709 ccb->ccb_h.status = CAM_REQ_CMP; 2710 } 2711 xpt_done(ccb); 2712 break; 2713 2714 case XPT_TERM_IO: /* Terminate the I/O process */ 2715 ccb->ccb_h.status = CAM_REQ_INVALID; 2716 xpt_done(ccb); 2717 break; 2718 2719 case XPT_PATH_INQ: /* Path routing inquiry */ 2720 { 2721 struct ccb_pathinq *cpi = &ccb->cpi; 2722 2723 cpi->version_num = 1; 2724 #ifdef ISP_TARGET_MODE 2725 cpi->target_sprt = PIT_PROCESSOR | PIT_DISCONNECT | PIT_TERM_IO; 2726 #else 2727 cpi->target_sprt = 0; 2728 #endif 2729 cpi->hba_eng_cnt = 0; 2730 cpi->max_target = ISP_MAX_TARGETS(isp) - 1; 2731 cpi->max_lun = ISP_MAX_LUNS(isp) - 1; 2732 cpi->bus_id = cam_sim_bus(sim); 2733 if (IS_FC(isp)) { 2734 cpi->hba_misc = PIM_NOBUSRESET; 2735 /* 2736 * Because our loop ID can shift from time to time, 2737 * make our initiator ID out of range of our bus. 2738 */ 2739 cpi->initiator_id = cpi->max_target + 1; 2740 2741 /* 2742 * Set base transfer capabilities for Fibre Channel. 2743 * Technically not correct because we don't know 2744 * what media we're running on top of- but we'll 2745 * look good if we always say 100MB/s. 2746 */ 2747 if (FCPARAM(isp)->isp_gbspeed == 2) 2748 cpi->base_transfer_speed = 200000; 2749 else 2750 cpi->base_transfer_speed = 100000; 2751 cpi->hba_inquiry = PI_TAG_ABLE; 2752 #ifdef CAM_NEW_TRAN_CODE 2753 cpi->transport = XPORT_FC; 2754 cpi->transport_version = 0; /* WHAT'S THIS FOR? */ 2755 #endif 2756 } else { 2757 sdparam *sdp = isp->isp_param; 2758 sdp += cam_sim_bus(xpt_path_sim(cpi->ccb_h.path)); 2759 cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16; 2760 cpi->hba_misc = 0; 2761 cpi->initiator_id = sdp->isp_initiator_id; 2762 cpi->base_transfer_speed = 3300; 2763 #ifdef CAM_NEW_TRAN_CODE 2764 cpi->transport = XPORT_SPI; 2765 cpi->transport_version = 2; /* WHAT'S THIS FOR? 
*/ 2766 #endif 2767 } 2768 #ifdef CAM_NEW_TRAN_CODE 2769 cpi->protocol = PROTO_SCSI; 2770 cpi->protocol_version = SCSI_REV_2; 2771 #endif 2772 strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); 2773 strncpy(cpi->hba_vid, "Qlogic", HBA_IDLEN); 2774 strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN); 2775 cpi->unit_number = cam_sim_unit(sim); 2776 cpi->ccb_h.status = CAM_REQ_CMP; 2777 xpt_done(ccb); 2778 break; 2779 } 2780 default: 2781 ccb->ccb_h.status = CAM_REQ_INVALID; 2782 xpt_done(ccb); 2783 break; 2784 } 2785 } 2786 2787 #define ISPDDB (CAM_DEBUG_INFO|CAM_DEBUG_TRACE|CAM_DEBUG_CDB) 2788 void 2789 isp_done(struct ccb_scsiio *sccb) 2790 { 2791 ispsoftc_t *isp = XS_ISP(sccb); 2792 2793 if (XS_NOERR(sccb)) 2794 XS_SETERR(sccb, CAM_REQ_CMP); 2795 2796 if ((sccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP && 2797 (sccb->scsi_status != SCSI_STATUS_OK)) { 2798 sccb->ccb_h.status &= ~CAM_STATUS_MASK; 2799 if ((sccb->scsi_status == SCSI_STATUS_CHECK_COND) && 2800 (sccb->ccb_h.status & CAM_AUTOSNS_VALID) == 0) { 2801 sccb->ccb_h.status |= CAM_AUTOSENSE_FAIL; 2802 } else { 2803 sccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR; 2804 } 2805 } 2806 2807 sccb->ccb_h.status &= ~CAM_SIM_QUEUED; 2808 if ((sccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { 2809 if ((sccb->ccb_h.status & CAM_DEV_QFRZN) == 0) { 2810 sccb->ccb_h.status |= CAM_DEV_QFRZN; 2811 xpt_freeze_devq(sccb->ccb_h.path, 1); 2812 isp_prt(isp, ISP_LOGDEBUG0, 2813 "freeze devq %d.%d cam sts %x scsi sts %x", 2814 sccb->ccb_h.target_id, sccb->ccb_h.target_lun, 2815 sccb->ccb_h.status, sccb->scsi_status); 2816 } 2817 } 2818 2819 if ((CAM_DEBUGGED(sccb->ccb_h.path, ISPDDB)) && 2820 (sccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { 2821 xpt_print_path(sccb->ccb_h.path); 2822 isp_prt(isp, ISP_LOGINFO, 2823 "cam completion status 0x%x", sccb->ccb_h.status); 2824 } 2825 2826 XS_CMD_S_DONE(sccb); 2827 if (XS_CMD_WDOG_P(sccb) == 0) { 2828 untimeout(isp_watchdog, (caddr_t)sccb, sccb->ccb_h.timeout_ch); 2829 if (XS_CMD_GRACE_P(sccb)) { 2830 isp_prt(isp, ISP_LOGDEBUG2, 2831 "finished command on borrowed time"); 2832 } 2833 XS_CMD_S_CLEAR(sccb); 2834 ISPLOCK_2_CAMLOCK(isp); 2835 xpt_done((union ccb *) sccb); 2836 CAMLOCK_2_ISPLOCK(isp); 2837 } 2838 } 2839 2840 int 2841 isp_async(ispsoftc_t *isp, ispasync_t cmd, void *arg) 2842 { 2843 int bus, rv = 0; 2844 switch (cmd) { 2845 case ISPASYNC_NEW_TGT_PARAMS: 2846 { 2847 #ifdef CAM_NEW_TRAN_CODE 2848 struct ccb_trans_settings_scsi *scsi; 2849 struct ccb_trans_settings_spi *spi; 2850 #endif 2851 int flags, tgt; 2852 sdparam *sdp = isp->isp_param; 2853 struct ccb_trans_settings cts; 2854 struct cam_path *tmppath; 2855 2856 memset(&cts, 0, sizeof (struct ccb_trans_settings)); 2857 2858 tgt = *((int *)arg); 2859 bus = (tgt >> 16) & 0xffff; 2860 tgt &= 0xffff; 2861 sdp += bus; 2862 ISPLOCK_2_CAMLOCK(isp); 2863 if (xpt_create_path(&tmppath, NULL, 2864 cam_sim_path(bus? 
isp->isp_sim2 : isp->isp_sim), 2865 tgt, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { 2866 CAMLOCK_2_ISPLOCK(isp); 2867 isp_prt(isp, ISP_LOGWARN, 2868 "isp_async cannot make temp path for %d.%d", 2869 tgt, bus); 2870 rv = -1; 2871 break; 2872 } 2873 CAMLOCK_2_ISPLOCK(isp); 2874 flags = sdp->isp_devparam[tgt].actv_flags; 2875 #ifdef CAM_NEW_TRAN_CODE 2876 cts.type = CTS_TYPE_CURRENT_SETTINGS; 2877 cts.protocol = PROTO_SCSI; 2878 cts.transport = XPORT_SPI; 2879 2880 scsi = &cts.proto_specific.scsi; 2881 spi = &cts.xport_specific.spi; 2882 2883 if (flags & DPARM_TQING) { 2884 scsi->valid |= CTS_SCSI_VALID_TQ; 2885 scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB; 2886 spi->flags |= CTS_SPI_FLAGS_TAG_ENB; 2887 } 2888 2889 if (flags & DPARM_DISC) { 2890 spi->valid |= CTS_SPI_VALID_DISC; 2891 spi->flags |= CTS_SPI_FLAGS_DISC_ENB; 2892 } 2893 spi->valid |= CTS_SPI_VALID_BUS_WIDTH; 2894 if (flags & DPARM_WIDE) { 2895 spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT; 2896 } else { 2897 spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT; 2898 } 2899 if (flags & DPARM_SYNC) { 2900 spi->valid |= CTS_SPI_VALID_SYNC_RATE; 2901 spi->valid |= CTS_SPI_VALID_SYNC_OFFSET; 2902 spi->sync_period = sdp->isp_devparam[tgt].actv_period; 2903 spi->sync_offset = sdp->isp_devparam[tgt].actv_offset; 2904 } 2905 #else 2906 cts.flags = CCB_TRANS_CURRENT_SETTINGS; 2907 cts.valid = CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID; 2908 if (flags & DPARM_DISC) { 2909 cts.flags |= CCB_TRANS_DISC_ENB; 2910 } 2911 if (flags & DPARM_TQING) { 2912 cts.flags |= CCB_TRANS_TAG_ENB; 2913 } 2914 cts.valid |= CCB_TRANS_BUS_WIDTH_VALID; 2915 cts.bus_width = (flags & DPARM_WIDE)? 2916 MSG_EXT_WDTR_BUS_16_BIT : MSG_EXT_WDTR_BUS_8_BIT; 2917 cts.sync_period = sdp->isp_devparam[tgt].actv_period; 2918 cts.sync_offset = sdp->isp_devparam[tgt].actv_offset; 2919 if (flags & DPARM_SYNC) { 2920 cts.valid |= 2921 CCB_TRANS_SYNC_RATE_VALID | 2922 CCB_TRANS_SYNC_OFFSET_VALID; 2923 } 2924 #endif 2925 isp_prt(isp, ISP_LOGDEBUG2, 2926 "NEW_TGT_PARAMS bus %d tgt %d period %x offset %x flags %x", 2927 bus, tgt, sdp->isp_devparam[tgt].actv_period, 2928 sdp->isp_devparam[tgt].actv_offset, flags); 2929 xpt_setup_ccb(&cts.ccb_h, tmppath, 1); 2930 ISPLOCK_2_CAMLOCK(isp); 2931 xpt_async(AC_TRANSFER_NEG, tmppath, &cts); 2932 xpt_free_path(tmppath); 2933 CAMLOCK_2_ISPLOCK(isp); 2934 break; 2935 } 2936 case ISPASYNC_BUS_RESET: 2937 bus = *((int *)arg); 2938 isp_prt(isp, ISP_LOGINFO, "SCSI bus reset on bus %d detected", 2939 bus); 2940 if (bus > 0 && isp->isp_path2) { 2941 ISPLOCK_2_CAMLOCK(isp); 2942 xpt_async(AC_BUS_RESET, isp->isp_path2, NULL); 2943 CAMLOCK_2_ISPLOCK(isp); 2944 } else if (isp->isp_path) { 2945 ISPLOCK_2_CAMLOCK(isp); 2946 xpt_async(AC_BUS_RESET, isp->isp_path, NULL); 2947 CAMLOCK_2_ISPLOCK(isp); 2948 } 2949 break; 2950 case ISPASYNC_LIP: 2951 if (isp->isp_path) { 2952 isp_freeze_loopdown(isp, "ISPASYNC_LIP"); 2953 } 2954 isp_prt(isp, ISP_LOGINFO, "LIP Received"); 2955 break; 2956 case ISPASYNC_LOOP_RESET: 2957 if (isp->isp_path) { 2958 isp_freeze_loopdown(isp, "ISPASYNC_LOOP_RESET"); 2959 } 2960 isp_prt(isp, ISP_LOGINFO, "Loop Reset Received"); 2961 break; 2962 case ISPASYNC_LOOP_DOWN: 2963 if (isp->isp_path) { 2964 isp_freeze_loopdown(isp, "ISPASYNC_LOOP_DOWN"); 2965 } 2966 isp_prt(isp, ISP_LOGINFO, "Loop DOWN"); 2967 break; 2968 case ISPASYNC_LOOP_UP: 2969 /* 2970 * Now we just note that Loop has come up. We don't 2971 * actually do anything because we're waiting for a 2972 * Change Notify before activating the FC cleanup 2973 * thread to look at the state of the loop again. 
2974 */ 2975 isp_prt(isp, ISP_LOGINFO, "Loop UP"); 2976 break; 2977 case ISPASYNC_PROMENADE: 2978 { 2979 const char *fmt = "Target %d (Loop 0x%x) Port ID 0x%x " 2980 "(role %s) %s\n Port WWN 0x%08x%08x\n Node WWN 0x%08x%08x"; 2981 static const char *roles[4] = { 2982 "(none)", "Target", "Initiator", "Target/Initiator" 2983 }; 2984 fcparam *fcp = isp->isp_param; 2985 int tgt = *((int *) arg); 2986 #if __FreeBSD_version >= 500000 2987 int is_tgt_mask = (SVC3_TGT_ROLE >> SVC3_ROLE_SHIFT); 2988 struct cam_path *tmppath; 2989 #endif 2990 struct lportdb *lp = &fcp->portdb[tgt]; 2991 2992 isp_prt(isp, ISP_LOGINFO, fmt, tgt, lp->loopid, lp->portid, 2993 roles[lp->roles & 0x3], 2994 (lp->valid)? "Arrived" : "Departed", 2995 (uint32_t) (lp->port_wwn >> 32), 2996 (uint32_t) (lp->port_wwn & 0xffffffffLL), 2997 (uint32_t) (lp->node_wwn >> 32), 2998 (uint32_t) (lp->node_wwn & 0xffffffffLL)); 2999 3000 ISPLOCK_2_CAMLOCK(isp); 3001 #if __FreeBSD_version >= 500000 3002 if (xpt_create_path(&tmppath, NULL, cam_sim_path(isp->isp_sim), 3003 (target_id_t)tgt, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { 3004 CAMLOCK_2_ISPLOCK(isp); 3005 break; 3006 } 3007 /* 3008 * Policy: only announce targets. 3009 */ 3010 if (lp->roles & is_tgt_mask) { 3011 if (lp->valid) { 3012 xpt_async(AC_FOUND_DEVICE, tmppath, NULL); 3013 } else { 3014 xpt_async(AC_LOST_DEVICE, tmppath, NULL); 3015 } 3016 } 3017 xpt_free_path(tmppath); 3018 #endif 3019 CAMLOCK_2_ISPLOCK(isp); 3020 break; 3021 } 3022 case ISPASYNC_CHANGE_NOTIFY: 3023 if (arg == ISPASYNC_CHANGE_PDB) { 3024 isp_prt(isp, ISP_LOGINFO, 3025 "Port Database Changed"); 3026 } else if (arg == ISPASYNC_CHANGE_SNS) { 3027 isp_prt(isp, ISP_LOGINFO, 3028 "Name Server Database Changed"); 3029 } 3030 #if __FreeBSD_version < 500000 3031 wakeup(&isp->isp_osinfo.kproc); 3032 #else 3033 #ifdef ISP_SMPLOCK 3034 cv_signal(&isp->isp_osinfo.kthread_cv); 3035 #else 3036 wakeup(&isp->isp_osinfo.kthread_cv); 3037 #endif 3038 #endif 3039 break; 3040 case ISPASYNC_FABRIC_DEV: 3041 { 3042 int target, base, lim; 3043 fcparam *fcp = isp->isp_param; 3044 struct lportdb *lp = NULL; 3045 struct lportdb *clp = (struct lportdb *) arg; 3046 char *pt; 3047 3048 switch (clp->port_type) { 3049 case 1: 3050 pt = " N_Port"; 3051 break; 3052 case 2: 3053 pt = " NL_Port"; 3054 break; 3055 case 3: 3056 pt = "F/NL_Port"; 3057 break; 3058 case 0x7f: 3059 pt = " Nx_Port"; 3060 break; 3061 case 0x81: 3062 pt = " F_port"; 3063 break; 3064 case 0x82: 3065 pt = " FL_Port"; 3066 break; 3067 case 0x84: 3068 pt = " E_port"; 3069 break; 3070 default: 3071 pt = " "; 3072 break; 3073 } 3074 3075 isp_prt(isp, ISP_LOGINFO, 3076 "%s Fabric Device @ PortID 0x%x", pt, clp->portid); 3077 3078 /* 3079 * If we don't have an initiator role we bail. 3080 * 3081 * We just use ISPASYNC_FABRIC_DEV for announcement purposes. 3082 */ 3083 3084 if ((isp->isp_role & ISP_ROLE_INITIATOR) == 0) { 3085 break; 3086 } 3087 3088 /* 3089 * Is this entry for us? If so, we bail. 3090 */ 3091 3092 if (fcp->isp_portid == clp->portid) { 3093 break; 3094 } 3095 3096 /* 3097 * Else, the default policy is to find room for it in 3098 * our local port database. Later, when we execute 3099 * the call to isp_pdb_sync either this newly arrived 3100 * or already logged in device will be (re)announced. 3101 */ 3102 3103 if (fcp->isp_topo == TOPO_FL_PORT) 3104 base = FC_SNS_ID+1; 3105 else 3106 base = 0; 3107 3108 if (fcp->isp_topo == TOPO_N_PORT) 3109 lim = 1; 3110 else 3111 lim = MAX_FC_TARG; 3112 3113 /* 3114 * Is it already in our list? 
3115 */ 3116 for (target = base; target < lim; target++) { 3117 if (target >= FL_PORT_ID && target <= FC_SNS_ID) { 3118 continue; 3119 } 3120 lp = &fcp->portdb[target]; 3121 if (lp->port_wwn == clp->port_wwn && 3122 lp->node_wwn == clp->node_wwn) { 3123 lp->fabric_dev = 1; 3124 break; 3125 } 3126 } 3127 if (target < lim) { 3128 break; 3129 } 3130 for (target = base; target < lim; target++) { 3131 if (target >= FL_PORT_ID && target <= FC_SNS_ID) { 3132 continue; 3133 } 3134 lp = &fcp->portdb[target]; 3135 if (lp->port_wwn == 0) { 3136 break; 3137 } 3138 } 3139 if (target == lim) { 3140 isp_prt(isp, ISP_LOGWARN, 3141 "out of space for fabric devices"); 3142 break; 3143 } 3144 lp->port_type = clp->port_type; 3145 lp->fc4_type = clp->fc4_type; 3146 lp->node_wwn = clp->node_wwn; 3147 lp->port_wwn = clp->port_wwn; 3148 lp->portid = clp->portid; 3149 lp->fabric_dev = 1; 3150 break; 3151 } 3152 #ifdef ISP_TARGET_MODE 3153 case ISPASYNC_TARGET_NOTIFY: 3154 { 3155 tmd_notify_t *nt = arg; 3156 isp_prt(isp, ISP_LOGALL, 3157 "target notify code 0x%x", nt->nt_ncode); 3158 break; 3159 } 3160 case ISPASYNC_TARGET_ACTION: 3161 switch (((isphdr_t *)arg)->rqs_entry_type) { 3162 default: 3163 isp_prt(isp, ISP_LOGWARN, 3164 "event 0x%x for unhandled target action", 3165 ((isphdr_t *)arg)->rqs_entry_type); 3166 break; 3167 case RQSTYPE_NOTIFY: 3168 if (IS_SCSI(isp)) { 3169 rv = isp_handle_platform_notify_scsi(isp, 3170 (in_entry_t *) arg); 3171 } else { 3172 rv = isp_handle_platform_notify_fc(isp, 3173 (in_fcentry_t *) arg); 3174 } 3175 break; 3176 case RQSTYPE_ATIO: 3177 rv = isp_handle_platform_atio(isp, (at_entry_t *) arg); 3178 break; 3179 case RQSTYPE_ATIO2: 3180 rv = isp_handle_platform_atio2(isp, (at2_entry_t *)arg); 3181 break; 3182 case RQSTYPE_CTIO3: 3183 case RQSTYPE_CTIO2: 3184 case RQSTYPE_CTIO: 3185 rv = isp_handle_platform_ctio(isp, arg); 3186 break; 3187 case RQSTYPE_ENABLE_LUN: 3188 case RQSTYPE_MODIFY_LUN: 3189 isp_ledone(isp, (lun_entry_t *) arg); 3190 break; 3191 } 3192 break; 3193 #endif 3194 case ISPASYNC_FW_CRASH: 3195 { 3196 uint16_t mbox1, mbox6; 3197 mbox1 = ISP_READ(isp, OUTMAILBOX1); 3198 if (IS_DUALBUS(isp)) { 3199 mbox6 = ISP_READ(isp, OUTMAILBOX6); 3200 } else { 3201 mbox6 = 0; 3202 } 3203 isp_prt(isp, ISP_LOGERR, 3204 "Internal Firmware Error on bus %d @ RISC Address 0x%x", 3205 mbox6, mbox1); 3206 #ifdef ISP_FW_CRASH_DUMP 3207 /* 3208 * XXX: really need a thread to do this right. 3209 */ 3210 if (IS_FC(isp)) { 3211 FCPARAM(isp)->isp_fwstate = FW_CONFIG_WAIT; 3212 FCPARAM(isp)->isp_loopstate = LOOP_NIL; 3213 isp_freeze_loopdown(isp, "f/w crash"); 3214 isp_fw_dump(isp); 3215 } 3216 isp_reinit(isp); 3217 isp_async(isp, ISPASYNC_FW_RESTARTED, NULL); 3218 #endif 3219 break; 3220 } 3221 case ISPASYNC_UNHANDLED_RESPONSE: 3222 break; 3223 default: 3224 isp_prt(isp, ISP_LOGERR, "unknown isp_async event %d", cmd); 3225 break; 3226 } 3227 return (rv); 3228 } 3229 3230 3231 /* 3232 * Locks are held before coming here. 3233 */ 3234 void 3235 isp_uninit(ispsoftc_t *isp) 3236 { 3237 ISP_WRITE(isp, HCCR, HCCR_CMD_RESET); 3238 DISABLE_INTS(isp); 3239 } 3240 3241 void 3242 isp_prt(ispsoftc_t *isp, int level, const char *fmt, ...) 3243 { 3244 va_list ap; 3245 if (level != ISP_LOGALL && (level & isp->isp_dblev) == 0) { 3246 return; 3247 } 3248 printf("%s: ", device_get_nameunit(isp->isp_dev)); 3249 va_start(ap, fmt); 3250 vprintf(fmt, ap); 3251 va_end(ap); 3252 printf("\n"); 3253 } 3254