/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2002-2010 Adaptec, Inc.
 * Copyright (c) 2010-2012 PMC-Sierra, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * CAM front-end for communicating with non-DASD devices
 */

#include "opt_aacraid.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/mutex.h>

#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/cam_debug.h>
#include <cam/cam_periph.h>
#include <cam/cam_sim.h>
#include <cam/cam_xpt_sim.h>
#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_message.h>

#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/disk.h>

#include <machine/md_var.h>
#include <machine/bus.h>
#include <sys/rman.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include <dev/aacraid/aacraid_reg.h>
#include <sys/aac_ioctl.h>
#include <dev/aacraid/aacraid_debug.h>
#include <dev/aacraid/aacraid_var.h>
#include <dev/aacraid/aacraid_endian.h>

#ifndef CAM_NEW_TRAN_CODE
#define CAM_NEW_TRAN_CODE	1
#endif

#ifndef SVPD_SUPPORTED_PAGE_LIST
struct scsi_vpd_supported_page_list
{
	u_int8_t device;
	u_int8_t page_code;
#define	SVPD_SUPPORTED_PAGE_LIST	0x00
	u_int8_t reserved;
	u_int8_t length;	/* number of VPD entries */
#define	SVPD_SUPPORTED_PAGES_SIZE	251
	u_int8_t list[SVPD_SUPPORTED_PAGES_SIZE];
};
#endif

/************************** Version Compatibility *************************/
#define	aac_sim_alloc	cam_sim_alloc

struct aac_cam {
	device_t		dev;
	struct aac_sim		*inf;
	struct cam_sim		*sim;
	struct cam_path		*path;
};

static int aac_cam_probe(device_t dev);
static int aac_cam_attach(device_t dev);
static int aac_cam_detach(device_t dev);
static void aac_cam_action(struct cam_sim *, union ccb *);
static void aac_cam_poll(struct cam_sim *);
static void aac_cam_complete(struct aac_command *);
static void aac_container_complete(struct aac_command *);
static void aac_cam_rescan(struct aac_softc *sc, uint32_t channel,
	uint32_t target_id);
static void aac_set_scsi_error(struct aac_softc *sc, union ccb *ccb,
	u_int8_t status, u_int8_t key, u_int8_t asc, u_int8_t ascq);
static int aac_load_map_command_sg(struct aac_softc *, struct aac_command *);
static u_int64_t aac_eval_blockno(u_int8_t *);
static void aac_container_rw_command(struct cam_sim *, union ccb *, u_int8_t *);
static void aac_container_special_command(struct cam_sim *, union ccb *,
	u_int8_t *);
static void aac_passthrough_command(struct cam_sim *, union ccb *);

static u_int32_t aac_cam_reset_bus(struct cam_sim *, union ccb *);
static u_int32_t aac_cam_abort_ccb(struct cam_sim *, union ccb *);
static u_int32_t aac_cam_term_io(struct cam_sim *, union ccb *);

static device_method_t aacraid_pass_methods[] = {
	DEVMETHOD(device_probe,		aac_cam_probe),
	DEVMETHOD(device_attach,	aac_cam_attach),
	DEVMETHOD(device_detach,	aac_cam_detach),
	{ 0, 0 }
};

static driver_t aacraid_pass_driver = {
	"aacraidp",
	aacraid_pass_methods,
	sizeof(struct aac_cam)
};

DRIVER_MODULE(aacraidp, aacraid, aacraid_pass_driver, 0, 0);
MODULE_DEPEND(aacraidp, cam, 1, 1, 1);

MALLOC_DEFINE(M_AACRAIDCAM, "aacraidcam", "AACRAID CAM info");

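/*
 * Fill in fixed-format sense data for a CHECK CONDITION and mark the
 * CCB as carrying valid autosense data.  The caller is still
 * responsible for completing the CCB.
 */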
static void
aac_set_scsi_error(struct aac_softc *sc, union ccb *ccb, u_int8_t status,
    u_int8_t key, u_int8_t asc, u_int8_t ascq)
{
	struct scsi_sense_data_fixed *sense =
	    (struct scsi_sense_data_fixed *)&ccb->csio.sense_data;

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "Error %d!", status);

	ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
	ccb->csio.scsi_status = status;
	if (status == SCSI_STATUS_CHECK_COND) {
		ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
		bzero(&ccb->csio.sense_data, ccb->csio.sense_len);
		ccb->csio.sense_data.error_code =
		    SSD_CURRENT_ERROR | SSD_ERRCODE_VALID;
		sense->flags = key;
		if (ccb->csio.sense_len >= 14) {
			sense->extra_len = 6;
			sense->add_sense_code = asc;
			sense->add_sense_code_qual = ascq;
		}
	}
}

static void
aac_cam_rescan(struct aac_softc *sc, uint32_t channel, uint32_t target_id)
{
	union ccb *ccb;
	struct aac_sim *sim;
	struct aac_cam *camsc;

	if (target_id == AAC_CAM_TARGET_WILDCARD)
		target_id = CAM_TARGET_WILDCARD;

	TAILQ_FOREACH(sim, &sc->aac_sim_tqh, sim_link) {
		camsc = sim->aac_cam;
		if (camsc == NULL || camsc->inf == NULL ||
		    camsc->inf->BusNumber != channel)
			continue;

		ccb = xpt_alloc_ccb_nowait();
		if (ccb == NULL) {
			device_printf(sc->aac_dev,
			    "Cannot allocate ccb for bus rescan.\n");
			return;
		}

		if (xpt_create_path(&ccb->ccb_h.path, xpt_periph,
		    cam_sim_path(camsc->sim),
		    target_id, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
			xpt_free_ccb(ccb);
			device_printf(sc->aac_dev,
			    "Cannot create path for bus rescan.\n");
			return;
		}
		xpt_rescan(ccb);
		break;
	}
}

static void
aac_cam_event(struct aac_softc *sc, struct aac_event *event, void *arg)
{
	union ccb *ccb;
	struct aac_cam *camsc;

	switch (event->ev_type) {
	case AAC_EVENT_CMFREE:
		ccb = arg;
		camsc = ccb->ccb_h.sim_priv.entries[0].ptr;
		free(event, M_AACRAIDCAM);
		xpt_release_simq(camsc->sim, 1);
		ccb->ccb_h.status = CAM_REQUEUE_REQ;
		xpt_done(ccb);
		break;
	default:
		device_printf(sc->aac_dev, "unknown event %d in aac_cam\n",
		    event->ev_type);
		break;
	}

	return;
}

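/*
 * The aacraidp children are created by the parent aacraid(4) driver,
 * so there is nothing to match against here; probing always succeeds.
 */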
static int
aac_cam_probe(device_t dev)
{
	struct aac_cam *camsc;

	camsc = (struct aac_cam *)device_get_softc(dev);
	if (!camsc->inf)
		return (0);
	fwprintf(camsc->inf->aac_sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
	return (0);
}

static int
aac_cam_detach(device_t dev)
{
	struct aac_softc *sc;
	struct aac_cam *camsc;

	camsc = (struct aac_cam *)device_get_softc(dev);
	if (!camsc->inf)
		return (0);
	sc = camsc->inf->aac_sc;
	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
	camsc->inf->aac_cam = NULL;

	mtx_lock(&sc->aac_io_lock);

	xpt_async(AC_LOST_DEVICE, camsc->path, NULL);
	xpt_free_path(camsc->path);
	xpt_bus_deregister(cam_sim_path(camsc->sim));
	cam_sim_free(camsc->sim, /*free_devq*/TRUE);

	sc->cam_rescan_cb = NULL;

	mtx_unlock(&sc->aac_io_lock);

	return (0);
}

/*
 * Register the driver as a CAM SIM
 */
static int
aac_cam_attach(device_t dev)
{
	struct cam_devq *devq;
	struct cam_sim *sim;
	struct cam_path *path;
	struct aac_cam *camsc;
	struct aac_sim *inf;

	camsc = (struct aac_cam *)device_get_softc(dev);
	inf = (struct aac_sim *)device_get_ivars(dev);
	if (!inf)
		return (EIO);
	fwprintf(inf->aac_sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
	camsc->inf = inf;
	camsc->inf->aac_cam = camsc;

	devq = cam_simq_alloc(inf->TargetsPerBus);
	if (devq == NULL)
		return (EIO);

	sim = aac_sim_alloc(aac_cam_action, aac_cam_poll, "aacraidp", camsc,
	    device_get_unit(dev), &inf->aac_sc->aac_io_lock, 1, 1, devq);
	if (sim == NULL) {
		cam_simq_free(devq);
		return (EIO);
	}

	/* Since every bus has its own sim, every bus 'appears' as bus 0 */
	mtx_lock(&inf->aac_sc->aac_io_lock);
	if (aac_xpt_bus_register(sim, dev, 0) != CAM_SUCCESS) {
		cam_sim_free(sim, TRUE);
		mtx_unlock(&inf->aac_sc->aac_io_lock);
		return (EIO);
	}

	if (xpt_create_path(&path, NULL, cam_sim_path(sim),
	    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
		xpt_bus_deregister(cam_sim_path(sim));
		cam_sim_free(sim, TRUE);
		mtx_unlock(&inf->aac_sc->aac_io_lock);
		return (EIO);
	}

	inf->aac_sc->cam_rescan_cb = aac_cam_rescan;
	mtx_unlock(&inf->aac_sc->aac_io_lock);

	camsc->sim = sim;
	camsc->path = path;

	return (0);
}

static u_int64_t
aac_eval_blockno(u_int8_t *cmdp)
{
	u_int64_t blockno;

	switch (cmdp[0]) {
	case READ_6:
	case WRITE_6:
		blockno = scsi_3btoul(((struct scsi_rw_6 *)cmdp)->addr);
		break;
	case READ_10:
	case WRITE_10:
		blockno = scsi_4btoul(((struct scsi_rw_10 *)cmdp)->addr);
		break;
	case READ_12:
	case WRITE_12:
		blockno = scsi_4btoul(((struct scsi_rw_12 *)cmdp)->addr);
		break;
	case READ_16:
	case WRITE_16:
		blockno = scsi_8btou64(((struct scsi_rw_16 *)cmdp)->addr);
		break;
	default:
		blockno = 0;
		break;
	}
	return (blockno);
}

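/*
 * Queue a read/write to a container (logical volume).  The FIB layout
 * is chosen to match the controller's capabilities: RawIo2, RawIo,
 * 64-bit block commands, or the classic 32-bit ones.
 */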
static void
aac_container_rw_command(struct cam_sim *sim, union ccb *ccb, u_int8_t *cmdp)
{
	struct aac_cam *camsc;
	struct aac_softc *sc;
	struct aac_command *cm;
	struct aac_fib *fib;
	u_int64_t blockno;

	camsc = (struct aac_cam *)cam_sim_softc(sim);
	sc = camsc->inf->aac_sc;
	mtx_assert(&sc->aac_io_lock, MA_OWNED);

	if (aacraid_alloc_command(sc, &cm)) {
		struct aac_event *event;

		xpt_freeze_simq(sim, 1);
		ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
		ccb->ccb_h.sim_priv.entries[0].ptr = camsc;
		event = malloc(sizeof(struct aac_event), M_AACRAIDCAM,
		    M_NOWAIT | M_ZERO);
		if (event == NULL) {
			device_printf(sc->aac_dev,
			    "Warning, out of memory for event\n");
			return;
		}
		event->ev_callback = aac_cam_event;
		event->ev_arg = ccb;
		event->ev_type = AAC_EVENT_CMFREE;
		aacraid_add_event(sc, event);
		return;
	}

	fib = cm->cm_fib;
	switch (ccb->ccb_h.flags & CAM_DIR_MASK) {
	case CAM_DIR_IN:
		cm->cm_flags |= AAC_CMD_DATAIN;
		break;
	case CAM_DIR_OUT:
		cm->cm_flags |= AAC_CMD_DATAOUT;
		break;
	case CAM_DIR_NONE:
		break;
	default:
		cm->cm_flags |= AAC_CMD_DATAIN | AAC_CMD_DATAOUT;
		break;
	}

	blockno = aac_eval_blockno(cmdp);

	cm->cm_complete = aac_container_complete;
	cm->cm_ccb = ccb;
	cm->cm_timestamp = time_uptime;
	cm->cm_data = (void *)ccb->csio.data_ptr;
	cm->cm_datalen = ccb->csio.dxfer_len;

	fib->Header.Size = sizeof(struct aac_fib_header);
	fib->Header.XferState =
	    AAC_FIBSTATE_HOSTOWNED |
	    AAC_FIBSTATE_INITIALISED |
	    AAC_FIBSTATE_EMPTY |
	    AAC_FIBSTATE_FROMHOST |
	    AAC_FIBSTATE_REXPECTED |
	    AAC_FIBSTATE_NORM |
	    AAC_FIBSTATE_ASYNC |
	    AAC_FIBSTATE_FAST_RESPONSE;

	if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE2) {
		struct aac_raw_io2 *raw;
		/* NOTE: LE conversion handled at aacraid_map_command_sg() */
		raw = (struct aac_raw_io2 *)&fib->data[0];
		bzero(raw, sizeof(struct aac_raw_io2));
		fib->Header.Command = RawIo2;
		raw->strtBlkLow = (u_int32_t)blockno;
		raw->strtBlkHigh = (u_int32_t)(blockno >> 32);
		raw->byteCnt = cm->cm_datalen;
		raw->ldNum = ccb->ccb_h.target_id;
		fib->Header.Size += sizeof(struct aac_raw_io2);
		cm->cm_sgtable = (struct aac_sg_table *)raw->sge;
		if (cm->cm_flags & AAC_CMD_DATAIN)
			raw->flags = RIO2_IO_TYPE_READ | RIO2_SG_FORMAT_IEEE1212;
		else
			raw->flags = RIO2_IO_TYPE_WRITE | RIO2_SG_FORMAT_IEEE1212;
	} else if (sc->flags & AAC_FLAGS_RAW_IO) {
		struct aac_raw_io *raw;
		/* NOTE: LE conversion handled at aacraid_map_command_sg() */
		raw = (struct aac_raw_io *)&fib->data[0];
		bzero(raw, sizeof(struct aac_raw_io));
		fib->Header.Command = RawIo;
		raw->BlockNumber = blockno;
		raw->ByteCount = cm->cm_datalen;
		raw->ContainerId = ccb->ccb_h.target_id;
		fib->Header.Size += sizeof(struct aac_raw_io);
		cm->cm_sgtable = (struct aac_sg_table *)&raw->SgMapRaw;
		if (cm->cm_flags & AAC_CMD_DATAIN)
			raw->Flags = 1;
	} else if ((sc->flags & AAC_FLAGS_SG_64BIT) == 0) {
		fib->Header.Command = ContainerCommand;
		if (cm->cm_flags & AAC_CMD_DATAIN) {
			struct aac_blockread *br;
			br = (struct aac_blockread *)&fib->data[0];
			br->Command = VM_CtBlockRead;
			br->ContainerId = ccb->ccb_h.target_id;
			br->BlockNumber = blockno;
			br->ByteCount = cm->cm_datalen;
			aac_blockread_tole(br);
			fib->Header.Size += sizeof(struct aac_blockread);
			cm->cm_sgtable = &br->SgMap;
		} else {
			struct aac_blockwrite *bw;
			bw = (struct aac_blockwrite *)&fib->data[0];
			bw->Command = VM_CtBlockWrite;
			bw->ContainerId = ccb->ccb_h.target_id;
			bw->BlockNumber = blockno;
			bw->ByteCount = cm->cm_datalen;
			bw->Stable = CUNSTABLE;
			aac_blockwrite_tole(bw);
			fib->Header.Size += sizeof(struct aac_blockwrite);
			cm->cm_sgtable = &bw->SgMap;
		}
	} else {
		fib->Header.Command = ContainerCommand64;
		if (cm->cm_flags & AAC_CMD_DATAIN) {
			struct aac_blockread64 *br;
			br = (struct aac_blockread64 *)&fib->data[0];
			br->Command = VM_CtHostRead64;
			br->ContainerId = ccb->ccb_h.target_id;
			br->SectorCount = cm->cm_datalen / AAC_BLOCK_SIZE;
			br->BlockNumber = blockno;
			br->Pad = 0;
			br->Flags = 0;
			aac_blockread64_tole(br);
			fib->Header.Size += sizeof(struct aac_blockread64);
			cm->cm_sgtable = (struct aac_sg_table *)&br->SgMap64;
		} else {
			struct aac_blockwrite64 *bw;
			bw = (struct aac_blockwrite64 *)&fib->data[0];
			bw->Command = VM_CtHostWrite64;
			bw->ContainerId = ccb->ccb_h.target_id;
			bw->SectorCount = cm->cm_datalen / AAC_BLOCK_SIZE;
			bw->BlockNumber = blockno;
			bw->Pad = 0;
			bw->Flags = 0;
			aac_blockwrite64_tole(bw);
			fib->Header.Size += sizeof(struct aac_blockwrite64);
			cm->cm_sgtable = (struct aac_sg_table *)&bw->SgMap64;
		}
	}
	aac_enqueue_ready(cm);
	aacraid_startio(cm->cm_sc);
}

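/*
 * Emulate the non-I/O SCSI commands for containers inside the driver.
 * Only START STOP UNIT is forwarded to the firmware, and only when
 * the controller advertises power-management support.
 */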
static void
aac_container_special_command(struct cam_sim *sim, union ccb *ccb,
    u_int8_t *cmdp)
{
	struct aac_cam *camsc;
	struct aac_softc *sc;
	struct aac_container *co;

	camsc = (struct aac_cam *)cam_sim_softc(sim);
	sc = camsc->inf->aac_sc;
	mtx_assert(&sc->aac_io_lock, MA_OWNED);

	TAILQ_FOREACH(co, &sc->aac_container_tqh, co_link) {
		fwprintf(sc, HBA_FLAGS_DBG_ERROR_B,
		    "found container %d search for %d",
		    co->co_mntobj.ObjectId, ccb->ccb_h.target_id);
		if (co->co_mntobj.ObjectId == ccb->ccb_h.target_id)
			break;
	}
	if (co == NULL || ccb->ccb_h.target_lun != 0) {
		fwprintf(sc, HBA_FLAGS_DBG_ERROR_B,
		    "Container not present: cmd 0x%x id %d lun %d len %d",
		    *cmdp, ccb->ccb_h.target_id, ccb->ccb_h.target_lun,
		    ccb->csio.dxfer_len);
		ccb->ccb_h.status = CAM_DEV_NOT_THERE;
		xpt_done(ccb);
		return;
	}

	if (ccb->csio.dxfer_len)
		bzero(ccb->csio.data_ptr, ccb->csio.dxfer_len);

	switch (*cmdp) {
	case INQUIRY:
	{
		struct scsi_inquiry *inq = (struct scsi_inquiry *)cmdp;

		fwprintf(sc, HBA_FLAGS_DBG_COMM_B,
		    "Container INQUIRY id %d lun %d len %d VPD 0x%x Page 0x%x",
		    ccb->ccb_h.target_id, ccb->ccb_h.target_lun,
		    ccb->csio.dxfer_len, inq->byte2, inq->page_code);
		if (!(inq->byte2 & SI_EVPD)) {
			struct scsi_inquiry_data *p =
			    (struct scsi_inquiry_data *)ccb->csio.data_ptr;
			if (inq->page_code != 0) {
				aac_set_scsi_error(sc, ccb,
				    SCSI_STATUS_CHECK_COND,
				    SSD_KEY_ILLEGAL_REQUEST, 0x24, 0x00);
				xpt_done(ccb);
				return;
			}
			p->device = T_DIRECT;
			p->version = SCSI_REV_SPC2;
			p->response_format = 2;
			if (ccb->csio.dxfer_len >= 36) {
				p->additional_length = 31;
				p->flags = SID_WBus16|SID_Sync|SID_CmdQue;
				/* OEM Vendor defines */
				strncpy(p->vendor, "Adaptec ",
				    sizeof(p->vendor));
				strncpy(p->product, "Array ",
				    sizeof(p->product));
				strncpy(p->revision, "V1.0",
				    sizeof(p->revision));
			}
		} else {
			if (inq->page_code == SVPD_SUPPORTED_PAGE_LIST) {
				struct scsi_vpd_supported_page_list *p =
				    (struct scsi_vpd_supported_page_list *)
				    ccb->csio.data_ptr;
				p->device = T_DIRECT;
				p->page_code = SVPD_SUPPORTED_PAGE_LIST;
				p->length = 2;
				p->list[0] = SVPD_SUPPORTED_PAGE_LIST;
				p->list[1] = SVPD_UNIT_SERIAL_NUMBER;
			} else if (inq->page_code == SVPD_UNIT_SERIAL_NUMBER) {
				struct scsi_vpd_unit_serial_number *p =
				    (struct scsi_vpd_unit_serial_number *)
				    ccb->csio.data_ptr;
				p->device = T_DIRECT;
				p->page_code = SVPD_UNIT_SERIAL_NUMBER;
				p->length = sprintf((char *)p->serial_num,
				    "%08X%02X", co->co_uid,
				    ccb->ccb_h.target_id);
			} else {
				aac_set_scsi_error(sc, ccb,
				    SCSI_STATUS_CHECK_COND,
				    SSD_KEY_ILLEGAL_REQUEST, 0x24, 0x00);
				xpt_done(ccb);
				return;
			}
		}
		ccb->ccb_h.status = CAM_REQ_CMP;
		break;
	}

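	/*
	 * The data buffer was zeroed above, so completing the request
	 * without filling it in returns an empty LUN list.
	 */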
	case REPORT_LUNS:
		fwprintf(sc, HBA_FLAGS_DBG_COMM_B,
		    "Container REPORT_LUNS id %d lun %d len %d",
		    ccb->ccb_h.target_id, ccb->ccb_h.target_lun,
		    ccb->csio.dxfer_len);
		ccb->ccb_h.status = CAM_REQ_CMP;
		break;

	case START_STOP:
	{
		struct scsi_start_stop_unit *ss =
		    (struct scsi_start_stop_unit *)cmdp;
		fwprintf(sc, HBA_FLAGS_DBG_COMM_B,
		    "Container START_STOP id %d lun %d len %d",
		    ccb->ccb_h.target_id, ccb->ccb_h.target_lun,
		    ccb->csio.dxfer_len);
		if (sc->aac_support_opt2 & AAC_SUPPORTED_POWER_MANAGEMENT) {
			struct aac_command *cm;
			struct aac_fib *fib;
			struct aac_cnt_config *ccfg;

			if (aacraid_alloc_command(sc, &cm)) {
				struct aac_event *event;

				xpt_freeze_simq(sim, 1);
				ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
				ccb->ccb_h.sim_priv.entries[0].ptr = camsc;
				event = malloc(sizeof(struct aac_event),
				    M_AACRAIDCAM, M_NOWAIT | M_ZERO);
				if (event == NULL) {
					device_printf(sc->aac_dev,
					    "Warning, out of memory for event\n");
					return;
				}
				event->ev_callback = aac_cam_event;
				event->ev_arg = ccb;
				event->ev_type = AAC_EVENT_CMFREE;
				aacraid_add_event(sc, event);
				return;
			}

			fib = cm->cm_fib;
			cm->cm_timestamp = time_uptime;
			cm->cm_datalen = 0;

			fib->Header.Size =
			    sizeof(struct aac_fib_header) +
			    sizeof(struct aac_cnt_config);
			fib->Header.XferState =
			    AAC_FIBSTATE_HOSTOWNED |
			    AAC_FIBSTATE_INITIALISED |
			    AAC_FIBSTATE_EMPTY |
			    AAC_FIBSTATE_FROMHOST |
			    AAC_FIBSTATE_REXPECTED |
			    AAC_FIBSTATE_NORM |
			    AAC_FIBSTATE_ASYNC |
			    AAC_FIBSTATE_FAST_RESPONSE;
			fib->Header.Command = ContainerCommand;

			/* Start unit */
			ccfg = (struct aac_cnt_config *)&fib->data[0];
			bzero(ccfg, sizeof (*ccfg) - CT_PACKET_SIZE);
			ccfg->Command = VM_ContainerConfig;
			ccfg->CTCommand.command = CT_PM_DRIVER_SUPPORT;
			ccfg->CTCommand.param[0] = (ss->how & SSS_START ?
			    AAC_PM_DRIVERSUP_START_UNIT :
			    AAC_PM_DRIVERSUP_STOP_UNIT);
			ccfg->CTCommand.param[1] = co->co_mntobj.ObjectId;
			ccfg->CTCommand.param[2] = 0;	/* 1 - immediate */
			aac_cnt_config_tole(ccfg);

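			/*
			 * Issued synchronously; the first dword of the
			 * returned FIB data carries the firmware status.
			 */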
			if (aacraid_wait_command(cm) != 0 ||
			    le32toh(*(u_int32_t *)&fib->data[0]) != 0) {
				printf("Power Management: Error start/stop container %d\n",
				    co->co_mntobj.ObjectId);
			}
			aacraid_release_command(cm);
		}
		ccb->ccb_h.status = CAM_REQ_CMP;
		break;
	}

	case TEST_UNIT_READY:
		fwprintf(sc, HBA_FLAGS_DBG_COMM_B,
		    "Container TEST_UNIT_READY id %d lun %d len %d",
		    ccb->ccb_h.target_id, ccb->ccb_h.target_lun,
		    ccb->csio.dxfer_len);
		ccb->ccb_h.status = CAM_REQ_CMP;
		break;

	case REQUEST_SENSE:
		fwprintf(sc, HBA_FLAGS_DBG_COMM_B,
		    "Container REQUEST_SENSE id %d lun %d len %d",
		    ccb->ccb_h.target_id, ccb->ccb_h.target_lun,
		    ccb->csio.dxfer_len);
		ccb->ccb_h.status = CAM_REQ_CMP;
		break;

	case READ_CAPACITY:
	{
		struct scsi_read_capacity_data *p =
		    (struct scsi_read_capacity_data *)ccb->csio.data_ptr;
		fwprintf(sc, HBA_FLAGS_DBG_COMM_B,
		    "Container READ_CAPACITY id %d lun %d len %d",
		    ccb->ccb_h.target_id, ccb->ccb_h.target_lun,
		    ccb->csio.dxfer_len);
		scsi_ulto4b(co->co_mntobj.ObjExtension.BlockDevice.BlockSize,
		    p->length);
		/* check if greater than 2TB */
		if (co->co_mntobj.CapacityHigh) {
			if (sc->flags & AAC_FLAGS_LBA_64BIT)
				scsi_ulto4b(0xffffffff, p->addr);
		} else {
			scsi_ulto4b(co->co_mntobj.Capacity-1, p->addr);
		}
		ccb->ccb_h.status = CAM_REQ_CMP;
		break;
	}

	case SERVICE_ACTION_IN:
	{
		struct scsi_read_capacity_data_long *p =
		    (struct scsi_read_capacity_data_long *)
		    ccb->csio.data_ptr;
		fwprintf(sc, HBA_FLAGS_DBG_COMM_B,
		    "Container SERVICE_ACTION_IN id %d lun %d len %d",
		    ccb->ccb_h.target_id, ccb->ccb_h.target_lun,
		    ccb->csio.dxfer_len);
		if (((struct scsi_read_capacity_16 *)cmdp)->service_action !=
		    SRC16_SERVICE_ACTION) {
			aac_set_scsi_error(sc, ccb, SCSI_STATUS_CHECK_COND,
			    SSD_KEY_ILLEGAL_REQUEST, 0x24, 0x00);
			xpt_done(ccb);
			return;
		}
		scsi_ulto4b(co->co_mntobj.ObjExtension.BlockDevice.BlockSize,
		    p->length);
		scsi_ulto4b(co->co_mntobj.CapacityHigh, p->addr);
		scsi_ulto4b(co->co_mntobj.Capacity-1, &p->addr[4]);

		if (ccb->csio.dxfer_len >= 14) {
			u_int32_t mapping =
			    co->co_mntobj.ObjExtension.BlockDevice.bdLgclPhysMap;
			p->prot_lbppbe = 0;
			while (mapping > 1) {
				mapping >>= 1;
				p->prot_lbppbe++;
			}
			p->prot_lbppbe &= 0x0f;
		}

		ccb->ccb_h.status = CAM_REQ_CMP;
		break;
	}

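	/*
	 * Minimal MODE SENSE emulation: a header, an optional block
	 * descriptor, and the control mode page only.
	 */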
	case MODE_SENSE_6:
	{
		struct scsi_mode_sense_6 *msp =
		    (struct scsi_mode_sense_6 *)cmdp;
		struct ms6_data {
			struct scsi_mode_hdr_6 hd;
			struct scsi_mode_block_descr bd;
			char pages;
		} *p = (struct ms6_data *)ccb->csio.data_ptr;
		char *pagep;
		int return_all_pages = FALSE;

		fwprintf(sc, HBA_FLAGS_DBG_COMM_B,
		    "Container MODE_SENSE id %d lun %d len %d page %d",
		    ccb->ccb_h.target_id, ccb->ccb_h.target_lun,
		    ccb->csio.dxfer_len, msp->page);
		p->hd.datalen = sizeof(struct scsi_mode_hdr_6) - 1;
		if (co->co_mntobj.ContentState & AAC_FSCS_READONLY)
			p->hd.dev_specific = 0x80;	/* WP */
		p->hd.dev_specific |= 0x10;	/* DPOFUA */
		if (msp->byte2 & SMS_DBD) {
			p->hd.block_descr_len = 0;
		} else {
			p->hd.block_descr_len =
			    sizeof(struct scsi_mode_block_descr);
			p->hd.datalen += p->hd.block_descr_len;
			scsi_ulto3b(co->co_mntobj.ObjExtension.BlockDevice.BlockSize,
			    p->bd.block_len);
			if (co->co_mntobj.Capacity > 0xffffff ||
			    co->co_mntobj.CapacityHigh) {
				p->bd.num_blocks[0] = 0xff;
				p->bd.num_blocks[1] = 0xff;
				p->bd.num_blocks[2] = 0xff;
			} else {
				p->bd.num_blocks[0] = (u_int8_t)
				    (co->co_mntobj.Capacity >> 16);
				p->bd.num_blocks[1] = (u_int8_t)
				    (co->co_mntobj.Capacity >> 8);
				p->bd.num_blocks[2] = (u_int8_t)
				    (co->co_mntobj.Capacity);
			}
		}
		pagep = &p->pages;
		switch (msp->page & SMS_PAGE_CODE) {
		case SMS_ALL_PAGES_PAGE:
			return_all_pages = TRUE;
		case SMS_CONTROL_MODE_PAGE:
		{
			struct scsi_control_page *cp =
			    (struct scsi_control_page *)pagep;

			if (ccb->csio.dxfer_len <= p->hd.datalen + 8) {
				aac_set_scsi_error(sc, ccb,
				    SCSI_STATUS_CHECK_COND,
				    SSD_KEY_ILLEGAL_REQUEST, 0x24, 0x00);
				xpt_done(ccb);
				return;
			}
			cp->page_code = SMS_CONTROL_MODE_PAGE;
			cp->page_length = 6;
			p->hd.datalen += 8;
			pagep += 8;
			if (!return_all_pages)
				break;
		}
		case SMS_VENDOR_SPECIFIC_PAGE:
			break;
		default:
			aac_set_scsi_error(sc, ccb, SCSI_STATUS_CHECK_COND,
			    SSD_KEY_ILLEGAL_REQUEST, 0x24, 0x00);
			xpt_done(ccb);
			return;
		}
		ccb->ccb_h.status = CAM_REQ_CMP;
		break;
	}

	case SYNCHRONIZE_CACHE:
		fwprintf(sc, HBA_FLAGS_DBG_COMM_B,
		    "Container SYNCHRONIZE_CACHE id %d lun %d len %d",
		    ccb->ccb_h.target_id, ccb->ccb_h.target_lun,
		    ccb->csio.dxfer_len);
		ccb->ccb_h.status = CAM_REQ_CMP;
		break;

	default:
		fwprintf(sc, HBA_FLAGS_DBG_ERROR_B,
		    "Container unsupp. cmd 0x%x id %d lun %d len %d",
		    *cmdp, ccb->ccb_h.target_id, ccb->ccb_h.target_lun,
		    ccb->csio.dxfer_len);
		ccb->ccb_h.status = CAM_REQ_CMP; /*CAM_REQ_INVALID*/
		break;
	}
	xpt_done(ccb);
}

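/*
 * Pass a SCSI command through to a device on one of the controller's
 * physical buses, wrapped in an SRB FIB.
 */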
static void
aac_passthrough_command(struct cam_sim *sim, union ccb *ccb)
{
	struct aac_cam *camsc;
	struct aac_softc *sc;
	struct aac_command *cm;
	struct aac_fib *fib;
	struct aac_srb *srb;

	camsc = (struct aac_cam *)cam_sim_softc(sim);
	sc = camsc->inf->aac_sc;
	mtx_assert(&sc->aac_io_lock, MA_OWNED);

	if (aacraid_alloc_command(sc, &cm)) {
		struct aac_event *event;

		xpt_freeze_simq(sim, 1);
		ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
		ccb->ccb_h.sim_priv.entries[0].ptr = camsc;
		event = malloc(sizeof(struct aac_event), M_AACRAIDCAM,
		    M_NOWAIT | M_ZERO);
		if (event == NULL) {
			device_printf(sc->aac_dev,
			    "Warning, out of memory for event\n");
			return;
		}
		event->ev_callback = aac_cam_event;
		event->ev_arg = ccb;
		event->ev_type = AAC_EVENT_CMFREE;
		aacraid_add_event(sc, event);
		return;
	}

	fib = cm->cm_fib;
	switch (ccb->ccb_h.flags & CAM_DIR_MASK) {
	case CAM_DIR_IN:
		cm->cm_flags |= AAC_CMD_DATAIN;
		break;
	case CAM_DIR_OUT:
		cm->cm_flags |= AAC_CMD_DATAOUT;
		break;
	case CAM_DIR_NONE:
		break;
	default:
		cm->cm_flags |= AAC_CMD_DATAIN | AAC_CMD_DATAOUT;
		break;
	}

	srb = (struct aac_srb *)&fib->data[0];
	srb->function = AAC_SRB_FUNC_EXECUTE_SCSI;
	if (cm->cm_flags & (AAC_CMD_DATAIN|AAC_CMD_DATAOUT))
		srb->flags = AAC_SRB_FLAGS_UNSPECIFIED_DIRECTION;
	if (cm->cm_flags & AAC_CMD_DATAIN)
		srb->flags = AAC_SRB_FLAGS_DATA_IN;
	else if (cm->cm_flags & AAC_CMD_DATAOUT)
		srb->flags = AAC_SRB_FLAGS_DATA_OUT;
	else
		srb->flags = AAC_SRB_FLAGS_NO_DATA_XFER;

	/*
	 * Copy the CDB into the SRB.  It's only 6-16 bytes,
	 * so a copy is not too expensive.
	 */
	srb->cdb_len = ccb->csio.cdb_len;
	if (ccb->ccb_h.flags & CAM_CDB_POINTER)
		bcopy(ccb->csio.cdb_io.cdb_ptr, (u_int8_t *)&srb->cdb[0],
		    srb->cdb_len);
	else
		bcopy(ccb->csio.cdb_io.cdb_bytes, (u_int8_t *)&srb->cdb[0],
		    srb->cdb_len);

	/* Set command */
	fib->Header.Command = (sc->flags & AAC_FLAGS_SG_64BIT) ?
	    ScsiPortCommandU64 : ScsiPortCommand;
	fib->Header.Size = sizeof(struct aac_fib_header) +
	    sizeof(struct aac_srb);

	/* Map the s/g list */
	cm->cm_sgtable = &srb->sg_map;
	if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
		/*
		 * Arrange things so that the S/G
		 * map will get set up automagically
		 */
		cm->cm_data = (void *)ccb->csio.data_ptr;
		cm->cm_datalen = ccb->csio.dxfer_len;
		srb->data_len = ccb->csio.dxfer_len;
	} else {
		cm->cm_data = NULL;
		cm->cm_datalen = 0;
		srb->data_len = 0;
	}

	srb->bus = camsc->inf->BusNumber - 1;	/* Bus no. rel. to the card */
	srb->target = ccb->ccb_h.target_id;
	srb->lun = ccb->ccb_h.target_lun;
	srb->timeout = ccb->ccb_h.timeout;	/* XXX */
	srb->retry_limit = 0;
	aac_srb_tole(srb);

	cm->cm_complete = aac_cam_complete;
	cm->cm_ccb = ccb;
	cm->cm_timestamp = time_uptime;

	fib->Header.XferState =
	    AAC_FIBSTATE_HOSTOWNED |
	    AAC_FIBSTATE_INITIALISED |
	    AAC_FIBSTATE_FROMHOST |
	    AAC_FIBSTATE_REXPECTED |
	    AAC_FIBSTATE_NORM |
	    AAC_FIBSTATE_ASYNC |
	    AAC_FIBSTATE_FAST_RESPONSE;

	aac_enqueue_ready(cm);
	aacraid_startio(cm->cm_sc);
}

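/*
 * CAM action entry point.  Immediate requests are completed inline;
 * XPT_SCSI_IO is routed to the container emulation or to the SRB
 * passthrough path, depending on the bus type.
 */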
static void
aac_cam_action(struct cam_sim *sim, union ccb *ccb)
{
	struct aac_cam *camsc;
	struct aac_softc *sc;

	camsc = (struct aac_cam *)cam_sim_softc(sim);
	sc = camsc->inf->aac_sc;
	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
	mtx_assert(&sc->aac_io_lock, MA_OWNED);

	/* Synchronous ops, and ops that don't require communication with the
	 * controller */
	switch(ccb->ccb_h.func_code) {
	case XPT_SCSI_IO:
		/* This is handled down below */
		break;
	case XPT_CALC_GEOMETRY:
	{
		struct ccb_calc_geometry *ccg;
		u_int32_t size_mb;
		u_int32_t secs_per_cylinder;

		ccg = &ccb->ccg;
		size_mb = ccg->volume_size /
		    ((1024L * 1024L) / ccg->block_size);
		if (size_mb >= (2 * 1024)) {		/* 2GB */
			ccg->heads = 255;
			ccg->secs_per_track = 63;
		} else if (size_mb >= (1 * 1024)) {	/* 1GB */
			ccg->heads = 128;
			ccg->secs_per_track = 32;
		} else {
			ccg->heads = 64;
			ccg->secs_per_track = 32;
		}
		secs_per_cylinder = ccg->heads * ccg->secs_per_track;
		ccg->cylinders = ccg->volume_size / secs_per_cylinder;

		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		return;
	}
	case XPT_PATH_INQ:
	{
		struct ccb_pathinq *cpi = &ccb->cpi;

		cpi->version_num = 1;
		cpi->target_sprt = 0;
		cpi->hba_eng_cnt = 0;
		cpi->max_target = camsc->inf->TargetsPerBus - 1;
		cpi->max_lun = 7;	/* Per the controller spec */
		cpi->initiator_id = camsc->inf->InitiatorBusId;
		cpi->bus_id = camsc->inf->BusNumber;
		cpi->maxio = AAC_MAXIO_SIZE(sc);

		/*
		 * Resetting via the passthrough or parallel bus scan
		 * causes problems.
		 */
		cpi->hba_misc = PIM_NOBUSRESET;
		cpi->hba_inquiry = PI_TAG_ABLE;
		cpi->base_transfer_speed = 300000;
#ifdef CAM_NEW_TRAN_CODE
		cpi->hba_misc |= PIM_SEQSCAN;
		cpi->protocol = PROTO_SCSI;
		cpi->transport = XPORT_SAS;
		cpi->transport_version = 0;
		cpi->protocol_version = SCSI_REV_SPC2;
#endif
		strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
		strlcpy(cpi->hba_vid, "PMC-Sierra", HBA_IDLEN);
		strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
		cpi->unit_number = cam_sim_unit(sim);
		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		return;
	}
	case XPT_GET_TRAN_SETTINGS:
	{
#ifdef CAM_NEW_TRAN_CODE
		struct ccb_trans_settings_scsi *scsi =
		    &ccb->cts.proto_specific.scsi;
		struct ccb_trans_settings_spi *spi =
		    &ccb->cts.xport_specific.spi;
		ccb->cts.protocol = PROTO_SCSI;
		ccb->cts.protocol_version = SCSI_REV_SPC2;
		ccb->cts.transport = XPORT_SAS;
		ccb->cts.transport_version = 0;
		scsi->valid = CTS_SCSI_VALID_TQ;
		scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;
		spi->valid |= CTS_SPI_VALID_DISC;
		spi->flags |= CTS_SPI_FLAGS_DISC_ENB;
#else
		ccb->cts.flags = ~(CCB_TRANS_DISC_ENB | CCB_TRANS_TAG_ENB);
		ccb->cts.valid = CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID;
#endif
		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		return;
	}
	case XPT_SET_TRAN_SETTINGS:
		ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
		xpt_done(ccb);
		return;
	case XPT_RESET_BUS:
		if (!(sc->flags & AAC_FLAGS_CAM_NORESET) &&
		    camsc->inf->BusType != CONTAINER_BUS) {
			ccb->ccb_h.status = aac_cam_reset_bus(sim, ccb);
		} else {
			ccb->ccb_h.status = CAM_REQ_CMP;
		}
		xpt_done(ccb);
		return;
	case XPT_RESET_DEV:
		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		return;
	case XPT_ABORT:
		ccb->ccb_h.status = aac_cam_abort_ccb(sim, ccb);
		xpt_done(ccb);
		return;
	case XPT_TERM_IO:
		ccb->ccb_h.status = aac_cam_term_io(sim, ccb);
		xpt_done(ccb);
		return;
	default:
		device_printf(sc->aac_dev, "Unsupported command 0x%x\n",
		    ccb->ccb_h.func_code);
		ccb->ccb_h.status = CAM_PROVIDE_FAIL;
		xpt_done(ccb);
		return;
	}

	/* Async ops that require communication with the controller */
	if (camsc->inf->BusType == CONTAINER_BUS) {
		u_int8_t *cmdp;

		if (ccb->ccb_h.flags & CAM_CDB_POINTER)
			cmdp = ccb->csio.cdb_io.cdb_ptr;
		else
			cmdp = &ccb->csio.cdb_io.cdb_bytes[0];

		if (*cmdp==READ_6 || *cmdp==WRITE_6 || *cmdp==READ_10 ||
		    *cmdp==WRITE_10 || *cmdp==READ_12 || *cmdp==WRITE_12 ||
		    *cmdp==READ_16 || *cmdp==WRITE_16)
			aac_container_rw_command(sim, ccb, cmdp);
		else
			aac_container_special_command(sim, ccb, cmdp);
	} else {
		aac_passthrough_command(sim, ccb);
	}
}

static void
aac_cam_poll(struct cam_sim *sim)
{
	/*
	 * Pinging the interrupt routine isn't very safe, nor is it
	 * really necessary.  Do nothing.
	 */
}

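/*
 * Completion handler for container I/O: translate the firmware status
 * in the FIB data into a CAM status.
 */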
static void
aac_container_complete(struct aac_command *cm)
{
	union ccb *ccb;
	u_int32_t status;

	fwprintf(cm->cm_sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
	ccb = cm->cm_ccb;
	status = le32toh(((u_int32_t *)cm->cm_fib->data)[0]);

	if (cm->cm_flags & AAC_CMD_RESET) {
		ccb->ccb_h.status = CAM_SCSI_BUS_RESET;
	} else if (status == ST_OK) {
		ccb->ccb_h.status = CAM_REQ_CMP;
	} else if (status == ST_NOT_READY) {
		ccb->ccb_h.status = CAM_BUSY;
	} else {
		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
	}

	aacraid_release_command(cm);
	xpt_done(ccb);
}

static void
aac_cam_complete(struct aac_command *cm)
{
	union ccb *ccb;
	struct aac_srb_response *srbr;
	struct aac_softc *sc;

	sc = cm->cm_sc;
	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
	ccb = cm->cm_ccb;
	srbr = (struct aac_srb_response *)&cm->cm_fib->data[0];
	aac_srb_response_toh(srbr);

	if (cm->cm_flags & AAC_CMD_FASTRESP) {
		/* fast response */
		srbr->srb_status = CAM_REQ_CMP;
		srbr->scsi_status = SCSI_STATUS_OK;
		srbr->sense_len = 0;
	}

	if (cm->cm_flags & AAC_CMD_RESET) {
		ccb->ccb_h.status = CAM_SCSI_BUS_RESET;
	} else if (srbr->fib_status != 0) {
		device_printf(sc->aac_dev, "Passthru FIB failed!\n");
		ccb->ccb_h.status = CAM_REQ_ABORTED;
	} else {
		/*
		 * The SRB error codes just happen to match the CAM error
		 * codes.  How convenient!
		 */
		ccb->ccb_h.status = srbr->srb_status;

		/* Take care of SCSI_IO ops. */
		if (ccb->ccb_h.func_code == XPT_SCSI_IO) {
			u_int8_t command, device;

			ccb->csio.scsi_status = srbr->scsi_status;

			/* Take care of autosense */
			if (srbr->sense_len) {
				int sense_len, scsi_sense_len;

				scsi_sense_len = sizeof(struct scsi_sense_data);
				bzero(&ccb->csio.sense_data, scsi_sense_len);
				sense_len = (srbr->sense_len >
				    scsi_sense_len) ? scsi_sense_len :
				    srbr->sense_len;
				bcopy(&srbr->sense[0], &ccb->csio.sense_data,
				    sense_len);
				ccb->csio.sense_len = sense_len;
				ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
				// scsi_sense_print(&ccb->csio);
			}

			/* If this is an inquiry command, fake things out */
			if (ccb->ccb_h.flags & CAM_CDB_POINTER)
				command = ccb->csio.cdb_io.cdb_ptr[0];
			else
				command = ccb->csio.cdb_io.cdb_bytes[0];

			if (command == INQUIRY) {
				/* Ignore Data Overrun errors on INQUIRY */
				if ((ccb->ccb_h.status & CAM_STATUS_MASK) ==
				    CAM_DATA_RUN_ERR)
					ccb->ccb_h.status = (ccb->ccb_h.status &
					    ~CAM_STATUS_MASK) | CAM_REQ_CMP;

				if (ccb->ccb_h.status == CAM_REQ_CMP) {
					device = ccb->csio.data_ptr[0] & 0x1f;
					/*
					 * We want DASD and PROC devices to only be
					 * visible through the pass device.
					 */
					if ((device == T_DIRECT &&
					    !(sc->aac_feature_bits & AAC_SUPPL_SUPPORTED_JBOD)) ||
					    (device == T_PROCESSOR))
						ccb->csio.data_ptr[0] =
						    ((device & 0xe0) | T_NODEVICE);

					/* handle phys. components of a log. drive */
					if (ccb->csio.data_ptr[0] & 0x20) {
						if (sc->hint_flags & 8) {
							/* expose phys. device (daXX) */
							ccb->csio.data_ptr[0] &= 0xdf;
						} else {
							/* phys. device only visible
							 * through pass device (passXX) */
							ccb->csio.data_ptr[0] |= 0x10;
						}
					}
				} else if (ccb->ccb_h.status == CAM_SEL_TIMEOUT &&
				    ccb->ccb_h.target_lun != 0) {
					/* fix for INQUIRYs on Lun>0 */
					ccb->ccb_h.status = CAM_DEV_NOT_THERE;
				}
			}
		}
	}

	aacraid_release_command(cm);
	xpt_done(ccb);
}

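/*
 * Ask the firmware to reset a physical bus with a synchronous
 * VM_Ioctl/ResetBus request.
 */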
static u_int32_t
aac_cam_reset_bus(struct cam_sim *sim, union ccb *ccb)
{
	struct aac_command *cm;
	struct aac_fib *fib;
	struct aac_softc *sc;
	struct aac_cam *camsc;
	struct aac_vmioctl *vmi;
	struct aac_resetbus *rbc;
	u_int32_t rval;

	camsc = (struct aac_cam *)cam_sim_softc(sim);
	sc = camsc->inf->aac_sc;

	if (sc == NULL) {
		printf("aac: Null sc?\n");
		return (CAM_REQ_ABORTED);
	}

	if (aacraid_alloc_command(sc, &cm)) {
		struct aac_event *event;

		xpt_freeze_simq(sim, 1);
		ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
		ccb->ccb_h.sim_priv.entries[0].ptr = camsc;
		event = malloc(sizeof(struct aac_event), M_AACRAIDCAM,
		    M_NOWAIT | M_ZERO);
		if (event == NULL) {
			device_printf(sc->aac_dev,
			    "Warning, out of memory for event\n");
			return (CAM_REQ_ABORTED);
		}
		event->ev_callback = aac_cam_event;
		event->ev_arg = ccb;
		event->ev_type = AAC_EVENT_CMFREE;
		aacraid_add_event(sc, event);
		return (CAM_REQ_ABORTED);
	}

	fib = cm->cm_fib;
	cm->cm_timestamp = time_uptime;
	cm->cm_datalen = 0;

	fib->Header.Size =
	    sizeof(struct aac_fib_header) + sizeof(struct aac_vmioctl);
	fib->Header.XferState =
	    AAC_FIBSTATE_HOSTOWNED |
	    AAC_FIBSTATE_INITIALISED |
	    AAC_FIBSTATE_EMPTY |
	    AAC_FIBSTATE_FROMHOST |
	    AAC_FIBSTATE_REXPECTED |
	    AAC_FIBSTATE_NORM |
	    AAC_FIBSTATE_ASYNC |
	    AAC_FIBSTATE_FAST_RESPONSE;
	fib->Header.Command = ContainerCommand;

	vmi = (struct aac_vmioctl *)&fib->data[0];
	bzero(vmi, sizeof(struct aac_vmioctl));

	vmi->Command = VM_Ioctl;
	vmi->ObjType = FT_DRIVE;
	vmi->MethId = sc->scsi_method_id;
	vmi->ObjId = 0;
	vmi->IoctlCmd = ResetBus;

	rbc = (struct aac_resetbus *)&vmi->IoctlBuf[0];
	rbc->BusNumber = camsc->inf->BusNumber - 1;
	aac_vmioctl_tole(vmi);

	if (aacraid_wait_command(cm) != 0) {
		device_printf(sc->aac_dev, "Error sending ResetBus command\n");
		rval = CAM_REQ_ABORTED;
	} else {
		rval = CAM_REQ_CMP;
	}
	aacraid_release_command(cm);
	return (rval);
}

static u_int32_t
aac_cam_abort_ccb(struct cam_sim *sim, union ccb *ccb)
{
	return (CAM_UA_ABORT);
}

static u_int32_t
aac_cam_term_io(struct cam_sim *sim, union ccb *ccb)
{
	return (CAM_UA_TERMIO);
}

static int
aac_load_map_command_sg(struct aac_softc *sc, struct aac_command *cm)
{
	int error;

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
	error = bus_dmamap_load(sc->aac_buffer_dmat,
	    cm->cm_datamap, cm->cm_data, cm->cm_datalen,
	    aacraid_map_command_sg, cm, 0);
	if (error == EINPROGRESS) {
		fwprintf(sc, HBA_FLAGS_DBG_INIT_B, "freezing queue\n");
		sc->flags |= AAC_QUEUE_FRZN;
		error = 0;
	} else if (error != 0) {
		panic("aac_load_map_command_sg: unexpected error %d from "
		    "busdma", error);
	}
	return (error);
}

/*
 * Start as much queued I/O as possible on the controller
 */
void
aacraid_startio(struct aac_softc *sc)
{
	struct aac_command *cm;

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	for (;;) {
		if (sc->aac_state & AAC_STATE_RESET) {
			fwprintf(sc, HBA_FLAGS_DBG_ERROR_B, "AAC_STATE_RESET");
			break;
		}
		/*
		 * This flag might be set if the card is out of resources.
		 * Checking it here prevents an infinite loop of deferrals.
		 */
		if (sc->flags & AAC_QUEUE_FRZN) {
			fwprintf(sc, HBA_FLAGS_DBG_ERROR_B, "AAC_QUEUE_FRZN");
			break;
		}

		/*
		 * Try to get a command that's been put off for lack of
		 * resources
		 */
		if ((sc->flags & AAC_FLAGS_SYNC_MODE) && sc->aac_sync_cm)
			break;
		cm = aac_dequeue_ready(sc);

		/* nothing to do? */
		if (cm == NULL)
			break;

		/* don't map more than once */
		if (cm->cm_flags & AAC_CMD_MAPPED)
			panic("aac: command %p already mapped", cm);

		/*
		 * Set up the command to go to the controller.  If there are no
		 * data buffers associated with the command then it can bypass
		 * busdma.
		 */
		if (cm->cm_datalen)
			aac_load_map_command_sg(sc, cm);
		else
			aacraid_map_command_sg(cm, NULL, 0, 0);
	}
}