/* $Id: osm_bsd.c,v 1.36 2010/05/11 03:12:11 lcn Exp $ */
/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * HighPoint RAID Driver for FreeBSD
 * Copyright (C) 2005-2011 HighPoint Technologies, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <dev/hptnr/hptnr_config.h>
#include <dev/hptnr/os_bsd.h>
#include <dev/hptnr/hptintf.h>

int msi = 0;
int debug_flag = 0;

static HIM *hpt_match(device_t dev, int scan)
{
	PCI_ID pci_id;
	HIM *him;
	int i;

	for (him = him_list; him; him = him->next) {
		for (i=0; him->get_supported_device_id(i, &pci_id); i++) {
			if (scan && him->get_controller_count)
				him->get_controller_count(&pci_id,0,0);
			if ((pci_get_vendor(dev) == pci_id.vid) &&
			    (pci_get_device(dev) == pci_id.did)) {
				return (him);
			}
		}
	}

	return (NULL);
}

static int hpt_probe(device_t dev)
{
	HIM *him;

	him = hpt_match(dev, 0);
	if (him != NULL) {
		KdPrint(("hpt_probe: adapter at PCI %d:%d:%d, IRQ %d",
			pci_get_bus(dev), pci_get_slot(dev), pci_get_function(dev), pci_get_irq(dev)
			));
		device_set_desc(dev, him->name);
		return (BUS_PROBE_DEFAULT);
	}

	return (ENXIO);
}

static int hpt_attach(device_t dev)
{
	PHBA hba = (PHBA)device_get_softc(dev);
	HIM *him;
	PCI_ID pci_id;
	HPT_UINT size;
	PVBUS vbus;
	PVBUS_EXT vbus_ext;

	if (pci_get_domain(dev) != 0) {
		device_printf(dev, "does not support PCI domains\n");
		return (ENXIO);
	}

	KdPrint(("hpt_attach(%d/%d/%d)", pci_get_bus(dev), pci_get_slot(dev), pci_get_function(dev)));

	him = hpt_match(dev, 1);
	hba->ext_type = EXT_TYPE_HBA;
	hba->ldm_adapter.him = him;

	pci_enable_busmaster(dev);

	pci_id.vid = pci_get_vendor(dev);
	pci_id.did = pci_get_device(dev);
	pci_id.rev = pci_get_revid(dev);
	pci_id.subsys = (HPT_U32)(pci_get_subdevice(dev)) << 16 | pci_get_subvendor(dev);

	size = him->get_adapter_size(&pci_id);
	hba->ldm_adapter.him_handle = malloc(size, M_DEVBUF, M_WAITOK);

	hba->pcidev = dev;
	hba->pciaddr.tree = 0;
	hba->pciaddr.bus = pci_get_bus(dev);
	hba->pciaddr.device = pci_get_slot(dev);
	hba->pciaddr.function = pci_get_function(dev);

	if (!him->create_adapter(&pci_id, hba->pciaddr, hba->ldm_adapter.him_handle, hba)) {
		free(hba->ldm_adapter.him_handle, M_DEVBUF);
		return ENXIO;
	}

	os_printk("adapter at PCI %d:%d:%d, IRQ %d",
		hba->pciaddr.bus, hba->pciaddr.device, hba->pciaddr.function, pci_get_irq(dev));

	if (!ldm_register_adapter(&hba->ldm_adapter)) {
		size = ldm_get_vbus_size();
		vbus_ext = malloc(sizeof(VBUS_EXT) + size, M_DEVBUF, M_WAITOK |
		    M_ZERO);
		vbus_ext->ext_type = EXT_TYPE_VBUS;
		ldm_create_vbus((PVBUS)vbus_ext->vbus, vbus_ext);
		ldm_register_adapter(&hba->ldm_adapter);
	}

	ldm_for_each_vbus(vbus, vbus_ext) {
		if (hba->ldm_adapter.vbus==vbus) {
			hba->vbus_ext = vbus_ext;
			hba->next = vbus_ext->hba_list;
			vbus_ext->hba_list = hba;
			break;
		}
	}
	return 0;
}

/*
 * Maybe we'd better use bus_dmamem_alloc() to allocate DMA memory,
 * but there are some problems currently (alignment, etc).
 */
static __inline void *__get_free_pages(int order)
{
	/* don't use low memory - other devices may get starved */
	return contigmalloc(PAGE_SIZE<<order,
		M_DEVBUF, M_WAITOK, BUS_SPACE_MAXADDR_24BIT, BUS_SPACE_MAXADDR, PAGE_SIZE, 0);
}

static __inline void free_pages(void *p, int order)
{
	contigfree(p, PAGE_SIZE<<order, M_DEVBUF);
}

static int hpt_alloc_mem(PVBUS_EXT vbus_ext)
{
	PHBA hba;
	struct freelist *f;
	HPT_UINT i;
	void **p;

	for (hba = vbus_ext->hba_list; hba; hba = hba->next)
		hba->ldm_adapter.him->get_meminfo(hba->ldm_adapter.him_handle);

	ldm_get_mem_info((PVBUS)vbus_ext->vbus, 0);

	for (f=vbus_ext->freelist_head; f; f=f->next) {
		KdPrint(("%s: %d*%d=%d bytes",
			f->tag, f->count, f->size, f->count*f->size));
		for (i=0; i<f->count; i++) {
			p = (void **)malloc(f->size, M_DEVBUF, M_WAITOK);
			if (!p)	return (ENXIO);
			*p = f->head;
			f->head = p;
		}
	}

	for (f=vbus_ext->freelist_dma_head; f; f=f->next) {
		int order, size, j;

		HPT_ASSERT((f->size & (f->alignment-1))==0);

		for (order=0, size=PAGE_SIZE; size<f->size; order++, size<<=1)
			;

		KdPrint(("%s: %d*%d=%d bytes, order %d",
			f->tag, f->count, f->size, f->count*f->size, order));
		HPT_ASSERT(f->alignment<=PAGE_SIZE);

		for (i=0; i<f->count;) {
			p = (void **)__get_free_pages(order);
			if (!p) return -1;
			for (j = size/f->size; j && i<f->count; i++,j--) {
				*p = f->head;
				*(BUS_ADDRESS *)(p+1) = (BUS_ADDRESS)vtophys(p);
				f->head = p;
				p = (void **)((unsigned long)p + f->size);
			}
		}
	}

	HPT_ASSERT(PAGE_SIZE==DMAPOOL_PAGE_SIZE);

	for (i=0; i<os_max_cache_pages; i++) {
		p = (void **)__get_free_pages(0);
		if (!p) return -1;
		HPT_ASSERT(((HPT_UPTR)p & (DMAPOOL_PAGE_SIZE-1))==0);
		dmapool_put_page((PVBUS)vbus_ext->vbus, p, (BUS_ADDRESS)vtophys(p));
	}

	return 0;
}

static void hpt_free_mem(PVBUS_EXT vbus_ext)
{
	struct freelist *f;
	void *p;
	int i;
	BUS_ADDRESS bus;

	for (f=vbus_ext->freelist_head; f; f=f->next) {
#if DBG
		if (f->count!=f->reserved_count) {
			KdPrint(("memory leak for freelist %s (%d/%d)", f->tag, f->count, f->reserved_count));
		}
#endif
		while ((p=freelist_get(f)))
			free(p, M_DEVBUF);
	}

	for (i=0; i<os_max_cache_pages; i++) {
		p = dmapool_get_page((PVBUS)vbus_ext->vbus, &bus);
		HPT_ASSERT(p);
		free_pages(p, 0);
	}

	for (f=vbus_ext->freelist_dma_head; f; f=f->next) {
		int order, size;
#if DBG
		if (f->count!=f->reserved_count) {
			KdPrint(("memory leak for dma freelist %s (%d/%d)", f->tag, f->count, f->reserved_count));
		}
#endif
		for (order=0, size=PAGE_SIZE; size<f->size; order++, size<<=1)
			;

		while ((p=freelist_get_dma(f, &bus))) {
			if (order)
				free_pages(p, order);
			else {
				/* can't free immediately since other blocks in this page may still be in the list */
				if (((HPT_UPTR)p & (PAGE_SIZE-1))==0)
					dmapool_put_page((PVBUS)vbus_ext->vbus, p, bus);
			}
		}
	}

	while ((p = dmapool_get_page((PVBUS)vbus_ext->vbus, &bus)))
		free_pages(p, 0);
}

static int hpt_init_vbus(PVBUS_EXT vbus_ext)
{
	PHBA hba;

	for (hba = vbus_ext->hba_list; hba; hba = hba->next)
		if (!hba->ldm_adapter.him->initialize(hba->ldm_adapter.him_handle)) {
			KdPrint(("failed to initialize %p", hba));
			return -1;
		}

	ldm_initialize_vbus((PVBUS)vbus_ext->vbus, &vbus_ext->hba_list->ldm_adapter);
	return 0;
}

static void hpt_flush_done(PCOMMAND pCmd)
{
	PVDEV vd = pCmd->target;

	if (mIsArray(vd->type) && vd->u.array.transform && vd!=vd->u.array.transform->target) {
		vd = vd->u.array.transform->target;
		HPT_ASSERT(vd);
		pCmd->target = vd;
		pCmd->Result = RETURN_PENDING;
		vdev_queue_cmd(pCmd);
		return;
	}

	*(int *)pCmd->priv = 1;
	wakeup(pCmd);
}

/*
 * flush a vdev (without retry).
 */
static int hpt_flush_vdev(PVBUS_EXT vbus_ext, PVDEV vd)
{
	PCOMMAND pCmd;
	int result = 0, done;
	HPT_UINT count;

	KdPrint(("flushing dev %p", vd));

	hpt_assert_vbus_locked(vbus_ext);

	if (mIsArray(vd->type) && vd->u.array.transform)
		count = max(vd->u.array.transform->source->cmds_per_request,
			vd->u.array.transform->target->cmds_per_request);
	else
		count = vd->cmds_per_request;

	pCmd = ldm_alloc_cmds(vd->vbus, count);

	if (!pCmd) {
		return -1;
	}

	pCmd->type = CMD_TYPE_FLUSH;
	pCmd->flags.hard_flush = 1;
	pCmd->target = vd;
	pCmd->done = hpt_flush_done;
	done = 0;
	pCmd->priv = &done;

	ldm_queue_cmd(pCmd);

	if (!done) {
		while (hpt_sleep(vbus_ext, pCmd, PPAUSE, "hptfls", HPT_OSM_TIMEOUT)) {
			ldm_reset_vbus(vd->vbus);
		}
	}

	KdPrint(("flush result %d", pCmd->Result));

	if (pCmd->Result!=RETURN_SUCCESS)
		result = -1;

	ldm_free_cmds(pCmd);

	return result;
}

static void hpt_stop_tasks(PVBUS_EXT vbus_ext);
static void hpt_shutdown_vbus(PVBUS_EXT vbus_ext, int howto)
{
	PVBUS vbus = (PVBUS)vbus_ext->vbus;
	PHBA hba;
	int i;

	KdPrint(("hpt_shutdown_vbus"));

	/* stop all ctl tasks and disable the worker taskqueue */
	hpt_stop_tasks(vbus_ext);
	hpt_lock_vbus(vbus_ext);
	vbus_ext->worker.ta_context = 0;

	/* flush devices */
	for (i=0; i<osm_max_targets; i++) {
		PVDEV vd = ldm_find_target(vbus, i);
		if (vd) {
			/* retry once */
			if (hpt_flush_vdev(vbus_ext, vd))
				hpt_flush_vdev(vbus_ext, vd);
		}
	}

	ldm_shutdown(vbus);
	hpt_unlock_vbus(vbus_ext);

	ldm_release_vbus(vbus);

	for (hba=vbus_ext->hba_list; hba; hba=hba->next)
		bus_teardown_intr(hba->pcidev, hba->irq_res, hba->irq_handle);

	hpt_free_mem(vbus_ext);

	while ((hba=vbus_ext->hba_list)) {
		vbus_ext->hba_list = hba->next;
		free(hba->ldm_adapter.him_handle, M_DEVBUF);
	}

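	/*
	 * At this point every HBA interrupt handler has been torn down and
	 * the driver's memory pools have been released, so the per-vbus
	 * timer callout and lock can be drained and destroyed before the
	 * vbus extension itself is freed.
	 */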
	callout_drain(&vbus_ext->timer);
	mtx_destroy(&vbus_ext->lock);
	free(vbus_ext, M_DEVBUF);
	KdPrint(("hpt_shutdown_vbus done"));
}

static void __hpt_do_tasks(PVBUS_EXT vbus_ext)
{
	OSM_TASK *tasks;

	tasks = vbus_ext->tasks;
	vbus_ext->tasks = 0;

	while (tasks) {
		OSM_TASK *t = tasks;
		tasks = t->next;
		t->next = 0;
		t->func(vbus_ext->vbus, t->data);
	}
}

static void hpt_do_tasks(PVBUS_EXT vbus_ext, int pending)
{
	if (vbus_ext) {
		hpt_lock_vbus(vbus_ext);
		__hpt_do_tasks(vbus_ext);
		hpt_unlock_vbus(vbus_ext);
	}
}

static void hpt_action(struct cam_sim *sim, union ccb *ccb);
static void hpt_poll(struct cam_sim *sim);
static void hpt_async(void * callback_arg, u_int32_t code, struct cam_path * path, void * arg);
static void hpt_pci_intr(void *arg);

static __inline POS_CMDEXT cmdext_get(PVBUS_EXT vbus_ext)
{
	POS_CMDEXT p = vbus_ext->cmdext_list;
	if (p)
		vbus_ext->cmdext_list = p->next;
	return p;
}

static __inline void cmdext_put(POS_CMDEXT p)
{
	p->next = p->vbus_ext->cmdext_list;
	p->vbus_ext->cmdext_list = p;
}

static void hpt_timeout(void *arg)
{
	PCOMMAND pCmd = (PCOMMAND)arg;
	POS_CMDEXT ext = (POS_CMDEXT)pCmd->priv;

	KdPrint(("pCmd %p timeout", pCmd));

	ldm_reset_vbus((PVBUS)ext->vbus_ext->vbus);
}

static void os_cmddone(PCOMMAND pCmd)
{
	POS_CMDEXT ext = (POS_CMDEXT)pCmd->priv;
	union ccb *ccb = ext->ccb;
	HPT_U8 *cdb;

	if (ccb->ccb_h.flags & CAM_CDB_POINTER)
		cdb = ccb->csio.cdb_io.cdb_ptr;
	else
		cdb = ccb->csio.cdb_io.cdb_bytes;

	KdPrint(("os_cmddone(%p, %d)", pCmd, pCmd->Result));

	callout_stop(&ext->timeout);
	switch (cdb[0]) {
	case 0x85: /*ATA_16*/
	case 0xA1: /*ATA_12*/
	{
		PassthroughCmd *passthru = &pCmd->uCmd.Passthrough;
		HPT_U8 *sense_buffer = (HPT_U8 *)&ccb->csio.sense_data;
		memset(&ccb->csio.sense_data, 0, sizeof(ccb->csio.sense_data));

		sense_buffer[0] = 0x72; /* Response Code */
		sense_buffer[7] = 14; /* Additional Sense Length */

		sense_buffer[8] = 0x9; /* ATA Return Descriptor */
		sense_buffer[9] = 0xc; /* Additional Descriptor Length */
		sense_buffer[11] = (HPT_U8)passthru->bFeaturesReg; /* Error */
		sense_buffer[13] = (HPT_U8)passthru->bSectorCountReg; /* Sector Count (7:0) */
		sense_buffer[15] = (HPT_U8)passthru->bLbaLowReg; /* LBA Low (7:0) */
		sense_buffer[17] = (HPT_U8)passthru->bLbaMidReg; /* LBA Mid (7:0) */
		sense_buffer[19] = (HPT_U8)passthru->bLbaHighReg; /* LBA High (7:0) */

		if ((cdb[0] == 0x85) && (cdb[1] & 0x1))
		{
			sense_buffer[10] = 1;
			sense_buffer[12] = (HPT_U8)(passthru->bSectorCountReg >> 8); /* Sector Count (15:8) */
			sense_buffer[14] = (HPT_U8)(passthru->bLbaLowReg >> 8); /* LBA Low (15:8) */
			sense_buffer[16] = (HPT_U8)(passthru->bLbaMidReg >> 8); /* LBA Mid (15:8) */
			sense_buffer[18] = (HPT_U8)(passthru->bLbaHighReg >> 8); /* LBA High (15:8) */
		}

		sense_buffer[20] = (HPT_U8)passthru->bDriveHeadReg; /* Device */
		sense_buffer[21] = (HPT_U8)passthru->bCommandReg; /* Status */
		KdPrint(("sts 0x%x err 0x%x low 0x%x mid 0x%x hig 0x%x dh 0x%x sc 0x%x",
			passthru->bCommandReg,
			passthru->bFeaturesReg,
			passthru->bLbaLowReg,
			passthru->bLbaMidReg,
			passthru->bLbaHighReg,
			passthru->bDriveHeadReg,
			passthru->bSectorCountReg));
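		/*
		 * The bytes filled in above form descriptor-format sense data
		 * (response code 0x72) carrying a single ATA Status Return
		 * descriptor (type 0x09, 14 bytes), so callers issuing ATA
		 * PASS-THROUGH can recover the completed task-file registers.
		 */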
KdPrint(("result:0x%x,bFeaturesReg:0x%04x,bSectorCountReg:0x%04x,LBA:0x%04x%04x%04x ", 489 pCmd->Result,passthru->bFeaturesReg,passthru->bSectorCountReg, 490 passthru->bLbaHighReg,passthru->bLbaMidReg,passthru->bLbaLowReg)); 491 } 492 default: 493 break; 494 } 495 496 switch(pCmd->Result) { 497 case RETURN_SUCCESS: 498 ccb->ccb_h.status = CAM_REQ_CMP; 499 break; 500 case RETURN_BAD_DEVICE: 501 ccb->ccb_h.status = CAM_DEV_NOT_THERE; 502 break; 503 case RETURN_DEVICE_BUSY: 504 ccb->ccb_h.status = CAM_BUSY; 505 break; 506 case RETURN_INVALID_REQUEST: 507 ccb->ccb_h.status = CAM_REQ_INVALID; 508 break; 509 case RETURN_SELECTION_TIMEOUT: 510 ccb->ccb_h.status = CAM_SEL_TIMEOUT; 511 break; 512 case RETURN_RETRY: 513 ccb->ccb_h.status = CAM_BUSY; 514 break; 515 default: 516 ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR; 517 break; 518 } 519 520 if (pCmd->flags.data_in) { 521 bus_dmamap_sync(ext->vbus_ext->io_dmat, ext->dma_map, BUS_DMASYNC_POSTREAD); 522 } 523 else if (pCmd->flags.data_out) { 524 bus_dmamap_sync(ext->vbus_ext->io_dmat, ext->dma_map, BUS_DMASYNC_POSTWRITE); 525 } 526 527 bus_dmamap_unload(ext->vbus_ext->io_dmat, ext->dma_map); 528 529 cmdext_put(ext); 530 ldm_free_cmds(pCmd); 531 xpt_done(ccb); 532 } 533 534 static int os_buildsgl(PCOMMAND pCmd, PSG pSg, int logical) 535 { 536 /* since we have provided physical sg, nobody will ask us to build physical sg */ 537 HPT_ASSERT(0); 538 return FALSE; 539 } 540 541 static void hpt_io_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nsegs, int error) 542 { 543 PCOMMAND pCmd = (PCOMMAND)arg; 544 POS_CMDEXT ext = (POS_CMDEXT)pCmd->priv; 545 PSG psg = pCmd->psg; 546 int idx; 547 548 HPT_ASSERT(pCmd->flags.physical_sg); 549 550 if (error) 551 panic("busdma error"); 552 553 HPT_ASSERT(nsegs<=os_max_sg_descriptors); 554 555 if (nsegs != 0) { 556 for (idx = 0; idx < nsegs; idx++, psg++) { 557 psg->addr.bus = segs[idx].ds_addr; 558 psg->size = segs[idx].ds_len; 559 psg->eot = 0; 560 } 561 psg[-1].eot = 1; 562 563 if (pCmd->flags.data_in) { 564 bus_dmamap_sync(ext->vbus_ext->io_dmat, ext->dma_map, 565 BUS_DMASYNC_PREREAD); 566 } 567 else if (pCmd->flags.data_out) { 568 bus_dmamap_sync(ext->vbus_ext->io_dmat, ext->dma_map, 569 BUS_DMASYNC_PREWRITE); 570 } 571 } 572 573 callout_reset(&ext->timeout, HPT_OSM_TIMEOUT, hpt_timeout, pCmd); 574 ldm_queue_cmd(pCmd); 575 } 576 577 static void hpt_scsi_io(PVBUS_EXT vbus_ext, union ccb *ccb) 578 { 579 PVBUS vbus = (PVBUS)vbus_ext->vbus; 580 PVDEV vd; 581 PCOMMAND pCmd; 582 POS_CMDEXT ext; 583 HPT_U8 *cdb; 584 585 if (ccb->ccb_h.flags & CAM_CDB_POINTER) 586 cdb = ccb->csio.cdb_io.cdb_ptr; 587 else 588 cdb = ccb->csio.cdb_io.cdb_bytes; 589 590 KdPrint(("hpt_scsi_io: ccb %x id %d lun %d cdb %x-%x-%x", 591 ccb, 592 ccb->ccb_h.target_id, ccb->ccb_h.target_lun, 593 *(HPT_U32 *)&cdb[0], *(HPT_U32 *)&cdb[4], *(HPT_U32 *)&cdb[8] 594 )); 595 596 /* ccb->ccb_h.path_id is not our bus id - don't check it */ 597 if (ccb->ccb_h.target_lun != 0 || 598 ccb->ccb_h.target_id >= osm_max_targets || 599 (ccb->ccb_h.flags & CAM_CDB_PHYS)) 600 { 601 ccb->ccb_h.status = CAM_TID_INVALID; 602 xpt_done(ccb); 603 return; 604 } 605 606 vd = ldm_find_target(vbus, ccb->ccb_h.target_id); 607 608 if (!vd) { 609 ccb->ccb_h.status = CAM_SEL_TIMEOUT; 610 xpt_done(ccb); 611 return; 612 } 613 614 switch (cdb[0]) { 615 case TEST_UNIT_READY: 616 case START_STOP_UNIT: 617 case SYNCHRONIZE_CACHE: 618 ccb->ccb_h.status = CAM_REQ_CMP; 619 break; 620 621 case 0x85: /*ATA_16*/ 622 case 0xA1: /*ATA_12*/ 623 { 624 int error; 625 HPT_U8 prot; 626 
		PassthroughCmd *passthru;

		if (mIsArray(vd->type)) {
			ccb->ccb_h.status = CAM_REQ_INVALID;
			break;
		}

		HPT_ASSERT(vd->type == VD_RAW && vd->u.raw.legacy_disk);

		prot = (cdb[1] & 0x1e) >> 1;

		if (prot < 3 || prot > 5)
		{
			ccb->ccb_h.status = CAM_REQ_INVALID;
			break;
		}

		pCmd = ldm_alloc_cmds(vbus, vd->cmds_per_request);
		if (!pCmd) {
			HPT_ASSERT(0);
			ccb->ccb_h.status = CAM_BUSY;
			break;
		}

		passthru = &pCmd->uCmd.Passthrough;
		if (cdb[0] == 0x85/*ATA_16*/) {
			if (cdb[1] & 0x1) {
				passthru->bFeaturesReg =
				    ((HPT_U16)cdb[3] << 8)
				    | cdb[4];
				passthru->bSectorCountReg =
				    ((HPT_U16)cdb[5] << 8) |
				    cdb[6];
				passthru->bLbaLowReg =
				    ((HPT_U16)cdb[7] << 8) |
				    cdb[8];
				passthru->bLbaMidReg =
				    ((HPT_U16)cdb[9] << 8) |
				    cdb[10];
				passthru->bLbaHighReg =
				    ((HPT_U16)cdb[11] << 8) |
				    cdb[12];
			} else {
				passthru->bFeaturesReg = cdb[4];
				passthru->bSectorCountReg = cdb[6];
				passthru->bLbaLowReg = cdb[8];
				passthru->bLbaMidReg = cdb[10];
				passthru->bLbaHighReg = cdb[12];
			}
			passthru->bDriveHeadReg = cdb[13];
			passthru->bCommandReg = cdb[14];

		} else { /*ATA_12*/

			passthru->bFeaturesReg = cdb[3];
			passthru->bSectorCountReg = cdb[4];
			passthru->bLbaLowReg = cdb[5];
			passthru->bLbaMidReg = cdb[6];
			passthru->bLbaHighReg = cdb[7];
			passthru->bDriveHeadReg = cdb[8];
			passthru->bCommandReg = cdb[9];
		}

		if (cdb[1] & 0xe0) {

			if (!(passthru->bCommandReg == ATA_CMD_READ_MULTI ||
				passthru->bCommandReg == ATA_CMD_READ_MULTI_EXT ||
				passthru->bCommandReg == ATA_CMD_WRITE_MULTI ||
				passthru->bCommandReg == ATA_CMD_WRITE_MULTI_EXT ||
				passthru->bCommandReg == ATA_CMD_WRITE_MULTI_FUA_EXT)
				) {
				goto error;
			}
		}

		if (passthru->bFeaturesReg == ATA_SET_FEATURES_XFER &&
			passthru->bCommandReg == ATA_CMD_SET_FEATURES) {
			goto error;
		}

		passthru->nSectors = ccb->csio.dxfer_len/ATA_SECTOR_SIZE;
		switch (prot) {
		default: /* non-data */
			break;
		case 4: /*PIO data in, T_DIR=1 match check*/
			if ((cdb[2] & 3) &&
				(cdb[2] & 0x8) == 0)
			{
				OsPrint(("PIO data in, T_DIR=1 match check"));
				goto error;
			}
			pCmd->flags.data_in = 1;
			break;
		case 5: /*PIO data out, T_DIR=0 match check*/
			if ((cdb[2] & 3) &&
				(cdb[2] & 0x8))
			{
				OsPrint(("PIO data out, T_DIR=0 match check"));
				goto error;
			}

			pCmd->flags.data_out = 1;
			break;
		}
		pCmd->type = CMD_TYPE_PASSTHROUGH;
		pCmd->priv = ext = cmdext_get(vbus_ext);
		HPT_ASSERT(ext);
		ext->ccb = ccb;
		pCmd->target = vd;
		pCmd->done = os_cmddone;
		pCmd->buildsgl = os_buildsgl;
		pCmd->psg = ext->psg;

		if (!ccb->csio.dxfer_len)
		{
			ldm_queue_cmd(pCmd);
			return;
		}
		pCmd->flags.physical_sg = 1;
		error = bus_dmamap_load_ccb(vbus_ext->io_dmat,
				ext->dma_map, ccb,
				hpt_io_dmamap_callback, pCmd,
				BUS_DMA_WAITOK
				);
		KdPrint(("bus_dmamap_load return %d", error));
		if (error && error!=EINPROGRESS) {
			os_printk("bus_dmamap_load error %d", error);
			cmdext_put(ext);
			ldm_free_cmds(pCmd);
			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
			xpt_done(ccb);
		}
		return;
error:
		ldm_free_cmds(pCmd);
		ccb->ccb_h.status = CAM_REQ_INVALID;
		break;
	}

	case INQUIRY:
	{
		PINQUIRYDATA inquiryData;
		HIM_DEVICE_CONFIG devconf;
		HPT_U8 *rbuf;

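		/*
		 * Synthesize the INQUIRY response: for EVPD requests only pages
		 * 0x00 (supported pages), 0x80 (unit serial number) and 0x83
		 * (device identification) are served; otherwise a standard
		 * INQUIRY block is built from the drive's IDENTIFY data.
		 */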
		memset(ccb->csio.data_ptr, 0, ccb->csio.dxfer_len);
		inquiryData = (PINQUIRYDATA)ccb->csio.data_ptr;

		if (cdb[1] & 1) {
			rbuf = (HPT_U8 *)inquiryData;
			switch (cdb[2]) {
			case 0:
				rbuf[0] = 0;
				rbuf[1] = 0;
				rbuf[2] = 0;
				rbuf[3] = 3;
				rbuf[4] = 0;
				rbuf[5] = 0x80;
				rbuf[6] = 0x83;
				ccb->ccb_h.status = CAM_REQ_CMP;
				break;
			case 0x80: {
				rbuf[0] = 0;
				rbuf[1] = 0x80;
				rbuf[2] = 0;
				if (vd->type == VD_RAW) {
					rbuf[3] = 20;
					vd->u.raw.him->get_device_config(vd->u.raw.phy_dev, &devconf);
					memcpy(&rbuf[4], devconf.pIdentifyData->SerialNumber, 20);
					ldm_ide_fixstring(&rbuf[4], 20);
				} else {
					rbuf[3] = 1;
					rbuf[4] = 0x20;
				}
				ccb->ccb_h.status = CAM_REQ_CMP;
				break;
			}
			case 0x83:
				rbuf[0] = 0;
				rbuf[1] = 0x83;
				rbuf[2] = 0;
				rbuf[3] = 12;
				rbuf[4] = 1;
				rbuf[5] = 2;
				rbuf[6] = 0;
				rbuf[7] = 8;
				rbuf[8] = 0;
				rbuf[9] = 0x19;
				rbuf[10] = 0x3C;
				rbuf[11] = 0;
				rbuf[12] = 0;
				rbuf[13] = 0;
				rbuf[14] = 0;
				rbuf[15] = 0;
				ccb->ccb_h.status = CAM_REQ_CMP;
				break;
			default:
				ccb->ccb_h.status = CAM_REQ_INVALID;
				break;
			}

			break;
		}
		else if (cdb[2]) {
			ccb->ccb_h.status = CAM_REQ_INVALID;
			break;
		}

		inquiryData->DeviceType = 0; /*DIRECT_ACCESS_DEVICE*/
		inquiryData->Versions = 5; /*SPC-3*/
		inquiryData->ResponseDataFormat = 2;
		inquiryData->AdditionalLength = 0x5b;
		inquiryData->CommandQueue = 1;

		if (ccb->csio.dxfer_len > 63) {
			rbuf = (HPT_U8 *)inquiryData;
			rbuf[58] = 0x60;
			rbuf[59] = 0x3;

			rbuf[64] = 0x3;
			rbuf[66] = 0x3;
			rbuf[67] = 0x20;

		}

		if (vd->type == VD_RAW) {
			vd->u.raw.him->get_device_config(vd->u.raw.phy_dev, &devconf);

			if ((devconf.pIdentifyData->GeneralConfiguration & 0x80))
				inquiryData->RemovableMedia = 1;

			memcpy(&inquiryData->VendorId, "ATA     ", 8);
			memcpy(&inquiryData->ProductId, devconf.pIdentifyData->ModelNumber, 16);
			ldm_ide_fixstring((HPT_U8 *)&inquiryData->ProductId, 16);
			memcpy(&inquiryData->ProductRevisionLevel, devconf.pIdentifyData->FirmwareRevision, 4);
			ldm_ide_fixstring((HPT_U8 *)&inquiryData->ProductRevisionLevel, 4);
			if (inquiryData->ProductRevisionLevel[0] == 0 || inquiryData->ProductRevisionLevel[0] == ' ')
				memcpy(&inquiryData->ProductRevisionLevel, "n/a ", 4);
		} else {
			memcpy(&inquiryData->VendorId, "HPT     ", 8);
			snprintf((char *)&inquiryData->ProductId, 16, "DISK_%d_%d ",
				os_get_vbus_seq(vbus_ext), vd->target_id);
			inquiryData->ProductId[15] = ' ';
			memcpy(&inquiryData->ProductRevisionLevel, "4.00", 4);
		}

		ccb->ccb_h.status = CAM_REQ_CMP;
		break;
	}
	case READ_CAPACITY:
	{
		HPT_U8 *rbuf = ccb->csio.data_ptr;
		HPT_U32 cap;
		HPT_U8 sector_size_shift = 0;
		HPT_U64 new_cap;
		HPT_U32 sector_size = 0;

		if (mIsArray(vd->type))
			sector_size_shift = vd->u.array.sector_size_shift;
		else {
			if (vd->type == VD_RAW) {
				sector_size = vd->u.raw.logical_sector_size;
			}

			switch (sector_size) {
			case 0x1000:
				KdPrint(("set 4k sector size in READ_CAPACITY"));
				sector_size_shift = 3;
				break;
			default:
				break;
			}
		}
		new_cap = vd->capacity >> sector_size_shift;

		if (new_cap > 0xfffffffful)
			cap = 0xffffffff;
		else
			cap = new_cap - 1;

		rbuf[0] = (HPT_U8)(cap>>24);
		rbuf[1] = (HPT_U8)(cap>>16);
		rbuf[2] = (HPT_U8)(cap>>8);
		rbuf[3] = (HPT_U8)cap;
		rbuf[4] = 0;
		rbuf[5] = 0;
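		/*
		 * Bytes 4-7 of the READ CAPACITY data hold the block length in
		 * bytes (big-endian): 2 << sector_size_shift in byte 6 encodes
		 * 512 for the default case and 4096 when sector_size_shift is 3.
		 */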
		rbuf[6] = 2 << sector_size_shift;
		rbuf[7] = 0;

		ccb->ccb_h.status = CAM_REQ_CMP;
		break;
	}

	case REPORT_LUNS:
	{
		HPT_U8 *rbuf = ccb->csio.data_ptr;
		memset(rbuf, 0, 16);
		rbuf[3] = 8;
		ccb->ccb_h.status = CAM_REQ_CMP;
		break;
	}
	case SERVICE_ACTION_IN:
	{
		HPT_U8 *rbuf = ccb->csio.data_ptr;
		HPT_U64 cap = 0;
		HPT_U8 sector_size_shift = 0;
		HPT_U32 sector_size = 0;

		if (mIsArray(vd->type))
			sector_size_shift = vd->u.array.sector_size_shift;
		else {
			if (vd->type == VD_RAW) {
				sector_size = vd->u.raw.logical_sector_size;
			}

			switch (sector_size) {
			case 0x1000:
				KdPrint(("set 4k sector size in SERVICE_ACTION_IN"));
				sector_size_shift = 3;
				break;
			default:
				break;
			}
		}
		cap = (vd->capacity >> sector_size_shift) - 1;

		rbuf[0] = (HPT_U8)(cap>>56);
		rbuf[1] = (HPT_U8)(cap>>48);
		rbuf[2] = (HPT_U8)(cap>>40);
		rbuf[3] = (HPT_U8)(cap>>32);
		rbuf[4] = (HPT_U8)(cap>>24);
		rbuf[5] = (HPT_U8)(cap>>16);
		rbuf[6] = (HPT_U8)(cap>>8);
		rbuf[7] = (HPT_U8)cap;
		rbuf[8] = 0;
		rbuf[9] = 0;
		rbuf[10] = 2 << sector_size_shift;
		rbuf[11] = 0;

		if (!mIsArray(vd->type)) {
			rbuf[13] = vd->u.raw.logicalsectors_per_physicalsector;
			rbuf[14] = (HPT_U8)((vd->u.raw.lowest_aligned >> 8) & 0x3f);
			rbuf[15] = (HPT_U8)(vd->u.raw.lowest_aligned);
		}

		ccb->ccb_h.status = CAM_REQ_CMP;
		break;
	}

	case READ_6:
	case READ_10:
	case READ_16:
	case WRITE_6:
	case WRITE_10:
	case WRITE_16:
	case 0x13:
	case 0x2f:
	case 0x8f: /* VERIFY_16 */
	{
		int error;
		HPT_U8 sector_size_shift = 0;
		HPT_U32 sector_size = 0;
		pCmd = ldm_alloc_cmds(vbus, vd->cmds_per_request);
		if (!pCmd) {
			KdPrint(("Failed to allocate command!"));
			ccb->ccb_h.status = CAM_BUSY;
			break;
		}

		switch (cdb[0]) {
		case READ_6:
		case WRITE_6:
		case 0x13:
			pCmd->uCmd.Ide.Lba = ((HPT_U32)cdb[1] << 16) | ((HPT_U32)cdb[2] << 8) | (HPT_U32)cdb[3];
			pCmd->uCmd.Ide.nSectors = (HPT_U16) cdb[4];
			break;
		case READ_16:
		case WRITE_16:
		case 0x8f: /* VERIFY_16 */
		{
			HPT_U64 block =
				((HPT_U64)cdb[2]<<56) |
				((HPT_U64)cdb[3]<<48) |
				((HPT_U64)cdb[4]<<40) |
				((HPT_U64)cdb[5]<<32) |
				((HPT_U64)cdb[6]<<24) |
				((HPT_U64)cdb[7]<<16) |
				((HPT_U64)cdb[8]<<8) |
				((HPT_U64)cdb[9]);
			pCmd->uCmd.Ide.Lba = block;
			pCmd->uCmd.Ide.nSectors = (HPT_U16)cdb[13] | ((HPT_U16)cdb[12]<<8);
			break;
		}

		default:
			pCmd->uCmd.Ide.Lba = (HPT_U32)cdb[5] | ((HPT_U32)cdb[4] << 8) | ((HPT_U32)cdb[3] << 16) | ((HPT_U32)cdb[2] << 24);
			pCmd->uCmd.Ide.nSectors = (HPT_U16) cdb[8] | ((HPT_U16)cdb[7]<<8);
			break;
		}

		if (mIsArray(vd->type)) {
			sector_size_shift = vd->u.array.sector_size_shift;
		}
		else {
			if (vd->type == VD_RAW) {
				sector_size = vd->u.raw.logical_sector_size;
			}

			switch (sector_size) {
			case 0x1000:
				KdPrint(("<8>resize sector size from 4k to 512"));
				sector_size_shift = 3;
				break;
			default:
				break;
			}
		}
		pCmd->uCmd.Ide.Lba <<= sector_size_shift;
		pCmd->uCmd.Ide.nSectors <<= sector_size_shift;

		switch (cdb[0]) {
		case READ_6:
		case READ_10:
		case READ_16:
			pCmd->flags.data_in = 1;
			break;
		case WRITE_6:
		case WRITE_10:
		case WRITE_16:
			pCmd->flags.data_out = 1;
			break;
		}
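		/*
		 * Attach the per-command OS extension and hand the request to
		 * busdma; completion continues in hpt_io_dmamap_callback() and
		 * finally in os_cmddone().
		 */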
		pCmd->priv = ext = cmdext_get(vbus_ext);
		HPT_ASSERT(ext);
		ext->ccb = ccb;
		pCmd->target = vd;
		pCmd->done = os_cmddone;
		pCmd->buildsgl = os_buildsgl;
		pCmd->psg = ext->psg;
		pCmd->flags.physical_sg = 1;
		error = bus_dmamap_load_ccb(vbus_ext->io_dmat,
				ext->dma_map, ccb,
				hpt_io_dmamap_callback, pCmd,
				BUS_DMA_WAITOK
				);
		KdPrint(("bus_dmamap_load return %d", error));
		if (error && error!=EINPROGRESS) {
			os_printk("bus_dmamap_load error %d", error);
			cmdext_put(ext);
			ldm_free_cmds(pCmd);
			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
			xpt_done(ccb);
		}
		return;
	}

	default:
		ccb->ccb_h.status = CAM_REQ_INVALID;
		break;
	}

	xpt_done(ccb);
	return;
}

static void hpt_action(struct cam_sim *sim, union ccb *ccb)
{
	PVBUS_EXT vbus_ext = (PVBUS_EXT)cam_sim_softc(sim);

	KdPrint(("hpt_action(fn=%d, id=%d)", ccb->ccb_h.func_code, ccb->ccb_h.target_id));

	hpt_assert_vbus_locked(vbus_ext);
	switch (ccb->ccb_h.func_code) {

	case XPT_SCSI_IO:
		hpt_scsi_io(vbus_ext, ccb);
		return;

	case XPT_RESET_BUS:
		ldm_reset_vbus((PVBUS)vbus_ext->vbus);
		break;

	case XPT_GET_TRAN_SETTINGS:
	case XPT_SET_TRAN_SETTINGS:
		ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
		break;

	case XPT_CALC_GEOMETRY:
		ccb->ccg.heads = 255;
		ccb->ccg.secs_per_track = 63;
		ccb->ccg.cylinders = ccb->ccg.volume_size / (ccb->ccg.heads * ccb->ccg.secs_per_track);
		ccb->ccb_h.status = CAM_REQ_CMP;
		break;

	case XPT_PATH_INQ:
	{
		struct ccb_pathinq *cpi = &ccb->cpi;

		cpi->version_num = 1;
		cpi->hba_inquiry = PI_SDTR_ABLE;
		cpi->target_sprt = 0;
		cpi->hba_misc = PIM_NOBUSRESET;
		cpi->hba_eng_cnt = 0;
		cpi->max_target = osm_max_targets;
		cpi->max_lun = 0;
		cpi->unit_number = cam_sim_unit(sim);
		cpi->bus_id = cam_sim_bus(sim);
		cpi->initiator_id = osm_max_targets;
		cpi->base_transfer_speed = 3300;

		strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
		strlcpy(cpi->hba_vid, "HPT ", HBA_IDLEN);
		strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
		cpi->transport = XPORT_SPI;
		cpi->transport_version = 2;
		cpi->protocol = PROTO_SCSI;
		cpi->protocol_version = SCSI_REV_2;
		cpi->ccb_h.status = CAM_REQ_CMP;
		break;
	}

	default:
		ccb->ccb_h.status = CAM_REQ_INVALID;
		break;
	}

	xpt_done(ccb);
	return;
}

static void hpt_pci_intr(void *arg)
{
	PVBUS_EXT vbus_ext = (PVBUS_EXT)arg;
	hpt_lock_vbus(vbus_ext);
	ldm_intr((PVBUS)vbus_ext->vbus);
	hpt_unlock_vbus(vbus_ext);
}

static void hpt_poll(struct cam_sim *sim)
{
	PVBUS_EXT vbus_ext = cam_sim_softc(sim);
	hpt_assert_vbus_locked(vbus_ext);
	ldm_intr((PVBUS)vbus_ext->vbus);
}

static void hpt_async(void * callback_arg, u_int32_t code, struct cam_path * path, void * arg)
{
	KdPrint(("hpt_async"));
}

static int hpt_shutdown(device_t dev)
{
	KdPrint(("hpt_shutdown(dev=%p)", dev));
	return 0;
}

static int hpt_detach(device_t dev)
{
	/* we don't allow the driver to be unloaded. */
	return EBUSY;
}

static void hpt_ioctl_done(struct _IOCTL_ARG *arg)
{
	arg->ioctl_cmnd = 0;
	wakeup(arg);
}

static void __hpt_do_ioctl(PVBUS_EXT vbus_ext, IOCTL_ARG *ioctl_args)
{
	ioctl_args->result = -1;
	ioctl_args->done = hpt_ioctl_done;
	ioctl_args->ioctl_cmnd = (void *)1;

	hpt_lock_vbus(vbus_ext);
	ldm_ioctl((PVBUS)vbus_ext->vbus, ioctl_args);

	while (ioctl_args->ioctl_cmnd) {
		if (hpt_sleep(vbus_ext, ioctl_args, PPAUSE, "hptctl", HPT_OSM_TIMEOUT)==0)
			break;
		ldm_reset_vbus((PVBUS)vbus_ext->vbus);
		__hpt_do_tasks(vbus_ext);
	}

	/* KdPrint(("ioctl %x result %d", ioctl_args->dwIoControlCode, ioctl_args->result)); */

	hpt_unlock_vbus(vbus_ext);
}

static void hpt_do_ioctl(IOCTL_ARG *ioctl_args)
{
	PVBUS vbus;
	PVBUS_EXT vbus_ext;

	ldm_for_each_vbus(vbus, vbus_ext) {
		__hpt_do_ioctl(vbus_ext, ioctl_args);
		if (ioctl_args->result!=HPT_IOCTL_RESULT_WRONG_VBUS)
			return;
	}
}

#define HPT_DO_IOCTL(code, inbuf, insize, outbuf, outsize) ({\
	IOCTL_ARG arg;\
	arg.dwIoControlCode = code;\
	arg.lpInBuffer = inbuf;\
	arg.lpOutBuffer = outbuf;\
	arg.nInBufferSize = insize;\
	arg.nOutBufferSize = outsize;\
	arg.lpBytesReturned = 0;\
	hpt_do_ioctl(&arg);\
	arg.result;\
})

#define DEVICEID_VALID(id) ((id) && ((HPT_U32)(id)!=0xffffffff))

static int hpt_get_logical_devices(DEVICEID * pIds, int nMaxCount)
{
	int i;
	HPT_U32 count = nMaxCount-1;

	if (HPT_DO_IOCTL(HPT_IOCTL_GET_LOGICAL_DEVICES,
			&count, sizeof(HPT_U32), pIds, sizeof(DEVICEID)*nMaxCount))
		return -1;

	nMaxCount = (int)pIds[0];
	for (i=0; i<nMaxCount; i++) pIds[i] = pIds[i+1];
	return nMaxCount;
}

static int hpt_get_device_info_v3(DEVICEID id, PLOGICAL_DEVICE_INFO_V3 pInfo)
{
	return HPT_DO_IOCTL(HPT_IOCTL_GET_DEVICE_INFO_V3,
				&id, sizeof(DEVICEID), pInfo, sizeof(LOGICAL_DEVICE_INFO_V3));
}

/* does not logically belong in this file, but we want to use the ioctl interface */
static int __hpt_stop_tasks(PVBUS_EXT vbus_ext, DEVICEID id)
{
	LOGICAL_DEVICE_INFO_V3 devinfo;
	int i, result;
	DEVICEID param[2] = { id, 0 };

	if (hpt_get_device_info_v3(id, &devinfo))
		return -1;

	if (devinfo.Type!=LDT_ARRAY)
		return -1;

	if (devinfo.u.array.Flags & ARRAY_FLAG_REBUILDING)
		param[1] = AS_REBUILD_ABORT;
	else if (devinfo.u.array.Flags & ARRAY_FLAG_VERIFYING)
		param[1] = AS_VERIFY_ABORT;
	else if (devinfo.u.array.Flags & ARRAY_FLAG_INITIALIZING)
		param[1] = AS_INITIALIZE_ABORT;
	else if (devinfo.u.array.Flags & ARRAY_FLAG_TRANSFORMING)
		param[1] = AS_TRANSFORM_ABORT;
	else
		return -1;

	KdPrint(("SET_ARRAY_STATE(%x, %d)", param[0], param[1]));
	result = HPT_DO_IOCTL(HPT_IOCTL_SET_ARRAY_STATE,
				param, sizeof(param), 0, 0);

	for (i=0; i<devinfo.u.array.nDisk; i++)
		if (DEVICEID_VALID(devinfo.u.array.Members[i]))
			__hpt_stop_tasks(vbus_ext, devinfo.u.array.Members[i]);

	return result;
}

static void hpt_stop_tasks(PVBUS_EXT vbus_ext)
{
	DEVICEID ids[32];
	int i, count;

	count = hpt_get_logical_devices((DEVICEID *)&ids, sizeof(ids)/sizeof(ids[0]));

	for (i=0; i<count; i++)
		__hpt_stop_tasks(vbus_ext, ids[i]);
}

static d_open_t hpt_open;
static d_close_t hpt_close;
static d_ioctl_t hpt_ioctl;
static int hpt_rescan_bus(void);

static struct cdevsw hpt_cdevsw = {
	.d_open =	hpt_open,
	.d_close =	hpt_close,
	.d_ioctl =	hpt_ioctl,
	.d_name =	driver_name,
	.d_version =	D_VERSION,
};

static struct intr_config_hook hpt_ich;

/*
 * hpt_final_init will be called after all hpt_attach calls have completed.
 */
static void hpt_final_init(void *dummy)
{
	int i, unit_number = 0;
	PVBUS_EXT vbus_ext;
	PVBUS vbus;
	PHBA hba;

	/* Clear the config hook */
	config_intrhook_disestablish(&hpt_ich);

	/* allocate memory */
	i = 0;
	ldm_for_each_vbus(vbus, vbus_ext) {
		if (hpt_alloc_mem(vbus_ext)) {
			os_printk("out of memory");
			return;
		}
		i++;
	}

	if (!i) {
		if (bootverbose)
			os_printk("no controller detected.");
		return;
	}

	/* initialize hardware */
	ldm_for_each_vbus(vbus, vbus_ext) {
		/* make timer available here */
		mtx_init(&vbus_ext->lock, "hptsleeplock", NULL, MTX_DEF);
		callout_init_mtx(&vbus_ext->timer, &vbus_ext->lock, 0);
		if (hpt_init_vbus(vbus_ext)) {
			os_printk("failed to initialize hardware");
			break; /* FIXME */
		}
	}

	/* register CAM interface */
	ldm_for_each_vbus(vbus, vbus_ext) {
		struct cam_devq *devq;
		struct ccb_setasync ccb;

		if (bus_dma_tag_create(NULL,/* parent */
				4,	/* alignment */
				BUS_SPACE_MAXADDR_32BIT+1, /* boundary */
				BUS_SPACE_MAXADDR,	/* lowaddr */
				BUS_SPACE_MAXADDR, 	/* highaddr */
				NULL, NULL, 		/* filter, filterarg */
				PAGE_SIZE * (os_max_sg_descriptors-1),	/* maxsize */
				os_max_sg_descriptors,	/* nsegments */
				0x10000,	/* maxsegsize */
				BUS_DMA_WAITOK,		/* flags */
				busdma_lock_mutex,	/* lockfunc */
				&vbus_ext->lock,		/* lockfuncarg */
				&vbus_ext->io_dmat	/* tag */))
		{
			return;
		}

		for (i=0; i<os_max_queue_comm; i++) {
			POS_CMDEXT ext = (POS_CMDEXT)malloc(sizeof(OS_CMDEXT), M_DEVBUF, M_WAITOK);
			if (!ext) {
				os_printk("Can't alloc cmdext(%d)", i);
				return;
			}
			ext->vbus_ext = vbus_ext;
			ext->next = vbus_ext->cmdext_list;
			vbus_ext->cmdext_list = ext;

			if (bus_dmamap_create(vbus_ext->io_dmat, 0, &ext->dma_map)) {
				os_printk("Can't create dma map(%d)", i);
				return;
			}
			callout_init_mtx(&ext->timeout, &vbus_ext->lock, 0);
		}

		if ((devq = cam_simq_alloc(os_max_queue_comm)) == NULL) {
			os_printk("cam_simq_alloc failed");
			return;
		}

		hpt_lock_vbus(vbus_ext);
		vbus_ext->sim = cam_sim_alloc(hpt_action, hpt_poll, driver_name,
				vbus_ext, unit_number, &vbus_ext->lock,
				os_max_queue_comm, /*tagged*/8,  devq);
		unit_number++;
		if (!vbus_ext->sim) {
			os_printk("cam_sim_alloc failed");
			cam_simq_free(devq);
			hpt_unlock_vbus(vbus_ext);
			return;
		}

		if (xpt_bus_register(vbus_ext->sim, NULL, 0) != CAM_SUCCESS) {
			os_printk("xpt_bus_register failed");
			cam_sim_free(vbus_ext->sim, /*free devq*/ TRUE);
			vbus_ext->sim = NULL;
			hpt_unlock_vbus(vbus_ext);
			return;
		}

		if (xpt_create_path(&vbus_ext->path, /*periph */ NULL,
				cam_sim_path(vbus_ext->sim), CAM_TARGET_WILDCARD,
				CAM_LUN_WILDCARD) != CAM_REQ_CMP)
		{
			os_printk("xpt_create_path failed");
			xpt_bus_deregister(cam_sim_path(vbus_ext->sim));
			cam_sim_free(vbus_ext->sim, /*free_devq*/TRUE);
			hpt_unlock_vbus(vbus_ext);
			vbus_ext->sim = NULL;
			return;
		}
		hpt_unlock_vbus(vbus_ext);

		memset(&ccb, 0, sizeof(ccb));
		xpt_setup_ccb(&ccb.ccb_h, vbus_ext->path, /*priority*/5);
		ccb.ccb_h.func_code = XPT_SASYNC_CB;
		ccb.event_enable = AC_LOST_DEVICE;
		ccb.callback = hpt_async;
		ccb.callback_arg = vbus_ext;
		xpt_action((union ccb *)&ccb);

		for (hba = vbus_ext->hba_list; hba; hba = hba->next) {
			int rid = 0;
			if ((hba->irq_res = bus_alloc_resource_any(hba->pcidev,
				SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE)) == NULL)
			{
				os_printk("can't allocate interrupt");
				return;
			}

			if (bus_setup_intr(hba->pcidev, hba->irq_res, INTR_TYPE_CAM | INTR_MPSAFE,
				NULL, hpt_pci_intr, vbus_ext, &hba->irq_handle))
			{
				os_printk("can't set up interrupt");
				return;
			}
			hba->ldm_adapter.him->intr_control(hba->ldm_adapter.him_handle, HPT_TRUE);

		}

		vbus_ext->shutdown_eh = EVENTHANDLER_REGISTER(shutdown_final,
									hpt_shutdown_vbus, vbus_ext, SHUTDOWN_PRI_DEFAULT);
		if (!vbus_ext->shutdown_eh)
			os_printk("Shutdown event registration failed");
	}

	ldm_for_each_vbus(vbus, vbus_ext) {
		TASK_INIT(&vbus_ext->worker, 0, (task_fn_t *)hpt_do_tasks, vbus_ext);
		if (vbus_ext->tasks)
			TASK_ENQUEUE(&vbus_ext->worker);
	}

	make_dev(&hpt_cdevsw, DRIVER_MINOR, UID_ROOT, GID_OPERATOR,
	    S_IRUSR | S_IWUSR, "%s", driver_name);
}

#if defined(KLD_MODULE)

typedef struct driverlink *driverlink_t;
struct driverlink {
	kobj_class_t	driver;
	TAILQ_ENTRY(driverlink) link;	/* list of drivers in devclass */
};

typedef TAILQ_HEAD(driver_list, driverlink) driver_list_t;

struct devclass {
	TAILQ_ENTRY(devclass) link;
	devclass_t	parent;		/* parent in devclass hierarchy */
	driver_list_t	drivers;	/* bus devclasses store drivers for bus */
	char		*name;
	device_t	*devices;	/* array of devices indexed by unit */
	int		maxunit;	/* size of devices array */
};

static void override_kernel_driver(void)
{
	driverlink_t dl, dlfirst;
	driver_t *tmpdriver;
	devclass_t dc = devclass_find("pci");

	if (dc) {
		dlfirst = TAILQ_FIRST(&dc->drivers);
		for (dl = dlfirst; dl; dl = TAILQ_NEXT(dl, link)) {
			if (strcmp(dl->driver->name, driver_name) == 0) {
				tmpdriver = dl->driver;
				dl->driver = dlfirst->driver;
				dlfirst->driver = tmpdriver;
				break;
			}
		}
	}
}

#else
#define override_kernel_driver()
#endif

static void hpt_init(void *dummy)
{
	if (bootverbose)
		os_printk("%s %s", driver_name_long, driver_ver);

	override_kernel_driver();
	init_config();

	hpt_ich.ich_func = hpt_final_init;
	hpt_ich.ich_arg = NULL;
	if (config_intrhook_establish(&hpt_ich) != 0) {
		printf("%s: cannot establish configuration hook\n",
		    driver_name_long);
	}

}
SYSINIT(hptinit, SI_SUB_CONFIGURE, SI_ORDER_FIRST, hpt_init, NULL);

/*
 * CAM driver interface
 */
static device_method_t driver_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		hpt_probe),
	DEVMETHOD(device_attach,	hpt_attach),
	DEVMETHOD(device_detach,	hpt_detach),
	DEVMETHOD(device_shutdown,	hpt_shutdown),
	{ 0, 0 }
};

static driver_t hpt_pci_driver = {
	driver_name,
	driver_methods,
	sizeof(HBA)
};

#ifndef TARGETNAME
#error "no TARGETNAME found"
#endif

/* use this to force TARGETNAME to be expanded */
#define __DRIVER_MODULE(p1, p2, p3, p4, p5) DRIVER_MODULE(p1, p2, p3, p4, p5)
#define __MODULE_VERSION(p1, p2) MODULE_VERSION(p1, p2)
#define __MODULE_DEPEND(p1, p2, p3, p4, p5) MODULE_DEPEND(p1, p2, p3, p4, p5)
__DRIVER_MODULE(TARGETNAME, pci, hpt_pci_driver, 0, 0);
__MODULE_VERSION(TARGETNAME, 1);
__MODULE_DEPEND(TARGETNAME, cam, 1, 1, 1);

static int hpt_open(struct cdev *dev, int flags, int devtype, struct thread *td)
{
	return 0;
}

static int hpt_close(struct cdev *dev, int flags, int devtype, struct thread *td)
{
	return 0;
}

static int hpt_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag, struct thread *td)
{
	PHPT_IOCTL_PARAM piop=(PHPT_IOCTL_PARAM)data;
	IOCTL_ARG ioctl_args;
	HPT_U32 bytesReturned = 0;

	switch (cmd){
	case HPT_DO_IOCONTROL:
	{
		if (piop->Magic == HPT_IOCTL_MAGIC || piop->Magic == HPT_IOCTL_MAGIC32) {
			KdPrint(("ioctl=%x in=%p len=%d out=%p len=%d\n",
				piop->dwIoControlCode,
				piop->lpInBuffer,
				piop->nInBufferSize,
				piop->lpOutBuffer,
				piop->nOutBufferSize));

			memset(&ioctl_args, 0, sizeof(ioctl_args));

			ioctl_args.dwIoControlCode = piop->dwIoControlCode;
			ioctl_args.nInBufferSize = piop->nInBufferSize;
			ioctl_args.nOutBufferSize = piop->nOutBufferSize;
			ioctl_args.lpBytesReturned = &bytesReturned;

			if (ioctl_args.nInBufferSize) {
				ioctl_args.lpInBuffer = malloc(ioctl_args.nInBufferSize, M_DEVBUF, M_WAITOK);
				if (!ioctl_args.lpInBuffer)
					goto invalid;
				if (copyin((void*)piop->lpInBuffer,
						ioctl_args.lpInBuffer, piop->nInBufferSize))
					goto invalid;
			}

			if (ioctl_args.nOutBufferSize) {
				ioctl_args.lpOutBuffer = malloc(ioctl_args.nOutBufferSize, M_DEVBUF, M_WAITOK | M_ZERO);
				if (!ioctl_args.lpOutBuffer)
					goto invalid;
			}

			hpt_do_ioctl(&ioctl_args);

			if (ioctl_args.result==HPT_IOCTL_RESULT_OK) {
				if (piop->nOutBufferSize) {
					if (copyout(ioctl_args.lpOutBuffer,
						(void*)piop->lpOutBuffer, piop->nOutBufferSize))
						goto invalid;
				}
				if (piop->lpBytesReturned) {
					if (copyout(&bytesReturned,
						(void*)piop->lpBytesReturned, sizeof(HPT_U32)))
						goto invalid;
				}
				if (ioctl_args.lpInBuffer) free(ioctl_args.lpInBuffer, M_DEVBUF);
				if (ioctl_args.lpOutBuffer) free(ioctl_args.lpOutBuffer, M_DEVBUF);
				return 0;
			}
invalid:
			if (ioctl_args.lpInBuffer) free(ioctl_args.lpInBuffer, M_DEVBUF);
			if (ioctl_args.lpOutBuffer) free(ioctl_args.lpOutBuffer, M_DEVBUF);
			return EFAULT;
		}
		return EFAULT;
	}

	case HPT_SCAN_BUS:
	{
		return hpt_rescan_bus();
	}
	default:
		KdPrint(("invalid command!"));
		return EFAULT;
	}

}

static int hpt_rescan_bus(void)
{
	union ccb *ccb;
	PVBUS vbus;
	PVBUS_EXT vbus_ext;

	ldm_for_each_vbus(vbus, vbus_ext) {
		if ((ccb = xpt_alloc_ccb()) == NULL)
		{
			return(ENOMEM);
		}
		if (xpt_create_path(&ccb->ccb_h.path, NULL, cam_sim_path(vbus_ext->sim),
			CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP)
		{
			xpt_free_ccb(ccb);
			return(EIO);
		}
		xpt_rescan(ccb);
	}
	return(0);
}