/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2011 HighPoint Technologies, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <dev/hpt27xx/hpt27xx_config.h>

#include <dev/hpt27xx/os_bsd.h>
#include <dev/hpt27xx/hptintf.h>

static HIM *hpt_match(device_t dev, int scan)
{
	PCI_ID pci_id;
	HIM *him;
	int i;

	for (him = him_list; him; him = him->next) {
		for (i=0; him->get_supported_device_id(i, &pci_id); i++) {
			if (scan && him->get_controller_count)
				him->get_controller_count(&pci_id,0,0);
			if ((pci_get_vendor(dev) == pci_id.vid) &&
			    (pci_get_device(dev) == pci_id.did)) {
				return (him);
			}
		}
	}
	return (NULL);
}

static int hpt_probe(device_t dev)
{
	HIM *him;

	him = hpt_match(dev, 0);
	if (him != NULL) {
		KdPrint(("hpt_probe: adapter at PCI %d:%d:%d, IRQ %d",
			pci_get_bus(dev), pci_get_slot(dev), pci_get_function(dev), pci_get_irq(dev)
			));
		device_set_desc(dev, him->name);
		return (BUS_PROBE_DEFAULT);
	}

	return (ENXIO);
}

static int hpt_attach(device_t dev)
{
	PHBA hba = (PHBA)device_get_softc(dev);
	HIM *him;
	PCI_ID pci_id;
	HPT_UINT size;
	PVBUS vbus;
	PVBUS_EXT vbus_ext;

	if (pci_get_domain(dev) != 0) {
		device_printf(dev, "does not support PCI domains\n");
		return (ENXIO);
	}

	KdPrint(("hpt_attach(%d/%d/%d)", pci_get_bus(dev), pci_get_slot(dev), pci_get_function(dev)));

	him = hpt_match(dev, 1);
	hba->ext_type = EXT_TYPE_HBA;
	hba->ldm_adapter.him = him;
	pci_enable_busmaster(dev);

	pci_id.vid = pci_get_vendor(dev);
	pci_id.did = pci_get_device(dev);
	pci_id.rev = pci_get_revid(dev);
	pci_id.subsys = (HPT_U32)(pci_get_subdevice(dev)) << 16 | pci_get_subvendor(dev);

	size = him->get_adapter_size(&pci_id);
	hba->ldm_adapter.him_handle = malloc(size, M_DEVBUF, M_WAITOK);
	hba->pcidev = dev;
	hba->pciaddr.tree = 0;
	hba->pciaddr.bus = pci_get_bus(dev);
	hba->pciaddr.device = pci_get_slot(dev);
	hba->pciaddr.function = pci_get_function(dev);

	if (!him->create_adapter(&pci_id, hba->pciaddr, hba->ldm_adapter.him_handle, hba)) {
		free(hba->ldm_adapter.him_handle, M_DEVBUF);
		return ENXIO;
	}

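	/*
	 * The HIM adapter instance exists now; report it and register the
	 * adapter with the LDM core, creating a new virtual bus for it if
	 * no existing vbus accepts it.
	 */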
os_printk("adapter at PCI %d:%d:%d, IRQ %d", 109 hba->pciaddr.bus, hba->pciaddr.device, hba->pciaddr.function, pci_get_irq(dev)); 110 111 if (!ldm_register_adapter(&hba->ldm_adapter)) { 112 size = ldm_get_vbus_size(); 113 vbus_ext = malloc(sizeof(VBUS_EXT) + size, M_DEVBUF, M_WAITOK); 114 memset(vbus_ext, 0, sizeof(VBUS_EXT)); 115 vbus_ext->ext_type = EXT_TYPE_VBUS; 116 ldm_create_vbus((PVBUS)vbus_ext->vbus, vbus_ext); 117 ldm_register_adapter(&hba->ldm_adapter); 118 } 119 120 ldm_for_each_vbus(vbus, vbus_ext) { 121 if (hba->ldm_adapter.vbus==vbus) { 122 hba->vbus_ext = vbus_ext; 123 hba->next = vbus_ext->hba_list; 124 vbus_ext->hba_list = hba; 125 break; 126 } 127 } 128 return 0; 129 } 130 131 /* 132 * Maybe we'd better to use the bus_dmamem_alloc to alloc DMA memory, 133 * but there are some problems currently (alignment, etc). 134 */ 135 static __inline void *__get_free_pages(int order) 136 { 137 /* don't use low memory - other devices may get starved */ 138 return contigmalloc(PAGE_SIZE<<order, 139 M_DEVBUF, M_WAITOK, BUS_SPACE_MAXADDR_24BIT, BUS_SPACE_MAXADDR, PAGE_SIZE, 0); 140 } 141 142 static __inline void free_pages(void *p) 143 { 144 free(p, M_DEVBUF); 145 } 146 147 static int hpt_alloc_mem(PVBUS_EXT vbus_ext) 148 { 149 PHBA hba; 150 struct freelist *f; 151 HPT_UINT i; 152 void **p; 153 154 for (hba = vbus_ext->hba_list; hba; hba = hba->next) 155 hba->ldm_adapter.him->get_meminfo(hba->ldm_adapter.him_handle); 156 157 ldm_get_mem_info((PVBUS)vbus_ext->vbus, 0); 158 159 for (f=vbus_ext->freelist_head; f; f=f->next) { 160 KdPrint(("%s: %d*%d=%d bytes", 161 f->tag, f->count, f->size, f->count*f->size)); 162 for (i=0; i<f->count; i++) { 163 p = (void **)malloc(f->size, M_DEVBUF, M_WAITOK); 164 *p = f->head; 165 f->head = p; 166 } 167 } 168 169 for (f=vbus_ext->freelist_dma_head; f; f=f->next) { 170 int order, size, j; 171 172 HPT_ASSERT((f->size & (f->alignment-1))==0); 173 174 for (order=0, size=PAGE_SIZE; size<f->size; order++, size<<=1) 175 ; 176 177 KdPrint(("%s: %d*%d=%d bytes, order %d", 178 f->tag, f->count, f->size, f->count*f->size, order)); 179 HPT_ASSERT(f->alignment<=PAGE_SIZE); 180 181 for (i=0; i<f->count;) { 182 p = (void **)__get_free_pages(order); 183 if (!p) return -1; 184 for (j = size/f->size; j && i<f->count; i++,j--) { 185 *p = f->head; 186 *(BUS_ADDRESS *)(p+1) = (BUS_ADDRESS)vtophys(p); 187 f->head = p; 188 p = (void **)((unsigned long)p + f->size); 189 } 190 } 191 } 192 193 HPT_ASSERT(PAGE_SIZE==DMAPOOL_PAGE_SIZE); 194 195 for (i=0; i<os_max_cache_pages; i++) { 196 p = (void **)__get_free_pages(0); 197 if (!p) return -1; 198 HPT_ASSERT(((HPT_UPTR)p & (DMAPOOL_PAGE_SIZE-1))==0); 199 dmapool_put_page((PVBUS)vbus_ext->vbus, p, (BUS_ADDRESS)vtophys(p)); 200 } 201 202 return 0; 203 } 204 205 static void hpt_free_mem(PVBUS_EXT vbus_ext) 206 { 207 struct freelist *f; 208 void *p; 209 int i; 210 BUS_ADDRESS bus; 211 212 for (f=vbus_ext->freelist_head; f; f=f->next) { 213 #if DBG 214 if (f->count!=f->reserved_count) { 215 KdPrint(("memory leak for freelist %s (%d/%d)", f->tag, f->count, f->reserved_count)); 216 } 217 #endif 218 while ((p=freelist_get(f))) 219 free(p, M_DEVBUF); 220 } 221 222 for (i=0; i<os_max_cache_pages; i++) { 223 p = dmapool_get_page((PVBUS)vbus_ext->vbus, &bus); 224 HPT_ASSERT(p); 225 free_pages(p); 226 } 227 228 for (f=vbus_ext->freelist_dma_head; f; f=f->next) { 229 int order, size; 230 #if DBG 231 if (f->count!=f->reserved_count) { 232 KdPrint(("memory leak for dma freelist %s (%d/%d)", f->tag, f->count, f->reserved_count)); 233 } 234 #endif 
		for (order=0, size=PAGE_SIZE; size<f->size; order++, size<<=1)
			;

		while ((p=freelist_get_dma(f, &bus))) {
			if (order)
				free_pages(p);
			else {
				/* can't free immediately since other blocks in this page may still be in the list */
				if (((HPT_UPTR)p & (PAGE_SIZE-1))==0)
					dmapool_put_page((PVBUS)vbus_ext->vbus, p, bus);
			}
		}
	}

	while ((p = dmapool_get_page((PVBUS)vbus_ext->vbus, &bus)))
		free_pages(p);
}

static int hpt_init_vbus(PVBUS_EXT vbus_ext)
{
	PHBA hba;

	for (hba = vbus_ext->hba_list; hba; hba = hba->next)
		if (!hba->ldm_adapter.him->initialize(hba->ldm_adapter.him_handle)) {
			KdPrint(("failed to initialize %p", hba));
			return -1;
		}

	ldm_initialize_vbus((PVBUS)vbus_ext->vbus, &vbus_ext->hba_list->ldm_adapter);
	return 0;
}

static void hpt_flush_done(PCOMMAND pCmd)
{
	PVDEV vd = pCmd->target;

	if (mIsArray(vd->type) && vd->u.array.transform && vd!=vd->u.array.transform->target) {
		vd = vd->u.array.transform->target;
		HPT_ASSERT(vd);
		pCmd->target = vd;
		pCmd->Result = RETURN_PENDING;
		vdev_queue_cmd(pCmd);
		return;
	}

	*(int *)pCmd->priv = 1;
	wakeup(pCmd);
}

/*
 * flush a vdev (without retry).
 */
static int hpt_flush_vdev(PVBUS_EXT vbus_ext, PVDEV vd)
{
	PCOMMAND pCmd;
	int result = 0, done;
	HPT_UINT count;

	KdPrint(("flushing dev %p", vd));

	hpt_lock_vbus(vbus_ext);

	if (mIsArray(vd->type) && vd->u.array.transform)
		count = max(vd->u.array.transform->source->cmds_per_request,
			vd->u.array.transform->target->cmds_per_request);
	else
		count = vd->cmds_per_request;

	pCmd = ldm_alloc_cmds(vd->vbus, count);

	if (!pCmd) {
		hpt_unlock_vbus(vbus_ext);
		return -1;
	}

	pCmd->type = CMD_TYPE_FLUSH;
	pCmd->flags.hard_flush = 1;
	pCmd->target = vd;
	pCmd->done = hpt_flush_done;
	done = 0;
	pCmd->priv = &done;

	ldm_queue_cmd(pCmd);

	if (!done) {
		while (hpt_sleep(vbus_ext, pCmd, PPAUSE, "hptfls", HPT_OSM_TIMEOUT)) {
			ldm_reset_vbus(vd->vbus);
		}
	}

	KdPrint(("flush result %d", pCmd->Result));

	if (pCmd->Result!=RETURN_SUCCESS)
		result = -1;

	ldm_free_cmds(pCmd);

	hpt_unlock_vbus(vbus_ext);

	return result;
}

static void hpt_stop_tasks(PVBUS_EXT vbus_ext);
static void hpt_shutdown_vbus(PVBUS_EXT vbus_ext, int howto)
{
	PVBUS vbus = (PVBUS)vbus_ext->vbus;
	PHBA hba;
	int i;

	KdPrint(("hpt_shutdown_vbus"));

	/* stop all ctl tasks and disable the worker taskqueue */
	hpt_stop_tasks(vbus_ext);
	vbus_ext->worker.ta_context = 0;

	/* flush devices */
	for (i=0; i<osm_max_targets; i++) {
		PVDEV vd = ldm_find_target(vbus, i);
		if (vd) {
			/* retry once */
			if (hpt_flush_vdev(vbus_ext, vd))
				hpt_flush_vdev(vbus_ext, vd);
		}
	}

	hpt_lock_vbus(vbus_ext);
	ldm_shutdown(vbus);
	hpt_unlock_vbus(vbus_ext);

	ldm_release_vbus(vbus);

	for (hba=vbus_ext->hba_list; hba; hba=hba->next)
		bus_teardown_intr(hba->pcidev, hba->irq_res, hba->irq_handle);

	hpt_free_mem(vbus_ext);

	while ((hba=vbus_ext->hba_list)) {
		vbus_ext->hba_list = hba->next;
		free(hba->ldm_adapter.him_handle, M_DEVBUF);
	}
	callout_drain(&vbus_ext->timer);
	mtx_destroy(&vbus_ext->lock);
	free(vbus_ext, M_DEVBUF);
	KdPrint(("hpt_shutdown_vbus done"));
}
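
/*
 * Run all OSM tasks currently queued on this vbus.  Callers hold the
 * vbus lock; the pending list is detached before the task functions
 * are invoked.
 */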
static void __hpt_do_tasks(PVBUS_EXT vbus_ext)
{
	OSM_TASK *tasks;

	tasks = vbus_ext->tasks;
	vbus_ext->tasks = 0;

	while (tasks) {
		OSM_TASK *t = tasks;
		tasks = t->next;
		t->next = 0;
		t->func(vbus_ext->vbus, t->data);
	}
}

static void hpt_do_tasks(PVBUS_EXT vbus_ext, int pending)
{
	if(vbus_ext){
		hpt_lock_vbus(vbus_ext);
		__hpt_do_tasks(vbus_ext);
		hpt_unlock_vbus(vbus_ext);
	}
}

static void hpt_action(struct cam_sim *sim, union ccb *ccb);
static void hpt_poll(struct cam_sim *sim);
static void hpt_async(void * callback_arg, u_int32_t code, struct cam_path * path, void * arg);
static void hpt_pci_intr(void *arg);

static __inline POS_CMDEXT cmdext_get(PVBUS_EXT vbus_ext)
{
	POS_CMDEXT p = vbus_ext->cmdext_list;
	if (p)
		vbus_ext->cmdext_list = p->next;
	return p;
}

static __inline void cmdext_put(POS_CMDEXT p)
{
	p->next = p->vbus_ext->cmdext_list;
	p->vbus_ext->cmdext_list = p;
}

static void hpt_timeout(void *arg)
{
	PCOMMAND pCmd = (PCOMMAND)arg;
	POS_CMDEXT ext = (POS_CMDEXT)pCmd->priv;

	KdPrint(("pCmd %p timeout", pCmd));

	ldm_reset_vbus((PVBUS)ext->vbus_ext->vbus);
}

static void os_cmddone(PCOMMAND pCmd)
{
	POS_CMDEXT ext = (POS_CMDEXT)pCmd->priv;
	union ccb *ccb = ext->ccb;

	KdPrint(("<8>os_cmddone(%p, %d)", pCmd, pCmd->Result));
	callout_stop(&ext->timeout);
	switch(pCmd->Result) {
	case RETURN_SUCCESS:
		ccb->ccb_h.status = CAM_REQ_CMP;
		break;
	case RETURN_BAD_DEVICE:
		ccb->ccb_h.status = CAM_DEV_NOT_THERE;
		break;
	case RETURN_DEVICE_BUSY:
		ccb->ccb_h.status = CAM_BUSY;
		break;
	case RETURN_INVALID_REQUEST:
		ccb->ccb_h.status = CAM_REQ_INVALID;
		break;
	case RETURN_SELECTION_TIMEOUT:
		ccb->ccb_h.status = CAM_SEL_TIMEOUT;
		break;
	case RETURN_RETRY:
		ccb->ccb_h.status = CAM_BUSY;
		break;
	default:
		ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
		break;
	}

	if (pCmd->flags.data_in) {
		bus_dmamap_sync(ext->vbus_ext->io_dmat, ext->dma_map, BUS_DMASYNC_POSTREAD);
	}
	else if (pCmd->flags.data_out) {
		bus_dmamap_sync(ext->vbus_ext->io_dmat, ext->dma_map, BUS_DMASYNC_POSTWRITE);
	}

	bus_dmamap_unload(ext->vbus_ext->io_dmat, ext->dma_map);

	cmdext_put(ext);
	ldm_free_cmds(pCmd);
	xpt_done(ccb);
}

static int os_buildsgl(PCOMMAND pCmd, PSG pSg, int logical)
{
	POS_CMDEXT ext = (POS_CMDEXT)pCmd->priv;
	union ccb *ccb = ext->ccb;

	if(logical) {
		os_set_sgptr(pSg, (HPT_U8 *)ccb->csio.data_ptr);
		pSg->size = ccb->csio.dxfer_len;
		pSg->eot = 1;
		return TRUE;
	}
	/* since we provide the physical sg list ourselves, nobody should ask us to build one here */
	HPT_ASSERT(0);
	return FALSE;
}
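
/*
 * busdma callback: translate the bus_dma segment list into the HIM
 * scatter/gather format, sync the DMA map, arm the command timeout
 * and queue the command to the LDM layer.
 */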
static void hpt_io_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	PCOMMAND pCmd = (PCOMMAND)arg;
	POS_CMDEXT ext = (POS_CMDEXT)pCmd->priv;
	PSG psg = pCmd->psg;
	int idx;

	HPT_ASSERT(pCmd->flags.physical_sg);

	if (error)
		panic("busdma error");

	HPT_ASSERT(nsegs<=os_max_sg_descriptors);

	if (nsegs != 0) {
		for (idx = 0; idx < nsegs; idx++, psg++) {
			psg->addr.bus = segs[idx].ds_addr;
			psg->size = segs[idx].ds_len;
			psg->eot = 0;
		}
		psg[-1].eot = 1;

		if (pCmd->flags.data_in) {
			bus_dmamap_sync(ext->vbus_ext->io_dmat, ext->dma_map,
				BUS_DMASYNC_PREREAD);
		}
		else if (pCmd->flags.data_out) {
			bus_dmamap_sync(ext->vbus_ext->io_dmat, ext->dma_map,
				BUS_DMASYNC_PREWRITE);
		}
	}
	callout_reset(&ext->timeout, HPT_OSM_TIMEOUT, hpt_timeout, pCmd);
	ldm_queue_cmd(pCmd);
}

static void hpt_scsi_io(PVBUS_EXT vbus_ext, union ccb *ccb)
{
	PVBUS vbus = (PVBUS)vbus_ext->vbus;
	PVDEV vd;
	PCOMMAND pCmd;
	POS_CMDEXT ext;
	HPT_U8 *cdb;
	int error;

	if (ccb->ccb_h.flags & CAM_CDB_POINTER)
		cdb = ccb->csio.cdb_io.cdb_ptr;
	else
		cdb = ccb->csio.cdb_io.cdb_bytes;

	KdPrint(("<8>hpt_scsi_io: ccb %x id %d lun %d cdb %x-%x-%x",
		ccb,
		ccb->ccb_h.target_id, ccb->ccb_h.target_lun,
		*(HPT_U32 *)&cdb[0], *(HPT_U32 *)&cdb[4], *(HPT_U32 *)&cdb[8]
	));

	/* ccb->ccb_h.path_id is not our bus id - don't check it */
	if (ccb->ccb_h.target_lun != 0 ||
		ccb->ccb_h.target_id >= osm_max_targets ||
		(ccb->ccb_h.flags & CAM_CDB_PHYS))
	{
		ccb->ccb_h.status = CAM_TID_INVALID;
		xpt_done(ccb);
		return;
	}

	vd = ldm_find_target(vbus, ccb->ccb_h.target_id);

	if (!vd) {
		ccb->ccb_h.status = CAM_SEL_TIMEOUT;
		xpt_done(ccb);
		return;
	}

	switch (cdb[0]) {
	case TEST_UNIT_READY:
	case START_STOP_UNIT:
	case SYNCHRONIZE_CACHE:
		ccb->ccb_h.status = CAM_REQ_CMP;
		break;

	case INQUIRY:
	{
		PINQUIRYDATA inquiryData;
		memset(ccb->csio.data_ptr, 0, ccb->csio.dxfer_len);
		inquiryData = (PINQUIRYDATA)ccb->csio.data_ptr;

		inquiryData->AdditionalLength = 31;
		inquiryData->CommandQueue = 1;
		memcpy(&inquiryData->VendorId, "HPT     ", 8);
		memcpy(&inquiryData->ProductId, "DISK 0_0        ", 16);

		if (vd->target_id / 10) {
			inquiryData->ProductId[7] = (vd->target_id % 100) / 10 + '0';
			inquiryData->ProductId[8] = (vd->target_id % 100) % 10 + '0';
		}
		else
			inquiryData->ProductId[7] = (vd->target_id % 100) % 10 + '0';

		memcpy(&inquiryData->ProductRevisionLevel, "4.00", 4);

		ccb->ccb_h.status = CAM_REQ_CMP;
	}
	break;

	case READ_CAPACITY:
	{
		HPT_U8 *rbuf = ccb->csio.data_ptr;
		HPT_U32 cap;
		HPT_U8 sector_size_shift = 0;
		HPT_U64 new_cap;
		HPT_U32 sector_size = 0;

		if (mIsArray(vd->type))
			sector_size_shift = vd->u.array.sector_size_shift;
		else{
			if(vd->type == VD_RAW){
				sector_size = vd->u.raw.logical_sector_size;
			}

			switch (sector_size) {
			case 0x1000:
				KdPrint(("set 4k sector size in READ_CAPACITY"));
				sector_size_shift = 3;
				break;
			default:
				break;
			}
		}
		new_cap = vd->capacity >> sector_size_shift;

		if (new_cap > 0xfffffffful)
			cap = 0xffffffff;
		else
			cap = new_cap - 1;

		rbuf[0] = (HPT_U8)(cap>>24);
		rbuf[1] = (HPT_U8)(cap>>16);
		rbuf[2] = (HPT_U8)(cap>>8);
		rbuf[3] = (HPT_U8)cap;
		rbuf[4] = 0;
		rbuf[5] = 0;
		rbuf[6] = 2 << sector_size_shift;
		rbuf[7] = 0;

		ccb->ccb_h.status = CAM_REQ_CMP;
		break;
	}
	case REPORT_LUNS:
	{
		HPT_U8 *rbuf = ccb->csio.data_ptr;
		memset(rbuf, 0, 16);
		rbuf[3] = 8;
		ccb->ccb_h.status = CAM_REQ_CMP;
		break;
	}
	case SERVICE_ACTION_IN:
	{
		HPT_U8 *rbuf = ccb->csio.data_ptr;
		HPT_U64 cap = 0;
		HPT_U8 sector_size_shift = 0;
		HPT_U32 sector_size = 0;

		if(mIsArray(vd->type))
			sector_size_shift = vd->u.array.sector_size_shift;
		else{
			if(vd->type == VD_RAW){
				sector_size = vd->u.raw.logical_sector_size;
			}

			switch (sector_size) {
			case 0x1000:
				KdPrint(("set 4k sector size in SERVICE_ACTION_IN"));
				sector_size_shift = 3;
				break;
			default:
				break;
			}
		}
		cap = (vd->capacity >> sector_size_shift) - 1;

		rbuf[0] = (HPT_U8)(cap>>56);
		rbuf[1] = (HPT_U8)(cap>>48);
		rbuf[2] = (HPT_U8)(cap>>40);
		rbuf[3] = (HPT_U8)(cap>>32);
		rbuf[4] = (HPT_U8)(cap>>24);
		rbuf[5] = (HPT_U8)(cap>>16);
		rbuf[6] = (HPT_U8)(cap>>8);
		rbuf[7] = (HPT_U8)cap;
		rbuf[8] = 0;
		rbuf[9] = 0;
		rbuf[10] = 2 << sector_size_shift;
		rbuf[11] = 0;

		ccb->ccb_h.status = CAM_REQ_CMP;
		break;
	}

	case READ_6:
	case READ_10:
	case READ_16:
	case WRITE_6:
	case WRITE_10:
	case WRITE_16:
	case 0x13:
	case 0x2f:
	case 0x8f: /* VERIFY_16 */
	{
		HPT_U8 sector_size_shift = 0;
		HPT_U32 sector_size = 0;
		pCmd = ldm_alloc_cmds(vbus, vd->cmds_per_request);
		if(!pCmd){
			KdPrint(("Failed to allocate command!"));
			ccb->ccb_h.status = CAM_BUSY;
			break;
		}

		switch (cdb[0]) {
		case READ_6:
		case WRITE_6:
		case 0x13:
			pCmd->uCmd.Ide.Lba = ((HPT_U32)cdb[1] << 16) | ((HPT_U32)cdb[2] << 8) | (HPT_U32)cdb[3];
			pCmd->uCmd.Ide.nSectors = (HPT_U16) cdb[4];
			break;
		case READ_16:
		case WRITE_16:
		case 0x8f: /* VERIFY_16 */
		{
			HPT_U64 block =
				((HPT_U64)cdb[2]<<56) |
				((HPT_U64)cdb[3]<<48) |
				((HPT_U64)cdb[4]<<40) |
				((HPT_U64)cdb[5]<<32) |
				((HPT_U64)cdb[6]<<24) |
				((HPT_U64)cdb[7]<<16) |
				((HPT_U64)cdb[8]<<8) |
				((HPT_U64)cdb[9]);
			pCmd->uCmd.Ide.Lba = block;
			pCmd->uCmd.Ide.nSectors = (HPT_U16)cdb[13] | ((HPT_U16)cdb[12]<<8);
			break;
		}

		default:
			pCmd->uCmd.Ide.Lba = (HPT_U32)cdb[5] | ((HPT_U32)cdb[4] << 8) | ((HPT_U32)cdb[3] << 16) | ((HPT_U32)cdb[2] << 24);
			pCmd->uCmd.Ide.nSectors = (HPT_U16) cdb[8] | ((HPT_U16)cdb[7]<<8);
			break;
		}

		if(mIsArray(vd->type)) {
			sector_size_shift = vd->u.array.sector_size_shift;
		}
		else{
			if(vd->type == VD_RAW){
				sector_size = vd->u.raw.logical_sector_size;
			}

			switch (sector_size) {
			case 0x1000:
				KdPrint(("<8>resize sector size from 4k to 512"));
				sector_size_shift = 3;
				break;
			default:
				break;
			}
		}
		pCmd->uCmd.Ide.Lba <<= sector_size_shift;
		pCmd->uCmd.Ide.nSectors <<= sector_size_shift;

		switch (cdb[0]) {
		case READ_6:
		case READ_10:
		case READ_16:
			pCmd->flags.data_in = 1;
			break;
		case WRITE_6:
		case WRITE_10:
		case WRITE_16:
			pCmd->flags.data_out = 1;
			break;
		}
		pCmd->priv = ext = cmdext_get(vbus_ext);
		HPT_ASSERT(ext);
		ext->ccb = ccb;
		pCmd->target = vd;
		pCmd->done = os_cmddone;
		pCmd->buildsgl = os_buildsgl;

		pCmd->psg = ext->psg;
		pCmd->flags.physical_sg = 1;
		error = bus_dmamap_load_ccb(vbus_ext->io_dmat,
			ext->dma_map, ccb,
			hpt_io_dmamap_callback, pCmd,
			BUS_DMA_WAITOK
			);
		KdPrint(("<8>bus_dmamap_load return %d", error));
		if (error && error!=EINPROGRESS) {
			os_printk("bus_dmamap_load error %d", error);
			cmdext_put(ext);
			ldm_free_cmds(pCmd);
			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
			xpt_done(ccb);
		}
		return;
	}

	default:
		ccb->ccb_h.status = CAM_REQ_INVALID;
		break;
	}

	xpt_done(ccb);
	return;
}
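
/*
 * CAM action entry point: dispatch CCBs queued on the SIM.  SCSI I/O
 * is handed to hpt_scsi_io(); other function codes are completed
 * inline.
 */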
static void hpt_action(struct cam_sim *sim, union ccb *ccb)
{
	PVBUS_EXT vbus_ext = (PVBUS_EXT)cam_sim_softc(sim);

	KdPrint(("<8>hpt_action(fn=%d, id=%d)", ccb->ccb_h.func_code,
		ccb->ccb_h.target_id));

	hpt_assert_vbus_locked(vbus_ext);
	switch (ccb->ccb_h.func_code) {

	case XPT_SCSI_IO:
		hpt_scsi_io(vbus_ext, ccb);
		return;

	case XPT_RESET_BUS:
		ldm_reset_vbus((PVBUS)vbus_ext->vbus);
		break;

	case XPT_GET_TRAN_SETTINGS:
	case XPT_SET_TRAN_SETTINGS:
		ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
		break;

	case XPT_CALC_GEOMETRY:
		ccb->ccg.heads = 255;
		ccb->ccg.secs_per_track = 63;
		ccb->ccg.cylinders = ccb->ccg.volume_size / (ccb->ccg.heads * ccb->ccg.secs_per_track);
		ccb->ccb_h.status = CAM_REQ_CMP;
		break;

	case XPT_PATH_INQ:
	{
		struct ccb_pathinq *cpi = &ccb->cpi;

		cpi->version_num = 1;
		cpi->hba_inquiry = PI_SDTR_ABLE;
		cpi->target_sprt = 0;
		cpi->hba_misc = PIM_NOBUSRESET;
		cpi->hba_eng_cnt = 0;
		cpi->max_target = osm_max_targets;
		cpi->max_lun = 0;
		cpi->unit_number = cam_sim_unit(sim);
		cpi->bus_id = cam_sim_bus(sim);
		cpi->initiator_id = osm_max_targets;
		cpi->base_transfer_speed = 3300;

		strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
		strlcpy(cpi->hba_vid, "HPT ", HBA_IDLEN);
		strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
		cpi->transport = XPORT_SPI;
		cpi->transport_version = 2;
		cpi->protocol = PROTO_SCSI;
		cpi->protocol_version = SCSI_REV_2;
		cpi->ccb_h.status = CAM_REQ_CMP;
		break;
	}

	default:
		ccb->ccb_h.status = CAM_REQ_INVALID;
		break;
	}

	xpt_done(ccb);
	return;
}

static void hpt_pci_intr(void *arg)
{
	PVBUS_EXT vbus_ext = (PVBUS_EXT)arg;
	hpt_lock_vbus(vbus_ext);
	ldm_intr((PVBUS)vbus_ext->vbus);
	hpt_unlock_vbus(vbus_ext);
}

static void hpt_poll(struct cam_sim *sim)
{
	PVBUS_EXT vbus_ext = (PVBUS_EXT)cam_sim_softc(sim);

	hpt_assert_vbus_locked(vbus_ext);
	ldm_intr((PVBUS)vbus_ext->vbus);
}

static void hpt_async(void * callback_arg, u_int32_t code, struct cam_path * path, void * arg)
{
	KdPrint(("<8>hpt_async"));
}

static int hpt_shutdown(device_t dev)
{
	KdPrint(("hpt_shutdown(dev=%p)", dev));
	return 0;
}

static int hpt_detach(device_t dev)
{
	/* we don't allow the driver to be unloaded. */
	return EBUSY;
}

static void hpt_ioctl_done(struct _IOCTL_ARG *arg)
{
	arg->ioctl_cmnd = 0;
	wakeup(arg);
}

static void __hpt_do_ioctl(PVBUS_EXT vbus_ext, IOCTL_ARG *ioctl_args)
{
	ioctl_args->result = -1;
	ioctl_args->done = hpt_ioctl_done;
	ioctl_args->ioctl_cmnd = (void *)1;

	hpt_lock_vbus(vbus_ext);
	ldm_ioctl((PVBUS)vbus_ext->vbus, ioctl_args);

	while (ioctl_args->ioctl_cmnd) {
		if (hpt_sleep(vbus_ext, ioctl_args, PPAUSE, "hptctl", HPT_OSM_TIMEOUT)==0)
			break;
		ldm_reset_vbus((PVBUS)vbus_ext->vbus);
		__hpt_do_tasks(vbus_ext);
	}

	/* KdPrint(("ioctl %x result %d", ioctl_args->dwIoControlCode, ioctl_args->result)); */

	hpt_unlock_vbus(vbus_ext);
}

static void hpt_do_ioctl(IOCTL_ARG *ioctl_args)
{
	PVBUS vbus;
	PVBUS_EXT vbus_ext;

	ldm_for_each_vbus(vbus, vbus_ext) {
		__hpt_do_ioctl(vbus_ext, ioctl_args);
		if (ioctl_args->result!=HPT_IOCTL_RESULT_WRONG_VBUS)
			return;
	}
}

#define HPT_DO_IOCTL(code, inbuf, insize, outbuf, outsize) ({\
	IOCTL_ARG arg;\
	arg.dwIoControlCode = code;\
	arg.lpInBuffer = inbuf;\
	arg.lpOutBuffer = outbuf;\
	arg.nInBufferSize = insize;\
	arg.nOutBufferSize = outsize;\
	arg.lpBytesReturned = 0;\
	hpt_do_ioctl(&arg);\
	arg.result;\
})

#define DEVICEID_VALID(id) ((id) && ((HPT_U32)(id)!=0xffffffff))

static int hpt_get_logical_devices(DEVICEID * pIds, int nMaxCount)
{
	int i;
	HPT_U32 count = nMaxCount-1;

	if (HPT_DO_IOCTL(HPT_IOCTL_GET_LOGICAL_DEVICES,
			&count, sizeof(HPT_U32), pIds, sizeof(DEVICEID)*nMaxCount))
		return -1;

	nMaxCount = (int)pIds[0];
	for (i=0; i<nMaxCount; i++) pIds[i] = pIds[i+1];
	return nMaxCount;
}

static int hpt_get_device_info_v3(DEVICEID id, PLOGICAL_DEVICE_INFO_V3 pInfo)
{
	return HPT_DO_IOCTL(HPT_IOCTL_GET_DEVICE_INFO_V3,
			&id, sizeof(DEVICEID), pInfo, sizeof(LOGICAL_DEVICE_INFO_V3));
}

/* This does not logically belong in this file, but we want to use the ioctl interface. */
static int __hpt_stop_tasks(PVBUS_EXT vbus_ext, DEVICEID id)
{
	LOGICAL_DEVICE_INFO_V3 devinfo;
	int i, result;
	DEVICEID param[2] = { id, 0 };

	if (hpt_get_device_info_v3(id, &devinfo))
		return -1;

	if (devinfo.Type!=LDT_ARRAY)
		return -1;

	if (devinfo.u.array.Flags & ARRAY_FLAG_REBUILDING)
		param[1] = AS_REBUILD_ABORT;
	else if (devinfo.u.array.Flags & ARRAY_FLAG_VERIFYING)
		param[1] = AS_VERIFY_ABORT;
	else if (devinfo.u.array.Flags & ARRAY_FLAG_INITIALIZING)
		param[1] = AS_INITIALIZE_ABORT;
	else if (devinfo.u.array.Flags & ARRAY_FLAG_TRANSFORMING)
		param[1] = AS_TRANSFORM_ABORT;
	else
		return -1;

	KdPrint(("SET_ARRAY_STATE(%x, %d)", param[0], param[1]));
	result = HPT_DO_IOCTL(HPT_IOCTL_SET_ARRAY_STATE,
			param, sizeof(param), 0, 0);

	for (i=0; i<devinfo.u.array.nDisk; i++)
		if (DEVICEID_VALID(devinfo.u.array.Members[i]))
			__hpt_stop_tasks(vbus_ext, devinfo.u.array.Members[i]);

	return result;
}

static void hpt_stop_tasks(PVBUS_EXT vbus_ext)
{
	DEVICEID ids[32];
	int i, count;

	count = hpt_get_logical_devices((DEVICEID *)&ids, sizeof(ids)/sizeof(ids[0]));

	for (i=0; i<count; i++)
		__hpt_stop_tasks(vbus_ext, ids[i]);
}
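
/*
 * Character-device entry points: /dev/<driver_name> exposes the HPT
 * ioctl interface (HPT_DO_IOCONTROL / HPT_SCAN_BUS) to userland.
 */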
static d_open_t hpt_open;
static d_close_t hpt_close;
static d_ioctl_t hpt_ioctl;
static int hpt_rescan_bus(void);

static struct cdevsw hpt_cdevsw = {
	.d_open = hpt_open,
	.d_close = hpt_close,
	.d_ioctl = hpt_ioctl,
	.d_name = driver_name,
	.d_version = D_VERSION,
};

static struct intr_config_hook hpt_ich;

/*
 * hpt_final_init will be called after all hpt_attach calls have completed.
 */
static void hpt_final_init(void *dummy)
{
	int i, unit_number = 0;
	PVBUS_EXT vbus_ext;
	PVBUS vbus;
	PHBA hba;

	/* Clear the config hook */
	config_intrhook_disestablish(&hpt_ich);

	/* allocate memory */
	i = 0;
	ldm_for_each_vbus(vbus, vbus_ext) {
		if (hpt_alloc_mem(vbus_ext)) {
			os_printk("out of memory");
			return;
		}
		i++;
	}

	if (!i) {
		if (bootverbose)
			os_printk("no controller detected.");
		return;
	}

	/* initialize hardware */
	ldm_for_each_vbus(vbus, vbus_ext) {
		/* make the timer available here */
		mtx_init(&vbus_ext->lock, "hptsleeplock", NULL, MTX_DEF);
		callout_init_mtx(&vbus_ext->timer, &vbus_ext->lock, 0);
		if (hpt_init_vbus(vbus_ext)) {
			os_printk("failed to initialize hardware");
			break; /* FIXME */
		}
	}

	/* register CAM interface */
	ldm_for_each_vbus(vbus, vbus_ext) {
		struct cam_devq *devq;
		struct ccb_setasync ccb;

		if (bus_dma_tag_create(NULL,	/* parent */
				4,	/* alignment */
				BUS_SPACE_MAXADDR_32BIT+1,	/* boundary */
				BUS_SPACE_MAXADDR,	/* lowaddr */
				BUS_SPACE_MAXADDR,	/* highaddr */
				NULL, NULL,	/* filter, filterarg */
				PAGE_SIZE * (os_max_sg_descriptors-1),	/* maxsize */
				os_max_sg_descriptors,	/* nsegments */
				0x10000,	/* maxsegsize */
				BUS_DMA_WAITOK,	/* flags */
				busdma_lock_mutex,	/* lockfunc */
				&vbus_ext->lock,	/* lockfuncarg */
				&vbus_ext->io_dmat	/* tag */))
		{
			return;
		}

		for (i=0; i<os_max_queue_comm; i++) {
			POS_CMDEXT ext = (POS_CMDEXT)malloc(sizeof(OS_CMDEXT), M_DEVBUF, M_WAITOK);
			ext->vbus_ext = vbus_ext;
			ext->next = vbus_ext->cmdext_list;
			vbus_ext->cmdext_list = ext;

			if (bus_dmamap_create(vbus_ext->io_dmat, 0, &ext->dma_map)) {
				os_printk("Can't create dma map(%d)", i);
				return;
			}
			callout_init_mtx(&ext->timeout, &vbus_ext->lock, 0);
		}

		if ((devq = cam_simq_alloc(os_max_queue_comm)) == NULL) {
			os_printk("cam_simq_alloc failed");
			return;
		}
		vbus_ext->sim = cam_sim_alloc(hpt_action, hpt_poll, driver_name,
			vbus_ext, unit_number, &vbus_ext->lock, os_max_queue_comm, /*tagged*/8, devq);
		unit_number++;
		if (!vbus_ext->sim) {
			os_printk("cam_sim_alloc failed");
			cam_simq_free(devq);
			return;
		}

		hpt_lock_vbus(vbus_ext);
		if (xpt_bus_register(vbus_ext->sim, NULL, 0) != CAM_SUCCESS) {
			hpt_unlock_vbus(vbus_ext);
			os_printk("xpt_bus_register failed");
			cam_sim_free(vbus_ext->sim, /*free devq*/ TRUE);
			vbus_ext->sim = NULL;
			return;
		}

		if (xpt_create_path(&vbus_ext->path, /*periph */ NULL,
				cam_sim_path(vbus_ext->sim), CAM_TARGET_WILDCARD,
				CAM_LUN_WILDCARD) != CAM_REQ_CMP)
		{
			hpt_unlock_vbus(vbus_ext);
			os_printk("xpt_create_path failed");
			xpt_bus_deregister(cam_sim_path(vbus_ext->sim));
			cam_sim_free(vbus_ext->sim, /*free_devq*/TRUE);
			vbus_ext->sim = NULL;
			return;
		}

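		/*
		 * Register an async callback so CAM notifies hpt_async() of
		 * AC_LOST_DEVICE events on this path.
		 */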
		memset(&ccb, 0, sizeof(ccb));
		xpt_setup_ccb(&ccb.ccb_h, vbus_ext->path, /*priority*/5);
		ccb.ccb_h.func_code = XPT_SASYNC_CB;
		ccb.event_enable = AC_LOST_DEVICE;
		ccb.callback = hpt_async;
		ccb.callback_arg = vbus_ext;
		xpt_action((union ccb *)&ccb);
		hpt_unlock_vbus(vbus_ext);

		for (hba = vbus_ext->hba_list; hba; hba = hba->next) {
			int rid = 0;
			if ((hba->irq_res = bus_alloc_resource_any(hba->pcidev,
				SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE)) == NULL)
			{
				os_printk("can't allocate interrupt");
				return;
			}
			if (bus_setup_intr(hba->pcidev, hba->irq_res, INTR_TYPE_CAM | INTR_MPSAFE,
				NULL, hpt_pci_intr, vbus_ext, &hba->irq_handle))
			{
				os_printk("can't set up interrupt");
				return;
			}
			hba->ldm_adapter.him->intr_control(hba->ldm_adapter.him_handle, HPT_TRUE);
		}

		vbus_ext->shutdown_eh = EVENTHANDLER_REGISTER(shutdown_final,
			hpt_shutdown_vbus, vbus_ext, SHUTDOWN_PRI_DEFAULT);
		if (!vbus_ext->shutdown_eh)
			os_printk("Shutdown event registration failed");
	}

	ldm_for_each_vbus(vbus, vbus_ext) {
		TASK_INIT(&vbus_ext->worker, 0, (task_fn_t *)hpt_do_tasks, vbus_ext);
		if (vbus_ext->tasks)
			TASK_ENQUEUE(&vbus_ext->worker);
	}

	make_dev(&hpt_cdevsw, DRIVER_MINOR, UID_ROOT, GID_OPERATOR,
		S_IRUSR | S_IWUSR, "%s", driver_name);
}

#if defined(KLD_MODULE)

typedef struct driverlink *driverlink_t;
struct driverlink {
	kobj_class_t driver;
	TAILQ_ENTRY(driverlink) link;	/* list of drivers in devclass */
};

typedef TAILQ_HEAD(driver_list, driverlink) driver_list_t;

struct devclass {
	TAILQ_ENTRY(devclass) link;
	devclass_t parent;	/* parent in devclass hierarchy */
	driver_list_t drivers;	/* bus devclasses store drivers for bus */
	char *name;
	device_t *devices;	/* array of devices indexed by unit */
	int maxunit;		/* size of devices array */
};

static void override_kernel_driver(void)
{
	driverlink_t dl, dlfirst;
	driver_t *tmpdriver;
	devclass_t dc = devclass_find("pci");

	if (dc){
		dlfirst = TAILQ_FIRST(&dc->drivers);
		for (dl = dlfirst; dl; dl = TAILQ_NEXT(dl, link)) {
			if(strcmp(dl->driver->name, driver_name) == 0) {
				tmpdriver=dl->driver;
				dl->driver=dlfirst->driver;
				dlfirst->driver=tmpdriver;
				break;
			}
		}
	}
}

#else
#define override_kernel_driver()
#endif

static void hpt_init(void *dummy)
{
	if (bootverbose)
		os_printk("%s %s", driver_name_long, driver_ver);

	override_kernel_driver();
	init_config();

	hpt_ich.ich_func = hpt_final_init;
	hpt_ich.ich_arg = NULL;
	if (config_intrhook_establish(&hpt_ich) != 0) {
		printf("%s: cannot establish configuration hook\n",
			driver_name_long);
	}
}
SYSINIT(hptinit, SI_SUB_CONFIGURE, SI_ORDER_FIRST, hpt_init, NULL);

/*
 * CAM driver interface
 */
static device_method_t driver_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, hpt_probe),
	DEVMETHOD(device_attach, hpt_attach),
	DEVMETHOD(device_detach, hpt_detach),
	DEVMETHOD(device_shutdown, hpt_shutdown),
	{ 0, 0 }
};

static driver_t hpt_pci_driver = {
	driver_name,
	driver_methods,
	sizeof(HBA)
};

#ifndef TARGETNAME
#error "no TARGETNAME found"
#endif

/* use these wrapper macros so TARGETNAME gets expanded before substitution */
#define __DRIVER_MODULE(p1, p2, p3, p4, p5) DRIVER_MODULE(p1, p2, p3, p4, p5)
#define __MODULE_VERSION(p1, p2) MODULE_VERSION(p1, p2)
#define __MODULE_DEPEND(p1, p2, p3, p4, p5) MODULE_DEPEND(p1, p2, p3, p4, p5)
__DRIVER_MODULE(TARGETNAME, pci, hpt_pci_driver, 0, 0);
__MODULE_VERSION(TARGETNAME, 1);
__MODULE_DEPEND(TARGETNAME, cam, 1, 1, 1);

static int hpt_open(struct cdev *dev, int flags, int devtype, struct thread *td)
{
	return 0;
}

static int hpt_close(struct cdev *dev, int flags, int devtype, struct thread *td)
{
	return 0;
}

static int hpt_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag, struct thread *td)
{
	PHPT_IOCTL_PARAM piop=(PHPT_IOCTL_PARAM)data;
	IOCTL_ARG ioctl_args;
	HPT_U32 bytesReturned = 0;

	switch (cmd){
	case HPT_DO_IOCONTROL:
	{
		if (piop->Magic == HPT_IOCTL_MAGIC || piop->Magic == HPT_IOCTL_MAGIC32) {
			KdPrint(("<8>ioctl=%x in=%p len=%d out=%p len=%d\n",
				piop->dwIoControlCode,
				piop->lpInBuffer,
				piop->nInBufferSize,
				piop->lpOutBuffer,
				piop->nOutBufferSize));

			memset(&ioctl_args, 0, sizeof(ioctl_args));

			ioctl_args.dwIoControlCode = piop->dwIoControlCode;
			ioctl_args.nInBufferSize = piop->nInBufferSize;
			ioctl_args.nOutBufferSize = piop->nOutBufferSize;
			ioctl_args.lpBytesReturned = &bytesReturned;

			if (ioctl_args.nInBufferSize) {
				ioctl_args.lpInBuffer = malloc(ioctl_args.nInBufferSize, M_DEVBUF, M_WAITOK);
				if (copyin((void*)piop->lpInBuffer,
						ioctl_args.lpInBuffer, piop->nInBufferSize))
					goto invalid;
			}

			if (ioctl_args.nOutBufferSize)
				ioctl_args.lpOutBuffer = malloc(ioctl_args.nOutBufferSize, M_DEVBUF, M_WAITOK | M_ZERO);

			hpt_do_ioctl(&ioctl_args);

			if (ioctl_args.result==HPT_IOCTL_RESULT_OK) {
				if (piop->nOutBufferSize) {
					if (copyout(ioctl_args.lpOutBuffer,
							(void*)piop->lpOutBuffer, piop->nOutBufferSize))
						goto invalid;
				}
				if (piop->lpBytesReturned) {
					if (copyout(&bytesReturned,
							(void*)piop->lpBytesReturned, sizeof(HPT_U32)))
						goto invalid;
				}
				if (ioctl_args.lpInBuffer) free(ioctl_args.lpInBuffer, M_DEVBUF);
				if (ioctl_args.lpOutBuffer) free(ioctl_args.lpOutBuffer, M_DEVBUF);
				return 0;
			}
invalid:
			if (ioctl_args.lpInBuffer) free(ioctl_args.lpInBuffer, M_DEVBUF);
			if (ioctl_args.lpOutBuffer) free(ioctl_args.lpOutBuffer, M_DEVBUF);
			return EFAULT;
		}
		return EFAULT;
	}

	case HPT_SCAN_BUS:
	{
		return hpt_rescan_bus();
	}
	default:
		KdPrint(("invalid command!"));
		return EFAULT;
	}
}

static int hpt_rescan_bus(void)
{
	union ccb *ccb;
	PVBUS vbus;
	PVBUS_EXT vbus_ext;

	ldm_for_each_vbus(vbus, vbus_ext) {
		if ((ccb = xpt_alloc_ccb()) == NULL)
		{
			return(ENOMEM);
		}
		if (xpt_create_path(&ccb->ccb_h.path, NULL, cam_sim_path(vbus_ext->sim),
			CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP)
		{
			xpt_free_ccb(ccb);
			return(EIO);
		}
		xpt_rescan(ccb);
	}
	return(0);
}