/* $Id: osm_bsd.c,v 1.36 2010/05/11 03:12:11 lcn Exp $ */
/*-
 * HighPoint RAID Driver for FreeBSD
 * Copyright (C) 2005-2011 HighPoint Technologies, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */
#include <dev/hptnr/hptnr_config.h>
#include <dev/hptnr/os_bsd.h>
#include <dev/hptnr/hptintf.h>

static HIM *hpt_match(device_t dev)
{
	PCI_ID pci_id;
	HIM *him;
	int i;

	for (him = him_list; him; him = him->next) {
		for (i=0; him->get_supported_device_id(i, &pci_id); i++) {
			if (him->get_controller_count)
				him->get_controller_count(&pci_id,0,0);
			if ((pci_get_vendor(dev) == pci_id.vid) &&
				(pci_get_device(dev) == pci_id.did)){
				return (him);
			}
		}
	}

	return (NULL);
}

static int hpt_probe(device_t dev)
{
	HIM *him;

	him = hpt_match(dev);
	if (him != NULL) {
		KdPrint(("hpt_probe: adapter at PCI %d:%d:%d, IRQ %d",
			pci_get_bus(dev), pci_get_slot(dev), pci_get_function(dev), pci_get_irq(dev)
			));
		device_set_desc(dev, him->name);
		return (BUS_PROBE_DEFAULT);
	}

	return (ENXIO);
}

static int hpt_attach(device_t dev)
{
	PHBA hba = (PHBA)device_get_softc(dev);
	HIM *him;
	PCI_ID pci_id;
	HPT_UINT size;
	PVBUS vbus;
	PVBUS_EXT vbus_ext;

	KdPrint(("hpt_attach(%d/%d/%d)", pci_get_bus(dev), pci_get_slot(dev), pci_get_function(dev)));

	him = hpt_match(dev);
	hba->ext_type = EXT_TYPE_HBA;
	hba->ldm_adapter.him = him;

	pci_enable_busmaster(dev);

	pci_id.vid = pci_get_vendor(dev);
	pci_id.did = pci_get_device(dev);
	pci_id.rev = pci_get_revid(dev);
	pci_id.subsys = (HPT_U32)(pci_get_subdevice(dev)) << 16 | pci_get_subvendor(dev);

	size = him->get_adapter_size(&pci_id);
	hba->ldm_adapter.him_handle = malloc(size, M_DEVBUF, M_WAITOK);

	hba->pcidev = dev;
	hba->pciaddr.tree = 0;
	hba->pciaddr.bus = pci_get_bus(dev);
	hba->pciaddr.device = pci_get_slot(dev);
	hba->pciaddr.function = pci_get_function(dev);

	if (!him->create_adapter(&pci_id, hba->pciaddr, hba->ldm_adapter.him_handle, hba)) {
		free(hba->ldm_adapter.him_handle, M_DEVBUF);
		return ENXIO;
	}

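	/*
	 * Announce the adapter and register it with the logical device
	 * manager (LDM); if registration fails because no suitable virtual
	 * bus exists yet, create one and register again.
	 */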
	os_printk("adapter at PCI %d:%d:%d, IRQ %d",
		hba->pciaddr.bus, hba->pciaddr.device, hba->pciaddr.function, pci_get_irq(dev));

	if (!ldm_register_adapter(&hba->ldm_adapter)) {
		size = ldm_get_vbus_size();
		vbus_ext = malloc(sizeof(VBUS_EXT) + size, M_DEVBUF, M_WAITOK |
			M_ZERO);
		vbus_ext->ext_type = EXT_TYPE_VBUS;
		ldm_create_vbus((PVBUS)vbus_ext->vbus, vbus_ext);
		ldm_register_adapter(&hba->ldm_adapter);
	}

	ldm_for_each_vbus(vbus, vbus_ext) {
		if (hba->ldm_adapter.vbus==vbus) {
			hba->vbus_ext = vbus_ext;
			hba->next = vbus_ext->hba_list;
			vbus_ext->hba_list = hba;
			break;
		}
	}
	return 0;
}

/*
 * Maybe we'd better use bus_dmamem_alloc() to allocate the DMA memory,
 * but there are still a few problems with it (alignment, etc.).
 */
static __inline void *__get_free_pages(int order)
{
	/* don't use low memory - other devices may get starved */
	return contigmalloc(PAGE_SIZE<<order,
		M_DEVBUF, M_WAITOK, BUS_SPACE_MAXADDR_24BIT, BUS_SPACE_MAXADDR, PAGE_SIZE, 0);
}

static __inline void free_pages(void *p, int order)
{
	contigfree(p, PAGE_SIZE<<order, M_DEVBUF);
}

static int hpt_alloc_mem(PVBUS_EXT vbus_ext)
{
	PHBA hba;
	struct freelist *f;
	HPT_UINT i;
	void **p;

	for (hba = vbus_ext->hba_list; hba; hba = hba->next)
		hba->ldm_adapter.him->get_meminfo(hba->ldm_adapter.him_handle);

	ldm_get_mem_info((PVBUS)vbus_ext->vbus, 0);

	for (f=vbus_ext->freelist_head; f; f=f->next) {
		KdPrint(("%s: %d*%d=%d bytes",
			f->tag, f->count, f->size, f->count*f->size));
		for (i=0; i<f->count; i++) {
			p = (void **)malloc(f->size, M_DEVBUF, M_WAITOK);
			if (!p) return (ENXIO);
			*p = f->head;
			f->head = p;
		}
	}

	for (f=vbus_ext->freelist_dma_head; f; f=f->next) {
		int order, size, j;

		HPT_ASSERT((f->size & (f->alignment-1))==0);

		for (order=0, size=PAGE_SIZE; size<f->size; order++, size<<=1)
			;

		KdPrint(("%s: %d*%d=%d bytes, order %d",
			f->tag, f->count, f->size, f->count*f->size, order));
		HPT_ASSERT(f->alignment<=PAGE_SIZE);

		for (i=0; i<f->count;) {
			p = (void **)__get_free_pages(order);
			if (!p) return -1;
			for (j = size/f->size; j && i<f->count; i++,j--) {
				*p = f->head;
				*(BUS_ADDRESS *)(p+1) = (BUS_ADDRESS)vtophys(p);
				f->head = p;
				p = (void **)((unsigned long)p + f->size);
			}
		}
	}

	HPT_ASSERT(PAGE_SIZE==DMAPOOL_PAGE_SIZE);

	for (i=0; i<os_max_cache_pages; i++) {
		p = (void **)__get_free_pages(0);
		if (!p) return -1;
		HPT_ASSERT(((HPT_UPTR)p & (DMAPOOL_PAGE_SIZE-1))==0);
		dmapool_put_page((PVBUS)vbus_ext->vbus, p, (BUS_ADDRESS)vtophys(p));
	}

	return 0;
}

static void hpt_free_mem(PVBUS_EXT vbus_ext)
{
	struct freelist *f;
	void *p;
	int i;
	BUS_ADDRESS bus;

	for (f=vbus_ext->freelist_head; f; f=f->next) {
#if DBG
		if (f->count!=f->reserved_count) {
			KdPrint(("memory leak for freelist %s (%d/%d)", f->tag, f->count, f->reserved_count));
		}
#endif
		while ((p=freelist_get(f)))
			free(p, M_DEVBUF);
	}

	for (i=0; i<os_max_cache_pages; i++) {
		p = dmapool_get_page((PVBUS)vbus_ext->vbus, &bus);
		HPT_ASSERT(p);
		free_pages(p, 0);
	}

	for (f=vbus_ext->freelist_dma_head; f; f=f->next) {
		int order, size;
#if DBG
		if (f->count!=f->reserved_count) {
			KdPrint(("memory leak for dma freelist %s (%d/%d)", f->tag, f->count, f->reserved_count));
		}
#endif
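		/*
		 * Recompute the same page order hpt_alloc_mem() used for this
		 * freelist so the blocks are freed in matching chunks.
		 */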
		for (order=0, size=PAGE_SIZE; size<f->size; order++, size<<=1) ;

		while ((p=freelist_get_dma(f, &bus))) {
			if (order)
				free_pages(p, order);
			else {
				/* can't free immediately since other blocks in this page may still be in the list */
				if (((HPT_UPTR)p & (PAGE_SIZE-1))==0)
					dmapool_put_page((PVBUS)vbus_ext->vbus, p, bus);
			}
		}
	}

	while ((p = dmapool_get_page((PVBUS)vbus_ext->vbus, &bus)))
		free_pages(p, 0);
}

static int hpt_init_vbus(PVBUS_EXT vbus_ext)
{
	PHBA hba;

	for (hba = vbus_ext->hba_list; hba; hba = hba->next)
		if (!hba->ldm_adapter.him->initialize(hba->ldm_adapter.him_handle)) {
			KdPrint(("failed to initialize %p", hba));
			return -1;
		}

	ldm_initialize_vbus((PVBUS)vbus_ext->vbus, &vbus_ext->hba_list->ldm_adapter);
	return 0;
}

static void hpt_flush_done(PCOMMAND pCmd)
{
	PVDEV vd = pCmd->target;

	if (mIsArray(vd->type) && vd->u.array.transform && vd!=vd->u.array.transform->target) {
		vd = vd->u.array.transform->target;
		HPT_ASSERT(vd);
		pCmd->target = vd;
		pCmd->Result = RETURN_PENDING;
		vdev_queue_cmd(pCmd);
		return;
	}

	*(int *)pCmd->priv = 1;
	wakeup(pCmd);
}

/*
 * flush a vdev (without retry).
 */
static int hpt_flush_vdev(PVBUS_EXT vbus_ext, PVDEV vd)
{
	PCOMMAND pCmd;
	int result = 0, done;
	HPT_UINT count;

	KdPrint(("flushing dev %p", vd));

	hpt_assert_vbus_locked(vbus_ext);

	if (mIsArray(vd->type) && vd->u.array.transform)
		count = MAX(vd->u.array.transform->source->cmds_per_request,
			vd->u.array.transform->target->cmds_per_request);
	else
		count = vd->cmds_per_request;

	pCmd = ldm_alloc_cmds(vd->vbus, count);

	if (!pCmd) {
		return -1;
	}

	pCmd->type = CMD_TYPE_FLUSH;
	pCmd->flags.hard_flush = 1;
	pCmd->target = vd;
	pCmd->done = hpt_flush_done;
	done = 0;
	pCmd->priv = &done;

	ldm_queue_cmd(pCmd);

	if (!done) {
		while (hpt_sleep(vbus_ext, pCmd, PPAUSE, "hptfls", HPT_OSM_TIMEOUT)) {
			ldm_reset_vbus(vd->vbus);
		}
	}

	KdPrint(("flush result %d", pCmd->Result));

	if (pCmd->Result!=RETURN_SUCCESS)
		result = -1;

	ldm_free_cmds(pCmd);

	return result;
}

static void hpt_stop_tasks(PVBUS_EXT vbus_ext);
static void hpt_shutdown_vbus(PVBUS_EXT vbus_ext, int howto)
{
	PVBUS vbus = (PVBUS)vbus_ext->vbus;
	PHBA hba;
	int i;

	KdPrint(("hpt_shutdown_vbus"));

	/* stop all ctl tasks and disable the worker taskqueue */
	hpt_stop_tasks(vbus_ext);
	hpt_lock_vbus(vbus_ext);
	vbus_ext->worker.ta_context = 0;

	/* flush devices */
	for (i=0; i<osm_max_targets; i++) {
		PVDEV vd = ldm_find_target(vbus, i);
		if (vd) {
			/* retry once */
			if (hpt_flush_vdev(vbus_ext, vd))
				hpt_flush_vdev(vbus_ext, vd);
		}
	}

	ldm_shutdown(vbus);
	hpt_unlock_vbus(vbus_ext);

	ldm_release_vbus(vbus);

	for (hba=vbus_ext->hba_list; hba; hba=hba->next)
		bus_teardown_intr(hba->pcidev, hba->irq_res, hba->irq_handle);

	hpt_free_mem(vbus_ext);

	while ((hba=vbus_ext->hba_list)) {
		vbus_ext->hba_list = hba->next;
		free(hba->ldm_adapter.him_handle, M_DEVBUF);
	}

	callout_drain(&vbus_ext->timer);
	mtx_destroy(&vbus_ext->lock);
	free(vbus_ext, M_DEVBUF);
	KdPrint(("hpt_shutdown_vbus done"));
}

static void __hpt_do_tasks(PVBUS_EXT vbus_ext)
{
	OSM_TASK *tasks;

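	/* detach the whole pending list first, then run each task callback (the vbus lock is held by the caller) */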
	tasks = vbus_ext->tasks;
	vbus_ext->tasks = 0;

	while (tasks) {
		OSM_TASK *t = tasks;
		tasks = t->next;
		t->next = 0;
		t->func(vbus_ext->vbus, t->data);
	}
}

static void hpt_do_tasks(PVBUS_EXT vbus_ext, int pending)
{
	if(vbus_ext){
		hpt_lock_vbus(vbus_ext);
		__hpt_do_tasks(vbus_ext);
		hpt_unlock_vbus(vbus_ext);
	}
}

static void hpt_action(struct cam_sim *sim, union ccb *ccb);
static void hpt_poll(struct cam_sim *sim);
static void hpt_async(void * callback_arg, u_int32_t code, struct cam_path * path, void * arg);
static void hpt_pci_intr(void *arg);

static __inline POS_CMDEXT cmdext_get(PVBUS_EXT vbus_ext)
{
	POS_CMDEXT p = vbus_ext->cmdext_list;
	if (p)
		vbus_ext->cmdext_list = p->next;
	return p;
}

static __inline void cmdext_put(POS_CMDEXT p)
{
	p->next = p->vbus_ext->cmdext_list;
	p->vbus_ext->cmdext_list = p;
}

static void hpt_timeout(void *arg)
{
	PCOMMAND pCmd = (PCOMMAND)arg;
	POS_CMDEXT ext = (POS_CMDEXT)pCmd->priv;

	KdPrint(("pCmd %p timeout", pCmd));

	ldm_reset_vbus((PVBUS)ext->vbus_ext->vbus);
}

static void os_cmddone(PCOMMAND pCmd)
{
	POS_CMDEXT ext = (POS_CMDEXT)pCmd->priv;
	union ccb *ccb = ext->ccb;

	KdPrint(("os_cmddone(%p, %d)", pCmd, pCmd->Result));

	callout_stop(&ext->timeout);

	switch(pCmd->Result) {
	case RETURN_SUCCESS:
		ccb->ccb_h.status = CAM_REQ_CMP;
		break;
	case RETURN_BAD_DEVICE:
		ccb->ccb_h.status = CAM_DEV_NOT_THERE;
		break;
	case RETURN_DEVICE_BUSY:
		ccb->ccb_h.status = CAM_BUSY;
		break;
	case RETURN_INVALID_REQUEST:
		ccb->ccb_h.status = CAM_REQ_INVALID;
		break;
	case RETURN_SELECTION_TIMEOUT:
		ccb->ccb_h.status = CAM_SEL_TIMEOUT;
		break;
	case RETURN_RETRY:
		ccb->ccb_h.status = CAM_BUSY;
		break;
	default:
		ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
		break;
	}

	if (pCmd->flags.data_in) {
		bus_dmamap_sync(ext->vbus_ext->io_dmat, ext->dma_map, BUS_DMASYNC_POSTREAD);
	}
	else if (pCmd->flags.data_out) {
		bus_dmamap_sync(ext->vbus_ext->io_dmat, ext->dma_map, BUS_DMASYNC_POSTWRITE);
	}

	bus_dmamap_unload(ext->vbus_ext->io_dmat, ext->dma_map);

	cmdext_put(ext);
	ldm_free_cmds(pCmd);
	xpt_done(ccb);
}

static int os_buildsgl(PCOMMAND pCmd, PSG pSg, int logical)
{
	/* since we have provided physical sg, nobody will ask us to build physical sg */
	HPT_ASSERT(0);
	return FALSE;
}

static void hpt_io_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	PCOMMAND pCmd = (PCOMMAND)arg;
	POS_CMDEXT ext = (POS_CMDEXT)pCmd->priv;
	PSG psg = pCmd->psg;
	int idx;

	HPT_ASSERT(pCmd->flags.physical_sg);

	if (error)
		panic("busdma error");

	HPT_ASSERT(nsegs<=os_max_sg_descriptors);

	if (nsegs != 0) {
		for (idx = 0; idx < nsegs; idx++, psg++) {
			psg->addr.bus = segs[idx].ds_addr;
			psg->size = segs[idx].ds_len;
			psg->eot = 0;
		}
		psg[-1].eot = 1;

		if (pCmd->flags.data_in) {
			bus_dmamap_sync(ext->vbus_ext->io_dmat, ext->dma_map,
				BUS_DMASYNC_PREREAD);
		}
		else if (pCmd->flags.data_out) {
			bus_dmamap_sync(ext->vbus_ext->io_dmat, ext->dma_map,
				BUS_DMASYNC_PREWRITE);
		}
	}

	callout_reset(&ext->timeout, HPT_OSM_TIMEOUT, hpt_timeout, pCmd);
	ldm_queue_cmd(pCmd);
}

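/*
 * hpt_scsi_io: handle an XPT_SCSI_IO request.  INQUIRY, READ CAPACITY and a
 * few other commands are emulated here; READ/WRITE/VERIFY requests are
 * translated into an internal command, mapped for DMA and queued to the
 * virtual device.
 */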
static void hpt_scsi_io(PVBUS_EXT vbus_ext, union ccb *ccb)
{
	PVBUS vbus = (PVBUS)vbus_ext->vbus;
	PVDEV vd;
	PCOMMAND pCmd;
	POS_CMDEXT ext;
	HPT_U8 *cdb;

	if (ccb->ccb_h.flags & CAM_CDB_POINTER)
		cdb = ccb->csio.cdb_io.cdb_ptr;
	else
		cdb = ccb->csio.cdb_io.cdb_bytes;

	KdPrint(("hpt_scsi_io: ccb %x id %d lun %d cdb %x-%x-%x",
		ccb,
		ccb->ccb_h.target_id, ccb->ccb_h.target_lun,
		*(HPT_U32 *)&cdb[0], *(HPT_U32 *)&cdb[4], *(HPT_U32 *)&cdb[8]
		));

	/* ccb->ccb_h.path_id is not our bus id - don't check it */
	if (ccb->ccb_h.target_lun != 0 ||
		ccb->ccb_h.target_id >= osm_max_targets ||
		(ccb->ccb_h.flags & CAM_CDB_PHYS))
	{
		ccb->ccb_h.status = CAM_TID_INVALID;
		xpt_done(ccb);
		return;
	}

	vd = ldm_find_target(vbus, ccb->ccb_h.target_id);

	if (!vd) {
		ccb->ccb_h.status = CAM_SEL_TIMEOUT;
		xpt_done(ccb);
		return;
	}

	switch (cdb[0]) {
	case TEST_UNIT_READY:
	case START_STOP_UNIT:
	case SYNCHRONIZE_CACHE:
		ccb->ccb_h.status = CAM_REQ_CMP;
		break;

	case INQUIRY:
	{
		PINQUIRYDATA inquiryData;
		memset(ccb->csio.data_ptr, 0, ccb->csio.dxfer_len);
		inquiryData = (PINQUIRYDATA)ccb->csio.data_ptr;

		inquiryData->AdditionalLength = 31;
		inquiryData->CommandQueue = 1;
		memcpy(&inquiryData->VendorId, "HPT     ", 8);
		memcpy(&inquiryData->ProductId, "DISK 0_0        ", 16);

		if (vd->target_id / 10) {
			inquiryData->ProductId[7] = (vd->target_id % 100) / 10 + '0';
			inquiryData->ProductId[8] = (vd->target_id % 100) % 10 + '0';
		}
		else
			inquiryData->ProductId[7] = (vd->target_id % 100) % 10 + '0';

		memcpy(&inquiryData->ProductRevisionLevel, "4.00", 4);

		ccb->ccb_h.status = CAM_REQ_CMP;
	}
	break;

	case READ_CAPACITY:
	{
		HPT_U8 *rbuf = ccb->csio.data_ptr;
		HPT_U32 cap;

		if (vd->capacity>0xfffffffful)
			cap = 0xfffffffful;
		else
			cap = vd->capacity - 1;

		rbuf[0] = (HPT_U8)(cap>>24);
		rbuf[1] = (HPT_U8)(cap>>16);
		rbuf[2] = (HPT_U8)(cap>>8);
		rbuf[3] = (HPT_U8)cap;
		rbuf[4] = 0;
		rbuf[5] = 0;
		rbuf[6] = 2;
		rbuf[7] = 0;

		ccb->ccb_h.status = CAM_REQ_CMP;
		break;
	}

	case SERVICE_ACTION_IN:
	{
		HPT_U8 *rbuf = ccb->csio.data_ptr;
		HPT_U64 cap = vd->capacity - 1;

		rbuf[0] = (HPT_U8)(cap>>56);
		rbuf[1] = (HPT_U8)(cap>>48);
		rbuf[2] = (HPT_U8)(cap>>40);
		rbuf[3] = (HPT_U8)(cap>>32);
		rbuf[4] = (HPT_U8)(cap>>24);
		rbuf[5] = (HPT_U8)(cap>>16);
		rbuf[6] = (HPT_U8)(cap>>8);
		rbuf[7] = (HPT_U8)cap;
		rbuf[8] = 0;
		rbuf[9] = 0;
		rbuf[10] = 2;
		rbuf[11] = 0;

		ccb->ccb_h.status = CAM_REQ_CMP;
		break;
	}

	case READ_6:
	case READ_10:
	case READ_16:
	case WRITE_6:
	case WRITE_10:
	case WRITE_16:
	case 0x13:
	case 0x2f:
	case 0x8f: /* VERIFY_16 */
	{
		int error;
		pCmd = ldm_alloc_cmds(vbus, vd->cmds_per_request);
		if(!pCmd){
			KdPrint(("Failed to allocate command!"));
			ccb->ccb_h.status = CAM_BUSY;
			break;
		}

		switch (cdb[0]) {
		case READ_6:
		case WRITE_6:
		case 0x13:
			pCmd->uCmd.Ide.Lba = ((HPT_U32)cdb[1] << 16) | ((HPT_U32)cdb[2] << 8) | (HPT_U32)cdb[3];
			pCmd->uCmd.Ide.nSectors = (HPT_U16) cdb[4];
			break;
		case READ_16:
		case WRITE_16:
		case 0x8f: /* VERIFY_16 */
		{
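			/* assemble the 64-bit LBA from the big-endian CDB fields (bytes 2-9) */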
			HPT_U64 block =
				((HPT_U64)cdb[2]<<56) |
				((HPT_U64)cdb[3]<<48) |
				((HPT_U64)cdb[4]<<40) |
				((HPT_U64)cdb[5]<<32) |
				((HPT_U64)cdb[6]<<24) |
				((HPT_U64)cdb[7]<<16) |
				((HPT_U64)cdb[8]<<8) |
				((HPT_U64)cdb[9]);
			pCmd->uCmd.Ide.Lba = block;
			pCmd->uCmd.Ide.nSectors = (HPT_U16)cdb[13] | ((HPT_U16)cdb[12]<<8);
			break;
		}

		default:
			pCmd->uCmd.Ide.Lba = (HPT_U32)cdb[5] | ((HPT_U32)cdb[4] << 8) | ((HPT_U32)cdb[3] << 16) | ((HPT_U32)cdb[2] << 24);
			pCmd->uCmd.Ide.nSectors = (HPT_U16) cdb[8] | ((HPT_U16)cdb[7]<<8);
			break;
		}

		switch (cdb[0]) {
		case READ_6:
		case READ_10:
		case READ_16:
			pCmd->flags.data_in = 1;
			break;
		case WRITE_6:
		case WRITE_10:
		case WRITE_16:
			pCmd->flags.data_out = 1;
			break;
		}
		pCmd->priv = ext = cmdext_get(vbus_ext);
		HPT_ASSERT(ext);
		ext->ccb = ccb;
		pCmd->target = vd;
		pCmd->done = os_cmddone;
		pCmd->buildsgl = os_buildsgl;
		pCmd->psg = ext->psg;
		pCmd->flags.physical_sg = 1;
		error = bus_dmamap_load_ccb(vbus_ext->io_dmat,
				ext->dma_map, ccb,
				hpt_io_dmamap_callback, pCmd,
				BUS_DMA_WAITOK
				);
		KdPrint(("bus_dmamap_load return %d", error));
		if (error && error!=EINPROGRESS) {
			os_printk("bus_dmamap_load error %d", error);
			cmdext_put(ext);
			ldm_free_cmds(pCmd);
			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
			xpt_done(ccb);
		}
		return;
	}

	default:
		ccb->ccb_h.status = CAM_REQ_INVALID;
		break;
	}

	xpt_done(ccb);
	return;
}

static void hpt_action(struct cam_sim *sim, union ccb *ccb)
{
	PVBUS_EXT vbus_ext = (PVBUS_EXT)cam_sim_softc(sim);

	KdPrint(("hpt_action(fn=%d, id=%d)", ccb->ccb_h.func_code, ccb->ccb_h.target_id));

	hpt_assert_vbus_locked(vbus_ext);
	switch (ccb->ccb_h.func_code) {

	case XPT_SCSI_IO:
		hpt_scsi_io(vbus_ext, ccb);
		return;

	case XPT_RESET_BUS:
		ldm_reset_vbus((PVBUS)vbus_ext->vbus);
		break;

	case XPT_GET_TRAN_SETTINGS:
	case XPT_SET_TRAN_SETTINGS:
		ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
		break;

	case XPT_CALC_GEOMETRY:
		ccb->ccg.heads = 255;
		ccb->ccg.secs_per_track = 63;
		ccb->ccg.cylinders = ccb->ccg.volume_size / (ccb->ccg.heads * ccb->ccg.secs_per_track);
		ccb->ccb_h.status = CAM_REQ_CMP;
		break;

	case XPT_PATH_INQ:
	{
		struct ccb_pathinq *cpi = &ccb->cpi;

		cpi->version_num = 1;
		cpi->hba_inquiry = PI_SDTR_ABLE;
		cpi->target_sprt = 0;
		cpi->hba_misc = PIM_NOBUSRESET;
		cpi->hba_eng_cnt = 0;
		cpi->max_target = osm_max_targets;
		cpi->max_lun = 0;
		cpi->unit_number = cam_sim_unit(sim);
		cpi->bus_id = cam_sim_bus(sim);
		cpi->initiator_id = osm_max_targets;
		cpi->base_transfer_speed = 3300;

		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
		strncpy(cpi->hba_vid, "HPT ", HBA_IDLEN);
		strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
		cpi->transport = XPORT_SPI;
		cpi->transport_version = 2;
		cpi->protocol = PROTO_SCSI;
		cpi->protocol_version = SCSI_REV_2;
		cpi->ccb_h.status = CAM_REQ_CMP;
		break;
	}

	default:
		ccb->ccb_h.status = CAM_REQ_INVALID;
		break;
	}

	xpt_done(ccb);
	return;
}

static void hpt_pci_intr(void *arg)
{
	PVBUS_EXT vbus_ext = (PVBUS_EXT)arg;
	hpt_lock_vbus(vbus_ext);
	ldm_intr((PVBUS)vbus_ext->vbus);
	hpt_unlock_vbus(vbus_ext);
}

static void hpt_poll(struct cam_sim *sim)
{
	PVBUS_EXT vbus_ext = cam_sim_softc(sim);
	hpt_assert_vbus_locked(vbus_ext);
	ldm_intr((PVBUS)vbus_ext->vbus);
}

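/* CAM async event callback: nothing to do here beyond the debug print. */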
static void hpt_async(void * callback_arg, u_int32_t code, struct cam_path * path, void * arg)
{
	KdPrint(("hpt_async"));
}

static int hpt_shutdown(device_t dev)
{
	KdPrint(("hpt_shutdown(dev=%p)", dev));
	return 0;
}

static int hpt_detach(device_t dev)
{
	/* we don't allow the driver to be unloaded. */
	return EBUSY;
}

static void hpt_ioctl_done(struct _IOCTL_ARG *arg)
{
	arg->ioctl_cmnd = 0;
	wakeup(arg);
}

static void __hpt_do_ioctl(PVBUS_EXT vbus_ext, IOCTL_ARG *ioctl_args)
{
	ioctl_args->result = -1;
	ioctl_args->done = hpt_ioctl_done;
	ioctl_args->ioctl_cmnd = (void *)1;

	hpt_lock_vbus(vbus_ext);
	ldm_ioctl((PVBUS)vbus_ext->vbus, ioctl_args);

	while (ioctl_args->ioctl_cmnd) {
		if (hpt_sleep(vbus_ext, ioctl_args, PPAUSE, "hptctl", HPT_OSM_TIMEOUT)==0)
			break;
		ldm_reset_vbus((PVBUS)vbus_ext->vbus);
		__hpt_do_tasks(vbus_ext);
	}

	/* KdPrint(("ioctl %x result %d", ioctl_args->dwIoControlCode, ioctl_args->result)); */

	hpt_unlock_vbus(vbus_ext);
}

static void hpt_do_ioctl(IOCTL_ARG *ioctl_args)
{
	PVBUS vbus;
	PVBUS_EXT vbus_ext;

	ldm_for_each_vbus(vbus, vbus_ext) {
		__hpt_do_ioctl(vbus_ext, ioctl_args);
		if (ioctl_args->result!=HPT_IOCTL_RESULT_WRONG_VBUS)
			return;
	}
}

#define HPT_DO_IOCTL(code, inbuf, insize, outbuf, outsize) ({\
	IOCTL_ARG arg;\
	arg.dwIoControlCode = code;\
	arg.lpInBuffer = inbuf;\
	arg.lpOutBuffer = outbuf;\
	arg.nInBufferSize = insize;\
	arg.nOutBufferSize = outsize;\
	arg.lpBytesReturned = 0;\
	hpt_do_ioctl(&arg);\
	arg.result;\
})

#define DEVICEID_VALID(id) ((id) && ((HPT_U32)(id)!=0xffffffff))

static int hpt_get_logical_devices(DEVICEID * pIds, int nMaxCount)
{
	int i;
	HPT_U32 count = nMaxCount-1;

	if (HPT_DO_IOCTL(HPT_IOCTL_GET_LOGICAL_DEVICES,
			&count, sizeof(HPT_U32), pIds, sizeof(DEVICEID)*nMaxCount))
		return -1;

	nMaxCount = (int)pIds[0];
	for (i=0; i<nMaxCount; i++) pIds[i] = pIds[i+1];
	return nMaxCount;
}

static int hpt_get_device_info_v3(DEVICEID id, PLOGICAL_DEVICE_INFO_V3 pInfo)
{
	return HPT_DO_IOCTL(HPT_IOCTL_GET_DEVICE_INFO_V3,
			&id, sizeof(DEVICEID), pInfo, sizeof(LOGICAL_DEVICE_INFO_V3));
}

/* this does not logically belong in this file, but we want to use the ioctl interface */
static int __hpt_stop_tasks(PVBUS_EXT vbus_ext, DEVICEID id)
{
	LOGICAL_DEVICE_INFO_V3 devinfo;
	int i, result;
	DEVICEID param[2] = { id, 0 };

	if (hpt_get_device_info_v3(id, &devinfo))
		return -1;

	if (devinfo.Type!=LDT_ARRAY)
		return -1;

	if (devinfo.u.array.Flags & ARRAY_FLAG_REBUILDING)
		param[1] = AS_REBUILD_ABORT;
	else if (devinfo.u.array.Flags & ARRAY_FLAG_VERIFYING)
		param[1] = AS_VERIFY_ABORT;
	else if (devinfo.u.array.Flags & ARRAY_FLAG_INITIALIZING)
		param[1] = AS_INITIALIZE_ABORT;
	else if (devinfo.u.array.Flags & ARRAY_FLAG_TRANSFORMING)
		param[1] = AS_TRANSFORM_ABORT;
	else
		return -1;

	KdPrint(("SET_ARRAY_STATE(%x, %d)", param[0], param[1]));
	result = HPT_DO_IOCTL(HPT_IOCTL_SET_ARRAY_STATE,
				param, sizeof(param), 0, 0);

	for (i=0; i<devinfo.u.array.nDisk; i++)
		if (DEVICEID_VALID(devinfo.u.array.Members[i]))
			__hpt_stop_tasks(vbus_ext, devinfo.u.array.Members[i]);

	return result;
}

static void hpt_stop_tasks(PVBUS_EXT vbus_ext)
{
	DEVICEID ids[32];
	int i, count;

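	/*
	 * Enumerate the logical devices (up to 32) and abort any background
	 * rebuild/verify/initialize/transform task running on each of them.
	 */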
	count = hpt_get_logical_devices((DEVICEID *)&ids, sizeof(ids)/sizeof(ids[0]));

	for (i=0; i<count; i++)
		__hpt_stop_tasks(vbus_ext, ids[i]);
}

static d_open_t hpt_open;
static d_close_t hpt_close;
static d_ioctl_t hpt_ioctl;
static int hpt_rescan_bus(void);

static struct cdevsw hpt_cdevsw = {
	.d_open = hpt_open,
	.d_close = hpt_close,
	.d_ioctl = hpt_ioctl,
	.d_name = driver_name,
	.d_version = D_VERSION,
};

static struct intr_config_hook hpt_ich;

/*
 * hpt_final_init() will be called after all hpt_attach() calls have completed.
 */
static void hpt_final_init(void *dummy)
{
	int i,unit_number=0;
	PVBUS_EXT vbus_ext;
	PVBUS vbus;
	PHBA hba;

	/* Clear the config hook */
	config_intrhook_disestablish(&hpt_ich);

	/* allocate memory */
	i = 0;
	ldm_for_each_vbus(vbus, vbus_ext) {
		if (hpt_alloc_mem(vbus_ext)) {
			os_printk("out of memory");
			return;
		}
		i++;
	}

	if (!i) {
		if (bootverbose)
			os_printk("no controller detected.");
		return;
	}

	/* initialize hardware */
	ldm_for_each_vbus(vbus, vbus_ext) {
		/* make timer available here */
		mtx_init(&vbus_ext->lock, "hptsleeplock", NULL, MTX_DEF);
		callout_init_mtx(&vbus_ext->timer, &vbus_ext->lock, 0);
		if (hpt_init_vbus(vbus_ext)) {
			os_printk("failed to initialize hardware");
			break; /* FIXME */
		}
	}

	/* register CAM interface */
	ldm_for_each_vbus(vbus, vbus_ext) {
		struct cam_devq *devq;
		struct ccb_setasync ccb;

		if (bus_dma_tag_create(NULL,	/* parent */
				4,	/* alignment */
				BUS_SPACE_MAXADDR_32BIT+1,	/* boundary */
				BUS_SPACE_MAXADDR,	/* lowaddr */
				BUS_SPACE_MAXADDR,	/* highaddr */
				NULL, NULL,	/* filter, filterarg */
				PAGE_SIZE * (os_max_sg_descriptors-1),	/* maxsize */
				os_max_sg_descriptors,	/* nsegments */
				0x10000,	/* maxsegsize */
				BUS_DMA_WAITOK,	/* flags */
				busdma_lock_mutex,	/* lockfunc */
				&vbus_ext->lock,	/* lockfuncarg */
				&vbus_ext->io_dmat	/* tag */))
		{
			return ;
		}

		for (i=0; i<os_max_queue_comm; i++) {
			POS_CMDEXT ext = (POS_CMDEXT)malloc(sizeof(OS_CMDEXT), M_DEVBUF, M_WAITOK);
			if (!ext) {
				os_printk("Can't alloc cmdext(%d)", i);
				return ;
			}
			ext->vbus_ext = vbus_ext;
			ext->next = vbus_ext->cmdext_list;
			vbus_ext->cmdext_list = ext;

			if (bus_dmamap_create(vbus_ext->io_dmat, 0, &ext->dma_map)) {
				os_printk("Can't create dma map(%d)", i);
				return ;
			}
			callout_init_mtx(&ext->timeout, &vbus_ext->lock, 0);
		}

		if ((devq = cam_simq_alloc(os_max_queue_comm)) == NULL) {
			os_printk("cam_simq_alloc failed");
			return ;
		}

		hpt_lock_vbus(vbus_ext);
		vbus_ext->sim = cam_sim_alloc(hpt_action, hpt_poll, driver_name,
				vbus_ext, unit_number, &vbus_ext->lock,
				os_max_queue_comm, /*tagged*/8, devq);
		unit_number++;
		if (!vbus_ext->sim) {
			os_printk("cam_sim_alloc failed");
			cam_simq_free(devq);
			hpt_unlock_vbus(vbus_ext);
			return ;
		}

		if (xpt_bus_register(vbus_ext->sim, NULL, 0) != CAM_SUCCESS) {
			os_printk("xpt_bus_register failed");
			cam_sim_free(vbus_ext->sim, /*free devq*/ TRUE);
			hpt_unlock_vbus(vbus_ext);
			vbus_ext->sim = NULL;
			return ;
		}

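		/* create a wildcard path on this SIM; it is used below to register the async callback */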
		if (xpt_create_path(&vbus_ext->path, /*periph */ NULL,
				cam_sim_path(vbus_ext->sim), CAM_TARGET_WILDCARD,
				CAM_LUN_WILDCARD) != CAM_REQ_CMP)
		{
			os_printk("xpt_create_path failed");
			xpt_bus_deregister(cam_sim_path(vbus_ext->sim));
			cam_sim_free(vbus_ext->sim, /*free_devq*/TRUE);
			hpt_unlock_vbus(vbus_ext);
			vbus_ext->sim = NULL;
			return ;
		}
		hpt_unlock_vbus(vbus_ext);

		xpt_setup_ccb(&ccb.ccb_h, vbus_ext->path, /*priority*/5);
		ccb.ccb_h.func_code = XPT_SASYNC_CB;
		ccb.event_enable = AC_LOST_DEVICE;
		ccb.callback = hpt_async;
		ccb.callback_arg = vbus_ext;
		xpt_action((union ccb *)&ccb);

		for (hba = vbus_ext->hba_list; hba; hba = hba->next) {
			int rid = 0;
			if ((hba->irq_res = bus_alloc_resource(hba->pcidev,
				SYS_RES_IRQ, &rid, 0, ~0ul, 1, RF_SHAREABLE | RF_ACTIVE)) == NULL)
			{
				os_printk("can't allocate interrupt");
				return ;
			}

			if (bus_setup_intr(hba->pcidev, hba->irq_res, INTR_TYPE_CAM | INTR_MPSAFE,
				NULL, hpt_pci_intr, vbus_ext, &hba->irq_handle))
			{
				os_printk("can't set up interrupt");
				return ;
			}
			hba->ldm_adapter.him->intr_control(hba->ldm_adapter.him_handle, HPT_TRUE);

		}

		vbus_ext->shutdown_eh = EVENTHANDLER_REGISTER(shutdown_final,
				hpt_shutdown_vbus, vbus_ext, SHUTDOWN_PRI_DEFAULT);
		if (!vbus_ext->shutdown_eh)
			os_printk("Shutdown event registration failed");
	}

	ldm_for_each_vbus(vbus, vbus_ext) {
		TASK_INIT(&vbus_ext->worker, 0, (task_fn_t *)hpt_do_tasks, vbus_ext);
		if (vbus_ext->tasks)
			TASK_ENQUEUE(&vbus_ext->worker);
	}

	make_dev(&hpt_cdevsw, DRIVER_MINOR, UID_ROOT, GID_OPERATOR,
		S_IRUSR | S_IWUSR, "%s", driver_name);
}

#if defined(KLD_MODULE)

typedef struct driverlink *driverlink_t;
struct driverlink {
	kobj_class_t driver;
	TAILQ_ENTRY(driverlink) link; /* list of drivers in devclass */
};

typedef TAILQ_HEAD(driver_list, driverlink) driver_list_t;

struct devclass {
	TAILQ_ENTRY(devclass) link;
	devclass_t parent; /* parent in devclass hierarchy */
	driver_list_t drivers; /* bus devclasses store drivers for bus */
	char *name;
	device_t *devices; /* array of devices indexed by unit */
	int maxunit; /* size of devices array */
};

static void override_kernel_driver(void)
{
	driverlink_t dl, dlfirst;
	driver_t *tmpdriver;
	devclass_t dc = devclass_find("pci");

	if (dc){
		dlfirst = TAILQ_FIRST(&dc->drivers);
		for (dl = dlfirst; dl; dl = TAILQ_NEXT(dl, link)) {
			if(strcmp(dl->driver->name, driver_name) == 0) {
				tmpdriver=dl->driver;
				dl->driver=dlfirst->driver;
				dlfirst->driver=tmpdriver;
				break;
			}
		}
	}
}

#else
#define override_kernel_driver()
#endif

static void hpt_init(void *dummy)
{
	if (bootverbose)
		os_printk("%s %s", driver_name_long, driver_ver);

	override_kernel_driver();
	init_config();

	hpt_ich.ich_func = hpt_final_init;
	hpt_ich.ich_arg = NULL;
	if (config_intrhook_establish(&hpt_ich) != 0) {
		printf("%s: cannot establish configuration hook\n",
			driver_name_long);
	}

}
SYSINIT(hptinit, SI_SUB_CONFIGURE, SI_ORDER_FIRST, hpt_init, NULL);

/*
 * CAM driver interface
 */
static device_method_t driver_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, hpt_probe),
	DEVMETHOD(device_attach, hpt_attach),
	DEVMETHOD(device_detach, hpt_detach),
	DEVMETHOD(device_shutdown, hpt_shutdown),
	{ 0, 0 }
};

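/* newbus glue: the PCI driver is registered through the DRIVER_MODULE wrappers below */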
static driver_t hpt_pci_driver = {
	driver_name,
	driver_methods,
	sizeof(HBA)
};

static devclass_t hpt_devclass;

#ifndef TARGETNAME
#error "no TARGETNAME found"
#endif

/* use these wrappers so that TARGETNAME gets macro-expanded */
#define __DRIVER_MODULE(p1, p2, p3, p4, p5, p6) DRIVER_MODULE(p1, p2, p3, p4, p5, p6)
#define __MODULE_VERSION(p1, p2) MODULE_VERSION(p1, p2)
#define __MODULE_DEPEND(p1, p2, p3, p4, p5) MODULE_DEPEND(p1, p2, p3, p4, p5)
__DRIVER_MODULE(TARGETNAME, pci, hpt_pci_driver, hpt_devclass, 0, 0);
__MODULE_VERSION(TARGETNAME, 1);
__MODULE_DEPEND(TARGETNAME, cam, 1, 1, 1);

static int hpt_open(struct cdev *dev, int flags, int devtype, struct thread *td)
{
	return 0;
}

static int hpt_close(struct cdev *dev, int flags, int devtype, struct thread *td)
{
	return 0;
}

static int hpt_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag, struct thread *td)
{
	PHPT_IOCTL_PARAM piop=(PHPT_IOCTL_PARAM)data;
	IOCTL_ARG ioctl_args;
	HPT_U32 bytesReturned;

	switch (cmd){
	case HPT_DO_IOCONTROL:
	{
		if (piop->Magic == HPT_IOCTL_MAGIC || piop->Magic == HPT_IOCTL_MAGIC32) {
			KdPrint(("ioctl=%x in=%p len=%d out=%p len=%d\n",
				piop->dwIoControlCode,
				piop->lpInBuffer,
				piop->nInBufferSize,
				piop->lpOutBuffer,
				piop->nOutBufferSize));

			memset(&ioctl_args, 0, sizeof(ioctl_args));

			ioctl_args.dwIoControlCode = piop->dwIoControlCode;
			ioctl_args.nInBufferSize = piop->nInBufferSize;
			ioctl_args.nOutBufferSize = piop->nOutBufferSize;
			ioctl_args.lpBytesReturned = &bytesReturned;

			if (ioctl_args.nInBufferSize) {
				ioctl_args.lpInBuffer = malloc(ioctl_args.nInBufferSize, M_DEVBUF, M_WAITOK);
				if (!ioctl_args.lpInBuffer)
					goto invalid;
				if (copyin((void*)piop->lpInBuffer,
					ioctl_args.lpInBuffer, piop->nInBufferSize))
					goto invalid;
			}

			if (ioctl_args.nOutBufferSize) {
				ioctl_args.lpOutBuffer = malloc(ioctl_args.nOutBufferSize, M_DEVBUF, M_WAITOK);
				if (!ioctl_args.lpOutBuffer)
					goto invalid;
			}

			hpt_do_ioctl(&ioctl_args);

			if (ioctl_args.result==HPT_IOCTL_RESULT_OK) {
				if (piop->nOutBufferSize) {
					if (copyout(ioctl_args.lpOutBuffer,
						(void*)piop->lpOutBuffer, piop->nOutBufferSize))
						goto invalid;
				}
				if (piop->lpBytesReturned) {
					if (copyout(&bytesReturned,
						(void*)piop->lpBytesReturned, sizeof(HPT_U32)))
						goto invalid;
				}
				if (ioctl_args.lpInBuffer) free(ioctl_args.lpInBuffer, M_DEVBUF);
				if (ioctl_args.lpOutBuffer) free(ioctl_args.lpOutBuffer, M_DEVBUF);
				return 0;
			}
invalid:
			if (ioctl_args.lpInBuffer) free(ioctl_args.lpInBuffer, M_DEVBUF);
			if (ioctl_args.lpOutBuffer) free(ioctl_args.lpOutBuffer, M_DEVBUF);
			return EFAULT;
		}
		return EFAULT;
	}

	case HPT_SCAN_BUS:
	{
		return hpt_rescan_bus();
	}
	default:
		KdPrint(("invalid command!"));
		return EFAULT;
	}

}

static int hpt_rescan_bus(void)
{
	union ccb *ccb;
	PVBUS vbus;
	PVBUS_EXT vbus_ext;

	ldm_for_each_vbus(vbus, vbus_ext) {
		if ((ccb = xpt_alloc_ccb()) == NULL)
		{
			return(ENOMEM);
		}
		if (xpt_create_path(&ccb->ccb_h.path, NULL, cam_sim_path(vbus_ext->sim),
			CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP)
		{
			xpt_free_ccb(ccb);
			return(EIO);
		}
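		/*
		 * Hand the CCB to CAM; xpt_rescan() takes over the CCB (and its
		 * path), so it is not freed here.
		 */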
		xpt_rescan(ccb);
	}
	return(0);
}