1 /* 2 * Copyright (c) HighPoint Technologies, Inc. 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in the 12 * documentation and/or other materials provided with the distribution. 13 * 14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 24 * SUCH DAMAGE. 25 * 26 * $FreeBSD$ 27 */ 28 #include <dev/hptrr/hptrr_config.h> 29 /* $Id: osm_bsd.c,v 1.26 2007/02/28 03:53:47 gmm Exp $ 30 * 31 * HighPoint RAID Driver for FreeBSD 32 * Copyright (C) 2005 HighPoint Technologies, Inc. All Rights Reserved. 33 */ 34 #include <dev/hptrr/os_bsd.h> 35 #include <dev/hptrr/hptintf.h> 36 37 static int hpt_probe(device_t dev) 38 { 39 PCI_ID pci_id; 40 HIM *him; 41 int i; 42 PHBA hba; 43 44 for (him = him_list; him; him = him->next) { 45 for (i=0; him->get_supported_device_id(i, &pci_id); i++) { 46 if ((pci_get_vendor(dev) == pci_id.vid) && 47 (pci_get_device(dev) == pci_id.did)){ 48 KdPrint(("hpt_probe: adapter at PCI %d:%d:%d, IRQ %d", 49 pci_get_bus(dev), pci_get_slot(dev), pci_get_function(dev), pci_get_irq(dev) 50 )); 51 device_set_desc(dev, him->name); 52 hba = (PHBA)device_get_softc(dev); 53 memset(hba, 0, sizeof(HBA)); 54 hba->ext_type = EXT_TYPE_HBA; 55 hba->ldm_adapter.him = him; 56 return 0; 57 } 58 } 59 } 60 61 return (ENXIO); 62 } 63 64 static int hpt_attach(device_t dev) 65 { 66 PHBA hba = (PHBA)device_get_softc(dev); 67 HIM *him = hba->ldm_adapter.him; 68 PCI_ID pci_id; 69 HPT_UINT size; 70 PVBUS vbus; 71 PVBUS_EXT vbus_ext; 72 73 KdPrint(("hpt_attach(%d/%d/%d)", pci_get_bus(dev), pci_get_slot(dev), pci_get_function(dev))); 74 75 #if __FreeBSD_version >=440000 76 pci_enable_busmaster(dev); 77 #endif 78 79 pci_id.vid = pci_get_vendor(dev); 80 pci_id.did = pci_get_device(dev); 81 pci_id.rev = pci_get_revid(dev); 82 83 size = him->get_adapter_size(&pci_id); 84 hba->ldm_adapter.him_handle = malloc(size, M_DEVBUF, M_WAITOK); 85 if (!hba->ldm_adapter.him_handle) 86 return ENXIO; 87 88 hba->pcidev = dev; 89 hba->pciaddr.tree = 0; 90 hba->pciaddr.bus = pci_get_bus(dev); 91 hba->pciaddr.device = pci_get_slot(dev); 92 hba->pciaddr.function = pci_get_function(dev); 93 94 if (!him->create_adapter(&pci_id, hba->pciaddr, hba->ldm_adapter.him_handle, hba)) { 95 free(hba->ldm_adapter.him_handle, M_DEVBUF); 96 return -1; 97 } 98 99 os_printk("adapter at PCI %d:%d:%d, IRQ %d", 100 hba->pciaddr.bus, hba->pciaddr.device, hba->pciaddr.function, pci_get_irq(dev)); 101 102 if (!ldm_register_adapter(&hba->ldm_adapter)) { 103 size = ldm_get_vbus_size(); 104 vbus_ext = malloc(sizeof(VBUS_EXT) + size, M_DEVBUF, M_WAITOK); 105 if (!vbus_ext) { 106 free(hba->ldm_adapter.him_handle, M_DEVBUF); 107 return -1; 108 } 109 memset(vbus_ext, 0, sizeof(VBUS_EXT)); 110 vbus_ext->ext_type = EXT_TYPE_VBUS; 111 ldm_create_vbus((PVBUS)vbus_ext->vbus, vbus_ext); 112 ldm_register_adapter(&hba->ldm_adapter); 113 } 114 115 ldm_for_each_vbus(vbus, vbus_ext) { 116 if (hba->ldm_adapter.vbus==vbus) { 117 hba->vbus_ext = vbus_ext; 118 hba->next = vbus_ext->hba_list; 119 vbus_ext->hba_list = hba; 120 break; 121 } 122 } 123 return 0; 124 } 125 126 /* 127 * Maybe we'd better to use the bus_dmamem_alloc to alloc DMA memory, 128 * but there are some problems currently (alignment, etc). 129 */ 130 static __inline void *__get_free_pages(int order) 131 { 132 /* don't use low memory - other devices may get starved */ 133 return contigmalloc(PAGE_SIZE<<order, 134 M_DEVBUF, M_WAITOK, BUS_SPACE_MAXADDR_24BIT, BUS_SPACE_MAXADDR, PAGE_SIZE, 0); 135 } 136 137 static __inline void free_pages(void *p, int order) 138 { 139 contigfree(p, PAGE_SIZE<<order, M_DEVBUF); 140 } 141 142 static int hpt_alloc_mem(PVBUS_EXT vbus_ext) 143 { 144 PHBA hba; 145 struct freelist *f; 146 HPT_UINT i; 147 void **p; 148 149 for (hba = vbus_ext->hba_list; hba; hba = hba->next) 150 hba->ldm_adapter.him->get_meminfo(hba->ldm_adapter.him_handle); 151 152 ldm_get_mem_info((PVBUS)vbus_ext->vbus, 0); 153 154 for (f=vbus_ext->freelist_head; f; f=f->next) { 155 KdPrint(("%s: %d*%d=%d bytes", 156 f->tag, f->count, f->size, f->count*f->size)); 157 for (i=0; i<f->count; i++) { 158 p = (void **)malloc(f->size, M_DEVBUF, M_WAITOK); 159 if (!p) return (ENXIO); 160 *p = f->head; 161 f->head = p; 162 } 163 } 164 165 for (f=vbus_ext->freelist_dma_head; f; f=f->next) { 166 int order, size, j; 167 168 HPT_ASSERT((f->size & (f->alignment-1))==0); 169 170 for (order=0, size=PAGE_SIZE; size<f->size; order++, size<<=1) ; 171 172 KdPrint(("%s: %d*%d=%d bytes, order %d", 173 f->tag, f->count, f->size, f->count*f->size, order)); 174 HPT_ASSERT(f->alignment<=PAGE_SIZE); 175 176 for (i=0; i<f->count;) { 177 p = (void **)__get_free_pages(order); 178 if (!p) return -1; 179 for (j = size/f->size; j && i<f->count; i++,j--) { 180 *p = f->head; 181 *(BUS_ADDRESS *)(p+1) = (BUS_ADDRESS)vtophys(p); 182 f->head = p; 183 p = (void **)((unsigned long)p + f->size); 184 } 185 } 186 } 187 188 HPT_ASSERT(PAGE_SIZE==DMAPOOL_PAGE_SIZE); 189 190 for (i=0; i<os_max_cache_pages; i++) { 191 p = (void **)__get_free_pages(0); 192 if (!p) return -1; 193 HPT_ASSERT(((HPT_UPTR)p & (DMAPOOL_PAGE_SIZE-1))==0); 194 dmapool_put_page((PVBUS)vbus_ext->vbus, p, (BUS_ADDRESS)vtophys(p)); 195 } 196 197 return 0; 198 } 199 200 static void hpt_free_mem(PVBUS_EXT vbus_ext) 201 { 202 struct freelist *f; 203 void *p; 204 int i; 205 BUS_ADDRESS bus; 206 207 for (f=vbus_ext->freelist_head; f; f=f->next) { 208 #ifdef DBG 209 if (f->count!=f->reserved_count) { 210 KdPrint(("memory leak for freelist %s (%d/%d)", f->tag, f->count, f->reserved_count)); 211 } 212 #endif 213 while ((p=freelist_get(f))) 214 free(p, M_DEVBUF); 215 } 216 217 for (i=0; i<os_max_cache_pages; i++) { 218 p = dmapool_get_page((PVBUS)vbus_ext->vbus, &bus); 219 HPT_ASSERT(p); 220 free_pages(p, 0); 221 } 222 223 for (f=vbus_ext->freelist_dma_head; f; f=f->next) { 224 int order, size; 225 #ifdef DBG 226 if (f->count!=f->reserved_count) { 227 KdPrint(("memory leak for dma freelist %s (%d/%d)", f->tag, f->count, f->reserved_count)); 228 } 229 #endif 230 for (order=0, size=PAGE_SIZE; size<f->size; order++, size<<=1) ; 231 232 while ((p=freelist_get_dma(f, &bus))) { 233 if (order) 234 free_pages(p, order); 235 else { 236 /* can't free immediately since other blocks in this page may still be in the list */ 237 if (((HPT_UPTR)p & (PAGE_SIZE-1))==0) 238 dmapool_put_page((PVBUS)vbus_ext->vbus, p, bus); 239 } 240 } 241 } 242 243 while ((p = dmapool_get_page((PVBUS)vbus_ext->vbus, &bus))) 244 free_pages(p, 0); 245 } 246 247 static int hpt_init_vbus(PVBUS_EXT vbus_ext) 248 { 249 PHBA hba; 250 251 for (hba = vbus_ext->hba_list; hba; hba = hba->next) 252 if (!hba->ldm_adapter.him->initialize(hba->ldm_adapter.him_handle)) { 253 KdPrint(("fail to initialize %p", hba)); 254 return -1; 255 } 256 257 ldm_initialize_vbus((PVBUS)vbus_ext->vbus, &vbus_ext->hba_list->ldm_adapter); 258 return 0; 259 } 260 261 static void hpt_flush_done(PCOMMAND pCmd) 262 { 263 PVDEV vd = pCmd->target; 264 265 if (mIsArray(vd->Class->type) && vd->u.array.transform && vd!=vd->u.array.transform->target) { 266 vd = vd->u.array.transform->target; 267 HPT_ASSERT(vd); 268 pCmd->target = vd; 269 pCmd->Result = RETURN_PENDING; 270 vdev_queue_cmd(pCmd); 271 return; 272 } 273 274 *(int *)pCmd->priv = 1; 275 wakeup(pCmd); 276 } 277 278 /* 279 * flush a vdev (without retry). 280 */ 281 static int hpt_flush_vdev(PVBUS_EXT vbus_ext, PVDEV vd) 282 { 283 PCOMMAND pCmd; 284 int result = 0, done; 285 HPT_UINT count; 286 287 KdPrint(("flusing dev %p", vd)); 288 289 hpt_lock_vbus(vbus_ext); 290 291 if (mIsArray(vd->Class->type) && vd->u.array.transform) 292 count = MAX(vd->u.array.transform->source->cmds_per_request, 293 vd->u.array.transform->target->cmds_per_request); 294 else 295 count = vd->cmds_per_request; 296 297 pCmd = ldm_alloc_cmds(vd->vbus, count); 298 299 if (!pCmd) { 300 hpt_unlock_vbus(vbus_ext); 301 return -1; 302 } 303 304 pCmd->type = CMD_TYPE_FLUSH; 305 pCmd->flags.hard_flush = 1; 306 pCmd->target = vd; 307 pCmd->done = hpt_flush_done; 308 done = 0; 309 pCmd->priv = &done; 310 311 ldm_queue_cmd(pCmd); 312 313 if (!done) { 314 while (hpt_sleep(vbus_ext, pCmd, PPAUSE, "hptfls", HPT_OSM_TIMEOUT)) { 315 ldm_reset_vbus(vd->vbus); 316 } 317 } 318 319 KdPrint(("flush result %d", pCmd->Result)); 320 321 if (pCmd->Result!=RETURN_SUCCESS) 322 result = -1; 323 324 ldm_free_cmds(pCmd); 325 326 hpt_unlock_vbus(vbus_ext); 327 328 return result; 329 } 330 331 static void hpt_stop_tasks(PVBUS_EXT vbus_ext); 332 static void hpt_shutdown_vbus(PVBUS_EXT vbus_ext, int howto) 333 { 334 PVBUS vbus = (PVBUS)vbus_ext->vbus; 335 PHBA hba; 336 int i; 337 338 KdPrint(("hpt_shutdown_vbus")); 339 340 /* stop all ctl tasks and disable the worker taskqueue */ 341 hpt_stop_tasks(vbus_ext); 342 vbus_ext->worker.ta_context = 0; 343 344 /* flush devices */ 345 for (i=0; i<osm_max_targets; i++) { 346 PVDEV vd = ldm_find_target(vbus, i); 347 if (vd) { 348 /* retry once */ 349 if (hpt_flush_vdev(vbus_ext, vd)) 350 hpt_flush_vdev(vbus_ext, vd); 351 } 352 } 353 354 hpt_lock_vbus(vbus_ext); 355 ldm_shutdown(vbus); 356 hpt_unlock_vbus(vbus_ext); 357 358 ldm_release_vbus(vbus); 359 360 for (hba=vbus_ext->hba_list; hba; hba=hba->next) 361 bus_teardown_intr(hba->pcidev, hba->irq_res, hba->irq_handle); 362 363 hpt_free_mem(vbus_ext); 364 365 while ((hba=vbus_ext->hba_list)) { 366 vbus_ext->hba_list = hba->next; 367 free(hba->ldm_adapter.him_handle, M_DEVBUF); 368 } 369 370 free(vbus_ext, M_DEVBUF); 371 KdPrint(("hpt_shutdown_vbus done")); 372 } 373 374 static void __hpt_do_tasks(PVBUS_EXT vbus_ext) 375 { 376 OSM_TASK *tasks; 377 378 tasks = vbus_ext->tasks; 379 vbus_ext->tasks = 0; 380 381 while (tasks) { 382 OSM_TASK *t = tasks; 383 tasks = t->next; 384 t->next = 0; 385 t->func(vbus_ext->vbus, t->data); 386 } 387 } 388 389 static void hpt_do_tasks(PVBUS_EXT vbus_ext, int pending) 390 { 391 if(vbus_ext){ 392 hpt_lock_vbus(vbus_ext); 393 __hpt_do_tasks(vbus_ext); 394 hpt_unlock_vbus(vbus_ext); 395 } 396 } 397 398 static void hpt_action(struct cam_sim *sim, union ccb *ccb); 399 static void hpt_poll(struct cam_sim *sim); 400 static void hpt_async(void * callback_arg, u_int32_t code, struct cam_path * path, void * arg); 401 static void hpt_pci_intr(void *arg); 402 403 static __inline POS_CMDEXT cmdext_get(PVBUS_EXT vbus_ext) 404 { 405 POS_CMDEXT p = vbus_ext->cmdext_list; 406 if (p) 407 vbus_ext->cmdext_list = p->next; 408 return p; 409 } 410 411 static __inline void cmdext_put(POS_CMDEXT p) 412 { 413 p->next = p->vbus_ext->cmdext_list; 414 p->vbus_ext->cmdext_list = p; 415 } 416 417 static void hpt_timeout(void *arg) 418 { 419 PCOMMAND pCmd = (PCOMMAND)arg; 420 POS_CMDEXT ext = (POS_CMDEXT)pCmd->priv; 421 422 KdPrint(("pCmd %p timeout", pCmd)); 423 424 ldm_reset_vbus((PVBUS)ext->vbus_ext->vbus); 425 } 426 427 static void os_cmddone(PCOMMAND pCmd) 428 { 429 POS_CMDEXT ext = (POS_CMDEXT)pCmd->priv; 430 union ccb *ccb = ext->ccb; 431 432 KdPrint(("os_cmddone(%p, %d)", pCmd, pCmd->Result)); 433 434 untimeout(hpt_timeout, pCmd, ccb->ccb_h.timeout_ch); 435 436 switch(pCmd->Result) { 437 case RETURN_SUCCESS: 438 ccb->ccb_h.status = CAM_REQ_CMP; 439 break; 440 case RETURN_BAD_DEVICE: 441 ccb->ccb_h.status = CAM_DEV_NOT_THERE; 442 break; 443 case RETURN_DEVICE_BUSY: 444 ccb->ccb_h.status = CAM_BUSY; 445 break; 446 case RETURN_INVALID_REQUEST: 447 ccb->ccb_h.status = CAM_REQ_INVALID; 448 break; 449 case RETURN_SELECTION_TIMEOUT: 450 ccb->ccb_h.status = CAM_SEL_TIMEOUT; 451 break; 452 case RETURN_RETRY: 453 ccb->ccb_h.status = CAM_BUSY; 454 break; 455 default: 456 ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR; 457 break; 458 } 459 460 if (pCmd->flags.data_in) { 461 bus_dmamap_sync(ext->vbus_ext->io_dmat, ext->dma_map, BUS_DMASYNC_POSTREAD); 462 } 463 else if (pCmd->flags.data_out) { 464 bus_dmamap_sync(ext->vbus_ext->io_dmat, ext->dma_map, BUS_DMASYNC_POSTWRITE); 465 } 466 467 bus_dmamap_unload(ext->vbus_ext->io_dmat, ext->dma_map); 468 469 cmdext_put(ext); 470 ldm_free_cmds(pCmd); 471 xpt_done(ccb); 472 } 473 474 static int os_buildsgl(PCOMMAND pCmd, PSG pSg, int logical) 475 { 476 POS_CMDEXT ext = (POS_CMDEXT)pCmd->priv; 477 union ccb *ccb = ext->ccb; 478 bus_dma_segment_t *sgList = (bus_dma_segment_t *)ccb->csio.data_ptr; 479 int idx; 480 481 if(logical) { 482 if (ccb->ccb_h.flags & CAM_DATA_PHYS) 483 panic("physical address unsupported"); 484 485 if (ccb->ccb_h.flags & CAM_SCATTER_VALID) { 486 if (ccb->ccb_h.flags & CAM_SG_LIST_PHYS) 487 panic("physical address unsupported"); 488 489 for (idx = 0; idx < ccb->csio.sglist_cnt; idx++) { 490 os_set_sgptr(&pSg[idx], (HPT_U8 *)(HPT_UPTR)sgList[idx].ds_addr); 491 pSg[idx].size = sgList[idx].ds_len; 492 pSg[idx].eot = (idx==ccb->csio.sglist_cnt-1)? 1 : 0; 493 } 494 } 495 else { 496 os_set_sgptr(pSg, (HPT_U8 *)ccb->csio.data_ptr); 497 pSg->size = ccb->csio.dxfer_len; 498 pSg->eot = 1; 499 } 500 return TRUE; 501 } 502 503 /* since we have provided physical sg, nobody will ask us to build physical sg */ 504 HPT_ASSERT(0); 505 return FALSE; 506 } 507 508 static void hpt_io_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nsegs, int error) 509 { 510 PCOMMAND pCmd = (PCOMMAND)arg; 511 POS_CMDEXT ext = (POS_CMDEXT)pCmd->priv; 512 PSG psg = pCmd->psg; 513 int idx; 514 515 HPT_ASSERT(pCmd->flags.physical_sg); 516 517 if (error || nsegs == 0) 518 panic("busdma error"); 519 520 HPT_ASSERT(nsegs<=os_max_sg_descriptors); 521 522 for (idx = 0; idx < nsegs; idx++, psg++) { 523 psg->addr.bus = segs[idx].ds_addr; 524 psg->size = segs[idx].ds_len; 525 psg->eot = 0; 526 } 527 psg[-1].eot = 1; 528 529 if (pCmd->flags.data_in) { 530 bus_dmamap_sync(ext->vbus_ext->io_dmat, ext->dma_map, BUS_DMASYNC_PREREAD); 531 } 532 else if (pCmd->flags.data_out) { 533 bus_dmamap_sync(ext->vbus_ext->io_dmat, ext->dma_map, BUS_DMASYNC_PREWRITE); 534 } 535 536 ext->ccb->ccb_h.timeout_ch = timeout(hpt_timeout, pCmd, HPT_OSM_TIMEOUT); 537 ldm_queue_cmd(pCmd); 538 } 539 540 static void hpt_scsi_io(PVBUS_EXT vbus_ext, union ccb *ccb) 541 { 542 PVBUS vbus = (PVBUS)vbus_ext->vbus; 543 PVDEV vd; 544 PCOMMAND pCmd; 545 POS_CMDEXT ext; 546 HPT_U8 *cdb; 547 548 if (ccb->ccb_h.flags & CAM_CDB_POINTER) 549 cdb = ccb->csio.cdb_io.cdb_ptr; 550 else 551 cdb = ccb->csio.cdb_io.cdb_bytes; 552 553 KdPrint(("hpt_scsi_io: ccb %x id %d lun %d cdb %x-%x-%x", 554 ccb, 555 ccb->ccb_h.target_id, ccb->ccb_h.target_lun, 556 *(HPT_U32 *)&cdb[0], *(HPT_U32 *)&cdb[4], *(HPT_U32 *)&cdb[8] 557 )); 558 559 /* ccb->ccb_h.path_id is not our bus id - don't check it */ 560 if (ccb->ccb_h.target_lun != 0 || 561 ccb->ccb_h.target_id >= osm_max_targets || 562 (ccb->ccb_h.flags & CAM_CDB_PHYS)) 563 { 564 ccb->ccb_h.status = CAM_TID_INVALID; 565 xpt_done(ccb); 566 return; 567 } 568 569 vd = ldm_find_target(vbus, ccb->ccb_h.target_id); 570 571 if (!vd) { 572 ccb->ccb_h.status = CAM_TID_INVALID; 573 xpt_done(ccb); 574 return; 575 } 576 577 switch (cdb[0]) { 578 case TEST_UNIT_READY: 579 case START_STOP_UNIT: 580 case SYNCHRONIZE_CACHE: 581 ccb->ccb_h.status = CAM_REQ_CMP; 582 break; 583 584 case INQUIRY: 585 { 586 PINQUIRYDATA inquiryData; 587 memset(ccb->csio.data_ptr, 0, ccb->csio.dxfer_len); 588 inquiryData = (PINQUIRYDATA)ccb->csio.data_ptr; 589 590 inquiryData->AdditionalLength = 31; 591 inquiryData->CommandQueue = 1; 592 memcpy(&inquiryData->VendorId, "HPT ", 8); 593 memcpy(&inquiryData->ProductId, "DISK 0_0 ", 16); 594 595 if (vd->target_id / 10) { 596 inquiryData->ProductId[7] = (vd->target_id % 100) / 10 + '0'; 597 inquiryData->ProductId[8] = (vd->target_id % 100) % 10 + '0'; 598 } 599 else 600 inquiryData->ProductId[7] = (vd->target_id % 100) % 10 + '0'; 601 602 memcpy(&inquiryData->ProductRevisionLevel, "4.00", 4); 603 604 ccb->ccb_h.status = CAM_REQ_CMP; 605 } 606 break; 607 608 case READ_CAPACITY: 609 { 610 HPT_U8 *rbuf = ccb->csio.data_ptr; 611 HPT_U32 cap; 612 613 if (vd->capacity>0xfffffffful) 614 cap = 0xfffffffful; 615 else 616 cap = vd->capacity - 1; 617 618 rbuf[0] = (HPT_U8)(cap>>24); 619 rbuf[1] = (HPT_U8)(cap>>16); 620 rbuf[2] = (HPT_U8)(cap>>8); 621 rbuf[3] = (HPT_U8)cap; 622 rbuf[4] = 0; 623 rbuf[5] = 0; 624 rbuf[6] = 2; 625 rbuf[7] = 0; 626 627 ccb->ccb_h.status = CAM_REQ_CMP; 628 break; 629 } 630 631 case SERVICE_ACTION_IN: 632 { 633 HPT_U8 *rbuf = ccb->csio.data_ptr; 634 HPT_U64 cap = vd->capacity - 1; 635 636 rbuf[0] = (HPT_U8)(cap>>56); 637 rbuf[1] = (HPT_U8)(cap>>48); 638 rbuf[2] = (HPT_U8)(cap>>40); 639 rbuf[3] = (HPT_U8)(cap>>32); 640 rbuf[4] = (HPT_U8)(cap>>24); 641 rbuf[5] = (HPT_U8)(cap>>16); 642 rbuf[6] = (HPT_U8)(cap>>8); 643 rbuf[7] = (HPT_U8)cap; 644 rbuf[8] = 0; 645 rbuf[9] = 0; 646 rbuf[10] = 2; 647 rbuf[11] = 0; 648 649 ccb->ccb_h.status = CAM_REQ_CMP; 650 break; 651 } 652 653 case READ_6: 654 case READ_10: 655 case READ_16: 656 case WRITE_6: 657 case WRITE_10: 658 case WRITE_16: 659 case 0x13: 660 case 0x2f: 661 { 662 pCmd = ldm_alloc_cmds(vbus, vd->cmds_per_request); 663 if(!pCmd){ 664 KdPrint(("Failed to allocate command!")); 665 ccb->ccb_h.status = CAM_BUSY; 666 break; 667 } 668 669 switch (cdb[0]) { 670 case READ_6: 671 case WRITE_6: 672 case 0x13: 673 pCmd->uCmd.Ide.Lba = ((HPT_U32)cdb[1] << 16) | ((HPT_U32)cdb[2] << 8) | (HPT_U32)cdb[3]; 674 pCmd->uCmd.Ide.nSectors = (HPT_U16) cdb[4]; 675 break; 676 case READ_16: 677 case WRITE_16: 678 { 679 HPT_U64 block = 680 ((HPT_U64)cdb[2]<<56) | 681 ((HPT_U64)cdb[3]<<48) | 682 ((HPT_U64)cdb[4]<<40) | 683 ((HPT_U64)cdb[5]<<32) | 684 ((HPT_U64)cdb[6]<<24) | 685 ((HPT_U64)cdb[7]<<16) | 686 ((HPT_U64)cdb[8]<<8) | 687 ((HPT_U64)cdb[9]); 688 pCmd->uCmd.Ide.Lba = block; 689 pCmd->uCmd.Ide.nSectors = (HPT_U16)cdb[13] | ((HPT_U16)cdb[12]<<8); 690 break; 691 } 692 693 default: 694 pCmd->uCmd.Ide.Lba = (HPT_U32)cdb[5] | ((HPT_U32)cdb[4] << 8) | ((HPT_U32)cdb[3] << 16) | ((HPT_U32)cdb[2] << 24); 695 pCmd->uCmd.Ide.nSectors = (HPT_U16) cdb[8] | ((HPT_U16)cdb[7]<<8); 696 break; 697 } 698 699 switch (cdb[0]) { 700 case READ_6: 701 case READ_10: 702 case READ_16: 703 pCmd->flags.data_in = 1; 704 break; 705 case WRITE_6: 706 case WRITE_10: 707 case WRITE_16: 708 pCmd->flags.data_out = 1; 709 break; 710 } 711 pCmd->priv = ext = cmdext_get(vbus_ext); 712 HPT_ASSERT(ext); 713 ext->ccb = ccb; 714 pCmd->target = vd; 715 pCmd->done = os_cmddone; 716 pCmd->buildsgl = os_buildsgl; 717 718 pCmd->psg = ext->psg; 719 720 if (ccb->ccb_h.flags & CAM_SCATTER_VALID) { 721 int idx; 722 bus_dma_segment_t *sgList = (bus_dma_segment_t *)ccb->csio.data_ptr; 723 724 if (ccb->ccb_h.flags & CAM_SG_LIST_PHYS) 725 pCmd->flags.physical_sg = 1; 726 727 for (idx = 0; idx < ccb->csio.sglist_cnt; idx++) { 728 pCmd->psg[idx].addr.bus = sgList[idx].ds_addr; 729 pCmd->psg[idx].size = sgList[idx].ds_len; 730 pCmd->psg[idx].eot = (idx==ccb->csio.sglist_cnt-1)? 1 : 0; 731 } 732 733 ccb->ccb_h.timeout_ch = timeout(hpt_timeout, pCmd, HPT_OSM_TIMEOUT); 734 ldm_queue_cmd(pCmd); 735 } 736 else { 737 int error; 738 pCmd->flags.physical_sg = 1; 739 error = bus_dmamap_load(vbus_ext->io_dmat, 740 ext->dma_map, 741 ccb->csio.data_ptr, ccb->csio.dxfer_len, 742 hpt_io_dmamap_callback, pCmd, 743 BUS_DMA_WAITOK 744 ); 745 KdPrint(("bus_dmamap_load return %d", error)); 746 if (error && error!=EINPROGRESS) { 747 os_printk("bus_dmamap_load error %d", error); 748 cmdext_put(ext); 749 ldm_free_cmds(pCmd); 750 ccb->ccb_h.status = CAM_REQ_CMP_ERR; 751 xpt_done(ccb); 752 } 753 } 754 return; 755 } 756 757 default: 758 ccb->ccb_h.status = CAM_REQ_INVALID; 759 break; 760 } 761 762 xpt_done(ccb); 763 return; 764 } 765 766 static void hpt_action(struct cam_sim *sim, union ccb *ccb) 767 { 768 PVBUS_EXT vbus_ext = (PVBUS_EXT)cam_sim_softc(sim); 769 770 KdPrint(("hpt_action(fn=%d, id=%d)", ccb->ccb_h.func_code, ccb->ccb_h.target_id)); 771 772 switch (ccb->ccb_h.func_code) { 773 774 case XPT_SCSI_IO: 775 hpt_lock_vbus(vbus_ext); 776 hpt_scsi_io(vbus_ext, ccb); 777 hpt_unlock_vbus(vbus_ext); 778 return; 779 780 case XPT_RESET_BUS: 781 hpt_lock_vbus(vbus_ext); 782 ldm_reset_vbus((PVBUS)vbus_ext->vbus); 783 hpt_unlock_vbus(vbus_ext); 784 break; 785 786 case XPT_GET_TRAN_SETTINGS: 787 case XPT_SET_TRAN_SETTINGS: 788 ccb->ccb_h.status = CAM_FUNC_NOTAVAIL; 789 break; 790 791 case XPT_CALC_GEOMETRY: 792 ccb->ccg.heads = 255; 793 ccb->ccg.secs_per_track = 63; 794 ccb->ccg.cylinders = ccb->ccg.volume_size / (ccb->ccg.heads * ccb->ccg.secs_per_track); 795 ccb->ccb_h.status = CAM_REQ_CMP; 796 break; 797 798 case XPT_PATH_INQ: 799 { 800 struct ccb_pathinq *cpi = &ccb->cpi; 801 802 cpi->version_num = 1; 803 cpi->hba_inquiry = PI_SDTR_ABLE; 804 cpi->target_sprt = 0; 805 cpi->hba_misc = PIM_NOBUSRESET; 806 cpi->hba_eng_cnt = 0; 807 cpi->max_target = osm_max_targets; 808 cpi->max_lun = 0; 809 cpi->unit_number = cam_sim_unit(sim); 810 cpi->bus_id = cam_sim_bus(sim); 811 cpi->initiator_id = osm_max_targets; 812 cpi->base_transfer_speed = 3300; 813 814 strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); 815 strncpy(cpi->hba_vid, "HPT ", HBA_IDLEN); 816 strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN); 817 cpi->ccb_h.status = CAM_REQ_CMP; 818 break; 819 } 820 821 default: 822 ccb->ccb_h.status = CAM_REQ_INVALID; 823 break; 824 } 825 826 xpt_done(ccb); 827 return; 828 } 829 830 static void hpt_pci_intr(void *arg) 831 { 832 PVBUS_EXT vbus_ext = (PVBUS_EXT)arg; 833 hpt_lock_vbus(vbus_ext); 834 ldm_intr((PVBUS)vbus_ext->vbus); 835 hpt_unlock_vbus(vbus_ext); 836 } 837 838 static void hpt_poll(struct cam_sim *sim) 839 { 840 hpt_pci_intr(cam_sim_softc(sim)); 841 } 842 843 static void hpt_async(void * callback_arg, u_int32_t code, struct cam_path * path, void * arg) 844 { 845 KdPrint(("hpt_async")); 846 } 847 848 static int hpt_shutdown(device_t dev) 849 { 850 KdPrint(("hpt_shutdown(dev=%p)", dev)); 851 return 0; 852 } 853 854 static int hpt_detach(device_t dev) 855 { 856 /* we don't allow the driver to be unloaded. */ 857 return EBUSY; 858 } 859 860 static void hpt_ioctl_done(struct _IOCTL_ARG *arg) 861 { 862 arg->ioctl_cmnd = 0; 863 wakeup(arg); 864 } 865 866 static void __hpt_do_ioctl(PVBUS_EXT vbus_ext, IOCTL_ARG *ioctl_args) 867 { 868 ioctl_args->result = -1; 869 ioctl_args->done = hpt_ioctl_done; 870 ioctl_args->ioctl_cmnd = (void *)1; 871 872 hpt_lock_vbus(vbus_ext); 873 ldm_ioctl((PVBUS)vbus_ext->vbus, ioctl_args); 874 875 while (ioctl_args->ioctl_cmnd) { 876 if (hpt_sleep(vbus_ext, ioctl_args, PPAUSE, "hptctl", HPT_OSM_TIMEOUT)==0) 877 break; 878 ldm_reset_vbus((PVBUS)vbus_ext->vbus); 879 __hpt_do_tasks(vbus_ext); 880 } 881 882 /* KdPrint(("ioctl %x result %d", ioctl_args->dwIoControlCode, ioctl_args->result)); */ 883 884 hpt_unlock_vbus(vbus_ext); 885 } 886 887 static void hpt_do_ioctl(IOCTL_ARG *ioctl_args) 888 { 889 PVBUS vbus; 890 PVBUS_EXT vbus_ext; 891 892 ldm_for_each_vbus(vbus, vbus_ext) { 893 __hpt_do_ioctl(vbus_ext, ioctl_args); 894 if (ioctl_args->result!=HPT_IOCTL_RESULT_WRONG_VBUS) 895 return; 896 } 897 } 898 899 #define HPT_DO_IOCTL(code, inbuf, insize, outbuf, outsize) ({\ 900 IOCTL_ARG arg;\ 901 arg.dwIoControlCode = code;\ 902 arg.lpInBuffer = inbuf;\ 903 arg.lpOutBuffer = outbuf;\ 904 arg.nInBufferSize = insize;\ 905 arg.nOutBufferSize = outsize;\ 906 arg.lpBytesReturned = 0;\ 907 hpt_do_ioctl(&arg);\ 908 arg.result;\ 909 }) 910 911 #define DEVICEID_VALID(id) ((id) && ((HPT_U32)(id)!=0xffffffff)) 912 913 static int hpt_get_logical_devices(DEVICEID * pIds, int nMaxCount) 914 { 915 int i; 916 HPT_U32 count = nMaxCount-1; 917 918 if (HPT_DO_IOCTL(HPT_IOCTL_GET_LOGICAL_DEVICES, 919 &count, sizeof(HPT_U32), pIds, sizeof(DEVICEID)*nMaxCount)) 920 return -1; 921 922 nMaxCount = (int)pIds[0]; 923 for (i=0; i<nMaxCount; i++) pIds[i] = pIds[i+1]; 924 return nMaxCount; 925 } 926 927 static int hpt_get_device_info_v3(DEVICEID id, PLOGICAL_DEVICE_INFO_V3 pInfo) 928 { 929 return HPT_DO_IOCTL(HPT_IOCTL_GET_DEVICE_INFO_V3, 930 &id, sizeof(DEVICEID), pInfo, sizeof(LOGICAL_DEVICE_INFO_V3)); 931 } 932 933 /* not belong to this file logically, but we want to use ioctl interface */ 934 static int __hpt_stop_tasks(PVBUS_EXT vbus_ext, DEVICEID id) 935 { 936 LOGICAL_DEVICE_INFO_V3 devinfo; 937 int i, result; 938 DEVICEID param[2] = { id, 0 }; 939 940 if (hpt_get_device_info_v3(id, &devinfo)) 941 return -1; 942 943 if (devinfo.Type!=LDT_ARRAY) 944 return -1; 945 946 if (devinfo.u.array.Flags & ARRAY_FLAG_REBUILDING) 947 param[1] = AS_REBUILD_ABORT; 948 else if (devinfo.u.array.Flags & ARRAY_FLAG_VERIFYING) 949 param[1] = AS_VERIFY_ABORT; 950 else if (devinfo.u.array.Flags & ARRAY_FLAG_INITIALIZING) 951 param[1] = AS_INITIALIZE_ABORT; 952 else if (devinfo.u.array.Flags & ARRAY_FLAG_TRANSFORMING) 953 param[1] = AS_TRANSFORM_ABORT; 954 else 955 return -1; 956 957 KdPrint(("SET_ARRAY_STATE(%x, %d)", param[0], param[1])); 958 result = HPT_DO_IOCTL(HPT_IOCTL_SET_ARRAY_STATE, 959 param, sizeof(param), 0, 0); 960 961 for (i=0; i<devinfo.u.array.nDisk; i++) 962 if (DEVICEID_VALID(devinfo.u.array.Members[i])) 963 __hpt_stop_tasks(vbus_ext, devinfo.u.array.Members[i]); 964 965 return result; 966 } 967 968 static void hpt_stop_tasks(PVBUS_EXT vbus_ext) 969 { 970 DEVICEID ids[32]; 971 int i, count; 972 973 count = hpt_get_logical_devices((DEVICEID *)&ids, sizeof(ids)/sizeof(ids[0])); 974 975 for (i=0; i<count; i++) 976 __hpt_stop_tasks(vbus_ext, ids[i]); 977 } 978 979 static d_open_t hpt_open; 980 static d_close_t hpt_close; 981 static d_ioctl_t hpt_ioctl; 982 static void hpt_bus_scan_cb(struct cam_periph *periph, union ccb *ccb); 983 static int hpt_rescan_bus(void); 984 985 static struct cdevsw hpt_cdevsw = { 986 .d_open = hpt_open, 987 .d_close = hpt_close, 988 .d_ioctl = hpt_ioctl, 989 .d_name = driver_name, 990 #if __FreeBSD_version>=503000 991 .d_version = D_VERSION, 992 #endif 993 #if (__FreeBSD_version>=503000 && __FreeBSD_version<600034) 994 .d_flags = D_NEEDGIANT, 995 #endif 996 #if __FreeBSD_version<600034 997 #if __FreeBSD_version>501000 998 .d_maj = MAJOR_AUTO, 999 #else 1000 .d_maj = HPT_DEV_MAJOR, 1001 #endif 1002 #endif 1003 }; 1004 1005 static struct intr_config_hook hpt_ich; 1006 1007 /* 1008 * hpt_final_init will be called after all hpt_attach. 1009 */ 1010 static void hpt_final_init(void *dummy) 1011 { 1012 int i; 1013 PVBUS_EXT vbus_ext; 1014 PVBUS vbus; 1015 PHBA hba; 1016 1017 #ifdef SUPPORT_ALL 1018 /* ldm_fix_him() */ 1019 #endif 1020 /* Clear the config hook */ 1021 config_intrhook_disestablish(&hpt_ich); 1022 1023 /* allocate memory */ 1024 i = 0; 1025 ldm_for_each_vbus(vbus, vbus_ext) { 1026 if (hpt_alloc_mem(vbus_ext)) { 1027 os_printk("out of memory"); 1028 return; 1029 } 1030 i++; 1031 } 1032 1033 if (!i) { 1034 if (bootverbose) 1035 os_printk("no controller detected."); 1036 return; 1037 } 1038 1039 /* initializing hardware */ 1040 ldm_for_each_vbus(vbus, vbus_ext) { 1041 /* make timer available here */ 1042 callout_handle_init(&vbus_ext->timer); 1043 if (hpt_init_vbus(vbus_ext)) { 1044 os_printk("fail to initialize hardware"); 1045 break; /* FIXME */ 1046 } 1047 } 1048 1049 /* register CAM interface */ 1050 ldm_for_each_vbus(vbus, vbus_ext) { 1051 struct cam_devq *devq; 1052 struct ccb_setasync ccb; 1053 1054 #if (__FreeBSD_version >= 500000) 1055 mtx_init(&vbus_ext->lock, "hptsleeplock", NULL, MTX_DEF); 1056 #endif 1057 if (bus_dma_tag_create(NULL,/* parent */ 1058 4, /* alignment */ 1059 BUS_SPACE_MAXADDR_32BIT+1, /* boundary */ 1060 BUS_SPACE_MAXADDR, /* lowaddr */ 1061 BUS_SPACE_MAXADDR, /* highaddr */ 1062 NULL, NULL, /* filter, filterarg */ 1063 PAGE_SIZE * (os_max_sg_descriptors-1), /* maxsize */ 1064 os_max_sg_descriptors, /* nsegments */ 1065 0x10000, /* maxsegsize */ 1066 BUS_DMA_WAITOK, /* flags */ 1067 #if __FreeBSD_version>502000 1068 busdma_lock_mutex, /* lockfunc */ 1069 &vbus_ext->lock, /* lockfuncarg */ 1070 #endif 1071 &vbus_ext->io_dmat /* tag */)) 1072 { 1073 return ; 1074 } 1075 1076 for (i=0; i<os_max_queue_comm; i++) { 1077 POS_CMDEXT ext = (POS_CMDEXT)malloc(sizeof(OS_CMDEXT), M_DEVBUF, M_WAITOK); 1078 if (!ext) { 1079 os_printk("Can't alloc cmdext(%d)", i); 1080 return ; 1081 } 1082 ext->vbus_ext = vbus_ext; 1083 ext->next = vbus_ext->cmdext_list; 1084 vbus_ext->cmdext_list = ext; 1085 1086 if (bus_dmamap_create(vbus_ext->io_dmat, 0, &ext->dma_map)) { 1087 os_printk("Can't create dma map(%d)", i); 1088 return ; 1089 } 1090 } 1091 1092 if ((devq = cam_simq_alloc(os_max_queue_comm)) == NULL) { 1093 os_printk("cam_simq_alloc failed"); 1094 return ; 1095 } 1096 1097 #if __FreeBSD_version > 700025 1098 vbus_ext->sim = cam_sim_alloc(hpt_action, hpt_poll, driver_name, 1099 vbus_ext, 0, &Giant, os_max_queue_comm, /*tagged*/8, devq); 1100 #else 1101 vbus_ext->sim = cam_sim_alloc(hpt_action, hpt_poll, driver_name, 1102 vbus_ext, 0, os_max_queue_comm, /*tagged*/8, devq); 1103 #endif 1104 1105 if (!vbus_ext->sim) { 1106 os_printk("cam_sim_alloc failed"); 1107 cam_simq_free(devq); 1108 return ; 1109 } 1110 1111 #if __FreeBSD_version > 700044 1112 if (xpt_bus_register(vbus_ext->sim, NULL, 0) != CAM_SUCCESS) { 1113 #else 1114 if (xpt_bus_register(vbus_ext->sim, 0) != CAM_SUCCESS) { 1115 #endif 1116 os_printk("xpt_bus_register failed"); 1117 cam_sim_free(vbus_ext->sim, /*free devq*/ TRUE); 1118 vbus_ext->sim = NULL; 1119 return ; 1120 } 1121 1122 if (xpt_create_path(&vbus_ext->path, /*periph */ NULL, 1123 cam_sim_path(vbus_ext->sim), CAM_TARGET_WILDCARD, 1124 CAM_LUN_WILDCARD) != CAM_REQ_CMP) 1125 { 1126 os_printk("xpt_create_path failed"); 1127 xpt_bus_deregister(cam_sim_path(vbus_ext->sim)); 1128 cam_sim_free(vbus_ext->sim, /*free_devq*/TRUE); 1129 vbus_ext->sim = NULL; 1130 return ; 1131 } 1132 1133 xpt_setup_ccb(&ccb.ccb_h, vbus_ext->path, /*priority*/5); 1134 ccb.ccb_h.func_code = XPT_SASYNC_CB; 1135 ccb.event_enable = AC_LOST_DEVICE; 1136 ccb.callback = hpt_async; 1137 ccb.callback_arg = vbus_ext; 1138 xpt_action((union ccb *)&ccb); 1139 1140 for (hba = vbus_ext->hba_list; hba; hba = hba->next) { 1141 int rid = 0; 1142 if ((hba->irq_res = bus_alloc_resource(hba->pcidev, 1143 SYS_RES_IRQ, &rid, 0, ~0ul, 1, RF_SHAREABLE | RF_ACTIVE)) == NULL) 1144 { 1145 os_printk("can't allocate interrupt"); 1146 return ; 1147 } 1148 1149 if (bus_setup_intr(hba->pcidev, hba->irq_res, INTR_TYPE_CAM, 1150 #if __FreeBSD_version > 700025 1151 NULL, hpt_pci_intr, vbus_ext, &hba->irq_handle)) 1152 #else 1153 hpt_pci_intr, vbus_ext, &hba->irq_handle)) 1154 #endif 1155 { 1156 os_printk("can't set up interrupt"); 1157 return ; 1158 } 1159 hba->ldm_adapter.him->intr_control(hba->ldm_adapter.him_handle, HPT_TRUE); 1160 } 1161 1162 vbus_ext->shutdown_eh = EVENTHANDLER_REGISTER(shutdown_final, 1163 hpt_shutdown_vbus, vbus_ext, SHUTDOWN_PRI_DEFAULT); 1164 if (!vbus_ext->shutdown_eh) 1165 os_printk("Shutdown event registration failed"); 1166 } 1167 1168 ldm_for_each_vbus(vbus, vbus_ext) { 1169 TASK_INIT(&vbus_ext->worker, 0, (task_fn_t *)hpt_do_tasks, vbus_ext); 1170 if (vbus_ext->tasks) 1171 TASK_ENQUEUE(&vbus_ext->worker); 1172 } 1173 1174 make_dev(&hpt_cdevsw, DRIVER_MINOR, UID_ROOT, GID_OPERATOR, 1175 S_IRUSR | S_IWUSR, driver_name); 1176 } 1177 1178 #if defined(KLD_MODULE) && (__FreeBSD_version >= 503000) 1179 1180 typedef struct driverlink *driverlink_t; 1181 struct driverlink { 1182 kobj_class_t driver; 1183 TAILQ_ENTRY(driverlink) link; /* list of drivers in devclass */ 1184 }; 1185 1186 typedef TAILQ_HEAD(driver_list, driverlink) driver_list_t; 1187 1188 struct devclass { 1189 TAILQ_ENTRY(devclass) link; 1190 devclass_t parent; /* parent in devclass hierarchy */ 1191 driver_list_t drivers; /* bus devclasses store drivers for bus */ 1192 char *name; 1193 device_t *devices; /* array of devices indexed by unit */ 1194 int maxunit; /* size of devices array */ 1195 }; 1196 1197 static void override_kernel_driver(void) 1198 { 1199 driverlink_t dl, dlfirst; 1200 driver_t *tmpdriver; 1201 devclass_t dc = devclass_find("pci"); 1202 1203 if (dc){ 1204 dlfirst = TAILQ_FIRST(&dc->drivers); 1205 for (dl = dlfirst; dl; dl = TAILQ_NEXT(dl, link)) { 1206 if(strcmp(dl->driver->name, driver_name) == 0) { 1207 tmpdriver=dl->driver; 1208 dl->driver=dlfirst->driver; 1209 dlfirst->driver=tmpdriver; 1210 break; 1211 } 1212 } 1213 } 1214 } 1215 1216 #else 1217 #define override_kernel_driver() 1218 #endif 1219 1220 static void hpt_init(void *dummy) 1221 { 1222 if (bootverbose) 1223 os_printk("%s %s", driver_name_long, driver_ver); 1224 1225 override_kernel_driver(); 1226 init_config(); 1227 1228 hpt_ich.ich_func = hpt_final_init; 1229 hpt_ich.ich_arg = NULL; 1230 if (config_intrhook_establish(&hpt_ich) != 0) { 1231 printf("%s: cannot establish configuration hook\n", 1232 driver_name_long); 1233 } 1234 1235 } 1236 SYSINIT(hptinit, SI_SUB_CONFIGURE, SI_ORDER_FIRST, hpt_init, NULL); 1237 1238 /* 1239 * CAM driver interface 1240 */ 1241 static device_method_t driver_methods[] = { 1242 /* Device interface */ 1243 DEVMETHOD(device_probe, hpt_probe), 1244 DEVMETHOD(device_attach, hpt_attach), 1245 DEVMETHOD(device_detach, hpt_detach), 1246 DEVMETHOD(device_shutdown, hpt_shutdown), 1247 { 0, 0 } 1248 }; 1249 1250 static driver_t hpt_pci_driver = { 1251 driver_name, 1252 driver_methods, 1253 sizeof(HBA) 1254 }; 1255 1256 static devclass_t hpt_devclass; 1257 1258 #ifndef TARGETNAME 1259 #error "no TARGETNAME found" 1260 #endif 1261 1262 /* use this to make TARGETNAME be expanded */ 1263 #define __DRIVER_MODULE(p1, p2, p3, p4, p5, p6) DRIVER_MODULE(p1, p2, p3, p4, p5, p6) 1264 #define __MODULE_VERSION(p1, p2) MODULE_VERSION(p1, p2) 1265 #define __MODULE_DEPEND(p1, p2, p3, p4, p5) MODULE_DEPEND(p1, p2, p3, p4, p5) 1266 __DRIVER_MODULE(TARGETNAME, pci, hpt_pci_driver, hpt_devclass, 0, 0); 1267 __MODULE_VERSION(TARGETNAME, 1); 1268 __MODULE_DEPEND(TARGETNAME, cam, 1, 1, 1); 1269 1270 #if __FreeBSD_version>503000 1271 typedef struct cdev * ioctl_dev_t; 1272 #else 1273 typedef dev_t ioctl_dev_t; 1274 #endif 1275 1276 #if __FreeBSD_version >= 500000 1277 typedef struct thread * ioctl_thread_t; 1278 #else 1279 typedef struct proc * ioctl_thread_t; 1280 #endif 1281 1282 static int hpt_open(ioctl_dev_t dev, int flags, int devtype, ioctl_thread_t td) 1283 { 1284 return 0; 1285 } 1286 1287 static int hpt_close(ioctl_dev_t dev, int flags, int devtype, ioctl_thread_t td) 1288 { 1289 return 0; 1290 } 1291 1292 static int hpt_ioctl(ioctl_dev_t dev, u_long cmd, caddr_t data, int fflag, ioctl_thread_t td) 1293 { 1294 PHPT_IOCTL_PARAM piop=(PHPT_IOCTL_PARAM)data; 1295 IOCTL_ARG ioctl_args; 1296 HPT_U32 bytesReturned; 1297 1298 switch (cmd){ 1299 case HPT_DO_IOCONTROL: 1300 { 1301 if (piop->Magic == HPT_IOCTL_MAGIC || piop->Magic == HPT_IOCTL_MAGIC32) { 1302 KdPrint(("ioctl=%x in=%p len=%d out=%p len=%d\n", 1303 piop->dwIoControlCode, 1304 piop->lpInBuffer, 1305 piop->nInBufferSize, 1306 piop->lpOutBuffer, 1307 piop->nOutBufferSize)); 1308 1309 memset(&ioctl_args, 0, sizeof(ioctl_args)); 1310 1311 ioctl_args.dwIoControlCode = piop->dwIoControlCode; 1312 ioctl_args.nInBufferSize = piop->nInBufferSize; 1313 ioctl_args.nOutBufferSize = piop->nOutBufferSize; 1314 ioctl_args.lpBytesReturned = &bytesReturned; 1315 1316 if (ioctl_args.nInBufferSize) { 1317 ioctl_args.lpInBuffer = malloc(ioctl_args.nInBufferSize, M_DEVBUF, M_WAITOK); 1318 if (!ioctl_args.lpInBuffer) 1319 goto invalid; 1320 if (copyin((void*)piop->lpInBuffer, 1321 ioctl_args.lpInBuffer, piop->nInBufferSize)) 1322 goto invalid; 1323 } 1324 1325 if (ioctl_args.nOutBufferSize) { 1326 ioctl_args.lpOutBuffer = malloc(ioctl_args.nOutBufferSize, M_DEVBUF, M_WAITOK); 1327 if (!ioctl_args.lpOutBuffer) 1328 goto invalid; 1329 } 1330 1331 #if (__FreeBSD_version >= 500000) 1332 mtx_lock(&Giant); 1333 #endif 1334 1335 hpt_do_ioctl(&ioctl_args); 1336 1337 #if (__FreeBSD_version >= 500000) 1338 mtx_unlock(&Giant); 1339 #endif 1340 1341 if (ioctl_args.result==HPT_IOCTL_RESULT_OK) { 1342 if (piop->nOutBufferSize) { 1343 if (copyout(ioctl_args.lpOutBuffer, 1344 (void*)piop->lpOutBuffer, piop->nOutBufferSize)) 1345 goto invalid; 1346 } 1347 if (piop->lpBytesReturned) { 1348 if (copyout(&bytesReturned, 1349 (void*)piop->lpBytesReturned, sizeof(HPT_U32))) 1350 goto invalid; 1351 } 1352 if (ioctl_args.lpInBuffer) free(ioctl_args.lpInBuffer, M_DEVBUF); 1353 if (ioctl_args.lpOutBuffer) free(ioctl_args.lpOutBuffer, M_DEVBUF); 1354 return 0; 1355 } 1356 invalid: 1357 if (ioctl_args.lpInBuffer) free(ioctl_args.lpInBuffer, M_DEVBUF); 1358 if (ioctl_args.lpOutBuffer) free(ioctl_args.lpOutBuffer, M_DEVBUF); 1359 return EFAULT; 1360 } 1361 return EFAULT; 1362 } 1363 1364 case HPT_SCAN_BUS: 1365 { 1366 return hpt_rescan_bus(); 1367 } 1368 default: 1369 KdPrint(("invalid command!")); 1370 return EFAULT; 1371 } 1372 1373 } 1374 1375 static int hpt_rescan_bus(void) 1376 { 1377 struct cam_path *path; 1378 union ccb *ccb; 1379 PVBUS vbus; 1380 PVBUS_EXT vbus_ext; 1381 1382 #if (__FreeBSD_version >= 500000) 1383 mtx_lock(&Giant); 1384 #endif 1385 1386 ldm_for_each_vbus(vbus, vbus_ext) { 1387 if (xpt_create_path(&path, xpt_periph, cam_sim_path(vbus_ext->sim), 1388 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) 1389 return(EIO); 1390 if ((ccb = malloc(sizeof(union ccb), M_TEMP, M_WAITOK)) == NULL) 1391 return(ENOMEM); 1392 bzero(ccb, sizeof(union ccb)); 1393 xpt_setup_ccb(&ccb->ccb_h, path, 5); 1394 ccb->ccb_h.func_code = XPT_SCAN_BUS; 1395 ccb->ccb_h.cbfcnp = hpt_bus_scan_cb; 1396 ccb->crcn.flags = CAM_FLAG_NONE; 1397 xpt_action(ccb); 1398 } 1399 1400 #if (__FreeBSD_version >= 500000) 1401 mtx_unlock(&Giant); 1402 #endif 1403 1404 return(0); 1405 } 1406 1407 static void hpt_bus_scan_cb(struct cam_periph *periph, union ccb *ccb) 1408 { 1409 if (ccb->ccb_h.status != CAM_REQ_CMP) 1410 KdPrint(("cam_scan_callback: failure status = %x",ccb->ccb_h.status)); 1411 else 1412 KdPrint(("Scan bus successfully!")); 1413 1414 xpt_free_path(ccb->ccb_h.path); 1415 free(ccb, M_TEMP); 1416 return; 1417 } 1418