1 /* 2 * Copyright (c) HighPoint Technologies, Inc. 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in the 12 * documentation and/or other materials provided with the distribution. 13 * 14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 24 * SUCH DAMAGE. 25 * 26 * $FreeBSD$ 27 */ 28 #include <dev/hptrr/hptrr_config.h> 29 /* $Id: osm_bsd.c,v 1.27 2007/11/22 07:35:49 gmm Exp $ 30 * 31 * HighPoint RAID Driver for FreeBSD 32 * Copyright (C) 2005 HighPoint Technologies, Inc. All Rights Reserved. 33 */ 34 #include <dev/hptrr/os_bsd.h> 35 #include <dev/hptrr/hptintf.h> 36 37 static int attach_generic = 1; 38 TUNABLE_INT("hw.hptrr.attach_generic", &attach_generic); 39 40 static int hpt_probe(device_t dev) 41 { 42 PCI_ID pci_id; 43 HIM *him; 44 int i; 45 PHBA hba; 46 47 /* Some of supported chips are used not only by HPT. */ 48 if (pci_get_vendor(dev) != 0x1103 && !attach_generic) 49 return (ENXIO); 50 for (him = him_list; him; him = him->next) { 51 for (i=0; him->get_supported_device_id(i, &pci_id); i++) { 52 if ((pci_get_vendor(dev) == pci_id.vid) && 53 (pci_get_device(dev) == pci_id.did)){ 54 KdPrint(("hpt_probe: adapter at PCI %d:%d:%d, IRQ %d", 55 pci_get_bus(dev), pci_get_slot(dev), pci_get_function(dev), pci_get_irq(dev) 56 )); 57 device_set_desc(dev, him->name); 58 hba = (PHBA)device_get_softc(dev); 59 memset(hba, 0, sizeof(HBA)); 60 hba->ext_type = EXT_TYPE_HBA; 61 hba->ldm_adapter.him = him; 62 return 0; 63 } 64 } 65 } 66 67 return (ENXIO); 68 } 69 70 static int hpt_attach(device_t dev) 71 { 72 PHBA hba = (PHBA)device_get_softc(dev); 73 HIM *him = hba->ldm_adapter.him; 74 PCI_ID pci_id; 75 HPT_UINT size; 76 PVBUS vbus; 77 PVBUS_EXT vbus_ext; 78 79 KdPrint(("hpt_attach(%d/%d/%d)", pci_get_bus(dev), pci_get_slot(dev), pci_get_function(dev))); 80 81 #if __FreeBSD_version >=440000 82 pci_enable_busmaster(dev); 83 #endif 84 85 pci_id.vid = pci_get_vendor(dev); 86 pci_id.did = pci_get_device(dev); 87 pci_id.rev = pci_get_revid(dev); 88 89 size = him->get_adapter_size(&pci_id); 90 hba->ldm_adapter.him_handle = malloc(size, M_DEVBUF, M_WAITOK); 91 if (!hba->ldm_adapter.him_handle) 92 return ENXIO; 93 94 hba->pcidev = dev; 95 hba->pciaddr.tree = 0; 96 hba->pciaddr.bus = pci_get_bus(dev); 97 hba->pciaddr.device = pci_get_slot(dev); 98 hba->pciaddr.function = pci_get_function(dev); 99 100 if (!him->create_adapter(&pci_id, hba->pciaddr, hba->ldm_adapter.him_handle, hba)) { 101 free(hba->ldm_adapter.him_handle, M_DEVBUF); 102 return -1; 103 } 104 105 os_printk("adapter at PCI %d:%d:%d, IRQ %d", 106 hba->pciaddr.bus, hba->pciaddr.device, hba->pciaddr.function, pci_get_irq(dev)); 107 108 if (!ldm_register_adapter(&hba->ldm_adapter)) { 109 size = ldm_get_vbus_size(); 110 vbus_ext = malloc(sizeof(VBUS_EXT) + size, M_DEVBUF, M_WAITOK); 111 if (!vbus_ext) { 112 free(hba->ldm_adapter.him_handle, M_DEVBUF); 113 return -1; 114 } 115 memset(vbus_ext, 0, sizeof(VBUS_EXT)); 116 vbus_ext->ext_type = EXT_TYPE_VBUS; 117 ldm_create_vbus((PVBUS)vbus_ext->vbus, vbus_ext); 118 ldm_register_adapter(&hba->ldm_adapter); 119 } 120 121 ldm_for_each_vbus(vbus, vbus_ext) { 122 if (hba->ldm_adapter.vbus==vbus) { 123 hba->vbus_ext = vbus_ext; 124 hba->next = vbus_ext->hba_list; 125 vbus_ext->hba_list = hba; 126 break; 127 } 128 } 129 return 0; 130 } 131 132 /* 133 * Maybe we'd better to use the bus_dmamem_alloc to alloc DMA memory, 134 * but there are some problems currently (alignment, etc). 135 */ 136 static __inline void *__get_free_pages(int order) 137 { 138 /* don't use low memory - other devices may get starved */ 139 return contigmalloc(PAGE_SIZE<<order, 140 M_DEVBUF, M_WAITOK, BUS_SPACE_MAXADDR_24BIT, BUS_SPACE_MAXADDR, PAGE_SIZE, 0); 141 } 142 143 static __inline void free_pages(void *p, int order) 144 { 145 contigfree(p, PAGE_SIZE<<order, M_DEVBUF); 146 } 147 148 static int hpt_alloc_mem(PVBUS_EXT vbus_ext) 149 { 150 PHBA hba; 151 struct freelist *f; 152 HPT_UINT i; 153 void **p; 154 155 for (hba = vbus_ext->hba_list; hba; hba = hba->next) 156 hba->ldm_adapter.him->get_meminfo(hba->ldm_adapter.him_handle); 157 158 ldm_get_mem_info((PVBUS)vbus_ext->vbus, 0); 159 160 for (f=vbus_ext->freelist_head; f; f=f->next) { 161 KdPrint(("%s: %d*%d=%d bytes", 162 f->tag, f->count, f->size, f->count*f->size)); 163 for (i=0; i<f->count; i++) { 164 p = (void **)malloc(f->size, M_DEVBUF, M_WAITOK); 165 if (!p) return (ENXIO); 166 *p = f->head; 167 f->head = p; 168 } 169 } 170 171 for (f=vbus_ext->freelist_dma_head; f; f=f->next) { 172 int order, size, j; 173 174 HPT_ASSERT((f->size & (f->alignment-1))==0); 175 176 for (order=0, size=PAGE_SIZE; size<f->size; order++, size<<=1) ; 177 178 KdPrint(("%s: %d*%d=%d bytes, order %d", 179 f->tag, f->count, f->size, f->count*f->size, order)); 180 HPT_ASSERT(f->alignment<=PAGE_SIZE); 181 182 for (i=0; i<f->count;) { 183 p = (void **)__get_free_pages(order); 184 if (!p) return -1; 185 for (j = size/f->size; j && i<f->count; i++,j--) { 186 *p = f->head; 187 *(BUS_ADDRESS *)(p+1) = (BUS_ADDRESS)vtophys(p); 188 f->head = p; 189 p = (void **)((unsigned long)p + f->size); 190 } 191 } 192 } 193 194 HPT_ASSERT(PAGE_SIZE==DMAPOOL_PAGE_SIZE); 195 196 for (i=0; i<os_max_cache_pages; i++) { 197 p = (void **)__get_free_pages(0); 198 if (!p) return -1; 199 HPT_ASSERT(((HPT_UPTR)p & (DMAPOOL_PAGE_SIZE-1))==0); 200 dmapool_put_page((PVBUS)vbus_ext->vbus, p, (BUS_ADDRESS)vtophys(p)); 201 } 202 203 return 0; 204 } 205 206 static void hpt_free_mem(PVBUS_EXT vbus_ext) 207 { 208 struct freelist *f; 209 void *p; 210 int i; 211 BUS_ADDRESS bus; 212 213 for (f=vbus_ext->freelist_head; f; f=f->next) { 214 #if DBG 215 if (f->count!=f->reserved_count) { 216 KdPrint(("memory leak for freelist %s (%d/%d)", f->tag, f->count, f->reserved_count)); 217 } 218 #endif 219 while ((p=freelist_get(f))) 220 free(p, M_DEVBUF); 221 } 222 223 for (i=0; i<os_max_cache_pages; i++) { 224 p = dmapool_get_page((PVBUS)vbus_ext->vbus, &bus); 225 HPT_ASSERT(p); 226 free_pages(p, 0); 227 } 228 229 for (f=vbus_ext->freelist_dma_head; f; f=f->next) { 230 int order, size; 231 #if DBG 232 if (f->count!=f->reserved_count) { 233 KdPrint(("memory leak for dma freelist %s (%d/%d)", f->tag, f->count, f->reserved_count)); 234 } 235 #endif 236 for (order=0, size=PAGE_SIZE; size<f->size; order++, size<<=1) ; 237 238 while ((p=freelist_get_dma(f, &bus))) { 239 if (order) 240 free_pages(p, order); 241 else { 242 /* can't free immediately since other blocks in this page may still be in the list */ 243 if (((HPT_UPTR)p & (PAGE_SIZE-1))==0) 244 dmapool_put_page((PVBUS)vbus_ext->vbus, p, bus); 245 } 246 } 247 } 248 249 while ((p = dmapool_get_page((PVBUS)vbus_ext->vbus, &bus))) 250 free_pages(p, 0); 251 } 252 253 static int hpt_init_vbus(PVBUS_EXT vbus_ext) 254 { 255 PHBA hba; 256 257 for (hba = vbus_ext->hba_list; hba; hba = hba->next) 258 if (!hba->ldm_adapter.him->initialize(hba->ldm_adapter.him_handle)) { 259 KdPrint(("fail to initialize %p", hba)); 260 return -1; 261 } 262 263 ldm_initialize_vbus((PVBUS)vbus_ext->vbus, &vbus_ext->hba_list->ldm_adapter); 264 return 0; 265 } 266 267 static void hpt_flush_done(PCOMMAND pCmd) 268 { 269 PVDEV vd = pCmd->target; 270 271 if (mIsArray(vd->type) && vd->u.array.transform && vd!=vd->u.array.transform->target) { 272 vd = vd->u.array.transform->target; 273 HPT_ASSERT(vd); 274 pCmd->target = vd; 275 pCmd->Result = RETURN_PENDING; 276 vdev_queue_cmd(pCmd); 277 return; 278 } 279 280 *(int *)pCmd->priv = 1; 281 wakeup(pCmd); 282 } 283 284 /* 285 * flush a vdev (without retry). 286 */ 287 static int hpt_flush_vdev(PVBUS_EXT vbus_ext, PVDEV vd) 288 { 289 PCOMMAND pCmd; 290 int result = 0, done; 291 HPT_UINT count; 292 293 KdPrint(("flusing dev %p", vd)); 294 295 hpt_lock_vbus(vbus_ext); 296 297 if (mIsArray(vd->type) && vd->u.array.transform) 298 count = MAX(vd->u.array.transform->source->cmds_per_request, 299 vd->u.array.transform->target->cmds_per_request); 300 else 301 count = vd->cmds_per_request; 302 303 pCmd = ldm_alloc_cmds(vd->vbus, count); 304 305 if (!pCmd) { 306 hpt_unlock_vbus(vbus_ext); 307 return -1; 308 } 309 310 pCmd->type = CMD_TYPE_FLUSH; 311 pCmd->flags.hard_flush = 1; 312 pCmd->target = vd; 313 pCmd->done = hpt_flush_done; 314 done = 0; 315 pCmd->priv = &done; 316 317 ldm_queue_cmd(pCmd); 318 319 if (!done) { 320 while (hpt_sleep(vbus_ext, pCmd, PPAUSE, "hptfls", HPT_OSM_TIMEOUT)) { 321 ldm_reset_vbus(vd->vbus); 322 } 323 } 324 325 KdPrint(("flush result %d", pCmd->Result)); 326 327 if (pCmd->Result!=RETURN_SUCCESS) 328 result = -1; 329 330 ldm_free_cmds(pCmd); 331 332 hpt_unlock_vbus(vbus_ext); 333 334 return result; 335 } 336 337 static void hpt_stop_tasks(PVBUS_EXT vbus_ext); 338 static void hpt_shutdown_vbus(PVBUS_EXT vbus_ext, int howto) 339 { 340 PVBUS vbus = (PVBUS)vbus_ext->vbus; 341 PHBA hba; 342 int i; 343 344 KdPrint(("hpt_shutdown_vbus")); 345 346 /* stop all ctl tasks and disable the worker taskqueue */ 347 hpt_stop_tasks(vbus_ext); 348 vbus_ext->worker.ta_context = 0; 349 350 /* flush devices */ 351 for (i=0; i<osm_max_targets; i++) { 352 PVDEV vd = ldm_find_target(vbus, i); 353 if (vd) { 354 /* retry once */ 355 if (hpt_flush_vdev(vbus_ext, vd)) 356 hpt_flush_vdev(vbus_ext, vd); 357 } 358 } 359 360 hpt_lock_vbus(vbus_ext); 361 ldm_shutdown(vbus); 362 hpt_unlock_vbus(vbus_ext); 363 364 ldm_release_vbus(vbus); 365 366 for (hba=vbus_ext->hba_list; hba; hba=hba->next) 367 bus_teardown_intr(hba->pcidev, hba->irq_res, hba->irq_handle); 368 369 hpt_free_mem(vbus_ext); 370 371 while ((hba=vbus_ext->hba_list)) { 372 vbus_ext->hba_list = hba->next; 373 free(hba->ldm_adapter.him_handle, M_DEVBUF); 374 } 375 376 free(vbus_ext, M_DEVBUF); 377 KdPrint(("hpt_shutdown_vbus done")); 378 } 379 380 static void __hpt_do_tasks(PVBUS_EXT vbus_ext) 381 { 382 OSM_TASK *tasks; 383 384 tasks = vbus_ext->tasks; 385 vbus_ext->tasks = 0; 386 387 while (tasks) { 388 OSM_TASK *t = tasks; 389 tasks = t->next; 390 t->next = 0; 391 t->func(vbus_ext->vbus, t->data); 392 } 393 } 394 395 static void hpt_do_tasks(PVBUS_EXT vbus_ext, int pending) 396 { 397 if(vbus_ext){ 398 hpt_lock_vbus(vbus_ext); 399 __hpt_do_tasks(vbus_ext); 400 hpt_unlock_vbus(vbus_ext); 401 } 402 } 403 404 static void hpt_action(struct cam_sim *sim, union ccb *ccb); 405 static void hpt_poll(struct cam_sim *sim); 406 static void hpt_async(void * callback_arg, u_int32_t code, struct cam_path * path, void * arg); 407 static void hpt_pci_intr(void *arg); 408 409 static __inline POS_CMDEXT cmdext_get(PVBUS_EXT vbus_ext) 410 { 411 POS_CMDEXT p = vbus_ext->cmdext_list; 412 if (p) 413 vbus_ext->cmdext_list = p->next; 414 return p; 415 } 416 417 static __inline void cmdext_put(POS_CMDEXT p) 418 { 419 p->next = p->vbus_ext->cmdext_list; 420 p->vbus_ext->cmdext_list = p; 421 } 422 423 static void hpt_timeout(void *arg) 424 { 425 PCOMMAND pCmd = (PCOMMAND)arg; 426 POS_CMDEXT ext = (POS_CMDEXT)pCmd->priv; 427 428 KdPrint(("pCmd %p timeout", pCmd)); 429 430 ldm_reset_vbus((PVBUS)ext->vbus_ext->vbus); 431 } 432 433 static void os_cmddone(PCOMMAND pCmd) 434 { 435 POS_CMDEXT ext = (POS_CMDEXT)pCmd->priv; 436 union ccb *ccb = ext->ccb; 437 438 KdPrint(("os_cmddone(%p, %d)", pCmd, pCmd->Result)); 439 440 untimeout(hpt_timeout, pCmd, ccb->ccb_h.timeout_ch); 441 442 switch(pCmd->Result) { 443 case RETURN_SUCCESS: 444 ccb->ccb_h.status = CAM_REQ_CMP; 445 break; 446 case RETURN_BAD_DEVICE: 447 ccb->ccb_h.status = CAM_DEV_NOT_THERE; 448 break; 449 case RETURN_DEVICE_BUSY: 450 ccb->ccb_h.status = CAM_BUSY; 451 break; 452 case RETURN_INVALID_REQUEST: 453 ccb->ccb_h.status = CAM_REQ_INVALID; 454 break; 455 case RETURN_SELECTION_TIMEOUT: 456 ccb->ccb_h.status = CAM_SEL_TIMEOUT; 457 break; 458 case RETURN_RETRY: 459 ccb->ccb_h.status = CAM_BUSY; 460 break; 461 default: 462 ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR; 463 break; 464 } 465 466 if (pCmd->flags.data_in) { 467 bus_dmamap_sync(ext->vbus_ext->io_dmat, ext->dma_map, BUS_DMASYNC_POSTREAD); 468 } 469 else if (pCmd->flags.data_out) { 470 bus_dmamap_sync(ext->vbus_ext->io_dmat, ext->dma_map, BUS_DMASYNC_POSTWRITE); 471 } 472 473 bus_dmamap_unload(ext->vbus_ext->io_dmat, ext->dma_map); 474 475 cmdext_put(ext); 476 ldm_free_cmds(pCmd); 477 xpt_done(ccb); 478 } 479 480 static int os_buildsgl(PCOMMAND pCmd, PSG pSg, int logical) 481 { 482 POS_CMDEXT ext = (POS_CMDEXT)pCmd->priv; 483 union ccb *ccb = ext->ccb; 484 bus_dma_segment_t *sgList = (bus_dma_segment_t *)ccb->csio.data_ptr; 485 int idx; 486 487 if(logical) { 488 if (ccb->ccb_h.flags & CAM_DATA_PHYS) 489 panic("physical address unsupported"); 490 491 if (ccb->ccb_h.flags & CAM_SCATTER_VALID) { 492 if (ccb->ccb_h.flags & CAM_SG_LIST_PHYS) 493 panic("physical address unsupported"); 494 495 for (idx = 0; idx < ccb->csio.sglist_cnt; idx++) { 496 os_set_sgptr(&pSg[idx], (HPT_U8 *)(HPT_UPTR)sgList[idx].ds_addr); 497 pSg[idx].size = sgList[idx].ds_len; 498 pSg[idx].eot = (idx==ccb->csio.sglist_cnt-1)? 1 : 0; 499 } 500 } 501 else { 502 os_set_sgptr(pSg, (HPT_U8 *)ccb->csio.data_ptr); 503 pSg->size = ccb->csio.dxfer_len; 504 pSg->eot = 1; 505 } 506 return TRUE; 507 } 508 509 /* since we have provided physical sg, nobody will ask us to build physical sg */ 510 HPT_ASSERT(0); 511 return FALSE; 512 } 513 514 static void hpt_io_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nsegs, int error) 515 { 516 PCOMMAND pCmd = (PCOMMAND)arg; 517 POS_CMDEXT ext = (POS_CMDEXT)pCmd->priv; 518 PSG psg = pCmd->psg; 519 int idx; 520 521 HPT_ASSERT(pCmd->flags.physical_sg); 522 523 if (error || nsegs == 0) 524 panic("busdma error"); 525 526 HPT_ASSERT(nsegs<=os_max_sg_descriptors); 527 528 for (idx = 0; idx < nsegs; idx++, psg++) { 529 psg->addr.bus = segs[idx].ds_addr; 530 psg->size = segs[idx].ds_len; 531 psg->eot = 0; 532 } 533 psg[-1].eot = 1; 534 535 if (pCmd->flags.data_in) { 536 bus_dmamap_sync(ext->vbus_ext->io_dmat, ext->dma_map, BUS_DMASYNC_PREREAD); 537 } 538 else if (pCmd->flags.data_out) { 539 bus_dmamap_sync(ext->vbus_ext->io_dmat, ext->dma_map, BUS_DMASYNC_PREWRITE); 540 } 541 542 ext->ccb->ccb_h.timeout_ch = timeout(hpt_timeout, pCmd, HPT_OSM_TIMEOUT); 543 ldm_queue_cmd(pCmd); 544 } 545 546 static void hpt_scsi_io(PVBUS_EXT vbus_ext, union ccb *ccb) 547 { 548 PVBUS vbus = (PVBUS)vbus_ext->vbus; 549 PVDEV vd; 550 PCOMMAND pCmd; 551 POS_CMDEXT ext; 552 HPT_U8 *cdb; 553 554 if (ccb->ccb_h.flags & CAM_CDB_POINTER) 555 cdb = ccb->csio.cdb_io.cdb_ptr; 556 else 557 cdb = ccb->csio.cdb_io.cdb_bytes; 558 559 KdPrint(("hpt_scsi_io: ccb %x id %d lun %d cdb %x-%x-%x", 560 ccb, 561 ccb->ccb_h.target_id, ccb->ccb_h.target_lun, 562 *(HPT_U32 *)&cdb[0], *(HPT_U32 *)&cdb[4], *(HPT_U32 *)&cdb[8] 563 )); 564 565 /* ccb->ccb_h.path_id is not our bus id - don't check it */ 566 if (ccb->ccb_h.target_lun != 0 || 567 ccb->ccb_h.target_id >= osm_max_targets || 568 (ccb->ccb_h.flags & CAM_CDB_PHYS)) 569 { 570 ccb->ccb_h.status = CAM_TID_INVALID; 571 xpt_done(ccb); 572 return; 573 } 574 575 vd = ldm_find_target(vbus, ccb->ccb_h.target_id); 576 577 if (!vd) { 578 ccb->ccb_h.status = CAM_TID_INVALID; 579 xpt_done(ccb); 580 return; 581 } 582 583 switch (cdb[0]) { 584 case TEST_UNIT_READY: 585 case START_STOP_UNIT: 586 case SYNCHRONIZE_CACHE: 587 ccb->ccb_h.status = CAM_REQ_CMP; 588 break; 589 590 case INQUIRY: 591 { 592 PINQUIRYDATA inquiryData; 593 memset(ccb->csio.data_ptr, 0, ccb->csio.dxfer_len); 594 inquiryData = (PINQUIRYDATA)ccb->csio.data_ptr; 595 596 inquiryData->AdditionalLength = 31; 597 inquiryData->CommandQueue = 1; 598 memcpy(&inquiryData->VendorId, "HPT ", 8); 599 memcpy(&inquiryData->ProductId, "DISK 0_0 ", 16); 600 601 if (vd->target_id / 10) { 602 inquiryData->ProductId[7] = (vd->target_id % 100) / 10 + '0'; 603 inquiryData->ProductId[8] = (vd->target_id % 100) % 10 + '0'; 604 } 605 else 606 inquiryData->ProductId[7] = (vd->target_id % 100) % 10 + '0'; 607 608 memcpy(&inquiryData->ProductRevisionLevel, "4.00", 4); 609 610 ccb->ccb_h.status = CAM_REQ_CMP; 611 } 612 break; 613 614 case READ_CAPACITY: 615 { 616 HPT_U8 *rbuf = ccb->csio.data_ptr; 617 HPT_U32 cap; 618 619 if (vd->capacity>0xfffffffful) 620 cap = 0xfffffffful; 621 else 622 cap = vd->capacity - 1; 623 624 rbuf[0] = (HPT_U8)(cap>>24); 625 rbuf[1] = (HPT_U8)(cap>>16); 626 rbuf[2] = (HPT_U8)(cap>>8); 627 rbuf[3] = (HPT_U8)cap; 628 rbuf[4] = 0; 629 rbuf[5] = 0; 630 rbuf[6] = 2; 631 rbuf[7] = 0; 632 633 ccb->ccb_h.status = CAM_REQ_CMP; 634 break; 635 } 636 637 case SERVICE_ACTION_IN: 638 { 639 HPT_U8 *rbuf = ccb->csio.data_ptr; 640 HPT_U64 cap = vd->capacity - 1; 641 642 rbuf[0] = (HPT_U8)(cap>>56); 643 rbuf[1] = (HPT_U8)(cap>>48); 644 rbuf[2] = (HPT_U8)(cap>>40); 645 rbuf[3] = (HPT_U8)(cap>>32); 646 rbuf[4] = (HPT_U8)(cap>>24); 647 rbuf[5] = (HPT_U8)(cap>>16); 648 rbuf[6] = (HPT_U8)(cap>>8); 649 rbuf[7] = (HPT_U8)cap; 650 rbuf[8] = 0; 651 rbuf[9] = 0; 652 rbuf[10] = 2; 653 rbuf[11] = 0; 654 655 ccb->ccb_h.status = CAM_REQ_CMP; 656 break; 657 } 658 659 case READ_6: 660 case READ_10: 661 case READ_16: 662 case WRITE_6: 663 case WRITE_10: 664 case WRITE_16: 665 case 0x13: 666 case 0x2f: 667 { 668 pCmd = ldm_alloc_cmds(vbus, vd->cmds_per_request); 669 if(!pCmd){ 670 KdPrint(("Failed to allocate command!")); 671 ccb->ccb_h.status = CAM_BUSY; 672 break; 673 } 674 675 switch (cdb[0]) { 676 case READ_6: 677 case WRITE_6: 678 case 0x13: 679 pCmd->uCmd.Ide.Lba = ((HPT_U32)cdb[1] << 16) | ((HPT_U32)cdb[2] << 8) | (HPT_U32)cdb[3]; 680 pCmd->uCmd.Ide.nSectors = (HPT_U16) cdb[4]; 681 break; 682 case READ_16: 683 case WRITE_16: 684 { 685 HPT_U64 block = 686 ((HPT_U64)cdb[2]<<56) | 687 ((HPT_U64)cdb[3]<<48) | 688 ((HPT_U64)cdb[4]<<40) | 689 ((HPT_U64)cdb[5]<<32) | 690 ((HPT_U64)cdb[6]<<24) | 691 ((HPT_U64)cdb[7]<<16) | 692 ((HPT_U64)cdb[8]<<8) | 693 ((HPT_U64)cdb[9]); 694 pCmd->uCmd.Ide.Lba = block; 695 pCmd->uCmd.Ide.nSectors = (HPT_U16)cdb[13] | ((HPT_U16)cdb[12]<<8); 696 break; 697 } 698 699 default: 700 pCmd->uCmd.Ide.Lba = (HPT_U32)cdb[5] | ((HPT_U32)cdb[4] << 8) | ((HPT_U32)cdb[3] << 16) | ((HPT_U32)cdb[2] << 24); 701 pCmd->uCmd.Ide.nSectors = (HPT_U16) cdb[8] | ((HPT_U16)cdb[7]<<8); 702 break; 703 } 704 705 switch (cdb[0]) { 706 case READ_6: 707 case READ_10: 708 case READ_16: 709 pCmd->flags.data_in = 1; 710 break; 711 case WRITE_6: 712 case WRITE_10: 713 case WRITE_16: 714 pCmd->flags.data_out = 1; 715 break; 716 } 717 pCmd->priv = ext = cmdext_get(vbus_ext); 718 HPT_ASSERT(ext); 719 ext->ccb = ccb; 720 pCmd->target = vd; 721 pCmd->done = os_cmddone; 722 pCmd->buildsgl = os_buildsgl; 723 724 pCmd->psg = ext->psg; 725 726 if (ccb->ccb_h.flags & CAM_SCATTER_VALID) { 727 int idx; 728 bus_dma_segment_t *sgList = (bus_dma_segment_t *)ccb->csio.data_ptr; 729 730 if (ccb->ccb_h.flags & CAM_SG_LIST_PHYS) 731 pCmd->flags.physical_sg = 1; 732 733 for (idx = 0; idx < ccb->csio.sglist_cnt; idx++) { 734 pCmd->psg[idx].addr.bus = sgList[idx].ds_addr; 735 pCmd->psg[idx].size = sgList[idx].ds_len; 736 pCmd->psg[idx].eot = (idx==ccb->csio.sglist_cnt-1)? 1 : 0; 737 } 738 739 ccb->ccb_h.timeout_ch = timeout(hpt_timeout, pCmd, HPT_OSM_TIMEOUT); 740 ldm_queue_cmd(pCmd); 741 } 742 else { 743 int error; 744 pCmd->flags.physical_sg = 1; 745 error = bus_dmamap_load(vbus_ext->io_dmat, 746 ext->dma_map, 747 ccb->csio.data_ptr, ccb->csio.dxfer_len, 748 hpt_io_dmamap_callback, pCmd, 749 BUS_DMA_WAITOK 750 ); 751 KdPrint(("bus_dmamap_load return %d", error)); 752 if (error && error!=EINPROGRESS) { 753 os_printk("bus_dmamap_load error %d", error); 754 cmdext_put(ext); 755 ldm_free_cmds(pCmd); 756 ccb->ccb_h.status = CAM_REQ_CMP_ERR; 757 xpt_done(ccb); 758 } 759 } 760 return; 761 } 762 763 default: 764 ccb->ccb_h.status = CAM_REQ_INVALID; 765 break; 766 } 767 768 xpt_done(ccb); 769 return; 770 } 771 772 static void hpt_action(struct cam_sim *sim, union ccb *ccb) 773 { 774 PVBUS_EXT vbus_ext = (PVBUS_EXT)cam_sim_softc(sim); 775 776 KdPrint(("hpt_action(fn=%d, id=%d)", ccb->ccb_h.func_code, ccb->ccb_h.target_id)); 777 778 switch (ccb->ccb_h.func_code) { 779 780 case XPT_SCSI_IO: 781 hpt_lock_vbus(vbus_ext); 782 hpt_scsi_io(vbus_ext, ccb); 783 hpt_unlock_vbus(vbus_ext); 784 return; 785 786 case XPT_RESET_BUS: 787 hpt_lock_vbus(vbus_ext); 788 ldm_reset_vbus((PVBUS)vbus_ext->vbus); 789 hpt_unlock_vbus(vbus_ext); 790 break; 791 792 case XPT_GET_TRAN_SETTINGS: 793 case XPT_SET_TRAN_SETTINGS: 794 ccb->ccb_h.status = CAM_FUNC_NOTAVAIL; 795 break; 796 797 case XPT_CALC_GEOMETRY: 798 ccb->ccg.heads = 255; 799 ccb->ccg.secs_per_track = 63; 800 ccb->ccg.cylinders = ccb->ccg.volume_size / (ccb->ccg.heads * ccb->ccg.secs_per_track); 801 ccb->ccb_h.status = CAM_REQ_CMP; 802 break; 803 804 case XPT_PATH_INQ: 805 { 806 struct ccb_pathinq *cpi = &ccb->cpi; 807 808 cpi->version_num = 1; 809 cpi->hba_inquiry = PI_SDTR_ABLE; 810 cpi->target_sprt = 0; 811 cpi->hba_misc = PIM_NOBUSRESET; 812 cpi->hba_eng_cnt = 0; 813 cpi->max_target = osm_max_targets; 814 cpi->max_lun = 0; 815 cpi->unit_number = cam_sim_unit(sim); 816 cpi->bus_id = cam_sim_bus(sim); 817 cpi->initiator_id = osm_max_targets; 818 cpi->base_transfer_speed = 3300; 819 820 strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); 821 strncpy(cpi->hba_vid, "HPT ", HBA_IDLEN); 822 strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN); 823 cpi->transport = XPORT_SPI; 824 cpi->transport_version = 2; 825 cpi->protocol = PROTO_SCSI; 826 cpi->protocol_version = SCSI_REV_2; 827 cpi->ccb_h.status = CAM_REQ_CMP; 828 break; 829 } 830 831 default: 832 ccb->ccb_h.status = CAM_REQ_INVALID; 833 break; 834 } 835 836 xpt_done(ccb); 837 return; 838 } 839 840 static void hpt_pci_intr(void *arg) 841 { 842 PVBUS_EXT vbus_ext = (PVBUS_EXT)arg; 843 hpt_lock_vbus(vbus_ext); 844 ldm_intr((PVBUS)vbus_ext->vbus); 845 hpt_unlock_vbus(vbus_ext); 846 } 847 848 static void hpt_poll(struct cam_sim *sim) 849 { 850 hpt_pci_intr(cam_sim_softc(sim)); 851 } 852 853 static void hpt_async(void * callback_arg, u_int32_t code, struct cam_path * path, void * arg) 854 { 855 KdPrint(("hpt_async")); 856 } 857 858 static int hpt_shutdown(device_t dev) 859 { 860 KdPrint(("hpt_shutdown(dev=%p)", dev)); 861 return 0; 862 } 863 864 static int hpt_detach(device_t dev) 865 { 866 /* we don't allow the driver to be unloaded. */ 867 return EBUSY; 868 } 869 870 static void hpt_ioctl_done(struct _IOCTL_ARG *arg) 871 { 872 arg->ioctl_cmnd = 0; 873 wakeup(arg); 874 } 875 876 static void __hpt_do_ioctl(PVBUS_EXT vbus_ext, IOCTL_ARG *ioctl_args) 877 { 878 ioctl_args->result = -1; 879 ioctl_args->done = hpt_ioctl_done; 880 ioctl_args->ioctl_cmnd = (void *)1; 881 882 hpt_lock_vbus(vbus_ext); 883 ldm_ioctl((PVBUS)vbus_ext->vbus, ioctl_args); 884 885 while (ioctl_args->ioctl_cmnd) { 886 if (hpt_sleep(vbus_ext, ioctl_args, PPAUSE, "hptctl", HPT_OSM_TIMEOUT)==0) 887 break; 888 ldm_reset_vbus((PVBUS)vbus_ext->vbus); 889 __hpt_do_tasks(vbus_ext); 890 } 891 892 /* KdPrint(("ioctl %x result %d", ioctl_args->dwIoControlCode, ioctl_args->result)); */ 893 894 hpt_unlock_vbus(vbus_ext); 895 } 896 897 static void hpt_do_ioctl(IOCTL_ARG *ioctl_args) 898 { 899 PVBUS vbus; 900 PVBUS_EXT vbus_ext; 901 902 ldm_for_each_vbus(vbus, vbus_ext) { 903 __hpt_do_ioctl(vbus_ext, ioctl_args); 904 if (ioctl_args->result!=HPT_IOCTL_RESULT_WRONG_VBUS) 905 return; 906 } 907 } 908 909 #define HPT_DO_IOCTL(code, inbuf, insize, outbuf, outsize) ({\ 910 IOCTL_ARG arg;\ 911 arg.dwIoControlCode = code;\ 912 arg.lpInBuffer = inbuf;\ 913 arg.lpOutBuffer = outbuf;\ 914 arg.nInBufferSize = insize;\ 915 arg.nOutBufferSize = outsize;\ 916 arg.lpBytesReturned = 0;\ 917 hpt_do_ioctl(&arg);\ 918 arg.result;\ 919 }) 920 921 #define DEVICEID_VALID(id) ((id) && ((HPT_U32)(id)!=0xffffffff)) 922 923 static int hpt_get_logical_devices(DEVICEID * pIds, int nMaxCount) 924 { 925 int i; 926 HPT_U32 count = nMaxCount-1; 927 928 if (HPT_DO_IOCTL(HPT_IOCTL_GET_LOGICAL_DEVICES, 929 &count, sizeof(HPT_U32), pIds, sizeof(DEVICEID)*nMaxCount)) 930 return -1; 931 932 nMaxCount = (int)pIds[0]; 933 for (i=0; i<nMaxCount; i++) pIds[i] = pIds[i+1]; 934 return nMaxCount; 935 } 936 937 static int hpt_get_device_info_v3(DEVICEID id, PLOGICAL_DEVICE_INFO_V3 pInfo) 938 { 939 return HPT_DO_IOCTL(HPT_IOCTL_GET_DEVICE_INFO_V3, 940 &id, sizeof(DEVICEID), pInfo, sizeof(LOGICAL_DEVICE_INFO_V3)); 941 } 942 943 /* not belong to this file logically, but we want to use ioctl interface */ 944 static int __hpt_stop_tasks(PVBUS_EXT vbus_ext, DEVICEID id) 945 { 946 LOGICAL_DEVICE_INFO_V3 devinfo; 947 int i, result; 948 DEVICEID param[2] = { id, 0 }; 949 950 if (hpt_get_device_info_v3(id, &devinfo)) 951 return -1; 952 953 if (devinfo.Type!=LDT_ARRAY) 954 return -1; 955 956 if (devinfo.u.array.Flags & ARRAY_FLAG_REBUILDING) 957 param[1] = AS_REBUILD_ABORT; 958 else if (devinfo.u.array.Flags & ARRAY_FLAG_VERIFYING) 959 param[1] = AS_VERIFY_ABORT; 960 else if (devinfo.u.array.Flags & ARRAY_FLAG_INITIALIZING) 961 param[1] = AS_INITIALIZE_ABORT; 962 else if (devinfo.u.array.Flags & ARRAY_FLAG_TRANSFORMING) 963 param[1] = AS_TRANSFORM_ABORT; 964 else 965 return -1; 966 967 KdPrint(("SET_ARRAY_STATE(%x, %d)", param[0], param[1])); 968 result = HPT_DO_IOCTL(HPT_IOCTL_SET_ARRAY_STATE, 969 param, sizeof(param), 0, 0); 970 971 for (i=0; i<devinfo.u.array.nDisk; i++) 972 if (DEVICEID_VALID(devinfo.u.array.Members[i])) 973 __hpt_stop_tasks(vbus_ext, devinfo.u.array.Members[i]); 974 975 return result; 976 } 977 978 static void hpt_stop_tasks(PVBUS_EXT vbus_ext) 979 { 980 DEVICEID ids[32]; 981 int i, count; 982 983 count = hpt_get_logical_devices((DEVICEID *)&ids, sizeof(ids)/sizeof(ids[0])); 984 985 for (i=0; i<count; i++) 986 __hpt_stop_tasks(vbus_ext, ids[i]); 987 } 988 989 static d_open_t hpt_open; 990 static d_close_t hpt_close; 991 static d_ioctl_t hpt_ioctl; 992 static void hpt_bus_scan_cb(struct cam_periph *periph, union ccb *ccb); 993 static int hpt_rescan_bus(void); 994 995 static struct cdevsw hpt_cdevsw = { 996 .d_open = hpt_open, 997 .d_close = hpt_close, 998 .d_ioctl = hpt_ioctl, 999 .d_name = driver_name, 1000 #if __FreeBSD_version>=503000 1001 .d_version = D_VERSION, 1002 #endif 1003 #if (__FreeBSD_version>=503000 && __FreeBSD_version<600034) 1004 .d_flags = D_NEEDGIANT, 1005 #endif 1006 #if __FreeBSD_version<600034 1007 #if __FreeBSD_version>501000 1008 .d_maj = MAJOR_AUTO, 1009 #else 1010 .d_maj = HPT_DEV_MAJOR, 1011 #endif 1012 #endif 1013 }; 1014 1015 static struct intr_config_hook hpt_ich; 1016 1017 /* 1018 * hpt_final_init will be called after all hpt_attach. 1019 */ 1020 static void hpt_final_init(void *dummy) 1021 { 1022 int i; 1023 PVBUS_EXT vbus_ext; 1024 PVBUS vbus; 1025 PHBA hba; 1026 1027 /* Clear the config hook */ 1028 config_intrhook_disestablish(&hpt_ich); 1029 1030 /* allocate memory */ 1031 i = 0; 1032 ldm_for_each_vbus(vbus, vbus_ext) { 1033 if (hpt_alloc_mem(vbus_ext)) { 1034 os_printk("out of memory"); 1035 return; 1036 } 1037 i++; 1038 } 1039 1040 if (!i) { 1041 if (bootverbose) 1042 os_printk("no controller detected."); 1043 return; 1044 } 1045 1046 /* initializing hardware */ 1047 ldm_for_each_vbus(vbus, vbus_ext) { 1048 /* make timer available here */ 1049 callout_handle_init(&vbus_ext->timer); 1050 if (hpt_init_vbus(vbus_ext)) { 1051 os_printk("fail to initialize hardware"); 1052 break; /* FIXME */ 1053 } 1054 } 1055 1056 /* register CAM interface */ 1057 ldm_for_each_vbus(vbus, vbus_ext) { 1058 struct cam_devq *devq; 1059 struct ccb_setasync ccb; 1060 1061 #if (__FreeBSD_version >= 500000) 1062 mtx_init(&vbus_ext->lock, "hptsleeplock", NULL, MTX_DEF); 1063 #endif 1064 if (bus_dma_tag_create(NULL,/* parent */ 1065 4, /* alignment */ 1066 BUS_SPACE_MAXADDR_32BIT+1, /* boundary */ 1067 BUS_SPACE_MAXADDR, /* lowaddr */ 1068 BUS_SPACE_MAXADDR, /* highaddr */ 1069 NULL, NULL, /* filter, filterarg */ 1070 PAGE_SIZE * (os_max_sg_descriptors-1), /* maxsize */ 1071 os_max_sg_descriptors, /* nsegments */ 1072 0x10000, /* maxsegsize */ 1073 BUS_DMA_WAITOK, /* flags */ 1074 #if __FreeBSD_version>502000 1075 busdma_lock_mutex, /* lockfunc */ 1076 &vbus_ext->lock, /* lockfuncarg */ 1077 #endif 1078 &vbus_ext->io_dmat /* tag */)) 1079 { 1080 return ; 1081 } 1082 1083 for (i=0; i<os_max_queue_comm; i++) { 1084 POS_CMDEXT ext = (POS_CMDEXT)malloc(sizeof(OS_CMDEXT), M_DEVBUF, M_WAITOK); 1085 if (!ext) { 1086 os_printk("Can't alloc cmdext(%d)", i); 1087 return ; 1088 } 1089 ext->vbus_ext = vbus_ext; 1090 ext->next = vbus_ext->cmdext_list; 1091 vbus_ext->cmdext_list = ext; 1092 1093 if (bus_dmamap_create(vbus_ext->io_dmat, 0, &ext->dma_map)) { 1094 os_printk("Can't create dma map(%d)", i); 1095 return ; 1096 } 1097 } 1098 1099 if ((devq = cam_simq_alloc(os_max_queue_comm)) == NULL) { 1100 os_printk("cam_simq_alloc failed"); 1101 return ; 1102 } 1103 1104 #if __FreeBSD_version > 700025 1105 vbus_ext->sim = cam_sim_alloc(hpt_action, hpt_poll, driver_name, 1106 vbus_ext, 0, &Giant, os_max_queue_comm, /*tagged*/8, devq); 1107 #else 1108 vbus_ext->sim = cam_sim_alloc(hpt_action, hpt_poll, driver_name, 1109 vbus_ext, 0, os_max_queue_comm, /*tagged*/8, devq); 1110 #endif 1111 1112 if (!vbus_ext->sim) { 1113 os_printk("cam_sim_alloc failed"); 1114 cam_simq_free(devq); 1115 return ; 1116 } 1117 1118 #if __FreeBSD_version > 700044 1119 if (xpt_bus_register(vbus_ext->sim, NULL, 0) != CAM_SUCCESS) { 1120 #else 1121 if (xpt_bus_register(vbus_ext->sim, 0) != CAM_SUCCESS) { 1122 #endif 1123 os_printk("xpt_bus_register failed"); 1124 cam_sim_free(vbus_ext->sim, /*free devq*/ TRUE); 1125 vbus_ext->sim = NULL; 1126 return ; 1127 } 1128 1129 if (xpt_create_path(&vbus_ext->path, /*periph */ NULL, 1130 cam_sim_path(vbus_ext->sim), CAM_TARGET_WILDCARD, 1131 CAM_LUN_WILDCARD) != CAM_REQ_CMP) 1132 { 1133 os_printk("xpt_create_path failed"); 1134 xpt_bus_deregister(cam_sim_path(vbus_ext->sim)); 1135 cam_sim_free(vbus_ext->sim, /*free_devq*/TRUE); 1136 vbus_ext->sim = NULL; 1137 return ; 1138 } 1139 1140 xpt_setup_ccb(&ccb.ccb_h, vbus_ext->path, /*priority*/5); 1141 ccb.ccb_h.func_code = XPT_SASYNC_CB; 1142 ccb.event_enable = AC_LOST_DEVICE; 1143 ccb.callback = hpt_async; 1144 ccb.callback_arg = vbus_ext; 1145 xpt_action((union ccb *)&ccb); 1146 1147 for (hba = vbus_ext->hba_list; hba; hba = hba->next) { 1148 int rid = 0; 1149 if ((hba->irq_res = bus_alloc_resource(hba->pcidev, 1150 SYS_RES_IRQ, &rid, 0, ~0ul, 1, RF_SHAREABLE | RF_ACTIVE)) == NULL) 1151 { 1152 os_printk("can't allocate interrupt"); 1153 return ; 1154 } 1155 1156 if (bus_setup_intr(hba->pcidev, hba->irq_res, INTR_TYPE_CAM, 1157 #if __FreeBSD_version > 700025 1158 NULL, hpt_pci_intr, vbus_ext, &hba->irq_handle)) 1159 #else 1160 hpt_pci_intr, vbus_ext, &hba->irq_handle)) 1161 #endif 1162 { 1163 os_printk("can't set up interrupt"); 1164 return ; 1165 } 1166 hba->ldm_adapter.him->intr_control(hba->ldm_adapter.him_handle, HPT_TRUE); 1167 } 1168 1169 vbus_ext->shutdown_eh = EVENTHANDLER_REGISTER(shutdown_final, 1170 hpt_shutdown_vbus, vbus_ext, SHUTDOWN_PRI_DEFAULT); 1171 if (!vbus_ext->shutdown_eh) 1172 os_printk("Shutdown event registration failed"); 1173 } 1174 1175 ldm_for_each_vbus(vbus, vbus_ext) { 1176 TASK_INIT(&vbus_ext->worker, 0, (task_fn_t *)hpt_do_tasks, vbus_ext); 1177 if (vbus_ext->tasks) 1178 TASK_ENQUEUE(&vbus_ext->worker); 1179 } 1180 1181 make_dev(&hpt_cdevsw, DRIVER_MINOR, UID_ROOT, GID_OPERATOR, 1182 S_IRUSR | S_IWUSR, driver_name); 1183 } 1184 1185 #if defined(KLD_MODULE) && (__FreeBSD_version >= 503000) 1186 1187 typedef struct driverlink *driverlink_t; 1188 struct driverlink { 1189 kobj_class_t driver; 1190 TAILQ_ENTRY(driverlink) link; /* list of drivers in devclass */ 1191 }; 1192 1193 typedef TAILQ_HEAD(driver_list, driverlink) driver_list_t; 1194 1195 struct devclass { 1196 TAILQ_ENTRY(devclass) link; 1197 devclass_t parent; /* parent in devclass hierarchy */ 1198 driver_list_t drivers; /* bus devclasses store drivers for bus */ 1199 char *name; 1200 device_t *devices; /* array of devices indexed by unit */ 1201 int maxunit; /* size of devices array */ 1202 }; 1203 1204 static void override_kernel_driver(void) 1205 { 1206 driverlink_t dl, dlfirst; 1207 driver_t *tmpdriver; 1208 devclass_t dc = devclass_find("pci"); 1209 1210 if (dc){ 1211 dlfirst = TAILQ_FIRST(&dc->drivers); 1212 for (dl = dlfirst; dl; dl = TAILQ_NEXT(dl, link)) { 1213 if(strcmp(dl->driver->name, driver_name) == 0) { 1214 tmpdriver=dl->driver; 1215 dl->driver=dlfirst->driver; 1216 dlfirst->driver=tmpdriver; 1217 break; 1218 } 1219 } 1220 } 1221 } 1222 1223 #else 1224 #define override_kernel_driver() 1225 #endif 1226 1227 static void hpt_init(void *dummy) 1228 { 1229 if (bootverbose) 1230 os_printk("%s %s", driver_name_long, driver_ver); 1231 1232 override_kernel_driver(); 1233 init_config(); 1234 1235 hpt_ich.ich_func = hpt_final_init; 1236 hpt_ich.ich_arg = NULL; 1237 if (config_intrhook_establish(&hpt_ich) != 0) { 1238 printf("%s: cannot establish configuration hook\n", 1239 driver_name_long); 1240 } 1241 1242 } 1243 SYSINIT(hptinit, SI_SUB_CONFIGURE, SI_ORDER_FIRST, hpt_init, NULL); 1244 1245 /* 1246 * CAM driver interface 1247 */ 1248 static device_method_t driver_methods[] = { 1249 /* Device interface */ 1250 DEVMETHOD(device_probe, hpt_probe), 1251 DEVMETHOD(device_attach, hpt_attach), 1252 DEVMETHOD(device_detach, hpt_detach), 1253 DEVMETHOD(device_shutdown, hpt_shutdown), 1254 { 0, 0 } 1255 }; 1256 1257 static driver_t hpt_pci_driver = { 1258 driver_name, 1259 driver_methods, 1260 sizeof(HBA) 1261 }; 1262 1263 static devclass_t hpt_devclass; 1264 1265 #ifndef TARGETNAME 1266 #error "no TARGETNAME found" 1267 #endif 1268 1269 /* use this to make TARGETNAME be expanded */ 1270 #define __DRIVER_MODULE(p1, p2, p3, p4, p5, p6) DRIVER_MODULE(p1, p2, p3, p4, p5, p6) 1271 #define __MODULE_VERSION(p1, p2) MODULE_VERSION(p1, p2) 1272 #define __MODULE_DEPEND(p1, p2, p3, p4, p5) MODULE_DEPEND(p1, p2, p3, p4, p5) 1273 __DRIVER_MODULE(TARGETNAME, pci, hpt_pci_driver, hpt_devclass, 0, 0); 1274 __MODULE_VERSION(TARGETNAME, 1); 1275 __MODULE_DEPEND(TARGETNAME, cam, 1, 1, 1); 1276 1277 #if __FreeBSD_version>503000 1278 typedef struct cdev * ioctl_dev_t; 1279 #else 1280 typedef dev_t ioctl_dev_t; 1281 #endif 1282 1283 #if __FreeBSD_version >= 500000 1284 typedef struct thread * ioctl_thread_t; 1285 #else 1286 typedef struct proc * ioctl_thread_t; 1287 #endif 1288 1289 static int hpt_open(ioctl_dev_t dev, int flags, int devtype, ioctl_thread_t td) 1290 { 1291 return 0; 1292 } 1293 1294 static int hpt_close(ioctl_dev_t dev, int flags, int devtype, ioctl_thread_t td) 1295 { 1296 return 0; 1297 } 1298 1299 static int hpt_ioctl(ioctl_dev_t dev, u_long cmd, caddr_t data, int fflag, ioctl_thread_t td) 1300 { 1301 PHPT_IOCTL_PARAM piop=(PHPT_IOCTL_PARAM)data; 1302 IOCTL_ARG ioctl_args; 1303 HPT_U32 bytesReturned; 1304 1305 switch (cmd){ 1306 case HPT_DO_IOCONTROL: 1307 { 1308 if (piop->Magic == HPT_IOCTL_MAGIC || piop->Magic == HPT_IOCTL_MAGIC32) { 1309 KdPrint(("ioctl=%x in=%p len=%d out=%p len=%d\n", 1310 piop->dwIoControlCode, 1311 piop->lpInBuffer, 1312 piop->nInBufferSize, 1313 piop->lpOutBuffer, 1314 piop->nOutBufferSize)); 1315 1316 memset(&ioctl_args, 0, sizeof(ioctl_args)); 1317 1318 ioctl_args.dwIoControlCode = piop->dwIoControlCode; 1319 ioctl_args.nInBufferSize = piop->nInBufferSize; 1320 ioctl_args.nOutBufferSize = piop->nOutBufferSize; 1321 ioctl_args.lpBytesReturned = &bytesReturned; 1322 1323 if (ioctl_args.nInBufferSize) { 1324 ioctl_args.lpInBuffer = malloc(ioctl_args.nInBufferSize, M_DEVBUF, M_WAITOK); 1325 if (!ioctl_args.lpInBuffer) 1326 goto invalid; 1327 if (copyin((void*)piop->lpInBuffer, 1328 ioctl_args.lpInBuffer, piop->nInBufferSize)) 1329 goto invalid; 1330 } 1331 1332 if (ioctl_args.nOutBufferSize) { 1333 ioctl_args.lpOutBuffer = malloc(ioctl_args.nOutBufferSize, M_DEVBUF, M_WAITOK); 1334 if (!ioctl_args.lpOutBuffer) 1335 goto invalid; 1336 } 1337 1338 #if (__FreeBSD_version >= 500000) 1339 mtx_lock(&Giant); 1340 #endif 1341 1342 hpt_do_ioctl(&ioctl_args); 1343 1344 #if (__FreeBSD_version >= 500000) 1345 mtx_unlock(&Giant); 1346 #endif 1347 1348 if (ioctl_args.result==HPT_IOCTL_RESULT_OK) { 1349 if (piop->nOutBufferSize) { 1350 if (copyout(ioctl_args.lpOutBuffer, 1351 (void*)piop->lpOutBuffer, piop->nOutBufferSize)) 1352 goto invalid; 1353 } 1354 if (piop->lpBytesReturned) { 1355 if (copyout(&bytesReturned, 1356 (void*)piop->lpBytesReturned, sizeof(HPT_U32))) 1357 goto invalid; 1358 } 1359 if (ioctl_args.lpInBuffer) free(ioctl_args.lpInBuffer, M_DEVBUF); 1360 if (ioctl_args.lpOutBuffer) free(ioctl_args.lpOutBuffer, M_DEVBUF); 1361 return 0; 1362 } 1363 invalid: 1364 if (ioctl_args.lpInBuffer) free(ioctl_args.lpInBuffer, M_DEVBUF); 1365 if (ioctl_args.lpOutBuffer) free(ioctl_args.lpOutBuffer, M_DEVBUF); 1366 return EFAULT; 1367 } 1368 return EFAULT; 1369 } 1370 1371 case HPT_SCAN_BUS: 1372 { 1373 return hpt_rescan_bus(); 1374 } 1375 default: 1376 KdPrint(("invalid command!")); 1377 return EFAULT; 1378 } 1379 1380 } 1381 1382 static int hpt_rescan_bus(void) 1383 { 1384 struct cam_path *path; 1385 union ccb *ccb; 1386 PVBUS vbus; 1387 PVBUS_EXT vbus_ext; 1388 1389 #if (__FreeBSD_version >= 500000) 1390 mtx_lock(&Giant); 1391 #endif 1392 1393 ldm_for_each_vbus(vbus, vbus_ext) { 1394 if (xpt_create_path(&path, xpt_periph, cam_sim_path(vbus_ext->sim), 1395 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) 1396 return(EIO); 1397 if ((ccb = malloc(sizeof(union ccb), M_TEMP, M_WAITOK)) == NULL) 1398 return(ENOMEM); 1399 bzero(ccb, sizeof(union ccb)); 1400 xpt_setup_ccb(&ccb->ccb_h, path, 5); 1401 ccb->ccb_h.func_code = XPT_SCAN_BUS; 1402 ccb->ccb_h.cbfcnp = hpt_bus_scan_cb; 1403 ccb->crcn.flags = CAM_FLAG_NONE; 1404 xpt_action(ccb); 1405 } 1406 1407 #if (__FreeBSD_version >= 500000) 1408 mtx_unlock(&Giant); 1409 #endif 1410 1411 return(0); 1412 } 1413 1414 static void hpt_bus_scan_cb(struct cam_periph *periph, union ccb *ccb) 1415 { 1416 if (ccb->ccb_h.status != CAM_REQ_CMP) 1417 KdPrint(("cam_scan_callback: failure status = %x",ccb->ccb_h.status)); 1418 else 1419 KdPrint(("Scan bus successfully!")); 1420 1421 xpt_free_path(ccb->ccb_h.path); 1422 free(ccb, M_TEMP); 1423 return; 1424 } 1425