1 /*
2  * Copyright (c) HighPoint Technologies, Inc.
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  *
14  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24  * SUCH DAMAGE.
25  */
26 
27 #include <sys/cdefs.h>
28 __FBSDID("$FreeBSD$");
29 
30 #include <dev/hptrr/hptrr_config.h>
31 /* $Id: osm_bsd.c,v 1.27 2007/11/22 07:35:49 gmm Exp $
32  *
33  * HighPoint RAID Driver for FreeBSD
34  * Copyright (C) 2005 HighPoint Technologies, Inc. All Rights Reserved.
35  */
36 #include <dev/hptrr/os_bsd.h>
37 #include <dev/hptrr/hptintf.h>
38 
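/*
 * hw.hptrr.attach_generic is read from the kernel environment at boot.
 * Setting it to a non-zero value (for example, hw.hptrr.attach_generic=1 in
 * /boot/loader.conf) lets the driver also claim supported controllers whose
 * PCI vendor ID is not HighPoint's 0x1103; see hpt_match() below.
 */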
39 static int attach_generic = 0;
40 TUNABLE_INT("hw.hptrr.attach_generic", &attach_generic);
41 
42 static HIM *hpt_match(device_t dev)
43 {
44 	PCI_ID pci_id;
45 	int i;
46 	HIM *him;
47 
48 	/* Some of the supported chips are also used by vendors other than HighPoint. */
49 	if (pci_get_vendor(dev) != 0x1103 && !attach_generic)
50 		return (NULL);
51 	for (him = him_list; him; him = him->next) {
52 		for (i=0; him->get_supported_device_id(i, &pci_id); i++) {
53 			if ((pci_get_vendor(dev) == pci_id.vid) &&
54 				(pci_get_device(dev) == pci_id.did)){
55 				return (him);
56 			}
57 		}
58 	}
59 	return (NULL);
60 }
61 
62 static int hpt_probe(device_t dev)
63 {
64 	HIM *him;
65 
66 	him = hpt_match(dev);
67 	if (him != NULL) {
68 		KdPrint(("hpt_probe: adapter at PCI %d:%d:%d, IRQ %d",
69 			pci_get_bus(dev), pci_get_slot(dev), pci_get_function(dev), pci_get_irq(dev)
70 			    ));
71 		device_set_desc(dev, him->name);
72 		return (BUS_PROBE_DEFAULT);
73 	}
74 
75 	return (ENXIO);
76 }
77 
78 static int hpt_attach(device_t dev)
79 {
80 	PHBA hba = (PHBA)device_get_softc(dev);
81 	HIM *him;
82 	PCI_ID pci_id;
83 	HPT_UINT size;
84 	PVBUS vbus;
85 	PVBUS_EXT vbus_ext;
86 
87 	KdPrint(("hpt_attach(%d/%d/%d)", pci_get_bus(dev), pci_get_slot(dev), pci_get_function(dev)));
88 
89 	him = hpt_match(dev);
90 	hba->ext_type = EXT_TYPE_HBA;
91 	hba->ldm_adapter.him = him;
92 
93 	pci_enable_busmaster(dev);
94 
95 	pci_id.vid = pci_get_vendor(dev);
96 	pci_id.did = pci_get_device(dev);
97 	pci_id.rev = pci_get_revid(dev);
98 
99 	size = him->get_adapter_size(&pci_id);
100 	hba->ldm_adapter.him_handle = malloc(size, M_DEVBUF, M_WAITOK);
101 
102 	hba->pcidev = dev;
103 	hba->pciaddr.tree = 0;
104 	hba->pciaddr.bus = pci_get_bus(dev);
105 	hba->pciaddr.device = pci_get_slot(dev);
106 	hba->pciaddr.function = pci_get_function(dev);
107 
108 	if (!him->create_adapter(&pci_id, hba->pciaddr, hba->ldm_adapter.him_handle, hba)) {
109 		free(hba->ldm_adapter.him_handle, M_DEVBUF);
110 		return ENXIO;
111 	}
112 
113 	os_printk("adapter at PCI %d:%d:%d, IRQ %d",
114 		hba->pciaddr.bus, hba->pciaddr.device, hba->pciaddr.function, pci_get_irq(dev));
115 
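	/*
	 * If the adapter cannot be registered because no matching virtual bus
	 * exists yet, allocate a new VBUS_EXT (plus the LDM's vbus area),
	 * create the vbus, and register the adapter with it.
	 */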
116 	if (!ldm_register_adapter(&hba->ldm_adapter)) {
117 		size = ldm_get_vbus_size();
118 		vbus_ext = malloc(sizeof(VBUS_EXT) + size, M_DEVBUF, M_WAITOK |
119 		    M_ZERO);
120 		vbus_ext->ext_type = EXT_TYPE_VBUS;
121 		ldm_create_vbus((PVBUS)vbus_ext->vbus, vbus_ext);
122 		ldm_register_adapter(&hba->ldm_adapter);
123 	}
124 
125 	ldm_for_each_vbus(vbus, vbus_ext) {
126 		if (hba->ldm_adapter.vbus==vbus) {
127 			hba->vbus_ext = vbus_ext;
128 			hba->next = vbus_ext->hba_list;
129 			vbus_ext->hba_list = hba;
130 			break;
131 		}
132 	}
133 	return 0;
134 }
135 
136 /*
137  * It might be better to use bus_dmamem_alloc(9) to allocate DMA memory,
138  * but that currently has problems (alignment, etc.); see the sketch below.
139  */
140 static __inline void *__get_free_pages(int order)
141 {
142 	/* don't use low memory - other devices may get starved */
143 	return contigmalloc(PAGE_SIZE<<order,
144 			M_DEVBUF, M_WAITOK, BUS_SPACE_MAXADDR_24BIT, BUS_SPACE_MAXADDR, PAGE_SIZE, 0);
145 }
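
/*
 * For reference only: a minimal sketch (not used by this driver) of how a
 * page-sized DMA allocation could be done with bus_dmamem_alloc(9) instead
 * of contigmalloc(9).  get_dma_pages() is a hypothetical helper and the tag
 * parameters (alignment, address range) are illustrative assumptions, not
 * what this driver actually requires.
 */
#if 0
static __inline void *get_dma_pages(bus_dma_tag_t parent, int order,
	bus_dma_tag_t *tag, bus_dmamap_t *map)
{
	void *vaddr;

	/* One segment, PAGE_SIZE aligned, anywhere in the DMA-able range. */
	if (bus_dma_tag_create(parent, PAGE_SIZE, 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
	    PAGE_SIZE << order, 1, PAGE_SIZE << order,
	    0, NULL, NULL, tag))
		return NULL;
	if (bus_dmamem_alloc(*tag, &vaddr, BUS_DMA_WAITOK | BUS_DMA_ZERO, map)) {
		bus_dma_tag_destroy(*tag);
		return NULL;
	}
	/*
	 * The caller must keep *tag and *map in order to release the memory
	 * later with bus_dmamem_free() and bus_dma_tag_destroy().
	 */
	return vaddr;
}
#endif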
146 
147 static __inline void free_pages(void *p, int order)
148 {
149 	contigfree(p, PAGE_SIZE<<order, M_DEVBUF);
150 }
151 
152 static int hpt_alloc_mem(PVBUS_EXT vbus_ext)
153 {
154 	PHBA hba;
155 	struct freelist *f;
156 	HPT_UINT i;
157 	void **p;
158 
159 	for (hba = vbus_ext->hba_list; hba; hba = hba->next)
160 		hba->ldm_adapter.him->get_meminfo(hba->ldm_adapter.him_handle);
161 
162 	ldm_get_mem_info((PVBUS)vbus_ext->vbus, 0);
163 
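	/*
	 * Non-DMA freelists: preallocate f->count blocks of f->size bytes and
	 * thread them onto f->head; the first pointer-sized word of each free
	 * block links to the next one.
	 */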
164 	for (f=vbus_ext->freelist_head; f; f=f->next) {
165 		KdPrint(("%s: %d*%d=%d bytes",
166 			f->tag, f->count, f->size, f->count*f->size));
167 		for (i=0; i<f->count; i++) {
168 			p = (void **)malloc(f->size, M_DEVBUF, M_WAITOK);
169 			if (!p)	return (ENXIO);
170 			*p = f->head;
171 			f->head = p;
172 		}
173 	}
174 
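	/*
	 * DMA freelists: allocate physically contiguous runs of pages (the
	 * smallest power-of-two number of pages that holds one block), carve
	 * each run into f->size blocks, and stash each block's bus address in
	 * the word that follows its next pointer.
	 */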
175 	for (f=vbus_ext->freelist_dma_head; f; f=f->next) {
176 		int order, size, j;
177 
178 		HPT_ASSERT((f->size & (f->alignment-1))==0);
179 
180 		for (order=0, size=PAGE_SIZE; size<f->size; order++, size<<=1)
181 			;
182 
183 		KdPrint(("%s: %d*%d=%d bytes, order %d",
184 			f->tag, f->count, f->size, f->count*f->size, order));
185 		HPT_ASSERT(f->alignment<=PAGE_SIZE);
186 
187 		for (i=0; i<f->count;) {
188 			p = (void **)__get_free_pages(order);
189 			if (!p) return -1;
190 			for (j = size/f->size; j && i<f->count; i++,j--) {
191 				*p = f->head;
192 				*(BUS_ADDRESS *)(p+1) = (BUS_ADDRESS)vtophys(p);
193 				f->head = p;
194 				p = (void **)((unsigned long)p + f->size);
195 			}
196 		}
197 	}
198 
199 	HPT_ASSERT(PAGE_SIZE==DMAPOOL_PAGE_SIZE);
200 
201 	for (i=0; i<os_max_cache_pages; i++) {
202 		p = (void **)__get_free_pages(0);
203 		if (!p) return -1;
204 		HPT_ASSERT(((HPT_UPTR)p & (DMAPOOL_PAGE_SIZE-1))==0);
205 		dmapool_put_page((PVBUS)vbus_ext->vbus, p, (BUS_ADDRESS)vtophys(p));
206 	}
207 
208 	return 0;
209 }
210 
211 static void hpt_free_mem(PVBUS_EXT vbus_ext)
212 {
213 	struct freelist *f;
214 	void *p;
215 	int i;
216 	BUS_ADDRESS bus;
217 
218 	for (f=vbus_ext->freelist_head; f; f=f->next) {
219 #if DBG
220 		if (f->count!=f->reserved_count) {
221 			KdPrint(("memory leak for freelist %s (%d/%d)", f->tag, f->count, f->reserved_count));
222 		}
223 #endif
224 		while ((p=freelist_get(f)))
225 			free(p, M_DEVBUF);
226 	}
227 
228 	for (i=0; i<os_max_cache_pages; i++) {
229 		p = dmapool_get_page((PVBUS)vbus_ext->vbus, &bus);
230 		HPT_ASSERT(p);
231 		free_pages(p, 0);
232 	}
233 
234 	for (f=vbus_ext->freelist_dma_head; f; f=f->next) {
235 		int order, size;
236 #if DBG
237 		if (f->count!=f->reserved_count) {
238 			KdPrint(("memory leak for dma freelist %s (%d/%d)", f->tag, f->count, f->reserved_count));
239 		}
240 #endif
241 		for (order=0, size=PAGE_SIZE; size<f->size; order++, size<<=1) ;
242 
243 		while ((p=freelist_get_dma(f, &bus))) {
244 			if (order)
245 				free_pages(p, order);
246 			else {
247 				/* can't free immediately since other blocks in this page may still be in the list */
248 				if (((HPT_UPTR)p & (PAGE_SIZE-1))==0)
249 					dmapool_put_page((PVBUS)vbus_ext->vbus, p, bus);
250 			}
251 		}
252 	}
253 
254 	while ((p = dmapool_get_page((PVBUS)vbus_ext->vbus, &bus)))
255 		free_pages(p, 0);
256 }
257 
258 static int hpt_init_vbus(PVBUS_EXT vbus_ext)
259 {
260 	PHBA hba;
261 
262 	for (hba = vbus_ext->hba_list; hba; hba = hba->next)
263 		if (!hba->ldm_adapter.him->initialize(hba->ldm_adapter.him_handle)) {
264 			KdPrint(("failed to initialize %p", hba));
265 			return -1;
266 		}
267 
268 	ldm_initialize_vbus((PVBUS)vbus_ext->vbus, &vbus_ext->hba_list->ldm_adapter);
269 	return 0;
270 }
271 
272 static void hpt_flush_done(PCOMMAND pCmd)
273 {
274 	PVDEV vd = pCmd->target;
275 
276 	if (mIsArray(vd->type) && vd->u.array.transform && vd!=vd->u.array.transform->target) {
277 		vd = vd->u.array.transform->target;
278 		HPT_ASSERT(vd);
279 		pCmd->target = vd;
280 		pCmd->Result = RETURN_PENDING;
281 		vdev_queue_cmd(pCmd);
282 		return;
283 	}
284 
285 	*(int *)pCmd->priv = 1;
286 	wakeup(pCmd);
287 }
288 
289 /*
290  * flush a vdev (without retry).
291  */
292 static int hpt_flush_vdev(PVBUS_EXT vbus_ext, PVDEV vd)
293 {
294 	PCOMMAND pCmd;
295 	int result = 0, done;
296 	HPT_UINT count;
297 
298 	KdPrint(("flushing dev %p", vd));
299 
300 	hpt_assert_vbus_locked(vbus_ext);
301 
302 	if (mIsArray(vd->type) && vd->u.array.transform)
303 		count = max(vd->u.array.transform->source->cmds_per_request,
304 					vd->u.array.transform->target->cmds_per_request);
305 	else
306 		count = vd->cmds_per_request;
307 
308 	pCmd = ldm_alloc_cmds(vd->vbus, count);
309 
310 	if (!pCmd) {
311 		return -1;
312 	}
313 
314 	pCmd->type = CMD_TYPE_FLUSH;
315 	pCmd->flags.hard_flush = 1;
316 	pCmd->target = vd;
317 	pCmd->done = hpt_flush_done;
318 	done = 0;
319 	pCmd->priv = &done;
320 
321 	ldm_queue_cmd(pCmd);
322 
323 	if (!done) {
324 		while (hpt_sleep(vbus_ext, pCmd, PPAUSE, "hptfls", HPT_OSM_TIMEOUT)) {
325 			ldm_reset_vbus(vd->vbus);
326 		}
327 	}
328 
329 	KdPrint(("flush result %d", pCmd->Result));
330 
331 	if (pCmd->Result!=RETURN_SUCCESS)
332 		result = -1;
333 
334 	ldm_free_cmds(pCmd);
335 
336 	return result;
337 }
338 
339 static void hpt_stop_tasks(PVBUS_EXT vbus_ext);
340 static void hpt_shutdown_vbus(PVBUS_EXT vbus_ext, int howto)
341 {
342 	PVBUS     vbus = (PVBUS)vbus_ext->vbus;
343 	PHBA hba;
344 	int i;
345 
346 	KdPrint(("hpt_shutdown_vbus"));
347 
348 	/* stop all ctl tasks and disable the worker taskqueue */
349 	hpt_stop_tasks(vbus_ext);
350 	hpt_lock_vbus(vbus_ext);
351 	vbus_ext->worker.ta_context = 0;
352 
353 	/* flush devices */
354 	for (i=0; i<osm_max_targets; i++) {
355 		PVDEV vd = ldm_find_target(vbus, i);
356 		if (vd) {
357 			/* retry once */
358 			if (hpt_flush_vdev(vbus_ext, vd))
359 				hpt_flush_vdev(vbus_ext, vd);
360 		}
361 	}
362 
363 	ldm_shutdown(vbus);
364 	hpt_unlock_vbus(vbus_ext);
365 
366 	ldm_release_vbus(vbus);
367 
368 	for (hba=vbus_ext->hba_list; hba; hba=hba->next)
369 		bus_teardown_intr(hba->pcidev, hba->irq_res, hba->irq_handle);
370 
371 	hpt_free_mem(vbus_ext);
372 
373 	while ((hba=vbus_ext->hba_list)) {
374 		vbus_ext->hba_list = hba->next;
375 		free(hba->ldm_adapter.him_handle, M_DEVBUF);
376 	}
377 
378 	callout_drain(&vbus_ext->timer);
379 	mtx_destroy(&vbus_ext->lock);
380 	free(vbus_ext, M_DEVBUF);
381 	KdPrint(("hpt_shutdown_vbus done"));
382 }
383 
384 static void __hpt_do_tasks(PVBUS_EXT vbus_ext)
385 {
386 	OSM_TASK *tasks;
387 
388 	tasks = vbus_ext->tasks;
389 	vbus_ext->tasks = 0;
390 
391 	while (tasks) {
392 		OSM_TASK *t = tasks;
393 		tasks = t->next;
394 		t->next = 0;
395 		t->func(vbus_ext->vbus, t->data);
396 	}
397 }
398 
399 static void hpt_do_tasks(PVBUS_EXT vbus_ext, int pending)
400 {
401 	if(vbus_ext){
402 		hpt_lock_vbus(vbus_ext);
403 		__hpt_do_tasks(vbus_ext);
404 		hpt_unlock_vbus(vbus_ext);
405 	}
406 }
407 
408 static void hpt_action(struct cam_sim *sim, union ccb *ccb);
409 static void hpt_poll(struct cam_sim *sim);
410 static void hpt_async(void * callback_arg, u_int32_t code, struct cam_path * path, void * arg);
411 static void hpt_pci_intr(void *arg);
412 
413 static __inline POS_CMDEXT cmdext_get(PVBUS_EXT vbus_ext)
414 {
415 	POS_CMDEXT p = vbus_ext->cmdext_list;
416 	if (p)
417 		vbus_ext->cmdext_list = p->next;
418 	return p;
419 }
420 
421 static __inline void cmdext_put(POS_CMDEXT p)
422 {
423 	p->next = p->vbus_ext->cmdext_list;
424 	p->vbus_ext->cmdext_list = p;
425 }
426 
427 static void hpt_timeout(void *arg)
428 {
429 	PCOMMAND pCmd = (PCOMMAND)arg;
430 	POS_CMDEXT ext = (POS_CMDEXT)pCmd->priv;
431 
432 	KdPrint(("pCmd %p timeout", pCmd));
433 
434 	ldm_reset_vbus((PVBUS)ext->vbus_ext->vbus);
435 }
436 
437 static void os_cmddone(PCOMMAND pCmd)
438 {
439 	POS_CMDEXT ext = (POS_CMDEXT)pCmd->priv;
440 	union ccb *ccb = ext->ccb;
441 
442 	KdPrint(("os_cmddone(%p, %d)", pCmd, pCmd->Result));
443 
444 	callout_stop(&ext->timeout);
445 
446 	switch(pCmd->Result) {
447 	case RETURN_SUCCESS:
448 		ccb->ccb_h.status = CAM_REQ_CMP;
449 		break;
450 	case RETURN_BAD_DEVICE:
451 		ccb->ccb_h.status = CAM_DEV_NOT_THERE;
452 		break;
453 	case RETURN_DEVICE_BUSY:
454 		ccb->ccb_h.status = CAM_BUSY;
455 		break;
456 	case RETURN_INVALID_REQUEST:
457 		ccb->ccb_h.status = CAM_REQ_INVALID;
458 		break;
459 	case RETURN_SELECTION_TIMEOUT:
460 		ccb->ccb_h.status = CAM_SEL_TIMEOUT;
461 		break;
462 	case RETURN_RETRY:
463 		ccb->ccb_h.status = CAM_BUSY;
464 		break;
465 	default:
466 		ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
467 		break;
468 	}
469 
470 	if (pCmd->flags.data_in) {
471 		bus_dmamap_sync(ext->vbus_ext->io_dmat, ext->dma_map, BUS_DMASYNC_POSTREAD);
472 	}
473 	else if (pCmd->flags.data_out) {
474 		bus_dmamap_sync(ext->vbus_ext->io_dmat, ext->dma_map, BUS_DMASYNC_POSTWRITE);
475 	}
476 
477 	bus_dmamap_unload(ext->vbus_ext->io_dmat, ext->dma_map);
478 
479 	cmdext_put(ext);
480 	ldm_free_cmds(pCmd);
481 	xpt_done(ccb);
482 }
483 
484 static int os_buildsgl(PCOMMAND pCmd, PSG pSg, int logical)
485 {
486 	POS_CMDEXT ext = (POS_CMDEXT)pCmd->priv;
487 	union ccb *ccb = ext->ccb;
488 
489 	if (logical) {
490 		os_set_sgptr(pSg, (HPT_U8 *)ccb->csio.data_ptr);
491 		pSg->size = ccb->csio.dxfer_len;
492 		pSg->eot = 1;
493 		return TRUE;
494 	}
495 
496 	/* We always provide a physical SG list via the busdma callback, so the core should never ask us to build one here. */
497 	HPT_ASSERT(0);
498 	return FALSE;
499 }
500 
501 static void hpt_io_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
502 {
503 	PCOMMAND pCmd = (PCOMMAND)arg;
504 	POS_CMDEXT ext = (POS_CMDEXT)pCmd->priv;
505 	PSG psg = pCmd->psg;
506 	int idx;
507 
508 	HPT_ASSERT(pCmd->flags.physical_sg);
509 
510 	if (error)
511 		panic("busdma error");
512 
513 	HPT_ASSERT(nsegs<=os_max_sg_descriptors);
514 
515 	if (nsegs != 0) {
516 		for (idx = 0; idx < nsegs; idx++, psg++) {
517 			psg->addr.bus = segs[idx].ds_addr;
518 			psg->size = segs[idx].ds_len;
519 			psg->eot = 0;
520 		}
521 		psg[-1].eot = 1;
522 
523 		if (pCmd->flags.data_in) {
524 			bus_dmamap_sync(ext->vbus_ext->io_dmat, ext->dma_map,
525 			    BUS_DMASYNC_PREREAD);
526 		}
527 		else if (pCmd->flags.data_out) {
528 			bus_dmamap_sync(ext->vbus_ext->io_dmat, ext->dma_map,
529 			    BUS_DMASYNC_PREWRITE);
530 		}
531 	}
532 	callout_reset(&ext->timeout, HPT_OSM_TIMEOUT, hpt_timeout, pCmd);
533 	ldm_queue_cmd(pCmd);
534 }
535 
536 static void hpt_scsi_io(PVBUS_EXT vbus_ext, union ccb *ccb)
537 {
538 	PVBUS vbus = (PVBUS)vbus_ext->vbus;
539 	PVDEV vd;
540 	PCOMMAND pCmd;
541 	POS_CMDEXT ext;
542 	HPT_U8 *cdb;
543 
544 	if (ccb->ccb_h.flags & CAM_CDB_POINTER)
545 		cdb = ccb->csio.cdb_io.cdb_ptr;
546 	else
547 		cdb = ccb->csio.cdb_io.cdb_bytes;
548 
549 	KdPrint(("hpt_scsi_io: ccb %x id %d lun %d cdb %x-%x-%x",
550 		ccb,
551 		ccb->ccb_h.target_id, ccb->ccb_h.target_lun,
552 		*(HPT_U32 *)&cdb[0], *(HPT_U32 *)&cdb[4], *(HPT_U32 *)&cdb[8]
553 	));
554 
555 	/* ccb->ccb_h.path_id is not our bus id - don't check it */
556 	if (ccb->ccb_h.target_lun != 0 ||
557 		ccb->ccb_h.target_id >= osm_max_targets ||
558 		(ccb->ccb_h.flags & CAM_CDB_PHYS))
559 	{
560 		ccb->ccb_h.status = CAM_TID_INVALID;
561 		xpt_done(ccb);
562 		return;
563 	}
564 
565 	vd = ldm_find_target(vbus, ccb->ccb_h.target_id);
566 
567 	if (!vd) {
568 		ccb->ccb_h.status = CAM_SEL_TIMEOUT;
569 		xpt_done(ccb);
570 		return;
571 	}
572 
573 	switch (cdb[0]) {
574 	case TEST_UNIT_READY:
575 	case START_STOP_UNIT:
576 	case SYNCHRONIZE_CACHE:
577 		ccb->ccb_h.status = CAM_REQ_CMP;
578 		break;
579 
580 	case INQUIRY:
581 		{
582 			PINQUIRYDATA inquiryData;
583 			memset(ccb->csio.data_ptr, 0, ccb->csio.dxfer_len);
584 			inquiryData = (PINQUIRYDATA)ccb->csio.data_ptr;
585 
586 			inquiryData->AdditionalLength = 31;
587 			inquiryData->CommandQueue = 1;
588 			memcpy(&inquiryData->VendorId, "HPT     ", 8);
589 			memcpy(&inquiryData->ProductId, "DISK 0_0        ", 16);
590 
591 			if (vd->target_id / 10) {
592 				inquiryData->ProductId[7] = (vd->target_id % 100) / 10 + '0';
593 				inquiryData->ProductId[8] = (vd->target_id % 100) % 10 + '0';
594 			}
595 			else
596 				inquiryData->ProductId[7] = (vd->target_id % 100) % 10 + '0';
597 
598 			memcpy(&inquiryData->ProductRevisionLevel, "4.00", 4);
599 
600 			ccb->ccb_h.status = CAM_REQ_CMP;
601 		}
602 		break;
603 
604 	case READ_CAPACITY:
605 	{
606 		HPT_U8 *rbuf = ccb->csio.data_ptr;
607 		HPT_U32 cap;
608 
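		/*
		 * READ CAPACITY(10) response: bytes 0-3 hold the last LBA in
		 * big-endian order (clamped to 0xffffffff), bytes 4-7 the
		 * block length (0x200 = 512 bytes).
		 */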
609 		if (vd->capacity>0xfffffffful)
610 			cap = 0xfffffffful;
611 		else
612 			cap = vd->capacity - 1;
613 
614 		rbuf[0] = (HPT_U8)(cap>>24);
615 		rbuf[1] = (HPT_U8)(cap>>16);
616 		rbuf[2] = (HPT_U8)(cap>>8);
617 		rbuf[3] = (HPT_U8)cap;
618 		rbuf[4] = 0;
619 		rbuf[5] = 0;
620 		rbuf[6] = 2;
621 		rbuf[7] = 0;
622 
623 		ccb->ccb_h.status = CAM_REQ_CMP;
624 		break;
625 	}
626 
627 	case SERVICE_ACTION_IN:
628 	{
629 		HPT_U8 *rbuf = ccb->csio.data_ptr;
630 		HPT_U64	cap = vd->capacity - 1;
631 
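		/*
		 * Treated as READ CAPACITY(16): bytes 0-7 hold the last LBA in
		 * big-endian order, bytes 8-11 the block length (512 bytes).
		 */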
632 		rbuf[0] = (HPT_U8)(cap>>56);
633 		rbuf[1] = (HPT_U8)(cap>>48);
634 		rbuf[2] = (HPT_U8)(cap>>40);
635 		rbuf[3] = (HPT_U8)(cap>>32);
636 		rbuf[4] = (HPT_U8)(cap>>24);
637 		rbuf[5] = (HPT_U8)(cap>>16);
638 		rbuf[6] = (HPT_U8)(cap>>8);
639 		rbuf[7] = (HPT_U8)cap;
640 		rbuf[8] = 0;
641 		rbuf[9] = 0;
642 		rbuf[10] = 2;
643 		rbuf[11] = 0;
644 
645 		ccb->ccb_h.status = CAM_REQ_CMP;
646 		break;
647 	}
648 
649 	case READ_6:
650 	case READ_10:
651 	case READ_16:
652 	case WRITE_6:
653 	case WRITE_10:
654 	case WRITE_16:
655 	case 0x13:
656 	case 0x2f:
657 	{
658 		int error;
659 
660 		pCmd = ldm_alloc_cmds(vbus, vd->cmds_per_request);
661 		if(!pCmd){
662 			KdPrint(("Failed to allocate command!"));
663 			ccb->ccb_h.status = CAM_BUSY;
664 			break;
665 		}
666 
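		/*
		 * Decode the LBA and transfer length from the CDB:
		 * READ/WRITE(6) carry a 21-bit LBA and an 8-bit count,
		 * READ/WRITE(16) a 64-bit LBA and a 16-bit count, and the
		 * remaining (10-byte) forms a 32-bit LBA and a 16-bit count.
		 */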
667 		switch (cdb[0])	{
668 		case READ_6:
669 		case WRITE_6:
670 		case 0x13:
671 			pCmd->uCmd.Ide.Lba =  ((HPT_U32)cdb[1] << 16) | ((HPT_U32)cdb[2] << 8) | (HPT_U32)cdb[3];
672 			pCmd->uCmd.Ide.nSectors = (HPT_U16) cdb[4];
673 			break;
674 		case READ_16:
675 		case WRITE_16:
676 		{
677 			HPT_U64 block =
678 				((HPT_U64)cdb[2]<<56) |
679 				((HPT_U64)cdb[3]<<48) |
680 				((HPT_U64)cdb[4]<<40) |
681 				((HPT_U64)cdb[5]<<32) |
682 				((HPT_U64)cdb[6]<<24) |
683 				((HPT_U64)cdb[7]<<16) |
684 				((HPT_U64)cdb[8]<<8) |
685 				((HPT_U64)cdb[9]);
686 			pCmd->uCmd.Ide.Lba = block;
687 			pCmd->uCmd.Ide.nSectors = (HPT_U16)cdb[13] | ((HPT_U16)cdb[12]<<8);
688 			break;
689 		}
690 
691 		default:
692 			pCmd->uCmd.Ide.Lba = (HPT_U32)cdb[5] | ((HPT_U32)cdb[4] << 8) | ((HPT_U32)cdb[3] << 16) | ((HPT_U32)cdb[2] << 24);
693 			pCmd->uCmd.Ide.nSectors = (HPT_U16) cdb[8] | ((HPT_U16)cdb[7]<<8);
694 			break;
695 		}
696 
697 		switch (cdb[0]) {
698 		case READ_6:
699 		case READ_10:
700 		case READ_16:
701 			pCmd->flags.data_in = 1;
702 			break;
703 		case WRITE_6:
704 		case WRITE_10:
705 		case WRITE_16:
706 			pCmd->flags.data_out = 1;
707 			break;
708 		}
709 		pCmd->priv = ext = cmdext_get(vbus_ext);
710 		HPT_ASSERT(ext);
711 		ext->ccb = ccb;
712 		pCmd->target = vd;
713 		pCmd->done = os_cmddone;
714 		pCmd->buildsgl = os_buildsgl;
715 		pCmd->psg = ext->psg;
716 		pCmd->flags.physical_sg = 1;
717 		error = bus_dmamap_load_ccb(vbus_ext->io_dmat,
718 		    ext->dma_map,
719 		    ccb,
720 		    hpt_io_dmamap_callback,
721 		    pCmd,
722 		    BUS_DMA_WAITOK);
723 		KdPrint(("bus_dmamap_load return %d", error));
724 		if (error && error!=EINPROGRESS) {
725 			os_printk("bus_dmamap_load error %d", error);
726 			cmdext_put(ext);
727 			ldm_free_cmds(pCmd);
728 			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
729 			xpt_done(ccb);
730 		}
731 		return;
732 	}
733 
734 	default:
735 		ccb->ccb_h.status = CAM_REQ_INVALID;
736 		break;
737 	}
738 
739 	xpt_done(ccb);
740 	return;
741 }
742 
743 static void hpt_action(struct cam_sim *sim, union ccb *ccb)
744 {
745 	PVBUS_EXT vbus_ext = (PVBUS_EXT)cam_sim_softc(sim);
746 
747 	KdPrint(("hpt_action(fn=%d, id=%d)", ccb->ccb_h.func_code, ccb->ccb_h.target_id));
748 
749 	hpt_assert_vbus_locked(vbus_ext);
750 	switch (ccb->ccb_h.func_code) {
751 
752 	case XPT_SCSI_IO:
753 		hpt_scsi_io(vbus_ext, ccb);
754 		return;
755 
756 	case XPT_RESET_BUS:
757 		ldm_reset_vbus((PVBUS)vbus_ext->vbus);
758 		break;
759 
760 	case XPT_GET_TRAN_SETTINGS:
761 	case XPT_SET_TRAN_SETTINGS:
762 		ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
763 		break;
764 
765 	case XPT_CALC_GEOMETRY:
766 		cam_calc_geometry(&ccb->ccg, 1);
767 		break;
768 
769 	case XPT_PATH_INQ:
770 	{
771 		struct ccb_pathinq *cpi = &ccb->cpi;
772 
773 		cpi->version_num = 1;
774 		cpi->hba_inquiry = PI_SDTR_ABLE;
775 		cpi->target_sprt = 0;
776 		cpi->hba_misc = PIM_NOBUSRESET;
777 		cpi->hba_eng_cnt = 0;
778 		cpi->max_target = osm_max_targets;
779 		cpi->max_lun = 0;
780 		cpi->unit_number = cam_sim_unit(sim);
781 		cpi->bus_id = cam_sim_bus(sim);
782 		cpi->initiator_id = osm_max_targets;
783 		cpi->base_transfer_speed = 3300;
784 
785 		strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
786 		strlcpy(cpi->hba_vid, "HPT   ", HBA_IDLEN);
787 		strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
788 		cpi->transport = XPORT_SPI;
789 		cpi->transport_version = 2;
790 		cpi->protocol = PROTO_SCSI;
791 		cpi->protocol_version = SCSI_REV_2;
792 		cpi->ccb_h.status = CAM_REQ_CMP;
793 		break;
794 	}
795 
796 	default:
797 		ccb->ccb_h.status = CAM_REQ_INVALID;
798 		break;
799 	}
800 
801 	xpt_done(ccb);
802 	return;
803 }
804 
805 static void hpt_pci_intr(void *arg)
806 {
807 	PVBUS_EXT vbus_ext = (PVBUS_EXT)arg;
808 	hpt_lock_vbus(vbus_ext);
809 	ldm_intr((PVBUS)vbus_ext->vbus);
810 	hpt_unlock_vbus(vbus_ext);
811 }
812 
813 static void hpt_poll(struct cam_sim *sim)
814 {
815 	PVBUS_EXT vbus_ext = cam_sim_softc(sim);
816 	hpt_assert_vbus_locked(vbus_ext);
817 	ldm_intr((PVBUS)vbus_ext->vbus);
818 }
819 
820 static void hpt_async(void * callback_arg, u_int32_t code, struct cam_path * path, void * arg)
821 {
822 	KdPrint(("hpt_async"));
823 }
824 
825 static int hpt_shutdown(device_t dev)
826 {
827 	KdPrint(("hpt_shutdown(dev=%p)", dev));
828 	return 0;
829 }
830 
831 static int hpt_detach(device_t dev)
832 {
833 	/* we don't allow the driver to be unloaded. */
834 	return EBUSY;
835 }
836 
837 static void hpt_ioctl_done(struct _IOCTL_ARG *arg)
838 {
839 	arg->ioctl_cmnd = 0;
840 	wakeup(arg);
841 }
842 
843 static void __hpt_do_ioctl(PVBUS_EXT vbus_ext, IOCTL_ARG *ioctl_args)
844 {
845 	ioctl_args->result = -1;
846 	ioctl_args->done = hpt_ioctl_done;
847 	ioctl_args->ioctl_cmnd = (void *)1;
848 
849 	hpt_lock_vbus(vbus_ext);
850 	ldm_ioctl((PVBUS)vbus_ext->vbus, ioctl_args);
851 
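	/*
	 * Wait for the ioctl to complete.  hpt_sleep() returns 0 when we are
	 * woken by hpt_ioctl_done(); on timeout, reset the virtual bus, run
	 * any deferred tasks, and keep waiting.
	 */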
852 	while (ioctl_args->ioctl_cmnd) {
853 		if (hpt_sleep(vbus_ext, ioctl_args, PPAUSE, "hptctl", HPT_OSM_TIMEOUT)==0)
854 			break;
855 		ldm_reset_vbus((PVBUS)vbus_ext->vbus);
856 		__hpt_do_tasks(vbus_ext);
857 	}
858 
859 	/* KdPrint(("ioctl %x result %d", ioctl_args->dwIoControlCode, ioctl_args->result)); */
860 
861 	hpt_unlock_vbus(vbus_ext);
862 }
863 
864 static void hpt_do_ioctl(IOCTL_ARG *ioctl_args)
865 {
866 	PVBUS vbus;
867 	PVBUS_EXT vbus_ext;
868 
869 	ldm_for_each_vbus(vbus, vbus_ext) {
870 		__hpt_do_ioctl(vbus_ext, ioctl_args);
871 		if (ioctl_args->result!=HPT_IOCTL_RESULT_WRONG_VBUS)
872 			return;
873 	}
874 }
875 
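/*
 * Statement-expression helper: build an IOCTL_ARG on the stack, run it
 * through hpt_do_ioctl(), and evaluate to arg.result.
 */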
876 #define HPT_DO_IOCTL(code, inbuf, insize, outbuf, outsize) ({\
877 	IOCTL_ARG arg;\
878 	arg.dwIoControlCode = code;\
879 	arg.lpInBuffer = inbuf;\
880 	arg.lpOutBuffer = outbuf;\
881 	arg.nInBufferSize = insize;\
882 	arg.nOutBufferSize = outsize;\
883 	arg.lpBytesReturned = 0;\
884 	hpt_do_ioctl(&arg);\
885 	arg.result;\
886 })
887 
888 #define DEVICEID_VALID(id) ((id) && ((HPT_U32)(id)!=0xffffffff))
889 
890 static int hpt_get_logical_devices(DEVICEID * pIds, int nMaxCount)
891 {
892 	int i;
893 	HPT_U32 count = nMaxCount-1;
894 
895 	if (HPT_DO_IOCTL(HPT_IOCTL_GET_LOGICAL_DEVICES,
896 			&count, sizeof(HPT_U32), pIds, sizeof(DEVICEID)*nMaxCount))
897 		return -1;
898 
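	/*
	 * On success the ioctl stores the number of IDs in pIds[0]; shift the
	 * IDs down one slot and return the count.
	 */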
899 	nMaxCount = (int)pIds[0];
900 	for (i=0; i<nMaxCount; i++) pIds[i] = pIds[i+1];
901 	return nMaxCount;
902 }
903 
904 static int hpt_get_device_info_v3(DEVICEID id, PLOGICAL_DEVICE_INFO_V3 pInfo)
905 {
906 	return HPT_DO_IOCTL(HPT_IOCTL_GET_DEVICE_INFO_V3,
907 				&id, sizeof(DEVICEID), pInfo, sizeof(LOGICAL_DEVICE_INFO_V3));
908 }
909 
910 /* This does not logically belong in this file, but we want to use the ioctl interface. */
911 static int __hpt_stop_tasks(PVBUS_EXT vbus_ext, DEVICEID id)
912 {
913 	LOGICAL_DEVICE_INFO_V3 devinfo;
914 	int i, result;
915 	DEVICEID param[2] = { id, 0 };
916 
917 	if (hpt_get_device_info_v3(id, &devinfo))
918 		return -1;
919 
920 	if (devinfo.Type!=LDT_ARRAY)
921 		return -1;
922 
923 	if (devinfo.u.array.Flags & ARRAY_FLAG_REBUILDING)
924 		param[1] = AS_REBUILD_ABORT;
925 	else if (devinfo.u.array.Flags & ARRAY_FLAG_VERIFYING)
926 		param[1] = AS_VERIFY_ABORT;
927 	else if (devinfo.u.array.Flags & ARRAY_FLAG_INITIALIZING)
928 		param[1] = AS_INITIALIZE_ABORT;
929 	else if (devinfo.u.array.Flags & ARRAY_FLAG_TRANSFORMING)
930 		param[1] = AS_TRANSFORM_ABORT;
931 	else
932 		return -1;
933 
934 	KdPrint(("SET_ARRAY_STATE(%x, %d)", param[0], param[1]));
935 	result = HPT_DO_IOCTL(HPT_IOCTL_SET_ARRAY_STATE,
936 				param, sizeof(param), 0, 0);
937 
938 	for (i=0; i<devinfo.u.array.nDisk; i++)
939 		if (DEVICEID_VALID(devinfo.u.array.Members[i]))
940 			__hpt_stop_tasks(vbus_ext, devinfo.u.array.Members[i]);
941 
942 	return result;
943 }
944 
945 static void hpt_stop_tasks(PVBUS_EXT vbus_ext)
946 {
947 	DEVICEID ids[32];
948 	int i, count;
949 
950 	count = hpt_get_logical_devices((DEVICEID *)&ids, sizeof(ids)/sizeof(ids[0]));
951 
952 	for (i=0; i<count; i++)
953 		__hpt_stop_tasks(vbus_ext, ids[i]);
954 }
955 
956 static	d_open_t	hpt_open;
957 static	d_close_t	hpt_close;
958 static	d_ioctl_t	hpt_ioctl;
959 static	int		hpt_rescan_bus(void);
960 
961 static struct cdevsw hpt_cdevsw = {
962 	.d_open =	hpt_open,
963 	.d_close =	hpt_close,
964 	.d_ioctl =	hpt_ioctl,
965 	.d_name =	driver_name,
966 	.d_version =	D_VERSION,
967 };
968 
969 static struct intr_config_hook hpt_ich;
970 
971 /*
972  * hpt_final_init() is called after all hpt_attach() calls have completed.
973  */
974 static void hpt_final_init(void *dummy)
975 {
976 	int       i;
977 	PVBUS_EXT vbus_ext;
978 	PVBUS vbus;
979 	PHBA hba;
980 
981 	/* Clear the config hook */
982 	config_intrhook_disestablish(&hpt_ich);
983 
984 	/* allocate memory */
985 	i = 0;
986 	ldm_for_each_vbus(vbus, vbus_ext) {
987 		if (hpt_alloc_mem(vbus_ext)) {
988 			os_printk("out of memory");
989 			return;
990 		}
991 		i++;
992 	}
993 
994 	if (!i) {
995 		if (bootverbose)
996 			os_printk("no controller detected.");
997 		return;
998 	}
999 
1000 	/* initializing hardware */
1001 	ldm_for_each_vbus(vbus, vbus_ext) {
1002 		/* make timer available here */
1003 		mtx_init(&vbus_ext->lock, "hptsleeplock", NULL, MTX_DEF);
1004 		callout_init_mtx(&vbus_ext->timer, &vbus_ext->lock, 0);
1005 		if (hpt_init_vbus(vbus_ext)) {
1006 			os_printk("failed to initialize hardware");
1007 			break; /* FIXME */
1008 		}
1009 	}
1010 
1011 	/* register CAM interface */
1012 	ldm_for_each_vbus(vbus, vbus_ext) {
1013 		struct cam_devq *devq;
1014 		struct ccb_setasync	ccb;
1015 
1016 		if (bus_dma_tag_create(NULL,/* parent */
1017 				4,	/* alignment */
1018 				BUS_SPACE_MAXADDR_32BIT+1, /* boundary */
1019 				BUS_SPACE_MAXADDR,	/* lowaddr */
1020 				BUS_SPACE_MAXADDR, 	/* highaddr */
1021 				NULL, NULL, 		/* filter, filterarg */
1022 				PAGE_SIZE * (os_max_sg_descriptors-1),	/* maxsize */
1023 				os_max_sg_descriptors,	/* nsegments */
1024 				0x10000,	/* maxsegsize */
1025 				BUS_DMA_WAITOK,		/* flags */
1026 				busdma_lock_mutex,	/* lockfunc */
1027 				&vbus_ext->lock,		/* lockfuncarg */
1028 				&vbus_ext->io_dmat	/* tag */))
1029 		{
1030 			return ;
1031 		}
1032 
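		/*
		 * Preallocate one command extension per possible outstanding
		 * command, each with its own busdma map and timeout callout.
		 */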
1033 		for (i=0; i<os_max_queue_comm; i++) {
1034 			POS_CMDEXT ext = (POS_CMDEXT)malloc(sizeof(OS_CMDEXT), M_DEVBUF, M_WAITOK);
1035 			if (!ext) {
1036 				os_printk("Can't alloc cmdext(%d)", i);
1037 				return ;
1038 			}
1039 			ext->vbus_ext = vbus_ext;
1040 			ext->next = vbus_ext->cmdext_list;
1041 			vbus_ext->cmdext_list = ext;
1042 
1043 			if (bus_dmamap_create(vbus_ext->io_dmat, 0, &ext->dma_map)) {
1044 				os_printk("Can't create dma map(%d)", i);
1045 				return ;
1046 			}
1047 			callout_init_mtx(&ext->timeout, &vbus_ext->lock, 0);
1048 		}
1049 
1050 		if ((devq = cam_simq_alloc(os_max_queue_comm)) == NULL) {
1051 			os_printk("cam_simq_alloc failed");
1052 			return ;
1053 		}
1054 
1055 		vbus_ext->sim = cam_sim_alloc(hpt_action, hpt_poll, driver_name,
1056 				vbus_ext, 0, &vbus_ext->lock, os_max_queue_comm,
1057 				/*tagged*/8,  devq);
1058 
1059 		if (!vbus_ext->sim) {
1060 			os_printk("cam_sim_alloc failed");
1061 			cam_simq_free(devq);
1062 			return ;
1063 		}
1064 
1065 		hpt_lock_vbus(vbus_ext);
1066 		if (xpt_bus_register(vbus_ext->sim, NULL, 0) != CAM_SUCCESS) {
1067 			os_printk("xpt_bus_register failed");
1068 			cam_sim_free(vbus_ext->sim, /*free devq*/ TRUE);
1069 			hpt_unlock_vbus(vbus_ext);
1070 			vbus_ext->sim = NULL;
1071 			return ;
1072 		}
1073 
1074 		if (xpt_create_path(&vbus_ext->path, /*periph */ NULL,
1075 				cam_sim_path(vbus_ext->sim), CAM_TARGET_WILDCARD,
1076 				CAM_LUN_WILDCARD) != CAM_REQ_CMP)
1077 		{
1078 			os_printk("xpt_create_path failed");
1079 			xpt_bus_deregister(cam_sim_path(vbus_ext->sim));
1080 			cam_sim_free(vbus_ext->sim, /*free_devq*/TRUE);
1081 			hpt_unlock_vbus(vbus_ext);
1082 			vbus_ext->sim = NULL;
1083 			return ;
1084 		}
1085 		hpt_unlock_vbus(vbus_ext);
1086 
1087 		xpt_setup_ccb(&ccb.ccb_h, vbus_ext->path, /*priority*/5);
1088 		ccb.ccb_h.func_code = XPT_SASYNC_CB;
1089 		ccb.event_enable = AC_LOST_DEVICE;
1090 		ccb.callback = hpt_async;
1091 		ccb.callback_arg = vbus_ext;
1092 		xpt_action((union ccb *)&ccb);
1093 
1094 		for (hba = vbus_ext->hba_list; hba; hba = hba->next) {
1095 			int rid = 0;
1096 			if ((hba->irq_res = bus_alloc_resource_any(hba->pcidev,
1097 				SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE)) == NULL)
1098 			{
1099 				os_printk("can't allocate interrupt");
1100 				return ;
1101 			}
1102 
1103 			if (bus_setup_intr(hba->pcidev, hba->irq_res, INTR_TYPE_CAM | INTR_MPSAFE,
1104 				NULL, hpt_pci_intr, vbus_ext, &hba->irq_handle))
1105 			{
1106 				os_printk("can't set up interrupt");
1107 				return ;
1108 			}
1109 			hba->ldm_adapter.him->intr_control(hba->ldm_adapter.him_handle, HPT_TRUE);
1110 		}
1111 
1112 		vbus_ext->shutdown_eh = EVENTHANDLER_REGISTER(shutdown_final,
1113 									hpt_shutdown_vbus, vbus_ext, SHUTDOWN_PRI_DEFAULT);
1114 		if (!vbus_ext->shutdown_eh)
1115 			os_printk("Shutdown event registration failed");
1116 	}
1117 
1118 	ldm_for_each_vbus(vbus, vbus_ext) {
1119 		TASK_INIT(&vbus_ext->worker, 0, (task_fn_t *)hpt_do_tasks, vbus_ext);
1120 		if (vbus_ext->tasks)
1121 			TASK_ENQUEUE(&vbus_ext->worker);
1122 	}
1123 
1124 	make_dev(&hpt_cdevsw, DRIVER_MINOR, UID_ROOT, GID_OPERATOR,
1125 	    S_IRUSR | S_IWUSR, "%s", driver_name);
1126 }
1127 
1128 #if defined(KLD_MODULE)
1129 
1130 typedef struct driverlink *driverlink_t;
1131 struct driverlink {
1132 	kobj_class_t	driver;
1133 	TAILQ_ENTRY(driverlink) link;	/* list of drivers in devclass */
1134 };
1135 
1136 typedef TAILQ_HEAD(driver_list, driverlink) driver_list_t;
1137 
1138 struct devclass {
1139 	TAILQ_ENTRY(devclass) link;
1140 	devclass_t	parent;		/* parent in devclass hierarchy */
1141 	driver_list_t	drivers;     /* bus devclasses store drivers for bus */
1142 	char		*name;
1143 	device_t	*devices;	/* array of devices indexed by unit */
1144 	int		maxunit;	/* size of devices array */
1145 };
1146 
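/*
 * When loaded as a module, find the driverlink in the pci devclass whose
 * driver name matches driver_name and swap it with the first entry, so this
 * driver is probed ahead of a statically configured copy of the driver.
 */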
1147 static void override_kernel_driver(void)
1148 {
1149 	driverlink_t dl, dlfirst;
1150 	driver_t *tmpdriver;
1151 	devclass_t dc = devclass_find("pci");
1152 
1153 	if (dc){
1154 		dlfirst = TAILQ_FIRST(&dc->drivers);
1155 		for (dl = dlfirst; dl; dl = TAILQ_NEXT(dl, link)) {
1156 			if(strcmp(dl->driver->name, driver_name) == 0) {
1157 				tmpdriver=dl->driver;
1158 				dl->driver=dlfirst->driver;
1159 				dlfirst->driver=tmpdriver;
1160 				break;
1161 			}
1162 		}
1163 	}
1164 }
1165 
1166 #else
1167 #define override_kernel_driver()
1168 #endif
1169 
1170 static void hpt_init(void *dummy)
1171 {
1172 	if (bootverbose)
1173 		os_printk("%s %s", driver_name_long, driver_ver);
1174 
1175 	override_kernel_driver();
1176 	init_config();
1177 
1178 	hpt_ich.ich_func = hpt_final_init;
1179 	hpt_ich.ich_arg = NULL;
1180 	if (config_intrhook_establish(&hpt_ich) != 0) {
1181 		printf("%s: cannot establish configuration hook\n",
1182 		    driver_name_long);
1183 	}
1184 
1185 }
1186 SYSINIT(hptinit, SI_SUB_CONFIGURE, SI_ORDER_FIRST, hpt_init, NULL);
1187 
1188 /*
1189  * CAM driver interface
1190  */
1191 static device_method_t driver_methods[] = {
1192 	/* Device interface */
1193 	DEVMETHOD(device_probe,		hpt_probe),
1194 	DEVMETHOD(device_attach,	hpt_attach),
1195 	DEVMETHOD(device_detach,	hpt_detach),
1196 	DEVMETHOD(device_shutdown,	hpt_shutdown),
1197 	DEVMETHOD_END
1198 };
1199 
1200 static driver_t hpt_pci_driver = {
1201 	driver_name,
1202 	driver_methods,
1203 	sizeof(HBA)
1204 };
1205 
1206 static devclass_t	hpt_devclass;
1207 
1208 #ifndef TARGETNAME
1209 #error "no TARGETNAME found"
1210 #endif
1211 
1212 /* Extra level of macro indirection so that TARGETNAME is expanded before use. */
1213 #define __DRIVER_MODULE(p1, p2, p3, p4, p5, p6) DRIVER_MODULE(p1, p2, p3, p4, p5, p6)
1214 #define __MODULE_VERSION(p1, p2) MODULE_VERSION(p1, p2)
1215 #define __MODULE_DEPEND(p1, p2, p3, p4, p5) MODULE_DEPEND(p1, p2, p3, p4, p5)
1216 __DRIVER_MODULE(TARGETNAME, pci, hpt_pci_driver, hpt_devclass, 0, 0);
1217 __MODULE_VERSION(TARGETNAME, 1);
1218 __MODULE_DEPEND(TARGETNAME, cam, 1, 1, 1);
1219 
1220 static int hpt_open(struct cdev *dev, int flags, int devtype, struct thread *td)
1221 {
1222 	return 0;
1223 }
1224 
1225 static int hpt_close(struct cdev *dev, int flags, int devtype, struct thread *td)
1226 {
1227 	return 0;
1228 }
1229 
1230 static int hpt_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag, struct thread *td)
1231 {
1232 	PHPT_IOCTL_PARAM piop=(PHPT_IOCTL_PARAM)data;
1233 	IOCTL_ARG ioctl_args;
1234 	HPT_U32 bytesReturned;
1235 
1236 	switch (cmd){
1237 	case HPT_DO_IOCONTROL:
1238 	{
1239 		if (piop->Magic == HPT_IOCTL_MAGIC || piop->Magic == HPT_IOCTL_MAGIC32) {
1240 			KdPrint(("ioctl=%x in=%p len=%d out=%p len=%d\n",
1241 				piop->dwIoControlCode,
1242 				piop->lpInBuffer,
1243 				piop->nInBufferSize,
1244 				piop->lpOutBuffer,
1245 				piop->nOutBufferSize));
1246 
1247 			memset(&ioctl_args, 0, sizeof(ioctl_args));
1248 
1249 			ioctl_args.dwIoControlCode = piop->dwIoControlCode;
1250 			ioctl_args.nInBufferSize = piop->nInBufferSize;
1251 			ioctl_args.nOutBufferSize = piop->nOutBufferSize;
1252 			ioctl_args.lpBytesReturned = &bytesReturned;
1253 
1254 			if (ioctl_args.nInBufferSize) {
1255 				ioctl_args.lpInBuffer = malloc(ioctl_args.nInBufferSize, M_DEVBUF, M_WAITOK);
1256 				if (!ioctl_args.lpInBuffer)
1257 					goto invalid;
1258 				if (copyin((void*)piop->lpInBuffer,
1259 				    ioctl_args.lpInBuffer, piop->nInBufferSize))
1260 					goto invalid;
1261 			}
1262 
1263 			if (ioctl_args.nOutBufferSize) {
1264 				ioctl_args.lpOutBuffer = malloc(ioctl_args.nOutBufferSize, M_DEVBUF, M_WAITOK);
1265 				if (!ioctl_args.lpOutBuffer)
1266 					goto invalid;
1267 			}
1268 
1269 			hpt_do_ioctl(&ioctl_args);
1270 
1271 			if (ioctl_args.result==HPT_IOCTL_RESULT_OK) {
1272 				if (piop->nOutBufferSize) {
1273 					if (copyout(ioctl_args.lpOutBuffer,
1274 					    (void*)piop->lpOutBuffer, piop->nOutBufferSize))
1275 						goto invalid;
1276 				}
1277 				if (piop->lpBytesReturned) {
1278 					if (copyout(&bytesReturned,
1279 					    (void*)piop->lpBytesReturned, sizeof(HPT_U32)))
1280 						goto invalid;
1281 				}
1282 				if (ioctl_args.lpInBuffer) free(ioctl_args.lpInBuffer, M_DEVBUF);
1283 				if (ioctl_args.lpOutBuffer) free(ioctl_args.lpOutBuffer, M_DEVBUF);
1284 				return 0;
1285 			}
1286 invalid:
1287 			if (ioctl_args.lpInBuffer) free(ioctl_args.lpInBuffer, M_DEVBUF);
1288 			if (ioctl_args.lpOutBuffer) free(ioctl_args.lpOutBuffer, M_DEVBUF);
1289 			return EFAULT;
1290 		}
1291 		return EFAULT;
1292 	}
1293 
1294 	case HPT_SCAN_BUS:
1295 	{
1296 		return hpt_rescan_bus();
1297 	}
1298 	default:
1299 		KdPrint(("invalid command!"));
1300 		return EFAULT;
1301 	}
1302 
1303 }
1304 
1305 static int	hpt_rescan_bus(void)
1306 {
1307 	union ccb			*ccb;
1308 	PVBUS 				vbus;
1309 	PVBUS_EXT			vbus_ext;
1310 
1311 	ldm_for_each_vbus(vbus, vbus_ext) {
1312 		if ((ccb = xpt_alloc_ccb()) == NULL)
1313 			return(ENOMEM);
1314 		if (xpt_create_path(&ccb->ccb_h.path, NULL,
1315 		    cam_sim_path(vbus_ext->sim),
1316 		    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
1317 			xpt_free_ccb(ccb);
1318 			return(EIO);
1319 		}
1320 		xpt_rescan(ccb);
1321 	}
1322 
1323 	return(0);
1324 }
1325