xref: /freebsd/sys/dev/hpt27xx/hpt27xx_osm_bsd.c (revision 1c4ee7dfb8affed302171232b0f612e6bcba3c10)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause
3  *
4  * Copyright (c) 2011 HighPoint Technologies, Inc.
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26  * SUCH DAMAGE.
27  */
28 
29 #include <dev/hpt27xx/hpt27xx_config.h>
30 
31 #include <dev/hpt27xx/os_bsd.h>
32 #include <dev/hpt27xx/hptintf.h>
33 
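/*
 * hpt_match: walk the global him_list and compare the PCI vendor/device ID
 * of `dev' against each HIM's supported-device table.  When `scan' is
 * nonzero, get_controller_count() is also called so the HIM can account for
 * this controller.  Returns the matching HIM, or NULL if none matches.
 */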
34 static HIM *hpt_match(device_t dev, int scan)
35 {
36 	PCI_ID pci_id;
37 	HIM *him;
38 	int i;
39 
40 	for (him = him_list; him; him = him->next) {
41 		for (i=0; him->get_supported_device_id(i, &pci_id); i++) {
42 			if (scan && him->get_controller_count)
43 				him->get_controller_count(&pci_id,0,0);
44 			if ((pci_get_vendor(dev) == pci_id.vid) &&
45 				(pci_get_device(dev) == pci_id.did)){
46 				return (him);
47 			}
48 		}
49 	}
50 	return (NULL);
51 }
52 
53 static int hpt_probe(device_t dev)
54 {
55 	HIM *him;
56 
57 	him = hpt_match(dev, 0);
58 	if (him != NULL) {
59 		KdPrint(("hpt_probe: adapter at PCI %d:%d:%d, IRQ %d",
60 			pci_get_bus(dev), pci_get_slot(dev), pci_get_function(dev), pci_get_irq(dev)
61 			));
62 		device_set_desc(dev, him->name);
63 		return (BUS_PROBE_DEFAULT);
64 	}
65 
66 	return (ENXIO);
67 }
68 
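/*
 * hpt_attach: allocate and create the HIM adapter instance for this PCI
 * device, then register it with the LDM layer.  If registration finds no
 * existing virtual bus, a new VBUS_EXT (with embedded vbus) is created and
 * the adapter is registered again.  Finally the HBA is linked into the
 * owning vbus_ext->hba_list.
 */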
69 static int hpt_attach(device_t dev)
70 {
71 	PHBA hba = (PHBA)device_get_softc(dev);
72 	HIM *him;
73 	PCI_ID pci_id;
74 	HPT_UINT size;
75 	PVBUS vbus;
76 	PVBUS_EXT vbus_ext;
77 
78 	if (pci_get_domain(dev) != 0) {
79 		device_printf(dev, "does not support PCI domains\n");
80 		return (ENXIO);
81 	}
82 
83 	KdPrint(("hpt_attach(%d/%d/%d)", pci_get_bus(dev), pci_get_slot(dev), pci_get_function(dev)));
84 
85 	him = hpt_match(dev, 1);
86 	hba->ext_type = EXT_TYPE_HBA;
87 	hba->ldm_adapter.him = him;
88 	pci_enable_busmaster(dev);
89 
90 	pci_id.vid = pci_get_vendor(dev);
91 	pci_id.did = pci_get_device(dev);
92 	pci_id.rev = pci_get_revid(dev);
93 	pci_id.subsys = (HPT_U32)(pci_get_subdevice(dev)) << 16 | pci_get_subvendor(dev);
94 
95 	size = him->get_adapter_size(&pci_id);
96 	hba->ldm_adapter.him_handle = malloc(size, M_DEVBUF, M_WAITOK);
97 	if (!hba->ldm_adapter.him_handle)
98 		return ENXIO;
99 
100 	hba->pcidev = dev;
101 	hba->pciaddr.tree = 0;
102 	hba->pciaddr.bus = pci_get_bus(dev);
103 	hba->pciaddr.device = pci_get_slot(dev);
104 	hba->pciaddr.function = pci_get_function(dev);
105 
106 	if (!him->create_adapter(&pci_id, hba->pciaddr, hba->ldm_adapter.him_handle, hba)) {
107 		free(hba->ldm_adapter.him_handle, M_DEVBUF);
108 		return ENXIO;
109 	}
110 
111 	os_printk("adapter at PCI %d:%d:%d, IRQ %d",
112 		hba->pciaddr.bus, hba->pciaddr.device, hba->pciaddr.function, pci_get_irq(dev));
113 
114 	if (!ldm_register_adapter(&hba->ldm_adapter)) {
115 		size = ldm_get_vbus_size();
116 		vbus_ext = malloc(sizeof(VBUS_EXT) + size, M_DEVBUF, M_WAITOK);
117 		if (!vbus_ext) {
118 			free(hba->ldm_adapter.him_handle, M_DEVBUF);
119 			return ENXIO;
120 		}
121 		memset(vbus_ext, 0, sizeof(VBUS_EXT));
122 		vbus_ext->ext_type = EXT_TYPE_VBUS;
123 		ldm_create_vbus((PVBUS)vbus_ext->vbus, vbus_ext);
124 		ldm_register_adapter(&hba->ldm_adapter);
125 	}
126 
127 	ldm_for_each_vbus(vbus, vbus_ext) {
128 		if (hba->ldm_adapter.vbus==vbus) {
129 			hba->vbus_ext = vbus_ext;
130 			hba->next = vbus_ext->hba_list;
131 			vbus_ext->hba_list = hba;
132 			break;
133 		}
134 	}
135 	return 0;
136 }
137 
138 /*
139  * It might be better to use bus_dmamem_alloc() to allocate the DMA memory,
140  * but it currently has some problems (alignment, etc.).
141  */
142 static __inline void *__get_free_pages(int order)
143 {
144 	/* don't use low memory - other devices may get starved */
145 	return contigmalloc(PAGE_SIZE<<order,
146 			M_DEVBUF, M_WAITOK, BUS_SPACE_MAXADDR_24BIT, BUS_SPACE_MAXADDR, PAGE_SIZE, 0);
147 }
148 
149 static __inline void free_pages(void *p, int order)
150 {
151 	contigfree(p, PAGE_SIZE<<order, M_DEVBUF);
152 }
153 
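/*
 * hpt_alloc_mem: pre-allocate the memory each HIM asked for.  Plain
 * freelists get malloc'ed entries; DMA freelists are carved out of
 * physically contiguous pages, each entry storing its bus address right
 * after the next-pointer (the *(BUS_ADDRESS *)(p+1) store below).  A pool
 * of os_max_cache_pages DMA-able pages is filled last.
 */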
154 static int hpt_alloc_mem(PVBUS_EXT vbus_ext)
155 {
156 	PHBA hba;
157 	struct freelist *f;
158 	HPT_UINT i;
159 	void **p;
160 
161 	for (hba = vbus_ext->hba_list; hba; hba = hba->next)
162 		hba->ldm_adapter.him->get_meminfo(hba->ldm_adapter.him_handle);
163 
164 	ldm_get_mem_info((PVBUS)vbus_ext->vbus, 0);
165 
166 	for (f=vbus_ext->freelist_head; f; f=f->next) {
167 		KdPrint(("%s: %d*%d=%d bytes",
168 			f->tag, f->count, f->size, f->count*f->size));
169 		for (i=0; i<f->count; i++) {
170 			p = (void **)malloc(f->size, M_DEVBUF, M_WAITOK);
171 			if (!p)	return (ENXIO);
172 			*p = f->head;
173 			f->head = p;
174 		}
175 	}
176 
177 	for (f=vbus_ext->freelist_dma_head; f; f=f->next) {
178 		int order, size, j;
179 
180 		HPT_ASSERT((f->size & (f->alignment-1))==0);
181 
182 		for (order=0, size=PAGE_SIZE; size<f->size; order++, size<<=1)
183 			;
184 
185 		KdPrint(("%s: %d*%d=%d bytes, order %d",
186 			f->tag, f->count, f->size, f->count*f->size, order));
187 		HPT_ASSERT(f->alignment<=PAGE_SIZE);
188 
189 		for (i=0; i<f->count;) {
190 			p = (void **)__get_free_pages(order);
191 			if (!p) return -1;
192 			for (j = size/f->size; j && i<f->count; i++,j--) {
193 				*p = f->head;
194 				*(BUS_ADDRESS *)(p+1) = (BUS_ADDRESS)vtophys(p);
195 				f->head = p;
196 				p = (void **)((unsigned long)p + f->size);
197 			}
198 		}
199 	}
200 
201 	HPT_ASSERT(PAGE_SIZE==DMAPOOL_PAGE_SIZE);
202 
203 	for (i=0; i<os_max_cache_pages; i++) {
204 		p = (void **)__get_free_pages(0);
205 		if (!p) return -1;
206 		HPT_ASSERT(((HPT_UPTR)p & (DMAPOOL_PAGE_SIZE-1))==0);
207 		dmapool_put_page((PVBUS)vbus_ext->vbus, p, (BUS_ADDRESS)vtophys(p));
208 	}
209 
210 	return 0;
211 }
212 
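/*
 * hpt_free_mem: undo hpt_alloc_mem.  Plain freelist entries are freed
 * directly.  For DMA freelists, higher-order blocks are freed as pages;
 * page-sized blocks are returned to the dmapool first (a page can only be
 * freed once every block carved from it is back), and the remaining pool
 * pages are released at the end.
 */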
213 static void hpt_free_mem(PVBUS_EXT vbus_ext)
214 {
215 	struct freelist *f;
216 	void *p;
217 	int i;
218 	BUS_ADDRESS bus;
219 
220 	for (f=vbus_ext->freelist_head; f; f=f->next) {
221 #if DBG
222 		if (f->count!=f->reserved_count) {
223 			KdPrint(("memory leak for freelist %s (%d/%d)", f->tag, f->count, f->reserved_count));
224 		}
225 #endif
226 		while ((p=freelist_get(f)))
227 			free(p, M_DEVBUF);
228 	}
229 
230 	for (i=0; i<os_max_cache_pages; i++) {
231 		p = dmapool_get_page((PVBUS)vbus_ext->vbus, &bus);
232 		HPT_ASSERT(p);
233 		free_pages(p, 0);
234 	}
235 
236 	for (f=vbus_ext->freelist_dma_head; f; f=f->next) {
237 		int order, size;
238 #if DBG
239 		if (f->count!=f->reserved_count) {
240 			KdPrint(("memory leak for dma freelist %s (%d/%d)", f->tag, f->count, f->reserved_count));
241 		}
242 #endif
243 		for (order=0, size=PAGE_SIZE; size<f->size; order++, size<<=1) ;
244 
245 		while ((p=freelist_get_dma(f, &bus))) {
246 			if (order)
247 				free_pages(p, order);
248 			else {
249 			/* can't free immediately since other blocks in this page may still be in the list */
250 				if (((HPT_UPTR)p & (PAGE_SIZE-1))==0)
251 					dmapool_put_page((PVBUS)vbus_ext->vbus, p, bus);
252 			}
253 		}
254 	}
255 
256 	while ((p = dmapool_get_page((PVBUS)vbus_ext->vbus, &bus)))
257 		free_pages(p, 0);
258 }
259 
260 static int hpt_init_vbus(PVBUS_EXT vbus_ext)
261 {
262 	PHBA hba;
263 
264 	for (hba = vbus_ext->hba_list; hba; hba = hba->next)
265 		if (!hba->ldm_adapter.him->initialize(hba->ldm_adapter.him_handle)) {
266 			KdPrint(("fail to initialize %p", hba));
267 			return -1;
268 		}
269 
270 	ldm_initialize_vbus((PVBUS)vbus_ext->vbus, &vbus_ext->hba_list->ldm_adapter);
271 	return 0;
272 }
273 
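/*
 * hpt_flush_done: completion callback for the shutdown-time flush.  If the
 * array is being transformed, re-issue the flush to the transform target;
 * otherwise set the caller's `done' flag and wake it up.
 */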
274 static void hpt_flush_done(PCOMMAND pCmd)
275 {
276 	PVDEV vd = pCmd->target;
277 
278 	if (mIsArray(vd->type) && vd->u.array.transform && vd!=vd->u.array.transform->target) {
279 		vd = vd->u.array.transform->target;
280 		HPT_ASSERT(vd);
281 		pCmd->target = vd;
282 		pCmd->Result = RETURN_PENDING;
283 		vdev_queue_cmd(pCmd);
284 		return;
285 	}
286 
287 	*(int *)pCmd->priv = 1;
288 	wakeup(pCmd);
289 }
290 
291 /*
292  * flush a vdev (without retry).
293  */
294 static int hpt_flush_vdev(PVBUS_EXT vbus_ext, PVDEV vd)
295 {
296 	PCOMMAND pCmd;
297 	int result = 0, done;
298 	HPT_UINT count;
299 
300 	KdPrint(("flushing dev %p", vd));
301 
302 	hpt_lock_vbus(vbus_ext);
303 
304 	if (mIsArray(vd->type) && vd->u.array.transform)
305 		count = max(vd->u.array.transform->source->cmds_per_request,
306 					vd->u.array.transform->target->cmds_per_request);
307 	else
308 		count = vd->cmds_per_request;
309 
310 	pCmd = ldm_alloc_cmds(vd->vbus, count);
311 
312 	if (!pCmd) {
313 		hpt_unlock_vbus(vbus_ext);
314 		return -1;
315 	}
316 
317 	pCmd->type = CMD_TYPE_FLUSH;
318 	pCmd->flags.hard_flush = 1;
319 	pCmd->target = vd;
320 	pCmd->done = hpt_flush_done;
321 	done = 0;
322 	pCmd->priv = &done;
323 
324 	ldm_queue_cmd(pCmd);
325 
326 	if (!done) {
327 		while (hpt_sleep(vbus_ext, pCmd, PPAUSE, "hptfls", HPT_OSM_TIMEOUT)) {
328 			ldm_reset_vbus(vd->vbus);
329 		}
330 	}
331 
332 	KdPrint(("flush result %d", pCmd->Result));
333 
334 	if (pCmd->Result!=RETURN_SUCCESS)
335 		result = -1;
336 
337 	ldm_free_cmds(pCmd);
338 
339 	hpt_unlock_vbus(vbus_ext);
340 
341 	return result;
342 }
343 
344 static void hpt_stop_tasks(PVBUS_EXT vbus_ext);
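/*
 * hpt_shutdown_vbus: shutdown_final event handler.  Stops background array
 * tasks, flushes every target (retrying once), shuts down and releases the
 * LDM vbus, tears down the PCI interrupts and frees all per-vbus resources.
 */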
345 static void hpt_shutdown_vbus(PVBUS_EXT vbus_ext, int howto)
346 {
347 	PVBUS     vbus = (PVBUS)vbus_ext->vbus;
348 	PHBA hba;
349 	int i;
350 
351 	KdPrint(("hpt_shutdown_vbus"));
352 
353 	/* stop all ctl tasks and disable the worker taskqueue */
354 	hpt_stop_tasks(vbus_ext);
355 	vbus_ext->worker.ta_context = 0;
356 
357 	/* flush devices */
358 	for (i=0; i<osm_max_targets; i++) {
359 		PVDEV vd = ldm_find_target(vbus, i);
360 		if (vd) {
361 			/* retry once */
362 			if (hpt_flush_vdev(vbus_ext, vd))
363 				hpt_flush_vdev(vbus_ext, vd);
364 		}
365 	}
366 
367 	hpt_lock_vbus(vbus_ext);
368 	ldm_shutdown(vbus);
369 	hpt_unlock_vbus(vbus_ext);
370 
371 	ldm_release_vbus(vbus);
372 
373 	for (hba=vbus_ext->hba_list; hba; hba=hba->next)
374 		bus_teardown_intr(hba->pcidev, hba->irq_res, hba->irq_handle);
375 
376 	hpt_free_mem(vbus_ext);
377 
378 	while ((hba=vbus_ext->hba_list)) {
379 		vbus_ext->hba_list = hba->next;
380 		free(hba->ldm_adapter.him_handle, M_DEVBUF);
381 	}
382 	callout_drain(&vbus_ext->timer);
383 	mtx_destroy(&vbus_ext->lock);
384 	free(vbus_ext, M_DEVBUF);
385 	KdPrint(("hpt_shutdown_vbus done"));
386 }
387 
388 static void __hpt_do_tasks(PVBUS_EXT vbus_ext)
389 {
390 	OSM_TASK *tasks;
391 
392 	tasks = vbus_ext->tasks;
393 	vbus_ext->tasks = 0;
394 
395 	while (tasks) {
396 		OSM_TASK *t = tasks;
397 		tasks = t->next;
398 		t->next = 0;
399 		t->func(vbus_ext->vbus, t->data);
400 	}
401 }
402 
403 static void hpt_do_tasks(PVBUS_EXT vbus_ext, int pending)
404 {
405 	if(vbus_ext){
406 		hpt_lock_vbus(vbus_ext);
407 		__hpt_do_tasks(vbus_ext);
408 		hpt_unlock_vbus(vbus_ext);
409 	}
410 }
411 
412 static void hpt_action(struct cam_sim *sim, union ccb *ccb);
413 static void hpt_poll(struct cam_sim *sim);
414 static void hpt_async(void * callback_arg, u_int32_t code, struct cam_path * path, void * arg);
415 static void hpt_pci_intr(void *arg);
416 
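/*
 * Per-command extension (OS_CMDEXT) freelist helpers.  The entries are
 * preallocated in hpt_final_init(), one per queued command, together with
 * their busdma maps and timeout callouts.
 */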
417 static __inline POS_CMDEXT cmdext_get(PVBUS_EXT vbus_ext)
418 {
419 	POS_CMDEXT p = vbus_ext->cmdext_list;
420 	if (p)
421 		vbus_ext->cmdext_list = p->next;
422 	return p;
423 }
424 
425 static __inline void cmdext_put(POS_CMDEXT p)
426 {
427 	p->next = p->vbus_ext->cmdext_list;
428 	p->vbus_ext->cmdext_list = p;
429 }
430 
431 static void hpt_timeout(void *arg)
432 {
433 	PCOMMAND pCmd = (PCOMMAND)arg;
434 	POS_CMDEXT ext = (POS_CMDEXT)pCmd->priv;
435 
436 	KdPrint(("pCmd %p timeout", pCmd));
437 
438 	ldm_reset_vbus((PVBUS)ext->vbus_ext->vbus);
439 }
440 
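/*
 * os_cmddone: command completion callback.  Cancels the timeout, translates
 * the internal result code into a CAM status, syncs and unloads the busdma
 * map, returns the command extension to the freelist and completes the CCB.
 */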
441 static void os_cmddone(PCOMMAND pCmd)
442 {
443 	POS_CMDEXT ext = (POS_CMDEXT)pCmd->priv;
444 	union ccb *ccb = ext->ccb;
445 
446 	KdPrint(("<8>os_cmddone(%p, %d)", pCmd, pCmd->Result));
447 	callout_stop(&ext->timeout);
448 	switch(pCmd->Result) {
449 	case RETURN_SUCCESS:
450 		ccb->ccb_h.status = CAM_REQ_CMP;
451 		break;
452 	case RETURN_BAD_DEVICE:
453 		ccb->ccb_h.status = CAM_DEV_NOT_THERE;
454 		break;
455 	case RETURN_DEVICE_BUSY:
456 		ccb->ccb_h.status = CAM_BUSY;
457 		break;
458 	case RETURN_INVALID_REQUEST:
459 		ccb->ccb_h.status = CAM_REQ_INVALID;
460 		break;
461 	case RETURN_SELECTION_TIMEOUT:
462 		ccb->ccb_h.status = CAM_SEL_TIMEOUT;
463 		break;
464 	case RETURN_RETRY:
465 		ccb->ccb_h.status = CAM_BUSY;
466 		break;
467 	default:
468 		ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
469 		break;
470 	}
471 
472 	if (pCmd->flags.data_in) {
473 		bus_dmamap_sync(ext->vbus_ext->io_dmat, ext->dma_map, BUS_DMASYNC_POSTREAD);
474 	}
475 	else if (pCmd->flags.data_out) {
476 		bus_dmamap_sync(ext->vbus_ext->io_dmat, ext->dma_map, BUS_DMASYNC_POSTWRITE);
477 	}
478 
479 	bus_dmamap_unload(ext->vbus_ext->io_dmat, ext->dma_map);
480 
481 	cmdext_put(ext);
482 	ldm_free_cmds(pCmd);
483 	xpt_done(ccb);
484 }
485 
486 static int os_buildsgl(PCOMMAND pCmd, PSG pSg, int logical)
487 {
488 	POS_CMDEXT ext = (POS_CMDEXT)pCmd->priv;
489 	union ccb *ccb = ext->ccb;
490 
491 	if(logical)	{
492 		os_set_sgptr(pSg, (HPT_U8 *)ccb->csio.data_ptr);
493 		pSg->size = ccb->csio.dxfer_len;
494 		pSg->eot = 1;
495 		return TRUE;
496 	}
497 	/* since we already supply a physical SG list, we should never be asked to build one here */
498 	HPT_ASSERT(0);
499 	return FALSE;
500 }
501 
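/*
 * hpt_io_dmamap_callback: busdma callback for hpt_scsi_io.  Fills the
 * physical SG list from the returned segments, pre-syncs the map for the
 * transfer direction, arms the per-command timeout and queues the command
 * to the LDM layer.
 */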
502 static void hpt_io_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
503 {
504 	PCOMMAND pCmd = (PCOMMAND)arg;
505 	POS_CMDEXT ext = (POS_CMDEXT)pCmd->priv;
506 	PSG psg = pCmd->psg;
507 	int idx;
508 
509 	HPT_ASSERT(pCmd->flags.physical_sg);
510 
511 	if (error)
512 		panic("busdma error");
513 
514 	HPT_ASSERT(nsegs<=os_max_sg_descriptors);
515 
516 	if (nsegs != 0) {
517 		for (idx = 0; idx < nsegs; idx++, psg++) {
518 			psg->addr.bus = segs[idx].ds_addr;
519 			psg->size = segs[idx].ds_len;
520 			psg->eot = 0;
521 		}
522 		psg[-1].eot = 1;
523 
524 		if (pCmd->flags.data_in) {
525 			bus_dmamap_sync(ext->vbus_ext->io_dmat, ext->dma_map,
526 			    BUS_DMASYNC_PREREAD);
527 		}
528 		else if (pCmd->flags.data_out) {
529 			bus_dmamap_sync(ext->vbus_ext->io_dmat, ext->dma_map,
530 			    BUS_DMASYNC_PREWRITE);
531 		}
532 	}
533 	callout_reset(&ext->timeout, HPT_OSM_TIMEOUT, hpt_timeout, pCmd);
534 	ldm_queue_cmd(pCmd);
535 }
536 
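/*
 * hpt_scsi_io: handle an XPT_SCSI_IO request.  INQUIRY, READ CAPACITY,
 * REPORT LUNS and a few other CDBs are emulated in place; READ/WRITE/VERIFY
 * commands are translated into internal IDE-style commands (scaling the LBA
 * and sector count for 4K-sector devices) and mapped for DMA.
 */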
537 static void hpt_scsi_io(PVBUS_EXT vbus_ext, union ccb *ccb)
538 {
539 	PVBUS vbus = (PVBUS)vbus_ext->vbus;
540 	PVDEV vd;
541 	PCOMMAND pCmd;
542 	POS_CMDEXT ext;
543 	HPT_U8 *cdb;
544 	int error;
545 
546 	if (ccb->ccb_h.flags & CAM_CDB_POINTER)
547 		cdb = ccb->csio.cdb_io.cdb_ptr;
548 	else
549 		cdb = ccb->csio.cdb_io.cdb_bytes;
550 
551 	KdPrint(("<8>hpt_scsi_io: ccb %x id %d lun %d cdb %x-%x-%x",
552 		ccb,
553 		ccb->ccb_h.target_id, ccb->ccb_h.target_lun,
554 		*(HPT_U32 *)&cdb[0], *(HPT_U32 *)&cdb[4], *(HPT_U32 *)&cdb[8]
555 	));
556 
557 	/* ccb->ccb_h.path_id is not our bus id - don't check it */
558 	if (ccb->ccb_h.target_lun != 0 ||
559 		ccb->ccb_h.target_id >= osm_max_targets ||
560 		(ccb->ccb_h.flags & CAM_CDB_PHYS))
561 	{
562 		ccb->ccb_h.status = CAM_TID_INVALID;
563 		xpt_done(ccb);
564 		return;
565 	}
566 
567 	vd = ldm_find_target(vbus, ccb->ccb_h.target_id);
568 
569 	if (!vd) {
570 		ccb->ccb_h.status = CAM_SEL_TIMEOUT;
571 		xpt_done(ccb);
572 		return;
573 	}
574 
575 	switch (cdb[0]) {
576 	case TEST_UNIT_READY:
577 	case START_STOP_UNIT:
578 	case SYNCHRONIZE_CACHE:
579 		ccb->ccb_h.status = CAM_REQ_CMP;
580 		break;
581 
582 	case INQUIRY:
583 		{
584 			PINQUIRYDATA inquiryData;
585 			memset(ccb->csio.data_ptr, 0, ccb->csio.dxfer_len);
586 			inquiryData = (PINQUIRYDATA)ccb->csio.data_ptr;
587 
588 			inquiryData->AdditionalLength = 31;
589 			inquiryData->CommandQueue = 1;
590 			memcpy(&inquiryData->VendorId, "HPT     ", 8);
591 			memcpy(&inquiryData->ProductId, "DISK 0_0        ", 16);
592 
593 			if (vd->target_id / 10) {
594 				inquiryData->ProductId[7] = (vd->target_id % 100) / 10 + '0';
595 				inquiryData->ProductId[8] = (vd->target_id % 100) % 10 + '0';
596 			}
597 			else
598 				inquiryData->ProductId[7] = (vd->target_id % 100) % 10 + '0';
599 
600 			memcpy(&inquiryData->ProductRevisionLevel, "4.00", 4);
601 
602 			ccb->ccb_h.status = CAM_REQ_CMP;
603 		}
604 		break;
605 
606 	case READ_CAPACITY:
607 	{
608 		HPT_U8 *rbuf = ccb->csio.data_ptr;
609 		HPT_U32 cap;
610 		HPT_U8 sector_size_shift = 0;
611 		HPT_U64 new_cap;
612 		HPT_U32 sector_size = 0;
613 
614 		if (mIsArray(vd->type))
615 			sector_size_shift = vd->u.array.sector_size_shift;
616 		else{
617 			if(vd->type == VD_RAW){
618 				sector_size = vd->u.raw.logical_sector_size;
619 			}
620 
621 			switch (sector_size) {
622 				case 0x1000:
623 					KdPrint(("set 4k sector size in READ_CAPACITY"));
624 					sector_size_shift = 3;
625 					break;
626 				default:
627 					break;
628 			}
629 		}
630 		new_cap = vd->capacity >> sector_size_shift;
631 
632 		if (new_cap > 0xfffffffful)
633 			cap = 0xffffffff;
634 		else
635 			cap = new_cap - 1;
636 
637 		rbuf[0] = (HPT_U8)(cap>>24);
638 		rbuf[1] = (HPT_U8)(cap>>16);
639 		rbuf[2] = (HPT_U8)(cap>>8);
640 		rbuf[3] = (HPT_U8)cap;
641 		rbuf[4] = 0;
642 		rbuf[5] = 0;
643 		rbuf[6] = 2 << sector_size_shift;
644 		rbuf[7] = 0;
645 
646 		ccb->ccb_h.status = CAM_REQ_CMP;
647 		break;
648 	}
649 	case REPORT_LUNS:
650 	{
651 		HPT_U8 *rbuf = ccb->csio.data_ptr;
652 		memset(rbuf, 0, 16);
653 		rbuf[3] = 8;
654 		ccb->ccb_h.status = CAM_REQ_CMP;
655 		break;
656 	}
657 	case SERVICE_ACTION_IN:
658 	{
659 		HPT_U8 *rbuf = ccb->csio.data_ptr;
660 		HPT_U64	cap = 0;
661 		HPT_U8 sector_size_shift = 0;
662 		HPT_U32 sector_size = 0;
663 
664 		if(mIsArray(vd->type))
665 			sector_size_shift = vd->u.array.sector_size_shift;
666 		else{
667 			if(vd->type == VD_RAW){
668 				sector_size = vd->u.raw.logical_sector_size;
669 			}
670 
671 			switch (sector_size) {
672 				case 0x1000:
673 					KdPrint(("set 4k sector size in SERVICE_ACTION_IN"));
674 					sector_size_shift = 3;
675 					break;
676 				default:
677 					break;
678 			}
679 		}
680 		cap = (vd->capacity >> sector_size_shift) - 1;
681 
682 		rbuf[0] = (HPT_U8)(cap>>56);
683 		rbuf[1] = (HPT_U8)(cap>>48);
684 		rbuf[2] = (HPT_U8)(cap>>40);
685 		rbuf[3] = (HPT_U8)(cap>>32);
686 		rbuf[4] = (HPT_U8)(cap>>24);
687 		rbuf[5] = (HPT_U8)(cap>>16);
688 		rbuf[6] = (HPT_U8)(cap>>8);
689 		rbuf[7] = (HPT_U8)cap;
690 		rbuf[8] = 0;
691 		rbuf[9] = 0;
692 		rbuf[10] = 2 << sector_size_shift;
693 		rbuf[11] = 0;
694 
695 		ccb->ccb_h.status = CAM_REQ_CMP;
696 		break;
697 	}
698 
699 	case READ_6:
700 	case READ_10:
701 	case READ_16:
702 	case WRITE_6:
703 	case WRITE_10:
704 	case WRITE_16:
705 	case 0x13:
706 	case 0x2f:
707 	case 0x8f: /* VERIFY_16 */
708 	{
709 		HPT_U8 sector_size_shift = 0;
710 		HPT_U32 sector_size = 0;
711 		pCmd = ldm_alloc_cmds(vbus, vd->cmds_per_request);
712 		if(!pCmd){
713 			KdPrint(("Failed to allocate command!"));
714 			ccb->ccb_h.status = CAM_BUSY;
715 			break;
716 		}
717 
718 		switch (cdb[0])	{
719 		case READ_6:
720 		case WRITE_6:
721 		case 0x13:
722 			pCmd->uCmd.Ide.Lba =  ((HPT_U32)cdb[1] << 16) | ((HPT_U32)cdb[2] << 8) | (HPT_U32)cdb[3];
723 			pCmd->uCmd.Ide.nSectors = (HPT_U16) cdb[4];
724 			break;
725 		case READ_16:
726 		case WRITE_16:
727 		case 0x8f: /* VERIFY_16 */
728 		{
729 			HPT_U64 block =
730 				((HPT_U64)cdb[2]<<56) |
731 				((HPT_U64)cdb[3]<<48) |
732 				((HPT_U64)cdb[4]<<40) |
733 				((HPT_U64)cdb[5]<<32) |
734 				((HPT_U64)cdb[6]<<24) |
735 				((HPT_U64)cdb[7]<<16) |
736 				((HPT_U64)cdb[8]<<8) |
737 				((HPT_U64)cdb[9]);
738 			pCmd->uCmd.Ide.Lba = block;
739 			pCmd->uCmd.Ide.nSectors = (HPT_U16)cdb[13] | ((HPT_U16)cdb[12]<<8);
740 			break;
741 		}
742 
743 		default:
744 			pCmd->uCmd.Ide.Lba = (HPT_U32)cdb[5] | ((HPT_U32)cdb[4] << 8) | ((HPT_U32)cdb[3] << 16) | ((HPT_U32)cdb[2] << 24);
745 			pCmd->uCmd.Ide.nSectors = (HPT_U16) cdb[8] | ((HPT_U16)cdb[7]<<8);
746 			break;
747 		}
748 
749 		if(mIsArray(vd->type)) {
750 			sector_size_shift = vd->u.array.sector_size_shift;
751 		}
752 		else{
753 			if(vd->type == VD_RAW){
754 				sector_size = vd->u.raw.logical_sector_size;
755 			}
756 
757 			switch (sector_size) {
758 				case 0x1000:
759 					KdPrint(("<8>resize sector size from 4k to 512"));
760 					sector_size_shift = 3;
761 					break;
762 				default:
763 					break;
764 	 		}
765 		}
766 		pCmd->uCmd.Ide.Lba <<= sector_size_shift;
767 		pCmd->uCmd.Ide.nSectors <<= sector_size_shift;
768 
769 
770 		switch (cdb[0]) {
771 		case READ_6:
772 		case READ_10:
773 		case READ_16:
774 			pCmd->flags.data_in = 1;
775 			break;
776 		case WRITE_6:
777 		case WRITE_10:
778 		case WRITE_16:
779 			pCmd->flags.data_out = 1;
780 			break;
781 		}
782 		pCmd->priv = ext = cmdext_get(vbus_ext);
783 		HPT_ASSERT(ext);
784 		ext->ccb = ccb;
785 		pCmd->target = vd;
786 		pCmd->done = os_cmddone;
787 		pCmd->buildsgl = os_buildsgl;
788 
789 		pCmd->psg = ext->psg;
790 		pCmd->flags.physical_sg = 1;
791 		error = bus_dmamap_load_ccb(vbus_ext->io_dmat,
792 					ext->dma_map, ccb,
793 					hpt_io_dmamap_callback, pCmd,
794 					BUS_DMA_WAITOK
795 				);
796 		KdPrint(("<8>bus_dmamap_load return %d", error));
797 		if (error && error!=EINPROGRESS) {
798 			os_printk("bus_dmamap_load error %d", error);
799 			cmdext_put(ext);
800 			ldm_free_cmds(pCmd);
801 			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
802 			xpt_done(ccb);
803 		}
804 		return;
805 	}
806 
807 	default:
808 		ccb->ccb_h.status = CAM_REQ_INVALID;
809 		break;
810 	}
811 
812 	xpt_done(ccb);
813 	return;
814 }
815 
816 static void hpt_action(struct cam_sim *sim, union ccb *ccb)
817 {
818 	PVBUS_EXT vbus_ext = (PVBUS_EXT)cam_sim_softc(sim);
819 
820 	KdPrint(("<8>hpt_action(fn=%d, id=%d)", ccb->ccb_h.func_code, ccb->ccb_h.target_id));
821 
822 	hpt_assert_vbus_locked(vbus_ext);
823 	switch (ccb->ccb_h.func_code) {
824 
825 	case XPT_SCSI_IO:
826 		hpt_scsi_io(vbus_ext, ccb);
827 		return;
828 
829 	case XPT_RESET_BUS:
830 		ldm_reset_vbus((PVBUS)vbus_ext->vbus);
831 		break;
832 
833 	case XPT_GET_TRAN_SETTINGS:
834 	case XPT_SET_TRAN_SETTINGS:
835 		ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
836 		break;
837 
838 	case XPT_CALC_GEOMETRY:
839 		ccb->ccg.heads = 255;
840 		ccb->ccg.secs_per_track = 63;
841 		ccb->ccg.cylinders = ccb->ccg.volume_size / (ccb->ccg.heads * ccb->ccg.secs_per_track);
842 		ccb->ccb_h.status = CAM_REQ_CMP;
843 		break;
844 
845 	case XPT_PATH_INQ:
846 	{
847 		struct ccb_pathinq *cpi = &ccb->cpi;
848 
849 		cpi->version_num = 1;
850 		cpi->hba_inquiry = PI_SDTR_ABLE;
851 		cpi->target_sprt = 0;
852 		cpi->hba_misc = PIM_NOBUSRESET;
853 		cpi->hba_eng_cnt = 0;
854 		cpi->max_target = osm_max_targets;
855 		cpi->max_lun = 0;
856 		cpi->unit_number = cam_sim_unit(sim);
857 		cpi->bus_id = cam_sim_bus(sim);
858 		cpi->initiator_id = osm_max_targets;
859 		cpi->base_transfer_speed = 3300;
860 
861 		strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
862 		strlcpy(cpi->hba_vid, "HPT   ", HBA_IDLEN);
863 		strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
864 		cpi->transport = XPORT_SPI;
865 		cpi->transport_version = 2;
866 		cpi->protocol = PROTO_SCSI;
867 		cpi->protocol_version = SCSI_REV_2;
868 		cpi->ccb_h.status = CAM_REQ_CMP;
869 		break;
870 	}
871 
872 	default:
873 		ccb->ccb_h.status = CAM_REQ_INVALID;
874 		break;
875 	}
876 
877 	xpt_done(ccb);
878 	return;
879 }
880 
881 static void hpt_pci_intr(void *arg)
882 {
883 	PVBUS_EXT vbus_ext = (PVBUS_EXT)arg;
884 	hpt_lock_vbus(vbus_ext);
885 	ldm_intr((PVBUS)vbus_ext->vbus);
886 	hpt_unlock_vbus(vbus_ext);
887 }
888 
889 static void hpt_poll(struct cam_sim *sim)
890 {
891 	PVBUS_EXT vbus_ext = (PVBUS_EXT)cam_sim_softc(sim);
892 
893 	hpt_assert_vbus_locked(vbus_ext);
894 	ldm_intr((PVBUS)vbus_ext->vbus);
895 }
896 
897 static void hpt_async(void * callback_arg, u_int32_t code, struct cam_path * path, void * arg)
898 {
899 	KdPrint(("<8>hpt_async"));
900 }
901 
902 static int hpt_shutdown(device_t dev)
903 {
904 	KdPrint(("hpt_shutdown(dev=%p)", dev));
905 	return 0;
906 }
907 
908 static int hpt_detach(device_t dev)
909 {
910 	/* we don't allow the driver to be unloaded. */
911 	return EBUSY;
912 }
913 
914 static void hpt_ioctl_done(struct _IOCTL_ARG *arg)
915 {
916 	arg->ioctl_cmnd = 0;
917 	wakeup(arg);
918 }
919 
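/*
 * __hpt_do_ioctl: issue an ioctl against one vbus and sleep until the HIM
 * completes it; if the wait times out, reset the vbus, run pending tasks
 * and keep waiting.
 */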
920 static void __hpt_do_ioctl(PVBUS_EXT vbus_ext, IOCTL_ARG *ioctl_args)
921 {
922 	ioctl_args->result = -1;
923 	ioctl_args->done = hpt_ioctl_done;
924 	ioctl_args->ioctl_cmnd = (void *)1;
925 
926 	hpt_lock_vbus(vbus_ext);
927 	ldm_ioctl((PVBUS)vbus_ext->vbus, ioctl_args);
928 
929 	while (ioctl_args->ioctl_cmnd) {
930 		if (hpt_sleep(vbus_ext, ioctl_args, PPAUSE, "hptctl", HPT_OSM_TIMEOUT)==0)
931 			break;
932 		ldm_reset_vbus((PVBUS)vbus_ext->vbus);
933 		__hpt_do_tasks(vbus_ext);
934 	}
935 
936 	/* KdPrint(("ioctl %x result %d", ioctl_args->dwIoControlCode, ioctl_args->result)); */
937 
938 	hpt_unlock_vbus(vbus_ext);
939 }
940 
941 static void hpt_do_ioctl(IOCTL_ARG *ioctl_args)
942 {
943 	PVBUS vbus;
944 	PVBUS_EXT vbus_ext;
945 
946 	ldm_for_each_vbus(vbus, vbus_ext) {
947 		__hpt_do_ioctl(vbus_ext, ioctl_args);
948 		if (ioctl_args->result!=HPT_IOCTL_RESULT_WRONG_VBUS)
949 			return;
950 	}
951 }
952 
953 #define HPT_DO_IOCTL(code, inbuf, insize, outbuf, outsize) ({\
954 	IOCTL_ARG arg;\
955 	arg.dwIoControlCode = code;\
956 	arg.lpInBuffer = inbuf;\
957 	arg.lpOutBuffer = outbuf;\
958 	arg.nInBufferSize = insize;\
959 	arg.nOutBufferSize = outsize;\
960 	arg.lpBytesReturned = 0;\
961 	hpt_do_ioctl(&arg);\
962 	arg.result;\
963 })
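/*
 * HPT_DO_IOCTL evaluates to arg.result; callers below treat any nonzero
 * value as failure.  Typical use (taken from hpt_get_logical_devices()):
 *
 *	if (HPT_DO_IOCTL(HPT_IOCTL_GET_LOGICAL_DEVICES,
 *			&count, sizeof(HPT_U32), pIds, sizeof(DEVICEID)*nMaxCount))
 *		return -1;
 */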
964 
965 #define DEVICEID_VALID(id) ((id) && ((HPT_U32)(id)!=0xffffffff))
966 
967 static int hpt_get_logical_devices(DEVICEID * pIds, int nMaxCount)
968 {
969 	int i;
970 	HPT_U32 count = nMaxCount-1;
971 
972 	if (HPT_DO_IOCTL(HPT_IOCTL_GET_LOGICAL_DEVICES,
973 			&count, sizeof(HPT_U32), pIds, sizeof(DEVICEID)*nMaxCount))
974 		return -1;
975 
976 	nMaxCount = (int)pIds[0];
977 	for (i=0; i<nMaxCount; i++) pIds[i] = pIds[i+1];
978 	return nMaxCount;
979 }
980 
981 static int hpt_get_device_info_v3(DEVICEID id, PLOGICAL_DEVICE_INFO_V3 pInfo)
982 {
983 	return HPT_DO_IOCTL(HPT_IOCTL_GET_DEVICE_INFO_V3,
984 				&id, sizeof(DEVICEID), pInfo, sizeof(LOGICAL_DEVICE_INFO_V3));
985 }
986 
987 /* does not logically belong in this file, but we want to use the ioctl interface */
988 static int __hpt_stop_tasks(PVBUS_EXT vbus_ext, DEVICEID id)
989 {
990 	LOGICAL_DEVICE_INFO_V3 devinfo;
991 	int i, result;
992 	DEVICEID param[2] = { id, 0 };
993 
994 	if (hpt_get_device_info_v3(id, &devinfo))
995 		return -1;
996 
997 	if (devinfo.Type!=LDT_ARRAY)
998 		return -1;
999 
1000 	if (devinfo.u.array.Flags & ARRAY_FLAG_REBUILDING)
1001 		param[1] = AS_REBUILD_ABORT;
1002 	else if (devinfo.u.array.Flags & ARRAY_FLAG_VERIFYING)
1003 		param[1] = AS_VERIFY_ABORT;
1004 	else if (devinfo.u.array.Flags & ARRAY_FLAG_INITIALIZING)
1005 		param[1] = AS_INITIALIZE_ABORT;
1006 	else if (devinfo.u.array.Flags & ARRAY_FLAG_TRANSFORMING)
1007 		param[1] = AS_TRANSFORM_ABORT;
1008 	else
1009 		return -1;
1010 
1011 	KdPrint(("SET_ARRAY_STATE(%x, %d)", param[0], param[1]));
1012 	result = HPT_DO_IOCTL(HPT_IOCTL_SET_ARRAY_STATE,
1013 				param, sizeof(param), 0, 0);
1014 
1015 	for (i=0; i<devinfo.u.array.nDisk; i++)
1016 		if (DEVICEID_VALID(devinfo.u.array.Members[i]))
1017 			__hpt_stop_tasks(vbus_ext, devinfo.u.array.Members[i]);
1018 
1019 	return result;
1020 }
1021 
1022 static void hpt_stop_tasks(PVBUS_EXT vbus_ext)
1023 {
1024 	DEVICEID ids[32];
1025 	int i, count;
1026 
1027 	count = hpt_get_logical_devices((DEVICEID *)&ids, sizeof(ids)/sizeof(ids[0]));
1028 
1029 	for (i=0; i<count; i++)
1030 		__hpt_stop_tasks(vbus_ext, ids[i]);
1031 }
1032 
1033 static	d_open_t	hpt_open;
1034 static	d_close_t	hpt_close;
1035 static	d_ioctl_t	hpt_ioctl;
1036 static  int 		hpt_rescan_bus(void);
1037 
1038 static struct cdevsw hpt_cdevsw = {
1039 	.d_open =	hpt_open,
1040 	.d_close =	hpt_close,
1041 	.d_ioctl =	hpt_ioctl,
1042 	.d_name =	driver_name,
1043 	.d_version =	D_VERSION,
1044 };
1045 
1046 static struct intr_config_hook hpt_ich;
1047 
1048 /*
1049  * hpt_final_init is called, via the intr_config_hook, after all hpt_attach() calls have completed.
1050  */
1051 static void hpt_final_init(void *dummy)
1052 {
1053 	int       i,unit_number=0;
1054 	PVBUS_EXT vbus_ext;
1055 	PVBUS vbus;
1056 	PHBA hba;
1057 
1058 	/* Clear the config hook */
1059 	config_intrhook_disestablish(&hpt_ich);
1060 
1061 	/* allocate memory */
1062 	i = 0;
1063 	ldm_for_each_vbus(vbus, vbus_ext) {
1064 		if (hpt_alloc_mem(vbus_ext)) {
1065 			os_printk("out of memory");
1066 			return;
1067 		}
1068 		i++;
1069 	}
1070 
1071 	if (!i) {
1072 		if (bootverbose)
1073 			os_printk("no controller detected.");
1074 		return;
1075 	}
1076 
1077 	/* initializing hardware */
1078 	ldm_for_each_vbus(vbus, vbus_ext) {
1079 		/* make timer available here */
1080 		mtx_init(&vbus_ext->lock, "hptsleeplock", NULL, MTX_DEF);
1081 		callout_init_mtx(&vbus_ext->timer, &vbus_ext->lock, 0);
1082 		if (hpt_init_vbus(vbus_ext)) {
1083 			os_printk("fail to initialize hardware");
1084 			break; /* FIXME */
1085 		}
1086 	}
1087 
1088 	/* register CAM interface */
1089 	ldm_for_each_vbus(vbus, vbus_ext) {
1090 		struct cam_devq *devq;
1091 		struct ccb_setasync	ccb;
1092 
1093 		if (bus_dma_tag_create(NULL,/* parent */
1094 				4,	/* alignment */
1095 				BUS_SPACE_MAXADDR_32BIT+1, /* boundary */
1096 				BUS_SPACE_MAXADDR,	/* lowaddr */
1097 				BUS_SPACE_MAXADDR, 	/* highaddr */
1098 				NULL, NULL, 		/* filter, filterarg */
1099 				PAGE_SIZE * (os_max_sg_descriptors-1),	/* maxsize */
1100 				os_max_sg_descriptors,	/* nsegments */
1101 				0x10000,	/* maxsegsize */
1102 				BUS_DMA_WAITOK,		/* flags */
1103 				busdma_lock_mutex,	/* lockfunc */
1104 				&vbus_ext->lock,		/* lockfuncarg */
1105 				&vbus_ext->io_dmat	/* tag */))
1106 		{
1107 			return ;
1108 		}
1109 
1110 		for (i=0; i<os_max_queue_comm; i++) {
1111 			POS_CMDEXT ext = (POS_CMDEXT)malloc(sizeof(OS_CMDEXT), M_DEVBUF, M_WAITOK);
1112 			if (!ext) {
1113 				os_printk("Can't alloc cmdext(%d)", i);
1114 				return ;
1115 			}
1116 			ext->vbus_ext = vbus_ext;
1117 			ext->next = vbus_ext->cmdext_list;
1118 			vbus_ext->cmdext_list = ext;
1119 
1120 			if (bus_dmamap_create(vbus_ext->io_dmat, 0, &ext->dma_map)) {
1121 				os_printk("Can't create dma map(%d)", i);
1122 				return ;
1123 			}
1124 			callout_init_mtx(&ext->timeout, &vbus_ext->lock, 0);
1125 		}
1126 
1127 		if ((devq = cam_simq_alloc(os_max_queue_comm)) == NULL) {
1128 			os_printk("cam_simq_alloc failed");
1129 			return ;
1130 		}
1131 		vbus_ext->sim = cam_sim_alloc(hpt_action, hpt_poll, driver_name,
1132 				vbus_ext, unit_number, &vbus_ext->lock, os_max_queue_comm, /*tagged*/8,  devq);
1133 		unit_number++;
1134 		if (!vbus_ext->sim) {
1135 			os_printk("cam_sim_alloc failed");
1136 			cam_simq_free(devq);
1137 			return ;
1138 		}
1139 
1140 		hpt_lock_vbus(vbus_ext);
1141 		if (xpt_bus_register(vbus_ext->sim, NULL, 0) != CAM_SUCCESS) {
1142 			hpt_unlock_vbus(vbus_ext);
1143 			os_printk("xpt_bus_register failed");
1144 			cam_sim_free(vbus_ext->sim, /*free devq*/ TRUE);
1145 			vbus_ext->sim = NULL;
1146 			return ;
1147 		}
1148 
1149 		if (xpt_create_path(&vbus_ext->path, /*periph */ NULL,
1150 				cam_sim_path(vbus_ext->sim), CAM_TARGET_WILDCARD,
1151 				CAM_LUN_WILDCARD) != CAM_REQ_CMP)
1152 		{
1153 			hpt_unlock_vbus(vbus_ext);
1154 			os_printk("xpt_create_path failed");
1155 			xpt_bus_deregister(cam_sim_path(vbus_ext->sim));
1156 			cam_sim_free(vbus_ext->sim, /*free_devq*/TRUE);
1157 			vbus_ext->sim = NULL;
1158 			return ;
1159 		}
1160 
1161 		memset(&ccb, 0, sizeof(ccb));
1162 		xpt_setup_ccb(&ccb.ccb_h, vbus_ext->path, /*priority*/5);
1163 		ccb.ccb_h.func_code = XPT_SASYNC_CB;
1164 		ccb.event_enable = AC_LOST_DEVICE;
1165 		ccb.callback = hpt_async;
1166 		ccb.callback_arg = vbus_ext;
1167 		xpt_action((union ccb *)&ccb);
1168 		hpt_unlock_vbus(vbus_ext);
1169 
1170 		for (hba = vbus_ext->hba_list; hba; hba = hba->next) {
1171 			int rid = 0;
1172 			if ((hba->irq_res = bus_alloc_resource_any(hba->pcidev,
1173 				SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE)) == NULL)
1174 			{
1175 				os_printk("can't allocate interrupt");
1176 				return ;
1177 			}
1178 			if (bus_setup_intr(hba->pcidev, hba->irq_res, INTR_TYPE_CAM | INTR_MPSAFE,
1179 				NULL, hpt_pci_intr, vbus_ext, &hba->irq_handle))
1180 			{
1181 				os_printk("can't set up interrupt");
1182 				return ;
1183 			}
1184 			hba->ldm_adapter.him->intr_control(hba->ldm_adapter.him_handle, HPT_TRUE);
1185 
1186 		}
1187 
1188 		vbus_ext->shutdown_eh = EVENTHANDLER_REGISTER(shutdown_final,
1189 									hpt_shutdown_vbus, vbus_ext, SHUTDOWN_PRI_DEFAULT);
1190 		if (!vbus_ext->shutdown_eh)
1191 			os_printk("Shutdown event registration failed");
1192 	}
1193 
1194 	ldm_for_each_vbus(vbus, vbus_ext) {
1195 		TASK_INIT(&vbus_ext->worker, 0, (task_fn_t *)hpt_do_tasks, vbus_ext);
1196 		if (vbus_ext->tasks)
1197 			TASK_ENQUEUE(&vbus_ext->worker);
1198 	}
1199 
1200 	make_dev(&hpt_cdevsw, DRIVER_MINOR, UID_ROOT, GID_OPERATOR,
1201 	    S_IRUSR | S_IWUSR, "%s", driver_name);
1202 }
1203 
1204 #if defined(KLD_MODULE)
1205 
1206 typedef struct driverlink *driverlink_t;
1207 struct driverlink {
1208 	kobj_class_t	driver;
1209 	TAILQ_ENTRY(driverlink) link;	/* list of drivers in devclass */
1210 };
1211 
1212 typedef TAILQ_HEAD(driver_list, driverlink) driver_list_t;
1213 
1214 struct devclass {
1215 	TAILQ_ENTRY(devclass) link;
1216 	devclass_t	parent;		/* parent in devclass hierarchy */
1217 	driver_list_t	drivers;     /* bus devclasses store drivers for bus */
1218 	char		*name;
1219 	device_t	*devices;	/* array of devices indexed by unit */
1220 	int		maxunit;	/* size of devices array */
1221 };
1222 
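/*
 * When built as a module, swap this driver's entry to the front of the pci
 * devclass driver list, presumably so it is offered devices ahead of any
 * identically named driver already compiled into the kernel.
 */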
1223 static void override_kernel_driver(void)
1224 {
1225 	driverlink_t dl, dlfirst;
1226 	driver_t *tmpdriver;
1227 	devclass_t dc = devclass_find("pci");
1228 
1229 	if (dc){
1230 		dlfirst = TAILQ_FIRST(&dc->drivers);
1231 		for (dl = dlfirst; dl; dl = TAILQ_NEXT(dl, link)) {
1232 			if(strcmp(dl->driver->name, driver_name) == 0) {
1233 				tmpdriver=dl->driver;
1234 				dl->driver=dlfirst->driver;
1235 				dlfirst->driver=tmpdriver;
1236 				break;
1237 			}
1238 		}
1239 	}
1240 }
1241 
1242 #else
1243 #define override_kernel_driver()
1244 #endif
1245 
1246 static void hpt_init(void *dummy)
1247 {
1248 	if (bootverbose)
1249 		os_printk("%s %s", driver_name_long, driver_ver);
1250 
1251 	override_kernel_driver();
1252 	init_config();
1253 
1254 	hpt_ich.ich_func = hpt_final_init;
1255 	hpt_ich.ich_arg = NULL;
1256 	if (config_intrhook_establish(&hpt_ich) != 0) {
1257 		printf("%s: cannot establish configuration hook\n",
1258 		    driver_name_long);
1259 	}
1260 
1261 }
1262 SYSINIT(hptinit, SI_SUB_CONFIGURE, SI_ORDER_FIRST, hpt_init, NULL);
1263 
1264 /*
1265  * CAM driver interface
1266  */
1267 static device_method_t driver_methods[] = {
1268 	/* Device interface */
1269 	DEVMETHOD(device_probe,		hpt_probe),
1270 	DEVMETHOD(device_attach,	hpt_attach),
1271 	DEVMETHOD(device_detach,	hpt_detach),
1272 	DEVMETHOD(device_shutdown,	hpt_shutdown),
1273 	{ 0, 0 }
1274 };
1275 
1276 static driver_t hpt_pci_driver = {
1277 	driver_name,
1278 	driver_methods,
1279 	sizeof(HBA)
1280 };
1281 
1282 #ifndef TARGETNAME
1283 #error "no TARGETNAME found"
1284 #endif
1285 
1286 /* indirection macros so that TARGETNAME is expanded before being passed to DRIVER_MODULE() and friends */
1287 #define __DRIVER_MODULE(p1, p2, p3, p4, p5) DRIVER_MODULE(p1, p2, p3, p4, p5)
1288 #define __MODULE_VERSION(p1, p2) MODULE_VERSION(p1, p2)
1289 #define __MODULE_DEPEND(p1, p2, p3, p4, p5) MODULE_DEPEND(p1, p2, p3, p4, p5)
1290 __DRIVER_MODULE(TARGETNAME, pci, hpt_pci_driver, 0, 0);
1291 __MODULE_VERSION(TARGETNAME, 1);
1292 __MODULE_DEPEND(TARGETNAME, cam, 1, 1, 1);
1293 
1294 static int hpt_open(struct cdev *dev, int flags, int devtype, struct thread *td)
1295 {
1296 	return 0;
1297 }
1298 
1299 static int hpt_close(struct cdev *dev, int flags, int devtype, struct thread *td)
1300 {
1301 	return 0;
1302 }
1303 
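/*
 * hpt_ioctl: character-device ioctl entry point.  Copies the caller's
 * HPT_IOCTL_PARAM buffers into kernel memory, dispatches the request via
 * hpt_do_ioctl() and copies the results (and returned byte count) back out.
 */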
1304 static int hpt_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag, struct thread *td)
1305 {
1306 	PHPT_IOCTL_PARAM piop=(PHPT_IOCTL_PARAM)data;
1307 	IOCTL_ARG ioctl_args;
1308 	HPT_U32 bytesReturned = 0;
1309 
1310 	switch (cmd){
1311 	case HPT_DO_IOCONTROL:
1312 	{
1313 		if (piop->Magic == HPT_IOCTL_MAGIC || piop->Magic == HPT_IOCTL_MAGIC32) {
1314 			KdPrint(("<8>ioctl=%x in=%p len=%d out=%p len=%d\n",
1315 				piop->dwIoControlCode,
1316 				piop->lpInBuffer,
1317 				piop->nInBufferSize,
1318 				piop->lpOutBuffer,
1319 				piop->nOutBufferSize));
1320 
1321 		memset(&ioctl_args, 0, sizeof(ioctl_args));
1322 
1323 		ioctl_args.dwIoControlCode = piop->dwIoControlCode;
1324 		ioctl_args.nInBufferSize = piop->nInBufferSize;
1325 		ioctl_args.nOutBufferSize = piop->nOutBufferSize;
1326 		ioctl_args.lpBytesReturned = &bytesReturned;
1327 
1328 		if (ioctl_args.nInBufferSize) {
1329 			ioctl_args.lpInBuffer = malloc(ioctl_args.nInBufferSize, M_DEVBUF, M_WAITOK);
1330 			if (!ioctl_args.lpInBuffer)
1331 				goto invalid;
1332 			if (copyin((void*)piop->lpInBuffer,
1333 					ioctl_args.lpInBuffer, piop->nInBufferSize))
1334 				goto invalid;
1335 		}
1336 
1337 		if (ioctl_args.nOutBufferSize) {
1338 			ioctl_args.lpOutBuffer = malloc(ioctl_args.nOutBufferSize, M_DEVBUF, M_WAITOK | M_ZERO);
1339 			if (!ioctl_args.lpOutBuffer)
1340 				goto invalid;
1341 		}
1342 
1343 		hpt_do_ioctl(&ioctl_args);
1344 
1345 		if (ioctl_args.result==HPT_IOCTL_RESULT_OK) {
1346 			if (piop->nOutBufferSize) {
1347 				if (copyout(ioctl_args.lpOutBuffer,
1348 					(void*)piop->lpOutBuffer, piop->nOutBufferSize))
1349 					goto invalid;
1350 			}
1351 			if (piop->lpBytesReturned) {
1352 				if (copyout(&bytesReturned,
1353 					(void*)piop->lpBytesReturned, sizeof(HPT_U32)))
1354 					goto invalid;
1355 			}
1356 			if (ioctl_args.lpInBuffer) free(ioctl_args.lpInBuffer, M_DEVBUF);
1357 			if (ioctl_args.lpOutBuffer) free(ioctl_args.lpOutBuffer, M_DEVBUF);
1358 			return 0;
1359 		}
1360 invalid:
1361 		if (ioctl_args.lpInBuffer) free(ioctl_args.lpInBuffer, M_DEVBUF);
1362 		if (ioctl_args.lpOutBuffer) free(ioctl_args.lpOutBuffer, M_DEVBUF);
1363 		return EFAULT;
1364 	}
1365 	return EFAULT;
1366 	}
1367 
1368 	case HPT_SCAN_BUS:
1369 	{
1370 		return hpt_rescan_bus();
1371 	}
1372 	default:
1373 		KdPrint(("invalid command!"));
1374 		return EFAULT;
1375 	}
1376 
1377 }
1378 
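/*
 * hpt_rescan_bus: ask CAM to rescan every SIM we registered; xpt_rescan()
 * consumes the CCB, so it is only freed here on the error path.
 */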
1379 static int	hpt_rescan_bus(void)
1380 {
1381 	union ccb			*ccb;
1382 	PVBUS 				vbus;
1383 	PVBUS_EXT			vbus_ext;
1384 
1385 	ldm_for_each_vbus(vbus, vbus_ext) {
1386 		if ((ccb = xpt_alloc_ccb()) == NULL)
1387 		{
1388 			return(ENOMEM);
1389 		}
1390 		if (xpt_create_path(&ccb->ccb_h.path, NULL, cam_sim_path(vbus_ext->sim),
1391 			CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP)
1392 		{
1393 			xpt_free_ccb(ccb);
1394 			return(EIO);
1395 		}
1396 		xpt_rescan(ccb);
1397 	}
1398 	return(0);
1399 }
1400 
1401