xref: /freebsd/sys/dev/hpt27xx/hpt27xx_osm_bsd.c (revision 63f537551380d2dab29fa402ad1269feae17e594)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause
3  *
4  * Copyright (c) 2011 HighPoint Technologies, Inc.
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26  * SUCH DAMAGE.
27  */
28 
29 #include <dev/hpt27xx/hpt27xx_config.h>
30 
31 #include <dev/hpt27xx/os_bsd.h>
32 #include <dev/hpt27xx/hptintf.h>
33 
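/*
 * Match the PCI vendor/device ID of 'dev' against the IDs supported by each
 * registered HIM.  When 'scan' is set, get_controller_count() is also called,
 * presumably so the HIM can account for every matching controller it sees.
 */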
34 static HIM *hpt_match(device_t dev, int scan)
35 {
36 	PCI_ID pci_id;
37 	HIM *him;
38 	int i;
39 
40 	for (him = him_list; him; him = him->next) {
41 		for (i=0; him->get_supported_device_id(i, &pci_id); i++) {
42 			if (scan && him->get_controller_count)
43 				him->get_controller_count(&pci_id,0,0);
44 			if ((pci_get_vendor(dev) == pci_id.vid) &&
45 				(pci_get_device(dev) == pci_id.did)){
46 				return (him);
47 			}
48 		}
49 	}
50 	return (NULL);
51 }
52 
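/* Bus probe method: succeed if any registered HIM supports this PCI device. */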
53 static int hpt_probe(device_t dev)
54 {
55 	HIM *him;
56 
57 	him = hpt_match(dev, 0);
58 	if (him != NULL) {
59 		KdPrint(("hpt_probe: adapter at PCI %d:%d:%d, IRQ %d",
60 			pci_get_bus(dev), pci_get_slot(dev), pci_get_function(dev), pci_get_irq(dev)
61 			));
62 		device_set_desc(dev, him->name);
63 		return (BUS_PROBE_DEFAULT);
64 	}
65 
66 	return (ENXIO);
67 }
68 
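/*
 * Bus attach method: create the HIM adapter instance, register it with the
 * LDM layer (creating a new virtual bus if necessary) and link this HBA into
 * the per-vbus HBA list.
 */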
69 static int hpt_attach(device_t dev)
70 {
71 	PHBA hba = (PHBA)device_get_softc(dev);
72 	HIM *him;
73 	PCI_ID pci_id;
74 	HPT_UINT size;
75 	PVBUS vbus;
76 	PVBUS_EXT vbus_ext;
77 
78 	KdPrint(("hpt_attach(%d/%d/%d)", pci_get_bus(dev), pci_get_slot(dev), pci_get_function(dev)));
79 
80 	him = hpt_match(dev, 1);
81 	hba->ext_type = EXT_TYPE_HBA;
82 	hba->ldm_adapter.him = him;
83 	pci_enable_busmaster(dev);
84 
85 	pci_id.vid = pci_get_vendor(dev);
86 	pci_id.did = pci_get_device(dev);
87 	pci_id.rev = pci_get_revid(dev);
88 	pci_id.subsys = (HPT_U32)(pci_get_subdevice(dev)) << 16 | pci_get_subvendor(dev);
89 
90 	size = him->get_adapter_size(&pci_id);
91 	hba->ldm_adapter.him_handle = malloc(size, M_DEVBUF, M_WAITOK);
92 	if (!hba->ldm_adapter.him_handle)
93 		return ENXIO;
94 
95 	hba->pcidev = dev;
96 	hba->pciaddr.tree = 0;
97 	hba->pciaddr.bus = pci_get_bus(dev);
98 	hba->pciaddr.device = pci_get_slot(dev);
99 	hba->pciaddr.function = pci_get_function(dev);
100 
101 	if (!him->create_adapter(&pci_id, hba->pciaddr, hba->ldm_adapter.him_handle, hba)) {
102 		free(hba->ldm_adapter.him_handle, M_DEVBUF);
103 		return ENXIO;
104 	}
105 
106 	os_printk("adapter at PCI %d:%d:%d, IRQ %d",
107 		hba->pciaddr.bus, hba->pciaddr.device, hba->pciaddr.function, pci_get_irq(dev));
108 
109 	if (!ldm_register_adapter(&hba->ldm_adapter)) {
110 		size = ldm_get_vbus_size();
111 		vbus_ext = malloc(sizeof(VBUS_EXT) + size, M_DEVBUF, M_WAITOK);
112 		if (!vbus_ext) {
113 			free(hba->ldm_adapter.him_handle, M_DEVBUF);
114 			return ENXIO;
115 		}
116 		memset(vbus_ext, 0, sizeof(VBUS_EXT));
117 		vbus_ext->ext_type = EXT_TYPE_VBUS;
118 		ldm_create_vbus((PVBUS)vbus_ext->vbus, vbus_ext);
119 		ldm_register_adapter(&hba->ldm_adapter);
120 	}
121 
122 	ldm_for_each_vbus(vbus, vbus_ext) {
123 		if (hba->ldm_adapter.vbus==vbus) {
124 			hba->vbus_ext = vbus_ext;
125 			hba->next = vbus_ext->hba_list;
126 			vbus_ext->hba_list = hba;
127 			break;
128 		}
129 	}
130 	return 0;
131 }
132 
133 /*
134  * It might be better to use bus_dmamem_alloc() to allocate DMA memory,
135  * but that approach currently has some problems (alignment, etc.).
136  */
137 static __inline void *__get_free_pages(int order)
138 {
139 	/* don't use low memory - other devices may get starved */
140 	return contigmalloc(PAGE_SIZE<<order,
141 			M_DEVBUF, M_WAITOK, BUS_SPACE_MAXADDR_24BIT, BUS_SPACE_MAXADDR, PAGE_SIZE, 0);
142 }
143 
144 static __inline void free_pages(void *p, int order)
145 {
146 	contigfree(p, PAGE_SIZE<<order, M_DEVBUF);
147 }
148 
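/*
 * Pre-allocate the memory described by the LDM freelists: plain freelist
 * entries, page-based DMA freelist entries (recording their bus addresses),
 * and the DMA pool cache pages.
 */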
149 static int hpt_alloc_mem(PVBUS_EXT vbus_ext)
150 {
151 	PHBA hba;
152 	struct freelist *f;
153 	HPT_UINT i;
154 	void **p;
155 
156 	for (hba = vbus_ext->hba_list; hba; hba = hba->next)
157 		hba->ldm_adapter.him->get_meminfo(hba->ldm_adapter.him_handle);
158 
159 	ldm_get_mem_info((PVBUS)vbus_ext->vbus, 0);
160 
161 	for (f=vbus_ext->freelist_head; f; f=f->next) {
162 		KdPrint(("%s: %d*%d=%d bytes",
163 			f->tag, f->count, f->size, f->count*f->size));
164 		for (i=0; i<f->count; i++) {
165 			p = (void **)malloc(f->size, M_DEVBUF, M_WAITOK);
166 			if (!p)	return (ENXIO);
167 			*p = f->head;
168 			f->head = p;
169 		}
170 	}
171 
172 	for (f=vbus_ext->freelist_dma_head; f; f=f->next) {
173 		int order, size, j;
174 
175 		HPT_ASSERT((f->size & (f->alignment-1))==0);
176 
177 		for (order=0, size=PAGE_SIZE; size<f->size; order++, size<<=1)
178 			;
179 
180 		KdPrint(("%s: %d*%d=%d bytes, order %d",
181 			f->tag, f->count, f->size, f->count*f->size, order));
182 		HPT_ASSERT(f->alignment<=PAGE_SIZE);
183 
184 		for (i=0; i<f->count;) {
185 			p = (void **)__get_free_pages(order);
186 			if (!p) return -1;
187 			for (j = size/f->size; j && i<f->count; i++,j--) {
188 				*p = f->head;
189 				*(BUS_ADDRESS *)(p+1) = (BUS_ADDRESS)vtophys(p);
190 				f->head = p;
191 				p = (void **)((unsigned long)p + f->size);
192 			}
193 		}
194 	}
195 
196 	HPT_ASSERT(PAGE_SIZE==DMAPOOL_PAGE_SIZE);
197 
198 	for (i=0; i<os_max_cache_pages; i++) {
199 		p = (void **)__get_free_pages(0);
200 		if (!p) return -1;
201 		HPT_ASSERT(((HPT_UPTR)p & (DMAPOOL_PAGE_SIZE-1))==0);
202 		dmapool_put_page((PVBUS)vbus_ext->vbus, p, (BUS_ADDRESS)vtophys(p));
203 	}
204 
205 	return 0;
206 }
207 
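/* Release everything allocated by hpt_alloc_mem(). */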
208 static void hpt_free_mem(PVBUS_EXT vbus_ext)
209 {
210 	struct freelist *f;
211 	void *p;
212 	int i;
213 	BUS_ADDRESS bus;
214 
215 	for (f=vbus_ext->freelist_head; f; f=f->next) {
216 #if DBG
217 		if (f->count!=f->reserved_count) {
218 			KdPrint(("memory leak for freelist %s (%d/%d)", f->tag, f->count, f->reserved_count));
219 		}
220 #endif
221 		while ((p=freelist_get(f)))
222 			free(p, M_DEVBUF);
223 	}
224 
225 	for (i=0; i<os_max_cache_pages; i++) {
226 		p = dmapool_get_page((PVBUS)vbus_ext->vbus, &bus);
227 		HPT_ASSERT(p);
228 		free_pages(p, 0);
229 	}
230 
231 	for (f=vbus_ext->freelist_dma_head; f; f=f->next) {
232 		int order, size;
233 #if DBG
234 		if (f->count!=f->reserved_count) {
235 			KdPrint(("memory leak for dma freelist %s (%d/%d)", f->tag, f->count, f->reserved_count));
236 		}
237 #endif
238 		for (order=0, size=PAGE_SIZE; size<f->size; order++, size<<=1) ;
239 
240 		while ((p=freelist_get_dma(f, &bus))) {
241 			if (order)
242 				free_pages(p, order);
243 			else {
244 				/* can't free immediately since other blocks in this page may still be in the list */
245 				if (((HPT_UPTR)p & (PAGE_SIZE-1))==0)
246 					dmapool_put_page((PVBUS)vbus_ext->vbus, p, bus);
247 			}
248 		}
249 	}
250 
251 	while ((p = dmapool_get_page((PVBUS)vbus_ext->vbus, &bus)))
252 		free_pages(p, 0);
253 }
254 
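/* Initialize every HBA on the virtual bus, then the virtual bus itself. */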
255 static int hpt_init_vbus(PVBUS_EXT vbus_ext)
256 {
257 	PHBA hba;
258 
259 	for (hba = vbus_ext->hba_list; hba; hba = hba->next)
260 		if (!hba->ldm_adapter.him->initialize(hba->ldm_adapter.him_handle)) {
261 			KdPrint(("failed to initialize %p", hba));
262 			return -1;
263 		}
264 
265 	ldm_initialize_vbus((PVBUS)vbus_ext->vbus, &vbus_ext->hba_list->ldm_adapter);
266 	return 0;
267 }
268 
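/*
 * Completion callback for flush commands: while an array transform is in
 * progress the command is re-queued to the transform target; otherwise the
 * waiter's flag is set and it is woken up.
 */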
269 static void hpt_flush_done(PCOMMAND pCmd)
270 {
271 	PVDEV vd = pCmd->target;
272 
273 	if (mIsArray(vd->type) && vd->u.array.transform && vd!=vd->u.array.transform->target) {
274 		vd = vd->u.array.transform->target;
275 		HPT_ASSERT(vd);
276 		pCmd->target = vd;
277 		pCmd->Result = RETURN_PENDING;
278 		vdev_queue_cmd(pCmd);
279 		return;
280 	}
281 
282 	*(int *)pCmd->priv = 1;
283 	wakeup(pCmd);
284 }
285 
286 /*
287  * flush a vdev (without retry).
288  */
289 static int hpt_flush_vdev(PVBUS_EXT vbus_ext, PVDEV vd)
290 {
291 	PCOMMAND pCmd;
292 	int result = 0, done;
293 	HPT_UINT count;
294 
295 	KdPrint(("flushing dev %p", vd));
296 
297 	hpt_lock_vbus(vbus_ext);
298 
299 	if (mIsArray(vd->type) && vd->u.array.transform)
300 		count = max(vd->u.array.transform->source->cmds_per_request,
301 					vd->u.array.transform->target->cmds_per_request);
302 	else
303 		count = vd->cmds_per_request;
304 
305 	pCmd = ldm_alloc_cmds(vd->vbus, count);
306 
307 	if (!pCmd) {
308 		hpt_unlock_vbus(vbus_ext);
309 		return -1;
310 	}
311 
312 	pCmd->type = CMD_TYPE_FLUSH;
313 	pCmd->flags.hard_flush = 1;
314 	pCmd->target = vd;
315 	pCmd->done = hpt_flush_done;
316 	done = 0;
317 	pCmd->priv = &done;
318 
319 	ldm_queue_cmd(pCmd);
320 
321 	if (!done) {
322 		while (hpt_sleep(vbus_ext, pCmd, PPAUSE, "hptfls", HPT_OSM_TIMEOUT)) {
323 			ldm_reset_vbus(vd->vbus);
324 		}
325 	}
326 
327 	KdPrint(("flush result %d", pCmd->Result));
328 
329 	if (pCmd->Result!=RETURN_SUCCESS)
330 		result = -1;
331 
332 	ldm_free_cmds(pCmd);
333 
334 	hpt_unlock_vbus(vbus_ext);
335 
336 	return result;
337 }
338 
339 static void hpt_stop_tasks(PVBUS_EXT vbus_ext);
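/*
 * Shutdown handler for a virtual bus: stop background tasks, flush every
 * target, shut down the LDM layer and release all per-vbus resources.
 */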
340 static void hpt_shutdown_vbus(PVBUS_EXT vbus_ext, int howto)
341 {
342 	PVBUS     vbus = (PVBUS)vbus_ext->vbus;
343 	PHBA hba;
344 	int i;
345 
346 	KdPrint(("hpt_shutdown_vbus"));
347 
348 	/* stop all ctl tasks and disable the worker taskqueue */
349 	hpt_stop_tasks(vbus_ext);
350 	vbus_ext->worker.ta_context = 0;
351 
352 	/* flush devices */
353 	for (i=0; i<osm_max_targets; i++) {
354 		PVDEV vd = ldm_find_target(vbus, i);
355 		if (vd) {
356 			/* retry once */
357 			if (hpt_flush_vdev(vbus_ext, vd))
358 				hpt_flush_vdev(vbus_ext, vd);
359 		}
360 	}
361 
362 	hpt_lock_vbus(vbus_ext);
363 	ldm_shutdown(vbus);
364 	hpt_unlock_vbus(vbus_ext);
365 
366 	ldm_release_vbus(vbus);
367 
368 	for (hba=vbus_ext->hba_list; hba; hba=hba->next)
369 		bus_teardown_intr(hba->pcidev, hba->irq_res, hba->irq_handle);
370 
371 	hpt_free_mem(vbus_ext);
372 
373 	while ((hba=vbus_ext->hba_list)) {
374 		vbus_ext->hba_list = hba->next;
375 		free(hba->ldm_adapter.him_handle, M_DEVBUF);
376 	}
377 	callout_drain(&vbus_ext->timer);
378 	mtx_destroy(&vbus_ext->lock);
379 	free(vbus_ext, M_DEVBUF);
380 	KdPrint(("hpt_shutdown_vbus done"));
381 }
382 
383 static void __hpt_do_tasks(PVBUS_EXT vbus_ext)
384 {
385 	OSM_TASK *tasks;
386 
387 	tasks = vbus_ext->tasks;
388 	vbus_ext->tasks = 0;
389 
390 	while (tasks) {
391 		OSM_TASK *t = tasks;
392 		tasks = t->next;
393 		t->next = 0;
394 		t->func(vbus_ext->vbus, t->data);
395 	}
396 }
397 
398 static void hpt_do_tasks(PVBUS_EXT vbus_ext, int pending)
399 {
400 	if(vbus_ext){
401 		hpt_lock_vbus(vbus_ext);
402 		__hpt_do_tasks(vbus_ext);
403 		hpt_unlock_vbus(vbus_ext);
404 	}
405 }
406 
407 static void hpt_action(struct cam_sim *sim, union ccb *ccb);
408 static void hpt_poll(struct cam_sim *sim);
409 static void hpt_async(void * callback_arg, u_int32_t code, struct cam_path * path, void * arg);
410 static void hpt_pci_intr(void *arg);
411 
412 static __inline POS_CMDEXT cmdext_get(PVBUS_EXT vbus_ext)
413 {
414 	POS_CMDEXT p = vbus_ext->cmdext_list;
415 	if (p)
416 		vbus_ext->cmdext_list = p->next;
417 	return p;
418 }
419 
420 static __inline void cmdext_put(POS_CMDEXT p)
421 {
422 	p->next = p->vbus_ext->cmdext_list;
423 	p->vbus_ext->cmdext_list = p;
424 }
425 
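/* Command timeout handler: reset the whole virtual bus. */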
426 static void hpt_timeout(void *arg)
427 {
428 	PCOMMAND pCmd = (PCOMMAND)arg;
429 	POS_CMDEXT ext = (POS_CMDEXT)pCmd->priv;
430 
431 	KdPrint(("pCmd %p timeout", pCmd));
432 
433 	ldm_reset_vbus((PVBUS)ext->vbus_ext->vbus);
434 }
435 
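/*
 * I/O completion callback: stop the timeout, translate the LDM result code
 * into a CAM status, sync and unload the DMA map and complete the CCB.
 */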
436 static void os_cmddone(PCOMMAND pCmd)
437 {
438 	POS_CMDEXT ext = (POS_CMDEXT)pCmd->priv;
439 	union ccb *ccb = ext->ccb;
440 
441 	KdPrint(("<8>os_cmddone(%p, %d)", pCmd, pCmd->Result));
442 	callout_stop(&ext->timeout);
443 	switch(pCmd->Result) {
444 	case RETURN_SUCCESS:
445 		ccb->ccb_h.status = CAM_REQ_CMP;
446 		break;
447 	case RETURN_BAD_DEVICE:
448 		ccb->ccb_h.status = CAM_DEV_NOT_THERE;
449 		break;
450 	case RETURN_DEVICE_BUSY:
451 		ccb->ccb_h.status = CAM_BUSY;
452 		break;
453 	case RETURN_INVALID_REQUEST:
454 		ccb->ccb_h.status = CAM_REQ_INVALID;
455 		break;
456 	case RETURN_SELECTION_TIMEOUT:
457 		ccb->ccb_h.status = CAM_SEL_TIMEOUT;
458 		break;
459 	case RETURN_RETRY:
460 		ccb->ccb_h.status = CAM_BUSY;
461 		break;
462 	default:
463 		ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
464 		break;
465 	}
466 
467 	if (pCmd->flags.data_in) {
468 		bus_dmamap_sync(ext->vbus_ext->io_dmat, ext->dma_map, BUS_DMASYNC_POSTREAD);
469 	}
470 	else if (pCmd->flags.data_out) {
471 		bus_dmamap_sync(ext->vbus_ext->io_dmat, ext->dma_map, BUS_DMASYNC_POSTWRITE);
472 	}
473 
474 	bus_dmamap_unload(ext->vbus_ext->io_dmat, ext->dma_map);
475 
476 	cmdext_put(ext);
477 	ldm_free_cmds(pCmd);
478 	xpt_done(ccb);
479 }
480 
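/*
 * Build a logical SG list covering the CCB data buffer; physical SG lists
 * are always supplied up front via hpt_io_dmamap_callback().
 */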
481 static int os_buildsgl(PCOMMAND pCmd, PSG pSg, int logical)
482 {
483 	POS_CMDEXT ext = (POS_CMDEXT)pCmd->priv;
484 	union ccb *ccb = ext->ccb;
485 
486 	if(logical)	{
487 		os_set_sgptr(pSg, (HPT_U8 *)ccb->csio.data_ptr);
488 		pSg->size = ccb->csio.dxfer_len;
489 		pSg->eot = 1;
490 		return TRUE;
491 	}
492 	/* since we provide the physical SG list ourselves, nobody should ask us to build one */
493 	HPT_ASSERT(0);
494 	return FALSE;
495 }
496 
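/*
 * busdma callback: fill in the command's physical SG list from the DMA
 * segments, sync the buffers, arm the timeout and queue the command.
 */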
497 static void hpt_io_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
498 {
499 	PCOMMAND pCmd = (PCOMMAND)arg;
500 	POS_CMDEXT ext = (POS_CMDEXT)pCmd->priv;
501 	PSG psg = pCmd->psg;
502 	int idx;
503 
504 	HPT_ASSERT(pCmd->flags.physical_sg);
505 
506 	if (error)
507 		panic("busdma error");
508 
509 	HPT_ASSERT(nsegs<=os_max_sg_descriptors);
510 
511 	if (nsegs != 0) {
512 		for (idx = 0; idx < nsegs; idx++, psg++) {
513 			psg->addr.bus = segs[idx].ds_addr;
514 			psg->size = segs[idx].ds_len;
515 			psg->eot = 0;
516 		}
517 		psg[-1].eot = 1;
518 
519 		if (pCmd->flags.data_in) {
520 			bus_dmamap_sync(ext->vbus_ext->io_dmat, ext->dma_map,
521 			    BUS_DMASYNC_PREREAD);
522 		}
523 		else if (pCmd->flags.data_out) {
524 			bus_dmamap_sync(ext->vbus_ext->io_dmat, ext->dma_map,
525 			    BUS_DMASYNC_PREWRITE);
526 		}
527 	}
528 	callout_reset(&ext->timeout, HPT_OSM_TIMEOUT, hpt_timeout, pCmd);
529 	ldm_queue_cmd(pCmd);
530 }
531 
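/*
 * Handle an XPT_SCSI_IO request.  Simple commands (INQUIRY, READ CAPACITY,
 * REPORT LUNS, ...) are emulated here; reads, writes and verifies are
 * translated into LDM commands and queued to the hardware.
 */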
532 static void hpt_scsi_io(PVBUS_EXT vbus_ext, union ccb *ccb)
533 {
534 	PVBUS vbus = (PVBUS)vbus_ext->vbus;
535 	PVDEV vd;
536 	PCOMMAND pCmd;
537 	POS_CMDEXT ext;
538 	HPT_U8 *cdb;
539 	int error;
540 
541 	if (ccb->ccb_h.flags & CAM_CDB_POINTER)
542 		cdb = ccb->csio.cdb_io.cdb_ptr;
543 	else
544 		cdb = ccb->csio.cdb_io.cdb_bytes;
545 
546 	KdPrint(("<8>hpt_scsi_io: ccb %x id %d lun %d cdb %x-%x-%x",
547 		ccb,
548 		ccb->ccb_h.target_id, ccb->ccb_h.target_lun,
549 		*(HPT_U32 *)&cdb[0], *(HPT_U32 *)&cdb[4], *(HPT_U32 *)&cdb[8]
550 	));
551 
552 	/* ccb->ccb_h.path_id is not our bus id - don't check it */
553 	if (ccb->ccb_h.target_lun != 0 ||
554 		ccb->ccb_h.target_id >= osm_max_targets ||
555 		(ccb->ccb_h.flags & CAM_CDB_PHYS))
556 	{
557 		ccb->ccb_h.status = CAM_TID_INVALID;
558 		xpt_done(ccb);
559 		return;
560 	}
561 
562 	vd = ldm_find_target(vbus, ccb->ccb_h.target_id);
563 
564 	if (!vd) {
565 		ccb->ccb_h.status = CAM_SEL_TIMEOUT;
566 		xpt_done(ccb);
567 		return;
568 	}
569 
570 	switch (cdb[0]) {
571 	case TEST_UNIT_READY:
572 	case START_STOP_UNIT:
573 	case SYNCHRONIZE_CACHE:
574 		ccb->ccb_h.status = CAM_REQ_CMP;
575 		break;
576 
577 	case INQUIRY:
578 		{
579 			PINQUIRYDATA inquiryData;
580 			memset(ccb->csio.data_ptr, 0, ccb->csio.dxfer_len);
581 			inquiryData = (PINQUIRYDATA)ccb->csio.data_ptr;
582 
583 			inquiryData->AdditionalLength = 31;
584 			inquiryData->CommandQueue = 1;
585 			memcpy(&inquiryData->VendorId, "HPT     ", 8);
586 			memcpy(&inquiryData->ProductId, "DISK 0_0        ", 16);
587 
588 			if (vd->target_id / 10) {
589 				inquiryData->ProductId[7] = (vd->target_id % 100) / 10 + '0';
590 				inquiryData->ProductId[8] = (vd->target_id % 100) % 10 + '0';
591 			}
592 			else
593 				inquiryData->ProductId[7] = (vd->target_id % 100) % 10 + '0';
594 
595 			memcpy(&inquiryData->ProductRevisionLevel, "4.00", 4);
596 
597 			ccb->ccb_h.status = CAM_REQ_CMP;
598 		}
599 		break;
600 
601 	case READ_CAPACITY:
602 	{
603 		HPT_U8 *rbuf = ccb->csio.data_ptr;
604 		HPT_U32 cap;
605 		HPT_U8 sector_size_shift = 0;
606 		HPT_U64 new_cap;
607 		HPT_U32 sector_size = 0;
608 
609 		if (mIsArray(vd->type))
610 			sector_size_shift = vd->u.array.sector_size_shift;
611 		else{
612 			if(vd->type == VD_RAW){
613 				sector_size = vd->u.raw.logical_sector_size;
614 			}
615 
616 			switch (sector_size) {
617 				case 0x1000:
618 					KdPrint(("set 4k sector size in READ_CAPACITY"));
619 					sector_size_shift = 3;
620 					break;
621 				default:
622 					break;
623 			}
624 		}
625 		new_cap = vd->capacity >> sector_size_shift;
626 
627 		if (new_cap > 0xfffffffful)
628 			cap = 0xffffffff;
629 		else
630 			cap = new_cap - 1;
631 
632 		rbuf[0] = (HPT_U8)(cap>>24);
633 		rbuf[1] = (HPT_U8)(cap>>16);
634 		rbuf[2] = (HPT_U8)(cap>>8);
635 		rbuf[3] = (HPT_U8)cap;
636 		rbuf[4] = 0;
637 		rbuf[5] = 0;
638 		rbuf[6] = 2 << sector_size_shift;
639 		rbuf[7] = 0;
640 
641 		ccb->ccb_h.status = CAM_REQ_CMP;
642 		break;
643 	}
644 	case REPORT_LUNS:
645 	{
646 		HPT_U8 *rbuf = ccb->csio.data_ptr;
647 		memset(rbuf, 0, 16);
648 		rbuf[3] = 8;
649 		ccb->ccb_h.status = CAM_REQ_CMP;
650 		break;
651 	}
652 	case SERVICE_ACTION_IN:
653 	{
654 		HPT_U8 *rbuf = ccb->csio.data_ptr;
655 		HPT_U64	cap = 0;
656 		HPT_U8 sector_size_shift = 0;
657 		HPT_U32 sector_size = 0;
658 
659 		if(mIsArray(vd->type))
660 			sector_size_shift = vd->u.array.sector_size_shift;
661 		else{
662 			if(vd->type == VD_RAW){
663 				sector_size = vd->u.raw.logical_sector_size;
664 			}
665 
666 			switch (sector_size) {
667 				case 0x1000:
668 					KdPrint(("set 4k sector size in SERVICE_ACTION_IN"));
669 					sector_size_shift = 3;
670 					break;
671 				default:
672 					break;
673 			}
674 		}
675 		cap = (vd->capacity >> sector_size_shift) - 1;
676 
677 		rbuf[0] = (HPT_U8)(cap>>56);
678 		rbuf[1] = (HPT_U8)(cap>>48);
679 		rbuf[2] = (HPT_U8)(cap>>40);
680 		rbuf[3] = (HPT_U8)(cap>>32);
681 		rbuf[4] = (HPT_U8)(cap>>24);
682 		rbuf[5] = (HPT_U8)(cap>>16);
683 		rbuf[6] = (HPT_U8)(cap>>8);
684 		rbuf[7] = (HPT_U8)cap;
685 		rbuf[8] = 0;
686 		rbuf[9] = 0;
687 		rbuf[10] = 2 << sector_size_shift;
688 		rbuf[11] = 0;
689 
690 		ccb->ccb_h.status = CAM_REQ_CMP;
691 		break;
692 	}
693 
694 	case READ_6:
695 	case READ_10:
696 	case READ_16:
697 	case WRITE_6:
698 	case WRITE_10:
699 	case WRITE_16:
700 	case 0x13:
701 	case 0x2f: /* VERIFY_10 */
702 	case 0x8f: /* VERIFY_16 */
703 	{
704 		HPT_U8 sector_size_shift = 0;
705 		HPT_U32 sector_size = 0;
706 		pCmd = ldm_alloc_cmds(vbus, vd->cmds_per_request);
707 		if(!pCmd){
708 			KdPrint(("Failed to allocate command!"));
709 			ccb->ccb_h.status = CAM_BUSY;
710 			break;
711 		}
712 
713 		switch (cdb[0])	{
714 		case READ_6:
715 		case WRITE_6:
716 		case 0x13:
717 			pCmd->uCmd.Ide.Lba =  ((HPT_U32)cdb[1] << 16) | ((HPT_U32)cdb[2] << 8) | (HPT_U32)cdb[3];
718 			pCmd->uCmd.Ide.nSectors = (HPT_U16) cdb[4];
719 			break;
720 		case READ_16:
721 		case WRITE_16:
722 		case 0x8f: /* VERIFY_16 */
723 		{
724 			HPT_U64 block =
725 				((HPT_U64)cdb[2]<<56) |
726 				((HPT_U64)cdb[3]<<48) |
727 				((HPT_U64)cdb[4]<<40) |
728 				((HPT_U64)cdb[5]<<32) |
729 				((HPT_U64)cdb[6]<<24) |
730 				((HPT_U64)cdb[7]<<16) |
731 				((HPT_U64)cdb[8]<<8) |
732 				((HPT_U64)cdb[9]);
733 			pCmd->uCmd.Ide.Lba = block;
734 			pCmd->uCmd.Ide.nSectors = (HPT_U16)cdb[13] | ((HPT_U16)cdb[12]<<8);
735 			break;
736 		}
737 
738 		default:
739 			pCmd->uCmd.Ide.Lba = (HPT_U32)cdb[5] | ((HPT_U32)cdb[4] << 8) | ((HPT_U32)cdb[3] << 16) | ((HPT_U32)cdb[2] << 24);
740 			pCmd->uCmd.Ide.nSectors = (HPT_U16) cdb[8] | ((HPT_U16)cdb[7]<<8);
741 			break;
742 		}
743 
744 		if(mIsArray(vd->type)) {
745 			sector_size_shift = vd->u.array.sector_size_shift;
746 		}
747 		else{
748 			if(vd->type == VD_RAW){
749 				sector_size = vd->u.raw.logical_sector_size;
750 			}
751 
752 			switch (sector_size) {
753 				case 0x1000:
754 					KdPrint(("<8>converting 4k sector size to 512-byte sectors"));
755 					sector_size_shift = 3;
756 					break;
757 				default:
758 					break;
759 	 		}
760 		}
761 		pCmd->uCmd.Ide.Lba <<= sector_size_shift;
762 		pCmd->uCmd.Ide.nSectors <<= sector_size_shift;
763 
764 
765 		switch (cdb[0]) {
766 		case READ_6:
767 		case READ_10:
768 		case READ_16:
769 			pCmd->flags.data_in = 1;
770 			break;
771 		case WRITE_6:
772 		case WRITE_10:
773 		case WRITE_16:
774 			pCmd->flags.data_out = 1;
775 			break;
776 		}
777 		pCmd->priv = ext = cmdext_get(vbus_ext);
778 		HPT_ASSERT(ext);
779 		ext->ccb = ccb;
780 		pCmd->target = vd;
781 		pCmd->done = os_cmddone;
782 		pCmd->buildsgl = os_buildsgl;
783 
784 		pCmd->psg = ext->psg;
785 		pCmd->flags.physical_sg = 1;
786 		error = bus_dmamap_load_ccb(vbus_ext->io_dmat,
787 					ext->dma_map, ccb,
788 					hpt_io_dmamap_callback, pCmd,
789 					BUS_DMA_WAITOK
790 				);
791 		KdPrint(("<8>bus_dmamap_load return %d", error));
792 		if (error && error!=EINPROGRESS) {
793 			os_printk("bus_dmamap_load error %d", error);
794 			cmdext_put(ext);
795 			ldm_free_cmds(pCmd);
796 			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
797 			xpt_done(ccb);
798 		}
799 		return;
800 	}
801 
802 	default:
803 		ccb->ccb_h.status = CAM_REQ_INVALID;
804 		break;
805 	}
806 
807 	xpt_done(ccb);
808 	return;
809 }
810 
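/* CAM SIM action entry point: dispatch on the CCB function code. */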
811 static void hpt_action(struct cam_sim *sim, union ccb *ccb)
812 {
813 	PVBUS_EXT vbus_ext = (PVBUS_EXT)cam_sim_softc(sim);
814 
815 	KdPrint(("<8>hpt_action(fn=%d, id=%d)", ccb->ccb_h.func_code, ccb->ccb_h.target_id));
816 
817 	hpt_assert_vbus_locked(vbus_ext);
818 	switch (ccb->ccb_h.func_code) {
819 
820 	case XPT_SCSI_IO:
821 		hpt_scsi_io(vbus_ext, ccb);
822 		return;
823 
824 	case XPT_RESET_BUS:
825 		ldm_reset_vbus((PVBUS)vbus_ext->vbus);
826 		break;
827 
828 	case XPT_GET_TRAN_SETTINGS:
829 	case XPT_SET_TRAN_SETTINGS:
830 		ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
831 		break;
832 
833 	case XPT_CALC_GEOMETRY:
834 		ccb->ccg.heads = 255;
835 		ccb->ccg.secs_per_track = 63;
836 		ccb->ccg.cylinders = ccb->ccg.volume_size / (ccb->ccg.heads * ccb->ccg.secs_per_track);
837 		ccb->ccb_h.status = CAM_REQ_CMP;
838 		break;
839 
840 	case XPT_PATH_INQ:
841 	{
842 		struct ccb_pathinq *cpi = &ccb->cpi;
843 
844 		cpi->version_num = 1;
845 		cpi->hba_inquiry = PI_SDTR_ABLE;
846 		cpi->target_sprt = 0;
847 		cpi->hba_misc = PIM_NOBUSRESET;
848 		cpi->hba_eng_cnt = 0;
849 		cpi->max_target = osm_max_targets;
850 		cpi->max_lun = 0;
851 		cpi->unit_number = cam_sim_unit(sim);
852 		cpi->bus_id = cam_sim_bus(sim);
853 		cpi->initiator_id = osm_max_targets;
854 		cpi->base_transfer_speed = 3300;
855 
856 		strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
857 		strlcpy(cpi->hba_vid, "HPT   ", HBA_IDLEN);
858 		strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
859 		cpi->transport = XPORT_SPI;
860 		cpi->transport_version = 2;
861 		cpi->protocol = PROTO_SCSI;
862 		cpi->protocol_version = SCSI_REV_2;
863 		cpi->ccb_h.status = CAM_REQ_CMP;
864 		break;
865 	}
866 
867 	default:
868 		ccb->ccb_h.status = CAM_REQ_INVALID;
869 		break;
870 	}
871 
872 	xpt_done(ccb);
873 	return;
874 }
875 
876 static void hpt_pci_intr(void *arg)
877 {
878 	PVBUS_EXT vbus_ext = (PVBUS_EXT)arg;
879 	hpt_lock_vbus(vbus_ext);
880 	ldm_intr((PVBUS)vbus_ext->vbus);
881 	hpt_unlock_vbus(vbus_ext);
882 }
883 
884 static void hpt_poll(struct cam_sim *sim)
885 {
886 	PVBUS_EXT vbus_ext = (PVBUS_EXT)cam_sim_softc(sim);
887 
888 	hpt_assert_vbus_locked(vbus_ext);
889 	ldm_intr((PVBUS)vbus_ext->vbus);
890 }
891 
892 static void hpt_async(void * callback_arg, u_int32_t code, struct cam_path * path, void * arg)
893 {
894 	KdPrint(("<8>hpt_async"));
895 }
896 
897 static int hpt_shutdown(device_t dev)
898 {
899 	KdPrint(("hpt_shutdown(dev=%p)", dev));
900 	return 0;
901 }
902 
903 static int hpt_detach(device_t dev)
904 {
905 	/* we don't allow the driver to be unloaded. */
906 	return EBUSY;
907 }
908 
909 static void hpt_ioctl_done(struct _IOCTL_ARG *arg)
910 {
911 	arg->ioctl_cmnd = 0;
912 	wakeup(arg);
913 }
914 
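/*
 * Issue an LDM ioctl on one virtual bus and wait for completion, resetting
 * the bus whenever the request does not finish within HPT_OSM_TIMEOUT.
 */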
915 static void __hpt_do_ioctl(PVBUS_EXT vbus_ext, IOCTL_ARG *ioctl_args)
916 {
917 	ioctl_args->result = -1;
918 	ioctl_args->done = hpt_ioctl_done;
919 	ioctl_args->ioctl_cmnd = (void *)1;
920 
921 	hpt_lock_vbus(vbus_ext);
922 	ldm_ioctl((PVBUS)vbus_ext->vbus, ioctl_args);
923 
924 	while (ioctl_args->ioctl_cmnd) {
925 		if (hpt_sleep(vbus_ext, ioctl_args, PPAUSE, "hptctl", HPT_OSM_TIMEOUT)==0)
926 			break;
927 		ldm_reset_vbus((PVBUS)vbus_ext->vbus);
928 		__hpt_do_tasks(vbus_ext);
929 	}
930 
931 	/* KdPrint(("ioctl %x result %d", ioctl_args->dwIoControlCode, ioctl_args->result)); */
932 
933 	hpt_unlock_vbus(vbus_ext);
934 }
935 
936 static void hpt_do_ioctl(IOCTL_ARG *ioctl_args)
937 {
938 	PVBUS vbus;
939 	PVBUS_EXT vbus_ext;
940 
941 	ldm_for_each_vbus(vbus, vbus_ext) {
942 		__hpt_do_ioctl(vbus_ext, ioctl_args);
943 		if (ioctl_args->result!=HPT_IOCTL_RESULT_WRONG_VBUS)
944 			return;
945 	}
946 }
947 
948 #define HPT_DO_IOCTL(code, inbuf, insize, outbuf, outsize) ({\
949 	IOCTL_ARG arg;\
950 	arg.dwIoControlCode = code;\
951 	arg.lpInBuffer = inbuf;\
952 	arg.lpOutBuffer = outbuf;\
953 	arg.nInBufferSize = insize;\
954 	arg.nOutBufferSize = outsize;\
955 	arg.lpBytesReturned = 0;\
956 	hpt_do_ioctl(&arg);\
957 	arg.result;\
958 })
959 
960 #define DEVICEID_VALID(id) ((id) && ((HPT_U32)(id)!=0xffffffff))
961 
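/*
 * Fetch up to nMaxCount logical device IDs through the ioctl interface.
 * Returns the number of valid IDs copied into pIds, or -1 on failure.
 */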
962 static int hpt_get_logical_devices(DEVICEID * pIds, int nMaxCount)
963 {
964 	int i;
965 	HPT_U32 count = nMaxCount-1;
966 
967 	if (HPT_DO_IOCTL(HPT_IOCTL_GET_LOGICAL_DEVICES,
968 			&count, sizeof(HPT_U32), pIds, sizeof(DEVICEID)*nMaxCount))
969 		return -1;
970 
971 	nMaxCount = (int)pIds[0];
972 	for (i=0; i<nMaxCount; i++) pIds[i] = pIds[i+1];
973 	return nMaxCount;
974 }
975 
976 static int hpt_get_device_info_v3(DEVICEID id, PLOGICAL_DEVICE_INFO_V3 pInfo)
977 {
978 	return HPT_DO_IOCTL(HPT_IOCTL_GET_DEVICE_INFO_V3,
979 				&id, sizeof(DEVICEID), pInfo, sizeof(LOGICAL_DEVICE_INFO_V3));
980 }
981 
982 /* This does not logically belong in this file, but we want to use the ioctl interface. */
983 static int __hpt_stop_tasks(PVBUS_EXT vbus_ext, DEVICEID id)
984 {
985 	LOGICAL_DEVICE_INFO_V3 devinfo;
986 	int i, result;
987 	DEVICEID param[2] = { id, 0 };
988 
989 	if (hpt_get_device_info_v3(id, &devinfo))
990 		return -1;
991 
992 	if (devinfo.Type!=LDT_ARRAY)
993 		return -1;
994 
995 	if (devinfo.u.array.Flags & ARRAY_FLAG_REBUILDING)
996 		param[1] = AS_REBUILD_ABORT;
997 	else if (devinfo.u.array.Flags & ARRAY_FLAG_VERIFYING)
998 		param[1] = AS_VERIFY_ABORT;
999 	else if (devinfo.u.array.Flags & ARRAY_FLAG_INITIALIZING)
1000 		param[1] = AS_INITIALIZE_ABORT;
1001 	else if (devinfo.u.array.Flags & ARRAY_FLAG_TRANSFORMING)
1002 		param[1] = AS_TRANSFORM_ABORT;
1003 	else
1004 		return -1;
1005 
1006 	KdPrint(("SET_ARRAY_STATE(%x, %d)", param[0], param[1]));
1007 	result = HPT_DO_IOCTL(HPT_IOCTL_SET_ARRAY_STATE,
1008 				param, sizeof(param), 0, 0);
1009 
1010 	for (i=0; i<devinfo.u.array.nDisk; i++)
1011 		if (DEVICEID_VALID(devinfo.u.array.Members[i]))
1012 			__hpt_stop_tasks(vbus_ext, devinfo.u.array.Members[i]);
1013 
1014 	return result;
1015 }
1016 
1017 static void hpt_stop_tasks(PVBUS_EXT vbus_ext)
1018 {
1019 	DEVICEID ids[32];
1020 	int i, count;
1021 
1022 	count = hpt_get_logical_devices((DEVICEID *)&ids, sizeof(ids)/sizeof(ids[0]));
1023 
1024 	for (i=0; i<count; i++)
1025 		__hpt_stop_tasks(vbus_ext, ids[i]);
1026 }
1027 
1028 static	d_open_t	hpt_open;
1029 static	d_close_t	hpt_close;
1030 static	d_ioctl_t	hpt_ioctl;
1031 static  int 		hpt_rescan_bus(void);
1032 
1033 static struct cdevsw hpt_cdevsw = {
1034 	.d_open =	hpt_open,
1035 	.d_close =	hpt_close,
1036 	.d_ioctl =	hpt_ioctl,
1037 	.d_name =	driver_name,
1038 	.d_version =	D_VERSION,
1039 };
1040 
1041 static struct intr_config_hook hpt_ich;
1042 
1043 /*
1044  * hpt_final_init is called (via the interrupt config hook) after all hpt_attach calls have completed.
1045  */
1046 static void hpt_final_init(void *dummy)
1047 {
1048 	int       i,unit_number=0;
1049 	PVBUS_EXT vbus_ext;
1050 	PVBUS vbus;
1051 	PHBA hba;
1052 
1053 	/* Clear the config hook */
1054 	config_intrhook_disestablish(&hpt_ich);
1055 
1056 	/* allocate memory */
1057 	i = 0;
1058 	ldm_for_each_vbus(vbus, vbus_ext) {
1059 		if (hpt_alloc_mem(vbus_ext)) {
1060 			os_printk("out of memory");
1061 			return;
1062 		}
1063 		i++;
1064 	}
1065 
1066 	if (!i) {
1067 		if (bootverbose)
1068 			os_printk("no controller detected.");
1069 		return;
1070 	}
1071 
1072 	/* initializing hardware */
1073 	ldm_for_each_vbus(vbus, vbus_ext) {
1074 		/* make timer available here */
1075 		mtx_init(&vbus_ext->lock, "hptsleeplock", NULL, MTX_DEF);
1076 		callout_init_mtx(&vbus_ext->timer, &vbus_ext->lock, 0);
1077 		if (hpt_init_vbus(vbus_ext)) {
1078 			os_printk("failed to initialize hardware");
1079 			break; /* FIXME */
1080 		}
1081 	}
1082 
1083 	/* register CAM interface */
1084 	ldm_for_each_vbus(vbus, vbus_ext) {
1085 		struct cam_devq *devq;
1086 		struct ccb_setasync	ccb;
1087 
1088 		if (bus_dma_tag_create(NULL,/* parent */
1089 				4,	/* alignment */
1090 				BUS_SPACE_MAXADDR_32BIT+1, /* boundary */
1091 				BUS_SPACE_MAXADDR,	/* lowaddr */
1092 				BUS_SPACE_MAXADDR, 	/* highaddr */
1093 				NULL, NULL, 		/* filter, filterarg */
1094 				PAGE_SIZE * (os_max_sg_descriptors-1),	/* maxsize */
1095 				os_max_sg_descriptors,	/* nsegments */
1096 				0x10000,	/* maxsegsize */
1097 				BUS_DMA_WAITOK,		/* flags */
1098 				busdma_lock_mutex,	/* lockfunc */
1099 				&vbus_ext->lock,		/* lockfuncarg */
1100 				&vbus_ext->io_dmat	/* tag */))
1101 		{
1102 			return ;
1103 		}
1104 
1105 		for (i=0; i<os_max_queue_comm; i++) {
1106 			POS_CMDEXT ext = (POS_CMDEXT)malloc(sizeof(OS_CMDEXT), M_DEVBUF, M_WAITOK);
1107 			if (!ext) {
1108 				os_printk("Can't alloc cmdext(%d)", i);
1109 				return ;
1110 			}
1111 			ext->vbus_ext = vbus_ext;
1112 			ext->next = vbus_ext->cmdext_list;
1113 			vbus_ext->cmdext_list = ext;
1114 
1115 			if (bus_dmamap_create(vbus_ext->io_dmat, 0, &ext->dma_map)) {
1116 				os_printk("Can't create dma map(%d)", i);
1117 				return ;
1118 			}
1119 			callout_init_mtx(&ext->timeout, &vbus_ext->lock, 0);
1120 		}
1121 
1122 		if ((devq = cam_simq_alloc(os_max_queue_comm)) == NULL) {
1123 			os_printk("cam_simq_alloc failed");
1124 			return ;
1125 		}
1126 		vbus_ext->sim = cam_sim_alloc(hpt_action, hpt_poll, driver_name,
1127 				vbus_ext, unit_number, &vbus_ext->lock, os_max_queue_comm, /*tagged*/8,  devq);
1128 		unit_number++;
1129 		if (!vbus_ext->sim) {
1130 			os_printk("cam_sim_alloc failed");
1131 			cam_simq_free(devq);
1132 			return ;
1133 		}
1134 
1135 		hpt_lock_vbus(vbus_ext);
1136 		if (xpt_bus_register(vbus_ext->sim, NULL, 0) != CAM_SUCCESS) {
1137 			hpt_unlock_vbus(vbus_ext);
1138 			os_printk("xpt_bus_register failed");
1139 			cam_sim_free(vbus_ext->sim, /*free devq*/ TRUE);
1140 			vbus_ext->sim = NULL;
1141 			return ;
1142 		}
1143 
1144 		if (xpt_create_path(&vbus_ext->path, /*periph */ NULL,
1145 				cam_sim_path(vbus_ext->sim), CAM_TARGET_WILDCARD,
1146 				CAM_LUN_WILDCARD) != CAM_REQ_CMP)
1147 		{
1148 			hpt_unlock_vbus(vbus_ext);
1149 			os_printk("xpt_create_path failed");
1150 			xpt_bus_deregister(cam_sim_path(vbus_ext->sim));
1151 			cam_sim_free(vbus_ext->sim, /*free_devq*/TRUE);
1152 			vbus_ext->sim = NULL;
1153 			return ;
1154 		}
1155 
1156 		memset(&ccb, 0, sizeof(ccb));
1157 		xpt_setup_ccb(&ccb.ccb_h, vbus_ext->path, /*priority*/5);
1158 		ccb.ccb_h.func_code = XPT_SASYNC_CB;
1159 		ccb.event_enable = AC_LOST_DEVICE;
1160 		ccb.callback = hpt_async;
1161 		ccb.callback_arg = vbus_ext;
1162 		xpt_action((union ccb *)&ccb);
1163 		hpt_unlock_vbus(vbus_ext);
1164 
1165 		for (hba = vbus_ext->hba_list; hba; hba = hba->next) {
1166 			int rid = 0;
1167 			if ((hba->irq_res = bus_alloc_resource_any(hba->pcidev,
1168 				SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE)) == NULL)
1169 			{
1170 				os_printk("can't allocate interrupt");
1171 				return ;
1172 			}
1173 			if (bus_setup_intr(hba->pcidev, hba->irq_res, INTR_TYPE_CAM | INTR_MPSAFE,
1174 				NULL, hpt_pci_intr, vbus_ext, &hba->irq_handle))
1175 			{
1176 				os_printk("can't set up interrupt");
1177 				return ;
1178 			}
1179 			hba->ldm_adapter.him->intr_control(hba->ldm_adapter.him_handle, HPT_TRUE);
1180 
1181 		}
1182 
1183 		vbus_ext->shutdown_eh = EVENTHANDLER_REGISTER(shutdown_final,
1184 									hpt_shutdown_vbus, vbus_ext, SHUTDOWN_PRI_DEFAULT);
1185 		if (!vbus_ext->shutdown_eh)
1186 			os_printk("Shutdown event registration failed");
1187 	}
1188 
1189 	ldm_for_each_vbus(vbus, vbus_ext) {
1190 		TASK_INIT(&vbus_ext->worker, 0, (task_fn_t *)hpt_do_tasks, vbus_ext);
1191 		if (vbus_ext->tasks)
1192 			TASK_ENQUEUE(&vbus_ext->worker);
1193 	}
1194 
1195 	make_dev(&hpt_cdevsw, DRIVER_MINOR, UID_ROOT, GID_OPERATOR,
1196 	    S_IRUSR | S_IWUSR, "%s", driver_name);
1197 }
1198 
1199 #if defined(KLD_MODULE)
1200 
1201 typedef struct driverlink *driverlink_t;
1202 struct driverlink {
1203 	kobj_class_t	driver;
1204 	TAILQ_ENTRY(driverlink) link;	/* list of drivers in devclass */
1205 };
1206 
1207 typedef TAILQ_HEAD(driver_list, driverlink) driver_list_t;
1208 
1209 struct devclass {
1210 	TAILQ_ENTRY(devclass) link;
1211 	devclass_t	parent;		/* parent in devclass hierarchy */
1212 	driver_list_t	drivers;     /* bus devclasses store drivers for bus */
1213 	char		*name;
1214 	device_t	*devices;	/* array of devices indexed by unit */
1215 	int		maxunit;	/* size of devices array */
1216 };
1217 
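/*
 * When built as a module, move this driver to the front of the PCI devclass
 * driver list, presumably so it takes precedence over a same-named driver
 * already present in the kernel.
 */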
1218 static void override_kernel_driver(void)
1219 {
1220 	driverlink_t dl, dlfirst;
1221 	driver_t *tmpdriver;
1222 	devclass_t dc = devclass_find("pci");
1223 
1224 	if (dc){
1225 		dlfirst = TAILQ_FIRST(&dc->drivers);
1226 		for (dl = dlfirst; dl; dl = TAILQ_NEXT(dl, link)) {
1227 			if(strcmp(dl->driver->name, driver_name) == 0) {
1228 				tmpdriver=dl->driver;
1229 				dl->driver=dlfirst->driver;
1230 				dlfirst->driver=tmpdriver;
1231 				break;
1232 			}
1233 		}
1234 	}
1235 }
1236 
1237 #else
1238 #define override_kernel_driver()
1239 #endif
1240 
1241 static void hpt_init(void *dummy)
1242 {
1243 	if (bootverbose)
1244 		os_printk("%s %s", driver_name_long, driver_ver);
1245 
1246 	override_kernel_driver();
1247 	init_config();
1248 
1249 	hpt_ich.ich_func = hpt_final_init;
1250 	hpt_ich.ich_arg = NULL;
1251 	if (config_intrhook_establish(&hpt_ich) != 0) {
1252 		printf("%s: cannot establish configuration hook\n",
1253 		    driver_name_long);
1254 	}
1255 
1256 }
1257 SYSINIT(hptinit, SI_SUB_CONFIGURE, SI_ORDER_FIRST, hpt_init, NULL);
1258 
1259 /*
1260  * CAM driver interface
1261  */
1262 static device_method_t driver_methods[] = {
1263 	/* Device interface */
1264 	DEVMETHOD(device_probe,		hpt_probe),
1265 	DEVMETHOD(device_attach,	hpt_attach),
1266 	DEVMETHOD(device_detach,	hpt_detach),
1267 	DEVMETHOD(device_shutdown,	hpt_shutdown),
1268 	{ 0, 0 }
1269 };
1270 
1271 static driver_t hpt_pci_driver = {
1272 	driver_name,
1273 	driver_methods,
1274 	sizeof(HBA)
1275 };
1276 
1277 #ifndef TARGETNAME
1278 #error "no TARGETNAME found"
1279 #endif
1280 
1281 /* Use these wrappers so that TARGETNAME is macro-expanded before being passed on. */
1282 #define __DRIVER_MODULE(p1, p2, p3, p4, p5) DRIVER_MODULE(p1, p2, p3, p4, p5)
1283 #define __MODULE_VERSION(p1, p2) MODULE_VERSION(p1, p2)
1284 #define __MODULE_DEPEND(p1, p2, p3, p4, p5) MODULE_DEPEND(p1, p2, p3, p4, p5)
1285 __DRIVER_MODULE(TARGETNAME, pci, hpt_pci_driver, 0, 0);
1286 __MODULE_VERSION(TARGETNAME, 1);
1287 __MODULE_DEPEND(TARGETNAME, cam, 1, 1, 1);
1288 
1289 static int hpt_open(struct cdev *dev, int flags, int devtype, struct thread *td)
1290 {
1291 	return 0;
1292 }
1293 
1294 static int hpt_close(struct cdev *dev, int flags, int devtype, struct thread *td)
1295 {
1296 	return 0;
1297 }
1298 
1299 static int hpt_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag, struct thread *td)
1300 {
1301 	PHPT_IOCTL_PARAM piop=(PHPT_IOCTL_PARAM)data;
1302 	IOCTL_ARG ioctl_args;
1303 	HPT_U32 bytesReturned = 0;
1304 
1305 	switch (cmd){
1306 	case HPT_DO_IOCONTROL:
1307 	{
1308 		if (piop->Magic == HPT_IOCTL_MAGIC || piop->Magic == HPT_IOCTL_MAGIC32) {
1309 			KdPrint(("<8>ioctl=%x in=%p len=%d out=%p len=%d\n",
1310 				piop->dwIoControlCode,
1311 				piop->lpInBuffer,
1312 				piop->nInBufferSize,
1313 				piop->lpOutBuffer,
1314 				piop->nOutBufferSize));
1315 
1316 			memset(&ioctl_args, 0, sizeof(ioctl_args));
1317 
1318 			ioctl_args.dwIoControlCode = piop->dwIoControlCode;
1319 			ioctl_args.nInBufferSize = piop->nInBufferSize;
1320 			ioctl_args.nOutBufferSize = piop->nOutBufferSize;
1321 			ioctl_args.lpBytesReturned = &bytesReturned;
1322 
1323 			if (ioctl_args.nInBufferSize) {
1324 				ioctl_args.lpInBuffer = malloc(ioctl_args.nInBufferSize, M_DEVBUF, M_WAITOK);
1325 				if (!ioctl_args.lpInBuffer)
1326 					goto invalid;
1327 				if (copyin((void*)piop->lpInBuffer,
1328 						ioctl_args.lpInBuffer, piop->nInBufferSize))
1329 					goto invalid;
1330 			}
1331 
1332 			if (ioctl_args.nOutBufferSize) {
1333 				ioctl_args.lpOutBuffer = malloc(ioctl_args.nOutBufferSize, M_DEVBUF, M_WAITOK | M_ZERO);
1334 				if (!ioctl_args.lpOutBuffer)
1335 					goto invalid;
1336 			}
1337 
1338 			hpt_do_ioctl(&ioctl_args);
1339 
1340 			if (ioctl_args.result==HPT_IOCTL_RESULT_OK) {
1341 				if (piop->nOutBufferSize) {
1342 					if (copyout(ioctl_args.lpOutBuffer,
1343 						(void*)piop->lpOutBuffer, piop->nOutBufferSize))
1344 						goto invalid;
1345 				}
1346 				if (piop->lpBytesReturned) {
1347 					if (copyout(&bytesReturned,
1348 						(void*)piop->lpBytesReturned, sizeof(HPT_U32)))
1349 						goto invalid;
1350 				}
1351 				if (ioctl_args.lpInBuffer) free(ioctl_args.lpInBuffer, M_DEVBUF);
1352 				if (ioctl_args.lpOutBuffer) free(ioctl_args.lpOutBuffer, M_DEVBUF);
1353 				return 0;
1354 			}
1355 invalid:
1356 			if (ioctl_args.lpInBuffer) free(ioctl_args.lpInBuffer, M_DEVBUF);
1357 			if (ioctl_args.lpOutBuffer) free(ioctl_args.lpOutBuffer, M_DEVBUF);
1358 			return EFAULT;
1359 		}
1360 		return EFAULT;
1361 	}
1362 
1363 	case HPT_SCAN_BUS:
1364 	{
1365 		return hpt_rescan_bus();
1366 	}
1367 	default:
1368 		KdPrint(("invalid command!"));
1369 		return EFAULT;
1370 	}
1371 
1372 }
1373 
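/* Trigger a CAM rescan of every virtual bus (used by the HPT_SCAN_BUS ioctl). */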
1374 static int	hpt_rescan_bus(void)
1375 {
1376 	union ccb			*ccb;
1377 	PVBUS 				vbus;
1378 	PVBUS_EXT			vbus_ext;
1379 
1380 	ldm_for_each_vbus(vbus, vbus_ext) {
1381 		if ((ccb = xpt_alloc_ccb()) == NULL)
1382 		{
1383 			return(ENOMEM);
1384 		}
1385 		if (xpt_create_path(&ccb->ccb_h.path, NULL, cam_sim_path(vbus_ext->sim),
1386 			CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP)
1387 		{
1388 			xpt_free_ccb(ccb);
1389 			return(EIO);
1390 		}
1391 		xpt_rescan(ccb);
1392 	}
1393 	return(0);
1394 }
1395 
1396