1 /* $Id: osm_bsd.c,v 1.36 2010/05/11 03:12:11 lcn Exp $ */
2 /*-
3  * SPDX-License-Identifier: BSD-2-Clause
4  *
5  * HighPoint RAID Driver for FreeBSD
6  * Copyright (C) 2005-2011 HighPoint Technologies, Inc.
7  * All rights reserved.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
19  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
22  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28  * SUCH DAMAGE.
29  *
30  * $FreeBSD$
31  */
32 #include <dev/hptnr/hptnr_config.h>
33 #include <dev/hptnr/os_bsd.h>
34 #include <dev/hptnr/hptintf.h>
35 int msi = 0;
36 int debug_flag = 0;
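/*
 * Walk the global HIM (hardware interface module) list and return the first
 * HIM whose supported PCI ID table matches this device.  When 'scan' is set,
 * also give each HIM a chance to update its controller count.
 */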
37 static HIM *hpt_match(device_t dev, int scan)
38 {
39 	PCI_ID pci_id;
40 	HIM *him;
41 	int i;
42 
43 	for (him = him_list; him; him = him->next) {
44 		for (i=0; him->get_supported_device_id(i, &pci_id); i++) {
45 			if (scan && him->get_controller_count)
46 				him->get_controller_count(&pci_id,0,0);
47 			if ((pci_get_vendor(dev) == pci_id.vid) &&
48 				(pci_get_device(dev) == pci_id.did)){
49 				return (him);
50 			}
51 		}
52 	}
53 
54 	return (NULL);
55 }
56 
57 static int hpt_probe(device_t dev)
58 {
59 	HIM *him;
60 
61 	him = hpt_match(dev, 0);
62 	if (him != NULL) {
63 		KdPrint(("hpt_probe: adapter at PCI %d:%d:%d, IRQ %d",
64 			pci_get_bus(dev), pci_get_slot(dev), pci_get_function(dev), pci_get_irq(dev)
65 			));
66 		device_set_desc(dev, him->name);
67 		return (BUS_PROBE_DEFAULT);
68 	}
69 
70 	return (ENXIO);
71 }
72 
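/*
 * Attach a controller: look up its HIM, allocate the HIM's per-adapter
 * handle, create the adapter and register it with the LDM.  If registration
 * fails because no virtual bus exists yet, allocate a VBUS_EXT, create the
 * vbus and register again; the HBA is then linked onto its vbus_ext's
 * hba_list.
 */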
73 static int hpt_attach(device_t dev)
74 {
75 	PHBA hba = (PHBA)device_get_softc(dev);
76 	HIM *him;
77 	PCI_ID pci_id;
78 	HPT_UINT size;
79 	PVBUS vbus;
80 	PVBUS_EXT vbus_ext;
81 
82 	KdPrint(("hpt_attach(%d/%d/%d)", pci_get_bus(dev), pci_get_slot(dev), pci_get_function(dev)));
83 
84 	him = hpt_match(dev, 1);
85 	hba->ext_type = EXT_TYPE_HBA;
86 	hba->ldm_adapter.him = him;
87 
88 	pci_enable_busmaster(dev);
89 
90 	pci_id.vid = pci_get_vendor(dev);
91 	pci_id.did = pci_get_device(dev);
92 	pci_id.rev = pci_get_revid(dev);
93 	pci_id.subsys = (HPT_U32)(pci_get_subdevice(dev)) << 16 | pci_get_subvendor(dev);
94 
95 	size = him->get_adapter_size(&pci_id);
96 	hba->ldm_adapter.him_handle = malloc(size, M_DEVBUF, M_WAITOK);
97 
98 	hba->pcidev = dev;
99 	hba->pciaddr.tree = 0;
100 	hba->pciaddr.bus = pci_get_bus(dev);
101 	hba->pciaddr.device = pci_get_slot(dev);
102 	hba->pciaddr.function = pci_get_function(dev);
103 
104 	if (!him->create_adapter(&pci_id, hba->pciaddr, hba->ldm_adapter.him_handle, hba)) {
105 		free(hba->ldm_adapter.him_handle, M_DEVBUF);
106 		return ENXIO;
107 	}
108 
109 	os_printk("adapter at PCI %d:%d:%d, IRQ %d",
110 		hba->pciaddr.bus, hba->pciaddr.device, hba->pciaddr.function, pci_get_irq(dev));
111 
112 	if (!ldm_register_adapter(&hba->ldm_adapter)) {
113 		size = ldm_get_vbus_size();
114 		vbus_ext = malloc(sizeof(VBUS_EXT) + size, M_DEVBUF, M_WAITOK |
115 			M_ZERO);
116 		vbus_ext->ext_type = EXT_TYPE_VBUS;
117 		ldm_create_vbus((PVBUS)vbus_ext->vbus, vbus_ext);
118 		ldm_register_adapter(&hba->ldm_adapter);
119 	}
120 
121 	ldm_for_each_vbus(vbus, vbus_ext) {
122 		if (hba->ldm_adapter.vbus==vbus) {
123 			hba->vbus_ext = vbus_ext;
124 			hba->next = vbus_ext->hba_list;
125 			vbus_ext->hba_list = hba;
126 			break;
127 		}
128 	}
129 	return 0;
130 }
131 
132 /*
133  * Perhaps we should use bus_dmamem_alloc() to allocate DMA memory,
134  * but there are currently some problems with it (alignment, etc.).
135  */
136 static __inline void *__get_free_pages(int order)
137 {
138 	/* don't use low memory - other devices may get starved */
139 	return contigmalloc(PAGE_SIZE<<order,
140 			M_DEVBUF, M_WAITOK, BUS_SPACE_MAXADDR_24BIT, BUS_SPACE_MAXADDR, PAGE_SIZE, 0);
141 }
142 
143 static __inline void free_pages(void *p, int order)
144 {
145 	contigfree(p, PAGE_SIZE<<order, M_DEVBUF);
146 }
147 
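/*
 * Pre-allocate the memory the LDM asked for: ordinary kernel memory for the
 * plain freelists, and page-aligned contiguous pages (carved into f->size
 * blocks, each tagged with its bus address right after the next pointer)
 * for the DMA freelists, plus os_max_cache_pages pages for the DMA pool.
 */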
148 static int hpt_alloc_mem(PVBUS_EXT vbus_ext)
149 {
150 	PHBA hba;
151 	struct freelist *f;
152 	HPT_UINT i;
153 	void **p;
154 
155 	for (hba = vbus_ext->hba_list; hba; hba = hba->next)
156 		hba->ldm_adapter.him->get_meminfo(hba->ldm_adapter.him_handle);
157 
158 	ldm_get_mem_info((PVBUS)vbus_ext->vbus, 0);
159 
160 	for (f=vbus_ext->freelist_head; f; f=f->next) {
161 		KdPrint(("%s: %d*%d=%d bytes",
162 			f->tag, f->count, f->size, f->count*f->size));
163 		for (i=0; i<f->count; i++) {
164 			p = (void **)malloc(f->size, M_DEVBUF, M_WAITOK);
165 			if (!p)	return (ENXIO);
166 			*p = f->head;
167 			f->head = p;
168 		}
169 	}
170 
171 	for (f=vbus_ext->freelist_dma_head; f; f=f->next) {
172 		int order, size, j;
173 
174 		HPT_ASSERT((f->size & (f->alignment-1))==0);
175 
176 		for (order=0, size=PAGE_SIZE; size<f->size; order++, size<<=1)
177 			;
178 
179 		KdPrint(("%s: %d*%d=%d bytes, order %d",
180 			f->tag, f->count, f->size, f->count*f->size, order));
181 		HPT_ASSERT(f->alignment<=PAGE_SIZE);
182 
183 		for (i=0; i<f->count;) {
184 			p = (void **)__get_free_pages(order);
185 			if (!p) return -1;
186 			for (j = size/f->size; j && i<f->count; i++,j--) {
187 				*p = f->head;
188 				*(BUS_ADDRESS *)(p+1) = (BUS_ADDRESS)vtophys(p);
189 				f->head = p;
190 				p = (void **)((unsigned long)p + f->size);
191 			}
192 		}
193 	}
194 
195 	HPT_ASSERT(PAGE_SIZE==DMAPOOL_PAGE_SIZE);
196 
197 	for (i=0; i<os_max_cache_pages; i++) {
198 		p = (void **)__get_free_pages(0);
199 		if (!p) return -1;
200 		HPT_ASSERT(((HPT_UPTR)p & (DMAPOOL_PAGE_SIZE-1))==0);
201 		dmapool_put_page((PVBUS)vbus_ext->vbus, p, (BUS_ADDRESS)vtophys(p));
202 	}
203 
204 	return 0;
205 }
206 
207 static void hpt_free_mem(PVBUS_EXT vbus_ext)
208 {
209 	struct freelist *f;
210 	void *p;
211 	int i;
212 	BUS_ADDRESS bus;
213 
214 	for (f=vbus_ext->freelist_head; f; f=f->next) {
215 #if DBG
216 		if (f->count!=f->reserved_count) {
217 			KdPrint(("memory leak for freelist %s (%d/%d)", f->tag, f->count, f->reserved_count));
218 		}
219 #endif
220 		while ((p=freelist_get(f)))
221 			free(p, M_DEVBUF);
222 	}
223 
224 	for (i=0; i<os_max_cache_pages; i++) {
225 		p = dmapool_get_page((PVBUS)vbus_ext->vbus, &bus);
226 		HPT_ASSERT(p);
227 		free_pages(p, 0);
228 	}
229 
230 	for (f=vbus_ext->freelist_dma_head; f; f=f->next) {
231 		int order, size;
232 #if DBG
233 		if (f->count!=f->reserved_count) {
234 			KdPrint(("memory leak for dma freelist %s (%d/%d)", f->tag, f->count, f->reserved_count));
235 		}
236 #endif
237 		for (order=0, size=PAGE_SIZE; size<f->size; order++, size<<=1) ;
238 
239 		while ((p=freelist_get_dma(f, &bus))) {
240 			if (order)
241 				free_pages(p, order);
242 			else {
243 			/* can't free immediately since other blocks in this page may still be in the list */
244 				if (((HPT_UPTR)p & (PAGE_SIZE-1))==0)
245 					dmapool_put_page((PVBUS)vbus_ext->vbus, p, bus);
246 			}
247 		}
248 	}
249 
250 	while ((p = dmapool_get_page((PVBUS)vbus_ext->vbus, &bus)))
251 		free_pages(p, 0);
252 }
253 
254 static int hpt_init_vbus(PVBUS_EXT vbus_ext)
255 {
256 	PHBA hba;
257 
258 	for (hba = vbus_ext->hba_list; hba; hba = hba->next)
259 		if (!hba->ldm_adapter.him->initialize(hba->ldm_adapter.him_handle)) {
260 			KdPrint(("failed to initialize %p", hba));
261 			return -1;
262 		}
263 
264 	ldm_initialize_vbus((PVBUS)vbus_ext->vbus, &vbus_ext->hba_list->ldm_adapter);
265 	return 0;
266 }
267 
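/*
 * Completion routine for the cache-flush command issued below.  If the
 * target array is being transformed, re-queue the flush to the transform
 * target first; otherwise mark the command done and wake up the waiter.
 */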
268 static void hpt_flush_done(PCOMMAND pCmd)
269 {
270 	PVDEV vd = pCmd->target;
271 
272 	if (mIsArray(vd->type) && vd->u.array.transform && vd!=vd->u.array.transform->target) {
273 		vd = vd->u.array.transform->target;
274 		HPT_ASSERT(vd);
275 		pCmd->target = vd;
276 		pCmd->Result = RETURN_PENDING;
277 		vdev_queue_cmd(pCmd);
278 		return;
279 	}
280 
281 	*(int *)pCmd->priv = 1;
282 	wakeup(pCmd);
283 }
284 
285 /*
286  * flush a vdev (without retry).
287  */
288 static int hpt_flush_vdev(PVBUS_EXT vbus_ext, PVDEV vd)
289 {
290 	PCOMMAND pCmd;
291 	int result = 0, done;
292 	HPT_UINT count;
293 
294 	KdPrint(("flushing dev %p", vd));
295 
296 	hpt_assert_vbus_locked(vbus_ext);
297 
298 	if (mIsArray(vd->type) && vd->u.array.transform)
299 		count = max(vd->u.array.transform->source->cmds_per_request,
300 					vd->u.array.transform->target->cmds_per_request);
301 	else
302 		count = vd->cmds_per_request;
303 
304 	pCmd = ldm_alloc_cmds(vd->vbus, count);
305 
306 	if (!pCmd) {
307 		return -1;
308 	}
309 
310 	pCmd->type = CMD_TYPE_FLUSH;
311 	pCmd->flags.hard_flush = 1;
312 	pCmd->target = vd;
313 	pCmd->done = hpt_flush_done;
314 	done = 0;
315 	pCmd->priv = &done;
316 
317 	ldm_queue_cmd(pCmd);
318 
319 	if (!done) {
320 		while (hpt_sleep(vbus_ext, pCmd, PPAUSE, "hptfls", HPT_OSM_TIMEOUT)) {
321 			ldm_reset_vbus(vd->vbus);
322 		}
323 	}
324 
325 	KdPrint(("flush result %d", pCmd->Result));
326 
327 	if (pCmd->Result!=RETURN_SUCCESS)
328 		result = -1;
329 
330 	ldm_free_cmds(pCmd);
331 
332 	return result;
333 }
334 
335 static void hpt_stop_tasks(PVBUS_EXT vbus_ext);
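/*
 * Shutdown handler for a virtual bus: stop background array tasks, flush
 * every target (retrying once), shut down and release the LDM vbus, tear
 * down the interrupt handlers, and free all per-vbus resources.
 */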
336 static void hpt_shutdown_vbus(PVBUS_EXT vbus_ext, int howto)
337 {
338 	PVBUS     vbus = (PVBUS)vbus_ext->vbus;
339 	PHBA hba;
340 	int i;
341 
342 	KdPrint(("hpt_shutdown_vbus"));
343 
344 	/* stop all ctl tasks and disable the worker taskqueue */
345 	hpt_stop_tasks(vbus_ext);
346 	hpt_lock_vbus(vbus_ext);
347 	vbus_ext->worker.ta_context = 0;
348 
349 	/* flush devices */
350 	for (i=0; i<osm_max_targets; i++) {
351 		PVDEV vd = ldm_find_target(vbus, i);
352 		if (vd) {
353 			/* retry once */
354 			if (hpt_flush_vdev(vbus_ext, vd))
355 				hpt_flush_vdev(vbus_ext, vd);
356 		}
357 	}
358 
359 	ldm_shutdown(vbus);
360 	hpt_unlock_vbus(vbus_ext);
361 
362 	ldm_release_vbus(vbus);
363 
364 	for (hba=vbus_ext->hba_list; hba; hba=hba->next)
365 		bus_teardown_intr(hba->pcidev, hba->irq_res, hba->irq_handle);
366 
367 	hpt_free_mem(vbus_ext);
368 
369 	while ((hba=vbus_ext->hba_list)) {
370 		vbus_ext->hba_list = hba->next;
371 		free(hba->ldm_adapter.him_handle, M_DEVBUF);
372 	}
373 
374 	callout_drain(&vbus_ext->timer);
375 	mtx_destroy(&vbus_ext->lock);
376 	free(vbus_ext, M_DEVBUF);
377 	KdPrint(("hpt_shutdown_vbus done"));
378 }
379 
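/*
 * Drain the deferred task list: detach it from the vbus_ext and run each
 * task's callback in order.  Called with the vbus lock held.
 */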
380 static void __hpt_do_tasks(PVBUS_EXT vbus_ext)
381 {
382 	OSM_TASK *tasks;
383 
384 	tasks = vbus_ext->tasks;
385 	vbus_ext->tasks = 0;
386 
387 	while (tasks) {
388 		OSM_TASK *t = tasks;
389 		tasks = t->next;
390 		t->next = 0;
391 		t->func(vbus_ext->vbus, t->data);
392 	}
393 }
394 
395 static void hpt_do_tasks(PVBUS_EXT vbus_ext, int pending)
396 {
397 	if(vbus_ext){
398 		hpt_lock_vbus(vbus_ext);
399 		__hpt_do_tasks(vbus_ext);
400 		hpt_unlock_vbus(vbus_ext);
401 	}
402 }
403 
404 static void hpt_action(struct cam_sim *sim, union ccb *ccb);
405 static void hpt_poll(struct cam_sim *sim);
406 static void hpt_async(void * callback_arg, u_int32_t code, struct cam_path * path, void * arg);
407 static void hpt_pci_intr(void *arg);
408 
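/*
 * Per-command OS extensions (CCB pointer, DMA map, timeout callout, SG
 * buffer) are kept on a simple singly linked free list, manipulated only
 * with the vbus lock held.
 */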
409 static __inline POS_CMDEXT cmdext_get(PVBUS_EXT vbus_ext)
410 {
411 	POS_CMDEXT p = vbus_ext->cmdext_list;
412 	if (p)
413 		vbus_ext->cmdext_list = p->next;
414 	return p;
415 }
416 
417 static __inline void cmdext_put(POS_CMDEXT p)
418 {
419 	p->next = p->vbus_ext->cmdext_list;
420 	p->vbus_ext->cmdext_list = p;
421 }
422 
423 static void hpt_timeout(void *arg)
424 {
425 	PCOMMAND pCmd = (PCOMMAND)arg;
426 	POS_CMDEXT ext = (POS_CMDEXT)pCmd->priv;
427 
428 	KdPrint(("pCmd %p timeout", pCmd));
429 
430 	ldm_reset_vbus((PVBUS)ext->vbus_ext->vbus);
431 }
432 
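/*
 * Command completion: for ATA pass-through CDBs (ATA_12/ATA_16) build
 * descriptor-format sense data (response code 0x72) containing an ATA
 * Status Return descriptor (code 0x09) with the returned task-file
 * registers, then map the HIM result to a CAM status, sync and unload the
 * DMA map, and complete the CCB.
 */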
433 static void os_cmddone(PCOMMAND pCmd)
434 {
435 	POS_CMDEXT ext = (POS_CMDEXT)pCmd->priv;
436 	union ccb *ccb = ext->ccb;
437 	HPT_U8 *cdb;
438 
439 	if (ccb->ccb_h.flags & CAM_CDB_POINTER)
440 		cdb = ccb->csio.cdb_io.cdb_ptr;
441 	else
442 		cdb = ccb->csio.cdb_io.cdb_bytes;
443 
444 	KdPrint(("os_cmddone(%p, %d)", pCmd, pCmd->Result));
445 
446 	callout_stop(&ext->timeout);
447 	switch(cdb[0]) {
448 		case 0x85: /*ATA_16*/
449 		case 0xA1: /*ATA_12*/
450 		{
451 			PassthroughCmd *passthru = &pCmd->uCmd.Passthrough;
452 			HPT_U8 *sense_buffer = (HPT_U8 *)&ccb->csio.sense_data;
453 			memset(&ccb->csio.sense_data, 0,sizeof(ccb->csio.sense_data));
454 
455 			sense_buffer[0] = 0x72; /* Response Code */
456 			sense_buffer[7] = 14; /* Additional Sense Length */
457 
458 			sense_buffer[8] = 0x9; /* ATA Return Descriptor */
459 			sense_buffer[9] = 0xc; /* Additional Descriptor Length */
460 			sense_buffer[11] = (HPT_U8)passthru->bFeaturesReg; /* Error */
461 			sense_buffer[13] = (HPT_U8)passthru->bSectorCountReg;  /* Sector Count (7:0) */
462 			sense_buffer[15] = (HPT_U8)passthru->bLbaLowReg; /* LBA Low (7:0) */
463 			sense_buffer[17] = (HPT_U8)passthru->bLbaMidReg; /* LBA Mid (7:0) */
464 			sense_buffer[19] = (HPT_U8)passthru->bLbaHighReg; /* LBA High (7:0) */
465 
466 			if ((cdb[0] == 0x85) && (cdb[1] & 0x1))
467 			{
468 				sense_buffer[10] = 1;
469 				sense_buffer[12] = (HPT_U8)(passthru->bSectorCountReg >> 8); /* Sector Count (15:8) */
470 				sense_buffer[14] = (HPT_U8)(passthru->bLbaLowReg >> 8);	/* LBA Low (15:8) */
471 				sense_buffer[16] = (HPT_U8)(passthru->bLbaMidReg >> 8); /* LBA Mid (15:8) */
472 				sense_buffer[18] = (HPT_U8)(passthru->bLbaHighReg >> 8); /* LBA High (15:8) */
473 			}
474 
475 			sense_buffer[20] = (HPT_U8)passthru->bDriveHeadReg; /* Device */
476 			sense_buffer[21] = (HPT_U8)passthru->bCommandReg; /* Status */
477 			KdPrint(("sts 0x%x err 0x%x low 0x%x mid 0x%x hig 0x%x dh 0x%x sc 0x%x",
478 					 passthru->bCommandReg,
479 					 passthru->bFeaturesReg,
480 					 passthru->bLbaLowReg,
481 					 passthru->bLbaMidReg,
482 					 passthru->bLbaHighReg,
483 					 passthru->bDriveHeadReg,
484 					 passthru->bSectorCountReg));
485 			KdPrint(("result:0x%x,bFeaturesReg:0x%04x,bSectorCountReg:0x%04x,LBA:0x%04x%04x%04x ",
486 				pCmd->Result,passthru->bFeaturesReg,passthru->bSectorCountReg,
487 				passthru->bLbaHighReg,passthru->bLbaMidReg,passthru->bLbaLowReg));
488 		}
489 		default:
490 			break;
491 	}
492 
493 	switch(pCmd->Result) {
494 	case RETURN_SUCCESS:
495 		ccb->ccb_h.status = CAM_REQ_CMP;
496 		break;
497 	case RETURN_BAD_DEVICE:
498 		ccb->ccb_h.status = CAM_DEV_NOT_THERE;
499 		break;
500 	case RETURN_DEVICE_BUSY:
501 		ccb->ccb_h.status = CAM_BUSY;
502 		break;
503 	case RETURN_INVALID_REQUEST:
504 		ccb->ccb_h.status = CAM_REQ_INVALID;
505 		break;
506 	case RETURN_SELECTION_TIMEOUT:
507 		ccb->ccb_h.status = CAM_SEL_TIMEOUT;
508 		break;
509 	case RETURN_RETRY:
510 		ccb->ccb_h.status = CAM_BUSY;
511 		break;
512 	default:
513 		ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
514 		break;
515 	}
516 
517 	if (pCmd->flags.data_in) {
518 		bus_dmamap_sync(ext->vbus_ext->io_dmat, ext->dma_map, BUS_DMASYNC_POSTREAD);
519 	}
520 	else if (pCmd->flags.data_out) {
521 		bus_dmamap_sync(ext->vbus_ext->io_dmat, ext->dma_map, BUS_DMASYNC_POSTWRITE);
522 	}
523 
524 	bus_dmamap_unload(ext->vbus_ext->io_dmat, ext->dma_map);
525 
526 	cmdext_put(ext);
527 	ldm_free_cmds(pCmd);
528 	xpt_done(ccb);
529 }
530 
531 static int os_buildsgl(PCOMMAND pCmd, PSG pSg, int logical)
532 {
533 	/* since we always supply a physical SG list, nobody should ask us to build one */
534 	HPT_ASSERT(0);
535 	return FALSE;
536 }
537 
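/*
 * busdma callback: copy the physical segments into the command's SG list,
 * mark the last entry with 'eot', pre-sync the DMA map for the transfer
 * direction, arm the per-command timeout and queue the command to the LDM.
 */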
538 static void hpt_io_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
539 {
540 	PCOMMAND pCmd = (PCOMMAND)arg;
541 	POS_CMDEXT ext = (POS_CMDEXT)pCmd->priv;
542 	PSG psg = pCmd->psg;
543 	int idx;
544 
545 	HPT_ASSERT(pCmd->flags.physical_sg);
546 
547 	if (error)
548 		panic("busdma error");
549 
550 	HPT_ASSERT(nsegs<=os_max_sg_descriptors);
551 
552 	if (nsegs != 0) {
553 		for (idx = 0; idx < nsegs; idx++, psg++) {
554 			psg->addr.bus = segs[idx].ds_addr;
555 			psg->size = segs[idx].ds_len;
556 			psg->eot = 0;
557 		}
558 		psg[-1].eot = 1;
559 
560 		if (pCmd->flags.data_in) {
561 			bus_dmamap_sync(ext->vbus_ext->io_dmat, ext->dma_map,
562 			    BUS_DMASYNC_PREREAD);
563 		}
564 		else if (pCmd->flags.data_out) {
565 			bus_dmamap_sync(ext->vbus_ext->io_dmat, ext->dma_map,
566 			    BUS_DMASYNC_PREWRITE);
567 		}
568 	}
569 
570 	callout_reset(&ext->timeout, HPT_OSM_TIMEOUT, hpt_timeout, pCmd);
571 	ldm_queue_cmd(pCmd);
572 }
573 
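/*
 * Translate a CAM SCSI CCB for a virtual device.  READ/WRITE/VERIFY are
 * mapped to LDM block commands, ATA_12/ATA_16 become pass-through commands
 * on raw disks, and INQUIRY, READ CAPACITY, READ CAPACITY(16) (via
 * SERVICE ACTION IN) and REPORT LUNS are emulated by the driver itself.
 */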
574 static void hpt_scsi_io(PVBUS_EXT vbus_ext, union ccb *ccb)
575 {
576 	PVBUS vbus = (PVBUS)vbus_ext->vbus;
577 	PVDEV vd;
578 	PCOMMAND pCmd;
579 	POS_CMDEXT ext;
580 	HPT_U8 *cdb;
581 
582 	if (ccb->ccb_h.flags & CAM_CDB_POINTER)
583 		cdb = ccb->csio.cdb_io.cdb_ptr;
584 	else
585 		cdb = ccb->csio.cdb_io.cdb_bytes;
586 
587 	KdPrint(("hpt_scsi_io: ccb %x id %d lun %d cdb %x-%x-%x",
588 		ccb,
589 		ccb->ccb_h.target_id, ccb->ccb_h.target_lun,
590 		*(HPT_U32 *)&cdb[0], *(HPT_U32 *)&cdb[4], *(HPT_U32 *)&cdb[8]
591 	));
592 
593 	/* ccb->ccb_h.path_id is not our bus id - don't check it */
594 	if (ccb->ccb_h.target_lun != 0 ||
595 		ccb->ccb_h.target_id >= osm_max_targets ||
596 		(ccb->ccb_h.flags & CAM_CDB_PHYS))
597 	{
598 		ccb->ccb_h.status = CAM_TID_INVALID;
599 		xpt_done(ccb);
600 		return;
601 	}
602 
603 	vd = ldm_find_target(vbus, ccb->ccb_h.target_id);
604 
605 	if (!vd) {
606 		ccb->ccb_h.status = CAM_SEL_TIMEOUT;
607 		xpt_done(ccb);
608 		return;
609 	}
610 
611 	switch (cdb[0]) {
612 	case TEST_UNIT_READY:
613 	case START_STOP_UNIT:
614 	case SYNCHRONIZE_CACHE:
615 		ccb->ccb_h.status = CAM_REQ_CMP;
616 		break;
617 
618 	case 0x85: /*ATA_16*/
619 	case 0xA1: /*ATA_12*/
620 	{
621 		int error;
622 		HPT_U8 prot;
623 		PassthroughCmd *passthru;
624 
625 		if (mIsArray(vd->type)) {
626 			ccb->ccb_h.status = CAM_REQ_INVALID;
627 			break;
628 		}
629 
630 		HPT_ASSERT(vd->type == VD_RAW && vd->u.raw.legacy_disk);
631 
632 		prot = (cdb[1] & 0x1e) >> 1;
633 
634 
635 		if (prot < 3 || prot > 5)
636 		{
637 			ccb->ccb_h.status = CAM_REQ_INVALID;
638 			break;
639 		}
640 
641 		pCmd = ldm_alloc_cmds(vbus, vd->cmds_per_request);
642 		if (!pCmd) {
643 			HPT_ASSERT(0);
644 			ccb->ccb_h.status = CAM_BUSY;
645 			break;
646 		}
647 
648 		passthru = &pCmd->uCmd.Passthrough;
649 		if (cdb[0] == 0x85/*ATA_16*/) {
650 			if (cdb[1] & 0x1) {
651 				passthru->bFeaturesReg =
652 					((HPT_U16)cdb[3] << 8)
653 						| cdb[4];
654 				passthru->bSectorCountReg =
655 					((HPT_U16)cdb[5] << 8) |
656 						cdb[6];
657 				passthru->bLbaLowReg =
658 					((HPT_U16)cdb[7] << 8) |
659 						cdb[8];
660 				passthru->bLbaMidReg =
661 					((HPT_U16)cdb[9] << 8) |
662 						cdb[10];
663 				passthru->bLbaHighReg =
664 					((HPT_U16)cdb[11] << 8) |
665 						cdb[12];
666 			} else {
667 				passthru->bFeaturesReg = cdb[4];
668 				passthru->bSectorCountReg = cdb[6];
669 				passthru->bLbaLowReg = cdb[8];
670 				passthru->bLbaMidReg = cdb[10];
671 				passthru->bLbaHighReg = cdb[12];
672 			}
673 			passthru->bDriveHeadReg = cdb[13];
674 			passthru->bCommandReg = cdb[14];
675 
676 		} else { /*ATA_12*/
677 
678 			passthru->bFeaturesReg = cdb[3];
679 			passthru->bSectorCountReg = cdb[4];
680 			passthru->bLbaLowReg = cdb[5];
681 			passthru->bLbaMidReg = cdb[6];
682 			passthru->bLbaHighReg = cdb[7];
683 			passthru->bDriveHeadReg = cdb[8];
684 			passthru->bCommandReg = cdb[9];
685 		}
686 
687 		if (cdb[1] & 0xe0) {
688 
689 
690 			if (!(passthru->bCommandReg == ATA_CMD_READ_MULTI ||
691 				passthru->bCommandReg == ATA_CMD_READ_MULTI_EXT ||
692 				passthru->bCommandReg == ATA_CMD_WRITE_MULTI ||
693 				passthru->bCommandReg == ATA_CMD_WRITE_MULTI_EXT ||
694 				passthru->bCommandReg == ATA_CMD_WRITE_MULTI_FUA_EXT)
695 				) {
696 				goto error;
697 			}
698 		}
699 
700 
701 		if (passthru->bFeaturesReg == ATA_SET_FEATURES_XFER &&
702 			passthru->bCommandReg == ATA_CMD_SET_FEATURES) {
703 			goto error;
704 		}
705 
706 
707 		passthru->nSectors = ccb->csio.dxfer_len/ATA_SECTOR_SIZE;
708 		switch (prot) {
709 			default: /* Non-data */
710 				break;
711 			case 4: /* PIO data-in: if T_LENGTH is set, T_DIR must be 1 (device-to-host) */
712 				if ((cdb[2] & 3) &&
713 					(cdb[2] & 0x8) == 0)
714 				{
715 					OsPrint(("PIO data in, T_DIR=1 match check"));
716 					goto error;
717 				}
718 				pCmd->flags.data_in = 1;
719 						break;
720 			case 5: /* PIO data-out: if T_LENGTH is set, T_DIR must be 0 (host-to-device) */
721 				if ((cdb[2] & 3) &&
722 					(cdb[2] & 0x8))
723 				{
724 					OsPrint(("PIO data out, T_DIR=0 match check"));
725 					goto error;
726 				}
727 
728 				pCmd->flags.data_out = 1;
729 				break;
730 		}
731 		pCmd->type = CMD_TYPE_PASSTHROUGH;
732 		pCmd->priv = ext = cmdext_get(vbus_ext);
733 		HPT_ASSERT(ext);
734 		ext->ccb = ccb;
735 		pCmd->target = vd;
736 		pCmd->done = os_cmddone;
737 		pCmd->buildsgl = os_buildsgl;
738 		pCmd->psg = ext->psg;
739 
740 		if(!ccb->csio.dxfer_len)
741 		{
742 			ldm_queue_cmd(pCmd);
743 			return;
744 		}
745 		pCmd->flags.physical_sg = 1;
746 		error = bus_dmamap_load_ccb(vbus_ext->io_dmat,
747 					ext->dma_map, ccb,
748 					hpt_io_dmamap_callback, pCmd,
749 				    	BUS_DMA_WAITOK
750 					);
751 		KdPrint(("bus_dmamap_load return %d", error));
752 		if (error && error!=EINPROGRESS) {
753 			os_printk("bus_dmamap_load error %d", error);
754 			cmdext_put(ext);
755 			ldm_free_cmds(pCmd);
756 			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
757 			xpt_done(ccb);
758 		}
759 		return;
760 error:
761 		ldm_free_cmds(pCmd);
762 		ccb->ccb_h.status = CAM_REQ_INVALID;
763 		break;
764 	}
765 
766 	case INQUIRY:
767 	{
768 		PINQUIRYDATA inquiryData;
769 		HIM_DEVICE_CONFIG devconf;
770 		HPT_U8 *rbuf;
771 
772 		memset(ccb->csio.data_ptr, 0, ccb->csio.dxfer_len);
773 		inquiryData = (PINQUIRYDATA)ccb->csio.data_ptr;
774 
775 		if (cdb[1] & 1) {
776 			rbuf = (HPT_U8 *)inquiryData;
777 			switch(cdb[2]) {
778 			case 0:
779 				rbuf[0] = 0;
780 				rbuf[1] = 0;
781 				rbuf[2] = 0;
782 				rbuf[3] = 3;
783 				rbuf[4] = 0;
784 				rbuf[5] = 0x80;
785 				rbuf[6] = 0x83;
786 				ccb->ccb_h.status = CAM_REQ_CMP;
787 				break;
788 			case 0x80: {
789 				rbuf[0] = 0;
790 				rbuf[1] = 0x80;
791 				rbuf[2] = 0;
792 				if (vd->type == VD_RAW) {
793 					rbuf[3] = 20;
794 					vd->u.raw.him->get_device_config(vd->u.raw.phy_dev,&devconf);
795 					memcpy(&rbuf[4], devconf.pIdentifyData->SerialNumber, 20);
796 					ldm_ide_fixstring(&rbuf[4], 20);
797 				} else {
798 					rbuf[3] = 1;
799 					rbuf[4] = 0x20;
800 				}
801 				ccb->ccb_h.status = CAM_REQ_CMP;
802 				break;
803 			}
804 			case 0x83:
805 				rbuf[0] = 0;
806 				rbuf[1] = 0x83;
807 				rbuf[2] = 0;
808 				rbuf[3] = 12;
809 				rbuf[4] = 1;
810 				rbuf[5] = 2;
811 				rbuf[6] = 0;
812 				rbuf[7] = 8;
813 				rbuf[8] = 0;
814 				rbuf[9] = 0x19;
815 				rbuf[10] = 0x3C;
816 				rbuf[11] = 0;
817 				rbuf[12] = 0;
818 				rbuf[13] = 0;
819 				rbuf[14] = 0;
820 				rbuf[15] = 0;
821 				ccb->ccb_h.status = CAM_REQ_CMP;
822 				break;
823 			default:
824 				ccb->ccb_h.status = CAM_REQ_INVALID;
825 				break;
826 			}
827 
828 			break;
829 		}
830 		else if (cdb[2]) {
831 			ccb->ccb_h.status = CAM_REQ_INVALID;
832 			break;
833 		}
834 
835 		inquiryData->DeviceType = 0; /*DIRECT_ACCESS_DEVICE*/
836 		inquiryData->Versions = 5; /*SPC-3*/
837 		inquiryData->ResponseDataFormat = 2;
838 		inquiryData->AdditionalLength = 0x5b;
839 		inquiryData->CommandQueue = 1;
840 
841 		if (ccb->csio.dxfer_len > 63) {
842 			rbuf = (HPT_U8 *)inquiryData;
843 			rbuf[58] = 0x60;
844 			rbuf[59] = 0x3;
845 
846 			rbuf[64] = 0x3;
847 			rbuf[66] = 0x3;
848 			rbuf[67] = 0x20;
849 
850 		}
851 
852 		if (vd->type == VD_RAW) {
853 			vd->u.raw.him->get_device_config(vd->u.raw.phy_dev,&devconf);
854 
855 			if ((devconf.pIdentifyData->GeneralConfiguration & 0x80))
856 				inquiryData->RemovableMedia = 1;
857 
858 
859 			memcpy(&inquiryData->VendorId, "ATA     ", 8);
860 			memcpy(&inquiryData->ProductId, devconf.pIdentifyData->ModelNumber, 16);
861 			ldm_ide_fixstring((HPT_U8 *)&inquiryData->ProductId, 16);
862 			memcpy(&inquiryData->ProductRevisionLevel, devconf.pIdentifyData->FirmwareRevision, 4);
863 			ldm_ide_fixstring((HPT_U8 *)&inquiryData->ProductRevisionLevel, 4);
864 			if (inquiryData->ProductRevisionLevel[0] == 0 || inquiryData->ProductRevisionLevel[0] == ' ')
865 				memcpy(&inquiryData->ProductRevisionLevel, "n/a ", 4);
866 		} else {
867 			memcpy(&inquiryData->VendorId, "HPT     ", 8);
868 			snprintf((char *)&inquiryData->ProductId, 16, "DISK_%d_%d        ",
869 				os_get_vbus_seq(vbus_ext), vd->target_id);
870 			inquiryData->ProductId[15] = ' ';
871 			memcpy(&inquiryData->ProductRevisionLevel, "4.00", 4);
872 		}
873 
874 		ccb->ccb_h.status = CAM_REQ_CMP;
875 		break;
876 	}
877 	case READ_CAPACITY:
878 	{
879 		HPT_U8 *rbuf = ccb->csio.data_ptr;
880 		HPT_U32 cap;
881 		HPT_U8 sector_size_shift = 0;
882 		HPT_U64 new_cap;
883 		HPT_U32 sector_size = 0;
884 
885 		if (mIsArray(vd->type))
886 			sector_size_shift = vd->u.array.sector_size_shift;
887 		else{
888 			if(vd->type == VD_RAW){
889 				sector_size = vd->u.raw.logical_sector_size;
890 			}
891 
892 			switch (sector_size) {
893 				case 0x1000:
894 					KdPrint(("set 4k sector size in READ_CAPACITY"));
895 					sector_size_shift = 3;
896 					break;
897 				default:
898 					break;
899 			}
900 		}
901 		new_cap = vd->capacity >> sector_size_shift;
902 
903 		if (new_cap > 0xfffffffful)
904 			cap = 0xffffffff;
905 		else
906 			cap = new_cap - 1;
907 
908 		rbuf[0] = (HPT_U8)(cap>>24);
909 		rbuf[1] = (HPT_U8)(cap>>16);
910 		rbuf[2] = (HPT_U8)(cap>>8);
911 		rbuf[3] = (HPT_U8)cap;
912 		rbuf[4] = 0;
913 		rbuf[5] = 0;
914 		rbuf[6] = 2 << sector_size_shift;
915 		rbuf[7] = 0;
916 
917 		ccb->ccb_h.status = CAM_REQ_CMP;
918 		break;
919 	}
920 
921 	case REPORT_LUNS:
922 	{
923 		HPT_U8 *rbuf = ccb->csio.data_ptr;
924 		memset(rbuf, 0, 16);
925 		rbuf[3] = 8;
926 		ccb->ccb_h.status = CAM_REQ_CMP;
927 		break;
928 	}
929 	case SERVICE_ACTION_IN:
930 	{
931 		HPT_U8 *rbuf = ccb->csio.data_ptr;
932 		HPT_U64	cap = 0;
933 		HPT_U8 sector_size_shift = 0;
934 		HPT_U32 sector_size = 0;
935 
936 		if(mIsArray(vd->type))
937 			sector_size_shift = vd->u.array.sector_size_shift;
938 		else{
939 			if(vd->type == VD_RAW){
940 				sector_size = vd->u.raw.logical_sector_size;
941 			}
942 
943 			switch (sector_size) {
944 				case 0x1000:
945 					KdPrint(("set 4k sector size in SERVICE_ACTION_IN"));
946 					sector_size_shift = 3;
947 					break;
948 				default:
949 					break;
950 			}
951 		}
952 		cap = (vd->capacity >> sector_size_shift) - 1;
953 
954 		rbuf[0] = (HPT_U8)(cap>>56);
955 		rbuf[1] = (HPT_U8)(cap>>48);
956 		rbuf[2] = (HPT_U8)(cap>>40);
957 		rbuf[3] = (HPT_U8)(cap>>32);
958 		rbuf[4] = (HPT_U8)(cap>>24);
959 		rbuf[5] = (HPT_U8)(cap>>16);
960 		rbuf[6] = (HPT_U8)(cap>>8);
961 		rbuf[7] = (HPT_U8)cap;
962 		rbuf[8] = 0;
963 		rbuf[9] = 0;
964 		rbuf[10] = 2 << sector_size_shift;
965 		rbuf[11] = 0;
966 
967 		if(!mIsArray(vd->type)){
968 			rbuf[13] = vd->u.raw.logicalsectors_per_physicalsector;
969 			rbuf[14] = (HPT_U8)((vd->u.raw.lowest_aligned >> 8) & 0x3f);
970 			rbuf[15] = (HPT_U8)(vd->u.raw.lowest_aligned);
971 		}
972 
973 		ccb->ccb_h.status = CAM_REQ_CMP;
974 		break;
975 	}
976 
977 	case READ_6:
978 	case READ_10:
979 	case READ_16:
980 	case WRITE_6:
981 	case WRITE_10:
982 	case WRITE_16:
983 	case 0x13:
984 	case 0x2f:
985 	case 0x8f: /* VERIFY_16 */
986 	{
987 		int error;
988 		HPT_U8 sector_size_shift = 0;
989 		HPT_U32 sector_size = 0;
990 		pCmd = ldm_alloc_cmds(vbus, vd->cmds_per_request);
991 		if(!pCmd){
992 			KdPrint(("Failed to allocate command!"));
993 			ccb->ccb_h.status = CAM_BUSY;
994 			break;
995 		}
996 
997 		switch (cdb[0])	{
998 		case READ_6:
999 		case WRITE_6:
1000 		case 0x13:
1001 			pCmd->uCmd.Ide.Lba =  ((HPT_U32)cdb[1] << 16) | ((HPT_U32)cdb[2] << 8) | (HPT_U32)cdb[3];
1002 			pCmd->uCmd.Ide.nSectors = (HPT_U16) cdb[4];
1003 			break;
1004 		case READ_16:
1005 		case WRITE_16:
1006 		case 0x8f: /* VERIFY_16 */
1007 		{
1008 			HPT_U64 block =
1009 				((HPT_U64)cdb[2]<<56) |
1010 				((HPT_U64)cdb[3]<<48) |
1011 				((HPT_U64)cdb[4]<<40) |
1012 				((HPT_U64)cdb[5]<<32) |
1013 				((HPT_U64)cdb[6]<<24) |
1014 				((HPT_U64)cdb[7]<<16) |
1015 				((HPT_U64)cdb[8]<<8) |
1016 				((HPT_U64)cdb[9]);
1017 			pCmd->uCmd.Ide.Lba = block;
1018 			pCmd->uCmd.Ide.nSectors = (HPT_U16)cdb[13] | ((HPT_U16)cdb[12]<<8);
1019 			break;
1020 		}
1021 
1022 		default:
1023 			pCmd->uCmd.Ide.Lba = (HPT_U32)cdb[5] | ((HPT_U32)cdb[4] << 8) | ((HPT_U32)cdb[3] << 16) | ((HPT_U32)cdb[2] << 24);
1024 			pCmd->uCmd.Ide.nSectors = (HPT_U16) cdb[8] | ((HPT_U16)cdb[7]<<8);
1025 			break;
1026 		}
1027 
1028 		if(mIsArray(vd->type)) {
1029 			sector_size_shift = vd->u.array.sector_size_shift;
1030 		}
1031 		else{
1032 			if(vd->type == VD_RAW){
1033 				sector_size = vd->u.raw.logical_sector_size;
1034 			}
1035 
1036 			switch (sector_size) {
1037 				case 0x1000:
1038 					KdPrint(("resize sector size from 4k to 512"));
1039 					sector_size_shift = 3;
1040 					break;
1041 				default:
1042 					break;
1043 	 		}
1044 		}
1045 		pCmd->uCmd.Ide.Lba <<= sector_size_shift;
1046 		pCmd->uCmd.Ide.nSectors <<= sector_size_shift;
1047 
1048 
1049 		switch (cdb[0]) {
1050 		case READ_6:
1051 		case READ_10:
1052 		case READ_16:
1053 			pCmd->flags.data_in = 1;
1054 			break;
1055 		case WRITE_6:
1056 		case WRITE_10:
1057 		case WRITE_16:
1058 			pCmd->flags.data_out = 1;
1059 			break;
1060 		}
1061 		pCmd->priv = ext = cmdext_get(vbus_ext);
1062 		HPT_ASSERT(ext);
1063 		ext->ccb = ccb;
1064 		pCmd->target = vd;
1065 		pCmd->done = os_cmddone;
1066 		pCmd->buildsgl = os_buildsgl;
1067 		pCmd->psg = ext->psg;
1068 		pCmd->flags.physical_sg = 1;
1069 		error = bus_dmamap_load_ccb(vbus_ext->io_dmat,
1070 					ext->dma_map, ccb,
1071 					hpt_io_dmamap_callback, pCmd,
1072 				    	BUS_DMA_WAITOK
1073 					);
1074 		KdPrint(("bus_dmamap_load return %d", error));
1075 		if (error && error!=EINPROGRESS) {
1076 			os_printk("bus_dmamap_load error %d", error);
1077 			cmdext_put(ext);
1078 			ldm_free_cmds(pCmd);
1079 			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
1080 			xpt_done(ccb);
1081 		}
1082 		return;
1083 	}
1084 
1085 	default:
1086 		ccb->ccb_h.status = CAM_REQ_INVALID;
1087 		break;
1088 	}
1089 
1090 	xpt_done(ccb);
1091 	return;
1092 }
1093 
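/*
 * CAM action entry point: SCSI I/O goes to hpt_scsi_io(); bus reset, path
 * inquiry, geometry and transfer-setting requests are handled inline.
 */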
1094 static void hpt_action(struct cam_sim *sim, union ccb *ccb)
1095 {
1096 	PVBUS_EXT vbus_ext = (PVBUS_EXT)cam_sim_softc(sim);
1097 
1098 	KdPrint(("hpt_action(fn=%d, id=%d)", ccb->ccb_h.func_code, ccb->ccb_h.target_id));
1099 
1100 	hpt_assert_vbus_locked(vbus_ext);
1101 	switch (ccb->ccb_h.func_code) {
1102 
1103 	case XPT_SCSI_IO:
1104 		hpt_scsi_io(vbus_ext, ccb);
1105 		return;
1106 
1107 	case XPT_RESET_BUS:
1108 		ldm_reset_vbus((PVBUS)vbus_ext->vbus);
1109 		break;
1110 
1111 	case XPT_GET_TRAN_SETTINGS:
1112 	case XPT_SET_TRAN_SETTINGS:
1113 		ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
1114 		break;
1115 
1116 	case XPT_CALC_GEOMETRY:
1117 		ccb->ccg.heads = 255;
1118 		ccb->ccg.secs_per_track = 63;
1119 		ccb->ccg.cylinders = ccb->ccg.volume_size / (ccb->ccg.heads * ccb->ccg.secs_per_track);
1120 		ccb->ccb_h.status = CAM_REQ_CMP;
1121 		break;
1122 
1123 	case XPT_PATH_INQ:
1124 	{
1125 		struct ccb_pathinq *cpi = &ccb->cpi;
1126 
1127 		cpi->version_num = 1;
1128 		cpi->hba_inquiry = PI_SDTR_ABLE;
1129 		cpi->target_sprt = 0;
1130 		cpi->hba_misc = PIM_NOBUSRESET;
1131 		cpi->hba_eng_cnt = 0;
1132 		cpi->max_target = osm_max_targets;
1133 		cpi->max_lun = 0;
1134 		cpi->unit_number = cam_sim_unit(sim);
1135 		cpi->bus_id = cam_sim_bus(sim);
1136 		cpi->initiator_id = osm_max_targets;
1137 		cpi->base_transfer_speed = 3300;
1138 
1139 		strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
1140 		strlcpy(cpi->hba_vid, "HPT   ", HBA_IDLEN);
1141 		strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
1142 		cpi->transport = XPORT_SPI;
1143 		cpi->transport_version = 2;
1144 		cpi->protocol = PROTO_SCSI;
1145 		cpi->protocol_version = SCSI_REV_2;
1146 		cpi->ccb_h.status = CAM_REQ_CMP;
1147 		break;
1148 	}
1149 
1150 	default:
1151 		ccb->ccb_h.status = CAM_REQ_INVALID;
1152 		break;
1153 	}
1154 
1155 	xpt_done(ccb);
1156 	return;
1157 }
1158 
1159 static void hpt_pci_intr(void *arg)
1160 {
1161 	PVBUS_EXT vbus_ext = (PVBUS_EXT)arg;
1162 	hpt_lock_vbus(vbus_ext);
1163 	ldm_intr((PVBUS)vbus_ext->vbus);
1164 	hpt_unlock_vbus(vbus_ext);
1165 }
1166 
1167 static void hpt_poll(struct cam_sim *sim)
1168 {
1169 	PVBUS_EXT vbus_ext = cam_sim_softc(sim);
1170 	hpt_assert_vbus_locked(vbus_ext);
1171 	ldm_intr((PVBUS)vbus_ext->vbus);
1172 }
1173 
1174 static void hpt_async(void * callback_arg, u_int32_t code, struct cam_path * path, void * arg)
1175 {
1176 	KdPrint(("hpt_async"));
1177 }
1178 
1179 static int hpt_shutdown(device_t dev)
1180 {
1181 	KdPrint(("hpt_shutdown(dev=%p)", dev));
1182 	return 0;
1183 }
1184 
1185 static int hpt_detach(device_t dev)
1186 {
1187 	/* we don't allow the driver to be unloaded. */
1188 	return EBUSY;
1189 }
1190 
1191 static void hpt_ioctl_done(struct _IOCTL_ARG *arg)
1192 {
1193 	arg->ioctl_cmnd = 0;
1194 	wakeup(arg);
1195 }
1196 
1197 static void __hpt_do_ioctl(PVBUS_EXT vbus_ext, IOCTL_ARG *ioctl_args)
1198 {
1199 	ioctl_args->result = -1;
1200 	ioctl_args->done = hpt_ioctl_done;
1201 	ioctl_args->ioctl_cmnd = (void *)1;
1202 
1203 	hpt_lock_vbus(vbus_ext);
1204 	ldm_ioctl((PVBUS)vbus_ext->vbus, ioctl_args);
1205 
1206 	while (ioctl_args->ioctl_cmnd) {
1207 		if (hpt_sleep(vbus_ext, ioctl_args, PPAUSE, "hptctl", HPT_OSM_TIMEOUT)==0)
1208 			break;
1209 		ldm_reset_vbus((PVBUS)vbus_ext->vbus);
1210 		__hpt_do_tasks(vbus_ext);
1211 	}
1212 
1213 	/* KdPrint(("ioctl %x result %d", ioctl_args->dwIoControlCode, ioctl_args->result)); */
1214 
1215 	hpt_unlock_vbus(vbus_ext);
1216 }
1217 
1218 static void hpt_do_ioctl(IOCTL_ARG *ioctl_args)
1219 {
1220 	PVBUS vbus;
1221 	PVBUS_EXT vbus_ext;
1222 
1223 	ldm_for_each_vbus(vbus, vbus_ext) {
1224 		__hpt_do_ioctl(vbus_ext, ioctl_args);
1225 		if (ioctl_args->result!=HPT_IOCTL_RESULT_WRONG_VBUS)
1226 			return;
1227 	}
1228 }
1229 
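/*
 * Statement-expression wrapper: build an IOCTL_ARG on the stack, run it
 * through hpt_do_ioctl() and evaluate to arg.result.
 */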
1230 #define HPT_DO_IOCTL(code, inbuf, insize, outbuf, outsize) ({\
1231 	IOCTL_ARG arg;\
1232 	arg.dwIoControlCode = code;\
1233 	arg.lpInBuffer = inbuf;\
1234 	arg.lpOutBuffer = outbuf;\
1235 	arg.nInBufferSize = insize;\
1236 	arg.nOutBufferSize = outsize;\
1237 	arg.lpBytesReturned = 0;\
1238 	hpt_do_ioctl(&arg);\
1239 	arg.result;\
1240 })
1241 
1242 #define DEVICEID_VALID(id) ((id) && ((HPT_U32)(id)!=0xffffffff))
1243 
1244 static int hpt_get_logical_devices(DEVICEID * pIds, int nMaxCount)
1245 {
1246 	int i;
1247 	HPT_U32 count = nMaxCount-1;
1248 
1249 	if (HPT_DO_IOCTL(HPT_IOCTL_GET_LOGICAL_DEVICES,
1250 			&count, sizeof(HPT_U32), pIds, sizeof(DEVICEID)*nMaxCount))
1251 		return -1;
1252 
1253 	nMaxCount = (int)pIds[0];
1254 	for (i=0; i<nMaxCount; i++) pIds[i] = pIds[i+1];
1255 	return nMaxCount;
1256 }
1257 
1258 static int hpt_get_device_info_v3(DEVICEID id, PLOGICAL_DEVICE_INFO_V3 pInfo)
1259 {
1260 	return HPT_DO_IOCTL(HPT_IOCTL_GET_DEVICE_INFO_V3,
1261 				&id, sizeof(DEVICEID), pInfo, sizeof(LOGICAL_DEVICE_INFO_V3));
1262 }
1263 
1264 /* This does not logically belong in this file, but we want to reuse the ioctl interface. */
1265 static int __hpt_stop_tasks(PVBUS_EXT vbus_ext, DEVICEID id)
1266 {
1267 	LOGICAL_DEVICE_INFO_V3 devinfo;
1268 	int i, result;
1269 	DEVICEID param[2] = { id, 0 };
1270 
1271 	if (hpt_get_device_info_v3(id, &devinfo))
1272 		return -1;
1273 
1274 	if (devinfo.Type!=LDT_ARRAY)
1275 		return -1;
1276 
1277 	if (devinfo.u.array.Flags & ARRAY_FLAG_REBUILDING)
1278 		param[1] = AS_REBUILD_ABORT;
1279 	else if (devinfo.u.array.Flags & ARRAY_FLAG_VERIFYING)
1280 		param[1] = AS_VERIFY_ABORT;
1281 	else if (devinfo.u.array.Flags & ARRAY_FLAG_INITIALIZING)
1282 		param[1] = AS_INITIALIZE_ABORT;
1283 	else if (devinfo.u.array.Flags & ARRAY_FLAG_TRANSFORMING)
1284 		param[1] = AS_TRANSFORM_ABORT;
1285 	else
1286 		return -1;
1287 
1288 	KdPrint(("SET_ARRAY_STATE(%x, %d)", param[0], param[1]));
1289 	result = HPT_DO_IOCTL(HPT_IOCTL_SET_ARRAY_STATE,
1290 				param, sizeof(param), 0, 0);
1291 
1292 	for (i=0; i<devinfo.u.array.nDisk; i++)
1293 		if (DEVICEID_VALID(devinfo.u.array.Members[i]))
1294 			__hpt_stop_tasks(vbus_ext, devinfo.u.array.Members[i]);
1295 
1296 	return result;
1297 }
1298 
1299 static void hpt_stop_tasks(PVBUS_EXT vbus_ext)
1300 {
1301 	DEVICEID ids[32];
1302 	int i, count;
1303 
1304 	count = hpt_get_logical_devices((DEVICEID *)&ids, sizeof(ids)/sizeof(ids[0]));
1305 
1306 	for (i=0; i<count; i++)
1307 		__hpt_stop_tasks(vbus_ext, ids[i]);
1308 }
1309 
1310 static	d_open_t	hpt_open;
1311 static	d_close_t	hpt_close;
1312 static	d_ioctl_t	hpt_ioctl;
1313 static  int 		hpt_rescan_bus(void);
1314 
1315 static struct cdevsw hpt_cdevsw = {
1316 	.d_open =	hpt_open,
1317 	.d_close =	hpt_close,
1318 	.d_ioctl =	hpt_ioctl,
1319 	.d_name =	driver_name,
1320 	.d_version =	D_VERSION,
1321 };
1322 
1323 static struct intr_config_hook hpt_ich;
1324 
1325 /*
1326  * hpt_final_init() runs from the intr_config_hook, after every hpt_attach() has completed.
1327  */
1328 static void hpt_final_init(void *dummy)
1329 {
1330 	int       i,unit_number=0;
1331 	PVBUS_EXT vbus_ext;
1332 	PVBUS vbus;
1333 	PHBA hba;
1334 
1335 	/* Clear the config hook */
1336 	config_intrhook_disestablish(&hpt_ich);
1337 
1338 	/* allocate memory */
1339 	i = 0;
1340 	ldm_for_each_vbus(vbus, vbus_ext) {
1341 		if (hpt_alloc_mem(vbus_ext)) {
1342 			os_printk("out of memory");
1343 			return;
1344 		}
1345 		i++;
1346 	}
1347 
1348 	if (!i) {
1349 		if (bootverbose)
1350 			os_printk("no controller detected.");
1351 		return;
1352 	}
1353 
1354 	/* initializing hardware */
1355 	ldm_for_each_vbus(vbus, vbus_ext) {
1356 		/* make timer available here */
1357 		mtx_init(&vbus_ext->lock, "hptsleeplock", NULL, MTX_DEF);
1358 		callout_init_mtx(&vbus_ext->timer, &vbus_ext->lock, 0);
1359 		if (hpt_init_vbus(vbus_ext)) {
1360 			os_printk("failed to initialize hardware");
1361 			break; /* FIXME */
1362 		}
1363 	}
1364 
1365 	/* register CAM interface */
1366 	ldm_for_each_vbus(vbus, vbus_ext) {
1367 		struct cam_devq *devq;
1368 		struct ccb_setasync	ccb;
1369 
1370 		if (bus_dma_tag_create(NULL,/* parent */
1371 				4,	/* alignment */
1372 				BUS_SPACE_MAXADDR_32BIT+1, /* boundary */
1373 				BUS_SPACE_MAXADDR,	/* lowaddr */
1374 				BUS_SPACE_MAXADDR, 	/* highaddr */
1375 				NULL, NULL, 		/* filter, filterarg */
1376 				PAGE_SIZE * (os_max_sg_descriptors-1),	/* maxsize */
1377 				os_max_sg_descriptors,	/* nsegments */
1378 				0x10000,	/* maxsegsize */
1379 				BUS_DMA_WAITOK,		/* flags */
1380 				busdma_lock_mutex,	/* lockfunc */
1381 				&vbus_ext->lock,		/* lockfuncarg */
1382 				&vbus_ext->io_dmat	/* tag */))
1383 		{
1384 			return ;
1385 		}
1386 
1387 		for (i=0; i<os_max_queue_comm; i++) {
1388 			POS_CMDEXT ext = (POS_CMDEXT)malloc(sizeof(OS_CMDEXT), M_DEVBUF, M_WAITOK);
1389 			if (!ext) {
1390 				os_printk("Can't alloc cmdext(%d)", i);
1391 				return ;
1392 			}
1393 			ext->vbus_ext = vbus_ext;
1394 			ext->next = vbus_ext->cmdext_list;
1395 			vbus_ext->cmdext_list = ext;
1396 
1397 			if (bus_dmamap_create(vbus_ext->io_dmat, 0, &ext->dma_map)) {
1398 				os_printk("Can't create dma map(%d)", i);
1399 				return ;
1400 			}
1401 			callout_init_mtx(&ext->timeout, &vbus_ext->lock, 0);
1402 		}
1403 
1404 		if ((devq = cam_simq_alloc(os_max_queue_comm)) == NULL) {
1405 			os_printk("cam_simq_alloc failed");
1406 			return ;
1407 		}
1408 
1409 		hpt_lock_vbus(vbus_ext);
1410 		vbus_ext->sim = cam_sim_alloc(hpt_action, hpt_poll, driver_name,
1411 				vbus_ext, unit_number, &vbus_ext->lock,
1412 				os_max_queue_comm, /*tagged*/8,  devq);
1413 		unit_number++;
1414 		if (!vbus_ext->sim) {
1415 			os_printk("cam_sim_alloc failed");
1416 			cam_simq_free(devq);
1417 			hpt_unlock_vbus(vbus_ext);
1418 			return ;
1419 		}
1420 
1421 		if (xpt_bus_register(vbus_ext->sim, NULL, 0) != CAM_SUCCESS) {
1422 			os_printk("xpt_bus_register failed");
1423 			cam_sim_free(vbus_ext->sim, /*free devq*/ TRUE);
1424 			vbus_ext->sim = NULL;
			hpt_unlock_vbus(vbus_ext);
1425 			return ;
1426 		}
1427 
1428 		if (xpt_create_path(&vbus_ext->path, /*periph */ NULL,
1429 				cam_sim_path(vbus_ext->sim), CAM_TARGET_WILDCARD,
1430 				CAM_LUN_WILDCARD) != CAM_REQ_CMP)
1431 		{
1432 			os_printk("xpt_create_path failed");
1433 			xpt_bus_deregister(cam_sim_path(vbus_ext->sim));
1434 			cam_sim_free(vbus_ext->sim, /*free_devq*/TRUE);
1435 			hpt_unlock_vbus(vbus_ext);
1436 			vbus_ext->sim = NULL;
1437 			return ;
1438 		}
1439 		hpt_unlock_vbus(vbus_ext);
1440 
1441 		memset(&ccb, 0, sizeof(ccb));
1442 		xpt_setup_ccb(&ccb.ccb_h, vbus_ext->path, /*priority*/5);
1443 		ccb.ccb_h.func_code = XPT_SASYNC_CB;
1444 		ccb.event_enable = AC_LOST_DEVICE;
1445 		ccb.callback = hpt_async;
1446 		ccb.callback_arg = vbus_ext;
1447 		xpt_action((union ccb *)&ccb);
1448 
1449 		for (hba = vbus_ext->hba_list; hba; hba = hba->next) {
1450 			int rid = 0;
1451 			if ((hba->irq_res = bus_alloc_resource_any(hba->pcidev,
1452 				SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE)) == NULL)
1453 			{
1454 				os_printk("can't allocate interrupt");
1455 				return ;
1456 			}
1457 
1458 			if (bus_setup_intr(hba->pcidev, hba->irq_res, INTR_TYPE_CAM | INTR_MPSAFE,
1459 				NULL, hpt_pci_intr, vbus_ext, &hba->irq_handle))
1460 			{
1461 				os_printk("can't set up interrupt");
1462 				return ;
1463 			}
1464 			hba->ldm_adapter.him->intr_control(hba->ldm_adapter.him_handle, HPT_TRUE);
1465 
1466 		}
1467 
1468 		vbus_ext->shutdown_eh = EVENTHANDLER_REGISTER(shutdown_final,
1469 									hpt_shutdown_vbus, vbus_ext, SHUTDOWN_PRI_DEFAULT);
1470 		if (!vbus_ext->shutdown_eh)
1471 			os_printk("Shutdown event registration failed");
1472 	}
1473 
1474 	ldm_for_each_vbus(vbus, vbus_ext) {
1475 		TASK_INIT(&vbus_ext->worker, 0, (task_fn_t *)hpt_do_tasks, vbus_ext);
1476 		if (vbus_ext->tasks)
1477 			TASK_ENQUEUE(&vbus_ext->worker);
1478 	}
1479 
1480 	make_dev(&hpt_cdevsw, DRIVER_MINOR, UID_ROOT, GID_OPERATOR,
1481 	    S_IRUSR | S_IWUSR, "%s", driver_name);
1482 }
1483 
1484 #if defined(KLD_MODULE)
1485 
1486 typedef struct driverlink *driverlink_t;
1487 struct driverlink {
1488 	kobj_class_t	driver;
1489 	TAILQ_ENTRY(driverlink) link;	/* list of drivers in devclass */
1490 };
1491 
1492 typedef TAILQ_HEAD(driver_list, driverlink) driver_list_t;
1493 
1494 struct devclass {
1495 	TAILQ_ENTRY(devclass) link;
1496 	devclass_t	parent;		/* parent in devclass hierarchy */
1497 	driver_list_t	drivers;     /* bus devclasses store drivers for bus */
1498 	char		*name;
1499 	device_t	*devices;	/* array of devices indexed by unit */
1500 	int		maxunit;	/* size of devices array */
1501 };
1502 
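/*
 * When loaded as a module, swap the driver named driver_name to the front
 * of the pci devclass's driver list so it is preferred during probing over
 * a driver of the same name compiled into the kernel.  This relies on the
 * local copies of the private driverlink/devclass layouts above staying in
 * sync with the kernel.
 */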
1503 static void override_kernel_driver(void)
1504 {
1505 	driverlink_t dl, dlfirst;
1506 	driver_t *tmpdriver;
1507 	devclass_t dc = devclass_find("pci");
1508 
1509 	if (dc){
1510 		dlfirst = TAILQ_FIRST(&dc->drivers);
1511 		for (dl = dlfirst; dl; dl = TAILQ_NEXT(dl, link)) {
1512 			if(strcmp(dl->driver->name, driver_name) == 0) {
1513 				tmpdriver=dl->driver;
1514 				dl->driver=dlfirst->driver;
1515 				dlfirst->driver=tmpdriver;
1516 				break;
1517 			}
1518 		}
1519 	}
1520 }
1521 
1522 #else
1523 #define override_kernel_driver()
1524 #endif
1525 
1526 static void hpt_init(void *dummy)
1527 {
1528 	if (bootverbose)
1529 		os_printk("%s %s", driver_name_long, driver_ver);
1530 
1531 	override_kernel_driver();
1532 	init_config();
1533 
1534 	hpt_ich.ich_func = hpt_final_init;
1535 	hpt_ich.ich_arg = NULL;
1536 	if (config_intrhook_establish(&hpt_ich) != 0) {
1537 		printf("%s: cannot establish configuration hook\n",
1538 		    driver_name_long);
1539 	}
1540 
1541 }
1542 SYSINIT(hptinit, SI_SUB_CONFIGURE, SI_ORDER_FIRST, hpt_init, NULL);
1543 
1544 /*
1545  * CAM driver interface
1546  */
1547 static device_method_t driver_methods[] = {
1548 	/* Device interface */
1549 	DEVMETHOD(device_probe,		hpt_probe),
1550 	DEVMETHOD(device_attach,	hpt_attach),
1551 	DEVMETHOD(device_detach,	hpt_detach),
1552 	DEVMETHOD(device_shutdown,	hpt_shutdown),
1553 	{ 0, 0 }
1554 };
1555 
1556 static driver_t hpt_pci_driver = {
1557 	driver_name,
1558 	driver_methods,
1559 	sizeof(HBA)
1560 };
1561 
1562 #ifndef TARGETNAME
1563 #error "no TARGETNAME found"
1564 #endif
1565 
1566 /* use these wrappers so that TARGETNAME is macro-expanded */
1567 #define __DRIVER_MODULE(p1, p2, p3, p4, p5) DRIVER_MODULE(p1, p2, p3, p4, p5)
1568 #define __MODULE_VERSION(p1, p2) MODULE_VERSION(p1, p2)
1569 #define __MODULE_DEPEND(p1, p2, p3, p4, p5) MODULE_DEPEND(p1, p2, p3, p4, p5)
1570 __DRIVER_MODULE(TARGETNAME, pci, hpt_pci_driver, 0, 0);
1571 __MODULE_VERSION(TARGETNAME, 1);
1572 __MODULE_DEPEND(TARGETNAME, cam, 1, 1, 1);
1573 
1574 static int hpt_open(struct cdev *dev, int flags, int devtype, struct thread *td)
1575 {
1576 	return 0;
1577 }
1578 
1579 static int hpt_close(struct cdev *dev, int flags, int devtype, struct thread *td)
1580 {
1581 	return 0;
1582 }
1583 
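/*
 * Character-device ioctl entry point.  HPT_DO_IOCONTROL marshals the user
 * buffers into kernel memory, dispatches the request to the matching vbus
 * via hpt_do_ioctl(), and copies the results (and returned byte count)
 * back out to user space.
 */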
1584 static int hpt_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag, struct thread *td)
1585 {
1586 	PHPT_IOCTL_PARAM piop=(PHPT_IOCTL_PARAM)data;
1587 	IOCTL_ARG ioctl_args;
1588 	HPT_U32 bytesReturned = 0;
1589 
1590 	switch (cmd){
1591 	case HPT_DO_IOCONTROL:
1592 	{
1593 		if (piop->Magic == HPT_IOCTL_MAGIC || piop->Magic == HPT_IOCTL_MAGIC32) {
1594 			KdPrint(("ioctl=%x in=%p len=%d out=%p len=%d\n",
1595 				piop->dwIoControlCode,
1596 				piop->lpInBuffer,
1597 				piop->nInBufferSize,
1598 				piop->lpOutBuffer,
1599 				piop->nOutBufferSize));
1600 
1601 		memset(&ioctl_args, 0, sizeof(ioctl_args));
1602 
1603 		ioctl_args.dwIoControlCode = piop->dwIoControlCode;
1604 		ioctl_args.nInBufferSize = piop->nInBufferSize;
1605 		ioctl_args.nOutBufferSize = piop->nOutBufferSize;
1606 		ioctl_args.lpBytesReturned = &bytesReturned;
1607 
1608 		if (ioctl_args.nInBufferSize) {
1609 			ioctl_args.lpInBuffer = malloc(ioctl_args.nInBufferSize, M_DEVBUF, M_WAITOK);
1610 			if (!ioctl_args.lpInBuffer)
1611 				goto invalid;
1612 			if (copyin((void*)piop->lpInBuffer,
1613 					ioctl_args.lpInBuffer, piop->nInBufferSize))
1614 				goto invalid;
1615 		}
1616 
1617 		if (ioctl_args.nOutBufferSize) {
1618 			ioctl_args.lpOutBuffer = malloc(ioctl_args.nOutBufferSize, M_DEVBUF, M_WAITOK | M_ZERO);
1619 			if (!ioctl_args.lpOutBuffer)
1620 				goto invalid;
1621 		}
1622 
1623 		hpt_do_ioctl(&ioctl_args);
1624 
1625 		if (ioctl_args.result==HPT_IOCTL_RESULT_OK) {
1626 			if (piop->nOutBufferSize) {
1627 				if (copyout(ioctl_args.lpOutBuffer,
1628 					(void*)piop->lpOutBuffer, piop->nOutBufferSize))
1629 					goto invalid;
1630 			}
1631 			if (piop->lpBytesReturned) {
1632 				if (copyout(&bytesReturned,
1633 					(void*)piop->lpBytesReturned, sizeof(HPT_U32)))
1634 					goto invalid;
1635 			}
1636 			if (ioctl_args.lpInBuffer) free(ioctl_args.lpInBuffer, M_DEVBUF);
1637 			if (ioctl_args.lpOutBuffer) free(ioctl_args.lpOutBuffer, M_DEVBUF);
1638 			return 0;
1639 		}
1640 invalid:
1641 		if (ioctl_args.lpInBuffer) free(ioctl_args.lpInBuffer, M_DEVBUF);
1642 		if (ioctl_args.lpOutBuffer) free(ioctl_args.lpOutBuffer, M_DEVBUF);
1643 		return EFAULT;
1644 	}
1645 	return EFAULT;
1646 	}
1647 
1648 	case HPT_SCAN_BUS:
1649 	{
1650 		return hpt_rescan_bus();
1651 	}
1652 	default:
1653 		KdPrint(("invalid command!"));
1654 		return EFAULT;
1655 	}
1656 
1657 }
1658 
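/*
 * Ask CAM to rescan every registered SIM: allocate a CCB with a wildcard
 * target/LUN path per virtual bus and hand it to xpt_rescan().
 */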
1659 static int	hpt_rescan_bus(void)
1660 {
1661 	union ccb			*ccb;
1662 	PVBUS 				vbus;
1663 	PVBUS_EXT			vbus_ext;
1664 
1665 	ldm_for_each_vbus(vbus, vbus_ext) {
1666 		if ((ccb = xpt_alloc_ccb()) == NULL)
1667 		{
1668 			return(ENOMEM);
1669 		}
1670 		if (xpt_create_path(&ccb->ccb_h.path, NULL, cam_sim_path(vbus_ext->sim),
1671 			CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP)
1672 		{
1673 			xpt_free_ccb(ccb);
1674 			return(EIO);
1675 		}
1676 		xpt_rescan(ccb);
1677 	}
1678 	return(0);
1679 }
1680