xref: /freebsd/sys/dev/hptnr/hptnr_osm_bsd.c (revision 96190b4fef3b4a0cc3ca0606b0c4e3e69a5e6717)
1 /* $Id: osm_bsd.c,v 1.36 2010/05/11 03:12:11 lcn Exp $ */
2 /*-
3  * SPDX-License-Identifier: BSD-2-Clause
4  *
5  * HighPoint RAID Driver for FreeBSD
6  * Copyright (C) 2005-2011 HighPoint Technologies, Inc.
7  * All rights reserved.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
19  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
22  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28  * SUCH DAMAGE.
29  */
30 #include <dev/hptnr/hptnr_config.h>
31 #include <dev/hptnr/os_bsd.h>
32 #include <dev/hptnr/hptintf.h>
33 int msi = 0;
34 int debug_flag = 0;
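/*
 * Match this PCI device against every registered HIM's supported
 * device ID table; when 'scan' is set, also let each HIM update its
 * controller count for the IDs it reports.
 */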
35 static HIM *hpt_match(device_t dev, int scan)
36 {
37 	PCI_ID pci_id;
38 	HIM *him;
39 	int i;
40 
41 	for (him = him_list; him; him = him->next) {
42 		for (i=0; him->get_supported_device_id(i, &pci_id); i++) {
43 			if (scan && him->get_controller_count)
44 				him->get_controller_count(&pci_id,0,0);
45 			if ((pci_get_vendor(dev) == pci_id.vid) &&
46 				(pci_get_device(dev) == pci_id.did)){
47 				return (him);
48 			}
49 		}
50 	}
51 
52 	return (NULL);
53 }
54 
55 static int hpt_probe(device_t dev)
56 {
57 	HIM *him;
58 
59 	him = hpt_match(dev, 0);
60 	if (him != NULL) {
61 		KdPrint(("hpt_probe: adapter at PCI %d:%d:%d, IRQ %d",
62 			pci_get_bus(dev), pci_get_slot(dev), pci_get_function(dev), pci_get_irq(dev)
63 			));
64 		device_set_desc(dev, him->name);
65 		return (BUS_PROBE_DEFAULT);
66 	}
67 
68 	return (ENXIO);
69 }
70 
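/*
 * Attach a controller: create the HIM adapter instance for this PCI
 * function and register it with the LDM.  If no existing virtual bus
 * accepts the adapter, allocate and create a new vbus first, then link
 * this HBA onto that vbus' HBA list.
 */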
71 static int hpt_attach(device_t dev)
72 {
73 	PHBA hba = (PHBA)device_get_softc(dev);
74 	HIM *him;
75 	PCI_ID pci_id;
76 	HPT_UINT size;
77 	PVBUS vbus;
78 	PVBUS_EXT vbus_ext;
79 
80 	if (pci_get_domain(dev) != 0) {
81 		device_printf(dev, "does not support PCI domains\n");
82 		return (ENXIO);
83 	}
84 
85 	KdPrint(("hpt_attach(%d/%d/%d)", pci_get_bus(dev), pci_get_slot(dev), pci_get_function(dev)));
86 
87 	him = hpt_match(dev, 1);
88 	hba->ext_type = EXT_TYPE_HBA;
89 	hba->ldm_adapter.him = him;
90 
91 	pci_enable_busmaster(dev);
92 
93 	pci_id.vid = pci_get_vendor(dev);
94 	pci_id.did = pci_get_device(dev);
95 	pci_id.rev = pci_get_revid(dev);
96 	pci_id.subsys = (HPT_U32)(pci_get_subdevice(dev)) << 16 | pci_get_subvendor(dev);
97 
98 	size = him->get_adapter_size(&pci_id);
99 	hba->ldm_adapter.him_handle = malloc(size, M_DEVBUF, M_WAITOK);
100 
101 	hba->pcidev = dev;
102 	hba->pciaddr.tree = 0;
103 	hba->pciaddr.bus = pci_get_bus(dev);
104 	hba->pciaddr.device = pci_get_slot(dev);
105 	hba->pciaddr.function = pci_get_function(dev);
106 
107 	if (!him->create_adapter(&pci_id, hba->pciaddr, hba->ldm_adapter.him_handle, hba)) {
108 		free(hba->ldm_adapter.him_handle, M_DEVBUF);
109 		return ENXIO;
110 	}
111 
112 	os_printk("adapter at PCI %d:%d:%d, IRQ %d",
113 		hba->pciaddr.bus, hba->pciaddr.device, hba->pciaddr.function, pci_get_irq(dev));
114 
115 	if (!ldm_register_adapter(&hba->ldm_adapter)) {
116 		size = ldm_get_vbus_size();
117 		vbus_ext = malloc(sizeof(VBUS_EXT) + size, M_DEVBUF, M_WAITOK |
118 			M_ZERO);
119 		vbus_ext->ext_type = EXT_TYPE_VBUS;
120 		ldm_create_vbus((PVBUS)vbus_ext->vbus, vbus_ext);
121 		ldm_register_adapter(&hba->ldm_adapter);
122 	}
123 
124 	ldm_for_each_vbus(vbus, vbus_ext) {
125 		if (hba->ldm_adapter.vbus==vbus) {
126 			hba->vbus_ext = vbus_ext;
127 			hba->next = vbus_ext->hba_list;
128 			vbus_ext->hba_list = hba;
129 			break;
130 		}
131 	}
132 	return 0;
133 }
134 
135 /*
136  * It might be better to use bus_dmamem_alloc() to allocate DMA memory,
137  * but there are currently some problems with that approach (alignment, etc.).
138  */
139 static __inline void *__get_free_pages(int order)
140 {
141 	/* don't use low memory - other devices may get starved */
142 	return contigmalloc(PAGE_SIZE<<order,
143 			M_DEVBUF, M_WAITOK, BUS_SPACE_MAXADDR_24BIT, BUS_SPACE_MAXADDR, PAGE_SIZE, 0);
144 }
145 
146 static __inline void free_pages(void *p)
147 {
148 	free(p, M_DEVBUF);
149 }
150 
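/*
 * Populate the per-vbus memory pools: query each HIM for its memory
 * requirements, fill the regular freelists from malloc(9), fill the DMA
 * freelists from page-aligned contiguous memory (storing each block's
 * bus address alongside it) and seed the DMA page pool.
 */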
151 static int hpt_alloc_mem(PVBUS_EXT vbus_ext)
152 {
153 	PHBA hba;
154 	struct freelist *f;
155 	HPT_UINT i;
156 	void **p;
157 
158 	for (hba = vbus_ext->hba_list; hba; hba = hba->next)
159 		hba->ldm_adapter.him->get_meminfo(hba->ldm_adapter.him_handle);
160 
161 	ldm_get_mem_info((PVBUS)vbus_ext->vbus, 0);
162 
163 	for (f=vbus_ext->freelist_head; f; f=f->next) {
164 		KdPrint(("%s: %d*%d=%d bytes",
165 			f->tag, f->count, f->size, f->count*f->size));
166 		for (i=0; i<f->count; i++) {
167 			p = (void **)malloc(f->size, M_DEVBUF, M_WAITOK);
168 			*p = f->head;
169 			f->head = p;
170 		}
171 	}
172 
173 	for (f=vbus_ext->freelist_dma_head; f; f=f->next) {
174 		int order, size, j;
175 
176 		HPT_ASSERT((f->size & (f->alignment-1))==0);
177 
178 		for (order=0, size=PAGE_SIZE; size<f->size; order++, size<<=1)
179 			;
180 
181 		KdPrint(("%s: %d*%d=%d bytes, order %d",
182 			f->tag, f->count, f->size, f->count*f->size, order));
183 		HPT_ASSERT(f->alignment<=PAGE_SIZE);
184 
185 		for (i=0; i<f->count;) {
186 			p = (void **)__get_free_pages(order);
187 			if (!p) return -1;
188 			for (j = size/f->size; j && i<f->count; i++,j--) {
189 				*p = f->head;
190 				*(BUS_ADDRESS *)(p+1) = (BUS_ADDRESS)vtophys(p);
191 				f->head = p;
192 				p = (void **)((unsigned long)p + f->size);
193 			}
194 		}
195 	}
196 
197 	HPT_ASSERT(PAGE_SIZE==DMAPOOL_PAGE_SIZE);
198 
199 	for (i=0; i<os_max_cache_pages; i++) {
200 		p = (void **)__get_free_pages(0);
201 		if (!p) return -1;
202 		HPT_ASSERT(((HPT_UPTR)p & (DMAPOOL_PAGE_SIZE-1))==0);
203 		dmapool_put_page((PVBUS)vbus_ext->vbus, p, (BUS_ADDRESS)vtophys(p));
204 	}
205 
206 	return 0;
207 }
208 
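/*
 * Release everything hpt_alloc_mem() set up, checking the freelists
 * for leaked entries in debug builds.
 */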
209 static void hpt_free_mem(PVBUS_EXT vbus_ext)
210 {
211 	struct freelist *f;
212 	void *p;
213 	int i;
214 	BUS_ADDRESS bus;
215 
216 	for (f=vbus_ext->freelist_head; f; f=f->next) {
217 #if DBG
218 		if (f->count!=f->reserved_count) {
219 			KdPrint(("memory leak for freelist %s (%d/%d)", f->tag, f->count, f->reserved_count));
220 		}
221 #endif
222 		while ((p=freelist_get(f)))
223 			free(p, M_DEVBUF);
224 	}
225 
226 	for (i=0; i<os_max_cache_pages; i++) {
227 		p = dmapool_get_page((PVBUS)vbus_ext->vbus, &bus);
228 		HPT_ASSERT(p);
229 		free_pages(p);
230 	}
231 
232 	for (f=vbus_ext->freelist_dma_head; f; f=f->next) {
233 		int order, size;
234 #if DBG
235 		if (f->count!=f->reserved_count) {
236 			KdPrint(("memory leak for dma freelist %s (%d/%d)", f->tag, f->count, f->reserved_count));
237 		}
238 #endif
239 		for (order=0, size=PAGE_SIZE; size<f->size; order++, size<<=1) ;
240 
241 		while ((p=freelist_get_dma(f, &bus))) {
242 			if (order)
243 				free_pages(p);
244 			else {
245 			/* can't free immediately since other blocks in this page may still be in the list */
246 				if (((HPT_UPTR)p & (PAGE_SIZE-1))==0)
247 					dmapool_put_page((PVBUS)vbus_ext->vbus, p, bus);
248 			}
249 		}
250 	}
251 
252 	while ((p = dmapool_get_page((PVBUS)vbus_ext->vbus, &bus)))
253 		free_pages(p);
254 }
255 
256 static int hpt_init_vbus(PVBUS_EXT vbus_ext)
257 {
258 	PHBA hba;
259 
260 	for (hba = vbus_ext->hba_list; hba; hba = hba->next)
261 		if (!hba->ldm_adapter.him->initialize(hba->ldm_adapter.him_handle)) {
262 			KdPrint(("failed to initialize %p", hba));
263 			return -1;
264 		}
265 
266 	ldm_initialize_vbus((PVBUS)vbus_ext->vbus, &vbus_ext->hba_list->ldm_adapter);
267 	return 0;
268 }
269 
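/*
 * Completion routine for flush commands.  If the target array is being
 * transformed, re-issue the flush against the transform target instead;
 * otherwise flag the waiter's 'done' variable and wake it up.
 */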
270 static void hpt_flush_done(PCOMMAND pCmd)
271 {
272 	PVDEV vd = pCmd->target;
273 
274 	if (mIsArray(vd->type) && vd->u.array.transform && vd!=vd->u.array.transform->target) {
275 		vd = vd->u.array.transform->target;
276 		HPT_ASSERT(vd);
277 		pCmd->target = vd;
278 		pCmd->Result = RETURN_PENDING;
279 		vdev_queue_cmd(pCmd);
280 		return;
281 	}
282 
283 	*(int *)pCmd->priv = 1;
284 	wakeup(pCmd);
285 }
286 
287 /*
288  * flush a vdev (without retry).
289  */
290 static int hpt_flush_vdev(PVBUS_EXT vbus_ext, PVDEV vd)
291 {
292 	PCOMMAND pCmd;
293 	int result = 0, done;
294 	HPT_UINT count;
295 
296 	KdPrint(("flushing dev %p", vd));
297 
298 	hpt_assert_vbus_locked(vbus_ext);
299 
300 	if (mIsArray(vd->type) && vd->u.array.transform)
301 		count = max(vd->u.array.transform->source->cmds_per_request,
302 					vd->u.array.transform->target->cmds_per_request);
303 	else
304 		count = vd->cmds_per_request;
305 
306 	pCmd = ldm_alloc_cmds(vd->vbus, count);
307 
308 	if (!pCmd) {
309 		return -1;
310 	}
311 
312 	pCmd->type = CMD_TYPE_FLUSH;
313 	pCmd->flags.hard_flush = 1;
314 	pCmd->target = vd;
315 	pCmd->done = hpt_flush_done;
316 	done = 0;
317 	pCmd->priv = &done;
318 
319 	ldm_queue_cmd(pCmd);
320 
321 	if (!done) {
322 		while (hpt_sleep(vbus_ext, pCmd, PPAUSE, "hptfls", HPT_OSM_TIMEOUT)) {
323 			ldm_reset_vbus(vd->vbus);
324 		}
325 	}
326 
327 	KdPrint(("flush result %d", pCmd->Result));
328 
329 	if (pCmd->Result!=RETURN_SUCCESS)
330 		result = -1;
331 
332 	ldm_free_cmds(pCmd);
333 
334 	return result;
335 }
336 
337 static void hpt_stop_tasks(PVBUS_EXT vbus_ext);
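/*
 * Shutdown handler for a virtual bus: stop background tasks, flush
 * every target (retrying once), shut the LDM down, tear down the
 * interrupt handlers and release all per-vbus resources.
 */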
338 static void hpt_shutdown_vbus(PVBUS_EXT vbus_ext, int howto)
339 {
340 	PVBUS     vbus = (PVBUS)vbus_ext->vbus;
341 	PHBA hba;
342 	int i;
343 
344 	KdPrint(("hpt_shutdown_vbus"));
345 
346 	/* stop all ctl tasks and disable the worker taskqueue */
347 	hpt_stop_tasks(vbus_ext);
348 	hpt_lock_vbus(vbus_ext);
349 	vbus_ext->worker.ta_context = 0;
350 
351 	/* flush devices */
352 	for (i=0; i<osm_max_targets; i++) {
353 		PVDEV vd = ldm_find_target(vbus, i);
354 		if (vd) {
355 			/* retry once */
356 			if (hpt_flush_vdev(vbus_ext, vd))
357 				hpt_flush_vdev(vbus_ext, vd);
358 		}
359 	}
360 
361 	ldm_shutdown(vbus);
362 	hpt_unlock_vbus(vbus_ext);
363 
364 	ldm_release_vbus(vbus);
365 
366 	for (hba=vbus_ext->hba_list; hba; hba=hba->next)
367 		bus_teardown_intr(hba->pcidev, hba->irq_res, hba->irq_handle);
368 
369 	hpt_free_mem(vbus_ext);
370 
371 	while ((hba=vbus_ext->hba_list)) {
372 		vbus_ext->hba_list = hba->next;
373 		free(hba->ldm_adapter.him_handle, M_DEVBUF);
374 	}
375 
376 	callout_drain(&vbus_ext->timer);
377 	mtx_destroy(&vbus_ext->lock);
378 	free(vbus_ext, M_DEVBUF);
379 	KdPrint(("hpt_shutdown_vbus done"));
380 }
381 
382 static void __hpt_do_tasks(PVBUS_EXT vbus_ext)
383 {
384 	OSM_TASK *tasks;
385 
386 	tasks = vbus_ext->tasks;
387 	vbus_ext->tasks = 0;
388 
389 	while (tasks) {
390 		OSM_TASK *t = tasks;
391 		tasks = t->next;
392 		t->next = 0;
393 		t->func(vbus_ext->vbus, t->data);
394 	}
395 }
396 
397 static void hpt_do_tasks(PVBUS_EXT vbus_ext, int pending)
398 {
399 	if(vbus_ext){
400 		hpt_lock_vbus(vbus_ext);
401 		__hpt_do_tasks(vbus_ext);
402 		hpt_unlock_vbus(vbus_ext);
403 	}
404 }
405 
406 static void hpt_action(struct cam_sim *sim, union ccb *ccb);
407 static void hpt_poll(struct cam_sim *sim);
408 static void hpt_async(void * callback_arg, u_int32_t code, struct cam_path * path, void * arg);
409 static void hpt_pci_intr(void *arg);
410 
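/* Simple singly-linked freelist of per-command OS extensions (CCB
 * pointer, DMA map, SG buffer and timeout callout). */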
411 static __inline POS_CMDEXT cmdext_get(PVBUS_EXT vbus_ext)
412 {
413 	POS_CMDEXT p = vbus_ext->cmdext_list;
414 	if (p)
415 		vbus_ext->cmdext_list = p->next;
416 	return p;
417 }
418 
419 static __inline void cmdext_put(POS_CMDEXT p)
420 {
421 	p->next = p->vbus_ext->cmdext_list;
422 	p->vbus_ext->cmdext_list = p;
423 }
424 
425 static void hpt_timeout(void *arg)
426 {
427 	PCOMMAND pCmd = (PCOMMAND)arg;
428 	POS_CMDEXT ext = (POS_CMDEXT)pCmd->priv;
429 
430 	KdPrint(("pCmd %p timeout", pCmd));
431 
432 	ldm_reset_vbus((PVBUS)ext->vbus_ext->vbus);
433 }
434 
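/*
 * Command completion: for ATA pass-through (ATA_12/ATA_16) build
 * SAT-style descriptor sense data from the returned task file, map the
 * LDM result code to a CAM status, sync and unload the DMA map, then
 * complete the CCB.
 */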
435 static void os_cmddone(PCOMMAND pCmd)
436 {
437 	POS_CMDEXT ext = (POS_CMDEXT)pCmd->priv;
438 	union ccb *ccb = ext->ccb;
439 	HPT_U8 *cdb;
440 
441 	if (ccb->ccb_h.flags & CAM_CDB_POINTER)
442 		cdb = ccb->csio.cdb_io.cdb_ptr;
443 	else
444 		cdb = ccb->csio.cdb_io.cdb_bytes;
445 
446 	KdPrint(("os_cmddone(%p, %d)", pCmd, pCmd->Result));
447 
448 	callout_stop(&ext->timeout);
449 	switch(cdb[0]) {
450 		case 0x85: /*ATA_16*/
451 		case 0xA1: /*ATA_12*/
452 		{
453 			PassthroughCmd *passthru = &pCmd->uCmd.Passthrough;
454 			HPT_U8 *sense_buffer = (HPT_U8 *)&ccb->csio.sense_data;
455 			memset(&ccb->csio.sense_data, 0,sizeof(ccb->csio.sense_data));
456 
457 			sense_buffer[0] = 0x72; /* Response Code */
458 			sense_buffer[7] = 14; /* Additional Sense Length */
459 
460 			sense_buffer[8] = 0x9; /* ATA Return Descriptor */
461 			sense_buffer[9] = 0xc; /* Additional Descriptor Length */
462 			sense_buffer[11] = (HPT_U8)passthru->bFeaturesReg; /* Error */
463 			sense_buffer[13] = (HPT_U8)passthru->bSectorCountReg;  /* Sector Count (7:0) */
464 			sense_buffer[15] = (HPT_U8)passthru->bLbaLowReg; /* LBA Low (7:0) */
465 			sense_buffer[17] = (HPT_U8)passthru->bLbaMidReg; /* LBA Mid (7:0) */
466 			sense_buffer[19] = (HPT_U8)passthru->bLbaHighReg; /* LBA High (7:0) */
467 
468 			if ((cdb[0] == 0x85) && (cdb[1] & 0x1))
469 			{
470 				sense_buffer[10] = 1;
471 				sense_buffer[12] = (HPT_U8)(passthru->bSectorCountReg >> 8); /* Sector Count (15:8) */
472 				sense_buffer[14] = (HPT_U8)(passthru->bLbaLowReg >> 8);	/* LBA Low (15:8) */
473 				sense_buffer[16] = (HPT_U8)(passthru->bLbaMidReg >> 8); /* LBA Mid (15:8) */
474 				sense_buffer[18] = (HPT_U8)(passthru->bLbaHighReg >> 8); /* LBA High (15:8) */
475 			}
476 
477 			sense_buffer[20] = (HPT_U8)passthru->bDriveHeadReg; /* Device */
478 			sense_buffer[21] = (HPT_U8)passthru->bCommandReg; /* Status */
479 			KdPrint(("sts 0x%x err 0x%x low 0x%x mid 0x%x hig 0x%x dh 0x%x sc 0x%x",
480 					 passthru->bCommandReg,
481 					 passthru->bFeaturesReg,
482 					 passthru->bLbaLowReg,
483 					 passthru->bLbaMidReg,
484 					 passthru->bLbaHighReg,
485 					 passthru->bDriveHeadReg,
486 					 passthru->bSectorCountReg));
487 			KdPrint(("result:0x%x,bFeaturesReg:0x%04x,bSectorCountReg:0x%04x,LBA:0x%04x%04x%04x ",
488 				pCmd->Result,passthru->bFeaturesReg,passthru->bSectorCountReg,
489 				passthru->bLbaHighReg,passthru->bLbaMidReg,passthru->bLbaLowReg));
490 		}
491 		default:
492 			break;
493 	}
494 
495 	switch(pCmd->Result) {
496 	case RETURN_SUCCESS:
497 		ccb->ccb_h.status = CAM_REQ_CMP;
498 		break;
499 	case RETURN_BAD_DEVICE:
500 		ccb->ccb_h.status = CAM_DEV_NOT_THERE;
501 		break;
502 	case RETURN_DEVICE_BUSY:
503 		ccb->ccb_h.status = CAM_BUSY;
504 		break;
505 	case RETURN_INVALID_REQUEST:
506 		ccb->ccb_h.status = CAM_REQ_INVALID;
507 		break;
508 	case RETURN_SELECTION_TIMEOUT:
509 		ccb->ccb_h.status = CAM_SEL_TIMEOUT;
510 		break;
511 	case RETURN_RETRY:
512 		ccb->ccb_h.status = CAM_BUSY;
513 		break;
514 	default:
515 		ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
516 		break;
517 	}
518 
519 	if (pCmd->flags.data_in) {
520 		bus_dmamap_sync(ext->vbus_ext->io_dmat, ext->dma_map, BUS_DMASYNC_POSTREAD);
521 	}
522 	else if (pCmd->flags.data_out) {
523 		bus_dmamap_sync(ext->vbus_ext->io_dmat, ext->dma_map, BUS_DMASYNC_POSTWRITE);
524 	}
525 
526 	bus_dmamap_unload(ext->vbus_ext->io_dmat, ext->dma_map);
527 
528 	cmdext_put(ext);
529 	ldm_free_cmds(pCmd);
530 	xpt_done(ccb);
531 }
532 
533 static int os_buildsgl(PCOMMAND pCmd, PSG pSg, int logical)
534 {
535 	/* since we always provide a physical SG list up front, nobody should ask us to build one here */
536 	HPT_ASSERT(0);
537 	return FALSE;
538 }
539 
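/*
 * busdma load callback: copy the physical segments into the command's
 * SG list, sync the map for the transfer direction, arm the command
 * timeout and hand the command to the LDM.
 */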
540 static void hpt_io_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
541 {
542 	PCOMMAND pCmd = (PCOMMAND)arg;
543 	POS_CMDEXT ext = (POS_CMDEXT)pCmd->priv;
544 	PSG psg = pCmd->psg;
545 	int idx;
546 
547 	HPT_ASSERT(pCmd->flags.physical_sg);
548 
549 	if (error)
550 		panic("busdma error");
551 
552 	HPT_ASSERT(nsegs<=os_max_sg_descriptors);
553 
554 	if (nsegs != 0) {
555 		for (idx = 0; idx < nsegs; idx++, psg++) {
556 			psg->addr.bus = segs[idx].ds_addr;
557 			psg->size = segs[idx].ds_len;
558 			psg->eot = 0;
559 		}
560 		psg[-1].eot = 1;
561 
562 		if (pCmd->flags.data_in) {
563 			bus_dmamap_sync(ext->vbus_ext->io_dmat, ext->dma_map,
564 			    BUS_DMASYNC_PREREAD);
565 		}
566 		else if (pCmd->flags.data_out) {
567 			bus_dmamap_sync(ext->vbus_ext->io_dmat, ext->dma_map,
568 			    BUS_DMASYNC_PREWRITE);
569 		}
570 	}
571 
572 	callout_reset(&ext->timeout, HPT_OSM_TIMEOUT, hpt_timeout, pCmd);
573 	ldm_queue_cmd(pCmd);
574 }
575 
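/*
 * Handle an XPT_SCSI_IO CCB.  Simple commands such as TEST UNIT READY,
 * INQUIRY, READ CAPACITY and REPORT LUNS are emulated directly; ATA
 * pass-through and read/write/verify commands are translated into LDM
 * commands and queued to the hardware.
 */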
576 static void hpt_scsi_io(PVBUS_EXT vbus_ext, union ccb *ccb)
577 {
578 	PVBUS vbus = (PVBUS)vbus_ext->vbus;
579 	PVDEV vd;
580 	PCOMMAND pCmd;
581 	POS_CMDEXT ext;
582 	HPT_U8 *cdb;
583 
584 	if (ccb->ccb_h.flags & CAM_CDB_POINTER)
585 		cdb = ccb->csio.cdb_io.cdb_ptr;
586 	else
587 		cdb = ccb->csio.cdb_io.cdb_bytes;
588 
589 	KdPrint(("hpt_scsi_io: ccb %x id %d lun %d cdb %x-%x-%x",
590 		ccb,
591 		ccb->ccb_h.target_id, ccb->ccb_h.target_lun,
592 		*(HPT_U32 *)&cdb[0], *(HPT_U32 *)&cdb[4], *(HPT_U32 *)&cdb[8]
593 	));
594 
595 	/* ccb->ccb_h.path_id is not our bus id - don't check it */
596 	if (ccb->ccb_h.target_lun != 0 ||
597 		ccb->ccb_h.target_id >= osm_max_targets ||
598 		(ccb->ccb_h.flags & CAM_CDB_PHYS))
599 	{
600 		ccb->ccb_h.status = CAM_TID_INVALID;
601 		xpt_done(ccb);
602 		return;
603 	}
604 
605 	vd = ldm_find_target(vbus, ccb->ccb_h.target_id);
606 
607 	if (!vd) {
608 		ccb->ccb_h.status = CAM_SEL_TIMEOUT;
609 		xpt_done(ccb);
610 		return;
611 	}
612 
613 	switch (cdb[0]) {
614 	case TEST_UNIT_READY:
615 	case START_STOP_UNIT:
616 	case SYNCHRONIZE_CACHE:
617 		ccb->ccb_h.status = CAM_REQ_CMP;
618 		break;
619 
620 	case 0x85: /*ATA_16*/
621 	case 0xA1: /*ATA_12*/
622 	{
623 		int error;
624 		HPT_U8 prot;
625 		PassthroughCmd *passthru;
626 
627 		if (mIsArray(vd->type)) {
628 			ccb->ccb_h.status = CAM_REQ_INVALID;
629 			break;
630 		}
631 
632 		HPT_ASSERT(vd->type == VD_RAW && vd->u.raw.legacy_disk);
633 
634 		prot = (cdb[1] & 0x1e) >> 1;
635 
636 
637 		if (prot < 3 || prot > 5)
638 		{
639 			ccb->ccb_h.status = CAM_REQ_INVALID;
640 			break;
641 		}
642 
643 		pCmd = ldm_alloc_cmds(vbus, vd->cmds_per_request);
644 		if (!pCmd) {
645 			HPT_ASSERT(0);
646 			ccb->ccb_h.status = CAM_BUSY;
647 			break;
648 		}
649 
650 		passthru = &pCmd->uCmd.Passthrough;
651 		if (cdb[0] == 0x85/*ATA_16*/) {
652 			if (cdb[1] & 0x1) {
653 				passthru->bFeaturesReg =
654 					((HPT_U16)cdb[3] << 8)
655 						| cdb[4];
656 				passthru->bSectorCountReg =
657 					((HPT_U16)cdb[5] << 8) |
658 						cdb[6];
659 				passthru->bLbaLowReg =
660 					((HPT_U16)cdb[7] << 8) |
661 						cdb[8];
662 				passthru->bLbaMidReg =
663 					((HPT_U16)cdb[9] << 8) |
664 						cdb[10];
665 				passthru->bLbaHighReg =
666 					((HPT_U16)cdb[11] << 8) |
667 						cdb[12];
668 			} else {
669 				passthru->bFeaturesReg = cdb[4];
670 				passthru->bSectorCountReg = cdb[6];
671 				passthru->bLbaLowReg = cdb[8];
672 				passthru->bLbaMidReg = cdb[10];
673 				passthru->bLbaHighReg = cdb[12];
674 			}
675 			passthru->bDriveHeadReg = cdb[13];
676 			passthru->bCommandReg = cdb[14];
677 
678 		} else { /*ATA_12*/
679 
680 			passthru->bFeaturesReg = cdb[3];
681 			passthru->bSectorCountReg = cdb[4];
682 			passthru->bLbaLowReg = cdb[5];
683 			passthru->bLbaMidReg = cdb[6];
684 			passthru->bLbaHighReg = cdb[7];
685 			passthru->bDriveHeadReg = cdb[8];
686 			passthru->bCommandReg = cdb[9];
687 		}
688 
689 		if (cdb[1] & 0xe0) {
690 
691 
692 			if (!(passthru->bCommandReg == ATA_CMD_READ_MULTI ||
693 				passthru->bCommandReg == ATA_CMD_READ_MULTI_EXT ||
694 				passthru->bCommandReg == ATA_CMD_WRITE_MULTI ||
695 				passthru->bCommandReg == ATA_CMD_WRITE_MULTI_EXT ||
696 				passthru->bCommandReg == ATA_CMD_WRITE_MULTI_FUA_EXT)
697 				) {
698 				goto error;
699 			}
700 		}
701 
702 
703 		if (passthru->bFeaturesReg == ATA_SET_FEATURES_XFER &&
704 			passthru->bCommandReg == ATA_CMD_SET_FEATURES) {
705 			goto error;
706 		}
707 
708 
709 		passthru->nSectors = ccb->csio.dxfer_len/ATA_SECTOR_SIZE;
710 		switch (prot) {
711 			default: /* non-data protocol */
712 				break;
713 			case 4: /*PIO data in, T_DIR=1 match check*/
714 				if ((cdb[2] & 3) &&
715 					(cdb[2] & 0x8) == 0)
716 				{
717 					OsPrint(("PIO data in, T_DIR=1 match check"));
718 					goto error;
719 				}
720 				pCmd->flags.data_in = 1;
721 						break;
722 			case 5: /*PIO data out, T_DIR=0 match check*/
723 				if ((cdb[2] & 3) &&
724 					(cdb[2] & 0x8))
725 				{
726 					OsPrint(("PIO data out, T_DIR=0 match check"));
727 					goto error;
728 				}
729 
730 				pCmd->flags.data_out = 1;
731 				break;
732 		}
733 		pCmd->type = CMD_TYPE_PASSTHROUGH;
734 		pCmd->priv = ext = cmdext_get(vbus_ext);
735 		HPT_ASSERT(ext);
736 		ext->ccb = ccb;
737 		pCmd->target = vd;
738 		pCmd->done = os_cmddone;
739 		pCmd->buildsgl = os_buildsgl;
740 		pCmd->psg = ext->psg;
741 
742 		if(!ccb->csio.dxfer_len)
743 		{
744 			ldm_queue_cmd(pCmd);
745 			return;
746 		}
747 		pCmd->flags.physical_sg = 1;
748 		error = bus_dmamap_load_ccb(vbus_ext->io_dmat,
749 					ext->dma_map, ccb,
750 					hpt_io_dmamap_callback, pCmd,
751 				    	BUS_DMA_WAITOK
752 					);
753 		KdPrint(("bus_dmamap_load return %d", error));
754 		if (error && error!=EINPROGRESS) {
755 			os_printk("bus_dmamap_load error %d", error);
756 			cmdext_put(ext);
757 			ldm_free_cmds(pCmd);
758 			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
759 			xpt_done(ccb);
760 		}
761 		return;
762 error:
763 		ldm_free_cmds(pCmd);
764 		ccb->ccb_h.status = CAM_REQ_INVALID;
765 		break;
766 	}
767 
768 	case INQUIRY:
769 	{
770 		PINQUIRYDATA inquiryData;
771 		HIM_DEVICE_CONFIG devconf;
772 		HPT_U8 *rbuf;
773 
774 		memset(ccb->csio.data_ptr, 0, ccb->csio.dxfer_len);
775 		inquiryData = (PINQUIRYDATA)ccb->csio.data_ptr;
776 
777 		if (cdb[1] & 1) {
778 			rbuf = (HPT_U8 *)inquiryData;
779 			switch(cdb[2]) {
780 			case 0:
781 				rbuf[0] = 0;
782 				rbuf[1] = 0;
783 				rbuf[2] = 0;
784 				rbuf[3] = 3;
785 				rbuf[4] = 0;
786 				rbuf[5] = 0x80;
787 				rbuf[6] = 0x83;
788 				ccb->ccb_h.status = CAM_REQ_CMP;
789 				break;
790 			case 0x80: {
791 				rbuf[0] = 0;
792 				rbuf[1] = 0x80;
793 				rbuf[2] = 0;
794 				if (vd->type == VD_RAW) {
795 					rbuf[3] = 20;
796 					vd->u.raw.him->get_device_config(vd->u.raw.phy_dev,&devconf);
797 					memcpy(&rbuf[4], devconf.pIdentifyData->SerialNumber, 20);
798 					ldm_ide_fixstring(&rbuf[4], 20);
799 				} else {
800 					rbuf[3] = 1;
801 					rbuf[4] = 0x20;
802 				}
803 				ccb->ccb_h.status = CAM_REQ_CMP;
804 				break;
805 			}
806 			case 0x83:
807 				rbuf[0] = 0;
808 				rbuf[1] = 0x83;
809 				rbuf[2] = 0;
810 				rbuf[3] = 12;
811 				rbuf[4] = 1;
812 				rbuf[5] = 2;
813 				rbuf[6] = 0;
814 				rbuf[7] = 8;
815 				rbuf[8] = 0;
816 				rbuf[9] = 0x19;
817 				rbuf[10] = 0x3C;
818 				rbuf[11] = 0;
819 				rbuf[12] = 0;
820 				rbuf[13] = 0;
821 				rbuf[14] = 0;
822 				rbuf[15] = 0;
823 				ccb->ccb_h.status = CAM_REQ_CMP;
824 				break;
825 			default:
826 				ccb->ccb_h.status = CAM_REQ_INVALID;
827 				break;
828 			}
829 
830 			break;
831 		}
832 		else if (cdb[2]) {
833 			ccb->ccb_h.status = CAM_REQ_INVALID;
834 			break;
835 		}
836 
837 		inquiryData->DeviceType = 0; /*DIRECT_ACCESS_DEVICE*/
838 		inquiryData->Versions = 5; /*SPC-3*/
839 		inquiryData->ResponseDataFormat = 2;
840 		inquiryData->AdditionalLength = 0x5b;
841 		inquiryData->CommandQueue = 1;
842 
843 		if (ccb->csio.dxfer_len > 63) {
844 			rbuf = (HPT_U8 *)inquiryData;
845 			rbuf[58] = 0x60;
846 			rbuf[59] = 0x3;
847 
848 			rbuf[64] = 0x3;
849 			rbuf[66] = 0x3;
850 			rbuf[67] = 0x20;
851 
852 		}
853 
854 		if (vd->type == VD_RAW) {
855 			vd->u.raw.him->get_device_config(vd->u.raw.phy_dev,&devconf);
856 
857 			if ((devconf.pIdentifyData->GeneralConfiguration & 0x80))
858 				inquiryData->RemovableMedia = 1;
859 
860 
861 			memcpy(&inquiryData->VendorId, "ATA     ", 8);
862 			memcpy(&inquiryData->ProductId, devconf.pIdentifyData->ModelNumber, 16);
863 			ldm_ide_fixstring((HPT_U8 *)&inquiryData->ProductId, 16);
864 			memcpy(&inquiryData->ProductRevisionLevel, devconf.pIdentifyData->FirmwareRevision, 4);
865 			ldm_ide_fixstring((HPT_U8 *)&inquiryData->ProductRevisionLevel, 4);
866 			if (inquiryData->ProductRevisionLevel[0] == 0 || inquiryData->ProductRevisionLevel[0] == ' ')
867 				memcpy(&inquiryData->ProductRevisionLevel, "n/a ", 4);
868 		} else {
869 			memcpy(&inquiryData->VendorId, "HPT     ", 8);
870 			snprintf((char *)&inquiryData->ProductId, 16, "DISK_%d_%d        ",
871 				os_get_vbus_seq(vbus_ext), vd->target_id);
872 			inquiryData->ProductId[15] = ' ';
873 			memcpy(&inquiryData->ProductRevisionLevel, "4.00", 4);
874 		}
875 
876 		ccb->ccb_h.status = CAM_REQ_CMP;
877 		break;
878 	}
879 	case READ_CAPACITY:
880 	{
881 		HPT_U8 *rbuf = ccb->csio.data_ptr;
882 		HPT_U32 cap;
883 		HPT_U8 sector_size_shift = 0;
884 		HPT_U64 new_cap;
885 		HPT_U32 sector_size = 0;
886 
887 		if (mIsArray(vd->type))
888 			sector_size_shift = vd->u.array.sector_size_shift;
889 		else{
890 			if(vd->type == VD_RAW){
891 				sector_size = vd->u.raw.logical_sector_size;
892 			}
893 
894 			switch (sector_size) {
895 				case 0x1000:
896 					KdPrint(("set 4k sector size in READ_CAPACITY"));
897 					sector_size_shift = 3;
898 					break;
899 				default:
900 					break;
901 			}
902 		}
903 		new_cap = vd->capacity >> sector_size_shift;
904 
905 		if (new_cap > 0xfffffffful)
906 			cap = 0xffffffff;
907 		else
908 			cap = new_cap - 1;
909 
910 		rbuf[0] = (HPT_U8)(cap>>24);
911 		rbuf[1] = (HPT_U8)(cap>>16);
912 		rbuf[2] = (HPT_U8)(cap>>8);
913 		rbuf[3] = (HPT_U8)cap;
914 		rbuf[4] = 0;
915 		rbuf[5] = 0;
916 		rbuf[6] = 2 << sector_size_shift;
917 		rbuf[7] = 0;
918 
919 		ccb->ccb_h.status = CAM_REQ_CMP;
920 		break;
921 	}
922 
923 	case REPORT_LUNS:
924 	{
925 		HPT_U8 *rbuf = ccb->csio.data_ptr;
926 		memset(rbuf, 0, 16);
927 		rbuf[3] = 8;
928 		ccb->ccb_h.status = CAM_REQ_CMP;
929 		break;
930 	}
931 	case SERVICE_ACTION_IN:
932 	{
933 		HPT_U8 *rbuf = ccb->csio.data_ptr;
934 		HPT_U64	cap = 0;
935 		HPT_U8 sector_size_shift = 0;
936 		HPT_U32 sector_size = 0;
937 
938 		if(mIsArray(vd->type))
939 			sector_size_shift = vd->u.array.sector_size_shift;
940 		else{
941 			if(vd->type == VD_RAW){
942 				sector_size = vd->u.raw.logical_sector_size;
943 			}
944 
945 			switch (sector_size) {
946 				case 0x1000:
947 					KdPrint(("set 4k sector size in SERVICE_ACTION_IN"));
948 					sector_size_shift = 3;
949 					break;
950 				default:
951 					break;
952 			}
953 		}
954 		cap = (vd->capacity >> sector_size_shift) - 1;
955 
956 		rbuf[0] = (HPT_U8)(cap>>56);
957 		rbuf[1] = (HPT_U8)(cap>>48);
958 		rbuf[2] = (HPT_U8)(cap>>40);
959 		rbuf[3] = (HPT_U8)(cap>>32);
960 		rbuf[4] = (HPT_U8)(cap>>24);
961 		rbuf[5] = (HPT_U8)(cap>>16);
962 		rbuf[6] = (HPT_U8)(cap>>8);
963 		rbuf[7] = (HPT_U8)cap;
964 		rbuf[8] = 0;
965 		rbuf[9] = 0;
966 		rbuf[10] = 2 << sector_size_shift;
967 		rbuf[11] = 0;
968 
969 		if(!mIsArray(vd->type)){
970 			rbuf[13] = vd->u.raw.logicalsectors_per_physicalsector;
971 			rbuf[14] = (HPT_U8)((vd->u.raw.lowest_aligned >> 8) & 0x3f);
972 			rbuf[15] = (HPT_U8)(vd->u.raw.lowest_aligned);
973 		}
974 
975 		ccb->ccb_h.status = CAM_REQ_CMP;
976 		break;
977 	}
978 
979 	case READ_6:
980 	case READ_10:
981 	case READ_16:
982 	case WRITE_6:
983 	case WRITE_10:
984 	case WRITE_16:
985 	case 0x13:
986 	case 0x2f:
987 	case 0x8f: /* VERIFY_16 */
988 	{
989 		int error;
990 		HPT_U8 sector_size_shift = 0;
991 		HPT_U32 sector_size = 0;
992 		pCmd = ldm_alloc_cmds(vbus, vd->cmds_per_request);
993 		if(!pCmd){
994 			KdPrint(("Failed to allocate command!"));
995 			ccb->ccb_h.status = CAM_BUSY;
996 			break;
997 		}
998 
999 		switch (cdb[0])	{
1000 		case READ_6:
1001 		case WRITE_6:
1002 		case 0x13:
1003 			pCmd->uCmd.Ide.Lba =  ((HPT_U32)cdb[1] << 16) | ((HPT_U32)cdb[2] << 8) | (HPT_U32)cdb[3];
1004 			pCmd->uCmd.Ide.nSectors = (HPT_U16) cdb[4];
1005 			break;
1006 		case READ_16:
1007 		case WRITE_16:
1008 		case 0x8f: /* VERIFY_16 */
1009 		{
1010 			HPT_U64 block =
1011 				((HPT_U64)cdb[2]<<56) |
1012 				((HPT_U64)cdb[3]<<48) |
1013 				((HPT_U64)cdb[4]<<40) |
1014 				((HPT_U64)cdb[5]<<32) |
1015 				((HPT_U64)cdb[6]<<24) |
1016 				((HPT_U64)cdb[7]<<16) |
1017 				((HPT_U64)cdb[8]<<8) |
1018 				((HPT_U64)cdb[9]);
1019 			pCmd->uCmd.Ide.Lba = block;
1020 			pCmd->uCmd.Ide.nSectors = (HPT_U16)cdb[13] | ((HPT_U16)cdb[12]<<8);
1021 			break;
1022 		}
1023 
1024 		default:
1025 			pCmd->uCmd.Ide.Lba = (HPT_U32)cdb[5] | ((HPT_U32)cdb[4] << 8) | ((HPT_U32)cdb[3] << 16) | ((HPT_U32)cdb[2] << 24);
1026 			pCmd->uCmd.Ide.nSectors = (HPT_U16) cdb[8] | ((HPT_U16)cdb[7]<<8);
1027 			break;
1028 		}
1029 
1030 		if(mIsArray(vd->type)) {
1031 			sector_size_shift = vd->u.array.sector_size_shift;
1032 		}
1033 		else{
1034 			if(vd->type == VD_RAW){
1035 				sector_size = vd->u.raw.logical_sector_size;
1036 			}
1037 
1038 			switch (sector_size) {
1039 				case 0x1000:
1040 					KdPrint(("resize sector size from 4k to 512"));
1041 					sector_size_shift = 3;
1042 					break;
1043 				default:
1044 					break;
1045 	 		}
1046 		}
1047 		pCmd->uCmd.Ide.Lba <<= sector_size_shift;
1048 		pCmd->uCmd.Ide.nSectors <<= sector_size_shift;
1049 
1050 
1051 		switch (cdb[0]) {
1052 		case READ_6:
1053 		case READ_10:
1054 		case READ_16:
1055 			pCmd->flags.data_in = 1;
1056 			break;
1057 		case WRITE_6:
1058 		case WRITE_10:
1059 		case WRITE_16:
1060 			pCmd->flags.data_out = 1;
1061 			break;
1062 		}
1063 		pCmd->priv = ext = cmdext_get(vbus_ext);
1064 		HPT_ASSERT(ext);
1065 		ext->ccb = ccb;
1066 		pCmd->target = vd;
1067 		pCmd->done = os_cmddone;
1068 		pCmd->buildsgl = os_buildsgl;
1069 		pCmd->psg = ext->psg;
1070 		pCmd->flags.physical_sg = 1;
1071 		error = bus_dmamap_load_ccb(vbus_ext->io_dmat,
1072 					ext->dma_map, ccb,
1073 					hpt_io_dmamap_callback, pCmd,
1074 				    	BUS_DMA_WAITOK
1075 					);
1076 		KdPrint(("bus_dmamap_load return %d", error));
1077 		if (error && error!=EINPROGRESS) {
1078 			os_printk("bus_dmamap_load error %d", error);
1079 			cmdext_put(ext);
1080 			ldm_free_cmds(pCmd);
1081 			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
1082 			xpt_done(ccb);
1083 		}
1084 		return;
1085 	}
1086 
1087 	default:
1088 		ccb->ccb_h.status = CAM_REQ_INVALID;
1089 		break;
1090 	}
1091 
1092 	xpt_done(ccb);
1093 	return;
1094 }
1095 
1096 static void hpt_action(struct cam_sim *sim, union ccb *ccb)
1097 {
1098 	PVBUS_EXT vbus_ext = (PVBUS_EXT)cam_sim_softc(sim);
1099 
1100 	KdPrint(("hpt_action(fn=%d, id=%d)", ccb->ccb_h.func_code, ccb->ccb_h.target_id));
1101 
1102 	hpt_assert_vbus_locked(vbus_ext);
1103 	switch (ccb->ccb_h.func_code) {
1104 
1105 	case XPT_SCSI_IO:
1106 		hpt_scsi_io(vbus_ext, ccb);
1107 		return;
1108 
1109 	case XPT_RESET_BUS:
1110 		ldm_reset_vbus((PVBUS)vbus_ext->vbus);
1111 		break;
1112 
1113 	case XPT_GET_TRAN_SETTINGS:
1114 	case XPT_SET_TRAN_SETTINGS:
1115 		ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
1116 		break;
1117 
1118 	case XPT_CALC_GEOMETRY:
1119 		ccb->ccg.heads = 255;
1120 		ccb->ccg.secs_per_track = 63;
1121 		ccb->ccg.cylinders = ccb->ccg.volume_size / (ccb->ccg.heads * ccb->ccg.secs_per_track);
1122 		ccb->ccb_h.status = CAM_REQ_CMP;
1123 		break;
1124 
1125 	case XPT_PATH_INQ:
1126 	{
1127 		struct ccb_pathinq *cpi = &ccb->cpi;
1128 
1129 		cpi->version_num = 1;
1130 		cpi->hba_inquiry = PI_SDTR_ABLE;
1131 		cpi->target_sprt = 0;
1132 		cpi->hba_misc = PIM_NOBUSRESET;
1133 		cpi->hba_eng_cnt = 0;
1134 		cpi->max_target = osm_max_targets;
1135 		cpi->max_lun = 0;
1136 		cpi->unit_number = cam_sim_unit(sim);
1137 		cpi->bus_id = cam_sim_bus(sim);
1138 		cpi->initiator_id = osm_max_targets;
1139 		cpi->base_transfer_speed = 3300;
1140 
1141 		strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
1142 		strlcpy(cpi->hba_vid, "HPT   ", HBA_IDLEN);
1143 		strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
1144 		cpi->transport = XPORT_SPI;
1145 		cpi->transport_version = 2;
1146 		cpi->protocol = PROTO_SCSI;
1147 		cpi->protocol_version = SCSI_REV_2;
1148 		cpi->ccb_h.status = CAM_REQ_CMP;
1149 		break;
1150 	}
1151 
1152 	default:
1153 		ccb->ccb_h.status = CAM_REQ_INVALID;
1154 		break;
1155 	}
1156 
1157 	xpt_done(ccb);
1158 	return;
1159 }
1160 
1161 static void hpt_pci_intr(void *arg)
1162 {
1163 	PVBUS_EXT vbus_ext = (PVBUS_EXT)arg;
1164 	hpt_lock_vbus(vbus_ext);
1165 	ldm_intr((PVBUS)vbus_ext->vbus);
1166 	hpt_unlock_vbus(vbus_ext);
1167 }
1168 
1169 static void hpt_poll(struct cam_sim *sim)
1170 {
1171 	PVBUS_EXT vbus_ext = cam_sim_softc(sim);
1172 	hpt_assert_vbus_locked(vbus_ext);
1173 	ldm_intr((PVBUS)vbus_ext->vbus);
1174 }
1175 
1176 static void hpt_async(void * callback_arg, u_int32_t code, struct cam_path * path, void * arg)
1177 {
1178 	KdPrint(("hpt_async"));
1179 }
1180 
1181 static int hpt_shutdown(device_t dev)
1182 {
1183 	KdPrint(("hpt_shutdown(dev=%p)", dev));
1184 	return 0;
1185 }
1186 
1187 static int hpt_detach(device_t dev)
1188 {
1189 	/* we don't allow the driver to be unloaded. */
1190 	return EBUSY;
1191 }
1192 
1193 static void hpt_ioctl_done(struct _IOCTL_ARG *arg)
1194 {
1195 	arg->ioctl_cmnd = 0;
1196 	wakeup(arg);
1197 }
1198 
1199 static void __hpt_do_ioctl(PVBUS_EXT vbus_ext, IOCTL_ARG *ioctl_args)
1200 {
1201 	ioctl_args->result = -1;
1202 	ioctl_args->done = hpt_ioctl_done;
1203 	ioctl_args->ioctl_cmnd = (void *)1;
1204 
1205 	hpt_lock_vbus(vbus_ext);
1206 	ldm_ioctl((PVBUS)vbus_ext->vbus, ioctl_args);
1207 
1208 	while (ioctl_args->ioctl_cmnd) {
1209 		if (hpt_sleep(vbus_ext, ioctl_args, PPAUSE, "hptctl", HPT_OSM_TIMEOUT)==0)
1210 			break;
1211 		ldm_reset_vbus((PVBUS)vbus_ext->vbus);
1212 		__hpt_do_tasks(vbus_ext);
1213 	}
1214 
1215 	/* KdPrint(("ioctl %x result %d", ioctl_args->dwIoControlCode, ioctl_args->result)); */
1216 
1217 	hpt_unlock_vbus(vbus_ext);
1218 }
1219 
1220 static void hpt_do_ioctl(IOCTL_ARG *ioctl_args)
1221 {
1222 	PVBUS vbus;
1223 	PVBUS_EXT vbus_ext;
1224 
1225 	ldm_for_each_vbus(vbus, vbus_ext) {
1226 		__hpt_do_ioctl(vbus_ext, ioctl_args);
1227 		if (ioctl_args->result!=HPT_IOCTL_RESULT_WRONG_VBUS)
1228 			return;
1229 	}
1230 }
1231 
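/* Convenience wrapper: build an IOCTL_ARG on the stack, run it through
 * hpt_do_ioctl() and evaluate to the resulting status code. */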
1232 #define HPT_DO_IOCTL(code, inbuf, insize, outbuf, outsize) ({\
1233 	IOCTL_ARG arg;\
1234 	arg.dwIoControlCode = code;\
1235 	arg.lpInBuffer = inbuf;\
1236 	arg.lpOutBuffer = outbuf;\
1237 	arg.nInBufferSize = insize;\
1238 	arg.nOutBufferSize = outsize;\
1239 	arg.lpBytesReturned = 0;\
1240 	hpt_do_ioctl(&arg);\
1241 	arg.result;\
1242 })
1243 
1244 #define DEVICEID_VALID(id) ((id) && ((HPT_U32)(id)!=0xffffffff))
1245 
1246 static int hpt_get_logical_devices(DEVICEID * pIds, int nMaxCount)
1247 {
1248 	int i;
1249 	HPT_U32 count = nMaxCount-1;
1250 
1251 	if (HPT_DO_IOCTL(HPT_IOCTL_GET_LOGICAL_DEVICES,
1252 			&count, sizeof(HPT_U32), pIds, sizeof(DEVICEID)*nMaxCount))
1253 		return -1;
1254 
1255 	nMaxCount = (int)pIds[0];
1256 	for (i=0; i<nMaxCount; i++) pIds[i] = pIds[i+1];
1257 	return nMaxCount;
1258 }
1259 
1260 static int hpt_get_device_info_v3(DEVICEID id, PLOGICAL_DEVICE_INFO_V3 pInfo)
1261 {
1262 	return HPT_DO_IOCTL(HPT_IOCTL_GET_DEVICE_INFO_V3,
1263 				&id, sizeof(DEVICEID), pInfo, sizeof(LOGICAL_DEVICE_INFO_V3));
1264 }
1265 
1266 /* This does not logically belong in this file, but we want to use the ioctl interface. */
1267 static int __hpt_stop_tasks(PVBUS_EXT vbus_ext, DEVICEID id)
1268 {
1269 	LOGICAL_DEVICE_INFO_V3 devinfo;
1270 	int i, result;
1271 	DEVICEID param[2] = { id, 0 };
1272 
1273 	if (hpt_get_device_info_v3(id, &devinfo))
1274 		return -1;
1275 
1276 	if (devinfo.Type!=LDT_ARRAY)
1277 		return -1;
1278 
1279 	if (devinfo.u.array.Flags & ARRAY_FLAG_REBUILDING)
1280 		param[1] = AS_REBUILD_ABORT;
1281 	else if (devinfo.u.array.Flags & ARRAY_FLAG_VERIFYING)
1282 		param[1] = AS_VERIFY_ABORT;
1283 	else if (devinfo.u.array.Flags & ARRAY_FLAG_INITIALIZING)
1284 		param[1] = AS_INITIALIZE_ABORT;
1285 	else if (devinfo.u.array.Flags & ARRAY_FLAG_TRANSFORMING)
1286 		param[1] = AS_TRANSFORM_ABORT;
1287 	else
1288 		return -1;
1289 
1290 	KdPrint(("SET_ARRAY_STATE(%x, %d)", param[0], param[1]));
1291 	result = HPT_DO_IOCTL(HPT_IOCTL_SET_ARRAY_STATE,
1292 				param, sizeof(param), 0, 0);
1293 
1294 	for (i=0; i<devinfo.u.array.nDisk; i++)
1295 		if (DEVICEID_VALID(devinfo.u.array.Members[i]))
1296 			__hpt_stop_tasks(vbus_ext, devinfo.u.array.Members[i]);
1297 
1298 	return result;
1299 }
1300 
1301 static void hpt_stop_tasks(PVBUS_EXT vbus_ext)
1302 {
1303 	DEVICEID ids[32];
1304 	int i, count;
1305 
1306 	count = hpt_get_logical_devices((DEVICEID *)&ids, sizeof(ids)/sizeof(ids[0]));
1307 
1308 	for (i=0; i<count; i++)
1309 		__hpt_stop_tasks(vbus_ext, ids[i]);
1310 }
1311 
1312 static	d_open_t	hpt_open;
1313 static	d_close_t	hpt_close;
1314 static	d_ioctl_t	hpt_ioctl;
1315 static  int 		hpt_rescan_bus(void);
1316 
1317 static struct cdevsw hpt_cdevsw = {
1318 	.d_open =	hpt_open,
1319 	.d_close =	hpt_close,
1320 	.d_ioctl =	hpt_ioctl,
1321 	.d_name =	driver_name,
1322 	.d_version =	D_VERSION,
1323 };
1324 
1325 static struct intr_config_hook hpt_ich;
1326 
1327 /*
1328  * hpt_final_init() is called after all hpt_attach() calls have completed.
1329  */
1330 static void hpt_final_init(void *dummy)
1331 {
1332 	int       i,unit_number=0;
1333 	PVBUS_EXT vbus_ext;
1334 	PVBUS vbus;
1335 	PHBA hba;
1336 
1337 	/* Clear the config hook */
1338 	config_intrhook_disestablish(&hpt_ich);
1339 
1340 	/* allocate memory */
1341 	i = 0;
1342 	ldm_for_each_vbus(vbus, vbus_ext) {
1343 		if (hpt_alloc_mem(vbus_ext)) {
1344 			os_printk("out of memory");
1345 			return;
1346 		}
1347 		i++;
1348 	}
1349 
1350 	if (!i) {
1351 		if (bootverbose)
1352 			os_printk("no controller detected.");
1353 		return;
1354 	}
1355 
1356 	/* initialize the hardware */
1357 	ldm_for_each_vbus(vbus, vbus_ext) {
1358 		/* make timer available here */
1359 		mtx_init(&vbus_ext->lock, "hptsleeplock", NULL, MTX_DEF);
1360 		callout_init_mtx(&vbus_ext->timer, &vbus_ext->lock, 0);
1361 		if (hpt_init_vbus(vbus_ext)) {
1362 			os_printk("failed to initialize hardware");
1363 			break; /* FIXME */
1364 		}
1365 	}
1366 
1367 	/* register CAM interface */
1368 	ldm_for_each_vbus(vbus, vbus_ext) {
1369 		struct cam_devq *devq;
1370 		struct ccb_setasync	ccb;
1371 
1372 		if (bus_dma_tag_create(NULL,/* parent */
1373 				4,	/* alignment */
1374 				BUS_SPACE_MAXADDR_32BIT+1, /* boundary */
1375 				BUS_SPACE_MAXADDR,	/* lowaddr */
1376 				BUS_SPACE_MAXADDR, 	/* highaddr */
1377 				NULL, NULL, 		/* filter, filterarg */
1378 				PAGE_SIZE * (os_max_sg_descriptors-1),	/* maxsize */
1379 				os_max_sg_descriptors,	/* nsegments */
1380 				0x10000,	/* maxsegsize */
1381 				BUS_DMA_WAITOK,		/* flags */
1382 				busdma_lock_mutex,	/* lockfunc */
1383 				&vbus_ext->lock,		/* lockfuncarg */
1384 				&vbus_ext->io_dmat	/* tag */))
1385 		{
1386 			return ;
1387 		}
1388 
1389 		for (i=0; i<os_max_queue_comm; i++) {
1390 			POS_CMDEXT ext = (POS_CMDEXT)malloc(sizeof(OS_CMDEXT), M_DEVBUF, M_WAITOK);
1391 			ext->vbus_ext = vbus_ext;
1392 			ext->next = vbus_ext->cmdext_list;
1393 			vbus_ext->cmdext_list = ext;
1394 
1395 			if (bus_dmamap_create(vbus_ext->io_dmat, 0, &ext->dma_map)) {
1396 				os_printk("Can't create dma map(%d)", i);
1397 				return ;
1398 			}
1399 			callout_init_mtx(&ext->timeout, &vbus_ext->lock, 0);
1400 		}
1401 
1402 		if ((devq = cam_simq_alloc(os_max_queue_comm)) == NULL) {
1403 			os_printk("cam_simq_alloc failed");
1404 			return ;
1405 		}
1406 
1407 		hpt_lock_vbus(vbus_ext);
1408 		vbus_ext->sim = cam_sim_alloc(hpt_action, hpt_poll, driver_name,
1409 				vbus_ext, unit_number, &vbus_ext->lock,
1410 				os_max_queue_comm, /*tagged*/8,  devq);
1411 		unit_number++;
1412 		if (!vbus_ext->sim) {
1413 			os_printk("cam_sim_alloc failed");
1414 			cam_simq_free(devq);
1415 			hpt_unlock_vbus(vbus_ext);
1416 			return ;
1417 		}
1418 
1419 		if (xpt_bus_register(vbus_ext->sim, NULL, 0) != CAM_SUCCESS) {
1420 			os_printk("xpt_bus_register failed");
1421 			cam_sim_free(vbus_ext->sim, /*free devq*/ TRUE);
1422 			vbus_ext->sim = NULL;
1423 			return ;
1424 		}
1425 
1426 		if (xpt_create_path(&vbus_ext->path, /*periph */ NULL,
1427 				cam_sim_path(vbus_ext->sim), CAM_TARGET_WILDCARD,
1428 				CAM_LUN_WILDCARD) != CAM_REQ_CMP)
1429 		{
1430 			os_printk("xpt_create_path failed");
1431 			xpt_bus_deregister(cam_sim_path(vbus_ext->sim));
1432 			cam_sim_free(vbus_ext->sim, /*free_devq*/TRUE);
1433 			hpt_unlock_vbus(vbus_ext);
1434 			vbus_ext->sim = NULL;
1435 			return ;
1436 		}
1437 		hpt_unlock_vbus(vbus_ext);
1438 
1439 		memset(&ccb, 0, sizeof(ccb));
1440 		xpt_setup_ccb(&ccb.ccb_h, vbus_ext->path, /*priority*/5);
1441 		ccb.ccb_h.func_code = XPT_SASYNC_CB;
1442 		ccb.event_enable = AC_LOST_DEVICE;
1443 		ccb.callback = hpt_async;
1444 		ccb.callback_arg = vbus_ext;
1445 		xpt_action((union ccb *)&ccb);
1446 
1447 		for (hba = vbus_ext->hba_list; hba; hba = hba->next) {
1448 			int rid = 0;
1449 			if ((hba->irq_res = bus_alloc_resource_any(hba->pcidev,
1450 				SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE)) == NULL)
1451 			{
1452 				os_printk("can't allocate interrupt");
1453 				return ;
1454 			}
1455 
1456 			if (bus_setup_intr(hba->pcidev, hba->irq_res, INTR_TYPE_CAM | INTR_MPSAFE,
1457 				NULL, hpt_pci_intr, vbus_ext, &hba->irq_handle))
1458 			{
1459 				os_printk("can't set up interrupt");
1460 				return ;
1461 			}
1462 			hba->ldm_adapter.him->intr_control(hba->ldm_adapter.him_handle, HPT_TRUE);
1463 
1464 		}
1465 
1466 		vbus_ext->shutdown_eh = EVENTHANDLER_REGISTER(shutdown_final,
1467 									hpt_shutdown_vbus, vbus_ext, SHUTDOWN_PRI_DEFAULT);
1468 		if (!vbus_ext->shutdown_eh)
1469 			os_printk("Shutdown event registration failed");
1470 	}
1471 
1472 	ldm_for_each_vbus(vbus, vbus_ext) {
1473 		TASK_INIT(&vbus_ext->worker, 0, (task_fn_t *)hpt_do_tasks, vbus_ext);
1474 		if (vbus_ext->tasks)
1475 			TASK_ENQUEUE(&vbus_ext->worker);
1476 	}
1477 
1478 	make_dev(&hpt_cdevsw, DRIVER_MINOR, UID_ROOT, GID_OPERATOR,
1479 	    S_IRUSR | S_IWUSR, "%s", driver_name);
1480 }
1481 
1482 #if defined(KLD_MODULE)
1483 
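/*
 * Minimal local copies of the kernel's private struct driverlink and
 * struct devclass layouts.  override_kernel_driver() below uses them to
 * move this driver to the head of the "pci" devclass driver list so it
 * gets the first chance to probe matching devices.
 */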
1484 typedef struct driverlink *driverlink_t;
1485 struct driverlink {
1486 	kobj_class_t	driver;
1487 	TAILQ_ENTRY(driverlink) link;	/* list of drivers in devclass */
1488 };
1489 
1490 typedef TAILQ_HEAD(driver_list, driverlink) driver_list_t;
1491 
1492 struct devclass {
1493 	TAILQ_ENTRY(devclass) link;
1494 	devclass_t	parent;		/* parent in devclass hierarchy */
1495 	driver_list_t	drivers;     /* bus devclasses store drivers for bus */
1496 	char		*name;
1497 	device_t	*devices;	/* array of devices indexed by unit */
1498 	int		maxunit;	/* size of devices array */
1499 };
1500 
1501 static void override_kernel_driver(void)
1502 {
1503 	driverlink_t dl, dlfirst;
1504 	driver_t *tmpdriver;
1505 	devclass_t dc = devclass_find("pci");
1506 
1507 	if (dc){
1508 		dlfirst = TAILQ_FIRST(&dc->drivers);
1509 		for (dl = dlfirst; dl; dl = TAILQ_NEXT(dl, link)) {
1510 			if(strcmp(dl->driver->name, driver_name) == 0) {
1511 				tmpdriver=dl->driver;
1512 				dl->driver=dlfirst->driver;
1513 				dlfirst->driver=tmpdriver;
1514 				break;
1515 			}
1516 		}
1517 	}
1518 }
1519 
1520 #else
1521 #define override_kernel_driver()
1522 #endif
1523 
1524 static void hpt_init(void *dummy)
1525 {
1526 	if (bootverbose)
1527 		os_printk("%s %s", driver_name_long, driver_ver);
1528 
1529 	override_kernel_driver();
1530 	init_config();
1531 
1532 	hpt_ich.ich_func = hpt_final_init;
1533 	hpt_ich.ich_arg = NULL;
1534 	if (config_intrhook_establish(&hpt_ich) != 0) {
1535 		printf("%s: cannot establish configuration hook\n",
1536 		    driver_name_long);
1537 	}
1538 
1539 }
1540 SYSINIT(hptinit, SI_SUB_CONFIGURE, SI_ORDER_FIRST, hpt_init, NULL);
1541 
1542 /*
1543  * CAM driver interface
1544  */
1545 static device_method_t driver_methods[] = {
1546 	/* Device interface */
1547 	DEVMETHOD(device_probe,		hpt_probe),
1548 	DEVMETHOD(device_attach,	hpt_attach),
1549 	DEVMETHOD(device_detach,	hpt_detach),
1550 	DEVMETHOD(device_shutdown,	hpt_shutdown),
1551 	{ 0, 0 }
1552 };
1553 
1554 static driver_t hpt_pci_driver = {
1555 	driver_name,
1556 	driver_methods,
1557 	sizeof(HBA)
1558 };
1559 
1560 #ifndef TARGETNAME
1561 #error "no TARGETNAME found"
1562 #endif
1563 
1564 /* use these wrappers so that TARGETNAME is macro-expanded */
1565 #define __DRIVER_MODULE(p1, p2, p3, p4, p5) DRIVER_MODULE(p1, p2, p3, p4, p5)
1566 #define __MODULE_VERSION(p1, p2) MODULE_VERSION(p1, p2)
1567 #define __MODULE_DEPEND(p1, p2, p3, p4, p5) MODULE_DEPEND(p1, p2, p3, p4, p5)
1568 __DRIVER_MODULE(TARGETNAME, pci, hpt_pci_driver, 0, 0);
1569 __MODULE_VERSION(TARGETNAME, 1);
1570 __MODULE_DEPEND(TARGETNAME, cam, 1, 1, 1);
1571 
1572 static int hpt_open(struct cdev *dev, int flags, int devtype, struct thread *td)
1573 {
1574 	return 0;
1575 }
1576 
1577 static int hpt_close(struct cdev *dev, int flags, int devtype, struct thread *td)
1578 {
1579 	return 0;
1580 }
1581 
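/*
 * Character device ioctl entry point: copy the user's HPT_IOCTL_PARAM
 * buffers into kernel memory, dispatch the request via hpt_do_ioctl()
 * and copy the results (and returned byte count) back to user space.
 */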
1582 static int hpt_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag, struct thread *td)
1583 {
1584 	PHPT_IOCTL_PARAM piop=(PHPT_IOCTL_PARAM)data;
1585 	IOCTL_ARG ioctl_args;
1586 	HPT_U32 bytesReturned = 0;
1587 
1588 	switch (cmd){
1589 	case HPT_DO_IOCONTROL:
1590 	{
1591 		if (piop->Magic == HPT_IOCTL_MAGIC || piop->Magic == HPT_IOCTL_MAGIC32) {
1592 			KdPrint(("ioctl=%x in=%p len=%d out=%p len=%d\n",
1593 				piop->dwIoControlCode,
1594 				piop->lpInBuffer,
1595 				piop->nInBufferSize,
1596 				piop->lpOutBuffer,
1597 				piop->nOutBufferSize));
1598 
1599 		memset(&ioctl_args, 0, sizeof(ioctl_args));
1600 
1601 		ioctl_args.dwIoControlCode = piop->dwIoControlCode;
1602 		ioctl_args.nInBufferSize = piop->nInBufferSize;
1603 		ioctl_args.nOutBufferSize = piop->nOutBufferSize;
1604 		ioctl_args.lpBytesReturned = &bytesReturned;
1605 
1606 		if (ioctl_args.nInBufferSize) {
1607 			ioctl_args.lpInBuffer = malloc(ioctl_args.nInBufferSize, M_DEVBUF, M_WAITOK);
1608 			if (copyin((void*)piop->lpInBuffer,
1609 					ioctl_args.lpInBuffer, piop->nInBufferSize))
1610 				goto invalid;
1611 		}
1612 
1613 		if (ioctl_args.nOutBufferSize)
1614 			ioctl_args.lpOutBuffer = malloc(ioctl_args.nOutBufferSize, M_DEVBUF, M_WAITOK | M_ZERO);
1615 
1616 		hpt_do_ioctl(&ioctl_args);
1617 
1618 		if (ioctl_args.result==HPT_IOCTL_RESULT_OK) {
1619 			if (piop->nOutBufferSize) {
1620 				if (copyout(ioctl_args.lpOutBuffer,
1621 					(void*)piop->lpOutBuffer, piop->nOutBufferSize))
1622 					goto invalid;
1623 			}
1624 			if (piop->lpBytesReturned) {
1625 				if (copyout(&bytesReturned,
1626 					(void*)piop->lpBytesReturned, sizeof(HPT_U32)))
1627 					goto invalid;
1628 			}
1629 			if (ioctl_args.lpInBuffer) free(ioctl_args.lpInBuffer, M_DEVBUF);
1630 			if (ioctl_args.lpOutBuffer) free(ioctl_args.lpOutBuffer, M_DEVBUF);
1631 			return 0;
1632 		}
1633 invalid:
1634 		if (ioctl_args.lpInBuffer) free(ioctl_args.lpInBuffer, M_DEVBUF);
1635 		if (ioctl_args.lpOutBuffer) free(ioctl_args.lpOutBuffer, M_DEVBUF);
1636 		return EFAULT;
1637 	}
1638 	return EFAULT;
1639 	}
1640 
1641 	case HPT_SCAN_BUS:
1642 	{
1643 		return hpt_rescan_bus();
1644 	}
1645 	default:
1646 		KdPrint(("invalid command!"));
1647 		return EFAULT;
1648 	}
1649 
1650 }
1651 
1652 static int	hpt_rescan_bus(void)
1653 {
1654 	union ccb			*ccb;
1655 	PVBUS 				vbus;
1656 	PVBUS_EXT			vbus_ext;
1657 
1658 	ldm_for_each_vbus(vbus, vbus_ext) {
1659 		if ((ccb = xpt_alloc_ccb()) == NULL)
1660 		{
1661 			return(ENOMEM);
1662 		}
1663 		if (xpt_create_path(&ccb->ccb_h.path, NULL, cam_sim_path(vbus_ext->sim),
1664 			CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP)
1665 		{
1666 			xpt_free_ccb(ccb);
1667 			return(EIO);
1668 		}
1669 		xpt_rescan(ccb);
1670 	}
1671 	return(0);
1672 }
1673