1 /*
2  * HighPoint RR3xxx/4xxx RAID Driver for FreeBSD
3  * Copyright (C) 2007-2008 HighPoint Technologies, Inc. All Rights Reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  *
14  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24  * SUCH DAMAGE.
25  */
26 
27 #include <sys/cdefs.h>
28 __FBSDID("$FreeBSD$");
29 
30 #include <sys/param.h>
31 #include <sys/types.h>
32 #include <sys/cons.h>
33 #if (__FreeBSD_version >= 500000)
34 #include <sys/time.h>
35 #include <sys/systm.h>
36 #else
37 #include <machine/clock.h>
38 #endif
39 
40 #include <sys/stat.h>
41 #include <sys/malloc.h>
42 #include <sys/conf.h>
43 #include <sys/libkern.h>
44 #include <sys/kernel.h>
45 
46 #if (__FreeBSD_version >= 500000)
47 #include <sys/kthread.h>
48 #include <sys/mutex.h>
49 #include <sys/module.h>
50 #endif
51 
52 #include <sys/eventhandler.h>
53 #include <sys/bus.h>
54 #include <sys/taskqueue.h>
55 #include <sys/ioccom.h>
56 
57 #include <machine/resource.h>
58 #include <machine/bus.h>
59 #include <machine/stdarg.h>
60 #include <sys/rman.h>
61 
62 #include <vm/vm.h>
63 #include <vm/pmap.h>
64 
65 #if (__FreeBSD_version >= 500000)
66 #include <dev/pci/pcireg.h>
67 #include <dev/pci/pcivar.h>
68 #else
69 #include <pci/pcivar.h>
70 #include <pci/pcireg.h>
71 #endif
72 
73 #if (__FreeBSD_version <= 500043)
74 #include <sys/devicestat.h>
75 #endif
76 
77 #include <cam/cam.h>
78 #include <cam/cam_ccb.h>
79 #include <cam/cam_sim.h>
80 #include <cam/cam_xpt_sim.h>
81 #include <cam/cam_debug.h>
82 #include <cam/cam_periph.h>
83 #include <cam/scsi/scsi_all.h>
84 #include <cam/scsi/scsi_message.h>
85 
86 #if (__FreeBSD_version < 500043)
87 #include <sys/bus_private.h>
88 #endif
89 
90 #include <dev/hptiop/hptiop.h>
91 
92 static char driver_name[] = "hptiop";
93 static char driver_version[] = "v1.3 (010208)";
94 
95 static devclass_t hptiop_devclass;
96 
97 static int hptiop_send_sync_msg(struct hpt_iop_hba *hba,
98 				u_int32_t msg, u_int32_t millisec);
99 static void hptiop_request_callback_itl(struct hpt_iop_hba *hba,
100 							u_int32_t req);
101 static void hptiop_request_callback_mv(struct hpt_iop_hba *hba, u_int64_t req);
102 static void hptiop_os_message_callback(struct hpt_iop_hba *hba, u_int32_t msg);
103 static int  hptiop_do_ioctl_itl(struct hpt_iop_hba *hba,
104 				struct hpt_iop_ioctl_param *pParams);
105 static int  hptiop_do_ioctl_mv(struct hpt_iop_hba *hba,
106 				struct hpt_iop_ioctl_param *pParams);
107 static int  hptiop_rescan_bus(struct hpt_iop_hba *hba);
108 static int hptiop_alloc_pci_res_itl(struct hpt_iop_hba *hba);
109 static int hptiop_alloc_pci_res_mv(struct hpt_iop_hba *hba);
110 static int hptiop_get_config_itl(struct hpt_iop_hba *hba,
111 				struct hpt_iop_request_get_config *config);
112 static int hptiop_get_config_mv(struct hpt_iop_hba *hba,
113 				struct hpt_iop_request_get_config *config);
114 static int hptiop_set_config_itl(struct hpt_iop_hba *hba,
115 				struct hpt_iop_request_set_config *config);
116 static int hptiop_set_config_mv(struct hpt_iop_hba *hba,
117 				struct hpt_iop_request_set_config *config);
118 static int hptiop_internal_memalloc_mv(struct hpt_iop_hba *hba);
119 static int hptiop_internal_memfree_mv(struct hpt_iop_hba *hba);
120 static int  hptiop_post_ioctl_command_itl(struct hpt_iop_hba *hba,
121 			u_int32_t req32, struct hpt_iop_ioctl_param *pParams);
122 static int  hptiop_post_ioctl_command_mv(struct hpt_iop_hba *hba,
123 				struct hpt_iop_request_ioctl_command *req,
124 				struct hpt_iop_ioctl_param *pParams);
125 static void hptiop_post_req_itl(struct hpt_iop_hba *hba,
126 				struct hpt_iop_srb *srb,
127 				bus_dma_segment_t *segs, int nsegs);
128 static void hptiop_post_req_mv(struct hpt_iop_hba *hba,
129 				struct hpt_iop_srb *srb,
130 				bus_dma_segment_t *segs, int nsegs);
131 static void hptiop_post_msg_itl(struct hpt_iop_hba *hba, u_int32_t msg);
132 static void hptiop_post_msg_mv(struct hpt_iop_hba *hba, u_int32_t msg);
133 static void hptiop_enable_intr_itl(struct hpt_iop_hba *hba);
134 static void hptiop_enable_intr_mv(struct hpt_iop_hba *hba);
135 static void hptiop_disable_intr_itl(struct hpt_iop_hba *hba);
136 static void hptiop_disable_intr_mv(struct hpt_iop_hba *hba);
137 static void hptiop_free_srb(struct hpt_iop_hba *hba, struct hpt_iop_srb *srb);
138 static int  hptiop_os_query_remove_device(struct hpt_iop_hba *hba, int tid);
139 static int  hptiop_probe(device_t dev);
140 static int  hptiop_attach(device_t dev);
141 static int  hptiop_detach(device_t dev);
142 static int  hptiop_shutdown(device_t dev);
143 static void hptiop_action(struct cam_sim *sim, union ccb *ccb);
144 static void hptiop_poll(struct cam_sim *sim);
145 static void hptiop_async(void *callback_arg, u_int32_t code,
146 					struct cam_path *path, void *arg);
147 static void hptiop_pci_intr(void *arg);
148 static void hptiop_release_resource(struct hpt_iop_hba *hba);
149 static int  hptiop_reset_adapter(struct hpt_iop_hba *hba);
150 
151 static d_open_t hptiop_open;
152 static d_close_t hptiop_close;
153 static d_ioctl_t hptiop_ioctl;
154 
155 static struct cdevsw hptiop_cdevsw = {
156 	.d_open = hptiop_open,
157 	.d_close = hptiop_close,
158 	.d_ioctl = hptiop_ioctl,
159 	.d_name = driver_name,
160 #if __FreeBSD_version>=503000
161 	.d_version = D_VERSION,
162 #endif
163 #if (__FreeBSD_version>=503000 && __FreeBSD_version<600034)
164 	.d_flags = D_NEEDGIANT,
165 #endif
166 #if __FreeBSD_version<600034
167 #if __FreeBSD_version>=501000
168 	.d_maj = MAJOR_AUTO,
169 #else
170 	.d_maj = HPT_DEV_MAJOR,
171 #endif
172 #endif
173 };
174 
175 #if __FreeBSD_version < 503000
176 #define hba_from_dev(dev) ((struct hpt_iop_hba *)(dev)->si_drv1)
177 #else
178 #define hba_from_dev(dev) \
179 	((struct hpt_iop_hba *)devclass_get_softc(hptiop_devclass, dev2unit(dev)))
180 #endif
181 
182 #define BUS_SPACE_WRT4_ITL(offset, value) bus_space_write_4(hba->bar0t,\
183 		hba->bar0h, offsetof(struct hpt_iopmu_itl, offset), (value))
184 #define BUS_SPACE_RD4_ITL(offset) bus_space_read_4(hba->bar0t,\
185 		hba->bar0h, offsetof(struct hpt_iopmu_itl, offset))
186 
187 #define BUS_SPACE_WRT4_MV0(offset, value) bus_space_write_4(hba->bar0t,\
188 		hba->bar0h, offsetof(struct hpt_iopmv_regs, offset), value)
189 #define BUS_SPACE_RD4_MV0(offset) bus_space_read_4(hba->bar0t,\
190 		hba->bar0h, offsetof(struct hpt_iopmv_regs, offset))
191 #define BUS_SPACE_WRT4_MV2(offset, value) bus_space_write_4(hba->bar2t,\
192 		hba->bar2h, offsetof(struct hpt_iopmu_mv, offset), value)
193 #define BUS_SPACE_RD4_MV2(offset) bus_space_read_4(hba->bar2t,\
194 		hba->bar2h, offsetof(struct hpt_iopmu_mv, offset))
195 
196 static int hptiop_open(ioctl_dev_t dev, int flags,
197 					int devtype, ioctl_thread_t proc)
198 {
199 	struct hpt_iop_hba *hba = hba_from_dev(dev);
200 
201 	if (hba==NULL)
202 		return ENXIO;
203 	if (hba->flag & HPT_IOCTL_FLAG_OPEN)
204 		return EBUSY;
205 	hba->flag |= HPT_IOCTL_FLAG_OPEN;
206 	return 0;
207 }
208 
209 static int hptiop_close(ioctl_dev_t dev, int flags,
210 					int devtype, ioctl_thread_t proc)
211 {
212 	struct hpt_iop_hba *hba = hba_from_dev(dev);
213 	hba->flag &= ~(u_int32_t)HPT_IOCTL_FLAG_OPEN;
214 	return 0;
215 }
216 
217 static int hptiop_ioctl(ioctl_dev_t dev, u_long cmd, caddr_t data,
218 					int flags, ioctl_thread_t proc)
219 {
220 	int ret = EFAULT;
221 	struct hpt_iop_hba *hba = hba_from_dev(dev);
222 
223 #if (__FreeBSD_version >= 500000)
224 	mtx_lock(&Giant);
225 #endif
226 
227 	switch (cmd) {
228 	case HPT_DO_IOCONTROL:
229 		ret = hba->ops->do_ioctl(hba,
230 				(struct hpt_iop_ioctl_param *)data);
231 		break;
232 	case HPT_SCAN_BUS:
233 		ret = hptiop_rescan_bus(hba);
234 		break;
235 	}
236 
237 #if (__FreeBSD_version >= 500000)
238 	mtx_unlock(&Giant);
239 #endif
240 
241 	return ret;
242 }
243 
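/*
 * Pop one 64-bit completion tag from the Marvell outbound queue in BAR2.
 * Returns 0 when the queue is empty; otherwise advances the tail index
 * (wrapping at MVIOP_QUEUE_LEN) and writes it back to the controller.
 */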
244 static u_int64_t hptiop_mv_outbound_read(struct hpt_iop_hba *hba)
245 {
246 	u_int64_t p;
247 	u_int32_t outbound_tail = BUS_SPACE_RD4_MV2(outbound_tail);
248 	u_int32_t outbound_head = BUS_SPACE_RD4_MV2(outbound_head);
249 
250 	if (outbound_tail != outbound_head) {
251 		bus_space_read_region_4(hba->bar2t, hba->bar2h,
252 			offsetof(struct hpt_iopmu_mv,
253 				outbound_q[outbound_tail]),
254 			(u_int32_t *)&p, 2);
255 
256 		outbound_tail++;
257 
258 		if (outbound_tail == MVIOP_QUEUE_LEN)
259 			outbound_tail = 0;
260 
261 		BUS_SPACE_WRT4_MV2(outbound_tail, outbound_tail);
262 		return p;
263 	} else
264 		return 0;
265 }
266 
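/*
 * Push a 64-bit request descriptor onto the Marvell inbound queue and
 * ring the inbound doorbell so the IOP picks it up.
 */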
267 static void hptiop_mv_inbound_write(u_int64_t p, struct hpt_iop_hba *hba)
268 {
269 	u_int32_t inbound_head = BUS_SPACE_RD4_MV2(inbound_head);
270 	u_int32_t head = inbound_head + 1;
271 
272 	if (head == MVIOP_QUEUE_LEN)
273 		head = 0;
274 
275 	bus_space_write_region_4(hba->bar2t, hba->bar2h,
276 			offsetof(struct hpt_iopmu_mv, inbound_q[inbound_head]),
277 			(u_int32_t *)&p, 2);
278 	BUS_SPACE_WRT4_MV2(inbound_head, head);
279 	BUS_SPACE_WRT4_MV0(inbound_doorbell, MVIOP_MU_INBOUND_INT_POSTQUEUE);
280 }
281 
282 static void hptiop_post_msg_itl(struct hpt_iop_hba *hba, u_int32_t msg)
283 {
284 	BUS_SPACE_WRT4_ITL(inbound_msgaddr0, msg);
285 	BUS_SPACE_RD4_ITL(outbound_intstatus);
286 }
287 
288 static void hptiop_post_msg_mv(struct hpt_iop_hba *hba, u_int32_t msg)
289 {
290 
291 	BUS_SPACE_WRT4_MV2(inbound_msg, msg);
292 	BUS_SPACE_WRT4_MV0(inbound_doorbell, MVIOP_MU_INBOUND_INT_MSG);
293 
294 	BUS_SPACE_RD4_MV0(outbound_intmask);
295 }
296 
297 static int hptiop_wait_ready_itl(struct hpt_iop_hba * hba, u_int32_t millisec)
298 {
299 	u_int32_t req=0;
300 	int i;
301 
302 	for (i = 0; i < millisec; i++) {
303 		req = BUS_SPACE_RD4_ITL(inbound_queue);
304 		if (req != IOPMU_QUEUE_EMPTY)
305 			break;
306 		DELAY(1000);
307 	}
308 
309 	if (req!=IOPMU_QUEUE_EMPTY) {
310 		BUS_SPACE_WRT4_ITL(outbound_queue, req);
311 		BUS_SPACE_RD4_ITL(outbound_intstatus);
312 		return 0;
313 	}
314 
315 	return -1;
316 }
317 
318 static int hptiop_wait_ready_mv(struct hpt_iop_hba * hba, u_int32_t millisec)
319 {
320 	if (hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_NOP, millisec))
321 		return -1;
322 
323 	return 0;
324 }
325 
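/*
 * Completion handler for Intel (ITL) style controllers.  "index" is either
 * a host SRB tag (IOPMU_QUEUE_MASK_HOST_BITS set) or an offset into the
 * IOP's own request area; SCSI requests are translated into CAM status and
 * finished with xpt_done(), while IOCTL requests wake their sleeping sender.
 */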
326 static void hptiop_request_callback_itl(struct hpt_iop_hba * hba,
327 							u_int32_t index)
328 {
329 	struct hpt_iop_srb *srb;
330 	struct hpt_iop_request_scsi_command *req=0;
331 	union ccb *ccb;
332 	u_int8_t *cdb;
333 	u_int32_t result, temp, dxfer;
334 	u_int64_t temp64;
335 
336 	if (index & IOPMU_QUEUE_MASK_HOST_BITS) { /*host req*/
337 		if (hba->firmware_version > 0x01020000 ||
338 			hba->interface_version > 0x01020000) {
339 			srb = hba->srb[index & ~(u_int32_t)
340 				(IOPMU_QUEUE_ADDR_HOST_BIT
341 				| IOPMU_QUEUE_REQUEST_RESULT_BIT)];
342 			req = (struct hpt_iop_request_scsi_command *)srb;
343 			if (index & IOPMU_QUEUE_REQUEST_RESULT_BIT)
344 				result = IOP_RESULT_SUCCESS;
345 			else
346 				result = req->header.result;
347 		} else {
348 			srb = hba->srb[index &
349 				~(u_int32_t)IOPMU_QUEUE_ADDR_HOST_BIT];
350 			req = (struct hpt_iop_request_scsi_command *)srb;
351 			result = req->header.result;
352 		}
353 		dxfer = req->dataxfer_length;
354 		goto srb_complete;
355 	}
356 
357 	/*iop req*/
358 	temp = bus_space_read_4(hba->bar0t, hba->bar0h, index +
359 		offsetof(struct hpt_iop_request_header, type));
360 	result = bus_space_read_4(hba->bar0t, hba->bar0h, index +
361 		offsetof(struct hpt_iop_request_header, result));
362 	switch(temp) {
363 	case IOP_REQUEST_TYPE_IOCTL_COMMAND:
364 	{
365 		temp64 = 0;
366 		bus_space_write_region_4(hba->bar0t, hba->bar0h, index +
367 			offsetof(struct hpt_iop_request_header, context),
368 			(u_int32_t *)&temp64, 2);
369 		wakeup((void *)((unsigned long)hba->u.itl.mu + index));
370 		break;
371 	}
372 
373 	case IOP_REQUEST_TYPE_SCSI_COMMAND:
374 		bus_space_read_region_4(hba->bar0t, hba->bar0h, index +
375 			offsetof(struct hpt_iop_request_header, context),
376 			(u_int32_t *)&temp64, 2);
377 		srb = (struct hpt_iop_srb *)(unsigned long)temp64;
378 		dxfer = bus_space_read_4(hba->bar0t, hba->bar0h,
379 				index + offsetof(struct hpt_iop_request_scsi_command,
380 				dataxfer_length));
381 srb_complete:
382 		ccb = (union ccb *)srb->ccb;
383 		if (ccb->ccb_h.flags & CAM_CDB_POINTER)
384 			cdb = ccb->csio.cdb_io.cdb_ptr;
385 		else
386 			cdb = ccb->csio.cdb_io.cdb_bytes;
387 
388 		if (cdb[0] == SYNCHRONIZE_CACHE) { /* ??? */
389 			ccb->ccb_h.status = CAM_REQ_CMP;
390 			goto scsi_done;
391 		}
392 
393 		switch (result) {
394 		case IOP_RESULT_SUCCESS:
395 			switch (ccb->ccb_h.flags & CAM_DIR_MASK) {
396 			case CAM_DIR_IN:
397 				bus_dmamap_sync(hba->io_dmat,
398 					srb->dma_map, BUS_DMASYNC_POSTREAD);
399 				bus_dmamap_unload(hba->io_dmat, srb->dma_map);
400 				break;
401 			case CAM_DIR_OUT:
402 				bus_dmamap_sync(hba->io_dmat,
403 					srb->dma_map, BUS_DMASYNC_POSTWRITE);
404 				bus_dmamap_unload(hba->io_dmat, srb->dma_map);
405 				break;
406 			}
407 
408 			ccb->ccb_h.status = CAM_REQ_CMP;
409 			break;
410 
411 		case IOP_RESULT_BAD_TARGET:
412 			ccb->ccb_h.status = CAM_DEV_NOT_THERE;
413 			break;
414 		case IOP_RESULT_BUSY:
415 			ccb->ccb_h.status = CAM_BUSY;
416 			break;
417 		case IOP_RESULT_INVALID_REQUEST:
418 			ccb->ccb_h.status = CAM_REQ_INVALID;
419 			break;
420 		case IOP_RESULT_FAIL:
421 			ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
422 			break;
423 		case IOP_RESULT_RESET:
424 			ccb->ccb_h.status = CAM_BUSY;
425 			break;
426 		case IOP_RESULT_CHECK_CONDITION:
427 			memset(&ccb->csio.sense_data, 0,
428 			    sizeof(ccb->csio.sense_data));
429 			if (dxfer < ccb->csio.sense_len)
430 				ccb->csio.sense_resid = ccb->csio.sense_len -
431 				    dxfer;
432 			else
433 				ccb->csio.sense_resid = 0;
434 			if (srb->srb_flag & HPT_SRB_FLAG_HIGH_MEM_ACESS) {/*iop*/
435 				bus_space_read_region_1(hba->bar0t, hba->bar0h,
436 					index + offsetof(struct hpt_iop_request_scsi_command,
437 					sg_list), (u_int8_t *)&ccb->csio.sense_data,
438 					MIN(dxfer, sizeof(ccb->csio.sense_data)));
439 			} else {
440 				memcpy(&ccb->csio.sense_data, &req->sg_list,
441 					MIN(dxfer, sizeof(ccb->csio.sense_data)));
442 			}
443 			ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
444 			ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
445 			ccb->csio.scsi_status = SCSI_STATUS_CHECK_COND;
446 			break;
447 		default:
448 			ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
449 			break;
450 		}
451 scsi_done:
452 		if (srb->srb_flag & HPT_SRB_FLAG_HIGH_MEM_ACESS)
453 			BUS_SPACE_WRT4_ITL(outbound_queue, index);
454 
455 		ccb->csio.resid = ccb->csio.dxfer_len - dxfer;
456 
457 		hptiop_free_srb(hba, srb);
458 		xpt_done(ccb);
459 		break;
460 	}
461 }
462 
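/*
 * Drain the ITL outbound queue, dispatching each completed request to
 * hptiop_request_callback_itl() or, for synchronous requests still being
 * polled by their sender, flagging completion through the request's
 * context field.
 */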
463 static void hptiop_drain_outbound_queue_itl(struct hpt_iop_hba *hba)
464 {
465 	u_int32_t req, temp;
466 
467 	while ((req = BUS_SPACE_RD4_ITL(outbound_queue)) !=IOPMU_QUEUE_EMPTY) {
468 		if (req & IOPMU_QUEUE_MASK_HOST_BITS)
469 			hptiop_request_callback_itl(hba, req);
470 		else {
471 			struct hpt_iop_request_header *p;
472 
473 			p = (struct hpt_iop_request_header *)
474 				((char *)hba->u.itl.mu + req);
475 			temp = bus_space_read_4(hba->bar0t,
476 					hba->bar0h,req +
477 					offsetof(struct hpt_iop_request_header,
478 						flags));
479 			if (temp & IOP_REQUEST_FLAG_SYNC_REQUEST) {
480 				u_int64_t temp64;
481 				bus_space_read_region_4(hba->bar0t,
482 					hba->bar0h,req +
483 					offsetof(struct hpt_iop_request_header,
484 						context),
485 					(u_int32_t *)&temp64, 2);
486 				if (temp64) {
487 					hptiop_request_callback_itl(hba, req);
488 				} else {
489 					temp64 = 1;
490 					bus_space_write_region_4(hba->bar0t,
491 						hba->bar0h,req +
492 						offsetof(struct hpt_iop_request_header,
493 							context),
494 						(u_int32_t *)&temp64, 2);
495 				}
496 			} else
497 				hptiop_request_callback_itl(hba, req);
498 		}
499 	}
500 }
501 
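/*
 * ITL interrupt handler: acknowledge and dispatch outbound message and
 * post-queue interrupts.  Returns nonzero if any work was done.
 */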
502 static int hptiop_intr_itl(struct hpt_iop_hba * hba)
503 {
504 	u_int32_t status;
505 	int ret = 0;
506 
507 	status = BUS_SPACE_RD4_ITL(outbound_intstatus);
508 
509 	if (status & IOPMU_OUTBOUND_INT_MSG0) {
510 		u_int32_t msg = BUS_SPACE_RD4_ITL(outbound_msgaddr0);
511 		KdPrint(("hptiop: received outbound msg %x\n", msg));
512 		BUS_SPACE_WRT4_ITL(outbound_intstatus, IOPMU_OUTBOUND_INT_MSG0);
513 		hptiop_os_message_callback(hba, msg);
514 		ret = 1;
515 	}
516 
517 	if (status & IOPMU_OUTBOUND_INT_POSTQUEUE) {
518 		hptiop_drain_outbound_queue_itl(hba);
519 		ret = 1;
520 	}
521 
522 	return ret;
523 }
524 
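/*
 * Completion handler for Marvell (MV) style controllers.  The low bits of
 * the tag encode the request type (SCSI, IOCTL, get/set config); SCSI
 * completions are mapped to CAM status codes and finished via xpt_done().
 */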
525 static void hptiop_request_callback_mv(struct hpt_iop_hba * hba,
526 							u_int64_t _tag)
527 {
528 	u_int32_t context = (u_int32_t)_tag;
529 
530 	if (context & MVIOP_CMD_TYPE_SCSI) {
531 		struct hpt_iop_srb *srb;
532 		struct hpt_iop_request_scsi_command *req;
533 		union ccb *ccb;
534 		u_int8_t *cdb;
535 
536 		srb = hba->srb[context >> MVIOP_REQUEST_NUMBER_START_BIT];
537 		req = (struct hpt_iop_request_scsi_command *)srb;
538 		ccb = (union ccb *)srb->ccb;
539 		if (ccb->ccb_h.flags & CAM_CDB_POINTER)
540 			cdb = ccb->csio.cdb_io.cdb_ptr;
541 		else
542 			cdb = ccb->csio.cdb_io.cdb_bytes;
543 
544 		if (cdb[0] == SYNCHRONIZE_CACHE) { /* ??? */
545 			ccb->ccb_h.status = CAM_REQ_CMP;
546 			goto scsi_done;
547 		}
548 		if (context & MVIOP_MU_QUEUE_REQUEST_RESULT_BIT)
549 			req->header.result = IOP_RESULT_SUCCESS;
550 
551 		switch (req->header.result) {
552 		case IOP_RESULT_SUCCESS:
553 			switch (ccb->ccb_h.flags & CAM_DIR_MASK) {
554 			case CAM_DIR_IN:
555 				bus_dmamap_sync(hba->io_dmat,
556 					srb->dma_map, BUS_DMASYNC_POSTREAD);
557 				bus_dmamap_unload(hba->io_dmat, srb->dma_map);
558 				break;
559 			case CAM_DIR_OUT:
560 				bus_dmamap_sync(hba->io_dmat,
561 					srb->dma_map, BUS_DMASYNC_POSTWRITE);
562 				bus_dmamap_unload(hba->io_dmat, srb->dma_map);
563 				break;
564 			}
565 			ccb->ccb_h.status = CAM_REQ_CMP;
566 			break;
567 		case IOP_RESULT_BAD_TARGET:
568 			ccb->ccb_h.status = CAM_DEV_NOT_THERE;
569 			break;
570 		case IOP_RESULT_BUSY:
571 			ccb->ccb_h.status = CAM_BUSY;
572 			break;
573 		case IOP_RESULT_INVALID_REQUEST:
574 			ccb->ccb_h.status = CAM_REQ_INVALID;
575 			break;
576 		case IOP_RESULT_FAIL:
577 			ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
578 			break;
579 		case IOP_RESULT_RESET:
580 			ccb->ccb_h.status = CAM_BUSY;
581 			break;
582 		case IOP_RESULT_CHECK_CONDITION:
583 			memset(&ccb->csio.sense_data, 0,
584 			    sizeof(ccb->csio.sense_data));
585 			if (req->dataxfer_length < ccb->csio.sense_len)
586 				ccb->csio.sense_resid = ccb->csio.sense_len -
587 				    req->dataxfer_length;
588 			else
589 				ccb->csio.sense_resid = 0;
590 			memcpy(&ccb->csio.sense_data, &req->sg_list,
591 				MIN(req->dataxfer_length, sizeof(ccb->csio.sense_data)));
592 			ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
593 			ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
594 			ccb->csio.scsi_status = SCSI_STATUS_CHECK_COND;
595 			break;
596 		default:
597 			ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
598 			break;
599 		}
600 scsi_done:
601 		ccb->csio.resid = ccb->csio.dxfer_len - req->dataxfer_length;
602 
603 		hptiop_free_srb(hba, srb);
604 		xpt_done(ccb);
605 	} else if (context & MVIOP_CMD_TYPE_IOCTL) {
606 		struct hpt_iop_request_ioctl_command *req = hba->ctlcfg_ptr;
607 		if (context & MVIOP_MU_QUEUE_REQUEST_RESULT_BIT)
608 			hba->config_done = 1;
609 		else
610 			hba->config_done = -1;
611 		wakeup(req);
612 	} else if (context &
613 			(MVIOP_CMD_TYPE_SET_CONFIG |
614 				MVIOP_CMD_TYPE_GET_CONFIG))
615 		hba->config_done = 1;
616 	else {
617 		device_printf(hba->pcidev, "wrong callback type\n");
618 	}
619 }
620 
621 static void hptiop_drain_outbound_queue_mv(struct hpt_iop_hba * hba)
622 {
623 	u_int64_t req;
624 
625 	while ((req = hptiop_mv_outbound_read(hba))) {
626 		if (req & MVIOP_MU_QUEUE_ADDR_HOST_BIT) {
627 			if (req & MVIOP_MU_QUEUE_REQUEST_RETURN_CONTEXT) {
628 				hptiop_request_callback_mv(hba, req);
629 			}
630 		}
631 	}
632 }
633 
634 static int hptiop_intr_mv(struct hpt_iop_hba * hba)
635 {
636 	u_int32_t status;
637 	int ret = 0;
638 
639 	status = BUS_SPACE_RD4_MV0(outbound_doorbell);
640 
641 	if (status)
642 		BUS_SPACE_WRT4_MV0(outbound_doorbell, ~status);
643 
644 	if (status & MVIOP_MU_OUTBOUND_INT_MSG) {
645 		u_int32_t msg = BUS_SPACE_RD4_MV2(outbound_msg);
646 		KdPrint(("hptiop: received outbound msg %x\n", msg));
647 		hptiop_os_message_callback(hba, msg);
648 		ret = 1;
649 	}
650 
651 	if (status & MVIOP_MU_OUTBOUND_INT_POSTQUEUE) {
652 		hptiop_drain_outbound_queue_mv(hba);
653 		ret = 1;
654 	}
655 
656 	return ret;
657 }
658 
659 static int hptiop_send_sync_request_itl(struct hpt_iop_hba * hba,
660 					u_int32_t req32, u_int32_t millisec)
661 {
662 	u_int32_t i;
663 	u_int64_t temp64;
664 
665 	BUS_SPACE_WRT4_ITL(inbound_queue, req32);
666 	BUS_SPACE_RD4_ITL(outbound_intstatus);
667 
668 	for (i = 0; i < millisec; i++) {
669 		hptiop_intr_itl(hba);
670 		bus_space_read_region_4(hba->bar0t, hba->bar0h, req32 +
671 			offsetof(struct hpt_iop_request_header, context),
672 			(u_int32_t *)&temp64, 2);
673 		if (temp64)
674 			return 0;
675 		DELAY(1000);
676 	}
677 
678 	return -1;
679 }
680 
681 static int hptiop_send_sync_request_mv(struct hpt_iop_hba *hba,
682 					void *req, u_int32_t millisec)
683 {
684 	u_int32_t i;
685 	u_int64_t phy_addr;
686 	hba->config_done = 0;
687 
688 	phy_addr = hba->ctlcfgcmd_phy |
689 			(u_int64_t)MVIOP_MU_QUEUE_ADDR_HOST_BIT;
690 	((struct hpt_iop_request_get_config *)req)->header.flags |=
691 		IOP_REQUEST_FLAG_SYNC_REQUEST |
692 		IOP_REQUEST_FLAG_OUTPUT_CONTEXT;
693 	hptiop_mv_inbound_write(phy_addr, hba);
694 	BUS_SPACE_RD4_MV0(outbound_intmask);
695 
696 	for (i = 0; i < millisec; i++) {
697 		hptiop_intr_mv(hba);
698 		if (hba->config_done)
699 			return 0;
700 		DELAY(1000);
701 	}
702 	return -1;
703 }
704 
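/*
 * Post a message to the IOP and poll, with one-millisecond delays, until
 * the firmware acknowledges it or "millisec" expires.  Returns 0 on
 * success, -1 on timeout.
 */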
705 static int hptiop_send_sync_msg(struct hpt_iop_hba *hba,
706 					u_int32_t msg, u_int32_t millisec)
707 {
708 	u_int32_t i;
709 
710 	hba->msg_done = 0;
711 	hba->ops->post_msg(hba, msg);
712 
713 	for (i=0; i<millisec; i++) {
714 		hba->ops->iop_intr(hba);
715 		if (hba->msg_done)
716 			break;
717 		DELAY(1000);
718 	}
719 
720 	return hba->msg_done? 0 : -1;
721 }
722 
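/*
 * Fetch the controller configuration (ITL): grab a free inbound request
 * slot, post a GET_CONFIG request synchronously and copy the reply back.
 */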
723 static int hptiop_get_config_itl(struct hpt_iop_hba * hba,
724 				struct hpt_iop_request_get_config * config)
725 {
726 	u_int32_t req32;
727 
728 	config->header.size = sizeof(struct hpt_iop_request_get_config);
729 	config->header.type = IOP_REQUEST_TYPE_GET_CONFIG;
730 	config->header.flags = IOP_REQUEST_FLAG_SYNC_REQUEST;
731 	config->header.result = IOP_RESULT_PENDING;
732 	config->header.context = 0;
733 
734 	req32 = BUS_SPACE_RD4_ITL(inbound_queue);
735 	if (req32 == IOPMU_QUEUE_EMPTY)
736 		return -1;
737 
738 	bus_space_write_region_4(hba->bar0t, hba->bar0h,
739 			req32, (u_int32_t *)config,
740 			sizeof(struct hpt_iop_request_header) >> 2);
741 
742 	if (hptiop_send_sync_request_itl(hba, req32, 20000)) {
743 		KdPrint(("hptiop: get config send cmd failed"));
744 		return -1;
745 	}
746 
747 	bus_space_read_region_4(hba->bar0t, hba->bar0h,
748 			req32, (u_int32_t *)config,
749 			sizeof(struct hpt_iop_request_get_config) >> 2);
750 
751 	BUS_SPACE_WRT4_ITL(outbound_queue, req32);
752 
753 	return 0;
754 }
755 
756 static int hptiop_get_config_mv(struct hpt_iop_hba * hba,
757 				struct hpt_iop_request_get_config * config)
758 {
759 	struct hpt_iop_request_get_config *req;
760 
761 	if (!(req = hba->ctlcfg_ptr))
762 		return -1;
763 
764 	req->header.flags = 0;
765 	req->header.type = IOP_REQUEST_TYPE_GET_CONFIG;
766 	req->header.size = sizeof(struct hpt_iop_request_get_config);
767 	req->header.result = IOP_RESULT_PENDING;
768 	req->header.context = MVIOP_CMD_TYPE_GET_CONFIG;
769 
770 	if (hptiop_send_sync_request_mv(hba, req, 20000)) {
771 		KdPrint(("hptiop: get config send cmd failed"));
772 		return -1;
773 	}
774 
775 	*config = *req;
776 	return 0;
777 }
778 
779 static int hptiop_set_config_itl(struct hpt_iop_hba *hba,
780 				struct hpt_iop_request_set_config *config)
781 {
782 	u_int32_t req32;
783 
784 	req32 = BUS_SPACE_RD4_ITL(inbound_queue);
785 
786 	if (req32 == IOPMU_QUEUE_EMPTY)
787 		return -1;
788 
789 	config->header.size = sizeof(struct hpt_iop_request_set_config);
790 	config->header.type = IOP_REQUEST_TYPE_SET_CONFIG;
791 	config->header.flags = IOP_REQUEST_FLAG_SYNC_REQUEST;
792 	config->header.result = IOP_RESULT_PENDING;
793 	config->header.context = 0;
794 
795 	bus_space_write_region_4(hba->bar0t, hba->bar0h, req32,
796 		(u_int32_t *)config,
797 		sizeof(struct hpt_iop_request_set_config) >> 2);
798 
799 	if (hptiop_send_sync_request_itl(hba, req32, 20000)) {
800 		KdPrint(("hptiop: set config send cmd failed"));
801 		return -1;
802 	}
803 
804 	BUS_SPACE_WRT4_ITL(outbound_queue, req32);
805 
806 	return 0;
807 }
808 
809 static int hptiop_set_config_mv(struct hpt_iop_hba *hba,
810 				struct hpt_iop_request_set_config *config)
811 {
812 	struct hpt_iop_request_set_config *req;
813 
814 	if (!(req = hba->ctlcfg_ptr))
815 		return -1;
816 
817 	memcpy((u_int8_t *)req + sizeof(struct hpt_iop_request_header),
818 		(u_int8_t *)config + sizeof(struct hpt_iop_request_header),
819 		sizeof(struct hpt_iop_request_set_config) -
820 			sizeof(struct hpt_iop_request_header));
821 
822 	req->header.flags = 0;
823 	req->header.type = IOP_REQUEST_TYPE_SET_CONFIG;
824 	req->header.size = sizeof(struct hpt_iop_request_set_config);
825 	req->header.result = IOP_RESULT_PENDING;
826 	req->header.context = MVIOP_CMD_TYPE_SET_CONFIG;
827 
828 	if (hptiop_send_sync_request_mv(hba, req, 20000)) {
829 		KdPrint(("hptiop: set config send cmd failed"));
830 		return -1;
831 	}
832 
833 	return 0;
834 }
835 
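/*
 * Build an IOCTL request in the ITL inbound slot "req32", post it, and
 * sleep until the firmware completes it (clearing the request context and
 * waking us); the adapter is reset if the wait times out.
 */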
836 static int hptiop_post_ioctl_command_itl(struct hpt_iop_hba *hba,
837 				u_int32_t req32,
838 				struct hpt_iop_ioctl_param *pParams)
839 {
840 	u_int64_t temp64;
841 	struct hpt_iop_request_ioctl_command req;
842 
843 	if ((((pParams->nInBufferSize + 3) & ~3) + pParams->nOutBufferSize) >
844 			(hba->max_request_size -
845 			offsetof(struct hpt_iop_request_ioctl_command, buf))) {
846 		device_printf(hba->pcidev, "request size beyond max value");
847 		return -1;
848 	}
849 
850 	req.header.size = offsetof(struct hpt_iop_request_ioctl_command, buf)
851 		+ pParams->nInBufferSize;
852 	req.header.type = IOP_REQUEST_TYPE_IOCTL_COMMAND;
853 	req.header.flags = IOP_REQUEST_FLAG_SYNC_REQUEST;
854 	req.header.result = IOP_RESULT_PENDING;
855 	req.header.context = req32 + (u_int64_t)(unsigned long)hba->u.itl.mu;
856 	req.ioctl_code = HPT_CTL_CODE_BSD_TO_IOP(pParams->dwIoControlCode);
857 	req.inbuf_size = pParams->nInBufferSize;
858 	req.outbuf_size = pParams->nOutBufferSize;
859 	req.bytes_returned = 0;
860 
861 	bus_space_write_region_4(hba->bar0t, hba->bar0h, req32, (u_int32_t *)&req,
862 		offsetof(struct hpt_iop_request_ioctl_command, buf)>>2);
863 
864 	hptiop_lock_adapter(hba);
865 
866 	BUS_SPACE_WRT4_ITL(inbound_queue, req32);
867 	BUS_SPACE_RD4_ITL(outbound_intstatus);
868 
869 	bus_space_read_region_4(hba->bar0t, hba->bar0h, req32 +
870 		offsetof(struct hpt_iop_request_ioctl_command, header.context),
871 		(u_int32_t *)&temp64, 2);
872 	while (temp64) {
873 		if (hptiop_sleep(hba, (void *)((unsigned long)hba->u.itl.mu + req32),
874 				PPAUSE, "hptctl", HPT_OSM_TIMEOUT)==0)
875 			break;
876 		hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_RESET, 60000);
877 		bus_space_read_region_4(hba->bar0t, hba->bar0h,req32 +
878 			offsetof(struct hpt_iop_request_ioctl_command,
879 				header.context),
880 			(u_int32_t *)&temp64, 2);
881 	}
882 
883 	hptiop_unlock_adapter(hba);
884 	return 0;
885 }
886 
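/* Byte-wise copy from a user buffer into controller (bus_space) memory. */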
887 static int hptiop_bus_space_copyin(struct hpt_iop_hba *hba, u_int32_t bus, void *user, int size)
888 {
889 	unsigned char byte;
890 	int i;
891 
892 	for (i=0; i<size; i++) {
893 		if (copyin((u_int8_t *)user + i, &byte, 1))
894 			return -1;
895 		bus_space_write_1(hba->bar0t, hba->bar0h, bus + i, byte);
896 	}
897 
898 	return 0;
899 }
900 
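/* Byte-wise copy from controller (bus_space) memory out to a user buffer. */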
901 static int hptiop_bus_space_copyout(struct hpt_iop_hba *hba, u_int32_t bus, void *user, int size)
902 {
903 	unsigned char byte;
904 	int i;
905 
906 	for (i=0; i<size; i++) {
907 		byte = bus_space_read_1(hba->bar0t, hba->bar0h, bus + i);
908 		if (copyout(&byte, (u_int8_t *)user + i, 1))
909 			return -1;
910 	}
911 
912 	return 0;
913 }
914 
915 static int hptiop_do_ioctl_itl(struct hpt_iop_hba *hba,
916 				struct hpt_iop_ioctl_param * pParams)
917 {
918 	u_int32_t req32;
919 	u_int32_t result;
920 
921 	if ((pParams->Magic != HPT_IOCTL_MAGIC) &&
922 		(pParams->Magic != HPT_IOCTL_MAGIC32))
923 		return EFAULT;
924 
925 	req32 = BUS_SPACE_RD4_ITL(inbound_queue);
926 	if (req32 == IOPMU_QUEUE_EMPTY)
927 		return EFAULT;
928 
929 	if (pParams->nInBufferSize)
930 		if (hptiop_bus_space_copyin(hba, req32 +
931 			offsetof(struct hpt_iop_request_ioctl_command, buf),
932 			(void *)pParams->lpInBuffer, pParams->nInBufferSize))
933 			goto invalid;
934 
935 	if (hptiop_post_ioctl_command_itl(hba, req32, pParams))
936 		goto invalid;
937 
938 	result = bus_space_read_4(hba->bar0t, hba->bar0h, req32 +
939 			offsetof(struct hpt_iop_request_ioctl_command,
940 				header.result));
941 
942 	if (result == IOP_RESULT_SUCCESS) {
943 		if (pParams->nOutBufferSize)
944 			if (hptiop_bus_space_copyout(hba, req32 +
945 				offsetof(struct hpt_iop_request_ioctl_command, buf) +
946 					((pParams->nInBufferSize + 3) & ~3),
947 				(void *)pParams->lpOutBuffer, pParams->nOutBufferSize))
948 				goto invalid;
949 
950 		if (pParams->lpBytesReturned) {
951 			if (hptiop_bus_space_copyout(hba, req32 +
952 				offsetof(struct hpt_iop_request_ioctl_command, bytes_returned),
953 				(void *)pParams->lpBytesReturned, sizeof(unsigned  long)))
954 				goto invalid;
955 		}
956 
957 		BUS_SPACE_WRT4_ITL(outbound_queue, req32);
958 
959 		return 0;
960 	} else {
961 invalid:
962 		BUS_SPACE_WRT4_ITL(outbound_queue, req32);
963 
964 		return EFAULT;
965 	}
966 }
967 
968 static int hptiop_post_ioctl_command_mv(struct hpt_iop_hba *hba,
969 				struct hpt_iop_request_ioctl_command *req,
970 				struct hpt_iop_ioctl_param *pParams)
971 {
972 	u_int64_t req_phy;
973 	int size = 0;
974 
975 	if ((((pParams->nInBufferSize + 3) & ~3) + pParams->nOutBufferSize) >
976 			(hba->max_request_size -
977 			offsetof(struct hpt_iop_request_ioctl_command, buf))) {
978 		device_printf(hba->pcidev, "request size beyond max value");
979 		return -1;
980 	}
981 
982 	req->ioctl_code = HPT_CTL_CODE_BSD_TO_IOP(pParams->dwIoControlCode);
983 	req->inbuf_size = pParams->nInBufferSize;
984 	req->outbuf_size = pParams->nOutBufferSize;
985 	req->header.size = offsetof(struct hpt_iop_request_ioctl_command, buf)
986 					+ pParams->nInBufferSize;
987 	req->header.context = (u_int64_t)MVIOP_CMD_TYPE_IOCTL;
988 	req->header.type = IOP_REQUEST_TYPE_IOCTL_COMMAND;
989 	req->header.result = IOP_RESULT_PENDING;
990 	req->header.flags = IOP_REQUEST_FLAG_OUTPUT_CONTEXT;
991 	size = req->header.size >> 8;
992 	size = size > 3 ? 3 : size;
993 	req_phy = hba->ctlcfgcmd_phy | MVIOP_MU_QUEUE_ADDR_HOST_BIT | size;
994 	hptiop_mv_inbound_write(req_phy, hba);
995 
996 	BUS_SPACE_RD4_MV0(outbound_intmask);
997 
998 	while (hba->config_done == 0) {
999 		if (hptiop_sleep(hba, req, PPAUSE,
1000 			"hptctl", HPT_OSM_TIMEOUT)==0)
1001 			continue;
1002 		hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_RESET, 60000);
1003 	}
1004 	return 0;
1005 }
1006 
1007 static int hptiop_do_ioctl_mv(struct hpt_iop_hba *hba,
1008 				struct hpt_iop_ioctl_param *pParams)
1009 {
1010 	struct hpt_iop_request_ioctl_command *req;
1011 
1012 	if ((pParams->Magic != HPT_IOCTL_MAGIC) &&
1013 		(pParams->Magic != HPT_IOCTL_MAGIC32))
1014 		return EFAULT;
1015 
1016 	req = (struct hpt_iop_request_ioctl_command *)(hba->ctlcfg_ptr);
1017 	hba->config_done = 0;
1018 	hptiop_lock_adapter(hba);
1019 	if (pParams->nInBufferSize)
1020 		if (copyin((void *)pParams->lpInBuffer,
1021 				req->buf, pParams->nInBufferSize))
1022 			goto invalid;
1023 	if (hptiop_post_ioctl_command_mv(hba, req, pParams))
1024 		goto invalid;
1025 
1026 	if (hba->config_done == 1) {
1027 		if (pParams->nOutBufferSize)
1028 			if (copyout(req->buf +
1029 				((pParams->nInBufferSize + 3) & ~3),
1030 				(void *)pParams->lpOutBuffer,
1031 				pParams->nOutBufferSize))
1032 				goto invalid;
1033 
1034 		if (pParams->lpBytesReturned)
1035 			if (copyout(&req->bytes_returned,
1036 				(void*)pParams->lpBytesReturned,
1037 				sizeof(u_int32_t)))
1038 				goto invalid;
1039 		hptiop_unlock_adapter(hba);
1040 		return 0;
1041 	} else {
1042 invalid:
1043 		hptiop_unlock_adapter(hba);
1044 		return EFAULT;
1045 	}
1046 }
1047 
1048 static int  hptiop_rescan_bus(struct hpt_iop_hba * hba)
1049 {
1050 	union ccb           *ccb;
1051 
1052 	if ((ccb = xpt_alloc_ccb()) == NULL)
1053 		return(ENOMEM);
1054 	if (xpt_create_path(&ccb->ccb_h.path, xpt_periph, cam_sim_path(hba->sim),
1055 		CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
1056 		xpt_free_ccb(ccb);
1057 		return(EIO);
1058 	}
1059 	xpt_rescan(ccb);
1060 	return(0);
1061 }
1062 
1063 static  bus_dmamap_callback_t   hptiop_map_srb;
1064 static  bus_dmamap_callback_t   hptiop_post_scsi_command;
1065 static  bus_dmamap_callback_t   hptiop_mv_map_ctlcfg;
1066 
1067 static int hptiop_alloc_pci_res_itl(struct hpt_iop_hba *hba)
1068 {
1069 	hba->bar0_rid = 0x10;
1070 	hba->bar0_res = bus_alloc_resource_any(hba->pcidev,
1071 			SYS_RES_MEMORY, &hba->bar0_rid, RF_ACTIVE);
1072 
1073 	if (hba->bar0_res == NULL) {
1074 		device_printf(hba->pcidev,
1075 			"failed to get iop base adrress.\n");
1076 		return -1;
1077 	}
1078 	hba->bar0t = rman_get_bustag(hba->bar0_res);
1079 	hba->bar0h = rman_get_bushandle(hba->bar0_res);
1080 	hba->u.itl.mu = (struct hpt_iopmu_itl *)
1081 				rman_get_virtual(hba->bar0_res);
1082 
1083 	if (!hba->u.itl.mu) {
1084 		bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
1085 					hba->bar0_rid, hba->bar0_res);
1086 		device_printf(hba->pcidev, "alloc mem res failed\n");
1087 		return -1;
1088 	}
1089 
1090 	return 0;
1091 }
1092 
1093 static int hptiop_alloc_pci_res_mv(struct hpt_iop_hba *hba)
1094 {
1095 	hba->bar0_rid = 0x10;
1096 	hba->bar0_res = bus_alloc_resource_any(hba->pcidev,
1097 			SYS_RES_MEMORY, &hba->bar0_rid, RF_ACTIVE);
1098 
1099 	if (hba->bar0_res == NULL) {
1100 		device_printf(hba->pcidev, "failed to get iop bar0.\n");
1101 		return -1;
1102 	}
1103 	hba->bar0t = rman_get_bustag(hba->bar0_res);
1104 	hba->bar0h = rman_get_bushandle(hba->bar0_res);
1105 	hba->u.mv.regs = (struct hpt_iopmv_regs *)
1106 				rman_get_virtual(hba->bar0_res);
1107 
1108 	if (!hba->u.mv.regs) {
1109 		bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
1110 					hba->bar0_rid, hba->bar0_res);
1111 		device_printf(hba->pcidev, "alloc bar0 mem res failed\n");
1112 		return -1;
1113 	}
1114 
1115 	hba->bar2_rid = 0x18;
1116 	hba->bar2_res = bus_alloc_resource_any(hba->pcidev,
1117 			SYS_RES_MEMORY, &hba->bar2_rid, RF_ACTIVE);
1118 
1119 	if (hba->bar2_res == NULL) {
1120 		bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
1121 					hba->bar0_rid, hba->bar0_res);
1122 		device_printf(hba->pcidev, "failed to get iop bar2.\n");
1123 		return -1;
1124 	}
1125 
1126 	hba->bar2t = rman_get_bustag(hba->bar2_res);
1127 	hba->bar2h = rman_get_bushandle(hba->bar2_res);
1128 	hba->u.mv.mu = (struct hpt_iopmu_mv *)rman_get_virtual(hba->bar2_res);
1129 
1130 	if (!hba->u.mv.mu) {
1131 		bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
1132 					hba->bar0_rid, hba->bar0_res);
1133 		bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
1134 					hba->bar2_rid, hba->bar2_res);
1135 		device_printf(hba->pcidev, "alloc mem bar2 res failed\n");
1136 		return -1;
1137 	}
1138 
1139 	return 0;
1140 }
1141 
1142 static void hptiop_release_pci_res_itl(struct hpt_iop_hba *hba)
1143 {
1144 	if (hba->bar0_res)
1145 		bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
1146 			hba->bar0_rid, hba->bar0_res);
1147 }
1148 
1149 static void hptiop_release_pci_res_mv(struct hpt_iop_hba *hba)
1150 {
1151 	if (hba->bar0_res)
1152 		bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
1153 			hba->bar0_rid, hba->bar0_res);
1154 	if (hba->bar2_res)
1155 		bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
1156 			hba->bar2_rid, hba->bar2_res);
1157 }
1158 
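/*
 * Allocate, map and DMA-load the small coherent buffer (hba->ctlcfg_ptr)
 * that the MV path uses for config and ioctl requests.
 */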
1159 static int hptiop_internal_memalloc_mv(struct hpt_iop_hba *hba)
1160 {
1161 	if (bus_dma_tag_create(hba->parent_dmat,
1162 				1,
1163 				0,
1164 				BUS_SPACE_MAXADDR_32BIT,
1165 				BUS_SPACE_MAXADDR,
1166 				NULL, NULL,
1167 				0x800 - 0x8,
1168 				1,
1169 				BUS_SPACE_MAXSIZE_32BIT,
1170 				BUS_DMA_ALLOCNOW,
1171 #if __FreeBSD_version > 502000
1172 				NULL,
1173 				NULL,
1174 #endif
1175 				&hba->ctlcfg_dmat)) {
1176 		device_printf(hba->pcidev, "alloc ctlcfg_dmat failed\n");
1177 		return -1;
1178 	}
1179 
1180 	if (bus_dmamem_alloc(hba->ctlcfg_dmat, (void **)&hba->ctlcfg_ptr,
1181 #if __FreeBSD_version>501000
1182 		BUS_DMA_WAITOK | BUS_DMA_COHERENT,
1183 #else
1184 		BUS_DMA_WAITOK,
1185 #endif
1186 		&hba->ctlcfg_dmamap) != 0) {
1187 			device_printf(hba->pcidev,
1188 					"bus_dmamem_alloc failed!\n");
1189 			bus_dma_tag_destroy(hba->ctlcfg_dmat);
1190 			return -1;
1191 	}
1192 
1193 	if (bus_dmamap_load(hba->ctlcfg_dmat,
1194 			hba->ctlcfg_dmamap, hba->ctlcfg_ptr,
1195 			MVIOP_IOCTLCFG_SIZE,
1196 			hptiop_mv_map_ctlcfg, hba, 0)) {
1197 		device_printf(hba->pcidev, "bus_dmamap_load failed!\n");
1198 		if (hba->ctlcfg_dmat)
1199 			bus_dmamem_free(hba->ctlcfg_dmat,
1200 				hba->ctlcfg_ptr, hba->ctlcfg_dmamap);
1201 		bus_dma_tag_destroy(hba->ctlcfg_dmat);
1202 		return -1;
1203 	}
1204 
1205 	return 0;
1206 }
1207 
1208 static int hptiop_internal_memfree_mv(struct hpt_iop_hba *hba)
1209 {
1210 	if (hba->ctlcfg_dmat) {
1211 		bus_dmamap_unload(hba->ctlcfg_dmat, hba->ctlcfg_dmamap);
1212 		bus_dmamem_free(hba->ctlcfg_dmat,
1213 					hba->ctlcfg_ptr, hba->ctlcfg_dmamap);
1214 		bus_dma_tag_destroy(hba->ctlcfg_dmat);
1215 	}
1216 
1217 	return 0;
1218 }
1219 
1220 /*
1221  * CAM driver interface
1222  */
1223 static device_method_t driver_methods[] = {
1224 	/* Device interface */
1225 	DEVMETHOD(device_probe,     hptiop_probe),
1226 	DEVMETHOD(device_attach,    hptiop_attach),
1227 	DEVMETHOD(device_detach,    hptiop_detach),
1228 	DEVMETHOD(device_shutdown,  hptiop_shutdown),
1229 	{ 0, 0 }
1230 };
1231 
1232 static struct hptiop_adapter_ops hptiop_itl_ops = {
1233 	.iop_wait_ready    = hptiop_wait_ready_itl,
1234 	.internal_memalloc = 0,
1235 	.internal_memfree  = 0,
1236 	.alloc_pci_res     = hptiop_alloc_pci_res_itl,
1237 	.release_pci_res   = hptiop_release_pci_res_itl,
1238 	.enable_intr       = hptiop_enable_intr_itl,
1239 	.disable_intr      = hptiop_disable_intr_itl,
1240 	.get_config        = hptiop_get_config_itl,
1241 	.set_config        = hptiop_set_config_itl,
1242 	.iop_intr          = hptiop_intr_itl,
1243 	.post_msg          = hptiop_post_msg_itl,
1244 	.post_req          = hptiop_post_req_itl,
1245 	.do_ioctl          = hptiop_do_ioctl_itl,
1246 };
1247 
1248 static struct hptiop_adapter_ops hptiop_mv_ops = {
1249 	.iop_wait_ready    = hptiop_wait_ready_mv,
1250 	.internal_memalloc = hptiop_internal_memalloc_mv,
1251 	.internal_memfree  = hptiop_internal_memfree_mv,
1252 	.alloc_pci_res     = hptiop_alloc_pci_res_mv,
1253 	.release_pci_res   = hptiop_release_pci_res_mv,
1254 	.enable_intr       = hptiop_enable_intr_mv,
1255 	.disable_intr      = hptiop_disable_intr_mv,
1256 	.get_config        = hptiop_get_config_mv,
1257 	.set_config        = hptiop_set_config_mv,
1258 	.iop_intr          = hptiop_intr_mv,
1259 	.post_msg          = hptiop_post_msg_mv,
1260 	.post_req          = hptiop_post_req_mv,
1261 	.do_ioctl          = hptiop_do_ioctl_mv,
1262 };
1263 
1264 static driver_t hptiop_pci_driver = {
1265 	driver_name,
1266 	driver_methods,
1267 	sizeof(struct hpt_iop_hba)
1268 };
1269 
1270 DRIVER_MODULE(hptiop, pci, hptiop_pci_driver, hptiop_devclass, 0, 0);
1271 MODULE_DEPEND(hptiop, cam, 1, 1, 1);
1272 
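/*
 * PCI probe: match HighPoint (vendor 0x1103) device IDs and select the
 * ITL or MV register interface accordingly.
 */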
1273 static int hptiop_probe(device_t dev)
1274 {
1275 	struct hpt_iop_hba *hba;
1276 	u_int32_t id;
1277 	static char buf[256];
1278 	int sas = 0;
1279 	struct hptiop_adapter_ops *ops;
1280 
1281 	if (pci_get_vendor(dev) != 0x1103)
1282 		return (ENXIO);
1283 
1284 	id = pci_get_device(dev);
1285 
1286 	switch (id) {
1287 		case 0x4210:
1288 		case 0x4211:
1289 		case 0x4310:
1290 		case 0x4311:
1291 		case 0x4320:
1292 		case 0x4321:
1293  		case 0x4322:
1294 			sas = 1;	/* FALLTHROUGH */
1295 		case 0x3220:
1296 		case 0x3320:
1297 		case 0x3410:
1298 		case 0x3520:
1299 		case 0x3510:
1300 		case 0x3511:
1301 		case 0x3521:
1302 		case 0x3522:
1303 		case 0x3530:
1304 		case 0x3540:
1305 		case 0x3560:
1306 			ops = &hptiop_itl_ops;
1307 			break;
1308 		case 0x3020:
1309 		case 0x3120:
1310 		case 0x3122:
1311 			ops = &hptiop_mv_ops;
1312 			break;
1313 		default:
1314 			return (ENXIO);
1315 	}
1316 
1317 	device_printf(dev, "adapter at PCI %d:%d:%d, IRQ %d\n",
1318 		pci_get_bus(dev), pci_get_slot(dev),
1319 		pci_get_function(dev), pci_get_irq(dev));
1320 
1321 	sprintf(buf, "RocketRAID %x %s Controller\n",
1322 				id, sas ? "SAS" : "SATA");
1323 	device_set_desc_copy(dev, buf);
1324 
1325 	hba = (struct hpt_iop_hba *)device_get_softc(dev);
1326 	bzero(hba, sizeof(struct hpt_iop_hba));
1327 	hba->ops = ops;
1328 
1329 	KdPrint(("hba->ops=%p\n", hba->ops));
1330 	return 0;
1331 }
1332 
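/*
 * Attach: map PCI resources, create DMA tags, read the IOP configuration,
 * register a CAM SIM/bus, hook up the interrupt and create the control
 * device node.  Errors unwind through the labels at the bottom.
 */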
1333 static int hptiop_attach(device_t dev)
1334 {
1335 	struct hpt_iop_hba *hba = (struct hpt_iop_hba *)device_get_softc(dev);
1336 	struct hpt_iop_request_get_config  iop_config;
1337 	struct hpt_iop_request_set_config  set_config;
1338 	int rid = 0;
1339 	struct cam_devq *devq;
1340 	struct ccb_setasync ccb;
1341 	u_int32_t unit = device_get_unit(dev);
1342 
1343 	device_printf(dev, "%d RocketRAID 3xxx/4xxx controller driver %s\n",
1344 			unit, driver_version);
1345 
1346 	KdPrint(("hptiop: attach(%d, %d/%d/%d) ops=%p\n", unit,
1347 		pci_get_bus(dev), pci_get_slot(dev),
1348 		pci_get_function(dev), hba->ops));
1349 
1350 #if __FreeBSD_version >=440000
1351 	pci_enable_busmaster(dev);
1352 #endif
1353 	hba->pcidev = dev;
1354 	hba->pciunit = unit;
1355 
1356 	if (hba->ops->alloc_pci_res(hba))
1357 		return ENXIO;
1358 
1359 	if (hba->ops->iop_wait_ready(hba, 2000)) {
1360 		device_printf(dev, "adapter is not ready\n");
1361 		goto release_pci_res;
1362 	}
1363 
1364 #if (__FreeBSD_version >= 500000)
1365 	mtx_init(&hba->lock, "hptioplock", NULL, MTX_DEF);
1366 #endif
1367 
1368 	if (bus_dma_tag_create(bus_get_dma_tag(dev),/* PCI parent */
1369 			1,  /* alignment */
1370 			0, /* boundary */
1371 			BUS_SPACE_MAXADDR,  /* lowaddr */
1372 			BUS_SPACE_MAXADDR,  /* highaddr */
1373 			NULL, NULL,         /* filter, filterarg */
1374 			BUS_SPACE_MAXSIZE_32BIT,    /* maxsize */
1375 			BUS_SPACE_UNRESTRICTED, /* nsegments */
1376 			BUS_SPACE_MAXSIZE_32BIT,    /* maxsegsize */
1377 			0,      /* flags */
1378 #if __FreeBSD_version>502000
1379 			NULL,   /* lockfunc */
1380 			NULL,       /* lockfuncarg */
1381 #endif
1382 			&hba->parent_dmat   /* tag */))
1383 	{
1384 		device_printf(dev, "alloc parent_dmat failed\n");
1385 		goto release_pci_res;
1386 	}
1387 
1388 	if (hba->ops->internal_memalloc) {
1389 		if (hba->ops->internal_memalloc(hba)) {
1390 			device_printf(dev, "alloc srb_dmat failed\n");
1391 			goto destroy_parent_tag;
1392 		}
1393 	}
1394 
1395 	if (hba->ops->get_config(hba, &iop_config)) {
1396 		device_printf(dev, "get iop config failed.\n");
1397 		goto get_config_failed;
1398 	}
1399 
1400 	hba->firmware_version = iop_config.firmware_version;
1401 	hba->interface_version = iop_config.interface_version;
1402 	hba->max_requests = iop_config.max_requests;
1403 	hba->max_devices = iop_config.max_devices;
1404 	hba->max_request_size = iop_config.request_size;
1405 	hba->max_sg_count = iop_config.max_sg_count;
1406 
1407 	if (bus_dma_tag_create(hba->parent_dmat,/* parent */
1408 			4,  /* alignment */
1409 			BUS_SPACE_MAXADDR_32BIT+1, /* boundary */
1410 			BUS_SPACE_MAXADDR,  /* lowaddr */
1411 			BUS_SPACE_MAXADDR,  /* highaddr */
1412 			NULL, NULL,         /* filter, filterarg */
1413 			PAGE_SIZE * (hba->max_sg_count-1),  /* maxsize */
1414 			hba->max_sg_count,  /* nsegments */
1415 			0x20000,    /* maxsegsize */
1416 			BUS_DMA_ALLOCNOW,       /* flags */
1417 #if __FreeBSD_version>502000
1418 			busdma_lock_mutex,  /* lockfunc */
1419 			&hba->lock,     /* lockfuncarg */
1420 #endif
1421 			&hba->io_dmat   /* tag */))
1422 	{
1423 		device_printf(dev, "alloc io_dmat failed\n");
1424 		goto get_config_failed;
1425 	}
1426 
1427 	if (bus_dma_tag_create(hba->parent_dmat,/* parent */
1428 			1,  /* alignment */
1429 			0, /* boundary */
1430 			BUS_SPACE_MAXADDR_32BIT,    /* lowaddr */
1431 			BUS_SPACE_MAXADDR,  /* highaddr */
1432 			NULL, NULL,         /* filter, filterarg */
1433 			HPT_SRB_MAX_SIZE * HPT_SRB_MAX_QUEUE_SIZE + 0x20,
1434 			1,  /* nsegments */
1435 			BUS_SPACE_MAXSIZE_32BIT,    /* maxsegsize */
1436 			0,      /* flags */
1437 #if __FreeBSD_version>502000
1438 			NULL,   /* lockfunc */
1439 			NULL,       /* lockfuncarg */
1440 #endif
1441 			&hba->srb_dmat  /* tag */))
1442 	{
1443 		device_printf(dev, "alloc srb_dmat failed\n");
1444 		goto destroy_io_dmat;
1445 	}
1446 
1447 	if (bus_dmamem_alloc(hba->srb_dmat, (void **)&hba->uncached_ptr,
1448 #if __FreeBSD_version>501000
1449 			BUS_DMA_WAITOK | BUS_DMA_COHERENT,
1450 #else
1451 			BUS_DMA_WAITOK,
1452 #endif
1453 			&hba->srb_dmamap) != 0)
1454 	{
1455 		device_printf(dev, "srb bus_dmamem_alloc failed!\n");
1456 		goto destroy_srb_dmat;
1457 	}
1458 
1459 	if (bus_dmamap_load(hba->srb_dmat,
1460 			hba->srb_dmamap, hba->uncached_ptr,
1461 			(HPT_SRB_MAX_SIZE * HPT_SRB_MAX_QUEUE_SIZE) + 0x20,
1462 			hptiop_map_srb, hba, 0))
1463 	{
1464 		device_printf(dev, "bus_dmamap_load failed!\n");
1465 		goto srb_dmamem_free;
1466 	}
1467 
1468 	if ((devq = cam_simq_alloc(hba->max_requests - 1 )) == NULL) {
1469 		device_printf(dev, "cam_simq_alloc failed\n");
1470 		goto srb_dmamap_unload;
1471 	}
1472 
1473 #if __FreeBSD_version <700000
1474 	hba->sim = cam_sim_alloc(hptiop_action, hptiop_poll, driver_name,
1475 			hba, unit, hba->max_requests - 1, 1, devq);
1476 #else
1477 	hba->sim = cam_sim_alloc(hptiop_action, hptiop_poll, driver_name,
1478 			hba, unit, &Giant, hba->max_requests - 1, 1, devq);
1479 #endif
1480 	if (!hba->sim) {
1481 		device_printf(dev, "cam_sim_alloc failed\n");
1482 		cam_simq_free(devq);
1483 		goto srb_dmamap_unload;
1484 	}
1485 #if __FreeBSD_version <700000
1486 	if (xpt_bus_register(hba->sim, 0) != CAM_SUCCESS)
1487 #else
1488 	if (xpt_bus_register(hba->sim, dev, 0) != CAM_SUCCESS)
1489 #endif
1490 	{
1491 		device_printf(dev, "xpt_bus_register failed\n");
1492 		goto free_cam_sim;
1493 	}
1494 
1495 	if (xpt_create_path(&hba->path, /*periph */ NULL,
1496 			cam_sim_path(hba->sim), CAM_TARGET_WILDCARD,
1497 			CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
1498 		device_printf(dev, "xpt_create_path failed\n");
1499 		goto deregister_xpt_bus;
1500 	}
1501 
1502 	bzero(&set_config, sizeof(set_config));
1503 	set_config.iop_id = unit;
1504 	set_config.vbus_id = cam_sim_path(hba->sim);
1505 	set_config.max_host_request_size = HPT_SRB_MAX_REQ_SIZE;
1506 
1507 	if (hba->ops->set_config(hba, &set_config)) {
1508 		device_printf(dev, "set iop config failed.\n");
1509 		goto free_hba_path;
1510 	}
1511 
1512 	xpt_setup_ccb(&ccb.ccb_h, hba->path, /*priority*/5);
1513 	ccb.ccb_h.func_code = XPT_SASYNC_CB;
1514 	ccb.event_enable = (AC_FOUND_DEVICE | AC_LOST_DEVICE);
1515 	ccb.callback = hptiop_async;
1516 	ccb.callback_arg = hba->sim;
1517 	xpt_action((union ccb *)&ccb);
1518 
1519 	rid = 0;
1520 	if ((hba->irq_res = bus_alloc_resource(hba->pcidev, SYS_RES_IRQ,
1521 			&rid, 0, ~0ul, 1, RF_SHAREABLE | RF_ACTIVE)) == NULL) {
1522 		device_printf(dev, "allocate irq failed!\n");
1523 		goto free_hba_path;
1524 	}
1525 
1526 #if __FreeBSD_version <700000
1527 	if (bus_setup_intr(hba->pcidev, hba->irq_res, INTR_TYPE_CAM,
1528 				hptiop_pci_intr, hba, &hba->irq_handle))
1529 #else
1530 	if (bus_setup_intr(hba->pcidev, hba->irq_res, INTR_TYPE_CAM,
1531 				NULL, hptiop_pci_intr, hba, &hba->irq_handle))
1532 #endif
1533 	{
1534 		device_printf(dev, "allocate intr function failed!\n");
1535 		goto free_irq_resource;
1536 	}
1537 
1538 	if (hptiop_send_sync_msg(hba,
1539 			IOPMU_INBOUND_MSG0_START_BACKGROUND_TASK, 5000)) {
1540 		device_printf(dev, "fail to start background task\n");
1541 		goto teardown_irq_resource;
1542 	}
1543 
1544 	hba->ops->enable_intr(hba);
1545 
1546 	hba->ioctl_dev = make_dev(&hptiop_cdevsw, unit,
1547 				UID_ROOT, GID_WHEEL /*GID_OPERATOR*/,
1548 				S_IRUSR | S_IWUSR, "%s%d", driver_name, unit);
1549 
1550 #if __FreeBSD_version < 503000
1551 	hba->ioctl_dev->si_drv1 = hba;
1552 #endif
1553 
1554 	return 0;
1555 
1556 
1557 teardown_irq_resource:
1558 	bus_teardown_intr(dev, hba->irq_res, hba->irq_handle);
1559 
1560 free_irq_resource:
1561 	bus_release_resource(dev, SYS_RES_IRQ, 0, hba->irq_res);
1562 
1563 free_hba_path:
1564 	xpt_free_path(hba->path);
1565 
1566 deregister_xpt_bus:
1567 	xpt_bus_deregister(cam_sim_path(hba->sim));
1568 
1569 free_cam_sim:
1570 	cam_sim_free(hba->sim, /*free devq*/ TRUE);
1571 
1572 srb_dmamap_unload:
1573 	if (hba->uncached_ptr)
1574 		bus_dmamap_unload(hba->srb_dmat, hba->srb_dmamap);
1575 
1576 srb_dmamem_free:
1577 	if (hba->uncached_ptr)
1578 		bus_dmamem_free(hba->srb_dmat,
1579 			hba->uncached_ptr, hba->srb_dmamap);
1580 
1581 destroy_srb_dmat:
1582 	if (hba->srb_dmat)
1583 		bus_dma_tag_destroy(hba->srb_dmat);
1584 
1585 destroy_io_dmat:
1586 	if (hba->io_dmat)
1587 		bus_dma_tag_destroy(hba->io_dmat);
1588 
1589 get_config_failed:
1590 	if (hba->ops->internal_memfree)
1591 		hba->ops->internal_memfree(hba);
1592 
1593 destroy_parent_tag:
1594 	if (hba->parent_dmat)
1595 		bus_dma_tag_destroy(hba->parent_dmat);
1596 
1597 release_pci_res:
1598 	if (hba->ops->release_pci_res)
1599 		hba->ops->release_pci_res(hba);
1600 
1601 	return ENXIO;
1602 }
1603 
1604 static int hptiop_detach(device_t dev)
1605 {
1606 	struct hpt_iop_hba * hba = (struct hpt_iop_hba *)device_get_softc(dev);
1607 	int i;
1608 	int error = EBUSY;
1609 
1610 	hptiop_lock_adapter(hba);
1611 	for (i = 0; i < hba->max_devices; i++)
1612 		if (hptiop_os_query_remove_device(hba, i)) {
1613 			device_printf(dev, "%d file system is busy. id=%d",
1614 						hba->pciunit, i);
1615 			goto out;
1616 		}
1617 
1618 	if ((error = hptiop_shutdown(dev)) != 0)
1619 		goto out;
1620 	if (hptiop_send_sync_msg(hba,
1621 		IOPMU_INBOUND_MSG0_STOP_BACKGROUND_TASK, 60000))
1622 		goto out;
1623 
1624 	hptiop_release_resource(hba);
1625 	error = 0;
1626 out:
1627 	hptiop_unlock_adapter(hba);
1628 	return error;
1629 }
1630 
1631 static int hptiop_shutdown(device_t dev)
1632 {
1633 	struct hpt_iop_hba * hba = (struct hpt_iop_hba *)device_get_softc(dev);
1634 
1635 	int error = 0;
1636 
1637 	if (hba->flag & HPT_IOCTL_FLAG_OPEN) {
1638 		device_printf(dev, "%d device is busy", hba->pciunit);
1639 		return EBUSY;
1640 	}
1641 
1642 	hba->ops->disable_intr(hba);
1643 
1644 	if (hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_SHUTDOWN, 60000))
1645 		error = EBUSY;
1646 
1647 	return error;
1648 }
1649 
1650 static void hptiop_pci_intr(void *arg)
1651 {
1652 	struct hpt_iop_hba * hba = (struct hpt_iop_hba *)arg;
1653 	hptiop_lock_adapter(hba);
1654 	hba->ops->iop_intr(hba);
1655 	hptiop_unlock_adapter(hba);
1656 }
1657 
1658 static void hptiop_poll(struct cam_sim *sim)
1659 {
1660 	hptiop_pci_intr(cam_sim_softc(sim));
1661 }
1662 
1663 static void hptiop_async(void * callback_arg, u_int32_t code,
1664 					struct cam_path * path, void * arg)
1665 {
1666 }
1667 
1668 static void hptiop_enable_intr_itl(struct hpt_iop_hba *hba)
1669 {
1670 	BUS_SPACE_WRT4_ITL(outbound_intmask,
1671 		~(IOPMU_OUTBOUND_INT_POSTQUEUE | IOPMU_OUTBOUND_INT_MSG0));
1672 }
1673 
1674 static void hptiop_enable_intr_mv(struct hpt_iop_hba *hba)
1675 {
1676 	u_int32_t int_mask;
1677 
1678 	int_mask = BUS_SPACE_RD4_MV0(outbound_intmask);
1679 
1680 	int_mask |= MVIOP_MU_OUTBOUND_INT_POSTQUEUE
1681 			| MVIOP_MU_OUTBOUND_INT_MSG;
1682 	BUS_SPACE_WRT4_MV0(outbound_intmask, int_mask);
1683 }
1684 
1685 static void hptiop_disable_intr_itl(struct hpt_iop_hba *hba)
1686 {
1687 	u_int32_t int_mask;
1688 
1689 	int_mask = BUS_SPACE_RD4_ITL(outbound_intmask);
1690 
1691 	int_mask |= IOPMU_OUTBOUND_INT_POSTQUEUE | IOPMU_OUTBOUND_INT_MSG0;
1692 	BUS_SPACE_WRT4_ITL(outbound_intmask, int_mask);
1693 	BUS_SPACE_RD4_ITL(outbound_intstatus);
1694 }
1695 
1696 static void hptiop_disable_intr_mv(struct hpt_iop_hba *hba)
1697 {
1698 	u_int32_t int_mask;
1699 	int_mask = BUS_SPACE_RD4_MV0(outbound_intmask);
1700 
1701 	int_mask &= ~(MVIOP_MU_OUTBOUND_INT_MSG
1702 			| MVIOP_MU_OUTBOUND_INT_POSTQUEUE);
1703 	BUS_SPACE_WRT4_MV0(outbound_intmask, int_mask);
1704 	BUS_SPACE_RD4_MV0(outbound_intmask);
1705 }
1706 
1707 static int hptiop_reset_adapter(struct hpt_iop_hba * hba)
1708 {
1709 	return hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_RESET, 60000);
1710 }
1711 
1712 static void *hptiop_get_srb(struct hpt_iop_hba * hba)
1713 {
1714 	struct hpt_iop_srb * srb;
1715 
1716 	if (hba->srb_list) {
1717 		srb = hba->srb_list;
1718 		hba->srb_list = srb->next;
1719 		return srb;
1720 	}
1721 
1722 	return NULL;
1723 }
1724 
1725 static void hptiop_free_srb(struct hpt_iop_hba *hba, struct hpt_iop_srb *srb)
1726 {
1727 	srb->next = hba->srb_list;
1728 	hba->srb_list = srb;
1729 }
1730 
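/*
 * CAM action entry point.  XPT_SCSI_IO requests are mapped for DMA and
 * posted to the IOP; most other function codes are answered inline.
 */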
1731 static void hptiop_action(struct cam_sim *sim, union ccb *ccb)
1732 {
1733 	struct hpt_iop_hba * hba = (struct hpt_iop_hba *)cam_sim_softc(sim);
1734 	struct hpt_iop_srb * srb;
1735 
1736 	switch (ccb->ccb_h.func_code) {
1737 
1738 	case XPT_SCSI_IO:
1739 		hptiop_lock_adapter(hba);
1740 		if (ccb->ccb_h.target_lun != 0 ||
1741 			ccb->ccb_h.target_id >= hba->max_devices ||
1742 			(ccb->ccb_h.flags & CAM_CDB_PHYS))
1743 		{
1744 			ccb->ccb_h.status = CAM_TID_INVALID;
1745 			xpt_done(ccb);
1746 			goto scsi_done;
1747 		}
1748 
1749 		if ((srb = hptiop_get_srb(hba)) == NULL) {
1750 			device_printf(hba->pcidev, "srb allocated failed");
1751 			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
1752 			xpt_done(ccb);
1753 			goto scsi_done;
1754 		}
1755 
1756 		srb->ccb = ccb;
1757 
1758 		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE)
1759 			hptiop_post_scsi_command(srb, NULL, 0, 0);
1760 		else if ((ccb->ccb_h.flags & CAM_SCATTER_VALID) == 0) {
1761 			if ((ccb->ccb_h.flags & CAM_DATA_PHYS) == 0) {
1762 				int error;
1763 
1764 				error = bus_dmamap_load(hba->io_dmat,
1765 						srb->dma_map,
1766 						ccb->csio.data_ptr,
1767 						ccb->csio.dxfer_len,
1768 						hptiop_post_scsi_command,
1769 						srb, 0);
1770 
1771 				if (error && error != EINPROGRESS) {
1772 					device_printf(hba->pcidev,
1773 						"%d bus_dmamap_load error %d",
1774 						hba->pciunit, error);
1775 					xpt_freeze_simq(hba->sim, 1);
1776 					ccb->ccb_h.status = CAM_REQ_CMP_ERR;
1777 invalid:
1778 					hptiop_free_srb(hba, srb);
1779 					xpt_done(ccb);
1780 					goto scsi_done;
1781 				}
1782 			}
1783 			else {
1784 				device_printf(hba->pcidev,
1785 					"CAM_DATA_PHYS not supported");
1786 				ccb->ccb_h.status = CAM_REQ_CMP_ERR;
1787 				goto invalid;
1788 			}
1789 		}
1790 		else {
1791 			struct bus_dma_segment *segs;
1792 
1793 			if ((ccb->ccb_h.flags & CAM_SG_LIST_PHYS) == 0 ||
1794 				(ccb->ccb_h.flags & CAM_DATA_PHYS) != 0) {
1795 				device_printf(hba->pcidev, "SCSI cmd failed");
1796 				ccb->ccb_h.status=CAM_PROVIDE_FAIL;
1797 				goto invalid;
1798 			}
1799 
1800 			segs = (struct bus_dma_segment *)ccb->csio.data_ptr;
1801 			hptiop_post_scsi_command(srb, segs,
1802 						ccb->csio.sglist_cnt, 0);
1803 		}
1804 
1805 scsi_done:
1806 		hptiop_unlock_adapter(hba);
1807 		return;
1808 
1809 	case XPT_RESET_BUS:
1810 		device_printf(hba->pcidev, "reset adapter");
1811 		hptiop_lock_adapter(hba);
1812 		hba->msg_done = 0;
1813 		hptiop_reset_adapter(hba);
1814 		hptiop_unlock_adapter(hba);
1815 		break;
1816 
1817 	case XPT_GET_TRAN_SETTINGS:
1818 	case XPT_SET_TRAN_SETTINGS:
1819 		ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
1820 		break;
1821 
1822 	case XPT_CALC_GEOMETRY:
1823 #if __FreeBSD_version >= 500000
1824 		cam_calc_geometry(&ccb->ccg, 1);
1825 #else
1826 		ccb->ccg.heads = 255;
1827 		ccb->ccg.secs_per_track = 63;
1828 		ccb->ccg.cylinders = ccb->ccg.volume_size /
1829 				(ccb->ccg.heads * ccb->ccg.secs_per_track);
1830 		ccb->ccb_h.status = CAM_REQ_CMP;
1831 #endif
1832 		break;
1833 
1834 	case XPT_PATH_INQ:
1835 	{
1836 		struct ccb_pathinq *cpi = &ccb->cpi;
1837 
1838 		cpi->version_num = 1;
1839 		cpi->hba_inquiry = PI_SDTR_ABLE;
1840 		cpi->target_sprt = 0;
1841 		cpi->hba_misc = PIM_NOBUSRESET;
1842 		cpi->hba_eng_cnt = 0;
1843 		cpi->max_target = hba->max_devices;
1844 		cpi->max_lun = 0;
1845 		cpi->unit_number = cam_sim_unit(sim);
1846 		cpi->bus_id = cam_sim_bus(sim);
1847 		cpi->initiator_id = hba->max_devices;
1848 		cpi->base_transfer_speed = 3300;
1849 
1850 		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
1851 		strncpy(cpi->hba_vid, "HPT   ", HBA_IDLEN);
1852 		strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
1853 		cpi->transport = XPORT_SPI;
1854 		cpi->transport_version = 2;
1855 		cpi->protocol = PROTO_SCSI;
1856 		cpi->protocol_version = SCSI_REV_2;
1857 		cpi->ccb_h.status = CAM_REQ_CMP;
1858 		break;
1859 	}
1860 
1861 	default:
1862 		ccb->ccb_h.status = CAM_REQ_INVALID;
1863 		break;
1864 	}
1865 
1866 	xpt_done(ccb);
1867 	return;
1868 }
1869 
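/*
 * Build and post a SCSI command to an ITL (RR3xxx) IOP.  For SRBs flagged
 * HPT_SRB_FLAG_HIGH_MEM_ACESS the request is assembled on the stack and
 * copied into a slot taken from the inbound queue; otherwise it is built
 * inside the host SRB and posted by physical address.
 */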
1870 static void hptiop_post_req_itl(struct hpt_iop_hba *hba,
1871 				struct hpt_iop_srb *srb,
1872 				bus_dma_segment_t *segs, int nsegs)
1873 {
1874 	int idx;
1875 	union ccb *ccb = srb->ccb;
1876 	u_int8_t *cdb;
1877 
1878 	if (ccb->ccb_h.flags & CAM_CDB_POINTER)
1879 		cdb = ccb->csio.cdb_io.cdb_ptr;
1880 	else
1881 		cdb = ccb->csio.cdb_io.cdb_bytes;
1882 
1883 	KdPrint(("ccb=%p %x-%x-%x\n",
1884 		ccb, *(u_int32_t *)cdb, *((u_int32_t *)cdb+1), *((u_int32_t *)cdb+2)));
1885 
1886 	if (srb->srb_flag & HPT_SRB_FLAG_HIGH_MEM_ACESS) {
1887 		u_int32_t iop_req32;
1888 		struct hpt_iop_request_scsi_command req;
1889 
1890 		iop_req32 = BUS_SPACE_RD4_ITL(inbound_queue);
1891 
1892 		if (iop_req32 == IOPMU_QUEUE_EMPTY) {
1893 			device_printf(hba->pcidev, "invalid req offset\n");
1894 			ccb->ccb_h.status = CAM_BUSY;
1895 			bus_dmamap_unload(hba->io_dmat, srb->dma_map);
1896 			hptiop_free_srb(hba, srb);
1897 			xpt_done(ccb);
1898 			return;
1899 		}
1900 
1901 		if (ccb->csio.dxfer_len && nsegs > 0) {
1902 			struct hpt_iopsg *psg = req.sg_list;
1903 			for (idx = 0; idx < nsegs; idx++, psg++) {
1904 				psg->pci_address = (u_int64_t)segs[idx].ds_addr;
1905 				psg->size = segs[idx].ds_len;
1906 				psg->eot = 0;
1907 			}
1908 			psg[-1].eot = 1;
1909 		}
1910 
1911 		bcopy(cdb, req.cdb, ccb->csio.cdb_len);
1912 
1913 		req.header.size = offsetof(struct hpt_iop_request_scsi_command, sg_list)
1914 				+ nsegs*sizeof(struct hpt_iopsg);
1915 		req.header.type = IOP_REQUEST_TYPE_SCSI_COMMAND;
1916 		req.header.flags = 0;
1917 		req.header.result = IOP_RESULT_PENDING;
1918 		req.header.context = (u_int64_t)(unsigned long)srb;
1919 		req.dataxfer_length = ccb->csio.dxfer_len;
1920 		req.channel = 0;
1921 		req.target = ccb->ccb_h.target_id;
1922 		req.lun = ccb->ccb_h.target_lun;
1923 
1924 		bus_space_write_region_1(hba->bar0t, hba->bar0h, iop_req32,
1925 			(u_int8_t *)&req, req.header.size);
1926 
1927 		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1928 			bus_dmamap_sync(hba->io_dmat,
1929 				srb->dma_map, BUS_DMASYNC_PREREAD);
1930 		}
1931 		else if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT)
1932 			bus_dmamap_sync(hba->io_dmat,
1933 				srb->dma_map, BUS_DMASYNC_PREWRITE);
1934 
1935 		BUS_SPACE_WRT4_ITL(inbound_queue, iop_req32);
1936 	} else {
1937 		struct hpt_iop_request_scsi_command *req;
1938 
1939 		req = (struct hpt_iop_request_scsi_command *)srb;
1940 		if (ccb->csio.dxfer_len && nsegs > 0) {
1941 			struct hpt_iopsg *psg = req->sg_list;
1942 			for (idx = 0; idx < nsegs; idx++, psg++) {
1943 				psg->pci_address =
1944 					(u_int64_t)segs[idx].ds_addr;
1945 				psg->size = segs[idx].ds_len;
1946 				psg->eot = 0;
1947 			}
1948 			psg[-1].eot = 1;
1949 		}
1950 
1951 		bcopy(cdb, req->cdb, ccb->csio.cdb_len);
1952 
1953 		req->header.type = IOP_REQUEST_TYPE_SCSI_COMMAND;
1954 		req->header.result = IOP_RESULT_PENDING;
1955 		req->dataxfer_length = ccb->csio.dxfer_len;
1956 		req->channel = 0;
1957 		req->target = ccb->ccb_h.target_id;
1958 		req->lun = ccb->ccb_h.target_lun;
1959 		req->header.size = offsetof(struct hpt_iop_request_scsi_command, sg_list)
1960 			+ nsegs*sizeof(struct hpt_iopsg);
1961 		req->header.context = (u_int64_t)srb->index |
1962 						IOPMU_QUEUE_ADDR_HOST_BIT;
1963 		req->header.flags = IOP_REQUEST_FLAG_OUTPUT_CONTEXT;
1964 
1965 		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1966 			bus_dmamap_sync(hba->io_dmat,
1967 				srb->dma_map, BUS_DMASYNC_PREREAD);
1968 		} else if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
1969 			bus_dmamap_sync(hba->io_dmat,
1970 				srb->dma_map, BUS_DMASYNC_PREWRITE);
1971 		}
1972 
1973 		if (hba->firmware_version > 0x01020000
1974 			|| hba->interface_version > 0x01020000) {
1975 			u_int32_t size_bits;
1976 
1977 			if (req->header.size < 256)
1978 				size_bits = IOPMU_QUEUE_REQUEST_SIZE_BIT;
1979 			else if (req->header.size < 512)
1980 				size_bits = IOPMU_QUEUE_ADDR_HOST_BIT;
1981 			else
1982 				size_bits = IOPMU_QUEUE_REQUEST_SIZE_BIT
1983 						| IOPMU_QUEUE_ADDR_HOST_BIT;
1984 
1985 			BUS_SPACE_WRT4_ITL(inbound_queue,
1986 				(u_int32_t)srb->phy_addr | size_bits);
1987 		} else
1988 			BUS_SPACE_WRT4_ITL(inbound_queue, (u_int32_t)srb->phy_addr
1989 				|IOPMU_QUEUE_ADDR_HOST_BIT);
1990 	}
1991 }
1992 
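/*
 * Build the SCSI command inside the host SRB and post its physical address
 * to the MV (RR4xxx) inbound queue; the low bits encode the request size in
 * 256-byte units, capped at 3.
 */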
1993 static void hptiop_post_req_mv(struct hpt_iop_hba *hba,
1994 				struct hpt_iop_srb *srb,
1995 				bus_dma_segment_t *segs, int nsegs)
1996 {
1997 	int idx, size;
1998 	union ccb *ccb = srb->ccb;
1999 	u_int8_t *cdb;
2000 	struct hpt_iop_request_scsi_command *req;
2001 	u_int64_t req_phy;
2002 
2003 	req = (struct hpt_iop_request_scsi_command *)srb;
2004 	req_phy = srb->phy_addr;
2005 
2006 	if (ccb->csio.dxfer_len && nsegs > 0) {
2007 		struct hpt_iopsg *psg = req->sg_list;
2008 		for (idx = 0; idx < nsegs; idx++, psg++) {
2009 			psg->pci_address = (u_int64_t)segs[idx].ds_addr;
2010 			psg->size = segs[idx].ds_len;
2011 			psg->eot = 0;
2012 		}
2013 		psg[-1].eot = 1;
2014 	}
2015 	if (ccb->ccb_h.flags & CAM_CDB_POINTER)
2016 		cdb = ccb->csio.cdb_io.cdb_ptr;
2017 	else
2018 		cdb = ccb->csio.cdb_io.cdb_bytes;
2019 
2020 	bcopy(cdb, req->cdb, ccb->csio.cdb_len);
2021 	req->header.type = IOP_REQUEST_TYPE_SCSI_COMMAND;
2022 	req->header.result = IOP_RESULT_PENDING;
2023 	req->dataxfer_length = ccb->csio.dxfer_len;
2024 	req->channel = 0;
2025 	req->target = ccb->ccb_h.target_id;
2026 	req->lun = ccb->ccb_h.target_lun;
2027 	req->header.size = sizeof(struct hpt_iop_request_scsi_command)
2028 				- sizeof(struct hpt_iopsg)
2029 				+ nsegs * sizeof(struct hpt_iopsg);
2030 	if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
2031 		bus_dmamap_sync(hba->io_dmat,
2032 			srb->dma_map, BUS_DMASYNC_PREREAD);
2033 	}
2034 	else if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT)
2035 		bus_dmamap_sync(hba->io_dmat,
2036 			srb->dma_map, BUS_DMASYNC_PREWRITE);
2037 	req->header.context = (u_int64_t)srb->index
2038 					<< MVIOP_REQUEST_NUMBER_START_BIT
2039 					| MVIOP_CMD_TYPE_SCSI;
2040 	req->header.flags = IOP_REQUEST_FLAG_OUTPUT_CONTEXT;
2041 	size = req->header.size >> 8;
2042 	hptiop_mv_inbound_write(req_phy
2043 			| MVIOP_MU_QUEUE_ADDR_HOST_BIT
2044 			| (size > 3 ? 3 : size), hba);
2045 }
2046 
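/*
 * bus_dmamap_load() callback for XPT_SCSI_IO: fail the CCB with CAM_BUSY if
 * the mapping failed or produced too many segments, otherwise hand the
 * segment list to the controller-specific post routine.
 */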
2047 static void hptiop_post_scsi_command(void *arg, bus_dma_segment_t *segs,
2048 					int nsegs, int error)
2049 {
2050 	struct hpt_iop_srb *srb = (struct hpt_iop_srb *)arg;
2051 	union ccb *ccb = srb->ccb;
2052 	struct hpt_iop_hba *hba = srb->hba;
2053 
2054 	if (error || nsegs > hba->max_sg_count) {
2055 		KdPrint(("hptiop: func_code=%x tid=%x lun=%x nsegs=%d\n",
2056 			ccb->ccb_h.func_code,
2057 			ccb->ccb_h.target_id,
2058 			ccb->ccb_h.target_lun, nsegs));
2059 		ccb->ccb_h.status = CAM_BUSY;
2060 		bus_dmamap_unload(hba->io_dmat, srb->dma_map);
2061 		hptiop_free_srb(hba, srb);
2062 		xpt_done(ccb);
2063 		return;
2064 	}
2065 
2066 	hba->ops->post_req(hba, srb, segs, nsegs);
2067 }
2068 
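/*
 * bus_dmamap_load() callback for the MV control/config buffer: record its
 * bus address and virtual pointer rounded up to a 32-byte boundary.
 */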
2069 static void hptiop_mv_map_ctlcfg(void *arg, bus_dma_segment_t *segs,
2070 				int nsegs, int error)
2071 {
2072 	struct hpt_iop_hba *hba = (struct hpt_iop_hba *)arg;
2073 	hba->ctlcfgcmd_phy = ((u_int64_t)segs->ds_addr + 0x1F)
2074 				& ~(u_int64_t)0x1F;
2075 	hba->ctlcfg_ptr = (u_int8_t *)(((unsigned long)hba->ctlcfg_ptr + 0x1F)
2076 				& ~0x1F);
2077 }
2078 
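/*
 * bus_dmamap_load() callback for the SRB pool: align the uncached buffer to
 * 32 bytes, carve it into fixed-size SRBs, create a DMA map for each and
 * place it on the free list.  ITL IOPs store the physical address shifted
 * right by 5 bits.
 */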
2079 static void hptiop_map_srb(void *arg, bus_dma_segment_t *segs,
2080 				int nsegs, int error)
2081 {
2082 	struct hpt_iop_hba * hba = (struct hpt_iop_hba *)arg;
2083 	bus_addr_t phy_addr = (segs->ds_addr + 0x1F) & ~(bus_addr_t)0x1F;
2084 	struct hpt_iop_srb *srb, *tmp_srb;
2085 	int i;
2086 
2087 	if (error || nsegs == 0) {
2088 		device_printf(hba->pcidev, "hptiop_map_srb error");
2089 		return;
2090 	}
2091 
2092 	/* map srb */
2093 	srb = (struct hpt_iop_srb *)
2094 		(((unsigned long)hba->uncached_ptr + 0x1F)
2095 		& ~(unsigned long)0x1F);
2096 
2097 	for (i = 0; i < HPT_SRB_MAX_QUEUE_SIZE; i++) {
2098 		tmp_srb = (struct hpt_iop_srb *)
2099 					((char *)srb + i * HPT_SRB_MAX_SIZE);
2100 		if (((unsigned long)tmp_srb & 0x1F) == 0) {
2101 			if (bus_dmamap_create(hba->io_dmat,
2102 						0, &tmp_srb->dma_map)) {
2103 				device_printf(hba->pcidev, "dmamap create failed");
2104 				return;
2105 			}
2106 
2107 			bzero(tmp_srb, sizeof(struct hpt_iop_srb));
2108 			tmp_srb->hba = hba;
2109 			tmp_srb->index = i;
2110 			if (hba->ctlcfg_ptr == 0) {	/* ITL IOP */
2111 				tmp_srb->phy_addr = (u_int64_t)(u_int32_t)
2112 							(phy_addr >> 5);
2113 				if (phy_addr & IOPMU_MAX_MEM_SUPPORT_MASK_32G)
2114 					tmp_srb->srb_flag =
2115 						HPT_SRB_FLAG_HIGH_MEM_ACESS;
2116 			} else {
2117 				tmp_srb->phy_addr = phy_addr;
2118 			}
2119 
2120 			hptiop_free_srb(hba, tmp_srb);
2121 			hba->srb[i] = tmp_srb;
2122 			phy_addr += HPT_SRB_MAX_SIZE;
2123 		}
2124 		else {
2125 			device_printf(hba->pcidev, "invalid alignment");
2126 			return;
2127 		}
2128 	}
2129 }
2130 
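/* Completion hook for synchronous IOP messages. */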
2131 static void hptiop_os_message_callback(struct hpt_iop_hba * hba, u_int32_t msg)
2132 {
2133 	hba->msg_done = 1;
2134 }
2135 
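/*
 * Return -1 if a "da" peripheral still holds a reference on the target
 * (device busy), 0 if it is safe to remove.
 */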
2136 static  int hptiop_os_query_remove_device(struct hpt_iop_hba * hba,
2137 						int target_id)
2138 {
2139 	struct cam_periph       *periph = NULL;
2140 	struct cam_path         *path;
2141 	int                     status, retval = 0;
2142 
2143 	status = xpt_create_path(&path, NULL, hba->sim->path_id, target_id, 0);
2144 
2145 	if (status == CAM_REQ_CMP) {
2146 		if ((periph = cam_periph_find(path, "da")) != NULL) {
2147 			if (periph->refcount >= 1) {
2148 				device_printf(hba->pcidev, "%d, "
2149 					"target_id=0x%x, "
2150 					"refcount=%d",
2151 				    hba->pciunit, target_id, periph->refcount);
2152 				retval = -1;
2153 			}
2154 		}
2155 		xpt_free_path(path);
2156 	}
2157 	return retval;
2158 }
2159 
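/*
 * Undo everything set up at attach time: CAM async callback and path, the
 * SIM and bus registration, control/config and SRB DMA memory, DMA tags,
 * the interrupt, BAR mappings and the ioctl device node.
 */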
2160 static void hptiop_release_resource(struct hpt_iop_hba *hba)
2161 {
2162 	int i;
2163 	if (hba->path) {
2164 		struct ccb_setasync ccb;
2165 
2166 		xpt_setup_ccb(&ccb.ccb_h, hba->path, /*priority*/5);
2167 		ccb.ccb_h.func_code = XPT_SASYNC_CB;
2168 		ccb.event_enable = 0;
2169 		ccb.callback = hptiop_async;
2170 		ccb.callback_arg = hba->sim;
2171 		xpt_action((union ccb *)&ccb);
2172 		xpt_free_path(hba->path);
2173 	}
2174 
2175 	if (hba->sim) {
2176 		xpt_bus_deregister(cam_sim_path(hba->sim));
2177 		cam_sim_free(hba->sim, TRUE);
2178 	}
2179 
2180 	if (hba->ctlcfg_dmat) {
2181 		bus_dmamap_unload(hba->ctlcfg_dmat, hba->ctlcfg_dmamap);
2182 		bus_dmamem_free(hba->ctlcfg_dmat,
2183 					hba->ctlcfg_ptr, hba->ctlcfg_dmamap);
2184 		bus_dma_tag_destroy(hba->ctlcfg_dmat);
2185 	}
2186 
2187 	for (i = 0; i < HPT_SRB_MAX_QUEUE_SIZE; i++) {
2188 		struct hpt_iop_srb *srb = hba->srb[i];
2189 		if (srb != NULL && srb->dma_map)
2190 			bus_dmamap_destroy(hba->io_dmat, srb->dma_map);
2191 	}
2192 
2193 	if (hba->srb_dmat) {
2194 		bus_dmamap_unload(hba->srb_dmat, hba->srb_dmamap);
2195 		bus_dmamap_destroy(hba->srb_dmat, hba->srb_dmamap);
2196 		bus_dma_tag_destroy(hba->srb_dmat);
2197 	}
2198 
2199 	if (hba->io_dmat)
2200 		bus_dma_tag_destroy(hba->io_dmat);
2201 
2202 	if (hba->parent_dmat)
2203 		bus_dma_tag_destroy(hba->parent_dmat);
2204 
2205 	if (hba->irq_handle)
2206 		bus_teardown_intr(hba->pcidev, hba->irq_res, hba->irq_handle);
2207 
2208 	if (hba->irq_res)
2209 		bus_release_resource(hba->pcidev, SYS_RES_IRQ,
2210 					0, hba->irq_res);
2211 
2212 	if (hba->bar0_res)
2213 		bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
2214 					hba->bar0_rid, hba->bar0_res);
2215 	if (hba->bar2_res)
2216 		bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
2217 					hba->bar2_rid, hba->bar2_res);
2218 	if (hba->ioctl_dev)
2219 		destroy_dev(hba->ioctl_dev);
2220 }
2221