xref: /freebsd/sys/dev/hptiop/hptiop.c (revision 55141f2c8991b2a6adbf30bb0fe3e6cbc303f06d)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause
3  *
4  * HighPoint RR3xxx/4xxx RAID Driver for FreeBSD
5  * Copyright (C) 2007-2012 HighPoint Technologies, Inc. All Rights Reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26  * SUCH DAMAGE.
27  */
28 
29 #include <sys/cdefs.h>
30 #include <sys/param.h>
31 #include <sys/types.h>
32 #include <sys/cons.h>
33 #include <sys/time.h>
34 #include <sys/systm.h>
35 
36 #include <sys/stat.h>
37 #include <sys/malloc.h>
38 #include <sys/conf.h>
39 #include <sys/libkern.h>
40 #include <sys/kernel.h>
41 
42 #include <sys/kthread.h>
43 #include <sys/mutex.h>
44 #include <sys/module.h>
45 
46 #include <sys/eventhandler.h>
47 #include <sys/bus.h>
48 #include <sys/taskqueue.h>
49 #include <sys/ioccom.h>
50 
51 #include <machine/resource.h>
52 #include <machine/bus.h>
53 #include <machine/stdarg.h>
54 #include <sys/rman.h>
55 
56 #include <vm/vm.h>
57 #include <vm/pmap.h>
58 
59 #include <dev/pci/pcireg.h>
60 #include <dev/pci/pcivar.h>
61 
62 
63 #include <cam/cam.h>
64 #include <cam/cam_ccb.h>
65 #include <cam/cam_sim.h>
66 #include <cam/cam_xpt_sim.h>
67 #include <cam/cam_debug.h>
68 #include <cam/cam_periph.h>
69 #include <cam/scsi/scsi_all.h>
70 #include <cam/scsi/scsi_message.h>
71 
72 
73 #include <dev/hptiop/hptiop.h>
74 
75 static const char driver_name[] = "hptiop";
76 static const char driver_version[] = "v1.9";
77 
78 static int hptiop_send_sync_msg(struct hpt_iop_hba *hba,
79 				u_int32_t msg, u_int32_t millisec);
80 static void hptiop_request_callback_itl(struct hpt_iop_hba *hba,
81 							u_int32_t req);
82 static void hptiop_request_callback_mv(struct hpt_iop_hba *hba, u_int64_t req);
83 static void hptiop_request_callback_mvfrey(struct hpt_iop_hba *hba,
84 							u_int32_t req);
85 static void hptiop_os_message_callback(struct hpt_iop_hba *hba, u_int32_t msg);
86 static int  hptiop_do_ioctl_itl(struct hpt_iop_hba *hba,
87 				struct hpt_iop_ioctl_param *pParams);
88 static int  hptiop_do_ioctl_mv(struct hpt_iop_hba *hba,
89 				struct hpt_iop_ioctl_param *pParams);
90 static int  hptiop_do_ioctl_mvfrey(struct hpt_iop_hba *hba,
91 				struct hpt_iop_ioctl_param *pParams);
92 static int  hptiop_rescan_bus(struct hpt_iop_hba *hba);
93 static int hptiop_alloc_pci_res_itl(struct hpt_iop_hba *hba);
94 static int hptiop_alloc_pci_res_mv(struct hpt_iop_hba *hba);
95 static int hptiop_alloc_pci_res_mvfrey(struct hpt_iop_hba *hba);
96 static int hptiop_get_config_itl(struct hpt_iop_hba *hba,
97 				struct hpt_iop_request_get_config *config);
98 static int hptiop_get_config_mv(struct hpt_iop_hba *hba,
99 				struct hpt_iop_request_get_config *config);
100 static int hptiop_get_config_mvfrey(struct hpt_iop_hba *hba,
101 				struct hpt_iop_request_get_config *config);
102 static int hptiop_set_config_itl(struct hpt_iop_hba *hba,
103 				struct hpt_iop_request_set_config *config);
104 static int hptiop_set_config_mv(struct hpt_iop_hba *hba,
105 				struct hpt_iop_request_set_config *config);
106 static int hptiop_set_config_mvfrey(struct hpt_iop_hba *hba,
107 				struct hpt_iop_request_set_config *config);
108 static int hptiop_internal_memalloc_mv(struct hpt_iop_hba *hba);
109 static int hptiop_internal_memalloc_mvfrey(struct hpt_iop_hba *hba);
110 static int hptiop_internal_memfree_itl(struct hpt_iop_hba *hba);
111 static int hptiop_internal_memfree_mv(struct hpt_iop_hba *hba);
112 static int hptiop_internal_memfree_mvfrey(struct hpt_iop_hba *hba);
113 static int  hptiop_post_ioctl_command_itl(struct hpt_iop_hba *hba,
114 			u_int32_t req32, struct hpt_iop_ioctl_param *pParams);
115 static int  hptiop_post_ioctl_command_mv(struct hpt_iop_hba *hba,
116 				struct hpt_iop_request_ioctl_command *req,
117 				struct hpt_iop_ioctl_param *pParams);
118 static int  hptiop_post_ioctl_command_mvfrey(struct hpt_iop_hba *hba,
119 				struct hpt_iop_request_ioctl_command *req,
120 				struct hpt_iop_ioctl_param *pParams);
121 static void hptiop_post_req_itl(struct hpt_iop_hba *hba,
122 				struct hpt_iop_srb *srb,
123 				bus_dma_segment_t *segs, int nsegs);
124 static void hptiop_post_req_mv(struct hpt_iop_hba *hba,
125 				struct hpt_iop_srb *srb,
126 				bus_dma_segment_t *segs, int nsegs);
127 static void hptiop_post_req_mvfrey(struct hpt_iop_hba *hba,
128 				struct hpt_iop_srb *srb,
129 				bus_dma_segment_t *segs, int nsegs);
130 static void hptiop_post_msg_itl(struct hpt_iop_hba *hba, u_int32_t msg);
131 static void hptiop_post_msg_mv(struct hpt_iop_hba *hba, u_int32_t msg);
132 static void hptiop_post_msg_mvfrey(struct hpt_iop_hba *hba, u_int32_t msg);
133 static void hptiop_enable_intr_itl(struct hpt_iop_hba *hba);
134 static void hptiop_enable_intr_mv(struct hpt_iop_hba *hba);
135 static void hptiop_enable_intr_mvfrey(struct hpt_iop_hba *hba);
136 static void hptiop_disable_intr_itl(struct hpt_iop_hba *hba);
137 static void hptiop_disable_intr_mv(struct hpt_iop_hba *hba);
138 static void hptiop_disable_intr_mvfrey(struct hpt_iop_hba *hba);
139 static void hptiop_free_srb(struct hpt_iop_hba *hba, struct hpt_iop_srb *srb);
140 static int  hptiop_os_query_remove_device(struct hpt_iop_hba *hba, int tid);
141 static int  hptiop_probe(device_t dev);
142 static int  hptiop_attach(device_t dev);
143 static int  hptiop_detach(device_t dev);
144 static int  hptiop_shutdown(device_t dev);
145 static void hptiop_action(struct cam_sim *sim, union ccb *ccb);
146 static void hptiop_poll(struct cam_sim *sim);
147 static void hptiop_async(void *callback_arg, u_int32_t code,
148 					struct cam_path *path, void *arg);
149 static void hptiop_pci_intr(void *arg);
150 static void hptiop_release_resource(struct hpt_iop_hba *hba);
151 static void hptiop_reset_adapter(void *argv);
152 static d_open_t hptiop_open;
153 static d_close_t hptiop_close;
154 static d_ioctl_t hptiop_ioctl;
155 
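/*
 * Character device entry points.  The control device accepts the
 * HPT_DO_IOCONTROL and HPT_SCAN_BUS ioctls (see hptiop_ioctl below) and
 * allows only one opener at a time via HPT_IOCTL_FLAG_OPEN.
 */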
156 static struct cdevsw hptiop_cdevsw = {
157 	.d_open = hptiop_open,
158 	.d_close = hptiop_close,
159 	.d_ioctl = hptiop_ioctl,
160 	.d_name = driver_name,
161 	.d_version = D_VERSION,
162 };
163 
164 #define hba_from_dev(dev) \
165 	((struct hpt_iop_hba *)((dev)->si_drv1))
166 
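/*
 * Register access helpers.  ITL (Intel-based IOP) registers live in BAR0
 * (struct hpt_iopmu_itl).  MV (Marvell-based IOP) splits its registers
 * between BAR0 (struct hpt_iopmv_regs) and BAR2 (struct hpt_iopmu_mv),
 * while MVFREY uses BAR2 (struct hpt_iopmu_mvfrey).
 */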
167 #define BUS_SPACE_WRT4_ITL(offset, value) bus_space_write_4(hba->bar0t,\
168 		hba->bar0h, offsetof(struct hpt_iopmu_itl, offset), (value))
169 #define BUS_SPACE_RD4_ITL(offset) bus_space_read_4(hba->bar0t,\
170 		hba->bar0h, offsetof(struct hpt_iopmu_itl, offset))
171 
172 #define BUS_SPACE_WRT4_MV0(offset, value) bus_space_write_4(hba->bar0t,\
173 		hba->bar0h, offsetof(struct hpt_iopmv_regs, offset), value)
174 #define BUS_SPACE_RD4_MV0(offset) bus_space_read_4(hba->bar0t,\
175 		hba->bar0h, offsetof(struct hpt_iopmv_regs, offset))
176 #define BUS_SPACE_WRT4_MV2(offset, value) bus_space_write_4(hba->bar2t,\
177 		hba->bar2h, offsetof(struct hpt_iopmu_mv, offset), value)
178 #define BUS_SPACE_RD4_MV2(offset) bus_space_read_4(hba->bar2t,\
179 		hba->bar2h, offsetof(struct hpt_iopmu_mv, offset))
180 
181 #define BUS_SPACE_WRT4_MVFREY2(offset, value) bus_space_write_4(hba->bar2t,\
182 		hba->bar2h, offsetof(struct hpt_iopmu_mvfrey, offset), value)
183 #define BUS_SPACE_RD4_MVFREY2(offset) bus_space_read_4(hba->bar2t,\
184 		hba->bar2h, offsetof(struct hpt_iopmu_mvfrey, offset))
185 
186 static int hptiop_open(ioctl_dev_t dev, int flags,
187 					int devtype, ioctl_thread_t proc)
188 {
189 	struct hpt_iop_hba *hba = hba_from_dev(dev);
190 
191 	if (hba==NULL)
192 		return ENXIO;
193 	if (hba->flag & HPT_IOCTL_FLAG_OPEN)
194 		return EBUSY;
195 	hba->flag |= HPT_IOCTL_FLAG_OPEN;
196 	return 0;
197 }
198 
199 static int hptiop_close(ioctl_dev_t dev, int flags,
200 					int devtype, ioctl_thread_t proc)
201 {
202 	struct hpt_iop_hba *hba = hba_from_dev(dev);
203 	hba->flag &= ~(u_int32_t)HPT_IOCTL_FLAG_OPEN;
204 	return 0;
205 }
206 
207 static int hptiop_ioctl(ioctl_dev_t dev, u_long cmd, caddr_t data,
208 					int flags, ioctl_thread_t proc)
209 {
210 	int ret = EFAULT;
211 	struct hpt_iop_hba *hba = hba_from_dev(dev);
212 
213 	switch (cmd) {
214 	case HPT_DO_IOCONTROL:
215 		ret = hba->ops->do_ioctl(hba,
216 				(struct hpt_iop_ioctl_param *)data);
217 		break;
218 	case HPT_SCAN_BUS:
219 		ret = hptiop_rescan_bus(hba);
220 		break;
221 	}
222 	return ret;
223 }
224 
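/*
 * Pop one 64-bit completion from the MV outbound queue.  If the tail and
 * head pointers differ, read outbound_q[tail], advance the tail (wrapping
 * at MVIOP_QUEUE_LEN) and write it back; return 0 when the queue is empty.
 */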
225 static u_int64_t hptiop_mv_outbound_read(struct hpt_iop_hba *hba)
226 {
227 	u_int64_t p;
228 	u_int32_t outbound_tail = BUS_SPACE_RD4_MV2(outbound_tail);
229 	u_int32_t outbound_head = BUS_SPACE_RD4_MV2(outbound_head);
230 
231 	if (outbound_tail != outbound_head) {
232 		bus_space_read_region_4(hba->bar2t, hba->bar2h,
233 			offsetof(struct hpt_iopmu_mv,
234 				outbound_q[outbound_tail]),
235 			(u_int32_t *)&p, 2);
236 
237 		outbound_tail++;
238 
239 		if (outbound_tail == MVIOP_QUEUE_LEN)
240 			outbound_tail = 0;
241 
242 		BUS_SPACE_WRT4_MV2(outbound_tail, outbound_tail);
243 		return p;
244 	} else
245 		return 0;
246 }
247 
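/*
 * Post a 64-bit request descriptor into the MV inbound queue at the current
 * head, advance the head (wrapping at MVIOP_QUEUE_LEN) and ring the inbound
 * doorbell to notify the IOP.
 */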
248 static void hptiop_mv_inbound_write(u_int64_t p, struct hpt_iop_hba *hba)
249 {
250 	u_int32_t inbound_head = BUS_SPACE_RD4_MV2(inbound_head);
251 	u_int32_t head = inbound_head + 1;
252 
253 	if (head == MVIOP_QUEUE_LEN)
254 		head = 0;
255 
256 	bus_space_write_region_4(hba->bar2t, hba->bar2h,
257 			offsetof(struct hpt_iopmu_mv, inbound_q[inbound_head]),
258 			(u_int32_t *)&p, 2);
259 	BUS_SPACE_WRT4_MV2(inbound_head, head);
260 	BUS_SPACE_WRT4_MV0(inbound_doorbell, MVIOP_MU_INBOUND_INT_POSTQUEUE);
261 }
262 
263 static void hptiop_post_msg_itl(struct hpt_iop_hba *hba, u_int32_t msg)
264 {
265 	BUS_SPACE_WRT4_ITL(inbound_msgaddr0, msg);
266 	BUS_SPACE_RD4_ITL(outbound_intstatus);
267 }
268 
269 static void hptiop_post_msg_mv(struct hpt_iop_hba *hba, u_int32_t msg)
270 {
271 
272 	BUS_SPACE_WRT4_MV2(inbound_msg, msg);
273 	BUS_SPACE_WRT4_MV0(inbound_doorbell, MVIOP_MU_INBOUND_INT_MSG);
274 
275 	BUS_SPACE_RD4_MV0(outbound_intmask);
276 }
277 
278 static void hptiop_post_msg_mvfrey(struct hpt_iop_hba *hba, u_int32_t msg)
279 {
280 	BUS_SPACE_WRT4_MVFREY2(f0_to_cpu_msg_a, msg);
281 	BUS_SPACE_RD4_MVFREY2(f0_to_cpu_msg_a);
282 }
283 
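/*
 * Wait up to 'millisec' ms for the ITL firmware to post a request on the
 * inbound queue as its ready handshake, then hand it back through the
 * outbound queue.  Returns 0 on success, -1 on timeout.
 */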
284 static int hptiop_wait_ready_itl(struct hpt_iop_hba * hba, u_int32_t millisec)
285 {
286 	u_int32_t req=0;
287 	int i;
288 
289 	for (i = 0; i < millisec; i++) {
290 		req = BUS_SPACE_RD4_ITL(inbound_queue);
291 		if (req != IOPMU_QUEUE_EMPTY)
292 			break;
293 		DELAY(1000);
294 	}
295 
296 	if (req!=IOPMU_QUEUE_EMPTY) {
297 		BUS_SPACE_WRT4_ITL(outbound_queue, req);
298 		BUS_SPACE_RD4_ITL(outbound_intstatus);
299 		return 0;
300 	}
301 
302 	return -1;
303 }
304 
305 static int hptiop_wait_ready_mv(struct hpt_iop_hba * hba, u_int32_t millisec)
306 {
307 	if (hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_NOP, millisec))
308 		return -1;
309 
310 	return 0;
311 }
312 
313 static int hptiop_wait_ready_mvfrey(struct hpt_iop_hba * hba,
314 							u_int32_t millisec)
315 {
316 	if (hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_NOP, millisec))
317 		return -1;
318 
319 	return 0;
320 }
321 
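/*
 * Completion handler for ITL adapters.  'index' is either a host SRB slot
 * (IOPMU_QUEUE_MASK_HOST_BITS set) or the offset of an IOP-owned request in
 * BAR0.  SCSI completions translate IOP_RESULT_* codes into CAM status,
 * copy sense data on check condition and finish the CCB; IOCTL completions
 * wake the waiting thread.
 */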
322 static void hptiop_request_callback_itl(struct hpt_iop_hba * hba,
323 							u_int32_t index)
324 {
325 	struct hpt_iop_srb *srb;
326 	struct hpt_iop_request_scsi_command *req=NULL;
327 	union ccb *ccb;
328 	u_int8_t *cdb;
329 	u_int32_t result, temp, dxfer;
330 	u_int64_t temp64;
331 
332 	if (index & IOPMU_QUEUE_MASK_HOST_BITS) { /*host req*/
333 		if (hba->firmware_version > 0x01020000 ||
334 			hba->interface_version > 0x01020000) {
335 			srb = hba->srb[index & ~(u_int32_t)
336 				(IOPMU_QUEUE_ADDR_HOST_BIT
337 				| IOPMU_QUEUE_REQUEST_RESULT_BIT)];
338 			req = (struct hpt_iop_request_scsi_command *)srb;
339 			if (index & IOPMU_QUEUE_REQUEST_RESULT_BIT)
340 				result = IOP_RESULT_SUCCESS;
341 			else
342 				result = req->header.result;
343 		} else {
344 			srb = hba->srb[index &
345 				~(u_int32_t)IOPMU_QUEUE_ADDR_HOST_BIT];
346 			req = (struct hpt_iop_request_scsi_command *)srb;
347 			result = req->header.result;
348 		}
349 		dxfer = req->dataxfer_length;
350 		goto srb_complete;
351 	}
352 
353 	/*iop req*/
354 	temp = bus_space_read_4(hba->bar0t, hba->bar0h, index +
355 		offsetof(struct hpt_iop_request_header, type));
356 	result = bus_space_read_4(hba->bar0t, hba->bar0h, index +
357 		offsetof(struct hpt_iop_request_header, result));
358 	switch(temp) {
359 	case IOP_REQUEST_TYPE_IOCTL_COMMAND:
360 	{
361 		temp64 = 0;
362 		bus_space_write_region_4(hba->bar0t, hba->bar0h, index +
363 			offsetof(struct hpt_iop_request_header, context),
364 			(u_int32_t *)&temp64, 2);
365 		wakeup((void *)((unsigned long)hba->u.itl.mu + index));
366 		break;
367 	}
368 
369 	case IOP_REQUEST_TYPE_SCSI_COMMAND:
370 		bus_space_read_region_4(hba->bar0t, hba->bar0h, index +
371 			offsetof(struct hpt_iop_request_header, context),
372 			(u_int32_t *)&temp64, 2);
373 		srb = (struct hpt_iop_srb *)(unsigned long)temp64;
374 		dxfer = bus_space_read_4(hba->bar0t, hba->bar0h,
375 				index + offsetof(struct hpt_iop_request_scsi_command,
376 				dataxfer_length));
377 srb_complete:
378 		ccb = (union ccb *)srb->ccb;
379 		if (ccb->ccb_h.flags & CAM_CDB_POINTER)
380 			cdb = ccb->csio.cdb_io.cdb_ptr;
381 		else
382 			cdb = ccb->csio.cdb_io.cdb_bytes;
383 
384 		if (cdb[0] == SYNCHRONIZE_CACHE) { /* ??? */
385 			ccb->ccb_h.status = CAM_REQ_CMP;
386 			goto scsi_done;
387 		}
388 
389 		switch (result) {
390 		case IOP_RESULT_SUCCESS:
391 			switch (ccb->ccb_h.flags & CAM_DIR_MASK) {
392 			case CAM_DIR_IN:
393 				bus_dmamap_sync(hba->io_dmat,
394 					srb->dma_map, BUS_DMASYNC_POSTREAD);
395 				bus_dmamap_unload(hba->io_dmat, srb->dma_map);
396 				break;
397 			case CAM_DIR_OUT:
398 				bus_dmamap_sync(hba->io_dmat,
399 					srb->dma_map, BUS_DMASYNC_POSTWRITE);
400 				bus_dmamap_unload(hba->io_dmat, srb->dma_map);
401 				break;
402 			}
403 
404 			ccb->ccb_h.status = CAM_REQ_CMP;
405 			break;
406 
407 		case IOP_RESULT_BAD_TARGET:
408 			ccb->ccb_h.status = CAM_DEV_NOT_THERE;
409 			break;
410 		case IOP_RESULT_BUSY:
411 			ccb->ccb_h.status = CAM_BUSY;
412 			break;
413 		case IOP_RESULT_INVALID_REQUEST:
414 			ccb->ccb_h.status = CAM_REQ_INVALID;
415 			break;
416 		case IOP_RESULT_FAIL:
417 			ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
418 			break;
419 		case IOP_RESULT_RESET:
420 			ccb->ccb_h.status = CAM_BUSY;
421 			break;
422 		case IOP_RESULT_CHECK_CONDITION:
423 			memset(&ccb->csio.sense_data, 0,
424 			    sizeof(ccb->csio.sense_data));
425 			if (dxfer < ccb->csio.sense_len)
426 				ccb->csio.sense_resid = ccb->csio.sense_len -
427 				    dxfer;
428 			else
429 				ccb->csio.sense_resid = 0;
430 			if (srb->srb_flag & HPT_SRB_FLAG_HIGH_MEM_ACESS) {/*iop*/
431 				bus_space_read_region_1(hba->bar0t, hba->bar0h,
432 					index + offsetof(struct hpt_iop_request_scsi_command,
433 					sg_list), (u_int8_t *)&ccb->csio.sense_data,
434 					MIN(dxfer, sizeof(ccb->csio.sense_data)));
435 			} else {
436 				memcpy(&ccb->csio.sense_data, &req->sg_list,
437 					MIN(dxfer, sizeof(ccb->csio.sense_data)));
438 			}
439 			ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
440 			ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
441 			ccb->csio.scsi_status = SCSI_STATUS_CHECK_COND;
442 			break;
443 		default:
444 			ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
445 			break;
446 		}
447 scsi_done:
448 		if (srb->srb_flag & HPT_SRB_FLAG_HIGH_MEM_ACESS)
449 			BUS_SPACE_WRT4_ITL(outbound_queue, index);
450 
451 		ccb->csio.resid = ccb->csio.dxfer_len - dxfer;
452 
453 		hptiop_free_srb(hba, srb);
454 		xpt_done(ccb);
455 		break;
456 	}
457 }
458 
459 static void hptiop_drain_outbound_queue_itl(struct hpt_iop_hba *hba)
460 {
461 	u_int32_t req, temp;
462 
463 	while ((req = BUS_SPACE_RD4_ITL(outbound_queue)) !=IOPMU_QUEUE_EMPTY) {
464 		if (req & IOPMU_QUEUE_MASK_HOST_BITS)
465 			hptiop_request_callback_itl(hba, req);
466 		else {
467 			temp = bus_space_read_4(hba->bar0t,
468 					hba->bar0h,req +
469 					offsetof(struct hpt_iop_request_header,
470 						flags));
471 			if (temp & IOP_REQUEST_FLAG_SYNC_REQUEST) {
472 				u_int64_t temp64;
473 				bus_space_read_region_4(hba->bar0t,
474 					hba->bar0h,req +
475 					offsetof(struct hpt_iop_request_header,
476 						context),
477 					(u_int32_t *)&temp64, 2);
478 				if (temp64) {
479 					hptiop_request_callback_itl(hba, req);
480 				} else {
481 					temp64 = 1;
482 					bus_space_write_region_4(hba->bar0t,
483 						hba->bar0h,req +
484 						offsetof(struct hpt_iop_request_header,
485 							context),
486 						(u_int32_t *)&temp64, 2);
487 				}
488 			} else
489 				hptiop_request_callback_itl(hba, req);
490 		}
491 	}
492 }
493 
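/*
 * ITL interrupt handler: acknowledge and dispatch outbound message
 * interrupts, then drain the outbound post queue.  Returns nonzero if any
 * work was done.
 */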
494 static int hptiop_intr_itl(struct hpt_iop_hba * hba)
495 {
496 	u_int32_t status;
497 	int ret = 0;
498 
499 	status = BUS_SPACE_RD4_ITL(outbound_intstatus);
500 
501 	if (status & IOPMU_OUTBOUND_INT_MSG0) {
502 		u_int32_t msg = BUS_SPACE_RD4_ITL(outbound_msgaddr0);
503 		KdPrint(("hptiop: received outbound msg %x\n", msg));
504 		BUS_SPACE_WRT4_ITL(outbound_intstatus, IOPMU_OUTBOUND_INT_MSG0);
505 		hptiop_os_message_callback(hba, msg);
506 		ret = 1;
507 	}
508 
509 	if (status & IOPMU_OUTBOUND_INT_POSTQUEUE) {
510 		hptiop_drain_outbound_queue_itl(hba);
511 		ret = 1;
512 	}
513 
514 	return ret;
515 }
516 
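/*
 * Completion handler for MV adapters.  The low 32 bits of the tag encode
 * the request type: SCSI completions carry the SRB index above
 * MVIOP_REQUEST_NUMBER_START_BIT, IOCTL completions wake the waiter, and
 * get/set-config completions just mark config_done.
 */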
517 static void hptiop_request_callback_mv(struct hpt_iop_hba * hba,
518 							u_int64_t _tag)
519 {
520 	u_int32_t context = (u_int32_t)_tag;
521 
522 	if (context & MVIOP_CMD_TYPE_SCSI) {
523 		struct hpt_iop_srb *srb;
524 		struct hpt_iop_request_scsi_command *req;
525 		union ccb *ccb;
526 		u_int8_t *cdb;
527 
528 		srb = hba->srb[context >> MVIOP_REQUEST_NUMBER_START_BIT];
529 		req = (struct hpt_iop_request_scsi_command *)srb;
530 		ccb = (union ccb *)srb->ccb;
531 		if (ccb->ccb_h.flags & CAM_CDB_POINTER)
532 			cdb = ccb->csio.cdb_io.cdb_ptr;
533 		else
534 			cdb = ccb->csio.cdb_io.cdb_bytes;
535 
536 		if (cdb[0] == SYNCHRONIZE_CACHE) { /* ??? */
537 			ccb->ccb_h.status = CAM_REQ_CMP;
538 			goto scsi_done;
539 		}
540 		if (context & MVIOP_MU_QUEUE_REQUEST_RESULT_BIT)
541 			req->header.result = IOP_RESULT_SUCCESS;
542 
543 		switch (req->header.result) {
544 		case IOP_RESULT_SUCCESS:
545 			switch (ccb->ccb_h.flags & CAM_DIR_MASK) {
546 			case CAM_DIR_IN:
547 				bus_dmamap_sync(hba->io_dmat,
548 					srb->dma_map, BUS_DMASYNC_POSTREAD);
549 				bus_dmamap_unload(hba->io_dmat, srb->dma_map);
550 				break;
551 			case CAM_DIR_OUT:
552 				bus_dmamap_sync(hba->io_dmat,
553 					srb->dma_map, BUS_DMASYNC_POSTWRITE);
554 				bus_dmamap_unload(hba->io_dmat, srb->dma_map);
555 				break;
556 			}
557 			ccb->ccb_h.status = CAM_REQ_CMP;
558 			break;
559 		case IOP_RESULT_BAD_TARGET:
560 			ccb->ccb_h.status = CAM_DEV_NOT_THERE;
561 			break;
562 		case IOP_RESULT_BUSY:
563 			ccb->ccb_h.status = CAM_BUSY;
564 			break;
565 		case IOP_RESULT_INVALID_REQUEST:
566 			ccb->ccb_h.status = CAM_REQ_INVALID;
567 			break;
568 		case IOP_RESULT_FAIL:
569 			ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
570 			break;
571 		case IOP_RESULT_RESET:
572 			ccb->ccb_h.status = CAM_BUSY;
573 			break;
574 		case IOP_RESULT_CHECK_CONDITION:
575 			memset(&ccb->csio.sense_data, 0,
576 			    sizeof(ccb->csio.sense_data));
577 			if (req->dataxfer_length < ccb->csio.sense_len)
578 				ccb->csio.sense_resid = ccb->csio.sense_len -
579 				    req->dataxfer_length;
580 			else
581 				ccb->csio.sense_resid = 0;
582 			memcpy(&ccb->csio.sense_data, &req->sg_list,
583 				MIN(req->dataxfer_length, sizeof(ccb->csio.sense_data)));
584 			ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
585 			ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
586 			ccb->csio.scsi_status = SCSI_STATUS_CHECK_COND;
587 			break;
588 		default:
589 			ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
590 			break;
591 		}
592 scsi_done:
593 		ccb->csio.resid = ccb->csio.dxfer_len - req->dataxfer_length;
594 
595 		hptiop_free_srb(hba, srb);
596 		xpt_done(ccb);
597 	} else if (context & MVIOP_CMD_TYPE_IOCTL) {
598 		struct hpt_iop_request_ioctl_command *req = hba->ctlcfg_ptr;
599 		if (context & MVIOP_MU_QUEUE_REQUEST_RESULT_BIT)
600 			hba->config_done = 1;
601 		else
602 			hba->config_done = -1;
603 		wakeup(req);
604 	} else if (context &
605 			(MVIOP_CMD_TYPE_SET_CONFIG |
606 				MVIOP_CMD_TYPE_GET_CONFIG))
607 		hba->config_done = 1;
608 	else {
609 		device_printf(hba->pcidev, "wrong callback type\n");
610 	}
611 }
612 
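/*
 * Completion handler for MVFREY adapters.  The low 4 bits of the tag hold
 * the request type and bits 4-11 the SRB index; the result bit forces the
 * request result to IOP_RESULT_SUCCESS.
 */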
613 static void hptiop_request_callback_mvfrey(struct hpt_iop_hba * hba,
614 				u_int32_t _tag)
615 {
616 	u_int32_t req_type = _tag & 0xf;
617 
618 	struct hpt_iop_srb *srb;
619 	struct hpt_iop_request_scsi_command *req;
620 	union ccb *ccb;
621 	u_int8_t *cdb;
622 
623 	switch (req_type) {
624 	case IOP_REQUEST_TYPE_GET_CONFIG:
625 	case IOP_REQUEST_TYPE_SET_CONFIG:
626 		hba->config_done = 1;
627 		break;
628 
629 	case IOP_REQUEST_TYPE_SCSI_COMMAND:
630 		srb = hba->srb[(_tag >> 4) & 0xff];
631 		req = (struct hpt_iop_request_scsi_command *)srb;
632 
633 		ccb = (union ccb *)srb->ccb;
634 
635 		callout_stop(&srb->timeout);
636 
637 		if (ccb->ccb_h.flags & CAM_CDB_POINTER)
638 			cdb = ccb->csio.cdb_io.cdb_ptr;
639 		else
640 			cdb = ccb->csio.cdb_io.cdb_bytes;
641 
642 		if (cdb[0] == SYNCHRONIZE_CACHE) { /* ??? */
643 			ccb->ccb_h.status = CAM_REQ_CMP;
644 			goto scsi_done;
645 		}
646 
647 		if (_tag & MVFREYIOPMU_QUEUE_REQUEST_RESULT_BIT)
648 			req->header.result = IOP_RESULT_SUCCESS;
649 
650 		switch (req->header.result) {
651 		case IOP_RESULT_SUCCESS:
652 			switch (ccb->ccb_h.flags & CAM_DIR_MASK) {
653 			case CAM_DIR_IN:
654 				bus_dmamap_sync(hba->io_dmat,
655 						srb->dma_map, BUS_DMASYNC_POSTREAD);
656 				bus_dmamap_unload(hba->io_dmat, srb->dma_map);
657 				break;
658 			case CAM_DIR_OUT:
659 				bus_dmamap_sync(hba->io_dmat,
660 						srb->dma_map, BUS_DMASYNC_POSTWRITE);
661 				bus_dmamap_unload(hba->io_dmat, srb->dma_map);
662 				break;
663 			}
664 			ccb->ccb_h.status = CAM_REQ_CMP;
665 			break;
666 		case IOP_RESULT_BAD_TARGET:
667 			ccb->ccb_h.status = CAM_DEV_NOT_THERE;
668 			break;
669 		case IOP_RESULT_BUSY:
670 			ccb->ccb_h.status = CAM_BUSY;
671 			break;
672 		case IOP_RESULT_INVALID_REQUEST:
673 			ccb->ccb_h.status = CAM_REQ_INVALID;
674 			break;
675 		case IOP_RESULT_FAIL:
676 			ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
677 			break;
678 		case IOP_RESULT_RESET:
679 			ccb->ccb_h.status = CAM_BUSY;
680 			break;
681 		case IOP_RESULT_CHECK_CONDITION:
682 			memset(&ccb->csio.sense_data, 0,
683 			       sizeof(ccb->csio.sense_data));
684 			if (req->dataxfer_length < ccb->csio.sense_len)
685 				ccb->csio.sense_resid = ccb->csio.sense_len -
686 				req->dataxfer_length;
687 			else
688 				ccb->csio.sense_resid = 0;
689 			memcpy(&ccb->csio.sense_data, &req->sg_list,
690 			       MIN(req->dataxfer_length, sizeof(ccb->csio.sense_data)));
691 			ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
692 			ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
693 			ccb->csio.scsi_status = SCSI_STATUS_CHECK_COND;
694 			break;
695 		default:
696 			ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
697 			break;
698 		}
699 scsi_done:
700 		ccb->csio.resid = ccb->csio.dxfer_len - req->dataxfer_length;
701 
702 		hptiop_free_srb(hba, srb);
703 		xpt_done(ccb);
704 		break;
705 	case IOP_REQUEST_TYPE_IOCTL_COMMAND:
706 		if (_tag & MVFREYIOPMU_QUEUE_REQUEST_RESULT_BIT)
707 			hba->config_done = 1;
708 		else
709 			hba->config_done = -1;
710 		wakeup((struct hpt_iop_request_ioctl_command *)hba->ctlcfg_ptr);
711 		break;
712 	default:
713 		device_printf(hba->pcidev, "wrong callback type\n");
714 		break;
715 	}
716 }
717 
718 static void hptiop_drain_outbound_queue_mv(struct hpt_iop_hba * hba)
719 {
720 	u_int64_t req;
721 
722 	while ((req = hptiop_mv_outbound_read(hba))) {
723 		if (req & MVIOP_MU_QUEUE_ADDR_HOST_BIT) {
724 			if (req & MVIOP_MU_QUEUE_REQUEST_RETURN_CONTEXT) {
725 				hptiop_request_callback_mv(hba, req);
726 			}
727 		}
728 	}
729 }
730 
731 static int hptiop_intr_mv(struct hpt_iop_hba * hba)
732 {
733 	u_int32_t status;
734 	int ret = 0;
735 
736 	status = BUS_SPACE_RD4_MV0(outbound_doorbell);
737 
738 	if (status)
739 		BUS_SPACE_WRT4_MV0(outbound_doorbell, ~status);
740 
741 	if (status & MVIOP_MU_OUTBOUND_INT_MSG) {
742 		u_int32_t msg = BUS_SPACE_RD4_MV2(outbound_msg);
743 		KdPrint(("hptiop: received outbound msg %x\n", msg));
744 		hptiop_os_message_callback(hba, msg);
745 		ret = 1;
746 	}
747 
748 	if (status & MVIOP_MU_OUTBOUND_INT_POSTQUEUE) {
749 		hptiop_drain_outbound_queue_mv(hba);
750 		ret = 1;
751 	}
752 
753 	return ret;
754 }
755 
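/*
 * MVFREY interrupt handler: mask PCIe function 0 interrupts while
 * processing (once initialized), acknowledge doorbell messages, then walk
 * the outbound completion list up to the shadow copy pointer and dispatch
 * each tag before re-enabling interrupts.
 */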
756 static int hptiop_intr_mvfrey(struct hpt_iop_hba * hba)
757 {
758 	u_int32_t status, _tag, cptr;
759 	int ret = 0;
760 
761 	if (hba->initialized) {
762 		BUS_SPACE_WRT4_MVFREY2(pcie_f0_int_enable, 0);
763 	}
764 
765 	status = BUS_SPACE_RD4_MVFREY2(f0_doorbell);
766 	if (status) {
767 		BUS_SPACE_WRT4_MVFREY2(f0_doorbell, status);
768 		if (status & CPU_TO_F0_DRBL_MSG_A_BIT) {
769 			u_int32_t msg = BUS_SPACE_RD4_MVFREY2(cpu_to_f0_msg_a);
770 			hptiop_os_message_callback(hba, msg);
771 		}
772 		ret = 1;
773 	}
774 
775 	status = BUS_SPACE_RD4_MVFREY2(isr_cause);
776 	if (status) {
777 		BUS_SPACE_WRT4_MVFREY2(isr_cause, status);
778 		do {
779 			cptr = *hba->u.mvfrey.outlist_cptr & 0xff;
780 			while (hba->u.mvfrey.outlist_rptr != cptr) {
781 				hba->u.mvfrey.outlist_rptr++;
782 				if (hba->u.mvfrey.outlist_rptr == hba->u.mvfrey.list_count) {
783 					hba->u.mvfrey.outlist_rptr = 0;
784 				}
785 
786 				_tag = hba->u.mvfrey.outlist[hba->u.mvfrey.outlist_rptr].val;
787 				hptiop_request_callback_mvfrey(hba, _tag);
788 				ret = 2;
789 			}
790 		} while (cptr != (*hba->u.mvfrey.outlist_cptr & 0xff));
791 	}
792 
793 	if (hba->initialized) {
794 		BUS_SPACE_WRT4_MVFREY2(pcie_f0_int_enable, 0x1010);
795 	}
796 
797 	return ret;
798 }
799 
800 static int hptiop_send_sync_request_itl(struct hpt_iop_hba * hba,
801 					u_int32_t req32, u_int32_t millisec)
802 {
803 	u_int32_t i;
804 	u_int64_t temp64;
805 
806 	BUS_SPACE_WRT4_ITL(inbound_queue, req32);
807 	BUS_SPACE_RD4_ITL(outbound_intstatus);
808 
809 	for (i = 0; i < millisec; i++) {
810 		hptiop_intr_itl(hba);
811 		bus_space_read_region_4(hba->bar0t, hba->bar0h, req32 +
812 			offsetof(struct hpt_iop_request_header, context),
813 			(u_int32_t *)&temp64, 2);
814 		if (temp64)
815 			return 0;
816 		DELAY(1000);
817 	}
818 
819 	return -1;
820 }
821 
822 static int hptiop_send_sync_request_mv(struct hpt_iop_hba *hba,
823 					void *req, u_int32_t millisec)
824 {
825 	u_int32_t i;
826 	u_int64_t phy_addr;
827 	hba->config_done = 0;
828 
829 	phy_addr = hba->ctlcfgcmd_phy |
830 			(u_int64_t)MVIOP_MU_QUEUE_ADDR_HOST_BIT;
831 	((struct hpt_iop_request_get_config *)req)->header.flags |=
832 		IOP_REQUEST_FLAG_SYNC_REQUEST |
833 		IOP_REQUEST_FLAG_OUTPUT_CONTEXT;
834 	hptiop_mv_inbound_write(phy_addr, hba);
835 	BUS_SPACE_RD4_MV0(outbound_intmask);
836 
837 	for (i = 0; i < millisec; i++) {
838 		hptiop_intr_mv(hba);
839 		if (hba->config_done)
840 			return 0;
841 		DELAY(1000);
842 	}
843 	return -1;
844 }
845 
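/*
 * Synchronously post a control request on the MVFREY inbound list: stamp
 * the header with the request's physical address, place it at the inbound
 * write pointer (wrapping at list_count and flipping CL_POINTER_TOGGLE),
 * then poll the interrupt handler until config_done or timeout.
 */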
846 static int hptiop_send_sync_request_mvfrey(struct hpt_iop_hba *hba,
847 					void *req, u_int32_t millisec)
848 {
849 	u_int32_t i, index;
850 	u_int64_t phy_addr;
851 	struct hpt_iop_request_header *reqhdr =
852 										(struct hpt_iop_request_header *)req;
853 
854 	hba->config_done = 0;
855 
856 	phy_addr = hba->ctlcfgcmd_phy;
857 	reqhdr->flags = IOP_REQUEST_FLAG_SYNC_REQUEST
858 					| IOP_REQUEST_FLAG_OUTPUT_CONTEXT
859 					| IOP_REQUEST_FLAG_ADDR_BITS
860 					| ((phy_addr >> 16) & 0xffff0000);
861 	reqhdr->context = ((phy_addr & 0xffffffff) << 32 )
862 					| IOPMU_QUEUE_ADDR_HOST_BIT | reqhdr->type;
863 
864 	hba->u.mvfrey.inlist_wptr++;
865 	index = hba->u.mvfrey.inlist_wptr & 0x3fff;
866 
867 	if (index == hba->u.mvfrey.list_count) {
868 		index = 0;
869 		hba->u.mvfrey.inlist_wptr &= ~0x3fff;
870 		hba->u.mvfrey.inlist_wptr ^= CL_POINTER_TOGGLE;
871 	}
872 
873 	hba->u.mvfrey.inlist[index].addr = phy_addr;
874 	hba->u.mvfrey.inlist[index].intrfc_len = (reqhdr->size + 3) / 4;
875 
876 	BUS_SPACE_WRT4_MVFREY2(inbound_write_ptr, hba->u.mvfrey.inlist_wptr);
877 	BUS_SPACE_RD4_MVFREY2(inbound_write_ptr);
878 
879 	for (i = 0; i < millisec; i++) {
880 		hptiop_intr_mvfrey(hba);
881 		if (hba->config_done)
882 			return 0;
883 		DELAY(1000);
884 	}
885 	return -1;
886 }
887 
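/*
 * Post a message to the IOP and poll the adapter's interrupt handler for up
 * to 'millisec' ms until msg_done is set.  Returns 0 on success, -1 on
 * timeout.
 */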
888 static int hptiop_send_sync_msg(struct hpt_iop_hba *hba,
889 					u_int32_t msg, u_int32_t millisec)
890 {
891 	u_int32_t i;
892 
893 	hba->msg_done = 0;
894 	hba->ops->post_msg(hba, msg);
895 
896 	for (i=0; i<millisec; i++) {
897 		hba->ops->iop_intr(hba);
898 		if (hba->msg_done)
899 			break;
900 		DELAY(1000);
901 	}
902 
903 	return hba->msg_done? 0 : -1;
904 }
905 
906 static int hptiop_get_config_itl(struct hpt_iop_hba * hba,
907 				struct hpt_iop_request_get_config * config)
908 {
909 	u_int32_t req32;
910 
911 	config->header.size = sizeof(struct hpt_iop_request_get_config);
912 	config->header.type = IOP_REQUEST_TYPE_GET_CONFIG;
913 	config->header.flags = IOP_REQUEST_FLAG_SYNC_REQUEST;
914 	config->header.result = IOP_RESULT_PENDING;
915 	config->header.context = 0;
916 
917 	req32 = BUS_SPACE_RD4_ITL(inbound_queue);
918 	if (req32 == IOPMU_QUEUE_EMPTY)
919 		return -1;
920 
921 	bus_space_write_region_4(hba->bar0t, hba->bar0h,
922 			req32, (u_int32_t *)config,
923 			sizeof(struct hpt_iop_request_header) >> 2);
924 
925 	if (hptiop_send_sync_request_itl(hba, req32, 20000)) {
926 		KdPrint(("hptiop: get config send cmd failed"));
927 		return -1;
928 	}
929 
930 	bus_space_read_region_4(hba->bar0t, hba->bar0h,
931 			req32, (u_int32_t *)config,
932 			sizeof(struct hpt_iop_request_get_config) >> 2);
933 
934 	BUS_SPACE_WRT4_ITL(outbound_queue, req32);
935 
936 	return 0;
937 }
938 
939 static int hptiop_get_config_mv(struct hpt_iop_hba * hba,
940 				struct hpt_iop_request_get_config * config)
941 {
942 	struct hpt_iop_request_get_config *req;
943 
944 	if (!(req = hba->ctlcfg_ptr))
945 		return -1;
946 
947 	req->header.flags = 0;
948 	req->header.type = IOP_REQUEST_TYPE_GET_CONFIG;
949 	req->header.size = sizeof(struct hpt_iop_request_get_config);
950 	req->header.result = IOP_RESULT_PENDING;
951 	req->header.context = MVIOP_CMD_TYPE_GET_CONFIG;
952 
953 	if (hptiop_send_sync_request_mv(hba, req, 20000)) {
954 		KdPrint(("hptiop: get config send cmd failed"));
955 		return -1;
956 	}
957 
958 	*config = *req;
959 	return 0;
960 }
961 
962 static int hptiop_get_config_mvfrey(struct hpt_iop_hba * hba,
963 				struct hpt_iop_request_get_config * config)
964 {
965 	struct hpt_iop_request_get_config *info = hba->u.mvfrey.config;
966 
967 	if (info->header.size != sizeof(struct hpt_iop_request_get_config) ||
968 	    info->header.type != IOP_REQUEST_TYPE_GET_CONFIG) {
969 		KdPrint(("hptiop: header size %x/%x type %x/%x",
970 			 info->header.size, (int)sizeof(struct hpt_iop_request_get_config),
971 			 info->header.type, IOP_REQUEST_TYPE_GET_CONFIG));
972 		return -1;
973 	}
974 
975 	config->interface_version = info->interface_version;
976 	config->firmware_version = info->firmware_version;
977 	config->max_requests = info->max_requests;
978 	config->request_size = info->request_size;
979 	config->max_sg_count = info->max_sg_count;
980 	config->data_transfer_length = info->data_transfer_length;
981 	config->alignment_mask = info->alignment_mask;
982 	config->max_devices = info->max_devices;
983 	config->sdram_size = info->sdram_size;
984 
985 	KdPrint(("hptiop: maxreq %x reqsz %x datalen %x maxdev %x sdram %x",
986 		 config->max_requests, config->request_size,
987 		 config->data_transfer_length, config->max_devices,
988 		 config->sdram_size));
989 
990 	return 0;
991 }
992 
993 static int hptiop_set_config_itl(struct hpt_iop_hba *hba,
994 				struct hpt_iop_request_set_config *config)
995 {
996 	u_int32_t req32;
997 
998 	req32 = BUS_SPACE_RD4_ITL(inbound_queue);
999 
1000 	if (req32 == IOPMU_QUEUE_EMPTY)
1001 		return -1;
1002 
1003 	config->header.size = sizeof(struct hpt_iop_request_set_config);
1004 	config->header.type = IOP_REQUEST_TYPE_SET_CONFIG;
1005 	config->header.flags = IOP_REQUEST_FLAG_SYNC_REQUEST;
1006 	config->header.result = IOP_RESULT_PENDING;
1007 	config->header.context = 0;
1008 
1009 	bus_space_write_region_4(hba->bar0t, hba->bar0h, req32,
1010 		(u_int32_t *)config,
1011 		sizeof(struct hpt_iop_request_set_config) >> 2);
1012 
1013 	if (hptiop_send_sync_request_itl(hba, req32, 20000)) {
1014 		KdPrint(("hptiop: set config send cmd failed"));
1015 		return -1;
1016 	}
1017 
1018 	BUS_SPACE_WRT4_ITL(outbound_queue, req32);
1019 
1020 	return 0;
1021 }
1022 
1023 static int hptiop_set_config_mv(struct hpt_iop_hba *hba,
1024 				struct hpt_iop_request_set_config *config)
1025 {
1026 	struct hpt_iop_request_set_config *req;
1027 
1028 	if (!(req = hba->ctlcfg_ptr))
1029 		return -1;
1030 
1031 	memcpy((u_int8_t *)req + sizeof(struct hpt_iop_request_header),
1032 		(u_int8_t *)config + sizeof(struct hpt_iop_request_header),
1033 		sizeof(struct hpt_iop_request_set_config) -
1034 			sizeof(struct hpt_iop_request_header));
1035 
1036 	req->header.flags = 0;
1037 	req->header.type = IOP_REQUEST_TYPE_SET_CONFIG;
1038 	req->header.size = sizeof(struct hpt_iop_request_set_config);
1039 	req->header.result = IOP_RESULT_PENDING;
1040 	req->header.context = MVIOP_CMD_TYPE_SET_CONFIG;
1041 
1042 	if (hptiop_send_sync_request_mv(hba, req, 20000)) {
1043 		KdPrint(("hptiop: set config send cmd failed"));
1044 		return -1;
1045 	}
1046 
1047 	return 0;
1048 }
1049 
1050 static int hptiop_set_config_mvfrey(struct hpt_iop_hba *hba,
1051 				struct hpt_iop_request_set_config *config)
1052 {
1053 	struct hpt_iop_request_set_config *req;
1054 
1055 	if (!(req = hba->ctlcfg_ptr))
1056 		return -1;
1057 
1058 	memcpy((u_int8_t *)req + sizeof(struct hpt_iop_request_header),
1059 		(u_int8_t *)config + sizeof(struct hpt_iop_request_header),
1060 		sizeof(struct hpt_iop_request_set_config) -
1061 			sizeof(struct hpt_iop_request_header));
1062 
1063 	req->header.type = IOP_REQUEST_TYPE_SET_CONFIG;
1064 	req->header.size = sizeof(struct hpt_iop_request_set_config);
1065 	req->header.result = IOP_RESULT_PENDING;
1066 
1067 	if (hptiop_send_sync_request_mvfrey(hba, req, 20000)) {
1068 		KdPrint(("hptiop: set config send cmd failed"));
1069 		return -1;
1070 	}
1071 
1072 	return 0;
1073 }
1074 
1075 static int hptiop_post_ioctl_command_itl(struct hpt_iop_hba *hba,
1076 				u_int32_t req32,
1077 				struct hpt_iop_ioctl_param *pParams)
1078 {
1079 	u_int64_t temp64;
1080 	struct hpt_iop_request_ioctl_command req;
1081 
1082 	if ((((pParams->nInBufferSize + 3) & ~3) + pParams->nOutBufferSize) >
1083 			(hba->max_request_size -
1084 			offsetof(struct hpt_iop_request_ioctl_command, buf))) {
1085 		device_printf(hba->pcidev, "request size beyond max value\n");
1086 		return -1;
1087 	}
1088 
1089 	req.header.size = offsetof(struct hpt_iop_request_ioctl_command, buf)
1090 		+ pParams->nInBufferSize;
1091 	req.header.type = IOP_REQUEST_TYPE_IOCTL_COMMAND;
1092 	req.header.flags = IOP_REQUEST_FLAG_SYNC_REQUEST;
1093 	req.header.result = IOP_RESULT_PENDING;
1094 	req.header.context = req32 + (u_int64_t)(unsigned long)hba->u.itl.mu;
1095 	req.ioctl_code = HPT_CTL_CODE_BSD_TO_IOP(pParams->dwIoControlCode);
1096 	req.inbuf_size = pParams->nInBufferSize;
1097 	req.outbuf_size = pParams->nOutBufferSize;
1098 	req.bytes_returned = 0;
1099 
1100 	bus_space_write_region_4(hba->bar0t, hba->bar0h, req32, (u_int32_t *)&req,
1101 		offsetof(struct hpt_iop_request_ioctl_command, buf)>>2);
1102 
1103 	hptiop_lock_adapter(hba);
1104 
1105 	BUS_SPACE_WRT4_ITL(inbound_queue, req32);
1106 	BUS_SPACE_RD4_ITL(outbound_intstatus);
1107 
1108 	bus_space_read_region_4(hba->bar0t, hba->bar0h, req32 +
1109 		offsetof(struct hpt_iop_request_ioctl_command, header.context),
1110 		(u_int32_t *)&temp64, 2);
1111 	while (temp64) {
1112 		if (hptiop_sleep(hba, (void *)((unsigned long)hba->u.itl.mu + req32),
1113 				PPAUSE, "hptctl", HPT_OSM_TIMEOUT)==0)
1114 			break;
1115 		hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_RESET, 60000);
1116 		bus_space_read_region_4(hba->bar0t, hba->bar0h,req32 +
1117 			offsetof(struct hpt_iop_request_ioctl_command,
1118 				header.context),
1119 			(u_int32_t *)&temp64, 2);
1120 	}
1121 
1122 	hptiop_unlock_adapter(hba);
1123 	return 0;
1124 }
1125 
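/*
 * Copy an ioctl buffer between user space and IOP memory one byte at a
 * time through bus_space accessors (copyin below, copyout further down).
 */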
1126 static int hptiop_bus_space_copyin(struct hpt_iop_hba *hba, u_int32_t bus,
1127 									void *user, int size)
1128 {
1129 	unsigned char byte;
1130 	int i;
1131 
1132 	for (i=0; i<size; i++) {
1133 		if (copyin((u_int8_t *)user + i, &byte, 1))
1134 			return -1;
1135 		bus_space_write_1(hba->bar0t, hba->bar0h, bus + i, byte);
1136 	}
1137 
1138 	return 0;
1139 }
1140 
1141 static int hptiop_bus_space_copyout(struct hpt_iop_hba *hba, u_int32_t bus,
1142 									void *user, int size)
1143 {
1144 	unsigned char byte;
1145 	int i;
1146 
1147 	for (i=0; i<size; i++) {
1148 		byte = bus_space_read_1(hba->bar0t, hba->bar0h, bus + i);
1149 		if (copyout(&byte, (u_int8_t *)user + i, 1))
1150 			return -1;
1151 	}
1152 
1153 	return 0;
1154 }
1155 
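/*
 * HPT_DO_IOCONTROL handler for ITL adapters: claim a request frame from the
 * inbound queue, copy the user input buffer into it, post it synchronously
 * and, on IOP_RESULT_SUCCESS, copy the output buffer and byte count back to
 * user space.  The frame is returned to the outbound queue on both paths.
 */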
1156 static int hptiop_do_ioctl_itl(struct hpt_iop_hba *hba,
1157 				struct hpt_iop_ioctl_param * pParams)
1158 {
1159 	u_int32_t req32;
1160 	u_int32_t result;
1161 
1162 	if ((pParams->Magic != HPT_IOCTL_MAGIC) &&
1163 		(pParams->Magic != HPT_IOCTL_MAGIC32))
1164 		return EFAULT;
1165 
1166 	req32 = BUS_SPACE_RD4_ITL(inbound_queue);
1167 	if (req32 == IOPMU_QUEUE_EMPTY)
1168 		return EFAULT;
1169 
1170 	if (pParams->nInBufferSize)
1171 		if (hptiop_bus_space_copyin(hba, req32 +
1172 			offsetof(struct hpt_iop_request_ioctl_command, buf),
1173 			(void *)pParams->lpInBuffer, pParams->nInBufferSize))
1174 			goto invalid;
1175 
1176 	if (hptiop_post_ioctl_command_itl(hba, req32, pParams))
1177 		goto invalid;
1178 
1179 	result = bus_space_read_4(hba->bar0t, hba->bar0h, req32 +
1180 			offsetof(struct hpt_iop_request_ioctl_command,
1181 				header.result));
1182 
1183 	if (result == IOP_RESULT_SUCCESS) {
1184 		if (pParams->nOutBufferSize)
1185 			if (hptiop_bus_space_copyout(hba, req32 +
1186 				offsetof(struct hpt_iop_request_ioctl_command, buf) +
1187 					((pParams->nInBufferSize + 3) & ~3),
1188 				(void *)pParams->lpOutBuffer, pParams->nOutBufferSize))
1189 				goto invalid;
1190 
1191 		if (pParams->lpBytesReturned) {
1192 			if (hptiop_bus_space_copyout(hba, req32 +
1193 				offsetof(struct hpt_iop_request_ioctl_command, bytes_returned),
1194 				(void *)pParams->lpBytesReturned, sizeof(unsigned long)))
1195 				goto invalid;
1196 		}
1197 
1198 		BUS_SPACE_WRT4_ITL(outbound_queue, req32);
1199 
1200 		return 0;
1201 	} else {
1202 invalid:
1203 		BUS_SPACE_WRT4_ITL(outbound_queue, req32);
1204 
1205 		return EFAULT;
1206 	}
1207 }
1208 
1209 static int hptiop_post_ioctl_command_mv(struct hpt_iop_hba *hba,
1210 				struct hpt_iop_request_ioctl_command *req,
1211 				struct hpt_iop_ioctl_param *pParams)
1212 {
1213 	u_int64_t req_phy;
1214 	int size = 0;
1215 
1216 	if ((((pParams->nInBufferSize + 3) & ~3) + pParams->nOutBufferSize) >
1217 			(hba->max_request_size -
1218 			offsetof(struct hpt_iop_request_ioctl_command, buf))) {
1219 		device_printf(hba->pcidev, "request size beyond max value\n");
1220 		return -1;
1221 	}
1222 
1223 	req->ioctl_code = HPT_CTL_CODE_BSD_TO_IOP(pParams->dwIoControlCode);
1224 	req->inbuf_size = pParams->nInBufferSize;
1225 	req->outbuf_size = pParams->nOutBufferSize;
1226 	req->header.size = offsetof(struct hpt_iop_request_ioctl_command, buf)
1227 					+ pParams->nInBufferSize;
1228 	req->header.context = (u_int64_t)MVIOP_CMD_TYPE_IOCTL;
1229 	req->header.type = IOP_REQUEST_TYPE_IOCTL_COMMAND;
1230 	req->header.result = IOP_RESULT_PENDING;
1231 	req->header.flags = IOP_REQUEST_FLAG_OUTPUT_CONTEXT;
1232 	size = req->header.size >> 8;
1233 	size = imin(3, size);
1234 	req_phy = hba->ctlcfgcmd_phy | MVIOP_MU_QUEUE_ADDR_HOST_BIT | size;
1235 	hptiop_mv_inbound_write(req_phy, hba);
1236 
1237 	BUS_SPACE_RD4_MV0(outbound_intmask);
1238 
1239 	while (hba->config_done == 0) {
1240 		if (hptiop_sleep(hba, req, PPAUSE,
1241 			"hptctl", HPT_OSM_TIMEOUT)==0)
1242 			continue;
1243 		hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_RESET, 60000);
1244 	}
1245 	return 0;
1246 }
1247 
1248 static int hptiop_do_ioctl_mv(struct hpt_iop_hba *hba,
1249 				struct hpt_iop_ioctl_param *pParams)
1250 {
1251 	struct hpt_iop_request_ioctl_command *req;
1252 
1253 	if ((pParams->Magic != HPT_IOCTL_MAGIC) &&
1254 		(pParams->Magic != HPT_IOCTL_MAGIC32))
1255 		return EFAULT;
1256 
1257 	req = (struct hpt_iop_request_ioctl_command *)(hba->ctlcfg_ptr);
1258 	hba->config_done = 0;
1259 	hptiop_lock_adapter(hba);
1260 	if (pParams->nInBufferSize)
1261 		if (copyin((void *)pParams->lpInBuffer,
1262 				req->buf, pParams->nInBufferSize))
1263 			goto invalid;
1264 	if (hptiop_post_ioctl_command_mv(hba, req, pParams))
1265 		goto invalid;
1266 
1267 	if (hba->config_done == 1) {
1268 		if (pParams->nOutBufferSize)
1269 			if (copyout(req->buf +
1270 				((pParams->nInBufferSize + 3) & ~3),
1271 				(void *)pParams->lpOutBuffer,
1272 				pParams->nOutBufferSize))
1273 				goto invalid;
1274 
1275 		if (pParams->lpBytesReturned)
1276 			if (copyout(&req->bytes_returned,
1277 				(void*)pParams->lpBytesReturned,
1278 				sizeof(u_int32_t)))
1279 				goto invalid;
1280 		hptiop_unlock_adapter(hba);
1281 		return 0;
1282 	} else {
1283 invalid:
1284 		hptiop_unlock_adapter(hba);
1285 		return EFAULT;
1286 	}
1287 }
1288 
1289 static int hptiop_post_ioctl_command_mvfrey(struct hpt_iop_hba *hba,
1290 				struct hpt_iop_request_ioctl_command *req,
1291 				struct hpt_iop_ioctl_param *pParams)
1292 {
1293 	u_int64_t phy_addr;
1294 	u_int32_t index;
1295 
1296 	phy_addr = hba->ctlcfgcmd_phy;
1297 
1298 	if ((((pParams->nInBufferSize + 3) & ~3) + pParams->nOutBufferSize) >
1299 			(hba->max_request_size -
1300 			offsetof(struct hpt_iop_request_ioctl_command, buf))) {
1301 		device_printf(hba->pcidev, "request size beyond max value\n");
1302 		return -1;
1303 	}
1304 
1305 	req->ioctl_code = HPT_CTL_CODE_BSD_TO_IOP(pParams->dwIoControlCode);
1306 	req->inbuf_size = pParams->nInBufferSize;
1307 	req->outbuf_size = pParams->nOutBufferSize;
1308 	req->header.size = offsetof(struct hpt_iop_request_ioctl_command, buf)
1309 					+ pParams->nInBufferSize;
1310 
1311 	req->header.type = IOP_REQUEST_TYPE_IOCTL_COMMAND;
1312 	req->header.result = IOP_RESULT_PENDING;
1313 
1314 	req->header.flags = IOP_REQUEST_FLAG_SYNC_REQUEST
1315 						| IOP_REQUEST_FLAG_OUTPUT_CONTEXT
1316 						| IOP_REQUEST_FLAG_ADDR_BITS
1317 						| ((phy_addr >> 16) & 0xffff0000);
1318 	req->header.context = ((phy_addr & 0xffffffff) << 32 )
1319 						| IOPMU_QUEUE_ADDR_HOST_BIT | req->header.type;
1320 
1321 	hba->u.mvfrey.inlist_wptr++;
1322 	index = hba->u.mvfrey.inlist_wptr & 0x3fff;
1323 
1324 	if (index == hba->u.mvfrey.list_count) {
1325 		index = 0;
1326 		hba->u.mvfrey.inlist_wptr &= ~0x3fff;
1327 		hba->u.mvfrey.inlist_wptr ^= CL_POINTER_TOGGLE;
1328 	}
1329 
1330 	hba->u.mvfrey.inlist[index].addr = phy_addr;
1331 	hba->u.mvfrey.inlist[index].intrfc_len = (req->header.size + 3) / 4;
1332 
1333 	BUS_SPACE_WRT4_MVFREY2(inbound_write_ptr, hba->u.mvfrey.inlist_wptr);
1334 	BUS_SPACE_RD4_MVFREY2(inbound_write_ptr);
1335 
1336 	while (hba->config_done == 0) {
1337 		if (hptiop_sleep(hba, req, PPAUSE,
1338 			"hptctl", HPT_OSM_TIMEOUT)==0)
1339 			continue;
1340 		hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_RESET, 60000);
1341 	}
1342 	return 0;
1343 }
1344 
1345 static int hptiop_do_ioctl_mvfrey(struct hpt_iop_hba *hba,
1346 				struct hpt_iop_ioctl_param *pParams)
1347 {
1348 	struct hpt_iop_request_ioctl_command *req;
1349 
1350 	if ((pParams->Magic != HPT_IOCTL_MAGIC) &&
1351 		(pParams->Magic != HPT_IOCTL_MAGIC32))
1352 		return EFAULT;
1353 
1354 	req = (struct hpt_iop_request_ioctl_command *)(hba->ctlcfg_ptr);
1355 	hba->config_done = 0;
1356 	hptiop_lock_adapter(hba);
1357 	if (pParams->nInBufferSize)
1358 		if (copyin((void *)pParams->lpInBuffer,
1359 				req->buf, pParams->nInBufferSize))
1360 			goto invalid;
1361 	if (hptiop_post_ioctl_command_mvfrey(hba, req, pParams))
1362 		goto invalid;
1363 
1364 	if (hba->config_done == 1) {
1365 		if (pParams->nOutBufferSize)
1366 			if (copyout(req->buf +
1367 				((pParams->nInBufferSize + 3) & ~3),
1368 				(void *)pParams->lpOutBuffer,
1369 				pParams->nOutBufferSize))
1370 				goto invalid;
1371 
1372 		if (pParams->lpBytesReturned)
1373 			if (copyout(&req->bytes_returned,
1374 				(void*)pParams->lpBytesReturned,
1375 				sizeof(u_int32_t)))
1376 				goto invalid;
1377 		hptiop_unlock_adapter(hba);
1378 		return 0;
1379 	} else {
1380 invalid:
1381 		hptiop_unlock_adapter(hba);
1382 		return EFAULT;
1383 	}
1384 }
1385 
1386 static int  hptiop_rescan_bus(struct hpt_iop_hba * hba)
1387 {
1388 	union ccb           *ccb;
1389 
1390 	if ((ccb = xpt_alloc_ccb()) == NULL)
1391 		return(ENOMEM);
1392 	if (xpt_create_path(&ccb->ccb_h.path, NULL, cam_sim_path(hba->sim),
1393 		CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
1394 		xpt_free_ccb(ccb);
1395 		return(EIO);
1396 	}
1397 	xpt_rescan(ccb);
1398 	return(0);
1399 }
1400 
1401 static  bus_dmamap_callback_t   hptiop_map_srb;
1402 static  bus_dmamap_callback_t   hptiop_post_scsi_command;
1403 static  bus_dmamap_callback_t   hptiop_mv_map_ctlcfg;
1404 static	bus_dmamap_callback_t	hptiop_mvfrey_map_ctlcfg;
1405 
1406 static int hptiop_alloc_pci_res_itl(struct hpt_iop_hba *hba)
1407 {
1408 	hba->bar0_rid = 0x10;
1409 	hba->bar0_res = bus_alloc_resource_any(hba->pcidev,
1410 			SYS_RES_MEMORY, &hba->bar0_rid, RF_ACTIVE);
1411 
1412 	if (hba->bar0_res == NULL) {
1413 		device_printf(hba->pcidev,
1414 			"failed to get iop base address.\n");
1415 		return -1;
1416 	}
1417 	hba->bar0t = rman_get_bustag(hba->bar0_res);
1418 	hba->bar0h = rman_get_bushandle(hba->bar0_res);
1419 	hba->u.itl.mu = (struct hpt_iopmu_itl *)
1420 				rman_get_virtual(hba->bar0_res);
1421 
1422 	if (!hba->u.itl.mu) {
1423 		bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
1424 					hba->bar0_rid, hba->bar0_res);
1425 		device_printf(hba->pcidev, "alloc mem res failed\n");
1426 		return -1;
1427 	}
1428 
1429 	return 0;
1430 }
1431 
1432 static int hptiop_alloc_pci_res_mv(struct hpt_iop_hba *hba)
1433 {
1434 	hba->bar0_rid = 0x10;
1435 	hba->bar0_res = bus_alloc_resource_any(hba->pcidev,
1436 			SYS_RES_MEMORY, &hba->bar0_rid, RF_ACTIVE);
1437 
1438 	if (hba->bar0_res == NULL) {
1439 		device_printf(hba->pcidev, "failed to get iop bar0.\n");
1440 		return -1;
1441 	}
1442 	hba->bar0t = rman_get_bustag(hba->bar0_res);
1443 	hba->bar0h = rman_get_bushandle(hba->bar0_res);
1444 	hba->u.mv.regs = (struct hpt_iopmv_regs *)
1445 				rman_get_virtual(hba->bar0_res);
1446 
1447 	if (!hba->u.mv.regs) {
1448 		bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
1449 					hba->bar0_rid, hba->bar0_res);
1450 		device_printf(hba->pcidev, "alloc bar0 mem res failed\n");
1451 		return -1;
1452 	}
1453 
1454 	hba->bar2_rid = 0x18;
1455 	hba->bar2_res = bus_alloc_resource_any(hba->pcidev,
1456 			SYS_RES_MEMORY, &hba->bar2_rid, RF_ACTIVE);
1457 
1458 	if (hba->bar2_res == NULL) {
1459 		bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
1460 					hba->bar0_rid, hba->bar0_res);
1461 		device_printf(hba->pcidev, "failed to get iop bar2.\n");
1462 		return -1;
1463 	}
1464 
1465 	hba->bar2t = rman_get_bustag(hba->bar2_res);
1466 	hba->bar2h = rman_get_bushandle(hba->bar2_res);
1467 	hba->u.mv.mu = (struct hpt_iopmu_mv *)rman_get_virtual(hba->bar2_res);
1468 
1469 	if (!hba->u.mv.mu) {
1470 		bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
1471 					hba->bar0_rid, hba->bar0_res);
1472 		bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
1473 					hba->bar2_rid, hba->bar2_res);
1474 		device_printf(hba->pcidev, "alloc mem bar2 res failed\n");
1475 		return -1;
1476 	}
1477 
1478 	return 0;
1479 }
1480 
1481 static int hptiop_alloc_pci_res_mvfrey(struct hpt_iop_hba *hba)
1482 {
1483 	hba->bar0_rid = 0x10;
1484 	hba->bar0_res = bus_alloc_resource_any(hba->pcidev,
1485 			SYS_RES_MEMORY, &hba->bar0_rid, RF_ACTIVE);
1486 
1487 	if (hba->bar0_res == NULL) {
1488 		device_printf(hba->pcidev, "failed to get iop bar0.\n");
1489 		return -1;
1490 	}
1491 	hba->bar0t = rman_get_bustag(hba->bar0_res);
1492 	hba->bar0h = rman_get_bushandle(hba->bar0_res);
1493 	hba->u.mvfrey.config = (struct hpt_iop_request_get_config *)
1494 				rman_get_virtual(hba->bar0_res);
1495 
1496 	if (!hba->u.mvfrey.config) {
1497 		bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
1498 					hba->bar0_rid, hba->bar0_res);
1499 		device_printf(hba->pcidev, "alloc bar0 mem res failed\n");
1500 		return -1;
1501 	}
1502 
1503 	hba->bar2_rid = 0x18;
1504 	hba->bar2_res = bus_alloc_resource_any(hba->pcidev,
1505 			SYS_RES_MEMORY, &hba->bar2_rid, RF_ACTIVE);
1506 
1507 	if (hba->bar2_res == NULL) {
1508 		bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
1509 					hba->bar0_rid, hba->bar0_res);
1510 		device_printf(hba->pcidev, "failed to get iop bar2.\n");
1511 		return -1;
1512 	}
1513 
1514 	hba->bar2t = rman_get_bustag(hba->bar2_res);
1515 	hba->bar2h = rman_get_bushandle(hba->bar2_res);
1516 	hba->u.mvfrey.mu =
1517 					(struct hpt_iopmu_mvfrey *)rman_get_virtual(hba->bar2_res);
1518 
1519 	if (!hba->u.mvfrey.mu) {
1520 		bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
1521 					hba->bar0_rid, hba->bar0_res);
1522 		bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
1523 					hba->bar2_rid, hba->bar2_res);
1524 		device_printf(hba->pcidev, "alloc mem bar2 res failed\n");
1525 		return -1;
1526 	}
1527 
1528 	return 0;
1529 }
1530 
1531 static void hptiop_release_pci_res_itl(struct hpt_iop_hba *hba)
1532 {
1533 	if (hba->bar0_res)
1534 		bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
1535 			hba->bar0_rid, hba->bar0_res);
1536 }
1537 
1538 static void hptiop_release_pci_res_mv(struct hpt_iop_hba *hba)
1539 {
1540 	if (hba->bar0_res)
1541 		bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
1542 			hba->bar0_rid, hba->bar0_res);
1543 	if (hba->bar2_res)
1544 		bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
1545 			hba->bar2_rid, hba->bar2_res);
1546 }
1547 
1548 static void hptiop_release_pci_res_mvfrey(struct hpt_iop_hba *hba)
1549 {
1550 	if (hba->bar0_res)
1551 		bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
1552 			hba->bar0_rid, hba->bar0_res);
1553 	if (hba->bar2_res)
1554 		bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
1555 			hba->bar2_rid, hba->bar2_res);
1556 }
1557 
1558 static int hptiop_internal_memalloc_mv(struct hpt_iop_hba *hba)
1559 {
1560 	if (bus_dma_tag_create(hba->parent_dmat,
1561 				1,
1562 				0,
1563 				BUS_SPACE_MAXADDR_32BIT,
1564 				BUS_SPACE_MAXADDR,
1565 				NULL, NULL,
1566 				0x800 - 0x8,
1567 				1,
1568 				BUS_SPACE_MAXSIZE_32BIT,
1569 				BUS_DMA_ALLOCNOW,
1570 				NULL,
1571 				NULL,
1572 				&hba->ctlcfg_dmat)) {
1573 		device_printf(hba->pcidev, "alloc ctlcfg_dmat failed\n");
1574 		return -1;
1575 	}
1576 
1577 	if (bus_dmamem_alloc(hba->ctlcfg_dmat, (void **)&hba->ctlcfg_ptr,
1578 		BUS_DMA_WAITOK | BUS_DMA_COHERENT,
1579 		&hba->ctlcfg_dmamap) != 0) {
1580 			device_printf(hba->pcidev,
1581 					"bus_dmamem_alloc failed!\n");
1582 			bus_dma_tag_destroy(hba->ctlcfg_dmat);
1583 			return -1;
1584 	}
1585 
1586 	if (bus_dmamap_load(hba->ctlcfg_dmat,
1587 			hba->ctlcfg_dmamap, hba->ctlcfg_ptr,
1588 			MVIOP_IOCTLCFG_SIZE,
1589 			hptiop_mv_map_ctlcfg, hba, 0)) {
1590 		device_printf(hba->pcidev, "bus_dmamap_load failed!\n");
1591 		if (hba->ctlcfg_dmat) {
1592 			bus_dmamem_free(hba->ctlcfg_dmat,
1593 				hba->ctlcfg_ptr, hba->ctlcfg_dmamap);
1594 			bus_dma_tag_destroy(hba->ctlcfg_dmat);
1595 		}
1596 		return -1;
1597 	}
1598 
1599 	return 0;
1600 }
1601 
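/*
 * Allocate the MVFREY internal DMA region.  The supported list count comes
 * from the high 16 bits of inbound_conf_ctl; the region is sized for the
 * 0x800-byte control/config buffer plus the inbound and outbound lists and
 * the outbound shadow pointer.
 */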
1602 static int hptiop_internal_memalloc_mvfrey(struct hpt_iop_hba *hba)
1603 {
1604 	u_int32_t list_count = BUS_SPACE_RD4_MVFREY2(inbound_conf_ctl);
1605 
1606 	list_count >>= 16;
1607 
1608 	if (list_count == 0) {
1609 		return -1;
1610 	}
1611 
1612 	hba->u.mvfrey.list_count = list_count;
1613 	hba->u.mvfrey.internal_mem_size = 0x800
1614 							+ list_count * sizeof(struct mvfrey_inlist_entry)
1615 							+ list_count * sizeof(struct mvfrey_outlist_entry)
1616 							+ sizeof(int);
1617 	if (bus_dma_tag_create(hba->parent_dmat,
1618 				1,
1619 				0,
1620 				BUS_SPACE_MAXADDR_32BIT,
1621 				BUS_SPACE_MAXADDR,
1622 				NULL, NULL,
1623 				hba->u.mvfrey.internal_mem_size,
1624 				1,
1625 				BUS_SPACE_MAXSIZE_32BIT,
1626 				BUS_DMA_ALLOCNOW,
1627 				NULL,
1628 				NULL,
1629 				&hba->ctlcfg_dmat)) {
1630 		device_printf(hba->pcidev, "alloc ctlcfg_dmat failed\n");
1631 		return -1;
1632 	}
1633 
1634 	if (bus_dmamem_alloc(hba->ctlcfg_dmat, (void **)&hba->ctlcfg_ptr,
1635 		BUS_DMA_WAITOK | BUS_DMA_COHERENT,
1636 		&hba->ctlcfg_dmamap) != 0) {
1637 			device_printf(hba->pcidev,
1638 					"bus_dmamem_alloc failed!\n");
1639 			bus_dma_tag_destroy(hba->ctlcfg_dmat);
1640 			return -1;
1641 	}
1642 
1643 	if (bus_dmamap_load(hba->ctlcfg_dmat,
1644 			hba->ctlcfg_dmamap, hba->ctlcfg_ptr,
1645 			hba->u.mvfrey.internal_mem_size,
1646 			hptiop_mvfrey_map_ctlcfg, hba, 0)) {
1647 		device_printf(hba->pcidev, "bus_dmamap_load failed!\n");
1648 		if (hba->ctlcfg_dmat) {
1649 			bus_dmamem_free(hba->ctlcfg_dmat,
1650 				hba->ctlcfg_ptr, hba->ctlcfg_dmamap);
1651 			bus_dma_tag_destroy(hba->ctlcfg_dmat);
1652 		}
1653 		return -1;
1654 	}
1655 
1656 	return 0;
1657 }
1658 
1659 static int hptiop_internal_memfree_itl(struct hpt_iop_hba *hba) {
1660 	return 0;
1661 }
1662 
1663 static int hptiop_internal_memfree_mv(struct hpt_iop_hba *hba)
1664 {
1665 	if (hba->ctlcfg_dmat) {
1666 		bus_dmamap_unload(hba->ctlcfg_dmat, hba->ctlcfg_dmamap);
1667 		bus_dmamem_free(hba->ctlcfg_dmat,
1668 					hba->ctlcfg_ptr, hba->ctlcfg_dmamap);
1669 		bus_dma_tag_destroy(hba->ctlcfg_dmat);
1670 	}
1671 
1672 	return 0;
1673 }
1674 
1675 static int hptiop_internal_memfree_mvfrey(struct hpt_iop_hba *hba)
1676 {
1677 	if (hba->ctlcfg_dmat) {
1678 		bus_dmamap_unload(hba->ctlcfg_dmat, hba->ctlcfg_dmamap);
1679 		bus_dmamem_free(hba->ctlcfg_dmat,
1680 					hba->ctlcfg_ptr, hba->ctlcfg_dmamap);
1681 		bus_dma_tag_destroy(hba->ctlcfg_dmat);
1682 	}
1683 
1684 	return 0;
1685 }
1686 
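/*
 * Re-arm MVFREY communication: ask the firmware to reset the comm area,
 * give the MCU ~100ms to settle, program the inbound/outbound/shadow list
 * base addresses and reset the software list pointers with the toggle bit
 * set.
 */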
1687 static int hptiop_reset_comm_mvfrey(struct hpt_iop_hba *hba)
1688 {
1689 	u_int32_t i = 100;
1690 
1691 	if (hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_RESET_COMM, 3000))
1692 		return -1;
1693 
1694 	/* wait 100ms for MCU ready */
1695 	while(i--) {
1696 		DELAY(1000);
1697 	}
1698 
1699 	BUS_SPACE_WRT4_MVFREY2(inbound_base,
1700 							hba->u.mvfrey.inlist_phy & 0xffffffff);
1701 	BUS_SPACE_WRT4_MVFREY2(inbound_base_high,
1702 							(hba->u.mvfrey.inlist_phy >> 16) >> 16);
1703 
1704 	BUS_SPACE_WRT4_MVFREY2(outbound_base,
1705 							hba->u.mvfrey.outlist_phy & 0xffffffff);
1706 	BUS_SPACE_WRT4_MVFREY2(outbound_base_high,
1707 							(hba->u.mvfrey.outlist_phy >> 16) >> 16);
1708 
1709 	BUS_SPACE_WRT4_MVFREY2(outbound_shadow_base,
1710 							hba->u.mvfrey.outlist_cptr_phy & 0xffffffff);
1711 	BUS_SPACE_WRT4_MVFREY2(outbound_shadow_base_high,
1712 							(hba->u.mvfrey.outlist_cptr_phy >> 16) >> 16);
1713 
1714 	hba->u.mvfrey.inlist_wptr = (hba->u.mvfrey.list_count - 1)
1715 								| CL_POINTER_TOGGLE;
1716 	*hba->u.mvfrey.outlist_cptr = (hba->u.mvfrey.list_count - 1)
1717 								| CL_POINTER_TOGGLE;
1718 	hba->u.mvfrey.outlist_rptr = hba->u.mvfrey.list_count - 1;
1719 
1720 	return 0;
1721 }
1722 
1723 /*
1724  * CAM driver interface
1725  */
1726 static device_method_t driver_methods[] = {
1727 	/* Device interface */
1728 	DEVMETHOD(device_probe,     hptiop_probe),
1729 	DEVMETHOD(device_attach,    hptiop_attach),
1730 	DEVMETHOD(device_detach,    hptiop_detach),
1731 	DEVMETHOD(device_shutdown,  hptiop_shutdown),
1732 	{ 0, 0 }
1733 };
1734 
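/*
 * Per-family adapter operation tables: ITL (Intel IOP based), MV (Marvell
 * based) and MV Frey based controllers each supply their own hooks.
 */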
1735 static struct hptiop_adapter_ops hptiop_itl_ops = {
1736 	.family	           = INTEL_BASED_IOP,
1737 	.iop_wait_ready    = hptiop_wait_ready_itl,
1738 	.internal_memalloc = NULL,
1739 	.internal_memfree  = hptiop_internal_memfree_itl,
1740 	.alloc_pci_res     = hptiop_alloc_pci_res_itl,
1741 	.release_pci_res   = hptiop_release_pci_res_itl,
1742 	.enable_intr       = hptiop_enable_intr_itl,
1743 	.disable_intr      = hptiop_disable_intr_itl,
1744 	.get_config        = hptiop_get_config_itl,
1745 	.set_config        = hptiop_set_config_itl,
1746 	.iop_intr          = hptiop_intr_itl,
1747 	.post_msg          = hptiop_post_msg_itl,
1748 	.post_req          = hptiop_post_req_itl,
1749 	.do_ioctl          = hptiop_do_ioctl_itl,
1750 	.reset_comm        = NULL,
1751 };
1752 
1753 static struct hptiop_adapter_ops hptiop_mv_ops = {
1754 	.family	           = MV_BASED_IOP,
1755 	.iop_wait_ready    = hptiop_wait_ready_mv,
1756 	.internal_memalloc = hptiop_internal_memalloc_mv,
1757 	.internal_memfree  = hptiop_internal_memfree_mv,
1758 	.alloc_pci_res     = hptiop_alloc_pci_res_mv,
1759 	.release_pci_res   = hptiop_release_pci_res_mv,
1760 	.enable_intr       = hptiop_enable_intr_mv,
1761 	.disable_intr      = hptiop_disable_intr_mv,
1762 	.get_config        = hptiop_get_config_mv,
1763 	.set_config        = hptiop_set_config_mv,
1764 	.iop_intr          = hptiop_intr_mv,
1765 	.post_msg          = hptiop_post_msg_mv,
1766 	.post_req          = hptiop_post_req_mv,
1767 	.do_ioctl          = hptiop_do_ioctl_mv,
1768 	.reset_comm        = NULL,
1769 };
1770 
1771 static struct hptiop_adapter_ops hptiop_mvfrey_ops = {
1772 	.family	           = MVFREY_BASED_IOP,
1773 	.iop_wait_ready    = hptiop_wait_ready_mvfrey,
1774 	.internal_memalloc = hptiop_internal_memalloc_mvfrey,
1775 	.internal_memfree  = hptiop_internal_memfree_mvfrey,
1776 	.alloc_pci_res     = hptiop_alloc_pci_res_mvfrey,
1777 	.release_pci_res   = hptiop_release_pci_res_mvfrey,
1778 	.enable_intr       = hptiop_enable_intr_mvfrey,
1779 	.disable_intr      = hptiop_disable_intr_mvfrey,
1780 	.get_config        = hptiop_get_config_mvfrey,
1781 	.set_config        = hptiop_set_config_mvfrey,
1782 	.iop_intr          = hptiop_intr_mvfrey,
1783 	.post_msg          = hptiop_post_msg_mvfrey,
1784 	.post_req          = hptiop_post_req_mvfrey,
1785 	.do_ioctl          = hptiop_do_ioctl_mvfrey,
1786 	.reset_comm        = hptiop_reset_comm_mvfrey,
1787 };
1788 
1789 static driver_t hptiop_pci_driver = {
1790 	driver_name,
1791 	driver_methods,
1792 	sizeof(struct hpt_iop_hba)
1793 };
1794 
1795 DRIVER_MODULE(hptiop, pci, hptiop_pci_driver, 0, 0);
1796 MODULE_DEPEND(hptiop, cam, 1, 1, 1);
1797 
1798 static int hptiop_probe(device_t dev)
1799 {
1800 	struct hpt_iop_hba *hba;
1801 	u_int32_t id;
1802 	static char buf[256];
1803 	int sas = 0;
1804 	struct hptiop_adapter_ops *ops;
1805 
1806 	if (pci_get_vendor(dev) != 0x1103)
1807 		return (ENXIO);
1808 
1809 	id = pci_get_device(dev);
1810 
1811 	switch (id) {
1812 		case 0x4520:
1813 		case 0x4521:
1814 		case 0x4522:
1815 			sas = 1;
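			/* FALLTHROUGH */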
1816 		case 0x3620:
1817 		case 0x3622:
1818 		case 0x3640:
1819 			ops = &hptiop_mvfrey_ops;
1820 			break;
1821 		case 0x4210:
1822 		case 0x4211:
1823 		case 0x4310:
1824 		case 0x4311:
1825 		case 0x4320:
1826 		case 0x4321:
1827 		case 0x4322:
1828 			sas = 1;
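			/* FALLTHROUGH */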
1829 		case 0x3220:
1830 		case 0x3320:
1831 		case 0x3410:
1832 		case 0x3520:
1833 		case 0x3510:
1834 		case 0x3511:
1835 		case 0x3521:
1836 		case 0x3522:
1837 		case 0x3530:
1838 		case 0x3540:
1839 		case 0x3560:
1840 			ops = &hptiop_itl_ops;
1841 			break;
1842 		case 0x3020:
1843 		case 0x3120:
1844 		case 0x3122:
1845 			ops = &hptiop_mv_ops;
1846 			break;
1847 		default:
1848 			return (ENXIO);
1849 	}
1850 
1851 	device_printf(dev, "adapter at PCI %d:%d:%d, IRQ %d\n",
1852 		pci_get_bus(dev), pci_get_slot(dev),
1853 		pci_get_function(dev), pci_get_irq(dev));
1854 
1855 	snprintf(buf, sizeof(buf), "RocketRAID %x %s Controller",
1856 				id, sas ? "SAS" : "SATA");
1857 	device_set_desc_copy(dev, buf);
1858 
1859 	hba = (struct hpt_iop_hba *)device_get_softc(dev);
1860 	bzero(hba, sizeof(struct hpt_iop_hba));
1861 	hba->ops = ops;
1862 
1863 	KdPrint(("hba->ops=%p\n", hba->ops));
1864 	return 0;
1865 }
1866 
1867 static int hptiop_attach(device_t dev)
1868 {
1869 	struct make_dev_args args;
1870 	struct hpt_iop_hba *hba = (struct hpt_iop_hba *)device_get_softc(dev);
1871 	struct hpt_iop_request_get_config  iop_config;
1872 	struct hpt_iop_request_set_config  set_config;
1873 	int rid = 0;
1874 	struct cam_devq *devq;
1875 	struct ccb_setasync ccb;
1876 	u_int32_t unit = device_get_unit(dev);
1877 
1878 	device_printf(dev, "%d RocketRAID 3xxx/4xxx controller driver %s\n",
1879 			unit, driver_version);
1880 
1881 	KdPrint(("hptiop: attach(%d, %d/%d/%d) ops=%p\n", unit,
1882 		pci_get_bus(dev), pci_get_slot(dev),
1883 		pci_get_function(dev), hba->ops));
1884 
1885 	pci_enable_busmaster(dev);
1886 	hba->pcidev = dev;
1887 	hba->pciunit = unit;
1888 
1889 	if (hba->ops->alloc_pci_res(hba))
1890 		return ENXIO;
1891 
1892 	if (hba->ops->iop_wait_ready(hba, 2000)) {
1893 		device_printf(dev, "adapter is not ready\n");
1894 		goto release_pci_res;
1895 	}
1896 
1897 	mtx_init(&hba->lock, "hptioplock", NULL, MTX_DEF);
1898 
1899 	if (bus_dma_tag_create(bus_get_dma_tag(dev),/* PCI parent */
1900 			1,  /* alignment */
1901 			0, /* boundary */
1902 			BUS_SPACE_MAXADDR,  /* lowaddr */
1903 			BUS_SPACE_MAXADDR,  /* highaddr */
1904 			NULL, NULL,         /* filter, filterarg */
1905 			BUS_SPACE_MAXSIZE_32BIT,    /* maxsize */
1906 			BUS_SPACE_UNRESTRICTED, /* nsegments */
1907 			BUS_SPACE_MAXSIZE_32BIT,    /* maxsegsize */
1908 			0,      /* flags */
1909 			NULL,   /* lockfunc */
1910 			NULL,       /* lockfuncarg */
1911 			&hba->parent_dmat   /* tag */))
1912 	{
1913 		device_printf(dev, "alloc parent_dmat failed\n");
1914 		goto release_pci_res;
1915 	}
1916 
1917 	if (hba->ops->family == MV_BASED_IOP) {
1918 		if (hba->ops->internal_memalloc(hba)) {
1919 			device_printf(dev, "internal_memalloc failed\n");
1920 			goto destroy_parent_tag;
1921 		}
1922 	}
1923 
1924 	if (hba->ops->get_config(hba, &iop_config)) {
1925 		device_printf(dev, "get iop config failed.\n");
1926 		goto get_config_failed;
1927 	}
1928 
1929 	hba->firmware_version = iop_config.firmware_version;
1930 	hba->interface_version = iop_config.interface_version;
1931 	hba->max_requests = iop_config.max_requests;
1932 	hba->max_devices = iop_config.max_devices;
1933 	hba->max_request_size = iop_config.request_size;
1934 	hba->max_sg_count = iop_config.max_sg_count;
1935 
1936 	if (hba->ops->family == MVFREY_BASED_IOP) {
1937 		if (hba->ops->internal_memalloc(hba)) {
1938 			device_printf(dev, "internal_memalloc failed\n");
1939 			goto destroy_parent_tag;
1940 		}
1941 		if (hba->ops->reset_comm(hba)) {
1942 			device_printf(dev, "reset comm failed\n");
1943 			goto get_config_failed;
1944 		}
1945 	}
1946 
1947 	if (bus_dma_tag_create(hba->parent_dmat,/* parent */
1948 			4,  /* alignment */
1949 			BUS_SPACE_MAXADDR_32BIT+1, /* boundary */
1950 			BUS_SPACE_MAXADDR,  /* lowaddr */
1951 			BUS_SPACE_MAXADDR,  /* highaddr */
1952 			NULL, NULL,         /* filter, filterarg */
1953 			PAGE_SIZE * (hba->max_sg_count-1),  /* maxsize */
1954 			hba->max_sg_count,  /* nsegments */
1955 			0x20000,    /* maxsegsize */
1956 			BUS_DMA_ALLOCNOW,       /* flags */
1957 			busdma_lock_mutex,  /* lockfunc */
1958 			&hba->lock,     /* lockfuncarg */
1959 			&hba->io_dmat   /* tag */))
1960 	{
1961 		device_printf(dev, "alloc io_dmat failed\n");
1962 		goto get_config_failed;
1963 	}
1964 
1965 	if (bus_dma_tag_create(hba->parent_dmat,/* parent */
1966 			1,  /* alignment */
1967 			0, /* boundary */
1968 			BUS_SPACE_MAXADDR_32BIT,    /* lowaddr */
1969 			BUS_SPACE_MAXADDR,  /* highaddr */
1970 			NULL, NULL,         /* filter, filterarg */
1971 			HPT_SRB_MAX_SIZE * HPT_SRB_MAX_QUEUE_SIZE + 0x20,
1972 			1,  /* nsegments */
1973 			BUS_SPACE_MAXSIZE_32BIT,    /* maxsegsize */
1974 			0,      /* flags */
1975 			NULL,   /* lockfunc */
1976 			NULL,       /* lockfuncarg */
1977 			&hba->srb_dmat  /* tag */))
1978 	{
1979 		device_printf(dev, "alloc srb_dmat failed\n");
1980 		goto destroy_io_dmat;
1981 	}
1982 
1983 	if (bus_dmamem_alloc(hba->srb_dmat, (void **)&hba->uncached_ptr,
1984 			BUS_DMA_WAITOK | BUS_DMA_COHERENT,
1985 			&hba->srb_dmamap) != 0)
1986 	{
1987 		device_printf(dev, "srb bus_dmamem_alloc failed!\n");
1988 		goto destroy_srb_dmat;
1989 	}
1990 
1991 	if (bus_dmamap_load(hba->srb_dmat,
1992 			hba->srb_dmamap, hba->uncached_ptr,
1993 			(HPT_SRB_MAX_SIZE * HPT_SRB_MAX_QUEUE_SIZE) + 0x20,
1994 			hptiop_map_srb, hba, 0))
1995 	{
1996 		device_printf(dev, "bus_dmamap_load failed!\n");
1997 		goto srb_dmamem_free;
1998 	}
1999 
2000 	if ((devq = cam_simq_alloc(hba->max_requests - 1 )) == NULL) {
2001 		device_printf(dev, "cam_simq_alloc failed\n");
2002 		goto srb_dmamap_unload;
2003 	}
2004 
2005 	hba->sim = cam_sim_alloc(hptiop_action, hptiop_poll, driver_name,
2006 			hba, unit, &hba->lock, hba->max_requests - 1, 1, devq);
2007 	if (!hba->sim) {
2008 		device_printf(dev, "cam_sim_alloc failed\n");
2009 		cam_simq_free(devq);
2010 		goto srb_dmamap_unload;
2011 	}
2012 	hptiop_lock_adapter(hba);
2013 	if (xpt_bus_register(hba->sim, dev, 0) != CAM_SUCCESS)
2014 	{
2015 		device_printf(dev, "xpt_bus_register failed\n");
2016 		goto free_cam_sim;
2017 	}
2018 
2019 	if (xpt_create_path(&hba->path, /*periph */ NULL,
2020 			cam_sim_path(hba->sim), CAM_TARGET_WILDCARD,
2021 			CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
2022 		device_printf(dev, "xpt_create_path failed\n");
2023 		goto deregister_xpt_bus;
2024 	}
2025 	hptiop_unlock_adapter(hba);
2026 
2027 	bzero(&set_config, sizeof(set_config));
2028 	set_config.iop_id = unit;
2029 	set_config.vbus_id = cam_sim_path(hba->sim);
2030 	set_config.max_host_request_size = HPT_SRB_MAX_REQ_SIZE;
2031 
2032 	if (hba->ops->set_config(hba, &set_config)) {
2033 		device_printf(dev, "set iop config failed.\n");
2034 		goto free_hba_path;
2035 	}
2036 
2037 	memset(&ccb, 0, sizeof(ccb));
2038 	xpt_setup_ccb(&ccb.ccb_h, hba->path, /*priority*/5);
2039 	ccb.ccb_h.func_code = XPT_SASYNC_CB;
2040 	ccb.event_enable = (AC_FOUND_DEVICE | AC_LOST_DEVICE);
2041 	ccb.callback = hptiop_async;
2042 	ccb.callback_arg = hba->sim;
2043 	xpt_action((union ccb *)&ccb);
2044 
2045 	rid = 0;
2046 	if ((hba->irq_res = bus_alloc_resource_any(hba->pcidev, SYS_RES_IRQ,
2047 			&rid, RF_SHAREABLE | RF_ACTIVE)) == NULL) {
2048 		device_printf(dev, "allocate irq failed!\n");
2049 		goto free_hba_path;
2050 	}
2051 
2052 	if (bus_setup_intr(hba->pcidev, hba->irq_res, INTR_TYPE_CAM | INTR_MPSAFE,
2053 				NULL, hptiop_pci_intr, hba, &hba->irq_handle))
2054 	{
2055 		device_printf(dev, "allocate intr function failed!\n");
2056 		goto free_irq_resource;
2057 	}
2058 
2059 	if (hptiop_send_sync_msg(hba,
2060 			IOPMU_INBOUND_MSG0_START_BACKGROUND_TASK, 5000)) {
2061 		device_printf(dev, "fail to start background task\n");
2062 		goto teardown_irq_resource;
2063 	}
2064 
2065 	hba->ops->enable_intr(hba);
2066 	hba->initialized = 1;
2067 
2068 	make_dev_args_init(&args);
2069 	args.mda_devsw = &hptiop_cdevsw;
2070 	args.mda_uid = UID_ROOT;
2071 	args.mda_gid = GID_WHEEL /*GID_OPERATOR*/;
2072 	args.mda_mode = S_IRUSR | S_IWUSR;
2073 	args.mda_si_drv1 = hba;
2074 
2075 	make_dev_s(&args, &hba->ioctl_dev, "%s%d", driver_name, unit);
2076 
2077 	return 0;
2078 
2079 
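/*
 * Error unwind: release resources in the reverse order of allocation.
 */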
2080 teardown_irq_resource:
2081 	bus_teardown_intr(dev, hba->irq_res, hba->irq_handle);
2082 
2083 free_irq_resource:
2084 	bus_release_resource(dev, SYS_RES_IRQ, 0, hba->irq_res);
2085 
2086 	hptiop_lock_adapter(hba);
2087 free_hba_path:
2088 	xpt_free_path(hba->path);
2089 
2090 deregister_xpt_bus:
2091 	xpt_bus_deregister(cam_sim_path(hba->sim));
2092 
2093 free_cam_sim:
2094 	cam_sim_free(hba->sim, /*free devq*/ TRUE);
2095 	hptiop_unlock_adapter(hba);
2096 
2097 srb_dmamap_unload:
2098 	if (hba->uncached_ptr)
2099 		bus_dmamap_unload(hba->srb_dmat, hba->srb_dmamap);
2100 
2101 srb_dmamem_free:
2102 	if (hba->uncached_ptr)
2103 		bus_dmamem_free(hba->srb_dmat,
2104 			hba->uncached_ptr, hba->srb_dmamap);
2105 
2106 destroy_srb_dmat:
2107 	if (hba->srb_dmat)
2108 		bus_dma_tag_destroy(hba->srb_dmat);
2109 
2110 destroy_io_dmat:
2111 	if (hba->io_dmat)
2112 		bus_dma_tag_destroy(hba->io_dmat);
2113 
2114 get_config_failed:
2115 	hba->ops->internal_memfree(hba);
2116 
2117 destroy_parent_tag:
2118 	if (hba->parent_dmat)
2119 		bus_dma_tag_destroy(hba->parent_dmat);
2120 
2121 release_pci_res:
2122 	if (hba->ops->release_pci_res)
2123 		hba->ops->release_pci_res(hba);
2124 
2125 	return ENXIO;
2126 }
2127 
2128 static int hptiop_detach(device_t dev)
2129 {
2130 	struct hpt_iop_hba * hba = (struct hpt_iop_hba *)device_get_softc(dev);
2131 	int i;
2132 	int error = EBUSY;
2133 
2134 	hptiop_lock_adapter(hba);
2135 	for (i = 0; i < hba->max_devices; i++)
2136 		if (hptiop_os_query_remove_device(hba, i)) {
2137 			device_printf(dev, "%d file system is busy, id=%d\n",
2138 						hba->pciunit, i);
2139 			goto out;
2140 		}
2141 
2142 	if ((error = hptiop_shutdown(dev)) != 0)
2143 		goto out;
2144 	if (hptiop_send_sync_msg(hba,
2145 		IOPMU_INBOUND_MSG0_STOP_BACKGROUND_TASK, 60000))
2146 		goto out;
2147 	hptiop_unlock_adapter(hba);
2148 
2149 	hptiop_release_resource(hba);
2150 	return (0);
2151 out:
2152 	hptiop_unlock_adapter(hba);
2153 	return error;
2154 }
2155 
2156 static int hptiop_shutdown(device_t dev)
2157 {
2158 	struct hpt_iop_hba * hba = (struct hpt_iop_hba *)device_get_softc(dev);
2159 
2160 	int error = 0;
2161 
2162 	if (hba->flag & HPT_IOCTL_FLAG_OPEN) {
2163 		device_printf(dev, "%d device is busy\n", hba->pciunit);
2164 		return EBUSY;
2165 	}
2166 
2167 	hba->ops->disable_intr(hba);
2168 
2169 	if (hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_SHUTDOWN, 60000))
2170 		error = EBUSY;
2171 
2172 	return error;
2173 }
2174 
2175 static void hptiop_pci_intr(void *arg)
2176 {
2177 	struct hpt_iop_hba * hba = (struct hpt_iop_hba *)arg;
2178 	hptiop_lock_adapter(hba);
2179 	hba->ops->iop_intr(hba);
2180 	hptiop_unlock_adapter(hba);
2181 }
2182 
2183 static void hptiop_poll(struct cam_sim *sim)
2184 {
2185 	struct hpt_iop_hba *hba;
2186 
2187 	hba = cam_sim_softc(sim);
2188 	hba->ops->iop_intr(hba);
2189 }
2190 
2191 static void hptiop_async(void * callback_arg, u_int32_t code,
2192 					struct cam_path * path, void * arg)
2193 {
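	/* Registered for AC_FOUND_DEVICE/AC_LOST_DEVICE, but intentionally a no-op. */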
2194 }
2195 
2196 static void hptiop_enable_intr_itl(struct hpt_iop_hba *hba)
2197 {
2198 	BUS_SPACE_WRT4_ITL(outbound_intmask,
2199 		~(IOPMU_OUTBOUND_INT_POSTQUEUE | IOPMU_OUTBOUND_INT_MSG0));
2200 }
2201 
2202 static void hptiop_enable_intr_mv(struct hpt_iop_hba *hba)
2203 {
2204 	u_int32_t int_mask;
2205 
2206 	int_mask = BUS_SPACE_RD4_MV0(outbound_intmask);
2207 
2208 	int_mask |= MVIOP_MU_OUTBOUND_INT_POSTQUEUE
2209 			| MVIOP_MU_OUTBOUND_INT_MSG;
2210 	BUS_SPACE_WRT4_MV0(outbound_intmask, int_mask);
2211 }
2212 
2213 static void hptiop_enable_intr_mvfrey(struct hpt_iop_hba *hba)
2214 {
2215 	BUS_SPACE_WRT4_MVFREY2(f0_doorbell_enable, CPU_TO_F0_DRBL_MSG_A_BIT);
2216 	BUS_SPACE_RD4_MVFREY2(f0_doorbell_enable);
2217 
2218 	BUS_SPACE_WRT4_MVFREY2(isr_enable, 0x1);
2219 	BUS_SPACE_RD4_MVFREY2(isr_enable);
2220 
2221 	BUS_SPACE_WRT4_MVFREY2(pcie_f0_int_enable, 0x1010);
2222 	BUS_SPACE_RD4_MVFREY2(pcie_f0_int_enable);
2223 }
2224 
2225 static void hptiop_disable_intr_itl(struct hpt_iop_hba *hba)
2226 {
2227 	u_int32_t int_mask;
2228 
2229 	int_mask = BUS_SPACE_RD4_ITL(outbound_intmask);
2230 
2231 	int_mask |= IOPMU_OUTBOUND_INT_POSTQUEUE | IOPMU_OUTBOUND_INT_MSG0;
2232 	BUS_SPACE_WRT4_ITL(outbound_intmask, int_mask);
2233 	BUS_SPACE_RD4_ITL(outbound_intstatus);
2234 }
2235 
2236 static void hptiop_disable_intr_mv(struct hpt_iop_hba *hba)
2237 {
2238 	u_int32_t int_mask;
2239 	int_mask = BUS_SPACE_RD4_MV0(outbound_intmask);
2240 
2241 	int_mask &= ~(MVIOP_MU_OUTBOUND_INT_MSG
2242 			| MVIOP_MU_OUTBOUND_INT_POSTQUEUE);
2243 	BUS_SPACE_WRT4_MV0(outbound_intmask,int_mask);
2244 	BUS_SPACE_RD4_MV0(outbound_intmask);
2245 }
2246 
2247 static void hptiop_disable_intr_mvfrey(struct hpt_iop_hba *hba)
2248 {
2249 	BUS_SPACE_WRT4_MVFREY2(f0_doorbell_enable, 0);
2250 	BUS_SPACE_RD4_MVFREY2(f0_doorbell_enable);
2251 
2252 	BUS_SPACE_WRT4_MVFREY2(isr_enable, 0);
2253 	BUS_SPACE_RD4_MVFREY2(isr_enable);
2254 
2255 	BUS_SPACE_WRT4_MVFREY2(pcie_f0_int_enable, 0);
2256 	BUS_SPACE_RD4_MVFREY2(pcie_f0_int_enable);
2257 }
2258 
2259 static void hptiop_reset_adapter(void *argv)
2260 {
2261 	struct hpt_iop_hba * hba = (struct hpt_iop_hba *)argv;
2262 	if (hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_RESET, 60000))
2263 		return;
2264 	hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_START_BACKGROUND_TASK, 5000);
2265 }
2266 
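/*
 * SRB free list management: a simple LIFO of pre-allocated request
 * buffers.  The list is protected by the adapter mutex (hba->lock).
 */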
2267 static void *hptiop_get_srb(struct hpt_iop_hba * hba)
2268 {
2269 	struct hpt_iop_srb * srb;
2270 
2271 	if (hba->srb_list) {
2272 		srb = hba->srb_list;
2273 		hba->srb_list = srb->next;
2274 		return srb;
2275 	}
2276 
2277 	return NULL;
2278 }
2279 
2280 static void hptiop_free_srb(struct hpt_iop_hba *hba, struct hpt_iop_srb *srb)
2281 {
2282 	srb->next = hba->srb_list;
2283 	hba->srb_list = srb;
2284 }
2285 
2286 static void hptiop_action(struct cam_sim *sim, union ccb *ccb)
2287 {
2288 	struct hpt_iop_hba * hba = (struct hpt_iop_hba *)cam_sim_softc(sim);
2289 	struct hpt_iop_srb * srb;
2290 	int error;
2291 
2292 	switch (ccb->ccb_h.func_code) {
2293 
2294 	case XPT_SCSI_IO:
2295 		if (ccb->ccb_h.target_lun != 0 ||
2296 			ccb->ccb_h.target_id >= hba->max_devices ||
2297 			(ccb->ccb_h.flags & CAM_CDB_PHYS))
2298 		{
2299 			ccb->ccb_h.status = CAM_TID_INVALID;
2300 			xpt_done(ccb);
2301 			return;
2302 		}
2303 
2304 		if ((srb = hptiop_get_srb(hba)) == NULL) {
2305 			device_printf(hba->pcidev, "srb allocation failed\n");
2306 			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2307 			xpt_done(ccb);
2308 			return;
2309 		}
2310 
2311 		srb->ccb = ccb;
2312 		error = bus_dmamap_load_ccb(hba->io_dmat,
2313 					    srb->dma_map,
2314 					    ccb,
2315 					    hptiop_post_scsi_command,
2316 					    srb,
2317 					    0);
2318 
2319 		if (error && error != EINPROGRESS) {
2320 			device_printf(hba->pcidev,
2321 				"%d bus_dmamap_load error %d\n",
2322 				hba->pciunit, error);
2323 			xpt_freeze_simq(hba->sim, 1);
2324 			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2325 			hptiop_free_srb(hba, srb);
2326 			xpt_done(ccb);
2327 			return;
2328 		}
2329 
2330 		return;
2331 
2332 	case XPT_RESET_BUS:
2333 		device_printf(hba->pcidev, "reset adapter\n");
2334 		hba->msg_done = 0;
2335 		hptiop_reset_adapter(hba);
2336 		break;
2337 
2338 	case XPT_GET_TRAN_SETTINGS:
2339 	case XPT_SET_TRAN_SETTINGS:
2340 		ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
2341 		break;
2342 
2343 	case XPT_CALC_GEOMETRY:
2344 		cam_calc_geometry(&ccb->ccg, 1);
2345 		break;
2346 
2347 	case XPT_PATH_INQ:
2348 	{
2349 		struct ccb_pathinq *cpi = &ccb->cpi;
2350 
2351 		cpi->version_num = 1;
2352 		cpi->hba_inquiry = PI_SDTR_ABLE;
2353 		cpi->target_sprt = 0;
2354 		cpi->hba_misc = PIM_NOBUSRESET;
2355 		cpi->hba_eng_cnt = 0;
2356 		cpi->max_target = hba->max_devices;
2357 		cpi->max_lun = 0;
2358 		cpi->unit_number = cam_sim_unit(sim);
2359 		cpi->bus_id = cam_sim_bus(sim);
2360 		cpi->initiator_id = hba->max_devices;
2361 		cpi->base_transfer_speed = 3300;
2362 
2363 		strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
2364 		strlcpy(cpi->hba_vid, "HPT   ", HBA_IDLEN);
2365 		strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
2366 		cpi->transport = XPORT_SPI;
2367 		cpi->transport_version = 2;
2368 		cpi->protocol = PROTO_SCSI;
2369 		cpi->protocol_version = SCSI_REV_2;
2370 		cpi->ccb_h.status = CAM_REQ_CMP;
2371 		break;
2372 	}
2373 
2374 	default:
2375 		ccb->ccb_h.status = CAM_REQ_INVALID;
2376 		break;
2377 	}
2378 
2379 	xpt_done(ccb);
2380 	return;
2381 }
2382 
2383 static void hptiop_post_req_itl(struct hpt_iop_hba *hba,
2384 				struct hpt_iop_srb *srb,
2385 				bus_dma_segment_t *segs, int nsegs)
2386 {
2387 	int idx;
2388 	union ccb *ccb = srb->ccb;
2389 	u_int8_t *cdb;
2390 
2391 	if (ccb->ccb_h.flags & CAM_CDB_POINTER)
2392 		cdb = ccb->csio.cdb_io.cdb_ptr;
2393 	else
2394 		cdb = ccb->csio.cdb_io.cdb_bytes;
2395 
2396 	KdPrint(("ccb=%p %x-%x-%x\n",
2397 		ccb, *(u_int32_t *)cdb, *((u_int32_t *)cdb+1), *((u_int32_t *)cdb+2)));
2398 
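	/*
	 * Two submission paths: SRBs flagged HPT_SRB_FLAG_HIGH_MEM_ACESS are
	 * copied into a request slot taken from the controller's inbound
	 * queue, otherwise the host-resident request is posted to the IOP
	 * by its physical address.
	 */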
2399 	if (srb->srb_flag & HPT_SRB_FLAG_HIGH_MEM_ACESS) {
2400 		u_int32_t iop_req32;
2401 		struct hpt_iop_request_scsi_command req;
2402 
2403 		iop_req32 = BUS_SPACE_RD4_ITL(inbound_queue);
2404 
2405 		if (iop_req32 == IOPMU_QUEUE_EMPTY) {
2406 			device_printf(hba->pcidev, "invalid req offset\n");
2407 			ccb->ccb_h.status = CAM_BUSY;
2408 			bus_dmamap_unload(hba->io_dmat, srb->dma_map);
2409 			hptiop_free_srb(hba, srb);
2410 			xpt_done(ccb);
2411 			return;
2412 		}
2413 
2414 		if (ccb->csio.dxfer_len && nsegs > 0) {
2415 			struct hpt_iopsg *psg = req.sg_list;
2416 			for (idx = 0; idx < nsegs; idx++, psg++) {
2417 				psg->pci_address = (u_int64_t)segs[idx].ds_addr;
2418 				psg->size = segs[idx].ds_len;
2419 				psg->eot = 0;
2420 			}
2421 			psg[-1].eot = 1;
2422 		}
2423 
2424 		bcopy(cdb, req.cdb, ccb->csio.cdb_len);
2425 
2426 		req.header.size =
2427 				offsetof(struct hpt_iop_request_scsi_command, sg_list)
2428 				+ nsegs*sizeof(struct hpt_iopsg);
2429 		req.header.type = IOP_REQUEST_TYPE_SCSI_COMMAND;
2430 		req.header.flags = 0;
2431 		req.header.result = IOP_RESULT_PENDING;
2432 		req.header.context = (u_int64_t)(unsigned long)srb;
2433 		req.dataxfer_length = ccb->csio.dxfer_len;
2434 		req.channel =  0;
2435 		req.target =  ccb->ccb_h.target_id;
2436 		req.lun =  ccb->ccb_h.target_lun;
2437 
2438 		bus_space_write_region_1(hba->bar0t, hba->bar0h, iop_req32,
2439 			(u_int8_t *)&req, req.header.size);
2440 
2441 		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
2442 			bus_dmamap_sync(hba->io_dmat,
2443 				srb->dma_map, BUS_DMASYNC_PREREAD);
2444 		}
2445 		else if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT)
2446 			bus_dmamap_sync(hba->io_dmat,
2447 				srb->dma_map, BUS_DMASYNC_PREWRITE);
2448 
2449 		BUS_SPACE_WRT4_ITL(inbound_queue,iop_req32);
2450 	} else {
2451 		struct hpt_iop_request_scsi_command *req;
2452 
2453 		req = (struct hpt_iop_request_scsi_command *)srb;
2454 		if (ccb->csio.dxfer_len && nsegs > 0) {
2455 			struct hpt_iopsg *psg = req->sg_list;
2456 			for (idx = 0; idx < nsegs; idx++, psg++) {
2457 				psg->pci_address =
2458 					(u_int64_t)segs[idx].ds_addr;
2459 				psg->size = segs[idx].ds_len;
2460 				psg->eot = 0;
2461 			}
2462 			psg[-1].eot = 1;
2463 		}
2464 
2465 		bcopy(cdb, req->cdb, ccb->csio.cdb_len);
2466 
2467 		req->header.type = IOP_REQUEST_TYPE_SCSI_COMMAND;
2468 		req->header.result = IOP_RESULT_PENDING;
2469 		req->dataxfer_length = ccb->csio.dxfer_len;
2470 		req->channel =  0;
2471 		req->target =  ccb->ccb_h.target_id;
2472 		req->lun =  ccb->ccb_h.target_lun;
2473 		req->header.size =
2474 			offsetof(struct hpt_iop_request_scsi_command, sg_list)
2475 			+ nsegs*sizeof(struct hpt_iopsg);
2476 		req->header.context = (u_int64_t)srb->index |
2477 						IOPMU_QUEUE_ADDR_HOST_BIT;
2478 		req->header.flags = IOP_REQUEST_FLAG_OUTPUT_CONTEXT;
2479 
2480 		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
2481 			bus_dmamap_sync(hba->io_dmat,
2482 				srb->dma_map, BUS_DMASYNC_PREREAD);
2483 		}else if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
2484 			bus_dmamap_sync(hba->io_dmat,
2485 				srb->dma_map, BUS_DMASYNC_PREWRITE);
2486 		}
2487 
2488 		if (hba->firmware_version > 0x01020000
2489 			|| hba->interface_version > 0x01020000) {
2490 			u_int32_t size_bits;
2491 
2492 			if (req->header.size < 256)
2493 				size_bits = IOPMU_QUEUE_REQUEST_SIZE_BIT;
2494 			else if (req->header.size < 512)
2495 				size_bits = IOPMU_QUEUE_ADDR_HOST_BIT;
2496 			else
2497 				size_bits = IOPMU_QUEUE_REQUEST_SIZE_BIT
2498 						| IOPMU_QUEUE_ADDR_HOST_BIT;
2499 
2500 			BUS_SPACE_WRT4_ITL(inbound_queue,
2501 				(u_int32_t)srb->phy_addr | size_bits);
2502 		} else
2503 			BUS_SPACE_WRT4_ITL(inbound_queue, (u_int32_t)srb->phy_addr
2504 				|IOPMU_QUEUE_ADDR_HOST_BIT);
2505 	}
2506 }
2507 
2508 static void hptiop_post_req_mv(struct hpt_iop_hba *hba,
2509 				struct hpt_iop_srb *srb,
2510 				bus_dma_segment_t *segs, int nsegs)
2511 {
2512 	int idx, size;
2513 	union ccb *ccb = srb->ccb;
2514 	u_int8_t *cdb;
2515 	struct hpt_iop_request_scsi_command *req;
2516 	u_int64_t req_phy;
2517 
2518 	req = (struct hpt_iop_request_scsi_command *)srb;
2519 	req_phy = srb->phy_addr;
2520 
2521 	if (ccb->csio.dxfer_len && nsegs > 0) {
2522 		struct hpt_iopsg *psg = req->sg_list;
2523 		for (idx = 0; idx < nsegs; idx++, psg++) {
2524 			psg->pci_address = (u_int64_t)segs[idx].ds_addr;
2525 			psg->size = segs[idx].ds_len;
2526 			psg->eot = 0;
2527 		}
2528 		psg[-1].eot = 1;
2529 	}
2530 	if (ccb->ccb_h.flags & CAM_CDB_POINTER)
2531 		cdb = ccb->csio.cdb_io.cdb_ptr;
2532 	else
2533 		cdb = ccb->csio.cdb_io.cdb_bytes;
2534 
2535 	bcopy(cdb, req->cdb, ccb->csio.cdb_len);
2536 	req->header.type = IOP_REQUEST_TYPE_SCSI_COMMAND;
2537 	req->header.result = IOP_RESULT_PENDING;
2538 	req->dataxfer_length = ccb->csio.dxfer_len;
2539 	req->channel = 0;
2540 	req->target =  ccb->ccb_h.target_id;
2541 	req->lun =  ccb->ccb_h.target_lun;
2542 	req->header.size = sizeof(struct hpt_iop_request_scsi_command)
2543 				- sizeof(struct hpt_iopsg)
2544 				+ nsegs * sizeof(struct hpt_iopsg);
2545 	if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
2546 		bus_dmamap_sync(hba->io_dmat,
2547 			srb->dma_map, BUS_DMASYNC_PREREAD);
2548 	}
2549 	else if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT)
2550 		bus_dmamap_sync(hba->io_dmat,
2551 			srb->dma_map, BUS_DMASYNC_PREWRITE);
2552 	req->header.context = (u_int64_t)srb->index
2553 					<< MVIOP_REQUEST_NUMBER_START_BIT
2554 					| MVIOP_CMD_TYPE_SCSI;
2555 	req->header.flags = IOP_REQUEST_FLAG_OUTPUT_CONTEXT;
2556 	size = req->header.size >> 8;
2557 	hptiop_mv_inbound_write(req_phy
2558 			| MVIOP_MU_QUEUE_ADDR_HOST_BIT
2559 			| imin(3, size), hba);
2560 }
2561 
2562 static void hptiop_post_req_mvfrey(struct hpt_iop_hba *hba,
2563 				struct hpt_iop_srb *srb,
2564 				bus_dma_segment_t *segs, int nsegs)
2565 {
2566 	int idx, index;
2567 	union ccb *ccb = srb->ccb;
2568 	u_int8_t *cdb;
2569 	struct hpt_iop_request_scsi_command *req;
2570 	u_int64_t req_phy;
2571 
2572 	req = (struct hpt_iop_request_scsi_command *)srb;
2573 	req_phy = srb->phy_addr;
2574 
2575 	if (ccb->csio.dxfer_len && nsegs > 0) {
2576 		struct hpt_iopsg *psg = req->sg_list;
2577 		for (idx = 0; idx < nsegs; idx++, psg++) {
2578 			psg->pci_address = (u_int64_t)segs[idx].ds_addr | 1;
2579 			psg->size = segs[idx].ds_len;
2580 			psg->eot = 0;
2581 		}
2582 		psg[-1].eot = 1;
2583 	}
2584 	if (ccb->ccb_h.flags & CAM_CDB_POINTER)
2585 		cdb = ccb->csio.cdb_io.cdb_ptr;
2586 	else
2587 		cdb = ccb->csio.cdb_io.cdb_bytes;
2588 
2589 	bcopy(cdb, req->cdb, ccb->csio.cdb_len);
2590 	req->header.type = IOP_REQUEST_TYPE_SCSI_COMMAND;
2591 	req->header.result = IOP_RESULT_PENDING;
2592 	req->dataxfer_length = ccb->csio.dxfer_len;
2593 	req->channel = 0;
2594 	req->target = ccb->ccb_h.target_id;
2595 	req->lun = ccb->ccb_h.target_lun;
2596 	req->header.size = sizeof(struct hpt_iop_request_scsi_command)
2597 				- sizeof(struct hpt_iopsg)
2598 				+ nsegs * sizeof(struct hpt_iopsg);
2599 	if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
2600 		bus_dmamap_sync(hba->io_dmat,
2601 			srb->dma_map, BUS_DMASYNC_PREREAD);
2602 	}
2603 	else if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT)
2604 		bus_dmamap_sync(hba->io_dmat,
2605 			srb->dma_map, BUS_DMASYNC_PREWRITE);
2606 
2607 	req->header.flags = IOP_REQUEST_FLAG_OUTPUT_CONTEXT
2608 						| IOP_REQUEST_FLAG_ADDR_BITS
2609 						| ((req_phy >> 16) & 0xffff0000);
2610 	req->header.context = ((req_phy & 0xffffffff) << 32 )
2611 						| srb->index << 4
2612 						| IOPMU_QUEUE_ADDR_HOST_BIT | req->header.type;
2613 
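	/*
	 * Advance the inbound list write pointer; when the index wraps past
	 * list_count, restart it at zero and flip the CL_POINTER_TOGGLE bit
	 * to mark the wrap.
	 */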
2614 	hba->u.mvfrey.inlist_wptr++;
2615 	index = hba->u.mvfrey.inlist_wptr & 0x3fff;
2616 
2617 	if (index == hba->u.mvfrey.list_count) {
2618 		index = 0;
2619 		hba->u.mvfrey.inlist_wptr &= ~0x3fff;
2620 		hba->u.mvfrey.inlist_wptr ^= CL_POINTER_TOGGLE;
2621 	}
2622 
2623 	hba->u.mvfrey.inlist[index].addr = req_phy;
2624 	hba->u.mvfrey.inlist[index].intrfc_len = (req->header.size + 3) / 4;
2625 
2626 	BUS_SPACE_WRT4_MVFREY2(inbound_write_ptr, hba->u.mvfrey.inlist_wptr);
2627 	BUS_SPACE_RD4_MVFREY2(inbound_write_ptr);
2628 
2629 	if (req->header.type == IOP_REQUEST_TYPE_SCSI_COMMAND) {
2630 		callout_reset(&srb->timeout, 20 * hz, hptiop_reset_adapter, hba);
2631 	}
2632 }
2633 
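/*
 * bus_dmamap_load_ccb() callback: validate the S/G list and hand the
 * request to the family-specific post routine.
 */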
2634 static void hptiop_post_scsi_command(void *arg, bus_dma_segment_t *segs,
2635 					int nsegs, int error)
2636 {
2637 	struct hpt_iop_srb *srb = (struct hpt_iop_srb *)arg;
2638 	union ccb *ccb = srb->ccb;
2639 	struct hpt_iop_hba *hba = srb->hba;
2640 
2641 	if (error || nsegs > hba->max_sg_count) {
2642 		KdPrint(("hptiop: func_code=%x tid=%x lun=%jx nsegs=%d\n",
2643 			ccb->ccb_h.func_code,
2644 			ccb->ccb_h.target_id,
2645 			(uintmax_t)ccb->ccb_h.target_lun, nsegs));
2646 		ccb->ccb_h.status = CAM_BUSY;
2647 		bus_dmamap_unload(hba->io_dmat, srb->dma_map);
2648 		hptiop_free_srb(hba, srb);
2649 		xpt_done(ccb);
2650 		return;
2651 	}
2652 
2653 	hba->ops->post_req(hba, srb, segs, nsegs);
2654 }
2655 
2656 static void hptiop_mv_map_ctlcfg(void *arg, bus_dma_segment_t *segs,
2657 				int nsegs, int error)
2658 {
2659 	struct hpt_iop_hba *hba = (struct hpt_iop_hba *)arg;
2660 	hba->ctlcfgcmd_phy = ((u_int64_t)segs->ds_addr + 0x1F)
2661 				& ~(u_int64_t)0x1F;
2662 	hba->ctlcfg_ptr = (u_int8_t *)(((unsigned long)hba->ctlcfg_ptr + 0x1F)
2663 				& ~0x1F);
2664 }
2665 
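/*
 * DMA load callback for the MV Frey internal memory block: the 0x800-byte
 * control/config area is followed by the inbound list, the outbound list
 * and the outbound list copy pointer (matching the size computed in
 * hptiop_internal_memalloc_mvfrey()).
 */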
2666 static void hptiop_mvfrey_map_ctlcfg(void *arg, bus_dma_segment_t *segs,
2667 				int nsegs, int error)
2668 {
2669 	struct hpt_iop_hba *hba = (struct hpt_iop_hba *)arg;
2670 	char *p;
2671 	u_int64_t phy;
2672 	u_int32_t list_count = hba->u.mvfrey.list_count;
2673 
2674 	phy = ((u_int64_t)segs->ds_addr + 0x1F)
2675 				& ~(u_int64_t)0x1F;
2676 	p = (u_int8_t *)(((unsigned long)hba->ctlcfg_ptr + 0x1F)
2677 				& ~0x1F);
2678 
2679 	hba->ctlcfgcmd_phy = phy;
2680 	hba->ctlcfg_ptr = p;
2681 
2682 	p += 0x800;
2683 	phy += 0x800;
2684 
2685 	hba->u.mvfrey.inlist = (struct mvfrey_inlist_entry *)p;
2686 	hba->u.mvfrey.inlist_phy = phy;
2687 
2688 	p += list_count * sizeof(struct mvfrey_inlist_entry);
2689 	phy += list_count * sizeof(struct mvfrey_inlist_entry);
2690 
2691 	hba->u.mvfrey.outlist = (struct mvfrey_outlist_entry *)p;
2692 	hba->u.mvfrey.outlist_phy = phy;
2693 
2694 	p += list_count * sizeof(struct mvfrey_outlist_entry);
2695 	phy += list_count * sizeof(struct mvfrey_outlist_entry);
2696 
2697 	hba->u.mvfrey.outlist_cptr = (u_int32_t *)p;
2698 	hba->u.mvfrey.outlist_cptr_phy = phy;
2699 }
2700 
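/*
 * DMA load callback for the SRB pool: carve the uncached area into
 * 32-byte aligned SRBs, create a DMA map for each and record its
 * physical address (stored as a 32-byte-unit offset for ITL adapters).
 */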
2701 static void hptiop_map_srb(void *arg, bus_dma_segment_t *segs,
2702 				int nsegs, int error)
2703 {
2704 	struct hpt_iop_hba * hba = (struct hpt_iop_hba *)arg;
2705 	bus_addr_t phy_addr = (segs->ds_addr + 0x1F) & ~(bus_addr_t)0x1F;
2706 	struct hpt_iop_srb *srb, *tmp_srb;
2707 	int i;
2708 
2709 	if (error || nsegs == 0) {
2710 		device_printf(hba->pcidev, "hptiop_map_srb error\n");
2711 		return;
2712 	}
2713 
2714 	/* map srb */
2715 	srb = (struct hpt_iop_srb *)
2716 		(((unsigned long)hba->uncached_ptr + 0x1F)
2717 		& ~(unsigned long)0x1F);
2718 
2719 	for (i = 0; i < HPT_SRB_MAX_QUEUE_SIZE; i++) {
2720 		tmp_srb = (struct hpt_iop_srb *)
2721 					((char *)srb + i * HPT_SRB_MAX_SIZE);
2722 		if (((unsigned long)tmp_srb & 0x1F) == 0) {
2723 			if (bus_dmamap_create(hba->io_dmat,
2724 						0, &tmp_srb->dma_map)) {
2725 				device_printf(hba->pcidev, "dmamap create failed\n");
2726 				return;
2727 			}
2728 
2729 			bzero(tmp_srb, sizeof(struct hpt_iop_srb));
2730 			tmp_srb->hba = hba;
2731 			tmp_srb->index = i;
2732 			if (hba->ctlcfg_ptr == 0) {/*itl iop*/
2733 				tmp_srb->phy_addr = (u_int64_t)(u_int32_t)
2734 							(phy_addr >> 5);
2735 				if (phy_addr & IOPMU_MAX_MEM_SUPPORT_MASK_32G)
2736 					tmp_srb->srb_flag =
2737 						HPT_SRB_FLAG_HIGH_MEM_ACESS;
2738 			} else {
2739 				tmp_srb->phy_addr = phy_addr;
2740 			}
2741 
2742 			callout_init_mtx(&tmp_srb->timeout, &hba->lock, 0);
2743 			hptiop_free_srb(hba, tmp_srb);
2744 			hba->srb[i] = tmp_srb;
2745 			phy_addr += HPT_SRB_MAX_SIZE;
2746 		}
2747 		else {
2748 			device_printf(hba->pcidev, "invalid alignment\n");
2749 			return;
2750 		}
2751 	}
2752 }
2753 
2754 static void hptiop_os_message_callback(struct hpt_iop_hba * hba, u_int32_t msg)
2755 {
2756 	hba->msg_done = 1;
2757 }
2758 
2759 static  int hptiop_os_query_remove_device(struct hpt_iop_hba * hba,
2760 						int target_id)
2761 {
2762 	struct cam_periph       *periph = NULL;
2763 	struct cam_path         *path;
2764 	int                     status, retval = 0;
2765 
2766 	status = xpt_create_path(&path, NULL, hba->sim->path_id, target_id, 0);
2767 
2768 	if (status == CAM_REQ_CMP) {
2769 		if ((periph = cam_periph_find(path, "da")) != NULL) {
2770 			if (periph->refcount >= 1) {
2771 				device_printf(hba->pcidev, "%d, "
2772 					"target_id=0x%x, "
2773 					"refcount=%d\n",
2774 				    hba->pciunit, target_id, periph->refcount);
2775 				retval = -1;
2776 			}
2777 		}
2778 		xpt_free_path(path);
2779 	}
2780 	return retval;
2781 }
2782 
2783 static void hptiop_release_resource(struct hpt_iop_hba *hba)
2784 {
2785 	int i;
2786 
2787 	if (hba->ioctl_dev)
2788 		destroy_dev(hba->ioctl_dev);
2789 
2790 	if (hba->path) {
2791 		struct ccb_setasync ccb;
2792 
2793 		memset(&ccb, 0, sizeof(ccb));
2794 		xpt_setup_ccb(&ccb.ccb_h, hba->path, /*priority*/5);
2795 		ccb.ccb_h.func_code = XPT_SASYNC_CB;
2796 		ccb.event_enable = 0;
2797 		ccb.callback = hptiop_async;
2798 		ccb.callback_arg = hba->sim;
2799 		xpt_action((union ccb *)&ccb);
2800 		xpt_free_path(hba->path);
2801 	}
2802 
2803 	if (hba->irq_handle)
2804 		bus_teardown_intr(hba->pcidev, hba->irq_res, hba->irq_handle);
2805 
2806 	if (hba->sim) {
2807 		hptiop_lock_adapter(hba);
2808 		xpt_bus_deregister(cam_sim_path(hba->sim));
2809 		cam_sim_free(hba->sim, TRUE);
2810 		hptiop_unlock_adapter(hba);
2811 	}
2812 
2813 	if (hba->ctlcfg_dmat) {
2814 		bus_dmamap_unload(hba->ctlcfg_dmat, hba->ctlcfg_dmamap);
2815 		bus_dmamem_free(hba->ctlcfg_dmat,
2816 					hba->ctlcfg_ptr, hba->ctlcfg_dmamap);
2817 		bus_dma_tag_destroy(hba->ctlcfg_dmat);
2818 	}
2819 
2820 	for (i = 0; i < HPT_SRB_MAX_QUEUE_SIZE; i++) {
2821 		struct hpt_iop_srb *srb = hba->srb[i];
2822 		if (srb->dma_map)
2823 			bus_dmamap_destroy(hba->io_dmat, srb->dma_map);
2824 		callout_drain(&srb->timeout);
2825 	}
2826 
2827 	if (hba->srb_dmat) {
2828 		bus_dmamap_unload(hba->srb_dmat, hba->srb_dmamap);
2829 		bus_dmamap_destroy(hba->srb_dmat, hba->srb_dmamap);
2830 		bus_dma_tag_destroy(hba->srb_dmat);
2831 	}
2832 
2833 	if (hba->io_dmat)
2834 		bus_dma_tag_destroy(hba->io_dmat);
2835 
2836 	if (hba->parent_dmat)
2837 		bus_dma_tag_destroy(hba->parent_dmat);
2838 
2839 	if (hba->irq_res)
2840 		bus_release_resource(hba->pcidev, SYS_RES_IRQ,
2841 					0, hba->irq_res);
2842 
2843 	if (hba->bar0_res)
2844 		bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
2845 					hba->bar0_rid, hba->bar0_res);
2846 	if (hba->bar2_res)
2847 		bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
2848 					hba->bar2_rid, hba->bar2_res);
2849 	mtx_destroy(&hba->lock);
2850 }
2851