xref: /freebsd/sys/dev/hptiop/hptiop.c (revision 25ecdc7d52770caf1c9b44b5ec11f468f6b636f3)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3  *
4  * HighPoint RR3xxx/4xxx RAID Driver for FreeBSD
5  * Copyright (C) 2007-2012 HighPoint Technologies, Inc. All Rights Reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26  * SUCH DAMAGE.
27  */
28 
29 #include <sys/cdefs.h>
30 __FBSDID("$FreeBSD$");
31 
32 #include <sys/param.h>
33 #include <sys/types.h>
34 #include <sys/cons.h>
35 #include <sys/time.h>
36 #include <sys/systm.h>
37 
38 #include <sys/stat.h>
39 #include <sys/malloc.h>
40 #include <sys/conf.h>
41 #include <sys/libkern.h>
42 #include <sys/kernel.h>
43 
44 #include <sys/kthread.h>
45 #include <sys/mutex.h>
46 #include <sys/module.h>
47 
48 #include <sys/eventhandler.h>
49 #include <sys/bus.h>
50 #include <sys/taskqueue.h>
51 #include <sys/ioccom.h>
52 
53 #include <machine/resource.h>
54 #include <machine/bus.h>
55 #include <machine/stdarg.h>
56 #include <sys/rman.h>
57 
58 #include <vm/vm.h>
59 #include <vm/pmap.h>
60 
61 #include <dev/pci/pcireg.h>
62 #include <dev/pci/pcivar.h>
63 
64 
65 #include <cam/cam.h>
66 #include <cam/cam_ccb.h>
67 #include <cam/cam_sim.h>
68 #include <cam/cam_xpt_sim.h>
69 #include <cam/cam_debug.h>
70 #include <cam/cam_periph.h>
71 #include <cam/scsi/scsi_all.h>
72 #include <cam/scsi/scsi_message.h>
73 
74 
75 #include <dev/hptiop/hptiop.h>
76 
77 static const char driver_name[] = "hptiop";
78 static const char driver_version[] = "v1.9";
79 
80 static devclass_t hptiop_devclass;
81 
82 static int hptiop_send_sync_msg(struct hpt_iop_hba *hba,
83 				u_int32_t msg, u_int32_t millisec);
84 static void hptiop_request_callback_itl(struct hpt_iop_hba *hba,
85 							u_int32_t req);
86 static void hptiop_request_callback_mv(struct hpt_iop_hba *hba, u_int64_t req);
87 static void hptiop_request_callback_mvfrey(struct hpt_iop_hba *hba,
88 							u_int32_t req);
89 static void hptiop_os_message_callback(struct hpt_iop_hba *hba, u_int32_t msg);
90 static int  hptiop_do_ioctl_itl(struct hpt_iop_hba *hba,
91 				struct hpt_iop_ioctl_param *pParams);
92 static int  hptiop_do_ioctl_mv(struct hpt_iop_hba *hba,
93 				struct hpt_iop_ioctl_param *pParams);
94 static int  hptiop_do_ioctl_mvfrey(struct hpt_iop_hba *hba,
95 				struct hpt_iop_ioctl_param *pParams);
96 static int  hptiop_rescan_bus(struct hpt_iop_hba *hba);
97 static int hptiop_alloc_pci_res_itl(struct hpt_iop_hba *hba);
98 static int hptiop_alloc_pci_res_mv(struct hpt_iop_hba *hba);
99 static int hptiop_alloc_pci_res_mvfrey(struct hpt_iop_hba *hba);
100 static int hptiop_get_config_itl(struct hpt_iop_hba *hba,
101 				struct hpt_iop_request_get_config *config);
102 static int hptiop_get_config_mv(struct hpt_iop_hba *hba,
103 				struct hpt_iop_request_get_config *config);
104 static int hptiop_get_config_mvfrey(struct hpt_iop_hba *hba,
105 				struct hpt_iop_request_get_config *config);
106 static int hptiop_set_config_itl(struct hpt_iop_hba *hba,
107 				struct hpt_iop_request_set_config *config);
108 static int hptiop_set_config_mv(struct hpt_iop_hba *hba,
109 				struct hpt_iop_request_set_config *config);
110 static int hptiop_set_config_mvfrey(struct hpt_iop_hba *hba,
111 				struct hpt_iop_request_set_config *config);
112 static int hptiop_internal_memalloc_mv(struct hpt_iop_hba *hba);
113 static int hptiop_internal_memalloc_mvfrey(struct hpt_iop_hba *hba);
114 static int hptiop_internal_memfree_itl(struct hpt_iop_hba *hba);
115 static int hptiop_internal_memfree_mv(struct hpt_iop_hba *hba);
116 static int hptiop_internal_memfree_mvfrey(struct hpt_iop_hba *hba);
117 static int  hptiop_post_ioctl_command_itl(struct hpt_iop_hba *hba,
118 			u_int32_t req32, struct hpt_iop_ioctl_param *pParams);
119 static int  hptiop_post_ioctl_command_mv(struct hpt_iop_hba *hba,
120 				struct hpt_iop_request_ioctl_command *req,
121 				struct hpt_iop_ioctl_param *pParams);
122 static int  hptiop_post_ioctl_command_mvfrey(struct hpt_iop_hba *hba,
123 				struct hpt_iop_request_ioctl_command *req,
124 				struct hpt_iop_ioctl_param *pParams);
125 static void hptiop_post_req_itl(struct hpt_iop_hba *hba,
126 				struct hpt_iop_srb *srb,
127 				bus_dma_segment_t *segs, int nsegs);
128 static void hptiop_post_req_mv(struct hpt_iop_hba *hba,
129 				struct hpt_iop_srb *srb,
130 				bus_dma_segment_t *segs, int nsegs);
131 static void hptiop_post_req_mvfrey(struct hpt_iop_hba *hba,
132 				struct hpt_iop_srb *srb,
133 				bus_dma_segment_t *segs, int nsegs);
134 static void hptiop_post_msg_itl(struct hpt_iop_hba *hba, u_int32_t msg);
135 static void hptiop_post_msg_mv(struct hpt_iop_hba *hba, u_int32_t msg);
136 static void hptiop_post_msg_mvfrey(struct hpt_iop_hba *hba, u_int32_t msg);
137 static void hptiop_enable_intr_itl(struct hpt_iop_hba *hba);
138 static void hptiop_enable_intr_mv(struct hpt_iop_hba *hba);
139 static void hptiop_enable_intr_mvfrey(struct hpt_iop_hba *hba);
140 static void hptiop_disable_intr_itl(struct hpt_iop_hba *hba);
141 static void hptiop_disable_intr_mv(struct hpt_iop_hba *hba);
142 static void hptiop_disable_intr_mvfrey(struct hpt_iop_hba *hba);
143 static void hptiop_free_srb(struct hpt_iop_hba *hba, struct hpt_iop_srb *srb);
144 static int  hptiop_os_query_remove_device(struct hpt_iop_hba *hba, int tid);
145 static int  hptiop_probe(device_t dev);
146 static int  hptiop_attach(device_t dev);
147 static int  hptiop_detach(device_t dev);
148 static int  hptiop_shutdown(device_t dev);
149 static void hptiop_action(struct cam_sim *sim, union ccb *ccb);
150 static void hptiop_poll(struct cam_sim *sim);
151 static void hptiop_async(void *callback_arg, u_int32_t code,
152 					struct cam_path *path, void *arg);
153 static void hptiop_pci_intr(void *arg);
154 static void hptiop_release_resource(struct hpt_iop_hba *hba);
155 static void hptiop_reset_adapter(void *argv);
156 static d_open_t hptiop_open;
157 static d_close_t hptiop_close;
158 static d_ioctl_t hptiop_ioctl;
159 
160 static struct cdevsw hptiop_cdevsw = {
161 	.d_open = hptiop_open,
162 	.d_close = hptiop_close,
163 	.d_ioctl = hptiop_ioctl,
164 	.d_name = driver_name,
165 	.d_version = D_VERSION,
166 };
167 
168 #define hba_from_dev(dev) \
169 	((struct hpt_iop_hba *)devclass_get_softc(hptiop_devclass, dev2unit(dev)))
170 
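/*
 * Shorthand accessors for the memory-mapped register blocks: BAR0 holds the
 * ITL message unit (and the MV doorbell/mask registers), while BAR2 holds the
 * MV and MVFrey message units.  Each macro expands to bus_space_{read,write}_4
 * on the tag/handle pair saved in the softc for that BAR.
 */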
171 #define BUS_SPACE_WRT4_ITL(offset, value) bus_space_write_4(hba->bar0t,\
172 		hba->bar0h, offsetof(struct hpt_iopmu_itl, offset), (value))
173 #define BUS_SPACE_RD4_ITL(offset) bus_space_read_4(hba->bar0t,\
174 		hba->bar0h, offsetof(struct hpt_iopmu_itl, offset))
175 
176 #define BUS_SPACE_WRT4_MV0(offset, value) bus_space_write_4(hba->bar0t,\
177 		hba->bar0h, offsetof(struct hpt_iopmv_regs, offset), value)
178 #define BUS_SPACE_RD4_MV0(offset) bus_space_read_4(hba->bar0t,\
179 		hba->bar0h, offsetof(struct hpt_iopmv_regs, offset))
180 #define BUS_SPACE_WRT4_MV2(offset, value) bus_space_write_4(hba->bar2t,\
181 		hba->bar2h, offsetof(struct hpt_iopmu_mv, offset), value)
182 #define BUS_SPACE_RD4_MV2(offset) bus_space_read_4(hba->bar2t,\
183 		hba->bar2h, offsetof(struct hpt_iopmu_mv, offset))
184 
185 #define BUS_SPACE_WRT4_MVFREY2(offset, value) bus_space_write_4(hba->bar2t,\
186 		hba->bar2h, offsetof(struct hpt_iopmu_mvfrey, offset), value)
187 #define BUS_SPACE_RD4_MVFREY2(offset) bus_space_read_4(hba->bar2t,\
188 		hba->bar2h, offsetof(struct hpt_iopmu_mvfrey, offset))
189 
190 static int hptiop_open(ioctl_dev_t dev, int flags,
191 					int devtype, ioctl_thread_t proc)
192 {
193 	struct hpt_iop_hba *hba = hba_from_dev(dev);
194 
195 	if (hba==NULL)
196 		return ENXIO;
197 	if (hba->flag & HPT_IOCTL_FLAG_OPEN)
198 		return EBUSY;
199 	hba->flag |= HPT_IOCTL_FLAG_OPEN;
200 	return 0;
201 }
202 
203 static int hptiop_close(ioctl_dev_t dev, int flags,
204 					int devtype, ioctl_thread_t proc)
205 {
206 	struct hpt_iop_hba *hba = hba_from_dev(dev);
207 	hba->flag &= ~(u_int32_t)HPT_IOCTL_FLAG_OPEN;
208 	return 0;
209 }
210 
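/*
 * Character-device ioctl entry point.  Userland management tools issue
 * HPT_DO_IOCONTROL (forwarded to the family-specific do_ioctl op) and
 * HPT_SCAN_BUS (a CAM rescan) through the per-unit device node; both are
 * serialized under Giant here.
 */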
211 static int hptiop_ioctl(ioctl_dev_t dev, u_long cmd, caddr_t data,
212 					int flags, ioctl_thread_t proc)
213 {
214 	int ret = EFAULT;
215 	struct hpt_iop_hba *hba = hba_from_dev(dev);
216 
217 	mtx_lock(&Giant);
218 
219 	switch (cmd) {
220 	case HPT_DO_IOCONTROL:
221 		ret = hba->ops->do_ioctl(hba,
222 				(struct hpt_iop_ioctl_param *)data);
223 		break;
224 	case HPT_SCAN_BUS:
225 		ret = hptiop_rescan_bus(hba);
226 		break;
227 	}
228 
229 	mtx_unlock(&Giant);
230 
231 	return ret;
232 }
233 
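/*
 * Pop one 64-bit entry from the MV outbound (completion) queue in BAR2.
 * An empty queue is indicated by tail == head; otherwise the tail slot is
 * read, the tail index is advanced (wrapping at MVIOP_QUEUE_LEN) and written
 * back so the firmware can reuse the slot.
 */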
234 static u_int64_t hptiop_mv_outbound_read(struct hpt_iop_hba *hba)
235 {
236 	u_int64_t p;
237 	u_int32_t outbound_tail = BUS_SPACE_RD4_MV2(outbound_tail);
238 	u_int32_t outbound_head = BUS_SPACE_RD4_MV2(outbound_head);
239 
240 	if (outbound_tail != outbound_head) {
241 		bus_space_read_region_4(hba->bar2t, hba->bar2h,
242 			offsetof(struct hpt_iopmu_mv,
243 				outbound_q[outbound_tail]),
244 			(u_int32_t *)&p, 2);
245 
246 		outbound_tail++;
247 
248 		if (outbound_tail == MVIOP_QUEUE_LEN)
249 			outbound_tail = 0;
250 
251 		BUS_SPACE_WRT4_MV2(outbound_tail, outbound_tail);
252 		return p;
253 	} else
254 		return 0;
255 }
256 
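/*
 * Push a 64-bit request descriptor onto the MV inbound queue and ring the
 * inbound doorbell so the IOP notices the new entry.  The head index wraps
 * at MVIOP_QUEUE_LEN, mirroring hptiop_mv_outbound_read() above.
 */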
257 static void hptiop_mv_inbound_write(u_int64_t p, struct hpt_iop_hba *hba)
258 {
259 	u_int32_t inbound_head = BUS_SPACE_RD4_MV2(inbound_head);
260 	u_int32_t head = inbound_head + 1;
261 
262 	if (head == MVIOP_QUEUE_LEN)
263 		head = 0;
264 
265 	bus_space_write_region_4(hba->bar2t, hba->bar2h,
266 			offsetof(struct hpt_iopmu_mv, inbound_q[inbound_head]),
267 			(u_int32_t *)&p, 2);
268 	BUS_SPACE_WRT4_MV2(inbound_head, head);
269 	BUS_SPACE_WRT4_MV0(inbound_doorbell, MVIOP_MU_INBOUND_INT_POSTQUEUE);
270 }
271 
272 static void hptiop_post_msg_itl(struct hpt_iop_hba *hba, u_int32_t msg)
273 {
274 	BUS_SPACE_WRT4_ITL(inbound_msgaddr0, msg);
275 	BUS_SPACE_RD4_ITL(outbound_intstatus);
276 }
277 
278 static void hptiop_post_msg_mv(struct hpt_iop_hba *hba, u_int32_t msg)
279 {
280 
281 	BUS_SPACE_WRT4_MV2(inbound_msg, msg);
282 	BUS_SPACE_WRT4_MV0(inbound_doorbell, MVIOP_MU_INBOUND_INT_MSG);
283 
284 	BUS_SPACE_RD4_MV0(outbound_intmask);
285 }
286 
287 static void hptiop_post_msg_mvfrey(struct hpt_iop_hba *hba, u_int32_t msg)
288 {
289 	BUS_SPACE_WRT4_MVFREY2(f0_to_cpu_msg_a, msg);
290 	BUS_SPACE_RD4_MVFREY2(f0_to_cpu_msg_a);
291 }
292 
293 static int hptiop_wait_ready_itl(struct hpt_iop_hba * hba, u_int32_t millisec)
294 {
295 	u_int32_t req=0;
296 	int i;
297 
298 	for (i = 0; i < millisec; i++) {
299 		req = BUS_SPACE_RD4_ITL(inbound_queue);
300 		if (req != IOPMU_QUEUE_EMPTY)
301 			break;
302 		DELAY(1000);
303 	}
304 
305 	if (req!=IOPMU_QUEUE_EMPTY) {
306 		BUS_SPACE_WRT4_ITL(outbound_queue, req);
307 		BUS_SPACE_RD4_ITL(outbound_intstatus);
308 		return 0;
309 	}
310 
311 	return -1;
312 }
313 
314 static int hptiop_wait_ready_mv(struct hpt_iop_hba * hba, u_int32_t millisec)
315 {
316 	if (hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_NOP, millisec))
317 		return -1;
318 
319 	return 0;
320 }
321 
322 static int hptiop_wait_ready_mvfrey(struct hpt_iop_hba * hba,
323 							u_int32_t millisec)
324 {
325 	if (hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_NOP, millisec))
326 		return -1;
327 
328 	return 0;
329 }
330 
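/*
 * Completion handler for ITL adapters.  "index" is the value popped from the
 * outbound queue: host-allocated requests carry IOPMU_QUEUE_MASK_HOST_BITS and
 * are looked up in hba->srb[], while IOP-resident requests are read back from
 * BAR0 at the given offset.  Either way the IOP result code is translated into
 * a CAM status and the CCB is completed.
 */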
331 static void hptiop_request_callback_itl(struct hpt_iop_hba * hba,
332 							u_int32_t index)
333 {
334 	struct hpt_iop_srb *srb;
335 	struct hpt_iop_request_scsi_command *req=NULL;
336 	union ccb *ccb;
337 	u_int8_t *cdb;
338 	u_int32_t result, temp, dxfer;
339 	u_int64_t temp64;
340 
341 	if (index & IOPMU_QUEUE_MASK_HOST_BITS) { /*host req*/
342 		if (hba->firmware_version > 0x01020000 ||
343 			hba->interface_version > 0x01020000) {
344 			srb = hba->srb[index & ~(u_int32_t)
345 				(IOPMU_QUEUE_ADDR_HOST_BIT
346 				| IOPMU_QUEUE_REQUEST_RESULT_BIT)];
347 			req = (struct hpt_iop_request_scsi_command *)srb;
348 			if (index & IOPMU_QUEUE_REQUEST_RESULT_BIT)
349 				result = IOP_RESULT_SUCCESS;
350 			else
351 				result = req->header.result;
352 		} else {
353 			srb = hba->srb[index &
354 				~(u_int32_t)IOPMU_QUEUE_ADDR_HOST_BIT];
355 			req = (struct hpt_iop_request_scsi_command *)srb;
356 			result = req->header.result;
357 		}
358 		dxfer = req->dataxfer_length;
359 		goto srb_complete;
360 	}
361 
362 	/*iop req*/
363 	temp = bus_space_read_4(hba->bar0t, hba->bar0h, index +
364 		offsetof(struct hpt_iop_request_header, type));
365 	result = bus_space_read_4(hba->bar0t, hba->bar0h, index +
366 		offsetof(struct hpt_iop_request_header, result));
367 	switch(temp) {
368 	case IOP_REQUEST_TYPE_IOCTL_COMMAND:
369 	{
370 		temp64 = 0;
371 		bus_space_write_region_4(hba->bar0t, hba->bar0h, index +
372 			offsetof(struct hpt_iop_request_header, context),
373 			(u_int32_t *)&temp64, 2);
374 		wakeup((void *)((unsigned long)hba->u.itl.mu + index));
375 		break;
376 	}
377 
378 	case IOP_REQUEST_TYPE_SCSI_COMMAND:
379 		bus_space_read_region_4(hba->bar0t, hba->bar0h, index +
380 			offsetof(struct hpt_iop_request_header, context),
381 			(u_int32_t *)&temp64, 2);
382 		srb = (struct hpt_iop_srb *)(unsigned long)temp64;
383 		dxfer = bus_space_read_4(hba->bar0t, hba->bar0h,
384 				index + offsetof(struct hpt_iop_request_scsi_command,
385 				dataxfer_length));
386 srb_complete:
387 		ccb = (union ccb *)srb->ccb;
388 		if (ccb->ccb_h.flags & CAM_CDB_POINTER)
389 			cdb = ccb->csio.cdb_io.cdb_ptr;
390 		else
391 			cdb = ccb->csio.cdb_io.cdb_bytes;
392 
393 		if (cdb[0] == SYNCHRONIZE_CACHE) { /* ??? */
394 			ccb->ccb_h.status = CAM_REQ_CMP;
395 			goto scsi_done;
396 		}
397 
398 		switch (result) {
399 		case IOP_RESULT_SUCCESS:
400 			switch (ccb->ccb_h.flags & CAM_DIR_MASK) {
401 			case CAM_DIR_IN:
402 				bus_dmamap_sync(hba->io_dmat,
403 					srb->dma_map, BUS_DMASYNC_POSTREAD);
404 				bus_dmamap_unload(hba->io_dmat, srb->dma_map);
405 				break;
406 			case CAM_DIR_OUT:
407 				bus_dmamap_sync(hba->io_dmat,
408 					srb->dma_map, BUS_DMASYNC_POSTWRITE);
409 				bus_dmamap_unload(hba->io_dmat, srb->dma_map);
410 				break;
411 			}
412 
413 			ccb->ccb_h.status = CAM_REQ_CMP;
414 			break;
415 
416 		case IOP_RESULT_BAD_TARGET:
417 			ccb->ccb_h.status = CAM_DEV_NOT_THERE;
418 			break;
419 		case IOP_RESULT_BUSY:
420 			ccb->ccb_h.status = CAM_BUSY;
421 			break;
422 		case IOP_RESULT_INVALID_REQUEST:
423 			ccb->ccb_h.status = CAM_REQ_INVALID;
424 			break;
425 		case IOP_RESULT_FAIL:
426 			ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
427 			break;
428 		case IOP_RESULT_RESET:
429 			ccb->ccb_h.status = CAM_BUSY;
430 			break;
431 		case IOP_RESULT_CHECK_CONDITION:
432 			memset(&ccb->csio.sense_data, 0,
433 			    sizeof(ccb->csio.sense_data));
434 			if (dxfer < ccb->csio.sense_len)
435 				ccb->csio.sense_resid = ccb->csio.sense_len -
436 				    dxfer;
437 			else
438 				ccb->csio.sense_resid = 0;
439 			if (srb->srb_flag & HPT_SRB_FLAG_HIGH_MEM_ACESS) {/*iop*/
440 				bus_space_read_region_1(hba->bar0t, hba->bar0h,
441 					index + offsetof(struct hpt_iop_request_scsi_command,
442 					sg_list), (u_int8_t *)&ccb->csio.sense_data,
443 					MIN(dxfer, sizeof(ccb->csio.sense_data)));
444 			} else {
445 				memcpy(&ccb->csio.sense_data, &req->sg_list,
446 					MIN(dxfer, sizeof(ccb->csio.sense_data)));
447 			}
448 			ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
449 			ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
450 			ccb->csio.scsi_status = SCSI_STATUS_CHECK_COND;
451 			break;
452 		default:
453 			ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
454 			break;
455 		}
456 scsi_done:
457 		if (srb->srb_flag & HPT_SRB_FLAG_HIGH_MEM_ACESS)
458 			BUS_SPACE_WRT4_ITL(outbound_queue, index);
459 
460 		ccb->csio.resid = ccb->csio.dxfer_len - dxfer;
461 
462 		hptiop_free_srb(hba, srb);
463 		xpt_done(ccb);
464 		break;
465 	}
466 }
467 
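/*
 * Drain the ITL outbound queue, dispatching each entry to
 * hptiop_request_callback_itl().  Synchronous IOP-side requests whose context
 * is still zero are instead marked done (context set to 1) so the polling
 * sender in hptiop_send_sync_request_itl() can observe the completion.
 */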
468 static void hptiop_drain_outbound_queue_itl(struct hpt_iop_hba *hba)
469 {
470 	u_int32_t req, temp;
471 
472 	while ((req = BUS_SPACE_RD4_ITL(outbound_queue)) !=IOPMU_QUEUE_EMPTY) {
473 		if (req & IOPMU_QUEUE_MASK_HOST_BITS)
474 			hptiop_request_callback_itl(hba, req);
475 		else {
476 			struct hpt_iop_request_header *p;
477 
478 			p = (struct hpt_iop_request_header *)
479 				((char *)hba->u.itl.mu + req);
480 			temp = bus_space_read_4(hba->bar0t,
481 					hba->bar0h,req +
482 					offsetof(struct hpt_iop_request_header,
483 						flags));
484 			if (temp & IOP_REQUEST_FLAG_SYNC_REQUEST) {
485 				u_int64_t temp64;
486 				bus_space_read_region_4(hba->bar0t,
487 					hba->bar0h,req +
488 					offsetof(struct hpt_iop_request_header,
489 						context),
490 					(u_int32_t *)&temp64, 2);
491 				if (temp64) {
492 					hptiop_request_callback_itl(hba, req);
493 				} else {
494 					temp64 = 1;
495 					bus_space_write_region_4(hba->bar0t,
496 						hba->bar0h,req +
497 						offsetof(struct hpt_iop_request_header,
498 							context),
499 						(u_int32_t *)&temp64, 2);
500 				}
501 			} else
502 				hptiop_request_callback_itl(hba, req);
503 		}
504 	}
505 }
506 
507 static int hptiop_intr_itl(struct hpt_iop_hba * hba)
508 {
509 	u_int32_t status;
510 	int ret = 0;
511 
512 	status = BUS_SPACE_RD4_ITL(outbound_intstatus);
513 
514 	if (status & IOPMU_OUTBOUND_INT_MSG0) {
515 		u_int32_t msg = BUS_SPACE_RD4_ITL(outbound_msgaddr0);
516 		KdPrint(("hptiop: received outbound msg %x\n", msg));
517 		BUS_SPACE_WRT4_ITL(outbound_intstatus, IOPMU_OUTBOUND_INT_MSG0);
518 		hptiop_os_message_callback(hba, msg);
519 		ret = 1;
520 	}
521 
522 	if (status & IOPMU_OUTBOUND_INT_POSTQUEUE) {
523 		hptiop_drain_outbound_queue_itl(hba);
524 		ret = 1;
525 	}
526 
527 	return ret;
528 }
529 
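/*
 * Completion handler for MV adapters.  The context word recovered from the
 * 64-bit queue tag identifies the request type (SCSI, IOCTL, get/set config);
 * for SCSI requests it also carries the srb index and a "request succeeded"
 * bit.
 */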
530 static void hptiop_request_callback_mv(struct hpt_iop_hba * hba,
531 							u_int64_t _tag)
532 {
533 	u_int32_t context = (u_int32_t)_tag;
534 
535 	if (context & MVIOP_CMD_TYPE_SCSI) {
536 		struct hpt_iop_srb *srb;
537 		struct hpt_iop_request_scsi_command *req;
538 		union ccb *ccb;
539 		u_int8_t *cdb;
540 
541 		srb = hba->srb[context >> MVIOP_REQUEST_NUMBER_START_BIT];
542 		req = (struct hpt_iop_request_scsi_command *)srb;
543 		ccb = (union ccb *)srb->ccb;
544 		if (ccb->ccb_h.flags & CAM_CDB_POINTER)
545 			cdb = ccb->csio.cdb_io.cdb_ptr;
546 		else
547 			cdb = ccb->csio.cdb_io.cdb_bytes;
548 
549 		if (cdb[0] == SYNCHRONIZE_CACHE) { /* ??? */
550 			ccb->ccb_h.status = CAM_REQ_CMP;
551 			goto scsi_done;
552 		}
553 		if (context & MVIOP_MU_QUEUE_REQUEST_RESULT_BIT)
554 			req->header.result = IOP_RESULT_SUCCESS;
555 
556 		switch (req->header.result) {
557 		case IOP_RESULT_SUCCESS:
558 			switch (ccb->ccb_h.flags & CAM_DIR_MASK) {
559 			case CAM_DIR_IN:
560 				bus_dmamap_sync(hba->io_dmat,
561 					srb->dma_map, BUS_DMASYNC_POSTREAD);
562 				bus_dmamap_unload(hba->io_dmat, srb->dma_map);
563 				break;
564 			case CAM_DIR_OUT:
565 				bus_dmamap_sync(hba->io_dmat,
566 					srb->dma_map, BUS_DMASYNC_POSTWRITE);
567 				bus_dmamap_unload(hba->io_dmat, srb->dma_map);
568 				break;
569 			}
570 			ccb->ccb_h.status = CAM_REQ_CMP;
571 			break;
572 		case IOP_RESULT_BAD_TARGET:
573 			ccb->ccb_h.status = CAM_DEV_NOT_THERE;
574 			break;
575 		case IOP_RESULT_BUSY:
576 			ccb->ccb_h.status = CAM_BUSY;
577 			break;
578 		case IOP_RESULT_INVALID_REQUEST:
579 			ccb->ccb_h.status = CAM_REQ_INVALID;
580 			break;
581 		case IOP_RESULT_FAIL:
582 			ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
583 			break;
584 		case IOP_RESULT_RESET:
585 			ccb->ccb_h.status = CAM_BUSY;
586 			break;
587 		case IOP_RESULT_CHECK_CONDITION:
588 			memset(&ccb->csio.sense_data, 0,
589 			    sizeof(ccb->csio.sense_data));
590 			if (req->dataxfer_length < ccb->csio.sense_len)
591 				ccb->csio.sense_resid = ccb->csio.sense_len -
592 				    req->dataxfer_length;
593 			else
594 				ccb->csio.sense_resid = 0;
595 			memcpy(&ccb->csio.sense_data, &req->sg_list,
596 				MIN(req->dataxfer_length, sizeof(ccb->csio.sense_data)));
597 			ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
598 			ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
599 			ccb->csio.scsi_status = SCSI_STATUS_CHECK_COND;
600 			break;
601 		default:
602 			ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
603 			break;
604 		}
605 scsi_done:
606 		ccb->csio.resid = ccb->csio.dxfer_len - req->dataxfer_length;
607 
608 		hptiop_free_srb(hba, srb);
609 		xpt_done(ccb);
610 	} else if (context & MVIOP_CMD_TYPE_IOCTL) {
611 		struct hpt_iop_request_ioctl_command *req = hba->ctlcfg_ptr;
612 		if (context & MVIOP_MU_QUEUE_REQUEST_RESULT_BIT)
613 			hba->config_done = 1;
614 		else
615 			hba->config_done = -1;
616 		wakeup(req);
617 	} else if (context &
618 			(MVIOP_CMD_TYPE_SET_CONFIG |
619 				MVIOP_CMD_TYPE_GET_CONFIG))
620 		hba->config_done = 1;
621 	else {
622 		device_printf(hba->pcidev, "wrong callback type\n");
623 	}
624 }
625 
626 static void hptiop_request_callback_mvfrey(struct hpt_iop_hba * hba,
627 				u_int32_t _tag)
628 {
629 	u_int32_t req_type = _tag & 0xf;
630 
631 	struct hpt_iop_srb *srb;
632 	struct hpt_iop_request_scsi_command *req;
633 	union ccb *ccb;
634 	u_int8_t *cdb;
635 
636 	switch (req_type) {
637 	case IOP_REQUEST_TYPE_GET_CONFIG:
638 	case IOP_REQUEST_TYPE_SET_CONFIG:
639 		hba->config_done = 1;
640 		break;
641 
642 	case IOP_REQUEST_TYPE_SCSI_COMMAND:
643 		srb = hba->srb[(_tag >> 4) & 0xff];
644 		req = (struct hpt_iop_request_scsi_command *)srb;
645 
646 		ccb = (union ccb *)srb->ccb;
647 
648 		callout_stop(&srb->timeout);
649 
650 		if (ccb->ccb_h.flags & CAM_CDB_POINTER)
651 			cdb = ccb->csio.cdb_io.cdb_ptr;
652 		else
653 			cdb = ccb->csio.cdb_io.cdb_bytes;
654 
655 		if (cdb[0] == SYNCHRONIZE_CACHE) { /* ??? */
656 			ccb->ccb_h.status = CAM_REQ_CMP;
657 			goto scsi_done;
658 		}
659 
660 		if (_tag & MVFREYIOPMU_QUEUE_REQUEST_RESULT_BIT)
661 			req->header.result = IOP_RESULT_SUCCESS;
662 
663 		switch (req->header.result) {
664 		case IOP_RESULT_SUCCESS:
665 			switch (ccb->ccb_h.flags & CAM_DIR_MASK) {
666 			case CAM_DIR_IN:
667 				bus_dmamap_sync(hba->io_dmat,
668 						srb->dma_map, BUS_DMASYNC_POSTREAD);
669 				bus_dmamap_unload(hba->io_dmat, srb->dma_map);
670 				break;
671 			case CAM_DIR_OUT:
672 				bus_dmamap_sync(hba->io_dmat,
673 						srb->dma_map, BUS_DMASYNC_POSTWRITE);
674 				bus_dmamap_unload(hba->io_dmat, srb->dma_map);
675 				break;
676 			}
677 			ccb->ccb_h.status = CAM_REQ_CMP;
678 			break;
679 		case IOP_RESULT_BAD_TARGET:
680 			ccb->ccb_h.status = CAM_DEV_NOT_THERE;
681 			break;
682 		case IOP_RESULT_BUSY:
683 			ccb->ccb_h.status = CAM_BUSY;
684 			break;
685 		case IOP_RESULT_INVALID_REQUEST:
686 			ccb->ccb_h.status = CAM_REQ_INVALID;
687 			break;
688 		case IOP_RESULT_FAIL:
689 			ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
690 			break;
691 		case IOP_RESULT_RESET:
692 			ccb->ccb_h.status = CAM_BUSY;
693 			break;
694 		case IOP_RESULT_CHECK_CONDITION:
695 			memset(&ccb->csio.sense_data, 0,
696 			       sizeof(ccb->csio.sense_data));
697 			if (req->dataxfer_length < ccb->csio.sense_len)
698 				ccb->csio.sense_resid = ccb->csio.sense_len -
699 				req->dataxfer_length;
700 			else
701 				ccb->csio.sense_resid = 0;
702 			memcpy(&ccb->csio.sense_data, &req->sg_list,
703 			       MIN(req->dataxfer_length, sizeof(ccb->csio.sense_data)));
704 			ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
705 			ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
706 			ccb->csio.scsi_status = SCSI_STATUS_CHECK_COND;
707 			break;
708 		default:
709 			ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
710 			break;
711 		}
712 scsi_done:
713 		ccb->csio.resid = ccb->csio.dxfer_len - req->dataxfer_length;
714 
715 		hptiop_free_srb(hba, srb);
716 		xpt_done(ccb);
717 		break;
718 	case IOP_REQUEST_TYPE_IOCTL_COMMAND:
719 		if (_tag & MVFREYIOPMU_QUEUE_REQUEST_RESULT_BIT)
720 			hba->config_done = 1;
721 		else
722 			hba->config_done = -1;
723 		wakeup((struct hpt_iop_request_ioctl_command *)hba->ctlcfg_ptr);
724 		break;
725 	default:
726 		device_printf(hba->pcidev, "wrong callback type\n");
727 		break;
728 	}
729 }
730 
731 static void hptiop_drain_outbound_queue_mv(struct hpt_iop_hba * hba)
732 {
733 	u_int64_t req;
734 
735 	while ((req = hptiop_mv_outbound_read(hba))) {
736 		if (req & MVIOP_MU_QUEUE_ADDR_HOST_BIT) {
737 			if (req & MVIOP_MU_QUEUE_REQUEST_RETURN_CONTEXT) {
738 				hptiop_request_callback_mv(hba, req);
739 			}
740 		}
741 	}
742 }
743 
744 static int hptiop_intr_mv(struct hpt_iop_hba * hba)
745 {
746 	u_int32_t status;
747 	int ret = 0;
748 
749 	status = BUS_SPACE_RD4_MV0(outbound_doorbell);
750 
751 	if (status)
752 		BUS_SPACE_WRT4_MV0(outbound_doorbell, ~status);
753 
754 	if (status & MVIOP_MU_OUTBOUND_INT_MSG) {
755 		u_int32_t msg = BUS_SPACE_RD4_MV2(outbound_msg);
756 		KdPrint(("hptiop: received outbound msg %x\n", msg));
757 		hptiop_os_message_callback(hba, msg);
758 		ret = 1;
759 	}
760 
761 	if (status & MVIOP_MU_OUTBOUND_INT_POSTQUEUE) {
762 		hptiop_drain_outbound_queue_mv(hba);
763 		ret = 1;
764 	}
765 
766 	return ret;
767 }
768 
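/*
 * Interrupt handler for MVFrey adapters.  Interrupts are masked through
 * pcie_f0_int_enable while the handler runs (once the adapter is initialized).
 * Doorbell bits deliver firmware messages; the outbound completion list is
 * consumed by walking outlist_rptr up to the shadow copy pointer
 * (*outlist_cptr), handing each tag to hptiop_request_callback_mvfrey().
 */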
769 static int hptiop_intr_mvfrey(struct hpt_iop_hba * hba)
770 {
771 	u_int32_t status, _tag, cptr;
772 	int ret = 0;
773 
774 	if (hba->initialized) {
775 		BUS_SPACE_WRT4_MVFREY2(pcie_f0_int_enable, 0);
776 	}
777 
778 	status = BUS_SPACE_RD4_MVFREY2(f0_doorbell);
779 	if (status) {
780 		BUS_SPACE_WRT4_MVFREY2(f0_doorbell, status);
781 		if (status & CPU_TO_F0_DRBL_MSG_A_BIT) {
782 			u_int32_t msg = BUS_SPACE_RD4_MVFREY2(cpu_to_f0_msg_a);
783 			hptiop_os_message_callback(hba, msg);
784 		}
785 		ret = 1;
786 	}
787 
788 	status = BUS_SPACE_RD4_MVFREY2(isr_cause);
789 	if (status) {
790 		BUS_SPACE_WRT4_MVFREY2(isr_cause, status);
791 		do {
792 			cptr = *hba->u.mvfrey.outlist_cptr & 0xff;
793 			while (hba->u.mvfrey.outlist_rptr != cptr) {
794 				hba->u.mvfrey.outlist_rptr++;
795 				if (hba->u.mvfrey.outlist_rptr == hba->u.mvfrey.list_count) {
796 					hba->u.mvfrey.outlist_rptr = 0;
797 				}
798 
799 				_tag = hba->u.mvfrey.outlist[hba->u.mvfrey.outlist_rptr].val;
800 				hptiop_request_callback_mvfrey(hba, _tag);
801 				ret = 2;
802 			}
803 		} while (cptr != (*hba->u.mvfrey.outlist_cptr & 0xff));
804 	}
805 
806 	if (hba->initialized) {
807 		BUS_SPACE_WRT4_MVFREY2(pcie_f0_int_enable, 0x1010);
808 	}
809 
810 	return ret;
811 }
812 
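/*
 * Post a request that already resides in IOP memory (at offset req32 in BAR0)
 * and poll for up to "millisec" milliseconds, calling the interrupt handler by
 * hand, until the firmware writes a non-zero context back into the request
 * header.
 */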
813 static int hptiop_send_sync_request_itl(struct hpt_iop_hba * hba,
814 					u_int32_t req32, u_int32_t millisec)
815 {
816 	u_int32_t i;
817 	u_int64_t temp64;
818 
819 	BUS_SPACE_WRT4_ITL(inbound_queue, req32);
820 	BUS_SPACE_RD4_ITL(outbound_intstatus);
821 
822 	for (i = 0; i < millisec; i++) {
823 		hptiop_intr_itl(hba);
824 		bus_space_read_region_4(hba->bar0t, hba->bar0h, req32 +
825 			offsetof(struct hpt_iop_request_header, context),
826 			(u_int32_t *)&temp64, 2);
827 		if (temp64)
828 			return 0;
829 		DELAY(1000);
830 	}
831 
832 	return -1;
833 }
834 
835 static int hptiop_send_sync_request_mv(struct hpt_iop_hba *hba,
836 					void *req, u_int32_t millisec)
837 {
838 	u_int32_t i;
839 	u_int64_t phy_addr;
840 	hba->config_done = 0;
841 
842 	phy_addr = hba->ctlcfgcmd_phy |
843 			(u_int64_t)MVIOP_MU_QUEUE_ADDR_HOST_BIT;
844 	((struct hpt_iop_request_get_config *)req)->header.flags |=
845 		IOP_REQUEST_FLAG_SYNC_REQUEST |
846 		IOP_REQUEST_FLAG_OUTPUT_CONTEXT;
847 	hptiop_mv_inbound_write(phy_addr, hba);
848 	BUS_SPACE_RD4_MV0(outbound_intmask);
849 
850 	for (i = 0; i < millisec; i++) {
851 		hptiop_intr_mv(hba);
852 		if (hba->config_done)
853 			return 0;
854 		DELAY(1000);
855 	}
856 	return -1;
857 }
858 
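/*
 * Synchronous request submission for MVFrey: the request's physical address
 * and size are placed in the next inbound list slot, the write pointer
 * (including its CL_POINTER_TOGGLE wrap bit) is published through
 * inbound_write_ptr, and completion is detected by polling hba->config_done.
 */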
859 static int hptiop_send_sync_request_mvfrey(struct hpt_iop_hba *hba,
860 					void *req, u_int32_t millisec)
861 {
862 	u_int32_t i, index;
863 	u_int64_t phy_addr;
864 	struct hpt_iop_request_header *reqhdr =
865 		(struct hpt_iop_request_header *)req;
866 
867 	hba->config_done = 0;
868 
869 	phy_addr = hba->ctlcfgcmd_phy;
870 	reqhdr->flags = IOP_REQUEST_FLAG_SYNC_REQUEST
871 					| IOP_REQUEST_FLAG_OUTPUT_CONTEXT
872 					| IOP_REQUEST_FLAG_ADDR_BITS
873 					| ((phy_addr >> 16) & 0xffff0000);
874 	reqhdr->context = ((phy_addr & 0xffffffff) << 32 )
875 					| IOPMU_QUEUE_ADDR_HOST_BIT | reqhdr->type;
876 
877 	hba->u.mvfrey.inlist_wptr++;
878 	index = hba->u.mvfrey.inlist_wptr & 0x3fff;
879 
880 	if (index == hba->u.mvfrey.list_count) {
881 		index = 0;
882 		hba->u.mvfrey.inlist_wptr &= ~0x3fff;
883 		hba->u.mvfrey.inlist_wptr ^= CL_POINTER_TOGGLE;
884 	}
885 
886 	hba->u.mvfrey.inlist[index].addr = phy_addr;
887 	hba->u.mvfrey.inlist[index].intrfc_len = (reqhdr->size + 3) / 4;
888 
889 	BUS_SPACE_WRT4_MVFREY2(inbound_write_ptr, hba->u.mvfrey.inlist_wptr);
890 	BUS_SPACE_RD4_MVFREY2(inbound_write_ptr);
891 
892 	for (i = 0; i < millisec; i++) {
893 		hptiop_intr_mvfrey(hba);
894 		if (hba->config_done)
895 			return 0;
896 		DELAY(1000);
897 	}
898 	return -1;
899 }
900 
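/*
 * Post a message to the adapter and poll the family interrupt handler until
 * hba->msg_done is set or the timeout expires.  Returns 0 on success, -1 on
 * timeout.
 */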
901 static int hptiop_send_sync_msg(struct hpt_iop_hba *hba,
902 					u_int32_t msg, u_int32_t millisec)
903 {
904 	u_int32_t i;
905 
906 	hba->msg_done = 0;
907 	hba->ops->post_msg(hba, msg);
908 
909 	for (i=0; i<millisec; i++) {
910 		hba->ops->iop_intr(hba);
911 		if (hba->msg_done)
912 			break;
913 		DELAY(1000);
914 	}
915 
916 	return hba->msg_done? 0 : -1;
917 }
918 
919 static int hptiop_get_config_itl(struct hpt_iop_hba * hba,
920 				struct hpt_iop_request_get_config * config)
921 {
922 	u_int32_t req32;
923 
924 	config->header.size = sizeof(struct hpt_iop_request_get_config);
925 	config->header.type = IOP_REQUEST_TYPE_GET_CONFIG;
926 	config->header.flags = IOP_REQUEST_FLAG_SYNC_REQUEST;
927 	config->header.result = IOP_RESULT_PENDING;
928 	config->header.context = 0;
929 
930 	req32 = BUS_SPACE_RD4_ITL(inbound_queue);
931 	if (req32 == IOPMU_QUEUE_EMPTY)
932 		return -1;
933 
934 	bus_space_write_region_4(hba->bar0t, hba->bar0h,
935 			req32, (u_int32_t *)config,
936 			sizeof(struct hpt_iop_request_header) >> 2);
937 
938 	if (hptiop_send_sync_request_itl(hba, req32, 20000)) {
939 		KdPrint(("hptiop: get config send cmd failed"));
940 		return -1;
941 	}
942 
943 	bus_space_read_region_4(hba->bar0t, hba->bar0h,
944 			req32, (u_int32_t *)config,
945 			sizeof(struct hpt_iop_request_get_config) >> 2);
946 
947 	BUS_SPACE_WRT4_ITL(outbound_queue, req32);
948 
949 	return 0;
950 }
951 
952 static int hptiop_get_config_mv(struct hpt_iop_hba * hba,
953 				struct hpt_iop_request_get_config * config)
954 {
955 	struct hpt_iop_request_get_config *req;
956 
957 	if (!(req = hba->ctlcfg_ptr))
958 		return -1;
959 
960 	req->header.flags = 0;
961 	req->header.type = IOP_REQUEST_TYPE_GET_CONFIG;
962 	req->header.size = sizeof(struct hpt_iop_request_get_config);
963 	req->header.result = IOP_RESULT_PENDING;
964 	req->header.context = MVIOP_CMD_TYPE_GET_CONFIG;
965 
966 	if (hptiop_send_sync_request_mv(hba, req, 20000)) {
967 		KdPrint(("hptiop: get config send cmd failed"));
968 		return -1;
969 	}
970 
971 	*config = *req;
972 	return 0;
973 }
974 
975 static int hptiop_get_config_mvfrey(struct hpt_iop_hba * hba,
976 				struct hpt_iop_request_get_config * config)
977 {
978 	struct hpt_iop_request_get_config *info = hba->u.mvfrey.config;
979 
980 	if (info->header.size != sizeof(struct hpt_iop_request_get_config) ||
981 	    info->header.type != IOP_REQUEST_TYPE_GET_CONFIG) {
982 		KdPrint(("hptiop: header size %x/%x type %x/%x",
983 			 info->header.size, (int)sizeof(struct hpt_iop_request_get_config),
984 			 info->header.type, IOP_REQUEST_TYPE_GET_CONFIG));
985 		return -1;
986 	}
987 
988 	config->interface_version = info->interface_version;
989 	config->firmware_version = info->firmware_version;
990 	config->max_requests = info->max_requests;
991 	config->request_size = info->request_size;
992 	config->max_sg_count = info->max_sg_count;
993 	config->data_transfer_length = info->data_transfer_length;
994 	config->alignment_mask = info->alignment_mask;
995 	config->max_devices = info->max_devices;
996 	config->sdram_size = info->sdram_size;
997 
998 	KdPrint(("hptiop: maxreq %x reqsz %x datalen %x maxdev %x sdram %x",
999 		 config->max_requests, config->request_size,
1000 		 config->data_transfer_length, config->max_devices,
1001 		 config->sdram_size));
1002 
1003 	return 0;
1004 }
1005 
1006 static int hptiop_set_config_itl(struct hpt_iop_hba *hba,
1007 				struct hpt_iop_request_set_config *config)
1008 {
1009 	u_int32_t req32;
1010 
1011 	req32 = BUS_SPACE_RD4_ITL(inbound_queue);
1012 
1013 	if (req32 == IOPMU_QUEUE_EMPTY)
1014 		return -1;
1015 
1016 	config->header.size = sizeof(struct hpt_iop_request_set_config);
1017 	config->header.type = IOP_REQUEST_TYPE_SET_CONFIG;
1018 	config->header.flags = IOP_REQUEST_FLAG_SYNC_REQUEST;
1019 	config->header.result = IOP_RESULT_PENDING;
1020 	config->header.context = 0;
1021 
1022 	bus_space_write_region_4(hba->bar0t, hba->bar0h, req32,
1023 		(u_int32_t *)config,
1024 		sizeof(struct hpt_iop_request_set_config) >> 2);
1025 
1026 	if (hptiop_send_sync_request_itl(hba, req32, 20000)) {
1027 		KdPrint(("hptiop: set config send cmd failed"));
1028 		return -1;
1029 	}
1030 
1031 	BUS_SPACE_WRT4_ITL(outbound_queue, req32);
1032 
1033 	return 0;
1034 }
1035 
1036 static int hptiop_set_config_mv(struct hpt_iop_hba *hba,
1037 				struct hpt_iop_request_set_config *config)
1038 {
1039 	struct hpt_iop_request_set_config *req;
1040 
1041 	if (!(req = hba->ctlcfg_ptr))
1042 		return -1;
1043 
1044 	memcpy((u_int8_t *)req + sizeof(struct hpt_iop_request_header),
1045 		(u_int8_t *)config + sizeof(struct hpt_iop_request_header),
1046 		sizeof(struct hpt_iop_request_set_config) -
1047 			sizeof(struct hpt_iop_request_header));
1048 
1049 	req->header.flags = 0;
1050 	req->header.type = IOP_REQUEST_TYPE_SET_CONFIG;
1051 	req->header.size = sizeof(struct hpt_iop_request_set_config);
1052 	req->header.result = IOP_RESULT_PENDING;
1053 	req->header.context = MVIOP_CMD_TYPE_SET_CONFIG;
1054 
1055 	if (hptiop_send_sync_request_mv(hba, req, 20000)) {
1056 		KdPrint(("hptiop: set config send cmd failed"));
1057 		return -1;
1058 	}
1059 
1060 	return 0;
1061 }
1062 
1063 static int hptiop_set_config_mvfrey(struct hpt_iop_hba *hba,
1064 				struct hpt_iop_request_set_config *config)
1065 {
1066 	struct hpt_iop_request_set_config *req;
1067 
1068 	if (!(req = hba->ctlcfg_ptr))
1069 		return -1;
1070 
1071 	memcpy((u_int8_t *)req + sizeof(struct hpt_iop_request_header),
1072 		(u_int8_t *)config + sizeof(struct hpt_iop_request_header),
1073 		sizeof(struct hpt_iop_request_set_config) -
1074 			sizeof(struct hpt_iop_request_header));
1075 
1076 	req->header.type = IOP_REQUEST_TYPE_SET_CONFIG;
1077 	req->header.size = sizeof(struct hpt_iop_request_set_config);
1078 	req->header.result = IOP_RESULT_PENDING;
1079 
1080 	if (hptiop_send_sync_request_mvfrey(hba, req, 20000)) {
1081 		KdPrint(("hptiop: set config send cmd failed"));
1082 		return -1;
1083 	}
1084 
1085 	return 0;
1086 }
1087 
1088 static int hptiop_post_ioctl_command_itl(struct hpt_iop_hba *hba,
1089 				u_int32_t req32,
1090 				struct hpt_iop_ioctl_param *pParams)
1091 {
1092 	u_int64_t temp64;
1093 	struct hpt_iop_request_ioctl_command req;
1094 
1095 	if ((((pParams->nInBufferSize + 3) & ~3) + pParams->nOutBufferSize) >
1096 			(hba->max_request_size -
1097 			offsetof(struct hpt_iop_request_ioctl_command, buf))) {
1098 		device_printf(hba->pcidev, "request size beyond max value");
1099 		return -1;
1100 	}
1101 
1102 	req.header.size = offsetof(struct hpt_iop_request_ioctl_command, buf)
1103 		+ pParams->nInBufferSize;
1104 	req.header.type = IOP_REQUEST_TYPE_IOCTL_COMMAND;
1105 	req.header.flags = IOP_REQUEST_FLAG_SYNC_REQUEST;
1106 	req.header.result = IOP_RESULT_PENDING;
1107 	req.header.context = req32 + (u_int64_t)(unsigned long)hba->u.itl.mu;
1108 	req.ioctl_code = HPT_CTL_CODE_BSD_TO_IOP(pParams->dwIoControlCode);
1109 	req.inbuf_size = pParams->nInBufferSize;
1110 	req.outbuf_size = pParams->nOutBufferSize;
1111 	req.bytes_returned = 0;
1112 
1113 	bus_space_write_region_4(hba->bar0t, hba->bar0h, req32, (u_int32_t *)&req,
1114 		offsetof(struct hpt_iop_request_ioctl_command, buf)>>2);
1115 
1116 	hptiop_lock_adapter(hba);
1117 
1118 	BUS_SPACE_WRT4_ITL(inbound_queue, req32);
1119 	BUS_SPACE_RD4_ITL(outbound_intstatus);
1120 
1121 	bus_space_read_region_4(hba->bar0t, hba->bar0h, req32 +
1122 		offsetof(struct hpt_iop_request_ioctl_command, header.context),
1123 		(u_int32_t *)&temp64, 2);
1124 	while (temp64) {
1125 		if (hptiop_sleep(hba, (void *)((unsigned long)hba->u.itl.mu + req32),
1126 				PPAUSE, "hptctl", HPT_OSM_TIMEOUT)==0)
1127 			break;
1128 		hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_RESET, 60000);
1129 		bus_space_read_region_4(hba->bar0t, hba->bar0h,req32 +
1130 			offsetof(struct hpt_iop_request_ioctl_command,
1131 				header.context),
1132 			(u_int32_t *)&temp64, 2);
1133 	}
1134 
1135 	hptiop_unlock_adapter(hba);
1136 	return 0;
1137 }
1138 
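/*
 * copyin()/copyout() operate on kernel virtual addresses, so ioctl payloads
 * are shuttled between user memory and the request buffer in BAR0 one byte at
 * a time through bus_space accessors.
 */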
1139 static int hptiop_bus_space_copyin(struct hpt_iop_hba *hba, u_int32_t bus,
1140 									void *user, int size)
1141 {
1142 	unsigned char byte;
1143 	int i;
1144 
1145 	for (i=0; i<size; i++) {
1146 		if (copyin((u_int8_t *)user + i, &byte, 1))
1147 			return -1;
1148 		bus_space_write_1(hba->bar0t, hba->bar0h, bus + i, byte);
1149 	}
1150 
1151 	return 0;
1152 }
1153 
1154 static int hptiop_bus_space_copyout(struct hpt_iop_hba *hba, u_int32_t bus,
1155 									void *user, int size)
1156 {
1157 	unsigned char byte;
1158 	int i;
1159 
1160 	for (i=0; i<size; i++) {
1161 		byte = bus_space_read_1(hba->bar0t, hba->bar0h, bus + i);
1162 		if (copyout(&byte, (u_int8_t *)user + i, 1))
1163 			return -1;
1164 	}
1165 
1166 	return 0;
1167 }
1168 
1169 static int hptiop_do_ioctl_itl(struct hpt_iop_hba *hba,
1170 				struct hpt_iop_ioctl_param * pParams)
1171 {
1172 	u_int32_t req32;
1173 	u_int32_t result;
1174 
1175 	if ((pParams->Magic != HPT_IOCTL_MAGIC) &&
1176 		(pParams->Magic != HPT_IOCTL_MAGIC32))
1177 		return EFAULT;
1178 
1179 	req32 = BUS_SPACE_RD4_ITL(inbound_queue);
1180 	if (req32 == IOPMU_QUEUE_EMPTY)
1181 		return EFAULT;
1182 
1183 	if (pParams->nInBufferSize)
1184 		if (hptiop_bus_space_copyin(hba, req32 +
1185 			offsetof(struct hpt_iop_request_ioctl_command, buf),
1186 			(void *)pParams->lpInBuffer, pParams->nInBufferSize))
1187 			goto invalid;
1188 
1189 	if (hptiop_post_ioctl_command_itl(hba, req32, pParams))
1190 		goto invalid;
1191 
1192 	result = bus_space_read_4(hba->bar0t, hba->bar0h, req32 +
1193 			offsetof(struct hpt_iop_request_ioctl_command,
1194 				header.result));
1195 
1196 	if (result == IOP_RESULT_SUCCESS) {
1197 		if (pParams->nOutBufferSize)
1198 			if (hptiop_bus_space_copyout(hba, req32 +
1199 				offsetof(struct hpt_iop_request_ioctl_command, buf) +
1200 					((pParams->nInBufferSize + 3) & ~3),
1201 				(void *)pParams->lpOutBuffer, pParams->nOutBufferSize))
1202 				goto invalid;
1203 
1204 		if (pParams->lpBytesReturned) {
1205 			if (hptiop_bus_space_copyout(hba, req32 +
1206 				offsetof(struct hpt_iop_request_ioctl_command, bytes_returned),
1207 				(void *)pParams->lpBytesReturned, sizeof(unsigned  long)))
1208 				goto invalid;
1209 		}
1210 
1211 		BUS_SPACE_WRT4_ITL(outbound_queue, req32);
1212 
1213 		return 0;
1214 	} else{
1215 invalid:
1216 		BUS_SPACE_WRT4_ITL(outbound_queue, req32);
1217 
1218 		return EFAULT;
1219 	}
1220 }
1221 
1222 static int hptiop_post_ioctl_command_mv(struct hpt_iop_hba *hba,
1223 				struct hpt_iop_request_ioctl_command *req,
1224 				struct hpt_iop_ioctl_param *pParams)
1225 {
1226 	u_int64_t req_phy;
1227 	int size = 0;
1228 
1229 	if ((((pParams->nInBufferSize + 3) & ~3) + pParams->nOutBufferSize) >
1230 			(hba->max_request_size -
1231 			offsetof(struct hpt_iop_request_ioctl_command, buf))) {
1232 		device_printf(hba->pcidev, "request size beyond max value");
1233 		return -1;
1234 	}
1235 
1236 	req->ioctl_code = HPT_CTL_CODE_BSD_TO_IOP(pParams->dwIoControlCode);
1237 	req->inbuf_size = pParams->nInBufferSize;
1238 	req->outbuf_size = pParams->nOutBufferSize;
1239 	req->header.size = offsetof(struct hpt_iop_request_ioctl_command, buf)
1240 					+ pParams->nInBufferSize;
1241 	req->header.context = (u_int64_t)MVIOP_CMD_TYPE_IOCTL;
1242 	req->header.type = IOP_REQUEST_TYPE_IOCTL_COMMAND;
1243 	req->header.result = IOP_RESULT_PENDING;
1244 	req->header.flags = IOP_REQUEST_FLAG_OUTPUT_CONTEXT;
1245 	size = req->header.size >> 8;
1246 	size = imin(3, size);
1247 	req_phy = hba->ctlcfgcmd_phy | MVIOP_MU_QUEUE_ADDR_HOST_BIT | size;
1248 	hptiop_mv_inbound_write(req_phy, hba);
1249 
1250 	BUS_SPACE_RD4_MV0(outbound_intmask);
1251 
1252 	while (hba->config_done == 0) {
1253 		if (hptiop_sleep(hba, req, PPAUSE,
1254 			"hptctl", HPT_OSM_TIMEOUT)==0)
1255 			continue;
1256 		hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_RESET, 60000);
1257 	}
1258 	return 0;
1259 }
1260 
1261 static int hptiop_do_ioctl_mv(struct hpt_iop_hba *hba,
1262 				struct hpt_iop_ioctl_param *pParams)
1263 {
1264 	struct hpt_iop_request_ioctl_command *req;
1265 
1266 	if ((pParams->Magic != HPT_IOCTL_MAGIC) &&
1267 		(pParams->Magic != HPT_IOCTL_MAGIC32))
1268 		return EFAULT;
1269 
1270 	req = (struct hpt_iop_request_ioctl_command *)(hba->ctlcfg_ptr);
1271 	hba->config_done = 0;
1272 	hptiop_lock_adapter(hba);
1273 	if (pParams->nInBufferSize)
1274 		if (copyin((void *)pParams->lpInBuffer,
1275 				req->buf, pParams->nInBufferSize))
1276 			goto invalid;
1277 	if (hptiop_post_ioctl_command_mv(hba, req, pParams))
1278 		goto invalid;
1279 
1280 	if (hba->config_done == 1) {
1281 		if (pParams->nOutBufferSize)
1282 			if (copyout(req->buf +
1283 				((pParams->nInBufferSize + 3) & ~3),
1284 				(void *)pParams->lpOutBuffer,
1285 				pParams->nOutBufferSize))
1286 				goto invalid;
1287 
1288 		if (pParams->lpBytesReturned)
1289 			if (copyout(&req->bytes_returned,
1290 				(void*)pParams->lpBytesReturned,
1291 				sizeof(u_int32_t)))
1292 				goto invalid;
1293 		hptiop_unlock_adapter(hba);
1294 		return 0;
1295 	} else{
1296 invalid:
1297 		hptiop_unlock_adapter(hba);
1298 		return EFAULT;
1299 	}
1300 }
1301 
1302 static int hptiop_post_ioctl_command_mvfrey(struct hpt_iop_hba *hba,
1303 				struct hpt_iop_request_ioctl_command *req,
1304 				struct hpt_iop_ioctl_param *pParams)
1305 {
1306 	u_int64_t phy_addr;
1307 	u_int32_t index;
1308 
1309 	phy_addr = hba->ctlcfgcmd_phy;
1310 
1311 	if ((((pParams->nInBufferSize + 3) & ~3) + pParams->nOutBufferSize) >
1312 			(hba->max_request_size -
1313 			offsetof(struct hpt_iop_request_ioctl_command, buf))) {
1314 		device_printf(hba->pcidev, "request size beyond max value");
1315 		return -1;
1316 	}
1317 
1318 	req->ioctl_code = HPT_CTL_CODE_BSD_TO_IOP(pParams->dwIoControlCode);
1319 	req->inbuf_size = pParams->nInBufferSize;
1320 	req->outbuf_size = pParams->nOutBufferSize;
1321 	req->header.size = offsetof(struct hpt_iop_request_ioctl_command, buf)
1322 					+ pParams->nInBufferSize;
1323 
1324 	req->header.type = IOP_REQUEST_TYPE_IOCTL_COMMAND;
1325 	req->header.result = IOP_RESULT_PENDING;
1326 
1327 	req->header.flags = IOP_REQUEST_FLAG_SYNC_REQUEST
1328 						| IOP_REQUEST_FLAG_OUTPUT_CONTEXT
1329 						| IOP_REQUEST_FLAG_ADDR_BITS
1330 						| ((phy_addr >> 16) & 0xffff0000);
1331 	req->header.context = ((phy_addr & 0xffffffff) << 32 )
1332 						| IOPMU_QUEUE_ADDR_HOST_BIT | req->header.type;
1333 
1334 	hba->u.mvfrey.inlist_wptr++;
1335 	index = hba->u.mvfrey.inlist_wptr & 0x3fff;
1336 
1337 	if (index == hba->u.mvfrey.list_count) {
1338 		index = 0;
1339 		hba->u.mvfrey.inlist_wptr &= ~0x3fff;
1340 		hba->u.mvfrey.inlist_wptr ^= CL_POINTER_TOGGLE;
1341 	}
1342 
1343 	hba->u.mvfrey.inlist[index].addr = phy_addr;
1344 	hba->u.mvfrey.inlist[index].intrfc_len = (req->header.size + 3) / 4;
1345 
1346 	BUS_SPACE_WRT4_MVFREY2(inbound_write_ptr, hba->u.mvfrey.inlist_wptr);
1347 	BUS_SPACE_RD4_MVFREY2(inbound_write_ptr);
1348 
1349 	while (hba->config_done == 0) {
1350 		if (hptiop_sleep(hba, req, PPAUSE,
1351 			"hptctl", HPT_OSM_TIMEOUT)==0)
1352 			continue;
1353 		hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_RESET, 60000);
1354 	}
1355 	return 0;
1356 }
1357 
1358 static int hptiop_do_ioctl_mvfrey(struct hpt_iop_hba *hba,
1359 				struct hpt_iop_ioctl_param *pParams)
1360 {
1361 	struct hpt_iop_request_ioctl_command *req;
1362 
1363 	if ((pParams->Magic != HPT_IOCTL_MAGIC) &&
1364 		(pParams->Magic != HPT_IOCTL_MAGIC32))
1365 		return EFAULT;
1366 
1367 	req = (struct hpt_iop_request_ioctl_command *)(hba->ctlcfg_ptr);
1368 	hba->config_done = 0;
1369 	hptiop_lock_adapter(hba);
1370 	if (pParams->nInBufferSize)
1371 		if (copyin((void *)pParams->lpInBuffer,
1372 				req->buf, pParams->nInBufferSize))
1373 			goto invalid;
1374 	if (hptiop_post_ioctl_command_mvfrey(hba, req, pParams))
1375 		goto invalid;
1376 
1377 	if (hba->config_done == 1) {
1378 		if (pParams->nOutBufferSize)
1379 			if (copyout(req->buf +
1380 				((pParams->nInBufferSize + 3) & ~3),
1381 				(void *)pParams->lpOutBuffer,
1382 				pParams->nOutBufferSize))
1383 				goto invalid;
1384 
1385 		if (pParams->lpBytesReturned)
1386 			if (copyout(&req->bytes_returned,
1387 				(void*)pParams->lpBytesReturned,
1388 				sizeof(u_int32_t)))
1389 				goto invalid;
1390 		hptiop_unlock_adapter(hba);
1391 		return 0;
1392 	} else{
1393 invalid:
1394 		hptiop_unlock_adapter(hba);
1395 		return EFAULT;
1396 	}
1397 }
1398 
1399 static int  hptiop_rescan_bus(struct hpt_iop_hba * hba)
1400 {
1401 	union ccb           *ccb;
1402 
1403 	if ((ccb = xpt_alloc_ccb()) == NULL)
1404 		return(ENOMEM);
1405 	if (xpt_create_path(&ccb->ccb_h.path, NULL, cam_sim_path(hba->sim),
1406 		CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
1407 		xpt_free_ccb(ccb);
1408 		return(EIO);
1409 	}
1410 	xpt_rescan(ccb);
1411 	return(0);
1412 }
1413 
1414 static  bus_dmamap_callback_t   hptiop_map_srb;
1415 static  bus_dmamap_callback_t   hptiop_post_scsi_command;
1416 static  bus_dmamap_callback_t   hptiop_mv_map_ctlcfg;
1417 static	bus_dmamap_callback_t	hptiop_mvfrey_map_ctlcfg;
1418 
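/*
 * Per-family PCI resource setup: ITL controllers expose a single memory BAR
 * (rid 0x10) containing the message unit, while MV and MVFrey controllers
 * additionally map BAR2 (rid 0x18) for their message units.
 */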
1419 static int hptiop_alloc_pci_res_itl(struct hpt_iop_hba *hba)
1420 {
1421 	hba->bar0_rid = 0x10;
1422 	hba->bar0_res = bus_alloc_resource_any(hba->pcidev,
1423 			SYS_RES_MEMORY, &hba->bar0_rid, RF_ACTIVE);
1424 
1425 	if (hba->bar0_res == NULL) {
1426 		device_printf(hba->pcidev,
1427 			"failed to get iop base address.\n");
1428 		return -1;
1429 	}
1430 	hba->bar0t = rman_get_bustag(hba->bar0_res);
1431 	hba->bar0h = rman_get_bushandle(hba->bar0_res);
1432 	hba->u.itl.mu = (struct hpt_iopmu_itl *)
1433 				rman_get_virtual(hba->bar0_res);
1434 
1435 	if (!hba->u.itl.mu) {
1436 		bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
1437 					hba->bar0_rid, hba->bar0_res);
1438 		device_printf(hba->pcidev, "alloc mem res failed\n");
1439 		return -1;
1440 	}
1441 
1442 	return 0;
1443 }
1444 
1445 static int hptiop_alloc_pci_res_mv(struct hpt_iop_hba *hba)
1446 {
1447 	hba->bar0_rid = 0x10;
1448 	hba->bar0_res = bus_alloc_resource_any(hba->pcidev,
1449 			SYS_RES_MEMORY, &hba->bar0_rid, RF_ACTIVE);
1450 
1451 	if (hba->bar0_res == NULL) {
1452 		device_printf(hba->pcidev, "failed to get iop bar0.\n");
1453 		return -1;
1454 	}
1455 	hba->bar0t = rman_get_bustag(hba->bar0_res);
1456 	hba->bar0h = rman_get_bushandle(hba->bar0_res);
1457 	hba->u.mv.regs = (struct hpt_iopmv_regs *)
1458 				rman_get_virtual(hba->bar0_res);
1459 
1460 	if (!hba->u.mv.regs) {
1461 		bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
1462 					hba->bar0_rid, hba->bar0_res);
1463 		device_printf(hba->pcidev, "alloc bar0 mem res failed\n");
1464 		return -1;
1465 	}
1466 
1467 	hba->bar2_rid = 0x18;
1468 	hba->bar2_res = bus_alloc_resource_any(hba->pcidev,
1469 			SYS_RES_MEMORY, &hba->bar2_rid, RF_ACTIVE);
1470 
1471 	if (hba->bar2_res == NULL) {
1472 		bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
1473 					hba->bar0_rid, hba->bar0_res);
1474 		device_printf(hba->pcidev, "failed to get iop bar2.\n");
1475 		return -1;
1476 	}
1477 
1478 	hba->bar2t = rman_get_bustag(hba->bar2_res);
1479 	hba->bar2h = rman_get_bushandle(hba->bar2_res);
1480 	hba->u.mv.mu = (struct hpt_iopmu_mv *)rman_get_virtual(hba->bar2_res);
1481 
1482 	if (!hba->u.mv.mu) {
1483 		bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
1484 					hba->bar0_rid, hba->bar0_res);
1485 		bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
1486 					hba->bar2_rid, hba->bar2_res);
1487 		device_printf(hba->pcidev, "alloc mem bar2 res failed\n");
1488 		return -1;
1489 	}
1490 
1491 	return 0;
1492 }
1493 
1494 static int hptiop_alloc_pci_res_mvfrey(struct hpt_iop_hba *hba)
1495 {
1496 	hba->bar0_rid = 0x10;
1497 	hba->bar0_res = bus_alloc_resource_any(hba->pcidev,
1498 			SYS_RES_MEMORY, &hba->bar0_rid, RF_ACTIVE);
1499 
1500 	if (hba->bar0_res == NULL) {
1501 		device_printf(hba->pcidev, "failed to get iop bar0.\n");
1502 		return -1;
1503 	}
1504 	hba->bar0t = rman_get_bustag(hba->bar0_res);
1505 	hba->bar0h = rman_get_bushandle(hba->bar0_res);
1506 	hba->u.mvfrey.config = (struct hpt_iop_request_get_config *)
1507 				rman_get_virtual(hba->bar0_res);
1508 
1509 	if (!hba->u.mvfrey.config) {
1510 		bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
1511 					hba->bar0_rid, hba->bar0_res);
1512 		device_printf(hba->pcidev, "alloc bar0 mem res failed\n");
1513 		return -1;
1514 	}
1515 
1516 	hba->bar2_rid = 0x18;
1517 	hba->bar2_res = bus_alloc_resource_any(hba->pcidev,
1518 			SYS_RES_MEMORY, &hba->bar2_rid, RF_ACTIVE);
1519 
1520 	if (hba->bar2_res == NULL) {
1521 		bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
1522 					hba->bar0_rid, hba->bar0_res);
1523 		device_printf(hba->pcidev, "failed to get iop bar2.\n");
1524 		return -1;
1525 	}
1526 
1527 	hba->bar2t = rman_get_bustag(hba->bar2_res);
1528 	hba->bar2h = rman_get_bushandle(hba->bar2_res);
1529 	hba->u.mvfrey.mu =
1530 		(struct hpt_iopmu_mvfrey *)rman_get_virtual(hba->bar2_res);
1531 
1532 	if (!hba->u.mvfrey.mu) {
1533 		bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
1534 					hba->bar0_rid, hba->bar0_res);
1535 		bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
1536 					hba->bar2_rid, hba->bar2_res);
1537 		device_printf(hba->pcidev, "alloc mem bar2 res failed\n");
1538 		return -1;
1539 	}
1540 
1541 	return 0;
1542 }
1543 
1544 static void hptiop_release_pci_res_itl(struct hpt_iop_hba *hba)
1545 {
1546 	if (hba->bar0_res)
1547 		bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
1548 			hba->bar0_rid, hba->bar0_res);
1549 }
1550 
1551 static void hptiop_release_pci_res_mv(struct hpt_iop_hba *hba)
1552 {
1553 	if (hba->bar0_res)
1554 		bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
1555 			hba->bar0_rid, hba->bar0_res);
1556 	if (hba->bar2_res)
1557 		bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
1558 			hba->bar2_rid, hba->bar2_res);
1559 }
1560 
1561 static void hptiop_release_pci_res_mvfrey(struct hpt_iop_hba *hba)
1562 {
1563 	if (hba->bar0_res)
1564 		bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
1565 			hba->bar0_rid, hba->bar0_res);
1566 	if (hba->bar2_res)
1567 		bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
1568 			hba->bar2_rid, hba->bar2_res);
1569 }
1570 
1571 static int hptiop_internal_memalloc_mv(struct hpt_iop_hba *hba)
1572 {
1573 	if (bus_dma_tag_create(hba->parent_dmat,
1574 				1,
1575 				0,
1576 				BUS_SPACE_MAXADDR_32BIT,
1577 				BUS_SPACE_MAXADDR,
1578 				NULL, NULL,
1579 				0x800 - 0x8,
1580 				1,
1581 				BUS_SPACE_MAXSIZE_32BIT,
1582 				BUS_DMA_ALLOCNOW,
1583 				NULL,
1584 				NULL,
1585 				&hba->ctlcfg_dmat)) {
1586 		device_printf(hba->pcidev, "alloc ctlcfg_dmat failed\n");
1587 		return -1;
1588 	}
1589 
1590 	if (bus_dmamem_alloc(hba->ctlcfg_dmat, (void **)&hba->ctlcfg_ptr,
1591 		BUS_DMA_WAITOK | BUS_DMA_COHERENT,
1592 		&hba->ctlcfg_dmamap) != 0) {
1593 			device_printf(hba->pcidev,
1594 					"bus_dmamem_alloc failed!\n");
1595 			bus_dma_tag_destroy(hba->ctlcfg_dmat);
1596 			return -1;
1597 	}
1598 
1599 	if (bus_dmamap_load(hba->ctlcfg_dmat,
1600 			hba->ctlcfg_dmamap, hba->ctlcfg_ptr,
1601 			MVIOP_IOCTLCFG_SIZE,
1602 			hptiop_mv_map_ctlcfg, hba, 0)) {
1603 		device_printf(hba->pcidev, "bus_dmamap_load failed!\n");
1604 		if (hba->ctlcfg_dmat) {
1605 			bus_dmamem_free(hba->ctlcfg_dmat,
1606 				hba->ctlcfg_ptr, hba->ctlcfg_dmamap);
1607 			bus_dma_tag_destroy(hba->ctlcfg_dmat);
1608 		}
1609 		return -1;
1610 	}
1611 
1612 	return 0;
1613 }
1614 
1615 static int hptiop_internal_memalloc_mvfrey(struct hpt_iop_hba *hba)
1616 {
1617 	u_int32_t list_count = BUS_SPACE_RD4_MVFREY2(inbound_conf_ctl);
1618 
1619 	list_count >>= 16;
1620 
1621 	if (list_count == 0) {
1622 		return -1;
1623 	}
1624 
1625 	hba->u.mvfrey.list_count = list_count;
1626 	hba->u.mvfrey.internal_mem_size = 0x800
1627 							+ list_count * sizeof(struct mvfrey_inlist_entry)
1628 							+ list_count * sizeof(struct mvfrey_outlist_entry)
1629 							+ sizeof(int);
1630 	if (bus_dma_tag_create(hba->parent_dmat,
1631 				1,
1632 				0,
1633 				BUS_SPACE_MAXADDR_32BIT,
1634 				BUS_SPACE_MAXADDR,
1635 				NULL, NULL,
1636 				hba->u.mvfrey.internal_mem_size,
1637 				1,
1638 				BUS_SPACE_MAXSIZE_32BIT,
1639 				BUS_DMA_ALLOCNOW,
1640 				NULL,
1641 				NULL,
1642 				&hba->ctlcfg_dmat)) {
1643 		device_printf(hba->pcidev, "alloc ctlcfg_dmat failed\n");
1644 		return -1;
1645 	}
1646 
1647 	if (bus_dmamem_alloc(hba->ctlcfg_dmat, (void **)&hba->ctlcfg_ptr,
1648 		BUS_DMA_WAITOK | BUS_DMA_COHERENT,
1649 		&hba->ctlcfg_dmamap) != 0) {
1650 			device_printf(hba->pcidev,
1651 					"bus_dmamem_alloc failed!\n");
1652 			bus_dma_tag_destroy(hba->ctlcfg_dmat);
1653 			return -1;
1654 	}
1655 
1656 	if (bus_dmamap_load(hba->ctlcfg_dmat,
1657 			hba->ctlcfg_dmamap, hba->ctlcfg_ptr,
1658 			hba->u.mvfrey.internal_mem_size,
1659 			hptiop_mvfrey_map_ctlcfg, hba, 0)) {
1660 		device_printf(hba->pcidev, "bus_dmamap_load failed!\n");
1661 		if (hba->ctlcfg_dmat) {
1662 			bus_dmamem_free(hba->ctlcfg_dmat,
1663 				hba->ctlcfg_ptr, hba->ctlcfg_dmamap);
1664 			bus_dma_tag_destroy(hba->ctlcfg_dmat);
1665 		}
1666 		return -1;
1667 	}
1668 
1669 	return 0;
1670 }
1671 
1672 static int hptiop_internal_memfree_itl(struct hpt_iop_hba *hba) {
1673 	return 0;
1674 }
1675 
1676 static int hptiop_internal_memfree_mv(struct hpt_iop_hba *hba)
1677 {
1678 	if (hba->ctlcfg_dmat) {
1679 		bus_dmamap_unload(hba->ctlcfg_dmat, hba->ctlcfg_dmamap);
1680 		bus_dmamem_free(hba->ctlcfg_dmat,
1681 					hba->ctlcfg_ptr, hba->ctlcfg_dmamap);
1682 		bus_dma_tag_destroy(hba->ctlcfg_dmat);
1683 	}
1684 
1685 	return 0;
1686 }
1687 
1688 static int hptiop_internal_memfree_mvfrey(struct hpt_iop_hba *hba)
1689 {
1690 	if (hba->ctlcfg_dmat) {
1691 		bus_dmamap_unload(hba->ctlcfg_dmat, hba->ctlcfg_dmamap);
1692 		bus_dmamem_free(hba->ctlcfg_dmat,
1693 					hba->ctlcfg_ptr, hba->ctlcfg_dmamap);
1694 		bus_dma_tag_destroy(hba->ctlcfg_dmat);
1695 	}
1696 
1697 	return 0;
1698 }
1699 
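/*
 * Re-establish the MVFrey communication area after a RESET_COMM message:
 * program the inbound/outbound list base addresses and the outbound shadow
 * copy pointer, then reinitialize the driver's read/write pointers and
 * CL_POINTER_TOGGLE state.
 */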
1700 static int hptiop_reset_comm_mvfrey(struct hpt_iop_hba *hba)
1701 {
1702 	u_int32_t i = 100;
1703 
1704 	if (hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_RESET_COMM, 3000))
1705 		return -1;
1706 
1707 	/* wait 100ms for MCU ready */
1708 	while(i--) {
1709 		DELAY(1000);
1710 	}
1711 
1712 	BUS_SPACE_WRT4_MVFREY2(inbound_base,
1713 							hba->u.mvfrey.inlist_phy & 0xffffffff);
1714 	BUS_SPACE_WRT4_MVFREY2(inbound_base_high,
1715 							(hba->u.mvfrey.inlist_phy >> 16) >> 16);
1716 
1717 	BUS_SPACE_WRT4_MVFREY2(outbound_base,
1718 							hba->u.mvfrey.outlist_phy & 0xffffffff);
1719 	BUS_SPACE_WRT4_MVFREY2(outbound_base_high,
1720 							(hba->u.mvfrey.outlist_phy >> 16) >> 16);
1721 
1722 	BUS_SPACE_WRT4_MVFREY2(outbound_shadow_base,
1723 							hba->u.mvfrey.outlist_cptr_phy & 0xffffffff);
1724 	BUS_SPACE_WRT4_MVFREY2(outbound_shadow_base_high,
1725 							(hba->u.mvfrey.outlist_cptr_phy >> 16) >> 16);
1726 
1727 	hba->u.mvfrey.inlist_wptr = (hba->u.mvfrey.list_count - 1)
1728 								| CL_POINTER_TOGGLE;
1729 	*hba->u.mvfrey.outlist_cptr = (hba->u.mvfrey.list_count - 1)
1730 								| CL_POINTER_TOGGLE;
1731 	hba->u.mvfrey.outlist_rptr = hba->u.mvfrey.list_count - 1;
1732 
1733 	return 0;
1734 }
1735 
1736 /*
1737  * CAM driver interface
1738  */
1739 static device_method_t driver_methods[] = {
1740 	/* Device interface */
1741 	DEVMETHOD(device_probe,     hptiop_probe),
1742 	DEVMETHOD(device_attach,    hptiop_attach),
1743 	DEVMETHOD(device_detach,    hptiop_detach),
1744 	DEVMETHOD(device_shutdown,  hptiop_shutdown),
1745 	{ 0, 0 }
1746 };
1747 
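/*
 * Per-family operation tables.  Entries left NULL (ITL has no internal
 * memory allocator, ITL and MV have no reset_comm) are simply never called
 * by attach for those families.
 */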
1748 static struct hptiop_adapter_ops hptiop_itl_ops = {
1749 	.family	           = INTEL_BASED_IOP,
1750 	.iop_wait_ready    = hptiop_wait_ready_itl,
1751 	.internal_memalloc = NULL,
1752 	.internal_memfree  = hptiop_internal_memfree_itl,
1753 	.alloc_pci_res     = hptiop_alloc_pci_res_itl,
1754 	.release_pci_res   = hptiop_release_pci_res_itl,
1755 	.enable_intr       = hptiop_enable_intr_itl,
1756 	.disable_intr      = hptiop_disable_intr_itl,
1757 	.get_config        = hptiop_get_config_itl,
1758 	.set_config        = hptiop_set_config_itl,
1759 	.iop_intr          = hptiop_intr_itl,
1760 	.post_msg          = hptiop_post_msg_itl,
1761 	.post_req          = hptiop_post_req_itl,
1762 	.do_ioctl          = hptiop_do_ioctl_itl,
1763 	.reset_comm        = NULL,
1764 };
1765 
1766 static struct hptiop_adapter_ops hptiop_mv_ops = {
1767 	.family	           = MV_BASED_IOP,
1768 	.iop_wait_ready    = hptiop_wait_ready_mv,
1769 	.internal_memalloc = hptiop_internal_memalloc_mv,
1770 	.internal_memfree  = hptiop_internal_memfree_mv,
1771 	.alloc_pci_res     = hptiop_alloc_pci_res_mv,
1772 	.release_pci_res   = hptiop_release_pci_res_mv,
1773 	.enable_intr       = hptiop_enable_intr_mv,
1774 	.disable_intr      = hptiop_disable_intr_mv,
1775 	.get_config        = hptiop_get_config_mv,
1776 	.set_config        = hptiop_set_config_mv,
1777 	.iop_intr          = hptiop_intr_mv,
1778 	.post_msg          = hptiop_post_msg_mv,
1779 	.post_req          = hptiop_post_req_mv,
1780 	.do_ioctl          = hptiop_do_ioctl_mv,
1781 	.reset_comm        = NULL,
1782 };
1783 
1784 static struct hptiop_adapter_ops hptiop_mvfrey_ops = {
1785 	.family	           = MVFREY_BASED_IOP,
1786 	.iop_wait_ready    = hptiop_wait_ready_mvfrey,
1787 	.internal_memalloc = hptiop_internal_memalloc_mvfrey,
1788 	.internal_memfree  = hptiop_internal_memfree_mvfrey,
1789 	.alloc_pci_res     = hptiop_alloc_pci_res_mvfrey,
1790 	.release_pci_res   = hptiop_release_pci_res_mvfrey,
1791 	.enable_intr       = hptiop_enable_intr_mvfrey,
1792 	.disable_intr      = hptiop_disable_intr_mvfrey,
1793 	.get_config        = hptiop_get_config_mvfrey,
1794 	.set_config        = hptiop_set_config_mvfrey,
1795 	.iop_intr          = hptiop_intr_mvfrey,
1796 	.post_msg          = hptiop_post_msg_mvfrey,
1797 	.post_req          = hptiop_post_req_mvfrey,
1798 	.do_ioctl          = hptiop_do_ioctl_mvfrey,
1799 	.reset_comm        = hptiop_reset_comm_mvfrey,
1800 };
1801 
1802 static driver_t hptiop_pci_driver = {
1803 	driver_name,
1804 	driver_methods,
1805 	sizeof(struct hpt_iop_hba)
1806 };
1807 
1808 DRIVER_MODULE(hptiop, pci, hptiop_pci_driver, hptiop_devclass, 0, 0);
1809 MODULE_DEPEND(hptiop, cam, 1, 1, 1);
1810 
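/*
 * Probe: match HighPoint (vendor 0x1103) adapters and pick the per-family
 * ops table from the PCI device ID.  The SAS models share the fall-through
 * code paths of their SATA siblings; only the description string differs.
 */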
1811 static int hptiop_probe(device_t dev)
1812 {
1813 	struct hpt_iop_hba *hba;
1814 	u_int32_t id;
1815 	static char buf[256];
1816 	int sas = 0;
1817 	struct hptiop_adapter_ops *ops;
1818 
1819 	if (pci_get_vendor(dev) != 0x1103)
1820 		return (ENXIO);
1821 
1822 	id = pci_get_device(dev);
1823 
1824 	switch (id) {
1825 		case 0x4520:
1826 		case 0x4521:
1827 		case 0x4522:
1828 			sas = 1;	/* FALLTHROUGH */
1829 		case 0x3620:
1830 		case 0x3622:
1831 		case 0x3640:
1832 			ops = &hptiop_mvfrey_ops;
1833 			break;
1834 		case 0x4210:
1835 		case 0x4211:
1836 		case 0x4310:
1837 		case 0x4311:
1838 		case 0x4320:
1839 		case 0x4321:
1840 		case 0x4322:
1841 			sas = 1;	/* FALLTHROUGH */
1842 		case 0x3220:
1843 		case 0x3320:
1844 		case 0x3410:
1845 		case 0x3520:
1846 		case 0x3510:
1847 		case 0x3511:
1848 		case 0x3521:
1849 		case 0x3522:
1850 		case 0x3530:
1851 		case 0x3540:
1852 		case 0x3560:
1853 			ops = &hptiop_itl_ops;
1854 			break;
1855 		case 0x3020:
1856 		case 0x3120:
1857 		case 0x3122:
1858 			ops = &hptiop_mv_ops;
1859 			break;
1860 		default:
1861 			return (ENXIO);
1862 	}
1863 
1864 	device_printf(dev, "adapter at PCI %d:%d:%d, IRQ %d\n",
1865 		pci_get_bus(dev), pci_get_slot(dev),
1866 		pci_get_function(dev), pci_get_irq(dev));
1867 
1868 	snprintf(buf, sizeof(buf), "RocketRAID %x %s Controller",
1869 				id, sas ? "SAS" : "SATA");
1870 	device_set_desc_copy(dev, buf);
1871 
1872 	hba = (struct hpt_iop_hba *)device_get_softc(dev);
1873 	bzero(hba, sizeof(struct hpt_iop_hba));
1874 	hba->ops = ops;
1875 
1876 	KdPrint(("hba->ops=%p\n", hba->ops));
1877 	return 0;
1878 }
1879 
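/*
 * Attach: map PCI resources, wait for the IOP to become ready, create the
 * parent/io/srb DMA tags, pull the firmware configuration, register a CAM
 * SIM and bus, hook up the interrupt and start the firmware's background
 * task before exposing the ioctl device node.
 */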
1880 static int hptiop_attach(device_t dev)
1881 {
1882 	struct hpt_iop_hba *hba = (struct hpt_iop_hba *)device_get_softc(dev);
1883 	struct hpt_iop_request_get_config  iop_config;
1884 	struct hpt_iop_request_set_config  set_config;
1885 	int rid = 0;
1886 	struct cam_devq *devq;
1887 	struct ccb_setasync ccb;
1888 	u_int32_t unit = device_get_unit(dev);
1889 
1890 	device_printf(dev, "%d RocketRAID 3xxx/4xxx controller driver %s\n",
1891 			unit, driver_version);
1892 
1893 	KdPrint(("hptiop: attach(%d, %d/%d/%d) ops=%p\n", unit,
1894 		pci_get_bus(dev), pci_get_slot(dev),
1895 		pci_get_function(dev), hba->ops));
1896 
1897 	pci_enable_busmaster(dev);
1898 	hba->pcidev = dev;
1899 	hba->pciunit = unit;
1900 
1901 	if (hba->ops->alloc_pci_res(hba))
1902 		return ENXIO;
1903 
1904 	if (hba->ops->iop_wait_ready(hba, 2000)) {
1905 		device_printf(dev, "adapter is not ready\n");
1906 		goto release_pci_res;
1907 	}
1908 
1909 	mtx_init(&hba->lock, "hptioplock", NULL, MTX_DEF);
1910 
1911 	if (bus_dma_tag_create(bus_get_dma_tag(dev),/* PCI parent */
1912 			1,  /* alignment */
1913 			0, /* boundary */
1914 			BUS_SPACE_MAXADDR,  /* lowaddr */
1915 			BUS_SPACE_MAXADDR,  /* highaddr */
1916 			NULL, NULL,         /* filter, filterarg */
1917 			BUS_SPACE_MAXSIZE_32BIT,    /* maxsize */
1918 			BUS_SPACE_UNRESTRICTED, /* nsegments */
1919 			BUS_SPACE_MAXSIZE_32BIT,    /* maxsegsize */
1920 			0,      /* flags */
1921 			NULL,   /* lockfunc */
1922 			NULL,       /* lockfuncarg */
1923 			&hba->parent_dmat   /* tag */))
1924 	{
1925 		device_printf(dev, "alloc parent_dmat failed\n");
1926 		goto release_pci_res;
1927 	}
1928 
1929 	if (hba->ops->family == MV_BASED_IOP) {
1930 		if (hba->ops->internal_memalloc(hba)) {
1931 			device_printf(dev, "alloc srb_dmat failed\n");
1932 			goto destroy_parent_tag;
1933 		}
1934 	}
1935 
1936 	if (hba->ops->get_config(hba, &iop_config)) {
1937 		device_printf(dev, "get iop config failed.\n");
1938 		goto get_config_failed;
1939 	}
1940 
1941 	hba->firmware_version = iop_config.firmware_version;
1942 	hba->interface_version = iop_config.interface_version;
1943 	hba->max_requests = iop_config.max_requests;
1944 	hba->max_devices = iop_config.max_devices;
1945 	hba->max_request_size = iop_config.request_size;
1946 	hba->max_sg_count = iop_config.max_sg_count;
1947 
1948 	if (hba->ops->family == MVFREY_BASED_IOP) {
1949 		if (hba->ops->internal_memalloc(hba)) {
1950 			device_printf(dev, "alloc srb_dmat failed\n");
1951 			goto destroy_parent_tag;
1952 		}
1953 		if (hba->ops->reset_comm(hba)) {
1954 			device_printf(dev, "reset comm failed\n");
1955 			goto get_config_failed;
1956 		}
1957 	}
1958 
1959 	if (bus_dma_tag_create(hba->parent_dmat,/* parent */
1960 			4,  /* alignment */
1961 			BUS_SPACE_MAXADDR_32BIT+1, /* boundary */
1962 			BUS_SPACE_MAXADDR,  /* lowaddr */
1963 			BUS_SPACE_MAXADDR,  /* highaddr */
1964 			NULL, NULL,         /* filter, filterarg */
1965 			PAGE_SIZE * (hba->max_sg_count-1),  /* maxsize */
1966 			hba->max_sg_count,  /* nsegments */
1967 			0x20000,    /* maxsegsize */
1968 			BUS_DMA_ALLOCNOW,       /* flags */
1969 			busdma_lock_mutex,  /* lockfunc */
1970 			&hba->lock,     /* lockfuncarg */
1971 			&hba->io_dmat   /* tag */))
1972 	{
1973 		device_printf(dev, "alloc io_dmat failed\n");
1974 		goto get_config_failed;
1975 	}
1976 
1977 	if (bus_dma_tag_create(hba->parent_dmat,/* parent */
1978 			1,  /* alignment */
1979 			0, /* boundary */
1980 			BUS_SPACE_MAXADDR_32BIT,    /* lowaddr */
1981 			BUS_SPACE_MAXADDR,  /* highaddr */
1982 			NULL, NULL,         /* filter, filterarg */
1983 			HPT_SRB_MAX_SIZE * HPT_SRB_MAX_QUEUE_SIZE + 0x20,
1984 			1,  /* nsegments */
1985 			BUS_SPACE_MAXSIZE_32BIT,    /* maxsegsize */
1986 			0,      /* flags */
1987 			NULL,   /* lockfunc */
1988 			NULL,       /* lockfuncarg */
1989 			&hba->srb_dmat  /* tag */))
1990 	{
1991 		device_printf(dev, "alloc srb_dmat failed\n");
1992 		goto destroy_io_dmat;
1993 	}
1994 
1995 	if (bus_dmamem_alloc(hba->srb_dmat, (void **)&hba->uncached_ptr,
1996 			BUS_DMA_WAITOK | BUS_DMA_COHERENT,
1997 			&hba->srb_dmamap) != 0)
1998 	{
1999 		device_printf(dev, "srb bus_dmamem_alloc failed!\n");
2000 		goto destroy_srb_dmat;
2001 	}
2002 
2003 	if (bus_dmamap_load(hba->srb_dmat,
2004 			hba->srb_dmamap, hba->uncached_ptr,
2005 			(HPT_SRB_MAX_SIZE * HPT_SRB_MAX_QUEUE_SIZE) + 0x20,
2006 			hptiop_map_srb, hba, 0))
2007 	{
2008 		device_printf(dev, "bus_dmamap_load failed!\n");
2009 		goto srb_dmamem_free;
2010 	}
2011 
2012 	if ((devq = cam_simq_alloc(hba->max_requests - 1)) == NULL) {
2013 		device_printf(dev, "cam_simq_alloc failed\n");
2014 		goto srb_dmamap_unload;
2015 	}
2016 
2017 	hba->sim = cam_sim_alloc(hptiop_action, hptiop_poll, driver_name,
2018 			hba, unit, &hba->lock, hba->max_requests - 1, 1, devq);
2019 	if (!hba->sim) {
2020 		device_printf(dev, "cam_sim_alloc failed\n");
2021 		cam_simq_free(devq);
2022 		goto srb_dmamap_unload;
2023 	}
2024 	hptiop_lock_adapter(hba);
2025 	if (xpt_bus_register(hba->sim, dev, 0) != CAM_SUCCESS)
2026 	{
2027 		device_printf(dev, "xpt_bus_register failed\n");
2028 		goto free_cam_sim;
2029 	}
2030 
2031 	if (xpt_create_path(&hba->path, /*periph */ NULL,
2032 			cam_sim_path(hba->sim), CAM_TARGET_WILDCARD,
2033 			CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
2034 		device_printf(dev, "xpt_create_path failed\n");
2035 		goto deregister_xpt_bus;
2036 	}
2037 	hptiop_unlock_adapter(hba);
2038 
2039 	bzero(&set_config, sizeof(set_config));
2040 	set_config.iop_id = unit;
2041 	set_config.vbus_id = cam_sim_path(hba->sim);
2042 	set_config.max_host_request_size = HPT_SRB_MAX_REQ_SIZE;
2043 
2044 	if (hba->ops->set_config(hba, &set_config)) {
2045 		device_printf(dev, "set iop config failed.\n");
2046 		goto free_hba_path;
2047 	}
2048 
2049 	memset(&ccb, 0, sizeof(ccb));
2050 	xpt_setup_ccb(&ccb.ccb_h, hba->path, /*priority*/5);
2051 	ccb.ccb_h.func_code = XPT_SASYNC_CB;
2052 	ccb.event_enable = (AC_FOUND_DEVICE | AC_LOST_DEVICE);
2053 	ccb.callback = hptiop_async;
2054 	ccb.callback_arg = hba->sim;
2055 	xpt_action((union ccb *)&ccb);
2056 
2057 	rid = 0;
2058 	if ((hba->irq_res = bus_alloc_resource_any(hba->pcidev, SYS_RES_IRQ,
2059 			&rid, RF_SHAREABLE | RF_ACTIVE)) == NULL) {
2060 		device_printf(dev, "allocate irq failed!\n");
2061 		goto free_hba_path;
2062 	}
2063 
2064 	if (bus_setup_intr(hba->pcidev, hba->irq_res, INTR_TYPE_CAM | INTR_MPSAFE,
2065 				NULL, hptiop_pci_intr, hba, &hba->irq_handle))
2066 	{
2067 		device_printf(dev, "allocate intr function failed!\n");
2068 		goto free_irq_resource;
2069 	}
2070 
2071 	if (hptiop_send_sync_msg(hba,
2072 			IOPMU_INBOUND_MSG0_START_BACKGROUND_TASK, 5000)) {
2073 		device_printf(dev, "fail to start background task\n");
2074 		goto teardown_irq_resource;
2075 	}
2076 
2077 	hba->ops->enable_intr(hba);
2078 	hba->initialized = 1;
2079 
2080 	hba->ioctl_dev = make_dev(&hptiop_cdevsw, unit,
2081 				UID_ROOT, GID_WHEEL /*GID_OPERATOR*/,
2082 				S_IRUSR | S_IWUSR, "%s%d", driver_name, unit);
2083 
2084 
2085 	return 0;
2086 
2087 
2088 teardown_irq_resource:
2089 	bus_teardown_intr(dev, hba->irq_res, hba->irq_handle);
2090 
2091 free_irq_resource:
2092 	bus_release_resource(dev, SYS_RES_IRQ, 0, hba->irq_res);
2093 
2094 	hptiop_lock_adapter(hba);
2095 free_hba_path:
2096 	xpt_free_path(hba->path);
2097 
2098 deregister_xpt_bus:
2099 	xpt_bus_deregister(cam_sim_path(hba->sim));
2100 
2101 free_cam_sim:
2102 	cam_sim_free(hba->sim, /*free devq*/ TRUE);
2103 	hptiop_unlock_adapter(hba);
2104 
2105 srb_dmamap_unload:
2106 	if (hba->uncached_ptr)
2107 		bus_dmamap_unload(hba->srb_dmat, hba->srb_dmamap);
2108 
2109 srb_dmamem_free:
2110 	if (hba->uncached_ptr)
2111 		bus_dmamem_free(hba->srb_dmat,
2112 			hba->uncached_ptr, hba->srb_dmamap);
2113 
2114 destroy_srb_dmat:
2115 	if (hba->srb_dmat)
2116 		bus_dma_tag_destroy(hba->srb_dmat);
2117 
2118 destroy_io_dmat:
2119 	if (hba->io_dmat)
2120 		bus_dma_tag_destroy(hba->io_dmat);
2121 
2122 get_config_failed:
2123 	hba->ops->internal_memfree(hba);
2124 
2125 destroy_parent_tag:
2126 	if (hba->parent_dmat)
2127 		bus_dma_tag_destroy(hba->parent_dmat);
2128 
2129 release_pci_res:
2130 	if (hba->ops->release_pci_res)
2131 		hba->ops->release_pci_res(hba);
2132 
2133 	return ENXIO;
2134 }
2135 
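/*
 * Detach: refuse if any "da" peripheral still holds a reference to one of
 * our targets, otherwise shut the controller down, stop the background task
 * and release all resources.
 */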
2136 static int hptiop_detach(device_t dev)
2137 {
2138 	struct hpt_iop_hba * hba = (struct hpt_iop_hba *)device_get_softc(dev);
2139 	int i;
2140 	int error = EBUSY;
2141 
2142 	hptiop_lock_adapter(hba);
2143 	for (i = 0; i < hba->max_devices; i++)
2144 		if (hptiop_os_query_remove_device(hba, i)) {
2145 			device_printf(dev, "%d file system is busy, id=%d\n",
2146 						hba->pciunit, i);
2147 			goto out;
2148 		}
2149 
2150 	if ((error = hptiop_shutdown(dev)) != 0)
2151 		goto out;
2152 	if (hptiop_send_sync_msg(hba,
2153 		IOPMU_INBOUND_MSG0_STOP_BACKGROUND_TASK, 60000))
2154 		goto out;
2155 	hptiop_unlock_adapter(hba);
2156 
2157 	hptiop_release_resource(hba);
2158 	return (0);
2159 out:
2160 	hptiop_unlock_adapter(hba);
2161 	return error;
2162 }
2163 
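/*
 * Shutdown: refuse while the ioctl device is open, then mask interrupts and
 * ask the firmware to shut down.
 */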
2164 static int hptiop_shutdown(device_t dev)
2165 {
2166 	struct hpt_iop_hba * hba = (struct hpt_iop_hba *)device_get_softc(dev);
2167 
2168 	int error = 0;
2169 
2170 	if (hba->flag & HPT_IOCTL_FLAG_OPEN) {
2171 		device_printf(dev, "%d device is busy\n", hba->pciunit);
2172 		return EBUSY;
2173 	}
2174 
2175 	hba->ops->disable_intr(hba);
2176 
2177 	if (hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_SHUTDOWN, 60000))
2178 		error = EBUSY;
2179 
2180 	return error;
2181 }
2182 
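/*
 * Interrupt handler and CAM poll routine both dispatch to the per-family
 * iop_intr handler; the hardware interrupt path takes the adapter lock.
 */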
2183 static void hptiop_pci_intr(void *arg)
2184 {
2185 	struct hpt_iop_hba * hba = (struct hpt_iop_hba *)arg;
2186 	hptiop_lock_adapter(hba);
2187 	hba->ops->iop_intr(hba);
2188 	hptiop_unlock_adapter(hba);
2189 }
2190 
2191 static void hptiop_poll(struct cam_sim *sim)
2192 {
2193 	struct hpt_iop_hba *hba;
2194 
2195 	hba = cam_sim_softc(sim);
2196 	hba->ops->iop_intr(hba);
2197 }
2198 
2199 static void hptiop_async(void * callback_arg, u_int32_t code,
2200 					struct cam_path * path, void * arg)
2201 {
2202 }
2203 
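/*
 * Per-family interrupt enable/disable helpers: ITL and MV flip bits in
 * their outbound interrupt mask registers, MVFrey programs the doorbell,
 * ISR and PCIe function interrupt enables.
 */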
2204 static void hptiop_enable_intr_itl(struct hpt_iop_hba *hba)
2205 {
2206 	BUS_SPACE_WRT4_ITL(outbound_intmask,
2207 		~(IOPMU_OUTBOUND_INT_POSTQUEUE | IOPMU_OUTBOUND_INT_MSG0));
2208 }
2209 
2210 static void hptiop_enable_intr_mv(struct hpt_iop_hba *hba)
2211 {
2212 	u_int32_t int_mask;
2213 
2214 	int_mask = BUS_SPACE_RD4_MV0(outbound_intmask);
2215 
2216 	int_mask |= MVIOP_MU_OUTBOUND_INT_POSTQUEUE
2217 			| MVIOP_MU_OUTBOUND_INT_MSG;
2218 	BUS_SPACE_WRT4_MV0(outbound_intmask, int_mask);
2219 }
2220 
2221 static void hptiop_enable_intr_mvfrey(struct hpt_iop_hba *hba)
2222 {
2223 	BUS_SPACE_WRT4_MVFREY2(f0_doorbell_enable, CPU_TO_F0_DRBL_MSG_A_BIT);
2224 	BUS_SPACE_RD4_MVFREY2(f0_doorbell_enable);
2225 
2226 	BUS_SPACE_WRT4_MVFREY2(isr_enable, 0x1);
2227 	BUS_SPACE_RD4_MVFREY2(isr_enable);
2228 
2229 	BUS_SPACE_WRT4_MVFREY2(pcie_f0_int_enable, 0x1010);
2230 	BUS_SPACE_RD4_MVFREY2(pcie_f0_int_enable);
2231 }
2232 
2233 static void hptiop_disable_intr_itl(struct hpt_iop_hba *hba)
2234 {
2235 	u_int32_t int_mask;
2236 
2237 	int_mask = BUS_SPACE_RD4_ITL(outbound_intmask);
2238 
2239 	int_mask |= IOPMU_OUTBOUND_INT_POSTQUEUE | IOPMU_OUTBOUND_INT_MSG0;
2240 	BUS_SPACE_WRT4_ITL(outbound_intmask, int_mask);
2241 	BUS_SPACE_RD4_ITL(outbound_intstatus);
2242 }
2243 
2244 static void hptiop_disable_intr_mv(struct hpt_iop_hba *hba)
2245 {
2246 	u_int32_t int_mask;
2247 	int_mask = BUS_SPACE_RD4_MV0(outbound_intmask);
2248 
2249 	int_mask &= ~(MVIOP_MU_OUTBOUND_INT_MSG
2250 			| MVIOP_MU_OUTBOUND_INT_POSTQUEUE);
2251 	BUS_SPACE_WRT4_MV0(outbound_intmask, int_mask);
2252 	BUS_SPACE_RD4_MV0(outbound_intmask);
2253 }
2254 
2255 static void hptiop_disable_intr_mvfrey(struct hpt_iop_hba *hba)
2256 {
2257 	BUS_SPACE_WRT4_MVFREY2(f0_doorbell_enable, 0);
2258 	BUS_SPACE_RD4_MVFREY2(f0_doorbell_enable);
2259 
2260 	BUS_SPACE_WRT4_MVFREY2(isr_enable, 0);
2261 	BUS_SPACE_RD4_MVFREY2(isr_enable);
2262 
2263 	BUS_SPACE_WRT4_MVFREY2(pcie_f0_int_enable, 0);
2264 	BUS_SPACE_RD4_MVFREY2(pcie_f0_int_enable);
2265 }
2266 
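/*
 * Reset the controller and restart its background task; invoked for
 * XPT_RESET_BUS and as the MVFrey request timeout handler.
 */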
2267 static void hptiop_reset_adapter(void *argv)
2268 {
2269 	struct hpt_iop_hba * hba = (struct hpt_iop_hba *)argv;
2270 	if (hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_RESET, 60000))
2271 		return;
2272 	hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_START_BACKGROUND_TASK, 5000);
2273 }
2274 
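/*
 * SRB free list management.  The list is a simple LIFO of pre-allocated,
 * 32-byte aligned request blocks; callers are expected to hold the adapter
 * lock (an assumption based on the surrounding locking, not stated in the
 * original code).
 */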
2275 static void *hptiop_get_srb(struct hpt_iop_hba * hba)
2276 {
2277 	struct hpt_iop_srb * srb;
2278 
2279 	if (hba->srb_list) {
2280 		srb = hba->srb_list;
2281 		hba->srb_list = srb->next;
2282 		return srb;
2283 	}
2284 
2285 	return NULL;
2286 }
2287 
2288 static void hptiop_free_srb(struct hpt_iop_hba *hba, struct hpt_iop_srb *srb)
2289 {
2290 	srb->next = hba->srb_list;
2291 	hba->srb_list = srb;
2292 }
2293 
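/*
 * CAM action entry point.  SCSI I/O is mapped with bus_dmamap_load_ccb()
 * and posted to the controller from the loader callback; everything else is
 * answered synchronously.
 */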
2294 static void hptiop_action(struct cam_sim *sim, union ccb *ccb)
2295 {
2296 	struct hpt_iop_hba * hba = (struct hpt_iop_hba *)cam_sim_softc(sim);
2297 	struct hpt_iop_srb * srb;
2298 	int error;
2299 
2300 	switch (ccb->ccb_h.func_code) {
2301 
2302 	case XPT_SCSI_IO:
2303 		if (ccb->ccb_h.target_lun != 0 ||
2304 			ccb->ccb_h.target_id >= hba->max_devices ||
2305 			(ccb->ccb_h.flags & CAM_CDB_PHYS))
2306 		{
2307 			ccb->ccb_h.status = CAM_TID_INVALID;
2308 			xpt_done(ccb);
2309 			return;
2310 		}
2311 
2312 		if ((srb = hptiop_get_srb(hba)) == NULL) {
2313 			device_printf(hba->pcidev, "srb allocation failed\n");
2314 			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2315 			xpt_done(ccb);
2316 			return;
2317 		}
2318 
2319 		srb->ccb = ccb;
2320 		error = bus_dmamap_load_ccb(hba->io_dmat,
2321 					    srb->dma_map,
2322 					    ccb,
2323 					    hptiop_post_scsi_command,
2324 					    srb,
2325 					    0);
2326 
2327 		if (error && error != EINPROGRESS) {
2328 			device_printf(hba->pcidev,
2329 				"%d bus_dmamap_load error %d\n",
2330 				hba->pciunit, error);
2331 			xpt_freeze_simq(hba->sim, 1);
2332 			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2333 			hptiop_free_srb(hba, srb);
2334 			xpt_done(ccb);
2335 			return;
2336 		}
2337 
2338 		return;
2339 
2340 	case XPT_RESET_BUS:
2341 		device_printf(hba->pcidev, "reset adapter\n");
2342 		hba->msg_done = 0;
2343 		hptiop_reset_adapter(hba);
2344 		break;
2345 
2346 	case XPT_GET_TRAN_SETTINGS:
2347 	case XPT_SET_TRAN_SETTINGS:
2348 		ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
2349 		break;
2350 
2351 	case XPT_CALC_GEOMETRY:
2352 		cam_calc_geometry(&ccb->ccg, 1);
2353 		break;
2354 
2355 	case XPT_PATH_INQ:
2356 	{
2357 		struct ccb_pathinq *cpi = &ccb->cpi;
2358 
2359 		cpi->version_num = 1;
2360 		cpi->hba_inquiry = PI_SDTR_ABLE;
2361 		cpi->target_sprt = 0;
2362 		cpi->hba_misc = PIM_NOBUSRESET;
2363 		cpi->hba_eng_cnt = 0;
2364 		cpi->max_target = hba->max_devices;
2365 		cpi->max_lun = 0;
2366 		cpi->unit_number = cam_sim_unit(sim);
2367 		cpi->bus_id = cam_sim_bus(sim);
2368 		cpi->initiator_id = hba->max_devices;
2369 		cpi->base_transfer_speed = 3300;
2370 
2371 		strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
2372 		strlcpy(cpi->hba_vid, "HPT   ", HBA_IDLEN);
2373 		strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
2374 		cpi->transport = XPORT_SPI;
2375 		cpi->transport_version = 2;
2376 		cpi->protocol = PROTO_SCSI;
2377 		cpi->protocol_version = SCSI_REV_2;
2378 		cpi->ccb_h.status = CAM_REQ_CMP;
2379 		break;
2380 	}
2381 
2382 	default:
2383 		ccb->ccb_h.status = CAM_REQ_INVALID;
2384 		break;
2385 	}
2386 
2387 	xpt_done(ccb);
2388 	return;
2389 }
2390 
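/*
 * Post a SCSI command to an ITL (Intel IOP based) controller.  Requests
 * whose SRB lives above the 32G window are copied into an IOP-supplied
 * buffer through BAR0; otherwise the host-resident request is handed over
 * by physical address on the inbound queue.
 */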
2391 static void hptiop_post_req_itl(struct hpt_iop_hba *hba,
2392 				struct hpt_iop_srb *srb,
2393 				bus_dma_segment_t *segs, int nsegs)
2394 {
2395 	int idx;
2396 	union ccb *ccb = srb->ccb;
2397 	u_int8_t *cdb;
2398 
2399 	if (ccb->ccb_h.flags & CAM_CDB_POINTER)
2400 		cdb = ccb->csio.cdb_io.cdb_ptr;
2401 	else
2402 		cdb = ccb->csio.cdb_io.cdb_bytes;
2403 
2404 	KdPrint(("ccb=%p %x-%x-%x\n",
2405 		ccb, *(u_int32_t *)cdb, *((u_int32_t *)cdb+1), *((u_int32_t *)cdb+2)));
2406 
2407 	if (srb->srb_flag & HPT_SRB_FLAG_HIGH_MEM_ACESS) {
2408 		u_int32_t iop_req32;
2409 		struct hpt_iop_request_scsi_command req;
2410 
2411 		iop_req32 = BUS_SPACE_RD4_ITL(inbound_queue);
2412 
2413 		if (iop_req32 == IOPMU_QUEUE_EMPTY) {
2414 			device_printf(hba->pcidev, "invalid req offset\n");
2415 			ccb->ccb_h.status = CAM_BUSY;
2416 			bus_dmamap_unload(hba->io_dmat, srb->dma_map);
2417 			hptiop_free_srb(hba, srb);
2418 			xpt_done(ccb);
2419 			return;
2420 		}
2421 
2422 		if (ccb->csio.dxfer_len && nsegs > 0) {
2423 			struct hpt_iopsg *psg = req.sg_list;
2424 			for (idx = 0; idx < nsegs; idx++, psg++) {
2425 				psg->pci_address = (u_int64_t)segs[idx].ds_addr;
2426 				psg->size = segs[idx].ds_len;
2427 				psg->eot = 0;
2428 			}
2429 			psg[-1].eot = 1;
2430 		}
2431 
2432 		bcopy(cdb, req.cdb, ccb->csio.cdb_len);
2433 
2434 		req.header.size =
2435 				offsetof(struct hpt_iop_request_scsi_command, sg_list)
2436 				+ nsegs*sizeof(struct hpt_iopsg);
2437 		req.header.type = IOP_REQUEST_TYPE_SCSI_COMMAND;
2438 		req.header.flags = 0;
2439 		req.header.result = IOP_RESULT_PENDING;
2440 		req.header.context = (u_int64_t)(unsigned long)srb;
2441 		req.dataxfer_length = ccb->csio.dxfer_len;
2442 		req.channel =  0;
2443 		req.target =  ccb->ccb_h.target_id;
2444 		req.lun =  ccb->ccb_h.target_lun;
2445 
2446 		bus_space_write_region_1(hba->bar0t, hba->bar0h, iop_req32,
2447 			(u_int8_t *)&req, req.header.size);
2448 
2449 		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
2450 			bus_dmamap_sync(hba->io_dmat,
2451 				srb->dma_map, BUS_DMASYNC_PREREAD);
2452 		}
2453 		else if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT)
2454 			bus_dmamap_sync(hba->io_dmat,
2455 				srb->dma_map, BUS_DMASYNC_PREWRITE);
2456 
2457 		BUS_SPACE_WRT4_ITL(inbound_queue, iop_req32);
2458 	} else {
2459 		struct hpt_iop_request_scsi_command *req;
2460 
2461 		req = (struct hpt_iop_request_scsi_command *)srb;
2462 		if (ccb->csio.dxfer_len && nsegs > 0) {
2463 			struct hpt_iopsg *psg = req->sg_list;
2464 			for (idx = 0; idx < nsegs; idx++, psg++) {
2465 				psg->pci_address =
2466 					(u_int64_t)segs[idx].ds_addr;
2467 				psg->size = segs[idx].ds_len;
2468 				psg->eot = 0;
2469 			}
2470 			psg[-1].eot = 1;
2471 		}
2472 
2473 		bcopy(cdb, req->cdb, ccb->csio.cdb_len);
2474 
2475 		req->header.type = IOP_REQUEST_TYPE_SCSI_COMMAND;
2476 		req->header.result = IOP_RESULT_PENDING;
2477 		req->dataxfer_length = ccb->csio.dxfer_len;
2478 		req->channel =  0;
2479 		req->target =  ccb->ccb_h.target_id;
2480 		req->lun =  ccb->ccb_h.target_lun;
2481 		req->header.size =
2482 			offsetof(struct hpt_iop_request_scsi_command, sg_list)
2483 			+ nsegs*sizeof(struct hpt_iopsg);
2484 		req->header.context = (u_int64_t)srb->index |
2485 						IOPMU_QUEUE_ADDR_HOST_BIT;
2486 		req->header.flags = IOP_REQUEST_FLAG_OUTPUT_CONTEXT;
2487 
2488 		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
2489 			bus_dmamap_sync(hba->io_dmat,
2490 				srb->dma_map, BUS_DMASYNC_PREREAD);
2491 		} else if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
2492 			bus_dmamap_sync(hba->io_dmat,
2493 				srb->dma_map, BUS_DMASYNC_PREWRITE);
2494 		}
2495 
2496 		if (hba->firmware_version > 0x01020000
2497 			|| hba->interface_version > 0x01020000) {
2498 			u_int32_t size_bits;
2499 
2500 			if (req->header.size < 256)
2501 				size_bits = IOPMU_QUEUE_REQUEST_SIZE_BIT;
2502 			else if (req->header.size < 512)
2503 				size_bits = IOPMU_QUEUE_ADDR_HOST_BIT;
2504 			else
2505 				size_bits = IOPMU_QUEUE_REQUEST_SIZE_BIT
2506 						| IOPMU_QUEUE_ADDR_HOST_BIT;
2507 
2508 			BUS_SPACE_WRT4_ITL(inbound_queue,
2509 				(u_int32_t)srb->phy_addr | size_bits);
2510 		} else
2511 			BUS_SPACE_WRT4_ITL(inbound_queue, (u_int32_t)srb->phy_addr
2512 				|IOPMU_QUEUE_ADDR_HOST_BIT);
2513 	}
2514 }
2515 
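/*
 * Post a SCSI command to an MV (Marvell IOP based) controller: build the
 * request in the host SRB and publish its physical address, with an encoded
 * size hint, through the inbound queue.
 */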
2516 static void hptiop_post_req_mv(struct hpt_iop_hba *hba,
2517 				struct hpt_iop_srb *srb,
2518 				bus_dma_segment_t *segs, int nsegs)
2519 {
2520 	int idx, size;
2521 	union ccb *ccb = srb->ccb;
2522 	u_int8_t *cdb;
2523 	struct hpt_iop_request_scsi_command *req;
2524 	u_int64_t req_phy;
2525 
2526 	req = (struct hpt_iop_request_scsi_command *)srb;
2527 	req_phy = srb->phy_addr;
2528 
2529 	if (ccb->csio.dxfer_len && nsegs > 0) {
2530 		struct hpt_iopsg *psg = req->sg_list;
2531 		for (idx = 0; idx < nsegs; idx++, psg++) {
2532 			psg->pci_address = (u_int64_t)segs[idx].ds_addr;
2533 			psg->size = segs[idx].ds_len;
2534 			psg->eot = 0;
2535 		}
2536 		psg[-1].eot = 1;
2537 	}
2538 	if (ccb->ccb_h.flags & CAM_CDB_POINTER)
2539 		cdb = ccb->csio.cdb_io.cdb_ptr;
2540 	else
2541 		cdb = ccb->csio.cdb_io.cdb_bytes;
2542 
2543 	bcopy(cdb, req->cdb, ccb->csio.cdb_len);
2544 	req->header.type = IOP_REQUEST_TYPE_SCSI_COMMAND;
2545 	req->header.result = IOP_RESULT_PENDING;
2546 	req->dataxfer_length = ccb->csio.dxfer_len;
2547 	req->channel = 0;
2548 	req->target =  ccb->ccb_h.target_id;
2549 	req->lun =  ccb->ccb_h.target_lun;
2550 	req->header.size = sizeof(struct hpt_iop_request_scsi_command)
2551 				- sizeof(struct hpt_iopsg)
2552 				+ nsegs * sizeof(struct hpt_iopsg);
2553 	if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
2554 		bus_dmamap_sync(hba->io_dmat,
2555 			srb->dma_map, BUS_DMASYNC_PREREAD);
2556 	}
2557 	else if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT)
2558 		bus_dmamap_sync(hba->io_dmat,
2559 			srb->dma_map, BUS_DMASYNC_PREWRITE);
2560 	req->header.context = (u_int64_t)srb->index
2561 					<< MVIOP_REQUEST_NUMBER_START_BIT
2562 					| MVIOP_CMD_TYPE_SCSI;
2563 	req->header.flags = IOP_REQUEST_FLAG_OUTPUT_CONTEXT;
2564 	size = req->header.size >> 8;
2565 	hptiop_mv_inbound_write(req_phy
2566 			| MVIOP_MU_QUEUE_ADDR_HOST_BIT
2567 			| imin(3, size), hba);
2568 }
2569 
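/*
 * Post a SCSI command to an MVFrey controller: fill the next inbound list
 * slot with the request's physical address and length (in 32-bit words),
 * then advance the inbound write pointer, wrapping and flipping the toggle
 * bit at the end of the ring.  A 20 second timeout resets the adapter if
 * the firmware never completes the request.
 */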
2570 static void hptiop_post_req_mvfrey(struct hpt_iop_hba *hba,
2571 				struct hpt_iop_srb *srb,
2572 				bus_dma_segment_t *segs, int nsegs)
2573 {
2574 	int idx, index;
2575 	union ccb *ccb = srb->ccb;
2576 	u_int8_t *cdb;
2577 	struct hpt_iop_request_scsi_command *req;
2578 	u_int64_t req_phy;
2579 
2580 	req = (struct hpt_iop_request_scsi_command *)srb;
2581 	req_phy = srb->phy_addr;
2582 
2583 	if (ccb->csio.dxfer_len && nsegs > 0) {
2584 		struct hpt_iopsg *psg = req->sg_list;
2585 		for (idx = 0; idx < nsegs; idx++, psg++) {
2586 			psg->pci_address = (u_int64_t)segs[idx].ds_addr | 1;
2587 			psg->size = segs[idx].ds_len;
2588 			psg->eot = 0;
2589 		}
2590 		psg[-1].eot = 1;
2591 	}
2592 	if (ccb->ccb_h.flags & CAM_CDB_POINTER)
2593 		cdb = ccb->csio.cdb_io.cdb_ptr;
2594 	else
2595 		cdb = ccb->csio.cdb_io.cdb_bytes;
2596 
2597 	bcopy(cdb, req->cdb, ccb->csio.cdb_len);
2598 	req->header.type = IOP_REQUEST_TYPE_SCSI_COMMAND;
2599 	req->header.result = IOP_RESULT_PENDING;
2600 	req->dataxfer_length = ccb->csio.dxfer_len;
2601 	req->channel = 0;
2602 	req->target = ccb->ccb_h.target_id;
2603 	req->lun = ccb->ccb_h.target_lun;
2604 	req->header.size = sizeof(struct hpt_iop_request_scsi_command)
2605 				- sizeof(struct hpt_iopsg)
2606 				+ nsegs * sizeof(struct hpt_iopsg);
2607 	if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
2608 		bus_dmamap_sync(hba->io_dmat,
2609 			srb->dma_map, BUS_DMASYNC_PREREAD);
2610 	}
2611 	else if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT)
2612 		bus_dmamap_sync(hba->io_dmat,
2613 			srb->dma_map, BUS_DMASYNC_PREWRITE);
2614 
2615 	req->header.flags = IOP_REQUEST_FLAG_OUTPUT_CONTEXT
2616 						| IOP_REQUEST_FLAG_ADDR_BITS
2617 						| ((req_phy >> 16) & 0xffff0000);
2618 	req->header.context = ((req_phy & 0xffffffff) << 32 )
2619 						| srb->index << 4
2620 						| IOPMU_QUEUE_ADDR_HOST_BIT | req->header.type;
2621 
2622 	hba->u.mvfrey.inlist_wptr++;
2623 	index = hba->u.mvfrey.inlist_wptr & 0x3fff;
2624 
2625 	if (index == hba->u.mvfrey.list_count) {
2626 		index = 0;
2627 		hba->u.mvfrey.inlist_wptr &= ~0x3fff;
2628 		hba->u.mvfrey.inlist_wptr ^= CL_POINTER_TOGGLE;
2629 	}
2630 
2631 	hba->u.mvfrey.inlist[index].addr = req_phy;
2632 	hba->u.mvfrey.inlist[index].intrfc_len = (req->header.size + 3) / 4;
2633 
2634 	BUS_SPACE_WRT4_MVFREY2(inbound_write_ptr, hba->u.mvfrey.inlist_wptr);
2635 	BUS_SPACE_RD4_MVFREY2(inbound_write_ptr);
2636 
2637 	if (req->header.type == IOP_REQUEST_TYPE_SCSI_COMMAND) {
2638 		callout_reset(&srb->timeout, 20 * hz, hptiop_reset_adapter, hba);
2639 	}
2640 }
2641 
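/*
 * bus_dmamap_load_ccb() callback: reject failed or over-long mappings,
 * otherwise hand the mapped segments to the per-family post_req routine.
 */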
2642 static void hptiop_post_scsi_command(void *arg, bus_dma_segment_t *segs,
2643 					int nsegs, int error)
2644 {
2645 	struct hpt_iop_srb *srb = (struct hpt_iop_srb *)arg;
2646 	union ccb *ccb = srb->ccb;
2647 	struct hpt_iop_hba *hba = srb->hba;
2648 
2649 	if (error || nsegs > hba->max_sg_count) {
2650 		KdPrint(("hptiop: func_code=%x tid=%x lun=%jx nsegs=%d\n",
2651 			ccb->ccb_h.func_code,
2652 			ccb->ccb_h.target_id,
2653 			(uintmax_t)ccb->ccb_h.target_lun, nsegs));
2654 		ccb->ccb_h.status = CAM_BUSY;
2655 		bus_dmamap_unload(hba->io_dmat, srb->dma_map);
2656 		hptiop_free_srb(hba, srb);
2657 		xpt_done(ccb);
2658 		return;
2659 	}
2660 
2661 	hba->ops->post_req(hba, srb, segs, nsegs);
2662 }
2663 
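/*
 * DMA load callback for the MV config buffer: record the 32-byte aligned
 * physical and virtual addresses of the control/config command area.
 */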
2664 static void hptiop_mv_map_ctlcfg(void *arg, bus_dma_segment_t *segs,
2665 				int nsegs, int error)
2666 {
2667 	struct hpt_iop_hba *hba = (struct hpt_iop_hba *)arg;
2668 	hba->ctlcfgcmd_phy = ((u_int64_t)segs->ds_addr + 0x1F)
2669 				& ~(u_int64_t)0x1F;
2670 	hba->ctlcfg_ptr = (u_int8_t *)(((unsigned long)hba->ctlcfg_ptr + 0x1F)
2671 				& ~0x1F);
2672 }
2673 
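/*
 * DMA load callback for the MVFrey internal memory block.  The region is
 * aligned up to 32 bytes and carved into the control/config command area
 * (0x800 bytes), the inbound list, the outbound list and the outbound list
 * copy pointer, mirroring the size computed in
 * hptiop_internal_memalloc_mvfrey().
 */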
2674 static void hptiop_mvfrey_map_ctlcfg(void *arg, bus_dma_segment_t *segs,
2675 				int nsegs, int error)
2676 {
2677 	struct hpt_iop_hba *hba = (struct hpt_iop_hba *)arg;
2678 	char *p;
2679 	u_int64_t phy;
2680 	u_int32_t list_count = hba->u.mvfrey.list_count;
2681 
2682 	phy = ((u_int64_t)segs->ds_addr + 0x1F)
2683 				& ~(u_int64_t)0x1F;
2684 	p = (char *)(((unsigned long)hba->ctlcfg_ptr + 0x1F)
2685 				& ~0x1F);
2686 
2687 	hba->ctlcfgcmd_phy = phy;
2688 	hba->ctlcfg_ptr = p;
2689 
2690 	p += 0x800;
2691 	phy += 0x800;
2692 
2693 	hba->u.mvfrey.inlist = (struct mvfrey_inlist_entry *)p;
2694 	hba->u.mvfrey.inlist_phy = phy;
2695 
2696 	p += list_count * sizeof(struct mvfrey_inlist_entry);
2697 	phy += list_count * sizeof(struct mvfrey_inlist_entry);
2698 
2699 	hba->u.mvfrey.outlist = (struct mvfrey_outlist_entry *)p;
2700 	hba->u.mvfrey.outlist_phy = phy;
2701 
2702 	p += list_count * sizeof(struct mvfrey_outlist_entry);
2703 	phy += list_count * sizeof(struct mvfrey_outlist_entry);
2704 
2705 	hba->u.mvfrey.outlist_cptr = (u_int32_t *)p;
2706 	hba->u.mvfrey.outlist_cptr_phy = phy;
2707 }
2708 
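/*
 * DMA load callback for the SRB pool: align the uncached buffer to 32 bytes
 * and initialize HPT_SRB_MAX_QUEUE_SIZE fixed-size SRBs, recording for each
 * one the (possibly right-shifted) physical address the firmware expects
 * and a per-SRB timeout callout, then place it on the free list.
 */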
2709 static void hptiop_map_srb(void *arg, bus_dma_segment_t *segs,
2710 				int nsegs, int error)
2711 {
2712 	struct hpt_iop_hba * hba = (struct hpt_iop_hba *)arg;
2713 	bus_addr_t phy_addr = (segs->ds_addr + 0x1F) & ~(bus_addr_t)0x1F;
2714 	struct hpt_iop_srb *srb, *tmp_srb;
2715 	int i;
2716 
2717 	if (error || nsegs == 0) {
2718 		device_printf(hba->pcidev, "hptiop_map_srb error\n");
2719 		return;
2720 	}
2721 
2722 	/* map srb */
2723 	srb = (struct hpt_iop_srb *)
2724 		(((unsigned long)hba->uncached_ptr + 0x1F)
2725 		& ~(unsigned long)0x1F);
2726 
2727 	for (i = 0; i < HPT_SRB_MAX_QUEUE_SIZE; i++) {
2728 		tmp_srb = (struct hpt_iop_srb *)
2729 					((char *)srb + i * HPT_SRB_MAX_SIZE);
2730 		if (((unsigned long)tmp_srb & 0x1F) == 0) {
2731 			if (bus_dmamap_create(hba->io_dmat,
2732 						0, &tmp_srb->dma_map)) {
2733 				device_printf(hba->pcidev, "dmamap create failed\n");
2734 				return;
2735 			}
2736 
2737 			bzero(tmp_srb, sizeof(struct hpt_iop_srb));
2738 			tmp_srb->hba = hba;
2739 			tmp_srb->index = i;
2740 			if (hba->ctlcfg_ptr == NULL) {	/* ITL IOP */
2741 				tmp_srb->phy_addr = (u_int64_t)(u_int32_t)
2742 							(phy_addr >> 5);
2743 				if (phy_addr & IOPMU_MAX_MEM_SUPPORT_MASK_32G)
2744 					tmp_srb->srb_flag =
2745 						HPT_SRB_FLAG_HIGH_MEM_ACESS;
2746 			} else {
2747 				tmp_srb->phy_addr = phy_addr;
2748 			}
2749 
2750 			callout_init_mtx(&tmp_srb->timeout, &hba->lock, 0);
2751 			hptiop_free_srb(hba, tmp_srb);
2752 			hba->srb[i] = tmp_srb;
2753 			phy_addr += HPT_SRB_MAX_SIZE;
2754 		}
2755 		else {
2756 			device_printf(hba->pcidev, "invalid alignment\n");
2757 			return;
2758 		}
2759 	}
2760 }
2761 
2762 static void hptiop_os_message_callback(struct hpt_iop_hba * hba, u_int32_t msg)
2763 {
2764 	hba->msg_done = 1;
2765 }
2766 
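/*
 * Returns -1 if a "da" peripheral on the given target is still referenced,
 * which blocks detach; 0 otherwise.
 */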
2767 static  int hptiop_os_query_remove_device(struct hpt_iop_hba * hba,
2768 						int target_id)
2769 {
2770 	struct cam_periph       *periph = NULL;
2771 	struct cam_path         *path;
2772 	int                     status, retval = 0;
2773 
2774 	status = xpt_create_path(&path, NULL, hba->sim->path_id, target_id, 0);
2775 
2776 	if (status == CAM_REQ_CMP) {
2777 		if ((periph = cam_periph_find(path, "da")) != NULL) {
2778 			if (periph->refcount >= 1) {
2779 				device_printf(hba->pcidev, "%d, "
2780 					"target_id=0x%x, "
2781 					"refcount=%d\n",
2782 				    hba->pciunit, target_id, periph->refcount);
2783 				retval = -1;
2784 			}
2785 		}
2786 		xpt_free_path(path);
2787 	}
2788 	return retval;
2789 }
2790 
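/*
 * Tear down everything attach set up: ioctl node, async callback and path,
 * interrupt, SIM/bus, internal DMA memory, SRB pool and tags, IRQ and BAR
 * resources, and finally the adapter mutex.
 */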
2791 static void hptiop_release_resource(struct hpt_iop_hba *hba)
2792 {
2793 	int i;
2794 
2795 	if (hba->ioctl_dev)
2796 		destroy_dev(hba->ioctl_dev);
2797 
2798 	if (hba->path) {
2799 		struct ccb_setasync ccb;
2800 
2801 		memset(&ccb, 0, sizeof(ccb));
2802 		xpt_setup_ccb(&ccb.ccb_h, hba->path, /*priority*/5);
2803 		ccb.ccb_h.func_code = XPT_SASYNC_CB;
2804 		ccb.event_enable = 0;
2805 		ccb.callback = hptiop_async;
2806 		ccb.callback_arg = hba->sim;
2807 		xpt_action((union ccb *)&ccb);
2808 		xpt_free_path(hba->path);
2809 	}
2810 
2811 	if (hba->irq_handle)
2812 		bus_teardown_intr(hba->pcidev, hba->irq_res, hba->irq_handle);
2813 
2814 	if (hba->sim) {
2815 		hptiop_lock_adapter(hba);
2816 		xpt_bus_deregister(cam_sim_path(hba->sim));
2817 		cam_sim_free(hba->sim, TRUE);
2818 		hptiop_unlock_adapter(hba);
2819 	}
2820 
2821 	if (hba->ctlcfg_dmat) {
2822 		bus_dmamap_unload(hba->ctlcfg_dmat, hba->ctlcfg_dmamap);
2823 		bus_dmamem_free(hba->ctlcfg_dmat,
2824 					hba->ctlcfg_ptr, hba->ctlcfg_dmamap);
2825 		bus_dma_tag_destroy(hba->ctlcfg_dmat);
2826 	}
2827 
2828 	for (i = 0; i < HPT_SRB_MAX_QUEUE_SIZE; i++) {
2829 		struct hpt_iop_srb *srb = hba->srb[i];
2830 		if (srb->dma_map)
2831 			bus_dmamap_destroy(hba->io_dmat, srb->dma_map);
2832 		callout_drain(&srb->timeout);
2833 	}
2834 
2835 	if (hba->srb_dmat) {
2836 		bus_dmamap_unload(hba->srb_dmat, hba->srb_dmamap);
2837 		bus_dmamap_destroy(hba->srb_dmat, hba->srb_dmamap);
2838 		bus_dma_tag_destroy(hba->srb_dmat);
2839 	}
2840 
2841 	if (hba->io_dmat)
2842 		bus_dma_tag_destroy(hba->io_dmat);
2843 
2844 	if (hba->parent_dmat)
2845 		bus_dma_tag_destroy(hba->parent_dmat);
2846 
2847 	if (hba->irq_res)
2848 		bus_release_resource(hba->pcidev, SYS_RES_IRQ,
2849 					0, hba->irq_res);
2850 
2851 	if (hba->bar0_res)
2852 		bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
2853 					hba->bar0_rid, hba->bar0_res);
2854 	if (hba->bar2_res)
2855 		bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
2856 					hba->bar2_rid, hba->bar2_res);
2857 	mtx_destroy(&hba->lock);
2858 }
2859