xref: /freebsd/sys/dev/hptiop/hptiop.c (revision bd001d86d679e10d179ef00b9866f0e65b6fa7fd)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3  *
4  * HighPoint RR3xxx/4xxx RAID Driver for FreeBSD
5  * Copyright (C) 2007-2012 HighPoint Technologies, Inc. All Rights Reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26  * SUCH DAMAGE.
27  */
28 
29 #include <sys/cdefs.h>
30 __FBSDID("$FreeBSD$");
31 
32 #include <sys/param.h>
33 #include <sys/types.h>
34 #include <sys/cons.h>
35 #include <sys/time.h>
36 #include <sys/systm.h>
37 
38 #include <sys/stat.h>
39 #include <sys/malloc.h>
40 #include <sys/conf.h>
41 #include <sys/libkern.h>
42 #include <sys/kernel.h>
43 
44 #include <sys/kthread.h>
45 #include <sys/mutex.h>
46 #include <sys/module.h>
47 
48 #include <sys/eventhandler.h>
49 #include <sys/bus.h>
50 #include <sys/taskqueue.h>
51 #include <sys/ioccom.h>
52 
53 #include <machine/resource.h>
54 #include <machine/bus.h>
55 #include <machine/stdarg.h>
56 #include <sys/rman.h>
57 
58 #include <vm/vm.h>
59 #include <vm/pmap.h>
60 
61 #include <dev/pci/pcireg.h>
62 #include <dev/pci/pcivar.h>
63 
64 
65 #include <cam/cam.h>
66 #include <cam/cam_ccb.h>
67 #include <cam/cam_sim.h>
68 #include <cam/cam_xpt_sim.h>
69 #include <cam/cam_debug.h>
70 #include <cam/cam_periph.h>
71 #include <cam/scsi/scsi_all.h>
72 #include <cam/scsi/scsi_message.h>
73 
74 
75 #include <dev/hptiop/hptiop.h>
76 
77 static const char driver_name[] = "hptiop";
78 static const char driver_version[] = "v1.9";
79 
80 static devclass_t hptiop_devclass;
81 
82 static int hptiop_send_sync_msg(struct hpt_iop_hba *hba,
83 				u_int32_t msg, u_int32_t millisec);
84 static void hptiop_request_callback_itl(struct hpt_iop_hba *hba,
85 							u_int32_t req);
86 static void hptiop_request_callback_mv(struct hpt_iop_hba *hba, u_int64_t req);
87 static void hptiop_request_callback_mvfrey(struct hpt_iop_hba *hba,
88 							u_int32_t req);
89 static void hptiop_os_message_callback(struct hpt_iop_hba *hba, u_int32_t msg);
90 static int  hptiop_do_ioctl_itl(struct hpt_iop_hba *hba,
91 				struct hpt_iop_ioctl_param *pParams);
92 static int  hptiop_do_ioctl_mv(struct hpt_iop_hba *hba,
93 				struct hpt_iop_ioctl_param *pParams);
94 static int  hptiop_do_ioctl_mvfrey(struct hpt_iop_hba *hba,
95 				struct hpt_iop_ioctl_param *pParams);
96 static int  hptiop_rescan_bus(struct hpt_iop_hba *hba);
97 static int hptiop_alloc_pci_res_itl(struct hpt_iop_hba *hba);
98 static int hptiop_alloc_pci_res_mv(struct hpt_iop_hba *hba);
99 static int hptiop_alloc_pci_res_mvfrey(struct hpt_iop_hba *hba);
100 static int hptiop_get_config_itl(struct hpt_iop_hba *hba,
101 				struct hpt_iop_request_get_config *config);
102 static int hptiop_get_config_mv(struct hpt_iop_hba *hba,
103 				struct hpt_iop_request_get_config *config);
104 static int hptiop_get_config_mvfrey(struct hpt_iop_hba *hba,
105 				struct hpt_iop_request_get_config *config);
106 static int hptiop_set_config_itl(struct hpt_iop_hba *hba,
107 				struct hpt_iop_request_set_config *config);
108 static int hptiop_set_config_mv(struct hpt_iop_hba *hba,
109 				struct hpt_iop_request_set_config *config);
110 static int hptiop_set_config_mvfrey(struct hpt_iop_hba *hba,
111 				struct hpt_iop_request_set_config *config);
112 static int hptiop_internal_memalloc_mv(struct hpt_iop_hba *hba);
113 static int hptiop_internal_memalloc_mvfrey(struct hpt_iop_hba *hba);
114 static int hptiop_internal_memfree_itl(struct hpt_iop_hba *hba);
115 static int hptiop_internal_memfree_mv(struct hpt_iop_hba *hba);
116 static int hptiop_internal_memfree_mvfrey(struct hpt_iop_hba *hba);
117 static int  hptiop_post_ioctl_command_itl(struct hpt_iop_hba *hba,
118 			u_int32_t req32, struct hpt_iop_ioctl_param *pParams);
119 static int  hptiop_post_ioctl_command_mv(struct hpt_iop_hba *hba,
120 				struct hpt_iop_request_ioctl_command *req,
121 				struct hpt_iop_ioctl_param *pParams);
122 static int  hptiop_post_ioctl_command_mvfrey(struct hpt_iop_hba *hba,
123 				struct hpt_iop_request_ioctl_command *req,
124 				struct hpt_iop_ioctl_param *pParams);
125 static void hptiop_post_req_itl(struct hpt_iop_hba *hba,
126 				struct hpt_iop_srb *srb,
127 				bus_dma_segment_t *segs, int nsegs);
128 static void hptiop_post_req_mv(struct hpt_iop_hba *hba,
129 				struct hpt_iop_srb *srb,
130 				bus_dma_segment_t *segs, int nsegs);
131 static void hptiop_post_req_mvfrey(struct hpt_iop_hba *hba,
132 				struct hpt_iop_srb *srb,
133 				bus_dma_segment_t *segs, int nsegs);
134 static void hptiop_post_msg_itl(struct hpt_iop_hba *hba, u_int32_t msg);
135 static void hptiop_post_msg_mv(struct hpt_iop_hba *hba, u_int32_t msg);
136 static void hptiop_post_msg_mvfrey(struct hpt_iop_hba *hba, u_int32_t msg);
137 static void hptiop_enable_intr_itl(struct hpt_iop_hba *hba);
138 static void hptiop_enable_intr_mv(struct hpt_iop_hba *hba);
139 static void hptiop_enable_intr_mvfrey(struct hpt_iop_hba *hba);
140 static void hptiop_disable_intr_itl(struct hpt_iop_hba *hba);
141 static void hptiop_disable_intr_mv(struct hpt_iop_hba *hba);
142 static void hptiop_disable_intr_mvfrey(struct hpt_iop_hba *hba);
143 static void hptiop_free_srb(struct hpt_iop_hba *hba, struct hpt_iop_srb *srb);
144 static int  hptiop_os_query_remove_device(struct hpt_iop_hba *hba, int tid);
145 static int  hptiop_probe(device_t dev);
146 static int  hptiop_attach(device_t dev);
147 static int  hptiop_detach(device_t dev);
148 static int  hptiop_shutdown(device_t dev);
149 static void hptiop_action(struct cam_sim *sim, union ccb *ccb);
150 static void hptiop_poll(struct cam_sim *sim);
151 static void hptiop_async(void *callback_arg, u_int32_t code,
152 					struct cam_path *path, void *arg);
153 static void hptiop_pci_intr(void *arg);
154 static void hptiop_release_resource(struct hpt_iop_hba *hba);
155 static void hptiop_reset_adapter(void *argv);
156 static d_open_t hptiop_open;
157 static d_close_t hptiop_close;
158 static d_ioctl_t hptiop_ioctl;
159 
160 static struct cdevsw hptiop_cdevsw = {
161 	.d_open = hptiop_open,
162 	.d_close = hptiop_close,
163 	.d_ioctl = hptiop_ioctl,
164 	.d_name = driver_name,
165 	.d_version = D_VERSION,
166 };
167 
168 #define hba_from_dev(dev) \
169 	((struct hpt_iop_hba *)((dev)->si_drv1))
170 
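/*
 * Register accessors: each macro reads or writes a 32-bit controller
 * register in BAR0 or BAR2, addressed by the offset of the named field
 * within the corresponding memory-mapped unit structure.
 */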
171 #define BUS_SPACE_WRT4_ITL(offset, value) bus_space_write_4(hba->bar0t,\
172 		hba->bar0h, offsetof(struct hpt_iopmu_itl, offset), (value))
173 #define BUS_SPACE_RD4_ITL(offset) bus_space_read_4(hba->bar0t,\
174 		hba->bar0h, offsetof(struct hpt_iopmu_itl, offset))
175 
176 #define BUS_SPACE_WRT4_MV0(offset, value) bus_space_write_4(hba->bar0t,\
177 		hba->bar0h, offsetof(struct hpt_iopmv_regs, offset), value)
178 #define BUS_SPACE_RD4_MV0(offset) bus_space_read_4(hba->bar0t,\
179 		hba->bar0h, offsetof(struct hpt_iopmv_regs, offset))
180 #define BUS_SPACE_WRT4_MV2(offset, value) bus_space_write_4(hba->bar2t,\
181 		hba->bar2h, offsetof(struct hpt_iopmu_mv, offset), value)
182 #define BUS_SPACE_RD4_MV2(offset) bus_space_read_4(hba->bar2t,\
183 		hba->bar2h, offsetof(struct hpt_iopmu_mv, offset))
184 
185 #define BUS_SPACE_WRT4_MVFREY2(offset, value) bus_space_write_4(hba->bar2t,\
186 		hba->bar2h, offsetof(struct hpt_iopmu_mvfrey, offset), value)
187 #define BUS_SPACE_RD4_MVFREY2(offset) bus_space_read_4(hba->bar2t,\
188 		hba->bar2h, offsetof(struct hpt_iopmu_mvfrey, offset))
189 
190 static int hptiop_open(ioctl_dev_t dev, int flags,
191 					int devtype, ioctl_thread_t proc)
192 {
193 	struct hpt_iop_hba *hba = hba_from_dev(dev);
194 
195 	if (hba == NULL)
196 		return ENXIO;
197 	if (hba->flag & HPT_IOCTL_FLAG_OPEN)
198 		return EBUSY;
199 	hba->flag |= HPT_IOCTL_FLAG_OPEN;
200 	return 0;
201 }
202 
203 static int hptiop_close(ioctl_dev_t dev, int flags,
204 					int devtype, ioctl_thread_t proc)
205 {
206 	struct hpt_iop_hba *hba = hba_from_dev(dev);
207 	hba->flag &= ~(u_int32_t)HPT_IOCTL_FLAG_OPEN;
208 	return 0;
209 }
210 
211 static int hptiop_ioctl(ioctl_dev_t dev, u_long cmd, caddr_t data,
212 					int flags, ioctl_thread_t proc)
213 {
214 	int ret = EFAULT;
215 	struct hpt_iop_hba *hba = hba_from_dev(dev);
216 
217 	switch (cmd) {
218 	case HPT_DO_IOCONTROL:
219 		ret = hba->ops->do_ioctl(hba,
220 				(struct hpt_iop_ioctl_param *)data);
221 		break;
222 	case HPT_SCAN_BUS:
223 		ret = hptiop_rescan_bus(hba);
224 		break;
225 	}
226 	return ret;
227 }
228 
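/*
 * Pop one 64-bit entry from the MV outbound queue.  The tail index is
 * advanced (wrapping at MVIOP_QUEUE_LEN) and written back; 0 is
 * returned when the queue is empty.
 */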
229 static u_int64_t hptiop_mv_outbound_read(struct hpt_iop_hba *hba)
230 {
231 	u_int64_t p;
232 	u_int32_t outbound_tail = BUS_SPACE_RD4_MV2(outbound_tail);
233 	u_int32_t outbound_head = BUS_SPACE_RD4_MV2(outbound_head);
234 
235 	if (outbound_tail != outbound_head) {
236 		bus_space_read_region_4(hba->bar2t, hba->bar2h,
237 			offsetof(struct hpt_iopmu_mv,
238 				outbound_q[outbound_tail]),
239 			(u_int32_t *)&p, 2);
240 
241 		outbound_tail++;
242 
243 		if (outbound_tail == MVIOP_QUEUE_LEN)
244 			outbound_tail = 0;
245 
246 		BUS_SPACE_WRT4_MV2(outbound_tail, outbound_tail);
247 		return p;
248 	} else
249 		return 0;
250 }
251 
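/*
 * Push one 64-bit entry onto the MV inbound queue and ring the inbound
 * doorbell so the IOP notices the new request.
 */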
252 static void hptiop_mv_inbound_write(u_int64_t p, struct hpt_iop_hba *hba)
253 {
254 	u_int32_t inbound_head = BUS_SPACE_RD4_MV2(inbound_head);
255 	u_int32_t head = inbound_head + 1;
256 
257 	if (head == MVIOP_QUEUE_LEN)
258 		head = 0;
259 
260 	bus_space_write_region_4(hba->bar2t, hba->bar2h,
261 			offsetof(struct hpt_iopmu_mv, inbound_q[inbound_head]),
262 			(u_int32_t *)&p, 2);
263 	BUS_SPACE_WRT4_MV2(inbound_head, head);
264 	BUS_SPACE_WRT4_MV0(inbound_doorbell, MVIOP_MU_INBOUND_INT_POSTQUEUE);
265 }
266 
267 static void hptiop_post_msg_itl(struct hpt_iop_hba *hba, u_int32_t msg)
268 {
269 	BUS_SPACE_WRT4_ITL(inbound_msgaddr0, msg);
270 	BUS_SPACE_RD4_ITL(outbound_intstatus);
271 }
272 
273 static void hptiop_post_msg_mv(struct hpt_iop_hba *hba, u_int32_t msg)
274 {
275 
276 	BUS_SPACE_WRT4_MV2(inbound_msg, msg);
277 	BUS_SPACE_WRT4_MV0(inbound_doorbell, MVIOP_MU_INBOUND_INT_MSG);
278 
279 	BUS_SPACE_RD4_MV0(outbound_intmask);
280 }
281 
282 static void hptiop_post_msg_mvfrey(struct hpt_iop_hba *hba, u_int32_t msg)
283 {
284 	BUS_SPACE_WRT4_MVFREY2(f0_to_cpu_msg_a, msg);
285 	BUS_SPACE_RD4_MVFREY2(f0_to_cpu_msg_a);
286 }
287 
288 static int hptiop_wait_ready_itl(struct hpt_iop_hba * hba, u_int32_t millisec)
289 {
290 	u_int32_t req = 0;
291 	int i;
292 
293 	for (i = 0; i < millisec; i++) {
294 		req = BUS_SPACE_RD4_ITL(inbound_queue);
295 		if (req != IOPMU_QUEUE_EMPTY)
296 			break;
297 		DELAY(1000);
298 	}
299 
300 	if (req != IOPMU_QUEUE_EMPTY) {
301 		BUS_SPACE_WRT4_ITL(outbound_queue, req);
302 		BUS_SPACE_RD4_ITL(outbound_intstatus);
303 		return 0;
304 	}
305 
306 	return -1;
307 }
308 
309 static int hptiop_wait_ready_mv(struct hpt_iop_hba * hba, u_int32_t millisec)
310 {
311 	if (hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_NOP, millisec))
312 		return -1;
313 
314 	return 0;
315 }
316 
317 static int hptiop_wait_ready_mvfrey(struct hpt_iop_hba * hba,
318 							u_int32_t millisec)
319 {
320 	if (hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_NOP, millisec))
321 		return -1;
322 
323 	return 0;
324 }
325 
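/*
 * Completion handler for the ITL interface.  If the host bits are set,
 * the index selects a host-allocated SRB; otherwise it is an offset
 * into the IOP's BAR0 window and the request header is read through
 * bus_space.
 */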
326 static void hptiop_request_callback_itl(struct hpt_iop_hba * hba,
327 							u_int32_t index)
328 {
329 	struct hpt_iop_srb *srb;
330 	struct hpt_iop_request_scsi_command *req=NULL;
331 	union ccb *ccb;
332 	u_int8_t *cdb;
333 	u_int32_t result, temp, dxfer;
334 	u_int64_t temp64;
335 
336 	if (index & IOPMU_QUEUE_MASK_HOST_BITS) { /*host req*/
337 		if (hba->firmware_version > 0x01020000 ||
338 			hba->interface_version > 0x01020000) {
339 			srb = hba->srb[index & ~(u_int32_t)
340 				(IOPMU_QUEUE_ADDR_HOST_BIT
341 				| IOPMU_QUEUE_REQUEST_RESULT_BIT)];
342 			req = (struct hpt_iop_request_scsi_command *)srb;
343 			if (index & IOPMU_QUEUE_REQUEST_RESULT_BIT)
344 				result = IOP_RESULT_SUCCESS;
345 			else
346 				result = req->header.result;
347 		} else {
348 			srb = hba->srb[index &
349 				~(u_int32_t)IOPMU_QUEUE_ADDR_HOST_BIT];
350 			req = (struct hpt_iop_request_scsi_command *)srb;
351 			result = req->header.result;
352 		}
353 		dxfer = req->dataxfer_length;
354 		goto srb_complete;
355 	}
356 
357 	/*iop req*/
358 	temp = bus_space_read_4(hba->bar0t, hba->bar0h, index +
359 		offsetof(struct hpt_iop_request_header, type));
360 	result = bus_space_read_4(hba->bar0t, hba->bar0h, index +
361 		offsetof(struct hpt_iop_request_header, result));
362 	switch(temp) {
363 	case IOP_REQUEST_TYPE_IOCTL_COMMAND:
364 	{
365 		temp64 = 0;
366 		bus_space_write_region_4(hba->bar0t, hba->bar0h, index +
367 			offsetof(struct hpt_iop_request_header, context),
368 			(u_int32_t *)&temp64, 2);
369 		wakeup((void *)((unsigned long)hba->u.itl.mu + index));
370 		break;
371 	}
372 
373 	case IOP_REQUEST_TYPE_SCSI_COMMAND:
374 		bus_space_read_region_4(hba->bar0t, hba->bar0h, index +
375 			offsetof(struct hpt_iop_request_header, context),
376 			(u_int32_t *)&temp64, 2);
377 		srb = (struct hpt_iop_srb *)(unsigned long)temp64;
378 		dxfer = bus_space_read_4(hba->bar0t, hba->bar0h,
379 				index + offsetof(struct hpt_iop_request_scsi_command,
380 				dataxfer_length));
381 srb_complete:
382 		ccb = (union ccb *)srb->ccb;
383 		if (ccb->ccb_h.flags & CAM_CDB_POINTER)
384 			cdb = ccb->csio.cdb_io.cdb_ptr;
385 		else
386 			cdb = ccb->csio.cdb_io.cdb_bytes;
387 
388 		if (cdb[0] == SYNCHRONIZE_CACHE) { /* XXX: result ignored, reported as success */
389 			ccb->ccb_h.status = CAM_REQ_CMP;
390 			goto scsi_done;
391 		}
392 
393 		switch (result) {
394 		case IOP_RESULT_SUCCESS:
395 			switch (ccb->ccb_h.flags & CAM_DIR_MASK) {
396 			case CAM_DIR_IN:
397 				bus_dmamap_sync(hba->io_dmat,
398 					srb->dma_map, BUS_DMASYNC_POSTREAD);
399 				bus_dmamap_unload(hba->io_dmat, srb->dma_map);
400 				break;
401 			case CAM_DIR_OUT:
402 				bus_dmamap_sync(hba->io_dmat,
403 					srb->dma_map, BUS_DMASYNC_POSTWRITE);
404 				bus_dmamap_unload(hba->io_dmat, srb->dma_map);
405 				break;
406 			}
407 
408 			ccb->ccb_h.status = CAM_REQ_CMP;
409 			break;
410 
411 		case IOP_RESULT_BAD_TARGET:
412 			ccb->ccb_h.status = CAM_DEV_NOT_THERE;
413 			break;
414 		case IOP_RESULT_BUSY:
415 			ccb->ccb_h.status = CAM_BUSY;
416 			break;
417 		case IOP_RESULT_INVALID_REQUEST:
418 			ccb->ccb_h.status = CAM_REQ_INVALID;
419 			break;
420 		case IOP_RESULT_FAIL:
421 			ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
422 			break;
423 		case IOP_RESULT_RESET:
424 			ccb->ccb_h.status = CAM_BUSY;
425 			break;
426 		case IOP_RESULT_CHECK_CONDITION:
427 			memset(&ccb->csio.sense_data, 0,
428 			    sizeof(ccb->csio.sense_data));
429 			if (dxfer < ccb->csio.sense_len)
430 				ccb->csio.sense_resid = ccb->csio.sense_len -
431 				    dxfer;
432 			else
433 				ccb->csio.sense_resid = 0;
434 			if (srb->srb_flag & HPT_SRB_FLAG_HIGH_MEM_ACESS) {/*iop*/
435 				bus_space_read_region_1(hba->bar0t, hba->bar0h,
436 					index + offsetof(struct hpt_iop_request_scsi_command,
437 					sg_list), (u_int8_t *)&ccb->csio.sense_data,
438 					MIN(dxfer, sizeof(ccb->csio.sense_data)));
439 			} else {
440 				memcpy(&ccb->csio.sense_data, &req->sg_list,
441 					MIN(dxfer, sizeof(ccb->csio.sense_data)));
442 			}
443 			ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
444 			ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
445 			ccb->csio.scsi_status = SCSI_STATUS_CHECK_COND;
446 			break;
447 		default:
448 			ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
449 			break;
450 		}
451 scsi_done:
452 		if (srb->srb_flag & HPT_SRB_FLAG_HIGH_MEM_ACESS)
453 			BUS_SPACE_WRT4_ITL(outbound_queue, index);
454 
455 		ccb->csio.resid = ccb->csio.dxfer_len - dxfer;
456 
457 		hptiop_free_srb(hba, srb);
458 		xpt_done(ccb);
459 		break;
460 	}
461 }
462 
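/*
 * Drain the ITL outbound queue.  Host requests are completed via the
 * callback; for synchronous IOP-side requests whose context is still
 * zero, a non-zero context is written back so the waiting sender can
 * see the completion.
 */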
463 static void hptiop_drain_outbound_queue_itl(struct hpt_iop_hba *hba)
464 {
465 	u_int32_t req, temp;
466 
467 	while ((req = BUS_SPACE_RD4_ITL(outbound_queue)) != IOPMU_QUEUE_EMPTY) {
468 		if (req & IOPMU_QUEUE_MASK_HOST_BITS)
469 			hptiop_request_callback_itl(hba, req);
470 		else {
471 			temp = bus_space_read_4(hba->bar0t,
472 					hba->bar0h,req +
473 					offsetof(struct hpt_iop_request_header,
474 						flags));
475 			if (temp & IOP_REQUEST_FLAG_SYNC_REQUEST) {
476 				u_int64_t temp64;
477 				bus_space_read_region_4(hba->bar0t,
478 					hba->bar0h,req +
479 					offsetof(struct hpt_iop_request_header,
480 						context),
481 					(u_int32_t *)&temp64, 2);
482 				if (temp64) {
483 					hptiop_request_callback_itl(hba, req);
484 				} else {
485 					temp64 = 1;
486 					bus_space_write_region_4(hba->bar0t,
487 						hba->bar0h,req +
488 						offsetof(struct hpt_iop_request_header,
489 							context),
490 						(u_int32_t *)&temp64, 2);
491 				}
492 			} else
493 				hptiop_request_callback_itl(hba, req);
494 		}
495 	}
496 }
497 
498 static int hptiop_intr_itl(struct hpt_iop_hba * hba)
499 {
500 	u_int32_t status;
501 	int ret = 0;
502 
503 	status = BUS_SPACE_RD4_ITL(outbound_intstatus);
504 
505 	if (status & IOPMU_OUTBOUND_INT_MSG0) {
506 		u_int32_t msg = BUS_SPACE_RD4_ITL(outbound_msgaddr0);
507 		KdPrint(("hptiop: received outbound msg %x\n", msg));
508 		BUS_SPACE_WRT4_ITL(outbound_intstatus, IOPMU_OUTBOUND_INT_MSG0);
509 		hptiop_os_message_callback(hba, msg);
510 		ret = 1;
511 	}
512 
513 	if (status & IOPMU_OUTBOUND_INT_POSTQUEUE) {
514 		hptiop_drain_outbound_queue_itl(hba);
515 		ret = 1;
516 	}
517 
518 	return ret;
519 }
520 
521 static void hptiop_request_callback_mv(struct hpt_iop_hba * hba,
522 							u_int64_t _tag)
523 {
524 	u_int32_t context = (u_int32_t)_tag;
525 
526 	if (context & MVIOP_CMD_TYPE_SCSI) {
527 		struct hpt_iop_srb *srb;
528 		struct hpt_iop_request_scsi_command *req;
529 		union ccb *ccb;
530 		u_int8_t *cdb;
531 
532 		srb = hba->srb[context >> MVIOP_REQUEST_NUMBER_START_BIT];
533 		req = (struct hpt_iop_request_scsi_command *)srb;
534 		ccb = (union ccb *)srb->ccb;
535 		if (ccb->ccb_h.flags & CAM_CDB_POINTER)
536 			cdb = ccb->csio.cdb_io.cdb_ptr;
537 		else
538 			cdb = ccb->csio.cdb_io.cdb_bytes;
539 
540 		if (cdb[0] == SYNCHRONIZE_CACHE) { /* XXX: result ignored, reported as success */
541 			ccb->ccb_h.status = CAM_REQ_CMP;
542 			goto scsi_done;
543 		}
544 		if (context & MVIOP_MU_QUEUE_REQUEST_RESULT_BIT)
545 			req->header.result = IOP_RESULT_SUCCESS;
546 
547 		switch (req->header.result) {
548 		case IOP_RESULT_SUCCESS:
549 			switch (ccb->ccb_h.flags & CAM_DIR_MASK) {
550 			case CAM_DIR_IN:
551 				bus_dmamap_sync(hba->io_dmat,
552 					srb->dma_map, BUS_DMASYNC_POSTREAD);
553 				bus_dmamap_unload(hba->io_dmat, srb->dma_map);
554 				break;
555 			case CAM_DIR_OUT:
556 				bus_dmamap_sync(hba->io_dmat,
557 					srb->dma_map, BUS_DMASYNC_POSTWRITE);
558 				bus_dmamap_unload(hba->io_dmat, srb->dma_map);
559 				break;
560 			}
561 			ccb->ccb_h.status = CAM_REQ_CMP;
562 			break;
563 		case IOP_RESULT_BAD_TARGET:
564 			ccb->ccb_h.status = CAM_DEV_NOT_THERE;
565 			break;
566 		case IOP_RESULT_BUSY:
567 			ccb->ccb_h.status = CAM_BUSY;
568 			break;
569 		case IOP_RESULT_INVALID_REQUEST:
570 			ccb->ccb_h.status = CAM_REQ_INVALID;
571 			break;
572 		case IOP_RESULT_FAIL:
573 			ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
574 			break;
575 		case IOP_RESULT_RESET:
576 			ccb->ccb_h.status = CAM_BUSY;
577 			break;
578 		case IOP_RESULT_CHECK_CONDITION:
579 			memset(&ccb->csio.sense_data, 0,
580 			    sizeof(ccb->csio.sense_data));
581 			if (req->dataxfer_length < ccb->csio.sense_len)
582 				ccb->csio.sense_resid = ccb->csio.sense_len -
583 				    req->dataxfer_length;
584 			else
585 				ccb->csio.sense_resid = 0;
586 			memcpy(&ccb->csio.sense_data, &req->sg_list,
587 				MIN(req->dataxfer_length, sizeof(ccb->csio.sense_data)));
588 			ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
589 			ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
590 			ccb->csio.scsi_status = SCSI_STATUS_CHECK_COND;
591 			break;
592 		default:
593 			ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
594 			break;
595 		}
596 scsi_done:
597 		ccb->csio.resid = ccb->csio.dxfer_len - req->dataxfer_length;
598 
599 		hptiop_free_srb(hba, srb);
600 		xpt_done(ccb);
601 	} else if (context & MVIOP_CMD_TYPE_IOCTL) {
602 		struct hpt_iop_request_ioctl_command *req = hba->ctlcfg_ptr;
603 		if (context & MVIOP_MU_QUEUE_REQUEST_RESULT_BIT)
604 			hba->config_done = 1;
605 		else
606 			hba->config_done = -1;
607 		wakeup(req);
608 	} else if (context &
609 			(MVIOP_CMD_TYPE_SET_CONFIG |
610 				MVIOP_CMD_TYPE_GET_CONFIG))
611 		hba->config_done = 1;
612 	else {
613 		device_printf(hba->pcidev, "wrong callback type\n");
614 	}
615 }
616 
617 static void hptiop_request_callback_mvfrey(struct hpt_iop_hba * hba,
618 				u_int32_t _tag)
619 {
620 	u_int32_t req_type = _tag & 0xf;
621 
622 	struct hpt_iop_srb *srb;
623 	struct hpt_iop_request_scsi_command *req;
624 	union ccb *ccb;
625 	u_int8_t *cdb;
626 
627 	switch (req_type) {
628 	case IOP_REQUEST_TYPE_GET_CONFIG:
629 	case IOP_REQUEST_TYPE_SET_CONFIG:
630 		hba->config_done = 1;
631 		break;
632 
633 	case IOP_REQUEST_TYPE_SCSI_COMMAND:
634 		srb = hba->srb[(_tag >> 4) & 0xff];
635 		req = (struct hpt_iop_request_scsi_command *)srb;
636 
637 		ccb = (union ccb *)srb->ccb;
638 
639 		callout_stop(&srb->timeout);
640 
641 		if (ccb->ccb_h.flags & CAM_CDB_POINTER)
642 			cdb = ccb->csio.cdb_io.cdb_ptr;
643 		else
644 			cdb = ccb->csio.cdb_io.cdb_bytes;
645 
646 		if (cdb[0] == SYNCHRONIZE_CACHE) { /* XXX: result ignored, reported as success */
647 			ccb->ccb_h.status = CAM_REQ_CMP;
648 			goto scsi_done;
649 		}
650 
651 		if (_tag & MVFREYIOPMU_QUEUE_REQUEST_RESULT_BIT)
652 			req->header.result = IOP_RESULT_SUCCESS;
653 
654 		switch (req->header.result) {
655 		case IOP_RESULT_SUCCESS:
656 			switch (ccb->ccb_h.flags & CAM_DIR_MASK) {
657 			case CAM_DIR_IN:
658 				bus_dmamap_sync(hba->io_dmat,
659 						srb->dma_map, BUS_DMASYNC_POSTREAD);
660 				bus_dmamap_unload(hba->io_dmat, srb->dma_map);
661 				break;
662 			case CAM_DIR_OUT:
663 				bus_dmamap_sync(hba->io_dmat,
664 						srb->dma_map, BUS_DMASYNC_POSTWRITE);
665 				bus_dmamap_unload(hba->io_dmat, srb->dma_map);
666 				break;
667 			}
668 			ccb->ccb_h.status = CAM_REQ_CMP;
669 			break;
670 		case IOP_RESULT_BAD_TARGET:
671 			ccb->ccb_h.status = CAM_DEV_NOT_THERE;
672 			break;
673 		case IOP_RESULT_BUSY:
674 			ccb->ccb_h.status = CAM_BUSY;
675 			break;
676 		case IOP_RESULT_INVALID_REQUEST:
677 			ccb->ccb_h.status = CAM_REQ_INVALID;
678 			break;
679 		case IOP_RESULT_FAIL:
680 			ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
681 			break;
682 		case IOP_RESULT_RESET:
683 			ccb->ccb_h.status = CAM_BUSY;
684 			break;
685 		case IOP_RESULT_CHECK_CONDITION:
686 			memset(&ccb->csio.sense_data, 0,
687 			       sizeof(ccb->csio.sense_data));
688 			if (req->dataxfer_length < ccb->csio.sense_len)
689 				ccb->csio.sense_resid = ccb->csio.sense_len -
690 				req->dataxfer_length;
691 			else
692 				ccb->csio.sense_resid = 0;
693 			memcpy(&ccb->csio.sense_data, &req->sg_list,
694 			       MIN(req->dataxfer_length, sizeof(ccb->csio.sense_data)));
695 			ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
696 			ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
697 			ccb->csio.scsi_status = SCSI_STATUS_CHECK_COND;
698 			break;
699 		default:
700 			ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
701 			break;
702 		}
703 scsi_done:
704 		ccb->csio.resid = ccb->csio.dxfer_len - req->dataxfer_length;
705 
706 		hptiop_free_srb(hba, srb);
707 		xpt_done(ccb);
708 		break;
709 	case IOP_REQUEST_TYPE_IOCTL_COMMAND:
710 		if (_tag & MVFREYIOPMU_QUEUE_REQUEST_RESULT_BIT)
711 			hba->config_done = 1;
712 		else
713 			hba->config_done = -1;
714 		wakeup((struct hpt_iop_request_ioctl_command *)hba->ctlcfg_ptr);
715 		break;
716 	default:
717 		device_printf(hba->pcidev, "wrong callback type\n");
718 		break;
719 	}
720 }
721 
722 static void hptiop_drain_outbound_queue_mv(struct hpt_iop_hba * hba)
723 {
724 	u_int64_t req;
725 
726 	while ((req = hptiop_mv_outbound_read(hba))) {
727 		if (req & MVIOP_MU_QUEUE_ADDR_HOST_BIT) {
728 			if (req & MVIOP_MU_QUEUE_REQUEST_RETURN_CONTEXT) {
729 				hptiop_request_callback_mv(hba, req);
730 			}
731 		}
732 	}
733 }
734 
735 static int hptiop_intr_mv(struct hpt_iop_hba * hba)
736 {
737 	u_int32_t status;
738 	int ret = 0;
739 
740 	status = BUS_SPACE_RD4_MV0(outbound_doorbell);
741 
742 	if (status)
743 		BUS_SPACE_WRT4_MV0(outbound_doorbell, ~status);
744 
745 	if (status & MVIOP_MU_OUTBOUND_INT_MSG) {
746 		u_int32_t msg = BUS_SPACE_RD4_MV2(outbound_msg);
747 		KdPrint(("hptiop: received outbound msg %x\n", msg));
748 		hptiop_os_message_callback(hba, msg);
749 		ret = 1;
750 	}
751 
752 	if (status & MVIOP_MU_OUTBOUND_INT_POSTQUEUE) {
753 		hptiop_drain_outbound_queue_mv(hba);
754 		ret = 1;
755 	}
756 
757 	return ret;
758 }
759 
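/*
 * mvfrey interrupt handler: acknowledge the doorbell and ISR cause,
 * dispatch any firmware message, then walk the outbound completion
 * list up to the producer index the firmware publishes in
 * *outlist_cptr.
 */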
760 static int hptiop_intr_mvfrey(struct hpt_iop_hba * hba)
761 {
762 	u_int32_t status, _tag, cptr;
763 	int ret = 0;
764 
765 	if (hba->initialized) {
766 		BUS_SPACE_WRT4_MVFREY2(pcie_f0_int_enable, 0);
767 	}
768 
769 	status = BUS_SPACE_RD4_MVFREY2(f0_doorbell);
770 	if (status) {
771 		BUS_SPACE_WRT4_MVFREY2(f0_doorbell, status);
772 		if (status & CPU_TO_F0_DRBL_MSG_A_BIT) {
773 			u_int32_t msg = BUS_SPACE_RD4_MVFREY2(cpu_to_f0_msg_a);
774 			hptiop_os_message_callback(hba, msg);
775 		}
776 		ret = 1;
777 	}
778 
779 	status = BUS_SPACE_RD4_MVFREY2(isr_cause);
780 	if (status) {
781 		BUS_SPACE_WRT4_MVFREY2(isr_cause, status);
782 		do {
783 			cptr = *hba->u.mvfrey.outlist_cptr & 0xff;
784 			while (hba->u.mvfrey.outlist_rptr != cptr) {
785 				hba->u.mvfrey.outlist_rptr++;
786 				if (hba->u.mvfrey.outlist_rptr == hba->u.mvfrey.list_count) {
787 					hba->u.mvfrey.outlist_rptr = 0;
788 				}
789 
790 				_tag = hba->u.mvfrey.outlist[hba->u.mvfrey.outlist_rptr].val;
791 				hptiop_request_callback_mvfrey(hba, _tag);
792 				ret = 2;
793 			}
794 		} while (cptr != (*hba->u.mvfrey.outlist_cptr & 0xff));
795 	}
796 
797 	if (hba->initialized) {
798 		BUS_SPACE_WRT4_MVFREY2(pcie_f0_int_enable, 0x1010);
799 	}
800 
801 	return ret;
802 }
803 
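/*
 * Synchronous request posting: each variant posts a prebuilt request
 * and busy-waits, polling the interrupt handler once per millisecond.
 * The ITL variant watches the request's context field; the MV and
 * mvfrey variants below wait for hba->config_done.
 */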
804 static int hptiop_send_sync_request_itl(struct hpt_iop_hba * hba,
805 					u_int32_t req32, u_int32_t millisec)
806 {
807 	u_int32_t i;
808 	u_int64_t temp64;
809 
810 	BUS_SPACE_WRT4_ITL(inbound_queue, req32);
811 	BUS_SPACE_RD4_ITL(outbound_intstatus);
812 
813 	for (i = 0; i < millisec; i++) {
814 		hptiop_intr_itl(hba);
815 		bus_space_read_region_4(hba->bar0t, hba->bar0h, req32 +
816 			offsetof(struct hpt_iop_request_header, context),
817 			(u_int32_t *)&temp64, 2);
818 		if (temp64)
819 			return 0;
820 		DELAY(1000);
821 	}
822 
823 	return -1;
824 }
825 
826 static int hptiop_send_sync_request_mv(struct hpt_iop_hba *hba,
827 					void *req, u_int32_t millisec)
828 {
829 	u_int32_t i;
830 	u_int64_t phy_addr;
831 	hba->config_done = 0;
832 
833 	phy_addr = hba->ctlcfgcmd_phy |
834 			(u_int64_t)MVIOP_MU_QUEUE_ADDR_HOST_BIT;
835 	((struct hpt_iop_request_get_config *)req)->header.flags |=
836 		IOP_REQUEST_FLAG_SYNC_REQUEST |
837 		IOP_REQUEST_FLAG_OUTPUT_CONTEXT;
838 	hptiop_mv_inbound_write(phy_addr, hba);
839 	BUS_SPACE_RD4_MV0(outbound_intmask);
840 
841 	for (i = 0; i < millisec; i++) {
842 		hptiop_intr_mv(hba);
843 		if (hba->config_done)
844 			return 0;
845 		DELAY(1000);
846 	}
847 	return -1;
848 }
849 
850 static int hptiop_send_sync_request_mvfrey(struct hpt_iop_hba *hba,
851 					void *req, u_int32_t millisec)
852 {
853 	u_int32_t i, index;
854 	u_int64_t phy_addr;
855 	struct hpt_iop_request_header *reqhdr =
856 			(struct hpt_iop_request_header *)req;
857 
858 	hba->config_done = 0;
859 
860 	phy_addr = hba->ctlcfgcmd_phy;
861 	reqhdr->flags = IOP_REQUEST_FLAG_SYNC_REQUEST
862 					| IOP_REQUEST_FLAG_OUTPUT_CONTEXT
863 					| IOP_REQUEST_FLAG_ADDR_BITS
864 					| ((phy_addr >> 16) & 0xffff0000);
865 	reqhdr->context = ((phy_addr & 0xffffffff) << 32)
866 					| IOPMU_QUEUE_ADDR_HOST_BIT | reqhdr->type;
867 
868 	hba->u.mvfrey.inlist_wptr++;
869 	index = hba->u.mvfrey.inlist_wptr & 0x3fff;
870 
871 	if (index == hba->u.mvfrey.list_count) {
872 		index = 0;
873 		hba->u.mvfrey.inlist_wptr &= ~0x3fff;
874 		hba->u.mvfrey.inlist_wptr ^= CL_POINTER_TOGGLE;
875 	}
876 
877 	hba->u.mvfrey.inlist[index].addr = phy_addr;
878 	hba->u.mvfrey.inlist[index].intrfc_len = (reqhdr->size + 3) / 4;
879 
880 	BUS_SPACE_WRT4_MVFREY2(inbound_write_ptr, hba->u.mvfrey.inlist_wptr);
881 	BUS_SPACE_RD4_MVFREY2(inbound_write_ptr);
882 
883 	for (i = 0; i < millisec; i++) {
884 		hptiop_intr_mvfrey(hba);
885 		if (hba->config_done)
886 			return 0;
887 		DELAY(1000);
888 	}
889 	return -1;
890 }
891 
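/*
 * Post a message to the adapter and poll the interrupt handler for up
 * to 'millisec' milliseconds waiting for hba->msg_done.
 */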
892 static int hptiop_send_sync_msg(struct hpt_iop_hba *hba,
893 					u_int32_t msg, u_int32_t millisec)
894 {
895 	u_int32_t i;
896 
897 	hba->msg_done = 0;
898 	hba->ops->post_msg(hba, msg);
899 
900 	for (i = 0; i < millisec; i++) {
901 		hba->ops->iop_intr(hba);
902 		if (hba->msg_done)
903 			break;
904 		DELAY(1000);
905 	}
906 
907 	return hba->msg_done ? 0 : -1;
908 }
909 
910 static int hptiop_get_config_itl(struct hpt_iop_hba * hba,
911 				struct hpt_iop_request_get_config * config)
912 {
913 	u_int32_t req32;
914 
915 	config->header.size = sizeof(struct hpt_iop_request_get_config);
916 	config->header.type = IOP_REQUEST_TYPE_GET_CONFIG;
917 	config->header.flags = IOP_REQUEST_FLAG_SYNC_REQUEST;
918 	config->header.result = IOP_RESULT_PENDING;
919 	config->header.context = 0;
920 
921 	req32 = BUS_SPACE_RD4_ITL(inbound_queue);
922 	if (req32 == IOPMU_QUEUE_EMPTY)
923 		return -1;
924 
925 	bus_space_write_region_4(hba->bar0t, hba->bar0h,
926 			req32, (u_int32_t *)config,
927 			sizeof(struct hpt_iop_request_header) >> 2);
928 
929 	if (hptiop_send_sync_request_itl(hba, req32, 20000)) {
930 		KdPrint(("hptiop: get config send cmd failed"));
931 		return -1;
932 	}
933 
934 	bus_space_read_region_4(hba->bar0t, hba->bar0h,
935 			req32, (u_int32_t *)config,
936 			sizeof(struct hpt_iop_request_get_config) >> 2);
937 
938 	BUS_SPACE_WRT4_ITL(outbound_queue, req32);
939 
940 	return 0;
941 }
942 
943 static int hptiop_get_config_mv(struct hpt_iop_hba * hba,
944 				struct hpt_iop_request_get_config * config)
945 {
946 	struct hpt_iop_request_get_config *req;
947 
948 	if (!(req = hba->ctlcfg_ptr))
949 		return -1;
950 
951 	req->header.flags = 0;
952 	req->header.type = IOP_REQUEST_TYPE_GET_CONFIG;
953 	req->header.size = sizeof(struct hpt_iop_request_get_config);
954 	req->header.result = IOP_RESULT_PENDING;
955 	req->header.context = MVIOP_CMD_TYPE_GET_CONFIG;
956 
957 	if (hptiop_send_sync_request_mv(hba, req, 20000)) {
958 		KdPrint(("hptiop: get config send cmd failed"));
959 		return -1;
960 	}
961 
962 	*config = *req;
963 	return 0;
964 }
965 
966 static int hptiop_get_config_mvfrey(struct hpt_iop_hba * hba,
967 				struct hpt_iop_request_get_config * config)
968 {
969 	struct hpt_iop_request_get_config *info = hba->u.mvfrey.config;
970 
971 	if (info->header.size != sizeof(struct hpt_iop_request_get_config) ||
972 	    info->header.type != IOP_REQUEST_TYPE_GET_CONFIG) {
973 		KdPrint(("hptiop: header size %x/%x type %x/%x",
974 			 info->header.size, (int)sizeof(struct hpt_iop_request_get_config),
975 			 info->header.type, IOP_REQUEST_TYPE_GET_CONFIG));
976 		return -1;
977 	}
978 
979 	config->interface_version = info->interface_version;
980 	config->firmware_version = info->firmware_version;
981 	config->max_requests = info->max_requests;
982 	config->request_size = info->request_size;
983 	config->max_sg_count = info->max_sg_count;
984 	config->data_transfer_length = info->data_transfer_length;
985 	config->alignment_mask = info->alignment_mask;
986 	config->max_devices = info->max_devices;
987 	config->sdram_size = info->sdram_size;
988 
989 	KdPrint(("hptiop: maxreq %x reqsz %x datalen %x maxdev %x sdram %x",
990 		 config->max_requests, config->request_size,
991 		 config->data_transfer_length, config->max_devices,
992 		 config->sdram_size));
993 
994 	return 0;
995 }
996 
997 static int hptiop_set_config_itl(struct hpt_iop_hba *hba,
998 				struct hpt_iop_request_set_config *config)
999 {
1000 	u_int32_t req32;
1001 
1002 	req32 = BUS_SPACE_RD4_ITL(inbound_queue);
1003 
1004 	if (req32 == IOPMU_QUEUE_EMPTY)
1005 		return -1;
1006 
1007 	config->header.size = sizeof(struct hpt_iop_request_set_config);
1008 	config->header.type = IOP_REQUEST_TYPE_SET_CONFIG;
1009 	config->header.flags = IOP_REQUEST_FLAG_SYNC_REQUEST;
1010 	config->header.result = IOP_RESULT_PENDING;
1011 	config->header.context = 0;
1012 
1013 	bus_space_write_region_4(hba->bar0t, hba->bar0h, req32,
1014 		(u_int32_t *)config,
1015 		sizeof(struct hpt_iop_request_set_config) >> 2);
1016 
1017 	if (hptiop_send_sync_request_itl(hba, req32, 20000)) {
1018 		KdPrint(("hptiop: set config send cmd failed"));
1019 		return -1;
1020 	}
1021 
1022 	BUS_SPACE_WRT4_ITL(outbound_queue, req32);
1023 
1024 	return 0;
1025 }
1026 
1027 static int hptiop_set_config_mv(struct hpt_iop_hba *hba,
1028 				struct hpt_iop_request_set_config *config)
1029 {
1030 	struct hpt_iop_request_set_config *req;
1031 
1032 	if (!(req = hba->ctlcfg_ptr))
1033 		return -1;
1034 
1035 	memcpy((u_int8_t *)req + sizeof(struct hpt_iop_request_header),
1036 		(u_int8_t *)config + sizeof(struct hpt_iop_request_header),
1037 		sizeof(struct hpt_iop_request_set_config) -
1038 			sizeof(struct hpt_iop_request_header));
1039 
1040 	req->header.flags = 0;
1041 	req->header.type = IOP_REQUEST_TYPE_SET_CONFIG;
1042 	req->header.size = sizeof(struct hpt_iop_request_set_config);
1043 	req->header.result = IOP_RESULT_PENDING;
1044 	req->header.context = MVIOP_CMD_TYPE_SET_CONFIG;
1045 
1046 	if (hptiop_send_sync_request_mv(hba, req, 20000)) {
1047 		KdPrint(("hptiop: set config send cmd failed"));
1048 		return -1;
1049 	}
1050 
1051 	return 0;
1052 }
1053 
1054 static int hptiop_set_config_mvfrey(struct hpt_iop_hba *hba,
1055 				struct hpt_iop_request_set_config *config)
1056 {
1057 	struct hpt_iop_request_set_config *req;
1058 
1059 	if (!(req = hba->ctlcfg_ptr))
1060 		return -1;
1061 
1062 	memcpy((u_int8_t *)req + sizeof(struct hpt_iop_request_header),
1063 		(u_int8_t *)config + sizeof(struct hpt_iop_request_header),
1064 		sizeof(struct hpt_iop_request_set_config) -
1065 			sizeof(struct hpt_iop_request_header));
1066 
1067 	req->header.type = IOP_REQUEST_TYPE_SET_CONFIG;
1068 	req->header.size = sizeof(struct hpt_iop_request_set_config);
1069 	req->header.result = IOP_RESULT_PENDING;
1070 
1071 	if (hptiop_send_sync_request_mvfrey(hba, req, 20000)) {
1072 		KdPrint(("hptiop: set config send cmd failed"));
1073 		return -1;
1074 	}
1075 
1076 	return 0;
1077 }
1078 
1079 static int hptiop_post_ioctl_command_itl(struct hpt_iop_hba *hba,
1080 				u_int32_t req32,
1081 				struct hpt_iop_ioctl_param *pParams)
1082 {
1083 	u_int64_t temp64;
1084 	struct hpt_iop_request_ioctl_command req;
1085 
1086 	if ((((pParams->nInBufferSize + 3) & ~3) + pParams->nOutBufferSize) >
1087 			(hba->max_request_size -
1088 			offsetof(struct hpt_iop_request_ioctl_command, buf))) {
1089 		device_printf(hba->pcidev, "request size beyond max value\n");
1090 		return -1;
1091 	}
1092 
1093 	req.header.size = offsetof(struct hpt_iop_request_ioctl_command, buf)
1094 		+ pParams->nInBufferSize;
1095 	req.header.type = IOP_REQUEST_TYPE_IOCTL_COMMAND;
1096 	req.header.flags = IOP_REQUEST_FLAG_SYNC_REQUEST;
1097 	req.header.result = IOP_RESULT_PENDING;
1098 	req.header.context = req32 + (u_int64_t)(unsigned long)hba->u.itl.mu;
1099 	req.ioctl_code = HPT_CTL_CODE_BSD_TO_IOP(pParams->dwIoControlCode);
1100 	req.inbuf_size = pParams->nInBufferSize;
1101 	req.outbuf_size = pParams->nOutBufferSize;
1102 	req.bytes_returned = 0;
1103 
1104 	bus_space_write_region_4(hba->bar0t, hba->bar0h, req32, (u_int32_t *)&req,
1105 		offsetof(struct hpt_iop_request_ioctl_command, buf)>>2);
1106 
1107 	hptiop_lock_adapter(hba);
1108 
1109 	BUS_SPACE_WRT4_ITL(inbound_queue, req32);
1110 	BUS_SPACE_RD4_ITL(outbound_intstatus);
1111 
1112 	bus_space_read_region_4(hba->bar0t, hba->bar0h, req32 +
1113 		offsetof(struct hpt_iop_request_ioctl_command, header.context),
1114 		(u_int32_t *)&temp64, 2);
1115 	while (temp64) {
1116 		if (hptiop_sleep(hba, (void *)((unsigned long)hba->u.itl.mu + req32),
1117 				PPAUSE, "hptctl", HPT_OSM_TIMEOUT)==0)
1118 			break;
1119 		hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_RESET, 60000);
1120 		bus_space_read_region_4(hba->bar0t, hba->bar0h,req32 +
1121 			offsetof(struct hpt_iop_request_ioctl_command,
1122 				header.context),
1123 			(u_int32_t *)&temp64, 2);
1124 	}
1125 
1126 	hptiop_unlock_adapter(hba);
1127 	return 0;
1128 }
1129 
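/*
 * Copy ioctl data between a user-space buffer and the IOP's BAR0
 * window one byte at a time, pairing copyin()/copyout() with
 * bus_space_write_1()/bus_space_read_1().
 */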
1130 static int hptiop_bus_space_copyin(struct hpt_iop_hba *hba, u_int32_t bus,
1131 									void *user, int size)
1132 {
1133 	unsigned char byte;
1134 	int i;
1135 
1136 	for (i = 0; i < size; i++) {
1137 		if (copyin((u_int8_t *)user + i, &byte, 1))
1138 			return -1;
1139 		bus_space_write_1(hba->bar0t, hba->bar0h, bus + i, byte);
1140 	}
1141 
1142 	return 0;
1143 }
1144 
1145 static int hptiop_bus_space_copyout(struct hpt_iop_hba *hba, u_int32_t bus,
1146 									void *user, int size)
1147 {
1148 	unsigned char byte;
1149 	int i;
1150 
1151 	for (i = 0; i < size; i++) {
1152 		byte = bus_space_read_1(hba->bar0t, hba->bar0h, bus + i);
1153 		if (copyout(&byte, (u_int8_t *)user + i, 1))
1154 			return -1;
1155 	}
1156 
1157 	return 0;
1158 }
1159 
1160 static int hptiop_do_ioctl_itl(struct hpt_iop_hba *hba,
1161 				struct hpt_iop_ioctl_param * pParams)
1162 {
1163 	u_int32_t req32;
1164 	u_int32_t result;
1165 
1166 	if ((pParams->Magic != HPT_IOCTL_MAGIC) &&
1167 		(pParams->Magic != HPT_IOCTL_MAGIC32))
1168 		return EFAULT;
1169 
1170 	req32 = BUS_SPACE_RD4_ITL(inbound_queue);
1171 	if (req32 == IOPMU_QUEUE_EMPTY)
1172 		return EFAULT;
1173 
1174 	if (pParams->nInBufferSize)
1175 		if (hptiop_bus_space_copyin(hba, req32 +
1176 			offsetof(struct hpt_iop_request_ioctl_command, buf),
1177 			(void *)pParams->lpInBuffer, pParams->nInBufferSize))
1178 			goto invalid;
1179 
1180 	if (hptiop_post_ioctl_command_itl(hba, req32, pParams))
1181 		goto invalid;
1182 
1183 	result = bus_space_read_4(hba->bar0t, hba->bar0h, req32 +
1184 			offsetof(struct hpt_iop_request_ioctl_command,
1185 				header.result));
1186 
1187 	if (result == IOP_RESULT_SUCCESS) {
1188 		if (pParams->nOutBufferSize)
1189 			if (hptiop_bus_space_copyout(hba, req32 +
1190 				offsetof(struct hpt_iop_request_ioctl_command, buf) +
1191 					((pParams->nInBufferSize + 3) & ~3),
1192 				(void *)pParams->lpOutBuffer, pParams->nOutBufferSize))
1193 				goto invalid;
1194 
1195 		if (pParams->lpBytesReturned) {
1196 			if (hptiop_bus_space_copyout(hba, req32 +
1197 				offsetof(struct hpt_iop_request_ioctl_command, bytes_returned),
1198 				(void *)pParams->lpBytesReturned, sizeof(unsigned long)))
1199 				goto invalid;
1200 		}
1201 
1202 		BUS_SPACE_WRT4_ITL(outbound_queue, req32);
1203 
1204 		return 0;
1205 	} else {
1206 invalid:
1207 		BUS_SPACE_WRT4_ITL(outbound_queue, req32);
1208 
1209 		return EFAULT;
1210 	}
1211 }
1212 
1213 static int hptiop_post_ioctl_command_mv(struct hpt_iop_hba *hba,
1214 				struct hpt_iop_request_ioctl_command *req,
1215 				struct hpt_iop_ioctl_param *pParams)
1216 {
1217 	u_int64_t req_phy;
1218 	int size = 0;
1219 
1220 	if ((((pParams->nInBufferSize + 3) & ~3) + pParams->nOutBufferSize) >
1221 			(hba->max_request_size -
1222 			offsetof(struct hpt_iop_request_ioctl_command, buf))) {
1223 		device_printf(hba->pcidev, "request size beyond max value\n");
1224 		return -1;
1225 	}
1226 
1227 	req->ioctl_code = HPT_CTL_CODE_BSD_TO_IOP(pParams->dwIoControlCode);
1228 	req->inbuf_size = pParams->nInBufferSize;
1229 	req->outbuf_size = pParams->nOutBufferSize;
1230 	req->header.size = offsetof(struct hpt_iop_request_ioctl_command, buf)
1231 					+ pParams->nInBufferSize;
1232 	req->header.context = (u_int64_t)MVIOP_CMD_TYPE_IOCTL;
1233 	req->header.type = IOP_REQUEST_TYPE_IOCTL_COMMAND;
1234 	req->header.result = IOP_RESULT_PENDING;
1235 	req->header.flags = IOP_REQUEST_FLAG_OUTPUT_CONTEXT;
1236 	size = req->header.size >> 8;
1237 	size = imin(3, size);
1238 	req_phy = hba->ctlcfgcmd_phy | MVIOP_MU_QUEUE_ADDR_HOST_BIT | size;
1239 	hptiop_mv_inbound_write(req_phy, hba);
1240 
1241 	BUS_SPACE_RD4_MV0(outbound_intmask);
1242 
1243 	while (hba->config_done == 0) {
1244 		if (hptiop_sleep(hba, req, PPAUSE,
1245 			"hptctl", HPT_OSM_TIMEOUT)==0)
1246 			continue;
1247 		hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_RESET, 60000);
1248 	}
1249 	return 0;
1250 }
1251 
1252 static int hptiop_do_ioctl_mv(struct hpt_iop_hba *hba,
1253 				struct hpt_iop_ioctl_param *pParams)
1254 {
1255 	struct hpt_iop_request_ioctl_command *req;
1256 
1257 	if ((pParams->Magic != HPT_IOCTL_MAGIC) &&
1258 		(pParams->Magic != HPT_IOCTL_MAGIC32))
1259 		return EFAULT;
1260 
1261 	req = (struct hpt_iop_request_ioctl_command *)(hba->ctlcfg_ptr);
1262 	hba->config_done = 0;
1263 	hptiop_lock_adapter(hba);
1264 	if (pParams->nInBufferSize)
1265 		if (copyin((void *)pParams->lpInBuffer,
1266 				req->buf, pParams->nInBufferSize))
1267 			goto invalid;
1268 	if (hptiop_post_ioctl_command_mv(hba, req, pParams))
1269 		goto invalid;
1270 
1271 	if (hba->config_done == 1) {
1272 		if (pParams->nOutBufferSize)
1273 			if (copyout(req->buf +
1274 				((pParams->nInBufferSize + 3) & ~3),
1275 				(void *)pParams->lpOutBuffer,
1276 				pParams->nOutBufferSize))
1277 				goto invalid;
1278 
1279 		if (pParams->lpBytesReturned)
1280 			if (copyout(&req->bytes_returned,
1281 				(void*)pParams->lpBytesReturned,
1282 				sizeof(u_int32_t)))
1283 				goto invalid;
1284 		hptiop_unlock_adapter(hba);
1285 		return 0;
1286 	} else {
1287 invalid:
1288 		hptiop_unlock_adapter(hba);
1289 		return EFAULT;
1290 	}
1291 }
1292 
1293 static int hptiop_post_ioctl_command_mvfrey(struct hpt_iop_hba *hba,
1294 				struct hpt_iop_request_ioctl_command *req,
1295 				struct hpt_iop_ioctl_param *pParams)
1296 {
1297 	u_int64_t phy_addr;
1298 	u_int32_t index;
1299 
1300 	phy_addr = hba->ctlcfgcmd_phy;
1301 
1302 	if ((((pParams->nInBufferSize + 3) & ~3) + pParams->nOutBufferSize) >
1303 			(hba->max_request_size -
1304 			offsetof(struct hpt_iop_request_ioctl_command, buf))) {
1305 		device_printf(hba->pcidev, "request size beyond max value\n");
1306 		return -1;
1307 	}
1308 
1309 	req->ioctl_code = HPT_CTL_CODE_BSD_TO_IOP(pParams->dwIoControlCode);
1310 	req->inbuf_size = pParams->nInBufferSize;
1311 	req->outbuf_size = pParams->nOutBufferSize;
1312 	req->header.size = offsetof(struct hpt_iop_request_ioctl_command, buf)
1313 					+ pParams->nInBufferSize;
1314 
1315 	req->header.type = IOP_REQUEST_TYPE_IOCTL_COMMAND;
1316 	req->header.result = IOP_RESULT_PENDING;
1317 
1318 	req->header.flags = IOP_REQUEST_FLAG_SYNC_REQUEST
1319 						| IOP_REQUEST_FLAG_OUTPUT_CONTEXT
1320 						| IOP_REQUEST_FLAG_ADDR_BITS
1321 						| ((phy_addr >> 16) & 0xffff0000);
1322 	req->header.context = ((phy_addr & 0xffffffff) << 32 )
1323 						| IOPMU_QUEUE_ADDR_HOST_BIT | req->header.type;
1324 
1325 	hba->u.mvfrey.inlist_wptr++;
1326 	index = hba->u.mvfrey.inlist_wptr & 0x3fff;
1327 
1328 	if (index == hba->u.mvfrey.list_count) {
1329 		index = 0;
1330 		hba->u.mvfrey.inlist_wptr &= ~0x3fff;
1331 		hba->u.mvfrey.inlist_wptr ^= CL_POINTER_TOGGLE;
1332 	}
1333 
1334 	hba->u.mvfrey.inlist[index].addr = phy_addr;
1335 	hba->u.mvfrey.inlist[index].intrfc_len = (req->header.size + 3) / 4;
1336 
1337 	BUS_SPACE_WRT4_MVFREY2(inbound_write_ptr, hba->u.mvfrey.inlist_wptr);
1338 	BUS_SPACE_RD4_MVFREY2(inbound_write_ptr);
1339 
1340 	while (hba->config_done == 0) {
1341 		if (hptiop_sleep(hba, req, PPAUSE,
1342 			"hptctl", HPT_OSM_TIMEOUT)==0)
1343 			continue;
1344 		hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_RESET, 60000);
1345 	}
1346 	return 0;
1347 }
1348 
1349 static int hptiop_do_ioctl_mvfrey(struct hpt_iop_hba *hba,
1350 				struct hpt_iop_ioctl_param *pParams)
1351 {
1352 	struct hpt_iop_request_ioctl_command *req;
1353 
1354 	if ((pParams->Magic != HPT_IOCTL_MAGIC) &&
1355 		(pParams->Magic != HPT_IOCTL_MAGIC32))
1356 		return EFAULT;
1357 
1358 	req = (struct hpt_iop_request_ioctl_command *)(hba->ctlcfg_ptr);
1359 	hba->config_done = 0;
1360 	hptiop_lock_adapter(hba);
1361 	if (pParams->nInBufferSize)
1362 		if (copyin((void *)pParams->lpInBuffer,
1363 				req->buf, pParams->nInBufferSize))
1364 			goto invalid;
1365 	if (hptiop_post_ioctl_command_mvfrey(hba, req, pParams))
1366 		goto invalid;
1367 
1368 	if (hba->config_done == 1) {
1369 		if (pParams->nOutBufferSize)
1370 			if (copyout(req->buf +
1371 				((pParams->nInBufferSize + 3) & ~3),
1372 				(void *)pParams->lpOutBuffer,
1373 				pParams->nOutBufferSize))
1374 				goto invalid;
1375 
1376 		if (pParams->lpBytesReturned)
1377 			if (copyout(&req->bytes_returned,
1378 				(void*)pParams->lpBytesReturned,
1379 				sizeof(u_int32_t)))
1380 				goto invalid;
1381 		hptiop_unlock_adapter(hba);
1382 		return 0;
1383 	} else {
1384 invalid:
1385 		hptiop_unlock_adapter(hba);
1386 		return EFAULT;
1387 	}
1388 }
1389 
1390 static int  hptiop_rescan_bus(struct hpt_iop_hba * hba)
1391 {
1392 	union ccb           *ccb;
1393 
1394 	if ((ccb = xpt_alloc_ccb()) == NULL)
1395 		return(ENOMEM);
1396 	if (xpt_create_path(&ccb->ccb_h.path, NULL, cam_sim_path(hba->sim),
1397 		CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
1398 		xpt_free_ccb(ccb);
1399 		return(EIO);
1400 	}
1401 	xpt_rescan(ccb);
1402 	return(0);
1403 }
1404 
1405 static  bus_dmamap_callback_t   hptiop_map_srb;
1406 static  bus_dmamap_callback_t   hptiop_post_scsi_command;
1407 static  bus_dmamap_callback_t   hptiop_mv_map_ctlcfg;
1408 static	bus_dmamap_callback_t	hptiop_mvfrey_map_ctlcfg;
1409 
1410 static int hptiop_alloc_pci_res_itl(struct hpt_iop_hba *hba)
1411 {
1412 	hba->bar0_rid = 0x10;
1413 	hba->bar0_res = bus_alloc_resource_any(hba->pcidev,
1414 			SYS_RES_MEMORY, &hba->bar0_rid, RF_ACTIVE);
1415 
1416 	if (hba->bar0_res == NULL) {
1417 		device_printf(hba->pcidev,
1418 			"failed to get iop base address.\n");
1419 		return -1;
1420 	}
1421 	hba->bar0t = rman_get_bustag(hba->bar0_res);
1422 	hba->bar0h = rman_get_bushandle(hba->bar0_res);
1423 	hba->u.itl.mu = (struct hpt_iopmu_itl *)
1424 				rman_get_virtual(hba->bar0_res);
1425 
1426 	if (!hba->u.itl.mu) {
1427 		bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
1428 					hba->bar0_rid, hba->bar0_res);
1429 		device_printf(hba->pcidev, "alloc mem res failed\n");
1430 		return -1;
1431 	}
1432 
1433 	return 0;
1434 }
1435 
1436 static int hptiop_alloc_pci_res_mv(struct hpt_iop_hba *hba)
1437 {
1438 	hba->bar0_rid = 0x10;
1439 	hba->bar0_res = bus_alloc_resource_any(hba->pcidev,
1440 			SYS_RES_MEMORY, &hba->bar0_rid, RF_ACTIVE);
1441 
1442 	if (hba->bar0_res == NULL) {
1443 		device_printf(hba->pcidev, "failed to get iop bar0.\n");
1444 		return -1;
1445 	}
1446 	hba->bar0t = rman_get_bustag(hba->bar0_res);
1447 	hba->bar0h = rman_get_bushandle(hba->bar0_res);
1448 	hba->u.mv.regs = (struct hpt_iopmv_regs *)
1449 				rman_get_virtual(hba->bar0_res);
1450 
1451 	if (!hba->u.mv.regs) {
1452 		bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
1453 					hba->bar0_rid, hba->bar0_res);
1454 		device_printf(hba->pcidev, "alloc bar0 mem res failed\n");
1455 		return -1;
1456 	}
1457 
1458 	hba->bar2_rid = 0x18;
1459 	hba->bar2_res = bus_alloc_resource_any(hba->pcidev,
1460 			SYS_RES_MEMORY, &hba->bar2_rid, RF_ACTIVE);
1461 
1462 	if (hba->bar2_res == NULL) {
1463 		bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
1464 					hba->bar0_rid, hba->bar0_res);
1465 		device_printf(hba->pcidev, "failed to get iop bar2.\n");
1466 		return -1;
1467 	}
1468 
1469 	hba->bar2t = rman_get_bustag(hba->bar2_res);
1470 	hba->bar2h = rman_get_bushandle(hba->bar2_res);
1471 	hba->u.mv.mu = (struct hpt_iopmu_mv *)rman_get_virtual(hba->bar2_res);
1472 
1473 	if (!hba->u.mv.mu) {
1474 		bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
1475 					hba->bar0_rid, hba->bar0_res);
1476 		bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
1477 					hba->bar2_rid, hba->bar2_res);
1478 		device_printf(hba->pcidev, "alloc mem bar2 res failed\n");
1479 		return -1;
1480 	}
1481 
1482 	return 0;
1483 }
1484 
1485 static int hptiop_alloc_pci_res_mvfrey(struct hpt_iop_hba *hba)
1486 {
1487 	hba->bar0_rid = 0x10;
1488 	hba->bar0_res = bus_alloc_resource_any(hba->pcidev,
1489 			SYS_RES_MEMORY, &hba->bar0_rid, RF_ACTIVE);
1490 
1491 	if (hba->bar0_res == NULL) {
1492 		device_printf(hba->pcidev, "failed to get iop bar0.\n");
1493 		return -1;
1494 	}
1495 	hba->bar0t = rman_get_bustag(hba->bar0_res);
1496 	hba->bar0h = rman_get_bushandle(hba->bar0_res);
1497 	hba->u.mvfrey.config = (struct hpt_iop_request_get_config *)
1498 				rman_get_virtual(hba->bar0_res);
1499 
1500 	if (!hba->u.mvfrey.config) {
1501 		bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
1502 					hba->bar0_rid, hba->bar0_res);
1503 		device_printf(hba->pcidev, "alloc bar0 mem res failed\n");
1504 		return -1;
1505 	}
1506 
1507 	hba->bar2_rid = 0x18;
1508 	hba->bar2_res = bus_alloc_resource_any(hba->pcidev,
1509 			SYS_RES_MEMORY, &hba->bar2_rid, RF_ACTIVE);
1510 
1511 	if (hba->bar2_res == NULL) {
1512 		bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
1513 					hba->bar0_rid, hba->bar0_res);
1514 		device_printf(hba->pcidev, "failed to get iop bar2.\n");
1515 		return -1;
1516 	}
1517 
1518 	hba->bar2t = rman_get_bustag(hba->bar2_res);
1519 	hba->bar2h = rman_get_bushandle(hba->bar2_res);
1520 	hba->u.mvfrey.mu =
1521 		(struct hpt_iopmu_mvfrey *)rman_get_virtual(hba->bar2_res);
1522 
1523 	if (!hba->u.mvfrey.mu) {
1524 		bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
1525 					hba->bar0_rid, hba->bar0_res);
1526 		bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
1527 					hba->bar2_rid, hba->bar2_res);
1528 		device_printf(hba->pcidev, "alloc mem bar2 res failed\n");
1529 		return -1;
1530 	}
1531 
1532 	return 0;
1533 }
1534 
1535 static void hptiop_release_pci_res_itl(struct hpt_iop_hba *hba)
1536 {
1537 	if (hba->bar0_res)
1538 		bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
1539 			hba->bar0_rid, hba->bar0_res);
1540 }
1541 
1542 static void hptiop_release_pci_res_mv(struct hpt_iop_hba *hba)
1543 {
1544 	if (hba->bar0_res)
1545 		bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
1546 			hba->bar0_rid, hba->bar0_res);
1547 	if (hba->bar2_res)
1548 		bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
1549 			hba->bar2_rid, hba->bar2_res);
1550 }
1551 
1552 static void hptiop_release_pci_res_mvfrey(struct hpt_iop_hba *hba)
1553 {
1554 	if (hba->bar0_res)
1555 		bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
1556 			hba->bar0_rid, hba->bar0_res);
1557 	if (hba->bar2_res)
1558 		bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
1559 			hba->bar2_rid, hba->bar2_res);
1560 }
1561 
1562 static int hptiop_internal_memalloc_mv(struct hpt_iop_hba *hba)
1563 {
1564 	if (bus_dma_tag_create(hba->parent_dmat,
1565 				1,
1566 				0,
1567 				BUS_SPACE_MAXADDR_32BIT,
1568 				BUS_SPACE_MAXADDR,
1569 				NULL, NULL,
1570 				0x800 - 0x8,
1571 				1,
1572 				BUS_SPACE_MAXSIZE_32BIT,
1573 				BUS_DMA_ALLOCNOW,
1574 				NULL,
1575 				NULL,
1576 				&hba->ctlcfg_dmat)) {
1577 		device_printf(hba->pcidev, "alloc ctlcfg_dmat failed\n");
1578 		return -1;
1579 	}
1580 
1581 	if (bus_dmamem_alloc(hba->ctlcfg_dmat, (void **)&hba->ctlcfg_ptr,
1582 		BUS_DMA_WAITOK | BUS_DMA_COHERENT,
1583 		&hba->ctlcfg_dmamap) != 0) {
1584 			device_printf(hba->pcidev,
1585 					"bus_dmamem_alloc failed!\n");
1586 			bus_dma_tag_destroy(hba->ctlcfg_dmat);
1587 			return -1;
1588 	}
1589 
1590 	if (bus_dmamap_load(hba->ctlcfg_dmat,
1591 			hba->ctlcfg_dmamap, hba->ctlcfg_ptr,
1592 			MVIOP_IOCTLCFG_SIZE,
1593 			hptiop_mv_map_ctlcfg, hba, 0)) {
1594 		device_printf(hba->pcidev, "bus_dmamap_load failed!\n");
1595 		if (hba->ctlcfg_dmat) {
1596 			bus_dmamem_free(hba->ctlcfg_dmat,
1597 				hba->ctlcfg_ptr, hba->ctlcfg_dmamap);
1598 			bus_dma_tag_destroy(hba->ctlcfg_dmat);
1599 		}
1600 		return -1;
1601 	}
1602 
1603 	return 0;
1604 }
1605 
1606 static int hptiop_internal_memalloc_mvfrey(struct hpt_iop_hba *hba)
1607 {
1608 	u_int32_t list_count = BUS_SPACE_RD4_MVFREY2(inbound_conf_ctl);
1609 
1610 	list_count >>= 16;
1611 
1612 	if (list_count == 0) {
1613 		return -1;
1614 	}
1615 
1616 	hba->u.mvfrey.list_count = list_count;
1617 	hba->u.mvfrey.internal_mem_size = 0x800
1618 							+ list_count * sizeof(struct mvfrey_inlist_entry)
1619 							+ list_count * sizeof(struct mvfrey_outlist_entry)
1620 							+ sizeof(int);
1621 	if (bus_dma_tag_create(hba->parent_dmat,
1622 				1,
1623 				0,
1624 				BUS_SPACE_MAXADDR_32BIT,
1625 				BUS_SPACE_MAXADDR,
1626 				NULL, NULL,
1627 				hba->u.mvfrey.internal_mem_size,
1628 				1,
1629 				BUS_SPACE_MAXSIZE_32BIT,
1630 				BUS_DMA_ALLOCNOW,
1631 				NULL,
1632 				NULL,
1633 				&hba->ctlcfg_dmat)) {
1634 		device_printf(hba->pcidev, "alloc ctlcfg_dmat failed\n");
1635 		return -1;
1636 	}
1637 
1638 	if (bus_dmamem_alloc(hba->ctlcfg_dmat, (void **)&hba->ctlcfg_ptr,
1639 		BUS_DMA_WAITOK | BUS_DMA_COHERENT,
1640 		&hba->ctlcfg_dmamap) != 0) {
1641 			device_printf(hba->pcidev,
1642 					"bus_dmamem_alloc failed!\n");
1643 			bus_dma_tag_destroy(hba->ctlcfg_dmat);
1644 			return -1;
1645 	}
1646 
1647 	if (bus_dmamap_load(hba->ctlcfg_dmat,
1648 			hba->ctlcfg_dmamap, hba->ctlcfg_ptr,
1649 			hba->u.mvfrey.internal_mem_size,
1650 			hptiop_mvfrey_map_ctlcfg, hba, 0)) {
1651 		device_printf(hba->pcidev, "bus_dmamap_load failed!\n");
1652 		if (hba->ctlcfg_dmat) {
1653 			bus_dmamem_free(hba->ctlcfg_dmat,
1654 				hba->ctlcfg_ptr, hba->ctlcfg_dmamap);
1655 			bus_dma_tag_destroy(hba->ctlcfg_dmat);
1656 		}
1657 		return -1;
1658 	}
1659 
1660 	return 0;
1661 }
1662 
1663 static int hptiop_internal_memfree_itl(struct hpt_iop_hba *hba) {
1664 	return 0;
1665 }
1666 
1667 static int hptiop_internal_memfree_mv(struct hpt_iop_hba *hba)
1668 {
1669 	if (hba->ctlcfg_dmat) {
1670 		bus_dmamap_unload(hba->ctlcfg_dmat, hba->ctlcfg_dmamap);
1671 		bus_dmamem_free(hba->ctlcfg_dmat,
1672 					hba->ctlcfg_ptr, hba->ctlcfg_dmamap);
1673 		bus_dma_tag_destroy(hba->ctlcfg_dmat);
1674 	}
1675 
1676 	return 0;
1677 }
1678 
1679 static int hptiop_internal_memfree_mvfrey(struct hpt_iop_hba *hba)
1680 {
1681 	if (hba->ctlcfg_dmat) {
1682 		bus_dmamap_unload(hba->ctlcfg_dmat, hba->ctlcfg_dmamap);
1683 		bus_dmamem_free(hba->ctlcfg_dmat,
1684 					hba->ctlcfg_ptr, hba->ctlcfg_dmamap);
1685 		bus_dma_tag_destroy(hba->ctlcfg_dmat);
1686 	}
1687 
1688 	return 0;
1689 }
1690 
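/*
 * Reinitialize the mvfrey command interface: send RESET_COMM, give the
 * MCU roughly 100ms to recover, then reprogram the inbound/outbound
 * list base addresses and reset the ring pointers and toggles.
 */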
1691 static int hptiop_reset_comm_mvfrey(struct hpt_iop_hba *hba)
1692 {
1693 	u_int32_t i = 100;
1694 
1695 	if (hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_RESET_COMM, 3000))
1696 		return -1;
1697 
1698 	/* wait 100ms for MCU ready */
1699 	while(i--) {
1700 		DELAY(1000);
1701 	}
1702 
1703 	BUS_SPACE_WRT4_MVFREY2(inbound_base,
1704 							hba->u.mvfrey.inlist_phy & 0xffffffff);
1705 	BUS_SPACE_WRT4_MVFREY2(inbound_base_high,
1706 							(hba->u.mvfrey.inlist_phy >> 16) >> 16);
1707 
1708 	BUS_SPACE_WRT4_MVFREY2(outbound_base,
1709 							hba->u.mvfrey.outlist_phy & 0xffffffff);
1710 	BUS_SPACE_WRT4_MVFREY2(outbound_base_high,
1711 							(hba->u.mvfrey.outlist_phy >> 16) >> 16);
1712 
1713 	BUS_SPACE_WRT4_MVFREY2(outbound_shadow_base,
1714 							hba->u.mvfrey.outlist_cptr_phy & 0xffffffff);
1715 	BUS_SPACE_WRT4_MVFREY2(outbound_shadow_base_high,
1716 							(hba->u.mvfrey.outlist_cptr_phy >> 16) >> 16);
1717 
1718 	hba->u.mvfrey.inlist_wptr = (hba->u.mvfrey.list_count - 1)
1719 								| CL_POINTER_TOGGLE;
1720 	*hba->u.mvfrey.outlist_cptr = (hba->u.mvfrey.list_count - 1)
1721 								| CL_POINTER_TOGGLE;
1722 	hba->u.mvfrey.outlist_rptr = hba->u.mvfrey.list_count - 1;
1723 
1724 	return 0;
1725 }
1726 
1727 /*
1728  * CAM driver interface
1729  */
1730 static device_method_t driver_methods[] = {
1731 	/* Device interface */
1732 	DEVMETHOD(device_probe,     hptiop_probe),
1733 	DEVMETHOD(device_attach,    hptiop_attach),
1734 	DEVMETHOD(device_detach,    hptiop_detach),
1735 	DEVMETHOD(device_shutdown,  hptiop_shutdown),
1736 	{ 0, 0 }
1737 };
1738 
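/*
 * Per-family operation tables.  hptiop_probe() selects one of these by PCI
 * device ID and the rest of the driver dispatches through hba->ops; entries
 * left NULL are not needed by that family.
 */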
1739 static struct hptiop_adapter_ops hptiop_itl_ops = {
1740 	.family	           = INTEL_BASED_IOP,
1741 	.iop_wait_ready    = hptiop_wait_ready_itl,
1742 	.internal_memalloc = NULL,
1743 	.internal_memfree  = hptiop_internal_memfree_itl,
1744 	.alloc_pci_res     = hptiop_alloc_pci_res_itl,
1745 	.release_pci_res   = hptiop_release_pci_res_itl,
1746 	.enable_intr       = hptiop_enable_intr_itl,
1747 	.disable_intr      = hptiop_disable_intr_itl,
1748 	.get_config        = hptiop_get_config_itl,
1749 	.set_config        = hptiop_set_config_itl,
1750 	.iop_intr          = hptiop_intr_itl,
1751 	.post_msg          = hptiop_post_msg_itl,
1752 	.post_req          = hptiop_post_req_itl,
1753 	.do_ioctl          = hptiop_do_ioctl_itl,
1754 	.reset_comm        = NULL,
1755 };
1756 
1757 static struct hptiop_adapter_ops hptiop_mv_ops = {
1758 	.family	           = MV_BASED_IOP,
1759 	.iop_wait_ready    = hptiop_wait_ready_mv,
1760 	.internal_memalloc = hptiop_internal_memalloc_mv,
1761 	.internal_memfree  = hptiop_internal_memfree_mv,
1762 	.alloc_pci_res     = hptiop_alloc_pci_res_mv,
1763 	.release_pci_res   = hptiop_release_pci_res_mv,
1764 	.enable_intr       = hptiop_enable_intr_mv,
1765 	.disable_intr      = hptiop_disable_intr_mv,
1766 	.get_config        = hptiop_get_config_mv,
1767 	.set_config        = hptiop_set_config_mv,
1768 	.iop_intr          = hptiop_intr_mv,
1769 	.post_msg          = hptiop_post_msg_mv,
1770 	.post_req          = hptiop_post_req_mv,
1771 	.do_ioctl          = hptiop_do_ioctl_mv,
1772 	.reset_comm        = 0,
1773 };
1774 
1775 static struct hptiop_adapter_ops hptiop_mvfrey_ops = {
1776 	.family	           = MVFREY_BASED_IOP,
1777 	.iop_wait_ready    = hptiop_wait_ready_mvfrey,
1778 	.internal_memalloc = hptiop_internal_memalloc_mvfrey,
1779 	.internal_memfree  = hptiop_internal_memfree_mvfrey,
1780 	.alloc_pci_res     = hptiop_alloc_pci_res_mvfrey,
1781 	.release_pci_res   = hptiop_release_pci_res_mvfrey,
1782 	.enable_intr       = hptiop_enable_intr_mvfrey,
1783 	.disable_intr      = hptiop_disable_intr_mvfrey,
1784 	.get_config        = hptiop_get_config_mvfrey,
1785 	.set_config        = hptiop_set_config_mvfrey,
1786 	.iop_intr          = hptiop_intr_mvfrey,
1787 	.post_msg          = hptiop_post_msg_mvfrey,
1788 	.post_req          = hptiop_post_req_mvfrey,
1789 	.do_ioctl          = hptiop_do_ioctl_mvfrey,
1790 	.reset_comm        = hptiop_reset_comm_mvfrey,
1791 };
1792 
1793 static driver_t hptiop_pci_driver = {
1794 	driver_name,
1795 	driver_methods,
1796 	sizeof(struct hpt_iop_hba)
1797 };
1798 
1799 DRIVER_MODULE(hptiop, pci, hptiop_pci_driver, hptiop_devclass, 0, 0);
1800 MODULE_DEPEND(hptiop, cam, 1, 1, 1);
1801 
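/*
 * Probe: match HighPoint controllers (vendor 0x1103) and pick the adapter
 * ops table by device ID.  The cases that set sas = 1 deliberately fall
 * through into the rest of their family's ID list.
 */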
1802 static int hptiop_probe(device_t dev)
1803 {
1804 	struct hpt_iop_hba *hba;
1805 	u_int32_t id;
1806 	static char buf[256];
1807 	int sas = 0;
1808 	struct hptiop_adapter_ops *ops;
1809 
1810 	if (pci_get_vendor(dev) != 0x1103)
1811 		return (ENXIO);
1812 
1813 	id = pci_get_device(dev);
1814 
1815 	switch (id) {
1816 		case 0x4520:
1817 		case 0x4521:
1818 		case 0x4522:
1819 			sas = 1;	/* FALLTHROUGH */
1820 		case 0x3620:
1821 		case 0x3622:
1822 		case 0x3640:
1823 			ops = &hptiop_mvfrey_ops;
1824 			break;
1825 		case 0x4210:
1826 		case 0x4211:
1827 		case 0x4310:
1828 		case 0x4311:
1829 		case 0x4320:
1830 		case 0x4321:
1831 		case 0x4322:
1832 			sas = 1;	/* FALLTHROUGH */
1833 		case 0x3220:
1834 		case 0x3320:
1835 		case 0x3410:
1836 		case 0x3520:
1837 		case 0x3510:
1838 		case 0x3511:
1839 		case 0x3521:
1840 		case 0x3522:
1841 		case 0x3530:
1842 		case 0x3540:
1843 		case 0x3560:
1844 			ops = &hptiop_itl_ops;
1845 			break;
1846 		case 0x3020:
1847 		case 0x3120:
1848 		case 0x3122:
1849 			ops = &hptiop_mv_ops;
1850 			break;
1851 		default:
1852 			return (ENXIO);
1853 	}
1854 
1855 	device_printf(dev, "adapter at PCI %d:%d:%d, IRQ %d\n",
1856 		pci_get_bus(dev), pci_get_slot(dev),
1857 		pci_get_function(dev), pci_get_irq(dev));
1858 
1859 	sprintf(buf, "RocketRAID %x %s Controller\n",
1860 				id, sas ? "SAS" : "SATA");
1861 	device_set_desc_copy(dev, buf);
1862 
1863 	hba = (struct hpt_iop_hba *)device_get_softc(dev);
1864 	bzero(hba, sizeof(struct hpt_iop_hba));
1865 	hba->ops = ops;
1866 
1867 	KdPrint(("hba->ops=%p\n", hba->ops));
1868 	return 0;
1869 }
1870 
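/*
 * Attach: map PCI resources, wait for the IOP, create the DMA tags, allocate
 * any family-specific internal memory, exchange get/set config requests with
 * the firmware, set up the SRB pool, register the CAM SIM/bus/path, hook up
 * the interrupt, start the firmware background task and create the
 * /dev/hptiop<unit> ioctl node.  Error paths unwind in reverse order through
 * the labels at the bottom.
 */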
1871 static int hptiop_attach(device_t dev)
1872 {
1873 	struct make_dev_args args;
1874 	struct hpt_iop_hba *hba = (struct hpt_iop_hba *)device_get_softc(dev);
1875 	struct hpt_iop_request_get_config  iop_config;
1876 	struct hpt_iop_request_set_config  set_config;
1877 	int rid = 0;
1878 	struct cam_devq *devq;
1879 	struct ccb_setasync ccb;
1880 	u_int32_t unit = device_get_unit(dev);
1881 
1882 	device_printf(dev, "%d RocketRAID 3xxx/4xxx controller driver %s\n",
1883 			unit, driver_version);
1884 
1885 	KdPrint(("hptiop: attach(%d, %d/%d/%d) ops=%p\n", unit,
1886 		pci_get_bus(dev), pci_get_slot(dev),
1887 		pci_get_function(dev), hba->ops));
1888 
1889 	pci_enable_busmaster(dev);
1890 	hba->pcidev = dev;
1891 	hba->pciunit = unit;
1892 
1893 	if (hba->ops->alloc_pci_res(hba))
1894 		return ENXIO;
1895 
1896 	if (hba->ops->iop_wait_ready(hba, 2000)) {
1897 		device_printf(dev, "adapter is not ready\n");
1898 		goto release_pci_res;
1899 	}
1900 
1901 	mtx_init(&hba->lock, "hptioplock", NULL, MTX_DEF);
1902 
1903 	if (bus_dma_tag_create(bus_get_dma_tag(dev),/* PCI parent */
1904 			1,  /* alignment */
1905 			0, /* boundary */
1906 			BUS_SPACE_MAXADDR,  /* lowaddr */
1907 			BUS_SPACE_MAXADDR,  /* highaddr */
1908 			NULL, NULL,         /* filter, filterarg */
1909 			BUS_SPACE_MAXSIZE_32BIT,    /* maxsize */
1910 			BUS_SPACE_UNRESTRICTED, /* nsegments */
1911 			BUS_SPACE_MAXSIZE_32BIT,    /* maxsegsize */
1912 			0,      /* flags */
1913 			NULL,   /* lockfunc */
1914 			NULL,       /* lockfuncarg */
1915 			&hba->parent_dmat   /* tag */))
1916 	{
1917 		device_printf(dev, "alloc parent_dmat failed\n");
1918 		goto release_pci_res;
1919 	}
1920 
1921 	if (hba->ops->family == MV_BASED_IOP) {
1922 		if (hba->ops->internal_memalloc(hba)) {
1923 			device_printf(dev, "internal_memalloc failed\n");
1924 			goto destroy_parent_tag;
1925 		}
1926 	}
1927 
1928 	if (hba->ops->get_config(hba, &iop_config)) {
1929 		device_printf(dev, "get iop config failed.\n");
1930 		goto get_config_failed;
1931 	}
1932 
1933 	hba->firmware_version = iop_config.firmware_version;
1934 	hba->interface_version = iop_config.interface_version;
1935 	hba->max_requests = iop_config.max_requests;
1936 	hba->max_devices = iop_config.max_devices;
1937 	hba->max_request_size = iop_config.request_size;
1938 	hba->max_sg_count = iop_config.max_sg_count;
1939 
1940 	if (hba->ops->family == MVFREY_BASED_IOP) {
1941 		if (hba->ops->internal_memalloc(hba)) {
1942 			device_printf(dev, "internal_memalloc failed\n");
1943 			goto destroy_parent_tag;
1944 		}
1945 		if (hba->ops->reset_comm(hba)) {
1946 			device_printf(dev, "reset comm failed\n");
1947 			goto get_config_failed;
1948 		}
1949 	}
1950 
1951 	if (bus_dma_tag_create(hba->parent_dmat,/* parent */
1952 			4,  /* alignment */
1953 			BUS_SPACE_MAXADDR_32BIT+1, /* boundary */
1954 			BUS_SPACE_MAXADDR,  /* lowaddr */
1955 			BUS_SPACE_MAXADDR,  /* highaddr */
1956 			NULL, NULL,         /* filter, filterarg */
1957 			PAGE_SIZE * (hba->max_sg_count-1),  /* maxsize */
1958 			hba->max_sg_count,  /* nsegments */
1959 			0x20000,    /* maxsegsize */
1960 			BUS_DMA_ALLOCNOW,       /* flags */
1961 			busdma_lock_mutex,  /* lockfunc */
1962 			&hba->lock,     /* lockfuncarg */
1963 			&hba->io_dmat   /* tag */))
1964 	{
1965 		device_printf(dev, "alloc io_dmat failed\n");
1966 		goto get_config_failed;
1967 	}
1968 
1969 	if (bus_dma_tag_create(hba->parent_dmat,/* parent */
1970 			1,  /* alignment */
1971 			0, /* boundary */
1972 			BUS_SPACE_MAXADDR_32BIT,    /* lowaddr */
1973 			BUS_SPACE_MAXADDR,  /* highaddr */
1974 			NULL, NULL,         /* filter, filterarg */
1975 			HPT_SRB_MAX_SIZE * HPT_SRB_MAX_QUEUE_SIZE + 0x20,
1976 			1,  /* nsegments */
1977 			BUS_SPACE_MAXSIZE_32BIT,    /* maxsegsize */
1978 			0,      /* flags */
1979 			NULL,   /* lockfunc */
1980 			NULL,       /* lockfuncarg */
1981 			&hba->srb_dmat  /* tag */))
1982 	{
1983 		device_printf(dev, "alloc srb_dmat failed\n");
1984 		goto destroy_io_dmat;
1985 	}
1986 
1987 	if (bus_dmamem_alloc(hba->srb_dmat, (void **)&hba->uncached_ptr,
1988 			BUS_DMA_WAITOK | BUS_DMA_COHERENT,
1989 			&hba->srb_dmamap) != 0)
1990 	{
1991 		device_printf(dev, "srb bus_dmamem_alloc failed!\n");
1992 		goto destroy_srb_dmat;
1993 	}
1994 
1995 	if (bus_dmamap_load(hba->srb_dmat,
1996 			hba->srb_dmamap, hba->uncached_ptr,
1997 			(HPT_SRB_MAX_SIZE * HPT_SRB_MAX_QUEUE_SIZE) + 0x20,
1998 			hptiop_map_srb, hba, 0))
1999 	{
2000 		device_printf(dev, "bus_dmamap_load failed!\n");
2001 		goto srb_dmamem_free;
2002 	}
2003 
2004 	if ((devq = cam_simq_alloc(hba->max_requests - 1 )) == NULL) {
2005 		device_printf(dev, "cam_simq_alloc failed\n");
2006 		goto srb_dmamap_unload;
2007 	}
2008 
2009 	hba->sim = cam_sim_alloc(hptiop_action, hptiop_poll, driver_name,
2010 			hba, unit, &hba->lock, hba->max_requests - 1, 1, devq);
2011 	if (!hba->sim) {
2012 		device_printf(dev, "cam_sim_alloc failed\n");
2013 		cam_simq_free(devq);
2014 		goto srb_dmamap_unload;
2015 	}
2016 	hptiop_lock_adapter(hba);
2017 	if (xpt_bus_register(hba->sim, dev, 0) != CAM_SUCCESS)
2018 	{
2019 		device_printf(dev, "xpt_bus_register failed\n");
2020 		goto free_cam_sim;
2021 	}
2022 
2023 	if (xpt_create_path(&hba->path, /*periph */ NULL,
2024 			cam_sim_path(hba->sim), CAM_TARGET_WILDCARD,
2025 			CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
2026 		device_printf(dev, "xpt_create_path failed\n");
2027 		goto deregister_xpt_bus;
2028 	}
2029 	hptiop_unlock_adapter(hba);
2030 
2031 	bzero(&set_config, sizeof(set_config));
2032 	set_config.iop_id = unit;
2033 	set_config.vbus_id = cam_sim_path(hba->sim);
2034 	set_config.max_host_request_size = HPT_SRB_MAX_REQ_SIZE;
2035 
2036 	if (hba->ops->set_config(hba, &set_config)) {
2037 		device_printf(dev, "set iop config failed.\n");
2038 		goto free_hba_path;
2039 	}
2040 
2041 	memset(&ccb, 0, sizeof(ccb));
2042 	xpt_setup_ccb(&ccb.ccb_h, hba->path, /*priority*/5);
2043 	ccb.ccb_h.func_code = XPT_SASYNC_CB;
2044 	ccb.event_enable = (AC_FOUND_DEVICE | AC_LOST_DEVICE);
2045 	ccb.callback = hptiop_async;
2046 	ccb.callback_arg = hba->sim;
2047 	xpt_action((union ccb *)&ccb);
2048 
2049 	rid = 0;
2050 	if ((hba->irq_res = bus_alloc_resource_any(hba->pcidev, SYS_RES_IRQ,
2051 			&rid, RF_SHAREABLE | RF_ACTIVE)) == NULL) {
2052 		device_printf(dev, "allocate irq failed!\n");
2053 		goto free_hba_path;
2054 	}
2055 
2056 	if (bus_setup_intr(hba->pcidev, hba->irq_res, INTR_TYPE_CAM | INTR_MPSAFE,
2057 				NULL, hptiop_pci_intr, hba, &hba->irq_handle))
2058 	{
2059 		device_printf(dev, "allocate intr function failed!\n");
2060 		goto free_irq_resource;
2061 	}
2062 
2063 	if (hptiop_send_sync_msg(hba,
2064 			IOPMU_INBOUND_MSG0_START_BACKGROUND_TASK, 5000)) {
2065 		device_printf(dev, "failed to start background task\n");
2066 		goto teardown_irq_resource;
2067 	}
2068 
2069 	hba->ops->enable_intr(hba);
2070 	hba->initialized = 1;
2071 
2072 	make_dev_args_init(&args);
2073 	args.mda_devsw = &hptiop_cdevsw;
2074 	args.mda_uid = UID_ROOT;
2075 	args.mda_gid = GID_WHEEL /*GID_OPERATOR*/;
2076 	args.mda_mode = S_IRUSR | S_IWUSR;
2077 	args.mda_si_drv1 = hba;
2078 
2079 	make_dev_s(&args, &hba->ioctl_dev, "%s%d", driver_name, unit);
2080 
2081 	return 0;
2082 
2083 
2084 teardown_irq_resource:
2085 	bus_teardown_intr(dev, hba->irq_res, hba->irq_handle);
2086 
2087 free_irq_resource:
2088 	bus_release_resource(dev, SYS_RES_IRQ, 0, hba->irq_res);
2089 
2090 	hptiop_lock_adapter(hba);
2091 free_hba_path:
2092 	xpt_free_path(hba->path);
2093 
2094 deregister_xpt_bus:
2095 	xpt_bus_deregister(cam_sim_path(hba->sim));
2096 
2097 free_cam_sim:
2098 	cam_sim_free(hba->sim, /*free devq*/ TRUE);
2099 	hptiop_unlock_adapter(hba);
2100 
2101 srb_dmamap_unload:
2102 	if (hba->uncached_ptr)
2103 		bus_dmamap_unload(hba->srb_dmat, hba->srb_dmamap);
2104 
2105 srb_dmamem_free:
2106 	if (hba->uncached_ptr)
2107 		bus_dmamem_free(hba->srb_dmat,
2108 			hba->uncached_ptr, hba->srb_dmamap);
2109 
2110 destroy_srb_dmat:
2111 	if (hba->srb_dmat)
2112 		bus_dma_tag_destroy(hba->srb_dmat);
2113 
2114 destroy_io_dmat:
2115 	if (hba->io_dmat)
2116 		bus_dma_tag_destroy(hba->io_dmat);
2117 
2118 get_config_failed:
2119 	hba->ops->internal_memfree(hba);
2120 
2121 destroy_parent_tag:
2122 	if (hba->parent_dmat)
2123 		bus_dma_tag_destroy(hba->parent_dmat);
2124 
2125 release_pci_res:
2126 	if (hba->ops->release_pci_res)
2127 		hba->ops->release_pci_res(hba);
2128 
2129 	return ENXIO;
2130 }
2131 
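/*
 * Detach: refuse while any "da" peripheral on the bus is still referenced,
 * then shut the adapter down, stop the firmware background task and release
 * all resources.
 */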
2132 static int hptiop_detach(device_t dev)
2133 {
2134 	struct hpt_iop_hba * hba = (struct hpt_iop_hba *)device_get_softc(dev);
2135 	int i;
2136 	int error = EBUSY;
2137 
2138 	hptiop_lock_adapter(hba);
2139 	for (i = 0; i < hba->max_devices; i++)
2140 		if (hptiop_os_query_remove_device(hba, i)) {
2141 			device_printf(dev, "%d file system is busy. id=%d",
2142 						hba->pciunit, i);
2143 			goto out;
2144 		}
2145 
2146 	if ((error = hptiop_shutdown(dev)) != 0)
2147 		goto out;
2148 	if (hptiop_send_sync_msg(hba,
2149 		IOPMU_INBOUND_MSG0_STOP_BACKGROUND_TASK, 60000))
2150 		goto out;
2151 	hptiop_unlock_adapter(hba);
2152 
2153 	hptiop_release_resource(hba);
2154 	return (0);
2155 out:
2156 	hptiop_unlock_adapter(hba);
2157 	return error;
2158 }
2159 
2160 static int hptiop_shutdown(device_t dev)
2161 {
2162 	struct hpt_iop_hba * hba = (struct hpt_iop_hba *)device_get_softc(dev);
2163 
2164 	int error = 0;
2165 
2166 	if (hba->flag & HPT_IOCTL_FLAG_OPEN) {
2167 		device_printf(dev, "%d device is busy", hba->pciunit);
2168 		return EBUSY;
2169 	}
2170 
2171 	hba->ops->disable_intr(hba);
2172 
2173 	if (hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_SHUTDOWN, 60000))
2174 		error = EBUSY;
2175 
2176 	return error;
2177 }
2178 
2179 static void hptiop_pci_intr(void *arg)
2180 {
2181 	struct hpt_iop_hba * hba = (struct hpt_iop_hba *)arg;
2182 	hptiop_lock_adapter(hba);
2183 	hba->ops->iop_intr(hba);
2184 	hptiop_unlock_adapter(hba);
2185 }
2186 
2187 static void hptiop_poll(struct cam_sim *sim)
2188 {
2189 	struct hpt_iop_hba *hba;
2190 
2191 	hba = cam_sim_softc(sim);
2192 	hba->ops->iop_intr(hba);
2193 }
2194 
2195 static void hptiop_async(void * callback_arg, u_int32_t code,
2196 					struct cam_path * path, void * arg)
2197 {
2198 }
2199 
2200 static void hptiop_enable_intr_itl(struct hpt_iop_hba *hba)
2201 {
2202 	BUS_SPACE_WRT4_ITL(outbound_intmask,
2203 		~(IOPMU_OUTBOUND_INT_POSTQUEUE | IOPMU_OUTBOUND_INT_MSG0));
2204 }
2205 
2206 static void hptiop_enable_intr_mv(struct hpt_iop_hba *hba)
2207 {
2208 	u_int32_t int_mask;
2209 
2210 	int_mask = BUS_SPACE_RD4_MV0(outbound_intmask);
2211 
2212 	int_mask |= MVIOP_MU_OUTBOUND_INT_POSTQUEUE
2213 			| MVIOP_MU_OUTBOUND_INT_MSG;
2214 	BUS_SPACE_WRT4_MV0(outbound_intmask, int_mask);
2215 }
2216 
2217 static void hptiop_enable_intr_mvfrey(struct hpt_iop_hba *hba)
2218 {
2219 	BUS_SPACE_WRT4_MVFREY2(f0_doorbell_enable, CPU_TO_F0_DRBL_MSG_A_BIT);
2220 	BUS_SPACE_RD4_MVFREY2(f0_doorbell_enable);
2221 
2222 	BUS_SPACE_WRT4_MVFREY2(isr_enable, 0x1);
2223 	BUS_SPACE_RD4_MVFREY2(isr_enable);
2224 
2225 	BUS_SPACE_WRT4_MVFREY2(pcie_f0_int_enable, 0x1010);
2226 	BUS_SPACE_RD4_MVFREY2(pcie_f0_int_enable);
2227 }
2228 
2229 static void hptiop_disable_intr_itl(struct hpt_iop_hba *hba)
2230 {
2231 	u_int32_t int_mask;
2232 
2233 	int_mask = BUS_SPACE_RD4_ITL(outbound_intmask);
2234 
2235 	int_mask |= IOPMU_OUTBOUND_INT_POSTQUEUE | IOPMU_OUTBOUND_INT_MSG0;
2236 	BUS_SPACE_WRT4_ITL(outbound_intmask, int_mask);
2237 	BUS_SPACE_RD4_ITL(outbound_intstatus);
2238 }
2239 
2240 static void hptiop_disable_intr_mv(struct hpt_iop_hba *hba)
2241 {
2242 	u_int32_t int_mask;
2243 	int_mask = BUS_SPACE_RD4_MV0(outbound_intmask);
2244 
2245 	int_mask &= ~(MVIOP_MU_OUTBOUND_INT_MSG
2246 			| MVIOP_MU_OUTBOUND_INT_POSTQUEUE);
2247 	BUS_SPACE_WRT4_MV0(outbound_intmask, int_mask);
2248 	BUS_SPACE_RD4_MV0(outbound_intmask);
2249 }
2250 
2251 static void hptiop_disable_intr_mvfrey(struct hpt_iop_hba *hba)
2252 {
2253 	BUS_SPACE_WRT4_MVFREY2(f0_doorbell_enable, 0);
2254 	BUS_SPACE_RD4_MVFREY2(f0_doorbell_enable);
2255 
2256 	BUS_SPACE_WRT4_MVFREY2(isr_enable, 0);
2257 	BUS_SPACE_RD4_MVFREY2(isr_enable);
2258 
2259 	BUS_SPACE_WRT4_MVFREY2(pcie_f0_int_enable, 0);
2260 	BUS_SPACE_RD4_MVFREY2(pcie_f0_int_enable);
2261 }
2262 
2263 static void hptiop_reset_adapter(void *argv)
2264 {
2265 	struct hpt_iop_hba * hba = (struct hpt_iop_hba *)argv;
2266 	if (hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_RESET, 60000))
2267 		return;
2268 	hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_START_BACKGROUND_TASK, 5000);
2269 }
2270 
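/*
 * The SRB pool is kept on a simple singly linked free list; callers rely on
 * the adapter (SIM) lock for serialization.
 */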
2271 static void *hptiop_get_srb(struct hpt_iop_hba * hba)
2272 {
2273 	struct hpt_iop_srb * srb;
2274 
2275 	if (hba->srb_list) {
2276 		srb = hba->srb_list;
2277 		hba->srb_list = srb->next;
2278 		return srb;
2279 	}
2280 
2281 	return NULL;
2282 }
2283 
2284 static void hptiop_free_srb(struct hpt_iop_hba *hba, struct hpt_iop_srb *srb)
2285 {
2286 	srb->next = hba->srb_list;
2287 	hba->srb_list = srb;
2288 }
2289 
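/*
 * CAM action entry point.  XPT_SCSI_IO requests are bound to an SRB and
 * mapped with bus_dmamap_load_ccb(); hptiop_post_scsi_command() then posts
 * them to the controller, so that path returns without calling xpt_done()
 * here.
 */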
2290 static void hptiop_action(struct cam_sim *sim, union ccb *ccb)
2291 {
2292 	struct hpt_iop_hba * hba = (struct hpt_iop_hba *)cam_sim_softc(sim);
2293 	struct hpt_iop_srb * srb;
2294 	int error;
2295 
2296 	switch (ccb->ccb_h.func_code) {
2297 
2298 	case XPT_SCSI_IO:
2299 		if (ccb->ccb_h.target_lun != 0 ||
2300 			ccb->ccb_h.target_id >= hba->max_devices ||
2301 			(ccb->ccb_h.flags & CAM_CDB_PHYS))
2302 		{
2303 			ccb->ccb_h.status = CAM_TID_INVALID;
2304 			xpt_done(ccb);
2305 			return;
2306 		}
2307 
2308 		if ((srb = hptiop_get_srb(hba)) == NULL) {
2309 			device_printf(hba->pcidev, "srb allocation failed");
2310 			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2311 			xpt_done(ccb);
2312 			return;
2313 		}
2314 
2315 		srb->ccb = ccb;
2316 		error = bus_dmamap_load_ccb(hba->io_dmat,
2317 					    srb->dma_map,
2318 					    ccb,
2319 					    hptiop_post_scsi_command,
2320 					    srb,
2321 					    0);
2322 
2323 		if (error && error != EINPROGRESS) {
2324 			device_printf(hba->pcidev,
2325 				"%d bus_dmamap_load error %d",
2326 				hba->pciunit, error);
2327 			xpt_freeze_simq(hba->sim, 1);
2328 			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2329 			hptiop_free_srb(hba, srb);
2330 			xpt_done(ccb);
2331 			return;
2332 		}
2333 
2334 		return;
2335 
2336 	case XPT_RESET_BUS:
2337 		device_printf(hba->pcidev, "reset adapter");
2338 		hba->msg_done = 0;
2339 		hptiop_reset_adapter(hba);
2340 		break;
2341 
2342 	case XPT_GET_TRAN_SETTINGS:
2343 	case XPT_SET_TRAN_SETTINGS:
2344 		ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
2345 		break;
2346 
2347 	case XPT_CALC_GEOMETRY:
2348 		cam_calc_geometry(&ccb->ccg, 1);
2349 		break;
2350 
2351 	case XPT_PATH_INQ:
2352 	{
2353 		struct ccb_pathinq *cpi = &ccb->cpi;
2354 
2355 		cpi->version_num = 1;
2356 		cpi->hba_inquiry = PI_SDTR_ABLE;
2357 		cpi->target_sprt = 0;
2358 		cpi->hba_misc = PIM_NOBUSRESET;
2359 		cpi->hba_eng_cnt = 0;
2360 		cpi->max_target = hba->max_devices;
2361 		cpi->max_lun = 0;
2362 		cpi->unit_number = cam_sim_unit(sim);
2363 		cpi->bus_id = cam_sim_bus(sim);
2364 		cpi->initiator_id = hba->max_devices;
2365 		cpi->base_transfer_speed = 3300;
2366 
2367 		strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
2368 		strlcpy(cpi->hba_vid, "HPT   ", HBA_IDLEN);
2369 		strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
2370 		cpi->transport = XPORT_SPI;
2371 		cpi->transport_version = 2;
2372 		cpi->protocol = PROTO_SCSI;
2373 		cpi->protocol_version = SCSI_REV_2;
2374 		cpi->ccb_h.status = CAM_REQ_CMP;
2375 		break;
2376 	}
2377 
2378 	default:
2379 		ccb->ccb_h.status = CAM_REQ_INVALID;
2380 		break;
2381 	}
2382 
2383 	xpt_done(ccb);
2384 	return;
2385 }
2386 
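/*
 * Post a SCSI command to an ITL (Intel-based) IOP.  SRBs flagged
 * HPT_SRB_FLAG_HIGH_MEM_ACESS are copied into a request slot obtained from
 * the inbound queue through the BAR0 window; otherwise the request is built
 * in the host SRB and only its encoded physical address is written to the
 * inbound queue.
 */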
2387 static void hptiop_post_req_itl(struct hpt_iop_hba *hba,
2388 				struct hpt_iop_srb *srb,
2389 				bus_dma_segment_t *segs, int nsegs)
2390 {
2391 	int idx;
2392 	union ccb *ccb = srb->ccb;
2393 	u_int8_t *cdb;
2394 
2395 	if (ccb->ccb_h.flags & CAM_CDB_POINTER)
2396 		cdb = ccb->csio.cdb_io.cdb_ptr;
2397 	else
2398 		cdb = ccb->csio.cdb_io.cdb_bytes;
2399 
2400 	KdPrint(("ccb=%p %x-%x-%x\n",
2401 		ccb, *(u_int32_t *)cdb, *((u_int32_t *)cdb+1), *((u_int32_t *)cdb+2)));
2402 
2403 	if (srb->srb_flag & HPT_SRB_FLAG_HIGH_MEM_ACESS) {
2404 		u_int32_t iop_req32;
2405 		struct hpt_iop_request_scsi_command req;
2406 
2407 		iop_req32 = BUS_SPACE_RD4_ITL(inbound_queue);
2408 
2409 		if (iop_req32 == IOPMU_QUEUE_EMPTY) {
2410 			device_printf(hba->pcidev, "invalid req offset\n");
2411 			ccb->ccb_h.status = CAM_BUSY;
2412 			bus_dmamap_unload(hba->io_dmat, srb->dma_map);
2413 			hptiop_free_srb(hba, srb);
2414 			xpt_done(ccb);
2415 			return;
2416 		}
2417 
2418 		if (ccb->csio.dxfer_len && nsegs > 0) {
2419 			struct hpt_iopsg *psg = req.sg_list;
2420 			for (idx = 0; idx < nsegs; idx++, psg++) {
2421 				psg->pci_address = (u_int64_t)segs[idx].ds_addr;
2422 				psg->size = segs[idx].ds_len;
2423 				psg->eot = 0;
2424 			}
2425 			psg[-1].eot = 1;
2426 		}
2427 
2428 		bcopy(cdb, req.cdb, ccb->csio.cdb_len);
2429 
2430 		req.header.size =
2431 				offsetof(struct hpt_iop_request_scsi_command, sg_list)
2432 				+ nsegs*sizeof(struct hpt_iopsg);
2433 		req.header.type = IOP_REQUEST_TYPE_SCSI_COMMAND;
2434 		req.header.flags = 0;
2435 		req.header.result = IOP_RESULT_PENDING;
2436 		req.header.context = (u_int64_t)(unsigned long)srb;
2437 		req.dataxfer_length = ccb->csio.dxfer_len;
2438 		req.channel =  0;
2439 		req.target =  ccb->ccb_h.target_id;
2440 		req.lun =  ccb->ccb_h.target_lun;
2441 
2442 		bus_space_write_region_1(hba->bar0t, hba->bar0h, iop_req32,
2443 			(u_int8_t *)&req, req.header.size);
2444 
2445 		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
2446 			bus_dmamap_sync(hba->io_dmat,
2447 				srb->dma_map, BUS_DMASYNC_PREREAD);
2448 		}
2449 		else if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT)
2450 			bus_dmamap_sync(hba->io_dmat,
2451 				srb->dma_map, BUS_DMASYNC_PREWRITE);
2452 
2453 		BUS_SPACE_WRT4_ITL(inbound_queue, iop_req32);
2454 	} else {
2455 		struct hpt_iop_request_scsi_command *req;
2456 
2457 		req = (struct hpt_iop_request_scsi_command *)srb;
2458 		if (ccb->csio.dxfer_len && nsegs > 0) {
2459 			struct hpt_iopsg *psg = req->sg_list;
2460 			for (idx = 0; idx < nsegs; idx++, psg++) {
2461 				psg->pci_address =
2462 					(u_int64_t)segs[idx].ds_addr;
2463 				psg->size = segs[idx].ds_len;
2464 				psg->eot = 0;
2465 			}
2466 			psg[-1].eot = 1;
2467 		}
2468 
2469 		bcopy(cdb, req->cdb, ccb->csio.cdb_len);
2470 
2471 		req->header.type = IOP_REQUEST_TYPE_SCSI_COMMAND;
2472 		req->header.result = IOP_RESULT_PENDING;
2473 		req->dataxfer_length = ccb->csio.dxfer_len;
2474 		req->channel =  0;
2475 		req->target =  ccb->ccb_h.target_id;
2476 		req->lun =  ccb->ccb_h.target_lun;
2477 		req->header.size =
2478 			offsetof(struct hpt_iop_request_scsi_command, sg_list)
2479 			+ nsegs*sizeof(struct hpt_iopsg);
2480 		req->header.context = (u_int64_t)srb->index |
2481 						IOPMU_QUEUE_ADDR_HOST_BIT;
2482 		req->header.flags = IOP_REQUEST_FLAG_OUTPUT_CONTEXT;
2483 
2484 		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
2485 			bus_dmamap_sync(hba->io_dmat,
2486 				srb->dma_map, BUS_DMASYNC_PREREAD);
2487 		} else if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
2488 			bus_dmamap_sync(hba->io_dmat,
2489 				srb->dma_map, BUS_DMASYNC_PREWRITE);
2490 		}
2491 
2492 		if (hba->firmware_version > 0x01020000
2493 			|| hba->interface_version > 0x01020000) {
2494 			u_int32_t size_bits;
2495 
2496 			if (req->header.size < 256)
2497 				size_bits = IOPMU_QUEUE_REQUEST_SIZE_BIT;
2498 			else if (req->header.size < 512)
2499 				size_bits = IOPMU_QUEUE_ADDR_HOST_BIT;
2500 			else
2501 				size_bits = IOPMU_QUEUE_REQUEST_SIZE_BIT
2502 						| IOPMU_QUEUE_ADDR_HOST_BIT;
2503 
2504 			BUS_SPACE_WRT4_ITL(inbound_queue,
2505 				(u_int32_t)srb->phy_addr | size_bits);
2506 		} else
2507 			BUS_SPACE_WRT4_ITL(inbound_queue, (u_int32_t)srb->phy_addr
2508 				|IOPMU_QUEUE_ADDR_HOST_BIT);
2509 	}
2510 }
2511 
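/*
 * Post a SCSI command to an MV-based IOP: build the request in the SRB and
 * write its physical address, tagged with the host bit and an encoded size,
 * to the MV inbound queue.
 */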
2512 static void hptiop_post_req_mv(struct hpt_iop_hba *hba,
2513 				struct hpt_iop_srb *srb,
2514 				bus_dma_segment_t *segs, int nsegs)
2515 {
2516 	int idx, size;
2517 	union ccb *ccb = srb->ccb;
2518 	u_int8_t *cdb;
2519 	struct hpt_iop_request_scsi_command *req;
2520 	u_int64_t req_phy;
2521 
2522 	req = (struct hpt_iop_request_scsi_command *)srb;
2523 	req_phy = srb->phy_addr;
2524 
2525 	if (ccb->csio.dxfer_len && nsegs > 0) {
2526 		struct hpt_iopsg *psg = req->sg_list;
2527 		for (idx = 0; idx < nsegs; idx++, psg++) {
2528 			psg->pci_address = (u_int64_t)segs[idx].ds_addr;
2529 			psg->size = segs[idx].ds_len;
2530 			psg->eot = 0;
2531 		}
2532 		psg[-1].eot = 1;
2533 	}
2534 	if (ccb->ccb_h.flags & CAM_CDB_POINTER)
2535 		cdb = ccb->csio.cdb_io.cdb_ptr;
2536 	else
2537 		cdb = ccb->csio.cdb_io.cdb_bytes;
2538 
2539 	bcopy(cdb, req->cdb, ccb->csio.cdb_len);
2540 	req->header.type = IOP_REQUEST_TYPE_SCSI_COMMAND;
2541 	req->header.result = IOP_RESULT_PENDING;
2542 	req->dataxfer_length = ccb->csio.dxfer_len;
2543 	req->channel = 0;
2544 	req->target =  ccb->ccb_h.target_id;
2545 	req->lun =  ccb->ccb_h.target_lun;
2546 	req->header.size = sizeof(struct hpt_iop_request_scsi_command)
2547 				- sizeof(struct hpt_iopsg)
2548 				+ nsegs * sizeof(struct hpt_iopsg);
2549 	if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
2550 		bus_dmamap_sync(hba->io_dmat,
2551 			srb->dma_map, BUS_DMASYNC_PREREAD);
2552 	}
2553 	else if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT)
2554 		bus_dmamap_sync(hba->io_dmat,
2555 			srb->dma_map, BUS_DMASYNC_PREWRITE);
2556 	req->header.context = (u_int64_t)srb->index
2557 					<< MVIOP_REQUEST_NUMBER_START_BIT
2558 					| MVIOP_CMD_TYPE_SCSI;
2559 	req->header.flags = IOP_REQUEST_FLAG_OUTPUT_CONTEXT;
2560 	size = req->header.size >> 8;
2561 	hptiop_mv_inbound_write(req_phy
2562 			| MVIOP_MU_QUEUE_ADDR_HOST_BIT
2563 			| imin(3, size), hba);
2564 }
2565 
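/*
 * Post a SCSI command to an MVFrey IOP: fill the next inbound list slot with
 * the request's physical address and length (in 32-bit words), advance
 * inlist_wptr (toggling on wrap), ring inbound_write_ptr and arm a 20 second
 * timeout that triggers an adapter reset.
 */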
2566 static void hptiop_post_req_mvfrey(struct hpt_iop_hba *hba,
2567 				struct hpt_iop_srb *srb,
2568 				bus_dma_segment_t *segs, int nsegs)
2569 {
2570 	int idx, index;
2571 	union ccb *ccb = srb->ccb;
2572 	u_int8_t *cdb;
2573 	struct hpt_iop_request_scsi_command *req;
2574 	u_int64_t req_phy;
2575 
2576 	req = (struct hpt_iop_request_scsi_command *)srb;
2577 	req_phy = srb->phy_addr;
2578 
2579 	if (ccb->csio.dxfer_len && nsegs > 0) {
2580 		struct hpt_iopsg *psg = req->sg_list;
2581 		for (idx = 0; idx < nsegs; idx++, psg++) {
2582 			psg->pci_address = (u_int64_t)segs[idx].ds_addr | 1;
2583 			psg->size = segs[idx].ds_len;
2584 			psg->eot = 0;
2585 		}
2586 		psg[-1].eot = 1;
2587 	}
2588 	if (ccb->ccb_h.flags & CAM_CDB_POINTER)
2589 		cdb = ccb->csio.cdb_io.cdb_ptr;
2590 	else
2591 		cdb = ccb->csio.cdb_io.cdb_bytes;
2592 
2593 	bcopy(cdb, req->cdb, ccb->csio.cdb_len);
2594 	req->header.type = IOP_REQUEST_TYPE_SCSI_COMMAND;
2595 	req->header.result = IOP_RESULT_PENDING;
2596 	req->dataxfer_length = ccb->csio.dxfer_len;
2597 	req->channel = 0;
2598 	req->target = ccb->ccb_h.target_id;
2599 	req->lun = ccb->ccb_h.target_lun;
2600 	req->header.size = sizeof(struct hpt_iop_request_scsi_command)
2601 				- sizeof(struct hpt_iopsg)
2602 				+ nsegs * sizeof(struct hpt_iopsg);
2603 	if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
2604 		bus_dmamap_sync(hba->io_dmat,
2605 			srb->dma_map, BUS_DMASYNC_PREREAD);
2606 	}
2607 	else if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT)
2608 		bus_dmamap_sync(hba->io_dmat,
2609 			srb->dma_map, BUS_DMASYNC_PREWRITE);
2610 
2611 	req->header.flags = IOP_REQUEST_FLAG_OUTPUT_CONTEXT
2612 						| IOP_REQUEST_FLAG_ADDR_BITS
2613 						| ((req_phy >> 16) & 0xffff0000);
2614 	req->header.context = ((req_phy & 0xffffffff) << 32 )
2615 						| srb->index << 4
2616 						| IOPMU_QUEUE_ADDR_HOST_BIT | req->header.type;
2617 
2618 	hba->u.mvfrey.inlist_wptr++;
2619 	index = hba->u.mvfrey.inlist_wptr & 0x3fff;
2620 
2621 	if (index == hba->u.mvfrey.list_count) {
2622 		index = 0;
2623 		hba->u.mvfrey.inlist_wptr &= ~0x3fff;
2624 		hba->u.mvfrey.inlist_wptr ^= CL_POINTER_TOGGLE;
2625 	}
2626 
2627 	hba->u.mvfrey.inlist[index].addr = req_phy;
2628 	hba->u.mvfrey.inlist[index].intrfc_len = (req->header.size + 3) / 4;
2629 
2630 	BUS_SPACE_WRT4_MVFREY2(inbound_write_ptr, hba->u.mvfrey.inlist_wptr);
2631 	BUS_SPACE_RD4_MVFREY2(inbound_write_ptr);
2632 
2633 	if (req->header.type == IOP_REQUEST_TYPE_SCSI_COMMAND) {
2634 		callout_reset(&srb->timeout, 20 * hz, hptiop_reset_adapter, hba);
2635 	}
2636 }
2637 
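/*
 * busdma callback for XPT_SCSI_IO: fail the CCB with CAM_BUSY on mapping
 * errors or when the S/G list exceeds max_sg_count, otherwise hand the SRB
 * to the family-specific post_req hook.
 */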
2638 static void hptiop_post_scsi_command(void *arg, bus_dma_segment_t *segs,
2639 					int nsegs, int error)
2640 {
2641 	struct hpt_iop_srb *srb = (struct hpt_iop_srb *)arg;
2642 	union ccb *ccb = srb->ccb;
2643 	struct hpt_iop_hba *hba = srb->hba;
2644 
2645 	if (error || nsegs > hba->max_sg_count) {
2646 		KdPrint(("hptiop: func_code=%x tid=%x lun=%jx nsegs=%d\n",
2647 			ccb->ccb_h.func_code,
2648 			ccb->ccb_h.target_id,
2649 			(uintmax_t)ccb->ccb_h.target_lun, nsegs));
2650 		ccb->ccb_h.status = CAM_BUSY;
2651 		bus_dmamap_unload(hba->io_dmat, srb->dma_map);
2652 		hptiop_free_srb(hba, srb);
2653 		xpt_done(ccb);
2654 		return;
2655 	}
2656 
2657 	hba->ops->post_req(hba, srb, segs, nsegs);
2658 }
2659 
2660 static void hptiop_mv_map_ctlcfg(void *arg, bus_dma_segment_t *segs,
2661 				int nsegs, int error)
2662 {
2663 	struct hpt_iop_hba *hba = (struct hpt_iop_hba *)arg;
2664 	hba->ctlcfgcmd_phy = ((u_int64_t)segs->ds_addr + 0x1F)
2665 				& ~(u_int64_t)0x1F;
2666 	hba->ctlcfg_ptr = (u_int8_t *)(((unsigned long)hba->ctlcfg_ptr + 0x1F)
2667 				& ~0x1F);
2668 }
2669 
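/*
 * MVFrey control/config buffer load callback: record the 32-byte aligned
 * address of the buffer and carve the inbound list, outbound list and
 * outbound copy pointer out of the same allocation, starting 0x800 bytes in
 * (matching the size computed in hptiop_internal_memalloc_mvfrey()).
 */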
2670 static void hptiop_mvfrey_map_ctlcfg(void *arg, bus_dma_segment_t *segs,
2671 				int nsegs, int error)
2672 {
2673 	struct hpt_iop_hba *hba = (struct hpt_iop_hba *)arg;
2674 	char *p;
2675 	u_int64_t phy;
2676 	u_int32_t list_count = hba->u.mvfrey.list_count;
2677 
2678 	phy = ((u_int64_t)segs->ds_addr + 0x1F)
2679 				& ~(u_int64_t)0x1F;
2680 	p = (u_int8_t *)(((unsigned long)hba->ctlcfg_ptr + 0x1F)
2681 				& ~0x1F);
2682 
2683 	hba->ctlcfgcmd_phy = phy;
2684 	hba->ctlcfg_ptr = p;
2685 
2686 	p += 0x800;
2687 	phy += 0x800;
2688 
2689 	hba->u.mvfrey.inlist = (struct mvfrey_inlist_entry *)p;
2690 	hba->u.mvfrey.inlist_phy = phy;
2691 
2692 	p += list_count * sizeof(struct mvfrey_inlist_entry);
2693 	phy += list_count * sizeof(struct mvfrey_inlist_entry);
2694 
2695 	hba->u.mvfrey.outlist = (struct mvfrey_outlist_entry *)p;
2696 	hba->u.mvfrey.outlist_phy = phy;
2697 
2698 	p += list_count * sizeof(struct mvfrey_outlist_entry);
2699 	phy += list_count * sizeof(struct mvfrey_outlist_entry);
2700 
2701 	hba->u.mvfrey.outlist_cptr = (u_int32_t *)p;
2702 	hba->u.mvfrey.outlist_cptr_phy = phy;
2703 }
2704 
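/*
 * SRB pool load callback: split the uncached allocation into 32-byte aligned
 * SRBs, create a DMA map and timeout callout for each, and store either the
 * raw physical address or, for ITL controllers, a pre-shifted address plus a
 * high-memory access flag.
 */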
2705 static void hptiop_map_srb(void *arg, bus_dma_segment_t *segs,
2706 				int nsegs, int error)
2707 {
2708 	struct hpt_iop_hba * hba = (struct hpt_iop_hba *)arg;
2709 	bus_addr_t phy_addr = (segs->ds_addr + 0x1F) & ~(bus_addr_t)0x1F;
2710 	struct hpt_iop_srb *srb, *tmp_srb;
2711 	int i;
2712 
2713 	if (error || nsegs == 0) {
2714 		device_printf(hba->pcidev, "hptiop_map_srb error");
2715 		return;
2716 	}
2717 
2718 	/* map srb */
2719 	srb = (struct hpt_iop_srb *)
2720 		(((unsigned long)hba->uncached_ptr + 0x1F)
2721 		& ~(unsigned long)0x1F);
2722 
2723 	for (i = 0; i < HPT_SRB_MAX_QUEUE_SIZE; i++) {
2724 		tmp_srb = (struct hpt_iop_srb *)
2725 					((char *)srb + i * HPT_SRB_MAX_SIZE);
2726 		if (((unsigned long)tmp_srb & 0x1F) == 0) {
2727 			if (bus_dmamap_create(hba->io_dmat,
2728 						0, &tmp_srb->dma_map)) {
2729 				device_printf(hba->pcidev, "dmamap create failed");
2730 				return;
2731 			}
2732 
2733 			bzero(tmp_srb, sizeof(struct hpt_iop_srb));
2734 			tmp_srb->hba = hba;
2735 			tmp_srb->index = i;
2736 			if (hba->ctlcfg_ptr == 0) {/*itl iop*/
2737 				tmp_srb->phy_addr = (u_int64_t)(u_int32_t)
2738 							(phy_addr >> 5);
2739 				if (phy_addr & IOPMU_MAX_MEM_SUPPORT_MASK_32G)
2740 					tmp_srb->srb_flag =
2741 						HPT_SRB_FLAG_HIGH_MEM_ACESS;
2742 			} else {
2743 				tmp_srb->phy_addr = phy_addr;
2744 			}
2745 
2746 			callout_init_mtx(&tmp_srb->timeout, &hba->lock, 0);
2747 			hptiop_free_srb(hba, tmp_srb);
2748 			hba->srb[i] = tmp_srb;
2749 			phy_addr += HPT_SRB_MAX_SIZE;
2750 		}
2751 		else {
2752 			device_printf(hba->pcidev, "invalid alignment");
2753 			return;
2754 		}
2755 	}
2756 }
2757 
2758 static void hptiop_os_message_callback(struct hpt_iop_hba * hba, u_int32_t msg)
2759 {
2760 	hba->msg_done = 1;
2761 }
2762 
2763 static  int hptiop_os_query_remove_device(struct hpt_iop_hba * hba,
2764 						int target_id)
2765 {
2766 	struct cam_periph       *periph = NULL;
2767 	struct cam_path         *path;
2768 	int                     status, retval = 0;
2769 
2770 	status = xpt_create_path(&path, NULL, hba->sim->path_id, target_id, 0);
2771 
2772 	if (status == CAM_REQ_CMP) {
2773 		if ((periph = cam_periph_find(path, "da")) != NULL) {
2774 			if (periph->refcount >= 1) {
2775 				device_printf(hba->pcidev, "%d, "
2776 					"target_id=0x%x, "
2777 					"refcount=%d",
2778 				    hba->pciunit, target_id, periph->refcount);
2779 				retval = -1;
2780 			}
2781 		}
2782 		xpt_free_path(path);
2783 	}
2784 	return retval;
2785 }
2786 
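/*
 * Undo everything hptiop_attach() set up: ioctl node, async callback and CAM
 * path, interrupt handler, SIM and bus registration, control/config DMA
 * memory, per-SRB maps and callouts, DMA tags, IRQ and BAR resources, and
 * the adapter mutex.
 */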
2787 static void hptiop_release_resource(struct hpt_iop_hba *hba)
2788 {
2789 	int i;
2790 
2791 	if (hba->ioctl_dev)
2792 		destroy_dev(hba->ioctl_dev);
2793 
2794 	if (hba->path) {
2795 		struct ccb_setasync ccb;
2796 
2797 		memset(&ccb, 0, sizeof(ccb));
2798 		xpt_setup_ccb(&ccb.ccb_h, hba->path, /*priority*/5);
2799 		ccb.ccb_h.func_code = XPT_SASYNC_CB;
2800 		ccb.event_enable = 0;
2801 		ccb.callback = hptiop_async;
2802 		ccb.callback_arg = hba->sim;
2803 		xpt_action((union ccb *)&ccb);
2804 		xpt_free_path(hba->path);
2805 	}
2806 
2807 	if (hba->irq_handle)
2808 		bus_teardown_intr(hba->pcidev, hba->irq_res, hba->irq_handle);
2809 
2810 	if (hba->sim) {
2811 		hptiop_lock_adapter(hba);
2812 		xpt_bus_deregister(cam_sim_path(hba->sim));
2813 		cam_sim_free(hba->sim, TRUE);
2814 		hptiop_unlock_adapter(hba);
2815 	}
2816 
2817 	if (hba->ctlcfg_dmat) {
2818 		bus_dmamap_unload(hba->ctlcfg_dmat, hba->ctlcfg_dmamap);
2819 		bus_dmamem_free(hba->ctlcfg_dmat,
2820 					hba->ctlcfg_ptr, hba->ctlcfg_dmamap);
2821 		bus_dma_tag_destroy(hba->ctlcfg_dmat);
2822 	}
2823 
2824 	for (i = 0; i < HPT_SRB_MAX_QUEUE_SIZE; i++) {
2825 		struct hpt_iop_srb *srb = hba->srb[i];
2826 		if (srb->dma_map)
2827 			bus_dmamap_destroy(hba->io_dmat, srb->dma_map);
2828 		callout_drain(&srb->timeout);
2829 	}
2830 
2831 	if (hba->srb_dmat) {
2832 		bus_dmamap_unload(hba->srb_dmat, hba->srb_dmamap);
2833 		bus_dmamap_destroy(hba->srb_dmat, hba->srb_dmamap);
2834 		bus_dma_tag_destroy(hba->srb_dmat);
2835 	}
2836 
2837 	if (hba->io_dmat)
2838 		bus_dma_tag_destroy(hba->io_dmat);
2839 
2840 	if (hba->parent_dmat)
2841 		bus_dma_tag_destroy(hba->parent_dmat);
2842 
2843 	if (hba->irq_res)
2844 		bus_release_resource(hba->pcidev, SYS_RES_IRQ,
2845 					0, hba->irq_res);
2846 
2847 	if (hba->bar0_res)
2848 		bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
2849 					hba->bar0_rid, hba->bar0_res);
2850 	if (hba->bar2_res)
2851 		bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
2852 					hba->bar2_rid, hba->bar2_res);
2853 	mtx_destroy(&hba->lock);
2854 }
2855