xref: /freebsd/sys/dev/hptiop/hptiop.c (revision 1c64b3aba3d0e2910ac52c18e3c240348eb3e36e)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause
3  *
4  * HighPoint RR3xxx/4xxx RAID Driver for FreeBSD
5  * Copyright (C) 2007-2012 HighPoint Technologies, Inc. All Rights Reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26  * SUCH DAMAGE.
27  */
28 
29 #include <sys/param.h>
30 #include <sys/types.h>
31 #include <sys/cons.h>
32 #include <sys/time.h>
33 #include <sys/systm.h>
34 
35 #include <sys/stat.h>
36 #include <sys/malloc.h>
37 #include <sys/conf.h>
38 #include <sys/libkern.h>
39 #include <sys/kernel.h>
40 
41 #include <sys/kthread.h>
42 #include <sys/mutex.h>
43 #include <sys/module.h>
44 
45 #include <sys/eventhandler.h>
46 #include <sys/bus.h>
47 #include <sys/taskqueue.h>
48 #include <sys/ioccom.h>
49 
50 #include <machine/resource.h>
51 #include <machine/bus.h>
52 #include <machine/stdarg.h>
53 #include <sys/rman.h>
54 
55 #include <vm/vm.h>
56 #include <vm/pmap.h>
57 
58 #include <dev/pci/pcireg.h>
59 #include <dev/pci/pcivar.h>
60 
61 
62 #include <cam/cam.h>
63 #include <cam/cam_ccb.h>
64 #include <cam/cam_sim.h>
65 #include <cam/cam_xpt_sim.h>
66 #include <cam/cam_debug.h>
67 #include <cam/cam_periph.h>
68 #include <cam/scsi/scsi_all.h>
69 #include <cam/scsi/scsi_message.h>
70 
71 
72 #include <dev/hptiop/hptiop.h>
73 
74 static const char driver_name[] = "hptiop";
75 static const char driver_version[] = "v1.9";
76 
77 static int hptiop_send_sync_msg(struct hpt_iop_hba *hba,
78 				u_int32_t msg, u_int32_t millisec);
79 static void hptiop_request_callback_itl(struct hpt_iop_hba *hba,
80 							u_int32_t req);
81 static void hptiop_request_callback_mv(struct hpt_iop_hba *hba, u_int64_t req);
82 static void hptiop_request_callback_mvfrey(struct hpt_iop_hba *hba,
83 							u_int32_t req);
84 static void hptiop_os_message_callback(struct hpt_iop_hba *hba, u_int32_t msg);
85 static int  hptiop_do_ioctl_itl(struct hpt_iop_hba *hba,
86 				struct hpt_iop_ioctl_param *pParams);
87 static int  hptiop_do_ioctl_mv(struct hpt_iop_hba *hba,
88 				struct hpt_iop_ioctl_param *pParams);
89 static int  hptiop_do_ioctl_mvfrey(struct hpt_iop_hba *hba,
90 				struct hpt_iop_ioctl_param *pParams);
91 static int  hptiop_rescan_bus(struct hpt_iop_hba *hba);
92 static int hptiop_alloc_pci_res_itl(struct hpt_iop_hba *hba);
93 static int hptiop_alloc_pci_res_mv(struct hpt_iop_hba *hba);
94 static int hptiop_alloc_pci_res_mvfrey(struct hpt_iop_hba *hba);
95 static int hptiop_get_config_itl(struct hpt_iop_hba *hba,
96 				struct hpt_iop_request_get_config *config);
97 static int hptiop_get_config_mv(struct hpt_iop_hba *hba,
98 				struct hpt_iop_request_get_config *config);
99 static int hptiop_get_config_mvfrey(struct hpt_iop_hba *hba,
100 				struct hpt_iop_request_get_config *config);
101 static int hptiop_set_config_itl(struct hpt_iop_hba *hba,
102 				struct hpt_iop_request_set_config *config);
103 static int hptiop_set_config_mv(struct hpt_iop_hba *hba,
104 				struct hpt_iop_request_set_config *config);
105 static int hptiop_set_config_mvfrey(struct hpt_iop_hba *hba,
106 				struct hpt_iop_request_set_config *config);
107 static int hptiop_internal_memalloc_mv(struct hpt_iop_hba *hba);
108 static int hptiop_internal_memalloc_mvfrey(struct hpt_iop_hba *hba);
109 static int hptiop_internal_memfree_itl(struct hpt_iop_hba *hba);
110 static int hptiop_internal_memfree_mv(struct hpt_iop_hba *hba);
111 static int hptiop_internal_memfree_mvfrey(struct hpt_iop_hba *hba);
112 static int  hptiop_post_ioctl_command_itl(struct hpt_iop_hba *hba,
113 			u_int32_t req32, struct hpt_iop_ioctl_param *pParams);
114 static int  hptiop_post_ioctl_command_mv(struct hpt_iop_hba *hba,
115 				struct hpt_iop_request_ioctl_command *req,
116 				struct hpt_iop_ioctl_param *pParams);
117 static int  hptiop_post_ioctl_command_mvfrey(struct hpt_iop_hba *hba,
118 				struct hpt_iop_request_ioctl_command *req,
119 				struct hpt_iop_ioctl_param *pParams);
120 static void hptiop_post_req_itl(struct hpt_iop_hba *hba,
121 				struct hpt_iop_srb *srb,
122 				bus_dma_segment_t *segs, int nsegs);
123 static void hptiop_post_req_mv(struct hpt_iop_hba *hba,
124 				struct hpt_iop_srb *srb,
125 				bus_dma_segment_t *segs, int nsegs);
126 static void hptiop_post_req_mvfrey(struct hpt_iop_hba *hba,
127 				struct hpt_iop_srb *srb,
128 				bus_dma_segment_t *segs, int nsegs);
129 static void hptiop_post_msg_itl(struct hpt_iop_hba *hba, u_int32_t msg);
130 static void hptiop_post_msg_mv(struct hpt_iop_hba *hba, u_int32_t msg);
131 static void hptiop_post_msg_mvfrey(struct hpt_iop_hba *hba, u_int32_t msg);
132 static void hptiop_enable_intr_itl(struct hpt_iop_hba *hba);
133 static void hptiop_enable_intr_mv(struct hpt_iop_hba *hba);
134 static void hptiop_enable_intr_mvfrey(struct hpt_iop_hba *hba);
135 static void hptiop_disable_intr_itl(struct hpt_iop_hba *hba);
136 static void hptiop_disable_intr_mv(struct hpt_iop_hba *hba);
137 static void hptiop_disable_intr_mvfrey(struct hpt_iop_hba *hba);
138 static void hptiop_free_srb(struct hpt_iop_hba *hba, struct hpt_iop_srb *srb);
139 static int  hptiop_os_query_remove_device(struct hpt_iop_hba *hba, int tid);
140 static int  hptiop_probe(device_t dev);
141 static int  hptiop_attach(device_t dev);
142 static int  hptiop_detach(device_t dev);
143 static int  hptiop_shutdown(device_t dev);
144 static void hptiop_action(struct cam_sim *sim, union ccb *ccb);
145 static void hptiop_poll(struct cam_sim *sim);
146 static void hptiop_async(void *callback_arg, u_int32_t code,
147 					struct cam_path *path, void *arg);
148 static void hptiop_pci_intr(void *arg);
149 static void hptiop_release_resource(struct hpt_iop_hba *hba);
150 static void hptiop_reset_adapter(void *argv);
151 static d_open_t hptiop_open;
152 static d_close_t hptiop_close;
153 static d_ioctl_t hptiop_ioctl;
154 
155 static struct cdevsw hptiop_cdevsw = {
156 	.d_open = hptiop_open,
157 	.d_close = hptiop_close,
158 	.d_ioctl = hptiop_ioctl,
159 	.d_name = driver_name,
160 	.d_version = D_VERSION,
161 };
162 
163 #define hba_from_dev(dev) \
164 	((struct hpt_iop_hba *)((dev)->si_drv1))
165 
166 #define BUS_SPACE_WRT4_ITL(offset, value) bus_space_write_4(hba->bar0t,\
167 		hba->bar0h, offsetof(struct hpt_iopmu_itl, offset), (value))
168 #define BUS_SPACE_RD4_ITL(offset) bus_space_read_4(hba->bar0t,\
169 		hba->bar0h, offsetof(struct hpt_iopmu_itl, offset))
170 
171 #define BUS_SPACE_WRT4_MV0(offset, value) bus_space_write_4(hba->bar0t,\
172 		hba->bar0h, offsetof(struct hpt_iopmv_regs, offset), value)
173 #define BUS_SPACE_RD4_MV0(offset) bus_space_read_4(hba->bar0t,\
174 		hba->bar0h, offsetof(struct hpt_iopmv_regs, offset))
175 #define BUS_SPACE_WRT4_MV2(offset, value) bus_space_write_4(hba->bar2t,\
176 		hba->bar2h, offsetof(struct hpt_iopmu_mv, offset), value)
177 #define BUS_SPACE_RD4_MV2(offset) bus_space_read_4(hba->bar2t,\
178 		hba->bar2h, offsetof(struct hpt_iopmu_mv, offset))
179 
180 #define BUS_SPACE_WRT4_MVFREY2(offset, value) bus_space_write_4(hba->bar2t,\
181 		hba->bar2h, offsetof(struct hpt_iopmu_mvfrey, offset), value)
182 #define BUS_SPACE_RD4_MVFREY2(offset) bus_space_read_4(hba->bar2t,\
183 		hba->bar2h, offsetof(struct hpt_iopmu_mvfrey, offset))
184 
hptiop_open(ioctl_dev_t dev,int flags,int devtype,ioctl_thread_t proc)185 static int hptiop_open(ioctl_dev_t dev, int flags,
186 					int devtype, ioctl_thread_t proc)
187 {
188 	struct hpt_iop_hba *hba = hba_from_dev(dev);
189 
190 	if (hba==NULL)
191 		return ENXIO;
192 	if (hba->flag & HPT_IOCTL_FLAG_OPEN)
193 		return EBUSY;
194 	hba->flag |= HPT_IOCTL_FLAG_OPEN;
195 	return 0;
196 }
197 
hptiop_close(ioctl_dev_t dev,int flags,int devtype,ioctl_thread_t proc)198 static int hptiop_close(ioctl_dev_t dev, int flags,
199 					int devtype, ioctl_thread_t proc)
200 {
201 	struct hpt_iop_hba *hba = hba_from_dev(dev);
202 	hba->flag &= ~(u_int32_t)HPT_IOCTL_FLAG_OPEN;
203 	return 0;
204 }
205 
hptiop_ioctl(ioctl_dev_t dev,u_long cmd,caddr_t data,int flags,ioctl_thread_t proc)206 static int hptiop_ioctl(ioctl_dev_t dev, u_long cmd, caddr_t data,
207 					int flags, ioctl_thread_t proc)
208 {
209 	int ret = EFAULT;
210 	struct hpt_iop_hba *hba = hba_from_dev(dev);
211 
212 	switch (cmd) {
213 	case HPT_DO_IOCONTROL:
214 		ret = hba->ops->do_ioctl(hba,
215 				(struct hpt_iop_ioctl_param *)data);
216 		break;
217 	case HPT_SCAN_BUS:
218 		ret = hptiop_rescan_bus(hba);
219 		break;
220 	}
221 	return ret;
222 }
223 
hptiop_mv_outbound_read(struct hpt_iop_hba * hba)224 static u_int64_t hptiop_mv_outbound_read(struct hpt_iop_hba *hba)
225 {
226 	u_int64_t p;
227 	u_int32_t outbound_tail = BUS_SPACE_RD4_MV2(outbound_tail);
228 	u_int32_t outbound_head = BUS_SPACE_RD4_MV2(outbound_head);
229 
230 	if (outbound_tail != outbound_head) {
231 		bus_space_read_region_4(hba->bar2t, hba->bar2h,
232 			offsetof(struct hpt_iopmu_mv,
233 				outbound_q[outbound_tail]),
234 			(u_int32_t *)&p, 2);
235 
236 		outbound_tail++;
237 
238 		if (outbound_tail == MVIOP_QUEUE_LEN)
239 			outbound_tail = 0;
240 
241 		BUS_SPACE_WRT4_MV2(outbound_tail, outbound_tail);
242 		return p;
243 	} else
244 		return 0;
245 }
246 
hptiop_mv_inbound_write(u_int64_t p,struct hpt_iop_hba * hba)247 static void hptiop_mv_inbound_write(u_int64_t p, struct hpt_iop_hba *hba)
248 {
249 	u_int32_t inbound_head = BUS_SPACE_RD4_MV2(inbound_head);
250 	u_int32_t head = inbound_head + 1;
251 
252 	if (head == MVIOP_QUEUE_LEN)
253 		head = 0;
254 
255 	bus_space_write_region_4(hba->bar2t, hba->bar2h,
256 			offsetof(struct hpt_iopmu_mv, inbound_q[inbound_head]),
257 			(u_int32_t *)&p, 2);
258 	BUS_SPACE_WRT4_MV2(inbound_head, head);
259 	BUS_SPACE_WRT4_MV0(inbound_doorbell, MVIOP_MU_INBOUND_INT_POSTQUEUE);
260 }
261 
hptiop_post_msg_itl(struct hpt_iop_hba * hba,u_int32_t msg)262 static void hptiop_post_msg_itl(struct hpt_iop_hba *hba, u_int32_t msg)
263 {
264 	BUS_SPACE_WRT4_ITL(inbound_msgaddr0, msg);
265 	BUS_SPACE_RD4_ITL(outbound_intstatus);
266 }
267 
hptiop_post_msg_mv(struct hpt_iop_hba * hba,u_int32_t msg)268 static void hptiop_post_msg_mv(struct hpt_iop_hba *hba, u_int32_t msg)
269 {
270 
271 	BUS_SPACE_WRT4_MV2(inbound_msg, msg);
272 	BUS_SPACE_WRT4_MV0(inbound_doorbell, MVIOP_MU_INBOUND_INT_MSG);
273 
274 	BUS_SPACE_RD4_MV0(outbound_intmask);
275 }
276 
hptiop_post_msg_mvfrey(struct hpt_iop_hba * hba,u_int32_t msg)277 static void hptiop_post_msg_mvfrey(struct hpt_iop_hba *hba, u_int32_t msg)
278 {
279 	BUS_SPACE_WRT4_MVFREY2(f0_to_cpu_msg_a, msg);
280 	BUS_SPACE_RD4_MVFREY2(f0_to_cpu_msg_a);
281 }
282 
hptiop_wait_ready_itl(struct hpt_iop_hba * hba,u_int32_t millisec)283 static int hptiop_wait_ready_itl(struct hpt_iop_hba * hba, u_int32_t millisec)
284 {
285 	u_int32_t req=0;
286 	int i;
287 
288 	for (i = 0; i < millisec; i++) {
289 		req = BUS_SPACE_RD4_ITL(inbound_queue);
290 		if (req != IOPMU_QUEUE_EMPTY)
291 			break;
292 		DELAY(1000);
293 	}
294 
295 	if (req!=IOPMU_QUEUE_EMPTY) {
296 		BUS_SPACE_WRT4_ITL(outbound_queue, req);
297 		BUS_SPACE_RD4_ITL(outbound_intstatus);
298 		return 0;
299 	}
300 
301 	return -1;
302 }
303 
hptiop_wait_ready_mv(struct hpt_iop_hba * hba,u_int32_t millisec)304 static int hptiop_wait_ready_mv(struct hpt_iop_hba * hba, u_int32_t millisec)
305 {
306 	if (hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_NOP, millisec))
307 		return -1;
308 
309 	return 0;
310 }
311 
hptiop_wait_ready_mvfrey(struct hpt_iop_hba * hba,u_int32_t millisec)312 static int hptiop_wait_ready_mvfrey(struct hpt_iop_hba * hba,
313 							u_int32_t millisec)
314 {
315 	if (hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_NOP, millisec))
316 		return -1;
317 
318 	return 0;
319 }
320 
hptiop_request_callback_itl(struct hpt_iop_hba * hba,u_int32_t index)321 static void hptiop_request_callback_itl(struct hpt_iop_hba * hba,
322 							u_int32_t index)
323 {
324 	struct hpt_iop_srb *srb;
325 	struct hpt_iop_request_scsi_command *req=NULL;
326 	union ccb *ccb;
327 	u_int8_t *cdb;
328 	u_int32_t result, temp, dxfer;
329 	u_int64_t temp64;
330 
331 	if (index & IOPMU_QUEUE_MASK_HOST_BITS) { /*host req*/
332 		if (hba->firmware_version > 0x01020000 ||
333 			hba->interface_version > 0x01020000) {
334 			srb = hba->srb[index & ~(u_int32_t)
335 				(IOPMU_QUEUE_ADDR_HOST_BIT
336 				| IOPMU_QUEUE_REQUEST_RESULT_BIT)];
337 			req = (struct hpt_iop_request_scsi_command *)srb;
338 			if (index & IOPMU_QUEUE_REQUEST_RESULT_BIT)
339 				result = IOP_RESULT_SUCCESS;
340 			else
341 				result = req->header.result;
342 		} else {
343 			srb = hba->srb[index &
344 				~(u_int32_t)IOPMU_QUEUE_ADDR_HOST_BIT];
345 			req = (struct hpt_iop_request_scsi_command *)srb;
346 			result = req->header.result;
347 		}
348 		dxfer = req->dataxfer_length;
349 		goto srb_complete;
350 	}
351 
352 	/*iop req*/
353 	temp = bus_space_read_4(hba->bar0t, hba->bar0h, index +
354 		offsetof(struct hpt_iop_request_header, type));
355 	result = bus_space_read_4(hba->bar0t, hba->bar0h, index +
356 		offsetof(struct hpt_iop_request_header, result));
357 	switch(temp) {
358 	case IOP_REQUEST_TYPE_IOCTL_COMMAND:
359 	{
360 		temp64 = 0;
361 		bus_space_write_region_4(hba->bar0t, hba->bar0h, index +
362 			offsetof(struct hpt_iop_request_header, context),
363 			(u_int32_t *)&temp64, 2);
364 		wakeup((void *)((unsigned long)hba->u.itl.mu + index));
365 		break;
366 	}
367 
368 	case IOP_REQUEST_TYPE_SCSI_COMMAND:
369 		bus_space_read_region_4(hba->bar0t, hba->bar0h, index +
370 			offsetof(struct hpt_iop_request_header, context),
371 			(u_int32_t *)&temp64, 2);
372 		srb = (struct hpt_iop_srb *)(unsigned long)temp64;
373 		dxfer = bus_space_read_4(hba->bar0t, hba->bar0h,
374 				index + offsetof(struct hpt_iop_request_scsi_command,
375 				dataxfer_length));
376 srb_complete:
377 		ccb = (union ccb *)srb->ccb;
378 		if (ccb->ccb_h.flags & CAM_CDB_POINTER)
379 			cdb = ccb->csio.cdb_io.cdb_ptr;
380 		else
381 			cdb = ccb->csio.cdb_io.cdb_bytes;
382 
383 		if (cdb[0] == SYNCHRONIZE_CACHE) { /* ??? */
384 			ccb->ccb_h.status = CAM_REQ_CMP;
385 			goto scsi_done;
386 		}
387 
388 		switch (result) {
389 		case IOP_RESULT_SUCCESS:
390 			switch (ccb->ccb_h.flags & CAM_DIR_MASK) {
391 			case CAM_DIR_IN:
392 				bus_dmamap_sync(hba->io_dmat,
393 					srb->dma_map, BUS_DMASYNC_POSTREAD);
394 				bus_dmamap_unload(hba->io_dmat, srb->dma_map);
395 				break;
396 			case CAM_DIR_OUT:
397 				bus_dmamap_sync(hba->io_dmat,
398 					srb->dma_map, BUS_DMASYNC_POSTWRITE);
399 				bus_dmamap_unload(hba->io_dmat, srb->dma_map);
400 				break;
401 			}
402 
403 			ccb->ccb_h.status = CAM_REQ_CMP;
404 			break;
405 
406 		case IOP_RESULT_BAD_TARGET:
407 			ccb->ccb_h.status = CAM_DEV_NOT_THERE;
408 			break;
409 		case IOP_RESULT_BUSY:
410 			ccb->ccb_h.status = CAM_BUSY;
411 			break;
412 		case IOP_RESULT_INVALID_REQUEST:
413 			ccb->ccb_h.status = CAM_REQ_INVALID;
414 			break;
415 		case IOP_RESULT_FAIL:
416 			ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
417 			break;
418 		case IOP_RESULT_RESET:
419 			ccb->ccb_h.status = CAM_BUSY;
420 			break;
421 		case IOP_RESULT_CHECK_CONDITION:
422 			memset(&ccb->csio.sense_data, 0,
423 			    sizeof(ccb->csio.sense_data));
424 			if (dxfer < ccb->csio.sense_len)
425 				ccb->csio.sense_resid = ccb->csio.sense_len -
426 				    dxfer;
427 			else
428 				ccb->csio.sense_resid = 0;
429 			if (srb->srb_flag & HPT_SRB_FLAG_HIGH_MEM_ACESS) {/*iop*/
430 				bus_space_read_region_1(hba->bar0t, hba->bar0h,
431 					index + offsetof(struct hpt_iop_request_scsi_command,
432 					sg_list), (u_int8_t *)&ccb->csio.sense_data,
433 					MIN(dxfer, sizeof(ccb->csio.sense_data)));
434 			} else {
435 				memcpy(&ccb->csio.sense_data, &req->sg_list,
436 					MIN(dxfer, sizeof(ccb->csio.sense_data)));
437 			}
438 			ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
439 			ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
440 			ccb->csio.scsi_status = SCSI_STATUS_CHECK_COND;
441 			break;
442 		default:
443 			ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
444 			break;
445 		}
446 scsi_done:
447 		if (srb->srb_flag & HPT_SRB_FLAG_HIGH_MEM_ACESS)
448 			BUS_SPACE_WRT4_ITL(outbound_queue, index);
449 
450 		ccb->csio.resid = ccb->csio.dxfer_len - dxfer;
451 
452 		hptiop_free_srb(hba, srb);
453 		xpt_done(ccb);
454 		break;
455 	}
456 }
457 
hptiop_drain_outbound_queue_itl(struct hpt_iop_hba * hba)458 static void hptiop_drain_outbound_queue_itl(struct hpt_iop_hba *hba)
459 {
460 	u_int32_t req, temp;
461 
462 	while ((req = BUS_SPACE_RD4_ITL(outbound_queue)) !=IOPMU_QUEUE_EMPTY) {
463 		if (req & IOPMU_QUEUE_MASK_HOST_BITS)
464 			hptiop_request_callback_itl(hba, req);
465 		else {
466 			temp = bus_space_read_4(hba->bar0t,
467 					hba->bar0h,req +
468 					offsetof(struct hpt_iop_request_header,
469 						flags));
470 			if (temp & IOP_REQUEST_FLAG_SYNC_REQUEST) {
471 				u_int64_t temp64;
472 				bus_space_read_region_4(hba->bar0t,
473 					hba->bar0h,req +
474 					offsetof(struct hpt_iop_request_header,
475 						context),
476 					(u_int32_t *)&temp64, 2);
477 				if (temp64) {
478 					hptiop_request_callback_itl(hba, req);
479 				} else {
480 					temp64 = 1;
481 					bus_space_write_region_4(hba->bar0t,
482 						hba->bar0h,req +
483 						offsetof(struct hpt_iop_request_header,
484 							context),
485 						(u_int32_t *)&temp64, 2);
486 				}
487 			} else
488 				hptiop_request_callback_itl(hba, req);
489 		}
490 	}
491 }
492 
hptiop_intr_itl(struct hpt_iop_hba * hba)493 static int hptiop_intr_itl(struct hpt_iop_hba * hba)
494 {
495 	u_int32_t status;
496 	int ret = 0;
497 
498 	status = BUS_SPACE_RD4_ITL(outbound_intstatus);
499 
500 	if (status & IOPMU_OUTBOUND_INT_MSG0) {
501 		u_int32_t msg = BUS_SPACE_RD4_ITL(outbound_msgaddr0);
502 		KdPrint(("hptiop: received outbound msg %x\n", msg));
503 		BUS_SPACE_WRT4_ITL(outbound_intstatus, IOPMU_OUTBOUND_INT_MSG0);
504 		hptiop_os_message_callback(hba, msg);
505 		ret = 1;
506 	}
507 
508 	if (status & IOPMU_OUTBOUND_INT_POSTQUEUE) {
509 		hptiop_drain_outbound_queue_itl(hba);
510 		ret = 1;
511 	}
512 
513 	return ret;
514 }
515 
hptiop_request_callback_mv(struct hpt_iop_hba * hba,u_int64_t _tag)516 static void hptiop_request_callback_mv(struct hpt_iop_hba * hba,
517 							u_int64_t _tag)
518 {
519 	u_int32_t context = (u_int32_t)_tag;
520 
521 	if (context & MVIOP_CMD_TYPE_SCSI) {
522 		struct hpt_iop_srb *srb;
523 		struct hpt_iop_request_scsi_command *req;
524 		union ccb *ccb;
525 		u_int8_t *cdb;
526 
527 		srb = hba->srb[context >> MVIOP_REQUEST_NUMBER_START_BIT];
528 		req = (struct hpt_iop_request_scsi_command *)srb;
529 		ccb = (union ccb *)srb->ccb;
530 		if (ccb->ccb_h.flags & CAM_CDB_POINTER)
531 			cdb = ccb->csio.cdb_io.cdb_ptr;
532 		else
533 			cdb = ccb->csio.cdb_io.cdb_bytes;
534 
535 		if (cdb[0] == SYNCHRONIZE_CACHE) { /* ??? */
536 			ccb->ccb_h.status = CAM_REQ_CMP;
537 			goto scsi_done;
538 		}
539 		if (context & MVIOP_MU_QUEUE_REQUEST_RESULT_BIT)
540 			req->header.result = IOP_RESULT_SUCCESS;
541 
542 		switch (req->header.result) {
543 		case IOP_RESULT_SUCCESS:
544 			switch (ccb->ccb_h.flags & CAM_DIR_MASK) {
545 			case CAM_DIR_IN:
546 				bus_dmamap_sync(hba->io_dmat,
547 					srb->dma_map, BUS_DMASYNC_POSTREAD);
548 				bus_dmamap_unload(hba->io_dmat, srb->dma_map);
549 				break;
550 			case CAM_DIR_OUT:
551 				bus_dmamap_sync(hba->io_dmat,
552 					srb->dma_map, BUS_DMASYNC_POSTWRITE);
553 				bus_dmamap_unload(hba->io_dmat, srb->dma_map);
554 				break;
555 			}
556 			ccb->ccb_h.status = CAM_REQ_CMP;
557 			break;
558 		case IOP_RESULT_BAD_TARGET:
559 			ccb->ccb_h.status = CAM_DEV_NOT_THERE;
560 			break;
561 		case IOP_RESULT_BUSY:
562 			ccb->ccb_h.status = CAM_BUSY;
563 			break;
564 		case IOP_RESULT_INVALID_REQUEST:
565 			ccb->ccb_h.status = CAM_REQ_INVALID;
566 			break;
567 		case IOP_RESULT_FAIL:
568 			ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
569 			break;
570 		case IOP_RESULT_RESET:
571 			ccb->ccb_h.status = CAM_BUSY;
572 			break;
573 		case IOP_RESULT_CHECK_CONDITION:
574 			memset(&ccb->csio.sense_data, 0,
575 			    sizeof(ccb->csio.sense_data));
576 			if (req->dataxfer_length < ccb->csio.sense_len)
577 				ccb->csio.sense_resid = ccb->csio.sense_len -
578 				    req->dataxfer_length;
579 			else
580 				ccb->csio.sense_resid = 0;
581 			memcpy(&ccb->csio.sense_data, &req->sg_list,
582 				MIN(req->dataxfer_length, sizeof(ccb->csio.sense_data)));
583 			ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
584 			ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
585 			ccb->csio.scsi_status = SCSI_STATUS_CHECK_COND;
586 			break;
587 		default:
588 			ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
589 			break;
590 		}
591 scsi_done:
592 		ccb->csio.resid = ccb->csio.dxfer_len - req->dataxfer_length;
593 
594 		hptiop_free_srb(hba, srb);
595 		xpt_done(ccb);
596 	} else if (context & MVIOP_CMD_TYPE_IOCTL) {
597 		struct hpt_iop_request_ioctl_command *req = hba->ctlcfg_ptr;
598 		if (context & MVIOP_MU_QUEUE_REQUEST_RESULT_BIT)
599 			hba->config_done = 1;
600 		else
601 			hba->config_done = -1;
602 		wakeup(req);
603 	} else if (context &
604 			(MVIOP_CMD_TYPE_SET_CONFIG |
605 				MVIOP_CMD_TYPE_GET_CONFIG))
606 		hba->config_done = 1;
607 	else {
608 		device_printf(hba->pcidev, "wrong callback type\n");
609 	}
610 }
611 
hptiop_request_callback_mvfrey(struct hpt_iop_hba * hba,u_int32_t _tag)612 static void hptiop_request_callback_mvfrey(struct hpt_iop_hba * hba,
613 				u_int32_t _tag)
614 {
615 	u_int32_t req_type = _tag & 0xf;
616 
617 	struct hpt_iop_srb *srb;
618 	struct hpt_iop_request_scsi_command *req;
619 	union ccb *ccb;
620 	u_int8_t *cdb;
621 
622 	switch (req_type) {
623 	case IOP_REQUEST_TYPE_GET_CONFIG:
624 	case IOP_REQUEST_TYPE_SET_CONFIG:
625 		hba->config_done = 1;
626 		break;
627 
628 	case IOP_REQUEST_TYPE_SCSI_COMMAND:
629 		srb = hba->srb[(_tag >> 4) & 0xff];
630 		req = (struct hpt_iop_request_scsi_command *)srb;
631 
632 		ccb = (union ccb *)srb->ccb;
633 
634 		callout_stop(&srb->timeout);
635 
636 		if (ccb->ccb_h.flags & CAM_CDB_POINTER)
637 			cdb = ccb->csio.cdb_io.cdb_ptr;
638 		else
639 			cdb = ccb->csio.cdb_io.cdb_bytes;
640 
641 		if (cdb[0] == SYNCHRONIZE_CACHE) { /* ??? */
642 			ccb->ccb_h.status = CAM_REQ_CMP;
643 			goto scsi_done;
644 		}
645 
646 		if (_tag & MVFREYIOPMU_QUEUE_REQUEST_RESULT_BIT)
647 			req->header.result = IOP_RESULT_SUCCESS;
648 
649 		switch (req->header.result) {
650 		case IOP_RESULT_SUCCESS:
651 			switch (ccb->ccb_h.flags & CAM_DIR_MASK) {
652 			case CAM_DIR_IN:
653 				bus_dmamap_sync(hba->io_dmat,
654 						srb->dma_map, BUS_DMASYNC_POSTREAD);
655 				bus_dmamap_unload(hba->io_dmat, srb->dma_map);
656 				break;
657 			case CAM_DIR_OUT:
658 				bus_dmamap_sync(hba->io_dmat,
659 						srb->dma_map, BUS_DMASYNC_POSTWRITE);
660 				bus_dmamap_unload(hba->io_dmat, srb->dma_map);
661 				break;
662 			}
663 			ccb->ccb_h.status = CAM_REQ_CMP;
664 			break;
665 		case IOP_RESULT_BAD_TARGET:
666 			ccb->ccb_h.status = CAM_DEV_NOT_THERE;
667 			break;
668 		case IOP_RESULT_BUSY:
669 			ccb->ccb_h.status = CAM_BUSY;
670 			break;
671 		case IOP_RESULT_INVALID_REQUEST:
672 			ccb->ccb_h.status = CAM_REQ_INVALID;
673 			break;
674 		case IOP_RESULT_FAIL:
675 			ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
676 			break;
677 		case IOP_RESULT_RESET:
678 			ccb->ccb_h.status = CAM_BUSY;
679 			break;
680 		case IOP_RESULT_CHECK_CONDITION:
681 			memset(&ccb->csio.sense_data, 0,
682 			       sizeof(ccb->csio.sense_data));
683 			if (req->dataxfer_length < ccb->csio.sense_len)
684 				ccb->csio.sense_resid = ccb->csio.sense_len -
685 				req->dataxfer_length;
686 			else
687 				ccb->csio.sense_resid = 0;
688 			memcpy(&ccb->csio.sense_data, &req->sg_list,
689 			       MIN(req->dataxfer_length, sizeof(ccb->csio.sense_data)));
690 			ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
691 			ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
692 			ccb->csio.scsi_status = SCSI_STATUS_CHECK_COND;
693 			break;
694 		default:
695 			ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
696 			break;
697 		}
698 scsi_done:
699 		ccb->csio.resid = ccb->csio.dxfer_len - req->dataxfer_length;
700 
701 		hptiop_free_srb(hba, srb);
702 		xpt_done(ccb);
703 		break;
704 	case IOP_REQUEST_TYPE_IOCTL_COMMAND:
705 		if (_tag & MVFREYIOPMU_QUEUE_REQUEST_RESULT_BIT)
706 			hba->config_done = 1;
707 		else
708 			hba->config_done = -1;
709 		wakeup((struct hpt_iop_request_ioctl_command *)hba->ctlcfg_ptr);
710 		break;
711 	default:
712 		device_printf(hba->pcidev, "wrong callback type\n");
713 		break;
714 	}
715 }
716 
hptiop_drain_outbound_queue_mv(struct hpt_iop_hba * hba)717 static void hptiop_drain_outbound_queue_mv(struct hpt_iop_hba * hba)
718 {
719 	u_int64_t req;
720 
721 	while ((req = hptiop_mv_outbound_read(hba))) {
722 		if (req & MVIOP_MU_QUEUE_ADDR_HOST_BIT) {
723 			if (req & MVIOP_MU_QUEUE_REQUEST_RETURN_CONTEXT) {
724 				hptiop_request_callback_mv(hba, req);
725 			}
726 	    	}
727 	}
728 }
729 
hptiop_intr_mv(struct hpt_iop_hba * hba)730 static int hptiop_intr_mv(struct hpt_iop_hba * hba)
731 {
732 	u_int32_t status;
733 	int ret = 0;
734 
735 	status = BUS_SPACE_RD4_MV0(outbound_doorbell);
736 
737 	if (status)
738 		BUS_SPACE_WRT4_MV0(outbound_doorbell, ~status);
739 
740 	if (status & MVIOP_MU_OUTBOUND_INT_MSG) {
741 		u_int32_t msg = BUS_SPACE_RD4_MV2(outbound_msg);
742 		KdPrint(("hptiop: received outbound msg %x\n", msg));
743 		hptiop_os_message_callback(hba, msg);
744 		ret = 1;
745 	}
746 
747 	if (status & MVIOP_MU_OUTBOUND_INT_POSTQUEUE) {
748 		hptiop_drain_outbound_queue_mv(hba);
749 		ret = 1;
750 	}
751 
752 	return ret;
753 }
754 
hptiop_intr_mvfrey(struct hpt_iop_hba * hba)755 static int hptiop_intr_mvfrey(struct hpt_iop_hba * hba)
756 {
757 	u_int32_t status, _tag, cptr;
758 	int ret = 0;
759 
760 	if (hba->initialized) {
761 		BUS_SPACE_WRT4_MVFREY2(pcie_f0_int_enable, 0);
762 	}
763 
764 	status = BUS_SPACE_RD4_MVFREY2(f0_doorbell);
765 	if (status) {
766 		BUS_SPACE_WRT4_MVFREY2(f0_doorbell, status);
767 		if (status & CPU_TO_F0_DRBL_MSG_A_BIT) {
768 			u_int32_t msg = BUS_SPACE_RD4_MVFREY2(cpu_to_f0_msg_a);
769 			hptiop_os_message_callback(hba, msg);
770 		}
771 		ret = 1;
772 	}
773 
774 	status = BUS_SPACE_RD4_MVFREY2(isr_cause);
775 	if (status) {
776 		BUS_SPACE_WRT4_MVFREY2(isr_cause, status);
777 		do {
778 			cptr = *hba->u.mvfrey.outlist_cptr & 0xff;
779 			while (hba->u.mvfrey.outlist_rptr != cptr) {
780 				hba->u.mvfrey.outlist_rptr++;
781 				if (hba->u.mvfrey.outlist_rptr == hba->u.mvfrey.list_count) {
782 					hba->u.mvfrey.outlist_rptr = 0;
783 				}
784 
785 				_tag = hba->u.mvfrey.outlist[hba->u.mvfrey.outlist_rptr].val;
786 				hptiop_request_callback_mvfrey(hba, _tag);
787 				ret = 2;
788 			}
789 		} while (cptr != (*hba->u.mvfrey.outlist_cptr & 0xff));
790 	}
791 
792 	if (hba->initialized) {
793 		BUS_SPACE_WRT4_MVFREY2(pcie_f0_int_enable, 0x1010);
794 	}
795 
796 	return ret;
797 }
798 
hptiop_send_sync_request_itl(struct hpt_iop_hba * hba,u_int32_t req32,u_int32_t millisec)799 static int hptiop_send_sync_request_itl(struct hpt_iop_hba * hba,
800 					u_int32_t req32, u_int32_t millisec)
801 {
802 	u_int32_t i;
803 	u_int64_t temp64;
804 
805 	BUS_SPACE_WRT4_ITL(inbound_queue, req32);
806 	BUS_SPACE_RD4_ITL(outbound_intstatus);
807 
808 	for (i = 0; i < millisec; i++) {
809 		hptiop_intr_itl(hba);
810 		bus_space_read_region_4(hba->bar0t, hba->bar0h, req32 +
811 			offsetof(struct hpt_iop_request_header, context),
812 			(u_int32_t *)&temp64, 2);
813 		if (temp64)
814 			return 0;
815 		DELAY(1000);
816 	}
817 
818 	return -1;
819 }
820 
hptiop_send_sync_request_mv(struct hpt_iop_hba * hba,void * req,u_int32_t millisec)821 static int hptiop_send_sync_request_mv(struct hpt_iop_hba *hba,
822 					void *req, u_int32_t millisec)
823 {
824 	u_int32_t i;
825 	u_int64_t phy_addr;
826 	hba->config_done = 0;
827 
828 	phy_addr = hba->ctlcfgcmd_phy |
829 			(u_int64_t)MVIOP_MU_QUEUE_ADDR_HOST_BIT;
830 	((struct hpt_iop_request_get_config *)req)->header.flags |=
831 		IOP_REQUEST_FLAG_SYNC_REQUEST |
832 		IOP_REQUEST_FLAG_OUTPUT_CONTEXT;
833 	hptiop_mv_inbound_write(phy_addr, hba);
834 	BUS_SPACE_RD4_MV0(outbound_intmask);
835 
836 	for (i = 0; i < millisec; i++) {
837 		hptiop_intr_mv(hba);
838 		if (hba->config_done)
839 			return 0;
840 		DELAY(1000);
841 	}
842 	return -1;
843 }
844 
hptiop_send_sync_request_mvfrey(struct hpt_iop_hba * hba,void * req,u_int32_t millisec)845 static int hptiop_send_sync_request_mvfrey(struct hpt_iop_hba *hba,
846 					void *req, u_int32_t millisec)
847 {
848 	u_int32_t i, index;
849 	u_int64_t phy_addr;
850 	struct hpt_iop_request_header *reqhdr =
851 										(struct hpt_iop_request_header *)req;
852 
853 	hba->config_done = 0;
854 
855 	phy_addr = hba->ctlcfgcmd_phy;
856 	reqhdr->flags = IOP_REQUEST_FLAG_SYNC_REQUEST
857 					| IOP_REQUEST_FLAG_OUTPUT_CONTEXT
858 					| IOP_REQUEST_FLAG_ADDR_BITS
859 					| ((phy_addr >> 16) & 0xffff0000);
860 	reqhdr->context = ((phy_addr & 0xffffffff) << 32 )
861 					| IOPMU_QUEUE_ADDR_HOST_BIT | reqhdr->type;
862 
863 	hba->u.mvfrey.inlist_wptr++;
864 	index = hba->u.mvfrey.inlist_wptr & 0x3fff;
865 
866 	if (index == hba->u.mvfrey.list_count) {
867 		index = 0;
868 		hba->u.mvfrey.inlist_wptr &= ~0x3fff;
869 		hba->u.mvfrey.inlist_wptr ^= CL_POINTER_TOGGLE;
870 	}
871 
872 	hba->u.mvfrey.inlist[index].addr = phy_addr;
873 	hba->u.mvfrey.inlist[index].intrfc_len = (reqhdr->size + 3) / 4;
874 
875 	BUS_SPACE_WRT4_MVFREY2(inbound_write_ptr, hba->u.mvfrey.inlist_wptr);
876 	BUS_SPACE_RD4_MVFREY2(inbound_write_ptr);
877 
878 	for (i = 0; i < millisec; i++) {
879 		hptiop_intr_mvfrey(hba);
880 		if (hba->config_done)
881 			return 0;
882 		DELAY(1000);
883 	}
884 	return -1;
885 }
886 
hptiop_send_sync_msg(struct hpt_iop_hba * hba,u_int32_t msg,u_int32_t millisec)887 static int hptiop_send_sync_msg(struct hpt_iop_hba *hba,
888 					u_int32_t msg, u_int32_t millisec)
889 {
890 	u_int32_t i;
891 
892 	hba->msg_done = 0;
893 	hba->ops->post_msg(hba, msg);
894 
895 	for (i=0; i<millisec; i++) {
896 		hba->ops->iop_intr(hba);
897 		if (hba->msg_done)
898 			break;
899 		DELAY(1000);
900 	}
901 
902 	return hba->msg_done? 0 : -1;
903 }
904 
hptiop_get_config_itl(struct hpt_iop_hba * hba,struct hpt_iop_request_get_config * config)905 static int hptiop_get_config_itl(struct hpt_iop_hba * hba,
906 				struct hpt_iop_request_get_config * config)
907 {
908 	u_int32_t req32;
909 
910 	config->header.size = sizeof(struct hpt_iop_request_get_config);
911 	config->header.type = IOP_REQUEST_TYPE_GET_CONFIG;
912 	config->header.flags = IOP_REQUEST_FLAG_SYNC_REQUEST;
913 	config->header.result = IOP_RESULT_PENDING;
914 	config->header.context = 0;
915 
916 	req32 = BUS_SPACE_RD4_ITL(inbound_queue);
917 	if (req32 == IOPMU_QUEUE_EMPTY)
918 		return -1;
919 
920 	bus_space_write_region_4(hba->bar0t, hba->bar0h,
921 			req32, (u_int32_t *)config,
922 			sizeof(struct hpt_iop_request_header) >> 2);
923 
924 	if (hptiop_send_sync_request_itl(hba, req32, 20000)) {
925 		KdPrint(("hptiop: get config send cmd failed"));
926 		return -1;
927 	}
928 
929 	bus_space_read_region_4(hba->bar0t, hba->bar0h,
930 			req32, (u_int32_t *)config,
931 			sizeof(struct hpt_iop_request_get_config) >> 2);
932 
933 	BUS_SPACE_WRT4_ITL(outbound_queue, req32);
934 
935 	return 0;
936 }
937 
hptiop_get_config_mv(struct hpt_iop_hba * hba,struct hpt_iop_request_get_config * config)938 static int hptiop_get_config_mv(struct hpt_iop_hba * hba,
939 				struct hpt_iop_request_get_config * config)
940 {
941 	struct hpt_iop_request_get_config *req;
942 
943 	if (!(req = hba->ctlcfg_ptr))
944 		return -1;
945 
946 	req->header.flags = 0;
947 	req->header.type = IOP_REQUEST_TYPE_GET_CONFIG;
948 	req->header.size = sizeof(struct hpt_iop_request_get_config);
949 	req->header.result = IOP_RESULT_PENDING;
950 	req->header.context = MVIOP_CMD_TYPE_GET_CONFIG;
951 
952 	if (hptiop_send_sync_request_mv(hba, req, 20000)) {
953 		KdPrint(("hptiop: get config send cmd failed"));
954 		return -1;
955 	}
956 
957 	*config = *req;
958 	return 0;
959 }
960 
hptiop_get_config_mvfrey(struct hpt_iop_hba * hba,struct hpt_iop_request_get_config * config)961 static int hptiop_get_config_mvfrey(struct hpt_iop_hba * hba,
962 				struct hpt_iop_request_get_config * config)
963 {
964 	struct hpt_iop_request_get_config *info = hba->u.mvfrey.config;
965 
966 	if (info->header.size != sizeof(struct hpt_iop_request_get_config) ||
967 	    info->header.type != IOP_REQUEST_TYPE_GET_CONFIG) {
968 		KdPrint(("hptiop: header size %x/%x type %x/%x",
969 			 info->header.size, (int)sizeof(struct hpt_iop_request_get_config),
970 			 info->header.type, IOP_REQUEST_TYPE_GET_CONFIG));
971 		return -1;
972 	}
973 
974 	config->interface_version = info->interface_version;
975 	config->firmware_version = info->firmware_version;
976 	config->max_requests = info->max_requests;
977 	config->request_size = info->request_size;
978 	config->max_sg_count = info->max_sg_count;
979 	config->data_transfer_length = info->data_transfer_length;
980 	config->alignment_mask = info->alignment_mask;
981 	config->max_devices = info->max_devices;
982 	config->sdram_size = info->sdram_size;
983 
984 	KdPrint(("hptiop: maxreq %x reqsz %x datalen %x maxdev %x sdram %x",
985 		 config->max_requests, config->request_size,
986 		 config->data_transfer_length, config->max_devices,
987 		 config->sdram_size));
988 
989 	return 0;
990 }
991 
hptiop_set_config_itl(struct hpt_iop_hba * hba,struct hpt_iop_request_set_config * config)992 static int hptiop_set_config_itl(struct hpt_iop_hba *hba,
993 				struct hpt_iop_request_set_config *config)
994 {
995 	u_int32_t req32;
996 
997 	req32 = BUS_SPACE_RD4_ITL(inbound_queue);
998 
999 	if (req32 == IOPMU_QUEUE_EMPTY)
1000 		return -1;
1001 
1002 	config->header.size = sizeof(struct hpt_iop_request_set_config);
1003 	config->header.type = IOP_REQUEST_TYPE_SET_CONFIG;
1004 	config->header.flags = IOP_REQUEST_FLAG_SYNC_REQUEST;
1005 	config->header.result = IOP_RESULT_PENDING;
1006 	config->header.context = 0;
1007 
1008 	bus_space_write_region_4(hba->bar0t, hba->bar0h, req32,
1009 		(u_int32_t *)config,
1010 		sizeof(struct hpt_iop_request_set_config) >> 2);
1011 
1012 	if (hptiop_send_sync_request_itl(hba, req32, 20000)) {
1013 		KdPrint(("hptiop: set config send cmd failed"));
1014 		return -1;
1015 	}
1016 
1017 	BUS_SPACE_WRT4_ITL(outbound_queue, req32);
1018 
1019 	return 0;
1020 }
1021 
hptiop_set_config_mv(struct hpt_iop_hba * hba,struct hpt_iop_request_set_config * config)1022 static int hptiop_set_config_mv(struct hpt_iop_hba *hba,
1023 				struct hpt_iop_request_set_config *config)
1024 {
1025 	struct hpt_iop_request_set_config *req;
1026 
1027 	if (!(req = hba->ctlcfg_ptr))
1028 		return -1;
1029 
1030 	memcpy((u_int8_t *)req + sizeof(struct hpt_iop_request_header),
1031 		(u_int8_t *)config + sizeof(struct hpt_iop_request_header),
1032 		sizeof(struct hpt_iop_request_set_config) -
1033 			sizeof(struct hpt_iop_request_header));
1034 
1035 	req->header.flags = 0;
1036 	req->header.type = IOP_REQUEST_TYPE_SET_CONFIG;
1037 	req->header.size = sizeof(struct hpt_iop_request_set_config);
1038 	req->header.result = IOP_RESULT_PENDING;
1039 	req->header.context = MVIOP_CMD_TYPE_SET_CONFIG;
1040 
1041 	if (hptiop_send_sync_request_mv(hba, req, 20000)) {
1042 		KdPrint(("hptiop: set config send cmd failed"));
1043 		return -1;
1044 	}
1045 
1046 	return 0;
1047 }
1048 
hptiop_set_config_mvfrey(struct hpt_iop_hba * hba,struct hpt_iop_request_set_config * config)1049 static int hptiop_set_config_mvfrey(struct hpt_iop_hba *hba,
1050 				struct hpt_iop_request_set_config *config)
1051 {
1052 	struct hpt_iop_request_set_config *req;
1053 
1054 	if (!(req = hba->ctlcfg_ptr))
1055 		return -1;
1056 
1057 	memcpy((u_int8_t *)req + sizeof(struct hpt_iop_request_header),
1058 		(u_int8_t *)config + sizeof(struct hpt_iop_request_header),
1059 		sizeof(struct hpt_iop_request_set_config) -
1060 			sizeof(struct hpt_iop_request_header));
1061 
1062 	req->header.type = IOP_REQUEST_TYPE_SET_CONFIG;
1063 	req->header.size = sizeof(struct hpt_iop_request_set_config);
1064 	req->header.result = IOP_RESULT_PENDING;
1065 
1066 	if (hptiop_send_sync_request_mvfrey(hba, req, 20000)) {
1067 		KdPrint(("hptiop: set config send cmd failed"));
1068 		return -1;
1069 	}
1070 
1071 	return 0;
1072 }
1073 
hptiop_post_ioctl_command_itl(struct hpt_iop_hba * hba,u_int32_t req32,struct hpt_iop_ioctl_param * pParams)1074 static int hptiop_post_ioctl_command_itl(struct hpt_iop_hba *hba,
1075 				u_int32_t req32,
1076 				struct hpt_iop_ioctl_param *pParams)
1077 {
1078 	u_int64_t temp64;
1079 	struct hpt_iop_request_ioctl_command req;
1080 
1081 	if ((((pParams->nInBufferSize + 3) & ~3) + pParams->nOutBufferSize) >
1082 			(hba->max_request_size -
1083 			offsetof(struct hpt_iop_request_ioctl_command, buf))) {
1084 		device_printf(hba->pcidev, "request size beyond max value");
1085 		return -1;
1086 	}
1087 
1088 	req.header.size = offsetof(struct hpt_iop_request_ioctl_command, buf)
1089 		+ pParams->nInBufferSize;
1090 	req.header.type = IOP_REQUEST_TYPE_IOCTL_COMMAND;
1091 	req.header.flags = IOP_REQUEST_FLAG_SYNC_REQUEST;
1092 	req.header.result = IOP_RESULT_PENDING;
1093 	req.header.context = req32 + (u_int64_t)(unsigned long)hba->u.itl.mu;
1094 	req.ioctl_code = HPT_CTL_CODE_BSD_TO_IOP(pParams->dwIoControlCode);
1095 	req.inbuf_size = pParams->nInBufferSize;
1096 	req.outbuf_size = pParams->nOutBufferSize;
1097 	req.bytes_returned = 0;
1098 
1099 	bus_space_write_region_4(hba->bar0t, hba->bar0h, req32, (u_int32_t *)&req,
1100 		offsetof(struct hpt_iop_request_ioctl_command, buf)>>2);
1101 
1102 	hptiop_lock_adapter(hba);
1103 
1104 	BUS_SPACE_WRT4_ITL(inbound_queue, req32);
1105 	BUS_SPACE_RD4_ITL(outbound_intstatus);
1106 
1107 	bus_space_read_region_4(hba->bar0t, hba->bar0h, req32 +
1108 		offsetof(struct hpt_iop_request_ioctl_command, header.context),
1109 		(u_int32_t *)&temp64, 2);
1110 	while (temp64) {
1111 		if (hptiop_sleep(hba, (void *)((unsigned long)hba->u.itl.mu + req32),
1112 				PPAUSE, "hptctl", HPT_OSM_TIMEOUT)==0)
1113 			break;
1114 		hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_RESET, 60000);
1115 		bus_space_read_region_4(hba->bar0t, hba->bar0h,req32 +
1116 			offsetof(struct hpt_iop_request_ioctl_command,
1117 				header.context),
1118 			(u_int32_t *)&temp64, 2);
1119 	}
1120 
1121 	hptiop_unlock_adapter(hba);
1122 	return 0;
1123 }
1124 
hptiop_bus_space_copyin(struct hpt_iop_hba * hba,u_int32_t bus,void * user,int size)1125 static int hptiop_bus_space_copyin(struct hpt_iop_hba *hba, u_int32_t bus,
1126 									void *user, int size)
1127 {
1128 	unsigned char byte;
1129 	int i;
1130 
1131 	for (i=0; i<size; i++) {
1132 		if (copyin((u_int8_t *)user + i, &byte, 1))
1133 			return -1;
1134 		bus_space_write_1(hba->bar0t, hba->bar0h, bus + i, byte);
1135 	}
1136 
1137 	return 0;
1138 }
1139 
hptiop_bus_space_copyout(struct hpt_iop_hba * hba,u_int32_t bus,void * user,int size)1140 static int hptiop_bus_space_copyout(struct hpt_iop_hba *hba, u_int32_t bus,
1141 									void *user, int size)
1142 {
1143 	unsigned char byte;
1144 	int i;
1145 
1146 	for (i=0; i<size; i++) {
1147 		byte = bus_space_read_1(hba->bar0t, hba->bar0h, bus + i);
1148 		if (copyout(&byte, (u_int8_t *)user + i, 1))
1149 			return -1;
1150 	}
1151 
1152 	return 0;
1153 }
1154 
hptiop_do_ioctl_itl(struct hpt_iop_hba * hba,struct hpt_iop_ioctl_param * pParams)1155 static int hptiop_do_ioctl_itl(struct hpt_iop_hba *hba,
1156 				struct hpt_iop_ioctl_param * pParams)
1157 {
1158 	u_int32_t req32;
1159 	u_int32_t result;
1160 
1161 	if ((pParams->Magic != HPT_IOCTL_MAGIC) &&
1162 		(pParams->Magic != HPT_IOCTL_MAGIC32))
1163 		return EFAULT;
1164 
1165 	req32 = BUS_SPACE_RD4_ITL(inbound_queue);
1166 	if (req32 == IOPMU_QUEUE_EMPTY)
1167 		return EFAULT;
1168 
1169 	if (pParams->nInBufferSize)
1170 		if (hptiop_bus_space_copyin(hba, req32 +
1171 			offsetof(struct hpt_iop_request_ioctl_command, buf),
1172 			(void *)pParams->lpInBuffer, pParams->nInBufferSize))
1173 			goto invalid;
1174 
1175 	if (hptiop_post_ioctl_command_itl(hba, req32, pParams))
1176 		goto invalid;
1177 
1178 	result = bus_space_read_4(hba->bar0t, hba->bar0h, req32 +
1179 			offsetof(struct hpt_iop_request_ioctl_command,
1180 				header.result));
1181 
1182 	if (result == IOP_RESULT_SUCCESS) {
1183 		if (pParams->nOutBufferSize)
1184 			if (hptiop_bus_space_copyout(hba, req32 +
1185 				offsetof(struct hpt_iop_request_ioctl_command, buf) +
1186 					((pParams->nInBufferSize + 3) & ~3),
1187 				(void *)pParams->lpOutBuffer, pParams->nOutBufferSize))
1188 				goto invalid;
1189 
1190 		if (pParams->lpBytesReturned) {
1191 			if (hptiop_bus_space_copyout(hba, req32 +
1192 				offsetof(struct hpt_iop_request_ioctl_command, bytes_returned),
1193 				(void *)pParams->lpBytesReturned, sizeof(unsigned  long)))
1194 				goto invalid;
1195 		}
1196 
1197 		BUS_SPACE_WRT4_ITL(outbound_queue, req32);
1198 
1199 		return 0;
1200 	} else{
1201 invalid:
1202 		BUS_SPACE_WRT4_ITL(outbound_queue, req32);
1203 
1204 		return EFAULT;
1205 	}
1206 }
1207 
hptiop_post_ioctl_command_mv(struct hpt_iop_hba * hba,struct hpt_iop_request_ioctl_command * req,struct hpt_iop_ioctl_param * pParams)1208 static int hptiop_post_ioctl_command_mv(struct hpt_iop_hba *hba,
1209 				struct hpt_iop_request_ioctl_command *req,
1210 				struct hpt_iop_ioctl_param *pParams)
1211 {
1212 	u_int64_t req_phy;
1213 	int size = 0;
1214 
1215 	if ((((pParams->nInBufferSize + 3) & ~3) + pParams->nOutBufferSize) >
1216 			(hba->max_request_size -
1217 			offsetof(struct hpt_iop_request_ioctl_command, buf))) {
1218 		device_printf(hba->pcidev, "request size beyond max value");
1219 		return -1;
1220 	}
1221 
1222 	req->ioctl_code = HPT_CTL_CODE_BSD_TO_IOP(pParams->dwIoControlCode);
1223 	req->inbuf_size = pParams->nInBufferSize;
1224 	req->outbuf_size = pParams->nOutBufferSize;
1225 	req->header.size = offsetof(struct hpt_iop_request_ioctl_command, buf)
1226 					+ pParams->nInBufferSize;
1227 	req->header.context = (u_int64_t)MVIOP_CMD_TYPE_IOCTL;
1228 	req->header.type = IOP_REQUEST_TYPE_IOCTL_COMMAND;
1229 	req->header.result = IOP_RESULT_PENDING;
1230 	req->header.flags = IOP_REQUEST_FLAG_OUTPUT_CONTEXT;
1231 	size = req->header.size >> 8;
1232 	size = imin(3, size);
1233 	req_phy = hba->ctlcfgcmd_phy | MVIOP_MU_QUEUE_ADDR_HOST_BIT | size;
1234 	hptiop_mv_inbound_write(req_phy, hba);
1235 
1236 	BUS_SPACE_RD4_MV0(outbound_intmask);
1237 
1238 	while (hba->config_done == 0) {
1239 		if (hptiop_sleep(hba, req, PPAUSE,
1240 			"hptctl", HPT_OSM_TIMEOUT)==0)
1241 			continue;
1242 		hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_RESET, 60000);
1243 	}
1244 	return 0;
1245 }
1246 
hptiop_do_ioctl_mv(struct hpt_iop_hba * hba,struct hpt_iop_ioctl_param * pParams)1247 static int hptiop_do_ioctl_mv(struct hpt_iop_hba *hba,
1248 				struct hpt_iop_ioctl_param *pParams)
1249 {
1250 	struct hpt_iop_request_ioctl_command *req;
1251 
1252 	if ((pParams->Magic != HPT_IOCTL_MAGIC) &&
1253 		(pParams->Magic != HPT_IOCTL_MAGIC32))
1254 		return EFAULT;
1255 
1256 	req = (struct hpt_iop_request_ioctl_command *)(hba->ctlcfg_ptr);
1257 	hba->config_done = 0;
1258 	hptiop_lock_adapter(hba);
1259 	if (pParams->nInBufferSize)
1260 		if (copyin((void *)pParams->lpInBuffer,
1261 				req->buf, pParams->nInBufferSize))
1262 			goto invalid;
1263 	if (hptiop_post_ioctl_command_mv(hba, req, pParams))
1264 		goto invalid;
1265 
1266 	if (hba->config_done == 1) {
1267 		if (pParams->nOutBufferSize)
1268 			if (copyout(req->buf +
1269 				((pParams->nInBufferSize + 3) & ~3),
1270 				(void *)pParams->lpOutBuffer,
1271 				pParams->nOutBufferSize))
1272 				goto invalid;
1273 
1274 		if (pParams->lpBytesReturned)
1275 			if (copyout(&req->bytes_returned,
1276 				(void*)pParams->lpBytesReturned,
1277 				sizeof(u_int32_t)))
1278 				goto invalid;
1279 		hptiop_unlock_adapter(hba);
1280 		return 0;
1281 	} else{
1282 invalid:
1283 		hptiop_unlock_adapter(hba);
1284 		return EFAULT;
1285 	}
1286 }
1287 
hptiop_post_ioctl_command_mvfrey(struct hpt_iop_hba * hba,struct hpt_iop_request_ioctl_command * req,struct hpt_iop_ioctl_param * pParams)1288 static int hptiop_post_ioctl_command_mvfrey(struct hpt_iop_hba *hba,
1289 				struct hpt_iop_request_ioctl_command *req,
1290 				struct hpt_iop_ioctl_param *pParams)
1291 {
1292 	u_int64_t phy_addr;
1293 	u_int32_t index;
1294 
1295 	phy_addr = hba->ctlcfgcmd_phy;
1296 
1297 	if ((((pParams->nInBufferSize + 3) & ~3) + pParams->nOutBufferSize) >
1298 			(hba->max_request_size -
1299 			offsetof(struct hpt_iop_request_ioctl_command, buf))) {
1300 		device_printf(hba->pcidev, "request size beyond max value");
1301 		return -1;
1302 	}
1303 
1304 	req->ioctl_code = HPT_CTL_CODE_BSD_TO_IOP(pParams->dwIoControlCode);
1305 	req->inbuf_size = pParams->nInBufferSize;
1306 	req->outbuf_size = pParams->nOutBufferSize;
1307 	req->header.size = offsetof(struct hpt_iop_request_ioctl_command, buf)
1308 					+ pParams->nInBufferSize;
1309 
1310 	req->header.type = IOP_REQUEST_TYPE_IOCTL_COMMAND;
1311 	req->header.result = IOP_RESULT_PENDING;
1312 
1313 	req->header.flags = IOP_REQUEST_FLAG_SYNC_REQUEST
1314 						| IOP_REQUEST_FLAG_OUTPUT_CONTEXT
1315 						| IOP_REQUEST_FLAG_ADDR_BITS
1316 						| ((phy_addr >> 16) & 0xffff0000);
1317 	req->header.context = ((phy_addr & 0xffffffff) << 32 )
1318 						| IOPMU_QUEUE_ADDR_HOST_BIT | req->header.type;
1319 
1320 	hba->u.mvfrey.inlist_wptr++;
1321 	index = hba->u.mvfrey.inlist_wptr & 0x3fff;
1322 
1323 	if (index == hba->u.mvfrey.list_count) {
1324 		index = 0;
1325 		hba->u.mvfrey.inlist_wptr &= ~0x3fff;
1326 		hba->u.mvfrey.inlist_wptr ^= CL_POINTER_TOGGLE;
1327 	}
1328 
1329 	hba->u.mvfrey.inlist[index].addr = phy_addr;
1330 	hba->u.mvfrey.inlist[index].intrfc_len = (req->header.size + 3) / 4;
1331 
1332 	BUS_SPACE_WRT4_MVFREY2(inbound_write_ptr, hba->u.mvfrey.inlist_wptr);
1333 	BUS_SPACE_RD4_MVFREY2(inbound_write_ptr);
1334 
1335 	while (hba->config_done == 0) {
1336 		if (hptiop_sleep(hba, req, PPAUSE,
1337 			"hptctl", HPT_OSM_TIMEOUT)==0)
1338 			continue;
1339 		hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_RESET, 60000);
1340 	}
1341 	return 0;
1342 }
1343 
hptiop_do_ioctl_mvfrey(struct hpt_iop_hba * hba,struct hpt_iop_ioctl_param * pParams)1344 static int hptiop_do_ioctl_mvfrey(struct hpt_iop_hba *hba,
1345 				struct hpt_iop_ioctl_param *pParams)
1346 {
1347 	struct hpt_iop_request_ioctl_command *req;
1348 
1349 	if ((pParams->Magic != HPT_IOCTL_MAGIC) &&
1350 		(pParams->Magic != HPT_IOCTL_MAGIC32))
1351 		return EFAULT;
1352 
1353 	req = (struct hpt_iop_request_ioctl_command *)(hba->ctlcfg_ptr);
1354 	hba->config_done = 0;
1355 	hptiop_lock_adapter(hba);
1356 	if (pParams->nInBufferSize)
1357 		if (copyin((void *)pParams->lpInBuffer,
1358 				req->buf, pParams->nInBufferSize))
1359 			goto invalid;
1360 	if (hptiop_post_ioctl_command_mvfrey(hba, req, pParams))
1361 		goto invalid;
1362 
1363 	if (hba->config_done == 1) {
1364 		if (pParams->nOutBufferSize)
1365 			if (copyout(req->buf +
1366 				((pParams->nInBufferSize + 3) & ~3),
1367 				(void *)pParams->lpOutBuffer,
1368 				pParams->nOutBufferSize))
1369 				goto invalid;
1370 
1371 		if (pParams->lpBytesReturned)
1372 			if (copyout(&req->bytes_returned,
1373 				(void*)pParams->lpBytesReturned,
1374 				sizeof(u_int32_t)))
1375 				goto invalid;
1376 		hptiop_unlock_adapter(hba);
1377 		return 0;
1378 	} else{
1379 invalid:
1380 		hptiop_unlock_adapter(hba);
1381 		return EFAULT;
1382 	}
1383 }
1384 
hptiop_rescan_bus(struct hpt_iop_hba * hba)1385 static int  hptiop_rescan_bus(struct hpt_iop_hba * hba)
1386 {
1387 	union ccb           *ccb;
1388 
1389 	if ((ccb = xpt_alloc_ccb()) == NULL)
1390 		return(ENOMEM);
1391 	if (xpt_create_path(&ccb->ccb_h.path, NULL, cam_sim_path(hba->sim),
1392 		CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
1393 		xpt_free_ccb(ccb);
1394 		return(EIO);
1395 	}
1396 	xpt_rescan(ccb);
1397 	return(0);
1398 }
1399 
1400 static  bus_dmamap_callback_t   hptiop_map_srb;
1401 static  bus_dmamap_callback_t   hptiop_post_scsi_command;
1402 static  bus_dmamap_callback_t   hptiop_mv_map_ctlcfg;
1403 static	bus_dmamap_callback_t	hptiop_mvfrey_map_ctlcfg;
1404 
hptiop_alloc_pci_res_itl(struct hpt_iop_hba * hba)1405 static int hptiop_alloc_pci_res_itl(struct hpt_iop_hba *hba)
1406 {
1407 	hba->bar0_rid = 0x10;
1408 	hba->bar0_res = bus_alloc_resource_any(hba->pcidev,
1409 			SYS_RES_MEMORY, &hba->bar0_rid, RF_ACTIVE);
1410 
1411 	if (hba->bar0_res == NULL) {
1412 		device_printf(hba->pcidev,
1413 			"failed to get iop base adrress.\n");
1414 		return -1;
1415 	}
1416 	hba->bar0t = rman_get_bustag(hba->bar0_res);
1417 	hba->bar0h = rman_get_bushandle(hba->bar0_res);
1418 	hba->u.itl.mu = (struct hpt_iopmu_itl *)
1419 				rman_get_virtual(hba->bar0_res);
1420 
1421 	if (!hba->u.itl.mu) {
1422 		bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
1423 					hba->bar0_rid, hba->bar0_res);
1424 		device_printf(hba->pcidev, "alloc mem res failed\n");
1425 		return -1;
1426 	}
1427 
1428 	return 0;
1429 }
1430 
hptiop_alloc_pci_res_mv(struct hpt_iop_hba * hba)1431 static int hptiop_alloc_pci_res_mv(struct hpt_iop_hba *hba)
1432 {
1433 	hba->bar0_rid = 0x10;
1434 	hba->bar0_res = bus_alloc_resource_any(hba->pcidev,
1435 			SYS_RES_MEMORY, &hba->bar0_rid, RF_ACTIVE);
1436 
1437 	if (hba->bar0_res == NULL) {
1438 		device_printf(hba->pcidev, "failed to get iop bar0.\n");
1439 		return -1;
1440 	}
1441 	hba->bar0t = rman_get_bustag(hba->bar0_res);
1442 	hba->bar0h = rman_get_bushandle(hba->bar0_res);
1443 	hba->u.mv.regs = (struct hpt_iopmv_regs *)
1444 				rman_get_virtual(hba->bar0_res);
1445 
1446 	if (!hba->u.mv.regs) {
1447 		bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
1448 					hba->bar0_rid, hba->bar0_res);
1449 		device_printf(hba->pcidev, "alloc bar0 mem res failed\n");
1450 		return -1;
1451 	}
1452 
1453 	hba->bar2_rid = 0x18;
1454 	hba->bar2_res = bus_alloc_resource_any(hba->pcidev,
1455 			SYS_RES_MEMORY, &hba->bar2_rid, RF_ACTIVE);
1456 
1457 	if (hba->bar2_res == NULL) {
1458 		bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
1459 					hba->bar0_rid, hba->bar0_res);
1460 		device_printf(hba->pcidev, "failed to get iop bar2.\n");
1461 		return -1;
1462 	}
1463 
1464 	hba->bar2t = rman_get_bustag(hba->bar2_res);
1465 	hba->bar2h = rman_get_bushandle(hba->bar2_res);
1466 	hba->u.mv.mu = (struct hpt_iopmu_mv *)rman_get_virtual(hba->bar2_res);
1467 
1468 	if (!hba->u.mv.mu) {
1469 		bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
1470 					hba->bar0_rid, hba->bar0_res);
1471 		bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
1472 					hba->bar2_rid, hba->bar2_res);
1473 		device_printf(hba->pcidev, "alloc mem bar2 res failed\n");
1474 		return -1;
1475 	}
1476 
1477 	return 0;
1478 }
1479 
hptiop_alloc_pci_res_mvfrey(struct hpt_iop_hba * hba)1480 static int hptiop_alloc_pci_res_mvfrey(struct hpt_iop_hba *hba)
1481 {
1482 	hba->bar0_rid = 0x10;
1483 	hba->bar0_res = bus_alloc_resource_any(hba->pcidev,
1484 			SYS_RES_MEMORY, &hba->bar0_rid, RF_ACTIVE);
1485 
1486 	if (hba->bar0_res == NULL) {
1487 		device_printf(hba->pcidev, "failed to get iop bar0.\n");
1488 		return -1;
1489 	}
1490 	hba->bar0t = rman_get_bustag(hba->bar0_res);
1491 	hba->bar0h = rman_get_bushandle(hba->bar0_res);
1492 	hba->u.mvfrey.config = (struct hpt_iop_request_get_config *)
1493 				rman_get_virtual(hba->bar0_res);
1494 
1495 	if (!hba->u.mvfrey.config) {
1496 		bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
1497 					hba->bar0_rid, hba->bar0_res);
1498 		device_printf(hba->pcidev, "alloc bar0 mem res failed\n");
1499 		return -1;
1500 	}
1501 
1502 	hba->bar2_rid = 0x18;
1503 	hba->bar2_res = bus_alloc_resource_any(hba->pcidev,
1504 			SYS_RES_MEMORY, &hba->bar2_rid, RF_ACTIVE);
1505 
1506 	if (hba->bar2_res == NULL) {
1507 		bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
1508 					hba->bar0_rid, hba->bar0_res);
1509 		device_printf(hba->pcidev, "failed to get iop bar2.\n");
1510 		return -1;
1511 	}
1512 
1513 	hba->bar2t = rman_get_bustag(hba->bar2_res);
1514 	hba->bar2h = rman_get_bushandle(hba->bar2_res);
1515 	hba->u.mvfrey.mu =
1516 					(struct hpt_iopmu_mvfrey *)rman_get_virtual(hba->bar2_res);
1517 
1518 	if (!hba->u.mvfrey.mu) {
1519 		bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
1520 					hba->bar0_rid, hba->bar0_res);
1521 		bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
1522 					hba->bar2_rid, hba->bar2_res);
1523 		device_printf(hba->pcidev, "alloc mem bar2 res failed\n");
1524 		return -1;
1525 	}
1526 
1527 	return 0;
1528 }
1529 
hptiop_release_pci_res_itl(struct hpt_iop_hba * hba)1530 static void hptiop_release_pci_res_itl(struct hpt_iop_hba *hba)
1531 {
1532 	if (hba->bar0_res)
1533 		bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
1534 			hba->bar0_rid, hba->bar0_res);
1535 }
1536 
hptiop_release_pci_res_mv(struct hpt_iop_hba * hba)1537 static void hptiop_release_pci_res_mv(struct hpt_iop_hba *hba)
1538 {
1539 	if (hba->bar0_res)
1540 		bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
1541 			hba->bar0_rid, hba->bar0_res);
1542 	if (hba->bar2_res)
1543 		bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
1544 			hba->bar2_rid, hba->bar2_res);
1545 }
1546 
hptiop_release_pci_res_mvfrey(struct hpt_iop_hba * hba)1547 static void hptiop_release_pci_res_mvfrey(struct hpt_iop_hba *hba)
1548 {
1549 	if (hba->bar0_res)
1550 		bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
1551 			hba->bar0_rid, hba->bar0_res);
1552 	if (hba->bar2_res)
1553 		bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
1554 			hba->bar2_rid, hba->bar2_res);
1555 }
1556 
hptiop_internal_memalloc_mv(struct hpt_iop_hba * hba)1557 static int hptiop_internal_memalloc_mv(struct hpt_iop_hba *hba)
1558 {
1559 	if (bus_dma_tag_create(hba->parent_dmat,
1560 				1,
1561 				0,
1562 				BUS_SPACE_MAXADDR_32BIT,
1563 				BUS_SPACE_MAXADDR,
1564 				NULL, NULL,
1565 				0x800 - 0x8,
1566 				1,
1567 				BUS_SPACE_MAXSIZE_32BIT,
1568 				BUS_DMA_ALLOCNOW,
1569 				NULL,
1570 				NULL,
1571 				&hba->ctlcfg_dmat)) {
1572 		device_printf(hba->pcidev, "alloc ctlcfg_dmat failed\n");
1573 		return -1;
1574 	}
1575 
1576 	if (bus_dmamem_alloc(hba->ctlcfg_dmat, (void **)&hba->ctlcfg_ptr,
1577 		BUS_DMA_WAITOK | BUS_DMA_COHERENT,
1578 		&hba->ctlcfg_dmamap) != 0) {
1579 			device_printf(hba->pcidev,
1580 					"bus_dmamem_alloc failed!\n");
1581 			bus_dma_tag_destroy(hba->ctlcfg_dmat);
1582 			return -1;
1583 	}
1584 
1585 	if (bus_dmamap_load(hba->ctlcfg_dmat,
1586 			hba->ctlcfg_dmamap, hba->ctlcfg_ptr,
1587 			MVIOP_IOCTLCFG_SIZE,
1588 			hptiop_mv_map_ctlcfg, hba, 0)) {
1589 		device_printf(hba->pcidev, "bus_dmamap_load failed!\n");
1590 		if (hba->ctlcfg_dmat) {
1591 			bus_dmamem_free(hba->ctlcfg_dmat,
1592 				hba->ctlcfg_ptr, hba->ctlcfg_dmamap);
1593 			bus_dma_tag_destroy(hba->ctlcfg_dmat);
1594 		}
1595 		return -1;
1596 	}
1597 
1598 	return 0;
1599 }
1600 
hptiop_internal_memalloc_mvfrey(struct hpt_iop_hba * hba)1601 static int hptiop_internal_memalloc_mvfrey(struct hpt_iop_hba *hba)
1602 {
1603 	u_int32_t list_count = BUS_SPACE_RD4_MVFREY2(inbound_conf_ctl);
1604 
1605 	list_count >>= 16;
1606 
1607 	if (list_count == 0) {
1608 		return -1;
1609 	}
1610 
1611 	hba->u.mvfrey.list_count = list_count;
1612 	hba->u.mvfrey.internal_mem_size = 0x800
1613 							+ list_count * sizeof(struct mvfrey_inlist_entry)
1614 							+ list_count * sizeof(struct mvfrey_outlist_entry)
1615 							+ sizeof(int);
1616 	if (bus_dma_tag_create(hba->parent_dmat,
1617 				1,
1618 				0,
1619 				BUS_SPACE_MAXADDR_32BIT,
1620 				BUS_SPACE_MAXADDR,
1621 				NULL, NULL,
1622 				hba->u.mvfrey.internal_mem_size,
1623 				1,
1624 				BUS_SPACE_MAXSIZE_32BIT,
1625 				BUS_DMA_ALLOCNOW,
1626 				NULL,
1627 				NULL,
1628 				&hba->ctlcfg_dmat)) {
1629 		device_printf(hba->pcidev, "alloc ctlcfg_dmat failed\n");
1630 		return -1;
1631 	}
1632 
1633 	if (bus_dmamem_alloc(hba->ctlcfg_dmat, (void **)&hba->ctlcfg_ptr,
1634 		BUS_DMA_WAITOK | BUS_DMA_COHERENT,
1635 		&hba->ctlcfg_dmamap) != 0) {
1636 			device_printf(hba->pcidev,
1637 					"bus_dmamem_alloc failed!\n");
1638 			bus_dma_tag_destroy(hba->ctlcfg_dmat);
1639 			return -1;
1640 	}
1641 
1642 	if (bus_dmamap_load(hba->ctlcfg_dmat,
1643 			hba->ctlcfg_dmamap, hba->ctlcfg_ptr,
1644 			hba->u.mvfrey.internal_mem_size,
1645 			hptiop_mvfrey_map_ctlcfg, hba, 0)) {
1646 		device_printf(hba->pcidev, "bus_dmamap_load failed!\n");
1647 		if (hba->ctlcfg_dmat) {
1648 			bus_dmamem_free(hba->ctlcfg_dmat,
1649 				hba->ctlcfg_ptr, hba->ctlcfg_dmamap);
1650 			bus_dma_tag_destroy(hba->ctlcfg_dmat);
1651 		}
1652 		return -1;
1653 	}
1654 
1655 	return 0;
1656 }
1657 
hptiop_internal_memfree_itl(struct hpt_iop_hba * hba)1658 static int hptiop_internal_memfree_itl(struct hpt_iop_hba *hba) {
1659 	return 0;
1660 }
1661 
hptiop_internal_memfree_mv(struct hpt_iop_hba * hba)1662 static int hptiop_internal_memfree_mv(struct hpt_iop_hba *hba)
1663 {
1664 	if (hba->ctlcfg_dmat) {
1665 		bus_dmamap_unload(hba->ctlcfg_dmat, hba->ctlcfg_dmamap);
1666 		bus_dmamem_free(hba->ctlcfg_dmat,
1667 					hba->ctlcfg_ptr, hba->ctlcfg_dmamap);
1668 		bus_dma_tag_destroy(hba->ctlcfg_dmat);
1669 	}
1670 
1671 	return 0;
1672 }
1673 
hptiop_internal_memfree_mvfrey(struct hpt_iop_hba * hba)1674 static int hptiop_internal_memfree_mvfrey(struct hpt_iop_hba *hba)
1675 {
1676 	if (hba->ctlcfg_dmat) {
1677 		bus_dmamap_unload(hba->ctlcfg_dmat, hba->ctlcfg_dmamap);
1678 		bus_dmamem_free(hba->ctlcfg_dmat,
1679 					hba->ctlcfg_ptr, hba->ctlcfg_dmamap);
1680 		bus_dma_tag_destroy(hba->ctlcfg_dmat);
1681 	}
1682 
1683 	return 0;
1684 }
1685 
hptiop_reset_comm_mvfrey(struct hpt_iop_hba * hba)1686 static int hptiop_reset_comm_mvfrey(struct hpt_iop_hba *hba)
1687 {
1688 	u_int32_t i = 100;
1689 
1690 	if (hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_RESET_COMM, 3000))
1691 		return -1;
1692 
1693 	/* wait 100ms for MCU ready */
1694 	while(i--) {
1695 		DELAY(1000);
1696 	}
1697 
1698 	BUS_SPACE_WRT4_MVFREY2(inbound_base,
1699 							hba->u.mvfrey.inlist_phy & 0xffffffff);
1700 	BUS_SPACE_WRT4_MVFREY2(inbound_base_high,
1701 							(hba->u.mvfrey.inlist_phy >> 16) >> 16);
1702 
1703 	BUS_SPACE_WRT4_MVFREY2(outbound_base,
1704 							hba->u.mvfrey.outlist_phy & 0xffffffff);
1705 	BUS_SPACE_WRT4_MVFREY2(outbound_base_high,
1706 							(hba->u.mvfrey.outlist_phy >> 16) >> 16);
1707 
1708 	BUS_SPACE_WRT4_MVFREY2(outbound_shadow_base,
1709 							hba->u.mvfrey.outlist_cptr_phy & 0xffffffff);
1710 	BUS_SPACE_WRT4_MVFREY2(outbound_shadow_base_high,
1711 							(hba->u.mvfrey.outlist_cptr_phy >> 16) >> 16);
1712 
1713 	hba->u.mvfrey.inlist_wptr = (hba->u.mvfrey.list_count - 1)
1714 								| CL_POINTER_TOGGLE;
1715 	*hba->u.mvfrey.outlist_cptr = (hba->u.mvfrey.list_count - 1)
1716 								| CL_POINTER_TOGGLE;
1717 	hba->u.mvfrey.outlist_rptr = hba->u.mvfrey.list_count - 1;
1718 
1719 	return 0;
1720 }
1721 
1722 /*
1723  * CAM driver interface
1724  */
1725 static device_method_t driver_methods[] = {
1726 	/* Device interface */
1727 	DEVMETHOD(device_probe,     hptiop_probe),
1728 	DEVMETHOD(device_attach,    hptiop_attach),
1729 	DEVMETHOD(device_detach,    hptiop_detach),
1730 	DEVMETHOD(device_shutdown,  hptiop_shutdown),
1731 	{ 0, 0 }
1732 };
1733 
1734 static struct hptiop_adapter_ops hptiop_itl_ops = {
1735 	.family	           = INTEL_BASED_IOP,
1736 	.iop_wait_ready    = hptiop_wait_ready_itl,
1737 	.internal_memalloc = 0,
1738 	.internal_memfree  = hptiop_internal_memfree_itl,
1739 	.alloc_pci_res     = hptiop_alloc_pci_res_itl,
1740 	.release_pci_res   = hptiop_release_pci_res_itl,
1741 	.enable_intr       = hptiop_enable_intr_itl,
1742 	.disable_intr      = hptiop_disable_intr_itl,
1743 	.get_config        = hptiop_get_config_itl,
1744 	.set_config        = hptiop_set_config_itl,
1745 	.iop_intr          = hptiop_intr_itl,
1746 	.post_msg          = hptiop_post_msg_itl,
1747 	.post_req          = hptiop_post_req_itl,
1748 	.do_ioctl          = hptiop_do_ioctl_itl,
1749 	.reset_comm        = 0,
1750 };
1751 
1752 static struct hptiop_adapter_ops hptiop_mv_ops = {
1753 	.family	           = MV_BASED_IOP,
1754 	.iop_wait_ready    = hptiop_wait_ready_mv,
1755 	.internal_memalloc = hptiop_internal_memalloc_mv,
1756 	.internal_memfree  = hptiop_internal_memfree_mv,
1757 	.alloc_pci_res     = hptiop_alloc_pci_res_mv,
1758 	.release_pci_res   = hptiop_release_pci_res_mv,
1759 	.enable_intr       = hptiop_enable_intr_mv,
1760 	.disable_intr      = hptiop_disable_intr_mv,
1761 	.get_config        = hptiop_get_config_mv,
1762 	.set_config        = hptiop_set_config_mv,
1763 	.iop_intr          = hptiop_intr_mv,
1764 	.post_msg          = hptiop_post_msg_mv,
1765 	.post_req          = hptiop_post_req_mv,
1766 	.do_ioctl          = hptiop_do_ioctl_mv,
1767 	.reset_comm        = 0,
1768 };
1769 
1770 static struct hptiop_adapter_ops hptiop_mvfrey_ops = {
1771 	.family	           = MVFREY_BASED_IOP,
1772 	.iop_wait_ready    = hptiop_wait_ready_mvfrey,
1773 	.internal_memalloc = hptiop_internal_memalloc_mvfrey,
1774 	.internal_memfree  = hptiop_internal_memfree_mvfrey,
1775 	.alloc_pci_res     = hptiop_alloc_pci_res_mvfrey,
1776 	.release_pci_res   = hptiop_release_pci_res_mvfrey,
1777 	.enable_intr       = hptiop_enable_intr_mvfrey,
1778 	.disable_intr      = hptiop_disable_intr_mvfrey,
1779 	.get_config        = hptiop_get_config_mvfrey,
1780 	.set_config        = hptiop_set_config_mvfrey,
1781 	.iop_intr          = hptiop_intr_mvfrey,
1782 	.post_msg          = hptiop_post_msg_mvfrey,
1783 	.post_req          = hptiop_post_req_mvfrey,
1784 	.do_ioctl          = hptiop_do_ioctl_mvfrey,
1785 	.reset_comm        = hptiop_reset_comm_mvfrey,
1786 };
1787 
1788 static driver_t hptiop_pci_driver = {
1789 	driver_name,
1790 	driver_methods,
1791 	sizeof(struct hpt_iop_hba)
1792 };
1793 
1794 DRIVER_MODULE(hptiop, pci, hptiop_pci_driver, 0, 0);
1795 MODULE_DEPEND(hptiop, cam, 1, 1, 1);
1796 
hptiop_probe(device_t dev)1797 static int hptiop_probe(device_t dev)
1798 {
1799 	struct hpt_iop_hba *hba;
1800 	u_int32_t id;
1801 	int sas = 0;
1802 	struct hptiop_adapter_ops *ops;
1803 
1804 	if (pci_get_vendor(dev) != 0x1103)
1805 		return (ENXIO);
1806 
1807 	id = pci_get_device(dev);
1808 
1809 	switch (id) {
1810 		case 0x4520:
1811 		case 0x4521:
1812 		case 0x4522:
1813 			sas = 1;
1814 		case 0x3620:
1815 		case 0x3622:
1816 		case 0x3640:
1817 			ops = &hptiop_mvfrey_ops;
1818 			break;
1819 		case 0x4210:
1820 		case 0x4211:
1821 		case 0x4310:
1822 		case 0x4311:
1823 		case 0x4320:
1824 		case 0x4321:
1825  		case 0x4322:
1826 			sas = 1;
1827 		case 0x3220:
1828 		case 0x3320:
1829 		case 0x3410:
1830 		case 0x3520:
1831 		case 0x3510:
1832 		case 0x3511:
1833 		case 0x3521:
1834 		case 0x3522:
1835 		case 0x3530:
1836 		case 0x3540:
1837 		case 0x3560:
1838 			ops = &hptiop_itl_ops;
1839 			break;
1840 		case 0x3020:
1841 		case 0x3120:
1842 		case 0x3122:
1843 			ops = &hptiop_mv_ops;
1844 			break;
1845 		default:
1846 			return (ENXIO);
1847 	}
1848 
1849 	device_printf(dev, "adapter at PCI %d:%d:%d, IRQ %d\n",
1850 		pci_get_bus(dev), pci_get_slot(dev),
1851 		pci_get_function(dev), pci_get_irq(dev));
1852 
1853 	device_set_descf(dev, "RocketRAID %x %s Controller",
1854 	    id, sas ? "SAS" : "SATA");
1855 
1856 	hba = (struct hpt_iop_hba *)device_get_softc(dev);
1857 	bzero(hba, sizeof(struct hpt_iop_hba));
1858 	hba->ops = ops;
1859 
1860 	KdPrint(("hba->ops=%p\n", hba->ops));
1861 	return 0;
1862 }
1863 
hptiop_attach(device_t dev)1864 static int hptiop_attach(device_t dev)
1865 {
1866 	struct make_dev_args args;
1867 	struct hpt_iop_hba *hba = (struct hpt_iop_hba *)device_get_softc(dev);
1868 	struct hpt_iop_request_get_config  iop_config;
1869 	struct hpt_iop_request_set_config  set_config;
1870 	int rid = 0;
1871 	struct cam_devq *devq;
1872 	struct ccb_setasync ccb;
1873 	u_int32_t unit = device_get_unit(dev);
1874 
1875 	device_printf(dev, "%d RocketRAID 3xxx/4xxx controller driver %s\n",
1876 			unit, driver_version);
1877 
1878 	KdPrint(("hptiop: attach(%d, %d/%d/%d) ops=%p\n", unit,
1879 		pci_get_bus(dev), pci_get_slot(dev),
1880 		pci_get_function(dev), hba->ops));
1881 
1882 	pci_enable_busmaster(dev);
1883 	hba->pcidev = dev;
1884 	hba->pciunit = unit;
1885 
1886 	if (hba->ops->alloc_pci_res(hba))
1887 		return ENXIO;
1888 
1889 	if (hba->ops->iop_wait_ready(hba, 2000)) {
1890 		device_printf(dev, "adapter is not ready\n");
1891 		goto release_pci_res;
1892 	}
1893 
1894 	mtx_init(&hba->lock, "hptioplock", NULL, MTX_DEF);
1895 
1896 	if (bus_dma_tag_create(bus_get_dma_tag(dev),/* PCI parent */
1897 			1,  /* alignment */
1898 			0, /* boundary */
1899 			BUS_SPACE_MAXADDR,  /* lowaddr */
1900 			BUS_SPACE_MAXADDR,  /* highaddr */
1901 			NULL, NULL,         /* filter, filterarg */
1902 			BUS_SPACE_MAXSIZE_32BIT,    /* maxsize */
1903 			BUS_SPACE_UNRESTRICTED, /* nsegments */
1904 			BUS_SPACE_MAXSIZE_32BIT,    /* maxsegsize */
1905 			0,      /* flags */
1906 			NULL,   /* lockfunc */
1907 			NULL,       /* lockfuncarg */
1908 			&hba->parent_dmat   /* tag */))
1909 	{
1910 		device_printf(dev, "alloc parent_dmat failed\n");
1911 		goto release_pci_res;
1912 	}
1913 
1914 	if (hba->ops->family == MV_BASED_IOP) {
1915 		if (hba->ops->internal_memalloc(hba)) {
1916 			device_printf(dev, "alloc srb_dmat failed\n");
1917 			goto destroy_parent_tag;
1918 		}
1919 	}
1920 
1921 	if (hba->ops->get_config(hba, &iop_config)) {
1922 		device_printf(dev, "get iop config failed.\n");
1923 		goto get_config_failed;
1924 	}
1925 
1926 	hba->firmware_version = iop_config.firmware_version;
1927 	hba->interface_version = iop_config.interface_version;
1928 	hba->max_requests = iop_config.max_requests;
1929 	hba->max_devices = iop_config.max_devices;
1930 	hba->max_request_size = iop_config.request_size;
1931 	hba->max_sg_count = iop_config.max_sg_count;
1932 
1933 	if (hba->ops->family == MVFREY_BASED_IOP) {
1934 		if (hba->ops->internal_memalloc(hba)) {
1935 			device_printf(dev, "alloc srb_dmat failed\n");
1936 			goto destroy_parent_tag;
1937 		}
1938 		if (hba->ops->reset_comm(hba)) {
1939 			device_printf(dev, "reset comm failed\n");
1940 			goto get_config_failed;
1941 		}
1942 	}
1943 
1944 	if (bus_dma_tag_create(hba->parent_dmat,/* parent */
1945 			4,  /* alignment */
1946 			BUS_SPACE_MAXADDR_32BIT+1, /* boundary */
1947 			BUS_SPACE_MAXADDR,  /* lowaddr */
1948 			BUS_SPACE_MAXADDR,  /* highaddr */
1949 			NULL, NULL,         /* filter, filterarg */
1950 			PAGE_SIZE * (hba->max_sg_count-1),  /* maxsize */
1951 			hba->max_sg_count,  /* nsegments */
1952 			0x20000,    /* maxsegsize */
1953 			BUS_DMA_ALLOCNOW,       /* flags */
1954 			busdma_lock_mutex,  /* lockfunc */
1955 			&hba->lock,     /* lockfuncarg */
1956 			&hba->io_dmat   /* tag */))
1957 	{
1958 		device_printf(dev, "alloc io_dmat failed\n");
1959 		goto get_config_failed;
1960 	}
1961 
1962 	if (bus_dma_tag_create(hba->parent_dmat,/* parent */
1963 			1,  /* alignment */
1964 			0, /* boundary */
1965 			BUS_SPACE_MAXADDR_32BIT,    /* lowaddr */
1966 			BUS_SPACE_MAXADDR,  /* highaddr */
1967 			NULL, NULL,         /* filter, filterarg */
1968 			HPT_SRB_MAX_SIZE * HPT_SRB_MAX_QUEUE_SIZE + 0x20,
1969 			1,  /* nsegments */
1970 			BUS_SPACE_MAXSIZE_32BIT,    /* maxsegsize */
1971 			0,      /* flags */
1972 			NULL,   /* lockfunc */
1973 			NULL,       /* lockfuncarg */
1974 			&hba->srb_dmat  /* tag */))
1975 	{
1976 		device_printf(dev, "alloc srb_dmat failed\n");
1977 		goto destroy_io_dmat;
1978 	}
1979 
1980 	if (bus_dmamem_alloc(hba->srb_dmat, (void **)&hba->uncached_ptr,
1981 			BUS_DMA_WAITOK | BUS_DMA_COHERENT,
1982 			&hba->srb_dmamap) != 0)
1983 	{
1984 		device_printf(dev, "srb bus_dmamem_alloc failed!\n");
1985 		goto destroy_srb_dmat;
1986 	}
1987 
1988 	if (bus_dmamap_load(hba->srb_dmat,
1989 			hba->srb_dmamap, hba->uncached_ptr,
1990 			(HPT_SRB_MAX_SIZE * HPT_SRB_MAX_QUEUE_SIZE) + 0x20,
1991 			hptiop_map_srb, hba, 0))
1992 	{
1993 		device_printf(dev, "bus_dmamap_load failed!\n");
1994 		goto srb_dmamem_free;
1995 	}
1996 
1997 	if ((devq = cam_simq_alloc(hba->max_requests - 1 )) == NULL) {
1998 		device_printf(dev, "cam_simq_alloc failed\n");
1999 		goto srb_dmamap_unload;
2000 	}
2001 
2002 	hba->sim = cam_sim_alloc(hptiop_action, hptiop_poll, driver_name,
2003 			hba, unit, &hba->lock, hba->max_requests - 1, 1, devq);
2004 	if (!hba->sim) {
2005 		device_printf(dev, "cam_sim_alloc failed\n");
2006 		cam_simq_free(devq);
2007 		goto srb_dmamap_unload;
2008 	}
2009 	hptiop_lock_adapter(hba);
2010 	if (xpt_bus_register(hba->sim, dev, 0) != CAM_SUCCESS)
2011 	{
2012 		device_printf(dev, "xpt_bus_register failed\n");
2013 		goto free_cam_sim;
2014 	}
2015 
2016 	if (xpt_create_path(&hba->path, /*periph */ NULL,
2017 			cam_sim_path(hba->sim), CAM_TARGET_WILDCARD,
2018 			CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
2019 		device_printf(dev, "xpt_create_path failed\n");
2020 		goto deregister_xpt_bus;
2021 	}
2022 	hptiop_unlock_adapter(hba);
2023 
2024 	bzero(&set_config, sizeof(set_config));
2025 	set_config.iop_id = unit;
2026 	set_config.vbus_id = cam_sim_path(hba->sim);
2027 	set_config.max_host_request_size = HPT_SRB_MAX_REQ_SIZE;
2028 
2029 	if (hba->ops->set_config(hba, &set_config)) {
2030 		device_printf(dev, "set iop config failed.\n");
2031 		goto free_hba_path;
2032 	}
2033 
2034 	memset(&ccb, 0, sizeof(ccb));
2035 	xpt_setup_ccb(&ccb.ccb_h, hba->path, /*priority*/5);
2036 	ccb.ccb_h.func_code = XPT_SASYNC_CB;
2037 	ccb.event_enable = (AC_FOUND_DEVICE | AC_LOST_DEVICE);
2038 	ccb.callback = hptiop_async;
2039 	ccb.callback_arg = hba->sim;
2040 	xpt_action((union ccb *)&ccb);
2041 
2042 	rid = 0;
2043 	if ((hba->irq_res = bus_alloc_resource_any(hba->pcidev, SYS_RES_IRQ,
2044 			&rid, RF_SHAREABLE | RF_ACTIVE)) == NULL) {
2045 		device_printf(dev, "allocate irq failed!\n");
2046 		goto free_hba_path;
2047 	}
2048 
2049 	if (bus_setup_intr(hba->pcidev, hba->irq_res, INTR_TYPE_CAM | INTR_MPSAFE,
2050 				NULL, hptiop_pci_intr, hba, &hba->irq_handle))
2051 	{
2052 		device_printf(dev, "allocate intr function failed!\n");
2053 		goto free_irq_resource;
2054 	}
2055 
2056 	if (hptiop_send_sync_msg(hba,
2057 			IOPMU_INBOUND_MSG0_START_BACKGROUND_TASK, 5000)) {
2058 		device_printf(dev, "fail to start background task\n");
2059 		goto teartown_irq_resource;
2060 	}
2061 
2062 	hba->ops->enable_intr(hba);
2063 	hba->initialized = 1;
2064 
2065 	make_dev_args_init(&args);
2066 	args.mda_devsw = &hptiop_cdevsw;
2067 	args.mda_uid = UID_ROOT;
2068 	args.mda_gid = GID_WHEEL /*GID_OPERATOR*/;
2069 	args.mda_mode = S_IRUSR | S_IWUSR;
2070 	args.mda_si_drv1 = hba;
2071 
2072 	make_dev_s(&args, &hba->ioctl_dev, "%s%d", driver_name, unit);
2073 
2074 	return 0;
2075 
2076 
2077 teartown_irq_resource:
2078 	bus_teardown_intr(dev, hba->irq_res, hba->irq_handle);
2079 
2080 free_irq_resource:
2081 	bus_release_resource(dev, SYS_RES_IRQ, 0, hba->irq_res);
2082 
2083 	hptiop_lock_adapter(hba);
2084 free_hba_path:
2085 	xpt_free_path(hba->path);
2086 
2087 deregister_xpt_bus:
2088 	xpt_bus_deregister(cam_sim_path(hba->sim));
2089 
2090 free_cam_sim:
2091 	cam_sim_free(hba->sim, /*free devq*/ TRUE);
2092 	hptiop_unlock_adapter(hba);
2093 
2094 srb_dmamap_unload:
2095 	if (hba->uncached_ptr)
2096 		bus_dmamap_unload(hba->srb_dmat, hba->srb_dmamap);
2097 
2098 srb_dmamem_free:
2099 	if (hba->uncached_ptr)
2100 		bus_dmamem_free(hba->srb_dmat,
2101 			hba->uncached_ptr, hba->srb_dmamap);
2102 
2103 destroy_srb_dmat:
2104 	if (hba->srb_dmat)
2105 		bus_dma_tag_destroy(hba->srb_dmat);
2106 
2107 destroy_io_dmat:
2108 	if (hba->io_dmat)
2109 		bus_dma_tag_destroy(hba->io_dmat);
2110 
2111 get_config_failed:
2112 	hba->ops->internal_memfree(hba);
2113 
2114 destroy_parent_tag:
2115 	if (hba->parent_dmat)
2116 		bus_dma_tag_destroy(hba->parent_dmat);
2117 
2118 release_pci_res:
2119 	if (hba->ops->release_pci_res)
2120 		hba->ops->release_pci_res(hba);
2121 
2122 	return ENXIO;
2123 }
2124 
hptiop_detach(device_t dev)2125 static int hptiop_detach(device_t dev)
2126 {
2127 	struct hpt_iop_hba * hba = (struct hpt_iop_hba *)device_get_softc(dev);
2128 	int i;
2129 	int error = EBUSY;
2130 
2131 	hptiop_lock_adapter(hba);
2132 	for (i = 0; i < hba->max_devices; i++)
2133 		if (hptiop_os_query_remove_device(hba, i)) {
2134 			device_printf(dev, "%d file system is busy. id=%d",
2135 						hba->pciunit, i);
2136 			goto out;
2137 		}
2138 
2139 	if ((error = hptiop_shutdown(dev)) != 0)
2140 		goto out;
2141 	if (hptiop_send_sync_msg(hba,
2142 		IOPMU_INBOUND_MSG0_STOP_BACKGROUND_TASK, 60000))
2143 		goto out;
2144 	hptiop_unlock_adapter(hba);
2145 
2146 	hptiop_release_resource(hba);
2147 	return (0);
2148 out:
2149 	hptiop_unlock_adapter(hba);
2150 	return error;
2151 }
2152 
hptiop_shutdown(device_t dev)2153 static int hptiop_shutdown(device_t dev)
2154 {
2155 	struct hpt_iop_hba * hba = (struct hpt_iop_hba *)device_get_softc(dev);
2156 
2157 	int error = 0;
2158 
2159 	if (hba->flag & HPT_IOCTL_FLAG_OPEN) {
2160 		device_printf(dev, "%d device is busy", hba->pciunit);
2161 		return EBUSY;
2162 	}
2163 
2164 	hba->ops->disable_intr(hba);
2165 
2166 	if (hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_SHUTDOWN, 60000))
2167 		error = EBUSY;
2168 
2169 	return error;
2170 }
2171 
hptiop_pci_intr(void * arg)2172 static void hptiop_pci_intr(void *arg)
2173 {
2174 	struct hpt_iop_hba * hba = (struct hpt_iop_hba *)arg;
2175 	hptiop_lock_adapter(hba);
2176 	hba->ops->iop_intr(hba);
2177 	hptiop_unlock_adapter(hba);
2178 }
2179 
hptiop_poll(struct cam_sim * sim)2180 static void hptiop_poll(struct cam_sim *sim)
2181 {
2182 	struct hpt_iop_hba *hba;
2183 
2184 	hba = cam_sim_softc(sim);
2185 	hba->ops->iop_intr(hba);
2186 }
2187 
hptiop_async(void * callback_arg,u_int32_t code,struct cam_path * path,void * arg)2188 static void hptiop_async(void * callback_arg, u_int32_t code,
2189 					struct cam_path * path, void * arg)
2190 {
2191 }
2192 
hptiop_enable_intr_itl(struct hpt_iop_hba * hba)2193 static void hptiop_enable_intr_itl(struct hpt_iop_hba *hba)
2194 {
2195 	BUS_SPACE_WRT4_ITL(outbound_intmask,
2196 		~(IOPMU_OUTBOUND_INT_POSTQUEUE | IOPMU_OUTBOUND_INT_MSG0));
2197 }
2198 
hptiop_enable_intr_mv(struct hpt_iop_hba * hba)2199 static void hptiop_enable_intr_mv(struct hpt_iop_hba *hba)
2200 {
2201 	u_int32_t int_mask;
2202 
2203 	int_mask = BUS_SPACE_RD4_MV0(outbound_intmask);
2204 
2205 	int_mask |= MVIOP_MU_OUTBOUND_INT_POSTQUEUE
2206 			| MVIOP_MU_OUTBOUND_INT_MSG;
2207     	BUS_SPACE_WRT4_MV0(outbound_intmask,int_mask);
2208 }
2209 
hptiop_enable_intr_mvfrey(struct hpt_iop_hba * hba)2210 static void hptiop_enable_intr_mvfrey(struct hpt_iop_hba *hba)
2211 {
2212 	BUS_SPACE_WRT4_MVFREY2(f0_doorbell_enable, CPU_TO_F0_DRBL_MSG_A_BIT);
2213 	BUS_SPACE_RD4_MVFREY2(f0_doorbell_enable);
2214 
2215 	BUS_SPACE_WRT4_MVFREY2(isr_enable, 0x1);
2216 	BUS_SPACE_RD4_MVFREY2(isr_enable);
2217 
2218 	BUS_SPACE_WRT4_MVFREY2(pcie_f0_int_enable, 0x1010);
2219 	BUS_SPACE_RD4_MVFREY2(pcie_f0_int_enable);
2220 }
2221 
hptiop_disable_intr_itl(struct hpt_iop_hba * hba)2222 static void hptiop_disable_intr_itl(struct hpt_iop_hba *hba)
2223 {
2224 	u_int32_t int_mask;
2225 
2226 	int_mask = BUS_SPACE_RD4_ITL(outbound_intmask);
2227 
2228 	int_mask |= IOPMU_OUTBOUND_INT_POSTQUEUE | IOPMU_OUTBOUND_INT_MSG0;
2229 	BUS_SPACE_WRT4_ITL(outbound_intmask, int_mask);
2230 	BUS_SPACE_RD4_ITL(outbound_intstatus);
2231 }
2232 
hptiop_disable_intr_mv(struct hpt_iop_hba * hba)2233 static void hptiop_disable_intr_mv(struct hpt_iop_hba *hba)
2234 {
2235 	u_int32_t int_mask;
2236 	int_mask = BUS_SPACE_RD4_MV0(outbound_intmask);
2237 
2238 	int_mask &= ~(MVIOP_MU_OUTBOUND_INT_MSG
2239 			| MVIOP_MU_OUTBOUND_INT_POSTQUEUE);
2240 	BUS_SPACE_WRT4_MV0(outbound_intmask,int_mask);
2241 	BUS_SPACE_RD4_MV0(outbound_intmask);
2242 }
2243 
hptiop_disable_intr_mvfrey(struct hpt_iop_hba * hba)2244 static void hptiop_disable_intr_mvfrey(struct hpt_iop_hba *hba)
2245 {
2246 	BUS_SPACE_WRT4_MVFREY2(f0_doorbell_enable, 0);
2247 	BUS_SPACE_RD4_MVFREY2(f0_doorbell_enable);
2248 
2249 	BUS_SPACE_WRT4_MVFREY2(isr_enable, 0);
2250 	BUS_SPACE_RD4_MVFREY2(isr_enable);
2251 
2252 	BUS_SPACE_WRT4_MVFREY2(pcie_f0_int_enable, 0);
2253 	BUS_SPACE_RD4_MVFREY2(pcie_f0_int_enable);
2254 }
2255 
hptiop_reset_adapter(void * argv)2256 static void hptiop_reset_adapter(void *argv)
2257 {
2258 	struct hpt_iop_hba * hba = (struct hpt_iop_hba *)argv;
2259 	if (hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_RESET, 60000))
2260 		return;
2261 	hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_START_BACKGROUND_TASK, 5000);
2262 }
2263 
hptiop_get_srb(struct hpt_iop_hba * hba)2264 static void *hptiop_get_srb(struct hpt_iop_hba * hba)
2265 {
2266 	struct hpt_iop_srb * srb;
2267 
2268 	if (hba->srb_list) {
2269 		srb = hba->srb_list;
2270 		hba->srb_list = srb->next;
2271 		return srb;
2272 	}
2273 
2274 	return NULL;
2275 }
2276 
hptiop_free_srb(struct hpt_iop_hba * hba,struct hpt_iop_srb * srb)2277 static void hptiop_free_srb(struct hpt_iop_hba *hba, struct hpt_iop_srb *srb)
2278 {
2279 	srb->next = hba->srb_list;
2280 	hba->srb_list = srb;
2281 }
2282 
hptiop_action(struct cam_sim * sim,union ccb * ccb)2283 static void hptiop_action(struct cam_sim *sim, union ccb *ccb)
2284 {
2285 	struct hpt_iop_hba * hba = (struct hpt_iop_hba *)cam_sim_softc(sim);
2286 	struct hpt_iop_srb * srb;
2287 	int error;
2288 
2289 	switch (ccb->ccb_h.func_code) {
2290 
2291 	case XPT_SCSI_IO:
2292 		if (ccb->ccb_h.target_lun != 0 ||
2293 			ccb->ccb_h.target_id >= hba->max_devices ||
2294 			(ccb->ccb_h.flags & CAM_CDB_PHYS))
2295 		{
2296 			ccb->ccb_h.status = CAM_TID_INVALID;
2297 			xpt_done(ccb);
2298 			return;
2299 		}
2300 
2301 		if ((srb = hptiop_get_srb(hba)) == NULL) {
2302 			device_printf(hba->pcidev, "srb allocated failed");
2303 			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2304 			xpt_done(ccb);
2305 			return;
2306 		}
2307 
2308 		srb->ccb = ccb;
2309 		error = bus_dmamap_load_ccb(hba->io_dmat,
2310 					    srb->dma_map,
2311 					    ccb,
2312 					    hptiop_post_scsi_command,
2313 					    srb,
2314 					    0);
2315 
2316 		if (error && error != EINPROGRESS) {
2317 			device_printf(hba->pcidev,
2318 				"%d bus_dmamap_load error %d",
2319 				hba->pciunit, error);
2320 			xpt_freeze_simq(hba->sim, 1);
2321 			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2322 			hptiop_free_srb(hba, srb);
2323 			xpt_done(ccb);
2324 			return;
2325 		}
2326 
2327 		return;
2328 
2329 	case XPT_RESET_BUS:
2330 		device_printf(hba->pcidev, "reset adapter");
2331 		hba->msg_done = 0;
2332 		hptiop_reset_adapter(hba);
2333 		break;
2334 
2335 	case XPT_GET_TRAN_SETTINGS:
2336 	case XPT_SET_TRAN_SETTINGS:
2337 		ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
2338 		break;
2339 
2340 	case XPT_CALC_GEOMETRY:
2341 		cam_calc_geometry(&ccb->ccg, 1);
2342 		break;
2343 
2344 	case XPT_PATH_INQ:
2345 	{
2346 		struct ccb_pathinq *cpi = &ccb->cpi;
2347 
2348 		cpi->version_num = 1;
2349 		cpi->hba_inquiry = PI_SDTR_ABLE;
2350 		cpi->target_sprt = 0;
2351 		cpi->hba_misc = PIM_NOBUSRESET;
2352 		cpi->hba_eng_cnt = 0;
2353 		cpi->max_target = hba->max_devices;
2354 		cpi->max_lun = 0;
2355 		cpi->unit_number = cam_sim_unit(sim);
2356 		cpi->bus_id = cam_sim_bus(sim);
2357 		cpi->initiator_id = hba->max_devices;
2358 		cpi->base_transfer_speed = 3300;
2359 
2360 		strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
2361 		strlcpy(cpi->hba_vid, "HPT   ", HBA_IDLEN);
2362 		strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
2363 		cpi->transport = XPORT_SPI;
2364 		cpi->transport_version = 2;
2365 		cpi->protocol = PROTO_SCSI;
2366 		cpi->protocol_version = SCSI_REV_2;
2367 		cpi->ccb_h.status = CAM_REQ_CMP;
2368 		break;
2369 	}
2370 
2371 	default:
2372 		ccb->ccb_h.status = CAM_REQ_INVALID;
2373 		break;
2374 	}
2375 
2376 	xpt_done(ccb);
2377 	return;
2378 }
2379 
hptiop_post_req_itl(struct hpt_iop_hba * hba,struct hpt_iop_srb * srb,bus_dma_segment_t * segs,int nsegs)2380 static void hptiop_post_req_itl(struct hpt_iop_hba *hba,
2381 				struct hpt_iop_srb *srb,
2382 				bus_dma_segment_t *segs, int nsegs)
2383 {
2384 	int idx;
2385 	union ccb *ccb = srb->ccb;
2386 	u_int8_t *cdb;
2387 
2388 	if (ccb->ccb_h.flags & CAM_CDB_POINTER)
2389 		cdb = ccb->csio.cdb_io.cdb_ptr;
2390 	else
2391 		cdb = ccb->csio.cdb_io.cdb_bytes;
2392 
2393 	KdPrint(("ccb=%p %x-%x-%x\n",
2394 		ccb, *(u_int32_t *)cdb, *((u_int32_t *)cdb+1), *((u_int32_t *)cdb+2)));
2395 
2396 	if (srb->srb_flag & HPT_SRB_FLAG_HIGH_MEM_ACESS) {
2397 		u_int32_t iop_req32;
2398 		struct hpt_iop_request_scsi_command req;
2399 
2400 		iop_req32 = BUS_SPACE_RD4_ITL(inbound_queue);
2401 
2402 		if (iop_req32 == IOPMU_QUEUE_EMPTY) {
2403 			device_printf(hba->pcidev, "invalid req offset\n");
2404 			ccb->ccb_h.status = CAM_BUSY;
2405 			bus_dmamap_unload(hba->io_dmat, srb->dma_map);
2406 			hptiop_free_srb(hba, srb);
2407 			xpt_done(ccb);
2408 			return;
2409 		}
2410 
2411 		if (ccb->csio.dxfer_len && nsegs > 0) {
2412 			struct hpt_iopsg *psg = req.sg_list;
2413 			for (idx = 0; idx < nsegs; idx++, psg++) {
2414 				psg->pci_address = (u_int64_t)segs[idx].ds_addr;
2415 				psg->size = segs[idx].ds_len;
2416 				psg->eot = 0;
2417 			}
2418 			psg[-1].eot = 1;
2419 		}
2420 
2421 		bcopy(cdb, req.cdb, ccb->csio.cdb_len);
2422 
2423 		req.header.size =
2424 				offsetof(struct hpt_iop_request_scsi_command, sg_list)
2425 				+ nsegs*sizeof(struct hpt_iopsg);
2426 		req.header.type = IOP_REQUEST_TYPE_SCSI_COMMAND;
2427 		req.header.flags = 0;
2428 		req.header.result = IOP_RESULT_PENDING;
2429 		req.header.context = (u_int64_t)(unsigned long)srb;
2430 		req.dataxfer_length = ccb->csio.dxfer_len;
2431 		req.channel =  0;
2432 		req.target =  ccb->ccb_h.target_id;
2433 		req.lun =  ccb->ccb_h.target_lun;
2434 
2435 		bus_space_write_region_1(hba->bar0t, hba->bar0h, iop_req32,
2436 			(u_int8_t *)&req, req.header.size);
2437 
2438 		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
2439 			bus_dmamap_sync(hba->io_dmat,
2440 				srb->dma_map, BUS_DMASYNC_PREREAD);
2441 		}
2442 		else if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT)
2443 			bus_dmamap_sync(hba->io_dmat,
2444 				srb->dma_map, BUS_DMASYNC_PREWRITE);
2445 
2446 		BUS_SPACE_WRT4_ITL(inbound_queue,iop_req32);
2447 	} else {
2448 		struct hpt_iop_request_scsi_command *req;
2449 
2450 		req = (struct hpt_iop_request_scsi_command *)srb;
2451 		if (ccb->csio.dxfer_len && nsegs > 0) {
2452 			struct hpt_iopsg *psg = req->sg_list;
2453 			for (idx = 0; idx < nsegs; idx++, psg++) {
2454 				psg->pci_address =
2455 					(u_int64_t)segs[idx].ds_addr;
2456 				psg->size = segs[idx].ds_len;
2457 				psg->eot = 0;
2458 			}
2459 			psg[-1].eot = 1;
2460 		}
2461 
2462 		bcopy(cdb, req->cdb, ccb->csio.cdb_len);
2463 
2464 		req->header.type = IOP_REQUEST_TYPE_SCSI_COMMAND;
2465 		req->header.result = IOP_RESULT_PENDING;
2466 		req->dataxfer_length = ccb->csio.dxfer_len;
2467 		req->channel =  0;
2468 		req->target =  ccb->ccb_h.target_id;
2469 		req->lun =  ccb->ccb_h.target_lun;
2470 		req->header.size =
2471 			offsetof(struct hpt_iop_request_scsi_command, sg_list)
2472 			+ nsegs*sizeof(struct hpt_iopsg);
2473 		req->header.context = (u_int64_t)srb->index |
2474 						IOPMU_QUEUE_ADDR_HOST_BIT;
2475 		req->header.flags = IOP_REQUEST_FLAG_OUTPUT_CONTEXT;
2476 
2477 		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
2478 			bus_dmamap_sync(hba->io_dmat,
2479 				srb->dma_map, BUS_DMASYNC_PREREAD);
2480 		}else if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
2481 			bus_dmamap_sync(hba->io_dmat,
2482 				srb->dma_map, BUS_DMASYNC_PREWRITE);
2483 		}
2484 
2485 		if (hba->firmware_version > 0x01020000
2486 			|| hba->interface_version > 0x01020000) {
2487 			u_int32_t size_bits;
2488 
2489 			if (req->header.size < 256)
2490 				size_bits = IOPMU_QUEUE_REQUEST_SIZE_BIT;
2491 			else if (req->header.size < 512)
2492 				size_bits = IOPMU_QUEUE_ADDR_HOST_BIT;
2493 			else
2494 				size_bits = IOPMU_QUEUE_REQUEST_SIZE_BIT
2495 						| IOPMU_QUEUE_ADDR_HOST_BIT;
2496 
2497 			BUS_SPACE_WRT4_ITL(inbound_queue,
2498 				(u_int32_t)srb->phy_addr | size_bits);
2499 		} else
2500 			BUS_SPACE_WRT4_ITL(inbound_queue, (u_int32_t)srb->phy_addr
2501 				|IOPMU_QUEUE_ADDR_HOST_BIT);
2502 	}
2503 }
2504 
hptiop_post_req_mv(struct hpt_iop_hba * hba,struct hpt_iop_srb * srb,bus_dma_segment_t * segs,int nsegs)2505 static void hptiop_post_req_mv(struct hpt_iop_hba *hba,
2506 				struct hpt_iop_srb *srb,
2507 				bus_dma_segment_t *segs, int nsegs)
2508 {
2509 	int idx, size;
2510 	union ccb *ccb = srb->ccb;
2511 	u_int8_t *cdb;
2512 	struct hpt_iop_request_scsi_command *req;
2513 	u_int64_t req_phy;
2514 
2515     	req = (struct hpt_iop_request_scsi_command *)srb;
2516 	req_phy = srb->phy_addr;
2517 
2518 	if (ccb->csio.dxfer_len && nsegs > 0) {
2519 		struct hpt_iopsg *psg = req->sg_list;
2520 		for (idx = 0; idx < nsegs; idx++, psg++) {
2521 			psg->pci_address = (u_int64_t)segs[idx].ds_addr;
2522 			psg->size = segs[idx].ds_len;
2523 			psg->eot = 0;
2524 		}
2525 		psg[-1].eot = 1;
2526 	}
2527 	if (ccb->ccb_h.flags & CAM_CDB_POINTER)
2528 		cdb = ccb->csio.cdb_io.cdb_ptr;
2529 	else
2530 		cdb = ccb->csio.cdb_io.cdb_bytes;
2531 
2532 	bcopy(cdb, req->cdb, ccb->csio.cdb_len);
2533 	req->header.type = IOP_REQUEST_TYPE_SCSI_COMMAND;
2534 	req->header.result = IOP_RESULT_PENDING;
2535 	req->dataxfer_length = ccb->csio.dxfer_len;
2536 	req->channel = 0;
2537 	req->target =  ccb->ccb_h.target_id;
2538 	req->lun =  ccb->ccb_h.target_lun;
2539 	req->header.size = sizeof(struct hpt_iop_request_scsi_command)
2540 				- sizeof(struct hpt_iopsg)
2541 				+ nsegs * sizeof(struct hpt_iopsg);
2542 	if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
2543 		bus_dmamap_sync(hba->io_dmat,
2544 			srb->dma_map, BUS_DMASYNC_PREREAD);
2545 	}
2546 	else if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT)
2547 		bus_dmamap_sync(hba->io_dmat,
2548 			srb->dma_map, BUS_DMASYNC_PREWRITE);
2549 	req->header.context = (u_int64_t)srb->index
2550 					<< MVIOP_REQUEST_NUMBER_START_BIT
2551 					| MVIOP_CMD_TYPE_SCSI;
2552 	req->header.flags = IOP_REQUEST_FLAG_OUTPUT_CONTEXT;
2553 	size = req->header.size >> 8;
2554 	hptiop_mv_inbound_write(req_phy
2555 			| MVIOP_MU_QUEUE_ADDR_HOST_BIT
2556 			| imin(3, size), hba);
2557 }
2558 
hptiop_post_req_mvfrey(struct hpt_iop_hba * hba,struct hpt_iop_srb * srb,bus_dma_segment_t * segs,int nsegs)2559 static void hptiop_post_req_mvfrey(struct hpt_iop_hba *hba,
2560 				struct hpt_iop_srb *srb,
2561 				bus_dma_segment_t *segs, int nsegs)
2562 {
2563 	int idx, index;
2564 	union ccb *ccb = srb->ccb;
2565 	u_int8_t *cdb;
2566 	struct hpt_iop_request_scsi_command *req;
2567 	u_int64_t req_phy;
2568 
2569 	req = (struct hpt_iop_request_scsi_command *)srb;
2570 	req_phy = srb->phy_addr;
2571 
2572 	if (ccb->csio.dxfer_len && nsegs > 0) {
2573 		struct hpt_iopsg *psg = req->sg_list;
2574 		for (idx = 0; idx < nsegs; idx++, psg++) {
2575 			psg->pci_address = (u_int64_t)segs[idx].ds_addr | 1;
2576 			psg->size = segs[idx].ds_len;
2577 			psg->eot = 0;
2578 		}
2579 		psg[-1].eot = 1;
2580 	}
2581 	if (ccb->ccb_h.flags & CAM_CDB_POINTER)
2582 		cdb = ccb->csio.cdb_io.cdb_ptr;
2583 	else
2584 		cdb = ccb->csio.cdb_io.cdb_bytes;
2585 
2586 	bcopy(cdb, req->cdb, ccb->csio.cdb_len);
2587 	req->header.type = IOP_REQUEST_TYPE_SCSI_COMMAND;
2588 	req->header.result = IOP_RESULT_PENDING;
2589 	req->dataxfer_length = ccb->csio.dxfer_len;
2590 	req->channel = 0;
2591 	req->target = ccb->ccb_h.target_id;
2592 	req->lun = ccb->ccb_h.target_lun;
2593 	req->header.size = sizeof(struct hpt_iop_request_scsi_command)
2594 				- sizeof(struct hpt_iopsg)
2595 				+ nsegs * sizeof(struct hpt_iopsg);
2596 	if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
2597 		bus_dmamap_sync(hba->io_dmat,
2598 			srb->dma_map, BUS_DMASYNC_PREREAD);
2599 	}
2600 	else if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT)
2601 		bus_dmamap_sync(hba->io_dmat,
2602 			srb->dma_map, BUS_DMASYNC_PREWRITE);
2603 
2604 	req->header.flags = IOP_REQUEST_FLAG_OUTPUT_CONTEXT
2605 						| IOP_REQUEST_FLAG_ADDR_BITS
2606 						| ((req_phy >> 16) & 0xffff0000);
2607 	req->header.context = ((req_phy & 0xffffffff) << 32 )
2608 						| srb->index << 4
2609 						| IOPMU_QUEUE_ADDR_HOST_BIT | req->header.type;
2610 
2611 	hba->u.mvfrey.inlist_wptr++;
2612 	index = hba->u.mvfrey.inlist_wptr & 0x3fff;
2613 
2614 	if (index == hba->u.mvfrey.list_count) {
2615 		index = 0;
2616 		hba->u.mvfrey.inlist_wptr &= ~0x3fff;
2617 		hba->u.mvfrey.inlist_wptr ^= CL_POINTER_TOGGLE;
2618 	}
2619 
2620 	hba->u.mvfrey.inlist[index].addr = req_phy;
2621 	hba->u.mvfrey.inlist[index].intrfc_len = (req->header.size + 3) / 4;
2622 
2623 	BUS_SPACE_WRT4_MVFREY2(inbound_write_ptr, hba->u.mvfrey.inlist_wptr);
2624 	BUS_SPACE_RD4_MVFREY2(inbound_write_ptr);
2625 
2626 	if (req->header.type == IOP_REQUEST_TYPE_SCSI_COMMAND) {
2627 		callout_reset(&srb->timeout, 20 * hz, hptiop_reset_adapter, hba);
2628 	}
2629 }
2630 
hptiop_post_scsi_command(void * arg,bus_dma_segment_t * segs,int nsegs,int error)2631 static void hptiop_post_scsi_command(void *arg, bus_dma_segment_t *segs,
2632 					int nsegs, int error)
2633 {
2634 	struct hpt_iop_srb *srb = (struct hpt_iop_srb *)arg;
2635 	union ccb *ccb = srb->ccb;
2636 	struct hpt_iop_hba *hba = srb->hba;
2637 
2638 	if (error || nsegs > hba->max_sg_count) {
2639 		KdPrint(("hptiop: func_code=%x tid=%x lun=%jx nsegs=%d\n",
2640 			ccb->ccb_h.func_code,
2641 			ccb->ccb_h.target_id,
2642 			(uintmax_t)ccb->ccb_h.target_lun, nsegs));
2643 		ccb->ccb_h.status = CAM_BUSY;
2644 		bus_dmamap_unload(hba->io_dmat, srb->dma_map);
2645 		hptiop_free_srb(hba, srb);
2646 		xpt_done(ccb);
2647 		return;
2648 	}
2649 
2650 	hba->ops->post_req(hba, srb, segs, nsegs);
2651 }
2652 
hptiop_mv_map_ctlcfg(void * arg,bus_dma_segment_t * segs,int nsegs,int error)2653 static void hptiop_mv_map_ctlcfg(void *arg, bus_dma_segment_t *segs,
2654 				int nsegs, int error)
2655 {
2656 	struct hpt_iop_hba *hba = (struct hpt_iop_hba *)arg;
2657 	hba->ctlcfgcmd_phy = ((u_int64_t)segs->ds_addr + 0x1F)
2658 				& ~(u_int64_t)0x1F;
2659 	hba->ctlcfg_ptr = (u_int8_t *)(((unsigned long)hba->ctlcfg_ptr + 0x1F)
2660 				& ~0x1F);
2661 }
2662 
hptiop_mvfrey_map_ctlcfg(void * arg,bus_dma_segment_t * segs,int nsegs,int error)2663 static void hptiop_mvfrey_map_ctlcfg(void *arg, bus_dma_segment_t *segs,
2664 				int nsegs, int error)
2665 {
2666 	struct hpt_iop_hba *hba = (struct hpt_iop_hba *)arg;
2667 	char *p;
2668 	u_int64_t phy;
2669 	u_int32_t list_count = hba->u.mvfrey.list_count;
2670 
2671 	phy = ((u_int64_t)segs->ds_addr + 0x1F)
2672 				& ~(u_int64_t)0x1F;
2673 	p = (u_int8_t *)(((unsigned long)hba->ctlcfg_ptr + 0x1F)
2674 				& ~0x1F);
2675 
2676 	hba->ctlcfgcmd_phy = phy;
2677 	hba->ctlcfg_ptr = p;
2678 
2679 	p += 0x800;
2680 	phy += 0x800;
2681 
2682 	hba->u.mvfrey.inlist = (struct mvfrey_inlist_entry *)p;
2683 	hba->u.mvfrey.inlist_phy = phy;
2684 
2685 	p += list_count * sizeof(struct mvfrey_inlist_entry);
2686 	phy += list_count * sizeof(struct mvfrey_inlist_entry);
2687 
2688 	hba->u.mvfrey.outlist = (struct mvfrey_outlist_entry *)p;
2689 	hba->u.mvfrey.outlist_phy = phy;
2690 
2691 	p += list_count * sizeof(struct mvfrey_outlist_entry);
2692 	phy += list_count * sizeof(struct mvfrey_outlist_entry);
2693 
2694 	hba->u.mvfrey.outlist_cptr = (u_int32_t *)p;
2695 	hba->u.mvfrey.outlist_cptr_phy = phy;
2696 }
2697 
hptiop_map_srb(void * arg,bus_dma_segment_t * segs,int nsegs,int error)2698 static void hptiop_map_srb(void *arg, bus_dma_segment_t *segs,
2699 				int nsegs, int error)
2700 {
2701 	struct hpt_iop_hba * hba = (struct hpt_iop_hba *)arg;
2702 	bus_addr_t phy_addr = (segs->ds_addr + 0x1F) & ~(bus_addr_t)0x1F;
2703 	struct hpt_iop_srb *srb, *tmp_srb;
2704 	int i;
2705 
2706 	if (error || nsegs == 0) {
2707 		device_printf(hba->pcidev, "hptiop_map_srb error");
2708 		return;
2709 	}
2710 
2711 	/* map srb */
2712 	srb = (struct hpt_iop_srb *)
2713 		(((unsigned long)hba->uncached_ptr + 0x1F)
2714 		& ~(unsigned long)0x1F);
2715 
2716 	for (i = 0; i < HPT_SRB_MAX_QUEUE_SIZE; i++) {
2717 		tmp_srb = (struct hpt_iop_srb *)
2718 					((char *)srb + i * HPT_SRB_MAX_SIZE);
2719 		if (((unsigned long)tmp_srb & 0x1F) == 0) {
2720 			if (bus_dmamap_create(hba->io_dmat,
2721 						0, &tmp_srb->dma_map)) {
2722 				device_printf(hba->pcidev, "dmamap create failed");
2723 				return;
2724 			}
2725 
2726 			bzero(tmp_srb, sizeof(struct hpt_iop_srb));
2727 			tmp_srb->hba = hba;
2728 			tmp_srb->index = i;
2729 			if (hba->ctlcfg_ptr == 0) {/*itl iop*/
2730 				tmp_srb->phy_addr = (u_int64_t)(u_int32_t)
2731 							(phy_addr >> 5);
2732 				if (phy_addr & IOPMU_MAX_MEM_SUPPORT_MASK_32G)
2733 					tmp_srb->srb_flag =
2734 						HPT_SRB_FLAG_HIGH_MEM_ACESS;
2735 			} else {
2736 				tmp_srb->phy_addr = phy_addr;
2737 			}
2738 
2739 			callout_init_mtx(&tmp_srb->timeout, &hba->lock, 0);
2740 			hptiop_free_srb(hba, tmp_srb);
2741 			hba->srb[i] = tmp_srb;
2742 			phy_addr += HPT_SRB_MAX_SIZE;
2743 		}
2744 		else {
2745 			device_printf(hba->pcidev, "invalid alignment");
2746 			return;
2747 		}
2748 	}
2749 }
2750 
hptiop_os_message_callback(struct hpt_iop_hba * hba,u_int32_t msg)2751 static void hptiop_os_message_callback(struct hpt_iop_hba * hba, u_int32_t msg)
2752 {
2753 	hba->msg_done = 1;
2754 }
2755 
hptiop_os_query_remove_device(struct hpt_iop_hba * hba,int target_id)2756 static  int hptiop_os_query_remove_device(struct hpt_iop_hba * hba,
2757 						int target_id)
2758 {
2759 	struct cam_periph       *periph = NULL;
2760 	struct cam_path         *path;
2761 	int                     status, retval = 0;
2762 
2763 	status = xpt_create_path(&path, NULL, hba->sim->path_id, target_id, 0);
2764 
2765 	if (status == CAM_REQ_CMP) {
2766 		if ((periph = cam_periph_find(path, "da")) != NULL) {
2767 			if (periph->refcount >= 1) {
2768 				device_printf(hba->pcidev, "%d ,"
2769 					"target_id=0x%x,"
2770 					"refcount=%d",
2771 				    hba->pciunit, target_id, periph->refcount);
2772 				retval = -1;
2773 			}
2774 		}
2775 		xpt_free_path(path);
2776 	}
2777 	return retval;
2778 }
2779 
hptiop_release_resource(struct hpt_iop_hba * hba)2780 static void hptiop_release_resource(struct hpt_iop_hba *hba)
2781 {
2782 	int i;
2783 
2784 	if (hba->ioctl_dev)
2785 		destroy_dev(hba->ioctl_dev);
2786 
2787 	if (hba->path) {
2788 		struct ccb_setasync ccb;
2789 
2790 		memset(&ccb, 0, sizeof(ccb));
2791 		xpt_setup_ccb(&ccb.ccb_h, hba->path, /*priority*/5);
2792 		ccb.ccb_h.func_code = XPT_SASYNC_CB;
2793 		ccb.event_enable = 0;
2794 		ccb.callback = hptiop_async;
2795 		ccb.callback_arg = hba->sim;
2796 		xpt_action((union ccb *)&ccb);
2797 		xpt_free_path(hba->path);
2798 	}
2799 
2800 	if (hba->irq_handle)
2801 		bus_teardown_intr(hba->pcidev, hba->irq_res, hba->irq_handle);
2802 
2803 	if (hba->sim) {
2804 		hptiop_lock_adapter(hba);
2805 		xpt_bus_deregister(cam_sim_path(hba->sim));
2806 		cam_sim_free(hba->sim, TRUE);
2807 		hptiop_unlock_adapter(hba);
2808 	}
2809 
2810 	if (hba->ctlcfg_dmat) {
2811 		bus_dmamap_unload(hba->ctlcfg_dmat, hba->ctlcfg_dmamap);
2812 		bus_dmamem_free(hba->ctlcfg_dmat,
2813 					hba->ctlcfg_ptr, hba->ctlcfg_dmamap);
2814 		bus_dma_tag_destroy(hba->ctlcfg_dmat);
2815 	}
2816 
2817 	for (i = 0; i < HPT_SRB_MAX_QUEUE_SIZE; i++) {
2818 		struct hpt_iop_srb *srb = hba->srb[i];
2819 		if (srb->dma_map)
2820 			bus_dmamap_destroy(hba->io_dmat, srb->dma_map);
2821 		callout_drain(&srb->timeout);
2822 	}
2823 
2824 	if (hba->srb_dmat) {
2825 		bus_dmamap_unload(hba->srb_dmat, hba->srb_dmamap);
2826 		bus_dmamap_destroy(hba->srb_dmat, hba->srb_dmamap);
2827 		bus_dma_tag_destroy(hba->srb_dmat);
2828 	}
2829 
2830 	if (hba->io_dmat)
2831 		bus_dma_tag_destroy(hba->io_dmat);
2832 
2833 	if (hba->parent_dmat)
2834 		bus_dma_tag_destroy(hba->parent_dmat);
2835 
2836 	if (hba->irq_res)
2837 		bus_release_resource(hba->pcidev, SYS_RES_IRQ,
2838 					0, hba->irq_res);
2839 
2840 	if (hba->bar0_res)
2841 		bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
2842 					hba->bar0_rid, hba->bar0_res);
2843 	if (hba->bar2_res)
2844 		bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
2845 					hba->bar2_rid, hba->bar2_res);
2846 	mtx_destroy(&hba->lock);
2847 }
2848