// SPDX-License-Identifier: GPL-2.0-only
/*
 * HighPoint RR3xxx/4xxx controller driver for Linux
 * Copyright (C) 2006-2015 HighPoint Technologies, Inc. All Rights Reserved.
 *
 * Please report bugs/comments/suggestions to linux@highpoint-tech.com
 *
 * For more information, visit http://www.highpoint-tech.com
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/spinlock.h>
#include <linux/gfp.h>
#include <linux/uaccess.h>
#include <asm/io.h>
#include <asm/div64.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_host.h>

#include "hptiop.h"

MODULE_AUTHOR("HighPoint Technologies, Inc.");
MODULE_DESCRIPTION("HighPoint RocketRAID 3xxx/4xxx Controller Driver");

static char driver_name[] = "hptiop";
static const char driver_name_long[] = "RocketRAID 3xxx/4xxx Controller driver";
static const char driver_ver[] = "v1.10.0";

static int iop_send_sync_msg(struct hptiop_hba *hba, u32 msg, u32 millisec);
static void hptiop_finish_scsi_req(struct hptiop_hba *hba, u32 tag,
				struct hpt_iop_request_scsi_command *req);
static void hptiop_host_request_callback_itl(struct hptiop_hba *hba, u32 tag);
static void hptiop_iop_request_callback_itl(struct hptiop_hba *hba, u32 tag);
static void hptiop_message_callback(struct hptiop_hba *hba, u32 msg);

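/*
 * Wait for the ITL firmware to become ready: poll the inbound queue until
 * the IOP posts a request, then hand that request straight back through the
 * outbound queue. The trailing readl() flushes the posted PCI write.
 * Returns 0 on success, -1 after roughly @millisec milliseconds.
 */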
static int iop_wait_ready_itl(struct hptiop_hba *hba, u32 millisec)
{
	u32 req = 0;
	int i;

	for (i = 0; i < millisec; i++) {
		req = readl(&hba->u.itl.iop->inbound_queue);
		if (req != IOPMU_QUEUE_EMPTY)
			break;
		msleep(1);
	}

	if (req != IOPMU_QUEUE_EMPTY) {
		writel(req, &hba->u.itl.iop->outbound_queue);
		readl(&hba->u.itl.iop->outbound_intstatus);
		return 0;
	}

	return -1;
}

static int iop_wait_ready_mv(struct hptiop_hba *hba, u32 millisec)
{
	return iop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_NOP, millisec);
}

static int iop_wait_ready_mvfrey(struct hptiop_hba *hba, u32 millisec)
{
	return iop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_NOP, millisec);
}

static void hptiop_request_callback_itl(struct hptiop_hba *hba, u32 tag)
{
	if (tag & IOPMU_QUEUE_ADDR_HOST_BIT)
		hptiop_host_request_callback_itl(hba,
				tag & ~IOPMU_QUEUE_ADDR_HOST_BIT);
	else
		hptiop_iop_request_callback_itl(hba, tag);
}

static void hptiop_drain_outbound_queue_itl(struct hptiop_hba *hba)
{
	u32 req;

	while ((req = readl(&hba->u.itl.iop->outbound_queue)) !=
						IOPMU_QUEUE_EMPTY) {

		if (req & IOPMU_QUEUE_MASK_HOST_BITS)
			hptiop_request_callback_itl(hba, req);
		else {
			struct hpt_iop_request_header __iomem *p;

			p = (struct hpt_iop_request_header __iomem *)
				((char __iomem *)hba->u.itl.iop + req);

			if (readl(&p->flags) & IOP_REQUEST_FLAG_SYNC_REQUEST) {
				if (readl(&p->context))
					hptiop_request_callback_itl(hba, req);
				else
					writel(1, &p->context);
			} else
				hptiop_request_callback_itl(hba, req);
		}
	}
}

static int iop_intr_itl(struct hptiop_hba *hba)
{
	struct hpt_iopmu_itl __iomem *iop = hba->u.itl.iop;
	void __iomem *plx = hba->u.itl.plx;
	u32 status;
	int ret = 0;

	if (plx && readl(plx + 0x11C5C) & 0xf)
		writel(1, plx + 0x11C60);

	status = readl(&iop->outbound_intstatus);

	if (status & IOPMU_OUTBOUND_INT_MSG0) {
		u32 msg = readl(&iop->outbound_msgaddr0);

		dprintk("received outbound msg %x\n", msg);
		writel(IOPMU_OUTBOUND_INT_MSG0, &iop->outbound_intstatus);
		hptiop_message_callback(hba, msg);
		ret = 1;
	}

	if (status & IOPMU_OUTBOUND_INT_POSTQUEUE) {
		hptiop_drain_outbound_queue_itl(hba);
		ret = 1;
	}

	return ret;
}

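/*
 * The MV message unit exposes the outbound completion queue as a ring of
 * 64-bit tags with head/tail indices kept in MMIO registers. Pop one tag,
 * advance the tail (wrapping at MVIOP_QUEUE_LEN), and return it; 0 means
 * the ring is empty.
 */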
static u64 mv_outbound_read(struct hpt_iopmu_mv __iomem *mu)
{
	u32 outbound_tail = readl(&mu->outbound_tail);
	u32 outbound_head = readl(&mu->outbound_head);

	if (outbound_tail != outbound_head) {
		u64 p;

		memcpy_fromio(&p, &mu->outbound_q[outbound_tail], 8);
		outbound_tail++;

		if (outbound_tail == MVIOP_QUEUE_LEN)
			outbound_tail = 0;
		writel(outbound_tail, &mu->outbound_tail);
		return p;
	} else
		return 0;
}

static void mv_inbound_write(u64 p, struct hptiop_hba *hba)
{
	u32 inbound_head = readl(&hba->u.mv.mu->inbound_head);
	u32 head = inbound_head + 1;

	if (head == MVIOP_QUEUE_LEN)
		head = 0;

	memcpy_toio(&hba->u.mv.mu->inbound_q[inbound_head], &p, 8);
	writel(head, &hba->u.mv.mu->inbound_head);
	writel(MVIOP_MU_INBOUND_INT_POSTQUEUE,
			&hba->u.mv.regs->inbound_doorbell);
}

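/*
 * MV completion tags encode the request type in bits 5-7 and the request
 * index in bits 8 and up; MVIOP_MU_QUEUE_REQUEST_RESULT_BIT is set by the
 * firmware when the request completed successfully.
 */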
static void hptiop_request_callback_mv(struct hptiop_hba *hba, u64 tag)
{
	u32 req_type = (tag >> 5) & 0x7;
	struct hpt_iop_request_scsi_command *req;

	dprintk("hptiop_request_callback_mv: tag=%llx\n", tag);

	BUG_ON((tag & MVIOP_MU_QUEUE_REQUEST_RETURN_CONTEXT) == 0);

	switch (req_type) {
	case IOP_REQUEST_TYPE_GET_CONFIG:
	case IOP_REQUEST_TYPE_SET_CONFIG:
		hba->msg_done = 1;
		break;

	case IOP_REQUEST_TYPE_SCSI_COMMAND:
		req = hba->reqs[tag >> 8].req_virt;
		if (likely(tag & MVIOP_MU_QUEUE_REQUEST_RESULT_BIT))
			req->header.result = cpu_to_le32(IOP_RESULT_SUCCESS);

		hptiop_finish_scsi_req(hba, tag >> 8, req);
		break;

	default:
		break;
	}
}

static int iop_intr_mv(struct hptiop_hba *hba)
{
	u32 status;
	int ret = 0;

	status = readl(&hba->u.mv.regs->outbound_doorbell);
	writel(~status, &hba->u.mv.regs->outbound_doorbell);

	if (status & MVIOP_MU_OUTBOUND_INT_MSG) {
		u32 msg;
		msg = readl(&hba->u.mv.mu->outbound_msg);
		dprintk("received outbound msg %x\n", msg);
		hptiop_message_callback(hba, msg);
		ret = 1;
	}

	if (status & MVIOP_MU_OUTBOUND_INT_POSTQUEUE) {
		u64 tag;

		while ((tag = mv_outbound_read(hba->u.mv.mu)))
			hptiop_request_callback_mv(hba, tag);
		ret = 1;
	}

	return ret;
}

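/*
 * MVFrey completion tags use a different layout: the request type lives in
 * the low 4 bits and the request index in bits 4-11, with
 * IOPMU_QUEUE_REQUEST_RESULT_BIT flagging success.
 */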
static void hptiop_request_callback_mvfrey(struct hptiop_hba *hba, u32 _tag)
{
	u32 req_type = _tag & 0xf;
	struct hpt_iop_request_scsi_command *req;

	switch (req_type) {
	case IOP_REQUEST_TYPE_GET_CONFIG:
	case IOP_REQUEST_TYPE_SET_CONFIG:
		hba->msg_done = 1;
		break;

	case IOP_REQUEST_TYPE_SCSI_COMMAND:
		req = hba->reqs[(_tag >> 4) & 0xff].req_virt;
		if (likely(_tag & IOPMU_QUEUE_REQUEST_RESULT_BIT))
			req->header.result = cpu_to_le32(IOP_RESULT_SUCCESS);
		hptiop_finish_scsi_req(hba, (_tag >> 4) & 0xff, req);
		break;

	default:
		break;
	}
}

static int iop_intr_mvfrey(struct hptiop_hba *hba)
{
	u32 _tag, status, cptr, cur_rptr;
	int ret = 0;

	if (hba->initialized)
		writel(0, &(hba->u.mvfrey.mu->pcie_f0_int_enable));

	status = readl(&(hba->u.mvfrey.mu->f0_doorbell));
	if (status) {
		writel(status, &(hba->u.mvfrey.mu->f0_doorbell));
		if (status & CPU_TO_F0_DRBL_MSG_BIT) {
			u32 msg = readl(&(hba->u.mvfrey.mu->cpu_to_f0_msg_a));
			dprintk("received outbound msg %x\n", msg);
			hptiop_message_callback(hba, msg);
		}
		ret = 1;
	}

	status = readl(&(hba->u.mvfrey.mu->isr_cause));
	if (status) {
		writel(status, &(hba->u.mvfrey.mu->isr_cause));
		do {
			cptr = *hba->u.mvfrey.outlist_cptr & 0xff;
			cur_rptr = hba->u.mvfrey.outlist_rptr;
			while (cur_rptr != cptr) {
				cur_rptr++;
				if (cur_rptr == hba->u.mvfrey.list_count)
					cur_rptr = 0;

				_tag = hba->u.mvfrey.outlist[cur_rptr].val;
				BUG_ON(!(_tag & IOPMU_QUEUE_MASK_HOST_BITS));
				hptiop_request_callback_mvfrey(hba, _tag);
				ret = 1;
			}
			hba->u.mvfrey.outlist_rptr = cur_rptr;
		} while (cptr != (*hba->u.mvfrey.outlist_cptr & 0xff));
	}

	if (hba->initialized)
		writel(0x1010, &(hba->u.mvfrey.mu->pcie_f0_int_enable));

	return ret;
}

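/*
 * Synchronous requests are used before interrupts are fully set up: mark
 * the request with IOP_REQUEST_FLAG_SYNC_REQUEST, post it, then poll by
 * invoking the interrupt handler directly until the firmware signals
 * completion (via req->context for ITL, hba->msg_done for MV/MVFrey).
 */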
static int iop_send_sync_request_itl(struct hptiop_hba *hba,
					void __iomem *_req, u32 millisec)
{
	struct hpt_iop_request_header __iomem *req = _req;
	u32 i;

	writel(readl(&req->flags) | IOP_REQUEST_FLAG_SYNC_REQUEST, &req->flags);
	writel(0, &req->context);
	writel((unsigned long)req - (unsigned long)hba->u.itl.iop,
			&hba->u.itl.iop->inbound_queue);
	readl(&hba->u.itl.iop->outbound_intstatus);

	for (i = 0; i < millisec; i++) {
		iop_intr_itl(hba);
		if (readl(&req->context))
			return 0;
		msleep(1);
	}

	return -1;
}

static int iop_send_sync_request_mv(struct hptiop_hba *hba,
					u32 size_bits, u32 millisec)
{
	struct hpt_iop_request_header *reqhdr = hba->u.mv.internal_req;
	u32 i;

	hba->msg_done = 0;
	reqhdr->flags |= cpu_to_le32(IOP_REQUEST_FLAG_SYNC_REQUEST);
	mv_inbound_write(hba->u.mv.internal_req_phy |
			MVIOP_MU_QUEUE_ADDR_HOST_BIT | size_bits, hba);

	for (i = 0; i < millisec; i++) {
		iop_intr_mv(hba);
		if (hba->msg_done)
			return 0;
		msleep(1);
	}
	return -1;
}

static int iop_send_sync_request_mvfrey(struct hptiop_hba *hba,
					u32 size_bits, u32 millisec)
{
	struct hpt_iop_request_header *reqhdr =
		hba->u.mvfrey.internal_req.req_virt;
	u32 i;

	hba->msg_done = 0;
	reqhdr->flags |= cpu_to_le32(IOP_REQUEST_FLAG_SYNC_REQUEST);
	hba->ops->post_req(hba, &(hba->u.mvfrey.internal_req));

	for (i = 0; i < millisec; i++) {
		iop_intr_mvfrey(hba);
		if (hba->msg_done)
			break;
		msleep(1);
	}
	return hba->msg_done ? 0 : -1;
}

static void hptiop_post_msg_itl(struct hptiop_hba *hba, u32 msg)
{
	writel(msg, &hba->u.itl.iop->inbound_msgaddr0);
	readl(&hba->u.itl.iop->outbound_intstatus);
}

static void hptiop_post_msg_mv(struct hptiop_hba *hba, u32 msg)
{
	writel(msg, &hba->u.mv.mu->inbound_msg);
	writel(MVIOP_MU_INBOUND_INT_MSG, &hba->u.mv.regs->inbound_doorbell);
	readl(&hba->u.mv.regs->inbound_doorbell);
}

static void hptiop_post_msg_mvfrey(struct hptiop_hba *hba, u32 msg)
{
	writel(msg, &(hba->u.mvfrey.mu->f0_to_cpu_msg_a));
	readl(&(hba->u.mvfrey.mu->f0_to_cpu_msg_a));
}

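/*
 * Send an inbound message and busy-wait for the firmware's reply. The
 * interrupt is masked for the duration, so the reply is picked up by
 * calling the per-family ISR by hand under the host lock; the message
 * callback sets hba->msg_done.
 */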
static int iop_send_sync_msg(struct hptiop_hba *hba, u32 msg, u32 millisec)
{
	u32 i;

	hba->msg_done = 0;
	hba->ops->disable_intr(hba);
	hba->ops->post_msg(hba, msg);

	for (i = 0; i < millisec; i++) {
		spin_lock_irq(hba->host->host_lock);
		hba->ops->iop_intr(hba);
		spin_unlock_irq(hba->host->host_lock);
		if (hba->msg_done)
			break;
		msleep(1);
	}

	hba->ops->enable_intr(hba);
	return hba->msg_done ? 0 : -1;
}

static int iop_get_config_itl(struct hptiop_hba *hba,
				struct hpt_iop_request_get_config *config)
{
	u32 req32;
	struct hpt_iop_request_get_config __iomem *req;

	req32 = readl(&hba->u.itl.iop->inbound_queue);
	if (req32 == IOPMU_QUEUE_EMPTY)
		return -1;

	req = (struct hpt_iop_request_get_config __iomem *)
			((unsigned long)hba->u.itl.iop + req32);

	writel(0, &req->header.flags);
	writel(IOP_REQUEST_TYPE_GET_CONFIG, &req->header.type);
	writel(sizeof(struct hpt_iop_request_get_config), &req->header.size);
	writel(IOP_RESULT_PENDING, &req->header.result);

	if (iop_send_sync_request_itl(hba, req, 20000)) {
		dprintk("Get config send cmd failed\n");
		return -1;
	}

	memcpy_fromio(config, req, sizeof(*config));
	writel(req32, &hba->u.itl.iop->outbound_queue);
	return 0;
}

static int iop_get_config_mv(struct hptiop_hba *hba,
				struct hpt_iop_request_get_config *config)
{
	struct hpt_iop_request_get_config *req = hba->u.mv.internal_req;

	req->header.flags = cpu_to_le32(IOP_REQUEST_FLAG_OUTPUT_CONTEXT);
	req->header.type = cpu_to_le32(IOP_REQUEST_TYPE_GET_CONFIG);
	req->header.size =
		cpu_to_le32(sizeof(struct hpt_iop_request_get_config));
	req->header.result = cpu_to_le32(IOP_RESULT_PENDING);
	req->header.context = cpu_to_le32(IOP_REQUEST_TYPE_GET_CONFIG << 5);
	req->header.context_hi32 = 0;

	if (iop_send_sync_request_mv(hba, 0, 20000)) {
		dprintk("Get config send cmd failed\n");
		return -1;
	}

	memcpy(config, req, sizeof(struct hpt_iop_request_get_config));
	return 0;
}

static int iop_get_config_mvfrey(struct hptiop_hba *hba,
				struct hpt_iop_request_get_config *config)
{
	struct hpt_iop_request_get_config *info = hba->u.mvfrey.config;

	if (info->header.size != sizeof(struct hpt_iop_request_get_config) ||
			info->header.type != IOP_REQUEST_TYPE_GET_CONFIG)
		return -1;

	config->interface_version = info->interface_version;
	config->firmware_version = info->firmware_version;
	config->max_requests = info->max_requests;
	config->request_size = info->request_size;
	config->max_sg_count = info->max_sg_count;
	config->data_transfer_length = info->data_transfer_length;
	config->alignment_mask = info->alignment_mask;
	config->max_devices = info->max_devices;
	config->sdram_size = info->sdram_size;

	return 0;
}

static int iop_set_config_itl(struct hptiop_hba *hba,
				struct hpt_iop_request_set_config *config)
{
	u32 req32;
	struct hpt_iop_request_set_config __iomem *req;

	req32 = readl(&hba->u.itl.iop->inbound_queue);
	if (req32 == IOPMU_QUEUE_EMPTY)
		return -1;

	req = (struct hpt_iop_request_set_config __iomem *)
			((unsigned long)hba->u.itl.iop + req32);

	memcpy_toio((u8 __iomem *)req + sizeof(struct hpt_iop_request_header),
		(u8 *)config + sizeof(struct hpt_iop_request_header),
		sizeof(struct hpt_iop_request_set_config) -
			sizeof(struct hpt_iop_request_header));

	writel(0, &req->header.flags);
	writel(IOP_REQUEST_TYPE_SET_CONFIG, &req->header.type);
	writel(sizeof(struct hpt_iop_request_set_config), &req->header.size);
	writel(IOP_RESULT_PENDING, &req->header.result);

	if (iop_send_sync_request_itl(hba, req, 20000)) {
		dprintk("Set config send cmd failed\n");
		return -1;
	}

	writel(req32, &hba->u.itl.iop->outbound_queue);
	return 0;
}

static int iop_set_config_mv(struct hptiop_hba *hba,
				struct hpt_iop_request_set_config *config)
{
	struct hpt_iop_request_set_config *req = hba->u.mv.internal_req;

	memcpy(req, config, sizeof(struct hpt_iop_request_set_config));
	req->header.flags = cpu_to_le32(IOP_REQUEST_FLAG_OUTPUT_CONTEXT);
	req->header.type = cpu_to_le32(IOP_REQUEST_TYPE_SET_CONFIG);
	req->header.size =
		cpu_to_le32(sizeof(struct hpt_iop_request_set_config));
	req->header.result = cpu_to_le32(IOP_RESULT_PENDING);
	req->header.context = cpu_to_le32(IOP_REQUEST_TYPE_SET_CONFIG << 5);
	req->header.context_hi32 = 0;

	if (iop_send_sync_request_mv(hba, 0, 20000)) {
		dprintk("Set config send cmd failed\n");
		return -1;
	}

	return 0;
}

static int iop_set_config_mvfrey(struct hptiop_hba *hba,
				struct hpt_iop_request_set_config *config)
{
	struct hpt_iop_request_set_config *req =
		hba->u.mvfrey.internal_req.req_virt;

	memcpy(req, config, sizeof(struct hpt_iop_request_set_config));
	req->header.flags = cpu_to_le32(IOP_REQUEST_FLAG_OUTPUT_CONTEXT);
	req->header.type = cpu_to_le32(IOP_REQUEST_TYPE_SET_CONFIG);
	req->header.size =
		cpu_to_le32(sizeof(struct hpt_iop_request_set_config));
	req->header.result = cpu_to_le32(IOP_RESULT_PENDING);
	req->header.context = cpu_to_le32(IOP_REQUEST_TYPE_SET_CONFIG << 5);
	req->header.context_hi32 = 0;

	if (iop_send_sync_request_mvfrey(hba, 0, 20000)) {
		dprintk("Set config send cmd failed\n");
		return -1;
	}

	return 0;
}

static void hptiop_enable_intr_itl(struct hptiop_hba *hba)
{
	writel(~(IOPMU_OUTBOUND_INT_POSTQUEUE | IOPMU_OUTBOUND_INT_MSG0),
		&hba->u.itl.iop->outbound_intmask);
}

static void hptiop_enable_intr_mv(struct hptiop_hba *hba)
{
	writel(MVIOP_MU_OUTBOUND_INT_POSTQUEUE | MVIOP_MU_OUTBOUND_INT_MSG,
		&hba->u.mv.regs->outbound_intmask);
}

static void hptiop_enable_intr_mvfrey(struct hptiop_hba *hba)
{
	writel(CPU_TO_F0_DRBL_MSG_BIT, &(hba->u.mvfrey.mu->f0_doorbell_enable));
	writel(0x1, &(hba->u.mvfrey.mu->isr_enable));
	writel(0x1010, &(hba->u.mvfrey.mu->pcie_f0_int_enable));
}

static int hptiop_initialize_iop(struct hptiop_hba *hba)
{
	/* enable interrupts */
	hba->ops->enable_intr(hba);

	hba->initialized = 1;

	/* start background tasks */
	if (iop_send_sync_msg(hba,
			IOPMU_INBOUND_MSG0_START_BACKGROUND_TASK, 5000)) {
		printk(KERN_ERR "scsi%d: fail to start background task\n",
			hba->host->host_no);
		return -1;
	}
	return 0;
}

static void __iomem *hptiop_map_pci_bar(struct hptiop_hba *hba, int index)
{
	u32 mem_base_phy, length;
	void __iomem *mem_base_virt;

	struct pci_dev *pcidev = hba->pcidev;

	if (!(pci_resource_flags(pcidev, index) & IORESOURCE_MEM)) {
		printk(KERN_ERR "scsi%d: pci resource invalid\n",
				hba->host->host_no);
		return NULL;
	}

	mem_base_phy = pci_resource_start(pcidev, index);
	length = pci_resource_len(pcidev, index);
	mem_base_virt = ioremap(mem_base_phy, length);

	if (!mem_base_virt) {
		printk(KERN_ERR "scsi%d: Fail to ioremap memory space\n",
				hba->host->host_no);
		return NULL;
	}
	return mem_base_virt;
}

static int hptiop_map_pci_bar_itl(struct hptiop_hba *hba)
{
	struct pci_dev *pcidev = hba->pcidev;

	hba->u.itl.iop = hptiop_map_pci_bar(hba, 0);
	if (hba->u.itl.iop == NULL)
		return -1;
	if ((pcidev->device & 0xff00) == 0x4400) {
		hba->u.itl.plx = hba->u.itl.iop;
		hba->u.itl.iop = hptiop_map_pci_bar(hba, 2);
		if (hba->u.itl.iop == NULL) {
			iounmap(hba->u.itl.plx);
			return -1;
		}
	}
	return 0;
}

static void hptiop_unmap_pci_bar_itl(struct hptiop_hba *hba)
{
	if (hba->u.itl.plx)
		iounmap(hba->u.itl.plx);
	iounmap(hba->u.itl.iop);
}

static int hptiop_map_pci_bar_mv(struct hptiop_hba *hba)
{
	hba->u.mv.regs = hptiop_map_pci_bar(hba, 0);
	if (hba->u.mv.regs == NULL)
		return -1;

	hba->u.mv.mu = hptiop_map_pci_bar(hba, 2);
	if (hba->u.mv.mu == NULL) {
		iounmap(hba->u.mv.regs);
		return -1;
	}

	return 0;
}

static int hptiop_map_pci_bar_mvfrey(struct hptiop_hba *hba)
{
	hba->u.mvfrey.config = hptiop_map_pci_bar(hba, 0);
	if (hba->u.mvfrey.config == NULL)
		return -1;

	hba->u.mvfrey.mu = hptiop_map_pci_bar(hba, 2);
	if (hba->u.mvfrey.mu == NULL) {
		iounmap(hba->u.mvfrey.config);
		return -1;
	}

	return 0;
}

static void hptiop_unmap_pci_bar_mv(struct hptiop_hba *hba)
{
	iounmap(hba->u.mv.regs);
	iounmap(hba->u.mv.mu);
}

static void hptiop_unmap_pci_bar_mvfrey(struct hptiop_hba *hba)
{
	iounmap(hba->u.mvfrey.config);
	iounmap(hba->u.mvfrey.mu);
}

static void hptiop_message_callback(struct hptiop_hba *hba, u32 msg)
{
	dprintk("iop message 0x%x\n", msg);

	if (msg == IOPMU_INBOUND_MSG0_NOP ||
		msg == IOPMU_INBOUND_MSG0_RESET_COMM)
		hba->msg_done = 1;

	if (!hba->initialized)
		return;

	if (msg == IOPMU_INBOUND_MSG0_RESET) {
		atomic_set(&hba->resetting, 0);
		wake_up(&hba->reset_wq);
	} else if (msg <= IOPMU_INBOUND_MSG0_MAX)
		hba->msg_done = 1;
}

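/*
 * Free requests are kept on a simple singly linked list. No extra locking
 * is done here: in the I/O path both queuecommand (via DEF_SCSI_QCMD) and
 * the completion callbacks in the ISR already run under host_lock.
 */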
static struct hptiop_request *get_req(struct hptiop_hba *hba)
{
	struct hptiop_request *ret;

	dprintk("get_req : req=%p\n", hba->req_list);

	ret = hba->req_list;
	if (ret)
		hba->req_list = ret->next;

	return ret;
}

static void free_req(struct hptiop_hba *hba, struct hptiop_request *req)
{
	dprintk("free_req(%d, %p)\n", req->index, req);
	req->next = hba->req_list;
	hba->req_list = req;
}

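/*
 * Translate an IOP completion into a SCSI midlayer result. On a check
 * condition the firmware returns sense data in place of the S/G list, so
 * it is copied straight out of req->sg_list into the sense buffer.
 */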
static void hptiop_finish_scsi_req(struct hptiop_hba *hba, u32 tag,
				struct hpt_iop_request_scsi_command *req)
{
	struct scsi_cmnd *scp;

	dprintk("hptiop_finish_scsi_req: req=%p, type=%d, "
			"result=%d, context=0x%x tag=%d\n",
			req, req->header.type, req->header.result,
			req->header.context, tag);

	BUG_ON(!req->header.result);
	BUG_ON(req->header.type != cpu_to_le32(IOP_REQUEST_TYPE_SCSI_COMMAND));

	scp = hba->reqs[tag].scp;

	if (HPT_SCP(scp)->mapped)
		scsi_dma_unmap(scp);

	switch (le32_to_cpu(req->header.result)) {
	case IOP_RESULT_SUCCESS:
		scsi_set_resid(scp,
			scsi_bufflen(scp) - le32_to_cpu(req->dataxfer_length));
		scp->result = (DID_OK<<16);
		break;
	case IOP_RESULT_BAD_TARGET:
		scp->result = (DID_BAD_TARGET<<16);
		break;
	case IOP_RESULT_BUSY:
		scp->result = (DID_BUS_BUSY<<16);
		break;
	case IOP_RESULT_RESET:
		scp->result = (DID_RESET<<16);
		break;
	case IOP_RESULT_FAIL:
		scp->result = (DID_ERROR<<16);
		break;
	case IOP_RESULT_INVALID_REQUEST:
		scp->result = (DID_ABORT<<16);
		break;
	case IOP_RESULT_CHECK_CONDITION:
		scsi_set_resid(scp,
			scsi_bufflen(scp) - le32_to_cpu(req->dataxfer_length));
		scp->result = SAM_STAT_CHECK_CONDITION;
		memcpy(scp->sense_buffer, &req->sg_list, SCSI_SENSE_BUFFERSIZE);
		goto skip_resid;

	default:
		scp->result = DRIVER_INVALID << 24 | DID_ABORT << 16;
		break;
	}

	scsi_set_resid(scp,
		scsi_bufflen(scp) - le32_to_cpu(req->dataxfer_length));

skip_resid:
	dprintk("scsi_done(%p)\n", scp);
	scp->scsi_done(scp);
	free_req(hba, &hba->reqs[tag]);
}

static void hptiop_host_request_callback_itl(struct hptiop_hba *hba, u32 _tag)
{
	struct hpt_iop_request_scsi_command *req;
	u32 tag;

	if (hba->iopintf_v2) {
		tag = _tag & ~IOPMU_QUEUE_REQUEST_RESULT_BIT;
		req = hba->reqs[tag].req_virt;
		if (likely(_tag & IOPMU_QUEUE_REQUEST_RESULT_BIT))
			req->header.result = cpu_to_le32(IOP_RESULT_SUCCESS);
	} else {
		tag = _tag;
		req = hba->reqs[tag].req_virt;
	}

	hptiop_finish_scsi_req(hba, tag, req);
}

static void hptiop_iop_request_callback_itl(struct hptiop_hba *hba, u32 tag)
{
	struct hpt_iop_request_header __iomem *req;
	struct hpt_iop_request_ioctl_command __iomem *p;
	struct hpt_ioctl_k *arg;

	req = (struct hpt_iop_request_header __iomem *)
			((unsigned long)hba->u.itl.iop + tag);
	dprintk("hptiop_iop_request_callback_itl: req=%p, type=%d, "
			"result=%d, context=0x%x tag=%d\n",
			req, readl(&req->type), readl(&req->result),
			readl(&req->context), tag);

	BUG_ON(!readl(&req->result));
	BUG_ON(readl(&req->type) != IOP_REQUEST_TYPE_IOCTL_COMMAND);

	p = (struct hpt_iop_request_ioctl_command __iomem *)req;
	arg = (struct hpt_ioctl_k *)(unsigned long)
		(readl(&req->context) |
			((u64)readl(&req->context_hi32) << 32));

	if (readl(&req->result) == IOP_RESULT_SUCCESS) {
		arg->result = HPT_IOCTL_RESULT_OK;

		if (arg->outbuf_size)
			memcpy_fromio(arg->outbuf,
				&p->buf[(readl(&p->inbuf_size) + 3) & ~3],
				arg->outbuf_size);

		if (arg->bytes_returned)
			*arg->bytes_returned = arg->outbuf_size;
	} else
		arg->result = HPT_IOCTL_RESULT_FAILED;

	arg->done(arg);
	writel(tag, &hba->u.itl.iop->outbound_queue);
}

static irqreturn_t hptiop_intr(int irq, void *dev_id)
{
	struct hptiop_hba *hba = dev_id;
	int handled;
	unsigned long flags;

	spin_lock_irqsave(hba->host->host_lock, flags);
	handled = hba->ops->iop_intr(hba);
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	return handled;
}

static int hptiop_buildsgl(struct scsi_cmnd *scp, struct hpt_iopsg *psg)
{
	struct Scsi_Host *host = scp->device->host;
	struct hptiop_hba *hba = (struct hptiop_hba *)host->hostdata;
	struct scatterlist *sg;
	int idx, nseg;

	nseg = scsi_dma_map(scp);
	BUG_ON(nseg < 0);
	if (!nseg)
		return 0;

	HPT_SCP(scp)->sgcnt = nseg;
	HPT_SCP(scp)->mapped = 1;

	BUG_ON(HPT_SCP(scp)->sgcnt > hba->max_sg_descriptors);

	scsi_for_each_sg(scp, sg, HPT_SCP(scp)->sgcnt, idx) {
		psg[idx].pci_address = cpu_to_le64(sg_dma_address(sg)) |
			hba->ops->host_phy_flag;
		psg[idx].size = cpu_to_le32(sg_dma_len(sg));
		psg[idx].eot = (idx == HPT_SCP(scp)->sgcnt - 1) ?
			cpu_to_le32(1) : 0;
	}
	return HPT_SCP(scp)->sgcnt;
}

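/*
 * ITL requests are posted by writing the request's shifted physical
 * address (plus IOPMU_QUEUE_ADDR_HOST_BIT) into the inbound queue. On v2
 * interfaces the low bits of the queue entry also encode a size class
 * (<256, <512, or larger), which appears to tell the firmware how much of
 * the request to fetch.
 */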
static void hptiop_post_req_itl(struct hptiop_hba *hba,
					struct hptiop_request *_req)
{
	struct hpt_iop_request_header *reqhdr = _req->req_virt;

	reqhdr->context = cpu_to_le32(IOPMU_QUEUE_ADDR_HOST_BIT |
							(u32)_req->index);
	reqhdr->context_hi32 = 0;

	if (hba->iopintf_v2) {
		u32 size, size_bits;

		size = le32_to_cpu(reqhdr->size);
		if (size < 256)
			size_bits = IOPMU_QUEUE_REQUEST_SIZE_BIT;
		else if (size < 512)
			size_bits = IOPMU_QUEUE_ADDR_HOST_BIT;
		else
			size_bits = IOPMU_QUEUE_REQUEST_SIZE_BIT |
						IOPMU_QUEUE_ADDR_HOST_BIT;
		writel(_req->req_shifted_phy | size_bits,
			&hba->u.itl.iop->inbound_queue);
	} else
		writel(_req->req_shifted_phy | IOPMU_QUEUE_ADDR_HOST_BIT,
					&hba->u.itl.iop->inbound_queue);
}

static void hptiop_post_req_mv(struct hptiop_hba *hba,
					struct hptiop_request *_req)
{
	struct hpt_iop_request_header *reqhdr = _req->req_virt;
	u32 size, size_bit;

	reqhdr->context = cpu_to_le32(_req->index << 8 |
					IOP_REQUEST_TYPE_SCSI_COMMAND << 5);
	reqhdr->context_hi32 = 0;
	size = le32_to_cpu(reqhdr->size);

	if (size <= 256)
		size_bit = 0;
	else if (size <= 256*2)
		size_bit = 1;
	else if (size <= 256*3)
		size_bit = 2;
	else
		size_bit = 3;

	mv_inbound_write((_req->req_shifted_phy << 5) |
		MVIOP_MU_QUEUE_ADDR_HOST_BIT | size_bit, hba);
}

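/*
 * MVFrey uses an in-memory inbound list instead of a doorbell FIFO: each
 * slot holds the request's physical address and length, and the write
 * pointer carries a toggle bit (CL_POINTER_TOGGLE) that flips on every
 * wrap so the firmware can tell fresh entries from stale ones.
 */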
static void hptiop_post_req_mvfrey(struct hptiop_hba *hba,
					struct hptiop_request *_req)
{
	struct hpt_iop_request_header *reqhdr = _req->req_virt;
	u32 index;

	reqhdr->flags |= cpu_to_le32(IOP_REQUEST_FLAG_OUTPUT_CONTEXT |
			IOP_REQUEST_FLAG_ADDR_BITS |
			((_req->req_shifted_phy >> 11) & 0xffff0000));
	reqhdr->context = cpu_to_le32(IOPMU_QUEUE_ADDR_HOST_BIT |
			(_req->index << 4) | reqhdr->type);
	reqhdr->context_hi32 = cpu_to_le32((_req->req_shifted_phy << 5) &
			0xffffffff);

	hba->u.mvfrey.inlist_wptr++;
	index = hba->u.mvfrey.inlist_wptr & 0x3fff;

	if (index == hba->u.mvfrey.list_count) {
		index = 0;
		hba->u.mvfrey.inlist_wptr &= ~0x3fff;
		hba->u.mvfrey.inlist_wptr ^= CL_POINTER_TOGGLE;
	}

	hba->u.mvfrey.inlist[index].addr =
			(dma_addr_t)_req->req_shifted_phy << 5;
	hba->u.mvfrey.inlist[index].intrfc_len = (reqhdr->size + 3) / 4;
	writel(hba->u.mvfrey.inlist_wptr,
		&(hba->u.mvfrey.mu->inbound_write_ptr));
	readl(&(hba->u.mvfrey.mu->inbound_write_ptr));
}

static int hptiop_reset_comm_itl(struct hptiop_hba *hba)
{
	return 0;
}

static int hptiop_reset_comm_mv(struct hptiop_hba *hba)
{
	return 0;
}

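/*
 * RESET_COMM tears down the firmware's view of the rings, so once the
 * message is acknowledged the inbound/outbound list bases and the shadow
 * copy pointer have to be reprogrammed and the ring pointers reset to
 * their initial (wrapped) values.
 */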
static int hptiop_reset_comm_mvfrey(struct hptiop_hba *hba)
{
	u32 list_count = hba->u.mvfrey.list_count;

	if (iop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_RESET_COMM, 3000))
		return -1;

	/* wait 100ms for MCU ready */
	msleep(100);

	writel(cpu_to_le32(hba->u.mvfrey.inlist_phy & 0xffffffff),
			&(hba->u.mvfrey.mu->inbound_base));
	writel(cpu_to_le32((hba->u.mvfrey.inlist_phy >> 16) >> 16),
			&(hba->u.mvfrey.mu->inbound_base_high));

	writel(cpu_to_le32(hba->u.mvfrey.outlist_phy & 0xffffffff),
			&(hba->u.mvfrey.mu->outbound_base));
	writel(cpu_to_le32((hba->u.mvfrey.outlist_phy >> 16) >> 16),
			&(hba->u.mvfrey.mu->outbound_base_high));

	writel(cpu_to_le32(hba->u.mvfrey.outlist_cptr_phy & 0xffffffff),
			&(hba->u.mvfrey.mu->outbound_shadow_base));
	writel(cpu_to_le32((hba->u.mvfrey.outlist_cptr_phy >> 16) >> 16),
			&(hba->u.mvfrey.mu->outbound_shadow_base_high));

	hba->u.mvfrey.inlist_wptr = (list_count - 1) | CL_POINTER_TOGGLE;
	*hba->u.mvfrey.outlist_cptr = (list_count - 1) | CL_POINTER_TOGGLE;
	hba->u.mvfrey.outlist_rptr = list_count - 1;
	return 0;
}

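/*
 * queuecommand: grab a preallocated request, validate the target, build
 * the S/G table, fill in the request header and CDB, and hand the request
 * to the per-family post_req hook. Out-of-range targets complete
 * immediately with DID_BAD_TARGET.
 */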
static int hptiop_queuecommand_lck(struct scsi_cmnd *scp,
				void (*done)(struct scsi_cmnd *))
{
	struct Scsi_Host *host = scp->device->host;
	struct hptiop_hba *hba = (struct hptiop_hba *)host->hostdata;
	struct hpt_iop_request_scsi_command *req;
	int sg_count = 0;
	struct hptiop_request *_req;

	BUG_ON(!done);
	scp->scsi_done = done;

	_req = get_req(hba);
	if (_req == NULL) {
		dprintk("hptiop_queuecmd : no free req\n");
		return SCSI_MLQUEUE_HOST_BUSY;
	}

	_req->scp = scp;

	dprintk("hptiop_queuecmd(scp=%p) %d/%d/%d/%llu cdb=(%08x-%08x-%08x-%08x) "
			"req_index=%d, req=%p\n",
			scp,
			host->host_no, scp->device->channel,
			scp->device->id, scp->device->lun,
			cpu_to_be32(((u32 *)scp->cmnd)[0]),
			cpu_to_be32(((u32 *)scp->cmnd)[1]),
			cpu_to_be32(((u32 *)scp->cmnd)[2]),
			cpu_to_be32(((u32 *)scp->cmnd)[3]),
			_req->index, _req->req_virt);

	scp->result = 0;

	if (scp->device->channel ||
			(scp->device->id > hba->max_devices) ||
			((scp->device->id == (hba->max_devices-1)) && scp->device->lun)) {
		scp->result = DID_BAD_TARGET << 16;
		free_req(hba, _req);
		goto cmd_done;
	}

	req = _req->req_virt;

	/* build S/G table */
	sg_count = hptiop_buildsgl(scp, req->sg_list);
	if (!sg_count)
		HPT_SCP(scp)->mapped = 0;

	req->header.flags = cpu_to_le32(IOP_REQUEST_FLAG_OUTPUT_CONTEXT);
	req->header.type = cpu_to_le32(IOP_REQUEST_TYPE_SCSI_COMMAND);
	req->header.result = cpu_to_le32(IOP_RESULT_PENDING);
	req->dataxfer_length = cpu_to_le32(scsi_bufflen(scp));
	req->channel = scp->device->channel;
	req->target = scp->device->id;
	req->lun = scp->device->lun;
	req->header.size = cpu_to_le32(
				sizeof(struct hpt_iop_request_scsi_command)
				 - sizeof(struct hpt_iopsg)
				 + sg_count * sizeof(struct hpt_iopsg));

	memcpy(req->cdb, scp->cmnd, sizeof(req->cdb));
	hba->ops->post_req(hba, _req);
	return 0;

cmd_done:
	dprintk("scsi_done(scp=%p)\n", scp);
	scp->scsi_done(scp);
	return 0;
}

static DEF_SCSI_QCMD(hptiop_queuecommand)

static const char *hptiop_info(struct Scsi_Host *host)
{
	return driver_name_long;
}

static int hptiop_reset_hba(struct hptiop_hba *hba)
{
	if (atomic_xchg(&hba->resetting, 1) == 0) {
		atomic_inc(&hba->reset_count);
		hba->ops->post_msg(hba, IOPMU_INBOUND_MSG0_RESET);
	}

	wait_event_timeout(hba->reset_wq,
			atomic_read(&hba->resetting) == 0, 60 * HZ);

	if (atomic_read(&hba->resetting)) {
		/* IOP is in unknown state, abort reset */
		printk(KERN_ERR "scsi%d: reset failed\n", hba->host->host_no);
		return -1;
	}

	if (iop_send_sync_msg(hba,
		IOPMU_INBOUND_MSG0_START_BACKGROUND_TASK, 5000)) {
		dprintk("scsi%d: fail to start background task\n",
				hba->host->host_no);
	}

	return 0;
}

static int hptiop_reset(struct scsi_cmnd *scp)
{
	struct hptiop_hba *hba = (struct hptiop_hba *)scp->device->host->hostdata;

	printk(KERN_WARNING "hptiop_reset(%d/%d/%d)\n",
	       scp->device->host->host_no, -1, -1);

	return hptiop_reset_hba(hba) ? FAILED : SUCCESS;
}

static int hptiop_adjust_disk_queue_depth(struct scsi_device *sdev,
					  int queue_depth)
{
	struct hptiop_hba *hba = (struct hptiop_hba *)sdev->host->hostdata;

	if (queue_depth > hba->max_requests)
		queue_depth = hba->max_requests;
	return scsi_change_queue_depth(sdev, queue_depth);
}

static ssize_t hptiop_show_version(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%s\n", driver_ver);
}

static ssize_t hptiop_show_fw_version(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *host = class_to_shost(dev);
	struct hptiop_hba *hba = (struct hptiop_hba *)host->hostdata;

	return snprintf(buf, PAGE_SIZE, "%d.%d.%d.%d\n",
				hba->firmware_version >> 24,
				(hba->firmware_version >> 16) & 0xff,
				(hba->firmware_version >> 8) & 0xff,
				hba->firmware_version & 0xff);
}

static struct device_attribute hptiop_attr_version = {
	.attr = {
		.name = "driver-version",
		.mode = S_IRUGO,
	},
	.show = hptiop_show_version,
};

static struct device_attribute hptiop_attr_fw_version = {
	.attr = {
		.name = "firmware-version",
		.mode = S_IRUGO,
	},
	.show = hptiop_show_fw_version,
};

static struct device_attribute *hptiop_attrs[] = {
	&hptiop_attr_version,
	&hptiop_attr_fw_version,
	NULL
};

static int hptiop_slave_config(struct scsi_device *sdev)
{
	if (sdev->type == TYPE_TAPE)
		blk_queue_max_hw_sectors(sdev->request_queue, 8192);

	return 0;
}

static struct scsi_host_template driver_template = {
	.module                     = THIS_MODULE,
	.name                       = driver_name,
	.queuecommand               = hptiop_queuecommand,
	.eh_host_reset_handler      = hptiop_reset,
	.info                       = hptiop_info,
	.emulated                   = 0,
	.proc_name                  = driver_name,
	.shost_attrs                = hptiop_attrs,
	.slave_configure            = hptiop_slave_config,
	.this_id                    = -1,
	.change_queue_depth         = hptiop_adjust_disk_queue_depth,
};

static int hptiop_internal_memalloc_itl(struct hptiop_hba *hba)
{
	return 0;
}

static int hptiop_internal_memalloc_mv(struct hptiop_hba *hba)
{
	hba->u.mv.internal_req = dma_alloc_coherent(&hba->pcidev->dev,
			0x800, &hba->u.mv.internal_req_phy, GFP_KERNEL);
	if (hba->u.mv.internal_req)
		return 0;
	else
		return -1;
}

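/*
 * MVFrey keeps everything in one coherent allocation: 0x800 bytes for the
 * internal (sync) request, then the inbound list, the outbound list, and
 * finally the 32-bit shadow copy of the outbound pointer. The list depth
 * comes from the high 16 bits of inbound_conf_ctl.
 */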
static int hptiop_internal_memalloc_mvfrey(struct hptiop_hba *hba)
{
	u32 list_count = readl(&hba->u.mvfrey.mu->inbound_conf_ctl);
	char *p;
	dma_addr_t phy;

	BUG_ON(hba->max_request_size == 0);

	if (list_count == 0) {
		BUG_ON(1);
		return -1;
	}

	list_count >>= 16;

	hba->u.mvfrey.list_count = list_count;
	hba->u.mvfrey.internal_mem_size = 0x800 +
			list_count * sizeof(struct mvfrey_inlist_entry) +
			list_count * sizeof(struct mvfrey_outlist_entry) +
			sizeof(int);

	p = dma_alloc_coherent(&hba->pcidev->dev,
			hba->u.mvfrey.internal_mem_size, &phy, GFP_KERNEL);
	if (!p)
		return -1;

	hba->u.mvfrey.internal_req.req_virt = p;
	hba->u.mvfrey.internal_req.req_shifted_phy = phy >> 5;
	hba->u.mvfrey.internal_req.scp = NULL;
	hba->u.mvfrey.internal_req.next = NULL;

	p += 0x800;
	phy += 0x800;

	hba->u.mvfrey.inlist = (struct mvfrey_inlist_entry *)p;
	hba->u.mvfrey.inlist_phy = phy;

	p += list_count * sizeof(struct mvfrey_inlist_entry);
	phy += list_count * sizeof(struct mvfrey_inlist_entry);

	hba->u.mvfrey.outlist = (struct mvfrey_outlist_entry *)p;
	hba->u.mvfrey.outlist_phy = phy;

	p += list_count * sizeof(struct mvfrey_outlist_entry);
	phy += list_count * sizeof(struct mvfrey_outlist_entry);

	hba->u.mvfrey.outlist_cptr = (__le32 *)p;
	hba->u.mvfrey.outlist_cptr_phy = phy;

	return 0;
}

static int hptiop_internal_memfree_itl(struct hptiop_hba *hba)
{
	return 0;
}

static int hptiop_internal_memfree_mv(struct hptiop_hba *hba)
{
	if (hba->u.mv.internal_req) {
		dma_free_coherent(&hba->pcidev->dev, 0x800,
			hba->u.mv.internal_req, hba->u.mv.internal_req_phy);
		return 0;
	} else
		return -1;
}

static int hptiop_internal_memfree_mvfrey(struct hptiop_hba *hba)
{
	if (hba->u.mvfrey.internal_req.req_virt) {
		dma_free_coherent(&hba->pcidev->dev,
			hba->u.mvfrey.internal_mem_size,
			hba->u.mvfrey.internal_req.req_virt,
			(dma_addr_t)
			hba->u.mvfrey.internal_req.req_shifted_phy << 5);
		return 0;
	} else
		return -1;
}

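/*
 * Probe sequence: enable the PCI device, set the DMA mask (64-bit where
 * the family supports it, falling back to 32-bit), map the BARs, wait for
 * firmware ready, exchange get/set config, allocate the per-request DMA
 * buffers (32-byte aligned), then enable interrupts and register the SCSI
 * host.
 */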
static int hptiop_probe(struct pci_dev *pcidev, const struct pci_device_id *id)
{
	struct Scsi_Host *host = NULL;
	struct hptiop_hba *hba;
	struct hptiop_adapter_ops *iop_ops;
	struct hpt_iop_request_get_config iop_config;
	struct hpt_iop_request_set_config set_config;
	dma_addr_t start_phy;
	void *start_virt;
	u32 offset, i, req_size;
	int rc;

	dprintk("hptiop_probe(%p)\n", pcidev);

	if (pci_enable_device(pcidev)) {
		printk(KERN_ERR "hptiop: fail to enable pci device\n");
		return -ENODEV;
	}

	printk(KERN_INFO "adapter at PCI %d:%d:%d, IRQ %d\n",
		pcidev->bus->number, pcidev->devfn >> 3, pcidev->devfn & 7,
		pcidev->irq);

	pci_set_master(pcidev);

	/* Enable 64bit DMA if possible */
	iop_ops = (struct hptiop_adapter_ops *)id->driver_data;
	rc = dma_set_mask(&pcidev->dev,
			  DMA_BIT_MASK(iop_ops->hw_dma_bit_mask));
	if (rc)
		rc = dma_set_mask(&pcidev->dev, DMA_BIT_MASK(32));

	if (rc) {
		printk(KERN_ERR "hptiop: fail to set dma_mask\n");
		goto disable_pci_device;
	}

	if (pci_request_regions(pcidev, driver_name)) {
		printk(KERN_ERR "hptiop: pci_request_regions failed\n");
		goto disable_pci_device;
	}

	host = scsi_host_alloc(&driver_template, sizeof(struct hptiop_hba));
	if (!host) {
		printk(KERN_ERR "hptiop: fail to alloc scsi host\n");
		goto free_pci_regions;
	}

	hba = (struct hptiop_hba *)host->hostdata;
	memset(hba, 0, sizeof(struct hptiop_hba));

	hba->ops = iop_ops;
	hba->pcidev = pcidev;
	hba->host = host;
	hba->initialized = 0;
	hba->iopintf_v2 = 0;

	atomic_set(&hba->resetting, 0);
	atomic_set(&hba->reset_count, 0);

	init_waitqueue_head(&hba->reset_wq);
	init_waitqueue_head(&hba->ioctl_wq);

	host->max_lun = 128;
	host->max_channel = 0;
	host->io_port = 0;
	host->n_io_port = 0;
	host->irq = pcidev->irq;

	if (hba->ops->map_pci_bar(hba))
		goto free_scsi_host;

	if (hba->ops->iop_wait_ready(hba, 20000)) {
		printk(KERN_ERR "scsi%d: firmware not ready\n",
				hba->host->host_no);
		goto unmap_pci_bar;
	}

	if (hba->ops->family == MV_BASED_IOP) {
		if (hba->ops->internal_memalloc(hba)) {
			printk(KERN_ERR "scsi%d: internal_memalloc failed\n",
				hba->host->host_no);
			goto unmap_pci_bar;
		}
	}

	if (hba->ops->get_config(hba, &iop_config)) {
		printk(KERN_ERR "scsi%d: get config failed\n",
				hba->host->host_no);
		goto unmap_pci_bar;
	}

	hba->max_requests = min(le32_to_cpu(iop_config.max_requests),
				HPTIOP_MAX_REQUESTS);
	hba->max_devices = le32_to_cpu(iop_config.max_devices);
	hba->max_request_size = le32_to_cpu(iop_config.request_size);
	hba->max_sg_descriptors = le32_to_cpu(iop_config.max_sg_count);
	hba->firmware_version = le32_to_cpu(iop_config.firmware_version);
	hba->interface_version = le32_to_cpu(iop_config.interface_version);
	hba->sdram_size = le32_to_cpu(iop_config.sdram_size);

	if (hba->ops->family == MVFREY_BASED_IOP) {
		if (hba->ops->internal_memalloc(hba)) {
			printk(KERN_ERR "scsi%d: internal_memalloc failed\n",
				hba->host->host_no);
			goto unmap_pci_bar;
		}
		if (hba->ops->reset_comm(hba)) {
			printk(KERN_ERR "scsi%d: reset comm failed\n",
					hba->host->host_no);
			goto unmap_pci_bar;
		}
	}

	if (hba->firmware_version > 0x01020000 ||
			hba->interface_version > 0x01020000)
		hba->iopintf_v2 = 1;

	host->max_sectors = le32_to_cpu(iop_config.data_transfer_length) >> 9;
	host->max_id = le32_to_cpu(iop_config.max_devices);
	host->sg_tablesize = le32_to_cpu(iop_config.max_sg_count);
	host->can_queue = le32_to_cpu(iop_config.max_requests);
	host->cmd_per_lun = le32_to_cpu(iop_config.max_requests);
	host->max_cmd_len = 16;

	req_size = sizeof(struct hpt_iop_request_scsi_command)
		+ sizeof(struct hpt_iopsg) * (hba->max_sg_descriptors - 1);
	if ((req_size & 0x1f) != 0)
		req_size = (req_size + 0x1f) & ~0x1f;

	memset(&set_config, 0, sizeof(struct hpt_iop_request_set_config));
	set_config.iop_id = cpu_to_le32(host->host_no);
	set_config.vbus_id = cpu_to_le16(host->host_no);
	set_config.max_host_request_size = cpu_to_le16(req_size);

	if (hba->ops->set_config(hba, &set_config)) {
		printk(KERN_ERR "scsi%d: set config failed\n",
				hba->host->host_no);
		goto unmap_pci_bar;
	}

	pci_set_drvdata(pcidev, host);

	if (request_irq(pcidev->irq, hptiop_intr, IRQF_SHARED,
					driver_name, hba)) {
		printk(KERN_ERR "scsi%d: request irq %d failed\n",
					hba->host->host_no, pcidev->irq);
		goto unmap_pci_bar;
	}

	/* Allocate request mem */

	dprintk("req_size=%d, max_requests=%d\n", req_size, hba->max_requests);

	hba->req_size = req_size;
	hba->req_list = NULL;

	for (i = 0; i < hba->max_requests; i++) {
		start_virt = dma_alloc_coherent(&pcidev->dev,
					hba->req_size + 0x20,
					&start_phy, GFP_KERNEL);

		if (!start_virt) {
			printk(KERN_ERR "scsi%d: fail to alloc request mem\n",
						hba->host->host_no);
			goto free_request_mem;
		}

		hba->dma_coherent[i] = start_virt;
		hba->dma_coherent_handle[i] = start_phy;

		if ((start_phy & 0x1f) != 0) {
			offset = ((start_phy + 0x1f) & ~0x1f) - start_phy;
			start_phy += offset;
			start_virt += offset;
		}

		hba->reqs[i].next = NULL;
		hba->reqs[i].req_virt = start_virt;
		hba->reqs[i].req_shifted_phy = start_phy >> 5;
		hba->reqs[i].index = i;
		free_req(hba, &hba->reqs[i]);
	}

	/* Enable Interrupt and start background task */
	if (hptiop_initialize_iop(hba))
		goto free_request_mem;

	if (scsi_add_host(host, &pcidev->dev)) {
		printk(KERN_ERR "scsi%d: scsi_add_host failed\n",
					hba->host->host_no);
		goto free_request_mem;
	}

	scsi_scan_host(host);

	dprintk("scsi%d: hptiop_probe successfully\n", hba->host->host_no);
	return 0;

free_request_mem:
	for (i = 0; i < hba->max_requests; i++) {
		if (hba->dma_coherent[i] && hba->dma_coherent_handle[i])
			dma_free_coherent(&hba->pcidev->dev,
					hba->req_size + 0x20,
					hba->dma_coherent[i],
					hba->dma_coherent_handle[i]);
		else
			break;
	}

	free_irq(hba->pcidev->irq, hba);

unmap_pci_bar:
	hba->ops->internal_memfree(hba);

	hba->ops->unmap_pci_bar(hba);

free_scsi_host:
	scsi_host_put(host);

free_pci_regions:
	pci_release_regions(pcidev);

disable_pci_device:
	pci_disable_device(pcidev);

	dprintk("scsi%d: hptiop_probe fail\n", host ? host->host_no : 0);
	return -ENODEV;
}

static void hptiop_shutdown(struct pci_dev *pcidev)
{
	struct Scsi_Host *host = pci_get_drvdata(pcidev);
	struct hptiop_hba *hba = (struct hptiop_hba *)host->hostdata;

	dprintk("hptiop_shutdown(%p)\n", hba);

	/* stop the iop */
	if (iop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_SHUTDOWN, 60000))
		printk(KERN_ERR "scsi%d: shutdown the iop timeout\n",
					hba->host->host_no);

	/* disable all outbound interrupts */
	hba->ops->disable_intr(hba);
}

static void hptiop_disable_intr_itl(struct hptiop_hba *hba)
{
	u32 int_mask;

	int_mask = readl(&hba->u.itl.iop->outbound_intmask);
	writel(int_mask |
		IOPMU_OUTBOUND_INT_MSG0 | IOPMU_OUTBOUND_INT_POSTQUEUE,
		&hba->u.itl.iop->outbound_intmask);
	readl(&hba->u.itl.iop->outbound_intmask);
}

static void hptiop_disable_intr_mv(struct hptiop_hba *hba)
{
	writel(0, &hba->u.mv.regs->outbound_intmask);
	readl(&hba->u.mv.regs->outbound_intmask);
}

static void hptiop_disable_intr_mvfrey(struct hptiop_hba *hba)
{
	writel(0, &(hba->u.mvfrey.mu->f0_doorbell_enable));
	readl(&(hba->u.mvfrey.mu->f0_doorbell_enable));
	writel(0, &(hba->u.mvfrey.mu->isr_enable));
	readl(&(hba->u.mvfrey.mu->isr_enable));
	writel(0, &(hba->u.mvfrey.mu->pcie_f0_int_enable));
	readl(&(hba->u.mvfrey.mu->pcie_f0_int_enable));
}

static void hptiop_remove(struct pci_dev *pcidev)
{
	struct Scsi_Host *host = pci_get_drvdata(pcidev);
	struct hptiop_hba *hba = (struct hptiop_hba *)host->hostdata;
	u32 i;

	dprintk("scsi%d: hptiop_remove\n", hba->host->host_no);

	scsi_remove_host(host);

	hptiop_shutdown(pcidev);

	free_irq(hba->pcidev->irq, hba);

	for (i = 0; i < hba->max_requests; i++) {
		if (hba->dma_coherent[i] && hba->dma_coherent_handle[i])
			dma_free_coherent(&hba->pcidev->dev,
					hba->req_size + 0x20,
					hba->dma_coherent[i],
					hba->dma_coherent_handle[i]);
		else
			break;
	}

	hba->ops->internal_memfree(hba);

	hba->ops->unmap_pci_bar(hba);

	pci_release_regions(hba->pcidev);
	pci_set_drvdata(hba->pcidev, NULL);
	pci_disable_device(hba->pcidev);

	scsi_host_put(host);
}

static struct hptiop_adapter_ops hptiop_itl_ops = {
	.family            = INTEL_BASED_IOP,
	.iop_wait_ready    = iop_wait_ready_itl,
	.internal_memalloc = hptiop_internal_memalloc_itl,
	.internal_memfree  = hptiop_internal_memfree_itl,
	.map_pci_bar       = hptiop_map_pci_bar_itl,
	.unmap_pci_bar     = hptiop_unmap_pci_bar_itl,
	.enable_intr       = hptiop_enable_intr_itl,
	.disable_intr      = hptiop_disable_intr_itl,
	.get_config        = iop_get_config_itl,
	.set_config        = iop_set_config_itl,
	.iop_intr          = iop_intr_itl,
	.post_msg          = hptiop_post_msg_itl,
	.post_req          = hptiop_post_req_itl,
	.hw_dma_bit_mask   = 64,
	.reset_comm        = hptiop_reset_comm_itl,
	.host_phy_flag     = cpu_to_le64(0),
};

static struct hptiop_adapter_ops hptiop_mv_ops = {
	.family            = MV_BASED_IOP,
	.iop_wait_ready    = iop_wait_ready_mv,
	.internal_memalloc = hptiop_internal_memalloc_mv,
	.internal_memfree  = hptiop_internal_memfree_mv,
	.map_pci_bar       = hptiop_map_pci_bar_mv,
	.unmap_pci_bar     = hptiop_unmap_pci_bar_mv,
	.enable_intr       = hptiop_enable_intr_mv,
	.disable_intr      = hptiop_disable_intr_mv,
	.get_config        = iop_get_config_mv,
	.set_config        = iop_set_config_mv,
	.iop_intr          = iop_intr_mv,
	.post_msg          = hptiop_post_msg_mv,
	.post_req          = hptiop_post_req_mv,
	.hw_dma_bit_mask   = 33,
	.reset_comm        = hptiop_reset_comm_mv,
	.host_phy_flag     = cpu_to_le64(0),
};

static struct hptiop_adapter_ops hptiop_mvfrey_ops = {
	.family            = MVFREY_BASED_IOP,
	.iop_wait_ready    = iop_wait_ready_mvfrey,
	.internal_memalloc = hptiop_internal_memalloc_mvfrey,
	.internal_memfree  = hptiop_internal_memfree_mvfrey,
	.map_pci_bar       = hptiop_map_pci_bar_mvfrey,
	.unmap_pci_bar     = hptiop_unmap_pci_bar_mvfrey,
	.enable_intr       = hptiop_enable_intr_mvfrey,
	.disable_intr      = hptiop_disable_intr_mvfrey,
	.get_config        = iop_get_config_mvfrey,
	.set_config        = iop_set_config_mvfrey,
	.iop_intr          = iop_intr_mvfrey,
	.post_msg          = hptiop_post_msg_mvfrey,
	.post_req          = hptiop_post_req_mvfrey,
	.hw_dma_bit_mask   = 64,
	.reset_comm        = hptiop_reset_comm_mvfrey,
	.host_phy_flag     = cpu_to_le64(1),
};

static struct pci_device_id hptiop_id_table[] = {
	{ PCI_VDEVICE(TTI, 0x3220), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x3320), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x3410), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x3510), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x3511), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x3520), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x3521), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x3522), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x3530), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x3540), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x3560), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x4210), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x4211), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x4310), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x4311), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x4320), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x4321), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x4322), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x4400), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x3120), (kernel_ulong_t)&hptiop_mv_ops },
	{ PCI_VDEVICE(TTI, 0x3122), (kernel_ulong_t)&hptiop_mv_ops },
	{ PCI_VDEVICE(TTI, 0x3020), (kernel_ulong_t)&hptiop_mv_ops },
	{ PCI_VDEVICE(TTI, 0x4520), (kernel_ulong_t)&hptiop_mvfrey_ops },
	{ PCI_VDEVICE(TTI, 0x4522), (kernel_ulong_t)&hptiop_mvfrey_ops },
	{ PCI_VDEVICE(TTI, 0x3610), (kernel_ulong_t)&hptiop_mvfrey_ops },
	{ PCI_VDEVICE(TTI, 0x3611), (kernel_ulong_t)&hptiop_mvfrey_ops },
	{ PCI_VDEVICE(TTI, 0x3620), (kernel_ulong_t)&hptiop_mvfrey_ops },
	{ PCI_VDEVICE(TTI, 0x3622), (kernel_ulong_t)&hptiop_mvfrey_ops },
	{ PCI_VDEVICE(TTI, 0x3640), (kernel_ulong_t)&hptiop_mvfrey_ops },
	{ PCI_VDEVICE(TTI, 0x3660), (kernel_ulong_t)&hptiop_mvfrey_ops },
	{ PCI_VDEVICE(TTI, 0x3680), (kernel_ulong_t)&hptiop_mvfrey_ops },
	{ PCI_VDEVICE(TTI, 0x3690), (kernel_ulong_t)&hptiop_mvfrey_ops },
	{},
};

MODULE_DEVICE_TABLE(pci, hptiop_id_table);

static struct pci_driver hptiop_pci_driver = {
	.name       = driver_name,
	.id_table   = hptiop_id_table,
	.probe      = hptiop_probe,
	.remove     = hptiop_remove,
	.shutdown   = hptiop_shutdown,
};

static int __init hptiop_module_init(void)
{
	printk(KERN_INFO "%s %s\n", driver_name_long, driver_ver);
	return pci_register_driver(&hptiop_pci_driver);
}

static void __exit hptiop_module_exit(void)
{
	pci_unregister_driver(&hptiop_pci_driver);
}

module_init(hptiop_module_init);
module_exit(hptiop_module_exit);

MODULE_LICENSE("GPL");