xref: /linux/drivers/scsi/qlogicpti.c (revision 14b42963f64b98ab61fa9723c03d71aa5ef4f862)
1 /* qlogicpti.c: Performance Technologies QlogicISP sbus card driver.
2  *
3  * Copyright (C) 1996, 2006 David S. Miller (davem@davemloft.net)
4  *
5  * A lot of this driver was directly stolen from Erik H. Moe's PCI
6  * Qlogic ISP driver.  Mucho kudos to him for this code.
7  *
8  * An even bigger kudos to John Grana at Performance Technologies
9  * for providing me with the hardware to write this driver, you rule
10  * John you really do.
11  *
 * May 2, 1997: Added support for QLGC,isp --jj
13  */
14 
15 #include <linux/kernel.h>
16 #include <linux/delay.h>
17 #include <linux/types.h>
18 #include <linux/string.h>
19 #include <linux/slab.h>
20 #include <linux/blkdev.h>
21 #include <linux/proc_fs.h>
22 #include <linux/stat.h>
23 #include <linux/init.h>
24 #include <linux/spinlock.h>
25 #include <linux/interrupt.h>
26 #include <linux/module.h>
27 #include <linux/jiffies.h>
28 
29 #include <asm/byteorder.h>
30 
31 #include "qlogicpti.h"
32 
33 #include <asm/sbus.h>
34 #include <asm/dma.h>
35 #include <asm/system.h>
36 #include <asm/ptrace.h>
37 #include <asm/pgtable.h>
38 #include <asm/oplib.h>
39 #include <asm/io.h>
40 #include <asm/irq.h>
41 
42 #include <scsi/scsi.h>
43 #include <scsi/scsi_cmnd.h>
44 #include <scsi/scsi_device.h>
45 #include <scsi/scsi_eh.h>
46 #include <scsi/scsi_tcq.h>
47 #include <scsi/scsi_host.h>
48 
49 #define MAX_TARGETS	16
50 #define MAX_LUNS	8	/* 32 for 1.31 F/W */
51 
52 #define DEFAULT_LOOP_COUNT	10000
53 
54 #include "qlogicpti_asm.c"
55 
56 static struct qlogicpti *qptichain = NULL;
57 static DEFINE_SPINLOCK(qptichain_lock);
58 
59 #define PACKB(a, b)			(((a)<<4)|(b))
60 
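/* Each mbox_param entry packs, for one mailbox command, the number of
 * mailbox registers written as arguments (high nibble) and the number
 * read back as results (low nibble); the two switch statements in
 * qlogicpti_mbox_command() walk these counts.
 */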
61 static const u_char mbox_param[] = {
62 	PACKB(1, 1),	/* MBOX_NO_OP */
63 	PACKB(5, 5),	/* MBOX_LOAD_RAM */
64 	PACKB(2, 0),	/* MBOX_EXEC_FIRMWARE */
65 	PACKB(5, 5),	/* MBOX_DUMP_RAM */
66 	PACKB(3, 3),	/* MBOX_WRITE_RAM_WORD */
67 	PACKB(2, 3),	/* MBOX_READ_RAM_WORD */
68 	PACKB(6, 6),	/* MBOX_MAILBOX_REG_TEST */
69 	PACKB(2, 3),	/* MBOX_VERIFY_CHECKSUM	*/
70 	PACKB(1, 3),	/* MBOX_ABOUT_FIRMWARE */
71 	PACKB(0, 0),	/* 0x0009 */
72 	PACKB(0, 0),	/* 0x000a */
73 	PACKB(0, 0),	/* 0x000b */
74 	PACKB(0, 0),	/* 0x000c */
75 	PACKB(0, 0),	/* 0x000d */
76 	PACKB(1, 2),	/* MBOX_CHECK_FIRMWARE */
77 	PACKB(0, 0),	/* 0x000f */
78 	PACKB(5, 5),	/* MBOX_INIT_REQ_QUEUE */
79 	PACKB(6, 6),	/* MBOX_INIT_RES_QUEUE */
80 	PACKB(4, 4),	/* MBOX_EXECUTE_IOCB */
81 	PACKB(2, 2),	/* MBOX_WAKE_UP	*/
82 	PACKB(1, 6),	/* MBOX_STOP_FIRMWARE */
83 	PACKB(4, 4),	/* MBOX_ABORT */
84 	PACKB(2, 2),	/* MBOX_ABORT_DEVICE */
85 	PACKB(3, 3),	/* MBOX_ABORT_TARGET */
86 	PACKB(2, 2),	/* MBOX_BUS_RESET */
87 	PACKB(2, 3),	/* MBOX_STOP_QUEUE */
88 	PACKB(2, 3),	/* MBOX_START_QUEUE */
89 	PACKB(2, 3),	/* MBOX_SINGLE_STEP_QUEUE */
90 	PACKB(2, 3),	/* MBOX_ABORT_QUEUE */
91 	PACKB(2, 4),	/* MBOX_GET_DEV_QUEUE_STATUS */
92 	PACKB(0, 0),	/* 0x001e */
93 	PACKB(1, 3),	/* MBOX_GET_FIRMWARE_STATUS */
94 	PACKB(1, 2),	/* MBOX_GET_INIT_SCSI_ID */
95 	PACKB(1, 2),	/* MBOX_GET_SELECT_TIMEOUT */
96 	PACKB(1, 3),	/* MBOX_GET_RETRY_COUNT	*/
97 	PACKB(1, 2),	/* MBOX_GET_TAG_AGE_LIMIT */
98 	PACKB(1, 2),	/* MBOX_GET_CLOCK_RATE */
99 	PACKB(1, 2),	/* MBOX_GET_ACT_NEG_STATE */
100 	PACKB(1, 2),	/* MBOX_GET_ASYNC_DATA_SETUP_TIME */
101 	PACKB(1, 3),	/* MBOX_GET_SBUS_PARAMS */
102 	PACKB(2, 4),	/* MBOX_GET_TARGET_PARAMS */
103 	PACKB(2, 4),	/* MBOX_GET_DEV_QUEUE_PARAMS */
104 	PACKB(0, 0),	/* 0x002a */
105 	PACKB(0, 0),	/* 0x002b */
106 	PACKB(0, 0),	/* 0x002c */
107 	PACKB(0, 0),	/* 0x002d */
108 	PACKB(0, 0),	/* 0x002e */
109 	PACKB(0, 0),	/* 0x002f */
110 	PACKB(2, 2),	/* MBOX_SET_INIT_SCSI_ID */
111 	PACKB(2, 2),	/* MBOX_SET_SELECT_TIMEOUT */
112 	PACKB(3, 3),	/* MBOX_SET_RETRY_COUNT	*/
113 	PACKB(2, 2),	/* MBOX_SET_TAG_AGE_LIMIT */
114 	PACKB(2, 2),	/* MBOX_SET_CLOCK_RATE */
115 	PACKB(2, 2),	/* MBOX_SET_ACTIVE_NEG_STATE */
116 	PACKB(2, 2),	/* MBOX_SET_ASYNC_DATA_SETUP_TIME */
117 	PACKB(3, 3),	/* MBOX_SET_SBUS_CONTROL_PARAMS */
118 	PACKB(4, 4),	/* MBOX_SET_TARGET_PARAMS */
119 	PACKB(4, 4),	/* MBOX_SET_DEV_QUEUE_PARAMS */
120 	PACKB(0, 0),	/* 0x003a */
121 	PACKB(0, 0),	/* 0x003b */
122 	PACKB(0, 0),	/* 0x003c */
123 	PACKB(0, 0),	/* 0x003d */
124 	PACKB(0, 0),	/* 0x003e */
125 	PACKB(0, 0),	/* 0x003f */
126 	PACKB(0, 0),	/* 0x0040 */
127 	PACKB(0, 0),	/* 0x0041 */
128 	PACKB(0, 0)	/* 0x0042 */
129 };
130 
131 #define MAX_MBOX_COMMAND	ARRAY_SIZE(mbox_param)
132 
/* queue lengths are used as wrap-around masks, so length + 1 _must_ be a power of two: */
134 #define QUEUE_DEPTH(in, out, ql)	((in - out) & (ql))
135 #define REQ_QUEUE_DEPTH(in, out)	QUEUE_DEPTH(in, out, 		     \
136 						    QLOGICPTI_REQ_QUEUE_LEN)
137 #define RES_QUEUE_DEPTH(in, out)	QUEUE_DEPTH(in, out, RES_QUEUE_LEN)
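/* Example of the mask arithmetic above: with a queue length mask of 255,
 * in == 2 and out == 250 give (2 - 250) & 255 == 8, i.e. eight entries
 * are in flight across the wrap-around point.
 */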
138 
139 static inline void qlogicpti_enable_irqs(struct qlogicpti *qpti)
140 {
141 	sbus_writew(SBUS_CTRL_ERIRQ | SBUS_CTRL_GENAB,
142 		    qpti->qregs + SBUS_CTRL);
143 }
144 
145 static inline void qlogicpti_disable_irqs(struct qlogicpti *qpti)
146 {
147 	sbus_writew(0, qpti->qregs + SBUS_CTRL);
148 }
149 
150 static inline void set_sbus_cfg1(struct qlogicpti *qpti)
151 {
152 	u16 val;
153 	u8 bursts = qpti->bursts;
154 
155 #if 0	/* It appears that at least PTI cards do not support
156 	 * 64-byte bursts and that setting the B64 bit actually
157 	 * is a nop and the chip ends up using the smallest burst
158 	 * size. -DaveM
159 	 */
160 	if (sbus_can_burst64(qpti->sdev) && (bursts & DMA_BURST64)) {
161 		val = (SBUS_CFG1_BENAB | SBUS_CFG1_B64);
162 	} else
163 #endif
164 	if (bursts & DMA_BURST32) {
165 		val = (SBUS_CFG1_BENAB | SBUS_CFG1_B32);
166 	} else if (bursts & DMA_BURST16) {
167 		val = (SBUS_CFG1_BENAB | SBUS_CFG1_B16);
168 	} else if (bursts & DMA_BURST8) {
169 		val = (SBUS_CFG1_BENAB | SBUS_CFG1_B8);
170 	} else {
171 		val = 0; /* No sbus bursts for you... */
172 	}
173 	sbus_writew(val, qpti->qregs + SBUS_CFG1);
174 }
175 
176 static int qlogicpti_mbox_command(struct qlogicpti *qpti, u_short param[], int force)
177 {
178 	int loop_count;
179 	u16 tmp;
180 
	if (param[0] >= MAX_MBOX_COMMAND || mbox_param[param[0]] == 0)
		return 1;
183 
184 	/* Set SBUS semaphore. */
185 	tmp = sbus_readw(qpti->qregs + SBUS_SEMAPHORE);
186 	tmp |= SBUS_SEMAPHORE_LCK;
187 	sbus_writew(tmp, qpti->qregs + SBUS_SEMAPHORE);
188 
189 	/* Wait for host IRQ bit to clear. */
190 	loop_count = DEFAULT_LOOP_COUNT;
191 	while (--loop_count && (sbus_readw(qpti->qregs + HCCTRL) & HCCTRL_HIRQ)) {
192 		barrier();
193 		cpu_relax();
194 	}
195 	if (!loop_count)
196 		printk(KERN_EMERG "qlogicpti: mbox_command loop timeout #1\n");
197 
198 	/* Write mailbox command registers. */
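	/* Both this switch and the result read-back switch below fall
	 * through on purpose: starting at the highest mailbox register
	 * the command uses, every lower-numbered register is written
	 * (or read) down to MBOX0.
	 */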
199 	switch (mbox_param[param[0]] >> 4) {
200 	case 6: sbus_writew(param[5], qpti->qregs + MBOX5);
201 	case 5: sbus_writew(param[4], qpti->qregs + MBOX4);
202 	case 4: sbus_writew(param[3], qpti->qregs + MBOX3);
203 	case 3: sbus_writew(param[2], qpti->qregs + MBOX2);
204 	case 2: sbus_writew(param[1], qpti->qregs + MBOX1);
205 	case 1: sbus_writew(param[0], qpti->qregs + MBOX0);
206 	}
207 
208 	/* Clear RISC interrupt. */
209 	tmp = sbus_readw(qpti->qregs + HCCTRL);
210 	tmp |= HCCTRL_CRIRQ;
211 	sbus_writew(tmp, qpti->qregs + HCCTRL);
212 
213 	/* Clear SBUS semaphore. */
214 	sbus_writew(0, qpti->qregs + SBUS_SEMAPHORE);
215 
216 	/* Set HOST interrupt. */
217 	tmp = sbus_readw(qpti->qregs + HCCTRL);
218 	tmp |= HCCTRL_SHIRQ;
219 	sbus_writew(tmp, qpti->qregs + HCCTRL);
220 
221 	/* Wait for HOST interrupt clears. */
222 	loop_count = DEFAULT_LOOP_COUNT;
223 	while (--loop_count &&
224 	       (sbus_readw(qpti->qregs + HCCTRL) & HCCTRL_CRIRQ))
225 		udelay(20);
226 	if (!loop_count)
227 		printk(KERN_EMERG "qlogicpti: mbox_command[%04x] loop timeout #2\n",
228 		       param[0]);
229 
230 	/* Wait for SBUS semaphore to get set. */
231 	loop_count = DEFAULT_LOOP_COUNT;
232 	while (--loop_count &&
233 	       !(sbus_readw(qpti->qregs + SBUS_SEMAPHORE) & SBUS_SEMAPHORE_LCK)) {
234 		udelay(20);
235 
236 		/* Workaround for some buggy chips. */
237 		if (sbus_readw(qpti->qregs + MBOX0) & 0x4000)
238 			break;
239 	}
240 	if (!loop_count)
241 		printk(KERN_EMERG "qlogicpti: mbox_command[%04x] loop timeout #3\n",
242 		       param[0]);
243 
244 	/* Wait for MBOX busy condition to go away. */
245 	loop_count = DEFAULT_LOOP_COUNT;
246 	while (--loop_count && (sbus_readw(qpti->qregs + MBOX0) == 0x04))
247 		udelay(20);
248 	if (!loop_count)
249 		printk(KERN_EMERG "qlogicpti: mbox_command[%04x] loop timeout #4\n",
250 		       param[0]);
251 
252 	/* Read back output parameters. */
253 	switch (mbox_param[param[0]] & 0xf) {
254 	case 6: param[5] = sbus_readw(qpti->qregs + MBOX5);
255 	case 5: param[4] = sbus_readw(qpti->qregs + MBOX4);
256 	case 4: param[3] = sbus_readw(qpti->qregs + MBOX3);
257 	case 3: param[2] = sbus_readw(qpti->qregs + MBOX2);
258 	case 2: param[1] = sbus_readw(qpti->qregs + MBOX1);
259 	case 1: param[0] = sbus_readw(qpti->qregs + MBOX0);
260 	}
261 
262 	/* Clear RISC interrupt. */
263 	tmp = sbus_readw(qpti->qregs + HCCTRL);
264 	tmp |= HCCTRL_CRIRQ;
265 	sbus_writew(tmp, qpti->qregs + HCCTRL);
266 
267 	/* Release SBUS semaphore. */
268 	tmp = sbus_readw(qpti->qregs + SBUS_SEMAPHORE);
269 	tmp &= ~(SBUS_SEMAPHORE_LCK);
270 	sbus_writew(tmp, qpti->qregs + SBUS_SEMAPHORE);
271 
272 	/* We're done. */
273 	return 0;
274 }
275 
276 static inline void qlogicpti_set_hostdev_defaults(struct qlogicpti *qpti)
277 {
278 	int i;
279 
280 	qpti->host_param.initiator_scsi_id = qpti->scsi_id;
281 	qpti->host_param.bus_reset_delay = 3;
282 	qpti->host_param.retry_count = 0;
283 	qpti->host_param.retry_delay = 5;
284 	qpti->host_param.async_data_setup_time = 3;
285 	qpti->host_param.req_ack_active_negation = 1;
286 	qpti->host_param.data_line_active_negation = 1;
287 	qpti->host_param.data_dma_burst_enable = 1;
288 	qpti->host_param.command_dma_burst_enable = 1;
289 	qpti->host_param.tag_aging = 8;
290 	qpti->host_param.selection_timeout = 250;
291 	qpti->host_param.max_queue_depth = 256;
292 
293 	for(i = 0; i < MAX_TARGETS; i++) {
294 		/*
		 * Enable disconnect, parity, ARQ, renegotiate-on-reset and,
		 * oddly enough, tags.  The midlayer's notion of tagged support
		 * has to match our device settings, and since we decide on a
		 * per-cmnd basis whether to use a tag based on what the
		 * midlayer says, we actually enable the capability here.
300 		 */
301 		qpti->dev_param[i].device_flags = 0xcd;
302 		qpti->dev_param[i].execution_throttle = 16;
303 		if (qpti->ultra) {
304 			qpti->dev_param[i].synchronous_period = 12;
305 			qpti->dev_param[i].synchronous_offset = 8;
306 		} else {
307 			qpti->dev_param[i].synchronous_period = 25;
308 			qpti->dev_param[i].synchronous_offset = 12;
309 		}
310 		qpti->dev_param[i].device_enable = 1;
311 	}
312 	/* this is very important to set! */
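	/* sbits tracks which targets have had their INQUIRY data examined
	 * by ourdone(); pre-setting our own initiator bit keeps the probing
	 * logic in qlogicpti_queuecommand_slow() from waiting on ourselves.
	 */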
313 	qpti->sbits = 1 << qpti->scsi_id;
314 }
315 
316 static int qlogicpti_reset_hardware(struct Scsi_Host *host)
317 {
318 	struct qlogicpti *qpti = (struct qlogicpti *) host->hostdata;
319 	u_short param[6];
320 	unsigned short risc_code_addr;
321 	int loop_count, i;
322 	unsigned long flags;
323 
324 	risc_code_addr = 0x1000;	/* all load addresses are at 0x1000 */
325 
326 	spin_lock_irqsave(host->host_lock, flags);
327 
328 	sbus_writew(HCCTRL_PAUSE, qpti->qregs + HCCTRL);
329 
330 	/* Only reset the scsi bus if it is not free. */
331 	if (sbus_readw(qpti->qregs + CPU_PCTRL) & CPU_PCTRL_BSY) {
332 		sbus_writew(CPU_ORIDE_RMOD, qpti->qregs + CPU_ORIDE);
333 		sbus_writew(CPU_CMD_BRESET, qpti->qregs + CPU_CMD);
334 		udelay(400);
335 	}
336 
337 	sbus_writew(SBUS_CTRL_RESET, qpti->qregs + SBUS_CTRL);
338 	sbus_writew((DMA_CTRL_CCLEAR | DMA_CTRL_CIRQ), qpti->qregs + CMD_DMA_CTRL);
339 	sbus_writew((DMA_CTRL_CCLEAR | DMA_CTRL_CIRQ), qpti->qregs + DATA_DMA_CTRL);
340 
341 	loop_count = DEFAULT_LOOP_COUNT;
342 	while (--loop_count && ((sbus_readw(qpti->qregs + MBOX0) & 0xff) == 0x04))
343 		udelay(20);
344 	if (!loop_count)
345 		printk(KERN_EMERG "qlogicpti: reset_hardware loop timeout\n");
346 
347 	sbus_writew(HCCTRL_PAUSE, qpti->qregs + HCCTRL);
348 	set_sbus_cfg1(qpti);
349 	qlogicpti_enable_irqs(qpti);
350 
351 	if (sbus_readw(qpti->qregs + RISC_PSR) & RISC_PSR_ULTRA) {
352 		qpti->ultra = 1;
353 		sbus_writew((RISC_MTREG_P0ULTRA | RISC_MTREG_P1ULTRA),
354 			    qpti->qregs + RISC_MTREG);
355 	} else {
356 		qpti->ultra = 0;
357 		sbus_writew((RISC_MTREG_P0DFLT | RISC_MTREG_P1DFLT),
358 			    qpti->qregs + RISC_MTREG);
359 	}
360 
361 	/* reset adapter and per-device default values. */
362 	/* do it after finding out whether we're ultra mode capable */
363 	qlogicpti_set_hostdev_defaults(qpti);
364 
365 	/* Release the RISC processor. */
366 	sbus_writew(HCCTRL_REL, qpti->qregs + HCCTRL);
367 
368 	/* Get RISC to start executing the firmware code. */
369 	param[0] = MBOX_EXEC_FIRMWARE;
370 	param[1] = risc_code_addr;
371 	if (qlogicpti_mbox_command(qpti, param, 1)) {
372 		printk(KERN_EMERG "qlogicpti%d: Cannot execute ISP firmware.\n",
373 		       qpti->qpti_id);
374 		spin_unlock_irqrestore(host->host_lock, flags);
375 		return 1;
376 	}
377 
378 	/* Set initiator scsi ID. */
379 	param[0] = MBOX_SET_INIT_SCSI_ID;
380 	param[1] = qpti->host_param.initiator_scsi_id;
381 	if (qlogicpti_mbox_command(qpti, param, 1) ||
382 	   (param[0] != MBOX_COMMAND_COMPLETE)) {
383 		printk(KERN_EMERG "qlogicpti%d: Cannot set initiator SCSI ID.\n",
384 		       qpti->qpti_id);
385 		spin_unlock_irqrestore(host->host_lock, flags);
386 		return 1;
387 	}
388 
389 	/* Initialize state of the queues, both hw and sw. */
390 	qpti->req_in_ptr = qpti->res_out_ptr = 0;
391 
392 	param[0] = MBOX_INIT_RES_QUEUE;
393 	param[1] = RES_QUEUE_LEN + 1;
394 	param[2] = (u_short) (qpti->res_dvma >> 16);
395 	param[3] = (u_short) (qpti->res_dvma & 0xffff);
396 	param[4] = param[5] = 0;
397 	if (qlogicpti_mbox_command(qpti, param, 1)) {
398 		printk(KERN_EMERG "qlogicpti%d: Cannot init response queue.\n",
399 		       qpti->qpti_id);
400 		spin_unlock_irqrestore(host->host_lock, flags);
401 		return 1;
402 	}
403 
404 	param[0] = MBOX_INIT_REQ_QUEUE;
405 	param[1] = QLOGICPTI_REQ_QUEUE_LEN + 1;
406 	param[2] = (u_short) (qpti->req_dvma >> 16);
407 	param[3] = (u_short) (qpti->req_dvma & 0xffff);
408 	param[4] = param[5] = 0;
409 	if (qlogicpti_mbox_command(qpti, param, 1)) {
410 		printk(KERN_EMERG "qlogicpti%d: Cannot init request queue.\n",
411 		       qpti->qpti_id);
412 		spin_unlock_irqrestore(host->host_lock, flags);
413 		return 1;
414 	}
415 
416 	param[0] = MBOX_SET_RETRY_COUNT;
417 	param[1] = qpti->host_param.retry_count;
418 	param[2] = qpti->host_param.retry_delay;
419 	qlogicpti_mbox_command(qpti, param, 0);
420 
421 	param[0] = MBOX_SET_TAG_AGE_LIMIT;
422 	param[1] = qpti->host_param.tag_aging;
423 	qlogicpti_mbox_command(qpti, param, 0);
424 
425 	for (i = 0; i < MAX_TARGETS; i++) {
426 		param[0] = MBOX_GET_DEV_QUEUE_PARAMS;
427 		param[1] = (i << 8);
428 		qlogicpti_mbox_command(qpti, param, 0);
429 	}
430 
431 	param[0] = MBOX_GET_FIRMWARE_STATUS;
432 	qlogicpti_mbox_command(qpti, param, 0);
433 
434 	param[0] = MBOX_SET_SELECT_TIMEOUT;
435 	param[1] = qpti->host_param.selection_timeout;
436 	qlogicpti_mbox_command(qpti, param, 0);
437 
438 	for (i = 0; i < MAX_TARGETS; i++) {
439 		param[0] = MBOX_SET_TARGET_PARAMS;
440 		param[1] = (i << 8);
441 		param[2] = (qpti->dev_param[i].device_flags << 8);
442 		/*
443 		 * Since we're now loading 1.31 f/w, force narrow/async.
444 		 */
445 		param[2] |= 0xc0;
446 		param[3] = 0;	/* no offset, we do not have sync mode yet */
447 		qlogicpti_mbox_command(qpti, param, 0);
448 	}
449 
450 	/*
451 	 * Always (sigh) do an initial bus reset (kicks f/w).
452 	 */
453 	param[0] = MBOX_BUS_RESET;
454 	param[1] = qpti->host_param.bus_reset_delay;
455 	qlogicpti_mbox_command(qpti, param, 0);
456 	qpti->send_marker = 1;
457 
458 	spin_unlock_irqrestore(host->host_lock, flags);
459 	return 0;
460 }
461 
462 #define PTI_RESET_LIMIT 400
463 
464 static int __init qlogicpti_load_firmware(struct qlogicpti *qpti)
465 {
466 	struct Scsi_Host *host = qpti->qhost;
467 	unsigned short csum = 0;
468 	unsigned short param[6];
469 	unsigned short *risc_code, risc_code_addr, risc_code_length;
470 	unsigned long flags;
471 	int i, timeout;
472 
473 	risc_code = &sbus_risc_code01[0];
474 	risc_code_addr = 0x1000;	/* all f/w modules load at 0x1000 */
475 	risc_code_length = sbus_risc_code_length01;
476 
477 	spin_lock_irqsave(host->host_lock, flags);
478 
479 	/* Verify the checksum twice, one before loading it, and once
480 	 * afterwards via the mailbox commands.
481 	 */
482 	for (i = 0; i < risc_code_length; i++)
483 		csum += risc_code[i];
484 	if (csum) {
485 		spin_unlock_irqrestore(host->host_lock, flags);
		printk(KERN_EMERG "qlogicpti%d: Aieee, firmware checksum failed!\n",
487 		       qpti->qpti_id);
488 		return 1;
489 	}
490 	sbus_writew(SBUS_CTRL_RESET, qpti->qregs + SBUS_CTRL);
491 	sbus_writew((DMA_CTRL_CCLEAR | DMA_CTRL_CIRQ), qpti->qregs + CMD_DMA_CTRL);
492 	sbus_writew((DMA_CTRL_CCLEAR | DMA_CTRL_CIRQ), qpti->qregs + DATA_DMA_CTRL);
493 	timeout = PTI_RESET_LIMIT;
494 	while (--timeout && (sbus_readw(qpti->qregs + SBUS_CTRL) & SBUS_CTRL_RESET))
495 		udelay(20);
496 	if (!timeout) {
497 		spin_unlock_irqrestore(host->host_lock, flags);
		printk(KERN_EMERG "qlogicpti%d: Cannot reset the ISP.\n", qpti->qpti_id);
499 		return 1;
500 	}
501 
502 	sbus_writew(HCCTRL_RESET, qpti->qregs + HCCTRL);
503 	mdelay(1);
504 
505 	sbus_writew((SBUS_CTRL_GENAB | SBUS_CTRL_ERIRQ), qpti->qregs + SBUS_CTRL);
506 	set_sbus_cfg1(qpti);
507 	sbus_writew(0, qpti->qregs + SBUS_SEMAPHORE);
508 
509 	if (sbus_readw(qpti->qregs + RISC_PSR) & RISC_PSR_ULTRA) {
510 		qpti->ultra = 1;
511 		sbus_writew((RISC_MTREG_P0ULTRA | RISC_MTREG_P1ULTRA),
512 			    qpti->qregs + RISC_MTREG);
513 	} else {
514 		qpti->ultra = 0;
515 		sbus_writew((RISC_MTREG_P0DFLT | RISC_MTREG_P1DFLT),
516 			    qpti->qregs + RISC_MTREG);
517 	}
518 
519 	sbus_writew(HCCTRL_REL, qpti->qregs + HCCTRL);
520 
521 	/* Pin lines are only stable while RISC is paused. */
522 	sbus_writew(HCCTRL_PAUSE, qpti->qregs + HCCTRL);
523 	if (sbus_readw(qpti->qregs + CPU_PDIFF) & CPU_PDIFF_MODE)
524 		qpti->differential = 1;
525 	else
526 		qpti->differential = 0;
527 	sbus_writew(HCCTRL_REL, qpti->qregs + HCCTRL);
528 
	/* This shouldn't be necessary - we've reset things, so we should be
	   running from the ROM now. */
531 
532 	param[0] = MBOX_STOP_FIRMWARE;
533 	param[1] = param[2] = param[3] = param[4] = param[5] = 0;
534 	if (qlogicpti_mbox_command(qpti, param, 1)) {
535 		printk(KERN_EMERG "qlogicpti%d: Cannot stop firmware for reload.\n",
536 		       qpti->qpti_id);
537 		spin_unlock_irqrestore(host->host_lock, flags);
538 		return 1;
539 	}
540 
541 	/* Load it up.. */
542 	for (i = 0; i < risc_code_length; i++) {
543 		param[0] = MBOX_WRITE_RAM_WORD;
544 		param[1] = risc_code_addr + i;
545 		param[2] = risc_code[i];
546 		if (qlogicpti_mbox_command(qpti, param, 1) ||
547 		    param[0] != MBOX_COMMAND_COMPLETE) {
548 			printk("qlogicpti%d: Firmware dload failed, I'm bolixed!\n",
549 			       qpti->qpti_id);
550 			spin_unlock_irqrestore(host->host_lock, flags);
551 			return 1;
552 		}
553 	}
554 
555 	/* Reset the ISP again. */
556 	sbus_writew(HCCTRL_RESET, qpti->qregs + HCCTRL);
557 	mdelay(1);
558 
559 	qlogicpti_enable_irqs(qpti);
560 	sbus_writew(0, qpti->qregs + SBUS_SEMAPHORE);
561 	sbus_writew(HCCTRL_REL, qpti->qregs + HCCTRL);
562 
563 	/* Ask ISP to verify the checksum of the new code. */
564 	param[0] = MBOX_VERIFY_CHECKSUM;
565 	param[1] = risc_code_addr;
566 	if (qlogicpti_mbox_command(qpti, param, 1) ||
567 	    (param[0] != MBOX_COMMAND_COMPLETE)) {
568 		printk(KERN_EMERG "qlogicpti%d: New firmware csum failure!\n",
569 		       qpti->qpti_id);
570 		spin_unlock_irqrestore(host->host_lock, flags);
571 		return 1;
572 	}
573 
574 	/* Start using newly downloaded firmware. */
575 	param[0] = MBOX_EXEC_FIRMWARE;
576 	param[1] = risc_code_addr;
577 	qlogicpti_mbox_command(qpti, param, 1);
578 
579 	param[0] = MBOX_ABOUT_FIRMWARE;
580 	if (qlogicpti_mbox_command(qpti, param, 1) ||
581 	    (param[0] != MBOX_COMMAND_COMPLETE)) {
582 		printk(KERN_EMERG "qlogicpti%d: AboutFirmware cmd fails.\n",
583 		       qpti->qpti_id);
584 		spin_unlock_irqrestore(host->host_lock, flags);
585 		return 1;
586 	}
587 
588 	/* Snag the major and minor revisions from the result. */
589 	qpti->fware_majrev = param[1];
590 	qpti->fware_minrev = param[2];
591 	qpti->fware_micrev = param[3];
592 
593 	/* Set the clock rate */
594 	param[0] = MBOX_SET_CLOCK_RATE;
595 	param[1] = qpti->clock;
596 	if (qlogicpti_mbox_command(qpti, param, 1) ||
597 	    (param[0] != MBOX_COMMAND_COMPLETE)) {
598 		printk(KERN_EMERG "qlogicpti%d: could not set clock rate.\n",
599 		       qpti->qpti_id);
600 		spin_unlock_irqrestore(host->host_lock, flags);
601 		return 1;
602 	}
603 
604 	if (qpti->is_pti != 0) {
605 		/* Load scsi initiator ID and interrupt level into sbus static ram. */
606 		param[0] = MBOX_WRITE_RAM_WORD;
607 		param[1] = 0xff80;
608 		param[2] = (unsigned short) qpti->scsi_id;
609 		qlogicpti_mbox_command(qpti, param, 1);
610 
611 		param[0] = MBOX_WRITE_RAM_WORD;
612 		param[1] = 0xff00;
613 		param[2] = (unsigned short) 3;
614 		qlogicpti_mbox_command(qpti, param, 1);
615 	}
616 
617 	spin_unlock_irqrestore(host->host_lock, flags);
618 	return 0;
619 }
620 
621 static int qlogicpti_verify_tmon(struct qlogicpti *qpti)
622 {
623 	int curstat = sbus_readb(qpti->sreg);
624 
625 	curstat &= 0xf0;
626 	if (!(curstat & SREG_FUSE) && (qpti->swsreg & SREG_FUSE))
627 		printk("qlogicpti%d: Fuse returned to normal state.\n", qpti->qpti_id);
628 	if (!(curstat & SREG_TPOWER) && (qpti->swsreg & SREG_TPOWER))
629 		printk("qlogicpti%d: termpwr back to normal state.\n", qpti->qpti_id);
630 	if (curstat != qpti->swsreg) {
631 		int error = 0;
632 		if (curstat & SREG_FUSE) {
633 			error++;
634 			printk("qlogicpti%d: Fuse is open!\n", qpti->qpti_id);
635 		}
636 		if (curstat & SREG_TPOWER) {
637 			error++;
638 			printk("qlogicpti%d: termpwr failure\n", qpti->qpti_id);
639 		}
640 		if (qpti->differential &&
641 		    (curstat & SREG_DSENSE) != SREG_DSENSE) {
642 			error++;
643 			printk("qlogicpti%d: You have a single ended device on a "
644 			       "differential bus!  Please fix!\n", qpti->qpti_id);
645 		}
646 		qpti->swsreg = curstat;
647 		return error;
648 	}
649 	return 0;
650 }
651 
652 static irqreturn_t qpti_intr(int irq, void *dev_id, struct pt_regs *regs);
653 
654 static void __init qpti_chain_add(struct qlogicpti *qpti)
655 {
656 	spin_lock_irq(&qptichain_lock);
657 	if (qptichain != NULL) {
658 		struct qlogicpti *qlink = qptichain;
659 
660 		while(qlink->next)
661 			qlink = qlink->next;
662 		qlink->next = qpti;
663 	} else {
664 		qptichain = qpti;
665 	}
666 	qpti->next = NULL;
667 	spin_unlock_irq(&qptichain_lock);
668 }
669 
670 static void __init qpti_chain_del(struct qlogicpti *qpti)
671 {
672 	spin_lock_irq(&qptichain_lock);
673 	if (qptichain == qpti) {
674 		qptichain = qpti->next;
675 	} else {
676 		struct qlogicpti *qlink = qptichain;
677 		while(qlink->next != qpti)
678 			qlink = qlink->next;
679 		qlink->next = qpti->next;
680 	}
681 	qpti->next = NULL;
682 	spin_unlock_irq(&qptichain_lock);
683 }
684 
685 static int __init qpti_map_regs(struct qlogicpti *qpti)
686 {
687 	struct sbus_dev *sdev = qpti->sdev;
688 
689 	qpti->qregs = sbus_ioremap(&sdev->resource[0], 0,
690 				   sdev->reg_addrs[0].reg_size,
691 				   "PTI Qlogic/ISP");
692 	if (!qpti->qregs) {
693 		printk("PTI: Qlogic/ISP registers are unmappable\n");
694 		return -1;
695 	}
696 	if (qpti->is_pti) {
697 		qpti->sreg = sbus_ioremap(&sdev->resource[0], (16 * 4096),
698 					  sizeof(unsigned char),
699 					  "PTI Qlogic/ISP statreg");
700 		if (!qpti->sreg) {
701 			printk("PTI: Qlogic/ISP status register is unmappable\n");
702 			return -1;
703 		}
704 	}
705 	return 0;
706 }
707 
708 static int __init qpti_register_irq(struct qlogicpti *qpti)
709 {
710 	struct sbus_dev *sdev = qpti->sdev;
711 
712 	qpti->qhost->irq = qpti->irq = sdev->irqs[0];
713 
714 	/* We used to try various overly-clever things to
715 	 * reduce the interrupt processing overhead on
716 	 * sun4c/sun4m when multiple PTI's shared the
717 	 * same IRQ.  It was too complex and messy to
718 	 * sanely maintain.
719 	 */
720 	if (request_irq(qpti->irq, qpti_intr,
721 			IRQF_SHARED, "Qlogic/PTI", qpti))
722 		goto fail;
723 
724 	printk("qpti%d: IRQ %d ", qpti->qpti_id, qpti->irq);
725 
726 	return 0;
727 
728 fail:
729 	printk("qpti%d: Cannot acquire irq line\n", qpti->qpti_id);
730 	return -1;
731 }
732 
733 static void __init qpti_get_scsi_id(struct qlogicpti *qpti)
734 {
735 	qpti->scsi_id = prom_getintdefault(qpti->prom_node,
736 					   "initiator-id",
737 					   -1);
738 	if (qpti->scsi_id == -1)
739 		qpti->scsi_id = prom_getintdefault(qpti->prom_node,
740 						   "scsi-initiator-id",
741 						   -1);
742 	if (qpti->scsi_id == -1)
743 		qpti->scsi_id =
744 			prom_getintdefault(qpti->sdev->bus->prom_node,
745 					   "scsi-initiator-id", 7);
746 	qpti->qhost->this_id = qpti->scsi_id;
747 	qpti->qhost->max_sectors = 64;
748 
749 	printk("SCSI ID %d ", qpti->scsi_id);
750 }
751 
752 static void qpti_get_bursts(struct qlogicpti *qpti)
753 {
754 	struct sbus_dev *sdev = qpti->sdev;
755 	u8 bursts, bmask;
756 
757 	bursts = prom_getintdefault(qpti->prom_node, "burst-sizes", 0xff);
758 	bmask = prom_getintdefault(sdev->bus->prom_node,
759 				   "burst-sizes", 0xff);
760 	if (bmask != 0xff)
761 		bursts &= bmask;
762 	if (bursts == 0xff ||
763 	    (bursts & DMA_BURST16) == 0 ||
764 	    (bursts & DMA_BURST32) == 0)
765 		bursts = (DMA_BURST32 - 1);
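	/* If the PROM reports nothing usable (0xff) or omits the 16 or 32
	 * byte burst bits, fall back to DMA_BURST32 - 1, i.e. every burst
	 * bit below the 32-byte one; set_sbus_cfg1() then programs the
	 * largest burst size actually present in the mask.
	 */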
766 
767 	qpti->bursts = bursts;
768 }
769 
770 static void qpti_get_clock(struct qlogicpti *qpti)
771 {
772 	unsigned int cfreq;
773 
774 	/* Check for what the clock input to this card is.
	 * Default to 40MHz.
776 	 */
777 	cfreq = prom_getintdefault(qpti->prom_node,"clock-frequency",40000000);
778 	qpti->clock = (cfreq + 500000)/1000000;
779 	if (qpti->clock == 0) /* bullshit */
780 		qpti->clock = 40;
781 }
782 
783 /* The request and response queues must each be aligned
784  * on a page boundary.
785  */
786 static int __init qpti_map_queues(struct qlogicpti *qpti)
787 {
788 	struct sbus_dev *sdev = qpti->sdev;
789 
790 #define QSIZE(entries)	(((entries) + 1) * QUEUE_ENTRY_LEN)
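	/* The hardware rings carry (entries + 1) slots; compare the
	 * MBOX_INIT_REQ_QUEUE/MBOX_INIT_RES_QUEUE calls in
	 * qlogicpti_reset_hardware(), which pass LEN + 1 as the length.
	 */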
791 	qpti->res_cpu = sbus_alloc_consistent(sdev,
792 					      QSIZE(RES_QUEUE_LEN),
793 					      &qpti->res_dvma);
794 	if (qpti->res_cpu == NULL ||
795 	    qpti->res_dvma == 0) {
796 		printk("QPTI: Cannot map response queue.\n");
797 		return -1;
798 	}
799 
800 	qpti->req_cpu = sbus_alloc_consistent(sdev,
801 					      QSIZE(QLOGICPTI_REQ_QUEUE_LEN),
802 					      &qpti->req_dvma);
803 	if (qpti->req_cpu == NULL ||
804 	    qpti->req_dvma == 0) {
805 		sbus_free_consistent(sdev, QSIZE(RES_QUEUE_LEN),
806 				     qpti->res_cpu, qpti->res_dvma);
807 		printk("QPTI: Cannot map request queue.\n");
808 		return -1;
809 	}
810 	memset(qpti->res_cpu, 0, QSIZE(RES_QUEUE_LEN));
811 	memset(qpti->req_cpu, 0, QSIZE(QLOGICPTI_REQ_QUEUE_LEN));
812 	return 0;
813 }
814 
815 const char *qlogicpti_info(struct Scsi_Host *host)
816 {
817 	static char buf[80];
818 	struct qlogicpti *qpti = (struct qlogicpti *) host->hostdata;
819 
820 	sprintf(buf, "PTI Qlogic,ISP SBUS SCSI irq %d regs at %p",
821 		qpti->qhost->irq, qpti->qregs);
822 	return buf;
823 }
824 
825 /* I am a certified frobtronicist. */
826 static inline void marker_frob(struct Command_Entry *cmd)
827 {
828 	struct Marker_Entry *marker = (struct Marker_Entry *) cmd;
829 
830 	memset(marker, 0, sizeof(struct Marker_Entry));
831 	marker->hdr.entry_cnt = 1;
832 	marker->hdr.entry_type = ENTRY_MARKER;
833 	marker->modifier = SYNC_ALL;
834 	marker->rsvd = 0;
835 }
836 
837 static inline void cmd_frob(struct Command_Entry *cmd, struct scsi_cmnd *Cmnd,
838 			    struct qlogicpti *qpti)
839 {
840 	memset(cmd, 0, sizeof(struct Command_Entry));
841 	cmd->hdr.entry_cnt = 1;
842 	cmd->hdr.entry_type = ENTRY_COMMAND;
843 	cmd->target_id = Cmnd->device->id;
844 	cmd->target_lun = Cmnd->device->lun;
845 	cmd->cdb_length = Cmnd->cmd_len;
846 	cmd->control_flags = 0;
847 	if (Cmnd->device->tagged_supported) {
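		/* Tag aging: force an ordered tag at least once every five
		 * seconds per target while commands are outstanding, so a
		 * stream of simple tags cannot keep reordering around older
		 * commands forever.
		 */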
848 		if (qpti->cmd_count[Cmnd->device->id] == 0)
849 			qpti->tag_ages[Cmnd->device->id] = jiffies;
850 		if (time_after(jiffies, qpti->tag_ages[Cmnd->device->id] + (5*HZ))) {
851 			cmd->control_flags = CFLAG_ORDERED_TAG;
852 			qpti->tag_ages[Cmnd->device->id] = jiffies;
853 		} else
854 			cmd->control_flags = CFLAG_SIMPLE_TAG;
855 	}
856 	if ((Cmnd->cmnd[0] == WRITE_6) ||
857 	    (Cmnd->cmnd[0] == WRITE_10) ||
858 	    (Cmnd->cmnd[0] == WRITE_12))
859 		cmd->control_flags |= CFLAG_WRITE;
860 	else
861 		cmd->control_flags |= CFLAG_READ;
862 	cmd->time_out = 30;
863 	memcpy(cmd->cdb, Cmnd->cmnd, Cmnd->cmd_len);
864 }
865 
866 /* Do it to it baby. */
867 static inline int load_cmd(struct scsi_cmnd *Cmnd, struct Command_Entry *cmd,
868 			   struct qlogicpti *qpti, u_int in_ptr, u_int out_ptr)
869 {
870 	struct dataseg *ds;
871 	struct scatterlist *sg;
872 	int i, n;
873 
874 	if (Cmnd->use_sg) {
875 		int sg_count;
876 
877 		sg = (struct scatterlist *) Cmnd->buffer;
878 		sg_count = sbus_map_sg(qpti->sdev, sg, Cmnd->use_sg, Cmnd->sc_data_direction);
879 
880 		ds = cmd->dataseg;
881 		cmd->segment_cnt = sg_count;
882 
883 		/* Fill in first four sg entries: */
884 		n = sg_count;
885 		if (n > 4)
886 			n = 4;
887 		for (i = 0; i < n; i++, sg++) {
888 			ds[i].d_base = sg_dma_address(sg);
889 			ds[i].d_count = sg_dma_len(sg);
890 		}
891 		sg_count -= 4;
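		/* Any remaining segments spill into Continuation entries
		 * allocated directly behind the command in the request ring,
		 * seven data segments per continuation entry.
		 */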
892 		while (sg_count > 0) {
893 			struct Continuation_Entry *cont;
894 
895 			++cmd->hdr.entry_cnt;
896 			cont = (struct Continuation_Entry *) &qpti->req_cpu[in_ptr];
897 			in_ptr = NEXT_REQ_PTR(in_ptr);
898 			if (in_ptr == out_ptr)
899 				return -1;
900 
901 			cont->hdr.entry_type = ENTRY_CONTINUATION;
902 			cont->hdr.entry_cnt = 0;
903 			cont->hdr.sys_def_1 = 0;
904 			cont->hdr.flags = 0;
905 			cont->reserved = 0;
906 			ds = cont->dataseg;
907 			n = sg_count;
908 			if (n > 7)
909 				n = 7;
910 			for (i = 0; i < n; i++, sg++) {
911 				ds[i].d_base = sg_dma_address(sg);
912 				ds[i].d_count = sg_dma_len(sg);
913 			}
914 			sg_count -= n;
915 		}
916 	} else if (Cmnd->request_bufflen) {
917 		Cmnd->SCp.ptr = (char *)(unsigned long)
918 			sbus_map_single(qpti->sdev,
919 					Cmnd->request_buffer,
920 					Cmnd->request_bufflen,
921 					Cmnd->sc_data_direction);
922 
923 		cmd->dataseg[0].d_base = (u32) ((unsigned long)Cmnd->SCp.ptr);
924 		cmd->dataseg[0].d_count = Cmnd->request_bufflen;
925 		cmd->segment_cnt = 1;
926 	} else {
927 		cmd->dataseg[0].d_base = 0;
928 		cmd->dataseg[0].d_count = 0;
929 		cmd->segment_cnt = 1; /* Shouldn't this be 0? */
930 	}
931 
932 	/* Committed, record Scsi_Cmd so we can find it later. */
933 	cmd->handle = in_ptr;
934 	qpti->cmd_slots[in_ptr] = Cmnd;
935 
936 	qpti->cmd_count[Cmnd->device->id]++;
937 	sbus_writew(in_ptr, qpti->qregs + MBOX4);
938 	qpti->req_in_ptr = in_ptr;
939 
940 	return in_ptr;
941 }
942 
943 static inline void update_can_queue(struct Scsi_Host *host, u_int in_ptr, u_int out_ptr)
944 {
945 	/* Temporary workaround until bug is found and fixed (one bug has been found
946 	   already, but fixing it makes things even worse) -jj */
947 	int num_free = QLOGICPTI_REQ_QUEUE_LEN - REQ_QUEUE_DEPTH(in_ptr, out_ptr) - 64;
948 	host->can_queue = host->host_busy + num_free;
949 	host->sg_tablesize = QLOGICPTI_MAX_SG(num_free);
950 }
951 
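/* Map a command's data buffer for CPU access.  Only the first scatterlist
 * segment is mapped, which suffices here: the sole caller, ourdone(), only
 * peeks at INQUIRY data.
 */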
952 static unsigned int scsi_rbuf_get(struct scsi_cmnd *cmd, unsigned char **buf_out)
953 {
954 	unsigned char *buf;
955 	unsigned int buflen;
956 
957 	if (cmd->use_sg) {
958 		struct scatterlist *sg;
959 
960 		sg = (struct scatterlist *) cmd->request_buffer;
961 		buf = kmap_atomic(sg->page, KM_IRQ0) + sg->offset;
962 		buflen = sg->length;
963 	} else {
964 		buf = cmd->request_buffer;
965 		buflen = cmd->request_bufflen;
966 	}
967 
968 	*buf_out = buf;
969 	return buflen;
970 }
971 
972 static void scsi_rbuf_put(struct scsi_cmnd *cmd, unsigned char *buf)
973 {
974 	if (cmd->use_sg) {
975 		struct scatterlist *sg;
976 
977 		sg = (struct scatterlist *) cmd->request_buffer;
978 		kunmap_atomic(buf - sg->offset, KM_IRQ0);
979 	}
980 }
981 
982 /*
 * Until we scan the entire bus with inquiries, go through this fella...
984  */
985 static void ourdone(struct scsi_cmnd *Cmnd)
986 {
987 	struct qlogicpti *qpti = (struct qlogicpti *) Cmnd->device->host->hostdata;
988 	int tgt = Cmnd->device->id;
989 	void (*done) (struct scsi_cmnd *);
990 
991 	/* This grot added by DaveM, blame him for ugliness.
992 	 * The issue is that in the 2.3.x driver we use the
993 	 * host_scribble portion of the scsi command as a
994 	 * completion linked list at interrupt service time,
995 	 * so we have to store the done function pointer elsewhere.
996 	 */
997 	done = (void (*)(struct scsi_cmnd *))
998 		(((unsigned long) Cmnd->SCp.Message)
#ifdef CONFIG_SPARC64
1000 		 | ((unsigned long) Cmnd->SCp.Status << 32UL)
1001 #endif
1002 		 );
1003 
1004 	if ((qpti->sbits & (1 << tgt)) == 0) {
1005 		int ok = host_byte(Cmnd->result) == DID_OK;
		if (Cmnd->cmnd[0] == INQUIRY && ok) {
1007 			unsigned char *iqd;
1008 			unsigned int iqd_len;
1009 
1010 			iqd_len = scsi_rbuf_get(Cmnd, &iqd);
1011 
1012 			/* tags handled in midlayer */
1013 			/* enable sync mode? */
1014 			if (iqd[7] & 0x10) {
1015 				qpti->dev_param[tgt].device_flags |= 0x10;
1016 			} else {
1017 				qpti->dev_param[tgt].synchronous_offset = 0;
1018 				qpti->dev_param[tgt].synchronous_period = 0;
1019 			}
1020 			/* are we wide capable? */
1021 			if (iqd[7] & 0x20) {
1022 				qpti->dev_param[tgt].device_flags |= 0x20;
1023 			}
1024 
1025 			scsi_rbuf_put(Cmnd, iqd);
1026 
1027 			qpti->sbits |= (1 << tgt);
1028 		} else if (!ok) {
1029 			qpti->sbits |= (1 << tgt);
1030 		}
1031 	}
1032 	done(Cmnd);
1033 }
1034 
1035 static int qlogicpti_queuecommand(struct scsi_cmnd *Cmnd, void (*done)(struct scsi_cmnd *));
1036 
1037 static int qlogicpti_queuecommand_slow(struct scsi_cmnd *Cmnd,
1038 				       void (*done)(struct scsi_cmnd *))
1039 {
1040 	struct qlogicpti *qpti = (struct qlogicpti *) Cmnd->device->host->hostdata;
1041 
1042 	/*
1043 	 * done checking this host adapter?
1044 	 * If not, then rewrite the command
1045 	 * to finish through ourdone so we
1046 	 * can peek at Inquiry data results.
1047 	 */
1048 	if (qpti->sbits && qpti->sbits != 0xffff) {
		/* See the comment in ourdone() about this ugliness... */
1050 		Cmnd->SCp.Message = ((unsigned long)done) & 0xffffffff;
1051 #ifdef CONFIG_SPARC64
1052 		Cmnd->SCp.Status = ((unsigned long)done >> 32UL) & 0xffffffff;
1053 #endif
1054 		return qlogicpti_queuecommand(Cmnd, ourdone);
1055 	}
1056 
1057 	/*
1058 	 * We've peeked at all targets for this bus- time
1059 	 * to set parameters for devices for real now.
1060 	 */
1061 	if (qpti->sbits == 0xffff) {
1062 		int i;
1063 		for(i = 0; i < MAX_TARGETS; i++) {
1064 			u_short param[6];
1065 			param[0] = MBOX_SET_TARGET_PARAMS;
1066 			param[1] = (i << 8);
1067 			param[2] = (qpti->dev_param[i].device_flags << 8);
1068 			if (qpti->dev_param[i].device_flags & 0x10) {
1069 				param[3] = (qpti->dev_param[i].synchronous_offset << 8) |
1070 					qpti->dev_param[i].synchronous_period;
1071 			} else {
1072 				param[3] = 0;
1073 			}
1074 			(void) qlogicpti_mbox_command(qpti, param, 0);
1075 		}
1076 		/*
		 * Set to zero so any traversal through ourdone
		 * doesn't start the whole process again.
1079 		 */
1080 		qpti->sbits = 0;
1081 	}
1082 
1083 	/* check to see if we're done with all adapters... */
1084 	for (qpti = qptichain; qpti != NULL; qpti = qpti->next) {
1085 		if (qpti->sbits) {
1086 			break;
1087 		}
1088 	}
1089 
1090 	/*
	 * If we hit the end of the chain without finding an adapter that is
	 * still capability-configuring, then we're done with all adapters
	 * and can rock on...
1094 	 */
1095 	if (qpti == NULL)
1096 		Cmnd->device->host->hostt->queuecommand = qlogicpti_queuecommand;
1097 
1098 	return qlogicpti_queuecommand(Cmnd, done);
1099 }
1100 
1101 /*
1102  * The middle SCSI layer ensures that queuecommand never gets invoked
1103  * concurrently with itself or the interrupt handler (though the
1104  * interrupt handler may call this routine as part of
1105  * request-completion handling).
1106  *
1107  * "This code must fly." -davem
1108  */
1109 static int qlogicpti_queuecommand(struct scsi_cmnd *Cmnd, void (*done)(struct scsi_cmnd *))
1110 {
1111 	struct Scsi_Host *host = Cmnd->device->host;
1112 	struct qlogicpti *qpti = (struct qlogicpti *) host->hostdata;
1113 	struct Command_Entry *cmd;
1114 	u_int out_ptr;
1115 	int in_ptr;
1116 
1117 	Cmnd->scsi_done = done;
1118 
1119 	in_ptr = qpti->req_in_ptr;
1120 	cmd = (struct Command_Entry *) &qpti->req_cpu[in_ptr];
1121 	out_ptr = sbus_readw(qpti->qregs + MBOX4);
1122 	in_ptr = NEXT_REQ_PTR(in_ptr);
1123 	if (in_ptr == out_ptr)
1124 		goto toss_command;
1125 
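	/* Whenever a reset has been observed (qpti->send_marker), a SYNC_ALL
	 * marker entry built by marker_frob() is queued ahead of the next
	 * command so the firmware can resynchronize its transfer state.
	 */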
1126 	if (qpti->send_marker) {
1127 		marker_frob(cmd);
1128 		qpti->send_marker = 0;
1129 		if (NEXT_REQ_PTR(in_ptr) == out_ptr) {
1130 			sbus_writew(in_ptr, qpti->qregs + MBOX4);
1131 			qpti->req_in_ptr = in_ptr;
1132 			goto toss_command;
1133 		}
1134 		cmd = (struct Command_Entry *) &qpti->req_cpu[in_ptr];
1135 		in_ptr = NEXT_REQ_PTR(in_ptr);
1136 	}
1137 	cmd_frob(cmd, Cmnd, qpti);
1138 	if ((in_ptr = load_cmd(Cmnd, cmd, qpti, in_ptr, out_ptr)) == -1)
1139 		goto toss_command;
1140 
1141 	update_can_queue(host, in_ptr, out_ptr);
1142 
1143 	return 0;
1144 
1145 toss_command:
1146 	printk(KERN_EMERG "qlogicpti%d: request queue overflow\n",
1147 	       qpti->qpti_id);
1148 
1149 	/* Unfortunately, unless you use the new EH code, which
1150 	 * we don't, the midlayer will ignore the return value,
1151 	 * which is insane.  We pick up the pieces like this.
1152 	 */
	Cmnd->result = DID_BUS_BUSY << 16;	/* host byte lives in bits 16..23 */
1154 	done(Cmnd);
1155 	return 1;
1156 }
1157 
1158 static int qlogicpti_return_status(struct Status_Entry *sts, int id)
1159 {
1160 	int host_status = DID_ERROR;
1161 
1162 	switch (sts->completion_status) {
1163 	      case CS_COMPLETE:
1164 		host_status = DID_OK;
1165 		break;
1166 	      case CS_INCOMPLETE:
1167 		if (!(sts->state_flags & SF_GOT_BUS))
1168 			host_status = DID_NO_CONNECT;
1169 		else if (!(sts->state_flags & SF_GOT_TARGET))
1170 			host_status = DID_BAD_TARGET;
1171 		else if (!(sts->state_flags & SF_SENT_CDB))
1172 			host_status = DID_ERROR;
1173 		else if (!(sts->state_flags & SF_TRANSFERRED_DATA))
1174 			host_status = DID_ERROR;
1175 		else if (!(sts->state_flags & SF_GOT_STATUS))
1176 			host_status = DID_ERROR;
1177 		else if (!(sts->state_flags & SF_GOT_SENSE))
1178 			host_status = DID_ERROR;
1179 		break;
1180 	      case CS_DMA_ERROR:
1181 	      case CS_TRANSPORT_ERROR:
1182 		host_status = DID_ERROR;
1183 		break;
1184 	      case CS_RESET_OCCURRED:
1185 	      case CS_BUS_RESET:
1186 		host_status = DID_RESET;
1187 		break;
1188 	      case CS_ABORTED:
1189 		host_status = DID_ABORT;
1190 		break;
1191 	      case CS_TIMEOUT:
1192 		host_status = DID_TIME_OUT;
1193 		break;
1194 	      case CS_DATA_OVERRUN:
1195 	      case CS_COMMAND_OVERRUN:
1196 	      case CS_STATUS_OVERRUN:
1197 	      case CS_BAD_MESSAGE:
1198 	      case CS_NO_MESSAGE_OUT:
1199 	      case CS_EXT_ID_FAILED:
1200 	      case CS_IDE_MSG_FAILED:
1201 	      case CS_ABORT_MSG_FAILED:
1202 	      case CS_NOP_MSG_FAILED:
1203 	      case CS_PARITY_ERROR_MSG_FAILED:
1204 	      case CS_DEVICE_RESET_MSG_FAILED:
1205 	      case CS_ID_MSG_FAILED:
1206 	      case CS_UNEXP_BUS_FREE:
1207 		host_status = DID_ERROR;
1208 		break;
1209 	      case CS_DATA_UNDERRUN:
1210 		host_status = DID_OK;
1211 		break;
1212 	      default:
1213 		printk(KERN_EMERG "qpti%d: unknown completion status 0x%04x\n",
1214 		       id, sts->completion_status);
1215 		host_status = DID_ERROR;
1216 		break;
1217 	}
1218 
1219 	return (sts->scsi_status & STATUS_MASK) | (host_status << 16);
1220 }
1221 
1222 static struct scsi_cmnd *qlogicpti_intr_handler(struct qlogicpti *qpti)
1223 {
1224 	struct scsi_cmnd *Cmnd, *done_queue = NULL;
1225 	struct Status_Entry *sts;
1226 	u_int in_ptr, out_ptr;
1227 
1228 	if (!(sbus_readw(qpti->qregs + SBUS_STAT) & SBUS_STAT_RINT))
1229 		return NULL;
1230 
1231 	in_ptr = sbus_readw(qpti->qregs + MBOX5);
1232 	sbus_writew(HCCTRL_CRIRQ, qpti->qregs + HCCTRL);
1233 	if (sbus_readw(qpti->qregs + SBUS_SEMAPHORE) & SBUS_SEMAPHORE_LCK) {
1234 		switch (sbus_readw(qpti->qregs + MBOX0)) {
1235 		case ASYNC_SCSI_BUS_RESET:
1236 		case EXECUTION_TIMEOUT_RESET:
1237 			qpti->send_marker = 1;
1238 			break;
1239 		case INVALID_COMMAND:
1240 		case HOST_INTERFACE_ERROR:
1241 		case COMMAND_ERROR:
1242 		case COMMAND_PARAM_ERROR:
1243 			break;
		}
1245 		sbus_writew(0, qpti->qregs + SBUS_SEMAPHORE);
1246 	}
1247 
1248 	/* This looks like a network driver! */
1249 	out_ptr = qpti->res_out_ptr;
1250 	while (out_ptr != in_ptr) {
1251 		u_int cmd_slot;
1252 
1253 		sts = (struct Status_Entry *) &qpti->res_cpu[out_ptr];
1254 		out_ptr = NEXT_RES_PTR(out_ptr);
1255 
1256 		/* We store an index in the handle, not the pointer in
1257 		 * some form.  This avoids problems due to the fact
1258 		 * that the handle provided is only 32-bits. -DaveM
1259 		 */
1260 		cmd_slot = sts->handle;
1261 		Cmnd = qpti->cmd_slots[cmd_slot];
1262 		qpti->cmd_slots[cmd_slot] = NULL;
1263 
1264 		if (sts->completion_status == CS_RESET_OCCURRED ||
1265 		    sts->completion_status == CS_ABORTED ||
1266 		    (sts->status_flags & STF_BUS_RESET))
1267 			qpti->send_marker = 1;
1268 
1269 		if (sts->state_flags & SF_GOT_SENSE)
1270 			memcpy(Cmnd->sense_buffer, sts->req_sense_data,
1271 			       sizeof(Cmnd->sense_buffer));
1272 
1273 		if (sts->hdr.entry_type == ENTRY_STATUS)
1274 			Cmnd->result =
1275 			    qlogicpti_return_status(sts, qpti->qpti_id);
1276 		else
1277 			Cmnd->result = DID_ERROR << 16;
1278 
1279 		if (Cmnd->use_sg) {
1280 			sbus_unmap_sg(qpti->sdev,
1281 				      (struct scatterlist *)Cmnd->buffer,
1282 				      Cmnd->use_sg,
1283 				      Cmnd->sc_data_direction);
1284 		} else {
1285 			sbus_unmap_single(qpti->sdev,
1286 					  (__u32)((unsigned long)Cmnd->SCp.ptr),
1287 					  Cmnd->request_bufflen,
1288 					  Cmnd->sc_data_direction);
1289 		}
1290 		qpti->cmd_count[Cmnd->device->id]--;
1291 		sbus_writew(out_ptr, qpti->qregs + MBOX5);
1292 		Cmnd->host_scribble = (unsigned char *) done_queue;
1293 		done_queue = Cmnd;
1294 	}
1295 	qpti->res_out_ptr = out_ptr;
1296 
1297 	return done_queue;
1298 }
1299 
1300 static irqreturn_t qpti_intr(int irq, void *dev_id, struct pt_regs *regs)
1301 {
1302 	struct qlogicpti *qpti = dev_id;
1303 	unsigned long flags;
1304 	struct scsi_cmnd *dq;
1305 
1306 	spin_lock_irqsave(qpti->qhost->host_lock, flags);
1307 	dq = qlogicpti_intr_handler(qpti);
1308 
1309 	if (dq != NULL) {
1310 		do {
1311 			struct scsi_cmnd *next;
1312 
1313 			next = (struct scsi_cmnd *) dq->host_scribble;
1314 			dq->scsi_done(dq);
1315 			dq = next;
1316 		} while (dq != NULL);
1317 	}
1318 	spin_unlock_irqrestore(qpti->qhost->host_lock, flags);
1319 
1320 	return IRQ_HANDLED;
1321 }
1322 
1323 static int qlogicpti_abort(struct scsi_cmnd *Cmnd)
1324 {
1325 	u_short param[6];
1326 	struct Scsi_Host *host = Cmnd->device->host;
1327 	struct qlogicpti *qpti = (struct qlogicpti *) host->hostdata;
1328 	int return_status = SUCCESS;
1329 	u32 cmd_cookie;
1330 	int i;
1331 
1332 	printk(KERN_WARNING "qlogicpti : Aborting cmd for tgt[%d] lun[%d]\n",
1333 	       (int)Cmnd->device->id, (int)Cmnd->device->lun);
1334 
1335 	qlogicpti_disable_irqs(qpti);
1336 
1337 	/* Find the 32-bit cookie we gave to the firmware for
1338 	 * this command.
1339 	 */
1340 	for (i = 0; i < QLOGICPTI_REQ_QUEUE_LEN + 1; i++)
1341 		if (qpti->cmd_slots[i] == Cmnd)
1342 			break;
1343 	cmd_cookie = i;
1344 
1345 	param[0] = MBOX_ABORT;
1346 	param[1] = (((u_short) Cmnd->device->id) << 8) | Cmnd->device->lun;
1347 	param[2] = cmd_cookie >> 16;
1348 	param[3] = cmd_cookie & 0xffff;
1349 	if (qlogicpti_mbox_command(qpti, param, 0) ||
1350 	    (param[0] != MBOX_COMMAND_COMPLETE)) {
1351 		printk(KERN_EMERG "qlogicpti : scsi abort failure: %x\n", param[0]);
1352 		return_status = FAILED;
1353 	}
1354 
1355 	qlogicpti_enable_irqs(qpti);
1356 
1357 	return return_status;
1358 }
1359 
1360 static int qlogicpti_reset(struct scsi_cmnd *Cmnd)
1361 {
1362 	u_short param[6];
1363 	struct Scsi_Host *host = Cmnd->device->host;
1364 	struct qlogicpti *qpti = (struct qlogicpti *) host->hostdata;
1365 	int return_status = SUCCESS;
1366 
1367 	printk(KERN_WARNING "qlogicpti : Resetting SCSI bus!\n");
1368 
1369 	qlogicpti_disable_irqs(qpti);
1370 
1371 	param[0] = MBOX_BUS_RESET;
1372 	param[1] = qpti->host_param.bus_reset_delay;
1373 	if (qlogicpti_mbox_command(qpti, param, 0) ||
1374 	   (param[0] != MBOX_COMMAND_COMPLETE)) {
		printk(KERN_EMERG "qlogicpti : scsi bus reset failure: %x\n", param[0]);
1376 		return_status = FAILED;
1377 	}
1378 
1379 	qlogicpti_enable_irqs(qpti);
1380 
1381 	return return_status;
1382 }
1383 
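/* Note that .queuecommand initially points at the _slow variant; once every
 * registered adapter has peeked at INQUIRY data for all of its targets,
 * qlogicpti_queuecommand_slow() patches the template over to the fast
 * qlogicpti_queuecommand() (see the end of that function).
 */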
1384 static struct scsi_host_template qpti_template = {
1385 	.module			= THIS_MODULE,
1386 	.name			= "qlogicpti",
1387 	.info			= qlogicpti_info,
1388 	.queuecommand		= qlogicpti_queuecommand_slow,
1389 	.eh_abort_handler	= qlogicpti_abort,
1390 	.eh_bus_reset_handler	= qlogicpti_reset,
1391 	.can_queue		= QLOGICPTI_REQ_QUEUE_LEN,
1392 	.this_id		= 7,
1393 	.sg_tablesize		= QLOGICPTI_MAX_SG(QLOGICPTI_REQ_QUEUE_LEN),
1394 	.cmd_per_lun		= 1,
1395 	.use_clustering		= ENABLE_CLUSTERING,
1396 };
1397 
1398 static int __devinit qpti_sbus_probe(struct of_device *dev, const struct of_device_id *match)
1399 {
1400 	static int nqptis;
1401 	struct sbus_dev *sdev = to_sbus_device(&dev->dev);
1402 	struct device_node *dp = dev->node;
1403 	struct scsi_host_template *tpnt = match->data;
1404 	struct Scsi_Host *host;
1405 	struct qlogicpti *qpti;
1406 	char *fcode;
1407 
1408 	/* Sometimes Antares cards come up not completely
1409 	 * setup, and we get a report of a zero IRQ.
1410 	 */
1411 	if (sdev->irqs[0] == 0)
1412 		return -ENODEV;
1413 
1414 	host = scsi_host_alloc(tpnt, sizeof(struct qlogicpti));
1415 	if (!host)
1416 		return -ENOMEM;
1417 
1418 	qpti = (struct qlogicpti *) host->hostdata;
1419 
1420 	host->max_id = MAX_TARGETS;
1421 	qpti->qhost = host;
1422 	qpti->sdev = sdev;
1423 	qpti->qpti_id = nqptis;
1424 	qpti->prom_node = sdev->prom_node;
1425 	strcpy(qpti->prom_name, sdev->ofdev.node->name);
1426 	qpti->is_pti = strcmp(qpti->prom_name, "QLGC,isp");
1427 
1428 	if (qpti_map_regs(qpti) < 0)
1429 		goto fail_unlink;
1430 
1431 	if (qpti_register_irq(qpti) < 0)
1432 		goto fail_unmap_regs;
1433 
1434 	qpti_get_scsi_id(qpti);
1435 	qpti_get_bursts(qpti);
1436 	qpti_get_clock(qpti);
1437 
1438 	/* Clear out scsi_cmnd array. */
1439 	memset(qpti->cmd_slots, 0, sizeof(qpti->cmd_slots));
1440 
1441 	if (qpti_map_queues(qpti) < 0)
1442 		goto fail_free_irq;
1443 
1444 	/* Load the firmware. */
1445 	if (qlogicpti_load_firmware(qpti))
1446 		goto fail_unmap_queues;
1447 	if (qpti->is_pti) {
1448 		/* Check the PTI status reg. */
1449 		if (qlogicpti_verify_tmon(qpti))
1450 			goto fail_unmap_queues;
1451 	}
1452 
1453 	/* Reset the ISP and init res/req queues. */
1454 	if (qlogicpti_reset_hardware(host))
1455 		goto fail_unmap_queues;
1456 
1457 	if (scsi_add_host(host, &dev->dev))
1458 		goto fail_unmap_queues;
1459 
1460 	printk("(Firmware v%d.%d.%d)", qpti->fware_majrev,
1461 	       qpti->fware_minrev, qpti->fware_micrev);
1462 
1463 	fcode = of_get_property(dp, "isp-fcode", NULL);
1464 	if (fcode && fcode[0])
1465 		printk("(Firmware %s)", fcode);
1466 	if (of_find_property(dp, "differential", NULL) != NULL)
1467 		qpti->differential = 1;
1468 
1469 	printk (" [%s Wide, using %s interface]\n",
1470 		(qpti->ultra ? "Ultra" : "Fast"),
1471 		(qpti->differential ? "differential" : "single ended"));
1472 
1473 	dev_set_drvdata(&sdev->ofdev.dev, qpti);
1474 
1475 	qpti_chain_add(qpti);
1476 
1477 	scsi_scan_host(host);
1478 	nqptis++;
1479 
1480 	return 0;
1481 
1482 fail_unmap_queues:
1483 #define QSIZE(entries)	(((entries) + 1) * QUEUE_ENTRY_LEN)
1484 	sbus_free_consistent(qpti->sdev,
1485 			     QSIZE(RES_QUEUE_LEN),
1486 			     qpti->res_cpu, qpti->res_dvma);
1487 	sbus_free_consistent(qpti->sdev,
1488 			     QSIZE(QLOGICPTI_REQ_QUEUE_LEN),
1489 			     qpti->req_cpu, qpti->req_dvma);
1490 #undef QSIZE
1491 
1492 fail_unmap_regs:
1493 	sbus_iounmap(qpti->qregs,
1494 		     qpti->sdev->reg_addrs[0].reg_size);
1495 	if (qpti->is_pti)
1496 		sbus_iounmap(qpti->sreg, sizeof(unsigned char));
1497 
1498 fail_free_irq:
1499 	free_irq(qpti->irq, qpti);
1500 
1501 fail_unlink:
1502 	scsi_host_put(host);
1503 
1504 	return -ENODEV;
1505 }
1506 
1507 static int __devexit qpti_sbus_remove(struct of_device *dev)
1508 {
1509 	struct qlogicpti *qpti = dev_get_drvdata(&dev->dev);
1510 
1511 	qpti_chain_del(qpti);
1512 
1513 	scsi_remove_host(qpti->qhost);
1514 
1515 	/* Shut up the card. */
1516 	sbus_writew(0, qpti->qregs + SBUS_CTRL);
1517 
1518 	/* Free IRQ handler and unmap Qlogic,ISP and PTI status regs. */
1519 	free_irq(qpti->irq, qpti);
1520 
1521 #define QSIZE(entries)	(((entries) + 1) * QUEUE_ENTRY_LEN)
1522 	sbus_free_consistent(qpti->sdev,
1523 			     QSIZE(RES_QUEUE_LEN),
1524 			     qpti->res_cpu, qpti->res_dvma);
1525 	sbus_free_consistent(qpti->sdev,
1526 			     QSIZE(QLOGICPTI_REQ_QUEUE_LEN),
1527 			     qpti->req_cpu, qpti->req_dvma);
1528 #undef QSIZE
1529 
1530 	sbus_iounmap(qpti->qregs, qpti->sdev->reg_addrs[0].reg_size);
1531 	if (qpti->is_pti)
1532 		sbus_iounmap(qpti->sreg, sizeof(unsigned char));
1533 
1534 	scsi_host_put(qpti->qhost);
1535 
1536 	return 0;
1537 }
1538 
1539 static struct of_device_id qpti_match[] = {
1540 	{
1541 		.name = "ptisp",
1542 		.data = &qpti_template,
1543 	},
1544 	{
1545 		.name = "PTI,ptisp",
1546 		.data = &qpti_template,
1547 	},
1548 	{
1549 		.name = "QLGC,isp",
1550 		.data = &qpti_template,
1551 	},
1552 	{
1553 		.name = "SUNW,isp",
1554 		.data = &qpti_template,
1555 	},
1556 	{},
1557 };
1558 MODULE_DEVICE_TABLE(of, qpti_match);
1559 
1560 static struct of_platform_driver qpti_sbus_driver = {
1561 	.name		= "qpti",
1562 	.match_table	= qpti_match,
1563 	.probe		= qpti_sbus_probe,
1564 	.remove		= __devexit_p(qpti_sbus_remove),
1565 };
1566 
1567 static int __init qpti_init(void)
1568 {
1569 	return of_register_driver(&qpti_sbus_driver, &sbus_bus_type);
1570 }
1571 
1572 static void __exit qpti_exit(void)
1573 {
1574 	of_unregister_driver(&qpti_sbus_driver);
1575 }
1576 
1577 MODULE_DESCRIPTION("QlogicISP SBUS driver");
1578 MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
1579 MODULE_LICENSE("GPL");
1580 MODULE_VERSION("2.0");
1581 
1582 module_init(qpti_init);
1583 module_exit(qpti_exit);
1584