1 /* -*- mode: c; c-basic-offset: 8 -*- */
2 
3 /* NCR (or Symbios) 53c700 and 53c700-66 Driver
4  *
5  * Copyright (C) 2001 by James.Bottomley@HansenPartnership.com
6 **-----------------------------------------------------------------------------
7 **
8 **  This program is free software; you can redistribute it and/or modify
9 **  it under the terms of the GNU General Public License as published by
10 **  the Free Software Foundation; either version 2 of the License, or
11 **  (at your option) any later version.
12 **
13 **  This program is distributed in the hope that it will be useful,
14 **  but WITHOUT ANY WARRANTY; without even the implied warranty of
15 **  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
16 **  GNU General Public License for more details.
17 **
18 **  You should have received a copy of the GNU General Public License
19 **  along with this program; if not, write to the Free Software
20 **  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
21 **
22 **-----------------------------------------------------------------------------
23  */
24 
25 /* Notes:
26  *
27  * This driver is designed exclusively for these chips (virtually the
28  * earliest of the scripts engine chips).  They need their own drivers
29  * because they are missing so many of the scripts and snazzy register
30  * features of their elder brothers (the 710, 720 and 770).
31  *
32  * The 700 is the lowliest of the line, it can only do async SCSI.
33  * The 700-66 can at least do synchronous SCSI up to 10MHz.
34  *
35  * The 700 chip has no host bus interface logic of its own.  However,
36  * it is usually mapped to a location with well defined register
37  * offsets.  Therefore, if you can determine the base address and the
38  * irq your board incorporating this chip uses, you can probably use
39  * this driver to run it (although you'll probably have to write a
40  * minimal wrapper for the purpose---see the NCR_D700 driver for
41  * details about how to do this).
42  *
43  *
44  * TODO List:
45  *
46  * 1. Better statistics in the proc fs
47  *
48  * 2. Implement message queue (queues SCSI messages like commands) and make
49  *    the abort and device reset functions use them.
50  * */
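/* For illustration only (a hedged sketch, not code taken from any
 * existing wrapper---the NCR_D700 driver is the canonical example):
 * a minimal board wrapper built on the entry points in this file
 * might do roughly
 *
 *	struct NCR_700_Host_Parameters *hostdata =
 *		kmalloc(sizeof(*hostdata), GFP_KERNEL);
 *
 *	memset(hostdata, 0, sizeof(*hostdata));
 *	hostdata->base  = base;		(ioremapped chip registers)
 *	hostdata->clock = 25;		(SCSI clock in MHz, board specific)
 *	hostdata->dev   = dev;
 *	host = NCR_700_detect(&board_template, hostdata, dev);
 *	if(host != NULL)
 *		request_irq(irq, NCR_700_intr, SA_SHIRQ, "board", host);
 *
 * where board_template, base, dev and irq all come from the board's
 * own probe code, and NCR_700_release() undoes the detect on removal.
 */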
51 
52 /* CHANGELOG
53  *
54  * Version 2.8
55  *
56  * Fixed bad bug affecting tag starvation processing (previously the
57  * driver would hang the system if too many tags starved).  Also fixed
58  * bad bug having to do with 10 byte command processing and REQUEST
59  * SENSE (the command would loop forever getting a transfer length
60  * mismatch in the CMD phase).
61  *
62  * Version 2.7
63  *
64  * Fixed scripts problem which caused certain devices (notably CDRWs)
65  * to hang on initial INQUIRY.  Updated NCR_700_readl/writel to use
66  * __raw_readl/writel for parisc compatibility (Thomas
67  * Bogendoerfer). Added missing SCp->request_bufflen initialisation
68  * for sense requests (Ryan Bradetich).
69  *
70  * Version 2.6
71  *
72  * Following test of the 64 bit parisc kernel by Richard Hirst,
73  * several problems have now been corrected.  Also adds support for
74  * consistent memory allocation.
75  *
76  * Version 2.5
77  *
78  * More compatibility changes for 710 (now actually works).  Enhanced
79  * support for odd clock speeds which constrain SDTR negotiations.
80  * Correct cacheline separation for scsi messages and status for
81  * incoherent architectures.  Use of the pci mapping functions on
82  * buffers to begin support for 64 bit drivers.
83  *
84  * Version 2.4
85  *
86  * Added support for the 53c710 chip (in 53c700 emulation mode only---no
87  * special 53c710 instructions or registers are used).
88  *
89  * Version 2.3
90  *
91  * More endianness/cache coherency changes.
92  *
93  * Better bad device handling (handles devices lying about tag
94  * queueing support and devices which fail to provide sense data on
95  * contingent allegiance conditions)
96  *
97  * Many thanks to Richard Hirst <rhirst@linuxcare.com> for patiently
98  * debugging this driver on the parisc architecture and suggesting
99  * many improvements and bug fixes.
100  *
101  * Thanks also go to Linuxcare Inc. for providing several PARISC
102  * machines for me to debug the driver on.
103  *
104  * Version 2.2
105  *
106  * Made the driver mem or io mapped; added endian invariance; added
107  * dma cache flushing operations for architectures which need it;
108  * added support for more varied clocking speeds.
109  *
110  * Version 2.1
111  *
112  * Initial modularisation from the D700.  See NCR_D700.c for the rest of
113  * the changelog.
114  * */
115 #define NCR_700_VERSION "2.8"
116 
117 #include <linux/config.h>
118 #include <linux/kernel.h>
119 #include <linux/types.h>
120 #include <linux/string.h>
121 #include <linux/ioport.h>
122 #include <linux/delay.h>
123 #include <linux/spinlock.h>
124 #include <linux/completion.h>
125 #include <linux/sched.h>
126 #include <linux/init.h>
127 #include <linux/proc_fs.h>
128 #include <linux/blkdev.h>
129 #include <linux/module.h>
130 #include <linux/interrupt.h>
131 #include <linux/device.h>
132 #include <asm/dma.h>
133 #include <asm/system.h>
134 #include <asm/io.h>
135 #include <asm/pgtable.h>
136 #include <asm/byteorder.h>
137 
138 #include <scsi/scsi.h>
139 #include <scsi/scsi_cmnd.h>
140 #include <scsi/scsi_dbg.h>
141 #include <scsi/scsi_eh.h>
142 #include <scsi/scsi_host.h>
143 #include <scsi/scsi_tcq.h>
144 #include <scsi/scsi_transport.h>
145 #include <scsi/scsi_transport_spi.h>
146 
147 #include "53c700.h"
148 
149 /* NOTE: For 64 bit drivers there are points in the code where we use
150  * a non dereferenceable pointer to point to a structure in dma-able
151  * memory (which is 32 bits) so that we can use all of the structure
152  * operations but take the address at the end.  This macro allows us
153  * to truncate the 64 bit pointer down to 32 bits without the compiler
154  * complaining */
155 #define to32bit(x)	((__u32)((unsigned long)(x)))
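/* e.g. later in this file the scripts are patched with
 * to32bit(&slot->pSG[0].ins), where pSG holds a (32 bit) bus address
 * rather than a dereferenceable kernel pointer */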
156 
157 #ifdef NCR_700_DEBUG
158 #define STATIC
159 #else
160 #define STATIC static
161 #endif
162 
163 MODULE_AUTHOR("James Bottomley");
164 MODULE_DESCRIPTION("53c700 and 53c700-66 Driver");
165 MODULE_LICENSE("GPL");
166 
167 /* This is the script */
168 #include "53c700_d.h"
169 
170 
171 STATIC int NCR_700_queuecommand(struct scsi_cmnd *, void (*done)(struct scsi_cmnd *));
172 STATIC int NCR_700_abort(struct scsi_cmnd * SCpnt);
173 STATIC int NCR_700_bus_reset(struct scsi_cmnd * SCpnt);
174 STATIC int NCR_700_host_reset(struct scsi_cmnd * SCpnt);
175 STATIC void NCR_700_chip_setup(struct Scsi_Host *host);
176 STATIC void NCR_700_chip_reset(struct Scsi_Host *host);
177 STATIC int NCR_700_slave_configure(struct scsi_device *SDpnt);
178 STATIC void NCR_700_slave_destroy(struct scsi_device *SDpnt);
179 static int NCR_700_change_queue_depth(struct scsi_device *SDpnt, int depth);
180 static int NCR_700_change_queue_type(struct scsi_device *SDpnt, int depth);
181 
182 STATIC struct device_attribute *NCR_700_dev_attrs[];
183 
184 STATIC struct scsi_transport_template *NCR_700_transport_template = NULL;
185 
186 static char *NCR_700_phase[] = {
187 	"",
188 	"after selection",
189 	"before command phase",
190 	"after command phase",
191 	"after status phase",
192 	"after data in phase",
193 	"after data out phase",
194 	"during data phase",
195 };
196 
197 static char *NCR_700_condition[] = {
198 	"",
199 	"NOT MSG_OUT",
200 	"UNEXPECTED PHASE",
201 	"NOT MSG_IN",
202 	"UNEXPECTED MSG",
203 	"MSG_IN",
204 	"SDTR_MSG RECEIVED",
205 	"REJECT_MSG RECEIVED",
206 	"DISCONNECT_MSG RECEIVED",
207 	"MSG_OUT",
208 	"DATA_IN",
209 
210 };
211 
212 static char *NCR_700_fatal_messages[] = {
213 	"unexpected message after reselection",
214 	"still MSG_OUT after message injection",
215 	"not MSG_IN after selection",
216 	"Illegal message length received",
217 };
218 
219 static char *NCR_700_SBCL_bits[] = {
220 	"IO ",
221 	"CD ",
222 	"MSG ",
223 	"ATN ",
224 	"SEL ",
225 	"BSY ",
226 	"ACK ",
227 	"REQ ",
228 };
229 
230 static char *NCR_700_SBCL_to_phase[] = {
231 	"DATA_OUT",
232 	"DATA_IN",
233 	"CMD_OUT",
234 	"STATUS",
235 	"ILLEGAL PHASE",
236 	"ILLEGAL PHASE",
237 	"MSG OUT",
238 	"MSG IN",
239 };
240 
241 /* This translates the SDTR message offset and period to a value
242  * which can be loaded into the SXFER_REG.
243  *
244  * NOTE: According to SCSI-2, the true transfer period (in ns) is
245  *       actually four times this period value */
246 static inline __u8
247 NCR_700_offset_period_to_sxfer(struct NCR_700_Host_Parameters *hostdata,
248 			       __u8 offset, __u8 period)
249 {
250 	int XFERP;
251 
252 	__u8 min_xferp = (hostdata->chip710
253 			  ? NCR_710_MIN_XFERP : NCR_700_MIN_XFERP);
254 	__u8 max_offset = (hostdata->chip710
255 			   ? NCR_710_MAX_OFFSET : NCR_700_MAX_OFFSET);
256 
257 	if(offset == 0)
258 		return 0;
259 
260 	if(period < hostdata->min_period) {
261 		printk(KERN_WARNING "53c700: Period %dns is less than this chip's minimum, setting to %d\n", period*4, hostdata->min_period*4);
262 		period = hostdata->min_period;
263 	}
264 	XFERP = (period*4 * hostdata->sync_clock)/1000 - 4;
265 	if(offset > max_offset) {
266 		printk(KERN_WARNING "53c700: Offset %d exceeds chip maximum, setting to %d\n",
267 		       offset, max_offset);
268 		offset = max_offset;
269 	}
270 	if(XFERP < min_xferp) {
271 		printk(KERN_WARNING "53c700: XFERP %d is less than minimum, setting to %d\n",
272 		       XFERP,  min_xferp);
273 		XFERP =  min_xferp;
274 	}
275 	return (offset & 0x0f) | (XFERP & 0x07)<<4;
276 }
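/* Worked example (illustrative): with a 50MHz sync clock, an SDTR
 * period value of 50 (i.e. 200ns) and an offset of 8 give
 * XFERP = (200 * 50)/1000 - 4 = 6, so the value returned is
 * (8 & 0x0f) | (6 & 0x07)<<4 == 0x68, assuming neither the offset nor
 * the period needed clamping above */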
277 
278 static inline __u8
279 NCR_700_get_SXFER(struct scsi_device *SDp)
280 {
281 	struct NCR_700_Host_Parameters *hostdata =
282 		(struct NCR_700_Host_Parameters *)SDp->host->hostdata[0];
283 
284 	return NCR_700_offset_period_to_sxfer(hostdata,
285 					      spi_offset(SDp->sdev_target),
286 					      spi_period(SDp->sdev_target));
287 }
288 
289 struct Scsi_Host *
290 NCR_700_detect(struct scsi_host_template *tpnt,
291 	       struct NCR_700_Host_Parameters *hostdata, struct device *dev)
292 {
293 	dma_addr_t pScript, pSlots;
294 	__u8 *memory;
295 	__u32 *script;
296 	struct Scsi_Host *host;
297 	static int banner = 0;
298 	int j;
299 
300 	if(tpnt->sdev_attrs == NULL)
301 		tpnt->sdev_attrs = NCR_700_dev_attrs;
302 
303 	memory = dma_alloc_noncoherent(hostdata->dev, TOTAL_MEM_SIZE,
304 				       &pScript, GFP_KERNEL);
305 	if(memory == NULL) {
306 		printk(KERN_ERR "53c700: Failed to allocate memory for driver, detaching\n");
307 		return NULL;
308 	}
309 
310 	script = (__u32 *)memory;
311 	hostdata->msgin = memory + MSGIN_OFFSET;
312 	hostdata->msgout = memory + MSGOUT_OFFSET;
313 	hostdata->status = memory + STATUS_OFFSET;
314 	/* all of these offsets are L1_CACHE_BYTES separated.  It is fatal
315 	 * if this isn't sufficient separation to avoid dma flushing issues */
316 	BUG_ON(!dma_is_consistent(pScript) && L1_CACHE_BYTES < dma_get_cache_alignment());
317 	hostdata->slots = (struct NCR_700_command_slot *)(memory + SLOTS_OFFSET);
318 	hostdata->dev = dev;
319 
320 	pSlots = pScript + SLOTS_OFFSET;
321 
322 	/* Fill in the missing routines from the host template */
323 	tpnt->queuecommand = NCR_700_queuecommand;
324 	tpnt->eh_abort_handler = NCR_700_abort;
325 	tpnt->eh_bus_reset_handler = NCR_700_bus_reset;
326 	tpnt->eh_host_reset_handler = NCR_700_host_reset;
327 	tpnt->can_queue = NCR_700_COMMAND_SLOTS_PER_HOST;
328 	tpnt->sg_tablesize = NCR_700_SG_SEGMENTS;
329 	tpnt->cmd_per_lun = NCR_700_CMD_PER_LUN;
330 	tpnt->use_clustering = ENABLE_CLUSTERING;
331 	tpnt->slave_configure = NCR_700_slave_configure;
332 	tpnt->slave_destroy = NCR_700_slave_destroy;
333 	tpnt->change_queue_depth = NCR_700_change_queue_depth;
334 	tpnt->change_queue_type = NCR_700_change_queue_type;
335 
336 	if(tpnt->name == NULL)
337 		tpnt->name = "53c700";
338 	if(tpnt->proc_name == NULL)
339 		tpnt->proc_name = "53c700";
340 
341 
342 	host = scsi_host_alloc(tpnt, 4);
343 	if (!host)
344 		return NULL;
345 	memset(hostdata->slots, 0, sizeof(struct NCR_700_command_slot)
346 	       * NCR_700_COMMAND_SLOTS_PER_HOST);
347 	for(j = 0; j < NCR_700_COMMAND_SLOTS_PER_HOST; j++) {
348 		dma_addr_t offset = (dma_addr_t)((unsigned long)&hostdata->slots[j].SG[0]
349 					  - (unsigned long)&hostdata->slots[0].SG[0]);
350 		hostdata->slots[j].pSG = (struct NCR_700_SG_List *)((unsigned long)(pSlots + offset));
351 		if(j == 0)
352 			hostdata->free_list = &hostdata->slots[j];
353 		else
354 			hostdata->slots[j-1].ITL_forw = &hostdata->slots[j];
355 		hostdata->slots[j].state = NCR_700_SLOT_FREE;
356 	}
357 
358 	for(j = 0; j < sizeof(SCRIPT)/sizeof(SCRIPT[0]); j++) {
359 		script[j] = bS_to_host(SCRIPT[j]);
360 	}
361 
362 	/* adjust all labels to be bus physical */
363 	for(j = 0; j < PATCHES; j++) {
364 		script[LABELPATCHES[j]] = bS_to_host(pScript + SCRIPT[LABELPATCHES[j]]);
365 	}
366 	/* now patch up fixed addresses. */
367 	script_patch_32(script, MessageLocation,
368 			pScript + MSGOUT_OFFSET);
369 	script_patch_32(script, StatusAddress,
370 			pScript + STATUS_OFFSET);
371 	script_patch_32(script, ReceiveMsgAddress,
372 			pScript + MSGIN_OFFSET);
373 
374 	hostdata->script = script;
375 	hostdata->pScript = pScript;
376 	dma_sync_single_for_device(hostdata->dev, pScript, sizeof(SCRIPT), DMA_TO_DEVICE);
377 	hostdata->state = NCR_700_HOST_FREE;
378 	hostdata->cmd = NULL;
379 	host->max_id = 7;
380 	host->max_lun = NCR_700_MAX_LUNS;
381 	BUG_ON(NCR_700_transport_template == NULL);
382 	host->transportt = NCR_700_transport_template;
383 	host->unique_id = (unsigned long)hostdata->base;
384 	hostdata->eh_complete = NULL;
385 	host->hostdata[0] = (unsigned long)hostdata;
386 	/* kick the chip */
387 	NCR_700_writeb(0xff, host, CTEST9_REG);
388 	if(hostdata->chip710)
389 		hostdata->rev = (NCR_700_readb(host, CTEST8_REG)>>4) & 0x0f;
390 	else
391 		hostdata->rev = (NCR_700_readb(host, CTEST7_REG)>>4) & 0x0f;
392 	hostdata->fast = (NCR_700_readb(host, CTEST9_REG) == 0);
393 	if(banner == 0) {
394 		printk(KERN_NOTICE "53c700: Version " NCR_700_VERSION " By James.Bottomley@HansenPartnership.com\n");
395 		banner = 1;
396 	}
397 	printk(KERN_NOTICE "scsi%d: %s rev %d %s\n", host->host_no,
398 	       hostdata->chip710 ? "53c710" :
399 	       (hostdata->fast ? "53c700-66" : "53c700"),
400 	       hostdata->rev, hostdata->differential ?
401 	       "(Differential)" : "");
402 	/* reset the chip */
403 	NCR_700_chip_reset(host);
404 
405 	if (scsi_add_host(host, dev)) {
406 		dev_printk(KERN_ERR, dev, "53c700: scsi_add_host failed\n");
407 		scsi_host_put(host);
408 		return NULL;
409 	}
410 
411 	spi_signalling(host) = hostdata->differential ? SPI_SIGNAL_HVD :
412 		SPI_SIGNAL_SE;
413 
414 	return host;
415 }
416 
417 int
418 NCR_700_release(struct Scsi_Host *host)
419 {
420 	struct NCR_700_Host_Parameters *hostdata =
421 		(struct NCR_700_Host_Parameters *)host->hostdata[0];
422 
423 	dma_free_noncoherent(hostdata->dev, TOTAL_MEM_SIZE,
424 			       hostdata->script, hostdata->pScript);
425 	return 1;
426 }
427 
428 static inline __u8
429 NCR_700_identify(int can_disconnect, __u8 lun)
430 {
431 	return IDENTIFY_BASE |
432 		((can_disconnect) ? 0x40 : 0) |
433 		(lun & NCR_700_LUN_MASK);
434 }
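/* e.g. NCR_700_identify(1, 2) yields the IDENTIFY byte 0xc2 (assuming
 * the usual IDENTIFY_BASE of 0x80): disconnect allowed, LUN 2 */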
435 
436 /*
437  * Function : static int data_residual (Scsi_Host *host)
438  *
439  * Purpose : return residual data count of what's in the chip.  If you
440  * really want to know what this function is doing, it's almost a
441  * direct transcription of the algorithm described in the 53c710
442  * guide, except that the DBC and DFIFO registers are only 6 bits
443  * wide on a 53c700.
444  *
445  * Inputs : host - SCSI host */
446 static inline int
447 NCR_700_data_residual (struct Scsi_Host *host) {
448 	struct NCR_700_Host_Parameters *hostdata =
449 		(struct NCR_700_Host_Parameters *)host->hostdata[0];
450 	int count, synchronous = 0;
451 	unsigned int ddir;
452 
453 	if(hostdata->chip710) {
454 		count = ((NCR_700_readb(host, DFIFO_REG) & 0x7f) -
455 			 (NCR_700_readl(host, DBC_REG) & 0x7f)) & 0x7f;
456 	} else {
457 		count = ((NCR_700_readb(host, DFIFO_REG) & 0x3f) -
458 			 (NCR_700_readl(host, DBC_REG) & 0x3f)) & 0x3f;
459 	}
460 
461 	if(hostdata->fast)
462 		synchronous = NCR_700_readb(host, SXFER_REG) & 0x0f;
463 
464 	/* get the data direction */
465 	ddir = NCR_700_readb(host, CTEST0_REG) & 0x01;
466 
467 	if (ddir) {
468 		/* Receive */
469 		if (synchronous)
470 			count += (NCR_700_readb(host, SSTAT2_REG) & 0xf0) >> 4;
471 		else
472 			if (NCR_700_readb(host, SSTAT1_REG) & SIDL_REG_FULL)
473 				++count;
474 	} else {
475 		/* Send */
476 		__u8 sstat = NCR_700_readb(host, SSTAT1_REG);
477 		if (sstat & SODL_REG_FULL)
478 			++count;
479 		if (synchronous && (sstat & SODR_REG_FULL))
480 			++count;
481 	}
482 #ifdef NCR_700_DEBUG
483 	if(count)
484 		printk("RESIDUAL IS %d (ddir %d)\n", count, ddir);
485 #endif
486 	return count;
487 }
488 
489 /* print out the SCSI wires and corresponding phase from the SBCL register
490  * in the chip */
491 static inline char *
492 sbcl_to_string(__u8 sbcl)
493 {
494 	int i;
495 	static char ret[256];
496 
497 	ret[0]='\0';
498 	for(i=0; i<8; i++) {
499 		if((1<<i) & sbcl)
500 			strcat(ret, NCR_700_SBCL_bits[i]);
501 	}
502 	strcat(ret, NCR_700_SBCL_to_phase[sbcl & 0x07]);
503 	return ret;
504 }
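/* e.g. an SBCL value of 0x07 (IO, CD and MSG asserted) decodes to
 * "IO CD MSG MSG IN" with the tables above */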
505 
506 static inline __u8
507 bitmap_to_number(__u8 bitmap)
508 {
509 	__u8 i;
510 
511 	for(i=0; i<8 && !(bitmap &(1<<i)); i++)
512 		;
513 	return i;
514 }
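/* e.g. a reselection bitmap of 0x20 (only bit 5 set) converts to
 * SCSI ID 5 */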
515 
516 /* Pull a slot off the free list */
517 STATIC struct NCR_700_command_slot *
518 find_empty_slot(struct NCR_700_Host_Parameters *hostdata)
519 {
520 	struct NCR_700_command_slot *slot = hostdata->free_list;
521 
522 	if(slot == NULL) {
523 		/* sanity check */
524 		if(hostdata->command_slot_count != NCR_700_COMMAND_SLOTS_PER_HOST)
525 			printk(KERN_ERR "SLOTS FULL, but count is %d, should be %d\n", hostdata->command_slot_count, NCR_700_COMMAND_SLOTS_PER_HOST);
526 		return NULL;
527 	}
528 
529 	if(slot->state != NCR_700_SLOT_FREE)
530 		/* should panic! */
531 		printk(KERN_ERR "BUSY SLOT ON FREE LIST!!!\n");
532 
533 
534 	hostdata->free_list = slot->ITL_forw;
535 	slot->ITL_forw = NULL;
536 
537 
538 	/* NOTE: set the state to busy here, not queued, since this
539 	 * indicates the slot is in use and cannot be run by the IRQ
540 	 * finish routine.  If we cannot queue the command when it
541 	 * is properly built, we then change to NCR_700_SLOT_QUEUED */
542 	slot->state = NCR_700_SLOT_BUSY;
543 	hostdata->command_slot_count++;
544 
545 	return slot;
546 }
547 
548 STATIC void
549 free_slot(struct NCR_700_command_slot *slot,
550 	  struct NCR_700_Host_Parameters *hostdata)
551 {
552 	if((slot->state & NCR_700_SLOT_MASK) != NCR_700_SLOT_MAGIC) {
553 		printk(KERN_ERR "53c700: SLOT %p is not MAGIC!!!\n", slot);
554 	}
555 	if(slot->state == NCR_700_SLOT_FREE) {
556 		printk(KERN_ERR "53c700: SLOT %p is FREE!!!\n", slot);
557 	}
558 
559 	slot->resume_offset = 0;
560 	slot->cmnd = NULL;
561 	slot->state = NCR_700_SLOT_FREE;
562 	slot->ITL_forw = hostdata->free_list;
563 	hostdata->free_list = slot;
564 	hostdata->command_slot_count--;
565 }
566 
567 
568 /* This routine really does very little.  The command is indexed on
569    the ITL and (if tagged) the ITLQ lists in _queuecommand */
570 STATIC void
571 save_for_reselection(struct NCR_700_Host_Parameters *hostdata,
572 		     struct scsi_cmnd *SCp, __u32 dsp)
573 {
574 	/* It's just possible that this gets executed twice */
575 	if(SCp != NULL) {
576 		struct NCR_700_command_slot *slot =
577 			(struct NCR_700_command_slot *)SCp->host_scribble;
578 
579 		slot->resume_offset = dsp;
580 	}
581 	hostdata->state = NCR_700_HOST_FREE;
582 	hostdata->cmd = NULL;
583 }
584 
585 STATIC inline void
586 NCR_700_unmap(struct NCR_700_Host_Parameters *hostdata, struct scsi_cmnd *SCp,
587 	      struct NCR_700_command_slot *slot)
588 {
589 	if(SCp->sc_data_direction != DMA_NONE &&
590 	   SCp->sc_data_direction != DMA_BIDIRECTIONAL) {
591 		if(SCp->use_sg) {
592 			dma_unmap_sg(hostdata->dev, SCp->buffer,
593 				     SCp->use_sg, SCp->sc_data_direction);
594 		} else {
595 			dma_unmap_single(hostdata->dev, slot->dma_handle,
596 					 SCp->request_bufflen,
597 					 SCp->sc_data_direction);
598 		}
599 	}
600 }
601 
602 STATIC inline void
603 NCR_700_scsi_done(struct NCR_700_Host_Parameters *hostdata,
604 	       struct scsi_cmnd *SCp, int result)
605 {
606 	hostdata->state = NCR_700_HOST_FREE;
607 	hostdata->cmd = NULL;
608 
609 	if(SCp != NULL) {
610 		struct NCR_700_command_slot *slot =
611 			(struct NCR_700_command_slot *)SCp->host_scribble;
612 
613 		NCR_700_unmap(hostdata, SCp, slot);
614 		dma_unmap_single(hostdata->dev, slot->pCmd,
615 				 sizeof(SCp->cmnd), DMA_TO_DEVICE);
616 		if(SCp->cmnd[0] == REQUEST_SENSE && SCp->cmnd[6] == NCR_700_INTERNAL_SENSE_MAGIC) {
617 #ifdef NCR_700_DEBUG
618 			printk(" ORIGINAL CMD %p RETURNED %d, new return is %d sense is\n",
619 			       SCp, SCp->cmnd[7], result);
620 			scsi_print_sense("53c700", SCp);
621 
622 #endif
623 			/* restore the old result if the request sense was
624 			 * successful */
625 			if(result == 0)
626 				result = SCp->cmnd[7];
627 			/* now restore the original command */
628 			memcpy((void *) SCp->cmnd, (void *) SCp->data_cmnd,
629 			       sizeof(SCp->data_cmnd));
630 			SCp->request_buffer = SCp->buffer;
631 			SCp->request_bufflen = SCp->bufflen;
632 			SCp->use_sg = SCp->old_use_sg;
633 			SCp->cmd_len = SCp->old_cmd_len;
634 			SCp->sc_data_direction = SCp->sc_old_data_direction;
635 			SCp->underflow = SCp->old_underflow;
636 
637 		}
638 		free_slot(slot, hostdata);
639 #ifdef NCR_700_DEBUG
640 		if(NCR_700_get_depth(SCp->device) == 0 ||
641 		   NCR_700_get_depth(SCp->device) > SCp->device->queue_depth)
642 			printk(KERN_ERR "Invalid depth in NCR_700_scsi_done(): %d\n",
643 			       NCR_700_get_depth(SCp->device));
644 #endif /* NCR_700_DEBUG */
645 		NCR_700_set_depth(SCp->device, NCR_700_get_depth(SCp->device) - 1);
646 
647 		SCp->host_scribble = NULL;
648 		SCp->result = result;
649 		SCp->scsi_done(SCp);
650 	} else {
651 		printk(KERN_ERR "53c700: SCSI DONE HAS NULL SCp\n");
652 	}
653 }
654 
655 
656 STATIC void
657 NCR_700_internal_bus_reset(struct Scsi_Host *host)
658 {
659 	/* Bus reset */
660 	NCR_700_writeb(ASSERT_RST, host, SCNTL1_REG);
661 	udelay(50);
662 	NCR_700_writeb(0, host, SCNTL1_REG);
663 
664 }
665 
666 STATIC void
667 NCR_700_chip_setup(struct Scsi_Host *host)
668 {
669 	struct NCR_700_Host_Parameters *hostdata =
670 		(struct NCR_700_Host_Parameters *)host->hostdata[0];
671 	__u32 dcntl_extra = 0;
672 	__u8 min_period;
673 	__u8 min_xferp = (hostdata->chip710 ? NCR_710_MIN_XFERP : NCR_700_MIN_XFERP);
674 
675 	if(hostdata->chip710) {
676 		__u8 burst_disable = hostdata->burst_disable
677 			? BURST_DISABLE : 0;
678 		dcntl_extra = COMPAT_700_MODE;
679 
680 		NCR_700_writeb(dcntl_extra, host, DCNTL_REG);
681 		NCR_700_writeb(BURST_LENGTH_8  | hostdata->dmode_extra,
682 			       host, DMODE_710_REG);
683 		NCR_700_writeb(burst_disable | (hostdata->differential ?
684 						DIFF : 0), host, CTEST7_REG);
685 		NCR_700_writeb(BTB_TIMER_DISABLE, host, CTEST0_REG);
686 		NCR_700_writeb(FULL_ARBITRATION | ENABLE_PARITY | PARITY
687 			       | AUTO_ATN, host, SCNTL0_REG);
688 	} else {
689 		NCR_700_writeb(BURST_LENGTH_8 | hostdata->dmode_extra,
690 			       host, DMODE_700_REG);
691 		NCR_700_writeb(hostdata->differential ?
692 			       DIFF : 0, host, CTEST7_REG);
693 		if(hostdata->fast) {
694 			/* this is for 700-66, does nothing on 700 */
695 			NCR_700_writeb(LAST_DIS_ENBL | ENABLE_ACTIVE_NEGATION
696 				       | GENERATE_RECEIVE_PARITY, host,
697 				       CTEST8_REG);
698 		} else {
699 			NCR_700_writeb(FULL_ARBITRATION | ENABLE_PARITY
700 				       | PARITY | AUTO_ATN, host, SCNTL0_REG);
701 		}
702 	}
703 
704 	NCR_700_writeb(1 << host->this_id, host, SCID_REG);
705 	NCR_700_writeb(0, host, SBCL_REG);
706 	NCR_700_writeb(ASYNC_OPERATION, host, SXFER_REG);
707 
708 	NCR_700_writeb(PHASE_MM_INT | SEL_TIMEOUT_INT | GROSS_ERR_INT | UX_DISC_INT
709 	     | RST_INT | PAR_ERR_INT | SELECT_INT, host, SIEN_REG);
710 
711 	NCR_700_writeb(ABORT_INT | INT_INST_INT | ILGL_INST_INT, host, DIEN_REG);
712 	NCR_700_writeb(ENABLE_SELECT, host, SCNTL1_REG);
713 	if(hostdata->clock > 75) {
714 		printk(KERN_ERR "53c700: Clock speed %dMHz is too high: 75MHz is the maximum this chip can be driven at\n", hostdata->clock);
715 		/* do the best we can, but the async clock will be out
716 		 * of spec: sync divider 2, async divider 3 */
717 		DEBUG(("53c700: sync 2 async 3\n"));
718 		NCR_700_writeb(SYNC_DIV_2_0, host, SBCL_REG);
719 		NCR_700_writeb(ASYNC_DIV_3_0 | dcntl_extra, host, DCNTL_REG);
720 		hostdata->sync_clock = hostdata->clock/2;
721 	} else	if(hostdata->clock > 50  && hostdata->clock <= 75) {
722 		/* sync divider 1.5, async divider 3 */
723 		DEBUG(("53c700: sync 1.5 async 3\n"));
724 		NCR_700_writeb(SYNC_DIV_1_5, host, SBCL_REG);
725 		NCR_700_writeb(ASYNC_DIV_3_0 | dcntl_extra, host, DCNTL_REG);
726 		hostdata->sync_clock = hostdata->clock*2;
727 		hostdata->sync_clock /= 3;
728 
729 	} else if(hostdata->clock > 37 && hostdata->clock <= 50) {
730 		/* sync divider 1, async divider 2 */
731 		DEBUG(("53c700: sync 1 async 2\n"));
732 		NCR_700_writeb(SYNC_DIV_1_0, host, SBCL_REG);
733 		NCR_700_writeb(ASYNC_DIV_2_0 | dcntl_extra, host, DCNTL_REG);
734 		hostdata->sync_clock = hostdata->clock;
735 	} else if(hostdata->clock > 25 && hostdata->clock <=37) {
736 		/* sync divider 1, async divider 1.5 */
737 		DEBUG(("53c700: sync 1 async 1.5\n"));
738 		NCR_700_writeb(SYNC_DIV_1_0, host, SBCL_REG);
739 		NCR_700_writeb(ASYNC_DIV_1_5 | dcntl_extra, host, DCNTL_REG);
740 		hostdata->sync_clock = hostdata->clock;
741 	} else {
742 		DEBUG(("53c700: sync 1 async 1\n"));
743 		NCR_700_writeb(SYNC_DIV_1_0, host, SBCL_REG);
744 		NCR_700_writeb(ASYNC_DIV_1_0 | dcntl_extra, host, DCNTL_REG);
745 		/* sync divider 1, async divider 1 */
746 		hostdata->sync_clock = hostdata->clock;
747 	}
748 	/* Calculate the actual minimum period that can be supported
749 	 * by our synchronous clock speed.  See the 710 manual for
750 	 * exact details of this calculation which is based on a
751 	 * setting of the SXFER register */
752 	min_period = 1000*(4+min_xferp)/(4*hostdata->sync_clock);
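	/* e.g. a 700-66 clocked at 50MHz uses sync divider 1 above, so
	 * sync_clock is 50 and (assuming a minimum XFERP of 1) this gives
	 * min_period = 1000*(4+1)/(4*50) = 25, i.e. 100ns or 10MHz */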
753 	hostdata->min_period = NCR_700_MIN_PERIOD;
754 	if(min_period > NCR_700_MIN_PERIOD)
755 		hostdata->min_period = min_period;
756 }
757 
758 STATIC void
759 NCR_700_chip_reset(struct Scsi_Host *host)
760 {
761 	struct NCR_700_Host_Parameters *hostdata =
762 		(struct NCR_700_Host_Parameters *)host->hostdata[0];
763 	if(hostdata->chip710) {
764 		NCR_700_writeb(SOFTWARE_RESET_710, host, ISTAT_REG);
765 		udelay(100);
766 
767 		NCR_700_writeb(0, host, ISTAT_REG);
768 	} else {
769 		NCR_700_writeb(SOFTWARE_RESET, host, DCNTL_REG);
770 		udelay(100);
771 
772 		NCR_700_writeb(0, host, DCNTL_REG);
773 	}
774 
775 	mdelay(1000);
776 
777 	NCR_700_chip_setup(host);
778 }
779 
780 /* The heart of the message processing engine is that the instruction
781  * immediately after the INT is the normal case (and so must be CLEAR
782  * ACK).  If we want to do something else, we call that routine in
783  * scripts and set temp to be the normal case + 8 (skipping the CLEAR
784  * ACK) so that the routine returns correctly to resume its activity
785  * */
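/* e.g. when an unexpected SDTR is rejected below, resume_offset is set
 * to Ent_SendMessageWithATN while TEMP stays at dsp + 8, so the script
 * routine's RETURN lands just past the CLEAR ACK (a reading of the
 * code below, stated here for illustration) */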
786 STATIC __u32
787 process_extended_message(struct Scsi_Host *host,
788 			 struct NCR_700_Host_Parameters *hostdata,
789 			 struct scsi_cmnd *SCp, __u32 dsp, __u32 dsps)
790 {
791 	__u32 resume_offset = dsp, temp = dsp + 8;
792 	__u8 pun = 0xff, lun = 0xff;
793 
794 	if(SCp != NULL) {
795 		pun = SCp->device->id;
796 		lun = SCp->device->lun;
797 	}
798 
799 	switch(hostdata->msgin[2]) {
800 	case A_SDTR_MSG:
801 		if(SCp != NULL && NCR_700_is_flag_set(SCp->device, NCR_700_DEV_BEGIN_SYNC_NEGOTIATION)) {
802 			struct scsi_target *starget = SCp->device->sdev_target;
803 			__u8 period = hostdata->msgin[3];
804 			__u8 offset = hostdata->msgin[4];
805 
806 			if(offset == 0 || period == 0) {
807 				offset = 0;
808 				period = 0;
809 			}
810 
811 			spi_offset(starget) = offset;
812 			spi_period(starget) = period;
813 
814 			if(NCR_700_is_flag_set(SCp->device, NCR_700_DEV_PRINT_SYNC_NEGOTIATION)) {
815 				spi_display_xfer_agreement(starget);
816 				NCR_700_clear_flag(SCp->device, NCR_700_DEV_PRINT_SYNC_NEGOTIATION);
817 			}
818 
819 			NCR_700_set_flag(SCp->device, NCR_700_DEV_NEGOTIATED_SYNC);
820 			NCR_700_clear_flag(SCp->device, NCR_700_DEV_BEGIN_SYNC_NEGOTIATION);
821 
822 			NCR_700_writeb(NCR_700_get_SXFER(SCp->device),
823 				       host, SXFER_REG);
824 
825 		} else {
826 			/* SDTR message out of the blue, reject it */
827 			shost_printk(KERN_WARNING, host,
828 				"Unexpected SDTR msg\n");
829 			hostdata->msgout[0] = A_REJECT_MSG;
830 			dma_cache_sync(hostdata->msgout, 1, DMA_TO_DEVICE);
831 			script_patch_16(hostdata->script, MessageCount, 1);
832 			/* SendMsgOut returns, so set up the return
833 			 * address */
834 			resume_offset = hostdata->pScript + Ent_SendMessageWithATN;
835 		}
836 		break;
837 
838 	case A_WDTR_MSG:
839 		printk(KERN_INFO "scsi%d: (%d:%d), Unsolicited WDTR after CMD, Rejecting\n",
840 		       host->host_no, pun, lun);
841 		hostdata->msgout[0] = A_REJECT_MSG;
842 		dma_cache_sync(hostdata->msgout, 1, DMA_TO_DEVICE);
843 		script_patch_16(hostdata->script, MessageCount, 1);
844 		resume_offset = hostdata->pScript + Ent_SendMessageWithATN;
845 
846 		break;
847 
848 	default:
849 		printk(KERN_INFO "scsi%d (%d:%d): Unexpected message %s: ",
850 		       host->host_no, pun, lun,
851 		       NCR_700_phase[(dsps & 0xf00) >> 8]);
852 		spi_print_msg(hostdata->msgin);
853 		printk("\n");
854 		/* just reject it */
855 		hostdata->msgout[0] = A_REJECT_MSG;
856 		dma_cache_sync(hostdata->msgout, 1, DMA_TO_DEVICE);
857 		script_patch_16(hostdata->script, MessageCount, 1);
858 		/* SendMsgOut returns, so set up the return
859 		 * address */
860 		resume_offset = hostdata->pScript + Ent_SendMessageWithATN;
861 	}
862 	NCR_700_writel(temp, host, TEMP_REG);
863 	return resume_offset;
864 }
865 
866 STATIC __u32
867 process_message(struct Scsi_Host *host,	struct NCR_700_Host_Parameters *hostdata,
868 		struct scsi_cmnd *SCp, __u32 dsp, __u32 dsps)
869 {
870 	/* work out where to return to */
871 	__u32 temp = dsp + 8, resume_offset = dsp;
872 	__u8 pun = 0xff, lun = 0xff;
873 
874 	if(SCp != NULL) {
875 		pun = SCp->device->id;
876 		lun = SCp->device->lun;
877 	}
878 
879 #ifdef NCR_700_DEBUG
880 	printk("scsi%d (%d:%d): message %s: ", host->host_no, pun, lun,
881 	       NCR_700_phase[(dsps & 0xf00) >> 8]);
882 	spi_print_msg(hostdata->msgin);
883 	printk("\n");
884 #endif
885 
886 	switch(hostdata->msgin[0]) {
887 
888 	case A_EXTENDED_MSG:
889 		resume_offset =  process_extended_message(host, hostdata, SCp,
890 							  dsp, dsps);
891 		break;
892 
893 	case A_REJECT_MSG:
894 		if(SCp != NULL && NCR_700_is_flag_set(SCp->device, NCR_700_DEV_BEGIN_SYNC_NEGOTIATION)) {
895 			/* Rejected our sync negotiation attempt */
896 			spi_period(SCp->device->sdev_target) =
897 				spi_offset(SCp->device->sdev_target) = 0;
898 			NCR_700_set_flag(SCp->device, NCR_700_DEV_NEGOTIATED_SYNC);
899 			NCR_700_clear_flag(SCp->device, NCR_700_DEV_BEGIN_SYNC_NEGOTIATION);
900 		} else if(SCp != NULL && NCR_700_get_tag_neg_state(SCp->device) == NCR_700_DURING_TAG_NEGOTIATION) {
901 			/* rejected our first simple tag message */
902 			scmd_printk(KERN_WARNING, SCp,
903 				"Rejected first tag queue attempt, turning off tag queueing\n");
904 			/* we're done negotiating */
905 			NCR_700_set_tag_neg_state(SCp->device, NCR_700_FINISHED_TAG_NEGOTIATION);
906 			hostdata->tag_negotiated &= ~(1<<scmd_id(SCp));
907 			SCp->device->tagged_supported = 0;
908 			scsi_deactivate_tcq(SCp->device, host->cmd_per_lun);
909 		} else {
910 			shost_printk(KERN_WARNING, host,
911 				"(%d:%d) Unexpected REJECT Message %s\n",
912 			       pun, lun,
913 			       NCR_700_phase[(dsps & 0xf00) >> 8]);
914 			/* however, just ignore it */
915 		}
916 		break;
917 
918 	case A_PARITY_ERROR_MSG:
919 		printk(KERN_ERR "scsi%d (%d:%d) Parity Error!\n", host->host_no,
920 		       pun, lun);
921 		NCR_700_internal_bus_reset(host);
922 		break;
923 	case A_SIMPLE_TAG_MSG:
924 		printk(KERN_INFO "scsi%d (%d:%d) SIMPLE TAG %d %s\n", host->host_no,
925 		       pun, lun, hostdata->msgin[1],
926 		       NCR_700_phase[(dsps & 0xf00) >> 8]);
927 		/* just ignore it */
928 		break;
929 	default:
930 		printk(KERN_INFO "scsi%d (%d:%d): Unexpected message %s: ",
931 		       host->host_no, pun, lun,
932 		       NCR_700_phase[(dsps & 0xf00) >> 8]);
933 
934 		spi_print_msg(hostdata->msgin);
935 		printk("\n");
936 		/* just reject it */
937 		hostdata->msgout[0] = A_REJECT_MSG;
938 		dma_cache_sync(hostdata->msgout, 1, DMA_TO_DEVICE);
939 		script_patch_16(hostdata->script, MessageCount, 1);
940 		/* SendMsgOut returns, so set up the return
941 		 * address */
942 		resume_offset = hostdata->pScript + Ent_SendMessageWithATN;
943 
944 		break;
945 	}
946 	NCR_700_writel(temp, host, TEMP_REG);
947 	/* set us up to receive another message */
948 	dma_cache_sync(hostdata->msgin, MSG_ARRAY_SIZE, DMA_FROM_DEVICE);
949 	return resume_offset;
950 }
951 
952 STATIC __u32
953 process_script_interrupt(__u32 dsps, __u32 dsp, struct scsi_cmnd *SCp,
954 			 struct Scsi_Host *host,
955 			 struct NCR_700_Host_Parameters *hostdata)
956 {
957 	__u32 resume_offset = 0;
958 	__u8 pun = 0xff, lun=0xff;
959 
960 	if(SCp != NULL) {
961 		pun = SCp->device->id;
962 		lun = SCp->device->lun;
963 	}
964 
965 	if(dsps == A_GOOD_STATUS_AFTER_STATUS) {
966 		DEBUG(("  COMMAND COMPLETE, status=%02x\n",
967 		       hostdata->status[0]));
968 		/* OK, if TCQ still under negotiation, we now know it works */
969 		if (NCR_700_get_tag_neg_state(SCp->device) == NCR_700_DURING_TAG_NEGOTIATION)
970 			NCR_700_set_tag_neg_state(SCp->device,
971 						  NCR_700_FINISHED_TAG_NEGOTIATION);
972 
973 		/* check for contingent allegiance conditions */
974 		if(status_byte(hostdata->status[0]) == CHECK_CONDITION ||
975 		   status_byte(hostdata->status[0]) == COMMAND_TERMINATED) {
976 			struct NCR_700_command_slot *slot =
977 				(struct NCR_700_command_slot *)SCp->host_scribble;
978 			if(SCp->cmnd[0] == REQUEST_SENSE) {
979 				/* OOPS: bad device, returning another
980 				 * contingent allegiance condition */
981 				scmd_printk(KERN_ERR, SCp,
982 					"broken device is looping in contingent allegiance: ignoring\n");
983 				NCR_700_scsi_done(hostdata, SCp, hostdata->status[0]);
984 			} else {
985 #ifdef NCR_DEBUG
986 				scsi_print_command(SCp);
987 				printk("  cmd %p has status %d, requesting sense\n",
988 				       SCp, hostdata->status[0]);
989 #endif
990 				/* we can destroy the command here
991 				 * because the contingent allegiance
992 				 * condition will cause a retry which
993 				 * will re-copy the command from the
994 				 * saved data_cmnd.  We also unmap any
995 				 * data associated with the command
996 				 * here */
997 				NCR_700_unmap(hostdata, SCp, slot);
998 
999 				SCp->cmnd[0] = REQUEST_SENSE;
1000 				SCp->cmnd[1] = (SCp->device->lun & 0x7) << 5;
1001 				SCp->cmnd[2] = 0;
1002 				SCp->cmnd[3] = 0;
1003 				SCp->cmnd[4] = sizeof(SCp->sense_buffer);
1004 				SCp->cmnd[5] = 0;
1005 				SCp->cmd_len = 6;
1006 				/* Here's a quiet hack: the
1007 				 * REQUEST_SENSE command is six bytes,
1008 				 * so store a flag indicating that
1009 				 * this was an internal sense request
1010 				 * and the original status at the end
1011 				 * of the command */
1012 				SCp->cmnd[6] = NCR_700_INTERNAL_SENSE_MAGIC;
1013 				SCp->cmnd[7] = hostdata->status[0];
1014 				SCp->use_sg = 0;
1015 				SCp->sc_data_direction = DMA_FROM_DEVICE;
1016 				dma_sync_single_for_device(hostdata->dev, slot->pCmd,
1017 							   SCp->cmd_len, DMA_TO_DEVICE);
1018 				SCp->request_bufflen = sizeof(SCp->sense_buffer);
1019 				slot->dma_handle = dma_map_single(hostdata->dev, SCp->sense_buffer, sizeof(SCp->sense_buffer), DMA_FROM_DEVICE);
1020 				slot->SG[0].ins = bS_to_host(SCRIPT_MOVE_DATA_IN | sizeof(SCp->sense_buffer));
1021 				slot->SG[0].pAddr = bS_to_host(slot->dma_handle);
1022 				slot->SG[1].ins = bS_to_host(SCRIPT_RETURN);
1023 				slot->SG[1].pAddr = 0;
1024 				slot->resume_offset = hostdata->pScript;
1025 				dma_cache_sync(slot->SG, sizeof(slot->SG[0])*2, DMA_TO_DEVICE);
1026 				dma_cache_sync(SCp->sense_buffer, sizeof(SCp->sense_buffer), DMA_FROM_DEVICE);
1027 
1028 				/* queue the command for reissue */
1029 				slot->state = NCR_700_SLOT_QUEUED;
1030 				hostdata->state = NCR_700_HOST_FREE;
1031 				hostdata->cmd = NULL;
1032 			}
1033 		} else {
1034 			// Currently rely on the mid layer evaluation
1035 			// of the tag queuing capability
1036 			//
1037 			//if(status_byte(hostdata->status[0]) == GOOD &&
1038 			//   SCp->cmnd[0] == INQUIRY && SCp->use_sg == 0) {
1039 			//	/* Piggy back the tag queueing support
1040 			//	 * on this command */
1041 			//	dma_sync_single_for_cpu(hostdata->dev,
1042 			//			    slot->dma_handle,
1043 			//			    SCp->request_bufflen,
1044 			//			    DMA_FROM_DEVICE);
1045 			//	if(((char *)SCp->request_buffer)[7] & 0x02) {
1046 			//		scmd_printk(KERN_INFO, SCp,
1047 			//		     "Enabling Tag Command Queuing\n");
1048 			//		hostdata->tag_negotiated |= (1<<scmd_id(SCp));
1049 			//		NCR_700_set_flag(SCp->device, NCR_700_DEV_BEGIN_TAG_QUEUEING);
1050 			//	} else {
1051 			//		NCR_700_clear_flag(SCp->device, NCR_700_DEV_BEGIN_TAG_QUEUEING);
1052 			//		hostdata->tag_negotiated &= ~(1<<scmd_id(SCp));
1053 			//	}
1054 			//}
1055 			NCR_700_scsi_done(hostdata, SCp, hostdata->status[0]);
1056 		}
1057 	} else if((dsps & 0xfffff0f0) == A_UNEXPECTED_PHASE) {
1058 		__u8 i = (dsps & 0xf00) >> 8;
1059 
1060 		scmd_printk(KERN_ERR, SCp, "UNEXPECTED PHASE %s (%s)\n",
1061 		       NCR_700_phase[i],
1062 		       sbcl_to_string(NCR_700_readb(host, SBCL_REG)));
1063 		scmd_printk(KERN_ERR, SCp, "         len = %d, cmd =",
1064 			SCp->cmd_len);
1065 		scsi_print_command(SCp);
1066 
1067 		NCR_700_internal_bus_reset(host);
1068 	} else if((dsps & 0xfffff000) == A_FATAL) {
1069 		int i = (dsps & 0xfff);
1070 
1071 		printk(KERN_ERR "scsi%d: (%d:%d) FATAL ERROR: %s\n",
1072 		       host->host_no, pun, lun, NCR_700_fatal_messages[i]);
1073 		if(dsps == A_FATAL_ILLEGAL_MSG_LENGTH) {
1074 			printk(KERN_ERR "     msg begins %02x %02x\n",
1075 			       hostdata->msgin[0], hostdata->msgin[1]);
1076 		}
1077 		NCR_700_internal_bus_reset(host);
1078 	} else if((dsps & 0xfffff0f0) == A_DISCONNECT) {
1079 #ifdef NCR_700_DEBUG
1080 		__u8 i = (dsps & 0xf00) >> 8;
1081 
1082 		printk("scsi%d: (%d:%d), DISCONNECTED (%d) %s\n",
1083 		       host->host_no, pun, lun,
1084 		       i, NCR_700_phase[i]);
1085 #endif
1086 		save_for_reselection(hostdata, SCp, dsp);
1087 
1088 	} else if(dsps == A_RESELECTION_IDENTIFIED) {
1089 		__u8 lun;
1090 		struct NCR_700_command_slot *slot;
1091 		__u8 reselection_id = hostdata->reselection_id;
1092 		struct scsi_device *SDp;
1093 
1094 		lun = hostdata->msgin[0] & 0x1f;
1095 
1096 		hostdata->reselection_id = 0xff;
1097 		DEBUG(("scsi%d: (%d:%d) RESELECTED!\n",
1098 		       host->host_no, reselection_id, lun));
1099 		/* clear the reselection indicator */
1100 		SDp = __scsi_device_lookup(host, 0, reselection_id, lun);
1101 		if(unlikely(SDp == NULL)) {
1102 			printk(KERN_ERR "scsi%d: (%d:%d) HAS NO device\n",
1103 			       host->host_no, reselection_id, lun);
1104 			BUG();
1105 		}
1106 		if(hostdata->msgin[1] == A_SIMPLE_TAG_MSG) {
1107 			struct scsi_cmnd *SCp = scsi_find_tag(SDp, hostdata->msgin[2]);
1108 			if(unlikely(SCp == NULL)) {
1109 				printk(KERN_ERR "scsi%d: (%d:%d) no saved request for tag %d\n",
1110 				       host->host_no, reselection_id, lun, hostdata->msgin[2]);
1111 				BUG();
1112 			}
1113 
1114 			slot = (struct NCR_700_command_slot *)SCp->host_scribble;
1115 			DDEBUG(KERN_DEBUG, SDp,
1116 				"reselection is tag %d, slot %p(%d)\n",
1117 				hostdata->msgin[2], slot, slot->tag);
1118 		} else {
1119 			struct scsi_cmnd *SCp = scsi_find_tag(SDp, SCSI_NO_TAG);
1120 			if(unlikely(SCp == NULL)) {
1121 				sdev_printk(KERN_ERR, SDp,
1122 					"no saved request for untagged cmd\n");
1123 				BUG();
1124 			}
1125 			slot = (struct NCR_700_command_slot *)SCp->host_scribble;
1126 		}
1127 
1128 		if(slot == NULL) {
1129 			printk(KERN_ERR "scsi%d: (%d:%d) RESELECTED but no saved command (MSG = %02x %02x %02x)!!\n",
1130 			       host->host_no, reselection_id, lun,
1131 			       hostdata->msgin[0], hostdata->msgin[1],
1132 			       hostdata->msgin[2]);
1133 		} else {
1134 			if(hostdata->state != NCR_700_HOST_BUSY)
1135 				printk(KERN_ERR "scsi%d: FATAL, host not busy during valid reselection!\n",
1136 				       host->host_no);
1137 			resume_offset = slot->resume_offset;
1138 			hostdata->cmd = slot->cmnd;
1139 
1140 			/* re-patch for this command */
1141 			script_patch_32_abs(hostdata->script, CommandAddress,
1142 					    slot->pCmd);
1143 			script_patch_16(hostdata->script,
1144 					CommandCount, slot->cmnd->cmd_len);
1145 			script_patch_32_abs(hostdata->script, SGScriptStartAddress,
1146 					    to32bit(&slot->pSG[0].ins));
1147 
1148 			/* Note: setting SXFER only works if we're
1149 			 * still in the MESSAGE phase, so it is vital
1150 			 * that ACK is still asserted when we process
1151 			 * the reselection message.  The resume offset
1152 			 * should therefore always clear ACK */
1153 			NCR_700_writeb(NCR_700_get_SXFER(hostdata->cmd->device),
1154 				       host, SXFER_REG);
1155 			dma_cache_sync(hostdata->msgin,
1156 				       MSG_ARRAY_SIZE, DMA_FROM_DEVICE);
1157 			dma_cache_sync(hostdata->msgout,
1158 				       MSG_ARRAY_SIZE, DMA_TO_DEVICE);
1159 			/* I'm just being paranoid here, the command should
1160 			 * already have been flushed from the cache */
1161 			dma_cache_sync(slot->cmnd->cmnd,
1162 				       slot->cmnd->cmd_len, DMA_TO_DEVICE);
1163 
1164 
1165 
1166 		}
1167 	} else if(dsps == A_RESELECTED_DURING_SELECTION) {
1168 
1169 		/* This section is full of debugging code because I've
1170 		 * never managed to reach it.  I think what happens is
1171 		 * that, because the 700 runs with selection
1172 		 * interrupts enabled the whole time, we take a
1173 		 * selection interrupt before we manage to get to the
1174 		 * reselected script interrupt */
1175 
1176 		__u8 reselection_id = NCR_700_readb(host, SFBR_REG);
1177 		struct NCR_700_command_slot *slot;
1178 
1179 		/* Take out our own ID */
1180 		reselection_id &= ~(1<<host->this_id);
1181 
1182 		/* I've never seen this happen, so keep this as a printk rather
1183 		 * than a debug */
1184 		printk(KERN_INFO "scsi%d: (%d:%d) RESELECTION DURING SELECTION, dsp=%08x[%04x] state=%d, count=%d\n",
1185 		       host->host_no, reselection_id, lun, dsp, dsp - hostdata->pScript, hostdata->state, hostdata->command_slot_count);
1186 
1187 		{
1188 			/* FIXME: DEBUGGING CODE */
1189 			__u32 SG = (__u32)bS_to_cpu(hostdata->script[A_SGScriptStartAddress_used[0]]);
1190 			int i;
1191 
1192 			for(i=0; i< NCR_700_COMMAND_SLOTS_PER_HOST; i++) {
1193 				if(SG >= to32bit(&hostdata->slots[i].pSG[0])
1194 				   && SG <= to32bit(&hostdata->slots[i].pSG[NCR_700_SG_SEGMENTS]))
1195 					break;
1196 			}
1197 			printk(KERN_INFO "IDENTIFIED SG segment as being %08x in slot %p, cmd %p, slot->resume_offset=%08x\n", SG, &hostdata->slots[i], hostdata->slots[i].cmnd, hostdata->slots[i].resume_offset);
1198 			SCp =  hostdata->slots[i].cmnd;
1199 		}
1200 
1201 		if(SCp != NULL) {
1202 			slot = (struct NCR_700_command_slot *)SCp->host_scribble;
1203 			/* change slot from busy to queued to redo command */
1204 			slot->state = NCR_700_SLOT_QUEUED;
1205 		}
1206 		hostdata->cmd = NULL;
1207 
1208 		if(reselection_id == 0) {
1209 			if(hostdata->reselection_id == 0xff) {
1210 				printk(KERN_ERR "scsi%d: Invalid reselection during selection!!\n", host->host_no);
1211 				return 0;
1212 			} else {
1213 				printk(KERN_ERR "scsi%d: script reselected and we took a selection interrupt\n",
1214 				       host->host_no);
1215 				reselection_id = hostdata->reselection_id;
1216 			}
1217 		} else {
1218 
1219 			/* convert to real ID */
1220 			reselection_id = bitmap_to_number(reselection_id);
1221 		}
1222 		hostdata->reselection_id = reselection_id;
1223 		/* just in case we have a stale simple tag message, clear it */
1224 		hostdata->msgin[1] = 0;
1225 		dma_cache_sync(hostdata->msgin,
1226 			       MSG_ARRAY_SIZE, DMA_BIDIRECTIONAL);
1227 		if(hostdata->tag_negotiated & (1<<reselection_id)) {
1228 			resume_offset = hostdata->pScript + Ent_GetReselectionWithTag;
1229 		} else {
1230 			resume_offset = hostdata->pScript + Ent_GetReselectionData;
1231 		}
1232 	} else if(dsps == A_COMPLETED_SELECTION_AS_TARGET) {
1233 		/* we've just disconnected from the bus, do nothing since
1234 		 * a return here will re-run the queued command slot
1235 		 * that may have been interrupted by the initial selection */
1236 		DEBUG((" SELECTION COMPLETED\n"));
1237 	} else if((dsps & 0xfffff0f0) == A_MSG_IN) {
1238 		resume_offset = process_message(host, hostdata, SCp,
1239 						dsp, dsps);
1240 	} else if((dsps &  0xfffff000) == 0) {
1241 		__u8 i = (dsps & 0xf0) >> 4, j = (dsps & 0xf00) >> 8;
1242 		printk(KERN_ERR "scsi%d: (%d:%d), unhandled script condition %s %s at %04x\n",
1243 		       host->host_no, pun, lun, NCR_700_condition[i],
1244 		       NCR_700_phase[j], dsp - hostdata->pScript);
1245 		if(SCp != NULL) {
1246 			scsi_print_command(SCp);
1247 
1248 			if(SCp->use_sg) {
1249 				for(i = 0; i < SCp->use_sg + 1; i++) {
1250 					printk(KERN_INFO " SG[%d].length = %d, move_insn=%08x, addr %08x\n", i, ((struct scatterlist *)SCp->buffer)[i].length, ((struct NCR_700_command_slot *)SCp->host_scribble)->SG[i].ins, ((struct NCR_700_command_slot *)SCp->host_scribble)->SG[i].pAddr);
1251 				}
1252 			}
1253 		}
1254 		NCR_700_internal_bus_reset(host);
1255 	} else if((dsps & 0xfffff000) == A_DEBUG_INTERRUPT) {
1256 		printk(KERN_NOTICE "scsi%d (%d:%d) DEBUG INTERRUPT %d AT %08x[%04x], continuing\n",
1257 		       host->host_no, pun, lun, dsps & 0xfff, dsp, dsp - hostdata->pScript);
1258 		resume_offset = dsp;
1259 	} else {
1260 		printk(KERN_ERR "scsi%d: (%d:%d), unidentified script interrupt 0x%x at %04x\n",
1261 		       host->host_no, pun, lun, dsps, dsp - hostdata->pScript);
1262 		NCR_700_internal_bus_reset(host);
1263 	}
1264 	return resume_offset;
1265 }
1266 
1267 /* We run the 53c700 with selection interrupts always enabled.  This
1268  * means that the chip may be selected as soon as the bus frees.  On a
1269  * busy bus, this can be before the scripts engine finishes its
1270  * processing.  Therefore, part of the selection processing has to be
1271  * to find out what the scripts engine is doing and complete the
1272  * function if necessary (i.e. process the pending disconnect or save
1273  * the interrupted initial selection) */
1274 STATIC inline __u32
1275 process_selection(struct Scsi_Host *host, __u32 dsp)
1276 {
1277 	__u8 id = 0;	/* Squash compiler warning */
1278 	int count = 0;
1279 	__u32 resume_offset = 0;
1280 	struct NCR_700_Host_Parameters *hostdata =
1281 		(struct NCR_700_Host_Parameters *)host->hostdata[0];
1282 	struct scsi_cmnd *SCp = hostdata->cmd;
1283 	__u8 sbcl;
1284 
1285 	for(count = 0; count < 5; count++) {
1286 		id = NCR_700_readb(host, hostdata->chip710 ?
1287 				   CTEST9_REG : SFBR_REG);
1288 
1289 		/* Take out our own ID */
1290 		id &= ~(1<<host->this_id);
1291 		if(id != 0)
1292 			break;
1293 		udelay(5);
1294 	}
1295 	sbcl = NCR_700_readb(host, SBCL_REG);
1296 	if((sbcl & SBCL_IO) == 0) {
1297 		/* mark as having been selected rather than reselected */
1298 		id = 0xff;
1299 	} else {
1300 		/* convert to real ID */
1301 		hostdata->reselection_id = id = bitmap_to_number(id);
1302 		DEBUG(("scsi%d:  Reselected by %d\n",
1303 		       host->host_no, id));
1304 	}
1305 	if(hostdata->state == NCR_700_HOST_BUSY && SCp != NULL) {
1306 		struct NCR_700_command_slot *slot =
1307 			(struct NCR_700_command_slot *)SCp->host_scribble;
1308 		DEBUG(("  ID %d WARNING: RESELECTION OF BUSY HOST, saving cmd %p, slot %p, addr %x [%04x], resume %x!\n", id, hostdata->cmd, slot, dsp, dsp - hostdata->pScript, resume_offset));
1309 
1310 		switch(dsp - hostdata->pScript) {
1311 		case Ent_Disconnect1:
1312 		case Ent_Disconnect2:
1313 			save_for_reselection(hostdata, SCp, Ent_Disconnect2 + hostdata->pScript);
1314 			break;
1315 		case Ent_Disconnect3:
1316 		case Ent_Disconnect4:
1317 			save_for_reselection(hostdata, SCp, Ent_Disconnect4 + hostdata->pScript);
1318 			break;
1319 		case Ent_Disconnect5:
1320 		case Ent_Disconnect6:
1321 			save_for_reselection(hostdata, SCp, Ent_Disconnect6 + hostdata->pScript);
1322 			break;
1323 		case Ent_Disconnect7:
1324 		case Ent_Disconnect8:
1325 			save_for_reselection(hostdata, SCp, Ent_Disconnect8 + hostdata->pScript);
1326 			break;
1327 		case Ent_Finish1:
1328 		case Ent_Finish2:
1329 			process_script_interrupt(A_GOOD_STATUS_AFTER_STATUS, dsp, SCp, host, hostdata);
1330 			break;
1331 
1332 		default:
1333 			slot->state = NCR_700_SLOT_QUEUED;
1334 			break;
1335 		}
1336 	}
1337 	hostdata->state = NCR_700_HOST_BUSY;
1338 	hostdata->cmd = NULL;
1339 	/* clear any stale simple tag message */
1340 	hostdata->msgin[1] = 0;
1341 	dma_cache_sync(hostdata->msgin, MSG_ARRAY_SIZE,
1342 		       DMA_BIDIRECTIONAL);
1343 
1344 	if(id == 0xff) {
1345 		/* Selected as target, Ignore */
1346 		resume_offset = hostdata->pScript + Ent_SelectedAsTarget;
1347 	} else if(hostdata->tag_negotiated & (1<<id)) {
1348 		resume_offset = hostdata->pScript + Ent_GetReselectionWithTag;
1349 	} else {
1350 		resume_offset = hostdata->pScript + Ent_GetReselectionData;
1351 	}
1352 	return resume_offset;
1353 }
1354 
1355 static inline void
1356 NCR_700_clear_fifo(struct Scsi_Host *host) {
1357 	const struct NCR_700_Host_Parameters *hostdata
1358 		= (struct NCR_700_Host_Parameters *)host->hostdata[0];
1359 	if(hostdata->chip710) {
1360 		NCR_700_writeb(CLR_FIFO_710, host, CTEST8_REG);
1361 	} else {
1362 		NCR_700_writeb(CLR_FIFO, host, DFIFO_REG);
1363 	}
1364 }
1365 
1366 static inline void
1367 NCR_700_flush_fifo(struct Scsi_Host *host) {
1368 	const struct NCR_700_Host_Parameters *hostdata
1369 		= (struct NCR_700_Host_Parameters *)host->hostdata[0];
1370 	if(hostdata->chip710) {
1371 		NCR_700_writeb(FLUSH_DMA_FIFO_710, host, CTEST8_REG);
1372 		udelay(10);
1373 		NCR_700_writeb(0, host, CTEST8_REG);
1374 	} else {
1375 		NCR_700_writeb(FLUSH_DMA_FIFO, host, DFIFO_REG);
1376 		udelay(10);
1377 		NCR_700_writeb(0, host, DFIFO_REG);
1378 	}
1379 }
1380 
1381 
1382 /* The queue lock with interrupts disabled must be held on entry to
1383  * this function */
1384 STATIC int
1385 NCR_700_start_command(struct scsi_cmnd *SCp)
1386 {
1387 	struct NCR_700_command_slot *slot =
1388 		(struct NCR_700_command_slot *)SCp->host_scribble;
1389 	struct NCR_700_Host_Parameters *hostdata =
1390 		(struct NCR_700_Host_Parameters *)SCp->device->host->hostdata[0];
1391 	__u16 count = 1;	/* for IDENTIFY message */
1392 
1393 	if(hostdata->state != NCR_700_HOST_FREE) {
1394 		/* keep this inside the lock to close the race window where
1395 		 * the running command finishes on another CPU while we don't
1396 		 * change the state to queued on this one */
1397 		slot->state = NCR_700_SLOT_QUEUED;
1398 
1399 		DEBUG(("scsi%d: host busy, queueing command %p, slot %p\n",
1400 		       SCp->device->host->host_no, slot->cmnd, slot));
1401 		return 0;
1402 	}
1403 	hostdata->state = NCR_700_HOST_BUSY;
1404 	hostdata->cmd = SCp;
1405 	slot->state = NCR_700_SLOT_BUSY;
1406 	/* keep interrupts disabled until we have the command correctly
1407 	 * set up so we cannot take a selection interrupt */
1408 
1409 	hostdata->msgout[0] = NCR_700_identify(SCp->cmnd[0] != REQUEST_SENSE,
1410 					       SCp->device->lun);
1411 	/* for INQUIRY or REQUEST_SENSE commands, we cannot be sure
1412 	 * if the negotiated transfer parameters still hold, so
1413 	 * always renegotiate them */
1414 	if(SCp->cmnd[0] == INQUIRY || SCp->cmnd[0] == REQUEST_SENSE) {
1415 		NCR_700_clear_flag(SCp->device, NCR_700_DEV_NEGOTIATED_SYNC);
1416 	}
1417 
1418 	/* REQUEST_SENSE is asking for contingent I_T_L(_Q) status.
1419 	 * If a contingent allegiance condition exists, the device
1420 	 * will refuse all tags, so send the request sense as untagged
1421 	 * */
1422 	if((hostdata->tag_negotiated & (1<<scmd_id(SCp)))
1423 	   && (slot->tag != SCSI_NO_TAG && SCp->cmnd[0] != REQUEST_SENSE)) {
1424 		count += scsi_populate_tag_msg(SCp, &hostdata->msgout[count]);
1425 	}
1426 
1427 	if(hostdata->fast &&
1428 	   NCR_700_is_flag_clear(SCp->device, NCR_700_DEV_NEGOTIATED_SYNC)) {
1429 		count += spi_populate_sync_msg(&hostdata->msgout[count],
1430 				spi_period(SCp->device->sdev_target),
1431 				spi_offset(SCp->device->sdev_target));
1432 		NCR_700_set_flag(SCp->device, NCR_700_DEV_BEGIN_SYNC_NEGOTIATION);
1433 	}
1434 
1435 	script_patch_16(hostdata->script, MessageCount, count);
1436 
1437 
1438 	script_patch_ID(hostdata->script,
1439 			Device_ID, 1<<scmd_id(SCp));
1440 
1441 	script_patch_32_abs(hostdata->script, CommandAddress,
1442 			    slot->pCmd);
1443 	script_patch_16(hostdata->script, CommandCount, SCp->cmd_len);
1444 	/* finally plumb the beginning of the SG list into the script
1445 	 * */
1446 	script_patch_32_abs(hostdata->script, SGScriptStartAddress,
1447 			    to32bit(&slot->pSG[0].ins));
1448 	NCR_700_clear_fifo(SCp->device->host);
1449 
1450 	if(slot->resume_offset == 0)
1451 		slot->resume_offset = hostdata->pScript;
1452 	/* now perform all the writebacks and invalidates */
1453 	dma_cache_sync(hostdata->msgout, count, DMA_TO_DEVICE);
1454 	dma_cache_sync(hostdata->msgin, MSG_ARRAY_SIZE,
1455 		       DMA_FROM_DEVICE);
1456 	dma_cache_sync(SCp->cmnd, SCp->cmd_len, DMA_TO_DEVICE);
1457 	dma_cache_sync(hostdata->status, 1, DMA_FROM_DEVICE);
1458 
1459 	/* set the synchronous period/offset */
1460 	NCR_700_writeb(NCR_700_get_SXFER(SCp->device),
1461 		       SCp->device->host, SXFER_REG);
1462 	NCR_700_writel(slot->temp, SCp->device->host, TEMP_REG);
1463 	NCR_700_writel(slot->resume_offset, SCp->device->host, DSP_REG);
1464 
1465 	return 1;
1466 }
1467 
1468 irqreturn_t
1469 NCR_700_intr(int irq, void *dev_id, struct pt_regs *regs)
1470 {
1471 	struct Scsi_Host *host = (struct Scsi_Host *)dev_id;
1472 	struct NCR_700_Host_Parameters *hostdata =
1473 		(struct NCR_700_Host_Parameters *)host->hostdata[0];
1474 	__u8 istat;
1475 	__u32 resume_offset = 0;
1476 	__u8 pun = 0xff, lun = 0xff;
1477 	unsigned long flags;
1478 	int handled = 0;
1479 
1480 	/* Use the host lock to serialise access to the 53c700
1481 	 * hardware.  Note: In future, we may need to take the queue
1482 	 * lock to enter the done routines.  When that happens, we
1483 	 * need to ensure that for this driver, the host lock and the
1484 	 * queue lock point to the same thing. */
1485 	spin_lock_irqsave(host->host_lock, flags);
1486 	if((istat = NCR_700_readb(host, ISTAT_REG))
1487 	      & (SCSI_INT_PENDING | DMA_INT_PENDING)) {
1488 		__u32 dsps;
1489 		__u8 sstat0 = 0, dstat = 0;
1490 		__u32 dsp;
1491 		struct scsi_cmnd *SCp = hostdata->cmd;
1492 		enum NCR_700_Host_State state;
1493 
1494 		handled = 1;
1495 		state = hostdata->state;
1496 		SCp = hostdata->cmd;
1497 
1498 		if(istat & SCSI_INT_PENDING) {
1499 			udelay(10);
1500 
1501 			sstat0 = NCR_700_readb(host, SSTAT0_REG);
1502 		}
1503 
1504 		if(istat & DMA_INT_PENDING) {
1505 			udelay(10);
1506 
1507 			dstat = NCR_700_readb(host, DSTAT_REG);
1508 		}
1509 
1510 		dsps = NCR_700_readl(host, DSPS_REG);
1511 		dsp = NCR_700_readl(host, DSP_REG);
1512 
1513 		DEBUG(("scsi%d: istat %02x sstat0 %02x dstat %02x dsp %04x[%08x] dsps 0x%x\n",
1514 		       host->host_no, istat, sstat0, dstat,
1515 		       (dsp - (__u32)(hostdata->pScript))/4,
1516 		       dsp, dsps));
1517 
1518 		if(SCp != NULL) {
1519 			pun = SCp->device->id;
1520 			lun = SCp->device->lun;
1521 		}
1522 
1523 		if(sstat0 & SCSI_RESET_DETECTED) {
1524 			struct scsi_device *SDp;
1525 			int i;
1526 
1527 			hostdata->state = NCR_700_HOST_BUSY;
1528 
1529 			printk(KERN_ERR "scsi%d: Bus Reset detected, executing command %p, slot %p, dsp %08x[%04x]\n",
1530 			       host->host_no, SCp, SCp == NULL ? NULL : SCp->host_scribble, dsp, dsp - hostdata->pScript);
1531 
1532 			scsi_report_bus_reset(host, 0);
1533 
1534 			/* clear all the negotiated parameters */
1535 			__shost_for_each_device(SDp, host)
1536 				SDp->hostdata = NULL;
1537 
1538 			/* clear all the slots and their pending commands */
1539 			for(i = 0; i < NCR_700_COMMAND_SLOTS_PER_HOST; i++) {
1540 				struct scsi_cmnd *SCp;
1541 				struct NCR_700_command_slot *slot =
1542 					&hostdata->slots[i];
1543 
1544 				if(slot->state == NCR_700_SLOT_FREE)
1545 					continue;
1546 
1547 				SCp = slot->cmnd;
1548 				printk(KERN_ERR " failing command because of reset, slot %p, cmnd %p\n",
1549 				       slot, SCp);
1550 				free_slot(slot, hostdata);
1551 				SCp->host_scribble = NULL;
1552 				NCR_700_set_depth(SCp->device, 0);
1553 				/* NOTE: deadlock potential here: we
1554 				 * rely on mid-layer guarantees that
1555 				 * scsi_done won't try to issue the
1556 				 * command again; otherwise we'll
1557 				 * deadlock on the
1558 				 * hostdata->state_lock */
1559 				SCp->result = DID_RESET << 16;
1560 				SCp->scsi_done(SCp);
1561 			}
1562 			mdelay(25);
1563 			NCR_700_chip_setup(host);
1564 
1565 			hostdata->state = NCR_700_HOST_FREE;
1566 			hostdata->cmd = NULL;
1567 			/* signal back if this was an eh induced reset */
1568 			if(hostdata->eh_complete != NULL)
1569 				complete(hostdata->eh_complete);
1570 			goto out_unlock;
1571 		} else if(sstat0 & SELECTION_TIMEOUT) {
1572 			DEBUG(("scsi%d: (%d:%d) selection timeout\n",
1573 			       host->host_no, pun, lun));
1574 			NCR_700_scsi_done(hostdata, SCp, DID_NO_CONNECT<<16);
1575 		} else if(sstat0 & PHASE_MISMATCH) {
1576 			struct NCR_700_command_slot *slot = (SCp == NULL) ? NULL :
1577 				(struct NCR_700_command_slot *)SCp->host_scribble;
1578 
1579 			if(dsp == Ent_SendMessage + 8 + hostdata->pScript) {
1580 				/* It wants to reply to some part of
1581 				 * our message */
1582 #ifdef NCR_700_DEBUG
1583 				__u32 temp = NCR_700_readl(host, TEMP_REG);
1584 				int count = (hostdata->script[Ent_SendMessage/4] & 0xffffff) - ((NCR_700_readl(host, DBC_REG) & 0xffffff) + NCR_700_data_residual(host));
1585 				printk("scsi%d (%d:%d) PHASE MISMATCH IN SEND MESSAGE %d remain, return %p[%04x], phase %s\n", host->host_no, pun, lun, count, (void *)temp, temp - hostdata->pScript, sbcl_to_string(NCR_700_readb(host, SBCL_REG)));
1586 #endif
1587 				resume_offset = hostdata->pScript + Ent_SendMessagePhaseMismatch;
1588 			} else if(dsp >= to32bit(&slot->pSG[0].ins) &&
1589 				  dsp <= to32bit(&slot->pSG[NCR_700_SG_SEGMENTS].ins)) {
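				/* the script stopped inside this slot's
				 * scatter list, so the mismatch happened part
				 * way through a data transfer: work out which
				 * SG element was executing and how much of it
				 * is still untransferred */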
1590 				int data_transfer = NCR_700_readl(host, DBC_REG) & 0xffffff;
1591 				int SGcount = (dsp - to32bit(&slot->pSG[0].ins))/sizeof(struct NCR_700_SG_List);
1592 				int residual = NCR_700_data_residual(host);
1593 				int i;
1594 #ifdef NCR_700_DEBUG
1595 				__u32 naddr = NCR_700_readl(host, DNAD_REG);
1596 
1597 				printk("scsi%d: (%d:%d) Expected phase mismatch in slot->SG[%d], transferred 0x%x\n",
1598 				       host->host_no, pun, lun,
1599 				       SGcount, data_transfer);
1600 				scsi_print_command(SCp);
1601 				if(residual) {
1602 					printk("scsi%d: (%d:%d) Expected phase mismatch in slot->SG[%d], transferred 0x%x, residual %d\n",
1603 				       host->host_no, pun, lun,
1604 				       SGcount, data_transfer, residual);
1605 				}
1606 #endif
1607 				data_transfer += residual;
1608 
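				/* data_transfer is the part of the current SG
				 * element that never made it to or from the
				 * device (DBC remainder plus FIFO residue):
				 * shrink that element to the untransferred
				 * count and advance its address past the bytes
				 * already moved */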
1609 				if(data_transfer != 0) {
1610 					int count;
1611 					__u32 pAddr;
1612 
1613 					SGcount--;
1614 
1615 					count = (bS_to_cpu(slot->SG[SGcount].ins) & 0x00ffffff);
1616 					DEBUG(("DATA TRANSFER MISMATCH, count = %d, transferred %d\n", count, count-data_transfer));
1617 					slot->SG[SGcount].ins &= bS_to_host(0xff000000);
1618 					slot->SG[SGcount].ins |= bS_to_host(data_transfer);
1619 					pAddr = bS_to_cpu(slot->SG[SGcount].pAddr);
1620 					pAddr += (count - data_transfer);
1621 #ifdef NCR_700_DEBUG
1622 					if(pAddr != naddr) {
1623 						printk("scsi%d (%d:%d) transfer mismatch pAddr=%lx, naddr=%lx, data_transfer=%d, residual=%d\n", host->host_no, pun, lun, (unsigned long)pAddr, (unsigned long)naddr, data_transfer, residual);
1624 					}
1625 #endif
1626 					slot->SG[SGcount].pAddr = bS_to_host(pAddr);
1627 				}
1628 				/* set the executed moves to nops */
1629 				for(i=0; i<SGcount; i++) {
1630 					slot->SG[i].ins = bS_to_host(SCRIPT_NOP);
1631 					slot->SG[i].pAddr = 0;
1632 				}
1633 				dma_cache_sync(slot->SG, sizeof(slot->SG), DMA_TO_DEVICE);
1634 				/* and pretend we disconnected after
1635 				 * the command phase */
1636 				resume_offset = hostdata->pScript + Ent_MsgInDuringData;
1637 				/* make sure all the data is flushed */
1638 				NCR_700_flush_fifo(host);
1639 			} else {
1640 				__u8 sbcl = NCR_700_readb(host, SBCL_REG);
1641 				printk(KERN_ERR "scsi%d: (%d:%d) phase mismatch at %04x, phase %s\n",
1642 				       host->host_no, pun, lun, dsp - hostdata->pScript, sbcl_to_string(sbcl));
1643 				NCR_700_internal_bus_reset(host);
1644 			}
1645 
1646 		} else if(sstat0 & SCSI_GROSS_ERROR) {
1647 			printk(KERN_ERR "scsi%d: (%d:%d) GROSS ERROR\n",
1648 			       host->host_no, pun, lun);
1649 			NCR_700_scsi_done(hostdata, SCp, DID_ERROR<<16);
1650 		} else if(sstat0 & PARITY_ERROR) {
1651 			printk(KERN_ERR "scsi%d: (%d:%d) PARITY ERROR\n",
1652 			       host->host_no, pun, lun);
1653 			NCR_700_scsi_done(hostdata, SCp, DID_ERROR<<16);
1654 		} else if(dstat & SCRIPT_INT_RECEIVED) {
1655 			DEBUG(("scsi%d: (%d:%d) ====>SCRIPT INTERRUPT<====\n",
1656 			       host->host_no, pun, lun));
1657 			resume_offset = process_script_interrupt(dsps, dsp, SCp, host, hostdata);
1658 		} else if(dstat & (ILGL_INST_DETECTED)) {
1659 			printk(KERN_ERR "scsi%d: (%d:%d) Illegal Instruction detected at 0x%08x[0x%x]!!!\n"
1660 			       "         Please email James.Bottomley@HansenPartnership.com with the details\n",
1661 			       host->host_no, pun, lun,
1662 			       dsp, dsp - hostdata->pScript);
1663 			NCR_700_scsi_done(hostdata, SCp, DID_ERROR<<16);
1664 		} else if(dstat & (WATCH_DOG_INTERRUPT|ABORTED)) {
1665 			printk(KERN_ERR "scsi%d: (%d:%d) serious DMA problem, dstat=%02x\n",
1666 			       host->host_no, pun, lun, dstat);
1667 			NCR_700_scsi_done(hostdata, SCp, DID_ERROR<<16);
1668 		}
1669 
1670 
1671 		/* NOTE: selection interrupt processing MUST occur
1672 		 * after script interrupt processing to correctly cope
1673 		 * with the case where we process a disconnect and
1674 		 * then get reselected before we process the
1675 		 * disconnection */
1676 		if(sstat0 & SELECTED) {
1677 			/* FIXME: It currently takes at least FOUR
1678 			 * interrupts to complete a command that
1679 			 * disconnects: one for the disconnect, one
1680 			 * for the reselection, one to get the
1681 			 * reselection data and one to complete the
1682 			 * command.  If we guess the reselected
1683 			 * command here and prepare it, we only need
1684 			 * to get a reselection data interrupt if we
1685 			 * guessed wrongly.  Since the interrupt
1686 			 * overhead is much greater than the command
1687 			 * setup, this would be an efficient
1688 			 * optimisation particularly as we probably
1689 			 * only have one outstanding command on a
1690 			 * target most of the time */
1691 
1692 			resume_offset = process_selection(host, dsp);
1693 
1694 		}
1695 
1696 	}
1697 
1698 	if(resume_offset) {
1699 		if(hostdata->state != NCR_700_HOST_BUSY) {
1700 			printk(KERN_ERR "scsi%d: Driver error: resume at 0x%08x [0x%04x] with non busy host!\n",
1701 			       host->host_no, resume_offset, resume_offset - hostdata->pScript);
1702 			hostdata->state = NCR_700_HOST_BUSY;
1703 		}
1704 
1705 		DEBUG(("Attempting to resume at %x\n", resume_offset));
1706 		NCR_700_clear_fifo(host);
1707 		NCR_700_writel(resume_offset, host, DSP_REG);
1708 	}
1709 	/* There is probably a technical no-no about this: If we're a
1710 	 * shared interrupt and we got this interrupt because the
1711 	 * other device needs servicing, not us, we're still going to
1712 	 * check our queued commands here---of course, there shouldn't
1713 	 * be any outstanding.... */
1714 	if(hostdata->state == NCR_700_HOST_FREE) {
1715 		int i;
1716 
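		/* at most one queued command is (re)started per pass
		 * through here */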
1717 		for(i = 0; i < NCR_700_COMMAND_SLOTS_PER_HOST; i++) {
1718 			/* fairness: always run the queue from the
1719 			 * position we last left off */
1720 			int j = (i + hostdata->saved_slot_position)
1721 				% NCR_700_COMMAND_SLOTS_PER_HOST;
1722 
1723 			if(hostdata->slots[j].state != NCR_700_SLOT_QUEUED)
1724 				continue;
1725 			if(NCR_700_start_command(hostdata->slots[j].cmnd)) {
1726 				DEBUG(("scsi%d: Issuing saved command slot %p, cmd %p\t\n",
1727 				       host->host_no, &hostdata->slots[j],
1728 				       hostdata->slots[j].cmnd));
1729 				hostdata->saved_slot_position = j + 1;
1730 			}
1731 
1732 			break;
1733 		}
1734 	}
1735  out_unlock:
1736 	spin_unlock_irqrestore(host->host_lock, flags);
1737 	return IRQ_RETVAL(handled);
1738 }
1739 
1740 STATIC int
1741 NCR_700_queuecommand(struct scsi_cmnd *SCp, void (*done)(struct scsi_cmnd *))
1742 {
1743 	struct NCR_700_Host_Parameters *hostdata =
1744 		(struct NCR_700_Host_Parameters *)SCp->device->host->hostdata[0];
1745 	__u32 move_ins;
1746 	enum dma_data_direction direction;
1747 	struct NCR_700_command_slot *slot;
1748 
1749 	if(hostdata->command_slot_count >= NCR_700_COMMAND_SLOTS_PER_HOST) {
1750 		/* We're over our allocation; this should never happen
1751 		 * since we report the max allocation to the mid layer */
1752 		printk(KERN_WARNING "scsi%d: Command depth has gone over queue depth\n", SCp->device->host->host_no);
1753 		return 1;
1754 	}
1755 	/* check for untagged commands.  We can only accept an untagged
1756 	 * command if none are outstanding.  Commands could be untagged because:
1757 	 *
1758 	 * - The tag negotiated bitmap is clear
1759 	 * - The blk layer sent an untagged command
1760 	 */
1761 	if(NCR_700_get_depth(SCp->device) != 0
1762 	   && (!(hostdata->tag_negotiated & (1<<scmd_id(SCp)))
1763 	       || !blk_rq_tagged(SCp->request))) {
1764 		CDEBUG(KERN_ERR, SCp, "has non zero depth %d\n",
1765 		       NCR_700_get_depth(SCp->device));
1766 		return SCSI_MLQUEUE_DEVICE_BUSY;
1767 	}
1768 	if(NCR_700_get_depth(SCp->device) >= SCp->device->queue_depth) {
1769 		CDEBUG(KERN_ERR, SCp, "has max tag depth %d\n",
1770 		       NCR_700_get_depth(SCp->device));
1771 		return SCSI_MLQUEUE_DEVICE_BUSY;
1772 	}
1773 	NCR_700_set_depth(SCp->device, NCR_700_get_depth(SCp->device) + 1);
1774 
1775 	/* begin the command here */
1776 	/* no need to check for NULL: the test on command_slot_count above
1777 	 * ensures a slot is free */
1778 	slot = find_empty_slot(hostdata);
1779 
1780 	slot->cmnd = SCp;
1781 
1782 	SCp->scsi_done = done;
1783 	SCp->host_scribble = (unsigned char *)slot;
1784 	SCp->SCp.ptr = NULL;
1785 	SCp->SCp.buffer = NULL;
1786 
1787 #ifdef NCR_700_DEBUG
1788 	printk("53c700: scsi%d, command ", SCp->device->host->host_no);
1789 	scsi_print_command(SCp);
1790 #endif
1791 	if(blk_rq_tagged(SCp->request)
1792 	   && (hostdata->tag_negotiated &(1<<scmd_id(SCp))) == 0
1793 	   && NCR_700_get_tag_neg_state(SCp->device) == NCR_700_START_TAG_NEGOTIATION) {
1794 		scmd_printk(KERN_ERR, SCp, "Enabling Tag Command Queuing\n");
1795 		hostdata->tag_negotiated |= (1<<scmd_id(SCp));
1796 		NCR_700_set_tag_neg_state(SCp->device, NCR_700_DURING_TAG_NEGOTIATION);
1797 	}
1798 
1799 	/* here we may have to process an untagged command.  The gate
1800 	 * above ensures that this will be the only one outstanding,
1801 	 * so clear the tag negotiated bit.
1802 	 *
1803 	 * FIXME: This will royally screw up on multiple LUN devices
1804 	 * */
1805 	if(!blk_rq_tagged(SCp->request)
1806 	   && (hostdata->tag_negotiated &(1<<scmd_id(SCp)))) {
1807 		scmd_printk(KERN_INFO, SCp, "Disabling Tag Command Queuing\n");
1808 		hostdata->tag_negotiated &= ~(1<<scmd_id(SCp));
1809 	}
1810 
1811 	if((hostdata->tag_negotiated &(1<<scmd_id(SCp)))
1812 	   && scsi_get_tag_type(SCp->device)) {
1813 		slot->tag = SCp->request->tag;
1814 		CDEBUG(KERN_DEBUG, SCp, "sending out tag %d, slot %p\n",
1815 		       slot->tag, slot);
1816 	} else {
1817 		slot->tag = SCSI_NO_TAG;
1818 		/* must populate current_cmnd for scsi_find_tag to work */
1819 		SCp->device->current_cmnd = SCp;
1820 	}
1821 	/* sanity check: some of the commands generated by the mid-layer
1822 	 * have an eccentric idea of their sc_data_direction */
1823 	if(!SCp->use_sg && !SCp->request_bufflen
1824 	   && SCp->sc_data_direction != DMA_NONE) {
1825 #ifdef NCR_700_DEBUG
1826 		printk("53c700: Command");
1827 		scsi_print_command(SCp);
1828 		printk("Has wrong data direction %d\n", SCp->sc_data_direction);
1829 #endif
1830 		SCp->sc_data_direction = DMA_NONE;
1831 	}
1832 
1833 	switch (SCp->cmnd[0]) {
1834 	case REQUEST_SENSE:
1835 		/* clear the internal sense magic */
1836 		SCp->cmnd[6] = 0;
1837 		/* fall through */
1838 	default:
1839 		/* OK, get it from the command */
1840 		switch(SCp->sc_data_direction) {
1841 		case DMA_BIDIRECTIONAL:
1842 		default:
1843 			printk(KERN_ERR "53c700: Unknown data direction for command ");
1844 			scsi_print_command(SCp);
1845 
1846 			move_ins = 0;
1847 			break;
1848 		case DMA_NONE:
1849 			move_ins = 0;
1850 			break;
1851 		case DMA_FROM_DEVICE:
1852 			move_ins = SCRIPT_MOVE_DATA_IN;
1853 			break;
1854 		case DMA_TO_DEVICE:
1855 			move_ins = SCRIPT_MOVE_DATA_OUT;
1856 			break;
1857 		}
1858 	}
1859 
1860 	/* now build the scatter gather list */
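	/* each scatter element becomes one script MOVE instruction (data
	 * in or out) carrying its DMA address and length; the list is
	 * terminated with a RETURN so control passes back to the main
	 * script when the final element completes */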
1861 	direction = SCp->sc_data_direction;
1862 	if(move_ins != 0) {
1863 		int i;
1864 		int sg_count;
1865 		dma_addr_t vPtr = 0;
1866 		__u32 count = 0;
1867 
1868 		if(SCp->use_sg) {
1869 			sg_count = dma_map_sg(hostdata->dev, SCp->buffer,
1870 					      SCp->use_sg, direction);
1871 		} else {
1872 			vPtr = dma_map_single(hostdata->dev,
1873 					      SCp->request_buffer,
1874 					      SCp->request_bufflen,
1875 					      direction);
1876 			count = SCp->request_bufflen;
1877 			slot->dma_handle = vPtr;
1878 			sg_count = 1;
1879 		}
1880 
1881 
1882 		for(i = 0; i < sg_count; i++) {
1883 
1884 			if(SCp->use_sg) {
1885 				struct scatterlist *sg = SCp->buffer;
1886 
1887 				vPtr = sg_dma_address(&sg[i]);
1888 				count = sg_dma_len(&sg[i]);
1889 			}
1890 
1891 			slot->SG[i].ins = bS_to_host(move_ins | count);
1892 			DEBUG((" scatter block %d: move %d[%08x] from 0x%lx\n",
1893 			       i, count, slot->SG[i].ins, (unsigned long)vPtr));
1894 			slot->SG[i].pAddr = bS_to_host(vPtr);
1895 		}
1896 		slot->SG[i].ins = bS_to_host(SCRIPT_RETURN);
1897 		slot->SG[i].pAddr = 0;
1898 		dma_cache_sync(slot->SG, sizeof(slot->SG), DMA_TO_DEVICE);
1899 		DEBUG((" SETTING %08lx to %x\n",
1900 		       (&slot->pSG[i].ins),
1901 		       slot->SG[i].ins));
1902 	}
1903 	slot->resume_offset = 0;
1904 	slot->pCmd = dma_map_single(hostdata->dev, SCp->cmnd,
1905 				    sizeof(SCp->cmnd), DMA_TO_DEVICE);
1906 	NCR_700_start_command(SCp);
1907 	return 0;
1908 }
1909 
1910 STATIC int
1911 NCR_700_abort(struct scsi_cmnd * SCp)
1912 {
1913 	struct NCR_700_command_slot *slot;
1914 
1915 	scmd_printk(KERN_INFO, SCp,
1916 		"New error handler wants to abort command\n\t");
1917 	scsi_print_command(SCp);
1918 
1919 	slot = (struct NCR_700_command_slot *)SCp->host_scribble;
1920 
1921 	if(slot == NULL)
1922 		/* no outstanding command to abort */
1923 		return SUCCESS;
1924 	if(SCp->cmnd[0] == TEST_UNIT_READY) {
1925 		/* FIXME: This is because of a problem in the new
1926 		 * error handler.  When it is in error recovery, it
1927 		 * will send a TUR to a device it thinks may still be
1928 		 * showing a problem.  If the TUR isn't responded to,
1929 		 * it will abort it and mark the device offline.
1930 		 * Unfortunately, it does no other error recovery, so
1931 		 * this would leave us with an outstanding command
1932 		 * occupying a slot.  Rather than allow this to
1933 		 * happen, we issue a bus reset to force all
1934 		 * outstanding commands to terminate here. */
1935 		NCR_700_internal_bus_reset(SCp->device->host);
1936 		/* still drop through and return failed */
1937 	}
1938 	return FAILED;
1939 
1940 }
1941 
1942 STATIC int
1943 NCR_700_bus_reset(struct scsi_cmnd * SCp)
1944 {
1945 	DECLARE_COMPLETION(complete);
1946 	struct NCR_700_Host_Parameters *hostdata =
1947 		(struct NCR_700_Host_Parameters *)SCp->device->host->hostdata[0];
1948 
1949 	scmd_printk(KERN_INFO, SCp,
1950 		"New error handler wants BUS reset, cmd %p\n\t", SCp);
1951 	scsi_print_command(SCp);
1952 
1953 	/* In theory, eh_complete should always be NULL because the
1954 	 * error handler is single threaded, but just in case we're
1955 	 * handling a reset issued via sg or some other path */
1956 	spin_lock_irq(SCp->device->host->host_lock);
1957 	while (hostdata->eh_complete != NULL) {
1958 		spin_unlock_irq(SCp->device->host->host_lock);
1959 		msleep_interruptible(100);
1960 		spin_lock_irq(SCp->device->host->host_lock);
1961 	}
1962 
1963 	hostdata->eh_complete = &complete;
1964 	NCR_700_internal_bus_reset(SCp->device->host);
1965 
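	/* the interrupt handler completes this when it services the
	 * resulting bus reset interrupt, after failing back every
	 * outstanding command with DID_RESET */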
1966 	spin_unlock_irq(SCp->device->host->host_lock);
1967 	wait_for_completion(&complete);
1968 	spin_lock_irq(SCp->device->host->host_lock);
1969 
1970 	hostdata->eh_complete = NULL;
1971 	/* Revalidate the transport parameters of the failing device */
1972 	if(hostdata->fast)
1973 		spi_schedule_dv_device(SCp->device);
1974 
1975 	spin_unlock_irq(SCp->device->host->host_lock);
1976 	return SUCCESS;
1977 }
1978 
1979 STATIC int
1980 NCR_700_host_reset(struct scsi_cmnd * SCp)
1981 {
1982 	scmd_printk(KERN_INFO, SCp, "New error handler wants HOST reset\n\t");
1983 	scsi_print_command(SCp);
1984 
1985 	spin_lock_irq(SCp->device->host->host_lock);
1986 
1987 	NCR_700_internal_bus_reset(SCp->device->host);
1988 	NCR_700_chip_reset(SCp->device->host);
1989 
1990 	spin_unlock_irq(SCp->device->host->host_lock);
1991 
1992 	return SUCCESS;
1993 }
1994 
1995 STATIC void
1996 NCR_700_set_period(struct scsi_target *STp, int period)
1997 {
1998 	struct Scsi_Host *SHp = dev_to_shost(STp->dev.parent);
1999 	struct NCR_700_Host_Parameters *hostdata =
2000 		(struct NCR_700_Host_Parameters *)SHp->hostdata[0];
2001 
2002 	if(!hostdata->fast)
2003 		return;
2004 
2005 	if(period < hostdata->min_period)
2006 		period = hostdata->min_period;
2007 
2008 	spi_period(STp) = period;
2009 	spi_flags(STp) &= ~(NCR_700_DEV_NEGOTIATED_SYNC |
2010 			    NCR_700_DEV_BEGIN_SYNC_NEGOTIATION);
2011 	spi_flags(STp) |= NCR_700_DEV_PRINT_SYNC_NEGOTIATION;
2012 }
2013 
2014 STATIC void
2015 NCR_700_set_offset(struct scsi_target *STp, int offset)
2016 {
2017 	struct Scsi_Host *SHp = dev_to_shost(STp->dev.parent);
2018 	struct NCR_700_Host_Parameters *hostdata =
2019 		(struct NCR_700_Host_Parameters *)SHp->hostdata[0];
2020 	int max_offset = hostdata->chip710
2021 		? NCR_710_MAX_OFFSET : NCR_700_MAX_OFFSET;
2022 
2023 	if(!hostdata->fast)
2024 		return;
2025 
2026 	if(offset > max_offset)
2027 		offset = max_offset;
2028 
2029 	/* if we're currently async, make sure the period is reasonable */
2030 	if(spi_offset(STp) == 0 && (spi_period(STp) < hostdata->min_period ||
2031 				    spi_period(STp) > 0xff))
2032 		spi_period(STp) = hostdata->min_period;
2033 
2034 	spi_offset(STp) = offset;
2035 	spi_flags(STp) &= ~(NCR_700_DEV_NEGOTIATED_SYNC |
2036 			    NCR_700_DEV_BEGIN_SYNC_NEGOTIATION);
2037 	spi_flags(STp) |= NCR_700_DEV_PRINT_SYNC_NEGOTIATION;
2038 }
2039 
2040 
2041 
2042 STATIC int
2043 NCR_700_slave_configure(struct scsi_device *SDp)
2044 {
2045 	struct NCR_700_Host_Parameters *hostdata =
2046 		(struct NCR_700_Host_Parameters *)SDp->host->hostdata[0];
2047 
2048 	/* to do here: allocate memory; build a queue_full list */
2049 	if(SDp->tagged_supported) {
2050 		scsi_set_tag_type(SDp, MSG_ORDERED_TAG);
2051 		scsi_activate_tcq(SDp, NCR_700_DEFAULT_TAGS);
2052 		NCR_700_set_tag_neg_state(SDp, NCR_700_START_TAG_NEGOTIATION);
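		/* queuecommand sees this state on the first tagged command
		 * and begins tag negotiation with the target */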
2053 	} else {
2054 		/* initialise to default depth */
2055 		scsi_adjust_queue_depth(SDp, 0, SDp->host->cmd_per_lun);
2056 	}
2057 	if(hostdata->fast) {
2058 		/* Find the correct offset and period via domain validation */
2059 		if (!spi_initial_dv(SDp->sdev_target))
2060 			spi_dv_device(SDp);
2061 	} else {
2062 		spi_offset(SDp->sdev_target) = 0;
2063 		spi_period(SDp->sdev_target) = 0;
2064 	}
2065 	return 0;
2066 }
2067 
2068 STATIC void
2069 NCR_700_slave_destroy(struct scsi_device *SDp)
2070 {
2071 	/* to do here: deallocate memory */
2072 }
2073 
2074 static int
2075 NCR_700_change_queue_depth(struct scsi_device *SDp, int depth)
2076 {
2077 	if (depth > NCR_700_MAX_TAGS)
2078 		depth = NCR_700_MAX_TAGS;
2079 
2080 	scsi_adjust_queue_depth(SDp, scsi_get_tag_type(SDp), depth);
2081 	return depth;
2082 }
2083 
2084 static int NCR_700_change_queue_type(struct scsi_device *SDp, int tag_type)
2085 {
2086 	int change_tag = ((tag_type ==0 &&  scsi_get_tag_type(SDp) != 0)
2087 			  || (tag_type != 0 && scsi_get_tag_type(SDp) == 0));
2088 	struct NCR_700_Host_Parameters *hostdata =
2089 		(struct NCR_700_Host_Parameters *)SDp->host->hostdata[0];
2090 
2091 	scsi_set_tag_type(SDp, tag_type);
2092 
2093 	/* We have a global (per-target) flag to track whether TCQ is
2094 	 * enabled, so we'll be turning it off for the entire target here.
2095 	 * Our tag algorithm will fail if we mix tagged and untagged
2096 	 * commands, so quiesce the device before making the change */
2097 	if (change_tag)
2098 		scsi_target_quiesce(SDp->sdev_target);
2099 
2100 	if (!tag_type) {
2101 		/* shift back to the default unqueued number of commands
2102 		 * (the user can still raise this) */
2103 		scsi_deactivate_tcq(SDp, SDp->host->cmd_per_lun);
2104 		hostdata->tag_negotiated &= ~(1 << sdev_id(SDp));
2105 	} else {
2106 		/* Here, we cleared the negotiation flag above, so this
2107 		 * will force the driver to renegotiate */
2108 		scsi_activate_tcq(SDp, SDp->queue_depth);
2109 		if (change_tag)
2110 			NCR_700_set_tag_neg_state(SDp, NCR_700_START_TAG_NEGOTIATION);
2111 	}
2112 	if (change_tag)
2113 		scsi_target_resume(SDp->sdev_target);
2114 
2115 	return tag_type;
2116 }
2117 
2118 static ssize_t
2119 NCR_700_show_active_tags(struct device *dev, struct device_attribute *attr, char *buf)
2120 {
2121 	struct scsi_device *SDp = to_scsi_device(dev);
2122 
2123 	return snprintf(buf, 20, "%d\n", NCR_700_get_depth(SDp));
2124 }
2125 
2126 static struct device_attribute NCR_700_active_tags_attr = {
2127 	.attr = {
2128 		.name =		"active_tags",
2129 		.mode =		S_IRUGO,
2130 	},
2131 	.show = NCR_700_show_active_tags,
2132 };
2133 
2134 STATIC struct device_attribute *NCR_700_dev_attrs[] = {
2135 	&NCR_700_active_tags_attr,
2136 	NULL,
2137 };
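/* the attribute appears in each scsi device's sysfs directory, so the
 * current number of outstanding commands can be read with something
 * like (path is illustrative):
 *	cat /sys/bus/scsi/devices/<host:channel:id:lun>/active_tags
 */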
2138 
2139 EXPORT_SYMBOL(NCR_700_detect);
2140 EXPORT_SYMBOL(NCR_700_release);
2141 EXPORT_SYMBOL(NCR_700_intr);
2142 
2143 static struct spi_function_template NCR_700_transport_functions =  {
2144 	.set_period	= NCR_700_set_period,
2145 	.show_period	= 1,
2146 	.set_offset	= NCR_700_set_offset,
2147 	.show_offset	= 1,
2148 };
2149 
2150 static int __init NCR_700_init(void)
2151 {
2152 	NCR_700_transport_template = spi_attach_transport(&NCR_700_transport_functions);
2153 	if(!NCR_700_transport_template)
2154 		return -ENODEV;
2155 	return 0;
2156 }
2157 
2158 static void __exit NCR_700_exit(void)
2159 {
2160 	spi_release_transport(NCR_700_transport_template);
2161 }
2162 
2163 module_init(NCR_700_init);
2164 module_exit(NCR_700_exit);
2165 
2166