xref: /linux/drivers/scsi/53c700.c (revision f3d9478b2ce468c3115b02ecae7e975990697f15)
1 /* -*- mode: c; c-basic-offset: 8 -*- */
2 
3 /* NCR (or Symbios) 53c700 and 53c700-66 Driver
4  *
5  * Copyright (C) 2001 by James.Bottomley@HansenPartnership.com
6 **-----------------------------------------------------------------------------
7 **
8 **  This program is free software; you can redistribute it and/or modify
9 **  it under the terms of the GNU General Public License as published by
10 **  the Free Software Foundation; either version 2 of the License, or
11 **  (at your option) any later version.
12 **
13 **  This program is distributed in the hope that it will be useful,
14 **  but WITHOUT ANY WARRANTY; without even the implied warranty of
15 **  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
16 **  GNU General Public License for more details.
17 **
18 **  You should have received a copy of the GNU General Public License
19 **  along with this program; if not, write to the Free Software
20 **  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
21 **
22 **-----------------------------------------------------------------------------
23  */
24 
25 /* Notes:
26  *
27  * This driver is designed exclusively for these chips (virtually the
28  * earliest of the scripts engine chips).  They need their own drivers
29  * because they are missing so many of the scripts and snazzy register
30  * features of their elder brothers (the 710, 720 and 770).
31  *
32  * The 700 is the lowliest of the line; it can only do async SCSI.
33  * The 700-66 can at least do synchronous SCSI up to 10MHz.
34  *
35  * The 700 chip has no host bus interface logic of its own.  However,
36  * it is usually mapped to a location with well defined register
37  * offsets.  Therefore, if you can determine the base address and the
38  * irq your board incorporating this chip uses, you can probably use
39  * this driver to run it (although you'll probably have to write a
40  * minimal wrapper for the purpose---see the NCR_D700 driver for
41  * details about how to do this, and the rough sketch below the TODO list).
42  *
43  *
44  * TODO List:
45  *
46  * 1. Better statistics in the proc fs
47  *
48  * 2. Implement message queue (queues SCSI messages like commands) and make
49  *    the abort and device reset functions use them.
50  * */
51 
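/* A rough, illustrative sketch of such a wrapper (loosely modelled on
 * the NCR_D700 driver; the board name, resources, clock value and bus
 * glue are entirely hypothetical and the error unwinding is
 * abbreviated, hence the block is disabled): fill in an
 * NCR_700_Host_Parameters structure with the board's register base and
 * clock, call NCR_700_detect() and wire up NCR_700_intr().
 */
#if 0
static struct scsi_host_template myboard_template = {
	.module		= THIS_MODULE,
	.name		= "MYBOARD 53c700",
	.proc_name	= "myboard",
	.this_id	= 7,
};

static int
myboard_probe(struct device *dev, unsigned long base_addr, int irq)
{
	struct NCR_700_Host_Parameters *hostdata;
	struct Scsi_Host *host;

	hostdata = kzalloc(sizeof(*hostdata), GFP_KERNEL);
	if (hostdata == NULL)
		return -ENOMEM;

	/* board specific knowledge: register base, clock, differential etc. */
	hostdata->base = ioremap(base_addr, 0x80);
	hostdata->clock = 50;		/* MHz */
	hostdata->differential = 0;
	hostdata->chip710 = 0;
	hostdata->dev = dev;

	host = NCR_700_detect(&myboard_template, hostdata, dev);
	if (host == NULL)
		goto out_free;
	host->irq = irq;
	if (request_irq(irq, NCR_700_intr, 0, "myboard", host)) {
		scsi_remove_host(host);
		NCR_700_release(host);
		scsi_host_put(host);
		goto out_free;
	}
	return 0;

 out_free:
	iounmap(hostdata->base);
	kfree(hostdata);
	return -ENODEV;
}
#endif
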
52 /* CHANGELOG
53  *
54  * Version 2.8
55  *
56  * Fixed a bad bug affecting tag starvation processing (previously the
57  * driver would hang the system if too many tags starved).  Also fixed
58  * a bad bug having to do with 10 byte command processing and REQUEST
59  * SENSE (the command would loop forever getting a transfer length
60  * mismatch in the CMD phase).
61  *
62  * Version 2.7
63  *
64  * Fixed scripts problem which caused certain devices (notably CDRWs)
65  * to hang on initial INQUIRY.  Updated NCR_700_readl/writel to use
66  * __raw_readl/writel for parisc compatibility (Thomas
67  * Bogendoerfer). Added missing SCp->request_bufflen initialisation
68  * for sense requests (Ryan Bradetich).
69  *
70  * Version 2.6
71  *
72  * Following test of the 64 bit parisc kernel by Richard Hirst,
73  * several problems have now been corrected.  Also adds support for
74  * consistent memory allocation.
75  *
76  * Version 2.5
77  *
78  * More compatibility changes for 710 (now actually works).  Enhanced
79  * support for odd clock speeds which constrain SDTR negotiations.
80  * Corrected cacheline separation for SCSI messages and status on
81  * incoherent architectures.  Use of the PCI mapping functions on
82  * buffers to begin support for 64 bit drivers.
83  *
84  * Version 2.4
85  *
86  * Added support for the 53c710 chip (in 53c700 emulation mode only---no
87  * special 53c710 instructions or registers are used).
88  *
89  * Version 2.3
90  *
91  * More endianness/cache coherency changes.
92  *
93  * Better bad device handling (handles devices lying about tag
94  * queueing support and devices which fail to provide sense data on
95  * contingent allegiance conditions)
96  *
97  * Many thanks to Richard Hirst <rhirst@linuxcare.com> for patiently
98  * debugging this driver on the parisc architecture and suggesting
99  * many improvements and bug fixes.
100  *
101  * Thanks also go to Linuxcare Inc. for providing several PARISC
102  * machines for me to debug the driver on.
103  *
104  * Version 2.2
105  *
106  * Made the driver mem or io mapped; added endian invariance; added
107  * dma cache flushing operations for architectures which need it;
108  * added support for more varied clocking speeds.
109  *
110  * Version 2.1
111  *
112  * Initial modularisation from the D700.  See NCR_D700.c for the rest of
113  * the changelog.
114  * */
115 #define NCR_700_VERSION "2.8"
116 
117 #include <linux/config.h>
118 #include <linux/kernel.h>
119 #include <linux/types.h>
120 #include <linux/string.h>
121 #include <linux/ioport.h>
122 #include <linux/delay.h>
123 #include <linux/spinlock.h>
124 #include <linux/completion.h>
125 #include <linux/sched.h>
126 #include <linux/init.h>
127 #include <linux/proc_fs.h>
128 #include <linux/blkdev.h>
129 #include <linux/module.h>
130 #include <linux/interrupt.h>
131 #include <linux/device.h>
132 #include <asm/dma.h>
133 #include <asm/system.h>
134 #include <asm/io.h>
135 #include <asm/pgtable.h>
136 #include <asm/byteorder.h>
137 
138 #include <scsi/scsi.h>
139 #include <scsi/scsi_cmnd.h>
140 #include <scsi/scsi_dbg.h>
141 #include <scsi/scsi_eh.h>
142 #include <scsi/scsi_host.h>
143 #include <scsi/scsi_tcq.h>
144 #include <scsi/scsi_transport.h>
145 #include <scsi/scsi_transport_spi.h>
146 
147 #include "53c700.h"
148 
149 /* NOTE: For 64 bit drivers there are points in the code where we use
150  * a non dereferenceable pointer to point to a structure in dma-able
151  * memory (which is 32 bits) so that we can use all of the structure
152  * operations but take the address at the end.  This macro allows us
153  * to truncate the 64 bit pointer down to 32 bits without the compiler
154  * complaining */
155 #define to32bit(x)	((__u32)((unsigned long)(x)))
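
/* Illustrative use only: the typical pattern in this driver is to take
 * the bus address of a field inside a dma-able structure, e.g.
 *
 *	script_patch_32_abs(hostdata->script, SGScriptStartAddress,
 *			    to32bit(&slot->pSG[0].ins));
 *
 * as done in the reselection and command start paths further down. */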
156 
157 #ifdef NCR_700_DEBUG
158 #define STATIC
159 #else
160 #define STATIC static
161 #endif
162 
163 MODULE_AUTHOR("James Bottomley");
164 MODULE_DESCRIPTION("53c700 and 53c700-66 Driver");
165 MODULE_LICENSE("GPL");
166 
167 /* This is the script */
168 #include "53c700_d.h"
169 
170 
171 STATIC int NCR_700_queuecommand(struct scsi_cmnd *, void (*done)(struct scsi_cmnd *));
172 STATIC int NCR_700_abort(struct scsi_cmnd * SCpnt);
173 STATIC int NCR_700_bus_reset(struct scsi_cmnd * SCpnt);
174 STATIC int NCR_700_host_reset(struct scsi_cmnd * SCpnt);
175 STATIC void NCR_700_chip_setup(struct Scsi_Host *host);
176 STATIC void NCR_700_chip_reset(struct Scsi_Host *host);
177 STATIC int NCR_700_slave_configure(struct scsi_device *SDpnt);
178 STATIC void NCR_700_slave_destroy(struct scsi_device *SDpnt);
179 static int NCR_700_change_queue_depth(struct scsi_device *SDpnt, int depth);
180 static int NCR_700_change_queue_type(struct scsi_device *SDpnt, int depth);
181 
182 STATIC struct device_attribute *NCR_700_dev_attrs[];
183 
184 STATIC struct scsi_transport_template *NCR_700_transport_template = NULL;
185 
186 struct NCR_700_sense {
187 	unsigned char cmnd[MAX_COMMAND_SIZE];
188 };
189 
190 static char *NCR_700_phase[] = {
191 	"",
192 	"after selection",
193 	"before command phase",
194 	"after command phase",
195 	"after status phase",
196 	"after data in phase",
197 	"after data out phase",
198 	"during data phase",
199 };
200 
201 static char *NCR_700_condition[] = {
202 	"",
203 	"NOT MSG_OUT",
204 	"UNEXPECTED PHASE",
205 	"NOT MSG_IN",
206 	"UNEXPECTED MSG",
207 	"MSG_IN",
208 	"SDTR_MSG RECEIVED",
209 	"REJECT_MSG RECEIVED",
210 	"DISCONNECT_MSG RECEIVED",
211 	"MSG_OUT",
212 	"DATA_IN",
213 
214 };
215 
216 static char *NCR_700_fatal_messages[] = {
217 	"unexpected message after reselection",
218 	"still MSG_OUT after message injection",
219 	"not MSG_IN after selection",
220 	"Illegal message length received",
221 };
222 
223 static char *NCR_700_SBCL_bits[] = {
224 	"IO ",
225 	"CD ",
226 	"MSG ",
227 	"ATN ",
228 	"SEL ",
229 	"BSY ",
230 	"ACK ",
231 	"REQ ",
232 };
233 
234 static char *NCR_700_SBCL_to_phase[] = {
235 	"DATA_OUT",
236 	"DATA_IN",
237 	"CMD_OUT",
238 	"STATE",
239 	"ILLEGAL PHASE",
240 	"ILLEGAL PHASE",
241 	"MSG OUT",
242 	"MSG IN",
243 };
244 
245 /* This translates the SDTR message offset and period to a value
246  * which can be loaded into the SXFER_REG.
247  *
248  * NOTE: According to SCSI-2, the true transfer period (in ns) is
249  *       actually four times this period value */
250 static inline __u8
251 NCR_700_offset_period_to_sxfer(struct NCR_700_Host_Parameters *hostdata,
252 			       __u8 offset, __u8 period)
253 {
254 	int XFERP;
255 
256 	__u8 min_xferp = (hostdata->chip710
257 			  ? NCR_710_MIN_XFERP : NCR_700_MIN_XFERP);
258 	__u8 max_offset = (hostdata->chip710
259 			   ? NCR_710_MAX_OFFSET : NCR_700_MAX_OFFSET);
260 
261 	if(offset == 0)
262 		return 0;
263 
264 	if(period < hostdata->min_period) {
265 		printk(KERN_WARNING "53c700: Period %dns is less than this chip's minimum, setting to %d\n", period*4, hostdata->min_period*4);
266 		period = hostdata->min_period;
267 	}
268 	XFERP = (period*4 * hostdata->sync_clock)/1000 - 4;
269 	if(offset > max_offset) {
270 		printk(KERN_WARNING "53c700: Offset %d exceeds chip maximum, setting to %d\n",
271 		       offset, max_offset);
272 		offset = max_offset;
273 	}
274 	if(XFERP < min_xferp) {
275 		printk(KERN_WARNING "53c700: XFERP %d is less than minimum, setting to %d\n",
276 		       XFERP,  min_xferp);
277 		XFERP =  min_xferp;
278 	}
279 	return (offset & 0x0f) | (XFERP & 0x07)<<4;
280 }
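
/* A rough worked example of the conversion above (illustrative values
 * only): with a 40MHz synchronous clock, a period factor of 50 (i.e.
 * 200ns) and an offset of 4,
 *
 *	XFERP = (200 * 40)/1000 - 4 = 4
 *	SXFER = (4 & 0x0f) | ((4 & 0x07) << 4) = 0x44
 *
 * subject to the chip minimum/maximum clamping performed above. */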
281 
282 static inline __u8
283 NCR_700_get_SXFER(struct scsi_device *SDp)
284 {
285 	struct NCR_700_Host_Parameters *hostdata =
286 		(struct NCR_700_Host_Parameters *)SDp->host->hostdata[0];
287 
288 	return NCR_700_offset_period_to_sxfer(hostdata,
289 					      spi_offset(SDp->sdev_target),
290 					      spi_period(SDp->sdev_target));
291 }
292 
293 struct Scsi_Host *
294 NCR_700_detect(struct scsi_host_template *tpnt,
295 	       struct NCR_700_Host_Parameters *hostdata, struct device *dev)
296 {
297 	dma_addr_t pScript, pSlots;
298 	__u8 *memory;
299 	__u32 *script;
300 	struct Scsi_Host *host;
301 	static int banner = 0;
302 	int j;
303 
304 	if(tpnt->sdev_attrs == NULL)
305 		tpnt->sdev_attrs = NCR_700_dev_attrs;
306 
307 	memory = dma_alloc_noncoherent(hostdata->dev, TOTAL_MEM_SIZE,
308 				       &pScript, GFP_KERNEL);
309 	if(memory == NULL) {
310 		printk(KERN_ERR "53c700: Failed to allocate memory for driver, detaching\n");
311 		return NULL;
312 	}
313 
314 	script = (__u32 *)memory;
315 	hostdata->msgin = memory + MSGIN_OFFSET;
316 	hostdata->msgout = memory + MSGOUT_OFFSET;
317 	hostdata->status = memory + STATUS_OFFSET;
318 	/* all of these offsets are L1_CACHE_BYTES separated.  It is fatal
319 	 * if this isn't sufficient separation to avoid dma flushing issues */
320 	BUG_ON(!dma_is_consistent(pScript) && L1_CACHE_BYTES < dma_get_cache_alignment());
321 	hostdata->slots = (struct NCR_700_command_slot *)(memory + SLOTS_OFFSET);
322 	hostdata->dev = dev;
323 
324 	pSlots = pScript + SLOTS_OFFSET;
325 
326 	/* Fill in the missing routines from the host template */
327 	tpnt->queuecommand = NCR_700_queuecommand;
328 	tpnt->eh_abort_handler = NCR_700_abort;
329 	tpnt->eh_bus_reset_handler = NCR_700_bus_reset;
330 	tpnt->eh_host_reset_handler = NCR_700_host_reset;
331 	tpnt->can_queue = NCR_700_COMMAND_SLOTS_PER_HOST;
332 	tpnt->sg_tablesize = NCR_700_SG_SEGMENTS;
333 	tpnt->cmd_per_lun = NCR_700_CMD_PER_LUN;
334 	tpnt->use_clustering = ENABLE_CLUSTERING;
335 	tpnt->slave_configure = NCR_700_slave_configure;
336 	tpnt->slave_destroy = NCR_700_slave_destroy;
337 	tpnt->change_queue_depth = NCR_700_change_queue_depth;
338 	tpnt->change_queue_type = NCR_700_change_queue_type;
339 
340 	if(tpnt->name == NULL)
341 		tpnt->name = "53c700";
342 	if(tpnt->proc_name == NULL)
343 		tpnt->proc_name = "53c700";
344 
345 	host = scsi_host_alloc(tpnt, 4);
346 	if (!host)
347 		return NULL;
348 	memset(hostdata->slots, 0, sizeof(struct NCR_700_command_slot)
349 	       * NCR_700_COMMAND_SLOTS_PER_HOST);
350 	for (j = 0; j < NCR_700_COMMAND_SLOTS_PER_HOST; j++) {
351 		dma_addr_t offset = (dma_addr_t)((unsigned long)&hostdata->slots[j].SG[0]
352 					  - (unsigned long)&hostdata->slots[0].SG[0]);
353 		hostdata->slots[j].pSG = (struct NCR_700_SG_List *)((unsigned long)(pSlots + offset));
354 		if(j == 0)
355 			hostdata->free_list = &hostdata->slots[j];
356 		else
357 			hostdata->slots[j-1].ITL_forw = &hostdata->slots[j];
358 		hostdata->slots[j].state = NCR_700_SLOT_FREE;
359 	}
360 
361 	for (j = 0; j < ARRAY_SIZE(SCRIPT); j++)
362 		script[j] = bS_to_host(SCRIPT[j]);
363 
364 	/* adjust all labels to be bus physical */
365 	for (j = 0; j < PATCHES; j++)
366 		script[LABELPATCHES[j]] = bS_to_host(pScript + SCRIPT[LABELPATCHES[j]]);
367 	/* now patch up fixed addresses. */
368 	script_patch_32(script, MessageLocation,
369 			pScript + MSGOUT_OFFSET);
370 	script_patch_32(script, StatusAddress,
371 			pScript + STATUS_OFFSET);
372 	script_patch_32(script, ReceiveMsgAddress,
373 			pScript + MSGIN_OFFSET);
374 
375 	hostdata->script = script;
376 	hostdata->pScript = pScript;
377 	dma_sync_single_for_device(hostdata->dev, pScript, sizeof(SCRIPT), DMA_TO_DEVICE);
378 	hostdata->state = NCR_700_HOST_FREE;
379 	hostdata->cmd = NULL;
380 	host->max_id = 8;
381 	host->max_lun = NCR_700_MAX_LUNS;
382 	BUG_ON(NCR_700_transport_template == NULL);
383 	host->transportt = NCR_700_transport_template;
384 	host->unique_id = (unsigned long)hostdata->base;
385 	hostdata->eh_complete = NULL;
386 	host->hostdata[0] = (unsigned long)hostdata;
387 	/* kick the chip */
388 	NCR_700_writeb(0xff, host, CTEST9_REG);
389 	if (hostdata->chip710)
390 		hostdata->rev = (NCR_700_readb(host, CTEST8_REG)>>4) & 0x0f;
391 	else
392 		hostdata->rev = (NCR_700_readb(host, CTEST7_REG)>>4) & 0x0f;
393 	hostdata->fast = (NCR_700_readb(host, CTEST9_REG) == 0);
394 	if (banner == 0) {
395 		printk(KERN_NOTICE "53c700: Version " NCR_700_VERSION " By James.Bottomley@HansenPartnership.com\n");
396 		banner = 1;
397 	}
398 	printk(KERN_NOTICE "scsi%d: %s rev %d %s\n", host->host_no,
399 	       hostdata->chip710 ? "53c710" :
400 	       (hostdata->fast ? "53c700-66" : "53c700"),
401 	       hostdata->rev, hostdata->differential ?
402 	       "(Differential)" : "");
403 	/* reset the chip */
404 	NCR_700_chip_reset(host);
405 
406 	if (scsi_add_host(host, dev)) {
407 		dev_printk(KERN_ERR, dev, "53c700: scsi_add_host failed\n");
408 		scsi_host_put(host);
409 		return NULL;
410 	}
411 
412 	spi_signalling(host) = hostdata->differential ? SPI_SIGNAL_HVD :
413 		SPI_SIGNAL_SE;
414 
415 	return host;
416 }
417 
418 int
419 NCR_700_release(struct Scsi_Host *host)
420 {
421 	struct NCR_700_Host_Parameters *hostdata =
422 		(struct NCR_700_Host_Parameters *)host->hostdata[0];
423 
424 	dma_free_noncoherent(hostdata->dev, TOTAL_MEM_SIZE,
425 			       hostdata->script, hostdata->pScript);
426 	return 1;
427 }
428 
429 static inline __u8
430 NCR_700_identify(int can_disconnect, __u8 lun)
431 {
432 	return IDENTIFY_BASE |
433 		((can_disconnect) ? 0x40 : 0) |
434 		(lun & NCR_700_LUN_MASK);
435 }
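
/* For example (assuming the usual IDENTIFY_BASE of 0x80), a
 * disconnect-capable command to LUN 0 gets an identify byte of
 * NCR_700_identify(1, 0) == 0xc0, while LUN 2 with disconnection
 * disabled would be 0x82. */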
436 
437 /*
438  * Function : static int NCR_700_data_residual (struct Scsi_Host *host)
439  *
440  * Purpose : return residual data count of what's in the chip.  If you
441  * really want to know what this function is doing, it's almost a
442  * direct transcription of the algorithm described in the 53c710
443  * guide, except that the DBC and DFIFO registers are only 6 bits
444  * wide on a 53c700.
445  *
446  * Inputs : host - SCSI host */
447 static inline int
448 NCR_700_data_residual (struct Scsi_Host *host) {
449 	struct NCR_700_Host_Parameters *hostdata =
450 		(struct NCR_700_Host_Parameters *)host->hostdata[0];
451 	int count, synchronous = 0;
452 	unsigned int ddir;
453 
454 	if(hostdata->chip710) {
455 		count = ((NCR_700_readb(host, DFIFO_REG) & 0x7f) -
456 			 (NCR_700_readl(host, DBC_REG) & 0x7f)) & 0x7f;
457 	} else {
458 		count = ((NCR_700_readb(host, DFIFO_REG) & 0x3f) -
459 			 (NCR_700_readl(host, DBC_REG) & 0x3f)) & 0x3f;
460 	}
461 
462 	if(hostdata->fast)
463 		synchronous = NCR_700_readb(host, SXFER_REG) & 0x0f;
464 
465 	/* get the data direction */
466 	ddir = NCR_700_readb(host, CTEST0_REG) & 0x01;
467 
468 	if (ddir) {
469 		/* Receive */
470 		if (synchronous)
471 			count += (NCR_700_readb(host, SSTAT2_REG) & 0xf0) >> 4;
472 		else
473 			if (NCR_700_readb(host, SSTAT1_REG) & SIDL_REG_FULL)
474 				++count;
475 	} else {
476 		/* Send */
477 		__u8 sstat = NCR_700_readb(host, SSTAT1_REG);
478 		if (sstat & SODL_REG_FULL)
479 			++count;
480 		if (synchronous && (sstat & SODR_REG_FULL))
481 			++count;
482 	}
483 #ifdef NCR_700_DEBUG
484 	if(count)
485 		printk("RESIDUAL IS %d (ddir %d)\n", count, ddir);
486 #endif
487 	return count;
488 }
489 
490 /* print out the SCSI wires and corresponding phase from the SBCL register
491  * in the chip */
492 static inline char *
493 sbcl_to_string(__u8 sbcl)
494 {
495 	int i;
496 	static char ret[256];
497 
498 	ret[0]='\0';
499 	for(i=0; i<8; i++) {
500 		if((1<<i) & sbcl)
501 			strcat(ret, NCR_700_SBCL_bits[i]);
502 	}
503 	strcat(ret, NCR_700_SBCL_to_phase[sbcl & 0x07]);
504 	return ret;
505 }
506 
507 static inline __u8
508 bitmap_to_number(__u8 bitmap)
509 {
510 	__u8 i;
511 
512 	for(i=0; i<8 && !(bitmap &(1<<i)); i++)
513 		;
514 	return i;
515 }
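
/* e.g. bitmap_to_number(0x20) == 5; this is used below to turn the
 * single-bit reselection ID supplied by the chip back into a SCSI
 * target number. */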
516 
517 /* Pull a slot off the free list */
518 STATIC struct NCR_700_command_slot *
519 find_empty_slot(struct NCR_700_Host_Parameters *hostdata)
520 {
521 	struct NCR_700_command_slot *slot = hostdata->free_list;
522 
523 	if(slot == NULL) {
524 		/* sanity check */
525 		if(hostdata->command_slot_count != NCR_700_COMMAND_SLOTS_PER_HOST)
526 			printk(KERN_ERR "SLOTS FULL, but count is %d, should be %d\n", hostdata->command_slot_count, NCR_700_COMMAND_SLOTS_PER_HOST);
527 		return NULL;
528 	}
529 
530 	if(slot->state != NCR_700_SLOT_FREE)
531 		/* should panic! */
532 		printk(KERN_ERR "BUSY SLOT ON FREE LIST!!!\n");
533 
534 
535 	hostdata->free_list = slot->ITL_forw;
536 	slot->ITL_forw = NULL;
537 
538 
539 	/* NOTE: set the state to busy here, not queued, since this
540 	 * indicates the slot is in use and cannot be run by the IRQ
541 	 * finish routine.  If we cannot queue the command when it
542 	 * is properly built, we then change to NCR_700_SLOT_QUEUED */
543 	slot->state = NCR_700_SLOT_BUSY;
544 	slot->flags = 0;
545 	hostdata->command_slot_count++;
546 
547 	return slot;
548 }
549 
550 STATIC void
551 free_slot(struct NCR_700_command_slot *slot,
552 	  struct NCR_700_Host_Parameters *hostdata)
553 {
554 	if((slot->state & NCR_700_SLOT_MASK) != NCR_700_SLOT_MAGIC) {
555 		printk(KERN_ERR "53c700: SLOT %p is not MAGIC!!!\n", slot);
556 	}
557 	if(slot->state == NCR_700_SLOT_FREE) {
558 		printk(KERN_ERR "53c700: SLOT %p is FREE!!!\n", slot);
559 	}
560 
561 	slot->resume_offset = 0;
562 	slot->cmnd = NULL;
563 	slot->state = NCR_700_SLOT_FREE;
564 	slot->ITL_forw = hostdata->free_list;
565 	hostdata->free_list = slot;
566 	hostdata->command_slot_count--;
567 }
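
/* Slot life-cycle, for reference: find_empty_slot() takes a slot off
 * the free list and marks it NCR_700_SLOT_BUSY; NCR_700_start_command()
 * below changes it to NCR_700_SLOT_QUEUED if the host cannot run it
 * immediately; free_slot() returns it to the free list as
 * NCR_700_SLOT_FREE. */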
568 
569 
570 /* This routine really does very little.  The command is indexed on
571    the ITL and (if tagged) the ITLQ lists in _queuecommand */
572 STATIC void
573 save_for_reselection(struct NCR_700_Host_Parameters *hostdata,
574 		     struct scsi_cmnd *SCp, __u32 dsp)
575 {
576 	/* It's just possible that this gets executed twice */
577 	if(SCp != NULL) {
578 		struct NCR_700_command_slot *slot =
579 			(struct NCR_700_command_slot *)SCp->host_scribble;
580 
581 		slot->resume_offset = dsp;
582 	}
583 	hostdata->state = NCR_700_HOST_FREE;
584 	hostdata->cmd = NULL;
585 }
586 
587 STATIC inline void
588 NCR_700_unmap(struct NCR_700_Host_Parameters *hostdata, struct scsi_cmnd *SCp,
589 	      struct NCR_700_command_slot *slot)
590 {
591 	if(SCp->sc_data_direction != DMA_NONE &&
592 	   SCp->sc_data_direction != DMA_BIDIRECTIONAL) {
593 		if(SCp->use_sg) {
594 			dma_unmap_sg(hostdata->dev, SCp->request_buffer,
595 				     SCp->use_sg, SCp->sc_data_direction);
596 		} else {
597 			dma_unmap_single(hostdata->dev, slot->dma_handle,
598 					 SCp->request_bufflen,
599 					 SCp->sc_data_direction);
600 		}
601 	}
602 }
603 
604 STATIC inline void
605 NCR_700_scsi_done(struct NCR_700_Host_Parameters *hostdata,
606 	       struct scsi_cmnd *SCp, int result)
607 {
608 	hostdata->state = NCR_700_HOST_FREE;
609 	hostdata->cmd = NULL;
610 
611 	if(SCp != NULL) {
612 		struct NCR_700_command_slot *slot =
613 			(struct NCR_700_command_slot *)SCp->host_scribble;
614 
615 		NCR_700_unmap(hostdata, SCp, slot);
616 		if (slot->flags == NCR_700_FLAG_AUTOSENSE) {
617 			struct NCR_700_sense *sense = SCp->device->hostdata;
618 #ifdef NCR_700_DEBUG
619 			printk(" ORIGINAL CMD %p RETURNED %d, new return is %d sense is\n",
620 			       SCp, SCp->cmnd[7], result);
621 			scsi_print_sense("53c700", SCp);
622 
623 #endif
624 			dma_unmap_single(hostdata->dev, slot->dma_handle, sizeof(SCp->sense_buffer), DMA_FROM_DEVICE);
625 			/* restore the old result if the request sense was
626 			 * successful */
627 			if(result == 0)
628 				result = sense->cmnd[7];
629 		} else
630 			dma_unmap_single(hostdata->dev, slot->pCmd,
631 					 sizeof(SCp->cmnd), DMA_TO_DEVICE);
632 
633 		free_slot(slot, hostdata);
634 #ifdef NCR_700_DEBUG
635 		if(NCR_700_get_depth(SCp->device) == 0 ||
636 		   NCR_700_get_depth(SCp->device) > SCp->device->queue_depth)
637 			printk(KERN_ERR "Invalid depth in NCR_700_scsi_done(): %d\n",
638 			       NCR_700_get_depth(SCp->device));
639 #endif /* NCR_700_DEBUG */
640 		NCR_700_set_depth(SCp->device, NCR_700_get_depth(SCp->device) - 1);
641 
642 		SCp->host_scribble = NULL;
643 		SCp->result = result;
644 		SCp->scsi_done(SCp);
645 	} else {
646 		printk(KERN_ERR "53c700: SCSI DONE HAS NULL SCp\n");
647 	}
648 }
649 
650 
651 STATIC void
652 NCR_700_internal_bus_reset(struct Scsi_Host *host)
653 {
654 	/* Bus reset */
655 	NCR_700_writeb(ASSERT_RST, host, SCNTL1_REG);
656 	udelay(50);
657 	NCR_700_writeb(0, host, SCNTL1_REG);
658 
659 }
660 
661 STATIC void
662 NCR_700_chip_setup(struct Scsi_Host *host)
663 {
664 	struct NCR_700_Host_Parameters *hostdata =
665 		(struct NCR_700_Host_Parameters *)host->hostdata[0];
666 	__u32 dcntl_extra = 0;
667 	__u8 min_period;
668 	__u8 min_xferp = (hostdata->chip710 ? NCR_710_MIN_XFERP : NCR_700_MIN_XFERP);
669 
670 	if(hostdata->chip710) {
671 		__u8 burst_disable = hostdata->burst_disable
672 			? BURST_DISABLE : 0;
673 		dcntl_extra = COMPAT_700_MODE;
674 
675 		NCR_700_writeb(dcntl_extra, host, DCNTL_REG);
676 		NCR_700_writeb(BURST_LENGTH_8  | hostdata->dmode_extra,
677 			       host, DMODE_710_REG);
678 		NCR_700_writeb(burst_disable | (hostdata->differential ?
679 						DIFF : 0), host, CTEST7_REG);
680 		NCR_700_writeb(BTB_TIMER_DISABLE, host, CTEST0_REG);
681 		NCR_700_writeb(FULL_ARBITRATION | ENABLE_PARITY | PARITY
682 			       | AUTO_ATN, host, SCNTL0_REG);
683 	} else {
684 		NCR_700_writeb(BURST_LENGTH_8 | hostdata->dmode_extra,
685 			       host, DMODE_700_REG);
686 		NCR_700_writeb(hostdata->differential ?
687 			       DIFF : 0, host, CTEST7_REG);
688 		if(hostdata->fast) {
689 			/* this is for 700-66, does nothing on 700 */
690 			NCR_700_writeb(LAST_DIS_ENBL | ENABLE_ACTIVE_NEGATION
691 				       | GENERATE_RECEIVE_PARITY, host,
692 				       CTEST8_REG);
693 		} else {
694 			NCR_700_writeb(FULL_ARBITRATION | ENABLE_PARITY
695 				       | PARITY | AUTO_ATN, host, SCNTL0_REG);
696 		}
697 	}
698 
699 	NCR_700_writeb(1 << host->this_id, host, SCID_REG);
700 	NCR_700_writeb(0, host, SBCL_REG);
701 	NCR_700_writeb(ASYNC_OPERATION, host, SXFER_REG);
702 
703 	NCR_700_writeb(PHASE_MM_INT | SEL_TIMEOUT_INT | GROSS_ERR_INT | UX_DISC_INT
704 	     | RST_INT | PAR_ERR_INT | SELECT_INT, host, SIEN_REG);
705 
706 	NCR_700_writeb(ABORT_INT | INT_INST_INT | ILGL_INST_INT, host, DIEN_REG);
707 	NCR_700_writeb(ENABLE_SELECT, host, SCNTL1_REG);
708 	if(hostdata->clock > 75) {
709 		printk(KERN_ERR "53c700: Clock speed %dMHz is too high: 75MHz is the maximum this chip can be driven at\n", hostdata->clock);
710 		/* do the best we can, but the async clock will be out
711 		 * of spec: sync divider 2, async divider 3 */
712 		DEBUG(("53c700: sync 2 async 3\n"));
713 		NCR_700_writeb(SYNC_DIV_2_0, host, SBCL_REG);
714 		NCR_700_writeb(ASYNC_DIV_3_0 | dcntl_extra, host, DCNTL_REG);
715 		hostdata->sync_clock = hostdata->clock/2;
716 	} else	if(hostdata->clock > 50  && hostdata->clock <= 75) {
717 		/* sync divider 1.5, async divider 3 */
718 		DEBUG(("53c700: sync 1.5 async 3\n"));
719 		NCR_700_writeb(SYNC_DIV_1_5, host, SBCL_REG);
720 		NCR_700_writeb(ASYNC_DIV_3_0 | dcntl_extra, host, DCNTL_REG);
721 		hostdata->sync_clock = hostdata->clock*2;
722 		hostdata->sync_clock /= 3;
723 
724 	} else if(hostdata->clock > 37 && hostdata->clock <= 50) {
725 		/* sync divider 1, async divider 2 */
726 		DEBUG(("53c700: sync 1 async 2\n"));
727 		NCR_700_writeb(SYNC_DIV_1_0, host, SBCL_REG);
728 		NCR_700_writeb(ASYNC_DIV_2_0 | dcntl_extra, host, DCNTL_REG);
729 		hostdata->sync_clock = hostdata->clock;
730 	} else if(hostdata->clock > 25 && hostdata->clock <=37) {
731 		/* sync divider 1, async divider 1.5 */
732 		DEBUG(("53c700: sync 1 async 1.5\n"));
733 		NCR_700_writeb(SYNC_DIV_1_0, host, SBCL_REG);
734 		NCR_700_writeb(ASYNC_DIV_1_5 | dcntl_extra, host, DCNTL_REG);
735 		hostdata->sync_clock = hostdata->clock;
736 	} else {
737 		DEBUG(("53c700: sync 1 async 1\n"));
738 		NCR_700_writeb(SYNC_DIV_1_0, host, SBCL_REG);
739 		NCR_700_writeb(ASYNC_DIV_1_0 | dcntl_extra, host, DCNTL_REG);
740 		/* sync divider 1, async divider 1 */
741 		hostdata->sync_clock = hostdata->clock;
742 	}
743 	/* Calculate the actual minimum period that can be supported
744 	 * by our synchronous clock speed.  See the 710 manual for
745 	 * exact details of this calculation which is based on a
746 	 * setting of the SXFER register */
747 	min_period = 1000*(4+min_xferp)/(4*hostdata->sync_clock);
748 	hostdata->min_period = NCR_700_MIN_PERIOD;
749 	if(min_period > NCR_700_MIN_PERIOD)
750 		hostdata->min_period = min_period;
751 }
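
/* A rough worked example of the min_period calculation above: with a
 * 50MHz synchronous clock and a minimum XFERP of 1 (illustrative
 * values), min_period = 1000*(4+1)/(4*50) = 25, i.e. 25*4 = 100ns,
 * which corresponds to the 10MHz synchronous limit of the 700-66
 * mentioned in the notes at the top of this file. */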
752 
753 STATIC void
754 NCR_700_chip_reset(struct Scsi_Host *host)
755 {
756 	struct NCR_700_Host_Parameters *hostdata =
757 		(struct NCR_700_Host_Parameters *)host->hostdata[0];
758 	if(hostdata->chip710) {
759 		NCR_700_writeb(SOFTWARE_RESET_710, host, ISTAT_REG);
760 		udelay(100);
761 
762 		NCR_700_writeb(0, host, ISTAT_REG);
763 	} else {
764 		NCR_700_writeb(SOFTWARE_RESET, host, DCNTL_REG);
765 		udelay(100);
766 
767 		NCR_700_writeb(0, host, DCNTL_REG);
768 	}
769 
770 	mdelay(1000);
771 
772 	NCR_700_chip_setup(host);
773 }
774 
775 /* The heart of the message processing engine is that the instruction
776  * immediately after the INT is the normal case (and so must be CLEAR
777  * ACK).  If we want to do something else, we call that routine in
778  * scripts and set temp to be the normal case + 8 (skipping the CLEAR
779  * ACK) so that the routine returns correctly to resume its activity
780  * */
781 STATIC __u32
782 process_extended_message(struct Scsi_Host *host,
783 			 struct NCR_700_Host_Parameters *hostdata,
784 			 struct scsi_cmnd *SCp, __u32 dsp, __u32 dsps)
785 {
786 	__u32 resume_offset = dsp, temp = dsp + 8;
787 	__u8 pun = 0xff, lun = 0xff;
788 
789 	if(SCp != NULL) {
790 		pun = SCp->device->id;
791 		lun = SCp->device->lun;
792 	}
793 
794 	switch(hostdata->msgin[2]) {
795 	case A_SDTR_MSG:
796 		if(SCp != NULL && NCR_700_is_flag_set(SCp->device, NCR_700_DEV_BEGIN_SYNC_NEGOTIATION)) {
797 			struct scsi_target *starget = SCp->device->sdev_target;
798 			__u8 period = hostdata->msgin[3];
799 			__u8 offset = hostdata->msgin[4];
800 
801 			if(offset == 0 || period == 0) {
802 				offset = 0;
803 				period = 0;
804 			}
805 
806 			spi_offset(starget) = offset;
807 			spi_period(starget) = period;
808 
809 			if(NCR_700_is_flag_set(SCp->device, NCR_700_DEV_PRINT_SYNC_NEGOTIATION)) {
810 				spi_display_xfer_agreement(starget);
811 				NCR_700_clear_flag(SCp->device, NCR_700_DEV_PRINT_SYNC_NEGOTIATION);
812 			}
813 
814 			NCR_700_set_flag(SCp->device, NCR_700_DEV_NEGOTIATED_SYNC);
815 			NCR_700_clear_flag(SCp->device, NCR_700_DEV_BEGIN_SYNC_NEGOTIATION);
816 
817 			NCR_700_writeb(NCR_700_get_SXFER(SCp->device),
818 				       host, SXFER_REG);
819 
820 		} else {
821 			/* SDTR message out of the blue, reject it */
822 			shost_printk(KERN_WARNING, host,
823 				"Unexpected SDTR msg\n");
824 			hostdata->msgout[0] = A_REJECT_MSG;
825 			dma_cache_sync(hostdata->msgout, 1, DMA_TO_DEVICE);
826 			script_patch_16(hostdata->script, MessageCount, 1);
827 			/* SendMsgOut returns, so set up the return
828 			 * address */
829 			resume_offset = hostdata->pScript + Ent_SendMessageWithATN;
830 		}
831 		break;
832 
833 	case A_WDTR_MSG:
834 		printk(KERN_INFO "scsi%d: (%d:%d), Unsolicited WDTR after CMD, Rejecting\n",
835 		       host->host_no, pun, lun);
836 		hostdata->msgout[0] = A_REJECT_MSG;
837 		dma_cache_sync(hostdata->msgout, 1, DMA_TO_DEVICE);
838 		script_patch_16(hostdata->script, MessageCount, 1);
839 		resume_offset = hostdata->pScript + Ent_SendMessageWithATN;
840 
841 		break;
842 
843 	default:
844 		printk(KERN_INFO "scsi%d (%d:%d): Unexpected message %s: ",
845 		       host->host_no, pun, lun,
846 		       NCR_700_phase[(dsps & 0xf00) >> 8]);
847 		spi_print_msg(hostdata->msgin);
848 		printk("\n");
849 		/* just reject it */
850 		hostdata->msgout[0] = A_REJECT_MSG;
851 		dma_cache_sync(hostdata->msgout, 1, DMA_TO_DEVICE);
852 		script_patch_16(hostdata->script, MessageCount, 1);
853 		/* SendMsgOut returns, so set up the return
854 		 * address */
855 		resume_offset = hostdata->pScript + Ent_SendMessageWithATN;
856 	}
857 	NCR_700_writel(temp, host, TEMP_REG);
858 	return resume_offset;
859 }
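
/* For reference, the SDTR case above decodes an extended message laid
 * out as msgin[] = { EXTENDED_MESSAGE, length, SDTR code, period,
 * offset }; e.g. { 0x01, 0x03, 0x01, 0x32, 0x08 } requests a 200ns
 * (0x32 * 4ns) period with an offset of 8. */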
860 
861 STATIC __u32
862 process_message(struct Scsi_Host *host,	struct NCR_700_Host_Parameters *hostdata,
863 		struct scsi_cmnd *SCp, __u32 dsp, __u32 dsps)
864 {
865 	/* work out where to return to */
866 	__u32 temp = dsp + 8, resume_offset = dsp;
867 	__u8 pun = 0xff, lun = 0xff;
868 
869 	if(SCp != NULL) {
870 		pun = SCp->device->id;
871 		lun = SCp->device->lun;
872 	}
873 
874 #ifdef NCR_700_DEBUG
875 	printk("scsi%d (%d:%d): message %s: ", host->host_no, pun, lun,
876 	       NCR_700_phase[(dsps & 0xf00) >> 8]);
877 	spi_print_msg(hostdata->msgin);
878 	printk("\n");
879 #endif
880 
881 	switch(hostdata->msgin[0]) {
882 
883 	case A_EXTENDED_MSG:
884 		resume_offset =  process_extended_message(host, hostdata, SCp,
885 							  dsp, dsps);
886 		break;
887 
888 	case A_REJECT_MSG:
889 		if(SCp != NULL && NCR_700_is_flag_set(SCp->device, NCR_700_DEV_BEGIN_SYNC_NEGOTIATION)) {
890 			/* Rejected our sync negotiation attempt */
891 			spi_period(SCp->device->sdev_target) =
892 				spi_offset(SCp->device->sdev_target) = 0;
893 			NCR_700_set_flag(SCp->device, NCR_700_DEV_NEGOTIATED_SYNC);
894 			NCR_700_clear_flag(SCp->device, NCR_700_DEV_BEGIN_SYNC_NEGOTIATION);
895 		} else if(SCp != NULL && NCR_700_get_tag_neg_state(SCp->device) == NCR_700_DURING_TAG_NEGOTIATION) {
896 			/* rejected our first simple tag message */
897 			scmd_printk(KERN_WARNING, SCp,
898 				"Rejected first tag queue attempt, turning off tag queueing\n");
899 			/* we're done negotiating */
900 			NCR_700_set_tag_neg_state(SCp->device, NCR_700_FINISHED_TAG_NEGOTIATION);
901 			hostdata->tag_negotiated &= ~(1<<scmd_id(SCp));
902 			SCp->device->tagged_supported = 0;
903 			scsi_deactivate_tcq(SCp->device, host->cmd_per_lun);
904 		} else {
905 			shost_printk(KERN_WARNING, host,
906 				"(%d:%d) Unexpected REJECT Message %s\n",
907 			       pun, lun,
908 			       NCR_700_phase[(dsps & 0xf00) >> 8]);
909 			/* however, just ignore it */
910 		}
911 		break;
912 
913 	case A_PARITY_ERROR_MSG:
914 		printk(KERN_ERR "scsi%d (%d:%d) Parity Error!\n", host->host_no,
915 		       pun, lun);
916 		NCR_700_internal_bus_reset(host);
917 		break;
918 	case A_SIMPLE_TAG_MSG:
919 		printk(KERN_INFO "scsi%d (%d:%d) SIMPLE TAG %d %s\n", host->host_no,
920 		       pun, lun, hostdata->msgin[1],
921 		       NCR_700_phase[(dsps & 0xf00) >> 8]);
922 		/* just ignore it */
923 		break;
924 	default:
925 		printk(KERN_INFO "scsi%d (%d:%d): Unexpected message %s: ",
926 		       host->host_no, pun, lun,
927 		       NCR_700_phase[(dsps & 0xf00) >> 8]);
928 
929 		spi_print_msg(hostdata->msgin);
930 		printk("\n");
931 		/* just reject it */
932 		hostdata->msgout[0] = A_REJECT_MSG;
933 		dma_cache_sync(hostdata->msgout, 1, DMA_TO_DEVICE);
934 		script_patch_16(hostdata->script, MessageCount, 1);
935 		/* SendMsgOut returns, so set up the return
936 		 * address */
937 		resume_offset = hostdata->pScript + Ent_SendMessageWithATN;
938 
939 		break;
940 	}
941 	NCR_700_writel(temp, host, TEMP_REG);
942 	/* set us up to receive another message */
943 	dma_cache_sync(hostdata->msgin, MSG_ARRAY_SIZE, DMA_FROM_DEVICE);
944 	return resume_offset;
945 }
946 
947 STATIC __u32
948 process_script_interrupt(__u32 dsps, __u32 dsp, struct scsi_cmnd *SCp,
949 			 struct Scsi_Host *host,
950 			 struct NCR_700_Host_Parameters *hostdata)
951 {
952 	__u32 resume_offset = 0;
953 	__u8 pun = 0xff, lun=0xff;
954 
955 	if(SCp != NULL) {
956 		pun = SCp->device->id;
957 		lun = SCp->device->lun;
958 	}
959 
960 	if(dsps == A_GOOD_STATUS_AFTER_STATUS) {
961 		DEBUG(("  COMMAND COMPLETE, status=%02x\n",
962 		       hostdata->status[0]));
963 		/* OK, if TCQ still under negotiation, we now know it works */
964 		if (NCR_700_get_tag_neg_state(SCp->device) == NCR_700_DURING_TAG_NEGOTIATION)
965 			NCR_700_set_tag_neg_state(SCp->device,
966 						  NCR_700_FINISHED_TAG_NEGOTIATION);
967 
968 		/* check for contingent allegiance conditions */
969 		if(status_byte(hostdata->status[0]) == CHECK_CONDITION ||
970 		   status_byte(hostdata->status[0]) == COMMAND_TERMINATED) {
971 			struct NCR_700_command_slot *slot =
972 				(struct NCR_700_command_slot *)SCp->host_scribble;
973 			if(SCp->cmnd[0] == REQUEST_SENSE) {
974 				/* OOPS: bad device, returning another
975 				 * contingent allegiance condition */
976 				scmd_printk(KERN_ERR, SCp,
977 					"broken device is looping in contingent allegiance: ignoring\n");
978 				NCR_700_scsi_done(hostdata, SCp, hostdata->status[0]);
979 			} else {
980 				struct NCR_700_sense *sense = SCp->device->hostdata;
981 #ifdef NCR_DEBUG
982 				scsi_print_command(SCp);
983 				printk("  cmd %p has status %d, requesting sense\n",
984 				       SCp, hostdata->status[0]);
985 #endif
986 				/* we can destroy the command here
987 				 * because the contingent allegiance
988 				 * condition will cause a retry which
989 				 * will re-copy the command from the
990 				 * saved data_cmnd.  We also unmap any
991 				 * data associated with the command
992 				 * here */
993 				NCR_700_unmap(hostdata, SCp, slot);
994 				dma_unmap_single(hostdata->dev, slot->pCmd,
995 						 sizeof(SCp->cmnd),
996 						 DMA_TO_DEVICE);
997 
998 				sense->cmnd[0] = REQUEST_SENSE;
999 				sense->cmnd[1] = (SCp->device->lun & 0x7) << 5;
1000 				sense->cmnd[2] = 0;
1001 				sense->cmnd[3] = 0;
1002 				sense->cmnd[4] = sizeof(SCp->sense_buffer);
1003 				sense->cmnd[5] = 0;
1004 				/* Here's a quiet hack: the
1005 				 * REQUEST_SENSE command is six bytes,
1006 				 * so store a flag indicating that
1007 				 * this was an internal sense request
1008 				 * and the original status at the end
1009 				 * of the command */
1010 				sense->cmnd[6] = NCR_700_INTERNAL_SENSE_MAGIC;
1011 				sense->cmnd[7] = hostdata->status[0];
1012 				slot->pCmd = dma_map_single(hostdata->dev, sense->cmnd, sizeof(sense->cmnd), DMA_TO_DEVICE);
1013 				slot->dma_handle = dma_map_single(hostdata->dev, SCp->sense_buffer, sizeof(SCp->sense_buffer), DMA_FROM_DEVICE);
1014 				slot->SG[0].ins = bS_to_host(SCRIPT_MOVE_DATA_IN | sizeof(SCp->sense_buffer));
1015 				slot->SG[0].pAddr = bS_to_host(slot->dma_handle);
1016 				slot->SG[1].ins = bS_to_host(SCRIPT_RETURN);
1017 				slot->SG[1].pAddr = 0;
1018 				slot->resume_offset = hostdata->pScript;
1019 				dma_cache_sync(slot->SG, sizeof(slot->SG[0])*2, DMA_TO_DEVICE);
1020 				dma_cache_sync(SCp->sense_buffer, sizeof(SCp->sense_buffer), DMA_FROM_DEVICE);
1021 
1022 				/* queue the command for reissue */
1023 				slot->state = NCR_700_SLOT_QUEUED;
1024 				slot->flags = NCR_700_FLAG_AUTOSENSE;
1025 				hostdata->state = NCR_700_HOST_FREE;
1026 				hostdata->cmd = NULL;
1027 			}
1028 		} else {
1029 			// Currently rely on the mid layer evaluation
1030 			// of the tag queuing capability
1031 			//
1032 			//if(status_byte(hostdata->status[0]) == GOOD &&
1033 			//   SCp->cmnd[0] == INQUIRY && SCp->use_sg == 0) {
1034 			//	/* Piggy back the tag queueing support
1035 			//	 * on this command */
1036 			//	dma_sync_single_for_cpu(hostdata->dev,
1037 			//			    slot->dma_handle,
1038 			//			    SCp->request_bufflen,
1039 			//			    DMA_FROM_DEVICE);
1040 			//	if(((char *)SCp->request_buffer)[7] & 0x02) {
1041 			//		scmd_printk(KERN_INFO, SCp,
1042 			//		     "Enabling Tag Command Queuing\n");
1043 			//		hostdata->tag_negotiated |= (1<<scmd_id(SCp));
1044 			//		NCR_700_set_flag(SCp->device, NCR_700_DEV_BEGIN_TAG_QUEUEING);
1045 			//	} else {
1046 			//		NCR_700_clear_flag(SCp->device, NCR_700_DEV_BEGIN_TAG_QUEUEING);
1047 			//		hostdata->tag_negotiated &= ~(1<<scmd_id(SCp));
1048 			//	}
1049 			//}
1050 			NCR_700_scsi_done(hostdata, SCp, hostdata->status[0]);
1051 		}
1052 	} else if((dsps & 0xfffff0f0) == A_UNEXPECTED_PHASE) {
1053 		__u8 i = (dsps & 0xf00) >> 8;
1054 
1055 		scmd_printk(KERN_ERR, SCp, "UNEXPECTED PHASE %s (%s)\n",
1056 		       NCR_700_phase[i],
1057 		       sbcl_to_string(NCR_700_readb(host, SBCL_REG)));
1058 		scmd_printk(KERN_ERR, SCp, "         len = %d, cmd =",
1059 			SCp->cmd_len);
1060 		scsi_print_command(SCp);
1061 
1062 		NCR_700_internal_bus_reset(host);
1063 	} else if((dsps & 0xfffff000) == A_FATAL) {
1064 		int i = (dsps & 0xfff);
1065 
1066 		printk(KERN_ERR "scsi%d: (%d:%d) FATAL ERROR: %s\n",
1067 		       host->host_no, pun, lun, NCR_700_fatal_messages[i]);
1068 		if(dsps == A_FATAL_ILLEGAL_MSG_LENGTH) {
1069 			printk(KERN_ERR "     msg begins %02x %02x\n",
1070 			       hostdata->msgin[0], hostdata->msgin[1]);
1071 		}
1072 		NCR_700_internal_bus_reset(host);
1073 	} else if((dsps & 0xfffff0f0) == A_DISCONNECT) {
1074 #ifdef NCR_700_DEBUG
1075 		__u8 i = (dsps & 0xf00) >> 8;
1076 
1077 		printk("scsi%d: (%d:%d), DISCONNECTED (%d) %s\n",
1078 		       host->host_no, pun, lun,
1079 		       i, NCR_700_phase[i]);
1080 #endif
1081 		save_for_reselection(hostdata, SCp, dsp);
1082 
1083 	} else if(dsps == A_RESELECTION_IDENTIFIED) {
1084 		__u8 lun;
1085 		struct NCR_700_command_slot *slot;
1086 		__u8 reselection_id = hostdata->reselection_id;
1087 		struct scsi_device *SDp;
1088 
1089 		lun = hostdata->msgin[0] & 0x1f;
1090 
1091 		hostdata->reselection_id = 0xff;
1092 		DEBUG(("scsi%d: (%d:%d) RESELECTED!\n",
1093 		       host->host_no, reselection_id, lun));
1094 		/* clear the reselection indicator */
1095 		SDp = __scsi_device_lookup(host, 0, reselection_id, lun);
1096 		if(unlikely(SDp == NULL)) {
1097 			printk(KERN_ERR "scsi%d: (%d:%d) HAS NO device\n",
1098 			       host->host_no, reselection_id, lun);
1099 			BUG();
1100 		}
1101 		if(hostdata->msgin[1] == A_SIMPLE_TAG_MSG) {
1102 			struct scsi_cmnd *SCp = scsi_find_tag(SDp, hostdata->msgin[2]);
1103 			if(unlikely(SCp == NULL)) {
1104 				printk(KERN_ERR "scsi%d: (%d:%d) no saved request for tag %d\n",
1105 				       host->host_no, reselection_id, lun, hostdata->msgin[2]);
1106 				BUG();
1107 			}
1108 
1109 			slot = (struct NCR_700_command_slot *)SCp->host_scribble;
1110 			DDEBUG(KERN_DEBUG, SDp,
1111 				"reselection is tag %d, slot %p(%d)\n",
1112 				hostdata->msgin[2], slot, slot->tag);
1113 		} else {
1114 			struct scsi_cmnd *SCp = scsi_find_tag(SDp, SCSI_NO_TAG);
1115 			if(unlikely(SCp == NULL)) {
1116 				sdev_printk(KERN_ERR, SDp,
1117 					"no saved request for untagged cmd\n");
1118 				BUG();
1119 			}
1120 			slot = (struct NCR_700_command_slot *)SCp->host_scribble;
1121 		}
1122 
1123 		if(slot == NULL) {
1124 			printk(KERN_ERR "scsi%d: (%d:%d) RESELECTED but no saved command (MSG = %02x %02x %02x)!!\n",
1125 			       host->host_no, reselection_id, lun,
1126 			       hostdata->msgin[0], hostdata->msgin[1],
1127 			       hostdata->msgin[2]);
1128 		} else {
1129 			if(hostdata->state != NCR_700_HOST_BUSY)
1130 				printk(KERN_ERR "scsi%d: FATAL, host not busy during valid reselection!\n",
1131 				       host->host_no);
1132 			resume_offset = slot->resume_offset;
1133 			hostdata->cmd = slot->cmnd;
1134 
1135 			/* re-patch for this command */
1136 			script_patch_32_abs(hostdata->script, CommandAddress,
1137 					    slot->pCmd);
1138 			script_patch_16(hostdata->script,
1139 					CommandCount, slot->cmnd->cmd_len);
1140 			script_patch_32_abs(hostdata->script, SGScriptStartAddress,
1141 					    to32bit(&slot->pSG[0].ins));
1142 
1143 			/* Note: setting SXFER only works if we're
1144 			 * still in the MESSAGE phase, so it is vital
1145 			 * that ACK is still asserted when we process
1146 			 * the reselection message.  The resume offset
1147 			 * should therefore always clear ACK */
1148 			NCR_700_writeb(NCR_700_get_SXFER(hostdata->cmd->device),
1149 				       host, SXFER_REG);
1150 			dma_cache_sync(hostdata->msgin,
1151 				       MSG_ARRAY_SIZE, DMA_FROM_DEVICE);
1152 			dma_cache_sync(hostdata->msgout,
1153 				       MSG_ARRAY_SIZE, DMA_TO_DEVICE);
1154 			/* I'm just being paranoid here, the command should
1155 			 * already have been flushed from the cache */
1156 			dma_cache_sync(slot->cmnd->cmnd,
1157 				       slot->cmnd->cmd_len, DMA_TO_DEVICE);
1158 
1159 
1160 
1161 		}
1162 	} else if(dsps == A_RESELECTED_DURING_SELECTION) {
1163 
1164 		/* This section is full of debugging code because I've
1165 		 * never managed to reach it.  I think what happens is
1166 		 * that, because the 700 runs with selection
1167 		 * interrupts enabled the whole time, we take a
1168 		 * selection interrupt before we manage to get to the
1169 		 * reselected script interrupt */
1170 
1171 		__u8 reselection_id = NCR_700_readb(host, SFBR_REG);
1172 		struct NCR_700_command_slot *slot;
1173 
1174 		/* Take out our own ID */
1175 		reselection_id &= ~(1<<host->this_id);
1176 
1177 		/* I've never seen this happen, so keep this as a printk rather
1178 		 * than a debug */
1179 		printk(KERN_INFO "scsi%d: (%d:%d) RESELECTION DURING SELECTION, dsp=%08x[%04x] state=%d, count=%d\n",
1180 		       host->host_no, reselection_id, lun, dsp, dsp - hostdata->pScript, hostdata->state, hostdata->command_slot_count);
1181 
1182 		{
1183 			/* FIXME: DEBUGGING CODE */
1184 			__u32 SG = (__u32)bS_to_cpu(hostdata->script[A_SGScriptStartAddress_used[0]]);
1185 			int i;
1186 
1187 			for(i=0; i< NCR_700_COMMAND_SLOTS_PER_HOST; i++) {
1188 				if(SG >= to32bit(&hostdata->slots[i].pSG[0])
1189 				   && SG <= to32bit(&hostdata->slots[i].pSG[NCR_700_SG_SEGMENTS]))
1190 					break;
1191 			}
1192 			printk(KERN_INFO "IDENTIFIED SG segment as being %08x in slot %p, cmd %p, slot->resume_offset=%08x\n", SG, &hostdata->slots[i], hostdata->slots[i].cmnd, hostdata->slots[i].resume_offset);
1193 			SCp =  hostdata->slots[i].cmnd;
1194 		}
1195 
1196 		if(SCp != NULL) {
1197 			slot = (struct NCR_700_command_slot *)SCp->host_scribble;
1198 			/* change slot from busy to queued to redo command */
1199 			slot->state = NCR_700_SLOT_QUEUED;
1200 		}
1201 		hostdata->cmd = NULL;
1202 
1203 		if(reselection_id == 0) {
1204 			if(hostdata->reselection_id == 0xff) {
1205 				printk(KERN_ERR "scsi%d: Invalid reselection during selection!!\n", host->host_no);
1206 				return 0;
1207 			} else {
1208 				printk(KERN_ERR "scsi%d: script reselected and we took a selection interrupt\n",
1209 				       host->host_no);
1210 				reselection_id = hostdata->reselection_id;
1211 			}
1212 		} else {
1213 
1214 			/* convert to real ID */
1215 			reselection_id = bitmap_to_number(reselection_id);
1216 		}
1217 		hostdata->reselection_id = reselection_id;
1218 		/* just in case we have a stale simple tag message, clear it */
1219 		hostdata->msgin[1] = 0;
1220 		dma_cache_sync(hostdata->msgin,
1221 			       MSG_ARRAY_SIZE, DMA_BIDIRECTIONAL);
1222 		if(hostdata->tag_negotiated & (1<<reselection_id)) {
1223 			resume_offset = hostdata->pScript + Ent_GetReselectionWithTag;
1224 		} else {
1225 			resume_offset = hostdata->pScript + Ent_GetReselectionData;
1226 		}
1227 	} else if(dsps == A_COMPLETED_SELECTION_AS_TARGET) {
1228 		/* we've just disconnected from the bus, do nothing since
1229 		 * a return here will re-run the queued command slot
1230 		 * that may have been interrupted by the initial selection */
1231 		DEBUG((" SELECTION COMPLETED\n"));
1232 	} else if((dsps & 0xfffff0f0) == A_MSG_IN) {
1233 		resume_offset = process_message(host, hostdata, SCp,
1234 						dsp, dsps);
1235 	} else if((dsps &  0xfffff000) == 0) {
1236 		__u8 i = (dsps & 0xf0) >> 4, j = (dsps & 0xf00) >> 8;
1237 		printk(KERN_ERR "scsi%d: (%d:%d), unhandled script condition %s %s at %04x\n",
1238 		       host->host_no, pun, lun, NCR_700_condition[i],
1239 		       NCR_700_phase[j], dsp - hostdata->pScript);
1240 		if(SCp != NULL) {
1241 			scsi_print_command(SCp);
1242 
1243 			if(SCp->use_sg) {
1244 				for(i = 0; i < SCp->use_sg + 1; i++) {
1245 					printk(KERN_INFO " SG[%d].length = %d, move_insn=%08x, addr %08x\n", i, ((struct scatterlist *)SCp->request_buffer)[i].length, ((struct NCR_700_command_slot *)SCp->host_scribble)->SG[i].ins, ((struct NCR_700_command_slot *)SCp->host_scribble)->SG[i].pAddr);
1246 				}
1247 			}
1248 		}
1249 		NCR_700_internal_bus_reset(host);
1250 	} else if((dsps & 0xfffff000) == A_DEBUG_INTERRUPT) {
1251 		printk(KERN_NOTICE "scsi%d (%d:%d) DEBUG INTERRUPT %d AT %08x[%04x], continuing\n",
1252 		       host->host_no, pun, lun, dsps & 0xfff, dsp, dsp - hostdata->pScript);
1253 		resume_offset = dsp;
1254 	} else {
1255 		printk(KERN_ERR "scsi%d: (%d:%d), unidentified script interrupt 0x%x at %04x\n",
1256 		       host->host_no, pun, lun, dsps, dsp - hostdata->pScript);
1257 		NCR_700_internal_bus_reset(host);
1258 	}
1259 	return resume_offset;
1260 }
1261 
1262 /* We run the 53c700 with selection interrupts always enabled.  This
1263  * means that the chip may be selected as soon as the bus frees.  On a
1264  * busy bus, this can be before the scripts engine finishes its
1265  * processing.  Therefore, part of the selection processing has to be
1266  * to find out what the scripts engine is doing and complete the
1267  * function if necessary (i.e. process the pending disconnect or save
1268  * the interrupted initial selection). */
1269 STATIC inline __u32
1270 process_selection(struct Scsi_Host *host, __u32 dsp)
1271 {
1272 	__u8 id = 0;	/* Squash compiler warning */
1273 	int count = 0;
1274 	__u32 resume_offset = 0;
1275 	struct NCR_700_Host_Parameters *hostdata =
1276 		(struct NCR_700_Host_Parameters *)host->hostdata[0];
1277 	struct scsi_cmnd *SCp = hostdata->cmd;
1278 	__u8 sbcl;
1279 
1280 	for(count = 0; count < 5; count++) {
1281 		id = NCR_700_readb(host, hostdata->chip710 ?
1282 				   CTEST9_REG : SFBR_REG);
1283 
1284 		/* Take out our own ID */
1285 		id &= ~(1<<host->this_id);
1286 		if(id != 0)
1287 			break;
1288 		udelay(5);
1289 	}
1290 	sbcl = NCR_700_readb(host, SBCL_REG);
1291 	if((sbcl & SBCL_IO) == 0) {
1292 		/* mark as having been selected rather than reselected */
1293 		id = 0xff;
1294 	} else {
1295 		/* convert to real ID */
1296 		hostdata->reselection_id = id = bitmap_to_number(id);
1297 		DEBUG(("scsi%d:  Reselected by %d\n",
1298 		       host->host_no, id));
1299 	}
1300 	if(hostdata->state == NCR_700_HOST_BUSY && SCp != NULL) {
1301 		struct NCR_700_command_slot *slot =
1302 			(struct NCR_700_command_slot *)SCp->host_scribble;
1303 		DEBUG(("  ID %d WARNING: RESELECTION OF BUSY HOST, saving cmd %p, slot %p, addr %x [%04x], resume %x!\n", id, hostdata->cmd, slot, dsp, dsp - hostdata->pScript, resume_offset));
1304 
1305 		switch(dsp - hostdata->pScript) {
1306 		case Ent_Disconnect1:
1307 		case Ent_Disconnect2:
1308 			save_for_reselection(hostdata, SCp, Ent_Disconnect2 + hostdata->pScript);
1309 			break;
1310 		case Ent_Disconnect3:
1311 		case Ent_Disconnect4:
1312 			save_for_reselection(hostdata, SCp, Ent_Disconnect4 + hostdata->pScript);
1313 			break;
1314 		case Ent_Disconnect5:
1315 		case Ent_Disconnect6:
1316 			save_for_reselection(hostdata, SCp, Ent_Disconnect6 + hostdata->pScript);
1317 			break;
1318 		case Ent_Disconnect7:
1319 		case Ent_Disconnect8:
1320 			save_for_reselection(hostdata, SCp, Ent_Disconnect8 + hostdata->pScript);
1321 			break;
1322 		case Ent_Finish1:
1323 		case Ent_Finish2:
1324 			process_script_interrupt(A_GOOD_STATUS_AFTER_STATUS, dsp, SCp, host, hostdata);
1325 			break;
1326 
1327 		default:
1328 			slot->state = NCR_700_SLOT_QUEUED;
1329 			break;
1330 			}
1331 	}
1332 	hostdata->state = NCR_700_HOST_BUSY;
1333 	hostdata->cmd = NULL;
1334 	/* clear any stale simple tag message */
1335 	hostdata->msgin[1] = 0;
1336 	dma_cache_sync(hostdata->msgin, MSG_ARRAY_SIZE,
1337 		       DMA_BIDIRECTIONAL);
1338 
1339 	if(id == 0xff) {
1340 		/* Selected as target, Ignore */
1341 		resume_offset = hostdata->pScript + Ent_SelectedAsTarget;
1342 	} else if(hostdata->tag_negotiated & (1<<id)) {
1343 		resume_offset = hostdata->pScript + Ent_GetReselectionWithTag;
1344 	} else {
1345 		resume_offset = hostdata->pScript + Ent_GetReselectionData;
1346 	}
1347 	return resume_offset;
1348 }
1349 
1350 static inline void
1351 NCR_700_clear_fifo(struct Scsi_Host *host) {
1352 	const struct NCR_700_Host_Parameters *hostdata
1353 		= (struct NCR_700_Host_Parameters *)host->hostdata[0];
1354 	if(hostdata->chip710) {
1355 		NCR_700_writeb(CLR_FIFO_710, host, CTEST8_REG);
1356 	} else {
1357 		NCR_700_writeb(CLR_FIFO, host, DFIFO_REG);
1358 	}
1359 }
1360 
1361 static inline void
1362 NCR_700_flush_fifo(struct Scsi_Host *host) {
1363 	const struct NCR_700_Host_Parameters *hostdata
1364 		= (struct NCR_700_Host_Parameters *)host->hostdata[0];
1365 	if(hostdata->chip710) {
1366 		NCR_700_writeb(FLUSH_DMA_FIFO_710, host, CTEST8_REG);
1367 		udelay(10);
1368 		NCR_700_writeb(0, host, CTEST8_REG);
1369 	} else {
1370 		NCR_700_writeb(FLUSH_DMA_FIFO, host, DFIFO_REG);
1371 		udelay(10);
1372 		NCR_700_writeb(0, host, DFIFO_REG);
1373 	}
1374 }
1375 
1376 
1377 /* The queue lock with interrupts disabled must be held on entry to
1378  * this function */
1379 STATIC int
1380 NCR_700_start_command(struct scsi_cmnd *SCp)
1381 {
1382 	struct NCR_700_command_slot *slot =
1383 		(struct NCR_700_command_slot *)SCp->host_scribble;
1384 	struct NCR_700_Host_Parameters *hostdata =
1385 		(struct NCR_700_Host_Parameters *)SCp->device->host->hostdata[0];
1386 	__u16 count = 1;	/* for IDENTIFY message */
1387 
1388 	if(hostdata->state != NCR_700_HOST_FREE) {
1389 		/* keep this inside the lock to close the race window where
1390 		 * the running command finishes on another CPU while we don't
1391 		 * change the state to queued on this one */
1392 		slot->state = NCR_700_SLOT_QUEUED;
1393 
1394 		DEBUG(("scsi%d: host busy, queueing command %p, slot %p\n",
1395 		       SCp->device->host->host_no, slot->cmnd, slot));
1396 		return 0;
1397 	}
1398 	hostdata->state = NCR_700_HOST_BUSY;
1399 	hostdata->cmd = SCp;
1400 	slot->state = NCR_700_SLOT_BUSY;
1401 	/* keep interrupts disabled until we have the command correctly
1402 	 * set up so we cannot take a selection interrupt */
1403 
1404 	hostdata->msgout[0] = NCR_700_identify((SCp->cmnd[0] != REQUEST_SENSE &&
1405 						slot->flags != NCR_700_FLAG_AUTOSENSE),
1406 					       SCp->device->lun);
1407 	/* for INQUIRY or REQUEST_SENSE commands, we cannot be sure
1408 	 * if the negotiated transfer parameters still hold, so
1409 	 * always renegotiate them */
1410 	if(SCp->cmnd[0] == INQUIRY || SCp->cmnd[0] == REQUEST_SENSE ||
1411 	   slot->flags == NCR_700_FLAG_AUTOSENSE) {
1412 		NCR_700_clear_flag(SCp->device, NCR_700_DEV_NEGOTIATED_SYNC);
1413 	}
1414 
1415 	/* REQUEST_SENSE is asking for contingent I_T_L(_Q) status.
1416 	 * If a contingent allegiance condition exists, the device
1417 	 * will refuse all tags, so send the request sense as untagged
1418 	 * */
1419 	if((hostdata->tag_negotiated & (1<<scmd_id(SCp)))
1420 	   && (slot->tag != SCSI_NO_TAG && SCp->cmnd[0] != REQUEST_SENSE &&
1421 	       slot->flags != NCR_700_FLAG_AUTOSENSE)) {
1422 		count += scsi_populate_tag_msg(SCp, &hostdata->msgout[count]);
1423 	}
1424 
1425 	if(hostdata->fast &&
1426 	   NCR_700_is_flag_clear(SCp->device, NCR_700_DEV_NEGOTIATED_SYNC)) {
1427 		count += spi_populate_sync_msg(&hostdata->msgout[count],
1428 				spi_period(SCp->device->sdev_target),
1429 				spi_offset(SCp->device->sdev_target));
1430 		NCR_700_set_flag(SCp->device, NCR_700_DEV_BEGIN_SYNC_NEGOTIATION);
1431 	}
1432 
1433 	script_patch_16(hostdata->script, MessageCount, count);
1434 
1435 
1436 	script_patch_ID(hostdata->script,
1437 			Device_ID, 1<<scmd_id(SCp));
1438 
1439 	script_patch_32_abs(hostdata->script, CommandAddress,
1440 			    slot->pCmd);
1441 	script_patch_16(hostdata->script, CommandCount, SCp->cmd_len);
1442 	/* finally plumb the beginning of the SG list into the script
1443 	 * */
1444 	script_patch_32_abs(hostdata->script, SGScriptStartAddress,
1445 			    to32bit(&slot->pSG[0].ins));
1446 	NCR_700_clear_fifo(SCp->device->host);
1447 
1448 	if(slot->resume_offset == 0)
1449 		slot->resume_offset = hostdata->pScript;
1450 	/* now perform all the writebacks and invalidates */
1451 	dma_cache_sync(hostdata->msgout, count, DMA_TO_DEVICE);
1452 	dma_cache_sync(hostdata->msgin, MSG_ARRAY_SIZE,
1453 		       DMA_FROM_DEVICE);
1454 	dma_cache_sync(SCp->cmnd, SCp->cmd_len, DMA_TO_DEVICE);
1455 	dma_cache_sync(hostdata->status, 1, DMA_FROM_DEVICE);
1456 
1457 	/* set the synchronous period/offset */
1458 	NCR_700_writeb(NCR_700_get_SXFER(SCp->device),
1459 		       SCp->device->host, SXFER_REG);
1460 	NCR_700_writel(slot->temp, SCp->device->host, TEMP_REG);
1461 	NCR_700_writel(slot->resume_offset, SCp->device->host, DSP_REG);
1462 
1463 	return 1;
1464 }
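
/* A minimal sketch of the locking contract above (illustrative only):
 * callers in this driver, the command queueing path and the interrupt
 * handler, already hold the host lock with interrupts disabled when
 * they call NCR_700_start_command(), roughly:
 *
 *	spin_lock_irqsave(host->host_lock, flags);
 *	...
 *	NCR_700_start_command(SCp);
 *	...
 *	spin_unlock_irqrestore(host->host_lock, flags);
 */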
1465 
1466 irqreturn_t
1467 NCR_700_intr(int irq, void *dev_id, struct pt_regs *regs)
1468 {
1469 	struct Scsi_Host *host = (struct Scsi_Host *)dev_id;
1470 	struct NCR_700_Host_Parameters *hostdata =
1471 		(struct NCR_700_Host_Parameters *)host->hostdata[0];
1472 	__u8 istat;
1473 	__u32 resume_offset = 0;
1474 	__u8 pun = 0xff, lun = 0xff;
1475 	unsigned long flags;
1476 	int handled = 0;
1477 
1478 	/* Use the host lock to serialise access to the 53c700
1479 	 * hardware.  Note: In future, we may need to take the queue
1480 	 * lock to enter the done routines.  When that happens, we
1481 	 * need to ensure that for this driver, the host lock and the
1482 	 * queue lock point to the same thing. */
1483 	spin_lock_irqsave(host->host_lock, flags);
1484 	if((istat = NCR_700_readb(host, ISTAT_REG))
1485 	      & (SCSI_INT_PENDING | DMA_INT_PENDING)) {
1486 		__u32 dsps;
1487 		__u8 sstat0 = 0, dstat = 0;
1488 		__u32 dsp;
1489 		struct scsi_cmnd *SCp = hostdata->cmd;
1490 		enum NCR_700_Host_State state;
1491 
1492 		handled = 1;
1493 		state = hostdata->state;
1494 		SCp = hostdata->cmd;
1495 
1496 		if(istat & SCSI_INT_PENDING) {
1497 			udelay(10);
1498 
1499 			sstat0 = NCR_700_readb(host, SSTAT0_REG);
1500 		}
1501 
1502 		if(istat & DMA_INT_PENDING) {
1503 			udelay(10);
1504 
1505 			dstat = NCR_700_readb(host, DSTAT_REG);
1506 		}
1507 
1508 		dsps = NCR_700_readl(host, DSPS_REG);
1509 		dsp = NCR_700_readl(host, DSP_REG);
1510 
1511 		DEBUG(("scsi%d: istat %02x sstat0 %02x dstat %02x dsp %04x[%08x] dsps 0x%x\n",
1512 		       host->host_no, istat, sstat0, dstat,
1513 		       (dsp - (__u32)(hostdata->pScript))/4,
1514 		       dsp, dsps));
1515 
1516 		if(SCp != NULL) {
1517 			pun = SCp->device->id;
1518 			lun = SCp->device->lun;
1519 		}
1520 
1521 		if(sstat0 & SCSI_RESET_DETECTED) {
1522 			struct scsi_device *SDp;
1523 			int i;
1524 
1525 			hostdata->state = NCR_700_HOST_BUSY;
1526 
1527 			printk(KERN_ERR "scsi%d: Bus Reset detected, executing command %p, slot %p, dsp %08x[%04x]\n",
1528 			       host->host_no, SCp, SCp == NULL ? NULL : SCp->host_scribble, dsp, dsp - hostdata->pScript);
1529 
1530 			scsi_report_bus_reset(host, 0);
1531 
1532 			/* clear all the negotiated parameters */
1533 			__shost_for_each_device(SDp, host)
1534 				SDp->hostdata = NULL;
1535 
1536 			/* clear all the slots and their pending commands */
1537 			for(i = 0; i < NCR_700_COMMAND_SLOTS_PER_HOST; i++) {
1538 				struct scsi_cmnd *SCp;
1539 				struct NCR_700_command_slot *slot =
1540 					&hostdata->slots[i];
1541 
1542 				if(slot->state == NCR_700_SLOT_FREE)
1543 					continue;
1544 
1545 				SCp = slot->cmnd;
1546 				printk(KERN_ERR " failing command because of reset, slot %p, cmnd %p\n",
1547 				       slot, SCp);
1548 				free_slot(slot, hostdata);
1549 				SCp->host_scribble = NULL;
1550 				NCR_700_set_depth(SCp->device, 0);
1551 				/* NOTE: deadlock potential here: we
1552 				 * rely on mid-layer guarantees that
1553 				 * scsi_done won't try to issue the
1554 				 * command again, otherwise we'll
1555 				 * deadlock on the
1556 				 * hostdata->state_lock */
1557 				SCp->result = DID_RESET << 16;
1558 				SCp->scsi_done(SCp);
1559 			}
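			/* short delay (presumably to let the bus settle)
			 * before reinitialising the chip registers */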
1560 			mdelay(25);
1561 			NCR_700_chip_setup(host);
1562 
1563 			hostdata->state = NCR_700_HOST_FREE;
1564 			hostdata->cmd = NULL;
1565 			/* signal back if this was an eh induced reset */
1566 			if(hostdata->eh_complete != NULL)
1567 				complete(hostdata->eh_complete);
1568 			goto out_unlock;
1569 		} else if(sstat0 & SELECTION_TIMEOUT) {
1570 			DEBUG(("scsi%d: (%d:%d) selection timeout\n",
1571 			       host->host_no, pun, lun));
1572 			NCR_700_scsi_done(hostdata, SCp, DID_NO_CONNECT<<16);
1573 		} else if(sstat0 & PHASE_MISMATCH) {
1574 			struct NCR_700_command_slot *slot = (SCp == NULL) ? NULL :
1575 				(struct NCR_700_command_slot *)SCp->host_scribble;
1576 
1577 			if(dsp == Ent_SendMessage + 8 + hostdata->pScript) {
1578 				/* It wants to reply to some part of
1579 				 * our message */
1580 #ifdef NCR_700_DEBUG
1581 				__u32 temp = NCR_700_readl(host, TEMP_REG);
1582 				int count = (hostdata->script[Ent_SendMessage/4] & 0xffffff) - ((NCR_700_readl(host, DBC_REG) & 0xffffff) + NCR_700_data_residual(host));
1583 				printk("scsi%d (%d:%d) PHASE MISMATCH IN SEND MESSAGE %d remain, return %p[%04x], phase %s\n", host->host_no, pun, lun, count, (void *)temp, temp - hostdata->pScript, sbcl_to_string(NCR_700_readb(host, SBCL_REG)));
1584 #endif
1585 				resume_offset = hostdata->pScript + Ent_SendMessagePhaseMismatch;
1586 			} else if(dsp >= to32bit(&slot->pSG[0].ins) &&
1587 				  dsp <= to32bit(&slot->pSG[NCR_700_SG_SEGMENTS].ins)) {
1588 				int data_transfer = NCR_700_readl(host, DBC_REG) & 0xffffff;
1589 				int SGcount = (dsp - to32bit(&slot->pSG[0].ins))/sizeof(struct NCR_700_SG_List);
1590 				int residual = NCR_700_data_residual(host);
1591 				int i;
1592 #ifdef NCR_700_DEBUG
1593 				__u32 naddr = NCR_700_readl(host, DNAD_REG);
1594 
1595 				printk("scsi%d: (%d:%d) Expected phase mismatch in slot->SG[%d], transferred 0x%x\n",
1596 				       host->host_no, pun, lun,
1597 				       SGcount, data_transfer);
1598 				scsi_print_command(SCp);
1599 				if(residual) {
1600 					printk("scsi%d: (%d:%d) Expected phase mismatch in slot->SG[%d], transferred 0x%x, residual %d\n",
1601 				       host->host_no, pun, lun,
1602 				       SGcount, data_transfer, residual);
1603 				}
1604 #endif
1605 				data_transfer += residual;
1606 
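				/* data_transfer is now the number of bytes the
				 * interrupted MOVE did not send: shrink that SG
				 * element to the remainder and advance its
				 * address past what was transferred */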
1607 				if(data_transfer != 0) {
1608 					int count;
1609 					__u32 pAddr;
1610 
1611 					SGcount--;
1612 
1613 					count = (bS_to_cpu(slot->SG[SGcount].ins) & 0x00ffffff);
1614 					DEBUG(("DATA TRANSFER MISMATCH, count = %d, transferred %d\n", count, count-data_transfer));
1615 					slot->SG[SGcount].ins &= bS_to_host(0xff000000);
1616 					slot->SG[SGcount].ins |= bS_to_host(data_transfer);
1617 					pAddr = bS_to_cpu(slot->SG[SGcount].pAddr);
1618 					pAddr += (count - data_transfer);
1619 #ifdef NCR_700_DEBUG
1620 					if(pAddr != naddr) {
1621 						printk("scsi%d (%d:%d) transfer mismatch pAddr=%lx, naddr=%lx, data_transfer=%d, residual=%d\n", host->host_no, pun, lun, (unsigned long)pAddr, (unsigned long)naddr, data_transfer, residual);
1622 					}
1623 #endif
1624 					slot->SG[SGcount].pAddr = bS_to_host(pAddr);
1625 				}
1626 				/* set the executed moves to nops */
1627 				for(i=0; i<SGcount; i++) {
1628 					slot->SG[i].ins = bS_to_host(SCRIPT_NOP);
1629 					slot->SG[i].pAddr = 0;
1630 				}
1631 				dma_cache_sync(slot->SG, sizeof(slot->SG), DMA_TO_DEVICE);
1632 				/* and pretend we disconnected after
1633 				 * the command phase */
1634 				resume_offset = hostdata->pScript + Ent_MsgInDuringData;
1635 				/* make sure all the data is flushed */
1636 				NCR_700_flush_fifo(host);
1637 			} else {
1638 				__u8 sbcl = NCR_700_readb(host, SBCL_REG);
1639 				printk(KERN_ERR "scsi%d: (%d:%d) phase mismatch at %04x, phase %s\n",
1640 				       host->host_no, pun, lun, dsp - hostdata->pScript, sbcl_to_string(sbcl));
1641 				NCR_700_internal_bus_reset(host);
1642 			}
1643 
1644 		} else if(sstat0 & SCSI_GROSS_ERROR) {
1645 			printk(KERN_ERR "scsi%d: (%d:%d) GROSS ERROR\n",
1646 			       host->host_no, pun, lun);
1647 			NCR_700_scsi_done(hostdata, SCp, DID_ERROR<<16);
1648 		} else if(sstat0 & PARITY_ERROR) {
1649 			printk(KERN_ERR "scsi%d: (%d:%d) PARITY ERROR\n",
1650 			       host->host_no, pun, lun);
1651 			NCR_700_scsi_done(hostdata, SCp, DID_ERROR<<16);
1652 		} else if(dstat & SCRIPT_INT_RECEIVED) {
1653 			DEBUG(("scsi%d: (%d:%d) ====>SCRIPT INTERRUPT<====\n",
1654 			       host->host_no, pun, lun));
1655 			resume_offset = process_script_interrupt(dsps, dsp, SCp, host, hostdata);
1656 		} else if(dstat & (ILGL_INST_DETECTED)) {
1657 			printk(KERN_ERR "scsi%d: (%d:%d) Illegal Instruction detected at 0x%08x[0x%x]!!!\n"
1658 			       "         Please email James.Bottomley@HansenPartnership.com with the details\n",
1659 			       host->host_no, pun, lun,
1660 			       dsp, dsp - hostdata->pScript);
1661 			NCR_700_scsi_done(hostdata, SCp, DID_ERROR<<16);
1662 		} else if(dstat & (WATCH_DOG_INTERRUPT|ABORTED)) {
1663 			printk(KERN_ERR "scsi%d: (%d:%d) serious DMA problem, dstat=%02x\n",
1664 			       host->host_no, pun, lun, dstat);
1665 			NCR_700_scsi_done(hostdata, SCp, DID_ERROR<<16);
1666 		}
1667 
1668 
1669 		/* NOTE: selection interrupt processing MUST occur
1670 		 * after script interrupt processing to correctly cope
1671 		 * with the case where we process a disconnect and
1672 		 * then get reselected before we process the
1673 		 * disconnection */
1674 		if(sstat0 & SELECTED) {
1675 			/* FIXME: It currently takes at least FOUR
1676 			 * interrupts to complete a command that
1677 			 * disconnects: one for the disconnect, one
1678 			 * for the reselection, one to get the
1679 			 * reselection data and one to complete the
1680 			 * command.  If we guess the reselected
1681 			 * command here and prepare it, we only need
1682 			 * to get a reselection data interrupt if we
1683 			 * guessed wrongly.  Since the interrupt
1684 			 * overhead is much greater than the command
1685 			 * setup, this would be an efficient
1686 			 * optimisation particularly as we probably
1687 			 * only have one outstanding command on a
1688 			 * target most of the time */
1689 
1690 			resume_offset = process_selection(host, dsp);
1691 
1692 		}
1693 
1694 	}
1695 
1696 	if(resume_offset) {
1697 		if(hostdata->state != NCR_700_HOST_BUSY) {
1698 			printk(KERN_ERR "scsi%d: Driver error: resume at 0x%08x [0x%04x] with non busy host!\n",
1699 			       host->host_no, resume_offset, resume_offset - hostdata->pScript);
1700 			hostdata->state = NCR_700_HOST_BUSY;
1701 		}
1702 
1703 		DEBUG(("Attempting to resume at %x\n", resume_offset));
1704 		NCR_700_clear_fifo(host);
1705 		NCR_700_writel(resume_offset, host, DSP_REG);
1706 	}
1707 	/* There is probably a technical no-no about this: If we're a
1708 	 * shared interrupt and we got this interrupt because the
1709 	 * other device needs servicing, not us, we're still going to
1710 	 * check our queued commands here---of course, there shouldn't
1711 	 * be any outstanding.... */
1712 	if(hostdata->state == NCR_700_HOST_FREE) {
1713 		int i;
1714 
1715 		for(i = 0; i < NCR_700_COMMAND_SLOTS_PER_HOST; i++) {
1716 			/* fairness: always run the queue from the last
1717 			 * position we left off */
1718 			int j = (i + hostdata->saved_slot_position)
1719 				% NCR_700_COMMAND_SLOTS_PER_HOST;
1720 
1721 			if(hostdata->slots[j].state != NCR_700_SLOT_QUEUED)
1722 				continue;
1723 			if(NCR_700_start_command(hostdata->slots[j].cmnd)) {
1724 				DEBUG(("scsi%d: Issuing saved command slot %p, cmd %p\t\n",
1725 				       host->host_no, &hostdata->slots[j],
1726 				       hostdata->slots[j].cmnd));
1727 				hostdata->saved_slot_position = j + 1;
1728 			}
1729 
1730 			break;
1731 		}
1732 	}
1733  out_unlock:
1734 	spin_unlock_irqrestore(host->host_lock, flags);
1735 	return IRQ_RETVAL(handled);
1736 }
1737 
1738 STATIC int
1739 NCR_700_queuecommand(struct scsi_cmnd *SCp, void (*done)(struct scsi_cmnd *))
1740 {
1741 	struct NCR_700_Host_Parameters *hostdata =
1742 		(struct NCR_700_Host_Parameters *)SCp->device->host->hostdata[0];
1743 	__u32 move_ins;
1744 	enum dma_data_direction direction;
1745 	struct NCR_700_command_slot *slot;
1746 
1747 	if(hostdata->command_slot_count >= NCR_700_COMMAND_SLOTS_PER_HOST) {
1748 		/* We're over our allocation; this should never happen
1749 		 * since we report the max allocation to the mid layer */
1750 		printk(KERN_WARNING "scsi%d: Command depth has gone over queue depth\n", SCp->device->host->host_no);
1751 		return 1;
1752 	}
1753 	/* check for untagged commands.  We cannot have any other commands
1754 	 * outstanding if we accept one.  Commands could be untagged because:
1755 	 *
1756 	 * - The tag negotiated bitmap is clear
1757 	 * - The blk layer sent an untagged command
1758 	 */
1759 	if(NCR_700_get_depth(SCp->device) != 0
1760 	   && (!(hostdata->tag_negotiated & (1<<scmd_id(SCp)))
1761 	       || !blk_rq_tagged(SCp->request))) {
1762 		CDEBUG(KERN_ERR, SCp, "has non zero depth %d\n",
1763 		       NCR_700_get_depth(SCp->device));
1764 		return SCSI_MLQUEUE_DEVICE_BUSY;
1765 	}
1766 	if(NCR_700_get_depth(SCp->device) >= SCp->device->queue_depth) {
1767 		CDEBUG(KERN_ERR, SCp, "has max tag depth %d\n",
1768 		       NCR_700_get_depth(SCp->device));
1769 		return SCSI_MLQUEUE_DEVICE_BUSY;
1770 	}
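	/* count this command against the per-device depth before building
	 * its slot */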
1771 	NCR_700_set_depth(SCp->device, NCR_700_get_depth(SCp->device) + 1);
1772 
1773 	/* begin the command here */
1774 	/* no need to check for NULL, test for command_slot_count above
1775 	 * ensures a slot is free */
1776 	slot = find_empty_slot(hostdata);
1777 
1778 	slot->cmnd = SCp;
1779 
1780 	SCp->scsi_done = done;
1781 	SCp->host_scribble = (unsigned char *)slot;
1782 	SCp->SCp.ptr = NULL;
1783 	SCp->SCp.buffer = NULL;
1784 
1785 #ifdef NCR_700_DEBUG
1786 	printk("53c700: scsi%d, command ", SCp->device->host->host_no);
1787 	scsi_print_command(SCp);
1788 #endif
1789 	if(blk_rq_tagged(SCp->request)
1790 	   && (hostdata->tag_negotiated &(1<<scmd_id(SCp))) == 0
1791 	   && NCR_700_get_tag_neg_state(SCp->device) == NCR_700_START_TAG_NEGOTIATION) {
1792 		scmd_printk(KERN_ERR, SCp, "Enabling Tag Command Queuing\n");
1793 		hostdata->tag_negotiated |= (1<<scmd_id(SCp));
1794 		NCR_700_set_tag_neg_state(SCp->device, NCR_700_DURING_TAG_NEGOTIATION);
1795 	}
1796 
1797 	/* here we may have to process an untagged command.  The gate
1798 	 * above ensures that this will be the only one outstanding,
1799 	 * so clear the tag negotiated bit.
1800 	 *
1801 	 * FIXME: This will royally screw up on multiple LUN devices
1802 	 * */
1803 	if(!blk_rq_tagged(SCp->request)
1804 	   && (hostdata->tag_negotiated &(1<<scmd_id(SCp)))) {
1805 		scmd_printk(KERN_INFO, SCp, "Disabling Tag Command Queuing\n");
1806 		hostdata->tag_negotiated &= ~(1<<scmd_id(SCp));
1807 	}
1808 
1809 	if((hostdata->tag_negotiated &(1<<scmd_id(SCp)))
1810 	   && scsi_get_tag_type(SCp->device)) {
1811 		slot->tag = SCp->request->tag;
1812 		CDEBUG(KERN_DEBUG, SCp, "sending out tag %d, slot %p\n",
1813 		       slot->tag, slot);
1814 	} else {
1815 		slot->tag = SCSI_NO_TAG;
1816 		/* must populate current_cmnd for scsi_find_tag to work */
1817 		SCp->device->current_cmnd = SCp;
1818 	}
1819 	/* sanity check: some of the commands generated by the mid-layer
1820 	 * have an eccentric idea of their sc_data_direction */
1821 	if(!SCp->use_sg && !SCp->request_bufflen
1822 	   && SCp->sc_data_direction != DMA_NONE) {
1823 #ifdef NCR_700_DEBUG
1824 		printk("53c700: Command");
1825 		scsi_print_command(SCp);
1826 		printk("Has wrong data direction %d\n", SCp->sc_data_direction);
1827 #endif
1828 		SCp->sc_data_direction = DMA_NONE;
1829 	}
1830 
1831 	switch (SCp->cmnd[0]) {
1832 	case REQUEST_SENSE:
1833 		/* clear the internal sense magic */
1834 		SCp->cmnd[6] = 0;
1835 		/* fall through */
1836 	default:
1837 		/* OK, get it from the command */
1838 		switch(SCp->sc_data_direction) {
1839 		case DMA_BIDIRECTIONAL:
1840 		default:
1841 			printk(KERN_ERR "53c700: Unknown command for data direction ");
1842 			scsi_print_command(SCp);
1843 
1844 			move_ins = 0;
1845 			break;
1846 		case DMA_NONE:
1847 			move_ins = 0;
1848 			break;
1849 		case DMA_FROM_DEVICE:
1850 			move_ins = SCRIPT_MOVE_DATA_IN;
1851 			break;
1852 		case DMA_TO_DEVICE:
1853 			move_ins = SCRIPT_MOVE_DATA_OUT;
1854 			break;
1855 		}
1856 	}
1857 
1858 	/* now build the scatter gather list */
1859 	direction = SCp->sc_data_direction;
1860 	if(move_ins != 0) {
1861 		int i;
1862 		int sg_count;
1863 		dma_addr_t vPtr = 0;
1864 		__u32 count = 0;
1865 
1866 		if(SCp->use_sg) {
1867 			sg_count = dma_map_sg(hostdata->dev,
1868 					      SCp->request_buffer, SCp->use_sg,
1869 					      direction);
1870 		} else {
1871 			vPtr = dma_map_single(hostdata->dev,
1872 					      SCp->request_buffer,
1873 					      SCp->request_bufflen,
1874 					      direction);
1875 			count = SCp->request_bufflen;
1876 			slot->dma_handle = vPtr;
1877 			sg_count = 1;
1878 		}
1879 
1880 
1881 		for(i = 0; i < sg_count; i++) {
1882 
1883 			if(SCp->use_sg) {
1884 				struct scatterlist *sg = SCp->request_buffer;
1885 
1886 				vPtr = sg_dma_address(&sg[i]);
1887 				count = sg_dma_len(&sg[i]);
1888 			}
1889 
1890 			slot->SG[i].ins = bS_to_host(move_ins | count);
1891 			DEBUG((" scatter block %d: move %d[%08x] from 0x%lx\n",
1892 			       i, count, slot->SG[i].ins, (unsigned long)vPtr));
1893 			slot->SG[i].pAddr = bS_to_host(vPtr);
1894 		}
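		/* terminate the per-command SG script with a RETURN so control
		 * passes back to the main script after the last data move */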
1895 		slot->SG[i].ins = bS_to_host(SCRIPT_RETURN);
1896 		slot->SG[i].pAddr = 0;
1897 		dma_cache_sync(slot->SG, sizeof(slot->SG), DMA_TO_DEVICE);
1898 		DEBUG((" SETTING %08lx to %x\n",
1899 		       (&slot->pSG[i].ins),
1900 		       slot->SG[i].ins));
1901 	}
1902 	slot->resume_offset = 0;
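	/* map the CDB itself so the script's command phase MOVE can fetch
	 * it by DMA */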
1903 	slot->pCmd = dma_map_single(hostdata->dev, SCp->cmnd,
1904 				    sizeof(SCp->cmnd), DMA_TO_DEVICE);
1905 	NCR_700_start_command(SCp);
1906 	return 0;
1907 }
1908 
1909 STATIC int
1910 NCR_700_abort(struct scsi_cmnd * SCp)
1911 {
1912 	struct NCR_700_command_slot *slot;
1913 
1914 	scmd_printk(KERN_INFO, SCp,
1915 		"New error handler wants to abort command\n\t");
1916 	scsi_print_command(SCp);
1917 
1918 	slot = (struct NCR_700_command_slot *)SCp->host_scribble;
1919 
1920 	if(slot == NULL)
1921 		/* no outstanding command to abort */
1922 		return SUCCESS;
1923 	if(SCp->cmnd[0] == TEST_UNIT_READY) {
1924 		/* FIXME: This is because of a problem in the new
1925 		 * error handler.  When it is in error recovery, it
1926 		 * will send a TUR to a device it thinks may still be
1927 		 * showing a problem.  If the TUR isn't responded to,
1928 		 * it will abort it and mark the device off line.
1929 		 * Unfortunately, it does no other error recovery, so
1930 		 * this would leave us with an outstanding command
1931 		 * occupying a slot.  Rather than allow this to
1932 		 * happen, we issue a bus reset to force all
1933 		 * outstanding commands to terminate here. */
1934 		NCR_700_internal_bus_reset(SCp->device->host);
1935 		/* still drop through and return failed */
1936 	}
1937 	return FAILED;
1938 
1939 }
1940 
1941 STATIC int
1942 NCR_700_bus_reset(struct scsi_cmnd * SCp)
1943 {
1944 	DECLARE_COMPLETION(complete);
1945 	struct NCR_700_Host_Parameters *hostdata =
1946 		(struct NCR_700_Host_Parameters *)SCp->device->host->hostdata[0];
1947 
1948 	scmd_printk(KERN_INFO, SCp,
1949 		"New error handler wants BUS reset, cmd %p\n\t", SCp);
1950 	scsi_print_command(SCp);
1951 
1952 	/* In theory, eh_complete should always be null because the
1953 	 * eh is single threaded, but just in case we're handling a
1954 	 * reset via sg or something */
1955 	spin_lock_irq(SCp->device->host->host_lock);
1956 	while (hostdata->eh_complete != NULL) {
1957 		spin_unlock_irq(SCp->device->host->host_lock);
1958 		msleep_interruptible(100);
1959 		spin_lock_irq(SCp->device->host->host_lock);
1960 	}
1961 
1962 	hostdata->eh_complete = &complete;
1963 	NCR_700_internal_bus_reset(SCp->device->host);
1964 
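	/* drop the lock and wait for the interrupt handler to signal the
	 * reset complete via eh_complete */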
1965 	spin_unlock_irq(SCp->device->host->host_lock);
1966 	wait_for_completion(&complete);
1967 	spin_lock_irq(SCp->device->host->host_lock);
1968 
1969 	hostdata->eh_complete = NULL;
1970 	/* Revalidate the transport parameters of the failing device */
1971 	if(hostdata->fast)
1972 		spi_schedule_dv_device(SCp->device);
1973 
1974 	spin_unlock_irq(SCp->device->host->host_lock);
1975 	return SUCCESS;
1976 }
1977 
1978 STATIC int
1979 NCR_700_host_reset(struct scsi_cmnd * SCp)
1980 {
1981 	scmd_printk(KERN_INFO, SCp, "New error handler wants HOST reset\n\t");
1982 	scsi_print_command(SCp);
1983 
1984 	spin_lock_irq(SCp->device->host->host_lock);
1985 
1986 	NCR_700_internal_bus_reset(SCp->device->host);
1987 	NCR_700_chip_reset(SCp->device->host);
1988 
1989 	spin_unlock_irq(SCp->device->host->host_lock);
1990 
1991 	return SUCCESS;
1992 }
1993 
1994 STATIC void
1995 NCR_700_set_period(struct scsi_target *STp, int period)
1996 {
1997 	struct Scsi_Host *SHp = dev_to_shost(STp->dev.parent);
1998 	struct NCR_700_Host_Parameters *hostdata =
1999 		(struct NCR_700_Host_Parameters *)SHp->hostdata[0];
2000 
2001 	if(!hostdata->fast)
2002 		return;
2003 
2004 	if(period < hostdata->min_period)
2005 		period = hostdata->min_period;
2006 
2007 	spi_period(STp) = period;
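	/* clear the negotiated state so the next command renegotiates
	 * synchronous transfers at the new period */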
2008 	spi_flags(STp) &= ~(NCR_700_DEV_NEGOTIATED_SYNC |
2009 			    NCR_700_DEV_BEGIN_SYNC_NEGOTIATION);
2010 	spi_flags(STp) |= NCR_700_DEV_PRINT_SYNC_NEGOTIATION;
2011 }
2012 
2013 STATIC void
2014 NCR_700_set_offset(struct scsi_target *STp, int offset)
2015 {
2016 	struct Scsi_Host *SHp = dev_to_shost(STp->dev.parent);
2017 	struct NCR_700_Host_Parameters *hostdata =
2018 		(struct NCR_700_Host_Parameters *)SHp->hostdata[0];
2019 	int max_offset = hostdata->chip710
2020 		? NCR_710_MAX_OFFSET : NCR_700_MAX_OFFSET;
2021 
2022 	if(!hostdata->fast)
2023 		return;
2024 
2025 	if(offset > max_offset)
2026 		offset = max_offset;
2027 
2028 	/* if we're currently async, make sure the period is reasonable */
2029 	if(spi_offset(STp) == 0 && (spi_period(STp) < hostdata->min_period ||
2030 				    spi_period(STp) > 0xff))
2031 		spi_period(STp) = hostdata->min_period;
2032 
2033 	spi_offset(STp) = offset;
2034 	spi_flags(STp) &= ~(NCR_700_DEV_NEGOTIATED_SYNC |
2035 			    NCR_700_DEV_BEGIN_SYNC_NEGOTIATION);
2036 	spi_flags(STp) |= NCR_700_DEV_PRINT_SYNC_NEGOTIATION;
2037 }
2038 
2039 
2040 
2041 STATIC int
2042 NCR_700_slave_configure(struct scsi_device *SDp)
2043 {
2044 	struct NCR_700_Host_Parameters *hostdata =
2045 		(struct NCR_700_Host_Parameters *)SDp->host->hostdata[0];
2046 
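	/* allocate the per-device private area hung off SDp->hostdata */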
2047 	SDp->hostdata = kmalloc(sizeof(struct NCR_700_sense), GFP_KERNEL);
2048 
2049 	if (!SDp->hostdata)
2050 		return -ENOMEM;
2051 
2052 	/* to do here: allocate memory; build a queue_full list */
2053 	if(SDp->tagged_supported) {
2054 		scsi_set_tag_type(SDp, MSG_ORDERED_TAG);
2055 		scsi_activate_tcq(SDp, NCR_700_DEFAULT_TAGS);
2056 		NCR_700_set_tag_neg_state(SDp, NCR_700_START_TAG_NEGOTIATION);
2057 	} else {
2058 		/* initialise to default depth */
2059 		scsi_adjust_queue_depth(SDp, 0, SDp->host->cmd_per_lun);
2060 	}
2061 	if(hostdata->fast) {
2062 		/* Find the correct offset and period via domain validation */
2063 		if (!spi_initial_dv(SDp->sdev_target))
2064 			spi_dv_device(SDp);
2065 	} else {
2066 		spi_offset(SDp->sdev_target) = 0;
2067 		spi_period(SDp->sdev_target) = 0;
2068 	}
2069 	return 0;
2070 }
2071 
2072 STATIC void
2073 NCR_700_slave_destroy(struct scsi_device *SDp)
2074 {
2075 	kfree(SDp->hostdata);
2076 	SDp->hostdata = NULL;
2077 }
2078 
2079 static int
2080 NCR_700_change_queue_depth(struct scsi_device *SDp, int depth)
2081 {
2082 	if (depth > NCR_700_MAX_TAGS)
2083 		depth = NCR_700_MAX_TAGS;
2084 
2085 	scsi_adjust_queue_depth(SDp, scsi_get_tag_type(SDp), depth);
2086 	return depth;
2087 }
2088 
2089 static int NCR_700_change_queue_type(struct scsi_device *SDp, int tag_type)
2090 {
2091 	int change_tag = ((tag_type == 0 && scsi_get_tag_type(SDp) != 0)
2092 			  || (tag_type != 0 && scsi_get_tag_type(SDp) == 0));
2093 	struct NCR_700_Host_Parameters *hostdata =
2094 		(struct NCR_700_Host_Parameters *)SDp->host->hostdata[0];
2095 
2096 	scsi_set_tag_type(SDp, tag_type);
2097 
2098 	/* We have a global (per target) flag to track whether TCQ is
2099 	 * enabled, so we'll be turning it off for the entire target here.
2100 	 * Our tag algorithm will fail if we mix tagged and untagged commands,
2101 	 * so quiesce the device before doing this */
2102 	if (change_tag)
2103 		scsi_target_quiesce(SDp->sdev_target);
2104 
2105 	if (!tag_type) {
2106 		/* shift back to the default unqueued number of commands
2107 		 * (the user can still raise this) */
2108 		scsi_deactivate_tcq(SDp, SDp->host->cmd_per_lun);
2109 		hostdata->tag_negotiated &= ~(1 << sdev_id(SDp));
2110 	} else {
2111 		/* Here, we cleared the negotiation flag above, so this
2112 		 * will force the driver to renegotiate */
2113 		scsi_activate_tcq(SDp, SDp->queue_depth);
2114 		if (change_tag)
2115 			NCR_700_set_tag_neg_state(SDp, NCR_700_START_TAG_NEGOTIATION);
2116 	}
2117 	if (change_tag)
2118 		scsi_target_resume(SDp->sdev_target);
2119 
2120 	return tag_type;
2121 }
2122 
2123 static ssize_t
2124 NCR_700_show_active_tags(struct device *dev, struct device_attribute *attr, char *buf)
2125 {
2126 	struct scsi_device *SDp = to_scsi_device(dev);
2127 
2128 	return snprintf(buf, 20, "%d\n", NCR_700_get_depth(SDp));
2129 }
2130 
2131 static struct device_attribute NCR_700_active_tags_attr = {
2132 	.attr = {
2133 		.name =		"active_tags",
2134 		.mode =		S_IRUGO,
2135 	},
2136 	.show = NCR_700_show_active_tags,
2137 };
2138 
2139 STATIC struct device_attribute *NCR_700_dev_attrs[] = {
2140 	&NCR_700_active_tags_attr,
2141 	NULL,
2142 };
2143 
2144 EXPORT_SYMBOL(NCR_700_detect);
2145 EXPORT_SYMBOL(NCR_700_release);
2146 EXPORT_SYMBOL(NCR_700_intr);
2147 
2148 static struct spi_function_template NCR_700_transport_functions =  {
2149 	.set_period	= NCR_700_set_period,
2150 	.show_period	= 1,
2151 	.set_offset	= NCR_700_set_offset,
2152 	.show_offset	= 1,
2153 };
2154 
2155 static int __init NCR_700_init(void)
2156 {
2157 	NCR_700_transport_template = spi_attach_transport(&NCR_700_transport_functions);
2158 	if(!NCR_700_transport_template)
2159 		return -ENODEV;
2160 	return 0;
2161 }
2162 
2163 static void __exit NCR_700_exit(void)
2164 {
2165 	spi_release_transport(NCR_700_transport_template);
2166 }
2167 
2168 module_init(NCR_700_init);
2169 module_exit(NCR_700_exit);
2170 
2171