xref: /linux/drivers/scsi/53c700.c (revision ed4bc1890b4984d0af447ad3cc1f93541623f8f3)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /* -*- mode: c; c-basic-offset: 8 -*- */
3 
4 /* NCR (or Symbios) 53c700 and 53c700-66 Driver
5  *
6  * Copyright (C) 2001 by James.Bottomley@HansenPartnership.com
7 **-----------------------------------------------------------------------------
8 **
9 **
10 **-----------------------------------------------------------------------------
11  */
12 
13 /* Notes:
14  *
15  * This driver is designed exclusively for these chips (virtually the
16  * earliest of the scripts engine chips).  They need their own drivers
17  * because they are missing so many of the scripts and snazzy register
18  * features of their elder brothers (the 710, 720 and 770).
19  *
20  * The 700 is the lowliest of the line; it can only do async SCSI.
21  * The 700-66 can at least do synchronous SCSI up to 10MHz.
22  *
23  * The 700 chip has no host bus interface logic of its own.  However,
24  * it is usually mapped to a location with well defined register
25  * offsets.  Therefore, if you can determine the base address and the
26  * irq your board incorporating this chip uses, you can probably use
27  * this driver to run it (although you'll probably have to write a
28  * minimal wrapper for the purpose---see the NCR_D700 driver for
29  * details about how to do this).
30  *
31  *
32  * TODO List:
33  *
34  * 1. Better statistics in the proc fs
35  *
36  * 2. Implement message queue (queues SCSI messages like commands) and make
37  *    the abort and device reset functions use them.
38  * */
39 
40 /* CHANGELOG
41  *
42  * Version 2.8
43  *
44  * Fixed bad bug affecting tag starvation processing (previously the
45  * driver would hang the system if too many tags starved).  Also fixed
46  * bad bug having to do with 10 byte command processing and REQUEST
47  * SENSE (the command would loop forever getting a transfer length
48  * mismatch in the CMD phase).
49  *
50  * Version 2.7
51  *
52  * Fixed scripts problem which caused certain devices (notably CDRWs)
53  * to hang on initial INQUIRY.  Updated NCR_700_readl/writel to use
54  * __raw_readl/writel for parisc compatibility (Thomas
55  * Bogendoerfer). Added missing SCp->request_bufflen initialisation
56  * for sense requests (Ryan Bradetich).
57  *
58  * Version 2.6
59  *
60  * Following test of the 64 bit parisc kernel by Richard Hirst,
61  * several problems have now been corrected.  Also adds support for
62  * consistent memory allocation.
63  *
64  * Version 2.5
65  *
66  * More compatibility changes for 710 (now actually works).  Enhanced
67  * support for odd clock speeds which constrain SDTR negotiations.
68  * Corrected cacheline separation for SCSI messages and status for
69  * incoherent architectures.  Use of the PCI mapping functions on
70  * buffers to begin support for 64 bit drivers.
71  *
72  * Version 2.4
73  *
74  * Added support for the 53c710 chip (in 53c700 emulation mode only---no
75  * special 53c710 instructions or registers are used).
76  *
77  * Version 2.3
78  *
79  * More endianness/cache coherency changes.
80  *
81  * Better bad device handling (handles devices lying about tag
82  * queueing support and devices which fail to provide sense data on
83  * contingent allegiance conditions)
84  *
85  * Many thanks to Richard Hirst <rhirst@linuxcare.com> for patiently
86  * debugging this driver on the parisc architecture and suggesting
87  * many improvements and bug fixes.
88  *
89  * Thanks also go to Linuxcare Inc. for providing several PARISC
90  * machines for me to debug the driver on.
91  *
92  * Version 2.2
93  *
94  * Made the driver mem or io mapped; added endian invariance; added
95  * dma cache flushing operations for architectures which need it;
96  * added support for more varied clocking speeds.
97  *
98  * Version 2.1
99  *
100  * Initial modularisation from the D700.  See NCR_D700.c for the rest of
101  * the changelog.
102  * */
103 #define NCR_700_VERSION "2.8"
104 
105 #include <linux/kernel.h>
106 #include <linux/types.h>
107 #include <linux/string.h>
108 #include <linux/slab.h>
109 #include <linux/ioport.h>
110 #include <linux/delay.h>
111 #include <linux/spinlock.h>
112 #include <linux/completion.h>
113 #include <linux/init.h>
114 #include <linux/proc_fs.h>
115 #include <linux/blkdev.h>
116 #include <linux/module.h>
117 #include <linux/interrupt.h>
118 #include <linux/device.h>
119 #include <linux/pgtable.h>
120 #include <asm/dma.h>
121 #include <asm/io.h>
122 #include <asm/byteorder.h>
123 
124 #include <scsi/scsi.h>
125 #include <scsi/scsi_cmnd.h>
126 #include <scsi/scsi_dbg.h>
127 #include <scsi/scsi_eh.h>
128 #include <scsi/scsi_host.h>
129 #include <scsi/scsi_tcq.h>
130 #include <scsi/scsi_transport.h>
131 #include <scsi/scsi_transport_spi.h>
132 
133 #include "53c700.h"
134 
135 /* NOTE: For 64 bit drivers there are points in the code where we use
136  * a non dereferenceable pointer to point to a structure in dma-able
137  * memory (which is 32 bits) so that we can use all of the structure
138  * operations but take the address at the end.  This macro allows us
139  * to truncate the 64 bit pointer down to 32 bits without the compiler
140  * complaining */
141 #define to32bit(x)	((__u32)((unsigned long)(x)))
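
/* Illustrative use only (mirrors calls made later in this file, e.g. in
 * NCR_700_start_command): the slot's pSG pointer refers to the 32 bit
 * DMA-able area, so we can do normal pointer arithmetic on it and then
 * squash the result down when patching the script:
 *
 *	script_patch_32_abs(hostdata, hostdata->script,
 *	                    SGScriptStartAddress, to32bit(&slot->pSG[0].ins));
 *
 * pSG itself must never be dereferenced on a 64 bit host; only the
 * truncated address is handed to the chip. */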
142 
143 #ifdef NCR_700_DEBUG
144 #define STATIC
145 #else
146 #define STATIC static
147 #endif
148 
149 MODULE_AUTHOR("James Bottomley");
150 MODULE_DESCRIPTION("53c700 and 53c700-66 Driver");
151 MODULE_LICENSE("GPL");
152 
153 /* This is the script */
154 #include "53c700_d.h"
155 
156 
157 STATIC int NCR_700_queuecommand(struct Scsi_Host *h, struct scsi_cmnd *);
158 STATIC int NCR_700_abort(struct scsi_cmnd * SCpnt);
159 STATIC int NCR_700_host_reset(struct scsi_cmnd * SCpnt);
160 STATIC void NCR_700_chip_setup(struct Scsi_Host *host);
161 STATIC void NCR_700_chip_reset(struct Scsi_Host *host);
162 STATIC int NCR_700_slave_alloc(struct scsi_device *SDpnt);
163 STATIC int NCR_700_slave_configure(struct scsi_device *SDpnt);
164 STATIC void NCR_700_slave_destroy(struct scsi_device *SDpnt);
165 static int NCR_700_change_queue_depth(struct scsi_device *SDpnt, int depth);
166 
167 STATIC struct device_attribute *NCR_700_dev_attrs[];
168 
169 STATIC struct scsi_transport_template *NCR_700_transport_template = NULL;
170 
171 static char *NCR_700_phase[] = {
172 	"",
173 	"after selection",
174 	"before command phase",
175 	"after command phase",
176 	"after status phase",
177 	"after data in phase",
178 	"after data out phase",
179 	"during data phase",
180 };
181 
182 static char *NCR_700_condition[] = {
183 	"",
184 	"NOT MSG_OUT",
185 	"UNEXPECTED PHASE",
186 	"NOT MSG_IN",
187 	"UNEXPECTED MSG",
188 	"MSG_IN",
189 	"SDTR_MSG RECEIVED",
190 	"REJECT_MSG RECEIVED",
191 	"DISCONNECT_MSG RECEIVED",
192 	"MSG_OUT",
193 	"DATA_IN",
194 
195 };
196 
197 static char *NCR_700_fatal_messages[] = {
198 	"unexpected message after reselection",
199 	"still MSG_OUT after message injection",
200 	"not MSG_IN after selection",
201 	"Illegal message length received",
202 };
203 
204 static char *NCR_700_SBCL_bits[] = {
205 	"IO ",
206 	"CD ",
207 	"MSG ",
208 	"ATN ",
209 	"SEL ",
210 	"BSY ",
211 	"ACK ",
212 	"REQ ",
213 };
214 
215 static char *NCR_700_SBCL_to_phase[] = {
216 	"DATA_OUT",
217 	"DATA_IN",
218 	"CMD_OUT",
219 	"STATE",
220 	"ILLEGAL PHASE",
221 	"ILLEGAL PHASE",
222 	"MSG OUT",
223 	"MSG IN",
224 };
225 
226 /* This translates the SDTR message offset and period to a value
227  * which can be loaded into the SXFER_REG.
228  *
229  * NOTE: According to SCSI-2, the true transfer period (in ns) is
230  *       actually four times this period value */
231 static inline __u8
232 NCR_700_offset_period_to_sxfer(struct NCR_700_Host_Parameters *hostdata,
233 			       __u8 offset, __u8 period)
234 {
235 	int XFERP;
236 
237 	__u8 min_xferp = (hostdata->chip710
238 			  ? NCR_710_MIN_XFERP : NCR_700_MIN_XFERP);
239 	__u8 max_offset = (hostdata->chip710
240 			   ? NCR_710_MAX_OFFSET : NCR_700_MAX_OFFSET);
241 
242 	if(offset == 0)
243 		return 0;
244 
245 	if(period < hostdata->min_period) {
246 		printk(KERN_WARNING "53c700: Period %dns is less than this chip's minimum, setting to %dns\n", period*4, hostdata->min_period*4);
247 		period = hostdata->min_period;
248 	}
249 	XFERP = (period*4 * hostdata->sync_clock)/1000 - 4;
250 	if(offset > max_offset) {
251 		printk(KERN_WARNING "53c700: Offset %d exceeds chip maximum, setting to %d\n",
252 		       offset, max_offset);
253 		offset = max_offset;
254 	}
255 	if(XFERP < min_xferp) {
256 		XFERP =  min_xferp;
257 	}
258 	return (offset & 0x0f) | (XFERP & 0x07)<<4;
259 }
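
/* Worked example (illustrative numbers only): with a synchronous clock of
 * 50MHz and an SDTR period value of 25 (i.e. 25 * 4 = 100ns), XFERP is
 * (100 * 50)/1000 - 4 = 1, so an offset of 8 gives an SXFER value of
 * (8 & 0x0f) | (1 << 4) = 0x18.  If the device asks for a period shorter
 * than the chip's minimum, the clamp above kicks in first. */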
260 
261 static inline __u8
262 NCR_700_get_SXFER(struct scsi_device *SDp)
263 {
264 	struct NCR_700_Host_Parameters *hostdata =
265 		(struct NCR_700_Host_Parameters *)SDp->host->hostdata[0];
266 
267 	return NCR_700_offset_period_to_sxfer(hostdata,
268 					      spi_offset(SDp->sdev_target),
269 					      spi_period(SDp->sdev_target));
270 }
271 
272 static inline void dma_sync_to_dev(struct NCR_700_Host_Parameters *h,
273 		void *addr, size_t size)
274 {
275 	if (h->noncoherent)
276 		dma_cache_sync(h->dev, addr, size, DMA_TO_DEVICE);
277 }
278 
279 static inline void dma_sync_from_dev(struct NCR_700_Host_Parameters *h,
280 		void *addr, size_t size)
281 {
282 	if (h->noncoherent)
283 		dma_cache_sync(h->dev, addr, size, DMA_FROM_DEVICE);
284 }
285 
286 struct Scsi_Host *
287 NCR_700_detect(struct scsi_host_template *tpnt,
288 	       struct NCR_700_Host_Parameters *hostdata, struct device *dev)
289 {
290 	dma_addr_t pScript, pSlots;
291 	__u8 *memory;
292 	__u32 *script;
293 	struct Scsi_Host *host;
294 	static int banner = 0;
295 	int j;
296 
297 	if(tpnt->sdev_attrs == NULL)
298 		tpnt->sdev_attrs = NCR_700_dev_attrs;
299 
300 	memory = dma_alloc_coherent(dev, TOTAL_MEM_SIZE, &pScript, GFP_KERNEL);
301 	if (!memory) {
302 		hostdata->noncoherent = 1;
303 		memory = dma_alloc_attrs(dev, TOTAL_MEM_SIZE, &pScript,
304 					 GFP_KERNEL, DMA_ATTR_NON_CONSISTENT);
305 	}
306 	if (!memory) {
307 		printk(KERN_ERR "53c700: Failed to allocate memory for driver, detaching\n");
308 		return NULL;
309 	}
310 
311 	script = (__u32 *)memory;
312 	hostdata->msgin = memory + MSGIN_OFFSET;
313 	hostdata->msgout = memory + MSGOUT_OFFSET;
314 	hostdata->status = memory + STATUS_OFFSET;
315 	hostdata->slots = (struct NCR_700_command_slot *)(memory + SLOTS_OFFSET);
316 	hostdata->dev = dev;
317 
318 	pSlots = pScript + SLOTS_OFFSET;
319 
320 	/* Fill in the missing routines from the host template */
321 	tpnt->queuecommand = NCR_700_queuecommand;
322 	tpnt->eh_abort_handler = NCR_700_abort;
323 	tpnt->eh_host_reset_handler = NCR_700_host_reset;
324 	tpnt->can_queue = NCR_700_COMMAND_SLOTS_PER_HOST;
325 	tpnt->sg_tablesize = NCR_700_SG_SEGMENTS;
326 	tpnt->cmd_per_lun = NCR_700_CMD_PER_LUN;
327 	tpnt->slave_configure = NCR_700_slave_configure;
328 	tpnt->slave_destroy = NCR_700_slave_destroy;
329 	tpnt->slave_alloc = NCR_700_slave_alloc;
330 	tpnt->change_queue_depth = NCR_700_change_queue_depth;
331 
332 	if(tpnt->name == NULL)
333 		tpnt->name = "53c700";
334 	if(tpnt->proc_name == NULL)
335 		tpnt->proc_name = "53c700";
336 
337 	host = scsi_host_alloc(tpnt, 4);
338 	if (!host)
339 		return NULL;
340 	memset(hostdata->slots, 0, sizeof(struct NCR_700_command_slot)
341 	       * NCR_700_COMMAND_SLOTS_PER_HOST);
342 	for (j = 0; j < NCR_700_COMMAND_SLOTS_PER_HOST; j++) {
343 		dma_addr_t offset = (dma_addr_t)((unsigned long)&hostdata->slots[j].SG[0]
344 					  - (unsigned long)&hostdata->slots[0].SG[0]);
345 		hostdata->slots[j].pSG = (struct NCR_700_SG_List *)((unsigned long)(pSlots + offset));
346 		if(j == 0)
347 			hostdata->free_list = &hostdata->slots[j];
348 		else
349 			hostdata->slots[j-1].ITL_forw = &hostdata->slots[j];
350 		hostdata->slots[j].state = NCR_700_SLOT_FREE;
351 	}
352 
353 	for (j = 0; j < ARRAY_SIZE(SCRIPT); j++)
354 		script[j] = bS_to_host(SCRIPT[j]);
355 
356 	/* adjust all labels to be bus physical */
357 	for (j = 0; j < PATCHES; j++)
358 		script[LABELPATCHES[j]] = bS_to_host(pScript + SCRIPT[LABELPATCHES[j]]);
359 	/* now patch up fixed addresses. */
360 	script_patch_32(hostdata, script, MessageLocation,
361 			pScript + MSGOUT_OFFSET);
362 	script_patch_32(hostdata, script, StatusAddress,
363 			pScript + STATUS_OFFSET);
364 	script_patch_32(hostdata, script, ReceiveMsgAddress,
365 			pScript + MSGIN_OFFSET);
366 
367 	hostdata->script = script;
368 	hostdata->pScript = pScript;
369 	dma_sync_single_for_device(hostdata->dev, pScript, sizeof(SCRIPT), DMA_TO_DEVICE);
370 	hostdata->state = NCR_700_HOST_FREE;
371 	hostdata->cmd = NULL;
372 	host->max_id = 8;
373 	host->max_lun = NCR_700_MAX_LUNS;
374 	BUG_ON(NCR_700_transport_template == NULL);
375 	host->transportt = NCR_700_transport_template;
376 	host->unique_id = (unsigned long)hostdata->base;
377 	hostdata->eh_complete = NULL;
378 	host->hostdata[0] = (unsigned long)hostdata;
379 	/* kick the chip */
380 	NCR_700_writeb(0xff, host, CTEST9_REG);
381 	if (hostdata->chip710)
382 		hostdata->rev = (NCR_700_readb(host, CTEST8_REG)>>4) & 0x0f;
383 	else
384 		hostdata->rev = (NCR_700_readb(host, CTEST7_REG)>>4) & 0x0f;
385 	hostdata->fast = (NCR_700_readb(host, CTEST9_REG) == 0);
386 	if (banner == 0) {
387 		printk(KERN_NOTICE "53c700: Version " NCR_700_VERSION " By James.Bottomley@HansenPartnership.com\n");
388 		banner = 1;
389 	}
390 	printk(KERN_NOTICE "scsi%d: %s rev %d %s\n", host->host_no,
391 	       hostdata->chip710 ? "53c710" :
392 	       (hostdata->fast ? "53c700-66" : "53c700"),
393 	       hostdata->rev, hostdata->differential ?
394 	       "(Differential)" : "");
395 	/* reset the chip */
396 	NCR_700_chip_reset(host);
397 
398 	if (scsi_add_host(host, dev)) {
399 		dev_printk(KERN_ERR, dev, "53c700: scsi_add_host failed\n");
400 		scsi_host_put(host);
401 		return NULL;
402 	}
403 
404 	spi_signalling(host) = hostdata->differential ? SPI_SIGNAL_HVD :
405 		SPI_SIGNAL_SE;
406 
407 	return host;
408 }
409 
410 int
411 NCR_700_release(struct Scsi_Host *host)
412 {
413 	struct NCR_700_Host_Parameters *hostdata =
414 		(struct NCR_700_Host_Parameters *)host->hostdata[0];
415 
416 	if (hostdata->noncoherent)
417 		dma_free_attrs(hostdata->dev, TOTAL_MEM_SIZE, hostdata->script,
418 			       hostdata->pScript, DMA_ATTR_NON_CONSISTENT);
419 	else
420 		dma_free_coherent(hostdata->dev, TOTAL_MEM_SIZE,
421 				  hostdata->script, hostdata->pScript);
422 	return 1;
423 }
424 
425 static inline __u8
426 NCR_700_identify(int can_disconnect, __u8 lun)
427 {
428 	return IDENTIFY_BASE |
429 		((can_disconnect) ? 0x40 : 0) |
430 		(lun & NCR_700_LUN_MASK);
431 }
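
/* Illustrative values: IDENTIFY_BASE is 0x80, so an IDENTIFY allowing
 * disconnection to LUN 2 is 0x80 | 0x40 | 0x02 = 0xc2, while the untagged
 * REQUEST SENSE path (can_disconnect == 0) to LUN 0 sends a plain 0x80. */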
432 
433 /*
434  * Function : static int NCR_700_data_residual (struct Scsi_Host *host)
435  *
436  * Purpose : return residual data count of what's in the chip.  If you
437  * really want to know what this function is doing, it's almost a
438  * direct transcription of the algorithm described in the 53c710
439  * guide, except that the DBC and DFIFO registers are only 6 bits
440  * wide on a 53c700.
441  *
442  * Inputs : host - SCSI host */
443 static inline int
444 NCR_700_data_residual (struct Scsi_Host *host) {
445 	struct NCR_700_Host_Parameters *hostdata =
446 		(struct NCR_700_Host_Parameters *)host->hostdata[0];
447 	int count, synchronous = 0;
448 	unsigned int ddir;
449 
450 	if(hostdata->chip710) {
451 		count = ((NCR_700_readb(host, DFIFO_REG) & 0x7f) -
452 			 (NCR_700_readl(host, DBC_REG) & 0x7f)) & 0x7f;
453 	} else {
454 		count = ((NCR_700_readb(host, DFIFO_REG) & 0x3f) -
455 			 (NCR_700_readl(host, DBC_REG) & 0x3f)) & 0x3f;
456 	}
457 
458 	if(hostdata->fast)
459 		synchronous = NCR_700_readb(host, SXFER_REG) & 0x0f;
460 
461 	/* get the data direction */
462 	ddir = NCR_700_readb(host, CTEST0_REG) & 0x01;
463 
464 	if (ddir) {
465 		/* Receive */
466 		if (synchronous)
467 			count += (NCR_700_readb(host, SSTAT2_REG) & 0xf0) >> 4;
468 		else
469 			if (NCR_700_readb(host, SSTAT1_REG) & SIDL_REG_FULL)
470 				++count;
471 	} else {
472 		/* Send */
473 		__u8 sstat = NCR_700_readb(host, SSTAT1_REG);
474 		if (sstat & SODL_REG_FULL)
475 			++count;
476 		if (synchronous && (sstat & SODR_REG_FULL))
477 			++count;
478 	}
479 #ifdef NCR_700_DEBUG
480 	if(count)
481 		printk("RESIDUAL IS %d (ddir %d)\n", count, ddir);
482 #endif
483 	return count;
484 }
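
/* Example of the FIFO arithmetic above (assumed register values, for
 * illustration): on a 710, if DFIFO holds 0x05 and the low bits of DBC
 * hold 0x7e, the bytes left in the DMA FIFO are (0x05 - 0x7e) & 0x7f = 7;
 * the final mask makes the subtraction wrap correctly because both
 * counters are modulo-128 (modulo-64 on the 700, hence the 0x3f masks). */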
485 
486 /* print out the SCSI wires and corresponding phase from the SBCL register
487  * in the chip */
488 static inline char *
489 sbcl_to_string(__u8 sbcl)
490 {
491 	int i;
492 	static char ret[256];
493 
494 	ret[0]='\0';
495 	for(i=0; i<8; i++) {
496 		if((1<<i) & sbcl)
497 			strcat(ret, NCR_700_SBCL_bits[i]);
498 	}
499 	strcat(ret, NCR_700_SBCL_to_phase[sbcl & 0x07]);
500 	return ret;
501 }
502 
503 static inline __u8
504 bitmap_to_number(__u8 bitmap)
505 {
506 	__u8 i;
507 
508 	for(i=0; i<8 && !(bitmap &(1<<i)); i++)
509 		;
510 	return i;
511 }
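
/* e.g. a reselection bitmap of 0x20 (only bit 5 set) yields SCSI ID 5;
 * if no bit is set the loop falls through and returns 8. */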
512 
513 /* Pull a slot off the free list */
514 STATIC struct NCR_700_command_slot *
515 find_empty_slot(struct NCR_700_Host_Parameters *hostdata)
516 {
517 	struct NCR_700_command_slot *slot = hostdata->free_list;
518 
519 	if(slot == NULL) {
520 		/* sanity check */
521 		if(hostdata->command_slot_count != NCR_700_COMMAND_SLOTS_PER_HOST)
522 			printk(KERN_ERR "SLOTS FULL, but count is %d, should be %d\n", hostdata->command_slot_count, NCR_700_COMMAND_SLOTS_PER_HOST);
523 		return NULL;
524 	}
525 
526 	if(slot->state != NCR_700_SLOT_FREE)
527 		/* should panic! */
528 		printk(KERN_ERR "BUSY SLOT ON FREE LIST!!!\n");
529 
530 
531 	hostdata->free_list = slot->ITL_forw;
532 	slot->ITL_forw = NULL;
533 
534 
535 	/* NOTE: set the state to busy here, not queued, since this
536 	 * indicates the slot is in use and cannot be run by the IRQ
537 	 * finish routine.  If we cannot start the command once it
538 	 * is properly built, we then change to NCR_700_SLOT_QUEUED */
539 	slot->state = NCR_700_SLOT_BUSY;
540 	slot->flags = 0;
541 	hostdata->command_slot_count++;
542 
543 	return slot;
544 }
545 
546 STATIC void
547 free_slot(struct NCR_700_command_slot *slot,
548 	  struct NCR_700_Host_Parameters *hostdata)
549 {
550 	if((slot->state & NCR_700_SLOT_MASK) != NCR_700_SLOT_MAGIC) {
551 		printk(KERN_ERR "53c700: SLOT %p is not MAGIC!!!\n", slot);
552 	}
553 	if(slot->state == NCR_700_SLOT_FREE) {
554 		printk(KERN_ERR "53c700: SLOT %p is FREE!!!\n", slot);
555 	}
556 
557 	slot->resume_offset = 0;
558 	slot->cmnd = NULL;
559 	slot->state = NCR_700_SLOT_FREE;
560 	slot->ITL_forw = hostdata->free_list;
561 	hostdata->free_list = slot;
562 	hostdata->command_slot_count--;
563 }
564 
565 
566 /* This routine really does very little.  The command is indexed on
567    the ITL and (if tagged) the ITLQ lists in _queuecommand */
568 STATIC void
569 save_for_reselection(struct NCR_700_Host_Parameters *hostdata,
570 		     struct scsi_cmnd *SCp, __u32 dsp)
571 {
572 	/* It's just possible that this gets executed twice */
573 	if(SCp != NULL) {
574 		struct NCR_700_command_slot *slot =
575 			(struct NCR_700_command_slot *)SCp->host_scribble;
576 
577 		slot->resume_offset = dsp;
578 	}
579 	hostdata->state = NCR_700_HOST_FREE;
580 	hostdata->cmd = NULL;
581 }
582 
583 STATIC inline void
584 NCR_700_unmap(struct NCR_700_Host_Parameters *hostdata, struct scsi_cmnd *SCp,
585 	      struct NCR_700_command_slot *slot)
586 {
587 	if(SCp->sc_data_direction != DMA_NONE &&
588 	   SCp->sc_data_direction != DMA_BIDIRECTIONAL)
589 		scsi_dma_unmap(SCp);
590 }
591 
592 STATIC inline void
593 NCR_700_scsi_done(struct NCR_700_Host_Parameters *hostdata,
594 	       struct scsi_cmnd *SCp, int result)
595 {
596 	hostdata->state = NCR_700_HOST_FREE;
597 	hostdata->cmd = NULL;
598 
599 	if(SCp != NULL) {
600 		struct NCR_700_command_slot *slot =
601 			(struct NCR_700_command_slot *)SCp->host_scribble;
602 
603 		dma_unmap_single(hostdata->dev, slot->pCmd,
604 				 MAX_COMMAND_SIZE, DMA_TO_DEVICE);
605 		if (slot->flags == NCR_700_FLAG_AUTOSENSE) {
606 			char *cmnd = NCR_700_get_sense_cmnd(SCp->device);
607 
608 			dma_unmap_single(hostdata->dev, slot->dma_handle,
609 					 SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
610 			/* restore the old result if the request sense was
611 			 * successful */
612 			if (result == 0)
613 				result = cmnd[7];
614 			/* restore the original length */
615 			SCp->cmd_len = cmnd[8];
616 		} else
617 			NCR_700_unmap(hostdata, SCp, slot);
618 
619 		free_slot(slot, hostdata);
620 #ifdef NCR_700_DEBUG
621 		if(NCR_700_get_depth(SCp->device) == 0 ||
622 		   NCR_700_get_depth(SCp->device) > SCp->device->queue_depth)
623 			printk(KERN_ERR "Invalid depth in NCR_700_scsi_done(): %d\n",
624 			       NCR_700_get_depth(SCp->device));
625 #endif /* NCR_700_DEBUG */
626 		NCR_700_set_depth(SCp->device, NCR_700_get_depth(SCp->device) - 1);
627 
628 		SCp->host_scribble = NULL;
629 		SCp->result = result;
630 		SCp->scsi_done(SCp);
631 	} else {
632 		printk(KERN_ERR "53c700: SCSI DONE HAS NULL SCp\n");
633 	}
634 }
635 
636 
637 STATIC void
638 NCR_700_internal_bus_reset(struct Scsi_Host *host)
639 {
640 	/* Bus reset */
641 	NCR_700_writeb(ASSERT_RST, host, SCNTL1_REG);
642 	udelay(50);
643 	NCR_700_writeb(0, host, SCNTL1_REG);
644 
645 }
646 
647 STATIC void
648 NCR_700_chip_setup(struct Scsi_Host *host)
649 {
650 	struct NCR_700_Host_Parameters *hostdata =
651 		(struct NCR_700_Host_Parameters *)host->hostdata[0];
652 	__u8 min_period;
653 	__u8 min_xferp = (hostdata->chip710 ? NCR_710_MIN_XFERP : NCR_700_MIN_XFERP);
654 
655 	if(hostdata->chip710) {
656 		__u8 burst_disable = 0;
657 		__u8 burst_length = 0;
658 
659 		switch (hostdata->burst_length) {
660 			case 1:
661 			        burst_length = BURST_LENGTH_1;
662 			        break;
663 			case 2:
664 			        burst_length = BURST_LENGTH_2;
665 			        break;
666 			case 4:
667 			        burst_length = BURST_LENGTH_4;
668 			        break;
669 			case 8:
670 			        burst_length = BURST_LENGTH_8;
671 			        break;
672 			default:
673 			        burst_disable = BURST_DISABLE;
674 			        break;
675 		}
676 		hostdata->dcntl_extra |= COMPAT_700_MODE;
677 
678 		NCR_700_writeb(hostdata->dcntl_extra, host, DCNTL_REG);
679 		NCR_700_writeb(burst_length | hostdata->dmode_extra,
680 			       host, DMODE_710_REG);
681 		NCR_700_writeb(burst_disable | hostdata->ctest7_extra |
682 			       (hostdata->differential ? DIFF : 0),
683 			       host, CTEST7_REG);
684 		NCR_700_writeb(BTB_TIMER_DISABLE, host, CTEST0_REG);
685 		NCR_700_writeb(FULL_ARBITRATION | ENABLE_PARITY | PARITY
686 			       | AUTO_ATN, host, SCNTL0_REG);
687 	} else {
688 		NCR_700_writeb(BURST_LENGTH_8 | hostdata->dmode_extra,
689 			       host, DMODE_700_REG);
690 		NCR_700_writeb(hostdata->differential ?
691 			       DIFF : 0, host, CTEST7_REG);
692 		if(hostdata->fast) {
693 			/* this is for 700-66, does nothing on 700 */
694 			NCR_700_writeb(LAST_DIS_ENBL | ENABLE_ACTIVE_NEGATION
695 				       | GENERATE_RECEIVE_PARITY, host,
696 				       CTEST8_REG);
697 		} else {
698 			NCR_700_writeb(FULL_ARBITRATION | ENABLE_PARITY
699 				       | PARITY | AUTO_ATN, host, SCNTL0_REG);
700 		}
701 	}
702 
703 	NCR_700_writeb(1 << host->this_id, host, SCID_REG);
704 	NCR_700_writeb(0, host, SBCL_REG);
705 	NCR_700_writeb(ASYNC_OPERATION, host, SXFER_REG);
706 
707 	NCR_700_writeb(PHASE_MM_INT | SEL_TIMEOUT_INT | GROSS_ERR_INT | UX_DISC_INT
708 	     | RST_INT | PAR_ERR_INT | SELECT_INT, host, SIEN_REG);
709 
710 	NCR_700_writeb(ABORT_INT | INT_INST_INT | ILGL_INST_INT, host, DIEN_REG);
711 	NCR_700_writeb(ENABLE_SELECT, host, SCNTL1_REG);
712 	if(hostdata->clock > 75) {
713 		printk(KERN_ERR "53c700: Clock speed %dMHz is too high: 75MHz is the maximum this chip can be driven at\n", hostdata->clock);
714 		/* do the best we can, but the async clock will be out
715 		 * of spec: sync divider 2, async divider 3 */
716 		DEBUG(("53c700: sync 2 async 3\n"));
717 		NCR_700_writeb(SYNC_DIV_2_0, host, SBCL_REG);
718 		NCR_700_writeb(ASYNC_DIV_3_0 | hostdata->dcntl_extra, host, DCNTL_REG);
719 		hostdata->sync_clock = hostdata->clock/2;
720 	} else	if(hostdata->clock > 50  && hostdata->clock <= 75) {
721 		/* sync divider 1.5, async divider 3 */
722 		DEBUG(("53c700: sync 1.5 async 3\n"));
723 		NCR_700_writeb(SYNC_DIV_1_5, host, SBCL_REG);
724 		NCR_700_writeb(ASYNC_DIV_3_0 | hostdata->dcntl_extra, host, DCNTL_REG);
725 		hostdata->sync_clock = hostdata->clock*2;
726 		hostdata->sync_clock /= 3;
727 
728 	} else if(hostdata->clock > 37 && hostdata->clock <= 50) {
729 		/* sync divider 1, async divider 2 */
730 		DEBUG(("53c700: sync 1 async 2\n"));
731 		NCR_700_writeb(SYNC_DIV_1_0, host, SBCL_REG);
732 		NCR_700_writeb(ASYNC_DIV_2_0 | hostdata->dcntl_extra, host, DCNTL_REG);
733 		hostdata->sync_clock = hostdata->clock;
734 	} else if(hostdata->clock > 25 && hostdata->clock <=37) {
735 		/* sync divider 1, async divider 1.5 */
736 		DEBUG(("53c700: sync 1 async 1.5\n"));
737 		NCR_700_writeb(SYNC_DIV_1_0, host, SBCL_REG);
738 		NCR_700_writeb(ASYNC_DIV_1_5 | hostdata->dcntl_extra, host, DCNTL_REG);
739 		hostdata->sync_clock = hostdata->clock;
740 	} else {
741 		DEBUG(("53c700: sync 1 async 1\n"));
742 		NCR_700_writeb(SYNC_DIV_1_0, host, SBCL_REG);
743 		NCR_700_writeb(ASYNC_DIV_1_0 | hostdata->dcntl_extra, host, DCNTL_REG);
744 		/* sync divider 1, async divider 1 */
745 		hostdata->sync_clock = hostdata->clock;
746 	}
747 	/* Calculate the actual minimum period that can be supported
748 	 * by our synchronous clock speed.  See the 710 manual for
749 	 * exact details of this calculation which is based on a
750 	 * setting of the SXFER register */
751 	min_period = 1000*(4+min_xferp)/(4*hostdata->sync_clock);
752 	hostdata->min_period = NCR_700_MIN_PERIOD;
753 	if(min_period > NCR_700_MIN_PERIOD)
754 		hostdata->min_period = min_period;
755 }
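
/* Sketch of the min_period calculation above, with purely illustrative
 * numbers: for a 50MHz synchronous clock and a minimum XFERP of 0, the
 * fastest supportable period is 1000*(4+0)/(4*50) = 20 in SDTR units,
 * i.e. 20 * 4 = 80ns.  If that works out slower (larger) than
 * NCR_700_MIN_PERIOD the calculated value wins, otherwise the
 * compile-time floor is kept. */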
756 
757 STATIC void
758 NCR_700_chip_reset(struct Scsi_Host *host)
759 {
760 	struct NCR_700_Host_Parameters *hostdata =
761 		(struct NCR_700_Host_Parameters *)host->hostdata[0];
762 	if(hostdata->chip710) {
763 		NCR_700_writeb(SOFTWARE_RESET_710, host, ISTAT_REG);
764 		udelay(100);
765 
766 		NCR_700_writeb(0, host, ISTAT_REG);
767 	} else {
768 		NCR_700_writeb(SOFTWARE_RESET, host, DCNTL_REG);
769 		udelay(100);
770 
771 		NCR_700_writeb(0, host, DCNTL_REG);
772 	}
773 
774 	mdelay(1000);
775 
776 	NCR_700_chip_setup(host);
777 }
778 
779 /* The heart of the message processing engine is that the instruction
780  * immediately after the INT is the normal case (and so must be CLEAR
781  * ACK).  If we want to do something else, we call that routine in
782  * scripts and set temp to be the normal case + 8 (skipping the CLEAR
783  * ACK) so that the routine returns correctly to resume its activity
784  * */
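
/* In other words (sketch of the two paths used below): the straight-through
 * case simply resumes at dsp, which points at the CLEAR ACK instruction;
 * the reject/reply case instead resumes at a script routine such as
 * pScript + Ent_SendMessageWithATN, and temp = dsp + 8 is written to
 * TEMP_REG so that the routine's RETURN lands just past the CLEAR ACK. */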
785 STATIC __u32
786 process_extended_message(struct Scsi_Host *host,
787 			 struct NCR_700_Host_Parameters *hostdata,
788 			 struct scsi_cmnd *SCp, __u32 dsp, __u32 dsps)
789 {
790 	__u32 resume_offset = dsp, temp = dsp + 8;
791 	__u8 pun = 0xff, lun = 0xff;
792 
793 	if(SCp != NULL) {
794 		pun = SCp->device->id;
795 		lun = SCp->device->lun;
796 	}
797 
798 	switch(hostdata->msgin[2]) {
799 	case A_SDTR_MSG:
800 		if(SCp != NULL && NCR_700_is_flag_set(SCp->device, NCR_700_DEV_BEGIN_SYNC_NEGOTIATION)) {
801 			struct scsi_target *starget = SCp->device->sdev_target;
802 			__u8 period = hostdata->msgin[3];
803 			__u8 offset = hostdata->msgin[4];
804 
805 			if(offset == 0 || period == 0) {
806 				offset = 0;
807 				period = 0;
808 			}
809 
810 			spi_offset(starget) = offset;
811 			spi_period(starget) = period;
812 
813 			if(NCR_700_is_flag_set(SCp->device, NCR_700_DEV_PRINT_SYNC_NEGOTIATION)) {
814 				spi_display_xfer_agreement(starget);
815 				NCR_700_clear_flag(SCp->device, NCR_700_DEV_PRINT_SYNC_NEGOTIATION);
816 			}
817 
818 			NCR_700_set_flag(SCp->device, NCR_700_DEV_NEGOTIATED_SYNC);
819 			NCR_700_clear_flag(SCp->device, NCR_700_DEV_BEGIN_SYNC_NEGOTIATION);
820 
821 			NCR_700_writeb(NCR_700_get_SXFER(SCp->device),
822 				       host, SXFER_REG);
823 
824 		} else {
825 			/* SDTR message out of the blue, reject it */
826 			shost_printk(KERN_WARNING, host,
827 				"Unexpected SDTR msg\n");
828 			hostdata->msgout[0] = A_REJECT_MSG;
829 			dma_sync_to_dev(hostdata, hostdata->msgout, 1);
830 			script_patch_16(hostdata, hostdata->script,
831 			                MessageCount, 1);
832 			/* SendMsgOut returns, so set up the return
833 			 * address */
834 			resume_offset = hostdata->pScript + Ent_SendMessageWithATN;
835 		}
836 		break;
837 
838 	case A_WDTR_MSG:
839 		printk(KERN_INFO "scsi%d: (%d:%d), Unsolicited WDTR after CMD, Rejecting\n",
840 		       host->host_no, pun, lun);
841 		hostdata->msgout[0] = A_REJECT_MSG;
842 		dma_sync_to_dev(hostdata, hostdata->msgout, 1);
843 		script_patch_16(hostdata, hostdata->script, MessageCount, 1);
844 		resume_offset = hostdata->pScript + Ent_SendMessageWithATN;
845 
846 		break;
847 
848 	default:
849 		printk(KERN_INFO "scsi%d (%d:%d): Unexpected message %s: ",
850 		       host->host_no, pun, lun,
851 		       NCR_700_phase[(dsps & 0xf00) >> 8]);
852 		spi_print_msg(hostdata->msgin);
853 		printk("\n");
854 		/* just reject it */
855 		hostdata->msgout[0] = A_REJECT_MSG;
856 		dma_sync_to_dev(hostdata, hostdata->msgout, 1);
857 		script_patch_16(hostdata, hostdata->script, MessageCount, 1);
858 		/* SendMsgOut returns, so set up the return
859 		 * address */
860 		resume_offset = hostdata->pScript + Ent_SendMessageWithATN;
861 	}
862 	NCR_700_writel(temp, host, TEMP_REG);
863 	return resume_offset;
864 }
865 
866 STATIC __u32
867 process_message(struct Scsi_Host *host,	struct NCR_700_Host_Parameters *hostdata,
868 		struct scsi_cmnd *SCp, __u32 dsp, __u32 dsps)
869 {
870 	/* work out where to return to */
871 	__u32 temp = dsp + 8, resume_offset = dsp;
872 	__u8 pun = 0xff, lun = 0xff;
873 
874 	if(SCp != NULL) {
875 		pun = SCp->device->id;
876 		lun = SCp->device->lun;
877 	}
878 
879 #ifdef NCR_700_DEBUG
880 	printk("scsi%d (%d:%d): message %s: ", host->host_no, pun, lun,
881 	       NCR_700_phase[(dsps & 0xf00) >> 8]);
882 	spi_print_msg(hostdata->msgin);
883 	printk("\n");
884 #endif
885 
886 	switch(hostdata->msgin[0]) {
887 
888 	case A_EXTENDED_MSG:
889 		resume_offset =  process_extended_message(host, hostdata, SCp,
890 							  dsp, dsps);
891 		break;
892 
893 	case A_REJECT_MSG:
894 		if(SCp != NULL && NCR_700_is_flag_set(SCp->device, NCR_700_DEV_BEGIN_SYNC_NEGOTIATION)) {
895 			/* Rejected our sync negotiation attempt */
896 			spi_period(SCp->device->sdev_target) =
897 				spi_offset(SCp->device->sdev_target) = 0;
898 			NCR_700_set_flag(SCp->device, NCR_700_DEV_NEGOTIATED_SYNC);
899 			NCR_700_clear_flag(SCp->device, NCR_700_DEV_BEGIN_SYNC_NEGOTIATION);
900 		} else if(SCp != NULL && NCR_700_get_tag_neg_state(SCp->device) == NCR_700_DURING_TAG_NEGOTIATION) {
901 			/* rejected our first simple tag message */
902 			scmd_printk(KERN_WARNING, SCp,
903 				"Rejected first tag queue attempt, turning off tag queueing\n");
904 			/* we're done negotiating */
905 			NCR_700_set_tag_neg_state(SCp->device, NCR_700_FINISHED_TAG_NEGOTIATION);
906 			hostdata->tag_negotiated &= ~(1<<scmd_id(SCp));
907 
908 			SCp->device->tagged_supported = 0;
909 			SCp->device->simple_tags = 0;
910 			scsi_change_queue_depth(SCp->device, host->cmd_per_lun);
911 		} else {
912 			shost_printk(KERN_WARNING, host,
913 				"(%d:%d) Unexpected REJECT Message %s\n",
914 			       pun, lun,
915 			       NCR_700_phase[(dsps & 0xf00) >> 8]);
916 			/* however, just ignore it */
917 		}
918 		break;
919 
920 	case A_PARITY_ERROR_MSG:
921 		printk(KERN_ERR "scsi%d (%d:%d) Parity Error!\n", host->host_no,
922 		       pun, lun);
923 		NCR_700_internal_bus_reset(host);
924 		break;
925 	case A_SIMPLE_TAG_MSG:
926 		printk(KERN_INFO "scsi%d (%d:%d) SIMPLE TAG %d %s\n", host->host_no,
927 		       pun, lun, hostdata->msgin[1],
928 		       NCR_700_phase[(dsps & 0xf00) >> 8]);
929 		/* just ignore it */
930 		break;
931 	default:
932 		printk(KERN_INFO "scsi%d (%d:%d): Unexpected message %s: ",
933 		       host->host_no, pun, lun,
934 		       NCR_700_phase[(dsps & 0xf00) >> 8]);
935 
936 		spi_print_msg(hostdata->msgin);
937 		printk("\n");
938 		/* just reject it */
939 		hostdata->msgout[0] = A_REJECT_MSG;
940 		dma_sync_to_dev(hostdata, hostdata->msgout, 1);
941 		script_patch_16(hostdata, hostdata->script, MessageCount, 1);
942 		/* SendMsgOut returns, so set up the return
943 		 * address */
944 		resume_offset = hostdata->pScript + Ent_SendMessageWithATN;
945 
946 		break;
947 	}
948 	NCR_700_writel(temp, host, TEMP_REG);
949 	/* set us up to receive another message */
950 	dma_sync_from_dev(hostdata, hostdata->msgin, MSG_ARRAY_SIZE);
951 	return resume_offset;
952 }
953 
954 STATIC __u32
955 process_script_interrupt(__u32 dsps, __u32 dsp, struct scsi_cmnd *SCp,
956 			 struct Scsi_Host *host,
957 			 struct NCR_700_Host_Parameters *hostdata)
958 {
959 	__u32 resume_offset = 0;
960 	__u8 pun = 0xff, lun=0xff;
961 
962 	if(SCp != NULL) {
963 		pun = SCp->device->id;
964 		lun = SCp->device->lun;
965 	}
966 
967 	if(dsps == A_GOOD_STATUS_AFTER_STATUS) {
968 		DEBUG(("  COMMAND COMPLETE, status=%02x\n",
969 		       hostdata->status[0]));
970 		/* OK, if TCQ still under negotiation, we now know it works */
971 		if (NCR_700_get_tag_neg_state(SCp->device) == NCR_700_DURING_TAG_NEGOTIATION)
972 			NCR_700_set_tag_neg_state(SCp->device,
973 						  NCR_700_FINISHED_TAG_NEGOTIATION);
974 
975 		/* check for contingent allegiance conditions */
976 		if(status_byte(hostdata->status[0]) == CHECK_CONDITION ||
977 		   status_byte(hostdata->status[0]) == COMMAND_TERMINATED) {
978 			struct NCR_700_command_slot *slot =
979 				(struct NCR_700_command_slot *)SCp->host_scribble;
980 			if(slot->flags == NCR_700_FLAG_AUTOSENSE) {
981 				/* OOPS: bad device, returning another
982 				 * contingent allegiance condition */
983 				scmd_printk(KERN_ERR, SCp,
984 					"broken device is looping in contingent allegiance: ignoring\n");
985 				NCR_700_scsi_done(hostdata, SCp, hostdata->status[0]);
986 			} else {
987 				char *cmnd =
988 					NCR_700_get_sense_cmnd(SCp->device);
989 #ifdef NCR_DEBUG
990 				scsi_print_command(SCp);
991 				printk("  cmd %p has status %d, requesting sense\n",
992 				       SCp, hostdata->status[0]);
993 #endif
994 				/* we can destroy the command here
995 				 * because the contingent allegiance
996 				 * condition will cause a retry which
997 				 * will re-copy the command from the
998 				 * saved data_cmnd.  We also unmap any
999 				 * data associated with the command
1000 				 * here */
1001 				NCR_700_unmap(hostdata, SCp, slot);
1002 				dma_unmap_single(hostdata->dev, slot->pCmd,
1003 						 MAX_COMMAND_SIZE,
1004 						 DMA_TO_DEVICE);
1005 
1006 				cmnd[0] = REQUEST_SENSE;
1007 				cmnd[1] = (lun & 0x7) << 5;
1008 				cmnd[2] = 0;
1009 				cmnd[3] = 0;
1010 				cmnd[4] = SCSI_SENSE_BUFFERSIZE;
1011 				cmnd[5] = 0;
1012 				/* Here's a quiet hack: the
1013 				 * REQUEST_SENSE command is six bytes,
1014 				 * so store a flag indicating that
1015 				 * this was an internal sense request
1016 				 * and the original status at the end
1017 				 * of the command */
1018 				cmnd[6] = NCR_700_INTERNAL_SENSE_MAGIC;
1019 				cmnd[7] = hostdata->status[0];
1020 				cmnd[8] = SCp->cmd_len;
1021 				SCp->cmd_len = 6; /* command length for
1022 						   * REQUEST_SENSE */
1023 				slot->pCmd = dma_map_single(hostdata->dev, cmnd, MAX_COMMAND_SIZE, DMA_TO_DEVICE);
1024 				slot->dma_handle = dma_map_single(hostdata->dev, SCp->sense_buffer, SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
1025 				slot->SG[0].ins = bS_to_host(SCRIPT_MOVE_DATA_IN | SCSI_SENSE_BUFFERSIZE);
1026 				slot->SG[0].pAddr = bS_to_host(slot->dma_handle);
1027 				slot->SG[1].ins = bS_to_host(SCRIPT_RETURN);
1028 				slot->SG[1].pAddr = 0;
1029 				slot->resume_offset = hostdata->pScript;
1030 				dma_sync_to_dev(hostdata, slot->SG, sizeof(slot->SG[0])*2);
1031 				dma_sync_from_dev(hostdata, SCp->sense_buffer, SCSI_SENSE_BUFFERSIZE);
1032 
1033 				/* queue the command for reissue */
1034 				slot->state = NCR_700_SLOT_QUEUED;
1035 				slot->flags = NCR_700_FLAG_AUTOSENSE;
1036 				hostdata->state = NCR_700_HOST_FREE;
1037 				hostdata->cmd = NULL;
1038 			}
1039 		} else {
1040 			// Currently rely on the mid layer evaluation
1041 			// of the tag queuing capability
1042 			//
1043 			//if(status_byte(hostdata->status[0]) == GOOD &&
1044 			//   SCp->cmnd[0] == INQUIRY && SCp->use_sg == 0) {
1045 			//	/* Piggy back the tag queueing support
1046 			//	 * on this command */
1047 			//	dma_sync_single_for_cpu(hostdata->dev,
1048 			//			    slot->dma_handle,
1049 			//			    SCp->request_bufflen,
1050 			//			    DMA_FROM_DEVICE);
1051 			//	if(((char *)SCp->request_buffer)[7] & 0x02) {
1052 			//		scmd_printk(KERN_INFO, SCp,
1053 			//		     "Enabling Tag Command Queuing\n");
1054 			//		hostdata->tag_negotiated |= (1<<scmd_id(SCp));
1055 			//		NCR_700_set_flag(SCp->device, NCR_700_DEV_BEGIN_TAG_QUEUEING);
1056 			//	} else {
1057 			//		NCR_700_clear_flag(SCp->device, NCR_700_DEV_BEGIN_TAG_QUEUEING);
1058 			//		hostdata->tag_negotiated &= ~(1<<scmd_id(SCp));
1059 			//	}
1060 			//}
1061 			NCR_700_scsi_done(hostdata, SCp, hostdata->status[0]);
1062 		}
1063 	} else if((dsps & 0xfffff0f0) == A_UNEXPECTED_PHASE) {
1064 		__u8 i = (dsps & 0xf00) >> 8;
1065 
1066 		scmd_printk(KERN_ERR, SCp, "UNEXPECTED PHASE %s (%s)\n",
1067 		       NCR_700_phase[i],
1068 		       sbcl_to_string(NCR_700_readb(host, SBCL_REG)));
1069 		scmd_printk(KERN_ERR, SCp, "         len = %d, cmd =",
1070 			SCp->cmd_len);
1071 		scsi_print_command(SCp);
1072 
1073 		NCR_700_internal_bus_reset(host);
1074 	} else if((dsps & 0xfffff000) == A_FATAL) {
1075 		int i = (dsps & 0xfff);
1076 
1077 		printk(KERN_ERR "scsi%d: (%d:%d) FATAL ERROR: %s\n",
1078 		       host->host_no, pun, lun, NCR_700_fatal_messages[i]);
1079 		if(dsps == A_FATAL_ILLEGAL_MSG_LENGTH) {
1080 			printk(KERN_ERR "     msg begins %02x %02x\n",
1081 			       hostdata->msgin[0], hostdata->msgin[1]);
1082 		}
1083 		NCR_700_internal_bus_reset(host);
1084 	} else if((dsps & 0xfffff0f0) == A_DISCONNECT) {
1085 #ifdef NCR_700_DEBUG
1086 		__u8 i = (dsps & 0xf00) >> 8;
1087 
1088 		printk("scsi%d: (%d:%d), DISCONNECTED (%d) %s\n",
1089 		       host->host_no, pun, lun,
1090 		       i, NCR_700_phase[i]);
1091 #endif
1092 		save_for_reselection(hostdata, SCp, dsp);
1093 
1094 	} else if(dsps == A_RESELECTION_IDENTIFIED) {
1095 		__u8 lun;
1096 		struct NCR_700_command_slot *slot;
1097 		__u8 reselection_id = hostdata->reselection_id;
1098 		struct scsi_device *SDp;
1099 
1100 		lun = hostdata->msgin[0] & 0x1f;
1101 
1102 		hostdata->reselection_id = 0xff;
1103 		DEBUG(("scsi%d: (%d:%d) RESELECTED!\n",
1104 		       host->host_no, reselection_id, lun));
1105 		/* clear the reselection indicator */
1106 		SDp = __scsi_device_lookup(host, 0, reselection_id, lun);
1107 		if(unlikely(SDp == NULL)) {
1108 			printk(KERN_ERR "scsi%d: (%d:%d) HAS NO device\n",
1109 			       host->host_no, reselection_id, lun);
1110 			BUG();
1111 		}
1112 		if(hostdata->msgin[1] == A_SIMPLE_TAG_MSG) {
1113 			struct scsi_cmnd *SCp;
1114 
1115 			SCp = scsi_host_find_tag(SDp->host, hostdata->msgin[2]);
1116 			if(unlikely(SCp == NULL)) {
1117 				printk(KERN_ERR "scsi%d: (%d:%d) no saved request for tag %d\n",
1118 				       host->host_no, reselection_id, lun, hostdata->msgin[2]);
1119 				BUG();
1120 			}
1121 
1122 			slot = (struct NCR_700_command_slot *)SCp->host_scribble;
1123 			DDEBUG(KERN_DEBUG, SDp,
1124 				"reselection is tag %d, slot %p(%d)\n",
1125 				hostdata->msgin[2], slot, slot->tag);
1126 		} else {
1127 			struct NCR_700_Device_Parameters *p = SDp->hostdata;
1128 			struct scsi_cmnd *SCp = p->current_cmnd;
1129 
1130 			if(unlikely(SCp == NULL)) {
1131 				sdev_printk(KERN_ERR, SDp,
1132 					"no saved request for untagged cmd\n");
1133 				BUG();
1134 			}
1135 			slot = (struct NCR_700_command_slot *)SCp->host_scribble;
1136 		}
1137 
1138 		if(slot == NULL) {
1139 			printk(KERN_ERR "scsi%d: (%d:%d) RESELECTED but no saved command (MSG = %02x %02x %02x)!!\n",
1140 			       host->host_no, reselection_id, lun,
1141 			       hostdata->msgin[0], hostdata->msgin[1],
1142 			       hostdata->msgin[2]);
1143 		} else {
1144 			if(hostdata->state != NCR_700_HOST_BUSY)
1145 				printk(KERN_ERR "scsi%d: FATAL, host not busy during valid reselection!\n",
1146 				       host->host_no);
1147 			resume_offset = slot->resume_offset;
1148 			hostdata->cmd = slot->cmnd;
1149 
1150 			/* re-patch for this command */
1151 			script_patch_32_abs(hostdata, hostdata->script,
1152 			                    CommandAddress, slot->pCmd);
1153 			script_patch_16(hostdata, hostdata->script,
1154 					CommandCount, slot->cmnd->cmd_len);
1155 			script_patch_32_abs(hostdata, hostdata->script,
1156 			                    SGScriptStartAddress,
1157 					    to32bit(&slot->pSG[0].ins));
1158 
1159 			/* Note: setting SXFER only works if we're
1160 			 * still in the MESSAGE phase, so it is vital
1161 			 * that ACK is still asserted when we process
1162 			 * the reselection message.  The resume offset
1163 			 * should therefore always clear ACK */
1164 			NCR_700_writeb(NCR_700_get_SXFER(hostdata->cmd->device),
1165 				       host, SXFER_REG);
1166 			dma_sync_from_dev(hostdata, hostdata->msgin,
1167 				       MSG_ARRAY_SIZE);
1168 			dma_sync_to_dev(hostdata, hostdata->msgout,
1169 				       MSG_ARRAY_SIZE);
1170 			/* I'm just being paranoid here, the command should
1171 			 * already have been flushed from the cache */
1172 			dma_sync_to_dev(hostdata, slot->cmnd->cmnd,
1173 				       slot->cmnd->cmd_len);
1174 
1175 
1176 
1177 		}
1178 	} else if(dsps == A_RESELECTED_DURING_SELECTION) {
1179 
1180 		/* This section is full of debugging code because I've
1181 		 * never managed to reach it.  I think what happens is
1182 		 * that, because the 700 runs with selection
1183 		 * interrupts enabled the whole time, we take a
1184 		 * selection interrupt before we manage to get to the
1185 		 * reselected script interrupt */
1186 
1187 		__u8 reselection_id = NCR_700_readb(host, SFBR_REG);
1188 		struct NCR_700_command_slot *slot;
1189 
1190 		/* Take out our own ID */
1191 		reselection_id &= ~(1<<host->this_id);
1192 
1193 		/* I've never seen this happen, so keep this as a printk rather
1194 		 * than a debug */
1195 		printk(KERN_INFO "scsi%d: (%d:%d) RESELECTION DURING SELECTION, dsp=%08x[%04x] state=%d, count=%d\n",
1196 		       host->host_no, reselection_id, lun, dsp, dsp - hostdata->pScript, hostdata->state, hostdata->command_slot_count);
1197 
1198 		{
1199 			/* FIXME: DEBUGGING CODE */
1200 			__u32 SG = (__u32)bS_to_cpu(hostdata->script[A_SGScriptStartAddress_used[0]]);
1201 			int i;
1202 
1203 			for(i=0; i< NCR_700_COMMAND_SLOTS_PER_HOST; i++) {
1204 				if(SG >= to32bit(&hostdata->slots[i].pSG[0])
1205 				   && SG <= to32bit(&hostdata->slots[i].pSG[NCR_700_SG_SEGMENTS]))
1206 					break;
1207 			}
1208 			printk(KERN_INFO "IDENTIFIED SG segment as being %08x in slot %p, cmd %p, slot->resume_offset=%08x\n", SG, &hostdata->slots[i], hostdata->slots[i].cmnd, hostdata->slots[i].resume_offset);
1209 			SCp =  hostdata->slots[i].cmnd;
1210 		}
1211 
1212 		if(SCp != NULL) {
1213 			slot = (struct NCR_700_command_slot *)SCp->host_scribble;
1214 			/* change slot from busy to queued to redo command */
1215 			slot->state = NCR_700_SLOT_QUEUED;
1216 		}
1217 		hostdata->cmd = NULL;
1218 
1219 		if(reselection_id == 0) {
1220 			if(hostdata->reselection_id == 0xff) {
1221 				printk(KERN_ERR "scsi%d: Invalid reselection during selection!!\n", host->host_no);
1222 				return 0;
1223 			} else {
1224 				printk(KERN_ERR "scsi%d: script reselected and we took a selection interrupt\n",
1225 				       host->host_no);
1226 				reselection_id = hostdata->reselection_id;
1227 			}
1228 		} else {
1229 
1230 			/* convert to real ID */
1231 			reselection_id = bitmap_to_number(reselection_id);
1232 		}
1233 		hostdata->reselection_id = reselection_id;
1234 		/* just in case we have a stale simple tag message, clear it */
1235 		hostdata->msgin[1] = 0;
1236 		dma_sync_to_dev(hostdata, hostdata->msgin, MSG_ARRAY_SIZE);
1237 		if(hostdata->tag_negotiated & (1<<reselection_id)) {
1238 			resume_offset = hostdata->pScript + Ent_GetReselectionWithTag;
1239 		} else {
1240 			resume_offset = hostdata->pScript + Ent_GetReselectionData;
1241 		}
1242 	} else if(dsps == A_COMPLETED_SELECTION_AS_TARGET) {
1243 		/* we've just disconnected from the bus, do nothing since
1244 		 * a return here will re-run the queued command slot
1245 		 * that may have been interrupted by the initial selection */
1246 		DEBUG((" SELECTION COMPLETED\n"));
1247 	} else if((dsps & 0xfffff0f0) == A_MSG_IN) {
1248 		resume_offset = process_message(host, hostdata, SCp,
1249 						dsp, dsps);
1250 	} else if((dsps &  0xfffff000) == 0) {
1251 		__u8 i = (dsps & 0xf0) >> 4, j = (dsps & 0xf00) >> 8;
1252 		printk(KERN_ERR "scsi%d: (%d:%d), unhandled script condition %s %s at %04x\n",
1253 		       host->host_no, pun, lun, NCR_700_condition[i],
1254 		       NCR_700_phase[j], dsp - hostdata->pScript);
1255 		if(SCp != NULL) {
1256 			struct scatterlist *sg;
1257 
1258 			scsi_print_command(SCp);
1259 			scsi_for_each_sg(SCp, sg, scsi_sg_count(SCp) + 1, i) {
1260 				printk(KERN_INFO " SG[%d].length = %d, move_insn=%08x, addr %08x\n", i, sg->length, ((struct NCR_700_command_slot *)SCp->host_scribble)->SG[i].ins, ((struct NCR_700_command_slot *)SCp->host_scribble)->SG[i].pAddr);
1261 			}
1262 		}
1263 		NCR_700_internal_bus_reset(host);
1264 	} else if((dsps & 0xfffff000) == A_DEBUG_INTERRUPT) {
1265 		printk(KERN_NOTICE "scsi%d (%d:%d) DEBUG INTERRUPT %d AT %08x[%04x], continuing\n",
1266 		       host->host_no, pun, lun, dsps & 0xfff, dsp, dsp - hostdata->pScript);
1267 		resume_offset = dsp;
1268 	} else {
1269 		printk(KERN_ERR "scsi%d: (%d:%d), unidentified script interrupt 0x%x at %04x\n",
1270 		       host->host_no, pun, lun, dsps, dsp - hostdata->pScript);
1271 		NCR_700_internal_bus_reset(host);
1272 	}
1273 	return resume_offset;
1274 }
1275 
1276 /* We run the 53c700 with selection interrupts always enabled.  This
1277  * means that the chip may be selected as soon as the bus frees.  On a
1278  * busy bus, this can be before the scripts engine finishes its
1279  * processing.  Therefore, part of the selection processing has to be
1280  * to find out what the scripts engine is doing and complete the
1281  * function if necessary (i.e. process the pending disconnect or save
1282  * the interrupted initial selection) */
1283 STATIC inline __u32
1284 process_selection(struct Scsi_Host *host, __u32 dsp)
1285 {
1286 	__u8 id = 0;	/* Squash compiler warning */
1287 	int count = 0;
1288 	__u32 resume_offset = 0;
1289 	struct NCR_700_Host_Parameters *hostdata =
1290 		(struct NCR_700_Host_Parameters *)host->hostdata[0];
1291 	struct scsi_cmnd *SCp = hostdata->cmd;
1292 	__u8 sbcl;
1293 
1294 	for(count = 0; count < 5; count++) {
1295 		id = NCR_700_readb(host, hostdata->chip710 ?
1296 				   CTEST9_REG : SFBR_REG);
1297 
1298 		/* Take out our own ID */
1299 		id &= ~(1<<host->this_id);
1300 		if(id != 0)
1301 			break;
1302 		udelay(5);
1303 	}
1304 	sbcl = NCR_700_readb(host, SBCL_REG);
1305 	if((sbcl & SBCL_IO) == 0) {
1306 		/* mark as having been selected rather than reselected */
1307 		id = 0xff;
1308 	} else {
1309 		/* convert to real ID */
1310 		hostdata->reselection_id = id = bitmap_to_number(id);
1311 		DEBUG(("scsi%d:  Reselected by %d\n",
1312 		       host->host_no, id));
1313 	}
1314 	if(hostdata->state == NCR_700_HOST_BUSY && SCp != NULL) {
1315 		struct NCR_700_command_slot *slot =
1316 			(struct NCR_700_command_slot *)SCp->host_scribble;
1317 		DEBUG(("  ID %d WARNING: RESELECTION OF BUSY HOST, saving cmd %p, slot %p, addr %x [%04x], resume %x!\n", id, hostdata->cmd, slot, dsp, dsp - hostdata->pScript, resume_offset));
1318 
1319 		switch(dsp - hostdata->pScript) {
1320 		case Ent_Disconnect1:
1321 		case Ent_Disconnect2:
1322 			save_for_reselection(hostdata, SCp, Ent_Disconnect2 + hostdata->pScript);
1323 			break;
1324 		case Ent_Disconnect3:
1325 		case Ent_Disconnect4:
1326 			save_for_reselection(hostdata, SCp, Ent_Disconnect4 + hostdata->pScript);
1327 			break;
1328 		case Ent_Disconnect5:
1329 		case Ent_Disconnect6:
1330 			save_for_reselection(hostdata, SCp, Ent_Disconnect6 + hostdata->pScript);
1331 			break;
1332 		case Ent_Disconnect7:
1333 		case Ent_Disconnect8:
1334 			save_for_reselection(hostdata, SCp, Ent_Disconnect8 + hostdata->pScript);
1335 			break;
1336 		case Ent_Finish1:
1337 		case Ent_Finish2:
1338 			process_script_interrupt(A_GOOD_STATUS_AFTER_STATUS, dsp, SCp, host, hostdata);
1339 			break;
1340 
1341 		default:
1342 			slot->state = NCR_700_SLOT_QUEUED;
1343 			break;
1344 			}
1345 	}
1346 	hostdata->state = NCR_700_HOST_BUSY;
1347 	hostdata->cmd = NULL;
1348 	/* clear any stale simple tag message */
1349 	hostdata->msgin[1] = 0;
1350 	dma_sync_to_dev(hostdata, hostdata->msgin, MSG_ARRAY_SIZE);
1351 
1352 	if(id == 0xff) {
1353 		/* Selected as target, Ignore */
1354 		resume_offset = hostdata->pScript + Ent_SelectedAsTarget;
1355 	} else if(hostdata->tag_negotiated & (1<<id)) {
1356 		resume_offset = hostdata->pScript + Ent_GetReselectionWithTag;
1357 	} else {
1358 		resume_offset = hostdata->pScript + Ent_GetReselectionData;
1359 	}
1360 	return resume_offset;
1361 }
1362 
1363 static inline void
1364 NCR_700_clear_fifo(struct Scsi_Host *host) {
1365 	const struct NCR_700_Host_Parameters *hostdata
1366 		= (struct NCR_700_Host_Parameters *)host->hostdata[0];
1367 	if(hostdata->chip710) {
1368 		NCR_700_writeb(CLR_FIFO_710, host, CTEST8_REG);
1369 	} else {
1370 		NCR_700_writeb(CLR_FIFO, host, DFIFO_REG);
1371 	}
1372 }
1373 
1374 static inline void
1375 NCR_700_flush_fifo(struct Scsi_Host *host) {
1376 	const struct NCR_700_Host_Parameters *hostdata
1377 		= (struct NCR_700_Host_Parameters *)host->hostdata[0];
1378 	if(hostdata->chip710) {
1379 		NCR_700_writeb(FLUSH_DMA_FIFO_710, host, CTEST8_REG);
1380 		udelay(10);
1381 		NCR_700_writeb(0, host, CTEST8_REG);
1382 	} else {
1383 		NCR_700_writeb(FLUSH_DMA_FIFO, host, DFIFO_REG);
1384 		udelay(10);
1385 		NCR_700_writeb(0, host, DFIFO_REG);
1386 	}
1387 }
1388 
1389 
1390 /* The queue lock with interrupts disabled must be held on entry to
1391  * this function */
1392 STATIC int
1393 NCR_700_start_command(struct scsi_cmnd *SCp)
1394 {
1395 	struct NCR_700_command_slot *slot =
1396 		(struct NCR_700_command_slot *)SCp->host_scribble;
1397 	struct NCR_700_Host_Parameters *hostdata =
1398 		(struct NCR_700_Host_Parameters *)SCp->device->host->hostdata[0];
1399 	__u16 count = 1;	/* for IDENTIFY message */
1400 	u8 lun = SCp->device->lun;
1401 
1402 	if(hostdata->state != NCR_700_HOST_FREE) {
1403 		/* keep this inside the lock to close the race window where
1404 		 * the running command finishes on another CPU while we don't
1405 		 * change the state to queued on this one */
1406 		slot->state = NCR_700_SLOT_QUEUED;
1407 
1408 		DEBUG(("scsi%d: host busy, queueing command %p, slot %p\n",
1409 		       SCp->device->host->host_no, slot->cmnd, slot));
1410 		return 0;
1411 	}
1412 	hostdata->state = NCR_700_HOST_BUSY;
1413 	hostdata->cmd = SCp;
1414 	slot->state = NCR_700_SLOT_BUSY;
1415 	/* keep interrupts disabled until we have the command correctly
1416 	 * set up so we cannot take a selection interrupt */
1417 
1418 	hostdata->msgout[0] = NCR_700_identify((SCp->cmnd[0] != REQUEST_SENSE &&
1419 						slot->flags != NCR_700_FLAG_AUTOSENSE),
1420 					       lun);
1421 	/* for INQUIRY or REQUEST_SENSE commands, we cannot be sure
1422 	 * if the negotiated transfer parameters still hold, so
1423 	 * always renegotiate them */
1424 	if(SCp->cmnd[0] == INQUIRY || SCp->cmnd[0] == REQUEST_SENSE ||
1425 	   slot->flags == NCR_700_FLAG_AUTOSENSE) {
1426 		NCR_700_clear_flag(SCp->device, NCR_700_DEV_NEGOTIATED_SYNC);
1427 	}
1428 
1429 	/* REQUEST_SENSE is asking for contingent I_T_L(_Q) status.
1430 	 * If a contingent allegiance condition exists, the device
1431 	 * will refuse all tags, so send the request sense as untagged
1432 	 * */
1433 	if((hostdata->tag_negotiated & (1<<scmd_id(SCp)))
1434 	   && (slot->tag != SCSI_NO_TAG && SCp->cmnd[0] != REQUEST_SENSE &&
1435 	       slot->flags != NCR_700_FLAG_AUTOSENSE)) {
1436 		count += spi_populate_tag_msg(&hostdata->msgout[count], SCp);
1437 	}
1438 
1439 	if(hostdata->fast &&
1440 	   NCR_700_is_flag_clear(SCp->device, NCR_700_DEV_NEGOTIATED_SYNC)) {
1441 		count += spi_populate_sync_msg(&hostdata->msgout[count],
1442 				spi_period(SCp->device->sdev_target),
1443 				spi_offset(SCp->device->sdev_target));
1444 		NCR_700_set_flag(SCp->device, NCR_700_DEV_BEGIN_SYNC_NEGOTIATION);
1445 	}
1446 
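	/* At this point msgout[] holds the message-out sequence for this
	 * command.  As an illustration (byte values per SCSI-2, assuming the
	 * spi transport helpers fill in the standard messages): a tagged
	 * command to LUN 1 that also needs to renegotiate sync might carry
	 *
	 *	0xc1                IDENTIFY, disconnect allowed, LUN 1
	 *	0x20 <tag>          SIMPLE QUEUE TAG
	 *	0x01 0x03 0x01 <period> <offset>   SDTR extended message
	 *
	 * and count reflects the total number of bytes queued. */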
1447 	script_patch_16(hostdata, hostdata->script, MessageCount, count);
1448 
1449 	script_patch_ID(hostdata, hostdata->script, Device_ID, 1<<scmd_id(SCp));
1450 
1451 	script_patch_32_abs(hostdata, hostdata->script, CommandAddress,
1452 			    slot->pCmd);
1453 	script_patch_16(hostdata, hostdata->script, CommandCount, SCp->cmd_len);
1454 	/* finally plumb the beginning of the SG list into the script
1455 	 * */
1456 	script_patch_32_abs(hostdata, hostdata->script,
1457 	                    SGScriptStartAddress, to32bit(&slot->pSG[0].ins));
1458 	NCR_700_clear_fifo(SCp->device->host);
1459 
1460 	if(slot->resume_offset == 0)
1461 		slot->resume_offset = hostdata->pScript;
1462 	/* now perform all the writebacks and invalidates */
1463 	dma_sync_to_dev(hostdata, hostdata->msgout, count);
1464 	dma_sync_from_dev(hostdata, hostdata->msgin, MSG_ARRAY_SIZE);
1465 	dma_sync_to_dev(hostdata, SCp->cmnd, SCp->cmd_len);
1466 	dma_sync_from_dev(hostdata, hostdata->status, 1);
1467 
1468 	/* set the synchronous period/offset */
1469 	NCR_700_writeb(NCR_700_get_SXFER(SCp->device),
1470 		       SCp->device->host, SXFER_REG);
1471 	NCR_700_writel(slot->temp, SCp->device->host, TEMP_REG);
1472 	NCR_700_writel(slot->resume_offset, SCp->device->host, DSP_REG);
1473 
1474 	return 1;
1475 }
1476 
1477 irqreturn_t
1478 NCR_700_intr(int irq, void *dev_id)
1479 {
1480 	struct Scsi_Host *host = (struct Scsi_Host *)dev_id;
1481 	struct NCR_700_Host_Parameters *hostdata =
1482 		(struct NCR_700_Host_Parameters *)host->hostdata[0];
1483 	__u8 istat;
1484 	__u32 resume_offset = 0;
1485 	__u8 pun = 0xff, lun = 0xff;
1486 	unsigned long flags;
1487 	int handled = 0;
1488 
1489 	/* Use the host lock to serialise access to the 53c700
1490 	 * hardware.  Note: In future, we may need to take the queue
1491 	 * lock to enter the done routines.  When that happens, we
1492 	 * need to ensure that for this driver, the host lock and the
1493 	 * queue lock point to the same thing. */
1494 	spin_lock_irqsave(host->host_lock, flags);
1495 	if((istat = NCR_700_readb(host, ISTAT_REG))
1496 	      & (SCSI_INT_PENDING | DMA_INT_PENDING)) {
1497 		__u32 dsps;
1498 		__u8 sstat0 = 0, dstat = 0;
1499 		__u32 dsp;
1500 		struct scsi_cmnd *SCp = hostdata->cmd;
1501 		enum NCR_700_Host_State state;
1502 
1503 		handled = 1;
1504 		state = hostdata->state;
1505 		SCp = hostdata->cmd;
1506 
1507 		if(istat & SCSI_INT_PENDING) {
1508 			udelay(10);
1509 
1510 			sstat0 = NCR_700_readb(host, SSTAT0_REG);
1511 		}
1512 
1513 		if(istat & DMA_INT_PENDING) {
1514 			udelay(10);
1515 
1516 			dstat = NCR_700_readb(host, DSTAT_REG);
1517 		}
1518 
1519 		dsps = NCR_700_readl(host, DSPS_REG);
1520 		dsp = NCR_700_readl(host, DSP_REG);
1521 
1522 		DEBUG(("scsi%d: istat %02x sstat0 %02x dstat %02x dsp %04x[%08x] dsps 0x%x\n",
1523 		       host->host_no, istat, sstat0, dstat,
1524 		       (dsp - (__u32)(hostdata->pScript))/4,
1525 		       dsp, dsps));
1526 
1527 		if(SCp != NULL) {
1528 			pun = SCp->device->id;
1529 			lun = SCp->device->lun;
1530 		}
1531 
1532 		if(sstat0 & SCSI_RESET_DETECTED) {
1533 			struct scsi_device *SDp;
1534 			int i;
1535 
1536 			hostdata->state = NCR_700_HOST_BUSY;
1537 
1538 			printk(KERN_ERR "scsi%d: Bus Reset detected, executing command %p, slot %p, dsp %08x[%04x]\n",
1539 			       host->host_no, SCp, SCp == NULL ? NULL : SCp->host_scribble, dsp, dsp - hostdata->pScript);
1540 
1541 			scsi_report_bus_reset(host, 0);
1542 
1543 			/* clear all the negotiated parameters */
1544 			__shost_for_each_device(SDp, host)
1545 				NCR_700_clear_flag(SDp, ~0);
1546 
1547 			/* clear all the slots and their pending commands */
1548 			for(i = 0; i < NCR_700_COMMAND_SLOTS_PER_HOST; i++) {
1549 				struct scsi_cmnd *SCp;
1550 				struct NCR_700_command_slot *slot =
1551 					&hostdata->slots[i];
1552 
1553 				if(slot->state == NCR_700_SLOT_FREE)
1554 					continue;
1555 
1556 				SCp = slot->cmnd;
1557 				printk(KERN_ERR " failing command because of reset, slot %p, cmnd %p\n",
1558 				       slot, SCp);
1559 				free_slot(slot, hostdata);
1560 				SCp->host_scribble = NULL;
1561 				NCR_700_set_depth(SCp->device, 0);
1562 				/* NOTE: deadlock potential here: we
1563 				 * rely on the mid-layer guarantee that
1564 				 * scsi_done won't try to issue the
1565 				 * command again; otherwise we would
1566 				 * deadlock on the
1567 				 * hostdata->state_lock */
1568 				SCp->result = DID_RESET << 16;
1569 				SCp->scsi_done(SCp);
1570 			}
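			/* give the bus time to settle after the reset,
			 * then reprogram the chip registers */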
1571 			mdelay(25);
1572 			NCR_700_chip_setup(host);
1573 
1574 			hostdata->state = NCR_700_HOST_FREE;
1575 			hostdata->cmd = NULL;
1576 			/* signal back if this was an eh induced reset */
1577 			if(hostdata->eh_complete != NULL)
1578 				complete(hostdata->eh_complete);
1579 			goto out_unlock;
1580 		} else if(sstat0 & SELECTION_TIMEOUT) {
1581 			DEBUG(("scsi%d: (%d:%d) selection timeout\n",
1582 			       host->host_no, pun, lun));
1583 			NCR_700_scsi_done(hostdata, SCp, DID_NO_CONNECT<<16);
1584 		} else if(sstat0 & PHASE_MISMATCH) {
1585 			struct NCR_700_command_slot *slot = (SCp == NULL) ? NULL :
1586 				(struct NCR_700_command_slot *)SCp->host_scribble;
1587 
1588 			if(dsp == Ent_SendMessage + 8 + hostdata->pScript) {
1589 				/* It wants to reply to some part of
1590 				 * our message */
1591 #ifdef NCR_700_DEBUG
1592 				__u32 temp = NCR_700_readl(host, TEMP_REG);
1593 				int count = (hostdata->script[Ent_SendMessage/4] & 0xffffff) - ((NCR_700_readl(host, DBC_REG) & 0xffffff) + NCR_700_data_residual(host));
1594 				printk("scsi%d (%d:%d) PHASE MISMATCH IN SEND MESSAGE %d remain, return %p[%04x], phase %s\n", host->host_no, pun, lun, count, (void *)temp, temp - hostdata->pScript, sbcl_to_string(NCR_700_readb(host, SBCL_REG)));
1595 #endif
1596 				resume_offset = hostdata->pScript + Ent_SendMessagePhaseMismatch;
1597 			} else if(dsp >= to32bit(&slot->pSG[0].ins) &&
1598 				  dsp <= to32bit(&slot->pSG[NCR_700_SG_SEGMENTS].ins)) {
1599 				int data_transfer = NCR_700_readl(host, DBC_REG) & 0xffffff;
1600 				int SGcount = (dsp - to32bit(&slot->pSG[0].ins))/sizeof(struct NCR_700_SG_List);
1601 				int residual = NCR_700_data_residual(host);
1602 				int i;
1603 #ifdef NCR_700_DEBUG
1604 				__u32 naddr = NCR_700_readl(host, DNAD_REG);
1605 
1606 				printk("scsi%d: (%d:%d) Expected phase mismatch in slot->SG[%d], transferred 0x%x\n",
1607 				       host->host_no, pun, lun,
1608 				       SGcount, data_transfer);
1609 				scsi_print_command(SCp);
1610 				if(residual) {
1611 					printk("scsi%d: (%d:%d) Expected phase mismatch in slot->SG[%d], transferred 0x%x, residual %d\n",
1612 				       host->host_no, pun, lun,
1613 				       SGcount, data_transfer, residual);
1614 				}
1615 #endif
1616 				data_transfer += residual;
1617 
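				/* data_transfer is now the byte count the
				 * interrupted MOVE did NOT manage to send:
				 * shrink that SG entry to the remaining
				 * count and advance its address past the
				 * bytes already transferred so it can be
				 * replayed safely */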
1618 				if(data_transfer != 0) {
1619 					int count;
1620 					__u32 pAddr;
1621 
1622 					SGcount--;
1623 
1624 					count = (bS_to_cpu(slot->SG[SGcount].ins) & 0x00ffffff);
1625 					DEBUG(("DATA TRANSFER MISMATCH, count = %d, transferred %d\n", count, count-data_transfer));
1626 					slot->SG[SGcount].ins &= bS_to_host(0xff000000);
1627 					slot->SG[SGcount].ins |= bS_to_host(data_transfer);
1628 					pAddr = bS_to_cpu(slot->SG[SGcount].pAddr);
1629 					pAddr += (count - data_transfer);
1630 #ifdef NCR_700_DEBUG
1631 					if(pAddr != naddr) {
1632 						printk("scsi%d (%d:%d) transfer mismatch pAddr=%lx, naddr=%lx, data_transfer=%d, residual=%d\n", host->host_no, pun, lun, (unsigned long)pAddr, (unsigned long)naddr, data_transfer, residual);
1633 					}
1634 #endif
1635 					slot->SG[SGcount].pAddr = bS_to_host(pAddr);
1636 				}
1637 				/* set the executed moves to nops */
1638 				for(i=0; i<SGcount; i++) {
1639 					slot->SG[i].ins = bS_to_host(SCRIPT_NOP);
1640 					slot->SG[i].pAddr = 0;
1641 				}
1642 				dma_sync_to_dev(hostdata, slot->SG, sizeof(slot->SG));
1643 				/* and pretend we disconnected after
1644 				 * the command phase */
1645 				resume_offset = hostdata->pScript + Ent_MsgInDuringData;
1646 				/* make sure all the data is flushed */
1647 				NCR_700_flush_fifo(host);
1648 			} else {
1649 				__u8 sbcl = NCR_700_readb(host, SBCL_REG);
1650 				printk(KERN_ERR "scsi%d: (%d:%d) phase mismatch at %04x, phase %s\n",
1651 				       host->host_no, pun, lun, dsp - hostdata->pScript, sbcl_to_string(sbcl));
1652 				NCR_700_internal_bus_reset(host);
1653 			}
1654 
1655 		} else if(sstat0 & SCSI_GROSS_ERROR) {
1656 			printk(KERN_ERR "scsi%d: (%d:%d) GROSS ERROR\n",
1657 			       host->host_no, pun, lun);
1658 			NCR_700_scsi_done(hostdata, SCp, DID_ERROR<<16);
1659 		} else if(sstat0 & PARITY_ERROR) {
1660 			printk(KERN_ERR "scsi%d: (%d:%d) PARITY ERROR\n",
1661 			       host->host_no, pun, lun);
1662 			NCR_700_scsi_done(hostdata, SCp, DID_ERROR<<16);
1663 		} else if(dstat & SCRIPT_INT_RECEIVED) {
1664 			DEBUG(("scsi%d: (%d:%d) ====>SCRIPT INTERRUPT<====\n",
1665 			       host->host_no, pun, lun));
1666 			resume_offset = process_script_interrupt(dsps, dsp, SCp, host, hostdata);
1667 		} else if(dstat & (ILGL_INST_DETECTED)) {
1668 			printk(KERN_ERR "scsi%d: (%d:%d) Illegal Instruction detected at 0x%08x[0x%x]!!!\n"
1669 			       "         Please email James.Bottomley@HansenPartnership.com with the details\n",
1670 			       host->host_no, pun, lun,
1671 			       dsp, dsp - hostdata->pScript);
1672 			NCR_700_scsi_done(hostdata, SCp, DID_ERROR<<16);
1673 		} else if(dstat & (WATCH_DOG_INTERRUPT|ABORTED)) {
1674 			printk(KERN_ERR "scsi%d: (%d:%d) serious DMA problem, dstat=%02x\n",
1675 			       host->host_no, pun, lun, dstat);
1676 			NCR_700_scsi_done(hostdata, SCp, DID_ERROR<<16);
1677 		}
1678 
1679 
1680 		/* NOTE: selection interrupt processing MUST occur
1681 		 * after script interrupt processing to correctly cope
1682 		 * with the case where the target disconnects and then
1683 		 * reselects us before we have processed the
1684 		 * disconnection */
1685 		if(sstat0 & SELECTED) {
1686 			/* FIXME: It currently takes at least FOUR
1687 			 * interrupts to complete a command that
1688 			 * disconnects: one for the disconnect, one
1689 			 * for the reselection, one to get the
1690 			 * reselection data and one to complete the
1691 			 * command.  If we guess the reselected
1692 			 * command here and prepare it, we only need
1693 			 * to get a reselection data interrupt if we
1694 			 * guessed wrongly.  Since the interrupt
1695 			 * overhead is much greater than the command
1696 			 * setup, this would be an efficient
1697 			 * optimisation particularly as we probably
1698 			 * only have one outstanding command on a
1699 			 * target most of the time */
1700 
1701 			resume_offset = process_selection(host, dsp);
1702 
1703 		}
1704 
1705 	}
1706 
1707 	if(resume_offset) {
1708 		if(hostdata->state != NCR_700_HOST_BUSY) {
1709 			printk(KERN_ERR "scsi%d: Driver error: resume at 0x%08x [0x%04x] with non busy host!\n",
1710 			       host->host_no, resume_offset, resume_offset - hostdata->pScript);
1711 			hostdata->state = NCR_700_HOST_BUSY;
1712 		}
1713 
1714 		DEBUG(("Attempting to resume at %x\n", resume_offset));
1715 		NCR_700_clear_fifo(host);
1716 		NCR_700_writel(resume_offset, host, DSP_REG);
1717 	}
1718 	/* There is probably a technical no-no about this: if we're a
1719 	 * shared interrupt and we got this interrupt because the
1720 	 * other device needs servicing, not us, we're still going to
1721 	 * check our queued commands here---of course, there shouldn't
1722 	 * be any outstanding.... */
1723 	if(hostdata->state == NCR_700_HOST_FREE) {
1724 		int i;
1725 
1726 		for(i = 0; i < NCR_700_COMMAND_SLOTS_PER_HOST; i++) {
1727 			/* fairness: always run the queue from the last
1728 			 * position we left off */
1729 			int j = (i + hostdata->saved_slot_position)
1730 				% NCR_700_COMMAND_SLOTS_PER_HOST;
1731 
1732 			if(hostdata->slots[j].state != NCR_700_SLOT_QUEUED)
1733 				continue;
1734 			if(NCR_700_start_command(hostdata->slots[j].cmnd)) {
1735 				DEBUG(("scsi%d: Issuing saved command slot %p, cmd %p\t\n",
1736 				       host->host_no, &hostdata->slots[j],
1737 				       hostdata->slots[j].cmnd));
1738 				hostdata->saved_slot_position = j + 1;
1739 			}
1740 
1741 			break;
1742 		}
1743 	}
1744  out_unlock:
1745 	spin_unlock_irqrestore(host->host_lock, flags);
1746 	return IRQ_RETVAL(handled);
1747 }
1748 
1749 static int
1750 NCR_700_queuecommand_lck(struct scsi_cmnd *SCp, void (*done)(struct scsi_cmnd *))
1751 {
1752 	struct NCR_700_Host_Parameters *hostdata =
1753 		(struct NCR_700_Host_Parameters *)SCp->device->host->hostdata[0];
1754 	__u32 move_ins;
1755 	enum dma_data_direction direction;
1756 	struct NCR_700_command_slot *slot;
1757 
1758 	if(hostdata->command_slot_count >= NCR_700_COMMAND_SLOTS_PER_HOST) {
1759 		/* We're over our allocation; this should never happen
1760 		 * since we report the max allocation to the mid layer */
1761 		printk(KERN_WARNING "scsi%d: Command depth has gone over queue depth\n", SCp->device->host->host_no);
1762 		return 1;
1763 	}
1764 	/* check for untagged commands.  We cannot accept an untagged
1765 	 * command while any other commands are outstanding.  A command
1766 	 * could be untagged because:
1767 	 * - The tag negotiated bitmap is clear
1768 	 * - The blk layer sent an untagged command
1769 	 */
1770 	if(NCR_700_get_depth(SCp->device) != 0
1771 	   && (!(hostdata->tag_negotiated & (1<<scmd_id(SCp)))
1772 	       || !(SCp->flags & SCMD_TAGGED))) {
1773 		CDEBUG(KERN_ERR, SCp, "has non zero depth %d\n",
1774 		       NCR_700_get_depth(SCp->device));
1775 		return SCSI_MLQUEUE_DEVICE_BUSY;
1776 	}
1777 	if(NCR_700_get_depth(SCp->device) >= SCp->device->queue_depth) {
1778 		CDEBUG(KERN_ERR, SCp, "has max tag depth %d\n",
1779 		       NCR_700_get_depth(SCp->device));
1780 		return SCSI_MLQUEUE_DEVICE_BUSY;
1781 	}
1782 	NCR_700_set_depth(SCp->device, NCR_700_get_depth(SCp->device) + 1);
1783 
1784 	/* begin the command here */
1785 	/* no need to check for NULL: the command_slot_count test above
1786 	 * ensures a slot is free */
1787 	slot = find_empty_slot(hostdata);
1788 
1789 	slot->cmnd = SCp;
1790 
1791 	SCp->scsi_done = done;
1792 	SCp->host_scribble = (unsigned char *)slot;
1793 	SCp->SCp.ptr = NULL;
1794 	SCp->SCp.buffer = NULL;
1795 
1796 #ifdef NCR_700_DEBUG
1797 	printk("53c700: scsi%d, command ", SCp->device->host->host_no);
1798 	scsi_print_command(SCp);
1799 #endif
1800 	if ((SCp->flags & SCMD_TAGGED)
1801 	   && (hostdata->tag_negotiated &(1<<scmd_id(SCp))) == 0
1802 	   && NCR_700_get_tag_neg_state(SCp->device) == NCR_700_START_TAG_NEGOTIATION) {
1803 		scmd_printk(KERN_ERR, SCp, "Enabling Tag Command Queuing\n");
1804 		hostdata->tag_negotiated |= (1<<scmd_id(SCp));
1805 		NCR_700_set_tag_neg_state(SCp->device, NCR_700_DURING_TAG_NEGOTIATION);
1806 	}
1807 
1808 	/* here we may have to process an untagged command.  The gate
1809 	 * above ensures that this will be the only one outstanding,
1810 	 * so clear the tag negotiated bit.
1811 	 *
1812 	 * FIXME: This will royally screw up on multiple LUN devices
1813 	 * */
1814 	if (!(SCp->flags & SCMD_TAGGED)
1815 	   && (hostdata->tag_negotiated &(1<<scmd_id(SCp)))) {
1816 		scmd_printk(KERN_INFO, SCp, "Disabling Tag Command Queuing\n");
1817 		hostdata->tag_negotiated &= ~(1<<scmd_id(SCp));
1818 	}
1819 
1820 	if ((hostdata->tag_negotiated & (1<<scmd_id(SCp))) &&
1821 	    SCp->device->simple_tags) {
1822 		slot->tag = SCp->request->tag;
1823 		CDEBUG(KERN_DEBUG, SCp, "sending out tag %d, slot %p\n",
1824 		       slot->tag, slot);
1825 	} else {
1826 		struct NCR_700_Device_Parameters *p = SCp->device->hostdata;
1827 
1828 		slot->tag = SCSI_NO_TAG;
1829 		/* save current command for reselection */
1830 		p->current_cmnd = SCp;
1831 	}
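	/* tagged commands are identified on reselection by the block
	 * layer tag; an untagged command is remembered in the device
	 * parameters so the reselection path can find it again */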
1832 	/* sanity check: some of the commands generated by the mid-layer
1833 	 * have an eccentric idea of their sc_data_direction */
1834 	if(!scsi_sg_count(SCp) && !scsi_bufflen(SCp) &&
1835 	   SCp->sc_data_direction != DMA_NONE) {
1836 #ifdef NCR_700_DEBUG
1837 		printk("53c700: Command");
1838 		scsi_print_command(SCp);
1839 		printk("Has wrong data direction %d\n", SCp->sc_data_direction);
1840 #endif
1841 		SCp->sc_data_direction = DMA_NONE;
1842 	}
1843 
1844 	switch (SCp->cmnd[0]) {
1845 	case REQUEST_SENSE:
1846 		/* clear the internal sense magic */
1847 		SCp->cmnd[6] = 0;
1848 		fallthrough;
1849 	default:
1850 		/* OK, get it from the command */
1851 		switch(SCp->sc_data_direction) {
1852 		case DMA_BIDIRECTIONAL:
1853 		default:
1854 			printk(KERN_ERR "53c700: Unknown command for data direction ");
1855 			scsi_print_command(SCp);
1856 
1857 			move_ins = 0;
1858 			break;
1859 		case DMA_NONE:
1860 			move_ins = 0;
1861 			break;
1862 		case DMA_FROM_DEVICE:
1863 			move_ins = SCRIPT_MOVE_DATA_IN;
1864 			break;
1865 		case DMA_TO_DEVICE:
1866 			move_ins = SCRIPT_MOVE_DATA_OUT;
1867 			break;
1868 		}
1869 	}
1870 
1871 	/* now build the scatter gather list */
1872 	direction = SCp->sc_data_direction;
1873 	if(move_ins != 0) {
1874 		int i;
1875 		int sg_count;
1876 		dma_addr_t vPtr = 0;
1877 		struct scatterlist *sg;
1878 		__u32 count = 0;
1879 
1880 		sg_count = scsi_dma_map(SCp);
1881 		BUG_ON(sg_count < 0);
1882 
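		/* build one script MOVE instruction per DMA segment; the
		 * list ends with a RETURN so the data transfer part of the
		 * script can call straight into it (its address was patched
		 * in via SGScriptStartAddress in NCR_700_start_command) */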
1883 		scsi_for_each_sg(SCp, sg, sg_count, i) {
1884 			vPtr = sg_dma_address(sg);
1885 			count = sg_dma_len(sg);
1886 
1887 			slot->SG[i].ins = bS_to_host(move_ins | count);
1888 			DEBUG((" scatter block %d: move %d[%08x] from 0x%lx\n",
1889 			       i, count, slot->SG[i].ins, (unsigned long)vPtr));
1890 			slot->SG[i].pAddr = bS_to_host(vPtr);
1891 		}
1892 		slot->SG[i].ins = bS_to_host(SCRIPT_RETURN);
1893 		slot->SG[i].pAddr = 0;
1894 		dma_sync_to_dev(hostdata, slot->SG, sizeof(slot->SG));
1895 		DEBUG((" SETTING %p to %x\n",
1896 		       (&slot->pSG[i].ins),
1897 		       slot->SG[i].ins));
1898 	}
1899 	slot->resume_offset = 0;
1900 	slot->pCmd = dma_map_single(hostdata->dev, SCp->cmnd,
1901 				    MAX_COMMAND_SIZE, DMA_TO_DEVICE);
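	/* start the command now if possible; otherwise the slot remains
	 * queued and the fairness loop in NCR_700_intr picks it up once
	 * the host goes free */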
1902 	NCR_700_start_command(SCp);
1903 	return 0;
1904 }
1905 
1906 STATIC DEF_SCSI_QCMD(NCR_700_queuecommand)
1907 
1908 STATIC int
1909 NCR_700_abort(struct scsi_cmnd * SCp)
1910 {
1911 	struct NCR_700_command_slot *slot;
1912 
1913 	scmd_printk(KERN_INFO, SCp, "abort command\n");
1914 
1915 	slot = (struct NCR_700_command_slot *)SCp->host_scribble;
1916 
1917 	if(slot == NULL)
1918 		/* no outstanding command to abort */
1919 		return SUCCESS;
1920 	if(SCp->cmnd[0] == TEST_UNIT_READY) {
1921 		/* FIXME: This is because of a problem in the new
1922 		 * error handler.  When it is in error recovery, it
1923 		 * will send a TUR to a device it thinks may still be
1924 		 * showing a problem.  If the TUR isn't responded to,
1925 		 * it will abort it and mark the device off line.
1926 		 * Unfortunately, it does no other error recovery, so
1927 		 * this would leave us with an outstanding command
1928 		 * occupying a slot.  Rather than allow this to
1929 		 * happen, we issue a bus reset to force all
1930 		 * outstanding commands to terminate here. */
1931 		NCR_700_internal_bus_reset(SCp->device->host);
1932 		/* still drop through and return failed */
1933 	}
1934 	return FAILED;
1935 
1936 }
1937 
1938 STATIC int
1939 NCR_700_host_reset(struct scsi_cmnd * SCp)
1940 {
1941 	DECLARE_COMPLETION_ONSTACK(complete);
1942 	struct NCR_700_Host_Parameters *hostdata =
1943 		(struct NCR_700_Host_Parameters *)SCp->device->host->hostdata[0];
1944 
1945 	scmd_printk(KERN_INFO, SCp,
1946 		"New error handler wants HOST reset, cmd %p\n\t", SCp);
1947 	scsi_print_command(SCp);
1948 
1949 	/* In theory, eh_complete should always be NULL because the
1950 	 * error handler is single threaded, but wait just in case a
1951 	 * reset is being handled via sg or some other path */
1952 	spin_lock_irq(SCp->device->host->host_lock);
1953 	while (hostdata->eh_complete != NULL) {
1954 		spin_unlock_irq(SCp->device->host->host_lock);
1955 		msleep_interruptible(100);
1956 		spin_lock_irq(SCp->device->host->host_lock);
1957 	}
1958 
1959 	hostdata->eh_complete = &complete;
1960 	NCR_700_internal_bus_reset(SCp->device->host);
1961 	NCR_700_chip_reset(SCp->device->host);
1962 
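	/* drop the lock and wait: the interrupt handler's bus reset
	 * path signals eh_complete once the chip reports the reset */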
1963 	spin_unlock_irq(SCp->device->host->host_lock);
1964 	wait_for_completion(&complete);
1965 	spin_lock_irq(SCp->device->host->host_lock);
1966 
1967 	hostdata->eh_complete = NULL;
1968 	/* Revalidate the transport parameters of the failing device */
1969 	if(hostdata->fast)
1970 		spi_schedule_dv_device(SCp->device);
1971 
1972 	spin_unlock_irq(SCp->device->host->host_lock);
1973 	return SUCCESS;
1974 }
1975 
1976 STATIC void
1977 NCR_700_set_period(struct scsi_target *STp, int period)
1978 {
1979 	struct Scsi_Host *SHp = dev_to_shost(STp->dev.parent);
1980 	struct NCR_700_Host_Parameters *hostdata =
1981 		(struct NCR_700_Host_Parameters *)SHp->hostdata[0];
1982 
1983 	if(!hostdata->fast)
1984 		return;
1985 
1986 	if(period < hostdata->min_period)
1987 		period = hostdata->min_period;
1988 
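	/* record the new period and clear the negotiated flags so that
	 * synchronous transfer is renegotiated with the new value */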
1989 	spi_period(STp) = period;
1990 	spi_flags(STp) &= ~(NCR_700_DEV_NEGOTIATED_SYNC |
1991 			    NCR_700_DEV_BEGIN_SYNC_NEGOTIATION);
1992 	spi_flags(STp) |= NCR_700_DEV_PRINT_SYNC_NEGOTIATION;
1993 }
1994 
1995 STATIC void
1996 NCR_700_set_offset(struct scsi_target *STp, int offset)
1997 {
1998 	struct Scsi_Host *SHp = dev_to_shost(STp->dev.parent);
1999 	struct NCR_700_Host_Parameters *hostdata =
2000 		(struct NCR_700_Host_Parameters *)SHp->hostdata[0];
2001 	int max_offset = hostdata->chip710
2002 		? NCR_710_MAX_OFFSET : NCR_700_MAX_OFFSET;
2003 
2004 	if(!hostdata->fast)
2005 		return;
2006 
2007 	if(offset > max_offset)
2008 		offset = max_offset;
2009 
2010 	/* if we're currently async, make sure the period is reasonable */
2011 	if(spi_offset(STp) == 0 && (spi_period(STp) < hostdata->min_period ||
2012 				    spi_period(STp) > 0xff))
2013 		spi_period(STp) = hostdata->min_period;
2014 
2015 	spi_offset(STp) = offset;
2016 	spi_flags(STp) &= ~(NCR_700_DEV_NEGOTIATED_SYNC |
2017 			    NCR_700_DEV_BEGIN_SYNC_NEGOTIATION);
2018 	spi_flags(STp) |= NCR_700_DEV_PRINT_SYNC_NEGOTIATION;
2019 }
2020 
2021 STATIC int
2022 NCR_700_slave_alloc(struct scsi_device *SDp)
2023 {
2024 	SDp->hostdata = kzalloc(sizeof(struct NCR_700_Device_Parameters),
2025 				GFP_KERNEL);
2026 
2027 	if (!SDp->hostdata)
2028 		return -ENOMEM;
2029 
2030 	return 0;
2031 }
2032 
2033 STATIC int
2034 NCR_700_slave_configure(struct scsi_device *SDp)
2035 {
2036 	struct NCR_700_Host_Parameters *hostdata =
2037 		(struct NCR_700_Host_Parameters *)SDp->host->hostdata[0];
2038 
2039 	/* to do here: allocate memory; build a queue_full list */
2040 	if(SDp->tagged_supported) {
2041 		scsi_change_queue_depth(SDp, NCR_700_DEFAULT_TAGS);
2042 		NCR_700_set_tag_neg_state(SDp, NCR_700_START_TAG_NEGOTIATION);
2043 	}
2044 
2045 	if(hostdata->fast) {
2046 		/* Find the correct offset and period via domain validation */
2047 		if (!spi_initial_dv(SDp->sdev_target))
2048 			spi_dv_device(SDp);
2049 	} else {
2050 		spi_offset(SDp->sdev_target) = 0;
2051 		spi_period(SDp->sdev_target) = 0;
2052 	}
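	/* domain validation calls back into NCR_700_set_period and
	 * NCR_700_set_offset above through the SPI transport class to
	 * apply the values it settles on */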
2053 	return 0;
2054 }
2055 
2056 STATIC void
2057 NCR_700_slave_destroy(struct scsi_device *SDp)
2058 {
2059 	kfree(SDp->hostdata);
2060 	SDp->hostdata = NULL;
2061 }
2062 
2063 static int
2064 NCR_700_change_queue_depth(struct scsi_device *SDp, int depth)
2065 {
2066 	if (depth > NCR_700_MAX_TAGS)
2067 		depth = NCR_700_MAX_TAGS;
2068 	return scsi_change_queue_depth(SDp, depth);
2069 }
2070 
2071 static ssize_t
2072 NCR_700_show_active_tags(struct device *dev, struct device_attribute *attr, char *buf)
2073 {
2074 	struct scsi_device *SDp = to_scsi_device(dev);
2075 
2076 	return snprintf(buf, 20, "%d\n", NCR_700_get_depth(SDp));
2077 }
2078 
2079 static struct device_attribute NCR_700_active_tags_attr = {
2080 	.attr = {
2081 		.name =		"active_tags",
2082 		.mode =		S_IRUGO,
2083 	},
2084 	.show = NCR_700_show_active_tags,
2085 };
2086 
2087 STATIC struct device_attribute *NCR_700_dev_attrs[] = {
2088 	&NCR_700_active_tags_attr,
2089 	NULL,
2090 };
2091 
2092 EXPORT_SYMBOL(NCR_700_detect);
2093 EXPORT_SYMBOL(NCR_700_release);
2094 EXPORT_SYMBOL(NCR_700_intr);
2095 
2096 static struct spi_function_template NCR_700_transport_functions =  {
2097 	.set_period	= NCR_700_set_period,
2098 	.show_period	= 1,
2099 	.set_offset	= NCR_700_set_offset,
2100 	.show_offset	= 1,
2101 };
2102 
2103 static int __init NCR_700_init(void)
2104 {
2105 	NCR_700_transport_template = spi_attach_transport(&NCR_700_transport_functions);
2106 	if(!NCR_700_transport_template)
2107 		return -ENODEV;
2108 	return 0;
2109 }
2110 
2111 static void __exit NCR_700_exit(void)
2112 {
2113 	spi_release_transport(NCR_700_transport_template);
2114 }
2115 
2116 module_init(NCR_700_init);
2117 module_exit(NCR_700_exit);
2118 
2119