1 // SPDX-License-Identifier: GPL-2.0-or-later
2 
3 /* NCR (or Symbios) 53c700 and 53c700-66 Driver
4  *
5  * Copyright (C) 2001 by James.Bottomley@HansenPartnership.com
6 **-----------------------------------------------------------------------------
7 **
8 **
9 **-----------------------------------------------------------------------------
10  */
11 
12 /* Notes:
13  *
14  * This driver is designed exclusively for these chips (virtually the
15  * earliest of the scripts engine chips).  They need their own drivers
16  * because they are missing so many of the scripts and snazzy register
17  * features of their elder brothers (the 710, 720 and 770).
18  *
19  * The 700 is the lowliest of the line: it can only do async SCSI.
20  * The 700-66 can at least do synchronous SCSI up to 10MHz.
21  *
22  * The 700 chip has no host bus interface logic of its own.  However,
23  * it is usually mapped to a location with well defined register
24  * offsets.  Therefore, if you can determine the base address and the
25  * irq your board incorporating this chip uses, you can probably use
26  * this driver to run it (although you'll probably have to write a
27  * minimal wrapper for the purpose---see the NCR_D700 driver for
28  * details about how to do this).
29  *
30  *
31  * TODO List:
32  *
33  * 1. Better statistics in the proc fs
34  *
35  * 2. Implement message queue (queues SCSI messages like commands) and make
36  *    the abort and device reset functions use it.
37  * */
38 
39 /* CHANGELOG
40  *
41  * Version 2.8
42  *
43  * Fixed bad bug affecting tag starvation processing (previously the
44  * driver would hang the system if too many tags starved).  Also fixed
45  * bad bug having to do with 10 byte command processing and REQUEST
46  * SENSE (the command would loop forever getting a transfer length
47  * mismatch in the CMD phase).
48  *
49  * Version 2.7
50  *
51  * Fixed scripts problem which caused certain devices (notably CDRWs)
52  * to hang on initial INQUIRY.  Updated NCR_700_readl/writel to use
53  * __raw_readl/writel for parisc compatibility (Thomas
54  * Bogendoerfer). Added missing SCp->request_bufflen initialisation
55  * for sense requests (Ryan Bradetich).
56  *
57  * Version 2.6
58  *
59  * Following test of the 64 bit parisc kernel by Richard Hirst,
60  * several problems have now been corrected.  Also adds support for
61  * consistent memory allocation.
62  *
63  * Version 2.5
64  *
65  * More Compatibility changes for 710 (now actually works).  Enhanced
66  * support for odd clock speeds which constrain SDTR negotiations.
67  * Corrected cacheline separation for scsi messages and status for
68  * incoherent architectures.  Use of the pci mapping functions on
69  * buffers to begin support for 64 bit drivers.
70  *
71  * Version 2.4
72  *
73  * Added support for the 53c710 chip (in 53c700 emulation mode only---no
74  * special 53c710 instructions or registers are used).
75  *
76  * Version 2.3
77  *
78  * More endianness/cache coherency changes.
79  *
80  * Better bad device handling (handles devices lying about tag
81  * queueing support and devices which fail to provide sense data on
82  * contingent allegiance conditions)
83  *
84  * Many thanks to Richard Hirst <rhirst@linuxcare.com> for patiently
85  * debugging this driver on the parisc architecture and suggesting
86  * many improvements and bug fixes.
87  *
88  * Thanks also go to Linuxcare Inc. for providing several PARISC
89  * machines for me to debug the driver on.
90  *
91  * Version 2.2
92  *
93  * Made the driver mem or io mapped; added endian invariance; added
94  * dma cache flushing operations for architectures which need it;
95  * added support for more varied clocking speeds.
96  *
97  * Version 2.1
98  *
99  * Initial modularisation from the D700.  See NCR_D700.c for the rest of
100  * the changelog.
101  * */
102 #define NCR_700_VERSION "2.8"
103 
104 #include <linux/kernel.h>
105 #include <linux/types.h>
106 #include <linux/string.h>
107 #include <linux/slab.h>
108 #include <linux/ioport.h>
109 #include <linux/delay.h>
110 #include <linux/spinlock.h>
111 #include <linux/completion.h>
112 #include <linux/init.h>
113 #include <linux/proc_fs.h>
114 #include <linux/blkdev.h>
115 #include <linux/module.h>
116 #include <linux/interrupt.h>
117 #include <linux/device.h>
118 #include <linux/pgtable.h>
119 #include <asm/dma.h>
120 #include <asm/io.h>
121 #include <asm/byteorder.h>
122 
123 #include <scsi/scsi.h>
124 #include <scsi/scsi_cmnd.h>
125 #include <scsi/scsi_dbg.h>
126 #include <scsi/scsi_eh.h>
127 #include <scsi/scsi_host.h>
128 #include <scsi/scsi_tcq.h>
129 #include <scsi/scsi_transport.h>
130 #include <scsi/scsi_transport_spi.h>
131 
132 #include "53c700.h"
133 
134 /* NOTE: For 64 bit drivers there are points in the code where we use
135  * a non dereferenceable pointer to point to a structure in dma-able
136  * memory (which is 32 bits) so that we can use all of the structure
137  * operations but take the address at the end.  This macro allows us
138  * to truncate the 64 bit pointer down to 32 bits without the compiler
139  * complaining */
140 #define to32bit(x)	((__u32)((unsigned long)(x)))
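/* For example, NCR_700_detect() below stores the bus address pSlots + offset
 * in slot->pSG, so to32bit(&slot->pSG[0].ins) later recovers the 32 bit bus
 * address of that SG instruction for patching into the script. */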
141 
142 #ifdef NCR_700_DEBUG
143 #define STATIC
144 #else
145 #define STATIC static
146 #endif
147 
148 MODULE_AUTHOR("James Bottomley");
149 MODULE_DESCRIPTION("53c700 and 53c700-66 Driver");
150 MODULE_LICENSE("GPL");
151 
152 /* This is the script */
153 #include "53c700_d.h"
154 
155 
156 STATIC int NCR_700_queuecommand(struct Scsi_Host *h, struct scsi_cmnd *);
157 STATIC int NCR_700_abort(struct scsi_cmnd * SCpnt);
158 STATIC int NCR_700_host_reset(struct scsi_cmnd * SCpnt);
159 STATIC void NCR_700_chip_setup(struct Scsi_Host *host);
160 STATIC void NCR_700_chip_reset(struct Scsi_Host *host);
161 STATIC int NCR_700_sdev_init(struct scsi_device *SDpnt);
162 STATIC int NCR_700_sdev_configure(struct scsi_device *SDpnt,
163 				  struct queue_limits *lim);
164 STATIC void NCR_700_sdev_destroy(struct scsi_device *SDpnt);
165 static int NCR_700_change_queue_depth(struct scsi_device *SDpnt, int depth);
166 
167 STATIC const struct attribute_group *NCR_700_dev_groups[];
168 
169 STATIC struct scsi_transport_template *NCR_700_transport_template = NULL;
170 
171 static char *NCR_700_phase[] = {
172 	"",
173 	"after selection",
174 	"before command phase",
175 	"after command phase",
176 	"after status phase",
177 	"after data in phase",
178 	"after data out phase",
179 	"during data phase",
180 };
181 
182 static char *NCR_700_condition[] = {
183 	"",
184 	"NOT MSG_OUT",
185 	"UNEXPECTED PHASE",
186 	"NOT MSG_IN",
187 	"UNEXPECTED MSG",
188 	"MSG_IN",
189 	"SDTR_MSG RECEIVED",
190 	"REJECT_MSG RECEIVED",
191 	"DISCONNECT_MSG RECEIVED",
192 	"MSG_OUT",
193 	"DATA_IN",
194 
195 };
196 
197 static char *NCR_700_fatal_messages[] = {
198 	"unexpected message after reselection",
199 	"still MSG_OUT after message injection",
200 	"not MSG_IN after selection",
201 	"Illegal message length received",
202 };
203 
204 static char *NCR_700_SBCL_bits[] = {
205 	"IO ",
206 	"CD ",
207 	"MSG ",
208 	"ATN ",
209 	"SEL ",
210 	"BSY ",
211 	"ACK ",
212 	"REQ ",
213 };
214 
215 static char *NCR_700_SBCL_to_phase[] = {
216 	"DATA_OUT",
217 	"DATA_IN",
218 	"CMD_OUT",
219 	"STATE",
220 	"ILLEGAL PHASE",
221 	"ILLEGAL PHASE",
222 	"MSG OUT",
223 	"MSG IN",
224 };
225 
226 /* This translates the SDTR message offset and period to a value
227  * which can be loaded into the SXFER_REG.
228  *
229  * NOTE: According to SCSI-2, the true transfer period (in ns) is
230  *       actually four times this period value */
231 static inline __u8
232 NCR_700_offset_period_to_sxfer(struct NCR_700_Host_Parameters *hostdata,
233 			       __u8 offset, __u8 period)
234 {
235 	int XFERP;
236 
237 	__u8 min_xferp = (hostdata->chip710
238 			  ? NCR_710_MIN_XFERP : NCR_700_MIN_XFERP);
239 	__u8 max_offset = (hostdata->chip710
240 			   ? NCR_710_MAX_OFFSET : NCR_700_MAX_OFFSET);
241 
242 	if(offset == 0)
243 		return 0;
244 
245 	if(period < hostdata->min_period) {
246 		printk(KERN_WARNING "53c700: Period %dns is less than this chip's minimum, setting to %d\n", period*4, NCR_700_MIN_PERIOD*4);
247 		period = hostdata->min_period;
248 	}
249 	XFERP = (period*4 * hostdata->sync_clock)/1000 - 4;
250 	if(offset > max_offset) {
251 		printk(KERN_WARNING "53c700: Offset %d exceeds chip maximum, setting to %d\n",
252 		       offset, max_offset);
253 		offset = max_offset;
254 	}
255 	if(XFERP < min_xferp) {
256 		XFERP =  min_xferp;
257 	}
258 	return (offset & 0x0f) | (XFERP & 0x07)<<4;
259 }
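/* Worked example (illustrative; assumes a 50MHz sync_clock and values that
 * are not clamped by min_period, max_offset or min_xferp): an SDTR period of
 * 25 (4*25 = 100ns) with offset 8 gives
 *
 *	XFERP = (25*4 * 50)/1000 - 4 = 1
 *	SXFER = (8 & 0x0f) | ((1 & 0x07) << 4) = 0x18
 */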
260 
261 static inline __u8
262 NCR_700_get_SXFER(struct scsi_device *SDp)
263 {
264 	struct NCR_700_Host_Parameters *hostdata =
265 		(struct NCR_700_Host_Parameters *)SDp->host->hostdata[0];
266 
267 	return NCR_700_offset_period_to_sxfer(hostdata,
268 					      spi_offset(SDp->sdev_target),
269 					      spi_period(SDp->sdev_target));
270 }
271 
272 static inline dma_addr_t virt_to_dma(struct NCR_700_Host_Parameters *h, void *p)
273 {
274 	return h->pScript + ((uintptr_t)p - (uintptr_t)h->script);
275 }
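/* For example, hostdata->msgin is allocated at memory + MSGIN_OFFSET in
 * NCR_700_detect() and hostdata->script at the start of the same block, so
 * virt_to_dma(hostdata, hostdata->msgin) is simply pScript + MSGIN_OFFSET. */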
276 
277 static inline void dma_sync_to_dev(struct NCR_700_Host_Parameters *h,
278 		void *addr, size_t size)
279 {
280 	if (h->noncoherent)
281 		dma_sync_single_for_device(h->dev, virt_to_dma(h, addr),
282 					   size, DMA_BIDIRECTIONAL);
283 }
284 
285 static inline void dma_sync_from_dev(struct NCR_700_Host_Parameters *h,
286 		void *addr, size_t size)
287 {
288 	if (h->noncoherent)
289 		dma_sync_single_for_cpu(h->dev, virt_to_dma(h, addr), size,
290 					DMA_BIDIRECTIONAL);
291 }
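#if 0	/* Illustrative only (not compiled): the sync pattern used throughout
	 * this file.  Buffers the chip reads (msgout, the command, the script)
	 * are pushed out with dma_sync_to_dev() before the script runs, while
	 * buffers the chip writes (msgin, status) are pulled back with
	 * dma_sync_from_dev() before the CPU inspects them. */
	hostdata->msgout[0] = A_REJECT_MSG;
	dma_sync_to_dev(hostdata, hostdata->msgout, 1);
	dma_sync_from_dev(hostdata, hostdata->msgin, MSG_ARRAY_SIZE);
#endif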
292 
293 struct Scsi_Host *
294 NCR_700_detect(struct scsi_host_template *tpnt,
295 	       struct NCR_700_Host_Parameters *hostdata, struct device *dev)
296 {
297 	dma_addr_t pScript, pSlots;
298 	__u8 *memory;
299 	__u32 *script;
300 	struct Scsi_Host *host;
301 	static int banner = 0;
302 	int j;
303 
304 	if (tpnt->sdev_groups == NULL)
305 		tpnt->sdev_groups = NCR_700_dev_groups;
306 
307 	memory = dma_alloc_coherent(dev, TOTAL_MEM_SIZE, &pScript, GFP_KERNEL);
308 	if (!memory) {
309 		hostdata->noncoherent = 1;
310 		memory = dma_alloc_noncoherent(dev, TOTAL_MEM_SIZE, &pScript,
311 					 DMA_BIDIRECTIONAL, GFP_KERNEL);
312 	}
313 	if (!memory) {
314 		printk(KERN_ERR "53c700: Failed to allocate memory for driver, detaching\n");
315 		return NULL;
316 	}
317 
318 	script = (__u32 *)memory;
319 	hostdata->msgin = memory + MSGIN_OFFSET;
320 	hostdata->msgout = memory + MSGOUT_OFFSET;
321 	hostdata->status = memory + STATUS_OFFSET;
322 	hostdata->slots = (struct NCR_700_command_slot *)(memory + SLOTS_OFFSET);
323 	hostdata->dev = dev;
324 
325 	pSlots = pScript + SLOTS_OFFSET;
326 
327 	/* Fill in the missing routines from the host template */
328 	tpnt->queuecommand = NCR_700_queuecommand;
329 	tpnt->eh_abort_handler = NCR_700_abort;
330 	tpnt->eh_host_reset_handler = NCR_700_host_reset;
331 	tpnt->can_queue = NCR_700_COMMAND_SLOTS_PER_HOST;
332 	tpnt->sg_tablesize = NCR_700_SG_SEGMENTS;
333 	tpnt->cmd_per_lun = NCR_700_CMD_PER_LUN;
334 	tpnt->sdev_configure = NCR_700_sdev_configure;
335 	tpnt->sdev_destroy = NCR_700_sdev_destroy;
336 	tpnt->sdev_init = NCR_700_sdev_init;
337 	tpnt->change_queue_depth = NCR_700_change_queue_depth;
338 
339 	if(tpnt->name == NULL)
340 		tpnt->name = "53c700";
341 	if(tpnt->proc_name == NULL)
342 		tpnt->proc_name = "53c700";
343 
344 	host = scsi_host_alloc(tpnt, 4);
345 	if (!host)
346 		return NULL;
347 	memset(hostdata->slots, 0, sizeof(struct NCR_700_command_slot)
348 	       * NCR_700_COMMAND_SLOTS_PER_HOST);
349 	for (j = 0; j < NCR_700_COMMAND_SLOTS_PER_HOST; j++) {
350 		dma_addr_t offset = (dma_addr_t)((unsigned long)&hostdata->slots[j].SG[0]
351 					  - (unsigned long)&hostdata->slots[0].SG[0]);
352 		hostdata->slots[j].pSG = (struct NCR_700_SG_List *)((unsigned long)(pSlots + offset));
353 		if(j == 0)
354 			hostdata->free_list = &hostdata->slots[j];
355 		else
356 			hostdata->slots[j-1].ITL_forw = &hostdata->slots[j];
357 		hostdata->slots[j].state = NCR_700_SLOT_FREE;
358 	}
359 
360 	for (j = 0; j < ARRAY_SIZE(SCRIPT); j++)
361 		script[j] = bS_to_host(SCRIPT[j]);
362 
363 	/* adjust all labels to be bus physical */
364 	for (j = 0; j < PATCHES; j++)
365 		script[LABELPATCHES[j]] = bS_to_host(pScript + SCRIPT[LABELPATCHES[j]]);
366 	/* now patch up fixed addresses. */
367 	script_patch_32(hostdata, script, MessageLocation,
368 			pScript + MSGOUT_OFFSET);
369 	script_patch_32(hostdata, script, StatusAddress,
370 			pScript + STATUS_OFFSET);
371 	script_patch_32(hostdata, script, ReceiveMsgAddress,
372 			pScript + MSGIN_OFFSET);
373 
374 	hostdata->script = script;
375 	hostdata->pScript = pScript;
376 	dma_sync_single_for_device(hostdata->dev, pScript, sizeof(SCRIPT), DMA_TO_DEVICE);
377 	hostdata->state = NCR_700_HOST_FREE;
378 	hostdata->cmd = NULL;
379 	host->max_id = 8;
380 	host->max_lun = NCR_700_MAX_LUNS;
381 	BUG_ON(NCR_700_transport_template == NULL);
382 	host->transportt = NCR_700_transport_template;
383 	host->unique_id = (unsigned long)hostdata->base;
384 	hostdata->eh_complete = NULL;
385 	host->hostdata[0] = (unsigned long)hostdata;
386 	/* kick the chip */
387 	NCR_700_writeb(0xff, host, CTEST9_REG);
388 	if (hostdata->chip710)
389 		hostdata->rev = (NCR_700_readb(host, CTEST8_REG)>>4) & 0x0f;
390 	else
391 		hostdata->rev = (NCR_700_readb(host, CTEST7_REG)>>4) & 0x0f;
392 	hostdata->fast = (NCR_700_readb(host, CTEST9_REG) == 0);
393 	if (banner == 0) {
394 		printk(KERN_NOTICE "53c700: Version " NCR_700_VERSION " By James.Bottomley@HansenPartnership.com\n");
395 		banner = 1;
396 	}
397 	printk(KERN_NOTICE "scsi%d: %s rev %d %s\n", host->host_no,
398 	       hostdata->chip710 ? "53c710" :
399 	       (hostdata->fast ? "53c700-66" : "53c700"),
400 	       hostdata->rev, hostdata->differential ?
401 	       "(Differential)" : "");
402 	/* reset the chip */
403 	NCR_700_chip_reset(host);
404 
405 	if (scsi_add_host(host, dev)) {
406 		dev_printk(KERN_ERR, dev, "53c700: scsi_add_host failed\n");
407 		scsi_host_put(host);
408 		return NULL;
409 	}
410 
411 	spi_signalling(host) = hostdata->differential ? SPI_SIGNAL_HVD :
412 		SPI_SIGNAL_SE;
413 
414 	return host;
415 }
416 
417 int
418 NCR_700_release(struct Scsi_Host *host)
419 {
420 	struct NCR_700_Host_Parameters *hostdata =
421 		(struct NCR_700_Host_Parameters *)host->hostdata[0];
422 
423 	if (hostdata->noncoherent)
424 		dma_free_noncoherent(hostdata->dev, TOTAL_MEM_SIZE,
425 				hostdata->script, hostdata->pScript,
426 				DMA_BIDIRECTIONAL);
427 	else
428 		dma_free_coherent(hostdata->dev, TOTAL_MEM_SIZE,
429 				  hostdata->script, hostdata->pScript);
430 	return 1;
431 }
432 
433 static inline __u8
434 NCR_700_identify(int can_disconnect, __u8 lun)
435 {
436 	return IDENTIFY_BASE |
437 		((can_disconnect) ? 0x40 : 0) |
438 		(lun & NCR_700_LUN_MASK);
439 }
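/* Worked example (illustrative; assumes the standard 0x80 IDENTIFY_BASE and
 * a 3 bit LUN mask): NCR_700_identify(1, 2) is 0x80 | 0x40 | 0x02 = 0xc2,
 * an IDENTIFY for LUN 2 with disconnect permitted. */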
440 
441 /*
442  * Function : static int data_residual (Scsi_Host *host)
443  *
444  * Purpose : return residual data count of what's in the chip.  If you
445  * really want to know what this function is doing, it's almost a
446  * direct transcription of the algorithm described in the 53c710
447  * guide, except that the DBC and DFIFO registers are only 6 bits
448  * wide on a 53c700.
449  *
450  * Inputs : host - SCSI host */
451 static inline int
452 NCR_700_data_residual (struct Scsi_Host *host) {
453 	struct NCR_700_Host_Parameters *hostdata =
454 		(struct NCR_700_Host_Parameters *)host->hostdata[0];
455 	int count, synchronous = 0;
456 	unsigned int ddir;
457 
458 	if(hostdata->chip710) {
459 		count = ((NCR_700_readb(host, DFIFO_REG) & 0x7f) -
460 			 (NCR_700_readl(host, DBC_REG) & 0x7f)) & 0x7f;
461 	} else {
462 		count = ((NCR_700_readb(host, DFIFO_REG) & 0x3f) -
463 			 (NCR_700_readl(host, DBC_REG) & 0x3f)) & 0x3f;
464 	}
465 
466 	if(hostdata->fast)
467 		synchronous = NCR_700_readb(host, SXFER_REG) & 0x0f;
468 
469 	/* get the data direction */
470 	ddir = NCR_700_readb(host, CTEST0_REG) & 0x01;
471 
472 	if (ddir) {
473 		/* Receive */
474 		if (synchronous)
475 			count += (NCR_700_readb(host, SSTAT2_REG) & 0xf0) >> 4;
476 		else
477 			if (NCR_700_readb(host, SSTAT1_REG) & SIDL_REG_FULL)
478 				++count;
479 	} else {
480 		/* Send */
481 		__u8 sstat = NCR_700_readb(host, SSTAT1_REG);
482 		if (sstat & SODL_REG_FULL)
483 			++count;
484 		if (synchronous && (sstat & SODR_REG_FULL))
485 			++count;
486 	}
487 #ifdef NCR_700_DEBUG
488 	if(count)
489 		printk("RESIDUAL IS %d (ddir %d)\n", count, ddir);
490 #endif
491 	return count;
492 }
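/* Worked example (illustrative): on an asynchronous receive where the DFIFO
 * and DBC counters differ by 2 and SIDL still holds a latched byte, the
 * residual reported is 3. */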
493 
494 /* print out the SCSI wires and corresponding phase from the SBCL register
495  * in the chip */
496 static inline char *
497 sbcl_to_string(__u8 sbcl)
498 {
499 	int i;
500 	static char ret[256];
501 
502 	ret[0]='\0';
503 	for(i=0; i<8; i++) {
504 		if((1<<i) & sbcl)
505 			strcat(ret, NCR_700_SBCL_bits[i]);
506 	}
507 	strcat(ret, NCR_700_SBCL_to_phase[sbcl & 0x07]);
508 	return ret;
509 }
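/* Example (illustrative): sbcl == 0x07 has IO, CD and MSG asserted and
 * (0x07 & 0x07) indexes "MSG IN", so the returned string is
 * "IO CD MSG MSG IN". */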
510 
511 static inline __u8
512 bitmap_to_number(__u8 bitmap)
513 {
514 	__u8 i;
515 
516 	for(i=0; i<8 && !(bitmap &(1<<i)); i++)
517 		;
518 	return i;
519 }
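/* Example (illustrative): a reselection bitmap of 0x20 (only bit 5 set)
 * converts to SCSI ID 5. */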
520 
521 /* Pull a slot off the free list */
522 STATIC struct NCR_700_command_slot *
523 find_empty_slot(struct NCR_700_Host_Parameters *hostdata)
524 {
525 	struct NCR_700_command_slot *slot = hostdata->free_list;
526 
527 	if(slot == NULL) {
528 		/* sanity check */
529 		if(hostdata->command_slot_count != NCR_700_COMMAND_SLOTS_PER_HOST)
530 			printk(KERN_ERR "SLOTS FULL, but count is %d, should be %d\n", hostdata->command_slot_count, NCR_700_COMMAND_SLOTS_PER_HOST);
531 		return NULL;
532 	}
533 
534 	if(slot->state != NCR_700_SLOT_FREE)
535 		/* should panic! */
536 		printk(KERN_ERR "BUSY SLOT ON FREE LIST!!!\n");
537 
538 
539 	hostdata->free_list = slot->ITL_forw;
540 	slot->ITL_forw = NULL;
541 
542 
543 	/* NOTE: set the state to busy here, not queued, since this
544 	 * indicates the slot is in use and cannot be run by the IRQ
545 	 * finish routine.  If we cannot start the command when it
546 	 * is properly built, we then change to NCR_700_SLOT_QUEUED */
547 	slot->state = NCR_700_SLOT_BUSY;
548 	slot->flags = 0;
549 	hostdata->command_slot_count++;
550 
551 	return slot;
552 }
553 
554 STATIC void
555 free_slot(struct NCR_700_command_slot *slot,
556 	  struct NCR_700_Host_Parameters *hostdata)
557 {
558 	if((slot->state & NCR_700_SLOT_MASK) != NCR_700_SLOT_MAGIC) {
559 		printk(KERN_ERR "53c700: SLOT %p is not MAGIC!!!\n", slot);
560 	}
561 	if(slot->state == NCR_700_SLOT_FREE) {
562 		printk(KERN_ERR "53c700: SLOT %p is FREE!!!\n", slot);
563 	}
564 
565 	slot->resume_offset = 0;
566 	slot->cmnd = NULL;
567 	slot->state = NCR_700_SLOT_FREE;
568 	slot->ITL_forw = hostdata->free_list;
569 	hostdata->free_list = slot;
570 	hostdata->command_slot_count--;
571 }
572 
573 
574 /* This routine really does very little.  The command is indexed on
575    the ITL and (if tagged) the ITLQ lists in _queuecommand */
576 STATIC void
577 save_for_reselection(struct NCR_700_Host_Parameters *hostdata,
578 		     struct scsi_cmnd *SCp, __u32 dsp)
579 {
580 	/* It's just possible that this gets executed twice */
581 	if(SCp != NULL) {
582 		struct NCR_700_command_slot *slot =
583 			(struct NCR_700_command_slot *)SCp->host_scribble;
584 
585 		slot->resume_offset = dsp;
586 	}
587 	hostdata->state = NCR_700_HOST_FREE;
588 	hostdata->cmd = NULL;
589 }
590 
591 STATIC inline void
592 NCR_700_unmap(struct NCR_700_Host_Parameters *hostdata, struct scsi_cmnd *SCp,
593 	      struct NCR_700_command_slot *slot)
594 {
595 	if(SCp->sc_data_direction != DMA_NONE &&
596 	   SCp->sc_data_direction != DMA_BIDIRECTIONAL)
597 		scsi_dma_unmap(SCp);
598 }
599 
600 STATIC inline void
601 NCR_700_scsi_done(struct NCR_700_Host_Parameters *hostdata,
602 	       struct scsi_cmnd *SCp, int result)
603 {
604 	hostdata->state = NCR_700_HOST_FREE;
605 	hostdata->cmd = NULL;
606 
607 	if(SCp != NULL) {
608 		struct NCR_700_command_slot *slot =
609 			(struct NCR_700_command_slot *)SCp->host_scribble;
610 
611 		dma_unmap_single(hostdata->dev, slot->pCmd,
612 				 MAX_COMMAND_SIZE, DMA_TO_DEVICE);
613 		if (slot->flags == NCR_700_FLAG_AUTOSENSE) {
614 			char *cmnd = NCR_700_get_sense_cmnd(SCp->device);
615 
616 			dma_unmap_single(hostdata->dev, slot->dma_handle,
617 					 SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
618 			/* restore the old result if the request sense was
619 			 * successful */
620 			if (result == 0)
621 				result = cmnd[7];
622 			/* restore the original length */
623 			SCp->cmd_len = cmnd[8];
624 		} else
625 			NCR_700_unmap(hostdata, SCp, slot);
626 
627 		free_slot(slot, hostdata);
628 #ifdef NCR_700_DEBUG
629 		if(NCR_700_get_depth(SCp->device) == 0 ||
630 		   NCR_700_get_depth(SCp->device) > SCp->device->queue_depth)
631 			printk(KERN_ERR "Invalid depth in NCR_700_scsi_done(): %d\n",
632 			       NCR_700_get_depth(SCp->device));
633 #endif /* NCR_700_DEBUG */
634 		NCR_700_set_depth(SCp->device, NCR_700_get_depth(SCp->device) - 1);
635 
636 		SCp->host_scribble = NULL;
637 		SCp->result = result;
638 		scsi_done(SCp);
639 	} else {
640 		printk(KERN_ERR "53c700: SCSI DONE HAS NULL SCp\n");
641 	}
642 }
643 
644 
645 STATIC void
646 NCR_700_internal_bus_reset(struct Scsi_Host *host)
647 {
648 	/* Bus reset */
649 	NCR_700_writeb(ASSERT_RST, host, SCNTL1_REG);
650 	udelay(50);
651 	NCR_700_writeb(0, host, SCNTL1_REG);
652 
653 }
654 
655 STATIC void
656 NCR_700_chip_setup(struct Scsi_Host *host)
657 {
658 	struct NCR_700_Host_Parameters *hostdata =
659 		(struct NCR_700_Host_Parameters *)host->hostdata[0];
660 	__u8 min_period;
661 	__u8 min_xferp = (hostdata->chip710 ? NCR_710_MIN_XFERP : NCR_700_MIN_XFERP);
662 
663 	if(hostdata->chip710) {
664 		__u8 burst_disable = 0;
665 		__u8 burst_length = 0;
666 
667 		switch (hostdata->burst_length) {
668 			case 1:
669 			        burst_length = BURST_LENGTH_1;
670 			        break;
671 			case 2:
672 			        burst_length = BURST_LENGTH_2;
673 			        break;
674 			case 4:
675 			        burst_length = BURST_LENGTH_4;
676 			        break;
677 			case 8:
678 			        burst_length = BURST_LENGTH_8;
679 			        break;
680 			default:
681 			        burst_disable = BURST_DISABLE;
682 			        break;
683 		}
684 		hostdata->dcntl_extra |= COMPAT_700_MODE;
685 
686 		NCR_700_writeb(hostdata->dcntl_extra, host, DCNTL_REG);
687 		NCR_700_writeb(burst_length | hostdata->dmode_extra,
688 			       host, DMODE_710_REG);
689 		NCR_700_writeb(burst_disable | hostdata->ctest7_extra |
690 			       (hostdata->differential ? DIFF : 0),
691 			       host, CTEST7_REG);
692 		NCR_700_writeb(BTB_TIMER_DISABLE, host, CTEST0_REG);
693 		NCR_700_writeb(FULL_ARBITRATION | ENABLE_PARITY | PARITY
694 			       | AUTO_ATN, host, SCNTL0_REG);
695 	} else {
696 		NCR_700_writeb(BURST_LENGTH_8 | hostdata->dmode_extra,
697 			       host, DMODE_700_REG);
698 		NCR_700_writeb(hostdata->differential ?
699 			       DIFF : 0, host, CTEST7_REG);
700 		if(hostdata->fast) {
701 			/* this is for 700-66, does nothing on 700 */
702 			NCR_700_writeb(LAST_DIS_ENBL | ENABLE_ACTIVE_NEGATION
703 				       | GENERATE_RECEIVE_PARITY, host,
704 				       CTEST8_REG);
705 		} else {
706 			NCR_700_writeb(FULL_ARBITRATION | ENABLE_PARITY
707 				       | PARITY | AUTO_ATN, host, SCNTL0_REG);
708 		}
709 	}
710 
711 	NCR_700_writeb(1 << host->this_id, host, SCID_REG);
712 	NCR_700_writeb(0, host, SBCL_REG);
713 	NCR_700_writeb(ASYNC_OPERATION, host, SXFER_REG);
714 
715 	NCR_700_writeb(PHASE_MM_INT | SEL_TIMEOUT_INT | GROSS_ERR_INT | UX_DISC_INT
716 	     | RST_INT | PAR_ERR_INT | SELECT_INT, host, SIEN_REG);
717 
718 	NCR_700_writeb(ABORT_INT | INT_INST_INT | ILGL_INST_INT, host, DIEN_REG);
719 	NCR_700_writeb(ENABLE_SELECT, host, SCNTL1_REG);
720 	if(hostdata->clock > 75) {
721 		printk(KERN_ERR "53c700: Clock speed %dMHz is too high: 75Mhz is the maximum this chip can be driven at\n", hostdata->clock);
722 		/* do the best we can, but the async clock will be out
723 		 * of spec: sync divider 2, async divider 3 */
724 		DEBUG(("53c700: sync 2 async 3\n"));
725 		NCR_700_writeb(SYNC_DIV_2_0, host, SBCL_REG);
726 		NCR_700_writeb(ASYNC_DIV_3_0 | hostdata->dcntl_extra, host, DCNTL_REG);
727 		hostdata->sync_clock = hostdata->clock/2;
728 	} else	if(hostdata->clock > 50  && hostdata->clock <= 75) {
729 		/* sync divider 1.5, async divider 3 */
730 		DEBUG(("53c700: sync 1.5 async 3\n"));
731 		NCR_700_writeb(SYNC_DIV_1_5, host, SBCL_REG);
732 		NCR_700_writeb(ASYNC_DIV_3_0 | hostdata->dcntl_extra, host, DCNTL_REG);
733 		hostdata->sync_clock = hostdata->clock*2;
734 		hostdata->sync_clock /= 3;
735 
736 	} else if(hostdata->clock > 37 && hostdata->clock <= 50) {
737 		/* sync divider 1, async divider 2 */
738 		DEBUG(("53c700: sync 1 async 2\n"));
739 		NCR_700_writeb(SYNC_DIV_1_0, host, SBCL_REG);
740 		NCR_700_writeb(ASYNC_DIV_2_0 | hostdata->dcntl_extra, host, DCNTL_REG);
741 		hostdata->sync_clock = hostdata->clock;
742 	} else if(hostdata->clock > 25 && hostdata->clock <=37) {
743 		/* sync divider 1, async divider 1.5 */
744 		DEBUG(("53c700: sync 1 async 1.5\n"));
745 		NCR_700_writeb(SYNC_DIV_1_0, host, SBCL_REG);
746 		NCR_700_writeb(ASYNC_DIV_1_5 | hostdata->dcntl_extra, host, DCNTL_REG);
747 		hostdata->sync_clock = hostdata->clock;
748 	} else {
749 		DEBUG(("53c700: sync 1 async 1\n"));
750 		NCR_700_writeb(SYNC_DIV_1_0, host, SBCL_REG);
751 		NCR_700_writeb(ASYNC_DIV_1_0 | hostdata->dcntl_extra, host, DCNTL_REG);
752 		/* sync divider 1, async divider 1 */
753 		hostdata->sync_clock = hostdata->clock;
754 	}
755 	/* Calculate the actual minimum period that can be supported
756 	 * by our synchronous clock speed.  See the 710 manual for
757 	 * exact details of this calculation which is based on a
758 	 * setting of the SXFER register */
759 	min_period = 1000*(4+min_xferp)/(4*hostdata->sync_clock);
760 	hostdata->min_period = NCR_700_MIN_PERIOD;
761 	if(min_period > NCR_700_MIN_PERIOD)
762 		hostdata->min_period = min_period;
763 }
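/* Worked example (illustrative): with hostdata->clock == 50 (MHz) the
 * 37 < clock <= 50 branch is taken, so the sync divider is 1, the async
 * divider is 2 and sync_clock remains 50.  The calculated minimum period is
 * then 1000*(4 + min_xferp)/(4*50), e.g. 20 for a min_xferp of 0, and
 * hostdata->min_period becomes the larger of that and NCR_700_MIN_PERIOD. */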
764 
765 STATIC void
766 NCR_700_chip_reset(struct Scsi_Host *host)
767 {
768 	struct NCR_700_Host_Parameters *hostdata =
769 		(struct NCR_700_Host_Parameters *)host->hostdata[0];
770 	if(hostdata->chip710) {
771 		NCR_700_writeb(SOFTWARE_RESET_710, host, ISTAT_REG);
772 		udelay(100);
773 
774 		NCR_700_writeb(0, host, ISTAT_REG);
775 	} else {
776 		NCR_700_writeb(SOFTWARE_RESET, host, DCNTL_REG);
777 		udelay(100);
778 
779 		NCR_700_writeb(0, host, DCNTL_REG);
780 	}
781 
782 	mdelay(1000);
783 
784 	NCR_700_chip_setup(host);
785 }
786 
787 /* The heart of the message processing engine is that the instruction
788  * immediately after the INT is the normal case (and so must be CLEAR
789  * ACK).  If we want to do something else, we call that routine in
790  * scripts and set temp to be the normal case + 8 (skipping the CLEAR
791  * ACK) so that the routine returns correctly to resume its activity
792  * */
793 STATIC __u32
794 process_extended_message(struct Scsi_Host *host,
795 			 struct NCR_700_Host_Parameters *hostdata,
796 			 struct scsi_cmnd *SCp, __u32 dsp, __u32 dsps)
797 {
798 	__u32 resume_offset = dsp, temp = dsp + 8;
799 	__u8 pun = 0xff, lun = 0xff;
800 
801 	if(SCp != NULL) {
802 		pun = SCp->device->id;
803 		lun = SCp->device->lun;
804 	}
805 
806 	switch(hostdata->msgin[2]) {
807 	case A_SDTR_MSG:
808 		if(SCp != NULL && NCR_700_is_flag_set(SCp->device, NCR_700_DEV_BEGIN_SYNC_NEGOTIATION)) {
809 			struct scsi_target *starget = SCp->device->sdev_target;
810 			__u8 period = hostdata->msgin[3];
811 			__u8 offset = hostdata->msgin[4];
812 
813 			if(offset == 0 || period == 0) {
814 				offset = 0;
815 				period = 0;
816 			}
817 
818 			spi_offset(starget) = offset;
819 			spi_period(starget) = period;
820 
821 			if(NCR_700_is_flag_set(SCp->device, NCR_700_DEV_PRINT_SYNC_NEGOTIATION)) {
822 				spi_display_xfer_agreement(starget);
823 				NCR_700_clear_flag(SCp->device, NCR_700_DEV_PRINT_SYNC_NEGOTIATION);
824 			}
825 
826 			NCR_700_set_flag(SCp->device, NCR_700_DEV_NEGOTIATED_SYNC);
827 			NCR_700_clear_flag(SCp->device, NCR_700_DEV_BEGIN_SYNC_NEGOTIATION);
828 
829 			NCR_700_writeb(NCR_700_get_SXFER(SCp->device),
830 				       host, SXFER_REG);
831 
832 		} else {
833 			/* SDTR message out of the blue, reject it */
834 			shost_printk(KERN_WARNING, host,
835 				"Unexpected SDTR msg\n");
836 			hostdata->msgout[0] = A_REJECT_MSG;
837 			dma_sync_to_dev(hostdata, hostdata->msgout, 1);
838 			script_patch_16(hostdata, hostdata->script,
839 			                MessageCount, 1);
840 			/* SendMsgOut returns, so set up the return
841 			 * address */
842 			resume_offset = hostdata->pScript + Ent_SendMessageWithATN;
843 		}
844 		break;
845 
846 	case A_WDTR_MSG:
847 		printk(KERN_INFO "scsi%d: (%d:%d), Unsolicited WDTR after CMD, Rejecting\n",
848 		       host->host_no, pun, lun);
849 		hostdata->msgout[0] = A_REJECT_MSG;
850 		dma_sync_to_dev(hostdata, hostdata->msgout, 1);
851 		script_patch_16(hostdata, hostdata->script, MessageCount, 1);
852 		resume_offset = hostdata->pScript + Ent_SendMessageWithATN;
853 
854 		break;
855 
856 	default:
857 		printk(KERN_INFO "scsi%d (%d:%d): Unexpected message %s: ",
858 		       host->host_no, pun, lun,
859 		       NCR_700_phase[(dsps & 0xf00) >> 8]);
860 		spi_print_msg(hostdata->msgin);
861 		printk("\n");
862 		/* just reject it */
863 		hostdata->msgout[0] = A_REJECT_MSG;
864 		dma_sync_to_dev(hostdata, hostdata->msgout, 1);
865 		script_patch_16(hostdata, hostdata->script, MessageCount, 1);
866 		/* SendMsgOut returns, so set up the return
867 		 * address */
868 		resume_offset = hostdata->pScript + Ent_SendMessageWithATN;
869 	}
870 	NCR_700_writel(temp, host, TEMP_REG);
871 	return resume_offset;
872 }
873 
874 STATIC __u32
875 process_message(struct Scsi_Host *host,	struct NCR_700_Host_Parameters *hostdata,
876 		struct scsi_cmnd *SCp, __u32 dsp, __u32 dsps)
877 {
878 	/* work out where to return to */
879 	__u32 temp = dsp + 8, resume_offset = dsp;
880 	__u8 pun = 0xff, lun = 0xff;
881 
882 	if(SCp != NULL) {
883 		pun = SCp->device->id;
884 		lun = SCp->device->lun;
885 	}
886 
887 #ifdef NCR_700_DEBUG
888 	printk("scsi%d (%d:%d): message %s: ", host->host_no, pun, lun,
889 	       NCR_700_phase[(dsps & 0xf00) >> 8]);
890 	spi_print_msg(hostdata->msgin);
891 	printk("\n");
892 #endif
893 
894 	switch(hostdata->msgin[0]) {
895 
896 	case A_EXTENDED_MSG:
897 		resume_offset =  process_extended_message(host, hostdata, SCp,
898 							  dsp, dsps);
899 		break;
900 
901 	case A_REJECT_MSG:
902 		if(SCp != NULL && NCR_700_is_flag_set(SCp->device, NCR_700_DEV_BEGIN_SYNC_NEGOTIATION)) {
903 			/* Rejected our sync negotiation attempt */
904 			spi_period(SCp->device->sdev_target) =
905 				spi_offset(SCp->device->sdev_target) = 0;
906 			NCR_700_set_flag(SCp->device, NCR_700_DEV_NEGOTIATED_SYNC);
907 			NCR_700_clear_flag(SCp->device, NCR_700_DEV_BEGIN_SYNC_NEGOTIATION);
908 		} else if(SCp != NULL && NCR_700_get_tag_neg_state(SCp->device) == NCR_700_DURING_TAG_NEGOTIATION) {
909 			/* rejected our first simple tag message */
910 			scmd_printk(KERN_WARNING, SCp,
911 				"Rejected first tag queue attempt, turning off tag queueing\n");
912 			/* we're done negotiating */
913 			NCR_700_set_tag_neg_state(SCp->device, NCR_700_FINISHED_TAG_NEGOTIATION);
914 			hostdata->tag_negotiated &= ~(1<<scmd_id(SCp));
915 
916 			SCp->device->tagged_supported = 0;
917 			SCp->device->simple_tags = 0;
918 			scsi_change_queue_depth(SCp->device, host->cmd_per_lun);
919 		} else {
920 			shost_printk(KERN_WARNING, host,
921 				"(%d:%d) Unexpected REJECT Message %s\n",
922 			       pun, lun,
923 			       NCR_700_phase[(dsps & 0xf00) >> 8]);
924 			/* however, just ignore it */
925 		}
926 		break;
927 
928 	case A_PARITY_ERROR_MSG:
929 		printk(KERN_ERR "scsi%d (%d:%d) Parity Error!\n", host->host_no,
930 		       pun, lun);
931 		NCR_700_internal_bus_reset(host);
932 		break;
933 	case A_SIMPLE_TAG_MSG:
934 		printk(KERN_INFO "scsi%d (%d:%d) SIMPLE TAG %d %s\n", host->host_no,
935 		       pun, lun, hostdata->msgin[1],
936 		       NCR_700_phase[(dsps & 0xf00) >> 8]);
937 		/* just ignore it */
938 		break;
939 	default:
940 		printk(KERN_INFO "scsi%d (%d:%d): Unexpected message %s: ",
941 		       host->host_no, pun, lun,
942 		       NCR_700_phase[(dsps & 0xf00) >> 8]);
943 
944 		spi_print_msg(hostdata->msgin);
945 		printk("\n");
946 		/* just reject it */
947 		hostdata->msgout[0] = A_REJECT_MSG;
948 		dma_sync_to_dev(hostdata, hostdata->msgout, 1);
949 		script_patch_16(hostdata, hostdata->script, MessageCount, 1);
950 		/* SendMsgOut returns, so set up the return
951 		 * address */
952 		resume_offset = hostdata->pScript + Ent_SendMessageWithATN;
953 
954 		break;
955 	}
956 	NCR_700_writel(temp, host, TEMP_REG);
957 	/* set us up to receive another message */
958 	dma_sync_from_dev(hostdata, hostdata->msgin, MSG_ARRAY_SIZE);
959 	return resume_offset;
960 }
961 
962 STATIC __u32
963 process_script_interrupt(__u32 dsps, __u32 dsp, struct scsi_cmnd *SCp,
964 			 struct Scsi_Host *host,
965 			 struct NCR_700_Host_Parameters *hostdata)
966 {
967 	__u32 resume_offset = 0;
968 	__u8 pun = 0xff, lun=0xff;
969 
970 	if(SCp != NULL) {
971 		pun = SCp->device->id;
972 		lun = SCp->device->lun;
973 	}
974 
975 	if(dsps == A_GOOD_STATUS_AFTER_STATUS) {
976 		DEBUG(("  COMMAND COMPLETE, status=%02x\n",
977 		       hostdata->status[0]));
978 		/* OK, if TCQ still under negotiation, we now know it works */
979 		if (NCR_700_get_tag_neg_state(SCp->device) == NCR_700_DURING_TAG_NEGOTIATION)
980 			NCR_700_set_tag_neg_state(SCp->device,
981 						  NCR_700_FINISHED_TAG_NEGOTIATION);
982 
983 		/* check for contingent allegiance conditions */
984 		if (hostdata->status[0] == SAM_STAT_CHECK_CONDITION ||
985 		    hostdata->status[0] == SAM_STAT_COMMAND_TERMINATED) {
986 			struct NCR_700_command_slot *slot =
987 				(struct NCR_700_command_slot *)SCp->host_scribble;
988 			if(slot->flags == NCR_700_FLAG_AUTOSENSE) {
989 				/* OOPS: bad device, returning another
990 				 * contingent allegiance condition */
991 				scmd_printk(KERN_ERR, SCp,
992 					"broken device is looping in contingent allegiance: ignoring\n");
993 				NCR_700_scsi_done(hostdata, SCp, hostdata->status[0]);
994 			} else {
995 				char *cmnd =
996 					NCR_700_get_sense_cmnd(SCp->device);
997 #ifdef NCR_DEBUG
998 				scsi_print_command(SCp);
999 				printk("  cmd %p has status %d, requesting sense\n",
1000 				       SCp, hostdata->status[0]);
1001 #endif
1002 				/* we can destroy the command here
1003 				 * because the contingent allegiance
1004 				 * condition will cause a retry which
1005 				 * will re-copy the command from the
1006 				 * saved data_cmnd.  We also unmap any
1007 				 * data associated with the command
1008 				 * here */
1009 				NCR_700_unmap(hostdata, SCp, slot);
1010 				dma_unmap_single(hostdata->dev, slot->pCmd,
1011 						 MAX_COMMAND_SIZE,
1012 						 DMA_TO_DEVICE);
1013 
1014 				cmnd[0] = REQUEST_SENSE;
1015 				cmnd[1] = (lun & 0x7) << 5;
1016 				cmnd[2] = 0;
1017 				cmnd[3] = 0;
1018 				cmnd[4] = SCSI_SENSE_BUFFERSIZE;
1019 				cmnd[5] = 0;
1020 				/* Here's a quiet hack: the
1021 				 * REQUEST_SENSE command is six bytes,
1022 				 * so store a flag indicating that
1023 				 * this was an internal sense request
1024 				 * and the original status at the end
1025 				 * of the command */
1026 				cmnd[6] = NCR_700_INTERNAL_SENSE_MAGIC;
1027 				cmnd[7] = hostdata->status[0];
1028 				cmnd[8] = SCp->cmd_len;
1029 				SCp->cmd_len = 6; /* command length for
1030 						   * REQUEST_SENSE */
1031 				slot->pCmd = dma_map_single(hostdata->dev, cmnd, MAX_COMMAND_SIZE, DMA_TO_DEVICE);
1032 				slot->dma_handle = dma_map_single(hostdata->dev, SCp->sense_buffer, SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
1033 				slot->SG[0].ins = bS_to_host(SCRIPT_MOVE_DATA_IN | SCSI_SENSE_BUFFERSIZE);
1034 				slot->SG[0].pAddr = bS_to_host(slot->dma_handle);
1035 				slot->SG[1].ins = bS_to_host(SCRIPT_RETURN);
1036 				slot->SG[1].pAddr = 0;
1037 				slot->resume_offset = hostdata->pScript;
1038 				dma_sync_to_dev(hostdata, slot->SG, sizeof(slot->SG[0])*2);
1039 				dma_sync_from_dev(hostdata, SCp->sense_buffer, SCSI_SENSE_BUFFERSIZE);
1040 
1041 				/* queue the command for reissue */
1042 				slot->state = NCR_700_SLOT_QUEUED;
1043 				slot->flags = NCR_700_FLAG_AUTOSENSE;
1044 				hostdata->state = NCR_700_HOST_FREE;
1045 				hostdata->cmd = NULL;
1046 			}
1047 		} else {
1048 			// Currently rely on the mid layer evaluation
1049 			// of the tag queuing capability
1050 			//
1051 			//if(status_byte(hostdata->status[0]) == GOOD &&
1052 			//   SCp->cmnd[0] == INQUIRY && SCp->use_sg == 0) {
1053 			//	/* Piggy back the tag queueing support
1054 			//	 * on this command */
1055 			//	dma_sync_single_for_cpu(hostdata->dev,
1056 			//			    slot->dma_handle,
1057 			//			    SCp->request_bufflen,
1058 			//			    DMA_FROM_DEVICE);
1059 			//	if(((char *)SCp->request_buffer)[7] & 0x02) {
1060 			//		scmd_printk(KERN_INFO, SCp,
1061 			//		     "Enabling Tag Command Queuing\n");
1062 			//		hostdata->tag_negotiated |= (1<<scmd_id(SCp));
1063 			//		NCR_700_set_flag(SCp->device, NCR_700_DEV_BEGIN_TAG_QUEUEING);
1064 			//	} else {
1065 			//		NCR_700_clear_flag(SCp->device, NCR_700_DEV_BEGIN_TAG_QUEUEING);
1066 			//		hostdata->tag_negotiated &= ~(1<<scmd_id(SCp));
1067 			//	}
1068 			//}
1069 			NCR_700_scsi_done(hostdata, SCp, hostdata->status[0]);
1070 		}
1071 	} else if((dsps & 0xfffff0f0) == A_UNEXPECTED_PHASE) {
1072 		__u8 i = (dsps & 0xf00) >> 8;
1073 
1074 		scmd_printk(KERN_ERR, SCp, "UNEXPECTED PHASE %s (%s)\n",
1075 		       NCR_700_phase[i],
1076 		       sbcl_to_string(NCR_700_readb(host, SBCL_REG)));
1077 		scmd_printk(KERN_ERR, SCp, "         len = %d, cmd =",
1078 			SCp->cmd_len);
1079 		scsi_print_command(SCp);
1080 
1081 		NCR_700_internal_bus_reset(host);
1082 	} else if((dsps & 0xfffff000) == A_FATAL) {
1083 		int i = (dsps & 0xfff);
1084 
1085 		printk(KERN_ERR "scsi%d: (%d:%d) FATAL ERROR: %s\n",
1086 		       host->host_no, pun, lun, NCR_700_fatal_messages[i]);
1087 		if(dsps == A_FATAL_ILLEGAL_MSG_LENGTH) {
1088 			printk(KERN_ERR "     msg begins %02x %02x\n",
1089 			       hostdata->msgin[0], hostdata->msgin[1]);
1090 		}
1091 		NCR_700_internal_bus_reset(host);
1092 	} else if((dsps & 0xfffff0f0) == A_DISCONNECT) {
1093 #ifdef NCR_700_DEBUG
1094 		__u8 i = (dsps & 0xf00) >> 8;
1095 
1096 		printk("scsi%d: (%d:%d), DISCONNECTED (%d) %s\n",
1097 		       host->host_no, pun, lun,
1098 		       i, NCR_700_phase[i]);
1099 #endif
1100 		save_for_reselection(hostdata, SCp, dsp);
1101 
1102 	} else if(dsps == A_RESELECTION_IDENTIFIED) {
1103 		__u8 lun;
1104 		struct NCR_700_command_slot *slot;
1105 		__u8 reselection_id = hostdata->reselection_id;
1106 		struct scsi_device *SDp;
1107 
1108 		lun = hostdata->msgin[0] & 0x1f;
1109 
1110 		hostdata->reselection_id = 0xff;
1111 		DEBUG(("scsi%d: (%d:%d) RESELECTED!\n",
1112 		       host->host_no, reselection_id, lun));
1113 		/* clear the reselection indicator */
1114 		SDp = __scsi_device_lookup(host, 0, reselection_id, lun);
1115 		if(unlikely(SDp == NULL)) {
1116 			printk(KERN_ERR "scsi%d: (%d:%d) HAS NO device\n",
1117 			       host->host_no, reselection_id, lun);
1118 			BUG();
1119 		}
1120 		if(hostdata->msgin[1] == A_SIMPLE_TAG_MSG) {
1121 			struct scsi_cmnd *SCp;
1122 
1123 			SCp = scsi_host_find_tag(SDp->host, hostdata->msgin[2]);
1124 			if(unlikely(SCp == NULL)) {
1125 				printk(KERN_ERR "scsi%d: (%d:%d) no saved request for tag %d\n",
1126 				       host->host_no, reselection_id, lun, hostdata->msgin[2]);
1127 				BUG();
1128 			}
1129 
1130 			slot = (struct NCR_700_command_slot *)SCp->host_scribble;
1131 			DDEBUG(KERN_DEBUG, SDp,
1132 				"reselection is tag %d, slot %p(%d)\n",
1133 				hostdata->msgin[2], slot, slot->tag);
1134 		} else {
1135 			struct NCR_700_Device_Parameters *p = SDp->hostdata;
1136 			struct scsi_cmnd *SCp = p->current_cmnd;
1137 
1138 			if(unlikely(SCp == NULL)) {
1139 				sdev_printk(KERN_ERR, SDp,
1140 					"no saved request for untagged cmd\n");
1141 				BUG();
1142 			}
1143 			slot = (struct NCR_700_command_slot *)SCp->host_scribble;
1144 		}
1145 
1146 		if(slot == NULL) {
1147 			printk(KERN_ERR "scsi%d: (%d:%d) RESELECTED but no saved command (MSG = %02x %02x %02x)!!\n",
1148 			       host->host_no, reselection_id, lun,
1149 			       hostdata->msgin[0], hostdata->msgin[1],
1150 			       hostdata->msgin[2]);
1151 		} else {
1152 			if(hostdata->state != NCR_700_HOST_BUSY)
1153 				printk(KERN_ERR "scsi%d: FATAL, host not busy during valid reselection!\n",
1154 				       host->host_no);
1155 			resume_offset = slot->resume_offset;
1156 			hostdata->cmd = slot->cmnd;
1157 
1158 			/* re-patch for this command */
1159 			script_patch_32_abs(hostdata, hostdata->script,
1160 			                    CommandAddress, slot->pCmd);
1161 			script_patch_16(hostdata, hostdata->script,
1162 					CommandCount, slot->cmnd->cmd_len);
1163 			script_patch_32_abs(hostdata, hostdata->script,
1164 			                    SGScriptStartAddress,
1165 					    to32bit(&slot->pSG[0].ins));
1166 
1167 			/* Note: setting SXFER only works if we're
1168 			 * still in the MESSAGE phase, so it is vital
1169 			 * that ACK is still asserted when we process
1170 			 * the reselection message.  The resume offset
1171 			 * should therefore always clear ACK */
1172 			NCR_700_writeb(NCR_700_get_SXFER(hostdata->cmd->device),
1173 				       host, SXFER_REG);
1174 			dma_sync_from_dev(hostdata, hostdata->msgin,
1175 				       MSG_ARRAY_SIZE);
1176 			dma_sync_to_dev(hostdata, hostdata->msgout,
1177 				       MSG_ARRAY_SIZE);
1178 			/* I'm just being paranoid here, the command should
1179 			 * already have been flushed from the cache */
1180 			dma_sync_to_dev(hostdata, slot->cmnd->cmnd,
1181 				       slot->cmnd->cmd_len);
1182 
1183 
1184 
1185 		}
1186 	} else if(dsps == A_RESELECTED_DURING_SELECTION) {
1187 
1188 		/* This section is full of debugging code because I've
1189 		 * never managed to reach it.  I think what happens is
1190 		 * that, because the 700 runs with selection
1191 		 * interrupts enabled the whole time, we take a
1192 		 * selection interrupt before we manage to get to the
1193 		 * reselected script interrupt */
1194 
1195 		__u8 reselection_id = NCR_700_readb(host, SFBR_REG);
1196 		struct NCR_700_command_slot *slot;
1197 
1198 		/* Take out our own ID */
1199 		reselection_id &= ~(1<<host->this_id);
1200 
1201 		/* I've never seen this happen, so keep this as a printk rather
1202 		 * than a debug */
1203 		printk(KERN_INFO "scsi%d: (%d:%d) RESELECTION DURING SELECTION, dsp=%08x[%04x] state=%d, count=%d\n",
1204 		       host->host_no, reselection_id, lun, dsp, dsp - hostdata->pScript, hostdata->state, hostdata->command_slot_count);
1205 
1206 		{
1207 			/* FIXME: DEBUGGING CODE */
1208 			__u32 SG = (__u32)bS_to_cpu(hostdata->script[A_SGScriptStartAddress_used[0]]);
1209 			int i;
1210 
1211 			for(i=0; i< NCR_700_COMMAND_SLOTS_PER_HOST; i++) {
1212 				if(SG >= to32bit(&hostdata->slots[i].pSG[0])
1213 				   && SG <= to32bit(&hostdata->slots[i].pSG[NCR_700_SG_SEGMENTS]))
1214 					break;
1215 			}
1216 			printk(KERN_INFO "IDENTIFIED SG segment as being %08x in slot %p, cmd %p, slot->resume_offset=%08x\n", SG, &hostdata->slots[i], hostdata->slots[i].cmnd, hostdata->slots[i].resume_offset);
1217 			SCp =  hostdata->slots[i].cmnd;
1218 		}
1219 
1220 		if(SCp != NULL) {
1221 			slot = (struct NCR_700_command_slot *)SCp->host_scribble;
1222 			/* change slot from busy to queued to redo command */
1223 			slot->state = NCR_700_SLOT_QUEUED;
1224 		}
1225 		hostdata->cmd = NULL;
1226 
1227 		if(reselection_id == 0) {
1228 			if(hostdata->reselection_id == 0xff) {
1229 				printk(KERN_ERR "scsi%d: Invalid reselection during selection!!\n", host->host_no);
1230 				return 0;
1231 			} else {
1232 				printk(KERN_ERR "scsi%d: script reselected and we took a selection interrupt\n",
1233 				       host->host_no);
1234 				reselection_id = hostdata->reselection_id;
1235 			}
1236 		} else {
1237 
1238 			/* convert to real ID */
1239 			reselection_id = bitmap_to_number(reselection_id);
1240 		}
1241 		hostdata->reselection_id = reselection_id;
1242 		/* just in case we have a stale simple tag message, clear it */
1243 		hostdata->msgin[1] = 0;
1244 		dma_sync_to_dev(hostdata, hostdata->msgin, MSG_ARRAY_SIZE);
1245 		if(hostdata->tag_negotiated & (1<<reselection_id)) {
1246 			resume_offset = hostdata->pScript + Ent_GetReselectionWithTag;
1247 		} else {
1248 			resume_offset = hostdata->pScript + Ent_GetReselectionData;
1249 		}
1250 	} else if(dsps == A_COMPLETED_SELECTION_AS_TARGET) {
1251 		/* we've just disconnected from the bus, do nothing since
1252 		 * a return here will re-run the queued command slot
1253 		 * that may have been interrupted by the initial selection */
1254 		DEBUG((" SELECTION COMPLETED\n"));
1255 	} else if((dsps & 0xfffff0f0) == A_MSG_IN) {
1256 		resume_offset = process_message(host, hostdata, SCp,
1257 						dsp, dsps);
1258 	} else if((dsps &  0xfffff000) == 0) {
1259 		__u8 i = (dsps & 0xf0) >> 4, j = (dsps & 0xf00) >> 8;
1260 		printk(KERN_ERR "scsi%d: (%d:%d), unhandled script condition %s %s at %04x\n",
1261 		       host->host_no, pun, lun, NCR_700_condition[i],
1262 		       NCR_700_phase[j], dsp - hostdata->pScript);
1263 		if(SCp != NULL) {
1264 			struct scatterlist *sg;
1265 
1266 			scsi_print_command(SCp);
1267 			scsi_for_each_sg(SCp, sg, scsi_sg_count(SCp) + 1, i) {
1268 				printk(KERN_INFO " SG[%d].length = %d, move_insn=%08x, addr %08x\n", i, sg->length, ((struct NCR_700_command_slot *)SCp->host_scribble)->SG[i].ins, ((struct NCR_700_command_slot *)SCp->host_scribble)->SG[i].pAddr);
1269 			}
1270 		}
1271 		NCR_700_internal_bus_reset(host);
1272 	} else if((dsps & 0xfffff000) == A_DEBUG_INTERRUPT) {
1273 		printk(KERN_NOTICE "scsi%d (%d:%d) DEBUG INTERRUPT %d AT %08x[%04x], continuing\n",
1274 		       host->host_no, pun, lun, dsps & 0xfff, dsp, dsp - hostdata->pScript);
1275 		resume_offset = dsp;
1276 	} else {
1277 		printk(KERN_ERR "scsi%d: (%d:%d), unidentified script interrupt 0x%x at %04x\n",
1278 		       host->host_no, pun, lun, dsps, dsp - hostdata->pScript);
1279 		NCR_700_internal_bus_reset(host);
1280 	}
1281 	return resume_offset;
1282 }
1283 
1284 /* We run the 53c700 with selection interrupts always enabled.  This
1285  * means that the chip may be selected as soon as the bus frees.  On a
1286  * busy bus, this can be before the scripts engine finishes its
1287  * processing.  Therefore, part of the selection processing has to be
1288  * to find out what the scripts engine is doing and complete the
1289  * function if necessary (i.e. process the pending disconnect or save
1290  * the interrupted initial selection) */
1291 STATIC inline __u32
1292 process_selection(struct Scsi_Host *host, __u32 dsp)
1293 {
1294 	__u8 id = 0;	/* Squash compiler warning */
1295 	int count = 0;
1296 	__u32 resume_offset = 0;
1297 	struct NCR_700_Host_Parameters *hostdata =
1298 		(struct NCR_700_Host_Parameters *)host->hostdata[0];
1299 	struct scsi_cmnd *SCp = hostdata->cmd;
1300 	__u8 sbcl;
1301 
1302 	for(count = 0; count < 5; count++) {
1303 		id = NCR_700_readb(host, hostdata->chip710 ?
1304 				   CTEST9_REG : SFBR_REG);
1305 
1306 		/* Take out our own ID */
1307 		id &= ~(1<<host->this_id);
1308 		if(id != 0)
1309 			break;
1310 		udelay(5);
1311 	}
1312 	sbcl = NCR_700_readb(host, SBCL_REG);
1313 	if((sbcl & SBCL_IO) == 0) {
1314 		/* mark as having been selected rather than reselected */
1315 		id = 0xff;
1316 	} else {
1317 		/* convert to real ID */
1318 		hostdata->reselection_id = id = bitmap_to_number(id);
1319 		DEBUG(("scsi%d:  Reselected by %d\n",
1320 		       host->host_no, id));
1321 	}
1322 	if(hostdata->state == NCR_700_HOST_BUSY && SCp != NULL) {
1323 		struct NCR_700_command_slot *slot =
1324 			(struct NCR_700_command_slot *)SCp->host_scribble;
1325 		DEBUG(("  ID %d WARNING: RESELECTION OF BUSY HOST, saving cmd %p, slot %p, addr %x [%04x], resume %x!\n", id, hostdata->cmd, slot, dsp, dsp - hostdata->pScript, resume_offset));
1326 
1327 		switch(dsp - hostdata->pScript) {
1328 		case Ent_Disconnect1:
1329 		case Ent_Disconnect2:
1330 			save_for_reselection(hostdata, SCp, Ent_Disconnect2 + hostdata->pScript);
1331 			break;
1332 		case Ent_Disconnect3:
1333 		case Ent_Disconnect4:
1334 			save_for_reselection(hostdata, SCp, Ent_Disconnect4 + hostdata->pScript);
1335 			break;
1336 		case Ent_Disconnect5:
1337 		case Ent_Disconnect6:
1338 			save_for_reselection(hostdata, SCp, Ent_Disconnect6 + hostdata->pScript);
1339 			break;
1340 		case Ent_Disconnect7:
1341 		case Ent_Disconnect8:
1342 			save_for_reselection(hostdata, SCp, Ent_Disconnect8 + hostdata->pScript);
1343 			break;
1344 		case Ent_Finish1:
1345 		case Ent_Finish2:
1346 			process_script_interrupt(A_GOOD_STATUS_AFTER_STATUS, dsp, SCp, host, hostdata);
1347 			break;
1348 
1349 		default:
1350 			slot->state = NCR_700_SLOT_QUEUED;
1351 			break;
1352 			}
1353 	}
1354 	hostdata->state = NCR_700_HOST_BUSY;
1355 	hostdata->cmd = NULL;
1356 	/* clear any stale simple tag message */
1357 	hostdata->msgin[1] = 0;
1358 	dma_sync_to_dev(hostdata, hostdata->msgin, MSG_ARRAY_SIZE);
1359 
1360 	if(id == 0xff) {
1361 		/* Selected as target, Ignore */
1362 		resume_offset = hostdata->pScript + Ent_SelectedAsTarget;
1363 	} else if(hostdata->tag_negotiated & (1<<id)) {
1364 		resume_offset = hostdata->pScript + Ent_GetReselectionWithTag;
1365 	} else {
1366 		resume_offset = hostdata->pScript + Ent_GetReselectionData;
1367 	}
1368 	return resume_offset;
1369 }
1370 
1371 static inline void
1372 NCR_700_clear_fifo(struct Scsi_Host *host) {
1373 	const struct NCR_700_Host_Parameters *hostdata
1374 		= (struct NCR_700_Host_Parameters *)host->hostdata[0];
1375 	if(hostdata->chip710) {
1376 		NCR_700_writeb(CLR_FIFO_710, host, CTEST8_REG);
1377 	} else {
1378 		NCR_700_writeb(CLR_FIFO, host, DFIFO_REG);
1379 	}
1380 }
1381 
1382 static inline void
1383 NCR_700_flush_fifo(struct Scsi_Host *host) {
1384 	const struct NCR_700_Host_Parameters *hostdata
1385 		= (struct NCR_700_Host_Parameters *)host->hostdata[0];
1386 	if(hostdata->chip710) {
1387 		NCR_700_writeb(FLUSH_DMA_FIFO_710, host, CTEST8_REG);
1388 		udelay(10);
1389 		NCR_700_writeb(0, host, CTEST8_REG);
1390 	} else {
1391 		NCR_700_writeb(FLUSH_DMA_FIFO, host, DFIFO_REG);
1392 		udelay(10);
1393 		NCR_700_writeb(0, host, DFIFO_REG);
1394 	}
1395 }
1396 
1397 
1398 /* The queue lock with interrupts disabled must be held on entry to
1399  * this function */
1400 STATIC int
1401 NCR_700_start_command(struct scsi_cmnd *SCp)
1402 {
1403 	struct NCR_700_command_slot *slot =
1404 		(struct NCR_700_command_slot *)SCp->host_scribble;
1405 	struct NCR_700_Host_Parameters *hostdata =
1406 		(struct NCR_700_Host_Parameters *)SCp->device->host->hostdata[0];
1407 	__u16 count = 1;	/* for IDENTIFY message */
1408 	u8 lun = SCp->device->lun;
1409 
1410 	if(hostdata->state != NCR_700_HOST_FREE) {
1411 		/* keep this inside the lock to close the race window where
1412 		 * the running command finishes on another CPU while we don't
1413 		 * change the state to queued on this one */
1414 		slot->state = NCR_700_SLOT_QUEUED;
1415 
1416 		DEBUG(("scsi%d: host busy, queueing command %p, slot %p\n",
1417 		       SCp->device->host->host_no, slot->cmnd, slot));
1418 		return 0;
1419 	}
1420 	hostdata->state = NCR_700_HOST_BUSY;
1421 	hostdata->cmd = SCp;
1422 	slot->state = NCR_700_SLOT_BUSY;
1423 	/* keep interrupts disabled until we have the command correctly
1424 	 * set up so we cannot take a selection interrupt */
1425 
1426 	hostdata->msgout[0] = NCR_700_identify((SCp->cmnd[0] != REQUEST_SENSE &&
1427 						slot->flags != NCR_700_FLAG_AUTOSENSE),
1428 					       lun);
1429 	/* for INQUIRY or REQUEST_SENSE commands, we cannot be sure
1430 	 * if the negotiated transfer parameters still hold, so
1431 	 * always renegotiate them */
1432 	if(SCp->cmnd[0] == INQUIRY || SCp->cmnd[0] == REQUEST_SENSE ||
1433 	   slot->flags == NCR_700_FLAG_AUTOSENSE) {
1434 		NCR_700_clear_flag(SCp->device, NCR_700_DEV_NEGOTIATED_SYNC);
1435 	}
1436 
1437 	/* REQUEST_SENSE is asking for contingent I_T_L(_Q) status.
1438 	 * If a contingent allegiance condition exists, the device
1439 	 * will refuse all tags, so send the request sense as untagged
1440 	 * */
1441 	if((hostdata->tag_negotiated & (1<<scmd_id(SCp)))
1442 	   && (slot->tag != SCSI_NO_TAG && SCp->cmnd[0] != REQUEST_SENSE &&
1443 	       slot->flags != NCR_700_FLAG_AUTOSENSE)) {
1444 		count += spi_populate_tag_msg(&hostdata->msgout[count], SCp);
1445 	}
1446 
1447 	if(hostdata->fast &&
1448 	   NCR_700_is_flag_clear(SCp->device, NCR_700_DEV_NEGOTIATED_SYNC)) {
1449 		count += spi_populate_sync_msg(&hostdata->msgout[count],
1450 				spi_period(SCp->device->sdev_target),
1451 				spi_offset(SCp->device->sdev_target));
1452 		NCR_700_set_flag(SCp->device, NCR_700_DEV_BEGIN_SYNC_NEGOTIATION);
1453 	}
1454 
1455 	script_patch_16(hostdata, hostdata->script, MessageCount, count);
1456 
1457 	script_patch_ID(hostdata, hostdata->script, Device_ID, 1<<scmd_id(SCp));
1458 
1459 	script_patch_32_abs(hostdata, hostdata->script, CommandAddress,
1460 			    slot->pCmd);
1461 	script_patch_16(hostdata, hostdata->script, CommandCount, SCp->cmd_len);
1462 	/* finally plumb the beginning of the SG list into the script
1463 	 * */
1464 	script_patch_32_abs(hostdata, hostdata->script,
1465 	                    SGScriptStartAddress, to32bit(&slot->pSG[0].ins));
1466 	NCR_700_clear_fifo(SCp->device->host);
1467 
1468 	if(slot->resume_offset == 0)
1469 		slot->resume_offset = hostdata->pScript;
1470 	/* now perform all the writebacks and invalidates */
1471 	dma_sync_to_dev(hostdata, hostdata->msgout, count);
1472 	dma_sync_from_dev(hostdata, hostdata->msgin, MSG_ARRAY_SIZE);
1473 	dma_sync_to_dev(hostdata, SCp->cmnd, SCp->cmd_len);
1474 	dma_sync_from_dev(hostdata, hostdata->status, 1);
1475 
1476 	/* set the synchronous period/offset */
1477 	NCR_700_writeb(NCR_700_get_SXFER(SCp->device),
1478 		       SCp->device->host, SXFER_REG);
1479 	NCR_700_writel(slot->temp, SCp->device->host, TEMP_REG);
1480 	NCR_700_writel(slot->resume_offset, SCp->device->host, DSP_REG);
1481 
1482 	return 1;
1483 }
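/* Illustrative sketch (byte values assume the standard SCSI message codes)
 * of the msgout buffer this routine can build for a tagged command that is
 * also renegotiating sync:
 *
 *	msgout[0]	IDENTIFY (disconnect allowed | LUN)
 *	msgout[1..2]	SIMPLE QUEUE TAG, tag		(spi_populate_tag_msg)
 *	msgout[3..7]	EXTENDED, 3, SDTR, period, offset (spi_populate_sync_msg)
 *
 * giving count == 8, which is what gets patched into MessageCount above. */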
1484 
1485 irqreturn_t
1486 NCR_700_intr(int irq, void *dev_id)
1487 {
1488 	struct Scsi_Host *host = (struct Scsi_Host *)dev_id;
1489 	struct NCR_700_Host_Parameters *hostdata =
1490 		(struct NCR_700_Host_Parameters *)host->hostdata[0];
1491 	__u8 istat;
1492 	__u32 resume_offset = 0;
1493 	__u8 pun = 0xff, lun = 0xff;
1494 	unsigned long flags;
1495 	int handled = 0;
1496 
1497 	/* Use the host lock to serialise access to the 53c700
1498 	 * hardware.  Note: In future, we may need to take the queue
1499 	 * lock to enter the done routines.  When that happens, we
1500 	 * need to ensure that for this driver, the host lock and the
1501 	 * queue lock point to the same thing. */
1502 	spin_lock_irqsave(host->host_lock, flags);
1503 	if((istat = NCR_700_readb(host, ISTAT_REG))
1504 	      & (SCSI_INT_PENDING | DMA_INT_PENDING)) {
1505 		__u32 dsps;
1506 		__u8 sstat0 = 0, dstat = 0;
1507 		__u32 dsp;
1508 		struct scsi_cmnd *SCp = hostdata->cmd;
1509 
1510 		handled = 1;
1511 
1512 		if(istat & SCSI_INT_PENDING) {
1513 			udelay(10);
1514 
1515 			sstat0 = NCR_700_readb(host, SSTAT0_REG);
1516 		}
1517 
1518 		if(istat & DMA_INT_PENDING) {
1519 			udelay(10);
1520 
1521 			dstat = NCR_700_readb(host, DSTAT_REG);
1522 		}
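		/* ISTAT only says which side raised the interrupt:
		 * SCSI events are reported in SSTAT0 and DMA/script
		 * events in DSTAT, so the pending status registers
		 * have been read above for the dispatch below. */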
1523 
1524 		dsps = NCR_700_readl(host, DSPS_REG);
1525 		dsp = NCR_700_readl(host, DSP_REG);
1526 
1527 		DEBUG(("scsi%d: istat %02x sstat0 %02x dstat %02x dsp %04x[%08x] dsps 0x%x\n",
1528 		       host->host_no, istat, sstat0, dstat,
1529 		       (dsp - (__u32)(hostdata->pScript))/4,
1530 		       dsp, dsps));
1531 
1532 		if(SCp != NULL) {
1533 			pun = SCp->device->id;
1534 			lun = SCp->device->lun;
1535 		}
1536 
1537 		if(sstat0 & SCSI_RESET_DETECTED) {
1538 			struct scsi_device *SDp;
1539 			int i;
1540 
1541 			hostdata->state = NCR_700_HOST_BUSY;
1542 
1543 			printk(KERN_ERR "scsi%d: Bus Reset detected, executing command %p, slot %p, dsp %08x[%04x]\n",
1544 			       host->host_no, SCp, SCp == NULL ? NULL : SCp->host_scribble, dsp, dsp - hostdata->pScript);
1545 
1546 			scsi_report_bus_reset(host, 0);
1547 
1548 			/* clear all the negotiated parameters */
1549 			__shost_for_each_device(SDp, host)
1550 				NCR_700_clear_flag(SDp, ~0);
1551 
1552 			/* clear all the slots and their pending commands */
1553 			for(i = 0; i < NCR_700_COMMAND_SLOTS_PER_HOST; i++) {
1554 				struct scsi_cmnd *SCp;
1555 				struct NCR_700_command_slot *slot =
1556 					&hostdata->slots[i];
1557 
1558 				if(slot->state == NCR_700_SLOT_FREE)
1559 					continue;
1560 
1561 				SCp = slot->cmnd;
1562 				printk(KERN_ERR " failing command because of reset, slot %p, cmnd %p\n",
1563 				       slot, SCp);
1564 				free_slot(slot, hostdata);
1565 				SCp->host_scribble = NULL;
1566 				NCR_700_set_depth(SCp->device, 0);
1567 				/* NOTE: deadlock potential here: we
1568 				 * rely on mid-layer guarantees that
1569 				 * scsi_done won't try to issue the
1570 				 * command again otherwise we'll
1571 				 * deadlock on the
1572 				 * hostdata->state_lock */
1573 				SCp->result = DID_RESET << 16;
1574 				scsi_done(SCp);
1575 			}
1576 			mdelay(25);
1577 			NCR_700_chip_setup(host);
1578 
1579 			hostdata->state = NCR_700_HOST_FREE;
1580 			hostdata->cmd = NULL;
1581 			/* signal back if this was an eh induced reset */
1582 			if(hostdata->eh_complete != NULL)
1583 				complete(hostdata->eh_complete);
1584 			goto out_unlock;
1585 		} else if(sstat0 & SELECTION_TIMEOUT) {
1586 			DEBUG(("scsi%d: (%d:%d) selection timeout\n",
1587 			       host->host_no, pun, lun));
1588 			NCR_700_scsi_done(hostdata, SCp, DID_NO_CONNECT<<16);
1589 		} else if(sstat0 & PHASE_MISMATCH) {
1590 			struct NCR_700_command_slot *slot = (SCp == NULL) ? NULL :
1591 				(struct NCR_700_command_slot *)SCp->host_scribble;
1592 
1593 			if(dsp == Ent_SendMessage + 8 + hostdata->pScript) {
1594 				/* It wants to reply to some part of
1595 				 * our message */
1596 #ifdef NCR_700_DEBUG
1597 				__u32 temp = NCR_700_readl(host, TEMP_REG);
1598 				int count = (hostdata->script[Ent_SendMessage/4] & 0xffffff) - ((NCR_700_readl(host, DBC_REG) & 0xffffff) + NCR_700_data_residual(host));
1599 				printk("scsi%d (%d:%d) PHASE MISMATCH IN SEND MESSAGE %d remain, return %p[%04x], phase %s\n", host->host_no, pun, lun, count, (void *)temp, temp - hostdata->pScript, sbcl_to_string(NCR_700_readb(host, SBCL_REG)));
1600 #endif
1601 				resume_offset = hostdata->pScript + Ent_SendMessagePhaseMismatch;
1602 			} else if (slot && dsp >= to32bit(&slot->pSG[0].ins) &&
1603 				  dsp <= to32bit(&slot->pSG[NCR_700_SG_SEGMENTS].ins)) {
1604 				int data_transfer = NCR_700_readl(host, DBC_REG) & 0xffffff;
1605 				int SGcount = (dsp - to32bit(&slot->pSG[0].ins))/sizeof(struct NCR_700_SG_List);
1606 				int residual = NCR_700_data_residual(host);
1607 				int i;
1608 #ifdef NCR_700_DEBUG
1609 				__u32 naddr = NCR_700_readl(host, DNAD_REG);
1610 
1611 				printk("scsi%d: (%d:%d) Expected phase mismatch in slot->SG[%d], transferred 0x%x\n",
1612 				       host->host_no, pun, lun,
1613 				       SGcount, data_transfer);
1614 				scsi_print_command(SCp);
1615 				if(residual) {
1616 					printk("scsi%d: (%d:%d) Expected phase mismatch in slot->SG[%d], transferred 0x%x, residual %d\n",
1617 				       host->host_no, pun, lun,
1618 				       SGcount, data_transfer, residual);
1619 				}
1620 #endif
1621 				data_transfer += residual;
1622 
1623 				if(data_transfer != 0) {
1624 					int count;
1625 					__u32 pAddr;
1626 
1627 					SGcount--;
1628 
1629 					count = (bS_to_cpu(slot->SG[SGcount].ins) & 0x00ffffff);
1630 					DEBUG(("DATA TRANSFER MISMATCH, count = %d, transferred %d\n", count, count-data_transfer));
1631 					slot->SG[SGcount].ins &= bS_to_host(0xff000000);
1632 					slot->SG[SGcount].ins |= bS_to_host(data_transfer);
1633 					pAddr = bS_to_cpu(slot->SG[SGcount].pAddr);
1634 					pAddr += (count - data_transfer);
1635 #ifdef NCR_700_DEBUG
1636 					if(pAddr != naddr) {
1637 						printk("scsi%d (%d:%d) transfer mismatch pAddr=%lx, naddr=%lx, data_transfer=%d, residual=%d\n", host->host_no, pun, lun, (unsigned long)pAddr, (unsigned long)naddr, data_transfer, residual);
1638 					}
1639 #endif
1640 					slot->SG[SGcount].pAddr = bS_to_host(pAddr);
1641 				}
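				/* e.g. if the interrupted entry was a
				 * 4096 byte move and DBC plus the FIFO
				 * residual show 1024 bytes still to go,
				 * it becomes a 1024 byte move starting
				 * 3072 bytes further into the buffer */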
1642 				/* set the executed moves to nops */
1643 				for(i=0; i<SGcount; i++) {
1644 					slot->SG[i].ins = bS_to_host(SCRIPT_NOP);
1645 					slot->SG[i].pAddr = 0;
1646 				}
1647 				dma_sync_to_dev(hostdata, slot->SG, sizeof(slot->SG));
1648 				/* and pretend we disconnected after
1649 				 * the command phase */
1650 				resume_offset = hostdata->pScript + Ent_MsgInDuringData;
1651 				/* make sure all the data is flushed */
1652 				NCR_700_flush_fifo(host);
1653 			} else {
1654 				__u8 sbcl = NCR_700_readb(host, SBCL_REG);
1655 				printk(KERN_ERR "scsi%d: (%d:%d) phase mismatch at %04x, phase %s\n",
1656 				       host->host_no, pun, lun, dsp - hostdata->pScript, sbcl_to_string(sbcl));
1657 				NCR_700_internal_bus_reset(host);
1658 			}
1659 
1660 		} else if(sstat0 & SCSI_GROSS_ERROR) {
1661 			printk(KERN_ERR "scsi%d: (%d:%d) GROSS ERROR\n",
1662 			       host->host_no, pun, lun);
1663 			NCR_700_scsi_done(hostdata, SCp, DID_ERROR<<16);
1664 		} else if(sstat0 & PARITY_ERROR) {
1665 			printk(KERN_ERR "scsi%d: (%d:%d) PARITY ERROR\n",
1666 			       host->host_no, pun, lun);
1667 			NCR_700_scsi_done(hostdata, SCp, DID_ERROR<<16);
1668 		} else if(dstat & SCRIPT_INT_RECEIVED) {
1669 			DEBUG(("scsi%d: (%d:%d) ====>SCRIPT INTERRUPT<====\n",
1670 			       host->host_no, pun, lun));
1671 			resume_offset = process_script_interrupt(dsps, dsp, SCp, host, hostdata);
1672 		} else if(dstat & (ILGL_INST_DETECTED)) {
1673 			printk(KERN_ERR "scsi%d: (%d:%d) Illegal Instruction detected at 0x%08x[0x%x]!!!\n"
1674 			       "         Please email James.Bottomley@HansenPartnership.com with the details\n",
1675 			       host->host_no, pun, lun,
1676 			       dsp, dsp - hostdata->pScript);
1677 			NCR_700_scsi_done(hostdata, SCp, DID_ERROR<<16);
1678 		} else if(dstat & (WATCH_DOG_INTERRUPT|ABORTED)) {
1679 			printk(KERN_ERR "scsi%d: (%d:%d) serious DMA problem, dstat=%02x\n",
1680 			       host->host_no, pun, lun, dstat);
1681 			NCR_700_scsi_done(hostdata, SCp, DID_ERROR<<16);
1682 		}
1683 
1684 
1685 		/* NOTE: selection interrupt processing MUST occur
1686 		 * after script interrupt processing to correctly cope
1687 		 * with the case where we process a disconnect and
1688 		 * then get reselected before we process the
1689 		 * disconnection */
1690 		if(sstat0 & SELECTED) {
1691 			/* FIXME: It currently takes at least FOUR
1692 			 * interrupts to complete a command that
1693 			 * disconnects: one for the disconnect, one
1694 			 * for the reselection, one to get the
1695 			 * reselection data and one to complete the
1696 			 * command.  If we guess the reselected
1697 			 * command here and prepare it, we only need
1698 			 * to get a reselection data interrupt if we
1699 			 * guessed wrongly.  Since the interrupt
1700 			 * overhead is much greater than the command
1701 			 * setup, this would be an efficient
1702 			 * optimisation particularly as we probably
1703 			 * only have one outstanding command on a
1704 			 * target most of the time */
1705 
1706 			resume_offset = process_selection(host, dsp);
1707 
1708 		}
1709 
1710 	}
1711 
1712 	if(resume_offset) {
1713 		if(hostdata->state != NCR_700_HOST_BUSY) {
1714 			printk(KERN_ERR "scsi%d: Driver error: resume at 0x%08x [0x%04x] with non busy host!\n",
1715 			       host->host_no, resume_offset, resume_offset - hostdata->pScript);
1716 			hostdata->state = NCR_700_HOST_BUSY;
1717 		}
1718 
1719 		DEBUG(("Attempting to resume at %x\n", resume_offset));
1720 		NCR_700_clear_fifo(host);
1721 		NCR_700_writel(resume_offset, host, DSP_REG);
1722 	}
1723 	/* There is probably a technical no-no about this: If we're a
1724 	 * shared interrupt and we got this interrupt because the
1725 	 * other device needs servicing not us, we're still going to
1726 	 * check our queued commands here---of course, there shouldn't
1727 	 * be any outstanding.... */
1728 	if(hostdata->state == NCR_700_HOST_FREE) {
1729 		int i;
1730 
1731 		for(i = 0; i < NCR_700_COMMAND_SLOTS_PER_HOST; i++) {
1732 			/* fairness: always run the queue from the last
1733 			 * position we left off */
1734 			int j = (i + hostdata->saved_slot_position)
1735 				% NCR_700_COMMAND_SLOTS_PER_HOST;
1736 
1737 			if(hostdata->slots[j].state != NCR_700_SLOT_QUEUED)
1738 				continue;
1739 			if(NCR_700_start_command(hostdata->slots[j].cmnd)) {
1740 				DEBUG(("scsi%d: Issuing saved command slot %p, cmd %p\t\n",
1741 				       host->host_no, &hostdata->slots[j],
1742 				       hostdata->slots[j].cmnd));
1743 				hostdata->saved_slot_position = j + 1;
1744 			}
1745 
1746 			break;
1747 		}
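		/* at most one queued command is restarted per
		 * interrupt: once a command is issued the host is
		 * busy again, so the scan stops at the first queued
		 * slot it finds */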
1748 	}
1749  out_unlock:
1750 	spin_unlock_irqrestore(host->host_lock, flags);
1751 	return IRQ_RETVAL(handled);
1752 }
1753 
1754 static int NCR_700_queuecommand_lck(struct scsi_cmnd *SCp)
1755 {
1756 	struct NCR_700_Host_Parameters *hostdata =
1757 		(struct NCR_700_Host_Parameters *)SCp->device->host->hostdata[0];
1758 	__u32 move_ins;
1759 	struct NCR_700_command_slot *slot;
1760 
1761 	if(hostdata->command_slot_count >= NCR_700_COMMAND_SLOTS_PER_HOST) {
1762 		/* We're over our allocation, this should never happen
1763 		 * since we report the max allocation to the mid layer */
1764 		printk(KERN_WARNING "scsi%d: Command depth has gone over queue depth\n", SCp->device->host->host_no);
1765 		return 1;
1766 	}
1767 	/* check for untagged commands.  We cannot have any outstanding
1768 	 * commands if we accept them.  Commands could be untagged because:
1769 	 *
1770 	 * - The tag negotiated bitmap is clear
1771 	 * - The blk layer sent an untagged command
1772 	 */
1773 	if(NCR_700_get_depth(SCp->device) != 0
1774 	   && (!(hostdata->tag_negotiated & (1<<scmd_id(SCp)))
1775 	       || !(SCp->flags & SCMD_TAGGED))) {
1776 		CDEBUG(KERN_ERR, SCp, "has non zero depth %d\n",
1777 		       NCR_700_get_depth(SCp->device));
1778 		return SCSI_MLQUEUE_DEVICE_BUSY;
1779 	}
1780 	if(NCR_700_get_depth(SCp->device) >= SCp->device->queue_depth) {
1781 		CDEBUG(KERN_ERR, SCp, "has max tag depth %d\n",
1782 		       NCR_700_get_depth(SCp->device));
1783 		return SCSI_MLQUEUE_DEVICE_BUSY;
1784 	}
1785 	NCR_700_set_depth(SCp->device, NCR_700_get_depth(SCp->device) + 1);
1786 
1787 	/* begin the command here */
1788 	/* no need to check for NULL, test for command_slot_count above
1789 	 * ensures a slot is free */
1790 	slot = find_empty_slot(hostdata);
1791 
1792 	slot->cmnd = SCp;
1793 
1794 	SCp->host_scribble = (unsigned char *)slot;
1795 
1796 #ifdef NCR_700_DEBUG
1797 	printk("53c700: scsi%d, command ", SCp->device->host->host_no);
1798 	scsi_print_command(SCp);
1799 #endif
1800 	if ((SCp->flags & SCMD_TAGGED)
1801 	   && (hostdata->tag_negotiated &(1<<scmd_id(SCp))) == 0
1802 	   && NCR_700_get_tag_neg_state(SCp->device) == NCR_700_START_TAG_NEGOTIATION) {
1803 		scmd_printk(KERN_ERR, SCp, "Enabling Tag Command Queuing\n");
1804 		hostdata->tag_negotiated |= (1<<scmd_id(SCp));
1805 		NCR_700_set_tag_neg_state(SCp->device, NCR_700_DURING_TAG_NEGOTIATION);
1806 	}
1807 
1808 	/* here we may have to process an untagged command.  The gate
1809 	 * above ensures that this will be the only one outstanding,
1810 	 * so clear the tag negotiated bit.
1811 	 *
1812 	 * FIXME: This will royally screw up on multiple LUN devices
1813 	 * */
1814 	if (!(SCp->flags & SCMD_TAGGED)
1815 	   && (hostdata->tag_negotiated &(1<<scmd_id(SCp)))) {
1816 		scmd_printk(KERN_INFO, SCp, "Disabling Tag Command Queuing\n");
1817 		hostdata->tag_negotiated &= ~(1<<scmd_id(SCp));
1818 	}
1819 
1820 	if ((hostdata->tag_negotiated & (1<<scmd_id(SCp))) &&
1821 	    SCp->device->simple_tags) {
1822 		slot->tag = scsi_cmd_to_rq(SCp)->tag;
1823 		CDEBUG(KERN_DEBUG, SCp, "sending out tag %d, slot %p\n",
1824 		       slot->tag, slot);
1825 	} else {
1826 		struct NCR_700_Device_Parameters *p = SCp->device->hostdata;
1827 
1828 		slot->tag = SCSI_NO_TAG;
1829 		/* save current command for reselection */
1830 		p->current_cmnd = SCp;
1831 	}
1832 	/* sanity check: some of the commands generated by the mid-layer
1833 	 * have an eccentric idea of their sc_data_direction */
1834 	if(!scsi_sg_count(SCp) && !scsi_bufflen(SCp) &&
1835 	   SCp->sc_data_direction != DMA_NONE) {
1836 #ifdef NCR_700_DEBUG
1837 		printk("53c700: Command");
1838 		scsi_print_command(SCp);
1839 		printk("Has wrong data direction %d\n", SCp->sc_data_direction);
1840 #endif
1841 		SCp->sc_data_direction = DMA_NONE;
1842 	}
1843 
1844 	switch (SCp->cmnd[0]) {
1845 	case REQUEST_SENSE:
1846 		/* clear the internal sense magic */
1847 		SCp->cmnd[6] = 0;
1848 		fallthrough;
1849 	default:
1850 		/* OK, get it from the command */
1851 		switch(SCp->sc_data_direction) {
1852 		case DMA_BIDIRECTIONAL:
1853 		default:
1854 			printk(KERN_ERR "53c700: Unknown command for data direction ");
1855 			scsi_print_command(SCp);
1856 
1857 			move_ins = 0;
1858 			break;
1859 		case DMA_NONE:
1860 			move_ins = 0;
1861 			break;
1862 		case DMA_FROM_DEVICE:
1863 			move_ins = SCRIPT_MOVE_DATA_IN;
1864 			break;
1865 		case DMA_TO_DEVICE:
1866 			move_ins = SCRIPT_MOVE_DATA_OUT;
1867 			break;
1868 		}
1869 	}
1870 
1871 	/* now build the scatter gather list */
1872 	if(move_ins != 0) {
1873 		int i;
1874 		int sg_count;
1875 		dma_addr_t vPtr = 0;
1876 		struct scatterlist *sg;
1877 		__u32 count = 0;
1878 
1879 		sg_count = scsi_dma_map(SCp);
1880 		BUG_ON(sg_count < 0);
1881 
1882 		scsi_for_each_sg(SCp, sg, sg_count, i) {
1883 			vPtr = sg_dma_address(sg);
1884 			count = sg_dma_len(sg);
1885 
1886 			slot->SG[i].ins = bS_to_host(move_ins | count);
1887 			DEBUG((" scatter block %d: move %d[%08x] from 0x%lx\n",
1888 			       i, count, slot->SG[i].ins, (unsigned long)vPtr));
1889 			slot->SG[i].pAddr = bS_to_host(vPtr);
1890 		}
1891 		slot->SG[i].ins = bS_to_host(SCRIPT_RETURN);
1892 		slot->SG[i].pAddr = 0;
1893 		dma_sync_to_dev(hostdata, slot->SG, sizeof(slot->SG));
1894 		DEBUG((" SETTING %p to %x\n",
1895 		       (&slot->pSG[i].ins),
1896 		       slot->SG[i].ins));
1897 	}
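	/* slot->SG now mirrors the DMA-mapped scatterlist as one
	 * script MOVE per segment, terminated by a RETURN, and has
	 * been written back so the chip sees the current list. */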
1898 	slot->resume_offset = 0;
1899 	slot->pCmd = dma_map_single(hostdata->dev, SCp->cmnd,
1900 				    MAX_COMMAND_SIZE, DMA_TO_DEVICE);
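	/* pCmd is the bus address of the CDB; NCR_700_start_command()
	 * patches it into the script as CommandAddress together with
	 * the command length. */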
1901 	NCR_700_start_command(SCp);
1902 	return 0;
1903 }
1904 
1905 STATIC DEF_SCSI_QCMD(NCR_700_queuecommand)
1906 
1907 STATIC int
1908 NCR_700_abort(struct scsi_cmnd * SCp)
1909 {
1910 	struct NCR_700_command_slot *slot;
1911 
1912 	scmd_printk(KERN_INFO, SCp, "abort command\n");
1913 
1914 	slot = (struct NCR_700_command_slot *)SCp->host_scribble;
1915 
1916 	if(slot == NULL)
1917 		/* no outstanding command to abort */
1918 		return SUCCESS;
1919 	if(SCp->cmnd[0] == TEST_UNIT_READY) {
1920 		/* FIXME: This is because of a problem in the new
1921 		 * error handler.  When it is in error recovery, it
1922 		 * will send a TUR to a device it thinks may still be
1923 		 * showing a problem.  If the TUR isn't responded to,
1924 		 * it will abort it and mark the device off line.
1925 		 * Unfortunately, it does no other error recovery, so
1926 		 * this would leave us with an outstanding command
1927 		 * occupying a slot.  Rather than allow this to
1928 		 * happen, we issue a bus reset to force all
1929 		 * outstanding commands to terminate here. */
1930 		NCR_700_internal_bus_reset(SCp->device->host);
1931 		/* still drop through and return failed */
1932 	}
1933 	return FAILED;
1934 
1935 }
1936 
1937 STATIC int
1938 NCR_700_host_reset(struct scsi_cmnd * SCp)
1939 {
1940 	DECLARE_COMPLETION_ONSTACK(complete);
1941 	struct NCR_700_Host_Parameters *hostdata =
1942 		(struct NCR_700_Host_Parameters *)SCp->device->host->hostdata[0];
1943 
1944 	scmd_printk(KERN_INFO, SCp,
1945 		"New error handler wants HOST reset, cmd %p\n\t", SCp);
1946 	scsi_print_command(SCp);
1947 
1948 	/* In theory, eh_complete should always be null because the
1949 	 * eh is single threaded, but just in case we're handling a
1950 	 * reset via sg or something */
1951 	spin_lock_irq(SCp->device->host->host_lock);
1952 	while (hostdata->eh_complete != NULL) {
1953 		spin_unlock_irq(SCp->device->host->host_lock);
1954 		msleep_interruptible(100);
1955 		spin_lock_irq(SCp->device->host->host_lock);
1956 	}
1957 
1958 	hostdata->eh_complete = &complete;
1959 	NCR_700_internal_bus_reset(SCp->device->host);
1960 	NCR_700_chip_reset(SCp->device->host);
1961 
1962 	spin_unlock_irq(SCp->device->host->host_lock);
1963 	wait_for_completion(&complete);
1964 	spin_lock_irq(SCp->device->host->host_lock);
1965 
1966 	hostdata->eh_complete = NULL;
1967 	/* Revalidate the transport parameters of the failing device */
1968 	if(hostdata->fast)
1969 		spi_schedule_dv_device(SCp->device);
1970 
1971 	spin_unlock_irq(SCp->device->host->host_lock);
1972 	return SUCCESS;
1973 }
1974 
1975 STATIC void
1976 NCR_700_set_period(struct scsi_target *STp, int period)
1977 {
1978 	struct Scsi_Host *SHp = dev_to_shost(STp->dev.parent);
1979 	struct NCR_700_Host_Parameters *hostdata =
1980 		(struct NCR_700_Host_Parameters *)SHp->hostdata[0];
1981 
1982 	if(!hostdata->fast)
1983 		return;
1984 
1985 	if(period < hostdata->min_period)
1986 		period = hostdata->min_period;
1987 
1988 	spi_period(STp) = period;
1989 	spi_flags(STp) &= ~(NCR_700_DEV_NEGOTIATED_SYNC |
1990 			    NCR_700_DEV_BEGIN_SYNC_NEGOTIATION);
1991 	spi_flags(STp) |= NCR_700_DEV_PRINT_SYNC_NEGOTIATION;
1992 }
1993 
1994 STATIC void
1995 NCR_700_set_offset(struct scsi_target *STp, int offset)
1996 {
1997 	struct Scsi_Host *SHp = dev_to_shost(STp->dev.parent);
1998 	struct NCR_700_Host_Parameters *hostdata =
1999 		(struct NCR_700_Host_Parameters *)SHp->hostdata[0];
2000 	int max_offset = hostdata->chip710
2001 		? NCR_710_MAX_OFFSET : NCR_700_MAX_OFFSET;
2002 
2003 	if(!hostdata->fast)
2004 		return;
2005 
2006 	if(offset > max_offset)
2007 		offset = max_offset;
2008 
2009 	/* if we're currently async, make sure the period is reasonable */
2010 	if(spi_offset(STp) == 0 && (spi_period(STp) < hostdata->min_period ||
2011 				    spi_period(STp) > 0xff))
2012 		spi_period(STp) = hostdata->min_period;
2013 
2014 	spi_offset(STp) = offset;
2015 	spi_flags(STp) &= ~(NCR_700_DEV_NEGOTIATED_SYNC |
2016 			    NCR_700_DEV_BEGIN_SYNC_NEGOTIATION);
2017 	spi_flags(STp) |= NCR_700_DEV_PRINT_SYNC_NEGOTIATION;
2018 }
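/* Both setters above clear NCR_700_DEV_NEGOTIATED_SYNC, so a new SDTR
 * using the updated period/offset is sent the next time
 * NCR_700_start_command() finds the flag clear for the device. */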
2019 
2020 STATIC int
2021 NCR_700_sdev_init(struct scsi_device *SDp)
2022 {
2023 	SDp->hostdata = kzalloc(sizeof(struct NCR_700_Device_Parameters),
2024 				GFP_KERNEL);
2025 
2026 	if (!SDp->hostdata)
2027 		return -ENOMEM;
2028 
2029 	return 0;
2030 }
2031 
2032 STATIC int
2033 NCR_700_sdev_configure(struct scsi_device *SDp, struct queue_limits *lim)
2034 {
2035 	struct NCR_700_Host_Parameters *hostdata =
2036 		(struct NCR_700_Host_Parameters *)SDp->host->hostdata[0];
2037 
2038 	/* to do here: allocate memory; build a queue_full list */
2039 	if(SDp->tagged_supported) {
2040 		scsi_change_queue_depth(SDp, NCR_700_DEFAULT_TAGS);
2041 		NCR_700_set_tag_neg_state(SDp, NCR_700_START_TAG_NEGOTIATION);
2042 	}
2043 
2044 	if(hostdata->fast) {
2045 		/* Find the correct offset and period via domain validation */
2046 		if (!spi_initial_dv(SDp->sdev_target))
2047 			spi_dv_device(SDp);
2048 	} else {
2049 		spi_offset(SDp->sdev_target) = 0;
2050 		spi_period(SDp->sdev_target) = 0;
2051 	}
2052 	return 0;
2053 }
2054 
2055 STATIC void
2056 NCR_700_sdev_destroy(struct scsi_device *SDp)
2057 {
2058 	kfree(SDp->hostdata);
2059 	SDp->hostdata = NULL;
2060 }
2061 
2062 static int
2063 NCR_700_change_queue_depth(struct scsi_device *SDp, int depth)
2064 {
2065 	if (depth > NCR_700_MAX_TAGS)
2066 		depth = NCR_700_MAX_TAGS;
2067 	return scsi_change_queue_depth(SDp, depth);
2068 }
2069 
2070 static ssize_t
2071 NCR_700_show_active_tags(struct device *dev, struct device_attribute *attr, char *buf)
2072 {
2073 	struct scsi_device *SDp = to_scsi_device(dev);
2074 
2075 	return sysfs_emit(buf, "%d\n", NCR_700_get_depth(SDp));
2076 }
2077 
2078 static struct device_attribute NCR_700_active_tags_attr = {
2079 	.attr = {
2080 		.name =		"active_tags",
2081 		.mode =		S_IRUGO,
2082 	},
2083 	.show = NCR_700_show_active_tags,
2084 };
2085 
2086 STATIC struct attribute *NCR_700_dev_attrs[] = {
2087 	&NCR_700_active_tags_attr.attr,
2088 	NULL,
2089 };
2090 
2091 ATTRIBUTE_GROUPS(NCR_700_dev);
2092 
2093 EXPORT_SYMBOL(NCR_700_detect);
2094 EXPORT_SYMBOL(NCR_700_release);
2095 EXPORT_SYMBOL(NCR_700_intr);
2096 
2097 static struct spi_function_template NCR_700_transport_functions =  {
2098 	.set_period	= NCR_700_set_period,
2099 	.show_period	= 1,
2100 	.set_offset	= NCR_700_set_offset,
2101 	.show_offset	= 1,
2102 };
2103 
2104 static int __init NCR_700_init(void)
2105 {
2106 	NCR_700_transport_template = spi_attach_transport(&NCR_700_transport_functions);
2107 	if(!NCR_700_transport_template)
2108 		return -ENODEV;
2109 	return 0;
2110 }
2111 
2112 static void __exit NCR_700_exit(void)
2113 {
2114 	spi_release_transport(NCR_700_transport_template);
2115 }
2116 
2117 module_init(NCR_700_init);
2118 module_exit(NCR_700_exit);
2119 
2120