xref: /linux/drivers/scsi/sym53c8xx_2/sym_glue.c (revision bdd1a21b52557ea8f61d0a5dc2f77151b576eb70)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * Device driver for the SYMBIOS/LSILOGIC 53C8XX and 53C1010 family
4  * of PCI-SCSI IO processors.
5  *
6  * Copyright (C) 1999-2001  Gerard Roudier <groudier@free.fr>
7  * Copyright (c) 2003-2005  Matthew Wilcox <matthew@wil.cx>
8  *
9  * This driver is derived from the Linux sym53c8xx driver.
10  * Copyright (C) 1998-2000  Gerard Roudier
11  *
12  * The sym53c8xx driver is derived from the ncr53c8xx driver that had been
13  * a port of the FreeBSD ncr driver to Linux-1.2.13.
14  *
15  * The original ncr driver has been written for 386bsd and FreeBSD by
16  *         Wolfgang Stanglmeier        <wolf@cologne.de>
17  *         Stefan Esser                <se@mi.Uni-Koeln.de>
18  * Copyright (C) 1994  Wolfgang Stanglmeier
19  *
20  * Other major contributions:
21  *
22  * NVRAM detection and reading.
23  * Copyright (C) 1997 Richard Waltham <dormouse@farsrobt.demon.co.uk>
24  *
25  *-----------------------------------------------------------------------------
26  */
27 #include <linux/ctype.h>
28 #include <linux/init.h>
29 #include <linux/module.h>
30 #include <linux/moduleparam.h>
31 #include <linux/spinlock.h>
32 #include <scsi/scsi.h>
33 #include <scsi/scsi_tcq.h>
34 #include <scsi/scsi_device.h>
35 #include <scsi/scsi_transport.h>
36 
37 #include "sym_glue.h"
38 #include "sym_nvram.h"
39 
40 #define NAME53C		"sym53c"
41 #define NAME53C8XX	"sym53c8xx"
42 
43 struct sym_driver_setup sym_driver_setup = SYM_LINUX_DRIVER_SETUP;
44 unsigned int sym_debug_flags = 0;
45 
46 static char *excl_string;
47 static char *safe_string;
48 module_param_named(cmd_per_lun, sym_driver_setup.max_tag, ushort, 0);
49 module_param_named(burst, sym_driver_setup.burst_order, byte, 0);
50 module_param_named(led, sym_driver_setup.scsi_led, byte, 0);
51 module_param_named(diff, sym_driver_setup.scsi_diff, byte, 0);
52 module_param_named(irqm, sym_driver_setup.irq_mode, byte, 0);
53 module_param_named(buschk, sym_driver_setup.scsi_bus_check, byte, 0);
54 module_param_named(hostid, sym_driver_setup.host_id, byte, 0);
55 module_param_named(verb, sym_driver_setup.verbose, byte, 0);
56 module_param_named(debug, sym_debug_flags, uint, 0);
57 module_param_named(settle, sym_driver_setup.settle_delay, byte, 0);
58 module_param_named(nvram, sym_driver_setup.use_nvram, byte, 0);
59 module_param_named(excl, excl_string, charp, 0);
60 module_param_named(safe, safe_string, charp, 0);
61 
62 MODULE_PARM_DESC(cmd_per_lun, "The maximum number of tags to use by default");
63 MODULE_PARM_DESC(burst, "Maximum burst.  0 to disable, 255 to read from registers");
64 MODULE_PARM_DESC(led, "Set to 1 to enable LED support");
65 MODULE_PARM_DESC(diff, "0 for no differential mode, 1 for BIOS, 2 for always, 3 for not GPIO3");
66 MODULE_PARM_DESC(irqm, "0 for open drain, 1 to leave alone, 2 for totem pole");
67 MODULE_PARM_DESC(buschk, "0 to not check, 1 for detach on error, 2 for warn on error");
68 MODULE_PARM_DESC(hostid, "The SCSI ID to use for the host adapters");
69 MODULE_PARM_DESC(verb, "0 for minimal verbosity, 1 for normal, 2 for excessive");
70 MODULE_PARM_DESC(debug, "Set bits to enable debugging");
71 MODULE_PARM_DESC(settle, "Settle delay in seconds.  Default 3");
72 MODULE_PARM_DESC(nvram, "Option currently not used");
73 MODULE_PARM_DESC(excl, "List ioport addresses here to prevent controllers from being attached");
74 MODULE_PARM_DESC(safe, "Set other settings to a \"safe mode\"");
75 
76 MODULE_LICENSE("GPL");
77 MODULE_VERSION(SYM_VERSION);
78 MODULE_AUTHOR("Matthew Wilcox <matthew@wil.cx>");
79 MODULE_DESCRIPTION("NCR, Symbios and LSI 8xx and 1010 PCI SCSI adapters");
80 
81 static void sym2_setup_params(void)
82 {
83 	char *p = excl_string;
84 	int xi = 0;
85 
86 	while (p && (xi < 8)) {
87 		char *next_p;
88 		int val = (int) simple_strtoul(p, &next_p, 0);
89 		sym_driver_setup.excludes[xi++] = val;
90 		p = next_p;
91 	}
92 
93 	if (safe_string) {
94 		if (*safe_string == 'y') {
95 			sym_driver_setup.max_tag = 0;
96 			sym_driver_setup.burst_order = 0;
97 			sym_driver_setup.scsi_led = 0;
98 			sym_driver_setup.scsi_diff = 1;
99 			sym_driver_setup.irq_mode = 0;
100 			sym_driver_setup.scsi_bus_check = 2;
101 			sym_driver_setup.host_id = 7;
102 			sym_driver_setup.verbose = 2;
103 			sym_driver_setup.settle_delay = 10;
104 			sym_driver_setup.use_nvram = 1;
105 		} else if (*safe_string != 'n') {
106 			printk(KERN_WARNING NAME53C8XX "Ignoring parameter %s"
107 					" passed to safe option", safe_string);
108 		}
109 	}
110 }
111 
112 static struct scsi_transport_template *sym2_transport_template = NULL;
113 
114 /*
115  *  Driver private area in the SCSI command structure.
116  */
117 struct sym_ucmd {		/* Override the SCSI pointer structure */
118 	struct completion *eh_done;		/* SCSI error handling */
119 };
120 
121 #define SYM_UCMD_PTR(cmd)  ((struct sym_ucmd *)(&(cmd)->SCp))
122 #define SYM_SOFTC_PTR(cmd) sym_get_hcb(cmd->device->host)
123 
124 /*
125  *  Complete a pending CAM CCB.
126  */
127 void sym_xpt_done(struct sym_hcb *np, struct scsi_cmnd *cmd)
128 {
129 	struct sym_ucmd *ucmd = SYM_UCMD_PTR(cmd);
130 	BUILD_BUG_ON(sizeof(struct scsi_pointer) < sizeof(struct sym_ucmd));
131 
132 	if (ucmd->eh_done)
133 		complete(ucmd->eh_done);
134 
135 	scsi_dma_unmap(cmd);
136 	cmd->scsi_done(cmd);
137 }
138 
139 /*
140  *  Tell the SCSI layer about a BUS RESET.
141  */
142 void sym_xpt_async_bus_reset(struct sym_hcb *np)
143 {
144 	printf_notice("%s: SCSI BUS has been reset.\n", sym_name(np));
145 	np->s.settle_time = jiffies + sym_driver_setup.settle_delay * HZ;
146 	np->s.settle_time_valid = 1;
147 	if (sym_verbose >= 2)
148 		printf_info("%s: command processing suspended for %d seconds\n",
149 			    sym_name(np), sym_driver_setup.settle_delay);
150 }
151 
152 /*
153  *  Choose the more appropriate CAM status if
154  *  the IO encountered an extended error.
155  */
156 static int sym_xerr_cam_status(int cam_status, int x_status)
157 {
158 	if (x_status) {
159 		if (x_status & XE_PARITY_ERR)
160 			cam_status = DID_PARITY;
161 		else
162 			cam_status = DID_ERROR;
163 	}
164 	return cam_status;
165 }
166 
167 /*
168  *  Build CAM result for a failed or auto-sensed IO.
169  */
170 void sym_set_cam_result_error(struct sym_hcb *np, struct sym_ccb *cp, int resid)
171 {
172 	struct scsi_cmnd *cmd = cp->cmd;
173 	u_int cam_status, scsi_status;
174 
175 	cam_status  = DID_OK;
176 	scsi_status = cp->ssss_status;
177 
178 	if (cp->host_flags & HF_SENSE) {
179 		scsi_status = cp->sv_scsi_status;
180 		resid = cp->sv_resid;
181 		if (sym_verbose && cp->sv_xerr_status)
182 			sym_print_xerr(cmd, cp->sv_xerr_status);
183 		if (cp->host_status == HS_COMPLETE &&
184 		    cp->ssss_status == S_GOOD &&
185 		    cp->xerr_status == 0) {
186 			cam_status = sym_xerr_cam_status(DID_OK,
187 							 cp->sv_xerr_status);
188 			/*
189 			 *  Bounce back the sense data to user.
190 			 */
191 			memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
192 			memcpy(cmd->sense_buffer, cp->sns_bbuf,
193 			       min(SCSI_SENSE_BUFFERSIZE, SYM_SNS_BBUF_LEN));
194 #if 0
195 			/*
196 			 *  If the device reports a UNIT ATTENTION condition
197 			 *  due to a RESET condition, we should consider all
198 			 *  disconnect CCBs for this unit as aborted.
199 			 */
200 			if (1) {
201 				u_char *p;
202 				p  = (u_char *) cmd->sense_data;
203 				if (p[0]==0x70 && p[2]==0x6 && p[12]==0x29)
204 					sym_clear_tasks(np, DID_ABORT,
205 							cp->target,cp->lun, -1);
206 			}
207 #endif
208 		} else {
209 			/*
210 			 * Error return from our internal request sense.  This
211 			 * is bad: we must clear the contingent allegiance
212 			 * condition otherwise the device will always return
213 			 * BUSY.  Use a big stick.
214 			 */
215 			sym_reset_scsi_target(np, cmd->device->id);
216 			cam_status = DID_ERROR;
217 		}
218 	} else if (cp->host_status == HS_COMPLETE) 	/* Bad SCSI status */
219 		cam_status = DID_OK;
220 	else if (cp->host_status == HS_SEL_TIMEOUT)	/* Selection timeout */
221 		cam_status = DID_NO_CONNECT;
222 	else if (cp->host_status == HS_UNEXPECTED)	/* Unexpected BUS FREE*/
223 		cam_status = DID_ERROR;
224 	else {						/* Extended error */
225 		if (sym_verbose) {
226 			sym_print_addr(cmd, "COMMAND FAILED (%x %x %x).\n",
227 				cp->host_status, cp->ssss_status,
228 				cp->xerr_status);
229 		}
230 		/*
231 		 *  Set the most appropriate value for CAM status.
232 		 */
233 		cam_status = sym_xerr_cam_status(DID_ERROR, cp->xerr_status);
234 	}
235 	scsi_set_resid(cmd, resid);
236 	cmd->result = (cam_status << 16) | scsi_status;
237 }
238 
239 static int sym_scatter(struct sym_hcb *np, struct sym_ccb *cp, struct scsi_cmnd *cmd)
240 {
241 	int segment;
242 	int use_sg;
243 
244 	cp->data_len = 0;
245 
246 	use_sg = scsi_dma_map(cmd);
247 	if (use_sg > 0) {
248 		struct scatterlist *sg;
249 		struct sym_tcb *tp = &np->target[cp->target];
250 		struct sym_tblmove *data;
251 
252 		if (use_sg > SYM_CONF_MAX_SG) {
253 			scsi_dma_unmap(cmd);
254 			return -1;
255 		}
256 
257 		data = &cp->phys.data[SYM_CONF_MAX_SG - use_sg];
258 
259 		scsi_for_each_sg(cmd, sg, use_sg, segment) {
260 			dma_addr_t baddr = sg_dma_address(sg);
261 			unsigned int len = sg_dma_len(sg);
262 
263 			if ((len & 1) && (tp->head.wval & EWS)) {
264 				len++;
265 				cp->odd_byte_adjustment++;
266 			}
267 
268 			sym_build_sge(np, &data[segment], baddr, len);
269 			cp->data_len += len;
270 		}
271 	} else {
272 		segment = -2;
273 	}
274 
275 	return segment;
276 }
277 
278 /*
279  *  Queue a SCSI command.
280  */
281 static int sym_queue_command(struct sym_hcb *np, struct scsi_cmnd *cmd)
282 {
283 	struct scsi_device *sdev = cmd->device;
284 	struct sym_tcb *tp;
285 	struct sym_lcb *lp;
286 	struct sym_ccb *cp;
287 	int	order;
288 
289 	/*
290 	 *  Retrieve the target descriptor.
291 	 */
292 	tp = &np->target[sdev->id];
293 
294 	/*
295 	 *  Select tagged/untagged.
296 	 */
297 	lp = sym_lp(tp, sdev->lun);
298 	order = (lp && lp->s.reqtags) ? M_SIMPLE_TAG : 0;
299 
300 	/*
301 	 *  Queue the SCSI IO.
302 	 */
303 	cp = sym_get_ccb(np, cmd, order);
304 	if (!cp)
305 		return 1;	/* Means resource shortage */
306 	sym_queue_scsiio(np, cmd, cp);
307 	return 0;
308 }
309 
310 /*
311  *  Setup buffers and pointers that address the CDB.
312  */
313 static inline int sym_setup_cdb(struct sym_hcb *np, struct scsi_cmnd *cmd, struct sym_ccb *cp)
314 {
315 	memcpy(cp->cdb_buf, cmd->cmnd, cmd->cmd_len);
316 
317 	cp->phys.cmd.addr = CCB_BA(cp, cdb_buf[0]);
318 	cp->phys.cmd.size = cpu_to_scr(cmd->cmd_len);
319 
320 	return 0;
321 }
322 
323 /*
324  *  Setup pointers that address the data and start the I/O.
325  */
326 int sym_setup_data_and_start(struct sym_hcb *np, struct scsi_cmnd *cmd, struct sym_ccb *cp)
327 {
328 	u32 lastp, goalp;
329 	int dir;
330 
331 	/*
332 	 *  Build the CDB.
333 	 */
334 	if (sym_setup_cdb(np, cmd, cp))
335 		goto out_abort;
336 
337 	/*
338 	 *  No direction means no data.
339 	 */
340 	dir = cmd->sc_data_direction;
341 	if (dir != DMA_NONE) {
342 		cp->segments = sym_scatter(np, cp, cmd);
343 		if (cp->segments < 0) {
344 			sym_set_cam_status(cmd, DID_ERROR);
345 			goto out_abort;
346 		}
347 
348 		/*
349 		 *  No segments means no data.
350 		 */
351 		if (!cp->segments)
352 			dir = DMA_NONE;
353 	} else {
354 		cp->data_len = 0;
355 		cp->segments = 0;
356 	}
357 
358 	/*
359 	 *  Set the data pointer.
360 	 */
361 	switch (dir) {
362 	case DMA_BIDIRECTIONAL:
363 		scmd_printk(KERN_INFO, cmd, "got DMA_BIDIRECTIONAL command");
364 		sym_set_cam_status(cmd, DID_ERROR);
365 		goto out_abort;
366 	case DMA_TO_DEVICE:
367 		goalp = SCRIPTA_BA(np, data_out2) + 8;
368 		lastp = goalp - 8 - (cp->segments * (2*4));
369 		break;
370 	case DMA_FROM_DEVICE:
371 		cp->host_flags |= HF_DATA_IN;
372 		goalp = SCRIPTA_BA(np, data_in2) + 8;
373 		lastp = goalp - 8 - (cp->segments * (2*4));
374 		break;
375 	case DMA_NONE:
376 	default:
377 		lastp = goalp = SCRIPTB_BA(np, no_data);
378 		break;
379 	}
380 
381 	/*
382 	 *  Set all pointers values needed by SCRIPTS.
383 	 */
384 	cp->phys.head.lastp = cpu_to_scr(lastp);
385 	cp->phys.head.savep = cpu_to_scr(lastp);
386 	cp->startp	    = cp->phys.head.savep;
387 	cp->goalp	    = cpu_to_scr(goalp);
388 
389 	/*
390 	 *  When `#ifed 1', the code below makes the driver
391 	 *  panic on the first attempt to write to a SCSI device.
392 	 *  It is the first test we want to do after a driver
393 	 *  change that does not seem obviously safe. :)
394 	 */
395 #if 0
396 	switch (cp->cdb_buf[0]) {
397 	case 0x0A: case 0x2A: case 0xAA:
398 		panic("XXXXXXXXXXXXX WRITE NOT YET ALLOWED XXXXXXXXXXXXXX\n");
399 		break;
400 	default:
401 		break;
402 	}
403 #endif
404 
405 	/*
406 	 *	activate this job.
407 	 */
408 	sym_put_start_queue(np, cp);
409 	return 0;
410 
411 out_abort:
412 	sym_free_ccb(np, cp);
413 	sym_xpt_done(np, cmd);
414 	return 0;
415 }
416 
417 
418 /*
419  *  timer daemon.
420  *
421  *  Misused to keep the driver running when
422  *  interrupts are not configured correctly.
423  */
424 static void sym_timer(struct sym_hcb *np)
425 {
426 	unsigned long thistime = jiffies;
427 
428 	/*
429 	 *  Restart the timer.
430 	 */
431 	np->s.timer.expires = thistime + SYM_CONF_TIMER_INTERVAL;
432 	add_timer(&np->s.timer);
433 
434 	/*
435 	 *  If we are resetting the ncr, wait for settle_time before
436 	 *  clearing it. Then command processing will be resumed.
437 	 */
438 	if (np->s.settle_time_valid) {
439 		if (time_before_eq(np->s.settle_time, thistime)) {
440 			if (sym_verbose >= 2 )
441 				printk("%s: command processing resumed\n",
442 				       sym_name(np));
443 			np->s.settle_time_valid = 0;
444 		}
445 		return;
446 	}
447 
448 	/*
449 	 *	Nothing to do for now, but that may come.
450 	 */
451 	if (np->s.lasttime + 4*HZ < thistime) {
452 		np->s.lasttime = thistime;
453 	}
454 
455 #ifdef SYM_CONF_PCIQ_MAY_MISS_COMPLETIONS
456 	/*
457 	 *  Some way-broken PCI bridges may lead to
458 	 *  completions being lost when the clearing
459 	 *  of the INTFLY flag by the CPU occurs
460 	 *  concurrently with the chip raising this flag.
461 	 *  If this ever happen, lost completions will
462 	 * be reaped here.
463 	 */
464 	sym_wakeup_done(np);
465 #endif
466 }
467 
468 
469 /*
470  *  PCI BUS error handler.
471  */
472 void sym_log_bus_error(struct Scsi_Host *shost)
473 {
474 	struct sym_data *sym_data = shost_priv(shost);
475 	struct pci_dev *pdev = sym_data->pdev;
476 	unsigned short pci_sts;
477 	pci_read_config_word(pdev, PCI_STATUS, &pci_sts);
478 	if (pci_sts & 0xf900) {
479 		pci_write_config_word(pdev, PCI_STATUS, pci_sts);
480 		shost_printk(KERN_WARNING, shost,
481 			"PCI bus error: status = 0x%04x\n", pci_sts & 0xf900);
482 	}
483 }
484 
485 /*
486  * queuecommand method.  Entered with the host adapter lock held and
487  * interrupts disabled.
488  */
489 static int sym53c8xx_queue_command_lck(struct scsi_cmnd *cmd,
490 					void (*done)(struct scsi_cmnd *))
491 {
492 	struct sym_hcb *np = SYM_SOFTC_PTR(cmd);
493 	struct sym_ucmd *ucp = SYM_UCMD_PTR(cmd);
494 	int sts = 0;
495 
496 	cmd->scsi_done = done;
497 	memset(ucp, 0, sizeof(*ucp));
498 
499 	/*
500 	 *  Shorten our settle_time if needed for
501 	 *  this command not to time out.
502 	 */
503 	if (np->s.settle_time_valid && cmd->request->timeout) {
504 		unsigned long tlimit = jiffies + cmd->request->timeout;
505 		tlimit -= SYM_CONF_TIMER_INTERVAL*2;
506 		if (time_after(np->s.settle_time, tlimit)) {
507 			np->s.settle_time = tlimit;
508 		}
509 	}
510 
511 	if (np->s.settle_time_valid)
512 		return SCSI_MLQUEUE_HOST_BUSY;
513 
514 	sts = sym_queue_command(np, cmd);
515 	if (sts)
516 		return SCSI_MLQUEUE_HOST_BUSY;
517 	return 0;
518 }
519 
520 static DEF_SCSI_QCMD(sym53c8xx_queue_command)
521 
522 /*
523  *  Linux entry point of the interrupt handler.
524  */
525 static irqreturn_t sym53c8xx_intr(int irq, void *dev_id)
526 {
527 	struct Scsi_Host *shost = dev_id;
528 	struct sym_data *sym_data = shost_priv(shost);
529 	irqreturn_t result;
530 
531 	/* Avoid spinloop trying to handle interrupts on frozen device */
532 	if (pci_channel_offline(sym_data->pdev))
533 		return IRQ_NONE;
534 
535 	if (DEBUG_FLAGS & DEBUG_TINY) printf_debug ("[");
536 
537 	spin_lock(shost->host_lock);
538 	result = sym_interrupt(shost);
539 	spin_unlock(shost->host_lock);
540 
541 	if (DEBUG_FLAGS & DEBUG_TINY) printf_debug ("]\n");
542 
543 	return result;
544 }
545 
546 /*
547  *  Linux entry point of the timer handler
548  */
549 static void sym53c8xx_timer(struct timer_list *t)
550 {
551 	struct sym_hcb *np = from_timer(np, t, s.timer);
552 	unsigned long flags;
553 
554 	spin_lock_irqsave(np->s.host->host_lock, flags);
555 	sym_timer(np);
556 	spin_unlock_irqrestore(np->s.host->host_lock, flags);
557 }
558 
559 
560 /*
561  *  What the eh thread wants us to perform.
562  */
563 #define SYM_EH_ABORT		0
564 #define SYM_EH_DEVICE_RESET	1
565 #define SYM_EH_BUS_RESET	2
566 #define SYM_EH_HOST_RESET	3
567 
568 /*
569  *  Generic method for our eh processing.
570  *  The 'op' argument tells what we have to do.
571  */
572 static int sym_eh_handler(int op, char *opname, struct scsi_cmnd *cmd)
573 {
574 	struct sym_ucmd *ucmd = SYM_UCMD_PTR(cmd);
575 	struct Scsi_Host *shost = cmd->device->host;
576 	struct sym_data *sym_data = shost_priv(shost);
577 	struct pci_dev *pdev = sym_data->pdev;
578 	struct sym_hcb *np = sym_data->ncb;
579 	SYM_QUEHEAD *qp;
580 	int cmd_queued = 0;
581 	int sts = -1;
582 	struct completion eh_done;
583 
584 	scmd_printk(KERN_WARNING, cmd, "%s operation started\n", opname);
585 
586 	/* We may be in an error condition because the PCI bus
587 	 * went down. In this case, we need to wait until the
588 	 * PCI bus is reset, the card is reset, and only then
589 	 * proceed with the scsi error recovery.  There's no
590 	 * point in hurrying; take a leisurely wait.
591 	 */
592 #define WAIT_FOR_PCI_RECOVERY	35
593 	if (pci_channel_offline(pdev)) {
594 		int finished_reset = 0;
595 		init_completion(&eh_done);
596 		spin_lock_irq(shost->host_lock);
597 		/* Make sure we didn't race */
598 		if (pci_channel_offline(pdev)) {
599 			BUG_ON(sym_data->io_reset);
600 			sym_data->io_reset = &eh_done;
601 		} else {
602 			finished_reset = 1;
603 		}
604 		spin_unlock_irq(shost->host_lock);
605 		if (!finished_reset)
606 			finished_reset = wait_for_completion_timeout
607 						(sym_data->io_reset,
608 						WAIT_FOR_PCI_RECOVERY*HZ);
609 		spin_lock_irq(shost->host_lock);
610 		sym_data->io_reset = NULL;
611 		spin_unlock_irq(shost->host_lock);
612 		if (!finished_reset)
613 			return SCSI_FAILED;
614 	}
615 
616 	spin_lock_irq(shost->host_lock);
617 	/* This one is queued in some place -> to wait for completion */
618 	FOR_EACH_QUEUED_ELEMENT(&np->busy_ccbq, qp) {
619 		struct sym_ccb *cp = sym_que_entry(qp, struct sym_ccb, link_ccbq);
620 		if (cp->cmd == cmd) {
621 			cmd_queued = 1;
622 			break;
623 		}
624 	}
625 
626 	/* Try to proceed the operation we have been asked for */
627 	sts = -1;
628 	switch(op) {
629 	case SYM_EH_ABORT:
630 		sts = sym_abort_scsiio(np, cmd, 1);
631 		break;
632 	case SYM_EH_DEVICE_RESET:
633 		sts = sym_reset_scsi_target(np, cmd->device->id);
634 		break;
635 	case SYM_EH_BUS_RESET:
636 		sym_reset_scsi_bus(np, 1);
637 		sts = 0;
638 		break;
639 	case SYM_EH_HOST_RESET:
640 		sym_reset_scsi_bus(np, 0);
641 		sym_start_up(shost, 1);
642 		sts = 0;
643 		break;
644 	default:
645 		break;
646 	}
647 
648 	/* On error, restore everything and cross fingers :) */
649 	if (sts)
650 		cmd_queued = 0;
651 
652 	if (cmd_queued) {
653 		init_completion(&eh_done);
654 		ucmd->eh_done = &eh_done;
655 		spin_unlock_irq(shost->host_lock);
656 		if (!wait_for_completion_timeout(&eh_done, 5*HZ)) {
657 			ucmd->eh_done = NULL;
658 			sts = -2;
659 		}
660 	} else {
661 		spin_unlock_irq(shost->host_lock);
662 	}
663 
664 	dev_warn(&cmd->device->sdev_gendev, "%s operation %s.\n", opname,
665 			sts==0 ? "complete" :sts==-2 ? "timed-out" : "failed");
666 	return sts ? SCSI_FAILED : SCSI_SUCCESS;
667 }
668 
669 
670 /*
671  * Error handlers called from the eh thread (one thread per HBA).
672  */
673 static int sym53c8xx_eh_abort_handler(struct scsi_cmnd *cmd)
674 {
675 	return sym_eh_handler(SYM_EH_ABORT, "ABORT", cmd);
676 }
677 
678 static int sym53c8xx_eh_device_reset_handler(struct scsi_cmnd *cmd)
679 {
680 	return sym_eh_handler(SYM_EH_DEVICE_RESET, "DEVICE RESET", cmd);
681 }
682 
683 static int sym53c8xx_eh_bus_reset_handler(struct scsi_cmnd *cmd)
684 {
685 	return sym_eh_handler(SYM_EH_BUS_RESET, "BUS RESET", cmd);
686 }
687 
688 static int sym53c8xx_eh_host_reset_handler(struct scsi_cmnd *cmd)
689 {
690 	return sym_eh_handler(SYM_EH_HOST_RESET, "HOST RESET", cmd);
691 }
692 
693 /*
694  *  Tune device queuing depth, according to various limits.
695  */
696 static void sym_tune_dev_queuing(struct sym_tcb *tp, int lun, u_short reqtags)
697 {
698 	struct sym_lcb *lp = sym_lp(tp, lun);
699 	u_short	oldtags;
700 
701 	if (!lp)
702 		return;
703 
704 	oldtags = lp->s.reqtags;
705 
706 	if (reqtags > lp->s.scdev_depth)
707 		reqtags = lp->s.scdev_depth;
708 
709 	lp->s.reqtags     = reqtags;
710 
711 	if (reqtags != oldtags) {
712 		dev_info(&tp->starget->dev,
713 		         "tagged command queuing %s, command queue depth %d.\n",
714 		          lp->s.reqtags ? "enabled" : "disabled", reqtags);
715 	}
716 }
717 
718 static int sym53c8xx_slave_alloc(struct scsi_device *sdev)
719 {
720 	struct sym_hcb *np = sym_get_hcb(sdev->host);
721 	struct sym_tcb *tp = &np->target[sdev->id];
722 	struct sym_lcb *lp;
723 	unsigned long flags;
724 	int error;
725 
726 	if (sdev->id >= SYM_CONF_MAX_TARGET || sdev->lun >= SYM_CONF_MAX_LUN)
727 		return -ENXIO;
728 
729 	spin_lock_irqsave(np->s.host->host_lock, flags);
730 
731 	/*
732 	 * Fail the device init if the device is flagged NOSCAN at BOOT in
733 	 * the NVRAM.  This may speed up boot and maintain coherency with
734 	 * BIOS device numbering.  Clearing the flag allows the user to
735 	 * rescan skipped devices later.  We also return an error for
736 	 * devices not flagged for SCAN LUNS in the NVRAM since some single
737 	 * lun devices behave badly when asked for a non zero LUN.
738 	 */
739 
740 	if (tp->usrflags & SYM_SCAN_BOOT_DISABLED) {
741 		tp->usrflags &= ~SYM_SCAN_BOOT_DISABLED;
742 		starget_printk(KERN_INFO, sdev->sdev_target,
743 				"Scan at boot disabled in NVRAM\n");
744 		error = -ENXIO;
745 		goto out;
746 	}
747 
748 	if (tp->usrflags & SYM_SCAN_LUNS_DISABLED) {
749 		if (sdev->lun != 0) {
750 			error = -ENXIO;
751 			goto out;
752 		}
753 		starget_printk(KERN_INFO, sdev->sdev_target,
754 				"Multiple LUNs disabled in NVRAM\n");
755 	}
756 
757 	lp = sym_alloc_lcb(np, sdev->id, sdev->lun);
758 	if (!lp) {
759 		error = -ENOMEM;
760 		goto out;
761 	}
762 	if (tp->nlcb == 1)
763 		tp->starget = sdev->sdev_target;
764 
765 	spi_min_period(tp->starget) = tp->usr_period;
766 	spi_max_width(tp->starget) = tp->usr_width;
767 
768 	error = 0;
769 out:
770 	spin_unlock_irqrestore(np->s.host->host_lock, flags);
771 
772 	return error;
773 }
774 
775 /*
776  * Linux entry point for device queue sizing.
777  */
778 static int sym53c8xx_slave_configure(struct scsi_device *sdev)
779 {
780 	struct sym_hcb *np = sym_get_hcb(sdev->host);
781 	struct sym_tcb *tp = &np->target[sdev->id];
782 	struct sym_lcb *lp = sym_lp(tp, sdev->lun);
783 	int reqtags, depth_to_use;
784 
785 	/*
786 	 *  Get user flags.
787 	 */
788 	lp->curr_flags = lp->user_flags;
789 
790 	/*
791 	 *  Select queue depth from driver setup.
792 	 *  Do not use more than configured by user.
793 	 *  Use at least 1.
794 	 *  Do not use more than our maximum.
795 	 */
796 	reqtags = sym_driver_setup.max_tag;
797 	if (reqtags > tp->usrtags)
798 		reqtags = tp->usrtags;
799 	if (!sdev->tagged_supported)
800 		reqtags = 0;
801 	if (reqtags > SYM_CONF_MAX_TAG)
802 		reqtags = SYM_CONF_MAX_TAG;
803 	depth_to_use = reqtags ? reqtags : 1;
804 	scsi_change_queue_depth(sdev, depth_to_use);
805 	lp->s.scdev_depth = depth_to_use;
806 	sym_tune_dev_queuing(tp, sdev->lun, reqtags);
807 
808 	if (!spi_initial_dv(sdev->sdev_target))
809 		spi_dv_device(sdev);
810 
811 	return 0;
812 }
813 
814 static void sym53c8xx_slave_destroy(struct scsi_device *sdev)
815 {
816 	struct sym_hcb *np = sym_get_hcb(sdev->host);
817 	struct sym_tcb *tp = &np->target[sdev->id];
818 	struct sym_lcb *lp = sym_lp(tp, sdev->lun);
819 	unsigned long flags;
820 
821 	/* if slave_alloc returned before allocating a sym_lcb, return */
822 	if (!lp)
823 		return;
824 
825 	spin_lock_irqsave(np->s.host->host_lock, flags);
826 
827 	if (lp->busy_itlq || lp->busy_itl) {
828 		/*
829 		 * This really shouldn't happen, but we can't return an error
830 		 * so let's try to stop all on-going I/O.
831 		 */
832 		starget_printk(KERN_WARNING, tp->starget,
833 			       "Removing busy LCB (%d)\n", (u8)sdev->lun);
834 		sym_reset_scsi_bus(np, 1);
835 	}
836 
837 	if (sym_free_lcb(np, sdev->id, sdev->lun) == 0) {
838 		/*
839 		 * It was the last unit for this target.
840 		 */
841 		tp->head.sval        = 0;
842 		tp->head.wval        = np->rv_scntl3;
843 		tp->head.uval        = 0;
844 		tp->tgoal.check_nego = 1;
845 		tp->starget	     = NULL;
846 	}
847 
848 	spin_unlock_irqrestore(np->s.host->host_lock, flags);
849 }
850 
851 /*
852  *  Linux entry point for info() function
853  */
854 static const char *sym53c8xx_info (struct Scsi_Host *host)
855 {
856 	return SYM_DRIVER_NAME;
857 }
858 
859 
860 #ifdef SYM_LINUX_PROC_INFO_SUPPORT
861 /*
862  *  Proc file system stuff
863  *
864  *  A read operation returns adapter information.
865  *  A write operation is a control command.
866  *  The string is parsed in the driver code and the command is passed
867  *  to the sym_usercmd() function.
868  */
869 
870 #ifdef SYM_LINUX_USER_COMMAND_SUPPORT
871 
872 struct	sym_usrcmd {
873 	u_long	target;
874 	u_long	lun;
875 	u_long	data;
876 	u_long	cmd;
877 };
878 
879 #define UC_SETSYNC      10
880 #define UC_SETTAGS	11
881 #define UC_SETDEBUG	12
882 #define UC_SETWIDE	14
883 #define UC_SETFLAG	15
884 #define UC_SETVERBOSE	17
885 #define UC_RESETDEV	18
886 #define UC_CLEARDEV	19
887 
888 static void sym_exec_user_command (struct sym_hcb *np, struct sym_usrcmd *uc)
889 {
890 	struct sym_tcb *tp;
891 	int t, l;
892 
893 	switch (uc->cmd) {
894 	case 0: return;
895 
896 #ifdef SYM_LINUX_DEBUG_CONTROL_SUPPORT
897 	case UC_SETDEBUG:
898 		sym_debug_flags = uc->data;
899 		break;
900 #endif
901 	case UC_SETVERBOSE:
902 		np->verbose = uc->data;
903 		break;
904 	default:
905 		/*
906 		 * We assume that other commands apply to targets.
907 		 * This should always be the case and avoid the below
908 		 * 4 lines to be repeated 6 times.
909 		 */
910 		for (t = 0; t < SYM_CONF_MAX_TARGET; t++) {
911 			if (!((uc->target >> t) & 1))
912 				continue;
913 			tp = &np->target[t];
914 			if (!tp->nlcb)
915 				continue;
916 
917 			switch (uc->cmd) {
918 
919 			case UC_SETSYNC:
920 				if (!uc->data || uc->data >= 255) {
921 					tp->tgoal.iu = tp->tgoal.dt =
922 						tp->tgoal.qas = 0;
923 					tp->tgoal.offset = 0;
924 				} else if (uc->data <= 9 && np->minsync_dt) {
925 					if (uc->data < np->minsync_dt)
926 						uc->data = np->minsync_dt;
927 					tp->tgoal.iu = tp->tgoal.dt =
928 						tp->tgoal.qas = 1;
929 					tp->tgoal.width = 1;
930 					tp->tgoal.period = uc->data;
931 					tp->tgoal.offset = np->maxoffs_dt;
932 				} else {
933 					if (uc->data < np->minsync)
934 						uc->data = np->minsync;
935 					tp->tgoal.iu = tp->tgoal.dt =
936 						tp->tgoal.qas = 0;
937 					tp->tgoal.period = uc->data;
938 					tp->tgoal.offset = np->maxoffs;
939 				}
940 				tp->tgoal.check_nego = 1;
941 				break;
942 			case UC_SETWIDE:
943 				tp->tgoal.width = uc->data ? 1 : 0;
944 				tp->tgoal.check_nego = 1;
945 				break;
946 			case UC_SETTAGS:
947 				for (l = 0; l < SYM_CONF_MAX_LUN; l++)
948 					sym_tune_dev_queuing(tp, l, uc->data);
949 				break;
950 			case UC_RESETDEV:
951 				tp->to_reset = 1;
952 				np->istat_sem = SEM;
953 				OUTB(np, nc_istat, SIGP|SEM);
954 				break;
955 			case UC_CLEARDEV:
956 				for (l = 0; l < SYM_CONF_MAX_LUN; l++) {
957 					struct sym_lcb *lp = sym_lp(tp, l);
958 					if (lp) lp->to_clear = 1;
959 				}
960 				np->istat_sem = SEM;
961 				OUTB(np, nc_istat, SIGP|SEM);
962 				break;
963 			case UC_SETFLAG:
964 				tp->usrflags = uc->data;
965 				break;
966 			}
967 		}
968 		break;
969 	}
970 }
971 
972 static int sym_skip_spaces(char *ptr, int len)
973 {
974 	int cnt, c;
975 
976 	for (cnt = len; cnt > 0 && (c = *ptr++) && isspace(c); cnt--);
977 
978 	return (len - cnt);
979 }
980 
981 static int get_int_arg(char *ptr, int len, u_long *pv)
982 {
983 	char *end;
984 
985 	*pv = simple_strtoul(ptr, &end, 10);
986 	return (end - ptr);
987 }
988 
989 static int is_keyword(char *ptr, int len, char *verb)
990 {
991 	int verb_len = strlen(verb);
992 
993 	if (len >= verb_len && !memcmp(verb, ptr, verb_len))
994 		return verb_len;
995 	else
996 		return 0;
997 }
998 
999 #define SKIP_SPACES(ptr, len)						\
1000 	if ((arg_len = sym_skip_spaces(ptr, len)) < 1)			\
1001 		return -EINVAL;						\
1002 	ptr += arg_len; len -= arg_len;
1003 
1004 #define GET_INT_ARG(ptr, len, v)					\
1005 	if (!(arg_len = get_int_arg(ptr, len, &(v))))			\
1006 		return -EINVAL;						\
1007 	ptr += arg_len; len -= arg_len;
1008 
1009 
1010 /*
1011  * Parse a control command
1012  */
1013 
1014 static int sym_user_command(struct Scsi_Host *shost, char *buffer, int length)
1015 {
1016 	struct sym_hcb *np = sym_get_hcb(shost);
1017 	char *ptr	= buffer;
1018 	int len		= length;
1019 	struct sym_usrcmd cmd, *uc = &cmd;
1020 	int		arg_len;
1021 	u_long 		target;
1022 
1023 	memset(uc, 0, sizeof(*uc));
1024 
1025 	if (len > 0 && ptr[len-1] == '\n')
1026 		--len;
1027 
1028 	if	((arg_len = is_keyword(ptr, len, "setsync")) != 0)
1029 		uc->cmd = UC_SETSYNC;
1030 	else if	((arg_len = is_keyword(ptr, len, "settags")) != 0)
1031 		uc->cmd = UC_SETTAGS;
1032 	else if	((arg_len = is_keyword(ptr, len, "setverbose")) != 0)
1033 		uc->cmd = UC_SETVERBOSE;
1034 	else if	((arg_len = is_keyword(ptr, len, "setwide")) != 0)
1035 		uc->cmd = UC_SETWIDE;
1036 #ifdef SYM_LINUX_DEBUG_CONTROL_SUPPORT
1037 	else if	((arg_len = is_keyword(ptr, len, "setdebug")) != 0)
1038 		uc->cmd = UC_SETDEBUG;
1039 #endif
1040 	else if	((arg_len = is_keyword(ptr, len, "setflag")) != 0)
1041 		uc->cmd = UC_SETFLAG;
1042 	else if	((arg_len = is_keyword(ptr, len, "resetdev")) != 0)
1043 		uc->cmd = UC_RESETDEV;
1044 	else if	((arg_len = is_keyword(ptr, len, "cleardev")) != 0)
1045 		uc->cmd = UC_CLEARDEV;
1046 	else
1047 		arg_len = 0;
1048 
1049 #ifdef DEBUG_PROC_INFO
1050 printk("sym_user_command: arg_len=%d, cmd=%ld\n", arg_len, uc->cmd);
1051 #endif
1052 
1053 	if (!arg_len)
1054 		return -EINVAL;
1055 	ptr += arg_len; len -= arg_len;
1056 
1057 	switch(uc->cmd) {
1058 	case UC_SETSYNC:
1059 	case UC_SETTAGS:
1060 	case UC_SETWIDE:
1061 	case UC_SETFLAG:
1062 	case UC_RESETDEV:
1063 	case UC_CLEARDEV:
1064 		SKIP_SPACES(ptr, len);
1065 		if ((arg_len = is_keyword(ptr, len, "all")) != 0) {
1066 			ptr += arg_len; len -= arg_len;
1067 			uc->target = ~0;
1068 		} else {
1069 			GET_INT_ARG(ptr, len, target);
1070 			uc->target = (1<<target);
1071 #ifdef DEBUG_PROC_INFO
1072 printk("sym_user_command: target=%ld\n", target);
1073 #endif
1074 		}
1075 		break;
1076 	}
1077 
1078 	switch(uc->cmd) {
1079 	case UC_SETVERBOSE:
1080 	case UC_SETSYNC:
1081 	case UC_SETTAGS:
1082 	case UC_SETWIDE:
1083 		SKIP_SPACES(ptr, len);
1084 		GET_INT_ARG(ptr, len, uc->data);
1085 #ifdef DEBUG_PROC_INFO
1086 printk("sym_user_command: data=%ld\n", uc->data);
1087 #endif
1088 		break;
1089 #ifdef SYM_LINUX_DEBUG_CONTROL_SUPPORT
1090 	case UC_SETDEBUG:
1091 		while (len > 0) {
1092 			SKIP_SPACES(ptr, len);
1093 			if	((arg_len = is_keyword(ptr, len, "alloc")))
1094 				uc->data |= DEBUG_ALLOC;
1095 			else if	((arg_len = is_keyword(ptr, len, "phase")))
1096 				uc->data |= DEBUG_PHASE;
1097 			else if	((arg_len = is_keyword(ptr, len, "queue")))
1098 				uc->data |= DEBUG_QUEUE;
1099 			else if	((arg_len = is_keyword(ptr, len, "result")))
1100 				uc->data |= DEBUG_RESULT;
1101 			else if	((arg_len = is_keyword(ptr, len, "scatter")))
1102 				uc->data |= DEBUG_SCATTER;
1103 			else if	((arg_len = is_keyword(ptr, len, "script")))
1104 				uc->data |= DEBUG_SCRIPT;
1105 			else if	((arg_len = is_keyword(ptr, len, "tiny")))
1106 				uc->data |= DEBUG_TINY;
1107 			else if	((arg_len = is_keyword(ptr, len, "timing")))
1108 				uc->data |= DEBUG_TIMING;
1109 			else if	((arg_len = is_keyword(ptr, len, "nego")))
1110 				uc->data |= DEBUG_NEGO;
1111 			else if	((arg_len = is_keyword(ptr, len, "tags")))
1112 				uc->data |= DEBUG_TAGS;
1113 			else if	((arg_len = is_keyword(ptr, len, "pointer")))
1114 				uc->data |= DEBUG_POINTER;
1115 			else
1116 				return -EINVAL;
1117 			ptr += arg_len; len -= arg_len;
1118 		}
1119 #ifdef DEBUG_PROC_INFO
1120 printk("sym_user_command: data=%ld\n", uc->data);
1121 #endif
1122 		break;
1123 #endif /* SYM_LINUX_DEBUG_CONTROL_SUPPORT */
1124 	case UC_SETFLAG:
1125 		while (len > 0) {
1126 			SKIP_SPACES(ptr, len);
1127 			if	((arg_len = is_keyword(ptr, len, "no_disc")))
1128 				uc->data &= ~SYM_DISC_ENABLED;
1129 			else
1130 				return -EINVAL;
1131 			ptr += arg_len; len -= arg_len;
1132 		}
1133 		break;
1134 	default:
1135 		break;
1136 	}
1137 
1138 	if (len)
1139 		return -EINVAL;
1140 	else {
1141 		unsigned long flags;
1142 
1143 		spin_lock_irqsave(shost->host_lock, flags);
1144 		sym_exec_user_command(np, uc);
1145 		spin_unlock_irqrestore(shost->host_lock, flags);
1146 	}
1147 	return length;
1148 }
1149 
1150 #endif	/* SYM_LINUX_USER_COMMAND_SUPPORT */
1151 
1152 
1153 /*
1154  *  Copy formatted information into the input buffer.
1155  */
1156 static int sym_show_info(struct seq_file *m, struct Scsi_Host *shost)
1157 {
1158 #ifdef SYM_LINUX_USER_INFO_SUPPORT
1159 	struct sym_data *sym_data = shost_priv(shost);
1160 	struct pci_dev *pdev = sym_data->pdev;
1161 	struct sym_hcb *np = sym_data->ncb;
1162 
1163 	seq_printf(m, "Chip " NAME53C "%s, device id 0x%x, "
1164 		 "revision id 0x%x\n", np->s.chip_name,
1165 		 pdev->device, pdev->revision);
1166 	seq_printf(m, "At PCI address %s, IRQ %u\n",
1167 			 pci_name(pdev), pdev->irq);
1168 	seq_printf(m, "Min. period factor %d, %s SCSI BUS%s\n",
1169 		 (int) (np->minsync_dt ? np->minsync_dt : np->minsync),
1170 		 np->maxwide ? "Wide" : "Narrow",
1171 		 np->minsync_dt ? ", DT capable" : "");
1172 
1173 	seq_printf(m, "Max. started commands %d, "
1174 		 "max. commands per LUN %d\n",
1175 		 SYM_CONF_MAX_START, SYM_CONF_MAX_TAG);
1176 
1177 	return 0;
1178 #else
1179 	return -EINVAL;
1180 #endif /* SYM_LINUX_USER_INFO_SUPPORT */
1181 }
1182 
1183 #endif /* SYM_LINUX_PROC_INFO_SUPPORT */
1184 
1185 /*
1186  * Free resources claimed by sym_iomap_device().  Note that
1187  * sym_free_resources() should be used instead of this function after calling
1188  * sym_attach().
1189  */
1190 static void sym_iounmap_device(struct sym_device *device)
1191 {
1192 	if (device->s.ioaddr)
1193 		pci_iounmap(device->pdev, device->s.ioaddr);
1194 	if (device->s.ramaddr)
1195 		pci_iounmap(device->pdev, device->s.ramaddr);
1196 }
1197 
1198 /*
1199  *	Free controller resources.
1200  */
1201 static void sym_free_resources(struct sym_hcb *np, struct pci_dev *pdev,
1202 		int do_free_irq)
1203 {
1204 	/*
1205 	 *  Free O/S specific resources.
1206 	 */
1207 	if (do_free_irq)
1208 		free_irq(pdev->irq, np->s.host);
1209 	if (np->s.ioaddr)
1210 		pci_iounmap(pdev, np->s.ioaddr);
1211 	if (np->s.ramaddr)
1212 		pci_iounmap(pdev, np->s.ramaddr);
1213 	/*
1214 	 *  Free O/S independent resources.
1215 	 */
1216 	sym_hcb_free(np);
1217 
1218 	sym_mfree_dma(np, sizeof(*np), "HCB");
1219 }
1220 
1221 /*
1222  *  Host attach and initialisations.
1223  *
1224  *  Allocate host data and ncb structure.
1225  *  Remap MMIO region.
1226  *  Do chip initialization.
1227  *  If all is OK, install interrupt handling and
1228  *  start the timer daemon.
1229  */
1230 static struct Scsi_Host *sym_attach(struct scsi_host_template *tpnt, int unit,
1231 				    struct sym_device *dev)
1232 {
1233 	struct sym_data *sym_data;
1234 	struct sym_hcb *np = NULL;
1235 	struct Scsi_Host *shost = NULL;
1236 	struct pci_dev *pdev = dev->pdev;
1237 	unsigned long flags;
1238 	struct sym_fw *fw;
1239 	int do_free_irq = 0;
1240 
1241 	printk(KERN_INFO "sym%d: <%s> rev 0x%x at pci %s irq %u\n",
1242 		unit, dev->chip.name, pdev->revision, pci_name(pdev),
1243 		pdev->irq);
1244 
1245 	/*
1246 	 *  Get the firmware for this chip.
1247 	 */
1248 	fw = sym_find_firmware(&dev->chip);
1249 	if (!fw)
1250 		goto attach_failed;
1251 
1252 	shost = scsi_host_alloc(tpnt, sizeof(*sym_data));
1253 	if (!shost)
1254 		goto attach_failed;
1255 	sym_data = shost_priv(shost);
1256 
1257 	/*
1258 	 *  Allocate immediately the host control block,
1259 	 *  since we are only expecting to succeed. :)
1260 	 *  We keep track in the HCB of all the resources that
1261 	 *  are to be released on error.
1262 	 */
1263 	np = __sym_calloc_dma(&pdev->dev, sizeof(*np), "HCB");
1264 	if (!np)
1265 		goto attach_failed;
1266 	np->bus_dmat = &pdev->dev; /* Result in 1 DMA pool per HBA */
1267 	sym_data->ncb = np;
1268 	sym_data->pdev = pdev;
1269 	np->s.host = shost;
1270 
1271 	pci_set_drvdata(pdev, shost);
1272 
1273 	/*
1274 	 *  Copy some useful infos to the HCB.
1275 	 */
1276 	np->hcb_ba	= vtobus(np);
1277 	np->verbose	= sym_driver_setup.verbose;
1278 	np->s.unit	= unit;
1279 	np->features	= dev->chip.features;
1280 	np->clock_divn	= dev->chip.nr_divisor;
1281 	np->maxoffs	= dev->chip.offset_max;
1282 	np->maxburst	= dev->chip.burst_max;
1283 	np->myaddr	= dev->host_id;
1284 	np->mmio_ba	= (u32)dev->mmio_base;
1285 	np->ram_ba	= (u32)dev->ram_base;
1286 	np->s.ioaddr	= dev->s.ioaddr;
1287 	np->s.ramaddr	= dev->s.ramaddr;
1288 
1289 	/*
1290 	 *  Edit its name.
1291 	 */
1292 	strlcpy(np->s.chip_name, dev->chip.name, sizeof(np->s.chip_name));
1293 	sprintf(np->s.inst_name, "sym%d", np->s.unit);
1294 
1295 	if ((SYM_CONF_DMA_ADDRESSING_MODE > 0) && (np->features & FE_DAC) &&
1296 			!dma_set_mask(&pdev->dev, DMA_DAC_MASK)) {
1297 		set_dac(np);
1298 	} else if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
1299 		printf_warning("%s: No suitable DMA available\n", sym_name(np));
1300 		goto attach_failed;
1301 	}
1302 
1303 	if (sym_hcb_attach(shost, fw, dev->nvram))
1304 		goto attach_failed;
1305 
1306 	/*
1307 	 *  Install the interrupt handler.
1308 	 *  If we synchonize the C code with SCRIPTS on interrupt,
1309 	 *  we do not want to share the INTR line at all.
1310 	 */
1311 	if (request_irq(pdev->irq, sym53c8xx_intr, IRQF_SHARED, NAME53C8XX,
1312 			shost)) {
1313 		printf_err("%s: request irq %u failure\n",
1314 			sym_name(np), pdev->irq);
1315 		goto attach_failed;
1316 	}
1317 	do_free_irq = 1;
1318 
1319 	/*
1320 	 *  After SCSI devices have been opened, we cannot
1321 	 *  reset the bus safely, so we do it here.
1322 	 */
1323 	spin_lock_irqsave(shost->host_lock, flags);
1324 	if (sym_reset_scsi_bus(np, 0))
1325 		goto reset_failed;
1326 
1327 	/*
1328 	 *  Start the SCRIPTS.
1329 	 */
1330 	sym_start_up(shost, 1);
1331 
1332 	/*
1333 	 *  Start the timer daemon
1334 	 */
1335 	timer_setup(&np->s.timer, sym53c8xx_timer, 0);
1336 	np->s.lasttime=0;
1337 	sym_timer (np);
1338 
1339 	/*
1340 	 *  Fill Linux host instance structure
1341 	 *  and return success.
1342 	 */
1343 	shost->max_channel	= 0;
1344 	shost->this_id		= np->myaddr;
1345 	shost->max_id		= np->maxwide ? 16 : 8;
1346 	shost->max_lun		= SYM_CONF_MAX_LUN;
1347 	shost->unique_id	= pci_resource_start(pdev, 0);
1348 	shost->cmd_per_lun	= SYM_CONF_MAX_TAG;
1349 	shost->can_queue	= (SYM_CONF_MAX_START-2);
1350 	shost->sg_tablesize	= SYM_CONF_MAX_SG;
1351 	shost->max_cmd_len	= 16;
1352 	BUG_ON(sym2_transport_template == NULL);
1353 	shost->transportt	= sym2_transport_template;
1354 
1355 	/* 53c896 rev 1 errata: DMA may not cross 16MB boundary */
1356 	if (pdev->device == PCI_DEVICE_ID_NCR_53C896 && pdev->revision < 2)
1357 		shost->dma_boundary = 0xFFFFFF;
1358 
1359 	spin_unlock_irqrestore(shost->host_lock, flags);
1360 
1361 	return shost;
1362 
1363  reset_failed:
1364 	printf_err("%s: FATAL ERROR: CHECK SCSI BUS - CABLES, "
1365 		   "TERMINATION, DEVICE POWER etc.!\n", sym_name(np));
1366 	spin_unlock_irqrestore(shost->host_lock, flags);
1367  attach_failed:
1368 	printf_info("sym%d: giving up ...\n", unit);
1369 	if (np)
1370 		sym_free_resources(np, pdev, do_free_irq);
1371 	else
1372 		sym_iounmap_device(dev);
1373 	if (shost)
1374 		scsi_host_put(shost);
1375 
1376 	return NULL;
1377 }
1378 
1379 
1380 /*
1381  *    Detect and try to read SYMBIOS and TEKRAM NVRAM.
1382  */
1383 #if SYM_CONF_NVRAM_SUPPORT
1384 static void sym_get_nvram(struct sym_device *devp, struct sym_nvram *nvp)
1385 {
1386 	devp->nvram = nvp;
1387 	nvp->type = 0;
1388 
1389 	sym_read_nvram(devp, nvp);
1390 }
1391 #else
1392 static inline void sym_get_nvram(struct sym_device *devp, struct sym_nvram *nvp)
1393 {
1394 }
1395 #endif	/* SYM_CONF_NVRAM_SUPPORT */
1396 
1397 static int sym_check_supported(struct sym_device *device)
1398 {
1399 	struct sym_chip *chip;
1400 	struct pci_dev *pdev = device->pdev;
1401 	unsigned long io_port = pci_resource_start(pdev, 0);
1402 	int i;
1403 
1404 	/*
1405 	 *  If user excluded this chip, do not initialize it.
1406 	 *  I hate this code so much.  Must kill it.
1407 	 */
1408 	if (io_port) {
1409 		for (i = 0 ; i < 8 ; i++) {
1410 			if (sym_driver_setup.excludes[i] == io_port)
1411 				return -ENODEV;
1412 		}
1413 	}
1414 
1415 	/*
1416 	 * Check if the chip is supported.  Then copy the chip description
1417 	 * to our device structure so we can make it match the actual device
1418 	 * and options.
1419 	 */
1420 	chip = sym_lookup_chip_table(pdev->device, pdev->revision);
1421 	if (!chip) {
1422 		dev_info(&pdev->dev, "device not supported\n");
1423 		return -ENODEV;
1424 	}
1425 	memcpy(&device->chip, chip, sizeof(device->chip));
1426 
1427 	return 0;
1428 }
1429 
1430 /*
1431  * Ignore Symbios chips controlled by various RAID controllers.
1432  * These controllers set value 0x52414944 at RAM end - 16.
1433  */
1434 static int sym_check_raid(struct sym_device *device)
1435 {
1436 	unsigned int ram_size, ram_val;
1437 
1438 	if (!device->s.ramaddr)
1439 		return 0;
1440 
1441 	if (device->chip.features & FE_RAM8K)
1442 		ram_size = 8192;
1443 	else
1444 		ram_size = 4096;
1445 
1446 	ram_val = readl(device->s.ramaddr + ram_size - 16);
1447 	if (ram_val != 0x52414944)
1448 		return 0;
1449 
1450 	dev_info(&device->pdev->dev,
1451 			"not initializing, driven by RAID controller.\n");
1452 	return -ENODEV;
1453 }
1454 
1455 static int sym_set_workarounds(struct sym_device *device)
1456 {
1457 	struct sym_chip *chip = &device->chip;
1458 	struct pci_dev *pdev = device->pdev;
1459 	u_short status_reg;
1460 
1461 	/*
1462 	 *  (ITEM 12 of a DEL about the 896 I haven't yet).
1463 	 *  We must ensure the chip will use WRITE AND INVALIDATE.
1464 	 *  The revision number limit is for now arbitrary.
1465 	 */
1466 	if (pdev->device == PCI_DEVICE_ID_NCR_53C896 && pdev->revision < 0x4) {
1467 		chip->features	|= (FE_WRIE | FE_CLSE);
1468 	}
1469 
1470 	/* If the chip can do Memory Write Invalidate, enable it */
1471 	if (chip->features & FE_WRIE) {
1472 		if (pci_set_mwi(pdev))
1473 			return -ENODEV;
1474 	}
1475 
1476 	/*
1477 	 *  Work around for errant bit in 895A. The 66Mhz
1478 	 *  capable bit is set erroneously. Clear this bit.
1479 	 *  (Item 1 DEL 533)
1480 	 *
1481 	 *  Make sure Config space and Features agree.
1482 	 *
1483 	 *  Recall: writes are not normal to status register -
1484 	 *  write a 1 to clear and a 0 to leave unchanged.
1485 	 *  Can only reset bits.
1486 	 */
1487 	pci_read_config_word(pdev, PCI_STATUS, &status_reg);
1488 	if (chip->features & FE_66MHZ) {
1489 		if (!(status_reg & PCI_STATUS_66MHZ))
1490 			chip->features &= ~FE_66MHZ;
1491 	} else {
1492 		if (status_reg & PCI_STATUS_66MHZ) {
1493 			status_reg = PCI_STATUS_66MHZ;
1494 			pci_write_config_word(pdev, PCI_STATUS, status_reg);
1495 			pci_read_config_word(pdev, PCI_STATUS, &status_reg);
1496 		}
1497 	}
1498 
1499 	return 0;
1500 }
1501 
1502 /*
1503  * Map HBA registers and on-chip SRAM (if present).
1504  */
1505 static int sym_iomap_device(struct sym_device *device)
1506 {
1507 	struct pci_dev *pdev = device->pdev;
1508 	struct pci_bus_region bus_addr;
1509 	int i = 2;
1510 
1511 	pcibios_resource_to_bus(pdev->bus, &bus_addr, &pdev->resource[1]);
1512 	device->mmio_base = bus_addr.start;
1513 
1514 	if (device->chip.features & FE_RAM) {
1515 		/*
1516 		 * If the BAR is 64-bit, resource 2 will be occupied by the
1517 		 * upper 32 bits
1518 		 */
1519 		if (!pdev->resource[i].flags)
1520 			i++;
1521 		pcibios_resource_to_bus(pdev->bus, &bus_addr,
1522 					&pdev->resource[i]);
1523 		device->ram_base = bus_addr.start;
1524 	}
1525 
1526 #ifdef CONFIG_SCSI_SYM53C8XX_MMIO
1527 	if (device->mmio_base)
1528 		device->s.ioaddr = pci_iomap(pdev, 1,
1529 						pci_resource_len(pdev, 1));
1530 #endif
1531 	if (!device->s.ioaddr)
1532 		device->s.ioaddr = pci_iomap(pdev, 0,
1533 						pci_resource_len(pdev, 0));
1534 	if (!device->s.ioaddr) {
1535 		dev_err(&pdev->dev, "could not map registers; giving up.\n");
1536 		return -EIO;
1537 	}
1538 	if (device->ram_base) {
1539 		device->s.ramaddr = pci_iomap(pdev, i,
1540 						pci_resource_len(pdev, i));
1541 		if (!device->s.ramaddr) {
1542 			dev_warn(&pdev->dev,
1543 				"could not map SRAM; continuing anyway.\n");
1544 			device->ram_base = 0;
1545 		}
1546 	}
1547 
1548 	return 0;
1549 }
1550 
1551 /*
1552  * The NCR PQS and PDS cards are constructed as a DEC bridge
1553  * behind which sits a proprietary NCR memory controller and
1554  * either four or two 53c875s as separate devices.  We can tell
1555  * if an 875 is part of a PQS/PDS or not since if it is, it will
1556  * be on the same bus as the memory controller.  In its usual
1557  * mode of operation, the 875s are slaved to the memory
1558  * controller for all transfers.  To operate with the Linux
1559  * driver, the memory controller is disabled and the 875s
1560  * freed to function independently.  The only wrinkle is that
1561  * the preset SCSI ID (which may be zero) must be read in from
1562  * a special configuration space register of the 875.
1563  */
1564 static void sym_config_pqs(struct pci_dev *pdev, struct sym_device *sym_dev)
1565 {
1566 	int slot;
1567 	u8 tmp;
1568 
1569 	for (slot = 0; slot < 256; slot++) {
1570 		struct pci_dev *memc = pci_get_slot(pdev->bus, slot);
1571 
1572 		if (!memc || memc->vendor != 0x101a || memc->device == 0x0009) {
1573 			pci_dev_put(memc);
1574 			continue;
1575 		}
1576 
1577 		/* bit 1: allow individual 875 configuration */
1578 		pci_read_config_byte(memc, 0x44, &tmp);
1579 		if ((tmp & 0x2) == 0) {
1580 			tmp |= 0x2;
1581 			pci_write_config_byte(memc, 0x44, tmp);
1582 		}
1583 
1584 		/* bit 2: drive individual 875 interrupts to the bus */
1585 		pci_read_config_byte(memc, 0x45, &tmp);
1586 		if ((tmp & 0x4) == 0) {
1587 			tmp |= 0x4;
1588 			pci_write_config_byte(memc, 0x45, tmp);
1589 		}
1590 
1591 		pci_dev_put(memc);
1592 		break;
1593 	}
1594 
1595 	pci_read_config_byte(pdev, 0x84, &tmp);
1596 	sym_dev->host_id = tmp;
1597 }
1598 
1599 /*
1600  *  Called before unloading the module.
1601  *  Detach the host.
1602  *  We have to free resources and halt the NCR chip.
1603  */
1604 static int sym_detach(struct Scsi_Host *shost, struct pci_dev *pdev)
1605 {
1606 	struct sym_hcb *np = sym_get_hcb(shost);
1607 	printk("%s: detaching ...\n", sym_name(np));
1608 
1609 	del_timer_sync(&np->s.timer);
1610 
1611 	/*
1612 	 * Reset NCR chip.
1613 	 * We should use sym_soft_reset(), but we don't want to do
1614 	 * so, since we may not be safe if interrupts occur.
1615 	 */
1616 	printk("%s: resetting chip\n", sym_name(np));
1617 	OUTB(np, nc_istat, SRST);
1618 	INB(np, nc_mbox1);
1619 	udelay(10);
1620 	OUTB(np, nc_istat, 0);
1621 
1622 	sym_free_resources(np, pdev, 1);
1623 	scsi_host_put(shost);
1624 
1625 	return 1;
1626 }
1627 
1628 /*
1629  * Driver host template.
1630  */
1631 static struct scsi_host_template sym2_template = {
1632 	.module			= THIS_MODULE,
1633 	.name			= "sym53c8xx",
1634 	.info			= sym53c8xx_info,
1635 	.queuecommand		= sym53c8xx_queue_command,
1636 	.slave_alloc		= sym53c8xx_slave_alloc,
1637 	.slave_configure	= sym53c8xx_slave_configure,
1638 	.slave_destroy		= sym53c8xx_slave_destroy,
1639 	.eh_abort_handler	= sym53c8xx_eh_abort_handler,
1640 	.eh_device_reset_handler = sym53c8xx_eh_device_reset_handler,
1641 	.eh_bus_reset_handler	= sym53c8xx_eh_bus_reset_handler,
1642 	.eh_host_reset_handler	= sym53c8xx_eh_host_reset_handler,
1643 	.this_id		= 7,
1644 	.max_sectors		= 0xFFFF,
1645 #ifdef SYM_LINUX_PROC_INFO_SUPPORT
1646 	.show_info		= sym_show_info,
1647 #ifdef	SYM_LINUX_USER_COMMAND_SUPPORT
1648 	.write_info		= sym_user_command,
1649 #endif
1650 	.proc_name		= NAME53C8XX,
1651 #endif
1652 };
1653 
1654 static int attach_count;
1655 
1656 static int sym2_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1657 {
1658 	struct sym_device sym_dev;
1659 	struct sym_nvram nvram;
1660 	struct Scsi_Host *shost;
1661 	int do_iounmap = 0;
1662 	int do_disable_device = 1;
1663 
1664 	memset(&sym_dev, 0, sizeof(sym_dev));
1665 	memset(&nvram, 0, sizeof(nvram));
1666 	sym_dev.pdev = pdev;
1667 	sym_dev.host_id = SYM_SETUP_HOST_ID;
1668 
1669 	if (pci_enable_device(pdev))
1670 		goto leave;
1671 
1672 	pci_set_master(pdev);
1673 
1674 	if (pci_request_regions(pdev, NAME53C8XX))
1675 		goto disable;
1676 
1677 	if (sym_check_supported(&sym_dev))
1678 		goto free;
1679 
1680 	if (sym_iomap_device(&sym_dev))
1681 		goto free;
1682 	do_iounmap = 1;
1683 
1684 	if (sym_check_raid(&sym_dev)) {
1685 		do_disable_device = 0;	/* Don't disable the device */
1686 		goto free;
1687 	}
1688 
1689 	if (sym_set_workarounds(&sym_dev))
1690 		goto free;
1691 
1692 	sym_config_pqs(pdev, &sym_dev);
1693 
1694 	sym_get_nvram(&sym_dev, &nvram);
1695 
1696 	do_iounmap = 0; /* Don't sym_iounmap_device() after sym_attach(). */
1697 	shost = sym_attach(&sym2_template, attach_count, &sym_dev);
1698 	if (!shost)
1699 		goto free;
1700 
1701 	if (scsi_add_host(shost, &pdev->dev))
1702 		goto detach;
1703 	scsi_scan_host(shost);
1704 
1705 	attach_count++;
1706 
1707 	return 0;
1708 
1709  detach:
1710 	sym_detach(pci_get_drvdata(pdev), pdev);
1711  free:
1712 	if (do_iounmap)
1713 		sym_iounmap_device(&sym_dev);
1714 	pci_release_regions(pdev);
1715  disable:
1716 	if (do_disable_device)
1717 		pci_disable_device(pdev);
1718  leave:
1719 	return -ENODEV;
1720 }
1721 
1722 static void sym2_remove(struct pci_dev *pdev)
1723 {
1724 	struct Scsi_Host *shost = pci_get_drvdata(pdev);
1725 
1726 	scsi_remove_host(shost);
1727 	sym_detach(shost, pdev);
1728 	pci_release_regions(pdev);
1729 	pci_disable_device(pdev);
1730 
1731 	attach_count--;
1732 }
1733 
1734 /**
1735  * sym2_io_error_detected() - called when PCI error is detected
1736  * @pdev: pointer to PCI device
1737  * @state: current state of the PCI slot
1738  */
1739 static pci_ers_result_t sym2_io_error_detected(struct pci_dev *pdev,
1740                                          pci_channel_state_t state)
1741 {
1742 	/* If slot is permanently frozen, turn everything off */
1743 	if (state == pci_channel_io_perm_failure) {
1744 		sym2_remove(pdev);
1745 		return PCI_ERS_RESULT_DISCONNECT;
1746 	}
1747 
1748 	disable_irq(pdev->irq);
1749 	pci_disable_device(pdev);
1750 
1751 	/* Request that MMIO be enabled, so register dump can be taken. */
1752 	return PCI_ERS_RESULT_CAN_RECOVER;
1753 }
1754 
1755 /**
1756  * sym2_io_slot_dump - Enable MMIO and dump debug registers
1757  * @pdev: pointer to PCI device
1758  */
1759 static pci_ers_result_t sym2_io_slot_dump(struct pci_dev *pdev)
1760 {
1761 	struct Scsi_Host *shost = pci_get_drvdata(pdev);
1762 
1763 	sym_dump_registers(shost);
1764 
1765 	/* Request a slot reset. */
1766 	return PCI_ERS_RESULT_NEED_RESET;
1767 }
1768 
1769 /**
1770  * sym2_reset_workarounds - hardware-specific work-arounds
1771  * @pdev: pointer to PCI device
1772  *
1773  * This routine is similar to sym_set_workarounds(), except
1774  * that, at this point, we already know that the device was
1775  * successfully initialized at least once before, and so most
1776  * of the steps taken there are un-needed here.
1777  */
1778 static void sym2_reset_workarounds(struct pci_dev *pdev)
1779 {
1780 	u_short status_reg;
1781 	struct sym_chip *chip;
1782 
1783 	chip = sym_lookup_chip_table(pdev->device, pdev->revision);
1784 
1785 	/* Work around for errant bit in 895A, in a fashion
1786 	 * similar to what is done in sym_set_workarounds().
1787 	 */
1788 	pci_read_config_word(pdev, PCI_STATUS, &status_reg);
1789 	if (!(chip->features & FE_66MHZ) && (status_reg & PCI_STATUS_66MHZ)) {
1790 		status_reg = PCI_STATUS_66MHZ;
1791 		pci_write_config_word(pdev, PCI_STATUS, status_reg);
1792 		pci_read_config_word(pdev, PCI_STATUS, &status_reg);
1793 	}
1794 }
1795 
1796 /**
1797  * sym2_io_slot_reset() - called when the pci bus has been reset.
1798  * @pdev: pointer to PCI device
1799  *
1800  * Restart the card from scratch.
1801  */
1802 static pci_ers_result_t sym2_io_slot_reset(struct pci_dev *pdev)
1803 {
1804 	struct Scsi_Host *shost = pci_get_drvdata(pdev);
1805 	struct sym_hcb *np = sym_get_hcb(shost);
1806 
1807 	printk(KERN_INFO "%s: recovering from a PCI slot reset\n",
1808 	          sym_name(np));
1809 
1810 	if (pci_enable_device(pdev)) {
1811 		printk(KERN_ERR "%s: Unable to enable after PCI reset\n",
1812 		        sym_name(np));
1813 		return PCI_ERS_RESULT_DISCONNECT;
1814 	}
1815 
1816 	pci_set_master(pdev);
1817 	enable_irq(pdev->irq);
1818 
1819 	/* If the chip can do Memory Write Invalidate, enable it */
1820 	if (np->features & FE_WRIE) {
1821 		if (pci_set_mwi(pdev))
1822 			return PCI_ERS_RESULT_DISCONNECT;
1823 	}
1824 
1825 	/* Perform work-arounds, analogous to sym_set_workarounds() */
1826 	sym2_reset_workarounds(pdev);
1827 
1828 	/* Perform host reset only on one instance of the card */
1829 	if (PCI_FUNC(pdev->devfn) == 0) {
1830 		if (sym_reset_scsi_bus(np, 0)) {
1831 			printk(KERN_ERR "%s: Unable to reset scsi host\n",
1832 			        sym_name(np));
1833 			return PCI_ERS_RESULT_DISCONNECT;
1834 		}
1835 		sym_start_up(shost, 1);
1836 	}
1837 
1838 	return PCI_ERS_RESULT_RECOVERED;
1839 }
1840 
1841 /**
1842  * sym2_io_resume() - resume normal ops after PCI reset
1843  * @pdev: pointer to PCI device
1844  *
1845  * Called when the error recovery driver tells us that its
1846  * OK to resume normal operation. Use completion to allow
1847  * halted scsi ops to resume.
1848  */
1849 static void sym2_io_resume(struct pci_dev *pdev)
1850 {
1851 	struct Scsi_Host *shost = pci_get_drvdata(pdev);
1852 	struct sym_data *sym_data = shost_priv(shost);
1853 
1854 	spin_lock_irq(shost->host_lock);
1855 	if (sym_data->io_reset)
1856 		complete(sym_data->io_reset);
1857 	spin_unlock_irq(shost->host_lock);
1858 }
1859 
1860 static void sym2_get_signalling(struct Scsi_Host *shost)
1861 {
1862 	struct sym_hcb *np = sym_get_hcb(shost);
1863 	enum spi_signal_type type;
1864 
1865 	switch (np->scsi_mode) {
1866 	case SMODE_SE:
1867 		type = SPI_SIGNAL_SE;
1868 		break;
1869 	case SMODE_LVD:
1870 		type = SPI_SIGNAL_LVD;
1871 		break;
1872 	case SMODE_HVD:
1873 		type = SPI_SIGNAL_HVD;
1874 		break;
1875 	default:
1876 		type = SPI_SIGNAL_UNKNOWN;
1877 		break;
1878 	}
1879 	spi_signalling(shost) = type;
1880 }
1881 
1882 static void sym2_set_offset(struct scsi_target *starget, int offset)
1883 {
1884 	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
1885 	struct sym_hcb *np = sym_get_hcb(shost);
1886 	struct sym_tcb *tp = &np->target[starget->id];
1887 
1888 	tp->tgoal.offset = offset;
1889 	tp->tgoal.check_nego = 1;
1890 }
1891 
1892 static void sym2_set_period(struct scsi_target *starget, int period)
1893 {
1894 	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
1895 	struct sym_hcb *np = sym_get_hcb(shost);
1896 	struct sym_tcb *tp = &np->target[starget->id];
1897 
1898 	/* have to have DT for these transfers, but DT will also
1899 	 * set width, so check that this is allowed */
1900 	if (period <= np->minsync && spi_width(starget))
1901 		tp->tgoal.dt = 1;
1902 
1903 	tp->tgoal.period = period;
1904 	tp->tgoal.check_nego = 1;
1905 }
1906 
1907 static void sym2_set_width(struct scsi_target *starget, int width)
1908 {
1909 	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
1910 	struct sym_hcb *np = sym_get_hcb(shost);
1911 	struct sym_tcb *tp = &np->target[starget->id];
1912 
1913 	/* It is illegal to have DT set on narrow transfers.  If DT is
1914 	 * clear, we must also clear IU and QAS.  */
1915 	if (width == 0)
1916 		tp->tgoal.iu = tp->tgoal.dt = tp->tgoal.qas = 0;
1917 
1918 	tp->tgoal.width = width;
1919 	tp->tgoal.check_nego = 1;
1920 }
1921 
1922 static void sym2_set_dt(struct scsi_target *starget, int dt)
1923 {
1924 	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
1925 	struct sym_hcb *np = sym_get_hcb(shost);
1926 	struct sym_tcb *tp = &np->target[starget->id];
1927 
1928 	/* We must clear QAS and IU if DT is clear */
1929 	if (dt)
1930 		tp->tgoal.dt = 1;
1931 	else
1932 		tp->tgoal.iu = tp->tgoal.dt = tp->tgoal.qas = 0;
1933 	tp->tgoal.check_nego = 1;
1934 }
1935 
1936 #if 0
1937 static void sym2_set_iu(struct scsi_target *starget, int iu)
1938 {
1939 	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
1940 	struct sym_hcb *np = sym_get_hcb(shost);
1941 	struct sym_tcb *tp = &np->target[starget->id];
1942 
1943 	if (iu)
1944 		tp->tgoal.iu = tp->tgoal.dt = 1;
1945 	else
1946 		tp->tgoal.iu = 0;
1947 	tp->tgoal.check_nego = 1;
1948 }
1949 
1950 static void sym2_set_qas(struct scsi_target *starget, int qas)
1951 {
1952 	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
1953 	struct sym_hcb *np = sym_get_hcb(shost);
1954 	struct sym_tcb *tp = &np->target[starget->id];
1955 
1956 	if (qas)
1957 		tp->tgoal.dt = tp->tgoal.qas = 1;
1958 	else
1959 		tp->tgoal.qas = 0;
1960 	tp->tgoal.check_nego = 1;
1961 }
1962 #endif
1963 
1964 static struct spi_function_template sym2_transport_functions = {
1965 	.set_offset	= sym2_set_offset,
1966 	.show_offset	= 1,
1967 	.set_period	= sym2_set_period,
1968 	.show_period	= 1,
1969 	.set_width	= sym2_set_width,
1970 	.show_width	= 1,
1971 	.set_dt		= sym2_set_dt,
1972 	.show_dt	= 1,
1973 #if 0
1974 	.set_iu		= sym2_set_iu,
1975 	.show_iu	= 1,
1976 	.set_qas	= sym2_set_qas,
1977 	.show_qas	= 1,
1978 #endif
1979 	.get_signalling	= sym2_get_signalling,
1980 };
1981 
1982 static struct pci_device_id sym2_id_table[] = {
1983 	{ PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_NCR_53C810,
1984 	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
1985 	{ PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_NCR_53C820,
1986 	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, /* new */
1987 	{ PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_NCR_53C825,
1988 	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
1989 	{ PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_NCR_53C815,
1990 	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
1991 	{ PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_53C810AP,
1992 	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, /* new */
1993 	{ PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_NCR_53C860,
1994 	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
1995 	{ PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_53C1510,
1996 	  PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_STORAGE_SCSI<<8,  0xffff00, 0UL },
1997 	{ PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_NCR_53C896,
1998 	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
1999 	{ PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_NCR_53C895,
2000 	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
2001 	{ PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_NCR_53C885,
2002 	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
2003 	{ PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_NCR_53C875,
2004 	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
2005 	{ PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_NCR_53C1510,
2006 	  PCI_ANY_ID, PCI_ANY_ID,  PCI_CLASS_STORAGE_SCSI<<8,  0xffff00, 0UL }, /* new */
2007 	{ PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_53C895A,
2008 	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
2009 	{ PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_53C875A,
2010 	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
2011 	{ PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_53C1010_33,
2012 	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
2013 	{ PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_53C1010_66,
2014 	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
2015 	{ PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_NCR_53C875J,
2016 	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
2017 	{ 0, }
2018 };
2019 
2020 MODULE_DEVICE_TABLE(pci, sym2_id_table);
2021 
2022 static const struct pci_error_handlers sym2_err_handler = {
2023 	.error_detected	= sym2_io_error_detected,
2024 	.mmio_enabled	= sym2_io_slot_dump,
2025 	.slot_reset	= sym2_io_slot_reset,
2026 	.resume		= sym2_io_resume,
2027 };
2028 
2029 static struct pci_driver sym2_driver = {
2030 	.name		= NAME53C8XX,
2031 	.id_table	= sym2_id_table,
2032 	.probe		= sym2_probe,
2033 	.remove		= sym2_remove,
2034 	.err_handler 	= &sym2_err_handler,
2035 };
2036 
2037 static int __init sym2_init(void)
2038 {
2039 	int error;
2040 
2041 	sym2_setup_params();
2042 	sym2_transport_template = spi_attach_transport(&sym2_transport_functions);
2043 	if (!sym2_transport_template)
2044 		return -ENODEV;
2045 
2046 	error = pci_register_driver(&sym2_driver);
2047 	if (error)
2048 		spi_release_transport(sym2_transport_template);
2049 	return error;
2050 }
2051 
2052 static void __exit sym2_exit(void)
2053 {
2054 	pci_unregister_driver(&sym2_driver);
2055 	spi_release_transport(sym2_transport_template);
2056 }
2057 
2058 module_init(sym2_init);
2059 module_exit(sym2_exit);
2060