// SPDX-License-Identifier: GPL-2.0-only
/* esp_scsi.c: ESP SCSI driver.
 *
 * Copyright (C) 2007 David S. Miller (davem@davemloft.net)
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/list.h>
#include <linux/completion.h>
#include <linux/kallsyms.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/irqreturn.h>

#include <asm/irq.h>
#include <asm/io.h>
#include <asm/dma.h>

#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_transport_spi.h>

#include "esp_scsi.h"

#define DRV_MODULE_NAME		"esp"
#define PFX DRV_MODULE_NAME	": "
#define DRV_VERSION		"2.000"
#define DRV_MODULE_RELDATE	"April 19, 2007"

/* SCSI bus reset settle time in seconds.  */
static int esp_bus_reset_settle = 3;

static u32 esp_debug;
#define ESP_DEBUG_INTR		0x00000001
#define ESP_DEBUG_SCSICMD	0x00000002
#define ESP_DEBUG_RESET		0x00000004
#define ESP_DEBUG_MSGIN		0x00000008
#define ESP_DEBUG_MSGOUT	0x00000010
#define ESP_DEBUG_CMDDONE	0x00000020
#define ESP_DEBUG_DISCONNECT	0x00000040
#define ESP_DEBUG_DATASTART	0x00000080
#define ESP_DEBUG_DATADONE	0x00000100
#define ESP_DEBUG_RECONNECT	0x00000200
#define ESP_DEBUG_AUTOSENSE	0x00000400
#define ESP_DEBUG_EVENT		0x00000800
#define ESP_DEBUG_COMMAND	0x00001000

#define esp_log_intr(f, a...) \
do {	if (esp_debug & ESP_DEBUG_INTR) \
		shost_printk(KERN_DEBUG, esp->host, f, ## a);	\
} while (0)

#define esp_log_reset(f, a...) \
do {	if (esp_debug & ESP_DEBUG_RESET) \
		shost_printk(KERN_DEBUG, esp->host, f, ## a);	\
} while (0)

#define esp_log_msgin(f, a...) \
do {	if (esp_debug & ESP_DEBUG_MSGIN) \
		shost_printk(KERN_DEBUG, esp->host, f, ## a);	\
} while (0)

#define esp_log_msgout(f, a...) \
do {	if (esp_debug & ESP_DEBUG_MSGOUT) \
		shost_printk(KERN_DEBUG, esp->host, f, ## a);	\
} while (0)

#define esp_log_cmddone(f, a...) \
do {	if (esp_debug & ESP_DEBUG_CMDDONE) \
		shost_printk(KERN_DEBUG, esp->host, f, ## a);	\
} while (0)

#define esp_log_disconnect(f, a...) \
do {	if (esp_debug & ESP_DEBUG_DISCONNECT) \
		shost_printk(KERN_DEBUG, esp->host, f, ## a);	\
} while (0)

#define esp_log_datastart(f, a...) \
do {	if (esp_debug & ESP_DEBUG_DATASTART) \
		shost_printk(KERN_DEBUG, esp->host, f, ## a);	\
} while (0)

#define esp_log_datadone(f, a...) \
do {	if (esp_debug & ESP_DEBUG_DATADONE) \
		shost_printk(KERN_DEBUG, esp->host, f, ## a);	\
} while (0)

#define esp_log_reconnect(f, a...) \
do {	if (esp_debug & ESP_DEBUG_RECONNECT) \
		shost_printk(KERN_DEBUG, esp->host, f, ## a);	\
} while (0)

#define esp_log_autosense(f, a...) \
do {	if (esp_debug & ESP_DEBUG_AUTOSENSE) \
		shost_printk(KERN_DEBUG, esp->host, f, ## a);	\
} while (0)

#define esp_log_event(f, a...) \
do {	if (esp_debug & ESP_DEBUG_EVENT)	\
		shost_printk(KERN_DEBUG, esp->host, f, ## a);	\
} while (0)

#define esp_log_command(f, a...) \
do {	if (esp_debug & ESP_DEBUG_COMMAND)	\
		shost_printk(KERN_DEBUG, esp->host, f, ## a);	\
} while (0)

#define esp_read8(REG)		esp->ops->esp_read8(esp, REG)
#define esp_write8(VAL, REG)	esp->ops->esp_write8(esp, VAL, REG)

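/*
 * Debug event log: every chip command and state-machine event is
 * recorded in the esp_event_log ring buffer along with a snapshot of
 * the chip registers, so esp_dump_cmd_log() can reconstruct what led
 * up to a failure.
 */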
static void esp_log_fill_regs(struct esp *esp,
			      struct esp_event_ent *p)
{
	p->sreg = esp->sreg;
	p->seqreg = esp->seqreg;
	p->sreg2 = esp->sreg2;
	p->ireg = esp->ireg;
	p->select_state = esp->select_state;
	p->event = esp->event;
}

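/* Record and issue a single command byte to the chip's command register. */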
void scsi_esp_cmd(struct esp *esp, u8 val)
{
	struct esp_event_ent *p;
	int idx = esp->esp_event_cur;

	p = &esp->esp_event_log[idx];
	p->type = ESP_EVENT_TYPE_CMD;
	p->val = val;
	esp_log_fill_regs(esp, p);

	esp->esp_event_cur = (idx + 1) & (ESP_EVENT_LOG_SZ - 1);

	esp_log_command("cmd[%02x]\n", val);
	esp_write8(val, ESP_CMD);
}
EXPORT_SYMBOL(scsi_esp_cmd);

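/*
 * Push the assembled command block out to the target, either by
 * stuffing the FIFO by hand (PIO hosts, ESP_FLAG_USE_FIFO) or by
 * handing it to the external DMA engine.
 */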
static void esp_send_dma_cmd(struct esp *esp, int len, int max_len, int cmd)
{
	if (esp->flags & ESP_FLAG_USE_FIFO) {
		int i;

		scsi_esp_cmd(esp, ESP_CMD_FLUSH);
		for (i = 0; i < len; i++)
			esp_write8(esp->command_block[i], ESP_FDATA);
		scsi_esp_cmd(esp, cmd);
	} else {
		if (esp->rev == FASHME)
			scsi_esp_cmd(esp, ESP_CMD_FLUSH);
		cmd |= ESP_CMD_DMA;
		esp->ops->send_dma_cmd(esp, esp->command_block_dma,
				       len, max_len, 0, cmd);
	}
}

static void esp_event(struct esp *esp, u8 val)
{
	struct esp_event_ent *p;
	int idx = esp->esp_event_cur;

	p = &esp->esp_event_log[idx];
	p->type = ESP_EVENT_TYPE_EVENT;
	p->val = val;
	esp_log_fill_regs(esp, p);

	esp->esp_event_cur = (idx + 1) & (ESP_EVENT_LOG_SZ - 1);

	esp->event = val;
}

static void esp_dump_cmd_log(struct esp *esp)
{
	int idx = esp->esp_event_cur;
	int stop = idx;

	shost_printk(KERN_INFO, esp->host, "Dumping command log\n");
	do {
		struct esp_event_ent *p = &esp->esp_event_log[idx];

		shost_printk(KERN_INFO, esp->host,
			     "ent[%d] %s val[%02x] sreg[%02x] seqreg[%02x] "
			     "sreg2[%02x] ireg[%02x] ss[%02x] event[%02x]\n",
			     idx,
			     p->type == ESP_EVENT_TYPE_CMD ? "CMD" : "EVENT",
			     p->val, p->sreg, p->seqreg,
			     p->sreg2, p->ireg, p->select_state, p->event);

		idx = (idx + 1) & (ESP_EVENT_LOG_SZ - 1);
	} while (idx != stop);
}

static void esp_flush_fifo(struct esp *esp)
{
	scsi_esp_cmd(esp, ESP_CMD_FLUSH);
	if (esp->rev == ESP236) {
		int lim = 1000;

		while (esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES) {
			if (--lim == 0) {
				shost_printk(KERN_ALERT, esp->host,
					     "ESP_FF_FBYTES will not clear!\n");
				break;
			}
			udelay(1);
		}
	}
}

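/*
 * Drain the chip FIFO into esp->fifo[].  Each FASHME FIFO slot holds
 * two bytes; a dangling odd byte (ESP_STAT2_F1BYTE) is forced out by
 * writing a pad byte before the final read.
 */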
static void hme_read_fifo(struct esp *esp)
{
	int fcnt = esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES;
	int idx = 0;

	while (fcnt--) {
		esp->fifo[idx++] = esp_read8(ESP_FDATA);
		esp->fifo[idx++] = esp_read8(ESP_FDATA);
	}
	if (esp->sreg2 & ESP_STAT2_F1BYTE) {
		esp_write8(0, ESP_FDATA);
		esp->fifo[idx++] = esp_read8(ESP_FDATA);
		scsi_esp_cmd(esp, ESP_CMD_FLUSH);
	}
	esp->fifo_cnt = idx;
}

static void esp_set_all_config3(struct esp *esp, u8 val)
{
	int i;

	for (i = 0; i < ESP_MAX_TARGET; i++)
		esp->target[i].esp_config3 = val;
}

/* Reset the ESP chip, _not_ the SCSI bus. */
static void esp_reset_esp(struct esp *esp)
{
	/* Now reset the ESP chip */
	scsi_esp_cmd(esp, ESP_CMD_RC);
	scsi_esp_cmd(esp, ESP_CMD_NULL | ESP_CMD_DMA);
	if (esp->rev == FAST)
		esp_write8(ESP_CONFIG2_FENAB, ESP_CFG2);
	scsi_esp_cmd(esp, ESP_CMD_NULL | ESP_CMD_DMA);

	/* This is the only point at which it is reliable to read
	 * the ID-code for the fast ESP chip variants.
	 */
	esp->max_period = ((35 * esp->ccycle) / 1000);
	if (esp->rev == FAST) {
		u8 family_code = ESP_FAMILY(esp_read8(ESP_UID));

		if (family_code == ESP_UID_F236) {
			esp->rev = FAS236;
		} else if (family_code == ESP_UID_HME) {
			esp->rev = FASHME; /* Version is usually '5'. */
		} else if (family_code == ESP_UID_FSC) {
			esp->rev = FSC;
			/* Enable Active Negation */
			esp_write8(ESP_CONFIG4_RADE, ESP_CFG4);
		} else {
			esp->rev = FAS100A;
		}
		esp->min_period = ((4 * esp->ccycle) / 1000);
	} else {
		esp->min_period = ((5 * esp->ccycle) / 1000);
	}
	if (esp->rev == FAS236) {
		/*
		 * The AM53c974 chip returns the same ID as FAS236;
		 * try to configure glitch eater.
		 */
		u8 config4 = ESP_CONFIG4_GE1;

		esp_write8(config4, ESP_CFG4);
		config4 = esp_read8(ESP_CFG4);
		if (config4 & ESP_CONFIG4_GE1) {
			esp->rev = PCSCSI;
			esp_write8(esp->config4, ESP_CFG4);
		}
	}
	esp->max_period = (esp->max_period + 3) >> 2;
	esp->min_period = (esp->min_period + 3) >> 2;

	esp_write8(esp->config1, ESP_CFG1);
	switch (esp->rev) {
	case ESP100:
		/* nothing to do */
		break;

	case ESP100A:
		esp_write8(esp->config2, ESP_CFG2);
		break;

	case ESP236:
		/* Slow 236 */
		esp_write8(esp->config2, ESP_CFG2);
		esp->prev_cfg3 = esp->target[0].esp_config3;
		esp_write8(esp->prev_cfg3, ESP_CFG3);
		break;

	case FASHME:
		esp->config2 |= (ESP_CONFIG2_HME32 | ESP_CONFIG2_HMEFENAB);
		fallthrough;

	case FAS236:
	case PCSCSI:
	case FSC:
		esp_write8(esp->config2, ESP_CFG2);
		if (esp->rev == FASHME) {
			u8 cfg3 = esp->target[0].esp_config3;

			cfg3 |= ESP_CONFIG3_FCLOCK | ESP_CONFIG3_OBPUSH;
			if (esp->scsi_id >= 8)
				cfg3 |= ESP_CONFIG3_IDBIT3;
			esp_set_all_config3(esp, cfg3);
		} else {
			u32 cfg3 = esp->target[0].esp_config3;

			cfg3 |= ESP_CONFIG3_FCLK;
			esp_set_all_config3(esp, cfg3);
		}
		esp->prev_cfg3 = esp->target[0].esp_config3;
		esp_write8(esp->prev_cfg3, ESP_CFG3);
		if (esp->rev == FASHME) {
			esp->radelay = 80;
		} else {
			if (esp->flags & ESP_FLAG_DIFFERENTIAL)
				esp->radelay = 0;
			else
				esp->radelay = 96;
		}
		break;

	case FAS100A:
		/* Fast 100a */
		esp_write8(esp->config2, ESP_CFG2);
		esp_set_all_config3(esp,
				    (esp->target[0].esp_config3 |
				     ESP_CONFIG3_FCLOCK));
		esp->prev_cfg3 = esp->target[0].esp_config3;
		esp_write8(esp->prev_cfg3, ESP_CFG3);
		esp->radelay = 32;
		break;

	default:
		break;
	}

	/* Reload the configuration registers */
	esp_write8(esp->cfact, ESP_CFACT);

	esp->prev_stp = 0;
	esp_write8(esp->prev_stp, ESP_STP);

	esp->prev_soff = 0;
	esp_write8(esp->prev_soff, ESP_SOFF);

	esp_write8(esp->neg_defp, ESP_TIMEO);

	/* Eat any bitrot in the chip */
	esp_read8(ESP_INTRPT);
	udelay(100);
}

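/*
 * Map the command's scatterlist for a data transfer.  With
 * ESP_FLAG_NO_DMA_MAP (pseudo-DMA and PIO hosts) the "DMA addresses"
 * are simply kernel virtual addresses; otherwise the list goes through
 * the DMA API.
 */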
static void esp_map_dma(struct esp *esp, struct scsi_cmnd *cmd)
{
	struct esp_cmd_priv *spriv = ESP_CMD_PRIV(cmd);
	struct scatterlist *sg = scsi_sglist(cmd);
	int total = 0, i;
	struct scatterlist *s;

	if (cmd->sc_data_direction == DMA_NONE)
		return;

	if (esp->flags & ESP_FLAG_NO_DMA_MAP) {
		/*
		 * For pseudo DMA and PIO we need the virtual address instead of
		 * a dma address, so perform an identity mapping.
		 */
		spriv->num_sg = scsi_sg_count(cmd);

		scsi_for_each_sg(cmd, s, spriv->num_sg, i) {
			s->dma_address = (uintptr_t)sg_virt(s);
			total += sg_dma_len(s);
		}
	} else {
		spriv->num_sg = scsi_dma_map(cmd);
		scsi_for_each_sg(cmd, s, spriv->num_sg, i)
			total += sg_dma_len(s);
	}
	spriv->cur_residue = sg_dma_len(sg);
	spriv->prv_sg = NULL;
	spriv->cur_sg = sg;
	spriv->tot_residue = total;
}

static dma_addr_t esp_cur_dma_addr(struct esp_cmd_entry *ent,
				   struct scsi_cmnd *cmd)
{
	struct esp_cmd_priv *p = ESP_CMD_PRIV(cmd);

	if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
		return ent->sense_dma +
			(ent->sense_ptr - cmd->sense_buffer);
	}

	return sg_dma_address(p->cur_sg) +
		(sg_dma_len(p->cur_sg) -
		 p->cur_residue);
}

static unsigned int esp_cur_dma_len(struct esp_cmd_entry *ent,
				    struct scsi_cmnd *cmd)
{
	struct esp_cmd_priv *p = ESP_CMD_PRIV(cmd);

	if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
		return SCSI_SENSE_BUFFERSIZE -
			(ent->sense_ptr - cmd->sense_buffer);
	}
	return p->cur_residue;
}

static void esp_advance_dma(struct esp *esp, struct esp_cmd_entry *ent,
			    struct scsi_cmnd *cmd, unsigned int len)
{
	struct esp_cmd_priv *p = ESP_CMD_PRIV(cmd);

	if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
		ent->sense_ptr += len;
		return;
	}

	p->cur_residue -= len;
	p->tot_residue -= len;
	if (p->cur_residue < 0 || p->tot_residue < 0) {
		shost_printk(KERN_ERR, esp->host,
			     "Data transfer overflow.\n");
		shost_printk(KERN_ERR, esp->host,
			     "cur_residue[%d] tot_residue[%d] len[%u]\n",
			     p->cur_residue, p->tot_residue, len);
		p->cur_residue = 0;
		p->tot_residue = 0;
	}
	if (!p->cur_residue && p->tot_residue) {
		p->prv_sg = p->cur_sg;
		p->cur_sg = sg_next(p->cur_sg);
		p->cur_residue = sg_dma_len(p->cur_sg);
	}
}

static void esp_unmap_dma(struct esp *esp, struct scsi_cmnd *cmd)
{
	if (!(esp->flags & ESP_FLAG_NO_DMA_MAP))
		scsi_dma_unmap(cmd);
}

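/*
 * SCSI SAVE DATA POINTER / RESTORE POINTERS support: snapshot or
 * re-load the current scatterlist position and residues, or the
 * sense-buffer pointer while autosense is in flight.
 */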
static void esp_save_pointers(struct esp *esp, struct esp_cmd_entry *ent)
{
	struct scsi_cmnd *cmd = ent->cmd;
	struct esp_cmd_priv *spriv = ESP_CMD_PRIV(cmd);

	if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
		ent->saved_sense_ptr = ent->sense_ptr;
		return;
	}
	ent->saved_cur_residue = spriv->cur_residue;
	ent->saved_prv_sg = spriv->prv_sg;
	ent->saved_cur_sg = spriv->cur_sg;
	ent->saved_tot_residue = spriv->tot_residue;
}

static void esp_restore_pointers(struct esp *esp, struct esp_cmd_entry *ent)
{
	struct scsi_cmnd *cmd = ent->cmd;
	struct esp_cmd_priv *spriv = ESP_CMD_PRIV(cmd);

	if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
		ent->sense_ptr = ent->saved_sense_ptr;
		return;
	}
	spriv->cur_residue = ent->saved_cur_residue;
	spriv->prv_sg = ent->saved_prv_sg;
	spriv->cur_sg = ent->saved_cur_sg;
	spriv->tot_residue = ent->saved_tot_residue;
}

static void esp_write_tgt_config3(struct esp *esp, int tgt)
{
	if (esp->rev > ESP100A) {
		u8 val = esp->target[tgt].esp_config3;

		if (val != esp->prev_cfg3) {
			esp->prev_cfg3 = val;
			esp_write8(val, ESP_CFG3);
		}
	}
}

static void esp_write_tgt_sync(struct esp *esp, int tgt)
{
	u8 off = esp->target[tgt].esp_offset;
	u8 per = esp->target[tgt].esp_period;

	if (off != esp->prev_soff) {
		esp->prev_soff = off;
		esp_write8(off, ESP_SOFF);
	}
	if (per != esp->prev_stp) {
		esp->prev_stp = per;
		esp_write8(per, ESP_STP);
	}
}

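/*
 * Clip a transfer to what the chip and its DMA backend can take:
 * 24-bit counts on FASHME, 16-bit counts on the other variants, and
 * no crossing of a 24-bit address boundary on the older DMA engines.
 */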
static u32 esp_dma_length_limit(struct esp *esp, u32 dma_addr, u32 dma_len)
{
	if (esp->rev == FASHME) {
		/* Arbitrary segment boundaries, 24-bit counts.  */
		if (dma_len > (1U << 24))
			dma_len = (1U << 24);
	} else {
		u32 base, end;

		/* ESP chip limits other variants by 16-bits of transfer
		 * count.  Actually on FAS100A and FAS236 we could get
		 * 24-bits of transfer count by enabling ESP_CONFIG2_FENAB
		 * in the ESP_CFG2 register but that causes other unwanted
		 * changes so we don't use it currently.
		 */
		if (dma_len > (1U << 16))
			dma_len = (1U << 16);

		/* All of the DMA variants hooked up to these chips
		 * cannot handle crossing a 24-bit address boundary.
		 */
		base = dma_addr & ((1U << 24) - 1U);
		end = base + dma_len;
		if (end > (1U << 24))
			end = (1U << 24);
		dma_len = end - base;
	}
	return dma_len;
}

static int esp_need_to_nego_wide(struct esp_target_data *tp)
{
	struct scsi_target *target = tp->starget;

	return spi_width(target) != tp->nego_goal_width;
}

static int esp_need_to_nego_sync(struct esp_target_data *tp)
{
	struct scsi_target *target = tp->starget;

	/* When offset is zero, period is "don't care".  */
	if (!spi_offset(target) && !tp->nego_goal_offset)
		return 0;

	if (spi_offset(target) == tp->nego_goal_offset &&
	    spi_period(target) == tp->nego_goal_period)
		return 0;

	return 1;
}

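/*
 * Per-LUN queue bookkeeping: a LUN runs either one untagged command
 * (non_tagged_cmd) or any number of tagged ones (tagged_cmds[]).  The
 * "hold" flag plugs the queue so a pending untagged command can issue
 * once all tagged commands have drained.
 */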
static int esp_alloc_lun_tag(struct esp_cmd_entry *ent,
			     struct esp_lun_data *lp)
{
	if (!ent->orig_tag[0]) {
		/* Non-tagged, slot already taken?  */
		if (lp->non_tagged_cmd)
			return -EBUSY;

		if (lp->hold) {
			/* We are being held by active tagged
			 * commands.
			 */
			if (lp->num_tagged)
				return -EBUSY;

			/* Tagged commands completed, we can unplug
			 * the queue and run this untagged command.
			 */
			lp->hold = 0;
		} else if (lp->num_tagged) {
			/* Plug the queue until num_tagged decreases
			 * to zero in esp_free_lun_tag.
			 */
			lp->hold = 1;
			return -EBUSY;
		}

		lp->non_tagged_cmd = ent;
		return 0;
	}

	/* Tagged command. Check that it isn't blocked by a non-tagged one. */
	if (lp->non_tagged_cmd || lp->hold)
		return -EBUSY;

	BUG_ON(lp->tagged_cmds[ent->orig_tag[1]]);

	lp->tagged_cmds[ent->orig_tag[1]] = ent;
	lp->num_tagged++;

	return 0;
}

static void esp_free_lun_tag(struct esp_cmd_entry *ent,
			     struct esp_lun_data *lp)
{
	if (ent->orig_tag[0]) {
		BUG_ON(lp->tagged_cmds[ent->orig_tag[1]] != ent);
		lp->tagged_cmds[ent->orig_tag[1]] = NULL;
		lp->num_tagged--;
	} else {
		BUG_ON(lp->non_tagged_cmd != ent);
		lp->non_tagged_cmd = NULL;
	}
}

static void esp_map_sense(struct esp *esp, struct esp_cmd_entry *ent)
{
	ent->sense_ptr = ent->cmd->sense_buffer;
	if (esp->flags & ESP_FLAG_NO_DMA_MAP) {
		ent->sense_dma = (uintptr_t)ent->sense_ptr;
		return;
	}

	ent->sense_dma = dma_map_single(esp->dev, ent->sense_ptr,
					SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
}

static void esp_unmap_sense(struct esp *esp, struct esp_cmd_entry *ent)
{
	if (!(esp->flags & ESP_FLAG_NO_DMA_MAP))
		dma_unmap_single(esp->dev, ent->sense_dma,
				 SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
	ent->sense_ptr = NULL;
}

/* When a contingent allegiance condition is created, we force feed a
 * REQUEST_SENSE command to the device to fetch the sense data.  I
 * tried many other schemes, relying on the scsi error handling layer
 * to send out the REQUEST_SENSE automatically, but this was difficult
 * to get right especially in the presence of applications like smartd
 * which use SG_IO to send out their own REQUEST_SENSE commands.
 */
static void esp_autosense(struct esp *esp, struct esp_cmd_entry *ent)
{
	struct scsi_cmnd *cmd = ent->cmd;
	struct scsi_device *dev = cmd->device;
	int tgt, lun;
	u8 *p, val;

	tgt = dev->id;
	lun = dev->lun;

	if (!ent->sense_ptr) {
		esp_log_autosense("Doing auto-sense for tgt[%d] lun[%d]\n",
				  tgt, lun);
		esp_map_sense(esp, ent);
	}
	ent->saved_sense_ptr = ent->sense_ptr;

	esp->active_cmd = ent;

	p = esp->command_block;
	esp->msg_out_len = 0;

	*p++ = IDENTIFY(0, lun);
	*p++ = REQUEST_SENSE;
	*p++ = ((dev->scsi_level <= SCSI_2) ?
		(lun << 5) : 0);
	*p++ = 0;
	*p++ = 0;
	*p++ = SCSI_SENSE_BUFFERSIZE;
	*p++ = 0;

	esp->select_state = ESP_SELECT_BASIC;

	val = tgt;
	if (esp->rev == FASHME)
		val |= ESP_BUSID_RESELID | ESP_BUSID_CTR32BIT;
	esp_write8(val, ESP_BUSID);

	esp_write_tgt_sync(esp, tgt);
	esp_write_tgt_config3(esp, tgt);

	val = (p - esp->command_block);

	esp_send_dma_cmd(esp, val, 16, ESP_CMD_SELA);
}

static struct esp_cmd_entry *find_and_prep_issuable_command(struct esp *esp)
{
	struct esp_cmd_entry *ent;

	list_for_each_entry(ent, &esp->queued_cmds, list) {
		struct scsi_cmnd *cmd = ent->cmd;
		struct scsi_device *dev = cmd->device;
		struct esp_lun_data *lp = dev->hostdata;

		if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
			ent->tag[0] = 0;
			ent->tag[1] = 0;
			return ent;
		}

		if (!spi_populate_tag_msg(&ent->tag[0], cmd)) {
			ent->tag[0] = 0;
			ent->tag[1] = 0;
		}
		ent->orig_tag[0] = ent->tag[0];
		ent->orig_tag[1] = ent->tag[1];

		if (esp_alloc_lun_tag(ent, lp) < 0)
			continue;

		return ent;
	}

	return NULL;
}

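/*
 * Try to start the next issuable command if the bus is free: build the
 * IDENTIFY (plus tag and any negotiation message) and the CDB in the
 * command block, then begin selection, using select-and-stop whenever
 * extra message bytes must follow the identify.
 */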
static void esp_maybe_execute_command(struct esp *esp)
{
	struct esp_target_data *tp;
	struct scsi_device *dev;
	struct scsi_cmnd *cmd;
	struct esp_cmd_entry *ent;
	bool select_and_stop = false;
	int tgt, lun, i;
	u32 val, start_cmd;
	u8 *p;

	if (esp->active_cmd ||
	    (esp->flags & ESP_FLAG_RESETTING))
		return;

	ent = find_and_prep_issuable_command(esp);
	if (!ent)
		return;

	if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
		esp_autosense(esp, ent);
		return;
	}

	cmd = ent->cmd;
	dev = cmd->device;
	tgt = dev->id;
	lun = dev->lun;
	tp = &esp->target[tgt];

	list_move(&ent->list, &esp->active_cmds);

	esp->active_cmd = ent;

	esp_map_dma(esp, cmd);
	esp_save_pointers(esp, ent);

	if (!(cmd->cmd_len == 6 || cmd->cmd_len == 10 || cmd->cmd_len == 12))
		select_and_stop = true;

	p = esp->command_block;

	esp->msg_out_len = 0;
	if (tp->flags & ESP_TGT_CHECK_NEGO) {
		/* Need to negotiate.  If the target is broken
		 * go for asynchronous transfers and non-wide.
		 */
		if (tp->flags & ESP_TGT_BROKEN) {
			tp->flags &= ~ESP_TGT_DISCONNECT;
			tp->nego_goal_period = 0;
			tp->nego_goal_offset = 0;
			tp->nego_goal_width = 0;
			tp->nego_goal_tags = 0;
		}

		/* If the settings are not changing, skip this.  */
		if (spi_width(tp->starget) == tp->nego_goal_width &&
		    spi_period(tp->starget) == tp->nego_goal_period &&
		    spi_offset(tp->starget) == tp->nego_goal_offset) {
			tp->flags &= ~ESP_TGT_CHECK_NEGO;
			goto build_identify;
		}

		if (esp->rev == FASHME && esp_need_to_nego_wide(tp)) {
			esp->msg_out_len =
				spi_populate_width_msg(&esp->msg_out[0],
						       (tp->nego_goal_width ?
							1 : 0));
			tp->flags |= ESP_TGT_NEGO_WIDE;
		} else if (esp_need_to_nego_sync(tp)) {
			esp->msg_out_len =
				spi_populate_sync_msg(&esp->msg_out[0],
						      tp->nego_goal_period,
						      tp->nego_goal_offset);
			tp->flags |= ESP_TGT_NEGO_SYNC;
		} else {
			tp->flags &= ~ESP_TGT_CHECK_NEGO;
		}

		/* If there are multiple message bytes, use Select and Stop */
		if (esp->msg_out_len)
			select_and_stop = true;
	}

build_identify:
	*p++ = IDENTIFY(tp->flags & ESP_TGT_DISCONNECT, lun);

	if (ent->tag[0] && esp->rev == ESP100) {
		/* ESP100 lacks select w/atn3 command, use select
		 * and stop instead.
		 */
		select_and_stop = true;
	}

	if (select_and_stop) {
		esp->cmd_bytes_left = cmd->cmd_len;
		esp->cmd_bytes_ptr = &cmd->cmnd[0];

		if (ent->tag[0]) {
			for (i = esp->msg_out_len - 1;
			     i >= 0; i--)
				esp->msg_out[i + 2] = esp->msg_out[i];
			esp->msg_out[0] = ent->tag[0];
			esp->msg_out[1] = ent->tag[1];
			esp->msg_out_len += 2;
		}

		start_cmd = ESP_CMD_SELAS;
		esp->select_state = ESP_SELECT_MSGOUT;
	} else {
		start_cmd = ESP_CMD_SELA;
		if (ent->tag[0]) {
			*p++ = ent->tag[0];
			*p++ = ent->tag[1];

			start_cmd = ESP_CMD_SA3;
		}

		for (i = 0; i < cmd->cmd_len; i++)
			*p++ = cmd->cmnd[i];

		esp->select_state = ESP_SELECT_BASIC;
	}
	val = tgt;
	if (esp->rev == FASHME)
		val |= ESP_BUSID_RESELID | ESP_BUSID_CTR32BIT;
	esp_write8(val, ESP_BUSID);

	esp_write_tgt_sync(esp, tgt);
	esp_write_tgt_config3(esp, tgt);

	val = (p - esp->command_block);

	if (esp_debug & ESP_DEBUG_SCSICMD) {
		printk("ESP: tgt[%d] lun[%d] scsi_cmd [ ", tgt, lun);
		for (i = 0; i < cmd->cmd_len; i++)
			printk("%02x ", cmd->cmnd[i]);
		printk("]\n");
	}

	esp_send_dma_cmd(esp, val, 16, start_cmd);
}

static struct esp_cmd_entry *esp_get_ent(struct esp *esp)
{
	struct list_head *head = &esp->esp_cmd_pool;
	struct esp_cmd_entry *ret;

	if (list_empty(head)) {
		ret = kzalloc(sizeof(struct esp_cmd_entry), GFP_ATOMIC);
	} else {
		ret = list_entry(head->next, struct esp_cmd_entry, list);
		list_del(&ret->list);
		memset(ret, 0, sizeof(*ret));
	}
	return ret;
}

static void esp_put_ent(struct esp *esp, struct esp_cmd_entry *ent)
{
	list_add(&ent->list, &esp->esp_cmd_pool);
}

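/*
 * Complete a command back to the SCSI midlayer: drop DMA and tag
 * resources, fold in the host/status bytes (or the autosensed CHECK
 * CONDITION), then try to start the next queued command.
 */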
static void esp_cmd_is_done(struct esp *esp, struct esp_cmd_entry *ent,
			    struct scsi_cmnd *cmd, unsigned char host_byte)
{
	struct scsi_device *dev = cmd->device;
	int tgt = dev->id;
	int lun = dev->lun;

	esp->active_cmd = NULL;
	esp_unmap_dma(esp, cmd);
	esp_free_lun_tag(ent, dev->hostdata);
	cmd->result = 0;
	set_host_byte(cmd, host_byte);
	if (host_byte == DID_OK)
		set_status_byte(cmd, ent->status);

	if (ent->eh_done) {
		complete(ent->eh_done);
		ent->eh_done = NULL;
	}

	if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
		esp_unmap_sense(esp, ent);

		/* Restore the message/status bytes to what we actually
		 * saw originally.  Also, report that we are providing
		 * the sense data.
		 */
		cmd->result = SAM_STAT_CHECK_CONDITION;

		ent->flags &= ~ESP_CMD_FLAG_AUTOSENSE;
		if (esp_debug & ESP_DEBUG_AUTOSENSE) {
			int i;

			printk("esp%d: tgt[%d] lun[%d] AUTO SENSE[ ",
			       esp->host->unique_id, tgt, lun);
			for (i = 0; i < 18; i++)
				printk("%02x ", cmd->sense_buffer[i]);
			printk("]\n");
		}
	}

	scsi_done(cmd);

	list_del(&ent->list);
	esp_put_ent(esp, ent);

	esp_maybe_execute_command(esp);
}

static void esp_event_queue_full(struct esp *esp, struct esp_cmd_entry *ent)
{
	struct scsi_device *dev = ent->cmd->device;
	struct esp_lun_data *lp = dev->hostdata;

	scsi_track_queue_full(dev, lp->num_tagged - 1);
}

static int esp_queuecommand_lck(struct scsi_cmnd *cmd)
{
	struct scsi_device *dev = cmd->device;
	struct esp *esp = shost_priv(dev->host);
	struct esp_cmd_priv *spriv;
	struct esp_cmd_entry *ent;

	ent = esp_get_ent(esp);
	if (!ent)
		return SCSI_MLQUEUE_HOST_BUSY;

	ent->cmd = cmd;

	spriv = ESP_CMD_PRIV(cmd);
	spriv->num_sg = 0;

	list_add_tail(&ent->list, &esp->queued_cmds);

	esp_maybe_execute_command(esp);

	return 0;
}

static DEF_SCSI_QCMD(esp_queuecommand)

static int esp_check_gross_error(struct esp *esp)
{
	if (esp->sreg & ESP_STAT_SPAM) {
		/* Gross Error, could be one of:
		 * - top of fifo overwritten
		 * - top of command register overwritten
		 * - DMA programmed with wrong direction
		 * - improper phase change
		 */
		shost_printk(KERN_ERR, esp->host,
			     "Gross error sreg[%02x]\n", esp->sreg);
		/* XXX Reset the chip. XXX */
		return 1;
	}
	return 0;
}

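/*
 * Decide whether an interrupt really came from the chip.  The
 * interrupt-pending status bit cannot be trusted on ESP100/ESP100A;
 * on later parts, a pending DMA interrupt without ESP_STAT_INTR set
 * means either a spurious IRQ or a DMA error.
 */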
static int esp_check_spur_intr(struct esp *esp)
{
	switch (esp->rev) {
	case ESP100:
	case ESP100A:
		/* The interrupt pending bit of the status register cannot
		 * be trusted on these revisions.
		 */
		esp->sreg &= ~ESP_STAT_INTR;
		break;

	default:
		if (!(esp->sreg & ESP_STAT_INTR)) {
			if (esp->ireg & ESP_INTR_SR)
				return 1;

			/* If the DMA is indicating interrupt pending and the
			 * ESP is not, the only possibility is a DMA error.
			 */
			if (!esp->ops->dma_error(esp)) {
				shost_printk(KERN_ERR, esp->host,
					     "Spurious irq, sreg=%02x.\n",
					     esp->sreg);
				return -1;
			}

			shost_printk(KERN_ERR, esp->host, "DMA error\n");

			/* XXX Reset the chip. XXX */
			return -1;
		}
		break;
	}

	return 0;
}

static void esp_schedule_reset(struct esp *esp)
{
	esp_log_reset("esp_schedule_reset() from %ps\n",
		      __builtin_return_address(0));
	esp->flags |= ESP_FLAG_RESETTING;
	esp_event(esp, ESP_EVENT_RESET);
}

/* In order to avoid having to add a special half-reconnected state
 * into the driver we just sit here and poll through the rest of
 * the reselection process to get the tag message bytes.
 */
static struct esp_cmd_entry *esp_reconnect_with_tag(struct esp *esp,
						    struct esp_lun_data *lp)
{
	struct esp_cmd_entry *ent;
	int i;

	if (!lp->num_tagged) {
		shost_printk(KERN_ERR, esp->host,
			     "Reconnect w/num_tagged==0\n");
		return NULL;
	}

	esp_log_reconnect("reconnect tag, ");

	for (i = 0; i < ESP_QUICKIRQ_LIMIT; i++) {
		if (esp->ops->irq_pending(esp))
			break;
	}
	if (i == ESP_QUICKIRQ_LIMIT) {
		shost_printk(KERN_ERR, esp->host,
			     "Reconnect IRQ1 timeout\n");
		return NULL;
	}

	esp->sreg = esp_read8(ESP_STATUS);
	esp->ireg = esp_read8(ESP_INTRPT);

	esp_log_reconnect("IRQ(%d:%x:%x), ",
			  i, esp->ireg, esp->sreg);

	if (esp->ireg & ESP_INTR_DC) {
		shost_printk(KERN_ERR, esp->host,
			     "Reconnect, got disconnect.\n");
		return NULL;
	}

	if ((esp->sreg & ESP_STAT_PMASK) != ESP_MIP) {
		shost_printk(KERN_ERR, esp->host,
			     "Reconnect, not MIP sreg[%02x].\n", esp->sreg);
		return NULL;
	}

	/* DMA in the tag bytes... */
	esp->command_block[0] = 0xff;
	esp->command_block[1] = 0xff;
	esp->ops->send_dma_cmd(esp, esp->command_block_dma,
			       2, 2, 1, ESP_CMD_DMA | ESP_CMD_TI);

	/* ACK the message.  */
	scsi_esp_cmd(esp, ESP_CMD_MOK);

	for (i = 0; i < ESP_RESELECT_TAG_LIMIT; i++) {
		if (esp->ops->irq_pending(esp)) {
			esp->sreg = esp_read8(ESP_STATUS);
			esp->ireg = esp_read8(ESP_INTRPT);
			if (esp->ireg & ESP_INTR_FDONE)
				break;
		}
		udelay(1);
	}
	if (i == ESP_RESELECT_TAG_LIMIT) {
		shost_printk(KERN_ERR, esp->host, "Reconnect IRQ2 timeout\n");
		return NULL;
	}
	esp->ops->dma_drain(esp);
	esp->ops->dma_invalidate(esp);

	esp_log_reconnect("IRQ2(%d:%x:%x) tag[%x:%x]\n",
			  i, esp->ireg, esp->sreg,
			  esp->command_block[0],
			  esp->command_block[1]);

	if (esp->command_block[0] < SIMPLE_QUEUE_TAG ||
	    esp->command_block[0] > ORDERED_QUEUE_TAG) {
		shost_printk(KERN_ERR, esp->host,
			     "Reconnect, bad tag type %02x.\n",
			     esp->command_block[0]);
		return NULL;
	}

	ent = lp->tagged_cmds[esp->command_block[1]];
	if (!ent) {
		shost_printk(KERN_ERR, esp->host,
			     "Reconnect, no entry for tag %02x.\n",
			     esp->command_block[1]);
		return NULL;
	}

	return ent;
}

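/*
 * Handle reselection by a disconnected target: recover the target and
 * lun (from the FIFO on FASHME, from the arbitration bitmap on older
 * chips), look up the outstanding command and resume it.  Returns
 * nonzero on success; any inconsistency schedules a bus reset.
 */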
static int esp_reconnect(struct esp *esp)
{
	struct esp_cmd_entry *ent;
	struct esp_target_data *tp;
	struct esp_lun_data *lp;
	struct scsi_device *dev;
	int target, lun;

	BUG_ON(esp->active_cmd);
	if (esp->rev == FASHME) {
		/* FASHME puts the target and lun numbers directly
		 * into the fifo.
		 */
		target = esp->fifo[0];
		lun = esp->fifo[1] & 0x7;
	} else {
		u8 bits = esp_read8(ESP_FDATA);

		/* Older chips put the lun directly into the fifo, but
		 * the target is given as a sample of the arbitration
		 * lines on the bus at reselection time.  So we should
		 * see the ID of the ESP and the one reconnecting target
		 * set in the bitmap.
		 */
		if (!(bits & esp->scsi_id_mask))
			goto do_reset;
		bits &= ~esp->scsi_id_mask;
		if (!bits || (bits & (bits - 1)))
			goto do_reset;

		target = ffs(bits) - 1;
		lun = (esp_read8(ESP_FDATA) & 0x7);

		scsi_esp_cmd(esp, ESP_CMD_FLUSH);
		if (esp->rev == ESP100) {
			u8 ireg = esp_read8(ESP_INTRPT);

			/* This chip has a bug during reselection that can
			 * cause a spurious illegal-command interrupt, which
			 * we simply ACK here.  Another possibility is a bus
			 * reset so we must check for that.
			 */
			if (ireg & ESP_INTR_SR)
				goto do_reset;
		}
		scsi_esp_cmd(esp, ESP_CMD_NULL);
	}

	esp_write_tgt_sync(esp, target);
	esp_write_tgt_config3(esp, target);

	scsi_esp_cmd(esp, ESP_CMD_MOK);

	if (esp->rev == FASHME)
		esp_write8(target | ESP_BUSID_RESELID | ESP_BUSID_CTR32BIT,
			   ESP_BUSID);

	tp = &esp->target[target];
	dev = __scsi_device_lookup_by_target(tp->starget, lun);
	if (!dev) {
		shost_printk(KERN_ERR, esp->host,
			     "Reconnect, no lp tgt[%u] lun[%u]\n",
			     target, lun);
		goto do_reset;
	}
	lp = dev->hostdata;

	ent = lp->non_tagged_cmd;
	if (!ent) {
		ent = esp_reconnect_with_tag(esp, lp);
		if (!ent)
			goto do_reset;
	}

	esp->active_cmd = ent;

	esp_event(esp, ESP_EVENT_CHECK_PHASE);
	esp_restore_pointers(esp, ent);
	esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
	return 1;

do_reset:
	esp_schedule_reset(esp);
	return 0;
}

static int esp_finish_select(struct esp *esp)
{
	struct esp_cmd_entry *ent;
	struct scsi_cmnd *cmd;

	/* No longer selecting.  */
	esp->select_state = ESP_SELECT_NONE;

	esp->seqreg = esp_read8(ESP_SSTEP) & ESP_STEP_VBITS;
	ent = esp->active_cmd;
	cmd = ent->cmd;

	if (esp->ops->dma_error(esp)) {
		/* If we see a DMA error during or as a result of selection,
		 * all bets are off.
		 */
		esp_schedule_reset(esp);
		esp_cmd_is_done(esp, ent, cmd, DID_ERROR);
		return 0;
	}

	esp->ops->dma_invalidate(esp);

	if (esp->ireg == (ESP_INTR_RSEL | ESP_INTR_FDONE)) {
		struct esp_target_data *tp = &esp->target[cmd->device->id];

		/* Carefully back out of the selection attempt.  Release
		 * resources (such as DMA mapping & TAG) and reset state (such
		 * as message out and command delivery variables).
		 */
		if (!(ent->flags & ESP_CMD_FLAG_AUTOSENSE)) {
			esp_unmap_dma(esp, cmd);
			esp_free_lun_tag(ent, cmd->device->hostdata);
			tp->flags &= ~(ESP_TGT_NEGO_SYNC | ESP_TGT_NEGO_WIDE);
			esp->cmd_bytes_ptr = NULL;
			esp->cmd_bytes_left = 0;
		} else {
			esp_unmap_sense(esp, ent);
		}

		/* Now that the state is unwound properly, put back onto
		 * the issue queue.  This command is no longer active.
		 */
		list_move(&ent->list, &esp->queued_cmds);
		esp->active_cmd = NULL;

		/* Return value ignored by caller, it directly invokes
		 * esp_reconnect().
		 */
		return 0;
	}

	if (esp->ireg == ESP_INTR_DC) {
		struct scsi_device *dev = cmd->device;

		/* Disconnect.  Make sure we re-negotiate sync and
		 * wide parameters if this target starts responding
		 * again in the future.
		 */
		esp->target[dev->id].flags |= ESP_TGT_CHECK_NEGO;

		scsi_esp_cmd(esp, ESP_CMD_ESEL);
		esp_cmd_is_done(esp, ent, cmd, DID_BAD_TARGET);
		return 1;
	}

	if (esp->ireg == (ESP_INTR_FDONE | ESP_INTR_BSERV)) {
		/* Selection successful.  On pre-FAST chips we have
		 * to do a NOP and possibly clean out the FIFO.
		 */
		if (esp->rev <= ESP236) {
			int fcnt = esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES;

			scsi_esp_cmd(esp, ESP_CMD_NULL);

			if (!fcnt &&
			    (!esp->prev_soff ||
			     ((esp->sreg & ESP_STAT_PMASK) != ESP_DIP)))
				esp_flush_fifo(esp);
		}

		/* If we are doing a Select And Stop command, negotiation, etc.
		 * we'll do the right thing as we transition to the next phase.
		 */
		esp_event(esp, ESP_EVENT_CHECK_PHASE);
		return 0;
	}

	shost_printk(KERN_INFO, esp->host,
		     "Unexpected selection completion ireg[%x]\n", esp->ireg);
	esp_schedule_reset(esp);
	return 0;
}

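/*
 * Compute how many bytes actually moved during the last data phase,
 * from the programmed DMA length, the chip's transfer counter and
 * whatever is still sitting in the FIFO.  Returns a negative value
 * when the ESP100 spurious-byte bug is detected.
 */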
static int esp_data_bytes_sent(struct esp *esp, struct esp_cmd_entry *ent,
			       struct scsi_cmnd *cmd)
{
	int fifo_cnt, ecount, bytes_sent, flush_fifo;

	fifo_cnt = esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES;
	if (esp->prev_cfg3 & ESP_CONFIG3_EWIDE)
		fifo_cnt <<= 1;

	ecount = 0;
	if (!(esp->sreg & ESP_STAT_TCNT)) {
		ecount = ((unsigned int)esp_read8(ESP_TCLOW) |
			  (((unsigned int)esp_read8(ESP_TCMED)) << 8));
		if (esp->rev == FASHME)
			ecount |= ((unsigned int)esp_read8(FAS_RLO)) << 16;
		if (esp->rev == PCSCSI && (esp->config2 & ESP_CONFIG2_FENAB))
			ecount |= ((unsigned int)esp_read8(ESP_TCHI)) << 16;
	}

	bytes_sent = esp->data_dma_len;
	bytes_sent -= ecount;
	bytes_sent -= esp->send_cmd_residual;

	/*
	 * The am53c974 has a DMA 'peculiarity'. The doc states:
	 * In some odd byte conditions, one residual byte will
	 * be left in the SCSI FIFO, and the FIFO Flags will
	 * never count to '0'. When this happens, the residual
	 * byte should be retrieved via PIO following completion
	 * of the BLAST operation.
	 */
	if (fifo_cnt == 1 && ent->flags & ESP_CMD_FLAG_RESIDUAL) {
		size_t count = 1;
		size_t offset = bytes_sent;
		u8 bval = esp_read8(ESP_FDATA);

		if (ent->flags & ESP_CMD_FLAG_AUTOSENSE)
			ent->sense_ptr[bytes_sent] = bval;
		else {
			struct esp_cmd_priv *p = ESP_CMD_PRIV(cmd);
			u8 *ptr;

			ptr = scsi_kmap_atomic_sg(p->cur_sg, p->num_sg,
						  &offset, &count);
			if (likely(ptr)) {
				*(ptr + offset) = bval;
				scsi_kunmap_atomic_sg(ptr);
			}
		}
		bytes_sent += fifo_cnt;
		ent->flags &= ~ESP_CMD_FLAG_RESIDUAL;
	}
	if (!(ent->flags & ESP_CMD_FLAG_WRITE))
		bytes_sent -= fifo_cnt;

	flush_fifo = 0;
	if (!esp->prev_soff) {
		/* Asynchronous data transfer, always flush fifo. */
		flush_fifo = 1;
	} else {
		if (esp->rev == ESP100) {
			u32 fflags, phase;

			/* ESP100 has a chip bug where in the synchronous data
			 * phase it can mistake a final long REQ pulse from the
			 * target as an extra data byte.  Fun.
			 *
			 * To detect this case we resample the status register
			 * and fifo flags.  If we're still in a data phase and
			 * we see spurious chunks in the fifo, we return error
			 * to the caller which should reset and set things up
			 * such that we only try future transfers to this
			 * target in synchronous mode.
			 */
			esp->sreg = esp_read8(ESP_STATUS);
			phase = esp->sreg & ESP_STAT_PMASK;
			fflags = esp_read8(ESP_FFLAGS);

			if ((phase == ESP_DOP &&
			     (fflags & ESP_FF_ONOTZERO)) ||
			    (phase == ESP_DIP &&
			     (fflags & ESP_FF_FBYTES)))
				return -1;
		}
		if (!(ent->flags & ESP_CMD_FLAG_WRITE))
			flush_fifo = 1;
	}

	if (flush_fifo)
		esp_flush_fifo(esp);

	return bytes_sent;
}

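/*
 * Commit a sync/wide agreement: record it with the SPI transport
 * class, program the chip's offset and period registers (plus the
 * fast-SCSI config3 bit on FAS236 and later), and log the resulting
 * transfer agreement.
 */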
static void esp_setsync(struct esp *esp, struct esp_target_data *tp,
			u8 scsi_period, u8 scsi_offset,
			u8 esp_stp, u8 esp_soff)
{
	spi_period(tp->starget) = scsi_period;
	spi_offset(tp->starget) = scsi_offset;
	spi_width(tp->starget) = (tp->flags & ESP_TGT_WIDE) ? 1 : 0;

	if (esp_soff) {
		esp_stp &= 0x1f;
		esp_soff |= esp->radelay;
		if (esp->rev >= FAS236) {
			u8 bit = ESP_CONFIG3_FSCSI;

			if (esp->rev >= FAS100A)
				bit = ESP_CONFIG3_FAST;

			if (scsi_period < 50) {
				if (esp->rev == FASHME)
					esp_soff &= ~esp->radelay;
				tp->esp_config3 |= bit;
			} else {
				tp->esp_config3 &= ~bit;
			}
			esp->prev_cfg3 = tp->esp_config3;
			esp_write8(esp->prev_cfg3, ESP_CFG3);
		}
	}

	tp->esp_period = esp->prev_stp = esp_stp;
	tp->esp_offset = esp->prev_soff = esp_soff;

	esp_write8(esp_soff, ESP_SOFF);
	esp_write8(esp_stp, ESP_STP);

	tp->flags &= ~(ESP_TGT_NEGO_SYNC | ESP_TGT_CHECK_NEGO);

	spi_display_xfer_agreement(tp->starget);
}

static void esp_msgin_reject(struct esp *esp)
{
	struct esp_cmd_entry *ent = esp->active_cmd;
	struct scsi_cmnd *cmd = ent->cmd;
	struct esp_target_data *tp;
	int tgt;

	tgt = cmd->device->id;
	tp = &esp->target[tgt];

	if (tp->flags & ESP_TGT_NEGO_WIDE) {
		tp->flags &= ~(ESP_TGT_NEGO_WIDE | ESP_TGT_WIDE);

		if (!esp_need_to_nego_sync(tp)) {
			tp->flags &= ~ESP_TGT_CHECK_NEGO;
			scsi_esp_cmd(esp, ESP_CMD_RATN);
		} else {
			esp->msg_out_len =
				spi_populate_sync_msg(&esp->msg_out[0],
						      tp->nego_goal_period,
						      tp->nego_goal_offset);
			tp->flags |= ESP_TGT_NEGO_SYNC;
			scsi_esp_cmd(esp, ESP_CMD_SATN);
		}
		return;
	}

	if (tp->flags & ESP_TGT_NEGO_SYNC) {
		tp->flags &= ~(ESP_TGT_NEGO_SYNC | ESP_TGT_CHECK_NEGO);
		tp->esp_period = 0;
		tp->esp_offset = 0;
		esp_setsync(esp, tp, 0, 0, 0, 0);
		scsi_esp_cmd(esp, ESP_CMD_RATN);
		return;
	}

	shost_printk(KERN_INFO, esp->host, "Unexpected MESSAGE REJECT\n");
	esp_schedule_reset(esp);
}

static void esp_msgin_sdtr(struct esp *esp, struct esp_target_data *tp)
{
	u8 period = esp->msg_in[3];
	u8 offset = esp->msg_in[4];
	u8 stp;

	if (!(tp->flags & ESP_TGT_NEGO_SYNC))
		goto do_reject;

	if (offset > 15)
		goto do_reject;

	if (offset) {
		int one_clock;

		if (period > esp->max_period) {
			period = offset = 0;
			goto do_sdtr;
		}
		if (period < esp->min_period)
			goto do_reject;

		one_clock = esp->ccycle / 1000;
		stp = DIV_ROUND_UP(period << 2, one_clock);
		if (stp && esp->rev >= FAS236) {
			if (stp >= 50)
				stp--;
		}
	} else {
		stp = 0;
	}

	esp_setsync(esp, tp, period, offset, stp, offset);
	return;

do_reject:
	esp->msg_out[0] = MESSAGE_REJECT;
	esp->msg_out_len = 1;
	scsi_esp_cmd(esp, ESP_CMD_SATN);
	return;

do_sdtr:
	tp->nego_goal_period = period;
	tp->nego_goal_offset = offset;
	esp->msg_out_len =
		spi_populate_sync_msg(&esp->msg_out[0],
				      tp->nego_goal_period,
				      tp->nego_goal_offset);
	scsi_esp_cmd(esp, ESP_CMD_SATN);
}

static void esp_msgin_wdtr(struct esp *esp, struct esp_target_data *tp)
{
	int size = 8 << esp->msg_in[3];
	u8 cfg3;

	if (esp->rev != FASHME)
		goto do_reject;

	if (size != 8 && size != 16)
		goto do_reject;

	if (!(tp->flags & ESP_TGT_NEGO_WIDE))
		goto do_reject;

	cfg3 = tp->esp_config3;
	if (size == 16) {
		tp->flags |= ESP_TGT_WIDE;
		cfg3 |= ESP_CONFIG3_EWIDE;
	} else {
		tp->flags &= ~ESP_TGT_WIDE;
		cfg3 &= ~ESP_CONFIG3_EWIDE;
	}
	tp->esp_config3 = cfg3;
	esp->prev_cfg3 = cfg3;
	esp_write8(cfg3, ESP_CFG3);

	tp->flags &= ~ESP_TGT_NEGO_WIDE;

	spi_period(tp->starget) = 0;
	spi_offset(tp->starget) = 0;
	if (!esp_need_to_nego_sync(tp)) {
		tp->flags &= ~ESP_TGT_CHECK_NEGO;
		scsi_esp_cmd(esp, ESP_CMD_RATN);
	} else {
		esp->msg_out_len =
			spi_populate_sync_msg(&esp->msg_out[0],
					      tp->nego_goal_period,
					      tp->nego_goal_offset);
		tp->flags |= ESP_TGT_NEGO_SYNC;
		scsi_esp_cmd(esp, ESP_CMD_SATN);
	}
	return;

do_reject:
	esp->msg_out[0] = MESSAGE_REJECT;
	esp->msg_out_len = 1;
	scsi_esp_cmd(esp, ESP_CMD_SATN);
}

static void esp_msgin_extended(struct esp *esp)
{
	struct esp_cmd_entry *ent = esp->active_cmd;
	struct scsi_cmnd *cmd = ent->cmd;
	struct esp_target_data *tp;
	int tgt = cmd->device->id;

	tp = &esp->target[tgt];
	if (esp->msg_in[2] == EXTENDED_SDTR) {
		esp_msgin_sdtr(esp, tp);
		return;
	}
	if (esp->msg_in[2] == EXTENDED_WDTR) {
		esp_msgin_wdtr(esp, tp);
		return;
	}

	shost_printk(KERN_INFO, esp->host,
		     "Unexpected extended msg type %x\n", esp->msg_in[2]);

	esp->msg_out[0] = MESSAGE_REJECT;
	esp->msg_out_len = 1;
	scsi_esp_cmd(esp, ESP_CMD_SATN);
}

/* Analyze msgin bytes received from target so far.  Return non-zero
 * if there are more bytes needed to complete the message.
 */
static int esp_msgin_process(struct esp *esp)
{
	u8 msg0 = esp->msg_in[0];
	int len = esp->msg_in_len;

	if (msg0 & 0x80) {
		/* Identify */
		shost_printk(KERN_INFO, esp->host,
			     "Unexpected msgin identify\n");
		return 0;
	}

	switch (msg0) {
	case EXTENDED_MESSAGE:
		if (len == 1)
			return 1;
		if (len < esp->msg_in[1] + 2)
			return 1;
		esp_msgin_extended(esp);
		return 0;

	case IGNORE_WIDE_RESIDUE: {
		struct esp_cmd_entry *ent;
		struct esp_cmd_priv *spriv;

		if (len == 1)
			return 1;

		if (esp->msg_in[1] != 1)
			goto do_reject;

		ent = esp->active_cmd;
		spriv = ESP_CMD_PRIV(ent->cmd);

		if (spriv->cur_residue == sg_dma_len(spriv->cur_sg)) {
			spriv->cur_sg = spriv->prv_sg;
			spriv->cur_residue = 1;
		} else
			spriv->cur_residue++;
		spriv->tot_residue++;
		return 0;
	}
	case NOP:
		return 0;
	case RESTORE_POINTERS:
		esp_restore_pointers(esp, esp->active_cmd);
		return 0;
	case SAVE_POINTERS:
		esp_save_pointers(esp, esp->active_cmd);
		return 0;

	case COMMAND_COMPLETE:
	case DISCONNECT: {
		struct esp_cmd_entry *ent = esp->active_cmd;

		ent->message = msg0;
		esp_event(esp, ESP_EVENT_FREE_BUS);
		esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
		return 0;
	}
	case MESSAGE_REJECT:
		esp_msgin_reject(esp);
		return 0;

	default:
	do_reject:
		esp->msg_out[0] = MESSAGE_REJECT;
		esp->msg_out_len = 1;
		scsi_esp_cmd(esp, ESP_CMD_SATN);
		return 0;
	}
}

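/*
 * The core state machine, keyed off esp->event: walks the active
 * command through the SCSI phases (data in/out, status, message
 * in/out, bus free).  Paths that schedule a reset return zero so the
 * caller stops processing.
 */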
static int esp_process_event(struct esp *esp)
{
	int write, i;

again:
	write = 0;
	esp_log_event("process event %d phase %x\n",
		      esp->event, esp->sreg & ESP_STAT_PMASK);
	switch (esp->event) {
	case ESP_EVENT_CHECK_PHASE:
		switch (esp->sreg & ESP_STAT_PMASK) {
		case ESP_DOP:
			esp_event(esp, ESP_EVENT_DATA_OUT);
			break;
		case ESP_DIP:
			esp_event(esp, ESP_EVENT_DATA_IN);
			break;
		case ESP_STATP:
			esp_flush_fifo(esp);
			scsi_esp_cmd(esp, ESP_CMD_ICCSEQ);
			esp_event(esp, ESP_EVENT_STATUS);
			esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
			return 1;

		case ESP_MOP:
			esp_event(esp, ESP_EVENT_MSGOUT);
			break;

		case ESP_MIP:
			esp_event(esp, ESP_EVENT_MSGIN);
			break;

		case ESP_CMDP:
			esp_event(esp, ESP_EVENT_CMD_START);
			break;

		default:
			shost_printk(KERN_INFO, esp->host,
				     "Unexpected phase, sreg=%02x\n",
				     esp->sreg);
			esp_schedule_reset(esp);
			return 0;
		}
		goto again;

	case ESP_EVENT_DATA_IN:
		write = 1;
		fallthrough;

	case ESP_EVENT_DATA_OUT: {
		struct esp_cmd_entry *ent = esp->active_cmd;
		struct scsi_cmnd *cmd = ent->cmd;
		dma_addr_t dma_addr = esp_cur_dma_addr(ent, cmd);
		unsigned int dma_len = esp_cur_dma_len(ent, cmd);

		if (esp->rev == ESP100)
			scsi_esp_cmd(esp, ESP_CMD_NULL);

		if (write)
			ent->flags |= ESP_CMD_FLAG_WRITE;
		else
			ent->flags &= ~ESP_CMD_FLAG_WRITE;

		if (esp->ops->dma_length_limit)
			dma_len = esp->ops->dma_length_limit(esp, dma_addr,
							     dma_len);
		else
			dma_len = esp_dma_length_limit(esp, dma_addr, dma_len);

		esp->data_dma_len = dma_len;

		if (!dma_len) {
			shost_printk(KERN_ERR, esp->host,
				     "DMA length is zero!\n");
			shost_printk(KERN_ERR, esp->host,
				     "cur adr[%08llx] len[%08x]\n",
				     (unsigned long long)esp_cur_dma_addr(ent, cmd),
				     esp_cur_dma_len(ent, cmd));
			esp_schedule_reset(esp);
			return 0;
		}

		esp_log_datastart("start data addr[%08llx] len[%u] write(%d)\n",
				  (unsigned long long)dma_addr, dma_len, write);

		esp->ops->send_dma_cmd(esp, dma_addr, dma_len, dma_len,
				       write, ESP_CMD_DMA | ESP_CMD_TI);
		esp_event(esp, ESP_EVENT_DATA_DONE);
		break;
	}
	case ESP_EVENT_DATA_DONE: {
		struct esp_cmd_entry *ent = esp->active_cmd;
		struct scsi_cmnd *cmd = ent->cmd;
		int bytes_sent;

		if (esp->ops->dma_error(esp)) {
			shost_printk(KERN_INFO, esp->host,
				     "data done, DMA error, resetting\n");
			esp_schedule_reset(esp);
			return 0;
		}

		if (ent->flags & ESP_CMD_FLAG_WRITE) {
			/* XXX parity errors, etc. XXX */

			esp->ops->dma_drain(esp);
		}
		esp->ops->dma_invalidate(esp);

		if (esp->ireg != ESP_INTR_BSERV) {
			/* We should always see exactly a bus-service
			 * interrupt at the end of a successful transfer.
			 */
			shost_printk(KERN_INFO, esp->host,
				     "data done, not BSERV, resetting\n");
			esp_schedule_reset(esp);
			return 0;
		}

		bytes_sent = esp_data_bytes_sent(esp, ent, cmd);

		esp_log_datadone("data done flgs[%x] sent[%d]\n",
				 ent->flags, bytes_sent);

		if (bytes_sent < 0) {
			/* XXX force sync mode for this target XXX */
			esp_schedule_reset(esp);
			return 0;
		}

		esp_advance_dma(esp, ent, cmd, bytes_sent);
		esp_event(esp, ESP_EVENT_CHECK_PHASE);
		goto again;
	}

1824  	case ESP_EVENT_STATUS: {
1825  		struct esp_cmd_entry *ent = esp->active_cmd;
1826  
1827  		if (esp->ireg & ESP_INTR_FDONE) {
1828  			ent->status = esp_read8(ESP_FDATA);
1829  			ent->message = esp_read8(ESP_FDATA);
1830  			scsi_esp_cmd(esp, ESP_CMD_MOK);
1831  		} else if (esp->ireg == ESP_INTR_BSERV) {
1832  			ent->status = esp_read8(ESP_FDATA);
1833  			ent->message = 0xff;
1834  			esp_event(esp, ESP_EVENT_MSGIN);
1835  			return 0;
1836  		}
1837  
1838  		if (ent->message != COMMAND_COMPLETE) {
1839  			shost_printk(KERN_INFO, esp->host,
1840  				     "Unexpected message %x in status\n",
1841  				     ent->message);
1842  			esp_schedule_reset(esp);
1843  			return 0;
1844  		}
1845  
1846  		esp_event(esp, ESP_EVENT_FREE_BUS);
1847  		esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
1848  		break;
1849  	}
1850  	case ESP_EVENT_FREE_BUS: {
1851  		struct esp_cmd_entry *ent = esp->active_cmd;
1852  		struct scsi_cmnd *cmd = ent->cmd;
1853  
1854  		if (ent->message == COMMAND_COMPLETE ||
1855  		    ent->message == DISCONNECT)
1856  			scsi_esp_cmd(esp, ESP_CMD_ESEL);
1857  
1858  		if (ent->message == COMMAND_COMPLETE) {
1859  			esp_log_cmddone("Command done status[%x] message[%x]\n",
1860  					ent->status, ent->message);
1861  			if (ent->status == SAM_STAT_TASK_SET_FULL)
1862  				esp_event_queue_full(esp, ent);
1863  
1864  			if (ent->status == SAM_STAT_CHECK_CONDITION &&
1865  			    !(ent->flags & ESP_CMD_FLAG_AUTOSENSE)) {
1866  				ent->flags |= ESP_CMD_FLAG_AUTOSENSE;
1867  				esp_autosense(esp, ent);
1868  			} else {
1869  				esp_cmd_is_done(esp, ent, cmd, DID_OK);
1870  			}
1871  		} else if (ent->message == DISCONNECT) {
1872  			esp_log_disconnect("Disconnecting tgt[%d] tag[%x:%x]\n",
1873  					   cmd->device->id,
1874  					   ent->tag[0], ent->tag[1]);
1875  
1876  			esp->active_cmd = NULL;
1877  			esp_maybe_execute_command(esp);
1878  		} else {
1879  			shost_printk(KERN_INFO, esp->host,
1880  				     "Unexpected message %x in freebus\n",
1881  				     ent->message);
1882  			esp_schedule_reset(esp);
1883  			return 0;
1884  		}
1885  		if (esp->active_cmd)
1886  			esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
1887  		break;
1888  	}
1889  	case ESP_EVENT_MSGOUT: {
1890  		scsi_esp_cmd(esp, ESP_CMD_FLUSH);
1891  
1892  		if (esp_debug & ESP_DEBUG_MSGOUT) {
1893  			int i;
1894  			printk("ESP: Sending message [ ");
1895  			for (i = 0; i < esp->msg_out_len; i++)
1896  				printk("%02x ", esp->msg_out[i]);
1897  			printk("]\n");
1898  		}
1899  
1900  		if (esp->rev == FASHME) {
1901  			int i;
1902  
1903  			/* Always use the fifo.  */
1904  			for (i = 0; i < esp->msg_out_len; i++) {
1905  				esp_write8(esp->msg_out[i], ESP_FDATA);
1906  				esp_write8(0, ESP_FDATA);
1907  			}
1908  			scsi_esp_cmd(esp, ESP_CMD_TI);
1909  		} else {
1910  			if (esp->msg_out_len == 1) {
1911  				esp_write8(esp->msg_out[0], ESP_FDATA);
1912  				scsi_esp_cmd(esp, ESP_CMD_TI);
1913  			} else if (esp->flags & ESP_FLAG_USE_FIFO) {
1914  				for (int i = 0; i < esp->msg_out_len; i++)
1915  					esp_write8(esp->msg_out[i], ESP_FDATA);
1916  				scsi_esp_cmd(esp, ESP_CMD_TI);
1917  			} else {
1918  				/* Use DMA. */
1919  				memcpy(esp->command_block,
1920  				       esp->msg_out,
1921  				       esp->msg_out_len);
1922  
1923  				esp->ops->send_dma_cmd(esp,
1924  						       esp->command_block_dma,
1925  						       esp->msg_out_len,
1926  						       esp->msg_out_len,
1927  						       0,
1928  						       ESP_CMD_DMA|ESP_CMD_TI);
1929  			}
1930  		}
1931  		esp_event(esp, ESP_EVENT_MSGOUT_DONE);
1932  		break;
1933  	}
1934  	case ESP_EVENT_MSGOUT_DONE:
1935  		if (esp->rev == FASHME) {
1936  			scsi_esp_cmd(esp, ESP_CMD_FLUSH);
1937  		} else {
1938  			if (esp->msg_out_len > 1)
1939  				esp->ops->dma_invalidate(esp);
1940  
1941  			/* XXX if the chip went into disconnected mode,
1942  			 * we can't run the phase state machine anyway.
1943  			 */
1944  			if (!(esp->ireg & ESP_INTR_DC))
1945  				scsi_esp_cmd(esp, ESP_CMD_NULL);
1946  		}
1947  
1948  		esp->msg_out_len = 0;
1949  
1950  		esp_event(esp, ESP_EVENT_CHECK_PHASE);
1951  		goto again;
1952  	case ESP_EVENT_MSGIN:
1953  		if (esp->ireg & ESP_INTR_BSERV) {
1954  			if (esp->rev == FASHME) {
1955  				if (!(esp_read8(ESP_STATUS2) &
1956  				      ESP_STAT2_FEMPTY))
1957  					scsi_esp_cmd(esp, ESP_CMD_FLUSH);
1958  			} else {
1959  				scsi_esp_cmd(esp, ESP_CMD_FLUSH);
1960  				if (esp->rev == ESP100)
1961  					scsi_esp_cmd(esp, ESP_CMD_NULL);
1962  			}
1963  			scsi_esp_cmd(esp, ESP_CMD_TI);
1964  			esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
1965  			return 1;
1966  		}
1967  		if (esp->ireg & ESP_INTR_FDONE) {
1968  			u8 val;
1969  
1970  			if (esp->rev == FASHME)
1971  				val = esp->fifo[0];
1972  			else
1973  				val = esp_read8(ESP_FDATA);
1974  			esp->msg_in[esp->msg_in_len++] = val;
1975  
1976  			esp_log_msgin("Got msgin byte %x\n", val);
1977  
1978  			if (!esp_msgin_process(esp))
1979  				esp->msg_in_len = 0;
1980  
1981  			if (esp->rev == FASHME)
1982  				scsi_esp_cmd(esp, ESP_CMD_FLUSH);
1983  
1984  			scsi_esp_cmd(esp, ESP_CMD_MOK);
1985  
1986  			/* Check whether a bus reset is to be done next */
1987  			if (esp->event == ESP_EVENT_RESET)
1988  				return 0;
1989  
1990  			if (esp->event != ESP_EVENT_FREE_BUS)
1991  				esp_event(esp, ESP_EVENT_CHECK_PHASE);
1992  		} else {
1993  			shost_printk(KERN_INFO, esp->host,
1994  				     "MSGIN neither BSERV not FDON, resetting");
1995  			esp_schedule_reset(esp);
1996  			return 0;
1997  		}
1998  		break;
1999  	case ESP_EVENT_CMD_START:
2000  		memcpy(esp->command_block, esp->cmd_bytes_ptr,
2001  		       esp->cmd_bytes_left);
2002  		esp_send_dma_cmd(esp, esp->cmd_bytes_left, 16, ESP_CMD_TI);
2003  		esp_event(esp, ESP_EVENT_CMD_DONE);
2004  		esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
2005  		break;
2006  	case ESP_EVENT_CMD_DONE:
2007  		esp->ops->dma_invalidate(esp);
2008  		if (esp->ireg & ESP_INTR_BSERV) {
2009  			esp_event(esp, ESP_EVENT_CHECK_PHASE);
2010  			goto again;
2011  		}
2012  		esp_schedule_reset(esp);
2013  		return 0;
2014  
2015  	case ESP_EVENT_RESET:
2016  		scsi_esp_cmd(esp, ESP_CMD_RS);
2017  		break;
2018  
2019  	default:
2020  		shost_printk(KERN_INFO, esp->host,
2021  			     "Unexpected event %x, resetting\n", esp->event);
2022  		esp_schedule_reset(esp);
2023  		return 0;
2024  	}
2025  	return 1;
2026  }
2027  
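/* Fail a single outstanding command with DID_RESET, unmapping its DMA
 * and sense buffer and returning the command entry to the pool.
 */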
2028  static void esp_reset_cleanup_one(struct esp *esp, struct esp_cmd_entry *ent)
2029  {
2030  	struct scsi_cmnd *cmd = ent->cmd;
2031  
2032  	esp_unmap_dma(esp, cmd);
2033  	esp_free_lun_tag(ent, cmd->device->hostdata);
2034  	cmd->result = DID_RESET << 16;
2035  
2036  	if (ent->flags & ESP_CMD_FLAG_AUTOSENSE)
2037  		esp_unmap_sense(esp, ent);
2038  
2039  	scsi_done(cmd);
2040  	list_del(&ent->list);
2041  	esp_put_ent(esp, ent);
2042  }
2043  
2044  static void esp_clear_hold(struct scsi_device *dev, void *data)
2045  {
2046  	struct esp_lun_data *lp = dev->hostdata;
2047  
2048  	BUG_ON(lp->num_tagged);
2049  	lp->hold = 0;
2050  }
2051  
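/* Complete every queued and active command with DID_RESET and force
 * renegotiation of sync/wide parameters for all targets.  Runs under
 * host->lock.
 */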
2052  static void esp_reset_cleanup(struct esp *esp)
2053  {
2054  	struct esp_cmd_entry *ent, *tmp;
2055  	int i;
2056  
2057  	list_for_each_entry_safe(ent, tmp, &esp->queued_cmds, list) {
2058  		struct scsi_cmnd *cmd = ent->cmd;
2059  
2060  		list_del(&ent->list);
2061  		cmd->result = DID_RESET << 16;
2062  		scsi_done(cmd);
2063  		esp_put_ent(esp, ent);
2064  	}
2065  
2066  	list_for_each_entry_safe(ent, tmp, &esp->active_cmds, list) {
2067  		if (ent == esp->active_cmd)
2068  			esp->active_cmd = NULL;
2069  		esp_reset_cleanup_one(esp, ent);
2070  	}
2071  
2072  	BUG_ON(esp->active_cmd != NULL);
2073  
2074  	/* Force renegotiation of sync/wide transfers.  */
2075  	for (i = 0; i < ESP_MAX_TARGET; i++) {
2076  		struct esp_target_data *tp = &esp->target[i];
2077  
2078  		tp->esp_period = 0;
2079  		tp->esp_offset = 0;
2080  		tp->esp_config3 &= ~(ESP_CONFIG3_EWIDE |
2081  				     ESP_CONFIG3_FSCSI |
2082  				     ESP_CONFIG3_FAST);
2083  		tp->flags &= ~ESP_TGT_WIDE;
2084  		tp->flags |= ESP_TGT_CHECK_NEGO;
2085  
2086  		if (tp->starget)
2087  			__starget_for_each_device(tp->starget, NULL,
2088  						  esp_clear_hold);
2089  	}
2090  	esp->flags &= ~ESP_FLAG_RESETTING;
2091  }
2092  
2093  /* Runs under host->lock */
2094  static void __esp_interrupt(struct esp *esp)
2095  {
2096  	int finish_reset, intr_done;
2097  	u8 phase;
2098  
2099  	/*
2100  	 * Once INTRPT is read, STATUS and SSTEP are cleared.
2101  	 */
2102  	esp->sreg = esp_read8(ESP_STATUS);
2103  	esp->seqreg = esp_read8(ESP_SSTEP);
2104  	esp->ireg = esp_read8(ESP_INTRPT);
2105  
2106  	if (esp->flags & ESP_FLAG_RESETTING) {
2107  		finish_reset = 1;
2108  	} else {
2109  		if (esp_check_gross_error(esp))
2110  			return;
2111  
2112  		finish_reset = esp_check_spur_intr(esp);
2113  		if (finish_reset < 0)
2114  			return;
2115  	}
2116  
2117  	if (esp->ireg & ESP_INTR_SR)
2118  		finish_reset = 1;
2119  
2120  	if (finish_reset) {
2121  		esp_reset_cleanup(esp);
2122  		if (esp->eh_reset) {
2123  			complete(esp->eh_reset);
2124  			esp->eh_reset = NULL;
2125  		}
2126  		return;
2127  	}
2128  
2129  	phase = (esp->sreg & ESP_STAT_PMASK);
2130  	if (esp->rev == FASHME) {
2131  		if (((phase != ESP_DIP && phase != ESP_DOP) &&
2132  		     esp->select_state == ESP_SELECT_NONE &&
2133  		     esp->event != ESP_EVENT_STATUS &&
2134  		     esp->event != ESP_EVENT_DATA_DONE) ||
2135  		    (esp->ireg & ESP_INTR_RSEL)) {
2136  			esp->sreg2 = esp_read8(ESP_STATUS2);
2137  			if (!(esp->sreg2 & ESP_STAT2_FEMPTY) ||
2138  			    (esp->sreg2 & ESP_STAT2_F1BYTE))
2139  				hme_read_fifo(esp);
2140  		}
2141  	}
2142  
2143  	esp_log_intr("intr sreg[%02x] seqreg[%02x] "
2144  		     "sreg2[%02x] ireg[%02x]\n",
2145  		     esp->sreg, esp->seqreg, esp->sreg2, esp->ireg);
2146  
2147  	intr_done = 0;
2148  
2149  	if (esp->ireg & (ESP_INTR_S | ESP_INTR_SATN | ESP_INTR_IC)) {
2150  		shost_printk(KERN_INFO, esp->host,
2151  			     "unexpected IREG %02x\n", esp->ireg);
2152  		if (esp->ireg & ESP_INTR_IC)
2153  			esp_dump_cmd_log(esp);
2154  
2155  		esp_schedule_reset(esp);
2156  	} else {
2157  		if (esp->ireg & ESP_INTR_RSEL) {
2158  			if (esp->active_cmd)
2159  				(void) esp_finish_select(esp);
2160  			intr_done = esp_reconnect(esp);
2161  		} else {
2162  			/* Some combination of FDONE, BSERV, DC. */
2163  			if (esp->select_state != ESP_SELECT_NONE)
2164  				intr_done = esp_finish_select(esp);
2165  		}
2166  	}
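	/* Crank the event state machine until it reports this interrupt
	 * fully serviced.
	 */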
2167  	while (!intr_done)
2168  		intr_done = esp_process_event(esp);
2169  }
2170  
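/* Shared interrupt handler for all ESP frontends.  After handling an
 * event that set ESP_FLAG_QUICKIRQ_CHECK we briefly poll (up to
 * ESP_QUICKIRQ_LIMIT iterations) for a follow-on interrupt so that
 * back-to-back phase changes are serviced in a single invocation.
 */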
2171  irqreturn_t scsi_esp_intr(int irq, void *dev_id)
2172  {
2173  	struct esp *esp = dev_id;
2174  	unsigned long flags;
2175  	irqreturn_t ret;
2176  
2177  	spin_lock_irqsave(esp->host->host_lock, flags);
2178  	ret = IRQ_NONE;
2179  	if (esp->ops->irq_pending(esp)) {
2180  		ret = IRQ_HANDLED;
2181  		for (;;) {
2182  			int i;
2183  
2184  			__esp_interrupt(esp);
2185  			if (!(esp->flags & ESP_FLAG_QUICKIRQ_CHECK))
2186  				break;
2187  			esp->flags &= ~ESP_FLAG_QUICKIRQ_CHECK;
2188  
2189  			for (i = 0; i < ESP_QUICKIRQ_LIMIT; i++) {
2190  				if (esp->ops->irq_pending(esp))
2191  					break;
2192  			}
2193  			if (i == ESP_QUICKIRQ_LIMIT)
2194  				break;
2195  		}
2196  	}
2197  	spin_unlock_irqrestore(esp->host->host_lock, flags);
2198  
2199  	return ret;
2200  }
2201  EXPORT_SYMBOL(scsi_esp_intr);
2202  
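/* Probe the chip revision by writing config registers and reading them
 * back: an unimplemented register does not return what was written.
 * No CFG2 means a plain ESP100, CFG2 without CFG3 means an ESP100A,
 * and if all three config registers respond this is one of the FAS
 * variants.
 */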
2203  static void esp_get_revision(struct esp *esp)
2204  {
2205  	u8 val;
2206  
2207  	esp->config1 = (ESP_CONFIG1_PENABLE | (esp->scsi_id & 7));
2208  	if (esp->config2 == 0) {
2209  		esp->config2 = (ESP_CONFIG2_SCSI2ENAB | ESP_CONFIG2_REGPARITY);
2210  		esp_write8(esp->config2, ESP_CFG2);
2211  
2212  		val = esp_read8(ESP_CFG2);
2213  		val &= ~ESP_CONFIG2_MAGIC;
2214  
2215  		esp->config2 = 0;
2216  		if (val != (ESP_CONFIG2_SCSI2ENAB | ESP_CONFIG2_REGPARITY)) {
2217  			/*
2218  			 * If what we write to cfg2 does not come back,
2219  			 * cfg2 is not implemented.
2220  			 * Therefore this must be a plain esp100.
2221  			 */
2222  			esp->rev = ESP100;
2223  			return;
2224  		}
2225  	}
2226  
2227  	esp_set_all_config3(esp, 5);
2228  	esp->prev_cfg3 = 5;
2229  	esp_write8(esp->config2, ESP_CFG2);
2230  	esp_write8(0, ESP_CFG3);
2231  	esp_write8(esp->prev_cfg3, ESP_CFG3);
2232  
2233  	val = esp_read8(ESP_CFG3);
2234  	if (val != 5) {
2235  		/* The cfg2 register is implemented but cfg3 is not,
2236  		 * so this must be an ESP100A.
2237  		 */
2238  		esp->rev = ESP100A;
2239  	} else {
2240  		esp_set_all_config3(esp, 0);
2241  		esp->prev_cfg3 = 0;
2242  		esp_write8(esp->prev_cfg3, ESP_CFG3);
2243  
2244  		/* All of cfg{1,2,3} are implemented, so this must be
2245  		 * one of the FAS variants; figure out which one.
2246  		 */
2247  		if (esp->cfact == 0 || esp->cfact > ESP_CCF_F5) {
2248  			esp->rev = FAST;
2249  			esp->sync_defp = SYNC_DEFP_FAST;
2250  		} else {
2251  			esp->rev = ESP236;
2252  		}
2253  	}
2254  }
2255  
2256  static void esp_init_swstate(struct esp *esp)
2257  {
2258  	int i;
2259  
2260  	INIT_LIST_HEAD(&esp->queued_cmds);
2261  	INIT_LIST_HEAD(&esp->active_cmds);
2262  	INIT_LIST_HEAD(&esp->esp_cmd_pool);
2263  
2264  	/* Start with a clear state, domain validation (via ->sdev_configure,
2265  	 * spi_dv_device()) will attempt to enable SYNC, WIDE, and tagged
2266  	 * commands.
2267  	 */
2268  	for (i = 0 ; i < ESP_MAX_TARGET; i++) {
2269  		esp->target[i].flags = 0;
2270  		esp->target[i].nego_goal_period = 0;
2271  		esp->target[i].nego_goal_offset = 0;
2272  		esp->target[i].nego_goal_width = 0;
2273  		esp->target[i].nego_goal_tags = 0;
2274  	}
2275  }
2276  
2277  /* This places the ESP into a known state at boot time. */
2278  static void esp_bootup_reset(struct esp *esp)
2279  {
2280  	u8 val;
2281  
2282  	/* Reset the DMA */
2283  	esp->ops->reset_dma(esp);
2284  
2285  	/* Reset the ESP */
2286  	esp_reset_esp(esp);
2287  
2288  	/* Reset the SCSI bus, but tell ESP not to generate an irq */
2289  	val = esp_read8(ESP_CFG1);
2290  	val |= ESP_CONFIG1_SRRDISAB;
2291  	esp_write8(val, ESP_CFG1);
2292  
2293  	scsi_esp_cmd(esp, ESP_CMD_RS);
2294  	udelay(400);
2295  
2296  	esp_write8(esp->config1, ESP_CFG1);
2297  
2298  	/* Eat any bitrot in the chip and we are done... */
2299  	esp_read8(ESP_INTRPT);
2300  }
2301  
2302  static void esp_set_clock_params(struct esp *esp)
2303  {
2304  	int fhz;
2305  	u8 ccf;
2306  
2307  	/* This is getting messy but it has to be done correctly or else
2308  	 * you get weird behavior all over the place.  We are trying to
2309  	 * basically figure out three pieces of information.
2310  	 *
2311  	 * a) Clock Conversion Factor
2312  	 *
2313  	 *    This is a representation of the input crystal clock frequency
2314  	 *    going into the ESP on this machine.  Any operation whose timing
2315  	 *    is longer than 400ns depends on this value being correct.  For
2316  	 *    example, you'll get blips for arbitration/selection during high
2317  	 *    load or with multiple targets if this is not set correctly.
2318  	 *
2319  	 * b) Selection Time-Out
2320  	 *
2321  	 *    The ESP isn't very bright and will arbitrate for the bus and try
2322  	 *    to select a target forever if you let it.  This value tells the
2323  	 *    ESP when it has taken too long to negotiate and that it should
2324  	 *    interrupt the CPU so we can see what happened.  The value is
2325  	 *    computed as follows (from NCR/Symbios chip docs).
2326  	 *
2327  	 *          (Time Out Period) *  (Input Clock)
2328  	 *    STO = ----------------------------------
2329  	 *          (8192) * (Clock Conversion Factor)
2330  	 *
2331  	 *    We use a time out period of 250ms (ESP_BUS_TIMEOUT).
2332  	 *
2333  	 * c) Empirical constants for synchronous offset and transfer period
2334  	 *    register values
2335  	 *
2336  	 *    This entails the smallest and largest sync period we could ever
2337  	 *    handle on this ESP.
2338  	 */
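	/*
	 * Illustrative example of the STO formula above: with a 40MHz
	 * clock, CCF is 8, so STO = (0.25s * 40000000) / (8192 * 8),
	 * roughly 153, which fits the chip's 8-bit selection time-out
	 * register.  Since CCF tracks the clock in 5MHz steps, exact
	 * multiples of 5MHz all land near the same register value.
	 */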
2339  	fhz = esp->cfreq;
2340  
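	/* CCF is the clock in MHz rounded up to the next multiple of 5
	 * (e.g. 25MHz -> 5, 40MHz -> 8); a result of 1 is bumped to 2,
	 * the smallest value the driver programs.
	 */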
2341  	ccf = ((fhz / 1000000) + 4) / 5;
2342  	if (ccf == 1)
2343  		ccf = 2;
2344  
2345  	/* If we can't find anything reasonable, just assume 20MHz.
2346  	 * This is the clock frequency of the older sun4c's where I've
2347  	 * been unable to find the clock-frequency PROM property.  All
2348  	 * other machines provide useful values it seems.
2349  	 */
2350  	if (fhz <= 5000000 || ccf < 1 || ccf > 8) {
2351  		fhz = 20000000;
2352  		ccf = 4;
2353  	}
2354  
2355  	esp->cfact = (ccf == 8 ? 0 : ccf);
2356  	esp->cfreq = fhz;
2357  	esp->ccycle = ESP_HZ_TO_CYCLE(fhz);
2358  	esp->ctick = ESP_TICK(ccf, esp->ccycle);
2359  	esp->neg_defp = ESP_NEG_DEFP(fhz, ccf);
2360  	esp->sync_defp = SYNC_DEFP_SLOW;
2361  }
2362  
2363  static const char *esp_chip_names[] = {
2364  	"ESP100",
2365  	"ESP100A",
2366  	"ESP236",
2367  	"FAS236",
2368  	"AM53C974",
2369  	"53CF9x-2",
2370  	"FAS100A",
2371  	"FAST",
2372  	"FASHME",
2373  };
2374  
2375  static struct scsi_transport_template *esp_transport_template;
2376  
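/* Called by the bus frontends once the chip is mapped and its ops are
 * wired up: derive clock parameters, detect the chip revision, reset
 * the bus, then register and scan the Scsi_Host.
 */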
2377  int scsi_esp_register(struct esp *esp)
2378  {
2379  	static int instance;
2380  	int err;
2381  
2382  	if (!esp->num_tags)
2383  		esp->num_tags = ESP_DEFAULT_TAGS;
2384  	esp->host->transportt = esp_transport_template;
2385  	esp->host->max_lun = ESP_MAX_LUN;
2386  	esp->host->cmd_per_lun = 2;
2387  	esp->host->unique_id = instance;
2388  
2389  	esp_set_clock_params(esp);
2390  
2391  	esp_get_revision(esp);
2392  
2393  	esp_init_swstate(esp);
2394  
2395  	esp_bootup_reset(esp);
2396  
2397  	dev_printk(KERN_INFO, esp->dev, "esp%u: regs[%1p:%1p] irq[%u]\n",
2398  		   esp->host->unique_id, esp->regs, esp->dma_regs,
2399  		   esp->host->irq);
2400  	dev_printk(KERN_INFO, esp->dev,
2401  		   "esp%u: is a %s, %u MHz (ccf=%u), SCSI ID %u\n",
2402  		   esp->host->unique_id, esp_chip_names[esp->rev],
2403  		   esp->cfreq / 1000000, esp->cfact, esp->scsi_id);
2404  
2405  	/* Let the SCSI bus reset settle. */
2406  	ssleep(esp_bus_reset_settle);
2407  
2408  	err = scsi_add_host(esp->host, esp->dev);
2409  	if (err)
2410  		return err;
2411  
2412  	instance++;
2413  
2414  	scsi_scan_host(esp->host);
2415  
2416  	return 0;
2417  }
2418  EXPORT_SYMBOL(scsi_esp_register);
2419  
2420  void scsi_esp_unregister(struct esp *esp)
2421  {
2422  	scsi_remove_host(esp->host);
2423  }
2424  EXPORT_SYMBOL(scsi_esp_unregister);
2425  
2426  static int esp_target_alloc(struct scsi_target *starget)
2427  {
2428  	struct esp *esp = shost_priv(dev_to_shost(&starget->dev));
2429  	struct esp_target_data *tp = &esp->target[starget->id];
2430  
2431  	tp->starget = starget;
2432  
2433  	return 0;
2434  }
2435  
2436  static void esp_target_destroy(struct scsi_target *starget)
2437  {
2438  	struct esp *esp = shost_priv(dev_to_shost(&starget->dev));
2439  	struct esp_target_data *tp = &esp->target[starget->id];
2440  
2441  	tp->starget = NULL;
2442  }
2443  
2444  static int esp_sdev_init(struct scsi_device *dev)
2445  {
2446  	struct esp *esp = shost_priv(dev->host);
2447  	struct esp_target_data *tp = &esp->target[dev->id];
2448  	struct esp_lun_data *lp;
2449  
2450  	lp = kzalloc(sizeof(*lp), GFP_KERNEL);
2451  	if (!lp)
2452  		return -ENOMEM;
2453  	dev->hostdata = lp;
2454  
2455  	spi_min_period(tp->starget) = esp->min_period;
2456  	spi_max_offset(tp->starget) = 15;
2457  
2458  	if (esp->flags & ESP_FLAG_WIDE_CAPABLE)
2459  		spi_max_width(tp->starget) = 1;
2460  	else
2461  		spi_max_width(tp->starget) = 0;
2462  
2463  	return 0;
2464  }
2465  
2466  static int esp_sdev_configure(struct scsi_device *dev, struct queue_limits *lim)
2467  {
2468  	struct esp *esp = shost_priv(dev->host);
2469  	struct esp_target_data *tp = &esp->target[dev->id];
2470  
2471  	if (dev->tagged_supported)
2472  		scsi_change_queue_depth(dev, esp->num_tags);
2473  
2474  	tp->flags |= ESP_TGT_DISCONNECT;
2475  
2476  	if (!spi_initial_dv(dev->sdev_target))
2477  		spi_dv_device(dev);
2478  
2479  	return 0;
2480  }
2481  
2482  static void esp_sdev_destroy(struct scsi_device *dev)
2483  {
2484  	struct esp_lun_data *lp = dev->hostdata;
2485  
2486  	kfree(lp);
2487  	dev->hostdata = NULL;
2488  }
2489  
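/* Abort strategy: a command still on queued_cmds is simply unlinked and
 * failed with DID_ABORT; the currently active command is aborted by
 * queueing an ABORT TASK SET message with ATN asserted; a disconnected
 * command cannot be aborted here, so we fail and let the midlayer
 * escalate to a bus or host reset.
 */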
2490  static int esp_eh_abort_handler(struct scsi_cmnd *cmd)
2491  {
2492  	struct esp *esp = shost_priv(cmd->device->host);
2493  	struct esp_cmd_entry *ent, *tmp;
2494  	struct completion eh_done;
2495  	unsigned long flags;
2496  
2497  	/* XXX This helps a lot with debugging but might be a bit
2498  	 * XXX much for the final driver.
2499  	 */
2500  	spin_lock_irqsave(esp->host->host_lock, flags);
2501  	shost_printk(KERN_ERR, esp->host, "Aborting command [%p:%02x]\n",
2502  		     cmd, cmd->cmnd[0]);
2503  	ent = esp->active_cmd;
2504  	if (ent)
2505  		shost_printk(KERN_ERR, esp->host,
2506  			     "Current command [%p:%02x]\n",
2507  			     ent->cmd, ent->cmd->cmnd[0]);
2508  	list_for_each_entry(ent, &esp->queued_cmds, list) {
2509  		shost_printk(KERN_ERR, esp->host, "Queued command [%p:%02x]\n",
2510  			     ent->cmd, ent->cmd->cmnd[0]);
2511  	}
2512  	list_for_each_entry(ent, &esp->active_cmds, list) {
2513  		shost_printk(KERN_ERR, esp->host, " Active command [%p:%02x]\n",
2514  			     ent->cmd, ent->cmd->cmnd[0]);
2515  	}
2516  	esp_dump_cmd_log(esp);
2517  	spin_unlock_irqrestore(esp->host->host_lock, flags);
2518  
2519  	spin_lock_irqsave(esp->host->host_lock, flags);
2520  
2521  	ent = NULL;
2522  	list_for_each_entry(tmp, &esp->queued_cmds, list) {
2523  		if (tmp->cmd == cmd) {
2524  			ent = tmp;
2525  			break;
2526  		}
2527  	}
2528  
2529  	if (ent) {
2530  		/* Easiest case, we didn't even issue the command
2531  		 * yet so it is trivial to abort.
2532  		 */
2533  		list_del(&ent->list);
2534  
2535  		cmd->result = DID_ABORT << 16;
2536  		scsi_done(cmd);
2537  
2538  		esp_put_ent(esp, ent);
2539  
2540  		goto out_success;
2541  	}
2542  
2543  	init_completion(&eh_done);
2544  
2545  	ent = esp->active_cmd;
2546  	if (ent && ent->cmd == cmd) {
2547  		/* Command is the currently active command on
2548  		 * the bus.  If we already have an output message
2549  		 * pending, no dice.
2550  		 */
2551  		if (esp->msg_out_len)
2552  			goto out_failure;
2553  
2554  		/* Send out an abort, encouraging the target to
2555  		 * go to MSGOUT phase by asserting ATN.
2556  		 */
2557  		esp->msg_out[0] = ABORT_TASK_SET;
2558  		esp->msg_out_len = 1;
2559  		ent->eh_done = &eh_done;
2560  
2561  		scsi_esp_cmd(esp, ESP_CMD_SATN);
2562  	} else {
2563  		/* The command is disconnected.  This is not easy to
2564  		 * abort.  For now we fail and let the scsi error
2565  		 * handling layer go try a scsi bus reset or host
2566  		 * reset.
2567  		 *
2568  		 * What we could do is put together a scsi command
2569  		 * solely for the purpose of sending an abort message
2570  		 * to the target.  Coming up with all the code to
2571  		 * cook up scsi commands, special case them everywhere,
2572  		 * etc. is for questionable gain and it would be better
2573  		 * if the generic scsi error handling layer could do at
2574  		 * least some of that for us.
2575  		 *
2576  		 * Anyway, this is an area for potential future improvement
2577  		 * in this driver.
2578  		 */
2579  		goto out_failure;
2580  	}
2581  
2582  	spin_unlock_irqrestore(esp->host->host_lock, flags);
2583  
2584  	if (!wait_for_completion_timeout(&eh_done, 5 * HZ)) {
2585  		spin_lock_irqsave(esp->host->host_lock, flags);
2586  		ent->eh_done = NULL;
2587  		spin_unlock_irqrestore(esp->host->host_lock, flags);
2588  
2589  		return FAILED;
2590  	}
2591  
2592  	return SUCCESS;
2593  
2594  out_success:
2595  	spin_unlock_irqrestore(esp->host->host_lock, flags);
2596  	return SUCCESS;
2597  
2598  out_failure:
2599  	/* XXX This might be a good location to set ESP_TGT_BROKEN
2600  	 * XXX since we know which target/lun in particular is
2601  	 * XXX causing trouble.
2602  	 */
2603  	spin_unlock_irqrestore(esp->host->host_lock, flags);
2604  	return FAILED;
2605  }
2606  
2607  static int esp_eh_bus_reset_handler(struct scsi_cmnd *cmd)
2608  {
2609  	struct esp *esp = shost_priv(cmd->device->host);
2610  	struct completion eh_reset;
2611  	unsigned long flags;
2612  
2613  	init_completion(&eh_reset);
2614  
2615  	spin_lock_irqsave(esp->host->host_lock, flags);
2616  
2617  	esp->eh_reset = &eh_reset;
2618  
2619  	/* XXX This is too simple... We should add lots of
2620  	 * XXX checks here so that if we find that the chip is
2621  	 * XXX very wedged we return failure immediately so
2622  	 * XXX that we can perform a full chip reset.
2623  	 */
2624  	esp->flags |= ESP_FLAG_RESETTING;
2625  	scsi_esp_cmd(esp, ESP_CMD_RS);
2626  
2627  	spin_unlock_irqrestore(esp->host->host_lock, flags);
2628  
2629  	ssleep(esp_bus_reset_settle);
2630  
2631  	if (!wait_for_completion_timeout(&eh_reset, 5 * HZ)) {
2632  		spin_lock_irqsave(esp->host->host_lock, flags);
2633  		esp->eh_reset = NULL;
2634  		spin_unlock_irqrestore(esp->host->host_lock, flags);
2635  
2636  		return FAILED;
2637  	}
2638  
2639  	return SUCCESS;
2640  }
2641  
2642  /* All bets are off, reset the entire device.  */
2643  static int esp_eh_host_reset_handler(struct scsi_cmnd *cmd)
2644  {
2645  	struct esp *esp = shost_priv(cmd->device->host);
2646  	unsigned long flags;
2647  
2648  	spin_lock_irqsave(esp->host->host_lock, flags);
2649  	esp_bootup_reset(esp);
2650  	esp_reset_cleanup(esp);
2651  	spin_unlock_irqrestore(esp->host->host_lock, flags);
2652  
2653  	ssleep(esp_bus_reset_settle);
2654  
2655  	return SUCCESS;
2656  }
2657  
2658  static const char *esp_info(struct Scsi_Host *host)
2659  {
2660  	return "esp";
2661  }
2662  
2663  const struct scsi_host_template scsi_esp_template = {
2664  	.module			= THIS_MODULE,
2665  	.name			= "esp",
2666  	.info			= esp_info,
2667  	.queuecommand		= esp_queuecommand,
2668  	.target_alloc		= esp_target_alloc,
2669  	.target_destroy		= esp_target_destroy,
2670  	.sdev_init		= esp_sdev_init,
2671  	.sdev_configure		= esp_sdev_configure,
2672  	.sdev_destroy		= esp_sdev_destroy,
2673  	.eh_abort_handler	= esp_eh_abort_handler,
2674  	.eh_bus_reset_handler	= esp_eh_bus_reset_handler,
2675  	.eh_host_reset_handler	= esp_eh_host_reset_handler,
2676  	.can_queue		= 7,
2677  	.this_id		= 7,
2678  	.sg_tablesize		= SG_ALL,
2679  	.max_sectors		= 0xffff,
2680  	.skip_settle_delay	= 1,
2681  	.cmd_size		= sizeof(struct esp_cmd_priv),
2682  };
2683  EXPORT_SYMBOL(scsi_esp_template);
2684  
2685  static void esp_get_signalling(struct Scsi_Host *host)
2686  {
2687  	struct esp *esp = shost_priv(host);
2688  	enum spi_signal_type type;
2689  
2690  	if (esp->flags & ESP_FLAG_DIFFERENTIAL)
2691  		type = SPI_SIGNAL_HVD;
2692  	else
2693  		type = SPI_SIGNAL_SE;
2694  
2695  	spi_signalling(host) = type;
2696  }
2697  
2698  static void esp_set_offset(struct scsi_target *target, int offset)
2699  {
2700  	struct Scsi_Host *host = dev_to_shost(target->dev.parent);
2701  	struct esp *esp = shost_priv(host);
2702  	struct esp_target_data *tp = &esp->target[target->id];
2703  
2704  	if (esp->flags & ESP_FLAG_DISABLE_SYNC)
2705  		tp->nego_goal_offset = 0;
2706  	else
2707  		tp->nego_goal_offset = offset;
2708  	tp->flags |= ESP_TGT_CHECK_NEGO;
2709  }
2710  
2711  static void esp_set_period(struct scsi_target *target, int period)
2712  {
2713  	struct Scsi_Host *host = dev_to_shost(target->dev.parent);
2714  	struct esp *esp = shost_priv(host);
2715  	struct esp_target_data *tp = &esp->target[target->id];
2716  
2717  	tp->nego_goal_period = period;
2718  	tp->flags |= ESP_TGT_CHECK_NEGO;
2719  }
2720  
2721  static void esp_set_width(struct scsi_target *target, int width)
2722  {
2723  	struct Scsi_Host *host = dev_to_shost(target->dev.parent);
2724  	struct esp *esp = shost_priv(host);
2725  	struct esp_target_data *tp = &esp->target[target->id];
2726  
2727  	tp->nego_goal_width = (width ? 1 : 0);
2728  	tp->flags |= ESP_TGT_CHECK_NEGO;
2729  }
2730  
2731  static struct spi_function_template esp_transport_ops = {
2732  	.set_offset		= esp_set_offset,
2733  	.show_offset		= 1,
2734  	.set_period		= esp_set_period,
2735  	.show_period		= 1,
2736  	.set_width		= esp_set_width,
2737  	.show_width		= 1,
2738  	.get_signalling		= esp_get_signalling,
2739  };
2740  
2741  static int __init esp_init(void)
2742  {
2743  	esp_transport_template = spi_attach_transport(&esp_transport_ops);
2744  	if (!esp_transport_template)
2745  		return -ENODEV;
2746  
2747  	return 0;
2748  }
2749  
2750  static void __exit esp_exit(void)
2751  {
2752  	spi_release_transport(esp_transport_template);
2753  }
2754  
2755  MODULE_DESCRIPTION("ESP SCSI driver core");
2756  MODULE_AUTHOR("David S. Miller <davem@davemloft.net>");
2757  MODULE_LICENSE("GPL");
2758  MODULE_VERSION(DRV_VERSION);
2759  
2760  module_param(esp_bus_reset_settle, int, 0);
2761  MODULE_PARM_DESC(esp_bus_reset_settle,
2762  		 "ESP scsi bus reset delay in seconds");
2763  
2764  module_param(esp_debug, int, 0);
2765  MODULE_PARM_DESC(esp_debug,
2766  "ESP bitmapped debugging message enable value:\n"
2767  "	0x00000001	Log interrupt events\n"
2768  "	0x00000002	Log scsi commands\n"
2769  "	0x00000004	Log resets\n"
2770  "	0x00000008	Log message in events\n"
2771  "	0x00000010	Log message out events\n"
2772  "	0x00000020	Log command completion\n"
2773  "	0x00000040	Log disconnects\n"
2774  "	0x00000080	Log data start\n"
2775  "	0x00000100	Log data done\n"
2776  "	0x00000200	Log reconnects\n"
2777  "	0x00000400	Log auto-sense data\n"
2778  );
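/* Example (illustrative): loading with esp_debug=0x0009 enables the
 * interrupt (0x1) and message-in (0x8) logs; values may be OR'd.
 */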
2779  
2780  module_init(esp_init);
2781  module_exit(esp_exit);
2782  
2783  #ifdef CONFIG_SCSI_ESP_PIO
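/* Poll for up to ~500ms for data to appear in the FIFO; returns the
 * number of bytes available, or 0 on timeout.
 */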
2784  static inline unsigned int esp_wait_for_fifo(struct esp *esp)
2785  {
2786  	int i = 500000;
2787  
2788  	do {
2789  		unsigned int fbytes = esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES;
2790  
2791  		if (fbytes)
2792  			return fbytes;
2793  
2794  		udelay(1);
2795  	} while (--i);
2796  
2797  	shost_printk(KERN_ERR, esp->host, "FIFO is empty. sreg [%02x]\n",
2798  		     esp_read8(ESP_STATUS));
2799  	return 0;
2800  }
2801  
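/* Poll for up to ~500ms for the chip to raise an interrupt; returns 0
 * with esp->sreg updated on success, nonzero on timeout.
 */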
2802  static inline int esp_wait_for_intr(struct esp *esp)
2803  {
2804  	int i = 500000;
2805  
2806  	do {
2807  		esp->sreg = esp_read8(ESP_STATUS);
2808  		if (esp->sreg & ESP_STAT_INTR)
2809  			return 0;
2810  
2811  		udelay(1);
2812  	} while (--i);
2813  
2814  	shost_printk(KERN_ERR, esp->host, "IRQ timeout. sreg [%02x]\n",
2815  		     esp->sreg);
2816  	return 1;
2817  }
2818  
2819  #define ESP_FIFO_SIZE 16
2820  
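/* Programmed-I/O fallback for frontends without usable DMA: move
 * esp_count bytes through the 16-byte FIFO, draining it a byte at a
 * time for data-in transfers and refilling it after each bus-service
 * interrupt for data-out.  Any untransferred residue is left in
 * esp->send_cmd_residual.
 */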
2821  void esp_send_pio_cmd(struct esp *esp, u32 addr, u32 esp_count,
2822  		      u32 dma_count, int write, u8 cmd)
2823  {
2824  	u8 phase = esp->sreg & ESP_STAT_PMASK;
2825  
2826  	cmd &= ~ESP_CMD_DMA;
2827  	esp->send_cmd_error = 0;
2828  
2829  	if (write) {
2830  		u8 *dst = (u8 *)addr;
2831  		u8 mask = ~(phase == ESP_MIP ? ESP_INTR_FDONE : ESP_INTR_BSERV);
2832  
2833  		scsi_esp_cmd(esp, cmd);
2834  
2835  		while (1) {
2836  			if (!esp_wait_for_fifo(esp))
2837  				break;
2838  
2839  			*dst++ = readb(esp->fifo_reg);
2840  			--esp_count;
2841  
2842  			if (!esp_count)
2843  				break;
2844  
2845  			if (esp_wait_for_intr(esp)) {
2846  				esp->send_cmd_error = 1;
2847  				break;
2848  			}
2849  
2850  			if ((esp->sreg & ESP_STAT_PMASK) != phase)
2851  				break;
2852  
2853  			esp->ireg = esp_read8(ESP_INTRPT);
2854  			if (esp->ireg & mask) {
2855  				esp->send_cmd_error = 1;
2856  				break;
2857  			}
2858  
2859  			if (phase == ESP_MIP)
2860  				esp_write8(ESP_CMD_MOK, ESP_CMD);
2861  
2862  			esp_write8(ESP_CMD_TI, ESP_CMD);
2863  		}
2864  	} else {
2865  		unsigned int n = ESP_FIFO_SIZE;
2866  		u8 *src = (u8 *)addr;
2867  
2868  		scsi_esp_cmd(esp, ESP_CMD_FLUSH);
2869  
2870  		if (n > esp_count)
2871  			n = esp_count;
2872  		writesb(esp->fifo_reg, src, n);
2873  		src += n;
2874  		esp_count -= n;
2875  
2876  		scsi_esp_cmd(esp, cmd);
2877  
2878  		while (esp_count) {
2879  			if (esp_wait_for_intr(esp)) {
2880  				esp->send_cmd_error = 1;
2881  				break;
2882  			}
2883  
2884  			if ((esp->sreg & ESP_STAT_PMASK) != phase)
2885  				break;
2886  
2887  			esp->ireg = esp_read8(ESP_INTRPT);
2888  			if (esp->ireg & ~ESP_INTR_BSERV) {
2889  				esp->send_cmd_error = 1;
2890  				break;
2891  			}
2892  
2893  			n = ESP_FIFO_SIZE -
2894  			    (esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES);
2895  
2896  			if (n > esp_count)
2897  				n = esp_count;
2898  			writesb(esp->fifo_reg, src, n);
2899  			src += n;
2900  			esp_count -= n;
2901  
2902  			esp_write8(ESP_CMD_TI, ESP_CMD);
2903  		}
2904  	}
2905  
2906  	esp->send_cmd_residual = esp_count;
2907  }
2908  EXPORT_SYMBOL(esp_send_pio_cmd);
2909  #endif
2910