1 /* SPDX-License-Identifier: GPL-2.0-or-later */
2 /*
3 * Device driver for the SYMBIOS/LSILOGIC 53C8XX and 53C1010 family
4 * of PCI-SCSI IO processors.
5 *
6 * Copyright (C) 1999-2001 Gerard Roudier <groudier@free.fr>
7 *
8 * This driver is derived from the Linux sym53c8xx driver.
9 * Copyright (C) 1998-2000 Gerard Roudier
10 *
11 * The sym53c8xx driver is derived from the ncr53c8xx driver that had been
12 * a port of the FreeBSD ncr driver to Linux-1.2.13.
13 *
14 * The original ncr driver has been written for 386bsd and FreeBSD by
15 * Wolfgang Stanglmeier <wolf@cologne.de>
16 * Stefan Esser <se@mi.Uni-Koeln.de>
17 * Copyright (C) 1994 Wolfgang Stanglmeier
18 *
19 * Other major contributions:
20 *
21 * NVRAM detection and reading.
22 * Copyright (C) 1997 Richard Waltham <dormouse@farsrobt.demon.co.uk>
23 *
24 *-----------------------------------------------------------------------------
25 */
26
27 #include <linux/gfp.h>
28
29 #ifndef SYM_HIPD_H
30 #define SYM_HIPD_H
31
32 /*
33 * Generic driver options.
34 *
35 * They may be defined in platform specific headers, if they
36 * are useful.
37 *
38 * SYM_OPT_HANDLE_DEVICE_QUEUEING
39 * When this option is set, the driver will use a queue per
40 * device and handle QUEUE FULL status requeuing internally.
41 *
42 * SYM_OPT_LIMIT_COMMAND_REORDERING
43 * When this option is set, the driver tries to limit tagged
44 * command reordering to some reasonable value.
45 * (set for Linux)
46 */
47 #if 0
48 #define SYM_OPT_HANDLE_DEVICE_QUEUEING
49 #define SYM_OPT_LIMIT_COMMAND_REORDERING
50 #endif
51
52 /*
53 * Active debugging tags and verbosity.
54 * Both DEBUG_FLAGS and sym_verbose can be redefined
55 * by the platform specific code to something else.
56 */
57 #define DEBUG_ALLOC (0x0001)
58 #define DEBUG_PHASE (0x0002)
59 #define DEBUG_POLL (0x0004)
60 #define DEBUG_QUEUE (0x0008)
61 #define DEBUG_RESULT (0x0010)
62 #define DEBUG_SCATTER (0x0020)
63 #define DEBUG_SCRIPT (0x0040)
64 #define DEBUG_TINY (0x0080)
65 #define DEBUG_TIMING (0x0100)
66 #define DEBUG_NEGO (0x0200)
67 #define DEBUG_TAGS (0x0400)
68 #define DEBUG_POINTER (0x0800)
69
70 #ifndef DEBUG_FLAGS
71 #define DEBUG_FLAGS (0x0000)
72 #endif
73
74 #ifndef sym_verbose
75 #define sym_verbose (np->verbose)
76 #endif
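
/*
 * A debug statement in the core code then typically looks like the
 * sketch below (assuming the printf()/sym_name() wrappers provided
 * by the O/S glue code):
 *
 *	if (DEBUG_FLAGS & DEBUG_TAGS)
 *		printf("%s: tag handling event\n", sym_name(np));
 */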
77
78 /*
79  * These should already have been defined.
80 */
81 #ifndef assert
82 #define assert(expression) { \
83 if (!(expression)) { \
84 (void)panic( \
85 "assertion \"%s\" failed: file \"%s\", line %d\n", \
86 #expression, \
87 __FILE__, __LINE__); \
88 } \
89 }
90 #endif
91
92 /*
93 * Number of tasks per device we want to handle.
94 */
95 #if SYM_CONF_MAX_TAG_ORDER > 8
96 #error "more than 256 tags per logical unit not allowed."
97 #endif
98 #define SYM_CONF_MAX_TASK (1<<SYM_CONF_MAX_TAG_ORDER)
99
100 /*
101  * Do not use more tags than tasks we can handle.
102 */
103 #ifndef SYM_CONF_MAX_TAG
104 #define SYM_CONF_MAX_TAG SYM_CONF_MAX_TASK
105 #endif
106 #if SYM_CONF_MAX_TAG > SYM_CONF_MAX_TASK
107 #undef SYM_CONF_MAX_TAG
108 #define SYM_CONF_MAX_TAG SYM_CONF_MAX_TASK
109 #endif
110
111 /*
112 * This one means 'NO TAG for this job'
113 */
114 #define NO_TAG (256)
115
116 /*
117 * Number of SCSI targets.
118 */
119 #if SYM_CONF_MAX_TARGET > 16
120 #error "more than 16 targets not allowed."
121 #endif
122
123 /*
124 * Number of logical units per target.
125 */
126 #if SYM_CONF_MAX_LUN > 64
127 #error "more than 64 logical units per target not allowed."
128 #endif
129
130 /*
131 * Asynchronous pre-scaler (ns). Shall be 40 for
132 * the SCSI timings to be compliant.
133 */
134 #define SYM_CONF_MIN_ASYNC (40)
135
136
137 /*
138 * MEMORY ALLOCATOR.
139 */
140
141 #define SYM_MEM_WARN 1 /* Warn on failed operations */
142
143 #define SYM_MEM_PAGE_ORDER 0 /* 1 PAGE maximum */
144 #define SYM_MEM_CLUSTER_SHIFT (PAGE_SHIFT+SYM_MEM_PAGE_ORDER)
145 #define SYM_MEM_FREE_UNUSED /* Free unused pages immediately */
146 /*
147 * Shortest memory chunk is (1<<SYM_MEM_SHIFT), currently 16.
148  * Actual allocations are performed in SYM_MEM_CLUSTER_SIZE chunks
149  * (1 PAGE at a time is just fine).
150 */
151 #define SYM_MEM_SHIFT 4
152 #define SYM_MEM_CLUSTER_SIZE (1UL << SYM_MEM_CLUSTER_SHIFT)
153 #define SYM_MEM_CLUSTER_MASK (SYM_MEM_CLUSTER_SIZE-1)
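
/*
 * As an example, with 4 KB pages (PAGE_SHIFT == 12) and
 * SYM_MEM_PAGE_ORDER == 0, SYM_MEM_CLUSTER_SHIFT is 12, so the
 * allocator deals out 4096-byte clusters carved into chunks of at
 * least (1<<SYM_MEM_SHIFT) == 16 bytes.
 */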
154
155 /*
156 * Number of entries in the START and DONE queues.
157 *
158  * We limit them to 1 PAGE so that their allocation is
159  * guaranteed to succeed. Each entry is 8 bytes long (2 DWORDS).
160 */
161 #ifdef SYM_CONF_MAX_START
162 #define SYM_CONF_MAX_QUEUE (SYM_CONF_MAX_START+2)
163 #else
164 #define SYM_CONF_MAX_QUEUE (7*SYM_CONF_MAX_TASK+2)
165 #define SYM_CONF_MAX_START (SYM_CONF_MAX_QUEUE-2)
166 #endif
167
168 #if SYM_CONF_MAX_QUEUE > SYM_MEM_CLUSTER_SIZE/8
169 #undef SYM_CONF_MAX_QUEUE
170 #define SYM_CONF_MAX_QUEUE (SYM_MEM_CLUSTER_SIZE/8)
171 #undef SYM_CONF_MAX_START
172 #define SYM_CONF_MAX_START (SYM_CONF_MAX_QUEUE-2)
173 #endif
174
175 /*
176 * For this one, we want a short name :-)
177 */
178 #define MAX_QUEUE SYM_CONF_MAX_QUEUE
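
/*
 * With 4 KB clusters, for example, the cap above limits the queues
 * to SYM_MEM_CLUSTER_SIZE/8 == 512 entries, i.e. at most 510
 * simultaneously started commands, since 2 entries are reserved.
 */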
179
180 /*
181 * Common definitions for both bus space based and legacy IO methods.
182 */
183
184 #define INB_OFF(np, o) ioread8(np->s.ioaddr + (o))
185 #define INW_OFF(np, o) ioread16(np->s.ioaddr + (o))
186 #define INL_OFF(np, o) ioread32(np->s.ioaddr + (o))
187
188 #define OUTB_OFF(np, o, val) iowrite8((val), np->s.ioaddr + (o))
189 #define OUTW_OFF(np, o, val) iowrite16((val), np->s.ioaddr + (o))
190 #define OUTL_OFF(np, o, val) iowrite32((val), np->s.ioaddr + (o))
191
192 #define INB(np, r) INB_OFF(np, offsetof(struct sym_reg, r))
193 #define INW(np, r) INW_OFF(np, offsetof(struct sym_reg, r))
194 #define INL(np, r) INL_OFF(np, offsetof(struct sym_reg, r))
195
196 #define OUTB(np, r, v) OUTB_OFF(np, offsetof(struct sym_reg, r), (v))
197 #define OUTW(np, r, v) OUTW_OFF(np, offsetof(struct sym_reg, r), (v))
198 #define OUTL(np, r, v) OUTL_OFF(np, offsetof(struct sym_reg, r), (v))
199
200 #define OUTONB(np, r, m) OUTB(np, r, INB(np, r) | (m))
201 #define OUTOFFB(np, r, m) OUTB(np, r, INB(np, r) & ~(m))
202 #define OUTONW(np, r, m) OUTW(np, r, INW(np, r) | (m))
203 #define OUTOFFW(np, r, m) OUTW(np, r, INW(np, r) & ~(m))
204 #define OUTONL(np, r, m) OUTL(np, r, INL(np, r) | (m))
205 #define OUTOFFL(np, r, m) OUTL(np, r, INL(np, r) & ~(m))
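
/*
 * These accessors are used throughout the core code. As an
 * illustrative sketch, the read-modify-write sequence
 *
 *	u_char dcntl = INB(np, nc_dcntl);
 *	OUTB(np, nc_dcntl, dcntl | NOCOM);
 *
 * is what OUTONB(np, nc_dcntl, NOCOM) expands to.
 */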
206
207 /*
208 * We normally want the chip to have a consistent view
209 * of driver internal data structures when we restart it.
210 * Thus these macros.
211 */
212 #define OUTL_DSP(np, v) \
213 do { \
214 MEMORY_WRITE_BARRIER(); \
215 OUTL(np, nc_dsp, (v)); \
216 } while (0)
217
218 #define OUTONB_STD() \
219 do { \
220 MEMORY_WRITE_BARRIER(); \
221 OUTONB(np, nc_dcntl, (STD|NOCOM)); \
222 } while (0)
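
/*
 * For example, resuming the SCRIPTS processor at the 'start' entry
 * of SCRIPTS A would look like the sketch below (SCRIPTA_BA() is
 * assumed to be provided by the O/S glue code):
 *
 *	OUTL_DSP(np, SCRIPTA_BA(np, start));
 */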
223
224 /*
225 * Command control block states.
226 */
227 #define HS_IDLE (0)
228 #define HS_BUSY (1)
229 #define HS_NEGOTIATE	(2)	/* sync/wide data transfer */
230 #define HS_DISCONNECT (3) /* Disconnected by target */
231 #define HS_WAIT (4) /* waiting for resource */
232
233 #define HS_DONEMASK (0x80)
234 #define HS_COMPLETE (4|HS_DONEMASK)
235 #define HS_SEL_TIMEOUT (5|HS_DONEMASK) /* Selection timeout */
236 #define HS_UNEXPECTED (6|HS_DONEMASK) /* Unexpected disconnect */
237 #define HS_COMP_ERR (7|HS_DONEMASK) /* Completed with error */
238
239 /*
240 * Software Interrupt Codes
241 */
242 #define SIR_BAD_SCSI_STATUS (1)
243 #define SIR_SEL_ATN_NO_MSG_OUT (2)
244 #define SIR_MSG_RECEIVED (3)
245 #define SIR_MSG_WEIRD (4)
246 #define SIR_NEGO_FAILED (5)
247 #define SIR_NEGO_PROTO (6)
248 #define SIR_SCRIPT_STOPPED (7)
249 #define SIR_REJECT_TO_SEND (8)
250 #define SIR_SWIDE_OVERRUN (9)
251 #define SIR_SODL_UNDERRUN (10)
252 #define SIR_RESEL_NO_MSG_IN (11)
253 #define SIR_RESEL_NO_IDENTIFY (12)
254 #define SIR_RESEL_BAD_LUN (13)
255 #define SIR_TARGET_SELECTED (14)
256 #define SIR_RESEL_BAD_I_T_L (15)
257 #define SIR_RESEL_BAD_I_T_L_Q (16)
258 #define SIR_ABORT_SENT (17)
259 #define SIR_RESEL_ABORTED (18)
260 #define SIR_MSG_OUT_DONE (19)
261 #define SIR_COMPLETE_ERROR (20)
262 #define SIR_DATA_OVERRUN (21)
263 #define SIR_BAD_PHASE (22)
264 #if SYM_CONF_DMA_ADDRESSING_MODE == 2
265 #define SIR_DMAP_DIRTY (23)
266 #define SIR_MAX (23)
267 #else
268 #define SIR_MAX (22)
269 #endif
270
271 /*
272 * Extended error bit codes.
273 * xerr_status field of struct sym_ccb.
274 */
275 #define XE_EXTRA_DATA (1) /* unexpected data phase */
276 #define XE_BAD_PHASE (1<<1) /* illegal phase (4/5) */
277 #define XE_PARITY_ERR (1<<2) /* unrecovered SCSI parity error */
278 #define XE_SODL_UNRUN (1<<3) /* ODD transfer in DATA OUT phase */
279 #define XE_SWIDE_OVRUN (1<<4) /* ODD transfer in DATA IN phase */
280
281 /*
282 * Negotiation status.
283 * nego_status field of struct sym_ccb.
284 */
285 #define NS_SYNC (1)
286 #define NS_WIDE (2)
287 #define NS_PPR (3)
288
289 /*
290  * A hashed table of CCBs is used to retrieve a CCB address
291  * from a DSA value.
292 */
293 #define CCB_HASH_SHIFT 8
294 #define CCB_HASH_SIZE (1UL << CCB_HASH_SHIFT)
295 #define CCB_HASH_MASK (CCB_HASH_SIZE-1)
296 #if 1
297 #define CCB_HASH_CODE(dsa) \
298 (((dsa) >> (_LGRU16_(sizeof(struct sym_ccb)))) & CCB_HASH_MASK)
299 #else
300 #define CCB_HASH_CODE(dsa) (((dsa) >> 9) & CCB_HASH_MASK)
301 #endif
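
/*
 * Retrieving a CCB from a DSA value is then a short chain walk,
 * sketched below (the functional version lives in sym_hipd.c):
 *
 *	struct sym_ccb *cp = np->ccbh[CCB_HASH_CODE(dsa)];
 *
 *	while (cp && cp->ccb_ba != dsa)
 *		cp = cp->link_ccbh;
 */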
302
303 #if SYM_CONF_DMA_ADDRESSING_MODE == 2
304 /*
305 * We may want to use segment registers for 64 bit DMA.
306  * 16 segment registers -> up to 64 GB addressable.
307 */
308 #define SYM_DMAP_SHIFT (4)
309 #define SYM_DMAP_SIZE (1u<<SYM_DMAP_SHIFT)
310 #define SYM_DMAP_MASK (SYM_DMAP_SIZE-1)
311 #endif
312
313 /*
314 * Device flags.
315 */
316 #define SYM_DISC_ENABLED (1)
317 #define SYM_TAGS_ENABLED (1<<1)
318 #define SYM_SCAN_BOOT_DISABLED (1<<2)
319 #define SYM_SCAN_LUNS_DISABLED (1<<3)
320
321 /*
322 * Host adapter miscellaneous flags.
323 */
324 #define SYM_AVOID_BUS_RESET (1)
325
326 /*
327 * Misc.
328 */
329 #define SYM_SNOOP_TIMEOUT (10000000)
330 #define BUS_8_BIT 0
331 #define BUS_16_BIT 1
332
333 /*
334  * Gathered values of the negotiable transfer parameters.
335 */
336 struct sym_trans {
337 u8 period;
338 u8 offset;
339 unsigned int width:1;
340 unsigned int iu:1;
341 unsigned int dt:1;
342 unsigned int qas:1;
343 unsigned int check_nego:1;
344 unsigned int renego:2;
345 };
346
347 /*
348 * Global TCB HEADER.
349 *
350 * Due to lack of indirect addressing on earlier NCR chips,
351 * this substructure is copied from the TCB to a global
352 * address after selection.
353 * For SYMBIOS chips that support LOAD/STORE this copy is
354 * not needed and thus not performed.
355 */
356 struct sym_tcbh {
357 /*
358 	 * Bus addresses of the LUN table, as accessed by SCRIPTS.
359 	 * LUN #0 is a special case, since multi-lun devices are rare,
360 	 * and we want to speed up the general case rather than waste
361 	 * resources.
362 */
363 u32 luntbl_sa; /* bus address of this table */
364 u32 lun0_sa; /* bus address of LCB #0 */
365 /*
366 * Actual SYNC/WIDE IO registers value for this target.
367 * 'sval', 'wval' and 'uval' are read from SCRIPTS and
368 * so have alignment constraints.
369 */
370 /*0*/ u_char uval; /* -> SCNTL4 register */
371 /*1*/ u_char sval; /* -> SXFER io register */
372 /*2*/ u_char filler1;
373 /*3*/ u_char wval; /* -> SCNTL3 io register */
374 };
375
376 /*
377 * Target Control Block
378 */
379 struct sym_tcb {
380 /*
381 * TCB header.
382 * Assumed at offset 0.
383 */
384 /*0*/ struct sym_tcbh head;
385
386 /*
387 * LUN table used by the SCRIPTS processor.
388 * An array of bus addresses is used on reselection.
389 */
390 u32 *luntbl; /* LCBs bus address table */
391 int nlcb; /* Number of valid LCBs (including LUN #0) */
392
393 /*
394 * LUN table used by the C code.
395 */
396 struct sym_lcb *lun0p; /* LCB of LUN #0 (usual case) */
397 #if SYM_CONF_MAX_LUN > 1
398 struct sym_lcb **lunmp; /* Other LCBs [1..MAX_LUN] */
399 #endif
400
401 #ifdef SYM_HAVE_STCB
402 /*
403 * O/S specific data structure.
404 */
405 struct sym_stcb s;
406 #endif
407
408 /* Transfer goal */
409 struct sym_trans tgoal;
410
411 /* Last printed transfer speed */
412 struct sym_trans tprint;
413
414 /*
415 * Keep track of the CCB used for the negotiation in order
416 * to ensure that only 1 negotiation is queued at a time.
417 */
418 struct sym_ccb * nego_cp; /* CCB used for the nego */
419
420 /*
421 * Set when we want to reset the device.
422 */
423 u_char to_reset;
424
425 /*
426 * Other user settable limits and options.
427 * These limits are read from the NVRAM if present.
428 */
429 unsigned char usrflags;
430 unsigned char usr_period;
431 unsigned char usr_width;
432 unsigned short usrtags;
433 struct scsi_target *starget;
434 };
435
436 /*
437 * Global LCB HEADER.
438 *
439 * Due to lack of indirect addressing on earlier NCR chips,
440 * this substructure is copied from the LCB to a global
441 * address after selection.
442 * For SYMBIOS chips that support LOAD/STORE this copy is
443 * not needed and thus not performed.
444 */
445 struct sym_lcbh {
446 /*
447 * SCRIPTS address jumped by SCRIPTS on reselection.
448 	 * For logical units that have not been probed, this address
449 	 * points to SCRIPTS that deal with bad LU handling (it must
450 	 * be at offset zero of the LCB for that reason).
451 */
452 /*0*/ u32 resel_sa;
453
454 /*
455 * Task (bus address of a CCB) read from SCRIPTS that points
456 * to the unique ITL nexus allowed to be disconnected.
457 */
458 u32 itl_task_sa;
459
460 /*
461 * Task table bus address (read from SCRIPTS).
462 */
463 u32 itlq_tbl_sa;
464 };
465
466 /*
467 * Logical Unit Control Block
468 */
469 struct sym_lcb {
470 /*
471 * TCB header.
472 * Assumed at offset 0.
473 */
474 /*0*/ struct sym_lcbh head;
475
476 /*
477 * Task table read from SCRIPTS that contains pointers to
478 * ITLQ nexuses. The bus address read from SCRIPTS is
479 * inside the header.
480 */
481 u32 *itlq_tbl; /* Kernel virtual address */
482
483 /*
484 * Busy CCBs management.
485 */
486 u_short busy_itlq; /* Number of busy tagged CCBs */
487 u_short busy_itl; /* Number of busy untagged CCBs */
488
489 /*
490 * Circular tag allocation buffer.
491 */
492 u_short ia_tag; /* Tag allocation index */
493 u_short if_tag; /* Tag release index */
494 u_char *cb_tags; /* Circular tags buffer */
495
496 /*
497 * O/S specific data structure.
498 */
499 #ifdef SYM_HAVE_SLCB
500 struct sym_slcb s;
501 #endif
502
503 #ifdef SYM_OPT_HANDLE_DEVICE_QUEUEING
504 /*
505 	 * Optionally, the driver can handle device queueing
506 	 * and internally requeue commands that need to be redone.
507 */
508 SYM_QUEHEAD waiting_ccbq;
509 SYM_QUEHEAD started_ccbq;
510 int num_sgood;
511 u_short started_tags;
512 u_short started_no_tag;
513 u_short started_max;
514 u_short started_limit;
515 #endif
516
517 #ifdef SYM_OPT_LIMIT_COMMAND_REORDERING
518 /*
519 * Optionally the driver can try to prevent SCSI
520 * IOs from being reordered too much.
521 */
522 u_char tags_si; /* Current index to tags sum */
523 u_short tags_sum[2]; /* Tags sum counters */
524 u_short tags_since; /* # of tags since last switch */
525 #endif
526
527 /*
528 * Set when we want to clear all tasks.
529 */
530 u_char to_clear;
531
532 /*
533 * Capabilities.
534 */
535 u_char user_flags;
536 u_char curr_flags;
537 };
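
/*
 * Sketch of how the circular tag buffer above is intended to work
 * (the functional code is in sym_hipd.c): 'ia_tag' and 'if_tag'
 * only ever advance, modulo SYM_CONF_MAX_TASK.
 *
 *	tag = lp->cb_tags[lp->ia_tag];		(allocation)
 *	if (++lp->ia_tag == SYM_CONF_MAX_TASK)
 *		lp->ia_tag = 0;
 *
 *	lp->cb_tags[lp->if_tag] = tag;		(release)
 *	if (++lp->if_tag == SYM_CONF_MAX_TASK)
 *		lp->if_tag = 0;
 */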
538
539 /*
540 * Action from SCRIPTS on a task.
541 * Is part of the CCB, but is also used separately to plug
542 * error handling action to perform from SCRIPTS.
543 */
544 struct sym_actscr {
545 u32 start; /* Jumped by SCRIPTS after selection */
546 	u32	restart;	/* Jumped by SCRIPTS on reselection */
547 };
548
549 /*
550 * Phase mismatch context.
551 *
552 * It is part of the CCB and is used as parameters for the
553 * DATA pointer. We need two contexts to handle correctly the
554 * SAVED DATA POINTER.
555 */
556 struct sym_pmc {
557 struct sym_tblmove sg; /* Updated interrupted SG block */
558 u32 ret; /* SCRIPT return address */
559 };
560
561 /*
562 * LUN control block lookup.
563 * We use a direct pointer for LUN #0, and a table of
564 * pointers which is only allocated for devices that support
565 * LUN(s) > 0.
566 */
567 #if SYM_CONF_MAX_LUN <= 1
568 #define sym_lp(tp, lun) (!lun) ? (tp)->lun0p : NULL
569 #else
570 #define sym_lp(tp, lun) \
571 (!lun) ? (tp)->lun0p : (tp)->lunmp ? (tp)->lunmp[((u8)lun)] : NULL
572 #endif
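
/*
 * Typical lookup, as a sketch:
 *
 *	struct sym_tcb *tp = &np->target[cp->target];
 *	struct sym_lcb *lp = sym_lp(tp, cp->lun);
 *
 * 'lp' may be NULL for a logical unit that has no LCB yet.
 */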
573
574 /*
575  * Status bytes used by both the host and the SCRIPTS processor.
576 *
577 * The last four bytes (status[4]) are copied to the
578 * scratchb register (declared as scr0..scr3) just after the
579 * select/reselect, and copied back just after disconnecting.
580  * Inside the script, the XX_REG names are used.
581 */
582
583 /*
584 * Last four bytes (script)
585 */
586 #define HX_REG scr0
587 #define HX_PRT nc_scr0
588 #define HS_REG scr1
589 #define HS_PRT nc_scr1
590 #define SS_REG scr2
591 #define SS_PRT nc_scr2
592 #define HF_REG scr3
593 #define HF_PRT nc_scr3
594
595 /*
596 * Last four bytes (host)
597 */
598 #define host_xflags phys.head.status[0]
599 #define host_status phys.head.status[1]
600 #define ssss_status phys.head.status[2]
601 #define host_flags phys.head.status[3]
602
603 /*
604 * Host flags
605 */
606 #define HF_IN_PM0 1u
607 #define HF_IN_PM1 (1u<<1)
608 #define HF_ACT_PM (1u<<2)
609 #define HF_DP_SAVED (1u<<3)
610 #define HF_SENSE (1u<<4)
611 #define HF_EXT_ERR (1u<<5)
612 #define HF_DATA_IN (1u<<6)
613 #ifdef SYM_CONF_IARB_SUPPORT
614 #define HF_HINT_IARB (1u<<7)
615 #endif
616
617 /*
618 * More host flags
619 */
620 #if SYM_CONF_DMA_ADDRESSING_MODE == 2
621 #define HX_DMAP_DIRTY (1u<<7)
622 #endif
623
624 /*
625 * Global CCB HEADER.
626 *
627 * Due to lack of indirect addressing on earlier NCR chips,
628 * this substructure is copied from the ccb to a global
629 * address after selection (or reselection) and copied back
630 * before disconnect.
631 * For SYMBIOS chips that support LOAD/STORE this copy is
632 * not needed and thus not performed.
633 */
634
635 struct sym_ccbh {
636 /*
637 * Start and restart SCRIPTS addresses (must be at 0).
638 */
639 /*0*/ struct sym_actscr go;
640
641 /*
642 	 * SCRIPTS jump addresses that deal with data pointers.
643 * 'savep' points to the position in the script responsible
644 * for the actual transfer of data.
645 * It's written on reception of a SAVE_DATA_POINTER message.
646 */
647 u32 savep; /* Jump address to saved data pointer */
648 u32 lastp; /* SCRIPTS address at end of data */
649
650 /*
651 * Status fields.
652 */
653 u8 status[4];
654 };
655
656 /*
657 * GET/SET the value of the data pointer used by SCRIPTS.
658 *
659  * We must distinguish between the LOAD/STORE-based SCRIPTS,
660  * which use the header in the CCB directly, and the NCR-GENERIC
661  * SCRIPTS, which use the copy of the header in the HCB.
662 */
663 #if SYM_CONF_GENERIC_SUPPORT
664 #define sym_set_script_dp(np, cp, dp) \
665 do { \
666 if (np->features & FE_LDSTR) \
667 cp->phys.head.lastp = cpu_to_scr(dp); \
668 else \
669 np->ccb_head.lastp = cpu_to_scr(dp); \
670 } while (0)
671 #define sym_get_script_dp(np, cp) \
672 scr_to_cpu((np->features & FE_LDSTR) ? \
673 cp->phys.head.lastp : np->ccb_head.lastp)
674 #else
675 #define sym_set_script_dp(np, cp, dp) \
676 do { \
677 cp->phys.head.lastp = cpu_to_scr(dp); \
678 } while (0)
679
680 #define sym_get_script_dp(np, cp) (cp->phys.head.lastp)
681 #endif
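
/*
 * The core code reads the current data pointer with, for instance
 * (sketch only):
 *
 *	u32 dp = sym_get_script_dp(np, cp);
 *
 * and later writes an updated value back with sym_set_script_dp(),
 * which hides the LOAD/STORE vs. NCR-GENERIC difference.
 */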
682
683 /*
684 * Data Structure Block
685 *
686 * During execution of a ccb by the script processor, the
687 * DSA (data structure address) register points to this
688 * substructure of the ccb.
689 */
690 struct sym_dsb {
691 /*
692 * CCB header.
693 * Also assumed at offset 0 of the sym_ccb structure.
694 */
695 /*0*/ struct sym_ccbh head;
696
697 /*
698 * Phase mismatch contexts.
699 * We need two to handle correctly the SAVED DATA POINTER.
700 * MUST BOTH BE AT OFFSET < 256, due to using 8 bit arithmetic
701 * for address calculation from SCRIPTS.
702 */
703 struct sym_pmc pm0;
704 struct sym_pmc pm1;
705
706 /*
707 * Table data for Script
708 */
709 struct sym_tblsel select;
710 struct sym_tblmove smsg;
711 struct sym_tblmove smsg_ext;
712 struct sym_tblmove cmd;
713 struct sym_tblmove sense;
714 struct sym_tblmove wresid;
715 struct sym_tblmove data [SYM_CONF_MAX_SG];
716 };
717
718 /*
719 * Our Command Control Block
720 */
721 struct sym_ccb {
722 /*
723 	 * This is the data structure pointed to by the DSA register
724 	 * when the CCB is executed by the script processor.
725 	 * It must be the first entry.
726 */
727 struct sym_dsb phys;
728
729 /*
730 * Pointer to CAM ccb and related stuff.
731 */
732 struct scsi_cmnd *cmd; /* CAM scsiio ccb */
733 u8 cdb_buf[16]; /* Copy of CDB */
734 #define SYM_SNS_BBUF_LEN 32
735 u8 sns_bbuf[SYM_SNS_BBUF_LEN]; /* Bounce buffer for sense data */
736 int data_len; /* Total data length */
737 int segments; /* Number of SG segments */
738
739 u8 order; /* Tag type (if tagged command) */
740 unsigned char odd_byte_adjustment; /* odd-sized req on wide bus */
741
742 u_char nego_status; /* Negotiation status */
743 u_char xerr_status; /* Extended error flags */
744 u32 extra_bytes; /* Extraneous bytes transferred */
745
746 /*
747 * Message areas.
748 * We prepare a message to be sent after selection.
749 * We may use a second one if the command is rescheduled
750 * due to CHECK_CONDITION or COMMAND TERMINATED.
751 * Contents are IDENTIFY and SIMPLE_TAG.
752 * While negotiating sync or wide transfer,
753 * a SDTR or WDTR message is appended.
754 */
755 u_char scsi_smsg [12];
756 u_char scsi_smsg2[12];
757
758 /*
759 * Auto request sense related fields.
760 */
761 u_char sensecmd[6]; /* Request Sense command */
762 u_char sv_scsi_status; /* Saved SCSI status */
763 u_char sv_xerr_status; /* Saved extended status */
764 int sv_resid; /* Saved residual */
765
766 /*
767 * Other fields.
768 */
769 u32 ccb_ba; /* BUS address of this CCB */
770 u_short tag; /* Tag for this transfer */
771 /* NO_TAG means no tag */
772 u_char target;
773 u_char lun;
774 struct sym_ccb *link_ccbh; /* Host adapter CCB hash chain */
775 SYM_QUEHEAD link_ccbq; /* Link to free/busy CCB queue */
776 u32 startp; /* Initial data pointer */
777 u32 goalp; /* Expected last data pointer */
778 int ext_sg; /* Extreme data pointer, used */
779 int ext_ofs; /* to calculate the residual. */
780 #ifdef SYM_OPT_HANDLE_DEVICE_QUEUEING
781 SYM_QUEHEAD link2_ccbq; /* Link for device queueing */
782 u_char started; /* CCB queued to the squeue */
783 #endif
784 u_char to_abort; /* Want this IO to be aborted */
785 #ifdef SYM_OPT_LIMIT_COMMAND_REORDERING
786 u_char tags_si; /* Lun tags sum index (0,1) */
787 #endif
788 };
789
790 #define CCB_BA(cp,lbl) cpu_to_scr(cp->ccb_ba + offsetof(struct sym_ccb, lbl))
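
/*
 * Example (sketch): the bus address of the first phase mismatch
 * context of a CCB, in SCRIPTS byte order, is obtained with
 *
 *	u32 pm0_ba = CCB_BA(cp, phys.pm0);
 */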
791
792 typedef struct device *m_pool_ident_t;
793
794 /*
795 * Host Control Block
796 */
797 struct sym_hcb {
798 /*
799 * Global headers.
800 	 * Due to their poor addressing capabilities, the earlier
801 	 * chips (810, 815, 825) copy part of the data structures
802 	 * (CCB, TCB and LCB) into fixed areas.
803 */
804 #if SYM_CONF_GENERIC_SUPPORT
805 struct sym_ccbh ccb_head;
806 struct sym_tcbh tcb_head;
807 struct sym_lcbh lcb_head;
808 #endif
809 /*
810 * Idle task and invalid task actions and
811 * their bus addresses.
812 */
813 struct sym_actscr idletask, notask, bad_itl, bad_itlq;
814 u32 idletask_ba, notask_ba, bad_itl_ba, bad_itlq_ba;
815
816 /*
817 	 * Dummy LUN table to protect us against a target
818 	 * returning a bad LUN number on reselection.
819 */
820 u32 *badluntbl; /* Table physical address */
821 u32 badlun_sa; /* SCRIPT handler BUS address */
822
823 /*
824 * Bus address of this host control block.
825 */
826 u32 hcb_ba;
827
828 /*
829 * Bit 32-63 of the on-chip RAM bus address in LE format.
830 * The START_RAM64 script loads the MMRS and MMWS from this
831 * field.
832 */
833 u32 scr_ram_seg;
834
835 /*
836 * Initial value of some IO register bits.
837 * These values are assumed to have been set by BIOS, and may
838 * be used to probe adapter implementation differences.
839 */
840 u_char sv_scntl0, sv_scntl3, sv_dmode, sv_dcntl, sv_ctest3, sv_ctest4,
841 sv_ctest5, sv_gpcntl, sv_stest2, sv_stest4, sv_scntl4,
842 sv_stest1;
843
844 /*
845 * Actual initial value of IO register bits used by the
846 * driver. They are loaded at initialisation according to
847 * features that are to be enabled/disabled.
848 */
849 u_char rv_scntl0, rv_scntl3, rv_dmode, rv_dcntl, rv_ctest3, rv_ctest4,
850 rv_ctest5, rv_stest2, rv_ccntl0, rv_ccntl1, rv_scntl4;
851
852 /*
853 * Target data.
854 */
855 struct sym_tcb target[SYM_CONF_MAX_TARGET];
856
857 /*
858 * Target control block bus address array used by the SCRIPT
859 * on reselection.
860 */
861 u32 *targtbl;
862 u32 targtbl_ba;
863
864 /*
865 * DMA pool handle for this HBA.
866 */
867 m_pool_ident_t bus_dmat;
868
869 /*
870 * O/S specific data structure
871 */
872 struct sym_shcb s;
873
874 /*
875 * Physical bus addresses of the chip.
876 */
877 u32 mmio_ba; /* MMIO 32 bit BUS address */
878 u32 ram_ba; /* RAM 32 bit BUS address */
879
880 /*
881 * SCRIPTS virtual and physical bus addresses.
882 	 * 'scripta' is loaded in the on-chip RAM if present.
883 	 * 'scriptb' stays in main memory for all chips except the
884 	 * 53C895A, 53C896 and 53C1010 that provide 8K of on-chip RAM.
885 */
886 u_char *scripta0; /* Copy of scripts A, B, Z */
887 u_char *scriptb0;
888 u_char *scriptz0;
889 u32 scripta_ba; /* Actual scripts A, B, Z */
890 u32 scriptb_ba; /* 32 bit bus addresses. */
891 u32 scriptz_ba;
892 u_short scripta_sz; /* Actual size of script A, B, Z*/
893 u_short scriptb_sz;
894 u_short scriptz_sz;
895
896 /*
897 * Bus addresses, setup and patch methods for
898 * the selected firmware.
899 */
900 struct sym_fwa_ba fwa_bas; /* Useful SCRIPTA bus addresses */
901 struct sym_fwb_ba fwb_bas; /* Useful SCRIPTB bus addresses */
902 struct sym_fwz_ba fwz_bas; /* Useful SCRIPTZ bus addresses */
903 void (*fw_setup)(struct sym_hcb *np, struct sym_fw *fw);
904 void (*fw_patch)(struct Scsi_Host *);
905 char *fw_name;
906
907 /*
908 * General controller parameters and configuration.
909 */
910 u_int features; /* Chip features map */
911 u_char myaddr; /* SCSI id of the adapter */
912 u_char maxburst; /* log base 2 of dwords burst */
913 u_char maxwide; /* Maximum transfer width */
914 u_char minsync; /* Min sync period factor (ST) */
915 u_char maxsync; /* Max sync period factor (ST) */
916 u_char maxoffs; /* Max scsi offset (ST) */
917 u_char minsync_dt; /* Min sync period factor (DT) */
918 u_char maxsync_dt; /* Max sync period factor (DT) */
919 u_char maxoffs_dt; /* Max scsi offset (DT) */
920 u_char multiplier; /* Clock multiplier (1,2,4) */
921 u_char clock_divn; /* Number of clock divisors */
922 u32 clock_khz; /* SCSI clock frequency in KHz */
923 u32 pciclk_khz; /* Estimated PCI clock in KHz */
924 /*
925 * Start queue management.
926 * It is filled up by the host processor and accessed by the
927 * SCRIPTS processor in order to start SCSI commands.
928 */
929 volatile /* Prevent code optimizations */
930 u32 *squeue; /* Start queue virtual address */
931 u32 squeue_ba; /* Start queue BUS address */
932 u_short squeueput; /* Next free slot of the queue */
933 u_short actccbs; /* Number of allocated CCBs */
934
935 /*
936 * Command completion queue.
937 * It is the same size as the start queue to avoid overflow.
938 */
939 u_short dqueueget; /* Next position to scan */
940 volatile /* Prevent code optimizations */
941 u32 *dqueue; /* Completion (done) queue */
942 u32 dqueue_ba; /* Done queue BUS address */
943
944 /*
945 * Miscellaneous buffers accessed by the scripts-processor.
946 * They shall be DWORD aligned, because they may be read or
947 * written with a script command.
948 */
949 u_char msgout[8]; /* Buffer for MESSAGE OUT */
950 u_char msgin [8]; /* Buffer for MESSAGE IN */
951 u32 lastmsg; /* Last SCSI message sent */
952 u32 scratch; /* Scratch for SCSI receive */
953 /* Also used for cache test */
954 /*
955 * Miscellaneous configuration and status parameters.
956 */
957 u_char usrflags; /* Miscellaneous user flags */
958 u_char scsi_mode; /* Current SCSI BUS mode */
959 u_char verbose; /* Verbosity for this controller*/
960
961 /*
962 * CCB lists and queue.
963 */
964 struct sym_ccb **ccbh; /* CCBs hashed by DSA value */
965 /* CCB_HASH_SIZE lists of CCBs */
966 SYM_QUEHEAD free_ccbq; /* Queue of available CCBs */
967 SYM_QUEHEAD busy_ccbq; /* Queue of busy CCBs */
968
969 /*
970 * During error handling and/or recovery,
971 * active CCBs that are to be completed with
972 * error or requeued are moved from the busy_ccbq
973 * to the comp_ccbq prior to completion.
974 */
975 SYM_QUEHEAD comp_ccbq;
976
977 #ifdef SYM_OPT_HANDLE_DEVICE_QUEUEING
978 SYM_QUEHEAD dummy_ccbq;
979 #endif
980
981 /*
982 * IMMEDIATE ARBITRATION (IARB) control.
983 *
984 * We keep track in 'last_cp' of the last CCB that has been
985 * queued to the SCRIPTS processor and clear 'last_cp' when
986 * this CCB completes. If last_cp is not zero at the moment
987 * we queue a new CCB, we set a flag in 'last_cp' that is
988 * used by the SCRIPTS as a hint for setting IARB.
989 	 * We do not set more than 'iarb_max' consecutive hints for
990 	 * IARB in order to leave devices a chance to reselect.
991 	 * By the way, any non-zero value of 'iarb_max' is unfair. :)
992 */
993 #ifdef SYM_CONF_IARB_SUPPORT
994 u_short iarb_max; /* Max. # consecutive IARB hints*/
995 u_short iarb_count; /* Actual # of these hints */
996 struct sym_ccb * last_cp;
997 #endif
998
999 /*
1000 * Command abort handling.
1001 * We need to synchronize tightly with the SCRIPTS
1002 * processor in order to handle things correctly.
1003 */
1004 u_char abrt_msg[4]; /* Message to send buffer */
1005 struct sym_tblmove abrt_tbl; /* Table for the MOV of it */
1006 struct sym_tblsel abrt_sel; /* Sync params for selection */
1007 u_char istat_sem; /* Tells the chip to stop (SEM) */
1008
1009 /*
1010 * 64 bit DMA handling.
1011 */
1012 #if SYM_CONF_DMA_ADDRESSING_MODE != 0
1013 u_char use_dac; /* Use PCI DAC cycles */
1014 #if SYM_CONF_DMA_ADDRESSING_MODE == 2
1015 u_char dmap_dirty; /* Dma segments registers dirty */
1016 u32 dmap_bah[SYM_DMAP_SIZE];/* Segment registers map */
1017 #endif
1018 #endif
1019 };
1020
1021 #if SYM_CONF_DMA_ADDRESSING_MODE == 0
1022 #define use_dac(np) 0
1023 #define set_dac(np) do { } while (0)
1024 #else
1025 #define use_dac(np) (np)->use_dac
1026 #define set_dac(np) (np)->use_dac = 1
1027 #endif
1028
1029 #define HCB_BA(np, lbl) (np->hcb_ba + offsetof(struct sym_hcb, lbl))
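
/*
 * HCB_BA() is the HCB counterpart of CCB_BA(), e.g. (sketch):
 *
 *	u32 msgin_ba = HCB_BA(np, msgin);
 *
 * Note that, unlike CCB_BA(), it does not convert to SCRIPTS byte
 * order.
 */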
1030
1031
1032 /*
1033 * FIRMWARES (sym_fw.c)
1034 */
1035 struct sym_fw * sym_find_firmware(struct sym_chip *chip);
1036 void sym_fw_bind_script(struct sym_hcb *np, u32 *start, int len);
1037
1038 /*
1039 * Driver methods called from O/S specific code.
1040 */
1041 char *sym_driver_name(void);
1042 void sym_print_xerr(struct scsi_cmnd *cmd, int x_status);
1043 int sym_reset_scsi_bus(struct sym_hcb *np, int enab_int);
1044 struct sym_chip *sym_lookup_chip_table(u_short device_id, u_char revision);
1045 #ifdef SYM_OPT_HANDLE_DEVICE_QUEUEING
1046 void sym_start_next_ccbs(struct sym_hcb *np, struct sym_lcb *lp, int maxn);
1047 #else
1048 void sym_put_start_queue(struct sym_hcb *np, struct sym_ccb *cp);
1049 #endif
1050 void sym_start_up(struct Scsi_Host *, int reason);
1051 irqreturn_t sym_interrupt(struct Scsi_Host *);
1052 int sym_clear_tasks(struct sym_hcb *np, int cam_status, int target, int lun, int task);
1053 struct sym_ccb *sym_get_ccb(struct sym_hcb *np, struct scsi_cmnd *cmd, u_char tag_order);
1054 void sym_free_ccb(struct sym_hcb *np, struct sym_ccb *cp);
1055 struct sym_lcb *sym_alloc_lcb(struct sym_hcb *np, u_char tn, u_char ln);
1056 int sym_free_lcb(struct sym_hcb *np, u_char tn, u_char ln);
1057 int sym_queue_scsiio(struct sym_hcb *np, struct scsi_cmnd *csio, struct sym_ccb *cp);
1058 int sym_abort_scsiio(struct sym_hcb *np, struct scsi_cmnd *ccb, int timed_out);
1059 int sym_reset_scsi_target(struct sym_hcb *np, int target);
1060 void sym_hcb_free(struct sym_hcb *np);
1061 int sym_hcb_attach(struct Scsi_Host *shost, struct sym_fw *fw, struct sym_nvram *nvram);
1062
1063 /*
1064 * Build a scatter/gather entry.
1065 *
1066 * For 64 bit systems, we use the 8 upper bits of the size field
1067 * to provide bus address bits 32-39 to the SCRIPTS processor.
1068 * This allows the 895A, 896, 1010 to address up to 1 TB of memory.
1069 */
1070
1071 #if SYM_CONF_DMA_ADDRESSING_MODE == 0
1072 #define DMA_DAC_MASK DMA_BIT_MASK(32)
1073 #define sym_build_sge(np, data, badd, len) \
1074 do { \
1075 (data)->addr = cpu_to_scr(badd); \
1076 (data)->size = cpu_to_scr(len); \
1077 } while (0)
1078 #elif SYM_CONF_DMA_ADDRESSING_MODE == 1
1079 #define DMA_DAC_MASK DMA_BIT_MASK(40)
1080 #define sym_build_sge(np, data, badd, len) \
1081 do { \
1082 (data)->addr = cpu_to_scr(badd); \
1083 (data)->size = cpu_to_scr((((badd) >> 8) & 0xff000000) + len); \
1084 } while (0)
1085 #elif SYM_CONF_DMA_ADDRESSING_MODE == 2
1086 #define DMA_DAC_MASK DMA_BIT_MASK(64)
1087 int sym_lookup_dmap(struct sym_hcb *np, u32 h, int s);
1088 static inline void
1089 sym_build_sge(struct sym_hcb *np, struct sym_tblmove *data, u64 badd, int len)
1090 {
1091 u32 h = (badd>>32);
1092 int s = (h&SYM_DMAP_MASK);
1093
1094 if (h != np->dmap_bah[s])
1095 goto bad;
1096 good:
1097 (data)->addr = cpu_to_scr(badd);
1098 (data)->size = cpu_to_scr((s<<24) + len);
1099 return;
1100 bad:
1101 s = sym_lookup_dmap(np, h, s);
1102 goto good;
1103 }
1104 #else
1105 #error "Unsupported DMA addressing mode"
1106 #endif
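
/*
 * Sketch of filling the SCRIPTS data table from a DMA-mapped
 * scatter list (the functional code in sym_glue.c lays the entries
 * out at the tail of data[]; this only illustrates the call):
 *
 *	struct scatterlist *sg;
 *	int i;
 *
 *	scsi_for_each_sg(cmd, sg, segments, i)
 *		sym_build_sge(np, &cp->phys.data[i],
 *			      sg_dma_address(sg), sg_dma_len(sg));
 */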
1107
1108 /*
1109 * MEMORY ALLOCATOR.
1110 */
1111
1112 #define sym_get_mem_cluster() \
1113 (void *) __get_free_pages(GFP_ATOMIC, SYM_MEM_PAGE_ORDER)
1114 #define sym_free_mem_cluster(p) \
1115 free_pages((unsigned long)p, SYM_MEM_PAGE_ORDER)
1116
1117 /*
1118 * Link between free memory chunks of a given size.
1119 */
1120 typedef struct sym_m_link {
1121 struct sym_m_link *next;
1122 } *m_link_p;
1123
1124 /*
1125 * Virtual to bus physical translation for a given cluster.
1126  * Such a structure is only needed when DMA abstraction is used.
1127 */
1128 typedef struct sym_m_vtob { /* Virtual to Bus address translation */
1129 struct sym_m_vtob *next;
1130 void *vaddr; /* Virtual address */
1131 dma_addr_t baddr; /* Bus physical address */
1132 } *m_vtob_p;
1133
1134 /* Hash this stuff a bit to speed up translations */
1135 #define VTOB_HASH_SHIFT 5
1136 #define VTOB_HASH_SIZE (1UL << VTOB_HASH_SHIFT)
1137 #define VTOB_HASH_MASK (VTOB_HASH_SIZE-1)
1138 #define VTOB_HASH_CODE(m) \
1139 ((((unsigned long)(m)) >> SYM_MEM_CLUSTER_SHIFT) & VTOB_HASH_MASK)
1140
1141 /*
1142 * Memory pool of a given kind.
1143  * Ideally, we want to use:
1144  * 1) 1 pool for memory that does not need to be involved in DMA.
1145  * 2) The same pool for controllers that require the same DMA
1146  *    constraints and features.
1147  * The O/S specific m_pool_ident_t identifier and the
1148  * sym_m_pool_match() method tell the driver which pool to use.
1149 */
1150 typedef struct sym_m_pool {
1151 m_pool_ident_t dev_dmat; /* Identifies the pool (see above) */
1152 void * (*get_mem_cluster)(struct sym_m_pool *);
1153 #ifdef SYM_MEM_FREE_UNUSED
1154 void (*free_mem_cluster)(struct sym_m_pool *, void *);
1155 #endif
1156 #define M_GET_MEM_CLUSTER() mp->get_mem_cluster(mp)
1157 #define M_FREE_MEM_CLUSTER(p) mp->free_mem_cluster(mp, p)
1158 int nump;
1159 m_vtob_p vtob[VTOB_HASH_SIZE];
1160 struct sym_m_pool *next;
1161 struct sym_m_link h[SYM_MEM_CLUSTER_SHIFT - SYM_MEM_SHIFT + 1];
1162 } *m_pool_p;
1163
1164 /*
1165 * Alloc, free and translate addresses to bus physical
1166 * for DMAable memory.
1167 */
1168 void *__sym_calloc_dma(m_pool_ident_t dev_dmat, int size, char *name);
1169 void __sym_mfree_dma(m_pool_ident_t dev_dmat, void *m, int size, char *name);
1170 dma_addr_t __vtobus(m_pool_ident_t dev_dmat, void *m);
1171
1172 /*
1173 * Verbs used by the driver code for DMAable memory handling.
1174 * The _uvptv_ macro avoids a nasty warning about pointer to volatile
1175 * being discarded.
1176 */
1177 #define _uvptv_(p) ((void *)((u_long)(p)))
1178
1179 #define _sym_calloc_dma(np, l, n) __sym_calloc_dma(np->bus_dmat, l, n)
1180 #define _sym_mfree_dma(np, p, l, n) \
1181 __sym_mfree_dma(np->bus_dmat, _uvptv_(p), l, n)
1182 #define sym_calloc_dma(l, n) _sym_calloc_dma(np, l, n)
1183 #define sym_mfree_dma(p, l, n) _sym_mfree_dma(np, p, l, n)
1184 #define vtobus(p) __vtobus(np->bus_dmat, _uvptv_(p))
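
/*
 * Typical use from the core code, as a sketch (the short verbs above
 * assume a local variable named 'np'):
 *
 *	cp = sym_calloc_dma(sizeof(struct sym_ccb), "CCB");
 *	if (!cp)
 *		return NULL;
 *	cp->ccb_ba = vtobus(cp);
 *	...
 *	sym_mfree_dma(cp, sizeof(struct sym_ccb), "CCB");
 */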
1185
1186 /*
1187 * We have to provide the driver memory allocator with methods for
1188 * it to maintain virtual to bus physical address translations.
1189 */
1190
1191 #define sym_m_pool_match(mp_id1, mp_id2) (mp_id1 == mp_id2)
1192
1193 static inline void *sym_m_get_dma_mem_cluster(m_pool_p mp, m_vtob_p vbp)
1194 {
1195 void *vaddr = NULL;
1196 dma_addr_t baddr = 0;
1197
1198 vaddr = dma_alloc_coherent(mp->dev_dmat, SYM_MEM_CLUSTER_SIZE, &baddr,
1199 GFP_ATOMIC);
1200 if (vaddr) {
1201 vbp->vaddr = vaddr;
1202 vbp->baddr = baddr;
1203 }
1204 return vaddr;
1205 }
1206
1207 static inline void sym_m_free_dma_mem_cluster(m_pool_p mp, m_vtob_p vbp)
1208 {
1209 dma_free_coherent(mp->dev_dmat, SYM_MEM_CLUSTER_SIZE, vbp->vaddr,
1210 vbp->baddr);
1211 }
1212
1213 #endif /* SYM_HIPD_H */
1214