/*
 * Device driver for the SYMBIOS/LSILOGIC 53C8XX and 53C1010 family
 * of PCI-SCSI IO processors.
 *
 * Copyright (C) 1999-2001  Gerard Roudier <groudier@free.fr>
 *
 * This driver is derived from the Linux sym53c8xx driver.
 * Copyright (C) 1998-2000  Gerard Roudier
 *
 * The sym53c8xx driver is derived from the ncr53c8xx driver that had been
 * a port of the FreeBSD ncr driver to Linux-1.2.13.
 *
 * The original ncr driver has been written for 386bsd and FreeBSD by
 *         Wolfgang Stanglmeier        <wolf@cologne.de>
 *         Stefan Esser                <se@mi.Uni-Koeln.de>
 * Copyright (C) 1994  Wolfgang Stanglmeier
 *
 * Other major contributions:
 *
 * NVRAM detection and reading.
 * Copyright (C) 1997 Richard Waltham <dormouse@farsrobt.demon.co.uk>
 *
 *-----------------------------------------------------------------------------
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

#include <linux/gfp.h>

#ifndef SYM_HIPD_H
#define SYM_HIPD_H

/*
 *  Generic driver options.
 *
 *  They may be defined in platform specific headers, if they
 *  are useful.
 *
 *    SYM_OPT_HANDLE_DEVICE_QUEUEING
 *        When this option is set, the driver will use a queue per
 *        device and handle QUEUE FULL status requeuing internally.
 *
 *    SYM_OPT_LIMIT_COMMAND_REORDERING
 *        When this option is set, the driver tries to limit tagged
 *        command reordering to some reasonable value.
 *        (set for Linux)
 */
#if 0
#define SYM_OPT_HANDLE_DEVICE_QUEUEING
#define SYM_OPT_LIMIT_COMMAND_REORDERING
#endif

/*
 *  Active debugging tags and verbosity.
 *  Both DEBUG_FLAGS and sym_verbose can be redefined
 *  by the platform specific code to something else.
 */
#define DEBUG_ALLOC	(0x0001)
#define DEBUG_PHASE	(0x0002)
#define DEBUG_POLL	(0x0004)
#define DEBUG_QUEUE	(0x0008)
#define DEBUG_RESULT	(0x0010)
#define DEBUG_SCATTER	(0x0020)
#define DEBUG_SCRIPT	(0x0040)
#define DEBUG_TINY	(0x0080)
#define DEBUG_TIMING	(0x0100)
#define DEBUG_NEGO	(0x0200)
#define DEBUG_TAGS	(0x0400)
#define DEBUG_POINTER	(0x0800)

#ifndef DEBUG_FLAGS
#define DEBUG_FLAGS	(0x0000)
#endif

#ifndef sym_verbose
#define sym_verbose	(np->verbose)
#endif

/*
 *  These should already have been defined elsewhere.
 */
#ifndef assert
#define	assert(expression) { \
	if (!(expression)) { \
		(void)panic( \
			"assertion \"%s\" failed: file \"%s\", line %d\n", \
			#expression, \
			__FILE__, __LINE__); \
	} \
}
#endif

/*
 *  Number of tasks per device we want to handle.
 */
#if	SYM_CONF_MAX_TAG_ORDER > 8
#error	"more than 256 tags per logical unit not allowed."
#endif
#define	SYM_CONF_MAX_TASK	(1<<SYM_CONF_MAX_TAG_ORDER)

/*
 *  Do not use more tags than tasks we can handle.
 */
#ifndef	SYM_CONF_MAX_TAG
#define	SYM_CONF_MAX_TAG	SYM_CONF_MAX_TASK
#endif
#if	SYM_CONF_MAX_TAG > SYM_CONF_MAX_TASK
#undef	SYM_CONF_MAX_TAG
#define	SYM_CONF_MAX_TAG	SYM_CONF_MAX_TASK
#endif

/*
 *    This one means 'NO TAG for this job'
 */
#define NO_TAG	(256)

/*
 *  Number of SCSI targets.
 */
#if	SYM_CONF_MAX_TARGET > 16
#error	"more than 16 targets not allowed."
#endif

/*
 *  Number of logical units per target.
 */
#if	SYM_CONF_MAX_LUN > 64
#error	"more than 64 logical units per target not allowed."
#endif

/*
 *    Asynchronous pre-scaler (ns). Shall be 40 for
 *    the SCSI timings to be compliant.
 */
#define	SYM_CONF_MIN_ASYNC (40)


/*
 * MEMORY ALLOCATOR.
 */

#define SYM_MEM_WARN	1	/* Warn on failed operations */

#define SYM_MEM_PAGE_ORDER 0	/* 1 PAGE  maximum */
#define SYM_MEM_CLUSTER_SHIFT	(PAGE_SHIFT+SYM_MEM_PAGE_ORDER)
#define SYM_MEM_FREE_UNUSED	/* Free unused pages immediately */
/*
 *  Shortest memory chunk is (1<<SYM_MEM_SHIFT), currently 16 bytes.
 *  Actual allocations are done one SYM_MEM_CLUSTER_SIZE cluster
 *  (i.e. 1 PAGE) at a time.
 */
#define SYM_MEM_SHIFT	4
#define SYM_MEM_CLUSTER_SIZE	(1UL << SYM_MEM_CLUSTER_SHIFT)
#define SYM_MEM_CLUSTER_MASK	(SYM_MEM_CLUSTER_SIZE-1)
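
/*
 *  For example (illustrative numbers only; PAGE_SHIFT is platform
 *  dependent): with 4 KB pages and SYM_MEM_PAGE_ORDER 0, a cluster is
 *  4096 bytes, and the allocator carves power-of-two chunks of
 *  16, 32, 64, ... up to 4096 bytes out of whole clusters.
 */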

/*
 *  Number of entries in the START and DONE queues.
 *
 *  We limit each queue to 1 PAGE so that its allocation is likely
 *  to succeed. Each entry is 8 bytes long (2 DWORDS).
 */
#ifdef	SYM_CONF_MAX_START
#define	SYM_CONF_MAX_QUEUE (SYM_CONF_MAX_START+2)
#else
#define	SYM_CONF_MAX_QUEUE (7*SYM_CONF_MAX_TASK+2)
#define	SYM_CONF_MAX_START (SYM_CONF_MAX_QUEUE-2)
#endif

#if	SYM_CONF_MAX_QUEUE > SYM_MEM_CLUSTER_SIZE/8
#undef	SYM_CONF_MAX_QUEUE
#define	SYM_CONF_MAX_QUEUE (SYM_MEM_CLUSTER_SIZE/8)
#undef	SYM_CONF_MAX_START
#define	SYM_CONF_MAX_START (SYM_CONF_MAX_QUEUE-2)
#endif
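
/*
 *  Worked example (assuming 4 KB clusters): the cap above is
 *  4096/8 = 512 queue entries, i.e. at most 510 simultaneously
 *  started commands, since two slots are reserved.
 */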

/*
 *  For this one, we want a short name :-)
 */
#define MAX_QUEUE	SYM_CONF_MAX_QUEUE

/*
 *  Common definitions for both bus space based and legacy IO methods.
 */

#define INB_OFF(np, o)		ioread8(np->s.ioaddr + (o))
#define INW_OFF(np, o)		ioread16(np->s.ioaddr + (o))
#define INL_OFF(np, o)		ioread32(np->s.ioaddr + (o))

#define OUTB_OFF(np, o, val)	iowrite8((val), np->s.ioaddr + (o))
#define OUTW_OFF(np, o, val)	iowrite16((val), np->s.ioaddr + (o))
#define OUTL_OFF(np, o, val)	iowrite32((val), np->s.ioaddr + (o))

#define INB(np, r)		INB_OFF(np, offsetof(struct sym_reg, r))
#define INW(np, r)		INW_OFF(np, offsetof(struct sym_reg, r))
#define INL(np, r)		INL_OFF(np, offsetof(struct sym_reg, r))

#define OUTB(np, r, v)		OUTB_OFF(np, offsetof(struct sym_reg, r), (v))
#define OUTW(np, r, v)		OUTW_OFF(np, offsetof(struct sym_reg, r), (v))
#define OUTL(np, r, v)		OUTL_OFF(np, offsetof(struct sym_reg, r), (v))

#define OUTONB(np, r, m)	OUTB(np, r, INB(np, r) | (m))
#define OUTOFFB(np, r, m)	OUTB(np, r, INB(np, r) & ~(m))
#define OUTONW(np, r, m)	OUTW(np, r, INW(np, r) | (m))
#define OUTOFFW(np, r, m)	OUTW(np, r, INW(np, r) & ~(m))
#define OUTONL(np, r, m)	OUTL(np, r, INL(np, r) | (m))
#define OUTOFFL(np, r, m)	OUTL(np, r, INL(np, r) & ~(m))

/*
 *  We normally want the chip to have a consistent view
 *  of driver internal data structures when we restart it.
 *  Hence these macros, which issue a memory write barrier
 *  before kicking the SCRIPTS processor.
 */
#define OUTL_DSP(np, v)				\
	do {					\
		MEMORY_WRITE_BARRIER();		\
		OUTL(np, nc_dsp, (v));		\
	} while (0)

#define OUTONB_STD()				\
	do {					\
		MEMORY_WRITE_BARRIER();		\
		OUTONB(np, nc_dcntl, (STD|NOCOM));	\
	} while (0)
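
/*
 *  Illustrative sketch (not part of the driver): after patching in-memory
 *  SCRIPTS data, a restart would typically go through OUTL_DSP() so the
 *  barrier orders the memory updates before the chip fetches them.
 *  SCRIPTA_BA() and 'start' are placeholders for a real SCRIPTS entry.
 */
#if 0
static inline void example_restart_scripts(struct sym_hcb *np)
{
	/* ... update queues / CCB fields in memory here ... */
	OUTL_DSP(np, SCRIPTA_BA(np, start));	/* barrier, then write DSP */
}
#endif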

/*
 *  Command control block states.
 */
#define HS_IDLE		(0)
#define HS_BUSY		(1)
#define HS_NEGOTIATE	(2)	/* sync/wide data transfer */
#define HS_DISCONNECT	(3)	/* Disconnected by target */
#define HS_WAIT		(4)	/* waiting for resource	  */

#define HS_DONEMASK	(0x80)
#define HS_COMPLETE	(4|HS_DONEMASK)
#define HS_SEL_TIMEOUT	(5|HS_DONEMASK)	/* Selection timeout      */
#define HS_UNEXPECTED	(6|HS_DONEMASK)	/* Unexpected disconnect  */
#define HS_COMP_ERR	(7|HS_DONEMASK)	/* Completed with error	  */

/*
 *  Software Interrupt Codes
 */
#define	SIR_BAD_SCSI_STATUS	(1)
#define	SIR_SEL_ATN_NO_MSG_OUT	(2)
#define	SIR_MSG_RECEIVED	(3)
#define	SIR_MSG_WEIRD		(4)
#define	SIR_NEGO_FAILED		(5)
#define	SIR_NEGO_PROTO		(6)
#define	SIR_SCRIPT_STOPPED	(7)
#define	SIR_REJECT_TO_SEND	(8)
#define	SIR_SWIDE_OVERRUN	(9)
#define	SIR_SODL_UNDERRUN	(10)
#define	SIR_RESEL_NO_MSG_IN	(11)
#define	SIR_RESEL_NO_IDENTIFY	(12)
#define	SIR_RESEL_BAD_LUN	(13)
#define	SIR_TARGET_SELECTED	(14)
#define	SIR_RESEL_BAD_I_T_L	(15)
#define	SIR_RESEL_BAD_I_T_L_Q	(16)
#define	SIR_ABORT_SENT		(17)
#define	SIR_RESEL_ABORTED	(18)
#define	SIR_MSG_OUT_DONE	(19)
#define	SIR_COMPLETE_ERROR	(20)
#define	SIR_DATA_OVERRUN	(21)
#define	SIR_BAD_PHASE		(22)
#if	SYM_CONF_DMA_ADDRESSING_MODE == 2
#define	SIR_DMAP_DIRTY		(23)
#define	SIR_MAX			(23)
#else
#define	SIR_MAX			(22)
#endif

/*
 *  Extended error bit codes.
 *  xerr_status field of struct sym_ccb.
 */
#define	XE_EXTRA_DATA	(1)	/* unexpected data phase	 */
#define	XE_BAD_PHASE	(1<<1)	/* illegal phase (4/5)		 */
#define	XE_PARITY_ERR	(1<<2)	/* unrecovered SCSI parity error */
#define	XE_SODL_UNRUN	(1<<3)	/* ODD transfer in DATA OUT phase */
#define	XE_SWIDE_OVRUN	(1<<4)	/* ODD transfer in DATA IN phase */

/*
 *  Negotiation status.
 *  nego_status field of struct sym_ccb.
 */
#define NS_SYNC		(1)
#define NS_WIDE		(2)
#define NS_PPR		(3)

/*
 *  A hashed table of CCBs is used to retrieve a CCB address
 *  from a DSA value.
 */
#define CCB_HASH_SHIFT		8
#define CCB_HASH_SIZE		(1UL << CCB_HASH_SHIFT)
#define CCB_HASH_MASK		(CCB_HASH_SIZE-1)
#if 1
#define CCB_HASH_CODE(dsa)	\
	(((dsa) >> (_LGRU16_(sizeof(struct sym_ccb)))) & CCB_HASH_MASK)
#else
#define CCB_HASH_CODE(dsa)	(((dsa) >> 9) & CCB_HASH_MASK)
#endif
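
/*
 *  Sketch of how the hash is meant to be used (the real lookup lives in
 *  sym_hipd.c; this is illustrative only): hash the DSA, then walk the
 *  per-bucket chain linked through 'link_ccbh' until the CCB whose bus
 *  address matches is found.
 */
#if 0
static inline struct sym_ccb *example_ccb_from_dsa(struct sym_hcb *np, u32 dsa)
{
	struct sym_ccb *cp = np->ccbh[CCB_HASH_CODE(dsa)];

	while (cp && cp->ccb_ba != dsa)	/* follow the hash chain */
		cp = cp->link_ccbh;
	return cp;
}
#endif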

#if	SYM_CONF_DMA_ADDRESSING_MODE == 2
/*
 *  We may want to use segment registers for 64 bit DMA.
 *  16 segment registers -> up to 64 GB addressable.
 */
#define SYM_DMAP_SHIFT	(4)
#define SYM_DMAP_SIZE	(1u<<SYM_DMAP_SHIFT)
#define SYM_DMAP_MASK	(SYM_DMAP_SIZE-1)
#endif

/*
 *  Device flags.
 */
#define SYM_DISC_ENABLED	(1)
#define SYM_TAGS_ENABLED	(1<<1)
#define SYM_SCAN_BOOT_DISABLED	(1<<2)
#define SYM_SCAN_LUNS_DISABLED	(1<<3)

/*
 *  Host adapter miscellaneous flags.
 */
#define SYM_AVOID_BUS_RESET	(1)

/*
 *  Misc.
 */
#define SYM_SNOOP_TIMEOUT (10000000)
#define BUS_8_BIT	0
#define BUS_16_BIT	1

/*
 *  Gathered values of the negotiable transfer parameters.
 */
struct sym_trans {
	u8 period;
	u8 offset;
	unsigned int width:1;
	unsigned int iu:1;
	unsigned int dt:1;
	unsigned int qas:1;
	unsigned int check_nego:1;
	unsigned int renego:2;
};

/*
 *  Global TCB HEADER.
 *
 *  Due to lack of indirect addressing on earlier NCR chips,
 *  this substructure is copied from the TCB to a global
 *  address after selection.
 *  For SYMBIOS chips that support LOAD/STORE this copy is
 *  not needed and thus not performed.
 */
struct sym_tcbh {
	/*
	 *  Scripts bus addresses of the LUN table accessed from SCRIPTS.
	 *  LUN #0 is a special case: since multi-lun devices are rare,
	 *  we want to speed up the general case and not waste resources.
	 */
	u32	luntbl_sa;	/* bus address of this table	*/
	u32	lun0_sa;	/* bus address of LCB #0	*/
	/*
	 *  Actual SYNC/WIDE IO registers value for this target.
	 *  'sval', 'wval' and 'uval' are read from SCRIPTS and
	 *  so have alignment constraints.
	 */
/*0*/	u_char	uval;		/* -> SCNTL4 register		*/
/*1*/	u_char	sval;		/* -> SXFER  io register	*/
/*2*/	u_char	filler1;
/*3*/	u_char	wval;		/* -> SCNTL3 io register	*/
};

/*
 *  Target Control Block
 */
struct sym_tcb {
	/*
	 *  TCB header.
	 *  Assumed at offset 0.
	 */
/*0*/	struct sym_tcbh head;

	/*
	 *  LUN table used by the SCRIPTS processor.
	 *  An array of bus addresses is used on reselection.
	 */
	u32	*luntbl;	/* LCBs bus address table	*/
	int	nlcb;		/* Number of valid LCBs (including LUN #0) */

	/*
	 *  LUN table used by the C code.
	 */
	struct sym_lcb *lun0p;		/* LCB of LUN #0 (usual case)	*/
#if SYM_CONF_MAX_LUN > 1
	struct sym_lcb **lunmp;		/* Other LCBs [1..MAX_LUN]	*/
#endif

#ifdef	SYM_HAVE_STCB
	/*
	 *  O/S specific data structure.
	 */
	struct sym_stcb s;
#endif

	/* Transfer goal */
	struct sym_trans tgoal;

	/* Last printed transfer speed */
	struct sym_trans tprint;

	/*
	 * Keep track of the CCB used for the negotiation in order
	 * to ensure that only 1 negotiation is queued at a time.
	 */
	struct sym_ccb *  nego_cp;	/* CCB used for the nego		*/

	/*
	 *  Set when we want to reset the device.
	 */
	u_char	to_reset;

	/*
	 *  Other user settable limits and options.
	 *  These limits are read from the NVRAM if present.
	 */
	unsigned char	usrflags;
	unsigned char	usr_period;
	unsigned char	usr_width;
	unsigned short	usrtags;
	struct scsi_target *starget;
};

/*
 *  Global LCB HEADER.
 *
 *  Due to lack of indirect addressing on earlier NCR chips,
 *  this substructure is copied from the LCB to a global
 *  address after selection.
 *  For SYMBIOS chips that support LOAD/STORE this copy is
 *  not needed and thus not performed.
 */
struct sym_lcbh {
	/*
	 *  SCRIPTS address jumped to by SCRIPTS on reselection.
	 *  For logical units that have not been probed, this address
	 *  points to SCRIPTS that deal with bad LU handling (and must
	 *  be at offset zero of the LCB for that reason).
	 */
/*0*/	u32	resel_sa;

	/*
	 *  Task (bus address of a CCB) read from SCRIPTS that points
	 *  to the unique ITL nexus allowed to be disconnected.
	 */
	u32	itl_task_sa;

	/*
	 *  Task table bus address (read from SCRIPTS).
	 */
	u32	itlq_tbl_sa;
};

/*
 *  Logical Unit Control Block
 */
struct sym_lcb {
	/*
	 *  LCB header.
	 *  Assumed at offset 0.
	 */
/*0*/	struct sym_lcbh head;

	/*
	 *  Task table read from SCRIPTS that contains pointers to
	 *  ITLQ nexuses. The bus address read from SCRIPTS is
	 *  inside the header.
	 */
	u32	*itlq_tbl;	/* Kernel virtual address	*/

	/*
	 *  Busy CCBs management.
	 */
	u_short	busy_itlq;	/* Number of busy tagged CCBs	*/
	u_short	busy_itl;	/* Number of busy untagged CCBs	*/

	/*
	 *  Circular tag allocation buffer.
	 */
	u_short	ia_tag;		/* Tag allocation index		*/
	u_short	if_tag;		/* Tag release index		*/
	u_char	*cb_tags;	/* Circular tags buffer		*/

	/*
	 *  O/S specific data structure.
	 */
#ifdef	SYM_HAVE_SLCB
	struct sym_slcb s;
#endif

#ifdef SYM_OPT_HANDLE_DEVICE_QUEUEING
	/*
	 *  Optionally the driver can handle device queueing,
	 *  and internally requeue commands that must be redone.
	 */
	SYM_QUEHEAD waiting_ccbq;
	SYM_QUEHEAD started_ccbq;
	int	num_sgood;
	u_short	started_tags;
	u_short	started_no_tag;
	u_short	started_max;
	u_short	started_limit;
#endif

#ifdef SYM_OPT_LIMIT_COMMAND_REORDERING
	/*
	 *  Optionally the driver can try to prevent SCSI
	 *  IOs from being reordered too much.
	 */
	u_char		tags_si;	/* Current index to tags sum	*/
	u_short		tags_sum[2];	/* Tags sum counters		*/
	u_short		tags_since;	/* # of tags since last switch	*/
#endif

	/*
	 *  Set when we want to clear all tasks.
	 */
	u_char to_clear;

	/*
	 *  Capabilities.
	 */
	u_char	user_flags;
	u_char	curr_flags;
};

/*
 *  Action from SCRIPTS on a task.
 *  It is part of the CCB, but is also used separately to plug in
 *  the error handling actions to be performed from SCRIPTS.
 */
struct sym_actscr {
	u32	start;		/* Jumped by SCRIPTS after selection	*/
	u32	restart;	/* Jumped by SCRIPTS on reselection	*/
};

/*
 *  Phase mismatch context.
 *
 *  It is part of the CCB and is used as parameters for the
 *  DATA pointer. We need two contexts to handle correctly the
 *  SAVED DATA POINTER.
 */
struct sym_pmc {
	struct	sym_tblmove sg;	/* Updated interrupted SG block	*/
	u32	ret;		/* SCRIPT return address	*/
};

/*
 *  LUN control block lookup.
 *  We use a direct pointer for LUN #0, and a table of
 *  pointers which is only allocated for devices that support
 *  LUN(s) > 0.
 */
#if SYM_CONF_MAX_LUN <= 1
#define sym_lp(tp, lun) (!lun) ? (tp)->lun0p : NULL
#else
#define sym_lp(tp, lun) \
	(!lun) ? (tp)->lun0p : (tp)->lunmp ? (tp)->lunmp[((u8)lun)] : NULL
#endif
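
/*
 *  Usage sketch (illustrative only): callers typically fetch the LCB for
 *  a given target/lun pair this way before touching per-LUN state; a NULL
 *  result means the logical unit has no LCB (e.g. not yet allocated).
 */
#if 0
static inline u_short example_busy_tags(struct sym_hcb *np, int target, int lun)
{
	struct sym_tcb *tp = &np->target[target];
	struct sym_lcb *lp = sym_lp(tp, lun);

	return lp ? lp->busy_itlq : 0;	/* tagged commands in flight */
}
#endif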

/*
 *  The status bytes are used by both the host and the script processor.
 *
 *  The four status bytes (status[4]) are copied to the
 *  scratchb register (declared as scr0..scr3) just after the
 *  select/reselect, and copied back just after disconnecting.
 *  Inside the script the XX_REG names are used.
 */

/*
 *  Last four bytes (script)
 */
#define  HX_REG	scr0
#define  HX_PRT	nc_scr0
#define  HS_REG	scr1
#define  HS_PRT	nc_scr1
#define  SS_REG	scr2
#define  SS_PRT	nc_scr2
#define  HF_REG	scr3
#define  HF_PRT	nc_scr3

/*
 *  Last four bytes (host)
 */
#define  host_xflags   phys.head.status[0]
#define  host_status   phys.head.status[1]
#define  ssss_status   phys.head.status[2]
#define  host_flags    phys.head.status[3]

/*
 *  Host flags
 */
#define HF_IN_PM0	1u
#define HF_IN_PM1	(1u<<1)
#define HF_ACT_PM	(1u<<2)
#define HF_DP_SAVED	(1u<<3)
#define HF_SENSE	(1u<<4)
#define HF_EXT_ERR	(1u<<5)
#define HF_DATA_IN	(1u<<6)
#ifdef SYM_CONF_IARB_SUPPORT
#define HF_HINT_IARB	(1u<<7)
#endif

/*
 *  More host flags
 */
#if	SYM_CONF_DMA_ADDRESSING_MODE == 2
#define	HX_DMAP_DIRTY	(1u<<7)
#endif

/*
 *  Global CCB HEADER.
 *
 *  Due to lack of indirect addressing on earlier NCR chips,
 *  this substructure is copied from the ccb to a global
 *  address after selection (or reselection) and copied back
 *  before disconnect.
 *  For SYMBIOS chips that support LOAD/STORE this copy is
 *  not needed and thus not performed.
 */

struct sym_ccbh {
	/*
	 *  Start and restart SCRIPTS addresses (must be at 0).
	 */
/*0*/	struct sym_actscr go;

	/*
	 *  SCRIPTS jump addresses that deal with data pointers.
	 *  'savep' points to the position in the script responsible
	 *  for the actual transfer of data.
	 *  It is written on reception of a SAVE_DATA_POINTER message.
	 */
	u32	savep;		/* Jump address to saved data pointer	*/
	u32	lastp;		/* SCRIPTS address at end of data	*/

	/*
	 *  Status fields.
	 */
	u8	status[4];
};

/*
 *  GET/SET the value of the data pointer used by SCRIPTS.
 *
 *  We must distinguish between the LOAD/STORE-based SCRIPTS
 *  that use directly the header in the CCB, and the NCR-GENERIC
 *  SCRIPTS that use the copy of the header in the HCB.
 */
#if	SYM_CONF_GENERIC_SUPPORT
#define sym_set_script_dp(np, cp, dp)				\
	do {							\
		if (np->features & FE_LDSTR)			\
			cp->phys.head.lastp = cpu_to_scr(dp);	\
		else						\
			np->ccb_head.lastp = cpu_to_scr(dp);	\
	} while (0)
#define sym_get_script_dp(np, cp)				\
	scr_to_cpu((np->features & FE_LDSTR) ?			\
		cp->phys.head.lastp : np->ccb_head.lastp)
#else
#define sym_set_script_dp(np, cp, dp)				\
	do {							\
		cp->phys.head.lastp = cpu_to_scr(dp);		\
	} while (0)

#define sym_get_script_dp(np, cp) (cp->phys.head.lastp)
#endif
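
/*
 *  Illustrative sketch (not the driver's actual message handling): a
 *  RESTORE POINTERS condition would rewind the SCRIPTS data pointer to
 *  the value held in 'savep' using the accessor above, so both the
 *  LOAD/STORE and the NCR-GENERIC variants are covered transparently.
 */
#if 0
static inline void example_restore_pointers(struct sym_hcb *np, struct sym_ccb *cp)
{
	sym_set_script_dp(np, cp, scr_to_cpu(cp->phys.head.savep));
}
#endif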

/*
 *  Data Structure Block
 *
 *  During execution of a ccb by the script processor, the
 *  DSA (data structure address) register points to this
 *  substructure of the ccb.
 */
struct sym_dsb {
	/*
	 *  CCB header.
	 *  Also assumed at offset 0 of the sym_ccb structure.
	 */
/*0*/	struct sym_ccbh head;

	/*
	 *  Phase mismatch contexts.
	 *  We need two to handle correctly the SAVED DATA POINTER.
	 *  MUST BOTH BE AT OFFSET < 256, due to using 8 bit arithmetic
	 *  for address calculation from SCRIPTS.
	 */
	struct sym_pmc pm0;
	struct sym_pmc pm1;

	/*
	 *  Table data for Script
	 */
	struct sym_tblsel  select;
	struct sym_tblmove smsg;
	struct sym_tblmove smsg_ext;
	struct sym_tblmove cmd;
	struct sym_tblmove sense;
	struct sym_tblmove wresid;
	struct sym_tblmove data [SYM_CONF_MAX_SG];
};

/*
 *  Our Command Control Block
 */
struct sym_ccb {
	/*
	 *  This is the data structure which is pointed to by the DSA
	 *  register when the CCB is executed by the script processor.
	 *  It must be the first entry.
	 */
	struct sym_dsb phys;

	/*
	 *  Pointer to CAM ccb and related stuff.
	 */
	struct scsi_cmnd *cmd;	/* CAM scsiio ccb		*/
	u8	cdb_buf[16];	/* Copy of CDB			*/
#define	SYM_SNS_BBUF_LEN 32
	u8	sns_bbuf[SYM_SNS_BBUF_LEN]; /* Bounce buffer for sense data */
	int	data_len;	/* Total data length		*/
	int	segments;	/* Number of SG segments	*/

	u8	order;		/* Tag type (if tagged command)	*/
	unsigned char odd_byte_adjustment;	/* odd-sized req on wide bus */

	u_char	nego_status;	/* Negotiation status		*/
	u_char	xerr_status;	/* Extended error flags		*/
	u32	extra_bytes;	/* Extraneous bytes transferred	*/

	/*
	 *  Message areas.
	 *  We prepare a message to be sent after selection.
	 *  We may use a second one if the command is rescheduled
	 *  due to CHECK_CONDITION or COMMAND TERMINATED.
	 *  Contents are IDENTIFY and SIMPLE_TAG.
	 *  While negotiating sync or wide transfer,
	 *  a SDTR or WDTR message is appended.
	 */
	u_char	scsi_smsg [12];
	u_char	scsi_smsg2[12];

	/*
	 *  Auto request sense related fields.
	 */
	u_char	sensecmd[6];	/* Request Sense command	*/
	u_char	sv_scsi_status;	/* Saved SCSI status 		*/
	u_char	sv_xerr_status;	/* Saved extended status	*/
	int	sv_resid;	/* Saved residual		*/

	/*
	 *  Other fields.
	 */
	u32	ccb_ba;		/* BUS address of this CCB	*/
	u_short	tag;		/* Tag for this transfer	*/
				/*  NO_TAG means no tag		*/
	u_char	target;
	u_char	lun;
	struct sym_ccb *link_ccbh;	/* Host adapter CCB hash chain	*/
	SYM_QUEHEAD link_ccbq;	/* Link to free/busy CCB queue	*/
	u32	startp;		/* Initial data pointer		*/
	u32	goalp;		/* Expected last data pointer	*/
	int	ext_sg;		/* Extreme data pointer, used	*/
	int	ext_ofs;	/*  to calculate the residual.	*/
#ifdef SYM_OPT_HANDLE_DEVICE_QUEUEING
	SYM_QUEHEAD link2_ccbq;	/* Link for device queueing	*/
	u_char	started;	/* CCB queued to the squeue	*/
#endif
	u_char	to_abort;	/* Want this IO to be aborted	*/
#ifdef SYM_OPT_LIMIT_COMMAND_REORDERING
	u_char	tags_si;	/* Lun tags sum index (0,1)	*/
#endif
};

#define CCB_BA(cp,lbl)	cpu_to_scr(cp->ccb_ba + offsetof(struct sym_ccb, lbl))

typedef struct device *m_pool_ident_t;

/*
 *  Host Control Block
 */
struct sym_hcb {
	/*
	 *  Global headers.
	 *  Due to their limited addressing capabilities, earlier
	 *  chips (810, 815, 825) copy part of the data structures
	 *  (CCB, TCB and LCB) into these fixed areas.
	 */
#if	SYM_CONF_GENERIC_SUPPORT
	struct sym_ccbh	ccb_head;
	struct sym_tcbh	tcb_head;
	struct sym_lcbh	lcb_head;
#endif
	/*
	 *  Idle task and invalid task actions and
	 *  their bus addresses.
	 */
	struct sym_actscr idletask, notask, bad_itl, bad_itlq;
	u32 idletask_ba, notask_ba, bad_itl_ba, bad_itlq_ba;

	/*
	 *  Dummy lun table to protect us against targets
	 *  returning a bad lun number on reselection.
	 */
	u32	*badluntbl;	/* Table physical address	*/
	u32	badlun_sa;	/* SCRIPT handler BUS address	*/

	/*
	 *  Bus address of this host control block.
	 */
	u32	hcb_ba;

	/*
	 *  Bits 32-63 of the on-chip RAM bus address in LE format.
	 *  The START_RAM64 script loads the MMRS and MMWS from this
	 *  field.
	 */
	u32	scr_ram_seg;

	/*
	 *  Initial value of some IO register bits.
	 *  These values are assumed to have been set by BIOS, and may
	 *  be used to probe adapter implementation differences.
	 */
	u_char	sv_scntl0, sv_scntl3, sv_dmode, sv_dcntl, sv_ctest3, sv_ctest4,
		sv_ctest5, sv_gpcntl, sv_stest2, sv_stest4, sv_scntl4,
		sv_stest1;

	/*
	 *  Actual initial value of IO register bits used by the
	 *  driver. They are loaded at initialisation according to
	 *  features that are to be enabled/disabled.
	 */
	u_char	rv_scntl0, rv_scntl3, rv_dmode, rv_dcntl, rv_ctest3, rv_ctest4,
		rv_ctest5, rv_stest2, rv_ccntl0, rv_ccntl1, rv_scntl4;

	/*
	 *  Target data.
	 */
	struct sym_tcb	target[SYM_CONF_MAX_TARGET];

	/*
	 *  Target control block bus address array used by the SCRIPT
	 *  on reselection.
	 */
	u32		*targtbl;
	u32		targtbl_ba;

	/*
	 *  DMA pool handle for this HBA.
	 */
	m_pool_ident_t	bus_dmat;

	/*
	 *  O/S specific data structure
	 */
	struct sym_shcb s;

	/*
	 *  Physical bus addresses of the chip.
	 */
	u32		mmio_ba;	/* MMIO 32 bit BUS address	*/
	u32		ram_ba;		/* RAM 32 bit BUS address	*/

	/*
	 *  SCRIPTS virtual and physical bus addresses.
	 *  'script'  is loaded in the on-chip RAM if present.
	 *  'scripth' stays in main memory for all chips except the
	 *  53C895A, 53C896 and 53C1010 that provide 8K on-chip RAM.
	 */
	u_char		*scripta0;	/* Copy of scripts A, B, Z	*/
	u_char		*scriptb0;
	u_char		*scriptz0;
	u32		scripta_ba;	/* Actual scripts A, B, Z	*/
	u32		scriptb_ba;	/* 32 bit bus addresses.	*/
	u32		scriptz_ba;
	u_short		scripta_sz;	/* Actual size of script A, B, Z*/
	u_short		scriptb_sz;
	u_short		scriptz_sz;

	/*
	 *  Bus addresses, setup and patch methods for
	 *  the selected firmware.
	 */
	struct sym_fwa_ba fwa_bas;	/* Useful SCRIPTA bus addresses	*/
	struct sym_fwb_ba fwb_bas;	/* Useful SCRIPTB bus addresses	*/
	struct sym_fwz_ba fwz_bas;	/* Useful SCRIPTZ bus addresses	*/
	void		(*fw_setup)(struct sym_hcb *np, struct sym_fw *fw);
	void		(*fw_patch)(struct Scsi_Host *);
	char		*fw_name;

	/*
	 *  General controller parameters and configuration.
	 */
	u_int	features;	/* Chip features map		*/
	u_char	myaddr;		/* SCSI id of the adapter	*/
	u_char	maxburst;	/* log base 2 of dwords burst	*/
	u_char	maxwide;	/* Maximum transfer width	*/
	u_char	minsync;	/* Min sync period factor (ST)	*/
	u_char	maxsync;	/* Max sync period factor (ST)	*/
	u_char	maxoffs;	/* Max scsi offset        (ST)	*/
	u_char	minsync_dt;	/* Min sync period factor (DT)	*/
	u_char	maxsync_dt;	/* Max sync period factor (DT)	*/
	u_char	maxoffs_dt;	/* Max scsi offset        (DT)	*/
	u_char	multiplier;	/* Clock multiplier (1,2,4)	*/
	u_char	clock_divn;	/* Number of clock divisors	*/
	u32	clock_khz;	/* SCSI clock frequency in KHz	*/
	u32	pciclk_khz;	/* Estimated PCI clock  in KHz	*/
	/*
	 *  Start queue management.
	 *  It is filled up by the host processor and accessed by the
	 *  SCRIPTS processor in order to start SCSI commands.
	 */
	volatile		/* Prevent code optimizations	*/
	u32	*squeue;	/* Start queue virtual address	*/
	u32	squeue_ba;	/* Start queue BUS address	*/
	u_short	squeueput;	/* Next free slot of the queue	*/
	u_short	actccbs;	/* Number of allocated CCBs	*/

	/*
	 *  Command completion queue.
	 *  It is the same size as the start queue to avoid overflow.
	 */
	u_short	dqueueget;	/* Next position to scan	*/
	volatile		/* Prevent code optimizations	*/
	u32	*dqueue;	/* Completion (done) queue	*/
	u32	dqueue_ba;	/* Done queue BUS address	*/

	/*
	 *  Miscellaneous buffers accessed by the scripts-processor.
	 *  They shall be DWORD aligned, because they may be read or
	 *  written with a script command.
	 */
	u_char		msgout[8];	/* Buffer for MESSAGE OUT 	*/
	u_char		msgin [8];	/* Buffer for MESSAGE IN	*/
	u32		lastmsg;	/* Last SCSI message sent	*/
	u32		scratch;	/* Scratch for SCSI receive	*/
					/* Also used for cache test 	*/
	/*
	 *  Miscellaneous configuration and status parameters.
	 */
	u_char		usrflags;	/* Miscellaneous user flags	*/
	u_char		scsi_mode;	/* Current SCSI BUS mode	*/
	u_char		verbose;	/* Verbosity for this controller*/

	/*
	 *  CCB lists and queue.
	 */
	struct sym_ccb **ccbh;		/* CCBs hashed by DSA value	*/
					/* CCB_HASH_SIZE lists of CCBs	*/
	SYM_QUEHEAD	free_ccbq;	/* Queue of available CCBs	*/
	SYM_QUEHEAD	busy_ccbq;	/* Queue of busy CCBs		*/

	/*
	 *  During error handling and/or recovery,
	 *  active CCBs that are to be completed with
	 *  error or requeued are moved from the busy_ccbq
	 *  to the comp_ccbq prior to completion.
	 */
	SYM_QUEHEAD	comp_ccbq;

#ifdef SYM_OPT_HANDLE_DEVICE_QUEUEING
	SYM_QUEHEAD	dummy_ccbq;
#endif

	/*
	 *  IMMEDIATE ARBITRATION (IARB) control.
	 *
	 *  We keep track in 'last_cp' of the last CCB that has been
	 *  queued to the SCRIPTS processor and clear 'last_cp' when
	 *  this CCB completes. If last_cp is not zero at the moment
	 *  we queue a new CCB, we set a flag in 'last_cp' that is
	 *  used by the SCRIPTS as a hint for setting IARB.
	 *  We do not set more than 'iarb_max' consecutive hints for
	 *  IARB in order to leave devices a chance to reselect.
	 *  By the way, any non zero value of 'iarb_max' is unfair. :)
	 */
#ifdef SYM_CONF_IARB_SUPPORT
	u_short		iarb_max;	/* Max. # consecutive IARB hints*/
	u_short		iarb_count;	/* Actual # of these hints	*/
	struct sym_ccb *	last_cp;
#endif

	/*
	 *  Command abort handling.
	 *  We need to synchronize tightly with the SCRIPTS
	 *  processor in order to handle things correctly.
	 */
	u_char		abrt_msg[4];	/* Message to send buffer	*/
	struct sym_tblmove abrt_tbl;	/* Table for the MOV of it 	*/
	struct sym_tblsel  abrt_sel;	/* Sync params for selection	*/
	u_char		istat_sem;	/* Tells the chip to stop (SEM)	*/

	/*
	 *  64 bit DMA handling.
	 */
#if	SYM_CONF_DMA_ADDRESSING_MODE != 0
	u_char	use_dac;		/* Use PCI DAC cycles		*/
#if	SYM_CONF_DMA_ADDRESSING_MODE == 2
	u_char	dmap_dirty;		/* Dma segments registers dirty	*/
	u32	dmap_bah[SYM_DMAP_SIZE];/* Segment registers map	*/
#endif
#endif
};

#if SYM_CONF_DMA_ADDRESSING_MODE == 0
#define use_dac(np)	0
#define set_dac(np)	do { } while (0)
#else
#define use_dac(np)	(np)->use_dac
#define set_dac(np)	(np)->use_dac = 1
#endif

#define HCB_BA(np, lbl)	(np->hcb_ba + offsetof(struct sym_hcb, lbl))


/*
 *  FIRMWARES (sym_fw.c)
 */
struct sym_fw * sym_find_firmware(struct sym_chip *chip);
void sym_fw_bind_script(struct sym_hcb *np, u32 *start, int len);

/*
 *  Driver methods called from O/S specific code.
 */
char *sym_driver_name(void);
void sym_print_xerr(struct scsi_cmnd *cmd, int x_status);
int sym_reset_scsi_bus(struct sym_hcb *np, int enab_int);
struct sym_chip *sym_lookup_chip_table(u_short device_id, u_char revision);
#ifdef SYM_OPT_HANDLE_DEVICE_QUEUEING
void sym_start_next_ccbs(struct sym_hcb *np, struct sym_lcb *lp, int maxn);
#else
void sym_put_start_queue(struct sym_hcb *np, struct sym_ccb *cp);
#endif
void sym_start_up(struct Scsi_Host *, int reason);
irqreturn_t sym_interrupt(struct Scsi_Host *);
int sym_clear_tasks(struct sym_hcb *np, int cam_status, int target, int lun, int task);
struct sym_ccb *sym_get_ccb(struct sym_hcb *np, struct scsi_cmnd *cmd, u_char tag_order);
void sym_free_ccb(struct sym_hcb *np, struct sym_ccb *cp);
struct sym_lcb *sym_alloc_lcb(struct sym_hcb *np, u_char tn, u_char ln);
int sym_free_lcb(struct sym_hcb *np, u_char tn, u_char ln);
int sym_queue_scsiio(struct sym_hcb *np, struct scsi_cmnd *csio, struct sym_ccb *cp);
int sym_abort_scsiio(struct sym_hcb *np, struct scsi_cmnd *ccb, int timed_out);
int sym_reset_scsi_target(struct sym_hcb *np, int target);
void sym_hcb_free(struct sym_hcb *np);
int sym_hcb_attach(struct Scsi_Host *shost, struct sym_fw *fw, struct sym_nvram *nvram);

/*
 *  Build a scatter/gather entry.
 *
 *  For 64 bit systems, we use the 8 upper bits of the size field
 *  to provide bus address bits 32-39 to the SCRIPTS processor.
 *  This allows the 895A, 896, 1010 to address up to 1 TB of memory.
 */

#if   SYM_CONF_DMA_ADDRESSING_MODE == 0
#define DMA_DAC_MASK	DMA_BIT_MASK(32)
#define sym_build_sge(np, data, badd, len)	\
do {						\
	(data)->addr = cpu_to_scr(badd);	\
	(data)->size = cpu_to_scr(len);		\
} while (0)
#elif SYM_CONF_DMA_ADDRESSING_MODE == 1
#define DMA_DAC_MASK	DMA_BIT_MASK(40)
#define sym_build_sge(np, data, badd, len)				\
do {									\
	(data)->addr = cpu_to_scr(badd);				\
	(data)->size = cpu_to_scr((((badd) >> 8) & 0xff000000) + len);	\
} while (0)
#elif SYM_CONF_DMA_ADDRESSING_MODE == 2
#define DMA_DAC_MASK	DMA_BIT_MASK(64)
int sym_lookup_dmap(struct sym_hcb *np, u32 h, int s);
static inline void
sym_build_sge(struct sym_hcb *np, struct sym_tblmove *data, u64 badd, int len)
{
	u32 h = (badd>>32);
	int s = (h&SYM_DMAP_MASK);

	if (h != np->dmap_bah[s])
		goto bad;
good:
	(data)->addr = cpu_to_scr(badd);
	(data)->size = cpu_to_scr((s<<24) + len);
	return;
bad:
	s = sym_lookup_dmap(np, h, s);
	goto good;
}
#else
#error "Unsupported DMA addressing mode"
#endif
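
/*
 *  Usage sketch (illustrative only; the real scatter routine lives in the
 *  glue code and lays the entries out to suit the SCRIPTS): each mapped
 *  scatterlist segment is turned into one sym_tblmove entry through
 *  sym_build_sge(), which also encodes the extra address bits when a
 *  64 bit addressing mode is configured.
 */
#if 0
static int example_fill_sg(struct sym_hcb *np, struct sym_ccb *cp,
			   struct scsi_cmnd *cmd, int use_sg)
{
	struct scatterlist *sg;
	int i;

	if (use_sg > SYM_CONF_MAX_SG)
		return -1;
	scsi_for_each_sg(cmd, sg, use_sg, i) {
		sym_build_sge(np, &cp->phys.data[i],
			      sg_dma_address(sg), sg_dma_len(sg));
		cp->data_len += sg_dma_len(sg);
	}
	return use_sg;
}
#endif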

/*
 *  MEMORY ALLOCATOR.
 */

#define sym_get_mem_cluster()	\
	(void *) __get_free_pages(GFP_ATOMIC, SYM_MEM_PAGE_ORDER)
#define sym_free_mem_cluster(p)	\
	free_pages((unsigned long)p, SYM_MEM_PAGE_ORDER)

/*
 *  Link between free memory chunks of a given size.
 */
typedef struct sym_m_link {
	struct sym_m_link *next;
} *m_link_p;

/*
 *  Virtual to bus physical translation for a given cluster.
 *  Such a structure is only useful with DMA abstraction.
 */
typedef struct sym_m_vtob {	/* Virtual to Bus address translation */
	struct sym_m_vtob *next;
	void *vaddr;		/* Virtual address */
	dma_addr_t baddr;	/* Bus physical address */
} *m_vtob_p;

/* Hash this stuff a bit to speed up translations */
#define VTOB_HASH_SHIFT		5
#define VTOB_HASH_SIZE		(1UL << VTOB_HASH_SHIFT)
#define VTOB_HASH_MASK		(VTOB_HASH_SIZE-1)
#define VTOB_HASH_CODE(m)	\
	((((unsigned long)(m)) >> SYM_MEM_CLUSTER_SHIFT) & VTOB_HASH_MASK)
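
/*
 *  Lookup sketch (illustrative only; the real translation is done in
 *  sym_malloc.c): hash the virtual address down to its cluster, walk the
 *  per-bucket sym_m_vtob chain of the pool, and rebuild the bus address
 *  from the cluster's baddr plus the offset within the cluster.
 */
#if 0
static inline dma_addr_t example_vtobus(struct sym_m_pool *mp, void *m)
{
	unsigned long a = (unsigned long)m & ~SYM_MEM_CLUSTER_MASK;
	struct sym_m_vtob *vp = mp->vtob[VTOB_HASH_CODE(m)];

	while (vp && vp->vaddr != (void *)a)
		vp = vp->next;
	return vp ? vp->baddr + ((unsigned long)m - a) : 0;
}
#endif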

/*
 *  Memory pool of a given kind.
 *  Ideally, we want to use:
 *  1) one pool for memory we do not need to involve in DMA, and
 *  2) the same pool for controllers that require the same DMA
 *     constraints and features.
 *     The OS specific m_pool_ident_t identifier and the
 *     sym_m_pool_match() method are expected to tell the driver
 *     which case applies.
 */
typedef struct sym_m_pool {
	m_pool_ident_t	dev_dmat;	/* Identifies the pool (see above) */
	void * (*get_mem_cluster)(struct sym_m_pool *);
#ifdef	SYM_MEM_FREE_UNUSED
	void (*free_mem_cluster)(struct sym_m_pool *, void *);
#endif
#define M_GET_MEM_CLUSTER()		mp->get_mem_cluster(mp)
#define M_FREE_MEM_CLUSTER(p)		mp->free_mem_cluster(mp, p)
	int nump;
	m_vtob_p vtob[VTOB_HASH_SIZE];
	struct sym_m_pool *next;
	struct sym_m_link h[SYM_MEM_CLUSTER_SHIFT - SYM_MEM_SHIFT + 1];
} *m_pool_p;

/*
 *  Alloc, free and translate addresses to bus physical
 *  for DMAable memory.
 */
void *__sym_calloc_dma(m_pool_ident_t dev_dmat, int size, char *name);
void __sym_mfree_dma(m_pool_ident_t dev_dmat, void *m, int size, char *name);
dma_addr_t __vtobus(m_pool_ident_t dev_dmat, void *m);

/*
 * Verbs used by the driver code for DMAable memory handling.
 * The _uvptv_ macro avoids a nasty warning about the volatile
 * qualifier of a pointer being discarded.
 */
#define _uvptv_(p) ((void *)((u_long)(p)))

#define _sym_calloc_dma(np, l, n)	__sym_calloc_dma(np->bus_dmat, l, n)
#define _sym_mfree_dma(np, p, l, n)	\
			__sym_mfree_dma(np->bus_dmat, _uvptv_(p), l, n)
#define sym_calloc_dma(l, n)		_sym_calloc_dma(np, l, n)
#define sym_mfree_dma(p, l, n)		_sym_mfree_dma(np, p, l, n)
#define vtobus(p)			__vtobus(np->bus_dmat, _uvptv_(p))

/*
 *  We have to provide the driver memory allocator with methods for
 *  it to maintain virtual to bus physical address translations.
 */

#define sym_m_pool_match(mp_id1, mp_id2)	(mp_id1 == mp_id2)

static inline void *sym_m_get_dma_mem_cluster(m_pool_p mp, m_vtob_p vbp)
{
	void *vaddr = NULL;
	dma_addr_t baddr = 0;

	vaddr = dma_alloc_coherent(mp->dev_dmat, SYM_MEM_CLUSTER_SIZE, &baddr,
			GFP_ATOMIC);
	if (vaddr) {
		vbp->vaddr = vaddr;
		vbp->baddr = baddr;
	}
	return vaddr;
}

static inline void sym_m_free_dma_mem_cluster(m_pool_p mp, m_vtob_p vbp)
{
	dma_free_coherent(mp->dev_dmat, SYM_MEM_CLUSTER_SIZE, vbp->vaddr,
			vbp->baddr);
}

#endif /* SYM_HIPD_H */