xref: /titanic_50/usr/src/uts/common/sys/ddidmareq.h (revision d6114e2d100d9ec3b45f9968d45ac2e3a0827af0)
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#ifndef	_SYS_DDIDMAREQ_H
#define	_SYS_DDIDMAREQ_H

#ifdef	__cplusplus
extern "C" {
#endif

/*
 * Memory Objects
 *
 * Definitions of structures that can describe
 * an object that can be mapped for DMA.
 */

/*
 * Structure describing a virtual address
 */
struct v_address {
	caddr_t		v_addr;		/* base virtual address */
	struct	as	*v_as;		/* pointer to address space */
	void		*v_priv;	/* priv data for shadow I/O */
};

/*
 * Structure describing a page-based address
 */
struct pp_address {
	/*
	 * A pointer to a circularly linked list of page structures.
	 */
	struct page *pp_pp;
	uint_t pp_offset;	/* offset within first page */
};

/*
 * Structure to describe a physical memory address.
 */
struct phy_address {
	ulong_t	p_addr;		/* base physical address */
	ulong_t	p_memtype;	/* memory type */
};

/*
 * A union of all of the above structures.
 *
 * This union describes the relationship between
 * the kind of an address description and an object.
 */
typedef union {
	struct v_address virt_obj;	/* Some virtual address		*/
	struct pp_address pp_obj;	/* Some page-based address	*/
	struct phy_address phys_obj;	/* Some physical address	*/
} ddi_dma_aobj_t;

/*
 * DMA object types - used to select how the object
 * being mapped is being addressed by the IU.
 */
typedef enum {
	DMA_OTYP_VADDR = 0,	/* enforce starting value of zero */
	DMA_OTYP_PAGES,
	DMA_OTYP_PADDR,
	DMA_OTYP_BUFVADDR
} ddi_dma_atyp_t;

/*
 * A compact package to describe an object that is to be mapped for DMA.
 */
typedef struct {
	uint_t		dmao_size;	/* size, in bytes, of the object */
	ddi_dma_atyp_t	dmao_type;	/* type of object */
	ddi_dma_aobj_t	dmao_obj;	/* the object described */
} ddi_dma_obj_t;
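
/*
 * Example (a minimal sketch, not part of this interface): describing a
 * kernel virtual buffer for DMA mapping.  The buffer "kbuf" and its size
 * are hypothetical; a NULL v_as conventionally denotes the kernel's own
 * address space.
 *
 *	ddi_dma_obj_t dmao;
 *
 *	dmao.dmao_size = 8192;
 *	dmao.dmao_type = DMA_OTYP_VADDR;
 *	dmao.dmao_obj.virt_obj.v_addr = kbuf;
 *	dmao.dmao_obj.virt_obj.v_as = NULL;
 *	dmao.dmao_obj.virt_obj.v_priv = NULL;
 */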

/*
 * DMA addressing limits.
 *
 * This structure describes the constraints that a particular device's
 * DMA engine has to its parent so that the parent may correctly set
 * things up for a DMA mapping. Each parent may in turn modify the
 * constraints listed in a DMA request structure in order to describe
 * to its parent any changed or additional constraints. The rules
 * are that each parent may modify a constraint in order to further
 * constrain things (e.g., picking a more limited address range than
 * that permitted by the child), but that the parent may not ignore
 * a child's constraints.
 *
 * A particular constraint that we do *not* address is whether or not
 * a requested mapping is too large for a DMA engine's counter to
 * correctly track. It is still up to each driver to explicitly handle
 * transfers that are too large for its own hardware to deal with directly.
 *
 * The mapping routines that are cognizant of this structure will
 * copy any user defined limits structure if they need to modify
 * the fields (as alluded to above).
 *
 * A note as to how to define constraints:
 *
 * How you define the constraints for your device depends on how you
 * define your device. For example, you may have an SBus card with a
 * device on it that addresses only the bottom 16MB of virtual DMA space.
 * However, if the card also has ancillary circuitry that pulls the high 8
 * bits of address lines high, the more correct expression for your device
 * is that it addresses [0xff000000..0xffffffff] rather than [0..0x00ffffff].
 */
#if defined(__sparc)
typedef struct ddi_dma_lim {

	/*
	 * Low range of 32 bit addressing capability.
	 */
	uint_t	dlim_addr_lo;

	/*
	 * Upper inclusive bound of addressing capability. It is an
	 * inclusive boundary limit to allow for the addressing range
	 * [0..0xffffffff] to be specified in preference to [0..0].
	 */
	uint_t	dlim_addr_hi;

	/*
	 * Inclusive upper bound with which the DMA engine's counter acts as
	 * a register.
	 *
	 * This handles the case where an upper portion of a DMA address
	 * register is a latch instead of being a full 32 bit register
	 * (e.g., the upper 8 bits may remain constant while the lower
	 * 24 bits are the real address register).
	 *
	 * This essentially gives a hint about segment limitations
	 * to the mapping routines.
	 */
	uint_t	dlim_cntr_max;

	/*
	 * DMA burst sizes.
	 *
	 * At the time of a mapping request, this tag defines the possible
	 * DMA burst cycle sizes that the requestor's DMA engine can
	 * emit. The format of the data is binary encoding of burst sizes
	 * assumed to be powers of two. That is, if a DMA engine is capable
	 * of doing 1, 2, 4 and 16 byte transfers, the encoding would be 0x17.
	 *
	 * As the mapping request is handled by intervening nexi, the
	 * burstsizes value may be modified. Prior to enabling DMA for
	 * the specific device, the driver that owns the DMA engine should
	 * check (via ddi_dma_burstsizes(9F)) what the allowed burstsizes
	 * have become and program their DMA engine appropriately.
	 */
	uint_t	dlim_burstsizes;

	/*
	 * Minimum effective DMA transfer size, in units of bytes.
	 *
	 * This value specifies the minimum effective granularity of the
	 * DMA engine. It is distinct from dlim_burstsizes in that it
	 * describes the minimum amount of access a DMA transfer will
	 * effect. dlim_burstsizes describes in what electrical fashion
	 * the DMA engine might perform its accesses, while dlim_minxfer
	 * describes the minimum amount of memory that can be touched by
	 * the DMA transfer.
	 *
	 * As the mapping request is handled by intervening nexi, the
	 * dlim_minxfer value may be modified contingent upon the presence
	 * (and use) of I/O caches and DMA write buffers in between the
	 * DMA engine and the object that DMA is being performed on.
	 */
	uint_t	dlim_minxfer;

	/*
	 * Expected average data rate for this DMA engine
	 * while transferring data.
	 *
	 * This is used as a hint for a number of operations that might
	 * want to know the possible optimal latency requirements of this
	 * device. A value of zero will be interpreted as a 'do not care'.
	 */
	uint_t	dlim_dmaspeed;

} ddi_dma_lim_t;
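
/*
 * Burst size encoding example (a sketch, not part of this interface):
 * each set bit N in dlim_burstsizes denotes support for 2^N byte bursts.
 * A DMA engine capable of 4, 8 and 16 byte bursts would thus advertise
 *
 *	(1 << 2) | (1 << 3) | (1 << 4) == 0x1c
 *
 * and the 1/2/4/16 byte example above encodes as
 *
 *	(1 << 0) | (1 << 1) | (1 << 2) | (1 << 4) == 0x17
 */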

#elif defined(__x86)

/*
 * values for dlim_minxfer
 */
#define	DMA_UNIT_8  1
#define	DMA_UNIT_16 2
#define	DMA_UNIT_32 4

/*
 * Version number
 */
#define	DMALIM_VER0	((0x86000000) + 0)

typedef struct ddi_dma_lim {

	/*
	 * Low range of 32 bit addressing capability.
	 */
	uint_t	dlim_addr_lo;

	/*
	 * Upper inclusive bound of 32 bit addressing capability.
	 *
	 * The ISA nexus restricts this to 0x00ffffff, since this bus has
	 * only 24 address lines.  This enforces the 16 MB address limitation.
	 * The EISA nexus restricts this to 0xffffffff.
	 */
	uint_t	dlim_addr_hi;

	/*
	 * DMA engine counter not used; set to 0
	 */
	uint_t	dlim_cntr_max;

	/*
	 * DMA burst sizes not used; set to 1
	 */
	uint_t	dlim_burstsizes;

	/*
	 * Minimum effective DMA transfer size.
	 *
	 * This value specifies the minimum effective granularity of the
	 * DMA engine. It is distinct from dlim_burstsizes in that it
	 * describes the minimum amount of access a DMA transfer will
	 * effect. dlim_burstsizes describes in what electrical fashion
	 * the DMA engine might perform its accesses, while dlim_minxfer
	 * describes the minimum amount of memory that can be touched by
	 * the DMA transfer.
	 *
	 * This value also implies the required address alignment.
	 * The number of bytes transferred is assumed to be
	 *	dlim_minxfer * (DMA engine count)
	 *
	 * It should be set to DMA_UNIT_8, DMA_UNIT_16, or DMA_UNIT_32.
	 */
	uint_t	dlim_minxfer;

	/*
	 * Expected average data rate for this DMA engine
	 * while transferring data.
	 *
	 * This is used as a hint for a number of operations that might
	 * want to know the possible optimal latency requirements of this
	 * device. A value of zero will be interpreted as a 'do not care'.
	 */
	uint_t	dlim_dmaspeed;

	/*
	 * Version number of this structure
	 */
	uint_t	dlim_version;	/* = (0x86 << 24) + 0 */

	/*
	 * Inclusive upper bound with which the DMA engine's address acts as
	 * a register.
	 * This handles the case where an upper portion of a DMA address
	 * register is a latch instead of being a full 32 bit register
	 * (e.g., the upper 16 bits remain constant while the lower 16 bits
	 * are incremented for each DMA transfer).
	 *
	 * The ISA nexus restricts only 3rd-party DMA requests to 0x0000ffff,
	 * since the ISA DMA engine has a 16-bit register for low address and
	 * an 8-bit latch for high address.  This enforces the first 64 KB
	 * limitation (address boundary).
	 * The EISA nexus restricts only 3rd-party DMA requests to 0xffffffff.
	 */
	uint_t	dlim_adreg_max;

	/*
	 * Maximum transfer count that the DMA engine can handle.
	 *
	 * The ISA nexus restricts only 3rd-party DMA requests to 0x0000ffff,
	 * since the ISA DMA engine has a 16-bit register for counting.
	 * This enforces the other 64 KB limitation (count size).
	 * The EISA nexus restricts only 3rd-party DMA requests to 0x00ffffff,
	 * since the EISA DMA engine has a 24-bit register for counting.
	 *
	 * This transfer count limitation is a per segment limitation.
	 * It can also be used to restrict the size of segments.
	 *
	 * This is used as a bit mask, so it must be a power of 2, minus 1.
	 */
	uint_t	dlim_ctreg_max;

	/*
	 * Granularity of DMA transfer, in units of bytes.
	 *
	 * Breakup sizes must be multiples of this value.
	 * If no scatter/gather capability is specified, then the size of
	 * each DMA transfer must be a multiple of this value.
	 *
	 * If there is scatter/gather capability, then a single cookie cannot
	 * be smaller in size than the minimum xfer value, and may be less
	 * than the granularity value.  The total transfer length of the
	 * scatter/gather list should be a multiple of the granularity value;
	 * use dlim_sgllen to specify the length of the scatter/gather list.
	 *
	 * This value should be equal to the sector size of the device.
	 */
	uint_t	dlim_granular;

	/*
	 * Length of scatter/gather list
	 *
	 * This value specifies the number of segments or cookies that a DMA
	 * engine can consume in one i/o request to the device.  For 3rd-party
	 * DMA that uses the bus nexus this should be set to 1.  Devices with
	 * 1st-party DMA capability should specify the number of entries in
	 * their scatter/gather lists.  The breakup routine will ensure that
	 * each group of dlim_sgllen cookies (within a DMA window) will have a
	 * total transfer length that is a multiple of dlim_granular.
	 *
	 *	< 0  :  tbd
	 *	= 0  :  breakup is for PIO.
	 *	= 1  :  breakup is for DMA engine with no scatter/gather
	 *		capability.
	 *	>= 2 :  breakup is for DMA engine with scatter/gather
	 *		capability; value is max number of entries in list.
	 *
	 * Note that this list length is not dependent on the DMA window
	 * size.  The size of the DMA window is based on resources consumed,
	 * such as intermediate buffers.  Several s/g lists may exist within
	 * a window.  But the end of a window does imply the end of the s/g
	 * list.
	 */
	short	dlim_sgllen;

	/*
	 * Size of device i/o request
	 *
	 * This value indicates the maximum number of bytes the device
	 * can transmit/receive for one i/o command.  This limitation is
	 * significant only if it is less than (dlim_ctreg_max * dlim_sgllen).
	 */
	uint_t	dlim_reqsize;

} ddi_dma_lim_t;
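
/*
 * Example (a hedged sketch; every value below is hypothetical and
 * device-specific): limits for a 1st-party busmaster that can address
 * all 32 bits, gather up to 17 entries, and transfers 512-byte sectors.
 * Field names are shown alongside each initializer for reference.
 *
 *	static ddi_dma_lim_t xx_dma_lim = {
 *		0,		dlim_addr_lo
 *		0xffffffff,	dlim_addr_hi
 *		0,		dlim_cntr_max (engine counter not used)
 *		1,		dlim_burstsizes (not used)
 *		DMA_UNIT_8,	dlim_minxfer
 *		0,		dlim_dmaspeed (do not care)
 *		DMALIM_VER0,	dlim_version
 *		0xffffffff,	dlim_adreg_max
 *		0x00ffffff,	dlim_ctreg_max
 *		512,		dlim_granular
 *		17,		dlim_sgllen
 *		0x00ffffff	dlim_reqsize
 *	};
 */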

#else
#error "struct ddi_dma_lim not defined for this architecture"
#endif	/* defined(__sparc) */

/*
 * Flags definition for dma_attr_flags
 */

/*
 * return physical DMA address on platforms
 * which support DVMA
 */
#define	DDI_DMA_FORCE_PHYSICAL		0x0100

/*
 * An error will be flagged for DMA data path errors
 */
#define	DDI_DMA_FLAGERR			0x200

/*
 * Enable relaxed ordering
 */
#define	DDI_DMA_RELAXED_ORDERING	0x400

#define	DMA_ATTR_V0		0
#define	DMA_ATTR_VERSION	DMA_ATTR_V0

typedef struct ddi_dma_attr {
	uint_t		dma_attr_version;	/* version number */
	uint64_t	dma_attr_addr_lo;	/* low DMA address range */
	uint64_t	dma_attr_addr_hi;	/* high DMA address range */
	uint64_t	dma_attr_count_max;	/* DMA counter register */
	uint64_t	dma_attr_align;		/* DMA address alignment */
	uint_t		dma_attr_burstsizes;	/* DMA burstsizes */
	uint32_t	dma_attr_minxfer;	/* min effective DMA size */
	uint64_t	dma_attr_maxxfer;	/* max DMA xfer size */
	uint64_t	dma_attr_seg;		/* segment boundary */
	int		dma_attr_sgllen;	/* s/g length */
	uint32_t	dma_attr_granular;	/* granularity of device */
	uint_t		dma_attr_flags;		/* Bus specific DMA flags */
} ddi_dma_attr_t;
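
/*
 * Example (a minimal sketch; every value here is hypothetical and
 * device-specific): a static attribute structure for a 32-bit busmaster
 * that transfers 512-byte sectors with no scatter/gather might look like
 *
 *	static ddi_dma_attr_t xx_dma_attr = {
 *		DMA_ATTR_V0,		dma_attr_version
 *		0x0000000000000000ull,	dma_attr_addr_lo
 *		0x00000000ffffffffull,	dma_attr_addr_hi
 *		0x00000000ffffffffull,	dma_attr_count_max
 *		0x1,			dma_attr_align (byte alignment)
 *		0x7f,			dma_attr_burstsizes
 *		0x1,			dma_attr_minxfer
 *		0x00000000ffffffffull,	dma_attr_maxxfer
 *		0x00000000ffffffffull,	dma_attr_seg
 *		1,			dma_attr_sgllen
 *		512,			dma_attr_granular
 *		0			dma_attr_flags
 *	};
 */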

/*
 * Handy macro to set a maximum bit value (should be elsewhere)
 *
 * Clear off all bits lower than 'mybit' in val; if there are no
 * bits higher than or equal to mybit in val then set mybit. Assumes
 * mybit equals some power of 2 and is not zero.
 */
#define	maxbit(val, mybit)	\
	((val) & ~((mybit)-1)) | ((((val) & ~((mybit)-1)) == 0) ? (mybit) : 0)

/*
 * Handy macro to set a minimum bit value (should be elsewhere)
 *
 * Clear off all bits higher than 'mybit' in val; if there are no
 * bits lower than or equal to mybit in val then set mybit. Assumes
 * mybit equals some power of 2 and is not zero.
 */
#define	minbit(val, mybit)	\
	(((val)&((mybit)|((mybit)-1))) | \
	((((val) & ((mybit)-1)) == 0) ? (mybit) : 0))
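
/*
 * For example (a worked sketch): with mybit == 0x4,
 *
 *	maxbit(0x16, 0x4) == 0x14	(bits below 0x4 cleared)
 *	maxbit(0x03, 0x4) == 0x04	(no bits >= 0x4, so 0x4 is set)
 *	minbit(0x16, 0x4) == 0x06	(bits above 0x4 cleared)
 *	minbit(0x18, 0x4) == 0x04	(no bits <= 0x4, so 0x4 is set)
 */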

/*
 * Structure of a request to map an object for DMA.
 */
typedef struct ddi_dma_req {
	/*
	 * Caller's DMA engine constraints.
	 *
	 * If there are no particular constraints to the caller's DMA
	 * engine, this field may be set to NULL. The implementation DMA
	 * setup functions will then select a set of standard beginning
	 * constraints.
	 *
	 * In either case, as the mapping proceeds, the initial DMA
	 * constraints may become more restrictive as each intervening
	 * nexus might add further restrictions.
	 */
	ddi_dma_lim_t	*dmar_limits;

	/*
	 * Contains the information passed to the DMA mapping allocation
	 * routine(s).
	 */
	uint_t		dmar_flags;

	/*
	 * Callback function. By filling in this field, a caller of the
	 * DMA mapping functions specifies whether the allocation routines
	 * can sleep awaiting mapping resources, must *not* sleep awaiting
	 * resources, or may *not* sleep awaiting any resources and must
	 * call the function specified by dmar_fp with the argument
	 * dmar_arg when resources might have become available at a future
	 * time.
	 */
	int		(*dmar_fp)();

	caddr_t		dmar_arg;	/* Callback function argument */

	/*
	 * Description of the object to be mapped for DMA.
	 * Must be last in this structure in case the
	 * union ddi_dma_obj_t changes in the future.
	 */
	ddi_dma_obj_t	dmar_object;

} ddi_dma_req_t;
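
/*
 * Example (a sketch using hypothetical names; see the DDI_DMA_* flag and
 * callback definitions below): a read/write mapping request for the
 * object "dmao" that is willing to sleep for resources:
 *
 *	ddi_dma_req_t req;
 *
 *	req.dmar_limits = NULL;		use standard limits
 *	req.dmar_flags = DDI_DMA_RDWR;
 *	req.dmar_fp = DDI_DMA_SLEEP;
 *	req.dmar_arg = NULL;
 *	req.dmar_object = dmao;
 */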

/*
 * Defines for the DMA mapping allocation functions
 *
 * If a DMA callback function is set to anything other than the following
 * defines then it is assumed that one wishes a callback and is providing
 * a function address.
 */
#ifdef __STDC__
#define	DDI_DMA_DONTWAIT	((int (*)(caddr_t))0)
#define	DDI_DMA_SLEEP		((int (*)(caddr_t))1)
#else
#define	DDI_DMA_DONTWAIT	((int (*)())0)
#define	DDI_DMA_SLEEP		((int (*)())1)
#endif

/*
 * Return values from callback functions.
 */
#define	DDI_DMA_CALLBACK_RUNOUT	0
#define	DDI_DMA_CALLBACK_DONE	1
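
/*
 * Example (a sketch; the driver softc and retry routine are
 * hypothetical): a callback that retries a failed allocation and tells
 * the system whether to call it again when more resources appear:
 *
 *	static int
 *	xx_dma_callback(caddr_t arg)
 *	{
 *		struct xx_softc *xx = (struct xx_softc *)arg;
 *
 *		if (xx_retry_dma_setup(xx) != DDI_SUCCESS)
 *			return (DDI_DMA_CALLBACK_RUNOUT);
 *		return (DDI_DMA_CALLBACK_DONE);
 *	}
 */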

/*
 * Flag definitions for the allocation functions.
 */
#define	DDI_DMA_WRITE		0x0001	/* Direction memory --> IO	*/
#define	DDI_DMA_READ		0x0002	/* Direction IO --> memory	*/
#define	DDI_DMA_RDWR		(DDI_DMA_READ | DDI_DMA_WRITE)

/*
 * If possible, establish a MMU redzone after the mapping (to protect
 * against cheap DMA hardware that might get out of control).
 */
#define	DDI_DMA_REDZONE		0x0004

/*
 * A partial allocation is allowed. That is, if the size of the object
 * exceeds the mapping resources available, only map a portion of the
 * object and return status indicating that this took place. The caller
 * can use the functions ddi_dma_numwin(9F) and ddi_dma_getwin(9F) to
 * change, at a later point, the actual mapped portion of the object.
 *
 * The mapped portion begins at offset 0 of the object.
 */
#define	DDI_DMA_PARTIAL		0x0008

/*
 * Map the object for byte consistent access. Note that explicit
 * synchronization (via ddi_dma_sync(9F)) will still be required.
 * Consider this flag to be a hint to the mapping routines as to
 * the intended use of the mapping.
 *
 * Normal data transfers can usually be considered to use 'streaming'
 * modes of operation. They start at a specific point, transfer a
 * fairly large amount of data sequentially, and then stop (usually
 * on a well aligned boundary).
 *
 * Control mode data transfers (for memory resident device control blocks,
 * e.g., ethernet message descriptors) do not access memory in such
 * a streaming sequential fashion. Instead, they tend to modify a few
 * words or bytes, move around and maybe modify a few more.
 *
 * There are many machine implementations that make this difficult to
 * control in a generic and seamless fashion. Therefore, explicit synch-
 * ronization steps (via ddi_dma_sync(9F)) are still required (even if you
 * ask for a byte-consistent mapping) in order to make the view of the
 * memory object shared between a CPU and a DMA master consistent.
 * However, judicious use of this flag can give sufficient hints to
 * the mapping routines to attempt to pick the most efficacious mapping
 * such that the synchronization steps are as efficient as possible.
 */
#define	DDI_DMA_CONSISTENT	0x0010

/*
 * Some DMA mappings have to be 'exclusive' access.
 */
#define	DDI_DMA_EXCLUSIVE	0x0020

/*
 * Sequential, unidirectional, block-sized and block aligned transfers
 */
#define	DDI_DMA_STREAMING	0x0040

/*
 * Support for 64-bit SBus devices
 */
#define	DDI_DMA_SBUS_64BIT	0x2000
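
/*
 * Example (a sketch of common usage, not a normative rule): a driver
 * might map a descriptor ring that both it and the device update with
 *
 *	DDI_DMA_RDWR | DDI_DMA_CONSISTENT
 *
 * while a large receive buffer that is filled once per transfer might
 * be mapped with
 *
 *	DDI_DMA_READ | DDI_DMA_STREAMING
 */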

/*
 * Return values from the mapping allocation functions.
 */

/*
 * succeeded in satisfying request
 */
#define	DDI_DMA_MAPPED		0

/*
 * Mapping is legitimate (for advisory calls).
 */
#define	DDI_DMA_MAPOK		0

/*
 * Succeeded in mapping a portion of the request.
 */
#define	DDI_DMA_PARTIAL_MAP	1

/*
 * indicates end of window/segment list
 */
#define	DDI_DMA_DONE		2

/*
 * No resources to map request.
 */
#define	DDI_DMA_NORESOURCES	-1

/*
 * Can't establish a mapping to the specified object
 * (no specific reason).
 */
#define	DDI_DMA_NOMAPPING	-2

/*
 * The request is too big to be mapped.
 */
#define	DDI_DMA_TOOBIG		-3

/*
 * The request is too small to be mapped.
 */
#define	DDI_DMA_TOOSMALL	-4

/*
 * The request cannot be mapped because the object
 * is locked against mapping by another DMA master.
 */
#define	DDI_DMA_LOCKED		-5

/*
 * The request cannot be mapped because the limits
 * structure has bogus values.
 */
#define	DDI_DMA_BADLIMITS	-6

/*
 * the segment/window pointer is stale
 */
#define	DDI_DMA_STALE		-7

/*
 * The system can't allocate DMA resources using
 * the given DMA attributes
 */
#define	DDI_DMA_BADATTR		-8

/*
 * A DMA handle is already used for a DMA
 */
#define	DDI_DMA_INUSE		-9

/*
 * DVMA disabled or not supported; use physical DMA
 */
#define	DDI_DMA_USE_PHYSICAL		-10

/*
 * In order for the access to a memory object to be consistent
 * between a device and a CPU, the function ddi_dma_sync(9F)
 * must be called upon the DMA handle. The following flags
 * define whose view of the object should be made consistent.
 * There are different flags here because on different machines
 * there are definite performance implications of how long
 * such synchronization takes.
 *
 * DDI_DMA_SYNC_FORDEV makes all device references to the object
 * mapped by the DMA handle up to date. It should be used by a
 * driver after a cpu modifies the memory object (over the range
 * specified by the other arguments to the ddi_dma_sync(9F) call).
 *
 * DDI_DMA_SYNC_FORCPU makes all cpu references to the object
 * mapped by the DMA handle up to date. It should be used
 * by a driver after the receipt of data from the device to
 * the memory object is done (over the range specified by
 * the other arguments to the ddi_dma_sync(9F) call).
 *
 * If the only mapping that concerns the driver is one for the
 * kernel (such as memory allocated by ddi_iopb_alloc(9F)), the
 * flag DDI_DMA_SYNC_FORKERNEL can be used. This is a hint to the
 * system that if it can synchronize the kernel's view faster
 * than the CPU's view, it may do so; otherwise it acts the
 * same as DDI_DMA_SYNC_FORCPU. DDI_DMA_SYNC_FORKERNEL might
 * speed up the synchronization of kernel mappings in case of
 * non IO-coherent CPU caches.
 */
#define	DDI_DMA_SYNC_FORDEV	0x0
#define	DDI_DMA_SYNC_FORCPU	0x1
#define	DDI_DMA_SYNC_FORKERNEL	0x2
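
/*
 * Example (a sketch; the handle, offset and length are hypothetical):
 * after the CPU updates a descriptor, flush that range to the device
 * before starting the transfer; after the device signals completion,
 * make the received data visible to the CPU:
 *
 *	(void) ddi_dma_sync(handle, off, len, DDI_DMA_SYNC_FORDEV);
 *	... start the device ...
 *	(void) ddi_dma_sync(handle, off, len, DDI_DMA_SYNC_FORCPU);
 */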

/*
 * Bus nexus control functions for DMA
 */

/*
 * Control operations, defined here so that devops.h can be included
 * by drivers without having to include a specific SYSDDI implementation
 * header file.
 */

enum ddi_dma_ctlops {
	DDI_DMA_FREE,		/* free reference to object		*/
	DDI_DMA_SYNC,		/* synchronize cache references		*/
	DDI_DMA_HTOC,		/* return DMA cookie for handle		*/
	DDI_DMA_KVADDR,		/* return kernel virtual address	*/
	DDI_DMA_MOVWIN,		/* change mapped DMA window on object	*/
	DDI_DMA_REPWIN,		/* report current window on DMA object	*/
	DDI_DMA_GETERR,		/* report any post-transfer DMA errors	*/
	DDI_DMA_COFF,		/* convert a DMA cookie to an offset	*/
	DDI_DMA_NEXTWIN,	/* get next window within object	*/
	DDI_DMA_NEXTSEG,	/* get next segment within window	*/
	DDI_DMA_SEGTOC,		/* return segment DMA cookie		*/
	DDI_DMA_RESERVE,	/* reserve some DVMA range		*/
	DDI_DMA_RELEASE,	/* free preallocated DVMA range		*/
	DDI_DMA_RESETH,		/* reset next cookie ptr in handle	*/
	DDI_DMA_CKSYNC,		/* sync intermediate buffer to cookies	*/
	DDI_DMA_IOPB_ALLOC,	/* get contiguous DMA-able memory	*/
	DDI_DMA_IOPB_FREE,	/* return contiguous DMA-able memory	*/
	DDI_DMA_SMEM_ALLOC,	/* get contiguous DMA-able memory	*/
	DDI_DMA_SMEM_FREE,	/* return contiguous DMA-able memory	*/
	DDI_DMA_SET_SBUS64,	/* 64 bit SBus support			*/
	DDI_DMA_REMAP,		/* remap DMA buffers after relocation	*/

	/*
	 * control ops for DMA engine on motherboard
	 */
	DDI_DMA_E_ACQUIRE,	/* get channel for exclusive use	*/
	DDI_DMA_E_FREE,		/* release channel			*/
	DDI_DMA_E_1STPTY,	/* setup channel for 1st party DMA	*/
	DDI_DMA_E_GETCB,	/* get control block for DMA engine	*/
	DDI_DMA_E_FREECB,	/* free control blk for DMA engine	*/
	DDI_DMA_E_PROG,		/* program channel of DMA engine	*/
	DDI_DMA_E_SWSETUP,	/* setup channel for software control	*/
	DDI_DMA_E_SWSTART,	/* software operation of DMA channel	*/
	DDI_DMA_E_ENABLE,	/* enable channel of DMA engine		*/
	DDI_DMA_E_STOP,		/* stop a channel of DMA engine		*/
	DDI_DMA_E_DISABLE,	/* disable channel of DMA engine	*/
	DDI_DMA_E_GETCNT,	/* get remaining xfer count		*/
	DDI_DMA_E_GETLIM,	/* get DMA engine limits		*/
	DDI_DMA_E_GETATTR	/* get DMA engine attributes		*/
};

/*
 * Cache attribute flags:
 *
 * IOMEM_DATA_CACHED
 *   The CPU can cache the data it fetches and push it to memory at a later
 *   time. This is the default attribute and is used if no cache attribute
 *   is specified.
 *
 * IOMEM_DATA_UC_WR_COMBINE
 *   The CPU never caches the data but writes may occur out of order or be
 *   combined. It implies re-ordering.
 *
 * IOMEM_DATA_UNCACHED
 *   The CPU never caches the data and has uncacheable access to memory.
 *   It also implies strict ordering.
 *
 * The cache attributes are mutually exclusive, and any combination of the
 * values leads to a failure. On the sparc architecture, only
 * IOMEM_DATA_CACHED is meaningful; the others lead to a failure.
 */
#define	IOMEM_DATA_CACHED		0x10000 /* data is cached */
#define	IOMEM_DATA_UC_WR_COMBINE	0x20000 /* data is not cached, but */
						/* writes might be combined */
#define	IOMEM_DATA_UNCACHED		0x40000 /* data is not cached. */
#define	IOMEM_DATA_MASK			0xF0000	/* cache attrs mask */

/*
 * Check if either uncacheable or write-combining is specified (the two
 * flags are mutually exclusive). This macro is used to override hat
 * attributes if either one is set.
 */
#define	OVERRIDE_CACHE_ATTR(attr)	\
	(attr & (IOMEM_DATA_UNCACHED | IOMEM_DATA_UC_WR_COMBINE))

/*
 * Get the cache attribute from flags. If no attribute is set,
 * return IOMEM_DATA_CACHED (the default attribute).
 */
#define	IOMEM_CACHE_ATTR(flags)	\
	((flags & IOMEM_DATA_MASK) ? (flags & IOMEM_DATA_MASK) : \
	    IOMEM_DATA_CACHED)
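
/*
 * For example (a worked sketch):
 *
 *	IOMEM_CACHE_ATTR(0)			== IOMEM_DATA_CACHED
 *	IOMEM_CACHE_ATTR(IOMEM_DATA_UNCACHED)	== IOMEM_DATA_UNCACHED
 *	OVERRIDE_CACHE_ATTR(IOMEM_DATA_CACHED)	== 0 (no override)
 */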

#ifdef	__cplusplus
}
#endif

#endif	/* _SYS_DDIDMAREQ_H */