xref: /titanic_44/usr/src/uts/common/sys/ddidmareq.h (revision 7c478bd95313f5f23a4c958a745db2134aa03244)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License, Version 1.0 only
6  * (the "License").  You may not use this file except in compliance
7  * with the License.
8  *
9  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10  * or http://www.opensolaris.org/os/licensing.
11  * See the License for the specific language governing permissions
12  * and limitations under the License.
13  *
14  * When distributing Covered Code, include this CDDL HEADER in each
15  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16  * If applicable, add the following below this CDDL HEADER, with the
17  * fields enclosed by brackets "[]" replaced with your own identifying
18  * information: Portions Copyright [yyyy] [name of copyright owner]
19  *
20  * CDDL HEADER END
21  */
22 /*
23  * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #ifndef	_SYS_DDIDMAREQ_H
28 #define	_SYS_DDIDMAREQ_H
29 
30 #pragma ident	"%Z%%M%	%I%	%E% SMI"
31 
32 #ifdef	__cplusplus
33 extern "C" {
34 #endif
35 
36 /*
37  * Memory Objects
38  *
39  * Definitions of structures that can describe
40  * an object that can be mapped for DMA.
41  */
42 
/*
 * Structure describing a virtual address.
 *
 * Used when the object to be mapped for DMA is named by a virtual
 * address within some address space (see DMA_OTYP_VADDR below).
 */
struct v_address {
	caddr_t		v_addr;		/* base virtual address of the object */
	struct	as	*v_as;		/* address space v_addr belongs to */
	void 		*v_priv;	/* private data for shadow I/O */
};
51 
/*
 * Structure describing a page-based address.
 *
 * Used when the object to be mapped for DMA is named by a list of
 * pages (see DMA_OTYP_PAGES below).
 */
struct pp_address {
	/*
	 * A pointer to a circularly linked list of page structures.
	 */
	struct page *pp_pp;
	uint_t pp_offset;	/* byte offset within the first page */
};
62 
/*
 * Structure to describe a physical memory address.
 *
 * Used when the object to be mapped for DMA is named directly by a
 * physical address (see DMA_OTYP_PADDR below).
 */
struct phy_address {
	ulong_t	p_addr;		/* base physical address */
	ulong_t	p_memtype;	/* memory type */
};
70 
/*
 * A union of all of the above structures.
 *
 * This union describes the relationship between
 * the kind of an address description and an object.
 * The arm in use is selected by the ddi_dma_atyp_t tag that
 * accompanies this union in ddi_dma_obj_t (below).
 */
typedef union {
	struct v_address virt_obj;	/* Some virtual address		*/
	struct pp_address pp_obj;	/* Some page-based address	*/
	struct phy_address phys_obj;	/* Some physical address	*/
} ddi_dma_aobj_t;
82 
/*
 * DMA object types - used to select how the object
 * being mapped is being addressed by the IU.
 * Each value selects the corresponding arm of ddi_dma_aobj_t.
 */
typedef enum {
	DMA_OTYP_VADDR = 0,	/* virtual address; enforce starting value of zero */
	DMA_OTYP_PAGES,		/* page list (struct pp_address) */
	DMA_OTYP_PADDR,		/* physical address (struct phy_address) */
	DMA_OTYP_BUFVADDR	/* virtual address taken from a buffer -- */
				/* NOTE(review): presumably buf(9S); confirm */
} ddi_dma_atyp_t;
93 
/*
 * A compact package to describe an object that is to be mapped for DMA.
 */
typedef struct {
	uint_t		dmao_size;	/* size, in bytes, of the object */
	ddi_dma_atyp_t	dmao_type;	/* type of object (selects dmao_obj arm) */
	ddi_dma_aobj_t	dmao_obj;	/* the object described */
} ddi_dma_obj_t;
102 
103 /*
104  * DMA addressing limits.
105  *
106  * This structure describes the constraints that a particular device's
107  * DMA engine has to its parent so that the parent may correctly set
108  * things up for a DMA mapping. Each parent may in turn modify the
109  * constraints listed in a DMA request structure in order to describe
110  * to its parent any changed or additional constraints. The rules
111  * are that each parent may modify a constraint in order to further
112  * constrain things (e.g., picking a more limited address range than
113  * that permitted by the child), but that the parent may not ignore
114  * a child's constraints.
115  *
116  * A particular constraint that we do *not* address is whether or not
117  * a requested mapping is too large for a DMA engine's counter to
118  * correctly track. It is still up to each driver to explicitly handle
119  * transfers that are too large for its own hardware to deal with directly.
120  *
121  * The mapping routines that are cognizant of this structure will
122  * copy any user defined limits structure if they need to modify
123  * the fields (as alluded to above).
124  *
125  * A note as to how to define constraints:
126  *
127  * How you define the constraints for your device depends on how you
128  * define your device. For example, you may have an SBus card with a
 * device on it that addresses only the bottom 16mb of virtual DMA space.
 * However, if the card also has ancillary circuitry that pulls the high 8
 * bits of address lines high, the more correct expression for your device
 * is that it addresses [0xff000000..0xffffffff] rather than [0..0x00ffffff].
133  */
134 #if defined(__sparc)
typedef struct ddi_dma_lim {

	/*
	 * Low range of 32 bit addressing capability.
	 */
	uint_t	dlim_addr_lo;

	/*
	 * Upper inclusive bound of addressing capability. It is an
	 * inclusive boundary limit to allow for the addressing range
	 * [0..0xffffffff] to be specified in preference to [0..0].
	 */
	uint_t	dlim_addr_hi;

	/*
	 * Inclusive upper bound with which the DMA engine's counter acts as
	 * a register.
	 *
	 * This handles the case where an upper portion of a DMA address
	 * register is a latch instead of being a full 32 bit register
	 * (e.g., the upper 8 bits may remain constant while the lower
	 * 24 bits are the real address register).
	 *
	 * This essentially gives a hint about segment limitations
	 * to the mapping routines.
	 */
	uint_t	dlim_cntr_max;

	/*
	 * DMA burst sizes.
	 *
	 * At the time of a mapping request, this tag defines the possible
	 * DMA burst cycle sizes that the requestor's DMA engine can
	 * emit. The format of the data is binary encoding of burst sizes
	 * assumed to be powers of two. That is, if a DMA engine is capable
	 * of doing 1, 2, 4 and 16 byte transfers, the encoding would be 0x17.
	 *
	 * As the mapping request is handled by intervening nexi, the
	 * burstsizes value may be modified. Prior to enabling DMA for
	 * the specific device, the driver that owns the DMA engine should
	 * check (via ddi_dma_burstsizes(9F)) what the allowed burstsizes
	 * have become and program their DMA engine appropriately.
	 */
	uint_t	dlim_burstsizes;

	/*
	 * Minimum effective DMA transfer size, in units of bytes.
	 *
	 * This value specifies the minimum effective granularity of the
	 * DMA engine. It is distinct from dlim_burstsizes in that it
	 * describes the minimum amount of access a DMA transfer will
	 * effect. dlim_burstsizes describes in what electrical fashion
	 * the DMA engine might perform its accesses, while dlim_minxfer
	 * describes the minimum amount of memory that can be touched by
	 * the DMA transfer.
	 *
	 * As the mapping request is handled by intervening nexi, the
	 * dlim_minxfer value may be modified contingent upon the presence
	 * (and use) of I/O caches and DMA write buffers in between the
	 * DMA engine and the object that DMA is being performed on.
	 */
	uint_t	dlim_minxfer;

	/*
	 * Expected average data rate for this DMA engine
	 * while transferring data.
	 *
	 * This is used as a hint for a number of operations that might
	 * want to know the possible optimal latency requirements of this
	 * device. A value of zero will be interpreted as a 'do not care'.
	 */
	uint_t	dlim_dmaspeed;

} ddi_dma_lim_t;
210 
211 #elif defined(__x86)
212 
/*
 * Values for dlim_minxfer: the minimum effective DMA transfer
 * unit, in bytes (see dlim_minxfer in ddi_dma_lim below).
 */
#define	DMA_UNIT_8  1
#define	DMA_UNIT_16 2
#define	DMA_UNIT_32 4

/*
 * Version number for the x86 ddi_dma_lim structure (dlim_version).
 */
#define	DMALIM_VER0	((0x86000000) + 0)
224 
typedef struct ddi_dma_lim {

	/*
	 * Low range of 32 bit addressing capability.
	 */
	uint_t	dlim_addr_lo;

	/*
	 * Upper inclusive bound of 32 bit addressing capability.
	 *
	 * The ISA nexus restricts this to 0x00ffffff, since this bus has
	 * only 24 address lines.  This enforces the 16 Mb address limitation.
	 * The EISA nexus restricts this to 0xffffffff.
	 */
	uint_t	dlim_addr_hi;

	/*
	 * DMA engine counter not used; set to 0
	 */
	uint_t	dlim_cntr_max;

	/*
	 *  DMA burst sizes not used; set to 1
	 */
	uint_t	dlim_burstsizes;

	/*
	 * Minimum effective DMA transfer size.
	 *
	 * This value specifies the minimum effective granularity of the
	 * DMA engine. It is distinct from dlim_burstsizes in that it
	 * describes the minimum amount of access a DMA transfer will
	 * effect. dlim_burstsizes describes in what electrical fashion
	 * the DMA engine might perform its accesses, while dlim_minxfer
	 * describes the minimum amount of memory that can be touched by
	 * the DMA transfer.
	 *
	 * This value also implies the required address alignment.
	 * The number of bytes transferred is assumed to be
	 * 	dlim_minxfer * (DMA engine count)
	 *
	 * It should be set to DMA_UNIT_8, DMA_UNIT_16, or DMA_UNIT_32.
	 */
	uint_t	dlim_minxfer;

	/*
	 * Expected average data rate for this DMA engine
	 * while transferring data.
	 *
	 * This is used as a hint for a number of operations that might
	 * want to know the possible optimal latency requirements of this
	 * device. A value of zero will be interpreted as a 'do not care'.
	 */
	uint_t	dlim_dmaspeed;


	/*
	 * Version number of this structure; set to DMALIM_VER0,
	 * i.e. (0x86 << 24) + 0.
	 */
	uint_t	dlim_version;

	/*
	 * Inclusive upper bound with which the DMA engine's Address acts as
	 * a register.
	 * This handles the case where an upper portion of a DMA address
	 * register is a latch instead of being a full 32 bit register
	 * (e.g., the upper 16 bits remain constant while the lower 16 bits
	 * are incremented for each DMA transfer).
	 *
	 * The ISA nexus restricts only 3rd-party DMA requests to 0x0000ffff,
	 * since the ISA DMA engine has a 16-bit register for low address and
	 * an 8-bit latch for high address.  This enforces the first 64 Kb
	 * limitation (address boundary).
	 * The EISA nexus restricts only 3rd-party DMA requests to 0xffffffff.
	 */
	uint_t	dlim_adreg_max;

	/*
	 * Maximum transfer count that the DMA engine can handle.
	 *
	 * The ISA nexus restricts only 3rd-party DMA requests to 0x0000ffff,
	 * since the ISA DMA engine has a 16-bit register for counting.
	 * This enforces the other 64 Kb limitation (count size).
	 * The EISA nexus restricts only 3rd-party DMA requests to 0x00ffffff,
	 * since the EISA DMA engine has a 24-bit register for counting.
	 *
	 * This transfer count limitation is a per segment limitation.
	 * It can also be used to restrict the size of segments.
	 *
	 * This is used as a bit mask, so it must be a power of 2, minus 1.
	 */
	uint_t	dlim_ctreg_max;

	/*
	 * Granularity of DMA transfer, in units of bytes.
	 *
	 * Breakup sizes must be multiples of this value.
	 * If no scatter/gather capability is specified, then the size of
	 * each DMA transfer must be a multiple of this value.
	 *
	 * If there is scatter/gather capability, then a single cookie cannot
	 * be smaller in size than the minimum xfer value, and may be less
	 * than the granularity value.  The total transfer length of the
	 * scatter/gather list should be a multiple of the granularity value;
	 * use dlim_sgllen to specify the length of the scatter/gather list.
	 *
	 * This value should be equal to the sector size of the device.
	 */
	uint_t	dlim_granular;

	/*
	 * Length of scatter/gather list
	 *
	 * This value specifies the number of segments or cookies that a DMA
	 * engine can consume in one i/o request to the device.  For 3rd-party
	 * DMA that uses the bus nexus this should be set to 1.  Devices with
	 * 1st-party DMA capability should specify the number of entries in
	 * its scatter/gather list.  The breakup routine will ensure that each
	 * group of dlim_sgllen cookies (within a DMA window) will have a
	 * total transfer length that is a multiple of dlim_granular.
	 *
	 *	< 0  :  tbd
	 *	= 0  :  breakup is for PIO.
	 *	= 1  :  breakup is for DMA engine with no scatter/gather
	 *		capability.
	 *	>= 2 :  breakup is for DMA engine with scatter/gather
	 *		capability; value is max number of entries in list.
	 *
	 * Note that this list length is not dependent on the DMA window
	 * size.  The size of the DMA window is based on resources consumed,
	 * such as intermediate buffers.  Several s/g lists may exist within
	 * a window.  But the end of a window does imply the end of the s/g
	 * list.
	 */
	short	dlim_sgllen;

	/*
	 * Size of device i/o request
	 *
	 * This value indicates the maximum number of bytes the device
	 * can transmit/receive for one i/o command.  This limitation is
	 * significant only if it is less than (dlim_ctreg_max * dlim_sgllen).
	 */
	uint_t	dlim_reqsize;

} ddi_dma_lim_t;
371 
372 #else
373 #error "struct ddi_dma_lim not defined for this architecture"
374 #endif	/* defined(__sparc) */
375 
/*
 * Flags definition for dma_attr_flags
 */

/*
 * return physical DMA address on platforms
 * which support DVMA
 */
#define	DDI_DMA_FORCE_PHYSICAL		0x0100

/*
 * An error will be flagged for DMA data path errors
 */
#define	DDI_DMA_FLAGERR			0x200

/*
 * Version numbers for the ddi_dma_attr structure below
 * (stored in dma_attr_version).
 */
#define	DMA_ATTR_V0		0
#define	DMA_ATTR_VERSION	DMA_ATTR_V0
393 
/*
 * DMA attributes: a device's DMA engine characteristics and
 * constraints, version DMA_ATTR_V0.
 */
typedef struct ddi_dma_attr {
	uint_t		dma_attr_version;	/* version number (DMA_ATTR_V0) */
	uint64_t	dma_attr_addr_lo;	/* low DMA address range */
	uint64_t	dma_attr_addr_hi;	/* high DMA address range */
	uint64_t	dma_attr_count_max;	/* DMA counter register */
	uint64_t	dma_attr_align;		/* DMA address alignment */
	uint_t		dma_attr_burstsizes;	/* DMA burstsizes */
	uint32_t	dma_attr_minxfer;	/* min effective DMA size */
	uint64_t 	dma_attr_maxxfer;	/* max DMA xfer size */
	uint64_t 	dma_attr_seg;		/* segment boundary */
	int		dma_attr_sgllen;	/* s/g length */
	uint32_t	dma_attr_granular;	/* granularity of device */
	uint_t		dma_attr_flags;		/* Bus specific DMA flags */
} ddi_dma_attr_t;
408 
/*
 * Handy macro to set a maximum bit value (should be elsewhere)
 *
 * Clear off all bits lower than 'mybit' in val; if there are no
 * bits higher than or equal to mybit in val then set mybit. Assumes
 * mybit equals some power of 2 and is not zero.
 *
 * The entire expansion is parenthesized (matching minbit below) so
 * the macro composes safely inside larger expressions; without the
 * outer parentheses, e.g. "x & maxbit(v, b)" would parse as
 * "(x & A) | B" because '&' binds tighter than '|'.
 */
#define	maxbit(val, mybit)	\
	(((val) & ~((mybit)-1)) | ((((val) & ~((mybit)-1)) == 0) ? (mybit) : 0))
418 
/*
 * Handy macro to set a minimum bit value (should be elsewhere)
 *
 * Clear off all bits higher than 'mybit' in val; if there are no
 * bits lower than or equal to mybit in val then set mybit. Assumes
 * mybit equals some power of 2 and is not zero.
 */
#define	minbit(val, mybit)	\
	(((val)&((mybit)|((mybit)-1))) | \
	((((val) & ((mybit)-1)) == 0) ? (mybit) : 0))
429 
/*
 * Structure of a request to map an object for DMA.
 */
typedef struct ddi_dma_req {
	/*
	 * Caller's DMA engine constraints.
	 *
	 * If there are no particular constraints to the caller's DMA
	 * engine, this field may be set to NULL. The implementation DMA
	 * setup functions will then select a set of standard beginning
	 * constraints.
	 *
	 * In either case, as the mapping proceeds, the initial DMA
	 * constraints may become more restrictive as each intervening
	 * nexus might add further restrictions.
	 */
	ddi_dma_lim_t	*dmar_limits;

	/*
	 * Contains the information passed to the DMA mapping allocation
	 * routine(s) (see the DDI_DMA_* flag definitions below).
	 */
	uint_t		dmar_flags;

	/*
	 * Callback function. A caller of the DMA mapping functions must
	 * specify by filling in this field whether the allocation routines
	 * can sleep awaiting mapping resources, must *not* sleep awaiting
	 * resources, or may *not* sleep awaiting any resources and must
	 * call the function specified by dmar_fp with the argument
	 * dmar_arg when resources might have become available at a future
	 * time.
	 */
	int		(*dmar_fp)();

	caddr_t		dmar_arg;	/* Callback function argument */

	/*
	 * Description of the object to be mapped for DMA.
	 * Must be last in this structure in case that the
	 * union ddi_dma_obj_t changes in the future.
	 */
	ddi_dma_obj_t	dmar_object;

} ddi_dma_req_t;
475 
/*
 * Defines for the DMA mapping allocation functions
 *
 * If a DMA callback function is set to anything other than the following
 * defines then it is assumed that one wishes a callback and is providing
 * a function address.
 */
#ifdef __STDC__
#define	DDI_DMA_DONTWAIT	((int (*)(caddr_t))0)
#define	DDI_DMA_SLEEP		((int (*)(caddr_t))1)
#else
#define	DDI_DMA_DONTWAIT	((int (*)())0)
#define	DDI_DMA_SLEEP		((int (*)())1)
#endif
490 
/*
 * Return values from callback functions (dmar_fp above).
 */
#define	DDI_DMA_CALLBACK_RUNOUT	0
#define	DDI_DMA_CALLBACK_DONE	1
496 
/*
 * Flag definitions for the allocation functions (dmar_flags).
 */
#define	DDI_DMA_WRITE		0x0001	/* Direction memory --> IO 	*/
#define	DDI_DMA_READ		0x0002	/* Direction IO --> memory	*/
#define	DDI_DMA_RDWR		(DDI_DMA_READ | DDI_DMA_WRITE)

/*
 * If possible, establish a MMU redzone after the mapping (to protect
 * against cheap DMA hardware that might get out of control).
 */
#define	DDI_DMA_REDZONE		0x0004

/*
 * A partial allocation is allowed. That is, if the size of the object
 * exceeds the mapping resources available, only map a portion of the
 * object and return status indicating that this took place. The caller
 * can use the functions ddi_dma_numwin(9F) and ddi_dma_getwin(9F) to
 * change, at a later point, the actual mapped portion of the object.
 *
 * The mapped portion begins at offset 0 of the object.
 */
#define	DDI_DMA_PARTIAL		0x0008

/*
 * Map the object for byte consistent access. Note that explicit
 * synchronization (via ddi_dma_sync(9F)) will still be required.
 * Consider this flag to be a hint to the mapping routines as to
 * the intended use of the mapping.
 *
 * Normal data transfers can usually be considered to use 'streaming'
 * modes of operation. They start at a specific point, transfer a
 * fairly large amount of data sequentially, and then stop (usually
 * on a well aligned boundary).
 *
 * Control mode data transfers (for memory resident device control blocks,
 * e.g., ethernet message descriptors) do not access memory in such
 * a streaming sequential fashion. Instead, they tend to modify a few
 * words or bytes, move around and maybe modify a few more.
 *
 * There are many machine implementations that make this difficult to
 * control in a generic and seamless fashion. Therefore, explicit synch-
 * ronization steps (via ddi_dma_sync(9F)) are still required (even if you
 * ask for a byte-consistent mapping) in order to make the view of the
 * memory object shared between a CPU and a DMA master consistent.
 * However, judicious use of this flag can give sufficient hints to
 * the mapping routines to attempt to pick the most efficacious mapping
 * such that the synchronization steps are as efficient as possible.
 */
#define	DDI_DMA_CONSISTENT	0x0010

/*
 * Some DMA mappings have to be 'exclusive' access.
 */
#define	DDI_DMA_EXCLUSIVE	0x0020

/*
 * Sequential, unidirectional, block-sized and block aligned transfers.
 */
#define	DDI_DMA_STREAMING	0x0040

/*
 * Support for 64-bit SBus devices.
 */
#define	DDI_DMA_SBUS_64BIT	0x2000
564 
/*
 * Return values from the mapping allocation functions.
 */

/*
 * Succeeded in satisfying the request.
 */
#define	DDI_DMA_MAPPED		0

/*
 * Mapping is legitimate (for advisory calls).
 */
#define	DDI_DMA_MAPOK		0

/*
 * Succeeded in mapping a portion of the request (see DDI_DMA_PARTIAL).
 */
#define	DDI_DMA_PARTIAL_MAP	1

/*
 * Indicates the end of the window/segment list.
 */
#define	DDI_DMA_DONE		2

/*
 * No resources to map the request.
 */
#define	DDI_DMA_NORESOURCES	-1

/*
 * Can't establish a mapping to the specified object
 * (no specific reason).
 */
#define	DDI_DMA_NOMAPPING	-2

/*
 * The request is too big to be mapped.
 */
#define	DDI_DMA_TOOBIG		-3

/*
 * The request is too small to be mapped.
 */
#define	DDI_DMA_TOOSMALL	-4

/*
 * The request cannot be mapped because the object
 * is locked against mapping by another DMA master.
 */
#define	DDI_DMA_LOCKED		-5

/*
 * The request cannot be mapped because the limits
 * structure has bogus values.
 */
#define	DDI_DMA_BADLIMITS	-6

/*
 * The segment/window pointer is stale.
 */
#define	DDI_DMA_STALE		-7

/*
 * The system can't allocate DMA resources using
 * the given DMA attributes.
 */
#define	DDI_DMA_BADATTR		-8

/*
 * A DMA handle is already used for a DMA.
 */
#define	DDI_DMA_INUSE		-9
637 
/*
 * In order for the access to a memory object to be consistent
 * between a device and a CPU, the function ddi_dma_sync(9F)
 * must be called upon the DMA handle. The following flags
 * define whose view of the object should be made consistent.
 * There are different flags here because on different machines
 * there are definite performance implications of how long
 * such synchronization takes.
 *
 * DDI_DMA_SYNC_FORDEV makes all device references to the object
 * mapped by the DMA handle up to date. It should be used by a
 * driver after a cpu modifies the memory object (over the range
 * specified by the other arguments to the ddi_dma_sync(9F) call).
 *
 * DDI_DMA_SYNC_FORCPU makes all cpu references to the object
 * mapped by the DMA handle up to date. It should be used
 * by a driver after the receipt of data from the device to
 * the memory object is done (over the range specified by
 * the other arguments to the ddi_dma_sync(9F) call).
 *
 * If the only mapping that concerns the driver is one for the
 * kernel (such as memory allocated by ddi_iopb_alloc(9F)), the
 * flag DDI_DMA_SYNC_FORKERNEL can be used. This is a hint to the
 * system that if it can synchronize the kernel's view faster
 * than the CPU's view, it can do so, otherwise it acts the
 * same as DDI_DMA_SYNC_FORCPU. DDI_DMA_SYNC_FORKERNEL might
 * speed up the synchronization of kernel mappings in case of
 * non IO-coherent CPU caches.
 */
#define	DDI_DMA_SYNC_FORDEV	0x0
#define	DDI_DMA_SYNC_FORCPU	0x1
#define	DDI_DMA_SYNC_FORKERNEL	0x2
670 
/*
 * Bus nexus control functions for DMA
 */

/*
 * Control operations, defined here so that devops.h can be included
 * by drivers without having to include a specific SYSDDI implementation
 * header file.
 *
 * NOTE: the enumerators are positional (no explicit values), so their
 * numeric values are part of the binary interface; append new
 * operations at the end rather than inserting in the middle.
 */

enum ddi_dma_ctlops {
	DDI_DMA_FREE,		/* free reference to object		*/
	DDI_DMA_SYNC,		/* synchronize cache references		*/
	DDI_DMA_HTOC,		/* return DMA cookie for handle		*/
	DDI_DMA_KVADDR,		/* return kernel virtual address	*/
	DDI_DMA_MOVWIN,		/* change mapped DMA window on object	*/
	DDI_DMA_REPWIN,		/* report current window on DMA object	*/
	DDI_DMA_GETERR,		/* report any post-transfer DMA errors	*/
	DDI_DMA_COFF,		/* convert a DMA cookie to an offset	*/
	DDI_DMA_NEXTWIN,	/* get next window within object	*/
	DDI_DMA_NEXTSEG,	/* get next segment within window	*/
	DDI_DMA_SEGTOC,		/* return segment DMA cookie		*/
	DDI_DMA_RESERVE,	/* reserve some DVMA range		*/
	DDI_DMA_RELEASE,	/* free preallocated DVMA range		*/
	DDI_DMA_RESETH,		/* reset next cookie ptr in handle	*/
	DDI_DMA_CKSYNC,		/* sync intermediate buffer to cookies	*/
	DDI_DMA_IOPB_ALLOC,	/* get contiguous DMA-able memory	*/
	DDI_DMA_IOPB_FREE,	/* return contiguous DMA-able memory	*/
	DDI_DMA_SMEM_ALLOC,	/* get contiguous DMA-able memory	*/
	DDI_DMA_SMEM_FREE,	/* return contiguous DMA-able memory	*/
	DDI_DMA_SET_SBUS64,	/* 64 bit SBus support			*/
	DDI_DMA_REMAP,		/* remap DMA buffers after relocation	*/

		/*
		 * control ops for DMA engine on motherboard
		 */
	DDI_DMA_E_ACQUIRE,	/* get channel for exclusive use	*/
	DDI_DMA_E_FREE,		/* release channel			*/
	DDI_DMA_E_1STPTY,	/* setup channel for 1st party DMA	*/
	DDI_DMA_E_GETCB,	/* get control block for DMA engine	*/
	DDI_DMA_E_FREECB,	/* free control blk for DMA engine	*/
	DDI_DMA_E_PROG,		/* program channel of DMA engine	*/
	DDI_DMA_E_SWSETUP,	/* setup channel for software control	*/
	DDI_DMA_E_SWSTART,	/* software operation of DMA channel	*/
	DDI_DMA_E_ENABLE,	/* enable channel of DMA engine		*/
	DDI_DMA_E_STOP,		/* stop a channel of DMA engine		*/
	DDI_DMA_E_DISABLE,	/* disable channel of DMA engine	*/
	DDI_DMA_E_GETCNT,	/* get remaining xfer count		*/
	DDI_DMA_E_GETLIM,	/* get DMA engine limits		*/
	DDI_DMA_E_GETATTR	/* get DMA engine attributes		*/
};
722 
723 #ifdef	__cplusplus
724 }
725 #endif
726 
727 #endif	/* _SYS_DDIDMAREQ_H */
728