/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#ifndef _SYS_DDIDMAREQ_H
#define	_SYS_DDIDMAREQ_H

#ifdef	__cplusplus
extern "C" {
#endif

/*
 * Memory Objects
 *
 * Definitions of structures that can describe
 * an object that can be mapped for DMA.
 */

/*
 * Structure describing a virtual address
 */
struct v_address {
	caddr_t		v_addr;		/* base virtual address */
	struct as	*v_as;		/* pointer to address space */
	void		*v_priv;	/* priv data for shadow I/O */
};

/*
 * Structure describing a page-based address
 */
struct pp_address {
	/*
	 * A pointer to a circularly linked list of page structures.
	 */
	struct page *pp_pp;
	uint_t pp_offset;		/* offset within first page */
};

/*
 * Structure to describe a physical memory address.
 */
struct phy_address {
	ulong_t		p_addr;		/* base physical address */
	ulong_t		p_memtype;	/* memory type */
};

/*
 * A union of all of the above structures.
 *
 * This union describes the relationship between
 * the kind of an address description and an object.
 */
typedef union {
	struct v_address virt_obj;	/* Some virtual address */
	struct pp_address pp_obj;	/* Some page-based address */
	struct phy_address phys_obj;	/* Some physical address */
} ddi_dma_aobj_t;

/*
 * DMA object types - used to select how the object
 * being mapped is being addressed by the IU.
 */
typedef enum {
	DMA_OTYP_VADDR = 0,	/* enforce starting value of zero */
	DMA_OTYP_PAGES,
	DMA_OTYP_PADDR,
	DMA_OTYP_BUFVADDR
} ddi_dma_atyp_t;

/*
 * A compact package to describe an object that is to be mapped for DMA.
 */
typedef struct {
	uint_t		dmao_size;	/* size, in bytes, of the object */
	ddi_dma_atyp_t	dmao_type;	/* type of object */
	ddi_dma_aobj_t	dmao_obj;	/* the object described */
} ddi_dma_obj_t;
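/*
 * Example (illustrative only): describing an 8k kernel virtual
 * buffer as a DMA object. The buffer pointer "buf" is hypothetical;
 * kas is the kernel's address space.
 *
 *	ddi_dma_obj_t dmao;
 *
 *	dmao.dmao_size = 8192;
 *	dmao.dmao_type = DMA_OTYP_VADDR;
 *	dmao.dmao_obj.virt_obj.v_addr = buf;
 *	dmao.dmao_obj.virt_obj.v_as = &kas;
 *	dmao.dmao_obj.virt_obj.v_priv = NULL;
 */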
/*
 * DMA addressing limits.
 *
 * This structure describes the constraints that a particular device's
 * DMA engine has to its parent so that the parent may correctly set
 * things up for a DMA mapping. Each parent may in turn modify the
 * constraints listed in a DMA request structure in order to describe
 * to its parent any changed or additional constraints. The rules
 * are that each parent may modify a constraint in order to further
 * constrain things (e.g., picking a more limited address range than
 * that permitted by the child), but that the parent may not ignore
 * a child's constraints.
 *
 * A particular constraint that we do *not* address is whether or not
 * a requested mapping is too large for a DMA engine's counter to
 * correctly track. It is still up to each driver to explicitly handle
 * transfers that are too large for its own hardware to deal with directly.
 *
 * The mapping routines that are cognizant of this structure will
 * copy any user defined limits structure if they need to modify
 * the fields (as alluded to above).
 *
 * A note as to how to define constraints:
 *
 * How you define the constraints for your device depends on how you
 * define your device. For example, you may have an SBus card with a
 * device on it that addresses only the bottom 16 Mb of virtual DMA space.
 * However, if the card also has ancillary circuitry that pulls the high 8
 * bits of address lines high, the more correct expression for your device
 * is that it addresses [0xff000000..0xffffffff] rather than [0..0x00ffffff].
 */
#if defined(__sparc)
typedef struct ddi_dma_lim {

	/*
	 * Low range of 32 bit addressing capability.
	 */
	uint_t	dlim_addr_lo;

	/*
	 * Upper inclusive bound of addressing capability. It is an
	 * inclusive boundary limit to allow for the addressing range
	 * [0..0xffffffff] to be specified in preference to [0..0].
	 */
	uint_t	dlim_addr_hi;

	/*
	 * Inclusive upper bound with which the DMA engine's counter acts as
	 * a register.
	 *
	 * This handles the case where an upper portion of a DMA address
	 * register is a latch instead of being a full 32 bit register
	 * (e.g., the upper 8 bits may remain constant while the lower
	 * 24 bits are the real address register).
	 *
	 * This essentially gives a hint about segment limitations
	 * to the mapping routines.
	 */
	uint_t	dlim_cntr_max;

	/*
	 * DMA burst sizes.
	 *
	 * At the time of a mapping request, this tag defines the possible
	 * DMA burst cycle sizes that the requestor's DMA engine can
	 * emit. The format of the data is binary encoding of burst sizes
	 * assumed to be powers of two. That is, if a DMA engine is capable
	 * of doing 1, 2, 4 and 16 byte transfers, the encoding would be 0x17.
	 *
	 * As the mapping request is handled by intervening nexi, the
	 * burstsizes value may be modified. Prior to enabling DMA for
	 * the specific device, the driver that owns the DMA engine should
	 * check (via ddi_dma_burstsizes(9F)) what the allowed burstsizes
	 * have become and program their DMA engine appropriately.
	 */
	uint_t	dlim_burstsizes;

	/*
	 * Minimum effective DMA transfer size, in units of bytes.
	 *
	 * This value specifies the minimum effective granularity of the
	 * DMA engine. It is distinct from dlim_burstsizes in that it
	 * describes the minimum amount of access a DMA transfer will
	 * effect. dlim_burstsizes describes in what electrical fashion
	 * the DMA engine might perform its accesses, while dlim_minxfer
	 * describes the minimum amount of memory that can be touched by
	 * the DMA transfer.
	 *
	 * As the mapping request is handled by intervening nexi, the
	 * dlim_minxfer value may be modified contingent upon the presence
	 * (and use) of I/O caches and DMA write buffers in between the
	 * DMA engine and the object that DMA is being performed on.
	 */
	uint_t	dlim_minxfer;

	/*
	 * Expected average data rate for this DMA engine
	 * while transferring data.
	 *
	 * This is used as a hint for a number of operations that might
	 * want to know the possible optimal latency requirements of this
	 * device. A value of zero will be interpreted as a 'do not care'.
	 */
	uint_t	dlim_dmaspeed;

} ddi_dma_lim_t;
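/*
 * Example (illustrative only): limits for a hypothetical SBus device
 * whose engine can burst 4 or 8 bytes, has a 24-bit address counter,
 * and can address the full 32-bit range:
 *
 *	ddi_dma_lim_t lim;
 *
 *	lim.dlim_addr_lo = 0;
 *	lim.dlim_addr_hi = 0xffffffff;
 *	lim.dlim_cntr_max = 0x00ffffff;
 *	lim.dlim_burstsizes = 0x4 | 0x8;
 *	lim.dlim_minxfer = 1;
 *	lim.dlim_dmaspeed = 0;
 */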
#elif defined(__x86)

/*
 * values for dlim_minxfer
 */
#define	DMA_UNIT_8	1
#define	DMA_UNIT_16	2
#define	DMA_UNIT_32	4

/*
 * Version number
 */
#define	DMALIM_VER0	((0x86000000) + 0)

typedef struct ddi_dma_lim {

	/*
	 * Low range of 32 bit addressing capability.
	 */
	uint_t	dlim_addr_lo;

	/*
	 * Upper inclusive bound of 32 bit addressing capability.
	 *
	 * The ISA nexus restricts this to 0x00ffffff, since this bus has
	 * only 24 address lines. This enforces the 16 Mb address limitation.
	 * The EISA nexus restricts this to 0xffffffff.
	 */
	uint_t	dlim_addr_hi;

	/*
	 * DMA engine counter not used; set to 0
	 */
	uint_t	dlim_cntr_max;

	/*
	 * DMA burst sizes not used; set to 1
	 */
	uint_t	dlim_burstsizes;

	/*
	 * Minimum effective DMA transfer size.
	 *
	 * This value specifies the minimum effective granularity of the
	 * DMA engine. It is distinct from dlim_burstsizes in that it
	 * describes the minimum amount of access a DMA transfer will
	 * effect. dlim_burstsizes describes in what electrical fashion
	 * the DMA engine might perform its accesses, while dlim_minxfer
	 * describes the minimum amount of memory that can be touched by
	 * the DMA transfer.
	 *
	 * This value also implies the required address alignment.
	 * The number of bytes transferred is assumed to be
	 *	dlim_minxfer * (DMA engine count)
	 *
	 * It should be set to DMA_UNIT_8, DMA_UNIT_16, or DMA_UNIT_32.
	 */
	uint_t	dlim_minxfer;

	/*
	 * Expected average data rate for this DMA engine
	 * while transferring data.
	 *
	 * This is used as a hint for a number of operations that might
	 * want to know the possible optimal latency requirements of this
	 * device. A value of zero will be interpreted as a 'do not care'.
	 */
	uint_t	dlim_dmaspeed;


	/*
	 * Version number of this structure
	 */
	uint_t	dlim_version;	/* = 0x86 << 24 + 0 */

	/*
	 * Inclusive upper bound with which the DMA engine's address acts as
	 * a register.
	 * This handles the case where an upper portion of a DMA address
	 * register is a latch instead of being a full 32 bit register
	 * (e.g., the upper 16 bits remain constant while the lower 16 bits
	 * are incremented for each DMA transfer).
	 *
	 * The ISA nexus restricts only 3rd-party DMA requests to 0x0000ffff,
	 * since the ISA DMA engine has a 16-bit register for low address and
	 * an 8-bit latch for high address. This enforces the first 64 Kb
	 * limitation (address boundary).
	 * The EISA nexus restricts only 3rd-party DMA requests to 0xffffffff.
	 */
	uint_t	dlim_adreg_max;

	/*
	 * Maximum transfer count that the DMA engine can handle.
	 *
	 * The ISA nexus restricts only 3rd-party DMA requests to 0x0000ffff,
	 * since the ISA DMA engine has a 16-bit register for counting.
	 * This enforces the other 64 Kb limitation (count size).
	 * The EISA nexus restricts only 3rd-party DMA requests to 0x00ffffff,
	 * since the EISA DMA engine has a 24-bit register for counting.
	 *
	 * This transfer count limitation is a per segment limitation.
	 * It can also be used to restrict the size of segments.
	 *
	 * This is used as a bit mask, so it must be a power of 2, minus 1.
	 */
	uint_t	dlim_ctreg_max;

	/*
	 * Granularity of DMA transfer, in units of bytes.
	 *
	 * Breakup sizes must be multiples of this value.
	 * If no scatter/gather capability is specified, then the size of
	 * each DMA transfer must be a multiple of this value.
	 *
	 * If there is scatter/gather capability, then a single cookie cannot
	 * be smaller in size than the minimum xfer value, and may be less
	 * than the granularity value. The total transfer length of the
	 * scatter/gather list should be a multiple of the granularity value;
	 * use dlim_sgllen to specify the length of the scatter/gather list.
	 *
	 * This value should be equal to the sector size of the device.
	 */
	uint_t	dlim_granular;

	/*
	 * Length of scatter/gather list
	 *
	 * This value specifies the number of segments or cookies that a DMA
	 * engine can consume in one i/o request to the device. For 3rd-party
	 * DMA that uses the bus nexus this should be set to 1. Devices with
	 * 1st-party DMA capability should specify the number of entries in
	 * its scatter/gather list. The breakup routine will ensure that each
	 * group of dlim_sgllen cookies (within a DMA window) will have a
	 * total transfer length that is a multiple of dlim_granular.
	 *
	 *	< 0  :	tbd
	 *	= 0  :	breakup is for PIO.
	 *	= 1  :	breakup is for DMA engine with no scatter/gather
	 *		capability.
	 *	>= 2 :	breakup is for DMA engine with scatter/gather
	 *		capability; value is max number of entries in list.
	 *
	 * Note that this list length is not dependent on the DMA window
	 * size. The size of the DMA window is based on resources consumed,
	 * such as intermediate buffers. Several s/g lists may exist within
	 * a window. But the end of a window does imply the end of the s/g
	 * list.
	 */
	short	dlim_sgllen;

	/*
	 * Size of device i/o request
	 *
	 * This value indicates the maximum number of bytes the device
	 * can transmit/receive for one i/o command. This limitation is
	 * significant only if it is less than (dlim_ctreg_max * dlim_sgllen).
	 */
	uint_t	dlim_reqsize;

} ddi_dma_lim_t;
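/*
 * Example (illustrative only): limits for a hypothetical ISA device
 * using 3rd-party DMA, with 512 byte sectors and no scatter/gather:
 *
 *	ddi_dma_lim_t lim;
 *
 *	lim.dlim_addr_lo = 0;
 *	lim.dlim_addr_hi = 0x00ffffff;
 *	lim.dlim_cntr_max = 0;
 *	lim.dlim_burstsizes = 1;
 *	lim.dlim_minxfer = DMA_UNIT_8;
 *	lim.dlim_dmaspeed = 0;
 *	lim.dlim_version = DMALIM_VER0;
 *	lim.dlim_adreg_max = 0x0000ffff;
 *	lim.dlim_ctreg_max = 0x0000ffff;
 *	lim.dlim_granular = 512;
 *	lim.dlim_sgllen = 1;
 *	lim.dlim_reqsize = 0x0000ffff;
 */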
#else
#error "struct ddi_dma_lim not defined for this architecture"
#endif	/* defined(__sparc) */

/*
 * Flags definition for dma_attr_flags
 */

/*
 * return physical DMA address on platforms
 * which support DVMA
 */
#define	DDI_DMA_FORCE_PHYSICAL		0x0100

/*
 * An error will be flagged for DMA data path errors
 */
#define	DDI_DMA_FLAGERR			0x200

/*
 * Enable relaxed ordering
 */
#define	DDI_DMA_RELAXED_ORDERING	0x400


/*
 * Consolidation private x86 only flag which will cause a bounce buffer
 * (paddr < dma_attr_seg) to be used if the buffer passed to the bind
 * operation contains pages both above and below dma_attr_seg. If this flag
 * is set, dma_attr_seg must be <= dma_attr_addr_hi.
 */
#define	_DDI_DMA_BOUNCE_ON_SEG		0x8000

#define	DMA_ATTR_V0		0
#define	DMA_ATTR_VERSION	DMA_ATTR_V0

typedef struct ddi_dma_attr {
	uint_t		dma_attr_version;	/* version number */
	uint64_t	dma_attr_addr_lo;	/* low DMA address range */
	uint64_t	dma_attr_addr_hi;	/* high DMA address range */
	uint64_t	dma_attr_count_max;	/* DMA counter register */
	uint64_t	dma_attr_align;		/* DMA address alignment */
	uint_t		dma_attr_burstsizes;	/* DMA burstsizes */
	uint32_t	dma_attr_minxfer;	/* min effective DMA size */
	uint64_t	dma_attr_maxxfer;	/* max DMA xfer size */
	uint64_t	dma_attr_seg;		/* segment boundary */
	int		dma_attr_sgllen;	/* s/g length */
	uint32_t	dma_attr_granular;	/* granularity of device */
	uint_t		dma_attr_flags;		/* Bus specific DMA flags */
} ddi_dma_attr_t;
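/*
 * Example (illustrative only): attributes for a hypothetical 32-bit
 * PCI device with a 128-entry scatter/gather list and 512 byte
 * granularity:
 *
 *	static const ddi_dma_attr_t dev_dma_attr = {
 *		.dma_attr_version = DMA_ATTR_V0,
 *		.dma_attr_addr_lo = 0,
 *		.dma_attr_addr_hi = 0xffffffffULL,
 *		.dma_attr_count_max = 0xffffffffULL,
 *		.dma_attr_align = 1,
 *		.dma_attr_burstsizes = 0x7f,
 *		.dma_attr_minxfer = 1,
 *		.dma_attr_maxxfer = 0xffffffffULL,
 *		.dma_attr_seg = 0xffffffffULL,
 *		.dma_attr_sgllen = 128,
 *		.dma_attr_granular = 512,
 *		.dma_attr_flags = 0
 *	};
 */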
/*
 * Handy macro to set a maximum bit value (should be elsewhere)
 *
 * Clear off all bits lower than 'mybit' in val; if there are no
 * bits higher than or equal to mybit in val then set mybit. Assumes
 * mybit equals some power of 2 and is not zero.
 *
 * For example, maxbit(0x2c, 0x10) yields 0x20.
 */
#define	maxbit(val, mybit)		\
	((val) & ~((mybit)-1)) | ((((val) & ~((mybit)-1)) == 0) ? (mybit) : 0)

/*
 * Handy macro to set a minimum bit value (should be elsewhere)
 *
 * Clear off all bits higher than 'mybit' in val; if there are no
 * bits lower than or equal to mybit in val then set mybit. Assumes
 * mybit equals some power of 2 and is not zero.
 *
 * For example, minbit(0x2c, 0x10) yields 0x0c.
 */
#define	minbit(val, mybit)		\
	(((val)&((mybit)|((mybit)-1))) | \
	((((val) & ((mybit)-1)) == 0) ? (mybit) : 0))

/*
 * Structure of a request to map an object for DMA.
 */
typedef struct ddi_dma_req {
	/*
	 * Caller's DMA engine constraints.
	 *
	 * If there are no particular constraints to the caller's DMA
	 * engine, this field may be set to NULL. The implementation DMA
	 * setup functions will then select a set of standard beginning
	 * constraints.
	 *
	 * In either case, as the mapping proceeds, the initial DMA
	 * constraints may become more restrictive as each intervening
	 * nexus might add further restrictions.
	 */
	ddi_dma_lim_t	*dmar_limits;

	/*
	 * Contains the information passed to the DMA mapping allocation
	 * routine(s).
	 */
	uint_t		dmar_flags;

	/*
	 * Callback function. A caller of the DMA mapping functions must
	 * specify by filling in this field whether the allocation routines
	 * can sleep awaiting mapping resources, must *not* sleep awaiting
	 * resources, or may *not* sleep awaiting any resources and must
	 * call the function specified by dmar_fp with the argument
	 * dmar_arg when resources might have become available at a future
	 * time.
	 */
	int		(*dmar_fp)();

	caddr_t		dmar_arg;	/* Callback function argument */

	/*
	 * Description of the object to be mapped for DMA.
	 * Must be last in this structure in case the
	 * union ddi_dma_obj_t changes in the future.
	 */
	ddi_dma_obj_t	dmar_object;

} ddi_dma_req_t;

/*
 * Defines for the DMA mapping allocation functions
 *
 * If a DMA callback function is set to anything other than the following
 * defines then it is assumed that one wishes a callback and is providing
 * a function address.
 */
#ifdef	__STDC__
#define	DDI_DMA_DONTWAIT	((int (*)(caddr_t))0)
#define	DDI_DMA_SLEEP		((int (*)(caddr_t))1)
#else
#define	DDI_DMA_DONTWAIT	((int (*)())0)
#define	DDI_DMA_SLEEP		((int (*)())1)
#endif
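/*
 * Example (illustrative only): a request that lets the setup routines
 * choose default limits and sleep until resources are available;
 * "dmao" is a previously initialized ddi_dma_obj_t:
 *
 *	ddi_dma_req_t req;
 *
 *	req.dmar_limits = NULL;
 *	req.dmar_flags = DDI_DMA_READ;
 *	req.dmar_fp = DDI_DMA_SLEEP;
 *	req.dmar_arg = NULL;
 *	req.dmar_object = dmao;
 */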
/*
 * Return values from callback functions.
 */
#define	DDI_DMA_CALLBACK_RUNOUT	0
#define	DDI_DMA_CALLBACK_DONE	1

/*
 * Flag definitions for the allocation functions.
 */
#define	DDI_DMA_WRITE		0x0001	/* Direction memory --> IO */
#define	DDI_DMA_READ		0x0002	/* Direction IO --> memory */
#define	DDI_DMA_RDWR		(DDI_DMA_READ | DDI_DMA_WRITE)

/*
 * If possible, establish an MMU redzone after the mapping (to protect
 * against cheap DMA hardware that might get out of control).
 */
#define	DDI_DMA_REDZONE		0x0004

/*
 * A partial allocation is allowed. That is, if the size of the object
 * exceeds the mapping resources available, only map a portion of the
 * object and return status indicating that this took place. The caller
 * can use the functions ddi_dma_numwin(9F) and ddi_dma_getwin(9F) to
 * change, at a later point, the actual mapped portion of the object;
 * a sketch of walking the windows appears below.
 *
 * The mapped portion begins at offset 0 of the object.
 */
#define	DDI_DMA_PARTIAL		0x0008
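/*
 * Example (illustrative only): after a bind that returned
 * DDI_DMA_PARTIAL_MAP, a driver might walk the windows of the object;
 * "handle" is the driver's ddi_dma_handle_t:
 *
 *	uint_t nwin, ccount, win;
 *	off_t off;
 *	size_t len;
 *	ddi_dma_cookie_t cookie;
 *
 *	(void) ddi_dma_numwin(handle, &nwin);
 *	for (win = 0; win < nwin; win++) {
 *		(void) ddi_dma_getwin(handle, win, &off, &len,
 *		    &cookie, &ccount);
 *		... program the device with this window's cookies ...
 *	}
 */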
/*
 * Map the object for byte consistent access. Note that explicit
 * synchronization (via ddi_dma_sync(9F)) will still be required.
 * Consider this flag to be a hint to the mapping routines as to
 * the intended use of the mapping.
 *
 * Normal data transfers can usually be considered to use 'streaming'
 * modes of operations. They start at a specific point, transfer a
 * fairly large amount of data sequentially, and then stop (usually
 * on a well aligned boundary).
 *
 * Control mode data transfers (for memory resident device control blocks,
 * e.g., ethernet message descriptors) do not access memory in such
 * a streaming sequential fashion. Instead, they tend to modify a few
 * words or bytes, move around and maybe modify a few more.
 *
 * There are many machine implementations that make this difficult to
 * control in a generic and seamless fashion. Therefore, explicit
 * synchronization steps (via ddi_dma_sync(9F)) are still required (even
 * if you ask for a byte-consistent mapping) in order to make the view of
 * the memory object shared between a CPU and a DMA master consistent.
 * However, judicious use of this flag can give sufficient hints to
 * the mapping routines to attempt to pick the most efficacious mapping
 * such that the synchronization steps are as efficient as possible.
 */
#define	DDI_DMA_CONSISTENT	0x0010

/*
 * Some DMA mappings have to be 'exclusive' access.
 */
#define	DDI_DMA_EXCLUSIVE	0x0020

/*
 * Sequential, unidirectional, block-sized and block aligned transfers
 */
#define	DDI_DMA_STREAMING	0x0040

/*
 * Support for 64-bit SBus devices
 */
#define	DDI_DMA_SBUS_64BIT	0x2000

/*
 * Return values from the mapping allocation functions.
 */

/*
 * succeeded in satisfying request
 */
#define	DDI_DMA_MAPPED		0

/*
 * Mapping is legitimate (for advisory calls).
 */
#define	DDI_DMA_MAPOK		0

/*
 * Succeeded in mapping a portion of the request.
 */
#define	DDI_DMA_PARTIAL_MAP	1

/*
 * indicates end of window/segment list
 */
#define	DDI_DMA_DONE		2

/*
 * No resources to map request.
 */
#define	DDI_DMA_NORESOURCES	-1

/*
 * Can't establish a mapping to the specified object
 * (no specific reason).
 */
#define	DDI_DMA_NOMAPPING	-2

/*
 * The request is too big to be mapped.
 */
#define	DDI_DMA_TOOBIG		-3

/*
 * The request is too small to be mapped.
 */
#define	DDI_DMA_TOOSMALL	-4

/*
 * The request cannot be mapped because the object
 * is locked against mapping by another DMA master.
 */
#define	DDI_DMA_LOCKED		-5

/*
 * The request cannot be mapped because the limits
 * structure has bogus values.
 */
#define	DDI_DMA_BADLIMITS	-6

/*
 * the segment/window pointer is stale
 */
#define	DDI_DMA_STALE		-7

/*
 * The system can't allocate DMA resources using
 * the given DMA attributes
 */
#define	DDI_DMA_BADATTR		-8

/*
 * A DMA handle is already used for a DMA
 */
#define	DDI_DMA_INUSE		-9


/*
 * DVMA disabled or not supported. use physical DMA
 */
#define	DDI_DMA_USE_PHYSICAL	-10


/*
 * In order for the access to a memory object to be consistent
 * between a device and a CPU, the function ddi_dma_sync(9F)
 * must be called upon the DMA handle. The following flags
 * define whose view of the object should be made consistent.
 * There are different flags here because on different machines
 * there are definite performance implications of how long
 * such synchronization takes.
 *
 * DDI_DMA_SYNC_FORDEV makes all device references to the object
 * mapped by the DMA handle up to date. It should be used by a
 * driver after a cpu modifies the memory object (over the range
 * specified by the other arguments to the ddi_dma_sync(9F) call).
 *
 * DDI_DMA_SYNC_FORCPU makes all cpu references to the object
 * mapped by the DMA handle up to date. It should be used
 * by a driver after the receipt of data from the device to
 * the memory object is done (over the range specified by
 * the other arguments to the ddi_dma_sync(9F) call).
 *
 * If the only mapping that concerns the driver is one for the
 * kernel (such as memory allocated by ddi_iopb_alloc(9F)), the
 * flag DDI_DMA_SYNC_FORKERNEL can be used. This is a hint to the
 * system that if it can synchronize the kernel's view faster
 * than the CPU's view, it can do so; otherwise it acts the
 * same as DDI_DMA_SYNC_FORCPU. DDI_DMA_SYNC_FORKERNEL might
 * speed up the synchronization of kernel mappings in case of
 * non IO-coherent CPU caches.
 */
#define	DDI_DMA_SYNC_FORDEV	0x0
#define	DDI_DMA_SYNC_FORCPU	0x1
#define	DDI_DMA_SYNC_FORKERNEL	0x2
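/*
 * Example (illustrative only): after a device deposits receive data
 * into a bound object, a driver would sync the whole object for the
 * CPU before reading it; "handle" is the driver's ddi_dma_handle_t:
 *
 *	(void) ddi_dma_sync(handle, 0, 0, DDI_DMA_SYNC_FORCPU);
 */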
/*
 * Bus nexus control functions for DMA
 */

/*
 * Control operations, defined here so that devops.h can be included
 * by drivers without having to include a specific SYSDDI implementation
 * header file.
 */

enum ddi_dma_ctlops {
	DDI_DMA_FREE,		/* free reference to object */
	DDI_DMA_SYNC,		/* synchronize cache references */
	DDI_DMA_HTOC,		/* return DMA cookie for handle */
	DDI_DMA_KVADDR,		/* return kernel virtual address */
	DDI_DMA_MOVWIN,		/* change mapped DMA window on object */
	DDI_DMA_REPWIN,		/* report current window on DMA object */
	DDI_DMA_GETERR,		/* report any post-transfer DMA errors */
	DDI_DMA_COFF,		/* convert a DMA cookie to an offset */
	DDI_DMA_NEXTWIN,	/* get next window within object */
	DDI_DMA_NEXTSEG,	/* get next segment within window */
	DDI_DMA_SEGTOC,		/* return segment DMA cookie */
	DDI_DMA_RESERVE,	/* reserve some DVMA range */
	DDI_DMA_RELEASE,	/* free preallocated DVMA range */
	DDI_DMA_RESETH,		/* reset next cookie ptr in handle */
	DDI_DMA_CKSYNC,		/* sync intermediate buffer to cookies */
	DDI_DMA_IOPB_ALLOC,	/* get contiguous DMA-able memory */
	DDI_DMA_IOPB_FREE,	/* return contiguous DMA-able memory */
	DDI_DMA_SMEM_ALLOC,	/* get contiguous DMA-able memory */
	DDI_DMA_SMEM_FREE,	/* return contiguous DMA-able memory */
	DDI_DMA_SET_SBUS64,	/* 64 bit SBus support */
	DDI_DMA_REMAP,		/* remap DMA buffers after relocation */

	/*
	 * control ops for DMA engine on motherboard
	 */
	DDI_DMA_E_ACQUIRE,	/* get channel for exclusive use */
	DDI_DMA_E_FREE,		/* release channel */
	DDI_DMA_E_1STPTY,	/* setup channel for 1st party DMA */
	DDI_DMA_E_GETCB,	/* get control block for DMA engine */
	DDI_DMA_E_FREECB,	/* free control blk for DMA engine */
	DDI_DMA_E_PROG,		/* program channel of DMA engine */
	DDI_DMA_E_SWSETUP,	/* setup channel for software control */
	DDI_DMA_E_SWSTART,	/* software operation of DMA channel */
	DDI_DMA_E_ENABLE,	/* enable channel of DMA engine */
	DDI_DMA_E_STOP,		/* stop a channel of DMA engine */
	DDI_DMA_E_DISABLE,	/* disable channel of DMA engine */
	DDI_DMA_E_GETCNT,	/* get remaining xfer count */
	DDI_DMA_E_GETLIM,	/* get DMA engine limits */
	DDI_DMA_E_GETATTR	/* get DMA engine attributes */
};

/*
 * Cache attribute flags:
 *
 * IOMEM_DATA_CACHED
 * The CPU can cache the data it fetches and push it to memory at a later
 * time. This is the default attribute and is used if no cache attribute
 * is specified.
 *
 * IOMEM_DATA_UC_WR_COMBINE
 * The CPU never caches the data but writes may occur out of order or be
 * combined. It implies re-ordering.
 *
 * IOMEM_DATA_UNCACHED
 * The CPU never caches the data and has uncacheable access to memory.
 * It also implies strict ordering.
 *
 * The cache attributes are mutually exclusive, and any combination of the
 * values leads to a failure. On the sparc architecture, only
 * IOMEM_DATA_CACHED is meaningful; the others lead to a failure.
 */
#define	IOMEM_DATA_CACHED		0x10000	/* data is cached */
#define	IOMEM_DATA_UC_WR_COMBINE	0x20000	/* data is not cached, but */
						/* writes might be combined */
#define	IOMEM_DATA_UNCACHED		0x40000	/* data is not cached. */
#define	IOMEM_DATA_MASK			0xF0000	/* cache attrs mask */

/*
 * Check if either uncacheable or write-combining is specified (these
 * flags are mutually exclusive). This macro is used to override hat
 * attributes if either one is set.
 */
#define	OVERRIDE_CACHE_ATTR(attr)	\
	(attr & (IOMEM_DATA_UNCACHED | IOMEM_DATA_UC_WR_COMBINE))

/*
 * Get the cache attribute from flags. If no attribute is set,
 * return IOMEM_DATA_CACHED (default attribute).
 */
#define	IOMEM_CACHE_ATTR(flags)	\
	((flags & IOMEM_DATA_MASK) ? (flags & IOMEM_DATA_MASK) : \
	IOMEM_DATA_CACHED)
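/*
 * Example (illustrative only): for flags of
 * (DDI_DMA_CONSISTENT | IOMEM_DATA_UNCACHED), IOMEM_CACHE_ATTR()
 * yields IOMEM_DATA_UNCACHED and OVERRIDE_CACHE_ATTR() is nonzero;
 * with no cache attribute bits set, IOMEM_CACHE_ATTR() falls back
 * to IOMEM_DATA_CACHED.
 */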
#ifdef	__cplusplus
}
#endif

#endif	/* _SYS_DDIDMAREQ_H */