/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 1990, 2010, Oracle and/or its affiliates. All rights reserved.
 */

#ifndef	_SYS_DDIDMAREQ_H
#define	_SYS_DDIDMAREQ_H

#ifdef	__cplusplus
extern "C" {
#endif

/*
 * Memory Objects
 *
 * Definitions of structures that can describe
 * an object that can be mapped for DMA.
 */

/*
 * Structure describing a virtual address
 */
struct v_address {
	caddr_t		v_addr;		/* base virtual address */
	struct as	*v_as;		/* pointer to address space */
	void		*v_priv;	/* priv data for shadow I/O */
};

/*
 * Structure describing a page-based address
 */
struct pp_address {
	/*
	 * A pointer to a circularly linked list of page structures.
	 */
	struct page *pp_pp;
	uint_t pp_offset;		/* offset within first page */
};

/*
 * Structure to describe a physical memory address.
 */
struct phy_address {
	ulong_t	p_addr;			/* base physical address */
	ulong_t	p_memtype;		/* memory type */
};

/*
 * Structure to describe an array of DVMA addresses.
 * Under normal circumstances, dv_nseg will be 1.
 * dvs_start is always page aligned.
 */
struct dvma_address {
	size_t dv_off;
	size_t dv_nseg;
	struct dvmaseg {
		uint64_t dvs_start;
		size_t dvs_len;
	} *dv_seg;
};

/*
 * A union of all of the above structures.
 *
 * This union describes the relationship between
 * the kind of an address description and an object.
 */
typedef union {
	struct v_address virt_obj;	/* Some virtual address */
	struct pp_address pp_obj;	/* Some page-based address */
	struct phy_address phys_obj;	/* Some physical address */
	struct dvma_address dvma_obj;
} ddi_dma_aobj_t;

/*
 * DMA object types - used to select how the object
 * being mapped is being addressed by the IU.
 */
typedef enum {
	DMA_OTYP_VADDR = 0,	/* enforce starting value of zero */
	DMA_OTYP_PAGES,
	DMA_OTYP_PADDR,
	DMA_OTYP_BUFVADDR,
	DMA_OTYP_DVADDR
} ddi_dma_atyp_t;

/*
 * A compact package to describe an object that is to be mapped for DMA.
 */
typedef struct {
	uint_t		dmao_size;	/* size, in bytes, of the object */
	ddi_dma_atyp_t	dmao_type;	/* type of object */
	ddi_dma_aobj_t	dmao_obj;	/* the object described */
} ddi_dma_obj_t;
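/*
 * Illustrative sketch (not part of the original header): a description
 * of a kernel-resident buffer would fill in a ddi_dma_obj_t roughly as
 * below. The names kaddr and len are hypothetical; a null v_as is the
 * convention for the kernel's own address space.
 *
 *	ddi_dma_obj_t obj;
 *
 *	obj.dmao_size = len;			(buffer length in bytes)
 *	obj.dmao_type = DMA_OTYP_VADDR;		(addressed virtually)
 *	obj.dmao_obj.virt_obj.v_addr = kaddr;	(kernel virtual address)
 *	obj.dmao_obj.virt_obj.v_as = NULL;	(kernel address space)
 *	obj.dmao_obj.virt_obj.v_priv = NULL;	(no shadow I/O data)
 */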

/*
 * DMA addressing limits.
 *
 * This structure describes the constraints that a particular device's
 * DMA engine has to its parent so that the parent may correctly set
 * things up for a DMA mapping. Each parent may in turn modify the
 * constraints listed in a DMA request structure in order to describe
 * to its parent any changed or additional constraints. The rules
 * are that each parent may modify a constraint in order to further
 * constrain things (e.g., picking a more limited address range than
 * that permitted by the child), but that the parent may not ignore
 * a child's constraints.
 *
 * A particular constraint that we do *not* address is whether or not
 * a requested mapping is too large for a DMA engine's counter to
 * correctly track. It is still up to each driver to explicitly handle
 * transfers that are too large for its own hardware to deal with directly.
 *
 * The mapping routines that are cognizant of this structure will
 * copy any user defined limits structure if they need to modify
 * the fields (as alluded to above).
 *
 * A note as to how to define constraints:
 *
 * How you define the constraints for your device depends on how you
 * define your device. For example, you may have an SBus card with a
 * device on it that addresses only the bottom 16 MB of virtual DMA space.
 * However, if the card also has ancillary circuitry that pulls the high 8
 * bits of address lines high, the more correct expression for your device
 * is that it addresses [0xff000000..0xffffffff] rather than [0..0x00ffffff].
 */
#if defined(__sparc)
typedef struct ddi_dma_lim {

	/*
	 * Low range of 32 bit addressing capability.
	 */
	uint_t	dlim_addr_lo;

	/*
	 * Upper inclusive bound of addressing capability. It is an
	 * inclusive boundary limit to allow for the addressing range
	 * [0..0xffffffff] to be specified in preference to [0..0].
	 */
	uint_t	dlim_addr_hi;

	/*
	 * Inclusive upper bound with which the DMA engine's counter acts as
	 * a register.
	 *
	 * This handles the case where an upper portion of a DMA address
	 * register is a latch instead of being a full 32 bit register
	 * (e.g., the upper 8 bits may remain constant while the lower
	 * 24 bits are the real address register).
	 *
	 * This essentially gives a hint about segment limitations
	 * to the mapping routines.
	 */
	uint_t	dlim_cntr_max;

	/*
	 * DMA burst sizes.
	 *
	 * At the time of a mapping request, this tag defines the possible
	 * DMA burst cycle sizes that the requestor's DMA engine can
	 * emit. The format of the data is binary encoding of burst sizes
	 * assumed to be powers of two. That is, if a DMA engine is capable
	 * of doing 1, 2, 4 and 16 byte transfers, the encoding would be 0x17.
	 *
	 * As the mapping request is handled by intervening nexi, the
	 * burstsizes value may be modified. Prior to enabling DMA for
	 * the specific device, the driver that owns the DMA engine should
	 * check (via ddi_dma_burstsizes(9F)) what the allowed burstsizes
	 * have become and program their DMA engine appropriately.
	 */
	uint_t	dlim_burstsizes;

	/*
	 * Minimum effective DMA transfer size, in units of bytes.
	 *
	 * This value specifies the minimum effective granularity of the
	 * DMA engine. It is distinct from dlim_burstsizes in that it
	 * describes the minimum amount of access a DMA transfer will
	 * effect. dlim_burstsizes describes in what electrical fashion
	 * the DMA engine might perform its accesses, while dlim_minxfer
	 * describes the minimum amount of memory that can be touched by
	 * the DMA transfer.
	 *
	 * As the mapping request is handled by intervening nexi, the
	 * dlim_minxfer value may be modified contingent upon the presence
	 * (and use) of I/O caches and DMA write buffers in between the
	 * DMA engine and the object that DMA is being performed on.
	 */
	uint_t	dlim_minxfer;

	/*
	 * Expected average data rate for this DMA engine
	 * while transferring data.
	 *
	 * This is used as a hint for a number of operations that might
	 * want to know the possible optimal latency requirements of this
	 * device. A value of zero will be interpreted as a 'do not care'.
	 */
	uint_t	dlim_dmaspeed;

} ddi_dma_lim_t;
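/*
 * For illustration (not part of the original header): the burstsize
 * encoding above is one bit per supported power-of-two burst, so an
 * engine capable of 1-, 2-, 4- and 16-byte bursts would report
 *
 *	(1 << 0) | (1 << 1) | (1 << 2) | (1 << 4) == 0x17
 *
 * and a driver can test for, e.g., 16-byte burst support with
 * (dlim_burstsizes & 0x10).
 */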

#elif defined(__x86)

/*
 * values for dlim_minxfer
 */
#define	DMA_UNIT_8	1
#define	DMA_UNIT_16	2
#define	DMA_UNIT_32	4

/*
 * Version number
 */
#define	DMALIM_VER0	((0x86000000) + 0)

typedef struct ddi_dma_lim {

	/*
	 * Low range of 32 bit addressing capability.
	 */
	uint_t	dlim_addr_lo;

	/*
	 * Upper inclusive bound of 32 bit addressing capability.
	 *
	 * The ISA nexus restricts this to 0x00ffffff, since this bus has
	 * only 24 address lines. This enforces the 16 MB address limitation.
	 * The EISA nexus restricts this to 0xffffffff.
	 */
	uint_t	dlim_addr_hi;

	/*
	 * DMA engine counter not used; set to 0
	 */
	uint_t	dlim_cntr_max;

	/*
	 * DMA burst sizes not used; set to 1
	 */
	uint_t	dlim_burstsizes;

	/*
	 * Minimum effective DMA transfer size.
	 *
	 * This value specifies the minimum effective granularity of the
	 * DMA engine. It is distinct from dlim_burstsizes in that it
	 * describes the minimum amount of access a DMA transfer will
	 * effect. dlim_burstsizes describes in what electrical fashion
	 * the DMA engine might perform its accesses, while dlim_minxfer
	 * describes the minimum amount of memory that can be touched by
	 * the DMA transfer.
	 *
	 * This value also implies the required address alignment.
	 * The number of bytes transferred is assumed to be
	 *	dlim_minxfer * (DMA engine count)
	 *
	 * It should be set to DMA_UNIT_8, DMA_UNIT_16, or DMA_UNIT_32.
	 */
	uint_t	dlim_minxfer;

	/*
	 * Expected average data rate for this DMA engine
	 * while transferring data.
	 *
	 * This is used as a hint for a number of operations that might
	 * want to know the possible optimal latency requirements of this
	 * device. A value of zero will be interpreted as a 'do not care'.
	 */
	uint_t	dlim_dmaspeed;


	/*
	 * Version number of this structure
	 */
	uint_t	dlim_version;	/* = 0x86 << 24 + 0 */

	/*
	 * Inclusive upper bound with which the DMA engine's address acts as
	 * a register.
	 * This handles the case where an upper portion of a DMA address
	 * register is a latch instead of being a full 32 bit register
	 * (e.g., the upper 16 bits remain constant while the lower 16 bits
	 * are incremented for each DMA transfer).
	 *
	 * The ISA nexus restricts only 3rd-party DMA requests to 0x0000ffff,
	 * since the ISA DMA engine has a 16-bit register for low address and
	 * an 8-bit latch for high address. This enforces the first 64 KB
	 * limitation (address boundary).
	 * The EISA nexus restricts only 3rd-party DMA requests to 0xffffffff.
	 */
	uint_t	dlim_adreg_max;

	/*
	 * Maximum transfer count that the DMA engine can handle.
	 *
	 * The ISA nexus restricts only 3rd-party DMA requests to 0x0000ffff,
	 * since the ISA DMA engine has a 16-bit register for counting.
	 * This enforces the other 64 KB limitation (count size).
	 * The EISA nexus restricts only 3rd-party DMA requests to 0x00ffffff,
	 * since the EISA DMA engine has a 24-bit register for counting.
	 *
	 * This transfer count limitation is a per segment limitation.
	 * It can also be used to restrict the size of segments.
	 *
	 * This is used as a bit mask, so it must be a power of 2, minus 1.
	 */
	uint_t	dlim_ctreg_max;

	/*
	 * Granularity of DMA transfer, in units of bytes.
	 *
	 * Breakup sizes must be multiples of this value.
	 * If no scatter/gather capability is specified, then the size of
	 * each DMA transfer must be a multiple of this value.
	 *
	 * If there is scatter/gather capability, then a single cookie cannot
	 * be smaller in size than the minimum xfer value, and may be less
	 * than the granularity value. The total transfer length of the
	 * scatter/gather list should be a multiple of the granularity value;
	 * use dlim_sgllen to specify the length of the scatter/gather list.
	 *
	 * This value should be equal to the sector size of the device.
	 */
	uint_t	dlim_granular;

	/*
	 * Length of scatter/gather list
	 *
	 * This value specifies the number of segments or cookies that a DMA
	 * engine can consume in one i/o request to the device. For 3rd-party
	 * DMA that uses the bus nexus this should be set to 1. Devices with
	 * 1st-party DMA capability should specify the number of entries in
	 * their scatter/gather lists. The breakup routine will ensure that
	 * each group of dlim_sgllen cookies (within a DMA window) will have
	 * a total transfer length that is a multiple of dlim_granular.
	 *
	 *	< 0  :	tbd
	 *	= 0  :	breakup is for PIO.
	 *	= 1  :	breakup is for DMA engine with no scatter/gather
	 *		capability.
	 *	>= 2 :	breakup is for DMA engine with scatter/gather
	 *		capability; value is max number of entries in list.
	 *
	 * Note that this list length is not dependent on the DMA window
	 * size. The size of the DMA window is based on resources consumed,
	 * such as intermediate buffers. Several s/g lists may exist within
	 * a window. But the end of a window does imply the end of the s/g
	 * list.
	 */
	short	dlim_sgllen;

	/*
	 * Size of device i/o request
	 *
	 * This value indicates the maximum number of bytes the device
	 * can transmit/receive for one i/o command. This limitation is
	 * significant only if it is less than (dlim_ctreg_max * dlim_sgllen).
	 */
	uint_t	dlim_reqsize;

} ddi_dma_lim_t;

#else
#error "struct ddi_dma_lim not defined for this architecture"
#endif	/* defined(__sparc) */
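/*
 * Illustrative sketch (not part of the original header), using C99
 * designated initializers: a hypothetical x86 driver for a 1st-party
 * DMA device with full 32-bit addressing, 512-byte sectors and a
 * 17-entry scatter/gather list might describe its limits as
 *
 *	static ddi_dma_lim_t hypothetical_dma_lim = {
 *		.dlim_addr_lo = 0,
 *		.dlim_addr_hi = 0xffffffffu,
 *		.dlim_cntr_max = 0,		(not used on x86)
 *		.dlim_burstsizes = 1,		(not used on x86)
 *		.dlim_minxfer = DMA_UNIT_8,
 *		.dlim_dmaspeed = 0,		(do not care)
 *		.dlim_version = DMALIM_VER0,
 *		.dlim_adreg_max = 0xffffffffu,
 *		.dlim_ctreg_max = 0x00ffffffu,
 *		.dlim_granular = 512,
 *		.dlim_sgllen = 17,
 *		.dlim_reqsize = 0x01000000
 *	};
 */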

/*
 * Flags definition for dma_attr_flags
 */

/*
 * return physical DMA address on platforms
 * which support DVMA
 */
#define	DDI_DMA_FORCE_PHYSICAL		0x0100

/*
 * An error will be flagged for DMA data path errors
 */
#define	DDI_DMA_FLAGERR			0x200

/*
 * Enable relaxed ordering
 */
#define	DDI_DMA_RELAXED_ORDERING	0x400


/*
 * Consolidation private x86 only flag which will cause a bounce buffer
 * (paddr < dma_attr_seg) to be used if the buffer passed to the bind
 * operation contains pages both above and below dma_attr_seg. If this flag
 * is set, dma_attr_seg must be <= dma_attr_addr_hi.
 */
#define	_DDI_DMA_BOUNCE_ON_SEG		0x8000

#define	DMA_ATTR_V0		0
#define	DMA_ATTR_VERSION	DMA_ATTR_V0

typedef struct ddi_dma_attr {
	uint_t		dma_attr_version;	/* version number */
	uint64_t	dma_attr_addr_lo;	/* low DMA address range */
	uint64_t	dma_attr_addr_hi;	/* high DMA address range */
	uint64_t	dma_attr_count_max;	/* DMA counter register */
	uint64_t	dma_attr_align;		/* DMA address alignment */
	uint_t		dma_attr_burstsizes;	/* DMA burstsizes */
	uint32_t	dma_attr_minxfer;	/* min effective DMA size */
	uint64_t	dma_attr_maxxfer;	/* max DMA xfer size */
	uint64_t	dma_attr_seg;		/* segment boundary */
	int		dma_attr_sgllen;	/* s/g length */
	uint32_t	dma_attr_granular;	/* granularity of device */
	uint_t		dma_attr_flags;		/* Bus specific DMA flags */
} ddi_dma_attr_t;
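/*
 * Illustrative sketch (not part of the original header): drivers
 * conventionally describe their DMA engines with a static attribute
 * structure initialized in field order; the device values below are
 * hypothetical.
 *
 *	static ddi_dma_attr_t hypothetical_dma_attr = {
 *		DMA_ATTR_V0,		(dma_attr_version)
 *		0x0000000000000000ull,	(dma_attr_addr_lo)
 *		0x00000000ffffffffull,	(dma_attr_addr_hi: 32-bit device)
 *		0x00000000ffffffffull,	(dma_attr_count_max)
 *		0x0000000000000001ull,	(dma_attr_align: byte alignment)
 *		0x17,			(dma_attr_burstsizes: 1, 2, 4, 16)
 *		0x00000001,		(dma_attr_minxfer)
 *		0x00000000ffffffffull,	(dma_attr_maxxfer)
 *		0x00000000ffffffffull,	(dma_attr_seg)
 *		17,			(dma_attr_sgllen)
 *		512,			(dma_attr_granular)
 *		0			(dma_attr_flags)
 *	};
 */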

/*
 * Handy macro to set a maximum bit value (should be elsewhere)
 *
 * Clear off all bits lower than 'mybit' in val; if there are no
 * bits higher than or equal to mybit in val then set mybit. Assumes
 * mybit equals some power of 2 and is not zero.
 */
#define	maxbit(val, mybit)	\
	((val) & ~((mybit)-1)) | ((((val) & ~((mybit)-1)) == 0) ? (mybit) : 0)

/*
 * Handy macro to set a minimum bit value (should be elsewhere)
 *
 * Clear off all bits higher than 'mybit' in val; if there are no
 * bits lower than or equal to mybit in val then set mybit. Assumes
 * mybit equals some power of 2 and is not zero.
 */
#define	minbit(val, mybit)	\
	(((val)&((mybit)|((mybit)-1))) | \
	((((val) & ((mybit)-1)) == 0) ? (mybit) : 0))
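/*
 * Worked examples (not part of the original header), with mybit == 0x8:
 *
 *	maxbit(0x2c, 0x8) == 0x28	(bits below 0x8 cleared)
 *	maxbit(0x06, 0x8) == 0x08	(nothing >= 0x8, so 0x8 is set)
 *	minbit(0x2c, 0x8) == 0x0c	(bits above 0x8 cleared)
 *	minbit(0x30, 0x8) == 0x08	(nothing <= 0x8, so 0x8 is set)
 */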

/*
 * Structure of a request to map an object for DMA.
 */
typedef struct ddi_dma_req {
	/*
	 * Caller's DMA engine constraints.
	 *
	 * If there are no particular constraints to the caller's DMA
	 * engine, this field may be set to NULL. The implementation DMA
	 * setup functions will then select a set of standard beginning
	 * constraints.
	 *
	 * In either case, as the mapping proceeds, the initial DMA
	 * constraints may become more restrictive as each intervening
	 * nexus might add further restrictions.
	 */
	ddi_dma_lim_t	*dmar_limits;

	/*
	 * Contains the information passed to the DMA mapping allocation
	 * routine(s).
	 */
	uint_t	dmar_flags;

	/*
	 * Callback function. A caller of the DMA mapping functions must
	 * specify by filling in this field whether the allocation routines
	 * can sleep awaiting mapping resources, must *not* sleep awaiting
	 * resources, or may *not* sleep awaiting any resources and must
	 * call the function specified by dmar_fp with the argument
	 * dmar_arg when resources might have become available at a future
	 * time.
	 */
	int	(*dmar_fp)();

	caddr_t	dmar_arg;	/* Callback function argument */

	/*
	 * Description of the object to be mapped for DMA.
	 * Must be last in this structure in case the
	 * union ddi_dma_obj_t changes in the future.
	 */
	ddi_dma_obj_t	dmar_object;

} ddi_dma_req_t;

/*
 * Defines for the DMA mapping allocation functions
 *
 * If a DMA callback function is set to anything other than the following
 * defines then it is assumed that one wishes a callback and is providing
 * a function address.
 */
#ifdef	__STDC__
#define	DDI_DMA_DONTWAIT	((int (*)(caddr_t))0)
#define	DDI_DMA_SLEEP		((int (*)(caddr_t))1)
#else
#define	DDI_DMA_DONTWAIT	((int (*)())0)
#define	DDI_DMA_SLEEP		((int (*)())1)
#endif

/*
 * Return values from callback functions.
 */
#define	DDI_DMA_CALLBACK_RUNOUT	0
#define	DDI_DMA_CALLBACK_DONE	1
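/*
 * Illustrative sketch (not part of the original header): these values
 * are passed as the 'waitfp' argument of functions such as
 * ddi_dma_alloc_handle(9F). A driver attach routine that is allowed to
 * block might use, with the hypothetical attribute structure sketched
 * earlier,
 *
 *	ddi_dma_handle_t handle;	(from <sys/dditypes.h>)
 *
 *	if (ddi_dma_alloc_handle(dip, &hypothetical_dma_attr,
 *	    DDI_DMA_SLEEP, NULL, &handle) != DDI_SUCCESS)
 *		return (DDI_FAILURE);
 *
 * whereas code that must not sleep would pass DDI_DMA_DONTWAIT, or a
 * real callback function that returns DDI_DMA_CALLBACK_RUNOUT or
 * DDI_DMA_CALLBACK_DONE.
 */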

/*
 * Flag definitions for the allocation functions.
 */
#define	DDI_DMA_WRITE		0x0001	/* Direction memory --> IO */
#define	DDI_DMA_READ		0x0002	/* Direction IO --> memory */
#define	DDI_DMA_RDWR		(DDI_DMA_READ | DDI_DMA_WRITE)

/*
 * If possible, establish a MMU redzone after the mapping (to protect
 * against cheap DMA hardware that might get out of control).
 */
#define	DDI_DMA_REDZONE		0x0004

/*
 * A partial allocation is allowed. That is, if the size of the object
 * exceeds the mapping resources available, only map a portion of the
 * object and return status indicating that this took place. The caller
 * can use the functions ddi_dma_numwin(9F) and ddi_dma_getwin(9F) to
 * change, at a later point, the actual mapped portion of the object.
 *
 * The mapped portion begins at offset 0 of the object.
 */
#define	DDI_DMA_PARTIAL		0x0008

/*
 * Map the object for byte consistent access. Note that explicit
 * synchronization (via ddi_dma_sync(9F)) will still be required.
 * Consider this flag to be a hint to the mapping routines as to
 * the intended use of the mapping.
 *
 * Normal data transfers can usually be considered to use 'streaming'
 * modes of operations. They start at a specific point, transfer a
 * fairly large amount of data sequentially, and then stop (usually
 * on a well aligned boundary).
 *
 * Control mode data transfers (for memory resident device control blocks,
 * e.g., ethernet message descriptors) do not access memory in such
 * a streaming sequential fashion. Instead, they tend to modify a few
 * words or bytes, move around and maybe modify a few more.
 *
 * There are many machine implementations that make this difficult to
 * control in a generic and seamless fashion. Therefore, explicit
 * synchronization steps (via ddi_dma_sync(9F)) are still required (even
 * if you ask for a byte-consistent mapping) in order to make the view of
 * the memory object shared between a CPU and a DMA master consistent.
 * However, judicious use of this flag can give sufficient hints to
 * the mapping routines to attempt to pick the most efficacious mapping
 * such that the synchronization steps are as efficient as possible.
 */
#define	DDI_DMA_CONSISTENT	0x0010

/*
 * Some DMA mappings have to be 'exclusive' access.
 */
#define	DDI_DMA_EXCLUSIVE	0x0020

/*
 * Sequential, unidirectional, block-sized and block aligned transfers
 */
#define	DDI_DMA_STREAMING	0x0040

/*
 * Support for 64-bit SBus devices
 */
#define	DDI_DMA_SBUS_64BIT	0x2000

/*
 * Return values from the mapping allocation functions.
 */

/*
 * succeeded in satisfying request
 */
#define	DDI_DMA_MAPPED		0

/*
 * Mapping is legitimate (for advisory calls).
 */
#define	DDI_DMA_MAPOK		0

/*
 * Succeeded in mapping a portion of the request.
 */
#define	DDI_DMA_PARTIAL_MAP	1

/*
 * indicates end of window/segment list
 */
#define	DDI_DMA_DONE		2

/*
 * No resources to map request.
 */
#define	DDI_DMA_NORESOURCES	-1

/*
 * Can't establish a mapping to the specified object
 * (no specific reason).
 */
#define	DDI_DMA_NOMAPPING	-2

/*
 * The request is too big to be mapped.
 */
#define	DDI_DMA_TOOBIG		-3

/*
 * The request is too small to be mapped.
 */
#define	DDI_DMA_TOOSMALL	-4

/*
 * The request cannot be mapped because the object
 * is locked against mapping by another DMA master.
 */
#define	DDI_DMA_LOCKED		-5

/*
 * The request cannot be mapped because the limits
 * structure has bogus values.
 */
#define	DDI_DMA_BADLIMITS	-6

/*
 * the segment/window pointer is stale
 */
#define	DDI_DMA_STALE		-7

/*
 * The system can't allocate DMA resources using
 * the given DMA attributes
 */
#define	DDI_DMA_BADATTR		-8

/*
 * A DMA handle is already used for a DMA
 */
#define	DDI_DMA_INUSE		-9


/*
 * DVMA disabled or not supported. use physical DMA
 */
#define	DDI_DMA_USE_PHYSICAL	-10


/*
 * In order for the access to a memory object to be consistent
 * between a device and a CPU, the function ddi_dma_sync(9F)
 * must be called upon the DMA handle. The following flags
 * define whose view of the object should be made consistent.
 * There are different flags here because on different machines
 * there are definite performance implications of how long
 * such synchronization takes.
 *
 * DDI_DMA_SYNC_FORDEV makes all device references to the object
 * mapped by the DMA handle up to date. It should be used by a
 * driver after a cpu modifies the memory object (over the range
 * specified by the other arguments to the ddi_dma_sync(9F) call).
 *
 * DDI_DMA_SYNC_FORCPU makes all cpu references to the object
 * mapped by the DMA handle up to date. It should be used
 * by a driver after the receipt of data from the device to
 * the memory object is done (over the range specified by
 * the other arguments to the ddi_dma_sync(9F) call).
 *
 * If the only mapping that concerns the driver is one for the
 * kernel (such as memory allocated by ddi_iopb_alloc(9F)), the
 * flag DDI_DMA_SYNC_FORKERNEL can be used. This is a hint to the
 * system that if it can synchronize the kernel's view faster
 * than the CPU's view, it can do so; otherwise it acts the
 * same as DDI_DMA_SYNC_FORCPU. DDI_DMA_SYNC_FORKERNEL might
 * speed up the synchronization of kernel mappings in case of
 * non IO-coherent CPU caches.
 */
#define	DDI_DMA_SYNC_FORDEV	0x0
#define	DDI_DMA_SYNC_FORCPU	0x1
#define	DDI_DMA_SYNC_FORKERNEL	0x2
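/*
 * Illustrative sketch (not part of the original header): after a device
 * has written into a bound buffer, a driver would typically make the
 * CPU's view current before reading the data, e.g.
 *
 *	(void) ddi_dma_sync(handle, 0, len, DDI_DMA_SYNC_FORCPU);
 *	process_data(buf, len);		(hypothetical consumer)
 *
 * and, symmetrically, call ddi_dma_sync(handle, off, len,
 * DDI_DMA_SYNC_FORDEV) after the CPU writes data the device will read.
 */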

/*
 * Bus nexus control functions for DMA
 */

/*
 * Control operations, defined here so that devops.h can be included
 * by drivers without having to include a specific SYSDDI implementation
 * header file.
 */

enum ddi_dma_ctlops {
	DDI_DMA_FREE,		/* free reference to object		*/
	DDI_DMA_SYNC,		/* synchronize cache references		*/
	DDI_DMA_HTOC,		/* return DMA cookie for handle		*/
	DDI_DMA_KVADDR,		/* return kernel virtual address	*/
	DDI_DMA_MOVWIN,		/* change mapped DMA window on object	*/
	DDI_DMA_REPWIN,		/* report current window on DMA object	*/
	DDI_DMA_GETERR,		/* report any post-transfer DMA errors	*/
	DDI_DMA_COFF,		/* convert a DMA cookie to an offset	*/
	DDI_DMA_NEXTWIN,	/* get next window within object	*/
	DDI_DMA_NEXTSEG,	/* get next segment within window	*/
	DDI_DMA_SEGTOC,		/* return segment DMA cookie		*/
	DDI_DMA_RESERVE,	/* reserve some DVMA range		*/
	DDI_DMA_RELEASE,	/* free preallocated DVMA range		*/
	DDI_DMA_RESETH,		/* reset next cookie ptr in handle	*/
	DDI_DMA_CKSYNC,		/* sync intermediate buffer to cookies	*/
	DDI_DMA_IOPB_ALLOC,	/* get contiguous DMA-able memory	*/
	DDI_DMA_IOPB_FREE,	/* return contiguous DMA-able memory	*/
	DDI_DMA_SMEM_ALLOC,	/* get contiguous DMA-able memory	*/
	DDI_DMA_SMEM_FREE,	/* return contiguous DMA-able memory	*/
	DDI_DMA_SET_SBUS64,	/* 64 bit SBus support			*/
	DDI_DMA_REMAP,		/* remap DMA buffers after relocation	*/

	/*
	 * control ops for DMA engine on motherboard
	 */
	DDI_DMA_E_ACQUIRE,	/* get channel for exclusive use	*/
	DDI_DMA_E_FREE,		/* release channel			*/
	DDI_DMA_E_1STPTY,	/* setup channel for 1st party DMA	*/
	DDI_DMA_E_GETCB,	/* get control block for DMA engine	*/
	DDI_DMA_E_FREECB,	/* free control blk for DMA engine	*/
	DDI_DMA_E_PROG,		/* program channel of DMA engine	*/
	DDI_DMA_E_SWSETUP,	/* setup channel for software control	*/
	DDI_DMA_E_SWSTART,	/* software operation of DMA channel	*/
	DDI_DMA_E_ENABLE,	/* enable channel of DMA engine		*/
	DDI_DMA_E_STOP,		/* stop a channel of DMA engine		*/
	DDI_DMA_E_DISABLE,	/* disable channel of DMA engine	*/
	DDI_DMA_E_GETCNT,	/* get remaining xfer count		*/
	DDI_DMA_E_GETLIM,	/* get DMA engine limits		*/
	DDI_DMA_E_GETATTR	/* get DMA engine attributes		*/
};

/*
 * Cache attribute flags:
 *
 * IOMEM_DATA_CACHED
 * The CPU can cache the data it fetches and push it to memory at a later
 * time. This is the default attribute and used if no cache attribute is
 * specified.
 *
 * IOMEM_DATA_UC_WR_COMBINE
 * The CPU never caches the data but writes may occur out of order or be
 * combined. It implies re-ordering.
 *
 * IOMEM_DATA_UNCACHED
 * The CPU never caches the data and has uncacheable access to memory.
 * It also implies strict ordering.
 *
 * The cache attributes are mutually exclusive; any combination of the
 * values leads to a failure. On the sparc architecture, only
 * IOMEM_DATA_CACHED is meaningful; the others lead to a failure.
 */
#define	IOMEM_DATA_CACHED		0x10000	/* data is cached */
#define	IOMEM_DATA_UC_WR_COMBINE	0x20000	/* data is not cached, but */
						/* writes might be combined */
#define	IOMEM_DATA_UNCACHED		0x40000	/* data is not cached. */
#define	IOMEM_DATA_MASK			0xF0000	/* cache attrs mask */

/*
 * Check if either uncacheable or write-combining is specified (those flags
 * are mutually exclusive). This macro is used to override hat attributes
 * if either one is set.
 */
#define	OVERRIDE_CACHE_ATTR(attr)	\
	(attr & (IOMEM_DATA_UNCACHED | IOMEM_DATA_UC_WR_COMBINE))

/*
 * Get the cache attribute from flags. If no attribute is set,
 * return IOMEM_DATA_CACHED (the default attribute).
 */
#define	IOMEM_CACHE_ATTR(flags)	\
	((flags & IOMEM_DATA_MASK) ? (flags & IOMEM_DATA_MASK) : \
	    IOMEM_DATA_CACHED)
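/*
 * Worked examples (not part of the original header):
 *
 *	IOMEM_CACHE_ATTR(0)			== IOMEM_DATA_CACHED
 *	IOMEM_CACHE_ATTR(IOMEM_DATA_UNCACHED)	== IOMEM_DATA_UNCACHED
 *	OVERRIDE_CACHE_ATTR(IOMEM_DATA_CACHED)	== 0	(no override)
 *	OVERRIDE_CACHE_ATTR(IOMEM_DATA_UC_WR_COMBINE) != 0	(override)
 */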

#ifdef	__cplusplus
}
#endif

#endif	/* _SYS_DDIDMAREQ_H */