/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * x86 root nexus driver
 */

#include <sys/sysmacros.h>
#include <sys/conf.h>
#include <sys/autoconf.h>
#include <sys/sysmacros.h>
#include <sys/debug.h>
#include <sys/psw.h>
#include <sys/ddidmareq.h>
#include <sys/promif.h>
#include <sys/devops.h>
#include <sys/kmem.h>
#include <sys/cmn_err.h>
#include <vm/seg.h>
#include <vm/seg_kmem.h>
#include <vm/seg_dev.h>
#include <sys/vmem.h>
#include <sys/mman.h>
#include <vm/hat.h>
#include <vm/as.h>
#include <vm/page.h>
#include <sys/avintr.h>
#include <sys/errno.h>
#include <sys/modctl.h>
#include <sys/ddi_impldefs.h>
#include <sys/sunddi.h>
#include <sys/sunndi.h>
#include <sys/mach_intr.h>
#include <sys/psm.h>
#include <sys/ontrap.h>
#include <sys/atomic.h>
#include <sys/sdt.h>
#include <sys/rootnex.h>
#include <vm/hat_i86.h>


/*
 * enable/disable extra checking of function parameters. Useful for debugging
 * drivers.
 */
#ifdef	DEBUG
int rootnex_alloc_check_parms = 1;
int rootnex_bind_check_parms = 1;
int rootnex_bind_check_inuse = 1;
int rootnex_unbind_verify_buffer = 0;
int rootnex_sync_check_parms = 1;
#else
int rootnex_alloc_check_parms = 0;
int rootnex_bind_check_parms = 0;
int rootnex_bind_check_inuse = 0;
int rootnex_unbind_verify_buffer = 0;
int rootnex_sync_check_parms = 0;
#endif

/* Semi-temporary patchables to phase in bug fixes, test drivers, etc. */
int rootnex_bind_fail = 1;
int rootnex_bind_warn = 1;
uint8_t *rootnex_warn_list;
/* bitmasks for rootnex_warn_list. Up to 8 different warnings with uint8_t */
#define	ROOTNEX_BIND_WARNING	(0x1 << 0)

/*
 * revert back to old broken behavior of always sync'ing entire copy buffer.
 * This is useful if we have a buggy driver which doesn't correctly pass in
 * the offset and size into ddi_dma_sync().
 */
int rootnex_sync_ignore_params = 0;

/*
 * maximum size that we will allow for a copy buffer. Can be patched on the
 * fly.
 */
size_t rootnex_max_copybuf_size = 0x100000;

/*
 * For the 64-bit kernel, pre-alloc enough cookies for a 256K buffer plus 1
 * page for alignment. For the 32-bit kernel, pre-alloc enough cookies for a
 * 64K buffer plus 1 page for alignment (we have less kernel space in a 32-bit
 * kernel). Allocate enough windows to handle a 256K buffer w/ at least 65
 * sgllen DMA engine, and enough copybuf buffer state pages to handle 2 pages
 * (< 8K). We will still need to allocate the copy buffer during bind though
 * (if we need one). These can only be modified in /etc/system before rootnex
 * attach.
 */
#if defined(__amd64)
int rootnex_prealloc_cookies = 65;
int rootnex_prealloc_windows = 4;
int rootnex_prealloc_copybuf = 2;
#else
int rootnex_prealloc_cookies = 33;
int rootnex_prealloc_windows = 4;
int rootnex_prealloc_copybuf = 2;
#endif
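/*
 * Illustrative note (not part of the original source): since the prealloc
 * tunables above can only be changed in /etc/system before rootnex attaches,
 * a hypothetical tuning entry would use the standard "set module:variable"
 * form, e.g.:
 *
 *	set rootnex:rootnex_prealloc_cookies = 129
 *
 * The value shown is only an example; the defaults above are what the driver
 * actually ships with.
 */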
/* driver global state */
static rootnex_state_t *rootnex_state;

/* shortcut to rootnex counters */
static uint64_t *rootnex_cnt;

/*
 * XXX - does x86 even need these or are they left over from the SPARC days?
 */
/* statically defined integer/boolean properties for the root node */
static rootnex_intprop_t rootnex_intprp[] = {
	{ "PAGESIZE", PAGESIZE },
	{ "MMU_PAGESIZE", MMU_PAGESIZE },
	{ "MMU_PAGEOFFSET", MMU_PAGEOFFSET },
	{ DDI_RELATIVE_ADDRESSING, 1 },
};
#define	NROOT_INTPROPS	(sizeof (rootnex_intprp) / sizeof (rootnex_intprop_t))


static struct cb_ops rootnex_cb_ops = {
	nodev,		/* open */
	nodev,		/* close */
	nodev,		/* strategy */
	nodev,		/* print */
	nodev,		/* dump */
	nodev,		/* read */
	nodev,		/* write */
	nodev,		/* ioctl */
	nodev,		/* devmap */
	nodev,		/* mmap */
	nodev,		/* segmap */
	nochpoll,	/* chpoll */
	ddi_prop_op,	/* cb_prop_op */
	NULL,		/* struct streamtab */
	D_NEW | D_MP | D_HOTPLUG, /* compatibility flags */
	CB_REV,		/* Rev */
	nodev,		/* cb_aread */
	nodev		/* cb_awrite */
};

static int rootnex_map(dev_info_t *dip, dev_info_t *rdip, ddi_map_req_t *mp,
    off_t offset, off_t len, caddr_t *vaddrp);
static int rootnex_map_fault(dev_info_t *dip, dev_info_t *rdip,
    struct hat *hat, struct seg *seg, caddr_t addr,
    struct devpage *dp, pfn_t pfn, uint_t prot, uint_t lock);
static int rootnex_dma_map(dev_info_t *dip, dev_info_t *rdip,
    struct ddi_dma_req *dmareq, ddi_dma_handle_t *handlep);
static int rootnex_dma_allochdl(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_attr_t *attr, int (*waitfp)(caddr_t), caddr_t arg,
    ddi_dma_handle_t *handlep);
static int rootnex_dma_freehdl(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle);
static int rootnex_dma_bindhdl(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle, struct ddi_dma_req *dmareq,
    ddi_dma_cookie_t *cookiep, uint_t *ccountp);
static int rootnex_dma_unbindhdl(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle);
static int rootnex_dma_sync(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle, off_t off, size_t len, uint_t cache_flags);
static int rootnex_dma_win(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle, uint_t win, off_t *offp, size_t *lenp,
    ddi_dma_cookie_t *cookiep, uint_t *ccountp);
static int rootnex_dma_mctl(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle, enum ddi_dma_ctlops request,
    off_t *offp, size_t *lenp, caddr_t *objp, uint_t cache_flags);
static int rootnex_ctlops(dev_info_t *dip, dev_info_t *rdip,
    ddi_ctl_enum_t ctlop, void *arg, void *result);
static int rootnex_intr_ops(dev_info_t *pdip, dev_info_t *rdip,
    ddi_intr_op_t intr_op, ddi_intr_handle_impl_t *hdlp, void *result);
static struct bus_ops rootnex_bus_ops = {
	BUSO_REV,
	rootnex_map,
	NULL,
	NULL,
	NULL,
	rootnex_map_fault,
	rootnex_dma_map,
	rootnex_dma_allochdl,
	rootnex_dma_freehdl,
	rootnex_dma_bindhdl,
	rootnex_dma_unbindhdl,
	rootnex_dma_sync,
	rootnex_dma_win,
	rootnex_dma_mctl,
	rootnex_ctlops,
	ddi_bus_prop_op,
	i_ddi_rootnex_get_eventcookie,
	i_ddi_rootnex_add_eventcall,
	i_ddi_rootnex_remove_eventcall,
	i_ddi_rootnex_post_event,
	0,			/* bus_intr_ctl */
	0,			/* bus_config */
	0,			/* bus_unconfig */
	NULL,			/* bus_fm_init */
	NULL,			/* bus_fm_fini */
	NULL,			/* bus_fm_access_enter */
	NULL,			/* bus_fm_access_exit */
	NULL,			/* bus_power */
	rootnex_intr_ops	/* bus_intr_op */
};

static int rootnex_attach(dev_info_t *dip, ddi_attach_cmd_t cmd);
static int rootnex_detach(dev_info_t *dip, ddi_detach_cmd_t cmd);

static struct dev_ops rootnex_ops = {
	DEVO_REV,
	0,
	ddi_no_info,
	nulldev,
	nulldev,
	rootnex_attach,
	rootnex_detach,
	nulldev,
	&rootnex_cb_ops,
	&rootnex_bus_ops
};

static struct modldrv rootnex_modldrv = {
	&mod_driverops,
	"i86pc root nexus %I%",
	&rootnex_ops
};

static struct modlinkage rootnex_modlinkage = {
	MODREV_1,
	(void *)&rootnex_modldrv,
	NULL
};


/*
 * extern hacks
 */
extern struct seg_ops segdev_ops;
extern int ignore_hardware_nodes;	/* force flag from ddi_impl.c */
#ifdef	DDI_MAP_DEBUG
extern int ddi_map_debug_flag;
#define	ddi_map_debug	if (ddi_map_debug_flag) prom_printf
#endif
#define	ptob64(x)	(((uint64_t)(x)) << MMU_PAGESHIFT)
extern void i86_pp_map(page_t *pp, caddr_t kaddr);
extern void i86_va_map(caddr_t vaddr, struct as *asp, caddr_t kaddr);
extern int (*psm_intr_ops)(dev_info_t *, ddi_intr_handle_impl_t *,
    psm_intr_op_t, int *);
extern int impl_ddi_sunbus_initchild(dev_info_t *dip);
extern void impl_ddi_sunbus_removechild(dev_info_t *dip);
/*
 * Use the device arena for device control register mappings.
 * Various kernel memory walkers (debugger, dtrace) need to know
 * to avoid this address range to prevent undesired device activity.
 */
extern void *device_arena_alloc(size_t size, int vm_flag);
extern void device_arena_free(void * vaddr, size_t size);


/*
 * Internal functions
 */
static int rootnex_dma_init();
static void rootnex_add_props(dev_info_t *);
static int rootnex_ctl_reportdev(dev_info_t *dip);
static struct intrspec *rootnex_get_ispec(dev_info_t *rdip, int inum);
static int rootnex_ctlops_poke(peekpoke_ctlops_t *in_args);
static int rootnex_ctlops_peek(peekpoke_ctlops_t *in_args, void *result);
static int rootnex_map_regspec(ddi_map_req_t *mp, caddr_t *vaddrp);
static int rootnex_unmap_regspec(ddi_map_req_t *mp, caddr_t *vaddrp);
static int rootnex_map_handle(ddi_map_req_t *mp);
static void rootnex_clean_dmahdl(ddi_dma_impl_t *hp);
static int rootnex_valid_alloc_parms(ddi_dma_attr_t *attr, uint_t maxsegsize);
static int rootnex_valid_bind_parms(ddi_dma_req_t *dmareq,
    ddi_dma_attr_t *attr);
static void rootnex_get_sgl(ddi_dma_obj_t *dmar_object, ddi_dma_cookie_t *sgl,
    rootnex_sglinfo_t *sglinfo);
static int rootnex_bind_slowpath(ddi_dma_impl_t *hp, struct ddi_dma_req *dmareq,
    rootnex_dma_t *dma, ddi_dma_attr_t *attr, int kmflag);
static int rootnex_setup_copybuf(ddi_dma_impl_t *hp, struct ddi_dma_req *dmareq,
    rootnex_dma_t *dma, ddi_dma_attr_t *attr);
static void rootnex_teardown_copybuf(rootnex_dma_t *dma);
static int rootnex_setup_windows(ddi_dma_impl_t *hp, rootnex_dma_t *dma,
    ddi_dma_attr_t *attr, int kmflag);
static void rootnex_teardown_windows(rootnex_dma_t *dma);
static void rootnex_init_win(ddi_dma_impl_t *hp, rootnex_dma_t *dma,
    rootnex_window_t *window, ddi_dma_cookie_t *cookie, off_t cur_offset);
static void rootnex_setup_cookie(ddi_dma_obj_t *dmar_object,
    rootnex_dma_t *dma, ddi_dma_cookie_t *cookie, off_t cur_offset,
    size_t *copybuf_used, page_t **cur_pp);
static int rootnex_sgllen_window_boundary(ddi_dma_impl_t *hp,
    rootnex_dma_t *dma, rootnex_window_t **windowp, ddi_dma_cookie_t *cookie,
    ddi_dma_attr_t *attr, off_t cur_offset);
static int rootnex_copybuf_window_boundary(ddi_dma_impl_t *hp,
    rootnex_dma_t *dma, rootnex_window_t **windowp,
    ddi_dma_cookie_t *cookie, off_t cur_offset, size_t *copybuf_used);
static int rootnex_maxxfer_window_boundary(ddi_dma_impl_t *hp,
    rootnex_dma_t *dma, rootnex_window_t **windowp, ddi_dma_cookie_t *cookie);
static int rootnex_valid_sync_parms(ddi_dma_impl_t *hp, rootnex_window_t *win,
    off_t offset, size_t size, uint_t cache_flags);
static int rootnex_verify_buffer(rootnex_dma_t *dma);


/*
 * _init()
 *
 */
int
_init(void)
{

	rootnex_state = NULL;
	return (mod_install(&rootnex_modlinkage));
}


/*
 * _info()
 *
 */
int
_info(struct modinfo *modinfop)
{
	return (mod_info(&rootnex_modlinkage, modinfop));
}


/*
 * _fini()
 *
 */
int
_fini(void)
{
	return (EBUSY);
}
/*
 * rootnex_attach()
 *
 */
static int
rootnex_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	int e;


	switch (cmd) {
	case DDI_ATTACH:
		break;
	case DDI_RESUME:
		return (DDI_SUCCESS);
	default:
		return (DDI_FAILURE);
	}

	/*
	 * We should only have one instance of rootnex. Save it away since we
	 * don't have an easy way to get it back later.
	 */
	ASSERT(rootnex_state == NULL);
	rootnex_state = kmem_zalloc(sizeof (rootnex_state_t), KM_SLEEP);

	rootnex_state->r_dip = dip;
	rootnex_state->r_reserved_msg_printed = B_FALSE;
	rootnex_cnt = &rootnex_state->r_counters[0];

	mutex_init(&rootnex_state->r_peekpoke_mutex, NULL, MUTEX_SPIN,
	    (void *)ipltospl(15));

	/* initialize DMA related state */
	e = rootnex_dma_init();
	if (e != DDI_SUCCESS) {
		mutex_destroy(&rootnex_state->r_peekpoke_mutex);
		kmem_free(rootnex_state, sizeof (rootnex_state_t));
		return (DDI_FAILURE);
	}

	/* Add static root node properties */
	rootnex_add_props(dip);

	/* since we can't call ddi_report_dev() */
	cmn_err(CE_CONT, "?root nexus = %s\n", ddi_get_name(dip));

	/* Initialize rootnex event handle */
	i_ddi_rootnex_init_events(dip);

	return (DDI_SUCCESS);
}


/*
 * rootnex_detach()
 *
 */
/*ARGSUSED*/
static int
rootnex_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	switch (cmd) {
	case DDI_SUSPEND:
		break;
	default:
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}


/*
 * rootnex_dma_init()
 *
 */
/*ARGSUSED*/
static int
rootnex_dma_init()
{
	size_t bufsize;


	/*
	 * size of our cookie/window/copybuf state needed in dma bind that we
	 * pre-alloc in dma_alloc_handle
	 */
	rootnex_state->r_prealloc_cookies = rootnex_prealloc_cookies;
	rootnex_state->r_prealloc_size =
	    (rootnex_state->r_prealloc_cookies * sizeof (ddi_dma_cookie_t)) +
	    (rootnex_prealloc_windows * sizeof (rootnex_window_t)) +
	    (rootnex_prealloc_copybuf * sizeof (rootnex_pgmap_t));

	/*
	 * setup DDI DMA handle kmem cache, align each handle on 64 bytes,
	 * allocate 16 extra bytes for struct pointer alignment
	 * (p->dmai_private & dma->dp_prealloc_buffer)
	 */
	bufsize = sizeof (ddi_dma_impl_t) + sizeof (rootnex_dma_t) +
	    rootnex_state->r_prealloc_size + 0x10;
	rootnex_state->r_dmahdl_cache = kmem_cache_create("rootnex_dmahdl",
	    bufsize, 64, NULL, NULL, NULL, NULL, NULL, 0);
	if (rootnex_state->r_dmahdl_cache == NULL) {
		return (DDI_FAILURE);
	}

	/*
	 * allocate array to track which major numbers we have printed warnings
	 * for.
	 */
	rootnex_warn_list = kmem_zalloc(devcnt * sizeof (*rootnex_warn_list),
	    KM_SLEEP);

	return (DDI_SUCCESS);
}


/*
 * rootnex_add_props()
 *
 */
static void
rootnex_add_props(dev_info_t *dip)
{
	rootnex_intprop_t *rpp;
	int i;

	/* Add static integer/boolean properties to the root node */
	rpp = rootnex_intprp;
	for (i = 0; i < NROOT_INTPROPS; i++) {
		(void) e_ddi_prop_update_int(DDI_DEV_T_NONE, dip,
		    rpp[i].prop_name, rpp[i].prop_value);
	}
}
/*
 * *************************
 *  ctlops related routines
 * *************************
 */

/*
 * rootnex_ctlops()
 *
 */
/*ARGSUSED*/
static int
rootnex_ctlops(dev_info_t *dip, dev_info_t *rdip, ddi_ctl_enum_t ctlop,
    void *arg, void *result)
{
	int n, *ptr;
	struct ddi_parent_private_data *pdp;

	switch (ctlop) {
	case DDI_CTLOPS_DMAPMAPC:
		/*
		 * Return 'partial' to indicate that dma mapping
		 * has to be done in the main MMU.
		 */
		return (DDI_DMA_PARTIAL);

	case DDI_CTLOPS_BTOP:
		/*
		 * Convert byte count input to physical page units.
		 * (byte counts that are not a page-size multiple
		 * are rounded down)
		 */
		*(ulong_t *)result = btop(*(ulong_t *)arg);
		return (DDI_SUCCESS);

	case DDI_CTLOPS_PTOB:
		/*
		 * Convert size in physical pages to bytes
		 */
		*(ulong_t *)result = ptob(*(ulong_t *)arg);
		return (DDI_SUCCESS);

	case DDI_CTLOPS_BTOPR:
		/*
		 * Convert byte count input to physical page units
		 * (byte counts that are not a page-size multiple
		 * are rounded up)
		 */
		*(ulong_t *)result = btopr(*(ulong_t *)arg);
		return (DDI_SUCCESS);

	case DDI_CTLOPS_POKE:
		return (rootnex_ctlops_poke((peekpoke_ctlops_t *)arg));

	case DDI_CTLOPS_PEEK:
		return (rootnex_ctlops_peek((peekpoke_ctlops_t *)arg, result));

	case DDI_CTLOPS_INITCHILD:
		return (impl_ddi_sunbus_initchild(arg));

	case DDI_CTLOPS_UNINITCHILD:
		impl_ddi_sunbus_removechild(arg);
		return (DDI_SUCCESS);

	case DDI_CTLOPS_REPORTDEV:
		return (rootnex_ctl_reportdev(rdip));

	case DDI_CTLOPS_IOMIN:
		/*
		 * Nothing to do here but reflect back..
		 */
		return (DDI_SUCCESS);

	case DDI_CTLOPS_REGSIZE:
	case DDI_CTLOPS_NREGS:
		break;

	case DDI_CTLOPS_SIDDEV:
		if (ndi_dev_is_prom_node(rdip))
			return (DDI_SUCCESS);
		if (ndi_dev_is_persistent_node(rdip))
			return (DDI_SUCCESS);
		return (DDI_FAILURE);

	case DDI_CTLOPS_POWER:
		return ((*pm_platform_power)((power_req_t *)arg));

	case DDI_CTLOPS_RESERVED0: /* Was DDI_CTLOPS_NINTRS, obsolete */
	case DDI_CTLOPS_RESERVED1: /* Was DDI_CTLOPS_POKE_INIT, obsolete */
	case DDI_CTLOPS_RESERVED2: /* Was DDI_CTLOPS_POKE_FLUSH, obsolete */
	case DDI_CTLOPS_RESERVED3: /* Was DDI_CTLOPS_POKE_FINI, obsolete */
	case DDI_CTLOPS_RESERVED4: /* Was DDI_CTLOPS_INTR_HILEVEL, obsolete */
	case DDI_CTLOPS_RESERVED5: /* Was DDI_CTLOPS_XLATE_INTRS, obsolete */
		if (!rootnex_state->r_reserved_msg_printed) {
			rootnex_state->r_reserved_msg_printed = B_TRUE;
			cmn_err(CE_WARN, "Failing ddi_ctlops call(s) for "
			    "1 or more reserved/obsolete operations.");
		}
		return (DDI_FAILURE);

	default:
		return (DDI_FAILURE);
	}
	/*
	 * The rest are for "hardware" properties
	 */
	if ((pdp = ddi_get_parent_data(rdip)) == NULL)
		return (DDI_FAILURE);

	if (ctlop == DDI_CTLOPS_NREGS) {
		ptr = (int *)result;
		*ptr = pdp->par_nreg;
	} else {
		off_t *size = (off_t *)result;

		ptr = (int *)arg;
		n = *ptr;
		if (n >= pdp->par_nreg) {
			return (DDI_FAILURE);
		}
		*size = (off_t)pdp->par_reg[n].regspec_size;
	}
	return (DDI_SUCCESS);
}
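/*
 * Illustrative note (not part of the original source): with the 4K pages used
 * on x86, the BTOP/BTOPR/PTOB ctlops above differ only in rounding, e.g. for
 * a byte count of 0x1234:
 *
 *	btop(0x1234)  == 1	(round down)
 *	btopr(0x1234) == 2	(round up)
 *	ptob(2)       == 0x2000
 */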
/*
 * rootnex_ctl_reportdev()
 *
 */
static int
rootnex_ctl_reportdev(dev_info_t *dev)
{
	int i, n, len, f_len = 0;
	char *buf;

	buf = kmem_alloc(REPORTDEV_BUFSIZE, KM_SLEEP);
	f_len += snprintf(buf, REPORTDEV_BUFSIZE,
	    "%s%d at root", ddi_driver_name(dev), ddi_get_instance(dev));
	len = strlen(buf);

	for (i = 0; i < sparc_pd_getnreg(dev); i++) {

		struct regspec *rp = sparc_pd_getreg(dev, i);

		if (i == 0)
			f_len += snprintf(buf + len, REPORTDEV_BUFSIZE - len,
			    ": ");
		else
			f_len += snprintf(buf + len, REPORTDEV_BUFSIZE - len,
			    " and ");
		len = strlen(buf);

		switch (rp->regspec_bustype) {

		case BTEISA:
			f_len += snprintf(buf + len, REPORTDEV_BUFSIZE - len,
			    "%s 0x%x", DEVI_EISA_NEXNAME, rp->regspec_addr);
			break;

		case BTISA:
			f_len += snprintf(buf + len, REPORTDEV_BUFSIZE - len,
			    "%s 0x%x", DEVI_ISA_NEXNAME, rp->regspec_addr);
			break;

		default:
			f_len += snprintf(buf + len, REPORTDEV_BUFSIZE - len,
			    "space %x offset %x",
			    rp->regspec_bustype, rp->regspec_addr);
			break;
		}
		len = strlen(buf);
	}
	for (i = 0, n = sparc_pd_getnintr(dev); i < n; i++) {
		int pri;

		if (i != 0) {
			f_len += snprintf(buf + len, REPORTDEV_BUFSIZE - len,
			    ",");
			len = strlen(buf);
		}
		pri = INT_IPL(sparc_pd_getintr(dev, i)->intrspec_pri);
		f_len += snprintf(buf + len, REPORTDEV_BUFSIZE - len,
		    " sparc ipl %d", pri);
		len = strlen(buf);
	}
#ifdef DEBUG
	if (f_len + 1 >= REPORTDEV_BUFSIZE) {
		cmn_err(CE_NOTE, "next message is truncated: "
		    "printed length 1024, real length %d", f_len);
	}
#endif /* DEBUG */
	cmn_err(CE_CONT, "?%s\n", buf);
	kmem_free(buf, REPORTDEV_BUFSIZE);
	return (DDI_SUCCESS);
}


/*
 * rootnex_ctlops_poke()
 *
 */
static int
rootnex_ctlops_poke(peekpoke_ctlops_t *in_args)
{
	int err = DDI_SUCCESS;
	on_trap_data_t otd;

	/* Cautious access not supported. */
	if (in_args->handle != NULL)
		return (DDI_FAILURE);

	mutex_enter(&rootnex_state->r_peekpoke_mutex);

	/* Set up protected environment. */
	if (!on_trap(&otd, OT_DATA_ACCESS)) {
		switch (in_args->size) {
		case sizeof (uint8_t):
			*(uint8_t *)in_args->dev_addr =
			    *(uint8_t *)in_args->host_addr;
			break;

		case sizeof (uint16_t):
			*(uint16_t *)in_args->dev_addr =
			    *(uint16_t *)in_args->host_addr;
			break;

		case sizeof (uint32_t):
			*(uint32_t *)in_args->dev_addr =
			    *(uint32_t *)in_args->host_addr;
			break;

		case sizeof (uint64_t):
			*(uint64_t *)in_args->dev_addr =
			    *(uint64_t *)in_args->host_addr;
			break;

		default:
			err = DDI_FAILURE;
			break;
		}
	} else
		err = DDI_FAILURE;

	/* Take down protected environment. */
	no_trap();
	mutex_exit(&rootnex_state->r_peekpoke_mutex);

	return (err);
}
/*
 * rootnex_ctlops_peek()
 *
 */
static int
rootnex_ctlops_peek(peekpoke_ctlops_t *in_args, void *result)
{
	int err = DDI_SUCCESS;
	on_trap_data_t otd;

	/* Cautious access not supported. */
	if (in_args->handle != NULL)
		return (DDI_FAILURE);

	mutex_enter(&rootnex_state->r_peekpoke_mutex);

	if (!on_trap(&otd, OT_DATA_ACCESS)) {
		switch (in_args->size) {
		case sizeof (uint8_t):
			*(uint8_t *)in_args->host_addr =
			    *(uint8_t *)in_args->dev_addr;
			break;

		case sizeof (uint16_t):
			*(uint16_t *)in_args->host_addr =
			    *(uint16_t *)in_args->dev_addr;
			break;

		case sizeof (uint32_t):
			*(uint32_t *)in_args->host_addr =
			    *(uint32_t *)in_args->dev_addr;
			break;

		case sizeof (uint64_t):
			*(uint64_t *)in_args->host_addr =
			    *(uint64_t *)in_args->dev_addr;
			break;

		default:
			err = DDI_FAILURE;
			break;
		}
		result = (void *)in_args->host_addr;
	} else
		err = DDI_FAILURE;

	no_trap();
	mutex_exit(&rootnex_state->r_peekpoke_mutex);

	return (err);
}



/*
 * ******************
 *  map related code
 * ******************
 */
/*
 * rootnex_map()
 *
 */
static int
rootnex_map(dev_info_t *dip, dev_info_t *rdip, ddi_map_req_t *mp, off_t offset,
    off_t len, caddr_t *vaddrp)
{
	struct regspec *rp, tmp_reg;
	ddi_map_req_t mr = *mp;		/* Get private copy of request */
	int error;

	mp = &mr;

	switch (mp->map_op)  {
	case DDI_MO_MAP_LOCKED:
	case DDI_MO_UNMAP:
	case DDI_MO_MAP_HANDLE:
		break;
	default:
#ifdef	DDI_MAP_DEBUG
		cmn_err(CE_WARN, "rootnex_map: unimplemented map op %d.",
		    mp->map_op);
#endif	/* DDI_MAP_DEBUG */
		return (DDI_ME_UNIMPLEMENTED);
	}

	if (mp->map_flags & DDI_MF_USER_MAPPING)  {
#ifdef	DDI_MAP_DEBUG
		cmn_err(CE_WARN, "rootnex_map: unimplemented map type: user.");
#endif	/* DDI_MAP_DEBUG */
		return (DDI_ME_UNIMPLEMENTED);
	}

	/*
	 * First, if given an rnumber, convert it to a regspec...
	 * (Presumably, this is on behalf of a child of the root node?)
	 */

	if (mp->map_type == DDI_MT_RNUMBER)  {

		int rnumber = mp->map_obj.rnumber;
#ifdef	DDI_MAP_DEBUG
		static char *out_of_range =
		    "rootnex_map: Out of range rnumber <%d>, device <%s>";
#endif	/* DDI_MAP_DEBUG */

		rp = i_ddi_rnumber_to_regspec(rdip, rnumber);
		if (rp == NULL)  {
#ifdef	DDI_MAP_DEBUG
			cmn_err(CE_WARN, out_of_range, rnumber,
			    ddi_get_name(rdip));
#endif	/* DDI_MAP_DEBUG */
			return (DDI_ME_RNUMBER_RANGE);
		}

		/*
		 * Convert the given ddi_map_req_t from rnumber to regspec...
		 */

		mp->map_type = DDI_MT_REGSPEC;
		mp->map_obj.rp = rp;
	}

	/*
	 * Adjust offset and length corresponding to called values...
	 * XXX: A non-zero length means override the one in the regspec
	 * XXX: (regardless of what's in the parent's range?)
	 */

	tmp_reg = *(mp->map_obj.rp);		/* Preserve underlying data */
	rp = mp->map_obj.rp = &tmp_reg;		/* Use tmp_reg in request */

#ifdef	DDI_MAP_DEBUG
	cmn_err(CE_CONT,
	    "rootnex: <%s,%s> <0x%x, 0x%x, 0x%d>"
	    " offset %d len %d handle 0x%x\n",
	    ddi_get_name(dip), ddi_get_name(rdip),
	    rp->regspec_bustype, rp->regspec_addr, rp->regspec_size,
	    offset, len, mp->map_handlep);
#endif	/* DDI_MAP_DEBUG */

	/*
	 * I/O or memory mapping:
	 *
	 *	<bustype=0, addr=x, len=x>: memory
	 *	<bustype=1, addr=x, len=x>: i/o
	 *	<bustype>1, addr=0, len=x>: x86-compatibility i/o
	 */

	if (rp->regspec_bustype > 1 && rp->regspec_addr != 0) {
		cmn_err(CE_WARN, "<%s,%s> invalid register spec"
		    " <0x%x, 0x%x, 0x%x>", ddi_get_name(dip),
		    ddi_get_name(rdip), rp->regspec_bustype,
		    rp->regspec_addr, rp->regspec_size);
		return (DDI_ME_INVAL);
	}

	if (rp->regspec_bustype > 1 && rp->regspec_addr == 0) {
		/*
		 * compatibility i/o mapping
		 */
		rp->regspec_bustype += (uint_t)offset;
	} else {
		/*
		 * Normal memory or i/o mapping
		 */
		rp->regspec_addr += (uint_t)offset;
	}

	if (len != 0)
		rp->regspec_size = (uint_t)len;

#ifdef	DDI_MAP_DEBUG
	cmn_err(CE_CONT,
	    "             <%s,%s> <0x%x, 0x%x, 0x%d>"
	    " offset %d len %d handle 0x%x\n",
	    ddi_get_name(dip), ddi_get_name(rdip),
	    rp->regspec_bustype, rp->regspec_addr, rp->regspec_size,
	    offset, len, mp->map_handlep);
#endif	/* DDI_MAP_DEBUG */

	/*
	 * Apply any parent ranges at this level, if applicable.
	 * (This is where nexus specific regspec translation takes place.
	 * Use of this function is implicit agreement that translation is
	 * provided via ddi_apply_range.)
	 */

#ifdef	DDI_MAP_DEBUG
	ddi_map_debug("applying range of parent <%s> to child <%s>...\n",
	    ddi_get_name(dip), ddi_get_name(rdip));
#endif	/* DDI_MAP_DEBUG */

	if ((error = i_ddi_apply_range(dip, rdip, mp->map_obj.rp)) != 0)
		return (error);

	switch (mp->map_op)  {
	case DDI_MO_MAP_LOCKED:

		/*
		 * Set up the locked down kernel mapping to the regspec...
		 */

		return (rootnex_map_regspec(mp, vaddrp));

	case DDI_MO_UNMAP:

		/*
		 * Release mapping...
		 */

		return (rootnex_unmap_regspec(mp, vaddrp));

	case DDI_MO_MAP_HANDLE:

		return (rootnex_map_handle(mp));

	default:
		return (DDI_ME_UNIMPLEMENTED);
	}
}
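/*
 * Illustrative note (not part of the original source): for a normal memory
 * regspec (bustype 0), the offset/length adjustment above simply does
 * regspec_addr += offset and, when len is non-zero, regspec_size = len.
 * For example (values are hypothetical):
 *
 *	regspec <0, 0xA0000, 0x20000>, offset 0x1000, len 0x2000
 *	becomes <0, 0xA1000, 0x2000>
 *
 * before being handed to i_ddi_apply_range() and rootnex_map_regspec().
 */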
/*
 * rootnex_map_fault()
 *
 *	fault in mappings for requestors
 */
/*ARGSUSED*/
static int
rootnex_map_fault(dev_info_t *dip, dev_info_t *rdip, struct hat *hat,
    struct seg *seg, caddr_t addr, struct devpage *dp, pfn_t pfn, uint_t prot,
    uint_t lock)
{

#ifdef	DDI_MAP_DEBUG
	ddi_map_debug("rootnex_map_fault: address <%x> pfn <%x>", addr, pfn);
	ddi_map_debug(" Seg <%s>\n",
	    seg->s_ops == &segdev_ops ? "segdev" :
	    seg == &kvseg ? "segkmem" : "NONE!");
#endif	/* DDI_MAP_DEBUG */

	/*
	 * This is all terribly broken, but it is a start
	 *
	 * XXX	Note that this test means that segdev_ops
	 *	must be exported from seg_dev.c.
	 * XXX	What about devices with their own segment drivers?
	 */
	if (seg->s_ops == &segdev_ops) {
		struct segdev_data *sdp =
		    (struct segdev_data *)seg->s_data;

		if (hat == NULL) {
			/*
			 * This is one plausible interpretation of
			 * a null hat i.e. use the first hat on the
			 * address space hat list which by convention is
			 * the hat of the system MMU.  An alternative
			 * would be to panic .. this might well be better ..
			 */
			ASSERT(AS_READ_HELD(seg->s_as, &seg->s_as->a_lock));
			hat = seg->s_as->a_hat;
			cmn_err(CE_NOTE, "rootnex_map_fault: nil hat");
		}
		hat_devload(hat, addr, MMU_PAGESIZE, pfn, prot | sdp->hat_attr,
		    (lock ? HAT_LOAD_LOCK : HAT_LOAD));
	} else if (seg == &kvseg && dp == NULL) {
		hat_devload(kas.a_hat, addr, MMU_PAGESIZE, pfn, prot,
		    HAT_LOAD_LOCK);
	} else
		return (DDI_FAILURE);
	return (DDI_SUCCESS);
}
/*
 * rootnex_map_regspec()
 *     we don't support mapping of I/O cards above 4Gb
 */
static int
rootnex_map_regspec(ddi_map_req_t *mp, caddr_t *vaddrp)
{
	ulong_t base;
	void *cvaddr;
	uint_t npages, pgoffset;
	struct regspec *rp;
	ddi_acc_hdl_t *hp;
	ddi_acc_impl_t *ap;
	uint_t hat_acc_flags;

	rp = mp->map_obj.rp;
	hp = mp->map_handlep;

#ifdef	DDI_MAP_DEBUG
	ddi_map_debug(
	    "rootnex_map_regspec: <0x%x 0x%x 0x%x> handle 0x%x\n",
	    rp->regspec_bustype, rp->regspec_addr,
	    rp->regspec_size, mp->map_handlep);
#endif	/* DDI_MAP_DEBUG */

	/*
	 * I/O or memory mapping
	 *
	 *	<bustype=0, addr=x, len=x>: memory
	 *	<bustype=1, addr=x, len=x>: i/o
	 *	<bustype>1, addr=0, len=x>: x86-compatibility i/o
	 */

	if (rp->regspec_bustype > 1 && rp->regspec_addr != 0) {
		cmn_err(CE_WARN, "rootnex: invalid register spec"
		    " <0x%x, 0x%x, 0x%x>", rp->regspec_bustype,
		    rp->regspec_addr, rp->regspec_size);
		return (DDI_FAILURE);
	}

	if (rp->regspec_bustype != 0) {
		/*
		 * I/O space - needs a handle.
		 */
		if (hp == NULL) {
			return (DDI_FAILURE);
		}
		ap = (ddi_acc_impl_t *)hp->ah_platform_private;
		ap->ahi_acc_attr |= DDI_ACCATTR_IO_SPACE;
		impl_acc_hdl_init(hp);

		if (mp->map_flags & DDI_MF_DEVICE_MAPPING) {
#ifdef	DDI_MAP_DEBUG
			ddi_map_debug("rootnex_map_regspec: mmap() "
			    "to I/O space is not supported.\n");
#endif	/* DDI_MAP_DEBUG */
			return (DDI_ME_INVAL);
		} else {
			/*
			 * 1275-compliant vs. compatibility i/o mapping
			 */
			*vaddrp =
			    (rp->regspec_bustype > 1 && rp->regspec_addr == 0) ?
			    ((caddr_t)(uintptr_t)rp->regspec_bustype) :
			    ((caddr_t)(uintptr_t)rp->regspec_addr);
		}

#ifdef	DDI_MAP_DEBUG
		ddi_map_debug(
		    "rootnex_map_regspec: \"Mapping\" %d bytes I/O space "
		    "at 0x%x\n", rp->regspec_size, *vaddrp);
#endif	/* DDI_MAP_DEBUG */
		return (DDI_SUCCESS);
	}

	/*
	 * Memory space
	 */

	if (hp != NULL) {
		/*
		 * hat layer ignores
		 * hp->ah_acc.devacc_attr_endian_flags.
		 */
		switch (hp->ah_acc.devacc_attr_dataorder) {
		case DDI_STRICTORDER_ACC:
			hat_acc_flags = HAT_STRICTORDER;
			break;
		case DDI_UNORDERED_OK_ACC:
			hat_acc_flags = HAT_UNORDERED_OK;
			break;
		case DDI_MERGING_OK_ACC:
			hat_acc_flags = HAT_MERGING_OK;
			break;
		case DDI_LOADCACHING_OK_ACC:
			hat_acc_flags = HAT_LOADCACHING_OK;
			break;
		case DDI_STORECACHING_OK_ACC:
			hat_acc_flags = HAT_STORECACHING_OK;
			break;
		}
		ap = (ddi_acc_impl_t *)hp->ah_platform_private;
		ap->ahi_acc_attr |= DDI_ACCATTR_CPU_VADDR;
		impl_acc_hdl_init(hp);
		hp->ah_hat_flags = hat_acc_flags;
	} else {
		hat_acc_flags = HAT_STRICTORDER;
	}

	base = (ulong_t)rp->regspec_addr & (~MMU_PAGEOFFSET); /* base addr */
	pgoffset = (ulong_t)rp->regspec_addr & MMU_PAGEOFFSET; /* offset */

	if (rp->regspec_size == 0) {
#ifdef	DDI_MAP_DEBUG
		ddi_map_debug("rootnex_map_regspec: zero regspec_size\n");
#endif	/* DDI_MAP_DEBUG */
		return (DDI_ME_INVAL);
	}

	if (mp->map_flags & DDI_MF_DEVICE_MAPPING) {
		*vaddrp = (caddr_t)mmu_btop(base);
	} else {
		npages = mmu_btopr(rp->regspec_size + pgoffset);

#ifdef	DDI_MAP_DEBUG
		ddi_map_debug("rootnex_map_regspec: Mapping %d pages "
		    "physical %x ", npages, base);
#endif	/* DDI_MAP_DEBUG */

		cvaddr = device_arena_alloc(ptob(npages), VM_NOSLEEP);
		if (cvaddr == NULL)
			return (DDI_ME_NORESOURCES);

		/*
		 * Now map in the pages we've allocated...
		 */
		hat_devload(kas.a_hat, cvaddr, mmu_ptob(npages),
		    mmu_btop(base), mp->map_prot | hat_acc_flags,
		    HAT_LOAD_LOCK);
		*vaddrp = (caddr_t)cvaddr + pgoffset;
	}

#ifdef	DDI_MAP_DEBUG
	ddi_map_debug("at virtual 0x%x\n", *vaddrp);
#endif	/* DDI_MAP_DEBUG */
	return (DDI_SUCCESS);
}
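/*
 * Illustrative note (not part of the original source): the base/pgoffset
 * split above works on page boundaries (4K pages, MMU_PAGEOFFSET == 0xfff).
 * For a hypothetical register at regspec_addr 0xFED00804 with regspec_size
 * 0x1000:
 *
 *	base     = 0xFED00804 & ~0xfff = 0xFED00000
 *	pgoffset = 0xFED00804 &  0xfff = 0x804
 *	npages   = mmu_btopr(0x1000 + 0x804) = 2
 *
 * so two pages are mapped from the device arena and the returned virtual
 * address is cvaddr + 0x804.
 */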
/*
 * rootnex_unmap_regspec()
 *
 */
static int
rootnex_unmap_regspec(ddi_map_req_t *mp, caddr_t *vaddrp)
{
	caddr_t addr = (caddr_t)*vaddrp;
	uint_t npages, pgoffset;
	struct regspec *rp;

	if (mp->map_flags & DDI_MF_DEVICE_MAPPING)
		return (0);

	rp = mp->map_obj.rp;

	if (rp->regspec_size == 0) {
#ifdef	DDI_MAP_DEBUG
		ddi_map_debug("rootnex_unmap_regspec: zero regspec_size\n");
#endif	/* DDI_MAP_DEBUG */
		return (DDI_ME_INVAL);
	}

	/*
	 * I/O or memory mapping:
	 *
	 *	<bustype=0, addr=x, len=x>: memory
	 *	<bustype=1, addr=x, len=x>: i/o
	 *	<bustype>1, addr=0, len=x>: x86-compatibility i/o
	 */
	if (rp->regspec_bustype != 0) {
		/*
		 * This is I/O space, which requires no particular
		 * processing on unmap since it isn't mapped in the
		 * first place.
		 */
		return (DDI_SUCCESS);
	}

	/*
	 * Memory space
	 */
	pgoffset = (uintptr_t)addr & MMU_PAGEOFFSET;
	npages = mmu_btopr(rp->regspec_size + pgoffset);
	hat_unload(kas.a_hat, addr - pgoffset, ptob(npages),
	    HAT_UNLOAD_UNLOCK);
	device_arena_free(addr - pgoffset, ptob(npages));

	/*
	 * Destroy the pointer - the mapping has logically gone
	 */
	*vaddrp = NULL;

	return (DDI_SUCCESS);
}


/*
 * rootnex_map_handle()
 *
 */
static int
rootnex_map_handle(ddi_map_req_t *mp)
{
	ddi_acc_hdl_t *hp;
	ulong_t base;
	uint_t pgoffset;
	struct regspec *rp;

	rp = mp->map_obj.rp;

#ifdef	DDI_MAP_DEBUG
	ddi_map_debug(
	    "rootnex_map_handle: <0x%x 0x%x 0x%x> handle 0x%x\n",
	    rp->regspec_bustype, rp->regspec_addr,
	    rp->regspec_size, mp->map_handlep);
#endif	/* DDI_MAP_DEBUG */

	/*
	 * I/O or memory mapping:
	 *
	 *	<bustype=0, addr=x, len=x>: memory
	 *	<bustype=1, addr=x, len=x>: i/o
	 *	<bustype>1, addr=0, len=x>: x86-compatibility i/o
	 */
	if (rp->regspec_bustype != 0) {
		/*
		 * This refers to I/O space, and we don't support "mapping"
		 * I/O space to a user.
		 */
		return (DDI_FAILURE);
	}

	/*
	 * Set up the hat_flags for the mapping.
	 */
	hp = mp->map_handlep;

	switch (hp->ah_acc.devacc_attr_endian_flags) {
	case DDI_NEVERSWAP_ACC:
		hp->ah_hat_flags = HAT_NEVERSWAP | HAT_STRICTORDER;
		break;
	case DDI_STRUCTURE_LE_ACC:
		hp->ah_hat_flags = HAT_STRUCTURE_LE;
		break;
	case DDI_STRUCTURE_BE_ACC:
		return (DDI_FAILURE);
	default:
		return (DDI_REGS_ACC_CONFLICT);
	}

	switch (hp->ah_acc.devacc_attr_dataorder) {
	case DDI_STRICTORDER_ACC:
		break;
	case DDI_UNORDERED_OK_ACC:
		hp->ah_hat_flags |= HAT_UNORDERED_OK;
		break;
	case DDI_MERGING_OK_ACC:
		hp->ah_hat_flags |= HAT_MERGING_OK;
		break;
	case DDI_LOADCACHING_OK_ACC:
		hp->ah_hat_flags |= HAT_LOADCACHING_OK;
		break;
	case DDI_STORECACHING_OK_ACC:
		hp->ah_hat_flags |= HAT_STORECACHING_OK;
		break;
	default:
		return (DDI_FAILURE);
	}

	base = (ulong_t)rp->regspec_addr & (~MMU_PAGEOFFSET); /* base addr */
	pgoffset = (ulong_t)rp->regspec_addr & MMU_PAGEOFFSET; /* offset */

	if (rp->regspec_size == 0)
		return (DDI_ME_INVAL);

	hp->ah_pfn = mmu_btop(base);
	hp->ah_pnum = mmu_btopr(rp->regspec_size + pgoffset);

	return (DDI_SUCCESS);
}



/*
 * ************************
 *  interrupt related code
 * ************************
 */
/*
 * rootnex_intr_ops()
 *	bus_intr_op() function for interrupt support
 */
/* ARGSUSED */
static int
rootnex_intr_ops(dev_info_t *pdip, dev_info_t *rdip, ddi_intr_op_t intr_op,
    ddi_intr_handle_impl_t *hdlp, void *result)
{
	struct intrspec			*ispec;
	struct ddi_parent_private_data	*pdp;

	DDI_INTR_NEXDBG((CE_CONT,
	    "rootnex_intr_ops: pdip = %p, rdip = %p, intr_op = %x, hdlp = %p\n",
	    (void *)pdip, (void *)rdip, intr_op, (void *)hdlp));

	/* Process the interrupt operation */
	switch (intr_op) {
	case DDI_INTROP_GETCAP:
		/* First check with pcplusmp */
		if (psm_intr_ops == NULL)
			return (DDI_FAILURE);

		if ((*psm_intr_ops)(rdip, hdlp, PSM_INTR_OP_GET_CAP, result)) {
			*(int *)result = 0;
			return (DDI_FAILURE);
		}
		break;
	case DDI_INTROP_SETCAP:
		if (psm_intr_ops == NULL)
			return (DDI_FAILURE);

		if ((*psm_intr_ops)(rdip, hdlp, PSM_INTR_OP_SET_CAP, result))
			return (DDI_FAILURE);
		break;
	case DDI_INTROP_ALLOC:
		if ((ispec = rootnex_get_ispec(rdip, hdlp->ih_inum)) == NULL)
			return (DDI_FAILURE);
		hdlp->ih_pri = ispec->intrspec_pri;
		*(int *)result = hdlp->ih_scratch1;
		break;
	case DDI_INTROP_FREE:
		pdp = ddi_get_parent_data(rdip);
		/*
		 * Special case for the 'pcic' driver only.
		 * If an intrspec was created for it, clean it up here.
		 * See detailed comments on this in the function
		 * rootnex_get_ispec().
		 */
		if (pdp->par_intr && strcmp(ddi_get_name(rdip), "pcic") == 0) {
			kmem_free(pdp->par_intr, sizeof (struct intrspec) *
			    pdp->par_nintr);
			/*
			 * Set it to zero; so that
			 * DDI framework doesn't free it again
			 */
			pdp->par_intr = NULL;
			pdp->par_nintr = 0;
		}
		break;
	case DDI_INTROP_GETPRI:
		if ((ispec = rootnex_get_ispec(rdip, hdlp->ih_inum)) == NULL)
			return (DDI_FAILURE);
		*(int *)result = ispec->intrspec_pri;
		break;
	case DDI_INTROP_SETPRI:
		/* Validate the interrupt priority passed to us */
		if (*(int *)result > LOCK_LEVEL)
			return (DDI_FAILURE);

		/* Ensure that PSM is all initialized and ispec is ok */
		if ((psm_intr_ops == NULL) ||
		    ((ispec = rootnex_get_ispec(rdip, hdlp->ih_inum)) == NULL))
			return (DDI_FAILURE);

		/* Change the priority */
		if ((*psm_intr_ops)(rdip, hdlp, PSM_INTR_OP_SET_PRI, result) ==
		    PSM_FAILURE)
			return (DDI_FAILURE);

		/* update the ispec with the new priority */
		ispec->intrspec_pri = *(int *)result;
		break;
	case DDI_INTROP_ADDISR:
		if ((ispec = rootnex_get_ispec(rdip, hdlp->ih_inum)) == NULL)
			return (DDI_FAILURE);
		ispec->intrspec_func = hdlp->ih_cb_func;
		break;
	case DDI_INTROP_REMISR:
		if ((ispec = rootnex_get_ispec(rdip, hdlp->ih_inum)) == NULL)
			return (DDI_FAILURE);
		ispec->intrspec_func = (uint_t (*)()) 0;
		break;
	case DDI_INTROP_ENABLE:
		if ((ispec = rootnex_get_ispec(rdip, hdlp->ih_inum)) == NULL)
			return (DDI_FAILURE);

		/* Call psmi to translate irq with the dip */
		if (psm_intr_ops == NULL)
			return (DDI_FAILURE);

		((ihdl_plat_t *)hdlp->ih_private)->ip_ispecp = ispec;
		(void) (*psm_intr_ops)(rdip, hdlp, PSM_INTR_OP_XLATE_VECTOR,
		    (int *)&hdlp->ih_vector);

		/* Add the interrupt handler */
		if (!add_avintr((void *)hdlp, ispec->intrspec_pri,
		    hdlp->ih_cb_func, DEVI(rdip)->devi_name, hdlp->ih_vector,
		    hdlp->ih_cb_arg1, hdlp->ih_cb_arg2, NULL, rdip))
			return (DDI_FAILURE);
		break;
	case DDI_INTROP_DISABLE:
		if ((ispec = rootnex_get_ispec(rdip, hdlp->ih_inum)) == NULL)
			return (DDI_FAILURE);

		/* Call psm_ops() to translate irq with the dip */
		if (psm_intr_ops == NULL)
			return (DDI_FAILURE);

		((ihdl_plat_t *)hdlp->ih_private)->ip_ispecp = ispec;
		(void) (*psm_intr_ops)(rdip, hdlp,
		    PSM_INTR_OP_XLATE_VECTOR, (int *)&hdlp->ih_vector);

		/* Remove the interrupt handler */
		rem_avintr((void *)hdlp, ispec->intrspec_pri,
		    hdlp->ih_cb_func, hdlp->ih_vector);
		break;
	case DDI_INTROP_SETMASK:
		if (psm_intr_ops == NULL)
			return (DDI_FAILURE);

		if ((*psm_intr_ops)(rdip, hdlp, PSM_INTR_OP_SET_MASK, NULL))
			return (DDI_FAILURE);
		break;
	case DDI_INTROP_CLRMASK:
		if (psm_intr_ops == NULL)
			return (DDI_FAILURE);

		if ((*psm_intr_ops)(rdip, hdlp, PSM_INTR_OP_CLEAR_MASK, NULL))
			return (DDI_FAILURE);
		break;
	case DDI_INTROP_GETPENDING:
		if (psm_intr_ops == NULL)
			return (DDI_FAILURE);

		if ((*psm_intr_ops)(rdip, hdlp, PSM_INTR_OP_GET_PENDING,
		    result)) {
			*(int *)result = 0;
			return (DDI_FAILURE);
		}
		break;
	case DDI_INTROP_NINTRS:
		if ((pdp = ddi_get_parent_data(rdip)) == NULL)
			return (DDI_FAILURE);
		*(int *)result = pdp->par_nintr;
		if (pdp->par_nintr == 0) {
			/*
			 * Special case for the 'pcic' driver only. This
			 * driver is a child of the 'isa' and 'rootnex'
			 * drivers.
			 *
			 * See detailed comments on this in the function
			 * rootnex_get_ispec().
			 *
			 * Children of 'pcic' send 'NINTRS' requests all the
			 * way to the rootnex driver. But, the
			 * 'pdp->par_nintr' field may not be initialized.
			 * So, we fake it here to return 1 (a la what the
			 * PCMCIA nexus does).
			 */
			if (strcmp(ddi_get_name(rdip), "pcic") == 0)
				*(int *)result = 1;
		}
		break;
	case DDI_INTROP_SUPPORTED_TYPES:
		*(int *)result = 0;
		*(int *)result |= DDI_INTR_TYPE_FIXED;	/* Always ... */
		break;
	case DDI_INTROP_NAVAIL:
		if ((ispec = rootnex_get_ispec(rdip, hdlp->ih_inum)) == NULL)
			return (DDI_FAILURE);

		if (psm_intr_ops == NULL) {
			*(int *)result = 1;
			break;
		}

		/* Priority in the handle not initialized yet */
		hdlp->ih_pri = ispec->intrspec_pri;
		(void) (*psm_intr_ops)(rdip, hdlp,
		    PSM_INTR_OP_NAVAIL_VECTORS, result);
		break;
	default:
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}
/*
 * rootnex_get_ispec()
 *	convert an interrupt number to an interrupt specification.
 *	The interrupt number determines which interrupt spec will be
 *	returned if more than one exists.
 *
 *	Look into the parent private data area of the 'rdip' to find out
 *	the interrupt specification.  First check to make sure there is
 *	one that matches "inumber" and then return a pointer to it.
 *
 *	Return NULL if one could not be found.
 *
 *	NOTE: This is needed for rootnex_intr_ops()
 */
static struct intrspec *
rootnex_get_ispec(dev_info_t *rdip, int inum)
{
	struct ddi_parent_private_data *pdp = ddi_get_parent_data(rdip);

	/*
	 * Special case handling for drivers that provide their own
	 * intrspec structures instead of relying on the DDI framework.
	 *
	 * A broken hardware driver in ON could potentially provide its
	 * own intrspec structure, instead of relying on the hardware.
	 * If these drivers are children of 'rootnex' then we need to
	 * continue to provide backward compatibility to them here.
	 *
	 * The following check is a special case for the 'pcic' driver,
	 * which was found to have broken hardware and provides its own
	 * intrspec.
	 *
	 * Verbatim comments from this driver are shown here:
	 * "Don't use the ddi_add_intr since we don't have a
	 * default intrspec in all cases."
	 *
	 * Since an 'ispec' may not always be created for it,
	 * check for that and create one if so.
	 *
	 * NOTE: Currently 'pcic' is the only driver found to do this.
	 */
	if (!pdp->par_intr && strcmp(ddi_get_name(rdip), "pcic") == 0) {
		pdp->par_nintr = 1;
		pdp->par_intr = kmem_zalloc(sizeof (struct intrspec) *
		    pdp->par_nintr, KM_SLEEP);
	}

	/* Validate the interrupt number */
	if (inum >= pdp->par_nintr)
		return (NULL);

	/* Get the interrupt structure pointer and return that */
	return ((struct intrspec *)&pdp->par_intr[inum]);
}
/*
 * ******************
 *  dma related code
 * ******************
 */

/*
 * rootnex_dma_allochdl()
 *    called from ddi_dma_alloc_handle().
 */
/*ARGSUSED*/
static int
rootnex_dma_allochdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_attr_t *attr,
    int (*waitfp)(caddr_t), caddr_t arg, ddi_dma_handle_t *handlep)
{
	uint64_t maxsegmentsize_ll;
	uint_t maxsegmentsize;
	ddi_dma_impl_t *hp;
	rootnex_dma_t *dma;
	uint64_t count_max;
	uint64_t seg;
	int kmflag;
	int e;


	/* convert our sleep flags */
	if (waitfp == DDI_DMA_SLEEP) {
		kmflag = KM_SLEEP;
	} else {
		kmflag = KM_NOSLEEP;
	}

	/*
	 * We try to do only one memory allocation here. We'll do a little
	 * pointer manipulation later. If the bind ends up taking more than
	 * our prealloc's space, we'll have to allocate more memory in the
	 * bind operation. Not great, but much better than before and the
	 * best we can do with the current bind interfaces.
	 */
	hp = kmem_cache_alloc(rootnex_state->r_dmahdl_cache, kmflag);
	if (hp == NULL) {
		if (waitfp != DDI_DMA_DONTWAIT) {
			ddi_set_callback(waitfp, arg,
			    &rootnex_state->r_dvma_call_list_id);
		}
		return (DDI_DMA_NORESOURCES);
	}

	/* Do our pointer manipulation now, align the structures */
	hp->dmai_private = (void *)(((uintptr_t)hp +
	    (uintptr_t)sizeof (ddi_dma_impl_t) + 0x7) & ~0x7);
	dma = (rootnex_dma_t *)hp->dmai_private;
	dma->dp_prealloc_buffer = (uchar_t *)(((uintptr_t)dma +
	    sizeof (rootnex_dma_t) + 0x7) & ~0x7);

	/* setup the handle */
	rootnex_clean_dmahdl(hp);
	dma->dp_dip = rdip;
	dma->dp_sglinfo.si_min_addr = attr->dma_attr_addr_lo;
	dma->dp_sglinfo.si_max_addr = attr->dma_attr_addr_hi;
	hp->dmai_minxfer = attr->dma_attr_minxfer;
	hp->dmai_burstsizes = attr->dma_attr_burstsizes;
	hp->dmai_rdip = rdip;
	hp->dmai_attr = *attr;

	/* we don't need to worry about the SPL since we do a tryenter */
	mutex_init(&dma->dp_mutex, NULL, MUTEX_DRIVER, NULL);

	/*
	 * Figure out our maximum segment size. If the segment size is greater
	 * than 4G, we will limit it to (4G - 1) since the max size of a dma
	 * object (ddi_dma_obj_t.dmao_size) is 32 bits. dma_attr_seg and
	 * dma_attr_count_max are size-1 type values.
	 *
	 * Maximum segment size is the largest physically contiguous chunk of
	 * memory that we can return from a bind (i.e. the maximum size of a
	 * single cookie).
	 */

	/* handle the rollover cases */
	seg = attr->dma_attr_seg + 1;
	if (seg < attr->dma_attr_seg) {
		seg = attr->dma_attr_seg;
	}
	count_max = attr->dma_attr_count_max + 1;
	if (count_max < attr->dma_attr_count_max) {
		count_max = attr->dma_attr_count_max;
	}

	/*
	 * granularity may or may not be a power of two. If it isn't, we can't
	 * use a simple mask.
	 */
	if (attr->dma_attr_granular & (attr->dma_attr_granular - 1)) {
		dma->dp_granularity_power_2 = B_FALSE;
	} else {
		dma->dp_granularity_power_2 = B_TRUE;
	}

	/*
	 * maxxfer should be a whole multiple of granularity. If we're going to
	 * break up a window because we're greater than maxxfer, we might as
	 * well make sure maxxfer is a whole multiple so we don't have to
	 * worry about trimming the window later on for this case.
	 */
	if (attr->dma_attr_granular > 1) {
		if (dma->dp_granularity_power_2) {
			dma->dp_maxxfer = attr->dma_attr_maxxfer -
			    (attr->dma_attr_maxxfer &
			    (attr->dma_attr_granular - 1));
		} else {
			dma->dp_maxxfer = attr->dma_attr_maxxfer -
			    (attr->dma_attr_maxxfer % attr->dma_attr_granular);
		}
	} else {
		dma->dp_maxxfer = attr->dma_attr_maxxfer;
	}

	maxsegmentsize_ll = MIN(seg, dma->dp_maxxfer);
	maxsegmentsize_ll = MIN(maxsegmentsize_ll, count_max);
	if (maxsegmentsize_ll == 0 || (maxsegmentsize_ll > 0xFFFFFFFF)) {
		maxsegmentsize = 0xFFFFFFFF;
	} else {
		maxsegmentsize = maxsegmentsize_ll;
	}
	dma->dp_sglinfo.si_max_cookie_size = maxsegmentsize;
	dma->dp_sglinfo.si_segmask = attr->dma_attr_seg;

	/* check the ddi_dma_attr arg to make sure it makes a little sense */
	if (rootnex_alloc_check_parms) {
		e = rootnex_valid_alloc_parms(attr, maxsegmentsize);
		if (e != DDI_SUCCESS) {
			ROOTNEX_PROF_INC(&rootnex_cnt[ROOTNEX_CNT_ALLOC_FAIL]);
			(void) rootnex_dma_freehdl(dip, rdip,
			    (ddi_dma_handle_t)hp);
			return (e);
		}
	}

	*handlep = (ddi_dma_handle_t)hp;

	ROOTNEX_PROF_INC(&rootnex_cnt[ROOTNEX_CNT_ACTIVE_HDLS]);
	DTRACE_PROBE1(rootnex__alloc__handle, uint64_t,
	    rootnex_cnt[ROOTNEX_CNT_ACTIVE_HDLS]);

	return (DDI_SUCCESS);
}
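/*
 * Illustrative note (not part of the original source): leaf drivers reach
 * rootnex_dma_allochdl() indirectly through ddi_dma_alloc_handle(9F).  A
 * hypothetical call (names are examples only) looks like:
 *
 *	if (ddi_dma_alloc_handle(dip, &xx_dma_attr, DDI_DMA_SLEEP, NULL,
 *	    &xx_dma_handle) != DDI_SUCCESS)
 *		return (DDI_FAILURE);
 *
 * where xx_dma_attr is the driver's ddi_dma_attr_t; its dma_attr_seg,
 * dma_attr_count_max, dma_attr_granular and dma_attr_maxxfer fields feed
 * the maximum segment size computation above.
 */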
/*
 * rootnex_dma_freehdl()
 *    called from ddi_dma_free_handle().
 */
/*ARGSUSED*/
static int
rootnex_dma_freehdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle)
{
	ddi_dma_impl_t *hp;
	rootnex_dma_t *dma;


	hp = (ddi_dma_impl_t *)handle;
	dma = (rootnex_dma_t *)hp->dmai_private;

	/* unbind should have been called first */
	ASSERT(!dma->dp_inuse);

	mutex_destroy(&dma->dp_mutex);
	kmem_cache_free(rootnex_state->r_dmahdl_cache, hp);

	ROOTNEX_PROF_DEC(&rootnex_cnt[ROOTNEX_CNT_ACTIVE_HDLS]);
	DTRACE_PROBE1(rootnex__free__handle, uint64_t,
	    rootnex_cnt[ROOTNEX_CNT_ACTIVE_HDLS]);

	if (rootnex_state->r_dvma_call_list_id)
		ddi_run_callback(&rootnex_state->r_dvma_call_list_id);

	return (DDI_SUCCESS);
}
/*
 * rootnex_dma_bindhdl()
 *    called from ddi_dma_addr_bind_handle() and ddi_dma_buf_bind_handle().
 */
/*ARGSUSED*/
static int
rootnex_dma_bindhdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle,
    struct ddi_dma_req *dmareq, ddi_dma_cookie_t *cookiep, uint_t *ccountp)
{
	rootnex_sglinfo_t *sinfo;
	ddi_dma_attr_t *attr;
	ddi_dma_impl_t *hp;
	rootnex_dma_t *dma;
	int kmflag;
	int e;


	hp = (ddi_dma_impl_t *)handle;
	dma = (rootnex_dma_t *)hp->dmai_private;
	sinfo = &dma->dp_sglinfo;
	attr = &hp->dmai_attr;

	hp->dmai_rflags = dmareq->dmar_flags & DMP_DDIFLAGS;

	/*
	 * This is useful for debugging a driver. Not as useful in a production
	 * system. The only time this will fail is if you have a driver bug.
	 */
	if (rootnex_bind_check_inuse) {
		/*
		 * No one else should ever have this lock unless someone else
		 * is trying to use this handle. So contention on the lock
		 * is the same as inuse being set.
		 */
		e = mutex_tryenter(&dma->dp_mutex);
		if (e == 0) {
			ROOTNEX_PROF_INC(&rootnex_cnt[ROOTNEX_CNT_BIND_FAIL]);
			return (DDI_DMA_INUSE);
		}
		if (dma->dp_inuse) {
			mutex_exit(&dma->dp_mutex);
			ROOTNEX_PROF_INC(&rootnex_cnt[ROOTNEX_CNT_BIND_FAIL]);
			return (DDI_DMA_INUSE);
		}
		dma->dp_inuse = B_TRUE;
		mutex_exit(&dma->dp_mutex);
	}

	/* check the ddi_dma_attr arg to make sure it makes a little sense */
	if (rootnex_bind_check_parms) {
		e = rootnex_valid_bind_parms(dmareq, attr);
		if (e != DDI_SUCCESS) {
			ROOTNEX_PROF_INC(&rootnex_cnt[ROOTNEX_CNT_BIND_FAIL]);
			rootnex_clean_dmahdl(hp);
			return (e);
		}
	}

	/* save away the original bind info */
	dma->dp_dma = dmareq->dmar_object;

	/*
	 * Figure out a rough estimate of what maximum number of pages this
	 * buffer could use (a high estimate of course).
	 */
	sinfo->si_max_pages = mmu_btopr(dma->dp_dma.dmao_size) + 1;

	/*
	 * We'll use the pre-allocated cookies for any bind that will *always*
	 * fit (more important to be consistent, we don't want to create
	 * additional degenerate cases).
	 */
	if (sinfo->si_max_pages <= rootnex_state->r_prealloc_cookies) {
		dma->dp_cookies = (ddi_dma_cookie_t *)dma->dp_prealloc_buffer;
		dma->dp_need_to_free_cookie = B_FALSE;
		DTRACE_PROBE2(rootnex__bind__prealloc, dev_info_t *, rdip,
		    uint_t, sinfo->si_max_pages);

	/*
	 * For anything larger than that, we'll go ahead and allocate the
	 * maximum number of pages we expect to see. Hopefully, we won't be
	 * seeing this path in the fast path for high performance devices very
	 * frequently.
	 *
	 * a ddi bind interface that allowed the driver to provide storage to
	 * the bind interface would speed this case up.
	 */
	} else {
		/* convert the sleep flags */
		if (dmareq->dmar_fp == DDI_DMA_SLEEP) {
			kmflag = KM_SLEEP;
		} else {
			kmflag = KM_NOSLEEP;
		}

		/*
		 * Save away how much memory we allocated. If we're doing a
		 * nosleep, the alloc could fail...
		 */
		dma->dp_cookie_size = sinfo->si_max_pages *
		    sizeof (ddi_dma_cookie_t);
		dma->dp_cookies = kmem_alloc(dma->dp_cookie_size, kmflag);
		if (dma->dp_cookies == NULL) {
			ROOTNEX_PROF_INC(&rootnex_cnt[ROOTNEX_CNT_BIND_FAIL]);
			rootnex_clean_dmahdl(hp);
			return (DDI_DMA_NORESOURCES);
		}
		dma->dp_need_to_free_cookie = B_TRUE;
		DTRACE_PROBE2(rootnex__bind__alloc, dev_info_t *, rdip, uint_t,
		    sinfo->si_max_pages);
	}
	hp->dmai_cookie = dma->dp_cookies;

	/*
	 * Get the real sgl. rootnex_get_sgl will fill in the cookie array
	 * while looking at the constraints in the dma structure. It will then
	 * put some additional state about the sgl in the dma struct (i.e. is
	 * the sgl clean, or do we need to do some munging; how many pages
	 * need to be copied, etc.)
	 */
	rootnex_get_sgl(&dmareq->dmar_object, dma->dp_cookies,
	    &dma->dp_sglinfo);
	ASSERT(sinfo->si_sgl_size <= sinfo->si_max_pages);

	/* if we don't need a copy buffer, we don't need to sync */
	if (sinfo->si_copybuf_req == 0) {
		hp->dmai_rflags |= DMP_NOSYNC;
	}

	/*
	 * if we don't need the copybuf and we don't need to do a partial, we
	 * hit the fast path. All the high performance devices should be trying
	 * to hit this path. To hit this path, a device should be able to reach
	 * all of memory, shouldn't try to bind more than it can transfer, and
	 * the buffer shouldn't require more cookies than the driver/device can
	 * handle [sgllen]).
	 */
	if ((sinfo->si_copybuf_req == 0) &&
	    (sinfo->si_sgl_size <= attr->dma_attr_sgllen) &&
	    (dma->dp_dma.dmao_size < dma->dp_maxxfer)) {
		/*
		 * copy out the first cookie and ccountp, set the cookie
		 * pointer to the second cookie. The first cookie is passed
		 * back on the stack. Additional cookies are accessed via
		 * ddi_dma_nextcookie()
		 */
		*cookiep = dma->dp_cookies[0];
		*ccountp = sinfo->si_sgl_size;
		hp->dmai_cookie++;
		hp->dmai_rflags &= ~DDI_DMA_PARTIAL;
		hp->dmai_nwin = 1;
		ROOTNEX_PROF_INC(&rootnex_cnt[ROOTNEX_CNT_ACTIVE_BINDS]);
		DTRACE_PROBE3(rootnex__bind__fast, dev_info_t *, rdip,
		    uint64_t, rootnex_cnt[ROOTNEX_CNT_ACTIVE_BINDS], uint_t,
		    dma->dp_dma.dmao_size);
		return (DDI_DMA_MAPPED);
	}

	/*
	 * go to the slow path, we may need to alloc more memory, create
	 * multiple windows, and munge up a sgl to make the device happy.
	 */
	e = rootnex_bind_slowpath(hp, dmareq, dma, attr, kmflag);
	if ((e != DDI_DMA_MAPPED) && (e != DDI_DMA_PARTIAL_MAP)) {
		if (dma->dp_need_to_free_cookie) {
			kmem_free(dma->dp_cookies, dma->dp_cookie_size);
		}
		ROOTNEX_PROF_INC(&rootnex_cnt[ROOTNEX_CNT_BIND_FAIL]);
		rootnex_clean_dmahdl(hp); /* must be after free cookie */
		return (e);
	}

	/* if the first window uses the copy buffer, sync it for the device */
	if ((dma->dp_window[dma->dp_current_win].wd_dosync) &&
	    (hp->dmai_rflags & DDI_DMA_WRITE)) {
		(void) rootnex_dma_sync(dip, rdip, handle, 0, 0,
		    DDI_DMA_SYNC_FORDEV);
	}

	/*
	 * copy out the first cookie and ccountp, set the cookie pointer to the
	 * second cookie. Make sure the partial flag is set/cleared correctly.
	 * If we have a partial map (i.e. multiple windows), the number of
	 * cookies we return is the number of cookies in the first window.
	 */
	if (e == DDI_DMA_MAPPED) {
		hp->dmai_rflags &= ~DDI_DMA_PARTIAL;
		*ccountp = sinfo->si_sgl_size;
	} else {
		hp->dmai_rflags |= DDI_DMA_PARTIAL;
		*ccountp = dma->dp_window[dma->dp_current_win].wd_cookie_cnt;
		ASSERT(hp->dmai_nwin <= dma->dp_max_win);
	}
	*cookiep = dma->dp_cookies[0];
	hp->dmai_cookie++;

	ROOTNEX_PROF_INC(&rootnex_cnt[ROOTNEX_CNT_ACTIVE_BINDS]);
	DTRACE_PROBE3(rootnex__bind__slow, dev_info_t *, rdip, uint64_t,
	    rootnex_cnt[ROOTNEX_CNT_ACTIVE_BINDS], uint_t,
	    dma->dp_dma.dmao_size);
	return (e);
}
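/*
 * Illustrative note (not part of the original source): after a successful
 * bind, a driver consumes the scatter/gather list exactly as the comments
 * above describe; the first cookie comes back on the stack and the rest are
 * fetched with ddi_dma_nextcookie(9F).  A hypothetical consumer (names are
 * examples only) looks like:
 *
 *	if (ddi_dma_addr_bind_handle(xx_dma_handle, NULL, buf, len,
 *	    DDI_DMA_READ | DDI_DMA_STREAMING, DDI_DMA_SLEEP, NULL,
 *	    &cookie, &ccount) == DDI_DMA_MAPPED) {
 *		for (i = 0; i < ccount; i++) {
 *			(program cookie.dmac_laddress / dmac_size here)
 *			if (i + 1 < ccount)
 *				ddi_dma_nextcookie(xx_dma_handle, &cookie);
 *		}
 *	}
 */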
/*
 * rootnex_dma_unbindhdl()
 *    called from ddi_dma_unbind_handle()
 */
/*ARGSUSED*/
static int
rootnex_dma_unbindhdl(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle)
{
	ddi_dma_impl_t *hp;
	rootnex_dma_t *dma;
	int e;


	hp = (ddi_dma_impl_t *)handle;
	dma = (rootnex_dma_t *)hp->dmai_private;

	/* make sure the buffer wasn't free'd before calling unbind */
	if (rootnex_unbind_verify_buffer) {
		e = rootnex_verify_buffer(dma);
		if (e != DDI_SUCCESS) {
			ASSERT(0);
			return (DDI_FAILURE);
		}
	}

	/* sync the current window before unbinding the buffer */
	if (dma->dp_window && dma->dp_window[dma->dp_current_win].wd_dosync &&
	    (hp->dmai_rflags & DDI_DMA_READ)) {
		(void) rootnex_dma_sync(dip, rdip, handle, 0, 0,
		    DDI_DMA_SYNC_FORCPU);
	}

	/*
	 * cleanup and copy buffer or window state. if we didn't use the copy
	 * buffer or windows, there won't be much to do :-)
	 */
	rootnex_teardown_copybuf(dma);
	rootnex_teardown_windows(dma);

	/*
	 * If we had to allocate space for the worst case sgl (it didn't
	 * fit into our pre-allocated buffer), free that up now
	 */
	if (dma->dp_need_to_free_cookie) {
		kmem_free(dma->dp_cookies, dma->dp_cookie_size);
	}

	/*
	 * clean up the handle so it's ready for the next bind (i.e. if the
	 * handle is reused).
	 */
	rootnex_clean_dmahdl(hp);

	if (rootnex_state->r_dvma_call_list_id)
		ddi_run_callback(&rootnex_state->r_dvma_call_list_id);

	ROOTNEX_PROF_DEC(&rootnex_cnt[ROOTNEX_CNT_ACTIVE_BINDS]);
	DTRACE_PROBE1(rootnex__unbind, uint64_t,
	    rootnex_cnt[ROOTNEX_CNT_ACTIVE_BINDS]);

	return (DDI_SUCCESS);
}
1944 */ 1945 if (e == DDI_DMA_MAPPED) { 1946 hp->dmai_rflags &= ~DDI_DMA_PARTIAL; 1947 *ccountp = sinfo->si_sgl_size; 1948 } else { 1949 hp->dmai_rflags |= DDI_DMA_PARTIAL; 1950 *ccountp = dma->dp_window[dma->dp_current_win].wd_cookie_cnt; 1951 ASSERT(hp->dmai_nwin <= dma->dp_max_win); 1952 } 1953 *cookiep = dma->dp_cookies[0]; 1954 hp->dmai_cookie++; 1955 1956 ROOTNEX_PROF_INC(&rootnex_cnt[ROOTNEX_CNT_ACTIVE_BINDS]); 1957 DTRACE_PROBE3(rootnex__bind__slow, dev_info_t *, rdip, uint64_t, 1958 rootnex_cnt[ROOTNEX_CNT_ACTIVE_BINDS], uint_t, 1959 dma->dp_dma.dmao_size); 1960 return (e); 1961 } 1962 1963 1964 /* 1965 * rootnex_dma_unbindhdl() 1966 * called from ddi_dma_unbind_handle() 1967 */ 1968 /*ARGSUSED*/ 1969 static int 1970 rootnex_dma_unbindhdl(dev_info_t *dip, dev_info_t *rdip, 1971 ddi_dma_handle_t handle) 1972 { 1973 ddi_dma_impl_t *hp; 1974 rootnex_dma_t *dma; 1975 int e; 1976 1977 1978 hp = (ddi_dma_impl_t *)handle; 1979 dma = (rootnex_dma_t *)hp->dmai_private; 1980 1981 /* make sure the buffer wasn't free'd before calling unbind */ 1982 if (rootnex_unbind_verify_buffer) { 1983 e = rootnex_verify_buffer(dma); 1984 if (e != DDI_SUCCESS) { 1985 ASSERT(0); 1986 return (DDI_FAILURE); 1987 } 1988 } 1989 1990 /* sync the current window before unbinding the buffer */ 1991 if (dma->dp_window && dma->dp_window[dma->dp_current_win].wd_dosync && 1992 (hp->dmai_rflags & DDI_DMA_READ)) { 1993 (void) rootnex_dma_sync(dip, rdip, handle, 0, 0, 1994 DDI_DMA_SYNC_FORCPU); 1995 } 1996 1997 /* 1998 * cleanup and copy buffer or window state. if we didn't use the copy 1999 * buffer or windows, there won't be much to do :-) 2000 */ 2001 rootnex_teardown_copybuf(dma); 2002 rootnex_teardown_windows(dma); 2003 2004 /* 2005 * If we had to allocate space to for the worse case sgl (it didn't 2006 * fit into our pre-allocate buffer), free that up now 2007 */ 2008 if (dma->dp_need_to_free_cookie) { 2009 kmem_free(dma->dp_cookies, dma->dp_cookie_size); 2010 } 2011 2012 /* 2013 * clean up the handle so it's ready for the next bind (i.e. if the 2014 * handle is reused). 2015 */ 2016 rootnex_clean_dmahdl(hp); 2017 2018 if (rootnex_state->r_dvma_call_list_id) 2019 ddi_run_callback(&rootnex_state->r_dvma_call_list_id); 2020 2021 ROOTNEX_PROF_DEC(&rootnex_cnt[ROOTNEX_CNT_ACTIVE_BINDS]); 2022 DTRACE_PROBE1(rootnex__unbind, uint64_t, 2023 rootnex_cnt[ROOTNEX_CNT_ACTIVE_BINDS]); 2024 2025 return (DDI_SUCCESS); 2026 } 2027 2028 2029 /* 2030 * rootnex_verify_buffer() 2031 * verify buffer wasn't free'd 2032 */ 2033 static int 2034 rootnex_verify_buffer(rootnex_dma_t *dma) 2035 { 2036 peekpoke_ctlops_t peek; 2037 page_t **pplist; 2038 caddr_t vaddr; 2039 uint_t pcnt; 2040 uint_t poff; 2041 page_t *pp; 2042 uint8_t b; 2043 int i; 2044 int e; 2045 2046 2047 /* Figure out how many pages this buffer occupies */ 2048 if (dma->dp_dma.dmao_type == DMA_OTYP_PAGES) { 2049 poff = dma->dp_dma.dmao_obj.pp_obj.pp_offset & MMU_PAGEOFFSET; 2050 } else { 2051 vaddr = dma->dp_dma.dmao_obj.virt_obj.v_addr; 2052 poff = (uintptr_t)vaddr & MMU_PAGEOFFSET; 2053 } 2054 pcnt = mmu_btopr(dma->dp_dma.dmao_size + poff); 2055 2056 switch (dma->dp_dma.dmao_type) { 2057 case DMA_OTYP_PAGES: 2058 /* 2059 * for a linked list of pp's walk through them to make sure 2060 * they're locked and not free. 
2061 */ 2062 pp = dma->dp_dma.dmao_obj.pp_obj.pp_pp; 2063 for (i = 0; i < pcnt; i++) { 2064 if (PP_ISFREE(pp) || !PAGE_LOCKED(pp)) { 2065 return (DDI_FAILURE); 2066 } 2067 pp = pp->p_next; 2068 } 2069 break; 2070 2071 case DMA_OTYP_VADDR: 2072 case DMA_OTYP_BUFVADDR: 2073 pplist = dma->dp_dma.dmao_obj.virt_obj.v_priv; 2074 /* 2075 * for an array of pp's walk through them to make sure they're 2076 * not free. It's possible that they may not be locked. 2077 */ 2078 if (pplist) { 2079 for (i = 0; i < pcnt; i++) { 2080 if (PP_ISFREE(pplist[i])) { 2081 return (DDI_FAILURE); 2082 } 2083 } 2084 2085 /* For a virtual address, try to peek at each page */ 2086 } else { 2087 if (dma->dp_sglinfo.si_asp == &kas) { 2088 bzero(&peek, sizeof (peekpoke_ctlops_t)); 2089 peek.host_addr = (uintptr_t)&b; 2090 peek.size = sizeof (uint8_t); 2091 peek.dev_addr = (uintptr_t)vaddr; 2092 for (i = 0; i < pcnt; i++) { 2093 e = rootnex_ctlops_peek(&peek, &b); 2094 if (e != DDI_SUCCESS) { 2095 return (DDI_FAILURE); 2096 } 2097 peek.dev_addr += MMU_PAGESIZE; 2098 } 2099 } 2100 } 2101 break; 2102 2103 default: 2104 ASSERT(0); 2105 break; 2106 } 2107 2108 return (DDI_SUCCESS); 2109 } 2110 2111 2112 /* 2113 * rootnex_clean_dmahdl() 2114 * Clean the dma handle. This should be called on a handle alloc and an 2115 * unbind handle. Set the handle state to the default settings. 2116 */ 2117 static void 2118 rootnex_clean_dmahdl(ddi_dma_impl_t *hp) 2119 { 2120 rootnex_dma_t *dma; 2121 2122 2123 dma = (rootnex_dma_t *)hp->dmai_private; 2124 2125 hp->dmai_nwin = 0; 2126 dma->dp_current_cookie = 0; 2127 dma->dp_copybuf_size = 0; 2128 dma->dp_window = NULL; 2129 dma->dp_cbaddr = NULL; 2130 dma->dp_inuse = B_FALSE; 2131 dma->dp_need_to_free_cookie = B_FALSE; 2132 dma->dp_need_to_free_window = B_FALSE; 2133 dma->dp_partial_required = B_FALSE; 2134 dma->dp_trim_required = B_FALSE; 2135 dma->dp_sglinfo.si_copybuf_req = 0; 2136 #if !defined(__amd64) 2137 dma->dp_cb_remaping = B_FALSE; 2138 dma->dp_kva = NULL; 2139 #endif 2140 2141 /* FMA related initialization */ 2142 hp->dmai_fault = 0; 2143 hp->dmai_fault_check = NULL; 2144 hp->dmai_fault_notify = NULL; 2145 hp->dmai_error.err_ena = 0; 2146 hp->dmai_error.err_status = DDI_FM_OK; 2147 hp->dmai_error.err_expected = DDI_FM_ERR_UNEXPECTED; 2148 hp->dmai_error.err_ontrap = NULL; 2149 hp->dmai_error.err_fep = NULL; 2150 } 2151 2152 2153 /* 2154 * rootnex_valid_alloc_parms() 2155 * Called in ddi_dma_alloc_handle path to validate its parameters. 2156 */ 2157 static int 2158 rootnex_valid_alloc_parms(ddi_dma_attr_t *attr, uint_t maxsegmentsize) 2159 { 2160 if ((attr->dma_attr_seg < MMU_PAGEOFFSET) || 2161 (attr->dma_attr_count_max < MMU_PAGEOFFSET) || 2162 (attr->dma_attr_granular > MMU_PAGESIZE) || 2163 (attr->dma_attr_maxxfer < MMU_PAGESIZE)) { 2164 return (DDI_DMA_BADATTR); 2165 } 2166 2167 if (attr->dma_attr_addr_hi <= attr->dma_attr_addr_lo) { 2168 return (DDI_DMA_BADATTR); 2169 } 2170 2171 if ((attr->dma_attr_seg & MMU_PAGEOFFSET) != MMU_PAGEOFFSET || 2172 MMU_PAGESIZE & (attr->dma_attr_granular - 1) || 2173 attr->dma_attr_sgllen <= 0) { 2174 return (DDI_DMA_BADATTR); 2175 } 2176 2177 /* We should be able to DMA into every byte offset in a page */ 2178 if (maxsegmentsize < MMU_PAGESIZE) { 2179 return (DDI_DMA_BADATTR); 2180 } 2181 2182 return (DDI_SUCCESS); 2183 } 2184 2185 2186 /* 2187 * rootnex_valid_bind_parms() 2188 * Called in ddi_dma_*_bind_handle path to validate its parameters. 
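 *
 *    For reference only: one illustrative combination of ddi_dma_attr(9S)
 *    values which satisfies the checks in rootnex_valid_alloc_parms()
 *    above (the numbers are made up, not a recommendation):
 *    dma_attr_addr_lo = 0, dma_attr_addr_hi = 0xFFFFFFFF,
 *    dma_attr_seg = 0xFFFFFFFF, dma_attr_count_max = 0xFFFFFFFF,
 *    dma_attr_granular = 512 (a power of two which divides the page
 *    size), dma_attr_sgllen = 17, dma_attr_maxxfer = 0xFFFFFFFF.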
2189 */ 2190 /* ARGSUSED */ 2191 static int 2192 rootnex_valid_bind_parms(ddi_dma_req_t *dmareq, ddi_dma_attr_t *attr) 2193 { 2194 #if !defined(__amd64) 2195 /* 2196 * we only support up to a 2G-1 transfer size on 32-bit kernels so 2197 * we can track the offset for the obsoleted interfaces. 2198 */ 2199 if (dmareq->dmar_object.dmao_size > 0x7FFFFFFF) { 2200 return (DDI_DMA_TOOBIG); 2201 } 2202 #endif 2203 2204 return (DDI_SUCCESS); 2205 } 2206 2207 2208 /* 2209 * rootnex_get_sgl() 2210 * Called in bind fastpath to get the sgl. Most of this will be replaced 2211 * with a call to the vm layer when vm2.0 comes around... 2212 */ 2213 static void 2214 rootnex_get_sgl(ddi_dma_obj_t *dmar_object, ddi_dma_cookie_t *sgl, 2215 rootnex_sglinfo_t *sglinfo) 2216 { 2217 ddi_dma_atyp_t buftype; 2218 uint64_t last_page; 2219 uint64_t offset; 2220 uint64_t addrhi; 2221 uint64_t addrlo; 2222 uint64_t maxseg; 2223 page_t **pplist; 2224 uint64_t paddr; 2225 uint32_t psize; 2226 uint32_t size; 2227 caddr_t vaddr; 2228 uint_t pcnt; 2229 page_t *pp; 2230 uint_t cnt; 2231 2232 2233 /* shortcuts */ 2234 pplist = dmar_object->dmao_obj.virt_obj.v_priv; 2235 vaddr = dmar_object->dmao_obj.virt_obj.v_addr; 2236 maxseg = sglinfo->si_max_cookie_size; 2237 buftype = dmar_object->dmao_type; 2238 addrhi = sglinfo->si_max_addr; 2239 addrlo = sglinfo->si_min_addr; 2240 size = dmar_object->dmao_size; 2241 2242 pcnt = 0; 2243 cnt = 0; 2244 2245 /* 2246 * if we were passed down a linked list of pages, i.e. pointer to 2247 * page_t, use this to get our physical address and buf offset. 2248 */ 2249 if (buftype == DMA_OTYP_PAGES) { 2250 pp = dmar_object->dmao_obj.pp_obj.pp_pp; 2251 ASSERT(!PP_ISFREE(pp) && PAGE_LOCKED(pp)); 2252 offset = dmar_object->dmao_obj.pp_obj.pp_offset & 2253 MMU_PAGEOFFSET; 2254 paddr = ptob64(pp->p_pagenum) + offset; 2255 psize = MIN(size, (MMU_PAGESIZE - offset)); 2256 pp = pp->p_next; 2257 sglinfo->si_asp = NULL; 2258 2259 /* 2260 * We weren't passed down a linked list of pages, but if we were passed 2261 * down an array of pages, use this to get our physical address and buf 2262 * offset. 2263 */ 2264 } else if (pplist != NULL) { 2265 ASSERT((buftype == DMA_OTYP_VADDR) || 2266 (buftype == DMA_OTYP_BUFVADDR)); 2267 2268 offset = (uintptr_t)vaddr & MMU_PAGEOFFSET; 2269 sglinfo->si_asp = dmar_object->dmao_obj.virt_obj.v_as; 2270 if (sglinfo->si_asp == NULL) { 2271 sglinfo->si_asp = &kas; 2272 } 2273 2274 ASSERT(!PP_ISFREE(pplist[pcnt])); 2275 paddr = ptob64(pplist[pcnt]->p_pagenum); 2276 paddr += offset; 2277 psize = MIN(size, (MMU_PAGESIZE - offset)); 2278 pcnt++; 2279 2280 /* 2281 * All we have is a virtual address, we'll need to call into the VM 2282 * to get the physical address. 2283 */ 2284 } else { 2285 ASSERT((buftype == DMA_OTYP_VADDR) || 2286 (buftype == DMA_OTYP_BUFVADDR)); 2287 2288 offset = (uintptr_t)vaddr & MMU_PAGEOFFSET; 2289 sglinfo->si_asp = dmar_object->dmao_obj.virt_obj.v_as; 2290 if (sglinfo->si_asp == NULL) { 2291 sglinfo->si_asp = &kas; 2292 } 2293 2294 paddr = ptob64(hat_getpfnum(sglinfo->si_asp->a_hat, vaddr)); 2295 paddr += offset; 2296 psize = MIN(size, (MMU_PAGESIZE - offset)); 2297 vaddr += psize; 2298 } 2299 2300 /* 2301 * Setup the first cookie with the physical address of the page and the 2302 * size of the page (which takes into account the initial offset into 2303 * the page. 2304 */ 2305 sgl[cnt].dmac_laddress = paddr; 2306 sgl[cnt].dmac_size = psize; 2307 sgl[cnt].dmac_type = 0; 2308 2309 /* 2310 * Save away the buffer offset into the page. 
We'll need this later in 2311 * the copy buffer code to help figure out the page index within the 2312 * buffer and the offset into the current page. 2313 */ 2314 sglinfo->si_buf_offset = offset; 2315 2316 /* 2317 * If the DMA engine can't reach the physical address, increase how 2318 * much copy buffer we need. We always increase by pagesize so we don't 2319 * have to worry about converting offsets. Set a flag in the cookies 2320 * dmac_type to indicate that it uses the copy buffer. If this isn't the 2321 * last cookie, go to the next cookie (since we separate each page which 2322 * uses the copy buffer in case the copy buffer is not physically 2323 * contiguous. 2324 */ 2325 if ((paddr < addrlo) || ((paddr + psize) > addrhi)) { 2326 sglinfo->si_copybuf_req += MMU_PAGESIZE; 2327 sgl[cnt].dmac_type = ROOTNEX_USES_COPYBUF; 2328 if ((cnt + 1) < sglinfo->si_max_pages) { 2329 cnt++; 2330 sgl[cnt].dmac_laddress = 0; 2331 sgl[cnt].dmac_size = 0; 2332 sgl[cnt].dmac_type = 0; 2333 } 2334 } 2335 2336 /* 2337 * save this page's physical address so we can figure out if the next 2338 * page is physically contiguous. Keep decrementing size until we are 2339 * done with the buffer. 2340 */ 2341 last_page = paddr & MMU_PAGEMASK; 2342 size -= psize; 2343 2344 while (size > 0) { 2345 /* Get the size for this page (i.e. partial or full page) */ 2346 psize = MIN(size, MMU_PAGESIZE); 2347 2348 if (buftype == DMA_OTYP_PAGES) { 2349 /* get the paddr from the page_t */ 2350 ASSERT(!PP_ISFREE(pp) && PAGE_LOCKED(pp)); 2351 paddr = ptob64(pp->p_pagenum); 2352 pp = pp->p_next; 2353 } else if (pplist != NULL) { 2354 /* index into the array of page_t's to get the paddr */ 2355 ASSERT(!PP_ISFREE(pplist[pcnt])); 2356 paddr = ptob64(pplist[pcnt]->p_pagenum); 2357 pcnt++; 2358 } else { 2359 /* call into the VM to get the paddr */ 2360 paddr = ptob64(hat_getpfnum(sglinfo->si_asp->a_hat, 2361 vaddr)); 2362 vaddr += psize; 2363 } 2364 2365 /* check to see if this page needs the copy buffer */ 2366 if ((paddr < addrlo) || ((paddr + psize) > addrhi)) { 2367 sglinfo->si_copybuf_req += MMU_PAGESIZE; 2368 2369 /* 2370 * if there is something in the current cookie, go to 2371 * the next one. We only want one page in a cookie which 2372 * uses the copybuf since the copybuf doesn't have to 2373 * be physically contiguous. 2374 */ 2375 if (sgl[cnt].dmac_size != 0) { 2376 cnt++; 2377 } 2378 sgl[cnt].dmac_laddress = paddr; 2379 sgl[cnt].dmac_size = psize; 2380 #if defined(__amd64) 2381 sgl[cnt].dmac_type = ROOTNEX_USES_COPYBUF; 2382 #else 2383 /* 2384 * save the buf offset for 32-bit kernel. used in the 2385 * obsoleted interfaces. 2386 */ 2387 sgl[cnt].dmac_type = ROOTNEX_USES_COPYBUF | 2388 (dmar_object->dmao_size - size); 2389 #endif 2390 /* if this isn't the last cookie, go to the next one */ 2391 if ((cnt + 1) < sglinfo->si_max_pages) { 2392 cnt++; 2393 sgl[cnt].dmac_laddress = 0; 2394 sgl[cnt].dmac_size = 0; 2395 sgl[cnt].dmac_type = 0; 2396 } 2397 2398 /* 2399 * this page didn't need the copy buffer, if it's not physically 2400 * contiguous, or it would put us over a segment boundary, or it 2401 * puts us over the max cookie size, or the current sgl doesn't 2402 * have anything in it. 2403 */ 2404 } else if (((last_page + MMU_PAGESIZE) != paddr) || 2405 !(paddr & sglinfo->si_segmask) || 2406 ((sgl[cnt].dmac_size + psize) > maxseg) || 2407 (sgl[cnt].dmac_size == 0)) { 2408 /* 2409 * if we're not already in a new cookie, go to the next 2410 * cookie. 
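 *
 * As an illustration of the copy buffer test above (made-up numbers,
 * 4K pages): for a device whose si_max_addr is 0x1000000 (i.e. it can
 * only address the first 16MB), a page at physical address 0x40000000
 * fails the range check, so its cookie is tagged ROOTNEX_USES_COPYBUF
 * and si_copybuf_req grows by one page.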
2411 */ 2412 if (sgl[cnt].dmac_size != 0) { 2413 cnt++; 2414 } 2415 2416 /* save the cookie information */ 2417 sgl[cnt].dmac_laddress = paddr; 2418 sgl[cnt].dmac_size = psize; 2419 #if defined(__amd64) 2420 sgl[cnt].dmac_type = 0; 2421 #else 2422 /* 2423 * save the buf offset for 32-bit kernel. used in the 2424 * obsoleted interfaces. 2425 */ 2426 sgl[cnt].dmac_type = dmar_object->dmao_size - size; 2427 #endif 2428 2429 /* 2430 * this page didn't need the copy buffer, it is physically 2431 * contiguous with the last page, and it's <= the max cookie 2432 * size. 2433 */ 2434 } else { 2435 sgl[cnt].dmac_size += psize; 2436 2437 /* 2438 * if this exactly == the maximum cookie size, and 2439 * it isn't the last cookie, go to the next cookie. 2440 */ 2441 if (((sgl[cnt].dmac_size + psize) == maxseg) && 2442 ((cnt + 1) < sglinfo->si_max_pages)) { 2443 cnt++; 2444 sgl[cnt].dmac_laddress = 0; 2445 sgl[cnt].dmac_size = 0; 2446 sgl[cnt].dmac_type = 0; 2447 } 2448 } 2449 2450 /* 2451 * save this page's physical address so we can figure out if the 2452 * next page is physically contiguous. Keep decrementing size 2453 * until we are done with the buffer. 2454 */ 2455 last_page = paddr; 2456 size -= psize; 2457 } 2458 2459 /* we're done, save away how many cookies the sgl has */ 2460 if (sgl[cnt].dmac_size == 0) { 2461 ASSERT(cnt < sglinfo->si_max_pages); 2462 sglinfo->si_sgl_size = cnt; 2463 } else { 2464 sglinfo->si_sgl_size = cnt + 1; 2465 } 2466 } 2467 2468 2469 /* 2470 * rootnex_bind_slowpath() 2471 * Call in the bind path if the calling driver can't use the sgl without 2472 * modifying it. We either need to use the copy buffer and/or we will end up 2473 * with a partial bind. 2474 */ 2475 static int 2476 rootnex_bind_slowpath(ddi_dma_impl_t *hp, struct ddi_dma_req *dmareq, 2477 rootnex_dma_t *dma, ddi_dma_attr_t *attr, int kmflag) 2478 { 2479 rootnex_sglinfo_t *sinfo; 2480 rootnex_window_t *window; 2481 ddi_dma_cookie_t *cookie; 2482 size_t copybuf_used; 2483 size_t dmac_size; 2484 boolean_t partial; 2485 off_t cur_offset; 2486 page_t *cur_pp; 2487 major_t mnum; 2488 int e; 2489 int i; 2490 2491 2492 sinfo = &dma->dp_sglinfo; 2493 copybuf_used = 0; 2494 partial = B_FALSE; 2495 2496 /* 2497 * If we're using the copybuf, set the copybuf state in dma struct. 2498 * Needs to be first since it sets the copy buffer size. 2499 */ 2500 if (sinfo->si_copybuf_req != 0) { 2501 e = rootnex_setup_copybuf(hp, dmareq, dma, attr); 2502 if (e != DDI_SUCCESS) { 2503 return (e); 2504 } 2505 } else { 2506 dma->dp_copybuf_size = 0; 2507 } 2508 2509 /* 2510 * Figure out if we need to do a partial mapping. If so, figure out 2511 * if we need to trim the buffers when we munge the sgl. 2512 */ 2513 if ((dma->dp_copybuf_size < sinfo->si_copybuf_req) || 2514 (dma->dp_dma.dmao_size > dma->dp_maxxfer) || 2515 (attr->dma_attr_sgllen < sinfo->si_sgl_size)) { 2516 dma->dp_partial_required = B_TRUE; 2517 if (attr->dma_attr_granular != 1) { 2518 dma->dp_trim_required = B_TRUE; 2519 } 2520 } else { 2521 dma->dp_partial_required = B_FALSE; 2522 dma->dp_trim_required = B_FALSE; 2523 } 2524 2525 /* If we need to do a partial bind, make sure the driver supports it */ 2526 if (dma->dp_partial_required && 2527 !(dmareq->dmar_flags & DDI_DMA_PARTIAL)) { 2528 2529 mnum = ddi_driver_major(dma->dp_dip); 2530 /* 2531 * patchable which allows us to print one warning per major 2532 * number. 
2533 */ 2534 if ((rootnex_bind_warn) && 2535 ((rootnex_warn_list[mnum] & ROOTNEX_BIND_WARNING) == 0)) { 2536 rootnex_warn_list[mnum] |= ROOTNEX_BIND_WARNING; 2537 cmn_err(CE_WARN, "!%s: coding error detected, the " 2538 "driver is using ddi_dma_attr(9S) incorrectly. " 2539 "There is a small risk of data corruption in " 2540 "particular with large I/Os. The driver should be " 2541 "replaced with a corrected version for proper " 2542 "system operation. To disable this warning, add " 2543 "'set rootnex:rootnex_bind_warn=0' to " 2544 "/etc/system(4).", ddi_driver_name(dma->dp_dip)); 2545 } 2546 return (DDI_DMA_TOOBIG); 2547 } 2548 2549 /* 2550 * we might need multiple windows, setup state to handle them. In this 2551 * code path, we will have at least one window. 2552 */ 2553 e = rootnex_setup_windows(hp, dma, attr, kmflag); 2554 if (e != DDI_SUCCESS) { 2555 rootnex_teardown_copybuf(dma); 2556 return (e); 2557 } 2558 2559 window = &dma->dp_window[0]; 2560 cookie = &dma->dp_cookies[0]; 2561 cur_offset = 0; 2562 rootnex_init_win(hp, dma, window, cookie, cur_offset); 2563 if (dmareq->dmar_object.dmao_type == DMA_OTYP_PAGES) { 2564 cur_pp = dmareq->dmar_object.dmao_obj.pp_obj.pp_pp; 2565 } 2566 2567 /* loop though all the cookies we got back from get_sgl() */ 2568 for (i = 0; i < sinfo->si_sgl_size; i++) { 2569 /* 2570 * If we're using the copy buffer, check this cookie and setup 2571 * its associated copy buffer state. If this cookie uses the 2572 * copy buffer, make sure we sync this window during dma_sync. 2573 */ 2574 if (dma->dp_copybuf_size > 0) { 2575 rootnex_setup_cookie(&dmareq->dmar_object, dma, cookie, 2576 cur_offset, ©buf_used, &cur_pp); 2577 if (cookie->dmac_type & ROOTNEX_USES_COPYBUF) { 2578 window->wd_dosync = B_TRUE; 2579 } 2580 } 2581 2582 /* 2583 * save away the cookie size, since it could be modified in 2584 * the windowing code. 2585 */ 2586 dmac_size = cookie->dmac_size; 2587 2588 /* if we went over max copybuf size */ 2589 if (dma->dp_copybuf_size && 2590 (copybuf_used > dma->dp_copybuf_size)) { 2591 partial = B_TRUE; 2592 e = rootnex_copybuf_window_boundary(hp, dma, &window, 2593 cookie, cur_offset, ©buf_used); 2594 if (e != DDI_SUCCESS) { 2595 rootnex_teardown_copybuf(dma); 2596 rootnex_teardown_windows(dma); 2597 return (e); 2598 } 2599 2600 /* 2601 * if the coookie uses the copy buffer, make sure the 2602 * new window we just moved to is set to sync. 2603 */ 2604 if (cookie->dmac_type & ROOTNEX_USES_COPYBUF) { 2605 window->wd_dosync = B_TRUE; 2606 } 2607 DTRACE_PROBE1(rootnex__copybuf__window, dev_info_t *, 2608 dma->dp_dip); 2609 2610 /* if the cookie cnt == max sgllen, move to the next window */ 2611 } else if (window->wd_cookie_cnt >= attr->dma_attr_sgllen) { 2612 partial = B_TRUE; 2613 ASSERT(window->wd_cookie_cnt == attr->dma_attr_sgllen); 2614 e = rootnex_sgllen_window_boundary(hp, dma, &window, 2615 cookie, attr, cur_offset); 2616 if (e != DDI_SUCCESS) { 2617 rootnex_teardown_copybuf(dma); 2618 rootnex_teardown_windows(dma); 2619 return (e); 2620 } 2621 2622 /* 2623 * if the coookie uses the copy buffer, make sure the 2624 * new window we just moved to is set to sync. 
2625 */ 2626 if (cookie->dmac_type & ROOTNEX_USES_COPYBUF) { 2627 window->wd_dosync = B_TRUE; 2628 } 2629 DTRACE_PROBE1(rootnex__sgllen__window, dev_info_t *, 2630 dma->dp_dip); 2631 2632 /* else if we will be over maxxfer */ 2633 } else if ((window->wd_size + dmac_size) > 2634 dma->dp_maxxfer) { 2635 partial = B_TRUE; 2636 e = rootnex_maxxfer_window_boundary(hp, dma, &window, 2637 cookie); 2638 if (e != DDI_SUCCESS) { 2639 rootnex_teardown_copybuf(dma); 2640 rootnex_teardown_windows(dma); 2641 return (e); 2642 } 2643 2644 /* 2645 * if the coookie uses the copy buffer, make sure the 2646 * new window we just moved to is set to sync. 2647 */ 2648 if (cookie->dmac_type & ROOTNEX_USES_COPYBUF) { 2649 window->wd_dosync = B_TRUE; 2650 } 2651 DTRACE_PROBE1(rootnex__maxxfer__window, dev_info_t *, 2652 dma->dp_dip); 2653 2654 /* else this cookie fits in the current window */ 2655 } else { 2656 window->wd_cookie_cnt++; 2657 window->wd_size += dmac_size; 2658 } 2659 2660 /* track our offset into the buffer, go to the next cookie */ 2661 ASSERT(dmac_size <= dma->dp_dma.dmao_size); 2662 ASSERT(cookie->dmac_size <= dmac_size); 2663 cur_offset += dmac_size; 2664 cookie++; 2665 } 2666 2667 /* if we ended up with a zero sized window in the end, clean it up */ 2668 if (window->wd_size == 0) { 2669 hp->dmai_nwin--; 2670 window--; 2671 } 2672 2673 ASSERT(window->wd_trim.tr_trim_last == B_FALSE); 2674 2675 if (!partial) { 2676 return (DDI_DMA_MAPPED); 2677 } 2678 2679 ASSERT(dma->dp_partial_required); 2680 return (DDI_DMA_PARTIAL_MAP); 2681 } 2682 2683 2684 /* 2685 * rootnex_setup_copybuf() 2686 * Called in bind slowpath. Figures out if we're going to use the copy 2687 * buffer, and if we do, sets up the basic state to handle it. 2688 */ 2689 static int 2690 rootnex_setup_copybuf(ddi_dma_impl_t *hp, struct ddi_dma_req *dmareq, 2691 rootnex_dma_t *dma, ddi_dma_attr_t *attr) 2692 { 2693 rootnex_sglinfo_t *sinfo; 2694 ddi_dma_attr_t lattr; 2695 size_t max_copybuf; 2696 int cansleep; 2697 int e; 2698 #if !defined(__amd64) 2699 int vmflag; 2700 #endif 2701 2702 2703 sinfo = &dma->dp_sglinfo; 2704 2705 /* 2706 * read this first so it's consistent through the routine so we can 2707 * patch it on the fly. 2708 */ 2709 max_copybuf = rootnex_max_copybuf_size & MMU_PAGEMASK; 2710 2711 /* We need to call into the rootnex on ddi_dma_sync() */ 2712 hp->dmai_rflags &= ~DMP_NOSYNC; 2713 2714 /* make sure the copybuf size <= the max size */ 2715 dma->dp_copybuf_size = MIN(sinfo->si_copybuf_req, max_copybuf); 2716 ASSERT((dma->dp_copybuf_size & MMU_PAGEOFFSET) == 0); 2717 2718 #if !defined(__amd64) 2719 /* 2720 * if we don't have kva space to copy to/from, allocate the KVA space 2721 * now. We only do this for the 32-bit kernel. We use seg kpm space for 2722 * the 64-bit kernel. 2723 */ 2724 if ((dmareq->dmar_object.dmao_type == DMA_OTYP_PAGES) || 2725 (dmareq->dmar_object.dmao_obj.virt_obj.v_as != NULL)) { 2726 2727 /* convert the sleep flags */ 2728 if (dmareq->dmar_fp == DDI_DMA_SLEEP) { 2729 vmflag = VM_SLEEP; 2730 } else { 2731 vmflag = VM_NOSLEEP; 2732 } 2733 2734 /* allocate Kernel VA space that we can bcopy to/from */ 2735 dma->dp_kva = vmem_alloc(heap_arena, dma->dp_copybuf_size, 2736 vmflag); 2737 if (dma->dp_kva == NULL) { 2738 return (DDI_DMA_NORESOURCES); 2739 } 2740 } 2741 #endif 2742 2743 /* convert the sleep flags */ 2744 if (dmareq->dmar_fp == DDI_DMA_SLEEP) { 2745 cansleep = 1; 2746 } else { 2747 cansleep = 0; 2748 } 2749 2750 /* 2751 * Allocated the actual copy buffer. 
This needs to fit within the DMA
2752  *    engine's limits, so we can't use kmem_alloc...
2753  */
2754 	lattr = *attr;
2755 	lattr.dma_attr_align = MMU_PAGESIZE;
2756 	e = i_ddi_mem_alloc(dma->dp_dip, &lattr, dma->dp_copybuf_size, cansleep,
2757 	    0, NULL, &dma->dp_cbaddr, &dma->dp_cbsize, NULL);
2758 	if (e != DDI_SUCCESS) {
2759 #if !defined(__amd64)
2760 		if (dma->dp_kva != NULL) {
2761 			vmem_free(heap_arena, dma->dp_kva,
2762 			    dma->dp_copybuf_size);
2763 		}
2764 #endif
2765 		return (DDI_DMA_NORESOURCES);
2766 	}
2767 
2768 	DTRACE_PROBE2(rootnex__alloc__copybuf, dev_info_t *, dma->dp_dip,
2769 	    size_t, dma->dp_copybuf_size);
2770 
2771 	return (DDI_SUCCESS);
2772 }
2773 
2774 
2775 /*
2776  * rootnex_setup_windows()
2777  *    Called in the bind slowpath to set up the window state. We always have
2778  *    windows in the slowpath, even if the window count is 1.
2779  */
2780 static int
2781 rootnex_setup_windows(ddi_dma_impl_t *hp, rootnex_dma_t *dma,
2782     ddi_dma_attr_t *attr, int kmflag)
2783 {
2784 	rootnex_window_t *windowp;
2785 	rootnex_sglinfo_t *sinfo;
2786 	size_t copy_state_size;
2787 	size_t win_state_size;
2788 	size_t state_available;
2789 	size_t space_needed;
2790 	uint_t copybuf_win;
2791 	uint_t maxxfer_win;
2792 	size_t space_used;
2793 	uint_t sglwin;
2794 
2795 
2796 	sinfo = &dma->dp_sglinfo;
2797 
2798 	dma->dp_current_win = 0;
2799 	hp->dmai_nwin = 0;
2800 
2801 	/* If we don't need to do a partial, we only have one window */
2802 	if (!dma->dp_partial_required) {
2803 		dma->dp_max_win = 1;
2804 
2805 	/*
2806 	 * we need multiple windows, need to figure out the worst case number
2807 	 * of windows.
2808 	 */
2809 	} else {
2810 		/*
2811 		 * if we need windows because we need more copy buffer than
2812 		 * we allow, the worst case number of windows we could need
2813 		 * here would be (copybuf space required / copybuf space that
2814 		 * we have) plus one for remainder, and plus 2 to handle the
2815 		 * extra pages on the trim for the first and last pages of the
2816 		 * buffer (a page is the minimum window size so under the right
2817 		 * attr settings, you could have a window for each page).
2818 		 * The last page will only be hit here if the size is not a
2819 		 * multiple of the granularity (which theoretically shouldn't
2820 		 * be the case but never has been enforced, so we could have
2821 		 * broken things without it).
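		 *
		 * As a made-up example of the copybuf estimate: if the bind
		 * needs 160K of copy buffer but we only have 64K, copybuf_win
		 * below is (160K / 64K) + 1 + 2 = 5 windows in the worst case.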
2822 		 */
2823 		if (sinfo->si_copybuf_req > dma->dp_copybuf_size) {
2824 			ASSERT(dma->dp_copybuf_size > 0);
2825 			copybuf_win = (sinfo->si_copybuf_req /
2826 			    dma->dp_copybuf_size) + 1 + 2;
2827 		} else {
2828 			copybuf_win = 0;
2829 		}
2830 
2831 		/*
2832 		 * if we need windows because we have more cookies than the H/W
2833 		 * can handle, the number of windows we would need here would
2834 		 * be (cookie count / cookie count the H/W supports) plus one for
2835 		 * remainder, and plus 2 to handle the extra pages on the trim
2836 		 * (see above comment about trim)
2837 		 */
2838 		if (attr->dma_attr_sgllen < sinfo->si_sgl_size) {
2839 			sglwin = ((sinfo->si_sgl_size / attr->dma_attr_sgllen)
2840 			    + 1) + 2;
2841 		} else {
2842 			sglwin = 0;
2843 		}
2844 
2845 		/*
2846 		 * if we need windows because we're binding more memory than the
2847 		 * H/W can transfer at once, the number of windows we would need
2848 		 * here would be (xfer count / max xfer H/W supports) plus one
2849 		 * for remainder, and plus 2 to handle the extra pages on the
2850 		 * trim (see above comment about trim)
2851 		 */
2852 		if (dma->dp_dma.dmao_size > dma->dp_maxxfer) {
2853 			maxxfer_win = (dma->dp_dma.dmao_size /
2854 			    dma->dp_maxxfer) + 1 + 2;
2855 		} else {
2856 			maxxfer_win = 0;
2857 		}
2858 		dma->dp_max_win = copybuf_win + sglwin + maxxfer_win;
2859 		ASSERT(dma->dp_max_win > 0);
2860 	}
2861 	win_state_size = dma->dp_max_win * sizeof (rootnex_window_t);
2862 
2863 	/*
2864 	 * Get space for window and potential copy buffer state. Before we
2865 	 * go and allocate memory, see if we can get away with using what's
2866 	 * left in the pre-allocated state or the dynamically allocated sgl.
2867 	 */
2868 	space_used = (uintptr_t)(sinfo->si_sgl_size *
2869 	    sizeof (ddi_dma_cookie_t));
2870 
2871 	/* if we dynamically allocated space for the cookies */
2872 	if (dma->dp_need_to_free_cookie) {
2873 		/* if we have more space in the pre-allocated buffer, use it */
2874 		ASSERT(space_used <= dma->dp_cookie_size);
2875 		if ((dma->dp_cookie_size - space_used) <=
2876 		    rootnex_state->r_prealloc_size) {
2877 			state_available = rootnex_state->r_prealloc_size;
2878 			windowp = (rootnex_window_t *)dma->dp_prealloc_buffer;
2879 
2880 		/*
2881 		 * else, we have more free space in the dynamically allocated
2882 		 * buffer, i.e. the buffer wasn't worst case fragmented so we
2883 		 * didn't need a lot of cookies.
2884 		 */
2885 		} else {
2886 			state_available = dma->dp_cookie_size - space_used;
2887 			windowp = (rootnex_window_t *)
2888 			    &dma->dp_cookies[sinfo->si_sgl_size];
2889 		}
2890 
2891 	/* we used the pre-allocated buffer */
2892 	} else {
2893 		ASSERT(space_used <= rootnex_state->r_prealloc_size);
2894 		state_available = rootnex_state->r_prealloc_size - space_used;
2895 		windowp = (rootnex_window_t *)
2896 		    &dma->dp_cookies[sinfo->si_sgl_size];
2897 	}
2898 
2899 	/*
2900 	 * figure out how much state we need to track the copy buffer. Add an
2901 	 * additional 8 bytes for pointer alignment later.
2902 	 */
2903 	if (dma->dp_copybuf_size > 0) {
2904 		copy_state_size = sinfo->si_max_pages *
2905 		    sizeof (rootnex_pgmap_t);
2906 	} else {
2907 		copy_state_size = 0;
2908 	}
2909 	/* add an additional 8 bytes for pointer alignment */
2910 	space_needed = win_state_size + copy_state_size + 0x8;
2911 
2912 	/* if we have enough space already, use it */
2913 	if (state_available >= space_needed) {
2914 		dma->dp_window = windowp;
2915 		dma->dp_need_to_free_window = B_FALSE;
2916 
2917 	/* not enough space, need to allocate more.
*/ 2918 } else { 2919 dma->dp_window = kmem_alloc(space_needed, kmflag); 2920 if (dma->dp_window == NULL) { 2921 return (DDI_DMA_NORESOURCES); 2922 } 2923 dma->dp_need_to_free_window = B_TRUE; 2924 dma->dp_window_size = space_needed; 2925 DTRACE_PROBE2(rootnex__bind__sp__alloc, dev_info_t *, 2926 dma->dp_dip, size_t, space_needed); 2927 } 2928 2929 /* 2930 * we allocate copy buffer state and window state at the same time. 2931 * setup our copy buffer state pointers. Make sure it's aligned. 2932 */ 2933 if (dma->dp_copybuf_size > 0) { 2934 dma->dp_pgmap = (rootnex_pgmap_t *)(((uintptr_t) 2935 &dma->dp_window[dma->dp_max_win] + 0x7) & ~0x7); 2936 2937 #if !defined(__amd64) 2938 /* 2939 * make sure all pm_mapped, pm_vaddr, and pm_pp are set to 2940 * false/NULL. Should be quicker to bzero vs loop and set. 2941 */ 2942 bzero(dma->dp_pgmap, copy_state_size); 2943 #endif 2944 } else { 2945 dma->dp_pgmap = NULL; 2946 } 2947 2948 return (DDI_SUCCESS); 2949 } 2950 2951 2952 /* 2953 * rootnex_teardown_copybuf() 2954 * cleans up after rootnex_setup_copybuf() 2955 */ 2956 static void 2957 rootnex_teardown_copybuf(rootnex_dma_t *dma) 2958 { 2959 #if !defined(__amd64) 2960 int i; 2961 2962 /* 2963 * if we allocated kernel heap VMEM space, go through all the pages and 2964 * map out any of the ones that we're mapped into the kernel heap VMEM 2965 * arena. Then free the VMEM space. 2966 */ 2967 if (dma->dp_kva != NULL) { 2968 for (i = 0; i < dma->dp_sglinfo.si_max_pages; i++) { 2969 if (dma->dp_pgmap[i].pm_mapped) { 2970 hat_unload(kas.a_hat, dma->dp_pgmap[i].pm_kaddr, 2971 MMU_PAGESIZE, HAT_UNLOAD); 2972 dma->dp_pgmap[i].pm_mapped = B_FALSE; 2973 } 2974 } 2975 2976 vmem_free(heap_arena, dma->dp_kva, dma->dp_copybuf_size); 2977 } 2978 2979 #endif 2980 2981 /* if we allocated a copy buffer, free it */ 2982 if (dma->dp_cbaddr != NULL) { 2983 i_ddi_mem_free(dma->dp_cbaddr, 0); 2984 } 2985 } 2986 2987 2988 /* 2989 * rootnex_teardown_windows() 2990 * cleans up after rootnex_setup_windows() 2991 */ 2992 static void 2993 rootnex_teardown_windows(rootnex_dma_t *dma) 2994 { 2995 /* 2996 * if we had to allocate window state on the last bind (because we 2997 * didn't have enough pre-allocated space in the handle), free it. 2998 */ 2999 if (dma->dp_need_to_free_window) { 3000 kmem_free(dma->dp_window, dma->dp_window_size); 3001 } 3002 } 3003 3004 3005 /* 3006 * rootnex_init_win() 3007 * Called in bind slow path during creation of a new window. Initializes 3008 * window state to default values. 3009 */ 3010 /*ARGSUSED*/ 3011 static void 3012 rootnex_init_win(ddi_dma_impl_t *hp, rootnex_dma_t *dma, 3013 rootnex_window_t *window, ddi_dma_cookie_t *cookie, off_t cur_offset) 3014 { 3015 hp->dmai_nwin++; 3016 window->wd_dosync = B_FALSE; 3017 window->wd_offset = cur_offset; 3018 window->wd_size = 0; 3019 window->wd_first_cookie = cookie; 3020 window->wd_cookie_cnt = 0; 3021 window->wd_trim.tr_trim_first = B_FALSE; 3022 window->wd_trim.tr_trim_last = B_FALSE; 3023 window->wd_trim.tr_first_copybuf_win = B_FALSE; 3024 window->wd_trim.tr_last_copybuf_win = B_FALSE; 3025 #if !defined(__amd64) 3026 window->wd_remap_copybuf = dma->dp_cb_remaping; 3027 #endif 3028 } 3029 3030 3031 /* 3032 * rootnex_setup_cookie() 3033 * Called in the bind slow path when the sgl uses the copy buffer. If any of 3034 * the sgl uses the copy buffer, we need to go through each cookie, figure 3035 * out if it uses the copy buffer, and if it does, save away everything we'll 3036 * need during sync. 
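 *
 *    As an illustration only (made-up numbers, assuming 4K pages): for a
 *    buffer which starts 0x800 bytes into its first page, a cookie at
 *    cur_offset 0x1800 lands on buffer page index
 *    (0x800 + 0x1800) >> MMU_PAGESHIFT = 2, so its state is kept in
 *    dp_pgmap[2].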
3037 */ 3038 static void 3039 rootnex_setup_cookie(ddi_dma_obj_t *dmar_object, rootnex_dma_t *dma, 3040 ddi_dma_cookie_t *cookie, off_t cur_offset, size_t *copybuf_used, 3041 page_t **cur_pp) 3042 { 3043 boolean_t copybuf_sz_power_2; 3044 rootnex_sglinfo_t *sinfo; 3045 uint_t pidx; 3046 uint_t pcnt; 3047 off_t poff; 3048 #if defined(__amd64) 3049 pfn_t pfn; 3050 #else 3051 page_t **pplist; 3052 #endif 3053 3054 sinfo = &dma->dp_sglinfo; 3055 3056 /* 3057 * Calculate the page index relative to the start of the buffer. The 3058 * index to the current page for our buffer is the offset into the 3059 * first page of the buffer plus our current offset into the buffer 3060 * itself, shifted of course... 3061 */ 3062 pidx = (sinfo->si_buf_offset + cur_offset) >> MMU_PAGESHIFT; 3063 ASSERT(pidx < sinfo->si_max_pages); 3064 3065 /* if this cookie uses the copy buffer */ 3066 if (cookie->dmac_type & ROOTNEX_USES_COPYBUF) { 3067 /* 3068 * NOTE: we know that since this cookie uses the copy buffer, it 3069 * is <= MMU_PAGESIZE. 3070 */ 3071 3072 /* 3073 * get the offset into the page. For the 64-bit kernel, get the 3074 * pfn which we'll use with seg kpm. 3075 */ 3076 poff = cookie->_dmu._dmac_ll & MMU_PAGEOFFSET; 3077 #if defined(__amd64) 3078 pfn = cookie->_dmu._dmac_ll >> MMU_PAGESHIFT; 3079 #endif 3080 3081 /* figure out if the copybuf size is a power of 2 */ 3082 if (dma->dp_copybuf_size & (dma->dp_copybuf_size - 1)) { 3083 copybuf_sz_power_2 = B_FALSE; 3084 } else { 3085 copybuf_sz_power_2 = B_TRUE; 3086 } 3087 3088 /* This page uses the copy buffer */ 3089 dma->dp_pgmap[pidx].pm_uses_copybuf = B_TRUE; 3090 3091 /* 3092 * save the copy buffer KVA that we'll use with this page. 3093 * if we still fit within the copybuf, it's a simple add. 3094 * otherwise, we need to wrap over using & or % accordingly. 3095 */ 3096 if ((*copybuf_used + MMU_PAGESIZE) <= dma->dp_copybuf_size) { 3097 dma->dp_pgmap[pidx].pm_cbaddr = dma->dp_cbaddr + 3098 *copybuf_used; 3099 } else { 3100 if (copybuf_sz_power_2) { 3101 dma->dp_pgmap[pidx].pm_cbaddr = (caddr_t)( 3102 (uintptr_t)dma->dp_cbaddr + 3103 (*copybuf_used & 3104 (dma->dp_copybuf_size - 1))); 3105 } else { 3106 dma->dp_pgmap[pidx].pm_cbaddr = (caddr_t)( 3107 (uintptr_t)dma->dp_cbaddr + 3108 (*copybuf_used % dma->dp_copybuf_size)); 3109 } 3110 } 3111 3112 /* 3113 * over write the cookie physical address with the address of 3114 * the physical address of the copy buffer page that we will 3115 * use. 3116 */ 3117 cookie->_dmu._dmac_ll = ptob64(hat_getpfnum(kas.a_hat, 3118 dma->dp_pgmap[pidx].pm_cbaddr)) + poff; 3119 3120 /* if we have a kernel VA, it's easy, just save that address */ 3121 if ((dmar_object->dmao_type != DMA_OTYP_PAGES) && 3122 (sinfo->si_asp == &kas)) { 3123 /* 3124 * save away the page aligned virtual address of the 3125 * driver buffer. Offsets are handled in the sync code. 3126 */ 3127 dma->dp_pgmap[pidx].pm_kaddr = (caddr_t)(((uintptr_t) 3128 dmar_object->dmao_obj.virt_obj.v_addr + cur_offset) 3129 & MMU_PAGEMASK); 3130 #if !defined(__amd64) 3131 /* 3132 * we didn't need to, and will never need to map this 3133 * page. 3134 */ 3135 dma->dp_pgmap[pidx].pm_mapped = B_FALSE; 3136 #endif 3137 3138 /* we don't have a kernel VA. We need one for the bcopy. */ 3139 } else { 3140 #if defined(__amd64) 3141 /* 3142 * for the 64-bit kernel, it's easy. We use seg kpm to 3143 * get a Kernel VA for the corresponding pfn. 3144 */ 3145 dma->dp_pgmap[pidx].pm_kaddr = hat_kpm_pfn2va(pfn); 3146 #else 3147 /* 3148 * for the 32-bit kernel, this is a pain. 
First we'll 3149 * save away the page_t or user VA for this page. This 3150 * is needed in rootnex_dma_win() when we switch to a 3151 * new window which requires us to re-map the copy 3152 * buffer. 3153 */ 3154 pplist = dmar_object->dmao_obj.virt_obj.v_priv; 3155 if (dmar_object->dmao_type == DMA_OTYP_PAGES) { 3156 dma->dp_pgmap[pidx].pm_pp = *cur_pp; 3157 dma->dp_pgmap[pidx].pm_vaddr = NULL; 3158 } else if (pplist != NULL) { 3159 dma->dp_pgmap[pidx].pm_pp = pplist[pidx]; 3160 dma->dp_pgmap[pidx].pm_vaddr = NULL; 3161 } else { 3162 dma->dp_pgmap[pidx].pm_pp = NULL; 3163 dma->dp_pgmap[pidx].pm_vaddr = (caddr_t) 3164 (((uintptr_t) 3165 dmar_object->dmao_obj.virt_obj.v_addr + 3166 cur_offset) & MMU_PAGEMASK); 3167 } 3168 3169 /* 3170 * save away the page aligned virtual address which was 3171 * allocated from the kernel heap arena (taking into 3172 * account if we need more copy buffer than we alloced 3173 * and use multiple windows to handle this, i.e. &,%). 3174 * NOTE: there isn't and physical memory backing up this 3175 * virtual address space currently. 3176 */ 3177 if ((*copybuf_used + MMU_PAGESIZE) <= 3178 dma->dp_copybuf_size) { 3179 dma->dp_pgmap[pidx].pm_kaddr = (caddr_t) 3180 (((uintptr_t)dma->dp_kva + *copybuf_used) & 3181 MMU_PAGEMASK); 3182 } else { 3183 if (copybuf_sz_power_2) { 3184 dma->dp_pgmap[pidx].pm_kaddr = (caddr_t) 3185 (((uintptr_t)dma->dp_kva + 3186 (*copybuf_used & 3187 (dma->dp_copybuf_size - 1))) & 3188 MMU_PAGEMASK); 3189 } else { 3190 dma->dp_pgmap[pidx].pm_kaddr = (caddr_t) 3191 (((uintptr_t)dma->dp_kva + 3192 (*copybuf_used % 3193 dma->dp_copybuf_size)) & 3194 MMU_PAGEMASK); 3195 } 3196 } 3197 3198 /* 3199 * if we haven't used up the available copy buffer yet, 3200 * map the kva to the physical page. 3201 */ 3202 if (!dma->dp_cb_remaping && ((*copybuf_used + 3203 MMU_PAGESIZE) <= dma->dp_copybuf_size)) { 3204 dma->dp_pgmap[pidx].pm_mapped = B_TRUE; 3205 if (dma->dp_pgmap[pidx].pm_pp != NULL) { 3206 i86_pp_map(dma->dp_pgmap[pidx].pm_pp, 3207 dma->dp_pgmap[pidx].pm_kaddr); 3208 } else { 3209 i86_va_map(dma->dp_pgmap[pidx].pm_vaddr, 3210 sinfo->si_asp, 3211 dma->dp_pgmap[pidx].pm_kaddr); 3212 } 3213 3214 /* 3215 * we've used up the available copy buffer, this page 3216 * will have to be mapped during rootnex_dma_win() when 3217 * we switch to a new window which requires a re-map 3218 * the copy buffer. (32-bit kernel only) 3219 */ 3220 } else { 3221 dma->dp_pgmap[pidx].pm_mapped = B_FALSE; 3222 } 3223 #endif 3224 /* go to the next page_t */ 3225 if (dmar_object->dmao_type == DMA_OTYP_PAGES) { 3226 *cur_pp = (*cur_pp)->p_next; 3227 } 3228 } 3229 3230 /* add to the copy buffer count */ 3231 *copybuf_used += MMU_PAGESIZE; 3232 3233 /* 3234 * This cookie doesn't use the copy buffer. Walk through the pages this 3235 * cookie occupies to reflect this. 3236 */ 3237 } else { 3238 /* 3239 * figure out how many pages the cookie occupies. We need to 3240 * use the original page offset of the buffer and the cookies 3241 * offset in the buffer to do this. 3242 */ 3243 poff = (sinfo->si_buf_offset + cur_offset) & MMU_PAGEOFFSET; 3244 pcnt = mmu_btopr(cookie->dmac_size + poff); 3245 3246 while (pcnt > 0) { 3247 #if !defined(__amd64) 3248 /* 3249 * the 32-bit kernel doesn't have seg kpm, so we need 3250 * to map in the driver buffer (if it didn't come down 3251 * with a kernel VA) on the fly. Since this page doesn't 3252 * use the copy buffer, it's not, or will it ever, have 3253 * to be mapped in. 
3254 */ 3255 dma->dp_pgmap[pidx].pm_mapped = B_FALSE; 3256 #endif 3257 dma->dp_pgmap[pidx].pm_uses_copybuf = B_FALSE; 3258 3259 /* 3260 * we need to update pidx and cur_pp or we'll loose 3261 * track of where we are. 3262 */ 3263 if (dmar_object->dmao_type == DMA_OTYP_PAGES) { 3264 *cur_pp = (*cur_pp)->p_next; 3265 } 3266 pidx++; 3267 pcnt--; 3268 } 3269 } 3270 } 3271 3272 3273 /* 3274 * rootnex_sgllen_window_boundary() 3275 * Called in the bind slow path when the next cookie causes us to exceed (in 3276 * this case == since we start at 0 and sgllen starts at 1) the maximum sgl 3277 * length supported by the DMA H/W. 3278 */ 3279 static int 3280 rootnex_sgllen_window_boundary(ddi_dma_impl_t *hp, rootnex_dma_t *dma, 3281 rootnex_window_t **windowp, ddi_dma_cookie_t *cookie, ddi_dma_attr_t *attr, 3282 off_t cur_offset) 3283 { 3284 off_t new_offset; 3285 size_t trim_sz; 3286 off_t coffset; 3287 3288 3289 /* 3290 * if we know we'll never have to trim, it's pretty easy. Just move to 3291 * the next window and init it. We're done. 3292 */ 3293 if (!dma->dp_trim_required) { 3294 (*windowp)++; 3295 rootnex_init_win(hp, dma, *windowp, cookie, cur_offset); 3296 (*windowp)->wd_cookie_cnt++; 3297 (*windowp)->wd_size = cookie->dmac_size; 3298 return (DDI_SUCCESS); 3299 } 3300 3301 /* figure out how much we need to trim from the window */ 3302 ASSERT(attr->dma_attr_granular != 0); 3303 if (dma->dp_granularity_power_2) { 3304 trim_sz = (*windowp)->wd_size & (attr->dma_attr_granular - 1); 3305 } else { 3306 trim_sz = (*windowp)->wd_size % attr->dma_attr_granular; 3307 } 3308 3309 /* The window's a whole multiple of granularity. We're done */ 3310 if (trim_sz == 0) { 3311 (*windowp)++; 3312 rootnex_init_win(hp, dma, *windowp, cookie, cur_offset); 3313 (*windowp)->wd_cookie_cnt++; 3314 (*windowp)->wd_size = cookie->dmac_size; 3315 return (DDI_SUCCESS); 3316 } 3317 3318 /* 3319 * The window's not a whole multiple of granularity, since we know this 3320 * is due to the sgllen, we need to go back to the last cookie and trim 3321 * that one, add the left over part of the old cookie into the new 3322 * window, and then add in the new cookie into the new window. 3323 */ 3324 3325 /* 3326 * make sure the driver isn't making us do something bad... Trimming and 3327 * sgllen == 1 don't go together. 3328 */ 3329 if (attr->dma_attr_sgllen == 1) { 3330 return (DDI_DMA_NOMAPPING); 3331 } 3332 3333 /* 3334 * first, setup the current window to account for the trim. Need to go 3335 * back to the last cookie for this. 3336 */ 3337 cookie--; 3338 (*windowp)->wd_trim.tr_trim_last = B_TRUE; 3339 (*windowp)->wd_trim.tr_last_cookie = cookie; 3340 (*windowp)->wd_trim.tr_last_paddr = cookie->_dmu._dmac_ll; 3341 ASSERT(cookie->dmac_size > trim_sz); 3342 (*windowp)->wd_trim.tr_last_size = cookie->dmac_size - trim_sz; 3343 (*windowp)->wd_size -= trim_sz; 3344 3345 /* save the buffer offsets for the next window */ 3346 coffset = cookie->dmac_size - trim_sz; 3347 new_offset = (*windowp)->wd_offset + (*windowp)->wd_size; 3348 3349 /* 3350 * set this now in case this is the first window. all other cases are 3351 * set in dma_win() 3352 */ 3353 cookie->dmac_size = (*windowp)->wd_trim.tr_last_size; 3354 3355 /* 3356 * initialize the next window using what's left over in the previous 3357 * cookie. 
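 *
 * Trim example (made-up numbers): with dma_attr_granular of 512 and a
 * window size of 0x1B80, trim_sz above is 0x1B80 % 512 = 0x180. The
 * last cookie is shortened by 0x180, this window ends on a 512 byte
 * multiple, and the trimmed 0x180 becomes the first piece of the next
 * window.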
3358 */ 3359 (*windowp)++; 3360 rootnex_init_win(hp, dma, *windowp, cookie, new_offset); 3361 (*windowp)->wd_cookie_cnt++; 3362 (*windowp)->wd_trim.tr_trim_first = B_TRUE; 3363 (*windowp)->wd_trim.tr_first_paddr = cookie->_dmu._dmac_ll + coffset; 3364 (*windowp)->wd_trim.tr_first_size = trim_sz; 3365 if (cookie->dmac_type & ROOTNEX_USES_COPYBUF) { 3366 (*windowp)->wd_dosync = B_TRUE; 3367 } 3368 3369 /* 3370 * now go back to the current cookie and add it to the new window. set 3371 * the new window size to the what was left over from the previous 3372 * cookie and what's in the current cookie. 3373 */ 3374 cookie++; 3375 (*windowp)->wd_cookie_cnt++; 3376 (*windowp)->wd_size = trim_sz + cookie->dmac_size; 3377 3378 /* 3379 * trim plus the next cookie could put us over maxxfer (a cookie can be 3380 * a max size of maxxfer). Handle that case. 3381 */ 3382 if ((*windowp)->wd_size > dma->dp_maxxfer) { 3383 /* 3384 * maxxfer is already a whole multiple of granularity, and this 3385 * trim will be <= the previous trim (since a cookie can't be 3386 * larger than maxxfer). Make things simple here. 3387 */ 3388 trim_sz = (*windowp)->wd_size - dma->dp_maxxfer; 3389 (*windowp)->wd_trim.tr_trim_last = B_TRUE; 3390 (*windowp)->wd_trim.tr_last_cookie = cookie; 3391 (*windowp)->wd_trim.tr_last_paddr = cookie->_dmu._dmac_ll; 3392 (*windowp)->wd_trim.tr_last_size = cookie->dmac_size - trim_sz; 3393 (*windowp)->wd_size -= trim_sz; 3394 ASSERT((*windowp)->wd_size == dma->dp_maxxfer); 3395 3396 /* save the buffer offsets for the next window */ 3397 coffset = cookie->dmac_size - trim_sz; 3398 new_offset = (*windowp)->wd_offset + (*windowp)->wd_size; 3399 3400 /* setup the next window */ 3401 (*windowp)++; 3402 rootnex_init_win(hp, dma, *windowp, cookie, new_offset); 3403 (*windowp)->wd_cookie_cnt++; 3404 (*windowp)->wd_trim.tr_trim_first = B_TRUE; 3405 (*windowp)->wd_trim.tr_first_paddr = cookie->_dmu._dmac_ll + 3406 coffset; 3407 (*windowp)->wd_trim.tr_first_size = trim_sz; 3408 } 3409 3410 return (DDI_SUCCESS); 3411 } 3412 3413 3414 /* 3415 * rootnex_copybuf_window_boundary() 3416 * Called in bind slowpath when we get to a window boundary because we used 3417 * up all the copy buffer that we have. 3418 */ 3419 static int 3420 rootnex_copybuf_window_boundary(ddi_dma_impl_t *hp, rootnex_dma_t *dma, 3421 rootnex_window_t **windowp, ddi_dma_cookie_t *cookie, off_t cur_offset, 3422 size_t *copybuf_used) 3423 { 3424 rootnex_sglinfo_t *sinfo; 3425 off_t new_offset; 3426 size_t trim_sz; 3427 off_t coffset; 3428 uint_t pidx; 3429 off_t poff; 3430 3431 3432 sinfo = &dma->dp_sglinfo; 3433 3434 /* 3435 * the copy buffer should be a whole multiple of page size. We know that 3436 * this cookie is <= MMU_PAGESIZE. 3437 */ 3438 ASSERT(cookie->dmac_size <= MMU_PAGESIZE); 3439 3440 /* 3441 * from now on, all new windows in this bind need to be re-mapped during 3442 * ddi_dma_getwin() (32-bit kernel only). i.e. we ran out out copybuf 3443 * space... 3444 */ 3445 #if !defined(__amd64) 3446 dma->dp_cb_remaping = B_TRUE; 3447 #endif 3448 3449 /* reset copybuf used */ 3450 *copybuf_used = 0; 3451 3452 /* 3453 * if we don't have to trim (since granularity is set to 1), go to the 3454 * next window and add the current cookie to it. We know the current 3455 * cookie uses the copy buffer since we're in this code path. 
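 *
 * For example (made-up sizes): with a 64K copy buffer, the 17th 4K page
 * which needs copybuf treatment pushes copybuf_used past 64K in the bind
 * loop, we end up here, copybuf_used starts over at 0, and copy buffer
 * addresses are handed out from dp_cbaddr again for the new window.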
3456 */ 3457 if (!dma->dp_trim_required) { 3458 (*windowp)++; 3459 rootnex_init_win(hp, dma, *windowp, cookie, cur_offset); 3460 3461 /* Add this cookie to the new window */ 3462 (*windowp)->wd_cookie_cnt++; 3463 (*windowp)->wd_size += cookie->dmac_size; 3464 *copybuf_used += MMU_PAGESIZE; 3465 return (DDI_SUCCESS); 3466 } 3467 3468 /* 3469 * *** may need to trim, figure it out. 3470 */ 3471 3472 /* figure out how much we need to trim from the window */ 3473 if (dma->dp_granularity_power_2) { 3474 trim_sz = (*windowp)->wd_size & 3475 (hp->dmai_attr.dma_attr_granular - 1); 3476 } else { 3477 trim_sz = (*windowp)->wd_size % hp->dmai_attr.dma_attr_granular; 3478 } 3479 3480 /* 3481 * if the window's a whole multiple of granularity, go to the next 3482 * window, init it, then add in the current cookie. We know the current 3483 * cookie uses the copy buffer since we're in this code path. 3484 */ 3485 if (trim_sz == 0) { 3486 (*windowp)++; 3487 rootnex_init_win(hp, dma, *windowp, cookie, cur_offset); 3488 3489 /* Add this cookie to the new window */ 3490 (*windowp)->wd_cookie_cnt++; 3491 (*windowp)->wd_size += cookie->dmac_size; 3492 *copybuf_used += MMU_PAGESIZE; 3493 return (DDI_SUCCESS); 3494 } 3495 3496 /* 3497 * *** We figured it out, we definitly need to trim 3498 */ 3499 3500 /* 3501 * make sure the driver isn't making us do something bad... 3502 * Trimming and sgllen == 1 don't go together. 3503 */ 3504 if (hp->dmai_attr.dma_attr_sgllen == 1) { 3505 return (DDI_DMA_NOMAPPING); 3506 } 3507 3508 /* 3509 * first, setup the current window to account for the trim. Need to go 3510 * back to the last cookie for this. Some of the last cookie will be in 3511 * the current window, and some of the last cookie will be in the new 3512 * window. All of the current cookie will be in the new window. 3513 */ 3514 cookie--; 3515 (*windowp)->wd_trim.tr_trim_last = B_TRUE; 3516 (*windowp)->wd_trim.tr_last_cookie = cookie; 3517 (*windowp)->wd_trim.tr_last_paddr = cookie->_dmu._dmac_ll; 3518 ASSERT(cookie->dmac_size > trim_sz); 3519 (*windowp)->wd_trim.tr_last_size = cookie->dmac_size - trim_sz; 3520 (*windowp)->wd_size -= trim_sz; 3521 3522 /* 3523 * we're trimming the last cookie (not the current cookie). So that 3524 * last cookie may have or may not have been using the copy buffer ( 3525 * we know the cookie passed in uses the copy buffer since we're in 3526 * this code path). 3527 * 3528 * If the last cookie doesn't use the copy buffer, nothing special to 3529 * do. However, if it does uses the copy buffer, it will be both the 3530 * last page in the current window and the first page in the next 3531 * window. Since we are reusing the copy buffer (and KVA space on the 3532 * 32-bit kernel), this page will use the end of the copy buffer in the 3533 * current window, and the start of the copy buffer in the next window. 3534 * Track that info... The cookie physical address was already set to 3535 * the copy buffer physical address in setup_cookie.. 
3536 	 */
3537 	if (cookie->dmac_type & ROOTNEX_USES_COPYBUF) {
3538 		pidx = (sinfo->si_buf_offset + (*windowp)->wd_offset +
3539 		    (*windowp)->wd_size) >> MMU_PAGESHIFT;
3540 		(*windowp)->wd_trim.tr_last_copybuf_win = B_TRUE;
3541 		(*windowp)->wd_trim.tr_last_pidx = pidx;
3542 		(*windowp)->wd_trim.tr_last_cbaddr =
3543 		    dma->dp_pgmap[pidx].pm_cbaddr;
3544 #if !defined(__amd64)
3545 		(*windowp)->wd_trim.tr_last_kaddr =
3546 		    dma->dp_pgmap[pidx].pm_kaddr;
3547 #endif
3548 	}
3549 
3550 	/* save the buffer offsets for the next window */
3551 	coffset = cookie->dmac_size - trim_sz;
3552 	new_offset = (*windowp)->wd_offset + (*windowp)->wd_size;
3553 
3554 	/*
3555 	 * set this now in case this is the first window. All other cases are
3556 	 * set in dma_win()
3557 	 */
3558 	cookie->dmac_size = (*windowp)->wd_trim.tr_last_size;
3559 
3560 	/*
3561 	 * initialize the next window using what's left over in the previous
3562 	 * cookie.
3563 	 */
3564 	(*windowp)++;
3565 	rootnex_init_win(hp, dma, *windowp, cookie, new_offset);
3566 	(*windowp)->wd_cookie_cnt++;
3567 	(*windowp)->wd_trim.tr_trim_first = B_TRUE;
3568 	(*windowp)->wd_trim.tr_first_paddr = cookie->_dmu._dmac_ll + coffset;
3569 	(*windowp)->wd_trim.tr_first_size = trim_sz;
3570 
3571 	/*
3572 	 * again, we're tracking if the last cookie uses the copy buffer.
3573 	 * read the comment above for more info on why we need to track
3574 	 * additional state.
3575 	 *
3576 	 * For the first cookie in the new window, we need to reset the
3577 	 * physical address we DMA into to the start of the copy buffer plus
3578 	 * any initial page offset which may be present.
3579 	 */
3580 	if (cookie->dmac_type & ROOTNEX_USES_COPYBUF) {
3581 		(*windowp)->wd_dosync = B_TRUE;
3582 		(*windowp)->wd_trim.tr_first_copybuf_win = B_TRUE;
3583 		(*windowp)->wd_trim.tr_first_pidx = pidx;
3584 		(*windowp)->wd_trim.tr_first_cbaddr = dma->dp_cbaddr;
3585 		poff = (*windowp)->wd_trim.tr_first_paddr & MMU_PAGEOFFSET;
3586 		(*windowp)->wd_trim.tr_first_paddr = ptob64(hat_getpfnum(
3587 		    kas.a_hat, dma->dp_cbaddr)) + poff;
3588 #if !defined(__amd64)
3589 		(*windowp)->wd_trim.tr_first_kaddr = dma->dp_kva;
3590 #endif
3591 		/* account for the cookie copybuf usage in the new window */
3592 		*copybuf_used += MMU_PAGESIZE;
3593 
3594 		/*
3595 		 * every piece of code has to have a hack, and here is this
3596 		 * one's :-)
3597 		 *
3598 		 * There is a complex interaction between setup_cookie and the
3599 		 * copybuf window boundary. The complexity had to be in either
3600 		 * the maxxfer window, or the copybuf window, and I chose the
3601 		 * copybuf code.
3602 		 *
3603 		 * So in this code path, we have taken the last cookie,
3604 		 * virtually broken it in half due to the trim, and it happens
3605 		 * to use the copybuf which further complicates life. At the
3606 		 * same time, we have already set up the current cookie, which
3607 		 * is now wrong. More background info: the current cookie uses
3608 		 * the copybuf, so it is only a page long max. So we need to
3609 		 * fix the current cookie's copy buffer address, physical
3610 		 * address, and kva for the 32-bit kernel. We do this by
3611 		 * bumping them by a page size (of course, we can't do this to
3612 		 * the physical address since the copy buffer may not be
3613 		 * physically contiguous).
3614 */ 3615 cookie++; 3616 dma->dp_pgmap[pidx + 1].pm_cbaddr += MMU_PAGESIZE; 3617 poff = cookie->_dmu._dmac_ll & MMU_PAGEOFFSET; 3618 cookie->_dmu._dmac_ll = ptob64(hat_getpfnum(kas.a_hat, 3619 dma->dp_pgmap[pidx + 1].pm_cbaddr)) + poff; 3620 #if !defined(__amd64) 3621 ASSERT(dma->dp_pgmap[pidx + 1].pm_mapped == B_FALSE); 3622 dma->dp_pgmap[pidx + 1].pm_kaddr += MMU_PAGESIZE; 3623 #endif 3624 } else { 3625 /* go back to the current cookie */ 3626 cookie++; 3627 } 3628 3629 /* 3630 * add the current cookie to the new window. set the new window size to 3631 * the what was left over from the previous cookie and what's in the 3632 * current cookie. 3633 */ 3634 (*windowp)->wd_cookie_cnt++; 3635 (*windowp)->wd_size = trim_sz + cookie->dmac_size; 3636 ASSERT((*windowp)->wd_size < dma->dp_maxxfer); 3637 3638 /* 3639 * we know that the cookie passed in always uses the copy buffer. We 3640 * wouldn't be here if it didn't. 3641 */ 3642 *copybuf_used += MMU_PAGESIZE; 3643 3644 return (DDI_SUCCESS); 3645 } 3646 3647 3648 /* 3649 * rootnex_maxxfer_window_boundary() 3650 * Called in bind slowpath when we get to a window boundary because we will 3651 * go over maxxfer. 3652 */ 3653 static int 3654 rootnex_maxxfer_window_boundary(ddi_dma_impl_t *hp, rootnex_dma_t *dma, 3655 rootnex_window_t **windowp, ddi_dma_cookie_t *cookie) 3656 { 3657 size_t dmac_size; 3658 off_t new_offset; 3659 size_t trim_sz; 3660 off_t coffset; 3661 3662 3663 /* 3664 * calculate how much we have to trim off of the current cookie to equal 3665 * maxxfer. We don't have to account for granularity here since our 3666 * maxxfer already takes that into account. 3667 */ 3668 trim_sz = ((*windowp)->wd_size + cookie->dmac_size) - dma->dp_maxxfer; 3669 ASSERT(trim_sz <= cookie->dmac_size); 3670 ASSERT(trim_sz <= dma->dp_maxxfer); 3671 3672 /* save cookie size since we need it later and we might change it */ 3673 dmac_size = cookie->dmac_size; 3674 3675 /* 3676 * if we're not trimming the entire cookie, setup the current window to 3677 * account for the trim. 3678 */ 3679 if (trim_sz < cookie->dmac_size) { 3680 (*windowp)->wd_cookie_cnt++; 3681 (*windowp)->wd_trim.tr_trim_last = B_TRUE; 3682 (*windowp)->wd_trim.tr_last_cookie = cookie; 3683 (*windowp)->wd_trim.tr_last_paddr = cookie->_dmu._dmac_ll; 3684 (*windowp)->wd_trim.tr_last_size = cookie->dmac_size - trim_sz; 3685 (*windowp)->wd_size = dma->dp_maxxfer; 3686 3687 /* 3688 * set the adjusted cookie size now in case this is the first 3689 * window. All other windows are taken care of in get win 3690 */ 3691 cookie->dmac_size = (*windowp)->wd_trim.tr_last_size; 3692 } 3693 3694 /* 3695 * coffset is the current offset within the cookie, new_offset is the 3696 * current offset with the entire buffer. 3697 */ 3698 coffset = dmac_size - trim_sz; 3699 new_offset = (*windowp)->wd_offset + (*windowp)->wd_size; 3700 3701 /* initialize the next window */ 3702 (*windowp)++; 3703 rootnex_init_win(hp, dma, *windowp, cookie, new_offset); 3704 (*windowp)->wd_cookie_cnt++; 3705 (*windowp)->wd_size = trim_sz; 3706 if (trim_sz < dmac_size) { 3707 (*windowp)->wd_trim.tr_trim_first = B_TRUE; 3708 (*windowp)->wd_trim.tr_first_paddr = cookie->_dmu._dmac_ll + 3709 coffset; 3710 (*windowp)->wd_trim.tr_first_size = trim_sz; 3711 } 3712 3713 return (DDI_SUCCESS); 3714 } 3715 3716 3717 /* 3718 * rootnex_dma_sync() 3719 * called from ddi_dma_sync() if DMP_NOSYNC is not set in hp->dmai_rflags. 3720 * We set DMP_NOSYNC if we're not using the copy buffer. 
If DMP_NOSYNC 3721 * is set, ddi_dma_sync() returns immediately passing back success. 3722 */ 3723 /*ARGSUSED*/ 3724 static int 3725 rootnex_dma_sync(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle, 3726 off_t off, size_t len, uint_t cache_flags) 3727 { 3728 rootnex_sglinfo_t *sinfo; 3729 rootnex_pgmap_t *cbpage; 3730 rootnex_window_t *win; 3731 ddi_dma_impl_t *hp; 3732 rootnex_dma_t *dma; 3733 caddr_t fromaddr; 3734 caddr_t toaddr; 3735 uint_t psize; 3736 off_t offset; 3737 uint_t pidx; 3738 size_t size; 3739 off_t poff; 3740 int e; 3741 3742 3743 hp = (ddi_dma_impl_t *)handle; 3744 dma = (rootnex_dma_t *)hp->dmai_private; 3745 sinfo = &dma->dp_sglinfo; 3746 3747 /* 3748 * if we don't have any windows, we don't need to sync. A copybuf 3749 * will cause us to have at least one window. 3750 */ 3751 if (dma->dp_window == NULL) { 3752 return (DDI_SUCCESS); 3753 } 3754 3755 /* This window may not need to be sync'd */ 3756 win = &dma->dp_window[dma->dp_current_win]; 3757 if (!win->wd_dosync) { 3758 return (DDI_SUCCESS); 3759 } 3760 3761 /* handle off and len special cases */ 3762 if ((off == 0) || (rootnex_sync_ignore_params)) { 3763 offset = win->wd_offset; 3764 } else { 3765 offset = off; 3766 } 3767 if ((len == 0) || (rootnex_sync_ignore_params)) { 3768 size = win->wd_size; 3769 } else { 3770 size = len; 3771 } 3772 3773 /* check the sync args to make sure they make a little sense */ 3774 if (rootnex_sync_check_parms) { 3775 e = rootnex_valid_sync_parms(hp, win, offset, size, 3776 cache_flags); 3777 if (e != DDI_SUCCESS) { 3778 ROOTNEX_PROF_INC(&rootnex_cnt[ROOTNEX_CNT_SYNC_FAIL]); 3779 return (DDI_FAILURE); 3780 } 3781 } 3782 3783 /* 3784 * special case the first page to handle the offset into the page. The 3785 * offset to the current page for our buffer is the offset into the 3786 * first page of the buffer plus our current offset into the buffer 3787 * itself, masked of course. 3788 */ 3789 poff = (sinfo->si_buf_offset + offset) & MMU_PAGEOFFSET; 3790 psize = MIN((MMU_PAGESIZE - poff), size); 3791 3792 /* go through all the pages that we want to sync */ 3793 while (size > 0) { 3794 /* 3795 * Calculate the page index relative to the start of the buffer. 3796 * The index to the current page for our buffer is the offset 3797 * into the first page of the buffer plus our current offset 3798 * into the buffer itself, shifted of course... 3799 */ 3800 pidx = (sinfo->si_buf_offset + offset) >> MMU_PAGESHIFT; 3801 ASSERT(pidx < sinfo->si_max_pages); 3802 3803 /* 3804 * if this page uses the copy buffer, we need to sync it, 3805 * otherwise, go on to the next page. 3806 */ 3807 cbpage = &dma->dp_pgmap[pidx]; 3808 ASSERT((cbpage->pm_uses_copybuf == B_TRUE) || 3809 (cbpage->pm_uses_copybuf == B_FALSE)); 3810 if (cbpage->pm_uses_copybuf) { 3811 /* cbaddr and kaddr should be page aligned */ 3812 ASSERT(((uintptr_t)cbpage->pm_cbaddr & 3813 MMU_PAGEOFFSET) == 0); 3814 ASSERT(((uintptr_t)cbpage->pm_kaddr & 3815 MMU_PAGEOFFSET) == 0); 3816 3817 /* 3818 * if we're copying for the device, we are going to 3819 * copy from the drivers buffer and to the rootnex 3820 * allocated copy buffer. 3821 */ 3822 if (cache_flags == DDI_DMA_SYNC_FORDEV) { 3823 fromaddr = cbpage->pm_kaddr + poff; 3824 toaddr = cbpage->pm_cbaddr + poff; 3825 DTRACE_PROBE2(rootnex__sync__dev, 3826 dev_info_t *, dma->dp_dip, size_t, psize); 3827 3828 /* 3829 * if we're copying for the cpu/kernel, we are going to 3830 * copy from the rootnex allocated copy buffer to the 3831 * drivers buffer. 
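 *
 * Illustration only (hypothetical driver code, the DDI call is real):
 * this is the direction taken when a driver, after its device has
 * finished DMA'ing data into the buffer, does
 *
 *     (void) ddi_dma_sync(hdl, 0, 0, DDI_DMA_SYNC_FORKERNEL);
 *
 * before it looks at the data. This routine treats an offset and length
 * of 0 as "the whole current window".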

/*
 * rootnex_valid_sync_parms()
 *    checks the parameters passed to sync to verify they are correct.
 */
static int
rootnex_valid_sync_parms(ddi_dma_impl_t *hp, rootnex_window_t *win,
    off_t offset, size_t size, uint_t cache_flags)
{
	off_t woffset;


	/*
	 * first part of the test: make sure the offset passed in is within
	 * the window.
	 */
	if (offset < win->wd_offset) {
		return (DDI_FAILURE);
	}

	/*
	 * second and last part of the test: make sure the offset and length
	 * passed in are within the window.
	 */
	woffset = offset - win->wd_offset;
	if ((woffset + size) > win->wd_size) {
		return (DDI_FAILURE);
	}

	/*
	 * if we are sync'ing for the device, the DDI_DMA_WRITE flag should
	 * be set too.
	 */
	if ((cache_flags == DDI_DMA_SYNC_FORDEV) &&
	    (hp->dmai_rflags & DDI_DMA_WRITE)) {
		return (DDI_SUCCESS);
	}

	/*
	 * at this point, either DDI_DMA_SYNC_FORCPU or DDI_DMA_SYNC_FORKERNEL
	 * should be set. Also, DDI_DMA_READ should be set in the flags.
	 */
	if (((cache_flags == DDI_DMA_SYNC_FORCPU) ||
	    (cache_flags == DDI_DMA_SYNC_FORKERNEL)) &&
	    (hp->dmai_rflags & DDI_DMA_READ)) {
		return (DDI_SUCCESS);
	}

	return (DDI_FAILURE);
}

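/*
 * Worked example of the checks above (illustrative values only): for a window
 * with wd_offset = 0x2000 and wd_size = 0x4000, a sync at offset 0x3000 of
 * size 0x1000 passes (woffset = 0x1000, and 0x1000 + 0x1000 <= 0x4000), while
 * a sync at offset 0x5800 of size 0x1000 fails (woffset = 0x3800, and
 * 0x3800 + 0x1000 > 0x4000). The cache_flags check then requires that the
 * sync direction agree with the DDI_DMA_READ/DDI_DMA_WRITE flags the handle
 * was bound with.
 */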

/*
 * rootnex_dma_win()
 *    called from ddi_dma_getwin()
 */
/*ARGSUSED*/
static int
rootnex_dma_win(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle,
    uint_t win, off_t *offp, size_t *lenp, ddi_dma_cookie_t *cookiep,
    uint_t *ccountp)
{
	rootnex_window_t *window;
	rootnex_trim_t *trim;
	ddi_dma_impl_t *hp;
	rootnex_dma_t *dma;
#if !defined(__amd64)
	rootnex_sglinfo_t *sinfo;
	rootnex_pgmap_t *pmap;
	uint_t pidx;
	uint_t pcnt;
	off_t poff;
	int i;
#endif


	hp = (ddi_dma_impl_t *)handle;
	dma = (rootnex_dma_t *)hp->dmai_private;
#if !defined(__amd64)
	sinfo = &dma->dp_sglinfo;
#endif

	/* If we try to get a window which doesn't exist, return failure */
	if (win >= hp->dmai_nwin) {
		ROOTNEX_PROF_INC(&rootnex_cnt[ROOTNEX_CNT_GETWIN_FAIL]);
		return (DDI_FAILURE);
	}

	/*
	 * if we don't have any windows and they're asking for the first
	 * window, set the cookie pointer to the first cookie in the bind,
	 * set up our return values, then increment the cookie since we return
	 * the first cookie on the stack.
	 */
	if (dma->dp_window == NULL) {
		if (win != 0) {
			ROOTNEX_PROF_INC(&rootnex_cnt[ROOTNEX_CNT_GETWIN_FAIL]);
			return (DDI_FAILURE);
		}
		hp->dmai_cookie = dma->dp_cookies;
		*offp = 0;
		*lenp = dma->dp_dma.dmao_size;
		*ccountp = dma->dp_sglinfo.si_sgl_size;
		*cookiep = hp->dmai_cookie[0];
		hp->dmai_cookie++;
		return (DDI_SUCCESS);
	}

	/* sync the old window before moving on to the new one */
	window = &dma->dp_window[dma->dp_current_win];
	if ((window->wd_dosync) && (hp->dmai_rflags & DDI_DMA_READ)) {
		(void) rootnex_dma_sync(dip, rdip, handle, 0, 0,
		    DDI_DMA_SYNC_FORCPU);
	}

#if !defined(__amd64)
	/*
	 * before we move to the next window, if we need to re-map, unmap all
	 * the pages in this window.
	 */
	if (dma->dp_cb_remaping) {
		/*
		 * If we switch to this window again, we'll need to map it in
		 * on the fly next time.
		 */
		window->wd_remap_copybuf = B_TRUE;

		/*
		 * calculate the page index into the buffer where this window
		 * starts, and the number of pages this window takes up.
		 */
		pidx = (sinfo->si_buf_offset + window->wd_offset) >>
		    MMU_PAGESHIFT;
		poff = (sinfo->si_buf_offset + window->wd_offset) &
		    MMU_PAGEOFFSET;
		pcnt = mmu_btopr(window->wd_size + poff);
		ASSERT((pidx + pcnt) <= sinfo->si_max_pages);

		/* unmap pages which are currently mapped in this window */
		for (i = 0; i < pcnt; i++) {
			if (dma->dp_pgmap[pidx].pm_mapped) {
				hat_unload(kas.a_hat,
				    dma->dp_pgmap[pidx].pm_kaddr, MMU_PAGESIZE,
				    HAT_UNLOAD);
				dma->dp_pgmap[pidx].pm_mapped = B_FALSE;
			}
			pidx++;
		}
	}
#endif

	/*
	 * Move to the new window.
	 * NOTE: current_win must be set for sync to work right
	 */
	dma->dp_current_win = win;
	window = &dma->dp_window[win];

	/* if needed, adjust the first and/or last cookies for trim */
	trim = &window->wd_trim;
	if (trim->tr_trim_first) {
		window->wd_first_cookie->_dmu._dmac_ll = trim->tr_first_paddr;
		window->wd_first_cookie->dmac_size = trim->tr_first_size;
#if !defined(__amd64)
		window->wd_first_cookie->dmac_type =
		    (window->wd_first_cookie->dmac_type &
		    ROOTNEX_USES_COPYBUF) + window->wd_offset;
#endif
		if (trim->tr_first_copybuf_win) {
			dma->dp_pgmap[trim->tr_first_pidx].pm_cbaddr =
			    trim->tr_first_cbaddr;
#if !defined(__amd64)
			dma->dp_pgmap[trim->tr_first_pidx].pm_kaddr =
			    trim->tr_first_kaddr;
#endif
		}
	}
	if (trim->tr_trim_last) {
		trim->tr_last_cookie->_dmu._dmac_ll = trim->tr_last_paddr;
		trim->tr_last_cookie->dmac_size = trim->tr_last_size;
		if (trim->tr_last_copybuf_win) {
			dma->dp_pgmap[trim->tr_last_pidx].pm_cbaddr =
			    trim->tr_last_cbaddr;
#if !defined(__amd64)
			dma->dp_pgmap[trim->tr_last_pidx].pm_kaddr =
			    trim->tr_last_kaddr;
#endif
		}
	}

	/*
	 * set the cookie pointer to the first cookie in the window, set up
	 * our return values, then increment the cookie since we return the
	 * first cookie on the stack.
	 */
	hp->dmai_cookie = window->wd_first_cookie;
	*offp = window->wd_offset;
	*lenp = window->wd_size;
	*ccountp = window->wd_cookie_cnt;
	*cookiep = hp->dmai_cookie[0];
	hp->dmai_cookie++;

#if !defined(__amd64)
	/* re-map the copybuf if required for this window */
	if (dma->dp_cb_remaping) {
		/*
		 * calculate the page index into the buffer where this
		 * window starts.
		 */
		pidx = (sinfo->si_buf_offset + window->wd_offset) >>
		    MMU_PAGESHIFT;
		ASSERT(pidx < sinfo->si_max_pages);

		/*
		 * the first page can get unmapped if it's shared with the
		 * previous window. Even if the rest of this window is already
		 * mapped in, we still need to check this one.
		 */
		pmap = &dma->dp_pgmap[pidx];
		if ((pmap->pm_uses_copybuf) && (pmap->pm_mapped == B_FALSE)) {
			if (pmap->pm_pp != NULL) {
				pmap->pm_mapped = B_TRUE;
				i86_pp_map(pmap->pm_pp, pmap->pm_kaddr);
			} else if (pmap->pm_vaddr != NULL) {
				pmap->pm_mapped = B_TRUE;
				i86_va_map(pmap->pm_vaddr, sinfo->si_asp,
				    pmap->pm_kaddr);
			}
		}
		pidx++;

		/* map in the rest of the pages if required */
		if (window->wd_remap_copybuf) {
			window->wd_remap_copybuf = B_FALSE;

			/* figure out how many pages this window takes up */
			poff = (sinfo->si_buf_offset + window->wd_offset) &
			    MMU_PAGEOFFSET;
			pcnt = mmu_btopr(window->wd_size + poff);
			ASSERT(((pidx - 1) + pcnt) <= sinfo->si_max_pages);

			/* map pages which require it */
			for (i = 1; i < pcnt; i++) {
				pmap = &dma->dp_pgmap[pidx];
				if (pmap->pm_uses_copybuf) {
					ASSERT(pmap->pm_mapped == B_FALSE);
					if (pmap->pm_pp != NULL) {
						pmap->pm_mapped = B_TRUE;
						i86_pp_map(pmap->pm_pp,
						    pmap->pm_kaddr);
					} else if (pmap->pm_vaddr != NULL) {
						pmap->pm_mapped = B_TRUE;
						i86_va_map(pmap->pm_vaddr,
						    sinfo->si_asp,
						    pmap->pm_kaddr);
					}
				}
				pidx++;
			}
		}
	}
#endif

	/* if the new window uses the copy buffer, sync it for the device */
	if ((window->wd_dosync) && (hp->dmai_rflags & DDI_DMA_WRITE)) {
		(void) rootnex_dma_sync(dip, rdip, handle, 0, 0,
		    DDI_DMA_SYNC_FORDEV);
	}

	return (DDI_SUCCESS);
}

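/*
 * Sketch of the consumer side of this routine (illustrative only; the xx_*
 * names are hypothetical). A driver that binds with DDI_DMA_PARTIAL and gets
 * back DDI_DMA_PARTIAL_MAP walks the windows with ddi_dma_numwin() and
 * ddi_dma_getwin(), which lands in rootnex_dma_win() above:
 *
 *	uint_t nwin, ccnt, w;
 *	ddi_dma_cookie_t cookie;
 *	off_t off;
 *	size_t len;
 *
 *	(void) ddi_dma_numwin(xx_handle, &nwin);
 *	for (w = 0; w < nwin; w++) {
 *		if (ddi_dma_getwin(xx_handle, w, &off, &len, &cookie,
 *		    &ccnt) != DDI_SUCCESS)
 *			break;
 *		xx_program_cookie(&cookie);
 *		while (--ccnt > 0) {
 *			ddi_dma_nextcookie(xx_handle, &cookie);
 *			xx_program_cookie(&cookie);
 *		}
 *	}
 */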

/*
 * ************************
 *  obsoleted dma routines
 * ************************
 */

/*
 * rootnex_dma_map()
 *    called from ddi_dma_setup()
 */
/* ARGSUSED */
static int
rootnex_dma_map(dev_info_t *dip, dev_info_t *rdip, struct ddi_dma_req *dmareq,
    ddi_dma_handle_t *handlep)
{
#if defined(__amd64)
	/*
	 * this interface is not supported in the 64-bit x86 kernel. See the
	 * comment in rootnex_dma_mctl().
	 */
	ASSERT(0);
	return (DDI_DMA_NORESOURCES);

#else /* 32-bit x86 kernel */
	ddi_dma_handle_t *lhandlep;
	ddi_dma_handle_t lhandle;
	ddi_dma_cookie_t cookie;
	ddi_dma_attr_t dma_attr;
	ddi_dma_lim_t *dma_lim;
	uint_t ccnt;
	int e;


	/*
	 * if the driver is just testing to see if it's possible to do the
	 * bind, we'll use local state. Otherwise, use the handle pointer
	 * passed in.
	 */
	if (handlep == NULL) {
		lhandlep = &lhandle;
	} else {
		lhandlep = handlep;
	}

	/* convert the limit structure to a dma_attr one */
	dma_lim = dmareq->dmar_limits;
	dma_attr.dma_attr_version = DMA_ATTR_V0;
	dma_attr.dma_attr_addr_lo = dma_lim->dlim_addr_lo;
	dma_attr.dma_attr_addr_hi = dma_lim->dlim_addr_hi;
	dma_attr.dma_attr_minxfer = dma_lim->dlim_minxfer;
	dma_attr.dma_attr_seg = dma_lim->dlim_adreg_max;
	dma_attr.dma_attr_count_max = dma_lim->dlim_ctreg_max;
	dma_attr.dma_attr_granular = dma_lim->dlim_granular;
	dma_attr.dma_attr_sgllen = dma_lim->dlim_sgllen;
	dma_attr.dma_attr_maxxfer = dma_lim->dlim_reqsize;
	dma_attr.dma_attr_burstsizes = dma_lim->dlim_burstsizes;
	dma_attr.dma_attr_align = MMU_PAGESIZE;
	dma_attr.dma_attr_flags = 0;

	e = rootnex_dma_allochdl(dip, rdip, &dma_attr, dmareq->dmar_fp,
	    dmareq->dmar_arg, lhandlep);
	if (e != DDI_SUCCESS) {
		return (e);
	}

	e = rootnex_dma_bindhdl(dip, rdip, *lhandlep, dmareq, &cookie, &ccnt);
	if ((e != DDI_DMA_MAPPED) && (e != DDI_DMA_PARTIAL_MAP)) {
		(void) rootnex_dma_freehdl(dip, rdip, *lhandlep);
		return (e);
	}

	/*
	 * if the driver is just testing to see if it's possible to do the
	 * bind, free up the local state and return the result.
	 */
	if (handlep == NULL) {
		(void) rootnex_dma_unbindhdl(dip, rdip, *lhandlep);
		(void) rootnex_dma_freehdl(dip, rdip, *lhandlep);
		if (e == DDI_DMA_MAPPED) {
			return (DDI_DMA_MAPOK);
		} else {
			return (DDI_DMA_NOMAPPING);
		}
	}

	return (e);
#endif /* defined(__amd64) */
}

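/*
 * For reference, the non-obsolete path a driver uses today instead of
 * ddi_dma_setup() is roughly the following (illustrative only; the xx_*
 * names are hypothetical):
 *
 *	if (ddi_dma_alloc_handle(xx_dip, &xx_dma_attr, DDI_DMA_SLEEP, NULL,
 *	    &xx_handle) != DDI_SUCCESS)
 *		return (DDI_FAILURE);
 *	if (ddi_dma_addr_bind_handle(xx_handle, NULL, xx_buf, xx_len,
 *	    DDI_DMA_WRITE | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL,
 *	    &xx_cookie, &xx_ccnt) != DDI_DMA_MAPPED)
 *		return (DDI_FAILURE);
 *
 * which ends up in rootnex_dma_allochdl() and rootnex_dma_bindhdl(), the same
 * routines this compatibility shim calls after converting the ddi_dma_lim_t
 * limits into a ddi_dma_attr_t.
 */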

/*
 * rootnex_dma_mctl()
 *
 */
/* ARGSUSED */
static int
rootnex_dma_mctl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle,
    enum ddi_dma_ctlops request, off_t *offp, size_t *lenp, caddr_t *objpp,
    uint_t cache_flags)
{
#if defined(__amd64)
	/*
	 * DDI_DMA_SMEM_ALLOC & DDI_DMA_IOPB_ALLOC were changed to have a
	 * common implementation in genunix, so they no longer have x86
	 * specific functionality which called into dma_ctl.
	 *
	 * The rest of the obsoleted interfaces were never supported in the
	 * 64-bit x86 kernel. For s10, the obsoleted DDI_DMA_SEGTOC interface
	 * was not ported to the x86 64-bit kernel due to serious x86 rootnex
	 * implementation issues.
	 *
	 * If you can't use DDI_DMA_SEGTOC, then DDI_DMA_NEXTSEG, DDI_DMA_FREE,
	 * and DDI_DMA_NEXTWIN are useless since you can't get to the cookie,
	 * so we reflect that now too...
	 *
	 * Even though we fixed the pointer problem in DDI_DMA_SEGTOC, we are
	 * not going to put this functionality into the 64-bit x86 kernel now.
	 * It wasn't ported to the 64-bit kernel for s10, and there is no
	 * reason to change that in a future release.
	 */
	ASSERT(0);
	return (DDI_FAILURE);

#else /* 32-bit x86 kernel */
	ddi_dma_cookie_t lcookie;
	ddi_dma_cookie_t *cookie;
	rootnex_window_t *window;
	ddi_dma_impl_t *hp;
	rootnex_dma_t *dma;
	uint_t nwin;
	uint_t ccnt;
	size_t len;
	off_t off;
	int e;


	/*
	 * DDI_DMA_SEGTOC, DDI_DMA_NEXTSEG, and DDI_DMA_NEXTWIN are a little
	 * hacky since we're optimizing for the current interfaces so that we
	 * can clean up the mess in genunix. Hopefully we will remove these
	 * obsoleted routines someday soon.
	 */

	switch (request) {

	case DDI_DMA_SEGTOC: /* ddi_dma_segtocookie() */
		hp = (ddi_dma_impl_t *)handle;
		cookie = (ddi_dma_cookie_t *)objpp;

		/*
		 * convert the segment to a cookie. We don't distinguish
		 * between the two :-)
		 */
		*cookie = *hp->dmai_cookie;
		*lenp = cookie->dmac_size;
		*offp = cookie->dmac_type & ~ROOTNEX_USES_COPYBUF;
		return (DDI_SUCCESS);

	case DDI_DMA_NEXTSEG: /* ddi_dma_nextseg() */
		hp = (ddi_dma_impl_t *)handle;
		dma = (rootnex_dma_t *)hp->dmai_private;

		if ((*lenp != NULL) && ((uintptr_t)*lenp != (uintptr_t)hp)) {
			return (DDI_DMA_STALE);
		}

		/* handle the case where we don't have any windows */
		if (dma->dp_window == NULL) {
			/*
			 * if seg == NULL, and we don't have any windows,
			 * return the first cookie in the sgl.
			 */
			if (*lenp == NULL) {
				dma->dp_current_cookie = 0;
				hp->dmai_cookie = dma->dp_cookies;
				*objpp = (caddr_t)handle;
				return (DDI_SUCCESS);

			/* if we have more cookies, go to the next cookie */
			} else {
				if ((dma->dp_current_cookie + 1) >=
				    dma->dp_sglinfo.si_sgl_size) {
					return (DDI_DMA_DONE);
				}
				dma->dp_current_cookie++;
				hp->dmai_cookie++;
				return (DDI_SUCCESS);
			}
		}

		/* We have one or more windows */
		window = &dma->dp_window[dma->dp_current_win];

		/*
		 * if seg == NULL, return the first cookie in the current
		 * window.
		 */
		if (*lenp == NULL) {
			dma->dp_current_cookie = 0;
			hp->dmai_cookie = window->wd_first_cookie;

		/*
		 * go to the next cookie in the window, then see if we're done
		 * with this window.
		 */
		} else {
			if ((dma->dp_current_cookie + 1) >=
			    window->wd_cookie_cnt) {
				return (DDI_DMA_DONE);
			}
			dma->dp_current_cookie++;
			hp->dmai_cookie++;
		}
		*objpp = (caddr_t)handle;
		return (DDI_SUCCESS);

	case DDI_DMA_NEXTWIN: /* ddi_dma_nextwin() */
		hp = (ddi_dma_impl_t *)handle;
		dma = (rootnex_dma_t *)hp->dmai_private;

		if ((*offp != NULL) && ((uintptr_t)*offp != (uintptr_t)hp)) {
			return (DDI_DMA_STALE);
		}

		/* if win == NULL, return the first window in the bind */
		if (*offp == NULL) {
			nwin = 0;

		/*
		 * else, go to the next window, then see if we're done with
		 * all the windows.
		 */
		} else {
			nwin = dma->dp_current_win + 1;
			if (nwin >= hp->dmai_nwin) {
				return (DDI_DMA_DONE);
			}
		}

		/* switch to the next window */
		e = rootnex_dma_win(dip, rdip, handle, nwin, &off, &len,
		    &lcookie, &ccnt);
		ASSERT(e == DDI_SUCCESS);
		if (e != DDI_SUCCESS) {
			return (DDI_DMA_STALE);
		}

		/* reset the cookie back to the first cookie in the window */
		if (dma->dp_window != NULL) {
			window = &dma->dp_window[dma->dp_current_win];
			hp->dmai_cookie = window->wd_first_cookie;
		} else {
			hp->dmai_cookie = dma->dp_cookies;
		}

		*objpp = (caddr_t)handle;
		return (DDI_SUCCESS);

	case DDI_DMA_FREE: /* ddi_dma_free() */
		(void) rootnex_dma_unbindhdl(dip, rdip, handle);
		(void) rootnex_dma_freehdl(dip, rdip, handle);
		if (rootnex_state->r_dvma_call_list_id) {
			ddi_run_callback(&rootnex_state->r_dvma_call_list_id);
		}
		return (DDI_SUCCESS);

	case DDI_DMA_IOPB_ALLOC: /* get contiguous DMA-able memory */
	case DDI_DMA_SMEM_ALLOC: /* get contiguous DMA-able memory */
		/* should never get here, handled in genunix */
		ASSERT(0);
		return (DDI_FAILURE);

	case DDI_DMA_KVADDR:
	case DDI_DMA_GETERR:
	case DDI_DMA_COFF:
		return (DDI_FAILURE);
	}

	return (DDI_FAILURE);
#endif /* defined(__amd64) */
}
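
/*
 * For reference (illustrative only; xx_handle is a hypothetical driver
 * handle): the DDI_DMA_FREE ctlop above backs the obsolete ddi_dma_free().
 * Current drivers tear a binding down with the same two steps directly:
 *
 *	(void) ddi_dma_unbind_handle(xx_handle);
 *	ddi_dma_free_handle(&xx_handle);
 *
 * which reach rootnex_dma_unbindhdl() and rootnex_dma_freehdl() just as the
 * compatibility case does.
 */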