1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 /* 22 * Copyright 2009 Sun Microsystems, Inc. All rights reserved. 23 * Use is subject to license terms. 24 */ 25 26 /* 27 * x86 root nexus driver 28 */ 29 30 #include <sys/sysmacros.h> 31 #include <sys/conf.h> 32 #include <sys/autoconf.h> 33 #include <sys/sysmacros.h> 34 #include <sys/debug.h> 35 #include <sys/psw.h> 36 #include <sys/ddidmareq.h> 37 #include <sys/promif.h> 38 #include <sys/devops.h> 39 #include <sys/kmem.h> 40 #include <sys/cmn_err.h> 41 #include <vm/seg.h> 42 #include <vm/seg_kmem.h> 43 #include <vm/seg_dev.h> 44 #include <sys/vmem.h> 45 #include <sys/mman.h> 46 #include <vm/hat.h> 47 #include <vm/as.h> 48 #include <vm/page.h> 49 #include <sys/avintr.h> 50 #include <sys/errno.h> 51 #include <sys/modctl.h> 52 #include <sys/ddi_impldefs.h> 53 #include <sys/sunddi.h> 54 #include <sys/sunndi.h> 55 #include <sys/mach_intr.h> 56 #include <sys/psm.h> 57 #include <sys/ontrap.h> 58 #include <sys/atomic.h> 59 #include <sys/sdt.h> 60 #include <sys/rootnex.h> 61 #include <vm/hat_i86.h> 62 #include <sys/ddifm.h> 63 #include <sys/ddi_isa.h> 64 65 #ifdef __xpv 66 #include <sys/bootinfo.h> 67 #include <sys/hypervisor.h> 68 #include <sys/bootconf.h> 69 #include <vm/kboot_mmu.h> 70 #else 71 #include <sys/intel_iommu.h> 72 #endif 73 74 75 /* 76 * enable/disable extra checking of function parameters. Useful for debugging 77 * drivers. 78 */ 79 #ifdef DEBUG 80 int rootnex_alloc_check_parms = 1; 81 int rootnex_bind_check_parms = 1; 82 int rootnex_bind_check_inuse = 1; 83 int rootnex_unbind_verify_buffer = 0; 84 int rootnex_sync_check_parms = 1; 85 #else 86 int rootnex_alloc_check_parms = 0; 87 int rootnex_bind_check_parms = 0; 88 int rootnex_bind_check_inuse = 0; 89 int rootnex_unbind_verify_buffer = 0; 90 int rootnex_sync_check_parms = 0; 91 #endif 92 93 /* Master Abort and Target Abort panic flag */ 94 int rootnex_fm_ma_ta_panic_flag = 0; 95 96 /* Semi-temporary patchables to phase in bug fixes, test drivers, etc. */ 97 int rootnex_bind_fail = 1; 98 int rootnex_bind_warn = 1; 99 uint8_t *rootnex_warn_list; 100 /* bitmasks for rootnex_warn_list. Up to 8 different warnings with uint8_t */ 101 #define ROOTNEX_BIND_WARNING (0x1 << 0) 102 103 /* 104 * revert back to old broken behavior of always sync'ing entire copy buffer. 105 * This is useful if be have a buggy driver which doesn't correctly pass in 106 * the offset and size into ddi_dma_sync(). 107 */ 108 int rootnex_sync_ignore_params = 0; 109 110 /* 111 * For the 64-bit kernel, pre-alloc enough cookies for a 256K buffer plus 1 112 * page for alignment. For the 32-bit kernel, pre-alloc enough cookies for a 113 * 64K buffer plus 1 page for alignment (we have less kernel space in a 32-bit 114 * kernel). 
Allocate enough windows to handle a 256K buffer w/ at least 65 115 * sgllen DMA engine, and enough copybuf buffer state pages to handle 2 pages 116 * (< 8K). We will still need to allocate the copy buffer during bind though 117 * (if we need one). These can only be modified in /etc/system before rootnex 118 * attach. 119 */ 120 #if defined(__amd64) 121 int rootnex_prealloc_cookies = 65; 122 int rootnex_prealloc_windows = 4; 123 int rootnex_prealloc_copybuf = 2; 124 #else 125 int rootnex_prealloc_cookies = 33; 126 int rootnex_prealloc_windows = 4; 127 int rootnex_prealloc_copybuf = 2; 128 #endif 129 130 /* driver global state */ 131 static rootnex_state_t *rootnex_state; 132 133 /* shortcut to rootnex counters */ 134 static uint64_t *rootnex_cnt; 135 136 /* 137 * XXX - does x86 even need these or are they left over from the SPARC days? 138 */ 139 /* statically defined integer/boolean properties for the root node */ 140 static rootnex_intprop_t rootnex_intprp[] = { 141 { "PAGESIZE", PAGESIZE }, 142 { "MMU_PAGESIZE", MMU_PAGESIZE }, 143 { "MMU_PAGEOFFSET", MMU_PAGEOFFSET }, 144 { DDI_RELATIVE_ADDRESSING, 1 }, 145 }; 146 #define NROOT_INTPROPS (sizeof (rootnex_intprp) / sizeof (rootnex_intprop_t)) 147 148 #ifdef __xpv 149 typedef maddr_t rootnex_addr_t; 150 #define ROOTNEX_PADDR_TO_RBASE(xinfo, pa) \ 151 (DOMAIN_IS_INITDOMAIN(xinfo) ? pa_to_ma(pa) : (pa)) 152 #else 153 typedef paddr_t rootnex_addr_t; 154 #endif 155 156 #if !defined(__xpv) 157 char _depends_on[] = "mach/pcplusmp misc/iommulib"; 158 #endif 159 160 static struct cb_ops rootnex_cb_ops = { 161 nodev, /* open */ 162 nodev, /* close */ 163 nodev, /* strategy */ 164 nodev, /* print */ 165 nodev, /* dump */ 166 nodev, /* read */ 167 nodev, /* write */ 168 nodev, /* ioctl */ 169 nodev, /* devmap */ 170 nodev, /* mmap */ 171 nodev, /* segmap */ 172 nochpoll, /* chpoll */ 173 ddi_prop_op, /* cb_prop_op */ 174 NULL, /* struct streamtab */ 175 D_NEW | D_MP | D_HOTPLUG, /* compatibility flags */ 176 CB_REV, /* Rev */ 177 nodev, /* cb_aread */ 178 nodev /* cb_awrite */ 179 }; 180 181 static int rootnex_map(dev_info_t *dip, dev_info_t *rdip, ddi_map_req_t *mp, 182 off_t offset, off_t len, caddr_t *vaddrp); 183 static int rootnex_map_fault(dev_info_t *dip, dev_info_t *rdip, 184 struct hat *hat, struct seg *seg, caddr_t addr, 185 struct devpage *dp, pfn_t pfn, uint_t prot, uint_t lock); 186 static int rootnex_dma_map(dev_info_t *dip, dev_info_t *rdip, 187 struct ddi_dma_req *dmareq, ddi_dma_handle_t *handlep); 188 static int rootnex_dma_allochdl(dev_info_t *dip, dev_info_t *rdip, 189 ddi_dma_attr_t *attr, int (*waitfp)(caddr_t), caddr_t arg, 190 ddi_dma_handle_t *handlep); 191 static int rootnex_dma_freehdl(dev_info_t *dip, dev_info_t *rdip, 192 ddi_dma_handle_t handle); 193 static int rootnex_dma_bindhdl(dev_info_t *dip, dev_info_t *rdip, 194 ddi_dma_handle_t handle, struct ddi_dma_req *dmareq, 195 ddi_dma_cookie_t *cookiep, uint_t *ccountp); 196 static int rootnex_dma_unbindhdl(dev_info_t *dip, dev_info_t *rdip, 197 ddi_dma_handle_t handle); 198 static int rootnex_dma_sync(dev_info_t *dip, dev_info_t *rdip, 199 ddi_dma_handle_t handle, off_t off, size_t len, uint_t cache_flags); 200 static int rootnex_dma_win(dev_info_t *dip, dev_info_t *rdip, 201 ddi_dma_handle_t handle, uint_t win, off_t *offp, size_t *lenp, 202 ddi_dma_cookie_t *cookiep, uint_t *ccountp); 203 static int rootnex_dma_mctl(dev_info_t *dip, dev_info_t *rdip, 204 ddi_dma_handle_t handle, enum ddi_dma_ctlops request, 205 off_t *offp, size_t *lenp, caddr_t *objp, uint_t 
cache_flags); 206 static int rootnex_ctlops(dev_info_t *dip, dev_info_t *rdip, 207 ddi_ctl_enum_t ctlop, void *arg, void *result); 208 static int rootnex_fm_init(dev_info_t *dip, dev_info_t *tdip, int tcap, 209 ddi_iblock_cookie_t *ibc); 210 static int rootnex_intr_ops(dev_info_t *pdip, dev_info_t *rdip, 211 ddi_intr_op_t intr_op, ddi_intr_handle_impl_t *hdlp, void *result); 212 213 static int rootnex_coredma_allochdl(dev_info_t *dip, dev_info_t *rdip, 214 ddi_dma_attr_t *attr, int (*waitfp)(caddr_t), caddr_t arg, 215 ddi_dma_handle_t *handlep); 216 static int rootnex_coredma_freehdl(dev_info_t *dip, dev_info_t *rdip, 217 ddi_dma_handle_t handle); 218 static int rootnex_coredma_bindhdl(dev_info_t *dip, dev_info_t *rdip, 219 ddi_dma_handle_t handle, struct ddi_dma_req *dmareq, 220 ddi_dma_cookie_t *cookiep, uint_t *ccountp); 221 static int rootnex_coredma_unbindhdl(dev_info_t *dip, dev_info_t *rdip, 222 ddi_dma_handle_t handle); 223 #if !defined(__xpv) 224 static void rootnex_coredma_reset_cookies(dev_info_t *dip, 225 ddi_dma_handle_t handle); 226 static int rootnex_coredma_get_cookies(dev_info_t *dip, ddi_dma_handle_t handle, 227 ddi_dma_cookie_t **cookiepp, uint_t *ccountp); 228 static int rootnex_coredma_set_cookies(dev_info_t *dip, ddi_dma_handle_t handle, 229 ddi_dma_cookie_t *cookiep, uint_t ccount); 230 static int rootnex_coredma_clear_cookies(dev_info_t *dip, 231 ddi_dma_handle_t handle); 232 static int rootnex_coredma_get_sleep_flags(ddi_dma_handle_t handle); 233 #endif 234 static int rootnex_coredma_sync(dev_info_t *dip, dev_info_t *rdip, 235 ddi_dma_handle_t handle, off_t off, size_t len, uint_t cache_flags); 236 static int rootnex_coredma_win(dev_info_t *dip, dev_info_t *rdip, 237 ddi_dma_handle_t handle, uint_t win, off_t *offp, size_t *lenp, 238 ddi_dma_cookie_t *cookiep, uint_t *ccountp); 239 static int rootnex_coredma_map(dev_info_t *dip, dev_info_t *rdip, 240 struct ddi_dma_req *dmareq, ddi_dma_handle_t *handlep); 241 static int rootnex_coredma_mctl(dev_info_t *dip, dev_info_t *rdip, 242 ddi_dma_handle_t handle, enum ddi_dma_ctlops request, off_t *offp, 243 size_t *lenp, caddr_t *objpp, uint_t cache_flags); 244 245 static struct bus_ops rootnex_bus_ops = { 246 BUSO_REV, 247 rootnex_map, 248 NULL, 249 NULL, 250 NULL, 251 rootnex_map_fault, 252 rootnex_dma_map, 253 rootnex_dma_allochdl, 254 rootnex_dma_freehdl, 255 rootnex_dma_bindhdl, 256 rootnex_dma_unbindhdl, 257 rootnex_dma_sync, 258 rootnex_dma_win, 259 rootnex_dma_mctl, 260 rootnex_ctlops, 261 ddi_bus_prop_op, 262 i_ddi_rootnex_get_eventcookie, 263 i_ddi_rootnex_add_eventcall, 264 i_ddi_rootnex_remove_eventcall, 265 i_ddi_rootnex_post_event, 266 0, /* bus_intr_ctl */ 267 0, /* bus_config */ 268 0, /* bus_unconfig */ 269 rootnex_fm_init, /* bus_fm_init */ 270 NULL, /* bus_fm_fini */ 271 NULL, /* bus_fm_access_enter */ 272 NULL, /* bus_fm_access_exit */ 273 NULL, /* bus_powr */ 274 rootnex_intr_ops /* bus_intr_op */ 275 }; 276 277 static int rootnex_attach(dev_info_t *dip, ddi_attach_cmd_t cmd); 278 static int rootnex_detach(dev_info_t *dip, ddi_detach_cmd_t cmd); 279 280 static struct dev_ops rootnex_ops = { 281 DEVO_REV, 282 0, 283 ddi_no_info, 284 nulldev, 285 nulldev, 286 rootnex_attach, 287 rootnex_detach, 288 nulldev, 289 &rootnex_cb_ops, 290 &rootnex_bus_ops, 291 NULL, 292 ddi_quiesce_not_needed, /* quiesce */ 293 }; 294 295 static struct modldrv rootnex_modldrv = { 296 &mod_driverops, 297 "i86pc root nexus", 298 &rootnex_ops 299 }; 300 301 static struct modlinkage rootnex_modlinkage = { 302 MODREV_1, 303 (void 
*)&rootnex_modldrv, 304 NULL 305 }; 306 307 #if !defined(__xpv) 308 static iommulib_nexops_t iommulib_nexops = { 309 IOMMU_NEXOPS_VERSION, 310 "Rootnex IOMMU ops Vers 1.1", 311 NULL, 312 rootnex_coredma_allochdl, 313 rootnex_coredma_freehdl, 314 rootnex_coredma_bindhdl, 315 rootnex_coredma_unbindhdl, 316 rootnex_coredma_reset_cookies, 317 rootnex_coredma_get_cookies, 318 rootnex_coredma_set_cookies, 319 rootnex_coredma_clear_cookies, 320 rootnex_coredma_get_sleep_flags, 321 rootnex_coredma_sync, 322 rootnex_coredma_win, 323 rootnex_coredma_map, 324 rootnex_coredma_mctl 325 }; 326 #endif 327 328 /* 329 * extern hacks 330 */ 331 extern struct seg_ops segdev_ops; 332 extern int ignore_hardware_nodes; /* force flag from ddi_impl.c */ 333 #ifdef DDI_MAP_DEBUG 334 extern int ddi_map_debug_flag; 335 #define ddi_map_debug if (ddi_map_debug_flag) prom_printf 336 #endif 337 extern void i86_pp_map(page_t *pp, caddr_t kaddr); 338 extern void i86_va_map(caddr_t vaddr, struct as *asp, caddr_t kaddr); 339 extern int (*psm_intr_ops)(dev_info_t *, ddi_intr_handle_impl_t *, 340 psm_intr_op_t, int *); 341 extern int impl_ddi_sunbus_initchild(dev_info_t *dip); 342 extern void impl_ddi_sunbus_removechild(dev_info_t *dip); 343 344 /* 345 * Use device arena to use for device control register mappings. 346 * Various kernel memory walkers (debugger, dtrace) need to know 347 * to avoid this address range to prevent undesired device activity. 348 */ 349 extern void *device_arena_alloc(size_t size, int vm_flag); 350 extern void device_arena_free(void * vaddr, size_t size); 351 352 353 /* 354 * Internal functions 355 */ 356 static int rootnex_dma_init(); 357 static void rootnex_add_props(dev_info_t *); 358 static int rootnex_ctl_reportdev(dev_info_t *dip); 359 static struct intrspec *rootnex_get_ispec(dev_info_t *rdip, int inum); 360 static int rootnex_map_regspec(ddi_map_req_t *mp, caddr_t *vaddrp); 361 static int rootnex_unmap_regspec(ddi_map_req_t *mp, caddr_t *vaddrp); 362 static int rootnex_map_handle(ddi_map_req_t *mp); 363 static void rootnex_clean_dmahdl(ddi_dma_impl_t *hp); 364 static int rootnex_valid_alloc_parms(ddi_dma_attr_t *attr, uint_t maxsegsize); 365 static int rootnex_valid_bind_parms(ddi_dma_req_t *dmareq, 366 ddi_dma_attr_t *attr); 367 static void rootnex_get_sgl(ddi_dma_obj_t *dmar_object, ddi_dma_cookie_t *sgl, 368 rootnex_sglinfo_t *sglinfo); 369 static int rootnex_bind_slowpath(ddi_dma_impl_t *hp, struct ddi_dma_req *dmareq, 370 rootnex_dma_t *dma, ddi_dma_attr_t *attr, int kmflag); 371 static int rootnex_setup_copybuf(ddi_dma_impl_t *hp, struct ddi_dma_req *dmareq, 372 rootnex_dma_t *dma, ddi_dma_attr_t *attr); 373 static void rootnex_teardown_copybuf(rootnex_dma_t *dma); 374 static int rootnex_setup_windows(ddi_dma_impl_t *hp, rootnex_dma_t *dma, 375 ddi_dma_attr_t *attr, int kmflag); 376 static void rootnex_teardown_windows(rootnex_dma_t *dma); 377 static void rootnex_init_win(ddi_dma_impl_t *hp, rootnex_dma_t *dma, 378 rootnex_window_t *window, ddi_dma_cookie_t *cookie, off_t cur_offset); 379 static void rootnex_setup_cookie(ddi_dma_obj_t *dmar_object, 380 rootnex_dma_t *dma, ddi_dma_cookie_t *cookie, off_t cur_offset, 381 size_t *copybuf_used, page_t **cur_pp); 382 static int rootnex_sgllen_window_boundary(ddi_dma_impl_t *hp, 383 rootnex_dma_t *dma, rootnex_window_t **windowp, ddi_dma_cookie_t *cookie, 384 ddi_dma_attr_t *attr, off_t cur_offset); 385 static int rootnex_copybuf_window_boundary(ddi_dma_impl_t *hp, 386 rootnex_dma_t *dma, rootnex_window_t **windowp, 387 ddi_dma_cookie_t 
*cookie, off_t cur_offset, size_t *copybuf_used); 388 static int rootnex_maxxfer_window_boundary(ddi_dma_impl_t *hp, 389 rootnex_dma_t *dma, rootnex_window_t **windowp, ddi_dma_cookie_t *cookie); 390 static int rootnex_valid_sync_parms(ddi_dma_impl_t *hp, rootnex_window_t *win, 391 off_t offset, size_t size, uint_t cache_flags); 392 static int rootnex_verify_buffer(rootnex_dma_t *dma); 393 static int rootnex_dma_check(dev_info_t *dip, const void *handle, 394 const void *comp_addr, const void *not_used); 395 396 /* 397 * _init() 398 * 399 */ 400 int 401 _init(void) 402 { 403 404 rootnex_state = NULL; 405 return (mod_install(&rootnex_modlinkage)); 406 } 407 408 409 /* 410 * _info() 411 * 412 */ 413 int 414 _info(struct modinfo *modinfop) 415 { 416 return (mod_info(&rootnex_modlinkage, modinfop)); 417 } 418 419 420 /* 421 * _fini() 422 * 423 */ 424 int 425 _fini(void) 426 { 427 return (EBUSY); 428 } 429 430 431 /* 432 * rootnex_attach() 433 * 434 */ 435 static int 436 rootnex_attach(dev_info_t *dip, ddi_attach_cmd_t cmd) 437 { 438 int fmcap; 439 int e; 440 441 switch (cmd) { 442 case DDI_ATTACH: 443 break; 444 case DDI_RESUME: 445 return (DDI_SUCCESS); 446 default: 447 return (DDI_FAILURE); 448 } 449 450 /* 451 * We should only have one instance of rootnex. Save it away since we 452 * don't have an easy way to get it back later. 453 */ 454 ASSERT(rootnex_state == NULL); 455 rootnex_state = kmem_zalloc(sizeof (rootnex_state_t), KM_SLEEP); 456 457 rootnex_state->r_dip = dip; 458 rootnex_state->r_err_ibc = (ddi_iblock_cookie_t)ipltospl(15); 459 rootnex_state->r_reserved_msg_printed = B_FALSE; 460 rootnex_cnt = &rootnex_state->r_counters[0]; 461 rootnex_state->r_intel_iommu_enabled = B_FALSE; 462 463 /* 464 * Set minimum fm capability level for i86pc platforms and then 465 * initialize error handling. Since we're the rootnex, we don't 466 * care what's returned in the fmcap field. 
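	 *
	 * For reference, the leaf-driver counterpart to this is a
	 * ddi_fm_init() call made against (a subset of) these system
	 * capabilities.  A minimal, illustrative sketch; not part of this
	 * driver, and the "xx_" names are hypothetical:
	 *
	 *	int fmcap = DDI_FM_EREPORT_CAPABLE | DDI_FM_ACCCHK_CAPABLE |
	 *	    DDI_FM_DMACHK_CAPABLE;
	 *	ddi_iblock_cookie_t fm_ibc;
	 *
	 *	ddi_fm_init(xx_dip, &fmcap, &fm_ibc);
	 *	(on return, fmcap holds what the parent nexi actually support)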
467 */ 468 ddi_system_fmcap = DDI_FM_EREPORT_CAPABLE | DDI_FM_ERRCB_CAPABLE | 469 DDI_FM_ACCCHK_CAPABLE | DDI_FM_DMACHK_CAPABLE; 470 fmcap = ddi_system_fmcap; 471 ddi_fm_init(dip, &fmcap, &rootnex_state->r_err_ibc); 472 473 /* initialize DMA related state */ 474 e = rootnex_dma_init(); 475 if (e != DDI_SUCCESS) { 476 kmem_free(rootnex_state, sizeof (rootnex_state_t)); 477 return (DDI_FAILURE); 478 } 479 480 /* Add static root node properties */ 481 rootnex_add_props(dip); 482 483 /* since we can't call ddi_report_dev() */ 484 cmn_err(CE_CONT, "?root nexus = %s\n", ddi_get_name(dip)); 485 486 /* Initialize rootnex event handle */ 487 i_ddi_rootnex_init_events(dip); 488 489 #if !defined(__xpv) 490 #if defined(__amd64) 491 /* probe intel iommu */ 492 intel_iommu_probe_and_parse(); 493 494 /* attach the iommu nodes */ 495 if (intel_iommu_support) { 496 if (intel_iommu_attach_dmar_nodes() == DDI_SUCCESS) { 497 rootnex_state->r_intel_iommu_enabled = B_TRUE; 498 } else { 499 intel_iommu_release_dmar_info(); 500 } 501 } 502 #endif 503 504 e = iommulib_nexus_register(dip, &iommulib_nexops, 505 &rootnex_state->r_iommulib_handle); 506 507 ASSERT(e == DDI_SUCCESS); 508 #endif 509 510 return (DDI_SUCCESS); 511 } 512 513 514 /* 515 * rootnex_detach() 516 * 517 */ 518 /*ARGSUSED*/ 519 static int 520 rootnex_detach(dev_info_t *dip, ddi_detach_cmd_t cmd) 521 { 522 switch (cmd) { 523 case DDI_SUSPEND: 524 break; 525 default: 526 return (DDI_FAILURE); 527 } 528 529 return (DDI_SUCCESS); 530 } 531 532 533 /* 534 * rootnex_dma_init() 535 * 536 */ 537 /*ARGSUSED*/ 538 static int 539 rootnex_dma_init() 540 { 541 size_t bufsize; 542 543 544 /* 545 * size of our cookie/window/copybuf state needed in dma bind that we 546 * pre-alloc in dma_alloc_handle 547 */ 548 rootnex_state->r_prealloc_cookies = rootnex_prealloc_cookies; 549 rootnex_state->r_prealloc_size = 550 (rootnex_state->r_prealloc_cookies * sizeof (ddi_dma_cookie_t)) + 551 (rootnex_prealloc_windows * sizeof (rootnex_window_t)) + 552 (rootnex_prealloc_copybuf * sizeof (rootnex_pgmap_t)); 553 554 /* 555 * setup DDI DMA handle kmem cache, align each handle on 64 bytes, 556 * allocate 16 extra bytes for struct pointer alignment 557 * (p->dmai_private & dma->dp_prealloc_buffer) 558 */ 559 bufsize = sizeof (ddi_dma_impl_t) + sizeof (rootnex_dma_t) + 560 rootnex_state->r_prealloc_size + 0x10; 561 rootnex_state->r_dmahdl_cache = kmem_cache_create("rootnex_dmahdl", 562 bufsize, 64, NULL, NULL, NULL, NULL, NULL, 0); 563 if (rootnex_state->r_dmahdl_cache == NULL) { 564 return (DDI_FAILURE); 565 } 566 567 /* 568 * allocate array to track which major numbers we have printed warnings 569 * for. 
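	 *
	 * A sketch of how this per-major bitmap is intended to be consulted
	 * when a bind warning is issued (illustrative only; the actual
	 * consumer is the bind code later in this file):
	 *
	 *	major_t mjr = ddi_driver_major(rdip);
	 *	if (rootnex_bind_warn &&
	 *	    !(rootnex_warn_list[mjr] & ROOTNEX_BIND_WARNING)) {
	 *		rootnex_warn_list[mjr] |= ROOTNEX_BIND_WARNING;
	 *		cmn_err(CE_WARN, "...one warning per driver...");
	 *	}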
570 */ 571 rootnex_warn_list = kmem_zalloc(devcnt * sizeof (*rootnex_warn_list), 572 KM_SLEEP); 573 574 return (DDI_SUCCESS); 575 } 576 577 578 /* 579 * rootnex_add_props() 580 * 581 */ 582 static void 583 rootnex_add_props(dev_info_t *dip) 584 { 585 rootnex_intprop_t *rpp; 586 int i; 587 588 /* Add static integer/boolean properties to the root node */ 589 rpp = rootnex_intprp; 590 for (i = 0; i < NROOT_INTPROPS; i++) { 591 (void) e_ddi_prop_update_int(DDI_DEV_T_NONE, dip, 592 rpp[i].prop_name, rpp[i].prop_value); 593 } 594 } 595 596 597 598 /* 599 * ************************* 600 * ctlops related routines 601 * ************************* 602 */ 603 604 /* 605 * rootnex_ctlops() 606 * 607 */ 608 /*ARGSUSED*/ 609 static int 610 rootnex_ctlops(dev_info_t *dip, dev_info_t *rdip, ddi_ctl_enum_t ctlop, 611 void *arg, void *result) 612 { 613 int n, *ptr; 614 struct ddi_parent_private_data *pdp; 615 616 switch (ctlop) { 617 case DDI_CTLOPS_DMAPMAPC: 618 /* 619 * Return 'partial' to indicate that dma mapping 620 * has to be done in the main MMU. 621 */ 622 return (DDI_DMA_PARTIAL); 623 624 case DDI_CTLOPS_BTOP: 625 /* 626 * Convert byte count input to physical page units. 627 * (byte counts that are not a page-size multiple 628 * are rounded down) 629 */ 630 *(ulong_t *)result = btop(*(ulong_t *)arg); 631 return (DDI_SUCCESS); 632 633 case DDI_CTLOPS_PTOB: 634 /* 635 * Convert size in physical pages to bytes 636 */ 637 *(ulong_t *)result = ptob(*(ulong_t *)arg); 638 return (DDI_SUCCESS); 639 640 case DDI_CTLOPS_BTOPR: 641 /* 642 * Convert byte count input to physical page units 643 * (byte counts that are not a page-size multiple 644 * are rounded up) 645 */ 646 *(ulong_t *)result = btopr(*(ulong_t *)arg); 647 return (DDI_SUCCESS); 648 649 case DDI_CTLOPS_INITCHILD: 650 return (impl_ddi_sunbus_initchild(arg)); 651 652 case DDI_CTLOPS_UNINITCHILD: 653 impl_ddi_sunbus_removechild(arg); 654 return (DDI_SUCCESS); 655 656 case DDI_CTLOPS_REPORTDEV: 657 return (rootnex_ctl_reportdev(rdip)); 658 659 case DDI_CTLOPS_IOMIN: 660 /* 661 * Nothing to do here but reflect back.. 
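		 *
		 * This op is what backs ddi_iomin(9F) for children of the
		 * root nexus; an illustrative caller (xx_dip hypothetical):
		 *
		 *	int align = ddi_iomin(xx_dip, sizeof (uint32_t), 0);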
662 */ 663 return (DDI_SUCCESS); 664 665 case DDI_CTLOPS_REGSIZE: 666 case DDI_CTLOPS_NREGS: 667 break; 668 669 case DDI_CTLOPS_SIDDEV: 670 if (ndi_dev_is_prom_node(rdip)) 671 return (DDI_SUCCESS); 672 if (ndi_dev_is_persistent_node(rdip)) 673 return (DDI_SUCCESS); 674 return (DDI_FAILURE); 675 676 case DDI_CTLOPS_POWER: 677 return ((*pm_platform_power)((power_req_t *)arg)); 678 679 case DDI_CTLOPS_RESERVED0: /* Was DDI_CTLOPS_NINTRS, obsolete */ 680 case DDI_CTLOPS_RESERVED1: /* Was DDI_CTLOPS_POKE_INIT, obsolete */ 681 case DDI_CTLOPS_RESERVED2: /* Was DDI_CTLOPS_POKE_FLUSH, obsolete */ 682 case DDI_CTLOPS_RESERVED3: /* Was DDI_CTLOPS_POKE_FINI, obsolete */ 683 case DDI_CTLOPS_RESERVED4: /* Was DDI_CTLOPS_INTR_HILEVEL, obsolete */ 684 case DDI_CTLOPS_RESERVED5: /* Was DDI_CTLOPS_XLATE_INTRS, obsolete */ 685 if (!rootnex_state->r_reserved_msg_printed) { 686 rootnex_state->r_reserved_msg_printed = B_TRUE; 687 cmn_err(CE_WARN, "Failing ddi_ctlops call(s) for " 688 "1 or more reserved/obsolete operations."); 689 } 690 return (DDI_FAILURE); 691 692 default: 693 return (DDI_FAILURE); 694 } 695 /* 696 * The rest are for "hardware" properties 697 */ 698 if ((pdp = ddi_get_parent_data(rdip)) == NULL) 699 return (DDI_FAILURE); 700 701 if (ctlop == DDI_CTLOPS_NREGS) { 702 ptr = (int *)result; 703 *ptr = pdp->par_nreg; 704 } else { 705 off_t *size = (off_t *)result; 706 707 ptr = (int *)arg; 708 n = *ptr; 709 if (n >= pdp->par_nreg) { 710 return (DDI_FAILURE); 711 } 712 *size = (off_t)pdp->par_reg[n].regspec_size; 713 } 714 return (DDI_SUCCESS); 715 } 716 717 718 /* 719 * rootnex_ctl_reportdev() 720 * 721 */ 722 static int 723 rootnex_ctl_reportdev(dev_info_t *dev) 724 { 725 int i, n, len, f_len = 0; 726 char *buf; 727 728 buf = kmem_alloc(REPORTDEV_BUFSIZE, KM_SLEEP); 729 f_len += snprintf(buf, REPORTDEV_BUFSIZE, 730 "%s%d at root", ddi_driver_name(dev), ddi_get_instance(dev)); 731 len = strlen(buf); 732 733 for (i = 0; i < sparc_pd_getnreg(dev); i++) { 734 735 struct regspec *rp = sparc_pd_getreg(dev, i); 736 737 if (i == 0) 738 f_len += snprintf(buf + len, REPORTDEV_BUFSIZE - len, 739 ": "); 740 else 741 f_len += snprintf(buf + len, REPORTDEV_BUFSIZE - len, 742 " and "); 743 len = strlen(buf); 744 745 switch (rp->regspec_bustype) { 746 747 case BTEISA: 748 f_len += snprintf(buf + len, REPORTDEV_BUFSIZE - len, 749 "%s 0x%x", DEVI_EISA_NEXNAME, rp->regspec_addr); 750 break; 751 752 case BTISA: 753 f_len += snprintf(buf + len, REPORTDEV_BUFSIZE - len, 754 "%s 0x%x", DEVI_ISA_NEXNAME, rp->regspec_addr); 755 break; 756 757 default: 758 f_len += snprintf(buf + len, REPORTDEV_BUFSIZE - len, 759 "space %x offset %x", 760 rp->regspec_bustype, rp->regspec_addr); 761 break; 762 } 763 len = strlen(buf); 764 } 765 for (i = 0, n = sparc_pd_getnintr(dev); i < n; i++) { 766 int pri; 767 768 if (i != 0) { 769 f_len += snprintf(buf + len, REPORTDEV_BUFSIZE - len, 770 ","); 771 len = strlen(buf); 772 } 773 pri = INT_IPL(sparc_pd_getintr(dev, i)->intrspec_pri); 774 f_len += snprintf(buf + len, REPORTDEV_BUFSIZE - len, 775 " sparc ipl %d", pri); 776 len = strlen(buf); 777 } 778 #ifdef DEBUG 779 if (f_len + 1 >= REPORTDEV_BUFSIZE) { 780 cmn_err(CE_NOTE, "next message is truncated: " 781 "printed length 1024, real length %d", f_len); 782 } 783 #endif /* DEBUG */ 784 cmn_err(CE_CONT, "?%s\n", buf); 785 kmem_free(buf, REPORTDEV_BUFSIZE); 786 return (DDI_SUCCESS); 787 } 788 789 790 /* 791 * ****************** 792 * map related code 793 * ****************** 794 */ 795 796 /* 797 * rootnex_map() 798 * 799 */ 800 static int 
801 rootnex_map(dev_info_t *dip, dev_info_t *rdip, ddi_map_req_t *mp, off_t offset, 802 off_t len, caddr_t *vaddrp) 803 { 804 struct regspec *rp, tmp_reg; 805 ddi_map_req_t mr = *mp; /* Get private copy of request */ 806 int error; 807 808 mp = &mr; 809 810 switch (mp->map_op) { 811 case DDI_MO_MAP_LOCKED: 812 case DDI_MO_UNMAP: 813 case DDI_MO_MAP_HANDLE: 814 break; 815 default: 816 #ifdef DDI_MAP_DEBUG 817 cmn_err(CE_WARN, "rootnex_map: unimplemented map op %d.", 818 mp->map_op); 819 #endif /* DDI_MAP_DEBUG */ 820 return (DDI_ME_UNIMPLEMENTED); 821 } 822 823 if (mp->map_flags & DDI_MF_USER_MAPPING) { 824 #ifdef DDI_MAP_DEBUG 825 cmn_err(CE_WARN, "rootnex_map: unimplemented map type: user."); 826 #endif /* DDI_MAP_DEBUG */ 827 return (DDI_ME_UNIMPLEMENTED); 828 } 829 830 /* 831 * First, if given an rnumber, convert it to a regspec... 832 * (Presumably, this is on behalf of a child of the root node?) 833 */ 834 835 if (mp->map_type == DDI_MT_RNUMBER) { 836 837 int rnumber = mp->map_obj.rnumber; 838 #ifdef DDI_MAP_DEBUG 839 static char *out_of_range = 840 "rootnex_map: Out of range rnumber <%d>, device <%s>"; 841 #endif /* DDI_MAP_DEBUG */ 842 843 rp = i_ddi_rnumber_to_regspec(rdip, rnumber); 844 if (rp == NULL) { 845 #ifdef DDI_MAP_DEBUG 846 cmn_err(CE_WARN, out_of_range, rnumber, 847 ddi_get_name(rdip)); 848 #endif /* DDI_MAP_DEBUG */ 849 return (DDI_ME_RNUMBER_RANGE); 850 } 851 852 /* 853 * Convert the given ddi_map_req_t from rnumber to regspec... 854 */ 855 856 mp->map_type = DDI_MT_REGSPEC; 857 mp->map_obj.rp = rp; 858 } 859 860 /* 861 * Adjust offset and length correspnding to called values... 862 * XXX: A non-zero length means override the one in the regspec 863 * XXX: (regardless of what's in the parent's range?) 864 */ 865 866 tmp_reg = *(mp->map_obj.rp); /* Preserve underlying data */ 867 rp = mp->map_obj.rp = &tmp_reg; /* Use tmp_reg in request */ 868 869 #ifdef DDI_MAP_DEBUG 870 cmn_err(CE_CONT, "rootnex: <%s,%s> <0x%x, 0x%x, 0x%d> offset %d len %d " 871 "handle 0x%x\n", ddi_get_name(dip), ddi_get_name(rdip), 872 rp->regspec_bustype, rp->regspec_addr, rp->regspec_size, offset, 873 len, mp->map_handlep); 874 #endif /* DDI_MAP_DEBUG */ 875 876 /* 877 * I/O or memory mapping: 878 * 879 * <bustype=0, addr=x, len=x>: memory 880 * <bustype=1, addr=x, len=x>: i/o 881 * <bustype>1, addr=0, len=x>: x86-compatibility i/o 882 */ 883 884 if (rp->regspec_bustype > 1 && rp->regspec_addr != 0) { 885 cmn_err(CE_WARN, "<%s,%s> invalid register spec" 886 " <0x%x, 0x%x, 0x%x>", ddi_get_name(dip), 887 ddi_get_name(rdip), rp->regspec_bustype, 888 rp->regspec_addr, rp->regspec_size); 889 return (DDI_ME_INVAL); 890 } 891 892 if (rp->regspec_bustype > 1 && rp->regspec_addr == 0) { 893 /* 894 * compatibility i/o mapping 895 */ 896 rp->regspec_bustype += (uint_t)offset; 897 } else { 898 /* 899 * Normal memory or i/o mapping 900 */ 901 rp->regspec_addr += (uint_t)offset; 902 } 903 904 if (len != 0) 905 rp->regspec_size = (uint_t)len; 906 907 #ifdef DDI_MAP_DEBUG 908 cmn_err(CE_CONT, " <%s,%s> <0x%x, 0x%x, 0x%d> offset %d " 909 "len %d handle 0x%x\n", ddi_get_name(dip), ddi_get_name(rdip), 910 rp->regspec_bustype, rp->regspec_addr, rp->regspec_size, 911 offset, len, mp->map_handlep); 912 #endif /* DDI_MAP_DEBUG */ 913 914 /* 915 * Apply any parent ranges at this level, if applicable. 916 * (This is where nexus specific regspec translation takes place. 917 * Use of this function is implicit agreement that translation is 918 * provided via ddi_apply_range.) 
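	 *
	 * For reference, this routine is normally entered on behalf of a
	 * leaf driver calling ddi_regs_map_setup(9F).  A minimal,
	 * illustrative sketch (xx_* names are hypothetical):
	 *
	 *	ddi_device_acc_attr_t acc;
	 *	ddi_acc_handle_t regs_hdl;
	 *	caddr_t regs;
	 *
	 *	acc.devacc_attr_version = DDI_DEVICE_ATTR_V0;
	 *	acc.devacc_attr_endian_flags = DDI_NEVERSWAP_ACC;
	 *	acc.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
	 *	if (ddi_regs_map_setup(xx_dip, 1, &regs, 0, 0, &acc,
	 *	    &regs_hdl) != DDI_SUCCESS)
	 *		return (DDI_FAILURE);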
919 */ 920 921 #ifdef DDI_MAP_DEBUG 922 ddi_map_debug("applying range of parent <%s> to child <%s>...\n", 923 ddi_get_name(dip), ddi_get_name(rdip)); 924 #endif /* DDI_MAP_DEBUG */ 925 926 if ((error = i_ddi_apply_range(dip, rdip, mp->map_obj.rp)) != 0) 927 return (error); 928 929 switch (mp->map_op) { 930 case DDI_MO_MAP_LOCKED: 931 932 /* 933 * Set up the locked down kernel mapping to the regspec... 934 */ 935 936 return (rootnex_map_regspec(mp, vaddrp)); 937 938 case DDI_MO_UNMAP: 939 940 /* 941 * Release mapping... 942 */ 943 944 return (rootnex_unmap_regspec(mp, vaddrp)); 945 946 case DDI_MO_MAP_HANDLE: 947 948 return (rootnex_map_handle(mp)); 949 950 default: 951 return (DDI_ME_UNIMPLEMENTED); 952 } 953 } 954 955 956 /* 957 * rootnex_map_fault() 958 * 959 * fault in mappings for requestors 960 */ 961 /*ARGSUSED*/ 962 static int 963 rootnex_map_fault(dev_info_t *dip, dev_info_t *rdip, struct hat *hat, 964 struct seg *seg, caddr_t addr, struct devpage *dp, pfn_t pfn, uint_t prot, 965 uint_t lock) 966 { 967 968 #ifdef DDI_MAP_DEBUG 969 ddi_map_debug("rootnex_map_fault: address <%x> pfn <%x>", addr, pfn); 970 ddi_map_debug(" Seg <%s>\n", 971 seg->s_ops == &segdev_ops ? "segdev" : 972 seg == &kvseg ? "segkmem" : "NONE!"); 973 #endif /* DDI_MAP_DEBUG */ 974 975 /* 976 * This is all terribly broken, but it is a start 977 * 978 * XXX Note that this test means that segdev_ops 979 * must be exported from seg_dev.c. 980 * XXX What about devices with their own segment drivers? 981 */ 982 if (seg->s_ops == &segdev_ops) { 983 struct segdev_data *sdp = (struct segdev_data *)seg->s_data; 984 985 if (hat == NULL) { 986 /* 987 * This is one plausible interpretation of 988 * a null hat i.e. use the first hat on the 989 * address space hat list which by convention is 990 * the hat of the system MMU. At alternative 991 * would be to panic .. this might well be better .. 992 */ 993 ASSERT(AS_READ_HELD(seg->s_as, &seg->s_as->a_lock)); 994 hat = seg->s_as->a_hat; 995 cmn_err(CE_NOTE, "rootnex_map_fault: nil hat"); 996 } 997 hat_devload(hat, addr, MMU_PAGESIZE, pfn, prot | sdp->hat_attr, 998 (lock ? HAT_LOAD_LOCK : HAT_LOAD)); 999 } else if (seg == &kvseg && dp == NULL) { 1000 hat_devload(kas.a_hat, addr, MMU_PAGESIZE, pfn, prot, 1001 HAT_LOAD_LOCK); 1002 } else 1003 return (DDI_FAILURE); 1004 return (DDI_SUCCESS); 1005 } 1006 1007 1008 /* 1009 * rootnex_map_regspec() 1010 * we don't support mapping of I/O cards above 4Gb 1011 */ 1012 static int 1013 rootnex_map_regspec(ddi_map_req_t *mp, caddr_t *vaddrp) 1014 { 1015 rootnex_addr_t rbase; 1016 void *cvaddr; 1017 uint_t npages, pgoffset; 1018 struct regspec *rp; 1019 ddi_acc_hdl_t *hp; 1020 ddi_acc_impl_t *ap; 1021 uint_t hat_acc_flags; 1022 paddr_t pbase; 1023 1024 rp = mp->map_obj.rp; 1025 hp = mp->map_handlep; 1026 1027 #ifdef DDI_MAP_DEBUG 1028 ddi_map_debug( 1029 "rootnex_map_regspec: <0x%x 0x%x 0x%x> handle 0x%x\n", 1030 rp->regspec_bustype, rp->regspec_addr, 1031 rp->regspec_size, mp->map_handlep); 1032 #endif /* DDI_MAP_DEBUG */ 1033 1034 /* 1035 * I/O or memory mapping 1036 * 1037 * <bustype=0, addr=x, len=x>: memory 1038 * <bustype=1, addr=x, len=x>: i/o 1039 * <bustype>1, addr=0, len=x>: x86-compatibility i/o 1040 */ 1041 1042 if (rp->regspec_bustype > 1 && rp->regspec_addr != 0) { 1043 cmn_err(CE_WARN, "rootnex: invalid register spec" 1044 " <0x%x, 0x%x, 0x%x>", rp->regspec_bustype, 1045 rp->regspec_addr, rp->regspec_size); 1046 return (DDI_FAILURE); 1047 } 1048 1049 if (rp->regspec_bustype != 0) { 1050 /* 1051 * I/O space - needs a handle. 
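		 *
		 * Note that the "address" handed back for I/O space below is
		 * really the port number, so a driver must go through the
		 * access routines on the handle, e.g. (illustrative, XX_*
		 * register offsets hypothetical):
		 *
		 *	uint8_t sts = ddi_get8(regs_hdl,
		 *	    (uint8_t *)regs + XX_STATUS);
		 *	ddi_put8(regs_hdl, (uint8_t *)regs + XX_CMD, 0x1);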
1052 */ 1053 if (hp == NULL) { 1054 return (DDI_FAILURE); 1055 } 1056 ap = (ddi_acc_impl_t *)hp->ah_platform_private; 1057 ap->ahi_acc_attr |= DDI_ACCATTR_IO_SPACE; 1058 impl_acc_hdl_init(hp); 1059 1060 if (mp->map_flags & DDI_MF_DEVICE_MAPPING) { 1061 #ifdef DDI_MAP_DEBUG 1062 ddi_map_debug("rootnex_map_regspec: mmap() " 1063 "to I/O space is not supported.\n"); 1064 #endif /* DDI_MAP_DEBUG */ 1065 return (DDI_ME_INVAL); 1066 } else { 1067 /* 1068 * 1275-compliant vs. compatibility i/o mapping 1069 */ 1070 *vaddrp = 1071 (rp->regspec_bustype > 1 && rp->regspec_addr == 0) ? 1072 ((caddr_t)(uintptr_t)rp->regspec_bustype) : 1073 ((caddr_t)(uintptr_t)rp->regspec_addr); 1074 #ifdef __xpv 1075 if (DOMAIN_IS_INITDOMAIN(xen_info)) { 1076 hp->ah_pfn = xen_assign_pfn( 1077 mmu_btop((ulong_t)rp->regspec_addr & 1078 MMU_PAGEMASK)); 1079 } else { 1080 hp->ah_pfn = mmu_btop( 1081 (ulong_t)rp->regspec_addr & MMU_PAGEMASK); 1082 } 1083 #else 1084 hp->ah_pfn = mmu_btop((ulong_t)rp->regspec_addr & 1085 MMU_PAGEMASK); 1086 #endif 1087 hp->ah_pnum = mmu_btopr(rp->regspec_size + 1088 (ulong_t)rp->regspec_addr & MMU_PAGEOFFSET); 1089 } 1090 1091 #ifdef DDI_MAP_DEBUG 1092 ddi_map_debug( 1093 "rootnex_map_regspec: \"Mapping\" %d bytes I/O space at 0x%x\n", 1094 rp->regspec_size, *vaddrp); 1095 #endif /* DDI_MAP_DEBUG */ 1096 return (DDI_SUCCESS); 1097 } 1098 1099 /* 1100 * Memory space 1101 */ 1102 1103 if (hp != NULL) { 1104 /* 1105 * hat layer ignores 1106 * hp->ah_acc.devacc_attr_endian_flags. 1107 */ 1108 switch (hp->ah_acc.devacc_attr_dataorder) { 1109 case DDI_STRICTORDER_ACC: 1110 hat_acc_flags = HAT_STRICTORDER; 1111 break; 1112 case DDI_UNORDERED_OK_ACC: 1113 hat_acc_flags = HAT_UNORDERED_OK; 1114 break; 1115 case DDI_MERGING_OK_ACC: 1116 hat_acc_flags = HAT_MERGING_OK; 1117 break; 1118 case DDI_LOADCACHING_OK_ACC: 1119 hat_acc_flags = HAT_LOADCACHING_OK; 1120 break; 1121 case DDI_STORECACHING_OK_ACC: 1122 hat_acc_flags = HAT_STORECACHING_OK; 1123 break; 1124 } 1125 ap = (ddi_acc_impl_t *)hp->ah_platform_private; 1126 ap->ahi_acc_attr |= DDI_ACCATTR_CPU_VADDR; 1127 impl_acc_hdl_init(hp); 1128 hp->ah_hat_flags = hat_acc_flags; 1129 } else { 1130 hat_acc_flags = HAT_STRICTORDER; 1131 } 1132 1133 rbase = (rootnex_addr_t)(rp->regspec_addr & MMU_PAGEMASK); 1134 #ifdef __xpv 1135 /* 1136 * If we're dom0, we're using a real device so we need to translate 1137 * the MA to a PA. 1138 */ 1139 if (DOMAIN_IS_INITDOMAIN(xen_info)) { 1140 pbase = pfn_to_pa(xen_assign_pfn(mmu_btop(rbase))); 1141 } else { 1142 pbase = rbase; 1143 } 1144 #else 1145 pbase = rbase; 1146 #endif 1147 pgoffset = (ulong_t)rp->regspec_addr & MMU_PAGEOFFSET; 1148 1149 if (rp->regspec_size == 0) { 1150 #ifdef DDI_MAP_DEBUG 1151 ddi_map_debug("rootnex_map_regspec: zero regspec_size\n"); 1152 #endif /* DDI_MAP_DEBUG */ 1153 return (DDI_ME_INVAL); 1154 } 1155 1156 if (mp->map_flags & DDI_MF_DEVICE_MAPPING) { 1157 /* extra cast to make gcc happy */ 1158 *vaddrp = (caddr_t)((uintptr_t)mmu_btop(pbase)); 1159 } else { 1160 npages = mmu_btopr(rp->regspec_size + pgoffset); 1161 1162 #ifdef DDI_MAP_DEBUG 1163 ddi_map_debug("rootnex_map_regspec: Mapping %d pages " 1164 "physical %llx", npages, pbase); 1165 #endif /* DDI_MAP_DEBUG */ 1166 1167 cvaddr = device_arena_alloc(ptob(npages), VM_NOSLEEP); 1168 if (cvaddr == NULL) 1169 return (DDI_ME_NORESOURCES); 1170 1171 /* 1172 * Now map in the pages we've allocated... 
1173 */ 1174 hat_devload(kas.a_hat, cvaddr, mmu_ptob(npages), 1175 mmu_btop(pbase), mp->map_prot | hat_acc_flags, 1176 HAT_LOAD_LOCK); 1177 *vaddrp = (caddr_t)cvaddr + pgoffset; 1178 1179 /* save away pfn and npages for FMA */ 1180 hp = mp->map_handlep; 1181 if (hp) { 1182 hp->ah_pfn = mmu_btop(pbase); 1183 hp->ah_pnum = npages; 1184 } 1185 } 1186 1187 #ifdef DDI_MAP_DEBUG 1188 ddi_map_debug("at virtual 0x%x\n", *vaddrp); 1189 #endif /* DDI_MAP_DEBUG */ 1190 return (DDI_SUCCESS); 1191 } 1192 1193 1194 /* 1195 * rootnex_unmap_regspec() 1196 * 1197 */ 1198 static int 1199 rootnex_unmap_regspec(ddi_map_req_t *mp, caddr_t *vaddrp) 1200 { 1201 caddr_t addr = (caddr_t)*vaddrp; 1202 uint_t npages, pgoffset; 1203 struct regspec *rp; 1204 1205 if (mp->map_flags & DDI_MF_DEVICE_MAPPING) 1206 return (0); 1207 1208 rp = mp->map_obj.rp; 1209 1210 if (rp->regspec_size == 0) { 1211 #ifdef DDI_MAP_DEBUG 1212 ddi_map_debug("rootnex_unmap_regspec: zero regspec_size\n"); 1213 #endif /* DDI_MAP_DEBUG */ 1214 return (DDI_ME_INVAL); 1215 } 1216 1217 /* 1218 * I/O or memory mapping: 1219 * 1220 * <bustype=0, addr=x, len=x>: memory 1221 * <bustype=1, addr=x, len=x>: i/o 1222 * <bustype>1, addr=0, len=x>: x86-compatibility i/o 1223 */ 1224 if (rp->regspec_bustype != 0) { 1225 /* 1226 * This is I/O space, which requires no particular 1227 * processing on unmap since it isn't mapped in the 1228 * first place. 1229 */ 1230 return (DDI_SUCCESS); 1231 } 1232 1233 /* 1234 * Memory space 1235 */ 1236 pgoffset = (uintptr_t)addr & MMU_PAGEOFFSET; 1237 npages = mmu_btopr(rp->regspec_size + pgoffset); 1238 hat_unload(kas.a_hat, addr - pgoffset, ptob(npages), HAT_UNLOAD_UNLOCK); 1239 device_arena_free(addr - pgoffset, ptob(npages)); 1240 1241 /* 1242 * Destroy the pointer - the mapping has logically gone 1243 */ 1244 *vaddrp = NULL; 1245 1246 return (DDI_SUCCESS); 1247 } 1248 1249 1250 /* 1251 * rootnex_map_handle() 1252 * 1253 */ 1254 static int 1255 rootnex_map_handle(ddi_map_req_t *mp) 1256 { 1257 rootnex_addr_t rbase; 1258 ddi_acc_hdl_t *hp; 1259 uint_t pgoffset; 1260 struct regspec *rp; 1261 paddr_t pbase; 1262 1263 rp = mp->map_obj.rp; 1264 1265 #ifdef DDI_MAP_DEBUG 1266 ddi_map_debug( 1267 "rootnex_map_handle: <0x%x 0x%x 0x%x> handle 0x%x\n", 1268 rp->regspec_bustype, rp->regspec_addr, 1269 rp->regspec_size, mp->map_handlep); 1270 #endif /* DDI_MAP_DEBUG */ 1271 1272 /* 1273 * I/O or memory mapping: 1274 * 1275 * <bustype=0, addr=x, len=x>: memory 1276 * <bustype=1, addr=x, len=x>: i/o 1277 * <bustype>1, addr=0, len=x>: x86-compatibility i/o 1278 */ 1279 if (rp->regspec_bustype != 0) { 1280 /* 1281 * This refers to I/O space, and we don't support "mapping" 1282 * I/O space to a user. 1283 */ 1284 return (DDI_FAILURE); 1285 } 1286 1287 /* 1288 * Set up the hat_flags for the mapping. 
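	 *
	 * This op is typically driven by the devmap framework on behalf of a
	 * leaf driver's devmap(9E) entry point; an illustrative sketch of
	 * that side (xx_dip, dhp, off and len as passed to devmap(9E)):
	 *
	 *	ddi_device_acc_attr_t acc;
	 *
	 *	acc.devacc_attr_version = DDI_DEVICE_ATTR_V0;
	 *	acc.devacc_attr_endian_flags = DDI_NEVERSWAP_ACC;
	 *	acc.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
	 *	return (devmap_devmem_setup(dhp, xx_dip, NULL, 1, off, len,
	 *	    PROT_ALL, 0, &acc));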
1289 */ 1290 hp = mp->map_handlep; 1291 1292 switch (hp->ah_acc.devacc_attr_endian_flags) { 1293 case DDI_NEVERSWAP_ACC: 1294 hp->ah_hat_flags = HAT_NEVERSWAP | HAT_STRICTORDER; 1295 break; 1296 case DDI_STRUCTURE_LE_ACC: 1297 hp->ah_hat_flags = HAT_STRUCTURE_LE; 1298 break; 1299 case DDI_STRUCTURE_BE_ACC: 1300 return (DDI_FAILURE); 1301 default: 1302 return (DDI_REGS_ACC_CONFLICT); 1303 } 1304 1305 switch (hp->ah_acc.devacc_attr_dataorder) { 1306 case DDI_STRICTORDER_ACC: 1307 break; 1308 case DDI_UNORDERED_OK_ACC: 1309 hp->ah_hat_flags |= HAT_UNORDERED_OK; 1310 break; 1311 case DDI_MERGING_OK_ACC: 1312 hp->ah_hat_flags |= HAT_MERGING_OK; 1313 break; 1314 case DDI_LOADCACHING_OK_ACC: 1315 hp->ah_hat_flags |= HAT_LOADCACHING_OK; 1316 break; 1317 case DDI_STORECACHING_OK_ACC: 1318 hp->ah_hat_flags |= HAT_STORECACHING_OK; 1319 break; 1320 default: 1321 return (DDI_FAILURE); 1322 } 1323 1324 rbase = (rootnex_addr_t)rp->regspec_addr & 1325 (~(rootnex_addr_t)MMU_PAGEOFFSET); 1326 pgoffset = (ulong_t)rp->regspec_addr & MMU_PAGEOFFSET; 1327 1328 if (rp->regspec_size == 0) 1329 return (DDI_ME_INVAL); 1330 1331 #ifdef __xpv 1332 /* 1333 * If we're dom0, we're using a real device so we need to translate 1334 * the MA to a PA. 1335 */ 1336 if (DOMAIN_IS_INITDOMAIN(xen_info)) { 1337 pbase = pfn_to_pa(xen_assign_pfn(mmu_btop(rbase))) | 1338 (rbase & MMU_PAGEOFFSET); 1339 } else { 1340 pbase = rbase; 1341 } 1342 #else 1343 pbase = rbase; 1344 #endif 1345 1346 hp->ah_pfn = mmu_btop(pbase); 1347 hp->ah_pnum = mmu_btopr(rp->regspec_size + pgoffset); 1348 1349 return (DDI_SUCCESS); 1350 } 1351 1352 1353 1354 /* 1355 * ************************ 1356 * interrupt related code 1357 * ************************ 1358 */ 1359 1360 /* 1361 * rootnex_intr_ops() 1362 * bus_intr_op() function for interrupt support 1363 */ 1364 /* ARGSUSED */ 1365 static int 1366 rootnex_intr_ops(dev_info_t *pdip, dev_info_t *rdip, ddi_intr_op_t intr_op, 1367 ddi_intr_handle_impl_t *hdlp, void *result) 1368 { 1369 struct intrspec *ispec; 1370 struct ddi_parent_private_data *pdp; 1371 1372 DDI_INTR_NEXDBG((CE_CONT, 1373 "rootnex_intr_ops: pdip = %p, rdip = %p, intr_op = %x, hdlp = %p\n", 1374 (void *)pdip, (void *)rdip, intr_op, (void *)hdlp)); 1375 1376 /* Process the interrupt operation */ 1377 switch (intr_op) { 1378 case DDI_INTROP_GETCAP: 1379 /* First check with pcplusmp */ 1380 if (psm_intr_ops == NULL) 1381 return (DDI_FAILURE); 1382 1383 if ((*psm_intr_ops)(rdip, hdlp, PSM_INTR_OP_GET_CAP, result)) { 1384 *(int *)result = 0; 1385 return (DDI_FAILURE); 1386 } 1387 break; 1388 case DDI_INTROP_SETCAP: 1389 if (psm_intr_ops == NULL) 1390 return (DDI_FAILURE); 1391 1392 if ((*psm_intr_ops)(rdip, hdlp, PSM_INTR_OP_SET_CAP, result)) 1393 return (DDI_FAILURE); 1394 break; 1395 case DDI_INTROP_ALLOC: 1396 if ((ispec = rootnex_get_ispec(rdip, hdlp->ih_inum)) == NULL) 1397 return (DDI_FAILURE); 1398 hdlp->ih_pri = ispec->intrspec_pri; 1399 *(int *)result = hdlp->ih_scratch1; 1400 break; 1401 case DDI_INTROP_FREE: 1402 pdp = ddi_get_parent_data(rdip); 1403 /* 1404 * Special case for 'pcic' driver' only. 1405 * If an intrspec was created for it, clean it up here 1406 * See detailed comments on this in the function 1407 * rootnex_get_ispec(). 
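		 *
		 * More generally, the DDI_INTROP_* requests in this switch
		 * are generated by the leaf-driver interrupt DDI calls.  An
		 * illustrative sketch of that side, for FIXED interrupts
		 * (xx_* names are hypothetical):
		 *
		 *	ddi_intr_handle_t ihdl;
		 *	int navail, actual;
		 *
		 *	(void) ddi_intr_get_navail(xx_dip, DDI_INTR_TYPE_FIXED,
		 *	    &navail);
		 *	(void) ddi_intr_alloc(xx_dip, &ihdl, DDI_INTR_TYPE_FIXED,
		 *	    0, 1, &actual, DDI_INTR_ALLOC_NORMAL);
		 *	(void) ddi_intr_add_handler(ihdl, xx_intr, xx_softc,
		 *	    NULL);
		 *	(void) ddi_intr_enable(ihdl);
		 *
		 * with ddi_intr_disable()/ddi_intr_remove_handler()/
		 * ddi_intr_free() undoing the above at detach time.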
1408 */ 1409 if (pdp->par_intr && strcmp(ddi_get_name(rdip), "pcic") == 0) { 1410 kmem_free(pdp->par_intr, sizeof (struct intrspec) * 1411 pdp->par_nintr); 1412 /* 1413 * Set it to zero; so that 1414 * DDI framework doesn't free it again 1415 */ 1416 pdp->par_intr = NULL; 1417 pdp->par_nintr = 0; 1418 } 1419 break; 1420 case DDI_INTROP_GETPRI: 1421 if ((ispec = rootnex_get_ispec(rdip, hdlp->ih_inum)) == NULL) 1422 return (DDI_FAILURE); 1423 *(int *)result = ispec->intrspec_pri; 1424 break; 1425 case DDI_INTROP_SETPRI: 1426 /* Validate the interrupt priority passed to us */ 1427 if (*(int *)result > LOCK_LEVEL) 1428 return (DDI_FAILURE); 1429 1430 /* Ensure that PSM is all initialized and ispec is ok */ 1431 if ((psm_intr_ops == NULL) || 1432 ((ispec = rootnex_get_ispec(rdip, hdlp->ih_inum)) == NULL)) 1433 return (DDI_FAILURE); 1434 1435 /* Change the priority */ 1436 if ((*psm_intr_ops)(rdip, hdlp, PSM_INTR_OP_SET_PRI, result) == 1437 PSM_FAILURE) 1438 return (DDI_FAILURE); 1439 1440 /* update the ispec with the new priority */ 1441 ispec->intrspec_pri = *(int *)result; 1442 break; 1443 case DDI_INTROP_ADDISR: 1444 if ((ispec = rootnex_get_ispec(rdip, hdlp->ih_inum)) == NULL) 1445 return (DDI_FAILURE); 1446 ispec->intrspec_func = hdlp->ih_cb_func; 1447 break; 1448 case DDI_INTROP_REMISR: 1449 if ((ispec = rootnex_get_ispec(rdip, hdlp->ih_inum)) == NULL) 1450 return (DDI_FAILURE); 1451 ispec->intrspec_func = (uint_t (*)()) 0; 1452 break; 1453 case DDI_INTROP_ENABLE: 1454 if ((ispec = rootnex_get_ispec(rdip, hdlp->ih_inum)) == NULL) 1455 return (DDI_FAILURE); 1456 1457 /* Call psmi to translate irq with the dip */ 1458 if (psm_intr_ops == NULL) 1459 return (DDI_FAILURE); 1460 1461 ((ihdl_plat_t *)hdlp->ih_private)->ip_ispecp = ispec; 1462 (void) (*psm_intr_ops)(rdip, hdlp, PSM_INTR_OP_XLATE_VECTOR, 1463 (int *)&hdlp->ih_vector); 1464 1465 /* Add the interrupt handler */ 1466 if (!add_avintr((void *)hdlp, ispec->intrspec_pri, 1467 hdlp->ih_cb_func, DEVI(rdip)->devi_name, hdlp->ih_vector, 1468 hdlp->ih_cb_arg1, hdlp->ih_cb_arg2, NULL, rdip)) 1469 return (DDI_FAILURE); 1470 break; 1471 case DDI_INTROP_DISABLE: 1472 if ((ispec = rootnex_get_ispec(rdip, hdlp->ih_inum)) == NULL) 1473 return (DDI_FAILURE); 1474 1475 /* Call psm_ops() to translate irq with the dip */ 1476 if (psm_intr_ops == NULL) 1477 return (DDI_FAILURE); 1478 1479 ((ihdl_plat_t *)hdlp->ih_private)->ip_ispecp = ispec; 1480 (void) (*psm_intr_ops)(rdip, hdlp, 1481 PSM_INTR_OP_XLATE_VECTOR, (int *)&hdlp->ih_vector); 1482 1483 /* Remove the interrupt handler */ 1484 rem_avintr((void *)hdlp, ispec->intrspec_pri, 1485 hdlp->ih_cb_func, hdlp->ih_vector); 1486 break; 1487 case DDI_INTROP_SETMASK: 1488 if (psm_intr_ops == NULL) 1489 return (DDI_FAILURE); 1490 1491 if ((*psm_intr_ops)(rdip, hdlp, PSM_INTR_OP_SET_MASK, NULL)) 1492 return (DDI_FAILURE); 1493 break; 1494 case DDI_INTROP_CLRMASK: 1495 if (psm_intr_ops == NULL) 1496 return (DDI_FAILURE); 1497 1498 if ((*psm_intr_ops)(rdip, hdlp, PSM_INTR_OP_CLEAR_MASK, NULL)) 1499 return (DDI_FAILURE); 1500 break; 1501 case DDI_INTROP_GETPENDING: 1502 if (psm_intr_ops == NULL) 1503 return (DDI_FAILURE); 1504 1505 if ((*psm_intr_ops)(rdip, hdlp, PSM_INTR_OP_GET_PENDING, 1506 result)) { 1507 *(int *)result = 0; 1508 return (DDI_FAILURE); 1509 } 1510 break; 1511 case DDI_INTROP_NAVAIL: 1512 case DDI_INTROP_NINTRS: 1513 *(int *)result = i_ddi_get_intx_nintrs(rdip); 1514 if (*(int *)result == 0) { 1515 /* 1516 * Special case for 'pcic' driver' only. 
			 * This driver is a child of the 'isa' and 'rootnex'
			 * drivers.
			 *
			 * See detailed comments on this in the function
			 * rootnex_get_ispec().
			 *
			 * Children of 'pcic' send the 'NINTRS' request all
			 * the way to the rootnex driver.  But, the
			 * 'pdp->par_nintr' field may not be initialized.  So,
			 * we fake it here to return 1 (a la what the PCMCIA
			 * nexus does).
			 */
			if (strcmp(ddi_get_name(rdip), "pcic") == 0)
				*(int *)result = 1;
			else
				return (DDI_FAILURE);
		}
		break;
	case DDI_INTROP_SUPPORTED_TYPES:
		*(int *)result = DDI_INTR_TYPE_FIXED;	/* Always ... */
		break;
	default:
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}


/*
 * rootnex_get_ispec()
 *	convert an interrupt number to an interrupt specification.
 *	The interrupt number determines which interrupt spec will be
 *	returned if more than one exists.
 *
 *	Look into the parent private data area of the 'rdip' to find out
 *	the interrupt specification.  First check to make sure there is
 *	one that matches "inumber" and then return a pointer to it.
 *
 *	Return NULL if one could not be found.
 *
 *	NOTE: This is needed for rootnex_intr_ops()
 */
static struct intrspec *
rootnex_get_ispec(dev_info_t *rdip, int inum)
{
	struct ddi_parent_private_data *pdp = ddi_get_parent_data(rdip);

	/*
	 * Special case handling for drivers that provide their own
	 * intrspec structures instead of relying on the DDI framework.
	 *
	 * A broken hardware driver in ON could potentially provide its
	 * own intrspec structure, instead of relying on the hardware.
	 * If these drivers are children of 'rootnex' then we need to
	 * continue to provide backward compatibility to them here.
	 *
	 * The following check is a special case for the 'pcic' driver,
	 * which was found to have broken hardware and provides its own
	 * intrspec.
	 *
	 * Verbatim comments from this driver are shown here:
	 * "Don't use the ddi_add_intr since we don't have a
	 * default intrspec in all cases."
	 *
	 * Since an 'ispec' may not always be created for it,
	 * check for that and create one if so.
	 *
	 * NOTE: Currently 'pcic' is the only driver found to do this.
	 */
	if (!pdp->par_intr && strcmp(ddi_get_name(rdip), "pcic") == 0) {
		pdp->par_nintr = 1;
		pdp->par_intr = kmem_zalloc(sizeof (struct intrspec) *
		    pdp->par_nintr, KM_SLEEP);
	}

	/* Validate the interrupt number */
	if (inum >= pdp->par_nintr)
		return (NULL);

	/* Get the interrupt structure pointer and return that */
	return ((struct intrspec *)&pdp->par_intr[inum]);
}


/*
 * ******************
 *  dma related code
 * ******************
 */

/*ARGSUSED*/
static int
rootnex_coredma_allochdl(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_attr_t *attr, int (*waitfp)(caddr_t), caddr_t arg,
    ddi_dma_handle_t *handlep)
{
	uint64_t maxsegmentsize_ll;
	uint_t maxsegmentsize;
	ddi_dma_impl_t *hp;
	rootnex_dma_t *dma;
	uint64_t count_max;
	uint64_t seg;
	int kmflag;
	int e;


	/* convert our sleep flags */
	if (waitfp == DDI_DMA_SLEEP) {
		kmflag = KM_SLEEP;
	} else {
		kmflag = KM_NOSLEEP;
	}

	/*
	 * We try to do only one memory allocation here.  We'll do a little
	 * pointer manipulation later.  If the bind ends up taking more than
	 * our prealloc's space, we'll have to allocate more memory in the
	 * bind operation.  Not great, but much better than before and the
	 * best we can do with the current bind interfaces.
	 */
	hp = kmem_cache_alloc(rootnex_state->r_dmahdl_cache, kmflag);
	if (hp == NULL) {
		if (waitfp != DDI_DMA_DONTWAIT) {
			ddi_set_callback(waitfp, arg,
			    &rootnex_state->r_dvma_call_list_id);
		}
		return (DDI_DMA_NORESOURCES);
	}

	/* Do our pointer manipulation now, align the structures */
	hp->dmai_private = (void *)(((uintptr_t)hp +
	    (uintptr_t)sizeof (ddi_dma_impl_t) + 0x7) & ~0x7);
	dma = (rootnex_dma_t *)hp->dmai_private;
	dma->dp_prealloc_buffer = (uchar_t *)(((uintptr_t)dma +
	    sizeof (rootnex_dma_t) + 0x7) & ~0x7);

	/* setup the handle */
	rootnex_clean_dmahdl(hp);
	dma->dp_dip = rdip;
	dma->dp_sglinfo.si_min_addr = attr->dma_attr_addr_lo;
	dma->dp_sglinfo.si_max_addr = attr->dma_attr_addr_hi;
	hp->dmai_minxfer = attr->dma_attr_minxfer;
	hp->dmai_burstsizes = attr->dma_attr_burstsizes;
	hp->dmai_rdip = rdip;
	hp->dmai_attr = *attr;

	/* we don't need to worry about the SPL since we do a tryenter */
	mutex_init(&dma->dp_mutex, NULL, MUTEX_DRIVER, NULL);

	/*
	 * Figure out our maximum segment size.  If the segment size is
	 * greater than 4G, we will limit it to (4G - 1) since the max size
	 * of a dma object (ddi_dma_obj_t.dmao_size) is 32 bits.  dma_attr_seg
	 * and dma_attr_count_max are size-1 type values.
	 *
	 * Maximum segment size is the largest physically contiguous chunk of
	 * memory that we can return from a bind (i.e. the maximum size of a
	 * single cookie).
	 */

	/* handle the rollover cases */
	seg = attr->dma_attr_seg + 1;
	if (seg < attr->dma_attr_seg) {
		seg = attr->dma_attr_seg;
	}
	count_max = attr->dma_attr_count_max + 1;
	if (count_max < attr->dma_attr_count_max) {
		count_max = attr->dma_attr_count_max;
	}

	/*
	 * granularity may or may not be a power of two.  If it isn't, we
	 * can't use a simple mask.
	 */
	if (attr->dma_attr_granular & (attr->dma_attr_granular - 1)) {
		dma->dp_granularity_power_2 = B_FALSE;
	} else {
		dma->dp_granularity_power_2 = B_TRUE;
	}

	/*
	 * maxxfer should be a whole multiple of granularity.  If we're going
	 * to break up a window because we're greater than maxxfer, we might
	 * as well make sure its maxxfer is a whole multiple so we don't have
	 * to worry about trimming the window later on for this case.
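	 *
	 * For example (illustrative numbers): with dma_attr_maxxfer = 0x10000
	 * and a non-power-of-two dma_attr_granular = 0x600, dp_maxxfer below
	 * becomes 0x10000 - (0x10000 % 0x600) = 0xfc00, the largest multiple
	 * of the granularity that fits.  With a power-of-two granularity such
	 * as 0x200, the mask form gives 0x10000 - (0x10000 & 0x1ff) = 0x10000.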
1700 */ 1701 if (attr->dma_attr_granular > 1) { 1702 if (dma->dp_granularity_power_2) { 1703 dma->dp_maxxfer = attr->dma_attr_maxxfer - 1704 (attr->dma_attr_maxxfer & 1705 (attr->dma_attr_granular - 1)); 1706 } else { 1707 dma->dp_maxxfer = attr->dma_attr_maxxfer - 1708 (attr->dma_attr_maxxfer % attr->dma_attr_granular); 1709 } 1710 } else { 1711 dma->dp_maxxfer = attr->dma_attr_maxxfer; 1712 } 1713 1714 maxsegmentsize_ll = MIN(seg, dma->dp_maxxfer); 1715 maxsegmentsize_ll = MIN(maxsegmentsize_ll, count_max); 1716 if (maxsegmentsize_ll == 0 || (maxsegmentsize_ll > 0xFFFFFFFF)) { 1717 maxsegmentsize = 0xFFFFFFFF; 1718 } else { 1719 maxsegmentsize = maxsegmentsize_ll; 1720 } 1721 dma->dp_sglinfo.si_max_cookie_size = maxsegmentsize; 1722 dma->dp_sglinfo.si_segmask = attr->dma_attr_seg; 1723 1724 /* check the ddi_dma_attr arg to make sure it makes a little sense */ 1725 if (rootnex_alloc_check_parms) { 1726 e = rootnex_valid_alloc_parms(attr, maxsegmentsize); 1727 if (e != DDI_SUCCESS) { 1728 ROOTNEX_PROF_INC(&rootnex_cnt[ROOTNEX_CNT_ALLOC_FAIL]); 1729 (void) rootnex_dma_freehdl(dip, rdip, 1730 (ddi_dma_handle_t)hp); 1731 return (e); 1732 } 1733 } 1734 1735 *handlep = (ddi_dma_handle_t)hp; 1736 1737 ROOTNEX_PROF_INC(&rootnex_cnt[ROOTNEX_CNT_ACTIVE_HDLS]); 1738 DTRACE_PROBE1(rootnex__alloc__handle, uint64_t, 1739 rootnex_cnt[ROOTNEX_CNT_ACTIVE_HDLS]); 1740 1741 return (DDI_SUCCESS); 1742 } 1743 1744 1745 /* 1746 * rootnex_dma_allochdl() 1747 * called from ddi_dma_alloc_handle(). 1748 */ 1749 static int 1750 rootnex_dma_allochdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_attr_t *attr, 1751 int (*waitfp)(caddr_t), caddr_t arg, ddi_dma_handle_t *handlep) 1752 { 1753 #if !defined(__xpv) 1754 uint_t error = ENOTSUP; 1755 int retval; 1756 1757 retval = iommulib_nex_open(rdip, &error); 1758 1759 if (retval != DDI_SUCCESS && error == ENOTSUP) { 1760 /* No IOMMU */ 1761 return (rootnex_coredma_allochdl(dip, rdip, attr, waitfp, arg, 1762 handlep)); 1763 } else if (retval != DDI_SUCCESS) { 1764 return (DDI_FAILURE); 1765 } 1766 1767 ASSERT(IOMMULIB_HDL(rdip)); 1768 1769 /* has an IOMMU */ 1770 return (iommulib_nexdma_allochdl(dip, rdip, attr, 1771 waitfp, arg, handlep)); 1772 #else 1773 return (rootnex_coredma_allochdl(dip, rdip, attr, waitfp, arg, 1774 handlep)); 1775 #endif 1776 } 1777 1778 /*ARGSUSED*/ 1779 static int 1780 rootnex_coredma_freehdl(dev_info_t *dip, dev_info_t *rdip, 1781 ddi_dma_handle_t handle) 1782 { 1783 ddi_dma_impl_t *hp; 1784 rootnex_dma_t *dma; 1785 1786 1787 hp = (ddi_dma_impl_t *)handle; 1788 dma = (rootnex_dma_t *)hp->dmai_private; 1789 1790 /* unbind should have been called first */ 1791 ASSERT(!dma->dp_inuse); 1792 1793 mutex_destroy(&dma->dp_mutex); 1794 kmem_cache_free(rootnex_state->r_dmahdl_cache, hp); 1795 1796 ROOTNEX_PROF_DEC(&rootnex_cnt[ROOTNEX_CNT_ACTIVE_HDLS]); 1797 DTRACE_PROBE1(rootnex__free__handle, uint64_t, 1798 rootnex_cnt[ROOTNEX_CNT_ACTIVE_HDLS]); 1799 1800 if (rootnex_state->r_dvma_call_list_id) 1801 ddi_run_callback(&rootnex_state->r_dvma_call_list_id); 1802 1803 return (DDI_SUCCESS); 1804 } 1805 1806 /* 1807 * rootnex_dma_freehdl() 1808 * called from ddi_dma_free_handle(). 
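 *
 *	A minimal sketch of the matching leaf-driver calls (illustrative;
 *	xx_* names are hypothetical):
 *
 *		ddi_dma_handle_t dma_hdl;
 *
 *		if (ddi_dma_alloc_handle(xx_dip, &xx_dma_attr, DDI_DMA_SLEEP,
 *		    NULL, &dma_hdl) != DDI_SUCCESS)
 *			return (DDI_FAILURE);
 *		...
 *		ddi_dma_free_handle(&dma_hdl);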
1809 */ 1810 static int 1811 rootnex_dma_freehdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle) 1812 { 1813 #if !defined(__xpv) 1814 if (IOMMU_USED(handle)) { 1815 return (iommulib_nexdma_freehdl(dip, rdip, handle)); 1816 } 1817 #endif 1818 return (rootnex_coredma_freehdl(dip, rdip, handle)); 1819 } 1820 1821 1822 /*ARGSUSED*/ 1823 static int 1824 rootnex_coredma_bindhdl(dev_info_t *dip, dev_info_t *rdip, 1825 ddi_dma_handle_t handle, struct ddi_dma_req *dmareq, 1826 ddi_dma_cookie_t *cookiep, uint_t *ccountp) 1827 { 1828 rootnex_sglinfo_t *sinfo; 1829 ddi_dma_attr_t *attr; 1830 ddi_dma_impl_t *hp; 1831 rootnex_dma_t *dma; 1832 int kmflag; 1833 int e; 1834 1835 1836 hp = (ddi_dma_impl_t *)handle; 1837 dma = (rootnex_dma_t *)hp->dmai_private; 1838 sinfo = &dma->dp_sglinfo; 1839 attr = &hp->dmai_attr; 1840 1841 if (dmareq->dmar_fp == DDI_DMA_SLEEP) { 1842 dma->dp_sleep_flags = KM_SLEEP; 1843 } else { 1844 dma->dp_sleep_flags = KM_NOSLEEP; 1845 } 1846 1847 hp->dmai_rflags = dmareq->dmar_flags & DMP_DDIFLAGS; 1848 1849 /* 1850 * This is useful for debugging a driver. Not as useful in a production 1851 * system. The only time this will fail is if you have a driver bug. 1852 */ 1853 if (rootnex_bind_check_inuse) { 1854 /* 1855 * No one else should ever have this lock unless someone else 1856 * is trying to use this handle. So contention on the lock 1857 * is the same as inuse being set. 1858 */ 1859 e = mutex_tryenter(&dma->dp_mutex); 1860 if (e == 0) { 1861 ROOTNEX_PROF_INC(&rootnex_cnt[ROOTNEX_CNT_BIND_FAIL]); 1862 return (DDI_DMA_INUSE); 1863 } 1864 if (dma->dp_inuse) { 1865 mutex_exit(&dma->dp_mutex); 1866 ROOTNEX_PROF_INC(&rootnex_cnt[ROOTNEX_CNT_BIND_FAIL]); 1867 return (DDI_DMA_INUSE); 1868 } 1869 dma->dp_inuse = B_TRUE; 1870 mutex_exit(&dma->dp_mutex); 1871 } 1872 1873 /* check the ddi_dma_attr arg to make sure it makes a little sense */ 1874 if (rootnex_bind_check_parms) { 1875 e = rootnex_valid_bind_parms(dmareq, attr); 1876 if (e != DDI_SUCCESS) { 1877 ROOTNEX_PROF_INC(&rootnex_cnt[ROOTNEX_CNT_BIND_FAIL]); 1878 rootnex_clean_dmahdl(hp); 1879 return (e); 1880 } 1881 } 1882 1883 /* save away the original bind info */ 1884 dma->dp_dma = dmareq->dmar_object; 1885 1886 #if !defined(__xpv) 1887 if (rootnex_state->r_intel_iommu_enabled) { 1888 e = intel_iommu_map_sgl(handle, dmareq, 1889 rootnex_state->r_prealloc_cookies); 1890 1891 switch (e) { 1892 case IOMMU_SGL_SUCCESS: 1893 goto rootnex_sgl_end; 1894 1895 case IOMMU_SGL_DISABLE: 1896 goto rootnex_sgl_start; 1897 1898 case IOMMU_SGL_NORESOURCES: 1899 cmn_err(CE_WARN, "iommu map sgl failed for %s", 1900 ddi_node_name(dma->dp_dip)); 1901 rootnex_clean_dmahdl(hp); 1902 return (DDI_DMA_NORESOURCES); 1903 1904 default: 1905 cmn_err(CE_WARN, 1906 "undefined value returned from" 1907 " intel_iommu_map_sgl: %d", 1908 e); 1909 rootnex_clean_dmahdl(hp); 1910 return (DDI_DMA_NORESOURCES); 1911 } 1912 } 1913 #endif 1914 1915 rootnex_sgl_start: 1916 /* 1917 * Figure out a rough estimate of what maximum number of pages this 1918 * buffer could use (a high estimate of course). 1919 */ 1920 sinfo->si_max_pages = mmu_btopr(dma->dp_dma.dmao_size) + 1; 1921 1922 /* 1923 * We'll use the pre-allocated cookies for any bind that will *always* 1924 * fit (more important to be consistent, we don't want to create 1925 * additional degenerate cases). 
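	 *
	 * For example (illustrative numbers): a 256K (0x40000 byte) buffer
	 * that does not start on a page boundary can touch
	 * mmu_btopr(0x40000) + 1 = 65 pages with 4K pages, which is exactly
	 * the rootnex_prealloc_cookies default on the 64-bit kernel.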
1926 */ 1927 if (sinfo->si_max_pages <= rootnex_state->r_prealloc_cookies) { 1928 dma->dp_cookies = (ddi_dma_cookie_t *)dma->dp_prealloc_buffer; 1929 dma->dp_need_to_free_cookie = B_FALSE; 1930 DTRACE_PROBE2(rootnex__bind__prealloc, dev_info_t *, rdip, 1931 uint_t, sinfo->si_max_pages); 1932 1933 /* 1934 * For anything larger than that, we'll go ahead and allocate the 1935 * maximum number of pages we expect to see. Hopefuly, we won't be 1936 * seeing this path in the fast path for high performance devices very 1937 * frequently. 1938 * 1939 * a ddi bind interface that allowed the driver to provide storage to 1940 * the bind interface would speed this case up. 1941 */ 1942 } else { 1943 /* convert the sleep flags */ 1944 if (dmareq->dmar_fp == DDI_DMA_SLEEP) { 1945 kmflag = KM_SLEEP; 1946 } else { 1947 kmflag = KM_NOSLEEP; 1948 } 1949 1950 /* 1951 * Save away how much memory we allocated. If we're doing a 1952 * nosleep, the alloc could fail... 1953 */ 1954 dma->dp_cookie_size = sinfo->si_max_pages * 1955 sizeof (ddi_dma_cookie_t); 1956 dma->dp_cookies = kmem_alloc(dma->dp_cookie_size, kmflag); 1957 if (dma->dp_cookies == NULL) { 1958 ROOTNEX_PROF_INC(&rootnex_cnt[ROOTNEX_CNT_BIND_FAIL]); 1959 rootnex_clean_dmahdl(hp); 1960 return (DDI_DMA_NORESOURCES); 1961 } 1962 dma->dp_need_to_free_cookie = B_TRUE; 1963 DTRACE_PROBE2(rootnex__bind__alloc, dev_info_t *, rdip, uint_t, 1964 sinfo->si_max_pages); 1965 } 1966 hp->dmai_cookie = dma->dp_cookies; 1967 1968 /* 1969 * Get the real sgl. rootnex_get_sgl will fill in cookie array while 1970 * looking at the contraints in the dma structure. It will then put some 1971 * additional state about the sgl in the dma struct (i.e. is the sgl 1972 * clean, or do we need to do some munging; how many pages need to be 1973 * copied, etc.) 1974 */ 1975 rootnex_get_sgl(&dmareq->dmar_object, dma->dp_cookies, 1976 &dma->dp_sglinfo); 1977 1978 rootnex_sgl_end: 1979 ASSERT(sinfo->si_sgl_size <= sinfo->si_max_pages); 1980 /* if we don't need a copy buffer, we don't need to sync */ 1981 if (sinfo->si_copybuf_req == 0) { 1982 hp->dmai_rflags |= DMP_NOSYNC; 1983 } 1984 1985 /* 1986 * if we don't need the copybuf and we don't need to do a partial, we 1987 * hit the fast path. All the high performance devices should be trying 1988 * to hit this path. To hit this path, a device should be able to reach 1989 * all of memory, shouldn't try to bind more than it can transfer, and 1990 * the buffer shouldn't require more cookies than the driver/device can 1991 * handle [sgllen]). 1992 */ 1993 if ((sinfo->si_copybuf_req == 0) && 1994 (sinfo->si_sgl_size <= attr->dma_attr_sgllen) && 1995 (dma->dp_dma.dmao_size < dma->dp_maxxfer)) { 1996 /* 1997 * If the driver supports FMA, insert the handle in the FMA DMA 1998 * handle cache. 1999 */ 2000 if (attr->dma_attr_flags & DDI_DMA_FLAGERR) { 2001 hp->dmai_error.err_cf = rootnex_dma_check; 2002 (void) ndi_fmc_insert(rdip, DMA_HANDLE, hp, NULL); 2003 } 2004 2005 /* 2006 * copy out the first cookie and ccountp, set the cookie 2007 * pointer to the second cookie. The first cookie is passed 2008 * back on the stack. 
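		 *
		 * Hedged illustration (the cookie count of 3 is made up):
		 * for a fast-path bind that produced three cookies, the
		 * caller gets back
		 *	*cookiep        == dp_cookies[0]
		 *	*ccountp        == 3
		 *	hp->dmai_cookie -> dp_cookies[1]
		 *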
Additional cookies are accessed via 2009 * ddi_dma_nextcookie() 2010 */ 2011 *cookiep = dma->dp_cookies[0]; 2012 *ccountp = sinfo->si_sgl_size; 2013 hp->dmai_cookie++; 2014 hp->dmai_rflags &= ~DDI_DMA_PARTIAL; 2015 hp->dmai_nwin = 1; 2016 ROOTNEX_PROF_INC(&rootnex_cnt[ROOTNEX_CNT_ACTIVE_BINDS]); 2017 DTRACE_PROBE3(rootnex__bind__fast, dev_info_t *, rdip, uint64_t, 2018 rootnex_cnt[ROOTNEX_CNT_ACTIVE_BINDS], uint_t, 2019 dma->dp_dma.dmao_size); 2020 return (DDI_DMA_MAPPED); 2021 } 2022 2023 /* 2024 * go to the slow path, we may need to alloc more memory, create 2025 * multiple windows, and munge up a sgl to make the device happy. 2026 */ 2027 e = rootnex_bind_slowpath(hp, dmareq, dma, attr, kmflag); 2028 if ((e != DDI_DMA_MAPPED) && (e != DDI_DMA_PARTIAL_MAP)) { 2029 if (dma->dp_need_to_free_cookie) { 2030 kmem_free(dma->dp_cookies, dma->dp_cookie_size); 2031 } 2032 ROOTNEX_PROF_INC(&rootnex_cnt[ROOTNEX_CNT_BIND_FAIL]); 2033 rootnex_clean_dmahdl(hp); /* must be after free cookie */ 2034 return (e); 2035 } 2036 2037 /* 2038 * If the driver supports FMA, insert the handle in the FMA DMA handle 2039 * cache. 2040 */ 2041 if (attr->dma_attr_flags & DDI_DMA_FLAGERR) { 2042 hp->dmai_error.err_cf = rootnex_dma_check; 2043 (void) ndi_fmc_insert(rdip, DMA_HANDLE, hp, NULL); 2044 } 2045 2046 /* if the first window uses the copy buffer, sync it for the device */ 2047 if ((dma->dp_window[dma->dp_current_win].wd_dosync) && 2048 (hp->dmai_rflags & DDI_DMA_WRITE)) { 2049 (void) rootnex_coredma_sync(dip, rdip, handle, 0, 0, 2050 DDI_DMA_SYNC_FORDEV); 2051 } 2052 2053 /* 2054 * copy out the first cookie and ccountp, set the cookie pointer to the 2055 * second cookie. Make sure the partial flag is set/cleared correctly. 2056 * If we have a partial map (i.e. multiple windows), the number of 2057 * cookies we return is the number of cookies in the first window. 2058 */ 2059 if (e == DDI_DMA_MAPPED) { 2060 hp->dmai_rflags &= ~DDI_DMA_PARTIAL; 2061 *ccountp = sinfo->si_sgl_size; 2062 } else { 2063 hp->dmai_rflags |= DDI_DMA_PARTIAL; 2064 *ccountp = dma->dp_window[dma->dp_current_win].wd_cookie_cnt; 2065 ASSERT(hp->dmai_nwin <= dma->dp_max_win); 2066 } 2067 *cookiep = dma->dp_cookies[0]; 2068 hp->dmai_cookie++; 2069 2070 ROOTNEX_PROF_INC(&rootnex_cnt[ROOTNEX_CNT_ACTIVE_BINDS]); 2071 DTRACE_PROBE3(rootnex__bind__slow, dev_info_t *, rdip, uint64_t, 2072 rootnex_cnt[ROOTNEX_CNT_ACTIVE_BINDS], uint_t, 2073 dma->dp_dma.dmao_size); 2074 return (e); 2075 } 2076 2077 2078 /* 2079 * rootnex_dma_bindhdl() 2080 * called from ddi_dma_addr_bind_handle() and ddi_dma_buf_bind_handle(). 
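 *
 *    Hedged usage sketch (illustrative driver-side code, not part of this
 *    file; "hdl" and "bp" are assumed to exist and DDI_DMA_PARTIAL_MAP is
 *    ignored for brevity):
 *
 *	ddi_dma_cookie_t cookie;
 *	uint_t ccount;
 *
 *	if (ddi_dma_buf_bind_handle(hdl, bp,
 *	    DDI_DMA_READ | DDI_DMA_STREAMING, DDI_DMA_SLEEP, NULL,
 *	    &cookie, &ccount) != DDI_DMA_MAPPED)
 *		return (EIO);
 *
 *    The driver then programs its device from cookie.dmac_laddress and
 *    cookie.dmac_size, walking any remaining cookies with
 *    ddi_dma_nextcookie(9F).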
2081 */ 2082 static int 2083 rootnex_dma_bindhdl(dev_info_t *dip, dev_info_t *rdip, 2084 ddi_dma_handle_t handle, struct ddi_dma_req *dmareq, 2085 ddi_dma_cookie_t *cookiep, uint_t *ccountp) 2086 { 2087 #if !defined(__xpv) 2088 if (IOMMU_USED(handle)) { 2089 return (iommulib_nexdma_bindhdl(dip, rdip, handle, dmareq, 2090 cookiep, ccountp)); 2091 } 2092 #endif 2093 return (rootnex_coredma_bindhdl(dip, rdip, handle, dmareq, 2094 cookiep, ccountp)); 2095 } 2096 2097 /*ARGSUSED*/ 2098 static int 2099 rootnex_coredma_unbindhdl(dev_info_t *dip, dev_info_t *rdip, 2100 ddi_dma_handle_t handle) 2101 { 2102 ddi_dma_impl_t *hp; 2103 rootnex_dma_t *dma; 2104 int e; 2105 2106 hp = (ddi_dma_impl_t *)handle; 2107 dma = (rootnex_dma_t *)hp->dmai_private; 2108 2109 /* make sure the buffer wasn't free'd before calling unbind */ 2110 if (rootnex_unbind_verify_buffer) { 2111 e = rootnex_verify_buffer(dma); 2112 if (e != DDI_SUCCESS) { 2113 ASSERT(0); 2114 return (DDI_FAILURE); 2115 } 2116 } 2117 2118 /* sync the current window before unbinding the buffer */ 2119 if (dma->dp_window && dma->dp_window[dma->dp_current_win].wd_dosync && 2120 (hp->dmai_rflags & DDI_DMA_READ)) { 2121 (void) rootnex_coredma_sync(dip, rdip, handle, 0, 0, 2122 DDI_DMA_SYNC_FORCPU); 2123 } 2124 2125 /* 2126 * If the driver supports FMA, remove the handle in the FMA DMA handle 2127 * cache. 2128 */ 2129 if (hp->dmai_attr.dma_attr_flags & DDI_DMA_FLAGERR) { 2130 if ((DEVI(rdip)->devi_fmhdl != NULL) && 2131 (DDI_FM_DMA_ERR_CAP(DEVI(rdip)->devi_fmhdl->fh_cap))) { 2132 (void) ndi_fmc_remove(rdip, DMA_HANDLE, hp); 2133 } 2134 } 2135 2136 /* 2137 * cleanup and copy buffer or window state. if we didn't use the copy 2138 * buffer or windows, there won't be much to do :-) 2139 */ 2140 rootnex_teardown_copybuf(dma); 2141 rootnex_teardown_windows(dma); 2142 2143 #if !defined(__xpv) 2144 /* 2145 * If intel iommu enabled, clean up the page tables and free the dvma 2146 */ 2147 if (rootnex_state->r_intel_iommu_enabled) { 2148 intel_iommu_unmap_sgl(handle); 2149 } 2150 #endif 2151 2152 /* 2153 * If we had to allocate space to for the worse case sgl (it didn't 2154 * fit into our pre-allocate buffer), free that up now 2155 */ 2156 if (dma->dp_need_to_free_cookie) { 2157 kmem_free(dma->dp_cookies, dma->dp_cookie_size); 2158 } 2159 2160 /* 2161 * clean up the handle so it's ready for the next bind (i.e. if the 2162 * handle is reused). 
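	 * A hedged illustration of that reuse (driver-side, not from this
	 * file): a driver that allocates a single handle at attach time and
	 * then, per I/O, does
	 *	ddi_dma_buf_bind_handle(hdl, bp, ...);
	 *	... transfer ...
	 *	(void) ddi_dma_unbind_handle(hdl);
	 * depends on this reset so the next bind on the same handle starts
	 * from a clean state.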
2163 */ 2164 rootnex_clean_dmahdl(hp); 2165 2166 if (rootnex_state->r_dvma_call_list_id) 2167 ddi_run_callback(&rootnex_state->r_dvma_call_list_id); 2168 2169 ROOTNEX_PROF_DEC(&rootnex_cnt[ROOTNEX_CNT_ACTIVE_BINDS]); 2170 DTRACE_PROBE1(rootnex__unbind, uint64_t, 2171 rootnex_cnt[ROOTNEX_CNT_ACTIVE_BINDS]); 2172 2173 return (DDI_SUCCESS); 2174 } 2175 2176 /* 2177 * rootnex_dma_unbindhdl() 2178 * called from ddi_dma_unbind_handle() 2179 */ 2180 /*ARGSUSED*/ 2181 static int 2182 rootnex_dma_unbindhdl(dev_info_t *dip, dev_info_t *rdip, 2183 ddi_dma_handle_t handle) 2184 { 2185 #if !defined(__xpv) 2186 if (IOMMU_USED(handle)) { 2187 return (iommulib_nexdma_unbindhdl(dip, rdip, handle)); 2188 } 2189 #endif 2190 return (rootnex_coredma_unbindhdl(dip, rdip, handle)); 2191 } 2192 2193 #if !defined(__xpv) 2194 2195 static int 2196 rootnex_coredma_get_sleep_flags(ddi_dma_handle_t handle) 2197 { 2198 ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle; 2199 rootnex_dma_t *dma = (rootnex_dma_t *)hp->dmai_private; 2200 2201 if (dma->dp_sleep_flags != KM_SLEEP && 2202 dma->dp_sleep_flags != KM_NOSLEEP) 2203 cmn_err(CE_PANIC, "kmem sleep flags not set in DMA handle"); 2204 return (dma->dp_sleep_flags); 2205 } 2206 /*ARGSUSED*/ 2207 static void 2208 rootnex_coredma_reset_cookies(dev_info_t *dip, ddi_dma_handle_t handle) 2209 { 2210 ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle; 2211 rootnex_dma_t *dma = (rootnex_dma_t *)hp->dmai_private; 2212 rootnex_window_t *window; 2213 2214 if (dma->dp_window) { 2215 window = &dma->dp_window[dma->dp_current_win]; 2216 hp->dmai_cookie = window->wd_first_cookie; 2217 } else { 2218 hp->dmai_cookie = dma->dp_cookies; 2219 } 2220 hp->dmai_cookie++; 2221 } 2222 2223 /*ARGSUSED*/ 2224 static int 2225 rootnex_coredma_get_cookies(dev_info_t *dip, ddi_dma_handle_t handle, 2226 ddi_dma_cookie_t **cookiepp, uint_t *ccountp) 2227 { 2228 int i; 2229 int km_flags; 2230 ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle; 2231 rootnex_dma_t *dma = (rootnex_dma_t *)hp->dmai_private; 2232 rootnex_window_t *window; 2233 ddi_dma_cookie_t *cp; 2234 ddi_dma_cookie_t *cookie; 2235 2236 ASSERT(*cookiepp == NULL); 2237 ASSERT(*ccountp == 0); 2238 2239 if (dma->dp_window) { 2240 window = &dma->dp_window[dma->dp_current_win]; 2241 cp = window->wd_first_cookie; 2242 *ccountp = window->wd_cookie_cnt; 2243 } else { 2244 cp = dma->dp_cookies; 2245 *ccountp = dma->dp_sglinfo.si_sgl_size; 2246 } 2247 2248 km_flags = rootnex_coredma_get_sleep_flags(handle); 2249 cookie = kmem_zalloc(sizeof (ddi_dma_cookie_t) * (*ccountp), km_flags); 2250 if (cookie == NULL) { 2251 return (DDI_DMA_NORESOURCES); 2252 } 2253 2254 for (i = 0; i < *ccountp; i++) { 2255 cookie[i].dmac_notused = cp[i].dmac_notused; 2256 cookie[i].dmac_type = cp[i].dmac_type; 2257 cookie[i].dmac_address = cp[i].dmac_address; 2258 cookie[i].dmac_size = cp[i].dmac_size; 2259 } 2260 2261 *cookiepp = cookie; 2262 2263 return (DDI_SUCCESS); 2264 } 2265 2266 /*ARGSUSED*/ 2267 static int 2268 rootnex_coredma_set_cookies(dev_info_t *dip, ddi_dma_handle_t handle, 2269 ddi_dma_cookie_t *cookiep, uint_t ccount) 2270 { 2271 ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle; 2272 rootnex_dma_t *dma = (rootnex_dma_t *)hp->dmai_private; 2273 rootnex_window_t *window; 2274 ddi_dma_cookie_t *cur_cookiep; 2275 2276 ASSERT(cookiep); 2277 ASSERT(ccount != 0); 2278 ASSERT(dma->dp_need_to_switch_cookies == B_FALSE); 2279 2280 if (dma->dp_window) { 2281 window = &dma->dp_window[dma->dp_current_win]; 2282 dma->dp_saved_cookies = window->wd_first_cookie; 2283 window->wd_first_cookie 
= cookiep; 2284 ASSERT(ccount == window->wd_cookie_cnt); 2285 cur_cookiep = (hp->dmai_cookie - dma->dp_saved_cookies) 2286 + window->wd_first_cookie; 2287 } else { 2288 dma->dp_saved_cookies = dma->dp_cookies; 2289 dma->dp_cookies = cookiep; 2290 ASSERT(ccount == dma->dp_sglinfo.si_sgl_size); 2291 cur_cookiep = (hp->dmai_cookie - dma->dp_saved_cookies) 2292 + dma->dp_cookies; 2293 } 2294 2295 dma->dp_need_to_switch_cookies = B_TRUE; 2296 hp->dmai_cookie = cur_cookiep; 2297 2298 return (DDI_SUCCESS); 2299 } 2300 2301 /*ARGSUSED*/ 2302 static int 2303 rootnex_coredma_clear_cookies(dev_info_t *dip, ddi_dma_handle_t handle) 2304 { 2305 ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle; 2306 rootnex_dma_t *dma = (rootnex_dma_t *)hp->dmai_private; 2307 rootnex_window_t *window; 2308 ddi_dma_cookie_t *cur_cookiep; 2309 ddi_dma_cookie_t *cookie_array; 2310 uint_t ccount; 2311 2312 /* check if cookies have not been switched */ 2313 if (dma->dp_need_to_switch_cookies == B_FALSE) 2314 return (DDI_SUCCESS); 2315 2316 ASSERT(dma->dp_saved_cookies); 2317 2318 if (dma->dp_window) { 2319 window = &dma->dp_window[dma->dp_current_win]; 2320 cookie_array = window->wd_first_cookie; 2321 window->wd_first_cookie = dma->dp_saved_cookies; 2322 dma->dp_saved_cookies = NULL; 2323 ccount = window->wd_cookie_cnt; 2324 cur_cookiep = (hp->dmai_cookie - cookie_array) 2325 + window->wd_first_cookie; 2326 } else { 2327 cookie_array = dma->dp_cookies; 2328 dma->dp_cookies = dma->dp_saved_cookies; 2329 dma->dp_saved_cookies = NULL; 2330 ccount = dma->dp_sglinfo.si_sgl_size; 2331 cur_cookiep = (hp->dmai_cookie - cookie_array) 2332 + dma->dp_cookies; 2333 } 2334 2335 kmem_free(cookie_array, sizeof (ddi_dma_cookie_t) * ccount); 2336 2337 hp->dmai_cookie = cur_cookiep; 2338 2339 dma->dp_need_to_switch_cookies = B_FALSE; 2340 2341 return (DDI_SUCCESS); 2342 } 2343 2344 #endif 2345 2346 /* 2347 * rootnex_verify_buffer() 2348 * verify buffer wasn't free'd 2349 */ 2350 static int 2351 rootnex_verify_buffer(rootnex_dma_t *dma) 2352 { 2353 page_t **pplist; 2354 caddr_t vaddr; 2355 uint_t pcnt; 2356 uint_t poff; 2357 page_t *pp; 2358 char b; 2359 int i; 2360 2361 /* Figure out how many pages this buffer occupies */ 2362 if (dma->dp_dma.dmao_type == DMA_OTYP_PAGES) { 2363 poff = dma->dp_dma.dmao_obj.pp_obj.pp_offset & MMU_PAGEOFFSET; 2364 } else { 2365 vaddr = dma->dp_dma.dmao_obj.virt_obj.v_addr; 2366 poff = (uintptr_t)vaddr & MMU_PAGEOFFSET; 2367 } 2368 pcnt = mmu_btopr(dma->dp_dma.dmao_size + poff); 2369 2370 switch (dma->dp_dma.dmao_type) { 2371 case DMA_OTYP_PAGES: 2372 /* 2373 * for a linked list of pp's walk through them to make sure 2374 * they're locked and not free. 2375 */ 2376 pp = dma->dp_dma.dmao_obj.pp_obj.pp_pp; 2377 for (i = 0; i < pcnt; i++) { 2378 if (PP_ISFREE(pp) || !PAGE_LOCKED(pp)) { 2379 return (DDI_FAILURE); 2380 } 2381 pp = pp->p_next; 2382 } 2383 break; 2384 2385 case DMA_OTYP_VADDR: 2386 case DMA_OTYP_BUFVADDR: 2387 pplist = dma->dp_dma.dmao_obj.virt_obj.v_priv; 2388 /* 2389 * for an array of pp's walk through them to make sure they're 2390 * not free. It's possible that they may not be locked. 
2391 */ 2392 if (pplist) { 2393 for (i = 0; i < pcnt; i++) { 2394 if (PP_ISFREE(pplist[i])) { 2395 return (DDI_FAILURE); 2396 } 2397 } 2398 2399 /* For a virtual address, try to peek at each page */ 2400 } else { 2401 if (dma->dp_sglinfo.si_asp == &kas) { 2402 for (i = 0; i < pcnt; i++) { 2403 if (ddi_peek8(NULL, vaddr, &b) == 2404 DDI_FAILURE) 2405 return (DDI_FAILURE); 2406 vaddr += MMU_PAGESIZE; 2407 } 2408 } 2409 } 2410 break; 2411 2412 default: 2413 ASSERT(0); 2414 break; 2415 } 2416 2417 return (DDI_SUCCESS); 2418 } 2419 2420 2421 /* 2422 * rootnex_clean_dmahdl() 2423 * Clean the dma handle. This should be called on a handle alloc and an 2424 * unbind handle. Set the handle state to the default settings. 2425 */ 2426 static void 2427 rootnex_clean_dmahdl(ddi_dma_impl_t *hp) 2428 { 2429 rootnex_dma_t *dma; 2430 2431 2432 dma = (rootnex_dma_t *)hp->dmai_private; 2433 2434 hp->dmai_nwin = 0; 2435 dma->dp_current_cookie = 0; 2436 dma->dp_copybuf_size = 0; 2437 dma->dp_window = NULL; 2438 dma->dp_cbaddr = NULL; 2439 dma->dp_inuse = B_FALSE; 2440 dma->dp_need_to_free_cookie = B_FALSE; 2441 dma->dp_need_to_switch_cookies = B_FALSE; 2442 dma->dp_saved_cookies = NULL; 2443 dma->dp_sleep_flags = KM_PANIC; 2444 dma->dp_need_to_free_window = B_FALSE; 2445 dma->dp_partial_required = B_FALSE; 2446 dma->dp_trim_required = B_FALSE; 2447 dma->dp_sglinfo.si_copybuf_req = 0; 2448 #if !defined(__amd64) 2449 dma->dp_cb_remaping = B_FALSE; 2450 dma->dp_kva = NULL; 2451 #endif 2452 2453 /* FMA related initialization */ 2454 hp->dmai_fault = 0; 2455 hp->dmai_fault_check = NULL; 2456 hp->dmai_fault_notify = NULL; 2457 hp->dmai_error.err_ena = 0; 2458 hp->dmai_error.err_status = DDI_FM_OK; 2459 hp->dmai_error.err_expected = DDI_FM_ERR_UNEXPECTED; 2460 hp->dmai_error.err_ontrap = NULL; 2461 hp->dmai_error.err_fep = NULL; 2462 hp->dmai_error.err_cf = NULL; 2463 } 2464 2465 2466 /* 2467 * rootnex_valid_alloc_parms() 2468 * Called in ddi_dma_alloc_handle path to validate its parameters. 2469 */ 2470 static int 2471 rootnex_valid_alloc_parms(ddi_dma_attr_t *attr, uint_t maxsegmentsize) 2472 { 2473 if ((attr->dma_attr_seg < MMU_PAGEOFFSET) || 2474 (attr->dma_attr_count_max < MMU_PAGEOFFSET) || 2475 (attr->dma_attr_granular > MMU_PAGESIZE) || 2476 (attr->dma_attr_maxxfer < MMU_PAGESIZE)) { 2477 return (DDI_DMA_BADATTR); 2478 } 2479 2480 if (attr->dma_attr_addr_hi <= attr->dma_attr_addr_lo) { 2481 return (DDI_DMA_BADATTR); 2482 } 2483 2484 if ((attr->dma_attr_seg & MMU_PAGEOFFSET) != MMU_PAGEOFFSET || 2485 MMU_PAGESIZE & (attr->dma_attr_granular - 1) || 2486 attr->dma_attr_sgllen <= 0) { 2487 return (DDI_DMA_BADATTR); 2488 } 2489 2490 /* We should be able to DMA into every byte offset in a page */ 2491 if (maxsegmentsize < MMU_PAGESIZE) { 2492 return (DDI_DMA_BADATTR); 2493 } 2494 2495 return (DDI_SUCCESS); 2496 } 2497 2498 2499 /* 2500 * rootnex_valid_bind_parms() 2501 * Called in ddi_dma_*_bind_handle path to validate its parameters. 2502 */ 2503 /* ARGSUSED */ 2504 static int 2505 rootnex_valid_bind_parms(ddi_dma_req_t *dmareq, ddi_dma_attr_t *attr) 2506 { 2507 #if !defined(__amd64) 2508 /* 2509 * we only support up to a 2G-1 transfer size on 32-bit kernels so 2510 * we can track the offset for the obsoleted interfaces. 2511 */ 2512 if (dmareq->dmar_object.dmao_size > 0x7FFFFFFF) { 2513 return (DDI_DMA_TOOBIG); 2514 } 2515 #endif 2516 2517 return (DDI_SUCCESS); 2518 } 2519 2520 2521 /* 2522 * rootnex_get_sgl() 2523 * Called in bind fastpath to get the sgl. 
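 *
 *    Hedged worked example (4K pages, addresses made up): a 0x2800-byte
 *    buffer that starts 0x800 bytes into its first page and whose frames
 *    happen to be physically contiguous collapses into the single cookie
 *	{ dmac_laddress = first_pa + 0x800, dmac_size = 0x2800 }
 *    while a physical discontiguity, a segment-boundary issue, or
 *    exceeding si_max_cookie_size starts a new cookie instead.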
Most of this will be replaced 2524 * with a call to the vm layer when vm2.0 comes around... 2525 */ 2526 static void 2527 rootnex_get_sgl(ddi_dma_obj_t *dmar_object, ddi_dma_cookie_t *sgl, 2528 rootnex_sglinfo_t *sglinfo) 2529 { 2530 ddi_dma_atyp_t buftype; 2531 rootnex_addr_t raddr; 2532 uint64_t last_page; 2533 uint64_t offset; 2534 uint64_t addrhi; 2535 uint64_t addrlo; 2536 uint64_t maxseg; 2537 page_t **pplist; 2538 uint64_t paddr; 2539 uint32_t psize; 2540 uint32_t size; 2541 caddr_t vaddr; 2542 uint_t pcnt; 2543 page_t *pp; 2544 uint_t cnt; 2545 2546 2547 /* shortcuts */ 2548 pplist = dmar_object->dmao_obj.virt_obj.v_priv; 2549 vaddr = dmar_object->dmao_obj.virt_obj.v_addr; 2550 maxseg = sglinfo->si_max_cookie_size; 2551 buftype = dmar_object->dmao_type; 2552 addrhi = sglinfo->si_max_addr; 2553 addrlo = sglinfo->si_min_addr; 2554 size = dmar_object->dmao_size; 2555 2556 pcnt = 0; 2557 cnt = 0; 2558 2559 /* 2560 * if we were passed down a linked list of pages, i.e. pointer to 2561 * page_t, use this to get our physical address and buf offset. 2562 */ 2563 if (buftype == DMA_OTYP_PAGES) { 2564 pp = dmar_object->dmao_obj.pp_obj.pp_pp; 2565 ASSERT(!PP_ISFREE(pp) && PAGE_LOCKED(pp)); 2566 offset = dmar_object->dmao_obj.pp_obj.pp_offset & 2567 MMU_PAGEOFFSET; 2568 paddr = pfn_to_pa(pp->p_pagenum) + offset; 2569 psize = MIN(size, (MMU_PAGESIZE - offset)); 2570 pp = pp->p_next; 2571 sglinfo->si_asp = NULL; 2572 2573 /* 2574 * We weren't passed down a linked list of pages, but if we were passed 2575 * down an array of pages, use this to get our physical address and buf 2576 * offset. 2577 */ 2578 } else if (pplist != NULL) { 2579 ASSERT((buftype == DMA_OTYP_VADDR) || 2580 (buftype == DMA_OTYP_BUFVADDR)); 2581 2582 offset = (uintptr_t)vaddr & MMU_PAGEOFFSET; 2583 sglinfo->si_asp = dmar_object->dmao_obj.virt_obj.v_as; 2584 if (sglinfo->si_asp == NULL) { 2585 sglinfo->si_asp = &kas; 2586 } 2587 2588 ASSERT(!PP_ISFREE(pplist[pcnt])); 2589 paddr = pfn_to_pa(pplist[pcnt]->p_pagenum); 2590 paddr += offset; 2591 psize = MIN(size, (MMU_PAGESIZE - offset)); 2592 pcnt++; 2593 2594 /* 2595 * All we have is a virtual address, we'll need to call into the VM 2596 * to get the physical address. 2597 */ 2598 } else { 2599 ASSERT((buftype == DMA_OTYP_VADDR) || 2600 (buftype == DMA_OTYP_BUFVADDR)); 2601 2602 offset = (uintptr_t)vaddr & MMU_PAGEOFFSET; 2603 sglinfo->si_asp = dmar_object->dmao_obj.virt_obj.v_as; 2604 if (sglinfo->si_asp == NULL) { 2605 sglinfo->si_asp = &kas; 2606 } 2607 2608 paddr = pfn_to_pa(hat_getpfnum(sglinfo->si_asp->a_hat, vaddr)); 2609 paddr += offset; 2610 psize = MIN(size, (MMU_PAGESIZE - offset)); 2611 vaddr += psize; 2612 } 2613 2614 #ifdef __xpv 2615 /* 2616 * If we're dom0, we're using a real device so we need to load 2617 * the cookies with MFNs instead of PFNs. 2618 */ 2619 raddr = ROOTNEX_PADDR_TO_RBASE(xen_info, paddr); 2620 #else 2621 raddr = paddr; 2622 #endif 2623 2624 /* 2625 * Setup the first cookie with the physical address of the page and the 2626 * size of the page (which takes into account the initial offset into 2627 * the page. 2628 */ 2629 sgl[cnt].dmac_laddress = raddr; 2630 sgl[cnt].dmac_size = psize; 2631 sgl[cnt].dmac_type = 0; 2632 2633 /* 2634 * Save away the buffer offset into the page. We'll need this later in 2635 * the copy buffer code to help figure out the page index within the 2636 * buffer and the offset into the current page. 
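	 * Hedged worked example (4K pages, values made up): with
	 * si_buf_offset = 0x200, a later cur_offset of 0x1e00 gives a page
	 * index of (0x200 + 0x1e00) >> MMU_PAGESHIFT = 2 and an in-page
	 * offset of (0x200 + 0x1e00) & MMU_PAGEOFFSET = 0.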
2637 */ 2638 sglinfo->si_buf_offset = offset; 2639 2640 /* 2641 * If the DMA engine can't reach the physical address, increase how 2642 * much copy buffer we need. We always increase by pagesize so we don't 2643 * have to worry about converting offsets. Set a flag in the cookies 2644 * dmac_type to indicate that it uses the copy buffer. If this isn't the 2645 * last cookie, go to the next cookie (since we separate each page which 2646 * uses the copy buffer in case the copy buffer is not physically 2647 * contiguous. 2648 */ 2649 if ((raddr < addrlo) || ((raddr + psize) > addrhi)) { 2650 sglinfo->si_copybuf_req += MMU_PAGESIZE; 2651 sgl[cnt].dmac_type = ROOTNEX_USES_COPYBUF; 2652 if ((cnt + 1) < sglinfo->si_max_pages) { 2653 cnt++; 2654 sgl[cnt].dmac_laddress = 0; 2655 sgl[cnt].dmac_size = 0; 2656 sgl[cnt].dmac_type = 0; 2657 } 2658 } 2659 2660 /* 2661 * save this page's physical address so we can figure out if the next 2662 * page is physically contiguous. Keep decrementing size until we are 2663 * done with the buffer. 2664 */ 2665 last_page = raddr & MMU_PAGEMASK; 2666 size -= psize; 2667 2668 while (size > 0) { 2669 /* Get the size for this page (i.e. partial or full page) */ 2670 psize = MIN(size, MMU_PAGESIZE); 2671 2672 if (buftype == DMA_OTYP_PAGES) { 2673 /* get the paddr from the page_t */ 2674 ASSERT(!PP_ISFREE(pp) && PAGE_LOCKED(pp)); 2675 paddr = pfn_to_pa(pp->p_pagenum); 2676 pp = pp->p_next; 2677 } else if (pplist != NULL) { 2678 /* index into the array of page_t's to get the paddr */ 2679 ASSERT(!PP_ISFREE(pplist[pcnt])); 2680 paddr = pfn_to_pa(pplist[pcnt]->p_pagenum); 2681 pcnt++; 2682 } else { 2683 /* call into the VM to get the paddr */ 2684 paddr = pfn_to_pa(hat_getpfnum(sglinfo->si_asp->a_hat, 2685 vaddr)); 2686 vaddr += psize; 2687 } 2688 2689 #ifdef __xpv 2690 /* 2691 * If we're dom0, we're using a real device so we need to load 2692 * the cookies with MFNs instead of PFNs. 2693 */ 2694 raddr = ROOTNEX_PADDR_TO_RBASE(xen_info, paddr); 2695 #else 2696 raddr = paddr; 2697 #endif 2698 /* check to see if this page needs the copy buffer */ 2699 if ((raddr < addrlo) || ((raddr + psize) > addrhi)) { 2700 sglinfo->si_copybuf_req += MMU_PAGESIZE; 2701 2702 /* 2703 * if there is something in the current cookie, go to 2704 * the next one. We only want one page in a cookie which 2705 * uses the copybuf since the copybuf doesn't have to 2706 * be physically contiguous. 2707 */ 2708 if (sgl[cnt].dmac_size != 0) { 2709 cnt++; 2710 } 2711 sgl[cnt].dmac_laddress = raddr; 2712 sgl[cnt].dmac_size = psize; 2713 #if defined(__amd64) 2714 sgl[cnt].dmac_type = ROOTNEX_USES_COPYBUF; 2715 #else 2716 /* 2717 * save the buf offset for 32-bit kernel. used in the 2718 * obsoleted interfaces. 2719 */ 2720 sgl[cnt].dmac_type = ROOTNEX_USES_COPYBUF | 2721 (dmar_object->dmao_size - size); 2722 #endif 2723 /* if this isn't the last cookie, go to the next one */ 2724 if ((cnt + 1) < sglinfo->si_max_pages) { 2725 cnt++; 2726 sgl[cnt].dmac_laddress = 0; 2727 sgl[cnt].dmac_size = 0; 2728 sgl[cnt].dmac_type = 0; 2729 } 2730 2731 /* 2732 * this page didn't need the copy buffer, if it's not physically 2733 * contiguous, or it would put us over a segment boundary, or it 2734 * puts us over the max cookie size, or the current sgl doesn't 2735 * have anything in it. 
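		 * Hedged example (4K pages, addresses made up): if the
		 * previous page started at PA 0x10000 and this page starts
		 * at PA 0x13000, then last_page + MMU_PAGESIZE == 0x11000
		 * != 0x13000, so this page starts a new cookie.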
2736 */ 2737 } else if (((last_page + MMU_PAGESIZE) != raddr) || 2738 !(raddr & sglinfo->si_segmask) || 2739 ((sgl[cnt].dmac_size + psize) > maxseg) || 2740 (sgl[cnt].dmac_size == 0)) { 2741 /* 2742 * if we're not already in a new cookie, go to the next 2743 * cookie. 2744 */ 2745 if (sgl[cnt].dmac_size != 0) { 2746 cnt++; 2747 } 2748 2749 /* save the cookie information */ 2750 sgl[cnt].dmac_laddress = raddr; 2751 sgl[cnt].dmac_size = psize; 2752 #if defined(__amd64) 2753 sgl[cnt].dmac_type = 0; 2754 #else 2755 /* 2756 * save the buf offset for 32-bit kernel. used in the 2757 * obsoleted interfaces. 2758 */ 2759 sgl[cnt].dmac_type = dmar_object->dmao_size - size; 2760 #endif 2761 2762 /* 2763 * this page didn't need the copy buffer, it is physically 2764 * contiguous with the last page, and it's <= the max cookie 2765 * size. 2766 */ 2767 } else { 2768 sgl[cnt].dmac_size += psize; 2769 2770 /* 2771 * if this exactly == the maximum cookie size, and 2772 * it isn't the last cookie, go to the next cookie. 2773 */ 2774 if (((sgl[cnt].dmac_size + psize) == maxseg) && 2775 ((cnt + 1) < sglinfo->si_max_pages)) { 2776 cnt++; 2777 sgl[cnt].dmac_laddress = 0; 2778 sgl[cnt].dmac_size = 0; 2779 sgl[cnt].dmac_type = 0; 2780 } 2781 } 2782 2783 /* 2784 * save this page's physical address so we can figure out if the 2785 * next page is physically contiguous. Keep decrementing size 2786 * until we are done with the buffer. 2787 */ 2788 last_page = raddr; 2789 size -= psize; 2790 } 2791 2792 /* we're done, save away how many cookies the sgl has */ 2793 if (sgl[cnt].dmac_size == 0) { 2794 ASSERT(cnt < sglinfo->si_max_pages); 2795 sglinfo->si_sgl_size = cnt; 2796 } else { 2797 sglinfo->si_sgl_size = cnt + 1; 2798 } 2799 } 2800 2801 2802 /* 2803 * rootnex_bind_slowpath() 2804 * Call in the bind path if the calling driver can't use the sgl without 2805 * modifying it. We either need to use the copy buffer and/or we will end up 2806 * with a partial bind. 2807 */ 2808 static int 2809 rootnex_bind_slowpath(ddi_dma_impl_t *hp, struct ddi_dma_req *dmareq, 2810 rootnex_dma_t *dma, ddi_dma_attr_t *attr, int kmflag) 2811 { 2812 rootnex_sglinfo_t *sinfo; 2813 rootnex_window_t *window; 2814 ddi_dma_cookie_t *cookie; 2815 size_t copybuf_used; 2816 size_t dmac_size; 2817 boolean_t partial; 2818 off_t cur_offset; 2819 page_t *cur_pp; 2820 major_t mnum; 2821 int e; 2822 int i; 2823 2824 2825 sinfo = &dma->dp_sglinfo; 2826 copybuf_used = 0; 2827 partial = B_FALSE; 2828 2829 /* 2830 * If we're using the copybuf, set the copybuf state in dma struct. 2831 * Needs to be first since it sets the copy buffer size. 2832 */ 2833 if (sinfo->si_copybuf_req != 0) { 2834 e = rootnex_setup_copybuf(hp, dmareq, dma, attr); 2835 if (e != DDI_SUCCESS) { 2836 return (e); 2837 } 2838 } else { 2839 dma->dp_copybuf_size = 0; 2840 } 2841 2842 /* 2843 * Figure out if we need to do a partial mapping. If so, figure out 2844 * if we need to trim the buffers when we munge the sgl. 
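	 * Hedged worked example (numbers made up): if the device's
	 * dma_attr_sgllen is 17 but rootnex_get_sgl() produced 40 cookies,
	 * a partial mapping is required; and if dma_attr_granular is 512
	 * (i.e. != 1), the window boundaries additionally have to be
	 * trimmed to a multiple of 512.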
2845 */ 2846 if ((dma->dp_copybuf_size < sinfo->si_copybuf_req) || 2847 (dma->dp_dma.dmao_size > dma->dp_maxxfer) || 2848 (attr->dma_attr_sgllen < sinfo->si_sgl_size)) { 2849 dma->dp_partial_required = B_TRUE; 2850 if (attr->dma_attr_granular != 1) { 2851 dma->dp_trim_required = B_TRUE; 2852 } 2853 } else { 2854 dma->dp_partial_required = B_FALSE; 2855 dma->dp_trim_required = B_FALSE; 2856 } 2857 2858 /* If we need to do a partial bind, make sure the driver supports it */ 2859 if (dma->dp_partial_required && 2860 !(dmareq->dmar_flags & DDI_DMA_PARTIAL)) { 2861 2862 mnum = ddi_driver_major(dma->dp_dip); 2863 /* 2864 * patchable which allows us to print one warning per major 2865 * number. 2866 */ 2867 if ((rootnex_bind_warn) && 2868 ((rootnex_warn_list[mnum] & ROOTNEX_BIND_WARNING) == 0)) { 2869 rootnex_warn_list[mnum] |= ROOTNEX_BIND_WARNING; 2870 cmn_err(CE_WARN, "!%s: coding error detected, the " 2871 "driver is using ddi_dma_attr(9S) incorrectly. " 2872 "There is a small risk of data corruption in " 2873 "particular with large I/Os. The driver should be " 2874 "replaced with a corrected version for proper " 2875 "system operation. To disable this warning, add " 2876 "'set rootnex:rootnex_bind_warn=0' to " 2877 "/etc/system(4).", ddi_driver_name(dma->dp_dip)); 2878 } 2879 return (DDI_DMA_TOOBIG); 2880 } 2881 2882 /* 2883 * we might need multiple windows, setup state to handle them. In this 2884 * code path, we will have at least one window. 2885 */ 2886 e = rootnex_setup_windows(hp, dma, attr, kmflag); 2887 if (e != DDI_SUCCESS) { 2888 rootnex_teardown_copybuf(dma); 2889 return (e); 2890 } 2891 2892 window = &dma->dp_window[0]; 2893 cookie = &dma->dp_cookies[0]; 2894 cur_offset = 0; 2895 rootnex_init_win(hp, dma, window, cookie, cur_offset); 2896 if (dmareq->dmar_object.dmao_type == DMA_OTYP_PAGES) { 2897 cur_pp = dmareq->dmar_object.dmao_obj.pp_obj.pp_pp; 2898 } 2899 2900 /* loop though all the cookies we got back from get_sgl() */ 2901 for (i = 0; i < sinfo->si_sgl_size; i++) { 2902 /* 2903 * If we're using the copy buffer, check this cookie and setup 2904 * its associated copy buffer state. If this cookie uses the 2905 * copy buffer, make sure we sync this window during dma_sync. 2906 */ 2907 if (dma->dp_copybuf_size > 0) { 2908 rootnex_setup_cookie(&dmareq->dmar_object, dma, cookie, 2909 cur_offset, ©buf_used, &cur_pp); 2910 if (cookie->dmac_type & ROOTNEX_USES_COPYBUF) { 2911 window->wd_dosync = B_TRUE; 2912 } 2913 } 2914 2915 /* 2916 * save away the cookie size, since it could be modified in 2917 * the windowing code. 2918 */ 2919 dmac_size = cookie->dmac_size; 2920 2921 /* if we went over max copybuf size */ 2922 if (dma->dp_copybuf_size && 2923 (copybuf_used > dma->dp_copybuf_size)) { 2924 partial = B_TRUE; 2925 e = rootnex_copybuf_window_boundary(hp, dma, &window, 2926 cookie, cur_offset, ©buf_used); 2927 if (e != DDI_SUCCESS) { 2928 rootnex_teardown_copybuf(dma); 2929 rootnex_teardown_windows(dma); 2930 return (e); 2931 } 2932 2933 /* 2934 * if the coookie uses the copy buffer, make sure the 2935 * new window we just moved to is set to sync. 
2936 */ 2937 if (cookie->dmac_type & ROOTNEX_USES_COPYBUF) { 2938 window->wd_dosync = B_TRUE; 2939 } 2940 DTRACE_PROBE1(rootnex__copybuf__window, dev_info_t *, 2941 dma->dp_dip); 2942 2943 /* if the cookie cnt == max sgllen, move to the next window */ 2944 } else if (window->wd_cookie_cnt >= attr->dma_attr_sgllen) { 2945 partial = B_TRUE; 2946 ASSERT(window->wd_cookie_cnt == attr->dma_attr_sgllen); 2947 e = rootnex_sgllen_window_boundary(hp, dma, &window, 2948 cookie, attr, cur_offset); 2949 if (e != DDI_SUCCESS) { 2950 rootnex_teardown_copybuf(dma); 2951 rootnex_teardown_windows(dma); 2952 return (e); 2953 } 2954 2955 /* 2956 * if the coookie uses the copy buffer, make sure the 2957 * new window we just moved to is set to sync. 2958 */ 2959 if (cookie->dmac_type & ROOTNEX_USES_COPYBUF) { 2960 window->wd_dosync = B_TRUE; 2961 } 2962 DTRACE_PROBE1(rootnex__sgllen__window, dev_info_t *, 2963 dma->dp_dip); 2964 2965 /* else if we will be over maxxfer */ 2966 } else if ((window->wd_size + dmac_size) > 2967 dma->dp_maxxfer) { 2968 partial = B_TRUE; 2969 e = rootnex_maxxfer_window_boundary(hp, dma, &window, 2970 cookie); 2971 if (e != DDI_SUCCESS) { 2972 rootnex_teardown_copybuf(dma); 2973 rootnex_teardown_windows(dma); 2974 return (e); 2975 } 2976 2977 /* 2978 * if the coookie uses the copy buffer, make sure the 2979 * new window we just moved to is set to sync. 2980 */ 2981 if (cookie->dmac_type & ROOTNEX_USES_COPYBUF) { 2982 window->wd_dosync = B_TRUE; 2983 } 2984 DTRACE_PROBE1(rootnex__maxxfer__window, dev_info_t *, 2985 dma->dp_dip); 2986 2987 /* else this cookie fits in the current window */ 2988 } else { 2989 window->wd_cookie_cnt++; 2990 window->wd_size += dmac_size; 2991 } 2992 2993 /* track our offset into the buffer, go to the next cookie */ 2994 ASSERT(dmac_size <= dma->dp_dma.dmao_size); 2995 ASSERT(cookie->dmac_size <= dmac_size); 2996 cur_offset += dmac_size; 2997 cookie++; 2998 } 2999 3000 /* if we ended up with a zero sized window in the end, clean it up */ 3001 if (window->wd_size == 0) { 3002 hp->dmai_nwin--; 3003 window--; 3004 } 3005 3006 ASSERT(window->wd_trim.tr_trim_last == B_FALSE); 3007 3008 if (!partial) { 3009 return (DDI_DMA_MAPPED); 3010 } 3011 3012 ASSERT(dma->dp_partial_required); 3013 return (DDI_DMA_PARTIAL_MAP); 3014 } 3015 3016 3017 /* 3018 * rootnex_setup_copybuf() 3019 * Called in bind slowpath. Figures out if we're going to use the copy 3020 * buffer, and if we do, sets up the basic state to handle it. 3021 */ 3022 static int 3023 rootnex_setup_copybuf(ddi_dma_impl_t *hp, struct ddi_dma_req *dmareq, 3024 rootnex_dma_t *dma, ddi_dma_attr_t *attr) 3025 { 3026 rootnex_sglinfo_t *sinfo; 3027 ddi_dma_attr_t lattr; 3028 size_t max_copybuf; 3029 int cansleep; 3030 int e; 3031 #if !defined(__amd64) 3032 int vmflag; 3033 #endif 3034 3035 3036 sinfo = &dma->dp_sglinfo; 3037 3038 /* read this first so it's consistent through the routine */ 3039 max_copybuf = i_ddi_copybuf_size() & MMU_PAGEMASK; 3040 3041 /* We need to call into the rootnex on ddi_dma_sync() */ 3042 hp->dmai_rflags &= ~DMP_NOSYNC; 3043 3044 /* make sure the copybuf size <= the max size */ 3045 dma->dp_copybuf_size = MIN(sinfo->si_copybuf_req, max_copybuf); 3046 ASSERT((dma->dp_copybuf_size & MMU_PAGEOFFSET) == 0); 3047 3048 #if !defined(__amd64) 3049 /* 3050 * if we don't have kva space to copy to/from, allocate the KVA space 3051 * now. We only do this for the 32-bit kernel. We use seg kpm space for 3052 * the 64-bit kernel. 
3053 */ 3054 if ((dmareq->dmar_object.dmao_type == DMA_OTYP_PAGES) || 3055 (dmareq->dmar_object.dmao_obj.virt_obj.v_as != NULL)) { 3056 3057 /* convert the sleep flags */ 3058 if (dmareq->dmar_fp == DDI_DMA_SLEEP) { 3059 vmflag = VM_SLEEP; 3060 } else { 3061 vmflag = VM_NOSLEEP; 3062 } 3063 3064 /* allocate Kernel VA space that we can bcopy to/from */ 3065 dma->dp_kva = vmem_alloc(heap_arena, dma->dp_copybuf_size, 3066 vmflag); 3067 if (dma->dp_kva == NULL) { 3068 return (DDI_DMA_NORESOURCES); 3069 } 3070 } 3071 #endif 3072 3073 /* convert the sleep flags */ 3074 if (dmareq->dmar_fp == DDI_DMA_SLEEP) { 3075 cansleep = 1; 3076 } else { 3077 cansleep = 0; 3078 } 3079 3080 /* 3081 * Allocate the actual copy buffer. This needs to fit within the DMA 3082 * engine limits, so we can't use kmem_alloc... We don't need 3083 * contiguous memory (sgllen) since we will be forcing windows on 3084 * sgllen anyway. 3085 */ 3086 lattr = *attr; 3087 lattr.dma_attr_align = MMU_PAGESIZE; 3088 /* 3089 * this should be < 0 to indicate no limit, but due to a bug in 3090 * the rootnex, we'll set it to the maximum positive int. 3091 */ 3092 lattr.dma_attr_sgllen = 0x7fffffff; 3093 e = i_ddi_mem_alloc(dma->dp_dip, &lattr, dma->dp_copybuf_size, cansleep, 3094 0, NULL, &dma->dp_cbaddr, &dma->dp_cbsize, NULL); 3095 if (e != DDI_SUCCESS) { 3096 #if !defined(__amd64) 3097 if (dma->dp_kva != NULL) { 3098 vmem_free(heap_arena, dma->dp_kva, 3099 dma->dp_copybuf_size); 3100 } 3101 #endif 3102 return (DDI_DMA_NORESOURCES); 3103 } 3104 3105 DTRACE_PROBE2(rootnex__alloc__copybuf, dev_info_t *, dma->dp_dip, 3106 size_t, dma->dp_copybuf_size); 3107 3108 return (DDI_SUCCESS); 3109 } 3110 3111 3112 /* 3113 * rootnex_setup_windows() 3114 * Called in bind slowpath to setup the window state. We always have windows 3115 * in the slowpath. Even if the window count = 1. 3116 */ 3117 static int 3118 rootnex_setup_windows(ddi_dma_impl_t *hp, rootnex_dma_t *dma, 3119 ddi_dma_attr_t *attr, int kmflag) 3120 { 3121 rootnex_window_t *windowp; 3122 rootnex_sglinfo_t *sinfo; 3123 size_t copy_state_size; 3124 size_t win_state_size; 3125 size_t state_available; 3126 size_t space_needed; 3127 uint_t copybuf_win; 3128 uint_t maxxfer_win; 3129 size_t space_used; 3130 uint_t sglwin; 3131 3132 3133 sinfo = &dma->dp_sglinfo; 3134 3135 dma->dp_current_win = 0; 3136 hp->dmai_nwin = 0; 3137 3138 /* If we don't need to do a partial, we only have one window */ 3139 if (!dma->dp_partial_required) { 3140 dma->dp_max_win = 1; 3141 3142 /* 3143 * we need multiple windows, need to figure out the worse case number 3144 * of windows. 3145 */ 3146 } else { 3147 /* 3148 * if we need windows because we need more copy buffer that 3149 * we allow, the worse case number of windows we could need 3150 * here would be (copybuf space required / copybuf space that 3151 * we have) plus one for remainder, and plus 2 to handle the 3152 * extra pages on the trim for the first and last pages of the 3153 * buffer (a page is the minimum window size so under the right 3154 * attr settings, you could have a window for each page). 3155 * The last page will only be hit here if the size is not a 3156 * multiple of the granularity (which theoretically shouldn't 3157 * be the case but never has been enforced, so we could have 3158 * broken things without it). 
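		 * Hedged worked example (sizes made up): with
		 * si_copybuf_req = 40K and dp_copybuf_size = 8K, the worst
		 * case is (40K / 8K) + 1 + 2 = 8 copybuf-limited windows.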
3159 */ 3160 if (sinfo->si_copybuf_req > dma->dp_copybuf_size) { 3161 ASSERT(dma->dp_copybuf_size > 0); 3162 copybuf_win = (sinfo->si_copybuf_req / 3163 dma->dp_copybuf_size) + 1 + 2; 3164 } else { 3165 copybuf_win = 0; 3166 } 3167 3168 /* 3169 * if we need windows because we have more cookies than the H/W 3170 * can handle, the number of windows we would need here would 3171 * be (cookie count / cookies count H/W supports) plus one for 3172 * remainder, and plus 2 to handle the extra pages on the trim 3173 * (see above comment about trim) 3174 */ 3175 if (attr->dma_attr_sgllen < sinfo->si_sgl_size) { 3176 sglwin = ((sinfo->si_sgl_size / attr->dma_attr_sgllen) 3177 + 1) + 2; 3178 } else { 3179 sglwin = 0; 3180 } 3181 3182 /* 3183 * if we need windows because we're binding more memory than the 3184 * H/W can transfer at once, the number of windows we would need 3185 * here would be (xfer count / max xfer H/W supports) plus one 3186 * for remainder, and plus 2 to handle the extra pages on the 3187 * trim (see above comment about trim) 3188 */ 3189 if (dma->dp_dma.dmao_size > dma->dp_maxxfer) { 3190 maxxfer_win = (dma->dp_dma.dmao_size / 3191 dma->dp_maxxfer) + 1 + 2; 3192 } else { 3193 maxxfer_win = 0; 3194 } 3195 dma->dp_max_win = copybuf_win + sglwin + maxxfer_win; 3196 ASSERT(dma->dp_max_win > 0); 3197 } 3198 win_state_size = dma->dp_max_win * sizeof (rootnex_window_t); 3199 3200 /* 3201 * Get space for window and potential copy buffer state. Before we 3202 * go and allocate memory, see if we can get away with using what's 3203 * left in the pre-allocted state or the dynamically allocated sgl. 3204 */ 3205 space_used = (uintptr_t)(sinfo->si_sgl_size * 3206 sizeof (ddi_dma_cookie_t)); 3207 3208 /* if we dynamically allocated space for the cookies */ 3209 if (dma->dp_need_to_free_cookie) { 3210 /* if we have more space in the pre-allocted buffer, use it */ 3211 ASSERT(space_used <= dma->dp_cookie_size); 3212 if ((dma->dp_cookie_size - space_used) <= 3213 rootnex_state->r_prealloc_size) { 3214 state_available = rootnex_state->r_prealloc_size; 3215 windowp = (rootnex_window_t *)dma->dp_prealloc_buffer; 3216 3217 /* 3218 * else, we have more free space in the dynamically allocated 3219 * buffer, i.e. the buffer wasn't worse case fragmented so we 3220 * didn't need a lot of cookies. 3221 */ 3222 } else { 3223 state_available = dma->dp_cookie_size - space_used; 3224 windowp = (rootnex_window_t *) 3225 &dma->dp_cookies[sinfo->si_sgl_size]; 3226 } 3227 3228 /* we used the pre-alloced buffer */ 3229 } else { 3230 ASSERT(space_used <= rootnex_state->r_prealloc_size); 3231 state_available = rootnex_state->r_prealloc_size - space_used; 3232 windowp = (rootnex_window_t *) 3233 &dma->dp_cookies[sinfo->si_sgl_size]; 3234 } 3235 3236 /* 3237 * figure out how much state we need to track the copy buffer. Add an 3238 * addition 8 bytes for pointer alignemnt later. 3239 */ 3240 if (dma->dp_copybuf_size > 0) { 3241 copy_state_size = sinfo->si_max_pages * 3242 sizeof (rootnex_pgmap_t); 3243 } else { 3244 copy_state_size = 0; 3245 } 3246 /* add an additional 8 bytes for pointer alignment */ 3247 space_needed = win_state_size + copy_state_size + 0x8; 3248 3249 /* if we have enough space already, use it */ 3250 if (state_available >= space_needed) { 3251 dma->dp_window = windowp; 3252 dma->dp_need_to_free_window = B_FALSE; 3253 3254 /* not enough space, need to allocate more. 
*/ 3255 } else { 3256 dma->dp_window = kmem_alloc(space_needed, kmflag); 3257 if (dma->dp_window == NULL) { 3258 return (DDI_DMA_NORESOURCES); 3259 } 3260 dma->dp_need_to_free_window = B_TRUE; 3261 dma->dp_window_size = space_needed; 3262 DTRACE_PROBE2(rootnex__bind__sp__alloc, dev_info_t *, 3263 dma->dp_dip, size_t, space_needed); 3264 } 3265 3266 /* 3267 * we allocate copy buffer state and window state at the same time. 3268 * setup our copy buffer state pointers. Make sure it's aligned. 3269 */ 3270 if (dma->dp_copybuf_size > 0) { 3271 dma->dp_pgmap = (rootnex_pgmap_t *)(((uintptr_t) 3272 &dma->dp_window[dma->dp_max_win] + 0x7) & ~0x7); 3273 3274 #if !defined(__amd64) 3275 /* 3276 * make sure all pm_mapped, pm_vaddr, and pm_pp are set to 3277 * false/NULL. Should be quicker to bzero vs loop and set. 3278 */ 3279 bzero(dma->dp_pgmap, copy_state_size); 3280 #endif 3281 } else { 3282 dma->dp_pgmap = NULL; 3283 } 3284 3285 return (DDI_SUCCESS); 3286 } 3287 3288 3289 /* 3290 * rootnex_teardown_copybuf() 3291 * cleans up after rootnex_setup_copybuf() 3292 */ 3293 static void 3294 rootnex_teardown_copybuf(rootnex_dma_t *dma) 3295 { 3296 #if !defined(__amd64) 3297 int i; 3298 3299 /* 3300 * if we allocated kernel heap VMEM space, go through all the pages and 3301 * map out any of the ones that we're mapped into the kernel heap VMEM 3302 * arena. Then free the VMEM space. 3303 */ 3304 if (dma->dp_kva != NULL) { 3305 for (i = 0; i < dma->dp_sglinfo.si_max_pages; i++) { 3306 if (dma->dp_pgmap[i].pm_mapped) { 3307 hat_unload(kas.a_hat, dma->dp_pgmap[i].pm_kaddr, 3308 MMU_PAGESIZE, HAT_UNLOAD); 3309 dma->dp_pgmap[i].pm_mapped = B_FALSE; 3310 } 3311 } 3312 3313 vmem_free(heap_arena, dma->dp_kva, dma->dp_copybuf_size); 3314 } 3315 3316 #endif 3317 3318 /* if we allocated a copy buffer, free it */ 3319 if (dma->dp_cbaddr != NULL) { 3320 i_ddi_mem_free(dma->dp_cbaddr, NULL); 3321 } 3322 } 3323 3324 3325 /* 3326 * rootnex_teardown_windows() 3327 * cleans up after rootnex_setup_windows() 3328 */ 3329 static void 3330 rootnex_teardown_windows(rootnex_dma_t *dma) 3331 { 3332 /* 3333 * if we had to allocate window state on the last bind (because we 3334 * didn't have enough pre-allocated space in the handle), free it. 3335 */ 3336 if (dma->dp_need_to_free_window) { 3337 kmem_free(dma->dp_window, dma->dp_window_size); 3338 } 3339 } 3340 3341 3342 /* 3343 * rootnex_init_win() 3344 * Called in bind slow path during creation of a new window. Initializes 3345 * window state to default values. 3346 */ 3347 /*ARGSUSED*/ 3348 static void 3349 rootnex_init_win(ddi_dma_impl_t *hp, rootnex_dma_t *dma, 3350 rootnex_window_t *window, ddi_dma_cookie_t *cookie, off_t cur_offset) 3351 { 3352 hp->dmai_nwin++; 3353 window->wd_dosync = B_FALSE; 3354 window->wd_offset = cur_offset; 3355 window->wd_size = 0; 3356 window->wd_first_cookie = cookie; 3357 window->wd_cookie_cnt = 0; 3358 window->wd_trim.tr_trim_first = B_FALSE; 3359 window->wd_trim.tr_trim_last = B_FALSE; 3360 window->wd_trim.tr_first_copybuf_win = B_FALSE; 3361 window->wd_trim.tr_last_copybuf_win = B_FALSE; 3362 #if !defined(__amd64) 3363 window->wd_remap_copybuf = dma->dp_cb_remaping; 3364 #endif 3365 } 3366 3367 3368 /* 3369 * rootnex_setup_cookie() 3370 * Called in the bind slow path when the sgl uses the copy buffer. If any of 3371 * the sgl uses the copy buffer, we need to go through each cookie, figure 3372 * out if it uses the copy buffer, and if it does, save away everything we'll 3373 * need during sync. 
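 *
 *    Hedged illustration (values are made up): for a page that does use
 *    the copy buffer, this routine ends up recording roughly
 *	dp_pgmap[pidx].pm_uses_copybuf = B_TRUE;
 *	dp_pgmap[pidx].pm_cbaddr       = copy buffer KVA for this page;
 *	dp_pgmap[pidx].pm_kaddr        = driver buffer KVA (seg kpm on the
 *					 64-bit kernel);
 *    which is the state the sync code consumes later.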
3374 */ 3375 static void 3376 rootnex_setup_cookie(ddi_dma_obj_t *dmar_object, rootnex_dma_t *dma, 3377 ddi_dma_cookie_t *cookie, off_t cur_offset, size_t *copybuf_used, 3378 page_t **cur_pp) 3379 { 3380 boolean_t copybuf_sz_power_2; 3381 rootnex_sglinfo_t *sinfo; 3382 paddr_t paddr; 3383 uint_t pidx; 3384 uint_t pcnt; 3385 off_t poff; 3386 #if defined(__amd64) 3387 pfn_t pfn; 3388 #else 3389 page_t **pplist; 3390 #endif 3391 3392 sinfo = &dma->dp_sglinfo; 3393 3394 /* 3395 * Calculate the page index relative to the start of the buffer. The 3396 * index to the current page for our buffer is the offset into the 3397 * first page of the buffer plus our current offset into the buffer 3398 * itself, shifted of course... 3399 */ 3400 pidx = (sinfo->si_buf_offset + cur_offset) >> MMU_PAGESHIFT; 3401 ASSERT(pidx < sinfo->si_max_pages); 3402 3403 /* if this cookie uses the copy buffer */ 3404 if (cookie->dmac_type & ROOTNEX_USES_COPYBUF) { 3405 /* 3406 * NOTE: we know that since this cookie uses the copy buffer, it 3407 * is <= MMU_PAGESIZE. 3408 */ 3409 3410 /* 3411 * get the offset into the page. For the 64-bit kernel, get the 3412 * pfn which we'll use with seg kpm. 3413 */ 3414 poff = cookie->dmac_laddress & MMU_PAGEOFFSET; 3415 #if defined(__amd64) 3416 /* mfn_to_pfn() is a NOP on i86pc */ 3417 pfn = mfn_to_pfn(cookie->dmac_laddress >> MMU_PAGESHIFT); 3418 #endif /* __amd64 */ 3419 3420 /* figure out if the copybuf size is a power of 2 */ 3421 if (dma->dp_copybuf_size & (dma->dp_copybuf_size - 1)) { 3422 copybuf_sz_power_2 = B_FALSE; 3423 } else { 3424 copybuf_sz_power_2 = B_TRUE; 3425 } 3426 3427 /* This page uses the copy buffer */ 3428 dma->dp_pgmap[pidx].pm_uses_copybuf = B_TRUE; 3429 3430 /* 3431 * save the copy buffer KVA that we'll use with this page. 3432 * if we still fit within the copybuf, it's a simple add. 3433 * otherwise, we need to wrap over using & or % accordingly. 3434 */ 3435 if ((*copybuf_used + MMU_PAGESIZE) <= dma->dp_copybuf_size) { 3436 dma->dp_pgmap[pidx].pm_cbaddr = dma->dp_cbaddr + 3437 *copybuf_used; 3438 } else { 3439 if (copybuf_sz_power_2) { 3440 dma->dp_pgmap[pidx].pm_cbaddr = (caddr_t)( 3441 (uintptr_t)dma->dp_cbaddr + 3442 (*copybuf_used & 3443 (dma->dp_copybuf_size - 1))); 3444 } else { 3445 dma->dp_pgmap[pidx].pm_cbaddr = (caddr_t)( 3446 (uintptr_t)dma->dp_cbaddr + 3447 (*copybuf_used % dma->dp_copybuf_size)); 3448 } 3449 } 3450 3451 /* 3452 * over write the cookie physical address with the address of 3453 * the physical address of the copy buffer page that we will 3454 * use. 3455 */ 3456 paddr = pfn_to_pa(hat_getpfnum(kas.a_hat, 3457 dma->dp_pgmap[pidx].pm_cbaddr)) + poff; 3458 3459 #ifdef __xpv 3460 /* 3461 * If we're dom0, we're using a real device so we need to load 3462 * the cookies with MAs instead of PAs. 3463 */ 3464 cookie->dmac_laddress = ROOTNEX_PADDR_TO_RBASE(xen_info, paddr); 3465 #else 3466 cookie->dmac_laddress = paddr; 3467 #endif 3468 3469 /* if we have a kernel VA, it's easy, just save that address */ 3470 if ((dmar_object->dmao_type != DMA_OTYP_PAGES) && 3471 (sinfo->si_asp == &kas)) { 3472 /* 3473 * save away the page aligned virtual address of the 3474 * driver buffer. Offsets are handled in the sync code. 3475 */ 3476 dma->dp_pgmap[pidx].pm_kaddr = (caddr_t)(((uintptr_t) 3477 dmar_object->dmao_obj.virt_obj.v_addr + cur_offset) 3478 & MMU_PAGEMASK); 3479 #if !defined(__amd64) 3480 /* 3481 * we didn't need to, and will never need to map this 3482 * page. 
3483 */ 3484 dma->dp_pgmap[pidx].pm_mapped = B_FALSE; 3485 #endif 3486 3487 /* we don't have a kernel VA. We need one for the bcopy. */ 3488 } else { 3489 #if defined(__amd64) 3490 /* 3491 * for the 64-bit kernel, it's easy. We use seg kpm to 3492 * get a Kernel VA for the corresponding pfn. 3493 */ 3494 dma->dp_pgmap[pidx].pm_kaddr = hat_kpm_pfn2va(pfn); 3495 #else 3496 /* 3497 * for the 32-bit kernel, this is a pain. First we'll 3498 * save away the page_t or user VA for this page. This 3499 * is needed in rootnex_dma_win() when we switch to a 3500 * new window which requires us to re-map the copy 3501 * buffer. 3502 */ 3503 pplist = dmar_object->dmao_obj.virt_obj.v_priv; 3504 if (dmar_object->dmao_type == DMA_OTYP_PAGES) { 3505 dma->dp_pgmap[pidx].pm_pp = *cur_pp; 3506 dma->dp_pgmap[pidx].pm_vaddr = NULL; 3507 } else if (pplist != NULL) { 3508 dma->dp_pgmap[pidx].pm_pp = pplist[pidx]; 3509 dma->dp_pgmap[pidx].pm_vaddr = NULL; 3510 } else { 3511 dma->dp_pgmap[pidx].pm_pp = NULL; 3512 dma->dp_pgmap[pidx].pm_vaddr = (caddr_t) 3513 (((uintptr_t) 3514 dmar_object->dmao_obj.virt_obj.v_addr + 3515 cur_offset) & MMU_PAGEMASK); 3516 } 3517 3518 /* 3519 * save away the page aligned virtual address which was 3520 * allocated from the kernel heap arena (taking into 3521 * account if we need more copy buffer than we alloced 3522 * and use multiple windows to handle this, i.e. &,%). 3523 * NOTE: there isn't and physical memory backing up this 3524 * virtual address space currently. 3525 */ 3526 if ((*copybuf_used + MMU_PAGESIZE) <= 3527 dma->dp_copybuf_size) { 3528 dma->dp_pgmap[pidx].pm_kaddr = (caddr_t) 3529 (((uintptr_t)dma->dp_kva + *copybuf_used) & 3530 MMU_PAGEMASK); 3531 } else { 3532 if (copybuf_sz_power_2) { 3533 dma->dp_pgmap[pidx].pm_kaddr = (caddr_t) 3534 (((uintptr_t)dma->dp_kva + 3535 (*copybuf_used & 3536 (dma->dp_copybuf_size - 1))) & 3537 MMU_PAGEMASK); 3538 } else { 3539 dma->dp_pgmap[pidx].pm_kaddr = (caddr_t) 3540 (((uintptr_t)dma->dp_kva + 3541 (*copybuf_used % 3542 dma->dp_copybuf_size)) & 3543 MMU_PAGEMASK); 3544 } 3545 } 3546 3547 /* 3548 * if we haven't used up the available copy buffer yet, 3549 * map the kva to the physical page. 3550 */ 3551 if (!dma->dp_cb_remaping && ((*copybuf_used + 3552 MMU_PAGESIZE) <= dma->dp_copybuf_size)) { 3553 dma->dp_pgmap[pidx].pm_mapped = B_TRUE; 3554 if (dma->dp_pgmap[pidx].pm_pp != NULL) { 3555 i86_pp_map(dma->dp_pgmap[pidx].pm_pp, 3556 dma->dp_pgmap[pidx].pm_kaddr); 3557 } else { 3558 i86_va_map(dma->dp_pgmap[pidx].pm_vaddr, 3559 sinfo->si_asp, 3560 dma->dp_pgmap[pidx].pm_kaddr); 3561 } 3562 3563 /* 3564 * we've used up the available copy buffer, this page 3565 * will have to be mapped during rootnex_dma_win() when 3566 * we switch to a new window which requires a re-map 3567 * the copy buffer. (32-bit kernel only) 3568 */ 3569 } else { 3570 dma->dp_pgmap[pidx].pm_mapped = B_FALSE; 3571 } 3572 #endif 3573 /* go to the next page_t */ 3574 if (dmar_object->dmao_type == DMA_OTYP_PAGES) { 3575 *cur_pp = (*cur_pp)->p_next; 3576 } 3577 } 3578 3579 /* add to the copy buffer count */ 3580 *copybuf_used += MMU_PAGESIZE; 3581 3582 /* 3583 * This cookie doesn't use the copy buffer. Walk through the pages this 3584 * cookie occupies to reflect this. 3585 */ 3586 } else { 3587 /* 3588 * figure out how many pages the cookie occupies. We need to 3589 * use the original page offset of the buffer and the cookies 3590 * offset in the buffer to do this. 
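		 * Hedged worked example (4K pages, sizes made up): a
		 * 0x1800-byte cookie whose in-page offset works out to
		 * 0xc00 spans mmu_btopr(0x1800 + 0xc00) = 3 pages.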
3591 */ 3592 poff = (sinfo->si_buf_offset + cur_offset) & MMU_PAGEOFFSET; 3593 pcnt = mmu_btopr(cookie->dmac_size + poff); 3594 3595 while (pcnt > 0) { 3596 #if !defined(__amd64) 3597 /* 3598 * the 32-bit kernel doesn't have seg kpm, so we need 3599 * to map in the driver buffer (if it didn't come down 3600 * with a kernel VA) on the fly. Since this page doesn't 3601 * use the copy buffer, it's not, or will it ever, have 3602 * to be mapped in. 3603 */ 3604 dma->dp_pgmap[pidx].pm_mapped = B_FALSE; 3605 #endif 3606 dma->dp_pgmap[pidx].pm_uses_copybuf = B_FALSE; 3607 3608 /* 3609 * we need to update pidx and cur_pp or we'll loose 3610 * track of where we are. 3611 */ 3612 if (dmar_object->dmao_type == DMA_OTYP_PAGES) { 3613 *cur_pp = (*cur_pp)->p_next; 3614 } 3615 pidx++; 3616 pcnt--; 3617 } 3618 } 3619 } 3620 3621 3622 /* 3623 * rootnex_sgllen_window_boundary() 3624 * Called in the bind slow path when the next cookie causes us to exceed (in 3625 * this case == since we start at 0 and sgllen starts at 1) the maximum sgl 3626 * length supported by the DMA H/W. 3627 */ 3628 static int 3629 rootnex_sgllen_window_boundary(ddi_dma_impl_t *hp, rootnex_dma_t *dma, 3630 rootnex_window_t **windowp, ddi_dma_cookie_t *cookie, ddi_dma_attr_t *attr, 3631 off_t cur_offset) 3632 { 3633 off_t new_offset; 3634 size_t trim_sz; 3635 off_t coffset; 3636 3637 3638 /* 3639 * if we know we'll never have to trim, it's pretty easy. Just move to 3640 * the next window and init it. We're done. 3641 */ 3642 if (!dma->dp_trim_required) { 3643 (*windowp)++; 3644 rootnex_init_win(hp, dma, *windowp, cookie, cur_offset); 3645 (*windowp)->wd_cookie_cnt++; 3646 (*windowp)->wd_size = cookie->dmac_size; 3647 return (DDI_SUCCESS); 3648 } 3649 3650 /* figure out how much we need to trim from the window */ 3651 ASSERT(attr->dma_attr_granular != 0); 3652 if (dma->dp_granularity_power_2) { 3653 trim_sz = (*windowp)->wd_size & (attr->dma_attr_granular - 1); 3654 } else { 3655 trim_sz = (*windowp)->wd_size % attr->dma_attr_granular; 3656 } 3657 3658 /* The window's a whole multiple of granularity. We're done */ 3659 if (trim_sz == 0) { 3660 (*windowp)++; 3661 rootnex_init_win(hp, dma, *windowp, cookie, cur_offset); 3662 (*windowp)->wd_cookie_cnt++; 3663 (*windowp)->wd_size = cookie->dmac_size; 3664 return (DDI_SUCCESS); 3665 } 3666 3667 /* 3668 * The window's not a whole multiple of granularity, since we know this 3669 * is due to the sgllen, we need to go back to the last cookie and trim 3670 * that one, add the left over part of the old cookie into the new 3671 * window, and then add in the new cookie into the new window. 3672 */ 3673 3674 /* 3675 * make sure the driver isn't making us do something bad... Trimming and 3676 * sgllen == 1 don't go together. 3677 */ 3678 if (attr->dma_attr_sgllen == 1) { 3679 return (DDI_DMA_NOMAPPING); 3680 } 3681 3682 /* 3683 * first, setup the current window to account for the trim. Need to go 3684 * back to the last cookie for this. 3685 */ 3686 cookie--; 3687 (*windowp)->wd_trim.tr_trim_last = B_TRUE; 3688 (*windowp)->wd_trim.tr_last_cookie = cookie; 3689 (*windowp)->wd_trim.tr_last_paddr = cookie->dmac_laddress; 3690 ASSERT(cookie->dmac_size > trim_sz); 3691 (*windowp)->wd_trim.tr_last_size = cookie->dmac_size - trim_sz; 3692 (*windowp)->wd_size -= trim_sz; 3693 3694 /* save the buffer offsets for the next window */ 3695 coffset = cookie->dmac_size - trim_sz; 3696 new_offset = (*windowp)->wd_offset + (*windowp)->wd_size; 3697 3698 /* 3699 * set this now in case this is the first window. 
all other cases are 3700 * set in dma_win() 3701 */ 3702 cookie->dmac_size = (*windowp)->wd_trim.tr_last_size; 3703 3704 /* 3705 * initialize the next window using what's left over in the previous 3706 * cookie. 3707 */ 3708 (*windowp)++; 3709 rootnex_init_win(hp, dma, *windowp, cookie, new_offset); 3710 (*windowp)->wd_cookie_cnt++; 3711 (*windowp)->wd_trim.tr_trim_first = B_TRUE; 3712 (*windowp)->wd_trim.tr_first_paddr = cookie->dmac_laddress + coffset; 3713 (*windowp)->wd_trim.tr_first_size = trim_sz; 3714 if (cookie->dmac_type & ROOTNEX_USES_COPYBUF) { 3715 (*windowp)->wd_dosync = B_TRUE; 3716 } 3717 3718 /* 3719 * now go back to the current cookie and add it to the new window. set 3720 * the new window size to the what was left over from the previous 3721 * cookie and what's in the current cookie. 3722 */ 3723 cookie++; 3724 (*windowp)->wd_cookie_cnt++; 3725 (*windowp)->wd_size = trim_sz + cookie->dmac_size; 3726 3727 /* 3728 * trim plus the next cookie could put us over maxxfer (a cookie can be 3729 * a max size of maxxfer). Handle that case. 3730 */ 3731 if ((*windowp)->wd_size > dma->dp_maxxfer) { 3732 /* 3733 * maxxfer is already a whole multiple of granularity, and this 3734 * trim will be <= the previous trim (since a cookie can't be 3735 * larger than maxxfer). Make things simple here. 3736 */ 3737 trim_sz = (*windowp)->wd_size - dma->dp_maxxfer; 3738 (*windowp)->wd_trim.tr_trim_last = B_TRUE; 3739 (*windowp)->wd_trim.tr_last_cookie = cookie; 3740 (*windowp)->wd_trim.tr_last_paddr = cookie->dmac_laddress; 3741 (*windowp)->wd_trim.tr_last_size = cookie->dmac_size - trim_sz; 3742 (*windowp)->wd_size -= trim_sz; 3743 ASSERT((*windowp)->wd_size == dma->dp_maxxfer); 3744 3745 /* save the buffer offsets for the next window */ 3746 coffset = cookie->dmac_size - trim_sz; 3747 new_offset = (*windowp)->wd_offset + (*windowp)->wd_size; 3748 3749 /* setup the next window */ 3750 (*windowp)++; 3751 rootnex_init_win(hp, dma, *windowp, cookie, new_offset); 3752 (*windowp)->wd_cookie_cnt++; 3753 (*windowp)->wd_trim.tr_trim_first = B_TRUE; 3754 (*windowp)->wd_trim.tr_first_paddr = cookie->dmac_laddress + 3755 coffset; 3756 (*windowp)->wd_trim.tr_first_size = trim_sz; 3757 } 3758 3759 return (DDI_SUCCESS); 3760 } 3761 3762 3763 /* 3764 * rootnex_copybuf_window_boundary() 3765 * Called in bind slowpath when we get to a window boundary because we used 3766 * up all the copy buffer that we have. 3767 */ 3768 static int 3769 rootnex_copybuf_window_boundary(ddi_dma_impl_t *hp, rootnex_dma_t *dma, 3770 rootnex_window_t **windowp, ddi_dma_cookie_t *cookie, off_t cur_offset, 3771 size_t *copybuf_used) 3772 { 3773 rootnex_sglinfo_t *sinfo; 3774 off_t new_offset; 3775 size_t trim_sz; 3776 paddr_t paddr; 3777 off_t coffset; 3778 uint_t pidx; 3779 off_t poff; 3780 3781 3782 sinfo = &dma->dp_sglinfo; 3783 3784 /* 3785 * the copy buffer should be a whole multiple of page size. We know that 3786 * this cookie is <= MMU_PAGESIZE. 3787 */ 3788 ASSERT(cookie->dmac_size <= MMU_PAGESIZE); 3789 3790 /* 3791 * from now on, all new windows in this bind need to be re-mapped during 3792 * ddi_dma_getwin() (32-bit kernel only). i.e. we ran out out copybuf 3793 * space... 3794 */ 3795 #if !defined(__amd64) 3796 dma->dp_cb_remaping = B_TRUE; 3797 #endif 3798 3799 /* reset copybuf used */ 3800 *copybuf_used = 0; 3801 3802 /* 3803 * if we don't have to trim (since granularity is set to 1), go to the 3804 * next window and add the current cookie to it. 
We know the current
3805 * cookie uses the copy buffer since we're in this code path.
3806 */
3807 if (!dma->dp_trim_required) {
3808 (*windowp)++;
3809 rootnex_init_win(hp, dma, *windowp, cookie, cur_offset);
3810
3811 /* Add this cookie to the new window */
3812 (*windowp)->wd_cookie_cnt++;
3813 (*windowp)->wd_size += cookie->dmac_size;
3814 *copybuf_used += MMU_PAGESIZE;
3815 return (DDI_SUCCESS);
3816 }
3817
3818 /*
3819 * *** may need to trim, figure it out.
3820 */
3821
3822 /* figure out how much we need to trim from the window */
3823 if (dma->dp_granularity_power_2) {
3824 trim_sz = (*windowp)->wd_size &
3825 (hp->dmai_attr.dma_attr_granular - 1);
3826 } else {
3827 trim_sz = (*windowp)->wd_size % hp->dmai_attr.dma_attr_granular;
3828 }
3829
3830 /*
3831 * if the window's a whole multiple of granularity, go to the next
3832 * window, init it, then add in the current cookie. We know the current
3833 * cookie uses the copy buffer since we're in this code path.
3834 */
3835 if (trim_sz == 0) {
3836 (*windowp)++;
3837 rootnex_init_win(hp, dma, *windowp, cookie, cur_offset);
3838
3839 /* Add this cookie to the new window */
3840 (*windowp)->wd_cookie_cnt++;
3841 (*windowp)->wd_size += cookie->dmac_size;
3842 *copybuf_used += MMU_PAGESIZE;
3843 return (DDI_SUCCESS);
3844 }
3845
3846 /*
3847 * *** We figured it out, we definitely need to trim
3848 */
3849
3850 /*
3851 * make sure the driver isn't making us do something bad...
3852 * Trimming and sgllen == 1 don't go together.
3853 */
3854 if (hp->dmai_attr.dma_attr_sgllen == 1) {
3855 return (DDI_DMA_NOMAPPING);
3856 }
3857
3858 /*
3859 * first, setup the current window to account for the trim. Need to go
3860 * back to the last cookie for this. Some of the last cookie will be in
3861 * the current window, and some of the last cookie will be in the new
3862 * window. All of the current cookie will be in the new window.
3863 */
3864 cookie--;
3865 (*windowp)->wd_trim.tr_trim_last = B_TRUE;
3866 (*windowp)->wd_trim.tr_last_cookie = cookie;
3867 (*windowp)->wd_trim.tr_last_paddr = cookie->dmac_laddress;
3868 ASSERT(cookie->dmac_size > trim_sz);
3869 (*windowp)->wd_trim.tr_last_size = cookie->dmac_size - trim_sz;
3870 (*windowp)->wd_size -= trim_sz;
3871
3872 /*
3873 * we're trimming the last cookie (not the current cookie). So that
3874 * last cookie may or may not have been using the copy buffer (
3875 * we know the cookie passed in uses the copy buffer since we're in
3876 * this code path).
3877 *
3878 * If the last cookie doesn't use the copy buffer, nothing special to
3879 * do. However, if it does use the copy buffer, it will be both the
3880 * last page in the current window and the first page in the next
3881 * window. Since we are reusing the copy buffer (and KVA space on the
3882 * 32-bit kernel), this page will use the end of the copy buffer in the
3883 * current window, and the start of the copy buffer in the next window.
3884 * Track that info... The cookie physical address was already set to
3885 * the copy buffer physical address in setup_cookie().
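 *
 * As a rough illustration (assumed numbers, not taken from a real bind):
 * with 4K pages, si_buf_offset == 0x200, wd_offset == 0 and a trimmed
 * wd_size of 0x2d00, the straddling page index computed below works out
 * to
 *
 *	pidx = (0x200 + 0x0 + 0x2d00) >> MMU_PAGESHIFT;
 *
 * which is 2, i.e. buffer page 2 holds both the tail of this window and
 * the head of the next one. That is why its pm_cbaddr (and pm_kaddr on
 * the 32-bit kernel) gets saved in both windows' trim state.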
3886 */
3887 if (cookie->dmac_type & ROOTNEX_USES_COPYBUF) {
3888 pidx = (sinfo->si_buf_offset + (*windowp)->wd_offset +
3889 (*windowp)->wd_size) >> MMU_PAGESHIFT;
3890 (*windowp)->wd_trim.tr_last_copybuf_win = B_TRUE;
3891 (*windowp)->wd_trim.tr_last_pidx = pidx;
3892 (*windowp)->wd_trim.tr_last_cbaddr =
3893 dma->dp_pgmap[pidx].pm_cbaddr;
3894 #if !defined(__amd64)
3895 (*windowp)->wd_trim.tr_last_kaddr =
3896 dma->dp_pgmap[pidx].pm_kaddr;
3897 #endif
3898 }
3899
3900 /* save the buffer offsets for the next window */
3901 coffset = cookie->dmac_size - trim_sz;
3902 new_offset = (*windowp)->wd_offset + (*windowp)->wd_size;
3903
3904 /*
3905 * set this now in case this is the first window. all other cases are
3906 * set in dma_win()
3907 */
3908 cookie->dmac_size = (*windowp)->wd_trim.tr_last_size;
3909
3910 /*
3911 * initialize the next window using what's left over in the previous
3912 * cookie.
3913 */
3914 (*windowp)++;
3915 rootnex_init_win(hp, dma, *windowp, cookie, new_offset);
3916 (*windowp)->wd_cookie_cnt++;
3917 (*windowp)->wd_trim.tr_trim_first = B_TRUE;
3918 (*windowp)->wd_trim.tr_first_paddr = cookie->dmac_laddress + coffset;
3919 (*windowp)->wd_trim.tr_first_size = trim_sz;
3920
3921 /*
3922 * again, we're tracking if the last cookie uses the copy buffer.
3923 * read the comment above for more info on why we need to track
3924 * additional state.
3925 *
3926 * For the first cookie in the new window, we need to reset the physical
3927 * address we DMA into to the start of the copy buffer plus any
3928 * initial page offset which may be present.
3929 */
3930 if (cookie->dmac_type & ROOTNEX_USES_COPYBUF) {
3931 (*windowp)->wd_dosync = B_TRUE;
3932 (*windowp)->wd_trim.tr_first_copybuf_win = B_TRUE;
3933 (*windowp)->wd_trim.tr_first_pidx = pidx;
3934 (*windowp)->wd_trim.tr_first_cbaddr = dma->dp_cbaddr;
3935 poff = (*windowp)->wd_trim.tr_first_paddr & MMU_PAGEOFFSET;
3936
3937 paddr = pfn_to_pa(hat_getpfnum(kas.a_hat, dma->dp_cbaddr)) +
3938 poff;
3939 #ifdef __xpv
3940 /*
3941 * If we're dom0, we're using a real device so we need to load
3942 * the cookies with MAs instead of PAs.
3943 */
3944 (*windowp)->wd_trim.tr_first_paddr =
3945 ROOTNEX_PADDR_TO_RBASE(xen_info, paddr);
3946 #else
3947 (*windowp)->wd_trim.tr_first_paddr = paddr;
3948 #endif
3949
3950 #if !defined(__amd64)
3951 (*windowp)->wd_trim.tr_first_kaddr = dma->dp_kva;
3952 #endif
3953 /* account for the cookie copybuf usage in the new window */
3954 *copybuf_used += MMU_PAGESIZE;
3955
3956 /*
3957 * every piece of code has to have a hack, and here is this
3958 * one's :-)
3959 *
3960 * There is a complex interaction between setup_cookie and the
3961 * copybuf window boundary. The complexity had to be in either
3962 * the maxxfer window, or the copybuf window, and I chose the
3963 * copybuf code.
3964 *
3965 * So in this code path, we have taken the last cookie,
3966 * virtually broken it in half due to the trim, and it happens
3967 * to use the copybuf which further complicates life. At the
3968 * same time, we have already setup the current cookie, which
3969 * is now wrong. More background info: the current cookie uses
3970 * the copybuf, so it is only a page long max. So we need to
3971 * fix the current cookie's copy buffer address, physical
3972 * address, and kva for the 32-bit kernel. We do this by
3973 * bumping them by page size (of course, we can't do this on
3974 * the physical address since the copy buffer may not be
3975 * physically contiguous).
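 *
 * A sketch of that last point with assumed values: say the current
 * cookie had been set up against copy buffer page pidx, whose
 * pm_cbaddr is 0xd2400000 and whose physical address is 0x7f3c000.
 * The new window moves it to page pidx + 1, so its pm_cbaddr becomes
 * 0xd2401000, but that page could just as well sit at physical
 * 0x1a9e000. That is why the cookie's dmac_laddress below is rebuilt
 * with hat_getpfnum()/pfn_to_pa() on the bumped pm_cbaddr instead of
 * being bumped by MMU_PAGESIZE itself.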
3976 */ 3977 cookie++; 3978 dma->dp_pgmap[pidx + 1].pm_cbaddr += MMU_PAGESIZE; 3979 poff = cookie->dmac_laddress & MMU_PAGEOFFSET; 3980 3981 paddr = pfn_to_pa(hat_getpfnum(kas.a_hat, 3982 dma->dp_pgmap[pidx + 1].pm_cbaddr)) + poff; 3983 #ifdef __xpv 3984 /* 3985 * If we're dom0, we're using a real device so we need to load 3986 * the cookies with MAs instead of PAs. 3987 */ 3988 cookie->dmac_laddress = ROOTNEX_PADDR_TO_RBASE(xen_info, paddr); 3989 #else 3990 cookie->dmac_laddress = paddr; 3991 #endif 3992 3993 #if !defined(__amd64) 3994 ASSERT(dma->dp_pgmap[pidx + 1].pm_mapped == B_FALSE); 3995 dma->dp_pgmap[pidx + 1].pm_kaddr += MMU_PAGESIZE; 3996 #endif 3997 } else { 3998 /* go back to the current cookie */ 3999 cookie++; 4000 } 4001 4002 /* 4003 * add the current cookie to the new window. set the new window size to 4004 * the what was left over from the previous cookie and what's in the 4005 * current cookie. 4006 */ 4007 (*windowp)->wd_cookie_cnt++; 4008 (*windowp)->wd_size = trim_sz + cookie->dmac_size; 4009 ASSERT((*windowp)->wd_size < dma->dp_maxxfer); 4010 4011 /* 4012 * we know that the cookie passed in always uses the copy buffer. We 4013 * wouldn't be here if it didn't. 4014 */ 4015 *copybuf_used += MMU_PAGESIZE; 4016 4017 return (DDI_SUCCESS); 4018 } 4019 4020 4021 /* 4022 * rootnex_maxxfer_window_boundary() 4023 * Called in bind slowpath when we get to a window boundary because we will 4024 * go over maxxfer. 4025 */ 4026 static int 4027 rootnex_maxxfer_window_boundary(ddi_dma_impl_t *hp, rootnex_dma_t *dma, 4028 rootnex_window_t **windowp, ddi_dma_cookie_t *cookie) 4029 { 4030 size_t dmac_size; 4031 off_t new_offset; 4032 size_t trim_sz; 4033 off_t coffset; 4034 4035 4036 /* 4037 * calculate how much we have to trim off of the current cookie to equal 4038 * maxxfer. We don't have to account for granularity here since our 4039 * maxxfer already takes that into account. 4040 */ 4041 trim_sz = ((*windowp)->wd_size + cookie->dmac_size) - dma->dp_maxxfer; 4042 ASSERT(trim_sz <= cookie->dmac_size); 4043 ASSERT(trim_sz <= dma->dp_maxxfer); 4044 4045 /* save cookie size since we need it later and we might change it */ 4046 dmac_size = cookie->dmac_size; 4047 4048 /* 4049 * if we're not trimming the entire cookie, setup the current window to 4050 * account for the trim. 4051 */ 4052 if (trim_sz < cookie->dmac_size) { 4053 (*windowp)->wd_cookie_cnt++; 4054 (*windowp)->wd_trim.tr_trim_last = B_TRUE; 4055 (*windowp)->wd_trim.tr_last_cookie = cookie; 4056 (*windowp)->wd_trim.tr_last_paddr = cookie->dmac_laddress; 4057 (*windowp)->wd_trim.tr_last_size = cookie->dmac_size - trim_sz; 4058 (*windowp)->wd_size = dma->dp_maxxfer; 4059 4060 /* 4061 * set the adjusted cookie size now in case this is the first 4062 * window. All other windows are taken care of in get win 4063 */ 4064 cookie->dmac_size = (*windowp)->wd_trim.tr_last_size; 4065 } 4066 4067 /* 4068 * coffset is the current offset within the cookie, new_offset is the 4069 * current offset with the entire buffer. 
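 *
 * A worked example with assumed numbers: dp_maxxfer == 0x100000, the
 * window had reached wd_size == 0xfe000 and the incoming cookie is
 * 0x3000 bytes, so
 *
 *	trim_sz = (0xfe000 + 0x3000) - 0x100000;	(0x1000)
 *	coffset = 0x3000 - 0x1000;			(0x2000)
 *
 * 0x2000 of the cookie tops off the current window at exactly
 * dp_maxxfer, and the remaining 0x1000 becomes the trimmed first piece
 * of the next window.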
4070 */
4071 coffset = dmac_size - trim_sz;
4072 new_offset = (*windowp)->wd_offset + (*windowp)->wd_size;
4073
4074 /* initialize the next window */
4075 (*windowp)++;
4076 rootnex_init_win(hp, dma, *windowp, cookie, new_offset);
4077 (*windowp)->wd_cookie_cnt++;
4078 (*windowp)->wd_size = trim_sz;
4079 if (trim_sz < dmac_size) {
4080 (*windowp)->wd_trim.tr_trim_first = B_TRUE;
4081 (*windowp)->wd_trim.tr_first_paddr = cookie->dmac_laddress +
4082 coffset;
4083 (*windowp)->wd_trim.tr_first_size = trim_sz;
4084 }
4085
4086 return (DDI_SUCCESS);
4087 }
4088
4089
4090 /*ARGSUSED*/
4091 static int
4092 rootnex_coredma_sync(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle,
4093 off_t off, size_t len, uint_t cache_flags)
4094 {
4095 rootnex_sglinfo_t *sinfo;
4096 rootnex_pgmap_t *cbpage;
4097 rootnex_window_t *win;
4098 ddi_dma_impl_t *hp;
4099 rootnex_dma_t *dma;
4100 caddr_t fromaddr;
4101 caddr_t toaddr;
4102 uint_t psize;
4103 off_t offset;
4104 uint_t pidx;
4105 size_t size;
4106 off_t poff;
4107 int e;
4108
4109
4110 hp = (ddi_dma_impl_t *)handle;
4111 dma = (rootnex_dma_t *)hp->dmai_private;
4112 sinfo = &dma->dp_sglinfo;
4113
4114 /*
4115 * if we don't have any windows, we don't need to sync. A copybuf
4116 * will cause us to have at least one window.
4117 */
4118 if (dma->dp_window == NULL) {
4119 return (DDI_SUCCESS);
4120 }
4121
4122 /* This window may not need to be sync'd */
4123 win = &dma->dp_window[dma->dp_current_win];
4124 if (!win->wd_dosync) {
4125 return (DDI_SUCCESS);
4126 }
4127
4133 /* handle off and len special cases */
4134 if ((off == 0) || (rootnex_sync_ignore_params)) {
4135 offset = win->wd_offset;
4136 } else {
4137 offset = off;
4138 }
4139 if ((len == 0) || (rootnex_sync_ignore_params)) {
4140 size = win->wd_size;
4141 } else {
4142 size = len;
4143 }
4144
4145 /* check the sync args to make sure they make a little sense */
4146 if (rootnex_sync_check_parms) {
4147 e = rootnex_valid_sync_parms(hp, win, offset, size,
4148 cache_flags);
4149 if (e != DDI_SUCCESS) {
4150 ROOTNEX_PROF_INC(&rootnex_cnt[ROOTNEX_CNT_SYNC_FAIL]);
4151 return (DDI_FAILURE);
4152 }
4153 }
4154
4155 /*
4156 * special case the first page to handle the offset into the page. The
4157 * offset to the current page for our buffer is the offset into the
4158 * first page of the buffer plus our current offset into the buffer
4159 * itself, masked of course.
4160 */
4161 poff = (sinfo->si_buf_offset + offset) & MMU_PAGEOFFSET;
4162 psize = MIN((MMU_PAGESIZE - poff), size);
4163
4164 /* go through all the pages that we want to sync */
4165 while (size > 0) {
4166 /*
4167 * Calculate the page index relative to the start of the buffer.
4168 * The index to the current page for our buffer is the offset
4169 * into the first page of the buffer plus our current offset
4170 * into the buffer itself, shifted of course...
4171 */
4172 pidx = (sinfo->si_buf_offset + offset) >> MMU_PAGESHIFT;
4173 ASSERT(pidx < sinfo->si_max_pages);
4174
4175 /*
4176 * if this page uses the copy buffer, we need to sync it,
4177 * otherwise, go on to the next page.
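 *
 * To illustrate the walk with assumed values: a sync of 0x3000 bytes
 * starting at buffer offset 0x1f00 (si_buf_offset == 0) visits four
 * pages with poff/psize pairs of
 *
 *	0xf00/0x100, 0x0/0x1000, 0x0/0x1000, 0x0/0xf00
 *
 * and only the pages whose pm_uses_copybuf is set are actually copied.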
4178 */ 4179 cbpage = &dma->dp_pgmap[pidx]; 4180 ASSERT((cbpage->pm_uses_copybuf == B_TRUE) || 4181 (cbpage->pm_uses_copybuf == B_FALSE)); 4182 if (cbpage->pm_uses_copybuf) { 4183 /* cbaddr and kaddr should be page aligned */ 4184 ASSERT(((uintptr_t)cbpage->pm_cbaddr & 4185 MMU_PAGEOFFSET) == 0); 4186 ASSERT(((uintptr_t)cbpage->pm_kaddr & 4187 MMU_PAGEOFFSET) == 0); 4188 4189 /* 4190 * if we're copying for the device, we are going to 4191 * copy from the drivers buffer and to the rootnex 4192 * allocated copy buffer. 4193 */ 4194 if (cache_flags == DDI_DMA_SYNC_FORDEV) { 4195 fromaddr = cbpage->pm_kaddr + poff; 4196 toaddr = cbpage->pm_cbaddr + poff; 4197 DTRACE_PROBE2(rootnex__sync__dev, 4198 dev_info_t *, dma->dp_dip, size_t, psize); 4199 4200 /* 4201 * if we're copying for the cpu/kernel, we are going to 4202 * copy from the rootnex allocated copy buffer to the 4203 * drivers buffer. 4204 */ 4205 } else { 4206 fromaddr = cbpage->pm_cbaddr + poff; 4207 toaddr = cbpage->pm_kaddr + poff; 4208 DTRACE_PROBE2(rootnex__sync__cpu, 4209 dev_info_t *, dma->dp_dip, size_t, psize); 4210 } 4211 4212 bcopy(fromaddr, toaddr, psize); 4213 } 4214 4215 /* 4216 * decrement size until we're done, update our offset into the 4217 * buffer, and get the next page size. 4218 */ 4219 size -= psize; 4220 offset += psize; 4221 psize = MIN(MMU_PAGESIZE, size); 4222 4223 /* page offset is zero for the rest of this loop */ 4224 poff = 0; 4225 } 4226 4227 return (DDI_SUCCESS); 4228 } 4229 4230 /* 4231 * rootnex_dma_sync() 4232 * called from ddi_dma_sync() if DMP_NOSYNC is not set in hp->dmai_rflags. 4233 * We set DMP_NOSYNC if we're not using the copy buffer. If DMP_NOSYNC 4234 * is set, ddi_dma_sync() returns immediately passing back success. 4235 */ 4236 /*ARGSUSED*/ 4237 static int 4238 rootnex_dma_sync(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle, 4239 off_t off, size_t len, uint_t cache_flags) 4240 { 4241 #if !defined(__xpv) 4242 if (IOMMU_USED(handle)) { 4243 return (iommulib_nexdma_sync(dip, rdip, handle, off, len, 4244 cache_flags)); 4245 } 4246 #endif 4247 return (rootnex_coredma_sync(dip, rdip, handle, off, len, 4248 cache_flags)); 4249 } 4250 4251 /* 4252 * rootnex_valid_sync_parms() 4253 * checks the parameters passed to sync to verify they are correct. 4254 */ 4255 static int 4256 rootnex_valid_sync_parms(ddi_dma_impl_t *hp, rootnex_window_t *win, 4257 off_t offset, size_t size, uint_t cache_flags) 4258 { 4259 off_t woffset; 4260 4261 4262 /* 4263 * the first part of the test to make sure the offset passed in is 4264 * within the window. 4265 */ 4266 if (offset < win->wd_offset) { 4267 return (DDI_FAILURE); 4268 } 4269 4270 /* 4271 * second and last part of the test to make sure the offset and length 4272 * passed in is within the window. 4273 */ 4274 woffset = offset - win->wd_offset; 4275 if ((woffset + size) > win->wd_size) { 4276 return (DDI_FAILURE); 4277 } 4278 4279 /* 4280 * if we are sync'ing for the device, the DDI_DMA_WRITE flag should 4281 * be set too. 4282 */ 4283 if ((cache_flags == DDI_DMA_SYNC_FORDEV) && 4284 (hp->dmai_rflags & DDI_DMA_WRITE)) { 4285 return (DDI_SUCCESS); 4286 } 4287 4288 /* 4289 * at this point, either DDI_DMA_SYNC_FORCPU or DDI_DMA_SYNC_FORKERNEL 4290 * should be set. Also DDI_DMA_READ should be set in the flags. 
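 *
 * For instance (assuming a handle bound with DDI_DMA_RDWR and an
 * offset/length that land inside the current window), both of these
 * driver calls get past the checks above:
 *
 *	(void) ddi_dma_sync(handle, off, len, DDI_DMA_SYNC_FORDEV);
 *	(void) ddi_dma_sync(handle, 0, 0, DDI_DMA_SYNC_FORKERNEL);
 *
 * while a FORDEV sync on a handle bound with only DDI_DMA_READ would
 * fail here.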
4291 */ 4292 if (((cache_flags == DDI_DMA_SYNC_FORCPU) || 4293 (cache_flags == DDI_DMA_SYNC_FORKERNEL)) && 4294 (hp->dmai_rflags & DDI_DMA_READ)) { 4295 return (DDI_SUCCESS); 4296 } 4297 4298 return (DDI_FAILURE); 4299 } 4300 4301 4302 /*ARGSUSED*/ 4303 static int 4304 rootnex_coredma_win(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle, 4305 uint_t win, off_t *offp, size_t *lenp, ddi_dma_cookie_t *cookiep, 4306 uint_t *ccountp) 4307 { 4308 rootnex_window_t *window; 4309 rootnex_trim_t *trim; 4310 ddi_dma_impl_t *hp; 4311 rootnex_dma_t *dma; 4312 #if !defined(__amd64) 4313 rootnex_sglinfo_t *sinfo; 4314 rootnex_pgmap_t *pmap; 4315 uint_t pidx; 4316 uint_t pcnt; 4317 off_t poff; 4318 int i; 4319 #endif 4320 4321 4322 hp = (ddi_dma_impl_t *)handle; 4323 dma = (rootnex_dma_t *)hp->dmai_private; 4324 #if !defined(__amd64) 4325 sinfo = &dma->dp_sglinfo; 4326 #endif 4327 4328 /* If we try and get a window which doesn't exist, return failure */ 4329 if (win >= hp->dmai_nwin) { 4330 ROOTNEX_PROF_INC(&rootnex_cnt[ROOTNEX_CNT_GETWIN_FAIL]); 4331 return (DDI_FAILURE); 4332 } 4333 4334 /* 4335 * if we don't have any windows, and they're asking for the first 4336 * window, setup the cookie pointer to the first cookie in the bind. 4337 * setup our return values, then increment the cookie since we return 4338 * the first cookie on the stack. 4339 */ 4340 if (dma->dp_window == NULL) { 4341 if (win != 0) { 4342 ROOTNEX_PROF_INC(&rootnex_cnt[ROOTNEX_CNT_GETWIN_FAIL]); 4343 return (DDI_FAILURE); 4344 } 4345 hp->dmai_cookie = dma->dp_cookies; 4346 *offp = 0; 4347 *lenp = dma->dp_dma.dmao_size; 4348 *ccountp = dma->dp_sglinfo.si_sgl_size; 4349 *cookiep = hp->dmai_cookie[0]; 4350 hp->dmai_cookie++; 4351 return (DDI_SUCCESS); 4352 } 4353 4354 /* sync the old window before moving on to the new one */ 4355 window = &dma->dp_window[dma->dp_current_win]; 4356 if ((window->wd_dosync) && (hp->dmai_rflags & DDI_DMA_READ)) { 4357 (void) rootnex_coredma_sync(dip, rdip, handle, 0, 0, 4358 DDI_DMA_SYNC_FORCPU); 4359 } 4360 4361 #if !defined(__amd64) 4362 /* 4363 * before we move to the next window, if we need to re-map, unmap all 4364 * the pages in this window. 4365 */ 4366 if (dma->dp_cb_remaping) { 4367 /* 4368 * If we switch to this window again, we'll need to map in 4369 * on the fly next time. 4370 */ 4371 window->wd_remap_copybuf = B_TRUE; 4372 4373 /* 4374 * calculate the page index into the buffer where this window 4375 * starts, and the number of pages this window takes up. 4376 */ 4377 pidx = (sinfo->si_buf_offset + window->wd_offset) >> 4378 MMU_PAGESHIFT; 4379 poff = (sinfo->si_buf_offset + window->wd_offset) & 4380 MMU_PAGEOFFSET; 4381 pcnt = mmu_btopr(window->wd_size + poff); 4382 ASSERT((pidx + pcnt) <= sinfo->si_max_pages); 4383 4384 /* unmap pages which are currently mapped in this window */ 4385 for (i = 0; i < pcnt; i++) { 4386 if (dma->dp_pgmap[pidx].pm_mapped) { 4387 hat_unload(kas.a_hat, 4388 dma->dp_pgmap[pidx].pm_kaddr, MMU_PAGESIZE, 4389 HAT_UNLOAD); 4390 dma->dp_pgmap[pidx].pm_mapped = B_FALSE; 4391 } 4392 pidx++; 4393 } 4394 } 4395 #endif 4396 4397 /* 4398 * Move to the new window. 
4399 * NOTE: current_win must be set for sync to work right 4400 */ 4401 dma->dp_current_win = win; 4402 window = &dma->dp_window[win]; 4403 4404 /* if needed, adjust the first and/or last cookies for trim */ 4405 trim = &window->wd_trim; 4406 if (trim->tr_trim_first) { 4407 window->wd_first_cookie->dmac_laddress = trim->tr_first_paddr; 4408 window->wd_first_cookie->dmac_size = trim->tr_first_size; 4409 #if !defined(__amd64) 4410 window->wd_first_cookie->dmac_type = 4411 (window->wd_first_cookie->dmac_type & 4412 ROOTNEX_USES_COPYBUF) + window->wd_offset; 4413 #endif 4414 if (trim->tr_first_copybuf_win) { 4415 dma->dp_pgmap[trim->tr_first_pidx].pm_cbaddr = 4416 trim->tr_first_cbaddr; 4417 #if !defined(__amd64) 4418 dma->dp_pgmap[trim->tr_first_pidx].pm_kaddr = 4419 trim->tr_first_kaddr; 4420 #endif 4421 } 4422 } 4423 if (trim->tr_trim_last) { 4424 trim->tr_last_cookie->dmac_laddress = trim->tr_last_paddr; 4425 trim->tr_last_cookie->dmac_size = trim->tr_last_size; 4426 if (trim->tr_last_copybuf_win) { 4427 dma->dp_pgmap[trim->tr_last_pidx].pm_cbaddr = 4428 trim->tr_last_cbaddr; 4429 #if !defined(__amd64) 4430 dma->dp_pgmap[trim->tr_last_pidx].pm_kaddr = 4431 trim->tr_last_kaddr; 4432 #endif 4433 } 4434 } 4435 4436 /* 4437 * setup the cookie pointer to the first cookie in the window. setup 4438 * our return values, then increment the cookie since we return the 4439 * first cookie on the stack. 4440 */ 4441 hp->dmai_cookie = window->wd_first_cookie; 4442 *offp = window->wd_offset; 4443 *lenp = window->wd_size; 4444 *ccountp = window->wd_cookie_cnt; 4445 *cookiep = hp->dmai_cookie[0]; 4446 hp->dmai_cookie++; 4447 4448 #if !defined(__amd64) 4449 /* re-map copybuf if required for this window */ 4450 if (dma->dp_cb_remaping) { 4451 /* 4452 * calculate the page index into the buffer where this 4453 * window starts. 4454 */ 4455 pidx = (sinfo->si_buf_offset + window->wd_offset) >> 4456 MMU_PAGESHIFT; 4457 ASSERT(pidx < sinfo->si_max_pages); 4458 4459 /* 4460 * the first page can get unmapped if it's shared with the 4461 * previous window. Even if the rest of this window is already 4462 * mapped in, we need to still check this one. 
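 *
 * For context, this path runs under ddi_dma_getwin(); a driver
 * consuming a partial bind typically walks the windows like this
 * (sketch only, error handling and cookie programming elided):
 *
 *	(void) ddi_dma_numwin(handle, &nwin);
 *	for (w = 0; w < nwin; w++) {
 *		(void) ddi_dma_getwin(handle, w, &off, &len, &cookie,
 *		    &ccount);
 *		while (--ccount > 0)
 *			ddi_dma_nextcookie(handle, &cookie);
 *	}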
4463 */ 4464 pmap = &dma->dp_pgmap[pidx]; 4465 if ((pmap->pm_uses_copybuf) && (pmap->pm_mapped == B_FALSE)) { 4466 if (pmap->pm_pp != NULL) { 4467 pmap->pm_mapped = B_TRUE; 4468 i86_pp_map(pmap->pm_pp, pmap->pm_kaddr); 4469 } else if (pmap->pm_vaddr != NULL) { 4470 pmap->pm_mapped = B_TRUE; 4471 i86_va_map(pmap->pm_vaddr, sinfo->si_asp, 4472 pmap->pm_kaddr); 4473 } 4474 } 4475 pidx++; 4476 4477 /* map in the rest of the pages if required */ 4478 if (window->wd_remap_copybuf) { 4479 window->wd_remap_copybuf = B_FALSE; 4480 4481 /* figure out many pages this window takes up */ 4482 poff = (sinfo->si_buf_offset + window->wd_offset) & 4483 MMU_PAGEOFFSET; 4484 pcnt = mmu_btopr(window->wd_size + poff); 4485 ASSERT(((pidx - 1) + pcnt) <= sinfo->si_max_pages); 4486 4487 /* map pages which require it */ 4488 for (i = 1; i < pcnt; i++) { 4489 pmap = &dma->dp_pgmap[pidx]; 4490 if (pmap->pm_uses_copybuf) { 4491 ASSERT(pmap->pm_mapped == B_FALSE); 4492 if (pmap->pm_pp != NULL) { 4493 pmap->pm_mapped = B_TRUE; 4494 i86_pp_map(pmap->pm_pp, 4495 pmap->pm_kaddr); 4496 } else if (pmap->pm_vaddr != NULL) { 4497 pmap->pm_mapped = B_TRUE; 4498 i86_va_map(pmap->pm_vaddr, 4499 sinfo->si_asp, 4500 pmap->pm_kaddr); 4501 } 4502 } 4503 pidx++; 4504 } 4505 } 4506 } 4507 #endif 4508 4509 /* if the new window uses the copy buffer, sync it for the device */ 4510 if ((window->wd_dosync) && (hp->dmai_rflags & DDI_DMA_WRITE)) { 4511 (void) rootnex_coredma_sync(dip, rdip, handle, 0, 0, 4512 DDI_DMA_SYNC_FORDEV); 4513 } 4514 4515 return (DDI_SUCCESS); 4516 } 4517 4518 /* 4519 * rootnex_dma_win() 4520 * called from ddi_dma_getwin() 4521 */ 4522 /*ARGSUSED*/ 4523 static int 4524 rootnex_dma_win(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle, 4525 uint_t win, off_t *offp, size_t *lenp, ddi_dma_cookie_t *cookiep, 4526 uint_t *ccountp) 4527 { 4528 #if !defined(__xpv) 4529 if (IOMMU_USED(handle)) { 4530 return (iommulib_nexdma_win(dip, rdip, handle, win, offp, lenp, 4531 cookiep, ccountp)); 4532 } 4533 #endif 4534 4535 return (rootnex_coredma_win(dip, rdip, handle, win, offp, lenp, 4536 cookiep, ccountp)); 4537 } 4538 4539 /* 4540 * ************************ 4541 * obsoleted dma routines 4542 * ************************ 4543 */ 4544 4545 /* ARGSUSED */ 4546 static int 4547 rootnex_coredma_map(dev_info_t *dip, dev_info_t *rdip, 4548 struct ddi_dma_req *dmareq, ddi_dma_handle_t *handlep) 4549 { 4550 #if defined(__amd64) 4551 /* 4552 * this interface is not supported in 64-bit x86 kernel. See comment in 4553 * rootnex_dma_mctl() 4554 */ 4555 return (DDI_DMA_NORESOURCES); 4556 4557 #else /* 32-bit x86 kernel */ 4558 ddi_dma_handle_t *lhandlep; 4559 ddi_dma_handle_t lhandle; 4560 ddi_dma_cookie_t cookie; 4561 ddi_dma_attr_t dma_attr; 4562 ddi_dma_lim_t *dma_lim; 4563 uint_t ccnt; 4564 int e; 4565 4566 4567 /* 4568 * if the driver is just testing to see if it's possible to do the bind, 4569 * we'll use local state. Otherwise, use the handle pointer passed in. 
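 *
 * This is the path behind the obsolete ddi_dma_setup() interface; a
 * caller probing for feasibility passes a NULL handle pointer, roughly
 *
 *	if (ddi_dma_setup(dip, &dmareq, NULL) == DDI_DMA_MAPOK)
 *		... the request can be mapped ...
 *
 * (assumed usage, shown only to explain the handlep == NULL case).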
4570 */
4571 if (handlep == NULL) {
4572 lhandlep = &lhandle;
4573 } else {
4574 lhandlep = handlep;
4575 }
4576
4577 /* convert the limit structure to a dma_attr one */
4578 dma_lim = dmareq->dmar_limits;
4579 dma_attr.dma_attr_version = DMA_ATTR_V0;
4580 dma_attr.dma_attr_addr_lo = dma_lim->dlim_addr_lo;
4581 dma_attr.dma_attr_addr_hi = dma_lim->dlim_addr_hi;
4582 dma_attr.dma_attr_minxfer = dma_lim->dlim_minxfer;
4583 dma_attr.dma_attr_seg = dma_lim->dlim_adreg_max;
4584 dma_attr.dma_attr_count_max = dma_lim->dlim_ctreg_max;
4585 dma_attr.dma_attr_granular = dma_lim->dlim_granular;
4586 dma_attr.dma_attr_sgllen = dma_lim->dlim_sgllen;
4587 dma_attr.dma_attr_maxxfer = dma_lim->dlim_reqsize;
4588 dma_attr.dma_attr_burstsizes = dma_lim->dlim_burstsizes;
4589 dma_attr.dma_attr_align = MMU_PAGESIZE;
4590 dma_attr.dma_attr_flags = 0;
4591
4592 e = rootnex_dma_allochdl(dip, rdip, &dma_attr, dmareq->dmar_fp,
4593 dmareq->dmar_arg, lhandlep);
4594 if (e != DDI_SUCCESS) {
4595 return (e);
4596 }
4597
4598 e = rootnex_dma_bindhdl(dip, rdip, *lhandlep, dmareq, &cookie, &ccnt);
4599 if ((e != DDI_DMA_MAPPED) && (e != DDI_DMA_PARTIAL_MAP)) {
4600 (void) rootnex_dma_freehdl(dip, rdip, *lhandlep);
4601 return (e);
4602 }
4603
4604 /*
4605 * if the driver is just testing to see if it's possible to do the bind,
4606 * free up the local state and return the result.
4607 */
4608 if (handlep == NULL) {
4609 (void) rootnex_dma_unbindhdl(dip, rdip, *lhandlep);
4610 (void) rootnex_dma_freehdl(dip, rdip, *lhandlep);
4611 if (e == DDI_DMA_MAPPED) {
4612 return (DDI_DMA_MAPOK);
4613 } else {
4614 return (DDI_DMA_NOMAPPING);
4615 }
4616 }
4617
4618 return (e);
4619 #endif /* defined(__amd64) */
4620 }
4621
4622 /*
4623 * rootnex_dma_map()
4624 * called from ddi_dma_setup()
4625 */
4626 /* ARGSUSED */
4627 static int
4628 rootnex_dma_map(dev_info_t *dip, dev_info_t *rdip,
4629 struct ddi_dma_req *dmareq, ddi_dma_handle_t *handlep)
4630 {
4631 /* NO IOMMU in 32 bit mode */
4632 return (rootnex_coredma_map(dip, rdip, dmareq, handlep));
4633 }
4634
4635 /*
4636 * rootnex_coredma_mctl()
4637 *
4638 */
4639 /* ARGSUSED */
4640 static int
4641 rootnex_coredma_mctl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle,
4642 enum ddi_dma_ctlops request, off_t *offp, size_t *lenp, caddr_t *objpp,
4643 uint_t cache_flags)
4644 {
4645 #if defined(__amd64)
4646 /*
4647 * DDI_DMA_SMEM_ALLOC & DDI_DMA_IOPB_ALLOC were changed to have a
4648 * common implementation in genunix, so they no longer have x86
4649 * specific functionality which called into dma_ctl.
4650 *
4651 * The rest of the obsoleted interfaces were never supported in the
4652 * 64-bit x86 kernel. For s10, the obsoleted DDI_DMA_SEGTOC interface
4653 * was not ported to the x86 64-bit kernel due to serious x86 rootnex
4654 * implementation issues.
4655 *
4656 * If you can't use DDI_DMA_SEGTOC, then DDI_DMA_NEXTSEG, DDI_DMA_FREE,
4657 * and DDI_DMA_NEXTWIN are useless since you can't get to the cookie,
4658 * so we reflect that now too...
4659 *
4660 * Even though we fixed the pointer problem in DDI_DMA_SEGTOC, we are
4661 * not going to put this functionality into the 64-bit x86 kernel now.
4662 * It wasn't ported to the 64-bit kernel for s10, no reason to change
4663 * that in a future release.
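 *
 * Drivers are expected to use the handle/cookie interfaces directly
 * instead; a minimal sketch of the replacement sequence (flags and
 * arguments here are assumptions, error handling and cookie
 * programming elided):
 *
 *	(void) ddi_dma_alloc_handle(dip, &attr, DDI_DMA_SLEEP, NULL, &h);
 *	(void) ddi_dma_buf_bind_handle(h, bp, DDI_DMA_READ |
 *	    DDI_DMA_STREAMING, DDI_DMA_SLEEP, NULL, &cookie, &ccount);
 *	while (--ccount > 0)
 *		ddi_dma_nextcookie(h, &cookie);
 *	(void) ddi_dma_unbind_handle(h);
 *	ddi_dma_free_handle(&h);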
4664 */
4665 return (DDI_FAILURE);
4666
4667 #else /* 32-bit x86 kernel */
4668 ddi_dma_cookie_t lcookie;
4669 ddi_dma_cookie_t *cookie;
4670 rootnex_window_t *window;
4671 ddi_dma_impl_t *hp;
4672 rootnex_dma_t *dma;
4673 uint_t nwin;
4674 uint_t ccnt;
4675 size_t len;
4676 off_t off;
4677 int e;
4678
4679
4680 /*
4681 * DDI_DMA_SEGTOC, DDI_DMA_NEXTSEG, and DDI_DMA_NEXTWIN are a little
4682 * hacky since we're optimizing for the current interfaces and so we can
4683 * clean up the mess in genunix. Hopefully we will remove these
4684 * obsoleted routines someday soon.
4685 */
4686
4687 switch (request) {
4688
4689 case DDI_DMA_SEGTOC: /* ddi_dma_segtocookie() */
4690 hp = (ddi_dma_impl_t *)handle;
4691 cookie = (ddi_dma_cookie_t *)objpp;
4692
4693 /*
4694 * convert segment to cookie. We don't distinguish between the
4695 * two :-)
4696 */
4697 *cookie = *hp->dmai_cookie;
4698 *lenp = cookie->dmac_size;
4699 *offp = cookie->dmac_type & ~ROOTNEX_USES_COPYBUF;
4700 return (DDI_SUCCESS);
4701
4702 case DDI_DMA_NEXTSEG: /* ddi_dma_nextseg() */
4703 hp = (ddi_dma_impl_t *)handle;
4704 dma = (rootnex_dma_t *)hp->dmai_private;
4705
4706 if ((*lenp != NULL) && ((uintptr_t)*lenp != (uintptr_t)hp)) {
4707 return (DDI_DMA_STALE);
4708 }
4709
4710 /* handle the case where we don't have any windows */
4711 if (dma->dp_window == NULL) {
4712 /*
4713 * if seg == NULL, and we don't have any windows,
4714 * return the first cookie in the sgl.
4715 */
4716 if (*lenp == NULL) {
4717 dma->dp_current_cookie = 0;
4718 hp->dmai_cookie = dma->dp_cookies;
4719 *objpp = (caddr_t)handle;
4720 return (DDI_SUCCESS);
4721
4722 /* if we have more cookies, go to the next cookie */
4723 } else {
4724 if ((dma->dp_current_cookie + 1) >=
4725 dma->dp_sglinfo.si_sgl_size) {
4726 return (DDI_DMA_DONE);
4727 }
4728 dma->dp_current_cookie++;
4729 hp->dmai_cookie++;
4730 return (DDI_SUCCESS);
4731 }
4732 }
4733
4734 /* We have one or more windows */
4735 window = &dma->dp_window[dma->dp_current_win];
4736
4737 /*
4738 * if seg == NULL, return the first cookie in the current
4739 * window
4740 */
4741 if (*lenp == NULL) {
4742 dma->dp_current_cookie = 0;
4743 hp->dmai_cookie = window->wd_first_cookie;
4744
4745 /*
4746 * go to the next cookie in the window then see if we're done
4747 * with this window.
4748 */
4749 } else {
4750 if ((dma->dp_current_cookie + 1) >=
4751 window->wd_cookie_cnt) {
4752 return (DDI_DMA_DONE);
4753 }
4754 dma->dp_current_cookie++;
4755 hp->dmai_cookie++;
4756 }
4757 *objpp = (caddr_t)handle;
4758 return (DDI_SUCCESS);
4759
4760 case DDI_DMA_NEXTWIN: /* ddi_dma_nextwin() */
4761 hp = (ddi_dma_impl_t *)handle;
4762 dma = (rootnex_dma_t *)hp->dmai_private;
4763
4764 if ((*offp != NULL) && ((uintptr_t)*offp != (uintptr_t)hp)) {
4765 return (DDI_DMA_STALE);
4766 }
4767
4768 /* if win == NULL, return the first window in the bind */
4769 if (*offp == NULL) {
4770 nwin = 0;
4771
4772 /*
4773 * else, go to the next window then see if we're done with all
4774 * the windows.
4775 */ 4776 } else { 4777 nwin = dma->dp_current_win + 1; 4778 if (nwin >= hp->dmai_nwin) { 4779 return (DDI_DMA_DONE); 4780 } 4781 } 4782 4783 /* switch to the next window */ 4784 e = rootnex_dma_win(dip, rdip, handle, nwin, &off, &len, 4785 &lcookie, &ccnt); 4786 ASSERT(e == DDI_SUCCESS); 4787 if (e != DDI_SUCCESS) { 4788 return (DDI_DMA_STALE); 4789 } 4790 4791 /* reset the cookie back to the first cookie in the window */ 4792 if (dma->dp_window != NULL) { 4793 window = &dma->dp_window[dma->dp_current_win]; 4794 hp->dmai_cookie = window->wd_first_cookie; 4795 } else { 4796 hp->dmai_cookie = dma->dp_cookies; 4797 } 4798 4799 *objpp = (caddr_t)handle; 4800 return (DDI_SUCCESS); 4801 4802 case DDI_DMA_FREE: /* ddi_dma_free() */ 4803 (void) rootnex_dma_unbindhdl(dip, rdip, handle); 4804 (void) rootnex_dma_freehdl(dip, rdip, handle); 4805 if (rootnex_state->r_dvma_call_list_id) { 4806 ddi_run_callback(&rootnex_state->r_dvma_call_list_id); 4807 } 4808 return (DDI_SUCCESS); 4809 4810 case DDI_DMA_IOPB_ALLOC: /* get contiguous DMA-able memory */ 4811 case DDI_DMA_SMEM_ALLOC: /* get contiguous DMA-able memory */ 4812 /* should never get here, handled in genunix */ 4813 ASSERT(0); 4814 return (DDI_FAILURE); 4815 4816 case DDI_DMA_KVADDR: 4817 case DDI_DMA_GETERR: 4818 case DDI_DMA_COFF: 4819 return (DDI_FAILURE); 4820 } 4821 4822 return (DDI_FAILURE); 4823 #endif /* defined(__amd64) */ 4824 } 4825 4826 /* 4827 * rootnex_dma_mctl() 4828 * 4829 */ 4830 /* ARGSUSED */ 4831 static int 4832 rootnex_dma_mctl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle, 4833 enum ddi_dma_ctlops request, off_t *offp, size_t *lenp, caddr_t *objpp, 4834 uint_t cache_flags) 4835 { 4836 /* NO IOMMU in 32 bit mode */ 4837 return (rootnex_coredma_mctl(dip, rdip, handle, request, offp, 4838 lenp, objpp, cache_flags)); 4839 } 4840 4841 /* 4842 * ********* 4843 * FMA Code 4844 * ********* 4845 */ 4846 4847 /* 4848 * rootnex_fm_init() 4849 * FMA init busop 4850 */ 4851 /* ARGSUSED */ 4852 static int 4853 rootnex_fm_init(dev_info_t *dip, dev_info_t *tdip, int tcap, 4854 ddi_iblock_cookie_t *ibc) 4855 { 4856 *ibc = rootnex_state->r_err_ibc; 4857 4858 return (ddi_system_fmcap); 4859 } 4860 4861 /* 4862 * rootnex_dma_check() 4863 * Function called after a dma fault occurred to find out whether the 4864 * fault address is associated with a driver that is able to handle faults 4865 * and recover from faults. 4866 */ 4867 /* ARGSUSED */ 4868 static int 4869 rootnex_dma_check(dev_info_t *dip, const void *handle, const void *addr, 4870 const void *not_used) 4871 { 4872 rootnex_window_t *window; 4873 uint64_t start_addr; 4874 uint64_t fault_addr; 4875 ddi_dma_impl_t *hp; 4876 rootnex_dma_t *dma; 4877 uint64_t end_addr; 4878 size_t csize; 4879 int i; 4880 int j; 4881 4882 4883 /* The driver has to set DDI_DMA_FLAGERR to recover from dma faults */ 4884 hp = (ddi_dma_impl_t *)handle; 4885 ASSERT(hp); 4886 4887 dma = (rootnex_dma_t *)hp->dmai_private; 4888 4889 /* Get the address that we need to search for */ 4890 fault_addr = *(uint64_t *)addr; 4891 4892 /* 4893 * if we don't have any windows, we can just walk through all the 4894 * cookies. 4895 */ 4896 if (dma->dp_window == NULL) { 4897 /* for each cookie */ 4898 for (i = 0; i < dma->dp_sglinfo.si_sgl_size; i++) { 4899 /* 4900 * if the faulted address is within the physical address 4901 * range of the cookie, return DDI_FM_NONFATAL. 
4902 */ 4903 if ((fault_addr >= dma->dp_cookies[i].dmac_laddress) && 4904 (fault_addr <= (dma->dp_cookies[i].dmac_laddress + 4905 dma->dp_cookies[i].dmac_size))) { 4906 return (DDI_FM_NONFATAL); 4907 } 4908 } 4909 4910 /* fault_addr not within this DMA handle */ 4911 return (DDI_FM_UNKNOWN); 4912 } 4913 4914 /* we have mutiple windows, walk through each window */ 4915 for (i = 0; i < hp->dmai_nwin; i++) { 4916 window = &dma->dp_window[i]; 4917 4918 /* Go through all the cookies in the window */ 4919 for (j = 0; j < window->wd_cookie_cnt; j++) { 4920 4921 start_addr = window->wd_first_cookie[j].dmac_laddress; 4922 csize = window->wd_first_cookie[j].dmac_size; 4923 4924 /* 4925 * if we are trimming the first cookie in the window, 4926 * and this is the first cookie, adjust the start 4927 * address and size of the cookie to account for the 4928 * trim. 4929 */ 4930 if (window->wd_trim.tr_trim_first && (j == 0)) { 4931 start_addr = window->wd_trim.tr_first_paddr; 4932 csize = window->wd_trim.tr_first_size; 4933 } 4934 4935 /* 4936 * if we are trimming the last cookie in the window, 4937 * and this is the last cookie, adjust the start 4938 * address and size of the cookie to account for the 4939 * trim. 4940 */ 4941 if (window->wd_trim.tr_trim_last && 4942 (j == (window->wd_cookie_cnt - 1))) { 4943 start_addr = window->wd_trim.tr_last_paddr; 4944 csize = window->wd_trim.tr_last_size; 4945 } 4946 4947 end_addr = start_addr + csize; 4948 4949 /* 4950 * if the faulted address is within the physical address 4951 * range of the cookie, return DDI_FM_NONFATAL. 4952 */ 4953 if ((fault_addr >= start_addr) && 4954 (fault_addr <= end_addr)) { 4955 return (DDI_FM_NONFATAL); 4956 } 4957 } 4958 } 4959 4960 /* fault_addr not within this DMA handle */ 4961 return (DDI_FM_UNKNOWN); 4962 } 4963
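
/*
 * Example usage (illustrative driver-side sketch; the flags and variable
 * names below are assumptions, not rootnex code): for a leaf driver's
 * handle to be found by rootnex_dma_check() above, the driver registers
 * as DMA-check capable and binds its handles with DDI_DMA_FLAGERR,
 * roughly:
 *
 *	int fmcap = DDI_FM_DMACHK_CAPABLE | DDI_FM_EREPORT_CAPABLE;
 *	ddi_fm_init(dip, &fmcap, &ibc);
 *
 *	attr.dma_attr_flags |= DDI_DMA_FLAGERR;
 *	(void) ddi_dma_alloc_handle(dip, &attr, DDI_DMA_SLEEP, NULL, &h);
 *
 * and, after a fault, it retrieves the per-handle error state with
 * ddi_fm_dma_err_get(h, &de, DDI_FME_VERSION).
 */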