1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 /* 22 * Copyright (c) 1992, 2010, Oracle and/or its affiliates. All rights reserved. 23 */ 24 25 /* 26 * x86 root nexus driver 27 */ 28 29 #include <sys/sysmacros.h> 30 #include <sys/conf.h> 31 #include <sys/autoconf.h> 32 #include <sys/sysmacros.h> 33 #include <sys/debug.h> 34 #include <sys/psw.h> 35 #include <sys/ddidmareq.h> 36 #include <sys/promif.h> 37 #include <sys/devops.h> 38 #include <sys/kmem.h> 39 #include <sys/cmn_err.h> 40 #include <vm/seg.h> 41 #include <vm/seg_kmem.h> 42 #include <vm/seg_dev.h> 43 #include <sys/vmem.h> 44 #include <sys/mman.h> 45 #include <vm/hat.h> 46 #include <vm/as.h> 47 #include <vm/page.h> 48 #include <sys/avintr.h> 49 #include <sys/errno.h> 50 #include <sys/modctl.h> 51 #include <sys/ddi_impldefs.h> 52 #include <sys/sunddi.h> 53 #include <sys/sunndi.h> 54 #include <sys/mach_intr.h> 55 #include <sys/psm.h> 56 #include <sys/ontrap.h> 57 #include <sys/atomic.h> 58 #include <sys/sdt.h> 59 #include <sys/rootnex.h> 60 #include <vm/hat_i86.h> 61 #include <sys/ddifm.h> 62 #include <sys/ddi_isa.h> 63 #include <sys/apic.h> 64 65 #ifdef __xpv 66 #include <sys/bootinfo.h> 67 #include <sys/hypervisor.h> 68 #include <sys/bootconf.h> 69 #include <vm/kboot_mmu.h> 70 #endif 71 72 #if defined(__amd64) && !defined(__xpv) 73 #include <sys/immu.h> 74 #endif 75 76 77 /* 78 * enable/disable extra checking of function parameters. Useful for debugging 79 * drivers. 80 */ 81 #ifdef DEBUG 82 int rootnex_alloc_check_parms = 1; 83 int rootnex_bind_check_parms = 1; 84 int rootnex_bind_check_inuse = 1; 85 int rootnex_unbind_verify_buffer = 0; 86 int rootnex_sync_check_parms = 1; 87 #else 88 int rootnex_alloc_check_parms = 0; 89 int rootnex_bind_check_parms = 0; 90 int rootnex_bind_check_inuse = 0; 91 int rootnex_unbind_verify_buffer = 0; 92 int rootnex_sync_check_parms = 0; 93 #endif 94 95 boolean_t rootnex_dmar_not_setup; 96 97 /* Master Abort and Target Abort panic flag */ 98 int rootnex_fm_ma_ta_panic_flag = 0; 99 100 /* Semi-temporary patchables to phase in bug fixes, test drivers, etc. */ 101 int rootnex_bind_fail = 1; 102 int rootnex_bind_warn = 1; 103 uint8_t *rootnex_warn_list; 104 /* bitmasks for rootnex_warn_list. Up to 8 different warnings with uint8_t */ 105 #define ROOTNEX_BIND_WARNING (0x1 << 0) 106 107 /* 108 * revert back to old broken behavior of always sync'ing entire copy buffer. 109 * This is useful if be have a buggy driver which doesn't correctly pass in 110 * the offset and size into ddi_dma_sync(). 111 */ 112 int rootnex_sync_ignore_params = 0; 113 114 /* 115 * For the 64-bit kernel, pre-alloc enough cookies for a 256K buffer plus 1 116 * page for alignment. 
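 * (On the 64-bit kernel that works out to 256K / 4K = 64 pages, plus one
 * extra cookie for a buffer which does not start on a page boundary,
 * which is where rootnex_prealloc_cookies = 65 below comes from.)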
For the 32-bit kernel, pre-alloc enough cookies for a 117 * 64K buffer plus 1 page for alignment (we have less kernel space in a 32-bit 118 * kernel). Allocate enough windows to handle a 256K buffer w/ at least 65 119 * sgllen DMA engine, and enough copybuf buffer state pages to handle 2 pages 120 * (< 8K). We will still need to allocate the copy buffer during bind though 121 * (if we need one). These can only be modified in /etc/system before rootnex 122 * attach. 123 */ 124 #if defined(__amd64) 125 int rootnex_prealloc_cookies = 65; 126 int rootnex_prealloc_windows = 4; 127 int rootnex_prealloc_copybuf = 2; 128 #else 129 int rootnex_prealloc_cookies = 33; 130 int rootnex_prealloc_windows = 4; 131 int rootnex_prealloc_copybuf = 2; 132 #endif 133 134 /* driver global state */ 135 static rootnex_state_t *rootnex_state; 136 137 /* shortcut to rootnex counters */ 138 static uint64_t *rootnex_cnt; 139 140 /* 141 * XXX - does x86 even need these or are they left over from the SPARC days? 142 */ 143 /* statically defined integer/boolean properties for the root node */ 144 static rootnex_intprop_t rootnex_intprp[] = { 145 { "PAGESIZE", PAGESIZE }, 146 { "MMU_PAGESIZE", MMU_PAGESIZE }, 147 { "MMU_PAGEOFFSET", MMU_PAGEOFFSET }, 148 { DDI_RELATIVE_ADDRESSING, 1 }, 149 }; 150 #define NROOT_INTPROPS (sizeof (rootnex_intprp) / sizeof (rootnex_intprop_t)) 151 152 #ifdef __xpv 153 typedef maddr_t rootnex_addr_t; 154 #define ROOTNEX_PADDR_TO_RBASE(xinfo, pa) \ 155 (DOMAIN_IS_INITDOMAIN(xinfo) ? pa_to_ma(pa) : (pa)) 156 #else 157 typedef paddr_t rootnex_addr_t; 158 #endif 159 160 #if !defined(__xpv) 161 char _depends_on[] = "misc/iommulib misc/acpica"; 162 #endif 163 164 static struct cb_ops rootnex_cb_ops = { 165 nodev, /* open */ 166 nodev, /* close */ 167 nodev, /* strategy */ 168 nodev, /* print */ 169 nodev, /* dump */ 170 nodev, /* read */ 171 nodev, /* write */ 172 nodev, /* ioctl */ 173 nodev, /* devmap */ 174 nodev, /* mmap */ 175 nodev, /* segmap */ 176 nochpoll, /* chpoll */ 177 ddi_prop_op, /* cb_prop_op */ 178 NULL, /* struct streamtab */ 179 D_NEW | D_MP | D_HOTPLUG, /* compatibility flags */ 180 CB_REV, /* Rev */ 181 nodev, /* cb_aread */ 182 nodev /* cb_awrite */ 183 }; 184 185 static int rootnex_map(dev_info_t *dip, dev_info_t *rdip, ddi_map_req_t *mp, 186 off_t offset, off_t len, caddr_t *vaddrp); 187 static int rootnex_map_fault(dev_info_t *dip, dev_info_t *rdip, 188 struct hat *hat, struct seg *seg, caddr_t addr, 189 struct devpage *dp, pfn_t pfn, uint_t prot, uint_t lock); 190 static int rootnex_dma_map(dev_info_t *dip, dev_info_t *rdip, 191 struct ddi_dma_req *dmareq, ddi_dma_handle_t *handlep); 192 static int rootnex_dma_allochdl(dev_info_t *dip, dev_info_t *rdip, 193 ddi_dma_attr_t *attr, int (*waitfp)(caddr_t), caddr_t arg, 194 ddi_dma_handle_t *handlep); 195 static int rootnex_dma_freehdl(dev_info_t *dip, dev_info_t *rdip, 196 ddi_dma_handle_t handle); 197 static int rootnex_dma_bindhdl(dev_info_t *dip, dev_info_t *rdip, 198 ddi_dma_handle_t handle, struct ddi_dma_req *dmareq, 199 ddi_dma_cookie_t *cookiep, uint_t *ccountp); 200 static int rootnex_dma_unbindhdl(dev_info_t *dip, dev_info_t *rdip, 201 ddi_dma_handle_t handle); 202 static int rootnex_dma_sync(dev_info_t *dip, dev_info_t *rdip, 203 ddi_dma_handle_t handle, off_t off, size_t len, uint_t cache_flags); 204 static int rootnex_dma_win(dev_info_t *dip, dev_info_t *rdip, 205 ddi_dma_handle_t handle, uint_t win, off_t *offp, size_t *lenp, 206 ddi_dma_cookie_t *cookiep, uint_t *ccountp); 207 static int 
rootnex_dma_mctl(dev_info_t *dip, dev_info_t *rdip, 208 ddi_dma_handle_t handle, enum ddi_dma_ctlops request, 209 off_t *offp, size_t *lenp, caddr_t *objp, uint_t cache_flags); 210 static int rootnex_ctlops(dev_info_t *dip, dev_info_t *rdip, 211 ddi_ctl_enum_t ctlop, void *arg, void *result); 212 static int rootnex_fm_init(dev_info_t *dip, dev_info_t *tdip, int tcap, 213 ddi_iblock_cookie_t *ibc); 214 static int rootnex_intr_ops(dev_info_t *pdip, dev_info_t *rdip, 215 ddi_intr_op_t intr_op, ddi_intr_handle_impl_t *hdlp, void *result); 216 static int rootnex_alloc_intr_fixed(dev_info_t *, ddi_intr_handle_impl_t *, 217 void *); 218 static int rootnex_free_intr_fixed(dev_info_t *, ddi_intr_handle_impl_t *); 219 220 static int rootnex_coredma_allochdl(dev_info_t *dip, dev_info_t *rdip, 221 ddi_dma_attr_t *attr, int (*waitfp)(caddr_t), caddr_t arg, 222 ddi_dma_handle_t *handlep); 223 static int rootnex_coredma_freehdl(dev_info_t *dip, dev_info_t *rdip, 224 ddi_dma_handle_t handle); 225 static int rootnex_coredma_bindhdl(dev_info_t *dip, dev_info_t *rdip, 226 ddi_dma_handle_t handle, struct ddi_dma_req *dmareq, 227 ddi_dma_cookie_t *cookiep, uint_t *ccountp); 228 static int rootnex_coredma_unbindhdl(dev_info_t *dip, dev_info_t *rdip, 229 ddi_dma_handle_t handle); 230 #if defined(__amd64) && !defined(__xpv) 231 static void rootnex_coredma_reset_cookies(dev_info_t *dip, 232 ddi_dma_handle_t handle); 233 static int rootnex_coredma_get_cookies(dev_info_t *dip, ddi_dma_handle_t handle, 234 ddi_dma_cookie_t **cookiepp, uint_t *ccountp); 235 static int rootnex_coredma_set_cookies(dev_info_t *dip, ddi_dma_handle_t handle, 236 ddi_dma_cookie_t *cookiep, uint_t ccount); 237 static int rootnex_coredma_clear_cookies(dev_info_t *dip, 238 ddi_dma_handle_t handle); 239 static int rootnex_coredma_get_sleep_flags(ddi_dma_handle_t handle); 240 #endif 241 static int rootnex_coredma_sync(dev_info_t *dip, dev_info_t *rdip, 242 ddi_dma_handle_t handle, off_t off, size_t len, uint_t cache_flags); 243 static int rootnex_coredma_win(dev_info_t *dip, dev_info_t *rdip, 244 ddi_dma_handle_t handle, uint_t win, off_t *offp, size_t *lenp, 245 ddi_dma_cookie_t *cookiep, uint_t *ccountp); 246 247 static struct bus_ops rootnex_bus_ops = { 248 BUSO_REV, 249 rootnex_map, 250 NULL, 251 NULL, 252 NULL, 253 rootnex_map_fault, 254 rootnex_dma_map, 255 rootnex_dma_allochdl, 256 rootnex_dma_freehdl, 257 rootnex_dma_bindhdl, 258 rootnex_dma_unbindhdl, 259 rootnex_dma_sync, 260 rootnex_dma_win, 261 rootnex_dma_mctl, 262 rootnex_ctlops, 263 ddi_bus_prop_op, 264 i_ddi_rootnex_get_eventcookie, 265 i_ddi_rootnex_add_eventcall, 266 i_ddi_rootnex_remove_eventcall, 267 i_ddi_rootnex_post_event, 268 0, /* bus_intr_ctl */ 269 0, /* bus_config */ 270 0, /* bus_unconfig */ 271 rootnex_fm_init, /* bus_fm_init */ 272 NULL, /* bus_fm_fini */ 273 NULL, /* bus_fm_access_enter */ 274 NULL, /* bus_fm_access_exit */ 275 NULL, /* bus_powr */ 276 rootnex_intr_ops /* bus_intr_op */ 277 }; 278 279 static int rootnex_attach(dev_info_t *dip, ddi_attach_cmd_t cmd); 280 static int rootnex_detach(dev_info_t *dip, ddi_detach_cmd_t cmd); 281 static int rootnex_quiesce(dev_info_t *dip); 282 283 static struct dev_ops rootnex_ops = { 284 DEVO_REV, 285 0, 286 ddi_no_info, 287 nulldev, 288 nulldev, 289 rootnex_attach, 290 rootnex_detach, 291 nulldev, 292 &rootnex_cb_ops, 293 &rootnex_bus_ops, 294 NULL, 295 rootnex_quiesce, /* quiesce */ 296 }; 297 298 static struct modldrv rootnex_modldrv = { 299 &mod_driverops, 300 "i86pc root nexus", 301 &rootnex_ops 302 }; 303 304 
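/*
 * Illustrative sketch, not part of this driver: a leaf driver sitting
 * under the root nexus never calls the rootnex_* entry points directly.
 * It uses the generic DDI interfaces, and the framework dispatches the
 * request up the devinfo tree into the rootnex_bus_ops table above.
 * For example, a ddi_dma_alloc_handle(9F) call like the hypothetical
 * one below ends up in rootnex_dma_allochdl() (the "xx_" names and the
 * attribute are made up for the example):
 *
 *	ddi_dma_handle_t xx_dma_handle;
 *
 *	if (ddi_dma_alloc_handle(xx_dip, &xx_dma_attr, DDI_DMA_SLEEP,
 *	    NULL, &xx_dma_handle) != DDI_SUCCESS)
 *		return (DDI_FAILURE);
 *
 * where xx_dma_attr is a ddi_dma_attr_t describing the device's DMA
 * engine limits (dma_attr_addr_hi, dma_attr_seg, dma_attr_sgllen, ...).
 */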
static struct modlinkage rootnex_modlinkage = { 305 MODREV_1, 306 (void *)&rootnex_modldrv, 307 NULL 308 }; 309 310 #if defined(__amd64) && !defined(__xpv) 311 static iommulib_nexops_t iommulib_nexops = { 312 IOMMU_NEXOPS_VERSION, 313 "Rootnex IOMMU ops Vers 1.1", 314 NULL, 315 rootnex_coredma_allochdl, 316 rootnex_coredma_freehdl, 317 rootnex_coredma_bindhdl, 318 rootnex_coredma_unbindhdl, 319 rootnex_coredma_reset_cookies, 320 rootnex_coredma_get_cookies, 321 rootnex_coredma_set_cookies, 322 rootnex_coredma_clear_cookies, 323 rootnex_coredma_get_sleep_flags, 324 rootnex_coredma_sync, 325 rootnex_coredma_win, 326 rootnex_dma_map, 327 rootnex_dma_mctl 328 }; 329 #endif 330 331 /* 332 * extern hacks 333 */ 334 extern struct seg_ops segdev_ops; 335 extern int ignore_hardware_nodes; /* force flag from ddi_impl.c */ 336 #ifdef DDI_MAP_DEBUG 337 extern int ddi_map_debug_flag; 338 #define ddi_map_debug if (ddi_map_debug_flag) prom_printf 339 #endif 340 extern void i86_pp_map(page_t *pp, caddr_t kaddr); 341 extern void i86_va_map(caddr_t vaddr, struct as *asp, caddr_t kaddr); 342 extern int (*psm_intr_ops)(dev_info_t *, ddi_intr_handle_impl_t *, 343 psm_intr_op_t, int *); 344 extern int impl_ddi_sunbus_initchild(dev_info_t *dip); 345 extern void impl_ddi_sunbus_removechild(dev_info_t *dip); 346 347 /* 348 * Use device arena to use for device control register mappings. 349 * Various kernel memory walkers (debugger, dtrace) need to know 350 * to avoid this address range to prevent undesired device activity. 351 */ 352 extern void *device_arena_alloc(size_t size, int vm_flag); 353 extern void device_arena_free(void * vaddr, size_t size); 354 355 356 /* 357 * Internal functions 358 */ 359 static int rootnex_dma_init(); 360 static void rootnex_add_props(dev_info_t *); 361 static int rootnex_ctl_reportdev(dev_info_t *dip); 362 static struct intrspec *rootnex_get_ispec(dev_info_t *rdip, int inum); 363 static int rootnex_map_regspec(ddi_map_req_t *mp, caddr_t *vaddrp); 364 static int rootnex_unmap_regspec(ddi_map_req_t *mp, caddr_t *vaddrp); 365 static int rootnex_map_handle(ddi_map_req_t *mp); 366 static void rootnex_clean_dmahdl(ddi_dma_impl_t *hp); 367 static int rootnex_valid_alloc_parms(ddi_dma_attr_t *attr, uint_t maxsegsize); 368 static int rootnex_valid_bind_parms(ddi_dma_req_t *dmareq, 369 ddi_dma_attr_t *attr); 370 static void rootnex_get_sgl(ddi_dma_obj_t *dmar_object, ddi_dma_cookie_t *sgl, 371 rootnex_sglinfo_t *sglinfo); 372 static int rootnex_bind_slowpath(ddi_dma_impl_t *hp, struct ddi_dma_req *dmareq, 373 rootnex_dma_t *dma, ddi_dma_attr_t *attr, int kmflag); 374 static int rootnex_setup_copybuf(ddi_dma_impl_t *hp, struct ddi_dma_req *dmareq, 375 rootnex_dma_t *dma, ddi_dma_attr_t *attr); 376 static void rootnex_teardown_copybuf(rootnex_dma_t *dma); 377 static int rootnex_setup_windows(ddi_dma_impl_t *hp, rootnex_dma_t *dma, 378 ddi_dma_attr_t *attr, int kmflag); 379 static void rootnex_teardown_windows(rootnex_dma_t *dma); 380 static void rootnex_init_win(ddi_dma_impl_t *hp, rootnex_dma_t *dma, 381 rootnex_window_t *window, ddi_dma_cookie_t *cookie, off_t cur_offset); 382 static void rootnex_setup_cookie(ddi_dma_obj_t *dmar_object, 383 rootnex_dma_t *dma, ddi_dma_cookie_t *cookie, off_t cur_offset, 384 size_t *copybuf_used, page_t **cur_pp); 385 static int rootnex_sgllen_window_boundary(ddi_dma_impl_t *hp, 386 rootnex_dma_t *dma, rootnex_window_t **windowp, ddi_dma_cookie_t *cookie, 387 ddi_dma_attr_t *attr, off_t cur_offset); 388 static int 
rootnex_copybuf_window_boundary(ddi_dma_impl_t *hp, 389 rootnex_dma_t *dma, rootnex_window_t **windowp, 390 ddi_dma_cookie_t *cookie, off_t cur_offset, size_t *copybuf_used); 391 static int rootnex_maxxfer_window_boundary(ddi_dma_impl_t *hp, 392 rootnex_dma_t *dma, rootnex_window_t **windowp, ddi_dma_cookie_t *cookie); 393 static int rootnex_valid_sync_parms(ddi_dma_impl_t *hp, rootnex_window_t *win, 394 off_t offset, size_t size, uint_t cache_flags); 395 static int rootnex_verify_buffer(rootnex_dma_t *dma); 396 static int rootnex_dma_check(dev_info_t *dip, const void *handle, 397 const void *comp_addr, const void *not_used); 398 static boolean_t rootnex_need_bounce_seg(ddi_dma_obj_t *dmar_object, 399 rootnex_sglinfo_t *sglinfo); 400 401 /* 402 * _init() 403 * 404 */ 405 int 406 _init(void) 407 { 408 409 rootnex_state = NULL; 410 return (mod_install(&rootnex_modlinkage)); 411 } 412 413 414 /* 415 * _info() 416 * 417 */ 418 int 419 _info(struct modinfo *modinfop) 420 { 421 return (mod_info(&rootnex_modlinkage, modinfop)); 422 } 423 424 425 /* 426 * _fini() 427 * 428 */ 429 int 430 _fini(void) 431 { 432 return (EBUSY); 433 } 434 435 436 /* 437 * rootnex_attach() 438 * 439 */ 440 static int 441 rootnex_attach(dev_info_t *dip, ddi_attach_cmd_t cmd) 442 { 443 int fmcap; 444 int e; 445 446 switch (cmd) { 447 case DDI_ATTACH: 448 break; 449 case DDI_RESUME: 450 #if defined(__amd64) && !defined(__xpv) 451 return (immu_unquiesce()); 452 #else 453 return (DDI_SUCCESS); 454 #endif 455 default: 456 return (DDI_FAILURE); 457 } 458 459 /* 460 * We should only have one instance of rootnex. Save it away since we 461 * don't have an easy way to get it back later. 462 */ 463 ASSERT(rootnex_state == NULL); 464 rootnex_state = kmem_zalloc(sizeof (rootnex_state_t), KM_SLEEP); 465 466 rootnex_state->r_dip = dip; 467 rootnex_state->r_err_ibc = (ddi_iblock_cookie_t)ipltospl(15); 468 rootnex_state->r_reserved_msg_printed = B_FALSE; 469 rootnex_cnt = &rootnex_state->r_counters[0]; 470 471 /* 472 * Set minimum fm capability level for i86pc platforms and then 473 * initialize error handling. Since we're the rootnex, we don't 474 * care what's returned in the fmcap field. 
475 */ 476 ddi_system_fmcap = DDI_FM_EREPORT_CAPABLE | DDI_FM_ERRCB_CAPABLE | 477 DDI_FM_ACCCHK_CAPABLE | DDI_FM_DMACHK_CAPABLE; 478 fmcap = ddi_system_fmcap; 479 ddi_fm_init(dip, &fmcap, &rootnex_state->r_err_ibc); 480 481 /* initialize DMA related state */ 482 e = rootnex_dma_init(); 483 if (e != DDI_SUCCESS) { 484 kmem_free(rootnex_state, sizeof (rootnex_state_t)); 485 return (DDI_FAILURE); 486 } 487 488 /* Add static root node properties */ 489 rootnex_add_props(dip); 490 491 /* since we can't call ddi_report_dev() */ 492 cmn_err(CE_CONT, "?root nexus = %s\n", ddi_get_name(dip)); 493 494 /* Initialize rootnex event handle */ 495 i_ddi_rootnex_init_events(dip); 496 497 #if defined(__amd64) && !defined(__xpv) 498 e = iommulib_nexus_register(dip, &iommulib_nexops, 499 &rootnex_state->r_iommulib_handle); 500 501 ASSERT(e == DDI_SUCCESS); 502 #endif 503 504 return (DDI_SUCCESS); 505 } 506 507 508 /* 509 * rootnex_detach() 510 * 511 */ 512 /*ARGSUSED*/ 513 static int 514 rootnex_detach(dev_info_t *dip, ddi_detach_cmd_t cmd) 515 { 516 switch (cmd) { 517 case DDI_SUSPEND: 518 #if defined(__amd64) && !defined(__xpv) 519 return (immu_quiesce()); 520 #else 521 return (DDI_SUCCESS); 522 #endif 523 default: 524 return (DDI_FAILURE); 525 } 526 /*NOTREACHED*/ 527 528 } 529 530 531 /* 532 * rootnex_dma_init() 533 * 534 */ 535 /*ARGSUSED*/ 536 static int 537 rootnex_dma_init() 538 { 539 size_t bufsize; 540 541 542 /* 543 * size of our cookie/window/copybuf state needed in dma bind that we 544 * pre-alloc in dma_alloc_handle 545 */ 546 rootnex_state->r_prealloc_cookies = rootnex_prealloc_cookies; 547 rootnex_state->r_prealloc_size = 548 (rootnex_state->r_prealloc_cookies * sizeof (ddi_dma_cookie_t)) + 549 (rootnex_prealloc_windows * sizeof (rootnex_window_t)) + 550 (rootnex_prealloc_copybuf * sizeof (rootnex_pgmap_t)); 551 552 /* 553 * setup DDI DMA handle kmem cache, align each handle on 64 bytes, 554 * allocate 16 extra bytes for struct pointer alignment 555 * (p->dmai_private & dma->dp_prealloc_buffer) 556 */ 557 bufsize = sizeof (ddi_dma_impl_t) + sizeof (rootnex_dma_t) + 558 rootnex_state->r_prealloc_size + 0x10; 559 rootnex_state->r_dmahdl_cache = kmem_cache_create("rootnex_dmahdl", 560 bufsize, 64, NULL, NULL, NULL, NULL, NULL, 0); 561 if (rootnex_state->r_dmahdl_cache == NULL) { 562 return (DDI_FAILURE); 563 } 564 565 /* 566 * allocate array to track which major numbers we have printed warnings 567 * for. 568 */ 569 rootnex_warn_list = kmem_zalloc(devcnt * sizeof (*rootnex_warn_list), 570 KM_SLEEP); 571 572 return (DDI_SUCCESS); 573 } 574 575 576 /* 577 * rootnex_add_props() 578 * 579 */ 580 static void 581 rootnex_add_props(dev_info_t *dip) 582 { 583 rootnex_intprop_t *rpp; 584 int i; 585 586 /* Add static integer/boolean properties to the root node */ 587 rpp = rootnex_intprp; 588 for (i = 0; i < NROOT_INTPROPS; i++) { 589 (void) e_ddi_prop_update_int(DDI_DEV_T_NONE, dip, 590 rpp[i].prop_name, rpp[i].prop_value); 591 } 592 } 593 594 595 596 /* 597 * ************************* 598 * ctlops related routines 599 * ************************* 600 */ 601 602 /* 603 * rootnex_ctlops() 604 * 605 */ 606 /*ARGSUSED*/ 607 static int 608 rootnex_ctlops(dev_info_t *dip, dev_info_t *rdip, ddi_ctl_enum_t ctlop, 609 void *arg, void *result) 610 { 611 int n, *ptr; 612 struct ddi_parent_private_data *pdp; 613 614 switch (ctlop) { 615 case DDI_CTLOPS_DMAPMAPC: 616 /* 617 * Return 'partial' to indicate that dma mapping 618 * has to be done in the main MMU. 
619 */ 620 return (DDI_DMA_PARTIAL); 621 622 case DDI_CTLOPS_BTOP: 623 /* 624 * Convert byte count input to physical page units. 625 * (byte counts that are not a page-size multiple 626 * are rounded down) 627 */ 628 *(ulong_t *)result = btop(*(ulong_t *)arg); 629 return (DDI_SUCCESS); 630 631 case DDI_CTLOPS_PTOB: 632 /* 633 * Convert size in physical pages to bytes 634 */ 635 *(ulong_t *)result = ptob(*(ulong_t *)arg); 636 return (DDI_SUCCESS); 637 638 case DDI_CTLOPS_BTOPR: 639 /* 640 * Convert byte count input to physical page units 641 * (byte counts that are not a page-size multiple 642 * are rounded up) 643 */ 644 *(ulong_t *)result = btopr(*(ulong_t *)arg); 645 return (DDI_SUCCESS); 646 647 case DDI_CTLOPS_INITCHILD: 648 return (impl_ddi_sunbus_initchild(arg)); 649 650 case DDI_CTLOPS_UNINITCHILD: 651 impl_ddi_sunbus_removechild(arg); 652 return (DDI_SUCCESS); 653 654 case DDI_CTLOPS_REPORTDEV: 655 return (rootnex_ctl_reportdev(rdip)); 656 657 case DDI_CTLOPS_IOMIN: 658 /* 659 * Nothing to do here but reflect back.. 660 */ 661 return (DDI_SUCCESS); 662 663 case DDI_CTLOPS_REGSIZE: 664 case DDI_CTLOPS_NREGS: 665 break; 666 667 case DDI_CTLOPS_SIDDEV: 668 if (ndi_dev_is_prom_node(rdip)) 669 return (DDI_SUCCESS); 670 if (ndi_dev_is_persistent_node(rdip)) 671 return (DDI_SUCCESS); 672 return (DDI_FAILURE); 673 674 case DDI_CTLOPS_POWER: 675 return ((*pm_platform_power)((power_req_t *)arg)); 676 677 case DDI_CTLOPS_RESERVED0: /* Was DDI_CTLOPS_NINTRS, obsolete */ 678 case DDI_CTLOPS_RESERVED1: /* Was DDI_CTLOPS_POKE_INIT, obsolete */ 679 case DDI_CTLOPS_RESERVED2: /* Was DDI_CTLOPS_POKE_FLUSH, obsolete */ 680 case DDI_CTLOPS_RESERVED3: /* Was DDI_CTLOPS_POKE_FINI, obsolete */ 681 case DDI_CTLOPS_RESERVED4: /* Was DDI_CTLOPS_INTR_HILEVEL, obsolete */ 682 case DDI_CTLOPS_RESERVED5: /* Was DDI_CTLOPS_XLATE_INTRS, obsolete */ 683 if (!rootnex_state->r_reserved_msg_printed) { 684 rootnex_state->r_reserved_msg_printed = B_TRUE; 685 cmn_err(CE_WARN, "Failing ddi_ctlops call(s) for " 686 "1 or more reserved/obsolete operations."); 687 } 688 return (DDI_FAILURE); 689 690 default: 691 return (DDI_FAILURE); 692 } 693 /* 694 * The rest are for "hardware" properties 695 */ 696 if ((pdp = ddi_get_parent_data(rdip)) == NULL) 697 return (DDI_FAILURE); 698 699 if (ctlop == DDI_CTLOPS_NREGS) { 700 ptr = (int *)result; 701 *ptr = pdp->par_nreg; 702 } else { 703 off_t *size = (off_t *)result; 704 705 ptr = (int *)arg; 706 n = *ptr; 707 if (n >= pdp->par_nreg) { 708 return (DDI_FAILURE); 709 } 710 *size = (off_t)pdp->par_reg[n].regspec_size; 711 } 712 return (DDI_SUCCESS); 713 } 714 715 716 /* 717 * rootnex_ctl_reportdev() 718 * 719 */ 720 static int 721 rootnex_ctl_reportdev(dev_info_t *dev) 722 { 723 int i, n, len, f_len = 0; 724 char *buf; 725 726 buf = kmem_alloc(REPORTDEV_BUFSIZE, KM_SLEEP); 727 f_len += snprintf(buf, REPORTDEV_BUFSIZE, 728 "%s%d at root", ddi_driver_name(dev), ddi_get_instance(dev)); 729 len = strlen(buf); 730 731 for (i = 0; i < sparc_pd_getnreg(dev); i++) { 732 733 struct regspec *rp = sparc_pd_getreg(dev, i); 734 735 if (i == 0) 736 f_len += snprintf(buf + len, REPORTDEV_BUFSIZE - len, 737 ": "); 738 else 739 f_len += snprintf(buf + len, REPORTDEV_BUFSIZE - len, 740 " and "); 741 len = strlen(buf); 742 743 switch (rp->regspec_bustype) { 744 745 case BTEISA: 746 f_len += snprintf(buf + len, REPORTDEV_BUFSIZE - len, 747 "%s 0x%x", DEVI_EISA_NEXNAME, rp->regspec_addr); 748 break; 749 750 case BTISA: 751 f_len += snprintf(buf + len, REPORTDEV_BUFSIZE - len, 752 "%s 0x%x", 
DEVI_ISA_NEXNAME, rp->regspec_addr); 753 break; 754 755 default: 756 f_len += snprintf(buf + len, REPORTDEV_BUFSIZE - len, 757 "space %x offset %x", 758 rp->regspec_bustype, rp->regspec_addr); 759 break; 760 } 761 len = strlen(buf); 762 } 763 for (i = 0, n = sparc_pd_getnintr(dev); i < n; i++) { 764 int pri; 765 766 if (i != 0) { 767 f_len += snprintf(buf + len, REPORTDEV_BUFSIZE - len, 768 ","); 769 len = strlen(buf); 770 } 771 pri = INT_IPL(sparc_pd_getintr(dev, i)->intrspec_pri); 772 f_len += snprintf(buf + len, REPORTDEV_BUFSIZE - len, 773 " sparc ipl %d", pri); 774 len = strlen(buf); 775 } 776 #ifdef DEBUG 777 if (f_len + 1 >= REPORTDEV_BUFSIZE) { 778 cmn_err(CE_NOTE, "next message is truncated: " 779 "printed length 1024, real length %d", f_len); 780 } 781 #endif /* DEBUG */ 782 cmn_err(CE_CONT, "?%s\n", buf); 783 kmem_free(buf, REPORTDEV_BUFSIZE); 784 return (DDI_SUCCESS); 785 } 786 787 788 /* 789 * ****************** 790 * map related code 791 * ****************** 792 */ 793 794 /* 795 * rootnex_map() 796 * 797 */ 798 static int 799 rootnex_map(dev_info_t *dip, dev_info_t *rdip, ddi_map_req_t *mp, off_t offset, 800 off_t len, caddr_t *vaddrp) 801 { 802 struct regspec *rp, tmp_reg; 803 ddi_map_req_t mr = *mp; /* Get private copy of request */ 804 int error; 805 806 mp = &mr; 807 808 switch (mp->map_op) { 809 case DDI_MO_MAP_LOCKED: 810 case DDI_MO_UNMAP: 811 case DDI_MO_MAP_HANDLE: 812 break; 813 default: 814 #ifdef DDI_MAP_DEBUG 815 cmn_err(CE_WARN, "rootnex_map: unimplemented map op %d.", 816 mp->map_op); 817 #endif /* DDI_MAP_DEBUG */ 818 return (DDI_ME_UNIMPLEMENTED); 819 } 820 821 if (mp->map_flags & DDI_MF_USER_MAPPING) { 822 #ifdef DDI_MAP_DEBUG 823 cmn_err(CE_WARN, "rootnex_map: unimplemented map type: user."); 824 #endif /* DDI_MAP_DEBUG */ 825 return (DDI_ME_UNIMPLEMENTED); 826 } 827 828 /* 829 * First, if given an rnumber, convert it to a regspec... 830 * (Presumably, this is on behalf of a child of the root node?) 831 */ 832 833 if (mp->map_type == DDI_MT_RNUMBER) { 834 835 int rnumber = mp->map_obj.rnumber; 836 #ifdef DDI_MAP_DEBUG 837 static char *out_of_range = 838 "rootnex_map: Out of range rnumber <%d>, device <%s>"; 839 #endif /* DDI_MAP_DEBUG */ 840 841 rp = i_ddi_rnumber_to_regspec(rdip, rnumber); 842 if (rp == NULL) { 843 #ifdef DDI_MAP_DEBUG 844 cmn_err(CE_WARN, out_of_range, rnumber, 845 ddi_get_name(rdip)); 846 #endif /* DDI_MAP_DEBUG */ 847 return (DDI_ME_RNUMBER_RANGE); 848 } 849 850 /* 851 * Convert the given ddi_map_req_t from rnumber to regspec... 852 */ 853 854 mp->map_type = DDI_MT_REGSPEC; 855 mp->map_obj.rp = rp; 856 } 857 858 /* 859 * Adjust offset and length correspnding to called values... 860 * XXX: A non-zero length means override the one in the regspec 861 * XXX: (regardless of what's in the parent's range?) 
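 *
 * For instance (illustrative numbers), a child regspec of
 * <bustype=0, addr=0xfebf0000, size=0x1000> mapped with offset 0x10 and
 * a non-zero len of 0x20 is adjusted below to <0, 0xfebf0010, 0x20>
 * before the parent ranges are applied.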
862 */ 863 864 tmp_reg = *(mp->map_obj.rp); /* Preserve underlying data */ 865 rp = mp->map_obj.rp = &tmp_reg; /* Use tmp_reg in request */ 866 867 #ifdef DDI_MAP_DEBUG 868 cmn_err(CE_CONT, "rootnex: <%s,%s> <0x%x, 0x%x, 0x%d> offset %d len %d " 869 "handle 0x%x\n", ddi_get_name(dip), ddi_get_name(rdip), 870 rp->regspec_bustype, rp->regspec_addr, rp->regspec_size, offset, 871 len, mp->map_handlep); 872 #endif /* DDI_MAP_DEBUG */ 873 874 /* 875 * I/O or memory mapping: 876 * 877 * <bustype=0, addr=x, len=x>: memory 878 * <bustype=1, addr=x, len=x>: i/o 879 * <bustype>1, addr=0, len=x>: x86-compatibility i/o 880 */ 881 882 if (rp->regspec_bustype > 1 && rp->regspec_addr != 0) { 883 cmn_err(CE_WARN, "<%s,%s> invalid register spec" 884 " <0x%x, 0x%x, 0x%x>", ddi_get_name(dip), 885 ddi_get_name(rdip), rp->regspec_bustype, 886 rp->regspec_addr, rp->regspec_size); 887 return (DDI_ME_INVAL); 888 } 889 890 if (rp->regspec_bustype > 1 && rp->regspec_addr == 0) { 891 /* 892 * compatibility i/o mapping 893 */ 894 rp->regspec_bustype += (uint_t)offset; 895 } else { 896 /* 897 * Normal memory or i/o mapping 898 */ 899 rp->regspec_addr += (uint_t)offset; 900 } 901 902 if (len != 0) 903 rp->regspec_size = (uint_t)len; 904 905 #ifdef DDI_MAP_DEBUG 906 cmn_err(CE_CONT, " <%s,%s> <0x%x, 0x%x, 0x%d> offset %d " 907 "len %d handle 0x%x\n", ddi_get_name(dip), ddi_get_name(rdip), 908 rp->regspec_bustype, rp->regspec_addr, rp->regspec_size, 909 offset, len, mp->map_handlep); 910 #endif /* DDI_MAP_DEBUG */ 911 912 /* 913 * Apply any parent ranges at this level, if applicable. 914 * (This is where nexus specific regspec translation takes place. 915 * Use of this function is implicit agreement that translation is 916 * provided via ddi_apply_range.) 917 */ 918 919 #ifdef DDI_MAP_DEBUG 920 ddi_map_debug("applying range of parent <%s> to child <%s>...\n", 921 ddi_get_name(dip), ddi_get_name(rdip)); 922 #endif /* DDI_MAP_DEBUG */ 923 924 if ((error = i_ddi_apply_range(dip, rdip, mp->map_obj.rp)) != 0) 925 return (error); 926 927 switch (mp->map_op) { 928 case DDI_MO_MAP_LOCKED: 929 930 /* 931 * Set up the locked down kernel mapping to the regspec... 932 */ 933 934 return (rootnex_map_regspec(mp, vaddrp)); 935 936 case DDI_MO_UNMAP: 937 938 /* 939 * Release mapping... 940 */ 941 942 return (rootnex_unmap_regspec(mp, vaddrp)); 943 944 case DDI_MO_MAP_HANDLE: 945 946 return (rootnex_map_handle(mp)); 947 948 default: 949 return (DDI_ME_UNIMPLEMENTED); 950 } 951 } 952 953 954 /* 955 * rootnex_map_fault() 956 * 957 * fault in mappings for requestors 958 */ 959 /*ARGSUSED*/ 960 static int 961 rootnex_map_fault(dev_info_t *dip, dev_info_t *rdip, struct hat *hat, 962 struct seg *seg, caddr_t addr, struct devpage *dp, pfn_t pfn, uint_t prot, 963 uint_t lock) 964 { 965 966 #ifdef DDI_MAP_DEBUG 967 ddi_map_debug("rootnex_map_fault: address <%x> pfn <%x>", addr, pfn); 968 ddi_map_debug(" Seg <%s>\n", 969 seg->s_ops == &segdev_ops ? "segdev" : 970 seg == &kvseg ? "segkmem" : "NONE!"); 971 #endif /* DDI_MAP_DEBUG */ 972 973 /* 974 * This is all terribly broken, but it is a start 975 * 976 * XXX Note that this test means that segdev_ops 977 * must be exported from seg_dev.c. 978 * XXX What about devices with their own segment drivers? 979 */ 980 if (seg->s_ops == &segdev_ops) { 981 struct segdev_data *sdp = (struct segdev_data *)seg->s_data; 982 983 if (hat == NULL) { 984 /* 985 * This is one plausible interpretation of 986 * a null hat i.e. 
use the first hat on the 987 * address space hat list which by convention is 988 * the hat of the system MMU. At alternative 989 * would be to panic .. this might well be better .. 990 */ 991 ASSERT(AS_READ_HELD(seg->s_as, &seg->s_as->a_lock)); 992 hat = seg->s_as->a_hat; 993 cmn_err(CE_NOTE, "rootnex_map_fault: nil hat"); 994 } 995 hat_devload(hat, addr, MMU_PAGESIZE, pfn, prot | sdp->hat_attr, 996 (lock ? HAT_LOAD_LOCK : HAT_LOAD)); 997 } else if (seg == &kvseg && dp == NULL) { 998 hat_devload(kas.a_hat, addr, MMU_PAGESIZE, pfn, prot, 999 HAT_LOAD_LOCK); 1000 } else 1001 return (DDI_FAILURE); 1002 return (DDI_SUCCESS); 1003 } 1004 1005 1006 /* 1007 * rootnex_map_regspec() 1008 * we don't support mapping of I/O cards above 4Gb 1009 */ 1010 static int 1011 rootnex_map_regspec(ddi_map_req_t *mp, caddr_t *vaddrp) 1012 { 1013 rootnex_addr_t rbase; 1014 void *cvaddr; 1015 uint_t npages, pgoffset; 1016 struct regspec *rp; 1017 ddi_acc_hdl_t *hp; 1018 ddi_acc_impl_t *ap; 1019 uint_t hat_acc_flags; 1020 paddr_t pbase; 1021 1022 rp = mp->map_obj.rp; 1023 hp = mp->map_handlep; 1024 1025 #ifdef DDI_MAP_DEBUG 1026 ddi_map_debug( 1027 "rootnex_map_regspec: <0x%x 0x%x 0x%x> handle 0x%x\n", 1028 rp->regspec_bustype, rp->regspec_addr, 1029 rp->regspec_size, mp->map_handlep); 1030 #endif /* DDI_MAP_DEBUG */ 1031 1032 /* 1033 * I/O or memory mapping 1034 * 1035 * <bustype=0, addr=x, len=x>: memory 1036 * <bustype=1, addr=x, len=x>: i/o 1037 * <bustype>1, addr=0, len=x>: x86-compatibility i/o 1038 */ 1039 1040 if (rp->regspec_bustype > 1 && rp->regspec_addr != 0) { 1041 cmn_err(CE_WARN, "rootnex: invalid register spec" 1042 " <0x%x, 0x%x, 0x%x>", rp->regspec_bustype, 1043 rp->regspec_addr, rp->regspec_size); 1044 return (DDI_FAILURE); 1045 } 1046 1047 if (rp->regspec_bustype != 0) { 1048 /* 1049 * I/O space - needs a handle. 1050 */ 1051 if (hp == NULL) { 1052 return (DDI_FAILURE); 1053 } 1054 ap = (ddi_acc_impl_t *)hp->ah_platform_private; 1055 ap->ahi_acc_attr |= DDI_ACCATTR_IO_SPACE; 1056 impl_acc_hdl_init(hp); 1057 1058 if (mp->map_flags & DDI_MF_DEVICE_MAPPING) { 1059 #ifdef DDI_MAP_DEBUG 1060 ddi_map_debug("rootnex_map_regspec: mmap() " 1061 "to I/O space is not supported.\n"); 1062 #endif /* DDI_MAP_DEBUG */ 1063 return (DDI_ME_INVAL); 1064 } else { 1065 /* 1066 * 1275-compliant vs. compatibility i/o mapping 1067 */ 1068 *vaddrp = 1069 (rp->regspec_bustype > 1 && rp->regspec_addr == 0) ? 1070 ((caddr_t)(uintptr_t)rp->regspec_bustype) : 1071 ((caddr_t)(uintptr_t)rp->regspec_addr); 1072 #ifdef __xpv 1073 if (DOMAIN_IS_INITDOMAIN(xen_info)) { 1074 hp->ah_pfn = xen_assign_pfn( 1075 mmu_btop((ulong_t)rp->regspec_addr & 1076 MMU_PAGEMASK)); 1077 } else { 1078 hp->ah_pfn = mmu_btop( 1079 (ulong_t)rp->regspec_addr & MMU_PAGEMASK); 1080 } 1081 #else 1082 hp->ah_pfn = mmu_btop((ulong_t)rp->regspec_addr & 1083 MMU_PAGEMASK); 1084 #endif 1085 hp->ah_pnum = mmu_btopr(rp->regspec_size + 1086 (ulong_t)rp->regspec_addr & MMU_PAGEOFFSET); 1087 } 1088 1089 #ifdef DDI_MAP_DEBUG 1090 ddi_map_debug( 1091 "rootnex_map_regspec: \"Mapping\" %d bytes I/O space at 0x%x\n", 1092 rp->regspec_size, *vaddrp); 1093 #endif /* DDI_MAP_DEBUG */ 1094 return (DDI_SUCCESS); 1095 } 1096 1097 /* 1098 * Memory space 1099 */ 1100 1101 if (hp != NULL) { 1102 /* 1103 * hat layer ignores 1104 * hp->ah_acc.devacc_attr_endian_flags. 
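 *
 * (For example, a child that set up its ddi_device_acc_attr_t with
 * DDI_STRICTORDER_ACC ends up with HAT_STRICTORDER below, while
 * DDI_STORECACHING_OK_ACC maps to HAT_STORECACHING_OK.)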
1105 */ 1106 switch (hp->ah_acc.devacc_attr_dataorder) { 1107 case DDI_STRICTORDER_ACC: 1108 hat_acc_flags = HAT_STRICTORDER; 1109 break; 1110 case DDI_UNORDERED_OK_ACC: 1111 hat_acc_flags = HAT_UNORDERED_OK; 1112 break; 1113 case DDI_MERGING_OK_ACC: 1114 hat_acc_flags = HAT_MERGING_OK; 1115 break; 1116 case DDI_LOADCACHING_OK_ACC: 1117 hat_acc_flags = HAT_LOADCACHING_OK; 1118 break; 1119 case DDI_STORECACHING_OK_ACC: 1120 hat_acc_flags = HAT_STORECACHING_OK; 1121 break; 1122 } 1123 ap = (ddi_acc_impl_t *)hp->ah_platform_private; 1124 ap->ahi_acc_attr |= DDI_ACCATTR_CPU_VADDR; 1125 impl_acc_hdl_init(hp); 1126 hp->ah_hat_flags = hat_acc_flags; 1127 } else { 1128 hat_acc_flags = HAT_STRICTORDER; 1129 } 1130 1131 rbase = (rootnex_addr_t)(rp->regspec_addr & MMU_PAGEMASK); 1132 #ifdef __xpv 1133 /* 1134 * If we're dom0, we're using a real device so we need to translate 1135 * the MA to a PA. 1136 */ 1137 if (DOMAIN_IS_INITDOMAIN(xen_info)) { 1138 pbase = pfn_to_pa(xen_assign_pfn(mmu_btop(rbase))); 1139 } else { 1140 pbase = rbase; 1141 } 1142 #else 1143 pbase = rbase; 1144 #endif 1145 pgoffset = (ulong_t)rp->regspec_addr & MMU_PAGEOFFSET; 1146 1147 if (rp->regspec_size == 0) { 1148 #ifdef DDI_MAP_DEBUG 1149 ddi_map_debug("rootnex_map_regspec: zero regspec_size\n"); 1150 #endif /* DDI_MAP_DEBUG */ 1151 return (DDI_ME_INVAL); 1152 } 1153 1154 if (mp->map_flags & DDI_MF_DEVICE_MAPPING) { 1155 /* extra cast to make gcc happy */ 1156 *vaddrp = (caddr_t)((uintptr_t)mmu_btop(pbase)); 1157 } else { 1158 npages = mmu_btopr(rp->regspec_size + pgoffset); 1159 1160 #ifdef DDI_MAP_DEBUG 1161 ddi_map_debug("rootnex_map_regspec: Mapping %d pages " 1162 "physical %llx", npages, pbase); 1163 #endif /* DDI_MAP_DEBUG */ 1164 1165 cvaddr = device_arena_alloc(ptob(npages), VM_NOSLEEP); 1166 if (cvaddr == NULL) 1167 return (DDI_ME_NORESOURCES); 1168 1169 /* 1170 * Now map in the pages we've allocated... 1171 */ 1172 hat_devload(kas.a_hat, cvaddr, mmu_ptob(npages), 1173 mmu_btop(pbase), mp->map_prot | hat_acc_flags, 1174 HAT_LOAD_LOCK); 1175 *vaddrp = (caddr_t)cvaddr + pgoffset; 1176 1177 /* save away pfn and npages for FMA */ 1178 hp = mp->map_handlep; 1179 if (hp) { 1180 hp->ah_pfn = mmu_btop(pbase); 1181 hp->ah_pnum = npages; 1182 } 1183 } 1184 1185 #ifdef DDI_MAP_DEBUG 1186 ddi_map_debug("at virtual 0x%x\n", *vaddrp); 1187 #endif /* DDI_MAP_DEBUG */ 1188 return (DDI_SUCCESS); 1189 } 1190 1191 1192 /* 1193 * rootnex_unmap_regspec() 1194 * 1195 */ 1196 static int 1197 rootnex_unmap_regspec(ddi_map_req_t *mp, caddr_t *vaddrp) 1198 { 1199 caddr_t addr = (caddr_t)*vaddrp; 1200 uint_t npages, pgoffset; 1201 struct regspec *rp; 1202 1203 if (mp->map_flags & DDI_MF_DEVICE_MAPPING) 1204 return (0); 1205 1206 rp = mp->map_obj.rp; 1207 1208 if (rp->regspec_size == 0) { 1209 #ifdef DDI_MAP_DEBUG 1210 ddi_map_debug("rootnex_unmap_regspec: zero regspec_size\n"); 1211 #endif /* DDI_MAP_DEBUG */ 1212 return (DDI_ME_INVAL); 1213 } 1214 1215 /* 1216 * I/O or memory mapping: 1217 * 1218 * <bustype=0, addr=x, len=x>: memory 1219 * <bustype=1, addr=x, len=x>: i/o 1220 * <bustype>1, addr=0, len=x>: x86-compatibility i/o 1221 */ 1222 if (rp->regspec_bustype != 0) { 1223 /* 1224 * This is I/O space, which requires no particular 1225 * processing on unmap since it isn't mapped in the 1226 * first place. 
1227 */ 1228 return (DDI_SUCCESS); 1229 } 1230 1231 /* 1232 * Memory space 1233 */ 1234 pgoffset = (uintptr_t)addr & MMU_PAGEOFFSET; 1235 npages = mmu_btopr(rp->regspec_size + pgoffset); 1236 hat_unload(kas.a_hat, addr - pgoffset, ptob(npages), HAT_UNLOAD_UNLOCK); 1237 device_arena_free(addr - pgoffset, ptob(npages)); 1238 1239 /* 1240 * Destroy the pointer - the mapping has logically gone 1241 */ 1242 *vaddrp = NULL; 1243 1244 return (DDI_SUCCESS); 1245 } 1246 1247 1248 /* 1249 * rootnex_map_handle() 1250 * 1251 */ 1252 static int 1253 rootnex_map_handle(ddi_map_req_t *mp) 1254 { 1255 rootnex_addr_t rbase; 1256 ddi_acc_hdl_t *hp; 1257 uint_t pgoffset; 1258 struct regspec *rp; 1259 paddr_t pbase; 1260 1261 rp = mp->map_obj.rp; 1262 1263 #ifdef DDI_MAP_DEBUG 1264 ddi_map_debug( 1265 "rootnex_map_handle: <0x%x 0x%x 0x%x> handle 0x%x\n", 1266 rp->regspec_bustype, rp->regspec_addr, 1267 rp->regspec_size, mp->map_handlep); 1268 #endif /* DDI_MAP_DEBUG */ 1269 1270 /* 1271 * I/O or memory mapping: 1272 * 1273 * <bustype=0, addr=x, len=x>: memory 1274 * <bustype=1, addr=x, len=x>: i/o 1275 * <bustype>1, addr=0, len=x>: x86-compatibility i/o 1276 */ 1277 if (rp->regspec_bustype != 0) { 1278 /* 1279 * This refers to I/O space, and we don't support "mapping" 1280 * I/O space to a user. 1281 */ 1282 return (DDI_FAILURE); 1283 } 1284 1285 /* 1286 * Set up the hat_flags for the mapping. 1287 */ 1288 hp = mp->map_handlep; 1289 1290 switch (hp->ah_acc.devacc_attr_endian_flags) { 1291 case DDI_NEVERSWAP_ACC: 1292 hp->ah_hat_flags = HAT_NEVERSWAP | HAT_STRICTORDER; 1293 break; 1294 case DDI_STRUCTURE_LE_ACC: 1295 hp->ah_hat_flags = HAT_STRUCTURE_LE; 1296 break; 1297 case DDI_STRUCTURE_BE_ACC: 1298 return (DDI_FAILURE); 1299 default: 1300 return (DDI_REGS_ACC_CONFLICT); 1301 } 1302 1303 switch (hp->ah_acc.devacc_attr_dataorder) { 1304 case DDI_STRICTORDER_ACC: 1305 break; 1306 case DDI_UNORDERED_OK_ACC: 1307 hp->ah_hat_flags |= HAT_UNORDERED_OK; 1308 break; 1309 case DDI_MERGING_OK_ACC: 1310 hp->ah_hat_flags |= HAT_MERGING_OK; 1311 break; 1312 case DDI_LOADCACHING_OK_ACC: 1313 hp->ah_hat_flags |= HAT_LOADCACHING_OK; 1314 break; 1315 case DDI_STORECACHING_OK_ACC: 1316 hp->ah_hat_flags |= HAT_STORECACHING_OK; 1317 break; 1318 default: 1319 return (DDI_FAILURE); 1320 } 1321 1322 rbase = (rootnex_addr_t)rp->regspec_addr & 1323 (~(rootnex_addr_t)MMU_PAGEOFFSET); 1324 pgoffset = (ulong_t)rp->regspec_addr & MMU_PAGEOFFSET; 1325 1326 if (rp->regspec_size == 0) 1327 return (DDI_ME_INVAL); 1328 1329 #ifdef __xpv 1330 /* 1331 * If we're dom0, we're using a real device so we need to translate 1332 * the MA to a PA. 
1333 */ 1334 if (DOMAIN_IS_INITDOMAIN(xen_info)) { 1335 pbase = pfn_to_pa(xen_assign_pfn(mmu_btop(rbase))) | 1336 (rbase & MMU_PAGEOFFSET); 1337 } else { 1338 pbase = rbase; 1339 } 1340 #else 1341 pbase = rbase; 1342 #endif 1343 1344 hp->ah_pfn = mmu_btop(pbase); 1345 hp->ah_pnum = mmu_btopr(rp->regspec_size + pgoffset); 1346 1347 return (DDI_SUCCESS); 1348 } 1349 1350 1351 1352 /* 1353 * ************************ 1354 * interrupt related code 1355 * ************************ 1356 */ 1357 1358 /* 1359 * rootnex_intr_ops() 1360 * bus_intr_op() function for interrupt support 1361 */ 1362 /* ARGSUSED */ 1363 static int 1364 rootnex_intr_ops(dev_info_t *pdip, dev_info_t *rdip, ddi_intr_op_t intr_op, 1365 ddi_intr_handle_impl_t *hdlp, void *result) 1366 { 1367 struct intrspec *ispec; 1368 1369 DDI_INTR_NEXDBG((CE_CONT, 1370 "rootnex_intr_ops: pdip = %p, rdip = %p, intr_op = %x, hdlp = %p\n", 1371 (void *)pdip, (void *)rdip, intr_op, (void *)hdlp)); 1372 1373 /* Process the interrupt operation */ 1374 switch (intr_op) { 1375 case DDI_INTROP_GETCAP: 1376 /* First check with pcplusmp */ 1377 if (psm_intr_ops == NULL) 1378 return (DDI_FAILURE); 1379 1380 if ((*psm_intr_ops)(rdip, hdlp, PSM_INTR_OP_GET_CAP, result)) { 1381 *(int *)result = 0; 1382 return (DDI_FAILURE); 1383 } 1384 break; 1385 case DDI_INTROP_SETCAP: 1386 if (psm_intr_ops == NULL) 1387 return (DDI_FAILURE); 1388 1389 if ((*psm_intr_ops)(rdip, hdlp, PSM_INTR_OP_SET_CAP, result)) 1390 return (DDI_FAILURE); 1391 break; 1392 case DDI_INTROP_ALLOC: 1393 ASSERT(hdlp->ih_type == DDI_INTR_TYPE_FIXED); 1394 return (rootnex_alloc_intr_fixed(rdip, hdlp, result)); 1395 case DDI_INTROP_FREE: 1396 ASSERT(hdlp->ih_type == DDI_INTR_TYPE_FIXED); 1397 return (rootnex_free_intr_fixed(rdip, hdlp)); 1398 case DDI_INTROP_GETPRI: 1399 if ((ispec = rootnex_get_ispec(rdip, hdlp->ih_inum)) == NULL) 1400 return (DDI_FAILURE); 1401 *(int *)result = ispec->intrspec_pri; 1402 break; 1403 case DDI_INTROP_SETPRI: 1404 /* Validate the interrupt priority passed to us */ 1405 if (*(int *)result > LOCK_LEVEL) 1406 return (DDI_FAILURE); 1407 1408 /* Ensure that PSM is all initialized and ispec is ok */ 1409 if ((psm_intr_ops == NULL) || 1410 ((ispec = rootnex_get_ispec(rdip, hdlp->ih_inum)) == NULL)) 1411 return (DDI_FAILURE); 1412 1413 /* Change the priority */ 1414 if ((*psm_intr_ops)(rdip, hdlp, PSM_INTR_OP_SET_PRI, result) == 1415 PSM_FAILURE) 1416 return (DDI_FAILURE); 1417 1418 /* update the ispec with the new priority */ 1419 ispec->intrspec_pri = *(int *)result; 1420 break; 1421 case DDI_INTROP_ADDISR: 1422 if ((ispec = rootnex_get_ispec(rdip, hdlp->ih_inum)) == NULL) 1423 return (DDI_FAILURE); 1424 ispec->intrspec_func = hdlp->ih_cb_func; 1425 break; 1426 case DDI_INTROP_REMISR: 1427 if ((ispec = rootnex_get_ispec(rdip, hdlp->ih_inum)) == NULL) 1428 return (DDI_FAILURE); 1429 ispec->intrspec_func = (uint_t (*)()) 0; 1430 break; 1431 case DDI_INTROP_ENABLE: 1432 if ((ispec = rootnex_get_ispec(rdip, hdlp->ih_inum)) == NULL) 1433 return (DDI_FAILURE); 1434 1435 /* Call psmi to translate irq with the dip */ 1436 if (psm_intr_ops == NULL) 1437 return (DDI_FAILURE); 1438 1439 ((ihdl_plat_t *)hdlp->ih_private)->ip_ispecp = ispec; 1440 if ((*psm_intr_ops)(rdip, hdlp, PSM_INTR_OP_XLATE_VECTOR, 1441 (int *)&hdlp->ih_vector) == PSM_FAILURE) 1442 return (DDI_FAILURE); 1443 1444 /* Add the interrupt handler */ 1445 if (!add_avintr((void *)hdlp, ispec->intrspec_pri, 1446 hdlp->ih_cb_func, DEVI(rdip)->devi_name, hdlp->ih_vector, 1447 hdlp->ih_cb_arg1, hdlp->ih_cb_arg2, NULL, 
rdip)) 1448 return (DDI_FAILURE); 1449 break; 1450 case DDI_INTROP_DISABLE: 1451 if ((ispec = rootnex_get_ispec(rdip, hdlp->ih_inum)) == NULL) 1452 return (DDI_FAILURE); 1453 1454 /* Call psm_ops() to translate irq with the dip */ 1455 if (psm_intr_ops == NULL) 1456 return (DDI_FAILURE); 1457 1458 ((ihdl_plat_t *)hdlp->ih_private)->ip_ispecp = ispec; 1459 (void) (*psm_intr_ops)(rdip, hdlp, 1460 PSM_INTR_OP_XLATE_VECTOR, (int *)&hdlp->ih_vector); 1461 1462 /* Remove the interrupt handler */ 1463 rem_avintr((void *)hdlp, ispec->intrspec_pri, 1464 hdlp->ih_cb_func, hdlp->ih_vector); 1465 break; 1466 case DDI_INTROP_SETMASK: 1467 if (psm_intr_ops == NULL) 1468 return (DDI_FAILURE); 1469 1470 if ((*psm_intr_ops)(rdip, hdlp, PSM_INTR_OP_SET_MASK, NULL)) 1471 return (DDI_FAILURE); 1472 break; 1473 case DDI_INTROP_CLRMASK: 1474 if (psm_intr_ops == NULL) 1475 return (DDI_FAILURE); 1476 1477 if ((*psm_intr_ops)(rdip, hdlp, PSM_INTR_OP_CLEAR_MASK, NULL)) 1478 return (DDI_FAILURE); 1479 break; 1480 case DDI_INTROP_GETPENDING: 1481 if (psm_intr_ops == NULL) 1482 return (DDI_FAILURE); 1483 1484 if ((*psm_intr_ops)(rdip, hdlp, PSM_INTR_OP_GET_PENDING, 1485 result)) { 1486 *(int *)result = 0; 1487 return (DDI_FAILURE); 1488 } 1489 break; 1490 case DDI_INTROP_NAVAIL: 1491 case DDI_INTROP_NINTRS: 1492 *(int *)result = i_ddi_get_intx_nintrs(rdip); 1493 if (*(int *)result == 0) { 1494 /* 1495 * Special case for the 'pcic' driver only. This driver 1496 * is a child of the 'isa' and 'rootnex' drivers. 1497 * 1498 * See detailed comments on this in the function 1499 * rootnex_get_ispec(). 1500 * 1501 * Children of 'pcic' send the 'NINTRS' request all the 1502 * way to the rootnex driver. But the 'pdp->par_nintr' 1503 * field may not be initialized, so we fake it here 1504 * to return 1 (a la what the PCMCIA nexus does). 1505 */ 1506 if (strcmp(ddi_get_name(rdip), "pcic") == 0) 1507 *(int *)result = 1; 1508 else 1509 return (DDI_FAILURE); 1510 } 1511 break; 1512 case DDI_INTROP_SUPPORTED_TYPES: 1513 *(int *)result = DDI_INTR_TYPE_FIXED; /* Always ... */ 1514 break; 1515 default: 1516 return (DDI_FAILURE); 1517 } 1518 1519 return (DDI_SUCCESS); 1520 } 1521 1522 1523 /* 1524 * rootnex_get_ispec() 1525 * convert an interrupt number to an interrupt specification. 1526 * The interrupt number determines which interrupt spec will be 1527 * returned if more than one exists. 1528 * 1529 * Look into the parent private data area of the 'rdip' to find out 1530 * the interrupt specification. First check to make sure there is 1531 * one that matches "inumber" and then return a pointer to it. 1532 * 1533 * Return NULL if one could not be found. 1534 * 1535 * NOTE: This is needed for rootnex_intr_ops() 1536 */ 1537 static struct intrspec * 1538 rootnex_get_ispec(dev_info_t *rdip, int inum) 1539 { 1540 struct ddi_parent_private_data *pdp = ddi_get_parent_data(rdip); 1541 1542 /* 1543 * Special case handling for drivers that provide their own 1544 * intrspec structures instead of relying on the DDI framework. 1545 * 1546 * A broken hardware driver in ON could potentially provide its 1547 * own intrspec structure, instead of relying on the hardware. 1548 * If these drivers are children of 'rootnex' then we need to 1549 * continue to provide backward compatibility to them here. 1550 * 1551 * The following check is a special case for the 'pcic' driver, which 1552 * was found to have broken hardware and provides its own intrspec. 
1553 * 1554 * Verbatim comments from this driver are shown here: 1555 * "Don't use the ddi_add_intr since we don't have a 1556 * default intrspec in all cases." 1557 * 1558 * Since an 'ispec' may not be always created for it, 1559 * check for that and create one if so. 1560 * 1561 * NOTE: Currently 'pcic' is the only driver found to do this. 1562 */ 1563 if (!pdp->par_intr && strcmp(ddi_get_name(rdip), "pcic") == 0) { 1564 pdp->par_nintr = 1; 1565 pdp->par_intr = kmem_zalloc(sizeof (struct intrspec) * 1566 pdp->par_nintr, KM_SLEEP); 1567 } 1568 1569 /* Validate the interrupt number */ 1570 if (inum >= pdp->par_nintr) 1571 return (NULL); 1572 1573 /* Get the interrupt structure pointer and return that */ 1574 return ((struct intrspec *)&pdp->par_intr[inum]); 1575 } 1576 1577 /* 1578 * Allocate interrupt vector for FIXED (legacy) type. 1579 */ 1580 static int 1581 rootnex_alloc_intr_fixed(dev_info_t *rdip, ddi_intr_handle_impl_t *hdlp, 1582 void *result) 1583 { 1584 struct intrspec *ispec; 1585 ddi_intr_handle_impl_t info_hdl; 1586 int ret; 1587 int free_phdl = 0; 1588 apic_get_type_t type_info; 1589 1590 if (psm_intr_ops == NULL) 1591 return (DDI_FAILURE); 1592 1593 if ((ispec = rootnex_get_ispec(rdip, hdlp->ih_inum)) == NULL) 1594 return (DDI_FAILURE); 1595 1596 /* 1597 * If the PSM module is "APIX" then pass the request for it 1598 * to allocate the vector now. 1599 */ 1600 bzero(&info_hdl, sizeof (ddi_intr_handle_impl_t)); 1601 info_hdl.ih_private = &type_info; 1602 if ((*psm_intr_ops)(NULL, &info_hdl, PSM_INTR_OP_APIC_TYPE, NULL) == 1603 PSM_SUCCESS && strcmp(type_info.avgi_type, APIC_APIX_NAME) == 0) { 1604 if (hdlp->ih_private == NULL) { /* allocate phdl structure */ 1605 free_phdl = 1; 1606 i_ddi_alloc_intr_phdl(hdlp); 1607 } 1608 ((ihdl_plat_t *)hdlp->ih_private)->ip_ispecp = ispec; 1609 ret = (*psm_intr_ops)(rdip, hdlp, 1610 PSM_INTR_OP_ALLOC_VECTORS, result); 1611 if (free_phdl) { /* free up the phdl structure */ 1612 free_phdl = 0; 1613 i_ddi_free_intr_phdl(hdlp); 1614 hdlp->ih_private = NULL; 1615 } 1616 } else { 1617 /* 1618 * No APIX module; fall back to the old scheme where the 1619 * interrupt vector is allocated during ddi_enable_intr() call. 1620 */ 1621 hdlp->ih_pri = ispec->intrspec_pri; 1622 *(int *)result = hdlp->ih_scratch1; 1623 ret = DDI_SUCCESS; 1624 } 1625 1626 return (ret); 1627 } 1628 1629 /* 1630 * Free up interrupt vector for FIXED (legacy) type. 1631 */ 1632 static int 1633 rootnex_free_intr_fixed(dev_info_t *rdip, ddi_intr_handle_impl_t *hdlp) 1634 { 1635 struct intrspec *ispec; 1636 struct ddi_parent_private_data *pdp; 1637 ddi_intr_handle_impl_t info_hdl; 1638 int ret; 1639 apic_get_type_t type_info; 1640 1641 if (psm_intr_ops == NULL) 1642 return (DDI_FAILURE); 1643 1644 /* 1645 * If the PSM module is "APIX" then pass the request for it 1646 * to free up the vector now. 1647 */ 1648 bzero(&info_hdl, sizeof (ddi_intr_handle_impl_t)); 1649 info_hdl.ih_private = &type_info; 1650 if ((*psm_intr_ops)(NULL, &info_hdl, PSM_INTR_OP_APIC_TYPE, NULL) == 1651 PSM_SUCCESS && strcmp(type_info.avgi_type, APIC_APIX_NAME) == 0) { 1652 if ((ispec = rootnex_get_ispec(rdip, hdlp->ih_inum)) == NULL) 1653 return (DDI_FAILURE); 1654 ((ihdl_plat_t *)hdlp->ih_private)->ip_ispecp = ispec; 1655 ret = (*psm_intr_ops)(rdip, hdlp, 1656 PSM_INTR_OP_FREE_VECTORS, NULL); 1657 } else { 1658 /* 1659 * No APIX module; fall back to the old scheme where 1660 * the interrupt vector was already freed during 1661 * ddi_disable_intr() call. 
1662 */ 1663 ret = DDI_SUCCESS; 1664 } 1665 1666 pdp = ddi_get_parent_data(rdip); 1667 1668 /* 1669 * Special case for 'pcic' driver' only. 1670 * If an intrspec was created for it, clean it up here 1671 * See detailed comments on this in the function 1672 * rootnex_get_ispec(). 1673 */ 1674 if (pdp->par_intr && strcmp(ddi_get_name(rdip), "pcic") == 0) { 1675 kmem_free(pdp->par_intr, sizeof (struct intrspec) * 1676 pdp->par_nintr); 1677 /* 1678 * Set it to zero; so that 1679 * DDI framework doesn't free it again 1680 */ 1681 pdp->par_intr = NULL; 1682 pdp->par_nintr = 0; 1683 } 1684 1685 return (ret); 1686 } 1687 1688 1689 /* 1690 * ****************** 1691 * dma related code 1692 * ****************** 1693 */ 1694 1695 /*ARGSUSED*/ 1696 static int 1697 rootnex_coredma_allochdl(dev_info_t *dip, dev_info_t *rdip, 1698 ddi_dma_attr_t *attr, int (*waitfp)(caddr_t), caddr_t arg, 1699 ddi_dma_handle_t *handlep) 1700 { 1701 uint64_t maxsegmentsize_ll; 1702 uint_t maxsegmentsize; 1703 ddi_dma_impl_t *hp; 1704 rootnex_dma_t *dma; 1705 uint64_t count_max; 1706 uint64_t seg; 1707 int kmflag; 1708 int e; 1709 1710 1711 /* convert our sleep flags */ 1712 if (waitfp == DDI_DMA_SLEEP) { 1713 kmflag = KM_SLEEP; 1714 } else { 1715 kmflag = KM_NOSLEEP; 1716 } 1717 1718 /* 1719 * We try to do only one memory allocation here. We'll do a little 1720 * pointer manipulation later. If the bind ends up taking more than 1721 * our prealloc's space, we'll have to allocate more memory in the 1722 * bind operation. Not great, but much better than before and the 1723 * best we can do with the current bind interfaces. 1724 */ 1725 hp = kmem_cache_alloc(rootnex_state->r_dmahdl_cache, kmflag); 1726 if (hp == NULL) { 1727 if (waitfp != DDI_DMA_DONTWAIT) { 1728 ddi_set_callback(waitfp, arg, 1729 &rootnex_state->r_dvma_call_list_id); 1730 } 1731 return (DDI_DMA_NORESOURCES); 1732 } 1733 1734 /* Do our pointer manipulation now, align the structures */ 1735 hp->dmai_private = (void *)(((uintptr_t)hp + 1736 (uintptr_t)sizeof (ddi_dma_impl_t) + 0x7) & ~0x7); 1737 dma = (rootnex_dma_t *)hp->dmai_private; 1738 dma->dp_prealloc_buffer = (uchar_t *)(((uintptr_t)dma + 1739 sizeof (rootnex_dma_t) + 0x7) & ~0x7); 1740 1741 /* setup the handle */ 1742 rootnex_clean_dmahdl(hp); 1743 hp->dmai_error.err_fep = NULL; 1744 hp->dmai_error.err_cf = NULL; 1745 dma->dp_dip = rdip; 1746 dma->dp_sglinfo.si_min_addr = attr->dma_attr_addr_lo; 1747 dma->dp_sglinfo.si_max_addr = attr->dma_attr_addr_hi; 1748 hp->dmai_minxfer = attr->dma_attr_minxfer; 1749 hp->dmai_burstsizes = attr->dma_attr_burstsizes; 1750 hp->dmai_rdip = rdip; 1751 hp->dmai_attr = *attr; 1752 1753 /* we don't need to worry about the SPL since we do a tryenter */ 1754 mutex_init(&dma->dp_mutex, NULL, MUTEX_DRIVER, NULL); 1755 1756 /* 1757 * Figure out our maximum segment size. If the segment size is greater 1758 * than 4G, we will limit it to (4G - 1) since the max size of a dma 1759 * object (ddi_dma_obj_t.dmao_size) is 32 bits. dma_attr_seg and 1760 * dma_attr_count_max are size-1 type values. 1761 * 1762 * Maximum segment size is the largest physically contiguous chunk of 1763 * memory that we can return from a bind (i.e. the maximum size of a 1764 * single cookie). 
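 *
 * For example (illustrative attribute values), a device with
 * dma_attr_seg = 0xFFFF, dma_attr_count_max = 0xFFFFFFFF,
 * dma_attr_maxxfer = 0xFFFFFFFF and dma_attr_granular = 1 gets
 * maxsegmentsize = MIN(0xFFFF + 1, 0xFFFFFFFF + 1, 0xFFFFFFFF) =
 * 0x10000, i.e. no single cookie handed back from a bind will be
 * larger than 64K.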
1765 */ 1766 1767 /* handle the rollover cases */ 1768 seg = attr->dma_attr_seg + 1; 1769 if (seg < attr->dma_attr_seg) { 1770 seg = attr->dma_attr_seg; 1771 } 1772 count_max = attr->dma_attr_count_max + 1; 1773 if (count_max < attr->dma_attr_count_max) { 1774 count_max = attr->dma_attr_count_max; 1775 } 1776 1777 /* 1778 * granularity may or may not be a power of two. If it isn't, we can't 1779 * use a simple mask. 1780 */ 1781 if (attr->dma_attr_granular & (attr->dma_attr_granular - 1)) { 1782 dma->dp_granularity_power_2 = B_FALSE; 1783 } else { 1784 dma->dp_granularity_power_2 = B_TRUE; 1785 } 1786 1787 /* 1788 * maxxfer should be a whole multiple of granularity. If we're going to 1789 * break up a window because we're greater than maxxfer, we might as 1790 * well make sure maxxfer is a whole multiple so we don't have to 1791 * worry about trimming the window later on for this case. 1792 */ 1793 if (attr->dma_attr_granular > 1) { 1794 if (dma->dp_granularity_power_2) { 1795 dma->dp_maxxfer = attr->dma_attr_maxxfer - 1796 (attr->dma_attr_maxxfer & 1797 (attr->dma_attr_granular - 1)); 1798 } else { 1799 dma->dp_maxxfer = attr->dma_attr_maxxfer - 1800 (attr->dma_attr_maxxfer % attr->dma_attr_granular); 1801 } 1802 } else { 1803 dma->dp_maxxfer = attr->dma_attr_maxxfer; 1804 } 1805 1806 maxsegmentsize_ll = MIN(seg, dma->dp_maxxfer); 1807 maxsegmentsize_ll = MIN(maxsegmentsize_ll, count_max); 1808 if (maxsegmentsize_ll == 0 || (maxsegmentsize_ll > 0xFFFFFFFF)) { 1809 maxsegmentsize = 0xFFFFFFFF; 1810 } else { 1811 maxsegmentsize = maxsegmentsize_ll; 1812 } 1813 dma->dp_sglinfo.si_max_cookie_size = maxsegmentsize; 1814 dma->dp_sglinfo.si_segmask = attr->dma_attr_seg; 1815 dma->dp_sglinfo.si_flags = attr->dma_attr_flags; 1816 1817 /* check the ddi_dma_attr arg to make sure it makes a little sense */ 1818 if (rootnex_alloc_check_parms) { 1819 e = rootnex_valid_alloc_parms(attr, maxsegmentsize); 1820 if (e != DDI_SUCCESS) { 1821 ROOTNEX_PROF_INC(&rootnex_cnt[ROOTNEX_CNT_ALLOC_FAIL]); 1822 (void) rootnex_dma_freehdl(dip, rdip, 1823 (ddi_dma_handle_t)hp); 1824 return (e); 1825 } 1826 } 1827 1828 *handlep = (ddi_dma_handle_t)hp; 1829 1830 ROOTNEX_DPROF_INC(&rootnex_cnt[ROOTNEX_CNT_ACTIVE_HDLS]); 1831 ROOTNEX_DPROBE1(rootnex__alloc__handle, uint64_t, 1832 rootnex_cnt[ROOTNEX_CNT_ACTIVE_HDLS]); 1833 1834 return (DDI_SUCCESS); 1835 } 1836 1837 1838 /* 1839 * rootnex_dma_allochdl() 1840 * called from ddi_dma_alloc_handle(). 
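 *	On amd64 (non-xpv) this first asks iommulib whether an IOMMU
 *	driver has taken over the requesting device: if not (ENOTSUP),
 *	the request falls through to rootnex_coredma_allochdl() above,
 *	otherwise it is handed to iommulib_nexdma_allochdl().  On success
 *	the new handle is also entered into the FM DMA handle cache via
 *	ndi_fmc_insert().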
1841 */ 1842 static int 1843 rootnex_dma_allochdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_attr_t *attr, 1844 int (*waitfp)(caddr_t), caddr_t arg, ddi_dma_handle_t *handlep) 1845 { 1846 int retval; 1847 #if defined(__amd64) && !defined(__xpv) 1848 uint_t error = ENOTSUP; 1849 1850 retval = iommulib_nex_open(rdip, &error); 1851 1852 if (retval != DDI_SUCCESS && error == ENOTSUP) { 1853 /* No IOMMU */ 1854 return (rootnex_coredma_allochdl(dip, rdip, attr, waitfp, arg, 1855 handlep)); 1856 } else if (retval != DDI_SUCCESS) { 1857 return (DDI_FAILURE); 1858 } 1859 1860 ASSERT(IOMMU_USED(rdip)); 1861 1862 /* has an IOMMU */ 1863 retval = iommulib_nexdma_allochdl(dip, rdip, attr, 1864 waitfp, arg, handlep); 1865 #else 1866 retval = rootnex_coredma_allochdl(dip, rdip, attr, waitfp, arg, 1867 handlep); 1868 #endif 1869 if (retval == DDI_SUCCESS) 1870 ndi_fmc_insert(rdip, DMA_HANDLE, *handlep, NULL); 1871 return (retval); 1872 } 1873 1874 /*ARGSUSED*/ 1875 static int 1876 rootnex_coredma_freehdl(dev_info_t *dip, dev_info_t *rdip, 1877 ddi_dma_handle_t handle) 1878 { 1879 ddi_dma_impl_t *hp; 1880 rootnex_dma_t *dma; 1881 1882 1883 hp = (ddi_dma_impl_t *)handle; 1884 dma = (rootnex_dma_t *)hp->dmai_private; 1885 1886 /* unbind should have been called first */ 1887 ASSERT(!dma->dp_inuse); 1888 1889 mutex_destroy(&dma->dp_mutex); 1890 kmem_cache_free(rootnex_state->r_dmahdl_cache, hp); 1891 1892 ROOTNEX_DPROF_DEC(&rootnex_cnt[ROOTNEX_CNT_ACTIVE_HDLS]); 1893 ROOTNEX_DPROBE1(rootnex__free__handle, uint64_t, 1894 rootnex_cnt[ROOTNEX_CNT_ACTIVE_HDLS]); 1895 1896 if (rootnex_state->r_dvma_call_list_id) 1897 ddi_run_callback(&rootnex_state->r_dvma_call_list_id); 1898 1899 return (DDI_SUCCESS); 1900 } 1901 1902 /* 1903 * rootnex_dma_freehdl() 1904 * called from ddi_dma_free_handle(). 1905 */ 1906 static int 1907 rootnex_dma_freehdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle) 1908 { 1909 ndi_fmc_remove(rdip, DMA_HANDLE, handle); 1910 #if defined(__amd64) && !defined(__xpv) 1911 if (IOMMU_USED(rdip)) { 1912 return (iommulib_nexdma_freehdl(dip, rdip, handle)); 1913 } 1914 #endif 1915 return (rootnex_coredma_freehdl(dip, rdip, handle)); 1916 } 1917 1918 /*ARGSUSED*/ 1919 static int 1920 rootnex_coredma_bindhdl(dev_info_t *dip, dev_info_t *rdip, 1921 ddi_dma_handle_t handle, struct ddi_dma_req *dmareq, 1922 ddi_dma_cookie_t *cookiep, uint_t *ccountp) 1923 { 1924 rootnex_sglinfo_t *sinfo; 1925 ddi_dma_attr_t *attr; 1926 ddi_dma_impl_t *hp; 1927 rootnex_dma_t *dma; 1928 int kmflag; 1929 int e; 1930 1931 hp = (ddi_dma_impl_t *)handle; 1932 dma = (rootnex_dma_t *)hp->dmai_private; 1933 sinfo = &dma->dp_sglinfo; 1934 attr = &hp->dmai_attr; 1935 1936 if (dmareq->dmar_fp == DDI_DMA_SLEEP) { 1937 dma->dp_sleep_flags = KM_SLEEP; 1938 } else { 1939 dma->dp_sleep_flags = KM_NOSLEEP; 1940 } 1941 1942 hp->dmai_rflags = dmareq->dmar_flags & DMP_DDIFLAGS; 1943 1944 /* 1945 * This is useful for debugging a driver. Not as useful in a production 1946 * system. The only time this will fail is if you have a driver bug. 1947 */ 1948 if (rootnex_bind_check_inuse) { 1949 /* 1950 * No one else should ever have this lock unless someone else 1951 * is trying to use this handle. So contention on the lock 1952 * is the same as inuse being set. 
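 *
 * (For example, a broken driver that called ddi_dma_addr_bind_handle()
 * from two threads on the same handle at the same time would have the
 * second caller either lose the tryenter below or find dp_inuse set,
 * and get DDI_DMA_INUSE back.)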
1953 */ 1954 e = mutex_tryenter(&dma->dp_mutex); 1955 if (e == 0) { 1956 ROOTNEX_PROF_INC(&rootnex_cnt[ROOTNEX_CNT_BIND_FAIL]); 1957 return (DDI_DMA_INUSE); 1958 } 1959 if (dma->dp_inuse) { 1960 mutex_exit(&dma->dp_mutex); 1961 ROOTNEX_PROF_INC(&rootnex_cnt[ROOTNEX_CNT_BIND_FAIL]); 1962 return (DDI_DMA_INUSE); 1963 } 1964 dma->dp_inuse = B_TRUE; 1965 mutex_exit(&dma->dp_mutex); 1966 } 1967 1968 /* check the ddi_dma_attr arg to make sure it makes a little sense */ 1969 if (rootnex_bind_check_parms) { 1970 e = rootnex_valid_bind_parms(dmareq, attr); 1971 if (e != DDI_SUCCESS) { 1972 ROOTNEX_PROF_INC(&rootnex_cnt[ROOTNEX_CNT_BIND_FAIL]); 1973 rootnex_clean_dmahdl(hp); 1974 return (e); 1975 } 1976 } 1977 1978 /* save away the original bind info */ 1979 dma->dp_dma = dmareq->dmar_object; 1980 1981 #if defined(__amd64) && !defined(__xpv) 1982 e = immu_map_sgl(hp, dmareq, rootnex_prealloc_cookies, rdip); 1983 switch (e) { 1984 case DDI_DMA_MAPPED: 1985 goto out; 1986 case DDI_DMA_USE_PHYSICAL: 1987 break; 1988 case DDI_DMA_PARTIAL: 1989 ddi_err(DER_PANIC, rdip, "Partial DVMA map"); 1990 e = DDI_DMA_NORESOURCES; 1991 /*FALLTHROUGH*/ 1992 default: 1993 ddi_err(DER_MODE, rdip, "DVMA map failed"); 1994 ROOTNEX_PROF_INC(&rootnex_cnt[ROOTNEX_CNT_BIND_FAIL]); 1995 rootnex_clean_dmahdl(hp); 1996 return (e); 1997 } 1998 #endif 1999 2000 /* 2001 * Figure out a rough estimate of what maximum number of pages this 2002 * buffer could use (a high estimate of course). 2003 */ 2004 sinfo->si_max_pages = mmu_btopr(dma->dp_dma.dmao_size) + 1; 2005 2006 /* 2007 * We'll use the pre-allocated cookies for any bind that will *always* 2008 * fit (more important to be consistent, we don't want to create 2009 * additional degenerate cases). 2010 */ 2011 if (sinfo->si_max_pages <= rootnex_state->r_prealloc_cookies) { 2012 dma->dp_cookies = (ddi_dma_cookie_t *)dma->dp_prealloc_buffer; 2013 dma->dp_need_to_free_cookie = B_FALSE; 2014 DTRACE_PROBE2(rootnex__bind__prealloc, dev_info_t *, rdip, 2015 uint_t, sinfo->si_max_pages); 2016 2017 /* 2018 * For anything larger than that, we'll go ahead and allocate the 2019 * maximum number of pages we expect to see. Hopefuly, we won't be 2020 * seeing this path in the fast path for high performance devices very 2021 * frequently. 2022 * 2023 * a ddi bind interface that allowed the driver to provide storage to 2024 * the bind interface would speed this case up. 2025 */ 2026 } else { 2027 /* convert the sleep flags */ 2028 if (dmareq->dmar_fp == DDI_DMA_SLEEP) { 2029 kmflag = KM_SLEEP; 2030 } else { 2031 kmflag = KM_NOSLEEP; 2032 } 2033 2034 /* 2035 * Save away how much memory we allocated. If we're doing a 2036 * nosleep, the alloc could fail... 2037 */ 2038 dma->dp_cookie_size = sinfo->si_max_pages * 2039 sizeof (ddi_dma_cookie_t); 2040 dma->dp_cookies = kmem_alloc(dma->dp_cookie_size, kmflag); 2041 if (dma->dp_cookies == NULL) { 2042 ROOTNEX_PROF_INC(&rootnex_cnt[ROOTNEX_CNT_BIND_FAIL]); 2043 rootnex_clean_dmahdl(hp); 2044 return (DDI_DMA_NORESOURCES); 2045 } 2046 dma->dp_need_to_free_cookie = B_TRUE; 2047 DTRACE_PROBE2(rootnex__bind__alloc, dev_info_t *, rdip, uint_t, 2048 sinfo->si_max_pages); 2049 } 2050 hp->dmai_cookie = dma->dp_cookies; 2051 2052 /* 2053 * Get the real sgl. rootnex_get_sgl will fill in cookie array while 2054 * looking at the constraints in the dma structure. It will then put 2055 * some additional state about the sgl in the dma struct (i.e. is 2056 * the sgl clean, or do we need to do some munging; how many pages 2057 * need to be copied, etc.) 
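
/*
 * Illustrative sketch only (hypothetical name, kept under #if 0): the
 * prealloc-versus-kmem_alloc decision above boils down to a worst-case page
 * count. The helper returns the number of bytes that would have to be
 * allocated for the cookie array, or 0 if the pre-allocated array in the
 * handle is large enough.
 */
#if 0
static size_t
sketch_cookie_array_bytes(size_t bufsize, uint_t prealloc_cookies,
    boolean_t *need_allocp)
{
	uint_t max_pages;

	/*
	 * Worst case, every page of the buffer becomes its own cookie, plus
	 * one extra to cover the initial offset into the first page.
	 */
	max_pages = mmu_btopr(bufsize) + 1;

	if (max_pages <= prealloc_cookies) {
		*need_allocp = B_FALSE;
		return (0);
	}

	*need_allocp = B_TRUE;
	return (max_pages * sizeof (ddi_dma_cookie_t));
}
#endif
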
2058 */ 2059 rootnex_get_sgl(&dmareq->dmar_object, dma->dp_cookies, 2060 &dma->dp_sglinfo); 2061 2062 out: 2063 ASSERT(sinfo->si_sgl_size <= sinfo->si_max_pages); 2064 /* if we don't need a copy buffer, we don't need to sync */ 2065 if (sinfo->si_copybuf_req == 0) { 2066 hp->dmai_rflags |= DMP_NOSYNC; 2067 } 2068 2069 /* 2070 * if we don't need the copybuf and we don't need to do a partial, we 2071 * hit the fast path. All the high performance devices should be trying 2072 * to hit this path. To hit this path, a device should be able to reach 2073 * all of memory, shouldn't try to bind more than it can transfer, and 2074 * the buffer shouldn't require more cookies than the driver/device can 2075 * handle [sgllen]). 2076 */ 2077 if ((sinfo->si_copybuf_req == 0) && 2078 (sinfo->si_sgl_size <= attr->dma_attr_sgllen) && 2079 (dma->dp_dma.dmao_size < dma->dp_maxxfer)) { 2080 /* 2081 * If the driver supports FMA, insert the handle in the FMA DMA 2082 * handle cache. 2083 */ 2084 if (attr->dma_attr_flags & DDI_DMA_FLAGERR) 2085 hp->dmai_error.err_cf = rootnex_dma_check; 2086 2087 /* 2088 * copy out the first cookie and ccountp, set the cookie 2089 * pointer to the second cookie. The first cookie is passed 2090 * back on the stack. Additional cookies are accessed via 2091 * ddi_dma_nextcookie() 2092 */ 2093 *cookiep = dma->dp_cookies[0]; 2094 *ccountp = sinfo->si_sgl_size; 2095 hp->dmai_cookie++; 2096 hp->dmai_rflags &= ~DDI_DMA_PARTIAL; 2097 ROOTNEX_PROF_INC(&rootnex_cnt[ROOTNEX_CNT_ACTIVE_BINDS]); 2098 DTRACE_PROBE3(rootnex__bind__fast, dev_info_t *, rdip, 2099 uint64_t, rootnex_cnt[ROOTNEX_CNT_ACTIVE_BINDS], 2100 uint_t, dma->dp_dma.dmao_size); 2101 2102 2103 return (DDI_DMA_MAPPED); 2104 } 2105 2106 /* 2107 * go to the slow path, we may need to alloc more memory, create 2108 * multiple windows, and munge up a sgl to make the device happy. 2109 */ 2110 e = rootnex_bind_slowpath(hp, dmareq, dma, attr, kmflag); 2111 if ((e != DDI_DMA_MAPPED) && (e != DDI_DMA_PARTIAL_MAP)) { 2112 if (dma->dp_need_to_free_cookie) { 2113 kmem_free(dma->dp_cookies, dma->dp_cookie_size); 2114 } 2115 ROOTNEX_PROF_INC(&rootnex_cnt[ROOTNEX_CNT_BIND_FAIL]); 2116 rootnex_clean_dmahdl(hp); /* must be after free cookie */ 2117 return (e); 2118 } 2119 2120 /* 2121 * If the driver supports FMA, insert the handle in the FMA DMA handle 2122 * cache. 2123 */ 2124 if (attr->dma_attr_flags & DDI_DMA_FLAGERR) 2125 hp->dmai_error.err_cf = rootnex_dma_check; 2126 2127 /* if the first window uses the copy buffer, sync it for the device */ 2128 if ((dma->dp_window[dma->dp_current_win].wd_dosync) && 2129 (hp->dmai_rflags & DDI_DMA_WRITE)) { 2130 (void) rootnex_coredma_sync(dip, rdip, handle, 0, 0, 2131 DDI_DMA_SYNC_FORDEV); 2132 } 2133 2134 /* 2135 * copy out the first cookie and ccountp, set the cookie pointer to the 2136 * second cookie. Make sure the partial flag is set/cleared correctly. 2137 * If we have a partial map (i.e. multiple windows), the number of 2138 * cookies we return is the number of cookies in the first window. 
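
/*
 * Illustrative sketch only (hypothetical name, kept under #if 0): the
 * fast-path test above written as a predicate. All three conditions must
 * hold for a bind to complete with one window and no syncing.
 */
#if 0
static boolean_t
sketch_bind_is_fastpath(uint_t copybuf_req, uint_t sgl_size, int sgllen,
    uint64_t bufsize, uint64_t maxxfer)
{
	/* no copy buffer, cookie count fits sgllen, one transfer covers it */
	if ((copybuf_req == 0) && (sgl_size <= (uint_t)sgllen) &&
	    (bufsize < maxxfer))
		return (B_TRUE);
	return (B_FALSE);
}
#endif
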
2139 */ 2140 if (e == DDI_DMA_MAPPED) { 2141 hp->dmai_rflags &= ~DDI_DMA_PARTIAL; 2142 *ccountp = sinfo->si_sgl_size; 2143 hp->dmai_nwin = 1; 2144 } else { 2145 hp->dmai_rflags |= DDI_DMA_PARTIAL; 2146 *ccountp = dma->dp_window[dma->dp_current_win].wd_cookie_cnt; 2147 ASSERT(hp->dmai_nwin <= dma->dp_max_win); 2148 } 2149 *cookiep = dma->dp_cookies[0]; 2150 hp->dmai_cookie++; 2151 2152 ROOTNEX_DPROF_INC(&rootnex_cnt[ROOTNEX_CNT_ACTIVE_BINDS]); 2153 ROOTNEX_DPROBE3(rootnex__bind__slow, dev_info_t *, rdip, uint64_t, 2154 rootnex_cnt[ROOTNEX_CNT_ACTIVE_BINDS], uint_t, 2155 dma->dp_dma.dmao_size); 2156 return (e); 2157 } 2158 2159 /* 2160 * rootnex_dma_bindhdl() 2161 * called from ddi_dma_addr_bind_handle() and ddi_dma_buf_bind_handle(). 2162 */ 2163 static int 2164 rootnex_dma_bindhdl(dev_info_t *dip, dev_info_t *rdip, 2165 ddi_dma_handle_t handle, struct ddi_dma_req *dmareq, 2166 ddi_dma_cookie_t *cookiep, uint_t *ccountp) 2167 { 2168 #if defined(__amd64) && !defined(__xpv) 2169 if (IOMMU_USED(rdip)) { 2170 return (iommulib_nexdma_bindhdl(dip, rdip, handle, dmareq, 2171 cookiep, ccountp)); 2172 } 2173 #endif 2174 return (rootnex_coredma_bindhdl(dip, rdip, handle, dmareq, 2175 cookiep, ccountp)); 2176 } 2177 2178 2179 2180 /*ARGSUSED*/ 2181 static int 2182 rootnex_coredma_unbindhdl(dev_info_t *dip, dev_info_t *rdip, 2183 ddi_dma_handle_t handle) 2184 { 2185 ddi_dma_impl_t *hp; 2186 rootnex_dma_t *dma; 2187 int e; 2188 2189 hp = (ddi_dma_impl_t *)handle; 2190 dma = (rootnex_dma_t *)hp->dmai_private; 2191 2192 /* make sure the buffer wasn't free'd before calling unbind */ 2193 if (rootnex_unbind_verify_buffer) { 2194 e = rootnex_verify_buffer(dma); 2195 if (e != DDI_SUCCESS) { 2196 ASSERT(0); 2197 return (DDI_FAILURE); 2198 } 2199 } 2200 2201 /* sync the current window before unbinding the buffer */ 2202 if (dma->dp_window && dma->dp_window[dma->dp_current_win].wd_dosync && 2203 (hp->dmai_rflags & DDI_DMA_READ)) { 2204 (void) rootnex_coredma_sync(dip, rdip, handle, 0, 0, 2205 DDI_DMA_SYNC_FORCPU); 2206 } 2207 2208 /* 2209 * cleanup and copy buffer or window state. if we didn't use the copy 2210 * buffer or windows, there won't be much to do :-) 2211 */ 2212 rootnex_teardown_copybuf(dma); 2213 rootnex_teardown_windows(dma); 2214 2215 #if defined(__amd64) && !defined(__xpv) 2216 /* 2217 * Clean up the page tables and free the dvma 2218 */ 2219 e = immu_unmap_sgl(hp, rdip); 2220 if (e != DDI_DMA_USE_PHYSICAL && e != DDI_SUCCESS) { 2221 return (e); 2222 } 2223 #endif 2224 2225 /* 2226 * If we had to allocate space to for the worse case sgl (it didn't 2227 * fit into our pre-allocate buffer), free that up now 2228 */ 2229 if (dma->dp_need_to_free_cookie) { 2230 kmem_free(dma->dp_cookies, dma->dp_cookie_size); 2231 } 2232 2233 /* 2234 * clean up the handle so it's ready for the next bind (i.e. if the 2235 * handle is reused). 
2236 */ 2237 rootnex_clean_dmahdl(hp); 2238 hp->dmai_error.err_cf = NULL; 2239 2240 if (rootnex_state->r_dvma_call_list_id) 2241 ddi_run_callback(&rootnex_state->r_dvma_call_list_id); 2242 2243 ROOTNEX_DPROF_DEC(&rootnex_cnt[ROOTNEX_CNT_ACTIVE_BINDS]); 2244 ROOTNEX_DPROBE1(rootnex__unbind, uint64_t, 2245 rootnex_cnt[ROOTNEX_CNT_ACTIVE_BINDS]); 2246 2247 return (DDI_SUCCESS); 2248 } 2249 2250 /* 2251 * rootnex_dma_unbindhdl() 2252 * called from ddi_dma_unbind_handle() 2253 */ 2254 /*ARGSUSED*/ 2255 static int 2256 rootnex_dma_unbindhdl(dev_info_t *dip, dev_info_t *rdip, 2257 ddi_dma_handle_t handle) 2258 { 2259 #if defined(__amd64) && !defined(__xpv) 2260 if (IOMMU_USED(rdip)) { 2261 return (iommulib_nexdma_unbindhdl(dip, rdip, handle)); 2262 } 2263 #endif 2264 return (rootnex_coredma_unbindhdl(dip, rdip, handle)); 2265 } 2266 2267 #if defined(__amd64) && !defined(__xpv) 2268 2269 static int 2270 rootnex_coredma_get_sleep_flags(ddi_dma_handle_t handle) 2271 { 2272 ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle; 2273 rootnex_dma_t *dma = (rootnex_dma_t *)hp->dmai_private; 2274 2275 if (dma->dp_sleep_flags != KM_SLEEP && 2276 dma->dp_sleep_flags != KM_NOSLEEP) 2277 cmn_err(CE_PANIC, "kmem sleep flags not set in DMA handle"); 2278 return (dma->dp_sleep_flags); 2279 } 2280 /*ARGSUSED*/ 2281 static void 2282 rootnex_coredma_reset_cookies(dev_info_t *dip, ddi_dma_handle_t handle) 2283 { 2284 ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle; 2285 rootnex_dma_t *dma = (rootnex_dma_t *)hp->dmai_private; 2286 rootnex_window_t *window; 2287 2288 if (dma->dp_window) { 2289 window = &dma->dp_window[dma->dp_current_win]; 2290 hp->dmai_cookie = window->wd_first_cookie; 2291 } else { 2292 hp->dmai_cookie = dma->dp_cookies; 2293 } 2294 hp->dmai_cookie++; 2295 } 2296 2297 /*ARGSUSED*/ 2298 static int 2299 rootnex_coredma_get_cookies(dev_info_t *dip, ddi_dma_handle_t handle, 2300 ddi_dma_cookie_t **cookiepp, uint_t *ccountp) 2301 { 2302 int i; 2303 int km_flags; 2304 ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle; 2305 rootnex_dma_t *dma = (rootnex_dma_t *)hp->dmai_private; 2306 rootnex_window_t *window; 2307 ddi_dma_cookie_t *cp; 2308 ddi_dma_cookie_t *cookie; 2309 2310 ASSERT(*cookiepp == NULL); 2311 ASSERT(*ccountp == 0); 2312 2313 if (dma->dp_window) { 2314 window = &dma->dp_window[dma->dp_current_win]; 2315 cp = window->wd_first_cookie; 2316 *ccountp = window->wd_cookie_cnt; 2317 } else { 2318 cp = dma->dp_cookies; 2319 *ccountp = dma->dp_sglinfo.si_sgl_size; 2320 } 2321 2322 km_flags = rootnex_coredma_get_sleep_flags(handle); 2323 cookie = kmem_zalloc(sizeof (ddi_dma_cookie_t) * (*ccountp), km_flags); 2324 if (cookie == NULL) { 2325 return (DDI_DMA_NORESOURCES); 2326 } 2327 2328 for (i = 0; i < *ccountp; i++) { 2329 cookie[i].dmac_notused = cp[i].dmac_notused; 2330 cookie[i].dmac_type = cp[i].dmac_type; 2331 cookie[i].dmac_address = cp[i].dmac_address; 2332 cookie[i].dmac_size = cp[i].dmac_size; 2333 } 2334 2335 *cookiepp = cookie; 2336 2337 return (DDI_SUCCESS); 2338 } 2339 2340 /*ARGSUSED*/ 2341 static int 2342 rootnex_coredma_set_cookies(dev_info_t *dip, ddi_dma_handle_t handle, 2343 ddi_dma_cookie_t *cookiep, uint_t ccount) 2344 { 2345 ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle; 2346 rootnex_dma_t *dma = (rootnex_dma_t *)hp->dmai_private; 2347 rootnex_window_t *window; 2348 ddi_dma_cookie_t *cur_cookiep; 2349 2350 ASSERT(cookiep); 2351 ASSERT(ccount != 0); 2352 ASSERT(dma->dp_need_to_switch_cookies == B_FALSE); 2353 2354 if (dma->dp_window) { 2355 window = &dma->dp_window[dma->dp_current_win]; 2356 
dma->dp_saved_cookies = window->wd_first_cookie; 2357 window->wd_first_cookie = cookiep; 2358 ASSERT(ccount == window->wd_cookie_cnt); 2359 cur_cookiep = (hp->dmai_cookie - dma->dp_saved_cookies) 2360 + window->wd_first_cookie; 2361 } else { 2362 dma->dp_saved_cookies = dma->dp_cookies; 2363 dma->dp_cookies = cookiep; 2364 ASSERT(ccount == dma->dp_sglinfo.si_sgl_size); 2365 cur_cookiep = (hp->dmai_cookie - dma->dp_saved_cookies) 2366 + dma->dp_cookies; 2367 } 2368 2369 dma->dp_need_to_switch_cookies = B_TRUE; 2370 hp->dmai_cookie = cur_cookiep; 2371 2372 return (DDI_SUCCESS); 2373 } 2374 2375 /*ARGSUSED*/ 2376 static int 2377 rootnex_coredma_clear_cookies(dev_info_t *dip, ddi_dma_handle_t handle) 2378 { 2379 ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle; 2380 rootnex_dma_t *dma = (rootnex_dma_t *)hp->dmai_private; 2381 rootnex_window_t *window; 2382 ddi_dma_cookie_t *cur_cookiep; 2383 ddi_dma_cookie_t *cookie_array; 2384 uint_t ccount; 2385 2386 /* check if cookies have not been switched */ 2387 if (dma->dp_need_to_switch_cookies == B_FALSE) 2388 return (DDI_SUCCESS); 2389 2390 ASSERT(dma->dp_saved_cookies); 2391 2392 if (dma->dp_window) { 2393 window = &dma->dp_window[dma->dp_current_win]; 2394 cookie_array = window->wd_first_cookie; 2395 window->wd_first_cookie = dma->dp_saved_cookies; 2396 dma->dp_saved_cookies = NULL; 2397 ccount = window->wd_cookie_cnt; 2398 cur_cookiep = (hp->dmai_cookie - cookie_array) 2399 + window->wd_first_cookie; 2400 } else { 2401 cookie_array = dma->dp_cookies; 2402 dma->dp_cookies = dma->dp_saved_cookies; 2403 dma->dp_saved_cookies = NULL; 2404 ccount = dma->dp_sglinfo.si_sgl_size; 2405 cur_cookiep = (hp->dmai_cookie - cookie_array) 2406 + dma->dp_cookies; 2407 } 2408 2409 kmem_free(cookie_array, sizeof (ddi_dma_cookie_t) * ccount); 2410 2411 hp->dmai_cookie = cur_cookiep; 2412 2413 dma->dp_need_to_switch_cookies = B_FALSE; 2414 2415 return (DDI_SUCCESS); 2416 } 2417 2418 #endif 2419 2420 /* 2421 * rootnex_verify_buffer() 2422 * verify buffer wasn't free'd 2423 */ 2424 static int 2425 rootnex_verify_buffer(rootnex_dma_t *dma) 2426 { 2427 page_t **pplist; 2428 caddr_t vaddr; 2429 uint_t pcnt; 2430 uint_t poff; 2431 page_t *pp; 2432 char b; 2433 int i; 2434 2435 /* Figure out how many pages this buffer occupies */ 2436 if (dma->dp_dma.dmao_type == DMA_OTYP_PAGES) { 2437 poff = dma->dp_dma.dmao_obj.pp_obj.pp_offset & MMU_PAGEOFFSET; 2438 } else { 2439 vaddr = dma->dp_dma.dmao_obj.virt_obj.v_addr; 2440 poff = (uintptr_t)vaddr & MMU_PAGEOFFSET; 2441 } 2442 pcnt = mmu_btopr(dma->dp_dma.dmao_size + poff); 2443 2444 switch (dma->dp_dma.dmao_type) { 2445 case DMA_OTYP_PAGES: 2446 /* 2447 * for a linked list of pp's walk through them to make sure 2448 * they're locked and not free. 2449 */ 2450 pp = dma->dp_dma.dmao_obj.pp_obj.pp_pp; 2451 for (i = 0; i < pcnt; i++) { 2452 if (PP_ISFREE(pp) || !PAGE_LOCKED(pp)) { 2453 return (DDI_FAILURE); 2454 } 2455 pp = pp->p_next; 2456 } 2457 break; 2458 2459 case DMA_OTYP_VADDR: 2460 case DMA_OTYP_BUFVADDR: 2461 pplist = dma->dp_dma.dmao_obj.virt_obj.v_priv; 2462 /* 2463 * for an array of pp's walk through them to make sure they're 2464 * not free. It's possible that they may not be locked. 
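
/*
 * Illustrative sketch only (hypothetical name, kept under #if 0): when the
 * cookie array is swapped by the set_cookies()/clear_cookies() interfaces
 * above, the handle's cookie iterator is carried over by index so that
 * ddi_dma_nextcookie() continues from the same position in the new array.
 */
#if 0
static ddi_dma_cookie_t *
sketch_rebase_cookie_ptr(ddi_dma_cookie_t *cur, ddi_dma_cookie_t *old_base,
    ddi_dma_cookie_t *new_base)
{
	/* same index, different base array */
	return ((cur - old_base) + new_base);
}
#endif
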
2465 */ 2466 if (pplist) { 2467 for (i = 0; i < pcnt; i++) { 2468 if (PP_ISFREE(pplist[i])) { 2469 return (DDI_FAILURE); 2470 } 2471 } 2472 2473 /* For a virtual address, try to peek at each page */ 2474 } else { 2475 if (dma->dp_sglinfo.si_asp == &kas) { 2476 for (i = 0; i < pcnt; i++) { 2477 if (ddi_peek8(NULL, vaddr, &b) == 2478 DDI_FAILURE) 2479 return (DDI_FAILURE); 2480 vaddr += MMU_PAGESIZE; 2481 } 2482 } 2483 } 2484 break; 2485 2486 default: 2487 ASSERT(0); 2488 break; 2489 } 2490 2491 return (DDI_SUCCESS); 2492 } 2493 2494 2495 /* 2496 * rootnex_clean_dmahdl() 2497 * Clean the dma handle. This should be called on a handle alloc and an 2498 * unbind handle. Set the handle state to the default settings. 2499 */ 2500 static void 2501 rootnex_clean_dmahdl(ddi_dma_impl_t *hp) 2502 { 2503 rootnex_dma_t *dma; 2504 2505 2506 dma = (rootnex_dma_t *)hp->dmai_private; 2507 2508 hp->dmai_nwin = 0; 2509 dma->dp_current_cookie = 0; 2510 dma->dp_copybuf_size = 0; 2511 dma->dp_window = NULL; 2512 dma->dp_cbaddr = NULL; 2513 dma->dp_inuse = B_FALSE; 2514 dma->dp_need_to_free_cookie = B_FALSE; 2515 dma->dp_need_to_switch_cookies = B_FALSE; 2516 dma->dp_saved_cookies = NULL; 2517 dma->dp_sleep_flags = KM_PANIC; 2518 dma->dp_need_to_free_window = B_FALSE; 2519 dma->dp_partial_required = B_FALSE; 2520 dma->dp_trim_required = B_FALSE; 2521 dma->dp_sglinfo.si_copybuf_req = 0; 2522 #if !defined(__amd64) 2523 dma->dp_cb_remaping = B_FALSE; 2524 dma->dp_kva = NULL; 2525 #endif 2526 2527 /* FMA related initialization */ 2528 hp->dmai_fault = 0; 2529 hp->dmai_fault_check = NULL; 2530 hp->dmai_fault_notify = NULL; 2531 hp->dmai_error.err_ena = 0; 2532 hp->dmai_error.err_status = DDI_FM_OK; 2533 hp->dmai_error.err_expected = DDI_FM_ERR_UNEXPECTED; 2534 hp->dmai_error.err_ontrap = NULL; 2535 } 2536 2537 2538 /* 2539 * rootnex_valid_alloc_parms() 2540 * Called in ddi_dma_alloc_handle path to validate its parameters. 2541 */ 2542 static int 2543 rootnex_valid_alloc_parms(ddi_dma_attr_t *attr, uint_t maxsegmentsize) 2544 { 2545 if ((attr->dma_attr_seg < MMU_PAGEOFFSET) || 2546 (attr->dma_attr_count_max < MMU_PAGEOFFSET) || 2547 (attr->dma_attr_granular > MMU_PAGESIZE) || 2548 (attr->dma_attr_maxxfer < MMU_PAGESIZE)) { 2549 return (DDI_DMA_BADATTR); 2550 } 2551 2552 if (attr->dma_attr_addr_hi <= attr->dma_attr_addr_lo) { 2553 return (DDI_DMA_BADATTR); 2554 } 2555 2556 if ((attr->dma_attr_seg & MMU_PAGEOFFSET) != MMU_PAGEOFFSET || 2557 MMU_PAGESIZE & (attr->dma_attr_granular - 1) || 2558 attr->dma_attr_sgllen <= 0) { 2559 return (DDI_DMA_BADATTR); 2560 } 2561 2562 /* We should be able to DMA into every byte offset in a page */ 2563 if (maxsegmentsize < MMU_PAGESIZE) { 2564 return (DDI_DMA_BADATTR); 2565 } 2566 2567 /* if we're bouncing on seg, seg must be <= addr_hi */ 2568 if ((attr->dma_attr_flags & _DDI_DMA_BOUNCE_ON_SEG) && 2569 (attr->dma_attr_seg > attr->dma_attr_addr_hi)) { 2570 return (DDI_DMA_BADATTR); 2571 } 2572 return (DDI_SUCCESS); 2573 } 2574 2575 /* 2576 * rootnex_valid_bind_parms() 2577 * Called in ddi_dma_*_bind_handle path to validate its parameters. 2578 */ 2579 /* ARGSUSED */ 2580 static int 2581 rootnex_valid_bind_parms(ddi_dma_req_t *dmareq, ddi_dma_attr_t *attr) 2582 { 2583 #if !defined(__amd64) 2584 /* 2585 * we only support up to a 2G-1 transfer size on 32-bit kernels so 2586 * we can track the offset for the obsoleted interfaces. 
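
/*
 * Illustrative sketch only (kept under #if 0): a hypothetical attribute
 * initializer that satisfies the checks in rootnex_valid_alloc_parms()
 * above. The values are examples to make the checks concrete, not
 * recommendations; a real driver fills these in from its hardware's actual
 * limits.
 */
#if 0
static ddi_dma_attr_t sketch_dma_attr = {
	DMA_ATTR_V0,			/* dma_attr_version */
	0x0000000000000000ULL,		/* dma_attr_addr_lo */
	0xFFFFFFFFFFFFFFFFULL,		/* dma_attr_addr_hi: > addr_lo */
	0x00000000FFFFFFFFULL,		/* dma_attr_count_max: >= MMU_PAGEOFFSET */
	0x0000000000001000ULL,		/* dma_attr_align */
	0x7F,				/* dma_attr_burstsizes */
	0x00000001,			/* dma_attr_minxfer */
	0x00000000FFFFFFFFULL,		/* dma_attr_maxxfer: >= MMU_PAGESIZE */
	0xFFFFFFFFFFFFFFFFULL,		/* dma_attr_seg: low bits all set */
	1,				/* dma_attr_sgllen: > 0 */
	1,				/* dma_attr_granular: <= MMU_PAGESIZE */
	0				/* dma_attr_flags */
};
#endif
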
2587 */ 2588 if (dmareq->dmar_object.dmao_size > 0x7FFFFFFF) { 2589 return (DDI_DMA_TOOBIG); 2590 } 2591 #endif 2592 2593 return (DDI_SUCCESS); 2594 } 2595 2596 2597 /* 2598 * rootnex_need_bounce_seg() 2599 * check to see if the buffer lives on both side of the seg. 2600 */ 2601 static boolean_t 2602 rootnex_need_bounce_seg(ddi_dma_obj_t *dmar_object, rootnex_sglinfo_t *sglinfo) 2603 { 2604 ddi_dma_atyp_t buftype; 2605 rootnex_addr_t raddr; 2606 boolean_t lower_addr; 2607 boolean_t upper_addr; 2608 uint64_t offset; 2609 page_t **pplist; 2610 uint64_t paddr; 2611 uint32_t psize; 2612 uint32_t size; 2613 caddr_t vaddr; 2614 uint_t pcnt; 2615 page_t *pp; 2616 2617 2618 /* shortcuts */ 2619 pplist = dmar_object->dmao_obj.virt_obj.v_priv; 2620 vaddr = dmar_object->dmao_obj.virt_obj.v_addr; 2621 buftype = dmar_object->dmao_type; 2622 size = dmar_object->dmao_size; 2623 2624 lower_addr = B_FALSE; 2625 upper_addr = B_FALSE; 2626 pcnt = 0; 2627 2628 /* 2629 * Process the first page to handle the initial offset of the buffer. 2630 * We'll use the base address we get later when we loop through all 2631 * the pages. 2632 */ 2633 if (buftype == DMA_OTYP_PAGES) { 2634 pp = dmar_object->dmao_obj.pp_obj.pp_pp; 2635 offset = dmar_object->dmao_obj.pp_obj.pp_offset & 2636 MMU_PAGEOFFSET; 2637 paddr = pfn_to_pa(pp->p_pagenum) + offset; 2638 psize = MIN(size, (MMU_PAGESIZE - offset)); 2639 pp = pp->p_next; 2640 sglinfo->si_asp = NULL; 2641 } else if (pplist != NULL) { 2642 offset = (uintptr_t)vaddr & MMU_PAGEOFFSET; 2643 sglinfo->si_asp = dmar_object->dmao_obj.virt_obj.v_as; 2644 if (sglinfo->si_asp == NULL) { 2645 sglinfo->si_asp = &kas; 2646 } 2647 paddr = pfn_to_pa(pplist[pcnt]->p_pagenum); 2648 paddr += offset; 2649 psize = MIN(size, (MMU_PAGESIZE - offset)); 2650 pcnt++; 2651 } else { 2652 offset = (uintptr_t)vaddr & MMU_PAGEOFFSET; 2653 sglinfo->si_asp = dmar_object->dmao_obj.virt_obj.v_as; 2654 if (sglinfo->si_asp == NULL) { 2655 sglinfo->si_asp = &kas; 2656 } 2657 paddr = pfn_to_pa(hat_getpfnum(sglinfo->si_asp->a_hat, vaddr)); 2658 paddr += offset; 2659 psize = MIN(size, (MMU_PAGESIZE - offset)); 2660 vaddr += psize; 2661 } 2662 2663 #ifdef __xpv 2664 /* 2665 * If we're dom0, we're using a real device so we need to load 2666 * the cookies with MFNs instead of PFNs. 2667 */ 2668 raddr = ROOTNEX_PADDR_TO_RBASE(xen_info, paddr); 2669 #else 2670 raddr = paddr; 2671 #endif 2672 2673 if ((raddr + psize) > sglinfo->si_segmask) { 2674 upper_addr = B_TRUE; 2675 } else { 2676 lower_addr = B_TRUE; 2677 } 2678 size -= psize; 2679 2680 /* 2681 * Walk through the rest of the pages in the buffer. Track to see 2682 * if we have pages on both sides of the segment boundary. 2683 */ 2684 while (size > 0) { 2685 /* partial or full page */ 2686 psize = MIN(size, MMU_PAGESIZE); 2687 2688 if (buftype == DMA_OTYP_PAGES) { 2689 /* get the paddr from the page_t */ 2690 ASSERT(!PP_ISFREE(pp) && PAGE_LOCKED(pp)); 2691 paddr = pfn_to_pa(pp->p_pagenum); 2692 pp = pp->p_next; 2693 } else if (pplist != NULL) { 2694 /* index into the array of page_t's to get the paddr */ 2695 ASSERT(!PP_ISFREE(pplist[pcnt])); 2696 paddr = pfn_to_pa(pplist[pcnt]->p_pagenum); 2697 pcnt++; 2698 } else { 2699 /* call into the VM to get the paddr */ 2700 paddr = pfn_to_pa(hat_getpfnum(sglinfo->si_asp->a_hat, 2701 vaddr)); 2702 vaddr += psize; 2703 } 2704 2705 #ifdef __xpv 2706 /* 2707 * If we're dom0, we're using a real device so we need to load 2708 * the cookies with MFNs instead of PFNs. 
2709 */ 2710 raddr = ROOTNEX_PADDR_TO_RBASE(xen_info, paddr); 2711 #else 2712 raddr = paddr; 2713 #endif 2714 2715 if ((raddr + psize) > sglinfo->si_segmask) { 2716 upper_addr = B_TRUE; 2717 } else { 2718 lower_addr = B_TRUE; 2719 } 2720 /* 2721 * if the buffer lives both above and below the segment 2722 * boundary, or the current page is the page immediately 2723 * after the segment, we will use a copy/bounce buffer for 2724 * all pages > seg. 2725 */ 2726 if ((lower_addr && upper_addr) || 2727 (raddr == (sglinfo->si_segmask + 1))) { 2728 return (B_TRUE); 2729 } 2730 2731 size -= psize; 2732 } 2733 2734 return (B_FALSE); 2735 } 2736 2737 2738 /* 2739 * rootnex_get_sgl() 2740 * Called in bind fastpath to get the sgl. Most of this will be replaced 2741 * with a call to the vm layer when vm2.0 comes around... 2742 */ 2743 static void 2744 rootnex_get_sgl(ddi_dma_obj_t *dmar_object, ddi_dma_cookie_t *sgl, 2745 rootnex_sglinfo_t *sglinfo) 2746 { 2747 ddi_dma_atyp_t buftype; 2748 rootnex_addr_t raddr; 2749 uint64_t last_page; 2750 uint64_t offset; 2751 uint64_t addrhi; 2752 uint64_t addrlo; 2753 uint64_t maxseg; 2754 page_t **pplist; 2755 uint64_t paddr; 2756 uint32_t psize; 2757 uint32_t size; 2758 caddr_t vaddr; 2759 uint_t pcnt; 2760 page_t *pp; 2761 uint_t cnt; 2762 2763 2764 /* shortcuts */ 2765 pplist = dmar_object->dmao_obj.virt_obj.v_priv; 2766 vaddr = dmar_object->dmao_obj.virt_obj.v_addr; 2767 maxseg = sglinfo->si_max_cookie_size; 2768 buftype = dmar_object->dmao_type; 2769 addrhi = sglinfo->si_max_addr; 2770 addrlo = sglinfo->si_min_addr; 2771 size = dmar_object->dmao_size; 2772 2773 pcnt = 0; 2774 cnt = 0; 2775 2776 2777 /* 2778 * check to see if we need to use the copy buffer for pages over 2779 * the segment attr. 2780 */ 2781 sglinfo->si_bounce_on_seg = B_FALSE; 2782 if (sglinfo->si_flags & _DDI_DMA_BOUNCE_ON_SEG) { 2783 sglinfo->si_bounce_on_seg = rootnex_need_bounce_seg( 2784 dmar_object, sglinfo); 2785 } 2786 2787 /* 2788 * if we were passed down a linked list of pages, i.e. pointer to 2789 * page_t, use this to get our physical address and buf offset. 2790 */ 2791 if (buftype == DMA_OTYP_PAGES) { 2792 pp = dmar_object->dmao_obj.pp_obj.pp_pp; 2793 ASSERT(!PP_ISFREE(pp) && PAGE_LOCKED(pp)); 2794 offset = dmar_object->dmao_obj.pp_obj.pp_offset & 2795 MMU_PAGEOFFSET; 2796 paddr = pfn_to_pa(pp->p_pagenum) + offset; 2797 psize = MIN(size, (MMU_PAGESIZE - offset)); 2798 pp = pp->p_next; 2799 sglinfo->si_asp = NULL; 2800 2801 /* 2802 * We weren't passed down a linked list of pages, but if we were passed 2803 * down an array of pages, use this to get our physical address and buf 2804 * offset. 2805 */ 2806 } else if (pplist != NULL) { 2807 ASSERT((buftype == DMA_OTYP_VADDR) || 2808 (buftype == DMA_OTYP_BUFVADDR)); 2809 2810 offset = (uintptr_t)vaddr & MMU_PAGEOFFSET; 2811 sglinfo->si_asp = dmar_object->dmao_obj.virt_obj.v_as; 2812 if (sglinfo->si_asp == NULL) { 2813 sglinfo->si_asp = &kas; 2814 } 2815 2816 ASSERT(!PP_ISFREE(pplist[pcnt])); 2817 paddr = pfn_to_pa(pplist[pcnt]->p_pagenum); 2818 paddr += offset; 2819 psize = MIN(size, (MMU_PAGESIZE - offset)); 2820 pcnt++; 2821 2822 /* 2823 * All we have is a virtual address, we'll need to call into the VM 2824 * to get the physical address. 
2825 */ 2826 } else { 2827 ASSERT((buftype == DMA_OTYP_VADDR) || 2828 (buftype == DMA_OTYP_BUFVADDR)); 2829 2830 offset = (uintptr_t)vaddr & MMU_PAGEOFFSET; 2831 sglinfo->si_asp = dmar_object->dmao_obj.virt_obj.v_as; 2832 if (sglinfo->si_asp == NULL) { 2833 sglinfo->si_asp = &kas; 2834 } 2835 2836 paddr = pfn_to_pa(hat_getpfnum(sglinfo->si_asp->a_hat, vaddr)); 2837 paddr += offset; 2838 psize = MIN(size, (MMU_PAGESIZE - offset)); 2839 vaddr += psize; 2840 } 2841 2842 #ifdef __xpv 2843 /* 2844 * If we're dom0, we're using a real device so we need to load 2845 * the cookies with MFNs instead of PFNs. 2846 */ 2847 raddr = ROOTNEX_PADDR_TO_RBASE(xen_info, paddr); 2848 #else 2849 raddr = paddr; 2850 #endif 2851 2852 /* 2853 * Setup the first cookie with the physical address of the page and the 2854 * size of the page (which takes into account the initial offset into 2855 * the page. 2856 */ 2857 sgl[cnt].dmac_laddress = raddr; 2858 sgl[cnt].dmac_size = psize; 2859 sgl[cnt].dmac_type = 0; 2860 2861 /* 2862 * Save away the buffer offset into the page. We'll need this later in 2863 * the copy buffer code to help figure out the page index within the 2864 * buffer and the offset into the current page. 2865 */ 2866 sglinfo->si_buf_offset = offset; 2867 2868 /* 2869 * If we are using the copy buffer for anything over the segment 2870 * boundary, and this page is over the segment boundary. 2871 * OR 2872 * if the DMA engine can't reach the physical address. 2873 */ 2874 if (((sglinfo->si_bounce_on_seg) && 2875 ((raddr + psize) > sglinfo->si_segmask)) || 2876 ((raddr < addrlo) || ((raddr + psize) > addrhi))) { 2877 /* 2878 * Increase how much copy buffer we use. We always increase by 2879 * pagesize so we don't have to worry about converting offsets. 2880 * Set a flag in the cookies dmac_type to indicate that it uses 2881 * the copy buffer. If this isn't the last cookie, go to the 2882 * next cookie (since we separate each page which uses the copy 2883 * buffer in case the copy buffer is not physically contiguous. 2884 */ 2885 sglinfo->si_copybuf_req += MMU_PAGESIZE; 2886 sgl[cnt].dmac_type = ROOTNEX_USES_COPYBUF; 2887 if ((cnt + 1) < sglinfo->si_max_pages) { 2888 cnt++; 2889 sgl[cnt].dmac_laddress = 0; 2890 sgl[cnt].dmac_size = 0; 2891 sgl[cnt].dmac_type = 0; 2892 } 2893 } 2894 2895 /* 2896 * save this page's physical address so we can figure out if the next 2897 * page is physically contiguous. Keep decrementing size until we are 2898 * done with the buffer. 2899 */ 2900 last_page = raddr & MMU_PAGEMASK; 2901 size -= psize; 2902 2903 while (size > 0) { 2904 /* Get the size for this page (i.e. partial or full page) */ 2905 psize = MIN(size, MMU_PAGESIZE); 2906 2907 if (buftype == DMA_OTYP_PAGES) { 2908 /* get the paddr from the page_t */ 2909 ASSERT(!PP_ISFREE(pp) && PAGE_LOCKED(pp)); 2910 paddr = pfn_to_pa(pp->p_pagenum); 2911 pp = pp->p_next; 2912 } else if (pplist != NULL) { 2913 /* index into the array of page_t's to get the paddr */ 2914 ASSERT(!PP_ISFREE(pplist[pcnt])); 2915 paddr = pfn_to_pa(pplist[pcnt]->p_pagenum); 2916 pcnt++; 2917 } else { 2918 /* call into the VM to get the paddr */ 2919 paddr = pfn_to_pa(hat_getpfnum(sglinfo->si_asp->a_hat, 2920 vaddr)); 2921 vaddr += psize; 2922 } 2923 2924 #ifdef __xpv 2925 /* 2926 * If we're dom0, we're using a real device so we need to load 2927 * the cookies with MFNs instead of PFNs. 
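
/*
 * Illustrative sketch only (hypothetical name, kept under #if 0): the
 * per-page copy buffer decision used above and in the loop that follows. A
 * page is bounced either because we are bouncing on the segment limit and
 * the page lies above it, or because the page is simply outside the range
 * the DMA engine can address.
 */
#if 0
static boolean_t
sketch_page_needs_copybuf(uint64_t raddr, uint32_t psize, uint64_t segmask,
    uint64_t addrlo, uint64_t addrhi, boolean_t bounce_on_seg)
{
	if (bounce_on_seg && ((raddr + psize) > segmask))
		return (B_TRUE);
	if ((raddr < addrlo) || ((raddr + psize) > addrhi))
		return (B_TRUE);
	return (B_FALSE);
}
#endif
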
2928 */ 2929 raddr = ROOTNEX_PADDR_TO_RBASE(xen_info, paddr); 2930 #else 2931 raddr = paddr; 2932 #endif 2933 2934 /* 2935 * If we are using the copy buffer for anything over the 2936 * segment boundary, and this page is over the segment 2937 * boundary. 2938 * OR 2939 * if the DMA engine can't reach the physical address. 2940 */ 2941 if (((sglinfo->si_bounce_on_seg) && 2942 ((raddr + psize) > sglinfo->si_segmask)) || 2943 ((raddr < addrlo) || ((raddr + psize) > addrhi))) { 2944 2945 sglinfo->si_copybuf_req += MMU_PAGESIZE; 2946 2947 /* 2948 * if there is something in the current cookie, go to 2949 * the next one. We only want one page in a cookie which 2950 * uses the copybuf since the copybuf doesn't have to 2951 * be physically contiguous. 2952 */ 2953 if (sgl[cnt].dmac_size != 0) { 2954 cnt++; 2955 } 2956 sgl[cnt].dmac_laddress = raddr; 2957 sgl[cnt].dmac_size = psize; 2958 #if defined(__amd64) 2959 sgl[cnt].dmac_type = ROOTNEX_USES_COPYBUF; 2960 #else 2961 /* 2962 * save the buf offset for 32-bit kernel. used in the 2963 * obsoleted interfaces. 2964 */ 2965 sgl[cnt].dmac_type = ROOTNEX_USES_COPYBUF | 2966 (dmar_object->dmao_size - size); 2967 #endif 2968 /* if this isn't the last cookie, go to the next one */ 2969 if ((cnt + 1) < sglinfo->si_max_pages) { 2970 cnt++; 2971 sgl[cnt].dmac_laddress = 0; 2972 sgl[cnt].dmac_size = 0; 2973 sgl[cnt].dmac_type = 0; 2974 } 2975 2976 /* 2977 * this page didn't need the copy buffer, if it's not physically 2978 * contiguous, or it would put us over a segment boundary, or it 2979 * puts us over the max cookie size, or the current sgl doesn't 2980 * have anything in it. 2981 */ 2982 } else if (((last_page + MMU_PAGESIZE) != raddr) || 2983 !(raddr & sglinfo->si_segmask) || 2984 ((sgl[cnt].dmac_size + psize) > maxseg) || 2985 (sgl[cnt].dmac_size == 0)) { 2986 /* 2987 * if we're not already in a new cookie, go to the next 2988 * cookie. 2989 */ 2990 if (sgl[cnt].dmac_size != 0) { 2991 cnt++; 2992 } 2993 2994 /* save the cookie information */ 2995 sgl[cnt].dmac_laddress = raddr; 2996 sgl[cnt].dmac_size = psize; 2997 #if defined(__amd64) 2998 sgl[cnt].dmac_type = 0; 2999 #else 3000 /* 3001 * save the buf offset for 32-bit kernel. used in the 3002 * obsoleted interfaces. 3003 */ 3004 sgl[cnt].dmac_type = dmar_object->dmao_size - size; 3005 #endif 3006 3007 /* 3008 * this page didn't need the copy buffer, it is physically 3009 * contiguous with the last page, and it's <= the max cookie 3010 * size. 3011 */ 3012 } else { 3013 sgl[cnt].dmac_size += psize; 3014 3015 /* 3016 * if this exactly == the maximum cookie size, and 3017 * it isn't the last cookie, go to the next cookie. 3018 */ 3019 if (((sgl[cnt].dmac_size + psize) == maxseg) && 3020 ((cnt + 1) < sglinfo->si_max_pages)) { 3021 cnt++; 3022 sgl[cnt].dmac_laddress = 0; 3023 sgl[cnt].dmac_size = 0; 3024 sgl[cnt].dmac_type = 0; 3025 } 3026 } 3027 3028 /* 3029 * save this page's physical address so we can figure out if the 3030 * next page is physically contiguous. Keep decrementing size 3031 * until we are done with the buffer. 3032 */ 3033 last_page = raddr; 3034 size -= psize; 3035 } 3036 3037 /* we're done, save away how many cookies the sgl has */ 3038 if (sgl[cnt].dmac_size == 0) { 3039 ASSERT(cnt < sglinfo->si_max_pages); 3040 sglinfo->si_sgl_size = cnt; 3041 } else { 3042 sglinfo->si_sgl_size = cnt + 1; 3043 } 3044 } 3045 3046 /* 3047 * rootnex_bind_slowpath() 3048 * Call in the bind path if the calling driver can't use the sgl without 3049 * modifying it. 
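
/*
 * Illustrative sketch only (hypothetical name, kept under #if 0): for a page
 * that does not need the copy buffer, the loop above starts a new cookie
 * rather than extending the current one under the conditions below;
 * otherwise the page is merged into the current cookie.
 */
#if 0
static boolean_t
sketch_start_new_cookie(uint64_t last_page, uint64_t raddr, uint64_t segmask,
    uint64_t cur_cookie_size, uint32_t psize, uint64_t maxseg)
{
	/* not physically contiguous with the previous page */
	if ((last_page + MMU_PAGESIZE) != raddr)
		return (B_TRUE);

	/* the new page begins exactly on a segment boundary */
	if ((raddr & segmask) == 0)
		return (B_TRUE);

	/* growing the current cookie would exceed the max cookie size */
	if ((cur_cookie_size + psize) > maxseg)
		return (B_TRUE);

	/* nothing accumulated yet; this page starts the cookie */
	if (cur_cookie_size == 0)
		return (B_TRUE);

	return (B_FALSE);
}
#endif
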
We either need to use the copy buffer and/or we will end up 3050 * with a partial bind. 3051 */ 3052 static int 3053 rootnex_bind_slowpath(ddi_dma_impl_t *hp, struct ddi_dma_req *dmareq, 3054 rootnex_dma_t *dma, ddi_dma_attr_t *attr, int kmflag) 3055 { 3056 rootnex_sglinfo_t *sinfo; 3057 rootnex_window_t *window; 3058 ddi_dma_cookie_t *cookie; 3059 size_t copybuf_used; 3060 size_t dmac_size; 3061 boolean_t partial; 3062 off_t cur_offset; 3063 page_t *cur_pp; 3064 major_t mnum; 3065 int e; 3066 int i; 3067 3068 3069 sinfo = &dma->dp_sglinfo; 3070 copybuf_used = 0; 3071 partial = B_FALSE; 3072 3073 /* 3074 * If we're using the copybuf, set the copybuf state in dma struct. 3075 * Needs to be first since it sets the copy buffer size. 3076 */ 3077 if (sinfo->si_copybuf_req != 0) { 3078 e = rootnex_setup_copybuf(hp, dmareq, dma, attr); 3079 if (e != DDI_SUCCESS) { 3080 return (e); 3081 } 3082 } else { 3083 dma->dp_copybuf_size = 0; 3084 } 3085 3086 /* 3087 * Figure out if we need to do a partial mapping. If so, figure out 3088 * if we need to trim the buffers when we munge the sgl. 3089 */ 3090 if ((dma->dp_copybuf_size < sinfo->si_copybuf_req) || 3091 (dma->dp_dma.dmao_size > dma->dp_maxxfer) || 3092 (attr->dma_attr_sgllen < sinfo->si_sgl_size)) { 3093 dma->dp_partial_required = B_TRUE; 3094 if (attr->dma_attr_granular != 1) { 3095 dma->dp_trim_required = B_TRUE; 3096 } 3097 } else { 3098 dma->dp_partial_required = B_FALSE; 3099 dma->dp_trim_required = B_FALSE; 3100 } 3101 3102 /* If we need to do a partial bind, make sure the driver supports it */ 3103 if (dma->dp_partial_required && 3104 !(dmareq->dmar_flags & DDI_DMA_PARTIAL)) { 3105 3106 mnum = ddi_driver_major(dma->dp_dip); 3107 /* 3108 * patchable which allows us to print one warning per major 3109 * number. 3110 */ 3111 if ((rootnex_bind_warn) && 3112 ((rootnex_warn_list[mnum] & ROOTNEX_BIND_WARNING) == 0)) { 3113 rootnex_warn_list[mnum] |= ROOTNEX_BIND_WARNING; 3114 cmn_err(CE_WARN, "!%s: coding error detected, the " 3115 "driver is using ddi_dma_attr(9S) incorrectly. " 3116 "There is a small risk of data corruption in " 3117 "particular with large I/Os. The driver should be " 3118 "replaced with a corrected version for proper " 3119 "system operation. To disable this warning, add " 3120 "'set rootnex:rootnex_bind_warn=0' to " 3121 "/etc/system(4).", ddi_driver_name(dma->dp_dip)); 3122 } 3123 return (DDI_DMA_TOOBIG); 3124 } 3125 3126 /* 3127 * we might need multiple windows, setup state to handle them. In this 3128 * code path, we will have at least one window. 3129 */ 3130 e = rootnex_setup_windows(hp, dma, attr, kmflag); 3131 if (e != DDI_SUCCESS) { 3132 rootnex_teardown_copybuf(dma); 3133 return (e); 3134 } 3135 3136 window = &dma->dp_window[0]; 3137 cookie = &dma->dp_cookies[0]; 3138 cur_offset = 0; 3139 rootnex_init_win(hp, dma, window, cookie, cur_offset); 3140 if (dmareq->dmar_object.dmao_type == DMA_OTYP_PAGES) { 3141 cur_pp = dmareq->dmar_object.dmao_obj.pp_obj.pp_pp; 3142 } 3143 3144 /* loop though all the cookies we got back from get_sgl() */ 3145 for (i = 0; i < sinfo->si_sgl_size; i++) { 3146 /* 3147 * If we're using the copy buffer, check this cookie and setup 3148 * its associated copy buffer state. If this cookie uses the 3149 * copy buffer, make sure we sync this window during dma_sync. 
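
/*
 * Illustrative sketch only (hypothetical name, kept under #if 0): the
 * partial/trim decision made above, isolated. A bind needs multiple windows
 * when the copy buffer we could allocate is smaller than what the sgl
 * requires, the buffer exceeds one transfer, or there are more cookies than
 * the device accepts; trimming is then needed whenever the granularity is
 * not 1.
 */
#if 0
static void
sketch_partial_and_trim(size_t copybuf_size, size_t copybuf_req,
    uint64_t bufsize, uint64_t maxxfer, int sgllen, uint_t sgl_size,
    uint64_t granular, boolean_t *partialp, boolean_t *trimp)
{
	*partialp = ((copybuf_size < copybuf_req) || (bufsize > maxxfer) ||
	    ((uint_t)sgllen < sgl_size)) ? B_TRUE : B_FALSE;
	*trimp = (*partialp && (granular != 1)) ? B_TRUE : B_FALSE;
}
#endif
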
3150 */ 3151 if (dma->dp_copybuf_size > 0) { 3152 rootnex_setup_cookie(&dmareq->dmar_object, dma, cookie, 3153 cur_offset, ©buf_used, &cur_pp); 3154 if (cookie->dmac_type & ROOTNEX_USES_COPYBUF) { 3155 window->wd_dosync = B_TRUE; 3156 } 3157 } 3158 3159 /* 3160 * save away the cookie size, since it could be modified in 3161 * the windowing code. 3162 */ 3163 dmac_size = cookie->dmac_size; 3164 3165 /* if we went over max copybuf size */ 3166 if (dma->dp_copybuf_size && 3167 (copybuf_used > dma->dp_copybuf_size)) { 3168 partial = B_TRUE; 3169 e = rootnex_copybuf_window_boundary(hp, dma, &window, 3170 cookie, cur_offset, ©buf_used); 3171 if (e != DDI_SUCCESS) { 3172 rootnex_teardown_copybuf(dma); 3173 rootnex_teardown_windows(dma); 3174 return (e); 3175 } 3176 3177 /* 3178 * if the coookie uses the copy buffer, make sure the 3179 * new window we just moved to is set to sync. 3180 */ 3181 if (cookie->dmac_type & ROOTNEX_USES_COPYBUF) { 3182 window->wd_dosync = B_TRUE; 3183 } 3184 DTRACE_PROBE1(rootnex__copybuf__window, dev_info_t *, 3185 dma->dp_dip); 3186 3187 /* if the cookie cnt == max sgllen, move to the next window */ 3188 } else if (window->wd_cookie_cnt >= attr->dma_attr_sgllen) { 3189 partial = B_TRUE; 3190 ASSERT(window->wd_cookie_cnt == attr->dma_attr_sgllen); 3191 e = rootnex_sgllen_window_boundary(hp, dma, &window, 3192 cookie, attr, cur_offset); 3193 if (e != DDI_SUCCESS) { 3194 rootnex_teardown_copybuf(dma); 3195 rootnex_teardown_windows(dma); 3196 return (e); 3197 } 3198 3199 /* 3200 * if the coookie uses the copy buffer, make sure the 3201 * new window we just moved to is set to sync. 3202 */ 3203 if (cookie->dmac_type & ROOTNEX_USES_COPYBUF) { 3204 window->wd_dosync = B_TRUE; 3205 } 3206 DTRACE_PROBE1(rootnex__sgllen__window, dev_info_t *, 3207 dma->dp_dip); 3208 3209 /* else if we will be over maxxfer */ 3210 } else if ((window->wd_size + dmac_size) > 3211 dma->dp_maxxfer) { 3212 partial = B_TRUE; 3213 e = rootnex_maxxfer_window_boundary(hp, dma, &window, 3214 cookie); 3215 if (e != DDI_SUCCESS) { 3216 rootnex_teardown_copybuf(dma); 3217 rootnex_teardown_windows(dma); 3218 return (e); 3219 } 3220 3221 /* 3222 * if the coookie uses the copy buffer, make sure the 3223 * new window we just moved to is set to sync. 3224 */ 3225 if (cookie->dmac_type & ROOTNEX_USES_COPYBUF) { 3226 window->wd_dosync = B_TRUE; 3227 } 3228 DTRACE_PROBE1(rootnex__maxxfer__window, dev_info_t *, 3229 dma->dp_dip); 3230 3231 /* else this cookie fits in the current window */ 3232 } else { 3233 window->wd_cookie_cnt++; 3234 window->wd_size += dmac_size; 3235 } 3236 3237 /* track our offset into the buffer, go to the next cookie */ 3238 ASSERT(dmac_size <= dma->dp_dma.dmao_size); 3239 ASSERT(cookie->dmac_size <= dmac_size); 3240 cur_offset += dmac_size; 3241 cookie++; 3242 } 3243 3244 /* if we ended up with a zero sized window in the end, clean it up */ 3245 if (window->wd_size == 0) { 3246 hp->dmai_nwin--; 3247 window--; 3248 } 3249 3250 ASSERT(window->wd_trim.tr_trim_last == B_FALSE); 3251 3252 if (!partial) { 3253 return (DDI_DMA_MAPPED); 3254 } 3255 3256 ASSERT(dma->dp_partial_required); 3257 return (DDI_DMA_PARTIAL_MAP); 3258 } 3259 3260 3261 /* 3262 * rootnex_setup_copybuf() 3263 * Called in bind slowpath. Figures out if we're going to use the copy 3264 * buffer, and if we do, sets up the basic state to handle it. 
3265 */ 3266 static int 3267 rootnex_setup_copybuf(ddi_dma_impl_t *hp, struct ddi_dma_req *dmareq, 3268 rootnex_dma_t *dma, ddi_dma_attr_t *attr) 3269 { 3270 rootnex_sglinfo_t *sinfo; 3271 ddi_dma_attr_t lattr; 3272 size_t max_copybuf; 3273 int cansleep; 3274 int e; 3275 #if !defined(__amd64) 3276 int vmflag; 3277 #endif 3278 3279 3280 sinfo = &dma->dp_sglinfo; 3281 3282 /* read this first so it's consistent through the routine */ 3283 max_copybuf = i_ddi_copybuf_size() & MMU_PAGEMASK; 3284 3285 /* We need to call into the rootnex on ddi_dma_sync() */ 3286 hp->dmai_rflags &= ~DMP_NOSYNC; 3287 3288 /* make sure the copybuf size <= the max size */ 3289 dma->dp_copybuf_size = MIN(sinfo->si_copybuf_req, max_copybuf); 3290 ASSERT((dma->dp_copybuf_size & MMU_PAGEOFFSET) == 0); 3291 3292 #if !defined(__amd64) 3293 /* 3294 * if we don't have kva space to copy to/from, allocate the KVA space 3295 * now. We only do this for the 32-bit kernel. We use seg kpm space for 3296 * the 64-bit kernel. 3297 */ 3298 if ((dmareq->dmar_object.dmao_type == DMA_OTYP_PAGES) || 3299 (dmareq->dmar_object.dmao_obj.virt_obj.v_as != NULL)) { 3300 3301 /* convert the sleep flags */ 3302 if (dmareq->dmar_fp == DDI_DMA_SLEEP) { 3303 vmflag = VM_SLEEP; 3304 } else { 3305 vmflag = VM_NOSLEEP; 3306 } 3307 3308 /* allocate Kernel VA space that we can bcopy to/from */ 3309 dma->dp_kva = vmem_alloc(heap_arena, dma->dp_copybuf_size, 3310 vmflag); 3311 if (dma->dp_kva == NULL) { 3312 return (DDI_DMA_NORESOURCES); 3313 } 3314 } 3315 #endif 3316 3317 /* convert the sleep flags */ 3318 if (dmareq->dmar_fp == DDI_DMA_SLEEP) { 3319 cansleep = 1; 3320 } else { 3321 cansleep = 0; 3322 } 3323 3324 /* 3325 * Allocate the actual copy buffer. This needs to fit within the DMA 3326 * engine limits, so we can't use kmem_alloc... We don't need 3327 * contiguous memory (sgllen) since we will be forcing windows on 3328 * sgllen anyway. 3329 */ 3330 lattr = *attr; 3331 lattr.dma_attr_align = MMU_PAGESIZE; 3332 /* 3333 * this should be < 0 to indicate no limit, but due to a bug in 3334 * the rootnex, we'll set it to the maximum positive int. 3335 */ 3336 lattr.dma_attr_sgllen = 0x7fffffff; 3337 /* 3338 * if we're using the copy buffer because of seg, use that for our 3339 * upper address limit. 3340 */ 3341 if (sinfo->si_bounce_on_seg) { 3342 lattr.dma_attr_addr_hi = lattr.dma_attr_seg; 3343 } 3344 e = i_ddi_mem_alloc(dma->dp_dip, &lattr, dma->dp_copybuf_size, cansleep, 3345 0, NULL, &dma->dp_cbaddr, &dma->dp_cbsize, NULL); 3346 if (e != DDI_SUCCESS) { 3347 #if !defined(__amd64) 3348 if (dma->dp_kva != NULL) { 3349 vmem_free(heap_arena, dma->dp_kva, 3350 dma->dp_copybuf_size); 3351 } 3352 #endif 3353 return (DDI_DMA_NORESOURCES); 3354 } 3355 3356 DTRACE_PROBE2(rootnex__alloc__copybuf, dev_info_t *, dma->dp_dip, 3357 size_t, dma->dp_copybuf_size); 3358 3359 return (DDI_SUCCESS); 3360 } 3361 3362 3363 /* 3364 * rootnex_setup_windows() 3365 * Called in bind slowpath to setup the window state. We always have windows 3366 * in the slowpath. Even if the window count = 1. 
 */
static int
rootnex_setup_windows(ddi_dma_impl_t *hp, rootnex_dma_t *dma,
    ddi_dma_attr_t *attr, int kmflag)
{
	rootnex_window_t *windowp;
	rootnex_sglinfo_t *sinfo;
	size_t copy_state_size;
	size_t win_state_size;
	size_t state_available;
	size_t space_needed;
	uint_t copybuf_win;
	uint_t maxxfer_win;
	size_t space_used;
	uint_t sglwin;


	sinfo = &dma->dp_sglinfo;

	dma->dp_current_win = 0;
	hp->dmai_nwin = 0;

	/* If we don't need to do a partial, we only have one window */
	if (!dma->dp_partial_required) {
		dma->dp_max_win = 1;

	/*
	 * we need multiple windows, so we need to figure out the worst-case
	 * number of windows.
	 */
	} else {
		/*
		 * if we need windows because we need more copy buffer than
		 * we allow, the worst-case number of windows we could need
		 * here would be (copybuf space required / copybuf space that
		 * we have) plus one for remainder, and plus 2 to handle the
		 * extra pages on the trim for the first and last pages of the
		 * buffer (a page is the minimum window size so under the right
		 * attr settings, you could have a window for each page).
		 * The last page will only be hit here if the size is not a
		 * multiple of the granularity (which theoretically shouldn't
		 * be the case but never has been enforced, so we could have
		 * broken things without it).
		 */
		if (sinfo->si_copybuf_req > dma->dp_copybuf_size) {
			ASSERT(dma->dp_copybuf_size > 0);
			copybuf_win = (sinfo->si_copybuf_req /
			    dma->dp_copybuf_size) + 1 + 2;
		} else {
			copybuf_win = 0;
		}

		/*
		 * if we need windows because we have more cookies than the H/W
		 * can handle, the number of windows we would need here would
		 * be (cookie count / (cookie count the H/W supports minus 1
		 * [for trim])) plus one for remainder.
		 */
		if (attr->dma_attr_sgllen < sinfo->si_sgl_size) {
			sglwin = (sinfo->si_sgl_size /
			    (attr->dma_attr_sgllen - 1)) + 1;
		} else {
			sglwin = 0;
		}

		/*
		 * if we need windows because we're binding more memory than
		 * the H/W can transfer at once, the number of windows we
		 * would need here would be (xfer count / max xfer H/W
		 * supports) plus one for remainder, and plus 2 to handle the
		 * extra pages on the trim (see above comment about trim)
		 */
		if (dma->dp_dma.dmao_size > dma->dp_maxxfer) {
			maxxfer_win = (dma->dp_dma.dmao_size /
			    dma->dp_maxxfer) + 1 + 2;
		} else {
			maxxfer_win = 0;
		}
		dma->dp_max_win = copybuf_win + sglwin + maxxfer_win;
		ASSERT(dma->dp_max_win > 0);
	}
	win_state_size = dma->dp_max_win * sizeof (rootnex_window_t);

	/*
	 * Get space for window and potential copy buffer state. Before we
	 * go and allocate memory, see if we can get away with using what's
	 * left in the pre-allocated state or the dynamically allocated sgl.
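
/*
 * Illustrative sketch only (hypothetical name, kept under #if 0): the
 * worst-case window count computed above is just the sum of the three
 * independent estimates. As in the driver, the copy buffer term assumes a
 * non-zero copy buffer size.
 */
#if 0
static uint_t
sketch_max_windows(size_t copybuf_req, size_t copybuf_size, uint_t sgl_size,
    int sgllen, uint64_t bufsize, uint64_t maxxfer)
{
	uint_t copybuf_win = 0;
	uint_t maxxfer_win = 0;
	uint_t sglwin = 0;

	/* windows forced by running out of copy buffer (+1 rem, +2 trim) */
	if (copybuf_req > copybuf_size) {
		ASSERT(copybuf_size > 0);
		copybuf_win = (copybuf_req / copybuf_size) + 1 + 2;
	}

	/* windows forced by the device's sgllen (one cookie lost to trim) */
	if ((uint_t)sgllen < sgl_size)
		sglwin = (sgl_size / (sgllen - 1)) + 1;

	/* windows forced by maxxfer (+1 remainder, +2 trim pages) */
	if (bufsize > maxxfer)
		maxxfer_win = (bufsize / maxxfer) + 1 + 2;

	return (copybuf_win + sglwin + maxxfer_win);
}
#endif
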
3454 */ 3455 space_used = (uintptr_t)(sinfo->si_sgl_size * 3456 sizeof (ddi_dma_cookie_t)); 3457 3458 /* if we dynamically allocated space for the cookies */ 3459 if (dma->dp_need_to_free_cookie) { 3460 /* if we have more space in the pre-allocted buffer, use it */ 3461 ASSERT(space_used <= dma->dp_cookie_size); 3462 if ((dma->dp_cookie_size - space_used) <= 3463 rootnex_state->r_prealloc_size) { 3464 state_available = rootnex_state->r_prealloc_size; 3465 windowp = (rootnex_window_t *)dma->dp_prealloc_buffer; 3466 3467 /* 3468 * else, we have more free space in the dynamically allocated 3469 * buffer, i.e. the buffer wasn't worse case fragmented so we 3470 * didn't need a lot of cookies. 3471 */ 3472 } else { 3473 state_available = dma->dp_cookie_size - space_used; 3474 windowp = (rootnex_window_t *) 3475 &dma->dp_cookies[sinfo->si_sgl_size]; 3476 } 3477 3478 /* we used the pre-alloced buffer */ 3479 } else { 3480 ASSERT(space_used <= rootnex_state->r_prealloc_size); 3481 state_available = rootnex_state->r_prealloc_size - space_used; 3482 windowp = (rootnex_window_t *) 3483 &dma->dp_cookies[sinfo->si_sgl_size]; 3484 } 3485 3486 /* 3487 * figure out how much state we need to track the copy buffer. Add an 3488 * addition 8 bytes for pointer alignemnt later. 3489 */ 3490 if (dma->dp_copybuf_size > 0) { 3491 copy_state_size = sinfo->si_max_pages * 3492 sizeof (rootnex_pgmap_t); 3493 } else { 3494 copy_state_size = 0; 3495 } 3496 /* add an additional 8 bytes for pointer alignment */ 3497 space_needed = win_state_size + copy_state_size + 0x8; 3498 3499 /* if we have enough space already, use it */ 3500 if (state_available >= space_needed) { 3501 dma->dp_window = windowp; 3502 dma->dp_need_to_free_window = B_FALSE; 3503 3504 /* not enough space, need to allocate more. */ 3505 } else { 3506 dma->dp_window = kmem_alloc(space_needed, kmflag); 3507 if (dma->dp_window == NULL) { 3508 return (DDI_DMA_NORESOURCES); 3509 } 3510 dma->dp_need_to_free_window = B_TRUE; 3511 dma->dp_window_size = space_needed; 3512 DTRACE_PROBE2(rootnex__bind__sp__alloc, dev_info_t *, 3513 dma->dp_dip, size_t, space_needed); 3514 } 3515 3516 /* 3517 * we allocate copy buffer state and window state at the same time. 3518 * setup our copy buffer state pointers. Make sure it's aligned. 3519 */ 3520 if (dma->dp_copybuf_size > 0) { 3521 dma->dp_pgmap = (rootnex_pgmap_t *)(((uintptr_t) 3522 &dma->dp_window[dma->dp_max_win] + 0x7) & ~0x7); 3523 3524 #if !defined(__amd64) 3525 /* 3526 * make sure all pm_mapped, pm_vaddr, and pm_pp are set to 3527 * false/NULL. Should be quicker to bzero vs loop and set. 3528 */ 3529 bzero(dma->dp_pgmap, copy_state_size); 3530 #endif 3531 } else { 3532 dma->dp_pgmap = NULL; 3533 } 3534 3535 return (DDI_SUCCESS); 3536 } 3537 3538 3539 /* 3540 * rootnex_teardown_copybuf() 3541 * cleans up after rootnex_setup_copybuf() 3542 */ 3543 static void 3544 rootnex_teardown_copybuf(rootnex_dma_t *dma) 3545 { 3546 #if !defined(__amd64) 3547 int i; 3548 3549 /* 3550 * if we allocated kernel heap VMEM space, go through all the pages and 3551 * map out any of the ones that we're mapped into the kernel heap VMEM 3552 * arena. Then free the VMEM space. 
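
/*
 * Illustrative sketch only (hypothetical name, kept under #if 0): the copy
 * buffer page state is laid down immediately after the window array, so its
 * address is rounded up to an 8-byte boundary exactly as above (the extra 8
 * bytes added to space_needed pay for this).
 */
#if 0
static uintptr_t
sketch_align8(uintptr_t addr)
{
	return ((addr + 0x7) & ~(uintptr_t)0x7);
}
#endif
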
3553 */ 3554 if (dma->dp_kva != NULL) { 3555 for (i = 0; i < dma->dp_sglinfo.si_max_pages; i++) { 3556 if (dma->dp_pgmap[i].pm_mapped) { 3557 hat_unload(kas.a_hat, dma->dp_pgmap[i].pm_kaddr, 3558 MMU_PAGESIZE, HAT_UNLOAD); 3559 dma->dp_pgmap[i].pm_mapped = B_FALSE; 3560 } 3561 } 3562 3563 vmem_free(heap_arena, dma->dp_kva, dma->dp_copybuf_size); 3564 } 3565 3566 #endif 3567 3568 /* if we allocated a copy buffer, free it */ 3569 if (dma->dp_cbaddr != NULL) { 3570 i_ddi_mem_free(dma->dp_cbaddr, NULL); 3571 } 3572 } 3573 3574 3575 /* 3576 * rootnex_teardown_windows() 3577 * cleans up after rootnex_setup_windows() 3578 */ 3579 static void 3580 rootnex_teardown_windows(rootnex_dma_t *dma) 3581 { 3582 /* 3583 * if we had to allocate window state on the last bind (because we 3584 * didn't have enough pre-allocated space in the handle), free it. 3585 */ 3586 if (dma->dp_need_to_free_window) { 3587 kmem_free(dma->dp_window, dma->dp_window_size); 3588 } 3589 } 3590 3591 3592 /* 3593 * rootnex_init_win() 3594 * Called in bind slow path during creation of a new window. Initializes 3595 * window state to default values. 3596 */ 3597 /*ARGSUSED*/ 3598 static void 3599 rootnex_init_win(ddi_dma_impl_t *hp, rootnex_dma_t *dma, 3600 rootnex_window_t *window, ddi_dma_cookie_t *cookie, off_t cur_offset) 3601 { 3602 hp->dmai_nwin++; 3603 window->wd_dosync = B_FALSE; 3604 window->wd_offset = cur_offset; 3605 window->wd_size = 0; 3606 window->wd_first_cookie = cookie; 3607 window->wd_cookie_cnt = 0; 3608 window->wd_trim.tr_trim_first = B_FALSE; 3609 window->wd_trim.tr_trim_last = B_FALSE; 3610 window->wd_trim.tr_first_copybuf_win = B_FALSE; 3611 window->wd_trim.tr_last_copybuf_win = B_FALSE; 3612 #if !defined(__amd64) 3613 window->wd_remap_copybuf = dma->dp_cb_remaping; 3614 #endif 3615 } 3616 3617 3618 /* 3619 * rootnex_setup_cookie() 3620 * Called in the bind slow path when the sgl uses the copy buffer. If any of 3621 * the sgl uses the copy buffer, we need to go through each cookie, figure 3622 * out if it uses the copy buffer, and if it does, save away everything we'll 3623 * need during sync. 3624 */ 3625 static void 3626 rootnex_setup_cookie(ddi_dma_obj_t *dmar_object, rootnex_dma_t *dma, 3627 ddi_dma_cookie_t *cookie, off_t cur_offset, size_t *copybuf_used, 3628 page_t **cur_pp) 3629 { 3630 boolean_t copybuf_sz_power_2; 3631 rootnex_sglinfo_t *sinfo; 3632 paddr_t paddr; 3633 uint_t pidx; 3634 uint_t pcnt; 3635 off_t poff; 3636 #if defined(__amd64) 3637 pfn_t pfn; 3638 #else 3639 page_t **pplist; 3640 #endif 3641 3642 sinfo = &dma->dp_sglinfo; 3643 3644 /* 3645 * Calculate the page index relative to the start of the buffer. The 3646 * index to the current page for our buffer is the offset into the 3647 * first page of the buffer plus our current offset into the buffer 3648 * itself, shifted of course... 3649 */ 3650 pidx = (sinfo->si_buf_offset + cur_offset) >> MMU_PAGESHIFT; 3651 ASSERT(pidx < sinfo->si_max_pages); 3652 3653 /* if this cookie uses the copy buffer */ 3654 if (cookie->dmac_type & ROOTNEX_USES_COPYBUF) { 3655 /* 3656 * NOTE: we know that since this cookie uses the copy buffer, it 3657 * is <= MMU_PAGESIZE. 3658 */ 3659 3660 /* 3661 * get the offset into the page. For the 64-bit kernel, get the 3662 * pfn which we'll use with seg kpm. 
3663 */ 3664 poff = cookie->dmac_laddress & MMU_PAGEOFFSET; 3665 #if defined(__amd64) 3666 /* mfn_to_pfn() is a NOP on i86pc */ 3667 pfn = mfn_to_pfn(cookie->dmac_laddress >> MMU_PAGESHIFT); 3668 #endif /* __amd64 */ 3669 3670 /* figure out if the copybuf size is a power of 2 */ 3671 if (dma->dp_copybuf_size & (dma->dp_copybuf_size - 1)) { 3672 copybuf_sz_power_2 = B_FALSE; 3673 } else { 3674 copybuf_sz_power_2 = B_TRUE; 3675 } 3676 3677 /* This page uses the copy buffer */ 3678 dma->dp_pgmap[pidx].pm_uses_copybuf = B_TRUE; 3679 3680 /* 3681 * save the copy buffer KVA that we'll use with this page. 3682 * if we still fit within the copybuf, it's a simple add. 3683 * otherwise, we need to wrap over using & or % accordingly. 3684 */ 3685 if ((*copybuf_used + MMU_PAGESIZE) <= dma->dp_copybuf_size) { 3686 dma->dp_pgmap[pidx].pm_cbaddr = dma->dp_cbaddr + 3687 *copybuf_used; 3688 } else { 3689 if (copybuf_sz_power_2) { 3690 dma->dp_pgmap[pidx].pm_cbaddr = (caddr_t)( 3691 (uintptr_t)dma->dp_cbaddr + 3692 (*copybuf_used & 3693 (dma->dp_copybuf_size - 1))); 3694 } else { 3695 dma->dp_pgmap[pidx].pm_cbaddr = (caddr_t)( 3696 (uintptr_t)dma->dp_cbaddr + 3697 (*copybuf_used % dma->dp_copybuf_size)); 3698 } 3699 } 3700 3701 /* 3702 * over write the cookie physical address with the address of 3703 * the physical address of the copy buffer page that we will 3704 * use. 3705 */ 3706 paddr = pfn_to_pa(hat_getpfnum(kas.a_hat, 3707 dma->dp_pgmap[pidx].pm_cbaddr)) + poff; 3708 3709 #ifdef __xpv 3710 /* 3711 * If we're dom0, we're using a real device so we need to load 3712 * the cookies with MAs instead of PAs. 3713 */ 3714 cookie->dmac_laddress = ROOTNEX_PADDR_TO_RBASE(xen_info, paddr); 3715 #else 3716 cookie->dmac_laddress = paddr; 3717 #endif 3718 3719 /* if we have a kernel VA, it's easy, just save that address */ 3720 if ((dmar_object->dmao_type != DMA_OTYP_PAGES) && 3721 (sinfo->si_asp == &kas)) { 3722 /* 3723 * save away the page aligned virtual address of the 3724 * driver buffer. Offsets are handled in the sync code. 3725 */ 3726 dma->dp_pgmap[pidx].pm_kaddr = (caddr_t)(((uintptr_t) 3727 dmar_object->dmao_obj.virt_obj.v_addr + cur_offset) 3728 & MMU_PAGEMASK); 3729 #if !defined(__amd64) 3730 /* 3731 * we didn't need to, and will never need to map this 3732 * page. 3733 */ 3734 dma->dp_pgmap[pidx].pm_mapped = B_FALSE; 3735 #endif 3736 3737 /* we don't have a kernel VA. We need one for the bcopy. */ 3738 } else { 3739 #if defined(__amd64) 3740 /* 3741 * for the 64-bit kernel, it's easy. We use seg kpm to 3742 * get a Kernel VA for the corresponding pfn. 3743 */ 3744 dma->dp_pgmap[pidx].pm_kaddr = hat_kpm_pfn2va(pfn); 3745 #else 3746 /* 3747 * for the 32-bit kernel, this is a pain. First we'll 3748 * save away the page_t or user VA for this page. This 3749 * is needed in rootnex_dma_win() when we switch to a 3750 * new window which requires us to re-map the copy 3751 * buffer. 
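
/*
 * Illustrative sketch only (hypothetical name, kept under #if 0): how a
 * page's copy buffer KVA is chosen above. While the running usage still
 * fits, it is a plain offset from the start of the copy buffer; past the end
 * it wraps around, using a mask when the copy buffer size is a power of two
 * and a modulo otherwise.
 */
#if 0
static caddr_t
sketch_copybuf_kva(caddr_t cbaddr, size_t copybuf_used, size_t copybuf_size,
    boolean_t size_is_power_2)
{
	if ((copybuf_used + MMU_PAGESIZE) <= copybuf_size)
		return (cbaddr + copybuf_used);

	if (size_is_power_2)
		return (cbaddr + (copybuf_used & (copybuf_size - 1)));

	return (cbaddr + (copybuf_used % copybuf_size));
}
#endif
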
3752 */ 3753 pplist = dmar_object->dmao_obj.virt_obj.v_priv; 3754 if (dmar_object->dmao_type == DMA_OTYP_PAGES) { 3755 dma->dp_pgmap[pidx].pm_pp = *cur_pp; 3756 dma->dp_pgmap[pidx].pm_vaddr = NULL; 3757 } else if (pplist != NULL) { 3758 dma->dp_pgmap[pidx].pm_pp = pplist[pidx]; 3759 dma->dp_pgmap[pidx].pm_vaddr = NULL; 3760 } else { 3761 dma->dp_pgmap[pidx].pm_pp = NULL; 3762 dma->dp_pgmap[pidx].pm_vaddr = (caddr_t) 3763 (((uintptr_t) 3764 dmar_object->dmao_obj.virt_obj.v_addr + 3765 cur_offset) & MMU_PAGEMASK); 3766 } 3767 3768 /* 3769 * save away the page aligned virtual address which was 3770 * allocated from the kernel heap arena (taking into 3771 * account if we need more copy buffer than we alloced 3772 * and use multiple windows to handle this, i.e. &,%). 3773 * NOTE: there isn't and physical memory backing up this 3774 * virtual address space currently. 3775 */ 3776 if ((*copybuf_used + MMU_PAGESIZE) <= 3777 dma->dp_copybuf_size) { 3778 dma->dp_pgmap[pidx].pm_kaddr = (caddr_t) 3779 (((uintptr_t)dma->dp_kva + *copybuf_used) & 3780 MMU_PAGEMASK); 3781 } else { 3782 if (copybuf_sz_power_2) { 3783 dma->dp_pgmap[pidx].pm_kaddr = (caddr_t) 3784 (((uintptr_t)dma->dp_kva + 3785 (*copybuf_used & 3786 (dma->dp_copybuf_size - 1))) & 3787 MMU_PAGEMASK); 3788 } else { 3789 dma->dp_pgmap[pidx].pm_kaddr = (caddr_t) 3790 (((uintptr_t)dma->dp_kva + 3791 (*copybuf_used % 3792 dma->dp_copybuf_size)) & 3793 MMU_PAGEMASK); 3794 } 3795 } 3796 3797 /* 3798 * if we haven't used up the available copy buffer yet, 3799 * map the kva to the physical page. 3800 */ 3801 if (!dma->dp_cb_remaping && ((*copybuf_used + 3802 MMU_PAGESIZE) <= dma->dp_copybuf_size)) { 3803 dma->dp_pgmap[pidx].pm_mapped = B_TRUE; 3804 if (dma->dp_pgmap[pidx].pm_pp != NULL) { 3805 i86_pp_map(dma->dp_pgmap[pidx].pm_pp, 3806 dma->dp_pgmap[pidx].pm_kaddr); 3807 } else { 3808 i86_va_map(dma->dp_pgmap[pidx].pm_vaddr, 3809 sinfo->si_asp, 3810 dma->dp_pgmap[pidx].pm_kaddr); 3811 } 3812 3813 /* 3814 * we've used up the available copy buffer, this page 3815 * will have to be mapped during rootnex_dma_win() when 3816 * we switch to a new window which requires a re-map 3817 * the copy buffer. (32-bit kernel only) 3818 */ 3819 } else { 3820 dma->dp_pgmap[pidx].pm_mapped = B_FALSE; 3821 } 3822 #endif 3823 /* go to the next page_t */ 3824 if (dmar_object->dmao_type == DMA_OTYP_PAGES) { 3825 *cur_pp = (*cur_pp)->p_next; 3826 } 3827 } 3828 3829 /* add to the copy buffer count */ 3830 *copybuf_used += MMU_PAGESIZE; 3831 3832 /* 3833 * This cookie doesn't use the copy buffer. Walk through the pages this 3834 * cookie occupies to reflect this. 3835 */ 3836 } else { 3837 /* 3838 * figure out how many pages the cookie occupies. We need to 3839 * use the original page offset of the buffer and the cookies 3840 * offset in the buffer to do this. 3841 */ 3842 poff = (sinfo->si_buf_offset + cur_offset) & MMU_PAGEOFFSET; 3843 pcnt = mmu_btopr(cookie->dmac_size + poff); 3844 3845 while (pcnt > 0) { 3846 #if !defined(__amd64) 3847 /* 3848 * the 32-bit kernel doesn't have seg kpm, so we need 3849 * to map in the driver buffer (if it didn't come down 3850 * with a kernel VA) on the fly. Since this page doesn't 3851 * use the copy buffer, it's not, or will it ever, have 3852 * to be mapped in. 3853 */ 3854 dma->dp_pgmap[pidx].pm_mapped = B_FALSE; 3855 #endif 3856 dma->dp_pgmap[pidx].pm_uses_copybuf = B_FALSE; 3857 3858 /* 3859 * we need to update pidx and cur_pp or we'll loose 3860 * track of where we are. 
3861 */ 3862 if (dmar_object->dmao_type == DMA_OTYP_PAGES) { 3863 *cur_pp = (*cur_pp)->p_next; 3864 } 3865 pidx++; 3866 pcnt--; 3867 } 3868 } 3869 } 3870 3871 3872 /* 3873 * rootnex_sgllen_window_boundary() 3874 * Called in the bind slow path when the next cookie causes us to exceed (in 3875 * this case == since we start at 0 and sgllen starts at 1) the maximum sgl 3876 * length supported by the DMA H/W. 3877 */ 3878 static int 3879 rootnex_sgllen_window_boundary(ddi_dma_impl_t *hp, rootnex_dma_t *dma, 3880 rootnex_window_t **windowp, ddi_dma_cookie_t *cookie, ddi_dma_attr_t *attr, 3881 off_t cur_offset) 3882 { 3883 off_t new_offset; 3884 size_t trim_sz; 3885 off_t coffset; 3886 3887 3888 /* 3889 * if we know we'll never have to trim, it's pretty easy. Just move to 3890 * the next window and init it. We're done. 3891 */ 3892 if (!dma->dp_trim_required) { 3893 (*windowp)++; 3894 rootnex_init_win(hp, dma, *windowp, cookie, cur_offset); 3895 (*windowp)->wd_cookie_cnt++; 3896 (*windowp)->wd_size = cookie->dmac_size; 3897 return (DDI_SUCCESS); 3898 } 3899 3900 /* figure out how much we need to trim from the window */ 3901 ASSERT(attr->dma_attr_granular != 0); 3902 if (dma->dp_granularity_power_2) { 3903 trim_sz = (*windowp)->wd_size & (attr->dma_attr_granular - 1); 3904 } else { 3905 trim_sz = (*windowp)->wd_size % attr->dma_attr_granular; 3906 } 3907 3908 /* The window's a whole multiple of granularity. We're done */ 3909 if (trim_sz == 0) { 3910 (*windowp)++; 3911 rootnex_init_win(hp, dma, *windowp, cookie, cur_offset); 3912 (*windowp)->wd_cookie_cnt++; 3913 (*windowp)->wd_size = cookie->dmac_size; 3914 return (DDI_SUCCESS); 3915 } 3916 3917 /* 3918 * The window's not a whole multiple of granularity, since we know this 3919 * is due to the sgllen, we need to go back to the last cookie and trim 3920 * that one, add the left over part of the old cookie into the new 3921 * window, and then add in the new cookie into the new window. 3922 */ 3923 3924 /* 3925 * make sure the driver isn't making us do something bad... Trimming and 3926 * sgllen == 1 don't go together. 3927 */ 3928 if (attr->dma_attr_sgllen == 1) { 3929 return (DDI_DMA_NOMAPPING); 3930 } 3931 3932 /* 3933 * first, setup the current window to account for the trim. Need to go 3934 * back to the last cookie for this. 3935 */ 3936 cookie--; 3937 (*windowp)->wd_trim.tr_trim_last = B_TRUE; 3938 (*windowp)->wd_trim.tr_last_cookie = cookie; 3939 (*windowp)->wd_trim.tr_last_paddr = cookie->dmac_laddress; 3940 ASSERT(cookie->dmac_size > trim_sz); 3941 (*windowp)->wd_trim.tr_last_size = cookie->dmac_size - trim_sz; 3942 (*windowp)->wd_size -= trim_sz; 3943 3944 /* save the buffer offsets for the next window */ 3945 coffset = cookie->dmac_size - trim_sz; 3946 new_offset = (*windowp)->wd_offset + (*windowp)->wd_size; 3947 3948 /* 3949 * set this now in case this is the first window. all other cases are 3950 * set in dma_win() 3951 */ 3952 cookie->dmac_size = (*windowp)->wd_trim.tr_last_size; 3953 3954 /* 3955 * initialize the next window using what's left over in the previous 3956 * cookie. 
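 * As an illustration (numbers made up): with dma_attr_granular == 512 and
 * a window size of 0x1300, trim_sz works out to 0x100. The last cookie
 * gives up its final 0x100 bytes in this window (tr_last_size is its size
 * minus trim_sz) and those 0x100 bytes reappear as the trimmed first
 * cookie of the window we set up below, starting at dmac_laddress +
 * coffset.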
3957 */ 3958 (*windowp)++; 3959 rootnex_init_win(hp, dma, *windowp, cookie, new_offset); 3960 (*windowp)->wd_cookie_cnt++; 3961 (*windowp)->wd_trim.tr_trim_first = B_TRUE; 3962 (*windowp)->wd_trim.tr_first_paddr = cookie->dmac_laddress + coffset; 3963 (*windowp)->wd_trim.tr_first_size = trim_sz; 3964 if (cookie->dmac_type & ROOTNEX_USES_COPYBUF) { 3965 (*windowp)->wd_dosync = B_TRUE; 3966 } 3967 3968 /* 3969 * now go back to the current cookie and add it to the new window. set 3970 * the new window size to the what was left over from the previous 3971 * cookie and what's in the current cookie. 3972 */ 3973 cookie++; 3974 (*windowp)->wd_cookie_cnt++; 3975 (*windowp)->wd_size = trim_sz + cookie->dmac_size; 3976 3977 /* 3978 * trim plus the next cookie could put us over maxxfer (a cookie can be 3979 * a max size of maxxfer). Handle that case. 3980 */ 3981 if ((*windowp)->wd_size > dma->dp_maxxfer) { 3982 /* 3983 * maxxfer is already a whole multiple of granularity, and this 3984 * trim will be <= the previous trim (since a cookie can't be 3985 * larger than maxxfer). Make things simple here. 3986 */ 3987 trim_sz = (*windowp)->wd_size - dma->dp_maxxfer; 3988 (*windowp)->wd_trim.tr_trim_last = B_TRUE; 3989 (*windowp)->wd_trim.tr_last_cookie = cookie; 3990 (*windowp)->wd_trim.tr_last_paddr = cookie->dmac_laddress; 3991 (*windowp)->wd_trim.tr_last_size = cookie->dmac_size - trim_sz; 3992 (*windowp)->wd_size -= trim_sz; 3993 ASSERT((*windowp)->wd_size == dma->dp_maxxfer); 3994 3995 /* save the buffer offsets for the next window */ 3996 coffset = cookie->dmac_size - trim_sz; 3997 new_offset = (*windowp)->wd_offset + (*windowp)->wd_size; 3998 3999 /* setup the next window */ 4000 (*windowp)++; 4001 rootnex_init_win(hp, dma, *windowp, cookie, new_offset); 4002 (*windowp)->wd_cookie_cnt++; 4003 (*windowp)->wd_trim.tr_trim_first = B_TRUE; 4004 (*windowp)->wd_trim.tr_first_paddr = cookie->dmac_laddress + 4005 coffset; 4006 (*windowp)->wd_trim.tr_first_size = trim_sz; 4007 } 4008 4009 return (DDI_SUCCESS); 4010 } 4011 4012 4013 /* 4014 * rootnex_copybuf_window_boundary() 4015 * Called in bind slowpath when we get to a window boundary because we used 4016 * up all the copy buffer that we have. 4017 */ 4018 static int 4019 rootnex_copybuf_window_boundary(ddi_dma_impl_t *hp, rootnex_dma_t *dma, 4020 rootnex_window_t **windowp, ddi_dma_cookie_t *cookie, off_t cur_offset, 4021 size_t *copybuf_used) 4022 { 4023 rootnex_sglinfo_t *sinfo; 4024 off_t new_offset; 4025 size_t trim_sz; 4026 paddr_t paddr; 4027 off_t coffset; 4028 uint_t pidx; 4029 off_t poff; 4030 4031 4032 sinfo = &dma->dp_sglinfo; 4033 4034 /* 4035 * the copy buffer should be a whole multiple of page size. We know that 4036 * this cookie is <= MMU_PAGESIZE. 4037 */ 4038 ASSERT(cookie->dmac_size <= MMU_PAGESIZE); 4039 4040 /* 4041 * from now on, all new windows in this bind need to be re-mapped during 4042 * ddi_dma_getwin() (32-bit kernel only). i.e. we ran out out copybuf 4043 * space... 4044 */ 4045 #if !defined(__amd64) 4046 dma->dp_cb_remaping = B_TRUE; 4047 #endif 4048 4049 /* reset copybuf used */ 4050 *copybuf_used = 0; 4051 4052 /* 4053 * if we don't have to trim (since granularity is set to 1), go to the 4054 * next window and add the current cookie to it. We know the current 4055 * cookie uses the copy buffer since we're in this code path. 
4056 */ 4057 if (!dma->dp_trim_required) { 4058 (*windowp)++; 4059 rootnex_init_win(hp, dma, *windowp, cookie, cur_offset); 4060 4061 /* Add this cookie to the new window */ 4062 (*windowp)->wd_cookie_cnt++; 4063 (*windowp)->wd_size += cookie->dmac_size; 4064 *copybuf_used += MMU_PAGESIZE; 4065 return (DDI_SUCCESS); 4066 } 4067 4068 /* 4069 * *** may need to trim, figure it out. 4070 */ 4071 4072 /* figure out how much we need to trim from the window */ 4073 if (dma->dp_granularity_power_2) { 4074 trim_sz = (*windowp)->wd_size & 4075 (hp->dmai_attr.dma_attr_granular - 1); 4076 } else { 4077 trim_sz = (*windowp)->wd_size % hp->dmai_attr.dma_attr_granular; 4078 } 4079 4080 /* 4081 * if the window's a whole multiple of granularity, go to the next 4082 * window, init it, then add in the current cookie. We know the current 4083 * cookie uses the copy buffer since we're in this code path. 4084 */ 4085 if (trim_sz == 0) { 4086 (*windowp)++; 4087 rootnex_init_win(hp, dma, *windowp, cookie, cur_offset); 4088 4089 /* Add this cookie to the new window */ 4090 (*windowp)->wd_cookie_cnt++; 4091 (*windowp)->wd_size += cookie->dmac_size; 4092 *copybuf_used += MMU_PAGESIZE; 4093 return (DDI_SUCCESS); 4094 } 4095 4096 /* 4097 * *** We figured it out, we definitely need to trim 4098 */ 4099 4100 /* 4101 * make sure the driver isn't making us do something bad... 4102 * Trimming and sgllen == 1 don't go together. 4103 */ 4104 if (hp->dmai_attr.dma_attr_sgllen == 1) { 4105 return (DDI_DMA_NOMAPPING); 4106 } 4107 4108 /* 4109 * first, setup the current window to account for the trim. Need to go 4110 * back to the last cookie for this. Some of the last cookie will be in 4111 * the current window, and some of the last cookie will be in the new 4112 * window. All of the current cookie will be in the new window. 4113 */ 4114 cookie--; 4115 (*windowp)->wd_trim.tr_trim_last = B_TRUE; 4116 (*windowp)->wd_trim.tr_last_cookie = cookie; 4117 (*windowp)->wd_trim.tr_last_paddr = cookie->dmac_laddress; 4118 ASSERT(cookie->dmac_size > trim_sz); 4119 (*windowp)->wd_trim.tr_last_size = cookie->dmac_size - trim_sz; 4120 (*windowp)->wd_size -= trim_sz; 4121 4122 /* 4123 * we're trimming the last cookie (not the current cookie). So that 4124 * last cookie may or may not have been using the copy buffer ( 4125 * we know the cookie passed in uses the copy buffer since we're in 4126 * this code path). 4127 * 4128 * If the last cookie doesn't use the copy buffer, nothing special to 4129 * do. However, if it does use the copy buffer, it will be both the 4130 * last page in the current window and the first page in the next 4131 * window. Since we are reusing the copy buffer (and KVA space on the 4132 * 32-bit kernel), this page will use the end of the copy buffer in the 4133 * current window, and the start of the copy buffer in the next window. 4134 * Track that info... The cookie physical address was already set to 4135 * the copy buffer physical address in setup_cookie.
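 * Concretely: if the trimmed last cookie lives in buffer page pidx, that
 * page's pm_cbaddr currently points at the tail end of this window's
 * copybuf usage; tr_last_cbaddr remembers that value, while the next
 * window will point the same page at the start of the copybuf
 * (dp_cbaddr). ddi_dma_getwin() flips between the two using the trim
 * state saved below.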
4136 */ 4137 if (cookie->dmac_type & ROOTNEX_USES_COPYBUF) { 4138 pidx = (sinfo->si_buf_offset + (*windowp)->wd_offset + 4139 (*windowp)->wd_size) >> MMU_PAGESHIFT; 4140 (*windowp)->wd_trim.tr_last_copybuf_win = B_TRUE; 4141 (*windowp)->wd_trim.tr_last_pidx = pidx; 4142 (*windowp)->wd_trim.tr_last_cbaddr = 4143 dma->dp_pgmap[pidx].pm_cbaddr; 4144 #if !defined(__amd64) 4145 (*windowp)->wd_trim.tr_last_kaddr = 4146 dma->dp_pgmap[pidx].pm_kaddr; 4147 #endif 4148 } 4149 4150 /* save the buffer offsets for the next window */ 4151 coffset = cookie->dmac_size - trim_sz; 4152 new_offset = (*windowp)->wd_offset + (*windowp)->wd_size; 4153 4154 /* 4155 * set this now in case this is the first window. all other cases are 4156 * set in dma_win() 4157 */ 4158 cookie->dmac_size = (*windowp)->wd_trim.tr_last_size; 4159 4160 /* 4161 * initialize the next window using what's left over in the previous 4162 * cookie. 4163 */ 4164 (*windowp)++; 4165 rootnex_init_win(hp, dma, *windowp, cookie, new_offset); 4166 (*windowp)->wd_cookie_cnt++; 4167 (*windowp)->wd_trim.tr_trim_first = B_TRUE; 4168 (*windowp)->wd_trim.tr_first_paddr = cookie->dmac_laddress + coffset; 4169 (*windowp)->wd_trim.tr_first_size = trim_sz; 4170 4171 /* 4172 * again, we're tracking if the last cookie uses the copy buffer. 4173 * read the comment above for more info on why we need to track 4174 * additional state. 4175 * 4176 * For the first cookie in the new window, we need to reset the physical 4177 * address to DMA into, to the start of the copy buffer plus any 4178 * initial page offset which may be present. 4179 */ 4180 if (cookie->dmac_type & ROOTNEX_USES_COPYBUF) { 4181 (*windowp)->wd_dosync = B_TRUE; 4182 (*windowp)->wd_trim.tr_first_copybuf_win = B_TRUE; 4183 (*windowp)->wd_trim.tr_first_pidx = pidx; 4184 (*windowp)->wd_trim.tr_first_cbaddr = dma->dp_cbaddr; 4185 poff = (*windowp)->wd_trim.tr_first_paddr & MMU_PAGEOFFSET; 4186 4187 paddr = pfn_to_pa(hat_getpfnum(kas.a_hat, dma->dp_cbaddr)) + 4188 poff; 4189 #ifdef __xpv 4190 /* 4191 * If we're dom0, we're using a real device so we need to load 4192 * the cookies with MAs instead of PAs. 4193 */ 4194 (*windowp)->wd_trim.tr_first_paddr = 4195 ROOTNEX_PADDR_TO_RBASE(xen_info, paddr); 4196 #else 4197 (*windowp)->wd_trim.tr_first_paddr = paddr; 4198 #endif 4199 4200 #if !defined(__amd64) 4201 (*windowp)->wd_trim.tr_first_kaddr = dma->dp_kva; 4202 #endif 4203 /* account for the cookie copybuf usage in the new window */ 4204 *copybuf_used += MMU_PAGESIZE; 4205 4206 /* 4207 * every piece of code has to have a hack, and here is this 4208 * one's :-) 4209 * 4210 * There is a complex interaction between setup_cookie and the 4211 * copybuf window boundary. The complexity had to be in either 4212 * the maxxfer window, or the copybuf window, and I chose the 4213 * copybuf code. 4214 * 4215 * So in this code path, we have taken the last cookie, 4216 * virtually broken it in half due to the trim, and it happens 4217 * to use the copybuf which further complicates life. At the 4218 * same time, we have already setup the current cookie, which 4219 * is now wrong. More background info: the current cookie uses 4220 * the copybuf, so it is only a page long max. So we need to 4221 * fix the current cookie's copy buffer address, physical 4222 * address, and kva for the 32-bit kernel. We do this by 4223 * bumping them by page size (of course, we can't do this on 4224 * the physical address since the copy buffer may not be 4225 * physically contiguous).
4226 */ 4227 cookie++; 4228 dma->dp_pgmap[pidx + 1].pm_cbaddr += MMU_PAGESIZE; 4229 poff = cookie->dmac_laddress & MMU_PAGEOFFSET; 4230 4231 paddr = pfn_to_pa(hat_getpfnum(kas.a_hat, 4232 dma->dp_pgmap[pidx + 1].pm_cbaddr)) + poff; 4233 #ifdef __xpv 4234 /* 4235 * If we're dom0, we're using a real device so we need to load 4236 * the cookies with MAs instead of PAs. 4237 */ 4238 cookie->dmac_laddress = ROOTNEX_PADDR_TO_RBASE(xen_info, paddr); 4239 #else 4240 cookie->dmac_laddress = paddr; 4241 #endif 4242 4243 #if !defined(__amd64) 4244 ASSERT(dma->dp_pgmap[pidx + 1].pm_mapped == B_FALSE); 4245 dma->dp_pgmap[pidx + 1].pm_kaddr += MMU_PAGESIZE; 4246 #endif 4247 } else { 4248 /* go back to the current cookie */ 4249 cookie++; 4250 } 4251 4252 /* 4253 * add the current cookie to the new window. set the new window size to 4254 * the what was left over from the previous cookie and what's in the 4255 * current cookie. 4256 */ 4257 (*windowp)->wd_cookie_cnt++; 4258 (*windowp)->wd_size = trim_sz + cookie->dmac_size; 4259 ASSERT((*windowp)->wd_size < dma->dp_maxxfer); 4260 4261 /* 4262 * we know that the cookie passed in always uses the copy buffer. We 4263 * wouldn't be here if it didn't. 4264 */ 4265 *copybuf_used += MMU_PAGESIZE; 4266 4267 return (DDI_SUCCESS); 4268 } 4269 4270 4271 /* 4272 * rootnex_maxxfer_window_boundary() 4273 * Called in bind slowpath when we get to a window boundary because we will 4274 * go over maxxfer. 4275 */ 4276 static int 4277 rootnex_maxxfer_window_boundary(ddi_dma_impl_t *hp, rootnex_dma_t *dma, 4278 rootnex_window_t **windowp, ddi_dma_cookie_t *cookie) 4279 { 4280 size_t dmac_size; 4281 off_t new_offset; 4282 size_t trim_sz; 4283 off_t coffset; 4284 4285 4286 /* 4287 * calculate how much we have to trim off of the current cookie to equal 4288 * maxxfer. We don't have to account for granularity here since our 4289 * maxxfer already takes that into account. 4290 */ 4291 trim_sz = ((*windowp)->wd_size + cookie->dmac_size) - dma->dp_maxxfer; 4292 ASSERT(trim_sz <= cookie->dmac_size); 4293 ASSERT(trim_sz <= dma->dp_maxxfer); 4294 4295 /* save cookie size since we need it later and we might change it */ 4296 dmac_size = cookie->dmac_size; 4297 4298 /* 4299 * if we're not trimming the entire cookie, setup the current window to 4300 * account for the trim. 4301 */ 4302 if (trim_sz < cookie->dmac_size) { 4303 (*windowp)->wd_cookie_cnt++; 4304 (*windowp)->wd_trim.tr_trim_last = B_TRUE; 4305 (*windowp)->wd_trim.tr_last_cookie = cookie; 4306 (*windowp)->wd_trim.tr_last_paddr = cookie->dmac_laddress; 4307 (*windowp)->wd_trim.tr_last_size = cookie->dmac_size - trim_sz; 4308 (*windowp)->wd_size = dma->dp_maxxfer; 4309 4310 /* 4311 * set the adjusted cookie size now in case this is the first 4312 * window. All other windows are taken care of in get win 4313 */ 4314 cookie->dmac_size = (*windowp)->wd_trim.tr_last_size; 4315 } 4316 4317 /* 4318 * coffset is the current offset within the cookie, new_offset is the 4319 * current offset with the entire buffer. 
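 * Worked example (illustrative values): dp_maxxfer == 0x100000, the
 * window holds 0xff000 bytes so far, and this cookie is 0x2000 bytes.
 * trim_sz is then 0x1000: the current window keeps the first 0x1000
 * bytes of the cookie (coffset == 0x1000) and tops out exactly at
 * maxxfer, while the window initialized below starts with the remaining
 * 0x1000 bytes at dmac_laddress + coffset.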
4320 */ 4321 coffset = dmac_size - trim_sz; 4322 new_offset = (*windowp)->wd_offset + (*windowp)->wd_size; 4323 4324 /* initialize the next window */ 4325 (*windowp)++; 4326 rootnex_init_win(hp, dma, *windowp, cookie, new_offset); 4327 (*windowp)->wd_cookie_cnt++; 4328 (*windowp)->wd_size = trim_sz; 4329 if (trim_sz < dmac_size) { 4330 (*windowp)->wd_trim.tr_trim_first = B_TRUE; 4331 (*windowp)->wd_trim.tr_first_paddr = cookie->dmac_laddress + 4332 coffset; 4333 (*windowp)->wd_trim.tr_first_size = trim_sz; 4334 } 4335 4336 return (DDI_SUCCESS); 4337 } 4338 4339 4340 /*ARGSUSED*/ 4341 static int 4342 rootnex_coredma_sync(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle, 4343 off_t off, size_t len, uint_t cache_flags) 4344 { 4345 rootnex_sglinfo_t *sinfo; 4346 rootnex_pgmap_t *cbpage; 4347 rootnex_window_t *win; 4348 ddi_dma_impl_t *hp; 4349 rootnex_dma_t *dma; 4350 caddr_t fromaddr; 4351 caddr_t toaddr; 4352 uint_t psize; 4353 off_t offset; 4354 uint_t pidx; 4355 size_t size; 4356 off_t poff; 4357 int e; 4358 4359 4360 hp = (ddi_dma_impl_t *)handle; 4361 dma = (rootnex_dma_t *)hp->dmai_private; 4362 sinfo = &dma->dp_sglinfo; 4363 4364 /* 4365 * if we don't have any windows, we don't need to sync. A copybuf 4366 * will cause us to have at least one window. 4367 */ 4368 if (dma->dp_window == NULL) { 4369 return (DDI_SUCCESS); 4370 } 4371 4372 /* This window may not need to be sync'd */ 4373 win = &dma->dp_window[dma->dp_current_win]; 4374 if (!win->wd_dosync) { 4375 return (DDI_SUCCESS); 4376 } 4377 4378 /* handle off and len special cases */ 4379 if ((off == 0) || (rootnex_sync_ignore_params)) { 4380 offset = win->wd_offset; 4381 } else { 4382 offset = off; 4383 } 4384 if ((len == 0) || (rootnex_sync_ignore_params)) { 4385 size = win->wd_size; 4386 } else { 4387 size = len; 4388 } 4389 4390 /* check the sync args to make sure they make a little sense */ 4391 if (rootnex_sync_check_parms) { 4392 e = rootnex_valid_sync_parms(hp, win, offset, size, 4393 cache_flags); 4394 if (e != DDI_SUCCESS) { 4395 ROOTNEX_PROF_INC(&rootnex_cnt[ROOTNEX_CNT_SYNC_FAIL]); 4396 return (DDI_FAILURE); 4397 } 4398 } 4399 4400 /* 4401 * special case the first page to handle the offset into the page. The 4402 * offset to the current page for our buffer is the offset into the 4403 * first page of the buffer plus our current offset into the buffer 4404 * itself, masked of course. 4405 */ 4406 poff = (sinfo->si_buf_offset + offset) & MMU_PAGEOFFSET; 4407 psize = MIN((MMU_PAGESIZE - poff), size); 4408 4409 /* go through all the pages that we want to sync */ 4410 while (size > 0) { 4411 /* 4412 * Calculate the page index relative to the start of the buffer. 4413 * The index to the current page for our buffer is the offset 4414 * into the first page of the buffer plus our current offset 4415 * into the buffer itself, shifted of course... 4416 */ 4417 pidx = (sinfo->si_buf_offset + offset) >> MMU_PAGESHIFT; 4418 ASSERT(pidx < sinfo->si_max_pages); 4419 4420 /* 4421 * if this page uses the copy buffer, we need to sync it, 4422 * otherwise, go on to the next page. 
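 * e.g. with si_buf_offset + offset ending in 0x600 and size == 0x1800,
 * the first pass copies 0xa00 bytes starting at poff 0x600 and the
 * second pass copies the remaining 0xe00 bytes at poff 0; pages which
 * never used the copy buffer are simply skipped.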
4423 */ 4424 cbpage = &dma->dp_pgmap[pidx]; 4425 ASSERT((cbpage->pm_uses_copybuf == B_TRUE) || 4426 (cbpage->pm_uses_copybuf == B_FALSE)); 4427 if (cbpage->pm_uses_copybuf) { 4428 /* cbaddr and kaddr should be page aligned */ 4429 ASSERT(((uintptr_t)cbpage->pm_cbaddr & 4430 MMU_PAGEOFFSET) == 0); 4431 ASSERT(((uintptr_t)cbpage->pm_kaddr & 4432 MMU_PAGEOFFSET) == 0); 4433 4434 /* 4435 * if we're copying for the device, we are going to 4436 * copy from the drivers buffer and to the rootnex 4437 * allocated copy buffer. 4438 */ 4439 if (cache_flags == DDI_DMA_SYNC_FORDEV) { 4440 fromaddr = cbpage->pm_kaddr + poff; 4441 toaddr = cbpage->pm_cbaddr + poff; 4442 DTRACE_PROBE2(rootnex__sync__dev, 4443 dev_info_t *, dma->dp_dip, size_t, psize); 4444 4445 /* 4446 * if we're copying for the cpu/kernel, we are going to 4447 * copy from the rootnex allocated copy buffer to the 4448 * drivers buffer. 4449 */ 4450 } else { 4451 fromaddr = cbpage->pm_cbaddr + poff; 4452 toaddr = cbpage->pm_kaddr + poff; 4453 DTRACE_PROBE2(rootnex__sync__cpu, 4454 dev_info_t *, dma->dp_dip, size_t, psize); 4455 } 4456 4457 bcopy(fromaddr, toaddr, psize); 4458 } 4459 4460 /* 4461 * decrement size until we're done, update our offset into the 4462 * buffer, and get the next page size. 4463 */ 4464 size -= psize; 4465 offset += psize; 4466 psize = MIN(MMU_PAGESIZE, size); 4467 4468 /* page offset is zero for the rest of this loop */ 4469 poff = 0; 4470 } 4471 4472 return (DDI_SUCCESS); 4473 } 4474 4475 /* 4476 * rootnex_dma_sync() 4477 * called from ddi_dma_sync() if DMP_NOSYNC is not set in hp->dmai_rflags. 4478 * We set DMP_NOSYNC if we're not using the copy buffer. If DMP_NOSYNC 4479 * is set, ddi_dma_sync() returns immediately passing back success. 4480 */ 4481 /*ARGSUSED*/ 4482 static int 4483 rootnex_dma_sync(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle, 4484 off_t off, size_t len, uint_t cache_flags) 4485 { 4486 #if defined(__amd64) && !defined(__xpv) 4487 if (IOMMU_USED(rdip)) { 4488 return (iommulib_nexdma_sync(dip, rdip, handle, off, len, 4489 cache_flags)); 4490 } 4491 #endif 4492 return (rootnex_coredma_sync(dip, rdip, handle, off, len, 4493 cache_flags)); 4494 } 4495 4496 /* 4497 * rootnex_valid_sync_parms() 4498 * checks the parameters passed to sync to verify they are correct. 4499 */ 4500 static int 4501 rootnex_valid_sync_parms(ddi_dma_impl_t *hp, rootnex_window_t *win, 4502 off_t offset, size_t size, uint_t cache_flags) 4503 { 4504 off_t woffset; 4505 4506 4507 /* 4508 * the first part of the test to make sure the offset passed in is 4509 * within the window. 4510 */ 4511 if (offset < win->wd_offset) { 4512 return (DDI_FAILURE); 4513 } 4514 4515 /* 4516 * second and last part of the test to make sure the offset and length 4517 * passed in is within the window. 4518 */ 4519 woffset = offset - win->wd_offset; 4520 if ((woffset + size) > win->wd_size) { 4521 return (DDI_FAILURE); 4522 } 4523 4524 /* 4525 * if we are sync'ing for the device, the DDI_DMA_WRITE flag should 4526 * be set too. 4527 */ 4528 if ((cache_flags == DDI_DMA_SYNC_FORDEV) && 4529 (hp->dmai_rflags & DDI_DMA_WRITE)) { 4530 return (DDI_SUCCESS); 4531 } 4532 4533 /* 4534 * at this point, either DDI_DMA_SYNC_FORCPU or DDI_DMA_SYNC_FORKERNEL 4535 * should be set. Also DDI_DMA_READ should be set in the flags. 
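 * In other words: SYNC_FORDEV is only legal on a binding that included
 * DDI_DMA_WRITE, and SYNC_FORCPU/SYNC_FORKERNEL are only legal on one
 * that included DDI_DMA_READ (a DDI_DMA_RDWR binding satisfies both).
 * Anything else, or an offset/length outside the current window, fails.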
4536 */ 4537 if (((cache_flags == DDI_DMA_SYNC_FORCPU) || 4538 (cache_flags == DDI_DMA_SYNC_FORKERNEL)) && 4539 (hp->dmai_rflags & DDI_DMA_READ)) { 4540 return (DDI_SUCCESS); 4541 } 4542 4543 return (DDI_FAILURE); 4544 } 4545 4546 4547 /*ARGSUSED*/ 4548 static int 4549 rootnex_coredma_win(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle, 4550 uint_t win, off_t *offp, size_t *lenp, ddi_dma_cookie_t *cookiep, 4551 uint_t *ccountp) 4552 { 4553 rootnex_window_t *window; 4554 rootnex_trim_t *trim; 4555 ddi_dma_impl_t *hp; 4556 rootnex_dma_t *dma; 4557 #if !defined(__amd64) 4558 rootnex_sglinfo_t *sinfo; 4559 rootnex_pgmap_t *pmap; 4560 uint_t pidx; 4561 uint_t pcnt; 4562 off_t poff; 4563 int i; 4564 #endif 4565 4566 4567 hp = (ddi_dma_impl_t *)handle; 4568 dma = (rootnex_dma_t *)hp->dmai_private; 4569 #if !defined(__amd64) 4570 sinfo = &dma->dp_sglinfo; 4571 #endif 4572 4573 /* If we try and get a window which doesn't exist, return failure */ 4574 if (win >= hp->dmai_nwin) { 4575 ROOTNEX_PROF_INC(&rootnex_cnt[ROOTNEX_CNT_GETWIN_FAIL]); 4576 return (DDI_FAILURE); 4577 } 4578 4579 /* 4580 * if we don't have any windows, and they're asking for the first 4581 * window, setup the cookie pointer to the first cookie in the bind. 4582 * setup our return values, then increment the cookie since we return 4583 * the first cookie on the stack. 4584 */ 4585 if (dma->dp_window == NULL) { 4586 if (win != 0) { 4587 ROOTNEX_PROF_INC(&rootnex_cnt[ROOTNEX_CNT_GETWIN_FAIL]); 4588 return (DDI_FAILURE); 4589 } 4590 hp->dmai_cookie = dma->dp_cookies; 4591 *offp = 0; 4592 *lenp = dma->dp_dma.dmao_size; 4593 *ccountp = dma->dp_sglinfo.si_sgl_size; 4594 *cookiep = hp->dmai_cookie[0]; 4595 hp->dmai_cookie++; 4596 return (DDI_SUCCESS); 4597 } 4598 4599 /* sync the old window before moving on to the new one */ 4600 window = &dma->dp_window[dma->dp_current_win]; 4601 if ((window->wd_dosync) && (hp->dmai_rflags & DDI_DMA_READ)) { 4602 (void) rootnex_coredma_sync(dip, rdip, handle, 0, 0, 4603 DDI_DMA_SYNC_FORCPU); 4604 } 4605 4606 #if !defined(__amd64) 4607 /* 4608 * before we move to the next window, if we need to re-map, unmap all 4609 * the pages in this window. 4610 */ 4611 if (dma->dp_cb_remaping) { 4612 /* 4613 * If we switch to this window again, we'll need to map in 4614 * on the fly next time. 4615 */ 4616 window->wd_remap_copybuf = B_TRUE; 4617 4618 /* 4619 * calculate the page index into the buffer where this window 4620 * starts, and the number of pages this window takes up. 4621 */ 4622 pidx = (sinfo->si_buf_offset + window->wd_offset) >> 4623 MMU_PAGESHIFT; 4624 poff = (sinfo->si_buf_offset + window->wd_offset) & 4625 MMU_PAGEOFFSET; 4626 pcnt = mmu_btopr(window->wd_size + poff); 4627 ASSERT((pidx + pcnt) <= sinfo->si_max_pages); 4628 4629 /* unmap pages which are currently mapped in this window */ 4630 for (i = 0; i < pcnt; i++) { 4631 if (dma->dp_pgmap[pidx].pm_mapped) { 4632 hat_unload(kas.a_hat, 4633 dma->dp_pgmap[pidx].pm_kaddr, MMU_PAGESIZE, 4634 HAT_UNLOAD); 4635 dma->dp_pgmap[pidx].pm_mapped = B_FALSE; 4636 } 4637 pidx++; 4638 } 4639 } 4640 #endif 4641 4642 /* 4643 * Move to the new window. 
4644 * NOTE: current_win must be set for sync to work right 4645 */ 4646 dma->dp_current_win = win; 4647 window = &dma->dp_window[win]; 4648 4649 /* if needed, adjust the first and/or last cookies for trim */ 4650 trim = &window->wd_trim; 4651 if (trim->tr_trim_first) { 4652 window->wd_first_cookie->dmac_laddress = trim->tr_first_paddr; 4653 window->wd_first_cookie->dmac_size = trim->tr_first_size; 4654 #if !defined(__amd64) 4655 window->wd_first_cookie->dmac_type = 4656 (window->wd_first_cookie->dmac_type & 4657 ROOTNEX_USES_COPYBUF) + window->wd_offset; 4658 #endif 4659 if (trim->tr_first_copybuf_win) { 4660 dma->dp_pgmap[trim->tr_first_pidx].pm_cbaddr = 4661 trim->tr_first_cbaddr; 4662 #if !defined(__amd64) 4663 dma->dp_pgmap[trim->tr_first_pidx].pm_kaddr = 4664 trim->tr_first_kaddr; 4665 #endif 4666 } 4667 } 4668 if (trim->tr_trim_last) { 4669 trim->tr_last_cookie->dmac_laddress = trim->tr_last_paddr; 4670 trim->tr_last_cookie->dmac_size = trim->tr_last_size; 4671 if (trim->tr_last_copybuf_win) { 4672 dma->dp_pgmap[trim->tr_last_pidx].pm_cbaddr = 4673 trim->tr_last_cbaddr; 4674 #if !defined(__amd64) 4675 dma->dp_pgmap[trim->tr_last_pidx].pm_kaddr = 4676 trim->tr_last_kaddr; 4677 #endif 4678 } 4679 } 4680 4681 /* 4682 * setup the cookie pointer to the first cookie in the window. setup 4683 * our return values, then increment the cookie since we return the 4684 * first cookie on the stack. 4685 */ 4686 hp->dmai_cookie = window->wd_first_cookie; 4687 *offp = window->wd_offset; 4688 *lenp = window->wd_size; 4689 *ccountp = window->wd_cookie_cnt; 4690 *cookiep = hp->dmai_cookie[0]; 4691 hp->dmai_cookie++; 4692 4693 #if !defined(__amd64) 4694 /* re-map copybuf if required for this window */ 4695 if (dma->dp_cb_remaping) { 4696 /* 4697 * calculate the page index into the buffer where this 4698 * window starts. 4699 */ 4700 pidx = (sinfo->si_buf_offset + window->wd_offset) >> 4701 MMU_PAGESHIFT; 4702 ASSERT(pidx < sinfo->si_max_pages); 4703 4704 /* 4705 * the first page can get unmapped if it's shared with the 4706 * previous window. Even if the rest of this window is already 4707 * mapped in, we need to still check this one. 
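 * (This happens when a trim split a buffer page across two windows: the
 * shared page was unmapped above when we left the previous window, so it
 * must be checked and re-mapped here even though wd_remap_copybuf may be
 * B_FALSE for the rest of this window.)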
4708 */ 4709 pmap = &dma->dp_pgmap[pidx]; 4710 if ((pmap->pm_uses_copybuf) && (pmap->pm_mapped == B_FALSE)) { 4711 if (pmap->pm_pp != NULL) { 4712 pmap->pm_mapped = B_TRUE; 4713 i86_pp_map(pmap->pm_pp, pmap->pm_kaddr); 4714 } else if (pmap->pm_vaddr != NULL) { 4715 pmap->pm_mapped = B_TRUE; 4716 i86_va_map(pmap->pm_vaddr, sinfo->si_asp, 4717 pmap->pm_kaddr); 4718 } 4719 } 4720 pidx++; 4721 4722 /* map in the rest of the pages if required */ 4723 if (window->wd_remap_copybuf) { 4724 window->wd_remap_copybuf = B_FALSE; 4725 4726 /* figure out many pages this window takes up */ 4727 poff = (sinfo->si_buf_offset + window->wd_offset) & 4728 MMU_PAGEOFFSET; 4729 pcnt = mmu_btopr(window->wd_size + poff); 4730 ASSERT(((pidx - 1) + pcnt) <= sinfo->si_max_pages); 4731 4732 /* map pages which require it */ 4733 for (i = 1; i < pcnt; i++) { 4734 pmap = &dma->dp_pgmap[pidx]; 4735 if (pmap->pm_uses_copybuf) { 4736 ASSERT(pmap->pm_mapped == B_FALSE); 4737 if (pmap->pm_pp != NULL) { 4738 pmap->pm_mapped = B_TRUE; 4739 i86_pp_map(pmap->pm_pp, 4740 pmap->pm_kaddr); 4741 } else if (pmap->pm_vaddr != NULL) { 4742 pmap->pm_mapped = B_TRUE; 4743 i86_va_map(pmap->pm_vaddr, 4744 sinfo->si_asp, 4745 pmap->pm_kaddr); 4746 } 4747 } 4748 pidx++; 4749 } 4750 } 4751 } 4752 #endif 4753 4754 /* if the new window uses the copy buffer, sync it for the device */ 4755 if ((window->wd_dosync) && (hp->dmai_rflags & DDI_DMA_WRITE)) { 4756 (void) rootnex_coredma_sync(dip, rdip, handle, 0, 0, 4757 DDI_DMA_SYNC_FORDEV); 4758 } 4759 4760 return (DDI_SUCCESS); 4761 } 4762 4763 /* 4764 * rootnex_dma_win() 4765 * called from ddi_dma_getwin() 4766 */ 4767 /*ARGSUSED*/ 4768 static int 4769 rootnex_dma_win(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle, 4770 uint_t win, off_t *offp, size_t *lenp, ddi_dma_cookie_t *cookiep, 4771 uint_t *ccountp) 4772 { 4773 #if defined(__amd64) && !defined(__xpv) 4774 if (IOMMU_USED(rdip)) { 4775 return (iommulib_nexdma_win(dip, rdip, handle, win, offp, lenp, 4776 cookiep, ccountp)); 4777 } 4778 #endif 4779 4780 return (rootnex_coredma_win(dip, rdip, handle, win, offp, lenp, 4781 cookiep, ccountp)); 4782 } 4783 4784 /* 4785 * ************************ 4786 * obsoleted dma routines 4787 * ************************ 4788 */ 4789 4790 /* 4791 * rootnex_dma_map() 4792 * called from ddi_dma_setup() 4793 * NO IOMMU in 32 bit mode. The below routines doesn't work in 64 bit mode. 4794 */ 4795 /* ARGSUSED */ 4796 static int 4797 rootnex_dma_map(dev_info_t *dip, dev_info_t *rdip, 4798 struct ddi_dma_req *dmareq, ddi_dma_handle_t *handlep) 4799 { 4800 #if defined(__amd64) 4801 /* 4802 * this interface is not supported in 64-bit x86 kernel. See comment in 4803 * rootnex_dma_mctl() 4804 */ 4805 return (DDI_DMA_NORESOURCES); 4806 4807 #else /* 32-bit x86 kernel */ 4808 ddi_dma_handle_t *lhandlep; 4809 ddi_dma_handle_t lhandle; 4810 ddi_dma_cookie_t cookie; 4811 ddi_dma_attr_t dma_attr; 4812 ddi_dma_lim_t *dma_lim; 4813 uint_t ccnt; 4814 int e; 4815 4816 4817 /* 4818 * if the driver is just testing to see if it's possible to do the bind, 4819 * we'll use local state. Otherwise, use the handle pointer passed in. 
4820 */ 4821 if (handlep == NULL) { 4822 lhandlep = &lhandle; 4823 } else { 4824 lhandlep = handlep; 4825 } 4826 4827 /* convert the limit structure to a dma_attr one */ 4828 dma_lim = dmareq->dmar_limits; 4829 dma_attr.dma_attr_version = DMA_ATTR_V0; 4830 dma_attr.dma_attr_addr_lo = dma_lim->dlim_addr_lo; 4831 dma_attr.dma_attr_addr_hi = dma_lim->dlim_addr_hi; 4832 dma_attr.dma_attr_minxfer = dma_lim->dlim_minxfer; 4833 dma_attr.dma_attr_seg = dma_lim->dlim_adreg_max; 4834 dma_attr.dma_attr_count_max = dma_lim->dlim_ctreg_max; 4835 dma_attr.dma_attr_granular = dma_lim->dlim_granular; 4836 dma_attr.dma_attr_sgllen = dma_lim->dlim_sgllen; 4837 dma_attr.dma_attr_maxxfer = dma_lim->dlim_reqsize; 4838 dma_attr.dma_attr_burstsizes = dma_lim->dlim_burstsizes; 4839 dma_attr.dma_attr_align = MMU_PAGESIZE; 4840 dma_attr.dma_attr_flags = 0; 4841 4842 e = rootnex_dma_allochdl(dip, rdip, &dma_attr, dmareq->dmar_fp, 4843 dmareq->dmar_arg, lhandlep); 4844 if (e != DDI_SUCCESS) { 4845 return (e); 4846 } 4847 4848 e = rootnex_dma_bindhdl(dip, rdip, *lhandlep, dmareq, &cookie, &ccnt); 4849 if ((e != DDI_DMA_MAPPED) && (e != DDI_DMA_PARTIAL_MAP)) { 4850 (void) rootnex_dma_freehdl(dip, rdip, *lhandlep); 4851 return (e); 4852 } 4853 4854 /* 4855 * if the driver is just testing to see if it's possible to do the bind, 4856 * free up the local state and return the result. 4857 */ 4858 if (handlep == NULL) { 4859 (void) rootnex_dma_unbindhdl(dip, rdip, *lhandlep); 4860 (void) rootnex_dma_freehdl(dip, rdip, *lhandlep); 4861 if (e == DDI_DMA_MAPPED) { 4862 return (DDI_DMA_MAPOK); 4863 } else { 4864 return (DDI_DMA_NOMAPPING); 4865 } 4866 } 4867 4868 return (e); 4869 #endif /* defined(__amd64) */ 4870 } 4871 4872 /* 4873 * rootnex_dma_mctl() 4874 * 4875 * No IOMMU in 32 bit mode. The below routine doesn't work in 64 bit mode. 4876 */ 4877 /* ARGSUSED */ 4878 static int 4879 rootnex_dma_mctl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle, 4880 enum ddi_dma_ctlops request, off_t *offp, size_t *lenp, caddr_t *objpp, 4881 uint_t cache_flags) 4882 { 4883 #if defined(__amd64) 4884 /* 4885 * DDI_DMA_SMEM_ALLOC & DDI_DMA_IOPB_ALLOC we're changed to have a 4886 * common implementation in genunix, so they no longer have x86 4887 * specific functionality which called into dma_ctl. 4888 * 4889 * The rest of the obsoleted interfaces were never supported in the 4890 * 64-bit x86 kernel. For s10, the obsoleted DDI_DMA_SEGTOC interface 4891 * was not ported to the x86 64-bit kernel do to serious x86 rootnex 4892 * implementation issues. 4893 * 4894 * If you can't use DDI_DMA_SEGTOC; DDI_DMA_NEXTSEG, DDI_DMA_FREE, and 4895 * DDI_DMA_NEXTWIN are useless since you can get to the cookie, so we 4896 * reflect that now too... 4897 * 4898 * Even though we fixed the pointer problem in DDI_DMA_SEGTOC, we are 4899 * not going to put this functionality into the 64-bit x86 kernel now. 4900 * It wasn't ported to the 64-bit kernel for s10, no reason to change 4901 * that in a future release. 4902 */ 4903 return (DDI_FAILURE); 4904 4905 #else /* 32-bit x86 kernel */ 4906 ddi_dma_cookie_t lcookie; 4907 ddi_dma_cookie_t *cookie; 4908 rootnex_window_t *window; 4909 ddi_dma_impl_t *hp; 4910 rootnex_dma_t *dma; 4911 uint_t nwin; 4912 uint_t ccnt; 4913 size_t len; 4914 off_t off; 4915 int e; 4916 4917 4918 /* 4919 * DDI_DMA_SEGTOC, DDI_DMA_NEXTSEG, and DDI_DMA_NEXTWIN are a little 4920 * hacky since were optimizing for the current interfaces and so we can 4921 * cleanup the mess in genunix. 
Hopefully we will remove the this 4922 * obsoleted routines someday soon. 4923 */ 4924 4925 switch (request) { 4926 4927 case DDI_DMA_SEGTOC: /* ddi_dma_segtocookie() */ 4928 hp = (ddi_dma_impl_t *)handle; 4929 cookie = (ddi_dma_cookie_t *)objpp; 4930 4931 /* 4932 * convert segment to cookie. We don't distinguish between the 4933 * two :-) 4934 */ 4935 *cookie = *hp->dmai_cookie; 4936 *lenp = cookie->dmac_size; 4937 *offp = cookie->dmac_type & ~ROOTNEX_USES_COPYBUF; 4938 return (DDI_SUCCESS); 4939 4940 case DDI_DMA_NEXTSEG: /* ddi_dma_nextseg() */ 4941 hp = (ddi_dma_impl_t *)handle; 4942 dma = (rootnex_dma_t *)hp->dmai_private; 4943 4944 if ((*lenp != NULL) && ((uintptr_t)*lenp != (uintptr_t)hp)) { 4945 return (DDI_DMA_STALE); 4946 } 4947 4948 /* handle the case where we don't have any windows */ 4949 if (dma->dp_window == NULL) { 4950 /* 4951 * if seg == NULL, and we don't have any windows, 4952 * return the first cookie in the sgl. 4953 */ 4954 if (*lenp == NULL) { 4955 dma->dp_current_cookie = 0; 4956 hp->dmai_cookie = dma->dp_cookies; 4957 *objpp = (caddr_t)handle; 4958 return (DDI_SUCCESS); 4959 4960 /* if we have more cookies, go to the next cookie */ 4961 } else { 4962 if ((dma->dp_current_cookie + 1) >= 4963 dma->dp_sglinfo.si_sgl_size) { 4964 return (DDI_DMA_DONE); 4965 } 4966 dma->dp_current_cookie++; 4967 hp->dmai_cookie++; 4968 return (DDI_SUCCESS); 4969 } 4970 } 4971 4972 /* We have one or more windows */ 4973 window = &dma->dp_window[dma->dp_current_win]; 4974 4975 /* 4976 * if seg == NULL, return the first cookie in the current 4977 * window 4978 */ 4979 if (*lenp == NULL) { 4980 dma->dp_current_cookie = 0; 4981 hp->dmai_cookie = window->wd_first_cookie; 4982 4983 /* 4984 * go to the next cookie in the window then see if we done with 4985 * this window. 4986 */ 4987 } else { 4988 if ((dma->dp_current_cookie + 1) >= 4989 window->wd_cookie_cnt) { 4990 return (DDI_DMA_DONE); 4991 } 4992 dma->dp_current_cookie++; 4993 hp->dmai_cookie++; 4994 } 4995 *objpp = (caddr_t)handle; 4996 return (DDI_SUCCESS); 4997 4998 case DDI_DMA_NEXTWIN: /* ddi_dma_nextwin() */ 4999 hp = (ddi_dma_impl_t *)handle; 5000 dma = (rootnex_dma_t *)hp->dmai_private; 5001 5002 if ((*offp != NULL) && ((uintptr_t)*offp != (uintptr_t)hp)) { 5003 return (DDI_DMA_STALE); 5004 } 5005 5006 /* if win == NULL, return the first window in the bind */ 5007 if (*offp == NULL) { 5008 nwin = 0; 5009 5010 /* 5011 * else, go to the next window then see if we're done with all 5012 * the windows. 
5013 */ 5014 } else { 5015 nwin = dma->dp_current_win + 1; 5016 if (nwin >= hp->dmai_nwin) { 5017 return (DDI_DMA_DONE); 5018 } 5019 } 5020 5021 /* switch to the next window */ 5022 e = rootnex_dma_win(dip, rdip, handle, nwin, &off, &len, 5023 &lcookie, &ccnt); 5024 ASSERT(e == DDI_SUCCESS); 5025 if (e != DDI_SUCCESS) { 5026 return (DDI_DMA_STALE); 5027 } 5028 5029 /* reset the cookie back to the first cookie in the window */ 5030 if (dma->dp_window != NULL) { 5031 window = &dma->dp_window[dma->dp_current_win]; 5032 hp->dmai_cookie = window->wd_first_cookie; 5033 } else { 5034 hp->dmai_cookie = dma->dp_cookies; 5035 } 5036 5037 *objpp = (caddr_t)handle; 5038 return (DDI_SUCCESS); 5039 5040 case DDI_DMA_FREE: /* ddi_dma_free() */ 5041 (void) rootnex_dma_unbindhdl(dip, rdip, handle); 5042 (void) rootnex_dma_freehdl(dip, rdip, handle); 5043 if (rootnex_state->r_dvma_call_list_id) { 5044 ddi_run_callback(&rootnex_state->r_dvma_call_list_id); 5045 } 5046 return (DDI_SUCCESS); 5047 5048 case DDI_DMA_IOPB_ALLOC: /* get contiguous DMA-able memory */ 5049 case DDI_DMA_SMEM_ALLOC: /* get contiguous DMA-able memory */ 5050 /* should never get here, handled in genunix */ 5051 ASSERT(0); 5052 return (DDI_FAILURE); 5053 5054 case DDI_DMA_KVADDR: 5055 case DDI_DMA_GETERR: 5056 case DDI_DMA_COFF: 5057 return (DDI_FAILURE); 5058 } 5059 5060 return (DDI_FAILURE); 5061 #endif /* defined(__amd64) */ 5062 } 5063 5064 /* 5065 * ********* 5066 * FMA Code 5067 * ********* 5068 */ 5069 5070 /* 5071 * rootnex_fm_init() 5072 * FMA init busop 5073 */ 5074 /* ARGSUSED */ 5075 static int 5076 rootnex_fm_init(dev_info_t *dip, dev_info_t *tdip, int tcap, 5077 ddi_iblock_cookie_t *ibc) 5078 { 5079 *ibc = rootnex_state->r_err_ibc; 5080 5081 return (ddi_system_fmcap); 5082 } 5083 5084 /* 5085 * rootnex_dma_check() 5086 * Function called after a dma fault occurred to find out whether the 5087 * fault address is associated with a driver that is able to handle faults 5088 * and recover from faults. 5089 */ 5090 /* ARGSUSED */ 5091 static int 5092 rootnex_dma_check(dev_info_t *dip, const void *handle, const void *addr, 5093 const void *not_used) 5094 { 5095 rootnex_window_t *window; 5096 uint64_t start_addr; 5097 uint64_t fault_addr; 5098 ddi_dma_impl_t *hp; 5099 rootnex_dma_t *dma; 5100 uint64_t end_addr; 5101 size_t csize; 5102 int i; 5103 int j; 5104 5105 5106 /* The driver has to set DDI_DMA_FLAGERR to recover from dma faults */ 5107 hp = (ddi_dma_impl_t *)handle; 5108 ASSERT(hp); 5109 5110 dma = (rootnex_dma_t *)hp->dmai_private; 5111 5112 /* Get the address that we need to search for */ 5113 fault_addr = *(uint64_t *)addr; 5114 5115 /* 5116 * if we don't have any windows, we can just walk through all the 5117 * cookies. 5118 */ 5119 if (dma->dp_window == NULL) { 5120 /* for each cookie */ 5121 for (i = 0; i < dma->dp_sglinfo.si_sgl_size; i++) { 5122 /* 5123 * if the faulted address is within the physical address 5124 * range of the cookie, return DDI_FM_NONFATAL. 
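 * e.g. a cookie with dmac_laddress 0x7f000000 and dmac_size 0x2000
 * claims a fault at 0x7f001800, while a fault at 0x7f003000 falls
 * outside every cookie of this handle and we return DDI_FM_UNKNOWN so
 * the caller can go on to check other handles.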
5125 */ 5126 if ((fault_addr >= dma->dp_cookies[i].dmac_laddress) && 5127 (fault_addr <= (dma->dp_cookies[i].dmac_laddress + 5128 dma->dp_cookies[i].dmac_size))) { 5129 return (DDI_FM_NONFATAL); 5130 } 5131 } 5132 5133 /* fault_addr not within this DMA handle */ 5134 return (DDI_FM_UNKNOWN); 5135 } 5136 5137 /* we have mutiple windows, walk through each window */ 5138 for (i = 0; i < hp->dmai_nwin; i++) { 5139 window = &dma->dp_window[i]; 5140 5141 /* Go through all the cookies in the window */ 5142 for (j = 0; j < window->wd_cookie_cnt; j++) { 5143 5144 start_addr = window->wd_first_cookie[j].dmac_laddress; 5145 csize = window->wd_first_cookie[j].dmac_size; 5146 5147 /* 5148 * if we are trimming the first cookie in the window, 5149 * and this is the first cookie, adjust the start 5150 * address and size of the cookie to account for the 5151 * trim. 5152 */ 5153 if (window->wd_trim.tr_trim_first && (j == 0)) { 5154 start_addr = window->wd_trim.tr_first_paddr; 5155 csize = window->wd_trim.tr_first_size; 5156 } 5157 5158 /* 5159 * if we are trimming the last cookie in the window, 5160 * and this is the last cookie, adjust the start 5161 * address and size of the cookie to account for the 5162 * trim. 5163 */ 5164 if (window->wd_trim.tr_trim_last && 5165 (j == (window->wd_cookie_cnt - 1))) { 5166 start_addr = window->wd_trim.tr_last_paddr; 5167 csize = window->wd_trim.tr_last_size; 5168 } 5169 5170 end_addr = start_addr + csize; 5171 5172 /* 5173 * if the faulted address is within the physical 5174 * address of the cookie, return DDI_FM_NONFATAL. 5175 */ 5176 if ((fault_addr >= start_addr) && 5177 (fault_addr <= end_addr)) { 5178 return (DDI_FM_NONFATAL); 5179 } 5180 } 5181 } 5182 5183 /* fault_addr not within this DMA handle */ 5184 return (DDI_FM_UNKNOWN); 5185 } 5186 5187 /*ARGSUSED*/ 5188 static int 5189 rootnex_quiesce(dev_info_t *dip) 5190 { 5191 #if defined(__amd64) && !defined(__xpv) 5192 return (immu_quiesce()); 5193 #else 5194 return (DDI_SUCCESS); 5195 #endif 5196 } 5197 5198 #if defined(__xpv) 5199 void 5200 immu_init(void) 5201 { 5202 ; 5203 } 5204 5205 void 5206 immu_startup(void) 5207 { 5208 ; 5209 } 5210 /*ARGSUSED*/ 5211 void 5212 immu_physmem_update(uint64_t addr, uint64_t size) 5213 { 5214 ; 5215 } 5216 #endif 5217
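/*
 * Usage sketch (illustrative only, not part of the driver): the DMA entry
 * points above are normally reached through the generic DDI interfaces.
 * A minimal leaf-driver sequence, assuming a hypothetical soft state
 * "xsp" holding a buf(9S) pointer and a device-specific attribute
 * template "xx_dma_attr", would look roughly like this:
 *
 *	ddi_dma_handle_t h;
 *	ddi_dma_cookie_t ck;
 *	uint_t ccount, nwin, win;
 *	off_t off;
 *	size_t len;
 *
 *	if (ddi_dma_alloc_handle(dip, &xx_dma_attr, DDI_DMA_SLEEP, NULL,
 *	    &h) != DDI_SUCCESS)
 *		return (DDI_FAILURE);
 *
 *	switch (ddi_dma_buf_bind_handle(h, xsp->xs_bp,
 *	    DDI_DMA_READ | DDI_DMA_PARTIAL, DDI_DMA_SLEEP, NULL,
 *	    &ck, &ccount)) {
 *	case DDI_DMA_MAPPED:
 *		nwin = 1;
 *		break;
 *	case DDI_DMA_PARTIAL_MAP:
 *		(void) ddi_dma_numwin(h, &nwin);
 *		break;
 *	default:
 *		ddi_dma_free_handle(&h);
 *		return (DDI_FAILURE);
 *	}
 *
 *	for (win = 0; win < nwin; win++) {
 *		if (win != 0)
 *			(void) ddi_dma_getwin(h, win, &off, &len, &ck,
 *			    &ccount);
 *		... program ccount cookies (ck, then ddi_dma_nextcookie())
 *		    into the device and wait for the transfer ...
 *		(void) ddi_dma_sync(h, 0, 0, DDI_DMA_SYNC_FORCPU);
 *	}
 *
 *	(void) ddi_dma_unbind_handle(h);
 *	ddi_dma_free_handle(&h);
 *
 * ddi_dma_getwin() lands in rootnex_dma_win() above, and ddi_dma_sync()
 * lands in rootnex_dma_sync() whenever the bind needed the copy buffer
 * (DMP_NOSYNC is left clear in that case).
 */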