/titanic_41/usr/src/uts/sun4u/starfire/os/
H A D | cpu_sgnblk.c
      70  caddr_t cvaddr;  in cpu_sgn_mapin() local
     116  cvaddr = vmem_alloc(heap_arena, ptob(num_pages), VM_SLEEP);  in cpu_sgn_mapin()
     118  hat_devload(kas.a_hat, cvaddr, ptob(num_pages),  in cpu_sgn_mapin()
     121  cpu_sgnblkp[cpuid] = ((cpu_sgnblk_t *)(cvaddr +  in cpu_sgn_mapin()
     128  ulong_t cvaddr, num_pages;  in cpu_sgn_mapout() local
     133  if ((cvaddr = (ulong_t)cpu_sgnblkp[cpuid]) == NULL) {  in cpu_sgn_mapout()
     137  cvaddr &= ~MMU_PAGEOFFSET;  in cpu_sgn_mapout()
     175  hat_unload(kas.a_hat, (caddr_t)cvaddr, ptob(num_pages),  in cpu_sgn_mapout()
     177  vmem_free(heap_arena, (caddr_t)cvaddr, ptob(num_pages));  in cpu_sgn_mapout()
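The cpu_sgn_mapin()/cpu_sgn_mapout() hits above follow one idiom: carve kernel VA out of heap_arena, load locked translations with hat_devload(), and undo both in reverse order on the way out. A minimal sketch of that idiom, assuming hypothetical my_mapin()/my_mapout() wrappers and a plausible header set (the real routines add Starfire-specific bookkeeping):

    /* Sketch only: my_mapin()/my_mapout() are hypothetical names. */
    #include <sys/types.h>
    #include <sys/mman.h>
    #include <sys/vmem.h>
    #include <vm/hat.h>
    #include <vm/as.h>          /* kas */
    #include <vm/seg_kmem.h>    /* heap_arena */

    static caddr_t
    my_mapin(uint64_t physaddr, size_t len)
    {
            ulong_t offset = physaddr & MMU_PAGEOFFSET;
            pgcnt_t num_pages = mmu_btopr(len + offset);
            caddr_t cvaddr;

            /* Carve kernel VA, then load locked translations for it. */
            cvaddr = vmem_alloc(heap_arena, ptob(num_pages), VM_SLEEP);
            hat_devload(kas.a_hat, cvaddr, ptob(num_pages),
                mmu_btop(physaddr), PROT_READ | PROT_WRITE, HAT_LOAD_LOCK);

            /* Return the address of the object, not of its page. */
            return (cvaddr + offset);
    }

    static void
    my_mapout(caddr_t vaddr, size_t len)
    {
            ulong_t offset = (ulong_t)vaddr & MMU_PAGEOFFSET;
            pgcnt_t num_pages = mmu_btopr(len + offset);
            caddr_t cvaddr = (caddr_t)((ulong_t)vaddr & ~MMU_PAGEOFFSET);

            /* Reverse order: drop the translations, then free the VA. */
            hat_unload(kas.a_hat, cvaddr, ptob(num_pages), HAT_UNLOAD_UNLOCK);
            vmem_free(heap_arena, cvaddr, ptob(num_pages));
    }

The pda.c and cvc.c entries below are the same idiom applied to other Starfire data structures.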
H A D | pda.c
     546  caddr_t cvaddr;  in cpu_p2o_mapin() local
     579  cvaddr = vmem_alloc(heap_arena, ptob(num_pages), VM_SLEEP);  in cpu_p2o_mapin()
     581  hat_devload(kas.a_hat, cvaddr, ptob(num_pages),  in cpu_p2o_mapin()
     584  return ((post2obp_info_t *)(cvaddr + (ulong_t)cpu_p2o_offset));  in cpu_p2o_mapin()
     590  ulong_t cvaddr, num_pages;  in cpu_p2o_mapout() local
     610  cvaddr = (ulong_t)p2o - cpu_p2o_offset;  in cpu_p2o_mapout()
     611  if (cvaddr & MMU_PAGEOFFSET) {  in cpu_p2o_mapout()
     615  cpuid, (uint_t)cvaddr);  in cpu_p2o_mapout()
     619  hat_unload(kas.a_hat, (caddr_t)cvaddr, ptob(num_pages),  in cpu_p2o_mapout()
     621  vmem_free(heap_arena, (caddr_t)cvaddr, ptob(num_pages));  in cpu_p2o_mapout()
/titanic_41/usr/src/uts/i86pc/io/gfx_private/ |
H A D | gfxp_vm.c
      91  caddr_t cvaddr;  in gfxp_map_kernel_space() local
     118  cvaddr = vmem_alloc(heap_arena, ptob(npages), VM_NOSLEEP);  in gfxp_map_kernel_space()
     119  if (cvaddr == NULL)  in gfxp_map_kernel_space()
     129  hat_devload(kas.a_hat, cvaddr, ptob(npages), pfn,  in gfxp_map_kernel_space()
     131  return (cvaddr + pgoffset);  in gfxp_map_kernel_space()
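gfxp_map_kernel_space() uses the same idiom with VM_NOSLEEP, which can return NULL and therefore must be checked before any translations are loaded. A hedged fragment of that variant (wrapper name and error path are illustrative; headers as in the sketch above):

    /* Hypothetical wrapper mirroring the gfxp_map_kernel_space() hits. */
    static caddr_t
    my_map_nosleep(pfn_t pfn, pgcnt_t npages, ulong_t pgoffset)
    {
            caddr_t cvaddr;

            cvaddr = vmem_alloc(heap_arena, ptob(npages), VM_NOSLEEP);
            if (cvaddr == NULL)
                    return (NULL);  /* propagate failure instead of blocking */

            hat_devload(kas.a_hat, cvaddr, ptob(npages), pfn,
                PROT_READ | PROT_WRITE | HAT_NOSYNC, HAT_LOAD_LOCK);
            return (cvaddr + pgoffset);
    }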
H A D | gfxp_devmap.c
      71  umem_cookie->cvaddr = kva;  in gfxp_umem_cookie_init()
/titanic_41/usr/src/uts/sun4u/starfire/cvc/ |
H A D | cvc.c
    1134  caddr_t cvaddr;  in cvc_iobuf_mapin() local
    1164  cvaddr = vmem_alloc(heap_arena, ptob(num_pages), VM_SLEEP);  in cvc_iobuf_mapin()
    1166  hat_devload(kas.a_hat, cvaddr, mmu_ptob(num_pages), pfn,  in cvc_iobuf_mapin()
    1169  return ((caddr_t)(cvaddr + (uint_t)(cvc_iobuf_physaddr  in cvc_iobuf_mapin()
    1181  caddr_t cvaddr;  in cvc_iobuf_mapout() local
    1184  if ((cvaddr = cvc_iobufp[cpu_id]) == 0) {  in cvc_iobuf_mapout()
    1190  num_pages = mmu_btopr(((size_t)((uint64_t)cvaddr & MMU_PAGEOFFSET) +  in cvc_iobuf_mapout()
    1194  cvaddr = (caddr_t)(((uint64_t)cvaddr & MMU_PAGEMASK));  in cvc_iobuf_mapout()
    1196  hat_unload(kas.a_hat, cvaddr, mmu_ptob(num_pages), HAT_UNLOAD_UNLOCK);  in cvc_iobuf_mapout()
    1197  vmem_free(heap_arena, cvaddr, ptob(num_pages));  in cvc_iobuf_mapout()
/titanic_41/usr/src/uts/i86pc/os/ |
H A D | mp_implfuncs.c
     210  caddr_t cvaddr;  in psm_map_phys_new() local
     231  cvaddr = device_arena_alloc(ptob(npages), VM_NOSLEEP);  in psm_map_phys_new()
     232  if (cvaddr == NULL)  in psm_map_phys_new()
     234  hat_devload(kas.a_hat, cvaddr, mmu_ptob(npages), mmu_btop(base),  in psm_map_phys_new()
     236  return (cvaddr + pgoffset);  in psm_map_phys_new()
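psm_map_phys_new() (and rootnex_map_regspec() further down) differ from the heap_arena callers only in the arena: on i86pc, VA for device mappings comes from the device arena so the HAT can tell device memory apart from ordinary heap. A sketch under those assumptions, with a hypothetical wrapper name:

    #include <sys/machsystm.h>  /* device_arena_alloc()/_free() on i86pc */

    /* Hypothetical wrapper following the psm_map_phys_new() hits. */
    static caddr_t
    my_psm_map(paddr_t base, size_t size)
    {
            ulong_t pgoffset = (ulong_t)base & MMU_PAGEOFFSET;
            pgcnt_t npages = mmu_btopr(size + pgoffset);
            caddr_t cvaddr;

            cvaddr = device_arena_alloc(ptob(npages), VM_NOSLEEP);
            if (cvaddr == NULL)
                    return (NULL);

            /*
             * HAT_STRICTORDER is an assumption here: register pages must
             * not be cached or have their accesses reordered.
             */
            hat_devload(kas.a_hat, cvaddr, mmu_ptob(npages), mmu_btop(base),
                PROT_READ | PROT_WRITE | HAT_STRICTORDER, HAT_LOAD_LOCK);
            return (cvaddr + pgoffset);
    }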
/titanic_41/usr/src/uts/common/vm/ |
H A D | seg_dev.c
     986  ASSERT(dhp->dh_cvaddr >= cp->cvaddr &&  in devmap_handle_reduce_len()
     987  dhp->dh_cvaddr < (cp->cvaddr + cp->size));  in devmap_handle_reduce_len()
     989  (cp->cvaddr + cp->size));  in devmap_handle_reduce_len()
    1209  err = as_fault(kas.a_hat, &kas, cookie->cvaddr,  in acquire_kpmem_lock()
    1228  if (as_fault(kas.a_hat, &kas, cookie->cvaddr,  in release_kpmem_lock()
    1507  ASSERT(dhp->dh_cvaddr >= cp->cvaddr &&  in segdev_faultpage()
    1508  dhp->dh_cvaddr < (cp->cvaddr + cp->size));  in segdev_faultpage()
    1510  (cp->cvaddr + cp->size));  in segdev_faultpage()
    1512  (cp->cvaddr + cp->size));  in segdev_faultpage()
    1525  cp->cvaddr + off);  in segdev_faultpage()
    [all …]
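The acquire_kpmem_lock()/release_kpmem_lock() hits use a different mechanism: the pages behind cookie->cvaddr are pinned with an F_SOFTLOCK fault on the kernel address space and released with F_SOFTUNLOCK. A condensed sketch; the function names and error handling are illustrative, only the as_fault() calls mirror the hits:

    #include <sys/ddidevmap.h>
    #include <vm/as.h>
    #include <vm/seg_enum.h>    /* F_SOFTLOCK, F_SOFTUNLOCK, S_WRITE */

    static int
    my_kmem_pin(struct ddi_umem_cookie *cookie, size_t len)
    {
            /* Fault the pages in and softlock them against paging. */
            if (as_fault(kas.a_hat, &kas, cookie->cvaddr, len,
                F_SOFTLOCK, S_WRITE) != 0)
                    return (-1);
            return (0);
    }

    static void
    my_kmem_unpin(struct ddi_umem_cookie *cookie, size_t len)
    {
            /* Drop the softlock taken above. */
            (void) as_fault(kas.a_hat, &kas, cookie->cvaddr, len,
                F_SOFTUNLOCK, S_WRITE);
    }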
/titanic_41/usr/src/uts/common/sys/ |
H A D | ddidevmap.h
      68  caddr_t cvaddr;  /* cookie virtual address. */  member
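For context, an abridged view of the cookie these hits manipulate. Only the cvaddr member is quoted above; the other members shown here are inferred from the call sites in this listing (cp->size in seg_dev.c, p->asp/p->pparray/p->s_flags in sunddi.c), and the real definition in ddidevmap.h has more fields and may order or type them differently:

    struct ddi_umem_cookie {
            caddr_t         cvaddr;     /* cookie virtual address */
            size_t          size;       /* size of the described region */
            struct as       *asp;       /* address space holding the pages */
            struct page     **pparray;  /* shadow list from as_pagelock() */
            uint_t          s_flags;    /* lock mode (S_READ/S_WRITE) */
            /* ... further members elided ... */
    };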
/titanic_41/usr/src/uts/i86pc/io/xsvc/ |
H A D | xsvc.c
     956  umem_cookiep->cvaddr = kva;  in xsvc_umem_cookie_alloc()
    1096  kva = cp->cvaddr;  in xsvc_devmap_unmap()
/titanic_41/usr/src/uts/i86pc/io/ |
H A D | rootnex.c
    1035  void *cvaddr;  in rootnex_map_regspec() local
    1187  cvaddr = device_arena_alloc(ptob(npages), VM_NOSLEEP);  in rootnex_map_regspec()
    1188  if (cvaddr == NULL)  in rootnex_map_regspec()
    1194  hat_devload(kas.a_hat, cvaddr, mmu_ptob(npages),  in rootnex_map_regspec()
    1197  *vaddrp = (caddr_t)cvaddr + pgoffset;  in rootnex_map_regspec()
/titanic_41/usr/src/uts/common/io/myri10ge/drv/ |
H A D | myri10ge.c
    4251  char *cvaddr, *ptr;  in myri10ge_enable_nvidia_ecrc() local
    4307  cvaddr = device_arena_alloc(ptob(1), VM_NOSLEEP);  in myri10ge_enable_nvidia_ecrc()
    4308  if (cvaddr == NULL)  in myri10ge_enable_nvidia_ecrc()
    4312  hat_devload(kas.a_hat, cvaddr, mmu_ptob(1),  in myri10ge_enable_nvidia_ecrc()
    4316  ptr = cvaddr + pgoffset;  in myri10ge_enable_nvidia_ecrc()
    4330  hat_unload(kas.a_hat, cvaddr, ptob(1), HAT_UNLOAD_UNLOCK);  in myri10ge_enable_nvidia_ecrc()
    4331  device_arena_free(cvaddr, ptob(1));  in myri10ge_enable_nvidia_ecrc()
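myri10ge_enable_nvidia_ecrc() shows the transient form of the pattern: map a single page, perform one access through it, and immediately unmap. A sketch with placeholder pfn/offset arguments and a placeholder register write (the real code pokes a bridge register):

    static void
    my_poke_device_page(pfn_t pfn, ulong_t pgoffset)
    {
            char *cvaddr, *ptr;

            cvaddr = device_arena_alloc(ptob(1), VM_NOSLEEP);
            if (cvaddr == NULL)
                    return;

            hat_devload(kas.a_hat, cvaddr, mmu_ptob(1), pfn,
                PROT_READ | PROT_WRITE | HAT_STRICTORDER, HAT_LOAD_LOCK);
            ptr = cvaddr + pgoffset;

            *(volatile uint32_t *)ptr = 1;      /* placeholder access */

            /* The mapping lives only as long as the access needs it. */
            hat_unload(kas.a_hat, cvaddr, ptob(1), HAT_UNLOAD_UNLOCK);
            device_arena_free(cvaddr, ptob(1));
    }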
/titanic_41/usr/src/uts/common/os/ |
H A D | sunddi.c
    8398  p->cvaddr = addr;  in umem_lockmemory()
    8468  as_pageunlock(p->asp, p->pparray, p->cvaddr, p->size, p->s_flags);  in i_ddi_umem_unlock()
    8654  p->cvaddr = addr;  in ddi_umem_lock()
    8780  ASSERT(((uintptr_t)(p->cvaddr) & PAGEOFFSET) == 0);  in ddi_umem_iosetup()
    8781  bp->b_un.b_addr = (caddr_t)((uintptr_t)(p->cvaddr) + off);  in ddi_umem_iosetup()
    8784  ASSERT(((uintptr_t)(p->cvaddr) & PAGEOFFSET) == 0);  in ddi_umem_iosetup()
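The ddi_umem_iosetup() hits show where the member pays off for drivers: the routine maps a locked cookie onto a buf, putting the transfer address at cvaddr plus the caller's byte offset. A hedged usage sketch; cookie, mydev and my_iodone are placeholders supplied by the caller:

    #include <sys/types.h>
    #include <sys/buf.h>
    #include <sys/sunddi.h>

    static struct buf *
    my_cookie_to_buf(ddi_umem_cookie_t cookie, size_t len, dev_t mydev,
        int (*my_iodone)(struct buf *))
    {
            /* off = 0: start of the locked region; B_READ: device to memory. */
            return (ddi_umem_iosetup(cookie, 0, len, B_READ, mydev, 0,
                my_iodone, DDI_UMEM_SLEEP));
    }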