/linux/arch/m68k/include/asm/bitops.h

   31: static inline void bset_reg_set_bit(int nr, volatile unsigned long *vaddr)
   33:         char *p = (char *)vaddr + (nr ^ 31) / 8;
   41: static inline void bset_mem_set_bit(int nr, volatile unsigned long *vaddr)
   43:         char *p = (char *)vaddr + (nr ^ 31) / 8;
   50: static inline void bfset_mem_set_bit(int nr, volatile unsigned long *vaddr)
   54:                 : "d" (nr ^ 31), "o" (*vaddr)
   59: #define set_bit(nr, vaddr)      bset_reg_set_bit(nr, vaddr)
   61: #define set_bit(nr, vaddr)      bset_mem_set_bit(nr, vaddr)
   63: #define set_bit(nr, vaddr)      (__builtin_constant_p(nr) ? \
   64:                                  bset_mem_set_bit(nr, vaddr) : \
  [all …]
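
A note on the bit addressing in this hit: Linux numbers bits little-endian within each 32-bit word, while the m68k bit-field instructions count from the most-significant end, so bit nr lives in byte (nr ^ 31) / 8 of its word. A minimal userspace sketch of that arithmetic (plain C, illustration only, not kernel code):

    #include <stdio.h>

    int main(void)
    {
            for (int nr = 0; nr < 32; nr += 7) {
                    int byte = (nr ^ 31) / 8; /* byte offset within the 32-bit word */
                    int bit  = nr & 7;        /* bit position inside that byte */
                    printf("bit %2d -> byte %d, in-byte bit %d\n", nr, byte, bit);
            }
            return 0;
    }

Bit 0, the word's least-significant bit, lands in byte 3 — exactly where a big-endian store puts it.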

/linux/arch/riscv/mm/kasan_init.c

   29: static void __init kasan_populate_pte(pmd_t *pmd, unsigned long vaddr, unsigned long end)
   39:         ptep = pte_offset_kernel(pmd, vaddr);
   47:         } while (ptep++, vaddr += PAGE_SIZE, vaddr != end);
   50: static void __init kasan_populate_pmd(pud_t *pud, unsigned long vaddr, unsigned long end)
   61:         pmdp = pmd_offset(pud, vaddr);
   64:                 next = pmd_addr_end(vaddr, end);
   66:                 if (pmd_none(pmdp_get(pmdp)) && IS_ALIGNED(vaddr, PMD_SIZE) &&
   67:                     (next - vaddr) >= PMD_SIZE) {
   76:                 kasan_populate_pte(pmdp, vaddr, next);
   77:         } while (pmdp++, vaddr = next, vaddr != end);
  [all …]

/linux/arch/parisc/kernel/pci-dma.c

   77:                 unsigned long vaddr,                    /* in map_pte_uncached() */
   81:         unsigned long orig_vaddr = vaddr;
   83:         vaddr &= ~PMD_MASK;
   84:         end = vaddr + size;
   96:                 vaddr += PAGE_SIZE;
  100:         } while (vaddr < end);
  104: static inline int map_pmd_uncached(pmd_t * pmd, unsigned long vaddr,
  108:         unsigned long orig_vaddr = vaddr;
  110:         vaddr &= ~PGDIR_MASK;
  111:         end = vaddr + size;
  [all …]

/linux/arch/arm/mm/cache-xsc3l2.c

   88:         unsigned long vaddr;                            /* in xsc3_l2_inv_range() */
   95:         vaddr = -1;             /* to force the first mapping */
  101:                 vaddr = l2_map_va(start & ~(CACHE_LINE_SIZE - 1), vaddr);
  102:                 xsc3_l2_clean_mva(vaddr);
  103:                 xsc3_l2_inv_mva(vaddr);
  111:                 vaddr = l2_map_va(start, vaddr);
  112:                 xsc3_l2_inv_mva(vaddr);
  120:                 vaddr = l2_map_va(start, vaddr);
  121:                 xsc3_l2_clean_mva(vaddr);
  122:                 xsc3_l2_inv_mva(vaddr);
  [all …]

/linux/arch/x86/mm/mem_encrypt_amd.c

  156: static void __init __sme_early_map_unmap_mem(void *vaddr, unsigned long size,
  159:         unsigned long paddr = (unsigned long)vaddr - __PAGE_OFFSET;
  167:                 __early_make_pgtable((unsigned long)vaddr, pmd);
  169:                 vaddr += PMD_SIZE;
  256: static void enc_dec_hypercall(unsigned long vaddr, unsigned long size, bool enc)
  259:         unsigned long vaddr_end = vaddr + size;
  261:         while (vaddr < vaddr_end) {
  266:                 kpte = lookup_address(vaddr, &level);
  281:                 vaddr = (vaddr & pmask) + psize;
  286: static int amd_enc_status_change_prepare(unsigned long vaddr, int npages, bool enc)
  [all …]
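
The stepping at line 281 is what lets enc_dec_hypercall() walk a range one mapping at a time: lookup_address() reports the mapping level, and the loop snaps vaddr to the base of the current (possibly huge) page and jumps past it. A standalone demo of that arithmetic with an assumed 2 MiB mapping size (plain C, illustration only):

    #include <stdio.h>

    #define PMD_SIZE 0x200000UL /* 2 MiB, assumed for the demo */

    int main(void)
    {
            unsigned long vaddr = 0x7f0012345678UL; /* arbitrary example address */
            unsigned long psize = PMD_SIZE;         /* as if lookup_address() saw a PMD mapping */
            unsigned long pmask = ~(psize - 1);

            /* Snap to the base of the current mapping, then skip the whole page. */
            vaddr = (vaddr & pmask) + psize;
            printf("next mapping starts at %#lx\n", vaddr);
            return 0;
    }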

/linux/arch/x86/mm/init_32.c

  107: pmd_t * __init populate_extra_pmd(unsigned long vaddr)
  109:         int pgd_idx = pgd_index(vaddr);
  110:         int pmd_idx = pmd_index(vaddr);
  115: pte_t * __init populate_extra_pte(unsigned long vaddr)
  117:         int pte_idx = pte_index(vaddr);
  120:         pmd = populate_extra_pmd(vaddr);
  132:         unsigned long vaddr;                            /* in page_table_range_init_count() */
  137:         vaddr = start;
  138:         pgd_idx = pgd_index(vaddr);
  139:         pmd_idx = pmd_index(vaddr);
  [all …]

/linux/arch/x86/mm/pgtable_32.c

   27: void set_pte_vaddr(unsigned long vaddr, pte_t pteval)
   35:         pgd = swapper_pg_dir + pgd_index(vaddr);
   40:         p4d = p4d_offset(pgd, vaddr);
   45:         pud = pud_offset(p4d, vaddr);
   50:         pmd = pmd_offset(pud, vaddr);
   55:         pte = pte_offset_kernel(pmd, vaddr);
   57:                 set_pte_at(&init_mm, vaddr, pte, pteval);
   59:                 pte_clear(&init_mm, vaddr, pte);
   65:         flush_tlb_one_kernel(vaddr);
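
set_pte_vaddr() shows the canonical top-down walk through all five table levels. As a companion, here is a hedged sketch of the same descent used read-only — resolve a kernel virtual address to its PTE, bailing out on any missing level. It only builds inside the kernel tree, and it deliberately ignores huge mappings:

    /* Sketch only: mirrors the pgd -> p4d -> pud -> pmd -> pte descent above. */
    static pte_t *lookup_kernel_pte(unsigned long vaddr)
    {
            pgd_t *pgd = pgd_offset_k(vaddr);
            p4d_t *p4d;
            pud_t *pud;
            pmd_t *pmd;

            if (pgd_none(*pgd))
                    return NULL;
            p4d = p4d_offset(pgd, vaddr);
            if (p4d_none(*p4d))
                    return NULL;
            pud = pud_offset(p4d, vaddr);
            if (pud_none(*pud))
                    return NULL;
            pmd = pmd_offset(pud, vaddr);
            if (pmd_none(*pmd)) /* a leaf PMD (2 MiB page) would need pmd_leaf() handling */
                    return NULL;
            return pte_offset_kernel(pmd, vaddr);
    }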

/linux/arch/x86/mm/init_64.c

  267: static p4d_t *fill_p4d(pgd_t *pgd, unsigned long vaddr)
  276:         return p4d_offset(pgd, vaddr);
  279: static pud_t *fill_pud(p4d_t *p4d, unsigned long vaddr)
  288:         return pud_offset(p4d, vaddr);
  291: static pmd_t *fill_pmd(pud_t *pud, unsigned long vaddr)
  300:         return pmd_offset(pud, vaddr);
  303: static pte_t *fill_pte(pmd_t *pmd, unsigned long vaddr)
  311:         return pte_offset_kernel(pmd, vaddr);
  314: static void __set_pte_vaddr(pud_t *pud, unsigned long vaddr, pte_t new_pte)
  316:         pmd_t *pmd = fill_pmd(pud, vaddr);
  [all …]

/linux/arch/parisc/mm/fixmap.c

   15:         unsigned long vaddr = __fix_to_virt(idx);       /* in set_fixmap() */
   16:         pgd_t *pgd = pgd_offset_k(vaddr);
   17:         p4d_t *p4d = p4d_offset(pgd, vaddr);
   18:         pud_t *pud = pud_offset(p4d, vaddr);
   19:         pmd_t *pmd = pmd_offset(pud, vaddr);
   22:         pte = pte_offset_kernel(pmd, vaddr);
   23:         set_pte_at(&init_mm, vaddr, pte, __mk_pte(phys, PAGE_KERNEL_RWX));
   24:         flush_tlb_kernel_range(vaddr, vaddr + PAGE_SIZE);
   29:         unsigned long vaddr = __fix_to_virt(idx);       /* in clear_fixmap() */
   30:         pte_t *pte = virt_to_kpte(vaddr);
  [all …]

/linux/mm/highmem.c

  165: struct page *__kmap_to_page(void *vaddr)
  167:         unsigned long base = (unsigned long) vaddr & PAGE_MASK;
  169:         unsigned long addr = (unsigned long)vaddr;
  192:         return virt_to_page(vaddr);
  247:         unsigned long vaddr;                            /* in map_new_virtual() */
  289:         vaddr = PKMAP_ADDR(last_pkmap_nr);
  290:         set_pte_at(&init_mm, vaddr,
  294:         set_page_address(page, (void *)vaddr);
  296:         return vaddr;
  309:         unsigned long vaddr;                            /* in kmap_high() */
  [all …]
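
map_new_virtual() converts a pkmap slot number into an address with PKMAP_ADDR(). A userspace demo of that slot arithmetic — PKMAP_BASE and PAGE_SHIFT here are made-up stand-ins for one 32-bit configuration, purely for illustration:

    #include <stdio.h>

    #define PAGE_SHIFT     12
    #define PKMAP_BASE     0xff800000UL /* assumed value for the demo */
    #define PKMAP_ADDR(nr) (PKMAP_BASE + ((unsigned long)(nr) << PAGE_SHIFT))

    int main(void)
    {
            /* Each pkmap slot is one page; slot n sits n pages above the base. */
            for (unsigned int nr = 0; nr < 4; nr++)
                    printf("pkmap slot %u -> vaddr %#lx\n", nr, PKMAP_ADDR(nr));
            return 0;
    }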

/linux/arch/m68k/sun3x/dvma.c

   79:                 unsigned long vaddr, int len)           /* in dvma_map_cpu() */
   88:         vaddr &= PAGE_MASK;
   90:         end = PAGE_ALIGN(vaddr + len);
   92:         pr_debug("dvma: mapping kern %08lx to virt %08lx\n", kaddr, vaddr);
   93:         pgd = pgd_offset_k(vaddr);
   94:         p4d = p4d_offset(pgd, vaddr);
   95:         pud = pud_offset(p4d, vaddr);
  101:                 if((pmd = pmd_alloc(&init_mm, pud, vaddr)) == NULL) {
  106:                 if((end & PGDIR_MASK) > (vaddr & PGDIR_MASK))
  107:                         end2 = (vaddr + (PGDIR_SIZE-1)) & PGDIR_MASK;
  [all …]

/linux/drivers/media/common/videobuf2/videobuf2-vmalloc.c

   26:         void                    *vaddr;                 /* struct member */
   47:         buf->vaddr = vmalloc_user(buf->size);           /* in vb2_vmalloc_alloc() */
   48:         if (!buf->vaddr) {
   68:         vfree(buf->vaddr);                              /* in vb2_vmalloc_put() */
   74:                 unsigned long vaddr, unsigned long size) /* in vb2_vmalloc_get_userptr() */
   86:         offset = vaddr & ~PAGE_MASK;
   88:         vec = vb2_create_framevec(vaddr, size,
  107:                 buf->vaddr = (__force void *)
  110:                 buf->vaddr = vm_map_ram(frame_vector_pages(vec), n_pages, -1);
  113:         if (!buf->vaddr)
  [all …]

/linux/drivers/net/ethernet/freescale/fman/fman_muram.c

   19:                                  unsigned long vaddr)   /* in fman_muram_vbase_to_offset() */
   21:         return vaddr - (unsigned long)muram->vbase;
   40:         void __iomem *vaddr;                            /* in fman_muram_init() */
   53:         vaddr = ioremap(base, size);
   54:         if (!vaddr) {
   59:         ret = gen_pool_add_virt(muram->pool, (unsigned long)vaddr,
   63:                 iounmap(vaddr);
   67:         memset_io(vaddr, 0, (int)size);
   69:         muram->vbase = vaddr;
  106:         unsigned long vaddr;                            /* in fman_muram_alloc() */
  [all …]
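
fman_muram_init() combines ioremap() with a genalloc pool so later callers can carve blocks out of the device SRAM. A hedged sketch of that pattern (kernel-tree-only; the function name and the 8-byte granularity are assumptions, error handling condensed):

    #include <linux/genalloc.h>
    #include <linux/io.h>

    static struct gen_pool *sram_pool_init(phys_addr_t base, size_t size)
    {
            struct gen_pool *pool = gen_pool_create(3, -1); /* 8-byte granules, any node */
            void __iomem *vaddr;

            if (!pool)
                    return NULL;
            vaddr = ioremap(base, size);
            if (!vaddr)
                    goto err_pool;
            /* Seed the allocator with the mapped range, keeping virt<->phys paired. */
            if (gen_pool_add_virt(pool, (unsigned long)vaddr, base, size, -1))
                    goto err_unmap;
            memset_io(vaddr, 0, size);
            return pool;

    err_unmap:
            iounmap(vaddr);
    err_pool:
            gen_pool_destroy(pool);
            return NULL;
    }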

/linux/drivers/media/platform/chips-media/wave5/wave5-vdi.c

   18:         if (!vpu_dev->common_mem.vaddr) {               /* in wave5_vdi_allocate_common_memory() */
   34:                  &vpu_dev->common_mem.daddr, vpu_dev->common_mem.size, vpu_dev->common_mem.vaddr);
   90:         if (!vb || !vb->vaddr) {                        /* in wave5_vdi_clear_memory() */
   95:         memset(vb->vaddr, 0, vb->size);
  102:         if (!vb || !vb->vaddr) {                        /* in wave5_vdi_write_memory() */
  112:         memcpy(vb->vaddr + offset, data, len);
  119:         void *vaddr;                                    /* in wave5_vdi_allocate_dma_memory() */
  127:         vaddr = dma_alloc_coherent(vpu_dev->dev, vb->size, &daddr, GFP_KERNEL);
  128:         if (!vaddr)
  130:         vb->vaddr = vaddr;
  [all …]

/linux/arch/m68k/sun3/dvma.c

   23: static unsigned long dvma_page(unsigned long kaddr, unsigned long vaddr)
   35:         if(ptelist[(vaddr & 0xff000) >> PAGE_SHIFT] != pte) {
   36:                 sun3_put_pte(vaddr, pte);
   37:                 ptelist[(vaddr & 0xff000) >> PAGE_SHIFT] = pte;
   40:         return (vaddr + (kaddr & ~PAGE_MASK));
   49:         unsigned long vaddr;                            /* in dvma_map_iommu() */
   51:         vaddr = dvma_btov(baddr);
   53:         end = vaddr + len;
   55:         while(vaddr < end) {
   56:                 dvma_page(kaddr, vaddr);
  [all …]

/linux/arch/sh/mm/kmap.c

   21:         unsigned long vaddr;                            /* in kmap_coherent_init() */
   24:         vaddr = __fix_to_virt(FIX_CMAP_BEGIN);
   25:         kmap_coherent_pte = virt_to_kpte(vaddr);
   32:         unsigned long vaddr;                            /* in kmap_coherent() */
   43:         vaddr = __fix_to_virt(idx);
   48:         return (void *)vaddr;
   54:         unsigned long vaddr = (unsigned long)kvaddr & PAGE_MASK;  /* in kunmap_coherent() */
   55:         enum fixed_addresses idx = __virt_to_fix(vaddr);
   58:                 __flush_purge_region((void *)vaddr, PAGE_SIZE);
   60:                 pte_clear(&init_mm, vaddr, kmap_coherent_pte - idx);
  [all …]
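
kmap_coherent() and kunmap_coherent() rely on __fix_to_virt()/__virt_to_fix() being exact inverses. A userspace demo of that round trip using the generic fixmap formulas (FIXADDR_TOP is a made-up value here; fixmap slots grow downward from it):

    #include <stdio.h>

    #define PAGE_SHIFT  12
    #define PAGE_MASK   (~((1UL << PAGE_SHIFT) - 1))
    #define FIXADDR_TOP 0xffc00000UL /* assumed for the demo */

    #define __fix_to_virt(idx) (FIXADDR_TOP - ((unsigned long)(idx) << PAGE_SHIFT))
    #define __virt_to_fix(va)  ((FIXADDR_TOP - ((va) & PAGE_MASK)) >> PAGE_SHIFT)

    int main(void)
    {
            unsigned long idx = 5;
            unsigned long va = __fix_to_virt(idx);

            printf("idx %lu -> %#lx -> idx %lu\n", idx, va, __virt_to_fix(va));
            return 0;
    }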

/linux/arch/mips/mm/pgtable-32.c

   43:         unsigned long vaddr;                            /* in pagetable_init() */
   62:         vaddr = __fix_to_virt(__end_of_fixed_addresses - 1);
   63:         fixrange_init(vaddr & PMD_MASK, vaddr + FIXADDR_SIZE, pgd_base);
   69:         vaddr = PKMAP_BASE;
   70:         fixrange_init(vaddr & PMD_MASK, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base);
   72:         pgd = swapper_pg_dir + pgd_index(vaddr);
   73:         p4d = p4d_offset(pgd, vaddr);
   74:         pud = pud_offset(p4d, vaddr);
   75:         pmd = pmd_offset(pud, vaddr);
   76:         pte = pte_offset_kernel(pmd, vaddr);

/linux/drivers/misc/sgi-gru/grufault.c

   48: struct vm_area_struct *gru_find_vma(unsigned long vaddr)
   52:         vma = vma_lookup(current->mm, vaddr);
   66: static struct gru_thread_state *gru_find_lock_gts(unsigned long vaddr)
   73:         vma = gru_find_vma(vaddr);
   75:                 gts = gru_find_thread_state(vma, TSID(vaddr, vma));
   83: static struct gru_thread_state *gru_alloc_locked_gts(unsigned long vaddr)
   90:         vma = gru_find_vma(vaddr);
   94:                 gts = gru_alloc_thread_state(vma, TSID(vaddr, vma));
  178:                 unsigned long vaddr, int write,         /* in non_atomic_pte_lookup() */
  188:         if (get_user_pages(vaddr, 1, write ? FOLL_WRITE : 0, &page) <= 0)
  [all …]
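
non_atomic_pte_lookup() uses the four-argument get_user_pages() form visible at line 188 to pin a single user page. A hedged sketch of that pattern in isolation (kernel-tree-only; the function name is assumed, and the caller must hold mmap_lock, as the GRU driver does):

    #include <linux/mm.h>

    static int touch_one_user_page(unsigned long vaddr, int write)
    {
            struct page *page;

            /* Pin exactly one page; FOLL_WRITE only if we intend to write to it. */
            if (get_user_pages(vaddr, 1, write ? FOLL_WRITE : 0, &page) <= 0)
                    return -EFAULT;

            /* ... use page_to_pfn(page) or a temporary mapping here ... */

            put_page(page); /* drop the reference taken by get_user_pages() */
            return 0;
    }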

/linux/arch/xtensa/mm/cache.c

   60:                                 unsigned long vaddr)    /* in kmap_invalidate_coherent() */
   62:         if (!DCACHE_ALIAS_EQ(page_to_phys(page), vaddr)) {
   82:                             unsigned long vaddr, unsigned long *paddr)  /* in coherent_kvaddr() */
   85:         return (void *)(base + (vaddr & DCACHE_ALIAS_MASK));
   88: void clear_user_highpage(struct page *page, unsigned long vaddr)
   92:         void *kvaddr = coherent_kvaddr(page, TLBTEMP_BASE_1, vaddr, &paddr);
   95:         kmap_invalidate_coherent(page, vaddr);
  103:                        unsigned long vaddr, struct vm_area_struct *vma)  /* in copy_user_highpage() */
  107:         void *dst_vaddr = coherent_kvaddr(dst, TLBTEMP_BASE_1, vaddr,
  109:         void *src_vaddr = coherent_kvaddr(src, TLBTEMP_BASE_2, vaddr,
  [all …]

/linux/arch/csky/mm/tcm.c

   29:         unsigned long vaddr, paddr;
   42:                 vaddr = __fix_to_virt(FIX_TCM - i);
   45:                         pte_offset_kernel((pmd_t *)pgd_offset_k(vaddr), vaddr);
   49:                 flush_tlb_one(vaddr);
   61:                 vaddr = __fix_to_virt(FIX_TCM - CONFIG_ITCM_NR_PAGES - i);
   64:                         pte_offset_kernel((pmd_t *) pgd_offset_k(vaddr), vaddr);
   68:                 flush_tlb_one(vaddr);
  111:         unsigned long vaddr;
  116:         vaddr = gen_pool_alloc(tcm_pool, len);
  117:         if (!vaddr)
  [all …]

/linux/drivers/scsi/hpsa.h

  172:         void __iomem *vaddr;                            /* struct member */
  423:         writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET);  /* in SA5_submit_command() */
  424:         (void) readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
  430:         writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET);  /* in SA5_submit_command_no_read() */
  436:         writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET);  /* in SA5_submit_command_ioaccel2() */
  448:         writel(0, h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);        /* in SA5_intr_mask() */
  449:         (void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
  453:                h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
  454:         (void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
  465:         writel(0, h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);        /* in SA5B_intr_mask() */
  [all …]
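
The writel()-then-readl() pairs above are the classic fix for posted PCI writes: the read forces the preceding write out to the adapter before the driver continues. A hedged sketch of the pattern (kernel-tree-only; the names and offsets are placeholders, not the hpsa register map):

    #include <linux/io.h>

    static inline void post_doorbell(void __iomem *base, u32 val,
                                     unsigned long doorbell_off,
                                     unsigned long flush_off)
    {
            writel(val, base + doorbell_off);
            (void)readl(base + flush_off); /* read back to flush the posted write */
    }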

/linux/arch/nios2/mm/dma-mapping.c

   24:         void *vaddr = phys_to_virt(paddr);              /* in arch_sync_dma_for_device() */
   28:                 invalidate_dcache_range((unsigned long)vaddr,
   29:                                         (unsigned long)(vaddr + size));
   37:                 flush_dcache_range((unsigned long)vaddr,
   38:                                    (unsigned long)(vaddr + size));
   48:         void *vaddr = phys_to_virt(paddr);              /* in arch_sync_dma_for_cpu() */
   53:                 invalidate_dcache_range((unsigned long)vaddr,
   54:                                         (unsigned long)(vaddr + size));
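
The two functions above split cache maintenance by transfer direction: push dirty lines out before the device reads, drop stale lines before the CPU reads what the device wrote. A hedged sketch of that decision, reusing the nios2 cache helpers from the hit (kernel-tree-only; the function name is assumed):

    #include <linux/dma-direction.h>

    static void sync_buffer_for_device(void *vaddr, size_t size,
                                       enum dma_data_direction dir)
    {
            unsigned long start = (unsigned long)vaddr;

            switch (dir) {
            case DMA_FROM_DEVICE:   /* device will write: invalidate stale lines */
                    invalidate_dcache_range(start, start + size);
                    break;
            case DMA_TO_DEVICE:     /* device will read: write dirty lines back */
            case DMA_BIDIRECTIONAL:
                    flush_dcache_range(start, start + size);
                    break;
            default:
                    break;
            }
    }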

/linux/include/linux/iosys-map.h

  112:         void *vaddr;                                    /* struct member */
  123:                 .vaddr = (vaddr_), \
  183: static inline void iosys_map_set_vaddr(struct iosys_map *map, void *vaddr)
  185:         map->vaddr = vaddr;
  222:         return lhs->vaddr == rhs->vaddr;                /* in iosys_map_is_equal() */
  239:         return !map->vaddr;                             /* in iosys_map_is_null() */
  287:         memcpy(dst->vaddr + dst_offset, src, len);      /* in iosys_map_memcpy_to() */
  307:         memcpy(dst, src->vaddr + src_offset, len);      /* in iosys_map_memcpy_from() */
  323:         map->vaddr += incr;                             /* in iosys_map_incr() */
  342:         memset(dst->vaddr + offset, value, len);        /* in iosys_map_memset() */
  [all …]
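
The helpers above treat struct iosys_map as a tagged pointer into either system or I/O memory; in the system-memory case everything reduces to plain memcpy()/memset() on map->vaddr. A hedged usage sketch built only from the helpers visible in this hit (kernel-tree-only; the buffer and header layout are placeholders):

    #include <linux/iosys-map.h>

    static void fill_header(void *sysmem_buf)
    {
            struct iosys_map map = IOSYS_MAP_INIT_VADDR(sysmem_buf);
            static const char magic[4] = { 'H', 'D', 'R', '0' };

            iosys_map_memset(&map, 0, 0, 16);                   /* zero a 16-byte header */
            iosys_map_memcpy_to(&map, 0, magic, sizeof(magic)); /* stamp the magic */
            iosys_map_incr(&map, 16);                           /* cursor now past the header */
    }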

/linux/arch/x86/boot/startup/sme.c

   77:         unsigned long vaddr;                            /* struct member */
   99:         pgd_start = ppd->vaddr & PGDIR_MASK;            /* in sme_clear_pgd() */
  104:         pgd_p = ppd->pgd + pgd_index(ppd->vaddr);
  116:         pgd = ppd->pgd + pgd_index(ppd->vaddr);         /* in sme_prepare_pgd() */
  124:         p4d = p4d_offset(pgd, ppd->vaddr);
  132:         pud = pud_offset(p4d, ppd->vaddr);
  155:         pmd = pmd_offset(pud, ppd->vaddr);              /* in sme_populate_pgd_large() */
  172:         pmd = pmd_offset(pud, ppd->vaddr);              /* in sme_populate_pgd() */
  183:         pte = pte_offset_kernel(pmd, ppd->vaddr);
  190:         while (ppd->vaddr < ppd->vaddr_end) {           /* in __sme_map_range_pmd() */
  [all …]

/linux/drivers/xen/xenbus/xenbus_client.c

   91:                       void **vaddr);
   92:         int (*unmap)(struct xenbus_device *dev, void *vaddr);
  390: int xenbus_setup_ring(struct xenbus_device *dev, gfp_t gfp, void **vaddr,
  399:         addr = *vaddr = alloc_pages_exact(ring_size, gfp | __GFP_ZERO);
  400:         if (!*vaddr) {
  415:         if (is_vmalloc_addr(*vaddr))
  430:         if (*vaddr)
  431:                 free_pages_exact(*vaddr, ring_size);
  434:         *vaddr = NULL;
  449: void xenbus_teardown_ring(void **vaddr, unsigned int nr_pages,
  [all …]
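
xenbus_setup_ring() gets its ring pages from alloc_pages_exact(), which hands back a physically contiguous, page-aligned buffer that must later be released with free_pages_exact() of the same size — exactly what the error path at lines 430-431 does. A hedged sketch of that pairing (kernel-tree-only; the function names are assumed):

    #include <linux/gfp.h>

    static void *ring_buffer_alloc(size_t ring_size)
    {
            /* __GFP_ZERO so the shared ring starts out clean, as in the hit above. */
            return alloc_pages_exact(ring_size, GFP_KERNEL | __GFP_ZERO);
    }

    static void ring_buffer_free(void *ring, size_t ring_size)
    {
            if (ring)
                    free_pages_exact(ring, ring_size);
    }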