/linux/sound/soc/codecs/lpass-va-macro.c
  22: /* VA macro registers */
  274: /* VA macro */
  298: /* VA core */
  460: static int va_clk_rsc_fs_gen_request(struct va_macro *va, bool enable)  [in va_clk_rsc_fs_gen_request(), argument]
  462: struct regmap *regmap = va->regmap;  [in va_clk_rsc_fs_gen_request()]
  493: static int va_macro_mclk_enable(struct va_macro *va, bool mclk_enable)  [in va_macro_mclk_enable(), argument]
  495: struct regmap *regmap = va->regmap;  [in va_macro_mclk_enable()]
  498: va_clk_rsc_fs_gen_request(va, true);  [in va_macro_mclk_enable()]
  502: va_clk_rsc_fs_gen_request(va, false);  [in va_macro_mclk_enable()]
  512: struct va_macro *va = snd_soc_component_get_drvdata(comp);  [in va_macro_mclk_event(), local]
  [all …]
/linux/drivers/scsi/qedi/qedi_dbg.c
  14: va_list va;  [in qedi_dbg_err(), local]
  17: va_start(va, fmt);  [in qedi_dbg_err()]
  20: vaf.va = &va;  [in qedi_dbg_err()]
  28: va_end(va);  [in qedi_dbg_err()]
  35: va_list va;  [in qedi_dbg_warn(), local]
  38: va_start(va, fmt);  [in qedi_dbg_warn()]
  41: vaf.va = &va;  [in qedi_dbg_warn()]
  53: va_end(va);  [in qedi_dbg_warn()]
  60: va_list va;  [in qedi_dbg_notice(), local]
  63: va_start(va, fmt);  [in qedi_dbg_notice()]
  [all …]
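All three qedi_dbg_* helpers follow the kernel's struct va_format / %pV idiom: the va_list is packed into a va_format and formatted exactly once by printk at the final sink. A minimal sketch of that idiom (the helper name is hypothetical, not qedi code):

```c
#include <linux/printk.h>
#include <linux/stdarg.h>

/* Hypothetical helper mirroring the qedi_dbg_* shape.  The va_list
 * must stay live until printk has consumed %pV, so va_end() runs
 * only after pr_err() returns. */
static void example_dbg_err(const char *fmt, ...)
{
	struct va_format vaf;
	va_list va;

	va_start(va, fmt);
	vaf.fmt = fmt;
	vaf.va = &va;
	pr_err("%pV", &vaf);
	va_end(va);
}
```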
/linux/mm/vmalloc.c
  885: * All vmap_area objects in this tree are sorted by va->va_start
  1044: va_size(struct vmap_area *va)  [in va_size(), argument]
  1046: return (va->va_end - va->va_start);  [in va_size()]
  1052: struct vmap_area *va;  [in get_subtree_max_size(), local]
  1054: va = rb_entry_safe(node, struct vmap_area, rb_node);  [in get_subtree_max_size()]
  1055: return va ? va->subtree_max_size : 0;  [in get_subtree_max_size()]
  1081: struct vmap_area *va;  [in __find_vmap_area(), local]
  1083: va = rb_entry(n, struct vmap_area, rb_node);  [in __find_vmap_area()]
  1084: if (addr < va->va_start)  [in __find_vmap_area()]
  1086: else if (addr >= va->va_end)  [in __find_vmap_area()]
  [all …]
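The __find_vmap_area() hits show the core lookup: a binary search over non-overlapping [va_start, va_end) intervals keyed by start address, descending an rb-tree. A user-space analogue of that search (types and names hypothetical), assuming the same non-overlap and sort invariants:

```c
#include <stddef.h>

/* Analogue of __find_vmap_area(): walk a binary search tree of
 * non-overlapping [start, end) ranges ordered by start address. */
struct area {
	unsigned long start, end;	/* half-open: [start, end) */
	struct area *left, *right;
};

static struct area *find_area(struct area *node, unsigned long addr)
{
	while (node) {
		if (addr < node->start)
			node = node->left;
		else if (addr >= node->end)
			node = node->right;
		else
			return node;	/* start <= addr < end: hit */
	}
	return NULL;
}
```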
/linux/drivers/gpu/drm/drm_gpuvm.c
  38: * The DRM GPU VA Manager, represented by struct drm_gpuvm keeps track of a
  39: * GPU's virtual address (VA) space and manages the corresponding virtual
  44: * all existing GPU VA mappings using this &drm_gem_object as backing buffer.
  49: * The GPU VA manager internally uses a rb-tree to manage the
  53: * portion of VA space reserved by the kernel. This node is initialized together
  54: * with the GPU VA manager instance and removed when the GPU VA manager is
  107: * Besides its capability to manage and represent a GPU VA space, the
  108: * GPU VA manager also provides functions to let the &drm_gpuvm calculate a
  111: * Therefore the DRM GPU VA manager provides an algorithm implementing splitting
  112: * and merging of existing GPU VA mappings with the ones that are requested to
  [all …]
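The split/merge machinery these comments describe is, at bottom, interval arithmetic: a newly requested mapping that overlaps an existing one leaves at most one remainder below it and one above it. A driver-agnostic sketch of that computation (hypothetical types, not the drm_gpuva API), assuming the request actually overlaps the existing mapping:

```c
/* Pieces of an existing GPU VA mapping [addr, addr + range) that
 * survive when a new request [req, req + req_range) is mapped over
 * it.  A zero range means the piece does not exist. */
struct piece {
	unsigned long addr;
	unsigned long range;
};

static void split_mapping(unsigned long addr, unsigned long range,
			  unsigned long req, unsigned long req_range,
			  struct piece *prev, struct piece *next)
{
	unsigned long end = addr + range;
	unsigned long req_end = req + req_range;

	/* Remainder below the request, if any. */
	prev->addr = addr;
	prev->range = (req > addr && req < end) ? req - addr : 0;

	/* Remainder above the request, if any. */
	next->addr = req_end;
	next->range = (req_end > addr && req_end < end) ? end - req_end : 0;
}
```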
/linux/drivers/iommu/generic_pt/pt_iter.h
  24: * Check that VA and last_va fall within the permitted range of VAs. If the
  36: prefix = fvalog2_div(range->va, range->max_vasz_lg2 - 1) ?  [in pt_check_range()]
  43: if (!fvalog2_div_eq(range->va, prefix, range->max_vasz_lg2) ||  [in pt_check_range()]
  50: * pt_index_to_va() - Update range->va to the current pts->index
  53: * Adjust range->va to match the current index. This is done in a lazy manner
  54: * since computing the VA takes several instructions and is rarely required.
  61: pts->range->va = fvalog2_set_mod(pts->range->va, lower_va,  [in pt_index_to_va()]
  66: * Add index_count_lg2 number of entries to pts's VA and index. The VA will be
  91: if (log2_mod(pts->range->va, oasz_lg2))  [in pt_entry_fully_covered()]
  95: if (!log2_div_eq(range->va, range->last_va, oasz_lg2))  [in pt_entry_fully_covered()]
  [all …]
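pt_check_range() uses the fvalog2_* helpers to confirm that both range->va and range->last_va share the address-space prefix permitted by max_vasz_lg2 bits. Stripped of the sign-extended upper-half handling the real code performs, the check reduces to a bounds comparison; a simplified plain-C stand-in (an assumption-laden sketch, not the generic_pt helpers):

```c
#include <stdbool.h>

/* Simplified stand-in for pt_check_range(): both ends of the walk
 * must fit in a max_vasz_lg2-bit address space and form a non-empty
 * range.  The real code also handles a sign-extended upper half,
 * which this sketch deliberately omits. */
static bool check_range(unsigned long long va, unsigned long long last_va,
			unsigned int max_vasz_lg2)
{
	unsigned long long limit = max_vasz_lg2 >= 64 ?
		~0ULL : (1ULL << max_vasz_lg2) - 1;

	return va <= last_va && last_va <= limit;
}
```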
/linux/drivers/iommu/generic_pt/kunit_generic_pt.h
  11: static void do_map(struct kunit *test, pt_vaddr_t va, pt_oaddr_t pa,  [in do_map(), argument]
  19: ret = iommu_map(&priv->domain, va, pa, len, IOMMU_READ | IOMMU_WRITE,  [in do_map()]
  48: * If we were able to use the full VA space this should always be the  [in __check_all_levels()]
  119: range.last_va = fvalog2_set_mod_max(range.va,  [in check_all_levels()]
  123: * Map a page at the highest VA, this will populate all the levels so we  [in check_all_levels()]
  128: range.va = range.last_va - (priv->smallest_pgsz - 1);  [in check_all_levels()]
  129: do_map(test, range.va, 0, priv->smallest_pgsz);  [in check_all_levels()]
  131: range = pt_make_range(priv->common, range.va, range.last_va);  [in check_all_levels()]
  189: static unsigned int ref_best_pgsize(pt_vaddr_t pgsz_bitmap, pt_vaddr_t va,  [in ref_best_pgsize(), argument]
  197: log2_mod(va, pgsz_lg2) == 0 &&  [in ref_best_pgsize()]
  [all …]
/linux/drivers/scsi/qedf/qedf_dbg.c
  13: va_list va;  [in qedf_dbg_err(), local]
  16: va_start(va, fmt);  [in qedf_dbg_err()]
  19: vaf.va = &va;  [in qedf_dbg_err()]
  27: va_end(va);  [in qedf_dbg_err()]
  34: va_list va;  [in qedf_dbg_warn(), local]
  37: va_start(va, fmt);  [in qedf_dbg_warn()]
  40: vaf.va = &va;  [in qedf_dbg_warn()]
  52: va_end(va);  [in qedf_dbg_warn()]
  59: va_list va;  [in qedf_dbg_notice(), local]
  62: va_start(va, fmt);  [in qedf_dbg_notice()]
  [all …]
/linux/include/drm/drm_gpuvm.h
  67: * struct drm_gpuva - structure to track a GPU VA mapping
  69: * This structure represents a GPU VA mapping and is associated with a
  92: * @va: structure containing the address and range of the &drm_gpuva
  96: * @va.addr: the start address
  104: } va;  [member]
  151: int drm_gpuva_insert(struct drm_gpuvm *gpuvm, struct drm_gpuva *va);
  152: void drm_gpuva_remove(struct drm_gpuva *va);
  154: void drm_gpuva_link(struct drm_gpuva *va, struct drm_gpuvm_bo *vm_bo);
  155: void drm_gpuva_unlink(struct drm_gpuva *va);
  156: void drm_gpuva_unlink_defer(struct drm_gpuva *va);
  [all …]
/linux/lib/test_debug_virtual.c
  25: void *va;  [in test_debug_virtual_init(), local]
  27: va = (void *)VMALLOC_START;  [in test_debug_virtual_init()]
  28: pa = virt_to_phys(va);  [in test_debug_virtual_init()]
  30: pr_info("PA: %pa for VA: 0x%lx\n", &pa, (unsigned long)va);  [in test_debug_virtual_init()]
  37: va = foo;  [in test_debug_virtual_init()]
  38: pr_info("PA: %pa for VA: 0x%lx\n", &pa, (unsigned long)va);  [in test_debug_virtual_init()]
/linux/tools/testing/selftests/kvm/lib/ucall_common.c
  86: va_list va;  [in ucall_assert(), local]
  95: va_start(va, fmt);  [in ucall_assert()]
  96: guest_vsnprintf(uc->buffer, UCALL_BUFFER_LEN, fmt, va);  [in ucall_assert()]
  97: va_end(va);  [in ucall_assert()]
  107: va_list va;  [in ucall_fmt(), local]
  112: va_start(va, fmt);  [in ucall_fmt()]
  113: guest_vsnprintf(uc->buffer, UCALL_BUFFER_LEN, fmt, va);  [in ucall_fmt()]
  114: va_end(va);  [in ucall_fmt()]
  124: va_list va;  [in ucall(), local]
  133: va_start(va, nargs);  [in ucall()]
  [all …]
/linux/drivers/dio/dio.c
  127: void *va;  [in dio_find(), local]
  139: va = (void *)(pa + DIO_VIRADDRBASE);  [in dio_find()]
  141: va = ioremap(pa, PAGE_SIZE);  [in dio_find()]
  144: (unsigned char *)va + DIO_IDOFF, 1)) {  [in dio_find()]
  146: iounmap(va);  [in dio_find()]
  150: prid = DIO_ID(va);  [in dio_find()]
  153: secid = DIO_SECID(va);  [in dio_find()]
  160: iounmap(va);  [in dio_find()]
  200: u_char *va;  [in dio_init(), local]
  212: va = (void *)(pa + DIO_VIRADDRBASE);  [in dio_init()]
  [all …]
/linux/arch/riscv/mm/init.c
  428: static inline phys_addr_t __init alloc_pte_early(uintptr_t va)  [in alloc_pte_early(), argument]
  437: static inline phys_addr_t __init alloc_pte_fixmap(uintptr_t va)  [in alloc_pte_fixmap(), argument]
  442: static phys_addr_t __meminit alloc_pte_late(uintptr_t va)  [in alloc_pte_late(), argument]
  455: static void __meminit create_pte_mapping(pte_t *ptep, uintptr_t va, phys_addr_t pa, phys_addr_t sz,  [in create_pte_mapping(), argument]
  458: uintptr_t pte_idx = pte_index(va);  [in create_pte_mapping()]
  515: static phys_addr_t __init alloc_pmd_early(uintptr_t va)  [in alloc_pmd_early(), argument]
  517: BUG_ON((va - kernel_map.virt_addr) >> PUD_SHIFT);  [in alloc_pmd_early()]
  522: static phys_addr_t __init alloc_pmd_fixmap(uintptr_t va)  [in alloc_pmd_fixmap(), argument]
  527: static phys_addr_t __meminit alloc_pmd_late(uintptr_t va)  [in alloc_pmd_late(), argument]
  537: uintptr_t va, phys_addr_t pa,  [in create_pmd_mapping(), argument]
  [all …]
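create_pte_mapping() and create_pmd_mapping() index each table level with pte_index()/pmd_index(), which on riscv Sv39 are fixed 9-bit slices of the VA above the 12-bit page offset. A standalone worked example of that slicing (an illustration, not kernel code):

```c
#include <stdio.h>

/* Sv39: page offset = VA bits 11:0; VPN[0] = bits 20:12,
 * VPN[1] = bits 29:21, VPN[2] = bits 38:30.  Each level's table
 * holds 2^9 = 512 entries. */
#define PAGE_SHIFT	12
#define PT_SHIFT	9
#define PT_MASK		((1 << PT_SHIFT) - 1)

static unsigned int vpn(unsigned long long va, int level)
{
	return (va >> (PAGE_SHIFT + level * PT_SHIFT)) & PT_MASK;
}

int main(void)
{
	unsigned long long va = 0xffffffc800123000ULL;	/* arbitrary example VA */

	printf("VPN[2]=%u VPN[1]=%u VPN[0]=%u offset=0x%llx\n",
	       vpn(va, 2), vpn(va, 1), vpn(va, 0), va & 0xfffULL);
	return 0;
}
```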
/linux/fs/ceph/ceph_frag.c
  10: unsigned va = ceph_frag_value(a);  [in ceph_frag_compare(), local]
  12: if (va < vb)  [in ceph_frag_compare()]
  14: if (va > vb)  [in ceph_frag_compare()]
  16: va = ceph_frag_bits(a);  [in ceph_frag_compare()]
  18: if (va < vb)  [in ceph_frag_compare()]
  20: if (va > vb)  [in ceph_frag_compare()]
/linux/Documentation/devicetree/bindings/sound/cs42l56.txt
  9: - VA-supply, VCP-supply, VLDO-supply : power supplies for the device,
  31: 0 = 0.5 x VA
  32: 1 = 0.6 x VA
  33: 2 = 0.7 x VA
  34: 3 = 0.8 x VA
  35: 4 = 0.83 x VA
  36: 5 = 0.91 x VA
  62: VA-supply = <&reg_audio>;
/linux/Documentation/devicetree/bindings/sound/cs42l52.txt
  30: 0 = 0.5 x VA
  31: 1 = 0.6 x VA
  32: 2 = 0.7 x VA
  33: 3 = 0.8 x VA
  34: 4 = 0.83 x VA
  35: 5 = 0.91 x VA
/linux/arch/riscv/include/asm/sections.h
  18: static inline bool is_va_kernel_text(uintptr_t va)  [in is_va_kernel_text(), argument]
  23: return va >= start && va < end;  [in is_va_kernel_text()]
  26: static inline bool is_va_kernel_lm_alias_text(uintptr_t va)  [in is_va_kernel_lm_alias_text(), argument]
  31: return va >= start && va < end;  [in is_va_kernel_lm_alias_text()]
/linux/drivers/infiniband/hw/usnic/usnic_uiom.c
  59: usnic_err("Device %s iommu fault domain 0x%p va 0x%lx flags 0x%x\n",  [in usnic_uiom_dma_fault()]
  166: usnic_dbg("va: 0x%lx pa: %pa\n",  [in usnic_uiom_get_pages()]
  194: long unsigned va, size;  [in usnic_uiom_unmap_sorted_intervals(), local]
  197: va = interval->start << PAGE_SHIFT;  [in usnic_uiom_unmap_sorted_intervals()]
  201: usnic_dbg("va 0x%lx size 0x%lx", va, PAGE_SIZE);  [in usnic_uiom_unmap_sorted_intervals()]
  202: iommu_unmap(pd->domain, va, PAGE_SIZE);  [in usnic_uiom_unmap_sorted_intervals()]
  203: va += PAGE_SIZE;  [in usnic_uiom_unmap_sorted_intervals()]
  220: vpn_start = (uiomr->va & PAGE_MASK) >> PAGE_SHIFT;  [in __usnic_uiom_reg_release()]
  251: long int va = uiomr->va & PAGE_MASK;  [in usnic_uiom_map_sorted_intervals(), local]
  259: for (i = 0; i < chunk->nents; i++, va += PAGE_SIZE) {  [in usnic_uiom_map_sorted_intervals()]
  [all …]
/linux/drivers/gpu/drm/panthor/panthor_mmu.c
  176: /** @va: Virtual range targeted by the VM operation. */
  178: /** @va.addr: Start address. */
  181: /** @va.range: Range size. */
  183: } va;  [member]
  202: * @map.new_vma: The new VMA object that will be inserted to the VA tree.
  229: * We delegate all the VA management to the common drm_gpuvm framework
  278: * @mm: Memory management object representing the auto-VA/kernel-VA.
  280: * Used to auto-allocate VA space for kernel-managed objects (tiler
  283: * For the MCU VM, this is managing the VA range that's used to map
  287: * exceed half of the VA space addressable.
  [all …]
/linux/drivers/misc/vmw_vmci/vmci_queue_pair.h
  35: u64 ppn_va; /* Start VA of queue pair PPNs. */
  44: u64 va; /* Start VA of queue pair PPNs. */  [member]
  54: * pass down the VA of the mapped file. Before host support was added
  59: * provide the VA of the mapped files.
  80: u64 produce_page_file; /* User VA. */
  81: u64 consume_page_file; /* User VA. */
  86: u64 produce_va; /* User VA of the mapped file. */
  87: u64 consume_va; /* User VA of the mapped file. */
  101: * this is a list of PPNs, and on hosted, it is a user VA where the
/linux/drivers/media/platform/mediatek/vcodec/common/mtk_vcodec_util.c
  66: mem->va = dma_alloc_attrs(&plat_dev->dev, mem->size, &mem->dma_addr,  [in mtk_vcodec_mem_alloc()]
  68: if (!mem->va) {  [in mtk_vcodec_mem_alloc()]
  74: mtk_v4l2_debug(plat_dev, 3, "[%d] - va = %p dma = 0x%lx size = 0x%zx", id, mem->va,  [in mtk_vcodec_mem_alloc()]
  99: if (!mem->va) {  [in mtk_vcodec_mem_free()]
  100: mtk_v4l2_err(plat_dev, "%s: Tried to free a NULL VA", __func__);  [in mtk_vcodec_mem_free()]
  106: mtk_v4l2_debug(plat_dev, 3, "[%d] - va = %p dma = 0x%lx size = 0x%zx", id, mem->va,  [in mtk_vcodec_mem_free()]
  109: dma_free_coherent(&plat_dev->dev, mem->size, mem->va, mem->dma_addr);  [in mtk_vcodec_mem_free()]
  110: mem->va = NULL;  [in mtk_vcodec_mem_free()]
/linux/drivers/infiniband/sw/rxe/rxe_mr.c
  252: void *va;  [in rxe_mr_copy_xarray(), local]
  261: va = kmap_local_page(page);  [in rxe_mr_copy_xarray()]
  263: memcpy(addr, va + page_offset, bytes);  [in rxe_mr_copy_xarray()]
  265: memcpy(va + page_offset, addr, bytes);  [in rxe_mr_copy_xarray()]
  266: kunmap_local(va);  [in rxe_mr_copy_xarray()]
  283: u8 *va;  [in rxe_mr_copy_dma(), local]
  289: va = kmap_local_page(page);  [in rxe_mr_copy_dma()]
  292: memcpy(va + page_offset, addr, bytes);  [in rxe_mr_copy_dma()]
  294: memcpy(addr, va + page_offset, bytes);  [in rxe_mr_copy_dma()]
  296: kunmap_local(va);  [in rxe_mr_copy_dma()]
  [all …]
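Both rxe_mr_copy_xarray() and rxe_mr_copy_dma() use the kmap_local_page()/kunmap_local() pairing: a short-lived, CPU-local mapping of a (possibly highmem) page that is valid only in the acquiring context. A minimal sketch of the pattern (helper name hypothetical):

```c
#include <linux/highmem.h>
#include <linux/string.h>

/* Hypothetical helper: copy len bytes out of a page through a
 * short-lived local mapping.  kunmap_local() must run in the same
 * context that mapped the page, unmapping in reverse order of any
 * nested kmaps. */
static void copy_from_page(struct page *page, unsigned int offset,
			   void *dst, unsigned int len)
{
	void *va = kmap_local_page(page);

	memcpy(dst, va + offset, len);
	kunmap_local(va);
}
```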
/linux/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp9_if.c
  31: * @va : cpu address
  37: unsigned long va;  [member]
  227: if (fb->base_y.va == addr) {  [in vp9_rm_from_fb_use_list()]
  278: vsi->frm_bufs[ref_idx].buf.fb->base_y.va);  [in vp9_ref_cnt_fb()]
  296: if (vsi->sf_ref_fb[i].fb.base_y.va) {  [in vp9_free_all_sf_ref_fb()]
  321: if (vsi->sf_ref_fb[idx].fb.base_y.va &&  [in vp9_get_sf_ref_fb()]
  330: if (vsi->sf_ref_fb[idx].fb.base_y.va == NULL)  [in vp9_get_sf_ref_fb()]
  391: if (mem->va)  [in vp9_alloc_work_buf()]
  402: /* Set the va again */  [in vp9_alloc_work_buf()]
  403: vsi->mv_buf.va = (unsigned long)mem->va;  [in vp9_alloc_work_buf()]
  [all …]
/linux/arch/parisc/kernel/entry.S
  176: va = r8 /* virtual address for which the trap occurred */  [define]
  189: mfctl %pcoq, va
  206: mfctl %pcoq, va
  220: mfctl %ior,va
  238: mfctl %ior,va
  252: mfctl %ior, va
  270: mfctl %ior, va
  282: mfctl %ior,va
  298: mfctl %ior,va
  312: mfctl %ior,va
  [all …]
/linux/arch/arm64/crypto/aes-ce-ccm-core.S
  26: .macro dround, va, vb, vk
  27: aese \va\().16b, \vk\().16b
  28: aesmc \va\().16b, \va\().16b
  33: .macro aes_encrypt, va, vb, nr
  35: dround \va, \vb, v10
  36: dround \va, \vb, v11
  38: dround \va, \vb, v12
  39: dround \va, \vb, v13
  41: dround \va, \vb, \v
  43: aese \va\().16b, v4.16b
/linux/drivers/tee/amdtee/shm_pool.c
  15: unsigned long va;  [in pool_op_alloc(), local]
  22: va = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);  [in pool_op_alloc()]
  23: if (!va)  [in pool_op_alloc()]
  26: shm->kaddr = (void *)va;  [in pool_op_alloc()]
  27: shm->paddr = __psp_pa((void *)va);  [in pool_op_alloc()]
  33: free_pages(va, order);  [in pool_op_alloc()]
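pool_op_alloc() converts the requested size to a page order and allocates whole pages; the matching free_pages() must be passed the same order. A minimal sketch of that pairing (names hypothetical, not the amdtee code):

```c
#include <linux/gfp.h>
#include <linux/mm.h>

/* Hypothetical alloc/free pair mirroring the shm_pool shape:
 * get_order() rounds size up to a power-of-two number of pages, and
 * the identical order must be handed back to free_pages(). */
static unsigned long buf_alloc(size_t size)
{
	return __get_free_pages(GFP_KERNEL | __GFP_ZERO, get_order(size));
}

static void buf_free(unsigned long va, size_t size)
{
	if (va)
		free_pages(va, get_order(size));
}
```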