| /linux/drivers/firmware/efi/ |
| unaccepted_memory.c |
|    36  unsigned long range_start, range_end;  in accept_memory() local
|   126  range_start = range.start;  in accept_memory()
|   127  for_each_set_bitrange_from(range_start, range_end, unaccepted->bitmap,  in accept_memory()
|   130  unsigned long len = range_end - range_start;  in accept_memory()
|   132  phys_start = range_start * unit_size + unaccepted->phys_base;  in accept_memory()
|   150  bitmap_clear(unaccepted->bitmap, range_start, len);  in accept_memory()
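The accept_memory() snippet above shows a common kernel idiom: walk runs of set bits in a bitmap, convert each run to a physical address range via a unit size and base, then clear the bits that were handled. Below is a minimal userspace C sketch of that pattern; the helper names (walk_set_ranges, test_bit, clear_bits) are hypothetical stand-ins for the kernel's for_each_set_bitrange_from() and bitmap_clear().

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define NBITS 64

    static bool test_bit(const uint64_t *map, unsigned long i)
    {
            return map[i / 64] >> (i % 64) & 1;
    }

    static void clear_bits(uint64_t *map, unsigned long start, unsigned long len)
    {
            for (unsigned long i = start; i < start + len; i++)
                    map[i / 64] &= ~(1ULL << (i % 64));
    }

    static void walk_set_ranges(uint64_t *map, uint64_t phys_base,
                                uint64_t unit_size)
    {
            unsigned long range_start = 0, range_end;

            while (range_start < NBITS) {
                    /* find the start of the next run of set bits */
                    while (range_start < NBITS && !test_bit(map, range_start))
                            range_start++;
                    if (range_start == NBITS)
                            break;
                    /* find the (exclusive) end of the run */
                    range_end = range_start;
                    while (range_end < NBITS && test_bit(map, range_end))
                            range_end++;

                    unsigned long len = range_end - range_start;
                    uint64_t phys_start = range_start * unit_size + phys_base;

                    printf("accept [%#llx, %#llx)\n",
                           (unsigned long long)phys_start,
                           (unsigned long long)(phys_start + len * unit_size));
                    clear_bits(map, range_start, len);
                    range_start = range_end;
            }
    }

    int main(void)
    {
            uint64_t bitmap[1] = { 0x0F0F };        /* bits 0-3 and 8-11 set */

            walk_set_ranges(bitmap, 0x100000, 0x200000);    /* 2 MiB units */
            return 0;
    }

The kernel macro additionally resumes from the caller-supplied start bit, which is why range_start is both input and output in the snippet above.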
|
| /linux/drivers/firmware/efi/libstub/ |
| unaccepted_memory.c |
|   182  unsigned long range_start, range_end;  in accept_memory() local
|   209  range_start = start / unit_size;  in accept_memory()
|   212  for_each_set_bitrange_from(range_start, range_end,  in accept_memory()
|   216  phys_start = range_start * unit_size + unaccepted_table->phys_base;  in accept_memory()
|   221  range_start, range_end - range_start);  in accept_memory()
|
| /linux/drivers/infiniband/hw/hfi1/ |
| fault.c |
|   111  unsigned long range_start, range_end, i;  in fault_opcodes_write() local
|   125  if (kstrtoul(token, 0, &range_start))  in fault_opcodes_write()
|   132  range_end = range_start;  in fault_opcodes_write()
|   134  if (range_start == range_end && range_start == -1UL) {  in fault_opcodes_write()
|   140  if (range_start >= bound || range_end >= bound)  in fault_opcodes_write()
|   143  for (i = range_start; i <= range_end; i++) {  in fault_opcodes_write()
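fault_opcodes_write() parses a comma-separated list of opcode tokens, where each token is either a single value or a start-end pair, and bounds-checks both ends before looping over the range. A hedged sketch of that parse, using standard C strtoul()/strtok() in place of the kernel's kstrtoul() and strsep(); parse_range() is a hypothetical helper:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Parse "N" or "N-M" into an inclusive range; reject out-of-bound values. */
    static int parse_range(char *token, unsigned long bound,
                           unsigned long *range_start, unsigned long *range_end)
    {
            char *dash = strchr(token, '-');

            if (dash)
                    *dash++ = '\0';
            *range_start = strtoul(token, NULL, 0);
            *range_end = dash ? strtoul(dash, NULL, 0) : *range_start;

            if (*range_start >= bound || *range_end >= bound ||
                *range_start > *range_end)
                    return -1;
            return 0;
    }

    int main(void)
    {
            char buf[] = "3,7-9,12";
            unsigned long range_start, range_end, i;

            for (char *tok = strtok(buf, ","); tok; tok = strtok(NULL, ",")) {
                    if (parse_range(tok, 256, &range_start, &range_end))
                            continue;
                    for (i = range_start; i <= range_end; i++)
                            printf("enable fault on opcode %lu\n", i);
            }
            return 0;
    }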
|
| /linux/drivers/gpu/drm/amd/amdkfd/ |
| kfd_doorbell.c |
|   212  int range_start = dev->shared_resources.non_cp_doorbells_start;  in init_doorbell_bitmap() local
|   219  pr_debug("reserved doorbell 0x%03x - 0x%03x\n", range_start, range_end);  in init_doorbell_bitmap()
|   221  range_start + KFD_QUEUE_DOORBELL_MIRROR_OFFSET,  in init_doorbell_bitmap()
|   225  if (i >= range_start && i <= range_end) {  in init_doorbell_bitmap()
|
| kfd_device.c |
|  1332  (*mem_obj)->range_start = found;  in kfd_gtt_sa_allocate()
|  1384  (*mem_obj)->range_start, (*mem_obj)->range_end);  in kfd_gtt_sa_allocate()
|  1387  bitmap_set(kfd->gtt_sa_bitmap, (*mem_obj)->range_start,  in kfd_gtt_sa_allocate()
|  1388  (*mem_obj)->range_end - (*mem_obj)->range_start + 1);  in kfd_gtt_sa_allocate()
|  1410  mem_obj, mem_obj->range_start, mem_obj->range_end);  in kfd_gtt_sa_free()
|  1415  bitmap_clear(kfd->gtt_sa_bitmap, mem_obj->range_start,  in kfd_gtt_sa_free()
|  1416  mem_obj->range_end - mem_obj->range_start + 1);  in kfd_gtt_sa_free()
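In kfd_gtt_sa_allocate()/kfd_gtt_sa_free(), range_start and range_end are both inclusive chunk indices, so every bitmap_set()/bitmap_clear() length is range_end - range_start + 1. A small sketch of that inclusive bookkeeping, with a byte-per-chunk array standing in for the kernel bitmap API:

    #include <stdio.h>

    #define NCHUNKS 32

    struct mem_obj {
            unsigned int range_start;       /* first chunk, inclusive */
            unsigned int range_end;         /* last chunk, inclusive */
    };

    static unsigned char bitmap[NCHUNKS];   /* one byte per chunk for clarity */

    static void bitmap_set_range(unsigned int start, unsigned int len)
    {
            for (unsigned int i = start; i < start + len; i++)
                    bitmap[i] = 1;
    }

    static void bitmap_clear_range(unsigned int start, unsigned int len)
    {
            for (unsigned int i = start; i < start + len; i++)
                    bitmap[i] = 0;
    }

    static void sa_allocate(struct mem_obj *obj, unsigned int first,
                            unsigned int last)
    {
            obj->range_start = first;
            obj->range_end = last;
            /* inclusive bounds: length is end - start + 1 */
            bitmap_set_range(obj->range_start,
                             obj->range_end - obj->range_start + 1);
    }

    static void sa_free(struct mem_obj *obj)
    {
            bitmap_clear_range(obj->range_start,
                               obj->range_end - obj->range_start + 1);
    }

    int main(void)
    {
            struct mem_obj obj;

            sa_allocate(&obj, 4, 7);        /* marks chunks 4,5,6,7 busy */
            printf("allocated chunks %u-%u\n", obj.range_start, obj.range_end);
            sa_free(&obj);
            return 0;
    }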
|
| /linux/net/bridge/ |
| br_vlan.c |
|  1992  struct net_bridge_vlan *v, *range_start = NULL, *range_end = NULL;  in br_vlan_dump_dev() local
|  2045  if (!range_start) {  in br_vlan_dump_dev()
|  2046  range_start = v;  in br_vlan_dump_dev()
|  2054  if (!br_vlan_global_opts_fill(skb, range_start->vid,  in br_vlan_dump_dev()
|  2056  range_start)) {  in br_vlan_dump_dev()
|  2061  idx += range_end->vid - range_start->vid + 1;  in br_vlan_dump_dev()
|  2063  range_start = v;  in br_vlan_dump_dev()
|  2066  u16 vlan_flags = br_vlan_flags(range_start, pvid);  in br_vlan_dump_dev()
|  2068  if (!br_vlan_fill_vids(skb, range_start->vid,  in br_vlan_dump_dev()
|  2069  range_end->vid, range_start,  in br_vlan_dump_dev()
|  [all …]
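br_vlan_dump_dev() compresses consecutive, identically-configured VLANs into start..end ranges so a netlink dump emits one message per run instead of one per VID. A minimal sketch of that coalescing loop; can_merge() and emit_range() are hypothetical stand-ins for the kernel's flag comparison and br_vlan_fill_vids():

    #include <stdbool.h>
    #include <stdio.h>

    struct vlan {
            unsigned short vid;
            unsigned int flags;
    };

    /* Mergeable only if the VID is consecutive and the config matches. */
    static bool can_merge(const struct vlan *end, const struct vlan *v)
    {
            return v->vid == end->vid + 1 && v->flags == end->flags;
    }

    static void emit_range(const struct vlan *start, const struct vlan *end)
    {
            printf("vlan %u-%u flags %#x\n", start->vid, end->vid, start->flags);
    }

    int main(void)
    {
            struct vlan vlans[] = {
                    { 10, 0 }, { 11, 0 }, { 12, 0 }, { 20, 0 }, { 21, 4 },
            };
            const struct vlan *range_start = NULL, *range_end = NULL;

            for (size_t i = 0; i < sizeof(vlans) / sizeof(vlans[0]); i++) {
                    const struct vlan *v = &vlans[i];

                    if (!range_start) {
                            range_start = range_end = v;
                    } else if (can_merge(range_end, v)) {
                            range_end = v;          /* extend the run */
                    } else {
                            emit_range(range_start, range_end);
                            range_start = range_end = v;
                    }
            }
            if (range_start)
                    emit_range(range_start, range_end);     /* flush last run */
            return 0;
    }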
|
| br_vlan_options.c |
|   265  struct net_bridge_vlan *range_start,  in br_vlan_process_options() argument
|   280  if (!range_start || !br_vlan_should_use(range_start)) {  in br_vlan_process_options()
|   290  for (vid = range_start->vid; vid <= range_end->vid; vid++) {  in br_vlan_process_options()
|
| /linux/fs/btrfs/ |
| fiemap.c |
|   642  u64 range_start;  in extent_fiemap() local
|   660  range_start = round_down(start, sectorsize);  in extent_fiemap()
|   662  prev_extent_end = range_start;  in extent_fiemap()
|   664  btrfs_lock_extent(&inode->io_tree, range_start, range_end, &cached_state);  in extent_fiemap()
|   672  ret = fiemap_search_slot(inode, path, range_start);  in extent_fiemap()
|   707  if (extent_end <= range_start)  in extent_fiemap()
|   844  btrfs_unlock_extent(&inode->io_tree, range_start, range_end, &cached_state);  in extent_fiemap()
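extent_fiemap() aligns the caller's byte offset down to the filesystem sector size before locking, so range_start always covers whole sectors. A small sketch of that alignment, with round_down() mirroring the kernel macro for power-of-two sizes:

    #include <stdint.h>
    #include <stdio.h>

    /* Power-of-two alignment: clear the low bits below y. */
    #define round_down(x, y) ((x) & ~((uint64_t)(y) - 1))

    int main(void)
    {
            uint64_t sectorsize = 4096;
            uint64_t start = 10000;
            uint64_t range_start = round_down(start, sectorsize);

            printf("start %llu -> range_start %llu\n",
                   (unsigned long long)start, (unsigned long long)range_start);
            return 0;       /* prints: start 10000 -> range_start 8192 */
    }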
|
| extent_io.c |
|   324  u64 range_start;  in lock_delalloc_folios() local
|   335  range_start = max_t(u64, folio_pos(folio), start);  in lock_delalloc_folios()
|   336  range_len = min_t(u64, folio_next_pos(folio), end + 1) - range_start;  in lock_delalloc_folios()
|   337  btrfs_folio_set_lock(fs_info, folio, range_start, range_len);  in lock_delalloc_folios()
|   339  processed_end = range_start + range_len - 1;  in lock_delalloc_folios()
|  2231  u64 range_start = max_t(u64, eb->start, folio_pos(folio));  in write_one_eb() local
|  2233  eb->start + eb->len) - range_start;  in write_one_eb()
|  2241  offset_in_folio(folio, range_start));  in write_one_eb()
|  2315  index = (wbc->range_start >> fs_info->nodesize_bits);  in btree_write_cache_pages()
|  2466  index = wbc->range_start >> PAGE_SHIFT;  in extent_write_cache_pages()
|  [all …]
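lock_delalloc_folios() clamps the global delalloc range [start, end] to each folio: range_start is the later of the folio's first byte and start, and the length runs to the earlier of the folio's end and end + 1. A sketch of that intersection arithmetic, modeling folio_pos()/folio_size() as plain fields rather than the kernel helpers:

    #include <stdint.h>
    #include <stdio.h>

    struct folio {
            uint64_t pos;   /* first byte covered by the folio */
            uint64_t size;  /* bytes covered, e.g. 4096 */
    };

    /* Intersect the folio's byte span with the inclusive range [start, end]. */
    static void clamp_to_folio(const struct folio *folio, uint64_t start,
                               uint64_t end)
    {
            uint64_t folio_end = folio->pos + folio->size;  /* exclusive */
            uint64_t range_start = folio->pos > start ? folio->pos : start;
            uint64_t range_end = folio_end < end + 1 ? folio_end : end + 1;
            uint64_t range_len = range_end - range_start;

            printf("folio @%llu: lock [%llu, %llu) len %llu\n",
                   (unsigned long long)folio->pos,
                   (unsigned long long)range_start,
                   (unsigned long long)range_end,
                   (unsigned long long)range_len);
    }

    int main(void)
    {
            struct folio folio = { .pos = 4096, .size = 4096 };

            clamp_to_folio(&folio, 5000, 12287);    /* range straddles the folio */
            return 0;
    }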
|
| /linux/arch/arm/mm/ |
| pageattr.c |
|    29  unsigned long range_start, unsigned long range_end)  in range_in_range() argument
|    31  return start >= range_start && start < range_end &&  in range_in_range()
|
| /linux/arch/powerpc/sysdev/ |
| fsl_rio.c |
|   456  u64 range_start;  in fsl_rio_setup() local
|   530  if (of_property_read_reg(np, 0, &range_start, NULL)) {  in fsl_rio_setup()
|   537  (u32)range_start);  in fsl_rio_setup()
|   555  if (of_property_read_reg(np, 0, &range_start, NULL)) {  in fsl_rio_setup()
|   561  pw->pw_regs = (struct rio_pw_regs *)(rmu_regs_win + (u32)range_start);  in fsl_rio_setup()
|   689  rio_law_start = range_start;  in fsl_rio_setup()
|
| /linux/arch/powerpc/platforms/powernv/ |
| ocxl.c |
|   225  u16 actag_count, range_start = 0, total_desired = 0;  in assign_actags() local
|   236  link->fn_actags[i].start = range_start;  in assign_actags()
|   238  range_start += actag_count;  in assign_actags()
|   239  WARN_ON(range_start >= PNV_OCXL_ACTAG_MAX);  in assign_actags()
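assign_actags() hands each function a contiguous acTag range starting where the previous one ended, i.e. a running prefix sum over the per-function counts, with a warning if the total overflows the hardware maximum. A sketch under those assumptions; ACTAG_MAX and struct fn_actag are hypothetical stand-ins for the PNV_OCXL definitions:

    #include <stdio.h>

    #define ACTAG_MAX 64

    struct fn_actag {
            unsigned short start;
            unsigned short count;
    };

    int main(void)
    {
            struct fn_actag fns[] = { { 0, 8 }, { 0, 16 }, { 0, 4 } };
            unsigned short range_start = 0;

            for (size_t i = 0; i < sizeof(fns) / sizeof(fns[0]); i++) {
                    fns[i].start = range_start;     /* next free acTag */
                    range_start += fns[i].count;    /* running prefix sum */
                    if (range_start >= ACTAG_MAX)
                            fprintf(stderr, "acTag range overflow\n");
                    printf("fn %zu: acTags %d..%d\n", i, fns[i].start,
                           fns[i].start + fns[i].count - 1);
            }
            return 0;
    }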
|
| /linux/arch/s390/boot/ |
| physmem_info.c |
|   278  unsigned long range_start, range_end;  in __physmem_alloc_range() local
|   284  __get_physmem_range(nranges - 1, &range_start, &range_end, false);  in __physmem_alloc_range()
|   290  if (range_start > addr) {  in __physmem_alloc_range()
|
| /linux/drivers/gpu/drm/xe/ |
| xe_svm.c |
|   288  static int xe_svm_range_set_default_attr(struct xe_vm *vm, u64 range_start, u64 range_end)  in xe_svm_range_set_default_attr() argument
|   300  vma = xe_vm_find_vma_by_addr(vm, range_start);  in xe_svm_range_set_default_attr()
|   315  if (xe_vma_start(vma) == range_start && xe_vma_end(vma) == range_end) {  in xe_svm_range_set_default_attr()
|   321  range_start, range_end);  in xe_svm_range_set_default_attr()
|   322  err = xe_vm_alloc_cpu_addr_mirror_vma(vm, range_start, range_end - range_start);  in xe_svm_range_set_default_attr()
|   340  u64 range_start;  in xe_svm_garbage_collector() local
|   357  range_start = xe_svm_range_start(range);  in xe_svm_garbage_collector()
|   372  err = xe_svm_range_set_default_attr(vm, range_start, range_end);  in xe_svm_garbage_collector()
|
| /linux/drivers/gpu/drm/msm/ |
| msm_gem.c |
|   465  struct drm_gpuvm *vm, u64 range_start,  in get_vma_locked() argument
|   475  vma = msm_gem_vma_new(vm, obj, 0, range_start, range_end);  in get_vma_locked()
|   477  GEM_WARN_ON(vma->va.addr < range_start);  in get_vma_locked()
|   553  u64 range_start, u64 range_end)  in get_and_pin_iova_range_locked() argument
|   563  vma = get_vma_locked(obj, vm, range_start, range_end);  in get_and_pin_iova_range_locked()
|   582  u64 range_start, u64 range_end)  in msm_gem_get_and_pin_iova_range() argument
|   588  ret = get_and_pin_iova_range_locked(obj, vm, iova, range_start, range_end);  in msm_gem_get_and_pin_iova_range()
|
| msm_gem_vma.c |
|   369  u64 offset, u64 range_start, u64 range_end)  in msm_gem_vma_new() argument
|   387  range_start, range_end, 0);  in msm_gem_vma_new()
|   392  range_start = vma->node.start;  in msm_gem_vma_new()
|   393  range_end = range_start + obj->size;  in msm_gem_vma_new()
|   397  GEM_WARN_ON((range_end - range_start) > obj->size);  in msm_gem_vma_new()
|   400  .va.addr = range_start,  in msm_gem_vma_new()
|   401  .va.range = range_end - range_start,  in msm_gem_vma_new()
|
| msm_gem.h |
|   190  u64 offset, u64 range_start, u64 range_end);
|   278  u64 range_start, u64 range_end);
|
| /linux/fs/nfs/ |
| write.c |
|   660  trace_nfs_writepages(inode, wbc->range_start, wbc->range_end - wbc->range_start);  in nfs_writepages()
|   701  trace_nfs_writepages_done(inode, wbc->range_start, wbc->range_end - wbc->range_start, err);  in nfs_writepages()
|  2037  loff_t range_start = folio_pos(folio);  in nfs_wb_folio() local
|  2042  .range_start = range_start,  in nfs_wb_folio()
|  2043  .range_end = range_start + len - 1,  in nfs_wb_folio()
|  2047  trace_nfs_writeback_folio(inode, range_start, len);  in nfs_wb_folio()
|  2065  trace_nfs_writeback_folio_done(inode, range_start, len, ret);  in nfs_wb_folio()
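nfs_wb_folio() builds a writeback range covering exactly one folio; since range_end is inclusive, the last byte is range_start + len - 1. A tiny sketch of that arithmetic, modeling only the two fields involved rather than the full struct writeback_control:

    #include <stdio.h>

    struct writeback_range {
            long long range_start;
            long long range_end;    /* inclusive last byte */
    };

    int main(void)
    {
            long long folio_pos = 8192;
            long long len = 4096;
            struct writeback_range wbc = {
                    .range_start = folio_pos,
                    .range_end = folio_pos + len - 1,
            };

            printf("write back [%lld, %lld] (%lld bytes)\n",
                   wbc.range_start, wbc.range_end,
                   wbc.range_end - wbc.range_start + 1);
            return 0;
    }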
|
| nfstrace.h |
|   300  loff_t range_start,
|   304  TP_ARGS(inode, range_start, range_end),
|   311  __field(loff_t, range_start)
|   322  __entry->range_start = range_start;
|   332  __entry->range_start, __entry->range_end
|   340  loff_t range_start, \
|   343  TP_ARGS(inode, range_start, range_end))
|
| /linux/drivers/net/ethernet/netronome/nfp/bpf/ |
| jit.c |
|  2733  s16 range_start = meta->pkt_cache.range_start;  in mem_ldx_data_init_pktcache() local
|  2739  off = re_load_imm_any(nfp_prog, range_start, imm_b(nfp_prog));  in mem_ldx_data_init_pktcache()
|  2741  len = range_end - range_start;  in mem_ldx_data_init_pktcache()
|  2760  s16 range_start = meta->pkt_cache.range_start;  in mem_ldx_data_from_pktcache_unaligned() local
|  2761  s16 insn_off = meta->insn.off - range_start;  in mem_ldx_data_from_pktcache_unaligned()
|  2817  idx = (meta->insn.off - meta->pkt_cache.range_start) / REG_WIDTH;  in mem_ldx_data_from_pktcache_aligned()
|  2843  u8 off = meta->insn.off - meta->pkt_cache.range_start;  in mem_ldx_data_from_pktcache()
|  4279  s16 range_start = 0, range_end = 0;  in nfp_bpf_opt_pkt_cache() local
|  4330  s16 new_start = range_start;  in nfp_bpf_opt_pkt_cache()
|  4335  if (off < range_start) {  in nfp_bpf_opt_pkt_cache()
|  [all …]
|
| /linux/fs/ocfs2/ |
| file.c |
|   861  u64 *range_start, u64 *range_end)  in ocfs2_zero_extend_get_range() argument
|   920  *range_start = ocfs2_clusters_to_bytes(inode->i_sb, zero_cpos);  in ocfs2_zero_extend_get_range()
|   932  static int ocfs2_zero_extend_range(struct inode *inode, u64 range_start,  in ocfs2_zero_extend_range() argument
|   937  u64 zero_pos = range_start;  in ocfs2_zero_extend_range()
|   941  (unsigned long long)range_start,  in ocfs2_zero_extend_range()
|   943  BUG_ON(range_start >= range_end);  in ocfs2_zero_extend_range()
|   970  u64 zero_start, range_start = 0, range_end = 0;  in ocfs2_zero_extend() local
|   980  &range_start,  in ocfs2_zero_extend()
|   989  if (range_start < zero_start)  in ocfs2_zero_extend()
|   990  range_start = zero_start;  in ocfs2_zero_extend()
|  [all …]
|
| /linux/fs/iomap/ |
| buffered-io.c |
|   128  struct iomap_folio_state *ifs, u64 *range_start, u64 range_end)  in ifs_find_dirty_range() argument
|   132  offset_in_folio(folio, *range_start) >> inode->i_blkbits;  in ifs_find_dirty_range()
|   147  *range_start = folio_pos(folio) + (start_blk << inode->i_blkbits);  in ifs_find_dirty_range()
|   151  static unsigned iomap_find_dirty_range(struct folio *folio, u64 *range_start,  in iomap_find_dirty_range() argument
|   156  if (*range_start >= range_end)  in iomap_find_dirty_range()
|   160  return ifs_find_dirty_range(folio, ifs, range_start, range_end);  in iomap_find_dirty_range()
|   161  return range_end - *range_start;  in iomap_find_dirty_range()
|
| /linux/include/trace/events/ |
| writeback.h |
|   492  __field(long, range_start)
|   505  __entry->range_start = (long)wbc->range_start;
|   519  __entry->range_start,
|
| /linux/drivers/accel/habanalabs/goya/ |
| goya_coresight.c |
|   371  u64 range_start, range_end;  in goya_etr_validate_address() local
|   379  range_start = prop->dmmu.start_addr;  in goya_etr_validate_address()
|   382  return hl_mem_area_inside_range(addr, size, range_start, range_end);  in goya_etr_validate_address()
|
| /linux/arch/powerpc/kernel/ |
| fadump.c |
|  1748  phys_addr_t range_start, range_end;  in fadump_setup_param_area() local
|  1762  range_start = memblock_end_of_DRAM() / 2;  in fadump_setup_param_area()
|  1777  range_start = MIN_RMA * 1024 * 1024;  in fadump_setup_param_area()
|  1783  range_start,  in fadump_setup_param_area()
|