/linux/kernel/
  range.c
      12  int add_range(struct range *range, int az, int nr_range, u64 start, u64 end)    in add_range() argument
      14  if (start >= end)    in add_range()
      21  range[nr_range].start = start;    in add_range()
      30  u64 start, u64 end)    in add_range_with_merge() argument
      34  if (start >= end)    in add_range_with_merge()
      37  /* get new start/end: */    in add_range_with_merge()
      44  common_start = max(range[i].start, start);    in add_range_with_merge()
      49  /* new start/end, will add it back at last */    in add_range_with_merge()
      50  start = min(range[i].start, start);    in add_range_with_merge()
      55  range[nr_range - 1].start = 0;    in add_range_with_merge()
      [all …]

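The matches above only hint at the flow: add_range() rejects empty ranges and appends to a fixed-size table, while add_range_with_merge() first folds any overlapping entries into the new range and then appends the merged result. Below is a minimal userspace sketch of that idea; the struct layout, merge loop, and main() are reconstructions for illustration, not the code from kernel/range.c:

/* Minimal userspace sketch of the add_range()/add_range_with_merge()
 * pattern visible above; the kernel's own versions differ in detail. */
#include <stdio.h>
#include <stdint.h>

struct range { uint64_t start, end; };          /* half-open [start, end) */

static int add_range(struct range *range, int az, int nr_range,
                     uint64_t start, uint64_t end)
{
        if (start >= end)
                return nr_range;        /* empty range: nothing to add */
        if (nr_range >= az)
                return nr_range;        /* table full: silently drop */
        range[nr_range].start = start;
        range[nr_range].end = end;
        return nr_range + 1;
}

static int add_range_with_merge(struct range *range, int az, int nr_range,
                                uint64_t start, uint64_t end)
{
        int i;

        if (start >= end)
                return nr_range;

        /* Absorb every existing range that overlaps [start, end). */
        for (i = 0; i < nr_range; i++) {
                if (range[i].end <= start || range[i].start >= end)
                        continue;       /* disjoint */
                start = range[i].start < start ? range[i].start : start;
                end = range[i].end > end ? range[i].end : end;
                /* remove slot i by moving the last entry into it */
                range[i] = range[nr_range - 1];
                range[nr_range - 1].start = 0;
                range[nr_range - 1].end = 0;
                nr_range--;
                i--;
        }

        /* Add the merged range back at the end. */
        return add_range(range, az, nr_range, start, end);
}

int main(void)
{
        struct range r[8];
        int n = 0;

        n = add_range_with_merge(r, 8, n, 0x1000, 0x2000);
        n = add_range_with_merge(r, 8, n, 0x1800, 0x3000);   /* merges with the first */
        printf("%d range(s), first [%#llx, %#llx)\n", n,
               (unsigned long long)r[0].start, (unsigned long long)r[0].end);
        return 0;
}
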
  resource.c
      37   .start = 0,
      45   .start = 0,
      122  unsigned long long start, end;    in r_show() local
      131  start = r->start;    in r_show()
      134  start = end = 0;    in r_show()
      139  width, start,    in r_show()
      146  .start = r_start,
      183  resource_size_t start = new->start;    in __request_resource() local
      187  if (end < start)    in __request_resource()
      189  if (start < root->start)    in __request_resource()
      [all …]

  resource_kunit.c
      24  static struct resource r0 = { .start = R0_START, .end = R0_END };
      25  static struct resource r1 = { .start = R1_START, .end = R1_END };
      26  static struct resource r2 = { .start = R2_START, .end = R2_END };
      27  static struct resource r3 = { .start = R3_START, .end = R3_END };
      28  static struct resource r4 = { .start = R4_START, .end = R4_END };
      39  .r1 = &r1, .r2 = &r0, .r.start = R0_START, .r.end = R0_END, .ret = true,
      41  .r1 = &r2, .r2 = &r0, .r.start = R0_START, .r.end = R0_END, .ret = true,
      43  .r1 = &r3, .r2 = &r0, .r.start = R0_START, .r.end = R0_END, .ret = true,
      45  .r1 = &r4, .r2 = &r0, .r.start = R0_START, .r.end = R0_END, .ret = true,
      51  .r1 = &r4, .r2 = &r1, .r.start = R1_START, .r.end = R4_END, .ret = true,
      [all …]

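The .r1/.r2/.r.start/.r.end/.ret rows read like a table-driven test: combine two resources and compare the result against an expected range and return value. A sketch of the same pattern in plain C follows; the res_union() helper, the case values, and the assertions are invented for illustration (the real R*_START/R*_END constants and the helper under test are not shown in the listing):

/* Table-driven check of a range-union helper, in the spirit of the
 * resource_kunit.c cases above.  All names and values are stand-ins. */
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct res { uint64_t start, end; };          /* inclusive bounds */

/* Union of two ranges, valid only when they overlap or touch. */
static bool res_union(const struct res *a, const struct res *b, struct res *out)
{
        if (a->start > b->end + 1 || b->start > a->end + 1)
                return false;                 /* disjoint: no single union */
        out->start = a->start < b->start ? a->start : b->start;
        out->end   = a->end   > b->end   ? a->end   : b->end;
        return true;
}

struct case_desc {
        struct res r1, r2;                    /* inputs */
        struct res expect;                    /* expected union */
        bool ret;                             /* expected return value */
};

static const struct case_desc cases[] = {
        { { 0, 15 }, {  8, 23 }, { 0, 23 }, true  },   /* overlapping */
        { { 0, 15 }, { 16, 31 }, { 0, 31 }, true  },   /* adjacent */
        { { 0, 15 }, { 32, 47 }, { 0,  0 }, false },   /* disjoint */
};

int main(void)
{
        for (size_t i = 0; i < sizeof(cases) / sizeof(cases[0]); i++) {
                struct res out = { 0, 0 };
                bool ok = res_union(&cases[i].r1, &cases[i].r2, &out);

                assert(ok == cases[i].ret);
                if (ok)
                        assert(out.start == cases[i].expect.start &&
                               out.end == cases[i].expect.end);
        }
        printf("all cases pass\n");
        return 0;
}
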
/linux/arch/mips/pci/
  pci-malta.c
      30   .start = 0x00000000UL,
      81   resource_size_t start, end, map, start1, end1, map1, map2, map3, mask;    in mips_pcibios_init() local
      103  start = GT_READ(GT_PCI0M0LD_OFS);    in mips_pcibios_init()
      106  end = (end & GT_PCI_HD_MSK) | (start & ~GT_PCI_HD_MSK);    in mips_pcibios_init()
      112  if (end1 - start1 > end - start) {    in mips_pcibios_init()
      113  start = start1;    in mips_pcibios_init()
      117  mask = ~(start ^ end);    in mips_pcibios_init()
      119  BUG_ON((start & GT_PCI_HD_MSK) != (map & GT_PCI_HD_MSK) &&    in mips_pcibios_init()
      121  gt64120_mem_resource.start = start;    in mips_pcibios_init()
      123  gt64120_controller.mem_offset = (start & mask) - (map & mask);    in mips_pcibios_init()
      [all …]

/linux/fs/btrfs/
  extent-io-tree.c
      47   "state leak: start %llu end %llu state %u in tree %d refs %d",    in btrfs_extent_state_leak_debug_check()
      48   state->start, state->end, state->state,    in btrfs_extent_state_leak_debug_check()
      57   #define btrfs_debug_check_extent_io_range(tree, start, end) \    argument
      58   __btrfs_debug_check_extent_io_range(__func__, (tree), (start), (end))
      61   u64 start, u64 end)    in __btrfs_debug_check_extent_io_range() argument
      73   caller, btrfs_ino(inode), isize, start, end);    in __btrfs_debug_check_extent_io_range()
      198  changeset->bytes_changed += state->end - state->start + 1;    in add_extent_changeset()
      199  ret = ulist_add(&changeset->range_changed, state->start, state->end,    in add_extent_changeset()
      251  if (offset < entry->start)    in tree_search_for_insert()
      303  if (offset < entry->start)    in tree_search_prev_next()
      [all …]

  extent_map.c
      73   static u64 range_end(u64 start, u64 len)    in range_end() argument
      75   if (start + len < start)    in range_end()
      77   return start + len;    in range_end()
      97   u64 end = range_end(em->start, em->len);    in tree_insert()
      103  if (em->start < entry->start)    in tree_insert()
      105  else if (em->start >= btrfs_extent_map_end(entry))    in tree_insert()
      112  while (parent && em->start >= btrfs_extent_map_end(entry)) {    in tree_insert()
      117  if (end > entry->start && em->start < btrfs_extent_map_end(entry))    in tree_insert()
      122  while (parent && em->start < entry->start) {    in tree_insert()
      127  if (end > entry->start && em->start < btrfs_extent_map_end(entry))    in tree_insert()
      [all …]

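range_end() guards against start + len wrapping past the top of the u64 space before the sum is used as an exclusive end offset. A standalone sketch of that overflow clamp (returning the maximum offset on wrap is an assumption based on the visible check):

/* Overflow-safe computation of an exclusive end offset. */
#include <stdint.h>
#include <stdio.h>

static uint64_t range_end(uint64_t start, uint64_t len)
{
        if (start + len < start)         /* unsigned wrap-around */
                return (uint64_t)-1;     /* clamp to the maximum offset */
        return start + len;
}

int main(void)
{
        printf("%llu\n", (unsigned long long)range_end(100, 20));              /* 120 */
        printf("%llu\n", (unsigned long long)range_end(UINT64_MAX - 4, 16));   /* clamped */
        return 0;
}
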
/linux/include/trace/events/
  osnoise.h
      107  TP_PROTO(struct task_struct *t, u64 start, u64 duration),
      109  TP_ARGS(t, start, duration),
      113  __field( u64, start )
      121  __entry->start = start;
      125  TP_printk("%8s:%d start %llu.%09u duration %llu ns",
      128  __print_ns_to_secs(__entry->start),
      129  __print_ns_without_secs(__entry->start),
      135  TP_PROTO(int vector, u64 start, u64 duration),
      137  TP_ARGS(vector, start, duration),
      140  __field( u64, start )
      [all …]

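The TP_printk() format prints the start timestamp as seconds.nanoseconds by splitting a nanosecond counter with __print_ns_to_secs()/__print_ns_without_secs(). A plain C sketch of that split; the helper names and the sample value below are local stand-ins:

/* Split a nanosecond timestamp for "secs.nsecs" style printing. */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

static uint64_t ns_to_secs(uint64_t ns)      { return ns / NSEC_PER_SEC; }
static uint32_t ns_without_secs(uint64_t ns) { return (uint32_t)(ns % NSEC_PER_SEC); }

int main(void)
{
        uint64_t start = 12345678901ULL;     /* 12.345678901 s */

        printf("start %" PRIu64 ".%09" PRIu32 "\n",
               ns_to_secs(start), ns_without_secs(start));
        return 0;
}
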
/linux/drivers/net/dsa/sja1105/
  sja1105_ethtool.c
      84   int start;    member
      95   .start = 31,
      102  .start = 23,
      109  .start = 15,
      116  .start = 7,
      124  .start = 27,
      131  .start = 26,
      138  .start = 25,
      145  .start = 24,
      152  .start = 23,
      [all …]

/linux/arch/riscv/mm/
  tlbflush.c
      38  static void local_flush_tlb_range_threshold_asid(unsigned long start,    in local_flush_tlb_range_threshold_asid() argument
      54  local_sinval_vma(start, asid);    in local_flush_tlb_range_threshold_asid()
      55  start += stride;    in local_flush_tlb_range_threshold_asid()
      62  local_flush_tlb_page_asid(start, asid);    in local_flush_tlb_range_threshold_asid()
      63  start += stride;    in local_flush_tlb_range_threshold_asid()
      67  static inline void local_flush_tlb_range_asid(unsigned long start,    in local_flush_tlb_range_asid() argument
      71  local_flush_tlb_page_asid(start, asid);    in local_flush_tlb_range_asid()
      75  local_flush_tlb_range_threshold_asid(start, size, stride, asid);    in local_flush_tlb_range_asid()
      79  void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)    in local_flush_tlb_kernel_range() argument
      81  local_flush_tlb_range_asid(start, end - start, PAGE_SIZE, FLUSH_TLB_NO_ASID);    in local_flush_tlb_kernel_range()
      [all …]

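The fragments show a size-based policy: a lone page is flushed directly, a small range is walked entry by entry, and anything past a threshold falls back to flushing everything. A userspace sketch of that decision; flush_page(), flush_all(), and FLUSH_THRESHOLD are stand-ins, and the real code additionally distinguishes ASIDs and sfence/sinval variants:

/* Threshold-based choice between per-page and full TLB flush. */
#include <stdio.h>

#define PAGE_SIZE        4096UL
#define FLUSH_THRESHOLD  64UL            /* max entries worth flushing one by one */

static void flush_page(unsigned long addr) { printf("flush page %#lx\n", addr); }
static void flush_all(void)                { printf("flush entire TLB\n"); }

static void flush_range(unsigned long start, unsigned long size, unsigned long stride)
{
        unsigned long nr = size / stride;

        if (nr == 1) {
                flush_page(start);                       /* single entry */
        } else if (nr <= FLUSH_THRESHOLD) {
                while (nr--) {                           /* walk the range */
                        flush_page(start);
                        start += stride;
                }
        } else {
                flush_all();                             /* too big: drop everything */
        }
}

int main(void)
{
        flush_range(0x400000, 3 * PAGE_SIZE, PAGE_SIZE);
        return 0;
}
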
/linux/arch/arm64/mm/
  cache.S
      18   * caches_clean_inval_pou_macro(start,end) [fixup]
      24   * - start - virtual start address of region
      46   * caches_clean_inval_pou(start,end)
      52   * - start - virtual start address of region
      62   * caches_clean_inval_user_pou(start,end)
      68   * - start - virtual start address of region
      85   * icache_inval_pou(start,end)
      89   * - start - virtual start address of region
      103  * dcache_clean_inval_poc(start, end)
      105  * Ensure that any D-cache lines for the interval [start, end)
      [all …]

/linux/tools/perf/util/
  block-range.c
      21   assert(old < entry->start);    in block_range__debug()
      22   assert(entry->start <= entry->end); /* single instruction block; jump to a jump */    in block_range__debug()
      39   if (addr < entry->start)    in block_range__find()
      72   * @start: branch target starting this basic block
      77   struct block_range_iter block_range__create(u64 start, u64 end)    in block_range__create() argument
      88   if (start < entry->start)    in block_range__create()
      90   else if (start > entry->end)    in block_range__create()
      97   * Didn't find anything.. there's a hole at @start, however @end might    in block_range__create()
      108  if (entry->end < start) {    in block_range__create()
      115  if (next->start <= end) { /* add head: [start...][n->start...] */    in block_range__create()
      [all …]

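block_range__find() descends a tree comparing the address against each node's [start, end]. The same containment test over a sorted array, as a sketch; the perf code keeps these blocks in an rbtree, and the binary search here is only for illustration:

/* Find the block whose inclusive [start, end] interval contains addr. */
#include <stdint.h>
#include <stdio.h>

struct block_range { uint64_t start, end; };     /* inclusive, non-overlapping */

static const struct block_range *
block_range_find(const struct block_range *blocks, int n, uint64_t addr)
{
        int lo = 0, hi = n - 1;

        while (lo <= hi) {
                int mid = lo + (hi - lo) / 2;

                if (addr < blocks[mid].start)
                        hi = mid - 1;            /* look left */
                else if (addr > blocks[mid].end)
                        lo = mid + 1;            /* look right */
                else
                        return &blocks[mid];     /* start <= addr <= end */
        }
        return NULL;                             /* falls in a hole */
}

int main(void)
{
        static const struct block_range blocks[] = {
                { 0x1000, 0x100f }, { 0x1010, 0x102f }, { 0x2000, 0x2003 },
        };
        const struct block_range *b = block_range_find(blocks, 3, 0x1011);

        if (b)
                printf("0x1011 is in [%#llx, %#llx]\n",
                       (unsigned long long)b->start, (unsigned long long)b->end);
        return 0;
}
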
/linux/arch/nios2/mm/
  cacheflush.c
      19  static void __flush_dcache(unsigned long start, unsigned long end)    in __flush_dcache() argument
      23  start &= ~(cpuinfo.dcache_line_size - 1);    in __flush_dcache()
      27  if (end > start + cpuinfo.dcache_size)    in __flush_dcache()
      28  end = start + cpuinfo.dcache_size;    in __flush_dcache()
      30  for (addr = start; addr < end; addr += cpuinfo.dcache_line_size) {    in __flush_dcache()
      38  static void __invalidate_dcache(unsigned long start, unsigned long end)    in __invalidate_dcache() argument
      42  start &= ~(cpuinfo.dcache_line_size - 1);    in __invalidate_dcache()
      46  for (addr = start; addr < end; addr += cpuinfo.dcache_line_size) {    in __invalidate_dcache()
      54  static void __flush_icache(unsigned long start, unsigned long end)    in __flush_icache() argument
      58  start &= ~(cpuinfo.icache_line_size - 1);    in __flush_icache()
      [all …]

/linux/drivers/gpu/drm/exynos/
  regs-fimc.h
      24  /* Y 1st frame start address for output DMA */
      26  /* Y 2nd frame start address for output DMA */
      28  /* Y 3rd frame start address for output DMA */
      30  /* Y 4th frame start address for output DMA */
      32  /* Cb 1st frame start address for output DMA */
      34  /* Cb 2nd frame start address for output DMA */
      36  /* Cb 3rd frame start address for output DMA */
      38  /* Cb 4th frame start address for output DMA */
      40  /* Cr 1st frame start address for output DMA */
      42  /* Cr 2nd frame start address for output DMA */
      [all …]

/linux/drivers/usb/core/
  devices.c
      142  static char *usb_dump_endpoint_descriptor(int speed, char *start, char *end,    in usb_dump_endpoint_descriptor() argument
      148  if (start > end)    in usb_dump_endpoint_descriptor()
      149  return start;    in usb_dump_endpoint_descriptor()
      172  return start;    in usb_dump_endpoint_descriptor()
      183  start += sprintf(start, format_endpt, desc->bEndpointAddress, dir,    in usb_dump_endpoint_descriptor()
      188  return start;    in usb_dump_endpoint_descriptor()
      191  static char *usb_dump_interface_descriptor(char *start, char *end,    in usb_dump_interface_descriptor() argument
      200  if (start > end)    in usb_dump_interface_descriptor()
      201  return start;    in usb_dump_interface_descriptor()
      209  start += sprintf(start, format_iface,    in usb_dump_interface_descriptor()
      [all …]

/linux/drivers/nvdimm/
  badrange.c
      30   bre->start = addr;    in append_badrange_entry()
      69   if (bre->start == addr) {    in add_badrange()
      101  void badrange_forget(struct badrange *badrange, phys_addr_t start,    in badrange_forget() argument
      105  u64 clr_end = start + len - 1;    in badrange_forget()
      111  * [start, clr_end] is the badrange interval being cleared.    in badrange_forget()
      112  * [bre->start, bre_end] is the badrange_list entry we're comparing    in badrange_forget()
      114  * to be modified (update either start or length), deleted, or    in badrange_forget()
      119  u64 bre_end = bre->start + bre->length - 1;    in badrange_forget()
      122  if (bre_end < start)    in badrange_forget()
      124  if (bre->start > clr_end)    in badrange_forget()
      [all …]

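badrange_forget() compares the interval being cleared against each list entry and then skips, deletes, trims, or splits it. A sketch of just that overlap classification on inclusive [start, end] intervals; the enum and the omitted list handling are illustrative, not the driver's code:

/* Classify how a cleared interval overlaps an existing bad-range entry. */
#include <stdint.h>
#include <stdio.h>

enum overlap { DISJOINT, COVERED, TRIM_FRONT, TRIM_BACK, SPLIT };

static enum overlap classify(uint64_t bre_start, uint64_t bre_end,
                             uint64_t clr_start, uint64_t clr_end)
{
        if (bre_end < clr_start || bre_start > clr_end)
                return DISJOINT;        /* no overlap: keep entry as-is */
        if (clr_start <= bre_start && clr_end >= bre_end)
                return COVERED;         /* entry fully cleared: delete it */
        if (clr_start <= bre_start)
                return TRIM_FRONT;      /* clear overlaps the low end */
        if (clr_end >= bre_end)
                return TRIM_BACK;       /* clear overlaps the high end */
        return SPLIT;                   /* clear is strictly inside: split in two */
}

int main(void)
{
        /* entry [100, 199], clearing [150, 160] leaves two pieces */
        printf("%d\n", classify(100, 199, 150, 160));   /* SPLIT (4) */
        return 0;
}
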
/linux/fs/btrfs/tests/
  extent-map-tests.c
      31   "em leak: em (start %llu len %llu disk_bytenr %llu disk_num_bytes %llu offset %llu) refs %d",    in free_extent_map_tree()
      32   em->start, em->len, em->disk_bytenr,    in free_extent_map_tree()
      66   u64 start = 0;    in test_case_1() local
      78   em->start = 0;    in test_case_1()
      84   ret = btrfs_add_extent_mapping(inode, &em, em->start, em->len);    in test_case_1()
      100  em->start = SZ_16K;    in test_case_1()
      106  ret = btrfs_add_extent_mapping(inode, &em, em->start, em->len);    in test_case_1()
      122  em->start = start;    in test_case_1()
      124  em->disk_bytenr = start;    in test_case_1()
      128  ret = btrfs_add_extent_mapping(inode, &em, em->start, em->len);    in test_case_1()
      [all …]

/linux/arch/arm/mm/
  cache-feroceon-l2.c
      30   * Cache range operations are initiated by writing the start and
      33   * [start:end].
      70   static inline void l2_clean_pa_range(unsigned long start, unsigned long end)    in l2_clean_pa_range() argument
      75   * Make sure 'start' and 'end' reference the same page, as    in l2_clean_pa_range()
      77   * the start address.    in l2_clean_pa_range()
      79   BUG_ON((start ^ end) >> PAGE_SHIFT);    in l2_clean_pa_range()
      81   va_start = l2_get_va(start);    in l2_clean_pa_range()
      82   va_end = va_start + (end - start);    in l2_clean_pa_range()
      101  static inline void l2_inv_pa_range(unsigned long start, unsigned long end)    in l2_inv_pa_range() argument
      106  * Make sure 'start' and 'end' reference the same page, as    in l2_inv_pa_range()
      [all …]

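The BUG_ON() checks that start and end fall in the same page: XOR leaves only the differing address bits, and shifting by PAGE_SHIFT discards the offset-within-page bits. A tiny sketch of that test (4 KiB pages assumed for the example):

/* Same-page check via XOR and shift. */
#include <stdbool.h>
#include <stdio.h>

#define PAGE_SHIFT 12    /* 4 KiB pages assumed */

static bool same_page(unsigned long start, unsigned long end)
{
        return ((start ^ end) >> PAGE_SHIFT) == 0;
}

int main(void)
{
        printf("%d\n", same_page(0x1000, 0x1fff));   /* 1: same 4 KiB page */
        printf("%d\n", same_page(0x1fff, 0x2000));   /* 0: crosses a page boundary */
        return 0;
}
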
/linux/mm/
  numa_memblks.c
      27   if (mi->blk[i].start != mi->blk[i].end &&    in numa_nodemask_from_meminfo()
      133  static int __init numa_add_memblk_to(int nid, u64 start, u64 end,    in numa_add_memblk_to() argument
      137  if (start == end)    in numa_add_memblk_to()
      141  if (start > end || nid < 0 || nid >= MAX_NUMNODES) {    in numa_add_memblk_to()
      143  nid, start, end - 1);    in numa_add_memblk_to()
      152  mi->blk[mi->nr_blks].start = start;    in numa_add_memblk_to()
      190  * @start: Start address of the new memblk
      198  int __init numa_add_memblk(int nid, u64 start, u64 end)    in numa_add_memblk() argument
      200  return numa_add_memblk_to(nid, start, end, &numa_meminfo);    in numa_add_memblk()
      206  * @start: Start address of the new memblk
      [all …]

  mseal.c
      21  * It disallows unmapped regions from start to end whether they exist at the
      22  * start, in the middle, or at the end of the range, or any combination thereof.
      31  * Does the [start, end) range contain any unmapped memory?
      34  * - start is part of a valid VMA.
      36  * - no gap (unallocated memory) exists between start and end.
      39  unsigned long start, unsigned long end)    in range_contains_unmapped() argument
      42  unsigned long prev_end = start;    in range_contains_unmapped()
      43  VMA_ITERATOR(vmi, current->mm, start);    in range_contains_unmapped()
      56  unsigned long start, unsigned long end)    in mseal_apply() argument
      59  unsigned long curr_start = start;    in mseal_apply()
      [all …]

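range_contains_unmapped() walks the VMAs covering [start, end) and reports a gap whenever the next mapping does not begin where the previous one stopped. The same cursor logic over a sorted array of half-open mappings, as a sketch (the kernel walks a VMA iterator rather than an array):

/* Detect holes in the coverage of [start, end) by sorted mappings. */
#include <stdbool.h>
#include <stdio.h>

struct mapping { unsigned long start, end; };    /* [start, end), sorted by start */

static bool range_contains_unmapped(const struct mapping *maps, int n,
                                    unsigned long start, unsigned long end)
{
        unsigned long prev_end = start;

        for (int i = 0; i < n && prev_end < end; i++) {
                if (maps[i].end <= prev_end)
                        continue;                /* entirely before the cursor */
                if (maps[i].start > prev_end)
                        return true;             /* hole before this mapping */
                prev_end = maps[i].end;          /* advance the cursor */
        }
        return prev_end < end;                   /* hole at (or past) the tail */
}

int main(void)
{
        static const struct mapping maps[] = { { 0x1000, 0x3000 }, { 0x4000, 0x5000 } };

        printf("%d\n", range_contains_unmapped(maps, 2, 0x1000, 0x3000));  /* 0 */
        printf("%d\n", range_contains_unmapped(maps, 2, 0x2000, 0x4800));  /* 1: gap at 0x3000 */
        return 0;
}
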
  msync.c
      21  * MS_ASYNC does not start I/O (it used to, up to 2.5.67).
      27  * Or the application may run fadvise(FADV_DONTNEED) against the fd to start
      32  SYSCALL_DEFINE3(msync, unsigned long, start, size_t, len, int, flags)    in SYSCALL_DEFINE3() argument
      40  start = untagged_addr(start);    in SYSCALL_DEFINE3()
      44  if (offset_in_page(start))    in SYSCALL_DEFINE3()
      50  end = start + len;    in SYSCALL_DEFINE3()
      51  if (end < start)    in SYSCALL_DEFINE3()
      54  if (end == start)    in SYSCALL_DEFINE3()
      57  * If the interval [start,end) covers some unmapped address ranges,    in SYSCALL_DEFINE3()
      63  vma = find_vma(mm, start);    in SYSCALL_DEFINE3()
      [all …]

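The msync() fragments show the usual sanitation for an address-range syscall: the start must be page aligned, the length is rounded up to whole pages, and start + len must not wrap. A sketch of those checks with errno-style returns (4 KiB pages assumed; flag validation and the VMA walk are omitted):

/* Validate an address/length pair the way a range syscall typically does. */
#include <errno.h>
#include <stdio.h>

#define PAGE_SIZE 4096UL
#define PAGE_MASK (~(PAGE_SIZE - 1))

static int validate_range(unsigned long start, unsigned long len,
                          unsigned long *end_out)
{
        unsigned long end;

        if (start & ~PAGE_MASK)
                return -EINVAL;                  /* start not page aligned */
        len = (len + ~PAGE_MASK) & PAGE_MASK;    /* round len up to whole pages */
        end = start + len;
        if (end < start)
                return -ENOMEM;                  /* wrapped around the address space */
        *end_out = end;
        return 0;                                /* end == start means nothing to do */
}

int main(void)
{
        unsigned long end;

        printf("%d\n", validate_range(0x10000, 5000, &end));  /* 0, end = 0x12000 */
        printf("%d\n", validate_range(0x10001, 5000, &end));  /* -EINVAL */
        return 0;
}
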
/linux/tools/testing/nvdimm/test/
  iomap.c
      68   - nfit_res->res.start \
      80   - nfit_res->res.start;    in __wrap_devm_ioremap()
      91   return nfit_res->buf + offset - nfit_res->res.start;    in __wrap_devm_memremap()
      118  resource_size_t offset = pgmap->range.start;    in __wrap_devm_memremap_pages()
      133  return nfit_res->buf + offset - nfit_res->res.start;    in __wrap_devm_memremap_pages()
      143  return nfit_res->buf + offset - nfit_res->res.start;    in __wrap_phys_to_pfn_t()
      190  struct resource *parent, resource_size_t start,    in __wrap_memunmap()
      197  WARN_ON(!nfit_test_release_region(NULL, &iomem_resource, res->start,    in __wrap_memunmap()
      204  resource_size_t start = *((resource_size_t *) match_data);    in nfit_devres_release()
      206  return res->start    in nfit_devres_release()
      215  resource_size_t start = *((resource_size_t *) match_data);    in match() local
      221  nfit_test_release_region(struct device *dev, struct resource *parent, resource_size_t start, resource_size_t n)    in nfit_test_release_region() argument
      258  nfit_test_request_region(struct device *dev, struct resource *parent, resource_size_t start, resource_size_t n, const char *name, int flags)    in nfit_test_request_region() argument
      327  __wrap___request_region(struct resource *parent, resource_size_t start, resource_size_t n, const char *name, int flags)    in __wrap___request_region() argument
      351  __wrap___devm_request_region(struct device *dev, struct resource *parent, resource_size_t start, resource_size_t n, const char *name)    in __wrap___devm_request_region() argument
      360  __wrap___release_region(struct resource *parent, resource_size_t start, resource_size_t n)    in __wrap___release_region() argument
      369  __wrap___devm_release_region(struct device *dev, struct resource *parent, resource_size_t start, resource_size_t n)    in __wrap___devm_release_region() argument
      [all …]

/linux/arch/x86/kernel/
  probe_roms.c
      28   .start = 0xf0000,
      35   .start = 0xe0000,
      42   .start = 0xc8000,
      47   .start = 0,
      52   .start = 0,
      57   .start = 0,
      62   .start = 0,
      67   .start = 0,
      74   .start = 0xc0000,
      128  rom = isa_bus_to_virt(res->start);    in find_oprom()
      [all …]

/linux/arch/hexagon/mm/
  cache.c
      12  #define spanlines(start, end) \    argument
      13  (((end - (start & ~(LINESIZE - 1))) >> LINEBITS) + 1)
      15  void flush_dcache_range(unsigned long start, unsigned long end)    in flush_dcache_range() argument
      17  unsigned long lines = spanlines(start, end-1);    in flush_dcache_range()
      20  start &= ~(LINESIZE - 1);    in flush_dcache_range()
      28  : "r" (start)    in flush_dcache_range()
      30  start += LINESIZE;    in flush_dcache_range()
      35  void flush_icache_range(unsigned long start, unsigned long end)    in flush_icache_range() argument
      37  unsigned long lines = spanlines(start, end-1);    in flush_icache_range()
      40  start &= ~(LINESIZE - 1);    in flush_icache_range()
      [all …]

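spanlines() counts how many cache lines a byte range touches: round the start down to a line boundary, measure the distance to the last byte, divide by the line size, and add one for the line containing that last byte. A userspace sketch with an assumed 32-byte line:

/* Count the cache lines spanned by [start, end] (end is the last byte). */
#include <stdio.h>

#define LINESIZE 32UL
#define LINEBITS 5                 /* log2(LINESIZE) */

#define spanlines(start, end) \
        ((((end) - ((start) & ~(LINESIZE - 1))) >> LINEBITS) + 1)

int main(void)
{
        /* flush_dcache_range(start, end) above uses spanlines(start, end - 1) */
        printf("%lu\n", spanlines(0x100UL, 0x11fUL));   /* 1 line  */
        printf("%lu\n", spanlines(0x11fUL, 0x120UL));   /* 2 lines */
        return 0;
}
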
/linux/arch/mips/bcm63xx/
  dev-enet.c
      48   .start = -1, /* filled at runtime */
      53   .start = -1, /* filled at runtime */
      58   .start = -1, /* filled at runtime */
      77   .start = -1, /* filled at runtime */
      82   .start = -1, /* filled at runtime */
      86   .start = -1, /* filled at runtime */
      90   .start = -1, /* filled at runtime */
      111  .start = -1, /* filled at runtime */
      116  .start = -1, /* filled at runtime */
      120  .start = -1, /* filled at runtime */
      [all …]

/linux/drivers/vhost/
  iotlb.c
      16  #define START(map) ((map)->start)    macro
      21  START, LAST, static inline, vhost_iotlb_itree);
      41  * @start: start of the IOVA range
      43  * @addr: the address that is mapped to @start
      47  * Returns an error last is smaller than start or memory allocation
      51  u64 start, u64 last,    in vhost_iotlb_add_range_ctx() argument
      57  if (last < start)    in vhost_iotlb_add_range_ctx()
      63  if (start == 0 && last == ULONG_MAX) {    in vhost_iotlb_add_range_ctx()
      65  int err = vhost_iotlb_add_range_ctx(iotlb, start, mid, addr,    in vhost_iotlb_add_range_ctx()
      72  start = mid + 1;    in vhost_iotlb_add_range_ctx()
      [all …]

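vhost_iotlb_add_range_ctx() splits a request covering [0, ULONG_MAX] into two halves before inserting it, since a size computed as last - start + 1 would wrap to zero. A sketch of that split on top of a stand-in insert function (error handling and the interval tree itself are omitted):

/* Split a whole-address-space mapping into two halves before inserting. */
#include <limits.h>
#include <stdint.h>
#include <stdio.h>

static int insert_range(uint64_t start, uint64_t last, uint64_t addr)
{
        printf("map [%#llx, %#llx] -> %#llx (size %llu)\n",
               (unsigned long long)start, (unsigned long long)last,
               (unsigned long long)addr,
               (unsigned long long)(last - start + 1));
        return 0;
}

static int add_range(uint64_t start, uint64_t last, uint64_t addr)
{
        if (last < start)
                return -1;                       /* malformed interval */

        if (start == 0 && last == ULONG_MAX) {
                uint64_t mid = ULONG_MAX >> 1;   /* split point */
                int err = add_range(start, mid, addr);

                if (err)
                        return err;
                addr += mid + 1;                 /* second half maps past the first */
                start = mid + 1;
        }
        return insert_range(start, last, addr);
}

int main(void)
{
        add_range(0, ULONG_MAX, 0);              /* inserted as two halves */
        return 0;
}
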