
Searched full:end (Results 1 – 25 of 5028) sorted by relevance


/linux/kernel/
range.c
12 int add_range(struct range *range, int az, int nr_range, u64 start, u64 end) in add_range() argument
14 if (start >= end) in add_range()
22 range[nr_range].end = end; in add_range()
30 u64 start, u64 end) in add_range_with_merge() argument
34 if (start >= end) in add_range_with_merge()
37 /* get new start/end: */ in add_range_with_merge()
41 if (!range[i].end) in add_range_with_merge()
45 common_end = min(range[i].end, end); in add_range_with_merge()
49 /* new start/end, will add it back at last */ in add_range_with_merge()
51 end = max(range[i].end, end); in add_range_with_merge()
[all …]
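
The range.c hits above are the kernel's generic range-array helpers: add_range() appends a [start, end) pair (rejecting empty intervals), and add_range_with_merge() first tries to fold the new interval into an existing overlapping entry by taking the min/max of the endpoints. A minimal standalone sketch of that merge test, assuming a simplified struct range and ignoring the array compaction the real helper performs afterwards:

#include <stdint.h>

struct range { uint64_t start, end; };

/*
 * Try to fold [start, end) into *r. Returns 1 if the intervals overlap
 * (or touch) and *r was widened, 0 if they are disjoint. Sketch only:
 * the real add_range_with_merge() also drops the absorbed slot and
 * re-adds the widened interval through add_range().
 */
static int try_merge(struct range *r, uint64_t start, uint64_t end)
{
        uint64_t common_start, common_end;

        if (!r->end)                            /* unused slot */
                return 0;

        common_start = start > r->start ? start : r->start;
        common_end   = end   < r->end   ? end   : r->end;
        if (common_start > common_end)          /* no overlap, not touching */
                return 0;

        r->start = start < r->start ? start : r->start;
        r->end   = end   > r->end   ? end   : r->end;
        return 1;
}
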
resource.c
38 .end = IO_SPACE_LIMIT,
46 .end = -1,
54 .end = -1,
130 unsigned long long start, end; in r_show()
131 int width = root->end < 0x10000 ? 4 : 8; in r_show()
140 end = r->end; in r_show()
142 start = end = 0; in r_show()
148 width, end,
192 resource_size_t end in __request_resource()
122 unsigned long long start, end; r_show() local
184 resource_size_t end = new->end; __request_resource() local
340 find_next_iomem_res(resource_size_t start,resource_size_t end,unsigned long flags,unsigned long desc,struct resource * res) find_next_iomem_res() argument
393 __walk_iomem_res_desc(resource_size_t start,resource_size_t end,unsigned long flags,unsigned long desc,void * arg,int (* func)(struct resource *,void *)) __walk_iomem_res_desc() argument
431 walk_iomem_res_desc(unsigned long desc,unsigned long flags,u64 start,u64 end,void * arg,int (* func)(struct resource *,void *)) walk_iomem_res_desc() argument
444 walk_system_ram_res(u64 start,u64 end,void * arg,int (* func)(struct resource *,void *)) walk_system_ram_res() argument
459 walk_system_ram_res_rev(u64 start,u64 end,void * arg,int (* func)(struct resource *,void *)) walk_system_ram_res_rev() argument
509 walk_mem_res(u64 start,u64 end,void * arg,int (* func)(struct resource *,void *)) walk_mem_res() argument
526 resource_size_t start, end; walk_system_ram_range() local
1040 resource_size_t end = start + size - 1; __adjust_resource() local
1097 __reserve_region_with_split(struct resource * root,resource_size_t start,resource_size_t end,const char * name) __reserve_region_with_split() argument
1158 reserve_region_with_split(struct resource * root,resource_size_t start,resource_size_t end,const char * name) reserve_region_with_split() argument
1363 resource_size_t end; __release_region() local
1464 resource_size_t end; release_mem_region_adjustable() local
1786 resource_size_t end = addr + size - 1; iomem_map_sanity_check() local
1924 resource_size_t end; gfr_start() local
[all...]
/linux/include/linux/ceph/
decode.h
16 * void *end pointer to end of buffer (last byte + 1)
52 static inline bool ceph_has_room(void **p, void *end, size_t n) in ceph_has_room() argument
54 return end >= *p && n <= end - *p; in ceph_has_room()
57 #define ceph_decode_need(p, end, n, bad) \ argument
59 if (!likely(ceph_has_room(p, end, n))) \
63 #define ceph_decode_64_safe(p, end, v, bad) \ argument
65 ceph_decode_need(p, end, sizeof(u64), bad); \
68 #define ceph_decode_32_safe(p, end, v, bad) \ argument
70 ceph_decode_need(p, end, sizeof(u32), bad); \
73 #define ceph_decode_16_safe(p, end, v, bad) \ argument
[all …]
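
The decode.h helpers all build on one invariant: *p is a cursor into a received buffer and end points one byte past its last valid byte, so ceph_has_room(p, end, n) is simply end >= *p && n <= end - *p, and the *_safe macros jump to a caller-supplied error label when that check fails. A hedged plain-C sketch of the same bounds-checked-cursor pattern, with simplified names, boolean returns instead of a goto label, and the little-endian conversion reduced to a comment:

#include <stdbool.h>
#include <stdint.h>
#include <string.h>

/* Are at least n bytes left between the cursor *p and end? */
static bool has_room(void **p, void *end, size_t n)
{
        return end >= *p && n <= (size_t)((char *)end - (char *)*p);
}

/* Pull a 32-bit value off the cursor, refusing to read past end. */
static bool decode_u32(void **p, void *end, uint32_t *v)
{
        if (!has_room(p, end, sizeof(*v)))
                return false;
        memcpy(v, *p, sizeof(*v));      /* the real macros convert from little-endian */
        *p = (char *)*p + sizeof(*v);
        return true;
}
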
/linux/arch/arm64/mm/
cache.S
18 * caches_clean_inval_pou_macro(start,end) [fixup]
25 * - end - virtual end address of region
46 * caches_clean_inval_pou(start,end)
53 * - end - virtual end address of region
62 * caches_clean_inval_user_pou(start,end)
69 * - end - virtual end address of region
85 * icache_inval_pou(start,end)
90 * - end - virtual end address of region
103 * dcache_clean_inval_poc(start, end)
105 * Ensure that any D-cache lines for the interval [start, end)
[all …]
/linux/drivers/net/dsa/sja1105/
sja1105_ethtool.c
85 int end; member
96 .end = 24,
103 .end = 16,
110 .end = 8,
117 .end = 0,
125 .end = 27,
132 .end = 26,
139 .end = 25,
146 .end = 24,
153 .end = 23,
[all …]
/linux/arch/mips/pci/
pci-malta.c
31 .end = 0x000fffffUL,
81 resource_size_t start, end, map, start1, end1, map1, map2, map3, mask; in mips_pcibios_init() local
104 end = GT_READ(GT_PCI0M0HD_OFS); in mips_pcibios_init()
106 end = (end & GT_PCI_HD_MSK) | (start & ~GT_PCI_HD_MSK); in mips_pcibios_init()
112 if (end1 - start1 > end - start) { in mips_pcibios_init()
114 end = end1; in mips_pcibios_init()
117 mask = ~(start ^ end); in mips_pcibios_init()
122 gt64120_mem_resource.end = end; in mips_pcibios_init()
126 gt64120_mem_resource.end <<= GT_PCI_DCRM_SHF; in mips_pcibios_init()
127 gt64120_mem_resource.end |= (1 << GT_PCI_DCRM_SHF) - 1; in mips_pcibios_init()
[all …]
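
One line in mips_pcibios_init() deserves a note: mask = ~(start ^ end). For a naturally aligned window, XOR-ing the first and last address leaves set bits only where the two differ, i.e. inside the window, so the complement is the address-decode mask programmed into the controller. A small hedged illustration with made-up addresses (the 0x10000000/0x17ffffff window below is hypothetical, not taken from the Malta board setup):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t start = 0x10000000, end = 0x17ffffff;  /* hypothetical 128 MiB window */
        uint32_t differ = start ^ end;  /* 0x07ffffff: bits that vary inside the window */
        uint32_t mask = ~differ;        /* 0xf8000000: bits that select the window */

        printf("mask = 0x%08x\n", (unsigned int)mask);
        return 0;
}
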
/linux/tools/perf/util/
block-range.c
22 assert(entry->start <= entry->end); /* single instruction block; jump to a jump */ in block_range__debug()
24 old = entry->end; in block_range__debug()
41 else if (addr > entry->end) in block_range__find()
73 * @end: branch ending this basic block
77 struct block_range_iter block_range__create(u64 start, u64 end) in block_range__create() argument
90 else if (start > entry->end) in block_range__create()
97 * Didn't find anything.. there's a hole at @start, however @end might in block_range__create()
108 if (entry->end < start) { in block_range__create()
115 if (next->start <= end) { /* add head: [start...][n->start...] */ in block_range__create()
122 .end = next->start - 1, in block_range__create()
[all …]
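
block-range.c keeps non-overlapping, inclusive [start, end] basic blocks in an rbtree, and block_range__find() descends left when the address is below entry->start and right when it is above entry->end. The same comparison carries over to any sorted collection of disjoint inclusive ranges; here is a hedged sketch using a plain sorted array in place of the kernel rbtree:

#include <stddef.h>
#include <stdint.h>

struct block { uint64_t start, end; };          /* end is inclusive */

/*
 * Binary search a sorted array of disjoint blocks for the one containing
 * addr; returns NULL when addr falls into a hole between blocks.
 */
static const struct block *find_block(const struct block *blocks, size_t n,
                                      uint64_t addr)
{
        size_t lo = 0, hi = n;

        while (lo < hi) {
                size_t mid = lo + (hi - lo) / 2;

                if (addr < blocks[mid].start)
                        hi = mid;
                else if (addr > blocks[mid].end)
                        lo = mid + 1;
                else
                        return &blocks[mid];
        }
        return NULL;
}
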
time-utils.c
21 char *end; in parse_nsec_time() local
23 time_sec = strtoull(str, &end, 10); in parse_nsec_time()
24 if (*end != '.' && *end != '\0') in parse_nsec_time()
27 if (*end == '.') { in parse_nsec_time()
31 if (strlen(++end) > 9) in parse_nsec_time()
34 strncpy(nsec_buf, end, 9); in parse_nsec_time()
41 time_nsec = strtoull(nsec_buf, &end, 10); in parse_nsec_time()
42 if (*end != '\0') in parse_nsec_time()
60 (parse_nsec_time(end_str, &ptime->end) != 0)) { in parse_timestr_sec_nsec()
67 static int split_start_end(char **start, char **end, const char *ostr, char ch) in split_start_end() argument
[all …]
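
parse_nsec_time() splits a "sec.nsec" string: strtoull() reads the seconds, and when a '.' follows, at most nine fractional digits are copied into a buffer that stays right-padded with zeros, so "1.5" means 1.500000000 seconds. A hedged re-implementation of the same idea, with simplified error handling and no overflow checks:

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

/* Parse "sec[.nsec]" into nanoseconds; returns 0 on success, -1 on error. */
static int parse_sec_nsec(const char *str, uint64_t *out_ns)
{
        char buf[10] = "000000000";             /* nine zeros keep ns precision */
        uint64_t sec, nsec = 0;
        char *end;

        sec = strtoull(str, &end, 10);
        if (*end != '.' && *end != '\0')
                return -1;

        if (*end == '.') {
                size_t len = strlen(++end);

                if (len > 9)                    /* finer than nanoseconds */
                        return -1;
                memcpy(buf, end, len);          /* trailing '0's stay in place */
                nsec = strtoull(buf, &end, 10);
                if (*end != '\0')
                        return -1;
        }

        *out_ns = sec * 1000000000ULL + nsec;
        return 0;
}
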
/linux/Documentation/admin-guide/kdump/
gdbmacros.txt
32 end
34 end
46 end
48 end
50 end
52 end
53 end
56 end
75 end
76 end
[all …]
/linux/fs/ceph/
mdsmap.c
58 #define __decode_and_drop_type(p, end, type, bad) \ argument
60 if (*p + sizeof(type) > end) \
65 #define __decode_and_drop_set(p, end, type, bad) \ argument
69 ceph_decode_32_safe(p, end, n, bad); \
71 ceph_decode_need(p, end, need, bad); \
75 #define __decode_and_drop_map(p, end, ktype, vtype, bad) \ argument
79 ceph_decode_32_safe(p, end, n, bad); \
81 ceph_decode_need(p, end, need, bad); \
86 static int __decode_and_drop_compat_set(void **p, void* end) in __decode_and_drop_compat_set() argument
92 ceph_decode_need(p, end, sizeof(u64) + sizeof(u32), bad); in __decode_and_drop_compat_set()
[all …]
/linux/arch/microblaze/kernel/cpu/
cache.c
89 * End address can be unaligned which is OK for C implementation.
92 #define CACHE_LOOP_LIMITS(start, end, cache_line_length, cache_size) \ argument
96 end = min(start + cache_size, end); \
121 * end address is not aligned, if end is aligned then I have to subtract
125 #define CACHE_RANGE_LOOP_2(start, end, line_length, op) \ argument
130 end = ((end & align) == end) ? end - line_length : end & align; \
131 count = end - start; \
142 #define CACHE_RANGE_LOOP_1(start, end, line_length, op) \ argument
146 end = ((end & align) == end) ? end - line_length : end & align; \
147 WARN_ON(end < start); \
[all …]
/linux/arch/nios2/mm/
cacheflush.c
19 static void __flush_dcache(unsigned long start, unsigned long end) in __flush_dcache() argument
24 end += (cpuinfo.dcache_line_size - 1); in __flush_dcache()
25 end &= ~(cpuinfo.dcache_line_size - 1); in __flush_dcache()
27 if (end > start + cpuinfo.dcache_size) in __flush_dcache()
28 end = start + cpuinfo.dcache_size; in __flush_dcache()
30 for (addr = start; addr < end; addr += cpuinfo.dcache_line_size) { in __flush_dcache()
38 static void __invalidate_dcache(unsigned long start, unsigned long end) in __invalidate_dcache() argument
43 end += (cpuinfo.dcache_line_size - 1); in __invalidate_dcache()
44 end &= ~(cpuinfo.dcache_line_size - 1); in __invalidate_dcache()
46 for (addr = start; addr < end; addr += cpuinfo.dcache_line_size) { in __invalidate_dcache()
[all …]
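
The MicroBlaze and Nios II hits show the same cache-maintenance shape: round end up to a line boundary, optionally clamp the span so it never exceeds the cache size (flushing more than one cache's worth of lines is pointless), then step through the region one line at a time. A hedged sketch of that loop, with hypothetical LINE_SIZE/CACHE_SIZE constants and the per-line cache operation stubbed out:

#include <stdint.h>

#define LINE_SIZE   32u                 /* hypothetical D-cache line size */
#define CACHE_SIZE  (16u * 1024)        /* hypothetical total D-cache size */

static void flush_one_line(uintptr_t addr)
{
        (void)addr;                     /* real hardware: a per-line cache-op instruction */
}

static void flush_dcache_range(uintptr_t start, uintptr_t end)
{
        uintptr_t addr;

        /* Round end up to the next cache-line boundary ... */
        end += LINE_SIZE - 1;
        end &= ~(uintptr_t)(LINE_SIZE - 1);

        /* ... and never walk more than one full cache worth of lines. */
        if (end > start + CACHE_SIZE)
                end = start + CACHE_SIZE;

        for (addr = start; addr < end; addr += LINE_SIZE)
                flush_one_line(addr);
}
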
/linux/arch/arm64/include/asm/
cacheflush.h
26 * Start addresses are inclusive and end addresses are exclusive; start
27 * addresses should be rounded down, end addresses up.
33 * All functions below apply to the interval [start, end)
35 * - end - virtual end address (exclusive)
37 * caches_clean_inval_pou(start, end)
42 * caches_clean_inval_user_pou(start, end)
48 * icache_inval_pou(start, end)
52 * dcache_clean_inval_poc(start, end)
56 * dcache_inval_poc(start, end)
60 * dcache_clean_poc(start, end)
[all …]
/linux/tools/perf/pmu-events/arch/x86/emeraldrapids/
frontend.json
7 …"PublicDescription": "Number of times the front-end is resteered when it finds a branch instructio…
55 …ed instruction-cache) miss. Critical means stalls were exposed to the back-end as a result of the …
93 "BriefDescription": "Retired instructions after front-end starvation of at least 1 cycle",
99 … an interval where the front-end delivered no uops for a period of at least 1 cycle which was not …
104 …ter an interval where the front-end delivered no uops for a period of 128 cycles which was not int…
110 …ter an interval where the front-end delivered no uops for a period of 128 cycles which was not int…
115 …fter an interval where the front-end delivered no uops for a period of 16 cycles which was not int…
121 …ons that are delivered to the back-end after a front-end stall of at least 16 cycles. During this …
126 "BriefDescription": "Retired instructions after front-end starvation of at least 2 cycles",
132 … an interval where the front-end delivered no uops for a period of at least 2 cycles which was not…
[all …]
/linux/tools/perf/pmu-events/arch/x86/sapphirerapids/
frontend.json
7 …"PublicDescription": "Number of times the front-end is resteered when it finds a branch instructio…
55 …ed instruction-cache) miss. Critical means stalls were exposed to the back-end as a result of the …
93 "BriefDescription": "Retired instructions after front-end starvation of at least 1 cycle",
99 … an interval where the front-end delivered no uops for a period of at least 1 cycle which was not …
104 …ter an interval where the front-end delivered no uops for a period of 128 cycles which was not int…
110 …ter an interval where the front-end delivered no uops for a period of 128 cycles which was not int…
115 …fter an interval where the front-end delivered no uops for a period of 16 cycles which was not int…
121 …ons that are delivered to the back-end after a front-end stall of at least 16 cycles. During this …
126 "BriefDescription": "Retired instructions after front-end starvation of at least 2 cycles",
132 … an interval where the front-end delivered no uops for a period of at least 2 cycles which was not…
[all …]
/linux/arch/riscv/mm/
kasan_init.c
21 * For sv48 and sv57, the region start is aligned on PGDIR_SIZE whereas the end
29 static void __init kasan_populate_pte(pmd_t *pmd, unsigned long vaddr, unsigned long end) in kasan_populate_pte() argument
47 } while (ptep++, vaddr += PAGE_SIZE, vaddr != end); in kasan_populate_pte()
50 static void __init kasan_populate_pmd(pud_t *pud, unsigned long vaddr, unsigned long end) in kasan_populate_pmd() argument
64 next = pmd_addr_end(vaddr, end); in kasan_populate_pmd()
77 } while (pmdp++, vaddr = next, vaddr != end); in kasan_populate_pmd()
81 unsigned long vaddr, unsigned long end) in kasan_populate_pud() argument
95 next = pud_addr_end(vaddr, end); in kasan_populate_pud()
108 } while (pudp++, vaddr = next, vaddr != end); in kasan_populate_pud()
112 unsigned long vaddr, unsigned long end) in kasan_populate_p4d() argument
[all …]
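
The kasan_populate_* helpers use the kernel's standard page-table walking idiom: at each level, next = pXd_addr_end(vaddr, end) clamps the step to whichever comes first, the next entry boundary or the overall end, and the loop recurses into the lower level for [vaddr, next). A hedged sketch of that clamping helper for one level (the 2 MiB PMD span is an assumption here, and the kernel's own macro works on unsigned long):

#include <stdint.h>

#define PMD_SIZE  (2ull * 1024 * 1024)  /* hypothetical per-entry span */
#define PMD_MASK  (~(PMD_SIZE - 1))

/*
 * End of the current PMD entry's coverage, clamped to the overall end.
 * The "- 1" on both sides keeps the comparison correct when the boundary
 * wraps to 0 at the very top of the address space.
 */
static uint64_t pmd_addr_end(uint64_t addr, uint64_t end)
{
        uint64_t boundary = (addr + PMD_SIZE) & PMD_MASK;

        return boundary - 1 < end - 1 ? boundary : end;
}
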
/linux/tools/perf/pmu-events/arch/x86/rocketlake/
frontend.json
3 "BriefDescription": "Counts the total number when the front end is resteered, mainly when the BPU cannot provide a correct prediction and this is corrected by other branch handling mechanisms at the front end.",
7 "PublicDescription": "Counts the number of times the front-end is resteered when it finds a branch instruction in a fetch line. This occurs for the first time a branch instruction is fetched or when the branch is not tracked by the BPU (Branch Prediction Unit) anymore.",
58 "PublicDescription": "Number of retired Instructions that experienced a critical DSB (Decode stream buffer i.e. the decoded instruction-cache) miss. Critical means stalls were exposed to the back-end as a result of the DSB miss.",
96 "BriefDescription": "Retired instructions after front-end starvation of at least 1 cycle",
102 "PublicDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of at least 1 cycle which was not interrupted by a back-end stall.",
107 "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 128 cycles which was not interrupted by a back-end stall.",
113 "PublicDescription": "Counts retired instructions that are fetched after an interval where the front-end delivere
[all...]
/linux/tools/perf/pmu-events/arch/x86/icelakex/
frontend.json
3 "BriefDescription": "Counts the total number when the front end is resteered, mainly when the BPU cannot provide a correct prediction and this is corrected by other branch handling mechanisms at the front end.",
7 "PublicDescription": "Counts the number of times the front-end is resteered when it finds a branch instruction in a fetch line. This occurs for the first time a branch instruction is fetched or when the branch is not tracked by the BPU (Branch Prediction Unit) anymore.",
58 "PublicDescription": "Number of retired Instructions that experienced a critical DSB (Decode stream buffer i.e. the decoded instruction-cache) miss. Critical means stalls were exposed to the back-end as a result of the DSB miss.",
96 "BriefDescription": "Retired instructions after front-end starvation of at least 1 cycle",
102 "PublicDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of at least 1 cycle which was not interrupted by a back-end stall.",
107 "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 128 cycles which was not interrupted by a back-end stall.",
113 "PublicDescription": "Counts retired instructions that are fetched after an interval where the front-end delivere
[all...]
/linux/tools/perf/pmu-events/arch/x86/tigerlake/
frontend.json
3 "BriefDescription": "Counts the total number when the front end is resteered, mainly when the BPU cannot provide a correct prediction and this is corrected by other branch handling mechanisms at the front end.",
7 "PublicDescription": "Counts the number of times the front-end is resteered when it finds a branch instruction in a fetch line. This occurs for the first time a branch instruction is fetched or when the branch is not tracked by the BPU (Branch Prediction Unit) anymore.",
58 "PublicDescription": "Number of retired Instructions that experienced a critical DSB (Decode stream buffer i.e. the decoded instruction-cache) miss. Critical means stalls were exposed to the back-end as a result of the DSB miss.",
96 "BriefDescription": "Retired instructions after front-end starvation of at least 1 cycle",
102 "PublicDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of at least 1 cycle which was not interrupted by a back-end stall.",
107 "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 128 cycles which was not interrupted by a back-end stall.",
113 "PublicDescription": "Counts retired instructions that are fetched after an interval where the front-end delivere
[all...]
/linux/tools/perf/pmu-events/arch/x86/icelake/
frontend.json
3 "BriefDescription": "Counts the total number when the front end is resteered, mainly when the BPU cannot provide a correct prediction and this is corrected by other branch handling mechanisms at the front end.",
7 "PublicDescription": "Counts the number of times the front-end is resteered when it finds a branch instruction in a fetch line. This occurs for the first time a branch instruction is fetched or when the branch is not tracked by the BPU (Branch Prediction Unit) anymore.",
58 "PublicDescription": "Number of retired Instructions that experienced a critical DSB (Decode stream buffer i.e. the decoded instruction-cache) miss. Critical means stalls were exposed to the back-end as a result of the DSB miss.",
96 "BriefDescription": "Retired instructions after front-end starvation of at least 1 cycle",
102 "PublicDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of at least 1 cycle which was not interrupted by a back-end stall.",
107 "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 128 cycles which was not interrupted by a back-end stall.",
113 "PublicDescription": "Counts retired instructions that are fetched after an interval where the front-end delivere
[all...]
/linux/fs/btrfs/
extent-io-tree.h
125 u64 end; /* inclusive */ member
144 int btrfs_lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end, u32 bits,
146 bool btrfs_try_lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
149 static inline int btrfs_lock_extent(struct extent_io_tree *tree, u64 start, u64 end, in btrfs_lock_extent() argument
152 return btrfs_lock_extent_bits(tree, start, end, EXTENT_LOCKED, cached); in btrfs_lock_extent()
156 u64 end, struct extent_state **cached) in btrfs_try_lock_extent() argument
158 return btrfs_try_lock_extent_bits(tree, start, end, EXTENT_LOCKED, cached); in btrfs_try_lock_extent()
170 bool btrfs_test_range_bit(struct extent_io_tree *tree, u64 start, u64 end, u32 bit,
172 bool btrfs_test_range_bit_exists(struct extent_io_tree *tree, u64 start, u64 end, u32 bit);
173 void btrfs_get_range_bits(struct extent_io_tree *tree, u64 start, u64 end, u32 *bits,
[all …]
/linux/tools/perf/pmu-events/arch/x86/graniterapids/
frontend.json
7 …"PublicDescription": "Number of times the front-end is resteered when it finds a branch instructio…
69 …ed instruction-cache) miss. Critical means stalls were exposed to the back-end as a result of the …
116 "BriefDescription": "Retired instructions after front-end starvation of at least 1 cycle",
122 … an interval where the front-end delivered no uops for a period of at least 1 cycle which was not …
127 …ter an interval where the front-end delivered no uops for a period of 128 cycles which was not int…
133 …ter an interval where the front-end delivered no uops for a period of 128 cycles which was not int…
138 …fter an interval where the front-end delivered no uops for a period of 16 cycles which was not int…
144 …ons that are delivered to the back-end after a front-end stall of at least 16 cycles. During this …
149 "BriefDescription": "Retired instructions after front-end starvation of at least 2 cycles",
155 … an interval where the front-end delivered no uops for a period of at least 2 cycles which was not…
[all …]
/linux/mm/
numa_memblks.c
29 if (mi->blk[i].start != mi->blk[i].end && in numa_nodemask_from_meminfo()
135 static int __init numa_add_memblk_to(int nid, u64 start, u64 end, in numa_add_memblk_to() argument
139 if (start == end) in numa_add_memblk_to()
143 if (start > end || nid < 0 || nid >= MAX_NUMNODES) { in numa_add_memblk_to()
145 nid, start, end - 1); in numa_add_memblk_to()
155 mi->blk[mi->nr_blks].end = end; in numa_add_memblk_to()
193 * @end: End address of the new memblk
200 int __init numa_add_memblk(int nid, u64 start, u64 end) in numa_add_memblk() argument
222 numa_add_reserved_memblk(int nid,u64 start,u64 end) numa_add_reserved_memblk() argument
275 u64 start, end; numa_cleanup_meminfo() local
509 numa_fill_memblks(u64 start,u64 end) numa_fill_memblks() argument
[all...]
/linux/drivers/gpu/drm/vmwgfx/
vmwgfx_page_dirty.c
37 * @end: Last currently dirty bit + 1
48 unsigned long end; member
57 return vbo->dirty && (vbo->dirty->start < vbo->dirty->end); in vmw_bo_is_dirty()
79 &dirty->start, &dirty->end); in vmw_bo_dirty_scan_pagetable()
93 &dirty->start, &dirty->end); in vmw_bo_dirty_scan_pagetable()
113 if (dirty->end <= dirty->start) in vmw_bo_dirty_scan_mkwrite()
118 dirty->end - dirty->start); in vmw_bo_dirty_scan_mkwrite()
128 pgoff_t end = dirty->bitmap_size; in vmw_bo_dirty_scan_mkwrite() local
131 clean_record_shared_mapping_range(mapping, offset, end, offset, in vmw_bo_dirty_scan_mkwrite()
133 &start, &end); in vmw_bo_dirty_scan_mkwrite()
[all …]
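
The vmwgfx dirty tracker keeps just two numbers per buffer object, the first dirty page and one past the last ("Last currently dirty bit + 1"), so "is anything dirty" reduces to start < end. A hedged sketch of that minimal bookkeeping, with hypothetical names and the reset policy simplified:

#include <stdbool.h>
#include <stddef.h>

struct dirty_window {
        size_t start;           /* first dirty page */
        size_t end;             /* one past the last dirty page */
};

static bool window_is_dirty(const struct dirty_window *w)
{
        return w->start < w->end;
}

/* Record a write to page pfn: grow the window to cover it. */
static void window_mark(struct dirty_window *w, size_t pfn)
{
        if (!window_is_dirty(w)) {
                w->start = pfn;
                w->end = pfn + 1;
                return;
        }
        if (pfn < w->start)
                w->start = pfn;
        if (pfn + 1 > w->end)
                w->end = pfn + 1;
}

/* After the dirty span has been flushed, nothing is dirty again. */
static void window_reset(struct dirty_window *w)
{
        w->start = w->end = 0;
}
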
/linux/net/ceph/
osdmap.c
71 static int crush_decode_uniform_bucket(void **p, void *end, in crush_decode_uniform_bucket() argument
74 dout("crush_decode_uniform_bucket %p to %p\n", *p, end); in crush_decode_uniform_bucket()
75 ceph_decode_need(p, end, (1+b->h.size) * sizeof(u32), bad); in crush_decode_uniform_bucket()
82 static int crush_decode_list_bucket(void **p, void *end, in crush_decode_list_bucket() argument
86 dout("crush_decode_list_bucket %p to %p\n", *p, end); in crush_decode_list_bucket()
93 ceph_decode_need(p, end, 2 * b->h.size * sizeof(u32), bad); in crush_decode_list_bucket()
103 static int crush_decode_tree_bucket(void **p, void *end, in crush_decode_tree_bucket() argument
107 dout("crush_decode_tree_bucket %p to %p\n", *p, end); in crush_decode_tree_bucket()
108 ceph_decode_8_safe(p, end, b->num_nodes, bad); in crush_decode_tree_bucket()
112 ceph_decode_need(p, end, b->num_nodes * sizeof(u32), bad); in crush_decode_tree_bucket()
[all …]
