/linux/tools/testing/selftests/sync/

  sync_wait.c
      35  int fenceA, fenceB, fenceC, merged;    in test_fence_multi_timeline_wait() local
      46  merged = sync_merge("mergeFence", fenceB, fenceA);    in test_fence_multi_timeline_wait()
      47  merged = sync_merge("mergeFence", fenceC, merged);    in test_fence_multi_timeline_wait()
      49  valid = sw_sync_fence_is_valid(merged);    in test_fence_multi_timeline_wait()
      53  active = sync_fence_count_with_status(merged, FENCE_STATUS_ACTIVE);    in test_fence_multi_timeline_wait()
      56  ret = sync_wait(merged, 0);    in test_fence_multi_timeline_wait()
      61  active = sync_fence_count_with_status(merged, FENCE_STATUS_ACTIVE);    in test_fence_multi_timeline_wait()
      62  signaled = sync_fence_count_with_status(merged, FENCE_STATUS_SIGNALED);    in test_fence_multi_timeline_wait()
      67  active = sync_fence_count_with_status(merged, FENCE_STATUS_ACTIVE);    in test_fence_multi_timeline_wait()
      68  signaled = sync_fence_count_with_status(merged, FENCE_STATUS_SIGNALED);    in test_fence_multi_timeline_wait()
      [all …]
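The sync_wait.c hits above all exercise the sw_sync selftest helpers. Below is a hedged sketch of the same pattern, assuming the sw_sync_timeline_create(), sw_sync_fence_create(), sync_merge() and sync_wait() helpers declared in this selftest directory's sync.h and sw_sync.h:

    /* Sketch only: merge fences from two sw_sync timelines and wait on the result.
     * Assumes the helpers declared in the selftests' sync.h / sw_sync.h. */
    #include <stdio.h>
    #include "sync.h"
    #include "sw_sync.h"

    int demo_merge_and_wait(void)
    {
        int timelineA, timelineB, fenceA, fenceB, merged, ret;

        timelineA = sw_sync_timeline_create();
        timelineB = sw_sync_timeline_create();

        /* Each fence signals when its timeline reaches the given value. */
        fenceA = sw_sync_fence_create(timelineA, "fenceA", 5);
        fenceB = sw_sync_fence_create(timelineB, "fenceB", 5);

        /* The merged fence signals only once both component fences have. */
        merged = sync_merge("mergeFence", fenceA, fenceB);
        if (!sw_sync_fence_is_valid(merged))
            return -1;

        ret = sync_wait(merged, 0);             /* non-blocking poll: still active */
        printf("before signaling: sync_wait returned %d\n", ret);

        sw_sync_timeline_inc(timelineA, 5);     /* signal fenceA */
        sw_sync_timeline_inc(timelineB, 5);     /* signal fenceB */

        ret = sync_wait(merged, 100);           /* should now report signaled */
        printf("after signaling: sync_wait returned %d\n", ret);

        sw_sync_fence_destroy(merged);
        sw_sync_fence_destroy(fenceA);
        sw_sync_fence_destroy(fenceB);
        sw_sync_timeline_destroy(timelineA);
        sw_sync_timeline_destroy(timelineB);
        return 0;
    }

The merged fence only counts as signaled once every component fence has, which is what the FENCE_STATUS_ACTIVE / FENCE_STATUS_SIGNALED counting in sync_wait.c checks.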
  sync_merge.c
      34  int fence, valid, merged;    in test_fence_merge_same_fence() local
      44  merged = sync_merge("mergeFence", fence, fence);    in test_fence_merge_same_fence()
      48  ASSERT(sync_fence_count_with_status(merged, FENCE_STATUS_SIGNALED) == 0,    in test_fence_merge_same_fence()
      52  ASSERT(sync_fence_count_with_status(merged, FENCE_STATUS_SIGNALED) == 1,    in test_fence_merge_same_fence()
      55  sw_sync_fence_destroy(merged);    in test_fence_merge_same_fence()
  sync_stress_merge.c
      43  int fence, tmpfence, merged, valid;    in test_merge_stress_random_merge() local
      76  merged = sync_merge("merge", tmpfence, fence);    in test_merge_stress_random_merge()
      79  fence = merged;    in test_merge_stress_random_merge()
      81  valid = sw_sync_fence_is_valid(merged);    in test_merge_stress_random_merge()
  sync_stress_consumer.c
      108  int fence, merged, tmp, valid, it, i;    in mpcs_consumer_thread() local
      119  merged = sync_merge("name", tmp, fence);    in mpcs_consumer_thread()
      122  fence = merged;    in mpcs_consumer_thread()
/linux/tools/crypto/tcrypt/

  tcrypt_speed_compare.py
      114  merged = {}
      116  merged[alg] = {}
      118  if op not in merged[alg]:
      119  merged[alg][op] = []
      132  merged[alg][op].append(merged_item)
      133  return merged
      136  def format(merged):    argument
      137  for alg in merged.keys():
      138  for op in merged[alg].keys():
      150  if "base_ops" in merged[alg][op][0]:
      [all …]
/linux/tools/perf/tests/

  maps.c
      18  struct map_def *merged;    member
      25  struct map_def *merged = &args->merged[args->i];    in check_maps_cb() local
      27  if (map__start(map) != merged->start ||    in check_maps_cb()
      28  map__end(map) != merged->end ||    in check_maps_cb()
      29  strcmp(dso__name(map__dso(map)), merged->name) ||    in check_maps_cb()
      48  static int check_maps(struct map_def *merged, unsigned int size, struct maps *maps)    in check_maps() argument
      57  .merged = merged,    in check_maps()
      66  merged[    in check_maps()
      [all …]
/linux/Documentation/ABI/testing/

  procfs-diskstats
      14  5   reads merged
      18  9   writes merged
      31  16  discards merged
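procfs-diskstats numbers its fields from the start of each /proc/diskstats row (major, minor, device name, then the counters), so fields 5, 9 and 16 are the three merge counters. A hedged sketch that extracts them follows; the variable names and sscanf scaffolding are mine, only the field positions come from the ABI file above:

    /* Sketch: print reads/writes/discards merged per device from /proc/diskstats.
     * Field numbering follows the ABI entry above: 5 = reads merged,
     * 9 = writes merged, 16 = discards merged (1-based, counting from "major").
     * Older kernels emit fewer fields, hence the sscanf return-value checks. */
    #include <stdio.h>

    int main(void)
    {
        FILE *f = fopen("/proc/diskstats", "r");
        char line[512];

        if (!f)
            return 1;

        while (fgets(line, sizeof(line), f)) {
            unsigned int major, minor;
            char name[64];
            /* v[i] holds diskstats field i (1-based); fields 4..16 parsed here. */
            unsigned long long v[17] = { 0 };
            int n;

            n = sscanf(line,
                       "%u %u %63s %llu %llu %llu %llu %llu %llu %llu "
                       "%llu %llu %llu %llu %llu %llu",
                       &major, &minor, name,
                       &v[4], &v[5], &v[6], &v[7], &v[8], &v[9], &v[10],
                       &v[11], &v[12], &v[13], &v[14], &v[15], &v[16]);
            if (n < 12)
                continue;    /* malformed or very old format */

            printf("%-10s reads merged %llu  writes merged %llu  discards merged %llu\n",
                   name, v[5], v[9], n >= 16 ? v[16] : 0ULL);
        }
        fclose(f);
        return 0;
    }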
  sysfs-kernel-mm-ksm
      52  When it is set to 0 only pages from the same node are merged,
      53  otherwise pages from all nodes can be merged together (default).
/linux/tools/testing/vma/

  vma.c
      160  struct vm_area_struct *merged;    in try_merge_new_vma()
      164  merged = merge_new(vmg);    in try_merge_new_vma()
      165  if (merged) {    in try_merge_new_vma()
      168  return merged;    in try_merge_new_vma()
      397  bool merged;    in test_merge_new()
      425  vma_d = try_merge_new_vma(&mm, &vmg, 0x7000, 0x9000, 7, flags, &merged);    in test_merge_new()
      429  ASSERT_FALSE(merged);    in test_merge_new()
      440  vma = try_merge_new_vma(&mm, &vmg, 0x2000, 0x3000, 2, flags, &merged);    in test_merge_new()
      443  ASSERT_TRUE(merged);    in test_merge_new()
      457  vma = try_merge_new_vma(&mm, &vmg, 0x4000, 0x5000, 4, flags, &merged);    in test_merge_new()
      158  struct vm_area_struct *merged;    try_merge_new_vma() local
      395  bool merged;    test_merge_new() local
      [all …]
/linux/block/

  badblocks.c
      560  int merged = 0;    in behind_merge() local
      566  merged = BB_OFFSET(p[behind]) - s;    in behind_merge()
      567  p[behind] = BB_MAKE(s, BB_LEN(p[behind]) + merged, bad->ack);    in behind_merge()
      569  WARN_ON((BB_LEN(p[behind]) + merged) >= BB_MAX_LEN);    in behind_merge()
      572  return merged;    in behind_merge()
      602  int merged = 0;    in front_merge() local
      607  merged = min_t(sector_t, sectors, BB_END(p[prev]) - s);    in front_merge()
      609  merged = min_t(sector_t, sectors, BB_MAX_LEN - BB_LEN(p[prev]));    in front_merge()
      611  merged > (BB_OFFSET(p[prev + 1]) - BB_END(p[prev]))) {    in front_merge()
      612  merged = BB_OFFSET(p[prev + 1]) - BB_END(p[prev]);    in front_merge()
      [all …]
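The BB_MAKE()/BB_OFFSET()/BB_LEN()/BB_END() macros used by behind_merge() and front_merge() pack one bad-block record (start sector, run length, acknowledged flag) into a single 64-bit word, and merging means rewriting that word with a longer run, capped at BB_MAX_LEN. A simplified illustration follows; the bit layout below is my own stand-in rather than the kernel's exact encoding:

    /* Illustration only: a simplified packed bad-block record in the spirit of
     * the BB_* macros above. The bit layout here is mine; the kernel's
     * definitions live in include/linux/badblocks.h. */
    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t u64;

    #define BB_MAX_LEN   512                  /* longest run one record may cover */
    #define BB_ACK_BIT   (1ULL << 63)         /* "admin has acknowledged" flag */
    #define BB_MAKE(start, len, ack) \
        (((u64)(start) << 9) | ((u64)(len) - 1) | ((ack) ? BB_ACK_BIT : 0))
    #define BB_OFFSET(x) (((x) & ~BB_ACK_BIT) >> 9)    /* start sector */
    #define BB_LEN(x)    (((x) & 0x1ffULL) + 1)        /* run length, 1..512 */
    #define BB_END(x)    (BB_OFFSET(x) + BB_LEN(x))    /* first sector past the run */
    #define BB_ACK(x)    (!!((x) & BB_ACK_BIT))

    int main(void)
    {
        /* Eight bad sectors starting at sector 1000, not yet acknowledged. */
        u64 bb = BB_MAKE(1000, 8, 0);

        printf("start %llu len %llu end %llu ack %d\n",
               (unsigned long long)BB_OFFSET(bb),
               (unsigned long long)BB_LEN(bb),
               (unsigned long long)BB_END(bb), BB_ACK(bb));

        /* A "front merge" grows the record in place, as long as the run
         * stays within BB_MAX_LEN; otherwise a new record must be added. */
        if (BB_LEN(bb) + 4 <= BB_MAX_LEN)
            bb = BB_MAKE(BB_OFFSET(bb), BB_LEN(bb) + 4, BB_ACK(bb));

        printf("after merge: start %llu len %llu\n",
               (unsigned long long)BB_OFFSET(bb),
               (unsigned long long)BB_LEN(bb));
        return 0;
    }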
  bfq-cgroup.c
      238  blkg_rwstat_add(&bfqg->stats.merged, opf, 1);    in bfqg_stats_update_io_merged()
      359  blkg_rwstat_reset(&stats->merged);    in bfqg_stats_reset()
      380  blkg_rwstat_add_aux(&to->merged, &from->merged);    in bfqg_stats_add_aux()
      441  blkg_rwstat_exit(&stats->merged);    in bfqg_stats_exit()
      462  if (blkg_rwstat_init(&stats->merged, gfp) ||    in bfqg_stats_init()
      1300  .private = offsetof(struct bfq_group, stats.merged),
      1343  .private = offsetof(struct bfq_group, stats.merged),
/linux/tools/lib/perf/

  cpumap.c
      452  struct perf_cpu_map *merged;    in perf_cpu_map__merge()
      484  merged = cpu_map__trim_new(k, tmp_cpus);    in perf_cpu_map__intersect()
      487  return merged;    in perf_cpu_map__intersect()
      496  struct perf_cpu_map *merged = NULL;
      520  merged = cpu_map__trim_new(k, tmp_cpus);
      522  return merged;
      421  struct perf_cpu_map *merged;    perf_cpu_map__merge() local
      465  struct perf_cpu_map *merged = NULL;    perf_cpu_map__intersect() local
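perf_cpu_map__merge() builds a new map holding the union of two sorted CPU lists before trimming it with cpu_map__trim_new(). A minimal stand-alone sketch of that sorted-union step, using plain int arrays in place of struct perf_cpu (the helper name is mine):

    /* Sketch: union of two sorted CPU-id arrays without duplicates, the core of
     * a cpu_map "merge". Arrays stand in for struct perf_cpu entries. */
    #include <stdio.h>

    static int merge_cpu_ids(const int *a, int na, const int *b, int nb, int *out)
    {
        int i = 0, j = 0, k = 0;

        while (i < na && j < nb) {
            if (a[i] < b[j])
                out[k++] = a[i++];
            else if (b[j] < a[i])
                out[k++] = b[j++];
            else {                 /* same CPU in both maps: keep one copy */
                out[k++] = a[i++];
                j++;
            }
        }
        while (i < na)
            out[k++] = a[i++];
        while (j < nb)
            out[k++] = b[j++];
        return k;                  /* number of CPUs in the merged map */
    }

    int main(void)
    {
        int a[] = { 0, 1, 4, 5 }, b[] = { 1, 2, 4, 6 }, out[8];
        int n = merge_cpu_ids(a, 4, b, 4, out);

        for (int i = 0; i < n; i++)
            printf("%d ", out[i]);     /* 0 1 2 4 5 6 */
        printf("\n");
        return 0;
    }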
/linux/mm/kmsan/

  init.c
      33  bool merged = false;    in kmsan_record_future_shadow_range() local
      59  merged = true;    in kmsan_record_future_shadow_range()
      62  if (merged)    in kmsan_record_future_shadow_range()
/linux/Documentation/filesystems/xfs/

  xfs-maintainer-entry-profile.rst
      10  Patches are generally merged to the for-next branch of the appropriate
      12  After a testing period, the for-next branch is merged to the master
      15  Kernel code are merged to the xfs-linux tree[0].
      16  Userspace code are merged to the xfsprogs tree[1].
      17  Test cases are merged to the xfstests tree[2].
      18  Ondisk format documentation are merged to the xfs-documentation tree[3].
      110  These patches will be merged back to the for-next branch.
/linux/Documentation/block/

  stat.rst
      30  read merges       requests    number of read I/Os merged with in-queue I/O
      34  write merges      requests    number of write I/Os merged with in-queue I/O
      41  discard merges    requests    number of discard I/Os merged with in-queue I/O
      64  These values increment when an I/O request is merged with an
/linux/fs/bcachefs/

  checksum.c
      409  struct bch_csum merged = { 0 };    in bch2_rechecksum_bio() local
      442  merged = bch2_checksum_merge(new_csum_type, merged,    in bch2_rechecksum_bio()
      445  merged = bch2_checksum_bio(c, crc_old.csum_type,    in bch2_rechecksum_bio()
      448  if (bch2_crc_cmp(merged, crc_old.csum) && !c->opts.no_data_io) {    in bch2_rechecksum_bio()
      455  merged.hi,    in bch2_rechecksum_bio()
      456  merged.lo);    in bch2_rechecksum_bio()
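bch2_rechecksum_bio() folds per-chunk checksums into one running value with bch2_checksum_merge() and compares the result against the stored checksum. That only works for checksums that can be merged; the toy below uses a plain byte-sum (not bcachefs's actual algorithm) purely to show what merging two chunk checksums means:

    /* Toy illustration: a checksum that can be "merged" across adjacent chunks,
     * i.e. csum(A || B) is computable from the chunk checksums alone. This is a
     * plain byte-sum, not bcachefs's bch2_checksum_merge(). */
    #include <stdint.h>
    #include <stdio.h>

    static uint64_t bytesum(const void *buf, size_t len)
    {
        const uint8_t *p = buf;
        uint64_t sum = 0;

        while (len--)
            sum += *p++;
        return sum;
    }

    /* For a plain sum, merging two chunk checksums is just addition. */
    static uint64_t bytesum_merge(uint64_t a, uint64_t b)
    {
        return a + b;
    }

    int main(void)
    {
        const char data[] = "merge me across two extents";
        size_t split = 8;

        uint64_t whole = bytesum(data, sizeof(data) - 1);
        uint64_t merged = bytesum_merge(bytesum(data, split),
                                        bytesum(data + split, sizeof(data) - 1 - split));

        printf("whole=%llu merged=%llu -> %s\n",
               (unsigned long long)whole, (unsigned long long)merged,
               whole == merged ? "match" : "MISMATCH");
        return 0;
    }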
/linux/Documentation/filesystems/

  overlayfs.rst
      90  merged with the 'upper' object.
      117  Where both upper and lower objects are directories, a merged directory
      121  "upperdir" are combined into a merged directory::
      124  workdir=/work /merged
      129  Then whenever a lookup is requested in such a merged directory, the
      132  actual lookups find directories, both are stored and a merged
      136  Only the lists of names from directories are merged. Other content
      151  When a whiteout is found in the upper level of a merged directory, any
      169  When a 'readdir' request is made on a merged directory, the upper and
      170  lower directories are each read and the name lists merged in the
      [all …]
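Line 124 above is the tail of the documented example mount: 'lowerdir', 'upperdir' and 'workdir' combined into a merged tree at /merged. A hedged C equivalent of that same mount via mount(2), using the documentation's example paths and minimal error handling:

    /* Sketch: mount an overlay at /merged from the documented example directories.
     * Equivalent to:
     *   mount -t overlay overlay -olowerdir=/lower,upperdir=/upper,workdir=/work /merged
     * Needs CAP_SYS_ADMIN; all four directories must already exist. */
    #include <stdio.h>
    #include <sys/mount.h>

    int main(void)
    {
        const char *opts = "lowerdir=/lower,upperdir=/upper,workdir=/work";

        if (mount("overlay", "/merged", "overlay", 0, opts)) {
            perror("mount overlay");
            return 1;
        }
        printf("overlay mounted at /merged\n");
        return 0;
    }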
/linux/drivers/hwtracing/stm/

  policy.c
      204  struct configfs_attribute **merged;    in get_policy_node_type() local
      211  merged = memcat_p(stp_policy_node_attrs, attrs);    in get_policy_node_type()
      212  if (!merged) {    in get_policy_node_type()
      217  type->ct_attrs = merged;    in get_policy_node_type()
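memcat_p() concatenates two NULL-terminated arrays of pointers into one newly allocated, NULL-terminated array, which is how get_policy_node_type() splices protocol-specific configfs attributes onto the common ones. A userspace sketch of the same idea; malloc() stands in for the kernel allocator and the function name is mine:

    /* Sketch: concatenate two NULL-terminated pointer arrays into a new
     * NULL-terminated array, the job memcat_p() does above. Caller frees. */
    #include <stdio.h>
    #include <stdlib.h>

    static void **cat_ptr_arrays(void **a, void **b)
    {
        size_t na = 0, nb = 0, i;
        void **out;

        while (a[na])
            na++;
        while (b[nb])
            nb++;

        out = malloc((na + nb + 1) * sizeof(*out));
        if (!out)
            return NULL;

        for (i = 0; i < na; i++)
            out[i] = a[i];
        for (i = 0; i < nb; i++)
            out[na + i] = b[i];
        out[na + nb] = NULL;       /* keep the terminator */
        return out;
    }

    int main(void)
    {
        char *x[] = { "common", "attrs", NULL };
        char *y[] = { "protocol", "specific", NULL };
        void **merged = cat_ptr_arrays((void **)x, (void **)y);

        for (size_t i = 0; merged && merged[i]; i++)
            printf("%s\n", (char *)merged[i]);
        free(merged);
        return 0;
    }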
/linux/Documentation/admin-guide/mm/

  ksm.rst
      26  KSM's merged pages were originally locked into kernel memory, but can now
      46  unmerges whatever it merged in that range. Note: this unmerging call
      95  specifies if pages from different NUMA nodes can be merged.
      112  * set to 0 to stop ksmd from running but keep merged pages,
      114  * set to 2 to stop ksmd and unmerge all pages currently merged, but
      123  empty pages are merged with the kernel zero page(s) instead of
      131  effective for pages merged after the change.
      239  be merged, but some may not be abled to be merged after being checked
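The ksm.rst lines above describe behaviour a process opts into with madvise(MADV_MERGEABLE) and that the administrator controls through /sys/kernel/mm/ksm/run. A small sketch of the opt-in side; apart from the documented MADV_MERGEABLE advice value, the scaffolding is mine:

    /* Sketch: map two identical anonymous pages and advise KSM to merge them.
     * ksmd must be running (echo 1 > /sys/kernel/mm/ksm/run) for merging to happen. */
    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void)
    {
        long page = sysconf(_SC_PAGESIZE);
        char *buf = mmap(NULL, 2 * page, PROT_READ | PROT_WRITE,
                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        if (buf == MAP_FAILED) {
            perror("mmap");
            return 1;
        }

        /* Fill both pages with identical content so KSM has something to merge. */
        memset(buf, 0x5a, page);
        memset(buf + page, 0x5a, page);

        /* Opt this range in to KSM scanning; MADV_UNMERGEABLE undoes it
         * ("unmerges whatever it merged in that range", as the doc says). */
        if (madvise(buf, 2 * page, MADV_MERGEABLE)) {
            perror("madvise(MADV_MERGEABLE)");
            return 1;
        }

        /* ... let ksmd scan for a while, then check
         * /sys/kernel/mm/ksm/pages_sharing to see the effect ... */
        pause();
        return 0;
    }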
/linux/Documentation/admin-guide/

  iostats.rst
      68  Field 2 -- # of reads merged, field 6 -- # of writes merged (unsigned long)
      69  Reads and writes which are adjacent to each other may be merged for
      84  Field 6 -- # of writes merged (unsigned long)
      115  Field 13 -- # of discards merged (unsigned long)
      192  eventual merges. As requests can be merged across partition, this could lead
/linux/tools/testing/kunit/

  kunit_kernel.py
      176  merged = kunit_config.Kconfig()
      185  diff = merged.conflicting_options(partial)
      189  merged.merge_in_entries(partial)
      190  return merged
/linux/drivers/md/

  dm-stats.h
      20  bool merged;    member
/linux/Documentation/ABI/obsolete/

  sysfs-bus-iio
      31  Since kernel 5.11 the scan_elements attributes are merged into
      66  Since kernel 5.11 the scan_elements attributes are merged into
      108  Since kernel 5.11 the scan_elements attributes are merged into
      147  Since kernel 5.11 the scan_elements attributes are merged into
/linux/tools/perf/tests/config-fragments/

  README
      1  This folder is for kernel config fragments that can be merged with
/linux/Documentation/process/

  6.Followthrough.rst
      137  burner so that the remaining patches can be worked into shape and merged.
      146  merged into the mainline kernel. Congratulations! Once the celebration is
      154  longer any question of your code being merged. Resist that temptation,
      171  well make it harder for you to get work merged in the future.
      187  after it's merged. The next time you post a patch, they will be evaluating
      213  chances are that one of the two patches will not be merged, and "mine was
      219  long after they have forgotten whose patch actually got merged.