/linux/tools/testing/selftests/mm/

map_populate.c
    33  static void parent_f(int sock, unsigned long *smap, int child)  in parent_f() argument
    40  *smap = 0x22222BAD;  in parent_f()
    41  ret = msync(smap, MMAP_SZ, MS_SYNC);  in parent_f()
    54  static int child_f(int sock, unsigned long *smap, int fd)  in child_f() argument
    58  smap = mmap(0, MMAP_SZ, PROT_READ | PROT_WRITE,  in child_f()
    60  BUG_ON(smap == MAP_FAILED, "mmap()");  in child_f()
    62  BUG_ON(*smap != 0xdeadbabe, "MAP_PRIVATE | MAP_POPULATE changed file");  in child_f()
    70  ksft_test_result(*smap != 0x22222BAD, "MAP_POPULATE COW private page\n");  in child_f()
    71  ksft_test_result(*smap == 0xdeadbabe, "The mapping state\n");  in child_f()
    81  unsigned long *smap;  in main() local
    [all …]
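The hits above are from the MAP_POPULATE selftest: a parent rewrites the backing file while the child expects its MAP_PRIVATE | MAP_POPULATE mapping to keep the original 0xdeadbabe, because populating a writable private mapping copies the pages in at mmap() time. Below is a minimal single-process sketch of the same behaviour; the file name and the pwrite()/fsync() step are simplifications of the socket-synchronised parent/child protocol in the real test.

```c
/* Sketch: with MAP_PRIVATE | MAP_POPULATE on a writable mapping, the page
 * is copied when mmap() returns, so a later change to the file should not
 * show up through the mapping. Error handling is abbreviated.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	unsigned long val = 0xdeadbabe;
	int fd = open("populate_test.tmp", O_CREAT | O_RDWR | O_TRUNC, 0600);

	if (fd < 0 || write(fd, &val, sizeof(val)) != sizeof(val))
		return 1;
	if (ftruncate(fd, 4096) < 0)
		return 1;

	/* Writable private mapping, populated (faulted in) immediately. */
	unsigned long *smap = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
				   MAP_PRIVATE | MAP_POPULATE, fd, 0);
	if (smap == MAP_FAILED)
		return 1;

	/* Change the file after the mapping has been populated. */
	val = 0x22222BAD;
	pwrite(fd, &val, sizeof(val), 0);
	fsync(fd);

	/* The already-populated private page should still hold the old value. */
	printf("mapping sees %#lx (%s)\n", *smap,
	       *smap == 0xdeadbabe ? "unchanged, as expected" : "changed");

	munmap(smap, 4096);
	close(fd);
	unlink("populate_test.tmp");
	return 0;
}
```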
/linux/kernel/bpf/

stackmap.c
    45  static int prealloc_elems_and_freelist(struct bpf_stack_map *smap)  in prealloc_elems_and_freelist() argument
    48  (u64)smap->map.value_size;  in prealloc_elems_and_freelist()
    51  smap->elems = bpf_map_area_alloc(elem_size * smap->map.max_entries,  in prealloc_elems_and_freelist()
    52  smap->map.numa_node);  in prealloc_elems_and_freelist()
    53  if (!smap->elems)  in prealloc_elems_and_freelist()
    56  err = pcpu_freelist_init(&smap->freelist);  in prealloc_elems_and_freelist()
    60  pcpu_freelist_populate(&smap->freelist, smap->elems, elem_size,  in prealloc_elems_and_freelist()
    61  smap->map.max_entries);  in prealloc_elems_and_freelist()
    65  bpf_map_area_free(smap->elems);  in prealloc_elems_and_freelist()
    73  struct bpf_stack_map *smap;  in stack_map_alloc() local
    [all …]
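prealloc_elems_and_freelist() above sizes one contiguous area as elem_size times max_entries and then threads every slot onto a freelist, so later map updates never hit the allocator. A simplified, single-threaded userspace analog of that pattern is sketched below; the names (elem_pool, pool_init, pool_get, pool_put) are made up for illustration, and the kernel version additionally uses bpf_map_area_alloc() and a per-CPU freelist.

```c
/* Simplified analog of "allocate one arena, then populate a freelist".
 * Not a kernel API: everything here is hypothetical and single-threaded.
 */
#include <stdlib.h>

struct free_node {
	struct free_node *next;
};

struct elem_pool {
	void *elems;              /* one contiguous allocation */
	struct free_node *free;   /* singly linked list of unused slots */
	size_t elem_size;
};

int pool_init(struct elem_pool *p, size_t elem_size, size_t max_entries)
{
	if (elem_size < sizeof(struct free_node))
		elem_size = sizeof(struct free_node);

	p->elems = calloc(max_entries, elem_size);
	if (!p->elems)
		return -1;
	p->elem_size = elem_size;
	p->free = NULL;

	/* Thread every preallocated slot onto the freelist. */
	for (size_t i = 0; i < max_entries; i++) {
		struct free_node *n =
			(void *)((char *)p->elems + i * elem_size);
		n->next = p->free;
		p->free = n;
	}
	return 0;
}

void *pool_get(struct elem_pool *p)
{
	struct free_node *n = p->free;

	if (n)
		p->free = n->next;
	return n;
}

void pool_put(struct elem_pool *p, void *elem)
{
	struct free_node *n = elem;

	n->next = p->free;
	p->free = n;
}
```

After pool_init(), pool_get() and pool_put() only recycle the preallocated slots, which mirrors why the stack map preallocates: element get/put on the hot path never calls the allocator.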
bpf_inode_storage.c
    40  struct bpf_local_storage_map *smap;  in inode_storage_lookup() local
    52  smap = (struct bpf_local_storage_map *)map;  in inode_storage_lookup()
    53  return bpf_local_storage_lookup(inode_storage, smap, cacheit_lockit);  in inode_storage_lookup()
/linux/fs/btrfs/

bio.c
    486  struct btrfs_io_stripe *smap, int mirror_num)  in btrfs_submit_bio() argument
    491  bio->bi_iter.bi_sector = smap->physical >> SECTOR_SHIFT;  in btrfs_submit_bio()
    493  btrfs_bio(bio)->orig_physical = smap->physical;  in btrfs_submit_bio()
    494  bio->bi_private = smap->dev;  in btrfs_submit_bio()
    496  btrfs_submit_dev_bio(smap->dev, bio);  in btrfs_submit_bio()
    529  struct btrfs_io_stripe smap;  member
    586  btrfs_submit_bio(bio, async->bioc, &async->smap, async->mirror_num);  in run_one_async_done()
    628  struct btrfs_io_stripe *smap, int mirror_num)  in btrfs_wq_submit_bio() argument
    639  async->smap = *smap;  in btrfs_wq_submit_bio()
    676  struct btrfs_io_stripe smap;  in btrfs_submit_chunk() local
    [all …]
volumes.h
    683  struct btrfs_io_stripe *smap, int *mirror_num_ret);
    685  struct btrfs_io_stripe *smap, u64 logical,
volumes.c
    6362  const struct btrfs_io_stripe *smap,  in is_single_device_io() argument
    6367  if (!smap)  in is_single_device_io()
    6550  struct btrfs_io_stripe *smap, int *mirror_num_ret)  in btrfs_map_block() argument
    6650  if (is_single_device_io(fs_info, smap, map, num_alloc_stripes, op,  in btrfs_map_block()
    6652  ret = set_io_stripe(fs_info, logical, length, smap, map, &io_geom);  in btrfs_map_block()
    8236  struct btrfs_io_stripe *smap,  in map_raid56_repair_block() argument
    8251  smap->dev = bioc->stripes[i].dev;  in map_raid56_repair_block()
    8252  smap->physical = bioc->stripes[i].physical +  in map_raid56_repair_block()
    8271  struct btrfs_io_stripe *smap, u64 logical,  in btrfs_map_repair_block() argument
    8282  &bioc, smap, &mirror_ret);  in btrfs_map_repair_block()
    [all …]
/linux/fs/nilfs2/

page.c
    245  struct address_space *smap)  in nilfs_copy_dirty_pages() argument
    254  if (!filemap_get_folios_tag(smap, &index, (pgoff_t)-1,  in nilfs_copy_dirty_pages()
    300  struct address_space *smap)  in nilfs_copy_back_pages() argument
    308  n = filemap_get_folios(smap, &start, ~0UL, &fbatch);  in nilfs_copy_back_pages()
    329  xa_lock_irq(&smap->i_pages);  in nilfs_copy_back_pages()
    330  f = __xa_erase(&smap->i_pages, index);  in nilfs_copy_back_pages()
    332  smap->nrpages--;  in nilfs_copy_back_pages()
    333  xa_unlock_irq(&smap->i_pages);  in nilfs_copy_back_pages()
/linux/fs/xfs/

xfs_reflink.c
    1129  struct xfs_bmbt_irec smap;  in xfs_reflink_remap_extent() local
    1181  &smap, &nimaps, 0);  in xfs_reflink_remap_extent()
    1184  ASSERT(nimaps == 1 && smap.br_startoff == dmap->br_startoff);  in xfs_reflink_remap_extent()
    1185  smap_real = xfs_bmap_is_real_extent(&smap);  in xfs_reflink_remap_extent()
    1191  dmap->br_blockcount = min(dmap->br_blockcount, smap.br_blockcount);  in xfs_reflink_remap_extent()
    1192  ASSERT(dmap->br_blockcount == smap.br_blockcount);  in xfs_reflink_remap_extent()
    1194  trace_xfs_reflink_remap_extent_dest(ip, &smap);  in xfs_reflink_remap_extent()
    1201  if (dmap->br_startblock == smap.br_startblock) {  in xfs_reflink_remap_extent()
    1202  if (dmap->br_state != smap.br_state) {  in xfs_reflink_remap_extent()
    1211  smap.br_state == XFS_EXT_UNWRITTEN)  in xfs_reflink_remap_extent()
    [all …]
/linux/tools/testing/selftests/bpf/progs/

local_storage.c
    99  if (!local_storage || local_storage->smap)  in BPF_PROG()
    176  if (!sk->sk_bpf_storage || sk->sk_bpf_storage->smap)  in BPF_PROG()
/linux/drivers/net/ethernet/mellanox/mlx5/core/

hwmon.c
    209  DECLARE_BITMAP(smap, BITS_PER_TYPE(sensor_map));  in mlx5_hwmon_init_sensors_indexes()
    214  bitmap_from_u64(smap, sensor_map);  in mlx5_hwmon_init_sensors_indexes()
    216  for_each_set_bit(bit_pos, smap, BITS_PER_TYPE(sensor_map)) {  in mlx5_hwmon_init_sensors_indexes()
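mlx5_hwmon_init_sensors_indexes() loads a 64-bit sensor_map value into a kernel bitmap and then walks its set bits with for_each_set_bit(). DECLARE_BITMAP(), bitmap_from_u64() and for_each_set_bit() are kernel-only helpers; a plain userspace equivalent of the same walk over a uint64_t might look like the sketch below (the sensor_map value is just an example).

```c
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Example value standing in for the device's sensor_map register. */
	uint64_t sensor_map = 0x115ULL;	/* bits 0, 2, 4 and 8 set */

	/* Visit every set bit, lowest first, like for_each_set_bit(). */
	for (uint64_t m = sensor_map; m; m &= m - 1) {
		unsigned int bit_pos = __builtin_ctzll(m);

		printf("sensor index %u\n", bit_pos);
	}
	return 0;
}
```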
/linux/lib/zstd/common/

cpu.h
    175  B(smap, 20)
/linux/fs/

dax.c
    2005  const struct iomap *smap = &it_src->iomap;  in dax_range_compare_iter() local
    2011  len = min(len, min(smap->length, dmap->length));  in dax_range_compare_iter()
    2013  if (smap->type == IOMAP_HOLE && dmap->type == IOMAP_HOLE) {  in dax_range_compare_iter()
    2018  if (smap->type == IOMAP_HOLE || dmap->type == IOMAP_HOLE) {  in dax_range_compare_iter()
    2024  ret = dax_iomap_direct_access(smap, pos1, ALIGN(pos1 + len, PAGE_SIZE),  in dax_range_compare_iter()
/linux/include/linux/

bpf.h
    148  int (*map_local_storage_charge)(struct bpf_local_storage_map *smap,
    150  void (*map_local_storage_uncharge)(struct bpf_local_storage_map *smap,
/linux/arch/x86/kvm/svm/

svm.c
    4793  bool smep, smap, is_user;  in svm_check_emulate_instruction() local
    4897  smap = kvm_is_cr4_bit_set(vcpu, X86_CR4_SMAP);  in svm_check_emulate_instruction()
    4899  if (smap && (!smep || is_user)) {  in svm_check_emulate_instruction()
/linux/Documentation/virt/kvm/x86/

mmu.rst
    192  Contains the value of cr4.smap && !cr0.wp for which the page is valid
/linux/arch/x86/kvm/mmu/

mmu.c
    210  BUILD_MMU_ROLE_REGS_ACCESSOR(cr4, smap, X86_CR4_SMAP);
    230  BUILD_MMU_ROLE_ACCESSOR(ext, cr4, smap);
/linux/tools/arch/x86/kcpuid/

cpuid.csv
    209  7, 0, ebx, 20, smap, Supervisor mode access prevention
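The cpuid.csv row records SMAP as CPUID leaf 7, sub-leaf 0, EBX bit 20, the same bit the zstd cpu.h hit above tests with B(smap, 20). A small sketch that reads that bit from userspace, assuming GCC/Clang's <cpuid.h> is available:

```c
#include <cpuid.h>
#include <stdio.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	/* Leaf 7, sub-leaf 0: structured extended feature flags. */
	if (!__get_cpuid_count(7, 0, &eax, &ebx, &ecx, &edx)) {
		puts("CPUID leaf 7 not supported");
		return 1;
	}

	/* EBX bit 20 = SMAP (Supervisor Mode Access Prevention). */
	printf("SMAP %s\n", (ebx >> 20) & 1 ? "supported" : "not supported");
	return 0;
}
```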