
Searched refs:regions (Results 1 – 25 of 372) sorted by relevance


/linux/arch/powerpc/mm/nohash/
kaslr_booke.c
23 struct regions { struct
38 struct regions __initdata regions; argument
113 if (regions.reserved_mem < 0) in overlaps_reserved_region()
117 for (subnode = fdt_first_subnode(fdt, regions.reserved_mem); in overlaps_reserved_region()
125 while (len >= (regions.reserved_mem_addr_cells + in overlaps_reserved_region()
126 regions.reserved_mem_size_cells)) { in overlaps_reserved_region()
128 if (regions.reserved_mem_addr_cells == 2) in overlaps_reserved_region()
131 reg += regions.reserved_mem_addr_cells; in overlaps_reserved_region()
132 len -= 4 * regions.reserved_mem_addr_cells; in overlaps_reserved_region()
135 if (regions.reserved_mem_size_cells == 2) in overlaps_reserved_region()
[all …]
/linux/drivers/mtd/chips/
jedec_probe.c
275 const uint32_t regions[6]; member
307 .regions = {
319 .regions = {
334 .regions = {
349 .regions = {
364 .regions = {
379 .regions = {
395 .regions = {
412 .regions = {
429 .regions = {
[all …]
/linux/mm/damon/tests/
vaddr-kunit.h
44 * discontiguous regions which cover every mapped area. However, the three
45 * regions should not include the two biggest unmapped areas in the original
47 * heap and the mmap()-ed regions, and 2) the mmap()-ed regions and stack.
54 * three regions and returns. For more detail, refer to the comment of
60 * mapped. To cover every mapping, the three regions should start with 10,
63 * unmapped areas, and thus it should be converted to three regions of 10-25,
69 struct damon_addr_range regions[3] = {0}; in damon_test_three_regions_in_vmas() local
84 __damon_va_three_regions(&mm, regions); in damon_test_three_regions_in_vmas()
86 KUNIT_EXPECT_EQ(test, 10ul, regions[ in damon_test_three_regions_in_vmas()
130 damon_do_test_apply_three_regions(struct kunit *test, unsigned long *regions, int nr_regions, struct damon_addr_range *three_regions, unsigned long *expected, int nr_expected) damon_do_test_apply_three_regions() argument
164 unsigned long regions[] = {10, 20, 20, 30, 50, 55, 55, 57, 57, 59, damon_test_apply_three_regions1() local
186 unsigned long regions[] = {10, 20, 20, 30, 50, 55, 55, 57, 57, 59, damon_test_apply_three_regions2() local
210 unsigned long regions[] = {10, 20, 20, 30, 50, 55, 55, 57, 57, 59, damon_test_apply_three_regions3() local
235 unsigned long regions[] = {10, 20, 20, 30, 50, 55, 55, 57, 57, 59, damon_test_apply_three_regions4() local
[all …]
/linux/drivers/gpu/drm/nouveau/nvkm/nvfw/
acr.c
130 hdr->regions.no_regions); in flcn_acr_desc_dump()
132 for (i = 0; i < ARRAY_SIZE(hdr->regions.region_props); i++) { in flcn_acr_desc_dump()
135 hdr->regions.region_props[i].start_addr); in flcn_acr_desc_dump()
137 hdr->regions.region_props[i].end_addr); in flcn_acr_desc_dump()
139 hdr->regions.region_props[i].region_id); in flcn_acr_desc_dump()
141 hdr->regions.region_props[i].read_mask); in flcn_acr_desc_dump()
143 hdr->regions.region_props[i].write_mask); in flcn_acr_desc_dump()
145 hdr->regions.region_props[i].client_mask); in flcn_acr_desc_dump()
173 hdr->regions.no_regions); in flcn_acr_desc_v1_dump()
175 for (i = 0; i < ARRAY_SIZE(hdr->regions.region_props); i++) { in flcn_acr_desc_v1_dump()
[all …]
/linux/drivers/vfio/platform/
vfio_platform_common.c
144 vdev->regions = kcalloc(cnt, sizeof(struct vfio_platform_region), in vfio_platform_regions_init()
146 if (!vdev->regions) in vfio_platform_regions_init()
153 vdev->regions[i].addr = res->start; in vfio_platform_regions_init()
154 vdev->regions[i].size = resource_size(res); in vfio_platform_regions_init()
155 vdev->regions[i].flags = 0; in vfio_platform_regions_init()
159 vdev->regions[i].type = VFIO_PLATFORM_REGION_TYPE_MMIO; in vfio_platform_regions_init()
160 vdev->regions[i].flags |= VFIO_REGION_INFO_FLAG_READ; in vfio_platform_regions_init()
162 vdev->regions[i].flags |= in vfio_platform_regions_init()
169 if (!(vdev->regions[i].addr & ~PAGE_MASK) && in vfio_platform_regions_init()
170 !(vdev->regions[i].size & ~PAGE_MASK)) in vfio_platform_regions_init()
[all …]
/linux/tools/testing/memblock/tests/
alloc_exact_nid_api.c
30 struct memblock_region *new_rgn = &memblock.reserved.regions[0]; in alloc_exact_nid_top_down_numa_simple_check()
31 struct memblock_region *req_node = &memblock.memory.regions[nid_req]; in alloc_exact_nid_top_down_numa_simple_check()
82 struct memblock_region *new_rgn = &memblock.reserved.regions[1]; in alloc_exact_nid_top_down_numa_part_reserved_check()
83 struct memblock_region *req_node = &memblock.memory.regions[nid_req]; in alloc_exact_nid_top_down_numa_part_reserved_check()
143 struct memblock_region *new_rgn = &memblock.reserved.regions[0]; in alloc_exact_nid_top_down_numa_split_range_low_check()
144 struct memblock_region *req_node = &memblock.memory.regions[nid_req]; in alloc_exact_nid_top_down_numa_split_range_low_check()
200 struct memblock_region *new_rgn = &memblock.reserved.regions[0]; in alloc_exact_nid_top_down_numa_no_overlap_split_check()
201 struct memblock_region *req_node = &memblock.memory.regions[nid_req]; in alloc_exact_nid_top_down_numa_no_overlap_split_check()
202 struct memblock_region *node2 = &memblock.memory.regions[6]; in alloc_exact_nid_top_down_numa_no_overlap_split_check()
258 struct memblock_region *new_rgn = &memblock.reserved.regions[0]; in alloc_exact_nid_top_down_numa_no_overlap_low_check()
[all …]
alloc_nid_api.c
66 struct memblock_region *rgn = &memblock.reserved.regions[0]; in alloc_nid_top_down_simple_check()
118 struct memblock_region *rgn = &memblock.reserved.regions[0]; in alloc_nid_top_down_end_misaligned_check()
169 struct memblock_region *rgn = &memblock.reserved.regions[0]; in alloc_nid_exact_address_generic_check()
221 struct memblock_region *rgn = &memblock.reserved.regions[0]; in alloc_nid_top_down_narrow_range_check()
311 struct memblock_region *rgn = &memblock.reserved.regions[0]; in alloc_nid_min_reserved_generic_check()
363 struct memblock_region *rgn = &memblock.reserved.regions[0]; in alloc_nid_max_reserved_generic_check()
416 struct memblock_region *rgn1 = &memblock.reserved.regions[1]; in alloc_nid_top_down_reserved_with_space_check()
417 struct memblock_region *rgn2 = &memblock.reserved.regions[0]; in alloc_nid_top_down_reserved_with_space_check()
481 struct memblock_region *rgn = &memblock.reserved.regions[0]; in alloc_nid_reserved_full_merge_generic_check()
543 struct memblock_region *rgn1 = &memblock.reserved.regions[1]; in alloc_nid_top_down_reserved_no_space_check()
[all …]
basic_api.c
17 ASSERT_NE(memblock.memory.regions, NULL); in memblock_initialization_check()
22 ASSERT_NE(memblock.reserved.regions, NULL); in memblock_initialization_check()
45 rgn = &memblock.memory.regions[0]; in memblock_add_simple_check()
78 rgn = &memblock.memory.regions[0]; in memblock_add_node_simple_check()
121 rgn1 = &memblock.memory.regions[0]; in memblock_add_disjoint_check()
122 rgn2 = &memblock.memory.regions[1]; in memblock_add_disjoint_check()
175 rgn = &memblock.memory.regions[0]; in memblock_add_overlap_top_check()
227 rgn = &memblock.memory.regions[0]; in memblock_add_overlap_bottom_check()
276 rgn = &memblock.memory.regions[0]; in memblock_add_within_check()
347 rgn = &memblock.memory.regions[0]; in memblock_add_between_check()
[all …]
alloc_api.c
26 struct memblock_region *rgn = &memblock.reserved.regions[0]; in alloc_top_down_simple_check()
73 struct memblock_region *rgn1 = &memblock.reserved.regions[1]; in alloc_top_down_disjoint_check()
74 struct memblock_region *rgn2 = &memblock.reserved.regions[0]; in alloc_top_down_disjoint_check()
125 struct memblock_region *rgn = &memblock.reserved.regions[0]; in alloc_top_down_before_check()
168 struct memblock_region *rgn = &memblock.reserved.regions[0]; in alloc_top_down_after_check()
217 struct memblock_region *rgn = &memblock.reserved.regions[0]; in alloc_top_down_second_fit_check()
266 struct memblock_region *rgn = &memblock.reserved.regions[0]; in alloc_in_between_generic_check()
416 struct memblock_region *rgn = &memblock.reserved.regions[0]; in alloc_limited_space_generic_check()
450 struct memblock_region *rgn = &memblock.reserved.regions[0]; in alloc_no_memory_generic_check()
484 struct memblock_region *rgn = &memblock.reserved.regions[0]; in alloc_too_large_generic_check()
[all …]
alloc_helpers_api.c
20 struct memblock_region *rgn = &memblock.reserved.regions[0]; in alloc_from_simple_generic_check()
63 struct memblock_region *rgn = &memblock.reserved.regions[0]; in alloc_from_misaligned_generic_check()
110 struct memblock_region *rgn = &memblock.reserved.regions[0]; in alloc_from_top_down_high_addr_check()
153 struct memblock_region *rgn = &memblock.reserved.regions[0]; in alloc_from_top_down_no_space_above_check()
190 struct memblock_region *rgn = &memblock.reserved.regions[0]; in alloc_from_top_down_min_addr_cap_check()
236 struct memblock_region *rgn = &memblock.reserved.regions[0]; in alloc_from_bottom_up_high_addr_check()
278 struct memblock_region *rgn = &memblock.reserved.regions[0]; in alloc_from_bottom_up_no_space_above_check()
314 struct memblock_region *rgn = &memblock.reserved.regions[0]; in alloc_from_bottom_up_min_addr_cap_check()
/linux/drivers/vfio/cdx/
main.c
19 vdev->regions = kcalloc(count, sizeof(struct vfio_cdx_region), in vfio_cdx_open_device()
21 if (!vdev->regions) in vfio_cdx_open_device()
27 vdev->regions[i].addr = res->start; in vfio_cdx_open_device()
28 vdev->regions[i].size = resource_size(res); in vfio_cdx_open_device()
29 vdev->regions[i].type = res->flags; in vfio_cdx_open_device()
34 if (!(vdev->regions[i].addr & ~PAGE_MASK) && in vfio_cdx_open_device()
35 !(vdev->regions[i].size & ~PAGE_MASK)) in vfio_cdx_open_device()
36 vdev->regions[i].flags |= in vfio_cdx_open_device()
38 vdev->regions[i].flags |= VFIO_REGION_INFO_FLAG_READ; in vfio_cdx_open_device()
40 vdev->regions[i].flags |= VFIO_REGION_INFO_FLAG_WRITE; in vfio_cdx_open_device()
[all …]
/linux/drivers/vfio/fsl-mc/
vfio_fsl_mc.c
30 vdev->regions = kcalloc(count, sizeof(struct vfio_fsl_mc_region), in vfio_fsl_mc_open_device()
32 if (!vdev->regions) in vfio_fsl_mc_open_device()
36 struct resource *res = &mc_dev->regions[i]; in vfio_fsl_mc_open_device()
39 vdev->regions[i].addr = res->start; in vfio_fsl_mc_open_device()
40 vdev->regions[i].size = resource_size(res); in vfio_fsl_mc_open_device()
41 vdev->regions[i].type = mc_dev->regions[i].flags & IORESOURCE_BITS; in vfio_fsl_mc_open_device()
46 if (!no_mmap && !(vdev->regions[i].addr & ~PAGE_MASK) && in vfio_fsl_mc_open_device()
47 !(vdev->regions[i].size & ~PAGE_MASK)) in vfio_fsl_mc_open_device()
48 vdev->regions[i].flags |= in vfio_fsl_mc_open_device()
50 vdev->regions[i].flags |= VFIO_REGION_INFO_FLAG_READ; in vfio_fsl_mc_open_device()
[all …]
/linux/mm/
memblock.c
39 * Memblock is a method of managing memory regions during the early
44 * regions. There are several types of these collections:
50 * * ``reserved`` - describes the regions that were allocated
58 * which contains an array of memory regions along with
66 * arrays during addition of new regions. This feature should be used
116 .memory.regions = memblock_memory_init_regions,
120 .reserved.regions = memblock_reserved_init_regions,
130 .regions = memblock_physmem_init_regions,
145 for (i = 0, rgn = &memblock_type->regions[0]; \
147 i++, rgn = &memblock_type->regions[
[all …]
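
For orientation, a minimal sketch (not taken from the results above) of how early-boot code feeds the two collections the memblock.c comment names: "memory" for RAM the kernel may use, "reserved" for ranges it must not hand out. The base address and sizes are invented for illustration; memblock_add() and memblock_reserve() are the real interfaces.

#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/sizes.h>

static void __init example_register_ranges(void)
{
	/* Make a 256 MiB bank known to the "memory" collection. */
	memblock_add(0x80000000UL, SZ_256M);

	/* Record a 16 MiB firmware carve-out in the "reserved" collection. */
	memblock_reserve(0x80000000UL, SZ_16M);
}
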
/linux/tools/testing/selftests/damon/
access_memory_even.c
5 * Receives the number of regions and the size of each region from the user. Allocates
6 * the regions and repeatedly accesses the even-numbered (starting from zero) regions.
16 char **regions; in main() local
29 regions = malloc(sizeof(*regions) * nr_regions); in main()
31 regions[i] = malloc(sz_region); in main()
36 memset(regions[i], i, sz_region); in main()
access_memory.c
13 char **regions; in main() local
30 regions = malloc(sizeof(*regions) * nr_regions); in main()
32 regions[i] = malloc(sz_region); in main()
38 memset(regions[i], i, sz_region); in main()
/linux/drivers/virt/nitro_enclaves/
ne_misc_dev_test.c
117 phys_contig_mem_regions.regions = kunit_kcalloc(test, MAX_PHYS_REGIONS, in ne_misc_dev_test_merge_phys_contig_memory_regions()
118 sizeof(*phys_contig_mem_regions.regions), in ne_misc_dev_test_merge_phys_contig_memory_regions()
120 KUNIT_ASSERT_TRUE(test, phys_contig_mem_regions.regions); in ne_misc_dev_test_merge_phys_contig_memory_regions()
135 KUNIT_EXPECT_EQ(test, phys_contig_mem_regions.regions[num - 1].start, in ne_misc_dev_test_merge_phys_contig_memory_regions()
137 KUNIT_EXPECT_EQ(test, range_len(&phys_contig_mem_regions.regions[num - 1]), in ne_misc_dev_test_merge_phys_contig_memory_regions()
141 kunit_kfree(test, phys_contig_mem_regions.regions); in ne_misc_dev_test_merge_phys_contig_memory_regions()
/linux/drivers/net/dsa/sja1105/
sja1105_devlink.c
85 priv->regions = kcalloc(num_regions, sizeof(struct devlink_region *), in sja1105_setup_devlink_regions()
87 if (!priv->regions) in sja1105_setup_devlink_regions()
97 dsa_devlink_region_destroy(priv->regions[i]); in sja1105_setup_devlink_regions()
99 kfree(priv->regions); in sja1105_setup_devlink_regions()
103 priv->regions[i] = region; in sja1105_setup_devlink_regions()
115 dsa_devlink_region_destroy(priv->regions[i]); in sja1105_teardown_devlink_regions()
117 kfree(priv->regions); in sja1105_teardown_devlink_regions()
/linux/Documentation/admin-guide/device-mapper/
dm-clone.rst
58 3. A small metadata device - it records which regions are already valid in the
59 destination device, i.e., which regions have already been hydrated, or have
68 dm-clone divides the source and destination devices into fixed-sized regions.
77 Reads and writes from/to hydrated regions are serviced from the destination
93 as a hint to skip hydration of the regions covered by the request, i.e., it
111 A message `hydration_threshold <#regions>` can be used to set the maximum number
112 of regions being copied, the default being 1 region.
116 region size. A message `hydration_batch_size <#regions>` can be used to tune the
118 dm-clone trying to batch together contiguous regions, so we copy the data in
119 batches of this many regions.
[all …]
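
The hydration bookkeeping described above can be pictured with a hedged userspace sketch; this is not dm-clone's actual metadata format, and struct clone_state is invented for the example. One bit per fixed-size region is set once the region has been copied, and consulted to decide whether a read is served from the destination or still from the source.

#include <stdbool.h>
#include <stdint.h>

struct clone_state {
	uint64_t region_size;	/* bytes per region */
	uint8_t *hydrated;	/* bitmap: one bit per region */
};

static bool region_is_hydrated(const struct clone_state *cs, uint64_t nr)
{
	return cs->hydrated[nr / 8] & (1u << (nr % 8));
}

static void mark_hydrated(struct clone_state *cs, uint64_t nr)
{
	cs->hydrated[nr / 8] |= 1u << (nr % 8);
}

/* Route a read at byte offset 'off': destination once hydrated, else source. */
static const char *route_read(const struct clone_state *cs, uint64_t off)
{
	return region_is_hydrated(cs, off / cs->region_size)
		? "destination" : "source";
}
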
/linux/mm/damon/
vaddr.c
56 * Functions for the initial monitoring target regions construction
60 * Size-evenly split a region into 'nr_pieces' small regions
107 * Find three regions separated by the two biggest unmapped regions
110 * regions an array of three address ranges where the results will be saved
112 * This function receives an address space and finds three regions in it which
113 * are separated by the two biggest unmapped regions in the space. Please refer to
120 struct damon_addr_range regions[3]) in __damon_va_three_regions()
163 regions[0].start = ALIGN(start, DAMON_MIN_REGION); in __damon_va_three_regions()
164 regions[ in __damon_va_three_regions()
240 struct damon_addr_range regions[3]; __damon_va_init_regions() local
[all …]
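
The comments above describe picking the two biggest unmapped gaps in a sorted list of mappings. An illustrative userspace sketch of that selection, not the kernel's __damon_va_three_regions(); the struct range type and three_regions() helper are invented for the example:

#include <stddef.h>

struct range { unsigned long start, end; };

/* Gap i is the unmapped space between maps[i - 1] and maps[i]. */
static unsigned long gap_at(const struct range *maps, size_t i)
{
	return maps[i].start - maps[i - 1].end;
}

/* maps[] must be sorted, non-overlapping, and hold at least three entries. */
static void three_regions(const struct range *maps, size_t n, struct range out[3])
{
	size_t big1 = gap_at(maps, 1) >= gap_at(maps, 2) ? 1 : 2;
	size_t big2 = (big1 == 1) ? 2 : 1;

	for (size_t i = 3; i < n; i++) {
		if (gap_at(maps, i) > gap_at(maps, big1)) {
			big2 = big1;
			big1 = i;
		} else if (gap_at(maps, i) > gap_at(maps, big2)) {
			big2 = i;
		}
	}

	/* Order the two gap indices so the regions come out low to high. */
	size_t lo = big1 < big2 ? big1 : big2;
	size_t hi = big1 < big2 ? big2 : big1;

	out[0] = (struct range){ maps[0].start, maps[lo - 1].end };
	out[1] = (struct range){ maps[lo].start, maps[hi - 1].end };
	out[2] = (struct range){ maps[hi].start, maps[n - 1].end };
}
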
/linux/Documentation/admin-guide/mm/damon/
lru_sort.rst
31 DAMON_LRU_SORT finds hot pages (pages of memory regions that show access
33 memory regions that show no access for a time longer than a
85 Access frequency threshold for hot memory region identification, in permil.
94 Time threshold for cold memory region identification, in microseconds.
179 Minimum number of monitoring regions.
181 The minimum number of monitoring regions of DAMON for the cold memory
190 Maximum number of monitoring regions.
192 The maximum number of monitoring regions of DAMON for the cold memory
225 Number of hot memory regions for which LRU-sorting was tried.
230 Total bytes of hot memory regions for which LRU-sorting was tried.
[all …]
/linux/Documentation/mm/damon/
design.rst
88 and updates the monitoring target address regions so that entire memory
97 address regions is just wasteful. However, because DAMON can deal with some
98 level of noise using the adaptive regions adjustment mechanism, tracking every
104 distinct regions that cover every mapped area of the address space. The two
105 gaps between the three regions are the two biggest unmapped areas in the given
115 (small mmap()-ed regions and munmap()-ed regions)
151 ``update interval``, ``minimum number of regions``, and ``maximum number of
152 regions``.
199 controllable by setting the number of regions. DAMON allows users to set the
200 minimum and the maximum number of regions for the trade-off.
[all …]
/linux/drivers/net/wireless/ath/ath10k/
coredump.c
1298 .regions = qca6174_hw10_mem_regions,
1307 .regions = qca6174_hw10_mem_regions,
1316 .regions = qca6174_hw10_mem_regions,
1325 .regions = qca6174_hw21_mem_regions,
1334 .regions = qca6174_hw30_mem_regions,
1343 .regions = qca6174_hw30_mem_regions,
1352 .regions = qca6174_hw30_sdio_mem_regions,
1361 .regions = qca6174_hw30_mem_regions,
1370 .regions = qca988x_hw20_mem_regions,
1379 .regions = qca9984_hw10_mem_regions,
[all …]
/linux/drivers/gpu/drm/nouveau/nvkm/subdev/acr/
gp102.c
203 desc->regions.no_regions = 2; in gp102_acr_load_setup()
204 desc->regions.region_props[0].start_addr = acr->wpr_start >> 8; in gp102_acr_load_setup()
205 desc->regions.region_props[0].end_addr = acr->wpr_end >> 8; in gp102_acr_load_setup()
206 desc->regions.region_props[0].region_id = 1; in gp102_acr_load_setup()
207 desc->regions.region_props[0].read_mask = 0xf; in gp102_acr_load_setup()
208 desc->regions.region_props[0].write_mask = 0xc; in gp102_acr_load_setup()
209 desc->regions.region_props[0].client_mask = 0x2; in gp102_acr_load_setup()
210 desc->regions.region_props[0].shadow_mem_start_addr = acr->shadow_start >> 8; in gp102_acr_load_setup()
/linux/Documentation/networking/devlink/
devlink-region.rst
7 ``devlink`` regions enable access to driver-defined address regions using
10 Each device can create and register its own supported address regions. The
22 address regions that are otherwise inaccessible to the user.
54 # Show all of the exposed regions with region sizes:
81 As regions are likely very device or driver specific, no generic regions are
83 specific regions a driver supports.
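
On the driver side, registering one such region is brief. A hedged sketch, assuming the in-kernel devlink_region_create() API; the region name, snapshot count, and size are chosen arbitrarily for illustration:

#include <linux/sizes.h>
#include <linux/slab.h>
#include <net/devlink.h>

static const struct devlink_region_ops example_region_ops = {
	.name = "example-config",	/* hypothetical region name */
	.destructor = kfree,		/* frees snapshot data handed to devlink */
};

static struct devlink_region *
example_register_region(struct devlink *devlink)
{
	/* At most one snapshot held at a time, 64 KiB of address space. */
	return devlink_region_create(devlink, &example_region_ops, 1, SZ_64K);
}
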
/linux/drivers/gpu/drm/i915/gem/selftests/
i915_gem_dmabuf.c
96 struct intel_memory_region *lmem = i915->mm.regions[INTEL_REGION_LMEM_0]; in igt_dmabuf_import_same_driver_lmem()
220 struct intel_memory_region **regions, in igt_dmabuf_import_same_driver() argument
234 regions, num_regions); in igt_dmabuf_import_same_driver()
278 if (obj->mm.region != i915->mm.regions[INTEL_REGION_SMEM]) { in igt_dmabuf_import_same_driver()
326 struct intel_memory_region *smem = i915->mm.regions[INTEL_REGION_SMEM]; in igt_dmabuf_import_same_driver_smem()
334 struct intel_memory_region *regions[2]; in igt_dmabuf_import_same_driver_lmem_smem() local
336 if (!i915->mm.regions[INTEL_REGION_LMEM_0]) in igt_dmabuf_import_same_driver_lmem_smem()
339 regions[0] = i915->mm.regions[INTEL_REGION_LMEM_0]; in igt_dmabuf_import_same_driver_lmem_smem()
340 regions[1] = i915->mm.regions[INTEL_REGION_SMEM]; in igt_dmabuf_import_same_driver_lmem_smem()
341 return igt_dmabuf_import_same_driver(i915, regions, 2); in igt_dmabuf_import_same_driver_lmem_smem()
