/linux/arch/powerpc/mm/nohash/
  kaslr_booke.c
     23  struct regions {  struct
     38  struct regions __initdata regions;  argument
    113  if (regions.reserved_mem < 0)  in overlaps_reserved_region()
    117  for (subnode = fdt_first_subnode(fdt, regions.reserved_mem);  in overlaps_reserved_region()
    125  while (len >= (regions.reserved_mem_addr_cells +  in overlaps_reserved_region()
    126  regions.reserved_mem_size_cells)) {  in overlaps_reserved_region()
    128  if (regions.reserved_mem_addr_cells == 2)  in overlaps_reserved_region()
    131  reg += regions.reserved_mem_addr_cells;  in overlaps_reserved_region()
    132  len -= 4 * regions.reserved_mem_addr_cells;  in overlaps_reserved_region()
    135  if (regions.reserved_mem_size_cells == 2)  in overlaps_reserved_region()
    [all …]

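Note: the kaslr_booke.c hits walk the FDT /reserved-memory node, where each reg entry is #address-cells + #size-cells 32-bit big-endian cells wide, so a 64-bit value is assembled from two consecutive cells. A minimal userspace sketch of that cell-combining step; the function and variable names here are illustrative, not the kernel's:

    #include <stdint.h>
    #include <stdio.h>
    #include <arpa/inet.h>  /* ntohl()/htonl(); FDT cells are big-endian */

    /* Combine 'cells' consecutive 32-bit big-endian FDT cells into one value.
     * With #address-cells == 2 the high word comes first, then the low word. */
    static uint64_t read_cells(const uint32_t *reg, int cells)
    {
            uint64_t val = ntohl(reg[0]);

            if (cells == 2)
                    val = (val << 32) | ntohl(reg[1]);
            return val;
    }

    int main(void)
    {
            /* A fake reg property: <addr_hi addr_lo size_hi size_lo>. */
            uint32_t reg[] = { htonl(0x0), htonl(0x80000000),
                               htonl(0x0), htonl(0x100000) };
            int addr_cells = 2, size_cells = 2;
            const uint32_t *p = reg;

            uint64_t addr = read_cells(p, addr_cells);
            p += addr_cells;
            uint64_t size = read_cells(p, size_cells);

            printf("reserved: addr=0x%llx size=0x%llx\n",
                   (unsigned long long)addr, (unsigned long long)size);
            return 0;
    }
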
/linux/drivers/mtd/chips/
  jedec_probe.c
    275  const uint32_t regions[6];  member
    307  .regions = {
    319  .regions = {
    334  .regions = {
    349  .regions = {
    364  .regions = {
    379  .regions = {
    395  .regions = {
    412  .regions = {
    429  .regions = {
    [all …]

/linux/mm/damon/
  vaddr.c
     56  * Functions for the initial monitoring target regions construction
     60  * Size-evenly split a region into 'nr_pieces' small regions
    107  * Find three regions separated by the two biggest unmapped regions
    110  * regions	an array of three address ranges in which the results are saved
    112  * This function receives an address space and finds three regions in it which
    113  * are separated by the two biggest unmapped regions in the space. Please refer to
    120  struct damon_addr_range regions[3])  in __damon_va_three_regions()
    163  regions[0].start = ALIGN(start, DAMON_MIN_REGION);  in __damon_va_three_regions()
    164  regions[0].end = ALIGN(first_gap.start, DAMON_MIN_REGION);  in __damon_va_three_regions()
    165  regions[1].start = ALIGN(first_gap.end, DAMON_MIN_REGION);  in __damon_va_three_regions()
    [all …]

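Note: __damon_va_three_regions() keeps the three mapped chunks that remain after cutting out the two biggest unmapped gaps. A self-contained sketch of that selection, assuming a sorted, non-overlapping list of mappings and eliding the DAMON_MIN_REGION alignment; all names are hypothetical:

    #include <stdio.h>

    struct range { unsigned long start, end; };

    /* Given sorted mappings, emit the three regions that remain when the
     * two biggest gaps between them are cut out. */
    static void three_regions(const struct range *maps, int n, struct range out[3])
    {
            struct range gap1 = {0, 0}, gap2 = {0, 0};  /* gap1 is the biggest */

            for (int i = 0; i < n - 1; i++) {
                    unsigned long sz = maps[i + 1].start - maps[i].end;

                    if (sz > gap1.end - gap1.start) {
                            gap2 = gap1;
                            gap1.start = maps[i].end;
                            gap1.end = maps[i + 1].start;
                    } else if (sz > gap2.end - gap2.start) {
                            gap2.start = maps[i].end;
                            gap2.end = maps[i + 1].start;
                    }
            }
            if (gap1.start > gap2.start) {          /* order the gaps by address */
                    struct range t = gap1; gap1 = gap2; gap2 = t;
            }
            out[0] = (struct range){ maps[0].start, gap1.start };
            out[1] = (struct range){ gap1.end, gap2.start };
            out[2] = (struct range){ gap2.end, maps[n - 1].end };
    }

    int main(void)
    {
            struct range maps[] = { {0x1000, 0x2000}, {0x9000, 0xa000},
                                    {0x40000, 0x41000} };
            struct range out[3];

            three_regions(maps, 3, out);
            for (int i = 0; i < 3; i++)
                    printf("region %d: %#lx-%#lx\n", i, out[i].start, out[i].end);
            return 0;
    }
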
/linux/Documentation/admin-guide/device-mapper/
  dm-clone.rst
     58  3. A small metadata device - it records which regions are already valid in the
     59  destination device, i.e., which regions have already been hydrated, or have
     65  Regions  section in Design
     68  dm-clone divides the source and destination devices into fixed-sized regions.
     69  Regions are the unit of hydration, i.e., the minimum amount of data copied from
     77  Reads and writes from/to hydrated regions are serviced from the destination
     93  as a hint to skip hydration of the regions covered by the request, i.e., it
    111  A message `hydration_threshold <#regions>` can be used to set the maximum number
    112  of regions being copied, the default being 1 region.
    116  region size. A message `hydration_batch_size <#regions>` can be used to tune the
    [all …]

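Note: dm-clone's metadata records which fixed-size regions are valid on the destination; conceptually that is one bit per region, indexed by offset >> region_shift. A hedged sketch of that bookkeeping only (this is not dm-clone's on-disk metadata format, and the constants are illustrative):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define REGION_SHIFT 21                         /* 2 MiB regions, illustrative */
    #define NR_REGIONS   64

    static uint8_t hydrated[NR_REGIONS / 8];        /* 1 bit per region */

    static unsigned long region_of(uint64_t byte_offset)
    {
            return byte_offset >> REGION_SHIFT;
    }

    static bool is_hydrated(unsigned long r)
    {
            return hydrated[r / 8] & (1u << (r % 8));
    }

    static void mark_hydrated(unsigned long r)
    {
            hydrated[r / 8] |= 1u << (r % 8);
    }

    int main(void)
    {
            uint64_t off = 5ull << REGION_SHIFT;    /* some I/O inside region 5 */
            unsigned long r = region_of(off);

            if (!is_hydrated(r)) {
                    /* copy region r from source to destination, then: */
                    mark_hydrated(r);
            }
            printf("region %lu hydrated: %d\n", r, is_hydrated(r));
            return 0;
    }
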
/linux/tools/testing/selftests/damon/
  damon_nr_regions.py
     11  Create a process of the given 'real_nr_regions' regions, monitor it using
     21  # stat every monitored region
     46  print('tried regions update failed: %s' % err)
     52  print('tried regions is not collected')
     58  print('tried regions is not created')
     72  print('number of regions that collected are:')
     79  # test min_nr_regions larger than real nr regions
     82  # test max_nr_regions smaller than real nr regions
     85  # test online-tuned max_nr_regions that is smaller than real nr regions
     89  # stat every monitored region
     [all …]

  access_memory.c
     13  char **regions;  in main() local
     30  regions = malloc(sizeof(*regions) * nr_regions);  in main()
     32  regions[i] = malloc(sz_region);  in main()
     38  memset(regions[i], i, sz_region);  in main()

  damos_tried_regions.py
     10  # repeatedly access even-numbered ones in 14 regions of 10 MiB size
     14  # stat every monitored region
     36  print('tried regions update failed: %s' % err)
     42  print('tried regions is not collected')
     48  print('tried regions is not created')

/linux/Documentation/mm/damon/
  design.rst
     88  and updates the monitoring target address regions so that entire memory
     97  address regions is just wasteful. However, because DAMON can deal with some
     98  level of noise using the adaptive regions adjustment mechanism, tracking every
    104  distinct regions that cover every mapped area of the address space. The two
    105  gaps between the three regions are the two biggest unmapped areas in the given
    115  (small mmap()-ed regions and munmap()-ed regions)
    151  ``update interval``, ``minimum number of regions``, and ``maximum number of
    152  regions``.
    199  controllable by setting the number of regions. DAMON allows users to set the
    200  minimum and the maximum number of regions for the trade-off.
    [all …]

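Note: the adaptive regions adjustment mentioned in the design.rst hits keeps the region count between the configured minimum and maximum by merging similar neighbors and splitting large regions; the vaddr.c hits above name the size-even split primitive. A sketch of such a split, with hypothetical names and without DAMON's DAMON_MIN_REGION alignment:

    #include <stdio.h>

    struct region { unsigned long start, end; };

    /* Split [start, end) into nr_pieces regions of (almost) equal size.
     * The last piece absorbs the remainder, since an even split cannot
     * always be exact. */
    static int split_evenly(struct region r, int nr_pieces, struct region *out)
    {
            unsigned long sz = (r.end - r.start) / nr_pieces;

            if (!sz)
                    return -1;
            for (int i = 0; i < nr_pieces; i++) {
                    out[i].start = r.start + i * sz;
                    out[i].end = (i == nr_pieces - 1) ? r.end : out[i].start + sz;
            }
            return 0;
    }

    int main(void)
    {
            struct region r = { 0x1000, 0x10000 }, out[4];

            if (!split_evenly(r, 4, out))
                    for (int i = 0; i < 4; i++)
                            printf("%#lx-%#lx\n", out[i].start, out[i].end);
            return 0;
    }
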
/linux/Documentation/networking/devlink/
  devlink-region.rst
      7  ``devlink`` regions enable access to driver defined address regions using
     10  Each device can create and register its own supported address regions. The
     15  Regions may optionally support triggering snapshots on demand.
     22  address regions that are otherwise inaccessible to the user.
     24  Regions may also be used to provide an additional way to debug complex error
     27  Regions may optionally support capturing a snapshot on demand via the
     34  Regions may optionally allow directly reading from their contents without a
     54  # Show all of the exposed regions with region sizes:
     81  As regions are likely very device or driver specific, no generic regions are
     83  specific regions a driver supports.

/linux/drivers/vfio/cdx/
  main.c
     19  vdev->regions = kcalloc(count, sizeof(struct vfio_cdx_region),  in vfio_cdx_open_device()
     21  if (!vdev->regions)  in vfio_cdx_open_device()
     27  vdev->regions[i].addr = res->start;  in vfio_cdx_open_device()
     28  vdev->regions[i].size = resource_size(res);  in vfio_cdx_open_device()
     29  vdev->regions[i].type = res->flags;  in vfio_cdx_open_device()
     31  * Only regions addressed with PAGE granularity may be  in vfio_cdx_open_device()
     34  if (!(vdev->regions[i].addr & ~PAGE_MASK) &&  in vfio_cdx_open_device()
     35  !(vdev->regions[i].size & ~PAGE_MASK))  in vfio_cdx_open_device()
     36  vdev->regions[i].flags |=  in vfio_cdx_open_device()
     38  vdev->regions[i].flags |= VFIO_REGION_INFO_FLAG_READ;  in vfio_cdx_open_device()
    [all …]

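Note: this driver (and the fsl-mc one below) advertises VFIO_REGION_INFO_FLAG_MMAP only when a region's address and size are both page-aligned; x & ~PAGE_MASK isolates the sub-page bits, so a zero result means aligned. The same test in a standalone userspace program, with made-up region values:

    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            unsigned long page_size = sysconf(_SC_PAGESIZE);
            unsigned long page_mask = ~(page_size - 1);     /* kernel's PAGE_MASK */

            struct { unsigned long addr, size; } regions[] = {
                    { 0xfe000000, 0x10000 },        /* both page-aligned: mmap-able */
                    { 0xfe000100, 0x10000 },        /* offset not page-aligned */
            };

            for (int i = 0; i < 2; i++) {
                    int mmapable = !(regions[i].addr & ~page_mask) &&
                                   !(regions[i].size & ~page_mask);
                    printf("region %d: %s\n", i,
                           mmapable ? "MMAP | READ | WRITE" : "READ | WRITE only");
            }
            return 0;
    }
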
/linux/tools/testing/memblock/tests/
  basic_api.c
     17  ASSERT_NE(memblock.memory.regions, NULL);  in memblock_initialization_check()
     22  ASSERT_NE(memblock.reserved.regions, NULL);  in memblock_initialization_check()
     37  * and size to the collection of available memory regions (memblock.memory).
     45  rgn = &memblock.memory.regions[0];  in memblock_add_simple_check()
     70  * NUMA node and memory flags to the collection of available memory regions.
     78  rgn = &memblock.memory.regions[0];  in memblock_add_node_simple_check()
    114  * available memory regions (memblock.memory). The total size and
    121  rgn1 = &memblock.memory.regions[0];  in memblock_add_disjoint_check()
    122  rgn2 = &memblock.memory.regions[1];  in memblock_add_disjoint_check()
    167  * and has size of two regions minus their intersection. The total size of
    [all …]

  alloc_nid_api.c
     66  struct memblock_region *rgn = &memblock.reserved.regions[0];  in alloc_nid_top_down_simple_check()
    118  struct memblock_region *rgn = &memblock.reserved.regions[0];  in alloc_nid_top_down_end_misaligned_check()
    169  struct memblock_region *rgn = &memblock.reserved.regions[0];  in alloc_nid_exact_address_generic_check()
    221  struct memblock_region *rgn = &memblock.reserved.regions[0];  in alloc_nid_top_down_narrow_range_check()
    307  * Expect a merge of both regions. Only the region size gets updated.
    311  struct memblock_region *rgn = &memblock.reserved.regions[0];  in alloc_nid_min_reserved_generic_check()
    359  * Expect a merge of regions. Only the region size gets updated.
    363  struct memblock_region *rgn = &memblock.reserved.regions[0];  in alloc_nid_max_reserved_generic_check()
    399  * there are two reserved regions at the borders, with a gap big enough to fit
    416  struct memblock_region *rgn1 = &memblock.reserved.regions[1];  in alloc_nid_top_down_reserved_with_space_check()
    [all …]

  alloc_exact_nid_api.c
     30  struct memblock_region *new_rgn = &memblock.reserved.regions[0];  in alloc_exact_nid_top_down_numa_simple_check()
     31  struct memblock_region *req_node = &memblock.memory.regions[nid_req];  in alloc_exact_nid_top_down_numa_simple_check()
     82  struct memblock_region *new_rgn = &memblock.reserved.regions[1];  in alloc_exact_nid_top_down_numa_part_reserved_check()
     83  struct memblock_region *req_node = &memblock.memory.regions[nid_req];  in alloc_exact_nid_top_down_numa_part_reserved_check()
    143  struct memblock_region *new_rgn = &memblock.reserved.regions[0];  in alloc_exact_nid_top_down_numa_split_range_low_check()
    144  struct memblock_region *req_node = &memblock.memory.regions[nid_req];  in alloc_exact_nid_top_down_numa_split_range_low_check()
    200  struct memblock_region *new_rgn = &memblock.reserved.regions[0];  in alloc_exact_nid_top_down_numa_no_overlap_split_check()
    201  struct memblock_region *req_node = &memblock.memory.regions[nid_req];  in alloc_exact_nid_top_down_numa_no_overlap_split_check()
    202  struct memblock_region *node2 = &memblock.memory.regions[6];  in alloc_exact_nid_top_down_numa_no_overlap_split_check()
    258  struct memblock_region *new_rgn = &memblock.reserved.regions[0];  in alloc_exact_nid_top_down_numa_no_overlap_low_check()
    [all …]

  alloc_api.c
     26  struct memblock_region *rgn = &memblock.reserved.regions[0];  in alloc_top_down_simple_check()
     73  struct memblock_region *rgn1 = &memblock.reserved.regions[1];  in alloc_top_down_disjoint_check()
     74  struct memblock_region *rgn2 = &memblock.reserved.regions[0];  in alloc_top_down_disjoint_check()
    121  * Expect a merge of both regions. Only the region size gets updated.
    125  struct memblock_region *rgn = &memblock.reserved.regions[0];  in alloc_top_down_before_check()
    163  * Expect a merge of both regions. Both the base address and size of the region
    168  struct memblock_region *rgn = &memblock.reserved.regions[0];  in alloc_top_down_after_check()
    204  * A test that tries to allocate memory when there are two reserved regions with
    217  struct memblock_region *rgn = &memblock.reserved.regions[0];  in alloc_top_down_second_fit_check()
    254  * A test that tries to allocate memory when there are two reserved regions with
    [all …]

/linux/Documentation/core-api/kho/
  concepts.rst
      9  regions, which could contain serialized system states, across kexec.
     17  that describes preserved memory regions. These regions contain either
     20  memory regions from the KHO FDT.
     30  Scratch Regions
     38  We guarantee that we always have such regions through the scratch regions: On
     39  first boot KHO allocates several physically contiguous memory regions. Since
     40  after kexec these regions will be used by early memory allocations, there is a
     45  used to explicitly define the size of the scratch regions.
     46  The scratch regions are declared as CMA when the page allocator is initialized so

/linux/drivers/virt/nitro_enclaves/
  ne_misc_dev_test.c
     23  * regions = {}
     34  * regions = {}
     45  * regions = {
     58  * regions = {
     72  * regions = {
     87  * regions = {
    102  * regions = {
    117  phys_contig_mem_regions.regions = kunit_kcalloc(test, MAX_PHYS_REGIONS,  in ne_misc_dev_test_merge_phys_contig_memory_regions()
    118  sizeof(*phys_contig_mem_regions.regions),  in ne_misc_dev_test_merge_phys_contig_memory_regions()
    120  KUNIT_ASSERT_TRUE(test, phys_contig_mem_regions.regions);  in ne_misc_dev_test_merge_phys_contig_memory_regions()
    [all …]

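Note: the KUnit comments enumerate regions = { ... } fixtures for merging physically contiguous memory regions; the rule under test is that a range starting exactly at the previous range's end extends that range instead of appending a new entry. A hedged sketch of that rule alone (not the driver's actual helper; names are hypothetical):

    #include <stdio.h>

    struct phys_region { unsigned long long start, size; };

    /* Append [start, start + size) to regions[], merging it into the last
     * entry when physically contiguous with it. Returns the new count. */
    static int add_phys_region(struct phys_region *regions, int n,
                               unsigned long long start, unsigned long long size)
    {
            if (n && regions[n - 1].start + regions[n - 1].size == start) {
                    regions[n - 1].size += size;    /* contiguous: merge */
                    return n;
            }
            regions[n].start = start;
            regions[n].size = size;
            return n + 1;
    }

    int main(void)
    {
            struct phys_region regions[8];
            int n = 0;

            n = add_phys_region(regions, n, 0x1000, 0x1000);
            n = add_phys_region(regions, n, 0x2000, 0x1000);   /* merges */
            n = add_phys_region(regions, n, 0x8000, 0x1000);   /* new entry */

            for (int i = 0; i < n; i++)
                    printf("%#llx + %#llx\n", regions[i].start, regions[i].size);
            return 0;   /* expect 2 regions: 0x1000+0x2000 and 0x8000+0x1000 */
    }
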
/linux/mm/
  memblock.c
     45  * Memblock is a method of managing memory regions during the early
     50  * regions. There are several types of these collections:
     56  * * ``reserved`` - describes the regions that were allocated
     64  * which contains an array of memory regions along with
     72  * arrays during addition of new regions. This feature should be used
    129  .memory.regions = memblock_memory_init_regions,
    133  .reserved.regions = memblock_reserved_init_regions,
    143  .regions = memblock_physmem_init_regions,
    158  for (i = 0, rgn = &memblock_type->regions[0]; \
    160  i++, rgn = &memblock_type->regions[i])
    [all …]

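Note: each memblock collection (memory, reserved, physmem) is a memblock_type wrapping an array of regions, and the for (i = 0, rgn = &memblock_type->regions[0]; ...) hit is the iteration macro over that array. A cut-down userspace model of the same shape, with the kernel's struct fields abbreviated:

    #include <stdio.h>

    struct memblock_region { unsigned long long base, size; };

    struct memblock_type {
            unsigned long cnt;                      /* number of regions */
            struct memblock_region *regions;        /* the array itself */
    };

    /* Same shape as the kernel's for_each_memblock_type() iteration. */
    #define for_each_region(i, type, rgn)                           \
            for (i = 0, rgn = &(type)->regions[0];                  \
                 i < (type)->cnt;                                   \
                 i++, rgn = &(type)->regions[i])

    int main(void)
    {
            struct memblock_region mem[] = {
                    { 0x0, 0x80000000 },
                    { 0x100000000ull, 0x80000000 },
            };
            struct memblock_type memory = { 2, mem };
            struct memblock_region *rgn;
            unsigned long i;

            for_each_region(i, &memory, rgn)
                    printf("memory[%lu] = %#llx + %#llx\n", i, rgn->base, rgn->size);
            return 0;
    }
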
/linux/Documentation/admin-guide/mm/damon/
  lru_sort.rst
     31  DAMON_LRU_SORT finds hot pages (pages of memory regions that show access
     33  memory regions that show no access for a time longer than a
     85  Access frequency threshold for hot memory region identification, in permil.
     94  Time threshold for cold memory region identification, in microseconds.
    179  Minimum number of monitoring regions.
    181  The minimal number of monitoring regions of DAMON for the cold memory
    190  Maximum number of monitoring regions.
    192  The maximum number of monitoring regions of DAMON for the cold memory
    225  Number of hot memory regions that DAMON tried to LRU-sort.
    230  Total bytes of hot memory regions that DAMON tried to LRU-sort.
    [all …]

  reclaim.rst
     33  DAMON_RECLAIM finds memory regions that weren't accessed for a specific time
     36  out memory regions that weren't accessed for a longer time first. System
     77  Time threshold for cold memory region identification, in microseconds.
    200  Minimum number of monitoring regions.
    202  The minimal number of monitoring regions of DAMON for the cold memory
    210  Maximum number of monitoring regions.
    212  The maximum number of monitoring regions of DAMON for the cold memory
    223  against. That is, DAMON_RECLAIM will find cold memory regions in this region
    232  against. That is, DAMON_RECLAIM will find cold memory regions in this region
    255  Number of memory regions that DAMON_RECLAIM tried to reclaim.
    [all …]

/linux/drivers/gpu/drm/nouveau/nvkm/nvfw/
  acr.c
    130  hdr->regions.no_regions);  in flcn_acr_desc_dump()
    132  for (i = 0; i < ARRAY_SIZE(hdr->regions.region_props); i++) {  in flcn_acr_desc_dump()
    135  hdr->regions.region_props[i].start_addr);  in flcn_acr_desc_dump()
    137  hdr->regions.region_props[i].end_addr);  in flcn_acr_desc_dump()
    139  hdr->regions.region_props[i].region_id);  in flcn_acr_desc_dump()
    141  hdr->regions.region_props[i].read_mask);  in flcn_acr_desc_dump()
    143  hdr->regions.region_props[i].write_mask);  in flcn_acr_desc_dump()
    145  hdr->regions.region_props[i].client_mask);  in flcn_acr_desc_dump()
    173  hdr->regions.no_regions);  in flcn_acr_desc_v1_dump()
    175  for (i = 0; i < ARRAY_SIZE(hdr->regions.region_props); i++) {  in flcn_acr_desc_v1_dump()
    [all …]

/linux/drivers/vfio/fsl-mc/
  vfio_fsl_mc.c
     30  vdev->regions = kcalloc(count, sizeof(struct vfio_fsl_mc_region),  in vfio_fsl_mc_open_device()
     32  if (!vdev->regions)  in vfio_fsl_mc_open_device()
     36  struct resource *res = &mc_dev->regions[i];  in vfio_fsl_mc_open_device()
     39  vdev->regions[i].addr = res->start;  in vfio_fsl_mc_open_device()
     40  vdev->regions[i].size = resource_size(res);  in vfio_fsl_mc_open_device()
     41  vdev->regions[i].type = mc_dev->regions[i].flags & IORESOURCE_BITS;  in vfio_fsl_mc_open_device()
     43  * Only regions addressed with PAGE granularity may be  in vfio_fsl_mc_open_device()
     46  if (!no_mmap && !(vdev->regions[i].addr & ~PAGE_MASK) &&  in vfio_fsl_mc_open_device()
     47  !(vdev->regions[i].size & ~PAGE_MASK))  in vfio_fsl_mc_open_device()
     48  vdev->regions[i].flags |=  in vfio_fsl_mc_open_device()
    [all …]

/linux/include/linux/
  damon.h
     73  * regions are merged into a new region, both @nr_accesses and @age of the new
     74  * region are set as the region size-weighted average of those of the two regions.
     91  * @nr_regions: Number of monitoring target regions of this target.
     92  * @regions_list: Head of the monitoring target regions of this target.
    118  * @DAMOS_MIGRATE_HOT: Migrate the regions, prioritizing warmer regions.
    119  * @DAMOS_MIGRATE_COLD: Migrate the regions, prioritizing colder regions.
    228  * For selecting regions within the quota, DAMON prioritizes the current scheme's
    229  * target memory regions using the &struct damon_operations->get_scheme_score.
    286  * appropriate memory regions. Else, DAMON checks &metric of the system for at
    306  * @nr_tried: Total number of regions to which applying the scheme was tried.
    [all …]

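Note: the damon.h comment above states the merge rule precisely: the merged region's @nr_accesses and @age are the size-weighted averages of the two inputs. That arithmetic as a standalone sketch, with field names kept close to the header's but the helper itself hypothetical:

    #include <stdio.h>

    struct damon_region {
            unsigned long start, end;
            unsigned int nr_accesses;
            unsigned int age;
    };

    static unsigned long sz(const struct damon_region *r)
    {
            return r->end - r->start;
    }

    /* Merge r into l: l grows to cover both, and nr_accesses/age become
     * the size-weighted averages described in the damon.h comment. */
    static void merge_regions(struct damon_region *l, const struct damon_region *r)
    {
            unsigned long sl = sz(l), sr = sz(r);

            l->nr_accesses = (l->nr_accesses * sl + r->nr_accesses * sr) / (sl + sr);
            l->age = (l->age * sl + r->age * sr) / (sl + sr);
            l->end = r->end;
    }

    int main(void)
    {
            struct damon_region l = { 0x0, 0x3000, 9, 10 };     /* 3 pages, hot */
            struct damon_region r = { 0x3000, 0x4000, 1, 2 };   /* 1 page, cold */

            merge_regions(&l, &r);
            printf("merged: %#lx-%#lx nr_accesses=%u age=%u\n",
                   l.start, l.end, l.nr_accesses, l.age);  /* (9*3+1*1)/4 = 7 */
            return 0;
    }
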
/linux/drivers/net/dsa/sja1105/
  sja1105_devlink.c
      7  /* Since devlink regions have a fixed size and the static config has a variable
     85  priv->regions = kcalloc(num_regions, sizeof(struct devlink_region *),  in sja1105_setup_devlink_regions()
     87  if (!priv->regions)  in sja1105_setup_devlink_regions()
     97  dsa_devlink_region_destroy(priv->regions[i]);  in sja1105_setup_devlink_regions()
     99  kfree(priv->regions);  in sja1105_setup_devlink_regions()
    103  priv->regions[i] = region;  in sja1105_setup_devlink_regions()
    115  dsa_devlink_region_destroy(priv->regions[i]);  in sja1105_teardown_devlink_regions()
    117  kfree(priv->regions);  in sja1105_teardown_devlink_regions()

/linux/Documentation/devicetree/bindings/fpga/
  fpga-region.yaml
     25  FPGA Regions represent FPGAs and partial reconfiguration regions of FPGAs in
     26  the Device Tree. FPGA Regions provide a way to program FPGAs under device tree
     62  * A persona may create more regions.
     72  will be used to gate the busses. Traffic to other regions is not affected.
     76  * An FPGA image may create a set of reprogrammable regions, each having its
     86  * A base image may set up a set of partial reconfiguration regions that may
    106  Figure 1: An FPGA set up with a base image that created three regions. Each
    131  FPGA Regions represent FPGAs and FPGA PR regions in the device tree. An FPGA
    158  These FPGA regions are children of FPGA bridges, which are in turn children of the
    167  FPGA Regions do not inherit their ancestor FPGA regions' bridges. This prevents
    [all …]

/linux/drivers/soc/qcom/
  smem.c
     43  * the partition and holds properties for the two internal memory regions. The
     44  * two regions are cached and non-cached memory respectively. Each region
    273  * @num_regions: number of @regions
    274  * @regions: list of the memory regions defining the shared memory
    288  struct smem_region regions[] __counted_by(num_regions);
    468  header = smem->regions[0].virt_base;  in qcom_smem_alloc_global()
    557  header = smem->regions[0].virt_base;  in qcom_smem_get_global()
    565  region = &smem->regions[i];  in qcom_smem_get_global()
    746  header = __smem->regions[0].virt_base;  in qcom_smem_get_free_space()
    749  if (ret > __smem->regions[0].size)  in qcom_smem_get_free_space()
    [all …]