
Searched full:regions (Results 1 – 25 of 1141) sorted by relevance


/linux/mm/damon/
vaddr-test.h
vaddr.c
56 * Functions for the initial monitoring target regions construction
60 * Size-evenly split a region into 'nr_pieces' small regions
104 * Find three regions separated by two biggest unmapped regions
107 * regions an array of three address ranges that results will be saved
109 * This function receives an address space and finds three regions in it which
110 * separated by the two biggest unmapped regions in the space. Please refer to
117 struct damon_addr_range regions[3]) in __damon_va_three_regions()
160 regions[0].start = ALIGN(start, DAMON_MIN_REGION); in __damon_va_three_regions()
161 regions[ in __damon_va_three_regions()
238 struct damon_addr_range regions[3]; __damon_va_init_regions() local
[all...]
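
The vaddr.c hits above revolve around splitting an address range size-evenly into a number of pieces, aligned to DAMON_MIN_REGION. A minimal user-space sketch of that splitting step, with illustrative types and a stand-in alignment constant (not the kernel's own):

    #include <stdio.h>

    #define MIN_REGION 4096UL                  /* stand-in for DAMON_MIN_REGION */
    #define ALIGN_DOWN(x, a) ((x) / (a) * (a))

    struct addr_range { unsigned long start, end; };

    /* Size-evenly split [r->start, r->end) into nr_pieces aligned sub-regions. */
    static int split_evenly(const struct addr_range *r,
                            struct addr_range *out, int nr_pieces)
    {
        unsigned long sz = ALIGN_DOWN((r->end - r->start) / nr_pieces, MIN_REGION);
        unsigned long start = r->start;

        if (!sz)
            return -1;                         /* range too small to split this finely */
        for (int i = 0; i < nr_pieces; i++) {
            out[i].start = start;
            /* the last piece absorbs the remainder lost to alignment */
            out[i].end = (i == nr_pieces - 1) ? r->end : start + sz;
            start += sz;
        }
        return 0;
    }

    int main(void)
    {
        struct addr_range r = { 0x1000, 0x100000 }, out[3];

        if (!split_evenly(&r, out, 3))
            for (int i = 0; i < 3; i++)
                printf("[%#lx, %#lx)\n", out[i].start, out[i].end);
        return 0;
    }
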
/linux/arch/powerpc/mm/nohash/
kaslr_booke.c
23 struct regions { struct
38 struct regions __initdata regions; argument
113 if (regions.reserved_mem < 0) in overlaps_reserved_region()
117 for (subnode = fdt_first_subnode(fdt, regions.reserved_mem); in overlaps_reserved_region()
125 while (len >= (regions.reserved_mem_addr_cells + in overlaps_reserved_region()
126 regions.reserved_mem_size_cells)) { in overlaps_reserved_region()
128 if (regions.reserved_mem_addr_cells == 2) in overlaps_reserved_region()
131 reg += regions.reserved_mem_addr_cells; in overlaps_reserved_region()
132 len -= 4 * regions.reserved_mem_addr_cells; in overlaps_reserved_region()
135 if (regions.reserved_mem_size_cells == 2) in overlaps_reserved_region()
[all …]
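
The kaslr_booke.c hits step through a flattened-device-tree 'reg' property whose entry width depends on the reserved-memory node's #address-cells and #size-cells. A sketch of just the cell assembly those lines perform, in user space with illustrative data (FDT cells are 32-bit big-endian):

    #include <stdint.h>
    #include <stdio.h>
    #include <arpa/inet.h>                     /* ntohl/htonl: FDT cells are big-endian */

    /* Read one 1- or 2-cell big-endian value and advance the cursor, the
     * way the reserved-memory walk above consumes its 'reg' property. */
    static uint64_t read_cells(const uint32_t **reg, int cells)
    {
        uint64_t v = ntohl((*reg)[0]);

        if (cells == 2)
            v = (v << 32) | ntohl((*reg)[1]);
        *reg += cells;
        return v;
    }

    int main(void)
    {
        /* one entry: addr = 0x80000000 (2 cells), size = 0x10000 (1 cell) */
        const uint32_t raw[] = { htonl(0x0), htonl(0x80000000), htonl(0x10000) };
        const uint32_t *reg = raw;
        uint64_t addr = read_cells(&reg, 2);
        uint64_t size = read_cells(&reg, 1);

        printf("addr=%#llx size=%#llx\n",
               (unsigned long long)addr, (unsigned long long)size);
        return 0;
    }
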
/linux/drivers/mtd/chips/
jedec_probe.c
275 const uint32_t regions[6]; member
307 .regions = {
319 .regions = {
334 .regions = {
349 .regions = {
364 .regions = {
379 .regions = {
395 .regions = {
412 .regions = {
429 .regions = {
[all …]
/linux/Documentation/admin-guide/device-mapper/
dm-clone.rst
58 3. A small metadata device - it records which regions are already valid in the
59 destination device, i.e., which regions have already been hydrated, or have
65 Regions section in Design
68 dm-clone divides the source and destination devices in fixed sized regions.
69 Regions are the unit of hydration, i.e., the minimum amount of data copied from
77 Reads and writes from/to hydrated regions are serviced from the destination
93 as a hint to skip hydration of the regions covered by the request, i.e., it
111 A message `hydration_threshold <#regions>` can be used to set the maximum number
112 of regions being copied, the default being 1 region.
116 region size. A message `hydration_batch_size <#regions>` can be used to tune the
[all …]
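
Per the dm-clone text above, both devices are divided into fixed sized regions and the metadata records which regions are hydrated; a read is then served from the destination only if its region's bit is set. A toy sketch of that lookup, with made-up region count and size:

    #include <stdint.h>
    #include <stdio.h>

    /* One bit per fixed-size region records "already hydrated".  Region
     * size and count are made-up here, not dm-clone's real metadata. */
    #define REGION_SECTORS 64UL                /* 32 KiB regions, 512-byte sectors */
    #define NR_REGIONS 1024UL

    static uint64_t hydrated[NR_REGIONS / 64];

    static int region_is_hydrated(unsigned long sector)
    {
        unsigned long r = sector / REGION_SECTORS;

        return (hydrated[r / 64] >> (r % 64)) & 1;
    }

    int main(void)
    {
        hydrated[0] |= 1;                      /* region 0 copied already */
        printf("sector 10  -> %s\n", region_is_hydrated(10) ? "destination" : "source");
        printf("sector 100 -> %s\n", region_is_hydrated(100) ? "destination" : "source");
        return 0;
    }
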
/linux/tools/testing/selftests/damon/
damon_nr_regions.py
11 Create process of the given 'real_nr_regions' regions, monitor it using
21 # stat every monitored regions
46 print('tried regions update failed: %s' % err)
52 print('tried regions is not collected')
58 print('tried regions is not created')
71 print('number of regions that collected are:')
78 # test min_nr_regions larger than real nr regions
81 # test max_nr_regions smaller than real nr regions
84 # test online-tuned max_nr_regions that smaller than real nr regions
88 # stat every monitored regions
[all...]
access_memory_even.c
5 * Receives number of regions and size of each region from user. Allocate the
6 * regions and repeatedly access even numbered (starting from zero) regions.
16 char **regions; in main() local
31 regions = malloc(sizeof(*regions) * nr_regions); in main()
33 regions[i] = malloc(sz_region); in main()
38 memset(regions[i], i, sz_region); in main()
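
The access_memory_even.c snippet is almost the whole program already; a self-contained version of what its comment describes, with fixed values standing in for the user-supplied arguments:

    #include <stdlib.h>
    #include <string.h>

    /* Allocate nr_regions buffers of sz_region bytes each and repeatedly
     * touch the even-numbered ones, as the selftest's comment describes.
     * Fixed values replace the real test's command-line arguments, and it
     * stops after a bounded number of passes instead of running forever. */
    int main(void)
    {
        int nr_regions = 10;
        size_t sz_region = 1 << 20;            /* 1 MiB per region */
        char **regions = malloc(sizeof(*regions) * nr_regions);

        if (!regions)
            return 1;
        for (int i = 0; i < nr_regions; i++) {
            regions[i] = malloc(sz_region);
            if (!regions[i])
                return 1;
        }
        for (int rep = 0; rep < 100; rep++)
            for (int i = 0; i < nr_regions; i += 2)
                memset(regions[i], i, sz_region);
        return 0;
    }
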
access_memory.c
13 char **regions; in main() local
30 regions = malloc(sizeof(*regions) * nr_regions); in main()
32 regions[i] = malloc(sz_region); in main()
38 memset(regions[i], i, sz_region); in main()
/linux/Documentation/networking/devlink/
devlink-region.rst
7 ``devlink`` regions enable access to driver defined address regions using
10 Each device can create and register its own supported address regions. The
15 Regions may optionally support triggering snapshots on demand.
22 address regions that are otherwise inaccessible to the user.
24 Regions may also be used to provide an additional way to debug complex error
27 Regions may optionally support capturing a snapshot on demand via the
34 Regions may optionally allow directly reading from their contents without a
54 # Show all of the exposed regions with region sizes:
81 As regions are likely very device or driver specific, no generic regions are
83 specific regions a driver supports.
/linux/drivers/vfio/platform/
vfio_platform_common.c
144 vdev->regions = kcalloc(cnt, sizeof(struct vfio_platform_region), in vfio_platform_regions_init()
146 if (!vdev->regions) in vfio_platform_regions_init()
153 vdev->regions[i].addr = res->start; in vfio_platform_regions_init()
154 vdev->regions[i].size = resource_size(res); in vfio_platform_regions_init()
155 vdev->regions[i].flags = 0; in vfio_platform_regions_init()
159 vdev->regions[i].type = VFIO_PLATFORM_REGION_TYPE_MMIO; in vfio_platform_regions_init()
160 vdev->regions[i].flags |= VFIO_REGION_INFO_FLAG_READ; in vfio_platform_regions_init()
162 vdev->regions[i].flags |= in vfio_platform_regions_init()
166 * Only regions addressed with PAGE granularity may be in vfio_platform_regions_init()
169 if (!(vdev->regions[i].addr & ~PAGE_MASK) && in vfio_platform_regions_init()
[all …]
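
The vfio_platform_common.c hits gate mmap support on page granularity: a region is eligible only when neither its address nor its size has bits below the page boundary. The check in isolation, assuming a 4 KiB page:

    #include <stdio.h>

    #define PAGE_SIZE 4096UL
    #define PAGE_MASK (~(PAGE_SIZE - 1))

    /* A region may be mmap'ed only if both its address and size are
     * page-aligned; this mirrors the check quoted above. */
    static int mmap_capable(unsigned long addr, unsigned long size)
    {
        return !(addr & ~PAGE_MASK) && !(size & ~PAGE_MASK);
    }

    int main(void)
    {
        printf("%d %d\n", mmap_capable(0x10000, 0x2000),   /* 1: aligned */
                          mmap_capable(0x10800, 0x2000));  /* 0: offset not aligned */
        return 0;
    }
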
/linux/tools/testing/memblock/tests/
basic_api.c
17 ASSERT_NE(memblock.memory.regions, NULL); in memblock_initialization_check()
22 ASSERT_NE(memblock.reserved.regions, NULL); in memblock_initialization_check()
37 * and size to the collection of available memory regions (memblock.memory).
45 rgn = &memblock.memory.regions[0]; in memblock_add_simple_check()
70 * NUMA node and memory flags to the collection of available memory regions.
78 rgn = &memblock.memory.regions[0]; in memblock_add_node_simple_check()
114 * available memory regions (memblock.memory). The total size and
121 rgn1 = &memblock.memory.regions[0]; in memblock_add_disjoint_check()
122 rgn2 = &memblock.memory.regions[1]; in memblock_add_disjoint_check()
167 * and has size of two regions minus their intersection. The total size of
[all …]
alloc_nid_api.c
66 struct memblock_region *rgn = &memblock.reserved.regions[0]; in alloc_nid_top_down_simple_check()
118 struct memblock_region *rgn = &memblock.reserved.regions[0]; in alloc_nid_top_down_end_misaligned_check()
169 struct memblock_region *rgn = &memblock.reserved.regions[0]; in alloc_nid_exact_address_generic_check()
221 struct memblock_region *rgn = &memblock.reserved.regions[0]; in alloc_nid_top_down_narrow_range_check()
307 * Expect a merge of both regions. Only the region size gets updated.
311 struct memblock_region *rgn = &memblock.reserved.regions[0]; in alloc_nid_min_reserved_generic_check()
359 * Expect a merge of regions. Only the region size gets updated.
363 struct memblock_region *rgn = &memblock.reserved.regions[0]; in alloc_nid_max_reserved_generic_check()
399 * there are two reserved regions at the borders, with a gap big enough to fit
416 struct memblock_region *rgn1 = &memblock.reserved.regions[1]; in alloc_nid_top_down_reserved_with_space_check()
[all …]
alloc_exact_nid_api.c
30 struct memblock_region *new_rgn = &memblock.reserved.regions[0]; in alloc_exact_nid_top_down_numa_simple_check()
31 struct memblock_region *req_node = &memblock.memory.regions[nid_req]; in alloc_exact_nid_top_down_numa_simple_check()
82 struct memblock_region *new_rgn = &memblock.reserved.regions[1]; in alloc_exact_nid_top_down_numa_part_reserved_check()
83 struct memblock_region *req_node = &memblock.memory.regions[nid_req]; in alloc_exact_nid_top_down_numa_part_reserved_check()
143 struct memblock_region *new_rgn = &memblock.reserved.regions[0]; in alloc_exact_nid_top_down_numa_split_range_low_check()
144 struct memblock_region *req_node = &memblock.memory.regions[nid_req]; in alloc_exact_nid_top_down_numa_split_range_low_check()
200 struct memblock_region *new_rgn = &memblock.reserved.regions[0]; in alloc_exact_nid_top_down_numa_no_overlap_split_check()
201 struct memblock_region *req_node = &memblock.memory.regions[nid_req]; in alloc_exact_nid_top_down_numa_no_overlap_split_check()
202 struct memblock_region *node2 = &memblock.memory.regions[6]; in alloc_exact_nid_top_down_numa_no_overlap_split_check()
258 struct memblock_region *new_rgn = &memblock.reserved.regions[0]; in alloc_exact_nid_top_down_numa_no_overlap_low_check()
[all …]
alloc_api.c
26 struct memblock_region *rgn = &memblock.reserved.regions[0]; in alloc_top_down_simple_check()
73 struct memblock_region *rgn1 = &memblock.reserved.regions[1]; in alloc_top_down_disjoint_check()
74 struct memblock_region *rgn2 = &memblock.reserved.regions[0]; in alloc_top_down_disjoint_check()
121 * Expect a merge of both regions. Only the region size gets updated.
125 struct memblock_region *rgn = &memblock.reserved.regions[0]; in alloc_top_down_before_check()
163 * Expect a merge of both regions. Both the base address and size of the region
168 struct memblock_region *rgn = &memblock.reserved.regions[0]; in alloc_top_down_after_check()
204 * A test that tries to allocate memory when there are two reserved regions with
217 struct memblock_region *rgn = &memblock.reserved.regions[0]; in alloc_top_down_second_fit_check()
254 * A test that tries to allocate memory when there are two reserved regions with
[all …]
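
Several alloc_api tests above expect top-down placement: the block lands as high as it fits in the free range. A deliberately small model of that policy (the real allocator walks every free region and honours NUMA hints):

    #include <stdio.h>

    /* Top-down placement over a single free range: put the block as high
     * as it fits below 'end', aligned down.  A toy model of the policy the
     * alloc_api tests check; memblock itself scans all free regions. */
    static unsigned long alloc_top_down(unsigned long start, unsigned long end,
                                        unsigned long size, unsigned long align)
    {
        unsigned long base;

        if (end < start || end - start < size)
            return 0;                          /* no fit */
        base = (end - size) / align * align;
        return base >= start ? base : 0;
    }

    int main(void)
    {
        printf("allocated at %#lx\n",
               alloc_top_down(0x1000, 0x100000, 0x800, 16)); /* 0xff800 */
        return 0;
    }
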
/linux/mm/
memblock.c
39 * Memblock is a method of managing memory regions during the early
44 * regions. There are several types of these collections:
50 * * ``reserved`` - describes the regions that were allocated
58 * which contains an array of memory regions along with
66 * arrays during addition of new regions. This feature should be used
116 .memory.regions = memblock_memory_init_regions,
120 .reserved.regions = memblock_reserved_init_regions,
130 .regions = memblock_physmem_init_regions,
145 for (i = 0, rgn = &memblock_type->regions[0]; \
147 i++, rgn = &memblock_type->regions[i])
[all …]
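
The memblock tests quoted above repeatedly expect that adding a block adjacent to an existing region merges into it, updating only the size. A toy collection showing just that merge case, with an arbitrary fixed capacity:

    #include <stdio.h>

    /* A memblock-like type: a sorted array of {base, size} regions.  This
     * sketch handles only the "merge with the previous neighbour" case the
     * tests keep checking; the real memblock handles many more. */
    struct region { unsigned long base, size; };
    struct type { struct region regions[16]; int cnt; };

    static void add_region(struct type *t, unsigned long base, unsigned long size)
    {
        struct region *last = &t->regions[t->cnt - 1];

        if (t->cnt && last->base + last->size == base) {
            last->size += size;                /* adjacent: merge, size only */
            return;
        }
        t->regions[t->cnt].base = base;        /* capacity check elided */
        t->regions[t->cnt].size = size;
        t->cnt++;
    }

    int main(void)
    {
        struct type mem = { .cnt = 0 };

        add_region(&mem, 0x1000, 0x1000);
        add_region(&mem, 0x2000, 0x1000);      /* merges into the first */
        printf("cnt=%d base=%#lx size=%#lx\n", mem.cnt,
               mem.regions[0].base, mem.regions[0].size);
        return 0;
    }
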
/linux/drivers/virt/nitro_enclaves/
ne_misc_dev_test.c
23 * regions = {}
34 * regions = {}
45 * regions = {
58 * regions = {
72 * regions = {
87 * regions = {
102 * regions = {
117 phys_contig_mem_regions.regions = kunit_kcalloc(test, MAX_PHYS_REGIONS, in ne_misc_dev_test_merge_phys_contig_memory_regions()
118 sizeof(*phys_contig_mem_regions.regions), in ne_misc_dev_test_merge_phys_contig_memory_regions()
120 KUNIT_ASSERT_TRUE(test, phys_contig_mem_regions.regions); in ne_misc_dev_test_merge_phys_contig_memory_regions()
[all …]
/linux/Documentation/admin-guide/mm/damon/
lru_sort.rst
31 DAMON_LRU_SORT finds hot pages (pages of memory regions that showing access
33 memory regions that showing no access for a time that longer than a
85 Access frequency threshold for hot memory regions identification in permil.
94 Time threshold for cold memory regions identification in microseconds.
179 Minimum number of monitoring regions.
181 The minimal number of monitoring regions of DAMON for the cold memory
190 Maximum number of monitoring regions.
192 The maximum number of monitoring regions of DAMON for the cold memory
225 Number of hot memory regions that tried to be LRU-sorted.
230 Total bytes of hot memory regions that tried to be LRU-sorted.
[all …]
reclaim.rst
33 DAMON_RECLAIM finds memory regions that didn't accessed for specific time
36 out memory regions that didn't accessed longer time first. System
77 Time threshold for cold memory regions identification in microseconds.
200 Minimum number of monitoring regions.
202 The minimal number of monitoring regions of DAMON for the cold memory
210 Maximum number of monitoring regions.
212 The maximum number of monitoring regions of DAMON for the cold memory
223 against. That is, DAMON_RECLAIM will find cold memory regions in this region
232 against. That is, DAMON_RECLAIM will find cold memory regions in this region
255 Number of memory regions that tried to be reclaimed by DAMON_RECLAIM.
[all …]
/linux/drivers/gpu/drm/nouveau/nvkm/nvfw/
acr.c
130 hdr->regions.no_regions); in flcn_acr_desc_dump()
132 for (i = 0; i < ARRAY_SIZE(hdr->regions.region_props); i++) { in flcn_acr_desc_dump()
135 hdr->regions.region_props[i].start_addr); in flcn_acr_desc_dump()
137 hdr->regions.region_props[i].end_addr); in flcn_acr_desc_dump()
139 hdr->regions.region_props[i].region_id); in flcn_acr_desc_dump()
141 hdr->regions.region_props[i].read_mask); in flcn_acr_desc_dump()
143 hdr->regions.region_props[i].write_mask); in flcn_acr_desc_dump()
145 hdr->regions.region_props[i].client_mask); in flcn_acr_desc_dump()
173 hdr->regions.no_regions); in flcn_acr_desc_v1_dump()
175 for (i = 0; i < ARRAY_SIZE(hdr->regions.region_props); i++) { in flcn_acr_desc_v1_dump()
[all …]
/linux/drivers/vfio/fsl-mc/
vfio_fsl_mc.c
30 vdev->regions = kcalloc(count, sizeof(struct vfio_fsl_mc_region), in vfio_fsl_mc_open_device()
32 if (!vdev->regions) in vfio_fsl_mc_open_device()
36 struct resource *res = &mc_dev->regions[i]; in vfio_fsl_mc_open_device()
39 vdev->regions[i].addr = res->start; in vfio_fsl_mc_open_device()
40 vdev->regions[i].size = resource_size(res); in vfio_fsl_mc_open_device()
41 vdev->regions[i].type = mc_dev->regions[i].flags & IORESOURCE_BITS; in vfio_fsl_mc_open_device()
43 * Only regions addressed with PAGE granularity may be in vfio_fsl_mc_open_device()
46 if (!no_mmap && !(vdev->regions[i].addr & ~PAGE_MASK) && in vfio_fsl_mc_open_device()
47 !(vdev->regions[i].size & ~PAGE_MASK)) in vfio_fsl_mc_open_device()
48 vdev->regions[i].flags |= in vfio_fsl_mc_open_device()
[all …]
/linux/drivers/md/
dm-bio-prison-v1.c
29 struct prison_region regions[] __counted_by(num_locks);
47 prison = kzalloc(struct_size(prison, regions, num_locks), GFP_KERNEL); in dm_bio_prison_create()
53 spin_lock_init(&prison->regions[i].lock); in dm_bio_prison_create()
54 prison->regions[i].cell = RB_ROOT; in dm_bio_prison_create()
184 spin_lock_irq(&prison->regions[l].lock); in bio_detain()
185 r = __bio_detain(&prison->regions[l].cell, key, inmate, cell_prealloc, cell_result); in bio_detain()
186 spin_unlock_irq(&prison->regions[l].lock); in bio_detain()
232 spin_lock_irq(&prison->regions[l].lock); in dm_cell_release()
233 __cell_release(&prison->regions[l].cell, cell, bios); in dm_cell_release()
234 spin_unlock_irq(&prison->regions[l].lock); in dm_cell_release()
[all …]
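
dm-bio-prison allocates the prison and its per-region lock array in one shot: a trailing flexible array sized with struct_size() and annotated __counted_by(). A user-space equivalent of that single-allocation pattern:

    #include <stdio.h>
    #include <stdlib.h>

    /* One allocation for the struct plus its trailing flexible array, the
     * user-space analogue of the kzalloc(struct_size(...)) hit above. */
    struct prison {
        unsigned int num_locks;
        struct { int lock; } regions[];        /* flexible array member */
    };

    static struct prison *prison_create(unsigned int num_locks)
    {
        struct prison *p = calloc(1, sizeof(*p) + num_locks * sizeof(p->regions[0]));

        if (p)
            p->num_locks = num_locks;
        return p;
    }

    int main(void)
    {
        struct prison *p = prison_create(16);

        printf("%s\n", p ? "prison with 16 region locks" : "oom");
        free(p);
        return 0;
    }
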
/linux/drivers/net/dsa/sja1105/
sja1105_devlink.c
7 /* Since devlink regions have a fixed size and the static config has a variable
85 priv->regions = kcalloc(num_regions, sizeof(struct devlink_region *), in sja1105_setup_devlink_regions()
87 if (!priv->regions) in sja1105_setup_devlink_regions()
97 dsa_devlink_region_destroy(priv->regions[i]); in sja1105_setup_devlink_regions()
99 kfree(priv->regions); in sja1105_setup_devlink_regions()
103 priv->regions[i] = region; in sja1105_setup_devlink_regions()
115 dsa_devlink_region_destroy(priv->regions[i]); in sja1105_teardown_devlink_regions()
117 kfree(priv->regions); in sja1105_teardown_devlink_regions()
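
The sja1105 snippet shows the usual partial-failure unwind: if creating region i fails, the regions already created are destroyed and the pointer array freed. The same shape in miniature, with a fake constructor that fails part-way:

    #include <stdio.h>
    #include <stdlib.h>

    /* Create N objects; when one fails, destroy those already created in
     * reverse and free the pointer array -- the unwind shape quoted above.
     * The constructor here fails on purpose at i == 3. */
    static void *create_one(int i)
    {
        return i == 3 ? NULL : malloc(8);
    }

    int main(void)
    {
        enum { N = 6 };
        void **objs = calloc(N, sizeof(*objs));

        if (!objs)
            return 1;
        for (int i = 0; i < N; i++) {
            objs[i] = create_one(i);
            if (!objs[i]) {
                while (i--)
                    free(objs[i]);             /* tear down in reverse */
                free(objs);
                puts("creation failed, unwound cleanly");
                return 1;
            }
        }
        puts("all regions created");
        return 0;
    }
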
/linux/Documentation/devicetree/bindings/fpga/
fpga-region.yaml
25 FPGA Regions represent FPGA's and partial reconfiguration regions of FPGA's in
26 the Device Tree. FPGA Regions provide a way to program FPGAs under device tree
62 * A persona may create more regions.
72 will be used to gate the busses. Traffic to other regions is not affected.
76 * An FPGA image may create a set of reprogrammable regions, each having its
86 * A base image may set up a set of partial reconfiguration regions that may
106 Figure 1: An FPGA set up with a base image that created three regions. Each
131 FPGA Regions represent FPGA's and FPGA PR regions in the device tree. An FPGA
158 These FPGA regions are children of FPGA bridges which are then children of the
167 FPGA Regions do not inherit their ancestor FPGA regions' bridges. This prevents
[all …]
/linux/drivers/perf/
marvell_cn10k_tad_pmu.c
32 struct tad_region *regions; member
53 new += readq(tad_pmu->regions[i].base + in tad_pmu_event_counter_read()
71 writeq_relaxed(0, tad_pmu->regions[i].base + in tad_pmu_event_counter_stop()
92 writeq_relaxed(0, tad_pmu->regions[i].base + in tad_pmu_event_counter_start()
100 writeq_relaxed(reg_val, tad_pmu->regions[i].base + in tad_pmu_event_counter_start()
258 struct tad_region *regions; in tad_pmu_probe() local
299 regions = devm_kcalloc(&pdev->dev, tad_cnt, in tad_pmu_probe()
300 sizeof(*regions), GFP_KERNEL); in tad_pmu_probe()
301 if (!regions) in tad_pmu_probe()
304 /* ioremap the distributed TAD pmu regions */ in tad_pmu_probe()
[all …]
/linux/drivers/net/ipa/
ipa_mem.c
23 /* "Canary" value placed between memory regions to detect overflow */
61 * Set up the shared memory regions in IPA local memory. This involves
62 * zero-filling memory regions, and in the case of header memory, telling
66 * crashes, its regions are re-zeroed in ipa_mem_zero_modem().
86 * the processing context and modem memory regions. in ipa_mem_setup()
256 DECLARE_BITMAP(regions, IPA_MEM_COUNT) = { }; in ipa_mem_valid()
262 dev_err(dev, "too many memory regions (%u > %u)\n", in ipa_mem_valid()
270 if (__test_and_set_bit(mem->id, regions)) { in ipa_mem_valid()
275 /* Defined regions have non-zero size and/or canary count */ in ipa_mem_valid()
280 /* Now see if any required regions are not defined */ in ipa_mem_valid()
[all …]
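
ipa_mem_valid() catches a region id defined twice by test-and-setting its bit in a bitmap. A compact stand-alone version of that validation (64 ids at most here, versus the driver's IPA_MEM_COUNT):

    #include <stdint.h>
    #include <stdio.h>

    /* Catch a region id defined twice: set its bit and complain if it was
     * already set -- the __test_and_set_bit() idea from the hit above. */
    static uint64_t seen;

    static int note_region(unsigned int id)
    {
        uint64_t bit = 1ULL << id;

        if (seen & bit)
            return -1;                         /* duplicate definition */
        seen |= bit;
        return 0;
    }

    int main(void)
    {
        int a = note_region(3);
        int b = note_region(7);
        int c = note_region(3);

        printf("%d %d %d\n", a, b, c);         /* 0 0 -1: repeat caught */
        return 0;
    }
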
