
Searched full:region (Results 1 – 25 of 2314) sorted by relevance


/linux/drivers/hv/
mshv_regions.c
5 * Memory region management for mshv_root module.
24 * in a region.
25 * @region : Pointer to the memory region structure.
27 * @page_offset: Offset into the region's pages array to start processing.
31 * This function scans the region's pages starting from @page_offset,
42 static long mshv_region_process_chunk(struct mshv_mem_region *region, in mshv_region_process_chunk() argument
45 int (*handler)(struct mshv_mem_region *region, in mshv_region_process_chunk() argument
55 page = region->pages[page_offset]; in mshv_region_process_chunk()
68 page = region->pages[page_offset + count]; in mshv_region_process_chunk()
79 ret = handler(region, flags, page_offset, count); in mshv_region_process_chunk()
[all …]
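
The doc comment in this result describes a chunked walk over a region's page array: start at @page_offset, gather a run of consecutive pages, and hand the run to a handler callback. Below is a minimal, self-contained sketch of that pattern; the names (demo_region, demo_process_chunk, print_chunk) are illustrative stand-ins, not the mshv_root code.

    /*
     * Sketch of the chunked region walk described above: count a run of
     * consecutively populated pages starting at page_offset, then pass
     * the run to a handler. Illustrative only; not the mshv code.
     */
    #include <stddef.h>
    #include <stdio.h>

    struct demo_region {
        void **pages;        /* NULL entry == page not populated */
        size_t nr_pages;
    };

    typedef int (*chunk_handler_t)(struct demo_region *region,
                                   size_t page_offset, size_t count);

    /* Returns pages handled, 0 if none at page_offset, -1 on handler error. */
    static long demo_process_chunk(struct demo_region *region,
                                   size_t page_offset, chunk_handler_t handler)
    {
        size_t count = 0;

        while (page_offset + count < region->nr_pages &&
               region->pages[page_offset + count])
            count++;

        if (!count)
            return 0;
        if (handler(region, page_offset, count))
            return -1;
        return (long)count;
    }

    static int print_chunk(struct demo_region *region, size_t page_offset,
                           size_t count)
    {
        (void)region;
        printf("chunk at page %zu, %zu page(s)\n", page_offset, count);
        return 0;
    }

    int main(void)
    {
        int dummy;
        void *backing[4] = { &dummy, &dummy, NULL, &dummy };
        struct demo_region region = { .pages = backing, .nr_pages = 4 };
        size_t off = 0;

        while (off < region.nr_pages) {
            long done = demo_process_chunk(&region, off, print_chunk);

            if (done < 0)
                return 1;
            off += done ? (size_t)done : 1;   /* skip one unpopulated page */
        }
        return 0;
    }
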
/linux/Documentation/devicetree/bindings/cache/
qcom,llcc.yaml
82 - description: LLCC0 base register region
98 - description: LLCC0 base register region
99 - description: LLCC1 base register region
100 - description: LLCC broadcast OR register region
101 - description: LLCC broadcast AND register region
102 - description: LLCC scratchpad broadcast OR register region
103 - description: LLCC scratchpad broadcast AND register region
125 - description: LLCC0 base register region
126 - description: LLCC broadcast base register region
142 - description: LLCC0 base register region
[all …]
/linux/tools/testing/selftests/kvm/lib/
kvm_util.c
248 * range addressed by a single page table into a low and high region
250 * the VA region spans [0, 2^(va_bits - 1)), [-(2^(va_bits - 1)), -1].
391 * maximum page table size for a memory region will be when the in vm_nr_pages_required()
463 * Force GUEST_MEMFD for the primary memory region if necessary, e.g. in __vm_create()
480 * MMIO region would prevent silently clobbering the MMIO region. in __vm_create()
483 ucall_init(vm, slot0->region.guest_phys_addr + slot0->region.memory_size); in __vm_create()
563 struct userspace_mem_region *region; in kvm_vm_restart() local
569 hash_for_each(vmp->regions.slot_hash, ctr, region, slot_node) { in kvm_vm_restart()
570 int ret = ioctl(vmp->fd, KVM_SET_USER_MEMORY_REGION2, &region->region); in kvm_vm_restart()
576 ret, errno, region->region.slot, in kvm_vm_restart()
[all …]
/linux/tools/testing/selftests/vfio/
vfio_dma_mapping_test.c
139 struct dma_region region; in TEST_F() local
145 region.vaddr = mmap(NULL, size, PROT_READ | PROT_WRITE, flags, -1, 0); in TEST_F()
148 if (flags & MAP_HUGETLB && region.vaddr == MAP_FAILED) in TEST_F()
151 ASSERT_NE(region.vaddr, MAP_FAILED); in TEST_F()
153 region.iova = iova_allocator_alloc(self->iova_allocator, size); in TEST_F()
154 region.size = size; in TEST_F()
156 iommu_map(self->iommu, &region); in TEST_F()
157 printf("Mapped HVA %p (size 0x%lx) at IOVA 0x%lx\n", region.vaddr, size, region.iova); in TEST_F()
159 ASSERT_EQ(region.iova, to_iova(self->device, region.vaddr)); in TEST_F()
161 rc = iommu_mapping_get(device_bdf, region.iova, &mapping); in TEST_F()
[all …]
/linux/drivers/net/ethernet/mellanox/mlxsw/
spectrum1_acl_tcam.c
14 struct mlxsw_sp_acl_tcam_region *region; member
62 struct mlxsw_sp1_acl_tcam_region *region) in mlxsw_sp1_acl_ctcam_region_catchall_add() argument
67 mlxsw_sp_acl_ctcam_chunk_init(&region->cregion, in mlxsw_sp1_acl_ctcam_region_catchall_add()
68 &region->catchall.cchunk, in mlxsw_sp1_acl_ctcam_region_catchall_add()
81 err = mlxsw_sp_acl_ctcam_entry_add(mlxsw_sp, &region->cregion, in mlxsw_sp1_acl_ctcam_region_catchall_add()
82 &region->catchall.cchunk, in mlxsw_sp1_acl_ctcam_region_catchall_add()
83 &region->catchall.centry, in mlxsw_sp1_acl_ctcam_region_catchall_add()
87 region->catchall.rulei = rulei; in mlxsw_sp1_acl_ctcam_region_catchall_add()
95 mlxsw_sp_acl_ctcam_chunk_fini(&region->catchall.cchunk); in mlxsw_sp1_acl_ctcam_region_catchall_add()
101 struct mlxsw_sp1_acl_tcam_region *region) in mlxsw_sp1_acl_ctcam_region_catchall_del() argument
[all …]
spectrum_acl_ctcam.c
15 struct mlxsw_sp_acl_tcam_region *region, in mlxsw_sp_acl_ctcam_region_resize() argument
21 region->key_type, new_size, region->id, in mlxsw_sp_acl_ctcam_region_resize()
22 region->tcam_region_info); in mlxsw_sp_acl_ctcam_region_resize()
28 struct mlxsw_sp_acl_tcam_region *region, in mlxsw_sp_acl_ctcam_region_move() argument
34 region->tcam_region_info, src_offset, in mlxsw_sp_acl_ctcam_region_move()
35 region->tcam_region_info, dst_offset, size); in mlxsw_sp_acl_ctcam_region_move()
46 struct mlxsw_sp_acl_tcam_region *region = cregion->region; in mlxsw_sp_acl_ctcam_region_entry_insert() local
61 region->tcam_region_info, in mlxsw_sp_acl_ctcam_region_entry_insert()
65 mlxsw_afk_encode(afk, region->key_info, &rulei->values, key, mask); in mlxsw_sp_acl_ctcam_region_entry_insert()
94 cregion->region->tcam_region_info, in mlxsw_sp_acl_ctcam_region_entry_remove()
[all …]
spectrum_acl_tcam.c
109 struct mutex lock; /* guards region list updates */
146 struct mutex lock; /* Protects consistency of region, region2 pointers
149 struct mlxsw_sp_acl_tcam_region *region; member
169 struct mlxsw_sp_acl_tcam_region *region; member
210 struct mlxsw_sp_acl_tcam_region *region; in mlxsw_sp_acl_tcam_group_update() local
215 list_for_each_entry(region, &group->region_list, list) { in mlxsw_sp_acl_tcam_group_update()
219 if (region->list.next != &group->region_list && in mlxsw_sp_acl_tcam_group_update()
220 list_next_entry(region, list)->vregion == region->vregion) in mlxsw_sp_acl_tcam_group_update()
223 region in mlxsw_sp_acl_tcam_group_update()
385 mlxsw_sp_acl_tcam_group_region_attach(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_tcam_group *group, struct mlxsw_sp_acl_tcam_region *region, unsigned int priority, struct mlxsw_sp_acl_tcam_region *next_region)
433 mlxsw_sp_acl_tcam_group_region_detach(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_tcam_region *region)
571 mlxsw_sp_acl_tcam_region_alloc(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_tcam_region *region)
599 mlxsw_sp_acl_tcam_region_free(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_tcam_region *region)
611 mlxsw_sp_acl_tcam_region_enable(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_tcam_region *region)
622 mlxsw_sp_acl_tcam_region_disable(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_tcam_region *region)
638 struct mlxsw_sp_acl_tcam_region *region; in mlxsw_sp_acl_tcam_region_create() local
686 mlxsw_sp_acl_tcam_region_destroy(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_tcam_region *region)
903 mlxsw_sp_acl_tcam_chunk_create(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_tcam_vchunk *vchunk, struct mlxsw_sp_acl_tcam_region *region)
1087 mlxsw_sp_acl_tcam_entry_action_replace(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_tcam_region *region, struct mlxsw_sp_acl_tcam_entry *entry, struct mlxsw_sp_acl_rule_info *rulei)
1213 mlxsw_sp_acl_tcam_vchunk_migrate_start(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_tcam_vchunk *vchunk, struct mlxsw_sp_acl_tcam_region *region, struct mlxsw_sp_acl_tcam_rehash_ctx *ctx)
1244 mlxsw_sp_acl_tcam_vchunk_migrate_one(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_tcam_vchunk *vchunk, struct mlxsw_sp_acl_tcam_region *region, struct mlxsw_sp_acl_tcam_rehash_ctx *ctx, int *credits)
[all …]
/linux/Documentation/networking/devlink/
devlink-region.rst
4 Devlink Region
11 region can then be accessed via the devlink region interface.
13 Region snapshots are collected by the driver, and can be accessed via read
17 Snapshot identifiers are scoped to the devlink instance, not a region.
21 The major benefit to creating a region is to provide access to internal
29 requested snapshots must implement the ``.snapshot`` callback for the region
38 region should implement the ``.read`` callback in the ``devlink_region_ops``
48 $ devlink region help
49 $ devlink region show [ DEV/REGION ]
50 $ devlink region del DEV/REGION snapshot SNAPSHOT_ID
[all …]
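
The devlink documentation in this result says that drivers supporting dynamically requested snapshots implement the .snapshot callback in devlink_region_ops, and that directly readable regions implement .read. A rough kernel-style sketch of such a registration follows; the region name, size, and demo_* helpers are invented for illustration, and the exact callback signatures have shifted across kernel versions, so treat this as an outline rather than a drop-in implementation.

    /*
     * Illustrative devlink region registration with a .snapshot callback.
     * "demo-internal-state", DEMO_REGION_SIZE and the demo_* names are
     * made up; callback signatures follow recent kernels and may differ
     * on older releases.
     */
    #include <linux/slab.h>
    #include <net/devlink.h>

    #define DEMO_REGION_SIZE 4096

    static int demo_region_snapshot(struct devlink *devlink,
                                    const struct devlink_region_ops *ops,
                                    struct netlink_ext_ack *extack, u8 **data)
    {
        u8 *snapshot;

        snapshot = kzalloc(DEMO_REGION_SIZE, GFP_KERNEL);
        if (!snapshot)
            return -ENOMEM;

        /* A real driver copies its internal state into the buffer here. */
        *data = snapshot;           /* freed via .destructor by devlink core */
        return 0;
    }

    static const struct devlink_region_ops demo_region_ops = {
        .name = "demo-internal-state",
        .snapshot = demo_region_snapshot,
        .destructor = kfree,
    };

    /* Call after the devlink instance has been registered. */
    static struct devlink_region *demo_region_register(struct devlink *devlink)
    {
        return devlink_region_create(devlink, &demo_region_ops,
                                     1 /* max snapshots */, DEMO_REGION_SIZE);
    }

The devlink region commands shown in the excerpt are then the userspace side that requests and inspects those snapshots.
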
iosm.rst
59 each having one secure bin file and at least one Loadmap/Region file. For flashing
61 data required for flashing. The data like region count and address of each region
91 b) Flashing the Loadmap/Region file
109 - The summary of exception details logged as part of this region.
111 - This region contains the details related to the exception occurred in the
114 - This region contains the logs related to the modem CDD driver.
116 - This region contains the eeprom logs.
118 - This region contains the current instance of bootloader logs.
120 - This region contains the previous instance of bootloader logs.
123 Region commands
[all …]
/linux/drivers/acpi/acpica/
evregion.c
4 * Module Name: evregion - Operation Region support
40 * an installed default region handler.
82 * PARAMETERS: region_obj - Internal region object
85 * region_offset - Where in the region to read or write
92 * DESCRIPTION: Dispatch an address space or operation region access to
95 * NOTE: During early initialization, we always install the default region
97 * region address spaces are always available as per the ACPI specification.
127 /* Ensure that there is a handler associated with this region */ in acpi_ev_address_space_dispatch()
129 handler_desc = region_obj->region.handler; in acpi_ev_address_space_dispatch()
132 "No handler for Region [%4.4s] (%p) [%s]", in acpi_ev_address_space_dispatch()
[all …]
/linux/drivers/acpi/pmic/
Kconfig
4 bool "PMIC (Power Management Integrated Circuit) operation region support"
7 region of the PMIC chip. The operation region can be used
14 bool "ACPI operation region support for Bay Trail Crystal Cove PMIC"
17 This config adds ACPI operation region support for the Bay Trail
21 bool "ACPI operation region support for Cherry Trail Crystal Cove PMIC"
24 This config adds ACPI operation region support for the Cherry Trail
28 bool "ACPI operation region support for XPower AXP288 PMIC"
31 This config adds ACPI operation region support for XPower AXP288 PMIC.
34 bool "ACPI operation region support for BXT WhiskeyCove PMIC"
37 This config adds ACPI operation region support for BXT WhiskeyCove PMIC.
[all …]
/linux/arch/arm/mm/
pmsa-v7.c
19 struct region {
25 static struct region __initdata mem[MPU_MAX_REGIONS]; argument
27 static struct region __initdata xip[MPU_MAX_REGIONS];
46 /* Region number */
52 /* Data-side / unified region attributes */
54 /* Region access control register */
60 /* Region size register */
66 /* Region base address register */
76 /* Optional instruction-side region attributes */
78 /* I-side Region access control register */
[all …]
/linux/Documentation/devicetree/bindings/fpga/
fpga-region.yaml
4 $id: http://devicetree.org/schemas/fpga/fpga-region.yaml#
7 title: FPGA Region
17 - FPGA Region
44 Partial Reconfiguration Region (PRR)
51 into a PRR must fit and must use a subset of the region's connections.
52 * The busses within the FPGA are split such that each region gets its own
69 * During Partial Reconfiguration of a specific region, that region's bridge
105 region (PRR0-2) gets its own split of the busses that is independently gated by
112 When a DT overlay that targets an FPGA Region is applied, the FPGA Region will
121 When the overlay is removed, the child nodes will be removed and the FPGA Region
[all …]
/linux/arch/arm64/mm/
cache.S
20 * Ensure that the I and D caches are coherent within specified region.
21 * This is typically used when code has been written to a memory region,
24 * - start - virtual start address of region
25 * - end - virtual end address of region
48 * Ensure that the I and D caches are coherent within specified region.
49 * This is typically used when code has been written to a memory region,
52 * - start - virtual start address of region
53 * - end - virtual end address of region
64 * Ensure that the I and D caches are coherent within specified region.
65 * This is typically used when code has been written to a memory region,
[all …]
/linux/drivers/platform/x86/intel/pmt/
discovery-kunit.c
27 struct telemetry_region *region = &feature_group->regions[i]; in validate_pmt_regions() local
29 kunit_info(test, " - Region %d: cdie_mask=%u, package_id=%u, partition=%u, segment=%u,", in validate_pmt_regions()
30 i, region->plat_info.cdie_mask, region->plat_info.package_id, in validate_pmt_regions()
31 region->plat_info.partition, region->plat_info.segment); in validate_pmt_regions()
33 region->plat_info.bus_number, region->plat_info.device_number, in validate_pmt_regions()
34 region->plat_info.function_number, region->guid); in validate_pmt_regions()
35 kunit_info(test, "\t\taddr=%p, size=%zu, num_rmids=%u", region->addr, region->size, in validate_pmt_regions()
36 region->num_rmids); in validate_pmt_regions()
39 KUNIT_ASSERT_GE(test, region->plat_info.cdie_mask, 0); in validate_pmt_regions()
40 KUNIT_ASSERT_GE(test, region->plat_info.package_id, 0); in validate_pmt_regions()
[all …]
/linux/drivers/fpga/tests/
fpga-region-test.c
3 * KUnit test for the FPGA Region
14 #include <linux/fpga/fpga-region.h>
32 struct fpga_region *region; member
64 * of the Region.
90 static int fake_region_get_bridges(struct fpga_region *region) in fake_region_get_bridges() argument
92 struct fpga_bridge *bridge = region->priv; in fake_region_get_bridges()
94 return fpga_bridge_get_to_list(bridge->dev.parent, region->info, &region->bridge_list); in fake_region_get_bridges()
105 struct fpga_region *region; in fpga_region_test_class_find() local
107 region = fpga_region_class_find(NULL, ctx->region_dev, fake_region_match); in fpga_region_test_class_find()
108 KUNIT_EXPECT_PTR_EQ(test, region, ctx->region); in fpga_region_test_class_find()
[all …]
/linux/drivers/virt/acrn/
mm.c
19 static int modify_region(struct acrn_vm *vm, struct vm_memory_region_op *region) in modify_region() argument
30 regions->regions_gpa = virt_to_phys(region); in modify_region()
35 "Failed to set memory region for VM[%u]!\n", vm->vmid); in modify_region()
42 * acrn_mm_region_add() - Set up the EPT mapping of a memory region.
46 * @size: Size of the region.
55 struct vm_memory_region_op *region; in acrn_mm_region_add() local
58 region = kzalloc(sizeof(*region), GFP_KERNEL); in acrn_mm_region_add()
59 if (!region) in acrn_mm_region_add()
62 region->type = ACRN_MEM_REGION_ADD; in acrn_mm_region_add()
63 region->user_vm_pa = user_gpa; in acrn_mm_region_add()
[all …]
/linux/mm/
nommu.c
87 * region. This test is intentionally done in reverse order, in kobjsize()
419 * initialise the percpu counter for VM and region record slabs, initialise VMA
434 * validate the region tree
435 * - the caller must hold the region lock
440 struct vm_region *region, *last; in validate_nommu_regions() local
452 region = rb_entry(p, struct vm_region, vm_rb); in validate_nommu_regions()
455 BUG_ON(region->vm_end <= region->vm_start); in validate_nommu_regions()
456 BUG_ON(region->vm_top < region->vm_end); in validate_nommu_regions()
457 BUG_ON(region->vm_start < last->vm_top); in validate_nommu_regions()
469 * add a region into the global tree
[all …]
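
The nommu.c lines in this result check a set of invariants over the region tree: each region's vm_end is above its vm_start, vm_top is at or above vm_end, and a region starts at or after the previous region's vm_top. The same checks applied to a plain sorted array, for illustration only; the demo_* names are stand-ins, not the kernel types.

    /*
     * Sketch of the invariants checked in validate_nommu_regions() above,
     * applied to a sorted array instead of the kernel's rbtree.
     */
    #include <assert.h>
    #include <stddef.h>

    struct demo_region {
        unsigned long vm_start;  /* first byte of the mapping */
        unsigned long vm_end;    /* one past the last mapped byte */
        unsigned long vm_top;    /* one past the last allocated byte */
    };

    static void demo_validate_regions(const struct demo_region *regions, size_t n)
    {
        for (size_t i = 0; i < n; i++) {
            assert(regions[i].vm_end > regions[i].vm_start);
            assert(regions[i].vm_top >= regions[i].vm_end);
            if (i > 0)
                assert(regions[i].vm_start >= regions[i - 1].vm_top);
        }
    }

    int main(void)
    {
        const struct demo_region ok[] = {
            { 0x1000, 0x3000, 0x4000 },
            { 0x4000, 0x5000, 0x5000 },
        };

        demo_validate_regions(ok, 2);  /* passes: sorted, non-overlapping */
        return 0;
    }
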
/linux/drivers/gpu/drm/amd/display/dc/hwss/dcn351/
dcn351_hwseq.c
83 * ONO Region 11, DCPG 19: dsc3
84 * ONO Region 10, DCPG 3: dchubp3, dpp3
85 * ONO Region 9, DCPG 18: dsc2
86 * ONO Region 8, DCPG 2: dchubp2, dpp2
87 * ONO Region 7, DCPG 17: dsc1
88 * ONO Region 6, DCPG 1: dchubp1, dpp1
89 * ONO Region 5, DCPG 16: dsc0
90 * ONO Region 4, DCPG 0: dchubp0, dpp0
91 * ONO Region 3, DCPG 25: hpo - SKIPPED. Should be kept on
92 * ONO Region 2, DCPG 24: mpc opp optc dwb
[all …]
/linux/arch/x86/boot/compressed/
kaslr.c
143 * memmap=nn@ss specifies usable region, should in parse_memmap()
152 * system can use. Region above the limit should be avoided. in parse_memmap()
362 * Avoid the region that is unsafe to overlap during in mem_avoid_init()
401 * overlap region with the lowest address.
463 static void store_slot_info(struct mem_vector *region, unsigned long image_size) in store_slot_info() argument
470 slot_area.addr = region->start; in store_slot_info()
471 slot_area.num = 1 + (region->size - image_size) / CONFIG_PHYSICAL_ALIGN; in store_slot_info()
478 * Skip as many 1GB huge pages as possible in the passed region
482 process_gb_huge_pages(struct mem_vector *region, unsigned long image_size) in process_gb_huge_pages() argument
489 store_slot_info(region, image_size); in process_gb_huge_pages()
[all …]
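
store_slot_info() in this result derives the number of candidate KASLR load slots in a region as 1 + (region->size - image_size) / CONFIG_PHYSICAL_ALIGN. A small worked example of that arithmetic with made-up sizes:

    /*
     * Worked example of the slot arithmetic visible in the excerpt above.
     * The sizes are made up; the real alignment is CONFIG_PHYSICAL_ALIGN.
     */
    #include <stdio.h>

    int main(void)
    {
        unsigned long region_size = 512UL << 20;   /* 512 MiB region */
        unsigned long image_size  = 64UL << 20;    /* 64 MiB kernel image */
        unsigned long align       = 2UL << 20;     /* 2 MiB alignment */
        unsigned long slots;

        /* Every aligned start address that still fits the image is a slot. */
        slots = 1 + (region_size - image_size) / align;

        printf("%lu candidate slots\n", slots);    /* prints 225 */
        return 0;
    }
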
/linux/drivers/s390/cio/
vfio_ccw_chp.c
22 struct ccw_schib_region *region; in vfio_ccw_schib_region_read() local
25 if (pos + count > sizeof(*region)) in vfio_ccw_schib_region_read()
29 region = private->region[i].data; in vfio_ccw_schib_region_read()
36 memcpy(region, &sch->schib, sizeof(*region)); in vfio_ccw_schib_region_read()
38 if (copy_to_user(buf, (void *)region + pos, count)) { in vfio_ccw_schib_region_read()
59 struct vfio_ccw_region *region) in vfio_ccw_schib_region_release() argument
86 struct ccw_crw_region *region; in vfio_ccw_crw_region_read() local
90 if (pos + count > sizeof(*region)) in vfio_ccw_crw_region_read()
100 region = private->region[i].data; in vfio_ccw_crw_region_read()
103 memcpy(&region->crw, &crw->crw, sizeof(region->crw)); in vfio_ccw_crw_region_read()
[all …]
vfio_ccw_async.c
3 * Async I/O region for vfio_ccw
20 struct ccw_cmd_region *region; in vfio_ccw_async_region_read() local
23 if (pos + count > sizeof(*region)) in vfio_ccw_async_region_read()
27 region = private->region[i].data; in vfio_ccw_async_region_read()
28 if (copy_to_user(buf, (void *)region + pos, count)) in vfio_ccw_async_region_read()
42 struct ccw_cmd_region *region; in vfio_ccw_async_region_write() local
45 if (pos + count > sizeof(*region)) in vfio_ccw_async_region_write()
51 region = private->region[i].data; in vfio_ccw_async_region_write()
52 if (copy_from_user((void *)region + pos, buf, count)) { in vfio_ccw_async_region_write()
59 ret = region->ret_code ? region->ret_code : count; in vfio_ccw_async_region_write()
[all …]
/linux/tools/testing/memblock/tests/
basic_api.c
38 * Expect to create a new entry. The region counter and total memory get
47 struct region r = { in memblock_add_simple_check()
71 * Expect to create a new entry. The region counter and total memory get
80 struct region r = { in memblock_add_node_simple_check()
115 * region counter fields get updated.
124 struct region r1 = { in memblock_add_disjoint_check()
128 struct region r2 = { in memblock_add_disjoint_check()
166 * Expect to merge the two entries into one region that starts at r2.base
168 * the available memory is updated, and the region counter stays the same.
177 struct region r1 = { in memblock_add_overlap_top_check()
[all …]
/linux/drivers/acpi/
nvs.c
3 * nvs.c - Routines for saving and restoring ACPI NVS memory region
40 struct nvs_region *region; in acpi_nvs_register() local
42 region = kmalloc(sizeof(*region), GFP_KERNEL); in acpi_nvs_register()
43 if (!region) in acpi_nvs_register()
45 region->phys_start = start; in acpi_nvs_register()
46 region->size = size; in acpi_nvs_register()
47 list_add_tail(&region->node, &nvs_region_list); in acpi_nvs_register()
56 struct nvs_region *region; in acpi_nvs_for_each_region() local
58 list_for_each_entry(region, &nvs_region_list, node) { in acpi_nvs_for_each_region()
59 rc = func(region->phys_start, region->size, data); in acpi_nvs_for_each_region()
[all …]
/linux/include/linux/
dm-region-hash.h
6 * Device-Mapper dirty region hash interface.
18 * Region hash
25 * States a region can have.
35 * Region hash create/destroy.
54 sector_t dm_rh_region_to_sector(struct dm_region_hash *rh, region_t region);
58 * Get region size and key (ie. number of the region).
64 * Get/set/update region state (and dirty log).
67 int dm_rh_get_state(struct dm_region_hash *rh, region_t region, int may_block);
68 void dm_rh_set_state(struct dm_region_hash *rh, region_t region,
71 /* Non-zero errors_handled leaves the state of the region NOSYNC */
[all …]
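
The header in this result revolves around converting between sectors and region numbers (dm_rh_region_to_sector(), dm_rh_get_region_size()). A tiny illustration of that mapping, assuming a power-of-two region size so the conversion is a shift; the demo_* names are not part of the dm API.

    /*
     * Illustration of the sector <-> region mapping exposed by the
     * dm-region-hash helpers above, under a power-of-two region size.
     */
    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t sector_t;
    typedef uint64_t region_t;

    static const unsigned int region_shift = 10;  /* 1024 sectors per region */

    static region_t demo_sector_to_region(sector_t sector)
    {
        return sector >> region_shift;
    }

    static sector_t demo_region_to_sector(region_t region)
    {
        return (sector_t)region << region_shift;
    }

    int main(void)
    {
        sector_t s = 123456;
        region_t r = demo_sector_to_region(s);

        /* 123456 >> 10 == 120, whose first sector is 120 << 10 == 122880. */
        printf("sector %llu -> region %llu (starts at sector %llu)\n",
               (unsigned long long)s, (unsigned long long)r,
               (unsigned long long)demo_region_to_sector(r));
        return 0;
    }
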
