
Searched refs:memory (Results 1 – 25 of 2664) sorted by relevance


/linux/tools/testing/selftests/memory-hotplug/
mem-on-off-test.sh
25 if ! ls $SYSFS/devices/system/memory/memory* > /dev/null 2>&1; then
26 echo $msg memory hotplug is not supported >&2
30 if ! grep -q 1 $SYSFS/devices/system/memory/memory*/removable; then
31 echo $msg no hot-pluggable memory >&2
43 for memory in $SYSFS/devices/system/memory/memory*; do
44 if grep -q 1 $memory/removable &&
45 grep -q $state $memory/state; then
46 echo ${memory##/*/memory}
63 grep -q online $SYSFS/devices/system/memory/memory$1/state
68 grep -q offline $SYSFS/devices/system/memory/memory$1/state
[all …]
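
The selftest above drives memory hotplug purely through sysfs. As a companion sketch, here is the same enumeration in userspace C rather than shell; the paths are taken from the script, everything else is illustrative:

#include <dirent.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	const char *base = "/sys/devices/system/memory";
	struct dirent *d;
	DIR *dir = opendir(base);
	char path[512], buf[8];

	if (!dir)
		return 1;
	while ((d = readdir(dir))) {
		FILE *f;

		if (strncmp(d->d_name, "memory", 6) != 0)
			continue;	/* skip non-block entries like block_size_bytes */
		snprintf(path, sizeof(path), "%s/%s/removable", base, d->d_name);
		f = fopen(path, "r");
		/* same test as the script's "grep -q 1 .../removable" */
		if (f && fgets(buf, sizeof(buf), f) && buf[0] == '1')
			printf("%s is hot-removable\n", d->d_name);
		if (f)
			fclose(f);
	}
	closedir(dir);
	return 0;
}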
/linux/drivers/gpu/drm/nouveau/nvkm/core/
memory.c
30 nvkm_memory_tags_put(struct nvkm_memory *memory, struct nvkm_device *device, in nvkm_memory_tags_put() argument
39 kfree(memory->tags); in nvkm_memory_tags_put()
40 memory->tags = NULL; in nvkm_memory_tags_put()
48 nvkm_memory_tags_get(struct nvkm_memory *memory, struct nvkm_device *device, in nvkm_memory_tags_get() argument
56 if ((tags = memory->tags)) { in nvkm_memory_tags_get()
94 *ptags = memory->tags = tags; in nvkm_memory_tags_get()
101 struct nvkm_memory *memory) in nvkm_memory_ctor() argument
103 memory->func = func; in nvkm_memory_ctor()
104 kref_init(&memory->kref); in nvkm_memory_ctor()
110 struct nvkm_memory *memory = container_of(kref, typeof(*memory), kref); in nvkm_memory_del() local
[all …]
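
The constructor/destructor pair visible in this excerpt is the standard kref lifetime pattern: kref_init() in the constructor, container_of() back to the enclosing object in the release callback. A generic restatement as a sketch; the names here are hypothetical, not nouveau's:

#include <linux/container_of.h>
#include <linux/kref.h>
#include <linux/slab.h>

struct my_obj {
	struct kref kref;
};

static void my_obj_release(struct kref *kref)
{
	/* same container_of() trick as nvkm_memory_del() above */
	struct my_obj *obj = container_of(kref, struct my_obj, kref);

	kfree(obj);
}

static struct my_obj *my_obj_new(void)
{
	struct my_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

	if (obj)
		kref_init(&obj->kref);	/* refcount starts at 1 */
	return obj;
}

static void my_obj_put(struct my_obj *obj)
{
	kref_put(&obj->kref, my_obj_release);	/* release on last reference */
}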
firmware.c
113 #define nvkm_firmware_mem(p) container_of((p), struct nvkm_firmware, mem.memory)
116 nvkm_firmware_mem_sgl(struct nvkm_memory *memory) in nvkm_firmware_mem_sgl() argument
118 struct nvkm_firmware *fw = nvkm_firmware_mem(memory); in nvkm_firmware_mem_sgl()
132 nvkm_firmware_mem_map(struct nvkm_memory *memory, u64 offset, struct nvkm_vmm *vmm, in nvkm_firmware_mem_map() argument
135 struct nvkm_firmware *fw = nvkm_firmware_mem(memory); in nvkm_firmware_mem_map()
137 .memory = &fw->mem.memory, in nvkm_firmware_mem_map()
139 .sgl = nvkm_firmware_mem_sgl(memory), in nvkm_firmware_mem_map()
149 nvkm_firmware_mem_size(struct nvkm_memory *memory) in nvkm_firmware_mem_size() argument
151 struct scatterlist *sgl = nvkm_firmware_mem_sgl(memory); in nvkm_firmware_mem_size()
157 nvkm_firmware_mem_addr(struct nvkm_memory *memory) in nvkm_firmware_mem_addr() argument
[all …]
/linux/Documentation/admin-guide/mm/
memory-hotplug.rst
5 This document describes generic Linux support for memory hot(un)plug with
14 memory available to a machine at runtime. In the simplest case, it consists of
20 - The physical memory available to a machine can be adjusted at runtime, up- or
21 downgrading the memory capacity. This dynamic memory resizing, sometimes
26 example is replacing failing memory modules.
28 - Reducing energy consumption either by physically unplugging memory modules or
29 by logically unplugging (parts of) memory modules from Linux.
31 Further, the basic memory hot(un)plug infrastructure in Linux is nowadays also
32 used to expose persistent memory, other performance-differentiated memory and
33 reserved memory regions as ordinary system RAM to Linux.
[all …]
numaperf.rst
8 Some platforms may have multiple types of memory attached to a compute
9 node. These disparate memory ranges may share some characteristics, such
13 A system supports such heterogeneous memory by grouping each memory type
15 characteristics. Some memory may share the same node as a CPU, and others
16 are provided as memory only nodes. While memory only nodes do not provide
19 nodes with local memory and a memory only node for each compute node::
30 A "memory initiator" is a node containing one or more devices such as
31 CPUs or separate memory I/O devices that can initiate memory requests.
32 A "memory target" is a node containing one or more physical address
33 ranges accessible from one or more memory initiators.
[all …]
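
numaperf.rst documents per-node performance attributes for initiator/target pairs. A hedged userspace sketch that reads the access class 0 attributes for one target node; the sysfs paths are assumed from that document and not every platform exposes them:

#include <stdio.h>

int main(void)
{
	static const char *attrs[] = { "read_bandwidth", "write_bandwidth",
				       "read_latency", "write_latency" };
	const int node = 0;	/* hypothetical memory target node */
	char path[256], val[32];

	for (unsigned int i = 0; i < 4; i++) {
		FILE *f;

		snprintf(path, sizeof(path),
			 "/sys/devices/system/node/node%d/access0/initiators/%s",
			 node, attrs[i]);
		f = fopen(path, "r");
		if (f && fgets(val, sizeof(val), f))
			printf("node%d %s: %s", node, attrs[i], val);
		if (f)
			fclose(f);
	}
	return 0;
}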
concepts.rst
5 The memory management in Linux is a complex system that evolved over the
7 systems from MMU-less microcontrollers to supercomputers. The memory
19 The physical memory in a computer system is a limited resource and
20 even for systems that support memory hotplug there is a hard limit on
21 the amount of memory that can be installed. The physical memory is not
27 All this makes dealing directly with physical memory quite complex and
28 to avoid this complexity a concept of virtual memory was developed.
30 The virtual memory abstracts the details of physical memory from the
32 physical memory (demand paging) and provides a mechanism for the
35 With virtual memory, each and every memory access uses a virtual
[all …]
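
The demand paging that concepts.rst describes is easy to observe from userspace: an anonymous mapping consumes no physical pages until first touch. A minimal sketch:

#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
	size_t len = 1UL << 30;	/* reserve 1 GiB of virtual address space */
	char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (p == MAP_FAILED)
		return 1;
	/* only now does the kernel fault in a single physical page */
	p[0] = 42;
	printf("mapped %zu bytes, touched one page\n", len);
	munmap(p, len);
	return 0;
}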
/linux/Documentation/arch/arm64/
kdump.rst
2 crashkernel memory reservation on arm64
9 reserved memory is needed to pre-load the kdump kernel and boot such
12 That reserved memory for kdump is adapted to be able to minimally
19 Through the kernel parameters below, memory can be reserved accordingly
21 large chunk of memory can be found. The low memory reservation needs to
22 be considered if the crashkernel is reserved from the high memory area.
28 Low memory and high memory
31 For kdump reservations, low memory is the memory area under a specific
34 vmcore dumping can be ignored. On arm64, the low memory upper bound is
37 whole system RAM is low memory. Outside of the low memory described
[all …]
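
The kernel parameters the excerpt refers to are the crashkernel= family. Common forms, hedged against Documentation/admin-guide/kernel-parameters.txt since exact semantics vary by architecture:

crashkernel=256M                   reserve 256M; the kernel picks a suitable base
crashkernel=2G-4G:256M,4G-:320M    scale the reservation by total system RAM
crashkernel=512M,high              allow the reservation above the low-memory limit
crashkernel=128M,low               explicit companion low-memory reservation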
/linux/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/
mem.c
22 #define nvkm_mem(p) container_of((p), struct nvkm_mem, memory)
31 struct nvkm_memory memory; member
43 nvkm_mem_target(struct nvkm_memory *memory) in nvkm_mem_target() argument
45 return nvkm_mem(memory)->target; in nvkm_mem_target()
49 nvkm_mem_page(struct nvkm_memory *memory) in nvkm_mem_page() argument
55 nvkm_mem_addr(struct nvkm_memory *memory) in nvkm_mem_addr() argument
57 struct nvkm_mem *mem = nvkm_mem(memory); in nvkm_mem_addr()
64 nvkm_mem_size(struct nvkm_memory *memory) in nvkm_mem_size() argument
66 return nvkm_mem(memory)->pages << PAGE_SHIFT; in nvkm_mem_size()
70 nvkm_mem_map_dma(struct nvkm_memory *memory, u64 offset, struct nvkm_vmm *vmm, in nvkm_mem_map_dma() argument
[all …]
umem.c
38 struct nvkm_memory *memory = NULL; in nvkm_umem_search() local
48 memory = nvkm_memory_ref(umem->memory); in nvkm_umem_search()
56 memory = nvkm_memory_ref(umem->memory); in nvkm_umem_search()
59 return memory ? memory : ERR_PTR(-ENOENT); in nvkm_umem_search()
98 int ret = nvkm_mem_map_host(umem->memory, &umem->map); in nvkm_umem_map()
103 *length = nvkm_memory_size(umem->memory); in nvkm_umem_map()
109 int ret = mmu->func->mem.umap(mmu, umem->memory, argv, argc, in nvkm_umem_map()
130 nvkm_memory_unref(&umem->memory); in nvkm_umem_dtor()
178 &umem->memory); in nvkm_umem_new()
186 args->v0.page = nvkm_memory_page(umem->memory); in nvkm_umem_new()
[all …]
/linux/Documentation/ABI/testing/
sysfs-devices-memory
1 What: /sys/devices/system/memory
5 The /sys/devices/system/memory contains a snapshot of the
6 internal state of the kernel memory blocks. Files could be
9 Users: hotplug memory add/remove tools
12 What: /sys/devices/system/memory/memoryX/removable
16 The file /sys/devices/system/memory/memoryX/removable is a
17 legacy interface used to indicate whether a memory block is
19 "1" if and only if the kernel supports memory offlining.
20 Users: hotplug memory remove tools
24 What: /sys/devices/system/memory/memoryX/phys_device
[all …]
/linux/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/
base.c
34 struct nvkm_memory *memory = &iobj->memory; in nvkm_instobj_load() local
35 const u64 size = nvkm_memory_size(memory); in nvkm_instobj_load()
39 if (!(map = nvkm_kmap(memory))) { in nvkm_instobj_load()
41 nvkm_wo32(memory, i, iobj->suspend[i / 4]); in nvkm_instobj_load()
45 nvkm_done(memory); in nvkm_instobj_load()
54 struct nvkm_memory *memory = &iobj->memory; in nvkm_instobj_save() local
55 const u64 size = nvkm_memory_size(memory); in nvkm_instobj_save()
63 if (!(map = nvkm_kmap(memory))) { in nvkm_instobj_save()
65 iobj->suspend[i / 4] = nvkm_ro32(memory, i); in nvkm_instobj_save()
69 nvkm_done(memory); in nvkm_instobj_save()
[all …]
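
The load path above is the generic instobj access pattern: try nvkm_kmap() for a direct CPU mapping and fall back to the 32-bit accessors if none is available. Restated as a sketch; names mirror the excerpt and assume nouveau's core/memory.h, but this is not the driver's exact code:

static void restore_words(struct nvkm_memory *memory, const u32 *buf, u64 size)
{
	void *map;
	u64 i;

	if (!(map = nvkm_kmap(memory))) {
		/* no direct mapping: fall back to slow 32-bit writes */
		for (i = 0; i < size; i += 4)
			nvkm_wo32(memory, i, buf[i / 4]);
	} else {
		memcpy(map, buf, size);	/* direct CPU copy */
	}
	nvkm_done(memory);	/* drop the mapping in either case */
}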
nv50.c
44 #define nv50_instobj(p) container_of((p), struct nv50_instobj, base.memory)
57 nv50_instobj_wr32_slow(struct nvkm_memory *memory, u64 offset, u32 data) in nv50_instobj_wr32_slow() argument
59 struct nv50_instobj *iobj = nv50_instobj(memory); in nv50_instobj_wr32_slow()
76 nv50_instobj_rd32_slow(struct nvkm_memory *memory, u64 offset) in nv50_instobj_rd32_slow() argument
78 struct nv50_instobj *iobj = nv50_instobj(memory); in nv50_instobj_rd32_slow()
103 nv50_instobj_wr32(struct nvkm_memory *memory, u64 offset, u32 data) in nv50_instobj_wr32() argument
105 iowrite32_native(data, nv50_instobj(memory)->map + offset); in nv50_instobj_wr32()
109 nv50_instobj_rd32(struct nvkm_memory *memory, u64 offset) in nv50_instobj_rd32() argument
111 return ioread32_native(nv50_instobj(memory)->map + offset); in nv50_instobj_rd32()
125 struct nvkm_memory *memory = &iobj->base.memory; in nv50_instobj_kmap() local
[all …]
gk20a.c
59 #define gk20a_instobj(p) container_of((p), struct gk20a_instobj, base.memory)
116 gk20a_instobj_target(struct nvkm_memory *memory) in gk20a_instobj_target() argument
122 gk20a_instobj_page(struct nvkm_memory *memory) in gk20a_instobj_page() argument
128 gk20a_instobj_addr(struct nvkm_memory *memory) in gk20a_instobj_addr() argument
130 return (u64)gk20a_instobj(memory)->mn->offset << 12; in gk20a_instobj_addr()
134 gk20a_instobj_size(struct nvkm_memory *memory) in gk20a_instobj_size() argument
136 return (u64)gk20a_instobj(memory)->mn->length << 12; in gk20a_instobj_size()
151 imem->vaddr_use -= nvkm_memory_size(&obj->base.base.memory); in gk20a_instobj_iommu_recycle_vaddr()
174 gk20a_instobj_acquire_dma(struct nvkm_memory *memory) in gk20a_instobj_acquire_dma() argument
176 struct gk20a_instobj *node = gk20a_instobj(memory); in gk20a_instobj_acquire_dma()
[all …]
/linux/Documentation/mm/
memory-model.rst
7 Physical memory in a system may be addressed in different ways. The
8 simplest case is when the physical memory starts at address 0 and
13 different memory banks are attached to different CPUs.
15 Linux abstracts this diversity using one of the two memory models:
17 memory models it supports, what the default memory model is and
20 All the memory models track the status of physical page frames using
23 Regardless of the selected memory model, there exists one-to-one
27 Each memory model defines :c:func:`pfn_to_page` and :c:func:`page_to_pfn`
34 The simplest memory model is FLATMEM. This model is suitable for
36 memory.
[all …]
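
The one-to-one mapping the document guarantees between PFNs and struct page can be stated directly: pfn_to_page() and page_to_pfn() are inverses wherever pfn_valid() holds, regardless of which memory model the kernel was built with. A sketch:

#include <linux/mm.h>

static bool pfn_round_trips(unsigned long pfn)
{
	struct page *page;

	if (!pfn_valid(pfn))	/* hole in the physical address map */
		return false;
	page = pfn_to_page(pfn);
	return page_to_pfn(page) == pfn;	/* one-to-one by definition */
}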
hmm.rst
5 Provide infrastructure and helpers to integrate non-conventional memory (device
6 memory like GPU on board memory) into regular kernel path, with the cornerstone
7 of this being specialized struct page for such memory (see sections 5 to 7 of
18 related to using device specific memory allocators. In the second section, I
22 fifth section deals with how device memory is represented inside the kernel.
28 Problems of using a device specific memory allocator
31 Devices with a large amount of on board memory (several gigabytes) like GPUs
32 have historically managed their memory through dedicated driver specific APIs.
33 This creates a disconnect between memory allocated and managed by a device
34 driver and regular application memory (private anonymous, shared memory, or
[all …]
numa.rst
12 or more CPUs, local memory, and/or IO buses. For brevity and to
26 Coherent NUMA or ccNUMA systems. With ccNUMA systems, all memory is visible
30 Memory access time and effective memory bandwidth varies depending on how far
31 away the cell containing the CPU or IO bus making the memory access is from the
32 cell containing the target memory. For example, access to memory by CPUs
34 bandwidths than accesses to memory on other, remote cells. NUMA platforms
39 memory bandwidth. However, to achieve scalable memory bandwidth, system and
40 application software must arrange for a large majority of the memory references
41 [cache misses] to be to "local" memory--memory on the same cell, if any--or
42 to the closest cell with memory.
[all …]
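
On the kernel side, honoring the locality advice above is explicit: allocator entry points such as kmalloc_node() and alloc_pages_node() take a NUMA node id. A minimal hedged sketch:

#include <linux/slab.h>

static void *alloc_local_buffer(int nid, size_t size)
{
	/* prefer node "nid"; the allocator falls back to other
	 * nodes if that node has no free memory */
	return kmalloc_node(size, GFP_KERNEL, nid);
}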
/linux/Documentation/core-api/
memory-hotplug.rst
12 There are six types of notification defined in ``include/linux/memory.h``:
15 Generated before new memory becomes available in order to be able to
16 prepare subsystems to handle memory. The page allocator is still unable
17 to allocate from the new memory.
23 Generated when memory has been successfully brought online. The callback may
24 allocate pages from the new memory.
27 Generated to begin the process of offlining memory. Allocations are no
28 longer possible from the memory but some of the memory to be offlined
29 is still in use. The callback can be used to free memory known to a
30 subsystem from the indicated memory block.
[all …]
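
A hedged sketch of a client of this interface, using the declarations from include/linux/memory.h; the struct memory_notify fields used here (start_pfn, nr_pages) are the documented ones:

#include <linux/init.h>
#include <linux/memory.h>
#include <linux/notifier.h>

static int my_mem_callback(struct notifier_block *nb,
			   unsigned long action, void *arg)
{
	struct memory_notify *mn = arg;

	switch (action) {
	case MEM_GOING_ONLINE:
		/* prepare for mn->nr_pages pages; allocator not usable yet */
		break;
	case MEM_ONLINE:
		/* pages starting at mn->start_pfn are now allocatable */
		break;
	case MEM_GOING_OFFLINE:
		/* free anything we hold in the affected memory block */
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block my_mem_nb = {
	.notifier_call = my_mem_callback,
};

static int __init my_init(void)
{
	return register_memory_notifier(&my_mem_nb);
}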
/linux/drivers/staging/octeon/
ethernet-mem.c
49 char *memory; in cvm_oct_free_hw_skbuff() local
52 memory = cvmx_fpa_alloc(pool); in cvm_oct_free_hw_skbuff()
53 if (memory) { in cvm_oct_free_hw_skbuff()
55 *(struct sk_buff **)(memory - sizeof(void *)); in cvm_oct_free_hw_skbuff()
59 } while (memory); in cvm_oct_free_hw_skbuff()
79 char *memory; in cvm_oct_fill_hw_memory() local
94 memory = kmalloc(size + 256, GFP_ATOMIC); in cvm_oct_fill_hw_memory()
95 if (unlikely(!memory)) { in cvm_oct_fill_hw_memory()
100 fpa = (char *)(((unsigned long)memory + 256) & ~0x7fUL); in cvm_oct_fill_hw_memory()
101 *((char **)fpa - 1) = memory; in cvm_oct_fill_hw_memory()
[all …]
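
The allocation scheme in the excerpt generalizes to a common trick: over-allocate, align the usable pointer, and stash the original kmalloc() pointer just below the aligned address so it can be freed later. A standalone sketch, not octeon's exact code:

#include <linux/slab.h>

static void *alloc_aligned_128(size_t size)
{
	char *memory = kmalloc(size + 256, GFP_ATOMIC);
	char *aligned;

	if (!memory)
		return NULL;
	/* 256 bytes of slack guarantees a 128-byte-aligned address
	 * with room for the saved pointer below it */
	aligned = (char *)(((unsigned long)memory + 256) & ~0x7fUL);
	*((char **)aligned - 1) = memory;	/* remember what to kfree() */
	return aligned;
}

static void free_aligned_128(void *aligned)
{
	kfree(*((char **)aligned - 1));	/* retrieve the stashed pointer */
}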
/linux/drivers/gpu/drm/nouveau/nvkm/subdev/fb/
ram.c
24 #define nvkm_vram(p) container_of((p), struct nvkm_vram, memory)
32 struct nvkm_memory memory; member
39 nvkm_vram_kmap(struct nvkm_memory *memory, struct nvkm_memory **pmemory) in nvkm_vram_kmap() argument
41 return nvkm_instobj_wrap(nvkm_vram(memory)->ram->fb->subdev.device, memory, pmemory); in nvkm_vram_kmap()
45 nvkm_vram_map(struct nvkm_memory *memory, u64 offset, struct nvkm_vmm *vmm, in nvkm_vram_map() argument
48 struct nvkm_vram *vram = nvkm_vram(memory); in nvkm_vram_map()
50 .memory = &vram->memory, in nvkm_vram_map()
59 nvkm_vram_size(struct nvkm_memory *memory) in nvkm_vram_size() argument
61 return (u64)nvkm_mm_size(nvkm_vram(memory)->mn) << NVKM_RAM_MM_SHIFT; in nvkm_vram_size()
65 nvkm_vram_addr(struct nvkm_memory *memory) in nvkm_vram_addr() argument
[all …]
/linux/fs/btrfs/tests/
extent-io-tests.c
671 static void dump_eb_and_memory_contents(struct extent_buffer *eb, void *memory, in dump_eb_and_memory_contents() argument
678 if (memcmp(addr, memory + i, 1) != 0) { in dump_eb_and_memory_contents()
681 i, *(u8 *)addr, *(u8 *)(memory + i)); in dump_eb_and_memory_contents()
687 static int verify_eb_and_memory(struct extent_buffer *eb, void *memory, in verify_eb_and_memory() argument
693 if (memcmp(memory + (i << PAGE_SHIFT), eb_addr, PAGE_SIZE) != 0) { in verify_eb_and_memory()
694 dump_eb_and_memory_contents(eb, memory, test_name); in verify_eb_and_memory()
705 static void init_eb_and_memory(struct extent_buffer *eb, void *memory) in init_eb_and_memory() argument
707 get_random_bytes(memory, eb->len); in init_eb_and_memory()
708 write_extent_buffer(eb, memory, 0, eb->len); in init_eb_and_memory()
715 void *memory = NULL; in test_eb_mem_ops() local
[all …]
/linux/Documentation/userspace-api/media/v4l/
dev-mem2mem.rst
9 A V4L2 memory-to-memory device can compress, decompress, transform, or
10 otherwise convert video data from one format into another format, in memory.
11 Such memory-to-memory devices set the ``V4L2_CAP_VIDEO_M2M`` or
12 ``V4L2_CAP_VIDEO_M2M_MPLANE`` capability. Examples of memory-to-memory
16 A memory-to-memory video node acts just like a normal video node, but it
17 supports both output (sending frames from memory to the hardware)
19 memory) stream I/O. An application will have to setup the stream I/O for
23 Memory-to-memory devices function as a shared resource: you can
32 One of the most common memory-to-memory devices is the codec. Codecs
35 See :ref:`codec-controls`. More details on how to use codec memory-to-memory
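
In practice the stream setup the text describes means negotiating buffers on both queues of the same file handle. A hedged userspace sketch with error handling trimmed; the device node path is hypothetical:

#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

int setup_m2m(const char *node)	/* e.g. "/dev/video0" */
{
	int fd = open(node, O_RDWR);
	struct v4l2_requestbuffers out = {
		.count = 4,
		.type = V4L2_BUF_TYPE_VIDEO_OUTPUT,	/* frames into the device */
		.memory = V4L2_MEMORY_MMAP,
	};
	struct v4l2_requestbuffers cap = {
		.count = 4,
		.type = V4L2_BUF_TYPE_VIDEO_CAPTURE,	/* converted frames out */
		.memory = V4L2_MEMORY_MMAP,
	};

	if (fd < 0)
		return -1;
	ioctl(fd, VIDIOC_REQBUFS, &out);
	ioctl(fd, VIDIOC_REQBUFS, &cap);
	return fd;
}

A real client would continue with VIDIOC_QBUF and VIDIOC_STREAMON on both queues.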
/linux/Documentation/arch/powerpc/
firmware-assisted-dump.rst
14 - Fadump uses the same firmware interfaces and memory reservation model
16 - Unlike phyp dump, FADump exports the memory dump through /proc/vmcore
21 - Unlike phyp dump, FADump allows the user to release all the memory reserved
35 - Once the dump is copied out, the memory that held the dump
44 - The first kernel registers the sections of memory with the
46 These registered sections of memory are reserved by the first
50 low memory regions (boot memory) from source to destination area.
54 The term 'boot memory' means the size of the low memory chunk
56 booted with restricted memory. By default, the boot memory
58 Alternatively, user can also specify boot memory size
[all …]
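
FADump is controlled through sysfs at runtime. A hedged sketch of registering it for crash capture; the knob path is assumed from firmware-assisted-dump.rst and should be verified on the target kernel:

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/kernel/fadump_registered", "w");

	if (!f)
		return 1;
	fputc('1', f);	/* 1 = register, 0 = unregister */
	fclose(f);
	return 0;
}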
/linux/tools/testing/memblock/tests/
basic_api.c
17 ASSERT_NE(memblock.memory.regions, NULL); in memblock_initialization_check()
18 ASSERT_EQ(memblock.memory.cnt, 0); in memblock_initialization_check()
19 ASSERT_EQ(memblock.memory.max, EXPECTED_MEMBLOCK_REGIONS); in memblock_initialization_check()
20 ASSERT_EQ(strcmp(memblock.memory.name, "memory"), 0); in memblock_initialization_check()
24 ASSERT_EQ(memblock.memory.max, EXPECTED_MEMBLOCK_REGIONS); in memblock_initialization_check()
45 rgn = &memblock.memory.regions[0]; in memblock_add_simple_check()
60 ASSERT_EQ(memblock.memory.cnt, 1); in memblock_add_simple_check()
61 ASSERT_EQ(memblock.memory.total_size, r.size); in memblock_add_simple_check()
78 rgn = &memblock.memory.regions[0]; in memblock_add_node_simple_check()
97 ASSERT_EQ(memblock.memory.cnt, 1); in memblock_add_node_simple_check()
[all …]
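
These assertions follow a fixed shape: reset state, memblock_add() a region, then check the bookkeeping. A condensed sketch in the same style; ASSERT_EQ and reset_memblock_regions() are assumed from this test harness:

#include <linux/sizes.h>

static int memblock_add_sketch(void)
{
	struct memblock_region *rgn = &memblock.memory.regions[0];

	reset_memblock_regions();	/* start from an empty memblock */
	memblock_add(SZ_1G, SZ_4M);	/* register [1G, 1G + 4M) */

	ASSERT_EQ(rgn->base, SZ_1G);
	ASSERT_EQ(rgn->size, SZ_4M);
	ASSERT_EQ(memblock.memory.cnt, 1);
	ASSERT_EQ(memblock.memory.total_size, SZ_4M);
	return 0;
}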
/linux/arch/arm64/boot/dts/ti/
k3-am68-sk-som.dtsi
12 memory@80000000 {
13 device_type = "memory";
20 reserved_memory: reserved-memory {
30 mcu_r5fss0_core0_dma_memory_region: r5f-dma-memory@a0000000 {
36 mcu_r5fss0_core0_memory_region: r5f-memory@a0100000 {
42 mcu_r5fss0_core1_dma_memory_region: r5f-dma-memory@a1000000 {
48 mcu_r5fss0_core1_memory_region: r5f-memory@a1100000 {
54 main_r5fss0_core0_dma_memory_region: r5f-dma-memory@a2000000 {
60 main_r5fss0_core0_memory_region: r5f-memory@a2100000 {
66 main_r5fss0_core1_dma_memory_region: r5f-dma-memory@a3000000 {
[all …]
/linux/Documentation/admin-guide/mm/damon/
reclaim.rst
8 be used for proactive and lightweight reclamation under light memory pressure.
10 to be selectively used for different levels of memory pressure and requirements.
15 On general memory over-committed systems, proactively reclaiming cold pages
16 helps save memory and reduce latency spikes incurred by the direct
20 Free Pages Reporting [3]_ based memory over-commit virtualization systems are
22 memory to host, and the host reallocates the reported memory to other guests.
23 As a result, the memory of the systems is fully utilized. However, the
24 guests might not be so memory-frugal, mainly because some kernel subsystems and
25 user-space applications are designed to use as much memory as available. Then,
26 guests could report only a small amount of memory as free to the host, resulting in
[all …]
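
DAMON_RECLAIM is toggled through module parameters exposed in sysfs. A hedged sketch; the path follows reclaim.rst and requires the module to be built in or loaded:

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/module/damon_reclaim/parameters/enabled", "w");

	if (!f)
		return 1;
	fputc('Y', f);	/* 'N' disables it again */
	fclose(f);
	return 0;
}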