
Searched full:pages (Results 1 – 25 of 2355) sorted by relevance


/linux/mm/
percpu-vm.c
23 * pcpu_get_pages - get temp pages array
30 * Pointer to temp pages array on success.
34 static struct page **pages; in pcpu_get_pages() local
35 size_t pages_size = pcpu_nr_units * pcpu_unit_pages * sizeof(pages[0]); in pcpu_get_pages()
39 if (!pages) in pcpu_get_pages()
40 pages = pcpu_mem_zalloc(pages_size, GFP_KERNEL); in pcpu_get_pages()
41 return pages; in pcpu_get_pages()
45 * pcpu_free_pages - free pages which were allocated for @chunk
46 * @chunk: chunk pages were allocated for
47 * @pages
55 pcpu_free_pages(struct pcpu_chunk * chunk,struct page ** pages,int page_start,int page_end) pcpu_free_pages() argument
83 pcpu_alloc_pages(struct pcpu_chunk * chunk,struct page ** pages,int page_start,int page_end,gfp_t gfp) pcpu_alloc_pages() argument
154 pcpu_unmap_pages(struct pcpu_chunk * chunk,struct page ** pages,int page_start,int page_end) pcpu_unmap_pages() argument
193 __pcpu_map_pages(unsigned long addr,struct page ** pages,int nr_pages) __pcpu_map_pages() argument
215 pcpu_map_pages(struct pcpu_chunk * chunk,struct page ** pages,int page_start,int page_end) pcpu_map_pages() argument
279 struct page **pages; pcpu_populate_chunk() local
315 struct page **pages; pcpu_depopulate_chunk() local
[all …]
balloon_compaction.c
5 * Common interface for making balloon pages movable by compaction.
31 * balloon_page_list_enqueue() - inserts a list of pages into the balloon page
34 * @pages: pages to enqueue - allocated using balloon_page_alloc.
36 * Driver must call this function to properly enqueue balloon pages before
39 * Return: number of pages that were enqueued.
42 struct list_head *pages) in balloon_page_list_enqueue() argument
49 list_for_each_entry_safe(page, tmp, pages, lru) { in balloon_page_list_enqueue()
60 * balloon_page_list_dequeue() - removes pages from balloon's page list and
61 * returns a list of the pages.
63 * @pages: pointer to the list of pages that would be returned to the caller.
[all …]
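The balloon_compaction.c excerpt above documents the enqueue side of the balloon page list API. The following is a rough, hedged sketch of how a balloon driver might use it; the helper name demo_inflate and the nr_pages parameter are hypothetical, only balloon_page_alloc() and balloon_page_list_enqueue() come from the interface described here.

#include <linux/balloon_compaction.h>
#include <linux/list.h>

/* Hypothetical inflate helper: allocate balloon pages, collect them on a
 * local list, then hand them to the balloon in one call. */
static size_t demo_inflate(struct balloon_dev_info *b_dev_info,
                           unsigned int nr_pages)
{
        LIST_HEAD(pages);
        unsigned int i;

        for (i = 0; i < nr_pages; i++) {
                struct page *page = balloon_page_alloc();

                if (!page)
                        break;
                /* chain the page on the local list via its lru member */
                list_add(&page->lru, &pages);
        }

        /* inserts the whole list into the balloon's page list;
         * returns how many pages were actually enqueued */
        return balloon_page_list_enqueue(b_dev_info, &pages);
}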
gup_test.c
10 static void put_back_pages(unsigned int cmd, struct page **pages, in put_back_pages() argument
19 put_page(pages[i]); in put_back_pages()
25 unpin_user_pages(pages, nr_pages); in put_back_pages()
29 unpin_user_pages(pages, nr_pages); in put_back_pages()
32 put_page(pages[i]); in put_back_pages()
39 static void verify_dma_pinned(unsigned int cmd, struct page **pages, in verify_dma_pinned() argument
50 folio = page_folio(pages[i]); in verify_dma_pinned()
53 "pages[%lu] is NOT dma-pinned\n", i)) { in verify_dma_pinned()
59 "pages[%lu] is NOT pinnable but pinned\n", in verify_dma_pinned()
69 static void dump_pages_test(struct gup_test *gup, struct page **pages, in dump_pages_test() argument
[all …]
gup.c
31 static inline void sanity_check_pinned_pages(struct page **pages, in sanity_check_pinned_pages() argument
38 * We only pin anonymous pages if they are exclusive. Once pinned, we in sanity_check_pinned_pages()
42 * We'd like to verify that our pinned anonymous pages are still mapped in sanity_check_pinned_pages()
49 for (; npages; npages--, pages++) { in sanity_check_pinned_pages()
50 struct page *page = *pages; in sanity_check_pinned_pages()
180 * Pages that were pinned via pin_user_pages*() must be released via either
182 * that such pages can be separately tracked and uniquely handled. In
263 * unpin_user_pages_dirty_lock() - release and optionally dirty gup-pinned pages
264 * @pages: array of pages to be maybe marked dirty, and definitely released.
265 * @npages: number of pages in the @pages array.
[all …]
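The gup.c excerpt above states the central pin/unpin rule: pages obtained with pin_user_pages*() must be released through the unpin_user_page*() family so that pinned pages stay separately tracked, never through plain put_page(). A minimal, hedged sketch of that contract follows; demo_pin_for_dma is a hypothetical helper, and the choice of FOLL_WRITE | FOLL_LONGTERM is illustrative.

#include <linux/mm.h>

static int demo_pin_for_dma(unsigned long uaddr, int npages,
                            struct page **pages)
{
        int pinned;

        /* pin the user pages for long-term device access */
        pinned = pin_user_pages_fast(uaddr, npages,
                                     FOLL_WRITE | FOLL_LONGTERM, pages);
        if (pinned < 0)
                return pinned;
        if (pinned != npages) {
                /* partial pin: release what we did get and bail out */
                unpin_user_pages(pages, pinned);
                return -EFAULT;
        }

        /* ... hand the pinned pages to the device ... */

        /* mark the pages dirty (the device wrote to them) and drop the pins */
        unpin_user_pages_dirty_lock(pages, pinned, true);
        return 0;
}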
/linux/net/ceph/
pagevec.c
13 void ceph_put_page_vector(struct page **pages, int num_pages, bool dirty) in ceph_put_page_vector() argument
19 set_page_dirty_lock(pages[i]); in ceph_put_page_vector()
20 put_page(pages[i]); in ceph_put_page_vector()
22 kvfree(pages); in ceph_put_page_vector()
26 void ceph_release_page_vector(struct page **pages, int num_pages) in ceph_release_page_vector() argument
31 __free_pages(pages[i], 0); in ceph_release_page_vector()
32 kfree(pages); in ceph_release_page_vector()
37 * allocate a vector new pages
41 struct page **pages; in ceph_alloc_page_vector() local
44 pages = kmalloc_array(num_pages, sizeof(*pages), flags); in ceph_alloc_page_vector()
[all …]
/linux/Documentation/mm/
unevictable-lru.rst
34 main memory will have over 32 million 4k pages in a single node. When a large
35 fraction of these pages are not evictable for any reason [see below], vmscan
37 of pages that are evictable. This can result in a situation where all CPUs are
41 The unevictable list addresses the following classes of unevictable pages:
51 The infrastructure may also be able to handle other conditions that make pages
104 lru_list enum element). The memory controller tracks the movement of pages to
108 not attempt to reclaim pages on the unevictable list. This has a couple of
111 (1) Because the pages are "hidden" from reclaim on the unevictable list, the
112 reclaim process can be more efficient, dealing only with pages that have a
115 (2) On the other hand, if too many of the pages charged to the control group
[all …]
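One class of unevictable pages referred to above is memory locked with mlock(). A small user-space illustration of how pages end up on the unevictable list follows; the buffer size is arbitrary, and the sketch says nothing about how vmscan then treats the list.

#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
        size_t len = 4 * 1024 * 1024;
        void *buf = malloc(len);

        if (!buf)
                return 1;
        memset(buf, 0, len);            /* fault the pages in */

        /* locked pages cannot be reclaimed; they are kept on the
         * unevictable list instead of being rescanned by vmscan */
        if (mlock(buf, len))
                return 1;

        /* ... use the buffer ... */

        munlock(buf, len);
        free(buf);
        return 0;
}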
/linux/Documentation/admin-guide/mm/
ksm.rst
18 which have been registered with it, looking for pages of identical
21 content). The amount of pages that KSM daemon scans in a single pass
25 KSM only merges anonymous (private) pages, never pagecache (file) pages.
26 KSM's merged pages were originally locked into kernel memory, but can now
27 be swapped out just like other user pages (but sharing is broken when they
45 to cancel that advice and restore unshared pages: whereupon KSM
55 cannot contain any pages which KSM could actually merge; even if
80 how many pages to scan before ksmd goes to sleep
95 specifies if pages from different NUMA nodes can be merged.
96 When set to 0, ksm merges only pages which physically reside
[all …]
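KSM only scans ranges that an application has registered with it via madvise(), as the ksm.rst excerpt notes. A hedged user-space sketch of that opt-in and opt-out follows; the buffer size and fill pattern are arbitrary.

#define _GNU_SOURCE
#include <string.h>
#include <sys/mman.h>

int main(void)
{
        size_t len = 64 * 1024 * 1024;
        void *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        if (buf == MAP_FAILED)
                return 1;

        /* register the range with KSM: ksmd may now merge identical pages */
        if (madvise(buf, len, MADV_MERGEABLE))
                return 1;

        memset(buf, 0x5a, len);         /* many identical pages to merge */

        /* ... run the workload ... */

        /* cancel the advice: merged pages in the range are unshared again */
        madvise(buf, len, MADV_UNMERGEABLE);
        munmap(buf, len);
        return 0;
}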
zswap.rst
8 Zswap is a lightweight compressed cache for swap pages. It takes pages that are
26 Zswap evicts pages from compressed cache on an LRU basis to the backing swap
40 When zswap is disabled at runtime it will stop storing pages that are
42 back into memory all of the pages stored in the compressed pool. The
43 pages stored in zswap will remain in the compressed pool until they are
45 pages out of the compressed pool, a swapoff on the swap device(s) will
46 fault back into memory all swapped out pages, including those in the
52 Zswap receives pages for compression from the swap subsystem and is able to
53 evict pages from its own compressed pool on an LRU basis and write them back to
60 pages are freed. The pool is not preallocated.
[all …]
concepts.rst
41 The physical system memory is divided into page frames, or pages. The
48 pages. These mappings are described by page tables that allow
53 addresses of actual pages used by the software. The tables at higher
54 levels contain physical addresses of the pages belonging to the lower
64 Huge Pages
75 Many modern CPU architectures allow mapping of the memory pages
77 it is possible to map 2M and even 1G pages using entries in the second
78 and the third level page tables. In Linux such pages are called
79 `huge`. Usage of huge pages significantly reduces pressure on TLB,
83 memory with the huge pages. The first one is `HugeTLB filesystem`, or
[all …]
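The concepts.rst excerpt mentions HugeTLB as one way to get huge pages; the same pool can be reached from an anonymous mapping with MAP_HUGETLB. A minimal sketch follows; it assumes huge pages have already been reserved (for example via /proc/sys/vm/nr_hugepages) and uses the default huge page size.

#define _GNU_SOURCE
#include <sys/mman.h>

int main(void)
{
        size_t len = 2 * 1024 * 1024;   /* one 2M huge page on x86-64 */

        /* backed by reserved HugeTLB pages rather than ordinary 4k pages */
        void *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
                         MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
        if (buf == MAP_FAILED)
                return 1;               /* likely no huge pages reserved */

        /* ... use the mapping; the whole 2M needs a single TLB entry ... */

        munmap(buf, len);
        return 0;
}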
transhuge.rst
11 using huge pages for the backing of virtual memory with huge pages
51 increments of a power-of-2 number of pages. mTHP can back anonymous
66 collapses sequences of basic pages into PMD-sized huge pages.
123 Transparent Huge Pages globally. This is because ``madvise(...,
125 PMD-sized huge pages unconditionally.
155 pages unless hugepages are immediately available. Clearly if we spend CPU
157 use hugepages later instead of regular pages. This isn't always
171 allocation failure and directly reclaim pages and compact
178 to reclaim pages and wake kcompactd to compact memory so that
180 of khugepaged to then install the THP pages later.
[all …]
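Unlike HugeTLB, transparent huge pages need no reservation; under the madvise policy discussed above an application asks for THP on a specific range. A hedged sketch, with an arbitrary mapping size:

#define _GNU_SOURCE
#include <sys/mman.h>

int main(void)
{
        size_t len = 32 * 1024 * 1024;
        void *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        if (buf == MAP_FAILED)
                return 1;

        /* with transparent_hugepage=madvise this range becomes eligible for
         * PMD-sized huge pages, either at fault time or later when
         * khugepaged collapses sequences of basic pages */
        madvise(buf, len, MADV_HUGEPAGE);

        /* ... touch the memory ... */

        munmap(buf, len);
        return 0;
}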
idle_page_tracking.rst
8 The idle page tracking feature allows to track which memory pages are being
37 Only accesses to user memory pages are tracked. These are pages mapped to a
38 process address space, page cache and buffer pages, swap cache pages. For other
39 page types (e.g. SLAB pages) an attempt to mark a page idle is silently ignored,
40 and hence such pages are never reported idle.
42 For huge pages the idle flag is set only on the head page, so one has to read
43 ``/proc/kpageflags`` in order to correctly count idle huge pages.
50 That said, in order to estimate the amount of pages that are not used by a
53 1. Mark all the workload's pages as idle by setting corresponding bits in
54 ``/sys/kernel/mm/page_idle/bitmap``. The pages can be found by reading
[all …]
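The procedure quoted above operates on /sys/kernel/mm/page_idle/bitmap, where each 64-bit word covers 64 page frames. A hedged user-space sketch of marking a page idle and reading the bit back follows; set_page_idle() and page_is_idle() are hypothetical helpers, fd is assumed to be an open descriptor on the bitmap file (root required), and the PFNs come from /proc/pid/pagemap as the document describes.

#include <fcntl.h>
#include <stdint.h>
#include <unistd.h>

static int set_page_idle(int fd, uint64_t pfn)
{
        uint64_t mask = 1ULL << (pfn % 64);
        off_t off = (pfn / 64) * sizeof(uint64_t);

        /* writing a set bit marks the corresponding page idle */
        return pwrite(fd, &mask, sizeof(mask), off) == sizeof(mask) ? 0 : -1;
}

static int page_is_idle(int fd, uint64_t pfn)
{
        uint64_t word = 0;
        off_t off = (pfn / 64) * sizeof(uint64_t);

        /* a bit still set after the workload ran means the page was idle */
        if (pread(fd, &word, sizeof(word), off) != sizeof(word))
                return -1;
        return !!(word & (1ULL << (pfn % 64)));
}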
pagemap.rst
37 swap. Unmapped pages return a null PFN. This allows determining
38 precisely which pages are mapped (or in swap) and comparing mapped
39 pages between processes.
44 for pages part of a larger allocation (e.g., THP) can differ: bit 56 is set
45 if all pages part of the corresponding large allocation are *certainly*
117 An order N block has 2^N physically contiguous pages, with the BUDDY flag
120 A compound page with order N consists of 2^N physically contiguous pages.
123 pages are hugeTLB pages (Documentation/admin-guide/mm/hugetlbpage.rst),
125 However in this interface, only huge/giga pages are made visible
136 Identical memory pages dynamically shared between one or more processes.
[all …]
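The pagemap.rst excerpt describes one 64-bit entry per virtual page. A minimal sketch of looking up the PFN backing an address in the current process follows; vaddr_to_pfn is a hypothetical helper, bits 0-54 of an entry hold the PFN, bit 63 the "present" flag, and the PFN reads back as zero without CAP_SYS_ADMIN.

#include <fcntl.h>
#include <stdint.h>
#include <unistd.h>

static uint64_t vaddr_to_pfn(void *addr)
{
        long pagesize = sysconf(_SC_PAGESIZE);
        uint64_t entry = 0;
        off_t off = ((uintptr_t)addr / pagesize) * sizeof(uint64_t);
        int fd = open("/proc/self/pagemap", O_RDONLY);

        if (fd < 0)
                return 0;
        if (pread(fd, &entry, sizeof(entry), off) != sizeof(entry))
                entry = 0;
        close(fd);

        if (!(entry & (1ULL << 63)))            /* page not present */
                return 0;
        return entry & ((1ULL << 55) - 1);      /* bits 0-54: PFN */
}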
/linux/drivers/gpu/drm/i915/gem/selftests/
huge_gem_object.c
12 struct sg_table *pages) in huge_free_pages() argument
18 for_each_sgt_page(page, sgt_iter, pages) { in huge_free_pages()
24 sg_free_table(pages); in huge_free_pages()
25 kfree(pages); in huge_free_pages()
34 struct sg_table *pages; in huge_get_pages() local
41 pages = kmalloc(sizeof(*pages), GFP); in huge_get_pages()
42 if (!pages) in huge_get_pages()
45 if (sg_alloc_table(pages, npages, GFP)) { in huge_get_pages()
46 kfree(pages); in huge_get_pages()
50 sg = pages->sgl; in huge_get_pages()
[all …]
/linux/io_uring/
memmap.c
18 static void *io_mem_alloc_compound(struct page **pages, int nr_pages, in io_mem_alloc_compound() argument
35 pages[i] = page + i; in io_mem_alloc_compound()
43 struct page **pages; in io_pin_pages() local
59 pages = kvmalloc_array(nr_pages, sizeof(struct page *), GFP_KERNEL); in io_pin_pages()
60 if (!pages) in io_pin_pages()
64 pages); in io_pin_pages()
65 /* success, mapped all pages */ in io_pin_pages()
68 return pages; in io_pin_pages()
73 /* if we did partial map, release any pages we did get */ in io_pin_pages()
75 unpin_user_pages(pages, ret); in io_pin_pages()
[all …]
/linux/fs/isofs/
compress.c
37 * to one zisofs block. Store the data in the @pages array with @pcount
42 struct page **pages, unsigned poffset, in zisofs_uncompress_block() argument
68 if (!pages[i]) in zisofs_uncompress_block()
70 memzero_page(pages[i], 0, PAGE_SIZE); in zisofs_uncompress_block()
71 SetPageUptodate(pages[i]); in zisofs_uncompress_block()
121 if (pages[curpage]) { in zisofs_uncompress_block()
122 stream.next_out = kmap_local_page(pages[curpage]) in zisofs_uncompress_block()
174 if (pages[curpage]) { in zisofs_uncompress_block()
175 flush_dcache_page(pages[curpage]); in zisofs_uncompress_block()
176 SetPageUptodate(pages[curpage]); in zisofs_uncompress_block()
[all …]
/linux/drivers/xen/
xlate_mmu.c
47 /* Break down the pages in 4KB chunk and call fn for each gfn */
48 static void xen_for_each_gfn(struct page **pages, unsigned nr_gfn, in xen_for_each_gfn() argument
57 page = pages[i / XEN_PFN_PER_PAGE]; in xen_for_each_gfn()
71 struct page **pages; member
99 struct page *page = info->pages[info->index++]; in remap_pte_fn()
148 struct page **pages) in xen_xlate_remap_gfn_array() argument
163 data.pages = pages; in xen_xlate_remap_gfn_array()
184 int nr, struct page **pages) in xen_xlate_unmap_gfn_range() argument
186 xen_for_each_gfn(pages, nr, unmap_gfn, NULL); in xen_xlate_unmap_gfn_range()
205 * xen_xlate_map_ballooned_pages - map a new set of ballooned pages
[all …]
/linux/Documentation/admin-guide/sysctl/
vm.rst
90 admin_reserve_kbytes defaults to min(3% of free pages, 8MB)
119 huge pages although processes will also directly compact memory as required.
129 Note that compaction has a non-trivial system-wide impact as pages
148 allowed to examine the unevictable lru (mlocked pages) for pages to compact.
151 compaction from moving pages that are unevictable. Default value is 1.
160 and maintain the ability to produce huge pages / higher-order pages.
181 Contains, as a percentage of total available memory that contains free pages
182 and reclaimable pages, the number of pages at which the background kernel
199 Note: the minimum value allowed for dirty_bytes is two pages (in bytes); any
216 Contains, as a percentage of total available memory that contains free pages
[all …]
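Besides the background compaction the vm.rst excerpt describes, compaction can be requested explicitly. A small, hedged sketch follows; it assumes root and the compact_memory sysctl documented in the same file, where a write triggers compaction of all zones.

#include <fcntl.h>
#include <unistd.h>

int main(void)
{
        int fd = open("/proc/sys/vm/compact_memory", O_WRONLY);

        if (fd < 0)
                return 1;
        /* writing to the file causes all zones to be compacted */
        if (write(fd, "1", 1) != 1) {
                close(fd);
                return 1;
        }
        close(fd);
        return 0;
}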
/linux/drivers/gpu/drm/i915/gem/
i915_gem_pages.c
22 struct sg_table *pages) in __i915_gem_object_set_pages() argument
34 /* Make the pages coherent with the GPU (flushing any swapin). */ in __i915_gem_object_set_pages()
39 drm_clflush_sg(pages); in __i915_gem_object_set_pages()
43 obj->mm.get_page.sg_pos = pages->sgl; in __i915_gem_object_set_pages()
45 obj->mm.get_dma_page.sg_pos = pages->sgl; in __i915_gem_object_set_pages()
48 obj->mm.pages = pages; in __i915_gem_object_set_pages()
50 obj->mm.page_sizes.phys = i915_sg_dma_sizes(pages->sgl); in __i915_gem_object_set_pages()
58 * 64K or 4K pages, although in practice this will depend on a number of in __i915_gem_object_set_pages()
119 /* Ensure that the associated pages are gathered from the backing storage
122 * i915_gem_object_unpin_pages() - once the pages are no longer referenced
[all …]
/linux/include/drm/ttm/
ttm_tt.h
44 * struct ttm_tt - This is a structure holding the pages, caching- and aperture
49 /** @pages: Array of pages backing the data. */
50 struct page **pages; member
56 * TTM_TT_FLAG_SWAPPED: Set by TTM when the pages have been unpopulated
58 * pages back in, and unset the flag. Drivers should in general never
61 * TTM_TT_FLAG_ZERO_ALLOC: Set if the pages will be zeroed on
64 * TTM_TT_FLAG_EXTERNAL: Set if the underlying pages were allocated
66 * TTM swapping out such pages. Also important is to prevent TTM from
67 * ever directly mapping these pages.
74 * still valid to use TTM to map the pages directly. This is useful when
[all …]
/linux/drivers/misc/
vmw_balloon.c
8 * acts like a "balloon" that can be inflated to reclaim physical pages by
10 * freeing up the underlying machine pages so they can be allocated to
51 /* Maximum number of refused pages we accumulate during inflation cycle */
147 * ballooned pages (up to 512).
149 * pages that are about to be deflated from the
152 * for 2MB pages.
155 * pages.
240 struct list_head pages; member
315 * @batch_max_pages: maximum pages that can be locked/unlocked.
317 * Indicates the number of pages that the hypervisor can lock or unlock
[all …]
/linux/tools/perf/pmu-events/arch/x86/amdzen4/
memory.json
40 "BriefDescription": "L1 DTLB misses with L2 DTLB hits for 4k pages.",
46 … misses with L2 DTLB hits for coalesced pages. A coalesced page is a 16k page created from four ad…
52 "BriefDescription": "L1 DTLB misses with L2 DTLB hits for 2M pages.",
58 "BriefDescription": "L1 DTLB misses with L2 DTLB hits for 1G pages.",
64 …fDescription": "L1 DTLB misses with L2 DTLB misses (page-table walks are requested) for 4k pages.",
70 …able walks are requested) for coalesced pages. A coalesced page is a 16k page created from four ad…
76 …fDescription": "L1 DTLB misses with L2 DTLB misses (page-table walks are requested) for 2M pages.",
82 …fDescription": "L1 DTLB misses with L2 DTLB misses (page-table walks are requested) for 1G pages.",
123 …tion fetches that miss in both the L1 and L2 ITLBs (page-table walks are requested) for 4k pages.",
129 …tion fetches that miss in both the L1 and L2 ITLBs (page-table walks are requested) for 2M pages.",
[all …]
/linux/include/xen/
xen-ops.h
68 struct page **pages);
70 int nr, struct page **pages);
81 struct page **pages) in xen_xlate_remap_gfn_array() argument
87 int nr, struct page **pages) in xen_xlate_unmap_gfn_range() argument
98 * @vma: VMA to map the pages into
99 * @addr: Address at which to map the pages
104 * @domid: Domain owning the pages
105 * @pages: Array of pages if this domain has an auto-translated physmap
118 struct page **pages) in xen_remap_domain_gfn_array() argument
122 prot, domid, pages); in xen_remap_domain_gfn_array()
[all …]
/linux/kernel/power/
snapshot.c
145 * List of PBEs needed for restoring the pages that were allocated before
152 /* struct linked_page is used to build chains of pages */
162 * List of "safe" pages (ie. pages that were not used by the image kernel
181 * @safe_needed: Get pages that were not used before hibernation (restore only)
184 * only use memory pages that do not conflict with the pages used before
185 * hibernation. The "unsafe" pages have PageNosaveFree set and we count them
283 * a linked list of pages called 'the chain'.
297 gfp_t gfp_mask; /* mask for allocating pages */
546 unsigned long pages; create_zone_bm_rtree() local
931 unsigned long bits, pfn, pages; memory_bm_next_pfn() local
1827 unsigned long saveable, size, max_size, count, highmem, pages = 0; hibernate_preallocate_memory() local
[all …]
/linux/Documentation/virt/kvm/x86/
mmu.rst
66 pages, pae, pse, pse36, cr0.wp, and 1GB pages. Emulated hardware also
118 Shadow pages
125 A nonleaf spte allows the hardware mmu to reach the leaf pages and
126 is not related to a translation directly. It points to other shadow pages.
131 Leaf ptes point at guest pages.
150 Shadow pages contain the following information:
156 Examples include real mode translation, large guest pages backed by small
157 host pages, and gpa->hpa translations when NPT or EPT is active.
166 so multiple shadow pages are needed to shadow one guest page.
167 For first-level shadow pages, role.quadrant can be 0 or 1 and denotes the
[all …]
/linux/kernel/dma/
remap.c
17 return area->pages; in dma_common_find_pages()
21 * Remaps an array of PAGE_SIZE pages into another vm_area.
24 void *dma_common_pages_remap(struct page **pages, size_t size, in dma_common_pages_remap() argument
29 vaddr = vmap(pages, PAGE_ALIGN(size) >> PAGE_SHIFT, in dma_common_pages_remap()
32 find_vm_area(vaddr)->pages = pages; in dma_common_pages_remap()
44 struct page **pages; in dma_common_contiguous_remap() local
48 pages = kvmalloc_array(count, sizeof(struct page *), GFP_KERNEL); in dma_common_contiguous_remap()
49 if (!pages) in dma_common_contiguous_remap()
52 pages[i] = page++; in dma_common_contiguous_remap()
53 vaddr = vmap(pages, count, VM_DMA_COHERENT, prot); in dma_common_contiguous_remap()
[all …]
