/linux/drivers/iommu/iommufd/

pages.c: A comment notes that one allocation can hold about 26M of 4k pages and 13G of 2M pages. iopt_pages_add_npinned() and iopt_pages_sub_npinned() update pages->npinned with check_add_overflow()/check_sub_overflow() and WARN_ON() if the result overflows or exceeds pages->npages; iopt_pages_err_unpin() uses iopt_pages_sub_npinned() to unwind pinning after a failure. Another hit describes a range that covers only a portion of its first and last pages. …

io_pagetable.c: The area structure keeps a struct iopt_pages *pages member, and iopt_area_contig_init()/iopt_area_contig_next() stop when iter->area->pages is NULL. iopt_insert_area() takes a slice of the pages from start_byte to start_byte + length, rejects IOMMU_WRITE mappings when !pages->writable, WARN_ON()s if area->pages_node.last >= pages->npages, and initially inserts the area with a NULL pages pointer. iopt_alloc_area_pages() switches on elm->pages->type and computes start = elm->start_byte + (uintptr_t)elm->pages->uptr. …

/linux/mm/

percpu-vm.c: pcpu_get_pages() returns a lazily allocated temporary pages array, sized pcpu_nr_units * pcpu_unit_pages * sizeof(pages[0]) and obtained with pcpu_mem_zalloc(pages_size, GFP_KERNEL). Further hits are the page_start/page_end based helpers pcpu_free_pages(), pcpu_alloc_pages(), pcpu_unmap_pages(), __pcpu_map_pages() and pcpu_map_pages(), plus the pages arrays used locally by pcpu_populate_chunk() and pcpu_depopulate_chunk(). …

balloon_compaction.c: The common interface for making balloon pages movable by compaction. balloon_page_list_enqueue() inserts a list of pages (allocated with balloon_page_alloc()) into the balloon page list and returns the number of pages enqueued; drivers must call it to properly enqueue balloon pages. balloon_page_list_dequeue() removes pages from the balloon's page list and returns them as a list to the caller. …

gup_test.c: put_back_pages() releases the test pages with either put_page() or unpin_user_pages(), depending on the GUP test command. verify_dma_pinned() checks each page's folio via page_folio() and warns "pages[%lu] is NOT dma-pinned" or "pages[%lu] is NOT pinnable but pinned" when a check fails; dump_pages_test() dumps the pages array for a struct gup_test run. …

gup.c: sanity_check_pinned_pages() walks the pages array to verify that pinned anonymous pages are exclusive and still mapped as expected. Pages pinned via pin_user_pages*() must be released through the matching unpin_user_page*() helpers so that such pages can be separately tracked and uniquely handled; unpin_user_pages_dirty_lock() releases an array of gup-pinned pages and optionally marks them dirty. …

/linux/net/ceph/

pagevec.c: ceph_put_page_vector() optionally runs set_page_dirty_lock() on each page, drops the references with put_page() and kvfree()s the vector; ceph_release_page_vector() frees each page with __free_pages() and kfree()s the vector; ceph_alloc_page_vector() allocates a vector of new pages, using kmalloc_array(num_pages, sizeof(*pages), flags) for the array itself. …

/linux/Documentation/mm/

unevictable-lru.rst: A single node of a large system can hold over 32 million 4k pages; when a large fraction of them are unevictable for any reason, vmscan spends much of its time scanning pages it cannot reclaim while looking for the small fraction that are evictable, and all CPUs can end up busy in reclaim. The unevictable list addresses several classes of unevictable pages, and the infrastructure may also be able to handle other conditions that make pages unevictable. The memory controller tracks the movement of pages to and from the unevictable list (an lru_list enum element), and reclaim does not attempt to reclaim pages on that list: this makes reclaim more efficient, since it deals only with pages that have a chance of being freed, at the cost of problems when too many of the pages charged to a control group are unevictable. …

/linux/Documentation/admin-guide/mm/

ksm.rst: ksmd periodically scans the areas that have been registered with it, looking for pages of identical content; a tunable controls how many pages the KSM daemon scans in a single pass. KSM only merges anonymous (private) pages, never pagecache (file) pages, and merged pages were originally locked into kernel memory but can now be swapped out just like other user pages (sharing is broken when they are). MADV_UNMERGEABLE cancels the advice and restores unshared pages, and some ranges cannot contain any pages which KSM could actually merge even if marked mergeable. Under /sys/kernel/mm/ksm/, pages_to_scan sets how many pages to scan before ksmd goes to sleep, and merge_across_nodes specifies whether pages from different NUMA nodes can be merged; when set to 0, ksm merges only pages which physically reside on the same NUMA node. …
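
A minimal userspace sketch (not taken from ksm.rst) of how an application registers a range with KSM; it assumes a kernel built with CONFIG_KSM, ksmd enabled via /sys/kernel/mm/ksm/run, and abbreviates error handling:

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
    size_t len = 64 * 4096;
    /* Anonymous (private) memory: the only kind of page KSM will merge. */
    char *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (buf == MAP_FAILED)
        return 1;

    memset(buf, 0x5a, len);                 /* identical content on every page */

    if (madvise(buf, len, MADV_MERGEABLE))  /* register the range with ksmd */
        perror("madvise(MADV_MERGEABLE)");

    /* MADV_UNMERGEABLE later restores private, unshared copies. */
    madvise(buf, len, MADV_UNMERGEABLE);

    munmap(buf, len);
    return 0;
}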

concepts.rst: The physical system memory is divided into page frames, or pages; virtual-to-physical mappings are described by page tables, where the tables at higher levels contain physical addresses of the pages belonging to the lower levels and the lowest level holds the addresses of the actual pages used by the software. Under "Huge Pages": many modern CPU architectures allow mapping memory with pages larger than the base size, e.g. 2M and even 1G pages via entries in the second and third level page tables; in Linux such pages are called `huge`, and using them significantly reduces pressure on the TLB. The first mechanism for backing memory with huge pages is the `HugeTLB filesystem` (hugetlbfs). …
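
A minimal sketch (not from concepts.rst) of requesting an anonymous mapping backed by HugeTLB pages; it assumes huge pages have already been reserved, e.g. through /proc/sys/vm/nr_hugepages, and that the mapping length is a multiple of the huge page size:

#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
    size_t len = 2UL << 20;        /* one 2M huge page on x86-64 */
    void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
    if (p == MAP_FAILED) {
        perror("mmap(MAP_HUGETLB)");   /* usually ENOMEM when no huge pages are reserved */
        return 1;
    }
    ((char *)p)[0] = 1;            /* touch it so the huge page is actually faulted in */
    munmap(p, len);
    return 0;
}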

idle_page_tracking.rst: The idle page tracking feature allows tracking which memory pages are being accessed. Only accesses to user memory pages are tracked: pages mapped into a process address space, page cache and buffer pages, and swap cache pages; for other page types (e.g. SLAB pages) an attempt to mark a page idle is silently ignored, so such pages are never reported idle. For huge pages the idle flag is set only on the head page, so one has to read /proc/kpageflags to correctly count idle huge pages. To estimate the amount of pages that are not used by a workload, first mark all the workload's pages as idle by setting the corresponding bits in /sys/kernel/mm/page_idle/bitmap; the pages can be found by reading the process's pagemap. …
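
A minimal sketch (not from idle_page_tracking.rst) of that first step, marking a single page frame idle through the bitmap; it assumes CONFIG_IDLE_PAGE_TRACKING, CAP_SYS_ADMIN, and a PFN already obtained from the process's pagemap:

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

static int mark_pfn_idle(int fd, uint64_t pfn)
{
    uint64_t word = 1ULL << (pfn % 64);
    off_t off = (off_t)(pfn / 64) * sizeof(uint64_t);

    /* The bitmap is accessed in 8-byte, 8-byte-aligned chunks; set bits mark pages idle. */
    return pwrite(fd, &word, sizeof(word), off) == sizeof(word) ? 0 : -1;
}

int main(int argc, char **argv)
{
    uint64_t pfn = argc > 1 ? strtoull(argv[1], NULL, 0) : 0;
    int fd = open("/sys/kernel/mm/page_idle/bitmap", O_WRONLY);

    if (fd < 0 || mark_pfn_idle(fd, pfn)) {
        perror("page_idle bitmap");
        return 1;
    }
    printf("marked pfn %#llx idle\n", (unsigned long long)pfn);
    return 0;
}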

transhuge.rst: Transparent Hugepage Support backs virtual memory with huge pages; mTHP extends this to increments of a power-of-2 number of basic pages and can back anonymous memory, while khugepaged collapses sequences of basic pages into PMD-sized huge pages. The defrag settings control how hard to try when hugepages are not immediately available: spending CPU time on compaction only pays off if hugepages are actually used later instead of regular pages, so depending on the mode an allocation failure may directly reclaim pages and compact memory, or merely reclaim pages and wake kcompactd so that khugepaged can install the THP pages later. A THP is considered "underused" if the number of zero-filled pages in it is above a threshold. …
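
A minimal sketch (not from transhuge.rst) of hinting that a range should be backed by THP with MADV_HUGEPAGE; it assumes THP is enabled in "madvise" or "always" mode under /sys/kernel/mm/transparent_hugepage/enabled:

#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
    size_t len = 8UL << 20;                   /* 8 MiB: several PMD-sized (2M) units */
    void *p;

    if (posix_memalign(&p, 2UL << 20, len))   /* start the region on a 2M boundary */
        return 1;

    madvise(p, len, MADV_HUGEPAGE);           /* hint: back this range with huge pages */
    memset(p, 0x42, len);                     /* touching the memory lets THP fault in */

    free(p);
    return 0;
}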

pagemap.rst: /proc/pid/pagemap reports, for each virtual page, the backing page frame or swap location; unmapped pages return a null PFN, which allows determining precisely which pages are mapped (or in swap) and comparing mapped pages between processes. In the kernel page flags, an order-N block consists of 2^N physically contiguous pages and carries the BUDDY flag, and a compound page with order N likewise consists of 2^N physically contiguous pages; hugeTLB pages (Documentation/admin-guide/mm/hugetlbpage.rst) are flagged too, though only huge/giga pages are made visible through this interface. Other flags mark identical memory pages dynamically shared between one or more processes (KSM), contiguous pages which construct a THP of any size and mapped at any granularity, and pages that are not candidates for LRU page reclaim, e.g. ramfs pages. …
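
A minimal sketch (not from pagemap.rst) of reading the pagemap entry for one virtual address; it assumes the documented 64-bit entry layout (bit 63 present, bit 62 swapped, bits 0-54 PFN) and that the PFN field reads as zero without CAP_SYS_ADMIN:

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

static char target[4096];                   /* the page whose mapping we look up */

int main(void)
{
    long psize = sysconf(_SC_PAGESIZE);
    uint64_t entry;
    int fd = open("/proc/self/pagemap", O_RDONLY);

    if (fd < 0)
        return 1;

    target[0] = 'x';                        /* touch the page so it is present */

    off_t off = (off_t)((uintptr_t)target / psize) * sizeof(entry);
    if (pread(fd, &entry, sizeof(entry), off) != sizeof(entry))
        return 1;

    printf("present=%d swapped=%d pfn=%#llx\n",
           (int)(entry >> 63 & 1), (int)(entry >> 62 & 1),
           (unsigned long long)(entry & ((1ULL << 55) - 1)));
    close(fd);
    return 0;
}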

/linux/drivers/gpu/drm/i915/gem/selftests/

huge_gem_object.c: huge_get_pages() kmalloc()s a struct sg_table, populates it with sg_alloc_table(pages, npages, GFP) (freeing it again on failure) and then walks pages->sgl; huge_free_pages() iterates the table with for_each_sgt_page() to release each page, then calls sg_free_table() and kfree()s it. …

/linux/fs/isofs/

compress.c: zisofs_uncompress_block() uncompresses data corresponding to one zisofs block and stores it in the @pages array (@pcount pages starting at offset @poffset): absent pages are skipped, holes are zero-filled with memzero_page() and marked with SetPageUptodate(), and present pages are mapped with kmap_local_page() as the inflate output buffer (stream.next_out), then finished with flush_dcache_page() and SetPageUptodate(). …

/linux/drivers/media/pci/intel/ipu6/

ipu6-dma.c: The buffer info keeps a struct page **pages array. __alloc_buffer() kvzalloc()s that array and fills it with alloc_pages(gfp, order), retrying with progressively smaller orders on failure and split_page()-ing higher-order allocations; a comment in __clear_buffer() covers ensuring that the allocated pages are zeroed. …

/linux/drivers/xen/

xlate_mmu.c: xen_for_each_gfn() breaks the given pages down into 4KB chunks and calls a callback for each gfn, indexing page = pages[i / XEN_PFN_PER_PAGE]; remap_pte_fn() consumes info->pages[info->index++] while remapping. xen_xlate_remap_gfn_array() stores the caller's page array in its remap data, xen_xlate_unmap_gfn_range() runs xen_for_each_gfn(pages, nr, unmap_gfn, NULL), and xen_xlate_map_ballooned_pages() maps a new set of ballooned pages. …

/linux/Documentation/admin-guide/sysctl/

vm.rst: Documents the vm.* sysctls. admin_reserve_kbytes defaults to min(3% of free pages, 8MB). Writing to compact_memory compacts memory so that huge pages can be allocated, although processes will also directly compact memory as required; compaction has a non-trivial system-wide impact as pages are moved around. compact_unevictable_allowed (default 1) controls whether compaction is allowed to examine the unevictable LRU (mlocked pages) for pages to compact, and setting it to 0 prevents compaction from moving pages that are unevictable. dirty_background_ratio and dirty_ratio contain, as a percentage of total available memory that contains free pages and reclaimable pages, the number of pages at which background kernel writeback starts and at which a process generating dirty data itself starts writing back; the minimum value allowed for dirty_bytes is two pages (in bytes). …
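
A minimal sketch (not from vm.rst) of poking one of these knobs from a program, here triggering global compaction by writing 1 to compact_memory; it assumes root privileges:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
    /* Equivalent to `echo 1 > /proc/sys/vm/compact_memory`. */
    int fd = open("/proc/sys/vm/compact_memory", O_WRONLY);

    if (fd < 0 || write(fd, "1", 1) != 1) {
        perror("compact_memory");
        return 1;
    }
    close(fd);
    return 0;
}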

/linux/drivers/misc/

vmw_balloon.c: The VMware balloon acts like a "balloon" that can be inflated to reclaim physical pages, freeing up the underlying machine pages so the hypervisor can allocate them elsewhere. Hits cover the maximum number of refused pages accumulated during an inflation cycle, batches of ballooned pages (up to 512), pages that are about to be deflated, separate handling for 2MB pages, a struct list_head pages member, and batch_max_pages, which indicates how many pages the hypervisor can lock or unlock per batch. …

/linux/tools/perf/pmu-events/arch/x86/amdzen4/

memory.json: AMD Zen 4 perf event descriptions for DTLB and ITLB behaviour: L1 DTLB misses that hit in the L2 DTLB, and L1 DTLB misses that also miss in the L2 DTLB (page-table walks are requested), each broken out for 4k, coalesced (a 16k page created from four adjacent 4k pages), 2M and 1G pages, plus instruction fetches that miss in both the L1 and L2 ITLBs (page-table walks are requested) for 4k and 2M pages. …

/linux/drivers/gpu/drm/ttm/

ttm_pool.c: Pooling of allocated pages is necessary because changing their caching attributes is expensive. The page_pool_size module parameter sets the "Number of pages in the WC/UC/DMA pool". ttm_pool_alloc_page() allocates pages of size 1 << order with the given gfp_flags (with a note about mapping pages directly into a userspace process), and other helpers reset the caching and pages of size 1 << order, apply a new caching to an array of pages, map or unmap pages of 1 << order size while filling the DMA address array, and give pages back to a specific pool_type, where ttm_pool_type_give() adds them to pt->pages. …

/linux/include/xen/

xen-ops.h: Declares xen_xlate_remap_gfn_array() and xen_xlate_unmap_gfn_range(), which take a struct page **pages array, together with stub versions used when the translated-physmap implementation is not built in. The kernel-doc for xen_remap_domain_gfn_array() describes @vma as the VMA to map the pages into, @addr as the address at which to map the pages, @domid as the domain owning the pages and @pages as an array of pages if this domain has an auto-translated physmap; the inline wrapper forwards prot, domid and pages to the xlate implementation. …

/linux/kernel/power/

snapshot.c: Hibernation keeps a list of PBEs needed for restoring the pages that were allocated before suspend, and struct linked_page is used to build chains of pages, including the list of "safe" pages (pages that were not used by the image kernel). With @safe_needed set, only memory pages that do not conflict with the pages used before hibernation are used; the "unsafe" pages have PageNosaveFree set and are counted. The memory bitmap keeps its data in a linked list of pages called 'the chain' and records a gfp_mask for allocating pages; page counters also appear locally in create_zone_bm_rtree(), memory_bm_next_pfn() and hibernate_preallocate_memory(). …

/linux/Documentation/virt/kvm/x86/

mmu.rst: The guest MMU features that KVM handles include pae, pse, pse36, cr0.wp and 1GB pages on emulated hardware. Under "Shadow pages": a nonleaf spte is not related to a translation directly; it allows the hardware MMU to reach the leaf pages by pointing to other shadow pages, while leaf ptes point at guest pages. Shadow pages contain information describing the translation they cache; examples of situations that need such shadowing include real mode translation, large guest pages backed by small host pages, and gpa->hpa translations when NPT or EPT is active. Because multiple shadow pages can be needed to shadow one guest page, role.quadrant (0 or 1 for first-level shadow pages) denotes which part of the guest page is shadowed. …

/linux/Documentation/arch/x86/

sgx.rst: SGX utilizes an Enclave Page Cache (EPC) to store pages associated with an enclave; unlike pages used for regular memory, these pages can only be accessed from outside of the enclave in restricted ways. Regular EPC pages contain the code and data of an enclave, Thread Control Structure pages define the entry points to an enclave, and Version Array pages contain 512 slots, each of which can contain a version for an evicted page. The processor tracks EPC pages in a hardware metadata structure, and other hits cover establishing enclave page permissions, adding and removing enclave pages, and what happens when an enclave accesses an address that lacks a backing page. After a reset, enclave pages that were in use may be in an inconsistent state, so all enclave pages are reinitialized so that they can be allocated and re-used. …