// SPDX-License-Identifier: MIT
/*
 * Copyright © 2025 Intel Corporation
 */

#include <linux/bitfield.h>
#include <linux/kref.h>
#include <linux/mm.h>
#include <linux/slab.h>

#include "xe_page_reclaim.h"

#include "xe_gt_stats.h"
#include "xe_macros.h"
#include "xe_pat.h"
#include "xe_sa.h"
#include "xe_tlb_inval_types.h"

/**
 * xe_page_reclaim_skip() - Decide whether PRL should be skipped for a VMA
 * @tile: Tile owning the VMA
 * @vma: VMA under consideration
 *
 * PPC flushing may be handled by HW for specific PAT encodings, making a
 * SW-issued flush redundant. Skip PPC flushing/Page Reclaim for the
 * scenarios below:
 * - pat_index maps to the transient display L3 policy (XD, encoding 1)
 *
 * Return: true when page reclamation is unnecessary, false otherwise.
 */
bool xe_page_reclaim_skip(struct xe_tile *tile, struct xe_vma *vma)
{
	u8 l3_policy;

	/* Derive the L3 cache policy from the VMA's PAT index */
	l3_policy = xe_pat_index_get_l3_policy(tile->xe, vma->attr.pat_index);

	/*
	 * - l3_policy: 0=WB, 1=XD ("WB - Transient Display"), 3=UC
	 * Transient display flushes are taken care of by HW (l3_policy == 1).
	 *
	 * HW will sequence these transient flushes at various sync points, so
	 * any event of page reclamation will hit these sync points before
	 * page reclamation could execute.
	 */
	return (l3_policy == XE_L3_POLICY_XD);
}

/**
 * xe_page_reclaim_create_prl_bo() - Back a PRL with a suballocated GGTT BO
 * @tlb_inval: TLB invalidation frontend associated with the request
 * @prl: page reclaim list data that the BO contents are copied from
 * @fence: TLB invalidation fence that the page reclaim action is paired to
 *
 * Suballocates a 4K BO out of the tile reclaim pool, copies the PRL CPU
 * copy into the BO and queues the buffer for release when @fence signals.
 *
 * Return: struct drm_suballoc pointer on success or ERR_PTR on failure.
 */
struct drm_suballoc *xe_page_reclaim_create_prl_bo(struct xe_tlb_inval *tlb_inval,
						   struct xe_page_reclaim_list *prl,
						   struct xe_tlb_inval_fence *fence)
{
	struct xe_gt *gt = container_of(tlb_inval, struct xe_gt, tlb_inval);
	struct xe_tile *tile = gt_to_tile(gt);
	/* (+1) for NULL page_reclaim_entry to indicate end of list */
	int prl_size = min(prl->num_entries + 1, XE_PAGE_RECLAIM_MAX_ENTRIES) *
		sizeof(struct xe_guc_page_reclaim_entry);
	struct drm_suballoc *prl_sa;

	/*
	 * Maximum size of PRL is 1 4K-page. GFP_ATOMIC: presumably this can
	 * be reached from a non-sleepable (invalidation) context — confirm.
	 */
	prl_sa = __xe_sa_bo_new(tile->mem.reclaim_pool,
				prl_size, GFP_ATOMIC);
	if (IS_ERR(prl_sa))
		return prl_sa;

	/*
	 * Copy the CPU-side list (including the zeroed terminator entry,
	 * covered by the +1 above) into the suballocated BO, then flush so
	 * HW observes the write.
	 */
	memcpy(xe_sa_bo_cpu_addr(prl_sa), prl->entries,
	       prl_size);
	xe_sa_bo_flush_write(prl_sa);
	/* Queue up sa_bo_free on tlb invalidation fence signal */
	xe_sa_bo_free(prl_sa, &fence->base);

	return prl_sa;
}

/**
 * xe_page_reclaim_list_invalidate() - Mark a PRL as invalid
 * @prl: Page reclaim list to reset
 *
 * Drops the reference on the entries, clears the entries pointer and
 * marks the list as invalid so future users know the PRL is unusable.
 */
void xe_page_reclaim_list_invalidate(struct xe_page_reclaim_list *prl)
{
	xe_page_reclaim_entries_put(prl->entries);
	prl->entries = NULL;
	prl->num_entries = XE_PAGE_RECLAIM_INVALID_LIST;
}

/**
 * xe_page_reclaim_list_init() - Initialize a page reclaim list
 * @prl: Page reclaim list to initialize
 *
 * Sets the entries pointer to NULL and the entry count to zero,
 * preparing the list for use on initialization.
 */
void xe_page_reclaim_list_init(struct xe_page_reclaim_list *prl)
{
	prl->entries = NULL;
	prl->num_entries = 0;
}

/**
 * xe_page_reclaim_list_alloc_entries() - Allocate page reclaim list entries
 * @prl: Page reclaim list to allocate entries for
 *
 * Allocates one zeroed 4K page for the PRL entries. On allocation failure
 * prl->entries is left untouched (NULL when the list was freshly
 * initialized).
 *
 * Return: 0 on success (or when entries are already allocated), -ENOMEM
 * on allocation failure.
117 */ 118 int xe_page_reclaim_list_alloc_entries(struct xe_page_reclaim_list *prl) 119 { 120 struct page *page; 121 122 if (XE_WARN_ON(prl->entries)) 123 return 0; 124 125 page = alloc_page(GFP_KERNEL | __GFP_ZERO); 126 if (page) { 127 prl->entries = page_address(page); 128 prl->num_entries = 0; 129 } 130 131 return page ? 0 : -ENOMEM; 132 } 133