// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright © 2022 Intel Corporation
 */
#include "xe_pt_walk.h"

/**
 * DOC: GPU page-table tree walking.
 * The utilities in this file are similar to the CPU page-table walk
 * utilities in mm/pagewalk.c. The main difference is that we distinguish
 * the various levels of a page-table tree with an unsigned integer rather
 * than by name. 0 is the lowest level, and page-tables with level 0 cannot
 * be directories pointing to lower levels, whereas all other levels can.
 * The user of the utilities determines the highest level.
 *
 * Nomenclature:
 * Each struct xe_ptw, regardless of level, is referred to as a page table,
 * and multiple page tables typically form a page table tree, with page
 * tables at intermediate levels being page directories pointing at page
 * tables at lower levels. A shared page table for a given address range is
 * a page table which is neither fully within nor fully outside the address
 * range and that can thus be shared by two or more address ranges.
 *
 * Please keep this code generic so that it can be used as a drm-wide page-
 * table walker should other drivers find use for it.
 */
static u64 xe_pt_addr_end(u64 addr, u64 end, unsigned int level,
			  const struct xe_pt_walk *walk)
{
	u64 size = 1ull << walk->shifts[level];
	u64 tmp = round_up(addr + 1, size);

	return min_t(u64, tmp, end);
}

static bool xe_pt_next(pgoff_t *offset, u64 *addr, u64 next, u64 end,
		       unsigned int level, const struct xe_pt_walk *walk)
{
	pgoff_t step = 1;

	/* Shared pt walk skips to the last pagetable */
	if (unlikely(walk->shared_pt_mode)) {
		unsigned int shift = walk->shifts[level];
		u64 skip_to = round_down(end, 1ull << shift);

		if (skip_to > next) {
			step += (skip_to - next) >> shift;
			next = skip_to;
		}
	}

	*addr = next;
	*offset += step;

	return next != end;
}

/**
 * xe_pt_walk_range() - Walk a range of a gpu page table tree with callbacks
 * for each page-table entry in all levels.
 * @parent: The root page table for walk start.
 * @level: The root page table level.
 * @addr: Virtual address start.
 * @end: Virtual address end + 1.
 * @walk: Walk info.
 *
 * Similar to the CPU page-table walker, this is a helper to walk
 * a gpu page table and call a provided callback function for each entry.
 *
 * Return: 0 on success, negative error code on error. The error is
 * propagated from the callback and on error the walk is terminated.
 */
int xe_pt_walk_range(struct xe_ptw *parent, unsigned int level,
		     u64 addr, u64 end, struct xe_pt_walk *walk)
{
	pgoff_t offset = xe_pt_offset(addr, level, walk);
	struct xe_ptw **entries = parent->children ? parent->children : NULL;
	const struct xe_pt_walk_ops *ops = walk->ops;
	enum page_walk_action action;
	struct xe_ptw *child;
	int err = 0;
	u64 next;

	do {
		next = xe_pt_addr_end(addr, end, level, walk);
		if (walk->shared_pt_mode && xe_pt_covers(addr, next, level,
							 walk))
			continue;
again:
		action = ACTION_SUBTREE;
		child = entries ? entries[offset] : NULL;
		err = ops->pt_entry(parent, offset, level, addr, next,
				    &child, &action, walk);
		if (err)
			break;

		/* Probably not needed yet for gpu pagetable walk. */
		if (unlikely(action == ACTION_AGAIN))
			goto again;

		if (likely(!level || !child || action == ACTION_CONTINUE))
			continue;

		err = xe_pt_walk_range(child, level - 1, addr, next, walk);

		if (!err && ops->pt_post_descend)
			err = ops->pt_post_descend(parent, offset, level, addr,
						   next, &child, &action, walk);
		if (err)
			break;

	} while (xe_pt_next(&offset, &addr, next, end, level, walk));

	return err;
}

/**
 * xe_pt_walk_shared() - Walk shared page tables of a page-table tree.
 * @parent: Root page table directory.
 * @level: Level of the root.
 * @addr: Start address.
 * @end: Last address + 1.
 * @walk: Walk info.
 *
 * This function is similar to xe_pt_walk_range() but it skips page tables
 * that are private to the range. Since the root (or @parent) page table is
 * typically also a shared page table, this function differs in that it
 * calls the pt_entry callback and the post_descend callback also for the
 * root. The root can be detected in the callbacks by checking whether
 * parent == *child.
 * Walking only the shared page tables is common for unbind-type operations
 * where the page-table entries for an address range are cleared or detached
 * from the main page-table tree.
 *
 * Return: 0 on success, negative error code on error: If a callback
 * returns an error, the walk will be terminated and the error returned by
 * this function.
 */
int xe_pt_walk_shared(struct xe_ptw *parent, unsigned int level,
		      u64 addr, u64 end, struct xe_pt_walk *walk)
{
	const struct xe_pt_walk_ops *ops = walk->ops;
	enum page_walk_action action = ACTION_SUBTREE;
	struct xe_ptw *child = parent;
	int err;

	walk->shared_pt_mode = true;
	err = walk->ops->pt_entry(parent, 0, level + 1, addr, end,
				  &child, &action, walk);

	if (err || action != ACTION_SUBTREE)
		return err;

	err = xe_pt_walk_range(parent, level, addr, end, walk);
	if (!err && ops->pt_post_descend) {
		err = ops->pt_post_descend(parent, 0, level + 1, addr, end,
					   &child, &action, walk);
	}
	return err;
}
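
/*
 * Example: a minimal usage sketch, kept in a comment for documentation only,
 * showing how a caller might embed struct xe_pt_walk and drive
 * xe_pt_walk_range() to visit every entry in a range. The names
 * xe_count_walk, xe_pt_count_cb, xe_pt_count_ops and xe_pt_count_entries are
 * hypothetical and invented for this sketch. It assumes struct xe_pt_walk
 * exposes the .ops and .shifts members used elsewhere in this file and that
 * the pt_entry prototype matches the call sites above; xe_pt_walk.h holds
 * the authoritative definitions.
 *
 *	struct xe_count_walk {
 *		struct xe_pt_walk base;
 *		u64 num_entries;
 *	};
 *
 *	static int xe_pt_count_cb(struct xe_ptw *parent, pgoff_t offset,
 *				  unsigned int level, u64 addr, u64 next,
 *				  struct xe_ptw **child,
 *				  enum page_walk_action *action,
 *				  struct xe_pt_walk *walk)
 *	{
 *		struct xe_count_walk *count =
 *			container_of(walk, struct xe_count_walk, base);
 *
 *		count->num_entries++;
 *
 *		// Leaving *action as ACTION_SUBTREE descends into *child, if
 *		// one exists; returning nonzero would terminate the walk.
 *		return 0;
 *	}
 *
 *	static const struct xe_pt_walk_ops xe_pt_count_ops = {
 *		.pt_entry = xe_pt_count_cb,
 *	};
 *
 *	static u64 xe_pt_count_entries(struct xe_ptw *root,
 *				       unsigned int root_level,
 *				       u64 start, u64 end, const u64 *shifts)
 *	{
 *		struct xe_count_walk count = {
 *			.base = {
 *				.ops = &xe_pt_count_ops,
 *				.shifts = shifts,
 *			},
 *			.num_entries = 0,
 *		};
 *
 *		// Walks [start, end) and calls xe_pt_count_cb() for each
 *		// entry at each level of the tree rooted at @root.
 *		if (xe_pt_walk_range(root, root_level, start, end, &count.base))
 *			return 0;
 *
 *		return count.num_entries;
 *	}
 */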