/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2020 Intel Corporation
 */

#ifndef _XE_MIGRATE_
#define _XE_MIGRATE_

#include <linux/bits.h>
#include <linux/types.h>

struct dma_fence;
struct drm_pagemap_addr;
struct iosys_map;
struct ttm_resource;

struct xe_bo;
struct xe_gt;
struct xe_tlb_inval_job;
struct xe_exec_queue;
struct xe_migrate;
struct xe_migrate_pt_update;
struct xe_sync_entry;
struct xe_pt;
struct xe_tile;
struct xe_vm;
struct xe_vm_pgtable_update;
struct xe_vma;

enum xe_sriov_vf_ccs_rw_ctxs;

/**
 * struct xe_migrate_pt_update_ops - Callbacks for the
 * xe_migrate_update_pgtables() function.
 */
struct xe_migrate_pt_update_ops {
	/**
	 * @populate: Populate a command buffer or page-table with ptes.
	 * @pt_update: Embeddable callback argument.
	 * @tile: The tile for the current operation.
	 * @map: struct iosys_map into the memory to be populated.
	 * @pos: If @map is NULL, pointer into the memory to be populated.
	 * @ofs: qword offset into @map, unused if @map is NULL.
	 * @num_qwords: Number of qwords to write.
	 * @update: Information about the PTEs to be inserted.
	 *
	 * This interface is intended to be used as a callback into the
	 * page-table system to populate command buffers or shared
	 * page-tables with PTEs.
	 */
	void (*populate)(struct xe_migrate_pt_update *pt_update,
			 struct xe_tile *tile, struct iosys_map *map,
			 void *pos, u32 ofs, u32 num_qwords,
			 const struct xe_vm_pgtable_update *update);
	/**
	 * @clear: Clear a command buffer or page-table with ptes.
	 * @pt_update: Embeddable callback argument.
	 * @tile: The tile for the current operation.
	 * @map: struct iosys_map into the memory to be cleared.
	 * @pos: If @map is NULL, pointer into the memory to be cleared.
	 * @ofs: qword offset into @map, unused if @map is NULL.
	 * @num_qwords: Number of qwords to write.
	 * @update: Information about the PTEs to be cleared.
	 *
	 * This interface is intended to be used as a callback into the
	 * page-table system to clear command buffers or shared
	 * page-tables of PTEs.
	 */
	void (*clear)(struct xe_migrate_pt_update *pt_update,
		      struct xe_tile *tile, struct iosys_map *map,
		      void *pos, u32 ofs, u32 num_qwords,
		      const struct xe_vm_pgtable_update *update);

	/**
	 * @pre_commit: Callback to be called just before arming the
	 * sched_job.
	 * @pt_update: Pointer to embeddable callback argument.
	 *
	 * Return: 0 on success, negative error code on error.
	 */
	int (*pre_commit)(struct xe_migrate_pt_update *pt_update);
};

/**
 * struct xe_migrate_pt_update - Argument to the
 * struct xe_migrate_pt_update_ops callbacks.
 *
 * Intended to be subclassed to support additional arguments if necessary.
 */
struct xe_migrate_pt_update {
	/** @ops: Pointer to the struct xe_migrate_pt_update_ops callbacks */
	const struct xe_migrate_pt_update_ops *ops;
	/** @vops: VMA operations */
	struct xe_vma_ops *vops;
	/** @job: The job if a GPU page-table update. NULL otherwise */
	struct xe_sched_job *job;
	/**
	 * @ijob: The TLB invalidation job for primary GT. NULL otherwise
	 */
	struct xe_tlb_inval_job *ijob;
	/**
	 * @mjob: The TLB invalidation job for media GT. NULL otherwise
	 */
	struct xe_tlb_inval_job *mjob;
	/** @tile_id: Tile ID of the update */
	u8 tile_id;
};
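
/*
 * Example (illustrative sketch only, not part of the xe API): the kernel-doc
 * above says struct xe_migrate_pt_update is intended to be subclassed to
 * carry additional arguments. One way to do that is to embed it in a
 * caller-private structure, recover the container in the callbacks with
 * container_of(), and pass the embedded member to
 * xe_migrate_update_pgtables(). The my_pt_update, my_populate and my_pt_ops
 * names below are hypothetical and not symbols provided by this header.
 *
 *	struct my_pt_update {
 *		struct xe_migrate_pt_update base;
 *		u64 extra_arg;
 *	};
 *
 *	static void my_populate(struct xe_migrate_pt_update *pt_update,
 *				struct xe_tile *tile, struct iosys_map *map,
 *				void *pos, u32 ofs, u32 num_qwords,
 *				const struct xe_vm_pgtable_update *update)
 *	{
 *		struct my_pt_update *upd =
 *			container_of(pt_update, struct my_pt_update, base);
 *
 *		// Write @num_qwords PTEs described by @update, either
 *		// through @map at qword offset @ofs or directly at @pos
 *		// when @map is NULL, using upd->extra_arg as needed.
 *	}
 *
 *	static const struct xe_migrate_pt_update_ops my_pt_ops = {
 *		.populate = my_populate,
 *	};
 *
 * A caller would then initialize the embedded base with .ops = &my_pt_ops
 * and hand &my_pt_update.base to xe_migrate_update_pgtables(), declared
 * below, which returns a struct dma_fence pointer to wait on.
 */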

struct xe_migrate *xe_migrate_alloc(struct xe_tile *tile);
int xe_migrate_init(struct xe_migrate *m);

struct dma_fence *xe_migrate_to_vram(struct xe_migrate *m,
				     unsigned long npages,
				     struct drm_pagemap_addr *src_addr,
				     u64 dst_addr);

struct dma_fence *xe_migrate_from_vram(struct xe_migrate *m,
				       unsigned long npages,
				       u64 src_addr,
				       struct drm_pagemap_addr *dst_addr);

struct dma_fence *xe_migrate_copy(struct xe_migrate *m,
				  struct xe_bo *src_bo,
				  struct xe_bo *dst_bo,
				  struct ttm_resource *src,
				  struct ttm_resource *dst,
				  bool copy_only_ccs);

int xe_migrate_ccs_rw_copy(struct xe_tile *tile, struct xe_exec_queue *q,
			   struct xe_bo *src_bo,
			   enum xe_sriov_vf_ccs_rw_ctxs read_write);

struct xe_lrc *xe_migrate_lrc(struct xe_migrate *migrate);
struct xe_exec_queue *xe_migrate_exec_queue(struct xe_migrate *migrate);
int xe_migrate_access_memory(struct xe_migrate *m, struct xe_bo *bo,
			     unsigned long offset, void *buf, int len,
			     int write);

#define XE_MIGRATE_CLEAR_FLAG_BO_DATA		BIT(0)
#define XE_MIGRATE_CLEAR_FLAG_CCS_DATA		BIT(1)
#define XE_MIGRATE_CLEAR_FLAG_FULL	(XE_MIGRATE_CLEAR_FLAG_BO_DATA | \
					 XE_MIGRATE_CLEAR_FLAG_CCS_DATA)
struct dma_fence *xe_migrate_clear(struct xe_migrate *m,
				   struct xe_bo *bo,
				   struct ttm_resource *dst,
				   u32 clear_flags);

struct xe_vm *xe_migrate_get_vm(struct xe_migrate *m);

struct dma_fence *
xe_migrate_update_pgtables(struct xe_migrate *m,
			   struct xe_migrate_pt_update *pt_update);

void xe_migrate_wait(struct xe_migrate *m);

void xe_migrate_job_lock(struct xe_migrate *m, struct xe_exec_queue *q);
void xe_migrate_job_unlock(struct xe_migrate *m, struct xe_exec_queue *q);

#endif