/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
 *
 * Copyright (c) 2023-2024 Broadcom. All Rights Reserved. The term
 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#ifndef VMWGFX_BO_H
#define VMWGFX_BO_H

#include "device_include/svga_reg.h"

#include <drm/ttm/ttm_bo.h>
#include <drm/ttm/ttm_placement.h>

#include <linux/rbtree_types.h>
#include <linux/types.h>
#include <linux/xarray.h>

struct vmw_bo_dirty;
struct vmw_fence_obj;
struct vmw_private;
struct vmw_resource;
struct vmw_surface;

enum vmw_bo_domain {
	VMW_BO_DOMAIN_SYS          = BIT(0),
	VMW_BO_DOMAIN_WAITABLE_SYS = BIT(1),
	VMW_BO_DOMAIN_VRAM         = BIT(2),
	VMW_BO_DOMAIN_GMR          = BIT(3),
	VMW_BO_DOMAIN_MOB          = BIT(4),
};

struct vmw_bo_params {
	u32 domain;
	u32 busy_domain;
	enum ttm_bo_type bo_type;
	size_t size;
	bool pin;
	struct dma_resv *resv;
	struct sg_table *sg;
};

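/*
 * Illustrative sketch only: one way a kernel-internal allocation through
 * vmw_bo_create() might fill in struct vmw_bo_params. The SYS placement,
 * ttm_bo_type_kernel type and PAGE_SIZE size are arbitrary choices for the
 * example, not requirements of the API, and "dev_priv" stands in for the
 * driver's struct vmw_private instance:
 *
 *	struct vmw_bo_params params = {
 *		.domain      = VMW_BO_DOMAIN_SYS,
 *		.busy_domain = VMW_BO_DOMAIN_SYS,
 *		.bo_type     = ttm_bo_type_kernel,
 *		.size        = PAGE_SIZE,
 *	};
 *	struct vmw_bo *vbo;
 *	int ret;
 *
 *	ret = vmw_bo_create(dev_priv, &params, &vbo);
 *	if (!ret) {
 *		// ... use vbo ...
 *		vmw_bo_unreference(&vbo);
 *	}
 *
 * Which put helper is appropriate (vmw_bo_unreference() vs.
 * vmw_user_bo_unref()) depends on how the reference was obtained; see the
 * inline helpers at the end of this header.
 */
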
/**
 * struct vmw_bo - TTM buffer object with vmwgfx additions
 * @tbo: The TTM buffer object
 * @placement: The preferred placement for this buffer object
 * @places: The chosen places for the preferred placement.
 * @busy_places: Chosen busy places for the preferred placement
 * @map: Kmap object for semi-persistent mappings
 * @res_tree: RB tree of resources using this buffer object as a backing MOB
 * @res_prios: Eviction priority counts for attached resources
 * @detached_resources: Resources tracked via vmw_bo_add_detached_resource() /
 * vmw_bo_del_detached_resource()
 * @map_count: The number of currently active maps. Will differ from the
 * cpu_writers because it includes kernel maps.
 * @cpu_writers: Number of synccpu write grabs. Protected by reservation when
 * increased. May be decreased without reservation.
 * @dx_query_ctx: DX context if this buffer object is used as a DX query MOB
 * @dirty: structure for user-space dirty-tracking
 * @is_dumb: Whether this buffer object was created via the dumb buffer API
 * @dumb_surface: The surface associated with a dumb buffer, if any
 */
struct vmw_bo {
	struct ttm_buffer_object tbo;

	struct ttm_placement placement;
	struct ttm_place places[5];
	struct ttm_place busy_places[5];

	/* Protected by reservation */
	struct ttm_bo_kmap_obj map;

	struct rb_root res_tree;
	u32 res_prios[TTM_MAX_BO_PRIORITY];
	struct xarray detached_resources;

	atomic_t map_count;
	atomic_t cpu_writers;
	/* Not ref-counted. Protected by binding_mutex */
	struct vmw_resource *dx_query_ctx;
	struct vmw_bo_dirty *dirty;

	bool is_dumb;
	struct vmw_surface *dumb_surface;
};

void vmw_bo_placement_set(struct vmw_bo *bo, u32 domain, u32 busy_domain);
void vmw_bo_placement_set_default_accelerated(struct vmw_bo *bo);

int vmw_bo_create(struct vmw_private *dev_priv,
		  struct vmw_bo_params *params,
		  struct vmw_bo **p_bo);

int vmw_bo_unref_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv);

int vmw_bo_pin_in_vram(struct vmw_private *dev_priv,
		       struct vmw_bo *buf,
		       bool interruptible);
int vmw_bo_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
			      struct vmw_bo *buf,
			      bool interruptible);
int vmw_bo_pin_in_start_of_vram(struct vmw_private *vmw_priv,
				struct vmw_bo *bo,
				bool interruptible);
void vmw_bo_pin_reserved(struct vmw_bo *bo, bool pin);
int vmw_bo_unpin(struct vmw_private *vmw_priv,
		 struct vmw_bo *bo,
		 bool interruptible);

void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *buf,
			  SVGAGuestPtr *ptr);
int vmw_user_bo_synccpu_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv);
void vmw_bo_fence_single(struct ttm_buffer_object *bo,
			 struct vmw_fence_obj *fence);

void *vmw_bo_map_and_cache(struct vmw_bo *vbo);
void *vmw_bo_map_and_cache_size(struct vmw_bo *vbo, size_t size);
void vmw_bo_unmap(struct vmw_bo *vbo);

void vmw_bo_move_notify(struct ttm_buffer_object *bo,
			struct ttm_resource *mem);
void vmw_bo_swap_notify(struct ttm_buffer_object *bo);

void vmw_bo_add_detached_resource(struct vmw_bo *vbo, struct vmw_resource *res);
void vmw_bo_del_detached_resource(struct vmw_bo *vbo, struct vmw_resource *res);
struct vmw_surface *vmw_bo_surface(struct vmw_bo *vbo);

int vmw_user_bo_lookup(struct drm_file *filp,
		       u32 handle,
		       struct vmw_bo **out);

/**
 * vmw_bo_prio_adjust - Adjust the buffer object eviction priority
 * according to attached resources
 * @vbo: The struct vmw_bo
 */
static inline void vmw_bo_prio_adjust(struct vmw_bo *vbo)
{
	int i = ARRAY_SIZE(vbo->res_prios);

	while (i--) {
		if (vbo->res_prios[i]) {
			vbo->tbo.priority = i;
			return;
		}
	}

	vbo->tbo.priority = 3;
}

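/*
 * Illustrative sketch only: resource code attaching to and detaching from a
 * buffer object is expected to keep the priority counts balanced via the
 * helpers declared below, e.g.
 *
 *	vmw_bo_prio_add(vbo, prio);	// on attach
 *	// ...
 *	vmw_bo_prio_del(vbo, prio);	// on detach
 *
 * where "prio" stands in for whatever eviction priority the attaching
 * resource uses. Assuming TTM_MAX_BO_PRIORITY is 4, with
 * res_prios = {0, 2, 1, 0} vmw_bo_prio_adjust() picks the highest non-empty
 * slot and sets tbo.priority to 2; once all counts drop to zero it falls
 * back to priority 3.
 */
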
/**
 * vmw_bo_prio_add - Notify a buffer object of a newly attached resource
 * eviction priority
 * @vbo: The struct vmw_bo
 * @prio: The resource priority
 *
 * After being notified, the code assigns the highest resource eviction priority
 * to the backing buffer object (mob).
 */
static inline void vmw_bo_prio_add(struct vmw_bo *vbo, int prio)
{
	if (vbo->res_prios[prio]++ == 0)
		vmw_bo_prio_adjust(vbo);
}

/**
 * vmw_bo_prio_del - Notify a buffer object of a resource with a certain
 * priority being removed
 * @vbo: The struct vmw_bo
 * @prio: The resource priority
 *
 * After being notified, the code assigns the highest resource eviction priority
 * to the backing buffer object (mob).
 */
static inline void vmw_bo_prio_del(struct vmw_bo *vbo, int prio)
{
	if (--vbo->res_prios[prio] == 0)
		vmw_bo_prio_adjust(vbo);
}

static inline void vmw_bo_unreference(struct vmw_bo **buf)
{
	struct vmw_bo *tmp_buf = *buf;

	*buf = NULL;
	if (tmp_buf)
		ttm_bo_put(&tmp_buf->tbo);
}

static inline struct vmw_bo *vmw_bo_reference(struct vmw_bo *buf)
{
	ttm_bo_get(&buf->tbo);
	return buf;
}

static inline struct vmw_bo *vmw_user_bo_ref(struct vmw_bo *vbo)
{
	drm_gem_object_get(&vbo->tbo.base);
	return vbo;
}

static inline void vmw_user_bo_unref(struct vmw_bo **buf)
{
	struct vmw_bo *tmp_buf = *buf;

	*buf = NULL;
	if (tmp_buf)
		drm_gem_object_put(&tmp_buf->tbo.base);
}

static inline struct vmw_bo *to_vmw_bo(struct drm_gem_object *gobj)
{
	return container_of((gobj), struct vmw_bo, tbo.base);
}

#endif // VMWGFX_BO_H