// SPDX-License-Identifier: MIT
/*
 * Copyright © 2023 Intel Corporation
 */
#include "xe_drm_client.h"

#include <drm/drm_print.h>
#include <drm/xe_drm.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/types.h>

#include "xe_bo.h"
#include "xe_bo_types.h"
#include "xe_device_types.h"
#include "xe_exec_queue.h"
#include "xe_force_wake.h"
#include "xe_gt.h"
#include "xe_hw_engine.h"
#include "xe_pm.h"
#include "xe_trace.h"

/**
 * DOC: DRM Client usage stats
 *
 * The drm/xe driver implements the DRM client usage stats specification as
 * documented in :ref:`drm-client-usage-stats`.
 *
 * Example output showing the implemented key-value pairs and the entirety of
 * the currently possible format options:
 *
 * ::
 *
 *	pos:	0
 *	flags:	0100002
 *	mnt_id:	26
 *	ino:	685
 *	drm-driver:	xe
 *	drm-client-id:	3
 *	drm-pdev:	0000:03:00.0
 *	drm-total-system:	0
 *	drm-shared-system:	0
 *	drm-active-system:	0
 *	drm-resident-system:	0
 *	drm-purgeable-system:	0
 *	drm-total-gtt:	192 KiB
 *	drm-shared-gtt:	0
 *	drm-active-gtt:	0
 *	drm-resident-gtt:	192 KiB
 *	drm-total-vram0:	23992 KiB
 *	drm-shared-vram0:	16 MiB
 *	drm-active-vram0:	0
 *	drm-resident-vram0:	23992 KiB
 *	drm-total-stolen:	0
 *	drm-shared-stolen:	0
 *	drm-active-stolen:	0
 *	drm-resident-stolen:	0
 *	drm-cycles-rcs:	28257900
 *	drm-total-cycles-rcs:	7655183225
 *	drm-cycles-bcs:	0
 *	drm-total-cycles-bcs:	7655183225
 *	drm-cycles-vcs:	0
 *	drm-total-cycles-vcs:	7655183225
 *	drm-engine-capacity-vcs:	2
 *	drm-cycles-vecs:	0
 *	drm-total-cycles-vecs:	7655183225
 *	drm-engine-capacity-vecs:	2
 *	drm-cycles-ccs:	0
 *	drm-total-cycles-ccs:	7655183225
 *	drm-engine-capacity-ccs:	4
 *
 * Possible `drm-cycles-` key names are: `rcs`, `ccs`, `bcs`, `vcs`, `vecs`
 * and `other`.
 */

/**
 * xe_drm_client_alloc() - Allocate drm client
 * @void: No arg
 *
 * Allocate a drm client struct used to track the client's memory allocations
 * over the client's lifetime. Call this whenever a new client opens the xe
 * device.
 *
 * Return: pointer to client struct or NULL if allocation fails
 */
struct xe_drm_client *xe_drm_client_alloc(void)
{
	struct xe_drm_client *client;

	client = kzalloc(sizeof(*client), GFP_KERNEL);
	if (!client)
		return NULL;

	kref_init(&client->kref);

#ifdef CONFIG_PROC_FS
	spin_lock_init(&client->bos_lock);
	INIT_LIST_HEAD(&client->bos_list);
#endif
	return client;
}

/**
 * __xe_drm_client_free() - Free client struct
 * @kref: The reference
 *
 * Free the client struct. This is the kref release callback, invoked once the
 * last reference to the client is dropped.
 *
 * Return: void
 */
void __xe_drm_client_free(struct kref *kref)
{
	struct xe_drm_client *client =
		container_of(kref, typeof(*client), kref);

	kfree(client);
}
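
/*
 * A minimal lifecycle sketch (illustrative only, not driver code), assuming
 * the xe_drm_client_get()/xe_drm_client_put() helpers in xe_drm_client.h are
 * the usual kref wrappers with __xe_drm_client_free() as the release
 * callback:
 *
 *	struct xe_drm_client *client = xe_drm_client_alloc();
 *
 *	if (!client)
 *		return -ENOMEM;
 *
 *	// hand out extra references with xe_drm_client_get() as needed,
 *	// e.g. one per tracked BO as xe_drm_client_add_bo() does below
 *
 *	xe_drm_client_put(client);	// final put ends in __xe_drm_client_free()
 */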

#ifdef CONFIG_PROC_FS
/**
 * xe_drm_client_add_bo() - Add BO for tracking client mem usage
 * @client: The drm client ptr
 * @bo: The xe BO ptr
 *
 * Add a BO created by the drm client to the client's tracking list, so that
 * its memory usage can be accounted in fdinfo. Call this for every BO the
 * client creates.
 *
 * Return: void
 */
void xe_drm_client_add_bo(struct xe_drm_client *client,
			  struct xe_bo *bo)
{
	XE_WARN_ON(bo->client);
	XE_WARN_ON(!list_empty(&bo->client_link));

	spin_lock(&client->bos_lock);
	bo->client = xe_drm_client_get(client);
	list_add_tail(&bo->client_link, &client->bos_list);
	spin_unlock(&client->bos_lock);
}

/**
 * xe_drm_client_remove_bo() - Remove BO for tracking client mem usage
 * @bo: The xe BO ptr
 *
 * Remove a BO from the client's tracking list when the client releases it, so
 * that it stops being accounted in fdinfo. Call this for every BO the client
 * removes.
 *
 * Return: void
 */
void xe_drm_client_remove_bo(struct xe_bo *bo)
{
	struct xe_drm_client *client = bo->client;

	spin_lock(&client->bos_lock);
	list_del(&bo->client_link);
	spin_unlock(&client->bos_lock);

	xe_drm_client_put(client);
}

static void bo_meminfo(struct xe_bo *bo,
		       struct drm_memory_stats stats[TTM_NUM_MEM_TYPES])
{
	u64 sz = bo->size;
	u32 mem_type;

	/* BOs without a placement yet are accounted against system (TT) */
	if (bo->placement.placement)
		mem_type = bo->placement.placement->mem_type;
	else
		mem_type = XE_PL_TT;

	if (drm_gem_object_is_shared_for_memory_stats(&bo->ttm.base))
		stats[mem_type].shared += sz;
	else
		stats[mem_type].private += sz;

	if (xe_bo_has_pages(bo)) {
		stats[mem_type].resident += sz;

		if (!dma_resv_test_signaled(bo->ttm.base.resv,
					    DMA_RESV_USAGE_BOOKKEEP))
			stats[mem_type].active += sz;
		else if (mem_type == XE_PL_SYSTEM)
			stats[mem_type].purgeable += sz;
	}
}

static void show_meminfo(struct drm_printer *p, struct drm_file *file)
{
	struct drm_memory_stats stats[TTM_NUM_MEM_TYPES] = {};
	struct xe_file *xef = file->driver_priv;
	struct ttm_device *bdev = &xef->xe->ttm;
	struct ttm_resource_manager *man;
	struct xe_drm_client *client;
	struct drm_gem_object *obj;
	struct xe_bo *bo;
	unsigned int id;
	u32 mem_type;

	client = xef->client;

	/* Public objects. */
	spin_lock(&file->table_lock);
	idr_for_each_entry(&file->object_idr, obj, id) {
		struct xe_bo *bo = gem_to_xe_bo(obj);

		bo_meminfo(bo, stats);
	}
	spin_unlock(&file->table_lock);

	/* Internal objects. */
	spin_lock(&client->bos_lock);
	list_for_each_entry(bo, &client->bos_list, client_link) {
		/* Skip BOs that are already on their way out */
		if (!kref_get_unless_zero(&bo->ttm.base.refcount))
			continue;
		bo_meminfo(bo, stats);
		xe_bo_put(bo);
	}
	spin_unlock(&client->bos_lock);

	for (mem_type = XE_PL_SYSTEM; mem_type < TTM_NUM_MEM_TYPES; ++mem_type) {
		if (!xe_mem_type_to_name[mem_type])
			continue;

		man = ttm_manager_type(bdev, mem_type);

		if (man) {
			drm_print_memory_stats(p,
					       &stats[mem_type],
					       DRM_GEM_OBJECT_RESIDENT |
					       (mem_type != XE_PL_SYSTEM ? 0 :
					       DRM_GEM_OBJECT_PURGEABLE),
					       xe_mem_type_to_name[mem_type]);
		}
	}
}
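
/*
 * Illustrative note (a sketch of how drm_print_memory_stats() surfaces the
 * counters accumulated above; the exact output is defined by the DRM core,
 * not this file): for a region named e.g. "vram0", the struct
 * drm_memory_stats fields map roughly to the fdinfo keys as
 *
 *	drm-total-vram0:	stats.private + stats.shared
 *	drm-shared-vram0:	stats.shared
 *	drm-resident-vram0:	stats.resident
 *	drm-active-vram0:	stats.active
 *	drm-purgeable-vram0:	stats.purgeable (system region only, per the
 *				DRM_GEM_OBJECT_PURGEABLE masking above)
 */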

static void show_run_ticks(struct drm_printer *p, struct drm_file *file)
{
	unsigned long class, i, gt_id, capacity[XE_ENGINE_CLASS_MAX] = { };
	struct xe_file *xef = file->driver_priv;
	struct xe_device *xe = xef->xe;
	struct xe_gt *gt;
	struct xe_hw_engine *hwe;
	struct xe_exec_queue *q;
	u64 gpu_timestamp;

	xe_pm_runtime_get(xe);

	/* Accumulate all the exec queues from this client */
	mutex_lock(&xef->exec_queue.lock);
	xa_for_each(&xef->exec_queue.xa, i, q)
		xe_exec_queue_update_run_ticks(q);
	mutex_unlock(&xef->exec_queue.lock);

	/* Get the total GPU cycles */
	for_each_gt(gt, xe, gt_id) {
		enum xe_force_wake_domains fw;

		hwe = xe_gt_any_hw_engine(gt);
		if (!hwe)
			continue;

		fw = xe_hw_engine_to_fw_domain(hwe);
		if (xe_force_wake_get(gt_to_fw(gt), fw)) {
			hwe = NULL;
			break;
		}

		gpu_timestamp = xe_hw_engine_read_timestamp(hwe);
		XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), fw));
		break;
	}

	xe_pm_runtime_put(xe);

	/* No usable engine found, so no timestamp to report against */
	if (unlikely(!hwe))
		return;

	for (class = 0; class < XE_ENGINE_CLASS_MAX; class++) {
		const char *class_name;

		for_each_gt(gt, xe, gt_id)
			capacity[class] += gt->user_engines.instances_per_class[class];

		/*
		 * Engines may be fused off or not exposed to userspace. Don't
		 * return anything if this entire class is not available.
		 */
		if (!capacity[class])
			continue;

		class_name = xe_hw_engine_class_to_str(class);
		drm_printf(p, "drm-cycles-%s:\t%llu\n",
			   class_name, xef->run_ticks[class]);
		drm_printf(p, "drm-total-cycles-%s:\t%llu\n",
			   class_name, gpu_timestamp);

		if (capacity[class] > 1)
			drm_printf(p, "drm-engine-capacity-%s:\t%lu\n",
				   class_name, capacity[class]);
	}
}

/**
 * xe_drm_client_fdinfo() - Callback for fdinfo interface
 * @p: The drm_printer ptr
 * @file: The drm_file ptr
 *
 * This is the callback for the drm fdinfo interface. Register it as the
 * show_fdinfo hook in the drm driver ops.
 *
 * Return: void
 */
void xe_drm_client_fdinfo(struct drm_printer *p, struct drm_file *file)
{
	show_meminfo(p, file);
	show_run_ticks(p, file);
}
#endif
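
/*
 * Consumer-side sketch (illustrative only, not driver code): per the DRM
 * client usage stats specification, userspace derives per-class utilization
 * by sampling the fdinfo file twice and taking the ratio of the two deltas:
 *
 *	// cycles1/total1 from the first sample, cycles2/total2 from the second
 *	busy_pct = 100.0 * (cycles2 - cycles1) / (total2 - total1);
 *
 * Because a class may expose more than one engine instance, the result can
 * legitimately exceed 100%, up to drm-engine-capacity-<class> * 100%.
 */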