// SPDX-License-Identifier: GPL-2.0
/* Copyright 2019 Collabora Ltd */

#include <linux/completion.h>
#include <linux/iopoll.h>
#include <linux/iosys-map.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/uaccess.h>

#include <drm/drm_file.h>
#include <drm/drm_gem_shmem_helper.h>
#include <drm/panfrost_drm.h>

#include "panfrost_device.h"
#include "panfrost_features.h"
#include "panfrost_gem.h"
#include "panfrost_issues.h"
#include "panfrost_job.h"
#include "panfrost_mmu.h"
#include "panfrost_perfcnt.h"
#include "panfrost_regs.h"

#define COUNTERS_PER_BLOCK		64
#define BYTES_PER_COUNTER		4
#define BLOCKS_PER_COREGROUP		8
#define V4_SHADERS_PER_COREGROUP	4

struct panfrost_perfcnt {
	struct panfrost_gem_mapping *mapping;
	size_t bosize;
	void *buf;
	struct panfrost_file_priv *user;
	struct mutex lock;
	struct completion dump_comp;
};

/* Called once the GPU cache clean requested by the perfcnt code completes. */
void panfrost_perfcnt_clean_cache_done(struct panfrost_device *pfdev)
{
	complete(&pfdev->perfcnt->dump_comp);
}

/*
 * The counter sample has been written out; clean the GPU caches so the CPU
 * sees up-to-date data. Completion is then signalled through
 * panfrost_perfcnt_clean_cache_done().
 */
void panfrost_perfcnt_sample_done(struct panfrost_device *pfdev)
{
	gpu_write(pfdev, GPU_CMD, GPU_CMD_CLEAN_CACHES);
}

/*
 * Trigger a manual counter sample and wait for the resulting dump to land
 * in the perfcnt buffer.
 */
static int panfrost_perfcnt_dump_locked(struct panfrost_device *pfdev)
{
	u64 gpuva;
	int ret;

	reinit_completion(&pfdev->perfcnt->dump_comp);
	gpuva = pfdev->perfcnt->mapping->mmnode.start << PAGE_SHIFT;
	gpu_write(pfdev, GPU_PERFCNT_BASE_LO, lower_32_bits(gpuva));
	gpu_write(pfdev, GPU_PERFCNT_BASE_HI, upper_32_bits(gpuva));
	gpu_write(pfdev, GPU_INT_CLEAR,
		  GPU_IRQ_CLEAN_CACHES_COMPLETED |
		  GPU_IRQ_PERFCNT_SAMPLE_COMPLETED);
	gpu_write(pfdev, GPU_CMD, GPU_CMD_PERFCNT_SAMPLE);
	ret = wait_for_completion_interruptible_timeout(&pfdev->perfcnt->dump_comp,
							msecs_to_jiffies(1000));
	if (!ret)
		ret = -ETIMEDOUT;
	else if (ret > 0)
		ret = 0;

	return ret;
}

static int panfrost_perfcnt_enable_locked(struct panfrost_device *pfdev,
					  struct drm_file *file_priv,
					  unsigned int counterset)
{
	struct panfrost_file_priv *user = file_priv->driver_priv;
	struct panfrost_perfcnt *perfcnt = pfdev->perfcnt;
	struct iosys_map map;
	struct drm_gem_shmem_object *bo;
	u32 cfg, as;
	int ret;

	if (user == perfcnt->user)
		return 0;
	else if (perfcnt->user)
		return -EBUSY;

	ret = pm_runtime_get_sync(pfdev->base.dev);
	if (ret < 0)
		goto err_put_pm;

	bo = drm_gem_shmem_create(&pfdev->base, perfcnt->bosize);
	if (IS_ERR(bo)) {
		ret = PTR_ERR(bo);
		goto err_put_pm;
	}

	/* Map the perfcnt buf in the address space attached to file_priv. */
	ret = panfrost_gem_open(&bo->base, file_priv);
	if (ret)
		goto err_put_bo;

	perfcnt->mapping = panfrost_gem_mapping_get(to_panfrost_bo(&bo->base),
						    user);
	if (!perfcnt->mapping) {
		ret = -EINVAL;
		goto err_close_bo;
	}

	ret = drm_gem_vmap(&bo->base, &map);
	if (ret)
		goto err_put_mapping;
	perfcnt->buf = map.vaddr;

	panfrost_gem_internal_set_label(&bo->base, "Perfcnt sample buffer");

	/*
	 * Invalidate the cache and clear the counters to start from a fresh
	 * state.
	 */
	reinit_completion(&pfdev->perfcnt->dump_comp);
	gpu_write(pfdev, GPU_INT_CLEAR,
		  GPU_IRQ_CLEAN_CACHES_COMPLETED |
		  GPU_IRQ_PERFCNT_SAMPLE_COMPLETED);
	gpu_write(pfdev, GPU_CMD, GPU_CMD_PERFCNT_CLEAR);
	gpu_write(pfdev, GPU_CMD, GPU_CMD_CLEAN_INV_CACHES);
	ret = wait_for_completion_timeout(&pfdev->perfcnt->dump_comp,
					  msecs_to_jiffies(1000));
	if (!ret) {
		ret = -ETIMEDOUT;
		goto err_vunmap;
	}

	ret = panfrost_mmu_as_get(pfdev, perfcnt->mapping->mmu);
	if (ret < 0)
		goto err_vunmap;

	as = ret;
	cfg = GPU_PERFCNT_CFG_AS(as) |
	      GPU_PERFCNT_CFG_MODE(GPU_PERFCNT_CFG_MODE_MANUAL);

	/*
	 * Bifrost GPUs have two sets of counters; the set to sample is
	 * selected by the caller through counterset.
	 */
	if (panfrost_model_is_bifrost(pfdev))
		cfg |= GPU_PERFCNT_CFG_SETSEL(counterset);

	gpu_write(pfdev, GPU_PRFCNT_JM_EN, 0xffffffff);
	gpu_write(pfdev, GPU_PRFCNT_SHADER_EN, 0xffffffff);
	gpu_write(pfdev, GPU_PRFCNT_MMU_L2_EN, 0xffffffff);

	/*
	 * Due to PRLAM-8186, we need to disable the Tiler before we enable
	 * the HW counters.
	 */
	if (panfrost_has_hw_issue(pfdev, HW_ISSUE_8186))
		gpu_write(pfdev, GPU_PRFCNT_TILER_EN, 0);
	else
		gpu_write(pfdev, GPU_PRFCNT_TILER_EN, 0xffffffff);

	gpu_write(pfdev, GPU_PERFCNT_CFG, cfg);

	if (panfrost_has_hw_issue(pfdev, HW_ISSUE_8186))
		gpu_write(pfdev, GPU_PRFCNT_TILER_EN, 0xffffffff);

	/* The BO ref is retained by the mapping. */
	drm_gem_object_put(&bo->base);

	perfcnt->user = user;

	return 0;

err_vunmap:
	drm_gem_vunmap(&bo->base, &map);
err_put_mapping:
	panfrost_gem_mapping_put(perfcnt->mapping);
err_close_bo:
	panfrost_gem_close(&bo->base, file_priv);
err_put_bo:
	drm_gem_object_put(&bo->base);
err_put_pm:
	pm_runtime_put(pfdev->base.dev);
	return ret;
}

static int panfrost_perfcnt_disable_locked(struct panfrost_device *pfdev,
					   struct drm_file *file_priv)
{
	struct panfrost_file_priv *user = file_priv->driver_priv;
	struct panfrost_perfcnt *perfcnt = pfdev->perfcnt;
	struct iosys_map map = IOSYS_MAP_INIT_VADDR(perfcnt->buf);

	if (user != perfcnt->user)
		return -EINVAL;

	gpu_write(pfdev, GPU_PRFCNT_JM_EN, 0x0);
	gpu_write(pfdev, GPU_PRFCNT_SHADER_EN, 0x0);
	gpu_write(pfdev, GPU_PRFCNT_MMU_L2_EN, 0x0);
	gpu_write(pfdev, GPU_PRFCNT_TILER_EN, 0);
	gpu_write(pfdev, GPU_PERFCNT_CFG,
		  GPU_PERFCNT_CFG_MODE(GPU_PERFCNT_CFG_MODE_OFF));

	perfcnt->user = NULL;
	drm_gem_vunmap(&perfcnt->mapping->obj->base.base, &map);
	perfcnt->buf = NULL;
	panfrost_gem_close(&perfcnt->mapping->obj->base.base, file_priv);
	panfrost_mmu_as_put(pfdev, perfcnt->mapping->mmu);
	panfrost_gem_mapping_put(perfcnt->mapping);
	perfcnt->mapping = NULL;
	pm_runtime_put_autosuspend(pfdev->base.dev);

	return 0;
}

int panfrost_ioctl_perfcnt_enable(struct drm_device *dev, void *data,
				  struct drm_file *file_priv)
{
	struct panfrost_device *pfdev = to_panfrost_device(dev);
	struct panfrost_perfcnt *perfcnt = pfdev->perfcnt;
	struct drm_panfrost_perfcnt_enable *req = data;
	int ret;

	ret = panfrost_unstable_ioctl_check();
	if (ret)
		return ret;

	/* Only Bifrost GPUs have two sets of counters. */
	if (req->counterset > (panfrost_model_is_bifrost(pfdev) ? 1 : 0))
		return -EINVAL;

	mutex_lock(&perfcnt->lock);
	if (req->enable)
		ret = panfrost_perfcnt_enable_locked(pfdev, file_priv,
						     req->counterset);
	else
		ret = panfrost_perfcnt_disable_locked(pfdev, file_priv);
	mutex_unlock(&perfcnt->lock);

	return ret;
}

int panfrost_ioctl_perfcnt_dump(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	struct panfrost_device *pfdev = to_panfrost_device(dev);
	struct panfrost_perfcnt *perfcnt = pfdev->perfcnt;
	struct drm_panfrost_perfcnt_dump *req = data;
	void __user *user_ptr = (void __user *)(uintptr_t)req->buf_ptr;
	int ret;

	ret = panfrost_unstable_ioctl_check();
	if (ret)
		return ret;

	mutex_lock(&perfcnt->lock);
	if (perfcnt->user != file_priv->driver_priv) {
		ret = -EINVAL;
		goto out;
	}

	ret = panfrost_perfcnt_dump_locked(pfdev);
	if (ret)
		goto out;

	if (copy_to_user(user_ptr, perfcnt->buf, perfcnt->bosize))
		ret = -EFAULT;

out:
	mutex_unlock(&perfcnt->lock);

	return ret;
}

void panfrost_perfcnt_close(struct drm_file *file_priv)
{
	struct panfrost_file_priv *pfile = file_priv->driver_priv;
	struct panfrost_device *pfdev = pfile->pfdev;
	struct panfrost_perfcnt *perfcnt = pfdev->perfcnt;

	pm_runtime_get_sync(pfdev->base.dev);
	mutex_lock(&perfcnt->lock);
	if (perfcnt->user == pfile)
		panfrost_perfcnt_disable_locked(pfdev, file_priv);
	mutex_unlock(&perfcnt->lock);
	pm_runtime_put_autosuspend(pfdev->base.dev);
}

int panfrost_perfcnt_init(struct panfrost_device *pfdev)
{
	struct panfrost_perfcnt *perfcnt;
	size_t size;

	if (panfrost_has_hw_feature(pfdev, HW_FEATURE_V4)) {
		unsigned int ncoregroups;

		ncoregroups = hweight64(pfdev->features.l2_present);
		size = ncoregroups * BLOCKS_PER_COREGROUP *
		       COUNTERS_PER_BLOCK * BYTES_PER_COUNTER;
	} else {
		unsigned int nl2c, ncores;

		/*
		 * TODO: define a macro to extract the number of l2 caches from
		 * mem_features.
		 */
		nl2c = ((pfdev->features.mem_features >> 8) & GENMASK(3, 0)) + 1;

		/*
		 * shader_present might be sparse, but the counter layout
		 * forces us to dump unused regions too, hence the fls64()
		 * call instead of hweight64().
		 */
		ncores = fls64(pfdev->features.shader_present);

		/*
		 * There's always one JM and one Tiler block, hence the '+ 2'
		 * here.
		 */
		size = (nl2c + ncores + 2) *
		       COUNTERS_PER_BLOCK * BYTES_PER_COUNTER;
	}

	perfcnt = devm_kzalloc(pfdev->base.dev, sizeof(*perfcnt), GFP_KERNEL);
	if (!perfcnt)
		return -ENOMEM;

	perfcnt->bosize = size;

	/* Start with everything disabled. */
	gpu_write(pfdev, GPU_PERFCNT_CFG,
		  GPU_PERFCNT_CFG_MODE(GPU_PERFCNT_CFG_MODE_OFF));
	gpu_write(pfdev, GPU_PRFCNT_JM_EN, 0);
	gpu_write(pfdev, GPU_PRFCNT_SHADER_EN, 0);
	gpu_write(pfdev, GPU_PRFCNT_MMU_L2_EN, 0);
	gpu_write(pfdev, GPU_PRFCNT_TILER_EN, 0);

	init_completion(&perfcnt->dump_comp);
	mutex_init(&perfcnt->lock);
	pfdev->perfcnt = perfcnt;

	return 0;
}

void panfrost_perfcnt_fini(struct panfrost_device *pfdev)
{
	/* Disable everything before leaving. */
	gpu_write(pfdev, GPU_PERFCNT_CFG,
		  GPU_PERFCNT_CFG_MODE(GPU_PERFCNT_CFG_MODE_OFF));
	gpu_write(pfdev, GPU_PRFCNT_JM_EN, 0);
	gpu_write(pfdev, GPU_PRFCNT_SHADER_EN, 0);
	gpu_write(pfdev, GPU_PRFCNT_MMU_L2_EN, 0);
	gpu_write(pfdev, GPU_PRFCNT_TILER_EN, 0);
}
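
/*
 * Illustrative only, not part of the driver: a minimal sketch of how a
 * userspace profiler might drive the two ioctls above, assuming the
 * DRM_IOCTL_PANFROST_PERFCNT_* definitions from <drm/panfrost_drm.h> and a
 * kernel with these unstable ioctls enabled (see
 * panfrost_unstable_ioctl_check()). The destination buffer must be at least
 * as large as the per-GPU sample size computed in panfrost_perfcnt_init(),
 * since the dump ioctl copies the whole sample.
 *
 *	struct drm_panfrost_perfcnt_enable enable = {
 *		.enable = 1,
 *		.counterset = 0,	// only Bifrost GPUs accept set 1
 *	};
 *	struct drm_panfrost_perfcnt_dump dump = {
 *		.buf_ptr = (__u64)(uintptr_t)buf,
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_PANFROST_PERFCNT_ENABLE, &enable);
 *	// ... submit and wait for the workload to profile ...
 *	ioctl(fd, DRM_IOCTL_PANFROST_PERFCNT_DUMP, &dump);
 *	enable.enable = 0;
 *	ioctl(fd, DRM_IOCTL_PANFROST_PERFCNT_ENABLE, &enable);
 */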