// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2017 The Linux Foundation. All rights reserved.
 */

#include <linux/kref.h>
#include <linux/uaccess.h>

#include "msm_gpu.h"

/*
 * Adjust the system-profiling (sysprof) level for a context: levels 1 and 2
 * hold a reference on gpu->sysprof_active, and level 2 additionally keeps
 * the GPU powered via pm_runtime.
 */
int msm_context_set_sysprof(struct msm_context *ctx, struct msm_gpu *gpu, int sysprof)
{
	/*
	 * Since pm_runtime and sysprof_active are both refcounts, we
	 * apply the new value first, and then unwind the previous
	 * value.
	 */

	switch (sysprof) {
	default:
		return UERR(EINVAL, gpu->dev, "Invalid sysprof: %d", sysprof);
	case 2:
		pm_runtime_get_sync(&gpu->pdev->dev);
		fallthrough;
	case 1:
		refcount_inc(&gpu->sysprof_active);
		fallthrough;
	case 0:
		break;
	}

	/* unwind the old value: */
	switch (ctx->sysprof) {
	case 2:
		pm_runtime_put_autosuspend(&gpu->pdev->dev);
		fallthrough;
	case 1:
		refcount_dec(&gpu->sysprof_active);
		fallthrough;
	case 0:
		break;
	}

	ctx->sysprof = sysprof;

	return 0;
}

/*
 * Final kref release for a msm_context: destroy its cached sched entities,
 * drop its VM reference, and free the context.
 */
void __msm_context_destroy(struct kref *kref)
{
	struct msm_context *ctx = container_of(kref,
		struct msm_context, ref);
	int i;

	for (i = 0; i < ARRAY_SIZE(ctx->entities); i++) {
		if (!ctx->entities[i])
			continue;

		drm_sched_entity_destroy(ctx->entities[i]);
		kfree(ctx->entities[i]);
	}

	drm_gpuvm_put(ctx->vm);
	kfree(ctx->comm);
	kfree(ctx->cmdline);
	kfree(ctx);
}

/* Final kref release for a submitqueue */
void msm_submitqueue_destroy(struct kref *kref)
{
	struct msm_gpu_submitqueue *queue = container_of(kref,
		struct msm_gpu_submitqueue, ref);

	idr_destroy(&queue->fence_idr);

	/* Only VM_BIND queues own their (embedded) sched entity: */
	if (queue->entity == &queue->_vm_bind_entity[0])
		drm_sched_entity_destroy(queue->entity);

	msm_context_put(queue->ctx);

	kfree(queue);
}

/* Look up a submitqueue by id, returning it with a reference held */
struct msm_gpu_submitqueue *msm_submitqueue_get(struct msm_context *ctx,
		u32 id)
{
	struct msm_gpu_submitqueue *entry;

	if (!ctx)
		return NULL;

	read_lock(&ctx->queuelock);

	list_for_each_entry(entry, &ctx->submitqueues, node) {
		if (entry->id == id) {
			kref_get(&entry->ref);
			read_unlock(&ctx->queuelock);

			return entry;
		}
	}

	read_unlock(&ctx->queuelock);
	return NULL;
}

void msm_submitqueue_close(struct msm_context *ctx)
{
	struct msm_gpu_submitqueue *queue, *tmp;

	if (!ctx)
		return;

	/*
	 * No lock is needed in close, since there won't be any more
	 * user ioctls coming our way.
	 */
	list_for_each_entry_safe(queue, tmp, &ctx->submitqueues, node) {
		if (queue->entity == &queue->_vm_bind_entity[0])
			drm_sched_entity_flush(queue->entity, MAX_WAIT_SCHED_ENTITY_Q_EMPTY);
		list_del(&queue->node);
		msm_submitqueue_put(queue);
	}

	if (!ctx->vm)
		return;

	msm_gem_vm_close(ctx->vm);
}

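/*
 * Look up (or lazily create) the per-context scheduler entity for a given
 * ring and priority level.  Entities are cached in ctx->entities[], indexed
 * by (ring_nr * NR_SCHED_PRIORITIES) + sched_prio, so submitqueues in the
 * same context that share a ring and priority also share a single entity.
 */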
static struct drm_sched_entity *
get_sched_entity(struct msm_context *ctx, struct msm_ringbuffer *ring,
		 unsigned ring_nr, enum drm_sched_priority sched_prio)
{
	static DEFINE_MUTEX(entity_lock);
	unsigned idx = (ring_nr * NR_SCHED_PRIORITIES) + sched_prio;

	/* We should have already validated that the requested priority is
	 * valid by the time we get here.
	 */
	if (WARN_ON(idx >= ARRAY_SIZE(ctx->entities)))
		return ERR_PTR(-EINVAL);

	mutex_lock(&entity_lock);

	if (!ctx->entities[idx]) {
		struct drm_sched_entity *entity;
		struct drm_gpu_scheduler *sched = &ring->sched;
		int ret;

		entity = kzalloc(sizeof(*ctx->entities[idx]), GFP_KERNEL);
		if (!entity) {
			mutex_unlock(&entity_lock);
			return ERR_PTR(-ENOMEM);
		}

		ret = drm_sched_entity_init(entity, sched_prio, &sched, 1, NULL);
		if (ret) {
			mutex_unlock(&entity_lock);
			kfree(entity);
			return ERR_PTR(ret);
		}

		ctx->entities[idx] = entity;
	}

	mutex_unlock(&entity_lock);

	return ctx->entities[idx];
}

int msm_submitqueue_create(struct drm_device *drm, struct msm_context *ctx,
		u32 prio, u32 flags, u32 *id)
{
	struct msm_drm_private *priv = drm->dev_private;
	struct msm_gpu_submitqueue *queue;
	enum drm_sched_priority sched_prio;
	unsigned ring_nr;
	int ret;

	if (!ctx)
		return -ENODEV;

	if (!priv->gpu)
		return -ENODEV;

	if (flags & MSM_SUBMITQUEUE_VM_BIND) {
		unsigned sz;

		/* Not allowed for kernel managed VMs (ie. kernel allocs VA) */
		if (!msm_context_is_vmbind(ctx))
			return -EINVAL;

		if (prio)
			return -EINVAL;

		sz = struct_size(queue, _vm_bind_entity, 1);
		queue = kzalloc(sz, GFP_KERNEL);
	} else {
		extern int enable_preemption;
		bool preemption_supported =
			priv->gpu->nr_rings > 1 && enable_preemption != 0;

		/* Reject the preemption flag if preemption isn't available: */
		if ((flags & MSM_SUBMITQUEUE_ALLOW_PREEMPT) && !preemption_supported)
			return -EINVAL;

		ret = msm_gpu_convert_priority(priv->gpu, prio, &ring_nr, &sched_prio);
		if (ret)
			return ret;

		queue = kzalloc(sizeof(*queue), GFP_KERNEL);
	}

	if (!queue)
		return -ENOMEM;

	kref_init(&queue->ref);
	queue->flags = flags;

	if (flags & MSM_SUBMITQUEUE_VM_BIND) {
		struct drm_gpu_scheduler *sched = &to_msm_vm(msm_context_vm(drm, ctx))->sched;

		queue->entity = &queue->_vm_bind_entity[0];

		drm_sched_entity_init(queue->entity, DRM_SCHED_PRIORITY_KERNEL,
				      &sched, 1, NULL);
	} else {
		queue->ring_nr = ring_nr;

		queue->entity = get_sched_entity(ctx, priv->gpu->rb[ring_nr],
						 ring_nr, sched_prio);
	}

	if (IS_ERR(queue->entity)) {
		ret = PTR_ERR(queue->entity);
		kfree(queue);
		return ret;
	}

	write_lock(&ctx->queuelock);

	queue->ctx = msm_context_get(ctx);
	queue->id = ctx->queueid++;

	if (id)
		*id = queue->id;

	idr_init(&queue->fence_idr);
	spin_lock_init(&queue->idr_lock);
	mutex_init(&queue->lock);

	list_add_tail(&queue->node, &ctx->submitqueues);

	write_unlock(&ctx->queuelock);

	return 0;
}

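/*
 * Illustrative userspace sketch (not part of the driver): creating a
 * submitqueue via the ioctl serviced by msm_submitqueue_create() above.
 * Names are taken from the msm UAPI header; error handling is omitted.
 *
 *	struct drm_msm_submitqueue req = {
 *		.flags = 0,	// optionally MSM_SUBMITQUEUE_ALLOW_PREEMPT
 *		.prio  = 0,	// 0 is the highest priority level
 *	};
 *
 *	ioctl(drm_fd, DRM_IOCTL_MSM_SUBMITQUEUE_NEW, &req);
 *	// on success, req.id identifies the new queue for later submits
 */
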
/*
 * Create the default submit-queue (id==0), used for backwards compatibility
 * with userspace that pre-dates the introduction of submitqueues.
 */
int msm_submitqueue_init(struct drm_device *drm, struct msm_context *ctx)
{
	struct msm_drm_private *priv = drm->dev_private;
	int default_prio, max_priority;

	if (!priv->gpu)
		return -ENODEV;

	max_priority = (priv->gpu->nr_rings * NR_SCHED_PRIORITIES) - 1;

	/*
	 * Pick a medium priority level as default.  Lower numeric values are
	 * higher priority, so round up to pick a priority that is not higher
	 * than the middle priority level.
	 */
	default_prio = DIV_ROUND_UP(max_priority, 2);

	return msm_submitqueue_create(drm, ctx, default_prio, 0, NULL);
}

static int msm_submitqueue_query_faults(struct msm_gpu_submitqueue *queue,
		struct drm_msm_submitqueue_query *args)
{
	size_t size = min_t(size_t, args->len, sizeof(queue->faults));
	int ret;

	/* If a zero length was passed in, return the data size we expect */
	if (!args->len) {
		args->len = sizeof(queue->faults);
		return 0;
	}

	/* Set the length to the actual size of the data */
	args->len = size;

	ret = copy_to_user(u64_to_user_ptr(args->data), &queue->faults, size);

	return ret ? -EFAULT : 0;
}

int msm_submitqueue_query(struct drm_device *drm, struct msm_context *ctx,
		struct drm_msm_submitqueue_query *args)
{
	struct msm_gpu_submitqueue *queue;
	int ret = -EINVAL;

	if (args->pad)
		return -EINVAL;

	queue = msm_submitqueue_get(ctx, args->id);
	if (!queue)
		return -ENOENT;

	if (args->param == MSM_SUBMITQUEUE_PARAM_FAULTS)
		ret = msm_submitqueue_query_faults(queue, args);

	msm_submitqueue_put(queue);

	return ret;
}

int msm_submitqueue_remove(struct msm_context *ctx, u32 id)
{
	struct msm_gpu_submitqueue *entry;

	if (!ctx)
		return 0;

	/*
	 * id 0 is the "default" queue and can't be destroyed
	 * by the user.
	 */
	if (!id)
		return -ENOENT;

	write_lock(&ctx->queuelock);

	list_for_each_entry(entry, &ctx->submitqueues, node) {
		if (entry->id == id) {
			list_del(&entry->node);
			write_unlock(&ctx->queuelock);

			msm_submitqueue_put(entry);
			return 0;
		}
	}

	write_unlock(&ctx->queuelock);
	return -ENOENT;
}
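
/*
 * Illustrative userspace sketch (not part of the driver): reading the
 * per-queue fault count via the ioctl serviced by msm_submitqueue_query()
 * above.  A zero len first asks the kernel how many bytes it expects, as
 * handled in msm_submitqueue_query_faults().  Names are taken from the msm
 * UAPI header; error handling is omitted.
 *
 *	int faults = 0;
 *	struct drm_msm_submitqueue_query q = {
 *		.id    = queue_id,
 *		.param = MSM_SUBMITQUEUE_PARAM_FAULTS,
 *		.len   = 0,
 *	};
 *
 *	ioctl(drm_fd, DRM_IOCTL_MSM_SUBMITQUEUE_QUERY, &q);	// q.len = expected size
 *
 *	q.data = (uintptr_t)&faults;
 *	q.len  = sizeof(faults);
 *	ioctl(drm_fd, DRM_IOCTL_MSM_SUBMITQUEUE_QUERY, &q);	// copies queue->faults
 */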