// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2017 The Linux Foundation. All rights reserved.
 */

#include <linux/kref.h>
#include <linux/uaccess.h>

#include "msm_gpu.h"

int msm_file_private_set_sysprof(struct msm_file_private *ctx,
				 struct msm_gpu *gpu, int sysprof)
{
	/*
	 * Since pm_runtime and sysprof_active are both refcounts, we
	 * apply the new value first, and then unwind the previous
	 * value:
	 */

	switch (sysprof) {
	default:
		return -EINVAL;
	case 2:
		pm_runtime_get_sync(&gpu->pdev->dev);
		fallthrough;
	case 1:
		refcount_inc(&gpu->sysprof_active);
		fallthrough;
	case 0:
		break;
	}

	/* unwind old value: */
	switch (ctx->sysprof) {
	case 2:
		pm_runtime_put_autosuspend(&gpu->pdev->dev);
		fallthrough;
	case 1:
		refcount_dec(&gpu->sysprof_active);
		fallthrough;
	case 0:
		break;
	}

	ctx->sysprof = sysprof;

	return 0;
}
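
/*
 * Illustrative sketch (not part of this file): userspace reaches
 * msm_file_private_set_sysprof() via the SET_PARAM ioctl with
 * MSM_PARAM_SYSPROF, a privileged operation.  The helper name and the
 * already-opened msm drm fd below are assumptions for the example:
 *
 *	#include <drm/msm_drm.h>
 *	#include <xf86drm.h>
 *
 *	static int set_sysprof(int fd, int val)
 *	{
 *		struct drm_msm_param req = {
 *			.pipe  = MSM_PIPE_3D0,
 *			.param = MSM_PARAM_SYSPROF,
 *			.value = val,	// 0=off, 1=hold sysprof_active, 2=also hold a pm_runtime ref
 *		};
 *
 *		return drmCommandWrite(fd, DRM_MSM_SET_PARAM, &req, sizeof(req));
 *	}
 */
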
void __msm_file_private_destroy(struct kref *kref)
{
	struct msm_file_private *ctx = container_of(kref,
		struct msm_file_private, ref);
	int i;

	for (i = 0; i < ARRAY_SIZE(ctx->entities); i++) {
		if (!ctx->entities[i])
			continue;

		drm_sched_entity_destroy(ctx->entities[i]);
		kfree(ctx->entities[i]);
	}

	msm_gem_address_space_put(ctx->aspace);
	kfree(ctx->comm);
	kfree(ctx->cmdline);
	kfree(ctx);
}

void msm_submitqueue_destroy(struct kref *kref)
{
	struct msm_gpu_submitqueue *queue = container_of(kref,
		struct msm_gpu_submitqueue, ref);

	idr_destroy(&queue->fence_idr);

	msm_file_private_put(queue->ctx);

	kfree(queue);
}

struct msm_gpu_submitqueue *msm_submitqueue_get(struct msm_file_private *ctx,
		u32 id)
{
	struct msm_gpu_submitqueue *entry;

	if (!ctx)
		return NULL;

	read_lock(&ctx->queuelock);

	list_for_each_entry(entry, &ctx->submitqueues, node) {
		if (entry->id == id) {
			kref_get(&entry->ref);
			read_unlock(&ctx->queuelock);

			return entry;
		}
	}

	read_unlock(&ctx->queuelock);
	return NULL;
}

void msm_submitqueue_close(struct msm_file_private *ctx)
{
	struct msm_gpu_submitqueue *entry, *tmp;

	if (!ctx)
		return;

	/*
	 * No lock needed in close, as there won't be any more user
	 * ioctls coming our way:
	 */
	list_for_each_entry_safe(entry, tmp, &ctx->submitqueues, node) {
		list_del(&entry->node);
		msm_submitqueue_put(entry);
	}
}

static struct drm_sched_entity *
get_sched_entity(struct msm_file_private *ctx, struct msm_ringbuffer *ring,
		 unsigned ring_nr, enum drm_sched_priority sched_prio)
{
	static DEFINE_MUTEX(entity_lock);
	unsigned idx = (ring_nr * NR_SCHED_PRIORITIES) + sched_prio;

	/* We should have already validated that the requested priority is
	 * valid by the time we get here.
	 */
	if (WARN_ON(idx >= ARRAY_SIZE(ctx->entities)))
		return ERR_PTR(-EINVAL);

	mutex_lock(&entity_lock);

	if (!ctx->entities[idx]) {
		struct drm_sched_entity *entity;
		struct drm_gpu_scheduler *sched = &ring->sched;
		int ret;

		entity = kzalloc(sizeof(*ctx->entities[idx]), GFP_KERNEL);
		if (!entity) {
			mutex_unlock(&entity_lock);
			return ERR_PTR(-ENOMEM);
		}

		ret = drm_sched_entity_init(entity, sched_prio, &sched, 1, NULL);
		if (ret) {
			mutex_unlock(&entity_lock);
			kfree(entity);
			return ERR_PTR(ret);
		}

		ctx->entities[idx] = entity;
	}

	mutex_unlock(&entity_lock);

	return ctx->entities[idx];
}

int msm_submitqueue_create(struct drm_device *drm, struct msm_file_private *ctx,
		u32 prio, u32 flags, u32 *id)
{
	struct msm_drm_private *priv = drm->dev_private;
	struct msm_gpu_submitqueue *queue;
	enum drm_sched_priority sched_prio;
	extern int enable_preemption;
	bool preemption_supported;
	unsigned ring_nr;
	int ret;

	if (!ctx)
		return -ENODEV;

	if (!priv->gpu)
		return -ENODEV;

	/*
	 * Reject ALLOW_PREEMPT if the GPU came up with a single ring (so
	 * there is nothing to preempt to) while preemption was not
	 * explicitly disabled via the enable_preemption module param:
	 */
	preemption_supported = priv->gpu->nr_rings == 1 && enable_preemption != 0;

	if ((flags & MSM_SUBMITQUEUE_ALLOW_PREEMPT) && preemption_supported)
		return -EINVAL;

	ret = msm_gpu_convert_priority(priv->gpu, prio, &ring_nr, &sched_prio);
	if (ret)
		return ret;

	queue = kzalloc(sizeof(*queue), GFP_KERNEL);

	if (!queue)
		return -ENOMEM;

	kref_init(&queue->ref);
	queue->flags = flags;
	queue->ring_nr = ring_nr;

	queue->entity = get_sched_entity(ctx, priv->gpu->rb[ring_nr],
					 ring_nr, sched_prio);
	if (IS_ERR(queue->entity)) {
		ret = PTR_ERR(queue->entity);
		kfree(queue);
		return ret;
	}

	write_lock(&ctx->queuelock);

	queue->ctx = msm_file_private_get(ctx);
	queue->id = ctx->queueid++;

	if (id)
		*id = queue->id;

	idr_init(&queue->fence_idr);
	spin_lock_init(&queue->idr_lock);
	mutex_init(&queue->lock);

	list_add_tail(&queue->node, &ctx->submitqueues);

	write_unlock(&ctx->queuelock);

	return 0;
}
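
/*
 * Illustrative sketch (not part of this file): userspace creates a queue
 * with the SUBMITQUEUE_NEW ioctl, which lands in msm_submitqueue_create()
 * above.  The helper name and fd are assumptions for the example:
 *
 *	#include <stdint.h>
 *	#include <drm/msm_drm.h>
 *	#include <xf86drm.h>
 *
 *	static int create_queue(int fd, uint32_t prio, uint32_t *queue_id)
 *	{
 *		struct drm_msm_submitqueue req = {
 *			.flags = 0,	// optionally MSM_SUBMITQUEUE_ALLOW_PREEMPT
 *			.prio  = prio,	// mapped by msm_gpu_convert_priority()
 *		};
 *		int ret;
 *
 *		ret = drmCommandWriteRead(fd, DRM_MSM_SUBMITQUEUE_NEW,
 *					  &req, sizeof(req));
 *		if (ret)
 *			return ret;
 *
 *		*queue_id = req.id;	// filled in by the kernel
 *		return 0;
 *	}
 */
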
/*
 * Create the default submit-queue (id==0), used for backwards compatibility
 * with userspace that pre-dates the introduction of submitqueues.
 */
int msm_submitqueue_init(struct drm_device *drm, struct msm_file_private *ctx)
{
	struct msm_drm_private *priv = drm->dev_private;
	int default_prio, max_priority;

	if (!priv->gpu)
		return -ENODEV;

	max_priority = (priv->gpu->nr_rings * NR_SCHED_PRIORITIES) - 1;

	/*
	 * Pick a medium priority level as default.  Lower numeric values are
	 * higher priority, so round-up to pick a priority that is not higher
	 * than the middle priority level.
	 */
	default_prio = DIV_ROUND_UP(max_priority, 2);

	return msm_submitqueue_create(drm, ctx, default_prio, 0, NULL);
}

static int msm_submitqueue_query_faults(struct msm_gpu_submitqueue *queue,
		struct drm_msm_submitqueue_query *args)
{
	size_t size = min_t(size_t, args->len, sizeof(queue->faults));
	int ret;

	/* If a zero length was passed in, return the data size we expect */
	if (!args->len) {
		args->len = sizeof(queue->faults);
		return 0;
	}

	/* Set the length to the actual size of the data */
	args->len = size;

	ret = copy_to_user(u64_to_user_ptr(args->data), &queue->faults, size);

	return ret ? -EFAULT : 0;
}

int msm_submitqueue_query(struct drm_device *drm, struct msm_file_private *ctx,
		struct drm_msm_submitqueue_query *args)
{
	struct msm_gpu_submitqueue *queue;
	int ret = -EINVAL;

	if (args->pad)
		return -EINVAL;

	queue = msm_submitqueue_get(ctx, args->id);
	if (!queue)
		return -ENOENT;

	if (args->param == MSM_SUBMITQUEUE_PARAM_FAULTS)
		ret = msm_submitqueue_query_faults(queue, args);

	msm_submitqueue_put(queue);

	return ret;
}

int msm_submitqueue_remove(struct msm_file_private *ctx, u32 id)
{
	struct msm_gpu_submitqueue *entry;

	if (!ctx)
		return 0;

	/*
	 * id 0 is the "default" queue and can't be destroyed
	 * by the user
	 */
	if (!id)
		return -ENOENT;

	write_lock(&ctx->queuelock);

	list_for_each_entry(entry, &ctx->submitqueues, node) {
		if (entry->id == id) {
			list_del(&entry->node);
			write_unlock(&ctx->queuelock);

			msm_submitqueue_put(entry);
			return 0;
		}
	}

	write_unlock(&ctx->queuelock);
	return -ENOENT;
}
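
/*
 * Illustrative sketch (not part of this file): querying the per-queue fault
 * count and closing a queue from userspace.  The helper names and the
 * int-sized fault counter (matching sizeof(queue->faults) above) are
 * assumptions for the example:
 *
 *	#include <stdint.h>
 *	#include <drm/msm_drm.h>
 *	#include <xf86drm.h>
 *
 *	static int query_faults(int fd, uint32_t queue_id, int *faults)
 *	{
 *		struct drm_msm_submitqueue_query q = {
 *			.data  = (uintptr_t)faults,
 *			.id    = queue_id,
 *			.param = MSM_SUBMITQUEUE_PARAM_FAULTS,
 *			.len   = sizeof(*faults),	// len == 0 instead asks for the expected size
 *		};
 *
 *		return drmCommandWrite(fd, DRM_MSM_SUBMITQUEUE_QUERY,
 *				       &q, sizeof(q));
 *	}
 *
 *	static int close_queue(int fd, uint32_t queue_id)
 *	{
 *		return drmCommandWrite(fd, DRM_MSM_SUBMITQUEUE_CLOSE,
 *				       &queue_id, sizeof(queue_id));
 *	}
 */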