// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#include "drm/drm_drv.h"

#include "msm_gpu.h"
#include "msm_gem.h"
#include "msm_mmu.h"
#include "msm_fence.h"
#include "msm_gpu_trace.h"
//#include "adreno/adreno_gpu.h"

#include <generated/utsrelease.h>
#include <linux/string_helpers.h>
#include <linux/devcoredump.h>
#include <linux/sched/task.h>

/*
 * Power Management:
 */

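/*
 * Enable the GPU core ("vdd") and CX ("vddcx") rails.  Both regulators are
 * optional; targets without them leave the pointers NULL at init time.
 */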
static int enable_pwrrail(struct msm_gpu *gpu)
{
	struct drm_device *dev = gpu->dev;
	int ret = 0;

	if (gpu->gpu_reg) {
		ret = regulator_enable(gpu->gpu_reg);
		if (ret) {
			DRM_DEV_ERROR(dev->dev, "failed to enable 'gpu_reg': %d\n", ret);
			return ret;
		}
	}

	if (gpu->gpu_cx) {
		ret = regulator_enable(gpu->gpu_cx);
		if (ret) {
			DRM_DEV_ERROR(dev->dev, "failed to enable 'gpu_cx': %d\n", ret);
			return ret;
		}
	}

	return 0;
}

static int disable_pwrrail(struct msm_gpu *gpu)
{
	if (gpu->gpu_cx)
		regulator_disable(gpu->gpu_cx);
	if (gpu->gpu_reg)
		regulator_disable(gpu->gpu_reg);
	return 0;
}

static int enable_clk(struct msm_gpu *gpu)
{
	if (gpu->core_clk && gpu->fast_rate)
		dev_pm_opp_set_rate(&gpu->pdev->dev, gpu->fast_rate);

	/* Set the RBBM timer rate to 19.2 MHz */
	if (gpu->rbbmtimer_clk)
		clk_set_rate(gpu->rbbmtimer_clk, 19200000);

	return clk_bulk_prepare_enable(gpu->nr_clocks, gpu->grp_clks);
}

static int disable_clk(struct msm_gpu *gpu)
{
	clk_bulk_disable_unprepare(gpu->nr_clocks, gpu->grp_clks);

	/*
	 * Set the clock to a deliberately low rate. On older targets the clock
	 * speed had to be non zero to avoid problems. On newer targets this
	 * will be rounded down to zero anyway so it all works out.
	 */
	if (gpu->core_clk)
		dev_pm_opp_set_rate(&gpu->pdev->dev, 27000000);

	if (gpu->rbbmtimer_clk)
		clk_set_rate(gpu->rbbmtimer_clk, 0);

	return 0;
}

static int enable_axi(struct msm_gpu *gpu)
{
	return clk_prepare_enable(gpu->ebi1_clk);
}

static int disable_axi(struct msm_gpu *gpu)
{
	clk_disable_unprepare(gpu->ebi1_clk);
	return 0;
}

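/*
 * Power up in dependency order: rails first, then core clocks, then the
 * AXI/bus clock, and finally resume devfreq.  A full hw_init is required
 * before the next submit after resume.
 */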
int msm_gpu_pm_resume(struct msm_gpu *gpu)
{
	int ret;

	DBG("%s", gpu->name);
	trace_msm_gpu_resume(0);

	ret = enable_pwrrail(gpu);
	if (ret)
		return ret;

	ret = enable_clk(gpu);
	if (ret)
		return ret;

	ret = enable_axi(gpu);
	if (ret)
		return ret;

	msm_devfreq_resume(gpu);

	gpu->needs_hw_init = true;

	return 0;
}

int msm_gpu_pm_suspend(struct msm_gpu *gpu)
{
	int ret;

	DBG("%s", gpu->name);
	trace_msm_gpu_suspend(0);

	msm_devfreq_suspend(gpu);

	ret = disable_axi(gpu);
	if (ret)
		return ret;

	ret = disable_clk(gpu);
	if (ret)
		return ret;

	ret = disable_pwrrail(gpu);
	if (ret)
		return ret;

	gpu->suspend_count++;

	return 0;
}

void msm_gpu_show_fdinfo(struct msm_gpu *gpu, struct msm_context *ctx,
		struct drm_printer *p)
{
	drm_printf(p, "drm-engine-gpu:\t%llu ns\n", ctx->elapsed_ns);
	drm_printf(p, "drm-cycles-gpu:\t%llu\n", ctx->cycles);
	drm_printf(p, "drm-maxfreq-gpu:\t%u Hz\n", gpu->fast_rate);
}

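/*
 * (Re)initialize the hardware if needed.  Must be called with gpu->lock
 * held; the IRQ is masked while gpu->funcs->hw_init() runs.
 */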
int msm_gpu_hw_init(struct msm_gpu *gpu)
{
	int ret;

	WARN_ON(!mutex_is_locked(&gpu->lock));

	if (!gpu->needs_hw_init)
		return 0;

	disable_irq(gpu->irq);
	ret = gpu->funcs->hw_init(gpu);
	if (!ret)
		gpu->needs_hw_init = false;
	enable_irq(gpu->irq);

	return ret;
}

#ifdef CONFIG_DEV_COREDUMP
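/*
 * devcoredump read() callback: format the currently recorded crash state
 * into the caller's buffer via the drm coredump printer.
 */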
static ssize_t msm_gpu_devcoredump_read(char *buffer, loff_t offset,
		size_t count, void *data, size_t datalen)
{
	struct msm_gpu *gpu = data;
	struct drm_print_iterator iter;
	struct drm_printer p;
	struct msm_gpu_state *state;

	state = msm_gpu_crashstate_get(gpu);
	if (!state)
		return 0;

	iter.data = buffer;
	iter.offset = 0;
	iter.start = offset;
	iter.remain = count;

	p = drm_coredump_printer(&iter);

	drm_printf(&p, "---\n");
	drm_printf(&p, "kernel: " UTS_RELEASE "\n");
	drm_printf(&p, "module: " KBUILD_MODNAME "\n");
	drm_printf(&p, "time: %lld.%09ld\n",
		state->time.tv_sec, state->time.tv_nsec);
	if (state->comm)
		drm_printf(&p, "comm: %s\n", state->comm);
	if (state->cmd)
		drm_printf(&p, "cmdline: %s\n", state->cmd);

	gpu->funcs->show(gpu, state, &p);

	msm_gpu_crashstate_put(gpu);

	return count - iter.remain;
}

static void msm_gpu_devcoredump_free(void *data)
{
	struct msm_gpu *gpu = data;

	msm_gpu_crashstate_put(gpu);
}

static void msm_gpu_crashstate_get_bo(struct msm_gpu_state *state,
		struct drm_gem_object *obj, u64 iova,
		bool full, size_t offset, size_t size)
{
	struct msm_gpu_state_bo *state_bo = &state->bos[state->nr_bos];
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	/* Don't record write only objects */
	state_bo->size = size;
	state_bo->flags = msm_obj->flags;
	state_bo->iova = iova;

	BUILD_BUG_ON(sizeof(state_bo->name) != sizeof(msm_obj->name));

	memcpy(state_bo->name, msm_obj->name, sizeof(state_bo->name));

	if (full) {
		void *ptr;

		state_bo->data = kvmalloc(size, GFP_KERNEL);
		if (!state_bo->data)
			goto out;

		ptr = msm_gem_get_vaddr_active(obj);
		if (IS_ERR(ptr)) {
			kvfree(state_bo->data);
			state_bo->data = NULL;
			goto out;
		}

		memcpy(state_bo->data, ptr + offset, size);
		msm_gem_put_vaddr_locked(obj);
	}
out:
	state->nr_bos++;
}

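/*
 * Snapshot the BOs referenced by the hung submit: for VM_BIND userspace we
 * walk the whole VM (locking objects via drm_exec), otherwise we use the
 * submit's BO table.  BO contents are only copied for objects marked for
 * dumping (or when rd_full is set).
 */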
static void crashstate_get_bos(struct msm_gpu_state *state, struct msm_gem_submit *submit)
{
	extern bool rd_full;

	if (msm_context_is_vmbind(submit->queue->ctx)) {
		struct drm_exec exec;
		struct drm_gpuva *vma;
		unsigned cnt = 0;

		drm_exec_init(&exec, DRM_EXEC_IGNORE_DUPLICATES, 0);
		drm_exec_until_all_locked(&exec) {
			cnt = 0;

			drm_exec_lock_obj(&exec, drm_gpuvm_resv_obj(submit->vm));
			drm_exec_retry_on_contention(&exec);

			drm_gpuvm_for_each_va (vma, submit->vm) {
				if (!vma->gem.obj)
					continue;

				cnt++;
				drm_exec_lock_obj(&exec, vma->gem.obj);
				drm_exec_retry_on_contention(&exec);
			}
		}

		drm_gpuvm_for_each_va (vma, submit->vm)
			cnt++;

		state->bos = kcalloc(cnt, sizeof(struct msm_gpu_state_bo), GFP_KERNEL);

		drm_gpuvm_for_each_va (vma, submit->vm) {
			bool dump = rd_full || (vma->flags & MSM_VMA_DUMP);

			/* Skip MAP_NULL/PRR VMAs: */
			if (!vma->gem.obj)
				continue;

			msm_gpu_crashstate_get_bo(state, vma->gem.obj, vma->va.addr,
					dump, vma->gem.offset, vma->va.range);
		}

		drm_exec_fini(&exec);
	} else {
		state->bos = kcalloc(submit->nr_bos,
			sizeof(struct msm_gpu_state_bo), GFP_KERNEL);

		for (int i = 0; state->bos && i < submit->nr_bos; i++) {
			struct drm_gem_object *obj = submit->bos[i].obj;
			bool dump = rd_full || (submit->bos[i].flags & MSM_SUBMIT_BO_DUMP);

			msm_gem_lock(obj);
			msm_gpu_crashstate_get_bo(state, obj, submit->bos[i].iova,
					dump, 0, obj->size);
			msm_gem_unlock(obj);
		}
	}
}

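/*
 * Copy out the VM's circular log of mapping operations, oldest entry first,
 * so the crash dump shows the VM activity leading up to the fault.
 */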
static void crashstate_get_vm_logs(struct msm_gpu_state *state, struct msm_gem_vm *vm)
{
	uint32_t vm_log_len = (1 << vm->log_shift);
	uint32_t vm_log_mask = vm_log_len - 1;
	int first;

	/* Bail if no log, or empty log: */
	if (!vm->log || !vm->log[0].op)
		return;

	mutex_lock(&vm->mmu_lock);

	/*
	 * log_idx is the next entry to overwrite, meaning it is the oldest, or
	 * first, entry (other than the special case handled below where the
	 * log hasn't wrapped around yet)
	 */
	first = vm->log_idx;

	if (!vm->log[first].op) {
		/*
		 * If the next log entry has not been written yet, then only
		 * entries 0 to idx-1 are valid (ie. we haven't wrapped around
		 * yet)
		 */
		state->nr_vm_logs = first;
		first = 0;
	} else {
		state->nr_vm_logs = vm_log_len;
	}

	state->vm_logs = kmalloc_array(
		state->nr_vm_logs, sizeof(vm->log[0]), GFP_KERNEL);
	for (int i = 0; i < state->nr_vm_logs; i++) {
		int idx = (i + first) & vm_log_mask;

		state->vm_logs[i] = vm->log[idx];
	}

	mutex_unlock(&vm->mmu_lock);
}

static void msm_gpu_crashstate_capture(struct msm_gpu *gpu,
		struct msm_gem_submit *submit, struct msm_gpu_fault_info *fault_info,
		char *comm, char *cmd)
{
	struct msm_gpu_state *state;

	/* Check if the target supports capturing crash state */
	if (!gpu->funcs->gpu_state_get)
		return;

	/* Only save one crash state at a time */
	if (gpu->crashstate)
		return;

	state = gpu->funcs->gpu_state_get(gpu);
	if (IS_ERR_OR_NULL(state))
		return;

	/* Fill in the additional crash state information */
	state->comm = kstrdup(comm, GFP_KERNEL);
	state->cmd = kstrdup(cmd, GFP_KERNEL);
	if (fault_info)
		state->fault_info = *fault_info;

	if (submit && state->fault_info.ttbr0) {
		struct msm_gpu_fault_info *info = &state->fault_info;
		struct msm_mmu *mmu = to_msm_vm(submit->vm)->mmu;

		msm_iommu_pagetable_params(mmu, &info->pgtbl_ttbr0,
				&info->asid);
		msm_iommu_pagetable_walk(mmu, info->iova, info->ptes);
	}

	if (submit) {
		crashstate_get_vm_logs(state, to_msm_vm(submit->vm));
		crashstate_get_bos(state, submit);
	}

	/* Set the active crash state to be dumped on failure */
	gpu->crashstate = state;

	dev_coredumpm(&gpu->pdev->dev, THIS_MODULE, gpu, 0, GFP_KERNEL,
		msm_gpu_devcoredump_read, msm_gpu_devcoredump_free);
}
#else
static void msm_gpu_crashstate_capture(struct msm_gpu *gpu,
		struct msm_gem_submit *submit, struct msm_gpu_fault_info *fault_info,
		char *comm, char *cmd)
{
}
#endif

/*
 * Hangcheck detection for locked gpu:
 */

static struct msm_gem_submit *
find_submit(struct msm_ringbuffer *ring, uint32_t fence)
{
	struct msm_gem_submit *submit;
	unsigned long flags;

	spin_lock_irqsave(&ring->submit_lock, flags);
	list_for_each_entry(submit, &ring->submits, node) {
		if (submit->seqno == fence) {
			spin_unlock_irqrestore(&ring->submit_lock, flags);
			return submit;
		}
	}
	spin_unlock_irqrestore(&ring->submit_lock, flags);

	return NULL;
}

static void retire_submits(struct msm_gpu *gpu);

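/*
 * Best effort means of grabbing comm and cmdline for the process behind a
 * submit: prefer the values userspace provided on the context, and fall
 * back to the submitting task.
 */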
static void get_comm_cmdline(struct msm_gem_submit *submit, char **comm, char **cmd)
{
	struct msm_context *ctx = submit->queue->ctx;
	struct task_struct *task;

	WARN_ON(!mutex_is_locked(&submit->gpu->lock));

	/* Note that kstrdup will return NULL if argument is NULL: */
	*comm = kstrdup(ctx->comm, GFP_KERNEL);
	*cmd = kstrdup(ctx->cmdline, GFP_KERNEL);

	task = get_pid_task(submit->pid, PIDTYPE_PID);
	if (!task)
		return;

	if (!*comm)
		*comm = kstrdup(task->comm, GFP_KERNEL);

	if (!*cmd)
		*cmd = kstrdup_quotable_cmdline(task, GFP_KERNEL);

	put_task_struct(task);
}

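/*
 * GPU recovery: find the offending submit, update fault counters, dump the
 * crash state, then reset the GPU and replay the innocent submits that were
 * queued behind it.
 */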
static void recover_worker(struct kthread_work *work)
{
	struct msm_gpu *gpu = container_of(work, struct msm_gpu, recover_work);
	struct drm_device *dev = gpu->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_submit *submit;
	struct msm_ringbuffer *cur_ring = gpu->funcs->active_ring(gpu);
	char *comm = NULL, *cmd = NULL;
	struct task_struct *task;
	int i;

	mutex_lock(&gpu->lock);

	DRM_DEV_ERROR(dev->dev, "%s: hangcheck recover!\n", gpu->name);

	submit = find_submit(cur_ring, cur_ring->memptrs->fence + 1);

	/*
	 * If the submit retired while we were waiting for the worker to run,
	 * or waiting to acquire the gpu lock, then nothing more to do.
	 */
	if (!submit)
		goto out_unlock;

	/* Increment the fault counts */
	submit->queue->faults++;

	task = get_pid_task(submit->pid, PIDTYPE_PID);
	if (!task) {
		gpu->global_faults++;
	} else {
		struct msm_gem_vm *vm = to_msm_vm(submit->vm);

		vm->faults++;

		/*
		 * If userspace has opted-in to VM_BIND (and therefore userspace
		 * management of the VM), faults mark the VM as unusable.  This
		 * matches vulkan expectations (vulkan is the main target for
		 * VM_BIND).
		 */
		if (!vm->managed)
			msm_gem_vm_unusable(submit->vm);

		put_task_struct(task);
	}

	get_comm_cmdline(submit, &comm, &cmd);

	if (comm && cmd) {
		DRM_DEV_ERROR(dev->dev, "%s: offending task: %s (%s)\n",
			gpu->name, comm, cmd);

		msm_rd_dump_submit(priv->hangrd, submit,
			"offending task: %s (%s)", comm, cmd);
	} else {
		DRM_DEV_ERROR(dev->dev, "%s: offending task: unknown\n", gpu->name);

		msm_rd_dump_submit(priv->hangrd, submit, NULL);
	}

	/* Record the crash state */
	pm_runtime_get_sync(&gpu->pdev->dev);
	msm_gpu_crashstate_capture(gpu, submit, NULL, comm, cmd);

	kfree(cmd);
	kfree(comm);

	/*
	 * Update all the rings with the latest and greatest fence.. this
	 * needs to happen after msm_rd_dump_submit() to ensure that the
	 * bo's referenced by the offending submit are still around.
	 */
	for (i = 0; i < gpu->nr_rings; i++) {
		struct msm_ringbuffer *ring = gpu->rb[i];

		uint32_t fence = ring->memptrs->fence;

		/*
		 * For the current (faulting?) ring/submit advance the fence by
		 * one more to clear the faulting submit
		 */
		if (ring == cur_ring)
			ring->memptrs->fence = ++fence;

		msm_update_fence(ring->fctx, fence);
	}

	if (msm_gpu_active(gpu)) {
		/* retire completed submits, plus the one that hung: */
		retire_submits(gpu);

		gpu->funcs->recover(gpu);

		/*
		 * Replay all remaining submits starting with highest priority
		 * ring
		 */
		for (i = 0; i < gpu->nr_rings; i++) {
			struct msm_ringbuffer *ring = gpu->rb[i];
			unsigned long flags;

			spin_lock_irqsave(&ring->submit_lock, flags);
			list_for_each_entry(submit, &ring->submits, node) {
				/*
				 * If the submit uses an unusable vm make sure
				 * we don't actually run it
				 */
				if (to_msm_vm(submit->vm)->unusable)
					submit->nr_cmds = 0;
				gpu->funcs->submit(gpu, submit);
			}
			spin_unlock_irqrestore(&ring->submit_lock, flags);
		}
	}

	pm_runtime_put(&gpu->pdev->dev);

out_unlock:
	mutex_unlock(&gpu->lock);

	msm_gpu_retire(gpu);
}

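/*
 * Capture crash state in response to a GPU iova fault.  Faults tend to come
 * in storms, so only the first fault per submit is dumped.
 */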
void msm_gpu_fault_crashstate_capture(struct msm_gpu *gpu, struct msm_gpu_fault_info *fault_info)
{
	struct msm_gem_submit *submit;
	struct msm_ringbuffer *cur_ring = gpu->funcs->active_ring(gpu);
	char *comm = NULL, *cmd = NULL;

	mutex_lock(&gpu->lock);

	submit = find_submit(cur_ring, cur_ring->memptrs->fence + 1);
	if (submit && submit->fault_dumped)
		goto resume_smmu;

	if (submit) {
		get_comm_cmdline(submit, &comm, &cmd);

		/*
		 * When we get GPU iova faults, we can get 1000s of them,
		 * but we really only want to log the first one.
		 */
		submit->fault_dumped = true;
	}

	/* Record the crash state */
	pm_runtime_get_sync(&gpu->pdev->dev);
	msm_gpu_crashstate_capture(gpu, submit, fault_info, comm, cmd);
	pm_runtime_put_sync(&gpu->pdev->dev);

	kfree(cmd);
	kfree(comm);

resume_smmu:
	mutex_unlock(&gpu->lock);
}

static void hangcheck_timer_reset(struct msm_gpu *gpu)
{
	struct msm_drm_private *priv = gpu->dev->dev_private;

	mod_timer(&gpu->hangcheck_timer,
			round_jiffies_up(jiffies + msecs_to_jiffies(priv->hangcheck_period)));
}

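/*
 * Allow the hung ring a few extra hangcheck periods (up to
 * DRM_MSM_HANGCHECK_PROGRESS_RETRIES) if the backend can show that it is
 * still making forward progress.
 */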
static bool made_progress(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
{
	if (ring->hangcheck_progress_retries >= DRM_MSM_HANGCHECK_PROGRESS_RETRIES)
		return false;

	if (!gpu->funcs->progress)
		return false;

	if (!gpu->funcs->progress(gpu, ring))
		return false;

	ring->hangcheck_progress_retries++;
	return true;
}

static void hangcheck_handler(struct timer_list *t)
{
	struct msm_gpu *gpu = timer_container_of(gpu, t, hangcheck_timer);
	struct drm_device *dev = gpu->dev;
	struct msm_ringbuffer *ring = gpu->funcs->active_ring(gpu);
	uint32_t fence = ring->memptrs->fence;

	if (fence != ring->hangcheck_fence) {
		/* some progress has been made.. ya! */
		ring->hangcheck_fence = fence;
		ring->hangcheck_progress_retries = 0;
	} else if (fence_before(fence, ring->fctx->last_fence) &&
			!made_progress(gpu, ring)) {
		/* no progress and not done.. hung! */
		ring->hangcheck_fence = fence;
		ring->hangcheck_progress_retries = 0;
		DRM_DEV_ERROR(dev->dev, "%s: hangcheck detected gpu lockup rb %d!\n",
				gpu->name, ring->id);
		DRM_DEV_ERROR(dev->dev, "%s: completed fence: %u\n",
				gpu->name, fence);
		DRM_DEV_ERROR(dev->dev, "%s: submitted fence: %u\n",
				gpu->name, ring->fctx->last_fence);

		kthread_queue_work(gpu->worker, &gpu->recover_work);
	}

	/* if still more pending work, reset the hangcheck timer: */
	if (fence_after(ring->fctx->last_fence, ring->hangcheck_fence))
		hangcheck_timer_reset(gpu);

	/* workaround for missing irq: */
	msm_gpu_retire(gpu);
}

/*
 * Performance Counters:
 */

/* called under perf_lock */
static int update_hw_cntrs(struct msm_gpu *gpu, uint32_t ncntrs, uint32_t *cntrs)
{
	uint32_t current_cntrs[ARRAY_SIZE(gpu->last_cntrs)];
	int i, n = min(ncntrs, gpu->num_perfcntrs);

	/* read current values: */
	for (i = 0; i < gpu->num_perfcntrs; i++)
		current_cntrs[i] = gpu_read(gpu, gpu->perfcntrs[i].sample_reg);

	/* update cntrs: */
	for (i = 0; i < n; i++)
		cntrs[i] = current_cntrs[i] - gpu->last_cntrs[i];

	/* save current values: */
	for (i = 0; i < gpu->num_perfcntrs; i++)
		gpu->last_cntrs[i] = current_cntrs[i];

	return n;
}

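/*
 * Accumulate active vs. total wall time since the last sample, feeding the
 * activetime/totaltime values reported by msm_gpu_perfcntr_sample().
 */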
static void update_sw_cntrs(struct msm_gpu *gpu)
{
	ktime_t time;
	uint32_t elapsed;
	unsigned long flags;

	spin_lock_irqsave(&gpu->perf_lock, flags);
	if (!gpu->perfcntr_active)
		goto out;

	time = ktime_get();
	elapsed = ktime_to_us(ktime_sub(time, gpu->last_sample.time));

	gpu->totaltime += elapsed;
	if (gpu->last_sample.active)
		gpu->activetime += elapsed;

	gpu->last_sample.active = msm_gpu_active(gpu);
	gpu->last_sample.time = time;

out:
	spin_unlock_irqrestore(&gpu->perf_lock, flags);
}

void msm_gpu_perfcntr_start(struct msm_gpu *gpu)
{
	unsigned long flags;

	pm_runtime_get_sync(&gpu->pdev->dev);

	spin_lock_irqsave(&gpu->perf_lock, flags);
	/* we could dynamically enable/disable perfcntr registers too.. */
	gpu->last_sample.active = msm_gpu_active(gpu);
	gpu->last_sample.time = ktime_get();
	gpu->activetime = gpu->totaltime = 0;
	gpu->perfcntr_active = true;
	update_hw_cntrs(gpu, 0, NULL);
	spin_unlock_irqrestore(&gpu->perf_lock, flags);
}

void msm_gpu_perfcntr_stop(struct msm_gpu *gpu)
{
	gpu->perfcntr_active = false;
	pm_runtime_put_sync(&gpu->pdev->dev);
}

/* returns -errno or # of cntrs sampled */
int msm_gpu_perfcntr_sample(struct msm_gpu *gpu, uint32_t *activetime,
		uint32_t *totaltime, uint32_t ncntrs, uint32_t *cntrs)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&gpu->perf_lock, flags);

	if (!gpu->perfcntr_active) {
		ret = -EINVAL;
		goto out;
	}

	*activetime = gpu->activetime;
	*totaltime = gpu->totaltime;

	gpu->activetime = gpu->totaltime = 0;

	ret = update_hw_cntrs(gpu, ncntrs, cntrs);

out:
	spin_unlock_irqrestore(&gpu->perf_lock, flags);

	return ret;
}

/*
 * Cmdstream submission/retirement:
 */

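/*
 * Retire a single submit: update per-context stats from the GPU-written
 * memptrs, drop the submit from the ring's list, update devfreq/runpm
 * state, and drop the list's reference.
 */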
static void retire_submit(struct msm_gpu *gpu, struct msm_ringbuffer *ring,
		struct msm_gem_submit *submit)
{
	int index = submit->seqno % MSM_GPU_SUBMIT_STATS_COUNT;
	volatile struct msm_gpu_submit_stats *stats;
	u64 elapsed, clock = 0, cycles;
	unsigned long flags;

	stats = &ring->memptrs->stats[index];
	/* Convert 19.2 MHz alwayson ticks to nanoseconds for elapsed time */
	elapsed = (stats->alwayson_end - stats->alwayson_start) * 10000;
	do_div(elapsed, 192);

	cycles = stats->cpcycles_end - stats->cpcycles_start;

	/* Calculate the clock frequency from the number of CP cycles */
	if (elapsed) {
		clock = cycles * 1000;
		do_div(clock, elapsed);
	}

	submit->queue->ctx->elapsed_ns += elapsed;
	submit->queue->ctx->cycles += cycles;

	trace_msm_gpu_submit_retired(submit, elapsed, clock,
		stats->alwayson_start, stats->alwayson_end);

	msm_submit_retire(submit);

	pm_runtime_mark_last_busy(&gpu->pdev->dev);

	spin_lock_irqsave(&ring->submit_lock, flags);
	list_del(&submit->node);
	spin_unlock_irqrestore(&ring->submit_lock, flags);

	/* Update devfreq on transition from active->idle: */
	mutex_lock(&gpu->active_lock);
	gpu->active_submits--;
	WARN_ON(gpu->active_submits < 0);
	if (!gpu->active_submits) {
		msm_devfreq_idle(gpu);
		pm_runtime_put_autosuspend(&gpu->pdev->dev);
	}
	mutex_unlock(&gpu->active_lock);

	msm_gem_submit_put(submit);
}

static void retire_submits(struct msm_gpu *gpu)
{
	int i;

	/* Retire the commits starting with highest priority */
	for (i = 0; i < gpu->nr_rings; i++) {
		struct msm_ringbuffer *ring = gpu->rb[i];

		while (true) {
			struct msm_gem_submit *submit = NULL;
			unsigned long flags;

			spin_lock_irqsave(&ring->submit_lock, flags);
			submit = list_first_entry_or_null(&ring->submits,
				struct msm_gem_submit, node);
			spin_unlock_irqrestore(&ring->submit_lock, flags);

			/*
			 * If no submit, we are done.  If submit->hw_fence hasn't
			 * been signalled, then later submits are not signalled
			 * either, so we are also done.
			 */
			if (submit && dma_fence_is_signaled(submit->hw_fence)) {
				retire_submit(gpu, ring, submit);
			} else {
				break;
			}
		}
	}

	wake_up_all(&gpu->retire_event);
}

static void retire_worker(struct kthread_work *work)
{
	struct msm_gpu *gpu = container_of(work, struct msm_gpu, retire_work);

	retire_submits(gpu);
}

/* call from irq handler to schedule work to retire bo's */
void msm_gpu_retire(struct msm_gpu *gpu)
{
	int i;

	for (i = 0; i < gpu->nr_rings; i++)
		msm_update_fence(gpu->rb[i]->fctx, gpu->rb[i]->memptrs->fence);

	kthread_queue_work(gpu->worker, &gpu->retire_work);
	update_sw_cntrs(gpu);
}

/* add bo's to gpu's ring, and kick gpu: */
void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
{
	struct msm_ringbuffer *ring = submit->ring;
	unsigned long flags;

	WARN_ON(!mutex_is_locked(&gpu->lock));

	pm_runtime_get_sync(&gpu->pdev->dev);

	msm_gpu_hw_init(gpu);

	submit->seqno = submit->hw_fence->seqno;

	update_sw_cntrs(gpu);

	/*
	 * ring->submits holds a ref to the submit, to deal with the case
	 * that a submit completes before msm_ioctl_gem_submit() returns.
	 */
	msm_gem_submit_get(submit);

	spin_lock_irqsave(&ring->submit_lock, flags);
	list_add_tail(&submit->node, &ring->submits);
	spin_unlock_irqrestore(&ring->submit_lock, flags);

	/* Update devfreq on transition from idle->active: */
	mutex_lock(&gpu->active_lock);
	if (!gpu->active_submits) {
		pm_runtime_get(&gpu->pdev->dev);
		msm_devfreq_active(gpu);
	}
	gpu->active_submits++;
	mutex_unlock(&gpu->active_lock);

	gpu->funcs->submit(gpu, submit);
	submit->ring->cur_ctx_seqno = submit->queue->ctx->seqno;

	pm_runtime_put(&gpu->pdev->dev);
	hangcheck_timer_reset(gpu);
}

/*
 * Init/Cleanup:
 */

static irqreturn_t irq_handler(int irq, void *data)
{
	struct msm_gpu *gpu = data;

	return gpu->funcs->irq(gpu);
}

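/*
 * Get all clocks from DT, and look up the "core" and "rbbmtimer" clocks by
 * name so they can be driven at specific rates.
 */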
static int get_clocks(struct platform_device *pdev, struct msm_gpu *gpu)
{
	int ret = devm_clk_bulk_get_all(&pdev->dev, &gpu->grp_clks);

	if (ret < 1) {
		gpu->nr_clocks = 0;
		return ret;
	}

	gpu->nr_clocks = ret;

	gpu->core_clk = msm_clk_bulk_get_clock(gpu->grp_clks,
		gpu->nr_clocks, "core");

	gpu->rbbmtimer_clk = msm_clk_bulk_get_clock(gpu->grp_clks,
		gpu->nr_clocks, "rbbmtimer");

	return 0;
}

/*
 * Return a new private VM for the given task, or a reference to the global
 * VM if the target doesn't support per-process VMs:
 */
struct drm_gpuvm *
msm_gpu_create_private_vm(struct msm_gpu *gpu, struct task_struct *task,
		bool kernel_managed)
{
	struct drm_gpuvm *vm = NULL;

	if (!gpu)
		return NULL;

	/*
	 * If the target doesn't support private address spaces then return
	 * the global one
	 */
	if (gpu->funcs->create_private_vm) {
		vm = gpu->funcs->create_private_vm(gpu, kernel_managed);
		if (!IS_ERR(vm))
			to_msm_vm(vm)->pid = get_pid(task_pid(task));
	}

	if (IS_ERR_OR_NULL(vm))
		vm = drm_gpuvm_get(gpu->vm);

	return vm;
}

int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
		struct msm_gpu *gpu, const struct msm_gpu_funcs *funcs,
		const char *name, struct msm_gpu_config *config)
{
	struct msm_drm_private *priv = drm->dev_private;
	int i, ret, nr_rings = config->nr_rings;
	void *memptrs;
	uint64_t memptrs_iova;

	if (WARN_ON(gpu->num_perfcntrs > ARRAY_SIZE(gpu->last_cntrs)))
		gpu->num_perfcntrs = ARRAY_SIZE(gpu->last_cntrs);

	gpu->dev = drm;
	gpu->funcs = funcs;
	gpu->name = name;

	gpu->worker = kthread_run_worker(0, "gpu-worker");
	if (IS_ERR(gpu->worker)) {
		ret = PTR_ERR(gpu->worker);
		gpu->worker = NULL;
		goto fail;
	}

	sched_set_fifo_low(gpu->worker->task);

	mutex_init(&gpu->active_lock);
	mutex_init(&gpu->lock);
	init_waitqueue_head(&gpu->retire_event);
	kthread_init_work(&gpu->retire_work, retire_worker);
	kthread_init_work(&gpu->recover_work, recover_worker);

	priv->hangcheck_period = DRM_MSM_HANGCHECK_DEFAULT_PERIOD;

	/*
	 * If progress detection is supported, halve the hangcheck timer
	 * duration, as it takes two iterations of the hangcheck handler
	 * to detect a hang.
	 */
	if (funcs->progress)
		priv->hangcheck_period /= 2;

	timer_setup(&gpu->hangcheck_timer, hangcheck_handler, 0);

	spin_lock_init(&gpu->perf_lock);

	/* Map registers: */
	gpu->mmio = msm_ioremap(pdev, config->ioname);
	if (IS_ERR(gpu->mmio)) {
		ret = PTR_ERR(gpu->mmio);
		goto fail;
	}

	/* Get Interrupt: */
	gpu->irq = platform_get_irq(pdev, 0);
	if (gpu->irq < 0) {
		ret = gpu->irq;
		goto fail;
	}

	ret = devm_request_irq(&pdev->dev, gpu->irq, irq_handler,
			IRQF_TRIGGER_HIGH, "gpu-irq", gpu);
	if (ret) {
		DRM_DEV_ERROR(drm->dev, "failed to request IRQ%u: %d\n", gpu->irq, ret);
		goto fail;
	}

	ret = get_clocks(pdev, gpu);
	if (ret)
		goto fail;

	gpu->ebi1_clk = msm_clk_get(pdev, "bus");
	DBG("ebi1_clk: %p", gpu->ebi1_clk);
	if (IS_ERR(gpu->ebi1_clk))
		gpu->ebi1_clk = NULL;

	/* Acquire regulators: */
	gpu->gpu_reg = devm_regulator_get(&pdev->dev, "vdd");
	DBG("gpu_reg: %p", gpu->gpu_reg);
	if (IS_ERR(gpu->gpu_reg))
		gpu->gpu_reg = NULL;

	gpu->gpu_cx = devm_regulator_get(&pdev->dev, "vddcx");
	DBG("gpu_cx: %p", gpu->gpu_cx);
	if (IS_ERR(gpu->gpu_cx))
		gpu->gpu_cx = NULL;

	gpu->pdev = pdev;
	platform_set_drvdata(pdev, &gpu->adreno_smmu);

	msm_devfreq_init(gpu);

	gpu->vm = gpu->funcs->create_vm(gpu, pdev);
	if (IS_ERR(gpu->vm)) {
		ret = PTR_ERR(gpu->vm);
		goto fail;
	}

	memptrs = msm_gem_kernel_new(drm,
		sizeof(struct msm_rbmemptrs) * nr_rings,
		check_apriv(gpu, MSM_BO_WC), gpu->vm, &gpu->memptrs_bo,
		&memptrs_iova);

	if (IS_ERR(memptrs)) {
		ret = PTR_ERR(memptrs);
		DRM_DEV_ERROR(drm->dev, "could not allocate memptrs: %d\n", ret);
		goto fail;
	}

	msm_gem_object_set_name(gpu->memptrs_bo, "memptrs");

	if (nr_rings > ARRAY_SIZE(gpu->rb)) {
		DRM_DEV_INFO_ONCE(drm->dev, "Only creating %zu ringbuffers\n",
			ARRAY_SIZE(gpu->rb));
		nr_rings = ARRAY_SIZE(gpu->rb);
	}

	/* Create ringbuffer(s): */
	for (i = 0; i < nr_rings; i++) {
		gpu->rb[i] = msm_ringbuffer_new(gpu, i, memptrs, memptrs_iova);

		if (IS_ERR(gpu->rb[i])) {
			ret = PTR_ERR(gpu->rb[i]);
			DRM_DEV_ERROR(drm->dev,
				"could not create ringbuffer %d: %d\n", i, ret);
			goto fail;
		}

		memptrs += sizeof(struct msm_rbmemptrs);
		memptrs_iova += sizeof(struct msm_rbmemptrs);
	}

	gpu->nr_rings = nr_rings;

	refcount_set(&gpu->sysprof_active, 1);

	return 0;

fail:
	for (i = 0; i < ARRAY_SIZE(gpu->rb); i++) {
		msm_ringbuffer_destroy(gpu->rb[i]);
		gpu->rb[i] = NULL;
	}

	msm_gem_kernel_put(gpu->memptrs_bo, gpu->vm);

	platform_set_drvdata(pdev, NULL);
	return ret;
}

void msm_gpu_cleanup(struct msm_gpu *gpu)
{
	int i;

	DBG("%s", gpu->name);

	for (i = 0; i < ARRAY_SIZE(gpu->rb); i++) {
		msm_ringbuffer_destroy(gpu->rb[i]);
		gpu->rb[i] = NULL;
	}

	msm_gem_kernel_put(gpu->memptrs_bo, gpu->vm);

	if (!IS_ERR_OR_NULL(gpu->vm)) {
		struct msm_mmu *mmu = to_msm_vm(gpu->vm)->mmu;

		mmu->funcs->detach(mmu);
		drm_gpuvm_put(gpu->vm);
	}

	if (gpu->worker)
		kthread_destroy_worker(gpu->worker);

	msm_devfreq_cleanup(gpu);

	platform_set_drvdata(gpu->pdev, NULL);
}