// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#include "drm/drm_drv.h"

#include "msm_gpu.h"
#include "msm_gem.h"
#include "msm_mmu.h"
#include "msm_fence.h"
#include "msm_gpu_trace.h"
//#include "adreno/adreno_gpu.h"

#include <generated/utsrelease.h>
#include <linux/string_helpers.h>
#include <linux/devcoredump.h>
#include <linux/sched/task.h>

/*
 * Power Management:
 */

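/*
 * Enable the GPU power rails: the core rail ("gpu_reg"/vdd) first, then the
 * CX rail ("gpu_cx"/vddcx).  Either regulator may be absent on a given
 * target, in which case it is simply skipped.
 */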
static int enable_pwrrail(struct msm_gpu *gpu)
{
	struct drm_device *dev = gpu->dev;
	int ret = 0;

	if (gpu->gpu_reg) {
		ret = regulator_enable(gpu->gpu_reg);
		if (ret) {
			DRM_DEV_ERROR(dev->dev, "failed to enable 'gpu_reg': %d\n", ret);
			return ret;
		}
	}

	if (gpu->gpu_cx) {
		ret = regulator_enable(gpu->gpu_cx);
		if (ret) {
			DRM_DEV_ERROR(dev->dev, "failed to enable 'gpu_cx': %d\n", ret);
			return ret;
		}
	}

	return 0;
}

static int disable_pwrrail(struct msm_gpu *gpu)
{
	if (gpu->gpu_cx)
		regulator_disable(gpu->gpu_cx);
	if (gpu->gpu_reg)
		regulator_disable(gpu->gpu_reg);
	return 0;
}

static int enable_clk(struct msm_gpu *gpu)
{
	if (gpu->core_clk && gpu->fast_rate)
		dev_pm_opp_set_rate(&gpu->pdev->dev, gpu->fast_rate);

	/* Set the RBBM timer rate to 19.2 MHz */
	if (gpu->rbbmtimer_clk)
		clk_set_rate(gpu->rbbmtimer_clk, 19200000);

	return clk_bulk_prepare_enable(gpu->nr_clocks, gpu->grp_clks);
}

static int disable_clk(struct msm_gpu *gpu)
{
	clk_bulk_disable_unprepare(gpu->nr_clocks, gpu->grp_clks);

	/*
	 * Set the clock to a deliberately low rate. On older targets the clock
	 * speed had to be non zero to avoid problems. On newer targets this
	 * will be rounded down to zero anyway so it all works out.
	 */
	if (gpu->core_clk)
		dev_pm_opp_set_rate(&gpu->pdev->dev, 27000000);

	if (gpu->rbbmtimer_clk)
		clk_set_rate(gpu->rbbmtimer_clk, 0);

	return 0;
}

static int enable_axi(struct msm_gpu *gpu)
{
	return clk_prepare_enable(gpu->ebi1_clk);
}

static int disable_axi(struct msm_gpu *gpu)
{
	clk_disable_unprepare(gpu->ebi1_clk);
	return 0;
}

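/*
 * Runtime resume: power up in dependency order (power rails, core clocks,
 * then the AXI/bus clock), resume devfreq, and flag that the hardware must
 * be re-initialized before the next submission.
 */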
int msm_gpu_pm_resume(struct msm_gpu *gpu)
{
	int ret;

	DBG("%s", gpu->name);
	trace_msm_gpu_resume(0);

	ret = enable_pwrrail(gpu);
	if (ret)
		return ret;

	ret = enable_clk(gpu);
	if (ret)
		return ret;

	ret = enable_axi(gpu);
	if (ret)
		return ret;

	msm_devfreq_resume(gpu);

	gpu->needs_hw_init = true;

	return 0;
}

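/* Runtime suspend: tear everything down in the reverse order of resume. */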
int msm_gpu_pm_suspend(struct msm_gpu *gpu)
{
	int ret;

	DBG("%s", gpu->name);
	trace_msm_gpu_suspend(0);

	msm_devfreq_suspend(gpu);

	ret = disable_axi(gpu);
	if (ret)
		return ret;

	ret = disable_clk(gpu);
	if (ret)
		return ret;

	ret = disable_pwrrail(gpu);
	if (ret)
		return ret;

	gpu->suspend_count++;

	return 0;
}

void msm_gpu_show_fdinfo(struct msm_gpu *gpu, struct msm_context *ctx,
			 struct drm_printer *p)
{
	drm_printf(p, "drm-engine-gpu:\t%llu ns\n", ctx->elapsed_ns);
	drm_printf(p, "drm-cycles-gpu:\t%llu\n", ctx->cycles);
	drm_printf(p, "drm-maxfreq-gpu:\t%u Hz\n", gpu->fast_rate);
}

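/*
 * (Re)initialize the GPU hardware if needed, with the GPU interrupt disabled
 * across the init so the IRQ handler does not run against partially
 * initialized state.  Caller must hold gpu->lock.
 */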
int msm_gpu_hw_init(struct msm_gpu *gpu)
{
	int ret;

	WARN_ON(!mutex_is_locked(&gpu->lock));

	if (!gpu->needs_hw_init)
		return 0;

	disable_irq(gpu->irq);
	ret = gpu->funcs->hw_init(gpu);
	if (!ret)
		gpu->needs_hw_init = false;
	enable_irq(gpu->irq);

	return ret;
}

#ifdef CONFIG_DEV_COREDUMP
static ssize_t msm_gpu_devcoredump_read(char *buffer, loff_t offset,
		size_t count, void *data, size_t datalen)
{
	struct msm_gpu *gpu = data;
	struct drm_print_iterator iter;
	struct drm_printer p;
	struct msm_gpu_state *state;

	state = msm_gpu_crashstate_get(gpu);
	if (!state)
		return 0;

	iter.data = buffer;
	iter.offset = 0;
	iter.start = offset;
	iter.remain = count;

	p = drm_coredump_printer(&iter);

	drm_printf(&p, "---\n");
	drm_printf(&p, "kernel: " UTS_RELEASE "\n");
	drm_printf(&p, "module: " KBUILD_MODNAME "\n");
	drm_printf(&p, "time: %ptSp\n", &state->time);
	if (state->comm)
		drm_printf(&p, "comm: %s\n", state->comm);
	if (state->cmd)
		drm_printf(&p, "cmdline: %s\n", state->cmd);

	gpu->funcs->show(gpu, state, &p);

	msm_gpu_crashstate_put(gpu);

	return count - iter.remain;
}

static void msm_gpu_devcoredump_free(void *data)
{
	struct msm_gpu *gpu = data;

	msm_gpu_crashstate_put(gpu);
}

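/*
 * Snapshot one GEM object into the crash state: always record its iova,
 * size, flags, and name; copy the actual contents only when a full dump is
 * requested for this object.
 */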
static void msm_gpu_crashstate_get_bo(struct msm_gpu_state *state,
		struct drm_gem_object *obj, u64 iova,
		bool full, size_t offset, size_t size)
{
	struct msm_gpu_state_bo *state_bo = &state->bos[state->nr_bos];
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	/* Don't record write only objects */
	state_bo->size = size;
	state_bo->flags = msm_obj->flags;
	state_bo->iova = iova;

	BUILD_BUG_ON(sizeof(state_bo->name) != sizeof(msm_obj->name));

	memcpy(state_bo->name, msm_obj->name, sizeof(state_bo->name));

	if (full) {
		void *ptr;

		state_bo->data = kvmalloc(size, GFP_KERNEL);
		if (!state_bo->data)
			goto out;

		ptr = msm_gem_get_vaddr_active(obj);
		if (IS_ERR(ptr)) {
			kvfree(state_bo->data);
			state_bo->data = NULL;
			goto out;
		}

		memcpy(state_bo->data, ptr + offset, size);
		msm_gem_put_vaddr_locked(obj);
	}
out:
	state->nr_bos++;
}

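/*
 * Collect the buffer objects referenced by the hanging submit.  For VM_BIND
 * contexts we lock and walk every VA in the submit's VM (skipping
 * MAP_NULL/PRR mappings); for legacy submits we walk the submit's BO list.
 * Whether contents are dumped is controlled by rd_full or the per-mapping /
 * per-BO DUMP flag.
 */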
static void crashstate_get_bos(struct msm_gpu_state *state, struct msm_gem_submit *submit)
{
	extern bool rd_full;

	if (msm_context_is_vmbind(submit->queue->ctx)) {
		struct drm_exec exec;
		struct drm_gpuva *vma;
		unsigned cnt = 0;

		drm_exec_init(&exec, DRM_EXEC_IGNORE_DUPLICATES, 0);
		drm_exec_until_all_locked(&exec) {
			cnt = 0;

			drm_exec_lock_obj(&exec, drm_gpuvm_resv_obj(submit->vm));
			drm_exec_retry_on_contention(&exec);

			drm_gpuvm_for_each_va (vma, submit->vm) {
				if (!vma->gem.obj)
					continue;

				cnt++;
				drm_exec_lock_obj(&exec, vma->gem.obj);
				drm_exec_retry_on_contention(&exec);
			}
		}

		drm_gpuvm_for_each_va (vma, submit->vm)
			cnt++;

		state->bos = kcalloc(cnt, sizeof(struct msm_gpu_state_bo), GFP_KERNEL);

		if (state->bos)
			drm_gpuvm_for_each_va (vma, submit->vm) {
				bool dump = rd_full || (vma->flags & MSM_VMA_DUMP);

				/* Skip MAP_NULL/PRR VMAs: */
				if (!vma->gem.obj)
					continue;

				msm_gpu_crashstate_get_bo(state, vma->gem.obj, vma->va.addr,
							  dump, vma->gem.offset, vma->va.range);
			}

		drm_exec_fini(&exec);
	} else {
		state->bos = kcalloc(submit->nr_bos,
			sizeof(struct msm_gpu_state_bo), GFP_KERNEL);

		for (int i = 0; state->bos && i < submit->nr_bos; i++) {
			struct drm_gem_object *obj = submit->bos[i].obj;
			bool dump = rd_full || (submit->bos[i].flags & MSM_SUBMIT_BO_DUMP);

			msm_gem_lock(obj);
			msm_gpu_crashstate_get_bo(state, obj, submit->bos[i].iova,
						  dump, 0, obj->size);
			msm_gem_unlock(obj);
		}
	}
}

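/*
 * Copy the VM's circular log of map/unmap operations into the crash state,
 * oldest entry first, so the devcoredump shows the mapping history leading
 * up to the fault.
 */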
static void crashstate_get_vm_logs(struct msm_gpu_state *state, struct msm_gem_vm *vm)
{
	uint32_t vm_log_len = (1 << vm->log_shift);
	uint32_t vm_log_mask = vm_log_len - 1;
	int first;

	/* Bail if no log, or empty log: */
	if (!vm->log || !vm->log[0].op)
		return;

	mutex_lock(&vm->mmu_lock);

	/*
	 * log_idx is the next entry to overwrite, meaning it is the oldest, or
	 * first, entry (other than the special case handled below where the
	 * log hasn't wrapped around yet)
	 */
	first = vm->log_idx;

	if (!vm->log[first].op) {
		/*
		 * If the next log entry has not been written yet, then only
		 * entries 0 to idx-1 are valid (ie. we haven't wrapped around
		 * yet)
		 */
		state->nr_vm_logs = MAX(0, first - 1);
		first = 0;
	} else {
		state->nr_vm_logs = vm_log_len;
	}

	state->vm_logs = kmalloc_objs(vm->log[0], state->nr_vm_logs);
	if (!state->vm_logs) {
		state->nr_vm_logs = 0;
	}

	for (int i = 0; i < state->nr_vm_logs; i++) {
		int idx = (i + first) & vm_log_mask;

		state->vm_logs[i] = vm->log[idx];
	}

	mutex_unlock(&vm->mmu_lock);
}

static void msm_gpu_crashstate_capture(struct msm_gpu *gpu,
		struct msm_gem_submit *submit, struct msm_gpu_fault_info *fault_info,
		char *comm, char *cmd)
{
	struct msm_gpu_state *state;

	/* Check if the target supports capturing crash state */
	if (!gpu->funcs->gpu_state_get)
		return;

	/* Only save one crash state at a time */
	if (gpu->crashstate)
		return;

	state = gpu->funcs->gpu_state_get(gpu);
	if (IS_ERR_OR_NULL(state))
		return;

	/* Fill in the additional crash state information */
	state->comm = kstrdup(comm, GFP_KERNEL);
	state->cmd = kstrdup(cmd, GFP_KERNEL);
	if (fault_info)
		state->fault_info = *fault_info;

	if (submit && state->fault_info.ttbr0) {
		struct msm_gpu_fault_info *info = &state->fault_info;
		struct msm_mmu *mmu = to_msm_vm(submit->vm)->mmu;

		msm_iommu_pagetable_params(mmu, &info->pgtbl_ttbr0,
					   &info->asid);
		msm_iommu_pagetable_walk(mmu, info->iova, info->ptes);
	}

	if (submit) {
		crashstate_get_vm_logs(state, to_msm_vm(submit->vm));
		crashstate_get_bos(state, submit);
	}

	/* Set the active crash state to be dumped on failure */
	gpu->crashstate = state;

	dev_coredumpm(&gpu->pdev->dev, THIS_MODULE, gpu, 0, GFP_KERNEL,
		msm_gpu_devcoredump_read, msm_gpu_devcoredump_free);
}
#else
static void msm_gpu_crashstate_capture(struct msm_gpu *gpu,
		struct msm_gem_submit *submit, struct msm_gpu_fault_info *fault_info,
		char *comm, char *cmd)
{
}
#endif

/*
 * Hangcheck detection for locked gpu:
 */

static struct msm_gem_submit *
find_submit(struct msm_ringbuffer *ring, uint32_t fence)
{
	struct msm_gem_submit *submit;
	unsigned long flags;

	spin_lock_irqsave(&ring->submit_lock, flags);
	list_for_each_entry(submit, &ring->submits, node) {
		if (submit->seqno == fence) {
			spin_unlock_irqrestore(&ring->submit_lock, flags);
			return submit;
		}
	}
	spin_unlock_irqrestore(&ring->submit_lock, flags);

	return NULL;
}

static void retire_submits(struct msm_gpu *gpu);

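/*
 * Best-effort lookup of the offending process name and cmdline for crash
 * logs: prefer the comm/cmdline the userspace context supplied, falling back
 * to the submitting task.  Either string may end up NULL.
 */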
static void get_comm_cmdline(struct msm_gem_submit *submit, char **comm, char **cmd)
{
	struct msm_context *ctx = submit->queue->ctx;
	struct task_struct *task;

	WARN_ON(!mutex_is_locked(&submit->gpu->lock));

	/* Note that kstrdup will return NULL if argument is NULL: */
	*comm = kstrdup(ctx->comm, GFP_KERNEL);
	*cmd = kstrdup(ctx->cmdline, GFP_KERNEL);

	task = get_pid_task(submit->pid, PIDTYPE_PID);
	if (!task)
		return;

	if (!*comm)
		*comm = kstrdup(task->comm, GFP_KERNEL);

	if (!*cmd)
		*cmd = kstrdup_quotable_cmdline(task, GFP_KERNEL);

	put_task_struct(task);
}

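/*
 * GPU hang recovery, run from the gpu worker thread when recover_work is
 * queued (e.g. by the hangcheck timer): identify and dump the offending
 * submit, capture a devcoredump, fast-forward the per-ring fences past the
 * hung submit, reset the GPU, and replay the surviving submits.
 */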
static void recover_worker(struct kthread_work *work)
{
	struct msm_gpu *gpu = container_of(work, struct msm_gpu, recover_work);
	struct drm_device *dev = gpu->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_submit *submit;
	struct msm_ringbuffer *cur_ring = gpu->funcs->active_ring(gpu);
	char *comm = NULL, *cmd = NULL;
	struct task_struct *task;
	int i;

	mutex_lock(&gpu->lock);

	DRM_DEV_ERROR(dev->dev, "%s: hangcheck recover!\n", gpu->name);

	submit = find_submit(cur_ring, cur_ring->memptrs->fence + 1);

	/*
	 * If the submit retired while we were waiting for the worker to run,
	 * or waiting to acquire the gpu lock, then nothing more to do.
	 */
	if (!submit)
		goto out_unlock;

	/* Increment the fault counts */
	submit->queue->faults++;

	task = get_pid_task(submit->pid, PIDTYPE_PID);
	if (!task)
		gpu->global_faults++;
	else {
		struct msm_gem_vm *vm = to_msm_vm(submit->vm);

		vm->faults++;

		/*
		 * If userspace has opted-in to VM_BIND (and therefore userspace
		 * management of the VM), faults mark the VM as unusable. This
		 * matches vulkan expectations (vulkan is the main target for
		 * VM_BIND).
		 */
		if (!vm->managed)
			msm_gem_vm_unusable(submit->vm);
	}

	get_comm_cmdline(submit, &comm, &cmd);

	if (comm && cmd) {
		DRM_DEV_ERROR(dev->dev, "%s: offending task: %s (%s)\n",
			gpu->name, comm, cmd);

		msm_rd_dump_submit(priv->hangrd, submit,
			"offending task: %s (%s)", comm, cmd);
	} else {
		DRM_DEV_ERROR(dev->dev, "%s: offending task: unknown\n", gpu->name);

		msm_rd_dump_submit(priv->hangrd, submit, NULL);
	}

	/* Record the crash state */
	pm_runtime_get_sync(&gpu->pdev->dev);
	msm_gpu_crashstate_capture(gpu, submit, NULL, comm, cmd);

	kfree(cmd);
	kfree(comm);

	/*
	 * Update all the rings with the latest and greatest fence.. this
	 * needs to happen after msm_rd_dump_submit() to ensure that the
	 * bo's referenced by the offending submit are still around.
	 */
	for (i = 0; i < gpu->nr_rings; i++) {
		struct msm_ringbuffer *ring = gpu->rb[i];

		uint32_t fence = ring->memptrs->fence;

		/*
		 * For the current (faulting?) ring/submit advance the fence by
		 * one more to clear the faulting submit
		 */
		if (ring == cur_ring)
			ring->memptrs->fence = ++fence;

		msm_update_fence(ring->fctx, fence);
	}

	if (msm_gpu_active(gpu)) {
		/* retire completed submits, plus the one that hung: */
		retire_submits(gpu);

		gpu->funcs->recover(gpu);

		/*
		 * Replay all remaining submits starting with highest priority
		 * ring
		 */
		for (i = 0; i < gpu->nr_rings; i++) {
			struct msm_ringbuffer *ring = gpu->rb[i];
			unsigned long flags;

			spin_lock_irqsave(&ring->submit_lock, flags);
			list_for_each_entry(submit, &ring->submits, node) {
				/*
				 * If the submit uses an unusable vm make sure
				 * we don't actually run it
				 */
				if (to_msm_vm(submit->vm)->unusable)
					submit->nr_cmds = 0;
				gpu->funcs->submit(gpu, submit);
			}
			spin_unlock_irqrestore(&ring->submit_lock, flags);
		}
	}

	pm_runtime_put(&gpu->pdev->dev);

out_unlock:
	mutex_unlock(&gpu->lock);

	msm_gpu_retire(gpu);
}

void msm_gpu_fault_crashstate_capture(struct msm_gpu *gpu, struct msm_gpu_fault_info *fault_info)
{
	struct msm_gem_submit *submit;
	struct msm_ringbuffer *cur_ring = gpu->funcs->active_ring(gpu);
	char *comm = NULL, *cmd = NULL;

	mutex_lock(&gpu->lock);

	submit = find_submit(cur_ring, cur_ring->memptrs->fence + 1);
	if (submit && submit->fault_dumped)
		goto resume_smmu;

	if (submit) {
		get_comm_cmdline(submit, &comm, &cmd);

		/*
		 * When we get GPU iova faults, we can get 1000s of them,
		 * but we really only want to log the first one.
		 */
		submit->fault_dumped = true;
	}

	/* Record the crash state */
	pm_runtime_get_sync(&gpu->pdev->dev);
	msm_gpu_crashstate_capture(gpu, submit, fault_info, comm, cmd);
	pm_runtime_put_sync(&gpu->pdev->dev);

	kfree(cmd);
	kfree(comm);

resume_smmu:
	mutex_unlock(&gpu->lock);
}

static void hangcheck_timer_reset(struct msm_gpu *gpu)
{
	struct msm_drm_private *priv = gpu->dev->dev_private;
	mod_timer(&gpu->hangcheck_timer,
			round_jiffies_up(jiffies + msecs_to_jiffies(priv->hangcheck_period)));
}

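/*
 * Returns true if the ring still appears to be making forward progress (per
 * the GPU-specific ->progress() check), in which case hangcheck grants it a
 * limited number of extra retries before declaring a hang.
 */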
static bool made_progress(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
{
	if (ring->hangcheck_progress_retries >= DRM_MSM_HANGCHECK_PROGRESS_RETRIES)
		return false;

	if (!gpu->funcs->progress)
		return false;

	if (!gpu->funcs->progress(gpu, ring))
		return false;

	ring->hangcheck_progress_retries++;
	return true;
}

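/*
 * Periodic hangcheck: if the active ring's completed fence has not advanced
 * since the last check, there is still unretired work, and the ring is not
 * making forward progress, queue recover_work.  The timer is re-armed while
 * work remains pending.
 */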
static void hangcheck_handler(struct timer_list *t)
{
	struct msm_gpu *gpu = timer_container_of(gpu, t, hangcheck_timer);
	struct drm_device *dev = gpu->dev;
	struct msm_ringbuffer *ring = gpu->funcs->active_ring(gpu);
	uint32_t fence = ring->memptrs->fence;

	if (fence != ring->hangcheck_fence) {
		/* some progress has been made.. ya! */
		ring->hangcheck_fence = fence;
		ring->hangcheck_progress_retries = 0;
	} else if (fence_before(fence, ring->fctx->last_fence) &&
			!made_progress(gpu, ring)) {
		/* no progress and not done.. hung! */
		ring->hangcheck_fence = fence;
		ring->hangcheck_progress_retries = 0;
		DRM_DEV_ERROR(dev->dev, "%s: hangcheck detected gpu lockup rb %d!\n",
				gpu->name, ring->id);
		DRM_DEV_ERROR(dev->dev, "%s: completed fence: %u\n",
				gpu->name, fence);
		DRM_DEV_ERROR(dev->dev, "%s: submitted fence: %u\n",
				gpu->name, ring->fctx->last_fence);

		kthread_queue_work(gpu->worker, &gpu->recover_work);
	}

	/* if still more pending work, reset the hangcheck timer: */
	if (fence_after(ring->fctx->last_fence, ring->hangcheck_fence))
		hangcheck_timer_reset(gpu);

	/* workaround for missing irq: */
	msm_gpu_retire(gpu);
}

/*
 * Performance Counters:
 */

/* called under perf_lock */
static int update_hw_cntrs(struct msm_gpu *gpu, uint32_t ncntrs, uint32_t *cntrs)
{
	uint32_t current_cntrs[ARRAY_SIZE(gpu->last_cntrs)];
	int i, n = min(ncntrs, gpu->num_perfcntrs);

	/* read current values: */
	for (i = 0; i < gpu->num_perfcntrs; i++)
		current_cntrs[i] = gpu_read(gpu, gpu->perfcntrs[i].sample_reg);

	/* update cntrs: */
	for (i = 0; i < n; i++)
		cntrs[i] = current_cntrs[i] - gpu->last_cntrs[i];

	/* save current values: */
	for (i = 0; i < gpu->num_perfcntrs; i++)
		gpu->last_cntrs[i] = current_cntrs[i];

	return n;
}

static void update_sw_cntrs(struct msm_gpu *gpu)
{
	ktime_t time;
	uint32_t elapsed;
	unsigned long flags;

	spin_lock_irqsave(&gpu->perf_lock, flags);
	if (!gpu->perfcntr_active)
		goto out;

	time = ktime_get();
	elapsed = ktime_to_us(ktime_sub(time, gpu->last_sample.time));

	gpu->totaltime += elapsed;
	if (gpu->last_sample.active)
		gpu->activetime += elapsed;

	gpu->last_sample.active = msm_gpu_active(gpu);
	gpu->last_sample.time = time;

out:
	spin_unlock_irqrestore(&gpu->perf_lock, flags);
}

void msm_gpu_perfcntr_start(struct msm_gpu *gpu)
{
	unsigned long flags;

	pm_runtime_get_sync(&gpu->pdev->dev);

	spin_lock_irqsave(&gpu->perf_lock, flags);
	/* we could dynamically enable/disable perfcntr registers too.. */
	gpu->last_sample.active = msm_gpu_active(gpu);
	gpu->last_sample.time = ktime_get();
	gpu->activetime = gpu->totaltime = 0;
	gpu->perfcntr_active = true;
	update_hw_cntrs(gpu, 0, NULL);
	spin_unlock_irqrestore(&gpu->perf_lock, flags);
}

void msm_gpu_perfcntr_stop(struct msm_gpu *gpu)
{
	gpu->perfcntr_active = false;
	pm_runtime_put_sync(&gpu->pdev->dev);
}

/* returns -errno or # of cntrs sampled */
int msm_gpu_perfcntr_sample(struct msm_gpu *gpu, uint32_t *activetime,
		uint32_t *totaltime, uint32_t ncntrs, uint32_t *cntrs)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&gpu->perf_lock, flags);

	if (!gpu->perfcntr_active) {
		ret = -EINVAL;
		goto out;
	}

	*activetime = gpu->activetime;
	*totaltime = gpu->totaltime;

	gpu->activetime = gpu->totaltime = 0;

	ret = update_hw_cntrs(gpu, ncntrs, cntrs);

out:
	spin_unlock_irqrestore(&gpu->perf_lock, flags);

	return ret;
}

/*
 * Cmdstream submission/retirement:
 */

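/*
 * Retire a single completed submit: convert the per-submit stats captured by
 * the CP (19.2 MHz always-on counter ticks and CP cycles) into elapsed time
 * and an effective clock frequency, account them to the submitting context
 * for fdinfo, drop the submit from the ring list, and update devfreq /
 * runtime PM on the active->idle transition.
 */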
static void retire_submit(struct msm_gpu *gpu, struct msm_ringbuffer *ring,
		struct msm_gem_submit *submit)
{
	int index = submit->seqno % MSM_GPU_SUBMIT_STATS_COUNT;
	volatile struct msm_gpu_submit_stats *stats;
	u64 elapsed, clock = 0, cycles;
	unsigned long flags;

	stats = &ring->memptrs->stats[index];
	/* Convert 19.2 MHz alwayson ticks to nanoseconds for elapsed time */
	elapsed = (stats->alwayson_end - stats->alwayson_start) * 10000;
	do_div(elapsed, 192);

	cycles = stats->cpcycles_end - stats->cpcycles_start;

	/* Calculate the clock frequency from the number of CP cycles */
	if (elapsed) {
		clock = cycles * 1000;
		do_div(clock, elapsed);
	}

	submit->queue->ctx->elapsed_ns += elapsed;
	submit->queue->ctx->cycles += cycles;

	trace_msm_gpu_submit_retired(submit, elapsed, clock,
		stats->alwayson_start, stats->alwayson_end);

	msm_submit_retire(submit);

	pm_runtime_mark_last_busy(&gpu->pdev->dev);

	spin_lock_irqsave(&ring->submit_lock, flags);
	list_del(&submit->node);
	spin_unlock_irqrestore(&ring->submit_lock, flags);

	/* Update devfreq on transition from active->idle: */
	mutex_lock(&gpu->active_lock);
	gpu->active_submits--;
	WARN_ON(gpu->active_submits < 0);
	if (!gpu->active_submits) {
		msm_devfreq_idle(gpu);
		pm_runtime_put_autosuspend(&gpu->pdev->dev);
	}

	mutex_unlock(&gpu->active_lock);

	msm_gem_submit_put(submit);
}

static void retire_submits(struct msm_gpu *gpu)
{
	int i;

	/* Retire the commits starting with highest priority */
	for (i = 0; i < gpu->nr_rings; i++) {
		struct msm_ringbuffer *ring = gpu->rb[i];

		while (true) {
			struct msm_gem_submit *submit = NULL;
			unsigned long flags;

			spin_lock_irqsave(&ring->submit_lock, flags);
			submit = list_first_entry_or_null(&ring->submits,
				struct msm_gem_submit, node);
			spin_unlock_irqrestore(&ring->submit_lock, flags);

			/*
			 * If no submit, we are done. If submit->fence hasn't
			 * been signalled, then later submits are not signalled
			 * either, so we are also done.
			 */
			if (submit && dma_fence_is_signaled(submit->hw_fence)) {
				retire_submit(gpu, ring, submit);
			} else {
				break;
			}
		}
	}

	wake_up_all(&gpu->retire_event);
}

static void retire_worker(struct kthread_work *work)
{
	struct msm_gpu *gpu = container_of(work, struct msm_gpu, retire_work);

	retire_submits(gpu);
}

/* call from irq handler to schedule work to retire bo's */
void msm_gpu_retire(struct msm_gpu *gpu)
{
	int i;

	for (i = 0; i < gpu->nr_rings; i++)
		msm_update_fence(gpu->rb[i]->fctx, gpu->rb[i]->memptrs->fence);

	kthread_queue_work(gpu->worker, &gpu->retire_work);
	update_sw_cntrs(gpu);
}

/* add bo's to gpu's ring, and kick gpu: */
void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
{
	struct msm_ringbuffer *ring = submit->ring;
	unsigned long flags;

	WARN_ON(!mutex_is_locked(&gpu->lock));

	pm_runtime_get_sync(&gpu->pdev->dev);

	msm_gpu_hw_init(gpu);

	submit->seqno = submit->hw_fence->seqno;

	update_sw_cntrs(gpu);

	/*
	 * ring->submits holds a ref to the submit, to deal with the case
	 * that a submit completes before msm_ioctl_gem_submit() returns.
	 */
	msm_gem_submit_get(submit);

	spin_lock_irqsave(&ring->submit_lock, flags);
	list_add_tail(&submit->node, &ring->submits);
	spin_unlock_irqrestore(&ring->submit_lock, flags);

	/* Update devfreq on transition from idle->active: */
	mutex_lock(&gpu->active_lock);
	if (!gpu->active_submits) {
		pm_runtime_get(&gpu->pdev->dev);
		msm_devfreq_active(gpu);
	}
	gpu->active_submits++;
	mutex_unlock(&gpu->active_lock);

	gpu->funcs->submit(gpu, submit);
	submit->ring->cur_ctx_seqno = submit->queue->ctx->seqno;

	pm_runtime_put(&gpu->pdev->dev);
	hangcheck_timer_reset(gpu);
}

/*
 * Init/Cleanup:
 */

static irqreturn_t irq_handler(int irq, void *data)
{
	struct msm_gpu *gpu = data;
	return gpu->funcs->irq(gpu);
}

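/*
 * Look up all clocks declared for the GPU device and cache pointers to the
 * "core" and "rbbmtimer" clocks, which are driven individually elsewhere in
 * this file.
 */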
static int get_clocks(struct platform_device *pdev, struct msm_gpu *gpu)
{
	int ret = devm_clk_bulk_get_all(&pdev->dev, &gpu->grp_clks);

	if (ret < 1) {
		gpu->nr_clocks = 0;
		return ret;
	}

	gpu->nr_clocks = ret;

	gpu->core_clk = msm_clk_bulk_get_clock(gpu->grp_clks,
		gpu->nr_clocks, "core");

	gpu->rbbmtimer_clk = msm_clk_bulk_get_clock(gpu->grp_clks,
		gpu->nr_clocks, "rbbmtimer");

	return 0;
}

/* Return a new address space for a msm_drm_private instance */
struct drm_gpuvm *
msm_gpu_create_private_vm(struct msm_gpu *gpu, struct task_struct *task,
		bool kernel_managed)
{
	struct drm_gpuvm *vm = NULL;

	if (!gpu)
		return NULL;

	/*
	 * If the target doesn't support private address spaces then return
	 * the global one
	 */
	if (gpu->funcs->create_private_vm) {
		vm = gpu->funcs->create_private_vm(gpu, kernel_managed);
		if (!IS_ERR(vm))
			to_msm_vm(vm)->pid = get_pid(task_pid(task));
	}

	if (IS_ERR_OR_NULL(vm))
		vm = drm_gpuvm_get(gpu->vm);

	return vm;
}

int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
		struct msm_gpu *gpu, const struct msm_gpu_funcs *funcs,
		const char *name, struct msm_gpu_config *config)
{
	struct msm_drm_private *priv = drm->dev_private;
	int i, ret, nr_rings = config->nr_rings;
	void *memptrs;
	uint64_t memptrs_iova;

	if (WARN_ON(gpu->num_perfcntrs > ARRAY_SIZE(gpu->last_cntrs)))
		gpu->num_perfcntrs = ARRAY_SIZE(gpu->last_cntrs);

	gpu->dev = drm;
	gpu->funcs = funcs;
	gpu->name = name;

	gpu->worker = kthread_run_worker(0, "gpu-worker");
	if (IS_ERR(gpu->worker)) {
		ret = PTR_ERR(gpu->worker);
		gpu->worker = NULL;
		goto fail;
	}

	sched_set_fifo_low(gpu->worker->task);

	mutex_init(&gpu->active_lock);
	mutex_init(&gpu->lock);
	init_waitqueue_head(&gpu->retire_event);
	kthread_init_work(&gpu->retire_work, retire_worker);
	kthread_init_work(&gpu->recover_work, recover_worker);

	priv->hangcheck_period = DRM_MSM_HANGCHECK_DEFAULT_PERIOD;

	/*
	 * If progress detection is supported, halve the hangcheck timer
	 * duration, as it takes two iterations of the hangcheck handler
	 * to detect a hang.
	 */
	if (funcs->progress)
		priv->hangcheck_period /= 2;

	timer_setup(&gpu->hangcheck_timer, hangcheck_handler, 0);

	spin_lock_init(&gpu->perf_lock);

	/* Map registers: */
	gpu->mmio = msm_ioremap(pdev, config->ioname);
	if (IS_ERR(gpu->mmio)) {
		ret = PTR_ERR(gpu->mmio);
		goto fail;
	}

	/* Get Interrupt: */
	gpu->irq = platform_get_irq(pdev, 0);
	if (gpu->irq < 0) {
		ret = gpu->irq;
		goto fail;
	}

	ret = devm_request_irq(&pdev->dev, gpu->irq, irq_handler,
			IRQF_TRIGGER_HIGH, "gpu-irq", gpu);
	if (ret) {
		DRM_DEV_ERROR(drm->dev, "failed to request IRQ%u: %d\n", gpu->irq, ret);
		goto fail;
	}

	ret = get_clocks(pdev, gpu);
	if (ret)
		goto fail;

	gpu->ebi1_clk = msm_clk_get(pdev, "bus");
	DBG("ebi1_clk: %p", gpu->ebi1_clk);
	if (IS_ERR(gpu->ebi1_clk))
		gpu->ebi1_clk = NULL;

	/* Acquire regulators: */
	gpu->gpu_reg = devm_regulator_get(&pdev->dev, "vdd");
	DBG("gpu_reg: %p", gpu->gpu_reg);
	if (IS_ERR(gpu->gpu_reg))
		gpu->gpu_reg = NULL;

	gpu->gpu_cx = devm_regulator_get(&pdev->dev, "vddcx");
	DBG("gpu_cx: %p", gpu->gpu_cx);
	if (IS_ERR(gpu->gpu_cx))
		gpu->gpu_cx = NULL;

	platform_set_drvdata(pdev, &gpu->adreno_smmu);

	msm_devfreq_init(gpu);

	gpu->vm = gpu->funcs->create_vm(gpu, pdev);
	if (IS_ERR(gpu->vm)) {
		ret = PTR_ERR(gpu->vm);
		goto fail;
	}

	memptrs = msm_gem_kernel_new(drm,
		sizeof(struct msm_rbmemptrs) * nr_rings,
		check_apriv(gpu, MSM_BO_WC), gpu->vm, &gpu->memptrs_bo,
		&memptrs_iova);

	if (IS_ERR(memptrs)) {
		ret = PTR_ERR(memptrs);
		DRM_DEV_ERROR(drm->dev, "could not allocate memptrs: %d\n", ret);
		goto fail;
	}

	msm_gem_object_set_name(gpu->memptrs_bo, "memptrs");

	if (nr_rings > ARRAY_SIZE(gpu->rb)) {
		DRM_DEV_INFO_ONCE(drm->dev, "Only creating %zu ringbuffers\n",
			ARRAY_SIZE(gpu->rb));
		nr_rings = ARRAY_SIZE(gpu->rb);
	}

	/* Create ringbuffer(s): */
	for (i = 0; i < nr_rings; i++) {
		gpu->rb[i] = msm_ringbuffer_new(gpu, i, memptrs, memptrs_iova);

		if (IS_ERR(gpu->rb[i])) {
			ret = PTR_ERR(gpu->rb[i]);
			DRM_DEV_ERROR(drm->dev,
				"could not create ringbuffer %d: %d\n", i, ret);
			goto fail;
		}

		memptrs += sizeof(struct msm_rbmemptrs);
		memptrs_iova += sizeof(struct msm_rbmemptrs);
	}

	gpu->nr_rings = nr_rings;

	refcount_set(&gpu->sysprof_active, 1);

	return 0;

fail:
	for (i = 0; i < ARRAY_SIZE(gpu->rb); i++) {
		msm_ringbuffer_destroy(gpu->rb[i]);
		gpu->rb[i] = NULL;
	}

	msm_gem_kernel_put(gpu->memptrs_bo, gpu->vm);

	platform_set_drvdata(pdev, NULL);
	return ret;
}

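/*
 * Undo msm_gpu_init(): destroy the ringbuffers and shared memptrs BO, detach
 * the MMU and drop the kernel VM, stop the worker thread, and tear down
 * devfreq.
 */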
void msm_gpu_cleanup(struct msm_gpu *gpu)
{
	int i;

	DBG("%s", gpu->name);

	for (i = 0; i < ARRAY_SIZE(gpu->rb); i++) {
		msm_ringbuffer_destroy(gpu->rb[i]);
		gpu->rb[i] = NULL;
	}

	msm_gem_kernel_put(gpu->memptrs_bo, gpu->vm);

	if (!IS_ERR_OR_NULL(gpu->vm)) {
		struct msm_mmu *mmu = to_msm_vm(gpu->vm)->mmu;
		mmu->funcs->detach(mmu);
		drm_gpuvm_put(gpu->vm);
	}

	if (gpu->worker) {
		kthread_destroy_worker(gpu->worker);
	}

	msm_devfreq_cleanup(gpu);

	platform_set_drvdata(gpu->pdev, NULL);
}