xref: /linux/drivers/gpu/drm/msm/disp/msm_disp_snapshot_util.c (revision ec8c17e5ecb4a5a74069687ccb6d2cfe1851302e)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
 */

#define pr_fmt(fmt)	"[drm:%s:%d] " fmt, __func__, __LINE__

#include <generated/utsrelease.h>

#include "msm_disp_snapshot.h"

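/*
 * Snapshot a register range into a freshly allocated buffer: the MMIO region
 * is read in rows of four 32-bit words (one row per REG_DUMP_ALIGN bytes)
 * with readl_relaxed(), and words past the end of the range are stored as
 * zeroes. On allocation failure *reg stays NULL and nothing is captured.
 */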
static void msm_disp_state_dump_regs(u32 **reg, u32 aligned_len, void __iomem *base_addr)
{
	u32 len_padded;
	u32 num_rows;
	u32 x0, x4, x8, xc;
	void __iomem *addr;
	u32 *dump_addr = NULL;
	void __iomem *end_addr;
	int i;

	len_padded = aligned_len * REG_DUMP_ALIGN;
	num_rows = aligned_len / REG_DUMP_ALIGN;

	addr = base_addr;
	end_addr = base_addr + aligned_len;

	*reg = kvzalloc(len_padded, GFP_KERNEL);
	if (!*reg)
		return;

	dump_addr = *reg;
	for (i = 0; i < num_rows; i++) {
		x0 = (addr < end_addr) ? readl_relaxed(addr + 0x0) : 0;
		x4 = (addr + 0x4 < end_addr) ? readl_relaxed(addr + 0x4) : 0;
		x8 = (addr + 0x8 < end_addr) ? readl_relaxed(addr + 0x8) : 0;
		xc = (addr + 0xc < end_addr) ? readl_relaxed(addr + 0xc) : 0;

		dump_addr[i * 4] = x0;
		dump_addr[i * 4 + 1] = x4;
		dump_addr[i * 4 + 2] = x8;
		dump_addr[i * 4 + 3] = xc;

		addr += REG_DUMP_ALIGN;
	}
}

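/*
 * Print a previously captured register block through the drm_printer, one
 * line per REG_DUMP_ALIGN-byte row, prefixed with the offset from base_addr.
 */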
static void msm_disp_state_print_regs(const u32 *dump_addr, u32 len,
		void __iomem *base_addr, struct drm_printer *p)
{
	int i;
	void __iomem *addr;
	u32 num_rows;

	if (!dump_addr) {
		drm_printf(p, "Registers not stored\n");
		return;
	}

	addr = base_addr;
	num_rows = len / REG_DUMP_ALIGN;

	for (i = 0; i < num_rows; i++) {
		drm_printf(p, "0x%lx : %08x %08x %08x %08x\n",
				(unsigned long)(addr - base_addr),
				dump_addr[i * 4], dump_addr[i * 4 + 1],
				dump_addr[i * 4 + 2], dump_addr[i * 4 + 3]);
		addr += REG_DUMP_ALIGN;
	}
}

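/*
 * Write out the whole snapshot: kernel and module identification, capture
 * time, every captured hardware block, and finally the duplicated atomic
 * state.
 */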
void msm_disp_state_print(struct msm_disp_state *state, struct drm_printer *p)
{
	struct msm_disp_state_block *block, *tmp;

	if (!p) {
		DRM_ERROR("invalid drm printer\n");
		return;
	}

	drm_printf(p, "---\n");
	drm_printf(p, "kernel: " UTS_RELEASE "\n");
	drm_printf(p, "module: " KBUILD_MODNAME "\n");
	drm_printf(p, "dpu devcoredump\n");
	drm_printf(p, "time: %lld.%09ld\n",
		state->time.tv_sec, state->time.tv_nsec);

	list_for_each_entry_safe(block, tmp, &state->blocks, node) {
		drm_printf(p, "====================%s================\n", block->name);
		msm_disp_state_print_regs(block->state, block->size, block->base_addr, p);
	}

	drm_printf(p, "===================dpu drm state================\n");

	if (state->atomic_state)
		drm_atomic_print_new_state(state->atomic_state, p);
}

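/*
 * Record the capture time and duplicate the current atomic state while
 * holding all modeset locks, backing off and retrying on contention.
 */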
static void msm_disp_capture_atomic_state(struct msm_disp_state *disp_state)
{
	struct drm_device *ddev;
	struct drm_modeset_acquire_ctx ctx;

	ktime_get_real_ts64(&disp_state->time);

	ddev = disp_state->drm_dev;

	drm_modeset_acquire_init(&ctx, 0);

	while (drm_modeset_lock_all_ctx(ddev, &ctx) != 0)
		drm_modeset_backoff(&ctx);

	disp_state->atomic_state = drm_atomic_helper_duplicate_state(ddev,
			&ctx);
	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);
}

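/*
 * Walk the DP and DSI interfaces plus the KMS core, letting each one add its
 * register blocks to the snapshot, and capture the atomic state last.
 */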
void msm_disp_snapshot_capture_state(struct msm_disp_state *disp_state)
{
	struct msm_drm_private *priv;
	struct drm_device *drm_dev;
	struct msm_kms *kms;
	int i;

	drm_dev = disp_state->drm_dev;
	priv = drm_dev->dev_private;
	kms = priv->kms;

	for (i = 0; i < ARRAY_SIZE(priv->dp); i++) {
		if (!priv->dp[i])
			continue;

		msm_dp_snapshot(disp_state, priv->dp[i]);
	}

	for (i = 0; i < ARRAY_SIZE(priv->dsi); i++) {
		if (!priv->dsi[i])
			continue;

		msm_dsi_snapshot(disp_state, priv->dsi[i]);
	}

	if (kms->funcs->snapshot)
		kms->funcs->snapshot(disp_state, kms);

	msm_disp_capture_atomic_state(disp_state);
}

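/*
 * Release a snapshot: drop the duplicated atomic state, free every register
 * block (both the dumped registers and the block descriptor), and finally
 * free the state container itself.
 */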
void msm_disp_state_free(void *data)
{
	struct msm_disp_state *disp_state = data;
	struct msm_disp_state_block *block, *tmp;

	if (disp_state->atomic_state) {
		drm_atomic_state_put(disp_state->atomic_state);
		disp_state->atomic_state = NULL;
	}

	list_for_each_entry_safe(block, tmp, &disp_state->blocks, node) {
		list_del(&block->node);
		kvfree(block->state);
		kfree(block);
	}

	kfree(disp_state);
}

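/*
 * Append a named register block to the snapshot. The name is built from the
 * printf-style arguments, the length is rounded up to REG_DUMP_ALIGN, and the
 * registers are read immediately via msm_disp_state_dump_regs().
 */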
void msm_disp_snapshot_add_block(struct msm_disp_state *disp_state, u32 len,
		void __iomem *base_addr, const char *fmt, ...)
{
	struct msm_disp_state_block *new_blk;
	struct va_format vaf;
	va_list va;

	new_blk = kzalloc(sizeof(struct msm_disp_state_block), GFP_KERNEL);
	if (!new_blk)
		return;

	va_start(va, fmt);

	vaf.fmt = fmt;
	vaf.va = &va;
	snprintf(new_blk->name, sizeof(new_blk->name), "%pV", &vaf);

	va_end(va);

	INIT_LIST_HEAD(&new_blk->node);
	new_blk->size = ALIGN(len, REG_DUMP_ALIGN);
	new_blk->base_addr = base_addr;

	msm_disp_state_dump_regs(&new_blk->state, new_blk->size, base_addr);
	list_add_tail(&new_blk->node, &disp_state->blocks);
}
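/*
 * Illustrative use only (not part of this file): a KMS or interface snapshot
 * hook is expected to call msm_disp_snapshot_add_block() once per register
 * range it wants in the devcoredump, roughly along these lines. The helper,
 * offsets, and lengths below are placeholders, not real hardware values.
 *
 *	void example_snapshot(struct msm_disp_state *state, struct msm_kms *kms)
 *	{
 *		void __iomem *mmio = example_get_mmio(kms);	// hypothetical helper
 *
 *		msm_disp_snapshot_add_block(state, 0x100, mmio + 0x0, "example_top");
 *		msm_disp_snapshot_add_block(state, 0x200, mmio + 0x1000, "example_ctl_%d", 0);
 *	}
 */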