xref: /linux/drivers/gpu/drm/vc4/vc4_perfmon.c (revision befcc89362383208f62b15887592758165459e3d)
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2018 Broadcom
 */

/**
 * DOC: VC4 V3D performance monitor module
 *
 * The V3D block provides 16 hardware counters which can count various events.
 */

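/*
 * A minimal sketch of the expected userspace flow, for illustration only
 * (it assumes the vc4 UAPI from include/uapi/drm/vc4_drm.h and an already
 * open DRM fd; the two event indices are arbitrary values below
 * VC4_PERFCNT_NUM_EVENTS):
 *
 *	uint64_t values[2];
 *	struct drm_vc4_perfmon_create create = { .ncounters = 2, .events = { 0, 1 } };
 *	struct drm_vc4_perfmon_get_values get = { .values_ptr = (uintptr_t)values };
 *	struct drm_vc4_perfmon_destroy destroy;
 *
 *	ioctl(fd, DRM_IOCTL_VC4_PERFMON_CREATE, &create);
 *	... submit CLs with drm_vc4_submit_cl.perfmonid = create.id ...
 *	get.id = create.id;
 *	ioctl(fd, DRM_IOCTL_VC4_PERFMON_GET_VALUES, &get);
 *	destroy.id = create.id;
 *	ioctl(fd, DRM_IOCTL_VC4_PERFMON_DESTROY, &destroy);
 */
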
#include "vc4_drv.h"
#include "vc4_regs.h"

#define VC4_PERFMONID_MIN	1
#define VC4_PERFMONID_MAX	U32_MAX

void vc4_perfmon_get(struct vc4_perfmon *perfmon)
{
	struct vc4_dev *vc4;

	if (!perfmon)
		return;

	vc4 = perfmon->dev;
	if (WARN_ON_ONCE(vc4->is_vc5))
		return;

	refcount_inc(&perfmon->refcnt);
}

void vc4_perfmon_put(struct vc4_perfmon *perfmon)
{
	struct vc4_dev *vc4;

	if (!perfmon)
		return;

	vc4 = perfmon->dev;
	if (WARN_ON_ONCE(vc4->is_vc5))
		return;

	if (refcount_dec_and_test(&perfmon->refcnt))
		kfree(perfmon);
}

void vc4_perfmon_start(struct vc4_dev *vc4, struct vc4_perfmon *perfmon)
{
	unsigned int i;
	u32 mask;

	if (WARN_ON_ONCE(vc4->is_vc5))
		return;

	if (WARN_ON_ONCE(!perfmon || vc4->active_perfmon))
		return;

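	/* Select the requested event for each counter we are going to use. */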
	for (i = 0; i < perfmon->ncounters; i++)
		V3D_WRITE(V3D_PCTRS(i), perfmon->events[i]);

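	/*
	 * Clear the counters we are about to use, then enable them;
	 * V3D_PCTRE_EN is the global performance counter enable bit.
	 */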
	mask = GENMASK(perfmon->ncounters - 1, 0);
	V3D_WRITE(V3D_PCTRC, mask);
	V3D_WRITE(V3D_PCTRE, V3D_PCTRE_EN | mask);
	vc4->active_perfmon = perfmon;
}

void vc4_perfmon_stop(struct vc4_dev *vc4, struct vc4_perfmon *perfmon,
		      bool capture)
{
	unsigned int i;

	if (WARN_ON_ONCE(vc4->is_vc5))
		return;

	if (WARN_ON_ONCE(!vc4->active_perfmon ||
			 perfmon != vc4->active_perfmon))
		return;

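	/*
	 * Accumulate into the perfmon counters rather than overwriting them:
	 * the same perfmon can be started and stopped once per submitted job.
	 */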
	if (capture) {
		for (i = 0; i < perfmon->ncounters; i++)
			perfmon->counters[i] += V3D_READ(V3D_PCTR(i));
	}

	V3D_WRITE(V3D_PCTRE, 0);
	vc4->active_perfmon = NULL;
}

struct vc4_perfmon *vc4_perfmon_find(struct vc4_file *vc4file, int id)
{
	struct vc4_dev *vc4 = vc4file->dev;
	struct vc4_perfmon *perfmon;

	if (WARN_ON_ONCE(vc4->is_vc5))
		return NULL;

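	/*
	 * Look up the perfmon and take a reference under the per-file lock so
	 * it cannot be destroyed while the caller is using it.
	 */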
	mutex_lock(&vc4file->perfmon.lock);
	perfmon = idr_find(&vc4file->perfmon.idr, id);
	vc4_perfmon_get(perfmon);
	mutex_unlock(&vc4file->perfmon.lock);

	return perfmon;
}

void vc4_perfmon_open_file(struct vc4_file *vc4file)
{
	struct vc4_dev *vc4 = vc4file->dev;

	if (WARN_ON_ONCE(vc4->is_vc5))
		return;

	mutex_init(&vc4file->perfmon.lock);
	idr_init_base(&vc4file->perfmon.idr, VC4_PERFMONID_MIN);
	vc4file->dev = vc4;
}

static int vc4_perfmon_idr_del(int id, void *elem, void *data)
{
	struct vc4_perfmon *perfmon = elem;
	struct vc4_dev *vc4 = (struct vc4_dev *)data;

	/* If the active perfmon is being destroyed, stop it first */
	if (perfmon == vc4->active_perfmon)
		vc4_perfmon_stop(vc4, perfmon, false);

	vc4_perfmon_put(perfmon);

	return 0;
}

void vc4_perfmon_close_file(struct vc4_file *vc4file)
{
	struct vc4_dev *vc4 = vc4file->dev;

	if (WARN_ON_ONCE(vc4->is_vc5))
		return;

	mutex_lock(&vc4file->perfmon.lock);
	idr_for_each(&vc4file->perfmon.idr, vc4_perfmon_idr_del, vc4);
	idr_destroy(&vc4file->perfmon.idr);
	mutex_unlock(&vc4file->perfmon.lock);
	mutex_destroy(&vc4file->perfmon.lock);
}

int vc4_perfmon_create_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_file *vc4file = file_priv->driver_priv;
	struct drm_vc4_perfmon_create *req = data;
	struct vc4_perfmon *perfmon;
	unsigned int i;
	int ret;

	if (WARN_ON_ONCE(vc4->is_vc5))
		return -ENODEV;

	if (!vc4->v3d) {
		DRM_DEBUG("Creating perfmon no VC4 V3D probed\n");
		return -ENODEV;
	}

	/* Number of monitored counters cannot exceed HW limits. */
	if (req->ncounters > DRM_VC4_MAX_PERF_COUNTERS ||
	    !req->ncounters)
		return -EINVAL;

	/* Make sure all events are valid. */
	for (i = 0; i < req->ncounters; i++) {
		if (req->events[i] >= VC4_PERFCNT_NUM_EVENTS)
			return -EINVAL;
	}

	perfmon = kzalloc(struct_size(perfmon, counters, req->ncounters),
			  GFP_KERNEL);
	if (!perfmon)
		return -ENOMEM;
	perfmon->dev = vc4;

	for (i = 0; i < req->ncounters; i++)
		perfmon->events[i] = req->events[i];

	perfmon->ncounters = req->ncounters;

	refcount_set(&perfmon->refcnt, 1);

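	/*
	 * Publish the perfmon in the per-file IDR; the allocated ID is
	 * returned to userspace and used as the perfmon handle from then on.
	 */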
	mutex_lock(&vc4file->perfmon.lock);
	ret = idr_alloc(&vc4file->perfmon.idr, perfmon, VC4_PERFMONID_MIN,
			VC4_PERFMONID_MAX, GFP_KERNEL);
	mutex_unlock(&vc4file->perfmon.lock);

	if (ret < 0) {
		kfree(perfmon);
		return ret;
	}

	req->id = ret;
	return 0;
}

int vc4_perfmon_destroy_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_file *vc4file = file_priv->driver_priv;
	struct drm_vc4_perfmon_destroy *req = data;
	struct vc4_perfmon *perfmon;

	if (WARN_ON_ONCE(vc4->is_vc5))
		return -ENODEV;

	if (!vc4->v3d) {
		DRM_DEBUG("Destroying perfmon no VC4 V3D probed\n");
		return -ENODEV;
	}

	mutex_lock(&vc4file->perfmon.lock);
	perfmon = idr_remove(&vc4file->perfmon.idr, req->id);
	mutex_unlock(&vc4file->perfmon.lock);

	if (!perfmon)
		return -EINVAL;

	vc4_perfmon_put(perfmon);
	return 0;
}

int vc4_perfmon_get_values_ioctl(struct drm_device *dev, void *data,
				 struct drm_file *file_priv)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_file *vc4file = file_priv->driver_priv;
	struct drm_vc4_perfmon_get_values *req = data;
	struct vc4_perfmon *perfmon;
	int ret;

	if (WARN_ON_ONCE(vc4->is_vc5))
		return -ENODEV;

	if (!vc4->v3d) {
		DRM_DEBUG("Getting perfmon no VC4 V3D probed\n");
		return -ENODEV;
	}

	mutex_lock(&vc4file->perfmon.lock);
	perfmon = idr_find(&vc4file->perfmon.idr, req->id);
	vc4_perfmon_get(perfmon);
	mutex_unlock(&vc4file->perfmon.lock);

	if (!perfmon)
		return -EINVAL;

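	/*
	 * The reference taken above keeps the perfmon alive while its
	 * accumulated counters are copied out to userspace.
	 */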
	if (copy_to_user(u64_to_user_ptr(req->values_ptr), perfmon->counters,
			 perfmon->ncounters * sizeof(u64)))
		ret = -EFAULT;
	else
		ret = 0;

	vc4_perfmon_put(perfmon);
	return ret;
}