1 // SPDX-License-Identifier: GPL-2.0-only OR MIT
2 /*
3 * Copyright (C) 2020-2024 Intel Corporation
4 */
5
6 #include <drm/drm_file.h>
7 #include <linux/pm_runtime.h>
8
9 #include "ivpu_drv.h"
10 #include "ivpu_gem.h"
11 #include "ivpu_hw.h"
12 #include "ivpu_jsm_msg.h"
13 #include "ivpu_ms.h"
14 #include "ivpu_pm.h"
15
16 #define MS_INFO_BUFFER_SIZE SZ_64K
17 #define MS_NUM_BUFFERS 2
18 #define MS_READ_PERIOD_MULTIPLIER 2
19 #define MS_MIN_SAMPLE_PERIOD_NS 1000000
20
21 static struct ivpu_ms_instance *
get_instance_by_mask(struct ivpu_file_priv * file_priv,u64 metric_mask)22 get_instance_by_mask(struct ivpu_file_priv *file_priv, u64 metric_mask)
23 {
24 struct ivpu_ms_instance *ms;
25
26 lockdep_assert_held(&file_priv->ms_lock);
27
28 list_for_each_entry(ms, &file_priv->ms_instance_list, ms_instance_node)
29 if (ms->mask == metric_mask)
30 return ms;
31
32 return NULL;
33 }
34
/*
 * DRM_IVPU_METRIC_STREAMER_START ioctl handler.
 *
 * Starts a metric streamer instance for the metric groups selected by
 * args->metric_group_mask. A single BO is allocated and split into two
 * halves that are used as active/inactive sample buffers (double
 * buffering); the active half is handed to firmware via
 * ivpu_jsm_metric_streamer_start().
 *
 * Return: 0 on success, -EINVAL on invalid arguments or an oversized
 * buffer request, -EALREADY if an instance for this mask already exists,
 * -ENOMEM on allocation failure, or a negative error from the JSM calls.
 */
int ivpu_ms_start_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
	struct ivpu_file_priv *file_priv = file->driver_priv;
	struct drm_ivpu_metric_streamer_start *args = data;
	struct ivpu_device *vdev = file_priv->vdev;
	struct ivpu_ms_instance *ms;
	u32 sample_size;
	u64 buf_size;
	int ret;

	/* Reject empty masks, zero sample counts and too-short sampling periods. */
	if (!args->metric_group_mask || !args->read_period_samples ||
	    args->sampling_period_ns < MS_MIN_SAMPLE_PERIOD_NS)
		return -EINVAL;

	/* Power up the device - the JSM messages below need it running. */
	ret = ivpu_rpm_get(vdev);
	if (ret < 0)
		return ret;

	mutex_lock(&file_priv->ms_lock);

	/* Only one instance per metric group mask is allowed per file. */
	if (get_instance_by_mask(file_priv, args->metric_group_mask)) {
		ivpu_dbg(vdev, IOCTL, "Instance already exists (mask %#llx)\n",
			 args->metric_group_mask);
		ret = -EALREADY;
		goto unlock;
	}

	ms = kzalloc_obj(*ms);
	if (!ms) {
		ret = -ENOMEM;
		goto unlock;
	}

	ms->mask = args->metric_group_mask;

	/* Query only the per-sample size for this mask (no buffer supplied). */
	ret = ivpu_jsm_metric_streamer_info(vdev, ms->mask, 0, 0, &sample_size, NULL);
	if (ret)
		goto err_free_ms;

	/*
	 * Size the BO so that each of the MS_NUM_BUFFERS halves holds
	 * MS_READ_PERIOD_MULTIPLIER read periods worth of samples.
	 */
	buf_size = PAGE_ALIGN((u64)args->read_period_samples * sample_size *
			      MS_READ_PERIOD_MULTIPLIER * MS_NUM_BUFFERS);
	if (buf_size > ivpu_hw_range_size(&vdev->hw->ranges.global)) {
		ivpu_dbg(vdev, IOCTL, "Requested MS buffer size %llu exceeds range size %llu\n",
			 buf_size, ivpu_hw_range_size(&vdev->hw->ranges.global));
		ret = -EINVAL;
		goto err_free_ms;
	}

	ms->bo = ivpu_bo_create_global(vdev, buf_size, DRM_IVPU_BO_CACHED | DRM_IVPU_BO_MAPPABLE);
	if (!ms->bo) {
		ivpu_dbg(vdev, IOCTL, "Failed to allocate MS buffer (size %llu)\n", buf_size);
		ret = -ENOMEM;
		goto err_free_ms;
	}

	/* Split the BO into the active and inactive halves. */
	ms->buff_size = ivpu_bo_size(ms->bo) / MS_NUM_BUFFERS;
	ms->active_buff_vpu_addr = ms->bo->vpu_addr;
	ms->inactive_buff_vpu_addr = ms->bo->vpu_addr + ms->buff_size;
	ms->active_buff_ptr = ivpu_bo_vaddr(ms->bo);
	ms->inactive_buff_ptr = ivpu_bo_vaddr(ms->bo) + ms->buff_size;

	ret = ivpu_jsm_metric_streamer_start(vdev, ms->mask, args->sampling_period_ns,
					     ms->active_buff_vpu_addr, ms->buff_size);
	if (ret)
		goto err_free_bo;

	/* Success: report sizes back and register the instance. */
	args->sample_size = sample_size;
	args->max_data_size = ivpu_bo_size(ms->bo);
	list_add_tail(&ms->ms_instance_node, &file_priv->ms_instance_list);
	goto unlock;

err_free_bo:
	ivpu_bo_free(ms->bo);
err_free_ms:
	kfree(ms);
unlock:
	mutex_unlock(&file_priv->ms_lock);

	ivpu_rpm_put(vdev);
	return ret;
}
116
117 static int
copy_leftover_bytes(struct ivpu_ms_instance * ms,void __user * user_ptr,u64 user_size,u64 * user_bytes_copied)118 copy_leftover_bytes(struct ivpu_ms_instance *ms,
119 void __user *user_ptr, u64 user_size, u64 *user_bytes_copied)
120 {
121 u64 copy_bytes;
122
123 if (ms->leftover_bytes) {
124 copy_bytes = min(user_size - *user_bytes_copied, ms->leftover_bytes);
125 if (copy_to_user(user_ptr + *user_bytes_copied, ms->leftover_addr, copy_bytes))
126 return -EFAULT;
127
128 ms->leftover_bytes -= copy_bytes;
129 ms->leftover_addr += copy_bytes;
130 *user_bytes_copied += copy_bytes;
131 }
132
133 return 0;
134 }
135
/*
 * Copy metric streamer samples to the user buffer.
 *
 * First drains any bytes left over from a previous call. If the user
 * buffer still has room, firmware is asked to flush pending samples into
 * the inactive buffer half, the active/inactive halves are swapped, and
 * the freshly written data becomes the new leftover region from which the
 * second drain copies. Bytes that do not fit remain as leftovers for the
 * next call.
 *
 * Return: 0 on success or a negative errno from the copy or the JSM call.
 */
static int
copy_samples_to_user(struct ivpu_device *vdev, struct ivpu_ms_instance *ms,
		     void __user *user_ptr, u64 user_size, u64 *user_bytes_copied)
{
	u64 bytes_written;
	int ret;

	*user_bytes_copied = 0;

	/* Drain leftovers from the previous read first. */
	ret = copy_leftover_bytes(ms, user_ptr, user_size, user_bytes_copied);
	if (ret)
		return ret;

	/* User buffer already full - nothing more to fetch from FW. */
	if (*user_bytes_copied == user_size)
		return 0;

	/* Have FW write pending samples into the inactive half. */
	ret = ivpu_jsm_metric_streamer_update(vdev, ms->mask, ms->inactive_buff_vpu_addr,
					      ms->buff_size, &bytes_written);
	if (ret)
		return ret;

	/* The freshly written half becomes the read (inactive) side. */
	swap(ms->active_buff_vpu_addr, ms->inactive_buff_vpu_addr);
	swap(ms->active_buff_ptr, ms->inactive_buff_ptr);

	ms->leftover_bytes = bytes_written;
	ms->leftover_addr = ms->inactive_buff_ptr;

	return copy_leftover_bytes(ms, user_ptr, user_size, user_bytes_copied);
}
165
/*
 * DRM_IVPU_METRIC_STREAMER_GET_DATA ioctl handler.
 *
 * With args->buffer_size == 0, only reports in args->data_size how many
 * bytes are currently available (pending in FW plus driver leftovers).
 * Otherwise copies up to buffer_size bytes of sample data to
 * args->buffer_ptr and reports the amount copied in args->data_size.
 *
 * Return: 0 on success, -EINVAL on bad arguments or unknown mask, or a
 * negative errno from power management, the JSM calls, or the user copy.
 */
int ivpu_ms_get_data_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
	struct drm_ivpu_metric_streamer_get_data *args = data;
	struct ivpu_file_priv *file_priv = file->driver_priv;
	struct ivpu_device *vdev = file_priv->vdev;
	struct ivpu_ms_instance *ms;
	u64 bytes_written;
	int ret;

	if (!args->metric_group_mask)
		return -EINVAL;

	/* Power up the device for the JSM messages below. */
	ret = ivpu_rpm_get(vdev);
	if (ret < 0)
		return ret;

	mutex_lock(&file_priv->ms_lock);

	ms = get_instance_by_mask(file_priv, args->metric_group_mask);
	if (!ms) {
		ivpu_dbg(vdev, IOCTL, "Instance doesn't exist for mask: %#llx\n",
			 args->metric_group_mask);
		ret = -EINVAL;
		goto unlock;
	}

	/* Size query only: no destination buffer given, just report availability. */
	if (!args->buffer_size) {
		ret = ivpu_jsm_metric_streamer_update(vdev, ms->mask, 0, 0, &bytes_written);
		if (ret)
			goto unlock;
		args->data_size = bytes_written + ms->leftover_bytes;
		goto unlock;
	}

	/* Non-zero size with a NULL pointer is invalid. */
	if (!args->buffer_ptr) {
		ret = -EINVAL;
		goto unlock;
	}

	ret = copy_samples_to_user(vdev, ms, u64_to_user_ptr(args->buffer_ptr),
				   args->buffer_size, &args->data_size);
unlock:
	mutex_unlock(&file_priv->ms_lock);

	ivpu_rpm_put(vdev);
	return ret;
}
213
/*
 * Stop and free a metric streamer instance.
 * Caller must hold file_priv->ms_lock.
 */
static void free_instance(struct ivpu_file_priv *file_priv, struct ivpu_ms_instance *ms)
{
	lockdep_assert_held(&file_priv->ms_lock);

	list_del(&ms->ms_instance_node);
	/* Tell FW to stop streaming before releasing the sample buffer. */
	ivpu_jsm_metric_streamer_stop(file_priv->vdev, ms->mask);
	ivpu_bo_free(ms->bo);
	kfree(ms);
}
223
ivpu_ms_stop_ioctl(struct drm_device * dev,void * data,struct drm_file * file)224 int ivpu_ms_stop_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
225 {
226 struct ivpu_file_priv *file_priv = file->driver_priv;
227 struct drm_ivpu_metric_streamer_stop *args = data;
228 struct ivpu_device *vdev = file_priv->vdev;
229 struct ivpu_ms_instance *ms;
230 int ret;
231
232 if (!args->metric_group_mask)
233 return -EINVAL;
234
235 ret = ivpu_rpm_get(vdev);
236 if (ret < 0)
237 return ret;
238
239 mutex_lock(&file_priv->ms_lock);
240
241 ms = get_instance_by_mask(file_priv, args->metric_group_mask);
242 if (ms)
243 free_instance(file_priv, ms);
244
245 mutex_unlock(&file_priv->ms_lock);
246
247 ivpu_rpm_put(vdev);
248 return ms ? 0 : -EINVAL;
249 }
250
get_ms_info_bo(struct ivpu_file_priv * file_priv)251 static inline struct ivpu_bo *get_ms_info_bo(struct ivpu_file_priv *file_priv)
252 {
253 lockdep_assert_held(&file_priv->ms_lock);
254
255 if (file_priv->ms_info_bo)
256 return file_priv->ms_info_bo;
257
258 file_priv->ms_info_bo = ivpu_bo_create_global(file_priv->vdev, MS_INFO_BUFFER_SIZE,
259 DRM_IVPU_BO_CACHED | DRM_IVPU_BO_MAPPABLE);
260 return file_priv->ms_info_bo;
261 }
262
/*
 * DRM_IVPU_METRIC_STREAMER_GET_INFO ioctl handler.
 *
 * With args->buffer_size == 0, only reports in args->data_size how large
 * the metric group description is. Otherwise the description is fetched
 * from FW through a lazily allocated bounce BO and copied to
 * args->buffer_ptr.
 *
 * Return: 0 on success, -EINVAL on bad arguments, -ENOMEM if the bounce
 * BO cannot be allocated, -ENOSPC if the user buffer is too small,
 * -EFAULT if the copy to user space fails, or a JSM error.
 */
int ivpu_ms_get_info_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
	struct drm_ivpu_metric_streamer_get_data *args = data;
	struct ivpu_file_priv *file_priv = file->driver_priv;
	struct ivpu_device *vdev = file_priv->vdev;
	struct ivpu_bo *bo;
	u64 info_size;
	int ret;

	if (!args->metric_group_mask)
		return -EINVAL;

	/* Size query only: ask FW for the description size, no buffer needed. */
	if (!args->buffer_size)
		return ivpu_jsm_metric_streamer_info(vdev, args->metric_group_mask,
						     0, 0, NULL, &args->data_size);
	if (!args->buffer_ptr)
		return -EINVAL;

	mutex_lock(&file_priv->ms_lock);

	bo = get_ms_info_bo(file_priv);
	if (!bo) {
		ret = -ENOMEM;
		goto unlock;
	}

	/* Have FW write the description into the bounce BO. */
	ret = ivpu_jsm_metric_streamer_info(vdev, args->metric_group_mask, bo->vpu_addr,
					    ivpu_bo_size(bo), NULL, &info_size);
	if (ret)
		goto unlock;

	if (args->buffer_size < info_size) {
		ret = -ENOSPC;
		goto unlock;
	}

	/* ret is 0 here from the successful JSM call unless the copy fails. */
	if (copy_to_user(u64_to_user_ptr(args->buffer_ptr), ivpu_bo_vaddr(bo), info_size))
		ret = -EFAULT;

	args->data_size = info_size;
unlock:
	mutex_unlock(&file_priv->ms_lock);

	return ret;
}
308
/*
 * Release all metric streamer resources owned by @file_priv: the info
 * bounce BO and every active streamer instance.
 * NOTE(review): appears to be called on context teardown - confirm at
 * the call sites.
 */
void ivpu_ms_cleanup(struct ivpu_file_priv *file_priv)
{
	struct ivpu_ms_instance *ms, *tmp;
	struct ivpu_device *vdev = file_priv->vdev;

	/* Resume the device so free_instance() can send the FW stop messages. */
	pm_runtime_get_sync(vdev->drm.dev);

	mutex_lock(&file_priv->ms_lock);

	if (file_priv->ms_info_bo) {
		ivpu_bo_free(file_priv->ms_info_bo);
		file_priv->ms_info_bo = NULL;
	}

	/* _safe variant required - free_instance() unlinks and frees each node. */
	list_for_each_entry_safe(ms, tmp, &file_priv->ms_instance_list, ms_instance_node)
		free_instance(file_priv, ms);

	mutex_unlock(&file_priv->ms_lock);

	pm_runtime_put_autosuspend(vdev->drm.dev);
}
330
/*
 * Run ivpu_ms_cleanup() for every open context on @vdev.
 * context_list_lock is held to keep the xarray iteration stable while
 * contexts are visited.
 */
void ivpu_ms_cleanup_all(struct ivpu_device *vdev)
{
	struct ivpu_file_priv *file_priv;
	unsigned long ctx_id;

	mutex_lock(&vdev->context_list_lock);

	xa_for_each(&vdev->context_xa, ctx_id, file_priv)
		ivpu_ms_cleanup(file_priv);

	mutex_unlock(&vdev->context_list_lock);
}
343