// SPDX-License-Identifier: GPL-2.0-only OR MIT
/*
 * Copyright (C) 2020-2024 Intel Corporation
 */

#include <drm/drm_file.h>

#include "ivpu_drv.h"
#include "ivpu_gem.h"
#include "ivpu_jsm_msg.h"
#include "ivpu_ms.h"
#include "ivpu_pm.h"

#define MS_INFO_BUFFER_SIZE	  SZ_16K
#define MS_NUM_BUFFERS		  2
#define MS_READ_PERIOD_MULTIPLIER 2
#define MS_MIN_SAMPLE_PERIOD_NS	  1000000

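/*
 * Find the metric streamer instance, if any, that was started with the given
 * metric group mask. Caller must hold file_priv->ms_lock.
 */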
static struct ivpu_ms_instance *
get_instance_by_mask(struct ivpu_file_priv *file_priv, u64 metric_mask)
{
	struct ivpu_ms_instance *ms;

	lockdep_assert_held(&file_priv->ms_lock);

	list_for_each_entry(ms, &file_priv->ms_instance_list, ms_instance_node)
		if (ms->mask == metric_mask)
			return ms;

	return NULL;
}

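/*
 * Start streaming samples for a metric group: validate the request, allocate
 * a double-buffered sample BO and ask the FW to begin writing samples into
 * the first half of that buffer.
 */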
int ivpu_ms_start_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
	struct ivpu_file_priv *file_priv = file->driver_priv;
	struct drm_ivpu_metric_streamer_start *args = data;
	struct ivpu_device *vdev = file_priv->vdev;
	struct ivpu_ms_instance *ms;
	u64 single_buff_size;
	u32 sample_size;
	int ret;

	if (!args->metric_group_mask || !args->read_period_samples ||
	    args->sampling_period_ns < MS_MIN_SAMPLE_PERIOD_NS)
		return -EINVAL;

	mutex_lock(&file_priv->ms_lock);

	if (get_instance_by_mask(file_priv, args->metric_group_mask)) {
		ivpu_err(vdev, "Instance already exists (mask %#llx)\n", args->metric_group_mask);
		ret = -EALREADY;
		goto unlock;
	}

	ms = kzalloc(sizeof(*ms), GFP_KERNEL);
	if (!ms) {
		ret = -ENOMEM;
		goto unlock;
	}

	ms->mask = args->metric_group_mask;

	ret = ivpu_jsm_metric_streamer_info(vdev, ms->mask, 0, 0, &sample_size, NULL);
	if (ret)
		goto err_free_ms;

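	/*
	 * Size each half of the BO to hold MS_READ_PERIOD_MULTIPLIER times
	 * the samples read per request, so the FW can keep writing into one
	 * half while user space drains the other.
	 */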
	single_buff_size = sample_size *
			   ((u64)args->read_period_samples * MS_READ_PERIOD_MULTIPLIER);
	ms->bo = ivpu_bo_create_global(vdev, PAGE_ALIGN(single_buff_size * MS_NUM_BUFFERS),
				       DRM_IVPU_BO_CACHED | DRM_IVPU_BO_MAPPABLE);
	if (!ms->bo) {
		ivpu_err(vdev, "Failed to allocate MS buffer (size %llu)\n", single_buff_size);
		ret = -ENOMEM;
		goto err_free_ms;
	}

	ms->buff_size = ivpu_bo_size(ms->bo) / MS_NUM_BUFFERS;
	ms->active_buff_vpu_addr = ms->bo->vpu_addr;
	ms->inactive_buff_vpu_addr = ms->bo->vpu_addr + ms->buff_size;
	ms->active_buff_ptr = ivpu_bo_vaddr(ms->bo);
	ms->inactive_buff_ptr = ivpu_bo_vaddr(ms->bo) + ms->buff_size;

	ret = ivpu_jsm_metric_streamer_start(vdev, ms->mask, args->sampling_period_ns,
					     ms->active_buff_vpu_addr, ms->buff_size);
	if (ret)
		goto err_free_bo;

	args->sample_size = sample_size;
	args->max_data_size = ivpu_bo_size(ms->bo);
	list_add_tail(&ms->ms_instance_node, &file_priv->ms_instance_list);
	goto unlock;

err_free_bo:
	ivpu_bo_free(ms->bo);
err_free_ms:
	kfree(ms);
unlock:
	mutex_unlock(&file_priv->ms_lock);
	return ret;
}

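/*
 * Copy as many of the not-yet-consumed sample bytes as fit into the user
 * buffer, advancing the leftover cursor accordingly.
 */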
static int
copy_leftover_bytes(struct ivpu_ms_instance *ms,
		    void __user *user_ptr, u64 user_size, u64 *user_bytes_copied)
{
	u64 copy_bytes;

	if (ms->leftover_bytes) {
		copy_bytes = min(user_size - *user_bytes_copied, ms->leftover_bytes);
		if (copy_to_user(user_ptr + *user_bytes_copied, ms->leftover_addr, copy_bytes))
			return -EFAULT;

		ms->leftover_bytes -= copy_bytes;
		ms->leftover_addr += copy_bytes;
		*user_bytes_copied += copy_bytes;
	}

	return 0;
}

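/*
 * Drain leftover samples first; if the user buffer still has room, request
 * fresh samples from the FW, flip the double buffer and copy from it.
 */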
static int
copy_samples_to_user(struct ivpu_device *vdev, struct ivpu_ms_instance *ms,
		     void __user *user_ptr, u64 user_size, u64 *user_bytes_copied)
{
	u64 bytes_written;
	int ret;

	*user_bytes_copied = 0;

	ret = copy_leftover_bytes(ms, user_ptr, user_size, user_bytes_copied);
	if (ret)
		return ret;

	if (*user_bytes_copied == user_size)
		return 0;

	ret = ivpu_jsm_metric_streamer_update(vdev, ms->mask, ms->inactive_buff_vpu_addr,
					      ms->buff_size, &bytes_written);
	if (ret)
		return ret;

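	/*
	 * The update request points the FW at the inactive buffer and
	 * reports how many bytes were written to the active one. Swap the
	 * buffer roles so those samples sit in the inactive buffer, then
	 * hand them out as the new leftover data.
	 */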
	swap(ms->active_buff_vpu_addr, ms->inactive_buff_vpu_addr);
	swap(ms->active_buff_ptr, ms->inactive_buff_ptr);

	ms->leftover_bytes = bytes_written;
	ms->leftover_addr = ms->inactive_buff_ptr;

	return copy_leftover_bytes(ms, user_ptr, user_size, user_bytes_copied);
}

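/*
 * Return buffered metric samples to user space for a previously started
 * metric streamer instance.
 */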
int ivpu_ms_get_data_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
	struct drm_ivpu_metric_streamer_get_data *args = data;
	struct ivpu_file_priv *file_priv = file->driver_priv;
	struct ivpu_device *vdev = file_priv->vdev;
	struct ivpu_ms_instance *ms;
	u64 bytes_written;
	int ret;

	if (!args->metric_group_mask)
		return -EINVAL;

	mutex_lock(&file_priv->ms_lock);

	ms = get_instance_by_mask(file_priv, args->metric_group_mask);
	if (!ms) {
		ivpu_err(vdev, "Instance doesn't exist for mask: %#llx\n", args->metric_group_mask);
		ret = -EINVAL;
		goto unlock;
	}

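	/*
	 * A zero buffer_size is a size query: report how many bytes of
	 * sample data are currently available without copying anything.
	 */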
	if (!args->buffer_size) {
		ret = ivpu_jsm_metric_streamer_update(vdev, ms->mask, 0, 0, &bytes_written);
		if (ret)
			goto unlock;
		args->data_size = bytes_written + ms->leftover_bytes;
		goto unlock;
	}

	if (!args->buffer_ptr) {
		ret = -EINVAL;
		goto unlock;
	}

	ret = copy_samples_to_user(vdev, ms, u64_to_user_ptr(args->buffer_ptr),
				   args->buffer_size, &args->data_size);
unlock:
	mutex_unlock(&file_priv->ms_lock);

	return ret;
}

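/*
 * Unlink the instance, stop streaming in the FW and release its resources.
 * Caller must hold file_priv->ms_lock.
 */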
static void free_instance(struct ivpu_file_priv *file_priv, struct ivpu_ms_instance *ms)
{
	lockdep_assert_held(&file_priv->ms_lock);

	list_del(&ms->ms_instance_node);
	ivpu_jsm_metric_streamer_stop(file_priv->vdev, ms->mask);
	ivpu_bo_free(ms->bo);
	kfree(ms);
}

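/*
 * Stop the metric streamer instance matching the given metric group mask.
 */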
int ivpu_ms_stop_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
	struct ivpu_file_priv *file_priv = file->driver_priv;
	struct drm_ivpu_metric_streamer_stop *args = data;
	struct ivpu_ms_instance *ms;

	if (!args->metric_group_mask)
		return -EINVAL;

	mutex_lock(&file_priv->ms_lock);

	ms = get_instance_by_mask(file_priv, args->metric_group_mask);
	if (ms)
		free_instance(file_priv, ms);

	mutex_unlock(&file_priv->ms_lock);

	return ms ? 0 : -EINVAL;
}

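/*
 * Return the per-file BO used to receive metric group descriptions from the
 * FW, allocating it on first use. Caller must hold file_priv->ms_lock.
 */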
static inline struct ivpu_bo *get_ms_info_bo(struct ivpu_file_priv *file_priv)
{
	lockdep_assert_held(&file_priv->ms_lock);

	if (file_priv->ms_info_bo)
		return file_priv->ms_info_bo;

	file_priv->ms_info_bo = ivpu_bo_create_global(file_priv->vdev, MS_INFO_BUFFER_SIZE,
						      DRM_IVPU_BO_CACHED | DRM_IVPU_BO_MAPPABLE);
	return file_priv->ms_info_bo;
}

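/*
 * Copy the FW description of the requested metric groups to user space.
 */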
int ivpu_ms_get_info_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
	struct drm_ivpu_metric_streamer_get_data *args = data;
	struct ivpu_file_priv *file_priv = file->driver_priv;
	struct ivpu_device *vdev = file_priv->vdev;
	struct ivpu_bo *bo;
	u64 info_size;
	int ret;

	if (!args->metric_group_mask)
		return -EINVAL;

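	/*
	 * A zero buffer_size is a size query: ask the FW only for the size
	 * of the description without transferring it.
	 */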
	if (!args->buffer_size)
		return ivpu_jsm_metric_streamer_info(vdev, args->metric_group_mask,
						     0, 0, NULL, &args->data_size);
	if (!args->buffer_ptr)
		return -EINVAL;

	mutex_lock(&file_priv->ms_lock);

	bo = get_ms_info_bo(file_priv);
	if (!bo) {
		ret = -ENOMEM;
		goto unlock;
	}

	ret = ivpu_jsm_metric_streamer_info(vdev, args->metric_group_mask, bo->vpu_addr,
					    ivpu_bo_size(bo), NULL, &info_size);
	if (ret)
		goto unlock;

	if (args->buffer_size < info_size) {
		ret = -ENOSPC;
		goto unlock;
	}

	if (copy_to_user(u64_to_user_ptr(args->buffer_ptr), ivpu_bo_vaddr(bo), info_size))
		ret = -EFAULT;

	args->data_size = info_size;
unlock:
	mutex_unlock(&file_priv->ms_lock);

	return ret;
}

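/*
 * Stop and free all metric streamer instances owned by a file context,
 * along with its cached info BO.
 */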
void ivpu_ms_cleanup(struct ivpu_file_priv *file_priv)
{
	struct ivpu_ms_instance *ms, *tmp;

	mutex_lock(&file_priv->ms_lock);

	if (file_priv->ms_info_bo) {
		ivpu_bo_free(file_priv->ms_info_bo);
		file_priv->ms_info_bo = NULL;
	}

	list_for_each_entry_safe(ms, tmp, &file_priv->ms_instance_list, ms_instance_node)
		free_instance(file_priv, ms);

	mutex_unlock(&file_priv->ms_lock);
}

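/*
 * Run the metric streamer cleanup for every open file context on the device.
 */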
void ivpu_ms_cleanup_all(struct ivpu_device *vdev)
{
	struct ivpu_file_priv *file_priv;
	unsigned long ctx_id;

	mutex_lock(&vdev->context_list_lock);

	xa_for_each(&vdev->context_xa, ctx_id, file_priv)
		ivpu_ms_cleanup(file_priv);

	mutex_unlock(&vdev->context_list_lock);
}