xref: /linux/drivers/media/mc/mc-request.c (revision 6dfafbd0299a60bfb5d5e277fdf100037c7ded07)
// SPDX-License-Identifier: GPL-2.0
/*
 * Media device request objects
 *
 * Copyright 2018 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
 * Copyright (C) 2018 Intel Corporation
 * Copyright (C) 2018 Google, Inc.
 *
 * Author: Hans Verkuil <hverkuil@kernel.org>
 * Author: Sakari Ailus <sakari.ailus@linux.intel.com>
 */

#include <linux/anon_inodes.h>
#include <linux/file.h>
#include <linux/refcount.h>

#include <media/media-device.h>
#include <media/media-request.h>

static const char * const request_state[] = {
	[MEDIA_REQUEST_STATE_IDLE]	 = "idle",
	[MEDIA_REQUEST_STATE_VALIDATING] = "validating",
	[MEDIA_REQUEST_STATE_QUEUED]	 = "queued",
	[MEDIA_REQUEST_STATE_COMPLETE]	 = "complete",
	[MEDIA_REQUEST_STATE_CLEANING]	 = "cleaning",
	[MEDIA_REQUEST_STATE_UPDATING]	 = "updating",
};

static const char *
media_request_state_str(enum media_request_state state)
{
	BUILD_BUG_ON(ARRAY_SIZE(request_state) != NR_OF_MEDIA_REQUEST_STATE);

	if (WARN_ON(state >= ARRAY_SIZE(request_state)))
		return "invalid";
	return request_state[state];
}

static void media_request_clean(struct media_request *req)
{
	struct media_request_object *obj, *obj_safe;

	/* Just a sanity check. No other code path is allowed to change this. */
	WARN_ON(req->state != MEDIA_REQUEST_STATE_CLEANING);
	WARN_ON(req->updating_count);
	WARN_ON(req->access_count);

	list_for_each_entry_safe(obj, obj_safe, &req->objects, list) {
		media_request_object_unbind(obj);
		media_request_object_put(obj);
	}

	req->updating_count = 0;
	req->access_count = 0;
	WARN_ON(req->num_incomplete_objects);
	req->num_incomplete_objects = 0;
	wake_up_interruptible_all(&req->poll_wait);
}

static void media_request_release(struct kref *kref)
{
	struct media_request *req =
		container_of(kref, struct media_request, kref);
	struct media_device *mdev = req->mdev;

	dev_dbg(mdev->dev, "request: release %s\n", req->debug_str);

	/* No other users, no need for a spinlock */
	req->state = MEDIA_REQUEST_STATE_CLEANING;

	media_request_clean(req);

	if (mdev->ops->req_free)
		mdev->ops->req_free(req);
	else
		kfree(req);
}

void media_request_put(struct media_request *req)
{
	kref_put(&req->kref, media_request_release);
}
EXPORT_SYMBOL_GPL(media_request_put);

static int media_request_close(struct inode *inode, struct file *filp)
{
	struct media_request *req = filp->private_data;

	media_request_put(req);
	return 0;
}

static __poll_t media_request_poll(struct file *filp,
				   struct poll_table_struct *wait)
{
	struct media_request *req = filp->private_data;
	unsigned long flags;
	__poll_t ret = 0;

	if (!(poll_requested_events(wait) & EPOLLPRI))
		return 0;

	poll_wait(filp, &req->poll_wait, wait);
	spin_lock_irqsave(&req->lock, flags);
	if (req->state == MEDIA_REQUEST_STATE_COMPLETE) {
		ret = EPOLLPRI;
		goto unlock;
	}
	if (req->state != MEDIA_REQUEST_STATE_QUEUED) {
		ret = EPOLLERR;
		goto unlock;
	}

unlock:
	spin_unlock_irqrestore(&req->lock, flags);
	return ret;
}
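
/*
 * Userspace waits for a request to complete by polling the request fd
 * for EPOLLPRI. A minimal sketch, assuming request_fd was obtained via
 * MEDIA_IOC_REQUEST_ALLOC on the media device and the request has
 * already been queued (handle_completed_request() is a placeholder):
 *
 *	struct pollfd pfd = { .fd = request_fd, .events = POLLPRI };
 *
 *	if (poll(&pfd, 1, -1) == 1 && (pfd.revents & POLLPRI))
 *		handle_completed_request();
 *
 * Per media_request_poll() above, POLLERR is reported if the request
 * is neither queued nor complete.
 */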

static long media_request_ioctl_queue(struct media_request *req)
{
	struct media_device *mdev = req->mdev;
	enum media_request_state state;
	unsigned long flags;
	int ret;

	dev_dbg(mdev->dev, "request: queue %s\n", req->debug_str);

	/*
	 * Ensure the request that is validated will be the one that gets queued
	 * next by serializing the queueing process. This mutex is also used
	 * to serialize with canceling a vb2 queue and with setting values such
	 * as controls in a request.
	 */
	mutex_lock(&mdev->req_queue_mutex);

	media_request_get(req);

	spin_lock_irqsave(&req->lock, flags);
	if (req->state == MEDIA_REQUEST_STATE_IDLE)
		req->state = MEDIA_REQUEST_STATE_VALIDATING;
	state = req->state;
	spin_unlock_irqrestore(&req->lock, flags);
	if (state != MEDIA_REQUEST_STATE_VALIDATING) {
		dev_dbg(mdev->dev,
			"request: unable to queue %s, request in state %s\n",
			req->debug_str, media_request_state_str(state));
		media_request_put(req);
		mutex_unlock(&mdev->req_queue_mutex);
		return -EBUSY;
	}

	ret = mdev->ops->req_validate(req);

	/*
	 * If req_validate was successful, then we mark the state as QUEUED
	 * and call req_queue. The reason we set the state first is that this
	 * allows req_queue to unbind or complete the queued objects in case
	 * they are immediately 'consumed'. State changes from QUEUED to another
	 * state can only happen if either the driver changes the state or if
	 * the user cancels the vb2 queue. The driver can only change the state
	 * after each object is queued through the req_queue op (and note that
	 * this op cannot fail), so setting the state to QUEUED up front is
	 * safe.
	 *
	 * The other way the state can change is when the vb2 queue is
	 * canceled, but cancellation takes the req_queue_mutex, which is
	 * still held while req_queue is called, so that is safe as well.
	 */
	spin_lock_irqsave(&req->lock, flags);
	req->state = ret ? MEDIA_REQUEST_STATE_IDLE
			 : MEDIA_REQUEST_STATE_QUEUED;
	spin_unlock_irqrestore(&req->lock, flags);

	if (!ret)
		mdev->ops->req_queue(req);

	mutex_unlock(&mdev->req_queue_mutex);

	if (ret) {
		dev_dbg(mdev->dev, "request: can't queue %s (%d)\n",
			req->debug_str, ret);
		media_request_put(req);
	}

	return ret;
}
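
/*
 * Overview of the request state machine implemented above and below:
 *
 *	         queue ioctl         req_validate() ok
 *	  IDLE ------------> VALIDATING ------------> QUEUED
 *	   ^  ^                  |                      |
 *	   |  +------------------+                      | last object
 *	   |    req_validate() fails                    v completes
 *	   |            reinit ioctl
 *	   +-------- CLEANING <------------------------ COMPLETE
 *
 * CLEANING is also entered by a reinit of an IDLE request and by the
 * final media_request_put() in any state. UPDATING (not shown) is
 * entered from IDLE while object values such as controls are set in
 * the request; see the media_request_lock_for_update() helpers in
 * media-request.h.
 */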

static long media_request_ioctl_reinit(struct media_request *req)
{
	struct media_device *mdev = req->mdev;
	unsigned long flags;

	spin_lock_irqsave(&req->lock, flags);
	if (req->state != MEDIA_REQUEST_STATE_IDLE &&
	    req->state != MEDIA_REQUEST_STATE_COMPLETE) {
		dev_dbg(mdev->dev,
			"request: %s not in idle or complete state, cannot reinit\n",
			req->debug_str);
		spin_unlock_irqrestore(&req->lock, flags);
		return -EBUSY;
	}
	if (req->access_count) {
		dev_dbg(mdev->dev,
			"request: %s is being accessed, cannot reinit\n",
			req->debug_str);
		spin_unlock_irqrestore(&req->lock, flags);
		return -EBUSY;
	}
	req->state = MEDIA_REQUEST_STATE_CLEANING;
	spin_unlock_irqrestore(&req->lock, flags);

	media_request_clean(req);

	spin_lock_irqsave(&req->lock, flags);
	req->state = MEDIA_REQUEST_STATE_IDLE;
	spin_unlock_irqrestore(&req->lock, flags);

	return 0;
}

static long media_request_ioctl(struct file *filp, unsigned int cmd,
				unsigned long arg)
{
	struct media_request *req = filp->private_data;

	switch (cmd) {
	case MEDIA_REQUEST_IOC_QUEUE:
		return media_request_ioctl_queue(req);
	case MEDIA_REQUEST_IOC_REINIT:
		return media_request_ioctl_reinit(req);
	default:
		return -ENOIOCTLCMD;
	}
}

static const struct file_operations request_fops = {
	.owner = THIS_MODULE,
	.poll = media_request_poll,
	.unlocked_ioctl = media_request_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = media_request_ioctl,
#endif /* CONFIG_COMPAT */
	.release = media_request_close,
};

struct media_request *
media_request_get_by_fd(struct media_device *mdev, int request_fd)
{
	struct media_request *req;

	if (!mdev || !mdev->ops ||
	    !mdev->ops->req_validate || !mdev->ops->req_queue)
		return ERR_PTR(-EBADR);

	CLASS(fd, f)(request_fd);
	if (fd_empty(f))
		goto err;

	if (fd_file(f)->f_op != &request_fops)
		goto err;
	req = fd_file(f)->private_data;
	if (req->mdev != mdev)
		goto err;

	/*
	 * Note: as long as someone has an open filehandle of the request,
	 * the request can never be released. The CLASS(fd) above (a scoped
	 * fdget()) holds a reference to the file, so even if userspace
	 * closes the request filehandle, the release() fop won't be called,
	 * the media_request_get() below always succeeds and there is no
	 * race condition where the request was released before
	 * media_request_get() is called.
	 */
	media_request_get(req);
	return req;

err:
	dev_dbg(mdev->dev, "cannot find request_fd %d\n", request_fd);
	return ERR_PTR(-EINVAL);
}
EXPORT_SYMBOL_GPL(media_request_get_by_fd);
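
/*
 * A typical caller resolves the request_fd that userspace passed in (for
 * example the request_fd field of struct v4l2_buffer) into a request. A
 * minimal sketch, with error handling and the surrounding ioctl plumbing
 * omitted ('mdev' is the driver's media device and 'buf' the hypothetical
 * ioctl argument):
 *
 *	struct media_request *req;
 *
 *	req = media_request_get_by_fd(mdev, buf->request_fd);
 *	if (IS_ERR(req))
 *		return PTR_ERR(req);
 *	// ... bind objects to or look up objects in the request ...
 *	media_request_put(req);
 */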

int media_request_alloc(struct media_device *mdev, int *alloc_fd)
{
	struct media_request *req;
	int ret;

	/* Either both are NULL or both are non-NULL */
	if (WARN_ON(!mdev->ops->req_alloc ^ !mdev->ops->req_free))
		return -ENOMEM;

	if (mdev->ops->req_alloc)
		req = mdev->ops->req_alloc(mdev);
	else
		req = kzalloc(sizeof(*req), GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	req->mdev = mdev;
	req->state = MEDIA_REQUEST_STATE_IDLE;
	req->num_incomplete_objects = 0;
	kref_init(&req->kref);
	INIT_LIST_HEAD(&req->objects);
	spin_lock_init(&req->lock);
	init_waitqueue_head(&req->poll_wait);
	req->updating_count = 0;
	req->access_count = 0;

	FD_PREPARE(fdf, O_CLOEXEC,
		   anon_inode_getfile("request", &request_fops, NULL,
				      O_CLOEXEC));
	if (fdf.err) {
		ret = fdf.err;
		goto err_free_req;
	}

	fd_prepare_file(fdf)->private_data = req;

	*alloc_fd = fd_publish(fdf);

	snprintf(req->debug_str, sizeof(req->debug_str), "%u:%d",
		 atomic_inc_return(&mdev->request_id), *alloc_fd);
	dev_dbg(mdev->dev, "request: allocated %s\n", req->debug_str);

	return 0;

err_free_req:
	if (mdev->ops->req_free)
		mdev->ops->req_free(req);
	else
		kfree(req);

	return ret;
}
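
/*
 * Request allocation is only usable when the driver provides req_validate
 * and req_queue ops (see media_request_get_by_fd() above). As a sketch, a
 * vb2-based mem2mem driver would typically wire up something like the
 * following, where my_media_ops is a placeholder name and
 * vb2_request_validate() and v4l2_m2m_request_queue() are the helpers
 * provided by videobuf2 and the V4L2 mem2mem framework:
 *
 *	static const struct media_device_ops my_media_ops = {
 *		.req_validate	= vb2_request_validate,
 *		.req_queue	= v4l2_m2m_request_queue,
 *	};
 *
 * Userspace then allocates requests with the MEDIA_IOC_REQUEST_ALLOC ioctl
 * on the media device, which ends up in media_request_alloc() above.
 */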

static void media_request_object_release(struct kref *kref)
{
	struct media_request_object *obj =
		container_of(kref, struct media_request_object, kref);
	struct media_request *req = obj->req;

	if (WARN_ON(req))
		media_request_object_unbind(obj);
	obj->ops->release(obj);
}

struct media_request_object *
media_request_object_find(struct media_request *req,
			  const struct media_request_object_ops *ops,
			  void *priv)
{
	struct media_request_object *obj;
	struct media_request_object *found = NULL;
	unsigned long flags;

	if (WARN_ON(!ops || !priv))
		return NULL;

	spin_lock_irqsave(&req->lock, flags);
	list_for_each_entry(obj, &req->objects, list) {
		if (obj->ops == ops && obj->priv == priv) {
			media_request_object_get(obj);
			found = obj;
			break;
		}
	}
	spin_unlock_irqrestore(&req->lock, flags);
	return found;
}
EXPORT_SYMBOL_GPL(media_request_object_find);
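
/*
 * The (ops, priv) pair acts as the lookup key, so the same entity that
 * bound an object can find it again; the V4L2 control framework, for
 * instance, looks up the control-handler object it bound to a request by
 * passing its own object ops and the control handler as priv. Note that
 * the returned object has its refcount elevated by the loop above, so
 * callers must drop it with media_request_object_put() when done.
 */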

void media_request_object_put(struct media_request_object *obj)
{
	kref_put(&obj->kref, media_request_object_release);
}
EXPORT_SYMBOL_GPL(media_request_object_put);

void media_request_object_init(struct media_request_object *obj)
{
	obj->ops = NULL;
	obj->req = NULL;
	obj->priv = NULL;
	obj->completed = false;
	INIT_LIST_HEAD(&obj->list);
	kref_init(&obj->kref);
}
EXPORT_SYMBOL_GPL(media_request_object_init);

int media_request_object_bind(struct media_request *req,
			      const struct media_request_object_ops *ops,
			      void *priv, bool is_buffer,
			      struct media_request_object *obj)
{
	unsigned long flags;
	int ret = -EBUSY;

	if (WARN_ON(!ops->release))
		return -EBADR;

	spin_lock_irqsave(&req->lock, flags);

	if (WARN_ON(req->state != MEDIA_REQUEST_STATE_UPDATING &&
		    req->state != MEDIA_REQUEST_STATE_QUEUED))
		goto unlock;

	obj->req = req;
	obj->ops = ops;
	obj->priv = priv;
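
	/*
	 * Buffer objects go to the tail of the object list and other
	 * objects (e.g. controls) to the head, so non-buffer objects come
	 * first when the list is walked, presumably so that they are
	 * applied before the buffers when the request is queued.
	 */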
	if (is_buffer)
		list_add_tail(&obj->list, &req->objects);
	else
		list_add(&obj->list, &req->objects);
	req->num_incomplete_objects++;
	ret = 0;

unlock:
	spin_unlock_irqrestore(&req->lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(media_request_object_bind);

void media_request_object_unbind(struct media_request_object *obj)
{
	struct media_request *req = obj->req;
	unsigned long flags;
	bool completed = false;

	if (WARN_ON(!req))
		return;

	spin_lock_irqsave(&req->lock, flags);
	list_del(&obj->list);
	obj->req = NULL;

	if (req->state == MEDIA_REQUEST_STATE_COMPLETE)
		goto unlock;

	if (WARN_ON(req->state == MEDIA_REQUEST_STATE_VALIDATING))
		goto unlock;

	if (req->state == MEDIA_REQUEST_STATE_CLEANING) {
		if (!obj->completed)
			req->num_incomplete_objects--;
		goto unlock;
	}

	if (WARN_ON(!req->num_incomplete_objects))
		goto unlock;

	req->num_incomplete_objects--;
	if (req->state == MEDIA_REQUEST_STATE_QUEUED &&
	    !req->num_incomplete_objects) {
		req->state = MEDIA_REQUEST_STATE_COMPLETE;
		completed = true;
		wake_up_interruptible_all(&req->poll_wait);
	}

unlock:
	spin_unlock_irqrestore(&req->lock, flags);
	if (obj->ops->unbind)
		obj->ops->unbind(obj);
	if (completed)
		media_request_put(req);
}
EXPORT_SYMBOL_GPL(media_request_object_unbind);

void media_request_object_complete(struct media_request_object *obj)
{
	struct media_request *req = obj->req;
	unsigned long flags;
	bool completed = false;

	spin_lock_irqsave(&req->lock, flags);
	if (obj->completed)
		goto unlock;
	obj->completed = true;
	if (WARN_ON(!req->num_incomplete_objects) ||
	    WARN_ON(req->state != MEDIA_REQUEST_STATE_QUEUED))
		goto unlock;

	if (!--req->num_incomplete_objects) {
		req->state = MEDIA_REQUEST_STATE_COMPLETE;
		wake_up_interruptible_all(&req->poll_wait);
		completed = true;
	}
unlock:
	spin_unlock_irqrestore(&req->lock, flags);
	if (completed)
		media_request_put(req);
}
EXPORT_SYMBOL_GPL(media_request_object_complete);
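
/*
 * Drivers call media_request_object_complete() from their completion
 * path, e.g. when a buffer bound to the request is done. Once the last
 * incomplete object completes, the request moves to the COMPLETE state,
 * pollers sleeping on the request fd are woken with EPOLLPRI, and the
 * reference taken in media_request_ioctl_queue() is dropped.
 */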