xref: /linux/drivers/media/mc/mc-request.c (revision c789a7f40288c19004f786a6da67c3733d38c6af)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Media device request objects
4  *
5  * Copyright 2018 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
6  * Copyright (C) 2018 Intel Corporation
7  * Copyright (C) 2018 Google, Inc.
8  *
9  * Author: Hans Verkuil <hverkuil@kernel.org>
10  * Author: Sakari Ailus <sakari.ailus@linux.intel.com>
11  */
12 
13 #include <linux/anon_inodes.h>
14 #include <linux/file.h>
15 #include <linux/refcount.h>
16 
17 #include <media/media-device.h>
18 #include <media/media-request.h>
19 
/*
 * Human-readable names for each media_request_state value, used only
 * for debug logging via media_request_state_str().
 */
static const char * const request_state[] = {
	[MEDIA_REQUEST_STATE_IDLE]	 = "idle",
	[MEDIA_REQUEST_STATE_VALIDATING] = "validating",
	[MEDIA_REQUEST_STATE_QUEUED]	 = "queued",
	[MEDIA_REQUEST_STATE_COMPLETE]	 = "complete",
	[MEDIA_REQUEST_STATE_CLEANING]	 = "cleaning",
	[MEDIA_REQUEST_STATE_UPDATING]	 = "updating",
};
28 
29 static const char *
30 media_request_state_str(enum media_request_state state)
31 {
32 	BUILD_BUG_ON(ARRAY_SIZE(request_state) != NR_OF_MEDIA_REQUEST_STATE);
33 
34 	if (WARN_ON(state >= ARRAY_SIZE(request_state)))
35 		return "invalid";
36 	return request_state[state];
37 }
38 
/*
 * Unbind and drop every object attached to the request and reset its
 * bookkeeping counters, readying it for reuse or for freeing.
 *
 * The caller must have moved the request into CLEANING state first and
 * must guarantee no concurrent users (release path holds the last ref;
 * the reinit ioctl checks access_count under the lock before switching
 * to CLEANING).
 */
static void media_request_clean(struct media_request *req)
{
	struct media_request_object *obj, *obj_safe;

	/* Just a sanity check. No other code path is allowed to change this. */
	WARN_ON(req->state != MEDIA_REQUEST_STATE_CLEANING);
	WARN_ON(req->updating_count);
	WARN_ON(req->access_count);

	/* Safe iteration: unbind removes each object from req->objects. */
	list_for_each_entry_safe(obj, obj_safe, &req->objects, list) {
		media_request_object_unbind(obj);
		media_request_object_put(obj);
	}

	req->updating_count = 0;
	req->access_count = 0;
	/* Unbinding in CLEANING state should have drained this to zero. */
	WARN_ON(req->num_incomplete_objects);
	req->num_incomplete_objects = 0;
	req->manual_completion = false;
	wake_up_interruptible_all(&req->poll_wait);
}
60 
/*
 * kref release callback: called when the last reference to the request
 * is dropped. Cleans out all objects and frees the request, using the
 * driver's req_free op when one was provided at allocation time.
 */
static void media_request_release(struct kref *kref)
{
	struct media_request *req =
		container_of(kref, struct media_request, kref);
	struct media_device *mdev = req->mdev;

	dev_dbg(mdev->dev, "request: release %s\n", req->debug_str);

	/* No other users, no need for a spinlock */
	req->state = MEDIA_REQUEST_STATE_CLEANING;

	media_request_clean(req);

	/* req_alloc and req_free come in pairs (see media_request_alloc). */
	if (mdev->ops->req_free)
		mdev->ops->req_free(req);
	else
		kfree(req);
}
79 
/* Drop a reference to the request; frees it when the count hits zero. */
void media_request_put(struct media_request *req)
{
	kref_put(&req->kref, media_request_release);
}
EXPORT_SYMBOL_GPL(media_request_put);
85 
86 static int media_request_close(struct inode *inode, struct file *filp)
87 {
88 	struct media_request *req = filp->private_data;
89 
90 	media_request_put(req);
91 	return 0;
92 }
93 
94 static __poll_t media_request_poll(struct file *filp,
95 				   struct poll_table_struct *wait)
96 {
97 	struct media_request *req = filp->private_data;
98 	unsigned long flags;
99 	__poll_t ret = 0;
100 
101 	if (!(poll_requested_events(wait) & EPOLLPRI))
102 		return 0;
103 
104 	poll_wait(filp, &req->poll_wait, wait);
105 	spin_lock_irqsave(&req->lock, flags);
106 	if (req->state == MEDIA_REQUEST_STATE_COMPLETE) {
107 		ret = EPOLLPRI;
108 		goto unlock;
109 	}
110 	if (req->state != MEDIA_REQUEST_STATE_QUEUED) {
111 		ret = EPOLLERR;
112 		goto unlock;
113 	}
114 
115 unlock:
116 	spin_unlock_irqrestore(&req->lock, flags);
117 	return ret;
118 }
119 
/*
 * MEDIA_REQUEST_IOC_QUEUE: validate the request and hand it to the
 * driver for processing.
 *
 * Takes an extra reference for the lifetime of the queued request; on
 * success that reference is dropped when the request later transitions
 * to COMPLETE (see media_request_object_unbind/complete), on failure it
 * is dropped here. Returns -EBUSY if the request is not idle, or the
 * error from the driver's req_validate op.
 */
static long media_request_ioctl_queue(struct media_request *req)
{
	struct media_device *mdev = req->mdev;
	enum media_request_state state;
	unsigned long flags;
	int ret;

	dev_dbg(mdev->dev, "request: queue %s\n", req->debug_str);

	/*
	 * Ensure the request that is validated will be the one that gets queued
	 * next by serialising the queueing process. This mutex is also used
	 * to serialize with canceling a vb2 queue and with setting values such
	 * as controls in a request.
	 */
	mutex_lock(&mdev->req_queue_mutex);

	media_request_get(req);

	/* Only an IDLE request may enter VALIDATING; anything else is busy. */
	spin_lock_irqsave(&req->lock, flags);
	if (req->state == MEDIA_REQUEST_STATE_IDLE)
		req->state = MEDIA_REQUEST_STATE_VALIDATING;
	state = req->state;
	spin_unlock_irqrestore(&req->lock, flags);
	if (state != MEDIA_REQUEST_STATE_VALIDATING) {
		dev_dbg(mdev->dev,
			"request: unable to queue %s, request in state %s\n",
			req->debug_str, media_request_state_str(state));
		media_request_put(req);
		mutex_unlock(&mdev->req_queue_mutex);
		return -EBUSY;
	}

	ret = mdev->ops->req_validate(req);

	/*
	 * If the req_validate was successful, then we mark the state as QUEUED
	 * and call req_queue. The reason we set the state first is that this
	 * allows req_queue to unbind or complete the queued objects in case
	 * they are immediately 'consumed'. State changes from QUEUED to another
	 * state can only happen if either the driver changes the state or if
	 * the user cancels the vb2 queue. The driver can only change the state
	 * after each object is queued through the req_queue op (and note that
	 * that op cannot fail), so setting the state to QUEUED up front is
	 * safe.
	 *
	 * The other reason for changing the state is if the vb2 queue is
	 * canceled, and that uses the req_queue_mutex which is still locked
	 * while req_queue is called, so that's safe as well.
	 */
	spin_lock_irqsave(&req->lock, flags);
	req->state = ret ? MEDIA_REQUEST_STATE_IDLE
			 : MEDIA_REQUEST_STATE_QUEUED;
	spin_unlock_irqrestore(&req->lock, flags);

	if (!ret)
		mdev->ops->req_queue(req);

	mutex_unlock(&mdev->req_queue_mutex);

	if (ret) {
		/* Validation failed: drop the reference we took above. */
		dev_dbg(mdev->dev, "request: can't queue %s (%d)\n",
			req->debug_str, ret);
		media_request_put(req);
	}

	return ret;
}
188 
/*
 * MEDIA_REQUEST_IOC_REINIT: drop all objects from the request and
 * return it to the IDLE state so it can be reused.
 *
 * Only allowed when the request is IDLE or COMPLETE and nobody is
 * currently accessing it; returns -EBUSY otherwise.
 */
static long media_request_ioctl_reinit(struct media_request *req)
{
	struct media_device *mdev = req->mdev;
	unsigned long flags;

	spin_lock_irqsave(&req->lock, flags);
	if (req->state != MEDIA_REQUEST_STATE_IDLE &&
	    req->state != MEDIA_REQUEST_STATE_COMPLETE) {
		dev_dbg(mdev->dev,
			"request: %s not in idle or complete state, cannot reinit\n",
			req->debug_str);
		spin_unlock_irqrestore(&req->lock, flags);
		return -EBUSY;
	}
	if (req->access_count) {
		dev_dbg(mdev->dev,
			"request: %s is being accessed, cannot reinit\n",
			req->debug_str);
		spin_unlock_irqrestore(&req->lock, flags);
		return -EBUSY;
	}
	/*
	 * Switch to CLEANING under the lock so concurrent state checks
	 * (e.g. object bind/unbind) see the request as being torn down.
	 */
	req->state = MEDIA_REQUEST_STATE_CLEANING;
	spin_unlock_irqrestore(&req->lock, flags);

	media_request_clean(req);

	spin_lock_irqsave(&req->lock, flags);
	req->state = MEDIA_REQUEST_STATE_IDLE;
	spin_unlock_irqrestore(&req->lock, flags);

	return 0;
}
221 
222 static long media_request_ioctl(struct file *filp, unsigned int cmd,
223 				unsigned long arg)
224 {
225 	struct media_request *req = filp->private_data;
226 
227 	switch (cmd) {
228 	case MEDIA_REQUEST_IOC_QUEUE:
229 		return media_request_ioctl_queue(req);
230 	case MEDIA_REQUEST_IOC_REINIT:
231 		return media_request_ioctl_reinit(req);
232 	default:
233 		return -ENOIOCTLCMD;
234 	}
235 }
236 
/*
 * File operations for the anonymous request file descriptor returned by
 * MEDIA_IOC_REQUEST_ALLOC. The ioctls take no pointer arguments, so the
 * same handler serves both native and compat paths.
 */
static const struct file_operations request_fops = {
	.owner = THIS_MODULE,
	.poll = media_request_poll,
	.unlocked_ioctl = media_request_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = media_request_ioctl,
#endif /* CONFIG_COMPAT */
	.release = media_request_close,
};
246 
/*
 * Look up the request behind @request_fd, verify it belongs to @mdev,
 * and return it with an extra reference taken (caller must put it).
 *
 * Returns ERR_PTR(-EBADR) if @mdev does not support requests, and
 * ERR_PTR(-EINVAL) if the fd is not a request fd of this device.
 */
struct media_request *
media_request_get_by_fd(struct media_device *mdev, int request_fd)
{
	struct media_request *req;

	/* Request support requires both the validate and queue ops. */
	if (!mdev || !mdev->ops ||
	    !mdev->ops->req_validate || !mdev->ops->req_queue)
		return ERR_PTR(-EBADR);

	CLASS(fd, f)(request_fd);
	if (fd_empty(f))
		goto err;

	/* The f_op pointer identifies the fd as a request fd. */
	if (fd_file(f)->f_op != &request_fops)
		goto err;
	req = fd_file(f)->private_data;
	if (req->mdev != mdev)
		goto err;

	/*
	 * Note: as long as someone has an open filehandle of the request,
	 * the request can never be released. The file reference held by
	 * CLASS(fd) above ensures that even if userspace closes the
	 * request filehandle, the release() fop won't be called, so the
	 * media_request_get() always succeeds and there is no race
	 * condition where the request was released before
	 * media_request_get() is called.
	 */
	media_request_get(req);
	return req;

err:
	dev_dbg(mdev->dev, "cannot find request_fd %d\n", request_fd);
	return ERR_PTR(-EINVAL);
}
EXPORT_SYMBOL_GPL(media_request_get_by_fd);
281 EXPORT_SYMBOL_GPL(media_request_get_by_fd);
282 
/*
 * Allocate a new request for @mdev and return its file descriptor in
 * *@alloc_fd. The fd owns one reference on the request; it is dropped
 * by media_request_close() when the fd is closed.
 *
 * Returns 0 on success or a negative error code.
 */
int media_request_alloc(struct media_device *mdev, int *alloc_fd)
{
	struct media_request *req;
	int ret;

	/* Either both are NULL or both are non-NULL */
	if (WARN_ON(!mdev->ops->req_alloc ^ !mdev->ops->req_free))
		return -ENOMEM;

	/* Drivers may embed the request in a larger structure. */
	if (mdev->ops->req_alloc)
		req = mdev->ops->req_alloc(mdev);
	else
		req = kzalloc(sizeof(*req), GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	req->mdev = mdev;
	req->state = MEDIA_REQUEST_STATE_IDLE;
	req->num_incomplete_objects = 0;
	req->manual_completion = false;
	kref_init(&req->kref);
	INIT_LIST_HEAD(&req->objects);
	spin_lock_init(&req->lock);
	init_waitqueue_head(&req->poll_wait);
	req->updating_count = 0;
	req->access_count = 0;

	/* Reserve an fd and create the backing anon-inode file. */
	FD_PREPARE(fdf, O_CLOEXEC,
		   anon_inode_getfile("request", &request_fops, NULL,
				      O_CLOEXEC));
	if (fdf.err) {
		ret = fdf.err;
		goto err_free_req;
	}

	fd_prepare_file(fdf)->private_data = req;

	/* After fd_publish() userspace can see and use the fd. */
	*alloc_fd = fd_publish(fdf);

	snprintf(req->debug_str, sizeof(req->debug_str), "%u:%d",
		 atomic_inc_return(&mdev->request_id), *alloc_fd);
	dev_dbg(mdev->dev, "request: allocated %s\n", req->debug_str);

	return 0;

err_free_req:
	/* Mirror the allocation choice made above. */
	if (mdev->ops->req_free)
		mdev->ops->req_free(req);
	else
		kfree(req);

	return ret;
}
336 
/*
 * kref release callback for a request object: called when the last
 * reference to the object is dropped.
 *
 * The object is expected to be unbound by this point; if it is still
 * bound to a request that's a bug, so WARN and unbind it before
 * releasing to avoid leaving a dangling list entry.
 */
static void media_request_object_release(struct kref *kref)
{
	struct media_request_object *obj =
		container_of(kref, struct media_request_object, kref);
	struct media_request *req = obj->req;

	if (WARN_ON(req))
		media_request_object_unbind(obj);
	obj->ops->release(obj);
}
347 
348 struct media_request_object *
349 media_request_object_find(struct media_request *req,
350 			  const struct media_request_object_ops *ops,
351 			  void *priv)
352 {
353 	struct media_request_object *obj;
354 	struct media_request_object *found = NULL;
355 	unsigned long flags;
356 
357 	if (WARN_ON(!ops || !priv))
358 		return NULL;
359 
360 	spin_lock_irqsave(&req->lock, flags);
361 	list_for_each_entry(obj, &req->objects, list) {
362 		if (obj->ops == ops && obj->priv == priv) {
363 			media_request_object_get(obj);
364 			found = obj;
365 			break;
366 		}
367 	}
368 	spin_unlock_irqrestore(&req->lock, flags);
369 	return found;
370 }
371 EXPORT_SYMBOL_GPL(media_request_object_find);
372 
/* Drop a reference to the object; releases it when the count hits zero. */
void media_request_object_put(struct media_request_object *obj)
{
	kref_put(&obj->kref, media_request_object_release);
}
EXPORT_SYMBOL_GPL(media_request_object_put);
378 
379 void media_request_object_init(struct media_request_object *obj)
380 {
381 	obj->ops = NULL;
382 	obj->req = NULL;
383 	obj->priv = NULL;
384 	obj->completed = false;
385 	INIT_LIST_HEAD(&obj->list);
386 	kref_init(&obj->kref);
387 }
388 EXPORT_SYMBOL_GPL(media_request_object_init);
389 
/*
 * Bind @obj to @req: set its ops/priv, add it to the request's object
 * list and count it as incomplete.
 *
 * Only valid while the request is UPDATING (objects being added from
 * userspace) or QUEUED (drivers adding objects while processing).
 * Buffer objects go to the tail so non-buffer objects (e.g. controls)
 * are applied first when the list is walked in order.
 *
 * Returns 0 on success, -EBADR if @ops has no release callback, or
 * -EBUSY if the request is in the wrong state.
 */
int media_request_object_bind(struct media_request *req,
			      const struct media_request_object_ops *ops,
			      void *priv, bool is_buffer,
			      struct media_request_object *obj)
{
	unsigned long flags;
	int ret = -EBUSY;

	/* Every object must be releasable once the request drops it. */
	if (WARN_ON(!ops->release))
		return -EBADR;

	spin_lock_irqsave(&req->lock, flags);

	if (WARN_ON(req->state != MEDIA_REQUEST_STATE_UPDATING &&
		    req->state != MEDIA_REQUEST_STATE_QUEUED))
		goto unlock;

	obj->req = req;
	obj->ops = ops;
	obj->priv = priv;

	/* Buffers at the tail, everything else at the head. */
	if (is_buffer)
		list_add_tail(&obj->list, &req->objects);
	else
		list_add(&obj->list, &req->objects);
	req->num_incomplete_objects++;
	ret = 0;

unlock:
	spin_unlock_irqrestore(&req->lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(media_request_object_bind);
423 
/*
 * Unbind @obj from its request: remove it from the object list, adjust
 * the incomplete-object accounting for the current request state, and
 * call the object's unbind callback.
 *
 * If this was the last incomplete object of a QUEUED request (and
 * manual completion is not in use), the request transitions to
 * COMPLETE here, pollers are woken, and the queue's reference on the
 * request is dropped.
 */
void media_request_object_unbind(struct media_request_object *obj)
{
	struct media_request *req = obj->req;
	unsigned long flags;
	bool completed = false;

	if (WARN_ON(!req))
		return;

	spin_lock_irqsave(&req->lock, flags);
	list_del(&obj->list);
	obj->req = NULL;

	/* Already complete: counters no longer matter. */
	if (req->state == MEDIA_REQUEST_STATE_COMPLETE)
		goto unlock;

	/* Objects must not be unbound while the request is validating. */
	if (WARN_ON(req->state == MEDIA_REQUEST_STATE_VALIDATING))
		goto unlock;

	/*
	 * During cleanup only keep the incomplete count consistent;
	 * the request is being torn down, so no completion handling.
	 */
	if (req->state == MEDIA_REQUEST_STATE_CLEANING) {
		if (!obj->completed)
			req->num_incomplete_objects--;
		goto unlock;
	}

	if (WARN_ON(!req->num_incomplete_objects))
		goto unlock;

	req->num_incomplete_objects--;
	if (req->state == MEDIA_REQUEST_STATE_QUEUED &&
	    !req->num_incomplete_objects && !req->manual_completion) {
		req->state = MEDIA_REQUEST_STATE_COMPLETE;
		completed = true;
		wake_up_interruptible_all(&req->poll_wait);
	}

unlock:
	spin_unlock_irqrestore(&req->lock, flags);
	/* Call the driver callback outside the spinlock. */
	if (obj->ops->unbind)
		obj->ops->unbind(obj);
	/* Drop the reference taken when the request was queued. */
	if (completed)
		media_request_put(req);
}
EXPORT_SYMBOL_GPL(media_request_object_unbind);
468 
/*
 * Mark @obj as completed. Completing the last incomplete object of a
 * QUEUED request (unless manual completion is in use) moves the request
 * to COMPLETE, wakes pollers, and drops the queue's reference on it.
 * Completing an already-completed object is a no-op.
 */
void media_request_object_complete(struct media_request_object *obj)
{
	struct media_request *req = obj->req;
	unsigned long flags;
	bool completed = false;

	spin_lock_irqsave(&req->lock, flags);
	if (obj->completed)
		goto unlock;
	obj->completed = true;
	/* Objects can only complete while their request is queued. */
	if (WARN_ON(!req->num_incomplete_objects) ||
	    WARN_ON(req->state != MEDIA_REQUEST_STATE_QUEUED))
		goto unlock;

	if (!--req->num_incomplete_objects && !req->manual_completion) {
		req->state = MEDIA_REQUEST_STATE_COMPLETE;
		wake_up_interruptible_all(&req->poll_wait);
		completed = true;
	}
unlock:
	spin_unlock_irqrestore(&req->lock, flags);
	/* Drop the queue's reference outside the spinlock. */
	if (completed)
		media_request_put(req);
}
EXPORT_SYMBOL_GPL(media_request_object_complete);
494 
/*
 * Explicitly complete a request that was marked for manual completion.
 *
 * Drivers that set req->manual_completion call this once all object
 * completions have happened; it clears the flag, moves the request to
 * COMPLETE, wakes pollers, and drops the queue's reference. WARNs (and
 * does nothing further) if the request is not queued, was not marked
 * for manual completion, or still has incomplete objects.
 */
void media_request_manual_complete(struct media_request *req)
{
	bool completed = false;
	unsigned long flags;

	if (WARN_ON_ONCE(!req))
		return;

	spin_lock_irqsave(&req->lock, flags);

	if (WARN_ON_ONCE(!req->manual_completion))
		goto unlock;

	if (WARN_ON_ONCE(req->state != MEDIA_REQUEST_STATE_QUEUED))
		goto unlock;

	req->manual_completion = false;
	/*
	 * It is expected that all other objects in this request are
	 * completed when this function is called. WARN if that is
	 * not the case.
	 */
	if (!WARN_ON(req->num_incomplete_objects)) {
		req->state = MEDIA_REQUEST_STATE_COMPLETE;
		wake_up_interruptible_all(&req->poll_wait);
		completed = true;
	}

unlock:
	spin_unlock_irqrestore(&req->lock, flags);
	/* Drop the queue's reference outside the spinlock. */
	if (completed)
		media_request_put(req);
}
EXPORT_SYMBOL_GPL(media_request_manual_complete);
529