xref: /linux/drivers/media/mc/mc-request.c (revision 69050f8d6d075dc01af7a5f2f550a8067510366f)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Media device request objects
4  *
5  * Copyright 2018 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
6  * Copyright (C) 2018 Intel Corporation
7  * Copyright (C) 2018 Google, Inc.
8  *
9  * Author: Hans Verkuil <hverkuil@kernel.org>
10  * Author: Sakari Ailus <sakari.ailus@linux.intel.com>
11  */
12 
13 #include <linux/anon_inodes.h>
14 #include <linux/file.h>
15 #include <linux/refcount.h>
16 
17 #include <media/media-device.h>
18 #include <media/media-request.h>
19 
/*
 * Human-readable name for each media request state, indexed by
 * enum media_request_state. Must stay in sync with the enum; the
 * BUILD_BUG_ON() in media_request_state_str() checks the array size.
 */
static const char * const request_state[] = {
	[MEDIA_REQUEST_STATE_IDLE]	 = "idle",
	[MEDIA_REQUEST_STATE_VALIDATING] = "validating",
	[MEDIA_REQUEST_STATE_QUEUED]	 = "queued",
	[MEDIA_REQUEST_STATE_COMPLETE]	 = "complete",
	[MEDIA_REQUEST_STATE_CLEANING]	 = "cleaning",
	[MEDIA_REQUEST_STATE_UPDATING]	 = "updating",
};
28 
29 static const char *
30 media_request_state_str(enum media_request_state state)
31 {
32 	BUILD_BUG_ON(ARRAY_SIZE(request_state) != NR_OF_MEDIA_REQUEST_STATE);
33 
34 	if (WARN_ON(state >= ARRAY_SIZE(request_state)))
35 		return "invalid";
36 	return request_state[state];
37 }
38 
/*
 * Unbind and put every object bound to the request and reset the
 * bookkeeping counters so the request can be freed or reused.
 *
 * Callers must have moved the request to the CLEANING state first and
 * must guarantee no other code path can reach it (last kref dropped,
 * or the REINIT ioctl transitioned it under the lock).
 */
static void media_request_clean(struct media_request *req)
{
	struct media_request_object *obj, *obj_safe;

	/* Just a sanity check. No other code path is allowed to change this. */
	WARN_ON(req->state != MEDIA_REQUEST_STATE_CLEANING);
	WARN_ON(req->updating_count);
	WARN_ON(req->access_count);

	/* _safe variant: unbind removes each object from the list. */
	list_for_each_entry_safe(obj, obj_safe, &req->objects, list) {
		media_request_object_unbind(obj);
		media_request_object_put(obj);
	}

	req->updating_count = 0;
	req->access_count = 0;
	/* Unbinding in the CLEANING state should have drained this count. */
	WARN_ON(req->num_incomplete_objects);
	req->num_incomplete_objects = 0;
	req->manual_completion = false;
	/* Wake pollers so they re-evaluate the request state. */
	wake_up_interruptible_all(&req->poll_wait);
}
60 
/*
 * kref release callback: clean up the request and free it.
 *
 * Runs only when the last reference is dropped, hence no locking.
 * The request is freed via the driver's req_free op when one exists
 * (paired with req_alloc in media_request_alloc()), else kfree().
 */
static void media_request_release(struct kref *kref)
{
	struct media_request *req =
		container_of(kref, struct media_request, kref);
	struct media_device *mdev = req->mdev;

	dev_dbg(mdev->dev, "request: release %s\n", req->debug_str);

	/* No other users, no need for a spinlock */
	req->state = MEDIA_REQUEST_STATE_CLEANING;

	media_request_clean(req);

	if (mdev->ops->req_free)
		mdev->ops->req_free(req);
	else
		kfree(req);
	/* req is freed at this point; only the saved mdev pointer is used. */
	atomic_dec(&mdev->num_requests);
}
80 
/* Drop one reference to the request; the last put releases it. */
void media_request_put(struct media_request *req)
{
	kref_put(&req->kref, media_request_release);
}
EXPORT_SYMBOL_GPL(media_request_put);
86 
/* release() fop for a request fd: drop the reference the fd held. */
static int media_request_close(struct inode *inode, struct file *filp)
{
	struct media_request *req = filp->private_data;

	media_request_put(req);
	return 0;
}
94 
95 static __poll_t media_request_poll(struct file *filp,
96 				   struct poll_table_struct *wait)
97 {
98 	struct media_request *req = filp->private_data;
99 	unsigned long flags;
100 	__poll_t ret = 0;
101 
102 	if (!(poll_requested_events(wait) & EPOLLPRI))
103 		return 0;
104 
105 	poll_wait(filp, &req->poll_wait, wait);
106 	spin_lock_irqsave(&req->lock, flags);
107 	if (req->state == MEDIA_REQUEST_STATE_COMPLETE) {
108 		ret = EPOLLPRI;
109 		goto unlock;
110 	}
111 	if (req->state != MEDIA_REQUEST_STATE_QUEUED) {
112 		ret = EPOLLERR;
113 		goto unlock;
114 	}
115 
116 unlock:
117 	spin_unlock_irqrestore(&req->lock, flags);
118 	return ret;
119 }
120 
/*
 * MEDIA_REQUEST_IOC_QUEUE handler: validate the request and, on
 * success, hand it to the driver for queueing.
 *
 * Returns 0 on success, -EBUSY if the request was not in the IDLE
 * state, or the error returned by the driver's req_validate op.
 */
static long media_request_ioctl_queue(struct media_request *req)
{
	struct media_device *mdev = req->mdev;
	enum media_request_state state;
	unsigned long flags;
	int ret;

	dev_dbg(mdev->dev, "request: queue %s\n", req->debug_str);

	/*
	 * Ensure the request that is validated will be the one that gets queued
	 * next by serialising the queueing process. This mutex is also used
	 * to serialize with canceling a vb2 queue and with setting values such
	 * as controls in a request.
	 */
	mutex_lock(&mdev->req_queue_mutex);

	/*
	 * Reference held for the queued request: dropped below on failure,
	 * otherwise when the request completes (see object_unbind/complete).
	 */
	media_request_get(req);

	/* Only an IDLE request may move on to validation. */
	spin_lock_irqsave(&req->lock, flags);
	if (req->state == MEDIA_REQUEST_STATE_IDLE)
		req->state = MEDIA_REQUEST_STATE_VALIDATING;
	state = req->state;
	spin_unlock_irqrestore(&req->lock, flags);
	if (state != MEDIA_REQUEST_STATE_VALIDATING) {
		dev_dbg(mdev->dev,
			"request: unable to queue %s, request in state %s\n",
			req->debug_str, media_request_state_str(state));
		media_request_put(req);
		mutex_unlock(&mdev->req_queue_mutex);
		return -EBUSY;
	}

	ret = mdev->ops->req_validate(req);

	/*
	 * If the req_validate was successful, then we mark the state as QUEUED
	 * and call req_queue. The reason we set the state first is that this
	 * allows req_queue to unbind or complete the queued objects in case
	 * they are immediately 'consumed'. State changes from QUEUED to another
	 * state can only happen if either the driver changes the state or if
	 * the user cancels the vb2 queue. The driver can only change the state
	 * after each object is queued through the req_queue op (and note that
	 * that op cannot fail), so setting the state to QUEUED up front is
	 * safe.
	 *
	 * The other reason for changing the state is if the vb2 queue is
	 * canceled, and that uses the req_queue_mutex which is still locked
	 * while req_queue is called, so that's safe as well.
	 */
	spin_lock_irqsave(&req->lock, flags);
	req->state = ret ? MEDIA_REQUEST_STATE_IDLE
			 : MEDIA_REQUEST_STATE_QUEUED;
	spin_unlock_irqrestore(&req->lock, flags);

	if (!ret)
		mdev->ops->req_queue(req);

	mutex_unlock(&mdev->req_queue_mutex);

	if (ret) {
		dev_dbg(mdev->dev, "request: can't queue %s (%d)\n",
			req->debug_str, ret);
		/* Validation failed: drop the reference taken above. */
		media_request_put(req);
	}

	return ret;
}
189 
190 static long media_request_ioctl_reinit(struct media_request *req)
191 {
192 	struct media_device *mdev = req->mdev;
193 	unsigned long flags;
194 
195 	spin_lock_irqsave(&req->lock, flags);
196 	if (req->state != MEDIA_REQUEST_STATE_IDLE &&
197 	    req->state != MEDIA_REQUEST_STATE_COMPLETE) {
198 		dev_dbg(mdev->dev,
199 			"request: %s not in idle or complete state, cannot reinit\n",
200 			req->debug_str);
201 		spin_unlock_irqrestore(&req->lock, flags);
202 		return -EBUSY;
203 	}
204 	if (req->access_count) {
205 		dev_dbg(mdev->dev,
206 			"request: %s is being accessed, cannot reinit\n",
207 			req->debug_str);
208 		spin_unlock_irqrestore(&req->lock, flags);
209 		return -EBUSY;
210 	}
211 	req->state = MEDIA_REQUEST_STATE_CLEANING;
212 	spin_unlock_irqrestore(&req->lock, flags);
213 
214 	media_request_clean(req);
215 
216 	spin_lock_irqsave(&req->lock, flags);
217 	req->state = MEDIA_REQUEST_STATE_IDLE;
218 	spin_unlock_irqrestore(&req->lock, flags);
219 
220 	return 0;
221 }
222 
223 static long media_request_ioctl(struct file *filp, unsigned int cmd,
224 				unsigned long arg)
225 {
226 	struct media_request *req = filp->private_data;
227 
228 	switch (cmd) {
229 	case MEDIA_REQUEST_IOC_QUEUE:
230 		return media_request_ioctl_queue(req);
231 	case MEDIA_REQUEST_IOC_REINIT:
232 		return media_request_ioctl_reinit(req);
233 	default:
234 		return -ENOIOCTLCMD;
235 	}
236 }
237 
/*
 * File operations for the anonymous request fd created by
 * media_request_alloc(). The same ioctl handler serves compat_ioctl
 * since the request ioctls carry no argument.
 */
static const struct file_operations request_fops = {
	.owner = THIS_MODULE,
	.poll = media_request_poll,
	.unlocked_ioctl = media_request_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = media_request_ioctl,
#endif /* CONFIG_COMPAT */
	.release = media_request_close,
};
247 
/*
 * Look up a request by file descriptor and return it with an extra
 * reference held, or an ERR_PTR: -EBADR when the media device lacks
 * the required request ops, -EINVAL when the fd is not a request fd
 * belonging to this media device.
 */
struct media_request *
media_request_get_by_fd(struct media_device *mdev, int request_fd)
{
	struct media_request *req;

	if (!mdev || !mdev->ops ||
	    !mdev->ops->req_validate || !mdev->ops->req_queue)
		return ERR_PTR(-EBADR);

	/* Scoped fd guard: the file reference is dropped on scope exit. */
	CLASS(fd, f)(request_fd);
	if (fd_empty(f))
		goto err;

	/* Must be one of our request fds, and for this media device. */
	if (fd_file(f)->f_op != &request_fops)
		goto err;
	req = fd_file(f)->private_data;
	if (req->mdev != mdev)
		goto err;

	/*
	 * Note: as long as someone has an open filehandle of the request,
	 * the request can never be released. The file reference held by
	 * the fd class above ensures that even if userspace closes the
	 * request filehandle, the release() fop won't be called, so the
	 * media_request_get() always succeeds and there is no race
	 * condition where the request was released before
	 * media_request_get() is called.
	 */
	media_request_get(req);
	return req;

err:
	dev_dbg(mdev->dev, "cannot find request_fd %d\n", request_fd);
	return ERR_PTR(-EINVAL);
}
EXPORT_SYMBOL_GPL(media_request_get_by_fd);
283 
/*
 * Allocate a new request for @mdev and install it behind a fresh file
 * descriptor, returned through @alloc_fd.
 *
 * The driver's req_alloc/req_free ops are used when provided (the two
 * must be provided as a pair), otherwise plain kzalloc/kfree.
 * Returns 0 on success or a negative error code.
 */
int media_request_alloc(struct media_device *mdev, int *alloc_fd)
{
	struct media_request *req;
	int ret;

	/* Either both are NULL or both are non-NULL */
	if (WARN_ON(!mdev->ops->req_alloc ^ !mdev->ops->req_free))
		return -ENOMEM;

	if (mdev->ops->req_alloc)
		req = mdev->ops->req_alloc(mdev);
	else
		req = kzalloc_obj(*req, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	/* Fully initialize before the fd can expose the request. */
	req->mdev = mdev;
	req->state = MEDIA_REQUEST_STATE_IDLE;
	req->num_incomplete_objects = 0;
	req->manual_completion = false;
	kref_init(&req->kref);
	INIT_LIST_HEAD(&req->objects);
	spin_lock_init(&req->lock);
	init_waitqueue_head(&req->poll_wait);
	req->updating_count = 0;
	req->access_count = 0;

	/* Reserve an fd and create the anon inode file before publishing. */
	FD_PREPARE(fdf, O_CLOEXEC,
		   anon_inode_getfile("request", &request_fops, NULL,
				      O_CLOEXEC));
	if (fdf.err) {
		ret = fdf.err;
		goto err_free_req;
	}

	fd_prepare_file(fdf)->private_data = req;

	/* debug_str is "<request id>:<fd>", used in the dev_dbg messages. */
	snprintf(req->debug_str, sizeof(req->debug_str), "%u:%d",
		 atomic_inc_return(&mdev->request_id), fd_prepare_fd(fdf));
	atomic_inc(&mdev->num_requests);
	dev_dbg(mdev->dev, "request: allocated %s\n", req->debug_str);

	/* Make the fd visible to userspace only when fully set up. */
	*alloc_fd = fd_publish(fdf);

	return 0;

err_free_req:
	if (mdev->ops->req_free)
		mdev->ops->req_free(req);
	else
		kfree(req);

	return ret;
}
338 
/*
 * kref release callback for a request object.
 *
 * The object should already be unbound from its request; if it is not
 * (a driver bug), warn and unbind here before calling the object's
 * release op.
 */
static void media_request_object_release(struct kref *kref)
{
	struct media_request_object *obj =
		container_of(kref, struct media_request_object, kref);
	struct media_request *req = obj->req;
	struct media_device *mdev = obj->mdev;

	if (WARN_ON(req))
		media_request_object_unbind(obj);
	obj->ops->release(obj);
	/* obj may be freed by release(); use the saved mdev pointer. */
	atomic_dec(&mdev->num_request_objects);
}
351 
352 struct media_request_object *
353 media_request_object_find(struct media_request *req,
354 			  const struct media_request_object_ops *ops,
355 			  void *priv)
356 {
357 	struct media_request_object *obj;
358 	struct media_request_object *found = NULL;
359 	unsigned long flags;
360 
361 	if (WARN_ON(!ops || !priv))
362 		return NULL;
363 
364 	spin_lock_irqsave(&req->lock, flags);
365 	list_for_each_entry(obj, &req->objects, list) {
366 		if (obj->ops == ops && obj->priv == priv) {
367 			media_request_object_get(obj);
368 			found = obj;
369 			break;
370 		}
371 	}
372 	spin_unlock_irqrestore(&req->lock, flags);
373 	return found;
374 }
375 EXPORT_SYMBOL_GPL(media_request_object_find);
376 
/* Drop one reference to a request object; the last put releases it. */
void media_request_object_put(struct media_request_object *obj)
{
	kref_put(&obj->kref, media_request_object_release);
}
EXPORT_SYMBOL_GPL(media_request_object_put);
382 
383 void media_request_object_init(struct media_request_object *obj)
384 {
385 	obj->ops = NULL;
386 	obj->req = NULL;
387 	obj->priv = NULL;
388 	obj->completed = false;
389 	INIT_LIST_HEAD(&obj->list);
390 	kref_init(&obj->kref);
391 }
392 EXPORT_SYMBOL_GPL(media_request_object_init);
393 
/*
 * Bind @obj to @req with the given ops and driver-private pointer.
 *
 * Binding is only allowed while the request is UPDATING or QUEUED.
 * Buffer objects are placed at the tail of the object list and other
 * objects at the head, so non-buffer objects precede buffers when the
 * list is walked in order (presumably so controls are applied before
 * buffers — TODO confirm against the req_queue implementations).
 *
 * Returns 0 on success, -EBADR when ops->release is missing, or
 * -EBUSY when the request is in the wrong state.
 */
int media_request_object_bind(struct media_request *req,
			      const struct media_request_object_ops *ops,
			      void *priv, bool is_buffer,
			      struct media_request_object *obj)
{
	unsigned long flags;
	int ret = -EBUSY;

	if (WARN_ON(!ops->release))
		return -EBADR;

	spin_lock_irqsave(&req->lock, flags);

	if (WARN_ON(req->state != MEDIA_REQUEST_STATE_UPDATING &&
		    req->state != MEDIA_REQUEST_STATE_QUEUED))
		goto unlock;

	obj->req = req;
	obj->ops = ops;
	obj->priv = priv;
	obj->mdev = req->mdev;

	/* Buffers at the tail, everything else at the head. */
	if (is_buffer)
		list_add_tail(&obj->list, &req->objects);
	else
		list_add(&obj->list, &req->objects);
	req->num_incomplete_objects++;
	ret = 0;
	atomic_inc(&obj->mdev->num_request_objects);

unlock:
	spin_unlock_irqrestore(&req->lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(media_request_object_bind);
429 
/*
 * Unbind @obj from its request: remove it from the object list and
 * update the request's completion accounting.
 *
 * When this removes the last incomplete object of a QUEUED request
 * (and no manual completion is pending) the request becomes COMPLETE,
 * pollers are woken and the reference taken at queue time is dropped.
 */
void media_request_object_unbind(struct media_request_object *obj)
{
	struct media_request *req = obj->req;
	unsigned long flags;
	bool completed = false;

	if (WARN_ON(!req))
		return;

	spin_lock_irqsave(&req->lock, flags);
	list_del(&obj->list);
	obj->req = NULL;

	/* Already complete: accounting was finalized at completion time. */
	if (req->state == MEDIA_REQUEST_STATE_COMPLETE)
		goto unlock;

	/* Objects must not be unbound while the request is validating. */
	if (WARN_ON(req->state == MEDIA_REQUEST_STATE_VALIDATING))
		goto unlock;

	/* Cleaning: only keep the incomplete-object count consistent. */
	if (req->state == MEDIA_REQUEST_STATE_CLEANING) {
		if (!obj->completed)
			req->num_incomplete_objects--;
		goto unlock;
	}

	if (WARN_ON(!req->num_incomplete_objects))
		goto unlock;

	req->num_incomplete_objects--;
	if (req->state == MEDIA_REQUEST_STATE_QUEUED &&
	    !req->num_incomplete_objects && !req->manual_completion) {
		req->state = MEDIA_REQUEST_STATE_COMPLETE;
		completed = true;
		wake_up_interruptible_all(&req->poll_wait);
	}

unlock:
	spin_unlock_irqrestore(&req->lock, flags);
	/* Call the driver's unbind op outside the spinlock. */
	if (obj->ops->unbind)
		obj->ops->unbind(obj);
	/* Drop the reference held by the queued request. */
	if (completed)
		media_request_put(req);
}
EXPORT_SYMBOL_GPL(media_request_object_unbind);
474 
/*
 * Mark @obj as completed. When this completes the last incomplete
 * object of a QUEUED request and no manual completion is pending, the
 * request itself becomes COMPLETE, pollers are woken, and the
 * reference taken at queue time is dropped.
 */
void media_request_object_complete(struct media_request_object *obj)
{
	struct media_request *req = obj->req;
	unsigned long flags;
	bool completed = false;

	spin_lock_irqsave(&req->lock, flags);
	/* Completing twice is a no-op. */
	if (obj->completed)
		goto unlock;
	obj->completed = true;
	/* Only objects of a queued request with pending work may complete. */
	if (WARN_ON(!req->num_incomplete_objects) ||
	    WARN_ON(req->state != MEDIA_REQUEST_STATE_QUEUED))
		goto unlock;

	if (!--req->num_incomplete_objects && !req->manual_completion) {
		req->state = MEDIA_REQUEST_STATE_COMPLETE;
		wake_up_interruptible_all(&req->poll_wait);
		completed = true;
	}
unlock:
	spin_unlock_irqrestore(&req->lock, flags);
	/* Drop the queue-time reference outside the spinlock. */
	if (completed)
		media_request_put(req);
}
EXPORT_SYMBOL_GPL(media_request_object_complete);
500 
/*
 * Perform the manual completion step for @req.
 *
 * Only valid for a QUEUED request that was flagged for manual
 * completion; all bound objects are expected to have completed
 * already. Transitions the request to COMPLETE, wakes pollers and
 * drops the reference taken at queue time.
 */
void media_request_manual_complete(struct media_request *req)
{
	bool completed = false;
	unsigned long flags;

	if (WARN_ON_ONCE(!req))
		return;

	spin_lock_irqsave(&req->lock, flags);

	/* Calling this without requesting manual completion is a bug. */
	if (WARN_ON_ONCE(!req->manual_completion))
		goto unlock;

	if (WARN_ON_ONCE(req->state != MEDIA_REQUEST_STATE_QUEUED))
		goto unlock;

	req->manual_completion = false;
	/*
	 * It is expected that all other objects in this request are
	 * completed when this function is called. WARN if that is
	 * not the case.
	 */
	if (!WARN_ON(req->num_incomplete_objects)) {
		req->state = MEDIA_REQUEST_STATE_COMPLETE;
		wake_up_interruptible_all(&req->poll_wait);
		completed = true;
	}

unlock:
	spin_unlock_irqrestore(&req->lock, flags);
	/* Drop the queue-time reference outside the spinlock. */
	if (completed)
		media_request_put(req);
}
EXPORT_SYMBOL_GPL(media_request_manual_complete);
535