// SPDX-License-Identifier: GPL-2.0
/*
 * Media device request objects
 *
 * Copyright 2018 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
 * Copyright (C) 2018 Intel Corporation
 * Copyright (C) 2018 Google, Inc.
 *
 * Author: Hans Verkuil <hverkuil@kernel.org>
 * Author: Sakari Ailus <sakari.ailus@linux.intel.com>
 */

#include <linux/anon_inodes.h>
#include <linux/file.h>
#include <linux/refcount.h>

#include <media/media-device.h>
#include <media/media-request.h>

static const char * const request_state[] = {
	[MEDIA_REQUEST_STATE_IDLE]	 = "idle",
	[MEDIA_REQUEST_STATE_VALIDATING] = "validating",
	[MEDIA_REQUEST_STATE_QUEUED]	 = "queued",
	[MEDIA_REQUEST_STATE_COMPLETE]	 = "complete",
	[MEDIA_REQUEST_STATE_CLEANING]	 = "cleaning",
	[MEDIA_REQUEST_STATE_UPDATING]	 = "updating",
};

static const char *
media_request_state_str(enum media_request_state state)
{
	BUILD_BUG_ON(ARRAY_SIZE(request_state) != NR_OF_MEDIA_REQUEST_STATE);

	if (WARN_ON(state >= ARRAY_SIZE(request_state)))
		return "invalid";
	return request_state[state];
}

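/*
 * Unbind and release all objects bound to the request and reset the
 * bookkeeping counters. Only called once the request has been moved to
 * the CLEANING state, either from the final kref put or from the
 * MEDIA_REQUEST_IOC_REINIT ioctl.
 */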
static void media_request_clean(struct media_request *req)
{
	struct media_request_object *obj, *obj_safe;

	/* Just a sanity check. No other code path is allowed to change this. */
	WARN_ON(req->state != MEDIA_REQUEST_STATE_CLEANING);
	WARN_ON(req->updating_count);
	WARN_ON(req->access_count);

	list_for_each_entry_safe(obj, obj_safe, &req->objects, list) {
		media_request_object_unbind(obj);
		media_request_object_put(obj);
	}

	req->updating_count = 0;
	req->access_count = 0;
	WARN_ON(req->num_incomplete_objects);
	req->num_incomplete_objects = 0;
	req->manual_completion = false;
	wake_up_interruptible_all(&req->poll_wait);
}

static void media_request_release(struct kref *kref)
{
	struct media_request *req =
		container_of(kref, struct media_request, kref);
	struct media_device *mdev = req->mdev;

	dev_dbg(mdev->dev, "request: release %s\n", req->debug_str);

	/* No other users, no need for a spinlock */
	req->state = MEDIA_REQUEST_STATE_CLEANING;

	media_request_clean(req);

	if (mdev->ops->req_free)
		mdev->ops->req_free(req);
	else
		kfree(req);
	atomic_dec(&mdev->num_requests);
}

void media_request_put(struct media_request *req)
{
	kref_put(&req->kref, media_request_release);
}
EXPORT_SYMBOL_GPL(media_request_put);

static int media_request_close(struct inode *inode, struct file *filp)
{
	struct media_request *req = filp->private_data;

	media_request_put(req);
	return 0;
}

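/*
 * Polling a request only supports EPOLLPRI: it signals that the request
 * completed. Polling a request that is neither queued nor complete
 * returns EPOLLERR.
 */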
static __poll_t media_request_poll(struct file *filp,
				   struct poll_table_struct *wait)
{
	struct media_request *req = filp->private_data;
	unsigned long flags;
	__poll_t ret = 0;

	if (!(poll_requested_events(wait) & EPOLLPRI))
		return 0;

	poll_wait(filp, &req->poll_wait, wait);
	spin_lock_irqsave(&req->lock, flags);
	if (req->state == MEDIA_REQUEST_STATE_COMPLETE) {
		ret = EPOLLPRI;
		goto unlock;
	}
	if (req->state != MEDIA_REQUEST_STATE_QUEUED) {
		ret = EPOLLERR;
		goto unlock;
	}

unlock:
	spin_unlock_irqrestore(&req->lock, flags);
	return ret;
}

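/*
 * Illustrative userspace flow for the request ioctls handled below (a
 * sketch, not a complete program): a request fd is allocated with
 * MEDIA_IOC_REQUEST_ALLOC on the media device, objects such as controls
 * and buffers are associated with it via the usual V4L2 ioctls using
 * their request_fd field, and the request is then queued and waited for:
 *
 *	ioctl(media_fd, MEDIA_IOC_REQUEST_ALLOC, &request_fd);
 *	// associate controls / buffers with request_fd here ...
 *	ioctl(request_fd, MEDIA_REQUEST_IOC_QUEUE);
 *	poll(&(struct pollfd){ .fd = request_fd, .events = POLLPRI }, 1, -1);
 *	ioctl(request_fd, MEDIA_REQUEST_IOC_REINIT);	// reuse the request
 */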
static long media_request_ioctl_queue(struct media_request *req)
{
	struct media_device *mdev = req->mdev;
	enum media_request_state state;
	unsigned long flags;
	int ret;

	dev_dbg(mdev->dev, "request: queue %s\n", req->debug_str);

	/*
	 * Ensure the request that is validated will be the one that gets queued
	 * next by serialising the queueing process. This mutex is also used
	 * to serialize with canceling a vb2 queue and with setting values such
	 * as controls in a request.
	 */
	mutex_lock(&mdev->req_queue_mutex);

	media_request_get(req);

	spin_lock_irqsave(&req->lock, flags);
	if (req->state == MEDIA_REQUEST_STATE_IDLE)
		req->state = MEDIA_REQUEST_STATE_VALIDATING;
	state = req->state;
	spin_unlock_irqrestore(&req->lock, flags);
	if (state != MEDIA_REQUEST_STATE_VALIDATING) {
		dev_dbg(mdev->dev,
			"request: unable to queue %s, request in state %s\n",
			req->debug_str, media_request_state_str(state));
		media_request_put(req);
		mutex_unlock(&mdev->req_queue_mutex);
		return -EBUSY;
	}

	ret = mdev->ops->req_validate(req);

	/*
	 * If req_validate was successful, then we mark the state as QUEUED
	 * and call req_queue. The reason we set the state first is that this
	 * allows req_queue to unbind or complete the queued objects in case
	 * they are immediately 'consumed'. State changes from QUEUED to another
	 * state can only happen if either the driver changes the state or if
	 * the user cancels the vb2 queue. The driver can only change the state
	 * after each object is queued through the req_queue op (and note that
	 * that op cannot fail), so setting the state to QUEUED up front is
	 * safe.
	 *
	 * The other reason for changing the state is if the vb2 queue is
	 * canceled, and that uses the req_queue_mutex which is still locked
	 * while req_queue is called, so that's safe as well.
	 */
	spin_lock_irqsave(&req->lock, flags);
	req->state = ret ? MEDIA_REQUEST_STATE_IDLE
			 : MEDIA_REQUEST_STATE_QUEUED;
	spin_unlock_irqrestore(&req->lock, flags);

	if (!ret)
		mdev->ops->req_queue(req);

	mutex_unlock(&mdev->req_queue_mutex);

	if (ret) {
		dev_dbg(mdev->dev, "request: can't queue %s (%d)\n",
			req->debug_str, ret);
		media_request_put(req);
	}

	return ret;
}

static long media_request_ioctl_reinit(struct media_request *req)
{
	struct media_device *mdev = req->mdev;
	unsigned long flags;

	mutex_lock(&mdev->req_queue_mutex);

	spin_lock_irqsave(&req->lock, flags);
	if (req->state != MEDIA_REQUEST_STATE_IDLE &&
	    req->state != MEDIA_REQUEST_STATE_COMPLETE) {
		dev_dbg(mdev->dev,
			"request: %s not in idle or complete state, cannot reinit\n",
			req->debug_str);
		spin_unlock_irqrestore(&req->lock, flags);
		mutex_unlock(&mdev->req_queue_mutex);
		return -EBUSY;
	}
	if (req->access_count) {
		dev_dbg(mdev->dev,
			"request: %s is being accessed, cannot reinit\n",
			req->debug_str);
		spin_unlock_irqrestore(&req->lock, flags);
		mutex_unlock(&mdev->req_queue_mutex);
		return -EBUSY;
	}
	req->state = MEDIA_REQUEST_STATE_CLEANING;
	spin_unlock_irqrestore(&req->lock, flags);

	media_request_clean(req);

	spin_lock_irqsave(&req->lock, flags);
	req->state = MEDIA_REQUEST_STATE_IDLE;
	spin_unlock_irqrestore(&req->lock, flags);
	mutex_unlock(&mdev->req_queue_mutex);

	return 0;
}

static long media_request_ioctl(struct file *filp, unsigned int cmd,
				unsigned long arg)
{
	struct media_request *req = filp->private_data;

	switch (cmd) {
	case MEDIA_REQUEST_IOC_QUEUE:
		return media_request_ioctl_queue(req);
	case MEDIA_REQUEST_IOC_REINIT:
		return media_request_ioctl_reinit(req);
	default:
		return -ENOIOCTLCMD;
	}
}

static const struct file_operations request_fops = {
	.owner = THIS_MODULE,
	.poll = media_request_poll,
	.unlocked_ioctl = media_request_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = media_request_ioctl,
#endif /* CONFIG_COMPAT */
	.release = media_request_close,
};

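/*
 * Look up the request backing a request file descriptor and return it with
 * an extra reference taken, or an ERR_PTR on failure. The caller must drop
 * that reference with media_request_put() when done with the request.
 */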
struct media_request *
media_request_get_by_fd(struct media_device *mdev, int request_fd)
{
	struct media_request *req;

	if (!mdev || !mdev->ops ||
	    !mdev->ops->req_validate || !mdev->ops->req_queue)
		return ERR_PTR(-EBADR);

	CLASS(fd, f)(request_fd);
	if (fd_empty(f))
		goto err;

	if (fd_file(f)->f_op != &request_fops)
		goto err;
	req = fd_file(f)->private_data;
	if (req->mdev != mdev)
		goto err;

	/*
	 * Note: as long as someone has an open filehandle of the request,
	 * the request can never be released. The fdget() done by CLASS(fd)
	 * above ensures that even if userspace closes the request filehandle,
	 * the release() fop won't be called, so the media_request_get()
	 * always succeeds and there is no race condition where the request
	 * was released before media_request_get() is called.
	 */
	media_request_get(req);
	return req;

err:
	dev_dbg(mdev->dev, "cannot find request_fd %d\n", request_fd);
	return ERR_PTR(-EINVAL);
}
EXPORT_SYMBOL_GPL(media_request_get_by_fd);

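/*
 * Allocate a new request for the media device (through the req_alloc op if
 * the driver provides one), back it with an anonymous inode using
 * request_fops and return the new file descriptor through *alloc_fd.
 */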
int media_request_alloc(struct media_device *mdev, int *alloc_fd)
{
	struct media_request *req;
	int ret;

	/* Either both are NULL or both are non-NULL */
	if (WARN_ON(!mdev->ops->req_alloc ^ !mdev->ops->req_free))
		return -ENOMEM;

	if (mdev->ops->req_alloc)
		req = mdev->ops->req_alloc(mdev);
	else
		req = kzalloc_obj(*req);
	if (!req)
		return -ENOMEM;

	req->mdev = mdev;
	req->state = MEDIA_REQUEST_STATE_IDLE;
	req->num_incomplete_objects = 0;
	req->manual_completion = false;
	kref_init(&req->kref);
	INIT_LIST_HEAD(&req->objects);
	spin_lock_init(&req->lock);
	init_waitqueue_head(&req->poll_wait);
	req->updating_count = 0;
	req->access_count = 0;

	FD_PREPARE(fdf, O_CLOEXEC,
		   anon_inode_getfile("request", &request_fops, NULL,
				      O_CLOEXEC));
	if (fdf.err) {
		ret = fdf.err;
		goto err_free_req;
	}

	fd_prepare_file(fdf)->private_data = req;

	snprintf(req->debug_str, sizeof(req->debug_str), "%u:%d",
		 atomic_inc_return(&mdev->request_id), fd_prepare_fd(fdf));
	atomic_inc(&mdev->num_requests);
	dev_dbg(mdev->dev, "request: allocated %s\n", req->debug_str);

	*alloc_fd = fd_publish(fdf);

	return 0;

err_free_req:
	if (mdev->ops->req_free)
		mdev->ops->req_free(req);
	else
		kfree(req);

	return ret;
}

static void media_request_object_release(struct kref *kref)
{
	struct media_request_object *obj =
		container_of(kref, struct media_request_object, kref);
	struct media_request *req = obj->req;
	struct media_device *mdev = obj->mdev;

	if (WARN_ON(req))
		media_request_object_unbind(obj);
	obj->ops->release(obj);
	atomic_dec(&mdev->num_request_objects);
}

struct media_request_object *
media_request_object_find(struct media_request *req,
			  const struct media_request_object_ops *ops,
			  void *priv)
{
	struct media_request_object *obj;
	struct media_request_object *found = NULL;
	unsigned long flags;

	if (WARN_ON(!ops || !priv))
		return NULL;

	spin_lock_irqsave(&req->lock, flags);
	list_for_each_entry(obj, &req->objects, list) {
		if (obj->ops == ops && obj->priv == priv) {
			media_request_object_get(obj);
			found = obj;
			break;
		}
	}
	spin_unlock_irqrestore(&req->lock, flags);
	return found;
}
EXPORT_SYMBOL_GPL(media_request_object_find);

void media_request_object_put(struct media_request_object *obj)
{
	kref_put(&obj->kref, media_request_object_release);
}
EXPORT_SYMBOL_GPL(media_request_object_put);

void media_request_object_init(struct media_request_object *obj)
{
	obj->ops = NULL;
	obj->req = NULL;
	obj->priv = NULL;
	obj->completed = false;
	INIT_LIST_HEAD(&obj->list);
	kref_init(&obj->kref);
}
EXPORT_SYMBOL_GPL(media_request_object_init);

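/*
 * Bind an object to a request. Buffer objects are added at the tail of the
 * object list and all other objects at the head, presumably so that buffer
 * objects are handled after the other objects when the request is processed.
 * Binding is only allowed while the request is in the UPDATING or QUEUED
 * state.
 */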
int media_request_object_bind(struct media_request *req,
			      const struct media_request_object_ops *ops,
			      void *priv, bool is_buffer,
			      struct media_request_object *obj)
{
	unsigned long flags;
	int ret = -EBUSY;

	if (WARN_ON(!ops->release))
		return -EBADR;

	spin_lock_irqsave(&req->lock, flags);

	if (WARN_ON(req->state != MEDIA_REQUEST_STATE_UPDATING &&
		    req->state != MEDIA_REQUEST_STATE_QUEUED))
		goto unlock;

	obj->req = req;
	obj->ops = ops;
	obj->priv = priv;
	obj->mdev = req->mdev;

	if (is_buffer)
		list_add_tail(&obj->list, &req->objects);
	else
		list_add(&obj->list, &req->objects);
	req->num_incomplete_objects++;
	ret = 0;
	atomic_inc(&obj->mdev->num_request_objects);

unlock:
	spin_unlock_irqrestore(&req->lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(media_request_object_bind);

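/*
 * Unbind an object from its request. If this was the last incomplete object
 * of a queued request that does not use manual completion, the request is
 * marked complete and any pollers are woken up.
 */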
void media_request_object_unbind(struct media_request_object *obj)
{
	struct media_request *req = obj->req;
	unsigned long flags;
	bool completed = false;

	if (WARN_ON(!req))
		return;

	spin_lock_irqsave(&req->lock, flags);
	list_del(&obj->list);
	obj->req = NULL;

	if (req->state == MEDIA_REQUEST_STATE_COMPLETE)
		goto unlock;

	if (WARN_ON(req->state == MEDIA_REQUEST_STATE_VALIDATING))
		goto unlock;

	if (req->state == MEDIA_REQUEST_STATE_CLEANING) {
		if (!obj->completed)
			req->num_incomplete_objects--;
		goto unlock;
	}

	if (WARN_ON(!req->num_incomplete_objects))
		goto unlock;

	req->num_incomplete_objects--;
	if (req->state == MEDIA_REQUEST_STATE_QUEUED &&
	    !req->num_incomplete_objects && !req->manual_completion) {
		req->state = MEDIA_REQUEST_STATE_COMPLETE;
		completed = true;
		wake_up_interruptible_all(&req->poll_wait);
	}

unlock:
	spin_unlock_irqrestore(&req->lock, flags);
	if (obj->ops->unbind)
		obj->ops->unbind(obj);
	if (completed)
		media_request_put(req);
}
EXPORT_SYMBOL_GPL(media_request_object_unbind);

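/*
 * Mark an object as completed, typically called by the driver once it is
 * done with the object. When the last incomplete object of a queued request
 * completes and manual completion is not in use, the whole request is
 * completed and its reference is dropped.
 */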
void media_request_object_complete(struct media_request_object *obj)
{
	struct media_request *req = obj->req;
	unsigned long flags;
	bool completed = false;

	spin_lock_irqsave(&req->lock, flags);
	if (obj->completed)
		goto unlock;
	obj->completed = true;
	if (WARN_ON(!req->num_incomplete_objects) ||
	    WARN_ON(req->state != MEDIA_REQUEST_STATE_QUEUED))
		goto unlock;

	if (!--req->num_incomplete_objects && !req->manual_completion) {
		req->state = MEDIA_REQUEST_STATE_COMPLETE;
		wake_up_interruptible_all(&req->poll_wait);
		completed = true;
	}
unlock:
	spin_unlock_irqrestore(&req->lock, flags);
	if (completed)
		media_request_put(req);
}
EXPORT_SYMBOL_GPL(media_request_object_complete);

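/*
 * Complete a request that was queued with manual_completion set. Drivers
 * call this once all request objects have completed and the request itself
 * should be signalled as complete.
 */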
void media_request_manual_complete(struct media_request *req)
{
	bool completed = false;
	unsigned long flags;

	if (WARN_ON_ONCE(!req))
		return;

	spin_lock_irqsave(&req->lock, flags);

	if (WARN_ON_ONCE(!req->manual_completion))
		goto unlock;

	if (WARN_ON_ONCE(req->state != MEDIA_REQUEST_STATE_QUEUED))
		goto unlock;

	req->manual_completion = false;
	/*
	 * It is expected that all other objects in this request are
	 * completed when this function is called. WARN if that is
	 * not the case.
	 */
	if (!WARN_ON(req->num_incomplete_objects)) {
		req->state = MEDIA_REQUEST_STATE_COMPLETE;
		wake_up_interruptible_all(&req->poll_wait);
		completed = true;
	}

unlock:
	spin_unlock_irqrestore(&req->lock, flags);
	if (completed)
		media_request_put(req);
}
EXPORT_SYMBOL_GPL(media_request_manual_complete);