// SPDX-License-Identifier: GPL-2.0
/*
 * Media device request objects
 *
 * Copyright 2018 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
 * Copyright (C) 2018 Intel Corporation
 * Copyright (C) 2018 Google, Inc.
 *
 * Author: Hans Verkuil <hans.verkuil@cisco.com>
 * Author: Sakari Ailus <sakari.ailus@linux.intel.com>
 */

#include <linux/anon_inodes.h>
#include <linux/file.h>
#include <linux/refcount.h>

#include <media/media-device.h>
#include <media/media-request.h>

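/*
 * A request moves through the states below: it is allocated in the IDLE
 * state, briefly enters VALIDATING and then QUEUED when userspace queues
 * it, and becomes COMPLETE once all objects in it have completed. CLEANING
 * is used while the request is reinited or released, and UPDATING is set
 * while objects are being added to or modified in the request (see the
 * media_request_lock_for_update() helpers in media-request.h).
 */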
static const char * const request_state[] = {
	[MEDIA_REQUEST_STATE_IDLE]	 = "idle",
	[MEDIA_REQUEST_STATE_VALIDATING] = "validating",
	[MEDIA_REQUEST_STATE_QUEUED]	 = "queued",
	[MEDIA_REQUEST_STATE_COMPLETE]	 = "complete",
	[MEDIA_REQUEST_STATE_CLEANING]	 = "cleaning",
	[MEDIA_REQUEST_STATE_UPDATING]	 = "updating",
};

static const char *
media_request_state_str(enum media_request_state state)
{
	BUILD_BUG_ON(ARRAY_SIZE(request_state) != NR_OF_MEDIA_REQUEST_STATE);

	if (WARN_ON(state >= ARRAY_SIZE(request_state)))
		return "invalid";
	return request_state[state];
}

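/*
 * Unbind and put all objects bound to the request and reset the counters.
 * Must only be called while the request is in the CLEANING state.
 */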
static void media_request_clean(struct media_request *req)
{
	struct media_request_object *obj, *obj_safe;

	/* Just a sanity check. No other code path is allowed to change this. */
	WARN_ON(req->state != MEDIA_REQUEST_STATE_CLEANING);
	WARN_ON(req->updating_count);
	WARN_ON(req->access_count);

	list_for_each_entry_safe(obj, obj_safe, &req->objects, list) {
		media_request_object_unbind(obj);
		media_request_object_put(obj);
	}

	req->updating_count = 0;
	req->access_count = 0;
	WARN_ON(req->num_incomplete_objects);
	req->num_incomplete_objects = 0;
	wake_up_interruptible_all(&req->poll_wait);
}

static void media_request_release(struct kref *kref)
{
	struct media_request *req =
		container_of(kref, struct media_request, kref);
	struct media_device *mdev = req->mdev;

	dev_dbg(mdev->dev, "request: release %s\n", req->debug_str);

	/* No other users, no need for a spinlock */
	req->state = MEDIA_REQUEST_STATE_CLEANING;

	media_request_clean(req);

	if (mdev->ops->req_free)
		mdev->ops->req_free(req);
	else
		kfree(req);
}

void media_request_put(struct media_request *req)
{
	kref_put(&req->kref, media_request_release);
}
EXPORT_SYMBOL_GPL(media_request_put);

static int media_request_close(struct inode *inode, struct file *filp)
{
	struct media_request *req = filp->private_data;

	media_request_put(req);
	return 0;
}

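/*
 * Polling a request fd reports EPOLLPRI once the request has completed.
 * Polling a request that is neither QUEUED nor COMPLETE reports EPOLLERR,
 * and EPOLLPRI is the only event worth asking for.
 */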
static __poll_t media_request_poll(struct file *filp,
				   struct poll_table_struct *wait)
{
	struct media_request *req = filp->private_data;
	unsigned long flags;
	__poll_t ret = 0;

	if (!(poll_requested_events(wait) & EPOLLPRI))
		return 0;

	poll_wait(filp, &req->poll_wait, wait);
	spin_lock_irqsave(&req->lock, flags);
	if (req->state == MEDIA_REQUEST_STATE_COMPLETE) {
		ret = EPOLLPRI;
		goto unlock;
	}
	if (req->state != MEDIA_REQUEST_STATE_QUEUED) {
		ret = EPOLLERR;
		goto unlock;
	}

unlock:
	spin_unlock_irqrestore(&req->lock, flags);
	return ret;
}

static long media_request_ioctl_queue(struct media_request *req)
{
	struct media_device *mdev = req->mdev;
	enum media_request_state state;
	unsigned long flags;
	int ret;

	dev_dbg(mdev->dev, "request: queue %s\n", req->debug_str);

	/*
	 * Ensure the request that is validated will be the one that gets
	 * queued next by serializing the queueing process. This mutex also
	 * serializes against canceling a vb2 queue and against setting values
	 * such as controls in a request.
	 */
	mutex_lock(&mdev->req_queue_mutex);

	media_request_get(req);

	spin_lock_irqsave(&req->lock, flags);
	if (req->state == MEDIA_REQUEST_STATE_IDLE)
		req->state = MEDIA_REQUEST_STATE_VALIDATING;
	state = req->state;
	spin_unlock_irqrestore(&req->lock, flags);
	if (state != MEDIA_REQUEST_STATE_VALIDATING) {
		dev_dbg(mdev->dev,
			"request: unable to queue %s, request in state %s\n",
			req->debug_str, media_request_state_str(state));
		media_request_put(req);
		mutex_unlock(&mdev->req_queue_mutex);
		return -EBUSY;
	}

	ret = mdev->ops->req_validate(req);

	/*
	 * If req_validate was successful, then we mark the state as QUEUED
	 * and call req_queue. We set the state first so that req_queue can
	 * unbind or complete the queued objects in case they are immediately
	 * 'consumed'. The state can only change from QUEUED to another state
	 * if either the driver changes it or the user cancels the vb2 queue.
	 * The driver can only change the state after each object is queued
	 * through the req_queue op (and that op cannot fail), so setting the
	 * state to QUEUED up front is safe.
	 *
	 * The other path that changes the state is canceling the vb2 queue,
	 * and that takes the req_queue_mutex, which is still held while
	 * req_queue is called, so that is safe as well.
	 */
	spin_lock_irqsave(&req->lock, flags);
	req->state = ret ? MEDIA_REQUEST_STATE_IDLE
			 : MEDIA_REQUEST_STATE_QUEUED;
	spin_unlock_irqrestore(&req->lock, flags);

	if (!ret)
		mdev->ops->req_queue(req);

	mutex_unlock(&mdev->req_queue_mutex);

	if (ret) {
		dev_dbg(mdev->dev, "request: can't queue %s (%d)\n",
			req->debug_str, ret);
		media_request_put(req);
	}

	return ret;
}

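/*
 * MEDIA_REQUEST_IOC_REINIT: return an idle or completed request to the
 * IDLE state, dropping all objects bound to it, so the file descriptor
 * can be reused without reallocating a request.
 */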
static long media_request_ioctl_reinit(struct media_request *req)
{
	struct media_device *mdev = req->mdev;
	unsigned long flags;

	spin_lock_irqsave(&req->lock, flags);
	if (req->state != MEDIA_REQUEST_STATE_IDLE &&
	    req->state != MEDIA_REQUEST_STATE_COMPLETE) {
		dev_dbg(mdev->dev,
			"request: %s not in idle or complete state, cannot reinit\n",
			req->debug_str);
		spin_unlock_irqrestore(&req->lock, flags);
		return -EBUSY;
	}
	if (req->access_count) {
		dev_dbg(mdev->dev,
			"request: %s is being accessed, cannot reinit\n",
			req->debug_str);
		spin_unlock_irqrestore(&req->lock, flags);
		return -EBUSY;
	}
	req->state = MEDIA_REQUEST_STATE_CLEANING;
	spin_unlock_irqrestore(&req->lock, flags);

	media_request_clean(req);

	spin_lock_irqsave(&req->lock, flags);
	req->state = MEDIA_REQUEST_STATE_IDLE;
	spin_unlock_irqrestore(&req->lock, flags);

	return 0;
}

static long media_request_ioctl(struct file *filp, unsigned int cmd,
				unsigned long arg)
{
	struct media_request *req = filp->private_data;

	switch (cmd) {
	case MEDIA_REQUEST_IOC_QUEUE:
		return media_request_ioctl_queue(req);
	case MEDIA_REQUEST_IOC_REINIT:
		return media_request_ioctl_reinit(req);
	default:
		return -ENOIOCTLCMD;
	}
}

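/*
 * For reference, a minimal userspace sketch of the request lifecycle
 * (illustrative only: error handling is omitted and the device path is
 * an assumption):
 *
 *	int media_fd = open("/dev/media0", O_RDWR);
 *	int req_fd;
 *
 *	ioctl(media_fd, MEDIA_IOC_REQUEST_ALLOC, &req_fd);
 *	// Associate controls and buffers with the request through their
 *	// own ioctls, e.g. VIDIOC_S_EXT_CTRLS / VIDIOC_QBUF with
 *	// request_fd set to req_fd.
 *	ioctl(req_fd, MEDIA_REQUEST_IOC_QUEUE);
 *
 *	struct pollfd pfd = { .fd = req_fd, .events = POLLPRI };
 *	poll(&pfd, 1, -1);	// wakes up once the request completes
 *	ioctl(req_fd, MEDIA_REQUEST_IOC_REINIT);	// reuse the fd
 */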
static const struct file_operations request_fops = {
	.owner = THIS_MODULE,
	.poll = media_request_poll,
	.unlocked_ioctl = media_request_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = media_request_ioctl,
#endif /* CONFIG_COMPAT */
	.release = media_request_close,
};

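/*
 * Look up the request behind @request_fd and check that it belongs to
 * @mdev. Returns the request with an extra reference taken (the caller
 * must call media_request_put()), ERR_PTR(-EBADR) if @mdev does not
 * support requests, or ERR_PTR(-EINVAL) if the fd is not a request fd
 * for this media device.
 */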
struct media_request *
media_request_get_by_fd(struct media_device *mdev, int request_fd)
{
	struct fd f;
	struct media_request *req;

	if (!mdev || !mdev->ops ||
	    !mdev->ops->req_validate || !mdev->ops->req_queue)
		return ERR_PTR(-EBADR);

	f = fdget(request_fd);
	if (!f.file)
		goto err_no_req_fd;

	if (f.file->f_op != &request_fops)
		goto err_fput;
	req = f.file->private_data;
	if (req->mdev != mdev)
		goto err_fput;

	/*
	 * Note: as long as someone has an open filehandle of the request,
	 * the request can never be released. The fdget() above ensures that
	 * even if userspace closes the request filehandle, the release()
	 * fop won't be called, so the media_request_get() always succeeds
	 * and there is no race condition where the request was released
	 * before media_request_get() is called.
	 */
	media_request_get(req);
	fdput(f);

	return req;

err_fput:
	fdput(f);

err_no_req_fd:
	dev_dbg(mdev->dev, "cannot find request_fd %d\n", request_fd);
	return ERR_PTR(-EINVAL);
}
EXPORT_SYMBOL_GPL(media_request_get_by_fd);

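/*
 * Allocate a new request for @mdev, back it with an anonymous inode and
 * install a new file descriptor for it, returned through @alloc_fd.
 * Drivers that need a larger, driver-specific request structure must
 * provide both the req_alloc and req_free ops, or neither.
 */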
int media_request_alloc(struct media_device *mdev, int *alloc_fd)
{
	struct media_request *req;
	struct file *filp;
	int fd;
	int ret;

	/* Either both are NULL or both are non-NULL */
	if (WARN_ON(!mdev->ops->req_alloc ^ !mdev->ops->req_free))
		return -ENOMEM;

	if (mdev->ops->req_alloc)
		req = mdev->ops->req_alloc(mdev);
	else
		req = kzalloc(sizeof(*req), GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	fd = get_unused_fd_flags(O_CLOEXEC);
	if (fd < 0) {
		ret = fd;
		goto err_free_req;
	}

	filp = anon_inode_getfile("request", &request_fops, NULL, O_CLOEXEC);
	if (IS_ERR(filp)) {
		ret = PTR_ERR(filp);
		goto err_put_fd;
	}

	filp->private_data = req;
	req->mdev = mdev;
	req->state = MEDIA_REQUEST_STATE_IDLE;
	req->num_incomplete_objects = 0;
	kref_init(&req->kref);
	INIT_LIST_HEAD(&req->objects);
	spin_lock_init(&req->lock);
	init_waitqueue_head(&req->poll_wait);
	req->updating_count = 0;
	req->access_count = 0;

	*alloc_fd = fd;

	snprintf(req->debug_str, sizeof(req->debug_str), "%u:%d",
		 atomic_inc_return(&mdev->request_id), fd);
	dev_dbg(mdev->dev, "request: allocated %s\n", req->debug_str);

	fd_install(fd, filp);

	return 0;

err_put_fd:
	put_unused_fd(fd);

err_free_req:
	if (mdev->ops->req_free)
		mdev->ops->req_free(req);
	else
		kfree(req);

	return ret;
}

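/*
 * Request objects are refcounted independently of the request they are
 * bound to. An object is normally unbound before its last reference is
 * dropped; if it is still bound here, warn and force the unbind before
 * releasing it.
 */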
static void media_request_object_release(struct kref *kref)
{
	struct media_request_object *obj =
		container_of(kref, struct media_request_object, kref);
	struct media_request *req = obj->req;

	if (WARN_ON(req))
		media_request_object_unbind(obj);
	obj->ops->release(obj);
}

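/*
 * Find the object with the given @ops and @priv in @req. On success a
 * reference to the object is taken, which the caller must release with
 * media_request_object_put().
 */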
struct media_request_object *
media_request_object_find(struct media_request *req,
			  const struct media_request_object_ops *ops,
			  void *priv)
{
	struct media_request_object *obj;
	struct media_request_object *found = NULL;
	unsigned long flags;

	if (WARN_ON(!ops || !priv))
		return NULL;

	spin_lock_irqsave(&req->lock, flags);
	list_for_each_entry(obj, &req->objects, list) {
		if (obj->ops == ops && obj->priv == priv) {
			media_request_object_get(obj);
			found = obj;
			break;
		}
	}
	spin_unlock_irqrestore(&req->lock, flags);
	return found;
}
EXPORT_SYMBOL_GPL(media_request_object_find);

void media_request_object_put(struct media_request_object *obj)
{
	kref_put(&obj->kref, media_request_object_release);
}
EXPORT_SYMBOL_GPL(media_request_object_put);

void media_request_object_init(struct media_request_object *obj)
{
	obj->ops = NULL;
	obj->req = NULL;
	obj->priv = NULL;
	obj->completed = false;
	INIT_LIST_HEAD(&obj->list);
	kref_init(&obj->kref);
}
EXPORT_SYMBOL_GPL(media_request_object_init);

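/*
 * Bind @obj to @req. The request must be in the UPDATING state. Buffer
 * objects are added to the tail of the request's object list and all
 * other objects to the head, keeping buffers last when the objects are
 * walked at queue time (presumably so that e.g. control values are
 * applied before the buffers they affect).
 */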
int media_request_object_bind(struct media_request *req,
			      const struct media_request_object_ops *ops,
			      void *priv, bool is_buffer,
			      struct media_request_object *obj)
{
	unsigned long flags;
	int ret = -EBUSY;

	if (WARN_ON(!ops->release))
		return -EBADR;

	spin_lock_irqsave(&req->lock, flags);

	if (WARN_ON(req->state != MEDIA_REQUEST_STATE_UPDATING))
		goto unlock;

	obj->req = req;
	obj->ops = ops;
	obj->priv = priv;

	if (is_buffer)
		list_add_tail(&obj->list, &req->objects);
	else
		list_add(&obj->list, &req->objects);
	req->num_incomplete_objects++;
	ret = 0;

unlock:
	spin_unlock_irqrestore(&req->lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(media_request_object_bind);

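/*
 * Unbind @obj from its request. The incomplete-object count is adjusted
 * as appropriate for the request state, and if this was the last
 * incomplete object of a queued request, the request becomes COMPLETE,
 * pollers are woken up and the reference taken at queue time is dropped.
 */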
void media_request_object_unbind(struct media_request_object *obj)
{
	struct media_request *req = obj->req;
	unsigned long flags;
	bool completed = false;

	if (WARN_ON(!req))
		return;

	spin_lock_irqsave(&req->lock, flags);
	list_del(&obj->list);
	obj->req = NULL;

	if (req->state == MEDIA_REQUEST_STATE_COMPLETE)
		goto unlock;

	if (WARN_ON(req->state == MEDIA_REQUEST_STATE_VALIDATING))
		goto unlock;

	if (req->state == MEDIA_REQUEST_STATE_CLEANING) {
		if (!obj->completed)
			req->num_incomplete_objects--;
		goto unlock;
	}

	if (WARN_ON(!req->num_incomplete_objects))
		goto unlock;

	req->num_incomplete_objects--;
	if (req->state == MEDIA_REQUEST_STATE_QUEUED &&
	    !req->num_incomplete_objects) {
		req->state = MEDIA_REQUEST_STATE_COMPLETE;
		completed = true;
		wake_up_interruptible_all(&req->poll_wait);
	}

unlock:
	spin_unlock_irqrestore(&req->lock, flags);
	if (obj->ops->unbind)
		obj->ops->unbind(obj);
	if (completed)
		media_request_put(req);
}
EXPORT_SYMBOL_GPL(media_request_object_unbind);

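/*
 * Mark @obj as completed. When the last incomplete object of a queued
 * request completes, the request transitions to COMPLETE, pollers are
 * woken up and the reference taken at queue time is dropped.
 */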
void media_request_object_complete(struct media_request_object *obj)
{
	struct media_request *req = obj->req;
	unsigned long flags;
	bool completed = false;

	spin_lock_irqsave(&req->lock, flags);
	if (obj->completed)
		goto unlock;
	obj->completed = true;
	if (WARN_ON(!req->num_incomplete_objects) ||
	    WARN_ON(req->state != MEDIA_REQUEST_STATE_QUEUED))
		goto unlock;

	if (!--req->num_incomplete_objects) {
		req->state = MEDIA_REQUEST_STATE_COMPLETE;
		wake_up_interruptible_all(&req->poll_wait);
		completed = true;
	}
unlock:
	spin_unlock_irqrestore(&req->lock, flags);
	if (completed)
		media_request_put(req);
}
EXPORT_SYMBOL_GPL(media_request_object_complete);
507