/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/wait.h>

#include <drm/drmP.h>
#include <drm/gpu_scheduler.h>

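/* Backing slab for all scheduler fences, created at module init */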
static struct kmem_cache *sched_fence_slab;

static int __init drm_sched_fence_slab_init(void)
{
	sched_fence_slab = kmem_cache_create(
		"drm_sched_fence", sizeof(struct drm_sched_fence), 0,
		SLAB_HWCACHE_ALIGN, NULL);
	if (!sched_fence_slab)
		return -ENOMEM;

	return 0;
}

static void __exit drm_sched_fence_slab_fini(void)
{
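	/* Wait for all in-flight drm_sched_fence_free() RCU callbacks */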
	rcu_barrier();
	kmem_cache_destroy(sched_fence_slab);
}

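/**
 * drm_sched_fence_scheduled - signal that the job left the queue
 *
 * @fence: scheduler fence to signal
 *
 * Signals the &drm_sched_fence.scheduled fence, marking the point at which
 * the scheduler picked the job up and pushed it to the hardware.
 */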
void drm_sched_fence_scheduled(struct drm_sched_fence *fence)
{
	int ret = dma_fence_signal(&fence->scheduled);

	if (!ret)
		DMA_FENCE_TRACE(&fence->scheduled,
				"signaled from irq context\n");
	else
		DMA_FENCE_TRACE(&fence->scheduled,
				"was already signaled\n");
}

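/**
 * drm_sched_fence_finished - signal that the job has finished
 *
 * @fence: scheduler fence to signal
 *
 * Signals the &drm_sched_fence.finished fence, marking the point at which
 * the job has completed execution.
 */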
void drm_sched_fence_finished(struct drm_sched_fence *fence)
{
	int ret = dma_fence_signal(&fence->finished);

	if (!ret)
		DMA_FENCE_TRACE(&fence->finished,
				"signaled from irq context\n");
	else
		DMA_FENCE_TRACE(&fence->finished,
				"was already signaled\n");
}

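/* Naming callbacks shared by the scheduled and finished fence ops below */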
static const char *drm_sched_fence_get_driver_name(struct dma_fence *fence)
{
	return "drm_sched";
}

static const char *drm_sched_fence_get_timeline_name(struct dma_fence *f)
{
	struct drm_sched_fence *fence = to_drm_sched_fence(f);

	return fence->sched->name;
}

/**
 * drm_sched_fence_free - free up the fence memory
 *
 * @rcu: RCU callback head
 *
 * Free up the fence memory after the RCU grace period.
 */
static void drm_sched_fence_free(struct rcu_head *rcu)
{
	struct dma_fence *f = container_of(rcu, struct dma_fence, rcu);
	struct drm_sched_fence *fence = to_drm_sched_fence(f);

	kmem_cache_free(sched_fence_slab, fence);
}

/**
 * drm_sched_fence_release_scheduled - release callback for the scheduled fence
 *
 * @f: the scheduled fence
 *
 * Called once the reference count of the scheduled fence drops to zero.
 * Drops the reference on the hardware parent fence and defers freeing the
 * fence memory to an RCU grace period.
 */
static void drm_sched_fence_release_scheduled(struct dma_fence *f)
{
	struct drm_sched_fence *fence = to_drm_sched_fence(f);

	dma_fence_put(fence->parent);
	call_rcu(&fence->finished.rcu, drm_sched_fence_free);
}

/**
 * drm_sched_fence_release_finished - release callback for the finished fence
 *
 * @f: the finished fence
 *
 * Called once the reference count of the finished fence drops to zero.
 * Drops the reference which the finished fence holds on the scheduled fence;
 * the memory backing both is freed once the scheduled fence is released as
 * well.
 */
static void drm_sched_fence_release_finished(struct dma_fence *f)
{
	struct drm_sched_fence *fence = to_drm_sched_fence(f);

	dma_fence_put(&fence->scheduled);
}

const struct dma_fence_ops drm_sched_fence_ops_scheduled = {
	.get_driver_name = drm_sched_fence_get_driver_name,
	.get_timeline_name = drm_sched_fence_get_timeline_name,
	.release = drm_sched_fence_release_scheduled,
};

const struct dma_fence_ops drm_sched_fence_ops_finished = {
	.get_driver_name = drm_sched_fence_get_driver_name,
	.get_timeline_name = drm_sched_fence_get_timeline_name,
	.release = drm_sched_fence_release_finished,
};

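/**
 * to_drm_sched_fence - cast a dma_fence to a drm_sched_fence
 *
 * @f: fence to cast
 *
 * Returns the embedding &struct drm_sched_fence if @f is either the
 * scheduled or the finished fence of a scheduler fence, NULL otherwise.
 */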
struct drm_sched_fence *to_drm_sched_fence(struct dma_fence *f)
{
	if (f->ops == &drm_sched_fence_ops_scheduled)
		return container_of(f, struct drm_sched_fence, scheduled);

	if (f->ops == &drm_sched_fence_ops_finished)
		return container_of(f, struct drm_sched_fence, finished);

	return NULL;
}
EXPORT_SYMBOL(to_drm_sched_fence);

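/**
 * drm_sched_fence_create - create a new scheduler fence
 *
 * @entity: entity the job belongs to
 * @owner: opaque pointer identifying the submitter
 *
 * Allocates a scheduler fence from the slab cache and initializes its
 * scheduled and finished fences on the entity's fence contexts.
 *
 * Returns the new fence, or NULL if the allocation fails.
 */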
struct drm_sched_fence *drm_sched_fence_create(struct drm_sched_entity *entity,
					       void *owner)
{
	struct drm_sched_fence *fence;
	unsigned int seq;

	fence = kmem_cache_zalloc(sched_fence_slab, GFP_KERNEL);
	if (!fence)
		return NULL;

	fence->owner = owner;
	fence->sched = entity->rq->sched;
	spin_lock_init(&fence->lock);

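	/*
	 * Both fences share one sequence number but live on two consecutive
	 * fence contexts: the scheduled fence on the entity's base context,
	 * the finished fence on the one following it.
	 */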
	seq = atomic_inc_return(&entity->fence_seq);
	dma_fence_init(&fence->scheduled, &drm_sched_fence_ops_scheduled,
		       &fence->lock, entity->fence_context, seq);
	dma_fence_init(&fence->finished, &drm_sched_fence_ops_finished,
		       &fence->lock, entity->fence_context + 1, seq);

	return fence;
}

module_init(drm_sched_fence_slab_init);
module_exit(drm_sched_fence_slab_fini);

MODULE_DESCRIPTION("DRM GPU scheduler");
MODULE_LICENSE("GPL and additional rights");