xref: /linux/drivers/gpu/drm/i915/i915_scheduler.h (revision e2f3496e93be3238de2e2e6bfc83b3a83c084ce5)
1 /*
2  * SPDX-License-Identifier: MIT
3  *
4  * Copyright © 2018 Intel Corporation
5  */
6 
7 #ifndef _I915_SCHEDULER_H_
8 #define _I915_SCHEDULER_H_
9 
10 #include <linux/bitops.h>
11 #include <linux/kernel.h>
12 
13 #include <uapi/drm/i915_drm.h>
14 
15 struct drm_i915_private;
16 struct i915_request;
17 struct intel_engine_cs;
18 
/*
 * Kernel-internal priority levels, derived from the uapi context priority
 * range (I915_CONTEXT_{MIN,DEFAULT,MAX}_USER_PRIORITY from i915_drm.h).
 * MIN sits one step below and MAX one step above the user-visible range,
 * so internal levels exist that no userspace context can request.
 */
enum {
	I915_PRIORITY_MIN = I915_CONTEXT_MIN_USER_PRIORITY - 1,
	I915_PRIORITY_NORMAL = I915_CONTEXT_DEFAULT_PRIORITY,
	I915_PRIORITY_MAX = I915_CONTEXT_MAX_USER_PRIORITY + 1,

	/* Sentinel: priority has not been assigned yet (INT_MIN is never valid). */
	I915_PRIORITY_INVALID = INT_MIN
};
26 
/*
 * The effective priority of a request packs the user-supplied priority into
 * the upper bits, reserving the low I915_USER_PRIORITY_SHIFT bit(s) for
 * internal adjustments (e.g. I915_PRIORITY_NEWCLIENT below). Two requests
 * with the same user priority can thus still be ordered by internal bonus
 * bits without crossing into the next user level.
 */
#define I915_USER_PRIORITY_SHIFT 1
#define I915_USER_PRIORITY(x) ((x) << I915_USER_PRIORITY_SHIFT)

/* Number of internal sub-levels per user level, and a mask to extract them. */
#define I915_PRIORITY_COUNT BIT(I915_USER_PRIORITY_SHIFT)
#define I915_PRIORITY_MASK (I915_PRIORITY_COUNT - 1)

/*
 * Internal priority bonus bit; the name suggests it favours the first
 * pending request of a fresh client -- NOTE(review): exact semantics live
 * in i915_scheduler.c / i915_request.c, confirm there.
 */
#define I915_PRIORITY_NEWCLIENT	((u8)BIT(0))
34 
/**
 * struct i915_sched_attr - scheduling attributes attached to a request
 *
 * Compared when ordering requests for execution; see &struct i915_sched_node
 * and i915_schedule().
 */
struct i915_sched_attr {
	/**
	 * @priority: execution and service priority
	 *
	 * All clients are equal, but some are more equal than others!
	 *
	 * Requests from a context with a greater (more positive) value of
	 * @priority will be executed before those with a lower @priority
	 * value, forming a simple QoS.
	 *
	 * The &drm_i915_private.kernel_context is assigned the lowest priority.
	 */
	int priority;
};
49 
/*
 * "People assume that time is a strict progression of cause to effect, but
 * actually, from a nonlinear, non-subjective viewpoint, it's more like a big
 * ball of wibbly-wobbly, timey-wimey ... stuff." -The Doctor, 2015
 *
 * Requests exist in a complex web of interdependencies. Each request
 * has to wait for some other request to complete before it is ready to be run
 * (e.g. we have to wait until the pixels have been rendered into a texture
 * before we can copy from it). We track the readiness of a request in terms
 * of fences, but we also need to keep the dependency tree for the lifetime
 * of the request (beyond the life of an individual fence). We use the tree
 * at various points to reorder the requests whilst keeping the requests
 * in order with respect to their various dependencies.
 *
 * There is no active component to the "scheduler". As we know the dependency
 * DAG of each request, we are able to insert it into a sorted queue when it
 * is ready, and are able to reorder its portion of the graph to accommodate
 * dynamic priority changes.
 */
struct i915_sched_node {
	struct list_head signalers_list; /* those before us, we depend upon */
	struct list_head waiters_list; /* those after us, they depend upon us */
	/*
	 * @link: list position within the scheduler's queues -- presumably the
	 * priolist returned by i915_sched_lookup_priolist(); confirm in the .c.
	 */
	struct list_head link;
	/* @attr: current scheduling attributes (see struct i915_sched_attr). */
	struct i915_sched_attr attr;
};
75 
/*
 * One edge of the dependency DAG: @signaler must complete before the node
 * that holds this dependency may run. The two link fields thread this edge
 * onto the signaler's and the waiter's lists respectively (cf. the
 * signalers_list/waiters_list pair in &struct i915_sched_node) --
 * NOTE(review): the exact list pairing is established in i915_scheduler.c.
 */
struct i915_dependency {
	struct i915_sched_node *signaler;
	struct list_head signal_link;
	struct list_head wait_link;
	/* Scratch linkage, presumably for the priority-bump DFS walk. */
	struct list_head dfs_link;
	unsigned long flags;
/* Dependency was dynamically allocated and must be freed when retired. */
#define I915_DEPENDENCY_ALLOC BIT(0)
};
84 
/* Initialise a node's lists and attributes for use in the dependency DAG. */
void i915_sched_node_init(struct i915_sched_node *node);

/*
 * Record that @node depends on @signal using caller-provided storage @dep.
 * Return value semantics (whether the edge was newly added) are defined in
 * i915_scheduler.c -- confirm before relying on them.
 */
bool __i915_sched_node_add_dependency(struct i915_sched_node *node,
				      struct i915_sched_node *signal,
				      struct i915_dependency *dep,
				      unsigned long flags);

/*
 * As above, but allocates the i915_dependency internally (hence the
 * i915 argument and the int error return).
 */
int i915_sched_node_add_dependency(struct drm_i915_private *i915,
				   struct i915_sched_node *node,
				   struct i915_sched_node *signal);

/* Tear down a node, releasing any I915_DEPENDENCY_ALLOC edges it owns. */
void i915_sched_node_fini(struct drm_i915_private *i915,
			  struct i915_sched_node *node);

/* Apply new scheduling attributes to @request, rippling through the DAG. */
void i915_schedule(struct i915_request *request,
		   const struct i915_sched_attr *attr);

/*
 * Look up (or create) the engine's submission list for priority @prio;
 * returns the list_head onto which ready requests of that priority are
 * queued.
 */
struct list_head *
i915_sched_lookup_priolist(struct intel_engine_cs *engine, int prio);
104 
105 #endif /* _I915_SCHEDULER_H_ */
106