/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/* Copyright 2017-2019 Qiang Yu <yuq825@gmail.com> */

#ifndef __LIMA_SCHED_H__
#define __LIMA_SCHED_H__

#include <drm/gpu_scheduler.h>
#include <linux/list.h>
#include <linux/xarray.h>

struct lima_device;
struct lima_vm;

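/*
 * Saved dump of a task that hit an error, kept on a list so its state
 * can be retrieved later.
 */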
struct lima_sched_error_task {
	struct list_head list;
	void *data;
	u32 size;
};

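/*
 * One GPU job: a drm_sched_job plus the lima-specific state needed to
 * run it (VM, frame data, dependency fences and referenced BOs).
 */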
struct lima_sched_task {
	struct drm_sched_job base;

	/* VM the task runs in */
	struct lima_vm *vm;
	/* per-pipe frame descriptor (GP/PP frame register values) */
	void *frame;

	/* fences this task must wait on before it can run */
	struct xarray deps;
	unsigned long last_dep;

	/* buffer objects referenced by the task */
	struct lima_bo **bos;
	int num_bos;

	/* heap BO that may be grown to recover a PP page fault */
	bool recoverable;
	struct lima_bo *heap;

	/* pipe fence */
	struct dma_fence *fence;
};

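/* Per-context scheduler entity that jobs are submitted through. */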
struct lima_sched_context {
	struct drm_sched_entity base;
};

#define LIMA_SCHED_PIPE_MAX_MMU       8
#define LIMA_SCHED_PIPE_MAX_L2_CACHE  2
#define LIMA_SCHED_PIPE_MAX_PROCESSOR 8

struct lima_ip;

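/*
 * One scheduler instance per hardware pipe (GP or PP), owning the IP
 * blocks it drives and the backend callbacks that run tasks on them.
 */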
struct lima_sched_pipe {
	struct drm_gpu_scheduler base;

	/* dma-fence context used for this pipe's hardware fences */
	u64 fence_context;
	u32 fence_seqno;
	spinlock_t fence_lock;

	struct lima_device *ldev;

	/* task currently running on the hardware and the VM it uses */
	struct lima_sched_task *current_task;
	struct lima_vm *current_vm;

	/* IP blocks driven by this pipe */
	struct lima_ip *mmu[LIMA_SCHED_PIPE_MAX_MMU];
	int num_mmu;

	struct lima_ip *l2_cache[LIMA_SCHED_PIPE_MAX_L2_CACHE];
	int num_l2_cache;

	struct lima_ip *processor[LIMA_SCHED_PIPE_MAX_PROCESSOR];
	int num_processor;

	struct lima_ip *bcast_processor;
	struct lima_ip *bcast_mmu;

	/* completion/error state updated from the interrupt handlers */
	u32 done;
	bool error;
	atomic_t task;

	/* per-pipe task allocation cache */
	int frame_size;
	struct kmem_cache *task_slab;

	/* backend (GP/PP) callbacks */
	int (*task_validate)(struct lima_sched_pipe *pipe, struct lima_sched_task *task);
	void (*task_run)(struct lima_sched_pipe *pipe, struct lima_sched_task *task);
	void (*task_fini)(struct lima_sched_pipe *pipe);
	void (*task_error)(struct lima_sched_pipe *pipe);
	void (*task_mmu_error)(struct lima_sched_pipe *pipe);
	int (*task_recover)(struct lima_sched_pipe *pipe);

	struct work_struct recover_work;
};

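/* task, context and pipe setup/teardown and job submission entry points */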
int lima_sched_task_init(struct lima_sched_task *task,
			 struct lima_sched_context *context,
			 struct lima_bo **bos, int num_bos,
			 struct lima_vm *vm);
void lima_sched_task_fini(struct lima_sched_task *task);

int lima_sched_context_init(struct lima_sched_pipe *pipe,
			    struct lima_sched_context *context,
			    atomic_t *guilty);
void lima_sched_context_fini(struct lima_sched_pipe *pipe,
			     struct lima_sched_context *context);
struct dma_fence *lima_sched_context_queue_task(struct lima_sched_context *context,
						struct lima_sched_task *task);

int lima_sched_pipe_init(struct lima_sched_pipe *pipe, const char *name);
void lima_sched_pipe_fini(struct lima_sched_pipe *pipe);
void lima_sched_pipe_task_done(struct lima_sched_pipe *pipe);

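/*
 * Report an MMU fault on this pipe: mark it as errored and hand off to
 * the backend's MMU-error handler.
 */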
static inline void lima_sched_pipe_mmu_error(struct lima_sched_pipe *pipe)
{
	pipe->error = true;
	pipe->task_mmu_error(pipe);
}

int lima_sched_slab_init(void);
void lima_sched_slab_fini(void);

#endif