/* xref: /linux/drivers/gpu/drm/msm/msm_kms.h (revision 260f6f4fda93c8485c8037865c941b42b9cba5d2) */
1 /* SPDX-License-Identifier: GPL-2.0-only */
2 /*
3  * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
4  * Copyright (C) 2013 Red Hat
5  * Author: Rob Clark <robdclark@gmail.com>
6  */
7 
8 #ifndef __MSM_KMS_H__
9 #define __MSM_KMS_H__
10 
11 #include <linux/clk.h>
12 #include <linux/regulator/consumer.h>
13 
14 #include "msm_drv.h"
15 
16 #ifdef CONFIG_DRM_MSM_KMS
17 
/* NOTE(review): presumably the max planes per crtc — confirm at use sites */
#define MAX_PLANE	4
19 
/* As there are different display controller blocks depending on the
 * snapdragon version, the kms support is split out and the appropriate
 * implementation is loaded at runtime.  The kms module is responsible
 * for constructing the appropriate planes/crtcs/encoders/connectors.
 */
struct msm_kms_funcs {
	/* hw initialization: */
	int (*hw_init)(struct msm_kms *kms);
	/* irq handling: */
	void (*irq_preinstall)(struct msm_kms *kms);
	int (*irq_postinstall)(struct msm_kms *kms);
	void (*irq_uninstall)(struct msm_kms *kms);
	irqreturn_t (*irq)(struct msm_kms *kms);
	int (*enable_vblank)(struct msm_kms *kms, struct drm_crtc *crtc);
	void (*disable_vblank)(struct msm_kms *kms, struct drm_crtc *crtc);

	/*
	 * Atomic commit handling:
	 *
	 * Note that in the case of async commits, the funcs which take
	 * a crtc_mask (ie. ->flush_commit(), and ->complete_commit())
	 * might not be evenly balanced with ->prepare_commit(), however
	 * each crtc that is affected by a ->prepare_commit() (potentially
	 * multiple times) will eventually (at end of vsync period) be
	 * flushed and completed.
	 *
	 * This has some implications about tracking of cleanup state,
	 * for example SMP blocks to release after commit completes.  Ie.
	 * cleanup state should be also duplicated in the various
	 * duplicate_state() methods, as the current cleanup state at
	 * ->complete_commit() time may have accumulated cleanup work
	 * from multiple commits.
	 */

	/**
	 * Enable/disable power/clks needed for hw access done in other
	 * commit related methods.
	 *
	 * If mdp4 is migrated to runpm, we could probably drop these
	 * and use runpm directly.
	 */
	void (*enable_commit)(struct msm_kms *kms);
	void (*disable_commit)(struct msm_kms *kms);

	/**
	 * @check_mode_changed:
	 *
	 * Verify if the commit requires a full modeset on one of CRTCs.
	 */
	int (*check_mode_changed)(struct msm_kms *kms, struct drm_atomic_state *state);

	/**
	 * Prepare for atomic commit.  This is called after any previous
	 * (async or otherwise) commit has completed.
	 */
	void (*prepare_commit)(struct msm_kms *kms, struct drm_atomic_state *state);

	/**
	 * Flush an atomic commit.  This is called after the hardware
	 * updates have already been pushed down to affected planes/
	 * crtcs/encoders/connectors.
	 */
	void (*flush_commit)(struct msm_kms *kms, unsigned crtc_mask);

	/**
	 * Wait for any in-progress flush to complete on the specified
	 * crtcs.  This should not block if there is no in-progress
	 * commit (ie. don't just wait for a vblank), as it will also
	 * be called before ->prepare_commit() to ensure any potential
	 * "async" commit has completed.
	 */
	void (*wait_flush)(struct msm_kms *kms, unsigned crtc_mask);

	/**
	 * Clean up after commit is completed.  This is called after
	 * ->wait_flush(), to give the backend a chance to do any
	 * post-commit cleanup.
	 */
	void (*complete_commit)(struct msm_kms *kms, unsigned crtc_mask);

	/*
	 * Format handling:
	 */

	/* misc: round a requested pixel clock to what the hw can do */
	long (*round_pixclk)(struct msm_kms *kms, unsigned long rate,
			struct drm_encoder *encoder);
	/* cleanup: backend-specific teardown of the kms object */
	void (*destroy)(struct msm_kms *kms);

	/* snapshot: dump display hw state into @disp_state for debugging */
	void (*snapshot)(struct msm_disp_state *disp_state, struct msm_kms *kms);

#ifdef CONFIG_DEBUG_FS
	/* debugfs: register backend-specific debugfs entries on @minor */
	int (*debugfs_init)(struct msm_kms *kms, struct drm_minor *minor);
#endif
};
118 
119 struct msm_kms;
120 
/*
 * A per-crtc timer for pending async atomic flushes.  Scheduled to expire
 * shortly before vblank to flush pending async updates.
 */
struct msm_pending_timer {
	struct msm_hrtimer_work work;		/* hrtimer-driven work item */
	struct kthread_worker *worker;		/* worker the flush work runs on */
	struct msm_kms *kms;			/* back-pointer to owning kms */
	unsigned crtc_idx;			/* index of the crtc this timer serves */
};
131 
/* Commit/Event thread specific structure */
struct msm_drm_thread {
	struct drm_device *dev;			/* drm device the thread belongs to */
	struct kthread_worker *worker;		/* kthread worker processing events */
};
137 
/* Common state shared by all msm kms backends. */
struct msm_kms {
	const struct msm_kms_funcs *funcs;	/* backend function table */
	struct drm_device *dev;			/* associated drm device */

	/* attached output controllers (entries may be NULL if not present): */
	struct hdmi *hdmi;

	struct msm_dsi *dsi[MSM_DSI_CONTROLLER_COUNT];

	struct msm_dp *dp[MSM_DP_CONTROLLER_COUNT];

	/* irq number to be passed on to msm_irq_install */
	int irq;
	bool irq_requested;

	/* rate limit the snapshot capture to once per attach */
	atomic_t fault_snapshot_capture;

	/* mapper-id used to request GEM buffer mapped for scanout: */
	struct drm_gpuvm *vm;

	/* disp snapshot support */
	struct kthread_worker *dump_worker;
	struct kthread_work dump_work;
	struct mutex dump_mutex;

	/*
	 * For async commit, where ->flush_commit() and later happens
	 * from the crtc's pending_timer close to end of the frame:
	 */
	struct mutex commit_lock[MAX_CRTCS];	/* one lock per crtc */
	unsigned pending_crtc_mask;		/* crtcs with an async commit queued */
	struct msm_pending_timer pending_timers[MAX_CRTCS];

	struct workqueue_struct *wq;		/* ordered wq created in msm_kms_init() */
	struct msm_drm_thread event_thread[MAX_CRTCS];	/* per-crtc event threads */
};
174 
175 static inline int msm_kms_init(struct msm_kms *kms,
176 		const struct msm_kms_funcs *funcs)
177 {
178 	unsigned i, ret;
179 
180 	for (i = 0; i < ARRAY_SIZE(kms->commit_lock); i++)
181 		mutex_init(&kms->commit_lock[i]);
182 
183 	kms->funcs = funcs;
184 
185 	kms->wq = alloc_ordered_workqueue("msm", 0);
186 	if (!kms->wq)
187 		return -ENOMEM;
188 
189 	for (i = 0; i < ARRAY_SIZE(kms->pending_timers); i++) {
190 		ret = msm_atomic_init_pending_timer(&kms->pending_timers[i], kms, i);
191 		if (ret) {
192 			return ret;
193 		}
194 	}
195 
196 	return 0;
197 }
198 
199 static inline void msm_kms_destroy(struct msm_kms *kms)
200 {
201 	unsigned i;
202 
203 	for (i = 0; i < ARRAY_SIZE(kms->pending_timers); i++)
204 		msm_atomic_destroy_pending_timer(&kms->pending_timers[i]);
205 
206 	destroy_workqueue(kms->wq);
207 }
208 
/*
 * Iterate over the crtcs of @dev whose drm_crtc_mask() bit is set in
 * @crtc_mask.
 */
#define for_each_crtc_mask(dev, crtc, crtc_mask) \
	drm_for_each_crtc(crtc, dev) \
		for_each_if (drm_crtc_mask(crtc) & (crtc_mask))

/* Same as for_each_crtc_mask(), but walks the crtc list in reverse order. */
#define for_each_crtc_mask_reverse(dev, crtc, crtc_mask) \
	drm_for_each_crtc_reverse(crtc, dev) \
		for_each_if (drm_crtc_mask(crtc) & (crtc_mask))
216 
/* KMS lifecycle entry points (implemented in the kms code): */
int msm_drm_kms_init(struct device *dev, const struct drm_driver *drv);
void msm_drm_kms_post_init(struct device *dev);
void msm_drm_kms_unregister(struct device *dev);
void msm_drm_kms_uninit(struct device *dev);
221 
222 #else /* ! CONFIG_DRM_MSM_KMS */
223 
/*
 * Stubs used when KMS support is compiled out (!CONFIG_DRM_MSM_KMS):
 * probing reports -ENODEV, the remaining lifecycle hooks are no-ops.
 */
static inline int msm_drm_kms_init(struct device *dev, const struct drm_driver *drv)
{
	return -ENODEV;
}

static inline void msm_drm_kms_post_init(struct device *dev)
{
}

static inline void msm_drm_kms_unregister(struct device *dev)
{
}

static inline void msm_drm_kms_uninit(struct device *dev)
{
}
240 
241 #endif
242 
243 #endif /* __MSM_KMS_H__ */
244