xref: /linux/drivers/gpu/drm/xe/xe_force_wake.c (revision f5bd7da05a5988506dedcb3e67aecb3a13a4cdf0)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2022 Intel Corporation
4  */
5 
6 #include "xe_force_wake.h"
7 
8 #include <drm/drm_util.h>
9 
10 #include "regs/xe_gt_regs.h"
11 #include "regs/xe_reg_defs.h"
12 #include "xe_gt.h"
13 #include "xe_gt_printk.h"
14 #include "xe_mmio.h"
15 #include "xe_sriov.h"
16 
17 #define XE_FORCE_WAKE_ACK_TIMEOUT_MS	50
18 
/* Human-readable transition direction for forcewake log messages. */
static const char *str_wake_sleep(bool wake)
{
	if (wake)
		return "wake";

	return "sleep";
}
23 
24 static void mark_domain_initialized(struct xe_force_wake *fw,
25 				    enum xe_force_wake_domain_id id)
26 {
27 	fw->initialized_domains |= BIT(id);
28 }
29 
30 static void init_domain(struct xe_force_wake *fw,
31 			enum xe_force_wake_domain_id id,
32 			struct xe_reg reg, struct xe_reg ack)
33 {
34 	struct xe_force_wake_domain *domain = &fw->domains[id];
35 
36 	domain->id = id;
37 	domain->reg_ctl = reg;
38 	domain->reg_ack = ack;
39 	domain->val = FORCEWAKE_MT(FORCEWAKE_KERNEL);
40 	domain->mask = FORCEWAKE_MT_MASK(FORCEWAKE_KERNEL);
41 
42 	mark_domain_initialized(fw, id);
43 }
44 
45 void xe_force_wake_init_gt(struct xe_gt *gt, struct xe_force_wake *fw)
46 {
47 	struct xe_device *xe = gt_to_xe(gt);
48 
49 	fw->gt = gt;
50 	spin_lock_init(&fw->lock);
51 
52 	if (xe->info.graphics_verx100 >= 1270) {
53 		init_domain(fw, XE_FW_DOMAIN_ID_GT,
54 			    FORCEWAKE_GT,
55 			    FORCEWAKE_ACK_GT_MTL);
56 	} else {
57 		init_domain(fw, XE_FW_DOMAIN_ID_GT,
58 			    FORCEWAKE_GT,
59 			    FORCEWAKE_ACK_GT);
60 	}
61 }
62 
63 void xe_force_wake_init_engines(struct xe_gt *gt, struct xe_force_wake *fw)
64 {
65 	int i, j;
66 
67 	if (xe_gt_is_main_type(gt))
68 		init_domain(fw, XE_FW_DOMAIN_ID_RENDER,
69 			    FORCEWAKE_RENDER,
70 			    FORCEWAKE_ACK_RENDER);
71 
72 	for (i = XE_HW_ENGINE_VCS0, j = 0; i <= XE_HW_ENGINE_VCS7; ++i, ++j) {
73 		if (!(gt->info.engine_mask & BIT(i)))
74 			continue;
75 
76 		init_domain(fw, XE_FW_DOMAIN_ID_MEDIA_VDBOX0 + j,
77 			    FORCEWAKE_MEDIA_VDBOX(j),
78 			    FORCEWAKE_ACK_MEDIA_VDBOX(j));
79 	}
80 
81 	for (i = XE_HW_ENGINE_VECS0, j = 0; i <= XE_HW_ENGINE_VECS3; ++i, ++j) {
82 		if (!(gt->info.engine_mask & BIT(i)))
83 			continue;
84 
85 		init_domain(fw, XE_FW_DOMAIN_ID_MEDIA_VEBOX0 + j,
86 			    FORCEWAKE_MEDIA_VEBOX(j),
87 			    FORCEWAKE_ACK_MEDIA_VEBOX(j));
88 	}
89 
90 	if (gt->info.engine_mask & BIT(XE_HW_ENGINE_GSCCS0))
91 		init_domain(fw, XE_FW_DOMAIN_ID_GSC,
92 			    FORCEWAKE_GSC,
93 			    FORCEWAKE_ACK_GSC);
94 }
95 
96 static void __domain_ctl(struct xe_gt *gt, struct xe_force_wake_domain *domain, bool wake)
97 {
98 	if (IS_SRIOV_VF(gt_to_xe(gt)))
99 		return;
100 
101 	xe_mmio_write32(&gt->mmio, domain->reg_ctl, domain->mask | (wake ? domain->val : 0));
102 }
103 
/*
 * Wait for the hardware to acknowledge a forcewake state change.
 *
 * Polls the domain ack register until the FORCEWAKE_KERNEL bit matches the
 * requested state (set for wake, clear for sleep) or the 50 ms timeout
 * expires. VFs don't control forcewake, so this is a no-op returning
 * success for them.
 *
 * Returns 0 on success, or a negative error code on timeout or when the
 * MMIO space appears dead.
 */
static int __domain_wait(struct xe_gt *gt, struct xe_force_wake_domain *domain, bool wake)
{
	u32 value;
	int ret;

	if (IS_SRIOV_VF(gt_to_xe(gt)))
		return 0;

	ret = xe_mmio_wait32(&gt->mmio, domain->reg_ack, domain->val, wake ? domain->val : 0,
			     XE_FORCE_WAKE_ACK_TIMEOUT_MS * USEC_PER_MSEC,
			     &value, true);
	if (ret)
		xe_gt_err(gt, "Force wake domain %d failed to ack %s (%pe) reg[%#x] = %#x\n",
			  domain->id, str_wake_sleep(wake), ERR_PTR(ret),
			  domain->reg_ack.addr, value);
	/*
	 * An all-ones readback means the register space itself is unusable
	 * (e.g. device dropped off the bus); report -EIO regardless of the
	 * wait outcome above.
	 */
	if (value == ~0) {
		xe_gt_err(gt,
			  "Force wake domain %d: %s. MMIO unreliable (forcewake register returns 0xFFFFFFFF)!\n",
			  domain->id, str_wake_sleep(wake));
		ret = -EIO;
	}

	return ret;
}
128 
/* Request the domain to wake; does not wait for the hardware ack. */
static void domain_wake(struct xe_gt *gt, struct xe_force_wake_domain *domain)
{
	__domain_ctl(gt, domain, true);
}
133 
/* Wait for a previously requested wake to be acknowledged. Returns 0 on success. */
static int domain_wake_wait(struct xe_gt *gt,
			    struct xe_force_wake_domain *domain)
{
	return __domain_wait(gt, domain, true);
}
139 
/* Request the domain to sleep; does not wait for the hardware ack. */
static void domain_sleep(struct xe_gt *gt, struct xe_force_wake_domain *domain)
{
	__domain_ctl(gt, domain, false);
}
144 
/* Wait for a previously requested sleep to be acknowledged. Returns 0 on success. */
static int domain_sleep_wait(struct xe_gt *gt,
			     struct xe_force_wake_domain *domain)
{
	return __domain_wait(gt, domain, false);
}
150 
/**
 * xe_force_wake_get() - Increase the domain refcount
 * @fw: struct xe_force_wake
 * @domains: forcewake domains to get refcount on
 *
 * This function wakes up @domains if they are asleep and takes references.
 * If requested domain is XE_FORCEWAKE_ALL then only applicable/initialized
 * domains will be considered for refcount and it is a caller responsibility
 * to check returned ref if it includes any specific domain by using
 * xe_force_wake_ref_has_domain() function. Caller must call
 * xe_force_wake_put() function to decrease incremented refcounts.
 *
 * When possible, scope-based forcewake (through CLASS(xe_force_wake, ...) or
 * xe_with_force_wake()) should be used instead of direct calls to this
 * function.  Direct usage of get/put should only be used when the function
 * has goto-based flows that can interfere with scope-based cleanup, or when
 * the lifetime of the forcewake reference does not match a specific scope
 * (e.g., forcewake obtained in one function and released in a different one).
 *
 * Return: opaque reference to woken domains or zero if none of requested
 * domains were awake.
 */
unsigned int __must_check xe_force_wake_get(struct xe_force_wake *fw,
					    enum xe_force_wake_domains domains)
{
	struct xe_gt *gt = fw->gt;
	struct xe_force_wake_domain *domain;
	unsigned int ref_incr = 0, awake_rqst = 0, awake_failed = 0;
	unsigned int tmp, ref_rqst;
	unsigned long flags;

	/* Callers must pass a single domain bit or XE_FORCEWAKE_ALL */
	xe_gt_assert(gt, is_power_of_2(domains));
	xe_gt_assert(gt, domains <= XE_FORCEWAKE_ALL);
	xe_gt_assert(gt, domains == XE_FORCEWAKE_ALL || fw->initialized_domains & domains);

	/* XE_FORCEWAKE_ALL expands to whatever domains exist on this GT */
	ref_rqst = (domains == XE_FORCEWAKE_ALL) ? fw->initialized_domains : domains;
	spin_lock_irqsave(&fw->lock, flags);
	/*
	 * First pass: take references, and issue a wake request for each
	 * domain whose refcount transitions 0 -> 1. Acks are collected in a
	 * second pass so all wake requests are in flight before waiting.
	 */
	for_each_fw_domain_masked(domain, ref_rqst, fw, tmp) {
		if (!domain->ref++) {
			awake_rqst |= BIT(domain->id);
			domain_wake(gt, domain);
		}
		ref_incr |= BIT(domain->id);
	}
	/* Second pass: wait for acks; undo the reference taken above on failure */
	for_each_fw_domain_masked(domain, awake_rqst, fw, tmp) {
		if (domain_wake_wait(gt, domain) == 0) {
			fw->awake_domains |= BIT(domain->id);
		} else {
			awake_failed |= BIT(domain->id);
			--domain->ref;
		}
	}
	/* Failed domains hold no reference, so drop them from the result */
	ref_incr &= ~awake_failed;
	spin_unlock_irqrestore(&fw->lock, flags);

	xe_gt_WARN(gt, awake_failed, "Forcewake domain%s %#x failed to acknowledge awake request\n",
		   str_plural(hweight_long(awake_failed)), awake_failed);

	/*
	 * Tag the returned ref with XE_FORCEWAKE_ALL only if every
	 * initialized domain was successfully referenced, so that
	 * xe_force_wake_put() can expand it back to the full set.
	 */
	if (domains == XE_FORCEWAKE_ALL && ref_incr == fw->initialized_domains)
		ref_incr |= XE_FORCEWAKE_ALL;

	return ref_incr;
}
214 
/**
 * xe_force_wake_put - Decrement the refcount and put domain to sleep if refcount becomes 0
 * @fw: Pointer to the force wake structure
 * @fw_ref: return of xe_force_wake_get()
 *
 * This function reduces the reference counts for domains in fw_ref. If
 * refcount for any of the specified domain reaches 0, it puts the domain to sleep
 * and waits for acknowledgment for domain to sleep within 50 millisecond timeout.
 * Warns in case of timeout of ack from domain.
 */
void xe_force_wake_put(struct xe_force_wake *fw, unsigned int fw_ref)
{
	struct xe_gt *gt = fw->gt;
	struct xe_force_wake_domain *domain;
	unsigned int tmp, sleep = 0;
	unsigned long flags;
	int ack_fail = 0;

	/*
	 * Avoid unnecessary lock and unlock when the function is called
	 * in error path of individual domains.
	 */
	if (!fw_ref)
		return;

	/* An ALL-tagged ref covers every initialized domain on this GT */
	if (xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL))
		fw_ref = fw->initialized_domains;

	spin_lock_irqsave(&fw->lock, flags);
	/*
	 * First pass: drop references, and issue a sleep request for each
	 * domain whose refcount transitions 1 -> 0. Acks are collected in a
	 * second pass so all sleep requests are in flight before waiting.
	 */
	for_each_fw_domain_masked(domain, fw_ref, fw, tmp) {
		xe_gt_assert(gt, domain->ref);

		if (!--domain->ref) {
			sleep |= BIT(domain->id);
			domain_sleep(gt, domain);
		}
	}
	for_each_fw_domain_masked(domain, sleep, fw, tmp) {
		if (domain_sleep_wait(gt, domain) == 0)
			fw->awake_domains &= ~BIT(domain->id);
		else
			ack_fail |= BIT(domain->id);
	}
	spin_unlock_irqrestore(&fw->lock, flags);

	xe_gt_WARN(gt, ack_fail, "Forcewake domain%s %#x failed to acknowledge sleep request\n",
		   str_plural(hweight_long(ack_fail)), ack_fail);
}
263 
264 const char *xe_force_wake_domain_to_str(enum xe_force_wake_domain_id id)
265 {
266 	switch (id) {
267 	case XE_FW_DOMAIN_ID_GT:
268 		return "GT";
269 	case XE_FW_DOMAIN_ID_RENDER:
270 		return "Render";
271 	case XE_FW_DOMAIN_ID_MEDIA:
272 		return "Media";
273 	case XE_FW_DOMAIN_ID_MEDIA_VDBOX0:
274 		return "VDBox0";
275 	case XE_FW_DOMAIN_ID_MEDIA_VDBOX1:
276 		return "VDBox1";
277 	case XE_FW_DOMAIN_ID_MEDIA_VDBOX2:
278 		return "VDBox2";
279 	case XE_FW_DOMAIN_ID_MEDIA_VDBOX3:
280 		return "VDBox3";
281 	case XE_FW_DOMAIN_ID_MEDIA_VDBOX4:
282 		return "VDBox4";
283 	case XE_FW_DOMAIN_ID_MEDIA_VDBOX5:
284 		return "VDBox5";
285 	case XE_FW_DOMAIN_ID_MEDIA_VDBOX6:
286 		return "VDBox6";
287 	case XE_FW_DOMAIN_ID_MEDIA_VDBOX7:
288 		return "VDBox7";
289 	case XE_FW_DOMAIN_ID_MEDIA_VEBOX0:
290 		return "VEBox0";
291 	case XE_FW_DOMAIN_ID_MEDIA_VEBOX1:
292 		return "VEBox1";
293 	case XE_FW_DOMAIN_ID_MEDIA_VEBOX2:
294 		return "VEBox2";
295 	case XE_FW_DOMAIN_ID_MEDIA_VEBOX3:
296 		return "VEBox3";
297 	case XE_FW_DOMAIN_ID_GSC:
298 		return "GSC";
299 	default:
300 		return "Unknown";
301 	}
302 }
303