xref: /linux/drivers/gpu/drm/xe/xe_force_wake.c (revision 220994d61cebfc04f071d69049127657c7e8191b)
// SPDX-License-Identifier: MIT
/*
 * Copyright © 2022 Intel Corporation
 */

#include "xe_force_wake.h"

#include <drm/drm_util.h>

#include "regs/xe_gt_regs.h"
#include "regs/xe_reg_defs.h"
#include "xe_gt.h"
#include "xe_gt_printk.h"
#include "xe_mmio.h"
#include "xe_sriov.h"

#define XE_FORCE_WAKE_ACK_TIMEOUT_MS	50

static const char *str_wake_sleep(bool wake)
{
	return wake ? "wake" : "sleep";
}

static void mark_domain_initialized(struct xe_force_wake *fw,
				    enum xe_force_wake_domain_id id)
{
	fw->initialized_domains |= BIT(id);
}

static void init_domain(struct xe_force_wake *fw,
			enum xe_force_wake_domain_id id,
			struct xe_reg reg, struct xe_reg ack)
{
	struct xe_force_wake_domain *domain = &fw->domains[id];

	domain->id = id;
	domain->reg_ctl = reg;
	domain->reg_ack = ack;
	domain->val = FORCEWAKE_MT(FORCEWAKE_KERNEL);
	domain->mask = FORCEWAKE_MT_MASK(FORCEWAKE_KERNEL);

	mark_domain_initialized(fw, id);
}

void xe_force_wake_init_gt(struct xe_gt *gt, struct xe_force_wake *fw)
{
	struct xe_device *xe = gt_to_xe(gt);

	fw->gt = gt;
	spin_lock_init(&fw->lock);

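	/*
	 * Graphics IP version 12.70+ (Xe_LPG, first shipped on Meteor Lake)
	 * moved the GT forcewake ack to a new offset, hence the MTL-specific
	 * ack register below.
	 */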
	if (xe->info.graphics_verx100 >= 1270) {
		init_domain(fw, XE_FW_DOMAIN_ID_GT,
			    FORCEWAKE_GT,
			    FORCEWAKE_ACK_GT_MTL);
	} else {
		init_domain(fw, XE_FW_DOMAIN_ID_GT,
			    FORCEWAKE_GT,
			    FORCEWAKE_ACK_GT);
	}
}

void xe_force_wake_init_engines(struct xe_gt *gt, struct xe_force_wake *fw)
{
	int i, j;

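	/* Only the primary GT has render hardware; media GTs skip this domain. */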
	if (xe_gt_is_main_type(gt))
		init_domain(fw, XE_FW_DOMAIN_ID_RENDER,
			    FORCEWAKE_RENDER,
			    FORCEWAKE_ACK_RENDER);

	for (i = XE_HW_ENGINE_VCS0, j = 0; i <= XE_HW_ENGINE_VCS7; ++i, ++j) {
		if (!(gt->info.engine_mask & BIT(i)))
			continue;

		init_domain(fw, XE_FW_DOMAIN_ID_MEDIA_VDBOX0 + j,
			    FORCEWAKE_MEDIA_VDBOX(j),
			    FORCEWAKE_ACK_MEDIA_VDBOX(j));
	}

	for (i = XE_HW_ENGINE_VECS0, j = 0; i <= XE_HW_ENGINE_VECS3; ++i, ++j) {
		if (!(gt->info.engine_mask & BIT(i)))
			continue;

		init_domain(fw, XE_FW_DOMAIN_ID_MEDIA_VEBOX0 + j,
			    FORCEWAKE_MEDIA_VEBOX(j),
			    FORCEWAKE_ACK_MEDIA_VEBOX(j));
	}

	if (gt->info.engine_mask & BIT(XE_HW_ENGINE_GSCCS0))
		init_domain(fw, XE_FW_DOMAIN_ID_GSC,
			    FORCEWAKE_GSC,
			    FORCEWAKE_ACK_GSC);
}

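/*
 * The FORCEWAKE_* control registers are masked registers: the upper 16 bits
 * of a write select which of the lower 16 bits take effect, which is why the
 * write below always includes domain->mask. SR-IOV VFs have no access to the
 * forcewake registers (forcewake is managed on the PF side), so they bail out
 * early.
 */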
static void __domain_ctl(struct xe_gt *gt, struct xe_force_wake_domain *domain, bool wake)
{
	if (IS_SRIOV_VF(gt_to_xe(gt)))
		return;

	xe_mmio_write32(&gt->mmio, domain->reg_ctl, domain->mask | (wake ? domain->val : 0));
}

static int __domain_wait(struct xe_gt *gt, struct xe_force_wake_domain *domain, bool wake)
{
	u32 value;
	int ret;

	if (IS_SRIOV_VF(gt_to_xe(gt)))
		return 0;

	ret = xe_mmio_wait32(&gt->mmio, domain->reg_ack, domain->val, wake ? domain->val : 0,
			     XE_FORCE_WAKE_ACK_TIMEOUT_MS * USEC_PER_MSEC,
			     &value, true);
	if (ret)
		xe_gt_err(gt, "Force wake domain %d failed to ack %s (%pe) reg[%#x] = %#x\n",
			  domain->id, str_wake_sleep(wake), ERR_PTR(ret),
			  domain->reg_ack.addr, value);
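	/*
	 * An all-ones readback usually means the device is no longer
	 * reachable over MMIO (e.g. it fell off the bus), so report the
	 * interface as unreliable rather than just a missed ack.
	 */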
	if (value == ~0) {
		xe_gt_err(gt,
			  "Force wake domain %d: %s. MMIO unreliable (forcewake register returns 0xFFFFFFFF)!\n",
			  domain->id, str_wake_sleep(wake));
		ret = -EIO;
	}

	return ret;
}

static void domain_wake(struct xe_gt *gt, struct xe_force_wake_domain *domain)
{
	__domain_ctl(gt, domain, true);
}

static int domain_wake_wait(struct xe_gt *gt,
			    struct xe_force_wake_domain *domain)
{
	return __domain_wait(gt, domain, true);
}

static void domain_sleep(struct xe_gt *gt, struct xe_force_wake_domain *domain)
{
	__domain_ctl(gt, domain, false);
}

static int domain_sleep_wait(struct xe_gt *gt,
			     struct xe_force_wake_domain *domain)
{
	return __domain_wait(gt, domain, false);
}

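/*
 * Iterate over the domains selected in @mask__, skipping array slots that
 * were never initialized (their control register address is still zero).
 * @tmp__ is scratch storage consumed during the walk.
 */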
#define for_each_fw_domain_masked(domain__, mask__, fw__, tmp__) \
	for (tmp__ = (mask__); tmp__; tmp__ &= ~BIT(ffs(tmp__) - 1)) \
		for_each_if((domain__ = ((fw__)->domains + \
					 (ffs(tmp__) - 1))) && \
					 domain__->reg_ctl.addr)

/**
 * xe_force_wake_get() - Increase the domain refcount
 * @fw: struct xe_force_wake
 * @domains: forcewake domains to get refcount on
 *
 * This function wakes up @domains if they are asleep and takes references.
 * If the requested domain is XE_FORCEWAKE_ALL, only the applicable/initialized
 * domains are considered for refcounting, and it is the caller's
 * responsibility to check whether the returned ref includes any specific
 * domain using xe_force_wake_ref_has_domain(). The caller must call
 * xe_force_wake_put() to drop the references taken here.
 *
 * Return: opaque reference to woken domains or zero if none of the requested
 * domains could be woken.
 */
unsigned int __must_check xe_force_wake_get(struct xe_force_wake *fw,
					    enum xe_force_wake_domains domains)
{
	struct xe_gt *gt = fw->gt;
	struct xe_force_wake_domain *domain;
	unsigned int ref_incr = 0, awake_rqst = 0, awake_failed = 0;
	unsigned int tmp, ref_rqst;
	unsigned long flags;

	xe_gt_assert(gt, is_power_of_2(domains));
	xe_gt_assert(gt, domains <= XE_FORCEWAKE_ALL);
	xe_gt_assert(gt, domains == XE_FORCEWAKE_ALL || fw->initialized_domains & domains);

	ref_rqst = (domains == XE_FORCEWAKE_ALL) ? fw->initialized_domains : domains;
	spin_lock_irqsave(&fw->lock, flags);
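	/* First pass: take references and request wake for domains going 0 -> 1. */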
	for_each_fw_domain_masked(domain, ref_rqst, fw, tmp) {
		if (!domain->ref++) {
			awake_rqst |= BIT(domain->id);
			domain_wake(gt, domain);
		}
		ref_incr |= BIT(domain->id);
	}
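	/* Second pass: wait for the acks; drop the reference of any domain that failed. */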
	for_each_fw_domain_masked(domain, awake_rqst, fw, tmp) {
		if (domain_wake_wait(gt, domain) == 0) {
			fw->awake_domains |= BIT(domain->id);
		} else {
			awake_failed |= BIT(domain->id);
			--domain->ref;
		}
	}
	ref_incr &= ~awake_failed;
	spin_unlock_irqrestore(&fw->lock, flags);

	xe_gt_WARN(gt, awake_failed, "Forcewake domain%s %#x failed to acknowledge awake request\n",
		   str_plural(hweight_long(awake_failed)), awake_failed);

	if (domains == XE_FORCEWAKE_ALL && ref_incr == fw->initialized_domains)
		ref_incr |= XE_FORCEWAKE_ALL;

	return ref_incr;
}
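
/*
 * Usage sketch (illustrative only, not part of this file): a caller takes a
 * reference before touching GT registers and drops it afterwards. gt_to_fw()
 * and xe_force_wake_ref_has_domain() come from xe_force_wake.h; error
 * handling is simplified here.
 *
 *	unsigned int fw_ref;
 *
 *	fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
 *	if (!xe_force_wake_ref_has_domain(fw_ref, XE_FW_GT))
 *		return -ETIMEDOUT;
 *
 *	... MMIO accesses that require the GT domain to be awake ...
 *
 *	xe_force_wake_put(gt_to_fw(gt), fw_ref);
 */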
213 
/**
 * xe_force_wake_put - Decrement the refcount and put domain to sleep if refcount becomes 0
 * @fw: Pointer to the force wake structure
 * @fw_ref: return of xe_force_wake_get()
 *
 * This function reduces the reference counts for domains in @fw_ref. If the
 * refcount for any of the specified domains reaches 0, the domain is put to
 * sleep and the sleep acknowledgment is awaited within a 50 millisecond
 * timeout. Warns in case of a timeout waiting for the ack from a domain.
 */
void xe_force_wake_put(struct xe_force_wake *fw, unsigned int fw_ref)
{
	struct xe_gt *gt = fw->gt;
	struct xe_force_wake_domain *domain;
	unsigned int tmp, sleep = 0;
	unsigned long flags;
	int ack_fail = 0;

	/*
	 * Avoid an unnecessary lock/unlock cycle when the function is called
	 * in the error path of individual domains.
	 */
	if (!fw_ref)
		return;

	if (xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL))
		fw_ref = fw->initialized_domains;

	spin_lock_irqsave(&fw->lock, flags);
	for_each_fw_domain_masked(domain, fw_ref, fw, tmp) {
		xe_gt_assert(gt, domain->ref);

		if (!--domain->ref) {
			sleep |= BIT(domain->id);
			domain_sleep(gt, domain);
		}
	}
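	/* Wait for each domain that was just released to acknowledge sleep. */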
	for_each_fw_domain_masked(domain, sleep, fw, tmp) {
		if (domain_sleep_wait(gt, domain) == 0)
			fw->awake_domains &= ~BIT(domain->id);
		else
			ack_fail |= BIT(domain->id);
	}
	spin_unlock_irqrestore(&fw->lock, flags);

	xe_gt_WARN(gt, ack_fail, "Forcewake domain%s %#x failed to acknowledge sleep request\n",
		   str_plural(hweight_long(ack_fail)), ack_fail);
}
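
/*
 * Usage sketch for XE_FORCEWAKE_ALL (illustrative only): on partial failure
 * the returned reference may be non-zero yet lack the ALL bit, so the caller
 * still puts whatever was taken, as the xe_force_wake_get() kernel-doc
 * requires.
 *
 *	fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
 *	if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL)) {
 *		xe_force_wake_put(gt_to_fw(gt), fw_ref);
 *		return -ETIMEDOUT;
 *	}
 *
 *	... work requiring all forcewake domains ...
 *
 *	xe_force_wake_put(gt_to_fw(gt), fw_ref);
 */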