// SPDX-License-Identifier: MIT
/*
 * Copyright © 2022 Intel Corporation
 */

#include "xe_force_wake.h"

#include <drm/drm_util.h>

#include "regs/xe_gt_regs.h"
#include "regs/xe_reg_defs.h"
#include "xe_gt.h"
#include "xe_gt_printk.h"
#include "xe_mmio.h"
#include "xe_sriov.h"

#define XE_FORCE_WAKE_ACK_TIMEOUT_MS	50

static const char *str_wake_sleep(bool wake)
{
	return wake ? "wake" : "sleep";
}

static void domain_init(struct xe_force_wake_domain *domain,
			enum xe_force_wake_domain_id id,
			struct xe_reg reg, struct xe_reg ack)
{
	domain->id = id;
	domain->reg_ctl = reg;
	domain->reg_ack = ack;
	domain->val = FORCEWAKE_MT(FORCEWAKE_KERNEL);
	domain->mask = FORCEWAKE_MT_MASK(FORCEWAKE_KERNEL);
}

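/**
 * xe_force_wake_init_gt() - Initialize the GT-level force wake domain
 * @gt: the GT the force wake state belongs to
 * @fw: the force wake state to initialize
 *
 * Sets up the lock and the XE_FW_DOMAIN_ID_GT domain. Platforms with
 * graphics version 12.70 or newer ack through FORCEWAKE_ACK_GT_MTL;
 * older ones use the legacy FORCEWAKE_ACK_GT register.
 */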
void xe_force_wake_init_gt(struct xe_gt *gt, struct xe_force_wake *fw)
{
	struct xe_device *xe = gt_to_xe(gt);

	fw->gt = gt;
	spin_lock_init(&fw->lock);

	/* Assuming gen11+, so assert that assumption holds */
	xe_gt_assert(gt, GRAPHICS_VER(gt_to_xe(gt)) >= 11);

	if (xe->info.graphics_verx100 >= 1270) {
		domain_init(&fw->domains[XE_FW_DOMAIN_ID_GT],
			    XE_FW_DOMAIN_ID_GT,
			    FORCEWAKE_GT,
			    FORCEWAKE_ACK_GT_MTL);
	} else {
		domain_init(&fw->domains[XE_FW_DOMAIN_ID_GT],
			    XE_FW_DOMAIN_ID_GT,
			    FORCEWAKE_GT,
			    FORCEWAKE_ACK_GT);
	}
}

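/**
 * xe_force_wake_init_engines() - Initialize the per-engine force wake domains
 * @gt: the GT the engines belong to
 * @fw: the force wake state to initialize
 *
 * Sets up the render domain (non-media GTs only), plus one domain per
 * VDBOX, VEBOX and GSC engine present in @gt's engine mask.
 */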
void xe_force_wake_init_engines(struct xe_gt *gt, struct xe_force_wake *fw)
{
	int i, j;

	/* Assuming gen11+, so assert that assumption holds */
	xe_gt_assert(gt, GRAPHICS_VER(gt_to_xe(gt)) >= 11);

	if (!xe_gt_is_media_type(gt))
		domain_init(&fw->domains[XE_FW_DOMAIN_ID_RENDER],
			    XE_FW_DOMAIN_ID_RENDER,
			    FORCEWAKE_RENDER,
			    FORCEWAKE_ACK_RENDER);

	for (i = XE_HW_ENGINE_VCS0, j = 0; i <= XE_HW_ENGINE_VCS7; ++i, ++j) {
		if (!(gt->info.engine_mask & BIT(i)))
			continue;

		domain_init(&fw->domains[XE_FW_DOMAIN_ID_MEDIA_VDBOX0 + j],
			    XE_FW_DOMAIN_ID_MEDIA_VDBOX0 + j,
			    FORCEWAKE_MEDIA_VDBOX(j),
			    FORCEWAKE_ACK_MEDIA_VDBOX(j));
	}

	for (i = XE_HW_ENGINE_VECS0, j = 0; i <= XE_HW_ENGINE_VECS3; ++i, ++j) {
		if (!(gt->info.engine_mask & BIT(i)))
			continue;

		domain_init(&fw->domains[XE_FW_DOMAIN_ID_MEDIA_VEBOX0 + j],
			    XE_FW_DOMAIN_ID_MEDIA_VEBOX0 + j,
			    FORCEWAKE_MEDIA_VEBOX(j),
			    FORCEWAKE_ACK_MEDIA_VEBOX(j));
	}

	if (gt->info.engine_mask & BIT(XE_HW_ENGINE_GSCCS0))
		domain_init(&fw->domains[XE_FW_DOMAIN_ID_GSC],
			    XE_FW_DOMAIN_ID_GSC,
			    FORCEWAKE_GSC,
			    FORCEWAKE_ACK_GSC);
}

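/*
 * The FORCEWAKE_* control registers follow the usual masked-register
 * convention: the upper 16 bits select which of the lower 16 bits the
 * write may change. A minimal sketch of what __domain_ctl() ends up
 * writing, assuming FORCEWAKE_KERNEL is bit 0:
 *
 *	wake:  0x00010001  (unmask bit 0, set it)
 *	sleep: 0x00010000  (unmask bit 0, clear it)
 *
 * Bits whose mask half is zero are left untouched by the hardware, so
 * other forcewake clients are not disturbed by this write.
 */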
static void __domain_ctl(struct xe_gt *gt, struct xe_force_wake_domain *domain, bool wake)
{
	/* VFs have no forcewake control; skip the MMIO access entirely */
	if (IS_SRIOV_VF(gt_to_xe(gt)))
		return;

	xe_mmio_write32(gt, domain->reg_ctl, domain->mask | (wake ? domain->val : 0));
}

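/*
 * Poll the ack register until the FORCEWAKE_KERNEL bit matches the
 * requested state, for at most XE_FORCE_WAKE_ACK_TIMEOUT_MS. Callers
 * hold fw->lock with interrupts off, hence the atomic (busy-wait) mode
 * of xe_mmio_wait32(); the last value read is reported on timeout.
 */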
static int __domain_wait(struct xe_gt *gt, struct xe_force_wake_domain *domain, bool wake)
{
	u32 value;
	int ret;

	if (IS_SRIOV_VF(gt_to_xe(gt)))
		return 0;

	ret = xe_mmio_wait32(gt, domain->reg_ack, domain->val, wake ? domain->val : 0,
			     XE_FORCE_WAKE_ACK_TIMEOUT_MS * USEC_PER_MSEC,
			     &value, true);
	if (ret)
		xe_gt_notice(gt, "Force wake domain %d failed to ack %s (%pe) reg[%#x] = %#x\n",
			     domain->id, str_wake_sleep(wake), ERR_PTR(ret),
			     domain->reg_ack.addr, value);

	return ret;
}

static void domain_wake(struct xe_gt *gt, struct xe_force_wake_domain *domain)
{
	__domain_ctl(gt, domain, true);
}

static int domain_wake_wait(struct xe_gt *gt,
			    struct xe_force_wake_domain *domain)
{
	return __domain_wait(gt, domain, true);
}

static void domain_sleep(struct xe_gt *gt, struct xe_force_wake_domain *domain)
{
	__domain_ctl(gt, domain, false);
}

static int domain_sleep_wait(struct xe_gt *gt,
			     struct xe_force_wake_domain *domain)
{
	return __domain_wait(gt, domain, false);
}

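/*
 * Walk every initialized domain whose bit is set in mask__. ffs()
 * returns the 1-based index of the lowest set bit, so each iteration
 * visits one domain before tmp__ clears that bit. An illustrative walk,
 * assuming XE_FW_GT is BIT(0) and XE_FW_RENDER is BIT(1):
 *
 *	mask__ = XE_FW_GT | XE_FW_RENDER
 *	pass 1: ffs(0x3) == 1 -> domains[0] (GT),     tmp__ becomes 0x2
 *	pass 2: ffs(0x2) == 2 -> domains[1] (RENDER), tmp__ becomes 0
 *
 * Domains that were never initialized (reg_ctl.addr == 0) are skipped.
 */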
#define for_each_fw_domain_masked(domain__, mask__, fw__, tmp__) \
	for (tmp__ = (mask__); tmp__; tmp__ &= ~BIT(ffs(tmp__) - 1)) \
		for_each_if((domain__ = ((fw__)->domains + \
					 (ffs(tmp__) - 1))) && \
					 domain__->reg_ctl.addr)

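/**
 * xe_force_wake_get() - Wake the requested force wake domains
 * @fw: the force wake state
 * @domains: mask of domains to wake
 *
 * Takes a reference on each domain in @domains; any domain whose
 * refcount goes from zero to one is woken and its ack is awaited.
 *
 * A minimal usage sketch, assuming a caller that has a struct xe_gt *gt
 * (gt_to_fw() from xe_gt.h and XE_FW_GT from xe_force_wake.h):
 *
 *	err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
 *	if (err)
 *		return err;
 *	... MMIO accesses that need the GT domain awake ...
 *	err = xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
 *
 * Return: 0 on success, non-zero if any domain failed to ack the wake.
 */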
int xe_force_wake_get(struct xe_force_wake *fw,
		      enum xe_force_wake_domains domains)
{
	struct xe_gt *gt = fw->gt;
	struct xe_force_wake_domain *domain;
	enum xe_force_wake_domains tmp, woken = 0;
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&fw->lock, flags);
	for_each_fw_domain_masked(domain, domains, fw, tmp) {
		if (!domain->ref++) {
			woken |= BIT(domain->id);
			domain_wake(gt, domain);
		}
	}
	for_each_fw_domain_masked(domain, woken, fw, tmp) {
		ret |= domain_wake_wait(gt, domain);
	}
	fw->awake_domains |= woken;
	spin_unlock_irqrestore(&fw->lock, flags);

	return ret;
}

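/**
 * xe_force_wake_put() - Drop a reference on the given force wake domains
 * @fw: the force wake state
 * @domains: mask of domains to release
 *
 * Drops a reference on each domain in @domains; any domain whose
 * refcount reaches zero is put back to sleep and its ack is awaited.
 *
 * Return: 0 on success, non-zero if any domain failed to ack the sleep.
 */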
int xe_force_wake_put(struct xe_force_wake *fw,
		      enum xe_force_wake_domains domains)
{
	struct xe_gt *gt = fw->gt;
	struct xe_force_wake_domain *domain;
	enum xe_force_wake_domains tmp, sleep = 0;
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&fw->lock, flags);
	for_each_fw_domain_masked(domain, domains, fw, tmp) {
		if (!--domain->ref) {
			sleep |= BIT(domain->id);
			domain_sleep(gt, domain);
		}
	}
	for_each_fw_domain_masked(domain, sleep, fw, tmp) {
		ret |= domain_sleep_wait(gt, domain);
	}
	fw->awake_domains &= ~sleep;
	spin_unlock_irqrestore(&fw->lock, flags);

	return ret;
}