// SPDX-License-Identifier: MIT
/*
 * Copyright © 2022 Intel Corporation
 */

#include "xe_force_wake.h"

#include <drm/drm_util.h>

#include "regs/xe_gt_regs.h"
#include "regs/xe_reg_defs.h"
#include "xe_gt.h"
#include "xe_mmio.h"

#define XE_FORCE_WAKE_ACK_TIMEOUT_MS	50

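/* Trivial accessors from a struct xe_force_wake back to its GT and device. */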
static struct xe_gt *
fw_to_gt(struct xe_force_wake *fw)
{
	return fw->gt;
}

static struct xe_device *
fw_to_xe(struct xe_force_wake *fw)
{
	return gt_to_xe(fw_to_gt(fw));
}

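/*
 * Fill in a single forcewake domain: its ID, its control and ack registers,
 * the control bit (val) and the write-enable mask used when poking reg_ctl.
 */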
static void domain_init(struct xe_force_wake_domain *domain,
			enum xe_force_wake_domain_id id,
			struct xe_reg reg, struct xe_reg ack, u32 val, u32 mask)
{
	domain->id = id;
	domain->reg_ctl = reg;
	domain->reg_ack = ack;
	domain->val = val;
	domain->mask = mask;
}

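/**
 * xe_force_wake_init_gt() - Initialize the GT-level forcewake domain
 * @gt: the GT this forcewake instance belongs to
 * @fw: the forcewake instance to initialize
 *
 * Sets up the lock and the always-present GT domain. Graphics version
 * 12.70+ (MTL and later) platforms use a relocated ack register.
 */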
void xe_force_wake_init_gt(struct xe_gt *gt, struct xe_force_wake *fw)
{
	struct xe_device *xe = gt_to_xe(gt);

	fw->gt = gt;
	spin_lock_init(&fw->lock);

	/* The driver assumes gen11+ platforms; assert that assumption holds */
	xe_gt_assert(gt, GRAPHICS_VER(xe) >= 11);

	if (xe->info.graphics_verx100 >= 1270) {
		domain_init(&fw->domains[XE_FW_DOMAIN_ID_GT],
			    XE_FW_DOMAIN_ID_GT,
			    FORCEWAKE_GT,
			    FORCEWAKE_ACK_GT_MTL,
			    BIT(0), BIT(16));
	} else {
		domain_init(&fw->domains[XE_FW_DOMAIN_ID_GT],
			    XE_FW_DOMAIN_ID_GT,
			    FORCEWAKE_GT,
			    FORCEWAKE_ACK_GT,
			    BIT(0), BIT(16));
	}
}

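/**
 * xe_force_wake_init_engines() - Initialize per-engine forcewake domains
 * @gt: the GT this forcewake instance belongs to
 * @fw: the forcewake instance to initialize
 *
 * Registers a domain for render (primary GT only) and for each VDBOX,
 * VEBOX and GSC engine present in the GT's engine mask. Domains for
 * absent engines are left zeroed and skipped during iteration.
 */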
void xe_force_wake_init_engines(struct xe_gt *gt, struct xe_force_wake *fw)
{
	int i, j;

	/* The driver assumes gen11+ platforms; assert that assumption holds */
	xe_gt_assert(gt, GRAPHICS_VER(gt_to_xe(gt)) >= 11);

	if (!xe_gt_is_media_type(gt))
		domain_init(&fw->domains[XE_FW_DOMAIN_ID_RENDER],
			    XE_FW_DOMAIN_ID_RENDER,
			    FORCEWAKE_RENDER,
			    FORCEWAKE_ACK_RENDER,
			    BIT(0), BIT(16));

	for (i = XE_HW_ENGINE_VCS0, j = 0; i <= XE_HW_ENGINE_VCS7; ++i, ++j) {
		if (!(gt->info.engine_mask & BIT(i)))
			continue;

		domain_init(&fw->domains[XE_FW_DOMAIN_ID_MEDIA_VDBOX0 + j],
			    XE_FW_DOMAIN_ID_MEDIA_VDBOX0 + j,
			    FORCEWAKE_MEDIA_VDBOX(j),
			    FORCEWAKE_ACK_MEDIA_VDBOX(j),
			    BIT(0), BIT(16));
	}

	for (i = XE_HW_ENGINE_VECS0, j = 0; i <= XE_HW_ENGINE_VECS3; ++i, ++j) {
		if (!(gt->info.engine_mask & BIT(i)))
			continue;

		domain_init(&fw->domains[XE_FW_DOMAIN_ID_MEDIA_VEBOX0 + j],
			    XE_FW_DOMAIN_ID_MEDIA_VEBOX0 + j,
			    FORCEWAKE_MEDIA_VEBOX(j),
			    FORCEWAKE_ACK_MEDIA_VEBOX(j),
			    BIT(0), BIT(16));
	}

	if (gt->info.engine_mask & BIT(XE_HW_ENGINE_GSCCS0))
		domain_init(&fw->domains[XE_FW_DOMAIN_ID_GSC],
			    XE_FW_DOMAIN_ID_GSC,
			    FORCEWAKE_GSC,
			    FORCEWAKE_ACK_GSC,
			    BIT(0), BIT(16));
}

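/*
 * Forcewake control registers use the upper 16 bits as a write-enable mask
 * for the lower 16: writing mask | val sets the wake bit, while writing the
 * mask alone clears it (see domain_sleep() below).
 */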
static void domain_wake(struct xe_gt *gt, struct xe_force_wake_domain *domain)
{
	xe_mmio_write32(gt, domain->reg_ctl, domain->mask | domain->val);
}

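/* Poll the ack register until the hardware reports the domain awake. */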
static int domain_wake_wait(struct xe_gt *gt,
			    struct xe_force_wake_domain *domain)
{
	return xe_mmio_wait32(gt, domain->reg_ack, domain->val, domain->val,
			      XE_FORCE_WAKE_ACK_TIMEOUT_MS * USEC_PER_MSEC,
			      NULL, true);
}

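/* Drop the wake request so the hardware may power the domain back down. */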
static void domain_sleep(struct xe_gt *gt, struct xe_force_wake_domain *domain)
{
	xe_mmio_write32(gt, domain->reg_ctl, domain->mask);
}

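/* Poll the ack register until the wake bit is reported clear. */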
static int domain_sleep_wait(struct xe_gt *gt,
			     struct xe_force_wake_domain *domain)
{
	return xe_mmio_wait32(gt, domain->reg_ack, domain->val, 0,
			      XE_FORCE_WAKE_ACK_TIMEOUT_MS * USEC_PER_MSEC,
			      NULL, true);
}

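/*
 * Iterate over the domains whose IDs are set in mask__, skipping any that
 * were never initialized (reg_ctl.addr == 0).
 */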
#define for_each_fw_domain_masked(domain__, mask__, fw__, tmp__) \
	for (tmp__ = (mask__); tmp__; tmp__ &= ~BIT(ffs(tmp__) - 1)) \
		for_each_if((domain__ = ((fw__)->domains + \
					 (ffs(tmp__) - 1))) && \
					 domain__->reg_ctl.addr)

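/**
 * xe_force_wake_get() - Take a reference on a set of forcewake domains
 * @fw: the forcewake instance
 * @domains: mask of domains to wake
 *
 * Wakes every domain in @domains whose reference count transitions from
 * zero, then waits for each of those to ack. The OR of all wait results is
 * returned, so 0 means every requested domain is awake.
 *
 * Illustrative usage only (a hypothetical caller, not taken from this file;
 * some_gt_reg is a placeholder register); each get is expected to be paired
 * with a put:
 *
 *	err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
 *	if (err)
 *		return err;
 *	reg_val = xe_mmio_read32(gt, some_gt_reg);
 *	xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
 */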
int xe_force_wake_get(struct xe_force_wake *fw,
		      enum xe_force_wake_domains domains)
{
	struct xe_device *xe = fw_to_xe(fw);
	struct xe_gt *gt = fw_to_gt(fw);
	struct xe_force_wake_domain *domain;
	enum xe_force_wake_domains tmp, woken = 0;
	unsigned long flags;
	int ret, ret2 = 0;

	spin_lock_irqsave(&fw->lock, flags);
	for_each_fw_domain_masked(domain, domains, fw, tmp) {
		if (!domain->ref++) {
			woken |= BIT(domain->id);
			domain_wake(gt, domain);
		}
	}
	for_each_fw_domain_masked(domain, woken, fw, tmp) {
		ret = domain_wake_wait(gt, domain);
		ret2 |= ret;
		if (ret)
			drm_notice(&xe->drm, "Force wake domain (%d) failed to ack wake, ret=%d\n",
				   domain->id, ret);
	}
	fw->awake_domains |= woken;
	spin_unlock_irqrestore(&fw->lock, flags);

	return ret2;
}

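/**
 * xe_force_wake_put() - Release a reference on a set of forcewake domains
 * @fw: the forcewake instance
 * @domains: mask of domains to release
 *
 * Puts to sleep every domain in @domains whose reference count drops to
 * zero, then waits for each of those to ack the sleep. Returns the OR of
 * all wait results, so 0 means every released domain acked.
 */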
int xe_force_wake_put(struct xe_force_wake *fw,
		      enum xe_force_wake_domains domains)
{
	struct xe_device *xe = fw_to_xe(fw);
	struct xe_gt *gt = fw_to_gt(fw);
	struct xe_force_wake_domain *domain;
	enum xe_force_wake_domains tmp, sleep = 0;
	unsigned long flags;
	int ret, ret2 = 0;

	spin_lock_irqsave(&fw->lock, flags);
	for_each_fw_domain_masked(domain, domains, fw, tmp) {
		if (!--domain->ref) {
			sleep |= BIT(domain->id);
			domain_sleep(gt, domain);
		}
	}
	for_each_fw_domain_masked(domain, sleep, fw, tmp) {
		ret = domain_sleep_wait(gt, domain);
		ret2 |= ret;
		if (ret)
			drm_notice(&xe->drm, "Force wake domain (%d) failed to ack sleep, ret=%d\n",
				   domain->id, ret);
	}
	fw->awake_domains &= ~sleep;
	spin_unlock_irqrestore(&fw->lock, flags);

	return ret2;
}