xref: /linux/drivers/gpu/drm/xe/xe_force_wake.c (revision 08516de501fae647fb29bf3b62718de56cc24014)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2022 Intel Corporation
4  */
5 
6 #include "xe_force_wake.h"
7 
8 #include <drm/drm_util.h>
9 
10 #include "regs/xe_gt_regs.h"
11 #include "regs/xe_reg_defs.h"
12 #include "xe_gt.h"
13 #include "xe_mmio.h"
14 
15 #define XE_FORCE_WAKE_ACK_TIMEOUT_MS	50
16 
/* Return the GT that owns this force wake instance. */
static struct xe_gt *
fw_to_gt(struct xe_force_wake *fw)
{
	return fw->gt;
}
22 
/* Return the xe device that owns this force wake instance (via its GT). */
static struct xe_device *
fw_to_xe(struct xe_force_wake *fw)
{
	return gt_to_xe(fw_to_gt(fw));
}
28 
29 static void domain_init(struct xe_force_wake_domain *domain,
30 			enum xe_force_wake_domain_id id,
31 			struct xe_reg reg, struct xe_reg ack, u32 val, u32 mask)
32 {
33 	domain->id = id;
34 	domain->reg_ctl = reg;
35 	domain->reg_ack = ack;
36 	domain->val = val;
37 	domain->mask = mask;
38 }
39 
40 void xe_force_wake_init_gt(struct xe_gt *gt, struct xe_force_wake *fw)
41 {
42 	struct xe_device *xe = gt_to_xe(gt);
43 
44 	fw->gt = gt;
45 	mutex_init(&fw->lock);
46 
47 	/* Assuming gen11+ so assert this assumption is correct */
48 	XE_BUG_ON(GRAPHICS_VER(gt_to_xe(gt)) < 11);
49 
50 	if (xe->info.graphics_verx100 >= 1270) {
51 		domain_init(&fw->domains[XE_FW_DOMAIN_ID_GT],
52 			    XE_FW_DOMAIN_ID_GT,
53 			    FORCEWAKE_GT,
54 			    FORCEWAKE_ACK_GT_MTL,
55 			    BIT(0), BIT(16));
56 	} else {
57 		domain_init(&fw->domains[XE_FW_DOMAIN_ID_GT],
58 			    XE_FW_DOMAIN_ID_GT,
59 			    FORCEWAKE_GT,
60 			    FORCEWAKE_ACK_GT,
61 			    BIT(0), BIT(16));
62 	}
63 }
64 
65 void xe_force_wake_init_engines(struct xe_gt *gt, struct xe_force_wake *fw)
66 {
67 	int i, j;
68 
69 	/* Assuming gen11+ so assert this assumption is correct */
70 	XE_BUG_ON(GRAPHICS_VER(gt_to_xe(gt)) < 11);
71 
72 	if (!xe_gt_is_media_type(gt))
73 		domain_init(&fw->domains[XE_FW_DOMAIN_ID_RENDER],
74 			    XE_FW_DOMAIN_ID_RENDER,
75 			    FORCEWAKE_RENDER,
76 			    FORCEWAKE_ACK_RENDER,
77 			    BIT(0), BIT(16));
78 
79 	for (i = XE_HW_ENGINE_VCS0, j = 0; i <= XE_HW_ENGINE_VCS7; ++i, ++j) {
80 		if (!(gt->info.engine_mask & BIT(i)))
81 			continue;
82 
83 		domain_init(&fw->domains[XE_FW_DOMAIN_ID_MEDIA_VDBOX0 + j],
84 			    XE_FW_DOMAIN_ID_MEDIA_VDBOX0 + j,
85 			    FORCEWAKE_MEDIA_VDBOX(j),
86 			    FORCEWAKE_ACK_MEDIA_VDBOX(j),
87 			    BIT(0), BIT(16));
88 	}
89 
90 	for (i = XE_HW_ENGINE_VECS0, j =0; i <= XE_HW_ENGINE_VECS3; ++i, ++j) {
91 		if (!(gt->info.engine_mask & BIT(i)))
92 			continue;
93 
94 		domain_init(&fw->domains[XE_FW_DOMAIN_ID_MEDIA_VEBOX0 + j],
95 			    XE_FW_DOMAIN_ID_MEDIA_VEBOX0 + j,
96 			    FORCEWAKE_MEDIA_VEBOX(j),
97 			    FORCEWAKE_ACK_MEDIA_VEBOX(j),
98 			    BIT(0), BIT(16));
99 	}
100 }
101 
/* Request wake: write the wake bit along with its write-enable mask bit. */
static void domain_wake(struct xe_gt *gt, struct xe_force_wake_domain *domain)
{
	xe_mmio_write32(gt, domain->reg_ctl, domain->mask | domain->val);
}
106 
/*
 * Poll the ack register until the wake bit is set, or until the
 * XE_FORCE_WAKE_ACK_TIMEOUT_MS timeout expires.  Returns 0 on ack,
 * a negative error code on timeout.
 */
static int domain_wake_wait(struct xe_gt *gt,
			    struct xe_force_wake_domain *domain)
{
	return xe_mmio_wait32(gt, domain->reg_ack, domain->val, domain->val,
			      XE_FORCE_WAKE_ACK_TIMEOUT_MS * USEC_PER_MSEC,
			      NULL, false);
}
114 
/* Request sleep: write the mask bit alone, clearing the wake bit. */
static void domain_sleep(struct xe_gt *gt, struct xe_force_wake_domain *domain)
{
	xe_mmio_write32(gt, domain->reg_ctl, domain->mask);
}
119 
/*
 * Poll the ack register until the wake bit clears, or until the
 * XE_FORCE_WAKE_ACK_TIMEOUT_MS timeout expires.  Returns 0 on ack,
 * a negative error code on timeout.
 */
static int domain_sleep_wait(struct xe_gt *gt,
			     struct xe_force_wake_domain *domain)
{
	return xe_mmio_wait32(gt, domain->reg_ack, 0, domain->val,
			      XE_FORCE_WAKE_ACK_TIMEOUT_MS * USEC_PER_MSEC,
			      NULL, false);
}
127 
/*
 * Iterate over the force wake domains of @fw__ selected by the bitmask
 * @mask__, clearing the lowest set bit of the scratch copy @tmp__ each
 * pass.  Domains whose control register was never initialized
 * (reg_ctl.addr == 0) are skipped via for_each_if().
 */
#define for_each_fw_domain_masked(domain__, mask__, fw__, tmp__) \
	for (tmp__ = (mask__); tmp__; tmp__ &= ~BIT(ffs(tmp__) - 1)) \
		for_each_if((domain__ = ((fw__)->domains + \
					 (ffs(tmp__) - 1))) && \
					 domain__->reg_ctl.addr)
133 
134 int xe_force_wake_get(struct xe_force_wake *fw,
135 		      enum xe_force_wake_domains domains)
136 {
137 	struct xe_device *xe = fw_to_xe(fw);
138 	struct xe_gt *gt = fw_to_gt(fw);
139 	struct xe_force_wake_domain *domain;
140 	enum xe_force_wake_domains tmp, woken = 0;
141 	int ret, ret2 = 0;
142 
143 	mutex_lock(&fw->lock);
144 	for_each_fw_domain_masked(domain, domains, fw, tmp) {
145 		if (!domain->ref++) {
146 			woken |= BIT(domain->id);
147 			domain_wake(gt, domain);
148 		}
149 	}
150 	for_each_fw_domain_masked(domain, woken, fw, tmp) {
151 		ret = domain_wake_wait(gt, domain);
152 		ret2 |= ret;
153 		if (ret)
154 			drm_notice(&xe->drm, "Force wake domain (%d) failed to ack wake, ret=%d\n",
155 				   domain->id, ret);
156 	}
157 	fw->awake_domains |= woken;
158 	mutex_unlock(&fw->lock);
159 
160 	return ret2;
161 }
162 
163 int xe_force_wake_put(struct xe_force_wake *fw,
164 		      enum xe_force_wake_domains domains)
165 {
166 	struct xe_device *xe = fw_to_xe(fw);
167 	struct xe_gt *gt = fw_to_gt(fw);
168 	struct xe_force_wake_domain *domain;
169 	enum xe_force_wake_domains tmp, sleep = 0;
170 	int ret, ret2 = 0;
171 
172 	mutex_lock(&fw->lock);
173 	for_each_fw_domain_masked(domain, domains, fw, tmp) {
174 		if (!--domain->ref) {
175 			sleep |= BIT(domain->id);
176 			domain_sleep(gt, domain);
177 		}
178 	}
179 	for_each_fw_domain_masked(domain, sleep, fw, tmp) {
180 		ret = domain_sleep_wait(gt, domain);
181 		ret2 |= ret;
182 		if (ret)
183 			drm_notice(&xe->drm, "Force wake domain (%d) failed to ack sleep, ret=%d\n",
184 				   domain->id, ret);
185 	}
186 	fw->awake_domains &= ~sleep;
187 	mutex_unlock(&fw->lock);
188 
189 	return ret2;
190 }
191