/*
 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/acpi.h>
#include "i915_drv.h"
#include "gvt.h"

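/*
 * init_vgpu_opregion() - build the vGPU's private shadow copy of the host
 * OpRegion and record the guest page frames starting at @gpa that back it.
 * The lid status is forced to "OPEN" to keep Windows guests working (see
 * the comment in the function body).
 */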
static int init_vgpu_opregion(struct intel_vgpu *vgpu, u32 gpa)
{
	u8 *buf;
	int i;

	if (WARN((vgpu_opregion(vgpu)->va),
			"vgpu%d: opregion has been initialized already.\n",
			vgpu->id))
		return -EINVAL;

	vgpu_opregion(vgpu)->va = (void *)__get_free_pages(GFP_KERNEL |
			__GFP_ZERO,
			get_order(INTEL_GVT_OPREGION_SIZE));

	if (!vgpu_opregion(vgpu)->va)
		return -ENOMEM;

	memcpy(vgpu_opregion(vgpu)->va, vgpu->gvt->opregion.opregion_va,
	       INTEL_GVT_OPREGION_SIZE);

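	/* Remember which guest page frames back the OpRegion so that
	 * map_vgpu_opregion() can later map them to the shadow pages.
	 */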
	for (i = 0; i < INTEL_GVT_OPREGION_PAGES; i++)
		vgpu_opregion(vgpu)->gfn[i] = (gpa >> PAGE_SHIFT) + i;

	/* For an unknown reason the value in the LID field is incorrect,
	 * which blocks the Windows guest, so work around it by forcing
	 * it to "OPEN".
	 */
	buf = (u8 *)vgpu_opregion(vgpu)->va;
	buf[INTEL_GVT_OPREGION_CLID] = 0x3;

	return 0;
}

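/*
 * map_vgpu_opregion() - map (or unmap, when @map is false) each shadow
 * OpRegion page into the guest: the hypervisor translates the page's kernel
 * virtual address to an MFN and wires it to the recorded GFN.
 */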
static int map_vgpu_opregion(struct intel_vgpu *vgpu, bool map)
{
	u64 mfn;
	int i, ret;

	for (i = 0; i < INTEL_GVT_OPREGION_PAGES; i++) {
		mfn = intel_gvt_hypervisor_virt_to_mfn(vgpu_opregion(vgpu)->va
			+ i * PAGE_SIZE);
		if (mfn == INTEL_GVT_INVALID_ADDR) {
			gvt_vgpu_err("fail to get MFN from VA\n");
			return -EINVAL;
		}
		ret = intel_gvt_hypervisor_map_gfn_to_mfn(vgpu,
				vgpu_opregion(vgpu)->gfn[i],
				mfn, 1, map);
		if (ret) {
			gvt_vgpu_err("fail to map GFN to MFN, errno: %d\n",
				ret);
			return ret;
		}
	}
	return 0;
}

/**
 * intel_vgpu_clean_opregion - clean up the vGPU OpRegion emulation resources
 * @vgpu: a vGPU
 *
 */
void intel_vgpu_clean_opregion(struct intel_vgpu *vgpu)
{
	gvt_dbg_core("vgpu%d: clean vgpu opregion\n", vgpu->id);

	if (!vgpu_opregion(vgpu)->va)
		return;

	if (intel_gvt_host.hypervisor_type == INTEL_GVT_HYPERVISOR_XEN) {
		map_vgpu_opregion(vgpu, false);
		free_pages((unsigned long)vgpu_opregion(vgpu)->va,
				get_order(INTEL_GVT_OPREGION_SIZE));

		vgpu_opregion(vgpu)->va = NULL;
	}
}

/**
 * intel_vgpu_init_opregion - initialize the vGPU OpRegion emulation resources
 * @vgpu: a vGPU
 * @gpa: guest physical address of the OpRegion
 *
 * Returns:
 * Zero on success, negative error code if failed.
 */
int intel_vgpu_init_opregion(struct intel_vgpu *vgpu, u32 gpa)
{
	int ret;

	gvt_dbg_core("vgpu%d: init vgpu opregion\n", vgpu->id);

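	/* Only the Xen path shadows and maps the OpRegion from the kernel;
	 * for other hypervisor types this is a no-op here.
	 */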
	if (intel_gvt_host.hypervisor_type == INTEL_GVT_HYPERVISOR_XEN) {
		gvt_dbg_core("emulate opregion from kernel\n");

		ret = init_vgpu_opregion(vgpu, gpa);
		if (ret)
			return ret;

		ret = map_vgpu_opregion(vgpu, true);
		if (ret)
			return ret;
	}

	return 0;
}

/**
 * intel_gvt_clean_opregion - clean up the host OpRegion related resources
 * @gvt: a GVT device
 *
 */
void intel_gvt_clean_opregion(struct intel_gvt *gvt)
{
	memunmap(gvt->opregion.opregion_va);
	gvt->opregion.opregion_va = NULL;
}

/**
 * intel_gvt_init_opregion - initialize the host OpRegion related resources
 * @gvt: a GVT device
 *
 * Returns:
 * Zero on success, negative error code if failed.
 */
int intel_gvt_init_opregion(struct intel_gvt *gvt)
{
	gvt_dbg_core("init host opregion\n");

	pci_read_config_dword(gvt->dev_priv->drm.pdev, INTEL_GVT_PCI_OPREGION,
			&gvt->opregion.opregion_pa);

	gvt->opregion.opregion_va = memremap(gvt->opregion.opregion_pa,
					     INTEL_GVT_OPREGION_SIZE, MEMREMAP_WB);
	if (!gvt->opregion.opregion_va) {
		gvt_err("fail to map host opregion\n");
		return -EFAULT;
	}
	return 0;
}

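/*
 * The OpRegion SCIC field encodes a function and a sub-function code; the
 * two helpers below extract those fields from a raw SCIC value.
 */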
#define GVT_OPREGION_FUNC(scic)					\
	({							\
	 u32 __ret;						\
	 __ret = (scic & OPREGION_SCIC_FUNC_MASK) >>		\
	 OPREGION_SCIC_FUNC_SHIFT;				\
	 __ret;							\
	 })

#define GVT_OPREGION_SUBFUNC(scic)				\
	({							\
	 u32 __ret;						\
	 __ret = (scic & OPREGION_SCIC_SUBFUNC_MASK) >>		\
	 OPREGION_SCIC_SUBFUNC_SHIFT;				\
	 __ret;							\
	 })

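/*
 * Human-readable names for the SCIC function and sub-function codes, used
 * only by the debug/error messages below.
 */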
static const char *opregion_func_name(u32 func)
{
	const char *name = NULL;

	switch (func) {
	case 0 ... 3:
	case 5:
	case 7 ... 15:
		name = "Reserved";
		break;

	case 4:
		name = "Get BIOS Data";
		break;

	case 6:
		name = "System BIOS Callbacks";
		break;

	default:
		name = "Unknown";
		break;
	}
	return name;
}

static const char *opregion_subfunc_name(u32 subfunc)
{
	const char *name = NULL;

	switch (subfunc) {
	case 0:
		name = "Supported Calls";
		break;

	case 1:
		name = "Requested Callbacks";
		break;

	case 2 ... 3:
	case 8 ... 9:
		name = "Reserved";
		break;

	case 5:
		name = "Boot Display";
		break;

	case 6:
		name = "TV-Standard/Video-Connector";
		break;

	case 7:
		name = "Internal Graphics";
		break;

	case 10:
		name = "Spread Spectrum Clocks";
		break;

	case 11:
		name = "Get AKSV";
		break;

	default:
		name = "Unknown";
		break;
	}
	return name;
}

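/*
 * A request only queries capabilities when it asks for "Get BIOS Data /
 * Supported Calls", "Get BIOS Data / Requested Callbacks" or "System BIOS
 * Callbacks / Supported Calls"; any other combination is a runtime service
 * that the emulation below rejects.
 */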
static bool querying_capabilities(u32 scic)
{
	u32 func, subfunc;

	func = GVT_OPREGION_FUNC(scic);
	subfunc = GVT_OPREGION_SUBFUNC(scic);

	if ((func == INTEL_GVT_OPREGION_SCIC_F_GETBIOSDATA &&
		subfunc == INTEL_GVT_OPREGION_SCIC_SF_SUPPRTEDCALLS)
		|| (func == INTEL_GVT_OPREGION_SCIC_F_GETBIOSDATA &&
		 subfunc == INTEL_GVT_OPREGION_SCIC_SF_REQEUSTEDCALLBACKS)
		|| (func == INTEL_GVT_OPREGION_SCIC_F_GETBIOSCALLBACKS &&
		 subfunc == INTEL_GVT_OPREGION_SCIC_SF_SUPPRTEDCALLS)) {
		return true;
	}
	return false;
}

/**
 * intel_vgpu_emulate_opregion_request - emulate an OpRegion SWSCI request
 * @vgpu: a vGPU
 * @swsci: SWSCI request value
 *
 * Returns:
 * Zero on success, negative error code if failed.
 */
int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci)
{
	u32 *scic, *parm;
	u32 func, subfunc;

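	/* SCIC and PARM live at fixed offsets inside the shadow OpRegion. */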
	scic = vgpu_opregion(vgpu)->va + INTEL_GVT_OPREGION_SCIC;
	parm = vgpu_opregion(vgpu)->va + INTEL_GVT_OPREGION_PARM;

	if (!(swsci & SWSCI_SCI_SELECT)) {
		gvt_vgpu_err("requesting SMI service\n");
		return 0;
	}
	/* ignore non 0->1 transitions */
	if ((vgpu_cfg_space(vgpu)[INTEL_GVT_PCI_SWSCI]
				& SWSCI_SCI_TRIGGER) ||
			!(swsci & SWSCI_SCI_TRIGGER)) {
		return 0;
	}

	func = GVT_OPREGION_FUNC(*scic);
	subfunc = GVT_OPREGION_SUBFUNC(*scic);
	if (!querying_capabilities(*scic)) {
		gvt_vgpu_err("requesting runtime service: func \"%s\","
				" subfunc \"%s\"\n",
				opregion_func_name(func),
				opregion_subfunc_name(subfunc));
		/*
		 * emulate exit status of function call, '0' means
		 * "failure, generic, unsupported or unknown cause"
		 */
		*scic &= ~OPREGION_SCIC_EXIT_MASK;
		return 0;
	}

	*scic = 0;
	*parm = 0;
	return 0;
}
320