xref: /linux/drivers/gpu/drm/i915/gvt/cfg_space.c (revision e08a1d97d33e2ac05cd368b955f9fdc2823f15fd)
/*
 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Eddie Dong <eddie.dong@intel.com>
 *    Jike Song <jike.song@intel.com>
 *
 * Contributors:
 *    Zhi Wang <zhi.a.wang@intel.com>
 *    Min He <min.he@intel.com>
 *    Bing Niu <bing.niu@intel.com>
 *
 */

#include "i915_drv.h"
#include "gvt.h"

enum {
	INTEL_GVT_PCI_BAR_GTTMMIO = 0,
	INTEL_GVT_PCI_BAR_APERTURE,
	INTEL_GVT_PCI_BAR_PIO,
	INTEL_GVT_PCI_BAR_MAX,
};

/**
 * intel_vgpu_emulate_cfg_read - emulate vGPU configuration space read
 * @__vgpu: target vGPU
 * @offset: offset into the vGPU configuration space
 * @p_data: buffer that returns the read data
 * @bytes: access width in bytes, at most 4
 *
 * Returns:
 * Zero on success, negative error code if failed.
 */
int intel_vgpu_emulate_cfg_read(void *__vgpu, unsigned int offset,
	void *p_data, unsigned int bytes)
{
	struct intel_vgpu *vgpu = __vgpu;

	if (WARN_ON(bytes > 4))
		return -EINVAL;

	if (WARN_ON(offset + bytes > INTEL_GVT_MAX_CFG_SPACE_SZ))
		return -EINVAL;

	memcpy(p_data, vgpu_cfg_space(vgpu) + offset, bytes);
	return 0;
}

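/*
 * Map or unmap the guest view of the aperture BAR: translate the base the
 * guest programmed into PCI_BASE_ADDRESS_2 (plus this vGPU's offset within
 * the aperture) into guest frame numbers and ask the hypervisor to (un)map
 * them onto the machine frames backing the vGPU's aperture slice. The
 * tracked flag avoids redundant hypervisor calls when the state is unchanged.
 */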
static int map_aperture(struct intel_vgpu *vgpu, bool map)
{
	u64 first_gfn, first_mfn;
	u64 val;
	int ret;

	if (map == vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_APERTURE].tracked)
		return 0;

	val = vgpu_cfg_space(vgpu)[PCI_BASE_ADDRESS_2];
	if (val & PCI_BASE_ADDRESS_MEM_TYPE_64)
		val = *(u64 *)(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_2);
	else
		val = *(u32 *)(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_2);

	first_gfn = (val + vgpu_aperture_offset(vgpu)) >> PAGE_SHIFT;
	first_mfn = vgpu_aperture_pa_base(vgpu) >> PAGE_SHIFT;

	ret = intel_gvt_hypervisor_map_gfn_to_mfn(vgpu, first_gfn,
						  first_mfn,
						  vgpu_aperture_sz(vgpu)
						  >> PAGE_SHIFT, map,
						  GVT_MAP_APERTURE);
	if (ret)
		return ret;

	vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_APERTURE].tracked = map;
	return 0;
}

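/*
 * Install or remove a trap over the GTTMMIO BAR range programmed in
 * PCI_BASE_ADDRESS_0, so that guest accesses to the virtual MMIO/GTT
 * space can be intercepted and emulated. The tracked flag avoids
 * redundant hypervisor calls when the state is unchanged.
 */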
static int trap_gttmmio(struct intel_vgpu *vgpu, bool trap)
{
	u64 start, end;
	u64 val;
	int ret;

	if (trap == vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_GTTMMIO].tracked)
		return 0;

	val = vgpu_cfg_space(vgpu)[PCI_BASE_ADDRESS_0];
	if (val & PCI_BASE_ADDRESS_MEM_TYPE_64)
		start = *(u64 *)(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_0);
	else
		start = *(u32 *)(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_0);

	start &= ~GENMASK(3, 0);
	end = start + vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_GTTMMIO].size - 1;

	ret = intel_gvt_hypervisor_set_trap_area(vgpu, start, end, trap);
	if (ret)
		return ret;

	vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_GTTMMIO].tracked = trap;
	return 0;
}

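/*
 * Only changes to the memory-space enable bit need side effects here:
 * when the guest sets PCI_COMMAND_MEMORY the MMIO trap and aperture
 * mapping are established, and when it clears the bit they are torn
 * down. The new command value is then stored in the shadow cfg space.
 */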
static int emulate_pci_command_write(struct intel_vgpu *vgpu,
	unsigned int offset, void *p_data, unsigned int bytes)
{
	u8 old = vgpu_cfg_space(vgpu)[offset];
	u8 new = *(u8 *)p_data;
	u8 changed = old ^ new;
	int ret;

	if (!(changed & PCI_COMMAND_MEMORY))
		return 0;

	if (old & PCI_COMMAND_MEMORY) {
		ret = trap_gttmmio(vgpu, false);
		if (ret)
			return ret;
		ret = map_aperture(vgpu, false);
		if (ret)
			return ret;
	} else {
		ret = trap_gttmmio(vgpu, true);
		if (ret)
			return ret;
		ret = map_aperture(vgpu, true);
		if (ret)
			return ret;
	}

	memcpy(vgpu_cfg_space(vgpu) + offset, p_data, bytes);
	return 0;
}

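/*
 * BAR writes arrive for PCI_BASE_ADDRESS_0..3, i.e. the GTTMMIO BAR at
 * 0x10 and the aperture BAR at 0x18, each spanning two dwords. The BAR
 * index is therefore derived from the 8-byte aligned offset relative to
 * PCI_BASE_ADDRESS_0, and "lo" tells whether the low or the high dword
 * of the BAR is being written.
 */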
static int emulate_pci_bar_write(struct intel_vgpu *vgpu, unsigned int offset,
	void *p_data, unsigned int bytes)
{
	unsigned int bar_index =
		(rounddown(offset, 8) % PCI_BASE_ADDRESS_0) / 8;
	u32 new = *(u32 *)(p_data);
	bool lo = IS_ALIGNED(offset, 8);
	u64 size;
	int ret = 0;
	bool mmio_enabled =
		vgpu_cfg_space(vgpu)[PCI_COMMAND] & PCI_COMMAND_MEMORY;

	if (WARN_ON(bar_index >= INTEL_GVT_PCI_BAR_MAX))
		return -EINVAL;

	if (new == 0xffffffff) {
		/*
		 * Power-up software can determine how much address
		 * space the device requires by writing a value of
		 * all 1's to the register and then reading the value
		 * back. The device will return 0's in all don't-care
		 * address bits.
		 */
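		/*
		 * Example (illustrative size): for a 4 MiB BAR,
		 * rounddown(0xffffffff, 0x400000) yields 0xffc00000, so
		 * the guest reads back 0xffc00000 and infers the 4 MiB
		 * size from the zeroed low-order address bits.
		 */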
		size = vgpu->cfg_space.bar[bar_index].size;
		if (lo) {
			new = rounddown(new, size);
		} else {
			u32 val = vgpu_cfg_space(vgpu)[rounddown(offset, 8)];
			/* A 32-bit BAR returns all zeros in the upper
			 * dword; a 64-bit BAR computes the size mask from
			 * the full size and returns the upper dword of
			 * that mask.
			 */
			if (val & PCI_BASE_ADDRESS_MEM_TYPE_64)
				new &= (~(size-1)) >> 32;
			else
				new = 0;
		}
		/*
		 * Unmap & untrap the BAR, since the guest hasn't configured
		 * a valid GPA
		 */
		switch (bar_index) {
		case INTEL_GVT_PCI_BAR_GTTMMIO:
			ret = trap_gttmmio(vgpu, false);
			break;
		case INTEL_GVT_PCI_BAR_APERTURE:
			ret = map_aperture(vgpu, false);
			break;
		}
		intel_vgpu_write_pci_bar(vgpu, offset, new, lo);
	} else {
		/*
		 * Unmap & untrap the old BAR first, since the guest has
		 * re-configured the BAR
		 */
		switch (bar_index) {
		case INTEL_GVT_PCI_BAR_GTTMMIO:
			ret = trap_gttmmio(vgpu, false);
			break;
		case INTEL_GVT_PCI_BAR_APERTURE:
			ret = map_aperture(vgpu, false);
			break;
		}
		intel_vgpu_write_pci_bar(vgpu, offset, new, lo);
		/* Track the new BAR */
		if (mmio_enabled) {
			switch (bar_index) {
			case INTEL_GVT_PCI_BAR_GTTMMIO:
				ret = trap_gttmmio(vgpu, true);
				break;
			case INTEL_GVT_PCI_BAR_APERTURE:
				ret = map_aperture(vgpu, true);
				break;
			}
		}
	}
	return ret;
}

/**
 * intel_vgpu_emulate_cfg_write - emulate vGPU configuration space write
 * @__vgpu: target vGPU
 * @offset: offset into the vGPU configuration space
 * @p_data: data to be written
 * @bytes: access width in bytes, at most 4
 *
 * Returns:
 * Zero on success, negative error code if failed.
 */
int intel_vgpu_emulate_cfg_write(void *__vgpu, unsigned int offset,
	void *p_data, unsigned int bytes)
{
	struct intel_vgpu *vgpu = __vgpu;
	int ret;

	if (WARN_ON(bytes > 4))
		return -EINVAL;

	if (WARN_ON(offset + bytes > INTEL_GVT_MAX_CFG_SPACE_SZ))
		return -EINVAL;

	/* First check if it's PCI_COMMAND */
	if (IS_ALIGNED(offset, 2) && offset == PCI_COMMAND) {
		if (WARN_ON(bytes > 2))
			return -EINVAL;
		return emulate_pci_command_write(vgpu, offset, p_data, bytes);
	}

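	/*
	 * Dispatch on the dword-aligned offset: BAR writes may need to
	 * remap or retrap the backing ranges, a SWSCI write triggers an
	 * emulated OpRegion request, an OpRegion address write initializes
	 * the virtual OpRegion, and everything else simply lands in the
	 * shadow configuration space.
	 */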
	switch (rounddown(offset, 4)) {
	case PCI_BASE_ADDRESS_0:
	case PCI_BASE_ADDRESS_1:
	case PCI_BASE_ADDRESS_2:
	case PCI_BASE_ADDRESS_3:
		if (WARN_ON(!IS_ALIGNED(offset, 4)))
			return -EINVAL;
		return emulate_pci_bar_write(vgpu, offset, p_data, bytes);

	case INTEL_GVT_PCI_SWSCI:
		if (WARN_ON(!IS_ALIGNED(offset, 4)))
			return -EINVAL;
		ret = intel_vgpu_emulate_opregion_request(vgpu, *(u32 *)p_data);
		if (ret)
			return ret;
		break;

	case INTEL_GVT_PCI_OPREGION:
		if (WARN_ON(!IS_ALIGNED(offset, 4)))
			return -EINVAL;
		ret = intel_vgpu_init_opregion(vgpu, *(u32 *)p_data);
		if (ret)
			return ret;

		memcpy(vgpu_cfg_space(vgpu) + offset, p_data, bytes);
		break;
	default:
		memcpy(vgpu_cfg_space(vgpu) + offset, p_data, bytes);
		break;
	}
	return 0;
}
289