xref: /linux/drivers/vfio/pci/virtio/legacy_io.c (revision a3ebb59eee2e558e8f8f27fc3f75cd367f17cd8e)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved
 */

#include <linux/device.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/vfio.h>
#include <linux/vfio_pci_core.h>
#include <linux/virtio_pci.h>
#include <linux/virtio_net.h>
#include <linux/virtio_pci_admin.h>

#include "common.h"

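/*
 * Issue a legacy IO read or write via a virtio admin command.  Data is
 * staged in bar0_virtual_buf, protected by bar_mutex, before being
 * copied to or from user space.
 */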
static int
virtiovf_issue_legacy_rw_cmd(struct virtiovf_pci_core_device *virtvdev,
			     loff_t pos, char __user *buf,
			     size_t count, bool read)
{
	bool msix_enabled =
		(virtvdev->core_device.irq_type == VFIO_PCI_MSIX_IRQ_INDEX);
	struct pci_dev *pdev = virtvdev->core_device.pdev;
	u8 *bar0_buf = virtvdev->bar0_virtual_buf;
	bool common;
	u8 offset;
	int ret;

	common = pos < VIRTIO_PCI_CONFIG_OFF(msix_enabled);
	/* offset within the relevant configuration area */
	offset = common ? pos : pos - VIRTIO_PCI_CONFIG_OFF(msix_enabled);
	mutex_lock(&virtvdev->bar_mutex);
	if (read) {
		if (common)
			ret = virtio_pci_admin_legacy_common_io_read(pdev, offset,
					count, bar0_buf + pos);
		else
			ret = virtio_pci_admin_legacy_device_io_read(pdev, offset,
					count, bar0_buf + pos);
		if (ret)
			goto out;
		if (copy_to_user(buf, bar0_buf + pos, count))
			ret = -EFAULT;
	} else {
		if (copy_from_user(bar0_buf + pos, buf, count)) {
			ret = -EFAULT;
			goto out;
		}

		if (common)
			ret = virtio_pci_admin_legacy_common_io_write(pdev, offset,
					count, bar0_buf + pos);
		else
			ret = virtio_pci_admin_legacy_device_io_write(pdev, offset,
					count, bar0_buf + pos);
	}
out:
	mutex_unlock(&virtvdev->bar_mutex);
	return ret;
}

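/*
 * Emulate an access to the virtual IO BAR0.  Queue notifications are
 * forwarded to the device's real notify area; any other offset is
 * serviced through a legacy IO admin command.
 */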
static int
virtiovf_pci_bar0_rw(struct virtiovf_pci_core_device *virtvdev,
		     loff_t pos, char __user *buf,
		     size_t count, bool read)
{
	struct vfio_pci_core_device *core_device = &virtvdev->core_device;
	struct pci_dev *pdev = core_device->pdev;
	u16 queue_notify;
	int ret;

	if (!(le16_to_cpu(virtvdev->pci_cmd) & PCI_COMMAND_IO))
		return -EIO;

	if (pos + count > virtvdev->bar0_virtual_buf_size)
		return -EINVAL;

	ret = pm_runtime_resume_and_get(&pdev->dev);
	if (ret) {
		pci_info_ratelimited(pdev, "runtime resume failed %d\n", ret);
		return -EIO;
	}

	switch (pos) {
	case VIRTIO_PCI_QUEUE_NOTIFY:
		if (count != sizeof(queue_notify)) {
			ret = -EINVAL;
			goto end;
		}
		if (read) {
			ret = vfio_pci_core_ioread16(core_device, true, &queue_notify,
						     virtvdev->notify_addr);
			if (ret)
				goto end;
			if (copy_to_user(buf, &queue_notify,
					 sizeof(queue_notify))) {
				ret = -EFAULT;
				goto end;
			}
		} else {
			if (copy_from_user(&queue_notify, buf, count)) {
				ret = -EFAULT;
				goto end;
			}
			ret = vfio_pci_core_iowrite16(core_device, true, queue_notify,
						      virtvdev->notify_addr);
		}
		break;
	default:
		ret = virtiovf_issue_legacy_rw_cmd(virtvdev, pos, buf, count,
						   read);
	}

end:
	pm_runtime_put(&pdev->dev);
	return ret ? ret : count;
}

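/*
 * Mangle config space reads so the VF presents itself as a transitional
 * virtio-net device: legacy device ID, revision 0, an IO BAR0 and the
 * transitional subsystem IDs.
 */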
static ssize_t virtiovf_pci_read_config(struct vfio_device *core_vdev,
					char __user *buf, size_t count,
					loff_t *ppos)
{
	struct virtiovf_pci_core_device *virtvdev = container_of(
		core_vdev, struct virtiovf_pci_core_device, core_device.vdev);
	loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK;
	size_t register_offset;
	loff_t copy_offset;
	size_t copy_count;
	__le32 val32;
	__le16 val16;
	u8 val8;
	int ret;

	ret = vfio_pci_core_read(core_vdev, buf, count, ppos);
	if (ret < 0)
		return ret;

	if (vfio_pci_core_range_intersect_range(pos, count, PCI_DEVICE_ID,
						sizeof(val16), &copy_offset,
						&copy_count, &register_offset)) {
		val16 = cpu_to_le16(VIRTIO_TRANS_ID_NET);
		if (copy_to_user(buf + copy_offset, (void *)&val16 + register_offset, copy_count))
			return -EFAULT;
	}

	if ((le16_to_cpu(virtvdev->pci_cmd) & PCI_COMMAND_IO) &&
	    vfio_pci_core_range_intersect_range(pos, count, PCI_COMMAND,
						sizeof(val16), &copy_offset,
						&copy_count, &register_offset)) {
		if (copy_from_user((void *)&val16 + register_offset, buf + copy_offset,
				   copy_count))
			return -EFAULT;
		val16 |= cpu_to_le16(PCI_COMMAND_IO);
		if (copy_to_user(buf + copy_offset, (void *)&val16 + register_offset,
				 copy_count))
			return -EFAULT;
	}

	if (vfio_pci_core_range_intersect_range(pos, count, PCI_REVISION_ID,
						sizeof(val8), &copy_offset,
						&copy_count, &register_offset)) {
		/* A transitional device must have revision 0 */
		val8 = 0;
		if (copy_to_user(buf + copy_offset, &val8, copy_count))
			return -EFAULT;
	}

	if (vfio_pci_core_range_intersect_range(pos, count, PCI_BASE_ADDRESS_0,
						sizeof(val32), &copy_offset,
						&copy_count, &register_offset)) {
		u32 bar_mask = ~(virtvdev->bar0_virtual_buf_size - 1);
		u32 pci_base_addr_0 = le32_to_cpu(virtvdev->pci_base_addr_0);

		val32 = cpu_to_le32((pci_base_addr_0 & bar_mask) | PCI_BASE_ADDRESS_SPACE_IO);
		if (copy_to_user(buf + copy_offset, (void *)&val32 + register_offset, copy_count))
			return -EFAULT;
	}

	if (vfio_pci_core_range_intersect_range(pos, count, PCI_SUBSYSTEM_ID,
						sizeof(val16), &copy_offset,
						&copy_count, &register_offset)) {
		/*
		 * Transitional devices use the PCI subsystem device ID as the
		 * virtio device ID, just as the legacy driver always did.
		 */
		val16 = cpu_to_le16(VIRTIO_ID_NET);
		if (copy_to_user(buf + copy_offset, (void *)&val16 + register_offset,
				 copy_count))
			return -EFAULT;
	}

	if (vfio_pci_core_range_intersect_range(pos, count, PCI_SUBSYSTEM_VENDOR_ID,
						sizeof(val16), &copy_offset,
						&copy_count, &register_offset)) {
		val16 = cpu_to_le16(PCI_VENDOR_ID_REDHAT_QUMRANET);
		if (copy_to_user(buf + copy_offset, (void *)&val16 + register_offset,
				 copy_count))
			return -EFAULT;
	}

	return count;
}

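/*
 * Region read entry point: config space and the virtual BAR0 are
 * emulated here, everything else is handled by the vfio-pci core.
 */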
ssize_t virtiovf_pci_core_read(struct vfio_device *core_vdev, char __user *buf,
			       size_t count, loff_t *ppos)
{
	struct virtiovf_pci_core_device *virtvdev = container_of(
		core_vdev, struct virtiovf_pci_core_device, core_device.vdev);
	unsigned int index = VFIO_PCI_OFFSET_TO_INDEX(*ppos);
	loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK;

	if (!count)
		return 0;

	if (index == VFIO_PCI_CONFIG_REGION_INDEX)
		return virtiovf_pci_read_config(core_vdev, buf, count, ppos);

	if (index == VFIO_PCI_BAR0_REGION_INDEX)
		return virtiovf_pci_bar0_rw(virtvdev, pos, buf, count, true);

	return vfio_pci_core_read(core_vdev, buf, count, ppos);
}

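/*
 * Shadow writes to the COMMAND register and BAR0 so that subsequent
 * config reads and BAR0 accesses reflect what the user wrote.
 */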
static ssize_t virtiovf_pci_write_config(struct vfio_device *core_vdev,
					 const char __user *buf, size_t count,
					 loff_t *ppos)
{
	struct virtiovf_pci_core_device *virtvdev = container_of(
		core_vdev, struct virtiovf_pci_core_device, core_device.vdev);
	loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK;
	size_t register_offset;
	loff_t copy_offset;
	size_t copy_count;

	if (vfio_pci_core_range_intersect_range(pos, count, PCI_COMMAND,
						sizeof(virtvdev->pci_cmd),
						&copy_offset, &copy_count,
						&register_offset)) {
		if (copy_from_user((void *)&virtvdev->pci_cmd + register_offset,
				   buf + copy_offset,
				   copy_count))
			return -EFAULT;
	}

	if (vfio_pci_core_range_intersect_range(pos, count, PCI_BASE_ADDRESS_0,
						sizeof(virtvdev->pci_base_addr_0),
						&copy_offset, &copy_count,
						&register_offset)) {
		if (copy_from_user((void *)&virtvdev->pci_base_addr_0 + register_offset,
				   buf + copy_offset,
				   copy_count))
			return -EFAULT;
	}

	return vfio_pci_core_write(core_vdev, buf, count, ppos);
}

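/* Region write entry point, mirroring virtiovf_pci_core_read() */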
ssize_t virtiovf_pci_core_write(struct vfio_device *core_vdev, const char __user *buf,
				size_t count, loff_t *ppos)
{
	struct virtiovf_pci_core_device *virtvdev = container_of(
		core_vdev, struct virtiovf_pci_core_device, core_device.vdev);
	unsigned int index = VFIO_PCI_OFFSET_TO_INDEX(*ppos);
	loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK;

	if (!count)
		return 0;

	if (index == VFIO_PCI_CONFIG_REGION_INDEX)
		return virtiovf_pci_write_config(core_vdev, buf, count, ppos);

	if (index == VFIO_PCI_BAR0_REGION_INDEX)
		return virtiovf_pci_bar0_rw(virtvdev, pos, (char __user *)buf, count, false);

	return vfio_pci_core_write(core_vdev, buf, count, ppos);
}

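/*
 * Report the size and access flags of the emulated BAR0; all other
 * regions are handled by the vfio-pci core.
 */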
int virtiovf_pci_ioctl_get_region_info(struct vfio_device *core_vdev,
				       struct vfio_region_info *info,
				       struct vfio_info_cap *caps)
{
	struct virtiovf_pci_core_device *virtvdev = container_of(
		core_vdev, struct virtiovf_pci_core_device, core_device.vdev);

	if (info->index != VFIO_PCI_BAR0_REGION_INDEX)
		return vfio_pci_ioctl_get_region_info(core_vdev, info, caps);

	info->offset = VFIO_PCI_INDEX_TO_OFFSET(info->index);
	info->size = virtvdev->bar0_virtual_buf_size;
	info->flags = VFIO_REGION_INFO_FLAG_READ | VFIO_REGION_INFO_FLAG_WRITE;
	return 0;
}

static int virtiovf_set_notify_addr(struct virtiovf_pci_core_device *virtvdev)
{
	struct vfio_pci_core_device *core_device = &virtvdev->core_device;
	int ret;

	/*
	 * Set up the BAR holding the 'notify' area for vfio's use as well.
	 * This lets us mmap it only once and reuse it whenever needed.
	 */
	ret = vfio_pci_core_setup_barmap(core_device,
					 virtvdev->notify_bar);
	if (ret)
		return ret;

	virtvdev->notify_addr = core_device->barmap[virtvdev->notify_bar] +
			virtvdev->notify_offset;
	return 0;
}

int virtiovf_open_legacy_io(struct virtiovf_pci_core_device *virtvdev)
{
	if (!virtvdev->bar0_virtual_buf)
		return 0;

	/*
	 * Upon close_device(), vfio_pci_core_disable() is called and tears
	 * down all previous mmaps, so the valid lifetime of the 'notify'
	 * address is a single open/close cycle.
	 */
	return virtiovf_set_notify_addr(virtvdev);
}

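/*
 * Only virtio-net is supported, so the device config size is fixed and
 * the 'device' argument is currently unused.
 */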
static int virtiovf_get_device_config_size(unsigned short device)
{
	/* Network card */
	return offsetofend(struct virtio_net_config, status);
}

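/*
 * Query, via an admin command, which BAR and offset hold the queue
 * notify area used for legacy IO.
 */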
static int virtiovf_read_notify_info(struct virtiovf_pci_core_device *virtvdev)
{
	u64 offset;
	int ret;
	u8 bar;

	ret = virtio_pci_admin_legacy_io_notify_info(virtvdev->core_device.pdev,
				VIRTIO_ADMIN_CMD_NOTIFY_INFO_FLAGS_OWNER_MEM,
				&bar, &offset);
	if (ret)
		return ret;

	virtvdev->notify_bar = bar;
	virtvdev->notify_offset = offset;
	return 0;
}

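/* Check whether the device exposes a physical BAR0 of its own */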
static bool virtiovf_bar0_exists(struct pci_dev *pdev)
{
	struct resource *res = pdev->resource;

	return res->flags;
}

bool virtiovf_support_legacy_io(struct pci_dev *pdev)
{
	/* For now, legacy IO is supported only for virtio-net (modern device ID 0x1041) */
	return pdev->device == 0x1041 && virtio_pci_admin_has_legacy_io(pdev) &&
	       !virtiovf_bar0_exists(pdev);
}

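/*
 * Discover the notify area and allocate the virtual BAR0 buffer, which
 * covers the legacy common config plus the virtio-net device config.
 * Its size must be a power of two so that the BAR address mask in
 * virtiovf_pci_read_config() works like real BAR sizing.
 */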
int virtiovf_init_legacy_io(struct virtiovf_pci_core_device *virtvdev)
{
	struct pci_dev *pdev = virtvdev->core_device.pdev;
	int ret;

	ret = virtiovf_read_notify_info(virtvdev);
	if (ret)
		return ret;

	virtvdev->bar0_virtual_buf_size = VIRTIO_PCI_CONFIG_OFF(true) +
				virtiovf_get_device_config_size(pdev->device);
	BUILD_BUG_ON(!is_power_of_2(virtvdev->bar0_virtual_buf_size));
	virtvdev->bar0_virtual_buf = kzalloc(virtvdev->bar0_virtual_buf_size,
					     GFP_KERNEL);
	if (!virtvdev->bar0_virtual_buf)
		return -ENOMEM;
	mutex_init(&virtvdev->bar_mutex);
	return 0;
}

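/* Free the virtual BAR0 buffer allocated by virtiovf_init_legacy_io() */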
void virtiovf_release_legacy_io(struct virtiovf_pci_core_device *virtvdev)
{
	kfree(virtvdev->bar0_virtual_buf);
}

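/* After a device reset the virtual COMMAND register returns to its cleared state */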
void virtiovf_legacy_io_reset_done(struct pci_dev *pdev)
{
	struct virtiovf_pci_core_device *virtvdev = dev_get_drvdata(&pdev->dev);

	virtvdev->pci_cmd = 0;
}