xref: /linux/drivers/xen/xen-pciback/passthrough.c (revision 48dea9a700c8728cc31a1dd44588b97578de86ee)
// SPDX-License-Identifier: GPL-2.0
/*
 * PCI Backend - Provides restricted access to the real PCI bus topology
 *               to the frontend
 *
 *   Author: Ryan Wilson <hap9@epoch.ncsc.mil>
 */

#include <linux/list.h>
#include <linux/pci.h>
#include <linux/mutex.h>
#include "pciback.h"

struct passthrough_dev_data {
	/* Access to dev_list must be protected by lock */
	struct list_head dev_list;
	struct mutex lock;
};

static struct pci_dev *__xen_pcibk_get_pci_dev(struct xen_pcibk_device *pdev,
					       unsigned int domain,
					       unsigned int bus,
					       unsigned int devfn)
{
	struct passthrough_dev_data *dev_data = pdev->pci_dev_data;
	struct pci_dev_entry *dev_entry;
	struct pci_dev *dev = NULL;

	mutex_lock(&dev_data->lock);

	list_for_each_entry(dev_entry, &dev_data->dev_list, list) {
		if (domain == (unsigned int)pci_domain_nr(dev_entry->dev->bus)
		    && bus == (unsigned int)dev_entry->dev->bus->number
		    && devfn == dev_entry->dev->devfn) {
			dev = dev_entry->dev;
			break;
		}
	}

	mutex_unlock(&dev_data->lock);

	return dev;
}
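
/*
 * Note on the lookup key above: @devfn packs the PCI slot and function
 * numbers into one byte, so a single integer comparison covers both.
 * A minimal illustration using the standard <linux/pci.h> helpers (the
 * concrete slot number is just an example):
 *
 *	unsigned int devfn = PCI_DEVFN(0x1c, 0);	// slot 0x1c, function 0
 *	unsigned int slot  = PCI_SLOT(devfn);		// 0x1c
 *	unsigned int func  = PCI_FUNC(devfn);		// 0
 */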

static int __xen_pcibk_add_pci_dev(struct xen_pcibk_device *pdev,
				   struct pci_dev *dev,
				   int devid, publish_pci_dev_cb publish_cb)
{
	struct passthrough_dev_data *dev_data = pdev->pci_dev_data;
	struct pci_dev_entry *dev_entry;
	unsigned int domain, bus, devfn;
	int err;

	dev_entry = kmalloc(sizeof(*dev_entry), GFP_KERNEL);
	if (!dev_entry)
		return -ENOMEM;
	dev_entry->dev = dev;

	mutex_lock(&dev_data->lock);
	list_add_tail(&dev_entry->list, &dev_data->dev_list);
	mutex_unlock(&dev_data->lock);

	/* Publish this device. */
	domain = (unsigned int)pci_domain_nr(dev->bus);
	bus = (unsigned int)dev->bus->number;
	devfn = dev->devfn;
	err = publish_cb(pdev, domain, bus, devfn, devid);

	return err;
}

static void __xen_pcibk_release_pci_dev(struct xen_pcibk_device *pdev,
					struct pci_dev *dev, bool lock)
{
	struct passthrough_dev_data *dev_data = pdev->pci_dev_data;
	struct pci_dev_entry *dev_entry, *t;
	struct pci_dev *found_dev = NULL;

	mutex_lock(&dev_data->lock);

	list_for_each_entry_safe(dev_entry, t, &dev_data->dev_list, list) {
		if (dev_entry->dev == dev) {
			list_del(&dev_entry->list);
			found_dev = dev_entry->dev;
			kfree(dev_entry);
		}
	}

	mutex_unlock(&dev_data->lock);

	if (found_dev) {
		if (lock)
			device_lock(&found_dev->dev);
		pcistub_put_pci_dev(found_dev);
		if (lock)
			device_unlock(&found_dev->dev);
	}
}
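
/*
 * A note on the @lock argument of __xen_pcibk_release_pci_dev() above
 * (an interpretation, not stated in this file): pcistub_put_pci_dev()
 * is called here under the struct device lock, and some callers of the
 * .release hook already hold that lock, so it is only taken locally
 * when @lock is true.
 */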

static int __xen_pcibk_init_devices(struct xen_pcibk_device *pdev)
{
	struct passthrough_dev_data *dev_data;

	dev_data = kmalloc(sizeof(*dev_data), GFP_KERNEL);
	if (!dev_data)
		return -ENOMEM;

	mutex_init(&dev_data->lock);

	INIT_LIST_HEAD(&dev_data->dev_list);

	pdev->pci_dev_data = dev_data;

	return 0;
}

static int __xen_pcibk_publish_pci_roots(struct xen_pcibk_device *pdev,
					 publish_pci_root_cb publish_root_cb)
{
	int err = 0;
	struct passthrough_dev_data *dev_data = pdev->pci_dev_data;
	struct pci_dev_entry *dev_entry, *e;
	struct pci_dev *dev;
	int found;
	unsigned int domain, bus;

	mutex_lock(&dev_data->lock);

	list_for_each_entry(dev_entry, &dev_data->dev_list, list) {
		/* Only publish this device as a root if none of its
		 * parent bridges are exported
		 */
		found = 0;
		dev = dev_entry->dev->bus->self;
		for (; !found && dev != NULL; dev = dev->bus->self) {
			list_for_each_entry(e, &dev_data->dev_list, list) {
				if (dev == e->dev) {
					found = 1;
					break;
				}
			}
		}

		domain = (unsigned int)pci_domain_nr(dev_entry->dev->bus);
		bus = (unsigned int)dev_entry->dev->bus->number;

		if (!found) {
			err = publish_root_cb(pdev, domain, bus);
			if (err)
				break;
		}
	}

	mutex_unlock(&dev_data->lock);

	return err;
}
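
/*
 * Worked example for the root-publishing logic above (the addresses are
 * hypothetical): if both the bridge 0000:00:1c.0 and its child device
 * 0000:01:00.0 are exported, walking 0000:01:00.0's parent chain finds the
 * bridge in dev_list, so only the bridge's bus (domain 0000, bus 00) is
 * published as a root.  If the child were exported on its own, its own bus
 * (domain 0000, bus 01) would be published instead.
 */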

static void __xen_pcibk_release_devices(struct xen_pcibk_device *pdev)
{
	struct passthrough_dev_data *dev_data = pdev->pci_dev_data;
	struct pci_dev_entry *dev_entry, *t;

	list_for_each_entry_safe(dev_entry, t, &dev_data->dev_list, list) {
		struct pci_dev *dev = dev_entry->dev;
		list_del(&dev_entry->list);
		device_lock(&dev->dev);
		pcistub_put_pci_dev(dev);
		device_unlock(&dev->dev);
		kfree(dev_entry);
	}

	kfree(dev_data);
	pdev->pci_dev_data = NULL;
}

static int __xen_pcibk_get_pcifront_dev(struct pci_dev *pcidev,
					struct xen_pcibk_device *pdev,
					unsigned int *domain, unsigned int *bus,
					unsigned int *devfn)
{
	*domain = pci_domain_nr(pcidev->bus);
	*bus = pcidev->bus->number;
	*devfn = pcidev->devfn;
	return 1;
}

const struct xen_pcibk_backend xen_pcibk_passthrough_backend = {
	.name           = "passthrough",
	.init           = __xen_pcibk_init_devices,
	.free           = __xen_pcibk_release_devices,
	.find           = __xen_pcibk_get_pcifront_dev,
	.publish        = __xen_pcibk_publish_pci_roots,
	.release        = __xen_pcibk_release_pci_dev,
	.add            = __xen_pcibk_add_pci_dev,
	.get            = __xen_pcibk_get_pci_dev,
};
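
/*
 * Illustrative sketch (not part of this file): the rest of xen-pciback is
 * expected to reach the operations above only through a generic backend
 * pointer, which keeps the passthrough and virtual-slot backends
 * interchangeable.  The helper below is a minimal sketch of such a
 * dispatch; the real wrappers live in pciback.h and may differ in detail.
 */
#if 0	/* sketch only; assumes a global "xen_pcibk_backend" selector */
static struct pci_dev *example_lookup(struct xen_pcibk_device *pdev,
				      unsigned int domain, unsigned int bus,
				      unsigned int devfn)
{
	/* Dispatch through whichever backend was selected at init time. */
	if (xen_pcibk_backend && xen_pcibk_backend->get)
		return xen_pcibk_backend->get(pdev, domain, bus, devfn);
	return NULL;
}
#endif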