// SPDX-License-Identifier: GPL-2.0
/*
 * PCI Backend - Provides a Virtual PCI bus (with real devices)
 *               to the frontend
 *
 *   Author: Ryan Wilson <hap9@epoch.ncsc.mil>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#define dev_fmt pr_fmt

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/mutex.h>
#include "pciback.h"

#define PCI_SLOT_MAX 32

struct vpci_dev_data {
	/* Access to dev_list must be protected by lock */
	struct list_head dev_list[PCI_SLOT_MAX];
	struct mutex lock;
};

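/* Return the first entry of @head; only meaningful for a non-empty list. */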
static inline struct list_head *list_first(struct list_head *head)
{
	return head->next;
}

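/*
 * Map a virtual domain:bus:devfn, as seen by the frontend, back to the real
 * pci_dev it was exported from.  The virtual bus only has domain 0, bus 0;
 * anything else (or an unknown slot/function) returns NULL.
 */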
static struct pci_dev *__xen_pcibk_get_pci_dev(struct xen_pcibk_device *pdev,
					       unsigned int domain,
					       unsigned int bus,
					       unsigned int devfn)
{
	struct pci_dev_entry *entry;
	struct pci_dev *dev = NULL;
	struct vpci_dev_data *vpci_dev = pdev->pci_dev_data;

	if (domain != 0 || bus != 0)
		return NULL;

	if (PCI_SLOT(devfn) < PCI_SLOT_MAX) {
		mutex_lock(&vpci_dev->lock);

		list_for_each_entry(entry,
				    &vpci_dev->dev_list[PCI_SLOT(devfn)],
				    list) {
			if (PCI_FUNC(entry->dev->devfn) == PCI_FUNC(devfn)) {
				dev = entry->dev;
				break;
			}
		}

		mutex_unlock(&vpci_dev->lock);
	}
	return dev;
}

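/*
 * Two devices share a virtual slot if and only if they live on the same
 * physical domain, bus and slot, i.e. they are functions of one device.
 */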
static inline int match_slot(struct pci_dev *l, struct pci_dev *r)
{
	if (pci_domain_nr(l->bus) == pci_domain_nr(r->bus)
	    && l->bus == r->bus && PCI_SLOT(l->devfn) == PCI_SLOT(r->devfn))
		return 1;

	return 0;
}

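/*
 * Add @dev to the virtual PCI bus and report its virtual location to the
 * frontend through @publish_cb.  Functions of one physical device are kept
 * together in a single virtual slot; bridges cannot be exported.
 */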
static int __xen_pcibk_add_pci_dev(struct xen_pcibk_device *pdev,
				   struct pci_dev *dev, int devid,
				   publish_pci_dev_cb publish_cb)
{
	int err = 0, slot, func = PCI_FUNC(dev->devfn);
	struct pci_dev_entry *t, *dev_entry;
	struct vpci_dev_data *vpci_dev = pdev->pci_dev_data;

	if ((dev->class >> 24) == PCI_BASE_CLASS_BRIDGE) {
		err = -EFAULT;
		xenbus_dev_fatal(pdev->xdev, err,
				 "Can't export bridges on the virtual PCI bus");
		goto out;
	}

	dev_entry = kmalloc(sizeof(*dev_entry), GFP_KERNEL);
	if (!dev_entry) {
		err = -ENOMEM;
		xenbus_dev_fatal(pdev->xdev, err,
				 "Error adding entry to virtual PCI bus");
		goto out;
	}

	dev_entry->dev = dev;

	mutex_lock(&vpci_dev->lock);

	/*
	 * Keep multi-function devices together on the virtual PCI bus, except
	 * that we want to keep virtual functions at func 0 on their own. They
	 * aren't multi-function devices and hence their presence at func 0
	 * may cause guests to not scan the other functions.
	 */
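	/*
	 * For example (hypothetical BDFs): physical functions 0000:03:00.0
	 * and 0000:03:00.1 end up sharing one virtual slot as funcs 0 and 1,
	 * while a virtual function such as 0000:04:10.0 skips the grouping
	 * below and is given a virtual slot of its own.
	 */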
	if (!dev->is_virtfn || func) {
		for (slot = 0; slot < PCI_SLOT_MAX; slot++) {
			if (list_empty(&vpci_dev->dev_list[slot]))
				continue;

			t = list_entry(list_first(&vpci_dev->dev_list[slot]),
				       struct pci_dev_entry, list);
			if (t->dev->is_virtfn && !PCI_FUNC(t->dev->devfn))
				continue;

			if (match_slot(dev, t->dev)) {
				dev_info(&dev->dev, "vpci: assign to virtual slot %d func %d\n",
					 slot, func);
				list_add_tail(&dev_entry->list,
					      &vpci_dev->dev_list[slot]);
				goto unlock;
			}
		}
	}

	/* Assign to a new slot on the virtual PCI bus */
	for (slot = 0; slot < PCI_SLOT_MAX; slot++) {
		if (list_empty(&vpci_dev->dev_list[slot])) {
			dev_info(&dev->dev, "vpci: assign to virtual slot %d\n",
				 slot);
			list_add_tail(&dev_entry->list,
				      &vpci_dev->dev_list[slot]);
			goto unlock;
		}
	}

	err = -ENOMEM;
	xenbus_dev_fatal(pdev->xdev, err,
			 "No more space on root virtual PCI bus");

unlock:
	mutex_unlock(&vpci_dev->lock);

	/* Publish this device. */
	if (!err)
		err = publish_cb(pdev, 0, 0, PCI_DEVFN(slot, func), devid);
	else
		kfree(dev_entry);

out:
	return err;
}

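/*
 * Remove @dev from the virtual bus and hand it back to pcistub.  When @lock
 * is set, the device lock is held around pcistub_put_pci_dev().
 */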
static void __xen_pcibk_release_pci_dev(struct xen_pcibk_device *pdev,
					struct pci_dev *dev, bool lock)
{
	int slot;
	struct vpci_dev_data *vpci_dev = pdev->pci_dev_data;
	struct pci_dev *found_dev = NULL;

	mutex_lock(&vpci_dev->lock);

	for (slot = 0; slot < PCI_SLOT_MAX; slot++) {
		struct pci_dev_entry *e;

		list_for_each_entry(e, &vpci_dev->dev_list[slot], list) {
			if (e->dev == dev) {
				list_del(&e->list);
				found_dev = e->dev;
				kfree(e);
				goto out;
			}
		}
	}

out:
	mutex_unlock(&vpci_dev->lock);

	if (found_dev) {
		if (lock)
			device_lock(&found_dev->dev);
		pcistub_put_pci_dev(found_dev);
		if (lock)
			device_unlock(&found_dev->dev);
	}
}

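/* Allocate and initialise the virtual bus bookkeeping for @pdev. */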
static int __xen_pcibk_init_devices(struct xen_pcibk_device *pdev)
{
	int slot;
	struct vpci_dev_data *vpci_dev;

	vpci_dev = kmalloc(sizeof(*vpci_dev), GFP_KERNEL);
	if (!vpci_dev)
		return -ENOMEM;

	mutex_init(&vpci_dev->lock);

	for (slot = 0; slot < PCI_SLOT_MAX; slot++)
		INIT_LIST_HEAD(&vpci_dev->dev_list[slot]);

	pdev->pci_dev_data = vpci_dev;

	return 0;
}

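/* Report the root of the virtual bus to the frontend via @publish_cb. */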
static int __xen_pcibk_publish_pci_roots(struct xen_pcibk_device *pdev,
					 publish_pci_root_cb publish_cb)
{
	/* The Virtual PCI bus has only one root */
	return publish_cb(pdev, 0, 0);
}

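/* Return every exported device to pcistub and free the virtual bus state. */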
static void __xen_pcibk_release_devices(struct xen_pcibk_device *pdev)
{
	int slot;
	struct vpci_dev_data *vpci_dev = pdev->pci_dev_data;

	for (slot = 0; slot < PCI_SLOT_MAX; slot++) {
		struct pci_dev_entry *e, *tmp;
		list_for_each_entry_safe(e, tmp, &vpci_dev->dev_list[slot],
					 list) {
			struct pci_dev *dev = e->dev;
			list_del(&e->list);
			device_lock(&dev->dev);
			pcistub_put_pci_dev(dev);
			device_unlock(&dev->dev);
			kfree(e);
		}
	}

	kfree(vpci_dev);
	pdev->pci_dev_data = NULL;
}

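/*
 * Reverse lookup: find the virtual domain/bus/devfn under which @pcidev is
 * exported.  Returns 1 and fills in the triplet if found, 0 otherwise.
 */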
static int __xen_pcibk_get_pcifront_dev(struct pci_dev *pcidev,
					struct xen_pcibk_device *pdev,
					unsigned int *domain, unsigned int *bus,
					unsigned int *devfn)
{
	struct pci_dev_entry *entry;
	struct vpci_dev_data *vpci_dev = pdev->pci_dev_data;
	int found = 0, slot;

	mutex_lock(&vpci_dev->lock);
	for (slot = 0; slot < PCI_SLOT_MAX; slot++) {
		list_for_each_entry(entry,
			    &vpci_dev->dev_list[slot],
			    list) {
			if (entry->dev == pcidev) {
				found = 1;
				*domain = 0;
				*bus = 0;
				*devfn = PCI_DEVFN(slot,
					 PCI_FUNC(pcidev->devfn));
			}
		}
	}
	mutex_unlock(&vpci_dev->lock);
	return found;
}

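/*
 * Backend operations for the "vpci" mode of xen-pciback, which exposes the
 * exported devices to the frontend on a single flat virtual bus.
 */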
const struct xen_pcibk_backend xen_pcibk_vpci_backend = {
	.name		= "vpci",
	.init		= __xen_pcibk_init_devices,
	.free		= __xen_pcibk_release_devices,
	.find		= __xen_pcibk_get_pcifront_dev,
	.publish	= __xen_pcibk_publish_pci_roots,
	.release	= __xen_pcibk_release_pci_dev,
	.add		= __xen_pcibk_add_pci_dev,
	.get		= __xen_pcibk_get_pci_dev,
};