// SPDX-License-Identifier: GPL-2.0
/*
 * PCI Backend - Provides a Virtual PCI bus (with real devices)
 *               to the frontend
 *
 * Author: Ryan Wilson <hap9@epoch.ncsc.mil>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#define dev_fmt pr_fmt

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/mutex.h>
#include "pciback.h"

#define PCI_SLOT_MAX 32

struct vpci_dev_data {
	/* Access to dev_list must be protected by lock */
	struct list_head dev_list[PCI_SLOT_MAX];
	struct mutex lock;
};

static inline struct list_head *list_first(struct list_head *head)
{
	return head->next;
}

/*
 * Look up the real PCI device that sits in virtual slot/function @devfn.
 * The virtual PCI bus only ever exposes domain 0, bus 0.
 */
static struct pci_dev *__xen_pcibk_get_pci_dev(struct xen_pcibk_device *pdev,
					       unsigned int domain,
					       unsigned int bus,
					       unsigned int devfn)
{
	struct pci_dev_entry *entry;
	struct pci_dev *dev = NULL;
	struct vpci_dev_data *vpci_dev = pdev->pci_dev_data;

	if (domain != 0 || bus != 0)
		return NULL;

	if (PCI_SLOT(devfn) < PCI_SLOT_MAX) {
		mutex_lock(&vpci_dev->lock);

		list_for_each_entry(entry,
				    &vpci_dev->dev_list[PCI_SLOT(devfn)],
				    list) {
			if (PCI_FUNC(entry->dev->devfn) == PCI_FUNC(devfn)) {
				dev = entry->dev;
				break;
			}
		}

		mutex_unlock(&vpci_dev->lock);
	}
	return dev;
}

/* Two devices share a virtual slot if they share the same physical
 * domain, bus and slot. */
static inline int match_slot(struct pci_dev *l, struct pci_dev *r)
{
	if (pci_domain_nr(l->bus) == pci_domain_nr(r->bus)
	    && l->bus == r->bus && PCI_SLOT(l->devfn) == PCI_SLOT(r->devfn))
		return 1;

	return 0;
}

/* Assign @dev to a slot on the virtual PCI bus and publish it to the
 * frontend via @publish_cb. */
static int __xen_pcibk_add_pci_dev(struct xen_pcibk_device *pdev,
				   struct pci_dev *dev, int devid,
				   publish_pci_dev_cb publish_cb)
{
	int err = 0, slot, func = -1;
	struct pci_dev_entry *t, *dev_entry;
	struct vpci_dev_data *vpci_dev = pdev->pci_dev_data;

	if ((dev->class >> 24) == PCI_BASE_CLASS_BRIDGE) {
		err = -EFAULT;
		xenbus_dev_fatal(pdev->xdev, err,
				 "Can't export bridges on the virtual PCI bus");
		goto out;
	}

	dev_entry = kmalloc(sizeof(*dev_entry), GFP_KERNEL);
	if (!dev_entry) {
		err = -ENOMEM;
		xenbus_dev_fatal(pdev->xdev, err,
				 "Error adding entry to virtual PCI bus");
		goto out;
	}

	dev_entry->dev = dev;

	mutex_lock(&vpci_dev->lock);

	/*
	 * Keep multi-function devices together on the virtual PCI bus, except
	 * virtual functions.
	 */
	if (!dev->is_virtfn) {
		for (slot = 0; slot < PCI_SLOT_MAX; slot++) {
			if (list_empty(&vpci_dev->dev_list[slot]))
				continue;

			t = list_entry(list_first(&vpci_dev->dev_list[slot]),
				       struct pci_dev_entry, list);

			if (match_slot(dev, t->dev)) {
				dev_info(&dev->dev, "vpci: assign to virtual slot %d func %d\n",
					 slot, PCI_FUNC(dev->devfn));
				list_add_tail(&dev_entry->list,
					      &vpci_dev->dev_list[slot]);
				func = PCI_FUNC(dev->devfn);
				goto unlock;
			}
		}
	}

	/* Assign to a new slot on the virtual PCI bus */
	for (slot = 0; slot < PCI_SLOT_MAX; slot++) {
		if (list_empty(&vpci_dev->dev_list[slot])) {
			dev_info(&dev->dev, "vpci: assign to virtual slot %d\n",
				 slot);
			list_add_tail(&dev_entry->list,
				      &vpci_dev->dev_list[slot]);
			func = dev->is_virtfn ? 0 : PCI_FUNC(dev->devfn);
			goto unlock;
		}
	}

	err = -ENOMEM;
	xenbus_dev_fatal(pdev->xdev, err,
			 "No more space on root virtual PCI bus");

unlock:
	mutex_unlock(&vpci_dev->lock);

	/* Publish this device. */
	if (!err)
		err = publish_cb(pdev, 0, 0, PCI_DEVFN(slot, func), devid);
	else
		kfree(dev_entry);

out:
	return err;
}

/* Remove @dev from the virtual bus and hand it back to pciback's stub. */
static void __xen_pcibk_release_pci_dev(struct xen_pcibk_device *pdev,
					struct pci_dev *dev, bool lock)
{
	int slot;
	struct vpci_dev_data *vpci_dev = pdev->pci_dev_data;
	struct pci_dev *found_dev = NULL;

	mutex_lock(&vpci_dev->lock);

	for (slot = 0; slot < PCI_SLOT_MAX; slot++) {
		struct pci_dev_entry *e;

		list_for_each_entry(e, &vpci_dev->dev_list[slot], list) {
			if (e->dev == dev) {
				list_del(&e->list);
				found_dev = e->dev;
				kfree(e);
				goto out;
			}
		}
	}

out:
	mutex_unlock(&vpci_dev->lock);

	if (found_dev) {
		if (lock)
			device_lock(&found_dev->dev);
		pcistub_put_pci_dev(found_dev);
		if (lock)
			device_unlock(&found_dev->dev);
	}
}

/* Allocate and initialize the per-pdev virtual bus state. */
static int __xen_pcibk_init_devices(struct xen_pcibk_device *pdev)
{
	int slot;
	struct vpci_dev_data *vpci_dev;

	vpci_dev = kmalloc(sizeof(*vpci_dev), GFP_KERNEL);
	if (!vpci_dev)
		return -ENOMEM;

	mutex_init(&vpci_dev->lock);

	for (slot = 0; slot < PCI_SLOT_MAX; slot++)
		INIT_LIST_HEAD(&vpci_dev->dev_list[slot]);

	pdev->pci_dev_data = vpci_dev;

	return 0;
}

static int __xen_pcibk_publish_pci_roots(struct xen_pcibk_device *pdev,
					 publish_pci_root_cb publish_cb)
{
	/* The Virtual PCI bus has only one root */
	return publish_cb(pdev, 0, 0);
}

/* Tear down the virtual bus, returning every device to pciback's stub. */
static void __xen_pcibk_release_devices(struct xen_pcibk_device *pdev)
{
	int slot;
	struct vpci_dev_data *vpci_dev = pdev->pci_dev_data;

	for (slot = 0; slot < PCI_SLOT_MAX; slot++) {
		struct pci_dev_entry *e, *tmp;

		list_for_each_entry_safe(e, tmp, &vpci_dev->dev_list[slot],
					 list) {
			struct pci_dev *dev = e->dev;

			list_del(&e->list);
			device_lock(&dev->dev);
			pcistub_put_pci_dev(dev);
			device_unlock(&dev->dev);
			kfree(e);
		}
	}

	kfree(vpci_dev);
	pdev->pci_dev_data = NULL;
}

/*
 * Translate a physical device into the virtual domain/bus/devfn seen by
 * the frontend.  Returns 1 if the device is on this virtual bus.
 */
static int __xen_pcibk_get_pcifront_dev(struct pci_dev *pcidev,
					struct xen_pcibk_device *pdev,
					unsigned int *domain, unsigned int *bus,
					unsigned int *devfn)
{
	struct pci_dev_entry *entry;
	struct pci_dev *dev = NULL;
	struct vpci_dev_data *vpci_dev = pdev->pci_dev_data;
	int found = 0, slot;

	mutex_lock(&vpci_dev->lock);
	for (slot = 0; slot < PCI_SLOT_MAX; slot++) {
		list_for_each_entry(entry,
				    &vpci_dev->dev_list[slot],
				    list) {
			dev = entry->dev;
			if (dev && dev->bus->number == pcidev->bus->number
			    && pci_domain_nr(dev->bus) ==
				pci_domain_nr(pcidev->bus)
			    && dev->devfn == pcidev->devfn) {
				found = 1;
				*domain = 0;
				*bus = 0;
				*devfn = PCI_DEVFN(slot,
						   PCI_FUNC(pcidev->devfn));
			}
		}
	}
	mutex_unlock(&vpci_dev->lock);
	return found;
}

const struct xen_pcibk_backend xen_pcibk_vpci_backend = {
	.name		= "vpci",
	.init		= __xen_pcibk_init_devices,
	.free		= __xen_pcibk_release_devices,
	.find		= __xen_pcibk_get_pcifront_dev,
	.publish	= __xen_pcibk_publish_pci_roots,
	.release	= __xen_pcibk_release_pci_dev,
	.add		= __xen_pcibk_add_pci_dev,
	.get		= __xen_pcibk_get_pci_dev,
};