/*-
 * Copyright (c) 2013-2015 Sandvine Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_bus.h"

#include <sys/param.h>
#include <sys/conf.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/fcntl.h>
#include <sys/ioccom.h>
#include <sys/iov.h>
#include <sys/linker.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/pciio.h>
#include <sys/queue.h>
#include <sys/rman.h>
#include <sys/sysctl.h>

#include <machine/bus.h>
#include <machine/stdarg.h>

#include <sys/nv.h>
#include <sys/iov_schema.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pci_iov.h>
#include <dev/pci/pci_private.h>
#include <dev/pci/pci_iov_private.h>
#include <dev/pci/schema_private.h>

#include "pcib_if.h"

static MALLOC_DEFINE(M_SRIOV, "sr_iov", "PCI SR-IOV allocations");

static d_ioctl_t pci_iov_ioctl;

static struct cdevsw iov_cdevsw = {
	.d_version = D_VERSION,
	.d_name = "iov",
	.d_ioctl = pci_iov_ioctl
};

SYSCTL_DECL(_hw_pci);

/*
 * The maximum amount of memory we will allocate for user configuration of an
 * SR-IOV device.  1MB ought to be enough for anyone, but leave this
 * configurable just in case.
 */
static u_long pci_iov_max_config = 1024 * 1024;
SYSCTL_ULONG(_hw_pci, OID_AUTO, iov_max_config, CTLFLAG_RWTUN,
    &pci_iov_max_config, 0, "Maximum allowed size of SR-IOV configuration.");

#define IOV_READ(d, r, w) \
	pci_read_config((d)->cfg.dev, (d)->cfg.iov->iov_pos + r, w)

#define IOV_WRITE(d, r, v, w) \
	pci_write_config((d)->cfg.dev, (d)->cfg.iov->iov_pos + r, v, w)

static nvlist_t	*pci_iov_build_schema(nvlist_t **pf_schema,
		    nvlist_t **vf_schema);
static void	pci_iov_build_pf_schema(nvlist_t *schema,
		    nvlist_t **driver_schema);
static void	pci_iov_build_vf_schema(nvlist_t *schema,
		    nvlist_t **driver_schema);
static int	pci_iov_delete_iov_children(struct pci_devinfo *dinfo);
static nvlist_t	*pci_iov_get_pf_subsystem_schema(void);
static nvlist_t	*pci_iov_get_vf_subsystem_schema(void);

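/*
 * Format the name of the PF's /dev/iov node and hand the schemas off to the
 * bus via the PCI_IOV_ATTACH method.  Drivers typically reach this through
 * the pci_iov_attach() convenience wrapper from their attach routine,
 * roughly as in this illustrative sketch ("mac-addr" is only an example of
 * a driver-defined VF parameter, not something this file requires):
 *
 *	pf_schema = pci_iov_schema_alloc_node();
 *	vf_schema = pci_iov_schema_alloc_node();
 *	pci_iov_schema_add_unicast_mac(vf_schema, "mac-addr", 0, NULL);
 *	error = pci_iov_attach(dev, pf_schema, vf_schema);
 */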
int
pci_iov_attach_name(device_t dev, struct nvlist *pf_schema,
    struct nvlist *vf_schema, const char *fmt, ...)
{
	char buf[NAME_MAX + 1];
	va_list ap;

	va_start(ap, fmt);
	vsnprintf(buf, sizeof(buf), fmt, ap);
	va_end(ap);
	return (PCI_IOV_ATTACH(device_get_parent(dev), dev, pf_schema,
	    vf_schema, buf));
}

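/*
 * Default bus implementation of PCI_IOV_ATTACH.  Verify that the device has
 * a version 1 SR-IOV capability, build and validate the combined
 * configuration schema, and create the /dev/iov/<name> node through which
 * userland drives VF creation.  The cfg.iov pointer is protected by Giant.
 */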
int
pci_iov_attach_method(device_t bus, device_t dev, nvlist_t *pf_schema,
    nvlist_t *vf_schema, const char *name)
{
	device_t pcib;
	struct pci_devinfo *dinfo;
	struct pcicfg_iov *iov;
	nvlist_t *schema;
	uint32_t version;
	int error;
	int iov_pos;

	dinfo = device_get_ivars(dev);
	pcib = device_get_parent(bus);
	schema = NULL;

	error = pci_find_extcap(dev, PCIZ_SRIOV, &iov_pos);

	if (error != 0)
		return (error);

	version = pci_read_config(dev, iov_pos, 4);
	if (PCI_EXTCAP_VER(version) != 1) {
		if (bootverbose)
			device_printf(dev,
			    "Unsupported version of SR-IOV (%d) detected\n",
			    PCI_EXTCAP_VER(version));

		return (ENXIO);
	}

	iov = malloc(sizeof(*dinfo->cfg.iov), M_SRIOV, M_WAITOK | M_ZERO);

	mtx_lock(&Giant);
	if (dinfo->cfg.iov != NULL) {
		error = EBUSY;
		goto cleanup;
	}
	iov->iov_pos = iov_pos;

	schema = pci_iov_build_schema(&pf_schema, &vf_schema);
	if (schema == NULL) {
		error = ENOMEM;
		goto cleanup;
	}

	error = pci_iov_validate_schema(schema);
	if (error != 0)
		goto cleanup;
	iov->iov_schema = schema;

	iov->iov_cdev = make_dev(&iov_cdevsw, device_get_unit(dev),
	    UID_ROOT, GID_WHEEL, 0600, "iov/%s", name);

	if (iov->iov_cdev == NULL) {
		error = ENOMEM;
		goto cleanup;
	}

	dinfo->cfg.iov = iov;
	iov->iov_cdev->si_drv1 = dinfo;
	mtx_unlock(&Giant);

	return (0);

cleanup:
	nvlist_destroy(schema);
	nvlist_destroy(pf_schema);
	nvlist_destroy(vf_schema);
	free(iov, M_SRIOV);
	mtx_unlock(&Giant);
	return (error);
}

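/*
 * Default bus implementation of PCI_IOV_DETACH.  Fails with EBUSY if an
 * ioctl is in flight; otherwise destroys any VF children before tearing
 * down the character device and the schema.
 */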
int
pci_iov_detach_method(device_t bus, device_t dev)
{
	struct pci_devinfo *dinfo;
	struct pcicfg_iov *iov;
	int error;

	mtx_lock(&Giant);
	dinfo = device_get_ivars(dev);
	iov = dinfo->cfg.iov;

	if (iov == NULL) {
		mtx_unlock(&Giant);
		return (0);
	}

	if ((iov->iov_flags & IOV_BUSY) != 0) {
		mtx_unlock(&Giant);
		return (EBUSY);
	}

	error = pci_iov_delete_iov_children(dinfo);
	if (error != 0) {
		mtx_unlock(&Giant);
		return (error);
	}

	dinfo->cfg.iov = NULL;

	if (iov->iov_cdev) {
		destroy_dev(iov->iov_cdev);
		iov->iov_cdev = NULL;
	}
	nvlist_destroy(iov->iov_schema);

	free(iov, M_SRIOV);
	mtx_unlock(&Giant);

	return (0);
}

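/*
 * Merge the driver-supplied PF and VF schemas with the iov subsystem's own
 * parameters into a single schema nvlist.  Ownership of both driver schemas
 * is always taken, even on failure.
 */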
static nvlist_t *
pci_iov_build_schema(nvlist_t **pf, nvlist_t **vf)
{
	nvlist_t *schema, *pf_driver, *vf_driver;

	/* We always take ownership of the schemas. */
	pf_driver = *pf;
	*pf = NULL;
	vf_driver = *vf;
	*vf = NULL;

	schema = pci_iov_schema_alloc_node();
	if (schema == NULL)
		goto cleanup;

	pci_iov_build_pf_schema(schema, &pf_driver);
	pci_iov_build_vf_schema(schema, &vf_driver);

	if (nvlist_error(schema) != 0)
		goto cleanup;

	return (schema);

cleanup:
	nvlist_destroy(schema);
	nvlist_destroy(pf_driver);
	nvlist_destroy(vf_driver);
	return (NULL);
}

static void
pci_iov_build_pf_schema(nvlist_t *schema, nvlist_t **driver_schema)
{
	nvlist_t *pf_schema, *iov_schema;

	pf_schema = pci_iov_schema_alloc_node();
	if (pf_schema == NULL) {
		nvlist_set_error(schema, ENOMEM);
		return;
	}

	iov_schema = pci_iov_get_pf_subsystem_schema();

	/*
	 * Note that if either *driver_schema or iov_schema is NULL, then
	 * nvlist_move_nvlist will put the schema in the error state and
	 * SR-IOV will fail to initialize later, so we don't have to explicitly
	 * handle that case.
	 */
	nvlist_move_nvlist(pf_schema, DRIVER_CONFIG_NAME, *driver_schema);
	nvlist_move_nvlist(pf_schema, IOV_CONFIG_NAME, iov_schema);
	nvlist_move_nvlist(schema, PF_CONFIG_NAME, pf_schema);
	*driver_schema = NULL;
}

static void
pci_iov_build_vf_schema(nvlist_t *schema, nvlist_t **driver_schema)
{
	nvlist_t *vf_schema, *iov_schema;

	vf_schema = pci_iov_schema_alloc_node();
	if (vf_schema == NULL) {
		nvlist_set_error(schema, ENOMEM);
		return;
	}

	iov_schema = pci_iov_get_vf_subsystem_schema();

	/*
	 * Note that if either *driver_schema or iov_schema is NULL, then
	 * nvlist_move_nvlist will put the schema in the error state and
	 * SR-IOV will fail to initialize later, so we don't have to explicitly
	 * handle that case.
	 */
	nvlist_move_nvlist(vf_schema, DRIVER_CONFIG_NAME, *driver_schema);
	nvlist_move_nvlist(vf_schema, IOV_CONFIG_NAME, iov_schema);
	nvlist_move_nvlist(schema, VF_SCHEMA_NAME, vf_schema);
	*driver_schema = NULL;
}

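/*
 * Parameters that the iov subsystem itself requires in the PF section of
 * every configuration: the number of VFs to create and the PF device name.
 */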
static nvlist_t *
pci_iov_get_pf_subsystem_schema(void)
{
	nvlist_t *pf;

	pf = pci_iov_schema_alloc_node();
	if (pf == NULL)
		return (NULL);

	pci_iov_schema_add_uint16(pf, "num_vfs", IOV_SCHEMA_REQUIRED, -1);
	pci_iov_schema_add_string(pf, "device", IOV_SCHEMA_REQUIRED, NULL);

	return (pf);
}

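/*
 * Parameters that the iov subsystem defines for each VF section; at present
 * only the optional "passthrough" flag, which defaults to false.
 */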
static nvlist_t *
pci_iov_get_vf_subsystem_schema(void)
{
	nvlist_t *vf;

	vf = pci_iov_schema_alloc_node();
	if (vf == NULL)
		return (NULL);

	pci_iov_schema_add_bool(vf, "passthrough", IOV_SCHEMA_HASDEFAULT, 0);

	return (vf);
}

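/*
 * Size and allocate the backing for one SR-IOV BAR.  The hardware lays out
 * the VFs' copies of a BAR contiguously, so a single allocation of
 * iov_num_vfs times the per-VF size covers every VF; the region is then
 * handed to the VF rman so that per-VF slices can be carved out later.
 */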
static int
pci_iov_alloc_bar(struct pci_devinfo *dinfo, int bar, pci_addr_t bar_shift)
{
	struct resource *res;
	struct pcicfg_iov *iov;
	device_t dev, bus;
	rman_res_t start, end;
	pci_addr_t bar_size;
	int rid;

	iov = dinfo->cfg.iov;
	dev = dinfo->cfg.dev;
	bus = device_get_parent(dev);
	rid = iov->iov_pos + PCIR_SRIOV_BAR(bar);
	bar_size = 1 << bar_shift;

	res = pci_alloc_multi_resource(bus, dev, SYS_RES_MEMORY, &rid, 0,
	    ~0, 1, iov->iov_num_vfs, RF_ACTIVE);

	if (res == NULL)
		return (ENXIO);

	iov->iov_bar[bar].res = res;
	iov->iov_bar[bar].bar_size = bar_size;
	iov->iov_bar[bar].bar_shift = bar_shift;

	start = rman_get_start(res);
	end = rman_get_end(res);
	return (rman_manage_region(&iov->rman, start, end));
}

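/*
 * Record a VF's slice of each enabled SR-IOV BAR in its config state.  The
 * slice starts at the VF's index times the per-VF BAR size within the PF's
 * allocation.
 */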
static void
pci_iov_add_bars(struct pcicfg_iov *iov, struct pci_devinfo *dinfo)
{
	struct pci_iov_bar *bar;
	uint64_t bar_start;
	int i;

	for (i = 0; i <= PCIR_MAX_BAR_0; i++) {
		bar = &iov->iov_bar[i];
		if (bar->res != NULL) {
			bar_start = rman_get_start(bar->res) +
			    dinfo->cfg.vf.index * bar->bar_size;

			pci_add_bar(dinfo->cfg.dev, PCIR_BAR(i), bar_start,
			    bar->bar_shift);
		}
	}
}

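/*
 * Copy in and unpack the packed configuration nvlist from userland, bounding
 * the copy at pci_iov_max_config, then check it against the schema
 * registered at attach time.  On success the caller owns *ret.
 */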
static int
pci_iov_parse_config(struct pcicfg_iov *iov, struct pci_iov_arg *arg,
    nvlist_t **ret)
{
	void *packed_config;
	nvlist_t *config;
	int error;

	config = NULL;
	packed_config = NULL;

	if (arg->len > pci_iov_max_config) {
		error = EMSGSIZE;
		goto out;
	}

	packed_config = malloc(arg->len, M_SRIOV, M_WAITOK);

	error = copyin(arg->config, packed_config, arg->len);
	if (error != 0)
		goto out;

	config = nvlist_unpack(packed_config, arg->len, NV_FLAG_IGNORE_CASE);
	if (config == NULL) {
		error = EINVAL;
		goto out;
	}

	error = pci_iov_schema_validate_config(iov->iov_schema, config);
	if (error != 0)
		goto out;

	error = nvlist_error(config);
	if (error != 0)
		goto out;

	*ret = config;
	config = NULL;

out:
	nvlist_destroy(config);
	free(packed_config, M_SRIOV);
	return (error);
}

/*
 * Set the ARI_EN bit in the lowest-numbered PCI function with the SR-IOV
 * capability.  This bit is only writable on the lowest-numbered PF but
 * affects all PFs on the device.
 */
static int
pci_iov_set_ari(device_t bus)
{
	device_t lowest;
	device_t *devlist;
	int i, error, devcount, lowest_func, lowest_pos, iov_pos, dev_func;
	uint16_t iov_ctl;

	/* If ARI is disabled on the downstream port there is nothing to do. */
	if (!PCIB_ARI_ENABLED(device_get_parent(bus)))
		return (0);

	error = device_get_children(bus, &devlist, &devcount);

	if (error != 0)
		return (error);

	lowest = NULL;
	for (i = 0; i < devcount; i++) {
		if (pci_find_extcap(devlist[i], PCIZ_SRIOV, &iov_pos) == 0) {
			dev_func = pci_get_function(devlist[i]);
			if (lowest == NULL || dev_func < lowest_func) {
				lowest = devlist[i];
				lowest_func = dev_func;
				lowest_pos = iov_pos;
			}
		}
	}
	free(devlist, M_TEMP);

	/*
	 * If we were called, at least one child of this bus must have the
	 * SR-IOV capability.
	 */
	KASSERT(lowest != NULL,
	    ("Could not find child of %s with SR-IOV capability",
	    device_get_nameunit(bus)));

	iov_ctl = pci_read_config(lowest, lowest_pos + PCIR_SRIOV_CTL, 2);
	iov_ctl |= PCIM_SRIOV_ARI_EN;
	pci_write_config(lowest, lowest_pos + PCIR_SRIOV_CTL, iov_ctl, 2);
	if ((pci_read_config(lowest, lowest_pos + PCIR_SRIOV_CTL, 2) &
	    PCIM_SRIOV_ARI_EN) == 0) {
		device_printf(lowest, "failed to enable ARI\n");
		return (ENXIO);
	}
	return (0);
}

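/*
 * Program the SR-IOV System Page Size register to the host's page size (or
 * the minimum SR-IOV page size if the host's is smaller), failing if the
 * device cannot support that size.
 */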
static int
pci_iov_config_page_size(struct pci_devinfo *dinfo)
{
	uint32_t page_cap, page_size;

	page_cap = IOV_READ(dinfo, PCIR_SRIOV_PAGE_CAP, 4);

	/*
	 * If the system page size is less than the smallest SR-IOV page size
	 * then round up to the smallest SR-IOV page size.
	 */
	if (PAGE_SHIFT < PCI_SRIOV_BASE_PAGE_SHIFT)
		page_size = (1 << 0);
	else
		page_size = (1 << (PAGE_SHIFT - PCI_SRIOV_BASE_PAGE_SHIFT));

	/* Check that the device supports the system page size. */
	if (!(page_size & page_cap))
		return (ENXIO);

	IOV_WRITE(dinfo, PCIR_SRIOV_PAGE_SIZE, page_size, 4);
	return (0);
}

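/*
 * Pass the PF section's driver-specific configuration to the PF driver's
 * PCI_IOV_INIT method.
 */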
static int
pci_iov_init(device_t dev, uint16_t num_vfs, const nvlist_t *config)
{
	const nvlist_t *device, *driver_config;

	device = nvlist_get_nvlist(config, PF_CONFIG_NAME);
	driver_config = nvlist_get_nvlist(device, DRIVER_CONFIG_NAME);
	return (PCI_IOV_INIT(dev, num_vfs, driver_config));
}

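/*
 * Initialize the resource manager from which VF memory BARs will be
 * allocated.  The rman is empty until pci_iov_setup_bars() adds the regions
 * backing each SR-IOV BAR.
 */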
static int
pci_iov_init_rman(device_t pf, struct pcicfg_iov *iov)
{
	int error;

	iov->rman.rm_start = 0;
	iov->rman.rm_end = ~0;
	iov->rman.rm_type = RMAN_ARRAY;
	snprintf(iov->rman_name, sizeof(iov->rman_name), "%s VF I/O memory",
	    device_get_nameunit(pf));
	iov->rman.rm_descr = iov->rman_name;

	error = rman_init(&iov->rman);
	if (error != 0)
		return (error);

	iov->iov_flags |= IOV_RMAN_INITED;
	return (0);
}

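/*
 * Use a BAR already provided through PCI Enhanced Allocation (EA), if one
 * exists for this SR-IOV BAR, instead of allocating a region ourselves.
 * The per-VF size is derived by dividing the EA region evenly among the VFs.
 */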
static int
pci_iov_alloc_bar_ea(struct pci_devinfo *dinfo, int bar)
{
	struct pcicfg_iov *iov;
	rman_res_t start, end;
	struct resource *res;
	struct resource_list *rl;
	struct resource_list_entry *rle;

	rl = &dinfo->resources;
	iov = dinfo->cfg.iov;

	rle = resource_list_find(rl, SYS_RES_MEMORY,
	    iov->iov_pos + PCIR_SRIOV_BAR(bar));
	if (rle == NULL)
		rle = resource_list_find(rl, SYS_RES_IOPORT,
		    iov->iov_pos + PCIR_SRIOV_BAR(bar));
	if (rle == NULL)
		return (ENXIO);
	res = rle->res;

	iov->iov_bar[bar].res = res;
	iov->iov_bar[bar].bar_size = rman_get_size(res) / iov->iov_num_vfs;
	iov->iov_bar[bar].bar_shift = pci_mapsize(iov->iov_bar[bar].bar_size);

	start = rman_get_start(res);
	end = rman_get_end(res);

	return (rman_manage_region(&iov->rman, start, end));
}

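/*
 * Allocate backing memory for every implemented SR-IOV BAR, preferring
 * EA-provided regions and falling back to sizing the BAR by writing all
 * ones and reading back the mask, as with ordinary PCI BARs.
 */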
static int
pci_iov_setup_bars(struct pci_devinfo *dinfo)
{
	device_t dev;
	struct pcicfg_iov *iov;
	pci_addr_t bar_value, testval;
	int i, last_64, error;

	iov = dinfo->cfg.iov;
	dev = dinfo->cfg.dev;
	last_64 = 0;

	pci_add_resources_ea(device_get_parent(dev), dev, 1);

	for (i = 0; i <= PCIR_MAX_BAR_0; i++) {
		/* First, try to use BARs allocated with EA. */
		error = pci_iov_alloc_bar_ea(dinfo, i);
		if (error == 0)
			continue;

		/* Fall back to a legacy BAR only if EA is not in use for it. */
		if (pci_ea_is_enabled(dev, iov->iov_pos + PCIR_SRIOV_BAR(i)))
			continue;

		/*
		 * If a PCI BAR is a 64-bit wide BAR, then it spans two
		 * consecutive registers.  Therefore if the last BAR that
		 * we looked at was a 64-bit BAR, we need to skip this
		 * register as it's the second half of the last BAR.
		 */
		if (!last_64) {
			pci_read_bar(dev,
			    iov->iov_pos + PCIR_SRIOV_BAR(i),
			    &bar_value, &testval, &last_64);

			if (testval != 0) {
				error = pci_iov_alloc_bar(dinfo, i,
				    pci_mapsize(testval));
				if (error != 0)
					return (error);
			}
		} else
			last_64 = 0;
	}

	return (0);
}

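/*
 * Create a device_t for each VF, at RIDs starting at first_rid and spaced
 * rid_stride apart, apply its section of the configuration, and let the PF
 * driver account for it via PCI_IOV_ADD_VF.
 */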
static void
pci_iov_enumerate_vfs(struct pci_devinfo *dinfo, const nvlist_t *config,
    uint16_t first_rid, uint16_t rid_stride)
{
	char device_name[VF_MAX_NAME];
	const nvlist_t *device, *driver_config, *iov_config;
	device_t bus, dev, vf;
	struct pcicfg_iov *iov;
	struct pci_devinfo *vfinfo;
	int i, error;
	uint16_t vid, did, next_rid;

	iov = dinfo->cfg.iov;
	dev = dinfo->cfg.dev;
	bus = device_get_parent(dev);
	next_rid = first_rid;
	vid = pci_get_vendor(dev);
	did = IOV_READ(dinfo, PCIR_SRIOV_VF_DID, 2);

	for (i = 0; i < iov->iov_num_vfs; i++, next_rid += rid_stride) {
		snprintf(device_name, sizeof(device_name), VF_PREFIX"%d", i);
		device = nvlist_get_nvlist(config, device_name);
		iov_config = nvlist_get_nvlist(device, IOV_CONFIG_NAME);
		driver_config = nvlist_get_nvlist(device, DRIVER_CONFIG_NAME);

		vf = PCI_CREATE_IOV_CHILD(bus, dev, next_rid, vid, did);
		if (vf == NULL)
			break;

		/*
		 * If we are creating passthrough devices then force the ppt
		 * driver to attach to prevent a VF driver from claiming the
		 * VFs.
		 */
		if (nvlist_get_bool(iov_config, "passthrough"))
			device_set_devclass_fixed(vf, "ppt");

		vfinfo = device_get_ivars(vf);

		vfinfo->cfg.iov = iov;
		vfinfo->cfg.vf.index = i;

		pci_iov_add_bars(iov, vfinfo);

		error = PCI_IOV_ADD_VF(dev, i, driver_config);
		if (error != 0) {
			device_printf(dev, "Failed to add VF %d\n", i);
			device_delete_child(bus, vf);
		}
	}

	bus_generic_attach(bus);
}

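/*
 * Handler for the IOV_CONFIG ioctl: validate the requested configuration,
 * program the SR-IOV capability (page size, ARI, NumVFs), allocate the VF
 * BARs, enable VFs and enumerate them.  On any failure, all intermediate
 * state is rolled back.
 *
 * The configuration normally comes from iovctl(8).  The following userland
 * fragment is only an illustrative, unchecked sketch of the ioctl
 * interaction (the "ix0" device, the exact nvlist layout shown, and a PF
 * with no required driver parameters are all assumptions):
 *
 *	nvlist_t *cfg, *pf, *iovc;
 *	struct pci_iov_arg arg;
 *	int fd;
 *
 *	iovc = nvlist_create(NV_FLAG_IGNORE_CASE);
 *	nvlist_add_number(iovc, "num_vfs", 4);
 *	nvlist_add_string(iovc, "device", "ix0");
 *	pf = nvlist_create(NV_FLAG_IGNORE_CASE);
 *	nvlist_move_nvlist(pf, "iov", iovc);
 *	cfg = nvlist_create(NV_FLAG_IGNORE_CASE);
 *	nvlist_move_nvlist(cfg, "PF", pf);
 *	arg.config = nvlist_pack(cfg, &arg.len);
 *	fd = open("/dev/iov/ix0", O_RDWR);
 *	ioctl(fd, IOV_CONFIG, &arg);
 */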
static int
pci_iov_config(struct cdev *cdev, struct pci_iov_arg *arg)
{
	device_t bus, dev;
	struct pci_devinfo *dinfo;
	struct pcicfg_iov *iov;
	nvlist_t *config;
	int i, error;
	uint16_t rid_off, rid_stride;
	uint16_t first_rid, last_rid;
	uint16_t iov_ctl;
	uint16_t num_vfs, total_vfs;
	int iov_inited;

	mtx_lock(&Giant);
	dinfo = cdev->si_drv1;
	iov = dinfo->cfg.iov;
	dev = dinfo->cfg.dev;
	bus = device_get_parent(dev);
	iov_inited = 0;
	config = NULL;

	if ((iov->iov_flags & IOV_BUSY) || iov->iov_num_vfs != 0) {
		mtx_unlock(&Giant);
		return (EBUSY);
	}
	iov->iov_flags |= IOV_BUSY;

	error = pci_iov_parse_config(iov, arg, &config);
	if (error != 0)
		goto out;

	num_vfs = pci_iov_config_get_num_vfs(config);
	total_vfs = IOV_READ(dinfo, PCIR_SRIOV_TOTAL_VFS, 2);
	if (num_vfs > total_vfs) {
		error = EINVAL;
		goto out;
	}

	error = pci_iov_config_page_size(dinfo);
	if (error != 0)
		goto out;

	error = pci_iov_set_ari(bus);
	if (error != 0)
		goto out;

	error = pci_iov_init(dev, num_vfs, config);
	if (error != 0)
		goto out;
	iov_inited = 1;

	IOV_WRITE(dinfo, PCIR_SRIOV_NUM_VFS, num_vfs, 2);

	rid_off = IOV_READ(dinfo, PCIR_SRIOV_VF_OFF, 2);
	rid_stride = IOV_READ(dinfo, PCIR_SRIOV_VF_STRIDE, 2);

	first_rid = pci_get_rid(dev) + rid_off;
	last_rid = first_rid + (num_vfs - 1) * rid_stride;

	/* We don't yet support allocating extra bus numbers for VFs. */
	if (pci_get_bus(dev) != PCI_RID2BUS(last_rid)) {
		error = ENOSPC;
		goto out;
	}

	iov_ctl = IOV_READ(dinfo, PCIR_SRIOV_CTL, 2);
	iov_ctl &= ~(PCIM_SRIOV_VF_EN | PCIM_SRIOV_VF_MSE);
	IOV_WRITE(dinfo, PCIR_SRIOV_CTL, iov_ctl, 2);

	error = pci_iov_init_rman(dev, iov);
	if (error != 0)
		goto out;

	iov->iov_num_vfs = num_vfs;

	error = pci_iov_setup_bars(dinfo);
	if (error != 0)
		goto out;

	iov_ctl = IOV_READ(dinfo, PCIR_SRIOV_CTL, 2);
	iov_ctl |= PCIM_SRIOV_VF_EN | PCIM_SRIOV_VF_MSE;
	IOV_WRITE(dinfo, PCIR_SRIOV_CTL, iov_ctl, 2);

	/* Per specification, we must wait 100ms before accessing VFs. */
	pause("iov", roundup(hz, 10));
	pci_iov_enumerate_vfs(dinfo, config, first_rid, rid_stride);

	nvlist_destroy(config);
	iov->iov_flags &= ~IOV_BUSY;
	mtx_unlock(&Giant);

	return (0);
out:
	if (iov_inited)
		PCI_IOV_UNINIT(dev);

	for (i = 0; i <= PCIR_MAX_BAR_0; i++) {
		if (iov->iov_bar[i].res != NULL) {
			pci_release_resource(bus, dev, SYS_RES_MEMORY,
			    iov->iov_pos + PCIR_SRIOV_BAR(i),
			    iov->iov_bar[i].res);
			pci_delete_resource(bus, dev, SYS_RES_MEMORY,
			    iov->iov_pos + PCIR_SRIOV_BAR(i));
			iov->iov_bar[i].res = NULL;
		}
	}

	if (iov->iov_flags & IOV_RMAN_INITED) {
		rman_fini(&iov->rman);
		iov->iov_flags &= ~IOV_RMAN_INITED;
	}

	nvlist_destroy(config);
	iov->iov_num_vfs = 0;
	iov->iov_flags &= ~IOV_BUSY;
	mtx_unlock(&Giant);
	return (error);
}

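/*
 * Save and restore the SR-IOV capability registers that the generic PCI
 * config-space save/restore paths do not cover; used across suspend/resume
 * and device reset.
 */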
void
pci_iov_cfg_restore(device_t dev, struct pci_devinfo *dinfo)
{
	struct pcicfg_iov *iov;

	iov = dinfo->cfg.iov;

	IOV_WRITE(dinfo, PCIR_SRIOV_PAGE_SIZE, iov->iov_page_size, 4);
	IOV_WRITE(dinfo, PCIR_SRIOV_NUM_VFS, iov->iov_num_vfs, 2);
	IOV_WRITE(dinfo, PCIR_SRIOV_CTL, iov->iov_ctl, 2);
}

void
pci_iov_cfg_save(device_t dev, struct pci_devinfo *dinfo)
{
	struct pcicfg_iov *iov;

	iov = dinfo->cfg.iov;

	iov->iov_page_size = IOV_READ(dinfo, PCIR_SRIOV_PAGE_SIZE, 4);
	iov->iov_ctl = IOV_READ(dinfo, PCIR_SRIOV_CTL, 2);
}

/* Return true if child is a VF of the given PF. */
static int
pci_iov_is_child_vf(struct pcicfg_iov *pf, device_t child)
{
	struct pci_devinfo *vfinfo;

	vfinfo = device_get_ivars(child);

	if (!(vfinfo->cfg.flags & PCICFG_VF))
		return (0);

	return (pf == vfinfo->cfg.iov);
}

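/*
 * Detach and delete every VF child of the given PF, then disable VF
 * enumeration and memory decoding, release the BAR backing and tear down
 * the rman.  Stops and fails if any VF refuses to detach.
 */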
static int
pci_iov_delete_iov_children(struct pci_devinfo *dinfo)
{
	device_t bus, dev, vf, *devlist;
	struct pcicfg_iov *iov;
	int i, error, devcount;
	uint32_t iov_ctl;

	mtx_assert(&Giant, MA_OWNED);

	iov = dinfo->cfg.iov;
	dev = dinfo->cfg.dev;
	bus = device_get_parent(dev);
	devlist = NULL;

	iov->iov_flags |= IOV_BUSY;

	error = device_get_children(bus, &devlist, &devcount);

	if (error != 0)
		goto out;

	for (i = 0; i < devcount; i++) {
		vf = devlist[i];

		if (!pci_iov_is_child_vf(iov, vf))
			continue;

		error = device_detach(vf);
		if (error != 0) {
			device_printf(dev,
			    "Could not disable SR-IOV: failed to detach VF %s\n",
			    device_get_nameunit(vf));
			goto out;
		}
	}

	for (i = 0; i < devcount; i++) {
		vf = devlist[i];

		if (pci_iov_is_child_vf(iov, vf))
			device_delete_child(bus, vf);
	}
	PCI_IOV_UNINIT(dev);

	iov_ctl = IOV_READ(dinfo, PCIR_SRIOV_CTL, 2);
	iov_ctl &= ~(PCIM_SRIOV_VF_EN | PCIM_SRIOV_VF_MSE);
	IOV_WRITE(dinfo, PCIR_SRIOV_CTL, iov_ctl, 2);
	IOV_WRITE(dinfo, PCIR_SRIOV_NUM_VFS, 0, 2);

	iov->iov_num_vfs = 0;

	for (i = 0; i <= PCIR_MAX_BAR_0; i++) {
		if (iov->iov_bar[i].res != NULL) {
			pci_release_resource(bus, dev, SYS_RES_MEMORY,
			    iov->iov_pos + PCIR_SRIOV_BAR(i),
			    iov->iov_bar[i].res);
			pci_delete_resource(bus, dev, SYS_RES_MEMORY,
			    iov->iov_pos + PCIR_SRIOV_BAR(i));
			iov->iov_bar[i].res = NULL;
		}
	}

	if (iov->iov_flags & IOV_RMAN_INITED) {
		rman_fini(&iov->rman);
		iov->iov_flags &= ~IOV_RMAN_INITED;
	}

	error = 0;
out:
	free(devlist, M_TEMP);
	iov->iov_flags &= ~IOV_BUSY;
	return (error);
}

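/*
 * Handler for the IOV_DELETE ioctl.  ECHILD indicates that no VFs are
 * currently configured.
 */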
static int
pci_iov_delete(struct cdev *cdev)
{
	struct pci_devinfo *dinfo;
	struct pcicfg_iov *iov;
	int error;

	mtx_lock(&Giant);
	dinfo = cdev->si_drv1;
	iov = dinfo->cfg.iov;

	if ((iov->iov_flags & IOV_BUSY) != 0) {
		error = EBUSY;
		goto out;
	}
	if (iov->iov_num_vfs == 0) {
		error = ECHILD;
		goto out;
	}

	error = pci_iov_delete_iov_children(dinfo);

out:
	mtx_unlock(&Giant);
	return (error);
}

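/*
 * Handler for the IOV_GET_SCHEMA ioctl: pack the schema nvlist and copy it
 * out if the caller's buffer is large enough, always reporting the required
 * size in output->len.
 */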
static int
pci_iov_get_schema_ioctl(struct cdev *cdev, struct pci_iov_schema *output)
{
	struct pci_devinfo *dinfo;
	void *packed;
	size_t output_len, size;
	int error;

	packed = NULL;

	mtx_lock(&Giant);
	dinfo = cdev->si_drv1;
	packed = nvlist_pack(dinfo->cfg.iov->iov_schema, &size);
	mtx_unlock(&Giant);

	if (packed == NULL) {
		error = ENOMEM;
		goto fail;
	}

	output_len = output->len;
	output->len = size;
	if (size <= output_len) {
		error = copyout(packed, output->schema, size);

		if (error != 0)
			goto fail;

		output->error = 0;
	} else
		/*
		 * If we return an error then the ioctl code won't copyout
		 * output back to userland, so we flag the error in the struct
		 * instead.
		 */
		output->error = EMSGSIZE;

	error = 0;

fail:
	free(packed, M_NVLIST);

	return (error);
}

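/*
 * Dispatch ioctls on a PF's /dev/iov node.
 */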
static int
pci_iov_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag,
    struct thread *td)
{

	switch (cmd) {
	case IOV_CONFIG:
		return (pci_iov_config(dev, (struct pci_iov_arg *)data));
	case IOV_DELETE:
		return (pci_iov_delete(dev));
	case IOV_GET_SCHEMA:
		return (pci_iov_get_schema_ioctl(dev,
		    (struct pci_iov_schema *)data));
	default:
		return (EINVAL);
	}
}

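/*
 * Bus method used to satisfy a VF's memory BAR allocation from the rman
 * populated when SR-IOV was configured, rather than from the ordinary PCI
 * allocation path.  The VF's BAR address and size come from the pci_map
 * recorded by pci_iov_add_bars().
 */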
struct resource *
pci_vf_alloc_mem_resource(device_t dev, device_t child, int *rid,
    rman_res_t start, rman_res_t end, rman_res_t count, u_int flags)
{
	struct pci_devinfo *dinfo;
	struct pcicfg_iov *iov;
	struct pci_map *map;
	struct resource *res;
	struct resource_list_entry *rle;
	rman_res_t bar_start, bar_end;
	pci_addr_t bar_length;
	int error;

	dinfo = device_get_ivars(child);
	iov = dinfo->cfg.iov;

	map = pci_find_bar(child, *rid);
	if (map == NULL)
		return (NULL);

	bar_length = 1 << map->pm_size;
	bar_start = map->pm_value;
	bar_end = bar_start + bar_length - 1;

	/* Make sure that the resource fits the constraints. */
	if (bar_start >= end || bar_end <= bar_start || count != 1)
		return (NULL);

	/* Clamp the resource to the constraints if necessary. */
	if (bar_start < start)
		bar_start = start;
	if (bar_end > end)
		bar_end = end;
	bar_length = bar_end - bar_start + 1;

	res = rman_reserve_resource(&iov->rman, bar_start, bar_end,
	    bar_length, flags, child);
	if (res == NULL)
		return (NULL);

	rle = resource_list_add(&dinfo->resources, SYS_RES_MEMORY, *rid,
	    bar_start, bar_end, 1);
	if (rle == NULL) {
		rman_release_resource(res);
		return (NULL);
	}

	rman_set_rid(res, *rid);

	if (flags & RF_ACTIVE) {
		error = bus_activate_resource(child, SYS_RES_MEMORY, *rid, res);
		if (error != 0) {
			resource_list_delete(&dinfo->resources, SYS_RES_MEMORY,
			    *rid);
			rman_release_resource(res);
			return (NULL);
		}
	}
	rle->res = res;

	return (res);
}

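/*
 * Bus method releasing a VF memory resource back to the PF's rman,
 * deactivating it first if necessary.
 */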
int
pci_vf_release_mem_resource(device_t dev, device_t child, int rid,
    struct resource *r)
{
	struct pci_devinfo *dinfo;
	struct resource_list_entry *rle;
	int error;

	dinfo = device_get_ivars(child);

	if (rman_get_flags(r) & RF_ACTIVE) {
		error = bus_deactivate_resource(child, SYS_RES_MEMORY, rid, r);
		if (error != 0)
			return (error);
	}

	rle = resource_list_find(&dinfo->resources, SYS_RES_MEMORY, rid);
	if (rle != NULL) {
		rle->res = NULL;
		resource_list_delete(&dinfo->resources, SYS_RES_MEMORY,
		    rid);
	}

	return (rman_release_resource(r));
}
1086