1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Support routines for initializing a PCI subsystem
4 *
5 * Extruded from code written by
6 * Dave Rusling (david.rusling@reo.mts.dec.com)
7 * David Mosberger (davidm@cs.arizona.edu)
8 * David Miller (davem@redhat.com)
9 *
10 * Nov 2000, Ivan Kokshaysky <ink@jurassic.park.msu.ru>
11 * PCI-PCI bridges cleanup, sorted resource allocation.
12 * Feb 2002, Ivan Kokshaysky <ink@jurassic.park.msu.ru>
13 * Converted to allocation in 3 passes, which gives
14 * tighter packing. Prefetchable range support.
15 */
16
17 #include <linux/bitops.h>
18 #include <linux/bug.h>
19 #include <linux/init.h>
20 #include <linux/kernel.h>
21 #include <linux/module.h>
22 #include <linux/pci.h>
23 #include <linux/errno.h>
24 #include <linux/ioport.h>
25 #include <linux/cache.h>
26 #include <linux/limits.h>
27 #include <linux/sizes.h>
28 #include <linux/slab.h>
29 #include <linux/acpi.h>
30 #include "pci.h"
31
32 #define PCI_RES_TYPE_MASK \
33 (IORESOURCE_IO | IORESOURCE_MEM | IORESOURCE_PREFETCH |\
34 IORESOURCE_MEM_64)
35
36 unsigned int pci_flags;
37 EXPORT_SYMBOL_GPL(pci_flags);
38
/*
 * Per-resource bookkeeping used during sizing/assignment.  Saves the
 * resource's original start/end/flags so a failed assignment pass can be
 * rolled back, plus any optional extra size/alignment requested for it.
 */
struct pci_dev_resource {
	struct list_head list;		/* Node in a sizing/assignment list */
	struct resource *res;		/* Resource being tracked */
	struct pci_dev *dev;		/* Device that owns @res */
	resource_size_t start;		/* Saved original start */
	resource_size_t end;		/* Saved original end */
	resource_size_t add_size;	/* Optional additional size */
	resource_size_t min_align;	/* Minimum window alignment */
	unsigned long flags;		/* Saved original flags */
};
49
free_list(struct list_head * head)50 static void free_list(struct list_head *head)
51 {
52 struct pci_dev_resource *dev_res, *tmp;
53
54 list_for_each_entry_safe(dev_res, tmp, head, list) {
55 list_del(&dev_res->list);
56 kfree(dev_res);
57 }
58 }
59
60 /**
61 * add_to_list() - Add a new resource tracker to the list
62 * @head: Head of the list
63 * @dev: Device to which the resource belongs
64 * @res: Resource to be tracked
65 * @add_size: Additional size to be optionally added to the resource
66 * @min_align: Minimum memory window alignment
67 */
/**
 * add_to_list() - Add a new resource tracker to the list
 * @head: Head of the list
 * @dev: Device to which the resource belongs
 * @res: Resource to be tracked
 * @add_size: Additional size to be optionally added to the resource
 * @min_align: Minimum memory window alignment
 *
 * Return: 0 on success, -ENOMEM when the tracker cannot be allocated.
 */
static int add_to_list(struct list_head *head, struct pci_dev *dev,
		       struct resource *res, resource_size_t add_size,
		       resource_size_t min_align)
{
	struct pci_dev_resource *dev_res;

	dev_res = kzalloc(sizeof(*dev_res), GFP_KERNEL);
	if (!dev_res)
		return -ENOMEM;

	dev_res->res = res;
	dev_res->dev = dev;
	/* Snapshot the current state so it can be restored later */
	dev_res->start = res->start;
	dev_res->end = res->end;
	dev_res->flags = res->flags;
	dev_res->add_size = add_size;
	dev_res->min_align = min_align;

	list_add(&dev_res->list, head);

	return 0;
}
90
remove_from_list(struct list_head * head,struct resource * res)91 static void remove_from_list(struct list_head *head, struct resource *res)
92 {
93 struct pci_dev_resource *dev_res, *tmp;
94
95 list_for_each_entry_safe(dev_res, tmp, head, list) {
96 if (dev_res->res == res) {
97 list_del(&dev_res->list);
98 kfree(dev_res);
99 break;
100 }
101 }
102 }
103
res_to_dev_res(struct list_head * head,struct resource * res)104 static struct pci_dev_resource *res_to_dev_res(struct list_head *head,
105 struct resource *res)
106 {
107 struct pci_dev_resource *dev_res;
108
109 list_for_each_entry(dev_res, head, list) {
110 if (dev_res->res == res)
111 return dev_res;
112 }
113
114 return NULL;
115 }
116
get_res_add_size(struct list_head * head,struct resource * res)117 static resource_size_t get_res_add_size(struct list_head *head,
118 struct resource *res)
119 {
120 struct pci_dev_resource *dev_res;
121
122 dev_res = res_to_dev_res(head, res);
123 return dev_res ? dev_res->add_size : 0;
124 }
125
get_res_add_align(struct list_head * head,struct resource * res)126 static resource_size_t get_res_add_align(struct list_head *head,
127 struct resource *res)
128 {
129 struct pci_dev_resource *dev_res;
130
131 dev_res = res_to_dev_res(head, res);
132 return dev_res ? dev_res->min_align : 0;
133 }
134
/*
 * Restore the resource to the state saved in its tracker.  Refuses (with a
 * one-time warning) to touch a resource that is currently assigned, since
 * overwriting start/end of an in-tree resource would corrupt the tree.
 */
static void restore_dev_resource(struct pci_dev_resource *dev_res)
{
	struct resource *res = dev_res->res;

	if (WARN_ON_ONCE(res->parent))
		return;

	res->start = dev_res->start;
	res->end = dev_res->end;
	res->flags = dev_res->flags;
}
146
147 /*
148 * Helper function for sizing routines. Assigned resources have non-NULL
149 * parent resource.
150 *
151 * Return first unassigned resource of the correct type. If there is none,
152 * return first assigned resource of the correct type. If none of the
153 * above, return NULL.
154 *
155 * Returning an assigned resource of the correct type allows the caller to
156 * distinguish between already assigned and no resource of the correct type.
157 */
find_bus_resource_of_type(struct pci_bus * bus,unsigned long type_mask,unsigned long type)158 static struct resource *find_bus_resource_of_type(struct pci_bus *bus,
159 unsigned long type_mask,
160 unsigned long type)
161 {
162 struct resource *r, *r_assigned = NULL;
163
164 pci_bus_for_each_resource(bus, r) {
165 if (!r || r == &ioport_resource || r == &iomem_resource)
166 continue;
167
168 if ((r->flags & type_mask) != type)
169 continue;
170
171 if (!r->parent)
172 return r;
173 if (!r_assigned)
174 r_assigned = r;
175 }
176 return r_assigned;
177 }
178
179 /**
180 * pbus_select_window_for_type - Select bridge window for a resource type
181 * @bus: PCI bus
182 * @type: Resource type (resource flags can be passed as is)
183 *
184 * Select the bridge window based on a resource @type.
185 *
186 * For memory resources, the selection is done as follows:
187 *
188 * Any non-prefetchable resource is put into the non-prefetchable window.
189 *
190 * If there is no prefetchable MMIO window, put all memory resources into the
191 * non-prefetchable window.
192 *
193 * If there's a 64-bit prefetchable MMIO window, put all 64-bit prefetchable
194 * resources into it and place 32-bit prefetchable memory into the
195 * non-prefetchable window.
196 *
197 * Otherwise, put all prefetchable resources into the prefetchable window.
198 *
199 * Return: the bridge window resource or NULL if no bridge window is found.
200 */
static struct resource *pbus_select_window_for_type(struct pci_bus *bus,
						    unsigned long type)
{
	int iores_type = type & IORESOURCE_TYPE_BITS; /* w/o 64bit & pref */
	struct resource *mmio, *mmio_pref, *win;

	type &= PCI_RES_TYPE_MASK; /* with 64bit & pref */

	/* Only I/O and memory resources map to bridge windows */
	if ((iores_type != IORESOURCE_IO) && (iores_type != IORESOURCE_MEM))
		return NULL;

	if (pci_is_root_bus(bus)) {
		/*
		 * Root buses have host bridge apertures instead of bridge
		 * windows; fall back to progressively less specific types:
		 * exact match first, then without 64-bit, then without pref.
		 */
		win = find_bus_resource_of_type(bus, type, type);
		if (win)
			return win;

		type &= ~IORESOURCE_MEM_64;
		win = find_bus_resource_of_type(bus, type, type);
		if (win)
			return win;

		type &= ~IORESOURCE_PREFETCH;
		return find_bus_resource_of_type(bus, type, type);
	}

	switch (iores_type) {
	case IORESOURCE_IO:
		return pci_bus_resource_n(bus, PCI_BUS_BRIDGE_IO_WINDOW);

	case IORESOURCE_MEM:
		mmio = pci_bus_resource_n(bus, PCI_BUS_BRIDGE_MEM_WINDOW);
		mmio_pref = pci_bus_resource_n(bus, PCI_BUS_BRIDGE_PREF_MEM_WINDOW);

		/*
		 * Non-prefetchable resources, or any memory resource when
		 * the bridge has no prefetchable window, go into the
		 * non-prefetchable window.
		 */
		if (!(type & IORESOURCE_PREFETCH) ||
		    !(mmio_pref->flags & IORESOURCE_MEM))
			return mmio;

		/*
		 * Use the prefetchable window for 64-bit prefetchable
		 * resources, or for any prefetchable resource when the
		 * window itself is only 32-bit.
		 */
		if ((type & IORESOURCE_MEM_64) ||
		    !(mmio_pref->flags & IORESOURCE_MEM_64))
			return mmio_pref;

		/* 32-bit pref resource under a 64-bit pref window */
		return mmio;
	default:
		return NULL;
	}
}
247
248 /**
249 * pbus_select_window - Select bridge window for a resource
250 * @bus: PCI bus
251 * @res: Resource
252 *
253 * Select the bridge window for @res. If the resource is already assigned,
254 * return the current bridge window.
255 *
256 * For memory resources, the selection is done as follows:
257 *
258 * Any non-prefetchable resource is put into the non-prefetchable window.
259 *
260 * If there is no prefetchable MMIO window, put all memory resources into the
261 * non-prefetchable window.
262 *
263 * If there's a 64-bit prefetchable MMIO window, put all 64-bit prefetchable
264 * resources into it and place 32-bit prefetchable memory into the
265 * non-prefetchable window.
266 *
267 * Otherwise, put all prefetchable resources into the prefetchable window.
268 *
269 * Return: the bridge window resource or NULL if no bridge window is found.
270 */
pbus_select_window(struct pci_bus * bus,const struct resource * res)271 struct resource *pbus_select_window(struct pci_bus *bus,
272 const struct resource *res)
273 {
274 if (res->parent)
275 return res->parent;
276
277 return pbus_select_window_for_type(bus, res->flags);
278 }
279
pdev_resources_assignable(struct pci_dev * dev)280 static bool pdev_resources_assignable(struct pci_dev *dev)
281 {
282 u16 class = dev->class >> 8, command;
283
284 /* Don't touch classless devices or host bridges or IOAPICs */
285 if (class == PCI_CLASS_NOT_DEFINED || class == PCI_CLASS_BRIDGE_HOST)
286 return false;
287
288 /* Don't touch IOAPIC devices already enabled by firmware */
289 if (class == PCI_CLASS_SYSTEM_PIC) {
290 pci_read_config_word(dev, PCI_COMMAND, &command);
291 if (command & (PCI_COMMAND_IO | PCI_COMMAND_MEMORY))
292 return false;
293 }
294
295 return true;
296 }
297
pdev_resource_assignable(struct pci_dev * dev,struct resource * res)298 static bool pdev_resource_assignable(struct pci_dev *dev, struct resource *res)
299 {
300 int idx = pci_resource_num(dev, res);
301
302 if (!res->flags)
303 return false;
304
305 if (idx >= PCI_BRIDGE_RESOURCES && idx <= PCI_BRIDGE_RESOURCE_END &&
306 res->flags & IORESOURCE_DISABLED)
307 return false;
308
309 return true;
310 }
311
pdev_resource_should_fit(struct pci_dev * dev,struct resource * res)312 static bool pdev_resource_should_fit(struct pci_dev *dev, struct resource *res)
313 {
314 if (res->parent)
315 return false;
316
317 if (res->flags & IORESOURCE_PCI_FIXED)
318 return false;
319
320 return pdev_resource_assignable(dev, res);
321 }
322
323 /* Sort resources by alignment */
/*
 * Add @dev's fittable resources to @head, kept sorted by decreasing
 * alignment (insertion sort), so the hardest-to-place resources are
 * assigned first.
 */
static void pdev_sort_resources(struct pci_dev *dev, struct list_head *head)
{
	struct resource *r;
	int i;

	if (!pdev_resources_assignable(dev))
		return;

	pci_dev_for_each_resource(dev, r, i) {
		const char *r_name = pci_resource_name(dev, i);
		struct pci_dev_resource *dev_res, *tmp;
		resource_size_t r_align;
		struct list_head *n;

		if (!pdev_resource_should_fit(dev, r))
			continue;

		/* A zero alignment cannot be placed; warn and skip it */
		r_align = pci_resource_alignment(dev, r);
		if (!r_align) {
			pci_warn(dev, "%s %pR: alignment must not be zero\n",
				 r_name, r);
			continue;
		}

		tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
		if (!tmp)
			panic("%s: kzalloc() failed!\n", __func__);
		tmp->res = r;
		tmp->dev = dev;
		/* Snapshot current state so a failed pass can be undone */
		tmp->start = r->start;
		tmp->end = r->end;
		tmp->flags = r->flags;

		/* Fallback is smallest one or list is empty */
		n = head;
		list_for_each_entry(dev_res, head, list) {
			resource_size_t align;

			align = pci_resource_alignment(dev_res->dev,
						       dev_res->res);

			/* First entry with smaller alignment marks the slot */
			if (r_align > align) {
				n = &dev_res->list;
				break;
			}
		}
		/* Insert it just before n */
		list_add_tail(&tmp->list, n);
	}
}
374
pci_resource_is_optional(const struct pci_dev * dev,int resno)375 bool pci_resource_is_optional(const struct pci_dev *dev, int resno)
376 {
377 const struct resource *res = pci_resource_n(dev, resno);
378
379 if (pci_resource_is_iov(resno))
380 return true;
381 if (resno == PCI_ROM_RESOURCE && !(res->flags & IORESOURCE_ROM_ENABLE))
382 return true;
383
384 return false;
385 }
386
/*
 * Reset a resource after a failed assignment.  Bridge windows keep their
 * range (the sizing code still needs it) and are only flagged unset;
 * ordinary device BARs are cleared completely.
 */
static inline void reset_resource(struct pci_dev *dev, struct resource *res)
{
	int idx = pci_resource_num(dev, res);

	if (idx >= PCI_BRIDGE_RESOURCES && idx <= PCI_BRIDGE_RESOURCE_END) {
		res->flags |= IORESOURCE_UNSET;
		return;
	}

	res->start = 0;
	res->end = 0;
	res->flags = 0;
}
400
401 /**
402 * reassign_resources_sorted() - Satisfy any additional resource requests
403 *
404 * @realloc_head: Head of the list tracking requests requiring
405 * additional resources
406 * @head: Head of the list tracking requests with allocated
407 * resources
408 *
409 * Walk through each element of the realloc_head and try to procure additional
410 * resources for the element, provided the element is in the head list.
411 */
static void reassign_resources_sorted(struct list_head *realloc_head,
				      struct list_head *head)
{
	struct pci_dev_resource *add_res, *tmp;
	struct pci_dev_resource *dev_res;
	struct pci_dev *dev;
	struct resource *res;
	const char *res_name;
	resource_size_t add_size, align;
	int idx;

	list_for_each_entry_safe(add_res, tmp, realloc_head, list) {
		bool found_match = false;

		res = add_res->res;
		dev = add_res->dev;
		idx = pci_resource_num(dev, res);

		/*
		 * Skip resource that failed the earlier assignment and is
		 * not optional as it would just fail again.
		 */
		if (!res->parent && resource_size(res) &&
		    !pci_resource_is_optional(dev, idx))
			goto out;

		/* Skip this resource if not found in head list */
		list_for_each_entry(dev_res, head, list) {
			if (dev_res->res == res) {
				found_match = true;
				break;
			}
		}
		if (!found_match) /* Just skip */
			continue;

		res_name = pci_resource_name(dev, idx);
		add_size = add_res->add_size;
		align = add_res->min_align;
		if (!res->parent) {
			/* Unassigned: retry with the optional size included */
			resource_set_range(res, align,
					   resource_size(res) + add_size);
			if (pci_assign_resource(dev, idx)) {
				pci_dbg(dev,
					"%s %pR: ignoring failure in optional allocation\n",
					res_name, res);
			}
		} else if (add_size > 0) {
			/* Already assigned: try to grow it in place */
			res->flags |= add_res->flags &
				(IORESOURCE_STARTALIGN|IORESOURCE_SIZEALIGN);
			if (pci_reassign_resource(dev, idx, add_size, align))
				pci_info(dev, "%s %pR: failed to add optional %llx\n",
					 res_name, res,
					 (unsigned long long) add_size);
		}
out:
		/* Processed either way: drop the tracker from realloc_head */
		list_del(&add_res->list);
		kfree(add_res);
	}
}
472
473 /**
474 * assign_requested_resources_sorted() - Satisfy resource requests
475 *
476 * @head: Head of the list tracking requests for resources
477 * @fail_head: Head of the list tracking requests that could not be
478 * allocated
479 * @optional: Assign also optional resources
480 *
481 * Satisfy resource requests of each element in the list. Add requests that
482 * could not be satisfied to the failed_list.
483 */
static void assign_requested_resources_sorted(struct list_head *head,
					      struct list_head *fail_head,
					      bool optional)
{
	struct pci_dev_resource *dev_res;

	list_for_each_entry(dev_res, head, list) {
		struct resource *res = dev_res->res;
		struct pci_dev *dev = dev_res->dev;
		int idx = pci_resource_num(dev, res);

		/* Nothing requested for this resource */
		if (!resource_size(res))
			continue;

		/* Optional resources are only assigned when asked to */
		if (!optional && pci_resource_is_optional(dev, idx))
			continue;

		/* Record failures on fail_head when the caller wants them */
		if (pci_assign_resource(dev, idx) && fail_head)
			add_to_list(fail_head, dev, res,
				    0 /* don't care */,
				    0 /* don't care */);
	}
}
515
pci_fail_res_type_mask(struct list_head * fail_head)516 static unsigned long pci_fail_res_type_mask(struct list_head *fail_head)
517 {
518 struct pci_dev_resource *fail_res;
519 unsigned long mask = 0;
520
521 /* Check failed type */
522 list_for_each_entry(fail_res, fail_head, list)
523 mask |= fail_res->flags;
524
525 /*
526 * One pref failed resource will set IORESOURCE_MEM, as we can
527 * allocate pref in non-pref range. Will release all assigned
528 * non-pref sibling resources according to that bit.
529 */
530 return mask & (IORESOURCE_IO | IORESOURCE_MEM | IORESOURCE_PREFETCH);
531 }
532
pci_need_to_release(unsigned long mask,struct resource * res)533 static bool pci_need_to_release(unsigned long mask, struct resource *res)
534 {
535 if (res->flags & IORESOURCE_IO)
536 return !!(mask & IORESOURCE_IO);
537
538 /* Check pref at first */
539 if (res->flags & IORESOURCE_PREFETCH) {
540 if (mask & IORESOURCE_PREFETCH)
541 return true;
542 /* Count pref if its parent is non-pref */
543 else if ((mask & IORESOURCE_MEM) &&
544 !(res->parent->flags & IORESOURCE_PREFETCH))
545 return true;
546 else
547 return false;
548 }
549
550 if (res->flags & IORESOURCE_MEM)
551 return !!(mask & IORESOURCE_MEM);
552
553 return false; /* Should not get here */
554 }
555
556 /* Return: @true if assignment of a required resource failed. */
pci_required_resource_failed(struct list_head * fail_head,unsigned long type)557 static bool pci_required_resource_failed(struct list_head *fail_head,
558 unsigned long type)
559 {
560 struct pci_dev_resource *fail_res;
561
562 type &= PCI_RES_TYPE_MASK;
563
564 list_for_each_entry(fail_res, fail_head, list) {
565 int idx = pci_resource_num(fail_res->dev, fail_res->res);
566
567 if (type && (fail_res->flags & PCI_RES_TYPE_MASK) != type)
568 continue;
569
570 if (!pci_resource_is_optional(fail_res->dev, idx))
571 return true;
572 }
573 return false;
574 }
575
static void __assign_resources_sorted(struct list_head *head,
				      struct list_head *realloc_head,
				      struct list_head *fail_head)
{
	/*
	 * Should not assign requested resources at first. They could be
	 * adjacent, so later reassign can not reallocate them one by one in
	 * parent resource window.
	 *
	 * Try to assign required and any optional resources at beginning
	 * (add_size included). If all required resources were successfully
	 * assigned, get out early. If could not do that, we still try to
	 * assign required at first, then try to reassign some optional
	 * resources.
	 *
	 * Separate three resource type checking if we need to release
	 * assigned resource after requested + add_size try.
	 *
	 *	1. If IO port assignment fails, will release assigned IO
	 *	   port.
	 *	2. If pref MMIO assignment fails, release assigned pref
	 *	   MMIO. If assigned pref MMIO's parent is non-pref MMIO
	 *	   and non-pref MMIO assignment fails, will release that
	 *	   assigned pref MMIO.
	 *	3. If non-pref MMIO assignment fails or pref MMIO
	 *	   assignment fails, will release assigned non-pref MMIO.
	 */
	LIST_HEAD(save_head);
	LIST_HEAD(local_fail_head);
	LIST_HEAD(dummy_head);
	struct pci_dev_resource *save_res;
	struct pci_dev_resource *dev_res, *tmp_res, *dev_res2;
	struct resource *res;
	struct pci_dev *dev;
	unsigned long fail_type;
	resource_size_t add_align, align;

	/* A dummy list lets the rest of the code avoid NULL checks */
	if (!realloc_head)
		realloc_head = &dummy_head;

	/* Check if optional add_size is there */
	if (list_empty(realloc_head))
		goto assign;

	/* Save original start, end, flags etc at first */
	list_for_each_entry(dev_res, head, list) {
		if (add_to_list(&save_head, dev_res->dev, dev_res->res, 0, 0)) {
			free_list(&save_head);
			goto assign;
		}
	}

	/* Update res in head list with add_size in realloc_head list */
	list_for_each_entry_safe(dev_res, tmp_res, head, list) {
		res = dev_res->res;

		res->end += get_res_add_size(realloc_head, res);

		/*
		 * There are two kinds of additional resources in the list:
		 * 1. bridge resource  -- IORESOURCE_STARTALIGN
		 * 2. SR-IOV resource  -- IORESOURCE_SIZEALIGN
		 * Here just fix the additional alignment for bridge
		 */
		if (!(res->flags & IORESOURCE_STARTALIGN))
			continue;

		add_align = get_res_add_align(realloc_head, res);

		/*
		 * The "head" list is sorted by alignment so resources with
		 * bigger alignment will be assigned first.  After we
		 * change the alignment of a dev_res in "head" list, we
		 * need to reorder the list by alignment to make it
		 * consistent.
		 */
		if (add_align > res->start) {
			resource_set_range(res, add_align, resource_size(res));

			list_for_each_entry(dev_res2, head, list) {
				align = pci_resource_alignment(dev_res2->dev,
							       dev_res2->res);
				if (add_align > align) {
					list_move_tail(&dev_res->list,
						       &dev_res2->list);
					break;
				}
			}
		}

	}

assign:
	/* First try: everything, optional add_size included */
	assign_requested_resources_sorted(head, &local_fail_head, true);

	/* All non-optional resources assigned? */
	if (list_empty(&local_fail_head)) {
		/* Remove head list from realloc_head list */
		list_for_each_entry(dev_res, head, list)
			remove_from_list(realloc_head, dev_res->res);
		free_list(&save_head);
		goto out;
	}

	/* Without realloc_head and only optional fails, nothing more to do. */
	if (!pci_required_resource_failed(&local_fail_head, 0) &&
	    list_empty(realloc_head)) {
		/* Restore only still-unassigned resources to saved state */
		list_for_each_entry(save_res, &save_head, list) {
			struct resource *res = save_res->res;

			if (res->parent)
				continue;

			restore_dev_resource(save_res);
		}
		free_list(&local_fail_head);
		free_list(&save_head);
		goto out;
	}

	/* Check failed type */
	fail_type = pci_fail_res_type_mask(&local_fail_head);
	/* Remove not need to be released assigned res from head list etc */
	list_for_each_entry_safe(dev_res, tmp_res, head, list) {
		res = dev_res->res;

		if (res->parent && !pci_need_to_release(fail_type, res)) {
			/* Remove it from realloc_head list */
			remove_from_list(realloc_head, res);
			remove_from_list(&save_head, res);
			list_del(&dev_res->list);
			kfree(dev_res);
		}
	}

	free_list(&local_fail_head);
	/* Release assigned resource */
	list_for_each_entry(dev_res, head, list) {
		res = dev_res->res;
		dev = dev_res->dev;

		pci_release_resource(dev, pci_resource_num(dev, res));
		restore_dev_resource(dev_res);
	}
	/* Restore start/end/flags from saved list */
	list_for_each_entry(save_res, &save_head, list)
		restore_dev_resource(save_res);
	free_list(&save_head);

	/* Satisfy the must-have resource requests */
	assign_requested_resources_sorted(head, NULL, false);

	/* Try to satisfy any additional optional resource requests */
	if (!list_empty(realloc_head))
		reassign_resources_sorted(realloc_head, head);

out:
	/* Reset any failed resource, cannot use fail_head as it can be NULL. */
	list_for_each_entry(dev_res, head, list) {
		res = dev_res->res;
		dev = dev_res->dev;

		if (res->parent)
			continue;

		if (fail_head) {
			add_to_list(fail_head, dev, res,
				    0 /* don't care */,
				    0 /* don't care */);
		}

		reset_resource(dev, res);
	}

	free_list(head);
}
752
pdev_assign_resources_sorted(struct pci_dev * dev,struct list_head * add_head,struct list_head * fail_head)753 static void pdev_assign_resources_sorted(struct pci_dev *dev,
754 struct list_head *add_head,
755 struct list_head *fail_head)
756 {
757 LIST_HEAD(head);
758
759 pdev_sort_resources(dev, &head);
760 __assign_resources_sorted(&head, add_head, fail_head);
761
762 }
763
pbus_assign_resources_sorted(const struct pci_bus * bus,struct list_head * realloc_head,struct list_head * fail_head)764 static void pbus_assign_resources_sorted(const struct pci_bus *bus,
765 struct list_head *realloc_head,
766 struct list_head *fail_head)
767 {
768 struct pci_dev *dev;
769 LIST_HEAD(head);
770
771 list_for_each_entry(dev, &bus->devices, bus_list)
772 pdev_sort_resources(dev, &head);
773
774 __assign_resources_sorted(&head, realloc_head, fail_head);
775 }
776
/*
 * Program a CardBus bridge's four windows (two I/O, two memory) from the
 * bus resources that were actually assigned; unassigned windows are left
 * untouched.
 */
void pci_setup_cardbus(struct pci_bus *bus)
{
	struct pci_dev *bridge = bus->self;
	struct resource *res;
	struct pci_bus_region region;

	pci_info(bridge, "CardBus bridge to %pR\n",
		 &bus->busn_res);

	res = bus->resource[0];
	pcibios_resource_to_bus(bridge->bus, &region, res);
	if (res->parent && res->flags & IORESOURCE_IO) {
		/*
		 * The IO resource is allocated a range twice as large as it
		 * would normally need.  This allows us to set both IO regs.
		 */
		pci_info(bridge, "  bridge window %pR\n", res);
		pci_write_config_dword(bridge, PCI_CB_IO_BASE_0,
					region.start);
		pci_write_config_dword(bridge, PCI_CB_IO_LIMIT_0,
					region.end);
	}

	res = bus->resource[1];
	pcibios_resource_to_bus(bridge->bus, &region, res);
	if (res->parent && res->flags & IORESOURCE_IO) {
		pci_info(bridge, "  bridge window %pR\n", res);
		pci_write_config_dword(bridge, PCI_CB_IO_BASE_1,
					region.start);
		pci_write_config_dword(bridge, PCI_CB_IO_LIMIT_1,
					region.end);
	}

	res = bus->resource[2];
	pcibios_resource_to_bus(bridge->bus, &region, res);
	if (res->parent && res->flags & IORESOURCE_MEM) {
		pci_info(bridge, "  bridge window %pR\n", res);
		pci_write_config_dword(bridge, PCI_CB_MEMORY_BASE_0,
					region.start);
		pci_write_config_dword(bridge, PCI_CB_MEMORY_LIMIT_0,
					region.end);
	}

	res = bus->resource[3];
	pcibios_resource_to_bus(bridge->bus, &region, res);
	if (res->parent && res->flags & IORESOURCE_MEM) {
		pci_info(bridge, "  bridge window %pR\n", res);
		pci_write_config_dword(bridge, PCI_CB_MEMORY_BASE_1,
					region.start);
		pci_write_config_dword(bridge, PCI_CB_MEMORY_LIMIT_1,
					region.end);
	}
}
EXPORT_SYMBOL(pci_setup_cardbus);
831
832 /*
833 * Initialize bridges with base/limit values we have collected. PCI-to-PCI
834 * Bridge Architecture Specification rev. 1.1 (1998) requires that if there
835 * are no I/O ports or memory behind the bridge, the corresponding range
836 * must be turned off by writing base value greater than limit to the
837 * bridge's base/limit registers.
838 *
839 * Note: care must be taken when updating I/O base/limit registers of
840 * bridges which support 32-bit I/O. This update requires two config space
841 * writes, so it's quite possible that an I/O window of the bridge will
842 * have some undesirable address (e.g. 0) after the first write. Ditto
843 * 64-bit prefetchable MMIO.
844 */
static void pci_setup_bridge_io(struct pci_dev *bridge)
{
	struct resource *res;
	const char *res_name;
	struct pci_bus_region region;
	unsigned long io_mask;
	u8 io_base_lo, io_limit_lo;
	u16 l;
	u32 io_upper16;

	/* 1K-granularity bridges use a different base/limit mask */
	io_mask = PCI_IO_RANGE_MASK;
	if (bridge->io_window_1k)
		io_mask = PCI_IO_1K_RANGE_MASK;

	/* Set up the top and bottom of the PCI I/O segment for this bus */
	res = &bridge->resource[PCI_BRIDGE_IO_WINDOW];
	res_name = pci_resource_name(bridge, PCI_BRIDGE_IO_WINDOW);
	pcibios_resource_to_bus(bridge->bus, &region, res);
	if (res->parent && res->flags & IORESOURCE_IO) {
		pci_read_config_word(bridge, PCI_IO_BASE, &l);
		io_base_lo = (region.start >> 8) & io_mask;
		io_limit_lo = (region.end >> 8) & io_mask;
		l = ((u16) io_limit_lo << 8) | io_base_lo;
		/* Set up upper 16 bits of I/O base/limit */
		io_upper16 = (region.end & 0xffff0000) | (region.start >> 16);
		pci_info(bridge, "  %s %pR\n", res_name, res);
	} else {
		/* Clear upper 16 bits of I/O base/limit */
		io_upper16 = 0;
		/* Base > limit disables the I/O window */
		l = 0x00f0;
	}
	/* Temporarily disable the I/O range before updating PCI_IO_BASE */
	pci_write_config_dword(bridge, PCI_IO_BASE_UPPER16, 0x0000ffff);
	/* Update lower 16 bits of I/O base/limit */
	pci_write_config_word(bridge, PCI_IO_BASE, l);
	/* Update upper 16 bits of I/O base/limit */
	pci_write_config_dword(bridge, PCI_IO_BASE_UPPER16, io_upper16);
}
883
pci_setup_bridge_mmio(struct pci_dev * bridge)884 static void pci_setup_bridge_mmio(struct pci_dev *bridge)
885 {
886 struct resource *res;
887 const char *res_name;
888 struct pci_bus_region region;
889 u32 l;
890
891 /* Set up the top and bottom of the PCI Memory segment for this bus */
892 res = &bridge->resource[PCI_BRIDGE_MEM_WINDOW];
893 res_name = pci_resource_name(bridge, PCI_BRIDGE_MEM_WINDOW);
894 pcibios_resource_to_bus(bridge->bus, ®ion, res);
895 if (res->parent && res->flags & IORESOURCE_MEM) {
896 l = (region.start >> 16) & 0xfff0;
897 l |= region.end & 0xfff00000;
898 pci_info(bridge, " %s %pR\n", res_name, res);
899 } else {
900 l = 0x0000fff0;
901 }
902 pci_write_config_dword(bridge, PCI_MEMORY_BASE, l);
903 }
904
static void pci_setup_bridge_mmio_pref(struct pci_dev *bridge)
{
	struct resource *res;
	const char *res_name;
	struct pci_bus_region region;
	u32 l, bu, lu;

	/*
	 * Clear out the upper 32 bits of PREF limit.  If
	 * PCI_PREF_BASE_UPPER32 was non-zero, this temporarily disables
	 * PREF range, which is ok.
	 */
	pci_write_config_dword(bridge, PCI_PREF_LIMIT_UPPER32, 0);

	/* Set up PREF base/limit */
	bu = lu = 0;
	res = &bridge->resource[PCI_BRIDGE_PREF_MEM_WINDOW];
	res_name = pci_resource_name(bridge, PCI_BRIDGE_PREF_MEM_WINDOW);
	pcibios_resource_to_bus(bridge->bus, &region, res);
	if (res->parent && res->flags & IORESOURCE_PREFETCH) {
		l = (region.start >> 16) & 0xfff0;
		l |= region.end & 0xfff00000;
		/* Only a 64-bit window uses the upper base/limit registers */
		if (res->flags & IORESOURCE_MEM_64) {
			bu = upper_32_bits(region.start);
			lu = upper_32_bits(region.end);
		}
		pci_info(bridge, "  %s %pR\n", res_name, res);
	} else {
		/* Base > limit disables the prefetchable window */
		l = 0x0000fff0;
	}
	pci_write_config_dword(bridge, PCI_PREF_MEMORY_BASE, l);

	/* Set the upper 32 bits of PREF base & limit */
	pci_write_config_dword(bridge, PCI_PREF_BASE_UPPER32, bu);
	pci_write_config_dword(bridge, PCI_PREF_LIMIT_UPPER32, lu);
}
941
__pci_setup_bridge(struct pci_bus * bus,unsigned long type)942 static void __pci_setup_bridge(struct pci_bus *bus, unsigned long type)
943 {
944 struct pci_dev *bridge = bus->self;
945
946 pci_info(bridge, "PCI bridge to %pR\n", &bus->busn_res);
947
948 if (type & IORESOURCE_IO)
949 pci_setup_bridge_io(bridge);
950
951 if (type & IORESOURCE_MEM)
952 pci_setup_bridge_mmio(bridge);
953
954 if (type & IORESOURCE_PREFETCH)
955 pci_setup_bridge_mmio_pref(bridge);
956
957 pci_write_config_word(bridge, PCI_BRIDGE_CONTROL, bus->bridge_ctl);
958 }
959
pci_setup_one_bridge_window(struct pci_dev * bridge,int resno)960 static void pci_setup_one_bridge_window(struct pci_dev *bridge, int resno)
961 {
962 switch (resno) {
963 case PCI_BRIDGE_IO_WINDOW:
964 pci_setup_bridge_io(bridge);
965 break;
966 case PCI_BRIDGE_MEM_WINDOW:
967 pci_setup_bridge_mmio(bridge);
968 break;
969 case PCI_BRIDGE_PREF_MEM_WINDOW:
970 pci_setup_bridge_mmio_pref(bridge);
971 break;
972 default:
973 return;
974 }
975 }
976
/* Arch hook called before bridge windows are programmed; default is a no-op. */
void __weak pcibios_setup_bridge(struct pci_bus *bus, unsigned long type)
{
}
980
pci_setup_bridge(struct pci_bus * bus)981 static void pci_setup_bridge(struct pci_bus *bus)
982 {
983 unsigned long type = IORESOURCE_IO | IORESOURCE_MEM |
984 IORESOURCE_PREFETCH;
985
986 pcibios_setup_bridge(bus, type);
987 __pci_setup_bridge(bus, type);
988 }
989
990
/*
 * Claim bridge window @i of @bridge, clipping it to fit the upstream
 * window if the full claim fails.
 *
 * Return: 0 when the window was claimed or when there is nothing to do
 * (not a bridge window, not a PCI-to-PCI bridge); -EINVAL otherwise.
 */
int pci_claim_bridge_resource(struct pci_dev *bridge, int i)
{
	int ret = -EINVAL;

	/* Only bridge window resources are handled here */
	if (i < PCI_BRIDGE_RESOURCES || i > PCI_BRIDGE_RESOURCE_END)
		return 0;

	if (pci_claim_resource(bridge, i) == 0)
		return 0;	/* Claimed the window */

	/* Clipping below only makes sense for PCI-to-PCI bridges */
	if ((bridge->class >> 8) != PCI_CLASS_BRIDGE_PCI)
		return 0;

	/* Only the IO/MEM/PREF windows can be clipped */
	if (i > PCI_BRIDGE_PREF_MEM_WINDOW)
		return -EINVAL;

	/* Try to clip the resource and claim the smaller window */
	if (pci_bus_clip_resource(bridge, i))
		ret = pci_claim_resource(bridge, i);

	/* Re-program the hardware with whatever the window ended up as */
	pci_setup_one_bridge_window(bridge, i);

	return ret;
}
1015
1016 /*
1017 * Check whether the bridge supports optional I/O and prefetchable memory
1018 * ranges. If not, the respective base/limit registers must be read-only
1019 * and read as 0.
1020 */
pci_bridge_check_ranges(struct pci_bus * bus)1021 static void pci_bridge_check_ranges(struct pci_bus *bus)
1022 {
1023 struct pci_dev *bridge = bus->self;
1024 struct resource *b_res;
1025
1026 b_res = &bridge->resource[PCI_BRIDGE_MEM_WINDOW];
1027 b_res->flags |= IORESOURCE_MEM;
1028
1029 if (bridge->io_window) {
1030 b_res = &bridge->resource[PCI_BRIDGE_IO_WINDOW];
1031 b_res->flags |= IORESOURCE_IO;
1032 }
1033
1034 if (bridge->pref_window) {
1035 b_res = &bridge->resource[PCI_BRIDGE_PREF_MEM_WINDOW];
1036 b_res->flags |= IORESOURCE_MEM | IORESOURCE_PREFETCH;
1037 if (bridge->pref_64_window) {
1038 b_res->flags |= IORESOURCE_MEM_64 |
1039 PCI_PREF_RANGE_TYPE_64;
1040 }
1041 }
1042 }
1043
/*
 * Compute the size of a bridge I/O window from the accumulated child
 * requirements, honoring the minimum size, any optional additions,
 * the previously programmed size and the required alignment.
 */
static resource_size_t calculate_iosize(resource_size_t size,
					resource_size_t min_size,
					resource_size_t size1,
					resource_size_t add_size,
					resource_size_t children_add_size,
					resource_size_t old_size,
					resource_size_t align)
{
	size = max(size, min_size);

	/* An old size of 1 is treated as no previous size */
	if (old_size == 1)
		old_size = 0;

	/*
	 * To be fixed in 2.5: we should have sort of HAVE_ISA flag in the
	 * struct pci_bus.
	 */
#if defined(CONFIG_ISA) || defined(CONFIG_EISA)
	size = (size & 0xff) + ((size & ~0xffUL) << 2);
#endif
	size += size1;
	size = max(size, add_size) + children_add_size;
	size = max(size, old_size);

	return ALIGN(size, align);
}
1068
/*
 * Compute the size of a bridge memory window from the accumulated child
 * requirements, honoring the minimum size, any optional additions, the
 * previously programmed size and the required alignment.
 */
static resource_size_t calculate_memsize(resource_size_t size,
					 resource_size_t min_size,
					 resource_size_t add_size,
					 resource_size_t children_add_size,
					 resource_size_t old_size,
					 resource_size_t align)
{
	size = max(size, min_size);

	/* An old size of 1 is treated as no previous size */
	if (old_size == 1)
		old_size = 0;

	size = max(size, add_size) + children_add_size;
	size = max(size, old_size);

	return ALIGN(size, align);
}
1084
/*
 * Arch hook for extra bridge window alignment constraints; the default
 * (weak) implementation imposes none (alignment of 1).
 */
resource_size_t __weak pcibios_window_alignment(struct pci_bus *bus,
						unsigned long type)
{
	return 1;
}
1090
/* Default PCI-PCI bridge window alignments */
#define PCI_P2P_DEFAULT_MEM_ALIGN	SZ_1M
#define PCI_P2P_DEFAULT_IO_ALIGN	SZ_4K
#define PCI_P2P_DEFAULT_IO_ALIGN_1K	SZ_1K
1094
window_alignment(struct pci_bus * bus,unsigned long type)1095 static resource_size_t window_alignment(struct pci_bus *bus, unsigned long type)
1096 {
1097 resource_size_t align = 1, arch_align;
1098
1099 if (type & IORESOURCE_MEM)
1100 align = PCI_P2P_DEFAULT_MEM_ALIGN;
1101 else if (type & IORESOURCE_IO) {
1102 /*
1103 * Per spec, I/O windows are 4K-aligned, but some bridges have
1104 * an extension to support 1K alignment.
1105 */
1106 if (bus->self && bus->self->io_window_1k)
1107 align = PCI_P2P_DEFAULT_IO_ALIGN_1K;
1108 else
1109 align = PCI_P2P_DEFAULT_IO_ALIGN;
1110 }
1111
1112 arch_align = pcibios_window_alignment(bus, type);
1113 return max(align, arch_align);
1114 }
1115
1116 /**
1117 * pbus_size_io() - Size the I/O window of a given bus
1118 *
1119 * @bus: The bus
1120 * @min_size: The minimum I/O window that must be allocated
1121 * @add_size: Additional optional I/O window
1122 * @realloc_head: Track the additional I/O window on this list
1123 *
1124 * Sizing the I/O windows of the PCI-PCI bridge is trivial, since these
1125 * windows have 1K or 4K granularity and the I/O ranges of non-bridge PCI
1126 * devices are limited to 256 bytes. We must be careful with the ISA
1127 * aliasing though.
1128 */
static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size,
			 resource_size_t add_size,
			 struct list_head *realloc_head)
{
	struct pci_dev *dev;
	struct resource *b_res = pbus_select_window_for_type(bus, IORESOURCE_IO);
	resource_size_t size = 0, size0 = 0, size1 = 0;
	resource_size_t children_add_size = 0;
	resource_size_t min_align, align;

	/* No I/O window on this bus */
	if (!b_res)
		return;

	/* If resource is already assigned, nothing more to do */
	if (b_res->parent)
		return;

	min_align = window_alignment(bus, IORESOURCE_IO);

	/* Accumulate the I/O requirements of all children */
	list_for_each_entry(dev, &bus->devices, bus_list) {
		struct resource *r;

		pci_dev_for_each_resource(dev, r) {
			unsigned long r_size;

			if (r->parent || !(r->flags & IORESOURCE_IO))
				continue;

			if (!pdev_resource_assignable(dev, r))
				continue;

			r_size = resource_size(r);
			if (r_size < SZ_1K)
				/* Might be re-aligned for ISA */
				size += r_size;
			else
				size1 += r_size;

			align = pci_resource_alignment(dev, r);
			if (align > min_align)
				min_align = align;

			/* Include optional space requested on the realloc list */
			if (realloc_head)
				children_add_size += get_res_add_size(realloc_head, r);
		}
	}

	/* size0: required window; size1: window including optional space */
	size0 = calculate_iosize(size, min_size, size1, 0, 0,
			resource_size(b_res), min_align);

	if (size0)
		b_res->flags &= ~IORESOURCE_DISABLED;

	size1 = size0;
	if (realloc_head && (add_size > 0 || children_add_size > 0)) {
		size1 = calculate_iosize(size, min_size, size1, add_size,
			children_add_size, resource_size(b_res), min_align);
	}

	/* Nothing needs this window at all: disable it */
	if (!size0 && !size1) {
		if (bus->self && (b_res->start || b_res->end))
			pci_info(bus->self, "disabling bridge window %pR to %pR (unused)\n",
				 b_res, &bus->busn_res);
		b_res->flags |= IORESOURCE_DISABLED;
		return;
	}

	resource_set_range(b_res, min_align, size0);
	b_res->flags |= IORESOURCE_STARTALIGN;
	if (bus->self && size1 > size0 && realloc_head) {
		b_res->flags &= ~IORESOURCE_DISABLED;
		/* Track the extra (optional) space on the realloc list */
		add_to_list(realloc_head, bus->self, b_res, size1-size0,
			    min_align);
		pci_info(bus->self, "bridge window %pR to %pR add_size %llx\n",
			 b_res, &bus->busn_res,
			 (unsigned long long) size1 - size0);
	}
}
1207
/*
 * Compute a minimum alignment for a bridge memory window that must hold
 * aligns[order] bytes of resources requiring (1MB << order) alignment,
 * for each order up to @max_order.
 */
static inline resource_size_t calculate_mem_align(resource_size_t *aligns,
						  int max_order)
{
	resource_size_t align = 0;	/* running total of aligned sizes */
	resource_size_t min_align = 0;	/* resulting window alignment */
	int order;

	for (order = 0; order <= max_order; order++) {
		resource_size_t align1 = 1;

		/* Alignment for this order: 1MB << order */
		align1 <<= order + __ffs(SZ_1M);

		if (!align)
			min_align = align1;
		else if (ALIGN(align + min_align, min_align) < align1)
			/*
			 * NOTE(review): heuristic balancing the window
			 * alignment against the accumulated size -
			 * confirm against upstream history before changing.
			 */
			min_align = align1 >> 1;
		align += aligns[order];
	}

	return min_align;
}
1229
1230 /**
1231 * pbus_upstream_space_available - Check no upstream resource limits allocation
1232 * @bus: The bus
1233 * @res: The resource to help select the correct bridge window
1234 * @size: The size required from the bridge window
1235 * @align: Required alignment for the resource
1236 *
1237 * Check that @size can fit inside the upstream bridge resources that are
1238 * already assigned. Select the upstream bridge window based on the type of
1239 * @res.
1240 *
1241 * Return: %true if enough space is available on all assigned upstream
1242 * resources.
1243 */
static bool pbus_upstream_space_available(struct pci_bus *bus,
					  struct resource *res,
					  resource_size_t size,
					  resource_size_t align)
{
	struct resource_constraint constraint = {
		.max = RESOURCE_SIZE_MAX,
		.align = align,
	};
	struct pci_bus *downstream = bus;

	/* Walk upstream towards the root bus */
	while ((bus = bus->parent)) {
		if (pci_is_root_bus(bus))
			break;

		/* Pick the upstream window matching the type of @res */
		res = pbus_select_window(bus, res);
		if (!res)
			return false;
		/* An unassigned window imposes no limit yet; keep walking */
		if (!res->parent)
			continue;

		if (resource_size(res) >= size) {
			struct resource gap = {};

			/* Found a gap of @size/@align in this window? */
			if (find_resource_space(res, &gap, size, &constraint) == 0) {
				gap.flags = res->flags;
				pci_dbg(bus->self,
					"Assigned bridge window %pR to %pR free space at %pR\n",
					res, &bus->busn_res, &gap);
				return true;
			}
		}

		if (bus->self) {
			pci_info(bus->self,
				 "Assigned bridge window %pR to %pR cannot fit 0x%llx required for %s bridging to %pR\n",
				 res, &bus->busn_res,
				 (unsigned long long)size,
				 pci_name(downstream->self),
				 &downstream->busn_res);
		}

		return false;
	}

	/* Reached the root without finding an assigned window that's too small */
	return true;
}
1291
1292 /**
1293 * pbus_size_mem() - Size the memory window of a given bus
1294 *
1295 * @bus: The bus
1296 * @type: The type of bridge resource
1297 * @min_size: The minimum memory window that must be allocated
1298 * @add_size: Additional optional memory window
1299 * @realloc_head: Track the additional memory window on this list
1300 *
1301 * Calculate the size of the bus resource for @type and minimal alignment
1302 * which guarantees that all child resources fit in this size.
1303 *
 * Set the bus resource start/end to indicate the required size if there is an
1305 * available unassigned bus resource of the desired @type.
1306 *
1307 * Add optional resource requests to the @realloc_head list if it is
1308 * supplied.
1309 */
static void pbus_size_mem(struct pci_bus *bus, unsigned long type,
			  resource_size_t min_size,
			  resource_size_t add_size,
			  struct list_head *realloc_head)
{
	struct pci_dev *dev;
	resource_size_t min_align, win_align, align, size, size0, size1 = 0;
	resource_size_t aligns[28]; /* Alignments from 1MB to 128TB */
	int order, max_order;
	struct resource *b_res = pbus_select_window_for_type(bus, type);
	resource_size_t children_add_size = 0;
	resource_size_t children_add_align = 0;
	resource_size_t add_align = 0;
	resource_size_t relaxed_align;
	resource_size_t old_size;

	/* No window of the requested type on this bus */
	if (!b_res)
		return;

	/* If resource is already assigned, nothing more to do */
	if (b_res->parent)
		return;

	memset(aligns, 0, sizeof(aligns));
	max_order = 0;
	size = 0;

	/* Accumulate size/alignment requirements of all child resources */
	list_for_each_entry(dev, &bus->devices, bus_list) {
		struct resource *r;
		int i;

		pci_dev_for_each_resource(dev, r, i) {
			const char *r_name = pci_resource_name(dev, i);
			resource_size_t r_size;

			if (!pdev_resources_assignable(dev) ||
			    !pdev_resource_should_fit(dev, r))
				continue;
			/* Skip resources that map to a different window */
			if (b_res != pbus_select_window(bus, r))
				continue;

			r_size = resource_size(r);

			/* Put SRIOV requested res to the optional list */
			if (realloc_head && pci_resource_is_optional(dev, i)) {
				add_align = max(pci_resource_alignment(dev, r), add_align);
				add_to_list(realloc_head, dev, r, 0, 0 /* Don't care */);
				children_add_size += r_size;
				continue;
			}

			/*
			 * aligns[0] is for 1MB (since bridge memory
			 * windows are always at least 1MB aligned), so
			 * keep "order" from being negative for smaller
			 * resources.
			 */
			align = pci_resource_alignment(dev, r);
			order = __ffs(align) - __ffs(SZ_1M);
			if (order < 0)
				order = 0;
			if (order >= ARRAY_SIZE(aligns)) {
				pci_warn(dev, "%s %pR: disabling; bad alignment %#llx\n",
					 r_name, r, (unsigned long long) align);
				r->flags = 0;
				continue;
			}
			size += max(r_size, align);
			/*
			 * Exclude ranges with size > align from calculation of
			 * the alignment.
			 */
			if (r_size <= align)
				aligns[order] += align;
			if (order > max_order)
				max_order = order;

			if (realloc_head) {
				children_add_size += get_res_add_size(realloc_head, r);
				children_add_align = get_res_add_align(realloc_head, r);
				add_align = max(add_align, children_add_align);
			}
		}
	}

	/* size0: required window; size1: window including optional space */
	old_size = resource_size(b_res);
	win_align = window_alignment(bus, b_res->flags);
	min_align = calculate_mem_align(aligns, max_order);
	min_align = max(min_align, win_align);
	size0 = calculate_memsize(size, min_size, 0, 0, old_size, min_align);

	if (size0) {
		resource_set_range(b_res, min_align, size0);
		b_res->flags &= ~IORESOURCE_DISABLED;
	}

	/* Retry with relaxed alignment if upstream windows cannot fit size0 */
	if (bus->self && size0 &&
	    !pbus_upstream_space_available(bus, b_res, size0, min_align)) {
		relaxed_align = 1ULL << (max_order + __ffs(SZ_1M));
		relaxed_align = max(relaxed_align, win_align);
		min_align = min(min_align, relaxed_align);
		size0 = calculate_memsize(size, min_size, 0, 0, old_size, win_align);
		resource_set_range(b_res, min_align, size0);
		pci_info(bus->self, "bridge window %pR to %pR requires relaxed alignment rules\n",
			 b_res, &bus->busn_res);
	}

	if (realloc_head && (add_size > 0 || children_add_size > 0)) {
		add_align = max(min_align, add_align);
		size1 = calculate_memsize(size, min_size, add_size, children_add_size,
					  old_size, add_align);

		/* Same relaxed-alignment fallback for the optional size */
		if (bus->self && size1 &&
		    !pbus_upstream_space_available(bus, b_res, size1, add_align)) {
			relaxed_align = 1ULL << (max_order + __ffs(SZ_1M));
			relaxed_align = max(relaxed_align, win_align);
			min_align = min(min_align, relaxed_align);
			size1 = calculate_memsize(size, min_size, add_size, children_add_size,
						  old_size, win_align);
			pci_info(bus->self,
				 "bridge window %pR to %pR requires relaxed alignment rules\n",
				 b_res, &bus->busn_res);
		}
	}

	/* Nothing needs this window at all: disable it */
	if (!size0 && !size1) {
		if (bus->self && (b_res->start || b_res->end))
			pci_info(bus->self, "disabling bridge window %pR to %pR (unused)\n",
				 b_res, &bus->busn_res);
		b_res->flags |= IORESOURCE_DISABLED;
		return;
	}

	resource_set_range(b_res, min_align, size0);
	b_res->flags |= IORESOURCE_STARTALIGN;
	if (bus->self && size1 > size0 && realloc_head) {
		b_res->flags &= ~IORESOURCE_DISABLED;
		/* Track the extra (optional) space on the realloc list */
		add_to_list(realloc_head, bus->self, b_res, size1-size0, add_align);
		pci_info(bus->self, "bridge window %pR to %pR add_size %llx add_align %llx\n",
			 b_res, &bus->busn_res,
			 (unsigned long long) (size1 - size0),
			 (unsigned long long) add_align);
	}
}
1454
pci_cardbus_resource_alignment(struct resource * res)1455 unsigned long pci_cardbus_resource_alignment(struct resource *res)
1456 {
1457 if (res->flags & IORESOURCE_IO)
1458 return pci_cardbus_io_size;
1459 if (res->flags & IORESOURCE_MEM)
1460 return pci_cardbus_mem_size;
1461 return 0;
1462 }
1463
static void pci_bus_size_cardbus(struct pci_bus *bus,
				 struct list_head *realloc_head)
{
	struct pci_dev *bridge = bus->self;
	struct resource *b_res;
	resource_size_t b_res_3_size = pci_cardbus_mem_size * 2;
	u16 ctrl;

	b_res = &bridge->resource[PCI_CB_BRIDGE_IO_0_WINDOW];
	if (b_res->parent)
		goto handle_b_res_1;
	/*
	 * Reserve some resources for CardBus. We reserve a fixed amount
	 * of bus space for CardBus bridges.
	 */
	resource_set_range(b_res, pci_cardbus_io_size, pci_cardbus_io_size);
	b_res->flags |= IORESOURCE_IO | IORESOURCE_STARTALIGN;
	if (realloc_head) {
		/* Make the reservation optional: shrink it and track the rest */
		b_res->end -= pci_cardbus_io_size;
		add_to_list(realloc_head, bridge, b_res, pci_cardbus_io_size,
			    pci_cardbus_io_size);
	}

handle_b_res_1:
	/* Second I/O window, sized the same way */
	b_res = &bridge->resource[PCI_CB_BRIDGE_IO_1_WINDOW];
	if (b_res->parent)
		goto handle_b_res_2;
	resource_set_range(b_res, pci_cardbus_io_size, pci_cardbus_io_size);
	b_res->flags |= IORESOURCE_IO | IORESOURCE_STARTALIGN;
	if (realloc_head) {
		b_res->end -= pci_cardbus_io_size;
		add_to_list(realloc_head, bridge, b_res, pci_cardbus_io_size,
			    pci_cardbus_io_size);
	}

handle_b_res_2:
	/* MEM1 must not be pref MMIO */
	pci_read_config_word(bridge, PCI_CB_BRIDGE_CONTROL, &ctrl);
	if (ctrl & PCI_CB_BRIDGE_CTL_PREFETCH_MEM1) {
		ctrl &= ~PCI_CB_BRIDGE_CTL_PREFETCH_MEM1;
		pci_write_config_word(bridge, PCI_CB_BRIDGE_CONTROL, ctrl);
		pci_read_config_word(bridge, PCI_CB_BRIDGE_CONTROL, &ctrl);
	}

	/* Check whether prefetchable memory is supported by this bridge. */
	pci_read_config_word(bridge, PCI_CB_BRIDGE_CONTROL, &ctrl);
	if (!(ctrl & PCI_CB_BRIDGE_CTL_PREFETCH_MEM0)) {
		/* Try to enable it; re-read to see whether the bit stuck */
		ctrl |= PCI_CB_BRIDGE_CTL_PREFETCH_MEM0;
		pci_write_config_word(bridge, PCI_CB_BRIDGE_CONTROL, ctrl);
		pci_read_config_word(bridge, PCI_CB_BRIDGE_CONTROL, &ctrl);
	}

	b_res = &bridge->resource[PCI_CB_BRIDGE_MEM_0_WINDOW];
	if (b_res->parent)
		goto handle_b_res_3;
	/*
	 * If we have prefetchable memory support, allocate two regions.
	 * Otherwise, allocate one region of twice the size.
	 */
	if (ctrl & PCI_CB_BRIDGE_CTL_PREFETCH_MEM0) {
		resource_set_range(b_res, pci_cardbus_mem_size,
				   pci_cardbus_mem_size);
		b_res->flags |= IORESOURCE_MEM | IORESOURCE_PREFETCH |
				IORESOURCE_STARTALIGN;
		if (realloc_head) {
			b_res->end -= pci_cardbus_mem_size;
			add_to_list(realloc_head, bridge, b_res,
				    pci_cardbus_mem_size, pci_cardbus_mem_size);
		}

		/* Reduce that to half */
		b_res_3_size = pci_cardbus_mem_size;
	}

handle_b_res_3:
	/* Non-prefetchable memory window */
	b_res = &bridge->resource[PCI_CB_BRIDGE_MEM_1_WINDOW];
	if (b_res->parent)
		goto handle_done;
	resource_set_range(b_res, pci_cardbus_mem_size, b_res_3_size);
	b_res->flags |= IORESOURCE_MEM | IORESOURCE_STARTALIGN;
	if (realloc_head) {
		b_res->end -= b_res_3_size;
		add_to_list(realloc_head, bridge, b_res, b_res_3_size,
			    pci_cardbus_mem_size);
	}

handle_done:
	;
}
1553
void __pci_bus_size_bridges(struct pci_bus *bus, struct list_head *realloc_head)
{
	struct pci_dev *dev;
	resource_size_t additional_io_size = 0, additional_mmio_size = 0,
		additional_mmio_pref_size = 0;
	struct resource *pref;
	struct pci_host_bridge *host;
	int hdr_type;

	/* Depth-first: size the windows of all subordinate buses first */
	list_for_each_entry(dev, &bus->devices, bus_list) {
		struct pci_bus *b = dev->subordinate;
		if (!b)
			continue;

		switch (dev->hdr_type) {
		case PCI_HEADER_TYPE_CARDBUS:
			pci_bus_size_cardbus(b, realloc_head);
			break;

		case PCI_HEADER_TYPE_BRIDGE:
		default:
			__pci_bus_size_bridges(b, realloc_head);
			break;
		}
	}

	/* The root bus? */
	if (pci_is_root_bus(bus)) {
		host = to_pci_host_bridge(bus->bridge);
		if (!host->size_windows)
			return;
		/* Find the first prefetchable window of the host bridge */
		pci_bus_for_each_resource(bus, pref)
			if (pref && (pref->flags & IORESOURCE_PREFETCH))
				break;
		hdr_type = -1;	/* Intentionally invalid - not a PCI device. */
	} else {
		pref = &bus->self->resource[PCI_BRIDGE_PREF_MEM_WINDOW];
		hdr_type = bus->self->hdr_type;
	}

	switch (hdr_type) {
	case PCI_HEADER_TYPE_CARDBUS:
		/* Don't size CardBuses yet */
		break;

	case PCI_HEADER_TYPE_BRIDGE:
		pci_bridge_check_ranges(bus);
		if (bus->self->is_hotplug_bridge) {
			/* Reserve extra window space for hotplugged devices */
			additional_io_size = pci_hotplug_io_size;
			additional_mmio_size = pci_hotplug_mmio_size;
			additional_mmio_pref_size = pci_hotplug_mmio_pref_size;
		}
		fallthrough;
	default:
		pbus_size_io(bus, realloc_head ? 0 : additional_io_size,
			     additional_io_size, realloc_head);

		/*
		 * Size the prefetchable window only when one exists; its
		 * 64-bit capability comes from the bridge window flags.
		 */
		if (pref && (pref->flags & IORESOURCE_PREFETCH)) {
			pbus_size_mem(bus,
				      IORESOURCE_MEM | IORESOURCE_PREFETCH |
				      (pref->flags & IORESOURCE_MEM_64),
				      realloc_head ? 0 : additional_mmio_pref_size,
				      additional_mmio_pref_size, realloc_head);
		}

		pbus_size_mem(bus, IORESOURCE_MEM,
			      realloc_head ? 0 : additional_mmio_size,
			      additional_mmio_size, realloc_head);
		break;
	}
}
1625
/* Size all bridge windows below @bus without tracking optional space. */
void pci_bus_size_bridges(struct pci_bus *bus)
{
	__pci_bus_size_bridges(bus, NULL);
}
EXPORT_SYMBOL(pci_bus_size_bridges);
1631
assign_fixed_resource_on_bus(struct pci_bus * b,struct resource * r)1632 static void assign_fixed_resource_on_bus(struct pci_bus *b, struct resource *r)
1633 {
1634 struct resource *parent_r;
1635 unsigned long mask = IORESOURCE_IO | IORESOURCE_MEM |
1636 IORESOURCE_PREFETCH;
1637
1638 pci_bus_for_each_resource(b, parent_r) {
1639 if (!parent_r)
1640 continue;
1641
1642 if ((r->flags & mask) == (parent_r->flags & mask) &&
1643 resource_contains(parent_r, r))
1644 request_resource(parent_r, r);
1645 }
1646 }
1647
1648 /*
1649 * Try to assign any resources marked as IORESOURCE_PCI_FIXED, as they are
1650 * skipped by pbus_assign_resources_sorted().
1651 */
pdev_assign_fixed_resources(struct pci_dev * dev)1652 static void pdev_assign_fixed_resources(struct pci_dev *dev)
1653 {
1654 struct resource *r;
1655
1656 pci_dev_for_each_resource(dev, r) {
1657 struct pci_bus *b;
1658
1659 if (r->parent || !(r->flags & IORESOURCE_PCI_FIXED) ||
1660 !(r->flags & (IORESOURCE_IO | IORESOURCE_MEM)))
1661 continue;
1662
1663 b = dev->bus;
1664 while (b && !r->parent) {
1665 assign_fixed_resource_on_bus(b, r);
1666 b = b->parent;
1667 }
1668 }
1669 }
1670
void __pci_bus_assign_resources(const struct pci_bus *bus,
				struct list_head *realloc_head,
				struct list_head *fail_head)
{
	struct pci_bus *b;
	struct pci_dev *dev;

	/* Assign the resources of devices on this bus first */
	pbus_assign_resources_sorted(bus, realloc_head, fail_head);

	list_for_each_entry(dev, &bus->devices, bus_list) {
		pdev_assign_fixed_resources(dev);

		b = dev->subordinate;
		if (!b)
			continue;

		/* Recurse into the subordinate bus, then program the bridge */
		__pci_bus_assign_resources(b, realloc_head, fail_head);

		switch (dev->hdr_type) {
		case PCI_HEADER_TYPE_BRIDGE:
			/* Don't re-program bridges that are already enabled */
			if (!pci_is_enabled(dev))
				pci_setup_bridge(b);
			break;

		case PCI_HEADER_TYPE_CARDBUS:
			pci_setup_cardbus(b);
			break;

		default:
			pci_info(dev, "not setting up bridge for bus %04x:%02x\n",
				 pci_domain_nr(b), b->number);
			break;
		}
	}
}
1706
/* Assign resources below @bus without realloc or failure tracking. */
void pci_bus_assign_resources(const struct pci_bus *bus)
{
	__pci_bus_assign_resources(bus, NULL, NULL);
}
EXPORT_SYMBOL(pci_bus_assign_resources);
1712
pci_claim_device_resources(struct pci_dev * dev)1713 static void pci_claim_device_resources(struct pci_dev *dev)
1714 {
1715 int i;
1716
1717 for (i = 0; i < PCI_BRIDGE_RESOURCES; i++) {
1718 struct resource *r = &dev->resource[i];
1719
1720 if (!r->flags || r->parent)
1721 continue;
1722
1723 pci_claim_resource(dev, i);
1724 }
1725 }
1726
pci_claim_bridge_resources(struct pci_dev * dev)1727 static void pci_claim_bridge_resources(struct pci_dev *dev)
1728 {
1729 int i;
1730
1731 for (i = PCI_BRIDGE_RESOURCES; i < PCI_NUM_RESOURCES; i++) {
1732 struct resource *r = &dev->resource[i];
1733
1734 if (!r->flags || r->parent)
1735 continue;
1736
1737 pci_claim_bridge_resource(dev, i);
1738 }
1739 }
1740
pci_bus_allocate_dev_resources(struct pci_bus * b)1741 static void pci_bus_allocate_dev_resources(struct pci_bus *b)
1742 {
1743 struct pci_dev *dev;
1744 struct pci_bus *child;
1745
1746 list_for_each_entry(dev, &b->devices, bus_list) {
1747 pci_claim_device_resources(dev);
1748
1749 child = dev->subordinate;
1750 if (child)
1751 pci_bus_allocate_dev_resources(child);
1752 }
1753 }
1754
pci_bus_allocate_resources(struct pci_bus * b)1755 static void pci_bus_allocate_resources(struct pci_bus *b)
1756 {
1757 struct pci_bus *child;
1758
1759 /*
1760 * Carry out a depth-first search on the PCI bus tree to allocate
1761 * bridge apertures. Read the programmed bridge bases and
1762 * recursively claim the respective bridge resources.
1763 */
1764 if (b->self) {
1765 pci_read_bridge_bases(b);
1766 pci_claim_bridge_resources(b->self);
1767 }
1768
1769 list_for_each_entry(child, &b->children, node)
1770 pci_bus_allocate_resources(child);
1771 }
1772
/* Claim bridge windows first, then the device resources below @b. */
void pci_bus_claim_resources(struct pci_bus *b)
{
	pci_bus_allocate_resources(b);
	pci_bus_allocate_dev_resources(b);
}
EXPORT_SYMBOL(pci_bus_claim_resources);
1779
__pci_bridge_assign_resources(const struct pci_dev * bridge,struct list_head * add_head,struct list_head * fail_head)1780 static void __pci_bridge_assign_resources(const struct pci_dev *bridge,
1781 struct list_head *add_head,
1782 struct list_head *fail_head)
1783 {
1784 struct pci_bus *b;
1785
1786 pdev_assign_resources_sorted((struct pci_dev *)bridge,
1787 add_head, fail_head);
1788
1789 b = bridge->subordinate;
1790 if (!b)
1791 return;
1792
1793 __pci_bus_assign_resources(b, add_head, fail_head);
1794
1795 switch (bridge->class >> 8) {
1796 case PCI_CLASS_BRIDGE_PCI:
1797 pci_setup_bridge(b);
1798 break;
1799
1800 case PCI_CLASS_BRIDGE_CARDBUS:
1801 pci_setup_cardbus(b);
1802 break;
1803
1804 default:
1805 pci_info(bridge, "not setting up bridge for bus %04x:%02x\n",
1806 pci_domain_nr(b), b->number);
1807 break;
1808 }
1809 }
1810
pci_bridge_release_resources(struct pci_bus * bus,struct resource * b_win)1811 static void pci_bridge_release_resources(struct pci_bus *bus,
1812 struct resource *b_win)
1813 {
1814 struct pci_dev *dev = bus->self;
1815 int idx, ret;
1816
1817 if (!b_win->parent)
1818 return;
1819
1820 idx = pci_resource_num(dev, b_win);
1821
1822 /* If there are children, release them all */
1823 release_child_resources(b_win);
1824
1825 ret = pci_release_resource(dev, idx);
1826 if (ret)
1827 return;
1828
1829 pci_setup_one_bridge_window(dev, idx);
1830 }
1831
/* Scope of a bridge window release operation */
enum release_type {
	leaf_only,	/* release windows of leaf bridges only */
	whole_subtree,	/* release windows of every bridge in the subtree */
};
1836
1837 /*
1838 * Try to release PCI bridge resources from leaf bridge, so we can allocate
1839 * a larger window later.
1840 */
static void pci_bus_release_bridge_resources(struct pci_bus *bus,
					     struct resource *b_win,
					     enum release_type rel_type)
{
	struct pci_dev *dev;
	bool is_leaf_bridge = true;

	list_for_each_entry(dev, &bus->devices, bus_list) {
		struct pci_bus *b = dev->subordinate;
		struct resource *res;

		if (!b)
			continue;

		/* A subordinate bus exists, so this is not a leaf bridge */
		is_leaf_bridge = false;

		if ((dev->class >> 8) != PCI_CLASS_BRIDGE_PCI)
			continue;

		/* Only descend when releasing the whole subtree */
		if (rel_type != whole_subtree)
			continue;

		/* Recurse into child windows allocated out of @b_win */
		pci_bus_for_each_resource(b, res) {
			if (res->parent != b_win)
				continue;

			pci_bus_release_bridge_resources(b, res, rel_type);
		}
	}

	/* Root bus resources are never released here */
	if (pci_is_root_bus(bus))
		return;

	if ((bus->self->class >> 8) != PCI_CLASS_BRIDGE_PCI)
		return;

	if ((rel_type == whole_subtree) || is_leaf_bridge)
		pci_bridge_release_resources(bus, b_win);
}
1880
pci_bus_dump_res(struct pci_bus * bus)1881 static void pci_bus_dump_res(struct pci_bus *bus)
1882 {
1883 struct resource *res;
1884 int i;
1885
1886 pci_bus_for_each_resource(bus, res, i) {
1887 if (!res || !res->end || !res->flags)
1888 continue;
1889
1890 dev_info(&bus->dev, "resource %d %pR\n", i, res);
1891 }
1892 }
1893
pci_bus_dump_resources(struct pci_bus * bus)1894 static void pci_bus_dump_resources(struct pci_bus *bus)
1895 {
1896 struct pci_bus *b;
1897 struct pci_dev *dev;
1898
1899
1900 pci_bus_dump_res(bus);
1901
1902 list_for_each_entry(dev, &bus->devices, bus_list) {
1903 b = dev->subordinate;
1904 if (!b)
1905 continue;
1906
1907 pci_bus_dump_resources(b);
1908 }
1909 }
1910
pci_bus_get_depth(struct pci_bus * bus)1911 static int pci_bus_get_depth(struct pci_bus *bus)
1912 {
1913 int depth = 0;
1914 struct pci_bus *child_bus;
1915
1916 list_for_each_entry(child_bus, &bus->children, node) {
1917 int ret;
1918
1919 ret = pci_bus_get_depth(child_bus);
1920 if (ret + 1 > depth)
1921 depth = ret + 1;
1922 }
1923
1924 return depth;
1925 }
1926
1927 /*
1928 * -1: undefined, will auto detect later
1929 * 0: disabled by user
1930 * 1: disabled by auto detect
1931 * 2: enabled by user
1932 * 3: enabled by auto detect
1933 */
/* Realloc setting; see the value table in the comment above */
enum enable_type {
	undefined = -1,		/* will auto detect later */
	user_disabled,		/* disabled by user */
	auto_disabled,		/* disabled by auto detect */
	user_enabled,		/* enabled by user */
	auto_enabled,		/* enabled by auto detect */
};
1941
/* Current realloc policy; starts undefined until set or auto-detected */
static enum enable_type pci_realloc_enable = undefined;

/* Parse the "pci=realloc[=off|on]" early kernel parameter */
void __init pci_realloc_get_opt(char *str)
{
	if (!strncmp(str, "off", 3))
		pci_realloc_enable = user_disabled;
	else if (!strncmp(str, "on", 2))
		pci_realloc_enable = user_enabled;
}
pci_realloc_enabled(enum enable_type enable)1950 static bool pci_realloc_enabled(enum enable_type enable)
1951 {
1952 return enable >= user_enabled;
1953 }
1954
1955 #if defined(CONFIG_PCI_IOV) && defined(CONFIG_PCI_REALLOC_ENABLE_AUTO)
iov_resources_unassigned(struct pci_dev * dev,void * data)1956 static int iov_resources_unassigned(struct pci_dev *dev, void *data)
1957 {
1958 int i;
1959 bool *unassigned = data;
1960
1961 for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
1962 int idx = pci_resource_num_from_vf_bar(i);
1963 struct resource *r = &dev->resource[idx];
1964 struct pci_bus_region region;
1965
1966 /* Not assigned or rejected by kernel? */
1967 if (!r->flags)
1968 continue;
1969
1970 pcibios_resource_to_bus(dev->bus, ®ion, r);
1971 if (!region.start) {
1972 *unassigned = true;
1973 return 1; /* Return early from pci_walk_bus() */
1974 }
1975 }
1976
1977 return 0;
1978 }
1979
pci_realloc_detect(struct pci_bus * bus,enum enable_type enable_local)1980 static enum enable_type pci_realloc_detect(struct pci_bus *bus,
1981 enum enable_type enable_local)
1982 {
1983 bool unassigned = false;
1984 struct pci_host_bridge *host;
1985
1986 if (enable_local != undefined)
1987 return enable_local;
1988
1989 host = pci_find_host_bridge(bus);
1990 if (host->preserve_config)
1991 return auto_disabled;
1992
1993 pci_walk_bus(bus, iov_resources_unassigned, &unassigned);
1994 if (unassigned)
1995 return auto_enabled;
1996
1997 return enable_local;
1998 }
1999 #else
/* Without SR-IOV realloc auto-detection, keep the current setting. */
static enum enable_type pci_realloc_detect(struct pci_bus *bus,
					   enum enable_type enable_local)
{
	return enable_local;
}
2005 #endif
2006
/*
 * Resize an unassigned bridge window of @bridge to @new_size, dropping
 * any stale entry for it from @add_list.
 */
static void adjust_bridge_window(struct pci_dev *bridge, struct resource *res,
				 struct list_head *add_list,
				 resource_size_t new_size)
{
	resource_size_t cur_size = resource_size(res);
	resource_size_t delta;

	/* Assigned windows can't be resized; zero/unchanged sizes are no-ops */
	if (res->parent || !new_size || new_size == cur_size)
		return;

	if (new_size > cur_size) {
		delta = new_size - cur_size;
		pci_dbg(bridge, "bridge window %pR extended by %pa\n", res,
			&delta);
	} else {
		delta = cur_size - new_size;
		pci_dbg(bridge, "bridge window %pR shrunken by %pa\n", res,
			&delta);
	}

	resource_set_size(res, new_size);

	/* If the resource is part of the add_list, remove it now */
	if (add_list)
		remove_from_list(add_list, res);
}
2037
remove_dev_resource(struct resource * avail,struct pci_dev * dev,struct resource * res)2038 static void remove_dev_resource(struct resource *avail, struct pci_dev *dev,
2039 struct resource *res)
2040 {
2041 resource_size_t size, align, tmp;
2042
2043 size = resource_size(res);
2044 if (!size)
2045 return;
2046
2047 align = pci_resource_alignment(dev, res);
2048 align = align ? ALIGN(avail->start, align) - avail->start : 0;
2049 tmp = align + size;
2050 avail->start = min(avail->start + tmp, avail->end + 1);
2051 }
2052
remove_dev_resources(struct pci_dev * dev,struct resource available[PCI_P2P_BRIDGE_RESOURCE_NUM])2053 static void remove_dev_resources(struct pci_dev *dev,
2054 struct resource available[PCI_P2P_BRIDGE_RESOURCE_NUM])
2055 {
2056 struct resource *res, *b_win;
2057 int idx;
2058
2059 pci_dev_for_each_resource(dev, res) {
2060 b_win = pbus_select_window(dev->bus, res);
2061 if (!b_win)
2062 continue;
2063
2064 idx = pci_resource_num(dev->bus->self, b_win);
2065 idx -= PCI_BRIDGE_RESOURCES;
2066
2067 remove_dev_resource(&available[idx], dev, res);
2068 }
2069 }
2070
/* Align @addr down to @align; an @align of zero means "no alignment". */
#define ALIGN_DOWN_IF_NONZERO(addr, align) \
	((align) ? ALIGN_DOWN((addr), (align)) : (addr))
2073
2074 /*
2075 * io, mmio and mmio_pref contain the total amount of bridge window space
2076 * available. This includes the minimal space needed to cover all the
2077 * existing devices on the bus and the possible extra space that can be
2078 * shared with the bridges.
2079 */
pci_bus_distribute_available_resources(struct pci_bus * bus,struct list_head * add_list,struct resource available_in[PCI_P2P_BRIDGE_RESOURCE_NUM])2080 static void pci_bus_distribute_available_resources(struct pci_bus *bus,
2081 struct list_head *add_list,
2082 struct resource available_in[PCI_P2P_BRIDGE_RESOURCE_NUM])
2083 {
2084 struct resource available[PCI_P2P_BRIDGE_RESOURCE_NUM];
2085 unsigned int normal_bridges = 0, hotplug_bridges = 0;
2086 struct pci_dev *dev, *bridge = bus->self;
2087 resource_size_t per_bridge[PCI_P2P_BRIDGE_RESOURCE_NUM];
2088 resource_size_t align;
2089 int i;
2090
2091 for (i = 0; i < PCI_P2P_BRIDGE_RESOURCE_NUM; i++) {
2092 struct resource *res =
2093 pci_resource_n(bridge, PCI_BRIDGE_RESOURCES + i);
2094
2095 available[i] = available_in[i];
2096
2097 /*
2098 * The alignment of this bridge is yet to be considered,
2099 * hence it must be done now before extending its bridge
2100 * window.
2101 */
2102 align = pci_resource_alignment(bridge, res);
2103 if (!res->parent && align)
2104 available[i].start = min(ALIGN(available[i].start, align),
2105 available[i].end + 1);
2106
2107 /*
2108 * Now that we have adjusted for alignment, update the
2109 * bridge window resources to fill as much remaining
2110 * resource space as possible.
2111 */
2112 adjust_bridge_window(bridge, res, add_list,
2113 resource_size(&available[i]));
2114 }
2115
2116 /*
2117 * Calculate how many hotplug bridges and normal bridges there
2118 * are on this bus. We will distribute the additional available
2119 * resources between hotplug bridges.
2120 */
2121 for_each_pci_bridge(dev, bus) {
2122 if (dev->is_hotplug_bridge)
2123 hotplug_bridges++;
2124 else
2125 normal_bridges++;
2126 }
2127
2128 if (!(hotplug_bridges + normal_bridges))
2129 return;
2130
2131 /*
2132 * Calculate the amount of space we can forward from "bus" to any
2133 * downstream buses, i.e., the space left over after assigning the
2134 * BARs and windows on "bus".
2135 */
2136 list_for_each_entry(dev, &bus->devices, bus_list) {
2137 if (!dev->is_virtfn)
2138 remove_dev_resources(dev, available);
2139 }
2140
2141 /*
2142 * If there is at least one hotplug bridge on this bus it gets all
2143 * the extra resource space that was left after the reductions
2144 * above.
2145 *
2146 * If there are no hotplug bridges the extra resource space is
2147 * split between non-hotplug bridges. This is to allow possible
2148 * hotplug bridges below them to get the extra space as well.
2149 */
2150 for (i = 0; i < PCI_P2P_BRIDGE_RESOURCE_NUM; i++) {
2151 per_bridge[i] = div64_ul(resource_size(&available[i]),
2152 hotplug_bridges ?: normal_bridges);
2153 }
2154
2155 for_each_pci_bridge(dev, bus) {
2156 struct resource *res;
2157 struct pci_bus *b;
2158
2159 b = dev->subordinate;
2160 if (!b)
2161 continue;
2162 if (hotplug_bridges && !dev->is_hotplug_bridge)
2163 continue;
2164
2165 for (i = 0; i < PCI_P2P_BRIDGE_RESOURCE_NUM; i++) {
2166 res = pci_resource_n(dev, PCI_BRIDGE_RESOURCES + i);
2167
2168 /*
2169 * Make sure the split resource space is properly
2170 * aligned for bridge windows (align it down to
2171 * avoid going above what is available).
2172 */
2173 align = pci_resource_alignment(dev, res);
2174 resource_set_size(&available[i],
2175 ALIGN_DOWN_IF_NONZERO(per_bridge[i],
2176 align));
2177
2178 /*
2179 * The per_bridge holds the extra resource space
2180 * that can be added for each bridge but there is
2181 * the minimal already reserved as well so adjust
2182 * x.start down accordingly to cover the whole
2183 * space.
2184 */
2185 available[i].start -= resource_size(res);
2186 }
2187
2188 pci_bus_distribute_available_resources(b, add_list, available);
2189
2190 for (i = 0; i < PCI_P2P_BRIDGE_RESOURCE_NUM; i++)
2191 available[i].start += available[i].end + 1;
2192 }
2193 }
2194
pci_bridge_distribute_available_resources(struct pci_dev * bridge,struct list_head * add_list)2195 static void pci_bridge_distribute_available_resources(struct pci_dev *bridge,
2196 struct list_head *add_list)
2197 {
2198 struct resource *res, available[PCI_P2P_BRIDGE_RESOURCE_NUM];
2199 unsigned int i;
2200
2201 if (!bridge->is_hotplug_bridge)
2202 return;
2203
2204 pci_dbg(bridge, "distributing available resources\n");
2205
2206 /* Take the initial extra resources from the hotplug port */
2207 for (i = 0; i < PCI_P2P_BRIDGE_RESOURCE_NUM; i++) {
2208 res = pci_resource_n(bridge, PCI_BRIDGE_RESOURCES + i);
2209 available[i] = *res;
2210 }
2211
2212 pci_bus_distribute_available_resources(bridge->subordinate,
2213 add_list, available);
2214 }
2215
pci_bridge_resources_not_assigned(struct pci_dev * dev)2216 static bool pci_bridge_resources_not_assigned(struct pci_dev *dev)
2217 {
2218 const struct resource *r;
2219
2220 /*
2221 * If the child device's resources are not yet assigned it means we
2222 * are configuring them (not the boot firmware), so we should be
2223 * able to extend the upstream bridge resources in the same way we
2224 * do with the normal hotplug case.
2225 */
2226 r = &dev->resource[PCI_BRIDGE_IO_WINDOW];
2227 if (r->flags && !(r->flags & IORESOURCE_STARTALIGN))
2228 return false;
2229 r = &dev->resource[PCI_BRIDGE_MEM_WINDOW];
2230 if (r->flags && !(r->flags & IORESOURCE_STARTALIGN))
2231 return false;
2232 r = &dev->resource[PCI_BRIDGE_PREF_MEM_WINDOW];
2233 if (r->flags && !(r->flags & IORESOURCE_STARTALIGN))
2234 return false;
2235
2236 return true;
2237 }
2238
2239 static void
pci_root_bus_distribute_available_resources(struct pci_bus * bus,struct list_head * add_list)2240 pci_root_bus_distribute_available_resources(struct pci_bus *bus,
2241 struct list_head *add_list)
2242 {
2243 struct pci_dev *dev, *bridge = bus->self;
2244
2245 for_each_pci_bridge(dev, bus) {
2246 struct pci_bus *b;
2247
2248 b = dev->subordinate;
2249 if (!b)
2250 continue;
2251
2252 /*
2253 * Need to check "bridge" here too because it is NULL
2254 * in case of root bus.
2255 */
2256 if (bridge && pci_bridge_resources_not_assigned(dev))
2257 pci_bridge_distribute_available_resources(dev, add_list);
2258 else
2259 pci_root_bus_distribute_available_resources(b, add_list);
2260 }
2261 }
2262
pci_prepare_next_assign_round(struct list_head * fail_head,int tried_times,enum release_type rel_type)2263 static void pci_prepare_next_assign_round(struct list_head *fail_head,
2264 int tried_times,
2265 enum release_type rel_type)
2266 {
2267 struct pci_dev_resource *fail_res;
2268
2269 pr_info("PCI: No. %d try to assign unassigned res\n", tried_times + 1);
2270
2271 /*
2272 * Try to release leaf bridge's resources that aren't big
2273 * enough to contain child device resources.
2274 */
2275 list_for_each_entry(fail_res, fail_head, list) {
2276 struct pci_bus *bus = fail_res->dev->bus;
2277 struct resource *b_win;
2278
2279 b_win = pbus_select_window_for_type(bus, fail_res->flags);
2280 if (!b_win)
2281 continue;
2282 pci_bus_release_bridge_resources(bus, b_win, rel_type);
2283 }
2284
2285 /* Restore size and flags */
2286 list_for_each_entry(fail_res, fail_head, list)
2287 restore_dev_resource(fail_res);
2288
2289 free_list(fail_head);
2290 }
2291
/*
 * Assign any resources left unassigned below root bus @bus.
 *
 * The first pass will not touch PCI bridge windows.  If that pass fails
 * and reallocation is enabled, later passes release small leaf bridge
 * windows (and, from the third pass on, whole subtrees) and retry, up to
 * the maximum bus depth before giving up.
 */
void pci_assign_unassigned_root_bus_resources(struct pci_bus *bus)
{
	LIST_HEAD(realloc_head);
	/* List of resources that want additional resources */
	struct list_head *add_list = NULL;
	int tried_times = 0;
	enum release_type rel_type = leaf_only;
	LIST_HEAD(fail_head);
	int pci_try_num = 1;
	enum enable_type enable_local;

	/* Don't realloc if asked to do so */
	enable_local = pci_realloc_detect(bus, pci_realloc_enable);
	if (pci_realloc_enabled(enable_local)) {
		/* One pass per hierarchy level, plus a final add_list pass */
		int max_depth = pci_bus_get_depth(bus);

		pci_try_num = max_depth + 1;
		dev_info(&bus->dev, "max bus depth: %d pci_try_num: %d\n",
			 max_depth, pci_try_num);
	}

	while (1) {
		/*
		 * Last try will use add_list, otherwise will try good to
		 * have as must have, so can realloc parent bridge resource
		 */
		if (tried_times + 1 == pci_try_num)
			add_list = &realloc_head;
		/*
		 * Depth first, calculate sizes and alignments of all
		 * subordinate buses.
		 */
		__pci_bus_size_bridges(bus, add_list);

		/* Share leftover space with hotplug bridges below */
		pci_root_bus_distribute_available_resources(bus, add_list);

		/* Depth last, allocate resources and update the hardware. */
		__pci_bus_assign_resources(bus, add_list, &fail_head);
		/* add_list should have been fully consumed by assignment */
		if (WARN_ON_ONCE(add_list && !list_empty(add_list)))
			free_list(add_list);
		tried_times++;

		/* Any device complain? */
		if (list_empty(&fail_head))
			break;

		if (tried_times >= pci_try_num) {
			if (enable_local == undefined) {
				dev_info(&bus->dev,
					 "Some PCI device resources are unassigned, try booting with pci=realloc\n");
			} else if (enable_local == auto_enabled) {
				dev_info(&bus->dev,
					 "Automatically enabled pci realloc, if you have problem, try booting with pci=realloc=off\n");
			}
			free_list(&fail_head);
			break;
		}

		/* Third times and later will not check if it is leaf */
		if (tried_times + 1 > 2)
			rel_type = whole_subtree;

		pci_prepare_next_assign_round(&fail_head, tried_times, rel_type);
	}

	pci_bus_dump_resources(bus);
}
2364
pci_assign_unassigned_resources(void)2365 void pci_assign_unassigned_resources(void)
2366 {
2367 struct pci_bus *root_bus;
2368
2369 list_for_each_entry(root_bus, &pci_root_buses, node) {
2370 pci_assign_unassigned_root_bus_resources(root_bus);
2371
2372 /* Make sure the root bridge has a companion ACPI device */
2373 if (ACPI_HANDLE(root_bus->bridge))
2374 acpi_ioapic_add(ACPI_HANDLE(root_bus->bridge));
2375 }
2376 }
2377
/*
 * Assign any unassigned resources below hotplug bridge @bridge, then
 * re-enable the bridge and its bus mastering.
 *
 * The first pass keeps existing bridge windows; if some resources still
 * fail, a second (and last) pass releases whole subtrees and retries.
 */
void pci_assign_unassigned_bridge_resources(struct pci_dev *bridge)
{
	struct pci_bus *parent = bridge->subordinate;
	/* List of resources that want additional resources */
	LIST_HEAD(add_list);
	int tried_times = 0;
	LIST_HEAD(fail_head);
	int ret;

	while (1) {
		__pci_bus_size_bridges(parent, &add_list);

		/*
		 * Distribute remaining resources (if any) equally between
		 * hotplug bridges below. This makes it possible to extend
		 * the hierarchy later without running out of resources.
		 */
		pci_bridge_distribute_available_resources(bridge, &add_list);

		__pci_bridge_assign_resources(bridge, &add_list, &fail_head);
		/* add_list should have been fully consumed by assignment */
		if (WARN_ON_ONCE(!list_empty(&add_list)))
			free_list(&add_list);
		tried_times++;

		if (list_empty(&fail_head))
			break;

		if (tried_times >= 2) {
			/* Still fail, don't need to try more */
			free_list(&fail_head);
			break;
		}

		pci_prepare_next_assign_round(&fail_head, tried_times,
					      whole_subtree);
	}

	ret = pci_reenable_device(bridge);
	if (ret)
		pci_err(bridge, "Error reenabling bridge (%d)\n", ret);
	pci_set_master(bridge);
}
EXPORT_SYMBOL_GPL(pci_assign_unassigned_bridge_resources);
2421
/*
 * Walk to the root bus, find the bridge window relevant for @res and
 * release it when possible. If the bridge window contains assigned
 * resources, it cannot be released.
 *
 * After the walk, re-size and re-assign everything below the topmost
 * bridge visited, then rewrite the setup of every other saved bridge so
 * the hardware matches the new layout.
 *
 * Returns 0 on success, -ENOENT when @bus was already a root bus (no
 * upstream bridge exists), -ENOSPC when a resource of @res's type could
 * not be assigned, or another negative errno on failure.
 */
static int pbus_reassign_bridge_resources(struct pci_bus *bus, struct resource *res,
					  struct list_head *saved)
{
	/* Remember the original type; @res is rewritten during the walk */
	unsigned long type = res->flags;
	struct pci_dev_resource *dev_res;
	struct pci_dev *bridge = NULL;
	LIST_HEAD(added);
	LIST_HEAD(failed);
	unsigned int i;
	int ret = 0;

	/* Walk upwards, releasing the matching window at each level */
	while (!pci_is_root_bus(bus)) {
		bridge = bus->self;
		res = pbus_select_window(bus, res);
		if (!res)
			break;

		i = pci_resource_num(bridge, res);

		/* Ignore BARs which are still in use */
		if (!res->child) {
			/* Save the window so the caller can restore it */
			ret = add_to_list(saved, bridge, res, 0, 0);
			if (ret)
				return ret;

			pci_release_resource(bridge, i);
		} else {
			const char *res_name = pci_resource_name(bridge, i);

			pci_warn(bridge,
				 "%s %pR: was not released (still contains assigned resources)\n",
				 res_name, res);
		}

		bus = bus->parent;
	}

	if (!bridge)
		return -ENOENT;

	/* Re-size and re-assign below the topmost visited bridge */
	__pci_bus_size_bridges(bridge->subordinate, &added);
	__pci_bridge_assign_resources(bridge, &added, &failed);
	/* "added" should have been fully consumed by assignment */
	if (WARN_ON_ONCE(!list_empty(&added)))
		free_list(&added);

	if (!list_empty(&failed)) {
		/* Fail only when a resource of the original type failed */
		if (pci_required_resource_failed(&failed, type))
			ret = -ENOSPC;
		free_list(&failed);
		if (ret)
			return ret;

		/* Only resources with unrelated types failed (again) */
	}

	/* Push the new window setup out to the other saved bridges */
	list_for_each_entry(dev_res, saved, list) {
		struct pci_dev *dev = dev_res->dev;

		/* Skip the bridge we just assigned resources for */
		if (bridge == dev)
			continue;

		if (!dev->subordinate)
			continue;

		pci_setup_bridge(dev->subordinate);
	}

	return 0;
}
2497
pci_do_resource_release_and_resize(struct pci_dev * pdev,int resno,int size,int exclude_bars)2498 int pci_do_resource_release_and_resize(struct pci_dev *pdev, int resno, int size,
2499 int exclude_bars)
2500 {
2501 struct resource *res = pci_resource_n(pdev, resno);
2502 struct pci_dev_resource *dev_res;
2503 struct pci_bus *bus = pdev->bus;
2504 struct resource *b_win, *r;
2505 LIST_HEAD(saved);
2506 unsigned int i;
2507 int ret = 0;
2508
2509 b_win = pbus_select_window(bus, res);
2510 if (!b_win)
2511 return -EINVAL;
2512
2513 pci_dev_for_each_resource(pdev, r, i) {
2514 if (i >= PCI_BRIDGE_RESOURCES)
2515 break;
2516
2517 if (exclude_bars & BIT(i))
2518 continue;
2519
2520 if (b_win != pbus_select_window(bus, r))
2521 continue;
2522
2523 ret = add_to_list(&saved, pdev, r, 0, 0);
2524 if (ret)
2525 goto restore;
2526 pci_release_resource(pdev, i);
2527 }
2528
2529 pci_resize_resource_set_size(pdev, resno, size);
2530
2531 if (!bus->self)
2532 goto out;
2533
2534 down_read(&pci_bus_sem);
2535 ret = pbus_reassign_bridge_resources(bus, res, &saved);
2536 if (ret)
2537 goto restore;
2538
2539 out:
2540 up_read(&pci_bus_sem);
2541 free_list(&saved);
2542 return ret;
2543
2544 restore:
2545 /* Revert to the old configuration */
2546 list_for_each_entry(dev_res, &saved, list) {
2547 struct resource *res = dev_res->res;
2548 struct pci_dev *dev = dev_res->dev;
2549
2550 i = pci_resource_num(dev, res);
2551
2552 if (res->parent) {
2553 release_child_resources(res);
2554 pci_release_resource(dev, i);
2555 }
2556
2557 restore_dev_resource(dev_res);
2558
2559 ret = pci_claim_resource(dev, i);
2560 if (ret)
2561 continue;
2562
2563 if (i < PCI_BRIDGE_RESOURCES) {
2564 const char *res_name = pci_resource_name(dev, i);
2565
2566 pci_update_resource(dev, i);
2567 pci_info(dev, "%s %pR: old value restored\n",
2568 res_name, res);
2569 }
2570 if (dev->subordinate)
2571 pci_setup_bridge(dev->subordinate);
2572 }
2573 goto out;
2574 }
2575
pci_assign_unassigned_bus_resources(struct pci_bus * bus)2576 void pci_assign_unassigned_bus_resources(struct pci_bus *bus)
2577 {
2578 struct pci_dev *dev;
2579 /* List of resources that want additional resources */
2580 LIST_HEAD(add_list);
2581
2582 down_read(&pci_bus_sem);
2583 for_each_pci_bridge(dev, bus)
2584 if (pci_has_subordinate(dev))
2585 __pci_bus_size_bridges(dev->subordinate, &add_list);
2586 up_read(&pci_bus_sem);
2587 __pci_bus_assign_resources(bus, &add_list, NULL);
2588 if (WARN_ON_ONCE(!list_empty(&add_list)))
2589 free_list(&add_list);
2590 }
2591 EXPORT_SYMBOL_GPL(pci_assign_unassigned_bus_resources);
2592