xref: /linux/arch/powerpc/platforms/powernv/pci-ioda.c (revision c1aac62f36c1e37ee81c9e09ee9ee733eef05dcb)
1 /*
2  * Support PCI/PCIe on PowerNV platforms
3  *
4  * Copyright 2011 Benjamin Herrenschmidt, IBM Corp.
5  *
6  * This program is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU General Public License
8  * as published by the Free Software Foundation; either version
9  * 2 of the License, or (at your option) any later version.
10  */
11 
12 #undef DEBUG
13 
14 #include <linux/kernel.h>
15 #include <linux/pci.h>
16 #include <linux/crash_dump.h>
17 #include <linux/debugfs.h>
18 #include <linux/delay.h>
19 #include <linux/string.h>
20 #include <linux/init.h>
21 #include <linux/bootmem.h>
22 #include <linux/irq.h>
23 #include <linux/io.h>
24 #include <linux/msi.h>
25 #include <linux/memblock.h>
26 #include <linux/iommu.h>
27 #include <linux/rculist.h>
28 #include <linux/sizes.h>
29 
30 #include <asm/sections.h>
31 #include <asm/io.h>
32 #include <asm/prom.h>
33 #include <asm/pci-bridge.h>
34 #include <asm/machdep.h>
35 #include <asm/msi_bitmap.h>
36 #include <asm/ppc-pci.h>
37 #include <asm/opal.h>
38 #include <asm/iommu.h>
39 #include <asm/tce.h>
40 #include <asm/xics.h>
41 #include <asm/debug.h>
42 #include <asm/firmware.h>
43 #include <asm/pnv-pci.h>
44 #include <asm/mmzone.h>
45 
46 #include <misc/cxl-base.h>
47 
48 #include "powernv.h"
49 #include "pci.h"
50 
51 #define PNV_IODA1_M64_NUM	16	/* Number of M64 BARs	*/
52 #define PNV_IODA1_M64_SEGS	8	/* Segments per M64 BAR	*/
53 #define PNV_IODA1_DMA32_SEGSIZE	0x10000000
54 
55 #define POWERNV_IOMMU_DEFAULT_LEVELS	1
56 #define POWERNV_IOMMU_MAX_LEVELS	5
57 
58 static const char * const pnv_phb_names[] = { "IODA1", "IODA2", "NPU" };
59 static void pnv_pci_ioda2_table_free_pages(struct iommu_table *tbl);
60 
61 void pe_level_printk(const struct pnv_ioda_pe *pe, const char *level,
62 			    const char *fmt, ...)
63 {
64 	struct va_format vaf;
65 	va_list args;
66 	char pfix[32];
67 
68 	va_start(args, fmt);
69 
70 	vaf.fmt = fmt;
71 	vaf.va = &args;
72 
73 	if (pe->flags & PNV_IODA_PE_DEV)
74 		strlcpy(pfix, dev_name(&pe->pdev->dev), sizeof(pfix));
75 	else if (pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL))
76 		sprintf(pfix, "%04x:%02x     ",
77 			pci_domain_nr(pe->pbus), pe->pbus->number);
78 #ifdef CONFIG_PCI_IOV
79 	else if (pe->flags & PNV_IODA_PE_VF)
80 		sprintf(pfix, "%04x:%02x:%2x.%d",
81 			pci_domain_nr(pe->parent_dev->bus),
82 			(pe->rid & 0xff00) >> 8,
83 			PCI_SLOT(pe->rid), PCI_FUNC(pe->rid));
84 #endif /* CONFIG_PCI_IOV */
85 
86 	printk("%spci %s: [PE# %.2x] %pV",
87 	       level, pfix, pe->pe_number, &vaf);
88 
89 	va_end(args);
90 }
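/*
 * pe_level_printk() is the backend for the pe_err()/pe_warn()/pe_info()
 * helpers used throughout this file (they are defined as macros in
 * pci.h): each message is prefixed with the PCI address of the device,
 * bus or VF that owns the PE, plus the PE number, so log lines can be
 * tied back to a specific PE.
 */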
91 
92 static bool pnv_iommu_bypass_disabled __read_mostly;
93 
94 static int __init iommu_setup(char *str)
95 {
96 	if (!str)
97 		return -EINVAL;
98 
99 	while (*str) {
100 		if (!strncmp(str, "nobypass", 8)) {
101 			pnv_iommu_bypass_disabled = true;
102 			pr_info("PowerNV: IOMMU bypass window disabled.\n");
103 			break;
104 		}
105 		str += strcspn(str, ",");
106 		if (*str == ',')
107 			str++;
108 	}
109 
110 	return 0;
111 }
112 early_param("iommu", iommu_setup);
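/*
 * For example, booting with "iommu=nobypass" on the kernel command line
 * sets pnv_iommu_bypass_disabled, keeping devices on the 32-bit
 * TCE-mapped DMA window rather than the direct-mapped 64-bit bypass
 * window.
 */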
113 
114 static inline bool pnv_pci_is_m64(struct pnv_phb *phb, struct resource *r)
115 {
116 	/*
117 	 * WARNING: We cannot rely on the resource flags. The Linux PCI
118 	 * allocation code sometimes decides to put a 64-bit prefetchable
119 	 * BAR in the 32-bit window, so we have to compare the addresses.
120 	 *
121 	 * For simplicity we only test resource start.
122 	 */
123 	return (r->start >= phb->ioda.m64_base &&
124 		r->start < (phb->ioda.m64_base + phb->ioda.m64_size));
125 }
126 
127 static inline bool pnv_pci_is_m64_flags(unsigned long resource_flags)
128 {
129 	unsigned long flags = (IORESOURCE_MEM_64 | IORESOURCE_PREFETCH);
130 
131 	return (resource_flags & flags) == flags;
132 }
133 
134 static struct pnv_ioda_pe *pnv_ioda_init_pe(struct pnv_phb *phb, int pe_no)
135 {
136 	s64 rc;
137 
138 	phb->ioda.pe_array[pe_no].phb = phb;
139 	phb->ioda.pe_array[pe_no].pe_number = pe_no;
140 
141 	/*
142 	 * Clear the PE frozen state as it might have been put into frozen
143 	 * state by the last PCI remove path. It's harmless to do so when
144 	 * the PE is already unfrozen.
145 	 */
146 	rc = opal_pci_eeh_freeze_clear(phb->opal_id, pe_no,
147 				       OPAL_EEH_ACTION_CLEAR_FREEZE_ALL);
148 	if (rc != OPAL_SUCCESS && rc != OPAL_UNSUPPORTED)
149 		pr_warn("%s: Error %lld unfreezing PHB#%x-PE#%x\n",
150 			__func__, rc, phb->hose->global_number, pe_no);
151 
152 	return &phb->ioda.pe_array[pe_no];
153 }
154 
155 static void pnv_ioda_reserve_pe(struct pnv_phb *phb, int pe_no)
156 {
157 	if (!(pe_no >= 0 && pe_no < phb->ioda.total_pe_num)) {
158 		pr_warn("%s: Invalid PE %x on PHB#%x\n",
159 			__func__, pe_no, phb->hose->global_number);
160 		return;
161 	}
162 
163 	if (test_and_set_bit(pe_no, phb->ioda.pe_alloc))
164 		pr_debug("%s: PE %x was reserved on PHB#%x\n",
165 			 __func__, pe_no, phb->hose->global_number);
166 
167 	pnv_ioda_init_pe(phb, pe_no);
168 }
169 
170 static struct pnv_ioda_pe *pnv_ioda_alloc_pe(struct pnv_phb *phb)
171 {
172 	long pe;
173 
174 	for (pe = phb->ioda.total_pe_num - 1; pe >= 0; pe--) {
175 		if (!test_and_set_bit(pe, phb->ioda.pe_alloc))
176 			return pnv_ioda_init_pe(phb, pe);
177 	}
178 
179 	return NULL;
180 }
181 
182 static void pnv_ioda_free_pe(struct pnv_ioda_pe *pe)
183 {
184 	struct pnv_phb *phb = pe->phb;
185 	unsigned int pe_num = pe->pe_number;
186 
187 	WARN_ON(pe->pdev);
188 
189 	memset(pe, 0, sizeof(struct pnv_ioda_pe));
190 	clear_bit(pe_num, phb->ioda.pe_alloc);
191 }
192 
193 /* The default M64 BAR is shared by all PEs */
194 static int pnv_ioda2_init_m64(struct pnv_phb *phb)
195 {
196 	const char *desc;
197 	struct resource *r;
198 	s64 rc;
199 
200 	/* Configure the default M64 BAR */
201 	rc = opal_pci_set_phb_mem_window(phb->opal_id,
202 					 OPAL_M64_WINDOW_TYPE,
203 					 phb->ioda.m64_bar_idx,
204 					 phb->ioda.m64_base,
205 					 0, /* unused */
206 					 phb->ioda.m64_size);
207 	if (rc != OPAL_SUCCESS) {
208 		desc = "configuring";
209 		goto fail;
210 	}
211 
212 	/* Enable the default M64 BAR */
213 	rc = opal_pci_phb_mmio_enable(phb->opal_id,
214 				      OPAL_M64_WINDOW_TYPE,
215 				      phb->ioda.m64_bar_idx,
216 				      OPAL_ENABLE_M64_SPLIT);
217 	if (rc != OPAL_SUCCESS) {
218 		desc = "enabling";
219 		goto fail;
220 	}
221 
222 	/*
223 	 * Exclude the segments for the reserved and root bus PEs, which
224 	 * are the first or last two PEs.
225 	 */
226 	r = &phb->hose->mem_resources[1];
227 	if (phb->ioda.reserved_pe_idx == 0)
228 		r->start += (2 * phb->ioda.m64_segsize);
229 	else if (phb->ioda.reserved_pe_idx == (phb->ioda.total_pe_num - 1))
230 		r->end -= (2 * phb->ioda.m64_segsize);
231 	else
232 		pr_warn("  Cannot strip M64 segment for reserved PE#%x\n",
233 			phb->ioda.reserved_pe_idx);
234 
235 	return 0;
236 
237 fail:
238 	pr_warn("  Failure %lld %s M64 BAR#%d\n",
239 		rc, desc, phb->ioda.m64_bar_idx);
240 	opal_pci_phb_mmio_enable(phb->opal_id,
241 				 OPAL_M64_WINDOW_TYPE,
242 				 phb->ioda.m64_bar_idx,
243 				 OPAL_DISABLE_M64);
244 	return -EIO;
245 }
246 
247 static void pnv_ioda_reserve_dev_m64_pe(struct pci_dev *pdev,
248 					 unsigned long *pe_bitmap)
249 {
250 	struct pci_controller *hose = pci_bus_to_host(pdev->bus);
251 	struct pnv_phb *phb = hose->private_data;
252 	struct resource *r;
253 	resource_size_t base, sgsz, start, end;
254 	int segno, i;
255 
256 	base = phb->ioda.m64_base;
257 	sgsz = phb->ioda.m64_segsize;
258 	for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
259 		r = &pdev->resource[i];
260 		if (!r->parent || !pnv_pci_is_m64(phb, r))
261 			continue;
262 
263 		start = _ALIGN_DOWN(r->start - base, sgsz);
264 		end = _ALIGN_UP(r->end - base, sgsz);
265 		for (segno = start / sgsz; segno < end / sgsz; segno++) {
266 			if (pe_bitmap)
267 				set_bit(segno, pe_bitmap);
268 			else
269 				pnv_ioda_reserve_pe(phb, segno);
270 		}
271 	}
272 }
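/*
 * A worked example of the segment arithmetic above (addresses purely
 * illustrative): with m64_segsize = 256MB, a BAR occupying
 * [m64_base + 0x10000000, m64_base + 0x2fffffff] aligns down/up to
 * segments 1..2, so PE numbers 1 and 2 get reserved (or marked in
 * pe_bitmap) for that device.
 */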
273 
274 static int pnv_ioda1_init_m64(struct pnv_phb *phb)
275 {
276 	struct resource *r;
277 	int index;
278 
279 	/*
280 	 * There are 16 M64 BARs, each of which has 8 segments. So
281 	 * there are as many M64 segments as the maximum number of
282 	 * PEs, which is 128.
283 	 */
284 	for (index = 0; index < PNV_IODA1_M64_NUM; index++) {
285 		unsigned long base, segsz = phb->ioda.m64_segsize;
286 		int64_t rc;
287 
288 		base = phb->ioda.m64_base +
289 		       index * PNV_IODA1_M64_SEGS * segsz;
290 		rc = opal_pci_set_phb_mem_window(phb->opal_id,
291 				OPAL_M64_WINDOW_TYPE, index, base, 0,
292 				PNV_IODA1_M64_SEGS * segsz);
293 		if (rc != OPAL_SUCCESS) {
294 			pr_warn("  Error %lld setting M64 PHB#%x-BAR#%d\n",
295 				rc, phb->hose->global_number, index);
296 			goto fail;
297 		}
298 
299 		rc = opal_pci_phb_mmio_enable(phb->opal_id,
300 				OPAL_M64_WINDOW_TYPE, index,
301 				OPAL_ENABLE_M64_SPLIT);
302 		if (rc != OPAL_SUCCESS) {
303 			pr_warn("  Error %lld enabling M64 PHB#%x-BAR#%d\n",
304 				rc, phb->hose->global_number, index);
305 			goto fail;
306 		}
307 	}
308 
309 	/*
310 	 * Exclude the segments for the reserved and root bus PEs, which
311 	 * are the first or last two PEs.
312 	 */
313 	r = &phb->hose->mem_resources[1];
314 	if (phb->ioda.reserved_pe_idx == 0)
315 		r->start += (2 * phb->ioda.m64_segsize);
316 	else if (phb->ioda.reserved_pe_idx == (phb->ioda.total_pe_num - 1))
317 		r->end -= (2 * phb->ioda.m64_segsize);
318 	else
319 		WARN(1, "Wrong reserved PE#%x on PHB#%x\n",
320 		     phb->ioda.reserved_pe_idx, phb->hose->global_number);
321 
322 	return 0;
323 
324 fail:
325 	for ( ; index >= 0; index--)
326 		opal_pci_phb_mmio_enable(phb->opal_id,
327 			OPAL_M64_WINDOW_TYPE, index, OPAL_DISABLE_M64);
328 
329 	return -EIO;
330 }
331 
332 static void pnv_ioda_reserve_m64_pe(struct pci_bus *bus,
333 				    unsigned long *pe_bitmap,
334 				    bool all)
335 {
336 	struct pci_dev *pdev;
337 
338 	list_for_each_entry(pdev, &bus->devices, bus_list) {
339 		pnv_ioda_reserve_dev_m64_pe(pdev, pe_bitmap);
340 
341 		if (all && pdev->subordinate)
342 			pnv_ioda_reserve_m64_pe(pdev->subordinate,
343 						pe_bitmap, all);
344 	}
345 }
346 
347 static struct pnv_ioda_pe *pnv_ioda_pick_m64_pe(struct pci_bus *bus, bool all)
348 {
349 	struct pci_controller *hose = pci_bus_to_host(bus);
350 	struct pnv_phb *phb = hose->private_data;
351 	struct pnv_ioda_pe *master_pe, *pe;
352 	unsigned long size, *pe_alloc;
353 	int i;
354 
355 	/* Root bus shouldn't use M64 */
356 	if (pci_is_root_bus(bus))
357 		return NULL;
358 
359 	/* Allocate bitmap */
360 	size = _ALIGN_UP(phb->ioda.total_pe_num / 8, sizeof(unsigned long));
361 	pe_alloc = kzalloc(size, GFP_KERNEL);
362 	if (!pe_alloc) {
363 		pr_warn("%s: Out of memory!\n",
364 			__func__);
365 		return NULL;
366 	}
367 
368 	/* Figure out reserved PE numbers by the PE */
369 	pnv_ioda_reserve_m64_pe(bus, pe_alloc, all);
370 
371 	/*
372 	 * The current bus might not own any M64 windows itself; they may
373 	 * all be contributed by its child buses. In that case, we needn't
374 	 * pick an M64-dependent PE#.
375 	 */
376 	if (bitmap_empty(pe_alloc, phb->ioda.total_pe_num)) {
377 		kfree(pe_alloc);
378 		return NULL;
379 	}
380 
381 	/*
382 	 * Figure out the master PE and put all slave PEs to master
383 	 * PE's list to form compound PE.
384 	 */
385 	master_pe = NULL;
386 	i = -1;
387 	while ((i = find_next_bit(pe_alloc, phb->ioda.total_pe_num, i + 1)) <
388 		phb->ioda.total_pe_num) {
389 		pe = &phb->ioda.pe_array[i];
390 
391 		phb->ioda.m64_segmap[pe->pe_number] = pe->pe_number;
392 		if (!master_pe) {
393 			pe->flags |= PNV_IODA_PE_MASTER;
394 			INIT_LIST_HEAD(&pe->slaves);
395 			master_pe = pe;
396 		} else {
397 			pe->flags |= PNV_IODA_PE_SLAVE;
398 			pe->master = master_pe;
399 			list_add_tail(&pe->list, &master_pe->slaves);
400 		}
401 
402 		/*
403 		 * P7IOC supports M64DT, which helps mapping M64 segment
404 		 * to one particular PE#. However, PHB3 has fixed mapping
405 		 * between M64 segment and PE#. In order to have same logic
406 		 * for P7IOC and PHB3, we enforce fixed mapping between M64
407 		 * segment and PE# on P7IOC.
408 		 */
409 		if (phb->type == PNV_PHB_IODA1) {
410 			int64_t rc;
411 
412 			rc = opal_pci_map_pe_mmio_window(phb->opal_id,
413 					pe->pe_number, OPAL_M64_WINDOW_TYPE,
414 					pe->pe_number / PNV_IODA1_M64_SEGS,
415 					pe->pe_number % PNV_IODA1_M64_SEGS);
416 			if (rc != OPAL_SUCCESS)
417 				pr_warn("%s: Error %lld mapping M64 for PHB#%x-PE#%x\n",
418 					__func__, rc, phb->hose->global_number,
419 					pe->pe_number);
420 		}
421 	}
422 
423 	kfree(pe_alloc);
424 	return master_pe;
425 }
426 
427 static void __init pnv_ioda_parse_m64_window(struct pnv_phb *phb)
428 {
429 	struct pci_controller *hose = phb->hose;
430 	struct device_node *dn = hose->dn;
431 	struct resource *res;
432 	u32 m64_range[2], i;
433 	const __be32 *r;
434 	u64 pci_addr;
435 
436 	if (phb->type != PNV_PHB_IODA1 && phb->type != PNV_PHB_IODA2) {
437 		pr_info("  M64 window not supported\n");
438 		return;
439 	}
440 
441 	if (!firmware_has_feature(FW_FEATURE_OPAL)) {
442 		pr_info("  Firmware too old to support M64 window\n");
443 		return;
444 	}
445 
446 	r = of_get_property(dn, "ibm,opal-m64-window", NULL);
447 	if (!r) {
448 		pr_info("  No <ibm,opal-m64-window> on %s\n",
449 			dn->full_name);
450 		return;
451 	}
452 
453 	/*
454 	 * Find the available M64 BAR range and pick the last one to
455 	 * cover the whole 64-bit space. We support only one range.
456 	 */
457 	if (of_property_read_u32_array(dn, "ibm,opal-available-m64-ranges",
458 				       m64_range, 2)) {
459 		/* In absence of the property, assume 0..15 */
460 		m64_range[0] = 0;
461 		m64_range[1] = 16;
462 	}
463 	/* We only support 64 bits in our allocator */
464 	if (m64_range[1] > 63) {
465 		pr_warn("%s: Limiting M64 range to 63 (from %d) on PHB#%x\n",
466 			__func__, m64_range[1], phb->hose->global_number);
467 		m64_range[1] = 63;
468 	}
469 	/* Empty range, no m64 */
470 	if (m64_range[1] <= m64_range[0]) {
471 		pr_warn("%s: M64 empty, disabling M64 usage on PHB#%x\n",
472 			__func__, phb->hose->global_number);
473 		return;
474 	}
475 
476 	/* Configure M64 information */
477 	res = &hose->mem_resources[1];
478 	res->name = dn->full_name;
479 	res->start = of_translate_address(dn, r + 2);
480 	res->end = res->start + of_read_number(r + 4, 2) - 1;
481 	res->flags = (IORESOURCE_MEM | IORESOURCE_MEM_64 | IORESOURCE_PREFETCH);
482 	pci_addr = of_read_number(r, 2);
483 	hose->mem_offset[1] = res->start - pci_addr;
484 
485 	phb->ioda.m64_size = resource_size(res);
486 	phb->ioda.m64_segsize = phb->ioda.m64_size / phb->ioda.total_pe_num;
487 	phb->ioda.m64_base = pci_addr;
488 
489 	/* This lines up nicely with the display from processing OF ranges */
490 	pr_info(" MEM 0x%016llx..0x%016llx -> 0x%016llx (M64 #%d..%d)\n",
491 		res->start, res->end, pci_addr, m64_range[0],
492 		m64_range[0] + m64_range[1] - 1);
493 
494 	/* Mark all M64 used up by default */
495 	phb->ioda.m64_bar_alloc = (unsigned long)-1;
496 
497 	/* Use last M64 BAR to cover M64 window */
498 	m64_range[1]--;
499 	phb->ioda.m64_bar_idx = m64_range[0] + m64_range[1];
500 
501 	pr_info(" Using M64 #%d as default window\n", phb->ioda.m64_bar_idx);
502 
503 	/* Mark remaining ones free */
504 	for (i = m64_range[0]; i < m64_range[1]; i++)
505 		clear_bit(i, &phb->ioda.m64_bar_alloc);
506 
507 	/*
508 	 * Setup init functions for M64 based on IODA version, IODA3 uses
509 	 * the IODA2 code.
510 	 */
511 	if (phb->type == PNV_PHB_IODA1)
512 		phb->init_m64 = pnv_ioda1_init_m64;
513 	else
514 		phb->init_m64 = pnv_ioda2_init_m64;
515 	phb->reserve_m64_pe = pnv_ioda_reserve_m64_pe;
516 	phb->pick_m64_pe = pnv_ioda_pick_m64_pe;
517 }
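/*
 * The "ibm,opal-m64-window" property parsed above is laid out as three
 * 2-cell values: the window's PCI address (r), its parent/CPU address
 * (r + 2, run through of_translate_address()), and its size (r + 4).
 * As a rough example, a 64GB M64 window on a PHB with 256 PEs gives an
 * m64_segsize of 256MB per PE.
 */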
518 
519 static void pnv_ioda_freeze_pe(struct pnv_phb *phb, int pe_no)
520 {
521 	struct pnv_ioda_pe *pe = &phb->ioda.pe_array[pe_no];
522 	struct pnv_ioda_pe *slave;
523 	s64 rc;
524 
525 	/* Fetch master PE */
526 	if (pe->flags & PNV_IODA_PE_SLAVE) {
527 		pe = pe->master;
528 		if (WARN_ON(!pe || !(pe->flags & PNV_IODA_PE_MASTER)))
529 			return;
530 
531 		pe_no = pe->pe_number;
532 	}
533 
534 	/* Freeze master PE */
535 	rc = opal_pci_eeh_freeze_set(phb->opal_id,
536 				     pe_no,
537 				     OPAL_EEH_ACTION_SET_FREEZE_ALL);
538 	if (rc != OPAL_SUCCESS) {
539 		pr_warn("%s: Failure %lld freezing PHB#%x-PE#%x\n",
540 			__func__, rc, phb->hose->global_number, pe_no);
541 		return;
542 	}
543 
544 	/* Freeze slave PEs */
545 	if (!(pe->flags & PNV_IODA_PE_MASTER))
546 		return;
547 
548 	list_for_each_entry(slave, &pe->slaves, list) {
549 		rc = opal_pci_eeh_freeze_set(phb->opal_id,
550 					     slave->pe_number,
551 					     OPAL_EEH_ACTION_SET_FREEZE_ALL);
552 		if (rc != OPAL_SUCCESS)
553 			pr_warn("%s: Failure %lld freezing PHB#%x-PE#%x\n",
554 				__func__, rc, phb->hose->global_number,
555 				slave->pe_number);
556 	}
557 }
558 
559 static int pnv_ioda_unfreeze_pe(struct pnv_phb *phb, int pe_no, int opt)
560 {
561 	struct pnv_ioda_pe *pe, *slave;
562 	s64 rc;
563 
564 	/* Find master PE */
565 	pe = &phb->ioda.pe_array[pe_no];
566 	if (pe->flags & PNV_IODA_PE_SLAVE) {
567 		pe = pe->master;
568 		WARN_ON(!pe || !(pe->flags & PNV_IODA_PE_MASTER));
569 		pe_no = pe->pe_number;
570 	}
571 
572 	/* Clear frozen state for master PE */
573 	rc = opal_pci_eeh_freeze_clear(phb->opal_id, pe_no, opt);
574 	if (rc != OPAL_SUCCESS) {
575 		pr_warn("%s: Failure %lld clear %d on PHB#%x-PE#%x\n",
576 			__func__, rc, opt, phb->hose->global_number, pe_no);
577 		return -EIO;
578 	}
579 
580 	if (!(pe->flags & PNV_IODA_PE_MASTER))
581 		return 0;
582 
583 	/* Clear frozen state for slave PEs */
584 	list_for_each_entry(slave, &pe->slaves, list) {
585 		rc = opal_pci_eeh_freeze_clear(phb->opal_id,
586 					     slave->pe_number,
587 					     opt);
588 		if (rc != OPAL_SUCCESS) {
589 			pr_warn("%s: Failure %lld clear %d on PHB#%x-PE#%x\n",
590 				__func__, rc, opt, phb->hose->global_number,
591 				slave->pe_number);
592 			return -EIO;
593 		}
594 	}
595 
596 	return 0;
597 }
598 
599 static int pnv_ioda_get_pe_state(struct pnv_phb *phb, int pe_no)
600 {
601 	struct pnv_ioda_pe *slave, *pe;
602 	u8 fstate, state;
603 	__be16 pcierr;
604 	s64 rc;
605 
606 	/* Sanity check on PE number */
607 	if (pe_no < 0 || pe_no >= phb->ioda.total_pe_num)
608 		return OPAL_EEH_STOPPED_PERM_UNAVAIL;
609 
610 	/*
611 	 * Fetch the master PE. Note the PE instance might not be
612 	 * initialized yet.
613 	 */
614 	pe = &phb->ioda.pe_array[pe_no];
615 	if (pe->flags & PNV_IODA_PE_SLAVE) {
616 		pe = pe->master;
617 		WARN_ON(!pe || !(pe->flags & PNV_IODA_PE_MASTER));
618 		pe_no = pe->pe_number;
619 	}
620 
621 	/* Check the master PE */
622 	rc = opal_pci_eeh_freeze_status(phb->opal_id, pe_no,
623 					&state, &pcierr, NULL);
624 	if (rc != OPAL_SUCCESS) {
625 		pr_warn("%s: Failure %lld getting PHB#%x-PE#%x state\n",
626 			__func__, rc,
627 			phb->hose->global_number,
628 			pe_no);
629 		return OPAL_EEH_STOPPED_TEMP_UNAVAIL;
630 	}
631 
632 	/* Check the slave PE */
633 	if (!(pe->flags & PNV_IODA_PE_MASTER))
634 		return state;
635 
636 	list_for_each_entry(slave, &pe->slaves, list) {
637 		rc = opal_pci_eeh_freeze_status(phb->opal_id,
638 						slave->pe_number,
639 						&fstate,
640 						&pcierr,
641 						NULL);
642 		if (rc != OPAL_SUCCESS) {
643 			pr_warn("%s: Failure %lld getting PHB#%x-PE#%x state\n",
644 				__func__, rc,
645 				phb->hose->global_number,
646 				slave->pe_number);
647 			return OPAL_EEH_STOPPED_TEMP_UNAVAIL;
648 		}
649 
650 		/*
651 		 * Override the result based on the ascending
652 		 * priority.
653 		 */
654 		if (fstate > state)
655 			state = fstate;
656 	}
657 
658 	return state;
659 }
660 
661 /* Currently these two are only used when MSIs are enabled. This will
662  * change, but in the meantime we need to protect them to avoid warnings.
663  */
664 #ifdef CONFIG_PCI_MSI
665 struct pnv_ioda_pe *pnv_ioda_get_pe(struct pci_dev *dev)
666 {
667 	struct pci_controller *hose = pci_bus_to_host(dev->bus);
668 	struct pnv_phb *phb = hose->private_data;
669 	struct pci_dn *pdn = pci_get_pdn(dev);
670 
671 	if (!pdn)
672 		return NULL;
673 	if (pdn->pe_number == IODA_INVALID_PE)
674 		return NULL;
675 	return &phb->ioda.pe_array[pdn->pe_number];
676 }
677 #endif /* CONFIG_PCI_MSI */
678 
679 static int pnv_ioda_set_one_peltv(struct pnv_phb *phb,
680 				  struct pnv_ioda_pe *parent,
681 				  struct pnv_ioda_pe *child,
682 				  bool is_add)
683 {
684 	const char *desc = is_add ? "adding" : "removing";
685 	uint8_t op = is_add ? OPAL_ADD_PE_TO_DOMAIN :
686 			      OPAL_REMOVE_PE_FROM_DOMAIN;
687 	struct pnv_ioda_pe *slave;
688 	long rc;
689 
690 	/* Parent PE affects child PE */
691 	rc = opal_pci_set_peltv(phb->opal_id, parent->pe_number,
692 				child->pe_number, op);
693 	if (rc != OPAL_SUCCESS) {
694 		pe_warn(child, "OPAL error %ld %s to parent PELTV\n",
695 			rc, desc);
696 		return -ENXIO;
697 	}
698 
699 	if (!(child->flags & PNV_IODA_PE_MASTER))
700 		return 0;
701 
702 	/* Compound case: parent PE affects slave PEs */
703 	list_for_each_entry(slave, &child->slaves, list) {
704 		rc = opal_pci_set_peltv(phb->opal_id, parent->pe_number,
705 					slave->pe_number, op);
706 		if (rc != OPAL_SUCCESS) {
707 			pe_warn(slave, "OPAL error %ld %s to parent PELTV\n",
708 				rc, desc);
709 			return -ENXIO;
710 		}
711 	}
712 
713 	return 0;
714 }
715 
716 static int pnv_ioda_set_peltv(struct pnv_phb *phb,
717 			      struct pnv_ioda_pe *pe,
718 			      bool is_add)
719 {
720 	struct pnv_ioda_pe *slave;
721 	struct pci_dev *pdev = NULL;
722 	int ret;
723 
724 	/*
725 	 * Clear the PE frozen state. If it's a master PE, we need to
726 	 * clear the slave PEs' frozen state as well.
727 	 */
728 	if (is_add) {
729 		opal_pci_eeh_freeze_clear(phb->opal_id, pe->pe_number,
730 					  OPAL_EEH_ACTION_CLEAR_FREEZE_ALL);
731 		if (pe->flags & PNV_IODA_PE_MASTER) {
732 			list_for_each_entry(slave, &pe->slaves, list)
733 				opal_pci_eeh_freeze_clear(phb->opal_id,
734 							  slave->pe_number,
735 							  OPAL_EEH_ACTION_CLEAR_FREEZE_ALL);
736 		}
737 	}
738 
739 	/*
740 	 * Associate the PE in the PELT. We need to add the PE to
741 	 * the corresponding PELT-V as well. Otherwise, errors
742 	 * originating from the PE might propagate to other
743 	 * PEs.
744 	 */
745 	ret = pnv_ioda_set_one_peltv(phb, pe, pe, is_add);
746 	if (ret)
747 		return ret;
748 
749 	/* For compound PEs, any one affects all of them */
750 	if (pe->flags & PNV_IODA_PE_MASTER) {
751 		list_for_each_entry(slave, &pe->slaves, list) {
752 			ret = pnv_ioda_set_one_peltv(phb, slave, pe, is_add);
753 			if (ret)
754 				return ret;
755 		}
756 	}
757 
758 	if (pe->flags & (PNV_IODA_PE_BUS_ALL | PNV_IODA_PE_BUS))
759 		pdev = pe->pbus->self;
760 	else if (pe->flags & PNV_IODA_PE_DEV)
761 		pdev = pe->pdev->bus->self;
762 #ifdef CONFIG_PCI_IOV
763 	else if (pe->flags & PNV_IODA_PE_VF)
764 		pdev = pe->parent_dev;
765 #endif /* CONFIG_PCI_IOV */
766 	while (pdev) {
767 		struct pci_dn *pdn = pci_get_pdn(pdev);
768 		struct pnv_ioda_pe *parent;
769 
770 		if (pdn && pdn->pe_number != IODA_INVALID_PE) {
771 			parent = &phb->ioda.pe_array[pdn->pe_number];
772 			ret = pnv_ioda_set_one_peltv(phb, parent, pe, is_add);
773 			if (ret)
774 				return ret;
775 		}
776 
777 		pdev = pdev->bus->self;
778 	}
779 
780 	return 0;
781 }
782 
783 static int pnv_ioda_deconfigure_pe(struct pnv_phb *phb, struct pnv_ioda_pe *pe)
784 {
785 	struct pci_dev *parent;
786 	uint8_t bcomp, dcomp, fcomp;
787 	int64_t rc;
788 	long rid_end, rid;
789 
790 	/* Currently, we just deconfigure VF PEs. Bus PEs will always be there. */
791 	if (pe->pbus) {
792 		int count;
793 
794 		dcomp = OPAL_IGNORE_RID_DEVICE_NUMBER;
795 		fcomp = OPAL_IGNORE_RID_FUNCTION_NUMBER;
796 		parent = pe->pbus->self;
797 		if (pe->flags & PNV_IODA_PE_BUS_ALL)
798 			count = pe->pbus->busn_res.end - pe->pbus->busn_res.start + 1;
799 		else
800 			count = 1;
801 
802 		switch(count) {
803 		case  1: bcomp = OpalPciBusAll;         break;
804 		case  2: bcomp = OpalPciBus7Bits;       break;
805 		case  4: bcomp = OpalPciBus6Bits;       break;
806 		case  8: bcomp = OpalPciBus5Bits;       break;
807 		case 16: bcomp = OpalPciBus4Bits;       break;
808 		case 32: bcomp = OpalPciBus3Bits;       break;
809 		default:
810 			dev_err(&pe->pbus->dev, "Number of subordinate buses %d unsupported\n",
811 			        count);
812 			/* Do an exact match only */
813 			bcomp = OpalPciBusAll;
814 		}
815 		rid_end = pe->rid + (count << 8);
816 	} else {
817 #ifdef CONFIG_PCI_IOV
818 		if (pe->flags & PNV_IODA_PE_VF)
819 			parent = pe->parent_dev;
820 		else
821 #endif
822 			parent = pe->pdev->bus->self;
823 		bcomp = OpalPciBusAll;
824 		dcomp = OPAL_COMPARE_RID_DEVICE_NUMBER;
825 		fcomp = OPAL_COMPARE_RID_FUNCTION_NUMBER;
826 		rid_end = pe->rid + 1;
827 	}
828 
829 	/* Clear the reverse map */
830 	for (rid = pe->rid; rid < rid_end; rid++)
831 		phb->ioda.pe_rmap[rid] = IODA_INVALID_PE;
832 
833 	/* Release from all parents PELT-V */
834 	while (parent) {
835 		struct pci_dn *pdn = pci_get_pdn(parent);
836 		if (pdn && pdn->pe_number != IODA_INVALID_PE) {
837 			rc = opal_pci_set_peltv(phb->opal_id, pdn->pe_number,
838 						pe->pe_number, OPAL_REMOVE_PE_FROM_DOMAIN);
839 			/* XXX What to do in case of error ? */
840 		}
841 		parent = parent->bus->self;
842 	}
843 
844 	opal_pci_eeh_freeze_clear(phb->opal_id, pe->pe_number,
845 				  OPAL_EEH_ACTION_CLEAR_FREEZE_ALL);
846 
847 	/* Disassociate PE in PELT */
848 	rc = opal_pci_set_peltv(phb->opal_id, pe->pe_number,
849 				pe->pe_number, OPAL_REMOVE_PE_FROM_DOMAIN);
850 	if (rc)
851 		pe_warn(pe, "OPAL error %ld remove self from PELTV\n", rc);
852 	rc = opal_pci_set_pe(phb->opal_id, pe->pe_number, pe->rid,
853 			     bcomp, dcomp, fcomp, OPAL_UNMAP_PE);
854 	if (rc)
855 		pe_err(pe, "OPAL error %ld trying to setup PELT table\n", rc);
856 
857 	pe->pbus = NULL;
858 	pe->pdev = NULL;
859 #ifdef CONFIG_PCI_IOV
860 	pe->parent_dev = NULL;
861 #endif
862 
863 	return 0;
864 }
865 
866 static int pnv_ioda_configure_pe(struct pnv_phb *phb, struct pnv_ioda_pe *pe)
867 {
868 	struct pci_dev *parent;
869 	uint8_t bcomp, dcomp, fcomp;
870 	long rc, rid_end, rid;
871 
872 	/* Bus validation ? */
873 	if (pe->pbus) {
874 		int count;
875 
876 		dcomp = OPAL_IGNORE_RID_DEVICE_NUMBER;
877 		fcomp = OPAL_IGNORE_RID_FUNCTION_NUMBER;
878 		parent = pe->pbus->self;
879 		if (pe->flags & PNV_IODA_PE_BUS_ALL)
880 			count = pe->pbus->busn_res.end - pe->pbus->busn_res.start + 1;
881 		else
882 			count = 1;
883 
884 		switch(count) {
885 		case  1: bcomp = OpalPciBusAll;		break;
886 		case  2: bcomp = OpalPciBus7Bits;	break;
887 		case  4: bcomp = OpalPciBus6Bits;	break;
888 		case  8: bcomp = OpalPciBus5Bits;	break;
889 		case 16: bcomp = OpalPciBus4Bits;	break;
890 		case 32: bcomp = OpalPciBus3Bits;	break;
891 		default:
892 			dev_err(&pe->pbus->dev, "Number of subordinate buses %d unsupported\n",
893 			        count);
894 			/* Do an exact match only */
895 			bcomp = OpalPciBusAll;
896 		}
897 		rid_end = pe->rid + (count << 8);
898 	} else {
899 #ifdef CONFIG_PCI_IOV
900 		if (pe->flags & PNV_IODA_PE_VF)
901 			parent = pe->parent_dev;
902 		else
903 #endif /* CONFIG_PCI_IOV */
904 			parent = pe->pdev->bus->self;
905 		bcomp = OpalPciBusAll;
906 		dcomp = OPAL_COMPARE_RID_DEVICE_NUMBER;
907 		fcomp = OPAL_COMPARE_RID_FUNCTION_NUMBER;
908 		rid_end = pe->rid + 1;
909 	}
910 
911 	/*
912 	 * Associate the PE in the PELT. We need to add the PE to
913 	 * the corresponding PELT-V as well. Otherwise, errors
914 	 * originating from the PE might propagate to other
915 	 * PEs.
916 	 */
917 	rc = opal_pci_set_pe(phb->opal_id, pe->pe_number, pe->rid,
918 			     bcomp, dcomp, fcomp, OPAL_MAP_PE);
919 	if (rc) {
920 		pe_err(pe, "OPAL error %ld trying to setup PELT table\n", rc);
921 		return -ENXIO;
922 	}
923 
924 	/*
925 	 * Configure PELTV. NPUs don't have a PELTV table so skip
926 	 * configuration on them.
927 	 */
928 	if (phb->type != PNV_PHB_NPU)
929 		pnv_ioda_set_peltv(phb, pe, true);
930 
931 	/* Setup reverse map */
932 	for (rid = pe->rid; rid < rid_end; rid++)
933 		phb->ioda.pe_rmap[rid] = pe->pe_number;
934 
935 	/* Set up one MVE on IODA1 */
936 	if (phb->type != PNV_PHB_IODA1) {
937 		pe->mve_number = 0;
938 		goto out;
939 	}
940 
941 	pe->mve_number = pe->pe_number;
942 	rc = opal_pci_set_mve(phb->opal_id, pe->mve_number, pe->pe_number);
943 	if (rc != OPAL_SUCCESS) {
944 		pe_err(pe, "OPAL error %ld setting up MVE %x\n",
945 		       rc, pe->mve_number);
946 		pe->mve_number = -1;
947 	} else {
948 		rc = opal_pci_set_mve_enable(phb->opal_id,
949 					     pe->mve_number, OPAL_ENABLE_MVE);
950 		if (rc) {
951 			pe_err(pe, "OPAL error %ld enabling MVE %x\n",
952 			       rc, pe->mve_number);
953 			pe->mve_number = -1;
954 		}
955 	}
956 
957 out:
958 	return 0;
959 }
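/*
 * The bcomp values chosen above control how many high-order bits of the
 * bus number are compared when RIDs are matched to this PE:
 * OpalPciBusAll compares the full bus number (an exact, single-bus
 * match), while e.g. OpalPciBus3Bits compares only the top 3 bits and
 * so covers a 32-bus range. A PNV_IODA_PE_BUS_ALL PE spanning 16 buses
 * therefore ends up with bcomp = OpalPciBus4Bits and
 * rid_end = rid + (16 << 8).
 */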
960 
961 #ifdef CONFIG_PCI_IOV
962 static int pnv_pci_vf_resource_shift(struct pci_dev *dev, int offset)
963 {
964 	struct pci_dn *pdn = pci_get_pdn(dev);
965 	int i;
966 	struct resource *res, res2;
967 	resource_size_t size;
968 	u16 num_vfs;
969 
970 	if (!dev->is_physfn)
971 		return -EINVAL;
972 
973 	/*
974 	 * "offset" is in VFs.  The M64 windows are sized so that when they
975 	 * are segmented, each segment is the same size as the IOV BAR.
976 	 * Each segment is in a separate PE, and the high order bits of the
977 	 * address are the PE number.  Therefore, each VF's BAR is in a
978 	 * separate PE, and changing the IOV BAR start address changes the
979 	 * range of PEs the VFs are in.
980 	 */
981 	num_vfs = pdn->num_vfs;
982 	for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
983 		res = &dev->resource[i + PCI_IOV_RESOURCES];
984 		if (!res->flags || !res->parent)
985 			continue;
986 
987 		/*
988 		 * The actual IOV BAR range is determined by the start address
989 		 * and the actual size of num_vfs VFs' BARs.  This check is to
990 		 * make sure that, after shifting, the range will not overlap
991 		 * with another device.
992 		 */
993 		size = pci_iov_resource_size(dev, i + PCI_IOV_RESOURCES);
994 		res2.flags = res->flags;
995 		res2.start = res->start + (size * offset);
996 		res2.end = res2.start + (size * num_vfs) - 1;
997 
998 		if (res2.end > res->end) {
999 			dev_err(&dev->dev, "VF BAR%d: %pR would extend past %pR (trying to enable %d VFs shifted by %d)\n",
1000 				i, &res2, res, num_vfs, offset);
1001 			return -EBUSY;
1002 		}
1003 	}
1004 
1005 	/*
1006 	 * After doing so, there will be a "hole" in /proc/iomem when the
1007 	 * offset is a positive value. It looks as if the device returned
1008 	 * some MMIO space to the system, which nobody can actually use.
1009 	 */
1010 	for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
1011 		res = &dev->resource[i + PCI_IOV_RESOURCES];
1012 		if (!res->flags || !res->parent)
1013 			continue;
1014 
1015 		size = pci_iov_resource_size(dev, i + PCI_IOV_RESOURCES);
1016 		res2 = *res;
1017 		res->start += size * offset;
1018 
1019 		dev_info(&dev->dev, "VF BAR%d: %pR shifted to %pR (%sabling %d VFs shifted by %d)\n",
1020 			 i, &res2, res, (offset > 0) ? "En" : "Dis",
1021 			 num_vfs, offset);
1022 		pci_update_resource(dev, i + PCI_IOV_RESOURCES);
1023 	}
1024 	return 0;
1025 }
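/*
 * Illustrative example of the shift above (numbers hypothetical): with a
 * per-VF BAR size of 1MB and an IOV BAR at 0x80000000, an offset of 4
 * moves the BAR to 0x80400000, i.e. up by four M64 segments, so that
 * each VF's slice of the BAR lands in the segment, and hence the PE,
 * that was allocated to that VF.
 */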
1026 #endif /* CONFIG_PCI_IOV */
1027 
1028 static struct pnv_ioda_pe *pnv_ioda_setup_dev_PE(struct pci_dev *dev)
1029 {
1030 	struct pci_controller *hose = pci_bus_to_host(dev->bus);
1031 	struct pnv_phb *phb = hose->private_data;
1032 	struct pci_dn *pdn = pci_get_pdn(dev);
1033 	struct pnv_ioda_pe *pe;
1034 
1035 	if (!pdn) {
1036 		pr_err("%s: Device tree node not associated properly\n",
1037 			   pci_name(dev));
1038 		return NULL;
1039 	}
1040 	if (pdn->pe_number != IODA_INVALID_PE)
1041 		return NULL;
1042 
1043 	pe = pnv_ioda_alloc_pe(phb);
1044 	if (!pe) {
1045 		pr_warning("%s: Not enough PE# available, disabling device\n",
1046 			   pci_name(dev));
1047 		return NULL;
1048 	}
1049 
1050 	/* NOTE: We take only one ref to the pci_dev, for the pdn, not for the
1051 	 * pointer in the PE data structure; both should be destroyed at the
1052 	 * same time. However, this needs to be looked at more closely again
1053 	 * once we actually start removing things (Hotplug, SR-IOV, ...)
1054 	 *
1055 	 * At some point we want to remove the PDN completely anyway.
1056 	 */
1057 	pci_dev_get(dev);
1058 	pdn->pcidev = dev;
1059 	pdn->pe_number = pe->pe_number;
1060 	pe->flags = PNV_IODA_PE_DEV;
1061 	pe->pdev = dev;
1062 	pe->pbus = NULL;
1063 	pe->mve_number = -1;
1064 	pe->rid = dev->bus->number << 8 | pdn->devfn;
1065 
1066 	pe_info(pe, "Associated device to PE\n");
1067 
1068 	if (pnv_ioda_configure_pe(phb, pe)) {
1069 		/* XXX What do we do here ? */
1070 		pnv_ioda_free_pe(pe);
1071 		pdn->pe_number = IODA_INVALID_PE;
1072 		pe->pdev = NULL;
1073 		pci_dev_put(dev);
1074 		return NULL;
1075 	}
1076 
1077 	/* Put PE to the list */
1078 	list_add_tail(&pe->list, &phb->ioda.pe_list);
1079 
1080 	return pe;
1081 }
1082 
1083 static void pnv_ioda_setup_same_PE(struct pci_bus *bus, struct pnv_ioda_pe *pe)
1084 {
1085 	struct pci_dev *dev;
1086 
1087 	list_for_each_entry(dev, &bus->devices, bus_list) {
1088 		struct pci_dn *pdn = pci_get_pdn(dev);
1089 
1090 		if (pdn == NULL) {
1091 			pr_warn("%s: No device node associated with device !\n",
1092 				pci_name(dev));
1093 			continue;
1094 		}
1095 
1096 		/*
1097 		 * In the partial hotplug case, the PCI device might still be
1098 		 * associated with the PE and needn't be attached to the PE
1099 		 * again.
1100 		 */
1101 		if (pdn->pe_number != IODA_INVALID_PE)
1102 			continue;
1103 
1104 		pe->device_count++;
1105 		pdn->pcidev = dev;
1106 		pdn->pe_number = pe->pe_number;
1107 		if ((pe->flags & PNV_IODA_PE_BUS_ALL) && dev->subordinate)
1108 			pnv_ioda_setup_same_PE(dev->subordinate, pe);
1109 	}
1110 }
1111 
1112 /*
1113  * There are 2 types of PCI-bus-sensitive PEs: one that comprises a
1114  * single PCI bus, and another that contains the primary PCI bus and its
1115  * subordinate PCI devices and buses. The second type of PE normally
1116  * originates from a PCIe-to-PCI bridge or PLX switch downstream port.
1117  */
1118 static struct pnv_ioda_pe *pnv_ioda_setup_bus_PE(struct pci_bus *bus, bool all)
1119 {
1120 	struct pci_controller *hose = pci_bus_to_host(bus);
1121 	struct pnv_phb *phb = hose->private_data;
1122 	struct pnv_ioda_pe *pe = NULL;
1123 	unsigned int pe_num;
1124 
1125 	/*
1126 	 * In the partial hotplug case, the PE instance might still be alive.
1127 	 * We should reuse it instead of allocating a new one.
1128 	 */
1129 	pe_num = phb->ioda.pe_rmap[bus->number << 8];
1130 	if (pe_num != IODA_INVALID_PE) {
1131 		pe = &phb->ioda.pe_array[pe_num];
1132 		pnv_ioda_setup_same_PE(bus, pe);
1133 		return NULL;
1134 	}
1135 
1136 	/* PE number for root bus should have been reserved */
1137 	if (pci_is_root_bus(bus) &&
1138 	    phb->ioda.root_pe_idx != IODA_INVALID_PE)
1139 		pe = &phb->ioda.pe_array[phb->ioda.root_pe_idx];
1140 
1141 	/* Check if PE is determined by M64 */
1142 	if (!pe && phb->pick_m64_pe)
1143 		pe = phb->pick_m64_pe(bus, all);
1144 
1145 	/* The PE number isn't pinned by M64 */
1146 	if (!pe)
1147 		pe = pnv_ioda_alloc_pe(phb);
1148 
1149 	if (!pe) {
1150 		pr_warning("%s: Not enough PE# available for PCI bus %04x:%02x\n",
1151 			__func__, pci_domain_nr(bus), bus->number);
1152 		return NULL;
1153 	}
1154 
1155 	pe->flags |= (all ? PNV_IODA_PE_BUS_ALL : PNV_IODA_PE_BUS);
1156 	pe->pbus = bus;
1157 	pe->pdev = NULL;
1158 	pe->mve_number = -1;
1159 	pe->rid = bus->busn_res.start << 8;
1160 
1161 	if (all)
1162 		pe_info(pe, "Secondary bus %d..%d associated with PE#%x\n",
1163 			bus->busn_res.start, bus->busn_res.end, pe->pe_number);
1164 	else
1165 		pe_info(pe, "Secondary bus %d associated with PE#%x\n",
1166 			bus->busn_res.start, pe->pe_number);
1167 
1168 	if (pnv_ioda_configure_pe(phb, pe)) {
1169 		/* XXX What do we do here ? */
1170 		pnv_ioda_free_pe(pe);
1171 		pe->pbus = NULL;
1172 		return NULL;
1173 	}
1174 
1175 	/* Associate it with all child devices */
1176 	pnv_ioda_setup_same_PE(bus, pe);
1177 
1178 	/* Put PE to the list */
1179 	list_add_tail(&pe->list, &phb->ioda.pe_list);
1180 
1181 	return pe;
1182 }
1183 
1184 static struct pnv_ioda_pe *pnv_ioda_setup_npu_PE(struct pci_dev *npu_pdev)
1185 {
1186 	int pe_num, found_pe = false, rc;
1187 	long rid;
1188 	struct pnv_ioda_pe *pe;
1189 	struct pci_dev *gpu_pdev;
1190 	struct pci_dn *npu_pdn;
1191 	struct pci_controller *hose = pci_bus_to_host(npu_pdev->bus);
1192 	struct pnv_phb *phb = hose->private_data;
1193 
1194 	/*
1195 	 * Due to a hardware errata PE#0 on the NPU is reserved for
1196 	 * error handling. This means we only have three PEs remaining
1197 	 * which need to be assigned to four links, implying some
1198 	 * links must share PEs.
1199 	 *
1200 	 * To achieve this we assign PEs such that NPUs linking the
1201 	 * same GPU get assigned the same PE.
1202 	 */
1203 	gpu_pdev = pnv_pci_get_gpu_dev(npu_pdev);
1204 	for (pe_num = 0; pe_num < phb->ioda.total_pe_num; pe_num++) {
1205 		pe = &phb->ioda.pe_array[pe_num];
1206 		if (!pe->pdev)
1207 			continue;
1208 
1209 		if (pnv_pci_get_gpu_dev(pe->pdev) == gpu_pdev) {
1210 			/*
1211 			 * This device has the same peer GPU so should
1212 			 * be assigned the same PE as the existing
1213 			 * peer NPU.
1214 			 */
1215 			dev_info(&npu_pdev->dev,
1216 				"Associating to existing PE %x\n", pe_num);
1217 			pci_dev_get(npu_pdev);
1218 			npu_pdn = pci_get_pdn(npu_pdev);
1219 			rid = npu_pdev->bus->number << 8 | npu_pdn->devfn;
1220 			npu_pdn->pcidev = npu_pdev;
1221 			npu_pdn->pe_number = pe_num;
1222 			phb->ioda.pe_rmap[rid] = pe->pe_number;
1223 
1224 			/* Map the PE to this link */
1225 			rc = opal_pci_set_pe(phb->opal_id, pe_num, rid,
1226 					OpalPciBusAll,
1227 					OPAL_COMPARE_RID_DEVICE_NUMBER,
1228 					OPAL_COMPARE_RID_FUNCTION_NUMBER,
1229 					OPAL_MAP_PE);
1230 			WARN_ON(rc != OPAL_SUCCESS);
1231 			found_pe = true;
1232 			break;
1233 		}
1234 	}
1235 
1236 	if (!found_pe)
1237 		/*
1238 		 * Could not find an existing PE so allocate a new
1239 		 * one.
1240 		 */
1241 		return pnv_ioda_setup_dev_PE(npu_pdev);
1242 	else
1243 		return pe;
1244 }
1245 
1246 static void pnv_ioda_setup_npu_PEs(struct pci_bus *bus)
1247 {
1248 	struct pci_dev *pdev;
1249 
1250 	list_for_each_entry(pdev, &bus->devices, bus_list)
1251 		pnv_ioda_setup_npu_PE(pdev);
1252 }
1253 
1254 static void pnv_pci_ioda_setup_PEs(void)
1255 {
1256 	struct pci_controller *hose, *tmp;
1257 	struct pnv_phb *phb;
1258 
1259 	list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
1260 		phb = hose->private_data;
1261 		if (phb->type == PNV_PHB_NPU) {
1262 			/* PE#0 is needed for error reporting */
1263 			pnv_ioda_reserve_pe(phb, 0);
1264 			pnv_ioda_setup_npu_PEs(hose->bus);
1265 		}
1266 	}
1267 }
1268 
1269 #ifdef CONFIG_PCI_IOV
1270 static int pnv_pci_vf_release_m64(struct pci_dev *pdev, u16 num_vfs)
1271 {
1272 	struct pci_bus        *bus;
1273 	struct pci_controller *hose;
1274 	struct pnv_phb        *phb;
1275 	struct pci_dn         *pdn;
1276 	int                    i, j;
1277 	int                    m64_bars;
1278 
1279 	bus = pdev->bus;
1280 	hose = pci_bus_to_host(bus);
1281 	phb = hose->private_data;
1282 	pdn = pci_get_pdn(pdev);
1283 
1284 	if (pdn->m64_single_mode)
1285 		m64_bars = num_vfs;
1286 	else
1287 		m64_bars = 1;
1288 
1289 	for (i = 0; i < PCI_SRIOV_NUM_BARS; i++)
1290 		for (j = 0; j < m64_bars; j++) {
1291 			if (pdn->m64_map[j][i] == IODA_INVALID_M64)
1292 				continue;
1293 			opal_pci_phb_mmio_enable(phb->opal_id,
1294 				OPAL_M64_WINDOW_TYPE, pdn->m64_map[j][i], 0);
1295 			clear_bit(pdn->m64_map[j][i], &phb->ioda.m64_bar_alloc);
1296 			pdn->m64_map[j][i] = IODA_INVALID_M64;
1297 		}
1298 
1299 	kfree(pdn->m64_map);
1300 	return 0;
1301 }
1302 
1303 static int pnv_pci_vf_assign_m64(struct pci_dev *pdev, u16 num_vfs)
1304 {
1305 	struct pci_bus        *bus;
1306 	struct pci_controller *hose;
1307 	struct pnv_phb        *phb;
1308 	struct pci_dn         *pdn;
1309 	unsigned int           win;
1310 	struct resource       *res;
1311 	int                    i, j;
1312 	int64_t                rc;
1313 	int                    total_vfs;
1314 	resource_size_t        size, start;
1315 	int                    pe_num;
1316 	int                    m64_bars;
1317 
1318 	bus = pdev->bus;
1319 	hose = pci_bus_to_host(bus);
1320 	phb = hose->private_data;
1321 	pdn = pci_get_pdn(pdev);
1322 	total_vfs = pci_sriov_get_totalvfs(pdev);
1323 
1324 	if (pdn->m64_single_mode)
1325 		m64_bars = num_vfs;
1326 	else
1327 		m64_bars = 1;
1328 
1329 	pdn->m64_map = kmalloc_array(m64_bars,
1330 				     sizeof(*pdn->m64_map),
1331 				     GFP_KERNEL);
1332 	if (!pdn->m64_map)
1333 		return -ENOMEM;
1334 	/* Initialize the m64_map to IODA_INVALID_M64 */
1335 	for (i = 0; i < m64_bars ; i++)
1336 		for (j = 0; j < PCI_SRIOV_NUM_BARS; j++)
1337 			pdn->m64_map[i][j] = IODA_INVALID_M64;
1338 
1339 
1340 	for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
1341 		res = &pdev->resource[i + PCI_IOV_RESOURCES];
1342 		if (!res->flags || !res->parent)
1343 			continue;
1344 
1345 		for (j = 0; j < m64_bars; j++) {
1346 			do {
1347 				win = find_next_zero_bit(&phb->ioda.m64_bar_alloc,
1348 						phb->ioda.m64_bar_idx + 1, 0);
1349 
1350 				if (win >= phb->ioda.m64_bar_idx + 1)
1351 					goto m64_failed;
1352 			} while (test_and_set_bit(win, &phb->ioda.m64_bar_alloc));
1353 
1354 			pdn->m64_map[j][i] = win;
1355 
1356 			if (pdn->m64_single_mode) {
1357 				size = pci_iov_resource_size(pdev,
1358 							PCI_IOV_RESOURCES + i);
1359 				start = res->start + size * j;
1360 			} else {
1361 				size = resource_size(res);
1362 				start = res->start;
1363 			}
1364 
1365 			/* Map the M64 here */
1366 			if (pdn->m64_single_mode) {
1367 				pe_num = pdn->pe_num_map[j];
1368 				rc = opal_pci_map_pe_mmio_window(phb->opal_id,
1369 						pe_num, OPAL_M64_WINDOW_TYPE,
1370 						pdn->m64_map[j][i], 0);
1371 			}
1372 
1373 			rc = opal_pci_set_phb_mem_window(phb->opal_id,
1374 						 OPAL_M64_WINDOW_TYPE,
1375 						 pdn->m64_map[j][i],
1376 						 start,
1377 						 0, /* unused */
1378 						 size);
1379 
1380 
1381 			if (rc != OPAL_SUCCESS) {
1382 				dev_err(&pdev->dev, "Failed to map M64 window #%d: %lld\n",
1383 					win, rc);
1384 				goto m64_failed;
1385 			}
1386 
1387 			if (pdn->m64_single_mode)
1388 				rc = opal_pci_phb_mmio_enable(phb->opal_id,
1389 				     OPAL_M64_WINDOW_TYPE, pdn->m64_map[j][i], 2);
1390 			else
1391 				rc = opal_pci_phb_mmio_enable(phb->opal_id,
1392 				     OPAL_M64_WINDOW_TYPE, pdn->m64_map[j][i], 1);
1393 
1394 			if (rc != OPAL_SUCCESS) {
1395 				dev_err(&pdev->dev, "Failed to enable M64 window #%d: %llx\n",
1396 					win, rc);
1397 				goto m64_failed;
1398 			}
1399 		}
1400 	}
1401 	return 0;
1402 
1403 m64_failed:
1404 	pnv_pci_vf_release_m64(pdev, num_vfs);
1405 	return -EBUSY;
1406 }
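/*
 * To summarise the two layouts handled above: in single-PE mode each VF
 * gets its own M64 BAR for each IOV BAR, sized to one VF's BAR segment
 * and mapped explicitly to that VF's PE with
 * opal_pci_map_pe_mmio_window(); in shared mode a single M64 BAR covers
 * the whole IOV BAR and the segment-to-PE mapping follows from the BAR's
 * position in M64 space (hence the resource shift done by
 * pnv_pci_vf_resource_shift()).
 */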
1407 
1408 static long pnv_pci_ioda2_unset_window(struct iommu_table_group *table_group,
1409 		int num);
1410 static void pnv_pci_ioda2_set_bypass(struct pnv_ioda_pe *pe, bool enable);
1411 
1412 static void pnv_pci_ioda2_release_dma_pe(struct pci_dev *dev, struct pnv_ioda_pe *pe)
1413 {
1414 	struct iommu_table    *tbl;
1415 	int64_t               rc;
1416 
1417 	tbl = pe->table_group.tables[0];
1418 	rc = pnv_pci_ioda2_unset_window(&pe->table_group, 0);
1419 	if (rc)
1420 		pe_warn(pe, "OPAL error %ld release DMA window\n", rc);
1421 
1422 	pnv_pci_ioda2_set_bypass(pe, false);
1423 	if (pe->table_group.group) {
1424 		iommu_group_put(pe->table_group.group);
1425 		BUG_ON(pe->table_group.group);
1426 	}
1427 	pnv_pci_ioda2_table_free_pages(tbl);
1428 	iommu_free_table(tbl, of_node_full_name(dev->dev.of_node));
1429 }
1430 
1431 static void pnv_ioda_release_vf_PE(struct pci_dev *pdev)
1432 {
1433 	struct pci_bus        *bus;
1434 	struct pci_controller *hose;
1435 	struct pnv_phb        *phb;
1436 	struct pnv_ioda_pe    *pe, *pe_n;
1437 	struct pci_dn         *pdn;
1438 
1439 	bus = pdev->bus;
1440 	hose = pci_bus_to_host(bus);
1441 	phb = hose->private_data;
1442 	pdn = pci_get_pdn(pdev);
1443 
1444 	if (!pdev->is_physfn)
1445 		return;
1446 
1447 	list_for_each_entry_safe(pe, pe_n, &phb->ioda.pe_list, list) {
1448 		if (pe->parent_dev != pdev)
1449 			continue;
1450 
1451 		pnv_pci_ioda2_release_dma_pe(pdev, pe);
1452 
1453 		/* Remove from list */
1454 		mutex_lock(&phb->ioda.pe_list_mutex);
1455 		list_del(&pe->list);
1456 		mutex_unlock(&phb->ioda.pe_list_mutex);
1457 
1458 		pnv_ioda_deconfigure_pe(phb, pe);
1459 
1460 		pnv_ioda_free_pe(pe);
1461 	}
1462 }
1463 
1464 void pnv_pci_sriov_disable(struct pci_dev *pdev)
1465 {
1466 	struct pci_bus        *bus;
1467 	struct pci_controller *hose;
1468 	struct pnv_phb        *phb;
1469 	struct pnv_ioda_pe    *pe;
1470 	struct pci_dn         *pdn;
1471 	struct pci_sriov      *iov;
1472 	u16                    num_vfs, i;
1473 
1474 	bus = pdev->bus;
1475 	hose = pci_bus_to_host(bus);
1476 	phb = hose->private_data;
1477 	pdn = pci_get_pdn(pdev);
1478 	iov = pdev->sriov;
1479 	num_vfs = pdn->num_vfs;
1480 
1481 	/* Release VF PEs */
1482 	pnv_ioda_release_vf_PE(pdev);
1483 
1484 	if (phb->type == PNV_PHB_IODA2) {
1485 		if (!pdn->m64_single_mode)
1486 			pnv_pci_vf_resource_shift(pdev, -*pdn->pe_num_map);
1487 
1488 		/* Release M64 windows */
1489 		pnv_pci_vf_release_m64(pdev, num_vfs);
1490 
1491 		/* Release PE numbers */
1492 		if (pdn->m64_single_mode) {
1493 			for (i = 0; i < num_vfs; i++) {
1494 				if (pdn->pe_num_map[i] == IODA_INVALID_PE)
1495 					continue;
1496 
1497 				pe = &phb->ioda.pe_array[pdn->pe_num_map[i]];
1498 				pnv_ioda_free_pe(pe);
1499 			}
1500 		} else
1501 			bitmap_clear(phb->ioda.pe_alloc, *pdn->pe_num_map, num_vfs);
1502 		/* Releasing pe_num_map */
1503 		kfree(pdn->pe_num_map);
1504 	}
1505 }
1506 
1507 static void pnv_pci_ioda2_setup_dma_pe(struct pnv_phb *phb,
1508 				       struct pnv_ioda_pe *pe);
1509 static void pnv_ioda_setup_vf_PE(struct pci_dev *pdev, u16 num_vfs)
1510 {
1511 	struct pci_bus        *bus;
1512 	struct pci_controller *hose;
1513 	struct pnv_phb        *phb;
1514 	struct pnv_ioda_pe    *pe;
1515 	int                    pe_num;
1516 	u16                    vf_index;
1517 	struct pci_dn         *pdn;
1518 
1519 	bus = pdev->bus;
1520 	hose = pci_bus_to_host(bus);
1521 	phb = hose->private_data;
1522 	pdn = pci_get_pdn(pdev);
1523 
1524 	if (!pdev->is_physfn)
1525 		return;
1526 
1527 	/* Reserve PE for each VF */
1528 	for (vf_index = 0; vf_index < num_vfs; vf_index++) {
1529 		if (pdn->m64_single_mode)
1530 			pe_num = pdn->pe_num_map[vf_index];
1531 		else
1532 			pe_num = *pdn->pe_num_map + vf_index;
1533 
1534 		pe = &phb->ioda.pe_array[pe_num];
1535 		pe->pe_number = pe_num;
1536 		pe->phb = phb;
1537 		pe->flags = PNV_IODA_PE_VF;
1538 		pe->pbus = NULL;
1539 		pe->parent_dev = pdev;
1540 		pe->mve_number = -1;
1541 		pe->rid = (pci_iov_virtfn_bus(pdev, vf_index) << 8) |
1542 			   pci_iov_virtfn_devfn(pdev, vf_index);
1543 
1544 		pe_info(pe, "VF %04d:%02d:%02d.%d associated with PE#%x\n",
1545 			hose->global_number, pdev->bus->number,
1546 			PCI_SLOT(pci_iov_virtfn_devfn(pdev, vf_index)),
1547 			PCI_FUNC(pci_iov_virtfn_devfn(pdev, vf_index)), pe_num);
1548 
1549 		if (pnv_ioda_configure_pe(phb, pe)) {
1550 			/* XXX What do we do here ? */
1551 			pnv_ioda_free_pe(pe);
1552 			pe->pdev = NULL;
1553 			continue;
1554 		}
1555 
1556 		/* Put PE to the list */
1557 		mutex_lock(&phb->ioda.pe_list_mutex);
1558 		list_add_tail(&pe->list, &phb->ioda.pe_list);
1559 		mutex_unlock(&phb->ioda.pe_list_mutex);
1560 
1561 		pnv_pci_ioda2_setup_dma_pe(phb, pe);
1562 	}
1563 }
1564 
1565 int pnv_pci_sriov_enable(struct pci_dev *pdev, u16 num_vfs)
1566 {
1567 	struct pci_bus        *bus;
1568 	struct pci_controller *hose;
1569 	struct pnv_phb        *phb;
1570 	struct pnv_ioda_pe    *pe;
1571 	struct pci_dn         *pdn;
1572 	int                    ret;
1573 	u16                    i;
1574 
1575 	bus = pdev->bus;
1576 	hose = pci_bus_to_host(bus);
1577 	phb = hose->private_data;
1578 	pdn = pci_get_pdn(pdev);
1579 
1580 	if (phb->type == PNV_PHB_IODA2) {
1581 		if (!pdn->vfs_expanded) {
1582 			dev_info(&pdev->dev, "SR-IOV is not supported on this device"
1583 				" with a non-64-bit-prefetchable IOV BAR\n");
1584 			return -ENOSPC;
1585 		}
1586 
1587 		/*
1588 		 * When the M64 BARs function in Single PE mode, the number of
1589 		 * VFs that can be enabled must be less than the number of M64 BARs.
1590 		 */
1591 		if (pdn->m64_single_mode && num_vfs > phb->ioda.m64_bar_idx) {
1592 			dev_info(&pdev->dev, "Not enough M64 BAR for VFs\n");
1593 			return -EBUSY;
1594 		}
1595 
1596 		/* Allocating pe_num_map */
1597 		if (pdn->m64_single_mode)
1598 			pdn->pe_num_map = kmalloc_array(num_vfs,
1599 							sizeof(*pdn->pe_num_map),
1600 							GFP_KERNEL);
1601 		else
1602 			pdn->pe_num_map = kmalloc(sizeof(*pdn->pe_num_map), GFP_KERNEL);
1603 
1604 		if (!pdn->pe_num_map)
1605 			return -ENOMEM;
1606 
1607 		if (pdn->m64_single_mode)
1608 			for (i = 0; i < num_vfs; i++)
1609 				pdn->pe_num_map[i] = IODA_INVALID_PE;
1610 
1611 		/* Calculate available PE for required VFs */
1612 		if (pdn->m64_single_mode) {
1613 			for (i = 0; i < num_vfs; i++) {
1614 				pe = pnv_ioda_alloc_pe(phb);
1615 				if (!pe) {
1616 					ret = -EBUSY;
1617 					goto m64_failed;
1618 				}
1619 
1620 				pdn->pe_num_map[i] = pe->pe_number;
1621 			}
1622 		} else {
1623 			mutex_lock(&phb->ioda.pe_alloc_mutex);
1624 			*pdn->pe_num_map = bitmap_find_next_zero_area(
1625 				phb->ioda.pe_alloc, phb->ioda.total_pe_num,
1626 				0, num_vfs, 0);
1627 			if (*pdn->pe_num_map >= phb->ioda.total_pe_num) {
1628 				mutex_unlock(&phb->ioda.pe_alloc_mutex);
1629 				dev_info(&pdev->dev, "Failed to enable VF%d\n", num_vfs);
1630 				kfree(pdn->pe_num_map);
1631 				return -EBUSY;
1632 			}
1633 			bitmap_set(phb->ioda.pe_alloc, *pdn->pe_num_map, num_vfs);
1634 			mutex_unlock(&phb->ioda.pe_alloc_mutex);
1635 		}
1636 		pdn->num_vfs = num_vfs;
1637 
1638 		/* Assign M64 window accordingly */
1639 		ret = pnv_pci_vf_assign_m64(pdev, num_vfs);
1640 		if (ret) {
1641 			dev_info(&pdev->dev, "Not enough M64 window resources\n");
1642 			goto m64_failed;
1643 		}
1644 
1645 		/*
1646 		 * When using one M64 BAR to map one IOV BAR, we need to shift
1647 		 * the IOV BAR according to the PE# allocated to the VFs.
1648 		 * Otherwise, the PE#s for the VFs will conflict with others.
1649 		 */
1650 		if (!pdn->m64_single_mode) {
1651 			ret = pnv_pci_vf_resource_shift(pdev, *pdn->pe_num_map);
1652 			if (ret)
1653 				goto m64_failed;
1654 		}
1655 	}
1656 
1657 	/* Setup VF PEs */
1658 	pnv_ioda_setup_vf_PE(pdev, num_vfs);
1659 
1660 	return 0;
1661 
1662 m64_failed:
1663 	if (pdn->m64_single_mode) {
1664 		for (i = 0; i < num_vfs; i++) {
1665 			if (pdn->pe_num_map[i] == IODA_INVALID_PE)
1666 				continue;
1667 
1668 			pe = &phb->ioda.pe_array[pdn->pe_num_map[i]];
1669 			pnv_ioda_free_pe(pe);
1670 		}
1671 	} else
1672 		bitmap_clear(phb->ioda.pe_alloc, *pdn->pe_num_map, num_vfs);
1673 
1674 	/* Releasing pe_num_map */
1675 	kfree(pdn->pe_num_map);
1676 
1677 	return ret;
1678 }
1679 
1680 int pcibios_sriov_disable(struct pci_dev *pdev)
1681 {
1682 	pnv_pci_sriov_disable(pdev);
1683 
1684 	/* Release PCI data */
1685 	remove_dev_pci_data(pdev);
1686 	return 0;
1687 }
1688 
1689 int pcibios_sriov_enable(struct pci_dev *pdev, u16 num_vfs)
1690 {
1691 	/* Allocate PCI data */
1692 	add_dev_pci_data(pdev);
1693 
1694 	return pnv_pci_sriov_enable(pdev, num_vfs);
1695 }
1696 #endif /* CONFIG_PCI_IOV */
1697 
1698 static void pnv_pci_ioda_dma_dev_setup(struct pnv_phb *phb, struct pci_dev *pdev)
1699 {
1700 	struct pci_dn *pdn = pci_get_pdn(pdev);
1701 	struct pnv_ioda_pe *pe;
1702 
1703 	/*
1704 	 * This function can be called before the PE#
1705 	 * has been assigned. Do nothing in that
1706 	 * case.
1707 	 */
1708 	if (!pdn || pdn->pe_number == IODA_INVALID_PE)
1709 		return;
1710 
1711 	pe = &phb->ioda.pe_array[pdn->pe_number];
1712 	WARN_ON(get_dma_ops(&pdev->dev) != &dma_iommu_ops);
1713 	set_dma_offset(&pdev->dev, pe->tce_bypass_base);
1714 	set_iommu_table_base(&pdev->dev, pe->table_group.tables[0]);
1715 	/*
1716 	 * Note: iommu_add_device() will fail here as
1717 	 * for physical PE: the device is already added by now;
1718 	 * for virtual PE: sysfs entries are not ready yet and
1719 	 * tce_iommu_bus_notifier will add the device to a group later.
1720 	 */
1721 }
1722 
1723 static int pnv_pci_ioda_dma_set_mask(struct pci_dev *pdev, u64 dma_mask)
1724 {
1725 	struct pci_controller *hose = pci_bus_to_host(pdev->bus);
1726 	struct pnv_phb *phb = hose->private_data;
1727 	struct pci_dn *pdn = pci_get_pdn(pdev);
1728 	struct pnv_ioda_pe *pe;
1729 	uint64_t top;
1730 	bool bypass = false;
1731 
1732 	if (WARN_ON(!pdn || pdn->pe_number == IODA_INVALID_PE))
1733 		return -ENODEV;
1734 
1735 	pe = &phb->ioda.pe_array[pdn->pe_number];
1736 	if (pe->tce_bypass_enabled) {
1737 		top = pe->tce_bypass_base + memblock_end_of_DRAM() - 1;
1738 		bypass = (dma_mask >= top);
1739 	}
1740 
1741 	if (bypass) {
1742 		dev_info(&pdev->dev, "Using 64-bit DMA iommu bypass\n");
1743 		set_dma_ops(&pdev->dev, &dma_direct_ops);
1744 	} else {
1745 		dev_info(&pdev->dev, "Using 32-bit DMA via iommu\n");
1746 		set_dma_ops(&pdev->dev, &dma_iommu_ops);
1747 	}
1748 	*pdev->dev.dma_mask = dma_mask;
1749 
1750 	/* Update peer npu devices */
1751 	pnv_npu_try_dma_set_bypass(pdev, bypass);
1752 
1753 	return 0;
1754 }
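/*
 * In short: provided the PE has its bypass window enabled, a DMA mask
 * large enough to cover tce_bypass_base plus all of system memory (e.g.
 * a full 64-bit mask) switches the device to dma_direct_ops with no TCE
 * translation; a smaller mask, such as a 32-bit one, keeps it on
 * dma_iommu_ops and the default TCE window.
 */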
1755 
1756 static u64 pnv_pci_ioda_dma_get_required_mask(struct pci_dev *pdev)
1757 {
1758 	struct pci_controller *hose = pci_bus_to_host(pdev->bus);
1759 	struct pnv_phb *phb = hose->private_data;
1760 	struct pci_dn *pdn = pci_get_pdn(pdev);
1761 	struct pnv_ioda_pe *pe;
1762 	u64 end, mask;
1763 
1764 	if (WARN_ON(!pdn || pdn->pe_number == IODA_INVALID_PE))
1765 		return 0;
1766 
1767 	pe = &phb->ioda.pe_array[pdn->pe_number];
1768 	if (!pe->tce_bypass_enabled)
1769 		return __dma_get_required_mask(&pdev->dev);
1770 
1771 
1772 	end = pe->tce_bypass_base + memblock_end_of_DRAM();
1773 	mask = 1ULL << (fls64(end) - 1);
1774 	mask += mask - 1;
1775 
1776 	return mask;
1777 }
1778 
1779 static void pnv_ioda_setup_bus_dma(struct pnv_ioda_pe *pe,
1780 				   struct pci_bus *bus)
1781 {
1782 	struct pci_dev *dev;
1783 
1784 	list_for_each_entry(dev, &bus->devices, bus_list) {
1785 		set_iommu_table_base(&dev->dev, pe->table_group.tables[0]);
1786 		set_dma_offset(&dev->dev, pe->tce_bypass_base);
1787 		iommu_add_device(&dev->dev);
1788 
1789 		if ((pe->flags & PNV_IODA_PE_BUS_ALL) && dev->subordinate)
1790 			pnv_ioda_setup_bus_dma(pe, dev->subordinate);
1791 	}
1792 }
1793 
1794 static inline __be64 __iomem *pnv_ioda_get_inval_reg(struct pnv_phb *phb,
1795 						     bool real_mode)
1796 {
1797 	return real_mode ? (__be64 __iomem *)(phb->regs_phys + 0x210) :
1798 		(phb->regs + 0x210);
1799 }
1800 
1801 static void pnv_pci_p7ioc_tce_invalidate(struct iommu_table *tbl,
1802 		unsigned long index, unsigned long npages, bool rm)
1803 {
1804 	struct iommu_table_group_link *tgl = list_first_entry_or_null(
1805 			&tbl->it_group_list, struct iommu_table_group_link,
1806 			next);
1807 	struct pnv_ioda_pe *pe = container_of(tgl->table_group,
1808 			struct pnv_ioda_pe, table_group);
1809 	__be64 __iomem *invalidate = pnv_ioda_get_inval_reg(pe->phb, rm);
1810 	unsigned long start, end, inc;
1811 
1812 	start = __pa(((__be64 *)tbl->it_base) + index - tbl->it_offset);
1813 	end = __pa(((__be64 *)tbl->it_base) + index - tbl->it_offset +
1814 			npages - 1);
1815 
1816 	/* p7ioc-style invalidation, 2 TCEs per write */
1817 	start |= (1ull << 63);
1818 	end |= (1ull << 63);
1819 	inc = 16;
1820 	end |= inc - 1;	/* round up end to be different than start */
1821 
1822 	mb(); /* Ensure above stores are visible */
1823 	while (start <= end) {
1824 		if (rm)
1825 			__raw_rm_writeq(cpu_to_be64(start), invalidate);
1826 		else
1827 			__raw_writeq(cpu_to_be64(start), invalidate);
1828 		start += inc;
1829 	}
1830 
1831 	/*
1832 	 * The iommu layer will do another mb() for us on build(),
1833 	 * and we don't care about free().
1834 	 */
1835 }
1836 
1837 static int pnv_ioda1_tce_build(struct iommu_table *tbl, long index,
1838 		long npages, unsigned long uaddr,
1839 		enum dma_data_direction direction,
1840 		unsigned long attrs)
1841 {
1842 	int ret = pnv_tce_build(tbl, index, npages, uaddr, direction,
1843 			attrs);
1844 
1845 	if (!ret)
1846 		pnv_pci_p7ioc_tce_invalidate(tbl, index, npages, false);
1847 
1848 	return ret;
1849 }
1850 
1851 #ifdef CONFIG_IOMMU_API
1852 static int pnv_ioda1_tce_xchg(struct iommu_table *tbl, long index,
1853 		unsigned long *hpa, enum dma_data_direction *direction)
1854 {
1855 	long ret = pnv_tce_xchg(tbl, index, hpa, direction);
1856 
1857 	if (!ret)
1858 		pnv_pci_p7ioc_tce_invalidate(tbl, index, 1, false);
1859 
1860 	return ret;
1861 }
1862 #endif
1863 
1864 static void pnv_ioda1_tce_free(struct iommu_table *tbl, long index,
1865 		long npages)
1866 {
1867 	pnv_tce_free(tbl, index, npages);
1868 
1869 	pnv_pci_p7ioc_tce_invalidate(tbl, index, npages, false);
1870 }
1871 
1872 static struct iommu_table_ops pnv_ioda1_iommu_ops = {
1873 	.set = pnv_ioda1_tce_build,
1874 #ifdef CONFIG_IOMMU_API
1875 	.exchange = pnv_ioda1_tce_xchg,
1876 #endif
1877 	.clear = pnv_ioda1_tce_free,
1878 	.get = pnv_tce_get,
1879 };
1880 
1881 #define PHB3_TCE_KILL_INVAL_ALL		PPC_BIT(0)
1882 #define PHB3_TCE_KILL_INVAL_PE		PPC_BIT(1)
1883 #define PHB3_TCE_KILL_INVAL_ONE		PPC_BIT(2)
1884 
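/*
 * PHB3 TCE kill register request types, selected by the top bits of the
 * value written: invalidate the whole TCE cache, all entries cached for
 * one PE, or a range of entries (index/page-shift encoded) for one PE.
 */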
1885 void pnv_pci_phb3_tce_invalidate_entire(struct pnv_phb *phb, bool rm)
1886 {
1887 	__be64 __iomem *invalidate = pnv_ioda_get_inval_reg(phb, rm);
1888 	const unsigned long val = PHB3_TCE_KILL_INVAL_ALL;
1889 
1890 	mb(); /* Ensure previous TCE table stores are visible */
1891 	if (rm)
1892 		__raw_rm_writeq(cpu_to_be64(val), invalidate);
1893 	else
1894 		__raw_writeq(cpu_to_be64(val), invalidate);
1895 }
1896 
1897 static inline void pnv_pci_phb3_tce_invalidate_pe(struct pnv_ioda_pe *pe)
1898 {
1899 	/* 01xb - invalidate TCEs that match the specified PE# */
1900 	__be64 __iomem *invalidate = pnv_ioda_get_inval_reg(pe->phb, false);
1901 	unsigned long val = PHB3_TCE_KILL_INVAL_PE | (pe->pe_number & 0xFF);
1902 
1903 	mb(); /* Ensure above stores are visible */
1904 	__raw_writeq(cpu_to_be64(val), invalidate);
1905 }
1906 
1907 static void pnv_pci_phb3_tce_invalidate(struct pnv_ioda_pe *pe, bool rm,
1908 					unsigned shift, unsigned long index,
1909 					unsigned long npages)
1910 {
1911 	__be64 __iomem *invalidate = pnv_ioda_get_inval_reg(pe->phb, rm);
1912 	unsigned long start, end, inc;
1913 
1914 	/* We'll invalidate DMA addresses in PE scope */
1915 	start = PHB3_TCE_KILL_INVAL_ONE;
1916 	start |= (pe->pe_number & 0xFF);
1917 	end = start;
1918 
1919 	/* Figure out the start, end and step */
1920 	start |= (index << shift);
1921 	end |= ((index + npages - 1) << shift);
1922 	inc = (0x1ull << shift);
1923 	mb();
1924 
1925 	while (start <= end) {
1926 		if (rm)
1927 			__raw_rm_writeq(cpu_to_be64(start), invalidate);
1928 		else
1929 			__raw_writeq(cpu_to_be64(start), invalidate);
1930 		start += inc;
1931 	}
1932 }
1933 
1934 static inline void pnv_pci_ioda2_tce_invalidate_pe(struct pnv_ioda_pe *pe)
1935 {
1936 	struct pnv_phb *phb = pe->phb;
1937 
1938 	if (phb->model == PNV_PHB_MODEL_PHB3 && phb->regs)
1939 		pnv_pci_phb3_tce_invalidate_pe(pe);
1940 	else
1941 		opal_pci_tce_kill(phb->opal_id, OPAL_PCI_TCE_KILL_PE,
1942 				  pe->pe_number, 0, 0, 0);
1943 }
1944 
1945 static void pnv_pci_ioda2_tce_invalidate(struct iommu_table *tbl,
1946 		unsigned long index, unsigned long npages, bool rm)
1947 {
1948 	struct iommu_table_group_link *tgl;
1949 
1950 	list_for_each_entry_rcu(tgl, &tbl->it_group_list, next) {
1951 		struct pnv_ioda_pe *pe = container_of(tgl->table_group,
1952 				struct pnv_ioda_pe, table_group);
1953 		struct pnv_phb *phb = pe->phb;
1954 		unsigned int shift = tbl->it_page_shift;
1955 
1956 		/*
1957 		 * NVLink1 can use the TCE kill register directly as
1958 		 * it's the same as PHB3. NVLink2 is different and
1959 		 * should go via the OPAL call.
1960 		 */
1961 		if (phb->model == PNV_PHB_MODEL_NPU) {
1962 			/*
1963 			 * The NVLink hardware does not support TCE kill
1964 			 * per TCE entry so we have to invalidate
1965 			 * the entire cache for it.
1966 			 */
1967 			pnv_pci_phb3_tce_invalidate_entire(phb, rm);
1968 			continue;
1969 		}
1970 		if (phb->model == PNV_PHB_MODEL_PHB3 && phb->regs)
1971 			pnv_pci_phb3_tce_invalidate(pe, rm, shift,
1972 						    index, npages);
1973 		else
1974 			opal_pci_tce_kill(phb->opal_id,
1975 					  OPAL_PCI_TCE_KILL_PAGES,
1976 					  pe->pe_number, 1u << shift,
1977 					  index << shift, npages);
1978 	}
1979 }
1980 
1981 static int pnv_ioda2_tce_build(struct iommu_table *tbl, long index,
1982 		long npages, unsigned long uaddr,
1983 		enum dma_data_direction direction,
1984 		unsigned long attrs)
1985 {
1986 	int ret = pnv_tce_build(tbl, index, npages, uaddr, direction,
1987 			attrs);
1988 
1989 	if (!ret)
1990 		pnv_pci_ioda2_tce_invalidate(tbl, index, npages, false);
1991 
1992 	return ret;
1993 }
1994 
1995 #ifdef CONFIG_IOMMU_API
1996 static int pnv_ioda2_tce_xchg(struct iommu_table *tbl, long index,
1997 		unsigned long *hpa, enum dma_data_direction *direction)
1998 {
1999 	long ret = pnv_tce_xchg(tbl, index, hpa, direction);
2000 
2001 	if (!ret)
2002 		pnv_pci_ioda2_tce_invalidate(tbl, index, 1, false);
2003 
2004 	return ret;
2005 }
2006 #endif
2007 
2008 static void pnv_ioda2_tce_free(struct iommu_table *tbl, long index,
2009 		long npages)
2010 {
2011 	pnv_tce_free(tbl, index, npages);
2012 
2013 	pnv_pci_ioda2_tce_invalidate(tbl, index, npages, false);
2014 }
2015 
2016 static void pnv_ioda2_table_free(struct iommu_table *tbl)
2017 {
2018 	pnv_pci_ioda2_table_free_pages(tbl);
2019 	iommu_free_table(tbl, "pnv");
2020 }
2021 
2022 static struct iommu_table_ops pnv_ioda2_iommu_ops = {
2023 	.set = pnv_ioda2_tce_build,
2024 #ifdef CONFIG_IOMMU_API
2025 	.exchange = pnv_ioda2_tce_xchg,
2026 #endif
2027 	.clear = pnv_ioda2_tce_free,
2028 	.get = pnv_tce_get,
2029 	.free = pnv_ioda2_table_free,
2030 };
2031 
2032 static int pnv_pci_ioda_dev_dma_weight(struct pci_dev *dev, void *data)
2033 {
2034 	unsigned int *weight = (unsigned int *)data;
2035 
2036 	/* This is quite simplistic. The "base" weight of a device
2037 	 * is 10. 0 means no DMA is to be accounted for it.
2038 	 */
2039 	if (dev->hdr_type != PCI_HEADER_TYPE_NORMAL)
2040 		return 0;
2041 
2042 	if (dev->class == PCI_CLASS_SERIAL_USB_UHCI ||
2043 	    dev->class == PCI_CLASS_SERIAL_USB_OHCI ||
2044 	    dev->class == PCI_CLASS_SERIAL_USB_EHCI)
2045 		*weight += 3;
2046 	else if ((dev->class >> 8) == PCI_CLASS_STORAGE_RAID)
2047 		*weight += 15;
2048 	else
2049 		*weight += 10;
2050 
2051 	return 0;
2052 }
2053 
2054 static unsigned int pnv_pci_ioda_pe_dma_weight(struct pnv_ioda_pe *pe)
2055 {
2056 	unsigned int weight = 0;
2057 
2058 	/* SRIOV VF has same DMA32 weight as its PF */
2059 #ifdef CONFIG_PCI_IOV
2060 	if ((pe->flags & PNV_IODA_PE_VF) && pe->parent_dev) {
2061 		pnv_pci_ioda_dev_dma_weight(pe->parent_dev, &weight);
2062 		return weight;
2063 	}
2064 #endif
2065 
2066 	if ((pe->flags & PNV_IODA_PE_DEV) && pe->pdev) {
2067 		pnv_pci_ioda_dev_dma_weight(pe->pdev, &weight);
2068 	} else if ((pe->flags & PNV_IODA_PE_BUS) && pe->pbus) {
2069 		struct pci_dev *pdev;
2070 
2071 		list_for_each_entry(pdev, &pe->pbus->devices, bus_list)
2072 			pnv_pci_ioda_dev_dma_weight(pdev, &weight);
2073 	} else if ((pe->flags & PNV_IODA_PE_BUS_ALL) && pe->pbus) {
2074 		pci_walk_bus(pe->pbus, pnv_pci_ioda_dev_dma_weight, &weight);
2075 	}
2076 
2077 	return weight;
2078 }
2079 
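/*
 * Set up 32-bit DMA for an IODA1 PE. The PHB's DMA32 space is split into
 * 256MB segments; each PE is given a contiguous run of segments roughly
 * proportional to its DMA weight relative to the whole PHB, backed by a
 * single contiguous TCE table.
 */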
2080 static void pnv_pci_ioda1_setup_dma_pe(struct pnv_phb *phb,
2081 				       struct pnv_ioda_pe *pe)
2082 {
2083 
2084 	struct page *tce_mem = NULL;
2085 	struct iommu_table *tbl;
2086 	unsigned int weight, total_weight = 0;
2087 	unsigned int tce32_segsz, base, segs, avail, i;
2088 	int64_t rc;
2089 	void *addr;
2090 
2091 	/* XXX FIXME: Handle 64-bit only DMA devices */
2092 	/* XXX FIXME: Provide 64-bit DMA facilities & non-4K TCE tables etc.. */
2093 	/* XXX FIXME: Allocate multi-level tables on PHB3 */
2094 	weight = pnv_pci_ioda_pe_dma_weight(pe);
2095 	if (!weight)
2096 		return;
2097 
2098 	pci_walk_bus(phb->hose->bus, pnv_pci_ioda_dev_dma_weight,
2099 		     &total_weight);
2100 	segs = (weight * phb->ioda.dma32_count) / total_weight;
2101 	if (!segs)
2102 		segs = 1;
2103 
2104 	/*
2105 	 * Allocate contiguous DMA32 segments. We begin with the expected
2106 	 * number of segments. On each failed attempt, the number of DMA32
2107 	 * segments to be allocated is decreased by one, until a run of
2108 	 * free segments is found or we run out of attempts.
2109 	 */
2110 	do {
2111 		for (base = 0; base <= phb->ioda.dma32_count - segs; base++) {
2112 			for (avail = 0, i = base; i < base + segs; i++) {
2113 				if (phb->ioda.dma32_segmap[i] ==
2114 				    IODA_INVALID_PE)
2115 					avail++;
2116 			}
2117 
2118 			if (avail == segs)
2119 				goto found;
2120 		}
2121 	} while (--segs);
2122 
2123 	if (!segs) {
2124 		pe_warn(pe, "No available DMA32 segments\n");
2125 		return;
2126 	}
2127 
2128 found:
2129 	tbl = pnv_pci_table_alloc(phb->hose->node);
2130 	iommu_register_group(&pe->table_group, phb->hose->global_number,
2131 			pe->pe_number);
2132 	pnv_pci_link_table_and_group(phb->hose->node, 0, tbl, &pe->table_group);
2133 
2134 	/* Grab a 32-bit TCE table */
2135 	pe_info(pe, "DMA weight %d (%d), assigned (%d) %d DMA32 segments\n",
2136 		weight, total_weight, base, segs);
2137 	pe_info(pe, " Setting up 32-bit TCE table at %08x..%08x\n",
2138 		base * PNV_IODA1_DMA32_SEGSIZE,
2139 		(base + segs) * PNV_IODA1_DMA32_SEGSIZE - 1);
2140 
2141 	/* XXX Currently, we allocate one big contiguous table for the
2142 	 * TCEs. We only really need one chunk per 256M of TCE space
2143 	 * (i.e. per segment), but that's an optimization for later; it
2144 	 * requires some added smarts in our get/put_tce implementation.
2145 	 *
2146 	 * Each TCE page is 4KB in size and each TCE entry occupies 8
2147 	 * bytes.
2148 	 */
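	/*
	 * For example, with 4K IOMMU pages each 256MB DMA32 segment needs
	 * 256MB / 4KB = 64K TCE entries of 8 bytes each, i.e. a 512KB
	 * chunk of TCE table per segment (0x10000000 >> (12 - 3) = 0x80000).
	 */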
2149 	tce32_segsz = PNV_IODA1_DMA32_SEGSIZE >> (IOMMU_PAGE_SHIFT_4K - 3);
2150 	tce_mem = alloc_pages_node(phb->hose->node, GFP_KERNEL,
2151 				   get_order(tce32_segsz * segs));
2152 	if (!tce_mem) {
2153 		pe_err(pe, " Failed to allocate 32-bit TCE memory\n");
2154 		goto fail;
2155 	}
2156 	addr = page_address(tce_mem);
2157 	memset(addr, 0, tce32_segsz * segs);
2158 
2159 	/* Configure HW */
2160 	for (i = 0; i < segs; i++) {
2161 		rc = opal_pci_map_pe_dma_window(phb->opal_id,
2162 					      pe->pe_number,
2163 					      base + i, 1,
2164 					      __pa(addr) + tce32_segsz * i,
2165 					      tce32_segsz, IOMMU_PAGE_SIZE_4K);
2166 		if (rc) {
2167 			pe_err(pe, " Failed to configure 32-bit TCE table,"
2168 			       " err %ld\n", rc);
2169 			goto fail;
2170 		}
2171 	}
2172 
2173 	/* Setup DMA32 segment mapping */
2174 	for (i = base; i < base + segs; i++)
2175 		phb->ioda.dma32_segmap[i] = pe->pe_number;
2176 
2177 	/* Setup linux iommu table */
2178 	pnv_pci_setup_iommu_table(tbl, addr, tce32_segsz * segs,
2179 				  base * PNV_IODA1_DMA32_SEGSIZE,
2180 				  IOMMU_PAGE_SHIFT_4K);
2181 
2182 	tbl->it_ops = &pnv_ioda1_iommu_ops;
2183 	pe->table_group.tce32_start = tbl->it_offset << tbl->it_page_shift;
2184 	pe->table_group.tce32_size = tbl->it_size << tbl->it_page_shift;
2185 	iommu_init_table(tbl, phb->hose->node);
2186 
2187 	if (pe->flags & PNV_IODA_PE_DEV) {
2188 		/*
2189 		 * Set the table base here only to carry the iommu_group
2190 		 * further down so that iommu_add_device() can do its job;
2191 		 * pnv_pci_ioda_dma_dev_setup will override it later anyway.
2192 		 */
2193 		set_iommu_table_base(&pe->pdev->dev, tbl);
2194 		iommu_add_device(&pe->pdev->dev);
2195 	} else if (pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL))
2196 		pnv_ioda_setup_bus_dma(pe, pe->pbus);
2197 
2198 	return;
2199  fail:
2200 	/* XXX Failure: Try to fallback to 64-bit only ? */
2201 	if (tce_mem)
2202 		__free_pages(tce_mem, get_order(tce32_segsz * segs));
2203 	if (tbl) {
2204 		pnv_pci_unlink_table_and_group(tbl, &pe->table_group);
2205 		iommu_free_table(tbl, "pnv");
2206 	}
2207 }
2208 
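/*
 * Program one of the PE's DMA windows into the hardware: the TVE index
 * is derived from the PE number and window number, and the TVE points at
 * the top level of the TCE table, with its size given in bytes
 * (number of entries << 3).
 */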
2209 static long pnv_pci_ioda2_set_window(struct iommu_table_group *table_group,
2210 		int num, struct iommu_table *tbl)
2211 {
2212 	struct pnv_ioda_pe *pe = container_of(table_group, struct pnv_ioda_pe,
2213 			table_group);
2214 	struct pnv_phb *phb = pe->phb;
2215 	int64_t rc;
2216 	const unsigned long size = tbl->it_indirect_levels ?
2217 			tbl->it_level_size : tbl->it_size;
2218 	const __u64 start_addr = tbl->it_offset << tbl->it_page_shift;
2219 	const __u64 win_size = tbl->it_size << tbl->it_page_shift;
2220 
2221 	pe_info(pe, "Setting up window#%d %llx..%llx pg=%x\n", num,
2222 			start_addr, start_addr + win_size - 1,
2223 			IOMMU_PAGE_SIZE(tbl));
2224 
2225 	/*
2226 	 * Map TCE table through TVT. The TVE index is the PE number
2227 	 * shifted left by 1 bit for the 32-bit DMA space.
2228 	 */
2229 	rc = opal_pci_map_pe_dma_window(phb->opal_id,
2230 			pe->pe_number,
2231 			(pe->pe_number << 1) + num,
2232 			tbl->it_indirect_levels + 1,
2233 			__pa(tbl->it_base),
2234 			size << 3,
2235 			IOMMU_PAGE_SIZE(tbl));
2236 	if (rc) {
2237 		pe_err(pe, "Failed to configure TCE table, err %ld\n", rc);
2238 		return rc;
2239 	}
2240 
2241 	pnv_pci_link_table_and_group(phb->hose->node, num,
2242 			tbl, &pe->table_group);
2243 	pnv_pci_ioda2_tce_invalidate_pe(pe);
2244 
2245 	return 0;
2246 }
2247 
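/*
 * Enable or disable the 64-bit DMA bypass window of a PE. The bypass
 * window uses the odd TVE ((pe_number << 1) + 1), which is selected by
 * PCI address bit 59, and when enabled it maps the range from
 * pe->tce_bypass_base up to the (power-of-two rounded) top of DRAM
 * without TCE translation.
 */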
2248 static void pnv_pci_ioda2_set_bypass(struct pnv_ioda_pe *pe, bool enable)
2249 {
2250 	uint16_t window_id = (pe->pe_number << 1) + 1;
2251 	int64_t rc;
2252 
2253 	pe_info(pe, "%sabling 64-bit DMA bypass\n", enable ? "En" : "Dis");
2254 	if (enable) {
2255 		phys_addr_t top = memblock_end_of_DRAM();
2256 
2257 		top = roundup_pow_of_two(top);
2258 		rc = opal_pci_map_pe_dma_window_real(pe->phb->opal_id,
2259 						     pe->pe_number,
2260 						     window_id,
2261 						     pe->tce_bypass_base,
2262 						     top);
2263 	} else {
2264 		rc = opal_pci_map_pe_dma_window_real(pe->phb->opal_id,
2265 						     pe->pe_number,
2266 						     window_id,
2267 						     pe->tce_bypass_base,
2268 						     0);
2269 	}
2270 	if (rc)
2271 		pe_err(pe, "OPAL error %lld configuring bypass window\n", rc);
2272 	else
2273 		pe->tce_bypass_enabled = enable;
2274 }
2275 
2276 static long pnv_pci_ioda2_table_alloc_pages(int nid, __u64 bus_offset,
2277 		__u32 page_shift, __u64 window_size, __u32 levels,
2278 		struct iommu_table *tbl);
2279 
2280 static long pnv_pci_ioda2_create_table(struct iommu_table_group *table_group,
2281 		int num, __u32 page_shift, __u64 window_size, __u32 levels,
2282 		struct iommu_table **ptbl)
2283 {
2284 	struct pnv_ioda_pe *pe = container_of(table_group, struct pnv_ioda_pe,
2285 			table_group);
2286 	int nid = pe->phb->hose->node;
2287 	__u64 bus_offset = num ? pe->tce_bypass_base : table_group->tce32_start;
2288 	long ret;
2289 	struct iommu_table *tbl;
2290 
2291 	tbl = pnv_pci_table_alloc(nid);
2292 	if (!tbl)
2293 		return -ENOMEM;
2294 
2295 	ret = pnv_pci_ioda2_table_alloc_pages(nid,
2296 			bus_offset, page_shift, window_size,
2297 			levels, tbl);
2298 	if (ret) {
2299 		iommu_free_table(tbl, "pnv");
2300 		return ret;
2301 	}
2302 
2303 	tbl->it_ops = &pnv_ioda2_iommu_ops;
2304 
2305 	*ptbl = tbl;
2306 
2307 	return 0;
2308 }
2309 
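/*
 * Create and install the default 32-bit DMA window of a PE, sized to the
 * smaller of the PE's 32-bit TCE space and the (power-of-two rounded
 * down) maximum memory, and enable the 64-bit bypass window unless it
 * was disabled on the command line.
 */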
2310 static long pnv_pci_ioda2_setup_default_config(struct pnv_ioda_pe *pe)
2311 {
2312 	struct iommu_table *tbl = NULL;
2313 	long rc;
2314 
2315 	/*
2316 	 * crashkernel= specifies the kdump kernel's maximum memory at
2317 	 * some offset and there is no guarantee that the result is a
2318 	 * power of 2, which will cause errors later.
2319 	 */
2320 	const u64 max_memory = __rounddown_pow_of_two(memory_hotplug_max());
2321 
2322 	/*
2323 	 * In memory constrained environments, e.g. kdump kernel, the
2324 	 * DMA window can be larger than available memory, which will
2325 	 * cause errors later.
2326 	 */
2327 	const u64 window_size = min((u64)pe->table_group.tce32_size, max_memory);
2328 
2329 	rc = pnv_pci_ioda2_create_table(&pe->table_group, 0,
2330 			IOMMU_PAGE_SHIFT_4K,
2331 			window_size,
2332 			POWERNV_IOMMU_DEFAULT_LEVELS, &tbl);
2333 	if (rc) {
2334 		pe_err(pe, "Failed to create 32-bit TCE table, err %ld",
2335 				rc);
2336 		return rc;
2337 	}
2338 
2339 	iommu_init_table(tbl, pe->phb->hose->node);
2340 
2341 	rc = pnv_pci_ioda2_set_window(&pe->table_group, 0, tbl);
2342 	if (rc) {
2343 		pe_err(pe, "Failed to configure 32-bit TCE table, err %ld\n",
2344 				rc);
2345 		pnv_ioda2_table_free(tbl);
2346 		return rc;
2347 	}
2348 
2349 	if (!pnv_iommu_bypass_disabled)
2350 		pnv_pci_ioda2_set_bypass(pe, true);
2351 
2352 	/*
2353 	 * Set the table base here only to carry the iommu_group
2354 	 * further down so that iommu_add_device() can do its job;
2355 	 * pnv_pci_ioda_dma_dev_setup will override it later anyway.
2356 	 */
2357 	if (pe->flags & PNV_IODA_PE_DEV)
2358 		set_iommu_table_base(&pe->pdev->dev, tbl);
2359 
2360 	return 0;
2361 }
2362 
2363 #if defined(CONFIG_IOMMU_API) || defined(CONFIG_PCI_IOV)
2364 static long pnv_pci_ioda2_unset_window(struct iommu_table_group *table_group,
2365 		int num)
2366 {
2367 	struct pnv_ioda_pe *pe = container_of(table_group, struct pnv_ioda_pe,
2368 			table_group);
2369 	struct pnv_phb *phb = pe->phb;
2370 	long ret;
2371 
2372 	pe_info(pe, "Removing DMA window #%d\n", num);
2373 
2374 	ret = opal_pci_map_pe_dma_window(phb->opal_id, pe->pe_number,
2375 			(pe->pe_number << 1) + num,
2376 			0/* levels */, 0/* table address */,
2377 			0/* table size */, 0/* page size */);
2378 	if (ret)
2379 		pe_warn(pe, "Unmapping failed, ret = %ld\n", ret);
2380 	else
2381 		pnv_pci_ioda2_tce_invalidate_pe(pe);
2382 
2383 	pnv_pci_unlink_table_and_group(table_group->tables[num], table_group);
2384 
2385 	return ret;
2386 }
2387 #endif
2388 
2389 #ifdef CONFIG_IOMMU_API
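/*
 * Estimate how much memory the TCE table for the requested window
 * geometry will consume across all levels. For example, a 4GB window
 * with 4K pages and a single level needs 4GB / 4KB = 1M entries of
 * 8 bytes, i.e. an 8MB table; with more levels the entries are split
 * into smaller chunks, each aligned up to at least PAGE_SIZE.
 */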
2390 static unsigned long pnv_pci_ioda2_get_table_size(__u32 page_shift,
2391 		__u64 window_size, __u32 levels)
2392 {
2393 	unsigned long bytes = 0;
2394 	const unsigned window_shift = ilog2(window_size);
2395 	unsigned entries_shift = window_shift - page_shift;
2396 	unsigned table_shift = entries_shift + 3;
2397 	unsigned long tce_table_size = max(0x1000UL, 1UL << table_shift);
2398 	unsigned long direct_table_size;
2399 
2400 	if (!levels || (levels > POWERNV_IOMMU_MAX_LEVELS) ||
2401 			(window_size > memory_hotplug_max()) ||
2402 			!is_power_of_2(window_size))
2403 		return 0;
2404 
2405 	/* Calculate a direct table size from window_size and levels */
2406 	entries_shift = (entries_shift + levels - 1) / levels;
2407 	table_shift = entries_shift + 3;
2408 	table_shift = max_t(unsigned, table_shift, PAGE_SHIFT);
2409 	direct_table_size = 1UL << table_shift;
2410 
2411 	for ( ; levels; --levels) {
2412 		bytes += _ALIGN_UP(tce_table_size, direct_table_size);
2413 
2414 		tce_table_size /= direct_table_size;
2415 		tce_table_size <<= 3;
2416 		tce_table_size = _ALIGN_UP(tce_table_size, direct_table_size);
2417 	}
2418 
2419 	return bytes;
2420 }
2421 
2422 static void pnv_ioda2_take_ownership(struct iommu_table_group *table_group)
2423 {
2424 	struct pnv_ioda_pe *pe = container_of(table_group, struct pnv_ioda_pe,
2425 						table_group);
2426 	/* Store @tbl as pnv_pci_ioda2_unset_window() resets it */
2427 	struct iommu_table *tbl = pe->table_group.tables[0];
2428 
2429 	pnv_pci_ioda2_set_bypass(pe, false);
2430 	pnv_pci_ioda2_unset_window(&pe->table_group, 0);
2431 	pnv_ioda2_table_free(tbl);
2432 }
2433 
2434 static void pnv_ioda2_release_ownership(struct iommu_table_group *table_group)
2435 {
2436 	struct pnv_ioda_pe *pe = container_of(table_group, struct pnv_ioda_pe,
2437 						table_group);
2438 
2439 	pnv_pci_ioda2_setup_default_config(pe);
2440 }
2441 
2442 static struct iommu_table_group_ops pnv_pci_ioda2_ops = {
2443 	.get_table_size = pnv_pci_ioda2_get_table_size,
2444 	.create_table = pnv_pci_ioda2_create_table,
2445 	.set_window = pnv_pci_ioda2_set_window,
2446 	.unset_window = pnv_pci_ioda2_unset_window,
2447 	.take_ownership = pnv_ioda2_take_ownership,
2448 	.release_ownership = pnv_ioda2_release_ownership,
2449 };
2450 
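/*
 * iommu_group_for_each_dev() callback: report (via @opaque) the PE of
 * the first device in the group that sits on an NPU PHB, and stop the
 * walk by returning 1.
 */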
2451 static int gpe_table_group_to_npe_cb(struct device *dev, void *opaque)
2452 {
2453 	struct pci_controller *hose;
2454 	struct pnv_phb *phb;
2455 	struct pnv_ioda_pe **ptmppe = opaque;
2456 	struct pci_dev *pdev = container_of(dev, struct pci_dev, dev);
2457 	struct pci_dn *pdn = pci_get_pdn(pdev);
2458 
2459 	if (!pdn || pdn->pe_number == IODA_INVALID_PE)
2460 		return 0;
2461 
2462 	hose = pci_bus_to_host(pdev->bus);
2463 	phb = hose->private_data;
2464 	if (phb->type != PNV_PHB_NPU)
2465 		return 0;
2466 
2467 	*ptmppe = &phb->ioda.pe_array[pdn->pe_number];
2468 
2469 	return 1;
2470 }
2471 
2472 /*
2473  * This returns the PE of the associated NPU.
2474  * This assumes that the NPU is in the same IOMMU group as the GPU and
2475  * that there are no other PEs.
2476  */
2477 static struct pnv_ioda_pe *gpe_table_group_to_npe(
2478 		struct iommu_table_group *table_group)
2479 {
2480 	struct pnv_ioda_pe *npe = NULL;
2481 	int ret = iommu_group_for_each_dev(table_group->group, &npe,
2482 			gpe_table_group_to_npe_cb);
2483 
2484 	BUG_ON(!ret || !npe);
2485 
2486 	return npe;
2487 }
2488 
2489 static long pnv_pci_ioda2_npu_set_window(struct iommu_table_group *table_group,
2490 		int num, struct iommu_table *tbl)
2491 {
2492 	long ret = pnv_pci_ioda2_set_window(table_group, num, tbl);
2493 
2494 	if (ret)
2495 		return ret;
2496 
2497 	ret = pnv_npu_set_window(gpe_table_group_to_npe(table_group), num, tbl);
2498 	if (ret)
2499 		pnv_pci_ioda2_unset_window(table_group, num);
2500 
2501 	return ret;
2502 }
2503 
2504 static long pnv_pci_ioda2_npu_unset_window(
2505 		struct iommu_table_group *table_group,
2506 		int num)
2507 {
2508 	long ret = pnv_pci_ioda2_unset_window(table_group, num);
2509 
2510 	if (ret)
2511 		return ret;
2512 
2513 	return pnv_npu_unset_window(gpe_table_group_to_npe(table_group), num);
2514 }
2515 
2516 static void pnv_ioda2_npu_take_ownership(struct iommu_table_group *table_group)
2517 {
2518 	/*
2519 	 * Detach NPU first as pnv_ioda2_take_ownership() will destroy
2520 	 * the iommu_table if 32-bit DMA is enabled.
2521 	 */
2522 	pnv_npu_take_ownership(gpe_table_group_to_npe(table_group));
2523 	pnv_ioda2_take_ownership(table_group);
2524 }
2525 
2526 static struct iommu_table_group_ops pnv_pci_ioda2_npu_ops = {
2527 	.get_table_size = pnv_pci_ioda2_get_table_size,
2528 	.create_table = pnv_pci_ioda2_create_table,
2529 	.set_window = pnv_pci_ioda2_npu_set_window,
2530 	.unset_window = pnv_pci_ioda2_npu_unset_window,
2531 	.take_ownership = pnv_ioda2_npu_take_ownership,
2532 	.release_ownership = pnv_ioda2_release_ownership,
2533 };
2534 
2535 static void pnv_pci_ioda_setup_iommu_api(void)
2536 {
2537 	struct pci_controller *hose, *tmp;
2538 	struct pnv_phb *phb;
2539 	struct pnv_ioda_pe *pe, *gpe;
2540 
2541 	/*
2542 	 * Now that all PHBs have been discovered, it is time to add NPU
2543 	 * devices to the corresponding IOMMU groups.
2544 	 */
2545 	list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
2546 		phb = hose->private_data;
2547 
2548 		if (phb->type != PNV_PHB_NPU)
2549 			continue;
2550 
2551 		list_for_each_entry(pe, &phb->ioda.pe_list, list) {
2552 			gpe = pnv_pci_npu_setup_iommu(pe);
2553 			if (gpe)
2554 				gpe->table_group.ops = &pnv_pci_ioda2_npu_ops;
2555 		}
2556 	}
2557 }
2558 #else /* !CONFIG_IOMMU_API */
2559 static void pnv_pci_ioda_setup_iommu_api(void) { }
2560 #endif
2561 
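/*
 * Recursively allocate one node of the (possibly multi-level) TCE table
 * on the given NUMA node. The last level holds the TCEs themselves;
 * intermediate levels hold physical pointers (tagged read/write) to the
 * next level. Recursion stops early once *current_offset hits the limit.
 */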
2562 static __be64 *pnv_pci_ioda2_table_do_alloc_pages(int nid, unsigned shift,
2563 		unsigned levels, unsigned long limit,
2564 		unsigned long *current_offset, unsigned long *total_allocated)
2565 {
2566 	struct page *tce_mem = NULL;
2567 	__be64 *addr, *tmp;
2568 	unsigned order = max_t(unsigned, shift, PAGE_SHIFT) - PAGE_SHIFT;
2569 	unsigned long allocated = 1UL << (order + PAGE_SHIFT);
2570 	unsigned entries = 1UL << (shift - 3);
2571 	long i;
2572 
2573 	tce_mem = alloc_pages_node(nid, GFP_KERNEL, order);
2574 	if (!tce_mem) {
2575 		pr_err("Failed to allocate a TCE memory, order=%d\n", order);
2576 		return NULL;
2577 	}
2578 	addr = page_address(tce_mem);
2579 	memset(addr, 0, allocated);
2580 	*total_allocated += allocated;
2581 
2582 	--levels;
2583 	if (!levels) {
2584 		*current_offset += allocated;
2585 		return addr;
2586 	}
2587 
2588 	for (i = 0; i < entries; ++i) {
2589 		tmp = pnv_pci_ioda2_table_do_alloc_pages(nid, shift,
2590 				levels, limit, current_offset, total_allocated);
2591 		if (!tmp)
2592 			break;
2593 
2594 		addr[i] = cpu_to_be64(__pa(tmp) |
2595 				TCE_PCI_READ | TCE_PCI_WRITE);
2596 
2597 		if (*current_offset >= limit)
2598 			break;
2599 	}
2600 
2601 	return addr;
2602 }
2603 
2604 static void pnv_pci_ioda2_table_do_free_pages(__be64 *addr,
2605 		unsigned long size, unsigned level);
2606 
2607 static long pnv_pci_ioda2_table_alloc_pages(int nid, __u64 bus_offset,
2608 		__u32 page_shift, __u64 window_size, __u32 levels,
2609 		struct iommu_table *tbl)
2610 {
2611 	void *addr;
2612 	unsigned long offset = 0, level_shift, total_allocated = 0;
2613 	const unsigned window_shift = ilog2(window_size);
2614 	unsigned entries_shift = window_shift - page_shift;
2615 	unsigned table_shift = max_t(unsigned, entries_shift + 3, PAGE_SHIFT);
2616 	const unsigned long tce_table_size = 1UL << table_shift;
2617 
2618 	if (!levels || (levels > POWERNV_IOMMU_MAX_LEVELS))
2619 		return -EINVAL;
2620 
2621 	if ((window_size > memory_hotplug_max()) || !is_power_of_2(window_size))
2622 		return -EINVAL;
2623 
2624 	/* Adjust direct table size from window_size and levels */
2625 	entries_shift = (entries_shift + levels - 1) / levels;
2626 	level_shift = entries_shift + 3;
2627 	level_shift = max_t(unsigned, level_shift, PAGE_SHIFT);
2628 
2629 	/* Allocate TCE table */
2630 	addr = pnv_pci_ioda2_table_do_alloc_pages(nid, level_shift,
2631 			levels, tce_table_size, &offset, &total_allocated);
2632 
2633 	/* addr==NULL means that the first level allocation failed */
2634 	if (!addr)
2635 		return -ENOMEM;
2636 
2637 	/*
2638 	 * The first level was allocated but some lower level failed as
2639 	 * we did not allocate as much as we wanted; release the
2640 	 * partially allocated table.
2641 	 */
2642 	if (offset < tce_table_size) {
2643 		pnv_pci_ioda2_table_do_free_pages(addr,
2644 				1ULL << (level_shift - 3), levels - 1);
2645 		return -ENOMEM;
2646 	}
2647 
2648 	/* Setup linux iommu table */
2649 	pnv_pci_setup_iommu_table(tbl, addr, tce_table_size, bus_offset,
2650 			page_shift);
2651 	tbl->it_level_size = 1ULL << (level_shift - 3);
2652 	tbl->it_indirect_levels = levels - 1;
2653 	tbl->it_allocated_size = total_allocated;
2654 
2655 	pr_devel("Created TCE table: ws=%08llx ts=%lx @%08llx\n",
2656 			window_size, tce_table_size, bus_offset);
2657 
2658 	return 0;
2659 }
2660 
2661 static void pnv_pci_ioda2_table_do_free_pages(__be64 *addr,
2662 		unsigned long size, unsigned level)
2663 {
2664 	const unsigned long addr_ul = (unsigned long) addr &
2665 			~(TCE_PCI_READ | TCE_PCI_WRITE);
2666 
2667 	if (level) {
2668 		long i;
2669 		u64 *tmp = (u64 *) addr_ul;
2670 
2671 		for (i = 0; i < size; ++i) {
2672 			unsigned long hpa = be64_to_cpu(tmp[i]);
2673 
2674 			if (!(hpa & (TCE_PCI_READ | TCE_PCI_WRITE)))
2675 				continue;
2676 
2677 			pnv_pci_ioda2_table_do_free_pages(__va(hpa), size,
2678 					level - 1);
2679 		}
2680 	}
2681 
2682 	free_pages(addr_ul, get_order(size << 3));
2683 }
2684 
2685 static void pnv_pci_ioda2_table_free_pages(struct iommu_table *tbl)
2686 {
2687 	const unsigned long size = tbl->it_indirect_levels ?
2688 			tbl->it_level_size : tbl->it_size;
2689 
2690 	if (!tbl->it_size)
2691 		return;
2692 
2693 	pnv_pci_ioda2_table_do_free_pages((__be64 *)tbl->it_base, size,
2694 			tbl->it_indirect_levels);
2695 }
2696 
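/*
 * Set up IOMMU state for an IODA2 PE: register its IOMMU group, describe
 * the 32-bit TCE window (0..m32_pci_base) and supported page sizes, then
 * create the default DMA configuration, which also enables the 64-bit
 * bypass window unless bypass was disabled on the command line.
 */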
2697 static void pnv_pci_ioda2_setup_dma_pe(struct pnv_phb *phb,
2698 				       struct pnv_ioda_pe *pe)
2699 {
2700 	int64_t rc;
2701 
2702 	if (!pnv_pci_ioda_pe_dma_weight(pe))
2703 		return;
2704 
2705 	/* TVE #1 is selected by PCI address bit 59 */
2706 	pe->tce_bypass_base = 1ull << 59;
2707 
2708 	iommu_register_group(&pe->table_group, phb->hose->global_number,
2709 			pe->pe_number);
2710 
2711 	/* The PE will reserve all possible 32-bit space */
2712 	pe_info(pe, "Setting up 32-bit TCE table at 0..%08x\n",
2713 		phb->ioda.m32_pci_base);
2714 
2715 	/* Setup linux iommu table */
2716 	pe->table_group.tce32_start = 0;
2717 	pe->table_group.tce32_size = phb->ioda.m32_pci_base;
2718 	pe->table_group.max_dynamic_windows_supported =
2719 			IOMMU_TABLE_GROUP_MAX_TABLES;
2720 	pe->table_group.max_levels = POWERNV_IOMMU_MAX_LEVELS;
2721 	pe->table_group.pgsizes = SZ_4K | SZ_64K | SZ_16M;
2722 #ifdef CONFIG_IOMMU_API
2723 	pe->table_group.ops = &pnv_pci_ioda2_ops;
2724 #endif
2725 
2726 	rc = pnv_pci_ioda2_setup_default_config(pe);
2727 	if (rc)
2728 		return;
2729 
2730 	if (pe->flags & PNV_IODA_PE_DEV)
2731 		iommu_add_device(&pe->pdev->dev);
2732 	else if (pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL))
2733 		pnv_ioda_setup_bus_dma(pe, pe->pbus);
2734 }
2735 
2736 #ifdef CONFIG_PCI_MSI
2737 int64_t pnv_opal_pci_msi_eoi(struct irq_chip *chip, unsigned int hw_irq)
2738 {
2739 	struct pnv_phb *phb = container_of(chip, struct pnv_phb,
2740 					   ioda.irq_chip);
2741 
2742 	return opal_pci_msi_eoi(phb->opal_id, hw_irq);
2743 }
2744 
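/*
 * On PHB3, an MSI EOI also has to be signalled to firmware, so the IRQ
 * chip's irq_eoi is overridden with this wrapper, which does the OPAL
 * call before the normal XICS EOI (see pnv_set_msi_irq_chip() below).
 */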
2745 static void pnv_ioda2_msi_eoi(struct irq_data *d)
2746 {
2747 	int64_t rc;
2748 	unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
2749 	struct irq_chip *chip = irq_data_get_irq_chip(d);
2750 
2751 	rc = pnv_opal_pci_msi_eoi(chip, hw_irq);
2752 	WARN_ON_ONCE(rc);
2753 
2754 	icp_native_eoi(d);
2755 }
2756 
2757 
2758 void pnv_set_msi_irq_chip(struct pnv_phb *phb, unsigned int virq)
2759 {
2760 	struct irq_data *idata;
2761 	struct irq_chip *ichip;
2762 
2763 	/* The MSI EOI OPAL call is only needed on PHB3 */
2764 	if (phb->model != PNV_PHB_MODEL_PHB3)
2765 		return;
2766 
2767 	if (!phb->ioda.irq_chip_init) {
2768 		/*
2769 		 * The first time we set up an MSI IRQ, we need to set up
2770 		 * the corresponding IRQ chip so it routes correctly.
2771 		 */
2772 		idata = irq_get_irq_data(virq);
2773 		ichip = irq_data_get_irq_chip(idata);
2774 		phb->ioda.irq_chip_init = 1;
2775 		phb->ioda.irq_chip = *ichip;
2776 		phb->ioda.irq_chip.irq_eoi = pnv_ioda2_msi_eoi;
2777 	}
2778 	irq_set_chip(virq, &phb->ioda.irq_chip);
2779 }
2780 
2781 /*
2782  * Returns true iff chip is something that we could call
2783  * pnv_opal_pci_msi_eoi for.
2784  */
2785 bool is_pnv_opal_msi(struct irq_chip *chip)
2786 {
2787 	return chip->irq_eoi == pnv_ioda2_msi_eoi;
2788 }
2789 EXPORT_SYMBOL_GPL(is_pnv_opal_msi);
2790 
2791 static int pnv_pci_ioda_msi_setup(struct pnv_phb *phb, struct pci_dev *dev,
2792 				  unsigned int hwirq, unsigned int virq,
2793 				  unsigned int is_64, struct msi_msg *msg)
2794 {
2795 	struct pnv_ioda_pe *pe = pnv_ioda_get_pe(dev);
2796 	unsigned int xive_num = hwirq - phb->msi_base;
2797 	__be32 data;
2798 	int rc;
2799 
2800 	/* No PE assigned ? bail out ... no MSI for you ! */
2801 	if (pe == NULL)
2802 		return -ENXIO;
2803 
2804 	/* Check if we have an MVE */
2805 	if (pe->mve_number < 0)
2806 		return -ENXIO;
2807 
2808 	/* Force 32-bit MSI on some broken devices */
2809 	if (dev->no_64bit_msi)
2810 		is_64 = 0;
2811 
2812 	/* Assign XIVE to PE */
2813 	rc = opal_pci_set_xive_pe(phb->opal_id, pe->pe_number, xive_num);
2814 	if (rc) {
2815 		pr_warn("%s: OPAL error %d setting XIVE %d PE\n",
2816 			pci_name(dev), rc, xive_num);
2817 		return -EIO;
2818 	}
2819 
2820 	if (is_64) {
2821 		__be64 addr64;
2822 
2823 		rc = opal_get_msi_64(phb->opal_id, pe->mve_number, xive_num, 1,
2824 				     &addr64, &data);
2825 		if (rc) {
2826 			pr_warn("%s: OPAL error %d getting 64-bit MSI data\n",
2827 				pci_name(dev), rc);
2828 			return -EIO;
2829 		}
2830 		msg->address_hi = be64_to_cpu(addr64) >> 32;
2831 		msg->address_lo = be64_to_cpu(addr64) & 0xfffffffful;
2832 	} else {
2833 		__be32 addr32;
2834 
2835 		rc = opal_get_msi_32(phb->opal_id, pe->mve_number, xive_num, 1,
2836 				     &addr32, &data);
2837 		if (rc) {
2838 			pr_warn("%s: OPAL error %d getting 32-bit MSI data\n",
2839 				pci_name(dev), rc);
2840 			return -EIO;
2841 		}
2842 		msg->address_hi = 0;
2843 		msg->address_lo = be32_to_cpu(addr32);
2844 	}
2845 	msg->data = be32_to_cpu(data);
2846 
2847 	pnv_set_msi_irq_chip(phb, virq);
2848 
2849 	pr_devel("%s: %s-bit MSI on hwirq %x (xive #%d),"
2850 		 " address=%x_%08x data=%x PE# %x\n",
2851 		 pci_name(dev), is_64 ? "64" : "32", hwirq, xive_num,
2852 		 msg->address_hi, msg->address_lo, data, pe->pe_number);
2853 
2854 	return 0;
2855 }
2856 
2857 static void pnv_pci_init_ioda_msis(struct pnv_phb *phb)
2858 {
2859 	unsigned int count;
2860 	const __be32 *prop = of_get_property(phb->hose->dn,
2861 					     "ibm,opal-msi-ranges", NULL);
2862 	if (!prop) {
2863 		/* BML Fallback */
2864 		prop = of_get_property(phb->hose->dn, "msi-ranges", NULL);
2865 	}
2866 	if (!prop)
2867 		return;
2868 
2869 	phb->msi_base = be32_to_cpup(prop);
2870 	count = be32_to_cpup(prop + 1);
2871 	if (msi_bitmap_alloc(&phb->msi_bmp, count, phb->hose->dn)) {
2872 		pr_err("PCI %d: Failed to allocate MSI bitmap !\n",
2873 		       phb->hose->global_number);
2874 		return;
2875 	}
2876 
2877 	phb->msi_setup = pnv_pci_ioda_msi_setup;
2878 	phb->msi32_support = 1;
2879 	pr_info("  Allocated bitmap for %d MSIs (base IRQ 0x%x)\n",
2880 		count, phb->msi_base);
2881 }
2882 #else
2883 static void pnv_pci_init_ioda_msis(struct pnv_phb *phb) { }
2884 #endif /* CONFIG_PCI_MSI */
2885 
2886 #ifdef CONFIG_PCI_IOV
2887 static void pnv_pci_ioda_fixup_iov_resources(struct pci_dev *pdev)
2888 {
2889 	struct pci_controller *hose = pci_bus_to_host(pdev->bus);
2890 	struct pnv_phb *phb = hose->private_data;
2891 	const resource_size_t gate = phb->ioda.m64_segsize >> 2;
2892 	struct resource *res;
2893 	int i;
2894 	resource_size_t size, total_vf_bar_sz;
2895 	struct pci_dn *pdn;
2896 	int mul, total_vfs;
2897 
2898 	if (!pdev->is_physfn || pdev->is_added)
2899 		return;
2900 
2901 	pdn = pci_get_pdn(pdev);
2902 	pdn->vfs_expanded = 0;
2903 	pdn->m64_single_mode = false;
2904 
2905 	total_vfs = pci_sriov_get_totalvfs(pdev);
2906 	mul = phb->ioda.total_pe_num;
2907 	total_vf_bar_sz = 0;
2908 
2909 	for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
2910 		res = &pdev->resource[i + PCI_IOV_RESOURCES];
2911 		if (!res->flags || res->parent)
2912 			continue;
2913 		if (!pnv_pci_is_m64_flags(res->flags)) {
2914 			dev_warn(&pdev->dev, "SR-IOV is not supported with"
2915 					" non-M64 VF BAR%d: %pR\n",
2916 				 i, res);
2917 			goto truncate_iov;
2918 		}
2919 
2920 		total_vf_bar_sz += pci_iov_resource_size(pdev,
2921 				i + PCI_IOV_RESOURCES);
2922 
2923 		/*
2924 		 * If bigger than a quarter of the M64 segment size, just
2925 		 * round up to a power of two.
2926 		 *
2927 		 * Generally, one M64 BAR maps one IOV BAR. To avoid conflicts
2928 		 * with other devices, the IOV BAR size is expanded to
2929 		 * (total_pe * VF_BAR_size).  When VF_BAR_size is half of the
2930 		 * M64 segment size, the expanded size would equal half of the
2931 		 * whole M64 space, which would exhaust the M64 space and
2932 		 * limit system flexibility.  This is a design decision to
2933 		 * set the boundary at a quarter of the M64 segment size.
2934 		 */
2935 		if (total_vf_bar_sz > gate) {
2936 			mul = roundup_pow_of_two(total_vfs);
2937 			dev_info(&pdev->dev,
2938 				"VF BAR Total IOV size %llx > %llx, roundup to %d VFs\n",
2939 				total_vf_bar_sz, gate, mul);
2940 			pdn->m64_single_mode = true;
2941 			break;
2942 		}
2943 	}
2944 
2945 	for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
2946 		res = &pdev->resource[i + PCI_IOV_RESOURCES];
2947 		if (!res->flags || res->parent)
2948 			continue;
2949 
2950 		size = pci_iov_resource_size(pdev, i + PCI_IOV_RESOURCES);
2951 		/*
2952 		 * On PHB3, the minimum size alignment of M64 BAR in single
2953 		 * mode is 32MB.
2954 		 */
2955 		if (pdn->m64_single_mode && (size < SZ_32M))
2956 			goto truncate_iov;
2957 		dev_dbg(&pdev->dev, " Fixing VF BAR%d: %pR to\n", i, res);
2958 		res->end = res->start + size * mul - 1;
2959 		dev_dbg(&pdev->dev, "                       %pR\n", res);
2960 		dev_info(&pdev->dev, "VF BAR%d: %pR (expanded to %d VFs for PE alignment)",
2961 			 i, res, mul);
2962 	}
2963 	pdn->vfs_expanded = mul;
2964 
2965 	return;
2966 
2967 truncate_iov:
2968 	/* To save MMIO space, IOV BAR is truncated. */
2969 	for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
2970 		res = &pdev->resource[i + PCI_IOV_RESOURCES];
2971 		res->flags = 0;
2972 		res->end = res->start - 1;
2973 	}
2974 }
2975 #endif /* CONFIG_PCI_IOV */
2976 
2977 static void pnv_ioda_setup_pe_res(struct pnv_ioda_pe *pe,
2978 				  struct resource *res)
2979 {
2980 	struct pnv_phb *phb = pe->phb;
2981 	struct pci_bus_region region;
2982 	int index;
2983 	int64_t rc;
2984 
2985 	if (!res || !res->flags || res->start > res->end)
2986 		return;
2987 
2988 	if (res->flags & IORESOURCE_IO) {
2989 		region.start = res->start - phb->ioda.io_pci_base;
2990 		region.end   = res->end - phb->ioda.io_pci_base;
2991 		index = region.start / phb->ioda.io_segsize;
2992 
2993 		while (index < phb->ioda.total_pe_num &&
2994 		       region.start <= region.end) {
2995 			phb->ioda.io_segmap[index] = pe->pe_number;
2996 			rc = opal_pci_map_pe_mmio_window(phb->opal_id,
2997 				pe->pe_number, OPAL_IO_WINDOW_TYPE, 0, index);
2998 			if (rc != OPAL_SUCCESS) {
2999 				pr_err("%s: Error %lld mapping IO segment#%d to PE#%x\n",
3000 				       __func__, rc, index, pe->pe_number);
3001 				break;
3002 			}
3003 
3004 			region.start += phb->ioda.io_segsize;
3005 			index++;
3006 		}
3007 	} else if ((res->flags & IORESOURCE_MEM) &&
3008 		   !pnv_pci_is_m64(phb, res)) {
3009 		region.start = res->start -
3010 			       phb->hose->mem_offset[0] -
3011 			       phb->ioda.m32_pci_base;
3012 		region.end   = res->end -
3013 			       phb->hose->mem_offset[0] -
3014 			       phb->ioda.m32_pci_base;
3015 		index = region.start / phb->ioda.m32_segsize;
3016 
3017 		while (index < phb->ioda.total_pe_num &&
3018 		       region.start <= region.end) {
3019 			phb->ioda.m32_segmap[index] = pe->pe_number;
3020 			rc = opal_pci_map_pe_mmio_window(phb->opal_id,
3021 				pe->pe_number, OPAL_M32_WINDOW_TYPE, 0, index);
3022 			if (rc != OPAL_SUCCESS) {
3023 				pr_err("%s: Error %lld mapping M32 segment#%d to PE#%x",
3024 				       __func__, rc, index, pe->pe_number);
3025 				break;
3026 			}
3027 
3028 			region.start += phb->ioda.m32_segsize;
3029 			index++;
3030 		}
3031 	}
3032 }
3033 
3034 /*
3035  * This function is supposed to be called on a per-PE basis, from
3036  * top to bottom, so the I/O or MMIO segments assigned to a parent
3037  * PE can be overridden by its child PEs if necessary.
3038  */
3039 static void pnv_ioda_setup_pe_seg(struct pnv_ioda_pe *pe)
3040 {
3041 	struct pci_dev *pdev;
3042 	int i;
3043 
3044 	/*
3045 	 * NOTE: We only care about PCI bus based PEs for now. PCI
3046 	 * device based PEs, for example SR-IOV VFs, should be
3047 	 * figured out later.
3048 	 */
3049 	BUG_ON(!(pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL)));
3050 
3051 	list_for_each_entry(pdev, &pe->pbus->devices, bus_list) {
3052 		for (i = 0; i <= PCI_ROM_RESOURCE; i++)
3053 			pnv_ioda_setup_pe_res(pe, &pdev->resource[i]);
3054 
3055 		/*
3056 		 * If the PE contains all subordinate PCI buses, the
3057 		 * windows of the child bridges should be mapped to
3058 		 * the PE as well.
3059 		 */
3060 		if (!(pe->flags & PNV_IODA_PE_BUS_ALL) || !pci_is_bridge(pdev))
3061 			continue;
3062 		for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++)
3063 			pnv_ioda_setup_pe_res(pe,
3064 				&pdev->resource[PCI_BRIDGE_RESOURCES + i]);
3065 	}
3066 }
3067 
3068 #ifdef CONFIG_DEBUG_FS
3069 static int pnv_pci_diag_data_set(void *data, u64 val)
3070 {
3071 	struct pci_controller *hose;
3072 	struct pnv_phb *phb;
3073 	s64 ret;
3074 
3075 	if (val != 1ULL)
3076 		return -EINVAL;
3077 
3078 	hose = (struct pci_controller *)data;
3079 	if (!hose || !hose->private_data)
3080 		return -ENODEV;
3081 
3082 	phb = hose->private_data;
3083 
3084 	/* Retrieve the diag data from firmware */
3085 	ret = opal_pci_get_phb_diag_data2(phb->opal_id, phb->diag.blob,
3086 					  PNV_PCI_DIAG_BUF_SIZE);
3087 	if (ret != OPAL_SUCCESS)
3088 		return -EIO;
3089 
3090 	/* Print the diag data to the kernel log */
3091 	pnv_pci_dump_phb_diag_data(phb->hose, phb->diag.blob);
3092 	return 0;
3093 }
3094 
3095 DEFINE_SIMPLE_ATTRIBUTE(pnv_pci_diag_data_fops, NULL,
3096 			pnv_pci_diag_data_set, "%llu\n");
3097 
3098 #endif /* CONFIG_DEBUG_FS */
3099 
3100 static void pnv_pci_ioda_create_dbgfs(void)
3101 {
3102 #ifdef CONFIG_DEBUG_FS
3103 	struct pci_controller *hose, *tmp;
3104 	struct pnv_phb *phb;
3105 	char name[16];
3106 
3107 	list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
3108 		phb = hose->private_data;
3109 
3110 		/* Notify initialization of PHB done */
3111 		phb->initialized = 1;
3112 
3113 		sprintf(name, "PCI%04x", hose->global_number);
3114 		phb->dbgfs = debugfs_create_dir(name, powerpc_debugfs_root);
3115 		if (!phb->dbgfs) {
3116 			pr_warning("%s: Error creating debugfs for PHB#%x\n",
3117 				__func__, hose->global_number);
3118 			continue;
3119 		}
3120 
3121 		debugfs_create_file("dump_diag_regs", 0200, phb->dbgfs, hose,
3122 				    &pnv_pci_diag_data_fops);
3123 	}
3124 #endif /* CONFIG_DEBUG_FS */
3125 }
3126 
3127 static void pnv_pci_ioda_fixup(void)
3128 {
3129 	pnv_pci_ioda_setup_PEs();
3130 	pnv_pci_ioda_setup_iommu_api();
3131 	pnv_pci_ioda_create_dbgfs();
3132 
3133 #ifdef CONFIG_EEH
3134 	eeh_init();
3135 	eeh_addr_cache_build();
3136 #endif
3137 }
3138 
3139 /*
3140  * Returns the alignment for I/O or memory windows for P2P
3141  * bridges. That actually depends on how PEs are segmented.
3142  * For now, we return I/O or M32 segment size for PE sensitive
3143  * P2P bridges. Otherwise, the default values (4KiB for I/O,
3144  * 1MiB for memory) will be returned.
3145  *
3146  * The current PCI bus might be put into one PE, which was
3147  * created against the parent PCI bridge. In that case, we
3148  * needn't enlarge the alignment, so that we can save some
3149  * resources.
3150  */
3151 static resource_size_t pnv_pci_window_alignment(struct pci_bus *bus,
3152 						unsigned long type)
3153 {
3154 	struct pci_dev *bridge;
3155 	struct pci_controller *hose = pci_bus_to_host(bus);
3156 	struct pnv_phb *phb = hose->private_data;
3157 	int num_pci_bridges = 0;
3158 
3159 	bridge = bus->self;
3160 	while (bridge) {
3161 		if (pci_pcie_type(bridge) == PCI_EXP_TYPE_PCI_BRIDGE) {
3162 			num_pci_bridges++;
3163 			if (num_pci_bridges >= 2)
3164 				return 1;
3165 		}
3166 
3167 		bridge = bridge->bus->self;
3168 	}
3169 
3170 	/*
3171 	 * We fall back to M32 if M64 isn't supported. We enforce the M64
3172 	 * alignment for any 64-bit resource, PCIe doesn't care and
3173 	 * bridges only do 64-bit prefetchable anyway.
3174 	 */
3175 	if (phb->ioda.m64_segsize && pnv_pci_is_m64_flags(type))
3176 		return phb->ioda.m64_segsize;
3177 	if (type & IORESOURCE_MEM)
3178 		return phb->ioda.m32_segsize;
3179 
3180 	return phb->ioda.io_segsize;
3181 }
3182 
3183 /*
3184  * We update the root port, or the upstream port of the bridge
3185  * behind the root port, with the PHB's windows in order to
3186  * accommodate changes in required resources during PCI (slot)
3187  * hotplug. The hotplugged slot is connected to either the root
3188  * port or a downstream port of a PCIe switch behind the
3189  * root port.
3190  */
3191 static void pnv_pci_fixup_bridge_resources(struct pci_bus *bus,
3192 					   unsigned long type)
3193 {
3194 	struct pci_controller *hose = pci_bus_to_host(bus);
3195 	struct pnv_phb *phb = hose->private_data;
3196 	struct pci_dev *bridge = bus->self;
3197 	struct resource *r, *w;
3198 	bool msi_region = false;
3199 	int i;
3200 
3201 	/* Check if we need to apply a fixup to the bridge's windows */
3202 	if (!pci_is_root_bus(bridge->bus) &&
3203 	    !pci_is_root_bus(bridge->bus->self->bus))
3204 		return;
3205 
3206 	/* Fixup the resources */
3207 	for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++) {
3208 		r = &bridge->resource[PCI_BRIDGE_RESOURCES + i];
3209 		if (!r->flags || !r->parent)
3210 			continue;
3211 
3212 		w = NULL;
3213 		if (r->flags & type & IORESOURCE_IO)
3214 			w = &hose->io_resource;
3215 		else if (pnv_pci_is_m64(phb, r) &&
3216 			 (type & IORESOURCE_PREFETCH) &&
3217 			 phb->ioda.m64_segsize)
3218 			w = &hose->mem_resources[1];
3219 		else if (r->flags & type & IORESOURCE_MEM) {
3220 			w = &hose->mem_resources[0];
3221 			msi_region = true;
3222 		}
3223 
3224 		r->start = w->start;
3225 		r->end = w->end;
3226 
3227 		/* The 64KB 32-bit MSI region shouldn't be included in
3228 		 * the 32-bit bridge window. Otherwise, we can see strange
3229 		 * issues, one of them being an EEH error observed on Garrison.
3230 		 *
3231 		 * Exclude the top 1MB region, which is the minimum alignment
3232 		 * of the 32-bit bridge window.
3233 		 */
3234 		if (msi_region) {
3235 			r->end += 0x10000;
3236 			r->end -= 0x100000;
3237 		}
3238 	}
3239 }
3240 
3241 static void pnv_pci_setup_bridge(struct pci_bus *bus, unsigned long type)
3242 {
3243 	struct pci_controller *hose = pci_bus_to_host(bus);
3244 	struct pnv_phb *phb = hose->private_data;
3245 	struct pci_dev *bridge = bus->self;
3246 	struct pnv_ioda_pe *pe;
3247 	bool all = (pci_pcie_type(bridge) == PCI_EXP_TYPE_PCI_BRIDGE);
3248 
3249 	/* Extend bridge's windows if necessary */
3250 	pnv_pci_fixup_bridge_resources(bus, type);
3251 
3252 	/* The PE for the root bus should be realized before any other */
3253 	if (!phb->ioda.root_pe_populated) {
3254 		pe = pnv_ioda_setup_bus_PE(phb->hose->bus, false);
3255 		if (pe) {
3256 			phb->ioda.root_pe_idx = pe->pe_number;
3257 			phb->ioda.root_pe_populated = true;
3258 		}
3259 	}
3260 
3261 	/* Don't assign a PE to a PCI bus that has no subordinate devices */
3262 	if (list_empty(&bus->devices))
3263 		return;
3264 
3265 	/* Reserve PEs according to used M64 resources */
3266 	if (phb->reserve_m64_pe)
3267 		phb->reserve_m64_pe(bus, NULL, all);
3268 
3269 	/*
3270 	 * Assign PE. We might run here because of partial hotplug.
3271 	 * For the case, we just pick up the existing PE and should
3272 	 * not allocate resources again.
3273 	 */
3274 	pe = pnv_ioda_setup_bus_PE(bus, all);
3275 	if (!pe)
3276 		return;
3277 
3278 	pnv_ioda_setup_pe_seg(pe);
3279 	switch (phb->type) {
3280 	case PNV_PHB_IODA1:
3281 		pnv_pci_ioda1_setup_dma_pe(phb, pe);
3282 		break;
3283 	case PNV_PHB_IODA2:
3284 		pnv_pci_ioda2_setup_dma_pe(phb, pe);
3285 		break;
3286 	default:
3287 		pr_warn("%s: No DMA for PHB#%x (type %d)\n",
3288 			__func__, phb->hose->global_number, phb->type);
3289 	}
3290 }
3291 
3292 #ifdef CONFIG_PCI_IOV
3293 static resource_size_t pnv_pci_iov_resource_alignment(struct pci_dev *pdev,
3294 						      int resno)
3295 {
3296 	struct pci_controller *hose = pci_bus_to_host(pdev->bus);
3297 	struct pnv_phb *phb = hose->private_data;
3298 	struct pci_dn *pdn = pci_get_pdn(pdev);
3299 	resource_size_t align;
3300 
3301 	/*
3302 	 * On the PowerNV platform, an IOV BAR is mapped by an M64 BAR to
3303 	 * enable SR-IOV, and from the hardware perspective the range mapped
3304 	 * by an M64 BAR must be size aligned.
3305 	 *
3306 	 * When an IOV BAR is mapped with an M64 BAR in Single PE mode, that
3307 	 * powernv-specific hardware restriction is gone. But if we just used
3308 	 * the VF BAR size as the alignment, the PF BAR and a VF BAR might be
3309 	 * allocated within one segment of M64 #15, which introduces a PE
3310 	 * conflict between PF and VF. Because of this, the minimum alignment
3311 	 * of an IOV BAR is m64_segsize.
3312 	 *
3313 	 * This function returns the total IOV BAR size if the M64 BAR is in
3314 	 * Shared PE mode, or just the VF BAR size if not.
3315 	 * If the M64 BAR is in Single PE mode, it returns the VF BAR size,
3316 	 * or the M64 segment size if the IOV BAR size is less.
3317 	 */
3318 	align = pci_iov_resource_size(pdev, resno);
3319 	if (!pdn->vfs_expanded)
3320 		return align;
3321 	if (pdn->m64_single_mode)
3322 		return max(align, (resource_size_t)phb->ioda.m64_segsize);
3323 
3324 	return pdn->vfs_expanded * align;
3325 }
3326 #endif /* CONFIG_PCI_IOV */
3327 
3328 /* Prevent enabling devices for which we couldn't properly
3329  * assign a PE
3330  */
3331 bool pnv_pci_enable_device_hook(struct pci_dev *dev)
3332 {
3333 	struct pci_controller *hose = pci_bus_to_host(dev->bus);
3334 	struct pnv_phb *phb = hose->private_data;
3335 	struct pci_dn *pdn;
3336 	/* The function may be called while the PEs have not been
3337 	 * created yet, for example during resource reassignment in
3338 	 * the PCI probe period. We just skip the check if the
3339 	 * PEs aren't ready.
3340 	 * PEs isn't ready.
3341 	 */
3342 	if (!phb->initialized)
3343 		return true;
3344 
3345 	pdn = pci_get_pdn(dev);
3346 	if (!pdn || pdn->pe_number == IODA_INVALID_PE)
3347 		return false;
3348 
3349 	return true;
3350 }
3351 
3352 static long pnv_pci_ioda1_unset_window(struct iommu_table_group *table_group,
3353 				       int num)
3354 {
3355 	struct pnv_ioda_pe *pe = container_of(table_group,
3356 					      struct pnv_ioda_pe, table_group);
3357 	struct pnv_phb *phb = pe->phb;
3358 	unsigned int idx;
3359 	long rc;
3360 
3361 	pe_info(pe, "Removing DMA window #%d\n", num);
3362 	for (idx = 0; idx < phb->ioda.dma32_count; idx++) {
3363 		if (phb->ioda.dma32_segmap[idx] != pe->pe_number)
3364 			continue;
3365 
3366 		rc = opal_pci_map_pe_dma_window(phb->opal_id, pe->pe_number,
3367 						idx, 0, 0ul, 0ul, 0ul);
3368 		if (rc != OPAL_SUCCESS) {
3369 			pe_warn(pe, "Failure %ld unmapping DMA32 segment#%d\n",
3370 				rc, idx);
3371 			return rc;
3372 		}
3373 
3374 		phb->ioda.dma32_segmap[idx] = IODA_INVALID_PE;
3375 	}
3376 
3377 	pnv_pci_unlink_table_and_group(table_group->tables[num], table_group);
3378 	return OPAL_SUCCESS;
3379 }
3380 
3381 static void pnv_pci_ioda1_release_pe_dma(struct pnv_ioda_pe *pe)
3382 {
3383 	unsigned int weight = pnv_pci_ioda_pe_dma_weight(pe);
3384 	struct iommu_table *tbl = pe->table_group.tables[0];
3385 	int64_t rc;
3386 
3387 	if (!weight)
3388 		return;
3389 
3390 	rc = pnv_pci_ioda1_unset_window(&pe->table_group, 0);
3391 	if (rc != OPAL_SUCCESS)
3392 		return;
3393 
3394 	pnv_pci_p7ioc_tce_invalidate(tbl, tbl->it_offset, tbl->it_size, false);
3395 	if (pe->table_group.group) {
3396 		iommu_group_put(pe->table_group.group);
3397 		WARN_ON(pe->table_group.group);
3398 	}
3399 
3400 	free_pages(tbl->it_base, get_order(tbl->it_size << 3));
3401 	iommu_free_table(tbl, "pnv");
3402 }
3403 
3404 static void pnv_pci_ioda2_release_pe_dma(struct pnv_ioda_pe *pe)
3405 {
3406 	struct iommu_table *tbl = pe->table_group.tables[0];
3407 	unsigned int weight = pnv_pci_ioda_pe_dma_weight(pe);
3408 #ifdef CONFIG_IOMMU_API
3409 	int64_t rc;
3410 #endif
3411 
3412 	if (!weight)
3413 		return;
3414 
3415 #ifdef CONFIG_IOMMU_API
3416 	rc = pnv_pci_ioda2_unset_window(&pe->table_group, 0);
3417 	if (rc)
3418 		pe_warn(pe, "OPAL error %ld releasing DMA window\n", rc);
3419 #endif
3420 
3421 	pnv_pci_ioda2_set_bypass(pe, false);
3422 	if (pe->table_group.group) {
3423 		iommu_group_put(pe->table_group.group);
3424 		WARN_ON(pe->table_group.group);
3425 	}
3426 
3427 	pnv_pci_ioda2_table_free_pages(tbl);
3428 	iommu_free_table(tbl, "pnv");
3429 }
3430 
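/*
 * Walk one of the PHB's segment maps and return every segment owned by
 * this PE to the reserved PE. IODA1 M64 segments are addressed as
 * (M64 BAR index, segment within the BAR); all other windows use a
 * plain segment index.
 */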
3431 static void pnv_ioda_free_pe_seg(struct pnv_ioda_pe *pe,
3432 				 unsigned short win,
3433 				 unsigned int *map)
3434 {
3435 	struct pnv_phb *phb = pe->phb;
3436 	int idx;
3437 	int64_t rc;
3438 
3439 	for (idx = 0; idx < phb->ioda.total_pe_num; idx++) {
3440 		if (map[idx] != pe->pe_number)
3441 			continue;
3442 
3443 		if (win == OPAL_M64_WINDOW_TYPE)
3444 			rc = opal_pci_map_pe_mmio_window(phb->opal_id,
3445 					phb->ioda.reserved_pe_idx, win,
3446 					idx / PNV_IODA1_M64_SEGS,
3447 					idx % PNV_IODA1_M64_SEGS);
3448 		else
3449 			rc = opal_pci_map_pe_mmio_window(phb->opal_id,
3450 					phb->ioda.reserved_pe_idx, win, 0, idx);
3451 
3452 		if (rc != OPAL_SUCCESS)
3453 			pe_warn(pe, "Error %ld unmapping (%d) segment#%d\n",
3454 				rc, win, idx);
3455 
3456 		map[idx] = IODA_INVALID_PE;
3457 	}
3458 }
3459 
3460 static void pnv_ioda_release_pe_seg(struct pnv_ioda_pe *pe)
3461 {
3462 	struct pnv_phb *phb = pe->phb;
3463 
3464 	if (phb->type == PNV_PHB_IODA1) {
3465 		pnv_ioda_free_pe_seg(pe, OPAL_IO_WINDOW_TYPE,
3466 				     phb->ioda.io_segmap);
3467 		pnv_ioda_free_pe_seg(pe, OPAL_M32_WINDOW_TYPE,
3468 				     phb->ioda.m32_segmap);
3469 		pnv_ioda_free_pe_seg(pe, OPAL_M64_WINDOW_TYPE,
3470 				     phb->ioda.m64_segmap);
3471 	} else if (phb->type == PNV_PHB_IODA2) {
3472 		pnv_ioda_free_pe_seg(pe, OPAL_M32_WINDOW_TYPE,
3473 				     phb->ioda.m32_segmap);
3474 	}
3475 }
3476 
3477 static void pnv_ioda_release_pe(struct pnv_ioda_pe *pe)
3478 {
3479 	struct pnv_phb *phb = pe->phb;
3480 	struct pnv_ioda_pe *slave, *tmp;
3481 
3482 	list_del(&pe->list);
3483 	switch (phb->type) {
3484 	case PNV_PHB_IODA1:
3485 		pnv_pci_ioda1_release_pe_dma(pe);
3486 		break;
3487 	case PNV_PHB_IODA2:
3488 		pnv_pci_ioda2_release_pe_dma(pe);
3489 		break;
3490 	default:
3491 		WARN_ON(1);
3492 	}
3493 
3494 	pnv_ioda_release_pe_seg(pe);
3495 	pnv_ioda_deconfigure_pe(pe->phb, pe);
3496 
3497 	/* Release slave PEs in the compound PE */
3498 	if (pe->flags & PNV_IODA_PE_MASTER) {
3499 		list_for_each_entry_safe(slave, tmp, &pe->slaves, list) {
3500 			list_del(&slave->list);
3501 			pnv_ioda_free_pe(slave);
3502 		}
3503 	}
3504 
3505 	/*
3506 	 * The PE for root bus can be removed because of hotplug in EEH
3507 	 * recovery for fenced PHB error. We need to mark the PE dead so
3508 	 * that it can be populated again in PCI hot add path. The PE
3509 	 * shouldn't be destroyed as it's the global reserved resource.
3510 	 */
3511 	if (phb->ioda.root_pe_populated &&
3512 	    phb->ioda.root_pe_idx == pe->pe_number)
3513 		phb->ioda.root_pe_populated = false;
3514 	else
3515 		pnv_ioda_free_pe(pe);
3516 }
3517 
3518 static void pnv_pci_release_device(struct pci_dev *pdev)
3519 {
3520 	struct pci_controller *hose = pci_bus_to_host(pdev->bus);
3521 	struct pnv_phb *phb = hose->private_data;
3522 	struct pci_dn *pdn = pci_get_pdn(pdev);
3523 	struct pnv_ioda_pe *pe;
3524 
3525 	if (pdev->is_virtfn)
3526 		return;
3527 
3528 	if (!pdn || pdn->pe_number == IODA_INVALID_PE)
3529 		return;
3530 
3531 	/*
3532 	 * PCI hotplug can happen as part of EEH error recovery. The @pdn
3533 	 * isn't removed and added afterwards in this scenario. We should
3534 	 * set the PE number in @pdn to an invalid one. Otherwise, the PE's
3535 	 * device count is decreased on removing devices while failing to
3536 	 * be increased on adding devices. That leads to an unbalanced PE
3537 	 * device count and eventually breaks the normal PCI hotplug path.
3538 	 */
3539 	pe = &phb->ioda.pe_array[pdn->pe_number];
3540 	pdn->pe_number = IODA_INVALID_PE;
3541 
3542 	WARN_ON(--pe->device_count < 0);
3543 	if (pe->device_count == 0)
3544 		pnv_ioda_release_pe(pe);
3545 }
3546 
3547 static void pnv_pci_ioda_shutdown(struct pci_controller *hose)
3548 {
3549 	struct pnv_phb *phb = hose->private_data;
3550 
3551 	opal_pci_reset(phb->opal_id, OPAL_RESET_PCI_IODA_TABLE,
3552 		       OPAL_ASSERT_RESET);
3553 }
3554 
3555 static const struct pci_controller_ops pnv_pci_ioda_controller_ops = {
3556 	.dma_dev_setup		= pnv_pci_dma_dev_setup,
3557 	.dma_bus_setup		= pnv_pci_dma_bus_setup,
3558 #ifdef CONFIG_PCI_MSI
3559 	.setup_msi_irqs		= pnv_setup_msi_irqs,
3560 	.teardown_msi_irqs	= pnv_teardown_msi_irqs,
3561 #endif
3562 	.enable_device_hook	= pnv_pci_enable_device_hook,
3563 	.release_device		= pnv_pci_release_device,
3564 	.window_alignment	= pnv_pci_window_alignment,
3565 	.setup_bridge		= pnv_pci_setup_bridge,
3566 	.reset_secondary_bus	= pnv_pci_reset_secondary_bus,
3567 	.dma_set_mask		= pnv_pci_ioda_dma_set_mask,
3568 	.dma_get_required_mask	= pnv_pci_ioda_dma_get_required_mask,
3569 	.shutdown		= pnv_pci_ioda_shutdown,
3570 };
3571 
3572 static int pnv_npu_dma_set_mask(struct pci_dev *npdev, u64 dma_mask)
3573 {
3574 	dev_err_once(&npdev->dev,
3575 			"%s operation unsupported for NVLink devices\n",
3576 			__func__);
3577 	return -EPERM;
3578 }
3579 
3580 static const struct pci_controller_ops pnv_npu_ioda_controller_ops = {
3581 	.dma_dev_setup		= pnv_pci_dma_dev_setup,
3582 #ifdef CONFIG_PCI_MSI
3583 	.setup_msi_irqs		= pnv_setup_msi_irqs,
3584 	.teardown_msi_irqs	= pnv_teardown_msi_irqs,
3585 #endif
3586 	.enable_device_hook	= pnv_pci_enable_device_hook,
3587 	.window_alignment	= pnv_pci_window_alignment,
3588 	.reset_secondary_bus	= pnv_pci_reset_secondary_bus,
3589 	.dma_set_mask		= pnv_npu_dma_set_mask,
3590 	.shutdown		= pnv_pci_ioda_shutdown,
3591 };
3592 
3593 #ifdef CONFIG_CXL_BASE
3594 const struct pci_controller_ops pnv_cxl_cx4_ioda_controller_ops = {
3595 	.dma_dev_setup		= pnv_pci_dma_dev_setup,
3596 	.dma_bus_setup		= pnv_pci_dma_bus_setup,
3597 #ifdef CONFIG_PCI_MSI
3598 	.setup_msi_irqs		= pnv_cxl_cx4_setup_msi_irqs,
3599 	.teardown_msi_irqs	= pnv_cxl_cx4_teardown_msi_irqs,
3600 #endif
3601 	.enable_device_hook	= pnv_cxl_enable_device_hook,
3602 	.disable_device		= pnv_cxl_disable_device,
3603 	.release_device		= pnv_pci_release_device,
3604 	.window_alignment	= pnv_pci_window_alignment,
3605 	.setup_bridge		= pnv_pci_setup_bridge,
3606 	.reset_secondary_bus	= pnv_pci_reset_secondary_bus,
3607 	.dma_set_mask		= pnv_pci_ioda_dma_set_mask,
3608 	.dma_get_required_mask	= pnv_pci_ioda_dma_get_required_mask,
3609 	.shutdown		= pnv_pci_ioda_shutdown,
3610 };
3611 #endif
3612 
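/*
 * Probe and initialise one IODA PHB from its device-tree node: allocate
 * the pnv_phb and pci_controller, parse the MMIO/DMA windows, set up the
 * PE arrays and MSIs, and wire up the controller callbacks.
 */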
3613 static void __init pnv_pci_init_ioda_phb(struct device_node *np,
3614 					 u64 hub_id, int ioda_type)
3615 {
3616 	struct pci_controller *hose;
3617 	struct pnv_phb *phb;
3618 	unsigned long size, m64map_off, m32map_off, pemap_off;
3619 	unsigned long iomap_off = 0, dma32map_off = 0;
3620 	struct resource r;
3621 	const __be64 *prop64;
3622 	const __be32 *prop32;
3623 	int len;
3624 	unsigned int segno;
3625 	u64 phb_id;
3626 	void *aux;
3627 	long rc;
3628 
3629 	if (!of_device_is_available(np))
3630 		return;
3631 
3632 	pr_info("Initializing %s PHB (%s)\n",
3633 		pnv_phb_names[ioda_type], of_node_full_name(np));
3634 
3635 	prop64 = of_get_property(np, "ibm,opal-phbid", NULL);
3636 	if (!prop64) {
3637 		pr_err("  Missing \"ibm,opal-phbid\" property !\n");
3638 		return;
3639 	}
3640 	phb_id = be64_to_cpup(prop64);
3641 	pr_debug("  PHB-ID  : 0x%016llx\n", phb_id);
3642 
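	/* memblock_virt_alloc() panics on failure, so no NULL check is needed */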
3643 	phb = memblock_virt_alloc(sizeof(struct pnv_phb), 0);
3644 
3645 	/* Allocate PCI controller */
3646 	phb->hose = hose = pcibios_alloc_controller(np);
3647 	if (!phb->hose) {
3648 		pr_err("  Can't allocate PCI controller for %s\n",
3649 		       np->full_name);
3650 		memblock_free(__pa(phb), sizeof(struct pnv_phb));
3651 		return;
3652 	}
3653 
3654 	spin_lock_init(&phb->lock);
3655 	prop32 = of_get_property(np, "bus-range", &len);
3656 	if (prop32 && len == 8) {
3657 		hose->first_busno = be32_to_cpu(prop32[0]);
3658 		hose->last_busno = be32_to_cpu(prop32[1]);
3659 	} else {
3660 		pr_warn("  Broken <bus-range> on %s\n", np->full_name);
3661 		hose->first_busno = 0;
3662 		hose->last_busno = 0xff;
3663 	}
3664 	hose->private_data = phb;
3665 	phb->hub_id = hub_id;
3666 	phb->opal_id = phb_id;
3667 	phb->type = ioda_type;
3668 	mutex_init(&phb->ioda.pe_alloc_mutex);
3669 
3670 	/* Detect specific models for error handling */
3671 	if (of_device_is_compatible(np, "ibm,p7ioc-pciex"))
3672 		phb->model = PNV_PHB_MODEL_P7IOC;
3673 	else if (of_device_is_compatible(np, "ibm,power8-pciex"))
3674 		phb->model = PNV_PHB_MODEL_PHB3;
3675 	else if (of_device_is_compatible(np, "ibm,power8-npu-pciex"))
3676 		phb->model = PNV_PHB_MODEL_NPU;
3677 	else if (of_device_is_compatible(np, "ibm,power9-npu-pciex"))
3678 		phb->model = PNV_PHB_MODEL_NPU2;
3679 	else
3680 		phb->model = PNV_PHB_MODEL_UNKNOWN;
3681 
3682 	/* Parse 32-bit and IO ranges (if any) */
3683 	pci_process_bridge_OF_ranges(hose, np, !hose->global_number);
3684 
3685 	/* Get registers */
3686 	if (!of_address_to_resource(np, 0, &r)) {
3687 		phb->regs_phys = r.start;
3688 		phb->regs = ioremap(r.start, resource_size(&r));
3689 		if (phb->regs == NULL)
3690 			pr_err("  Failed to map registers !\n");
3691 	}
3692 
3693 	/* Initialize more IODA stuff */
3694 	phb->ioda.total_pe_num = 1;
3695 	prop32 = of_get_property(np, "ibm,opal-num-pes", NULL);
3696 	if (prop32)
3697 		phb->ioda.total_pe_num = be32_to_cpup(prop32);
3698 	prop32 = of_get_property(np, "ibm,opal-reserved-pe", NULL);
3699 	if (prop32)
3700 		phb->ioda.reserved_pe_idx = be32_to_cpup(prop32);
3701 
3702 	/* Invalidate RID to PE# mapping */
3703 	for (segno = 0; segno < ARRAY_SIZE(phb->ioda.pe_rmap); segno++)
3704 		phb->ioda.pe_rmap[segno] = IODA_INVALID_PE;
3705 
3706 	/* Parse 64-bit MMIO range */
3707 	pnv_ioda_parse_m64_window(phb);
3708 
3709 	phb->ioda.m32_size = resource_size(&hose->mem_resources[0]);
3710 	/* FW has already taken the top 64K (MSI space) off M32; add it back */
3711 	phb->ioda.m32_size += 0x10000;
3712 
3713 	phb->ioda.m32_segsize = phb->ioda.m32_size / phb->ioda.total_pe_num;
3714 	phb->ioda.m32_pci_base = hose->mem_resources[0].start - hose->mem_offset[0];
3715 	phb->ioda.io_size = hose->pci_io_size;
3716 	phb->ioda.io_segsize = phb->ioda.io_size / phb->ioda.total_pe_num;
3717 	phb->ioda.io_pci_base = 0; /* XXX calculate this ? */
3718 
3719 	/* Calculate how many 32-bit TCE segments we have */
3720 	phb->ioda.dma32_count = phb->ioda.m32_pci_base /
3721 				PNV_IODA1_DMA32_SEGSIZE;
3722 
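	/*
	 * All of the per-PE bookkeeping below lives in a single memblock
	 * allocation laid out as:
	 *   pe_alloc bitmap | m64_segmap | m32_segmap |
	 *   [io_segmap | dma32_segmap (IODA1 only)] | pe_array
	 * The *_off variables record each array's offset within it.
	 */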
3723 	/* Allocate aux data & arrays. We don't have IO ports on PHB3 */
3724 	size = _ALIGN_UP(max_t(unsigned, phb->ioda.total_pe_num, 8) / 8,
3725 			sizeof(unsigned long));
3726 	m64map_off = size;
3727 	size += phb->ioda.total_pe_num * sizeof(phb->ioda.m64_segmap[0]);
3728 	m32map_off = size;
3729 	size += phb->ioda.total_pe_num * sizeof(phb->ioda.m32_segmap[0]);
3730 	if (phb->type == PNV_PHB_IODA1) {
3731 		iomap_off = size;
3732 		size += phb->ioda.total_pe_num * sizeof(phb->ioda.io_segmap[0]);
3733 		dma32map_off = size;
3734 		size += phb->ioda.dma32_count *
3735 			sizeof(phb->ioda.dma32_segmap[0]);
3736 	}
3737 	pemap_off = size;
3738 	size += phb->ioda.total_pe_num * sizeof(struct pnv_ioda_pe);
3739 	aux = memblock_virt_alloc(size, 0);
3740 	phb->ioda.pe_alloc = aux;
3741 	phb->ioda.m64_segmap = aux + m64map_off;
3742 	phb->ioda.m32_segmap = aux + m32map_off;
3743 	for (segno = 0; segno < phb->ioda.total_pe_num; segno++) {
3744 		phb->ioda.m64_segmap[segno] = IODA_INVALID_PE;
3745 		phb->ioda.m32_segmap[segno] = IODA_INVALID_PE;
3746 	}
3747 	if (phb->type == PNV_PHB_IODA1) {
3748 		phb->ioda.io_segmap = aux + iomap_off;
3749 		for (segno = 0; segno < phb->ioda.total_pe_num; segno++)
3750 			phb->ioda.io_segmap[segno] = IODA_INVALID_PE;
3751 
3752 		phb->ioda.dma32_segmap = aux + dma32map_off;
3753 		for (segno = 0; segno < phb->ioda.dma32_count; segno++)
3754 			phb->ioda.dma32_segmap[segno] = IODA_INVALID_PE;
3755 	}
3756 	phb->ioda.pe_array = aux + pemap_off;
3757 
3758 	/*
3759 	 * Choose a PE number for the root bus, which shouldn't have
3760 	 * M64 resources consumed by its child devices. Pick the PE
3761 	 * number adjacent to the reserved one if possible.
3762 	 */
3763 	pnv_ioda_reserve_pe(phb, phb->ioda.reserved_pe_idx);
3764 	if (phb->ioda.reserved_pe_idx == 0) {
3765 		phb->ioda.root_pe_idx = 1;
3766 		pnv_ioda_reserve_pe(phb, phb->ioda.root_pe_idx);
3767 	} else if (phb->ioda.reserved_pe_idx == (phb->ioda.total_pe_num - 1)) {
3768 		phb->ioda.root_pe_idx = phb->ioda.reserved_pe_idx - 1;
3769 		pnv_ioda_reserve_pe(phb, phb->ioda.root_pe_idx);
3770 	} else {
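		/*
		 * The reserved PE sits in the middle of the range, so
		 * there's no adjacent PE to set aside for the root bus.
		 */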
3771 		phb->ioda.root_pe_idx = IODA_INVALID_PE;
3772 	}
3773 
3774 	INIT_LIST_HEAD(&phb->ioda.pe_list);
3775 	mutex_init(&phb->ioda.pe_list_mutex);
3780 
3781 #if 0 /* We should really do that ... */
3782 	rc = opal_pci_set_phb_mem_window(opal->phb_id,
3783 					 window_type,
3784 					 window_num,
3785 					 starting_real_address,
3786 					 starting_pci_address,
3787 					 segment_size);
3788 #endif
3789 
3790 	pr_info("  %03d (%03d) PE's M32: 0x%x [segment=0x%x]\n",
3791 		phb->ioda.total_pe_num, phb->ioda.reserved_pe_idx,
3792 		phb->ioda.m32_size, phb->ioda.m32_segsize);
3793 	if (phb->ioda.m64_size)
3794 		pr_info("                 M64: 0x%lx [segment=0x%lx]\n",
3795 			phb->ioda.m64_size, phb->ioda.m64_segsize);
3796 	if (phb->ioda.io_size)
3797 		pr_info("                  IO: 0x%x [segment=0x%x]\n",
3798 			phb->ioda.io_size, phb->ioda.io_segsize);
3799 
3801 	phb->hose->ops = &pnv_pci_ops;
3802 	phb->get_pe_state = pnv_ioda_get_pe_state;
3803 	phb->freeze_pe = pnv_ioda_freeze_pe;
3804 	phb->unfreeze_pe = pnv_ioda_unfreeze_pe;
3805 
3806 	/* Setup MSI support */
3807 	pnv_pci_init_ioda_msis(phb);
3808 
3809 	/*
3810 	 * We set the PCI probe flag PCI_REASSIGN_ALL_RSRC (below) to let
3811 	 * the PCI core do the resource assignment. The expectation is that
3812 	 * the PCI core will apply correct I/O and MMIO alignment to the
3813 	 * P2P bridge BARs, so that each PCI bus (excluding the child P2P
3814 	 * bridges) can form an individual PE.
3815 	 */
3816 	ppc_md.pcibios_fixup = pnv_pci_ioda_fixup;
3817 
3818 	if (phb->type == PNV_PHB_NPU) {
3819 		hose->controller_ops = pnv_npu_ioda_controller_ops;
3820 	} else {
3821 		phb->dma_dev_setup = pnv_pci_ioda_dma_dev_setup;
3822 		hose->controller_ops = pnv_pci_ioda_controller_ops;
3823 	}
3824 
3825 #ifdef CONFIG_PCI_IOV
3826 	ppc_md.pcibios_fixup_sriov = pnv_pci_ioda_fixup_iov_resources;
3827 	ppc_md.pcibios_iov_resource_alignment = pnv_pci_iov_resource_alignment;
3828 #endif
3829 
3830 	pci_add_flags(PCI_REASSIGN_ALL_RSRC);
3831 
3832 	/* Reset IODA tables to a clean state */
3833 	rc = opal_pci_reset(phb_id, OPAL_RESET_PCI_IODA_TABLE, OPAL_ASSERT_RESET);
3834 	if (rc)
3835 		pr_warning("  OPAL Error %ld performing IODA table reset !\n", rc);
3836 
3837 	/*
3838 	 * If we're running in a kdump kernel, the previous kernel never
3839 	 * shut down its PCI devices correctly. The IODA tables were just
3840 	 * cleaned out above, so issue a PHB reset to stop any PCI
3841 	 * transactions still in flight from the previous kernel.
3842 	 */
3843 	if (is_kdump_kernel()) {
3844 		pr_info("  Issue PHB reset ...\n");
3845 		pnv_eeh_phb_reset(hose, EEH_RESET_FUNDAMENTAL);
3846 		pnv_eeh_phb_reset(hose, EEH_RESET_DEACTIVATE);
3847 	}
3848 
3849 	/* Hide the M64 window (mem_resources[1]) if it can't be configured */
3850 	if (!phb->init_m64 || phb->init_m64(phb))
3851 		hose->mem_resources[1].flags = 0;
3852 }
3853 
3854 void __init pnv_pci_init_ioda2_phb(struct device_node *np)
3855 {
3856 	pnv_pci_init_ioda_phb(np, 0, PNV_PHB_IODA2);
3857 }
3858 
3859 void __init pnv_pci_init_npu_phb(struct device_node *np)
3860 {
3861 	pnv_pci_init_ioda_phb(np, 0, PNV_PHB_NPU);
3862 }
3863 
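/*
 * Probe an IODA IO hub: read its "ibm,opal-hubid" property and
 * initialise each child IODA1 PHB found beneath it.
 */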
3864 void __init pnv_pci_init_ioda_hub(struct device_node *np)
3865 {
3866 	struct device_node *phbn;
3867 	const __be64 *prop64;
3868 	u64 hub_id;
3869 
3870 	pr_info("Probing IODA IO-Hub %s\n", np->full_name);
3871 
3872 	prop64 = of_get_property(np, "ibm,opal-hubid", NULL);
3873 	if (!prop64) {
3874 		pr_err(" Missing \"ibm,opal-hubid\" property !\n");
3875 		return;
3876 	}
3877 	hub_id = be64_to_cpup(prop64);
3878 	pr_devel(" HUB-ID : 0x%016llx\n", hub_id);
3879 
3880 	/* Count child PHBs */
3881 	for_each_child_of_node(np, phbn) {
3882 		/* Look for IODA1 PHBs */
3883 		if (of_device_is_compatible(phbn, "ibm,ioda-phb"))
3884 			pnv_pci_init_ioda_phb(phbn, hub_id, PNV_PHB_IODA1);
3885 	}
3886 }
3887