xref: /linux/arch/s390/pci/pci.c (revision a46044a92add6a400f4dada7b943b30221f7cc80)
1adbb3901SGreg Kroah-Hartman // SPDX-License-Identifier: GPL-2.0
2cd248341SJan Glauber /*
3cd248341SJan Glauber  * Copyright IBM Corp. 2012
4cd248341SJan Glauber  *
5cd248341SJan Glauber  * Author(s):
6cd248341SJan Glauber  *   Jan Glauber <jang@linux.vnet.ibm.com>
7cd248341SJan Glauber  *
8cd248341SJan Glauber  * The System z PCI code is a rewrite from a prototype by
9cd248341SJan Glauber  * the following people (Kudoz!):
10bedef755SJan Glauber  *   Alexander Schmidt
11bedef755SJan Glauber  *   Christoph Raisch
12bedef755SJan Glauber  *   Hannes Hering
13bedef755SJan Glauber  *   Hoang-Nam Nguyen
14bedef755SJan Glauber  *   Jan-Bernd Themann
15bedef755SJan Glauber  *   Stefan Roscher
16bedef755SJan Glauber  *   Thomas Klein
17cd248341SJan Glauber  */
18cd248341SJan Glauber 
19896cb7e6SGerald Schaefer #define KMSG_COMPONENT "zpci"
20896cb7e6SGerald Schaefer #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
21cd248341SJan Glauber 
22cd248341SJan Glauber #include <linux/kernel.h>
23cd248341SJan Glauber #include <linux/slab.h>
24cd248341SJan Glauber #include <linux/err.h>
25cd248341SJan Glauber #include <linux/export.h>
26cd248341SJan Glauber #include <linux/delay.h>
27cd248341SJan Glauber #include <linux/seq_file.h>
2871ba41c9SSebastian Ott #include <linux/jump_label.h>
29cd248341SJan Glauber #include <linux/pci.h>
30794b8846SNiklas Schnelle #include <linux/printk.h>
31cd248341SJan Glauber 
329a4da8a5SJan Glauber #include <asm/isc.h>
339a4da8a5SJan Glauber #include <asm/airq.h>
34cd248341SJan Glauber #include <asm/facility.h>
35cd248341SJan Glauber #include <asm/pci_insn.h>
36a755a45dSJan Glauber #include <asm/pci_clp.h>
37828b35f6SJan Glauber #include <asm/pci_dma.h>
38cd248341SJan Glauber 
3905bc1be6SPierre Morel #include "pci_bus.h"
40abb95b75SNiklas Schnelle #include "pci_iov.h"
4105bc1be6SPierre Morel 
42cd248341SJan Glauber /* list of all detected zpci devices */
4367f43f38SSebastian Ott static LIST_HEAD(zpci_list);
4457b5918cSSebastian Ott static DEFINE_SPINLOCK(zpci_list_lock);
451f44a225SMartin Schwidefsky 
46969ae01bSNiklas Schnelle static DECLARE_BITMAP(zpci_domain, ZPCI_DOMAIN_BITMAP_SIZE);
47cd248341SJan Glauber static DEFINE_SPINLOCK(zpci_domain_lock);
48cd248341SJan Glauber 
49c506fff3SSebastian Ott #define ZPCI_IOMAP_ENTRIES						\
50c9c13ba4SDenis Efremov 	min(((unsigned long) ZPCI_NR_DEVICES * PCI_STD_NUM_BARS / 2),	\
51c506fff3SSebastian Ott 	    ZPCI_IOMAP_MAX_ENTRIES)
52c506fff3SSebastian Ott 
536cf17f9aSPierre Morel unsigned int s390_pci_no_rid;
546cf17f9aSPierre Morel 
55cd248341SJan Glauber static DEFINE_SPINLOCK(zpci_iomap_lock);
56c506fff3SSebastian Ott static unsigned long *zpci_iomap_bitmap;
57cd248341SJan Glauber struct zpci_iomap_entry *zpci_iomap_start;
58cd248341SJan Glauber EXPORT_SYMBOL_GPL(zpci_iomap_start);
59cd248341SJan Glauber 
6071ba41c9SSebastian Ott DEFINE_STATIC_KEY_FALSE(have_mio);
6171ba41c9SSebastian Ott 
62d0b08853SJan Glauber static struct kmem_cache *zdev_fmb_cache;
63d0b08853SJan Glauber 
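/*
 * Look up a zpci_dev by its function ID (FID). Returns the matching entry
 * from zpci_list or NULL; no reference is taken, so callers that need the
 * device to stay around must hold their own.
 */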
64cd248341SJan Glauber struct zpci_dev *get_zdev_by_fid(u32 fid)
65cd248341SJan Glauber {
66cd248341SJan Glauber 	struct zpci_dev *tmp, *zdev = NULL;
67cd248341SJan Glauber 
6857b5918cSSebastian Ott 	spin_lock(&zpci_list_lock);
69cd248341SJan Glauber 	list_for_each_entry(tmp, &zpci_list, entry) {
70cd248341SJan Glauber 		if (tmp->fid == fid) {
71cd248341SJan Glauber 			zdev = tmp;
72cd248341SJan Glauber 			break;
73cd248341SJan Glauber 		}
74cd248341SJan Glauber 	}
7557b5918cSSebastian Ott 	spin_unlock(&zpci_list_lock);
76cd248341SJan Glauber 	return zdev;
77cd248341SJan Glauber }
78cd248341SJan Glauber 
7901553d9aSSebastian Ott void zpci_remove_reserved_devices(void)
8001553d9aSSebastian Ott {
8101553d9aSSebastian Ott 	struct zpci_dev *tmp, *zdev;
8201553d9aSSebastian Ott 	enum zpci_state state;
8301553d9aSSebastian Ott 	LIST_HEAD(remove);
8401553d9aSSebastian Ott 
8501553d9aSSebastian Ott 	spin_lock(&zpci_list_lock);
8601553d9aSSebastian Ott 	list_for_each_entry_safe(zdev, tmp, &zpci_list, entry) {
8701553d9aSSebastian Ott 		if (zdev->state == ZPCI_FN_STATE_STANDBY &&
8801553d9aSSebastian Ott 		    !clp_get_state(zdev->fid, &state) &&
8901553d9aSSebastian Ott 		    state == ZPCI_FN_STATE_RESERVED)
9001553d9aSSebastian Ott 			list_move_tail(&zdev->entry, &remove);
9101553d9aSSebastian Ott 	}
9201553d9aSSebastian Ott 	spin_unlock(&zpci_list_lock);
9301553d9aSSebastian Ott 
9401553d9aSSebastian Ott 	list_for_each_entry_safe(zdev, tmp, &remove, entry)
95*a46044a9SNiklas Schnelle 		zpci_device_reserved(zdev);
96cd248341SJan Glauber }
97cd248341SJan Glauber 
98cd248341SJan Glauber int pci_domain_nr(struct pci_bus *bus)
99cd248341SJan Glauber {
10005bc1be6SPierre Morel 	return ((struct zpci_bus *) bus->sysdata)->domain_nr;
101cd248341SJan Glauber }
102cd248341SJan Glauber EXPORT_SYMBOL_GPL(pci_domain_nr);
103cd248341SJan Glauber 
104cd248341SJan Glauber int pci_proc_domain(struct pci_bus *bus)
105cd248341SJan Glauber {
106cd248341SJan Glauber 	return pci_domain_nr(bus);
107cd248341SJan Glauber }
108cd248341SJan Glauber EXPORT_SYMBOL_GPL(pci_proc_domain);
109cd248341SJan Glauber 
110828b35f6SJan Glauber /* Modify PCI: Register I/O address translation parameters */
111828b35f6SJan Glauber int zpci_register_ioat(struct zpci_dev *zdev, u8 dmaas,
112828b35f6SJan Glauber 		       u64 base, u64 limit, u64 iota)
113828b35f6SJan Glauber {
11472570834SSebastian Ott 	u64 req = ZPCI_CREATE_REQ(zdev->fh, dmaas, ZPCI_MOD_FC_REG_IOAT);
11572570834SSebastian Ott 	struct zpci_fib fib = {0};
1161f3f7681SNiklas Schnelle 	u8 cc, status;
117828b35f6SJan Glauber 
118828b35f6SJan Glauber 	WARN_ON_ONCE(iota & 0x3fff);
11972570834SSebastian Ott 	fib.pba = base;
12072570834SSebastian Ott 	fib.pal = limit;
12172570834SSebastian Ott 	fib.iota = iota | ZPCI_IOTA_RTTO_FLAG;
1221f3f7681SNiklas Schnelle 	cc = zpci_mod_fc(req, &fib, &status);
1231f3f7681SNiklas Schnelle 	if (cc)
1241f3f7681SNiklas Schnelle 		zpci_dbg(3, "reg ioat fid:%x, cc:%d, status:%d\n", zdev->fid, cc, status);
1251f3f7681SNiklas Schnelle 	return cc;
126828b35f6SJan Glauber }
127828b35f6SJan Glauber 
128828b35f6SJan Glauber /* Modify PCI: Unregister I/O address translation parameters */
129828b35f6SJan Glauber int zpci_unregister_ioat(struct zpci_dev *zdev, u8 dmaas)
130828b35f6SJan Glauber {
13172570834SSebastian Ott 	u64 req = ZPCI_CREATE_REQ(zdev->fh, dmaas, ZPCI_MOD_FC_DEREG_IOAT);
13272570834SSebastian Ott 	struct zpci_fib fib = {0};
13372570834SSebastian Ott 	u8 cc, status;
134828b35f6SJan Glauber 
13572570834SSebastian Ott 	cc = zpci_mod_fc(req, &fib, &status);
1361f3f7681SNiklas Schnelle 	if (cc)
1371f3f7681SNiklas Schnelle 		zpci_dbg(3, "unreg ioat fid:%x, cc:%d, status:%d\n", zdev->fid, cc, status);
1381f3f7681SNiklas Schnelle 	return cc;
139828b35f6SJan Glauber }
140828b35f6SJan Glauber 
141d0b08853SJan Glauber /* Modify PCI: Set PCI function measurement parameters */
142d0b08853SJan Glauber int zpci_fmb_enable_device(struct zpci_dev *zdev)
143d0b08853SJan Glauber {
1444e5bd780SSebastian Ott 	u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, ZPCI_MOD_FC_SET_MEASURE);
1454e5bd780SSebastian Ott 	struct zpci_fib fib = {0};
1464e5bd780SSebastian Ott 	u8 cc, status;
147d0b08853SJan Glauber 
1480b7589ecSSebastian Ott 	if (zdev->fmb || sizeof(*zdev->fmb) < zdev->fmb_length)
149d0b08853SJan Glauber 		return -EINVAL;
150d0b08853SJan Glauber 
15108b42124SWei Yongjun 	zdev->fmb = kmem_cache_zalloc(zdev_fmb_cache, GFP_KERNEL);
152d0b08853SJan Glauber 	if (!zdev->fmb)
153d0b08853SJan Glauber 		return -ENOMEM;
154d0b08853SJan Glauber 	WARN_ON((u64) zdev->fmb & 0xf);
155d0b08853SJan Glauber 
1566001018aSSebastian Ott 	/* reset software counters */
1576001018aSSebastian Ott 	atomic64_set(&zdev->allocated_pages, 0);
1586001018aSSebastian Ott 	atomic64_set(&zdev->mapped_pages, 0);
1596001018aSSebastian Ott 	atomic64_set(&zdev->unmapped_pages, 0);
1606001018aSSebastian Ott 
1614e5bd780SSebastian Ott 	fib.fmb_addr = virt_to_phys(zdev->fmb);
1624e5bd780SSebastian Ott 	cc = zpci_mod_fc(req, &fib, &status);
1634e5bd780SSebastian Ott 	if (cc) {
1644e5bd780SSebastian Ott 		kmem_cache_free(zdev_fmb_cache, zdev->fmb);
1654e5bd780SSebastian Ott 		zdev->fmb = NULL;
1664e5bd780SSebastian Ott 	}
1674e5bd780SSebastian Ott 	return cc ? -EIO : 0;
168d0b08853SJan Glauber }
169d0b08853SJan Glauber 
170d0b08853SJan Glauber /* Modify PCI: Disable PCI function measurement */
171d0b08853SJan Glauber int zpci_fmb_disable_device(struct zpci_dev *zdev)
172d0b08853SJan Glauber {
1734e5bd780SSebastian Ott 	u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, ZPCI_MOD_FC_SET_MEASURE);
1744e5bd780SSebastian Ott 	struct zpci_fib fib = {0};
1754e5bd780SSebastian Ott 	u8 cc, status;
176d0b08853SJan Glauber 
177d0b08853SJan Glauber 	if (!zdev->fmb)
178d0b08853SJan Glauber 		return -EINVAL;
179d0b08853SJan Glauber 
180d0b08853SJan Glauber 	/* Function measurement is disabled if fmb address is zero */
1814e5bd780SSebastian Ott 	cc = zpci_mod_fc(req, &fib, &status);
1824e5bd780SSebastian Ott 	if (cc == 3) /* Function already gone. */
1834e5bd780SSebastian Ott 		cc = 0;
184d0b08853SJan Glauber 
1854e5bd780SSebastian Ott 	if (!cc) {
186d0b08853SJan Glauber 		kmem_cache_free(zdev_fmb_cache, zdev->fmb);
187d0b08853SJan Glauber 		zdev->fmb = NULL;
1884e5bd780SSebastian Ott 	}
1894e5bd780SSebastian Ott 	return cc ? -EIO : 0;
190d0b08853SJan Glauber }
191d0b08853SJan Glauber 
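/*
 * Read "len" bytes at "offset" from the function's config space using a PCI
 * load instruction; on failure the returned value is forced to all ones.
 */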
192cd248341SJan Glauber static int zpci_cfg_load(struct zpci_dev *zdev, int offset, u32 *val, u8 len)
193cd248341SJan Glauber {
194cd248341SJan Glauber 	u64 req = ZPCI_CREATE_REQ(zdev->fh, ZPCI_PCIAS_CFGSPC, len);
195cd248341SJan Glauber 	u64 data;
196cd248341SJan Glauber 	int rc;
197cd248341SJan Glauber 
19881deca12SSebastian Ott 	rc = __zpci_load(&data, req, offset);
199b170bad4SSebastian Ott 	if (!rc) {
2005064cd35SSebastian Ott 		data = le64_to_cpu((__force __le64) data);
2015064cd35SSebastian Ott 		data >>= (8 - len) * 8;
202cd248341SJan Glauber 		*val = (u32) data;
203b170bad4SSebastian Ott 	} else
204cd248341SJan Glauber 		*val = 0xffffffff;
205cd248341SJan Glauber 	return rc;
206cd248341SJan Glauber }
207cd248341SJan Glauber 
208cd248341SJan Glauber static int zpci_cfg_store(struct zpci_dev *zdev, int offset, u32 val, u8 len)
209cd248341SJan Glauber {
210cd248341SJan Glauber 	u64 req = ZPCI_CREATE_REQ(zdev->fh, ZPCI_PCIAS_CFGSPC, len);
211cd248341SJan Glauber 	u64 data = val;
212cd248341SJan Glauber 	int rc;
213cd248341SJan Glauber 
2145064cd35SSebastian Ott 	data <<= (8 - len) * 8;
2155064cd35SSebastian Ott 	data = (__force u64) cpu_to_le64(data);
21681deca12SSebastian Ott 	rc = __zpci_store(data, req, offset);
217cd248341SJan Glauber 	return rc;
218cd248341SJan Glauber }
219cd248341SJan Glauber 
220cd248341SJan Glauber resource_size_t pcibios_align_resource(void *data, const struct resource *res,
221cd248341SJan Glauber 				       resource_size_t size,
222cd248341SJan Glauber 				       resource_size_t align)
223cd248341SJan Glauber {
224cd248341SJan Glauber 	return 0;
225cd248341SJan Glauber }
226cd248341SJan Glauber 
22787bc359bSJan Glauber /* combine single writes by using store-block insn */
22887bc359bSJan Glauber void __iowrite64_copy(void __iomem *to, const void *from, size_t count)
22987bc359bSJan Glauber {
23087bc359bSJan Glauber        zpci_memcpy_toio(to, from, count);
23187bc359bSJan Glauber }
23287bc359bSJan Glauber 
233b02002ccSNiklas Schnelle static void __iomem *__ioremap(phys_addr_t addr, size_t size, pgprot_t prot)
23471ba41c9SSebastian Ott {
235a999eb96SNiklas Schnelle 	unsigned long offset, vaddr;
23671ba41c9SSebastian Ott 	struct vm_struct *area;
237a999eb96SNiklas Schnelle 	phys_addr_t last_addr;
23871ba41c9SSebastian Ott 
239a999eb96SNiklas Schnelle 	last_addr = addr + size - 1;
240a999eb96SNiklas Schnelle 	if (!size || last_addr < addr)
24171ba41c9SSebastian Ott 		return NULL;
24271ba41c9SSebastian Ott 
24371ba41c9SSebastian Ott 	if (!static_branch_unlikely(&have_mio))
244a999eb96SNiklas Schnelle 		return (void __iomem *) addr;
24571ba41c9SSebastian Ott 
246a999eb96SNiklas Schnelle 	offset = addr & ~PAGE_MASK;
247a999eb96SNiklas Schnelle 	addr &= PAGE_MASK;
24871ba41c9SSebastian Ott 	size = PAGE_ALIGN(size + offset);
24971ba41c9SSebastian Ott 	area = get_vm_area(size, VM_IOREMAP);
25071ba41c9SSebastian Ott 	if (!area)
25171ba41c9SSebastian Ott 		return NULL;
25271ba41c9SSebastian Ott 
253a999eb96SNiklas Schnelle 	vaddr = (unsigned long) area->addr;
254b02002ccSNiklas Schnelle 	if (ioremap_page_range(vaddr, vaddr + size, addr, prot)) {
255a999eb96SNiklas Schnelle 		free_vm_area(area);
25671ba41c9SSebastian Ott 		return NULL;
25771ba41c9SSebastian Ott 	}
25871ba41c9SSebastian Ott 	return (void __iomem *) ((unsigned long) area->addr + offset);
25971ba41c9SSebastian Ott }
260b02002ccSNiklas Schnelle 
261b02002ccSNiklas Schnelle void __iomem *ioremap_prot(phys_addr_t addr, size_t size, unsigned long prot)
262b02002ccSNiklas Schnelle {
263b02002ccSNiklas Schnelle 	return __ioremap(addr, size, __pgprot(prot));
264b02002ccSNiklas Schnelle }
265b02002ccSNiklas Schnelle EXPORT_SYMBOL(ioremap_prot);
266b02002ccSNiklas Schnelle 
267b02002ccSNiklas Schnelle void __iomem *ioremap(phys_addr_t addr, size_t size)
268b02002ccSNiklas Schnelle {
269b02002ccSNiklas Schnelle 	return __ioremap(addr, size, PAGE_KERNEL);
270b02002ccSNiklas Schnelle }
27171ba41c9SSebastian Ott EXPORT_SYMBOL(ioremap);
27271ba41c9SSebastian Ott 
273b02002ccSNiklas Schnelle void __iomem *ioremap_wc(phys_addr_t addr, size_t size)
274b02002ccSNiklas Schnelle {
275b02002ccSNiklas Schnelle 	return __ioremap(addr, size, pgprot_writecombine(PAGE_KERNEL));
276b02002ccSNiklas Schnelle }
277b02002ccSNiklas Schnelle EXPORT_SYMBOL(ioremap_wc);
278b02002ccSNiklas Schnelle 
279b02002ccSNiklas Schnelle void __iomem *ioremap_wt(phys_addr_t addr, size_t size)
280b02002ccSNiklas Schnelle {
281b02002ccSNiklas Schnelle 	return __ioremap(addr, size, pgprot_writethrough(PAGE_KERNEL));
282b02002ccSNiklas Schnelle }
283b02002ccSNiklas Schnelle EXPORT_SYMBOL(ioremap_wt);
284b02002ccSNiklas Schnelle 
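/*
 * Only MIO mappings are real vmalloc-space mappings; without MIO, __ioremap()
 * returns the address unchanged and there is nothing to unmap.
 */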
28571ba41c9SSebastian Ott void iounmap(volatile void __iomem *addr)
28671ba41c9SSebastian Ott {
28771ba41c9SSebastian Ott 	if (static_branch_likely(&have_mio))
28871ba41c9SSebastian Ott 		vunmap((__force void *) ((unsigned long) addr & PAGE_MASK));
28971ba41c9SSebastian Ott }
29071ba41c9SSebastian Ott EXPORT_SYMBOL(iounmap);
29171ba41c9SSebastian Ott 
292cd248341SJan Glauber /* Create a virtual mapping cookie for a PCI BAR */
29371ba41c9SSebastian Ott static void __iomem *pci_iomap_range_fh(struct pci_dev *pdev, int bar,
29471ba41c9SSebastian Ott 					unsigned long offset, unsigned long max)
295cd248341SJan Glauber {
296198a5278SSebastian Ott 	struct zpci_dev *zdev =	to_zpci(pdev);
297cd248341SJan Glauber 	int idx;
298cd248341SJan Glauber 
299cd248341SJan Glauber 	idx = zdev->bars[bar].map_idx;
300cd248341SJan Glauber 	spin_lock(&zpci_iomap_lock);
301f5e44f82SSebastian Ott 	/* Detect overrun */
302f5e44f82SSebastian Ott 	WARN_ON(!++zpci_iomap_start[idx].count);
303cd248341SJan Glauber 	zpci_iomap_start[idx].fh = zdev->fh;
304cd248341SJan Glauber 	zpci_iomap_start[idx].bar = bar;
305cd248341SJan Glauber 	spin_unlock(&zpci_iomap_lock);
306cd248341SJan Glauber 
3079e00caaeSSebastian Ott 	return (void __iomem *) ZPCI_ADDR(idx) + offset;
308cd248341SJan Glauber }
30971ba41c9SSebastian Ott 
31071ba41c9SSebastian Ott static void __iomem *pci_iomap_range_mio(struct pci_dev *pdev, int bar,
31171ba41c9SSebastian Ott 					 unsigned long offset,
31271ba41c9SSebastian Ott 					 unsigned long max)
31371ba41c9SSebastian Ott {
31471ba41c9SSebastian Ott 	unsigned long barsize = pci_resource_len(pdev, bar);
31571ba41c9SSebastian Ott 	struct zpci_dev *zdev = to_zpci(pdev);
31671ba41c9SSebastian Ott 	void __iomem *iova;
31771ba41c9SSebastian Ott 
31871ba41c9SSebastian Ott 	iova = ioremap((unsigned long) zdev->bars[bar].mio_wt, barsize);
31971ba41c9SSebastian Ott 	return iova ? iova + offset : iova;
32071ba41c9SSebastian Ott }
32171ba41c9SSebastian Ott 
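/*
 * Map a BAR for MMIO access: with MIO support the BAR is ioremap()ed
 * directly, otherwise an address cookie referencing a slot in the iomap
 * table is handed out.
 */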
32271ba41c9SSebastian Ott void __iomem *pci_iomap_range(struct pci_dev *pdev, int bar,
32371ba41c9SSebastian Ott 			      unsigned long offset, unsigned long max)
32471ba41c9SSebastian Ott {
325c9c13ba4SDenis Efremov 	if (bar >= PCI_STD_NUM_BARS || !pci_resource_len(pdev, bar))
32671ba41c9SSebastian Ott 		return NULL;
32771ba41c9SSebastian Ott 
32871ba41c9SSebastian Ott 	if (static_branch_likely(&have_mio))
32971ba41c9SSebastian Ott 		return pci_iomap_range_mio(pdev, bar, offset, max);
33071ba41c9SSebastian Ott 	else
33171ba41c9SSebastian Ott 		return pci_iomap_range_fh(pdev, bar, offset, max);
33271ba41c9SSebastian Ott }
333d9426083SSebastian Ott EXPORT_SYMBOL(pci_iomap_range);
3348cfc99b5SMichael S. Tsirkin 
3358cfc99b5SMichael S. Tsirkin void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen)
3368cfc99b5SMichael S. Tsirkin {
3378cfc99b5SMichael S. Tsirkin 	return pci_iomap_range(dev, bar, 0, maxlen);
3388cfc99b5SMichael S. Tsirkin }
3398cfc99b5SMichael S. Tsirkin EXPORT_SYMBOL(pci_iomap);
340cd248341SJan Glauber 
34171ba41c9SSebastian Ott static void __iomem *pci_iomap_wc_range_mio(struct pci_dev *pdev, int bar,
34271ba41c9SSebastian Ott 					    unsigned long offset, unsigned long max)
34371ba41c9SSebastian Ott {
34471ba41c9SSebastian Ott 	unsigned long barsize = pci_resource_len(pdev, bar);
34571ba41c9SSebastian Ott 	struct zpci_dev *zdev = to_zpci(pdev);
34671ba41c9SSebastian Ott 	void __iomem *iova;
34771ba41c9SSebastian Ott 
34871ba41c9SSebastian Ott 	iova = ioremap((unsigned long) zdev->bars[bar].mio_wb, barsize);
34971ba41c9SSebastian Ott 	return iova ? iova + offset : iova;
35071ba41c9SSebastian Ott }
35171ba41c9SSebastian Ott 
35271ba41c9SSebastian Ott void __iomem *pci_iomap_wc_range(struct pci_dev *pdev, int bar,
35371ba41c9SSebastian Ott 				 unsigned long offset, unsigned long max)
35471ba41c9SSebastian Ott {
355c9c13ba4SDenis Efremov 	if (bar >= PCI_STD_NUM_BARS || !pci_resource_len(pdev, bar))
35671ba41c9SSebastian Ott 		return NULL;
35771ba41c9SSebastian Ott 
35871ba41c9SSebastian Ott 	if (static_branch_likely(&have_mio))
35971ba41c9SSebastian Ott 		return pci_iomap_wc_range_mio(pdev, bar, offset, max);
36071ba41c9SSebastian Ott 	else
36171ba41c9SSebastian Ott 		return pci_iomap_range_fh(pdev, bar, offset, max);
36271ba41c9SSebastian Ott }
36371ba41c9SSebastian Ott EXPORT_SYMBOL(pci_iomap_wc_range);
36471ba41c9SSebastian Ott 
36571ba41c9SSebastian Ott void __iomem *pci_iomap_wc(struct pci_dev *dev, int bar, unsigned long maxlen)
36671ba41c9SSebastian Ott {
36771ba41c9SSebastian Ott 	return pci_iomap_wc_range(dev, bar, 0, maxlen);
36871ba41c9SSebastian Ott }
36971ba41c9SSebastian Ott EXPORT_SYMBOL(pci_iomap_wc);
37071ba41c9SSebastian Ott 
37171ba41c9SSebastian Ott static void pci_iounmap_fh(struct pci_dev *pdev, void __iomem *addr)
372cd248341SJan Glauber {
3739e00caaeSSebastian Ott 	unsigned int idx = ZPCI_IDX(addr);
374cd248341SJan Glauber 
375cd248341SJan Glauber 	spin_lock(&zpci_iomap_lock);
3768cfc99b5SMichael S. Tsirkin 	/* Detect underrun */
377f5e44f82SSebastian Ott 	WARN_ON(!zpci_iomap_start[idx].count);
3788cfc99b5SMichael S. Tsirkin 	if (!--zpci_iomap_start[idx].count) {
379cd248341SJan Glauber 		zpci_iomap_start[idx].fh = 0;
380cd248341SJan Glauber 		zpci_iomap_start[idx].bar = 0;
3818cfc99b5SMichael S. Tsirkin 	}
382cd248341SJan Glauber 	spin_unlock(&zpci_iomap_lock);
383cd248341SJan Glauber }
38471ba41c9SSebastian Ott 
38571ba41c9SSebastian Ott static void pci_iounmap_mio(struct pci_dev *pdev, void __iomem *addr)
38671ba41c9SSebastian Ott {
38771ba41c9SSebastian Ott 	iounmap(addr);
38871ba41c9SSebastian Ott }
38971ba41c9SSebastian Ott 
39071ba41c9SSebastian Ott void pci_iounmap(struct pci_dev *pdev, void __iomem *addr)
39171ba41c9SSebastian Ott {
39271ba41c9SSebastian Ott 	if (static_branch_likely(&have_mio))
39371ba41c9SSebastian Ott 		pci_iounmap_mio(pdev, addr);
39471ba41c9SSebastian Ott 	else
39571ba41c9SSebastian Ott 		pci_iounmap_fh(pdev, addr);
39671ba41c9SSebastian Ott }
397d9426083SSebastian Ott EXPORT_SYMBOL(pci_iounmap);
398cd248341SJan Glauber 
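/*
 * Config space accessors wired into pci_root_ops below; they resolve the
 * bus/devfn pair to its zpci_dev and use zpci_cfg_load()/zpci_cfg_store().
 */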
399cd248341SJan Glauber static int pci_read(struct pci_bus *bus, unsigned int devfn, int where,
400cd248341SJan Glauber 		    int size, u32 *val)
401cd248341SJan Glauber {
40244510d6fSPierre Morel 	struct zpci_dev *zdev = get_zdev_by_bus(bus, devfn);
403cd248341SJan Glauber 
40444510d6fSPierre Morel 	return (zdev) ? zpci_cfg_load(zdev, where, val, size) : -ENODEV;
405cd248341SJan Glauber }
406cd248341SJan Glauber 
407cd248341SJan Glauber static int pci_write(struct pci_bus *bus, unsigned int devfn, int where,
408cd248341SJan Glauber 		     int size, u32 val)
409cd248341SJan Glauber {
41044510d6fSPierre Morel 	struct zpci_dev *zdev = get_zdev_by_bus(bus, devfn);
411cd248341SJan Glauber 
41244510d6fSPierre Morel 	return (zdev) ? zpci_cfg_store(zdev, where, val, size) : -ENODEV;
413cd248341SJan Glauber }
414cd248341SJan Glauber 
415cd248341SJan Glauber static struct pci_ops pci_root_ops = {
416cd248341SJan Glauber 	.read = pci_read,
417cd248341SJan Glauber 	.write = pci_write,
418cd248341SJan Glauber };
419cd248341SJan Glauber 
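/*
 * Fill in the BAR resources of a pci_dev: with MIO support each BAR start is
 * set to its write-through MIO address, otherwise an address cookie from the
 * iomap table is obtained via pci_iomap_range_fh().
 */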
4201803ba2dSSebastian Ott static void zpci_map_resources(struct pci_dev *pdev)
421cd248341SJan Glauber {
42271ba41c9SSebastian Ott 	struct zpci_dev *zdev = to_zpci(pdev);
423cd248341SJan Glauber 	resource_size_t len;
424cd248341SJan Glauber 	int i;
425cd248341SJan Glauber 
426c9c13ba4SDenis Efremov 	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
427cd248341SJan Glauber 		len = pci_resource_len(pdev, i);
428cd248341SJan Glauber 		if (!len)
429cd248341SJan Glauber 			continue;
43071ba41c9SSebastian Ott 
431c7ff0e91SSebastian Ott 		if (zpci_use_mio(zdev))
43271ba41c9SSebastian Ott 			pdev->resource[i].start =
433df057c91SNiklas Schnelle 				(resource_size_t __force) zdev->bars[i].mio_wt;
43471ba41c9SSebastian Ott 		else
435c7ff0e91SSebastian Ott 			pdev->resource[i].start = (resource_size_t __force)
436c7ff0e91SSebastian Ott 				pci_iomap_range_fh(pdev, i, 0, 0);
437cd248341SJan Glauber 		pdev->resource[i].end = pdev->resource[i].start + len - 1;
438cd248341SJan Glauber 	}
439cfbb4a7aSSebastian Ott 
440abb95b75SNiklas Schnelle 	zpci_iov_map_resources(pdev);
441944239c5SSebastian Ott }
442944239c5SSebastian Ott 
4431803ba2dSSebastian Ott static void zpci_unmap_resources(struct pci_dev *pdev)
444944239c5SSebastian Ott {
445c7ff0e91SSebastian Ott 	struct zpci_dev *zdev = to_zpci(pdev);
446944239c5SSebastian Ott 	resource_size_t len;
447944239c5SSebastian Ott 	int i;
448944239c5SSebastian Ott 
449c7ff0e91SSebastian Ott 	if (zpci_use_mio(zdev))
45071ba41c9SSebastian Ott 		return;
45171ba41c9SSebastian Ott 
452c9c13ba4SDenis Efremov 	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
453944239c5SSebastian Ott 		len = pci_resource_len(pdev, i);
454944239c5SSebastian Ott 		if (!len)
455944239c5SSebastian Ott 			continue;
456c7ff0e91SSebastian Ott 		pci_iounmap_fh(pdev, (void __iomem __force *)
4575b9f2081SMartin Schwidefsky 			       pdev->resource[i].start);
458944239c5SSebastian Ott 	}
459944239c5SSebastian Ott }
460cd248341SJan Glauber 
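/*
 * Reserve a free slot in the global iomap table for one BAR mapping; returns
 * the slot index or -ENOSPC if all ZPCI_IOMAP_ENTRIES are in use.
 */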
461cd248341SJan Glauber static int zpci_alloc_iomap(struct zpci_dev *zdev)
462cd248341SJan Glauber {
463bf19c94dSSebastian Ott 	unsigned long entry;
464cd248341SJan Glauber 
465cd248341SJan Glauber 	spin_lock(&zpci_iomap_lock);
466c506fff3SSebastian Ott 	entry = find_first_zero_bit(zpci_iomap_bitmap, ZPCI_IOMAP_ENTRIES);
467c506fff3SSebastian Ott 	if (entry == ZPCI_IOMAP_ENTRIES) {
468cd248341SJan Glauber 		spin_unlock(&zpci_iomap_lock);
469cd248341SJan Glauber 		return -ENOSPC;
470cd248341SJan Glauber 	}
471c506fff3SSebastian Ott 	set_bit(entry, zpci_iomap_bitmap);
472cd248341SJan Glauber 	spin_unlock(&zpci_iomap_lock);
473cd248341SJan Glauber 	return entry;
474cd248341SJan Glauber }
475cd248341SJan Glauber 
476cd248341SJan Glauber static void zpci_free_iomap(struct zpci_dev *zdev, int entry)
477cd248341SJan Glauber {
478cd248341SJan Glauber 	spin_lock(&zpci_iomap_lock);
479cd248341SJan Glauber 	memset(&zpci_iomap_start[entry], 0, sizeof(struct zpci_iomap_entry));
480c506fff3SSebastian Ott 	clear_bit(entry, zpci_iomap_bitmap);
481cd248341SJan Glauber 	spin_unlock(&zpci_iomap_lock);
482cd248341SJan Glauber }
483cd248341SJan Glauber 
4847a572a3aSSebastian Ott static struct resource *__alloc_res(struct zpci_dev *zdev, unsigned long start,
4857a572a3aSSebastian Ott 				    unsigned long size, unsigned long flags)
4867a572a3aSSebastian Ott {
4877a572a3aSSebastian Ott 	struct resource *r;
4887a572a3aSSebastian Ott 
4897a572a3aSSebastian Ott 	r = kzalloc(sizeof(*r), GFP_KERNEL);
4907a572a3aSSebastian Ott 	if (!r)
4917a572a3aSSebastian Ott 		return NULL;
4927a572a3aSSebastian Ott 
4937a572a3aSSebastian Ott 	r->start = start;
4947a572a3aSSebastian Ott 	r->end = r->start + size - 1;
4957a572a3aSSebastian Ott 	r->flags = flags;
4967a572a3aSSebastian Ott 	r->name = zdev->res_name;
4977a572a3aSSebastian Ott 
4987a572a3aSSebastian Ott 	if (request_resource(&iomem_resource, r)) {
4997a572a3aSSebastian Ott 		kfree(r);
5007a572a3aSSebastian Ott 		return NULL;
5017a572a3aSSebastian Ott 	}
5027a572a3aSSebastian Ott 	return r;
5037a572a3aSSebastian Ott }
5047a572a3aSSebastian Ott 
50505bc1be6SPierre Morel int zpci_setup_bus_resources(struct zpci_dev *zdev,
5067a572a3aSSebastian Ott 			     struct list_head *resources)
5077a572a3aSSebastian Ott {
5087a572a3aSSebastian Ott 	unsigned long addr, size, flags;
5097a572a3aSSebastian Ott 	struct resource *res;
5107a572a3aSSebastian Ott 	int i, entry;
5117a572a3aSSebastian Ott 
5127a572a3aSSebastian Ott 	snprintf(zdev->res_name, sizeof(zdev->res_name),
51305bc1be6SPierre Morel 		 "PCI Bus %04x:%02x", zdev->uid, ZPCI_BUS_NR);
5147a572a3aSSebastian Ott 
515c9c13ba4SDenis Efremov 	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
5167a572a3aSSebastian Ott 		if (!zdev->bars[i].size)
5177a572a3aSSebastian Ott 			continue;
5187a572a3aSSebastian Ott 		entry = zpci_alloc_iomap(zdev);
5197a572a3aSSebastian Ott 		if (entry < 0)
5207a572a3aSSebastian Ott 			return entry;
5217a572a3aSSebastian Ott 		zdev->bars[i].map_idx = entry;
5227a572a3aSSebastian Ott 
5237a572a3aSSebastian Ott 		/* only MMIO is supported */
5247a572a3aSSebastian Ott 		flags = IORESOURCE_MEM;
5257a572a3aSSebastian Ott 		if (zdev->bars[i].val & 8)
5267a572a3aSSebastian Ott 			flags |= IORESOURCE_PREFETCH;
5277a572a3aSSebastian Ott 		if (zdev->bars[i].val & 4)
5287a572a3aSSebastian Ott 			flags |= IORESOURCE_MEM_64;
5297a572a3aSSebastian Ott 
530c7ff0e91SSebastian Ott 		if (zpci_use_mio(zdev))
531df057c91SNiklas Schnelle 			addr = (unsigned long) zdev->bars[i].mio_wt;
532dcd33b23SSebastian Ott 		else
5339e00caaeSSebastian Ott 			addr = ZPCI_ADDR(entry);
5347a572a3aSSebastian Ott 		size = 1UL << zdev->bars[i].size;
5357a572a3aSSebastian Ott 
5367a572a3aSSebastian Ott 		res = __alloc_res(zdev, addr, size, flags);
5377a572a3aSSebastian Ott 		if (!res) {
5387a572a3aSSebastian Ott 			zpci_free_iomap(zdev, entry);
5397a572a3aSSebastian Ott 			return -ENOMEM;
5407a572a3aSSebastian Ott 		}
5417a572a3aSSebastian Ott 		zdev->bars[i].res = res;
5427a572a3aSSebastian Ott 		pci_add_resource(resources, res);
5437a572a3aSSebastian Ott 	}
544a50297cfSNiklas Schnelle 	zdev->has_resources = 1;
5457a572a3aSSebastian Ott 
5467a572a3aSSebastian Ott 	return 0;
5477a572a3aSSebastian Ott }
5487a572a3aSSebastian Ott 
5497a572a3aSSebastian Ott static void zpci_cleanup_bus_resources(struct zpci_dev *zdev)
5507a572a3aSSebastian Ott {
5517a572a3aSSebastian Ott 	int i;
5527a572a3aSSebastian Ott 
553c9c13ba4SDenis Efremov 	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
5542b1df724SSebastian Ott 		if (!zdev->bars[i].size || !zdev->bars[i].res)
5557a572a3aSSebastian Ott 			continue;
5567a572a3aSSebastian Ott 
5577a572a3aSSebastian Ott 		zpci_free_iomap(zdev, zdev->bars[i].map_idx);
5587a572a3aSSebastian Ott 		release_resource(zdev->bars[i].res);
5597a572a3aSSebastian Ott 		kfree(zdev->bars[i].res);
5607a572a3aSSebastian Ott 	}
561a50297cfSNiklas Schnelle 	zdev->has_resources = 0;
5627a572a3aSSebastian Ott }
5637a572a3aSSebastian Ott 
564af0a8a84SSebastian Ott int pcibios_add_device(struct pci_dev *pdev)
565af0a8a84SSebastian Ott {
5662a671f77SNiklas Schnelle 	struct zpci_dev *zdev = to_zpci(pdev);
567cb809182SSebastian Ott 	struct resource *res;
568cb809182SSebastian Ott 	int i;
569cb809182SSebastian Ott 
5702a671f77SNiklas Schnelle 	/* The pdev has a reference to the zdev via its bus */
5712a671f77SNiklas Schnelle 	zpci_zdev_get(zdev);
5727dc20ab1SSebastian Ott 	if (pdev->is_physfn)
5737dc20ab1SSebastian Ott 		pdev->no_vf_scan = 1;
5747dc20ab1SSebastian Ott 
575ef4858c6SSebastian Ott 	pdev->dev.groups = zpci_attr_groups;
5765657933dSBart Van Assche 	pdev->dev.dma_ops = &s390_pci_dma_ops;
5771803ba2dSSebastian Ott 	zpci_map_resources(pdev);
578cb809182SSebastian Ott 
579c9c13ba4SDenis Efremov 	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
580cb809182SSebastian Ott 		res = &pdev->resource[i];
581cb809182SSebastian Ott 		if (res->parent || !res->flags)
582cb809182SSebastian Ott 			continue;
583cb809182SSebastian Ott 		pci_claim_resource(pdev, i);
584cb809182SSebastian Ott 	}
585cb809182SSebastian Ott 
586cb809182SSebastian Ott 	return 0;
587cb809182SSebastian Ott }
588cb809182SSebastian Ott 
5891803ba2dSSebastian Ott void pcibios_release_device(struct pci_dev *pdev)
5901803ba2dSSebastian Ott {
5912a671f77SNiklas Schnelle 	struct zpci_dev *zdev = to_zpci(pdev);
5922a671f77SNiklas Schnelle 
5931803ba2dSSebastian Ott 	zpci_unmap_resources(pdev);
5942a671f77SNiklas Schnelle 	zpci_zdev_put(zdev);
5951803ba2dSSebastian Ott }
5961803ba2dSSebastian Ott 
597cb809182SSebastian Ott int pcibios_enable_device(struct pci_dev *pdev, int mask)
598cb809182SSebastian Ott {
599198a5278SSebastian Ott 	struct zpci_dev *zdev = to_zpci(pdev);
600af0a8a84SSebastian Ott 
6019a99649fSSebastian Ott 	zpci_debug_init_device(zdev, dev_name(&pdev->dev));
602af0a8a84SSebastian Ott 	zpci_fmb_enable_device(zdev);
603af0a8a84SSebastian Ott 
604d7533232SBjorn Helgaas 	return pci_enable_resources(pdev, mask);
605af0a8a84SSebastian Ott }
606af0a8a84SSebastian Ott 
607cb809182SSebastian Ott void pcibios_disable_device(struct pci_dev *pdev)
608944239c5SSebastian Ott {
609198a5278SSebastian Ott 	struct zpci_dev *zdev = to_zpci(pdev);
610944239c5SSebastian Ott 
611944239c5SSebastian Ott 	zpci_fmb_disable_device(zdev);
612944239c5SSebastian Ott 	zpci_debug_exit_device(zdev);
613944239c5SSebastian Ott }
614944239c5SSebastian Ott 
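/*
 * Claim a specific domain number, used when the platform supplies unique
 * UIDs; fails with -EEXIST if the domain has already been assigned.
 */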
61505bc1be6SPierre Morel static int __zpci_register_domain(int domain)
616cd248341SJan Glauber {
617969ae01bSNiklas Schnelle 	spin_lock(&zpci_domain_lock);
61805bc1be6SPierre Morel 	if (test_bit(domain, zpci_domain)) {
619969ae01bSNiklas Schnelle 		spin_unlock(&zpci_domain_lock);
62005bc1be6SPierre Morel 		pr_err("Domain %04x is already assigned\n", domain);
621312e8462SSebastian Ott 		return -EEXIST;
622312e8462SSebastian Ott 	}
62305bc1be6SPierre Morel 	set_bit(domain, zpci_domain);
624312e8462SSebastian Ott 	spin_unlock(&zpci_domain_lock);
62505bc1be6SPierre Morel 	return domain;
6265c5afd02SSebastian Ott }
62705bc1be6SPierre Morel 
62805bc1be6SPierre Morel static int __zpci_alloc_domain(void)
62905bc1be6SPierre Morel {
63005bc1be6SPierre Morel 	int domain;
63105bc1be6SPierre Morel 
63205bc1be6SPierre Morel 	spin_lock(&zpci_domain_lock);
633969ae01bSNiklas Schnelle 	/*
634969ae01bSNiklas Schnelle 	 * We can always auto allocate domains below ZPCI_NR_DEVICES.
635969ae01bSNiklas Schnelle 	 * There is either a free domain or we have reached the maximum in
636969ae01bSNiklas Schnelle 	 * which case we would have bailed earlier.
637969ae01bSNiklas Schnelle 	 */
63805bc1be6SPierre Morel 	domain = find_first_zero_bit(zpci_domain, ZPCI_NR_DEVICES);
63905bc1be6SPierre Morel 	set_bit(domain, zpci_domain);
640cd248341SJan Glauber 	spin_unlock(&zpci_domain_lock);
64105bc1be6SPierre Morel 	return domain;
642cd248341SJan Glauber }
643cd248341SJan Glauber 
64405bc1be6SPierre Morel int zpci_alloc_domain(int domain)
64505bc1be6SPierre Morel {
64605bc1be6SPierre Morel 	if (zpci_unique_uid) {
64705bc1be6SPierre Morel 		if (domain)
64805bc1be6SPierre Morel 			return __zpci_register_domain(domain);
64905bc1be6SPierre Morel 		pr_warn("UID checking was active but no UID is provided: switching to automatic domain allocation\n");
65005bc1be6SPierre Morel 		update_uid_checking(false);
65105bc1be6SPierre Morel 	}
65205bc1be6SPierre Morel 	return __zpci_alloc_domain();
65305bc1be6SPierre Morel }
65405bc1be6SPierre Morel 
65505bc1be6SPierre Morel void zpci_free_domain(int domain)
656cd248341SJan Glauber {
657cd248341SJan Glauber 	spin_lock(&zpci_domain_lock);
65805bc1be6SPierre Morel 	clear_bit(domain, zpci_domain);
659cd248341SJan Glauber 	spin_unlock(&zpci_domain_lock);
660cd248341SJan Glauber }
661cd248341SJan Glauber 
6627d594322SSebastian Ott 
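/*
 * Enable the PCI function through clp_enable_fh(), requesting
 * ZPCI_NR_DMA_SPACES DMA address spaces; on success the refreshed function
 * handle is stored in zdev->fh.
 */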
663a755a45dSJan Glauber int zpci_enable_device(struct zpci_dev *zdev)
664a755a45dSJan Glauber {
665cc049eecSNiklas Schnelle 	u32 fh = zdev->fh;
6661f3f7681SNiklas Schnelle 	int rc = 0;
667a755a45dSJan Glauber 
6681f3f7681SNiklas Schnelle 	if (clp_enable_fh(zdev, &fh, ZPCI_NR_DMA_SPACES))
669f7addcddSNiklas Schnelle 		rc = -EIO;
6701f3f7681SNiklas Schnelle 	else
671cc049eecSNiklas Schnelle 		zdev->fh = fh;
672a755a45dSJan Glauber 	return rc;
673a755a45dSJan Glauber }
674a755a45dSJan Glauber 
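/*
 * Disable the PCI function through clp_disable_fh(). If the function turns
 * out to be disabled already, its handle is refreshed from the platform and
 * -EINVAL is returned so callers can tell this apart from a clean disable.
 */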
675cb65a669SSebastian Ott int zpci_disable_device(struct zpci_dev *zdev)
676cb65a669SSebastian Ott {
677cc049eecSNiklas Schnelle 	u32 fh = zdev->fh;
6788256addaSNiklas Schnelle 	int cc, rc = 0;
6798256addaSNiklas Schnelle 
680cc049eecSNiklas Schnelle 	cc = clp_disable_fh(zdev, &fh);
681cc049eecSNiklas Schnelle 	if (!cc) {
682cc049eecSNiklas Schnelle 		zdev->fh = fh;
683cc049eecSNiklas Schnelle 	} else if (cc == CLP_RC_SETPCIFN_ALRDY) {
6848256addaSNiklas Schnelle 		pr_info("Disabling PCI function %08x had no effect as it was already disabled\n",
6858256addaSNiklas Schnelle 			zdev->fid);
6868256addaSNiklas Schnelle 		/* Function is already disabled - update handle */
687cc049eecSNiklas Schnelle 		rc = clp_refresh_fh(zdev->fid, &fh);
688cc049eecSNiklas Schnelle 		if (!rc) {
689cc049eecSNiklas Schnelle 			zdev->fh = fh;
6908256addaSNiklas Schnelle 			rc = -EINVAL;
691cc049eecSNiklas Schnelle 		}
692cc049eecSNiklas Schnelle 	} else {
6938256addaSNiklas Schnelle 		rc = -EIO;
6948256addaSNiklas Schnelle 	}
6958256addaSNiklas Schnelle 	return rc;
696cb65a669SSebastian Ott }
697cb65a669SSebastian Ott 
698ba764dd7SNiklas Schnelle /**
699ba764dd7SNiklas Schnelle  * zpci_create_device() - Create a new zpci_dev and add it to the zbus
700ba764dd7SNiklas Schnelle  * @fid: Function ID of the device to be created
701ba764dd7SNiklas Schnelle  * @fh: Current Function Handle of the device to be created
702ba764dd7SNiklas Schnelle  * @state: Initial state after creation either Standby or Configured
703ba764dd7SNiklas Schnelle  *
704ba764dd7SNiklas Schnelle  * Creates a new zpci device and adds it to its, possibly newly created, zbus
705ba764dd7SNiklas Schnelle  * as well as zpci_list.
706ba764dd7SNiklas Schnelle  *
70714c87ba8SNiklas Schnelle  * Returns: the zdev on success or an error pointer otherwise
708ba764dd7SNiklas Schnelle  */
70914c87ba8SNiklas Schnelle struct zpci_dev *zpci_create_device(u32 fid, u32 fh, enum zpci_state state)
710cd248341SJan Glauber {
711ba764dd7SNiklas Schnelle 	struct zpci_dev *zdev;
712cd248341SJan Glauber 	int rc;
713cd248341SJan Glauber 
714ba764dd7SNiklas Schnelle 	zpci_dbg(3, "add fid:%x, fh:%x, c:%d\n", fid, fh, state);
715ba764dd7SNiklas Schnelle 	zdev = kzalloc(sizeof(*zdev), GFP_KERNEL);
716ba764dd7SNiklas Schnelle 	if (!zdev)
71714c87ba8SNiklas Schnelle 		return ERR_PTR(-ENOMEM);
718ba764dd7SNiklas Schnelle 
719ba764dd7SNiklas Schnelle 	/* FID and Function Handle are the static/dynamic identifiers */
720ba764dd7SNiklas Schnelle 	zdev->fid = fid;
721ba764dd7SNiklas Schnelle 	zdev->fh = fh;
722ba764dd7SNiklas Schnelle 
723ba764dd7SNiklas Schnelle 	/* Query function properties and update zdev */
724ba764dd7SNiklas Schnelle 	rc = clp_query_pci_fn(zdev);
725ba764dd7SNiklas Schnelle 	if (rc)
726ba764dd7SNiklas Schnelle 		goto error;
727ba764dd7SNiklas Schnelle 	zdev->state = state;
728ba764dd7SNiklas Schnelle 
72905bc1be6SPierre Morel 	kref_init(&zdev->kref);
730ba764dd7SNiklas Schnelle 	mutex_init(&zdev->lock);
731ba764dd7SNiklas Schnelle 
732ba764dd7SNiklas Schnelle 	rc = zpci_init_iommu(zdev);
733ba764dd7SNiklas Schnelle 	if (rc)
734ba764dd7SNiklas Schnelle 		goto error;
735ba764dd7SNiklas Schnelle 
736ba764dd7SNiklas Schnelle 	rc = zpci_bus_device_register(zdev, &pci_root_ops);
737ba764dd7SNiklas Schnelle 	if (rc)
738a50297cfSNiklas Schnelle 		goto error_destroy_iommu;
73905bc1be6SPierre Morel 
74005bc1be6SPierre Morel 	spin_lock(&zpci_list_lock);
74105bc1be6SPierre Morel 	list_add_tail(&zdev->entry, &zpci_list);
74205bc1be6SPierre Morel 	spin_unlock(&zpci_list_lock);
743cd248341SJan Glauber 
74414c87ba8SNiklas Schnelle 	return zdev;
745cd248341SJan Glauber 
746ba764dd7SNiklas Schnelle error_destroy_iommu:
747f42c2235SJoerg Roedel 	zpci_destroy_iommu(zdev);
748ba764dd7SNiklas Schnelle error:
749ba764dd7SNiklas Schnelle 	zpci_dbg(0, "add fid:%x, rc:%d\n", fid, rc);
750ba764dd7SNiklas Schnelle 	kfree(zdev);
75114c87ba8SNiklas Schnelle 	return ERR_PTR(rc);
752cd248341SJan Glauber }
753cd248341SJan Glauber 
754*a46044a9SNiklas Schnelle bool zpci_is_device_configured(struct zpci_dev *zdev)
755*a46044a9SNiklas Schnelle {
756*a46044a9SNiklas Schnelle 	enum zpci_state state = zdev->state;
757*a46044a9SNiklas Schnelle 
758*a46044a9SNiklas Schnelle 	return state != ZPCI_FN_STATE_RESERVED &&
759*a46044a9SNiklas Schnelle 		state != ZPCI_FN_STATE_STANDBY;
760*a46044a9SNiklas Schnelle }
761*a46044a9SNiklas Schnelle 
7622631f6b6SNiklas Schnelle /**
763a7f82c36SNiklas Schnelle  * zpci_scan_configured_device() - Scan a freshly configured zpci_dev
7642631f6b6SNiklas Schnelle  * @zdev: The zpci_dev to be configured
7652631f6b6SNiklas Schnelle  * @fh: The general function handle supplied by the platform
7662631f6b6SNiklas Schnelle  *
76761311e32SNiklas Schnelle  * Given a device in the configuration state Configured, enables, scans and
768a7f82c36SNiklas Schnelle  * adds it to the common code PCI subsystem if possible. If the PCI device is
769a7f82c36SNiklas Schnelle  * parked because we cannot yet create a PCI bus since function 0 has not
770a7f82c36SNiklas Schnelle  * been seen, it is ignored but will be scanned once function 0 appears.
771a7f82c36SNiklas Schnelle  * If any failure occurs, the zpci_dev is left disabled.
7722631f6b6SNiklas Schnelle  *
7732631f6b6SNiklas Schnelle  * Return: 0 on success, or an error code otherwise
7742631f6b6SNiklas Schnelle  */
775a7f82c36SNiklas Schnelle int zpci_scan_configured_device(struct zpci_dev *zdev, u32 fh)
7762631f6b6SNiklas Schnelle {
7772631f6b6SNiklas Schnelle 	int rc;
7782631f6b6SNiklas Schnelle 
7792631f6b6SNiklas Schnelle 	zdev->fh = fh;
7802631f6b6SNiklas Schnelle 	/* the PCI function will be scanned once function 0 appears */
7812631f6b6SNiklas Schnelle 	if (!zdev->zbus->bus)
7822631f6b6SNiklas Schnelle 		return 0;
7832631f6b6SNiklas Schnelle 
784a50297cfSNiklas Schnelle 	/* For function 0 on a multi-function bus, scan the whole bus as we
785a50297cfSNiklas Schnelle 	 * might have to pick up existing functions waiting for it to allow
786a50297cfSNiklas Schnelle 	 * creating the PCI bus
787a50297cfSNiklas Schnelle 	 */
788a50297cfSNiklas Schnelle 	if (zdev->devfn == 0 && zdev->zbus->multifunction)
789a50297cfSNiklas Schnelle 		rc = zpci_bus_scan_bus(zdev->zbus);
790a50297cfSNiklas Schnelle 	else
791faf29a4dSNiklas Schnelle 		rc = zpci_bus_scan_device(zdev);
7922631f6b6SNiklas Schnelle 
7932631f6b6SNiklas Schnelle 	return rc;
7942631f6b6SNiklas Schnelle }
7952631f6b6SNiklas Schnelle 
7962631f6b6SNiklas Schnelle /**
7972631f6b6SNiklas Schnelle  * zpci_deconfigure_device() - Deconfigure a zpci_dev
7982631f6b6SNiklas Schnelle  * @zdev: The zpci_dev to deconfigure
7992631f6b6SNiklas Schnelle  *
8002631f6b6SNiklas Schnelle  * Deconfigure a zPCI function that is currently configured and possibly known
8012631f6b6SNiklas Schnelle  * to the common code PCI subsystem.
8022631f6b6SNiklas Schnelle  * If any failure occurs the device is left as is.
8032631f6b6SNiklas Schnelle  *
8042631f6b6SNiklas Schnelle  * Return: 0 on success, or an error code otherwise
8052631f6b6SNiklas Schnelle  */
8062631f6b6SNiklas Schnelle int zpci_deconfigure_device(struct zpci_dev *zdev)
8072631f6b6SNiklas Schnelle {
8082631f6b6SNiklas Schnelle 	int rc;
8092631f6b6SNiklas Schnelle 
8102631f6b6SNiklas Schnelle 	if (zdev->zbus->bus)
81195b3a8b4SNiklas Schnelle 		zpci_bus_remove_device(zdev, false);
8122631f6b6SNiklas Schnelle 
8131f3f7681SNiklas Schnelle 	if (zdev->dma_table) {
8141f3f7681SNiklas Schnelle 		rc = zpci_dma_exit_device(zdev);
8151f3f7681SNiklas Schnelle 		if (rc)
8161f3f7681SNiklas Schnelle 			return rc;
8171f3f7681SNiklas Schnelle 	}
8182631f6b6SNiklas Schnelle 	if (zdev_enabled(zdev)) {
8192631f6b6SNiklas Schnelle 		rc = zpci_disable_device(zdev);
8202631f6b6SNiklas Schnelle 		if (rc)
8212631f6b6SNiklas Schnelle 			return rc;
8222631f6b6SNiklas Schnelle 	}
8232631f6b6SNiklas Schnelle 
8242631f6b6SNiklas Schnelle 	rc = sclp_pci_deconfigure(zdev->fid);
8252631f6b6SNiklas Schnelle 	zpci_dbg(3, "deconf fid:%x, rc:%d\n", zdev->fid, rc);
8262631f6b6SNiklas Schnelle 	if (rc)
8272631f6b6SNiklas Schnelle 		return rc;
8282631f6b6SNiklas Schnelle 	zdev->state = ZPCI_FN_STATE_STANDBY;
8292631f6b6SNiklas Schnelle 
8302631f6b6SNiklas Schnelle 	return 0;
8312631f6b6SNiklas Schnelle }
8322631f6b6SNiklas Schnelle 
833*a46044a9SNiklas Schnelle /**
834*a46044a9SNiklas Schnelle  * zpci_device_reserved() - Mark device as reserved
835*a46044a9SNiklas Schnelle  * @zdev: the zpci_dev that was reserved
836*a46044a9SNiklas Schnelle  *
837*a46044a9SNiklas Schnelle  * Handle the case that a given zPCI function was reserved by another system.
838*a46044a9SNiklas Schnelle  * After a call to this function the zpci_dev can no longer be found via
839*a46044a9SNiklas Schnelle  * get_zdev_by_fid() but may still be accessible via existing references,
840*a46044a9SNiklas Schnelle  * though it will not be functional anymore.
841*a46044a9SNiklas Schnelle  */
842*a46044a9SNiklas Schnelle void zpci_device_reserved(struct zpci_dev *zdev)
843*a46044a9SNiklas Schnelle {
844*a46044a9SNiklas Schnelle 	if (zdev->has_hp_slot)
845*a46044a9SNiklas Schnelle 		zpci_exit_slot(zdev);
846*a46044a9SNiklas Schnelle 	/*
847*a46044a9SNiklas Schnelle 	 * Remove device from zpci_list as it is going away. This also
848*a46044a9SNiklas Schnelle 	 * makes sure we ignore subsequent zPCI events for this device.
849*a46044a9SNiklas Schnelle 	 */
850*a46044a9SNiklas Schnelle 	spin_lock(&zpci_list_lock);
851*a46044a9SNiklas Schnelle 	list_del(&zdev->entry);
852*a46044a9SNiklas Schnelle 	spin_unlock(&zpci_list_lock);
853*a46044a9SNiklas Schnelle 	zdev->state = ZPCI_FN_STATE_RESERVED;
854*a46044a9SNiklas Schnelle 	zpci_dbg(3, "rsv fid:%x\n", zdev->fid);
855*a46044a9SNiklas Schnelle 	zpci_zdev_put(zdev);
856*a46044a9SNiklas Schnelle }
857*a46044a9SNiklas Schnelle 
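/*
 * kref release callback for a zpci_dev: depending on how far the function got
 * (configured, standby or already reserved), undo the corresponding setup
 * steps before freeing the structure.
 */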
85805bc1be6SPierre Morel void zpci_release_device(struct kref *kref)
859623bd44dSSebastian Ott {
86005bc1be6SPierre Morel 	struct zpci_dev *zdev = container_of(kref, struct zpci_dev, kref);
861a9045c22SNiklas Schnelle 	int ret;
862623bd44dSSebastian Ott 
8632f0230b2SNiklas Schnelle 	if (zdev->zbus->bus)
86495b3a8b4SNiklas Schnelle 		zpci_bus_remove_device(zdev, false);
86544510d6fSPierre Morel 
8661f3f7681SNiklas Schnelle 	if (zdev->dma_table)
8671f3f7681SNiklas Schnelle 		zpci_dma_exit_device(zdev);
868f6576a1bSNiklas Schnelle 	if (zdev_enabled(zdev))
86905bc1be6SPierre Morel 		zpci_disable_device(zdev);
870f6576a1bSNiklas Schnelle 
871f6576a1bSNiklas Schnelle 	switch (zdev->state) {
872a9045c22SNiklas Schnelle 	case ZPCI_FN_STATE_CONFIGURED:
873a9045c22SNiklas Schnelle 		ret = sclp_pci_deconfigure(zdev->fid);
874a9045c22SNiklas Schnelle 		zpci_dbg(3, "deconf fid:%x, rc:%d\n", zdev->fid, ret);
875a9045c22SNiklas Schnelle 		fallthrough;
87605bc1be6SPierre Morel 	case ZPCI_FN_STATE_STANDBY:
87744510d6fSPierre Morel 		if (zdev->has_hp_slot)
87805bc1be6SPierre Morel 			zpci_exit_slot(zdev);
879*a46044a9SNiklas Schnelle 		spin_lock(&zpci_list_lock);
880*a46044a9SNiklas Schnelle 		list_del(&zdev->entry);
881*a46044a9SNiklas Schnelle 		spin_unlock(&zpci_list_lock);
882*a46044a9SNiklas Schnelle 		zpci_dbg(3, "rsv fid:%x\n", zdev->fid);
883*a46044a9SNiklas Schnelle 		fallthrough;
884*a46044a9SNiklas Schnelle 	case ZPCI_FN_STATE_RESERVED:
88502368b7cSNiklas Schnelle 		if (zdev->has_resources)
88605bc1be6SPierre Morel 			zpci_cleanup_bus_resources(zdev);
88705bc1be6SPierre Morel 		zpci_bus_device_unregister(zdev);
88805bc1be6SPierre Morel 		zpci_destroy_iommu(zdev);
88905bc1be6SPierre Morel 		fallthrough;
89005bc1be6SPierre Morel 	default:
89105bc1be6SPierre Morel 		break;
89205bc1be6SPierre Morel 	}
89305bc1be6SPierre Morel 	zpci_dbg(3, "rem fid:%x\n", zdev->fid);
89405bc1be6SPierre Morel 	kfree(zdev);
895623bd44dSSebastian Ott }
896623bd44dSSebastian Ott 
897bd3a1725SMartin Schwidefsky int zpci_report_error(struct pci_dev *pdev,
898bd3a1725SMartin Schwidefsky 		      struct zpci_report_error_header *report)
899bd3a1725SMartin Schwidefsky {
900bd3a1725SMartin Schwidefsky 	struct zpci_dev *zdev = to_zpci(pdev);
901bd3a1725SMartin Schwidefsky 
902bd3a1725SMartin Schwidefsky 	return sclp_pci_report(report, zdev->fh, zdev->fid);
903bd3a1725SMartin Schwidefsky }
904bd3a1725SMartin Schwidefsky EXPORT_SYMBOL(zpci_report_error);
905bd3a1725SMartin Schwidefsky 
906cd248341SJan Glauber static int zpci_mem_init(void)
907cd248341SJan Glauber {
90880c544deSSebastian Ott 	BUILD_BUG_ON(!is_power_of_2(__alignof__(struct zpci_fmb)) ||
90980c544deSSebastian Ott 		     __alignof__(struct zpci_fmb) < sizeof(struct zpci_fmb));
91080c544deSSebastian Ott 
911d0b08853SJan Glauber 	zdev_fmb_cache = kmem_cache_create("PCI_FMB_cache", sizeof(struct zpci_fmb),
91280c544deSSebastian Ott 					   __alignof__(struct zpci_fmb), 0, NULL);
913d0b08853SJan Glauber 	if (!zdev_fmb_cache)
914c506fff3SSebastian Ott 		goto error_fmb;
915d0b08853SJan Glauber 
916c506fff3SSebastian Ott 	zpci_iomap_start = kcalloc(ZPCI_IOMAP_ENTRIES,
917c506fff3SSebastian Ott 				   sizeof(*zpci_iomap_start), GFP_KERNEL);
918cd248341SJan Glauber 	if (!zpci_iomap_start)
9199a4da8a5SJan Glauber 		goto error_iomap;
920cd248341SJan Glauber 
921c506fff3SSebastian Ott 	zpci_iomap_bitmap = kcalloc(BITS_TO_LONGS(ZPCI_IOMAP_ENTRIES),
922c506fff3SSebastian Ott 				    sizeof(*zpci_iomap_bitmap), GFP_KERNEL);
923c506fff3SSebastian Ott 	if (!zpci_iomap_bitmap)
924c506fff3SSebastian Ott 		goto error_iomap_bitmap;
925c506fff3SSebastian Ott 
926b02002ccSNiklas Schnelle 	if (static_branch_likely(&have_mio))
927b02002ccSNiklas Schnelle 		clp_setup_writeback_mio();
928b02002ccSNiklas Schnelle 
929c506fff3SSebastian Ott 	return 0;
930c506fff3SSebastian Ott error_iomap_bitmap:
931c506fff3SSebastian Ott 	kfree(zpci_iomap_start);
9329a4da8a5SJan Glauber error_iomap:
933d0b08853SJan Glauber 	kmem_cache_destroy(zdev_fmb_cache);
934c506fff3SSebastian Ott error_fmb:
935cd248341SJan Glauber 	return -ENOMEM;
936cd248341SJan Glauber }
937cd248341SJan Glauber 
938cd248341SJan Glauber static void zpci_mem_exit(void)
939cd248341SJan Glauber {
940c506fff3SSebastian Ott 	kfree(zpci_iomap_bitmap);
941cd248341SJan Glauber 	kfree(zpci_iomap_start);
942d0b08853SJan Glauber 	kmem_cache_destroy(zdev_fmb_cache);
943cd248341SJan Glauber }
944cd248341SJan Glauber 
9456324b4deSSebastian Ott static unsigned int s390_pci_probe __initdata = 1;
946fbfe07d4SSebastian Ott unsigned int s390_pci_force_floating __initdata;
947aa3b7c29SSebastian Ott static unsigned int s390_pci_initialized;
948cd248341SJan Glauber 
949cd248341SJan Glauber char * __init pcibios_setup(char *str)
950cd248341SJan Glauber {
951257608fbSSebastian Ott 	if (!strcmp(str, "off")) {
952257608fbSSebastian Ott 		s390_pci_probe = 0;
953cd248341SJan Glauber 		return NULL;
954cd248341SJan Glauber 	}
95556271303SSebastian Ott 	if (!strcmp(str, "nomio")) {
9563322ba0dSNiklas Schnelle 		S390_lowcore.machine_flags &= ~MACHINE_FLAG_PCI_MIO;
95756271303SSebastian Ott 		return NULL;
95856271303SSebastian Ott 	}
959fbfe07d4SSebastian Ott 	if (!strcmp(str, "force_floating")) {
960fbfe07d4SSebastian Ott 		s390_pci_force_floating = 1;
961fbfe07d4SSebastian Ott 		return NULL;
962fbfe07d4SSebastian Ott 	}
9636cf17f9aSPierre Morel 	if (!strcmp(str, "norid")) {
9646cf17f9aSPierre Morel 		s390_pci_no_rid = 1;
9656cf17f9aSPierre Morel 		return NULL;
9666cf17f9aSPierre Morel 	}
967cd248341SJan Glauber 	return str;
968cd248341SJan Glauber }
969cd248341SJan Glauber 
970aa3b7c29SSebastian Ott bool zpci_is_enabled(void)
971aa3b7c29SSebastian Ott {
972aa3b7c29SSebastian Ott 	return s390_pci_initialized;
973aa3b7c29SSebastian Ott }
974aa3b7c29SSebastian Ott 
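/*
 * Late PCI base initialization: bail out if PCI was disabled on the command
 * line or the required facilities are missing, enable MIO instructions when
 * available, then set up debugging, memory pools, IRQs and DMA before
 * scanning for PCI functions.
 */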
975cd248341SJan Glauber static int __init pci_base_init(void)
976cd248341SJan Glauber {
977cd248341SJan Glauber 	int rc;
978cd248341SJan Glauber 
9791e5635d1SHeiko Carstens 	if (!s390_pci_probe)
980cd248341SJan Glauber 		return 0;
981cd248341SJan Glauber 
982da78693eSNiklas Schnelle 	if (!test_facility(69) || !test_facility(71)) {
983da78693eSNiklas Schnelle 		pr_info("PCI is not supported because CPU facilities 69 or 71 are not available\n");
984cd248341SJan Glauber 		return 0;
985da78693eSNiklas Schnelle 	}
986cd248341SJan Glauber 
9873322ba0dSNiklas Schnelle 	if (MACHINE_HAS_PCI_MIO) {
98871ba41c9SSebastian Ott 		static_branch_enable(&have_mio);
9899964f396SSebastian Ott 		ctl_set_bit(2, 5);
9909964f396SSebastian Ott 	}
99171ba41c9SSebastian Ott 
992d0b08853SJan Glauber 	rc = zpci_debug_init();
993d0b08853SJan Glauber 	if (rc)
9941f44a225SMartin Schwidefsky 		goto out;
995d0b08853SJan Glauber 
996cd248341SJan Glauber 	rc = zpci_mem_init();
997cd248341SJan Glauber 	if (rc)
998cd248341SJan Glauber 		goto out_mem;
999cd248341SJan Glauber 
10009a4da8a5SJan Glauber 	rc = zpci_irq_init();
10019a4da8a5SJan Glauber 	if (rc)
10029a4da8a5SJan Glauber 		goto out_irq;
10039a4da8a5SJan Glauber 
1004828b35f6SJan Glauber 	rc = zpci_dma_init();
1005828b35f6SJan Glauber 	if (rc)
1006828b35f6SJan Glauber 		goto out_dma;
1007828b35f6SJan Glauber 
10081d578966SSebastian Ott 	rc = clp_scan_pci_devices();
1009a755a45dSJan Glauber 	if (rc)
1010a755a45dSJan Glauber 		goto out_find;
101114c87ba8SNiklas Schnelle 	zpci_bus_scan_busses();
1012a755a45dSJan Glauber 
1013aa3b7c29SSebastian Ott 	s390_pci_initialized = 1;
1014cd248341SJan Glauber 	return 0;
1015cd248341SJan Glauber 
1016a755a45dSJan Glauber out_find:
1017828b35f6SJan Glauber 	zpci_dma_exit();
1018828b35f6SJan Glauber out_dma:
10199a4da8a5SJan Glauber 	zpci_irq_exit();
10209a4da8a5SJan Glauber out_irq:
1021cd248341SJan Glauber 	zpci_mem_exit();
1022cd248341SJan Glauber out_mem:
1023d0b08853SJan Glauber 	zpci_debug_exit();
10241f44a225SMartin Schwidefsky out:
1025cd248341SJan Glauber 	return rc;
1026cd248341SJan Glauber }
102767f43f38SSebastian Ott subsys_initcall_sync(pci_base_init);
1028