xref: /linux/arch/s390/pci/pci.c (revision 4fe204977096e900cb91a3298b05c794ac24f540)
1adbb3901SGreg Kroah-Hartman // SPDX-License-Identifier: GPL-2.0
2cd248341SJan Glauber /*
3cd248341SJan Glauber  * Copyright IBM Corp. 2012
4cd248341SJan Glauber  *
5cd248341SJan Glauber  * Author(s):
6cd248341SJan Glauber  *   Jan Glauber <jang@linux.vnet.ibm.com>
7cd248341SJan Glauber  *
8cd248341SJan Glauber  * The System z PCI code is a rewrite from a prototype by
9cd248341SJan Glauber  * the following people (Kudoz!):
10bedef755SJan Glauber  *   Alexander Schmidt
11bedef755SJan Glauber  *   Christoph Raisch
12bedef755SJan Glauber  *   Hannes Hering
13bedef755SJan Glauber  *   Hoang-Nam Nguyen
14bedef755SJan Glauber  *   Jan-Bernd Themann
15bedef755SJan Glauber  *   Stefan Roscher
16bedef755SJan Glauber  *   Thomas Klein
17cd248341SJan Glauber  */
18cd248341SJan Glauber 
19896cb7e6SGerald Schaefer #define KMSG_COMPONENT "zpci"
20896cb7e6SGerald Schaefer #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
21cd248341SJan Glauber 
22cd248341SJan Glauber #include <linux/kernel.h>
23cd248341SJan Glauber #include <linux/slab.h>
24cd248341SJan Glauber #include <linux/err.h>
25cd248341SJan Glauber #include <linux/export.h>
26cd248341SJan Glauber #include <linux/delay.h>
27cd248341SJan Glauber #include <linux/seq_file.h>
2871ba41c9SSebastian Ott #include <linux/jump_label.h>
29cd248341SJan Glauber #include <linux/pci.h>
30794b8846SNiklas Schnelle #include <linux/printk.h>
31cd248341SJan Glauber 
329a4da8a5SJan Glauber #include <asm/isc.h>
339a4da8a5SJan Glauber #include <asm/airq.h>
34cd248341SJan Glauber #include <asm/facility.h>
35cd248341SJan Glauber #include <asm/pci_insn.h>
36a755a45dSJan Glauber #include <asm/pci_clp.h>
37828b35f6SJan Glauber #include <asm/pci_dma.h>
38cd248341SJan Glauber 
3905bc1be6SPierre Morel #include "pci_bus.h"
40abb95b75SNiklas Schnelle #include "pci_iov.h"
4105bc1be6SPierre Morel 
42cd248341SJan Glauber /* list of all detected zpci devices */
4367f43f38SSebastian Ott static LIST_HEAD(zpci_list);
4457b5918cSSebastian Ott static DEFINE_SPINLOCK(zpci_list_lock);
451f44a225SMartin Schwidefsky 
46969ae01bSNiklas Schnelle static DECLARE_BITMAP(zpci_domain, ZPCI_DOMAIN_BITMAP_SIZE);
47cd248341SJan Glauber static DEFINE_SPINLOCK(zpci_domain_lock);
48cd248341SJan Glauber 
49c506fff3SSebastian Ott #define ZPCI_IOMAP_ENTRIES						\
50c9c13ba4SDenis Efremov 	min(((unsigned long) ZPCI_NR_DEVICES * PCI_STD_NUM_BARS / 2),	\
51c506fff3SSebastian Ott 	    ZPCI_IOMAP_MAX_ENTRIES)
52c506fff3SSebastian Ott 
536cf17f9aSPierre Morel unsigned int s390_pci_no_rid;
546cf17f9aSPierre Morel 
55cd248341SJan Glauber static DEFINE_SPINLOCK(zpci_iomap_lock);
56c506fff3SSebastian Ott static unsigned long *zpci_iomap_bitmap;
57cd248341SJan Glauber struct zpci_iomap_entry *zpci_iomap_start;
58cd248341SJan Glauber EXPORT_SYMBOL_GPL(zpci_iomap_start);
59cd248341SJan Glauber 
6071ba41c9SSebastian Ott DEFINE_STATIC_KEY_FALSE(have_mio);
6171ba41c9SSebastian Ott 
62d0b08853SJan Glauber static struct kmem_cache *zdev_fmb_cache;
63d0b08853SJan Glauber 
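/*
 * get_zdev_by_fid - look up a zpci_dev by its function ID
 *
 * Walks zpci_list under zpci_list_lock and returns the matching zpci_dev,
 * or NULL if no function with this FID is known. Note that no reference
 * is taken on the returned device.
 */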
64cd248341SJan Glauber struct zpci_dev *get_zdev_by_fid(u32 fid)
65cd248341SJan Glauber {
66cd248341SJan Glauber 	struct zpci_dev *tmp, *zdev = NULL;
67cd248341SJan Glauber 
6857b5918cSSebastian Ott 	spin_lock(&zpci_list_lock);
69cd248341SJan Glauber 	list_for_each_entry(tmp, &zpci_list, entry) {
70cd248341SJan Glauber 		if (tmp->fid == fid) {
71cd248341SJan Glauber 			zdev = tmp;
72cd248341SJan Glauber 			break;
73cd248341SJan Glauber 		}
74cd248341SJan Glauber 	}
7557b5918cSSebastian Ott 	spin_unlock(&zpci_list_lock);
76cd248341SJan Glauber 	return zdev;
77cd248341SJan Glauber }
78cd248341SJan Glauber 
7901553d9aSSebastian Ott void zpci_remove_reserved_devices(void)
8001553d9aSSebastian Ott {
8101553d9aSSebastian Ott 	struct zpci_dev *tmp, *zdev;
8201553d9aSSebastian Ott 	enum zpci_state state;
8301553d9aSSebastian Ott 	LIST_HEAD(remove);
8401553d9aSSebastian Ott 
8501553d9aSSebastian Ott 	spin_lock(&zpci_list_lock);
8601553d9aSSebastian Ott 	list_for_each_entry_safe(zdev, tmp, &zpci_list, entry) {
8701553d9aSSebastian Ott 		if (zdev->state == ZPCI_FN_STATE_STANDBY &&
8801553d9aSSebastian Ott 		    !clp_get_state(zdev->fid, &state) &&
8901553d9aSSebastian Ott 		    state == ZPCI_FN_STATE_RESERVED)
9001553d9aSSebastian Ott 			list_move_tail(&zdev->entry, &remove);
9101553d9aSSebastian Ott 	}
9201553d9aSSebastian Ott 	spin_unlock(&zpci_list_lock);
9301553d9aSSebastian Ott 
9401553d9aSSebastian Ott 	list_for_each_entry_safe(zdev, tmp, &remove, entry)
95a46044a9SNiklas Schnelle 		zpci_device_reserved(zdev);
96cd248341SJan Glauber }
97cd248341SJan Glauber 
98cd248341SJan Glauber int pci_domain_nr(struct pci_bus *bus)
99cd248341SJan Glauber {
10005bc1be6SPierre Morel 	return ((struct zpci_bus *) bus->sysdata)->domain_nr;
101cd248341SJan Glauber }
102cd248341SJan Glauber EXPORT_SYMBOL_GPL(pci_domain_nr);
103cd248341SJan Glauber 
104cd248341SJan Glauber int pci_proc_domain(struct pci_bus *bus)
105cd248341SJan Glauber {
106cd248341SJan Glauber 	return pci_domain_nr(bus);
107cd248341SJan Glauber }
108cd248341SJan Glauber EXPORT_SYMBOL_GPL(pci_proc_domain);
109cd248341SJan Glauber 
110828b35f6SJan Glauber /* Modify PCI: Register I/O address translation parameters */
111828b35f6SJan Glauber int zpci_register_ioat(struct zpci_dev *zdev, u8 dmaas,
112828b35f6SJan Glauber 		       u64 base, u64 limit, u64 iota)
113828b35f6SJan Glauber {
11472570834SSebastian Ott 	u64 req = ZPCI_CREATE_REQ(zdev->fh, dmaas, ZPCI_MOD_FC_REG_IOAT);
11572570834SSebastian Ott 	struct zpci_fib fib = {0};
1161f3f7681SNiklas Schnelle 	u8 cc, status;
117828b35f6SJan Glauber 
118828b35f6SJan Glauber 	WARN_ON_ONCE(iota & 0x3fff);
11972570834SSebastian Ott 	fib.pba = base;
12072570834SSebastian Ott 	fib.pal = limit;
12172570834SSebastian Ott 	fib.iota = iota | ZPCI_IOTA_RTTO_FLAG;
1221f3f7681SNiklas Schnelle 	cc = zpci_mod_fc(req, &fib, &status);
1231f3f7681SNiklas Schnelle 	if (cc)
1241f3f7681SNiklas Schnelle 		zpci_dbg(3, "reg ioat fid:%x, cc:%d, status:%d\n", zdev->fid, cc, status);
1251f3f7681SNiklas Schnelle 	return cc;
126828b35f6SJan Glauber }
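
/*
 * Usage sketch (illustrative only, not the actual call site): the zPCI DMA
 * setup would typically register the root of the device's I/O translation
 * table for DMA address space 0, roughly along the lines of
 *
 *	cc = zpci_register_ioat(zdev, 0, zdev->start_dma, zdev->end_dma,
 *				virt_to_phys(zdev->dma_table));
 *
 * The start_dma/end_dma/dma_table fields are assumed from struct zpci_dev
 * as used elsewhere; the real caller lives in the zPCI DMA code.
 */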
127828b35f6SJan Glauber 
128828b35f6SJan Glauber /* Modify PCI: Unregister I/O address translation parameters */
129828b35f6SJan Glauber int zpci_unregister_ioat(struct zpci_dev *zdev, u8 dmaas)
130828b35f6SJan Glauber {
13172570834SSebastian Ott 	u64 req = ZPCI_CREATE_REQ(zdev->fh, dmaas, ZPCI_MOD_FC_DEREG_IOAT);
13272570834SSebastian Ott 	struct zpci_fib fib = {0};
13372570834SSebastian Ott 	u8 cc, status;
134828b35f6SJan Glauber 
13572570834SSebastian Ott 	cc = zpci_mod_fc(req, &fib, &status);
1361f3f7681SNiklas Schnelle 	if (cc)
1371f3f7681SNiklas Schnelle 		zpci_dbg(3, "unreg ioat fid:%x, cc:%d, status:%d\n", zdev->fid, cc, status);
1381f3f7681SNiklas Schnelle 	return cc;
139828b35f6SJan Glauber }
140828b35f6SJan Glauber 
141d0b08853SJan Glauber /* Modify PCI: Set PCI function measurement parameters */
142d0b08853SJan Glauber int zpci_fmb_enable_device(struct zpci_dev *zdev)
143d0b08853SJan Glauber {
1444e5bd780SSebastian Ott 	u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, ZPCI_MOD_FC_SET_MEASURE);
1454e5bd780SSebastian Ott 	struct zpci_fib fib = {0};
1464e5bd780SSebastian Ott 	u8 cc, status;
147d0b08853SJan Glauber 
1480b7589ecSSebastian Ott 	if (zdev->fmb || sizeof(*zdev->fmb) < zdev->fmb_length)
149d0b08853SJan Glauber 		return -EINVAL;
150d0b08853SJan Glauber 
15108b42124SWei Yongjun 	zdev->fmb = kmem_cache_zalloc(zdev_fmb_cache, GFP_KERNEL);
152d0b08853SJan Glauber 	if (!zdev->fmb)
153d0b08853SJan Glauber 		return -ENOMEM;
154d0b08853SJan Glauber 	WARN_ON((u64) zdev->fmb & 0xf);
155d0b08853SJan Glauber 
1566001018aSSebastian Ott 	/* reset software counters */
1576001018aSSebastian Ott 	atomic64_set(&zdev->allocated_pages, 0);
1586001018aSSebastian Ott 	atomic64_set(&zdev->mapped_pages, 0);
1596001018aSSebastian Ott 	atomic64_set(&zdev->unmapped_pages, 0);
1606001018aSSebastian Ott 
1614e5bd780SSebastian Ott 	fib.fmb_addr = virt_to_phys(zdev->fmb);
1624e5bd780SSebastian Ott 	cc = zpci_mod_fc(req, &fib, &status);
1634e5bd780SSebastian Ott 	if (cc) {
1644e5bd780SSebastian Ott 		kmem_cache_free(zdev_fmb_cache, zdev->fmb);
1654e5bd780SSebastian Ott 		zdev->fmb = NULL;
1664e5bd780SSebastian Ott 	}
1674e5bd780SSebastian Ott 	return cc ? -EIO : 0;
168d0b08853SJan Glauber }
169d0b08853SJan Glauber 
170d0b08853SJan Glauber /* Modify PCI: Disable PCI function measurement */
171d0b08853SJan Glauber int zpci_fmb_disable_device(struct zpci_dev *zdev)
172d0b08853SJan Glauber {
1734e5bd780SSebastian Ott 	u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, ZPCI_MOD_FC_SET_MEASURE);
1744e5bd780SSebastian Ott 	struct zpci_fib fib = {0};
1754e5bd780SSebastian Ott 	u8 cc, status;
176d0b08853SJan Glauber 
177d0b08853SJan Glauber 	if (!zdev->fmb)
178d0b08853SJan Glauber 		return -EINVAL;
179d0b08853SJan Glauber 
180d0b08853SJan Glauber 	/* Function measurement is disabled if fmb address is zero */
1814e5bd780SSebastian Ott 	cc = zpci_mod_fc(req, &fib, &status);
1824e5bd780SSebastian Ott 	if (cc == 3) /* Function already gone. */
1834e5bd780SSebastian Ott 		cc = 0;
184d0b08853SJan Glauber 
1854e5bd780SSebastian Ott 	if (!cc) {
186d0b08853SJan Glauber 		kmem_cache_free(zdev_fmb_cache, zdev->fmb);
187d0b08853SJan Glauber 		zdev->fmb = NULL;
1884e5bd780SSebastian Ott 	}
1894e5bd780SSebastian Ott 	return cc ? -EIO : 0;
190d0b08853SJan Glauber }
191d0b08853SJan Glauber 
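/*
 * Config space accessors: the PCI load/store instructions exchange config
 * data in little-endian byte order, so loads below are converted with
 * le64_to_cpu() and shifted down by (8 - len) * 8 to extract a value of
 * the requested width, while stores shift the value up and convert it
 * back with cpu_to_le64() before writing.
 */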
192cd248341SJan Glauber static int zpci_cfg_load(struct zpci_dev *zdev, int offset, u32 *val, u8 len)
193cd248341SJan Glauber {
194cd248341SJan Glauber 	u64 req = ZPCI_CREATE_REQ(zdev->fh, ZPCI_PCIAS_CFGSPC, len);
195cd248341SJan Glauber 	u64 data;
196cd248341SJan Glauber 	int rc;
197cd248341SJan Glauber 
19881deca12SSebastian Ott 	rc = __zpci_load(&data, req, offset);
199b170bad4SSebastian Ott 	if (!rc) {
2005064cd35SSebastian Ott 		data = le64_to_cpu((__force __le64) data);
2015064cd35SSebastian Ott 		data >>= (8 - len) * 8;
202cd248341SJan Glauber 		*val = (u32) data;
203b170bad4SSebastian Ott 	} else
204cd248341SJan Glauber 		*val = 0xffffffff;
205cd248341SJan Glauber 	return rc;
206cd248341SJan Glauber }
207cd248341SJan Glauber 
208cd248341SJan Glauber static int zpci_cfg_store(struct zpci_dev *zdev, int offset, u32 val, u8 len)
209cd248341SJan Glauber {
210cd248341SJan Glauber 	u64 req = ZPCI_CREATE_REQ(zdev->fh, ZPCI_PCIAS_CFGSPC, len);
211cd248341SJan Glauber 	u64 data = val;
212cd248341SJan Glauber 	int rc;
213cd248341SJan Glauber 
2145064cd35SSebastian Ott 	data <<= (8 - len) * 8;
2155064cd35SSebastian Ott 	data = (__force u64) cpu_to_le64(data);
21681deca12SSebastian Ott 	rc = __zpci_store(data, req, offset);
217cd248341SJan Glauber 	return rc;
218cd248341SJan Glauber }
219cd248341SJan Glauber 
220cd248341SJan Glauber resource_size_t pcibios_align_resource(void *data, const struct resource *res,
221cd248341SJan Glauber 				       resource_size_t size,
222cd248341SJan Glauber 				       resource_size_t align)
223cd248341SJan Glauber {
224cd248341SJan Glauber 	return 0;
225cd248341SJan Glauber }
226cd248341SJan Glauber 
22787bc359bSJan Glauber /* combine single writes by using store-block insn */
22887bc359bSJan Glauber void __iowrite64_copy(void __iomem *to, const void *from, size_t count)
22987bc359bSJan Glauber {
23087bc359bSJan Glauber 	zpci_memcpy_toio(to, from, count);
23187bc359bSJan Glauber }
23287bc359bSJan Glauber 
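/*
 * Without MIO support there is no CPU-mapped PCI memory on s390: the
 * "addresses" of PCI resources are iomap cookies interpreted by the zPCI
 * load/store helpers, so __ioremap() just hands the address back
 * unchanged. With MIO, the address is a real one and gets mapped into
 * the vmalloc area with the requested page protection.
 */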
233b02002ccSNiklas Schnelle static void __iomem *__ioremap(phys_addr_t addr, size_t size, pgprot_t prot)
23471ba41c9SSebastian Ott {
235a999eb96SNiklas Schnelle 	unsigned long offset, vaddr;
23671ba41c9SSebastian Ott 	struct vm_struct *area;
237a999eb96SNiklas Schnelle 	phys_addr_t last_addr;
23871ba41c9SSebastian Ott 
239a999eb96SNiklas Schnelle 	last_addr = addr + size - 1;
240a999eb96SNiklas Schnelle 	if (!size || last_addr < addr)
24171ba41c9SSebastian Ott 		return NULL;
24271ba41c9SSebastian Ott 
24371ba41c9SSebastian Ott 	if (!static_branch_unlikely(&have_mio))
244a999eb96SNiklas Schnelle 		return (void __iomem *) addr;
24571ba41c9SSebastian Ott 
246a999eb96SNiklas Schnelle 	offset = addr & ~PAGE_MASK;
247a999eb96SNiklas Schnelle 	addr &= PAGE_MASK;
24871ba41c9SSebastian Ott 	size = PAGE_ALIGN(size + offset);
24971ba41c9SSebastian Ott 	area = get_vm_area(size, VM_IOREMAP);
25071ba41c9SSebastian Ott 	if (!area)
25171ba41c9SSebastian Ott 		return NULL;
25271ba41c9SSebastian Ott 
253a999eb96SNiklas Schnelle 	vaddr = (unsigned long) area->addr;
254b02002ccSNiklas Schnelle 	if (ioremap_page_range(vaddr, vaddr + size, addr, prot)) {
255a999eb96SNiklas Schnelle 		free_vm_area(area);
25671ba41c9SSebastian Ott 		return NULL;
25771ba41c9SSebastian Ott 	}
25871ba41c9SSebastian Ott 	return (void __iomem *) ((unsigned long) area->addr + offset);
25971ba41c9SSebastian Ott }
260b02002ccSNiklas Schnelle 
261b02002ccSNiklas Schnelle void __iomem *ioremap_prot(phys_addr_t addr, size_t size, unsigned long prot)
262b02002ccSNiklas Schnelle {
263b02002ccSNiklas Schnelle 	return __ioremap(addr, size, __pgprot(prot));
264b02002ccSNiklas Schnelle }
265b02002ccSNiklas Schnelle EXPORT_SYMBOL(ioremap_prot);
266b02002ccSNiklas Schnelle 
267b02002ccSNiklas Schnelle void __iomem *ioremap(phys_addr_t addr, size_t size)
268b02002ccSNiklas Schnelle {
269b02002ccSNiklas Schnelle 	return __ioremap(addr, size, PAGE_KERNEL);
270b02002ccSNiklas Schnelle }
27171ba41c9SSebastian Ott EXPORT_SYMBOL(ioremap);
27271ba41c9SSebastian Ott 
273b02002ccSNiklas Schnelle void __iomem *ioremap_wc(phys_addr_t addr, size_t size)
274b02002ccSNiklas Schnelle {
275b02002ccSNiklas Schnelle 	return __ioremap(addr, size, pgprot_writecombine(PAGE_KERNEL));
276b02002ccSNiklas Schnelle }
277b02002ccSNiklas Schnelle EXPORT_SYMBOL(ioremap_wc);
278b02002ccSNiklas Schnelle 
279b02002ccSNiklas Schnelle void __iomem *ioremap_wt(phys_addr_t addr, size_t size)
280b02002ccSNiklas Schnelle {
281b02002ccSNiklas Schnelle 	return __ioremap(addr, size, pgprot_writethrough(PAGE_KERNEL));
282b02002ccSNiklas Schnelle }
283b02002ccSNiklas Schnelle EXPORT_SYMBOL(ioremap_wt);
284b02002ccSNiklas Schnelle 
28571ba41c9SSebastian Ott void iounmap(volatile void __iomem *addr)
28671ba41c9SSebastian Ott {
28771ba41c9SSebastian Ott 	if (static_branch_likely(&have_mio))
28871ba41c9SSebastian Ott 		vunmap((__force void *) ((unsigned long) addr & PAGE_MASK));
28971ba41c9SSebastian Ott }
29071ba41c9SSebastian Ott EXPORT_SYMBOL(iounmap);
29171ba41c9SSebastian Ott 
292cd248341SJan Glauber /* Create a virtual mapping cookie for a PCI BAR */
29371ba41c9SSebastian Ott static void __iomem *pci_iomap_range_fh(struct pci_dev *pdev, int bar,
29471ba41c9SSebastian Ott 					unsigned long offset, unsigned long max)
295cd248341SJan Glauber {
296198a5278SSebastian Ott 	struct zpci_dev *zdev = to_zpci(pdev);
297cd248341SJan Glauber 	int idx;
298cd248341SJan Glauber 
299cd248341SJan Glauber 	idx = zdev->bars[bar].map_idx;
300cd248341SJan Glauber 	spin_lock(&zpci_iomap_lock);
301f5e44f82SSebastian Ott 	/* Detect overrun */
302f5e44f82SSebastian Ott 	WARN_ON(!++zpci_iomap_start[idx].count);
303cd248341SJan Glauber 	zpci_iomap_start[idx].fh = zdev->fh;
304cd248341SJan Glauber 	zpci_iomap_start[idx].bar = bar;
305cd248341SJan Glauber 	spin_unlock(&zpci_iomap_lock);
306cd248341SJan Glauber 
3079e00caaeSSebastian Ott 	return (void __iomem *) ZPCI_ADDR(idx) + offset;
308cd248341SJan Glauber }
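
/*
 * Note that the value returned above is not a CPU-mapped address but a
 * cookie: ZPCI_ADDR(idx) encodes the index into zpci_iomap_start[], whose
 * entry records the function handle and BAR to use. pci_iounmap_fh() and
 * the zPCI memory access helpers recover the index again via ZPCI_IDX().
 */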
30971ba41c9SSebastian Ott 
31071ba41c9SSebastian Ott static void __iomem *pci_iomap_range_mio(struct pci_dev *pdev, int bar,
31171ba41c9SSebastian Ott 					 unsigned long offset,
31271ba41c9SSebastian Ott 					 unsigned long max)
31371ba41c9SSebastian Ott {
31471ba41c9SSebastian Ott 	unsigned long barsize = pci_resource_len(pdev, bar);
31571ba41c9SSebastian Ott 	struct zpci_dev *zdev = to_zpci(pdev);
31671ba41c9SSebastian Ott 	void __iomem *iova;
31771ba41c9SSebastian Ott 
31871ba41c9SSebastian Ott 	iova = ioremap((unsigned long) zdev->bars[bar].mio_wt, barsize);
31971ba41c9SSebastian Ott 	return iova ? iova + offset : iova;
32071ba41c9SSebastian Ott }
32171ba41c9SSebastian Ott 
32271ba41c9SSebastian Ott void __iomem *pci_iomap_range(struct pci_dev *pdev, int bar,
32371ba41c9SSebastian Ott 			      unsigned long offset, unsigned long max)
32471ba41c9SSebastian Ott {
325c9c13ba4SDenis Efremov 	if (bar >= PCI_STD_NUM_BARS || !pci_resource_len(pdev, bar))
32671ba41c9SSebastian Ott 		return NULL;
32771ba41c9SSebastian Ott 
32871ba41c9SSebastian Ott 	if (static_branch_likely(&have_mio))
32971ba41c9SSebastian Ott 		return pci_iomap_range_mio(pdev, bar, offset, max);
33071ba41c9SSebastian Ott 	else
33171ba41c9SSebastian Ott 		return pci_iomap_range_fh(pdev, bar, offset, max);
33271ba41c9SSebastian Ott }
333d9426083SSebastian Ott EXPORT_SYMBOL(pci_iomap_range);
3348cfc99b5SMichael S. Tsirkin 
3358cfc99b5SMichael S. Tsirkin void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen)
3368cfc99b5SMichael S. Tsirkin {
3378cfc99b5SMichael S. Tsirkin 	return pci_iomap_range(dev, bar, 0, maxlen);
3388cfc99b5SMichael S. Tsirkin }
3398cfc99b5SMichael S. Tsirkin EXPORT_SYMBOL(pci_iomap);
340cd248341SJan Glauber 
34171ba41c9SSebastian Ott static void __iomem *pci_iomap_wc_range_mio(struct pci_dev *pdev, int bar,
34271ba41c9SSebastian Ott 					    unsigned long offset, unsigned long max)
34371ba41c9SSebastian Ott {
34471ba41c9SSebastian Ott 	unsigned long barsize = pci_resource_len(pdev, bar);
34571ba41c9SSebastian Ott 	struct zpci_dev *zdev = to_zpci(pdev);
34671ba41c9SSebastian Ott 	void __iomem *iova;
34771ba41c9SSebastian Ott 
34871ba41c9SSebastian Ott 	iova = ioremap((unsigned long) zdev->bars[bar].mio_wb, barsize);
34971ba41c9SSebastian Ott 	return iova ? iova + offset : iova;
35071ba41c9SSebastian Ott }
35171ba41c9SSebastian Ott 
35271ba41c9SSebastian Ott void __iomem *pci_iomap_wc_range(struct pci_dev *pdev, int bar,
35371ba41c9SSebastian Ott 				 unsigned long offset, unsigned long max)
35471ba41c9SSebastian Ott {
355c9c13ba4SDenis Efremov 	if (bar >= PCI_STD_NUM_BARS || !pci_resource_len(pdev, bar))
35671ba41c9SSebastian Ott 		return NULL;
35771ba41c9SSebastian Ott 
35871ba41c9SSebastian Ott 	if (static_branch_likely(&have_mio))
35971ba41c9SSebastian Ott 		return pci_iomap_wc_range_mio(pdev, bar, offset, max);
36071ba41c9SSebastian Ott 	else
36171ba41c9SSebastian Ott 		return pci_iomap_range_fh(pdev, bar, offset, max);
36271ba41c9SSebastian Ott }
36371ba41c9SSebastian Ott EXPORT_SYMBOL(pci_iomap_wc_range);
36471ba41c9SSebastian Ott 
36571ba41c9SSebastian Ott void __iomem *pci_iomap_wc(struct pci_dev *dev, int bar, unsigned long maxlen)
36671ba41c9SSebastian Ott {
36771ba41c9SSebastian Ott 	return pci_iomap_wc_range(dev, bar, 0, maxlen);
36871ba41c9SSebastian Ott }
36971ba41c9SSebastian Ott EXPORT_SYMBOL(pci_iomap_wc);
37071ba41c9SSebastian Ott 
37171ba41c9SSebastian Ott static void pci_iounmap_fh(struct pci_dev *pdev, void __iomem *addr)
372cd248341SJan Glauber {
3739e00caaeSSebastian Ott 	unsigned int idx = ZPCI_IDX(addr);
374cd248341SJan Glauber 
375cd248341SJan Glauber 	spin_lock(&zpci_iomap_lock);
3768cfc99b5SMichael S. Tsirkin 	/* Detect underrun */
377f5e44f82SSebastian Ott 	WARN_ON(!zpci_iomap_start[idx].count);
3788cfc99b5SMichael S. Tsirkin 	if (!--zpci_iomap_start[idx].count) {
379cd248341SJan Glauber 		zpci_iomap_start[idx].fh = 0;
380cd248341SJan Glauber 		zpci_iomap_start[idx].bar = 0;
3818cfc99b5SMichael S. Tsirkin 	}
382cd248341SJan Glauber 	spin_unlock(&zpci_iomap_lock);
383cd248341SJan Glauber }
38471ba41c9SSebastian Ott 
38571ba41c9SSebastian Ott static void pci_iounmap_mio(struct pci_dev *pdev, void __iomem *addr)
38671ba41c9SSebastian Ott {
38771ba41c9SSebastian Ott 	iounmap(addr);
38871ba41c9SSebastian Ott }
38971ba41c9SSebastian Ott 
39071ba41c9SSebastian Ott void pci_iounmap(struct pci_dev *pdev, void __iomem *addr)
39171ba41c9SSebastian Ott {
39271ba41c9SSebastian Ott 	if (static_branch_likely(&have_mio))
39371ba41c9SSebastian Ott 		pci_iounmap_mio(pdev, addr);
39471ba41c9SSebastian Ott 	else
39571ba41c9SSebastian Ott 		pci_iounmap_fh(pdev, addr);
39671ba41c9SSebastian Ott }
397d9426083SSebastian Ott EXPORT_SYMBOL(pci_iounmap);
398cd248341SJan Glauber 
399cd248341SJan Glauber static int pci_read(struct pci_bus *bus, unsigned int devfn, int where,
400cd248341SJan Glauber 		    int size, u32 *val)
401cd248341SJan Glauber {
40244510d6fSPierre Morel 	struct zpci_dev *zdev = get_zdev_by_bus(bus, devfn);
403cd248341SJan Glauber 
40444510d6fSPierre Morel 	return (zdev) ? zpci_cfg_load(zdev, where, val, size) : -ENODEV;
405cd248341SJan Glauber }
406cd248341SJan Glauber 
407cd248341SJan Glauber static int pci_write(struct pci_bus *bus, unsigned int devfn, int where,
408cd248341SJan Glauber 		     int size, u32 val)
409cd248341SJan Glauber {
41044510d6fSPierre Morel 	struct zpci_dev *zdev = get_zdev_by_bus(bus, devfn);
411cd248341SJan Glauber 
41244510d6fSPierre Morel 	return (zdev) ? zpci_cfg_store(zdev, where, val, size) : -ENODEV;
413cd248341SJan Glauber }
414cd248341SJan Glauber 
415cd248341SJan Glauber static struct pci_ops pci_root_ops = {
416cd248341SJan Glauber 	.read = pci_read,
417cd248341SJan Glauber 	.write = pci_write,
418cd248341SJan Glauber };
419cd248341SJan Glauber 
4201803ba2dSSebastian Ott static void zpci_map_resources(struct pci_dev *pdev)
421cd248341SJan Glauber {
42271ba41c9SSebastian Ott 	struct zpci_dev *zdev = to_zpci(pdev);
423cd248341SJan Glauber 	resource_size_t len;
424cd248341SJan Glauber 	int i;
425cd248341SJan Glauber 
426c9c13ba4SDenis Efremov 	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
427cd248341SJan Glauber 		len = pci_resource_len(pdev, i);
428cd248341SJan Glauber 		if (!len)
429cd248341SJan Glauber 			continue;
43071ba41c9SSebastian Ott 
431c7ff0e91SSebastian Ott 		if (zpci_use_mio(zdev))
43271ba41c9SSebastian Ott 			pdev->resource[i].start =
433df057c91SNiklas Schnelle 				(resource_size_t __force) zdev->bars[i].mio_wt;
43471ba41c9SSebastian Ott 		else
435c7ff0e91SSebastian Ott 			pdev->resource[i].start = (resource_size_t __force)
436c7ff0e91SSebastian Ott 				pci_iomap_range_fh(pdev, i, 0, 0);
437cd248341SJan Glauber 		pdev->resource[i].end = pdev->resource[i].start + len - 1;
438cd248341SJan Glauber 	}
439cfbb4a7aSSebastian Ott 
440abb95b75SNiklas Schnelle 	zpci_iov_map_resources(pdev);
441944239c5SSebastian Ott }
442944239c5SSebastian Ott 
4431803ba2dSSebastian Ott static void zpci_unmap_resources(struct pci_dev *pdev)
444944239c5SSebastian Ott {
445c7ff0e91SSebastian Ott 	struct zpci_dev *zdev = to_zpci(pdev);
446944239c5SSebastian Ott 	resource_size_t len;
447944239c5SSebastian Ott 	int i;
448944239c5SSebastian Ott 
449c7ff0e91SSebastian Ott 	if (zpci_use_mio(zdev))
45071ba41c9SSebastian Ott 		return;
45171ba41c9SSebastian Ott 
452c9c13ba4SDenis Efremov 	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
453944239c5SSebastian Ott 		len = pci_resource_len(pdev, i);
454944239c5SSebastian Ott 		if (!len)
455944239c5SSebastian Ott 			continue;
456c7ff0e91SSebastian Ott 		pci_iounmap_fh(pdev, (void __iomem __force *)
4575b9f2081SMartin Schwidefsky 			       pdev->resource[i].start);
458944239c5SSebastian Ott 	}
459944239c5SSebastian Ott }
460cd248341SJan Glauber 
461cd248341SJan Glauber static int zpci_alloc_iomap(struct zpci_dev *zdev)
462cd248341SJan Glauber {
463bf19c94dSSebastian Ott 	unsigned long entry;
464cd248341SJan Glauber 
465cd248341SJan Glauber 	spin_lock(&zpci_iomap_lock);
466c506fff3SSebastian Ott 	entry = find_first_zero_bit(zpci_iomap_bitmap, ZPCI_IOMAP_ENTRIES);
467c506fff3SSebastian Ott 	if (entry == ZPCI_IOMAP_ENTRIES) {
468cd248341SJan Glauber 		spin_unlock(&zpci_iomap_lock);
469cd248341SJan Glauber 		return -ENOSPC;
470cd248341SJan Glauber 	}
471c506fff3SSebastian Ott 	set_bit(entry, zpci_iomap_bitmap);
472cd248341SJan Glauber 	spin_unlock(&zpci_iomap_lock);
473cd248341SJan Glauber 	return entry;
474cd248341SJan Glauber }
475cd248341SJan Glauber 
476cd248341SJan Glauber static void zpci_free_iomap(struct zpci_dev *zdev, int entry)
477cd248341SJan Glauber {
478cd248341SJan Glauber 	spin_lock(&zpci_iomap_lock);
479cd248341SJan Glauber 	memset(&zpci_iomap_start[entry], 0, sizeof(struct zpci_iomap_entry));
480c506fff3SSebastian Ott 	clear_bit(entry, zpci_iomap_bitmap);
481cd248341SJan Glauber 	spin_unlock(&zpci_iomap_lock);
482cd248341SJan Glauber }
483cd248341SJan Glauber 
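/*
 * Write an updated zdev->fh into all in-use iomap entries of the device
 * so that cookies handed out earlier (non-MIO case) keep referencing the
 * current function handle.
 */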
484*4fe20497SNiklas Schnelle static void zpci_do_update_iomap_fh(struct zpci_dev *zdev, u32 fh)
485*4fe20497SNiklas Schnelle {
486*4fe20497SNiklas Schnelle 	int bar, idx;
487*4fe20497SNiklas Schnelle 
488*4fe20497SNiklas Schnelle 	spin_lock(&zpci_iomap_lock);
489*4fe20497SNiklas Schnelle 	for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
490*4fe20497SNiklas Schnelle 		if (!zdev->bars[bar].size)
491*4fe20497SNiklas Schnelle 			continue;
492*4fe20497SNiklas Schnelle 		idx = zdev->bars[bar].map_idx;
493*4fe20497SNiklas Schnelle 		if (!zpci_iomap_start[idx].count)
494*4fe20497SNiklas Schnelle 			continue;
495*4fe20497SNiklas Schnelle 		WRITE_ONCE(zpci_iomap_start[idx].fh, zdev->fh);
496*4fe20497SNiklas Schnelle 	}
497*4fe20497SNiklas Schnelle 	spin_unlock(&zpci_iomap_lock);
498*4fe20497SNiklas Schnelle }
499*4fe20497SNiklas Schnelle 
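/*
 * zpci_update_fh() - update the cached function handle of a zpci_dev.
 * A zero or unchanged handle is ignored; for non-MIO devices that are
 * enabled and have resources the iomap table is refreshed as well.
 */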
500*4fe20497SNiklas Schnelle void zpci_update_fh(struct zpci_dev *zdev, u32 fh)
501*4fe20497SNiklas Schnelle {
502*4fe20497SNiklas Schnelle 	if (!fh || zdev->fh == fh)
503*4fe20497SNiklas Schnelle 		return;
504*4fe20497SNiklas Schnelle 
505*4fe20497SNiklas Schnelle 	zdev->fh = fh;
506*4fe20497SNiklas Schnelle 	if (zpci_use_mio(zdev))
507*4fe20497SNiklas Schnelle 		return;
508*4fe20497SNiklas Schnelle 	if (zdev->has_resources && zdev_enabled(zdev))
509*4fe20497SNiklas Schnelle 		zpci_do_update_iomap_fh(zdev, fh);
510*4fe20497SNiklas Schnelle }
511*4fe20497SNiklas Schnelle 
5127a572a3aSSebastian Ott static struct resource *__alloc_res(struct zpci_dev *zdev, unsigned long start,
5137a572a3aSSebastian Ott 				    unsigned long size, unsigned long flags)
5147a572a3aSSebastian Ott {
5157a572a3aSSebastian Ott 	struct resource *r;
5167a572a3aSSebastian Ott 
5177a572a3aSSebastian Ott 	r = kzalloc(sizeof(*r), GFP_KERNEL);
5187a572a3aSSebastian Ott 	if (!r)
5197a572a3aSSebastian Ott 		return NULL;
5207a572a3aSSebastian Ott 
5217a572a3aSSebastian Ott 	r->start = start;
5227a572a3aSSebastian Ott 	r->end = r->start + size - 1;
5237a572a3aSSebastian Ott 	r->flags = flags;
5247a572a3aSSebastian Ott 	r->name = zdev->res_name;
5257a572a3aSSebastian Ott 
5267a572a3aSSebastian Ott 	if (request_resource(&iomem_resource, r)) {
5277a572a3aSSebastian Ott 		kfree(r);
5287a572a3aSSebastian Ott 		return NULL;
5297a572a3aSSebastian Ott 	}
5307a572a3aSSebastian Ott 	return r;
5317a572a3aSSebastian Ott }
5327a572a3aSSebastian Ott 
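/*
 * Set up a struct resource for each implemented BAR. The BAR properties
 * come from the CLP query stored in zdev->bars[]: bit 0x8 of val marks the
 * BAR prefetchable, bit 0x4 marks it 64-bit, and size holds the log2 of
 * the BAR size.
 */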
53305bc1be6SPierre Morel int zpci_setup_bus_resources(struct zpci_dev *zdev,
5347a572a3aSSebastian Ott 			     struct list_head *resources)
5357a572a3aSSebastian Ott {
5367a572a3aSSebastian Ott 	unsigned long addr, size, flags;
5377a572a3aSSebastian Ott 	struct resource *res;
5387a572a3aSSebastian Ott 	int i, entry;
5397a572a3aSSebastian Ott 
5407a572a3aSSebastian Ott 	snprintf(zdev->res_name, sizeof(zdev->res_name),
54105bc1be6SPierre Morel 		 "PCI Bus %04x:%02x", zdev->uid, ZPCI_BUS_NR);
5427a572a3aSSebastian Ott 
543c9c13ba4SDenis Efremov 	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
5447a572a3aSSebastian Ott 		if (!zdev->bars[i].size)
5457a572a3aSSebastian Ott 			continue;
5467a572a3aSSebastian Ott 		entry = zpci_alloc_iomap(zdev);
5477a572a3aSSebastian Ott 		if (entry < 0)
5487a572a3aSSebastian Ott 			return entry;
5497a572a3aSSebastian Ott 		zdev->bars[i].map_idx = entry;
5507a572a3aSSebastian Ott 
5517a572a3aSSebastian Ott 		/* only MMIO is supported */
5527a572a3aSSebastian Ott 		flags = IORESOURCE_MEM;
5537a572a3aSSebastian Ott 		if (zdev->bars[i].val & 8)
5547a572a3aSSebastian Ott 			flags |= IORESOURCE_PREFETCH;
5557a572a3aSSebastian Ott 		if (zdev->bars[i].val & 4)
5567a572a3aSSebastian Ott 			flags |= IORESOURCE_MEM_64;
5577a572a3aSSebastian Ott 
558c7ff0e91SSebastian Ott 		if (zpci_use_mio(zdev))
559df057c91SNiklas Schnelle 			addr = (unsigned long) zdev->bars[i].mio_wt;
560dcd33b23SSebastian Ott 		else
5619e00caaeSSebastian Ott 			addr = ZPCI_ADDR(entry);
5627a572a3aSSebastian Ott 		size = 1UL << zdev->bars[i].size;
5637a572a3aSSebastian Ott 
5647a572a3aSSebastian Ott 		res = __alloc_res(zdev, addr, size, flags);
5657a572a3aSSebastian Ott 		if (!res) {
5667a572a3aSSebastian Ott 			zpci_free_iomap(zdev, entry);
5677a572a3aSSebastian Ott 			return -ENOMEM;
5687a572a3aSSebastian Ott 		}
5697a572a3aSSebastian Ott 		zdev->bars[i].res = res;
5707a572a3aSSebastian Ott 		pci_add_resource(resources, res);
5717a572a3aSSebastian Ott 	}
572a50297cfSNiklas Schnelle 	zdev->has_resources = 1;
5737a572a3aSSebastian Ott 
5747a572a3aSSebastian Ott 	return 0;
5757a572a3aSSebastian Ott }
5767a572a3aSSebastian Ott 
5777a572a3aSSebastian Ott static void zpci_cleanup_bus_resources(struct zpci_dev *zdev)
5787a572a3aSSebastian Ott {
5797a572a3aSSebastian Ott 	int i;
5807a572a3aSSebastian Ott 
581c9c13ba4SDenis Efremov 	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
5822b1df724SSebastian Ott 		if (!zdev->bars[i].size || !zdev->bars[i].res)
5837a572a3aSSebastian Ott 			continue;
5847a572a3aSSebastian Ott 
5857a572a3aSSebastian Ott 		zpci_free_iomap(zdev, zdev->bars[i].map_idx);
5867a572a3aSSebastian Ott 		release_resource(zdev->bars[i].res);
5877a572a3aSSebastian Ott 		kfree(zdev->bars[i].res);
5887a572a3aSSebastian Ott 	}
589a50297cfSNiklas Schnelle 	zdev->has_resources = 0;
5907a572a3aSSebastian Ott }
5917a572a3aSSebastian Ott 
59206dc660eSOliver O'Halloran int pcibios_device_add(struct pci_dev *pdev)
593af0a8a84SSebastian Ott {
5942a671f77SNiklas Schnelle 	struct zpci_dev *zdev = to_zpci(pdev);
595cb809182SSebastian Ott 	struct resource *res;
596cb809182SSebastian Ott 	int i;
597cb809182SSebastian Ott 
5982a671f77SNiklas Schnelle 	/* The pdev has a reference to the zdev via its bus */
5992a671f77SNiklas Schnelle 	zpci_zdev_get(zdev);
6007dc20ab1SSebastian Ott 	if (pdev->is_physfn)
6017dc20ab1SSebastian Ott 		pdev->no_vf_scan = 1;
6027dc20ab1SSebastian Ott 
603ef4858c6SSebastian Ott 	pdev->dev.groups = zpci_attr_groups;
6045657933dSBart Van Assche 	pdev->dev.dma_ops = &s390_pci_dma_ops;
6051803ba2dSSebastian Ott 	zpci_map_resources(pdev);
606cb809182SSebastian Ott 
607c9c13ba4SDenis Efremov 	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
608cb809182SSebastian Ott 		res = &pdev->resource[i];
609cb809182SSebastian Ott 		if (res->parent || !res->flags)
610cb809182SSebastian Ott 			continue;
611cb809182SSebastian Ott 		pci_claim_resource(pdev, i);
612cb809182SSebastian Ott 	}
613cb809182SSebastian Ott 
614cb809182SSebastian Ott 	return 0;
615cb809182SSebastian Ott }
616cb809182SSebastian Ott 
6171803ba2dSSebastian Ott void pcibios_release_device(struct pci_dev *pdev)
6181803ba2dSSebastian Ott {
6192a671f77SNiklas Schnelle 	struct zpci_dev *zdev = to_zpci(pdev);
6202a671f77SNiklas Schnelle 
6211803ba2dSSebastian Ott 	zpci_unmap_resources(pdev);
6222a671f77SNiklas Schnelle 	zpci_zdev_put(zdev);
6231803ba2dSSebastian Ott }
6241803ba2dSSebastian Ott 
625cb809182SSebastian Ott int pcibios_enable_device(struct pci_dev *pdev, int mask)
626cb809182SSebastian Ott {
627198a5278SSebastian Ott 	struct zpci_dev *zdev = to_zpci(pdev);
628af0a8a84SSebastian Ott 
6299a99649fSSebastian Ott 	zpci_debug_init_device(zdev, dev_name(&pdev->dev));
630af0a8a84SSebastian Ott 	zpci_fmb_enable_device(zdev);
631af0a8a84SSebastian Ott 
632d7533232SBjorn Helgaas 	return pci_enable_resources(pdev, mask);
633af0a8a84SSebastian Ott }
634af0a8a84SSebastian Ott 
635cb809182SSebastian Ott void pcibios_disable_device(struct pci_dev *pdev)
636944239c5SSebastian Ott {
637198a5278SSebastian Ott 	struct zpci_dev *zdev = to_zpci(pdev);
638944239c5SSebastian Ott 
639944239c5SSebastian Ott 	zpci_fmb_disable_device(zdev);
640944239c5SSebastian Ott 	zpci_debug_exit_device(zdev);
641944239c5SSebastian Ott }
642944239c5SSebastian Ott 
64305bc1be6SPierre Morel static int __zpci_register_domain(int domain)
644cd248341SJan Glauber {
645969ae01bSNiklas Schnelle 	spin_lock(&zpci_domain_lock);
64605bc1be6SPierre Morel 	if (test_bit(domain, zpci_domain)) {
647969ae01bSNiklas Schnelle 		spin_unlock(&zpci_domain_lock);
64805bc1be6SPierre Morel 		pr_err("Domain %04x is already assigned\n", domain);
649312e8462SSebastian Ott 		return -EEXIST;
650312e8462SSebastian Ott 	}
65105bc1be6SPierre Morel 	set_bit(domain, zpci_domain);
652312e8462SSebastian Ott 	spin_unlock(&zpci_domain_lock);
65305bc1be6SPierre Morel 	return domain;
6545c5afd02SSebastian Ott }
65505bc1be6SPierre Morel 
65605bc1be6SPierre Morel static int __zpci_alloc_domain(void)
65705bc1be6SPierre Morel {
65805bc1be6SPierre Morel 	int domain;
65905bc1be6SPierre Morel 
66005bc1be6SPierre Morel 	spin_lock(&zpci_domain_lock);
661969ae01bSNiklas Schnelle 	/*
662969ae01bSNiklas Schnelle 	 * We can always auto allocate domains below ZPCI_NR_DEVICES.
663969ae01bSNiklas Schnelle 	 * There is either a free domain or we have reached the maximum in
664969ae01bSNiklas Schnelle 	 * which case we would have bailed earlier.
665969ae01bSNiklas Schnelle 	 */
66605bc1be6SPierre Morel 	domain = find_first_zero_bit(zpci_domain, ZPCI_NR_DEVICES);
66705bc1be6SPierre Morel 	set_bit(domain, zpci_domain);
668cd248341SJan Glauber 	spin_unlock(&zpci_domain_lock);
66905bc1be6SPierre Morel 	return domain;
670cd248341SJan Glauber }
671cd248341SJan Glauber 
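/*
 * Domain allocation: if the platform provides unique UIDs, the UID is used
 * directly as the domain number and merely registered (so collisions are
 * reported); otherwise, or after falling back, domains are auto-allocated
 * from the bitmap below ZPCI_NR_DEVICES.
 */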
67205bc1be6SPierre Morel int zpci_alloc_domain(int domain)
67305bc1be6SPierre Morel {
67405bc1be6SPierre Morel 	if (zpci_unique_uid) {
67505bc1be6SPierre Morel 		if (domain)
67605bc1be6SPierre Morel 			return __zpci_register_domain(domain);
67705bc1be6SPierre Morel 		pr_warn("UID checking was active but no UID is provided: switching to automatic domain allocation\n");
67805bc1be6SPierre Morel 		update_uid_checking(false);
67905bc1be6SPierre Morel 	}
68005bc1be6SPierre Morel 	return __zpci_alloc_domain();
68105bc1be6SPierre Morel }
68205bc1be6SPierre Morel 
68305bc1be6SPierre Morel void zpci_free_domain(int domain)
684cd248341SJan Glauber {
685cd248341SJan Glauber 	spin_lock(&zpci_domain_lock);
68605bc1be6SPierre Morel 	clear_bit(domain, zpci_domain);
687cd248341SJan Glauber 	spin_unlock(&zpci_domain_lock);
688cd248341SJan Glauber }
689cd248341SJan Glauber 
6907d594322SSebastian Ott 
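/*
 * Enable the PCI function through CLP. On success the platform returns a
 * new function handle which is propagated via zpci_update_fh(); on failure
 * -EIO is returned and the cached handle is left untouched.
 */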
691a755a45dSJan Glauber int zpci_enable_device(struct zpci_dev *zdev)
692a755a45dSJan Glauber {
693cc049eecSNiklas Schnelle 	u32 fh = zdev->fh;
6941f3f7681SNiklas Schnelle 	int rc = 0;
695a755a45dSJan Glauber 
6961f3f7681SNiklas Schnelle 	if (clp_enable_fh(zdev, &fh, ZPCI_NR_DMA_SPACES))
697f7addcddSNiklas Schnelle 		rc = -EIO;
6981f3f7681SNiklas Schnelle 	else
699*4fe20497SNiklas Schnelle 		zpci_update_fh(zdev, fh);
700a755a45dSJan Glauber 	return rc;
701a755a45dSJan Glauber }
702a755a45dSJan Glauber 
703cb65a669SSebastian Ott int zpci_disable_device(struct zpci_dev *zdev)
704cb65a669SSebastian Ott {
705cc049eecSNiklas Schnelle 	u32 fh = zdev->fh;
7068256addaSNiklas Schnelle 	int cc, rc = 0;
7078256addaSNiklas Schnelle 
708cc049eecSNiklas Schnelle 	cc = clp_disable_fh(zdev, &fh);
709cc049eecSNiklas Schnelle 	if (!cc) {
710*4fe20497SNiklas Schnelle 		zpci_update_fh(zdev, fh);
711cc049eecSNiklas Schnelle 	} else if (cc == CLP_RC_SETPCIFN_ALRDY) {
7128256addaSNiklas Schnelle 		pr_info("Disabling PCI function %08x had no effect as it was already disabled\n",
7138256addaSNiklas Schnelle 			zdev->fid);
7148256addaSNiklas Schnelle 		/* Function is already disabled - update handle */
715cc049eecSNiklas Schnelle 		rc = clp_refresh_fh(zdev->fid, &fh);
716cc049eecSNiklas Schnelle 		if (!rc) {
717*4fe20497SNiklas Schnelle 			zpci_update_fh(zdev, fh);
7188256addaSNiklas Schnelle 			rc = -EINVAL;
719cc049eecSNiklas Schnelle 		}
720cc049eecSNiklas Schnelle 	} else {
7218256addaSNiklas Schnelle 		rc = -EIO;
7228256addaSNiklas Schnelle 	}
7238256addaSNiklas Schnelle 	return rc;
724cb65a669SSebastian Ott }
725cb65a669SSebastian Ott 
726ba764dd7SNiklas Schnelle /**
727ba764dd7SNiklas Schnelle  * zpci_create_device() - Create a new zpci_dev and add it to the zbus
728ba764dd7SNiklas Schnelle  * @fid: Function ID of the device to be created
729ba764dd7SNiklas Schnelle  * @fh: Current Function Handle of the device to be created
730ba764dd7SNiklas Schnelle  * @state: Initial state after creation, either Standby or Configured
731ba764dd7SNiklas Schnelle  *
732ba764dd7SNiklas Schnelle  * Creates a new zpci device and adds it to its (possibly newly created) zbus
733ba764dd7SNiklas Schnelle  * as well as zpci_list.
734ba764dd7SNiklas Schnelle  *
73514c87ba8SNiklas Schnelle  * Returns: the zdev on success or an error pointer otherwise
736ba764dd7SNiklas Schnelle  */
73714c87ba8SNiklas Schnelle struct zpci_dev *zpci_create_device(u32 fid, u32 fh, enum zpci_state state)
738cd248341SJan Glauber {
739ba764dd7SNiklas Schnelle 	struct zpci_dev *zdev;
740cd248341SJan Glauber 	int rc;
741cd248341SJan Glauber 
742ba764dd7SNiklas Schnelle 	zpci_dbg(3, "add fid:%x, fh:%x, c:%d\n", fid, fh, state);
743ba764dd7SNiklas Schnelle 	zdev = kzalloc(sizeof(*zdev), GFP_KERNEL);
744ba764dd7SNiklas Schnelle 	if (!zdev)
74514c87ba8SNiklas Schnelle 		return ERR_PTR(-ENOMEM);
746ba764dd7SNiklas Schnelle 
747ba764dd7SNiklas Schnelle 	/* FID and Function Handle are the static/dynamic identifiers */
748ba764dd7SNiklas Schnelle 	zdev->fid = fid;
749ba764dd7SNiklas Schnelle 	zdev->fh = fh;
750ba764dd7SNiklas Schnelle 
751ba764dd7SNiklas Schnelle 	/* Query function properties and update zdev */
752ba764dd7SNiklas Schnelle 	rc = clp_query_pci_fn(zdev);
753ba764dd7SNiklas Schnelle 	if (rc)
754ba764dd7SNiklas Schnelle 		goto error;
755ba764dd7SNiklas Schnelle 	zdev->state = state;
756ba764dd7SNiklas Schnelle 
75705bc1be6SPierre Morel 	kref_init(&zdev->kref);
758ba764dd7SNiklas Schnelle 	mutex_init(&zdev->lock);
759ba764dd7SNiklas Schnelle 
760ba764dd7SNiklas Schnelle 	rc = zpci_init_iommu(zdev);
761ba764dd7SNiklas Schnelle 	if (rc)
762ba764dd7SNiklas Schnelle 		goto error;
763ba764dd7SNiklas Schnelle 
764ba764dd7SNiklas Schnelle 	rc = zpci_bus_device_register(zdev, &pci_root_ops);
765ba764dd7SNiklas Schnelle 	if (rc)
766a50297cfSNiklas Schnelle 		goto error_destroy_iommu;
76705bc1be6SPierre Morel 
76805bc1be6SPierre Morel 	spin_lock(&zpci_list_lock);
76905bc1be6SPierre Morel 	list_add_tail(&zdev->entry, &zpci_list);
77005bc1be6SPierre Morel 	spin_unlock(&zpci_list_lock);
771cd248341SJan Glauber 
77214c87ba8SNiklas Schnelle 	return zdev;
773cd248341SJan Glauber 
774ba764dd7SNiklas Schnelle error_destroy_iommu:
775f42c2235SJoerg Roedel 	zpci_destroy_iommu(zdev);
776ba764dd7SNiklas Schnelle error:
777ba764dd7SNiklas Schnelle 	zpci_dbg(0, "add fid:%x, rc:%d\n", fid, rc);
778ba764dd7SNiklas Schnelle 	kfree(zdev);
77914c87ba8SNiklas Schnelle 	return ERR_PTR(rc);
780cd248341SJan Glauber }
781cd248341SJan Glauber 
782a46044a9SNiklas Schnelle bool zpci_is_device_configured(struct zpci_dev *zdev)
783a46044a9SNiklas Schnelle {
784a46044a9SNiklas Schnelle 	enum zpci_state state = zdev->state;
785a46044a9SNiklas Schnelle 
786a46044a9SNiklas Schnelle 	return state != ZPCI_FN_STATE_RESERVED &&
787a46044a9SNiklas Schnelle 		state != ZPCI_FN_STATE_STANDBY;
788a46044a9SNiklas Schnelle }
789a46044a9SNiklas Schnelle 
7902631f6b6SNiklas Schnelle /**
791a7f82c36SNiklas Schnelle  * zpci_scan_configured_device() - Scan a freshly configured zpci_dev
7922631f6b6SNiklas Schnelle  * @zdev: The zpci_dev to be configured
7932631f6b6SNiklas Schnelle  * @fh: The general function handle supplied by the platform
7942631f6b6SNiklas Schnelle  *
79561311e32SNiklas Schnelle  * Given a device in the configuration state Configured, enables, scans and
796a7f82c36SNiklas Schnelle  * adds it to the common code PCI subsystem if possible. If the PCI device is
797a7f82c36SNiklas Schnelle  * parked because we can not yet create a PCI bus because we have not seen
798a7f82c36SNiklas Schnelle  * parked because we cannot create a PCI bus until function 0 has been seen,
799a7f82c36SNiklas Schnelle  * it is ignored but will be scanned once function 0 appears.
8002631f6b6SNiklas Schnelle  *
8012631f6b6SNiklas Schnelle  * Return: 0 on success, or an error code otherwise
8022631f6b6SNiklas Schnelle  */
803a7f82c36SNiklas Schnelle int zpci_scan_configured_device(struct zpci_dev *zdev, u32 fh)
8042631f6b6SNiklas Schnelle {
8052631f6b6SNiklas Schnelle 	int rc;
8062631f6b6SNiklas Schnelle 
807*4fe20497SNiklas Schnelle 	zpci_update_fh(zdev, fh);
8082631f6b6SNiklas Schnelle 	/* the PCI function will be scanned once function 0 appears */
8092631f6b6SNiklas Schnelle 	if (!zdev->zbus->bus)
8102631f6b6SNiklas Schnelle 		return 0;
8112631f6b6SNiklas Schnelle 
812a50297cfSNiklas Schnelle 	/* For function 0 on a multi-function bus, scan the whole bus as we
813a50297cfSNiklas Schnelle 	 * might have to pick up existing functions that were waiting for it
814a50297cfSNiklas Schnelle 	 * before the PCI bus could be created
815a50297cfSNiklas Schnelle 	 */
816a50297cfSNiklas Schnelle 	if (zdev->devfn == 0 && zdev->zbus->multifunction)
817a50297cfSNiklas Schnelle 		rc = zpci_bus_scan_bus(zdev->zbus);
818a50297cfSNiklas Schnelle 	else
819faf29a4dSNiklas Schnelle 		rc = zpci_bus_scan_device(zdev);
8202631f6b6SNiklas Schnelle 
8212631f6b6SNiklas Schnelle 	return rc;
8222631f6b6SNiklas Schnelle }
8232631f6b6SNiklas Schnelle 
8242631f6b6SNiklas Schnelle /**
8252631f6b6SNiklas Schnelle  * zpci_deconfigure_device() - Deconfigure a zpci_dev
8262631f6b6SNiklas Schnelle  * @zdev: The zpci_dev to deconfigure
8272631f6b6SNiklas Schnelle  *
8282631f6b6SNiklas Schnelle  * Deconfigure a zPCI function that is currently configured and possibly known
8292631f6b6SNiklas Schnelle  * to the common code PCI subsystem.
8302631f6b6SNiklas Schnelle  * If any failure occurs the device is left as is.
8312631f6b6SNiklas Schnelle  *
8322631f6b6SNiklas Schnelle  * Return: 0 on success, or an error code otherwise
8332631f6b6SNiklas Schnelle  */
8342631f6b6SNiklas Schnelle int zpci_deconfigure_device(struct zpci_dev *zdev)
8352631f6b6SNiklas Schnelle {
8362631f6b6SNiklas Schnelle 	int rc;
8372631f6b6SNiklas Schnelle 
8382631f6b6SNiklas Schnelle 	if (zdev->zbus->bus)
83995b3a8b4SNiklas Schnelle 		zpci_bus_remove_device(zdev, false);
8402631f6b6SNiklas Schnelle 
8411f3f7681SNiklas Schnelle 	if (zdev->dma_table) {
8421f3f7681SNiklas Schnelle 		rc = zpci_dma_exit_device(zdev);
8431f3f7681SNiklas Schnelle 		if (rc)
8441f3f7681SNiklas Schnelle 			return rc;
8451f3f7681SNiklas Schnelle 	}
8462631f6b6SNiklas Schnelle 	if (zdev_enabled(zdev)) {
8472631f6b6SNiklas Schnelle 		rc = zpci_disable_device(zdev);
8482631f6b6SNiklas Schnelle 		if (rc)
8492631f6b6SNiklas Schnelle 			return rc;
8502631f6b6SNiklas Schnelle 	}
8512631f6b6SNiklas Schnelle 
8522631f6b6SNiklas Schnelle 	rc = sclp_pci_deconfigure(zdev->fid);
8532631f6b6SNiklas Schnelle 	zpci_dbg(3, "deconf fid:%x, rc:%d\n", zdev->fid, rc);
8542631f6b6SNiklas Schnelle 	if (rc)
8552631f6b6SNiklas Schnelle 		return rc;
8562631f6b6SNiklas Schnelle 	zdev->state = ZPCI_FN_STATE_STANDBY;
8572631f6b6SNiklas Schnelle 
8582631f6b6SNiklas Schnelle 	return 0;
8592631f6b6SNiklas Schnelle }
8602631f6b6SNiklas Schnelle 
861a46044a9SNiklas Schnelle /**
862a46044a9SNiklas Schnelle  * zpci_device_reserved() - Mark device as reserved
863a46044a9SNiklas Schnelle  * @zdev: the zpci_dev that was reserved
864a46044a9SNiklas Schnelle  *
865a46044a9SNiklas Schnelle  * Handle the case that a given zPCI function was reserved by another system.
866a46044a9SNiklas Schnelle  * After a call to this function the zpci_dev can no longer be found via
867a46044a9SNiklas Schnelle  * get_zdev_by_fid() but may still be accessible via existing references,
868a46044a9SNiklas Schnelle  * though it will not be functional anymore.
869a46044a9SNiklas Schnelle  */
870a46044a9SNiklas Schnelle void zpci_device_reserved(struct zpci_dev *zdev)
871a46044a9SNiklas Schnelle {
872a46044a9SNiklas Schnelle 	if (zdev->has_hp_slot)
873a46044a9SNiklas Schnelle 		zpci_exit_slot(zdev);
874a46044a9SNiklas Schnelle 	/*
875a46044a9SNiklas Schnelle 	 * Remove device from zpci_list as it is going away. This also
876a46044a9SNiklas Schnelle 	 * makes sure we ignore subsequent zPCI events for this device.
877a46044a9SNiklas Schnelle 	 */
878a46044a9SNiklas Schnelle 	spin_lock(&zpci_list_lock);
879a46044a9SNiklas Schnelle 	list_del(&zdev->entry);
880a46044a9SNiklas Schnelle 	spin_unlock(&zpci_list_lock);
881a46044a9SNiklas Schnelle 	zdev->state = ZPCI_FN_STATE_RESERVED;
882a46044a9SNiklas Schnelle 	zpci_dbg(3, "rsv fid:%x\n", zdev->fid);
883a46044a9SNiklas Schnelle 	zpci_zdev_put(zdev);
884a46044a9SNiklas Schnelle }
885a46044a9SNiklas Schnelle 
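/*
 * Final teardown once the last reference to a zpci_dev is dropped: the
 * device is removed from its PCI bus, DMA is shut down and the function is
 * disabled. The switch below then intentionally falls through from the
 * Configured case (SCLP deconfigure) via Standby (hotplug slot removal,
 * deletion from zpci_list) to Reserved (release of bus resources, zbus
 * registration and IOMMU state).
 */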
88605bc1be6SPierre Morel void zpci_release_device(struct kref *kref)
887623bd44dSSebastian Ott {
88805bc1be6SPierre Morel 	struct zpci_dev *zdev = container_of(kref, struct zpci_dev, kref);
889a9045c22SNiklas Schnelle 	int ret;
890623bd44dSSebastian Ott 
8912f0230b2SNiklas Schnelle 	if (zdev->zbus->bus)
89295b3a8b4SNiklas Schnelle 		zpci_bus_remove_device(zdev, false);
89344510d6fSPierre Morel 
8941f3f7681SNiklas Schnelle 	if (zdev->dma_table)
8951f3f7681SNiklas Schnelle 		zpci_dma_exit_device(zdev);
896f6576a1bSNiklas Schnelle 	if (zdev_enabled(zdev))
89705bc1be6SPierre Morel 		zpci_disable_device(zdev);
898f6576a1bSNiklas Schnelle 
899f6576a1bSNiklas Schnelle 	switch (zdev->state) {
900a9045c22SNiklas Schnelle 	case ZPCI_FN_STATE_CONFIGURED:
901a9045c22SNiklas Schnelle 		ret = sclp_pci_deconfigure(zdev->fid);
902a9045c22SNiklas Schnelle 		zpci_dbg(3, "deconf fid:%x, rc:%d\n", zdev->fid, ret);
903a9045c22SNiklas Schnelle 		fallthrough;
90405bc1be6SPierre Morel 	case ZPCI_FN_STATE_STANDBY:
90544510d6fSPierre Morel 		if (zdev->has_hp_slot)
90605bc1be6SPierre Morel 			zpci_exit_slot(zdev);
907a46044a9SNiklas Schnelle 		spin_lock(&zpci_list_lock);
908a46044a9SNiklas Schnelle 		list_del(&zdev->entry);
909a46044a9SNiklas Schnelle 		spin_unlock(&zpci_list_lock);
910a46044a9SNiklas Schnelle 		zpci_dbg(3, "rsv fid:%x\n", zdev->fid);
911a46044a9SNiklas Schnelle 		fallthrough;
912a46044a9SNiklas Schnelle 	case ZPCI_FN_STATE_RESERVED:
91302368b7cSNiklas Schnelle 		if (zdev->has_resources)
91405bc1be6SPierre Morel 			zpci_cleanup_bus_resources(zdev);
91505bc1be6SPierre Morel 		zpci_bus_device_unregister(zdev);
91605bc1be6SPierre Morel 		zpci_destroy_iommu(zdev);
91705bc1be6SPierre Morel 		fallthrough;
91805bc1be6SPierre Morel 	default:
91905bc1be6SPierre Morel 		break;
92005bc1be6SPierre Morel 	}
92105bc1be6SPierre Morel 	zpci_dbg(3, "rem fid:%x\n", zdev->fid);
92205bc1be6SPierre Morel 	kfree(zdev);
923623bd44dSSebastian Ott }
924623bd44dSSebastian Ott 
925bd3a1725SMartin Schwidefsky int zpci_report_error(struct pci_dev *pdev,
926bd3a1725SMartin Schwidefsky 		      struct zpci_report_error_header *report)
927bd3a1725SMartin Schwidefsky {
928bd3a1725SMartin Schwidefsky 	struct zpci_dev *zdev = to_zpci(pdev);
929bd3a1725SMartin Schwidefsky 
930bd3a1725SMartin Schwidefsky 	return sclp_pci_report(report, zdev->fh, zdev->fid);
931bd3a1725SMartin Schwidefsky }
932bd3a1725SMartin Schwidefsky EXPORT_SYMBOL(zpci_report_error);
933bd3a1725SMartin Schwidefsky 
934cd248341SJan Glauber static int zpci_mem_init(void)
935cd248341SJan Glauber {
93680c544deSSebastian Ott 	BUILD_BUG_ON(!is_power_of_2(__alignof__(struct zpci_fmb)) ||
93780c544deSSebastian Ott 		     __alignof__(struct zpci_fmb) < sizeof(struct zpci_fmb));
93880c544deSSebastian Ott 
939d0b08853SJan Glauber 	zdev_fmb_cache = kmem_cache_create("PCI_FMB_cache", sizeof(struct zpci_fmb),
94080c544deSSebastian Ott 					   __alignof__(struct zpci_fmb), 0, NULL);
941d0b08853SJan Glauber 	if (!zdev_fmb_cache)
942c506fff3SSebastian Ott 		goto error_fmb;
943d0b08853SJan Glauber 
944c506fff3SSebastian Ott 	zpci_iomap_start = kcalloc(ZPCI_IOMAP_ENTRIES,
945c506fff3SSebastian Ott 				   sizeof(*zpci_iomap_start), GFP_KERNEL);
946cd248341SJan Glauber 	if (!zpci_iomap_start)
9479a4da8a5SJan Glauber 		goto error_iomap;
948cd248341SJan Glauber 
949c506fff3SSebastian Ott 	zpci_iomap_bitmap = kcalloc(BITS_TO_LONGS(ZPCI_IOMAP_ENTRIES),
950c506fff3SSebastian Ott 				    sizeof(*zpci_iomap_bitmap), GFP_KERNEL);
951c506fff3SSebastian Ott 	if (!zpci_iomap_bitmap)
952c506fff3SSebastian Ott 		goto error_iomap_bitmap;
953c506fff3SSebastian Ott 
954b02002ccSNiklas Schnelle 	if (static_branch_likely(&have_mio))
955b02002ccSNiklas Schnelle 		clp_setup_writeback_mio();
956b02002ccSNiklas Schnelle 
957c506fff3SSebastian Ott 	return 0;
958c506fff3SSebastian Ott error_iomap_bitmap:
959c506fff3SSebastian Ott 	kfree(zpci_iomap_start);
9609a4da8a5SJan Glauber error_iomap:
961d0b08853SJan Glauber 	kmem_cache_destroy(zdev_fmb_cache);
962c506fff3SSebastian Ott error_fmb:
963cd248341SJan Glauber 	return -ENOMEM;
964cd248341SJan Glauber }
965cd248341SJan Glauber 
966cd248341SJan Glauber static void zpci_mem_exit(void)
967cd248341SJan Glauber {
968c506fff3SSebastian Ott 	kfree(zpci_iomap_bitmap);
969cd248341SJan Glauber 	kfree(zpci_iomap_start);
970d0b08853SJan Glauber 	kmem_cache_destroy(zdev_fmb_cache);
971cd248341SJan Glauber }
972cd248341SJan Glauber 
9736324b4deSSebastian Ott static unsigned int s390_pci_probe __initdata = 1;
974fbfe07d4SSebastian Ott unsigned int s390_pci_force_floating __initdata;
975aa3b7c29SSebastian Ott static unsigned int s390_pci_initialized;
976cd248341SJan Glauber 
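/*
 * Command line handling. These tokens are expected to arrive via the
 * generic "pci=" parameter (e.g. pci=off, pci=nomio, pci=force_floating
 * or pci=norid), which passes options it does not handle itself on to
 * pcibios_setup().
 */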
977cd248341SJan Glauber char * __init pcibios_setup(char *str)
978cd248341SJan Glauber {
979257608fbSSebastian Ott 	if (!strcmp(str, "off")) {
980257608fbSSebastian Ott 		s390_pci_probe = 0;
981cd248341SJan Glauber 		return NULL;
982cd248341SJan Glauber 	}
98356271303SSebastian Ott 	if (!strcmp(str, "nomio")) {
9843322ba0dSNiklas Schnelle 		S390_lowcore.machine_flags &= ~MACHINE_FLAG_PCI_MIO;
98556271303SSebastian Ott 		return NULL;
98656271303SSebastian Ott 	}
987fbfe07d4SSebastian Ott 	if (!strcmp(str, "force_floating")) {
988fbfe07d4SSebastian Ott 		s390_pci_force_floating = 1;
989fbfe07d4SSebastian Ott 		return NULL;
990fbfe07d4SSebastian Ott 	}
9916cf17f9aSPierre Morel 	if (!strcmp(str, "norid")) {
9926cf17f9aSPierre Morel 		s390_pci_no_rid = 1;
9936cf17f9aSPierre Morel 		return NULL;
9946cf17f9aSPierre Morel 	}
995cd248341SJan Glauber 	return str;
996cd248341SJan Glauber }
997cd248341SJan Glauber 
998aa3b7c29SSebastian Ott bool zpci_is_enabled(void)
999aa3b7c29SSebastian Ott {
1000aa3b7c29SSebastian Ott 	return s390_pci_initialized;
1001aa3b7c29SSebastian Ott }
1002aa3b7c29SSebastian Ott 
1003cd248341SJan Glauber static int __init pci_base_init(void)
1004cd248341SJan Glauber {
1005cd248341SJan Glauber 	int rc;
1006cd248341SJan Glauber 
10071e5635d1SHeiko Carstens 	if (!s390_pci_probe)
1008cd248341SJan Glauber 		return 0;
1009cd248341SJan Glauber 
1010da78693eSNiklas Schnelle 	if (!test_facility(69) || !test_facility(71)) {
1011da78693eSNiklas Schnelle 		pr_info("PCI is not supported because CPU facilities 69 or 71 are not available\n");
1012cd248341SJan Glauber 		return 0;
1013da78693eSNiklas Schnelle 	}
1014cd248341SJan Glauber 
10153322ba0dSNiklas Schnelle 	if (MACHINE_HAS_PCI_MIO) {
101671ba41c9SSebastian Ott 		static_branch_enable(&have_mio);
10179964f396SSebastian Ott 		ctl_set_bit(2, 5);
10189964f396SSebastian Ott 	}
101971ba41c9SSebastian Ott 
1020d0b08853SJan Glauber 	rc = zpci_debug_init();
1021d0b08853SJan Glauber 	if (rc)
10221f44a225SMartin Schwidefsky 		goto out;
1023d0b08853SJan Glauber 
1024cd248341SJan Glauber 	rc = zpci_mem_init();
1025cd248341SJan Glauber 	if (rc)
1026cd248341SJan Glauber 		goto out_mem;
1027cd248341SJan Glauber 
10289a4da8a5SJan Glauber 	rc = zpci_irq_init();
10299a4da8a5SJan Glauber 	if (rc)
10309a4da8a5SJan Glauber 		goto out_irq;
10319a4da8a5SJan Glauber 
1032828b35f6SJan Glauber 	rc = zpci_dma_init();
1033828b35f6SJan Glauber 	if (rc)
1034828b35f6SJan Glauber 		goto out_dma;
1035828b35f6SJan Glauber 
10361d578966SSebastian Ott 	rc = clp_scan_pci_devices();
1037a755a45dSJan Glauber 	if (rc)
1038a755a45dSJan Glauber 		goto out_find;
103914c87ba8SNiklas Schnelle 	zpci_bus_scan_busses();
1040a755a45dSJan Glauber 
1041aa3b7c29SSebastian Ott 	s390_pci_initialized = 1;
1042cd248341SJan Glauber 	return 0;
1043cd248341SJan Glauber 
1044a755a45dSJan Glauber out_find:
1045828b35f6SJan Glauber 	zpci_dma_exit();
1046828b35f6SJan Glauber out_dma:
10479a4da8a5SJan Glauber 	zpci_irq_exit();
10489a4da8a5SJan Glauber out_irq:
1049cd248341SJan Glauber 	zpci_mem_exit();
1050cd248341SJan Glauber out_mem:
1051d0b08853SJan Glauber 	zpci_debug_exit();
10521f44a225SMartin Schwidefsky out:
1053cd248341SJan Glauber 	return rc;
1054cd248341SJan Glauber }
105567f43f38SSebastian Ott subsys_initcall_sync(pci_base_init);
1056