xref: /linux/arch/s390/pci/pci.c (revision f6576a1b4896b984dce0e8393efeba68cc2b96c8)
1adbb3901SGreg Kroah-Hartman // SPDX-License-Identifier: GPL-2.0
2cd248341SJan Glauber /*
3cd248341SJan Glauber  * Copyright IBM Corp. 2012
4cd248341SJan Glauber  *
5cd248341SJan Glauber  * Author(s):
6cd248341SJan Glauber  *   Jan Glauber <jang@linux.vnet.ibm.com>
7cd248341SJan Glauber  *
8cd248341SJan Glauber  * The System z PCI code is a rewrite from a prototype by
9cd248341SJan Glauber  * the following people (Kudoz!):
10bedef755SJan Glauber  *   Alexander Schmidt
11bedef755SJan Glauber  *   Christoph Raisch
12bedef755SJan Glauber  *   Hannes Hering
13bedef755SJan Glauber  *   Hoang-Nam Nguyen
14bedef755SJan Glauber  *   Jan-Bernd Themann
15bedef755SJan Glauber  *   Stefan Roscher
16bedef755SJan Glauber  *   Thomas Klein
17cd248341SJan Glauber  */
18cd248341SJan Glauber 
19896cb7e6SGerald Schaefer #define KMSG_COMPONENT "zpci"
20896cb7e6SGerald Schaefer #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
21cd248341SJan Glauber 
22cd248341SJan Glauber #include <linux/kernel.h>
23cd248341SJan Glauber #include <linux/slab.h>
24cd248341SJan Glauber #include <linux/err.h>
25cd248341SJan Glauber #include <linux/export.h>
26cd248341SJan Glauber #include <linux/delay.h>
27cd248341SJan Glauber #include <linux/seq_file.h>
2871ba41c9SSebastian Ott #include <linux/jump_label.h>
29cd248341SJan Glauber #include <linux/pci.h>
30794b8846SNiklas Schnelle #include <linux/printk.h>
31cd248341SJan Glauber 
329a4da8a5SJan Glauber #include <asm/isc.h>
339a4da8a5SJan Glauber #include <asm/airq.h>
34cd248341SJan Glauber #include <asm/facility.h>
35cd248341SJan Glauber #include <asm/pci_insn.h>
36a755a45dSJan Glauber #include <asm/pci_clp.h>
37828b35f6SJan Glauber #include <asm/pci_dma.h>
38cd248341SJan Glauber 
3905bc1be6SPierre Morel #include "pci_bus.h"
40abb95b75SNiklas Schnelle #include "pci_iov.h"
4105bc1be6SPierre Morel 
42cd248341SJan Glauber /* list of all detected zpci devices */
4367f43f38SSebastian Ott static LIST_HEAD(zpci_list);
4457b5918cSSebastian Ott static DEFINE_SPINLOCK(zpci_list_lock);
451f44a225SMartin Schwidefsky 
46969ae01bSNiklas Schnelle static DECLARE_BITMAP(zpci_domain, ZPCI_DOMAIN_BITMAP_SIZE);
47cd248341SJan Glauber static DEFINE_SPINLOCK(zpci_domain_lock);
48cd248341SJan Glauber 
49c506fff3SSebastian Ott #define ZPCI_IOMAP_ENTRIES						\
50c9c13ba4SDenis Efremov 	min(((unsigned long) ZPCI_NR_DEVICES * PCI_STD_NUM_BARS / 2),	\
51c506fff3SSebastian Ott 	    ZPCI_IOMAP_MAX_ENTRIES)
52c506fff3SSebastian Ott 
536cf17f9aSPierre Morel unsigned int s390_pci_no_rid;
546cf17f9aSPierre Morel 
55cd248341SJan Glauber static DEFINE_SPINLOCK(zpci_iomap_lock);
56c506fff3SSebastian Ott static unsigned long *zpci_iomap_bitmap;
57cd248341SJan Glauber struct zpci_iomap_entry *zpci_iomap_start;
58cd248341SJan Glauber EXPORT_SYMBOL_GPL(zpci_iomap_start);
59cd248341SJan Glauber 
6071ba41c9SSebastian Ott DEFINE_STATIC_KEY_FALSE(have_mio);
6171ba41c9SSebastian Ott 
62d0b08853SJan Glauber static struct kmem_cache *zdev_fmb_cache;
63d0b08853SJan Glauber 
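/*
 * Look up a zpci_dev by its function ID. The list walk is protected by
 * zpci_list_lock, but no reference is taken on the returned device, so the
 * caller must ensure it cannot go away concurrently.
 */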
64cd248341SJan Glauber struct zpci_dev *get_zdev_by_fid(u32 fid)
65cd248341SJan Glauber {
66cd248341SJan Glauber 	struct zpci_dev *tmp, *zdev = NULL;
67cd248341SJan Glauber 
6857b5918cSSebastian Ott 	spin_lock(&zpci_list_lock);
69cd248341SJan Glauber 	list_for_each_entry(tmp, &zpci_list, entry) {
70cd248341SJan Glauber 		if (tmp->fid == fid) {
71cd248341SJan Glauber 			zdev = tmp;
72cd248341SJan Glauber 			break;
73cd248341SJan Glauber 		}
74cd248341SJan Glauber 	}
7557b5918cSSebastian Ott 	spin_unlock(&zpci_list_lock);
76cd248341SJan Glauber 	return zdev;
77cd248341SJan Glauber }
78cd248341SJan Glauber 
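/*
 * Release devices that are in STANDBY locally but already RESERVED according
 * to CLP: they are collected on a private list under zpci_list_lock and
 * their references are dropped afterwards, outside the lock.
 */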
7901553d9aSSebastian Ott void zpci_remove_reserved_devices(void)
8001553d9aSSebastian Ott {
8101553d9aSSebastian Ott 	struct zpci_dev *tmp, *zdev;
8201553d9aSSebastian Ott 	enum zpci_state state;
8301553d9aSSebastian Ott 	LIST_HEAD(remove);
8401553d9aSSebastian Ott 
8501553d9aSSebastian Ott 	spin_lock(&zpci_list_lock);
8601553d9aSSebastian Ott 	list_for_each_entry_safe(zdev, tmp, &zpci_list, entry) {
8701553d9aSSebastian Ott 		if (zdev->state == ZPCI_FN_STATE_STANDBY &&
8801553d9aSSebastian Ott 		    !clp_get_state(zdev->fid, &state) &&
8901553d9aSSebastian Ott 		    state == ZPCI_FN_STATE_RESERVED)
9001553d9aSSebastian Ott 			list_move_tail(&zdev->entry, &remove);
9101553d9aSSebastian Ott 	}
9201553d9aSSebastian Ott 	spin_unlock(&zpci_list_lock);
9301553d9aSSebastian Ott 
9401553d9aSSebastian Ott 	list_for_each_entry_safe(zdev, tmp, &remove, entry)
9505bc1be6SPierre Morel 		zpci_zdev_put(zdev);
96cd248341SJan Glauber }
97cd248341SJan Glauber 
98cd248341SJan Glauber int pci_domain_nr(struct pci_bus *bus)
99cd248341SJan Glauber {
10005bc1be6SPierre Morel 	return ((struct zpci_bus *) bus->sysdata)->domain_nr;
101cd248341SJan Glauber }
102cd248341SJan Glauber EXPORT_SYMBOL_GPL(pci_domain_nr);
103cd248341SJan Glauber 
104cd248341SJan Glauber int pci_proc_domain(struct pci_bus *bus)
105cd248341SJan Glauber {
106cd248341SJan Glauber 	return pci_domain_nr(bus);
107cd248341SJan Glauber }
108cd248341SJan Glauber EXPORT_SYMBOL_GPL(pci_proc_domain);
109cd248341SJan Glauber 
110828b35f6SJan Glauber /* Modify PCI: Register I/O address translation parameters */
111828b35f6SJan Glauber int zpci_register_ioat(struct zpci_dev *zdev, u8 dmaas,
112828b35f6SJan Glauber 		       u64 base, u64 limit, u64 iota)
113828b35f6SJan Glauber {
11472570834SSebastian Ott 	u64 req = ZPCI_CREATE_REQ(zdev->fh, dmaas, ZPCI_MOD_FC_REG_IOAT);
11572570834SSebastian Ott 	struct zpci_fib fib = {0};
11672570834SSebastian Ott 	u8 status;
117828b35f6SJan Glauber 
118828b35f6SJan Glauber 	WARN_ON_ONCE(iota & 0x3fff);
11972570834SSebastian Ott 	fib.pba = base;
12072570834SSebastian Ott 	fib.pal = limit;
12172570834SSebastian Ott 	fib.iota = iota | ZPCI_IOTA_RTTO_FLAG;
12272570834SSebastian Ott 	return zpci_mod_fc(req, &fib, &status) ? -EIO : 0;
123828b35f6SJan Glauber }
124828b35f6SJan Glauber 
125828b35f6SJan Glauber /* Modify PCI: Unregister I/O address translation parameters */
126828b35f6SJan Glauber int zpci_unregister_ioat(struct zpci_dev *zdev, u8 dmaas)
127828b35f6SJan Glauber {
12872570834SSebastian Ott 	u64 req = ZPCI_CREATE_REQ(zdev->fh, dmaas, ZPCI_MOD_FC_DEREG_IOAT);
12972570834SSebastian Ott 	struct zpci_fib fib = {0};
13072570834SSebastian Ott 	u8 cc, status;
131828b35f6SJan Glauber 
13272570834SSebastian Ott 	cc = zpci_mod_fc(req, &fib, &status);
13372570834SSebastian Ott 	if (cc == 3) /* Function already gone. */
13472570834SSebastian Ott 		cc = 0;
13572570834SSebastian Ott 	return cc ? -EIO : 0;
136828b35f6SJan Glauber }
137828b35f6SJan Glauber 
138d0b08853SJan Glauber /* Modify PCI: Set PCI function measurement parameters */
139d0b08853SJan Glauber int zpci_fmb_enable_device(struct zpci_dev *zdev)
140d0b08853SJan Glauber {
1414e5bd780SSebastian Ott 	u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, ZPCI_MOD_FC_SET_MEASURE);
1424e5bd780SSebastian Ott 	struct zpci_fib fib = {0};
1434e5bd780SSebastian Ott 	u8 cc, status;
144d0b08853SJan Glauber 
1450b7589ecSSebastian Ott 	if (zdev->fmb || sizeof(*zdev->fmb) < zdev->fmb_length)
146d0b08853SJan Glauber 		return -EINVAL;
147d0b08853SJan Glauber 
14808b42124SWei Yongjun 	zdev->fmb = kmem_cache_zalloc(zdev_fmb_cache, GFP_KERNEL);
149d0b08853SJan Glauber 	if (!zdev->fmb)
150d0b08853SJan Glauber 		return -ENOMEM;
151d0b08853SJan Glauber 	WARN_ON((u64) zdev->fmb & 0xf);
152d0b08853SJan Glauber 
1536001018aSSebastian Ott 	/* reset software counters */
1546001018aSSebastian Ott 	atomic64_set(&zdev->allocated_pages, 0);
1556001018aSSebastian Ott 	atomic64_set(&zdev->mapped_pages, 0);
1566001018aSSebastian Ott 	atomic64_set(&zdev->unmapped_pages, 0);
1576001018aSSebastian Ott 
1584e5bd780SSebastian Ott 	fib.fmb_addr = virt_to_phys(zdev->fmb);
1594e5bd780SSebastian Ott 	cc = zpci_mod_fc(req, &fib, &status);
1604e5bd780SSebastian Ott 	if (cc) {
1614e5bd780SSebastian Ott 		kmem_cache_free(zdev_fmb_cache, zdev->fmb);
1624e5bd780SSebastian Ott 		zdev->fmb = NULL;
1634e5bd780SSebastian Ott 	}
1644e5bd780SSebastian Ott 	return cc ? -EIO : 0;
165d0b08853SJan Glauber }
166d0b08853SJan Glauber 
167d0b08853SJan Glauber /* Modify PCI: Disable PCI function measurement */
168d0b08853SJan Glauber int zpci_fmb_disable_device(struct zpci_dev *zdev)
169d0b08853SJan Glauber {
1704e5bd780SSebastian Ott 	u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, ZPCI_MOD_FC_SET_MEASURE);
1714e5bd780SSebastian Ott 	struct zpci_fib fib = {0};
1724e5bd780SSebastian Ott 	u8 cc, status;
173d0b08853SJan Glauber 
174d0b08853SJan Glauber 	if (!zdev->fmb)
175d0b08853SJan Glauber 		return -EINVAL;
176d0b08853SJan Glauber 
177d0b08853SJan Glauber 	/* Function measurement is disabled if fmb address is zero */
1784e5bd780SSebastian Ott 	cc = zpci_mod_fc(req, &fib, &status);
1794e5bd780SSebastian Ott 	if (cc == 3) /* Function already gone. */
1804e5bd780SSebastian Ott 		cc = 0;
181d0b08853SJan Glauber 
1824e5bd780SSebastian Ott 	if (!cc) {
183d0b08853SJan Glauber 		kmem_cache_free(zdev_fmb_cache, zdev->fmb);
184d0b08853SJan Glauber 		zdev->fmb = NULL;
1854e5bd780SSebastian Ott 	}
1864e5bd780SSebastian Ott 	return cc ? -EIO : 0;
187d0b08853SJan Glauber }
188d0b08853SJan Glauber 
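/*
 * Config space accessors: zpci_cfg_load() converts the doubleword returned
 * by the PCI load instruction from little endian and shifts it right by
 * (8 - len) * 8 bits (e.g. 48 bits for a 2-byte read) to right-align the
 * value; zpci_cfg_store() applies the mirror-image shift and conversion
 * before the store.
 */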
189cd248341SJan Glauber static int zpci_cfg_load(struct zpci_dev *zdev, int offset, u32 *val, u8 len)
190cd248341SJan Glauber {
191cd248341SJan Glauber 	u64 req = ZPCI_CREATE_REQ(zdev->fh, ZPCI_PCIAS_CFGSPC, len);
192cd248341SJan Glauber 	u64 data;
193cd248341SJan Glauber 	int rc;
194cd248341SJan Glauber 
19581deca12SSebastian Ott 	rc = __zpci_load(&data, req, offset);
196b170bad4SSebastian Ott 	if (!rc) {
1975064cd35SSebastian Ott 		data = le64_to_cpu((__force __le64) data);
1985064cd35SSebastian Ott 		data >>= (8 - len) * 8;
199cd248341SJan Glauber 		*val = (u32) data;
200b170bad4SSebastian Ott 	} else
201cd248341SJan Glauber 		*val = 0xffffffff;
202cd248341SJan Glauber 	return rc;
203cd248341SJan Glauber }
204cd248341SJan Glauber 
205cd248341SJan Glauber static int zpci_cfg_store(struct zpci_dev *zdev, int offset, u32 val, u8 len)
206cd248341SJan Glauber {
207cd248341SJan Glauber 	u64 req = ZPCI_CREATE_REQ(zdev->fh, ZPCI_PCIAS_CFGSPC, len);
208cd248341SJan Glauber 	u64 data = val;
209cd248341SJan Glauber 	int rc;
210cd248341SJan Glauber 
2115064cd35SSebastian Ott 	data <<= (8 - len) * 8;
2125064cd35SSebastian Ott 	data = (__force u64) cpu_to_le64(data);
21381deca12SSebastian Ott 	rc = __zpci_store(data, req, offset);
214cd248341SJan Glauber 	return rc;
215cd248341SJan Glauber }
216cd248341SJan Glauber 
217cd248341SJan Glauber resource_size_t pcibios_align_resource(void *data, const struct resource *res,
218cd248341SJan Glauber 				       resource_size_t size,
219cd248341SJan Glauber 				       resource_size_t align)
220cd248341SJan Glauber {
221cd248341SJan Glauber 	return 0;
222cd248341SJan Glauber }
223cd248341SJan Glauber 
22487bc359bSJan Glauber /* combine single writes by using store-block insn */
22587bc359bSJan Glauber void __iowrite64_copy(void __iomem *to, const void *from, size_t count)
22687bc359bSJan Glauber {
22787bc359bSJan Glauber 	zpci_memcpy_toio(to, from, count);
22887bc359bSJan Glauber }
22987bc359bSJan Glauber 
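/*
 * Without MIO support no page-table mapping is needed: the address is
 * returned unchanged and I/O goes through the function-handle based
 * instructions. With MIO enabled a VM_IOREMAP area is allocated and backed
 * via ioremap_page_range() with the requested protection.
 */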
230b02002ccSNiklas Schnelle static void __iomem *__ioremap(phys_addr_t addr, size_t size, pgprot_t prot)
23171ba41c9SSebastian Ott {
232a999eb96SNiklas Schnelle 	unsigned long offset, vaddr;
23371ba41c9SSebastian Ott 	struct vm_struct *area;
234a999eb96SNiklas Schnelle 	phys_addr_t last_addr;
23571ba41c9SSebastian Ott 
236a999eb96SNiklas Schnelle 	last_addr = addr + size - 1;
237a999eb96SNiklas Schnelle 	if (!size || last_addr < addr)
23871ba41c9SSebastian Ott 		return NULL;
23971ba41c9SSebastian Ott 
24071ba41c9SSebastian Ott 	if (!static_branch_unlikely(&have_mio))
241a999eb96SNiklas Schnelle 		return (void __iomem *) addr;
24271ba41c9SSebastian Ott 
243a999eb96SNiklas Schnelle 	offset = addr & ~PAGE_MASK;
244a999eb96SNiklas Schnelle 	addr &= PAGE_MASK;
24571ba41c9SSebastian Ott 	size = PAGE_ALIGN(size + offset);
24671ba41c9SSebastian Ott 	area = get_vm_area(size, VM_IOREMAP);
24771ba41c9SSebastian Ott 	if (!area)
24871ba41c9SSebastian Ott 		return NULL;
24971ba41c9SSebastian Ott 
250a999eb96SNiklas Schnelle 	vaddr = (unsigned long) area->addr;
251b02002ccSNiklas Schnelle 	if (ioremap_page_range(vaddr, vaddr + size, addr, prot)) {
252a999eb96SNiklas Schnelle 		free_vm_area(area);
25371ba41c9SSebastian Ott 		return NULL;
25471ba41c9SSebastian Ott 	}
25571ba41c9SSebastian Ott 	return (void __iomem *) ((unsigned long) area->addr + offset);
25671ba41c9SSebastian Ott }
257b02002ccSNiklas Schnelle 
258b02002ccSNiklas Schnelle void __iomem *ioremap_prot(phys_addr_t addr, size_t size, unsigned long prot)
259b02002ccSNiklas Schnelle {
260b02002ccSNiklas Schnelle 	return __ioremap(addr, size, __pgprot(prot));
261b02002ccSNiklas Schnelle }
262b02002ccSNiklas Schnelle EXPORT_SYMBOL(ioremap_prot);
263b02002ccSNiklas Schnelle 
264b02002ccSNiklas Schnelle void __iomem *ioremap(phys_addr_t addr, size_t size)
265b02002ccSNiklas Schnelle {
266b02002ccSNiklas Schnelle 	return __ioremap(addr, size, PAGE_KERNEL);
267b02002ccSNiklas Schnelle }
26871ba41c9SSebastian Ott EXPORT_SYMBOL(ioremap);
26971ba41c9SSebastian Ott 
270b02002ccSNiklas Schnelle void __iomem *ioremap_wc(phys_addr_t addr, size_t size)
271b02002ccSNiklas Schnelle {
272b02002ccSNiklas Schnelle 	return __ioremap(addr, size, pgprot_writecombine(PAGE_KERNEL));
273b02002ccSNiklas Schnelle }
274b02002ccSNiklas Schnelle EXPORT_SYMBOL(ioremap_wc);
275b02002ccSNiklas Schnelle 
276b02002ccSNiklas Schnelle void __iomem *ioremap_wt(phys_addr_t addr, size_t size)
277b02002ccSNiklas Schnelle {
278b02002ccSNiklas Schnelle 	return __ioremap(addr, size, pgprot_writethrough(PAGE_KERNEL));
279b02002ccSNiklas Schnelle }
280b02002ccSNiklas Schnelle EXPORT_SYMBOL(ioremap_wt);
281b02002ccSNiklas Schnelle 
28271ba41c9SSebastian Ott void iounmap(volatile void __iomem *addr)
28371ba41c9SSebastian Ott {
28471ba41c9SSebastian Ott 	if (static_branch_likely(&have_mio))
28571ba41c9SSebastian Ott 		vunmap((__force void *) ((unsigned long) addr & PAGE_MASK));
28671ba41c9SSebastian Ott }
28771ba41c9SSebastian Ott EXPORT_SYMBOL(iounmap);
28871ba41c9SSebastian Ott 
289cd248341SJan Glauber /* Create a virtual mapping cookie for a PCI BAR */
29071ba41c9SSebastian Ott static void __iomem *pci_iomap_range_fh(struct pci_dev *pdev, int bar,
29171ba41c9SSebastian Ott 					unsigned long offset, unsigned long max)
292cd248341SJan Glauber {
293198a5278SSebastian Ott 	struct zpci_dev *zdev =	to_zpci(pdev);
294cd248341SJan Glauber 	int idx;
295cd248341SJan Glauber 
296cd248341SJan Glauber 	idx = zdev->bars[bar].map_idx;
297cd248341SJan Glauber 	spin_lock(&zpci_iomap_lock);
298f5e44f82SSebastian Ott 	/* Detect overrun */
299f5e44f82SSebastian Ott 	WARN_ON(!++zpci_iomap_start[idx].count);
300cd248341SJan Glauber 	zpci_iomap_start[idx].fh = zdev->fh;
301cd248341SJan Glauber 	zpci_iomap_start[idx].bar = bar;
302cd248341SJan Glauber 	spin_unlock(&zpci_iomap_lock);
303cd248341SJan Glauber 
3049e00caaeSSebastian Ott 	return (void __iomem *) ZPCI_ADDR(idx) + offset;
305cd248341SJan Glauber }
30671ba41c9SSebastian Ott 
30771ba41c9SSebastian Ott static void __iomem *pci_iomap_range_mio(struct pci_dev *pdev, int bar,
30871ba41c9SSebastian Ott 					 unsigned long offset,
30971ba41c9SSebastian Ott 					 unsigned long max)
31071ba41c9SSebastian Ott {
31171ba41c9SSebastian Ott 	unsigned long barsize = pci_resource_len(pdev, bar);
31271ba41c9SSebastian Ott 	struct zpci_dev *zdev = to_zpci(pdev);
31371ba41c9SSebastian Ott 	void __iomem *iova;
31471ba41c9SSebastian Ott 
31571ba41c9SSebastian Ott 	iova = ioremap((unsigned long) zdev->bars[bar].mio_wt, barsize);
31671ba41c9SSebastian Ott 	return iova ? iova + offset : iova;
31771ba41c9SSebastian Ott }
31871ba41c9SSebastian Ott 
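/*
 * pci_iomap_range() selects the mapping scheme: with MIO the BAR's
 * write-through MIO address is ioremap()ed directly, otherwise the BAR's
 * refcounted zpci_iomap entry is used and the returned cookie encodes its
 * index via ZPCI_ADDR().
 */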
31971ba41c9SSebastian Ott void __iomem *pci_iomap_range(struct pci_dev *pdev, int bar,
32071ba41c9SSebastian Ott 			      unsigned long offset, unsigned long max)
32171ba41c9SSebastian Ott {
322c9c13ba4SDenis Efremov 	if (bar >= PCI_STD_NUM_BARS || !pci_resource_len(pdev, bar))
32371ba41c9SSebastian Ott 		return NULL;
32471ba41c9SSebastian Ott 
32571ba41c9SSebastian Ott 	if (static_branch_likely(&have_mio))
32671ba41c9SSebastian Ott 		return pci_iomap_range_mio(pdev, bar, offset, max);
32771ba41c9SSebastian Ott 	else
32871ba41c9SSebastian Ott 		return pci_iomap_range_fh(pdev, bar, offset, max);
32971ba41c9SSebastian Ott }
330d9426083SSebastian Ott EXPORT_SYMBOL(pci_iomap_range);
3318cfc99b5SMichael S. Tsirkin 
3328cfc99b5SMichael S. Tsirkin void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen)
3338cfc99b5SMichael S. Tsirkin {
3348cfc99b5SMichael S. Tsirkin 	return pci_iomap_range(dev, bar, 0, maxlen);
3358cfc99b5SMichael S. Tsirkin }
3368cfc99b5SMichael S. Tsirkin EXPORT_SYMBOL(pci_iomap);
337cd248341SJan Glauber 
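/*
 * The write-combining variants map the BAR's mio_wb address instead of
 * mio_wt; without MIO they fall back to the same function-handle based path
 * as the regular mappings.
 */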
33871ba41c9SSebastian Ott static void __iomem *pci_iomap_wc_range_mio(struct pci_dev *pdev, int bar,
33971ba41c9SSebastian Ott 					    unsigned long offset, unsigned long max)
34071ba41c9SSebastian Ott {
34171ba41c9SSebastian Ott 	unsigned long barsize = pci_resource_len(pdev, bar);
34271ba41c9SSebastian Ott 	struct zpci_dev *zdev = to_zpci(pdev);
34371ba41c9SSebastian Ott 	void __iomem *iova;
34471ba41c9SSebastian Ott 
34571ba41c9SSebastian Ott 	iova = ioremap((unsigned long) zdev->bars[bar].mio_wb, barsize);
34671ba41c9SSebastian Ott 	return iova ? iova + offset : iova;
34771ba41c9SSebastian Ott }
34871ba41c9SSebastian Ott 
34971ba41c9SSebastian Ott void __iomem *pci_iomap_wc_range(struct pci_dev *pdev, int bar,
35071ba41c9SSebastian Ott 				 unsigned long offset, unsigned long max)
35171ba41c9SSebastian Ott {
352c9c13ba4SDenis Efremov 	if (bar >= PCI_STD_NUM_BARS || !pci_resource_len(pdev, bar))
35371ba41c9SSebastian Ott 		return NULL;
35471ba41c9SSebastian Ott 
35571ba41c9SSebastian Ott 	if (static_branch_likely(&have_mio))
35671ba41c9SSebastian Ott 		return pci_iomap_wc_range_mio(pdev, bar, offset, max);
35771ba41c9SSebastian Ott 	else
35871ba41c9SSebastian Ott 		return pci_iomap_range_fh(pdev, bar, offset, max);
35971ba41c9SSebastian Ott }
36071ba41c9SSebastian Ott EXPORT_SYMBOL(pci_iomap_wc_range);
36171ba41c9SSebastian Ott 
36271ba41c9SSebastian Ott void __iomem *pci_iomap_wc(struct pci_dev *dev, int bar, unsigned long maxlen)
36371ba41c9SSebastian Ott {
36471ba41c9SSebastian Ott 	return pci_iomap_wc_range(dev, bar, 0, maxlen);
36571ba41c9SSebastian Ott }
36671ba41c9SSebastian Ott EXPORT_SYMBOL(pci_iomap_wc);
36771ba41c9SSebastian Ott 
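/*
 * Unmapping: zpci_iomap entries are reference counted and only cleared when
 * the last user is gone, whereas MIO mappings are ordinary ioremap()ed
 * ranges and simply go through iounmap().
 */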
36871ba41c9SSebastian Ott static void pci_iounmap_fh(struct pci_dev *pdev, void __iomem *addr)
369cd248341SJan Glauber {
3709e00caaeSSebastian Ott 	unsigned int idx = ZPCI_IDX(addr);
371cd248341SJan Glauber 
372cd248341SJan Glauber 	spin_lock(&zpci_iomap_lock);
3738cfc99b5SMichael S. Tsirkin 	/* Detect underrun */
374f5e44f82SSebastian Ott 	WARN_ON(!zpci_iomap_start[idx].count);
3758cfc99b5SMichael S. Tsirkin 	if (!--zpci_iomap_start[idx].count) {
376cd248341SJan Glauber 		zpci_iomap_start[idx].fh = 0;
377cd248341SJan Glauber 		zpci_iomap_start[idx].bar = 0;
3788cfc99b5SMichael S. Tsirkin 	}
379cd248341SJan Glauber 	spin_unlock(&zpci_iomap_lock);
380cd248341SJan Glauber }
38171ba41c9SSebastian Ott 
38271ba41c9SSebastian Ott static void pci_iounmap_mio(struct pci_dev *pdev, void __iomem *addr)
38371ba41c9SSebastian Ott {
38471ba41c9SSebastian Ott 	iounmap(addr);
38571ba41c9SSebastian Ott }
38671ba41c9SSebastian Ott 
38771ba41c9SSebastian Ott void pci_iounmap(struct pci_dev *pdev, void __iomem *addr)
38871ba41c9SSebastian Ott {
38971ba41c9SSebastian Ott 	if (static_branch_likely(&have_mio))
39071ba41c9SSebastian Ott 		pci_iounmap_mio(pdev, addr);
39171ba41c9SSebastian Ott 	else
39271ba41c9SSebastian Ott 		pci_iounmap_fh(pdev, addr);
39371ba41c9SSebastian Ott }
394d9426083SSebastian Ott EXPORT_SYMBOL(pci_iounmap);
395cd248341SJan Glauber 
396cd248341SJan Glauber static int pci_read(struct pci_bus *bus, unsigned int devfn, int where,
397cd248341SJan Glauber 		    int size, u32 *val)
398cd248341SJan Glauber {
39944510d6fSPierre Morel 	struct zpci_dev *zdev = get_zdev_by_bus(bus, devfn);
400cd248341SJan Glauber 
40144510d6fSPierre Morel 	return (zdev) ? zpci_cfg_load(zdev, where, val, size) : -ENODEV;
402cd248341SJan Glauber }
403cd248341SJan Glauber 
404cd248341SJan Glauber static int pci_write(struct pci_bus *bus, unsigned int devfn, int where,
405cd248341SJan Glauber 		     int size, u32 val)
406cd248341SJan Glauber {
40744510d6fSPierre Morel 	struct zpci_dev *zdev = get_zdev_by_bus(bus, devfn);
408cd248341SJan Glauber 
40944510d6fSPierre Morel 	return (zdev) ? zpci_cfg_store(zdev, where, val, size) : -ENODEV;
410cd248341SJan Glauber }
411cd248341SJan Glauber 
412cd248341SJan Glauber static struct pci_ops pci_root_ops = {
413cd248341SJan Glauber 	.read = pci_read,
414cd248341SJan Glauber 	.write = pci_write,
415cd248341SJan Glauber };
416cd248341SJan Glauber 
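/*
 * Resource (un)mapping for a PCI function: with MIO the BAR resources point
 * at the write-through MIO addresses, otherwise every BAR is backed by an
 * iomap cookie from pci_iomap_range_fh(); VF resources are handled
 * separately by zpci_iov_map_resources().
 */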
4171803ba2dSSebastian Ott static void zpci_map_resources(struct pci_dev *pdev)
418cd248341SJan Glauber {
41971ba41c9SSebastian Ott 	struct zpci_dev *zdev = to_zpci(pdev);
420cd248341SJan Glauber 	resource_size_t len;
421cd248341SJan Glauber 	int i;
422cd248341SJan Glauber 
423c9c13ba4SDenis Efremov 	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
424cd248341SJan Glauber 		len = pci_resource_len(pdev, i);
425cd248341SJan Glauber 		if (!len)
426cd248341SJan Glauber 			continue;
42771ba41c9SSebastian Ott 
428c7ff0e91SSebastian Ott 		if (zpci_use_mio(zdev))
42971ba41c9SSebastian Ott 			pdev->resource[i].start =
430df057c91SNiklas Schnelle 				(resource_size_t __force) zdev->bars[i].mio_wt;
43171ba41c9SSebastian Ott 		else
432c7ff0e91SSebastian Ott 			pdev->resource[i].start = (resource_size_t __force)
433c7ff0e91SSebastian Ott 				pci_iomap_range_fh(pdev, i, 0, 0);
434cd248341SJan Glauber 		pdev->resource[i].end = pdev->resource[i].start + len - 1;
435cd248341SJan Glauber 	}
436cfbb4a7aSSebastian Ott 
437abb95b75SNiklas Schnelle 	zpci_iov_map_resources(pdev);
438944239c5SSebastian Ott }
439944239c5SSebastian Ott 
4401803ba2dSSebastian Ott static void zpci_unmap_resources(struct pci_dev *pdev)
441944239c5SSebastian Ott {
442c7ff0e91SSebastian Ott 	struct zpci_dev *zdev = to_zpci(pdev);
443944239c5SSebastian Ott 	resource_size_t len;
444944239c5SSebastian Ott 	int i;
445944239c5SSebastian Ott 
446c7ff0e91SSebastian Ott 	if (zpci_use_mio(zdev))
44771ba41c9SSebastian Ott 		return;
44871ba41c9SSebastian Ott 
449c9c13ba4SDenis Efremov 	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
450944239c5SSebastian Ott 		len = pci_resource_len(pdev, i);
451944239c5SSebastian Ott 		if (!len)
452944239c5SSebastian Ott 			continue;
453c7ff0e91SSebastian Ott 		pci_iounmap_fh(pdev, (void __iomem __force *)
4545b9f2081SMartin Schwidefsky 			       pdev->resource[i].start);
455944239c5SSebastian Ott 	}
456944239c5SSebastian Ott }
457cd248341SJan Glauber 
458cd248341SJan Glauber static int zpci_alloc_iomap(struct zpci_dev *zdev)
459cd248341SJan Glauber {
460bf19c94dSSebastian Ott 	unsigned long entry;
461cd248341SJan Glauber 
462cd248341SJan Glauber 	spin_lock(&zpci_iomap_lock);
463c506fff3SSebastian Ott 	entry = find_first_zero_bit(zpci_iomap_bitmap, ZPCI_IOMAP_ENTRIES);
464c506fff3SSebastian Ott 	if (entry == ZPCI_IOMAP_ENTRIES) {
465cd248341SJan Glauber 		spin_unlock(&zpci_iomap_lock);
466cd248341SJan Glauber 		return -ENOSPC;
467cd248341SJan Glauber 	}
468c506fff3SSebastian Ott 	set_bit(entry, zpci_iomap_bitmap);
469cd248341SJan Glauber 	spin_unlock(&zpci_iomap_lock);
470cd248341SJan Glauber 	return entry;
471cd248341SJan Glauber }
472cd248341SJan Glauber 
473cd248341SJan Glauber static void zpci_free_iomap(struct zpci_dev *zdev, int entry)
474cd248341SJan Glauber {
475cd248341SJan Glauber 	spin_lock(&zpci_iomap_lock);
476cd248341SJan Glauber 	memset(&zpci_iomap_start[entry], 0, sizeof(struct zpci_iomap_entry));
477c506fff3SSebastian Ott 	clear_bit(entry, zpci_iomap_bitmap);
478cd248341SJan Glauber 	spin_unlock(&zpci_iomap_lock);
479cd248341SJan Glauber }
480cd248341SJan Glauber 
4817a572a3aSSebastian Ott static struct resource *__alloc_res(struct zpci_dev *zdev, unsigned long start,
4827a572a3aSSebastian Ott 				    unsigned long size, unsigned long flags)
4837a572a3aSSebastian Ott {
4847a572a3aSSebastian Ott 	struct resource *r;
4857a572a3aSSebastian Ott 
4867a572a3aSSebastian Ott 	r = kzalloc(sizeof(*r), GFP_KERNEL);
4877a572a3aSSebastian Ott 	if (!r)
4887a572a3aSSebastian Ott 		return NULL;
4897a572a3aSSebastian Ott 
4907a572a3aSSebastian Ott 	r->start = start;
4917a572a3aSSebastian Ott 	r->end = r->start + size - 1;
4927a572a3aSSebastian Ott 	r->flags = flags;
4937a572a3aSSebastian Ott 	r->name = zdev->res_name;
4947a572a3aSSebastian Ott 
4957a572a3aSSebastian Ott 	if (request_resource(&iomem_resource, r)) {
4967a572a3aSSebastian Ott 		kfree(r);
4977a572a3aSSebastian Ott 		return NULL;
4987a572a3aSSebastian Ott 	}
4997a572a3aSSebastian Ott 	return r;
5007a572a3aSSebastian Ott }
5017a572a3aSSebastian Ott 
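/*
 * zpci_setup_bus_resources() turns the CLP-provided BAR descriptions into
 * iomem resources: bit 0x8 of bars[i].val marks a prefetchable BAR, bit 0x4
 * a 64-bit BAR, and bars[i].size is the power-of-two exponent of the length.
 */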
50205bc1be6SPierre Morel int zpci_setup_bus_resources(struct zpci_dev *zdev,
5037a572a3aSSebastian Ott 			     struct list_head *resources)
5047a572a3aSSebastian Ott {
5057a572a3aSSebastian Ott 	unsigned long addr, size, flags;
5067a572a3aSSebastian Ott 	struct resource *res;
5077a572a3aSSebastian Ott 	int i, entry;
5087a572a3aSSebastian Ott 
5097a572a3aSSebastian Ott 	snprintf(zdev->res_name, sizeof(zdev->res_name),
51005bc1be6SPierre Morel 		 "PCI Bus %04x:%02x", zdev->uid, ZPCI_BUS_NR);
5117a572a3aSSebastian Ott 
512c9c13ba4SDenis Efremov 	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
5137a572a3aSSebastian Ott 		if (!zdev->bars[i].size)
5147a572a3aSSebastian Ott 			continue;
5157a572a3aSSebastian Ott 		entry = zpci_alloc_iomap(zdev);
5167a572a3aSSebastian Ott 		if (entry < 0)
5177a572a3aSSebastian Ott 			return entry;
5187a572a3aSSebastian Ott 		zdev->bars[i].map_idx = entry;
5197a572a3aSSebastian Ott 
5207a572a3aSSebastian Ott 		/* only MMIO is supported */
5217a572a3aSSebastian Ott 		flags = IORESOURCE_MEM;
5227a572a3aSSebastian Ott 		if (zdev->bars[i].val & 8)
5237a572a3aSSebastian Ott 			flags |= IORESOURCE_PREFETCH;
5247a572a3aSSebastian Ott 		if (zdev->bars[i].val & 4)
5257a572a3aSSebastian Ott 			flags |= IORESOURCE_MEM_64;
5267a572a3aSSebastian Ott 
527c7ff0e91SSebastian Ott 		if (zpci_use_mio(zdev))
528df057c91SNiklas Schnelle 			addr = (unsigned long) zdev->bars[i].mio_wt;
529dcd33b23SSebastian Ott 		else
5309e00caaeSSebastian Ott 			addr = ZPCI_ADDR(entry);
5317a572a3aSSebastian Ott 		size = 1UL << zdev->bars[i].size;
5327a572a3aSSebastian Ott 
5337a572a3aSSebastian Ott 		res = __alloc_res(zdev, addr, size, flags);
5347a572a3aSSebastian Ott 		if (!res) {
5357a572a3aSSebastian Ott 			zpci_free_iomap(zdev, entry);
5367a572a3aSSebastian Ott 			return -ENOMEM;
5377a572a3aSSebastian Ott 		}
5387a572a3aSSebastian Ott 		zdev->bars[i].res = res;
5397a572a3aSSebastian Ott 		pci_add_resource(resources, res);
5407a572a3aSSebastian Ott 	}
5417a572a3aSSebastian Ott 
5427a572a3aSSebastian Ott 	return 0;
5437a572a3aSSebastian Ott }
5447a572a3aSSebastian Ott 
5457a572a3aSSebastian Ott static void zpci_cleanup_bus_resources(struct zpci_dev *zdev)
5467a572a3aSSebastian Ott {
5477a572a3aSSebastian Ott 	int i;
5487a572a3aSSebastian Ott 
549c9c13ba4SDenis Efremov 	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
5502b1df724SSebastian Ott 		if (!zdev->bars[i].size || !zdev->bars[i].res)
5517a572a3aSSebastian Ott 			continue;
5527a572a3aSSebastian Ott 
5537a572a3aSSebastian Ott 		zpci_free_iomap(zdev, zdev->bars[i].map_idx);
5547a572a3aSSebastian Ott 		release_resource(zdev->bars[i].res);
5557a572a3aSSebastian Ott 		kfree(zdev->bars[i].res);
5567a572a3aSSebastian Ott 	}
5577a572a3aSSebastian Ott }
5587a572a3aSSebastian Ott 
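/*
 * Hooks called by the common PCI code during enumeration: pcibios_add_device()
 * wires up the s390 DMA ops and sysfs attribute groups, maps and claims the
 * BAR resources, and suppresses common-code VF scanning for physical
 * functions (VFs are discovered as zPCI functions in their own right).
 */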
559af0a8a84SSebastian Ott int pcibios_add_device(struct pci_dev *pdev)
560af0a8a84SSebastian Ott {
561cb809182SSebastian Ott 	struct resource *res;
562cb809182SSebastian Ott 	int i;
563cb809182SSebastian Ott 
5647dc20ab1SSebastian Ott 	if (pdev->is_physfn)
5657dc20ab1SSebastian Ott 		pdev->no_vf_scan = 1;
5667dc20ab1SSebastian Ott 
567ef4858c6SSebastian Ott 	pdev->dev.groups = zpci_attr_groups;
5685657933dSBart Van Assche 	pdev->dev.dma_ops = &s390_pci_dma_ops;
5691803ba2dSSebastian Ott 	zpci_map_resources(pdev);
570cb809182SSebastian Ott 
571c9c13ba4SDenis Efremov 	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
572cb809182SSebastian Ott 		res = &pdev->resource[i];
573cb809182SSebastian Ott 		if (res->parent || !res->flags)
574cb809182SSebastian Ott 			continue;
575cb809182SSebastian Ott 		pci_claim_resource(pdev, i);
576cb809182SSebastian Ott 	}
577cb809182SSebastian Ott 
578cb809182SSebastian Ott 	return 0;
579cb809182SSebastian Ott }
580cb809182SSebastian Ott 
5811803ba2dSSebastian Ott void pcibios_release_device(struct pci_dev *pdev)
5821803ba2dSSebastian Ott {
5831803ba2dSSebastian Ott 	zpci_unmap_resources(pdev);
5841803ba2dSSebastian Ott }
5851803ba2dSSebastian Ott 
586cb809182SSebastian Ott int pcibios_enable_device(struct pci_dev *pdev, int mask)
587cb809182SSebastian Ott {
588198a5278SSebastian Ott 	struct zpci_dev *zdev = to_zpci(pdev);
589af0a8a84SSebastian Ott 
5909a99649fSSebastian Ott 	zpci_debug_init_device(zdev, dev_name(&pdev->dev));
591af0a8a84SSebastian Ott 	zpci_fmb_enable_device(zdev);
592af0a8a84SSebastian Ott 
593d7533232SBjorn Helgaas 	return pci_enable_resources(pdev, mask);
594af0a8a84SSebastian Ott }
595af0a8a84SSebastian Ott 
596cb809182SSebastian Ott void pcibios_disable_device(struct pci_dev *pdev)
597944239c5SSebastian Ott {
598198a5278SSebastian Ott 	struct zpci_dev *zdev = to_zpci(pdev);
599944239c5SSebastian Ott 
600944239c5SSebastian Ott 	zpci_fmb_disable_device(zdev);
601944239c5SSebastian Ott 	zpci_debug_exit_device(zdev);
602944239c5SSebastian Ott }
603944239c5SSebastian Ott 
60405bc1be6SPierre Morel static int __zpci_register_domain(int domain)
605cd248341SJan Glauber {
606969ae01bSNiklas Schnelle 	spin_lock(&zpci_domain_lock);
60705bc1be6SPierre Morel 	if (test_bit(domain, zpci_domain)) {
608969ae01bSNiklas Schnelle 		spin_unlock(&zpci_domain_lock);
60905bc1be6SPierre Morel 		pr_err("Domain %04x is already assigned\n", domain);
610312e8462SSebastian Ott 		return -EEXIST;
611312e8462SSebastian Ott 	}
61205bc1be6SPierre Morel 	set_bit(domain, zpci_domain);
613312e8462SSebastian Ott 	spin_unlock(&zpci_domain_lock);
61405bc1be6SPierre Morel 	return domain;
6155c5afd02SSebastian Ott }
61605bc1be6SPierre Morel 
61705bc1be6SPierre Morel static int __zpci_alloc_domain(void)
61805bc1be6SPierre Morel {
61905bc1be6SPierre Morel 	int domain;
62005bc1be6SPierre Morel 
62105bc1be6SPierre Morel 	spin_lock(&zpci_domain_lock);
622969ae01bSNiklas Schnelle 	/*
623969ae01bSNiklas Schnelle 	 * We can always auto allocate domains below ZPCI_NR_DEVICES.
624969ae01bSNiklas Schnelle 	 * There is either a free domain or we have reached the maximum in
625969ae01bSNiklas Schnelle 	 * which case we would have bailed earlier.
626969ae01bSNiklas Schnelle 	 */
62705bc1be6SPierre Morel 	domain = find_first_zero_bit(zpci_domain, ZPCI_NR_DEVICES);
62805bc1be6SPierre Morel 	set_bit(domain, zpci_domain);
629cd248341SJan Glauber 	spin_unlock(&zpci_domain_lock);
63005bc1be6SPierre Morel 	return domain;
631cd248341SJan Glauber }
632cd248341SJan Glauber 
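/*
 * Domain allocation: with UID checking active the firmware-provided UID is
 * registered as the PCI domain number; otherwise, or once a device without
 * a UID turns up, a free domain below ZPCI_NR_DEVICES is taken from the
 * zpci_domain bitmap.
 */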
63305bc1be6SPierre Morel int zpci_alloc_domain(int domain)
63405bc1be6SPierre Morel {
63505bc1be6SPierre Morel 	if (zpci_unique_uid) {
63605bc1be6SPierre Morel 		if (domain)
63705bc1be6SPierre Morel 			return __zpci_register_domain(domain);
63805bc1be6SPierre Morel 		pr_warn("UID checking is active but no UID is provided: switching to automatic domain allocation\n");
63905bc1be6SPierre Morel 		update_uid_checking(false);
64005bc1be6SPierre Morel 	}
64105bc1be6SPierre Morel 	return __zpci_alloc_domain();
64205bc1be6SPierre Morel }
64305bc1be6SPierre Morel 
64405bc1be6SPierre Morel void zpci_free_domain(int domain)
645cd248341SJan Glauber {
646cd248341SJan Glauber 	spin_lock(&zpci_domain_lock);
64705bc1be6SPierre Morel 	clear_bit(domain, zpci_domain);
648cd248341SJan Glauber 	spin_unlock(&zpci_domain_lock);
649cd248341SJan Glauber }
650cd248341SJan Glauber 
6517d594322SSebastian Ott 
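/*
 * zpci_enable_device() enables the function through CLP and initializes its
 * DMA translation; if the DMA setup fails the function is disabled again so
 * the caller sees a consistent state.
 */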
652a755a45dSJan Glauber int zpci_enable_device(struct zpci_dev *zdev)
653a755a45dSJan Glauber {
654a755a45dSJan Glauber 	int rc;
655a755a45dSJan Glauber 
656a755a45dSJan Glauber 	rc = clp_enable_fh(zdev, ZPCI_NR_DMA_SPACES);
657a755a45dSJan Glauber 	if (rc)
658a755a45dSJan Glauber 		goto out;
659828b35f6SJan Glauber 
660828b35f6SJan Glauber 	rc = zpci_dma_init_device(zdev);
661828b35f6SJan Glauber 	if (rc)
662828b35f6SJan Glauber 		goto out_dma;
6630ff70ec8SSebastian Ott 
664a755a45dSJan Glauber 	return 0;
665828b35f6SJan Glauber 
666828b35f6SJan Glauber out_dma:
667828b35f6SJan Glauber 	clp_disable_fh(zdev);
668a755a45dSJan Glauber out:
669a755a45dSJan Glauber 	return rc;
670a755a45dSJan Glauber }
671a755a45dSJan Glauber EXPORT_SYMBOL_GPL(zpci_enable_device);
672a755a45dSJan Glauber 
673cb65a669SSebastian Ott int zpci_disable_device(struct zpci_dev *zdev)
674cb65a669SSebastian Ott {
675cb65a669SSebastian Ott 	zpci_dma_exit_device(zdev);
676afdf9550SNiklas Schnelle 	/*
677afdf9550SNiklas Schnelle 	 * The zPCI function may already be disabled by the platform, this is
678afdf9550SNiklas Schnelle 	 * detected in clp_disable_fh() which becomes a no-op.
679afdf9550SNiklas Schnelle 	 */
680cb65a669SSebastian Ott 	return clp_disable_fh(zdev);
681cb65a669SSebastian Ott }
682cb65a669SSebastian Ott EXPORT_SYMBOL_GPL(zpci_disable_device);
683cb65a669SSebastian Ott 
6840b13525cSNiklas Schnelle /* zpci_remove_device - Removes the given zdev from the PCI core
6850b13525cSNiklas Schnelle  * @zdev: the zdev to be removed from the PCI core
6860b13525cSNiklas Schnelle  * @set_error: if true the device's error state is set to permanent failure
6870b13525cSNiklas Schnelle  *
6880b13525cSNiklas Schnelle  * Sets a zPCI device to a configured but offline state; the zPCI
6890b13525cSNiklas Schnelle  * device is still accessible through its hotplug slot and the zPCI
6900b13525cSNiklas Schnelle  * API but is removed from the common code PCI bus, making it
6910b13525cSNiklas Schnelle  * no longer available to drivers.
6920b13525cSNiklas Schnelle  */
6930b13525cSNiklas Schnelle void zpci_remove_device(struct zpci_dev *zdev, bool set_error)
6942f0230b2SNiklas Schnelle {
6952f0230b2SNiklas Schnelle 	struct zpci_bus *zbus = zdev->zbus;
6962f0230b2SNiklas Schnelle 	struct pci_dev *pdev;
6972f0230b2SNiklas Schnelle 
6980b13525cSNiklas Schnelle 	if (!zdev->zbus->bus)
6990b13525cSNiklas Schnelle 		return;
7000b13525cSNiklas Schnelle 
7012f0230b2SNiklas Schnelle 	pdev = pci_get_slot(zbus->bus, zdev->devfn);
702b97bf44fSNiklas Schnelle 	if (pdev) {
7030b13525cSNiklas Schnelle 		if (set_error)
7040b13525cSNiklas Schnelle 			pdev->error_state = pci_channel_io_perm_failure;
7050b13525cSNiklas Schnelle 		if (pdev->is_virtfn) {
7060b13525cSNiklas Schnelle 			zpci_iov_remove_virtfn(pdev, zdev->vfn);
7070b13525cSNiklas Schnelle 			/* balance pci_get_slot */
7080b13525cSNiklas Schnelle 			pci_dev_put(pdev);
7090b13525cSNiklas Schnelle 			return;
7100b13525cSNiklas Schnelle 		}
7112f0230b2SNiklas Schnelle 		pci_stop_and_remove_bus_device_locked(pdev);
7120b13525cSNiklas Schnelle 		/* balance pci_get_slot */
7130b13525cSNiklas Schnelle 		pci_dev_put(pdev);
7142f0230b2SNiklas Schnelle 	}
715b97bf44fSNiklas Schnelle }
7162f0230b2SNiklas Schnelle 
717ba764dd7SNiklas Schnelle /**
718ba764dd7SNiklas Schnelle  * zpci_create_device() - Create a new zpci_dev and add it to the zbus
719ba764dd7SNiklas Schnelle  * @fid: Function ID of the device to be created
720ba764dd7SNiklas Schnelle  * @fh: Current Function Handle of the device to be created
721ba764dd7SNiklas Schnelle  * @state: Initial state after creation either Standby or Configured
722ba764dd7SNiklas Schnelle  *
723ba764dd7SNiklas Schnelle  * Creates a new zpci device and adds it to its zbus (creating the zbus
724ba764dd7SNiklas Schnelle  * first if necessary) as well as to the zpci_list.
725ba764dd7SNiklas Schnelle  *
726ba764dd7SNiklas Schnelle  * Returns: 0 on success, an error value otherwise
727ba764dd7SNiklas Schnelle  */
728ba764dd7SNiklas Schnelle int zpci_create_device(u32 fid, u32 fh, enum zpci_state state)
729cd248341SJan Glauber {
730ba764dd7SNiklas Schnelle 	struct zpci_dev *zdev;
731cd248341SJan Glauber 	int rc;
732cd248341SJan Glauber 
733ba764dd7SNiklas Schnelle 	zpci_dbg(3, "add fid:%x, fh:%x, c:%d\n", fid, fh, state);
734ba764dd7SNiklas Schnelle 	zdev = kzalloc(sizeof(*zdev), GFP_KERNEL);
735ba764dd7SNiklas Schnelle 	if (!zdev)
736ba764dd7SNiklas Schnelle 		return -ENOMEM;
737ba764dd7SNiklas Schnelle 
738ba764dd7SNiklas Schnelle 	/* FID and Function Handle are the static/dynamic identifiers */
739ba764dd7SNiklas Schnelle 	zdev->fid = fid;
740ba764dd7SNiklas Schnelle 	zdev->fh = fh;
741ba764dd7SNiklas Schnelle 
742ba764dd7SNiklas Schnelle 	/* Query function properties and update zdev */
743ba764dd7SNiklas Schnelle 	rc = clp_query_pci_fn(zdev);
744ba764dd7SNiklas Schnelle 	if (rc)
745ba764dd7SNiklas Schnelle 		goto error;
746ba764dd7SNiklas Schnelle 	zdev->state = state;
747ba764dd7SNiklas Schnelle 
74805bc1be6SPierre Morel 	kref_init(&zdev->kref);
749ba764dd7SNiklas Schnelle 	mutex_init(&zdev->lock);
750ba764dd7SNiklas Schnelle 
751ba764dd7SNiklas Schnelle 	rc = zpci_init_iommu(zdev);
752ba764dd7SNiklas Schnelle 	if (rc)
753ba764dd7SNiklas Schnelle 		goto error;
754ba764dd7SNiklas Schnelle 
755ba764dd7SNiklas Schnelle 	if (zdev->state == ZPCI_FN_STATE_CONFIGURED) {
756ba764dd7SNiklas Schnelle 		rc = zpci_enable_device(zdev);
757ba764dd7SNiklas Schnelle 		if (rc)
758ba764dd7SNiklas Schnelle 			goto error_destroy_iommu;
759ba764dd7SNiklas Schnelle 	}
760ba764dd7SNiklas Schnelle 
761ba764dd7SNiklas Schnelle 	rc = zpci_bus_device_register(zdev, &pci_root_ops);
762ba764dd7SNiklas Schnelle 	if (rc)
763ba764dd7SNiklas Schnelle 		goto error_disable;
76405bc1be6SPierre Morel 
76505bc1be6SPierre Morel 	spin_lock(&zpci_list_lock);
76605bc1be6SPierre Morel 	list_add_tail(&zdev->entry, &zpci_list);
76705bc1be6SPierre Morel 	spin_unlock(&zpci_list_lock);
768cd248341SJan Glauber 
769cd248341SJan Glauber 	return 0;
770cd248341SJan Glauber 
771ba764dd7SNiklas Schnelle error_disable:
772*f6576a1bSNiklas Schnelle 	if (zdev_enabled(zdev))
7731c21351bSSebastian Ott 		zpci_disable_device(zdev);
774ba764dd7SNiklas Schnelle error_destroy_iommu:
775f42c2235SJoerg Roedel 	zpci_destroy_iommu(zdev);
776ba764dd7SNiklas Schnelle error:
777ba764dd7SNiklas Schnelle 	zpci_dbg(0, "add fid:%x, rc:%d\n", fid, rc);
778ba764dd7SNiklas Schnelle 	kfree(zdev);
779cd248341SJan Glauber 	return rc;
780cd248341SJan Glauber }
781cd248341SJan Glauber 
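/*
 * Called when the last reference to a zpci_dev is dropped: the device is
 * removed from the common PCI code if it is still registered there and
 * disabled if it is still enabled; for functions left in STANDBY the hotplug
 * slot, bus resources and IOMMU state are torn down as well, before the
 * device is taken off zpci_list and freed.
 */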
78205bc1be6SPierre Morel void zpci_release_device(struct kref *kref)
783623bd44dSSebastian Ott {
78405bc1be6SPierre Morel 	struct zpci_dev *zdev = container_of(kref, struct zpci_dev, kref);
785623bd44dSSebastian Ott 
7862f0230b2SNiklas Schnelle 	if (zdev->zbus->bus)
7870b13525cSNiklas Schnelle 		zpci_remove_device(zdev, false);
78844510d6fSPierre Morel 
789*f6576a1bSNiklas Schnelle 	if (zdev_enabled(zdev))
79005bc1be6SPierre Morel 		zpci_disable_device(zdev);
791*f6576a1bSNiklas Schnelle 
792*f6576a1bSNiklas Schnelle 	switch (zdev->state) {
79305bc1be6SPierre Morel 	case ZPCI_FN_STATE_STANDBY:
79444510d6fSPierre Morel 		if (zdev->has_hp_slot)
79505bc1be6SPierre Morel 			zpci_exit_slot(zdev);
79605bc1be6SPierre Morel 		zpci_cleanup_bus_resources(zdev);
79705bc1be6SPierre Morel 		zpci_bus_device_unregister(zdev);
79805bc1be6SPierre Morel 		zpci_destroy_iommu(zdev);
79905bc1be6SPierre Morel 		fallthrough;
80005bc1be6SPierre Morel 	default:
80105bc1be6SPierre Morel 		break;
80205bc1be6SPierre Morel 	}
80305bc1be6SPierre Morel 
80405bc1be6SPierre Morel 	spin_lock(&zpci_list_lock);
80505bc1be6SPierre Morel 	list_del(&zdev->entry);
80605bc1be6SPierre Morel 	spin_unlock(&zpci_list_lock);
80705bc1be6SPierre Morel 	zpci_dbg(3, "rem fid:%x\n", zdev->fid);
80805bc1be6SPierre Morel 	kfree(zdev);
809623bd44dSSebastian Ott }
810623bd44dSSebastian Ott 
811bd3a1725SMartin Schwidefsky int zpci_report_error(struct pci_dev *pdev,
812bd3a1725SMartin Schwidefsky 		      struct zpci_report_error_header *report)
813bd3a1725SMartin Schwidefsky {
814bd3a1725SMartin Schwidefsky 	struct zpci_dev *zdev = to_zpci(pdev);
815bd3a1725SMartin Schwidefsky 
816bd3a1725SMartin Schwidefsky 	return sclp_pci_report(report, zdev->fh, zdev->fid);
817bd3a1725SMartin Schwidefsky }
818bd3a1725SMartin Schwidefsky EXPORT_SYMBOL(zpci_report_error);
819bd3a1725SMartin Schwidefsky 
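/*
 * Global allocations: the FMB kmem cache (FMBs require their natural
 * alignment, hence the BUILD_BUG_ON), the zpci_iomap table and its
 * allocation bitmap; with MIO enabled write-back MIO is also configured
 * through clp_setup_writeback_mio().
 */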
820cd248341SJan Glauber static int zpci_mem_init(void)
821cd248341SJan Glauber {
82280c544deSSebastian Ott 	BUILD_BUG_ON(!is_power_of_2(__alignof__(struct zpci_fmb)) ||
82380c544deSSebastian Ott 		     __alignof__(struct zpci_fmb) < sizeof(struct zpci_fmb));
82480c544deSSebastian Ott 
825d0b08853SJan Glauber 	zdev_fmb_cache = kmem_cache_create("PCI_FMB_cache", sizeof(struct zpci_fmb),
82680c544deSSebastian Ott 					   __alignof__(struct zpci_fmb), 0, NULL);
827d0b08853SJan Glauber 	if (!zdev_fmb_cache)
828c506fff3SSebastian Ott 		goto error_fmb;
829d0b08853SJan Glauber 
830c506fff3SSebastian Ott 	zpci_iomap_start = kcalloc(ZPCI_IOMAP_ENTRIES,
831c506fff3SSebastian Ott 				   sizeof(*zpci_iomap_start), GFP_KERNEL);
832cd248341SJan Glauber 	if (!zpci_iomap_start)
8339a4da8a5SJan Glauber 		goto error_iomap;
834cd248341SJan Glauber 
835c506fff3SSebastian Ott 	zpci_iomap_bitmap = kcalloc(BITS_TO_LONGS(ZPCI_IOMAP_ENTRIES),
836c506fff3SSebastian Ott 				    sizeof(*zpci_iomap_bitmap), GFP_KERNEL);
837c506fff3SSebastian Ott 	if (!zpci_iomap_bitmap)
838c506fff3SSebastian Ott 		goto error_iomap_bitmap;
839c506fff3SSebastian Ott 
840b02002ccSNiklas Schnelle 	if (static_branch_likely(&have_mio))
841b02002ccSNiklas Schnelle 		clp_setup_writeback_mio();
842b02002ccSNiklas Schnelle 
843c506fff3SSebastian Ott 	return 0;
844c506fff3SSebastian Ott error_iomap_bitmap:
845c506fff3SSebastian Ott 	kfree(zpci_iomap_start);
8469a4da8a5SJan Glauber error_iomap:
847d0b08853SJan Glauber 	kmem_cache_destroy(zdev_fmb_cache);
848c506fff3SSebastian Ott error_fmb:
849cd248341SJan Glauber 	return -ENOMEM;
850cd248341SJan Glauber }
851cd248341SJan Glauber 
852cd248341SJan Glauber static void zpci_mem_exit(void)
853cd248341SJan Glauber {
854c506fff3SSebastian Ott 	kfree(zpci_iomap_bitmap);
855cd248341SJan Glauber 	kfree(zpci_iomap_start);
856d0b08853SJan Glauber 	kmem_cache_destroy(zdev_fmb_cache);
857cd248341SJan Glauber }
858cd248341SJan Glauber 
8596324b4deSSebastian Ott static unsigned int s390_pci_probe __initdata = 1;
86056271303SSebastian Ott static unsigned int s390_pci_no_mio __initdata;
861fbfe07d4SSebastian Ott unsigned int s390_pci_force_floating __initdata;
862aa3b7c29SSebastian Ott static unsigned int s390_pci_initialized;
863cd248341SJan Glauber 
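/*
 * Handling of the "pci=" early parameters: "off" disables PCI probing,
 * "nomio" keeps the function-handle based access path even when MIO is
 * available, while "force_floating" and "norid" set flags that are consumed
 * by the IRQ and CLP/bus code respectively.
 */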
864cd248341SJan Glauber char * __init pcibios_setup(char *str)
865cd248341SJan Glauber {
866257608fbSSebastian Ott 	if (!strcmp(str, "off")) {
867257608fbSSebastian Ott 		s390_pci_probe = 0;
868cd248341SJan Glauber 		return NULL;
869cd248341SJan Glauber 	}
87056271303SSebastian Ott 	if (!strcmp(str, "nomio")) {
87156271303SSebastian Ott 		s390_pci_no_mio = 1;
87256271303SSebastian Ott 		return NULL;
87356271303SSebastian Ott 	}
874fbfe07d4SSebastian Ott 	if (!strcmp(str, "force_floating")) {
875fbfe07d4SSebastian Ott 		s390_pci_force_floating = 1;
876fbfe07d4SSebastian Ott 		return NULL;
877fbfe07d4SSebastian Ott 	}
8786cf17f9aSPierre Morel 	if (!strcmp(str, "norid")) {
8796cf17f9aSPierre Morel 		s390_pci_no_rid = 1;
8806cf17f9aSPierre Morel 		return NULL;
8816cf17f9aSPierre Morel 	}
882cd248341SJan Glauber 	return str;
883cd248341SJan Glauber }
884cd248341SJan Glauber 
885aa3b7c29SSebastian Ott bool zpci_is_enabled(void)
886aa3b7c29SSebastian Ott {
887aa3b7c29SSebastian Ott 	return s390_pci_initialized;
888aa3b7c29SSebastian Ott }
889aa3b7c29SSebastian Ott 
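/*
 * Initialization order: facilities 69 and 71 gate zPCI support altogether,
 * facility 153 (plus bit 5 in control register 2) enables the MIO access
 * path; the debug, memory, IRQ and DMA layers are then brought up before an
 * initial CLP scan discovers the functions that are already present.
 */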
890cd248341SJan Glauber static int __init pci_base_init(void)
891cd248341SJan Glauber {
892cd248341SJan Glauber 	int rc;
893cd248341SJan Glauber 
8941e5635d1SHeiko Carstens 	if (!s390_pci_probe)
895cd248341SJan Glauber 		return 0;
896cd248341SJan Glauber 
897da78693eSNiklas Schnelle 	if (!test_facility(69) || !test_facility(71)) {
898da78693eSNiklas Schnelle 		pr_info("PCI is not supported because CPU facilities 69 or 71 are not available\n");
899cd248341SJan Glauber 		return 0;
900da78693eSNiklas Schnelle 	}
901cd248341SJan Glauber 
9029964f396SSebastian Ott 	if (test_facility(153) && !s390_pci_no_mio) {
90371ba41c9SSebastian Ott 		static_branch_enable(&have_mio);
9049964f396SSebastian Ott 		ctl_set_bit(2, 5);
9059964f396SSebastian Ott 	}
90671ba41c9SSebastian Ott 
907d0b08853SJan Glauber 	rc = zpci_debug_init();
908d0b08853SJan Glauber 	if (rc)
9091f44a225SMartin Schwidefsky 		goto out;
910d0b08853SJan Glauber 
911cd248341SJan Glauber 	rc = zpci_mem_init();
912cd248341SJan Glauber 	if (rc)
913cd248341SJan Glauber 		goto out_mem;
914cd248341SJan Glauber 
9159a4da8a5SJan Glauber 	rc = zpci_irq_init();
9169a4da8a5SJan Glauber 	if (rc)
9179a4da8a5SJan Glauber 		goto out_irq;
9189a4da8a5SJan Glauber 
919828b35f6SJan Glauber 	rc = zpci_dma_init();
920828b35f6SJan Glauber 	if (rc)
921828b35f6SJan Glauber 		goto out_dma;
922828b35f6SJan Glauber 
9231d578966SSebastian Ott 	rc = clp_scan_pci_devices();
924a755a45dSJan Glauber 	if (rc)
925a755a45dSJan Glauber 		goto out_find;
926a755a45dSJan Glauber 
927aa3b7c29SSebastian Ott 	s390_pci_initialized = 1;
928cd248341SJan Glauber 	return 0;
929cd248341SJan Glauber 
930a755a45dSJan Glauber out_find:
931828b35f6SJan Glauber 	zpci_dma_exit();
932828b35f6SJan Glauber out_dma:
9339a4da8a5SJan Glauber 	zpci_irq_exit();
9349a4da8a5SJan Glauber out_irq:
935cd248341SJan Glauber 	zpci_mem_exit();
936cd248341SJan Glauber out_mem:
937d0b08853SJan Glauber 	zpci_debug_exit();
9381f44a225SMartin Schwidefsky out:
939cd248341SJan Glauber 	return rc;
940cd248341SJan Glauber }
94167f43f38SSebastian Ott subsys_initcall_sync(pci_base_init);
942