// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright IBM Corp. 2012
 *
 * Author(s):
 *   Jan Glauber <jang@linux.vnet.ibm.com>
 *
 * The System z PCI code is a rewrite from a prototype by
 * the following people (Kudoz!):
 *   Alexander Schmidt
 *   Christoph Raisch
 *   Hannes Hering
 *   Hoang-Nam Nguyen
 *   Jan-Bernd Themann
 *   Stefan Roscher
 *   Thomas Klein
 */

#define KMSG_COMPONENT "zpci"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/delay.h>
#include <linux/seq_file.h>
#include <linux/jump_label.h>
#include <linux/pci.h>
#include <linux/printk.h>
#include <linux/lockdep.h>

#include <asm/isc.h>
#include <asm/airq.h>
#include <asm/facility.h>
#include <asm/pci_insn.h>
#include <asm/pci_clp.h>
#include <asm/pci_dma.h>

#include "pci_bus.h"
#include "pci_iov.h"

/* list of all detected zpci devices */
static LIST_HEAD(zpci_list);
static DEFINE_SPINLOCK(zpci_list_lock);

static DECLARE_BITMAP(zpci_domain, ZPCI_DOMAIN_BITMAP_SIZE);
static DEFINE_SPINLOCK(zpci_domain_lock);

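/*
 * Without MIO, mapped PCI BARs are represented by "fake" addresses: each
 * (function handle, BAR) pair gets a slot in zpci_iomap_start and the slot
 * index is encoded into the cookie returned by pci_iomap(), to be decoded
 * again by the ioread/iowrite helpers.
 */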
#define ZPCI_IOMAP_ENTRIES						\
	min(((unsigned long) ZPCI_NR_DEVICES * PCI_STD_NUM_BARS / 2),	\
	    ZPCI_IOMAP_MAX_ENTRIES)

unsigned int s390_pci_no_rid;

static DEFINE_SPINLOCK(zpci_iomap_lock);
static unsigned long *zpci_iomap_bitmap;
struct zpci_iomap_entry *zpci_iomap_start;
EXPORT_SYMBOL_GPL(zpci_iomap_start);

DEFINE_STATIC_KEY_FALSE(have_mio);

static struct kmem_cache *zdev_fmb_cache;

/* AEN structures that must be preserved over KVM module re-insertion */
union zpci_sic_iib *zpci_aipb;
EXPORT_SYMBOL_GPL(zpci_aipb);
struct airq_iv *zpci_aif_sbv;
EXPORT_SYMBOL_GPL(zpci_aif_sbv);

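/*
 * Returns the zdev matching @fid with an elevated reference count; the
 * caller must drop that reference with zpci_zdev_put() when done.
 */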
struct zpci_dev *get_zdev_by_fid(u32 fid)
{
	struct zpci_dev *tmp, *zdev = NULL;

	spin_lock(&zpci_list_lock);
	list_for_each_entry(tmp, &zpci_list, entry) {
		if (tmp->fid == fid) {
			zdev = tmp;
			zpci_zdev_get(zdev);
			break;
		}
	}
	spin_unlock(&zpci_list_lock);
	return zdev;
}

void zpci_remove_reserved_devices(void)
{
	struct zpci_dev *tmp, *zdev;
	enum zpci_state state;
	LIST_HEAD(remove);

	spin_lock(&zpci_list_lock);
	list_for_each_entry_safe(zdev, tmp, &zpci_list, entry) {
		if (zdev->state == ZPCI_FN_STATE_STANDBY &&
		    !clp_get_state(zdev->fid, &state) &&
		    state == ZPCI_FN_STATE_RESERVED)
			list_move_tail(&zdev->entry, &remove);
	}
	spin_unlock(&zpci_list_lock);

	list_for_each_entry_safe(zdev, tmp, &remove, entry)
		zpci_device_reserved(zdev);
}

int pci_domain_nr(struct pci_bus *bus)
{
	return ((struct zpci_bus *) bus->sysdata)->domain_nr;
}
EXPORT_SYMBOL_GPL(pci_domain_nr);

int pci_proc_domain(struct pci_bus *bus)
{
	return pci_domain_nr(bus);
}
EXPORT_SYMBOL_GPL(pci_proc_domain);

/* Modify PCI: Register I/O address translation parameters */
int zpci_register_ioat(struct zpci_dev *zdev, u8 dmaas,
		       u64 base, u64 limit, u64 iota, u8 *status)
{
	u64 req = ZPCI_CREATE_REQ(zdev->fh, dmaas, ZPCI_MOD_FC_REG_IOAT);
	struct zpci_fib fib = {0};
	u8 cc;

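	/*
	 * The translation anchor is expected to be 16K aligned; its low
	 * bits are used for format flags such as ZPCI_IOTA_RTTO_FLAG below.
	 */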
	WARN_ON_ONCE(iota & 0x3fff);
	fib.pba = base;
	/* Work around off by one in ISM virt device */
	if (zdev->pft == PCI_FUNC_TYPE_ISM && limit > base)
		fib.pal = limit + (1 << 12);
	else
		fib.pal = limit;
	fib.iota = iota | ZPCI_IOTA_RTTO_FLAG;
	fib.gd = zdev->gisa;
	cc = zpci_mod_fc(req, &fib, status);
	if (cc)
		zpci_dbg(3, "reg ioat fid:%x, cc:%d, status:%d\n", zdev->fid, cc, *status);
	return cc;
}
EXPORT_SYMBOL_GPL(zpci_register_ioat);

/* Modify PCI: Unregister I/O address translation parameters */
int zpci_unregister_ioat(struct zpci_dev *zdev, u8 dmaas)
{
	u64 req = ZPCI_CREATE_REQ(zdev->fh, dmaas, ZPCI_MOD_FC_DEREG_IOAT);
	struct zpci_fib fib = {0};
	u8 cc, status;

	fib.gd = zdev->gisa;

	cc = zpci_mod_fc(req, &fib, &status);
	if (cc)
		zpci_dbg(3, "unreg ioat fid:%x, cc:%d, status:%d\n", zdev->fid, cc, status);
	return cc;
}

/* Modify PCI: Set PCI function measurement parameters */
int zpci_fmb_enable_device(struct zpci_dev *zdev)
{
	u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, ZPCI_MOD_FC_SET_MEASURE);
	struct zpci_iommu_ctrs *ctrs;
	struct zpci_fib fib = {0};
	u8 cc, status;

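	/* Bail out if measurement is already enabled or the FMB would not fit our buffer */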
	if (zdev->fmb || sizeof(*zdev->fmb) < zdev->fmb_length)
		return -EINVAL;

	zdev->fmb = kmem_cache_zalloc(zdev_fmb_cache, GFP_KERNEL);
	if (!zdev->fmb)
		return -ENOMEM;
	WARN_ON((u64) zdev->fmb & 0xf);

	/* reset software counters */
	ctrs = zpci_get_iommu_ctrs(zdev);
	if (ctrs) {
		atomic64_set(&ctrs->mapped_pages, 0);
		atomic64_set(&ctrs->unmapped_pages, 0);
		atomic64_set(&ctrs->global_rpcits, 0);
		atomic64_set(&ctrs->sync_map_rpcits, 0);
		atomic64_set(&ctrs->sync_rpcits, 0);
	}

	fib.fmb_addr = virt_to_phys(zdev->fmb);
	fib.gd = zdev->gisa;
	cc = zpci_mod_fc(req, &fib, &status);
	if (cc) {
		kmem_cache_free(zdev_fmb_cache, zdev->fmb);
		zdev->fmb = NULL;
	}
	return cc ? -EIO : 0;
}

/* Modify PCI: Disable PCI function measurement */
int zpci_fmb_disable_device(struct zpci_dev *zdev)
{
	u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, ZPCI_MOD_FC_SET_MEASURE);
	struct zpci_fib fib = {0};
	u8 cc, status;

	if (!zdev->fmb)
		return -EINVAL;

	fib.gd = zdev->gisa;

	/* Function measurement is disabled if fmb address is zero */
	cc = zpci_mod_fc(req, &fib, &status);
	if (cc == 3) /* Function already gone. */
		cc = 0;

	if (!cc) {
		kmem_cache_free(zdev_fmb_cache, zdev->fmb);
		zdev->fmb = NULL;
	}
	return cc ? -EIO : 0;
}

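/*
 * Config space data is exchanged with the raw little-endian bytes
 * right-aligned in a 64-bit register: loads byte-swap and shift the
 * result into the native value the PCI core expects, stores do the
 * reverse.
 */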
static int zpci_cfg_load(struct zpci_dev *zdev, int offset, u32 *val, u8 len)
{
	u64 req = ZPCI_CREATE_REQ(zdev->fh, ZPCI_PCIAS_CFGSPC, len);
	u64 data;
	int rc;

	rc = __zpci_load(&data, req, offset);
	if (!rc) {
		data = le64_to_cpu((__force __le64) data);
		data >>= (8 - len) * 8;
		*val = (u32) data;
	} else
		*val = 0xffffffff;
	return rc;
}

static int zpci_cfg_store(struct zpci_dev *zdev, int offset, u32 val, u8 len)
{
	u64 req = ZPCI_CREATE_REQ(zdev->fh, ZPCI_PCIAS_CFGSPC, len);
	u64 data = val;
	int rc;

	data <<= (8 - len) * 8;
	data = (__force u64) cpu_to_le64(data);
	rc = __zpci_store(data, req, offset);
	return rc;
}

resource_size_t pcibios_align_resource(void *data, const struct resource *res,
				       resource_size_t size,
				       resource_size_t align)
{
	return 0;
}

void __iomem *ioremap_prot(phys_addr_t phys_addr, size_t size,
			   unsigned long prot)
{
	/*
	 * When PCI MIO instructions are unavailable the "physical" address
	 * encodes a hint for accessing the PCI memory space it represents.
	 * Just pass it unchanged such that ioread/iowrite can decode it.
	 */
	if (!static_branch_unlikely(&have_mio))
		return (void __iomem *)phys_addr;

	return generic_ioremap_prot(phys_addr, size, __pgprot(prot));
}
EXPORT_SYMBOL(ioremap_prot);

void iounmap(volatile void __iomem *addr)
{
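	/* Without MIO the cookie was never really mapped, nothing to undo */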
	if (static_branch_likely(&have_mio))
		generic_iounmap(addr);
}
EXPORT_SYMBOL(iounmap);

/* Create a virtual mapping cookie for a PCI BAR */
static void __iomem *pci_iomap_range_fh(struct pci_dev *pdev, int bar,
					unsigned long offset, unsigned long max)
{
	struct zpci_dev *zdev = to_zpci(pdev);
	int idx;

	idx = zdev->bars[bar].map_idx;
	spin_lock(&zpci_iomap_lock);
	/* Detect overrun */
	WARN_ON(!++zpci_iomap_start[idx].count);
	zpci_iomap_start[idx].fh = zdev->fh;
	zpci_iomap_start[idx].bar = bar;
	spin_unlock(&zpci_iomap_lock);

	return (void __iomem *) ZPCI_ADDR(idx) + offset;
}

static void __iomem *pci_iomap_range_mio(struct pci_dev *pdev, int bar,
					 unsigned long offset,
					 unsigned long max)
{
	unsigned long barsize = pci_resource_len(pdev, bar);
	struct zpci_dev *zdev = to_zpci(pdev);
	void __iomem *iova;

	iova = ioremap((unsigned long) zdev->bars[bar].mio_wt, barsize);
	return iova ? iova + offset : iova;
}

void __iomem *pci_iomap_range(struct pci_dev *pdev, int bar,
			      unsigned long offset, unsigned long max)
{
	if (bar >= PCI_STD_NUM_BARS || !pci_resource_len(pdev, bar))
		return NULL;

	if (static_branch_likely(&have_mio))
		return pci_iomap_range_mio(pdev, bar, offset, max);
	else
		return pci_iomap_range_fh(pdev, bar, offset, max);
}
EXPORT_SYMBOL(pci_iomap_range);

void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen)
{
	return pci_iomap_range(dev, bar, 0, maxlen);
}
EXPORT_SYMBOL(pci_iomap);

static void __iomem *pci_iomap_wc_range_mio(struct pci_dev *pdev, int bar,
					    unsigned long offset, unsigned long max)
{
	unsigned long barsize = pci_resource_len(pdev, bar);
	struct zpci_dev *zdev = to_zpci(pdev);
	void __iomem *iova;

	iova = ioremap((unsigned long) zdev->bars[bar].mio_wb, barsize);
	return iova ? iova + offset : iova;
}

void __iomem *pci_iomap_wc_range(struct pci_dev *pdev, int bar,
				 unsigned long offset, unsigned long max)
{
	if (bar >= PCI_STD_NUM_BARS || !pci_resource_len(pdev, bar))
		return NULL;

	if (static_branch_likely(&have_mio))
		return pci_iomap_wc_range_mio(pdev, bar, offset, max);
	else
		return pci_iomap_range_fh(pdev, bar, offset, max);
}
EXPORT_SYMBOL(pci_iomap_wc_range);

void __iomem *pci_iomap_wc(struct pci_dev *dev, int bar, unsigned long maxlen)
{
	return pci_iomap_wc_range(dev, bar, 0, maxlen);
}
EXPORT_SYMBOL(pci_iomap_wc);

static void pci_iounmap_fh(struct pci_dev *pdev, void __iomem *addr)
{
	unsigned int idx = ZPCI_IDX(addr);

	spin_lock(&zpci_iomap_lock);
	/* Detect underrun */
	WARN_ON(!zpci_iomap_start[idx].count);
	if (!--zpci_iomap_start[idx].count) {
		zpci_iomap_start[idx].fh = 0;
		zpci_iomap_start[idx].bar = 0;
	}
	spin_unlock(&zpci_iomap_lock);
}

static void pci_iounmap_mio(struct pci_dev *pdev, void __iomem *addr)
{
	iounmap(addr);
}

void pci_iounmap(struct pci_dev *pdev, void __iomem *addr)
{
	if (static_branch_likely(&have_mio))
		pci_iounmap_mio(pdev, addr);
	else
		pci_iounmap_fh(pdev, addr);
}
EXPORT_SYMBOL(pci_iounmap);

static int pci_read(struct pci_bus *bus, unsigned int devfn, int where,
		    int size, u32 *val)
{
	struct zpci_dev *zdev = zdev_from_bus(bus, devfn);

	return (zdev) ? zpci_cfg_load(zdev, where, val, size) : -ENODEV;
}

static int pci_write(struct pci_bus *bus, unsigned int devfn, int where,
		     int size, u32 val)
{
	struct zpci_dev *zdev = zdev_from_bus(bus, devfn);

	return (zdev) ? zpci_cfg_store(zdev, where, val, size) : -ENODEV;
}

static struct pci_ops pci_root_ops = {
	.read = pci_read,
	.write = pci_write,
};

static void zpci_map_resources(struct pci_dev *pdev)
{
	struct zpci_dev *zdev = to_zpci(pdev);
	resource_size_t len;
	int i;

	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
		len = pci_resource_len(pdev, i);
		if (!len)
			continue;

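		/*
		 * Without MIO the resource start is not a CPU address but
		 * the iomap cookie produced by pci_iomap_range_fh().
		 */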
		if (zpci_use_mio(zdev))
			pdev->resource[i].start =
				(resource_size_t __force) zdev->bars[i].mio_wt;
		else
			pdev->resource[i].start = (resource_size_t __force)
				pci_iomap_range_fh(pdev, i, 0, 0);
		pdev->resource[i].end = pdev->resource[i].start + len - 1;
	}

	zpci_iov_map_resources(pdev);
}

static void zpci_unmap_resources(struct pci_dev *pdev)
{
	struct zpci_dev *zdev = to_zpci(pdev);
	resource_size_t len;
	int i;

	if (zpci_use_mio(zdev))
		return;

	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
		len = pci_resource_len(pdev, i);
		if (!len)
			continue;
		pci_iounmap_fh(pdev, (void __iomem __force *)
			       pdev->resource[i].start);
	}
}

static int zpci_alloc_iomap(struct zpci_dev *zdev)
{
	unsigned long entry;

	spin_lock(&zpci_iomap_lock);
	entry = find_first_zero_bit(zpci_iomap_bitmap, ZPCI_IOMAP_ENTRIES);
	if (entry == ZPCI_IOMAP_ENTRIES) {
		spin_unlock(&zpci_iomap_lock);
		return -ENOSPC;
	}
	set_bit(entry, zpci_iomap_bitmap);
	spin_unlock(&zpci_iomap_lock);
	return entry;
}

static void zpci_free_iomap(struct zpci_dev *zdev, int entry)
{
	spin_lock(&zpci_iomap_lock);
	memset(&zpci_iomap_start[entry], 0, sizeof(struct zpci_iomap_entry));
	clear_bit(entry, zpci_iomap_bitmap);
	spin_unlock(&zpci_iomap_lock);
}

static void zpci_do_update_iomap_fh(struct zpci_dev *zdev, u32 fh)
{
	int bar, idx;

	spin_lock(&zpci_iomap_lock);
	for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
		if (!zdev->bars[bar].size)
			continue;
		idx = zdev->bars[bar].map_idx;
		if (!zpci_iomap_start[idx].count)
			continue;
		WRITE_ONCE(zpci_iomap_start[idx].fh, zdev->fh);
	}
	spin_unlock(&zpci_iomap_lock);
}

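/*
 * Update the function handle; for non-MIO devices the handle is also
 * embedded in the zpci_iomap_start entries used by ioread/iowrite, so
 * propagate it to all currently mapped BARs of this function.
 */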
void zpci_update_fh(struct zpci_dev *zdev, u32 fh)
{
	if (!fh || zdev->fh == fh)
		return;

	zdev->fh = fh;
	if (zpci_use_mio(zdev))
		return;
	if (zdev->has_resources && zdev_enabled(zdev))
		zpci_do_update_iomap_fh(zdev, fh);
}

static struct resource *__alloc_res(struct zpci_dev *zdev, unsigned long start,
				    unsigned long size, unsigned long flags)
{
	struct resource *r;

	r = kzalloc(sizeof(*r), GFP_KERNEL);
	if (!r)
		return NULL;

	r->start = start;
	r->end = r->start + size - 1;
	r->flags = flags;
	r->name = zdev->res_name;

	if (request_resource(&iomem_resource, r)) {
		kfree(r);
		return NULL;
	}
	return r;
}

int zpci_setup_bus_resources(struct zpci_dev *zdev)
{
	unsigned long addr, size, flags;
	struct resource *res;
	int i, entry;

	snprintf(zdev->res_name, sizeof(zdev->res_name),
		 "PCI Bus %04x:%02x", zdev->uid, ZPCI_BUS_NR);

	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
		if (!zdev->bars[i].size)
			continue;
		entry = zpci_alloc_iomap(zdev);
		if (entry < 0)
			return entry;
		zdev->bars[i].map_idx = entry;

		/* only MMIO is supported */
		flags = IORESOURCE_MEM;
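		/*
		 * Bit 3 marks a prefetchable BAR, bit 2 a 64-bit one
		 * (matching the low bits of a standard PCI BAR register).
		 */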
		if (zdev->bars[i].val & 8)
			flags |= IORESOURCE_PREFETCH;
		if (zdev->bars[i].val & 4)
			flags |= IORESOURCE_MEM_64;

		if (zpci_use_mio(zdev))
			addr = (unsigned long) zdev->bars[i].mio_wt;
		else
			addr = ZPCI_ADDR(entry);
		size = 1UL << zdev->bars[i].size;

		res = __alloc_res(zdev, addr, size, flags);
		if (!res) {
			zpci_free_iomap(zdev, entry);
			return -ENOMEM;
		}
		zdev->bars[i].res = res;
	}
	zdev->has_resources = 1;

	return 0;
}

static void zpci_cleanup_bus_resources(struct zpci_dev *zdev)
{
	struct resource *res;
	int i;

	pci_lock_rescan_remove();
	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
		res = zdev->bars[i].res;
		if (!res)
			continue;

		release_resource(res);
		pci_bus_remove_resource(zdev->zbus->bus, res);
		zpci_free_iomap(zdev, zdev->bars[i].map_idx);
		zdev->bars[i].res = NULL;
		kfree(res);
	}
	zdev->has_resources = 0;
	pci_unlock_rescan_remove();
}

int pcibios_device_add(struct pci_dev *pdev)
{
	struct zpci_dev *zdev = to_zpci(pdev);
	struct resource *res;
	int i;

	/* The pdev has a reference to the zdev via its bus */
	zpci_zdev_get(zdev);
	if (pdev->is_physfn)
		pdev->no_vf_scan = 1;

	zpci_map_resources(pdev);

	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
		res = &pdev->resource[i];
		if (res->parent || !res->flags)
			continue;
		pci_claim_resource(pdev, i);
	}

	return 0;
}

void pcibios_release_device(struct pci_dev *pdev)
{
	struct zpci_dev *zdev = to_zpci(pdev);

	zpci_unmap_resources(pdev);
	zpci_zdev_put(zdev);
}

int pcibios_enable_device(struct pci_dev *pdev, int mask)
{
	struct zpci_dev *zdev = to_zpci(pdev);

	zpci_debug_init_device(zdev, dev_name(&pdev->dev));
	zpci_fmb_enable_device(zdev);

	return pci_enable_resources(pdev, mask);
}

void pcibios_disable_device(struct pci_dev *pdev)
{
	struct zpci_dev *zdev = to_zpci(pdev);

	zpci_fmb_disable_device(zdev);
	zpci_debug_exit_device(zdev);
}

static int __zpci_register_domain(int domain)
{
	spin_lock(&zpci_domain_lock);
	if (test_bit(domain, zpci_domain)) {
		spin_unlock(&zpci_domain_lock);
		pr_err("Domain %04x is already assigned\n", domain);
		return -EEXIST;
	}
	set_bit(domain, zpci_domain);
	spin_unlock(&zpci_domain_lock);
	return domain;
}

static int __zpci_alloc_domain(void)
{
	int domain;

	spin_lock(&zpci_domain_lock);
	/*
	 * We can always auto allocate domains below ZPCI_NR_DEVICES.
	 * There is either a free domain or we have reached the maximum in
	 * which case we would have bailed earlier.
	 */
	domain = find_first_zero_bit(zpci_domain, ZPCI_NR_DEVICES);
	set_bit(domain, zpci_domain);
	spin_unlock(&zpci_domain_lock);
	return domain;
}

int zpci_alloc_domain(int domain)
{
	if (zpci_unique_uid) {
		if (domain)
			return __zpci_register_domain(domain);
		pr_warn("UID checking was active but no UID is provided: switching to automatic domain allocation\n");
		update_uid_checking(false);
	}
	return __zpci_alloc_domain();
}

void zpci_free_domain(int domain)
{
	spin_lock(&zpci_domain_lock);
	clear_bit(domain, zpci_domain);
	spin_unlock(&zpci_domain_lock);
}

int zpci_enable_device(struct zpci_dev *zdev)
{
	u32 fh = zdev->fh;
	int rc = 0;

	if (clp_enable_fh(zdev, &fh, ZPCI_NR_DMA_SPACES))
		rc = -EIO;
	else
		zpci_update_fh(zdev, fh);
	return rc;
}
EXPORT_SYMBOL_GPL(zpci_enable_device);

int zpci_disable_device(struct zpci_dev *zdev)
{
	u32 fh = zdev->fh;
	int cc, rc = 0;

	cc = clp_disable_fh(zdev, &fh);
	if (!cc) {
		zpci_update_fh(zdev, fh);
	} else if (cc == CLP_RC_SETPCIFN_ALRDY) {
		pr_info("Disabling PCI function %08x had no effect as it was already disabled\n",
			zdev->fid);
		/* Function is already disabled - update handle */
		rc = clp_refresh_fh(zdev->fid, &fh);
		if (!rc) {
			zpci_update_fh(zdev, fh);
			rc = -EINVAL;
		}
	} else {
		rc = -EIO;
	}
	return rc;
}
EXPORT_SYMBOL_GPL(zpci_disable_device);

/**
 * zpci_hot_reset_device - perform a reset of the given zPCI function
 * @zdev: the slot which should be reset
 *
 * Performs a low level reset of the zPCI function. The reset is low level in
 * the sense that the zPCI function can be reset without detaching it from the
 * common PCI subsystem. The reset may be performed while under control of
 * either DMA or IOMMU APIs in which case the existing DMA/IOMMU translation
 * table is reinstated at the end of the reset.
 *
 * After the reset the function's internal state is reset to an initial state
 * equivalent to its state during boot when first probing a driver.
 * Consequently after reset the PCI function requires re-initialization via the
 * common PCI code including re-enabling IRQs via pci_alloc_irq_vectors()
 * and enabling the function via e.g. pci_enable_device_flags(). The caller
 * must guard against concurrent reset attempts.
 *
 * In most cases this function should not be called directly but through
 * pci_reset_function() or pci_reset_bus() which handle the save/restore and
 * locking - asserted by lockdep.
 *
 * Return: 0 on success and an error value otherwise
 */
int zpci_hot_reset_device(struct zpci_dev *zdev)
{
	u8 status;
	int rc;

	lockdep_assert_held(&zdev->state_lock);
	zpci_dbg(3, "rst fid:%x, fh:%x\n", zdev->fid, zdev->fh);
	if (zdev_enabled(zdev)) {
		/* Disables device access, DMAs and IRQs (reset state) */
		rc = zpci_disable_device(zdev);
		/*
		 * Due to a z/VM vs LPAR inconsistency in the error state the
		 * FH may indicate an enabled device while disable says the
		 * device is already disabled; don't treat it as an error here.
		 */
		if (rc == -EINVAL)
			rc = 0;
		if (rc)
			return rc;
	}

	rc = zpci_enable_device(zdev);
	if (rc)
		return rc;

	if (zdev->dma_table)
		rc = zpci_register_ioat(zdev, 0, zdev->start_dma, zdev->end_dma,
					virt_to_phys(zdev->dma_table), &status);
	if (rc) {
		zpci_disable_device(zdev);
		return rc;
	}

	return 0;
}

/**
 * zpci_create_device() - Create a new zpci_dev and add it to the zbus
 * @fid: Function ID of the device to be created
 * @fh: Current Function Handle of the device to be created
 * @state: Initial state after creation either Standby or Configured
 *
 * Creates a new zpci device and adds it to its, possibly newly created, zbus
 * as well as zpci_list.
 *
 * Returns: the zdev on success or an error pointer otherwise
 */
struct zpci_dev *zpci_create_device(u32 fid, u32 fh, enum zpci_state state)
{
	struct zpci_dev *zdev;
	int rc;

	zpci_dbg(1, "add fid:%x, fh:%x, c:%d\n", fid, fh, state);
	zdev = kzalloc(sizeof(*zdev), GFP_KERNEL);
	if (!zdev)
		return ERR_PTR(-ENOMEM);

	/* FID and Function Handle are the static/dynamic identifiers */
	zdev->fid = fid;
	zdev->fh = fh;

	/* Query function properties and update zdev */
	rc = clp_query_pci_fn(zdev);
	if (rc)
		goto error;
	zdev->state = state;

	kref_init(&zdev->kref);
	mutex_init(&zdev->state_lock);
	mutex_init(&zdev->fmb_lock);
	mutex_init(&zdev->kzdev_lock);

	rc = zpci_init_iommu(zdev);
	if (rc)
		goto error;

	rc = zpci_bus_device_register(zdev, &pci_root_ops);
	if (rc)
		goto error_destroy_iommu;

	spin_lock(&zpci_list_lock);
	list_add_tail(&zdev->entry, &zpci_list);
	spin_unlock(&zpci_list_lock);

	return zdev;

error_destroy_iommu:
	zpci_destroy_iommu(zdev);
error:
	zpci_dbg(0, "add fid:%x, rc:%d\n", fid, rc);
	kfree(zdev);
	return ERR_PTR(rc);
}

bool zpci_is_device_configured(struct zpci_dev *zdev)
{
	enum zpci_state state = zdev->state;

	return state != ZPCI_FN_STATE_RESERVED &&
	       state != ZPCI_FN_STATE_STANDBY;
}

/**
 * zpci_scan_configured_device() - Scan a freshly configured zpci_dev
 * @zdev: The zpci_dev to be configured
 * @fh: The general function handle supplied by the platform
 *
 * Given a device in the configuration state Configured, enables, scans and
 * adds it to the common code PCI subsystem if possible. If any failure occurs,
 * the zpci_dev is left disabled.
 *
 * Return: 0 on success, or an error code otherwise
 */
int zpci_scan_configured_device(struct zpci_dev *zdev, u32 fh)
{
	zpci_update_fh(zdev, fh);
	return zpci_bus_scan_device(zdev);
}

/**
 * zpci_deconfigure_device() - Deconfigure a zpci_dev
 * @zdev: The zpci_dev to deconfigure
 *
 * Deconfigure a zPCI function that is currently configured and possibly known
 * to the common code PCI subsystem.
 * If any failure occurs the device is left as is.
 *
 * Return: 0 on success, or an error code otherwise
 */
int zpci_deconfigure_device(struct zpci_dev *zdev)
{
	int rc;

	lockdep_assert_held(&zdev->state_lock);
	if (zdev->state != ZPCI_FN_STATE_CONFIGURED)
		return 0;

	if (zdev->zbus->bus)
		zpci_bus_remove_device(zdev, false);

	if (zdev_enabled(zdev)) {
		rc = zpci_disable_device(zdev);
		if (rc)
			return rc;
	}

	rc = sclp_pci_deconfigure(zdev->fid);
	zpci_dbg(3, "deconf fid:%x, rc:%d\n", zdev->fid, rc);
	if (rc)
		return rc;
	zdev->state = ZPCI_FN_STATE_STANDBY;

	return 0;
}

/**
 * zpci_device_reserved() - Mark device as reserved
 * @zdev: the zpci_dev that was reserved
 *
 * Handle the case that a given zPCI function was reserved by another system.
 * After a call to this function the zpci_dev cannot be found via
 * get_zdev_by_fid() anymore but may still be accessible via existing
 * references, though it will not be functional anymore.
 */
void zpci_device_reserved(struct zpci_dev *zdev)
{
	/*
	 * Remove device from zpci_list as it is going away. This also
	 * makes sure we ignore subsequent zPCI events for this device.
	 */
	spin_lock(&zpci_list_lock);
	list_del(&zdev->entry);
	spin_unlock(&zpci_list_lock);
	zdev->state = ZPCI_FN_STATE_RESERVED;
	zpci_dbg(3, "rsv fid:%x\n", zdev->fid);
	zpci_zdev_put(zdev);
}

void zpci_release_device(struct kref *kref)
{
	struct zpci_dev *zdev = container_of(kref, struct zpci_dev, kref);
	int ret;

	if (zdev->has_hp_slot)
		zpci_exit_slot(zdev);

	if (zdev->zbus->bus)
		zpci_bus_remove_device(zdev, false);

	if (zdev_enabled(zdev))
		zpci_disable_device(zdev);

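	/*
	 * Tear down cascades from the current state downwards: each case
	 * falls through so that e.g. a configured function is also
	 * deconfigured and its resources released.
	 */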
	switch (zdev->state) {
	case ZPCI_FN_STATE_CONFIGURED:
		ret = sclp_pci_deconfigure(zdev->fid);
		zpci_dbg(3, "deconf fid:%x, rc:%d\n", zdev->fid, ret);
		fallthrough;
	case ZPCI_FN_STATE_STANDBY:
		if (zdev->has_hp_slot)
			zpci_exit_slot(zdev);
		spin_lock(&zpci_list_lock);
		list_del(&zdev->entry);
		spin_unlock(&zpci_list_lock);
		zpci_dbg(3, "rsv fid:%x\n", zdev->fid);
		fallthrough;
	case ZPCI_FN_STATE_RESERVED:
		if (zdev->has_resources)
			zpci_cleanup_bus_resources(zdev);
		zpci_bus_device_unregister(zdev);
		zpci_destroy_iommu(zdev);
		fallthrough;
	default:
		break;
	}
	zpci_dbg(3, "rem fid:%x\n", zdev->fid);
	kfree_rcu(zdev, rcu);
}

int zpci_report_error(struct pci_dev *pdev,
		      struct zpci_report_error_header *report)
{
	struct zpci_dev *zdev = to_zpci(pdev);

	return sclp_pci_report(report, zdev->fh, zdev->fid);
}
EXPORT_SYMBOL(zpci_report_error);

/**
 * zpci_clear_error_state() - Clears the zPCI error state of the device
 * @zdev: The zdev for which the zPCI error state should be reset
 *
 * Clear the zPCI error state of the device. If clearing the zPCI error state
 * fails, the device is left in the error state. In this case it may make sense
 * to call zpci_io_perm_failure() on the associated pdev if it exists.
 *
 * Returns: 0 on success, -EIO otherwise
 */
int zpci_clear_error_state(struct zpci_dev *zdev)
{
	u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, ZPCI_MOD_FC_RESET_ERROR);
	struct zpci_fib fib = {0};
	u8 status;
	int cc;

	cc = zpci_mod_fc(req, &fib, &status);
	if (cc) {
		zpci_dbg(3, "ces fid:%x, cc:%d, status:%x\n", zdev->fid, cc, status);
		return -EIO;
	}

	return 0;
}

/**
 * zpci_reset_load_store_blocked() - Re-enables L/S from error state
 * @zdev: The zdev for which to unblock load/store access
 *
 * Re-enables load/store access for a PCI function in the error state while
 * keeping DMA blocked. In this state drivers can poke MMIO space to determine
 * if error recovery is possible while catching any rogue DMA access from the
 * device.
 *
 * Returns: 0 on success, -EIO otherwise
 */
int zpci_reset_load_store_blocked(struct zpci_dev *zdev)
{
	u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, ZPCI_MOD_FC_RESET_BLOCK);
	struct zpci_fib fib = {0};
	u8 status;
	int cc;

	cc = zpci_mod_fc(req, &fib, &status);
	if (cc) {
		zpci_dbg(3, "rls fid:%x, cc:%d, status:%x\n", zdev->fid, cc, status);
		return -EIO;
	}

	return 0;
}

static int zpci_mem_init(void)
{
	BUILD_BUG_ON(!is_power_of_2(__alignof__(struct zpci_fmb)) ||
		     __alignof__(struct zpci_fmb) < sizeof(struct zpci_fmb));

	zdev_fmb_cache = kmem_cache_create("PCI_FMB_cache", sizeof(struct zpci_fmb),
					   __alignof__(struct zpci_fmb), 0, NULL);
	if (!zdev_fmb_cache)
		goto error_fmb;

	zpci_iomap_start = kcalloc(ZPCI_IOMAP_ENTRIES,
				   sizeof(*zpci_iomap_start), GFP_KERNEL);
	if (!zpci_iomap_start)
		goto error_iomap;

	zpci_iomap_bitmap = kcalloc(BITS_TO_LONGS(ZPCI_IOMAP_ENTRIES),
				    sizeof(*zpci_iomap_bitmap), GFP_KERNEL);
	if (!zpci_iomap_bitmap)
		goto error_iomap_bitmap;

	if (static_branch_likely(&have_mio))
		clp_setup_writeback_mio();

	return 0;
error_iomap_bitmap:
	kfree(zpci_iomap_start);
error_iomap:
	kmem_cache_destroy(zdev_fmb_cache);
error_fmb:
	return -ENOMEM;
}

static void zpci_mem_exit(void)
{
	kfree(zpci_iomap_bitmap);
	kfree(zpci_iomap_start);
	kmem_cache_destroy(zdev_fmb_cache);
}

static unsigned int s390_pci_probe __initdata = 1;
unsigned int s390_pci_force_floating __initdata;
static unsigned int s390_pci_initialized;

char * __init pcibios_setup(char *str)
{
	if (!strcmp(str, "off")) {
		s390_pci_probe = 0;
		return NULL;
	}
	if (!strcmp(str, "nomio")) {
		get_lowcore()->machine_flags &= ~MACHINE_FLAG_PCI_MIO;
		return NULL;
	}
	if (!strcmp(str, "force_floating")) {
		s390_pci_force_floating = 1;
		return NULL;
	}
	if (!strcmp(str, "norid")) {
		s390_pci_no_rid = 1;
		return NULL;
	}
	return str;
}

bool zpci_is_enabled(void)
{
	return s390_pci_initialized;
}

static int __init pci_base_init(void)
{
	int rc;

	if (!s390_pci_probe)
		return 0;

	if (!test_facility(69) || !test_facility(71)) {
		pr_info("PCI is not supported because CPU facilities 69 or 71 are not available\n");
		return 0;
	}

	if (MACHINE_HAS_PCI_MIO) {
		static_branch_enable(&have_mio);
		system_ctl_set_bit(2, CR2_MIO_ADDRESSING_BIT);
	}

	rc = zpci_debug_init();
	if (rc)
		goto out;

	rc = zpci_mem_init();
	if (rc)
		goto out_mem;

	rc = zpci_irq_init();
	if (rc)
		goto out_irq;

	rc = clp_scan_pci_devices();
	if (rc)
		goto out_find;
	zpci_bus_scan_busses();

	s390_pci_initialized = 1;
	return 0;

out_find:
	zpci_irq_exit();
out_irq:
	zpci_mem_exit();
out_mem:
	zpci_debug_exit();
out:
	return rc;
}
subsys_initcall_sync(pci_base_init);