xref: /linux/arch/powerpc/platforms/pseries/pci.c (revision ff2632d7d08edc11e8bd0629e9fcfebab25c78b4)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * Copyright (C) 2001 Dave Engebretsen, IBM Corporation
4  * Copyright (C) 2003 Anton Blanchard <anton@au.ibm.com>, IBM
5  *
6  * pSeries specific routines for PCI.
7  */
8 
9 #include <linux/init.h>
10 #include <linux/ioport.h>
11 #include <linux/kernel.h>
12 #include <linux/pci.h>
13 #include <linux/string.h>
14 
15 #include <asm/eeh.h>
16 #include <asm/pci-bridge.h>
17 #include <asm/ppc-pci.h>
18 #include <asm/pci.h>
19 #include "pseries.h"
20 
21 #ifdef CONFIG_PCI_IOV
/* Upper bound on VFs per ibm,open-sriov-map-pe-number call: the RTAS data
 * buffer (RTAS_DATA_BUF_SIZE bytes) holds at most this many 16-byte entries. */
#define MAX_VFS_FOR_MAP_PE 256
/* One per-VF entry in the buffer exchanged with firmware via RTAS. */
struct pe_map_bar_entry {
	__be64     bar;       /* Input:  Virtual Function BAR address */
	__be16     rid;       /* Input:  Virtual Function Routing ID (bus << 8 | devfn) */
	__be16     pe_num;    /* Output: PE number firmware assigned to the VF */
	__be32     reserved;  /* Reserved Space */
};
29 
/*
 * pseries_send_map_pe - ask firmware to associate VF BARs/RIDs with PEs
 * @pdev:        the SR-IOV physical function
 * @num_vfs:     number of valid entries in @vf_pe_array
 * @vf_pe_array: in/out table; bar and rid are inputs, pe_num is filled in
 *               by firmware on success
 *
 * Invokes the ibm,open-sriov-map-pe-number RTAS call through the shared
 * RTAS data buffer. Returns 0 on success, -EINVAL if firmware does not
 * implement the call, or the non-zero RTAS status code on failure.
 */
static int pseries_send_map_pe(struct pci_dev *pdev, u16 num_vfs,
			       struct pe_map_bar_entry *vf_pe_array)
{
	struct pci_dn *pdn;
	int rc;
	unsigned long buid, addr;
	int ibm_map_pes = rtas_function_token(RTAS_FN_IBM_OPEN_SRIOV_MAP_PE_NUMBER);

	if (ibm_map_pes == RTAS_UNKNOWN_SERVICE)
		return -EINVAL;

	pdn = pci_get_pdn(pdev);
	/* PF config address plus the PHB's unit ID identify the device to
	 * firmware. */
	addr = rtas_config_addr(pdn->busno, pdn->devfn, 0);
	buid = pdn->phb->buid;
	/* rtas_data_buf is a single global bounce buffer; hold its lock for
	 * the whole copy-in/call/copy-out sequence. */
	spin_lock(&rtas_data_buf_lock);
	memcpy(rtas_data_buf, vf_pe_array,
	       RTAS_DATA_BUF_SIZE);
	rc = rtas_call(ibm_map_pes, 5, 1, NULL, addr,
		       BUID_HI(buid), BUID_LO(buid),
		       rtas_data_buf,
		       num_vfs * sizeof(struct pe_map_bar_entry));
	/* Copy the whole buffer back so the caller sees the pe_num outputs. */
	memcpy(vf_pe_array, rtas_data_buf, RTAS_DATA_BUF_SIZE);
	spin_unlock(&rtas_data_buf_lock);

	if (rc)
		dev_err(&pdev->dev,
			"%s: Failed to associate pes PE#%lx, rc=%x\n",
			__func__,  addr, rc);

	return rc;
}
61 
/* Record the PE number firmware assigned to VF @vf_index in the PF's
 * pe_num_map, and emit a debug line identifying the VF. */
static void pseries_set_pe_num(struct pci_dev *pdev, u16 vf_index, __be16 pe_num)
{
	struct pci_dn *pdn = pci_get_pdn(pdev);

	/* pe_num arrives big-endian from the RTAS buffer. */
	pdn->pe_num_map[vf_index] = be16_to_cpu(pe_num);
	dev_dbg(&pdev->dev, "VF %04x:%02x:%02x.%x associated with PE#%x\n",
		pci_domain_nr(pdev->bus), pdev->bus->number,
		PCI_SLOT(pci_iov_virtfn_devfn(pdev, vf_index)),
		PCI_FUNC(pci_iov_virtfn_devfn(pdev, vf_index)),
		pdn->pe_num_map[vf_index]);
}
75 
/*
 * pseries_associate_pes - build the VF map table and have firmware assign PEs
 * @pdev:    the SR-IOV physical function
 * @num_vfs: number of VFs being enabled
 *
 * Fills a pe_map_bar_entry table (one entry per VF, bar/rid inputs) and
 * passes it to firmware; on success records each returned PE number in
 * pdn->pe_num_map. Returns 0 on success or a negative/RTAS error code.
 */
static int pseries_associate_pes(struct pci_dev *pdev, u16 num_vfs)
{
	struct pci_dn *pdn;
	int i, rc, vf_index;
	struct pe_map_bar_entry *vf_pe_array;
	struct resource *res;
	u64 size;

	/*
	 * vf_pe_array mirrors the fixed-size RTAS data buffer, which holds
	 * at most MAX_VFS_FOR_MAP_PE entries. Reject larger requests up
	 * front so the fill loop below cannot write past the allocation.
	 */
	if (num_vfs > MAX_VFS_FOR_MAP_PE)
		return -EINVAL;

	vf_pe_array = kzalloc(RTAS_DATA_BUF_SIZE, GFP_KERNEL);
	if (!vf_pe_array)
		return -ENOMEM;

	pdn = pci_get_pdn(pdev);
	/* create firmware structure to associate pes */
	for (vf_index = 0; vf_index < num_vfs; vf_index++) {
		/* Invalid until firmware reports a real PE number. */
		pdn->pe_num_map[vf_index] = IODA_INVALID_PE;
		for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
			res = &pdev->resource[i + PCI_IOV_RESOURCES];
			if (!res->parent)
				continue;
			/* Per-VF slice size within the IOV BAR. */
			size = pcibios_iov_resource_alignment(pdev, i +
					PCI_IOV_RESOURCES);
			vf_pe_array[vf_index].bar =
				cpu_to_be64(res->start + size * vf_index);
			/* Routing ID: bus in the high byte, devfn in the low. */
			vf_pe_array[vf_index].rid =
				cpu_to_be16((pci_iov_virtfn_bus(pdev, vf_index)
					    << 8) | pci_iov_virtfn_devfn(pdev,
					    vf_index));
			vf_pe_array[vf_index].pe_num =
				cpu_to_be16(IODA_INVALID_PE);
		}
	}

	rc = pseries_send_map_pe(pdev, num_vfs, vf_pe_array);
	/* Only zero is success */
	if (!rc)
		for (vf_index = 0; vf_index < num_vfs; vf_index++)
			pseries_set_pe_num(pdev, vf_index,
					   vf_pe_array[vf_index].pe_num);

	kfree(vf_pe_array);
	return rc;
}
119 
/*
 * pseries_pci_sriov_enable - validate the requested VF count and map PEs
 * @pdev:    the SR-IOV physical function
 * @num_vfs: number of VFs the caller wants to enable
 *
 * Checks num_vfs against the firmware-advertised maximum and the RTAS map
 * buffer capacity, allocates the per-VF PE number map, and asks firmware to
 * associate each VF with a PE. Returns 0 on success or a negative error.
 */
static int pseries_pci_sriov_enable(struct pci_dev *pdev, u16 num_vfs)
{
	struct pci_dn         *pdn;
	int                    rc;
	const int *max_vfs;
	int max_config_vfs;
	struct device_node *dn = pci_device_to_OF_node(pdev);

	max_vfs = of_get_property(dn, "ibm,number-of-configurable-vfs", NULL);

	if (!max_vfs)
		return -EINVAL;

	/* First integer stores max config */
	max_config_vfs = of_read_number(&max_vfs[0], 1);
	/*
	 * num_vfs must not exceed EITHER limit. The previous check combined
	 * the two conditions with &&, so a request above one bound but not
	 * the other slipped through — in particular num_vfs >
	 * MAX_VFS_FOR_MAP_PE would overflow the fixed-size map table built
	 * in pseries_associate_pes().
	 */
	if (num_vfs > max_config_vfs || num_vfs > MAX_VFS_FOR_MAP_PE) {
		dev_err(&pdev->dev,
			"Num VFs %x > %x Configurable VFs\n",
			num_vfs, min(max_config_vfs, MAX_VFS_FOR_MAP_PE));
		return -EINVAL;
	}

	pdn = pci_get_pdn(pdev);
	pdn->pe_num_map = kmalloc_array(num_vfs,
					sizeof(*pdn->pe_num_map),
					GFP_KERNEL);
	if (!pdn->pe_num_map)
		return -ENOMEM;

	rc = pseries_associate_pes(pdev, num_vfs);

	/* Anything other than zero is failure */
	if (rc) {
		dev_err(&pdev->dev, "Failure to enable sriov: %x\n", rc);
		kfree(pdn->pe_num_map);
		/* Don't leave a dangling pointer in the persistent pci_dn. */
		pdn->pe_num_map = NULL;
	} else {
		pci_vf_drivers_autoprobe(pdev, false);
	}

	return rc;
}
162 
/* pcibios_sriov_enable hook: set up per-VF pci_dn bookkeeping, then run the
 * pseries-specific enable path (validation + PE association). */
static int pseries_pcibios_sriov_enable(struct pci_dev *pdev, u16 num_vfs)
{
	/* VF pci_dn structures must exist before the VFs are configured. */
	add_sriov_vf_pdns(pdev);

	return pseries_pci_sriov_enable(pdev, num_vfs);
}
169 
pseries_pcibios_sriov_disable(struct pci_dev * pdev)170 static int pseries_pcibios_sriov_disable(struct pci_dev *pdev)
171 {
172 	struct pci_dn         *pdn;
173 
174 	pdn = pci_get_pdn(pdev);
175 	/* Releasing pe_num_map */
176 	kfree(pdn->pe_num_map);
177 	/* Release PCI data */
178 	remove_sriov_vf_pdns(pdev);
179 	pci_vf_drivers_autoprobe(pdev, true);
180 	return 0;
181 }
182 #endif
183 
pSeries_request_regions(void)184 static void __init pSeries_request_regions(void)
185 {
186 	if (!isa_io_base)
187 		return;
188 
189 	request_region(0x20,0x20,"pic1");
190 	request_region(0xa0,0x20,"pic2");
191 	request_region(0x00,0x20,"dma1");
192 	request_region(0x40,0x20,"timer");
193 	request_region(0x80,0x10,"dma page reg");
194 	request_region(0xc0,0x20,"dma2");
195 }
196 
/*
 * Final PCI fixup for pSeries: reserve legacy ISA regions, report EEH
 * status, and install the SR-IOV enable/disable platform hooks.
 */
void __init pSeries_final_fixup(void)
{
	pSeries_request_regions();

	eeh_show_enabled();

#ifdef CONFIG_PCI_IOV
	/* Route the generic pcibios SR-IOV entry points to pseries. */
	ppc_md.pcibios_sriov_enable = pseries_pcibios_sriov_enable;
	ppc_md.pcibios_sriov_disable = pseries_pcibios_sriov_disable;
#endif
}
208 
209 /*
210  * Assume the winbond 82c105 is the IDE controller on a
211  * p610/p615/p630. We should probably be more careful in case
212  * someone tries to plug in a similar adapter.
213  */
fixup_winbond_82c105(struct pci_dev * dev)214 static void fixup_winbond_82c105(struct pci_dev* dev)
215 {
216 	struct resource *r;
217 	unsigned int reg;
218 
219 	if (!machine_is(pseries))
220 		return;
221 
222 	printk("Using INTC for W82c105 IDE controller.\n");
223 	pci_read_config_dword(dev, 0x40, &reg);
224 	/* Enable LEGIRQ to use INTC instead of ISA interrupts */
225 	pci_write_config_dword(dev, 0x40, reg | (1<<11));
226 
227 	pci_dev_for_each_resource(dev, r) {
228 		/* zap the 2nd function of the winbond chip */
229 		if (dev->bus->number == 0 && dev->devfn == 0x81 &&
230 		    r->flags & IORESOURCE_IO)
231 			r->flags &= ~IORESOURCE_IO;
232 		if (r->start == 0 && r->end) {
233 			r->flags = 0;
234 			r->end = 0;
235 		}
236 	}
237 }
238 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_WINBOND, PCI_DEVICE_ID_WINBOND_82C105,
239 			 fixup_winbond_82c105);
240 
/* Translate the ibm,pcie-link-speed-stats property encoding (one bit per
 * PCIe generation) into the generic enum pci_bus_speed value. Unknown
 * encodings yield PCI_SPEED_UNKNOWN. */
static enum pci_bus_speed prop_to_pci_speed(u32 prop)
{
	if (prop == 0x01)
		return PCIE_SPEED_2_5GT;
	if (prop == 0x02)
		return PCIE_SPEED_5_0GT;
	if (prop == 0x04)
		return PCIE_SPEED_8_0GT;
	if (prop == 0x08)
		return PCIE_SPEED_16_0GT;
	if (prop == 0x10)
		return PCIE_SPEED_32_0GT;

	pr_debug("Unexpected PCI link speed property value\n");
	return PCI_SPEED_UNKNOWN;
}
259 
/*
 * pseries_root_bridge_prepare - per-PHB setup before the root bus is scanned
 * @bridge: the host bridge being registered
 *
 * Installs the deferred-free release callback for the PHB and, when the
 * device tree provides ibm,pcie-link-speed-stats (searching from the PHB
 * node up through its parents), records the max and current link speeds on
 * the root bus. Returns 0 in all cases; a missing property is not an error.
 */
int pseries_root_bridge_prepare(struct pci_host_bridge *bridge)
{
	struct device_node *dn, *pdn;
	struct pci_bus *bus;
	u32 pcie_link_speed_stats[2];
	int rc;

	bus = bridge->bus;

	/* Rely on the pcibios_free_controller_deferred() callback. */
	pci_set_host_bridge_release(bridge, pcibios_free_controller_deferred,
					(void *) pci_bus_to_host(bus));

	dn = pcibios_get_phb_of_node(bus);
	if (!dn)
		return 0;

	/* Walk up the tree; of_get_next_parent() drops the child's reference
	 * and takes one on the parent, so only the final pdn needs a put. */
	for (pdn = dn; pdn != NULL; pdn = of_get_next_parent(pdn)) {
		rc = of_property_read_u32_array(pdn,
				"ibm,pcie-link-speed-stats",
				&pcie_link_speed_stats[0], 2);
		if (!rc)
			break;
	}

	of_node_put(pdn);

	if (rc) {
		pr_debug("no ibm,pcie-link-speed-stats property\n");
		return 0;
	}

	/* Property layout: [0] = max supported speed, [1] = current speed. */
	bus->max_bus_speed = prop_to_pci_speed(pcie_link_speed_stats[0]);
	bus->cur_bus_speed = prop_to_pci_speed(pcie_link_speed_stats[1]);
	return 0;
}
296