xref: /linux/arch/x86/pci/intel_mid.c (revision 3719a04a80caf660f899a462cd8f3973bcfa676e)
1*a2c6c1c2SAndy Shevchenko // SPDX-License-Identifier: GPL-2.0
2*a2c6c1c2SAndy Shevchenko /*
3*a2c6c1c2SAndy Shevchenko  * Intel MID PCI support
4*a2c6c1c2SAndy Shevchenko  *   Copyright (c) 2008 Intel Corporation
5*a2c6c1c2SAndy Shevchenko  *     Jesse Barnes <jesse.barnes@intel.com>
6*a2c6c1c2SAndy Shevchenko  *
7*a2c6c1c2SAndy Shevchenko  * Moorestown has an interesting PCI implementation:
8*a2c6c1c2SAndy Shevchenko  *   - configuration space is memory mapped (as defined by MCFG)
9*a2c6c1c2SAndy Shevchenko  *   - Lincroft devices also have a real, type 1 configuration space
10*a2c6c1c2SAndy Shevchenko  *   - Early Lincroft silicon has a type 1 access bug that will cause
11*a2c6c1c2SAndy Shevchenko  *     a hang if non-existent devices are accessed
12*a2c6c1c2SAndy Shevchenko  *   - some devices have the "fixed BAR" capability, which means
13*a2c6c1c2SAndy Shevchenko  *     they can't be relocated or modified; check for that during
14*a2c6c1c2SAndy Shevchenko  *     BAR sizing
15*a2c6c1c2SAndy Shevchenko  *
16*a2c6c1c2SAndy Shevchenko  * So, we use the MCFG space for all reads and writes, but also send
17*a2c6c1c2SAndy Shevchenko  * Lincroft writes to type 1 space.  But only read/write if the device
18*a2c6c1c2SAndy Shevchenko  * actually exists, otherwise return all 1s for reads and bit bucket
19*a2c6c1c2SAndy Shevchenko  * the writes.
20*a2c6c1c2SAndy Shevchenko  */
21*a2c6c1c2SAndy Shevchenko 
22*a2c6c1c2SAndy Shevchenko #include <linux/sched.h>
23*a2c6c1c2SAndy Shevchenko #include <linux/pci.h>
24*a2c6c1c2SAndy Shevchenko #include <linux/ioport.h>
25*a2c6c1c2SAndy Shevchenko #include <linux/init.h>
26*a2c6c1c2SAndy Shevchenko #include <linux/dmi.h>
27*a2c6c1c2SAndy Shevchenko #include <linux/acpi.h>
28*a2c6c1c2SAndy Shevchenko #include <linux/io.h>
29*a2c6c1c2SAndy Shevchenko #include <linux/smp.h>
30*a2c6c1c2SAndy Shevchenko 
31*a2c6c1c2SAndy Shevchenko #include <asm/cpu_device_id.h>
32*a2c6c1c2SAndy Shevchenko #include <asm/segment.h>
33*a2c6c1c2SAndy Shevchenko #include <asm/pci_x86.h>
34*a2c6c1c2SAndy Shevchenko #include <asm/hw_irq.h>
35*a2c6c1c2SAndy Shevchenko #include <asm/io_apic.h>
36*a2c6c1c2SAndy Shevchenko #include <asm/intel-family.h>
37*a2c6c1c2SAndy Shevchenko #include <asm/intel-mid.h>
38*a2c6c1c2SAndy Shevchenko #include <asm/acpi.h>
39*a2c6c1c2SAndy Shevchenko 
40*a2c6c1c2SAndy Shevchenko #define PCIE_CAP_OFFSET	0x100
41*a2c6c1c2SAndy Shevchenko 
42*a2c6c1c2SAndy Shevchenko /* Quirks for the listed devices */
43*a2c6c1c2SAndy Shevchenko #define PCI_DEVICE_ID_INTEL_MRFLD_MMC	0x1190
44*a2c6c1c2SAndy Shevchenko #define PCI_DEVICE_ID_INTEL_MRFLD_HSU	0x1191
45*a2c6c1c2SAndy Shevchenko 
46*a2c6c1c2SAndy Shevchenko /* Fixed BAR fields */
47*a2c6c1c2SAndy Shevchenko #define PCIE_VNDR_CAP_ID_FIXED_BAR 0x00	/* Fixed BAR (TBD) */
48*a2c6c1c2SAndy Shevchenko #define PCI_FIXED_BAR_0_SIZE	0x04
49*a2c6c1c2SAndy Shevchenko #define PCI_FIXED_BAR_1_SIZE	0x08
50*a2c6c1c2SAndy Shevchenko #define PCI_FIXED_BAR_2_SIZE	0x0c
51*a2c6c1c2SAndy Shevchenko #define PCI_FIXED_BAR_3_SIZE	0x10
52*a2c6c1c2SAndy Shevchenko #define PCI_FIXED_BAR_4_SIZE	0x14
53*a2c6c1c2SAndy Shevchenko #define PCI_FIXED_BAR_5_SIZE	0x1c
54*a2c6c1c2SAndy Shevchenko 
55*a2c6c1c2SAndy Shevchenko static int pci_soc_mode;
56*a2c6c1c2SAndy Shevchenko 
57*a2c6c1c2SAndy Shevchenko /**
58*a2c6c1c2SAndy Shevchenko  * fixed_bar_cap - return the offset of the fixed BAR cap if found
59*a2c6c1c2SAndy Shevchenko  * @bus: PCI bus
60*a2c6c1c2SAndy Shevchenko  * @devfn: device in question
61*a2c6c1c2SAndy Shevchenko  *
62*a2c6c1c2SAndy Shevchenko  * Look for the fixed BAR cap on @bus and @devfn, returning its offset
63*a2c6c1c2SAndy Shevchenko  * if found or 0 otherwise.
64*a2c6c1c2SAndy Shevchenko  */
fixed_bar_cap(struct pci_bus * bus,unsigned int devfn)65*a2c6c1c2SAndy Shevchenko static int fixed_bar_cap(struct pci_bus *bus, unsigned int devfn)
66*a2c6c1c2SAndy Shevchenko {
67*a2c6c1c2SAndy Shevchenko 	int pos;
68*a2c6c1c2SAndy Shevchenko 	u32 pcie_cap = 0, cap_data;
69*a2c6c1c2SAndy Shevchenko 
70*a2c6c1c2SAndy Shevchenko 	pos = PCIE_CAP_OFFSET;
71*a2c6c1c2SAndy Shevchenko 
72*a2c6c1c2SAndy Shevchenko 	if (!raw_pci_ext_ops)
73*a2c6c1c2SAndy Shevchenko 		return 0;
74*a2c6c1c2SAndy Shevchenko 
75*a2c6c1c2SAndy Shevchenko 	while (pos) {
76*a2c6c1c2SAndy Shevchenko 		if (raw_pci_ext_ops->read(pci_domain_nr(bus), bus->number,
77*a2c6c1c2SAndy Shevchenko 					  devfn, pos, 4, &pcie_cap))
78*a2c6c1c2SAndy Shevchenko 			return 0;
79*a2c6c1c2SAndy Shevchenko 
80*a2c6c1c2SAndy Shevchenko 		if (PCI_EXT_CAP_ID(pcie_cap) == 0x0000 ||
81*a2c6c1c2SAndy Shevchenko 			PCI_EXT_CAP_ID(pcie_cap) == 0xffff)
82*a2c6c1c2SAndy Shevchenko 			break;
83*a2c6c1c2SAndy Shevchenko 
84*a2c6c1c2SAndy Shevchenko 		if (PCI_EXT_CAP_ID(pcie_cap) == PCI_EXT_CAP_ID_VNDR) {
85*a2c6c1c2SAndy Shevchenko 			raw_pci_ext_ops->read(pci_domain_nr(bus), bus->number,
86*a2c6c1c2SAndy Shevchenko 					      devfn, pos + 4, 4, &cap_data);
87*a2c6c1c2SAndy Shevchenko 			if ((cap_data & 0xffff) == PCIE_VNDR_CAP_ID_FIXED_BAR)
88*a2c6c1c2SAndy Shevchenko 				return pos;
89*a2c6c1c2SAndy Shevchenko 		}
90*a2c6c1c2SAndy Shevchenko 
91*a2c6c1c2SAndy Shevchenko 		pos = PCI_EXT_CAP_NEXT(pcie_cap);
92*a2c6c1c2SAndy Shevchenko 	}
93*a2c6c1c2SAndy Shevchenko 
94*a2c6c1c2SAndy Shevchenko 	return 0;
95*a2c6c1c2SAndy Shevchenko }
96*a2c6c1c2SAndy Shevchenko 
/*
 * pci_device_update_fixed - handle a config-space write to a fixed BAR
 * @bus: PCI bus the device lives on
 * @devfn: device and function number
 * @reg: configuration register offset (one of the BAR registers)
 * @len: access width in bytes
 * @val: value being written
 * @offset: offset of the fixed BAR capability for this device
 *
 * When the PCI core is sizing the BAR (a 4-byte write of all ones), answer
 * by writing a decode pattern derived from the fixed size into mmconfig so
 * the sizing sequence sees the real size.  Any other write is forwarded to
 * mmconfig space unchanged.  Returns the result of the mmconfig write.
 */
static int pci_device_update_fixed(struct pci_bus *bus, unsigned int devfn,
				   int reg, int len, u32 val, int offset)
{
	u32 size;
	unsigned int domain, busnum;
	int bar = (reg - PCI_BASE_ADDRESS_0) >> 2;

	domain = pci_domain_nr(bus);
	busnum = bus->number;

	if (val == ~0 && len == 4) {
		unsigned long decode;

		/*
		 * If reading the fixed BAR size fails, treat the BAR as
		 * non-decoding rather than using an uninitialized value.
		 */
		if (raw_pci_ext_ops->read(domain, busnum, devfn,
					  offset + 8 + (bar * 4), 4, &size))
			size = 0;

		/* Turn the size into a decode pattern for the sizing code */
		if (size) {
			/* Round up to the next power of two... */
			decode = size - 1;
			decode |= decode >> 1;
			decode |= decode >> 2;
			decode |= decode >> 4;
			decode |= decode >> 8;
			decode |= decode >> 16;
			decode++;
			/* ...and turn it into an address decode mask */
			decode = ~(decode - 1);
		} else {
			decode = 0;
		}

		/*
		 * If val is all ones, the core code is trying to size the reg,
		 * so update the mmconfig space with the real size.
		 *
		 * Note: this assumes the fixed size we got is a power of two.
		 */
		return raw_pci_ext_ops->write(domain, busnum, devfn, reg, 4,
					      decode);
	}

	/* This is some other kind of BAR write, so just do it. */
	return raw_pci_ext_ops->write(domain, busnum, devfn, reg, len, val);
}
140*a2c6c1c2SAndy Shevchenko 
141*a2c6c1c2SAndy Shevchenko /**
142*a2c6c1c2SAndy Shevchenko  * type1_access_ok - check whether to use type 1
143*a2c6c1c2SAndy Shevchenko  * @bus: bus number
144*a2c6c1c2SAndy Shevchenko  * @devfn: device & function in question
145*a2c6c1c2SAndy Shevchenko  * @reg: configuration register offset
146*a2c6c1c2SAndy Shevchenko  *
147*a2c6c1c2SAndy Shevchenko  * If the bus is on a Lincroft chip and it exists, or is not on a Lincroft at
148*a2c6c1c2SAndy Shevchenko  * all, the we can go ahead with any reads & writes.  If it's on a Lincroft,
149*a2c6c1c2SAndy Shevchenko  * but doesn't exist, avoid the access altogether to keep the chip from
150*a2c6c1c2SAndy Shevchenko  * hanging.
151*a2c6c1c2SAndy Shevchenko  */
type1_access_ok(unsigned int bus,unsigned int devfn,int reg)152*a2c6c1c2SAndy Shevchenko static bool type1_access_ok(unsigned int bus, unsigned int devfn, int reg)
153*a2c6c1c2SAndy Shevchenko {
154*a2c6c1c2SAndy Shevchenko 	/*
155*a2c6c1c2SAndy Shevchenko 	 * This is a workaround for A0 LNC bug where PCI status register does
156*a2c6c1c2SAndy Shevchenko 	 * not have new CAP bit set. can not be written by SW either.
157*a2c6c1c2SAndy Shevchenko 	 *
158*a2c6c1c2SAndy Shevchenko 	 * PCI header type in real LNC indicates a single function device, this
159*a2c6c1c2SAndy Shevchenko 	 * will prevent probing other devices under the same function in PCI
160*a2c6c1c2SAndy Shevchenko 	 * shim. Therefore, use the header type in shim instead.
161*a2c6c1c2SAndy Shevchenko 	 */
162*a2c6c1c2SAndy Shevchenko 	if (reg >= 0x100 || reg == PCI_STATUS || reg == PCI_HEADER_TYPE)
163*a2c6c1c2SAndy Shevchenko 		return false;
164*a2c6c1c2SAndy Shevchenko 	if (bus == 0 && (devfn == PCI_DEVFN(2, 0)
165*a2c6c1c2SAndy Shevchenko 				|| devfn == PCI_DEVFN(0, 0)
166*a2c6c1c2SAndy Shevchenko 				|| devfn == PCI_DEVFN(3, 0)))
167*a2c6c1c2SAndy Shevchenko 		return true;
168*a2c6c1c2SAndy Shevchenko 	return false; /* Langwell on others */
169*a2c6c1c2SAndy Shevchenko }
170*a2c6c1c2SAndy Shevchenko 
/* Read config space: type 1 where safe, mmconfig everywhere else. */
static int pci_read(struct pci_bus *bus, unsigned int devfn, int where,
		    int size, u32 *value)
{
	unsigned int domain = pci_domain_nr(bus);
	unsigned int busnum = bus->number;

	if (type1_access_ok(busnum, devfn, where))
		return pci_direct_conf1.read(domain, busnum, devfn, where,
					     size, value);

	return raw_pci_ext_ops->read(domain, busnum, devfn, where, size,
				     value);
}
180*a2c6c1c2SAndy Shevchenko 
/* Write config space, with the MID-specific quirks applied. */
static int pci_write(struct pci_bus *bus, unsigned int devfn, int where,
		     int size, u32 value)
{
	unsigned int domain = pci_domain_nr(bus);
	unsigned int busnum = bus->number;
	int fixed_cap;

	/*
	 * On MRST, there is no PCI ROM BAR; eat the write so that a
	 * subsequent read of the ROM BAR returns 0 and is then ignored.
	 */
	if (where == PCI_ROM_ADDRESS)
		return 0;

	/*
	 * Devices with fixed BARs need special handling: the BAR sizing
	 * code saves the register, writes ~0, reads back the size and
	 * restores it, so writes to fixed BARs must be intercepted.
	 * All other writes to fixed BAR devices go through mmconfig.
	 */
	fixed_cap = fixed_bar_cap(bus, devfn);
	if (fixed_cap && where >= PCI_BASE_ADDRESS_0 &&
	    where <= PCI_BASE_ADDRESS_5)
		return pci_device_update_fixed(bus, devfn, where, size, value,
					       fixed_cap);

	/*
	 * Early Lincroft silicon can't handle type 1 accesses to
	 * non-existent devices, so use type 1 only where it is known to
	 * be safe and mmconfig for everything else.
	 */
	if (type1_access_ok(busnum, devfn, where))
		return pci_direct_conf1.write(domain, busnum, devfn, where,
					      size, value);

	return raw_pci_ext_ops->write(domain, busnum, devfn, where, size,
				      value);
}
217*a2c6c1c2SAndy Shevchenko 
/*
 * CPU models whose IRQ0 quirks apply below; presumably Merrifield-class
 * parts, given the MRFLD/TNG device quirks in this file -- confirm.
 */
static const struct x86_cpu_id intel_mid_cpu_ids[] = {
	X86_MATCH_VFM(INTEL_ATOM_SILVERMONT_MID, NULL),
	{}
};
222*a2c6c1c2SAndy Shevchenko 
/*
 * intel_mid_pci_irq_enable - map a device's GSI to an IRQ and assign it
 * @dev: PCI device to enable the interrupt for
 *
 * Reads the GSI from PCI_INTERRUPT_LINE, applies model-specific IRQ0
 * quirks, allocates an IOAPIC RTE for it and stores the resulting Linux
 * IRQ number in @dev->irq.  Returns 0 on success or a negative errno.
 */
static int intel_mid_pci_irq_enable(struct pci_dev *dev)
{
	const struct x86_cpu_id *id;
	struct irq_alloc_info info;
	bool polarity_low;
	u16 model = 0;
	int ret;
	u8 gsi;

	/* Already set up by a previous call -- nothing to do */
	if (dev->irq_managed && dev->irq > 0)
		return 0;

	ret = pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &gsi);
	if (ret) {
		dev_warn(&dev->dev, "Failed to read interrupt line: %d\n", ret);
		return pcibios_err_to_errno(ret);
	}

	/* model stays 0 (and we take the default branch) on unmatched CPUs */
	id = x86_match_cpu(intel_mid_cpu_ids);
	if (id)
		model = id->model;

	switch (model) {
	case VFM_MODEL(INTEL_ATOM_SILVERMONT_MID):
		polarity_low = false;

		/* Special treatment for IRQ0 */
		if (gsi == 0) {
			/*
			 * Skip HS UART common registers device since it has
			 * IRQ0 assigned and not used by the kernel.
			 */
			if (dev->device == PCI_DEVICE_ID_INTEL_MRFLD_HSU)
				return -EBUSY;
			/*
			 * TNG has IRQ0 assigned to eMMC controller. But there
			 * are also other devices with bogus PCI configuration
			 * that have IRQ0 assigned. This check ensures that
			 * eMMC gets it. The rest of devices still could be
			 * enabled without interrupt line being allocated.
			 */
			if (dev->device != PCI_DEVICE_ID_INTEL_MRFLD_MMC)
				return 0;
		}
		break;
	default:
		polarity_low = true;
		break;
	}

	ioapic_set_alloc_attr(&info, dev_to_node(&dev->dev), 1, polarity_low);

	/*
	 * MRST only have IOAPIC, the PCI irq lines are 1:1 mapped to
	 * IOAPIC RTE entries, so we just enable RTE for the device.
	 */
	ret = mp_map_gsi_to_irq(gsi, IOAPIC_MAP_ALLOC, &info);
	if (ret < 0)
		return ret;

	dev->irq = ret;
	dev->irq_managed = 1;

	return 0;
}
288*a2c6c1c2SAndy Shevchenko 
intel_mid_pci_irq_disable(struct pci_dev * dev)289*a2c6c1c2SAndy Shevchenko static void intel_mid_pci_irq_disable(struct pci_dev *dev)
290*a2c6c1c2SAndy Shevchenko {
291*a2c6c1c2SAndy Shevchenko 	if (!mp_should_keep_irq(&dev->dev) && dev->irq_managed &&
292*a2c6c1c2SAndy Shevchenko 	    dev->irq > 0) {
293*a2c6c1c2SAndy Shevchenko 		mp_unmap_irq(dev->irq);
294*a2c6c1c2SAndy Shevchenko 		dev->irq_managed = 0;
295*a2c6c1c2SAndy Shevchenko 	}
296*a2c6c1c2SAndy Shevchenko }
297*a2c6c1c2SAndy Shevchenko 
/* Config-space accessors installed as pci_root_ops by intel_mid_pci_init() */
static const struct pci_ops intel_mid_pci_ops __initconst = {
	.read = pci_read,
	.write = pci_write,
};
302*a2c6c1c2SAndy Shevchenko 
/**
 * intel_mid_pci_init - installs intel_mid_pci_ops
 *
 * Moorestown has an interesting PCI implementation (see above).
 * Called when the early platform detection installs it.
 *
 * Return: 1, so the caller continues with standard PCI init.
 */
int __init intel_mid_pci_init(void)
{
	pr_info("Intel MID platform detected, using MID PCI ops\n");
	pci_mmcfg_late_init();
	/* Route IRQ enable/disable through the MID-specific helpers */
	pcibios_enable_irq = intel_mid_pci_irq_enable;
	pcibios_disable_irq = intel_mid_pci_irq_disable;
	pci_root_ops = intel_mid_pci_ops;
	/* Let the fixups below know they are running on a MID SoC */
	pci_soc_mode = 1;
	/* Continue with standard init */
	acpi_noirq_set();
	return 1;
}
321*a2c6c1c2SAndy Shevchenko 
322*a2c6c1c2SAndy Shevchenko /*
323*a2c6c1c2SAndy Shevchenko  * Langwell devices are not true PCI devices; they are not subject to 10 ms
324*a2c6c1c2SAndy Shevchenko  * d3 to d0 delay required by PCI spec.
325*a2c6c1c2SAndy Shevchenko  */
pci_d3delay_fixup(struct pci_dev * dev)326*a2c6c1c2SAndy Shevchenko static void pci_d3delay_fixup(struct pci_dev *dev)
327*a2c6c1c2SAndy Shevchenko {
328*a2c6c1c2SAndy Shevchenko 	/*
329*a2c6c1c2SAndy Shevchenko 	 * PCI fixups are effectively decided compile time. If we have a dual
330*a2c6c1c2SAndy Shevchenko 	 * SoC/non-SoC kernel we don't want to mangle d3 on non-SoC devices.
331*a2c6c1c2SAndy Shevchenko 	 */
332*a2c6c1c2SAndy Shevchenko 	if (!pci_soc_mode)
333*a2c6c1c2SAndy Shevchenko 		return;
334*a2c6c1c2SAndy Shevchenko 	/*
335*a2c6c1c2SAndy Shevchenko 	 * True PCI devices in Lincroft should allow type 1 access, the rest
336*a2c6c1c2SAndy Shevchenko 	 * are Langwell fake PCI devices.
337*a2c6c1c2SAndy Shevchenko 	 */
338*a2c6c1c2SAndy Shevchenko 	if (type1_access_ok(dev->bus->number, dev->devfn, PCI_DEVICE_ID))
339*a2c6c1c2SAndy Shevchenko 		return;
340*a2c6c1c2SAndy Shevchenko 	dev->d3hot_delay = 0;
341*a2c6c1c2SAndy Shevchenko }
342*a2c6c1c2SAndy Shevchenko DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_ANY_ID, pci_d3delay_fixup);
343*a2c6c1c2SAndy Shevchenko 
/* Put a single device into D3hot via the standard PM control register. */
static void mid_power_off_one_device(struct pci_dev *dev)
{
	u16 pmcsr;

	/*
	 * Update current state first, otherwise PCI core enforces PCI_D0 in
	 * pci_set_power_state() for devices which status was PCI_UNKNOWN.
	 */
	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
	dev->current_state = (pci_power_t __force)(pmcsr & PCI_PM_CTRL_STATE_MASK);

	pci_set_power_state(dev, PCI_D3hot);
}
357*a2c6c1c2SAndy Shevchenko 
mid_power_off_devices(struct pci_dev * dev)358*a2c6c1c2SAndy Shevchenko static void mid_power_off_devices(struct pci_dev *dev)
359*a2c6c1c2SAndy Shevchenko {
360*a2c6c1c2SAndy Shevchenko 	int id;
361*a2c6c1c2SAndy Shevchenko 
362*a2c6c1c2SAndy Shevchenko 	if (!pci_soc_mode)
363*a2c6c1c2SAndy Shevchenko 		return;
364*a2c6c1c2SAndy Shevchenko 
365*a2c6c1c2SAndy Shevchenko 	id = intel_mid_pwr_get_lss_id(dev);
366*a2c6c1c2SAndy Shevchenko 	if (id < 0)
367*a2c6c1c2SAndy Shevchenko 		return;
368*a2c6c1c2SAndy Shevchenko 
369*a2c6c1c2SAndy Shevchenko 	/*
370*a2c6c1c2SAndy Shevchenko 	 * This sets only PMCSR bits. The actual power off will happen in
371*a2c6c1c2SAndy Shevchenko 	 * arch/x86/platform/intel-mid/pwr.c.
372*a2c6c1c2SAndy Shevchenko 	 */
373*a2c6c1c2SAndy Shevchenko 	mid_power_off_one_device(dev);
374*a2c6c1c2SAndy Shevchenko }
375*a2c6c1c2SAndy Shevchenko 
376*a2c6c1c2SAndy Shevchenko DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_ANY_ID, mid_power_off_devices);
377*a2c6c1c2SAndy Shevchenko 
378*a2c6c1c2SAndy Shevchenko /*
379*a2c6c1c2SAndy Shevchenko  * Langwell devices reside at fixed offsets, don't try to move them.
380*a2c6c1c2SAndy Shevchenko  */
pci_fixed_bar_fixup(struct pci_dev * dev)381*a2c6c1c2SAndy Shevchenko static void pci_fixed_bar_fixup(struct pci_dev *dev)
382*a2c6c1c2SAndy Shevchenko {
383*a2c6c1c2SAndy Shevchenko 	unsigned long offset;
384*a2c6c1c2SAndy Shevchenko 	u32 size;
385*a2c6c1c2SAndy Shevchenko 	int i;
386*a2c6c1c2SAndy Shevchenko 
387*a2c6c1c2SAndy Shevchenko 	if (!pci_soc_mode)
388*a2c6c1c2SAndy Shevchenko 		return;
389*a2c6c1c2SAndy Shevchenko 
390*a2c6c1c2SAndy Shevchenko 	/* Must have extended configuration space */
391*a2c6c1c2SAndy Shevchenko 	if (dev->cfg_size < PCIE_CAP_OFFSET + 4)
392*a2c6c1c2SAndy Shevchenko 		return;
393*a2c6c1c2SAndy Shevchenko 
394*a2c6c1c2SAndy Shevchenko 	/* Fixup the BAR sizes for fixed BAR devices and make them unmoveable */
395*a2c6c1c2SAndy Shevchenko 	offset = fixed_bar_cap(dev->bus, dev->devfn);
396*a2c6c1c2SAndy Shevchenko 	if (!offset || PCI_DEVFN(2, 0) == dev->devfn ||
397*a2c6c1c2SAndy Shevchenko 	    PCI_DEVFN(2, 2) == dev->devfn)
398*a2c6c1c2SAndy Shevchenko 		return;
399*a2c6c1c2SAndy Shevchenko 
400*a2c6c1c2SAndy Shevchenko 	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
401*a2c6c1c2SAndy Shevchenko 		pci_read_config_dword(dev, offset + 8 + (i * 4), &size);
402*a2c6c1c2SAndy Shevchenko 		dev->resource[i].end = dev->resource[i].start + size - 1;
403*a2c6c1c2SAndy Shevchenko 		dev->resource[i].flags |= IORESOURCE_PCI_FIXED;
404*a2c6c1c2SAndy Shevchenko 	}
405*a2c6c1c2SAndy Shevchenko }
406*a2c6c1c2SAndy Shevchenko DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_ANY_ID, pci_fixed_bar_fixup);
407