// SPDX-License-Identifier: GPL-2.0
/*
 * Low-Level PCI Support for the SH7780
 *
 *  Copyright (C) 2005 - 2010  Paul Mundt
 */
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/timer.h>
#include <linux/irq.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/log2.h>
#include "pci-sh4.h"
#include <asm/mmu.h>
#include <linux/sizes.h>

#if defined(CONFIG_CPU_BIG_ENDIAN)
# define PCICR_ENDIANNESS SH4_PCICR_BSWP
#else
# define PCICR_ENDIANNESS 0
#endif
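/*
 * Note: SH4_PCICR_BSWP presumably enables the PCIC byte-swap logic so
 * that a big-endian CPU sees little-endian PCI space in the expected
 * byte order; little-endian kernels need no swapping, hence 0.
 */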


static struct resource sh7785_pci_resources[] = {
	{
		.name	= "PCI IO",
		.start	= 0x1000,
		.end	= SZ_4M - 1,
		.flags	= IORESOURCE_IO,
	}, {
		.name	= "PCI MEM 0",
		.start	= 0xfd000000,
		.end	= 0xfd000000 + SZ_16M - 1,
		.flags	= IORESOURCE_MEM,
	}, {
		.name	= "PCI MEM 1",
		.start	= 0x10000000,
		.end	= 0x10000000 + SZ_64M - 1,
		.flags	= IORESOURCE_MEM,
	}, {
		/*
		 * 32-bit only resources must be last.
		 */
		.name	= "PCI MEM 2",
		.start	= 0xc0000000,
		.end	= 0xc0000000 + SZ_512M - 1,
		.flags	= IORESOURCE_MEM | IORESOURCE_MEM_32BIT,
	},
};
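/*
 * Note: these windows presumably correspond to fixed ranges in the
 * SH7780/SH7785 physical address map. "PCI MEM 2" at 0xc0000000 is only
 * reachable in 32-bit physical mode and gets dropped at init time on
 * 29-bit parts (see the __in_29bit_mode() check in sh7780_pci_init()).
 */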

static struct pci_channel sh7780_pci_controller = {
	.pci_ops	= &sh4_pci_ops,
	.resources	= sh7785_pci_resources,
	.nr_resources	= ARRAY_SIZE(sh7785_pci_resources),
	.io_offset	= 0,
	.mem_offset	= 0,
	.io_map_base	= 0xfe200000,
	.serr_irq	= evt2irq(0xa00),
	.err_irq	= evt2irq(0xaa0),
};

struct pci_errors {
	unsigned int	mask;
	const char	*str;
} pci_arbiter_errors[] = {
	{ SH4_PCIAINT_MBKN,	"master broken" },
	{ SH4_PCIAINT_TBTO,	"target bus time out" },
	{ SH4_PCIAINT_MBTO,	"master bus time out" },
	{ SH4_PCIAINT_TABT,	"target abort" },
	{ SH4_PCIAINT_MABT,	"master abort" },
	{ SH4_PCIAINT_RDPE,	"read data parity error" },
	{ SH4_PCIAINT_WDPE,	"write data parity error" },
}, pci_interrupt_errors[] = {
	{ SH4_PCIINT_MLCK,	"master lock error" },
	{ SH4_PCIINT_TABT,	"target-target abort" },
	{ SH4_PCIINT_TRET,	"target retry time out" },
	{ SH4_PCIINT_MFDE,	"master function disable error" },
	{ SH4_PCIINT_PRTY,	"address parity error" },
	{ SH4_PCIINT_SERR,	"SERR" },
	{ SH4_PCIINT_TWDP,	"data parity error for target write" },
	{ SH4_PCIINT_TRDP,	"PERR detected for target read" },
	{ SH4_PCIINT_MTABT,	"target abort for master" },
	{ SH4_PCIINT_MMABT,	"master abort for master" },
	{ SH4_PCIINT_MWPD,	"master write data parity error" },
	{ SH4_PCIINT_MRPD,	"master read data parity error" },
};
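/*
 * Note: the error IRQ handler below walks these tables, logs each
 * condition flagged in the corresponding status register, and then
 * writes the accumulated mask back, which presumably acknowledges and
 * clears the latched error bits.
 */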

static irqreturn_t sh7780_pci_err_irq(int irq, void *dev_id)
{
	struct pci_channel *hose = dev_id;
	unsigned long addr;
	unsigned int status;
	unsigned int cmd;
	int i;

	addr = __raw_readl(hose->reg_base + SH4_PCIALR);

	/*
	 * Handle status errors.
	 */
	status = __raw_readw(hose->reg_base + PCI_STATUS);
	if (status & (PCI_STATUS_PARITY |
		      PCI_STATUS_DETECTED_PARITY |
		      PCI_STATUS_SIG_TARGET_ABORT |
		      PCI_STATUS_REC_TARGET_ABORT |
		      PCI_STATUS_REC_MASTER_ABORT)) {
		cmd = pcibios_handle_status_errors(addr, status, hose);
		if (likely(cmd))
			__raw_writew(cmd, hose->reg_base + PCI_STATUS);
	}

	/*
	 * Handle arbiter errors.
	 */
	status = __raw_readl(hose->reg_base + SH4_PCIAINT);
	for (i = cmd = 0; i < ARRAY_SIZE(pci_arbiter_errors); i++) {
		if (status & pci_arbiter_errors[i].mask) {
			printk(KERN_DEBUG "PCI: %s, addr=%08lx\n",
			       pci_arbiter_errors[i].str, addr);
			cmd |= pci_arbiter_errors[i].mask;
		}
	}
	__raw_writel(cmd, hose->reg_base + SH4_PCIAINT);

	/*
	 * Handle the remaining PCI errors.
	 */
	status = __raw_readl(hose->reg_base + SH4_PCIINT);
	for (i = cmd = 0; i < ARRAY_SIZE(pci_interrupt_errors); i++) {
		if (status & pci_interrupt_errors[i].mask) {
			printk(KERN_DEBUG "PCI: %s, addr=%08lx\n",
			       pci_interrupt_errors[i].str, addr);
			cmd |= pci_interrupt_errors[i].mask;
		}
	}
	__raw_writel(cmd, hose->reg_base + SH4_PCIINT);

	return IRQ_HANDLED;
}

static irqreturn_t sh7780_pci_serr_irq(int irq, void *dev_id)
{
	struct pci_channel *hose = dev_id;

	printk(KERN_DEBUG "PCI: system error received: ");
	pcibios_report_status(PCI_STATUS_SIG_SYSTEM_ERROR, 1);
	pr_cont("\n");

	/* Deassert SERR */
	__raw_writel(SH4_PCIINTM_SDIM, hose->reg_base + SH4_PCIINTM);

	/* Back off the IRQ for a while */
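	/*
	 * Note: the serr_timer is assumed to be armed by the common SH
	 * PCI code with a handler that re-enables this IRQ once the
	 * one-second backoff below expires; only the timer itself is
	 * visible here.
	 */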
	disable_irq_nosync(irq);
	hose->serr_timer.expires = jiffies + HZ;
	add_timer(&hose->serr_timer);

	return IRQ_HANDLED;
}

static int __init sh7780_pci_setup_irqs(struct pci_channel *hose)
{
	int ret;

	/* Clear out PCI arbiter IRQs */
	__raw_writel(0, hose->reg_base + SH4_PCIAINT);

	/* Clear all error conditions */
	__raw_writew(PCI_STATUS_DETECTED_PARITY  | \
		     PCI_STATUS_SIG_SYSTEM_ERROR | \
		     PCI_STATUS_REC_MASTER_ABORT | \
		     PCI_STATUS_REC_TARGET_ABORT | \
		     PCI_STATUS_SIG_TARGET_ABORT | \
		     PCI_STATUS_PARITY, hose->reg_base + PCI_STATUS);

	ret = request_irq(hose->serr_irq, sh7780_pci_serr_irq, 0,
			  "PCI SERR interrupt", hose);
	if (unlikely(ret)) {
		pr_err("PCI: Failed hooking SERR IRQ\n");
		return ret;
	}

	/*
	 * The PCI ERR IRQ needs to be IRQF_SHARED since all of the power
	 * down IRQ vectors are routed through the ERR IRQ vector. We
	 * only request_irq() once as there is only a single masking
	 * source for multiple events.
	 */
	ret = request_irq(hose->err_irq, sh7780_pci_err_irq, IRQF_SHARED,
			  "PCI ERR interrupt", hose);
	if (unlikely(ret)) {
		free_irq(hose->serr_irq, hose);
		return ret;
	}

	/* Unmask all of the arbiter IRQs. */
	__raw_writel(SH4_PCIAINT_MBKN | SH4_PCIAINT_TBTO | SH4_PCIAINT_MBTO | \
		     SH4_PCIAINT_TABT | SH4_PCIAINT_MABT | SH4_PCIAINT_RDPE | \
		     SH4_PCIAINT_WDPE, hose->reg_base + SH4_PCIAINTM);

	/* Unmask all of the PCI IRQs */
	__raw_writel(SH4_PCIINTM_TTADIM  | SH4_PCIINTM_TMTOIM  | \
		     SH4_PCIINTM_MDEIM   | SH4_PCIINTM_APEDIM  | \
		     SH4_PCIINTM_SDIM    | SH4_PCIINTM_DPEITWM | \
		     SH4_PCIINTM_PEDITRM | SH4_PCIINTM_TADIMM  | \
		     SH4_PCIINTM_MADIMM  | SH4_PCIINTM_MWPDIM  | \
		     SH4_PCIINTM_MRDPEIM, hose->reg_base + SH4_PCIINTM);

	return ret;
}

static inline void __init sh7780_pci_teardown_irqs(struct pci_channel *hose)
{
	free_irq(hose->err_irq, hose);
	free_irq(hose->serr_irq, hose);
}

static void __init sh7780_pci66_init(struct pci_channel *hose)
{
	unsigned int tmp;

	if (!pci_is_66mhz_capable(hose, 0, 0))
		return;

	/* Enable register access */
	tmp = __raw_readl(hose->reg_base + SH4_PCICR);
	tmp |= SH4_PCICR_PREFIX;
	__raw_writel(tmp, hose->reg_base + SH4_PCICR);

	/* Enable 66MHz operation */
	tmp = __raw_readw(hose->reg_base + PCI_STATUS);
	tmp |= PCI_STATUS_66MHZ;
	__raw_writew(tmp, hose->reg_base + PCI_STATUS);

	/* Done */
	tmp = __raw_readl(hose->reg_base + SH4_PCICR);
	tmp |= SH4_PCICR_PREFIX | SH4_PCICR_CFIN;
	__raw_writel(tmp, hose->reg_base + SH4_PCICR);
}

static int __init sh7780_pci_init(void)
{
	struct pci_channel *chan = &sh7780_pci_controller;
	phys_addr_t memphys;
	size_t memsize;
	unsigned int id;
	const char *type;
	int ret, i;

	pr_notice("PCI: Starting initialization.\n");

	chan->reg_base = 0xfe040000;

	/* Enable CPU access to the PCIC registers. */
	__raw_writel(PCIECR_ENBL, PCIECR);

	/* Reset */
	__raw_writel(SH4_PCICR_PREFIX | SH4_PCICR_PRST | PCICR_ENDIANNESS,
		     chan->reg_base + SH4_PCICR);

	/*
	 * Wait for it to come back up. The spec says to allow for up to
	 * 1 second after toggling the reset pin, but in practice 100ms
	 * is more than enough.
	 */
	mdelay(100);

	id = __raw_readw(chan->reg_base + PCI_VENDOR_ID);
	if (id != PCI_VENDOR_ID_RENESAS) {
		pr_err("PCI: Unknown vendor ID 0x%04x.\n", id);
		return -ENODEV;
	}

	id = __raw_readw(chan->reg_base + PCI_DEVICE_ID);
	type = (id == PCI_DEVICE_ID_RENESAS_SH7763) ? "SH7763" :
	       (id == PCI_DEVICE_ID_RENESAS_SH7780) ? "SH7780" :
	       (id == PCI_DEVICE_ID_RENESAS_SH7781) ? "SH7781" :
	       (id == PCI_DEVICE_ID_RENESAS_SH7785) ? "SH7785" :
					  NULL;
	if (unlikely(!type)) {
		pr_err("PCI: Found an unsupported Renesas host controller, device id 0x%04x.\n",
		       id);
		return -EINVAL;
	}

	pr_notice("PCI: Found a Renesas %s host controller, revision %d.\n",
		  type, __raw_readb(chan->reg_base + PCI_REVISION_ID));

	/*
	 * Now throw it into register initialization mode and
	 * start the real work.
	 */
	__raw_writel(SH4_PCICR_PREFIX | PCICR_ENDIANNESS,
		     chan->reg_base + SH4_PCICR);

	memphys = __pa(memory_start);
	memsize = roundup_pow_of_two(memory_end - memory_start);
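	/*
	 * Note: the local-space registers below appear to implement a
	 * base/mask comparison, so the window size is rounded up to a
	 * power of two here to keep the mask arithmetic valid (an
	 * assumption based on the mask math that follows).
	 */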

	/*
	 * If there's more than 512MB of memory, we need to roll over to
	 * LAR1/LSR1.
	 */
	if (memsize > SZ_512M) {
		__raw_writel(memphys + SZ_512M, chan->reg_base + SH4_PCILAR1);
		__raw_writel((((memsize - SZ_512M) - SZ_1M) & 0x1ff00000) | 1,
			     chan->reg_base + SH4_PCILSR1);
		memsize = SZ_512M;
	} else {
		/*
		 * Otherwise just zero it out and disable it.
		 */
		__raw_writel(0, chan->reg_base + SH4_PCILAR1);
		__raw_writel(0, chan->reg_base + SH4_PCILSR1);
	}
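	/*
	 * Worked example (hypothetical 1GB configuration): LAR1 would
	 * point at memphys + 512MB and LSR1 would be written with
	 * ((SZ_512M - SZ_1M) & 0x1ff00000) | 1 == 0x1ff00001, i.e. a
	 * 512MB window with what appears to be an enable bit in bit 0.
	 */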

	/*
	 * LAR0/LSR0 covers up to the first 512MB, which is enough to
	 * cover all of lowmem on most platforms.
	 */
	__raw_writel(memphys, chan->reg_base + SH4_PCILAR0);
	__raw_writel(((memsize - SZ_1M) & 0x1ff00000) | 1,
		     chan->reg_base + SH4_PCILSR0);
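	/*
	 * For example, with 128MB of memory the LSR0 value works out to
	 * ((SZ_128M - SZ_1M) & 0x1ff00000) | 1 == 0x07f00001; bits 20-28
	 * presumably hold the size mask in 1MB units, with bit 0 acting
	 * as an enable.
	 */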

	/*
	 * Hook up the ERR and SERR IRQs.
	 */
	ret = sh7780_pci_setup_irqs(chan);
	if (unlikely(ret))
		return ret;

	/*
	 * Disable the cache snoop controller for non-coherent DMA.
	 */
	__raw_writel(0, chan->reg_base + SH7780_PCICSCR0);
	__raw_writel(0, chan->reg_base + SH7780_PCICSAR0);
	__raw_writel(0, chan->reg_base + SH7780_PCICSCR1);
	__raw_writel(0, chan->reg_base + SH7780_PCICSAR1);
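	/*
	 * Note: with snooping off, inbound PCI accesses presumably do not
	 * keep the CPU caches coherent, so DMA buffers are expected to be
	 * handled by the architecture's non-coherent DMA mapping code.
	 */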

	/*
	 * Setup the memory BARs
	 */
	for (i = 1; i < chan->nr_resources; i++) {
		struct resource *res = chan->resources + i;
		resource_size_t size;

		if (unlikely(res->flags & IORESOURCE_IO))
			continue;

		/*
		 * Make sure we're in the right physical addressing mode
		 * for dealing with the resource.
		 */
		if ((res->flags & IORESOURCE_MEM_32BIT) && __in_29bit_mode()) {
			chan->nr_resources--;
			continue;
		}

		size = resource_size(res);

		/*
		 * The MBMR mask is calculated in units of 256kB, which
		 * keeps things pretty simple.
		 */
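		/*
		 * For example, the 64MB "PCI MEM 1" window gives
		 * ((SZ_64M / SZ_256K) - 1) << 18 == 255 << 18 ==
		 * 0x03fc0000, so the size mask appears to occupy bits 18
		 * and up of the MBMR register.
		 */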
		__raw_writel(((roundup_pow_of_two(size) / SZ_256K) - 1) << 18,
			     chan->reg_base + SH7780_PCIMBMR(i - 1));
		__raw_writel(res->start, chan->reg_base + SH7780_PCIMBR(i - 1));
	}

	/*
	 * And I/O.
	 */
	__raw_writel(0, chan->reg_base + PCI_BASE_ADDRESS_0);
	__raw_writel(0, chan->reg_base + SH7780_PCIIOBR);
	__raw_writel(0, chan->reg_base + SH7780_PCIIOBMR);
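	/*
	 * Note: zeroing PCIIOBR/PCIIOBMR presumably leaves the I/O window
	 * starting at PCI I/O address 0, which matches the io_offset of 0
	 * and the io_map_base of 0xfe200000 in the channel definition
	 * above.
	 */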

	__raw_writew(PCI_COMMAND_SERR   | PCI_COMMAND_WAIT   | \
		     PCI_COMMAND_PARITY | PCI_COMMAND_MASTER | \
		     PCI_COMMAND_MEMORY, chan->reg_base + PCI_COMMAND);

	/*
	 * Initialization mode complete, release the control register and
	 * enable round robin mode to stop device overruns/starvation.
	 */
	__raw_writel(SH4_PCICR_PREFIX | SH4_PCICR_CFIN | SH4_PCICR_FTO |
		     PCICR_ENDIANNESS,
		     chan->reg_base + SH4_PCICR);

	ret = register_pci_controller(chan);
	if (unlikely(ret))
		goto err;

	sh7780_pci66_init(chan);

	pr_notice("PCI: Running at %dMHz.\n",
		  (__raw_readw(chan->reg_base + PCI_STATUS) & PCI_STATUS_66MHZ)
		  ? 66 : 33);

	return 0;

err:
	sh7780_pci_teardown_irqs(chan);
	return ret;
}
arch_initcall(sh7780_pci_init);