xref: /linux/arch/sh/drivers/pci/pci-sh7780.c (revision 95298d63c67673c654c08952672d016212b26054)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Low-Level PCI Support for the SH7780
4  *
5  *  Copyright (C) 2005 - 2010  Paul Mundt
6  */
7 #include <linux/types.h>
8 #include <linux/kernel.h>
9 #include <linux/init.h>
10 #include <linux/pci.h>
11 #include <linux/interrupt.h>
12 #include <linux/timer.h>
13 #include <linux/irq.h>
14 #include <linux/errno.h>
15 #include <linux/delay.h>
16 #include <linux/log2.h>
17 #include "pci-sh4.h"
18 #include <asm/mmu.h>
19 #include <linux/sizes.h>
20 
/*
 * Extra PCICR bits for the kernel's endianness.  On big-endian kernels
 * SH4_PCICR_BSWP is OR'd into every PCICR write below; presumably this
 * enables byte-lane swapping on the PCI bus -- confirm against the
 * SH7780 hardware manual.  Little-endian kernels add nothing.
 */
#if defined(CONFIG_CPU_BIG_ENDIAN)
# define PCICR_ENDIANNESS SH4_PCICR_BSWP
#else
# define PCICR_ENDIANNESS 0
#endif
26 
27 
/*
 * Bus windows exported to the PCI core.  Shared by the SH7780/SH7785
 * family (hence the sh7785_ name while the channel below is sh7780_).
 *
 * NOTE(review): the physical window addresses come from the SoC memory
 * map; verify against the SH7780/SH7785 hardware manual before changing.
 */
static struct resource sh7785_pci_resources[] = {
	{
		/* I/O window; starts at 0x1000, leaving the lowest 4k unused. */
		.name	= "PCI IO",
		.start	= 0x1000,
		.end	= SZ_4M - 1,
		.flags	= IORESOURCE_IO,
	}, {
		.name	= "PCI MEM 0",
		.start	= 0xfd000000,
		.end	= 0xfd000000 + SZ_16M - 1,
		.flags	= IORESOURCE_MEM,
	}, {
		.name	= "PCI MEM 1",
		.start	= 0x10000000,
		.end	= 0x10000000 + SZ_64M - 1,
		.flags	= IORESOURCE_MEM,
	}, {
		/*
		 * 32-bit only resources must be last.
		 * (sh7780_pci_init() drops trailing entries by decrementing
		 * nr_resources when running in 29-bit physical mode.)
		 */
		.name	= "PCI MEM 2",
		.start	= 0xc0000000,
		.end	= 0xc0000000 + SZ_512M - 1,
		.flags	= IORESOURCE_MEM | IORESOURCE_MEM_32BIT,
	},
};
54 
/*
 * Channel descriptor handed to the common SH PCI code via
 * register_pci_controller().  reg_base is filled in at runtime by
 * sh7780_pci_init(); serr_irq/err_irq are the SERR and ERR interrupt
 * vectors serviced by the handlers below.
 */
static struct pci_channel sh7780_pci_controller = {
	.pci_ops	= &sh4_pci_ops,
	.resources	= sh7785_pci_resources,
	.nr_resources	= ARRAY_SIZE(sh7785_pci_resources),
	.io_offset	= 0,
	.mem_offset	= 0,
	.io_map_base	= 0xfe200000,
	.serr_irq	= evt2irq(0xa00),
	.err_irq	= evt2irq(0xaa0),
};
65 
66 struct pci_errors {
67 	unsigned int	mask;
68 	const char	*str;
69 } pci_arbiter_errors[] = {
70 	{ SH4_PCIAINT_MBKN,	"master broken" },
71 	{ SH4_PCIAINT_TBTO,	"target bus time out" },
72 	{ SH4_PCIAINT_MBTO,	"master bus time out" },
73 	{ SH4_PCIAINT_TABT,	"target abort" },
74 	{ SH4_PCIAINT_MABT,	"master abort" },
75 	{ SH4_PCIAINT_RDPE,	"read data parity error" },
76 	{ SH4_PCIAINT_WDPE,	"write data parity error" },
77 }, pci_interrupt_errors[] = {
78 	{ SH4_PCIINT_MLCK,	"master lock error" },
79 	{ SH4_PCIINT_TABT,	"target-target abort" },
80 	{ SH4_PCIINT_TRET,	"target retry time out" },
81 	{ SH4_PCIINT_MFDE,	"master function disable error" },
82 	{ SH4_PCIINT_PRTY,	"address parity error" },
83 	{ SH4_PCIINT_SERR,	"SERR" },
84 	{ SH4_PCIINT_TWDP,	"data parity error for target write" },
85 	{ SH4_PCIINT_TRDP,	"PERR detected for target read" },
86 	{ SH4_PCIINT_MTABT,	"target abort for master" },
87 	{ SH4_PCIINT_MMABT,	"master abort for master" },
88 	{ SH4_PCIINT_MWPD,	"master write data parity error" },
89 	{ SH4_PCIINT_MRPD,	"master read data parity error" },
90 };
91 
/*
 * ERR interrupt handler: decodes and acknowledges the PCI error
 * conditions latched by the controller.
 *
 * The offending PCI address is read from PCIALR, then three groups of
 * error state are processed in turn: the standard PCI status register,
 * the arbiter interrupt register (PCIAINT) and the general PCI
 * interrupt register (PCIINT).  Each flagged condition is logged at
 * KERN_DEBUG and cleared by writing the set bits back to its register.
 */
static irqreturn_t sh7780_pci_err_irq(int irq, void *dev_id)
{
	struct pci_channel *hose = dev_id;
	unsigned long addr;
	unsigned int status;
	unsigned int cmd;
	int i;

	/* Address that triggered the error, latched by the controller. */
	addr = __raw_readl(hose->reg_base + SH4_PCIALR);

	/*
	 * Handle status errors.
	 */
	status = __raw_readw(hose->reg_base + PCI_STATUS);
	if (status & (PCI_STATUS_PARITY |
		      PCI_STATUS_DETECTED_PARITY |
		      PCI_STATUS_SIG_TARGET_ABORT |
		      PCI_STATUS_REC_TARGET_ABORT |
		      PCI_STATUS_REC_MASTER_ABORT)) {
		cmd = pcibios_handle_status_errors(addr, status, hose);
		if (likely(cmd))
			/* Ack the handled bits (write-back-to-clear). */
			__raw_writew(cmd, hose->reg_base + PCI_STATUS);
	}

	/*
	 * Handle arbiter errors.
	 */
	status = __raw_readl(hose->reg_base + SH4_PCIAINT);
	for (i = cmd = 0; i < ARRAY_SIZE(pci_arbiter_errors); i++) {
		if (status & pci_arbiter_errors[i].mask) {
			printk(KERN_DEBUG "PCI: %s, addr=%08lx\n",
			       pci_arbiter_errors[i].str, addr);
			cmd |= pci_arbiter_errors[i].mask;
		}
	}
	/* Ack every arbiter error bit we just reported. */
	__raw_writel(cmd, hose->reg_base + SH4_PCIAINT);

	/*
	 * Handle the remaining PCI errors.
	 */
	status = __raw_readl(hose->reg_base + SH4_PCIINT);
	for (i = cmd = 0; i < ARRAY_SIZE(pci_interrupt_errors); i++) {
		if (status & pci_interrupt_errors[i].mask) {
			printk(KERN_DEBUG "PCI: %s, addr=%08lx\n",
			       pci_interrupt_errors[i].str, addr);
			cmd |= pci_interrupt_errors[i].mask;
		}
	}
	__raw_writel(cmd, hose->reg_base + SH4_PCIINT);

	return IRQ_HANDLED;
}
144 
/*
 * SERR interrupt handler: reports the system error, deasserts SERR and
 * backs off by disabling the IRQ for one second.
 *
 * The handler disables its own IRQ line and arms hose->serr_timer to
 * expire HZ jiffies later; the timer callback (set up elsewhere, not
 * visible in this file -- presumably in the common SH PCI code) is
 * expected to re-enable the IRQ.
 */
static irqreturn_t sh7780_pci_serr_irq(int irq, void *dev_id)
{
	struct pci_channel *hose = dev_id;

	printk(KERN_DEBUG "PCI: system error received: ");
	pcibios_report_status(PCI_STATUS_SIG_SYSTEM_ERROR, 1);
	printk("\n");

	/* Deassert SERR */
	__raw_writel(SH4_PCIINTM_SDIM, hose->reg_base + SH4_PCIINTM);

	/* Back off the IRQ for awhile */
	disable_irq_nosync(irq);
	hose->serr_timer.expires = jiffies + HZ;
	add_timer(&hose->serr_timer);

	return IRQ_HANDLED;
}
163 
/*
 * Hook up the SERR and ERR interrupt handlers and unmask the error
 * interrupt sources.
 *
 * Any stale error state is cleared first so the freshly-installed
 * handlers do not fire on conditions that predate the reset.
 *
 * Returns 0 on success or the negative errno from request_irq(); if
 * hooking the ERR IRQ fails, the already-installed SERR handler is
 * released again before returning.
 */
static int __init sh7780_pci_setup_irqs(struct pci_channel *hose)
{
	int ret;

	/* Clear out PCI arbiter IRQs */
	__raw_writel(0, hose->reg_base + SH4_PCIAINT);

	/* Clear all error conditions */
	__raw_writew(PCI_STATUS_DETECTED_PARITY  | \
		     PCI_STATUS_SIG_SYSTEM_ERROR | \
		     PCI_STATUS_REC_MASTER_ABORT | \
		     PCI_STATUS_REC_TARGET_ABORT | \
		     PCI_STATUS_SIG_TARGET_ABORT | \
		     PCI_STATUS_PARITY, hose->reg_base + PCI_STATUS);

	ret = request_irq(hose->serr_irq, sh7780_pci_serr_irq, 0,
			  "PCI SERR interrupt", hose);
	if (unlikely(ret)) {
		printk(KERN_ERR "PCI: Failed hooking SERR IRQ\n");
		return ret;
	}

	/*
	 * The PCI ERR IRQ needs to be IRQF_SHARED since all of the power
	 * down IRQ vectors are routed through the ERR IRQ vector. We
	 * only request_irq() once as there is only a single masking
	 * source for multiple events.
	 */
	ret = request_irq(hose->err_irq, sh7780_pci_err_irq, IRQF_SHARED,
			  "PCI ERR interrupt", hose);
	if (unlikely(ret)) {
		free_irq(hose->serr_irq, hose);
		return ret;
	}

	/* Unmask all of the arbiter IRQs. */
	__raw_writel(SH4_PCIAINT_MBKN | SH4_PCIAINT_TBTO | SH4_PCIAINT_MBTO | \
		     SH4_PCIAINT_TABT | SH4_PCIAINT_MABT | SH4_PCIAINT_RDPE | \
		     SH4_PCIAINT_WDPE, hose->reg_base + SH4_PCIAINTM);

	/* Unmask all of the PCI IRQs */
	__raw_writel(SH4_PCIINTM_TTADIM  | SH4_PCIINTM_TMTOIM  | \
		     SH4_PCIINTM_MDEIM   | SH4_PCIINTM_APEDIM  | \
		     SH4_PCIINTM_SDIM    | SH4_PCIINTM_DPEITWM | \
		     SH4_PCIINTM_PEDITRM | SH4_PCIINTM_TADIMM  | \
		     SH4_PCIINTM_MADIMM  | SH4_PCIINTM_MWPDIM  | \
		     SH4_PCIINTM_MRDPEIM, hose->reg_base + SH4_PCIINTM);

	/* ret is 0 here (the last request_irq() succeeded). */
	return ret;
}
214 
/*
 * Release the ERR and SERR handlers in the reverse order they were
 * installed by sh7780_pci_setup_irqs().
 */
static inline void __init sh7780_pci_teardown_irqs(struct pci_channel *hose)
{
	free_irq(hose->err_irq, hose);
	free_irq(hose->serr_irq, hose);
}
220 
/*
 * Switch the bus to 66MHz operation if every device on segment 0
 * advertises 66MHz capability; otherwise leave it at 33MHz.
 *
 * NOTE(review): SH4_PCICR_PREFIX accompanies every PCICR write in this
 * file, which suggests it is a write-enable key for the register --
 * confirm against the SH7780 hardware manual.
 */
static void __init sh7780_pci66_init(struct pci_channel *hose)
{
	unsigned int tmp;

	if (!pci_is_66mhz_capable(hose, 0, 0))
		return;

	/* Enable register access */
	tmp = __raw_readl(hose->reg_base + SH4_PCICR);
	tmp |= SH4_PCICR_PREFIX;
	__raw_writel(tmp, hose->reg_base + SH4_PCICR);

	/* Enable 66MHz operation */
	tmp = __raw_readw(hose->reg_base + PCI_STATUS);
	tmp |= PCI_STATUS_66MHZ;
	__raw_writew(tmp, hose->reg_base + PCI_STATUS);

	/* Done */
	tmp = __raw_readl(hose->reg_base + SH4_PCICR);
	tmp |= SH4_PCICR_PREFIX | SH4_PCICR_CFIN;
	__raw_writel(tmp, hose->reg_base + SH4_PCICR);
}
243 
/*
 * Bring up the SH7780-family PCI host controller.
 *
 * Sequence: enable CPU access to the PCIC register block, reset the
 * controller, verify the Renesas vendor/device IDs, program the local
 * address windows (LAR/LSR) to cover system memory, hook the error
 * IRQs, disable cache snooping, program the memory/I/O bus windows from
 * sh7785_pci_resources, then release the controller from initialization
 * mode and register the channel with the PCI core.
 *
 * Returns 0 on success; -ENODEV/-EINVAL for an unrecognized controller,
 * or the error from IRQ setup / controller registration (the IRQs are
 * torn down again on the latter).
 */
static int __init sh7780_pci_init(void)
{
	struct pci_channel *chan = &sh7780_pci_controller;
	phys_addr_t memphys;
	size_t memsize;
	unsigned int id;
	const char *type;
	int ret, i;

	printk(KERN_NOTICE "PCI: Starting initialization.\n");

	/* Fixed physical base of the PCIC register block. */
	chan->reg_base = 0xfe040000;

	/* Enable CPU access to the PCIC registers. */
	__raw_writel(PCIECR_ENBL, PCIECR);

	/* Reset */
	__raw_writel(SH4_PCICR_PREFIX | SH4_PCICR_PRST | PCICR_ENDIANNESS,
		     chan->reg_base + SH4_PCICR);

	/*
	 * Wait for it to come back up. The spec says to allow for up to
	 * 1 second after toggling the reset pin, but in practice 100ms
	 * is more than enough.
	 */
	mdelay(100);

	/* Sanity-check that this really is a Renesas host controller. */
	id = __raw_readw(chan->reg_base + PCI_VENDOR_ID);
	if (id != PCI_VENDOR_ID_RENESAS) {
		printk(KERN_ERR "PCI: Unknown vendor ID 0x%04x.\n", id);
		return -ENODEV;
	}

	id = __raw_readw(chan->reg_base + PCI_DEVICE_ID);
	type = (id == PCI_DEVICE_ID_RENESAS_SH7763) ? "SH7763" :
	       (id == PCI_DEVICE_ID_RENESAS_SH7780) ? "SH7780" :
	       (id == PCI_DEVICE_ID_RENESAS_SH7781) ? "SH7781" :
	       (id == PCI_DEVICE_ID_RENESAS_SH7785) ? "SH7785" :
					  NULL;
	if (unlikely(!type)) {
		printk(KERN_ERR "PCI: Found an unsupported Renesas host "
		       "controller, device id 0x%04x.\n", id);
		return -EINVAL;
	}

	printk(KERN_NOTICE "PCI: Found a Renesas %s host "
	       "controller, revision %d.\n", type,
	       __raw_readb(chan->reg_base + PCI_REVISION_ID));

	/*
	 * Now throw it in to register initialization mode and
	 * start the real work.
	 */
	__raw_writel(SH4_PCICR_PREFIX | PCICR_ENDIANNESS,
		     chan->reg_base + SH4_PCICR);

	/* LAR/LSR sizes must be powers of two; round memory size up. */
	memphys = __pa(memory_start);
	memsize = roundup_pow_of_two(memory_end - memory_start);

	/*
	 * If there's more than 512MB of memory, we need to roll over to
	 * LAR1/LSR1.
	 */
	if (memsize > SZ_512M) {
		__raw_writel(memphys + SZ_512M, chan->reg_base + SH4_PCILAR1);
		/* Size mask in 1MB granules, bit 0 enables the window. */
		__raw_writel((((memsize - SZ_512M) - SZ_1M) & 0x1ff00000) | 1,
			     chan->reg_base + SH4_PCILSR1);
		memsize = SZ_512M;
	} else {
		/*
		 * Otherwise just zero it out and disable it.
		 */
		__raw_writel(0, chan->reg_base + SH4_PCILAR1);
		__raw_writel(0, chan->reg_base + SH4_PCILSR1);
	}

	/*
	 * LAR0/LSR0 covers up to the first 512MB, which is enough to
	 * cover all of lowmem on most platforms.
	 */
	__raw_writel(memphys, chan->reg_base + SH4_PCILAR0);
	__raw_writel(((memsize - SZ_1M) & 0x1ff00000) | 1,
		     chan->reg_base + SH4_PCILSR0);

	/*
	 * Hook up the ERR and SERR IRQs.
	 */
	ret = sh7780_pci_setup_irqs(chan);
	if (unlikely(ret))
		return ret;

	/*
	 * Disable the cache snoop controller for non-coherent DMA.
	 */
	__raw_writel(0, chan->reg_base + SH7780_PCICSCR0);
	__raw_writel(0, chan->reg_base + SH7780_PCICSAR0);
	__raw_writel(0, chan->reg_base + SH7780_PCICSCR1);
	__raw_writel(0, chan->reg_base + SH7780_PCICSAR1);

	/*
	 * Setup the memory BARs
	 * (index 0 is the I/O window, so memory windows start at i = 1
	 * and map to PCIMBR/PCIMBMR pair i - 1).
	 */
	for (i = 1; i < chan->nr_resources; i++) {
		struct resource *res = chan->resources + i;
		resource_size_t size;

		if (unlikely(res->flags & IORESOURCE_IO))
			continue;

		/*
		 * Make sure we're in the right physical addressing mode
		 * for dealing with the resource.
		 */
		if ((res->flags & IORESOURCE_MEM_32BIT) && __in_29bit_mode()) {
			chan->nr_resources--;
			continue;
		}

		size = resource_size(res);

		/*
		 * The MBMR mask is calculated in units of 256kB, which
		 * keeps things pretty simple.
		 */
		__raw_writel(((roundup_pow_of_two(size) / SZ_256K) - 1) << 18,
			     chan->reg_base + SH7780_PCIMBMR(i - 1));
		__raw_writel(res->start, chan->reg_base + SH7780_PCIMBR(i - 1));
	}

	/*
	 * And I/O.
	 */
	__raw_writel(0, chan->reg_base + PCI_BASE_ADDRESS_0);
	__raw_writel(0, chan->reg_base + SH7780_PCIIOBR);
	__raw_writel(0, chan->reg_base + SH7780_PCIIOBMR);

	/* Enable bus mastering, memory access and error signalling. */
	__raw_writew(PCI_COMMAND_SERR   | PCI_COMMAND_WAIT   | \
		     PCI_COMMAND_PARITY | PCI_COMMAND_MASTER | \
		     PCI_COMMAND_MEMORY, chan->reg_base + PCI_COMMAND);

	/*
	 * Initialization mode complete, release the control register and
	 * enable round robin mode to stop device overruns/starvation.
	 */
	__raw_writel(SH4_PCICR_PREFIX | SH4_PCICR_CFIN | SH4_PCICR_FTO |
		     PCICR_ENDIANNESS,
		     chan->reg_base + SH4_PCICR);

	ret = register_pci_controller(chan);
	if (unlikely(ret))
		goto err;

	sh7780_pci66_init(chan);

	printk(KERN_NOTICE "PCI: Running at %dMHz.\n",
	       (__raw_readw(chan->reg_base + PCI_STATUS) & PCI_STATUS_66MHZ) ?
	       66 : 33);

	return 0;

err:
	sh7780_pci_teardown_irqs(chan);
	return ret;
}
arch_initcall(sh7780_pci_init);
409