// SPDX-License-Identifier: GPL-2.0
/*
 *	linux/arch/alpha/kernel/core_irongate.c
 *
 * Based on code written by David A. Rusling (david.rusling@reo.mts.dec.com).
 *
 *	Copyright (C) 1999 Alpha Processor, Inc.,
 *		(David Daniel, Stig Telfer, Soohoon Lee)
 *
 * Code common to all IRONGATE core logic chips.
 */

#define __EXTERN_INLINE inline
#include <asm/io.h>
#include <asm/core_irongate.h>
#undef __EXTERN_INLINE

#include <linux/types.h>
#include <linux/pci.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/initrd.h>
#include <linux/bootmem.h>
#include <linux/memblock.h>

#include <asm/ptrace.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

#include "proto.h"
#include "pci_impl.h"

/*
 * BIOS32-style PCI interface:
 */

#define DEBUG_CONFIG 0

#if DEBUG_CONFIG
# define DBG_CFG(args)	printk args
#else
# define DBG_CFG(args)
#endif

igcsr32 *IronECC;

/*
 * Given a bus, device, and function number, compute the resulting
 * configuration space address.  It is therefore not safe to have
 * concurrent invocations of the configuration space access routines,
 * but there really shouldn't be any need for that.
 *
 *	addr[31:24]		reserved
 *	addr[23:16]		bus number (8 bits = 256 possible buses)
 *	addr[15:11]		device number (5 bits)
 *	addr[10: 8]		function number
 *	addr[ 7: 2]		register number
 *
 * For IRONGATE:
 *    if (bus = addr[23:16]) == 0
 *    then
 *	  type 0 config cycle:
 *	      addr_on_pci[31:11] = id selection for device = addr[15:11]
 *	      addr_on_pci[10: 2] = addr[10: 2] ???
 *	      addr_on_pci[ 1: 0] = 00
 *    else
 *	  type 1 config cycle (pass on with no decoding):
 *	      addr_on_pci[31:24] = 0
 *	      addr_on_pci[23: 2] = addr[23: 2]
 *	      addr_on_pci[ 1: 0] = 01
 *    fi
 *
 * Notes:
 *	The function number selects which function of a multi-function device
 *	(e.g., SCSI and Ethernet).
 *
 *	The register selects a DWORD (32 bit) register offset.  Hence it
 *	doesn't get shifted by 2 bits as we want to "drop" the bottom two
 *	bits.
 */
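
/*
 * For example, bus 1, slot 2, function 0, register 0x10 gives
 * device_fn = (2 << 3) | 0 = 0x10, and mk_conf_addr() below forms
 * addr = IRONGATE_CONF | (1 << 16) | (0x10 << 8) | 0x10; the non-zero
 * bus number makes this a type 1 configuration cycle.
 */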

static int
mk_conf_addr(struct pci_bus *pbus, unsigned int device_fn, int where,
	     unsigned long *pci_addr, unsigned char *type1)
{
	unsigned long addr;
	u8 bus = pbus->number;

	DBG_CFG(("mk_conf_addr(bus=%d, device_fn=0x%x, where=0x%x, "
		 "pci_addr=0x%p, type1=0x%p)\n",
		 bus, device_fn, where, pci_addr, type1));

	*type1 = (bus != 0);

	addr = (bus << 16) | (device_fn << 8) | where;
	addr |= IRONGATE_CONF;

	*pci_addr = addr;
	DBG_CFG(("mk_conf_addr: returning pci_addr 0x%lx\n", addr));
	return 0;
}

static int
irongate_read_config(struct pci_bus *bus, unsigned int devfn, int where,
		     int size, u32 *value)
{
	unsigned long addr;
	unsigned char type1;

	if (mk_conf_addr(bus, devfn, where, &addr, &type1))
		return PCIBIOS_DEVICE_NOT_FOUND;

	switch (size) {
	case 1:
		*value = __kernel_ldbu(*(vucp)addr);
		break;
	case 2:
		*value = __kernel_ldwu(*(vusp)addr);
		break;
	case 4:
		*value = *(vuip)addr;
		break;
	}

	return PCIBIOS_SUCCESSFUL;
}

static int
irongate_write_config(struct pci_bus *bus, unsigned int devfn, int where,
		      int size, u32 value)
{
	unsigned long addr;
	unsigned char type1;

	if (mk_conf_addr(bus, devfn, where, &addr, &type1))
		return PCIBIOS_DEVICE_NOT_FOUND;

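	/* Each store below is followed by a memory barrier and a dummy
	   read back from the same location, so the write reaches
	   configuration space before we return. */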
	switch (size) {
	case 1:
		__kernel_stb(value, *(vucp)addr);
		mb();
		__kernel_ldbu(*(vucp)addr);
		break;
	case 2:
		__kernel_stw(value, *(vusp)addr);
		mb();
		__kernel_ldwu(*(vusp)addr);
		break;
	case 4:
		*(vuip)addr = value;
		mb();
		*(vuip)addr;
		break;
	}

	return PCIBIOS_SUCCESSFUL;
}

struct pci_ops irongate_pci_ops =
{
	.read =		irongate_read_config,
	.write =	irongate_write_config,
};

int
irongate_pci_clr_err(void)
{
	unsigned int nmi_ctl = 0;
	unsigned int IRONGATE_jd;

again:
	IRONGATE_jd = IRONGATE0->stat_cmd;
	printk("Iron stat_cmd %x\n", IRONGATE_jd);
	IRONGATE0->stat_cmd = IRONGATE_jd; /* write again clears error bits */
	mb();
	IRONGATE_jd = IRONGATE0->stat_cmd;  /* re-read to force write */

	IRONGATE_jd = *IronECC;
	printk("Iron ECC %x\n", IRONGATE_jd);
	*IronECC = IRONGATE_jd; /* write again clears error bits */
	mb();
	IRONGATE_jd = *IronECC;  /* re-read to force write */

	/* Clear ALI NMI */
	nmi_ctl = inb(0x61);
	nmi_ctl |= 0x0c;
	outb(nmi_ctl, 0x61);
	nmi_ctl &= ~0x0c;
	outb(nmi_ctl, 0x61);

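	/* If the ECC status still shows an error, run the whole
	   clearing sequence again. */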
	IRONGATE_jd = *IronECC;
	if (IRONGATE_jd & 0x300)
		goto again;

	return 0;
}

#define IRONGATE_3GB 0xc0000000UL

/* On Albacore (aka UP1500) with 4Gb of RAM we have to reserve some
   memory for PCI. At this point we just reserve memory above 3Gb. Most
   of this memory will be freed after PCI setup is done. */
static void __init
albacore_init_arch(void)
{
	unsigned long memtop = max_low_pfn << PAGE_SHIFT;
	unsigned long pci_mem = (memtop + 0x1000000UL) & ~0xffffffUL;	/* 16MB boundary above memtop */
	struct percpu_struct *cpu;
	int pal_rev, pal_var;

	cpu = (struct percpu_struct*)((char*)hwrpb + hwrpb->processor_offset);
	pal_rev = cpu->pal_revision & 0xffff;
	pal_var = (cpu->pal_revision >> 16) & 0xff;

	/* Consoles earlier than A5.6-18 (OSF PALcode v1.62-2) set up
	   the CPU incorrectly (leaving speculative stores enabled),
	   which causes memory corruption under certain conditions.
	   Issue a warning for such consoles. */
	if (alpha_using_srm &&
	    (pal_rev < 0x13e || (pal_rev == 0x13e && pal_var < 2)))
		printk(KERN_WARNING "WARNING! Upgrade to SRM A5.6-19 "
				    "or later\n");

	if (pci_mem > IRONGATE_3GB)
		pci_mem = IRONGATE_3GB;
	IRONGATE0->pci_mem = pci_mem;
	alpha_mv.min_mem_address = pci_mem;
	if (memtop > pci_mem) {
#ifdef CONFIG_BLK_DEV_INITRD
		extern unsigned long initrd_start, initrd_end;
		extern void *move_initrd(unsigned long);

		/* Move the initrd out of the way. */
		if (initrd_end && __pa(initrd_end) > pci_mem) {
			unsigned long size;

			size = initrd_end - initrd_start;
			free_bootmem_node(NODE_DATA(0), __pa(initrd_start),
					  PAGE_ALIGN(size));
			if (!move_initrd(pci_mem))
				printk("irongate_init_arch: initrd too big "
				       "(%ldK)\ndisabling initrd\n",
				       size / 1024);
		}
#endif
		memblock_reserve(pci_mem, memtop - pci_mem);
		printk("irongate_init_arch: temporarily reserving "
			"region %08lx-%08lx for PCI\n", pci_mem, memtop - 1);
	}
}

static void __init
irongate_setup_agp(void)
{
	/* Disable the GART window. AGPGART doesn't work due to yet
	   unresolved memory coherency issues... */
	IRONGATE0->agpva = IRONGATE0->agpva & ~0xf;
	alpha_agpgart_size = 0;
}

void __init
irongate_init_arch(void)
{
	struct pci_controller *hose;
	/* Device IDs above 0x7006 (the AMD 751) indicate the AMD 761
	   host bridge used on the Albacore (UP1500) board. */
	int amd761 = (IRONGATE0->dev_vendor >> 16) > 0x7006;

	IronECC = amd761 ? &IRONGATE0->bacsr54_eccms761 : &IRONGATE0->dramms;

	irongate_pci_clr_err();

	if (amd761)
		albacore_init_arch();

	irongate_setup_agp();

	/*
	 * Create our single hose.
	 */

	pci_isa_hose = hose = alloc_pci_controller();
	hose->io_space = &ioport_resource;
	hose->mem_space = &iomem_resource;
	hose->index = 0;

	/* This is for userland consumption.  For some reason, the 40-bit
	   PIO bias that we use in the kernel through KSEG didn't work for
	   the page table based user mappings.  So make sure we get the
	   43-bit PIO bias.  */
	hose->sparse_mem_base = 0;
	hose->sparse_io_base = 0;
	hose->dense_mem_base
	  = (IRONGATE_MEM & 0xffffffffffUL) | 0x80000000000UL;
	hose->dense_io_base
	  = (IRONGATE_IO & 0xffffffffffUL) | 0x80000000000UL;

	hose->sg_isa = hose->sg_pci = NULL;
	__direct_map_base = 0;
	__direct_map_size = 0xffffffff;
}

/*
 * IO map and AGP support
 */
#include <linux/vmalloc.h>
#include <linux/agp_backend.h>
#include <linux/agpgart.h>
#include <linux/export.h>
#include <asm/pgalloc.h>

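/*
 * The GART translation table is two-level: GET_GATT() picks a GATT
 * page out of the gatt_pages directory (one entry per 4MB of bus
 * address space), and GET_GATT_OFF() indexes the 1024 32-bit entries
 * within that page, one per 4KB page.
 */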
#define GET_PAGE_DIR_OFF(addr) (addr >> 22)
#define GET_PAGE_DIR_IDX(addr) (GET_PAGE_DIR_OFF(addr))

#define GET_GATT_OFF(addr) ((addr & 0x003ff000) >> 12)
#define GET_GATT(addr) (gatt_pages[GET_PAGE_DIR_IDX(addr)])

void __iomem *
irongate_ioremap(unsigned long addr, unsigned long size)
{
	struct vm_struct *area;
	unsigned long vaddr;
	unsigned long baddr, last;
	u32 *mmio_regs, *gatt_pages, *cur_gatt, pte;
	unsigned long gart_bus_addr;

	if (!alpha_agpgart_size)
		return (void __iomem *)(addr + IRONGATE_MEM);

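	/* The bridge's BAR0 gives the bus address of the AGP aperture;
	   only addresses inside the aperture need GATT translation. */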
	gart_bus_addr = (unsigned long)IRONGATE0->bar0 &
			PCI_BASE_ADDRESS_MEM_MASK;

	/*
	 * Check whether the request falls within the AGP aperture...
	 */
	do {
		/*
		 * Check the AGP area
		 */
		if (addr >= gart_bus_addr && addr + size - 1 <
		    gart_bus_addr + alpha_agpgart_size)
			break;

		/*
		 * Not found - assume legacy ioremap
		 */
		return (void __iomem *)(addr + IRONGATE_MEM);
	} while (0);

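	/* BAR1 points at the bridge's MMIO register block; mmio_regs[1]
	   is taken as the physical address of the GATT page directory. */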
	mmio_regs = (u32 *)(((unsigned long)IRONGATE0->bar1 &
			PCI_BASE_ADDRESS_MEM_MASK) + IRONGATE_MEM);

	gatt_pages = (u32 *)(phys_to_virt(mmio_regs[1])); /* FIXME */

	/*
	 * Adjust the limits (mappings must be page aligned)
	 */
	if (addr & ~PAGE_MASK) {
		printk("AGP ioremap failed... addr not page aligned (0x%lx)\n",
		       addr);
		return (void __iomem *)(addr + IRONGATE_MEM);
	}
	last = addr + size - 1;
	size = PAGE_ALIGN(last) - addr;

#if 0
	printk("irongate_ioremap(0x%lx, 0x%lx)\n", addr, size);
	printk("irongate_ioremap:  gart_bus_addr  0x%lx\n", gart_bus_addr);
	printk("irongate_ioremap:  gart_aper_size 0x%lx\n", gart_aper_size);
	printk("irongate_ioremap:  mmio_regs      %p\n", mmio_regs);
	printk("irongate_ioremap:  gatt_pages     %p\n", gatt_pages);

	for (baddr = addr; baddr <= last; baddr += PAGE_SIZE)
	{
		cur_gatt = phys_to_virt(GET_GATT(baddr) & ~1);
		pte = cur_gatt[GET_GATT_OFF(baddr)] & ~1;
		printk("irongate_ioremap:  cur_gatt %p pte 0x%x\n",
		       cur_gatt, pte);
	}
#endif

	/*
	 * Map it
	 */
	area = get_vm_area(size, VM_IOREMAP);
	if (!area)
		return NULL;

	for (baddr = addr, vaddr = (unsigned long)area->addr;
	     baddr <= last;
	     baddr += PAGE_SIZE, vaddr += PAGE_SIZE)
	{
		cur_gatt = phys_to_virt(GET_GATT(baddr) & ~1);
		pte = cur_gatt[GET_GATT_OFF(baddr)] & ~1;

		if (__alpha_remap_area_pages(vaddr,
					     pte, PAGE_SIZE, 0)) {
			printk("AGP ioremap: FAILED to map...\n");
			vfree(area->addr);
			return NULL;
		}
	}

	flush_tlb_all();

	vaddr = (unsigned long)area->addr + (addr & ~PAGE_MASK);
#if 0
	printk("irongate_ioremap(0x%lx, 0x%lx) returning 0x%lx\n",
	       addr, size, vaddr);
#endif
	return (void __iomem *)vaddr;
}
EXPORT_SYMBOL(irongate_ioremap);

void
irongate_iounmap(volatile void __iomem *xaddr)
{
	unsigned long addr = (unsigned long) xaddr;
	if (((long)addr >> 41) == -2)
		return;	/* kseg map, nothing to do */
	if (addr)
		return vfree((void *)(PAGE_MASK & addr));
}
EXPORT_SYMBOL(irongate_iounmap);
421