xref: /linux/arch/parisc/kernel/inventory.c (revision ec63e2a4897075e427c121d863bd89c44578094f)
/*
 * inventory.c
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Copyright (c) 1999 The Puffin Group (David Kennedy and Alex deVries)
 * Copyright (c) 2001 Matthew Wilcox for Hewlett-Packard
 *
 * These are the routines to discover what hardware exists in this box.
 * This task is complicated by there being 3 different ways of
 * performing an inventory, depending largely on the age of the box.
 * The recommended way to do this is to check to see whether the machine
 * is a `Snake' first, then try System Map, then try PAT.  We try System
 * Map before checking for a Snake -- this probably doesn't cause any
 * problems, but...
 */
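
/*
 * In outline: setup_pdc() below probes the firmware once and records the
 * result in pdc_type; do_memory_inventory() and do_device_inventory()
 * then dispatch on pdc_type to the PAT, System Map or Snake code in
 * this file.
 */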

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <asm/hardware.h>
#include <asm/io.h>
#include <asm/mmzone.h>
#include <asm/pdc.h>
#include <asm/pdcpat.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/parisc-device.h>

/*
** Debug options
** DEBUG_PAT Dump details which PDC PAT provides about ranges/devices.
*/
#undef DEBUG_PAT

int pdc_type __read_mostly = PDC_TYPE_ILLEGAL;

/* cell number and location (PAT firmware only) */
unsigned long parisc_cell_num __read_mostly;
unsigned long parisc_cell_loc __read_mostly;
unsigned long parisc_pat_pdc_cap __read_mostly;


void __init setup_pdc(void)
{
	long status;
	unsigned int bus_id;
	struct pdc_system_map_mod_info module_result;
	struct pdc_module_path module_path;
	struct pdc_model model;
#ifdef CONFIG_64BIT
	struct pdc_pat_cell_num cell_info;
#endif

	/* Determine the pdc "type" used on this machine */

	printk(KERN_INFO "Determining PDC firmware type: ");

	status = pdc_system_map_find_mods(&module_result, &module_path, 0);
	if (status == PDC_OK) {
		pdc_type = PDC_TYPE_SYSTEM_MAP;
		pr_cont("System Map.\n");
		return;
	}

	/*
	 * If the machine doesn't support PDC_SYSTEM_MAP then either it
	 * is a pdc pat box, or it is an older box. All 64 bit capable
	 * machines are either pdc pat boxes or they support PDC_SYSTEM_MAP.
	 */

	/*
	 * TODO: We should test for 64 bit capability and give a
	 * clearer message.
	 */

#ifdef CONFIG_64BIT
	status = pdc_pat_cell_get_number(&cell_info);
	if (status == PDC_OK) {
		unsigned long legacy_rev, pat_rev;
		pdc_type = PDC_TYPE_PAT;
		pr_cont("64 bit PAT.\n");
		parisc_cell_num = cell_info.cell_num;
		parisc_cell_loc = cell_info.cell_loc;
		pr_info("PAT: Running on cell %lu and location %lu.\n",
			parisc_cell_num, parisc_cell_loc);
		status = pdc_pat_pd_get_pdc_revisions(&legacy_rev,
			&pat_rev, &parisc_pat_pdc_cap);
		pr_info("PAT: legacy revision 0x%lx, pat_rev 0x%lx, pdc_cap 0x%lx, S-PTLB %d, HPMC_RENDEZ %d.\n",
			legacy_rev, pat_rev, parisc_pat_pdc_cap,
			parisc_pat_pdc_cap
			 & PDC_PAT_CAPABILITY_BIT_SIMULTANEOUS_PTLB ? 1:0,
			parisc_pat_pdc_cap
			 & PDC_PAT_CAPABILITY_BIT_PDC_HPMC_RENDEZ   ? 1:0);
		return;
	}
#endif

	/* Check the CPU's bus ID.  There's probably a better test.  */

	status = pdc_model_info(&model);

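	/*
	 * bus_id is the 5-bit field at bits [15:11] of the hversion word,
	 * hence the shift by 4 + 7 below; the values matched in the
	 * switch identify the 32-bit Snake-era workstations.
	 */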
	bus_id = (model.hversion >> (4 + 7)) & 0x1f;

	switch (bus_id) {
	case 0x4:		/* 720, 730, 750, 735, 755 */
	case 0x6:		/* 705, 710 */
	case 0x7:		/* 715, 725 */
	case 0x8:		/* 745, 747, 742 */
	case 0xA:		/* 712 and similar */
	case 0xC:		/* 715/64, at least */

		pdc_type = PDC_TYPE_SNAKE;
		pr_cont("Snake.\n");
		return;

	default:		/* Everything else */

		pr_cont("Unsupported.\n");
		panic("If this is a 64-bit machine, please try a 64-bit kernel.\n");
	}
}

#define PDC_PAGE_ADJ_SHIFT (PAGE_SHIFT - 12) /* pdc pages are always 4k */
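
/*
 * Example: with 4 kB kernel pages PDC_PAGE_ADJ_SHIFT is 0 and firmware
 * page counts are used as-is; if the kernel is built with 16 kB pages
 * it is 2, so a firmware count of 0x400 4 kB pages becomes 0x100
 * kernel pages in set_pmem_entry() below.
 */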

static void __init
set_pmem_entry(physmem_range_t *pmem_ptr, unsigned long start,
	       unsigned long pages4k)
{
	/* Rather than aligning and potentially throwing away
	 * memory, we'll assume that any ranges are already
	 * nicely aligned with any reasonable page size, and
	 * panic if they are not (it's more likely that the
	 * pdc info is bad in this case).
	 */

	if (unlikely( ((start & (PAGE_SIZE - 1)) != 0)
	    || ((pages4k & ((1UL << PDC_PAGE_ADJ_SHIFT) - 1)) != 0) )) {

		panic("Memory range doesn't align with page size!\n");
	}

	pmem_ptr->start_pfn = (start >> PAGE_SHIFT);
	pmem_ptr->pages = (pages4k >> PDC_PAGE_ADJ_SHIFT);
}

static void __init pagezero_memconfig(void)
{
	unsigned long npages;

	/* Use the 32 bit information from page zero to create a single
	 * entry in the pmem_ranges[] table.
	 *
	 * We currently don't support machines with contiguous memory
	 * >= 4 GB, which report that memory using 64 bit only fields
	 * on page zero. It's not worth doing until it can be tested,
	 * and it is not clear we can support those machines for other
	 * reasons.
	 *
	 * If that support is done in the future, this is where it
	 * should be done.
	 */

	npages = (PAGE_ALIGN(PAGE0->imm_max_mem) >> PAGE_SHIFT);
	set_pmem_entry(pmem_ranges,0UL,npages);
	npmem_ranges = 1;
}

#ifdef CONFIG_64BIT

/* All of the PDC PAT specific code is 64-bit only */

/*
**  The module object is filled via PDC_PAT_CELL[Return Cell Module].
**  If a module is found, register module will get the IODC bytes via
**  pdc_iodc_read() using the PA view of conf_base_addr for the hpa parameter.
**
**  The IO view can be used by PDC_PAT_CELL[Return Cell Module]
**  only for SBAs and LBAs.  This view will cause an invalid
**  argument error for all other cell module types.
**
*/

static int __init
pat_query_module(ulong pcell_loc, ulong mod_index)
{
	pdc_pat_cell_mod_maddr_block_t *pa_pdc_cell;
	unsigned long bytecnt;
	unsigned long temp;	/* 64-bit scratch value */
	long status;		/* PDC return value status */
	struct parisc_device *dev;

	pa_pdc_cell = kmalloc(sizeof (*pa_pdc_cell), GFP_KERNEL);
	if (!pa_pdc_cell)
		panic("couldn't allocate memory for PDC_PAT_CELL!");

	/* return cell module (PA or Processor view) */
	status = pdc_pat_cell_module(&bytecnt, pcell_loc, mod_index,
				     PA_VIEW, pa_pdc_cell);

	if (status != PDC_OK) {
		/* no more cell modules or error */
		kfree(pa_pdc_cell);
		return status;
	}

	temp = pa_pdc_cell->cba;
	dev = alloc_pa_dev(PAT_GET_CBA(temp), &(pa_pdc_cell->mod_path));
	if (!dev) {
		kfree(pa_pdc_cell);
		return PDC_OK;
	}

	/* alloc_pa_dev sets dev->hpa */

	/*
	** save parameters in the parisc_device
	** (The idea being the device driver will call pdc_pat_cell_module()
	** and store the results in its own data structure.)
	*/
	dev->pcell_loc = pcell_loc;
	dev->mod_index = mod_index;

	/* save generic info returned from the call */
	/* REVISIT: who is the consumer of this? not sure yet... */
	dev->mod_info = pa_pdc_cell->mod_info;	/* pass to PAT_GET_ENTITY() */
	dev->pmod_loc = pa_pdc_cell->mod_location;
	dev->mod0 = pa_pdc_cell->mod[0];

	register_parisc_device(dev);	/* advertise device */

#ifdef DEBUG_PAT
	/* dump what we see so far... */
	switch (PAT_GET_ENTITY(dev->mod_info)) {
		pdc_pat_cell_mod_maddr_block_t io_pdc_cell;
		unsigned long i;

	case PAT_ENTITY_PROC:
		printk(KERN_DEBUG "PAT_ENTITY_PROC: id_eid 0x%lx\n",
			pa_pdc_cell->mod[0]);
		break;

	case PAT_ENTITY_MEM:
		printk(KERN_DEBUG
			"PAT_ENTITY_MEM: amount 0x%lx min_gni_base 0x%lx min_gni_len 0x%lx\n",
			pa_pdc_cell->mod[0], pa_pdc_cell->mod[1],
			pa_pdc_cell->mod[2]);
		break;
	case PAT_ENTITY_CA:
		printk(KERN_DEBUG "PAT_ENTITY_CA: %ld\n", pcell_loc);
		break;

	case PAT_ENTITY_PBC:
		printk(KERN_DEBUG "PAT_ENTITY_PBC: ");
		goto print_ranges;

	case PAT_ENTITY_SBA:
		printk(KERN_DEBUG "PAT_ENTITY_SBA: ");
		goto print_ranges;

	case PAT_ENTITY_LBA:
		printk(KERN_DEBUG "PAT_ENTITY_LBA: ");

 print_ranges:
		pdc_pat_cell_module(&bytecnt, pcell_loc, mod_index,
				    IO_VIEW, &io_pdc_cell);
		printk(KERN_DEBUG "ranges %ld\n", pa_pdc_cell->mod[1]);
		for (i = 0; i < pa_pdc_cell->mod[1]; i++) {
			printk(KERN_DEBUG
				"  PA_VIEW %ld: 0x%016lx 0x%016lx 0x%016lx\n",
				i, pa_pdc_cell->mod[2 + i * 3],	/* type */
				pa_pdc_cell->mod[3 + i * 3],	/* start */
				pa_pdc_cell->mod[4 + i * 3]);	/* finish (ie end) */
			printk(KERN_DEBUG
				"  IO_VIEW %ld: 0x%016lx 0x%016lx 0x%016lx\n",
				i, io_pdc_cell.mod[2 + i * 3],	/* type */
				io_pdc_cell.mod[3 + i * 3],	/* start */
				io_pdc_cell.mod[4 + i * 3]);	/* finish (ie end) */
		}
		printk(KERN_DEBUG "\n");
		break;
	}
#endif /* DEBUG_PAT */

	kfree(pa_pdc_cell);

	return PDC_OK;
}


/* pat pdc can return information about a variety of different
 * types of memory (e.g. firmware, I/O, etc.) but we only care about
 * the usable physical ram right now. Since the firmware specific
 * information is allocated on the stack, we'll be generous, in
 * case there is a lot of other information we don't care about.
 */

#define PAT_MAX_RANGES (4 * MAX_PHYSMEM_RANGES)
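
/*
 * This gives the on-stack mem_table in pat_memconfig() room for
 * PAT_MAX_RANGES + 1 descriptors, even though at most
 * MAX_PHYSMEM_RANGES of them end up copied into pmem_ranges[].
 */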

static void __init pat_memconfig(void)
{
	unsigned long actual_len;
	struct pdc_pat_pd_addr_map_entry mem_table[PAT_MAX_RANGES+1];
	struct pdc_pat_pd_addr_map_entry *mtbl_ptr;
	physmem_range_t *pmem_ptr;
	long status;
	int entries;
	unsigned long length;
	int i;

	length = (PAT_MAX_RANGES + 1) * sizeof(struct pdc_pat_pd_addr_map_entry);

	status = pdc_pat_pd_get_addr_map(&actual_len, mem_table, length, 0L);

	if ((status != PDC_OK)
	    || ((actual_len % sizeof(struct pdc_pat_pd_addr_map_entry)) != 0)) {

		/* The above pdc call shouldn't fail, but, just in
		 * case, just use the PAGE0 info.
		 */

		printk("\n\n\n");
		printk(KERN_WARNING "WARNING! Could not get full memory configuration. "
			"All memory may not be used!\n\n\n");
		pagezero_memconfig();
		return;
	}

	entries = actual_len / sizeof(struct pdc_pat_pd_addr_map_entry);

	if (entries > PAT_MAX_RANGES) {
		printk(KERN_WARNING "This Machine has more memory ranges than we support!\n");
		printk(KERN_WARNING "Some memory may not be used!\n");
	}

	/* Copy information into the firmware independent pmem_ranges
	 * array, skipping types we don't care about. Notice we said
	 * "may" above. We'll use all the entries that were returned.
	 */

	npmem_ranges = 0;
	mtbl_ptr = mem_table;
	pmem_ptr = pmem_ranges; /* Global firmware independent table */
	for (i = 0; i < entries; i++,mtbl_ptr++) {
		if (   (mtbl_ptr->entry_type != PAT_MEMORY_DESCRIPTOR)
		    || (mtbl_ptr->memory_type != PAT_MEMTYPE_MEMORY)
		    || (mtbl_ptr->pages == 0)
		    || (   (mtbl_ptr->memory_usage != PAT_MEMUSE_GENERAL)
			&& (mtbl_ptr->memory_usage != PAT_MEMUSE_GI)
			&& (mtbl_ptr->memory_usage != PAT_MEMUSE_GNI) ) ) {

			continue;
		}

		if (npmem_ranges == MAX_PHYSMEM_RANGES) {
			printk(KERN_WARNING "This Machine has more memory ranges than we support!\n");
			printk(KERN_WARNING "Some memory will not be used!\n");
			break;
		}

		set_pmem_entry(pmem_ptr++,mtbl_ptr->paddr,mtbl_ptr->pages);
		npmem_ranges++;
	}
}

static int __init pat_inventory(void)
{
	int status;
	ulong mod_index = 0;
	struct pdc_pat_cell_num cell_info;

	/*
	** Note:  Prelude (and its successors: Lclass, A400/500) only
	**        implement PDC_PAT_CELL sub-options 0 and 2.
	*/
	status = pdc_pat_cell_get_number(&cell_info);
	if (status != PDC_OK) {
		return 0;
	}

#ifdef DEBUG_PAT
	printk(KERN_DEBUG "CELL_GET_NUMBER: 0x%lx 0x%lx\n", cell_info.cell_num,
	       cell_info.cell_loc);
#endif

	while (PDC_OK == pat_query_module(cell_info.cell_loc, mod_index)) {
		mod_index++;
	}

	return mod_index;
}

/* We only look for extended memory ranges on a 64 bit capable box */
static void __init sprockets_memconfig(void)
{
	struct pdc_memory_table_raddr r_addr;
	struct pdc_memory_table mem_table[MAX_PHYSMEM_RANGES];
	struct pdc_memory_table *mtbl_ptr;
	physmem_range_t *pmem_ptr;
	long status;
	int entries;
	int i;

	status = pdc_mem_mem_table(&r_addr,mem_table,
				(unsigned long)MAX_PHYSMEM_RANGES);

	if (status != PDC_OK) {

		/* The above pdc call only works on boxes with sprockets
		 * firmware (newer B,C,J class). Other non-PAT PDC machines
		 * do support more than 3.75 GB of memory, but we don't
		 * support them yet.
		 */

		pagezero_memconfig();
		return;
	}

	if (r_addr.entries_total > MAX_PHYSMEM_RANGES) {
		printk(KERN_WARNING "This Machine has more memory ranges than we support!\n");
		printk(KERN_WARNING "Some memory will not be used!\n");
	}

	entries = (int)r_addr.entries_returned;

	npmem_ranges = 0;
	mtbl_ptr = mem_table;
	pmem_ptr = pmem_ranges; /* Global firmware independent table */
	for (i = 0; i < entries; i++,mtbl_ptr++) {
		set_pmem_entry(pmem_ptr++,mtbl_ptr->paddr,mtbl_ptr->pages);
		npmem_ranges++;
	}
}

#else   /* !CONFIG_64BIT */

#define pat_inventory() do { } while (0)
#define pat_memconfig() do { } while (0)
#define sprockets_memconfig() pagezero_memconfig()

#endif	/* !CONFIG_64BIT */


#ifndef CONFIG_PA20

/* Code to support Snake machines (7[2350], 7[235]5, 715/Scorpio) */

static struct parisc_device * __init
legacy_create_device(struct pdc_memory_map *r_addr,
		struct pdc_module_path *module_path)
{
	struct parisc_device *dev;
	int status = pdc_mem_map_hpa(r_addr, module_path);
	if (status != PDC_OK)
		return NULL;

	dev = alloc_pa_dev(r_addr->hpa, &module_path->path);
	if (dev == NULL)
		return NULL;

	register_parisc_device(dev);
	return dev;
}

/**
 * snake_inventory
 *
 * Before PDC_SYSTEM_MAP was invented, the PDC_MEM_MAP call was used.
 * To use it, we initialise the mod_path.bc to 0xff and try all values of
 * mod to get the HPA for the top-level devices.  Bus adapters may have
 * sub-devices which are discovered by setting bc[5] to 0 and bc[4] to the
 * module, then trying all possible functions.
 */
static void __init snake_inventory(void)
{
	int mod;
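	/*
	 * Concretely: probe paths with bc[] all 0xff and mod = 0..15
	 * first; then, for each bus adapter found, set bc[4] to its
	 * module number, bc[5] to 0, and probe every function number
	 * 0..15 as mod.
	 */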
	for (mod = 0; mod < 16; mod++) {
		struct parisc_device *dev;
		struct pdc_module_path module_path;
		struct pdc_memory_map r_addr;
		unsigned int func;

		memset(module_path.path.bc, 0xff, 6);
		module_path.path.mod = mod;
		dev = legacy_create_device(&r_addr, &module_path);
		if ((!dev) || (dev->id.hw_type != HPHW_BA))
			continue;

		memset(module_path.path.bc, 0xff, 4);
		module_path.path.bc[4] = mod;

		for (func = 0; func < 16; func++) {
			module_path.path.bc[5] = 0;
			module_path.path.mod = func;
			legacy_create_device(&r_addr, &module_path);
		}
	}
}

#else /* CONFIG_PA20 */
#define snake_inventory() do { } while (0)
#endif  /* CONFIG_PA20 */

/* Common 32/64 bit based code goes here */

/**
 * add_system_map_addresses - Add additional addresses to the parisc device.
 * @dev: The parisc device.
 * @num_addrs: The number of addresses to add.
 * @module_instance: The system_map module instance.
 *
 * This function adds any additional addresses reported by the system_map
 * firmware to the parisc device.
 */
static void __init
add_system_map_addresses(struct parisc_device *dev, int num_addrs,
			 int module_instance)
{
	int i;
	long status;
	struct pdc_system_map_addr_info addr_result;

	dev->addr = kmalloc_array(num_addrs, sizeof(*dev->addr), GFP_KERNEL);
	if(!dev->addr) {
		printk(KERN_ERR "%s %s(): memory allocation failure\n",
		       __FILE__, __func__);
		return;
	}

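	/* Note: the address index passed to pdc_system_map_find_addrs()
	 * below runs from 1 to num_addrs, not from 0.
	 */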
	for(i = 1; i <= num_addrs; ++i) {
		status = pdc_system_map_find_addrs(&addr_result,
						   module_instance, i);
		if(PDC_OK == status) {
			dev->addr[dev->num_addrs] = (unsigned long)addr_result.mod_addr;
			dev->num_addrs++;
		} else {
			printk(KERN_WARNING
			       "Bad PDC_FIND_ADDRESS status return (%ld) for index %d\n",
			       status, i);
		}
	}
}

/**
 * system_map_inventory - Retrieve firmware devices via SYSTEM_MAP.
 *
 * This function attempts to retrieve and register all the devices the
 * firmware knows about via the SYSTEM_MAP PDC call.
 */
static void __init system_map_inventory(void)
{
	int i;
	long status = PDC_OK;

	for (i = 0; i < 256; i++) {
		struct parisc_device *dev;
		struct pdc_system_map_mod_info module_result;
		struct pdc_module_path module_path;

		status = pdc_system_map_find_mods(&module_result,
				&module_path, i);
		if ((status == PDC_BAD_PROC) || (status == PDC_NE_MOD))
			break;
		if (status != PDC_OK)
			continue;

		dev = alloc_pa_dev(module_result.mod_addr, &module_path.path);
		if (!dev)
			continue;

		register_parisc_device(dev);

		/* if available, get the additional addresses for a module */
		if (!module_result.add_addrs)
			continue;

		add_system_map_addresses(dev, module_result.add_addrs, i);
	}

	walk_central_bus();
	return;
}

void __init do_memory_inventory(void)
{
	switch (pdc_type) {

	case PDC_TYPE_PAT:
		pat_memconfig();
		break;

	case PDC_TYPE_SYSTEM_MAP:
		sprockets_memconfig();
		break;

	case PDC_TYPE_SNAKE:
		pagezero_memconfig();
		return;

	default:
		panic("Unknown PDC type!\n");
	}

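	/*
	 * Sanity check: the first returned range is expected to start
	 * at physical address zero; if not (or if nothing was
	 * returned), fall back to the single-range PAGE0 description.
	 */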
	if (npmem_ranges == 0 || pmem_ranges[0].start_pfn != 0) {
		printk(KERN_WARNING "Bad memory configuration returned!\n");
		printk(KERN_WARNING "Some memory may not be used!\n");
		pagezero_memconfig();
	}
}

void __init do_device_inventory(void)
{
	printk(KERN_INFO "Searching for devices...\n");

	init_parisc_bus();

	switch (pdc_type) {

	case PDC_TYPE_PAT:
		pat_inventory();
		break;

	case PDC_TYPE_SYSTEM_MAP:
		system_map_inventory();
		break;

	case PDC_TYPE_SNAKE:
		snake_inventory();
		break;

	default:
		panic("Unknown PDC type!\n");
	}
	printk(KERN_INFO "Found devices:\n");
	print_parisc_devices();
}