// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * inventory.c
 *
 * Copyright (c) 1999 The Puffin Group (David Kennedy and Alex deVries)
 * Copyright (c) 2001 Matthew Wilcox for Hewlett-Packard
 *
 * These are the routines to discover what hardware exists in this box.
 * This task is complicated by there being 3 different ways of
 * performing an inventory, depending largely on the age of the box.
 * The recommended way to do this is to check to see whether the machine
 * is a `Snake' first, then try System Map, then try PAT.  We try System
 * Map before checking for a Snake -- this probably doesn't cause any
 * problems, but...
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/platform_device.h>
#include <asm/hardware.h>
#include <asm/io.h>
#include <asm/mmzone.h>
#include <asm/pdc.h>
#include <asm/pdcpat.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/parisc-device.h>
#include <asm/tlbflush.h>

/*
** Debug options
** DEBUG_PAT Dump details which PDC PAT provides about ranges/devices.
*/
#undef DEBUG_PAT

int pdc_type __ro_after_init = PDC_TYPE_ILLEGAL;

/* cell number and location (PAT firmware only) */
unsigned long parisc_cell_num __ro_after_init;
unsigned long parisc_cell_loc __ro_after_init;
unsigned long parisc_pat_pdc_cap __ro_after_init;


void __init setup_pdc(void)
{
	long status;
	unsigned int bus_id;
	struct pdc_system_map_mod_info module_result;
	struct pdc_module_path module_path;
	struct pdc_model model;
#ifdef CONFIG_64BIT
	struct pdc_pat_cell_num cell_info;
#endif

	/* Determine the pdc "type" used on this machine */

	printk(KERN_INFO "Determining PDC firmware type: ");

	status = pdc_system_map_find_mods(&module_result, &module_path, 0);
	if (status == PDC_OK) {
		pdc_type = PDC_TYPE_SYSTEM_MAP;
		pr_cont("System Map.\n");
		return;
	}

	/*
	 * If the machine doesn't support PDC_SYSTEM_MAP then either it
	 * is a pdc pat box, or it is an older box. All 64 bit capable
	 * machines are either pdc pat boxes or they support PDC_SYSTEM_MAP.
	 */

	/*
	 * TODO: We should test for 64 bit capability and give a
	 * clearer message.
	 */

#ifdef CONFIG_64BIT
	status = pdc_pat_cell_get_number(&cell_info);
	if (status == PDC_OK) {
		unsigned long legacy_rev, pat_rev;
		pdc_type = PDC_TYPE_PAT;
		pr_cont("64 bit PAT.\n");
		parisc_cell_num = cell_info.cell_num;
		parisc_cell_loc = cell_info.cell_loc;
		pr_info("PAT: Running on cell %lu and location %lu.\n",
			parisc_cell_num, parisc_cell_loc);
		status = pdc_pat_pd_get_pdc_revisions(&legacy_rev,
			&pat_rev, &parisc_pat_pdc_cap);
		pr_info("PAT: legacy revision 0x%lx, pat_rev 0x%lx, pdc_cap 0x%lx, S-PTLB %d, HPMC_RENDEZ %d.\n",
			legacy_rev, pat_rev, parisc_pat_pdc_cap,
			parisc_pat_pdc_cap
			 & PDC_PAT_CAPABILITY_BIT_SIMULTANEOUS_PTLB ? 1:0,
			parisc_pat_pdc_cap
			 & PDC_PAT_CAPABILITY_BIT_PDC_HPMC_RENDEZ   ? 1:0);
		return;
	}
#endif

	/* Check the CPU's bus ID.  There's probably a better test.  */

	status = pdc_model_info(&model);

	bus_id = (model.hversion >> (4 + 7)) & 0x1f;
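
	/*
	 * bus_id above is the 5-bit bus identifier taken from the hversion
	 * word (bits 11..15, i.e. just above the low 4 + 7 bits); the
	 * switch below uses it as a rough age test for the Snake-era
	 * workstations listed in the case labels.
	 */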

	switch (bus_id) {
	case 0x4:		/* 720, 730, 750, 735, 755 */
	case 0x6:		/* 705, 710 */
	case 0x7:		/* 715, 725 */
	case 0x8:		/* 745, 747, 742 */
	case 0xA:		/* 712 and similar */
	case 0xC:		/* 715/64, at least */

		pdc_type = PDC_TYPE_SNAKE;
		pr_cont("Snake.\n");
		return;

	default:		/* Everything else */

		pr_cont("Unsupported.\n");
		panic("If this is a 64-bit machine, please try a 64-bit kernel.\n");
	}
}

#define PDC_PAGE_ADJ_SHIFT (PAGE_SHIFT - 12) /* pdc pages are always 4k */
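/*
 * PDC always reports memory in 4k units while the kernel counts in
 * PAGE_SIZE units.  Shifting a 4k count right by PDC_PAGE_ADJ_SHIFT
 * converts it: e.g. with 4k kernel pages (PAGE_SHIFT == 12) the shift
 * is 0, with 64k kernel pages (PAGE_SHIFT == 16) it is 4, i.e. 16 PDC
 * pages per kernel page.
 */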

static void __init
set_pmem_entry(physmem_range_t *pmem_ptr, unsigned long start,
	       unsigned long pages4k)
{
	/* Rather than aligning and potentially throwing away
	 * memory, we'll assume that any ranges are already
	 * nicely aligned with any reasonable page size, and
	 * panic if they are not (it's more likely that the
	 * pdc info is bad in this case).
	 */

	if (unlikely( ((start & (PAGE_SIZE - 1)) != 0)
	    || ((pages4k & ((1UL << PDC_PAGE_ADJ_SHIFT) - 1)) != 0) )) {

		panic("Memory range doesn't align with page size!\n");
	}

	pmem_ptr->start_pfn = (start >> PAGE_SHIFT);
	pmem_ptr->pages = (pages4k >> PDC_PAGE_ADJ_SHIFT);
}

static void __init pagezero_memconfig(void)
{
	unsigned long npages;

	/* Use the 32 bit information from page zero to create a single
	 * entry in the pmem_ranges[] table.
	 *
	 * We currently don't support machines with contiguous memory
	 * >= 4 GB, which report that memory using 64 bit only fields
	 * on page zero. It's not worth doing until it can be tested,
	 * and it is not clear we can support those machines for other
	 * reasons.
	 *
	 * If that support is done in the future, this is where it
	 * should be done.
	 */
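
	/* PAGE0->imm_max_mem used below is one of the 32-bit page zero
	 * fields, so this single entry describes at most 4 GB of RAM,
	 * starting at physical address 0.
	 */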

	npages = (PAGE_ALIGN(PAGE0->imm_max_mem) >> PAGE_SHIFT);
	set_pmem_entry(pmem_ranges,0UL,npages);
	npmem_ranges = 1;
}

#ifdef CONFIG_64BIT

/* All of the PDC PAT specific code is 64-bit only */

/*
**  The module object is filled via PDC_PAT_CELL[Return Cell Module].
**  If a module is found, it is registered and its IODC bytes are read
**  via pdc_iodc_read() using the PA view of conf_base_addr for the hpa
**  parameter.
**
**  The IO view can be used by PDC_PAT_CELL[Return Cell Module]
**  only for SBAs and LBAs.  This view will cause an invalid
**  argument error for all other cell module types.
**
*/

static int __init
pat_query_module(ulong pcell_loc, ulong mod_index)
{
	pdc_pat_cell_mod_maddr_block_t *pa_pdc_cell;
	unsigned long bytecnt;
	unsigned long temp;	/* 64-bit scratch value */
	long status;		/* PDC return value status */
	struct parisc_device *dev;

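	/* Scratch buffer for the firmware call below; every return path
	 * from this function frees it again.
	 */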
	pa_pdc_cell = kmalloc(sizeof (*pa_pdc_cell), GFP_KERNEL);
	if (!pa_pdc_cell)
		panic("couldn't allocate memory for PDC_PAT_CELL!");

	/* return cell module (PA or Processor view) */
	status = pdc_pat_cell_module(&bytecnt, pcell_loc, mod_index,
				     PA_VIEW, pa_pdc_cell);

	if (status != PDC_OK) {
		/* no more cell modules or error */
		kfree(pa_pdc_cell);
		return status;
	}

#ifdef DEBUG_PAT
	pr_debug("PAT INDEX: %lu: cba 0x%lx, "
		"mod_info 0x%lx, mod_location 0x%lx, "
		"mod: 0x%lx 0x%lx 0x%lx 0x%lx 0x%lx 0x%lx "
		"0x%lx 0x%lx 0x%lx 0x%lx 0x%lx 0x%lx\n",
		mod_index + 1, pa_pdc_cell->cba,
		pa_pdc_cell->mod_info, pa_pdc_cell->mod_location,
		pa_pdc_cell->mod[0], pa_pdc_cell->mod[1], pa_pdc_cell->mod[2],
		pa_pdc_cell->mod[3], pa_pdc_cell->mod[4], pa_pdc_cell->mod[5],
		pa_pdc_cell->mod[6], pa_pdc_cell->mod[7], pa_pdc_cell->mod[8],
		pa_pdc_cell->mod[9], pa_pdc_cell->mod[10], pa_pdc_cell->mod[11]);
#endif

	temp = pa_pdc_cell->cba;
	dev = alloc_pa_dev(PAT_GET_CBA(temp), &(pa_pdc_cell->mod_path));
	if (!dev) {
		kfree(pa_pdc_cell);
		return PDC_OK;
	}

	/* alloc_pa_dev sets dev->hpa */

	/*
	** save parameters in the parisc_device
	** (The idea being the device driver will call pdc_pat_cell_module()
	** and store the results in its own data structure.)
	*/
	dev->pcell_loc = pcell_loc;
	dev->mod_index = mod_index;

	/* save generic info returned from the call */
	/* REVISIT: who is the consumer of this? not sure yet... */
	dev->mod_info = pa_pdc_cell->mod_info;	/* pass to PAT_GET_ENTITY() */
	dev->pmod_loc = pa_pdc_cell->mod_location;
	dev->mod0 = pa_pdc_cell->mod[0];

	register_parisc_device(dev);	/* advertise device */

#ifdef DEBUG_PAT
	/* dump what we see so far... */
	switch (PAT_GET_ENTITY(dev->mod_info)) {
		pdc_pat_cell_mod_maddr_block_t io_pdc_cell;
		unsigned long i;

	case PAT_ENTITY_PROC:
		printk(KERN_DEBUG "PAT_ENTITY_PROC: id_eid 0x%lx\n",
			pa_pdc_cell->mod[0]);
		break;

	case PAT_ENTITY_MEM:
		printk(KERN_DEBUG
			"PAT_ENTITY_MEM: amount 0x%lx min_gni_base 0x%lx min_gni_len 0x%lx\n",
			pa_pdc_cell->mod[0], pa_pdc_cell->mod[1],
			pa_pdc_cell->mod[2]);
		break;
	case PAT_ENTITY_CA:
		printk(KERN_DEBUG "PAT_ENTITY_CA: %ld\n", pcell_loc);
		break;

	case PAT_ENTITY_PBC:
		printk(KERN_DEBUG "PAT_ENTITY_PBC: ");
		goto print_ranges;

	case PAT_ENTITY_SBA:
		printk(KERN_DEBUG "PAT_ENTITY_SBA: ");
		goto print_ranges;

	case PAT_ENTITY_LBA:
		printk(KERN_DEBUG "PAT_ENTITY_LBA: ");

 print_ranges:
		pdc_pat_cell_module(&bytecnt, pcell_loc, mod_index,
				    IO_VIEW, &io_pdc_cell);
		printk(KERN_DEBUG "ranges %ld\n", pa_pdc_cell->mod[1]);
		for (i = 0; i < pa_pdc_cell->mod[1]; i++) {
			printk(KERN_DEBUG
				"  PA_VIEW %ld: 0x%016lx 0x%016lx 0x%016lx\n",
				i, pa_pdc_cell->mod[2 + i * 3],	/* type */
				pa_pdc_cell->mod[3 + i * 3],	/* start */
				pa_pdc_cell->mod[4 + i * 3]);	/* finish (ie end) */
			printk(KERN_DEBUG
				"  IO_VIEW %ld: 0x%016lx 0x%016lx 0x%016lx\n",
				i, io_pdc_cell.mod[2 + i * 3],	/* type */
				io_pdc_cell.mod[3 + i * 3],	/* start */
				io_pdc_cell.mod[4 + i * 3]);	/* finish (ie end) */
		}
		printk(KERN_DEBUG "\n");
		break;
	}
#endif /* DEBUG_PAT */

	kfree(pa_pdc_cell);

	return PDC_OK;
}


/* pat pdc can return information about a variety of different
 * types of memory (e.g. firmware, I/O, etc.) but we only care about
 * the usable physical ram right now. Since the firmware specific
 * information is allocated on the stack, we'll be generous, in
 * case there is a lot of other information we don't care about.
 */

#define PAT_MAX_RANGES (4 * MAX_PHYSMEM_RANGES)

static void __init pat_memconfig(void)
{
	unsigned long actual_len;
	struct pdc_pat_pd_addr_map_entry mem_table[PAT_MAX_RANGES+1];
	struct pdc_pat_pd_addr_map_entry *mtbl_ptr;
	physmem_range_t *pmem_ptr;
	long status;
	int entries;
	unsigned long length;
	int i;

	length = (PAT_MAX_RANGES + 1) * sizeof(struct pdc_pat_pd_addr_map_entry);

	status = pdc_pat_pd_get_addr_map(&actual_len, mem_table, length, 0L);

	if ((status != PDC_OK)
	    || ((actual_len % sizeof(struct pdc_pat_pd_addr_map_entry)) != 0)) {

		/* The above pdc call shouldn't fail, but, just in
		 * case, just use the PAGE0 info.
		 */

		printk("\n\n\n");
		printk(KERN_WARNING "WARNING! Could not get full memory configuration. "
			"All memory may not be used!\n\n\n");
		pagezero_memconfig();
		return;
	}

	entries = actual_len / sizeof(struct pdc_pat_pd_addr_map_entry);

	if (entries > PAT_MAX_RANGES) {
		printk(KERN_WARNING "This Machine has more memory ranges than we support!\n");
		printk(KERN_WARNING "Some memory may not be used!\n");
	}

	/* Copy information into the firmware independent pmem_ranges
	 * array, skipping types we don't care about. Notice we said
	 * "may" above. We'll use all the entries that were returned.
	 */

	npmem_ranges = 0;
	mtbl_ptr = mem_table;
	pmem_ptr = pmem_ranges; /* Global firmware independent table */
	for (i = 0; i < entries; i++,mtbl_ptr++) {
		if (   (mtbl_ptr->entry_type != PAT_MEMORY_DESCRIPTOR)
		    || (mtbl_ptr->memory_type != PAT_MEMTYPE_MEMORY)
		    || (mtbl_ptr->pages == 0)
		    || (   (mtbl_ptr->memory_usage != PAT_MEMUSE_GENERAL)
			&& (mtbl_ptr->memory_usage != PAT_MEMUSE_GI)
			&& (mtbl_ptr->memory_usage != PAT_MEMUSE_GNI) ) ) {

			continue;
		}

		if (npmem_ranges == MAX_PHYSMEM_RANGES) {
			printk(KERN_WARNING "This Machine has more memory ranges than we support!\n");
			printk(KERN_WARNING "Some memory will not be used!\n");
			break;
		}

		set_pmem_entry(pmem_ptr++,mtbl_ptr->paddr,mtbl_ptr->pages);
		npmem_ranges++;
	}
}

static int __init pat_inventory(void)
{
	int status;
	ulong mod_index = 0;
	struct pdc_pat_cell_num cell_info;

	/*
	** Note:  Prelude (and its successors: Lclass, A400/500) only
	**        implement PDC_PAT_CELL sub-options 0 and 2.
	*/
	status = pdc_pat_cell_get_number(&cell_info);
	if (status != PDC_OK) {
		return 0;
	}

#ifdef DEBUG_PAT
	printk(KERN_DEBUG "CELL_GET_NUMBER: 0x%lx 0x%lx\n", cell_info.cell_num,
	       cell_info.cell_loc);
#endif

	while (PDC_OK == pat_query_module(cell_info.cell_loc, mod_index)) {
		mod_index++;
	}

	return mod_index;
}
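
/* pat_query_module() returns PDC_OK for every module it processed, so the
 * loop above walks mod_index 0, 1, 2, ... until the firmware reports that
 * there are no more modules (or fails).  The module count returned here is
 * currently ignored by do_device_inventory().
 */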

/* We only look for extended memory ranges on a 64 bit capable box */
static void __init sprockets_memconfig(void)
{
	struct pdc_memory_table_raddr r_addr;
	struct pdc_memory_table mem_table[MAX_PHYSMEM_RANGES];
	struct pdc_memory_table *mtbl_ptr;
	physmem_range_t *pmem_ptr;
	long status;
	int entries;
	int i;

	status = pdc_mem_mem_table(&r_addr,mem_table,
				(unsigned long)MAX_PHYSMEM_RANGES);

	if (status != PDC_OK) {

		/* The above pdc call only works on boxes with sprockets
		 * firmware (newer B, C and J class). Other non-PAT PDC
		 * machines do support more than 3.75 GB of memory, but
		 * we don't support them yet.
		 */

		pagezero_memconfig();
		return;
	}

	if (r_addr.entries_total > MAX_PHYSMEM_RANGES) {
		printk(KERN_WARNING "This Machine has more memory ranges than we support!\n");
		printk(KERN_WARNING "Some memory will not be used!\n");
	}

	entries = (int)r_addr.entries_returned;

	npmem_ranges = 0;
	mtbl_ptr = mem_table;
	pmem_ptr = pmem_ranges; /* Global firmware independent table */
	for (i = 0; i < entries; i++,mtbl_ptr++) {
		set_pmem_entry(pmem_ptr++,mtbl_ptr->paddr,mtbl_ptr->pages);
		npmem_ranges++;
	}
}

#else   /* !CONFIG_64BIT */

#define pat_inventory() do { } while (0)
#define pat_memconfig() do { } while (0)
#define sprockets_memconfig() pagezero_memconfig()
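
/* On 32-bit kernels the PAT code above is compiled out: PAT firmware only
 * exists on 64-bit machines, so these stubs do nothing and the memory
 * layout falls back to the page zero information.
 */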

#endif	/* !CONFIG_64BIT */


#ifndef CONFIG_PA20

/* Code to support Snake machines (7[2350], 7[235]5, 715/Scorpio) */

static struct parisc_device * __init
legacy_create_device(struct pdc_memory_map *r_addr,
		struct pdc_module_path *module_path)
{
	struct parisc_device *dev;
	int status = pdc_mem_map_hpa(r_addr, module_path);
	if (status != PDC_OK)
		return NULL;

	dev = alloc_pa_dev(r_addr->hpa, &module_path->path);
	if (dev == NULL)
		return NULL;

	register_parisc_device(dev);
	return dev;
}

/**
 * snake_inventory - Discover devices on Snake machines via PDC_MEM_MAP.
 *
 * Before PDC_SYSTEM_MAP was invented, the PDC_MEM_MAP call was used.
 * To use it, we initialise the mod_path.bc to 0xff and try all values of
 * mod to get the HPA for the top-level devices.  Bus adapters may have
 * sub-devices which are discovered by setting bc[5] to 0 and bc[4] to the
 * module, then trying all possible functions.
 */
static void __init snake_inventory(void)
{
	int mod;
	for (mod = 0; mod < 16; mod++) {
		struct parisc_device *dev;
		struct pdc_module_path module_path;
		struct pdc_memory_map r_addr;
		unsigned int func;

		memset(module_path.path.bc, 0xff, 6);
		module_path.path.mod = mod;
		dev = legacy_create_device(&r_addr, &module_path);
		if ((!dev) || (dev->id.hw_type != HPHW_BA))
			continue;

		memset(module_path.path.bc, 0xff, 4);
		module_path.path.bc[4] = mod;

		for (func = 0; func < 16; func++) {
			module_path.path.bc[5] = 0;
			module_path.path.mod = func;
			legacy_create_device(&r_addr, &module_path);
		}
	}
}
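
/* Illustration of the probing pattern above (purely an example): module 5
 * is first tried with path { bc = ff/ff/ff/ff/ff/ff, mod = 5 }.  If that
 * turns out to be a bus adapter, its children are then probed with paths
 * { bc = ff/ff/ff/ff/5/0, mod = 0..15 }.
 */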

#else /* CONFIG_PA20 */
#define snake_inventory() do { } while (0)
#endif  /* CONFIG_PA20 */

/* Common 32/64 bit based code goes here */

/**
 * add_system_map_addresses - Add additional addresses to the parisc device.
 * @dev: The parisc device.
 * @num_addrs: The number of addresses to add.
 * @module_instance: The system_map module instance.
 *
 * This function adds any additional addresses reported by the system_map
 * firmware to the parisc device.
 */
static void __init
add_system_map_addresses(struct parisc_device *dev, int num_addrs,
			 int module_instance)
{
	int i;
	long status;
	struct pdc_system_map_addr_info addr_result;

	dev->addr = kmalloc_array(num_addrs, sizeof(*dev->addr), GFP_KERNEL);
	if(!dev->addr) {
		printk(KERN_ERR "%s %s(): memory allocation failure\n",
		       __FILE__, __func__);
		return;
	}

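	/* Address indices in the PDC_FIND_ADDRESS call appear to be 1-based,
	 * hence the 1..num_addrs loop below; each successful lookup is
	 * appended to dev->addr[].
	 */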
	for(i = 1; i <= num_addrs; ++i) {
		status = pdc_system_map_find_addrs(&addr_result,
						   module_instance, i);
		if(PDC_OK == status) {
			dev->addr[dev->num_addrs] = (unsigned long)addr_result.mod_addr;
			dev->num_addrs++;
		} else {
			printk(KERN_WARNING
			       "Bad PDC_FIND_ADDRESS status return (%ld) for index %d\n",
			       status, i);
		}
	}
}

/**
 * system_map_inventory - Retrieve firmware devices via SYSTEM_MAP.
 *
 * This function attempts to retrieve and register all the devices firmware
 * knows about via the SYSTEM_MAP PDC call.
 */
static void __init system_map_inventory(void)
{
	int i;
	long status = PDC_OK;

	for (i = 0; i < 256; i++) {
		struct parisc_device *dev;
		struct pdc_system_map_mod_info module_result;
		struct pdc_module_path module_path;

		status = pdc_system_map_find_mods(&module_result,
				&module_path, i);
		if ((status == PDC_BAD_PROC) || (status == PDC_NE_MOD))
			break;
		if (status != PDC_OK)
			continue;

		dev = alloc_pa_dev(module_result.mod_addr, &module_path.path);
		if (!dev)
			continue;

		register_parisc_device(dev);

		/* if available, get the additional addresses for a module */
		if (!module_result.add_addrs)
			continue;

		add_system_map_addresses(dev, module_result.add_addrs, i);
	}

	walk_central_bus();
	return;
}

void __init do_memory_inventory(void)
{
	switch (pdc_type) {

	case PDC_TYPE_PAT:
		pat_memconfig();
		break;

	case PDC_TYPE_SYSTEM_MAP:
		sprockets_memconfig();
		break;

	case PDC_TYPE_SNAKE:
		pagezero_memconfig();
		return;

	default:
		panic("Unknown PDC type!\n");
	}

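	/* Sanity check: we expect at least one usable range and we expect it
	 * to start at physical address 0.  If firmware handed us anything
	 * else, fall back to the conservative page zero description.
	 */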
	if (npmem_ranges == 0 || pmem_ranges[0].start_pfn != 0) {
		printk(KERN_WARNING "Bad memory configuration returned!\n");
		printk(KERN_WARNING "Some memory may not be used!\n");
		pagezero_memconfig();
	}
}

void __init do_device_inventory(void)
{
	printk(KERN_INFO "Searching for devices...\n");

	init_parisc_bus();

	switch (pdc_type) {

	case PDC_TYPE_PAT:
		pat_inventory();
		break;

	case PDC_TYPE_SYSTEM_MAP:
		system_map_inventory();
		break;

	case PDC_TYPE_SNAKE:
		snake_inventory();
		break;

	default:
		panic("Unknown PDC type!\n");
	}
	printk(KERN_INFO "Found devices:\n");
	print_parisc_devices();

#if defined(CONFIG_64BIT) && defined(CONFIG_SMP)
	pa_serialize_tlb_flushes = machine_has_merced_bus();
	if (pa_serialize_tlb_flushes)
		pr_info("Merced bus found: Enable PxTLB serialization.\n");
#endif

#if defined(CONFIG_FW_CFG_SYSFS)
	if (running_on_qemu) {
		struct resource res[3] = {0,};
		unsigned int base;

		base = ((unsigned long long) PAGE0->pad0[2] << 32)
			| PAGE0->pad0[3]; /* SeaBIOS stored it here */

		res[0].name = "fw_cfg";
		res[0].start = base;
		res[0].end = base + 8 - 1;
		res[0].flags = IORESOURCE_MEM;

		res[1].name = "ctrl";
		res[1].start = 0;
		res[1].flags = IORESOURCE_REG;

		res[2].name = "data";
		res[2].start = 4;
		res[2].flags = IORESOURCE_REG;
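
		/* res[0] is the 8-byte fw_cfg MMIO window itself; the two
		 * IORESOURCE_REG entries give the register offsets within
		 * it ("ctrl" at +0, "data" at +4) for the fw_cfg driver.
		 */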

		if (base) {
			pr_info("Found qemu fw_cfg interface at %#08x\n", base);
			platform_device_register_simple("fw_cfg",
				PLATFORM_DEVID_NONE, res, 3);
		}
	}
#endif
}
688