xref: /titanic_51/usr/src/uts/sun4u/boston/os/boston.c (revision fe0e7ec4d916b05b52d8c7cc8a3e6a1b28e77b6f)
/*
 * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysmacros.h>
#include <sys/sunddi.h>
#include <sys/esunddi.h>

#include <sys/platform_module.h>
#include <sys/errno.h>
#include <sys/cpu_sgnblk_defs.h>
#include <sys/rmc_comm_dp.h>
#include <sys/rmc_comm_drvintf.h>
#include <sys/modctl.h>
#include <sys/lgrp.h>
#include <sys/memnode.h>
#include <sys/promif.h>

#define	SHARED_MI2CV_PATH "/i2c@1f,520000"
static dev_info_t *shared_mi2cv_dip;
static kmutex_t mi2cv_mutex;

int (*p2get_mem_unum)(int, uint64_t, char *, int, int *);
static void cpu_sgn_update(ushort_t, uchar_t, uchar_t, int);
int (*rmc_req_now)(rmc_comm_msg_t *, uint8_t) = NULL;

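/*
 * Platform-specific startup: initialize the mutex that serializes access to
 * the I2C bus shared with OBP (see plat_setprop_enter()/plat_shared_i2c_enter()
 * below).
 */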
void
startup_platform(void)
{
	mutex_init(&mi2cv_mutex, NULL, NULL, NULL);
}

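/*
 * No TSB spares are reserved on this platform.
 */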
int
set_platform_tsb_spares()
{
	return (0);
}

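/*
 * Establish platform defaults: use the M5823 TOD driver and register the
 * routine that forwards CPU signature updates to ALOM.
 */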
void
set_platform_defaults(void)
{
	extern char *tod_module_name;
	/* Set appropriate tod module */
	if (tod_module_name == NULL)
		tod_module_name = "todm5823";

	cpu_sgn_func = cpu_sgn_update;
}

/*
 * Definitions for accessing the pci config space of the isa node
 * of Southbridge.
 */
static ddi_acc_handle_t isa_handle = NULL;	/* handle for isa pci space */

/*
 * Definition for accessing rmclomv
 */
#define	RMCLOMV_PATHNAME	"/pseudo/rmclomv@0"

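/*
 * Attach and hold the drivers this platform depends on, and look up the
 * device nodes and module symbols used by the routines below.
 */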
void
load_platform_drivers(void)
{
	/*
	 * It is OK to return error because 'us' driver is not available
	 * in all clusters (e.g. missing in Core cluster).
	 */
	(void) i_ddi_attach_hw_nodes("us");


	/*
	 * mc-us3i must stay loaded for plat_get_mem_unum()
	 */
	if (i_ddi_attach_hw_nodes("mc-us3i") != DDI_SUCCESS)
		cmn_err(CE_WARN, "mc-us3i driver failed to install");
	(void) ddi_hold_driver(ddi_name_to_major("mc-us3i"));

	/*
	 * load the power button driver
	 */
	if (i_ddi_attach_hw_nodes("power") != DDI_SUCCESS)
		cmn_err(CE_WARN, "power button driver failed to install");
	(void) ddi_hold_driver(ddi_name_to_major("power"));

	/*
	 * load the GPIO driver for the ALOM reset and watchdog lines
	 */
	if (i_ddi_attach_hw_nodes("pmugpio") != DDI_SUCCESS)
		cmn_err(CE_WARN, "pmugpio failed to install");
	else {
		extern int watchdog_enable, watchdog_available;
		extern int disable_watchdog_on_exit;

		/*
		 * Disable an active h/w watchdog timer upon exit to OBP.
		 */
		disable_watchdog_on_exit = 1;

		watchdog_enable = 1;
		watchdog_available = 1;
	}
	(void) ddi_hold_driver(ddi_name_to_major("pmugpio"));

	/*
	 * Figure out which mi2cv dip is shared with OBP for the nvram
	 * device, so the lock can be acquired.
	 */
	shared_mi2cv_dip = e_ddi_hold_devi_by_path(SHARED_MI2CV_PATH, 0);

	/*
	 * Load the environmentals driver (rmclomv)
	 *
	 * We need this driver to handle events from the RMC when state
	 * changes occur in the environmental data.
	 */
	if (i_ddi_attach_hw_nodes("rmc_comm") != DDI_SUCCESS) {
		cmn_err(CE_WARN, "rmc_comm failed to install");
	} else {
		(void) ddi_hold_driver(ddi_name_to_major("rmc_comm"));

		if (e_ddi_hold_devi_by_path(RMCLOMV_PATHNAME, 0) == NULL) {
			cmn_err(CE_WARN, "Could not install rmclomv driver\n");
		}
	}
	/*
	 * create a handle to the rmc_comm_request_nowait() function
	 * inside the rmc_comm module.
	 *
	 * The Seattle/Boston todm5823 driver will use this handle to call
	 * rmc_comm_request_nowait() to send time/date updates to ALOM.
	 */
	rmc_req_now = (int (*)(rmc_comm_msg_t *, uint8_t))
		modgetsymvalue("rmc_comm_request_nowait", 0);
}

/*
 * This routine is needed if a device error or timeout occurs before the
 * driver is loaded.
 */
/*ARGSUSED*/
int
plat_ide_chipreset(dev_info_t *dip, int chno)
{
	int	ret = DDI_SUCCESS;

	if (isa_handle == NULL) {
		return (DDI_FAILURE);
	}

	/*
	 * This will be filled in with the reset logic for the ULI1573
	 * when that becomes available.  Currently this is just a stub.
	 */
	return (ret);
}


/*ARGSUSED*/
int
plat_cpu_poweron(struct cpu *cp)
{
	return (ENOTSUP);	/* not supported on this platform */
}

/*ARGSUSED*/
int
plat_cpu_poweroff(struct cpu *cp)
{
	return (ENOTSUP);	/* not supported on this platform */
}

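/*
 * Per-mnode freelist processing - nothing to do on this platform.
 */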
/*ARGSUSED*/
void
plat_freelist_process(int mnode)
{
}

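/*
 * Platform-specific modules that need to be loaded.
 */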
char *platform_module_list[] = {
	"mi2cv",
	"pca9556",
	(char *)0
};

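/*
 * TOD fault handler - nothing platform-specific to do here.
 */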
/*ARGSUSED*/
void
plat_tod_fault(enum tod_fault_type tod_bad)
{
}

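/*
 * Resolve a memory fault to a unum by handing the request to the mc-us3i
 * driver via the p2get_mem_unum hook; only faults in memory can be resolved.
 */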
/*ARGSUSED*/
int
plat_get_mem_unum(int synd_code, uint64_t flt_addr, int flt_bus_id,
    int flt_in_memory, ushort_t flt_status, char *buf, int buflen, int *lenp)
{
	if (flt_in_memory && (p2get_mem_unum != NULL))
		return (p2get_mem_unum(synd_code, P2ALIGN(flt_addr, 8),
		    buf, buflen, lenp));
	else
		return (ENOTSUP);
}

/*
 * This platform hook gets called from mc_add_mem_unum_label() in the mc-us3i
 * driver, giving each platform the opportunity to add platform-specific
 * label information to the unum for ECC error logging purposes.
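 *
 * For example (values purely illustrative), mcid 2, bank 1, dimm 3 and an
 * incoming unum of "J0500" would yield "C2/P0/B1/D3: J0500".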
 */
/*ARGSUSED*/
void
plat_add_mem_unum_label(char *unum, int mcid, int bank, int dimm)
{
	char old_unum[UNUM_NAMLEN];
	int printed;
	int buflen = UNUM_NAMLEN;

	strcpy(old_unum, unum);
	printed = snprintf(unum, buflen, "C%d/P0/B%d", mcid, bank);
	buflen -= printed;
	unum += printed;

	if (dimm != -1) {
		printed = snprintf(unum, buflen, "/D%d", dimm);
		buflen -= printed;
		unum += printed;
	}

	snprintf(unum, buflen, ": %s", old_unum);
}

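/*
 * All CPUs on this platform reside on the motherboard, so every cpuid
 * resolves to the single "MB" unum.
 */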
/*ARGSUSED*/
int
plat_get_cpu_unum(int cpuid, char *buf, int buflen, int *lenp)
{
	if (snprintf(buf, buflen, "MB") >= buflen) {
		return (ENOSPC);
	} else {
		*lenp = strlen(buf);
		return (0);
	}
}

/*
 * Our nodename has been set, pass it along to the RMC.
 */
void
plat_nodename_set(void)
{
	rmc_comm_msg_t	req;	/* request */
	int (*rmc_req_res)(rmc_comm_msg_t *, rmc_comm_msg_t *, time_t) = NULL;

	/*
	 * find the symbol for the mailbox routine
	 */
	rmc_req_res = (int (*)(rmc_comm_msg_t *, rmc_comm_msg_t *, time_t))
		modgetsymvalue("rmc_comm_request_response", 0);

	if (rmc_req_res == NULL) {
		return;
	}

	/*
	 * construct the message telling the RMC our nodename
	 */
	req.msg_type = DP_SET_CPU_NODENAME;
	req.msg_len = strlen(utsname.nodename) + 1;
	req.msg_bytes = 0;
	req.msg_buf = (caddr_t)utsname.nodename;

	/*
	 * ship it
	 */
	(void) (rmc_req_res)(&req, NULL, 2000);
}

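/*
 * Most recent system signature, kept so that cpu_sgn_update() can tell a
 * panic reboot from a normal reboot.
 */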
sig_state_t current_sgn;

/*
 * cpu signatures - we're only interested in the overall system
 * "signature" on this platform - not individual cpu signatures
 */
/*ARGSUSED*/
static void
cpu_sgn_update(ushort_t sig, uchar_t state, uchar_t sub_state, int cpuid)
{
	dp_cpu_signature_t signature;
	rmc_comm_msg_t	req;	/* request */
	int (*rmc_req_res)(rmc_comm_msg_t *, rmc_comm_msg_t *, time_t) = NULL;
	int (*rmc_req_now)(rmc_comm_msg_t *, uint8_t) = NULL;

	/*
	 * Differentiate a panic reboot from a non-panic reboot in the
	 * setting of the substate of the signature.
	 *
	 * If the new substate is REBOOT and we're rebooting due to a panic,
	 * then set the new substate to a special value indicating a panic
	 * reboot, SIGSUBST_PANIC_REBOOT.
	 *
	 * A panic reboot is detected by a current (previous) signature
	 * state of SIGST_EXIT, and a new signature substate of SIGSUBST_REBOOT.
	 * The domain signature state SIGST_EXIT is used as the panic flow
	 * progresses.
	 *
	 * At the end of the panic flow the reboot occurs, but we want to
	 * record that it was an involuntary one, something that may be quite
	 * useful to know at OBP level.
	 */
	if (state == SIGST_EXIT && sub_state == SIGSUBST_REBOOT) {
		if (current_sgn.state_t.state == SIGST_EXIT &&
		    current_sgn.state_t.sub_state != SIGSUBST_REBOOT)
			sub_state = SIGSUBST_PANIC_REBOOT;
	}

	/*
	 * offline and detached states only apply to a specific cpu
	 * so ignore them.
	 */
	if (state == SIGST_OFFLINE || state == SIGST_DETACHED) {
		return;
	}

	current_sgn.signature = CPU_SIG_BLD(sig, state, sub_state);

	/*
	 * find the symbol for the mailbox routine
	 */
	rmc_req_res = (int (*)(rmc_comm_msg_t *, rmc_comm_msg_t *, time_t))
		modgetsymvalue("rmc_comm_request_response", 0);
	if (rmc_req_res == NULL) {
		return;
	}

	/*
	 * find the symbol for the no-wait (urgent) mailbox routine
	 */
	rmc_req_now = (int (*)(rmc_comm_msg_t *, uint8_t))
		modgetsymvalue("rmc_comm_request_nowait", 0);
	if (rmc_req_now == NULL) {
		return;
	}

	signature.cpu_id = -1;
	signature.sig = sig;
	signature.states = state;
	signature.sub_state = sub_state;
	req.msg_type = DP_SET_CPU_SIGNATURE;
	req.msg_len = (int)(sizeof (signature));
	req.msg_bytes = 0;
	req.msg_buf = (caddr_t)&signature;

	/*
	 * ship it
	 * - note that for panic or reboot we need to send with nowait/urgent
	 */
	if (state == SIGST_EXIT && (sub_state == SIGSUBST_HALT ||
	    sub_state == SIGSUBST_REBOOT || sub_state == SIGSUBST_ENVIRON ||
	    sub_state == SIGSUBST_PANIC_REBOOT))
		(void) (rmc_req_now)(&req, RMC_COMM_DREQ_URGENT);
	else
		(void) (rmc_req_res)(&req, NULL, 2000);
}

/*
 * Fiesta support for lgroups.
 *
 * On fiesta platform, an lgroup platform handle == CPU id
 */

/*
 * Macro for extracting the CPU number from the CPU id
 */
#define	CPUID_TO_LGRP(id)	((id) & 0x7)
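/*
 * Physical address bits at and above bit 36 select the memory controller,
 * i.e. each memory node spans a 64GB range of physical addresses
 * (see plat_pfn_to_mem_node() below).
 */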
#define	PLATFORM_MC_SHIFT	36

/*
 * Return the platform handle for the lgroup containing the given CPU
 */
void *
plat_lgrp_cpu_to_hand(processorid_t id)
{
	return ((void *)(uintptr_t)CPUID_TO_LGRP(id));
}

/*
 * Platform specific lgroup initialization
 */
void
plat_lgrp_init(void)
{
	pnode_t		curnode;
	char		tmp_name[MAXSYSNAME];
	int		portid;
	int		cpucnt = 0;
	int		max_portid = -1;
	extern uint32_t lgrp_expand_proc_thresh;
	extern uint32_t lgrp_expand_proc_diff;
	extern pgcnt_t	lgrp_mem_free_thresh;
	extern uint32_t lgrp_loadavg_tolerance;
	extern uint32_t lgrp_loadavg_max_effect;
	extern uint32_t lgrp_load_thresh;
	extern lgrp_mem_policy_t  lgrp_mem_policy_root;

	/*
	 * Count the number of CPUs installed to determine if
	 * NUMA optimization should be enabled or not.
	 *
	 * All CPU nodes reside in the root node and have a
	 * device type "cpu".
	 */
	curnode = prom_rootnode();
	for (curnode = prom_childnode(curnode); curnode;
	    curnode = prom_nextnode(curnode)) {
		bzero(tmp_name, MAXSYSNAME);
		if (prom_getproplen(curnode, OBP_NAME) < MAXSYSNAME) {
			if (prom_getprop(curnode, OBP_NAME,
			    (caddr_t)tmp_name) == -1 || prom_getprop(curnode,
			    OBP_DEVICETYPE, tmp_name) == -1 || strcmp(tmp_name,
			    "cpu") != 0)
				continue;

			cpucnt++;
			if (prom_getprop(curnode, "portid", (caddr_t)&portid) !=
			    -1 && portid > max_portid)
				max_portid = portid;
		}
	}
	if (cpucnt <= 1)
		max_mem_nodes = 1;
	else if (max_portid >= 0 && max_portid < MAX_MEM_NODES)
		max_mem_nodes = max_portid + 1;

	/*
	 * Set tuneables for fiesta architecture
	 *
	 * lgrp_expand_proc_thresh is the minimum load on the lgroups
	 * this process is currently running on before considering
	 * expanding threads to another lgroup.
	 *
	 * lgrp_expand_proc_diff determines how much less the remote lgroup
	 * must be loaded before expanding to it.
	 *
	 * Optimize for memory bandwidth by spreading multi-threaded
	 * programs across different lgroups.
	 */
	lgrp_expand_proc_thresh = lgrp_loadavg_max_effect - 1;
	lgrp_expand_proc_diff = lgrp_loadavg_max_effect / 2;
	lgrp_loadavg_tolerance = lgrp_loadavg_max_effect / 2;
	lgrp_mem_free_thresh = 1;	/* home lgrp must have some memory */
	lgrp_mem_policy_root = LGRP_MEM_POLICY_NEXT;
	lgrp_load_thresh = 0;

	mem_node_pfn_shift = PLATFORM_MC_SHIFT - MMU_PAGESHIFT;
}

/*
 * Return latency between "from" and "to" lgroups
 *
 * This latency number can only be used for relative comparison
 * between lgroups on the running system, cannot be used across platforms,
 * and may not reflect the actual latency.  It is platform and implementation
 * specific, so platform gets to decide its value.  It would be nice if the
 * number was at least proportional to make comparisons more meaningful though.
 * NOTE: The numbers below are supposed to be load latencies for uncached
 * memory divided by 10.
 */
int
plat_lgrp_latency(void *from, void *to)
{
	/*
	 * Return remote latency when there are more than two lgroups
	 * (root and child) and getting latency between two different
	 * lgroups or root is involved
	 */
	if (lgrp_optimizations() && (from != to || from ==
	    (void *) LGRP_DEFAULT_HANDLE || to == (void *) LGRP_DEFAULT_HANDLE))
		return (17);
	else
		return (12);
}

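/*
 * Return the memory node corresponding to the given pfn.
 */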
int
plat_pfn_to_mem_node(pfn_t pfn)
{
	ASSERT(max_mem_nodes > 1);
	return (pfn >> mem_node_pfn_shift);
}

/*
 * Assign memnode to lgroups
 */
void
plat_fill_mc(pnode_t nodeid)
{
	int		portid;

	/*
	 * Memory controller portid == global CPU id
	 */
	if ((prom_getprop(nodeid, "portid", (caddr_t)&portid) == -1) ||
	    (portid < 0))
		return;

	if (portid < max_mem_nodes)
		plat_assign_lgrphand_to_mem_node((lgrp_handle_t)portid, portid);
}

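/*
 * Carve the boot-time memory list up into memory nodes.
 */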
/* ARGSUSED */
void
plat_build_mem_nodes(u_longlong_t *list, size_t nelems)
{
	size_t	elem;
	pfn_t	basepfn;
	pgcnt_t	npgs;

	/*
	 * Boot install lists are arranged <addr, len>, <addr, len>, ...
	 */
	for (elem = 0; elem < nelems; elem += 2) {
		basepfn = btop(list[elem]);
		npgs = btop(list[elem+1]);
		mem_node_add_slice(basepfn, basepfn + npgs - 1);
	}
}

/*
 * Common locking enter code
 */
void
plat_setprop_enter(void)
{
	mutex_enter(&mi2cv_mutex);
}

/*
 * Common locking exit code
 */
void
plat_setprop_exit(void)
{
	mutex_exit(&mi2cv_mutex);
}

/*
 * Called by mi2cv driver
 */
void
plat_shared_i2c_enter(dev_info_t *i2cnexus_dip)
{
	if (i2cnexus_dip == shared_mi2cv_dip) {
		plat_setprop_enter();
	}
}

/*
 * Called by mi2cv driver
 */
void
plat_shared_i2c_exit(dev_info_t *i2cnexus_dip)
{
	if (i2cnexus_dip == shared_mi2cv_dip) {
		plat_setprop_exit();
	}
}

/*
 * Called by todm5823 driver
 */
void
plat_rmc_comm_req(struct rmc_comm_msg *request)
{
	if (rmc_req_now)
		(void) rmc_req_now(request, 0);
}
581