xref: /titanic_50/usr/src/uts/sun4u/seattle/os/seattle.c (revision 4f85d229295a756a4e6f1759b47df7b97412db7d)
1 /*
2  * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
3  * Use is subject to license terms.
4  */
5 
6 #pragma ident	"%Z%%M%	%I%	%E% SMI"
7 
8 #include <sys/param.h>
9 #include <sys/systm.h>
10 #include <sys/sysmacros.h>
11 #include <sys/sunddi.h>
12 #include <sys/esunddi.h>
13 
14 #include <sys/platform_module.h>
15 #include <sys/errno.h>
16 #include <sys/cpu_sgnblk_defs.h>
17 #include <sys/rmc_comm_dp.h>
18 #include <sys/rmc_comm_drvintf.h>
19 #include <sys/modctl.h>
20 #include <sys/lgrp.h>
21 #include <sys/memnode.h>
22 #include <sys/promif.h>
23 
/* Anything related to shared i2c access applies to Seattle only */
#define	SHARED_MI2CV_PATH "/i2c@1f,530000"
static dev_info_t *shared_mi2cv_dip;	/* i2c nexus shared with OBP (nvram) */
static kmutex_t mi2cv_mutex;		/* arbitrates access to that bus */

/* memory controller unum lookup hook; consumed by plat_get_mem_unum() */
int (*p2get_mem_unum)(int, uint64_t, char *, int, int *);
static void cpu_sgn_update(ushort_t, uchar_t, uchar_t, int);
/* handle to rmc_comm_request_nowait(); resolved in load_platform_drivers() */
int (*rmc_req_now)(rmc_comm_msg_t *, uint8_t) = NULL;
32 
/*
 * Platform-specific startup: initialize the lock that arbitrates access
 * to the i2c bus shared with OBP (see plat_setprop_enter/exit).
 */
void
startup_platform(void)
{
	/*
	 * NOTE(review): the third argument is the mutex type; NULL here
	 * relies on the default/adaptive type being 0.  MUTEX_DEFAULT is
	 * the documented value (mutex_init(9F)) -- confirm before changing.
	 */
	mutex_init(&mi2cv_mutex, NULL, NULL, NULL);
}
38 
/*
 * Return the number of spare TSBs to reserve; Seattle reserves none.
 *
 * Declared with (void) rather than a K&R-style empty parameter list so
 * the compiler type-checks calls against a real prototype.
 */
int
set_platform_tsb_spares(void)
{
	return (0);
}
44 
/*
 * Establish platform defaults: select the TOD (time-of-day) driver
 * module and register the routine that forwards CPU signature updates
 * to the RMC (ALOM).
 */
void
set_platform_defaults(void)
{
	extern char *tod_module_name;
	/* Set appropriate tod module unless one was already chosen */
	if (tod_module_name == NULL)
		tod_module_name = "todm5823";

	/* route cpu signature updates through this platform's handler */
	cpu_sgn_func = cpu_sgn_update;
}
55 
56 /*
57  * Definitions for accessing the pci config space of the isa node
58  * of Southbridge.
59  */
60 static ddi_acc_handle_t isa_handle = NULL;	/* handle for isa pci space */
61 
62 /*
63  * Definition for accessing rmclomv
64  */
65 #define	RMCLOMV_PATHNAME	"/pseudo/rmclomv@0"
66 
/*
 * Attach and hold the drivers this platform needs at boot:
 * 'us', 'mc-us3i' (memory controller), 'pmugpio' (ALOM reset/watchdog
 * lines), the shared mi2cv nexus, and the rmc_comm/rmclomv RMC stack.
 * Finally resolves the rmc_comm_request_nowait() symbol for todm5823.
 */
void
load_platform_drivers(void)
{
	dev_info_t	*rmclomv_dip;
	/*
	 * It is OK to return error because 'us' driver is not available
	 * in all clusters (e.g. missing in Core cluster).
	 */
	(void) i_ddi_attach_hw_nodes("us");


	/*
	 * mc-us3i must stay loaded for plat_get_mem_unum()
	 */
	if (i_ddi_attach_hw_nodes("mc-us3i") != DDI_SUCCESS)
		cmn_err(CE_WARN, "mc-us3i driver failed to install");
	(void) ddi_hold_driver(ddi_name_to_major("mc-us3i"));

	/*
	 * load the GPIO driver for the ALOM reset and watchdog lines
	 */
	if (i_ddi_attach_hw_nodes("pmugpio") != DDI_SUCCESS)
		cmn_err(CE_WARN, "pmugpio failed to install");
	else {
		extern int watchdog_enable, watchdog_available;
		extern int disable_watchdog_on_exit;

		/*
		 * Disable an active h/w watchdog timer upon exit to OBP.
		 */
		disable_watchdog_on_exit = 1;

		/* h/w watchdog is present; enable it by default */
		watchdog_enable = 1;
		watchdog_available = 1;
	}
	(void) ddi_hold_driver(ddi_name_to_major("pmugpio"));

	/*
	 * Figure out which mi2cv dip is shared with OBP for the nvram
	 * device, so the lock can be acquired.
	 */
	shared_mi2cv_dip = e_ddi_hold_devi_by_path(SHARED_MI2CV_PATH, 0);
	/*
	 * Load the environmentals driver (rmclomv)
	 *
	 * We need this driver to handle events from the RMC when state
	 * changes occur in the environmental data.
	 */
	if (i_ddi_attach_hw_nodes("rmc_comm") != DDI_SUCCESS) {
		cmn_err(CE_WARN, "rmc_comm failed to install");
	} else {
		(void) ddi_hold_driver(ddi_name_to_major("rmc_comm"));

		rmclomv_dip = e_ddi_hold_devi_by_path(RMCLOMV_PATHNAME, 0);
		if (rmclomv_dip == NULL) {
			cmn_err(CE_WARN, "Could not install rmclomv driver\n");
		}
	}

	/*
	 * create a handle to the rmc_comm_request_nowait() function
	 * inside the rmc_comm module.
	 *
	 * The Seattle/Boston todm5823 driver will use this handle to
	 * use the rmc_comm_request_nowait() function to send time/date
	 * updates to ALOM.
	 */
	rmc_req_now = (int (*)(rmc_comm_msg_t *, uint8_t))
		modgetsymvalue("rmc_comm_request_nowait", 0);
}
137 
138 /*
139  * This routine is needed if a device error or timeout occurs before the
140  * driver is loaded.
141  */
142 /*ARGSUSED*/
143 int
144 plat_ide_chipreset(dev_info_t *dip, int chno)
145 {
146 	int	ret = DDI_SUCCESS;
147 
148 	if (isa_handle == NULL) {
149 		return (DDI_FAILURE);
150 	}
151 
152 	/*
153 	 * This will be filled in with the reset logic
154 	 * for the ULI1573 when that becomes available.
155 	 * currently this is just a stub.
156 	 */
157 	return (ret);
158 }
159 
160 
/*
 * Power on a CPU.  Per-CPU power control is not supported on this
 * platform, so always report ENOTSUP.
 */
/*ARGSUSED*/
int
plat_cpu_poweron(struct cpu *cp)
{
	return (ENOTSUP);
}
167 
/*
 * Power off a CPU.  Per-CPU power control is not supported on this
 * platform, so always report ENOTSUP.
 */
/*ARGSUSED*/
int
plat_cpu_poweroff(struct cpu *cp)
{
	return (ENOTSUP);
}
174 
/*
 * Per-memnode freelist processing hook; nothing to do on this platform.
 */
/*ARGSUSED*/
void
plat_freelist_process(int mnode)
{
}
180 
/*
 * Drivers that must be loaded along with the platform module; the list
 * is NULL-terminated.
 */
char *platform_module_list[] = {
	"mi2cv",
	"pca9556",
	NULL
};
186 
/*
 * TOD (time-of-day clock) fault notification hook; this platform takes
 * no action on TOD faults.
 */
/*ARGSUSED*/
void
plat_tod_fault(enum tod_fault_type tod_bad)
{
}
192 
193 /*ARGSUSED*/
194 int
195 plat_get_mem_unum(int synd_code, uint64_t flt_addr, int flt_bus_id,
196     int flt_in_memory, ushort_t flt_status, char *buf, int buflen, int *lenp)
197 {
198 	if (flt_in_memory && (p2get_mem_unum != NULL))
199 		return (p2get_mem_unum(synd_code, P2ALIGN(flt_addr, 8),
200 		    buf, buflen, lenp));
201 	else
202 		return (ENOTSUP);
203 }
204 
/*
 * Return the unit name for a CPU -- always "MB" (motherboard) on this
 * platform.  Returns 0 with *lenp set on success, or ENOSPC when buf is
 * too small to hold the name.
 */
/*ARGSUSED*/
int
plat_get_cpu_unum(int cpuid, char *buf, int buflen, int *lenp)
{
	if (snprintf(buf, buflen, "MB") >= buflen)
		return (ENOSPC);

	*lenp = strlen(buf);
	return (0);
}
216 
217 /*
218  * Our nodename has been set, pass it along to the RMC.
219  */
220 void
221 plat_nodename_set(void)
222 {
223 	rmc_comm_msg_t	req;	/* request */
224 	int (*rmc_req_res)(rmc_comm_msg_t *, rmc_comm_msg_t *, time_t) = NULL;
225 
226 	/*
227 	 * find the symbol for the mailbox routine
228 	 */
229 	rmc_req_res = (int (*)(rmc_comm_msg_t *, rmc_comm_msg_t *, time_t))
230 		modgetsymvalue("rmc_comm_request_response", 0);
231 
232 	if (rmc_req_res == NULL) {
233 		return;
234 	}
235 
236 	/*
237 	 * construct the message telling the RMC our nodename
238 	 */
239 	req.msg_type = DP_SET_CPU_NODENAME;
240 	req.msg_len = strlen(utsname.nodename) + 1;
241 	req.msg_bytes = 0;
242 	req.msg_buf = (caddr_t)utsname.nodename;
243 
244 	/*
245 	 * ship it
246 	 */
247 	(void) (rmc_req_res)(&req, NULL, 2000);
248 }
249 
/* last domain-wide signature recorded; maintained by cpu_sgn_update() */
sig_state_t current_sgn;
251 
252 /*
253  * cpu signatures - we're only interested in the overall system
254  * "signature" on this platform - not individual cpu signatures
255  */
256 /*ARGSUSED*/
257 static void
258 cpu_sgn_update(ushort_t sig, uchar_t state, uchar_t sub_state, int cpuid)
259 {
260 	dp_cpu_signature_t signature;
261 	rmc_comm_msg_t	req;	/* request */
262 	int (*rmc_req_res)(rmc_comm_msg_t *, rmc_comm_msg_t *, time_t) = NULL;
263 	int (*rmc_req_now)(rmc_comm_msg_t *, uint8_t) = NULL;
264 
265 
266 	/*
267 	 * Differentiate a panic reboot from a non-panic reboot in the
268 	 * setting of the substate of the signature.
269 	 *
270 	 * If the new substate is REBOOT and we're rebooting due to a panic,
271 	 * then set the new substate to a special value indicating a panic
272 	 * reboot, SIGSUBST_PANIC_REBOOT.
273 	 *
274 	 * A panic reboot is detected by a current (previous) signature
275 	 * state of SIGST_EXIT, and a new signature substate of SIGSUBST_REBOOT.
276 	 * The domain signature state SIGST_EXIT is used as the panic flow
277 	 * progresses.
278 	 *
279 	 * At the end of the panic flow, the reboot occurs but we should know
280 	 * one that was involuntary, something that may be quite useful to know
281 	 * at OBP level.
282 	 */
283 	if (state == SIGST_EXIT && sub_state == SIGSUBST_REBOOT) {
284 		if (current_sgn.state_t.state == SIGST_EXIT &&
285 		    current_sgn.state_t.sub_state != SIGSUBST_REBOOT)
286 			sub_state = SIGSUBST_PANIC_REBOOT;
287 	}
288 
289 	/*
290 	 * offline and detached states only apply to a specific cpu
291 	 * so ignore them.
292 	 */
293 	if (state == SIGST_OFFLINE || state == SIGST_DETACHED) {
294 		return;
295 	}
296 
297 	current_sgn.signature = CPU_SIG_BLD(sig, state, sub_state);
298 
299 	/*
300 	 * find the symbol for the mailbox routine
301 	 */
302 	rmc_req_res = (int (*)(rmc_comm_msg_t *, rmc_comm_msg_t *, time_t))
303 		modgetsymvalue("rmc_comm_request_response", 0);
304 	if (rmc_req_res == NULL) {
305 		return;
306 	}
307 
308 	/*
309 	 * find the symbol for the mailbox routine
310 	 */
311 	rmc_req_now = (int (*)(rmc_comm_msg_t *, uint8_t))
312 		modgetsymvalue("rmc_comm_request_nowait", 0);
313 	if (rmc_req_now == NULL) {
314 		return;
315 	}
316 
317 	signature.cpu_id = -1;
318 	signature.sig = sig;
319 	signature.states = state;
320 	signature.sub_state = sub_state;
321 	req.msg_type = DP_SET_CPU_SIGNATURE;
322 	req.msg_len = (int)(sizeof (signature));
323 	req.msg_bytes = 0;
324 	req.msg_buf = (caddr_t)&signature;
325 
326 	/*
327 	 * ship it
328 	 * - note that for panic or reboot need to send with nowait/urgent
329 	 */
330 	if (state == SIGST_EXIT && (sub_state == SIGSUBST_HALT ||
331 	    sub_state == SIGSUBST_REBOOT || sub_state == SIGSUBST_ENVIRON ||
332 	    sub_state == SIGSUBST_PANIC_REBOOT))
333 		(void) (rmc_req_now)(&req, RMC_COMM_DREQ_URGENT);
334 	else
335 		(void) (rmc_req_res)(&req, NULL, 2000);
336 }
337 
338 /*
339  * Fiesta support for lgroups.
340  *
341  * On fiesta platform, an lgroup platform handle == CPU id
342  */
343 
344 /*
345  * Macro for extracting the CPU number from the CPU id
346  */
347 #define	CPUID_TO_LGRP(id)	((id) & 0x7)
348 #define	PLATFORM_MC_SHIFT	36
349 
350 /*
351  * Return the platform handle for the lgroup containing the given CPU
352  */
353 void *
354 plat_lgrp_cpu_to_hand(processorid_t id)
355 {
356 	return ((void *) CPUID_TO_LGRP(id));
357 }
358 
359 /*
360  * Platform specific lgroup initialization
361  */
362 void
363 plat_lgrp_init(void)
364 {
365 	pnode_t		curnode;
366 	char		tmp_name[MAXSYSNAME];
367 	int		portid;
368 	int		cpucnt = 0;
369 	int		max_portid = -1;
370 	extern uint32_t lgrp_expand_proc_thresh;
371 	extern uint32_t lgrp_expand_proc_diff;
372 	extern pgcnt_t	lgrp_mem_free_thresh;
373 	extern uint32_t lgrp_loadavg_tolerance;
374 	extern uint32_t lgrp_loadavg_max_effect;
375 	extern uint32_t lgrp_load_thresh;
376 	extern lgrp_mem_policy_t  lgrp_mem_policy_root;
377 
378 	/*
379 	 * Count the number of CPUs installed to determine if
380 	 * NUMA optimization should be enabled or not.
381 	 *
382 	 * All CPU nodes reside in the root node and have a
383 	 * device type "cpu".
384 	 */
385 	curnode = prom_rootnode();
386 	for (curnode = prom_childnode(curnode); curnode;
387 	    curnode = prom_nextnode(curnode)) {
388 		bzero(tmp_name, MAXSYSNAME);
389 		if (prom_getproplen(curnode, OBP_NAME) < MAXSYSNAME) {
390 			if (prom_getprop(curnode, OBP_NAME,
391 			    (caddr_t)tmp_name) == -1 || prom_getprop(curnode,
392 			    OBP_DEVICETYPE, tmp_name) == -1 || strcmp(tmp_name,
393 			    "cpu") != 0)
394 			continue;
395 
396 			cpucnt++;
397 			if (prom_getprop(curnode, "portid", (caddr_t)&portid) !=
398 			    -1 && portid > max_portid)
399 				max_portid = portid;
400 		}
401 	}
402 	if (cpucnt <= 1)
403 		max_mem_nodes = 1;
404 	else if (max_portid >= 0 && max_portid < MAX_MEM_NODES)
405 		max_mem_nodes = max_portid + 1;
406 
407 	/*
408 	 * Set tuneables for fiesta architecture
409 	 *
410 	 * lgrp_expand_proc_thresh is the minimum load on the lgroups
411 	 * this process is currently running on before considering
412 	 * expanding threads to another lgroup.
413 	 *
414 	 * lgrp_expand_proc_diff determines how much less the remote lgroup
415 	 * must be loaded before expanding to it.
416 	 *
417 	 * Optimize for memory bandwidth by spreading multi-threaded
418 	 * program to different lgroups.
419 	 */
420 	lgrp_expand_proc_thresh = lgrp_loadavg_max_effect - 1;
421 	lgrp_expand_proc_diff = lgrp_loadavg_max_effect / 2;
422 	lgrp_loadavg_tolerance = lgrp_loadavg_max_effect / 2;
423 	lgrp_mem_free_thresh = 1;	/* home lgrp must have some memory */
424 	lgrp_expand_proc_thresh = lgrp_loadavg_max_effect - 1;
425 	lgrp_mem_policy_root = LGRP_MEM_POLICY_NEXT;
426 	lgrp_load_thresh = 0;
427 
428 	mem_node_pfn_shift = PLATFORM_MC_SHIFT - MMU_PAGESHIFT;
429 }
430 
431 /*
432  * Return latency between "from" and "to" lgroups
433  *
434  * This latency number can only be used for relative comparison
435  * between lgroups on the running system, cannot be used across platforms,
436  * and may not reflect the actual latency.  It is platform and implementation
437  * specific, so platform gets to decide its value.  It would be nice if the
438  * number was at least proportional to make comparisons more meaningful though.
439  * NOTE: The numbers below are supposed to be load latencies for uncached
440  * memory divided by 10.
441  */
442 int
443 plat_lgrp_latency(void *from, void *to)
444 {
445 	/*
446 	 * Return remote latency when there are more than two lgroups
447 	 * (root and child) and getting latency between two different
448 	 * lgroups or root is involved
449 	 */
450 	if (lgrp_optimizations() && (from != to || from ==
451 	    (void *) LGRP_DEFAULT_HANDLE || to == (void *) LGRP_DEFAULT_HANDLE))
452 		return (17);
453 	else
454 		return (12);
455 }
456 
/*
 * Map a pfn to the memory node that owns it; only valid when more than
 * one memory node is configured.
 */
int
plat_pfn_to_mem_node(pfn_t pfn)
{
	ASSERT(max_mem_nodes > 1);
	return (pfn >> mem_node_pfn_shift);
}
463 
464 /*
465  * Assign memnode to lgroups
466  */
467 void
468 plat_fill_mc(pnode_t nodeid)
469 {
470 	int		portid;
471 
472 	/*
473 	 * Memory controller portid == global CPU id
474 	 */
475 	if ((prom_getprop(nodeid, "portid", (caddr_t)&portid) == -1) ||
476 	    (portid < 0))
477 		return;
478 
479 	if (portid < max_mem_nodes)
480 		plat_assign_lgrphand_to_mem_node((lgrp_handle_t)portid, portid);
481 }
482 
483 /* ARGSUSED */
484 void
485 plat_build_mem_nodes(u_longlong_t *list, size_t nelems)
486 {
487 	size_t	elem;
488 	pfn_t	basepfn;
489 	pgcnt_t	npgs;
490 
491 	/*
492 	 * Boot install lists are arranged <addr, len>, <addr, len>, ...
493 	 */
494 	for (elem = 0; elem < nelems; elem += 2) {
495 		basepfn = btop(list[elem]);
496 		npgs = btop(list[elem+1]);
497 		mem_node_add_slice(basepfn, basepfn + npgs - 1);
498 	}
499 }
500 
501 /*
502  * Common locking enter code
503  */
504 void
505 plat_setprop_enter(void)
506 {
507 	mutex_enter(&mi2cv_mutex);
508 }
509 
510 /*
511  * Common locking exit code
512  */
513 void
514 plat_setprop_exit(void)
515 {
516 	mutex_exit(&mi2cv_mutex);
517 }
518 
519 /*
520  * Called by mi2cv driver
521  */
522 void
523 plat_shared_i2c_enter(dev_info_t *i2cnexus_dip)
524 {
525 	if (i2cnexus_dip == shared_mi2cv_dip) {
526 		plat_setprop_enter();
527 	}
528 }
529 
530 /*
531  * Called by mi2cv driver
532  */
533 void
534 plat_shared_i2c_exit(dev_info_t *i2cnexus_dip)
535 {
536 	if (i2cnexus_dip == shared_mi2cv_dip) {
537 		plat_setprop_exit();
538 	}
539 }
540 /*
541  * Called by todm5823 driver
542  */
543 void
544 plat_rmc_comm_req(struct rmc_comm_msg *request)
545 {
546 	if (rmc_req_now)
547 		(void) rmc_req_now(request, 0);
548 }
549