xref: /titanic_44/usr/src/cmd/picl/plugins/sun4u/snowbird/envmond/piclplatmod.c (revision 7c478bd95313f5f23a4c958a745db2134aa03244)
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2004 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <stdio.h>
#include <limits.h>
#include <unistd.h>
#include <sys/systeminfo.h>
#include <pthread.h>
#include <syslog.h>
#include <picl.h>
#include <picltree.h>
#include <picldefs.h>
#include <string.h>
#include <libnvpair.h>
#include <libintl.h>
#include <librcm.h>
#include <stropts.h>
#include <smclib.h>
#include <sys/sysevent/dr.h>
#include "piclenvmond.h"
#include "picldr.h"

/* local defines */
#define	RESET_CPU "/usr/sbin/shutdown -y -g 0 -i6"
#define	SHUTDOWN_CPU "/usr/sbin/shutdown -y -g 0 -i0"
#define	RCM_ABSTRACT_RESOURCE	"SUNW_snowbird/board0/CPU1"
#define	CPU_SENSOR_GEO_ADDR	0xe
#define	IS_HEALTHY		0x01
#define	PICL_NODE_SYSMGMT	"sysmgmt"
#define	SYSMGMT_PATH		PLATFORM_PATH"/pci/pci/isa/sysmgmt"
#define	BUF_SIZE		7

/* external functions */
extern picl_errno_t env_create_property(int, int, size_t, char *,
	int (*readfn)(ptree_rarg_t *, void *),
	int (*writefn)(ptree_warg_t *, const void *),
	picl_nodehdl_t, picl_prophdl_t *, void *);
extern picl_errno_t post_dr_req_event(picl_nodehdl_t, char *, uint8_t);
extern picl_errno_t post_dr_ap_state_change_event(picl_nodehdl_t, char *,
	uint8_t);
extern boolean_t env_admin_lock_enabled(picl_nodehdl_t);
extern picl_errno_t env_create_temp_sensor_node(picl_nodehdl_t, uint8_t);
extern void env_handle_sensor_event(void *);
extern int env_open_smc();

/* external variables */
extern int env_debug;
extern uint8_t cpu_geo_addr;
extern picl_nodehdl_t rooth, platformh, sysmgmth, sensorh;
extern picl_nodehdl_t chassis_nodehdl, cpu_nodehdl, cpu_lnodehdl;

/* locals */
static pthread_mutex_t env_dmc_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t env_dmc_cond = PTHREAD_COND_INITIALIZER;
static boolean_t env_reset_cpu = B_FALSE;
static boolean_t env_shutdown_system = B_FALSE;
static env_state_event_t env_chassis_state = FRU_STATE_UNKNOWN;
static char *rcm_abstr_cp2300_name = RCM_ABSTRACT_RESOURCE;
static boolean_t env_got_dmc_msg = B_FALSE;
static long env_dmc_wait_time = 15;
static pthread_t dmc_thr_tid;
/*
 * issue a halt or reboot based on the env_reset_cpu flag
 */
/*ARGSUSED*/
static void
shutdown_cpu(boolean_t force)
{
	if (env_shutdown_system) {
		if (env_reset_cpu) {
			(void) pclose(popen(RESET_CPU, "w"));
		} else {
			(void) pclose(popen(SHUTDOWN_CPU, "w"));
		}
	}
}

/*
 * inform the RCM framework that the remove operation succeeded
 */
static void
confirm_rcm(char *abstr_name, rcm_handle_t *rhandle)
{
	rcm_notify_remove(rhandle, abstr_name, 0, NULL);
}

/*
 * inform the RCM framework that the remove operation failed
 */
static void
fail_rcm(char *abstr_name, rcm_handle_t *rhandle)
{
	(void) rcm_notify_online(rhandle, abstr_name, 0, NULL);
}

/*
 * check with the RCM framework whether it is OK to offline a device
 */
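/*
 * As implemented below: on RCM_SUCCESS the offline is confirmed via
 * rcm_notify_remove(); on RCM_FAILURE the resource is brought back
 * online via rcm_notify_online(); on RCM_CONFLICT the handle is simply
 * released and the conflict is reported to the caller.
 */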
static int
check_rcm(char *rcm_abstr_cp2300_name, uint_t flags)
{
	rcm_info_t *rinfo;
	rcm_handle_t *rhandle;
	int rv;

	if (rcm_alloc_handle(NULL, 0, NULL, &rhandle) != RCM_SUCCESS) {
		return (RCM_FAILURE);
	}

	rv = rcm_request_offline(rhandle, rcm_abstr_cp2300_name,
		flags, &rinfo);

	if (rv == RCM_FAILURE) {
		rcm_free_info(rinfo);
		fail_rcm(rcm_abstr_cp2300_name, rhandle);
		rcm_free_handle(rhandle);
		return (RCM_FAILURE);
	}
	if (rv == RCM_CONFLICT) {
		rcm_free_info(rinfo);
		rcm_free_handle(rhandle);
		return (RCM_CONFLICT);
	}

	confirm_rcm(rcm_abstr_cp2300_name, rhandle);
	rcm_free_info(rinfo);
	rcm_free_handle(rhandle);
	return (RCM_SUCCESS);
}

/*
 * utility routine to send response to an IPMI message
 */
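/*
 * Note on arguments: the call below splits reqseq_lun into
 * (reqseq_lun >> 2) and (reqseq_lun & 0x03), i.e. a 6-bit request
 * sequence number and a 2-bit LUN, and sends a single data byte
 * carrying the completion code.
 */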
static int
send_response2remote_device(uint8_t ipmb_addr, uint8_t cmd, uint8_t reqseq_lun,
	uint8_t cc)
{
	int rc = SMC_SUCCESS;
	sc_reqmsg_t req_pkt;
	sc_rspmsg_t rsp_pkt;
	uint8_t data = cc; /* completion code */

	/* make a call to ctsmc lib */
	(void) smc_init_ipmi_msg(&req_pkt, cmd, DEFAULT_FD, 1, &data,
		(reqseq_lun >> 2), ipmb_addr, SMC_NETFN_APP_RSP,
		(reqseq_lun & 0x03));
	rc = smc_send_msg(DEFAULT_FD, &req_pkt, &rsp_pkt,
		POLL_TIMEOUT);

	if (rc != SMC_SUCCESS)
		syslog(LOG_ERR, gettext("SUNW_envmond:Error in sending response"
			" to %x, error = %d"), ipmb_addr, rc);
	return (rc);
}

/*
 * perform all the checks (admin lock check, RCM check) and initiate
 * the shutdown
 */
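/*
 * When the force flag is set, the admin lock is ignored, RCM is asked
 * for a forced offline, and the shutdown command is run directly after
 * a short grace period instead of being driven through frutree events.
 */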
/*ARGSUSED*/
static int
initiate_shutdown(boolean_t force)
{
	int rv;
	uint_t	rcmflags = 0;
	struct timespec rqtp, rmtp;

	if (!env_shutdown_system) {
		return (-1);
	}

	/* check the adminlock prop */
	if ((!force) && (env_admin_lock_enabled(cpu_nodehdl))) {
		syslog(LOG_ERR, gettext("SUNW_envmond: "
			"CPU in use! Cannot shutdown"));
		return (-1);
	}

	if (force) {
		rcmflags = RCM_FORCE;
	}

	/* check with rcm framework */
	rv = check_rcm(rcm_abstr_cp2300_name, rcmflags);

	if ((rv == RCM_FAILURE) || (rv == RCM_CONFLICT)) {
		syslog(LOG_ERR, gettext("SUNW_envmond: RCM error %d, Cannot"
			" shutdown"), rv);
		return (-1);
	}

	/*
	 * force events on chassis node
	 */
	if (force) {
		if (post_dr_req_event(chassis_nodehdl, DR_REQ_OUTGOING_RES,
			NO_WAIT) == PICL_SUCCESS) {
			/* wait a little for clean up of frutree */
			rqtp.tv_sec = 5;
			rqtp.tv_nsec = 0;
			(void) nanosleep(&rqtp, &rmtp);
		}
		/*
		 * If force option is set, do it right here for now
		 * since there is no way to pass this info via events
		 * to frutree framework.
		 */
		shutdown_cpu(force);
		return (0);
	}

	if (post_dr_req_event(chassis_nodehdl, DR_REQ_OUTGOING_RES, NO_WAIT)
		!= PICL_SUCCESS) {
		syslog(LOG_ERR, gettext("SUNW_envmond:cannot shutdown "
			"the host CPU."));
		return (-1);
	}
	return (0);
}

/*
 * get the HEALTHY# line state
 * Return -1 for Error
 *         0 for HEALTHY# down
 *         1 for HEALTHY# up
 */
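/*
 * The state is read with the SMC_GET_EXECUTION_STATE command; bit 0
 * (IS_HEALTHY) of the first response data byte reflects the HEALTHY#
 * line.
 */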
static int
env_get_healthy_status()
{
	sc_reqmsg_t	req_pkt;
	sc_rspmsg_t	rsp_pkt;
	uint8_t		size = 0;

	/* initialize the request packet */
	(void) smc_init_smc_msg(&req_pkt, SMC_GET_EXECUTION_STATE,
		DEFAULT_SEQN, size);

	/* make a call to smc library to send cmd */
	if (smc_send_msg(DEFAULT_FD, &req_pkt, &rsp_pkt,
		POLL_TIMEOUT) != SMC_SUCCESS) {
		return (-1);
	}
	return (rsp_pkt.data[0] & IS_HEALTHY);
}

/*
 * initialization
 */
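/*
 * Resolves the root and chassis node handles if necessary and posts a
 * DR_REQ_INCOMING_RES event on the chassis node to start configuring
 * the frutree.
 */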
picl_errno_t
env_platmod_init()
{
	picl_errno_t rc = PICL_SUCCESS;

	if (rooth == 0) {
		if (ptree_get_root(&rooth) != PICL_SUCCESS) {
			return (rc);
		}
	}

	if (chassis_nodehdl == 0) {
		if ((rc = ptree_get_node_by_path(PICL_FRUTREE_CHASSIS,
			&chassis_nodehdl)) != PICL_SUCCESS) {
			return (rc);
		}
	}
	if (post_dr_req_event(chassis_nodehdl, DR_REQ_INCOMING_RES,
		NO_WAIT) != PICL_SUCCESS) {
		syslog(LOG_ERR, gettext("SUNW_envmond: Error in "
			"Posting configure event for Chassis node"));
		rc = PICL_FAILURE;
	}
	return (rc);
}

/*
 * release all the resources
 */
void
env_platmod_fini()
{
	cpu_geo_addr = 0;
	rooth = platformh = sysmgmth = 0;
	chassis_nodehdl = cpu_nodehdl = cpu_lnodehdl = 0;
	env_chassis_state = FRU_STATE_UNKNOWN;
	(void) ptree_delete_node(sensorh);
	(void) ptree_destroy_node(sensorh);
}

/*
 * handle chassis state change
 */
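/*
 * A configured-to-unconfigured transition shuts the node down: the CPU
 * is rebooted or halted depending on env_reset_cpu.  If the chassis was
 * never configured (PICL initialization failed), no shutdown is issued.
 */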
static void
env_handle_chassis_state_event(char *state)
{
	if (strcmp(state, PICLEVENTARGVAL_CONFIGURING) == 0) {
		env_chassis_state = FRU_STATE_CONFIGURING;
		return;
	}

	if (strcmp(state, PICLEVENTARGVAL_UNCONFIGURED) == 0) {
		if (env_chassis_state == FRU_STATE_CONFIGURING ||
			env_chassis_state == FRU_STATE_UNKNOWN) {
			/* picl initialization failed, don't issue shutdown */
			env_chassis_state = FRU_STATE_UNCONFIGURED;
			return;
		}
		env_chassis_state = FRU_STATE_UNCONFIGURED;
		if (env_reset_cpu) {
			(void) pclose(popen(RESET_CPU, "w"));
		} else {
			(void) pclose(popen(SHUTDOWN_CPU, "w"));
		}
		return;
	}

	if (strcmp(state, PICLEVENTARGVAL_CONFIGURED) == 0) {
		env_chassis_state = FRU_STATE_CONFIGURED;
	}
}

/*
 * event handler for watchdog state change event
 */
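/*
 * On a watchdog expiry whose configured action is "alarm", the CPU
 * node's condition is marked failed and a DR attachment-point state
 * change event is posted.
 */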
static picl_errno_t
env_handle_watchdog_expiry(picl_nodehdl_t wd_nodehdl)
{
	picl_errno_t rc = PICL_SUCCESS;
	char class[PICL_CLASSNAMELEN_MAX];
	char value[PICL_PROPNAMELEN_MAX];
	char cond[BUF_SIZE];

	if ((rc = ptree_get_propval_by_name(wd_nodehdl,
		PICL_PROP_CLASSNAME, class,
		PICL_CLASSNAMELEN_MAX)) != PICL_SUCCESS) {
		return (rc);
	}

	/* if the event is not from a watchdog-timer node, return */
	if (strcmp(class, PICL_CLASS_WATCHDOG_TIMER) != 0) {
		return (PICL_INVALIDARG);
	}

	if ((rc = ptree_get_propval_by_name(wd_nodehdl,
		PICL_PROP_WATCHDOG_ACTION, value, sizeof (value))) !=
		PICL_SUCCESS) {
		return (rc);
	}

	/* if the action is not alarm, don't do anything */
	if (strcmp(value, PICL_PROPVAL_WD_ACTION_ALARM) != 0) {
		return (PICL_SUCCESS);
	}

	(void) strncpy(cond, PICLEVENTARGVAL_FAILED, sizeof (cond));
	/* update CPU condition to failed */
	if ((rc = ptree_update_propval_by_name(cpu_nodehdl,
		PICL_PROP_CONDITION, cond, sizeof (cond))) != PICL_SUCCESS) {
		return (rc);
	}

	/* post dr ap state change event */
	rc = post_dr_ap_state_change_event(cpu_nodehdl,
		DR_RESERVED_ATTR, NO_COND_TIMEDWAIT);
	return (rc);
}

/*
 * routine that handles all the picl state and condition change events
 */
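/*
 * The event payload is an nvlist carrying the node handle and the new
 * state (for state-change events) or condition (for condition-change
 * events).  Chassis state changes and watchdog expiry are handled as
 * special cases; all other values are mapped to an env_state_event_t
 * and dispatched through the switch below.
 */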
void
env_platmod_handle_event(const char *ename, const void *earg, size_t size)
{
	picl_errno_t		rc;
	picl_nodehdl_t		nodeh = 0;
	picl_prophdl_t		proph;
	nvlist_t		*nvlp;
	char			*value;
	boolean_t		state_event;
	env_state_event_t	event;
	char			result[PICL_PROPNAMELEN_MAX];
	uint64_t		status_time, cond_time;
	char			cond[BUF_SIZE];

	if (!ename) {
		return;
	}
	if (strcmp(ename, PICLEVENT_STATE_CHANGE) == 0) {
		state_event = B_TRUE;
	} else if (strcmp(ename, PICLEVENT_CONDITION_CHANGE) == 0) {
		state_event = B_FALSE;
	} else {
		syslog(LOG_ERR, gettext("SUNW_envmond: unknown event:%s\n"),
			ename);
		return;
	}

	/* unpack the nvlist and get the information */
	if (nvlist_unpack((char *)earg, size, &nvlp, NULL)) {
		return;
	}
	if (nvlist_lookup_uint64(nvlp, PICLEVENTARG_NODEHANDLE, &nodeh) == -1) {
		nvlist_free(nvlp);
		return;
	}
	if (nvlist_lookup_string(nvlp, (state_event) ?
		PICLEVENTARG_STATE :
		PICLEVENTARG_CONDITION, &value) != 0) {
		nvlist_free(nvlp);
		return;
	}

	if (env_debug & PICLEVENTS) {
		if (ptree_get_propval_by_name(nodeh, PICL_PROP_NAME,
			result, sizeof (result)) != PICL_SUCCESS) {
			syslog(LOG_ERR, "SUNW_envmond: error in getting"
				" node name");
			nvlist_free(nvlp);
			return;
		}
		syslog(LOG_INFO, "SUNW_envmond: %s (%s) on %s",
			ename, value, result);
	}

	if (chassis_nodehdl == 0 && state_event) {
		if (ptree_get_propval_by_name(nodeh, PICL_PROP_NAME,
			result, sizeof (result)) != PICL_SUCCESS) {
			nvlist_free(nvlp);
			return;
		}
		if (strcmp(result, PICL_NODE_CHASSIS) == 0) {
			chassis_nodehdl = nodeh;
		}
	}

	if (nodeh == chassis_nodehdl && state_event) {
		env_handle_chassis_state_event(value);
		nvlist_free(nvlp);
		return;
	}

	if (strcmp(PICLEVENTARGVAL_DISCONNECTED, value) == 0) {
		event = LOC_STATE_DISCONNECTED;
	} else if (strcmp(PICLEVENTARGVAL_CONNECTED, value) == 0) {
		event = LOC_STATE_CONNECTED;
	} else if (strcmp(PICLEVENTARGVAL_EMPTY, value) == 0) {
		event = LOC_STATE_EMPTY;
	} else if (strcmp(PICLEVENTARGVAL_CONFIGURED, value) == 0) {
		event = FRU_STATE_CONFIGURED;
	} else if (strcmp(PICLEVENTARGVAL_UNCONFIGURED, value) == 0) {
		event = FRU_STATE_UNCONFIGURED;
	} else if (strcmp(PICL_PROPVAL_WD_STATE_EXPIRED, value) == 0) {
		/* watchdog expiry event */
		if ((rc = env_handle_watchdog_expiry(nodeh)) != PICL_SUCCESS) {
			syslog(LOG_ERR, gettext("SUNW_envmond:Error in handling"
				" watchdog expiry event"));
		}
		nvlist_free(nvlp);
		return;
	} else {
		nvlist_free(nvlp);
		return;
	}

	switch (event) {
	case LOC_STATE_EMPTY:
		break;

	case LOC_STATE_DISCONNECTED:
		if (nodeh == cpu_lnodehdl) {
			(void) initiate_shutdown(B_FALSE);
		}
		break;
	case LOC_STATE_CONNECTED:
		if (nodeh != cpu_lnodehdl) {
			break;
		}
		if (ptree_get_propval_by_name(cpu_lnodehdl,
			PICL_PROP_CHILD, &cpu_nodehdl,
			sizeof (picl_nodehdl_t)) != PICL_SUCCESS) {
			syslog(LOG_ERR, gettext("SUNW_envmond:Cannot "
				"initialize CPU node handle %llx"), nodeh);
			cpu_nodehdl = 0;
		}
		break;
	case FRU_STATE_CONFIGURED:
		if (nodeh != cpu_nodehdl) {
			break;
		}
		if (ptree_get_prop_by_name(cpu_nodehdl,
			PICL_PROP_STATUS_TIME, &proph) != PICL_SUCCESS) {
			status_time = (uint64_t)time(NULL);
			(void) env_create_property(PICL_PTYPE_TIMESTAMP,
				PICL_READ, sizeof (status_time),
				PICL_PROP_STATUS_TIME, NULLREAD,
				NULLWRITE, cpu_nodehdl, &proph,
				&status_time);
		}
		if (ptree_get_prop_by_name(cpu_nodehdl,
			PICL_PROP_CONDITION_TIME, &proph) != PICL_SUCCESS) {
			cond_time = (uint64_t)time(NULL);
			(void) env_create_property(PICL_PTYPE_TIMESTAMP,
				PICL_READ, sizeof (cond_time),
				PICL_PROP_CONDITION_TIME, NULLREAD,
				NULLWRITE, cpu_nodehdl, &proph,
				&cond_time);
		}
		env_shutdown_system = B_FALSE;
		/* if HEALTHY# is UP update the condition to "ok" */
		switch (env_get_healthy_status()) {
		case 0:
			/* update CPU condition to failed */
			(void) strncpy(cond, PICLEVENTARGVAL_FAILED,
				sizeof (cond));
			break;
		case 1:
			/* update CPU condition to ok */
			(void) strncpy(cond, PICLEVENTARGVAL_OK,
				sizeof (cond));
			break;
		case -1:	/*FALLTHRU*/
		default:
			/* update the condition to unknown */
			(void) strncpy(cond, PICLEVENTARGVAL_UNKNOWN,
				sizeof (cond));
			syslog(LOG_ERR, gettext("SUNW_envmond:Error in "
				"reading HEALTHY# status"));
		}

		if ((rc = ptree_update_propval_by_name(cpu_nodehdl,
			PICL_PROP_CONDITION, cond, sizeof (cond))) !=
			PICL_SUCCESS) {
			syslog(LOG_ERR, gettext("SUNW_envmond:Error in "
				"updating CPU condition, error = %d"), rc);
		}
		break;
	case FRU_STATE_UNCONFIGURED:
		if (env_reset_cpu && nodeh == cpu_nodehdl) {
			(void) initiate_shutdown(B_FALSE);
		}
		break;
	default:
		break;
	} /* end of switch */
	nvlist_free(nvlp);
}

/*
 * This thread waits for the DMC message to arrive, since a response
 * ACK must be sent back to the DMC.  Otherwise the DMC may assume the
 * message was lost and power off the node, in which case the CPU could
 * be powered off in the middle of the shutdown process.  If the DMC
 * message does not arrive within the wait period, process the local
 * shutdown request.
 */
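/* The wait interval is env_dmc_wait_time seconds (15 by default). */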
/*ARGSUSED*/
static void *
env_wait_for_dmc_msg(void *args)
{
	struct timeval  ct;
	struct timespec to;

	(void) pthread_mutex_lock(&env_dmc_mutex);
	if (env_got_dmc_msg == B_TRUE) {
		(void) pthread_mutex_unlock(&env_dmc_mutex);
		return (NULL);
	}

	/*
	 * wait for the specified time to check if the DMC sends the
	 * shutdown request
	 */
	(void) gettimeofday(&ct, NULL);
	to.tv_sec = ct.tv_sec + env_dmc_wait_time;
	to.tv_nsec = 0;
	(void) pthread_cond_timedwait(&env_dmc_cond,
		&env_dmc_mutex, &to);
	if (env_got_dmc_msg == B_TRUE) {
		(void) pthread_mutex_unlock(&env_dmc_mutex);
		return (NULL);
	}
	(void) pthread_mutex_unlock(&env_dmc_mutex);

	env_shutdown_system = B_TRUE;
	env_reset_cpu = B_FALSE;
	(void) initiate_shutdown(B_FALSE);
	return (NULL);
}

/*
 * Handle the latch open event (shutdown the node)
 */
picl_errno_t
env_platmod_handle_latch_open()
{
	/*
	 * create a thread to process the local event after waiting for the
	 * DMC CPU node state offline message
	 */
	if (pthread_create(&dmc_thr_tid, NULL, &env_wait_for_dmc_msg,
		NULL) != 0) {
		syslog(LOG_ERR, gettext("SUNW_envmond:Error in creating "
			"dmc thread"));
		return (PICL_FAILURE);
	}
	return (PICL_SUCCESS);
}

/*
 * For Sanibel, hotswap initialization is not required.
 */
picl_errno_t
env_platmod_setup_hotswap()
{
	return (PICL_SUCCESS);
}

/*
 * For Sanibel this support is not required
 */
picl_errno_t
env_platmod_sp_monitor()
{
	return (PICL_SUCCESS);
}

/*
 * For Sanibel this support is not required
 */
picl_errno_t
env_platmod_create_hotswap_prop()
{
	return (PICL_SUCCESS);
}

/*
 * For Sanibel this support is not required
 */
/*ARGSUSED*/
void
process_platmod_sp_heartbeat(uint8_t data)
{
}

/*
 * For Sanibel this support is not required
 */
/*ARGSUSED*/
int
process_platmod_async_msg_notif(void *resdatap)
{
	return (0);
}

/*
 * For Sanibel this support is not required
 */
/*ARGSUSED*/
int
process_platmod_change_cpci_state(void *res_datap)
{
	return (0);
}

/*
 * handle a request from the service processor for shutdown/online
 */
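/*
 * As decoded below, bit 0 of the state byte selects the requested node
 * state (offline/online), bit 1 is the force flag and bit 2 requests a
 * reboot rather than a halt.  Offline requests are acknowledged with
 * completion code 0x0 on success or 0xFF if the shutdown could not be
 * initiated.
 */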
int
process_platmod_change_cpu_node_state(void *res_datap)
{
	int rc = SMC_SUCCESS;
	uint8_t state = BYTE_7(res_datap);
	boolean_t force_flag = B_FALSE;

	switch (state & 1) {
	case CPU_NODE_STATE_OFFLINE:
		(void) pthread_mutex_lock(&env_dmc_mutex);
		env_got_dmc_msg = B_TRUE;
		(void) pthread_cond_signal(&env_dmc_cond);
		(void) pthread_mutex_unlock(&env_dmc_mutex);
		env_shutdown_system = B_TRUE;
		if ((state >> 2) & 1)
			env_reset_cpu = B_TRUE;
		if (state >> 1 & 1) {	/* force flag set? */
			force_flag = B_TRUE;
		} else {
			force_flag = B_FALSE;
		}

		if (initiate_shutdown(force_flag) == 0) {
			if ((rc = send_response2remote_device(SMC_BMC_ADDR,
				EVENT_MSG_CHANGE_CPU_NODE_STATE,
				BYTE_5(res_datap), 0x0)) != SMC_SUCCESS) {
				return (rc);
			}
		} else {
			if ((rc = send_response2remote_device(SMC_BMC_ADDR,
				EVENT_MSG_CHANGE_CPU_NODE_STATE,
				BYTE_5(res_datap), 0xFF)) != SMC_SUCCESS) {
				return (rc);
			}
			env_shutdown_system = B_FALSE;
			if ((state >> 2) & 1)
				env_reset_cpu = B_FALSE;
		}
		break;
	case CPU_NODE_STATE_ONLINE:
		if ((rc = send_response2remote_device(SMC_BMC_ADDR,
			EVENT_MSG_CHANGE_CPU_NODE_STATE,
			BYTE_5(res_datap), 0x0)) != SMC_SUCCESS) {
			return (rc);
		}
		break;
	default:
		break;
	}
	return (0);
}

/*
 * Handle change in state of service processor
 */
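/*
 * Requests are only honored from SMC_BMC_ADDR; both online and offline
 * notifications are acknowledged with completion code 0x0, anything
 * else with 0xFF.
 */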
int
process_platmod_sp_state_change_notif(void *res_datap)
{
	int rc = SMC_SUCCESS;
	uint8_t state = BYTE_7(res_datap);
	uint8_t rq_addr = BYTE_4(res_datap);

	if (rq_addr != SMC_BMC_ADDR) {
		return (PICL_FAILURE);
	}

	switch (state) {
	case CPU_NODE_STATE_ONLINE:
		/* Send ACK to service processor */
		if ((rc = send_response2remote_device(SMC_BMC_ADDR,
			EVENT_MSG_AC_STATE_CHANGE,
			BYTE_5(res_datap), 0x0)) != SMC_SUCCESS) {
			return (rc);
		}
		break;

	case CPU_NODE_STATE_OFFLINE:
		/* Send ACK to service processor */
		if ((rc = send_response2remote_device(SMC_BMC_ADDR,
			EVENT_MSG_AC_STATE_CHANGE,
			BYTE_5(res_datap), 0x0)) != SMC_SUCCESS) {
			return (rc);
		}
		break;

	default:
		if ((rc = send_response2remote_device(SMC_BMC_ADDR,
			EVENT_MSG_AC_STATE_CHANGE,
			BYTE_5(res_datap), 0xFF)) != SMC_SUCCESS) {
			return (rc);
		}
		break;
	}
	return (0);
}

/*
 * For Sanibel this support is not required
 */
/*ARGSUSED*/
picl_errno_t
env_platmod_handle_bus_if_change(uint8_t data)
{
	return (PICL_SUCCESS);
}

/*
 * create the temperature sensor nodes
 */
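/*
 * Resolves the root, platform and sysmgmt node handles as needed, then
 * creates the CPU temperature sensor node at geographical address
 * CPU_SENSOR_GEO_ADDR (0xe) under the sysmgmt node.
 */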
picl_errno_t
env_platmod_create_sensors()
{
	picl_errno_t rc = PICL_SUCCESS;

	if (rooth == 0) {
		if ((rc = ptree_get_root(&rooth)) != PICL_SUCCESS) {
			return (rc);
		}
	}

	if (platformh == 0) {
		if ((rc = ptree_get_node_by_path(PLATFORM_PATH,
			&platformh)) != PICL_SUCCESS) {
			return (rc);
		}
	}

	if (sysmgmth == 0) {
		if ((rc = ptree_get_node_by_path(SYSMGMT_PATH,
			&sysmgmth)) != PICL_SUCCESS) {
			return (rc);
		}
	}

	rc = env_create_temp_sensor_node(sysmgmth, CPU_SENSOR_GEO_ADDR);
	return (rc);
}

/*
 * handler for sensor event
 */
void
env_platmod_handle_sensor_event(void *res_datap)
{
	if (BYTE_4(res_datap) != CPU_SENSOR_GEO_ADDR) {
		return;
	}
	env_handle_sensor_event(res_datap);
}