/*
 *
 * Procedures for interfacing to the RTAS on CHRP machines.
 *
 * Peter Bergner, IBM	March 2001.
 * Copyright (C) 2001 IBM.
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */

#include <stdarg.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/capability.h>
#include <linux/delay.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/completion.h>
#include <linux/cpumask.h>
#include <linux/memblock.h>
#include <linux/slab.h>
#include <linux/reboot.h>

#include <asm/prom.h>
#include <asm/rtas.h>
#include <asm/hvcall.h>
#include <asm/machdep.h>
#include <asm/firmware.h>
#include <asm/page.h>
#include <asm/param.h>
#include <asm/delay.h>
#include <asm/uaccess.h>
#include <asm/udbg.h>
#include <asm/syscalls.h>
#include <asm/smp.h>
#include <linux/atomic.h>
#include <asm/time.h>
#include <asm/mmu.h>
#include <asm/topology.h>

struct rtas_t rtas = {
	.lock = __ARCH_SPIN_LOCK_UNLOCKED
};
EXPORT_SYMBOL(rtas);

DEFINE_SPINLOCK(rtas_data_buf_lock);
EXPORT_SYMBOL(rtas_data_buf_lock);

char rtas_data_buf[RTAS_DATA_BUF_SIZE] __cacheline_aligned;
EXPORT_SYMBOL(rtas_data_buf);

unsigned long rtas_rmo_buf;

/*
 * If non-NULL, this gets called when the kernel terminates.
 * This is done like this so rtas_flash can be a module.
 */
void (*rtas_flash_term_hook)(int);
EXPORT_SYMBOL(rtas_flash_term_hook);

/*
 * RTAS uses home-made raw locking instead of spin_lock_irqsave()
 * because it can be called from really nasty contexts, such as with
 * the timebase stopped, which would deadlock with normal locks and
 * spinlock debugging enabled.
 */
static unsigned long lock_rtas(void)
{
	unsigned long flags;

	local_irq_save(flags);
	preempt_disable();
	arch_spin_lock_flags(&rtas.lock, flags);
	return flags;
}

static void unlock_rtas(unsigned long flags)
{
	arch_spin_unlock(&rtas.lock);
	local_irq_restore(flags);
	preempt_enable();
}

/*
 * call_rtas_display_status and call_rtas_display_status_delay
 * are designed only for very early low-level debugging, which
 * is why the token is hard-coded to 10.
 */
static void call_rtas_display_status(unsigned char c)
{
	struct rtas_args *args = &rtas.args;
	unsigned long s;

	if (!rtas.base)
		return;
	s = lock_rtas();

	args->token = cpu_to_be32(10);
	args->nargs = cpu_to_be32(1);
	args->nret  = cpu_to_be32(1);
	args->rets  = &(args->args[1]);
	args->args[0] = cpu_to_be32(c);

	enter_rtas(__pa(args));

	unlock_rtas(s);
}

static void call_rtas_display_status_delay(char c)
{
	static int pending_newline = 0;  /* did last write end with unprinted newline? */
	static int width = 16;

	if (c == '\n') {
		while (width-- > 0)
			call_rtas_display_status(' ');
		width = 16;
		mdelay(500);
		pending_newline = 1;
	} else {
		if (pending_newline) {
			call_rtas_display_status('\r');
			call_rtas_display_status('\n');
		}
		pending_newline = 0;
		if (width--) {
			call_rtas_display_status(c);
			udelay(10000);
		}
	}
}

void __init udbg_init_rtas_panel(void)
{
	udbg_putc = call_rtas_display_status_delay;
}

#ifdef CONFIG_UDBG_RTAS_CONSOLE

/* If you think you're dying before early_init_dt_scan_rtas() does its
 * work, you can hard code the token values for your firmware here and
 * hardcode rtas.base/entry etc.
 */
static unsigned int rtas_putchar_token = RTAS_UNKNOWN_SERVICE;
static unsigned int rtas_getchar_token = RTAS_UNKNOWN_SERVICE;

static void udbg_rtascon_putc(char c)
{
	int tries;

	if (!rtas.base)
		return;

	/* Add CRs before LFs */
	if (c == '\n')
		udbg_rtascon_putc('\r');
	/* if the console is busy, retry for a short while before giving up */
	for (tries = 0; tries < 16; tries++) {
		if (rtas_call(rtas_putchar_token, 1, 1, NULL, c) == 0)
			break;
		udelay(1000);
	}
}

static int udbg_rtascon_getc_poll(void)
{
	int c;

	if (!rtas.base)
		return -1;

	if (rtas_call(rtas_getchar_token, 0, 2, &c))
		return -1;

	return c;
}

static int udbg_rtascon_getc(void)
{
	int c;

	while ((c = udbg_rtascon_getc_poll()) == -1)
		;

	return c;
}


void __init udbg_init_rtas_console(void)
{
	udbg_putc = udbg_rtascon_putc;
	udbg_getc = udbg_rtascon_getc;
	udbg_getc_poll = udbg_rtascon_getc_poll;
}
#endif /* CONFIG_UDBG_RTAS_CONSOLE */

void rtas_progress(char *s, unsigned short hex)
{
	struct device_node *root;
	int width;
	const __be32 *p;
	char *os;
	static int display_character, set_indicator;
	static int display_width, display_lines, form_feed;
	static const int *row_width;
	static DEFINE_SPINLOCK(progress_lock);
	static int current_line;
	static int pending_newline = 0;  /* did last write end with unprinted newline? */

	if (!rtas.base)
		return;

	if (display_width == 0) {
		display_width = 0x10;
		if ((root = of_find_node_by_path("/rtas"))) {
			if ((p = of_get_property(root,
					"ibm,display-line-length", NULL)))
				display_width = be32_to_cpu(*p);
			if ((p = of_get_property(root,
					"ibm,form-feed", NULL)))
				form_feed = be32_to_cpu(*p);
			if ((p = of_get_property(root,
					"ibm,display-number-of-lines", NULL)))
				display_lines = be32_to_cpu(*p);
			row_width = of_get_property(root,
					"ibm,display-truncation-length", NULL);
			of_node_put(root);
		}
		display_character = rtas_token("display-character");
		set_indicator = rtas_token("set-indicator");
	}

	if (display_character == RTAS_UNKNOWN_SERVICE) {
		/* use hex display if available */
		if (set_indicator != RTAS_UNKNOWN_SERVICE)
			rtas_call(set_indicator, 3, 1, NULL, 6, 0, hex);
		return;
	}

	spin_lock(&progress_lock);

	/*
	 * Last write ended with newline, but we didn't print it since
	 * it would just clear the bottom line of output. Print it now
	 * instead.
	 *
	 * If no newline is pending and form feed is supported, clear the
	 * display with a form feed; otherwise, print a CR to start output
	 * at the beginning of the line.
	 */
	if (pending_newline) {
		rtas_call(display_character, 1, 1, NULL, '\r');
		rtas_call(display_character, 1, 1, NULL, '\n');
		pending_newline = 0;
	} else {
		current_line = 0;
		if (form_feed)
			rtas_call(display_character, 1, 1, NULL,
				  (char)form_feed);
		else
			rtas_call(display_character, 1, 1, NULL, '\r');
	}

	if (row_width)
		width = row_width[current_line];
	else
		width = display_width;
	os = s;
	while (*os) {
		if (*os == '\n' || *os == '\r') {
			/* If newline is the last character, save it
			 * until next call to avoid bumping up the
			 * display output.
			 */
			if (*os == '\n' && !os[1]) {
				pending_newline = 1;
				current_line++;
				if (current_line > display_lines-1)
					current_line = display_lines-1;
				spin_unlock(&progress_lock);
				return;
			}

			/* RTAS wants CR-LF, not just LF */

			if (*os == '\n') {
				rtas_call(display_character, 1, 1, NULL, '\r');
				rtas_call(display_character, 1, 1, NULL, '\n');
			} else {
				/* CR might be used to re-draw a line, so we'll
				 * leave it alone and not add LF.
				 */
				rtas_call(display_character, 1, 1, NULL, *os);
			}

			if (row_width)
				width = row_width[current_line];
			else
				width = display_width;
		} else {
			width--;
			rtas_call(display_character, 1, 1, NULL, *os);
		}

		os++;

		/* if we've run past the line width, skip the rest of this line */
		if (width <= 0)
			while ((*os != 0) && (*os != '\n') && (*os != '\r'))
				os++;
	}

	spin_unlock(&progress_lock);
}
EXPORT_SYMBOL(rtas_progress);		/* needed by rtas_flash module */

int rtas_token(const char *service)
{
	const __be32 *tokp;
	if (rtas.dev == NULL)
		return RTAS_UNKNOWN_SERVICE;
	tokp = of_get_property(rtas.dev, service, NULL);
	return tokp ? be32_to_cpu(*tokp) : RTAS_UNKNOWN_SERVICE;
}
EXPORT_SYMBOL(rtas_token);

int rtas_service_present(const char *service)
{
	return rtas_token(service) != RTAS_UNKNOWN_SERVICE;
}
EXPORT_SYMBOL(rtas_service_present);

#ifdef CONFIG_RTAS_ERROR_LOGGING
/*
 * Return the firmware-specified size of the error log buffer
 *  for all rtas calls that require an error buffer argument.
 *  This includes 'check-exception' and 'rtas-last-error'.
 */
int rtas_get_error_log_max(void)
{
	static int rtas_error_log_max;
	if (rtas_error_log_max)
		return rtas_error_log_max;

	rtas_error_log_max = rtas_token ("rtas-error-log-max");
	if ((rtas_error_log_max == RTAS_UNKNOWN_SERVICE) ||
	    (rtas_error_log_max > RTAS_ERROR_LOG_MAX)) {
		printk (KERN_WARNING "RTAS: bad log buffer size %d\n",
			rtas_error_log_max);
		rtas_error_log_max = RTAS_ERROR_LOG_MAX;
	}
	return rtas_error_log_max;
}
EXPORT_SYMBOL(rtas_get_error_log_max);


static char rtas_err_buf[RTAS_ERROR_LOG_MAX];
static int rtas_last_error_token;

/** Return a copy of the detailed error text associated with the
 *  most recent failed call to rtas.  Because the error text
 *  might go stale if there are any other intervening rtas calls,
 *  this routine must be called atomically with whatever produced
 *  the error (i.e. with rtas.lock still held from the previous call).
 */
static char *__fetch_rtas_last_error(char *altbuf)
{
	struct rtas_args err_args, save_args;
	u32 bufsz;
	char *buf = NULL;

	if (rtas_last_error_token == -1)
		return NULL;

	bufsz = rtas_get_error_log_max();

	err_args.token = cpu_to_be32(rtas_last_error_token);
	err_args.nargs = cpu_to_be32(2);
	err_args.nret = cpu_to_be32(1);
	err_args.args[0] = cpu_to_be32(__pa(rtas_err_buf));
	err_args.args[1] = cpu_to_be32(bufsz);
	err_args.args[2] = 0;

	save_args = rtas.args;
	rtas.args = err_args;

	enter_rtas(__pa(&rtas.args));

	err_args = rtas.args;
	rtas.args = save_args;

	/* Log the error in the unlikely case that there was one. */
	if (unlikely(err_args.args[2] == 0)) {
		if (altbuf) {
			buf = altbuf;
		} else {
			buf = rtas_err_buf;
			if (slab_is_available())
				buf = kmalloc(RTAS_ERROR_LOG_MAX, GFP_ATOMIC);
		}
		if (buf)
			memcpy(buf, rtas_err_buf, RTAS_ERROR_LOG_MAX);
	}

	return buf;
}

#define get_errorlog_buffer()	kmalloc(RTAS_ERROR_LOG_MAX, GFP_KERNEL)

#else /* CONFIG_RTAS_ERROR_LOGGING */
#define __fetch_rtas_last_error(x)	NULL
#define get_errorlog_buffer()		NULL
#endif

int rtas_call(int token, int nargs, int nret, int *outputs, ...)
{
	va_list list;
	int i;
	unsigned long s;
	struct rtas_args *rtas_args;
	char *buff_copy = NULL;
	int ret;

	if (!rtas.entry || token == RTAS_UNKNOWN_SERVICE)
		return -1;

	s = lock_rtas();
	rtas_args = &rtas.args;

	rtas_args->token = cpu_to_be32(token);
	rtas_args->nargs = cpu_to_be32(nargs);
	rtas_args->nret  = cpu_to_be32(nret);
	rtas_args->rets  = &(rtas_args->args[nargs]);
	va_start(list, outputs);
	for (i = 0; i < nargs; ++i)
		rtas_args->args[i] = cpu_to_be32(va_arg(list, __u32));
	va_end(list);

	for (i = 0; i < nret; ++i)
		rtas_args->rets[i] = 0;

	enter_rtas(__pa(rtas_args));

	/* A -1 return code indicates that the last command couldn't
	   be completed due to a hardware error. */
	if (be32_to_cpu(rtas_args->rets[0]) == -1)
		buff_copy = __fetch_rtas_last_error(NULL);

	if (nret > 1 && outputs != NULL)
		for (i = 0; i < nret-1; ++i)
			outputs[i] = be32_to_cpu(rtas_args->rets[i+1]);
	ret = (nret > 0)? be32_to_cpu(rtas_args->rets[0]): 0;

	unlock_rtas(s);

	if (buff_copy) {
		log_error(buff_copy, ERR_TYPE_RTAS_LOG, 0);
		if (slab_is_available())
			kfree(buff_copy);
	}
	return ret;
}
EXPORT_SYMBOL(rtas_call);
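
/*
 * Calling convention sketch for rtas_call(), mirroring the callers below
 * (illustrative only, not compiled): rets[0] is the RTAS status word and is
 * what rtas_call() returns; any further return words are converted to CPU
 * endian and copied into @outputs.  For a service with two inputs and two
 * returns, e.g. get-sensor-state:
 *
 *	int state;
 *	int rc = rtas_call(token, 2, 2, &state, sensor, index);
 *
 * Here rc is rets[0] (the status) and state receives rets[1].
 */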

/* For RTAS_BUSY (-2), delay for 1 millisecond.  For an extended busy status
 * code of 990n, perform the hinted delay of 10^n (last digit) milliseconds.
 */
unsigned int rtas_busy_delay_time(int status)
{
	int order;
	unsigned int ms = 0;

	if (status == RTAS_BUSY) {
		ms = 1;
	} else if (status >= RTAS_EXTENDED_DELAY_MIN &&
		   status <= RTAS_EXTENDED_DELAY_MAX) {
		order = status - RTAS_EXTENDED_DELAY_MIN;
		for (ms = 1; order > 0; order--)
			ms *= 10;
	}

	return ms;
}
EXPORT_SYMBOL(rtas_busy_delay_time);
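
/*
 * Worked examples of the 990n encoding above (values follow directly from
 * the loop in rtas_busy_delay_time()): status 9900 -> 1 ms, 9902 -> 100 ms,
 * 9905 (RTAS_EXTENDED_DELAY_MAX) -> 100000 ms.
 */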

/* For an RTAS busy status code, perform the hinted delay. */
unsigned int rtas_busy_delay(int status)
{
	unsigned int ms;

	might_sleep();
	ms = rtas_busy_delay_time(status);
	if (ms && need_resched())
		msleep(ms);

	return ms;
}
EXPORT_SYMBOL(rtas_busy_delay);
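
/*
 * Typical retry pattern built on rtas_call()/rtas_busy_delay(), as used by
 * the helpers below (illustrative sketch, not compiled here; param, buf and
 * len are placeholders):
 *
 *	int token = rtas_token("ibm,get-system-parameter");
 *	int rc;
 *
 *	do {
 *		rc = rtas_call(token, 3, 1, NULL, param, __pa(buf), len);
 *	} while (rtas_busy_delay(rc));
 *
 * rtas_busy_delay() returns non-zero (possibly after sleeping) for
 * busy/extended-delay statuses, so the loop retries until RTAS returns a
 * final status.  Callers must be able to sleep.
 */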

static int rtas_error_rc(int rtas_rc)
{
	int rc;

	switch (rtas_rc) {
		case -1: 		/* Hardware Error */
			rc = -EIO;
			break;
		case -3:		/* Bad indicator/domain/etc */
			rc = -EINVAL;
			break;
		case -9000:		/* Isolation error */
			rc = -EFAULT;
			break;
		case -9001:		/* Outstanding TCE/PTE */
			rc = -EEXIST;
			break;
		case -9002:		/* No usable slot */
			rc = -ENODEV;
			break;
		default:
			printk(KERN_ERR "%s: unexpected RTAS error %d\n",
					__func__, rtas_rc);
			rc = -ERANGE;
			break;
	}
	return rc;
}

int rtas_get_power_level(int powerdomain, int *level)
{
	int token = rtas_token("get-power-level");
	int rc;

	if (token == RTAS_UNKNOWN_SERVICE)
		return -ENOENT;

	while ((rc = rtas_call(token, 1, 2, level, powerdomain)) == RTAS_BUSY)
		udelay(1);

	if (rc < 0)
		return rtas_error_rc(rc);
	return rc;
}
EXPORT_SYMBOL(rtas_get_power_level);

int rtas_set_power_level(int powerdomain, int level, int *setlevel)
{
	int token = rtas_token("set-power-level");
	int rc;

	if (token == RTAS_UNKNOWN_SERVICE)
		return -ENOENT;

	do {
		rc = rtas_call(token, 2, 2, setlevel, powerdomain, level);
	} while (rtas_busy_delay(rc));

	if (rc < 0)
		return rtas_error_rc(rc);
	return rc;
}
EXPORT_SYMBOL(rtas_set_power_level);

int rtas_get_sensor(int sensor, int index, int *state)
{
	int token = rtas_token("get-sensor-state");
	int rc;

	if (token == RTAS_UNKNOWN_SERVICE)
		return -ENOENT;

	do {
		rc = rtas_call(token, 2, 2, state, sensor, index);
	} while (rtas_busy_delay(rc));

	if (rc < 0)
		return rtas_error_rc(rc);
	return rc;
}
EXPORT_SYMBOL(rtas_get_sensor);

int rtas_get_sensor_fast(int sensor, int index, int *state)
{
	int token = rtas_token("get-sensor-state");
	int rc;

	if (token == RTAS_UNKNOWN_SERVICE)
		return -ENOENT;

	rc = rtas_call(token, 2, 2, state, sensor, index);
	WARN_ON(rc == RTAS_BUSY || (rc >= RTAS_EXTENDED_DELAY_MIN &&
				    rc <= RTAS_EXTENDED_DELAY_MAX));

	if (rc < 0)
		return rtas_error_rc(rc);
	return rc;
}

bool rtas_indicator_present(int token, int *maxindex)
{
	int proplen, count, i;
	const struct indicator_elem {
		__be32 token;
		__be32 maxindex;
	} *indicators;

	indicators = of_get_property(rtas.dev, "rtas-indicators", &proplen);
	if (!indicators)
		return false;

	count = proplen / sizeof(struct indicator_elem);

	for (i = 0; i < count; i++) {
		if (__be32_to_cpu(indicators[i].token) != token)
			continue;
		if (maxindex)
			*maxindex = __be32_to_cpu(indicators[i].maxindex);
		return true;
	}

	return false;
}
EXPORT_SYMBOL(rtas_indicator_present);

int rtas_set_indicator(int indicator, int index, int new_value)
{
	int token = rtas_token("set-indicator");
	int rc;

	if (token == RTAS_UNKNOWN_SERVICE)
		return -ENOENT;

	do {
		rc = rtas_call(token, 3, 1, NULL, indicator, index, new_value);
	} while (rtas_busy_delay(rc));

	if (rc < 0)
		return rtas_error_rc(rc);
	return rc;
}
EXPORT_SYMBOL(rtas_set_indicator);

/*
 * Like rtas_set_indicator(), but ignores RTAS busy/extended delay status
 * codes, for callers that cannot sleep.
 */
int rtas_set_indicator_fast(int indicator, int index, int new_value)
{
	int rc;
	int token = rtas_token("set-indicator");

	if (token == RTAS_UNKNOWN_SERVICE)
		return -ENOENT;

	rc = rtas_call(token, 3, 1, NULL, indicator, index, new_value);

	WARN_ON(rc == RTAS_BUSY || (rc >= RTAS_EXTENDED_DELAY_MIN &&
				    rc <= RTAS_EXTENDED_DELAY_MAX));

	if (rc < 0)
		return rtas_error_rc(rc);

	return rc;
}

void rtas_restart(char *cmd)
{
	if (rtas_flash_term_hook)
		rtas_flash_term_hook(SYS_RESTART);
	printk("RTAS system-reboot returned %d\n",
	       rtas_call(rtas_token("system-reboot"), 0, 1, NULL));
	for (;;);
}

void rtas_power_off(void)
{
	if (rtas_flash_term_hook)
		rtas_flash_term_hook(SYS_POWER_OFF);
	/* allow power on only with power button press */
	printk("RTAS power-off returned %d\n",
	       rtas_call(rtas_token("power-off"), 2, 1, NULL, -1, -1));
	for (;;);
}

void rtas_halt(void)
{
	if (rtas_flash_term_hook)
		rtas_flash_term_hook(SYS_HALT);
	/* allow power on only with power button press */
	printk("RTAS power-off returned %d\n",
	       rtas_call(rtas_token("power-off"), 2, 1, NULL, -1, -1));
	for (;;);
}

/* Must be in the RMO region, so we place it here */
static char rtas_os_term_buf[2048];

void rtas_os_term(char *str)
{
	int status;

	/*
	 * Firmware with the ibm,extended-os-term property is guaranteed
	 * to always return from an ibm,os-term call. Earlier versions without
	 * this property may terminate the partition which we want to avoid
	 * since it interferes with panic_timeout.
	 */
	if (RTAS_UNKNOWN_SERVICE == rtas_token("ibm,os-term") ||
	    RTAS_UNKNOWN_SERVICE == rtas_token("ibm,extended-os-term"))
		return;

	snprintf(rtas_os_term_buf, 2048, "OS panic: %s", str);

	do {
		status = rtas_call(rtas_token("ibm,os-term"), 1, 1, NULL,
				   __pa(rtas_os_term_buf));
	} while (rtas_busy_delay(status));

	if (status != 0)
		printk(KERN_EMERG "ibm,os-term call failed %d\n", status);
}

static int ibm_suspend_me_token = RTAS_UNKNOWN_SERVICE;
#ifdef CONFIG_PPC_PSERIES
static int __rtas_suspend_last_cpu(struct rtas_suspend_me_data *data, int wake_when_done)
{
	u16 slb_size = mmu_slb_size;
	int rc = H_MULTI_THREADS_ACTIVE;
	int cpu;

	slb_set_size(SLB_MIN_SIZE);
	printk(KERN_DEBUG "calling ibm,suspend-me on cpu %i\n", smp_processor_id());

	while (rc == H_MULTI_THREADS_ACTIVE && !atomic_read(&data->done) &&
	       !atomic_read(&data->error))
		rc = rtas_call(data->token, 0, 1, NULL);

	if (rc || atomic_read(&data->error)) {
		printk(KERN_DEBUG "ibm,suspend-me returned %d\n", rc);
		slb_set_size(slb_size);
	}

	if (atomic_read(&data->error))
		rc = atomic_read(&data->error);

	atomic_set(&data->error, rc);
	pSeries_coalesce_init();

	if (wake_when_done) {
		atomic_set(&data->done, 1);

		for_each_online_cpu(cpu)
			plpar_hcall_norets(H_PROD, get_hard_smp_processor_id(cpu));
	}

	if (atomic_dec_return(&data->working) == 0)
		complete(data->complete);

	return rc;
}

int rtas_suspend_last_cpu(struct rtas_suspend_me_data *data)
{
	atomic_inc(&data->working);
	return __rtas_suspend_last_cpu(data, 0);
}

static int __rtas_suspend_cpu(struct rtas_suspend_me_data *data, int wake_when_done)
{
	long rc = H_SUCCESS;
	unsigned long msr_save;
	int cpu;

	atomic_inc(&data->working);

	/* really need to ensure MSR.EE is off for H_JOIN */
	msr_save = mfmsr();
	mtmsr(msr_save & ~(MSR_EE));

	while (rc == H_SUCCESS && !atomic_read(&data->done) && !atomic_read(&data->error))
		rc = plpar_hcall_norets(H_JOIN);

	mtmsr(msr_save);

	if (rc == H_SUCCESS) {
		/* This cpu was prodded and the suspend is complete. */
		goto out;
	} else if (rc == H_CONTINUE) {
		/* All other cpus are in H_JOIN, this cpu does
		 * the suspend.
		 */
		return __rtas_suspend_last_cpu(data, wake_when_done);
	} else {
		printk(KERN_ERR "H_JOIN on cpu %i failed with rc = %ld\n",
		       smp_processor_id(), rc);
		atomic_set(&data->error, rc);
	}

	if (wake_when_done) {
		atomic_set(&data->done, 1);

		/* This cpu did the suspend or got an error; in either case,
		 * we need to prod all other cpus out of join state.
		 * Extra prods are harmless.
		 */
		for_each_online_cpu(cpu)
			plpar_hcall_norets(H_PROD, get_hard_smp_processor_id(cpu));
	}
out:
	if (atomic_dec_return(&data->working) == 0)
		complete(data->complete);
	return rc;
}

int rtas_suspend_cpu(struct rtas_suspend_me_data *data)
{
	return __rtas_suspend_cpu(data, 0);
}

static void rtas_percpu_suspend_me(void *info)
{
	__rtas_suspend_cpu((struct rtas_suspend_me_data *)info, 1);
}

enum rtas_cpu_state {
	DOWN,
	UP,
};

#ifndef CONFIG_SMP
static int rtas_cpu_state_change_mask(enum rtas_cpu_state state,
				cpumask_var_t cpus)
{
	if (!cpumask_empty(cpus)) {
		cpumask_clear(cpus);
		return -EINVAL;
	} else
		return 0;
}
#else
/* On return the cpumask indicates which CPUs actually changed state:
 * CPUs whose state changed are set in the mask,
 * CPUs whose state was unchanged are cleared in the mask. */
static int rtas_cpu_state_change_mask(enum rtas_cpu_state state,
				cpumask_var_t cpus)
{
	int cpu;
	int cpuret = 0;
	int ret = 0;

	if (cpumask_empty(cpus))
		return 0;

	for_each_cpu(cpu, cpus) {
		switch (state) {
		case DOWN:
			cpuret = cpu_down(cpu);
			break;
		case UP:
			cpuret = cpu_up(cpu);
			break;
		}
		if (cpuret) {
			pr_debug("%s: cpu_%s for cpu#%d returned %d.\n",
					__func__,
					((state == UP) ? "up" : "down"),
					cpu, cpuret);
			if (!ret)
				ret = cpuret;
			if (state == UP) {
				/* clear bits for unchanged cpus, return */
				cpumask_shift_right(cpus, cpus, cpu);
				cpumask_shift_left(cpus, cpus, cpu);
				break;
			} else {
				/* clear bit for unchanged cpu, continue */
				cpumask_clear_cpu(cpu, cpus);
			}
		}
	}

	return ret;
}
#endif

int rtas_online_cpus_mask(cpumask_var_t cpus)
{
	int ret;

	ret = rtas_cpu_state_change_mask(UP, cpus);

	if (ret) {
		cpumask_var_t tmp_mask;

		if (!alloc_cpumask_var(&tmp_mask, GFP_TEMPORARY))
			return ret;

		/* Use tmp_mask to preserve cpus mask from first failure */
		cpumask_copy(tmp_mask, cpus);
		rtas_offline_cpus_mask(tmp_mask);
		free_cpumask_var(tmp_mask);
	}

	return ret;
}
EXPORT_SYMBOL(rtas_online_cpus_mask);

int rtas_offline_cpus_mask(cpumask_var_t cpus)
{
	return rtas_cpu_state_change_mask(DOWN, cpus);
}
EXPORT_SYMBOL(rtas_offline_cpus_mask);

int rtas_ibm_suspend_me(u64 handle)
{
	long state;
	long rc;
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
	struct rtas_suspend_me_data data;
	DECLARE_COMPLETION_ONSTACK(done);
	cpumask_var_t offline_mask;
	int cpuret;

	if (!rtas_service_present("ibm,suspend-me"))
		return -ENOSYS;

	/* Make sure the state is valid */
	rc = plpar_hcall(H_VASI_STATE, retbuf, handle);

	state = retbuf[0];

	if (rc) {
		printk(KERN_ERR "rtas_ibm_suspend_me: vasi_state returned %ld\n",rc);
		return rc;
	} else if (state == H_VASI_ENABLED) {
		return -EAGAIN;
	} else if (state != H_VASI_SUSPENDING) {
		printk(KERN_ERR "rtas_ibm_suspend_me: vasi_state returned state %ld\n",
		       state);
		return -EIO;
	}

	if (!alloc_cpumask_var(&offline_mask, GFP_TEMPORARY))
		return -ENOMEM;

	atomic_set(&data.working, 0);
	atomic_set(&data.done, 0);
	atomic_set(&data.error, 0);
	data.token = rtas_token("ibm,suspend-me");
	data.complete = &done;

	/* All present CPUs must be online */
	cpumask_andnot(offline_mask, cpu_present_mask, cpu_online_mask);
	cpuret = rtas_online_cpus_mask(offline_mask);
	if (cpuret) {
		pr_err("%s: Could not bring present CPUs online.\n", __func__);
		atomic_set(&data.error, cpuret);
		goto out;
	}

	stop_topology_update();

	/* Call function on all CPUs.  One of us will make the
	 * rtas call
	 */
	if (on_each_cpu(rtas_percpu_suspend_me, &data, 0))
		atomic_set(&data.error, -EINVAL);

	wait_for_completion(&done);

	if (atomic_read(&data.error) != 0)
		printk(KERN_ERR "Error doing global join\n");

	start_topology_update();

	/* Take down CPUs not online prior to suspend */
	cpuret = rtas_offline_cpus_mask(offline_mask);
	if (cpuret)
		pr_warn("%s: Could not restore CPUs to offline state.\n",
				__func__);

out:
	free_cpumask_var(offline_mask);
	return atomic_read(&data.error);
}
#else /* CONFIG_PPC_PSERIES */
int rtas_ibm_suspend_me(u64 handle)
{
	return -ENOSYS;
}
#endif

/**
 * get_pseries_errorlog() - Find a specific pseries error log in an RTAS
 *                          extended event log.
 * @log: RTAS error/event log
 * @section_id: two character section identifier
 *
 * Return: A pointer to the specified errorlog or NULL if not found.
 */
struct pseries_errorlog *get_pseries_errorlog(struct rtas_error_log *log,
					      uint16_t section_id)
{
	struct rtas_ext_event_log_v6 *ext_log =
		(struct rtas_ext_event_log_v6 *)log->buffer;
	struct pseries_errorlog *sect;
	unsigned char *p, *log_end;
	uint32_t ext_log_length = rtas_error_extended_log_length(log);
	uint8_t log_format = rtas_ext_event_log_format(ext_log);
	uint32_t company_id = rtas_ext_event_company_id(ext_log);

	/* Check that we understand the format */
	if (ext_log_length < sizeof(struct rtas_ext_event_log_v6) ||
	    log_format != RTAS_V6EXT_LOG_FORMAT_EVENT_LOG ||
	    company_id != RTAS_V6EXT_COMPANY_ID_IBM)
		return NULL;

	log_end = log->buffer + ext_log_length;
	p = ext_log->vendor_log;

	while (p < log_end) {
		sect = (struct pseries_errorlog *)p;
		if (pseries_errorlog_id(sect) == section_id)
			return sect;
		p += pseries_errorlog_length(sect);
	}

	return NULL;
}
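
/*
 * Illustrative lookup (sketch, not compiled here); section IDs such as
 * PSERIES_ELOG_SECT_ID_EPOW come from asm/rtas.h and the handler below is
 * hypothetical:
 *
 *	struct pseries_errorlog *sect;
 *
 *	sect = get_pseries_errorlog(log, PSERIES_ELOG_SECT_ID_EPOW);
 *	if (sect)
 *		handle_epow_section(sect);
 */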

/* We assume we are passed big-endian arguments */
asmlinkage int ppc_rtas(struct rtas_args __user *uargs)
{
	struct rtas_args args;
	unsigned long flags;
	char *buff_copy, *errbuf = NULL;
	int nargs, nret, token;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (!rtas.entry)
		return -EINVAL;

	if (copy_from_user(&args, uargs, 3 * sizeof(u32)) != 0)
		return -EFAULT;

	nargs = be32_to_cpu(args.nargs);
	nret  = be32_to_cpu(args.nret);
	token = be32_to_cpu(args.token);

	if (nargs > ARRAY_SIZE(args.args)
	    || nret > ARRAY_SIZE(args.args)
	    || nargs + nret > ARRAY_SIZE(args.args))
		return -EINVAL;

	/* Copy in args. */
	if (copy_from_user(args.args, uargs->args,
			   nargs * sizeof(rtas_arg_t)) != 0)
		return -EFAULT;

	if (token == RTAS_UNKNOWN_SERVICE)
		return -EINVAL;

	args.rets = &args.args[nargs];
	memset(args.rets, 0, nret * sizeof(rtas_arg_t));

	/* Need to handle ibm,suspend_me call specially */
	if (token == ibm_suspend_me_token) {

		/*
		 * rtas_ibm_suspend_me assumes the streamid handle is in cpu
		 * endian, or at least the hcall within it requires it.
		 */
		int rc = 0;
		u64 handle = ((u64)be32_to_cpu(args.args[0]) << 32)
		              | be32_to_cpu(args.args[1]);
		rc = rtas_ibm_suspend_me(handle);
		if (rc == -EAGAIN)
			args.rets[0] = cpu_to_be32(RTAS_NOT_SUSPENDABLE);
		else if (rc == -EIO)
			args.rets[0] = cpu_to_be32(-1);
		else if (rc)
			return rc;
		goto copy_return;
	}

	buff_copy = get_errorlog_buffer();

	flags = lock_rtas();

	rtas.args = args;
	enter_rtas(__pa(&rtas.args));
	args = rtas.args;

	/* A -1 return code indicates that the last command couldn't
	   be completed due to a hardware error. */
	if (be32_to_cpu(args.rets[0]) == -1)
		errbuf = __fetch_rtas_last_error(buff_copy);

	unlock_rtas(flags);

	if (buff_copy) {
		if (errbuf)
			log_error(errbuf, ERR_TYPE_RTAS_LOG, 0);
		kfree(buff_copy);
	}

 copy_return:
	/* Copy out args. */
	if (copy_to_user(uargs->args + nargs,
			 args.args + nargs,
			 nret * sizeof(rtas_arg_t)) != 0)
		return -EFAULT;

	return 0;
}
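
/*
 * Userspace view of the syscall above (illustrative sketch): callers such
 * as librtas fill a struct rtas_args with big-endian token/nargs/nret and
 * arguments and invoke the rtas syscall; the nret return words are copied
 * back into args[nargs..].  A rough sketch, assuming a syscall(2) wrapper
 * and htobe32() from the C library:
 *
 *	struct rtas_args a = {
 *		.token = htobe32(token),
 *		.nargs = htobe32(1),
 *		.nret  = htobe32(1),
 *	};
 *	a.args[0] = htobe32(arg0);
 *	syscall(__NR_rtas, &a);
 */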

/*
 * Call early during boot, before mem init, to retrieve the RTAS
 * information from the device-tree and allocate the RMO buffer for userland
 * accesses.
 */
void __init rtas_initialize(void)
{
	unsigned long rtas_region = RTAS_INSTANTIATE_MAX;

	/* Get the RTAS device node and fill in our "rtas" structure with
	 * information about it.
	 */
	rtas.dev = of_find_node_by_name(NULL, "rtas");
	if (rtas.dev) {
		const __be32 *basep, *entryp, *sizep;

		basep = of_get_property(rtas.dev, "linux,rtas-base", NULL);
		sizep = of_get_property(rtas.dev, "rtas-size", NULL);
		if (basep != NULL && sizep != NULL) {
			rtas.base = __be32_to_cpu(*basep);
			rtas.size = __be32_to_cpu(*sizep);
			entryp = of_get_property(rtas.dev,
					"linux,rtas-entry", NULL);
			if (entryp == NULL) /* Ugh */
				rtas.entry = rtas.base;
			else
				rtas.entry = __be32_to_cpu(*entryp);
		} else
			rtas.dev = NULL;
	}
	if (!rtas.dev)
		return;

	/* If RTAS was found, allocate the RMO buffer for it and look for
	 * the ibm,suspend-me token if the platform provides it.
	 */
#ifdef CONFIG_PPC64
	if (machine_is(pseries) && firmware_has_feature(FW_FEATURE_LPAR)) {
		rtas_region = min(ppc64_rma_size, RTAS_INSTANTIATE_MAX);
		ibm_suspend_me_token = rtas_token("ibm,suspend-me");
	}
#endif
	rtas_rmo_buf = memblock_alloc_base(RTAS_RMOBUF_MAX, PAGE_SIZE, rtas_region);

#ifdef CONFIG_RTAS_ERROR_LOGGING
	rtas_last_error_token = rtas_token("rtas-last-error");
#endif
}

int __init early_init_dt_scan_rtas(unsigned long node,
		const char *uname, int depth, void *data)
{
	const u32 *basep, *entryp, *sizep;

	if (depth != 1 || strcmp(uname, "rtas") != 0)
		return 0;

	basep  = of_get_flat_dt_prop(node, "linux,rtas-base", NULL);
	entryp = of_get_flat_dt_prop(node, "linux,rtas-entry", NULL);
	sizep  = of_get_flat_dt_prop(node, "rtas-size", NULL);

	if (basep && entryp && sizep) {
		rtas.base = *basep;
		rtas.entry = *entryp;
		rtas.size = *sizep;
	}

#ifdef CONFIG_UDBG_RTAS_CONSOLE
	basep = of_get_flat_dt_prop(node, "put-term-char", NULL);
	if (basep)
		rtas_putchar_token = *basep;

	basep = of_get_flat_dt_prop(node, "get-term-char", NULL);
	if (basep)
		rtas_getchar_token = *basep;

	if (rtas_putchar_token != RTAS_UNKNOWN_SERVICE &&
	    rtas_getchar_token != RTAS_UNKNOWN_SERVICE)
		udbg_init_rtas_console();

#endif

	/* break now */
	return 1;
}

static arch_spinlock_t timebase_lock;
static u64 timebase = 0;

void rtas_give_timebase(void)
{
	unsigned long flags;

	local_irq_save(flags);
	hard_irq_disable();
	arch_spin_lock(&timebase_lock);
	rtas_call(rtas_token("freeze-time-base"), 0, 1, NULL);
	timebase = get_tb();
	arch_spin_unlock(&timebase_lock);

	while (timebase)
		barrier();
	rtas_call(rtas_token("thaw-time-base"), 0, 1, NULL);
	local_irq_restore(flags);
}

void rtas_take_timebase(void)
{
	while (!timebase)
		barrier();
	arch_spin_lock(&timebase_lock);
	set_tb(timebase >> 32, timebase & 0xffffffff);
	timebase = 0;
	arch_spin_unlock(&timebase_lock);
}
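
/*
 * rtas_give_timebase()/rtas_take_timebase() are intended to be wired up as
 * a platform's smp_ops give_timebase/take_timebase hooks (pseries does this,
 * for example), pairing up during secondary CPU bring-up roughly as:
 *
 *	boot CPU:	rtas_give_timebase();	freeze TB, publish it, spin
 *	new CPU:	rtas_take_timebase();	wait for it, set TB, release
 *
 * The boot CPU only thaws the timebase once the secondary has consumed the
 * published value (timebase reset to 0).
 */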