xref: /linux/arch/riscv/kernel/sbi.c (revision ba199dc909a20fe62270ae4e93f263987bb9d119)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * SBI initialization and all extension implementations.
 *
 * Copyright (c) 2020 Western Digital Corporation or its affiliates.
 */

#include <linux/bits.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/pm.h>
#include <linux/reboot.h>
#include <asm/sbi.h>
#include <asm/smp.h>
#include <asm/tlbflush.h>

#define CREATE_TRACE_POINTS
#include <asm/trace.h>

/* default SBI version is 0.1 */
unsigned long sbi_spec_version __ro_after_init = SBI_SPEC_VERSION_DEFAULT;
EXPORT_SYMBOL(sbi_spec_version);

static void (*__sbi_set_timer)(uint64_t stime) __ro_after_init;
static void (*__sbi_send_ipi)(unsigned int cpu) __ro_after_init;
static int (*__sbi_rfence)(int fid, const struct cpumask *cpu_mask,
			   unsigned long start, unsigned long size,
			   unsigned long arg4, unsigned long arg5) __ro_after_init;

struct sbiret __sbi_ecall(unsigned long arg0, unsigned long arg1,
			  unsigned long arg2, unsigned long arg3,
			  unsigned long arg4, unsigned long arg5,
			  int fid, int ext)
{
	struct sbiret ret;

	trace_sbi_call(ext, fid);

	register uintptr_t a0 asm ("a0") = (uintptr_t)(arg0);
	register uintptr_t a1 asm ("a1") = (uintptr_t)(arg1);
	register uintptr_t a2 asm ("a2") = (uintptr_t)(arg2);
	register uintptr_t a3 asm ("a3") = (uintptr_t)(arg3);
	register uintptr_t a4 asm ("a4") = (uintptr_t)(arg4);
	register uintptr_t a5 asm ("a5") = (uintptr_t)(arg5);
	register uintptr_t a6 asm ("a6") = (uintptr_t)(fid);
	register uintptr_t a7 asm ("a7") = (uintptr_t)(ext);
	asm volatile ("ecall"
		      : "+r" (a0), "+r" (a1)
		      : "r" (a2), "r" (a3), "r" (a4), "r" (a5), "r" (a6), "r" (a7)
		      : "memory");
	ret.error = a0;
	ret.value = a1;

	trace_sbi_return(ext, ret.error, ret.value);

	return ret;
}
EXPORT_SYMBOL(__sbi_ecall);
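/*
 * Illustrative example (a sketch, not code used here): callers are expected
 * to go through the sbi_ecall() wrapper provided by <asm/sbi.h>, which ends
 * up in __sbi_ecall(), e.g.:
 *
 *	struct sbiret ret;
 *
 *	ret = sbi_ecall(SBI_EXT_BASE, SBI_EXT_BASE_GET_SPEC_VERSION,
 *			0, 0, 0, 0, 0, 0);
 *	if (!ret.error)
 *		pr_info("SBI spec version: 0x%lx\n", ret.value);
 */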

int sbi_err_map_linux_errno(int err)
{
	switch (err) {
	case SBI_SUCCESS:
		return 0;
	case SBI_ERR_DENIED:
		return -EPERM;
	case SBI_ERR_INVALID_PARAM:
		return -EINVAL;
	case SBI_ERR_INVALID_ADDRESS:
		return -EFAULT;
	case SBI_ERR_NOT_SUPPORTED:
	case SBI_ERR_FAILURE:
	default:
		return -ENOTSUPP;
	}
}
EXPORT_SYMBOL(sbi_err_map_linux_errno);

#ifdef CONFIG_RISCV_SBI_V01
static unsigned long __sbi_v01_cpumask_to_hartmask(const struct cpumask *cpu_mask)
{
	unsigned long cpuid, hartid;
	unsigned long hmask = 0;

	/*
	 * There is no maximum hartid concept in RISC-V and NR_CPUS must not be
	 * associated with hartid. As SBI v0.1 is only kept for backward compatibility
	 * and will be removed in the future, there is no point in supporting hartid
	 * greater than BITS_PER_LONG (32 for RV32 and 64 for RV64). Ideally, SBI v0.2
	 * should be used for platforms with hartid greater than BITS_PER_LONG.
	 */
	for_each_cpu(cpuid, cpu_mask) {
		hartid = cpuid_to_hartid_map(cpuid);
		if (hartid >= BITS_PER_LONG) {
			pr_warn("Unable to send any request to hartid > BITS_PER_LONG for SBI v0.1\n");
			break;
		}
		hmask |= BIT(hartid);
	}

	return hmask;
}

/**
 * sbi_console_putchar() - Writes given character to the console device.
 * @ch: The data to be written to the console.
 *
 * Return: None
 */
void sbi_console_putchar(int ch)
{
	sbi_ecall(SBI_EXT_0_1_CONSOLE_PUTCHAR, 0, ch, 0, 0, 0, 0, 0);
}
EXPORT_SYMBOL(sbi_console_putchar);

/**
 * sbi_console_getchar() - Reads a byte from the console device.
 *
 * Return: The value read from the console.
 */
int sbi_console_getchar(void)
{
	struct sbiret ret;

	ret = sbi_ecall(SBI_EXT_0_1_CONSOLE_GETCHAR, 0, 0, 0, 0, 0, 0, 0);

	return ret.error;
}
EXPORT_SYMBOL(sbi_console_getchar);

/**
 * sbi_shutdown() - Remove all the harts from executing supervisor code.
 *
 * Return: None
 */
void sbi_shutdown(void)
{
	sbi_ecall(SBI_EXT_0_1_SHUTDOWN, 0, 0, 0, 0, 0, 0, 0);
}
EXPORT_SYMBOL(sbi_shutdown);

/**
 * __sbi_set_timer_v01() - Program the timer for next timer event.
 * @stime_value: The value after which next timer event should fire.
 *
 * Return: None
 */
static void __sbi_set_timer_v01(uint64_t stime_value)
{
#if __riscv_xlen == 32
	sbi_ecall(SBI_EXT_0_1_SET_TIMER, 0, stime_value,
		  stime_value >> 32, 0, 0, 0, 0);
#else
	sbi_ecall(SBI_EXT_0_1_SET_TIMER, 0, stime_value, 0, 0, 0, 0, 0);
#endif
}

static void __sbi_send_ipi_v01(unsigned int cpu)
{
	unsigned long hart_mask =
		__sbi_v01_cpumask_to_hartmask(cpumask_of(cpu));
	sbi_ecall(SBI_EXT_0_1_SEND_IPI, 0, (unsigned long)(&hart_mask),
		  0, 0, 0, 0, 0);
}

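/*
 * The legacy (v0.1) remote fence calls expect the virtual address of an
 * in-memory hart mask, unlike the v0.2+ RFENCE extension which passes the
 * mask and base hartid by value.
 */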
static int __sbi_rfence_v01(int fid, const struct cpumask *cpu_mask,
			    unsigned long start, unsigned long size,
			    unsigned long arg4, unsigned long arg5)
{
	int result = 0;
	unsigned long hart_mask;

	if (!cpu_mask || cpumask_empty(cpu_mask))
		cpu_mask = cpu_online_mask;
	hart_mask = __sbi_v01_cpumask_to_hartmask(cpu_mask);

	/* v0.2 function IDs are equivalent to v0.1 extension IDs */
	switch (fid) {
	case SBI_EXT_RFENCE_REMOTE_FENCE_I:
		sbi_ecall(SBI_EXT_0_1_REMOTE_FENCE_I, 0,
			  (unsigned long)&hart_mask, 0, 0, 0, 0, 0);
		break;
	case SBI_EXT_RFENCE_REMOTE_SFENCE_VMA:
		sbi_ecall(SBI_EXT_0_1_REMOTE_SFENCE_VMA, 0,
			  (unsigned long)&hart_mask, start, size,
			  0, 0, 0);
		break;
	case SBI_EXT_RFENCE_REMOTE_SFENCE_VMA_ASID:
		sbi_ecall(SBI_EXT_0_1_REMOTE_SFENCE_VMA_ASID, 0,
			  (unsigned long)&hart_mask, start, size,
			  arg4, 0, 0);
		break;
	default:
		pr_err("SBI call [%d] not supported in SBI v0.1\n", fid);
		result = -EINVAL;
	}

	return result;
}

static void sbi_set_power_off(void)
{
	pm_power_off = sbi_shutdown;
}
#else
static void __sbi_set_timer_v01(uint64_t stime_value)
{
	pr_warn("Timer extension is not available in SBI v%lu.%lu\n",
		sbi_major_version(), sbi_minor_version());
}

static void __sbi_send_ipi_v01(unsigned int cpu)
{
	pr_warn("IPI extension is not available in SBI v%lu.%lu\n",
		sbi_major_version(), sbi_minor_version());
}

static int __sbi_rfence_v01(int fid, const struct cpumask *cpu_mask,
			    unsigned long start, unsigned long size,
			    unsigned long arg4, unsigned long arg5)
{
	pr_warn("remote fence extension is not available in SBI v%lu.%lu\n",
		sbi_major_version(), sbi_minor_version());

	return 0;
}

static void sbi_set_power_off(void) {}
#endif /* CONFIG_RISCV_SBI_V01 */

static void __sbi_set_timer_v02(uint64_t stime_value)
{
#if __riscv_xlen == 32
	sbi_ecall(SBI_EXT_TIME, SBI_EXT_TIME_SET_TIMER, stime_value,
		  stime_value >> 32, 0, 0, 0, 0);
#else
	sbi_ecall(SBI_EXT_TIME, SBI_EXT_TIME_SET_TIMER, stime_value, 0,
		  0, 0, 0, 0);
#endif
}

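/*
 * With the v0.2+ IPI extension, a single hart is targeted by passing a
 * one-bit mask (hmask = 1UL) with hbase set to that hart's id.
 */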
static void __sbi_send_ipi_v02(unsigned int cpu)
{
	int result;
	struct sbiret ret = {0};

	ret = sbi_ecall(SBI_EXT_IPI, SBI_EXT_IPI_SEND_IPI,
			1UL, cpuid_to_hartid_map(cpu), 0, 0, 0, 0);
	if (ret.error) {
		result = sbi_err_map_linux_errno(ret.error);
		pr_err("%s: hbase = [%lu] failed (error [%d])\n",
			__func__, cpuid_to_hartid_map(cpu), result);
	}
}

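/*
 * Issue one RFENCE extension call for the set of harts described by
 * (hmask, hbase). Returns 0 on success or a Linux error code otherwise.
 */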
static int __sbi_rfence_v02_call(unsigned long fid, unsigned long hmask,
				 unsigned long hbase, unsigned long start,
				 unsigned long size, unsigned long arg4,
				 unsigned long arg5)
{
	struct sbiret ret = {0};
	int ext = SBI_EXT_RFENCE;
	int result = 0;

	switch (fid) {
	case SBI_EXT_RFENCE_REMOTE_FENCE_I:
		ret = sbi_ecall(ext, fid, hmask, hbase, 0, 0, 0, 0);
		break;
	case SBI_EXT_RFENCE_REMOTE_SFENCE_VMA:
		ret = sbi_ecall(ext, fid, hmask, hbase, start,
				size, 0, 0);
		break;
	case SBI_EXT_RFENCE_REMOTE_SFENCE_VMA_ASID:
		ret = sbi_ecall(ext, fid, hmask, hbase, start,
				size, arg4, 0);
		break;
	case SBI_EXT_RFENCE_REMOTE_HFENCE_GVMA:
		ret = sbi_ecall(ext, fid, hmask, hbase, start,
				size, 0, 0);
		break;
	case SBI_EXT_RFENCE_REMOTE_HFENCE_GVMA_VMID:
		ret = sbi_ecall(ext, fid, hmask, hbase, start,
				size, arg4, 0);
		break;
	case SBI_EXT_RFENCE_REMOTE_HFENCE_VVMA:
		ret = sbi_ecall(ext, fid, hmask, hbase, start,
				size, 0, 0);
		break;
	case SBI_EXT_RFENCE_REMOTE_HFENCE_VVMA_ASID:
		ret = sbi_ecall(ext, fid, hmask, hbase, start,
				size, arg4, 0);
		break;
	default:
		pr_err("unknown function ID [%lu] for SBI extension [%d]\n",
		       fid, ext);
		result = -EINVAL;
	}

	if (ret.error) {
		result = sbi_err_map_linux_errno(ret.error);
		pr_err("%s: hbase = [%lu] hmask = [0x%lx] failed (error [%d])\n",
		       __func__, hbase, hmask, result);
	}

	return result;
}

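/*
 * Split the target cpumask into batches that fit the (hmask, hbase) encoding:
 * hartids are accumulated into a window of at most BITS_PER_LONG ids anchored
 * at hbase, the pending mask is flushed via __sbi_rfence_v02_call() whenever
 * the next hartid cannot share that window, and hbase is shifted down when a
 * hartid lower than the current base is encountered.
 */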
static int __sbi_rfence_v02(int fid, const struct cpumask *cpu_mask,
			    unsigned long start, unsigned long size,
			    unsigned long arg4, unsigned long arg5)
{
	unsigned long hartid, cpuid, hmask = 0, hbase = 0, htop = 0;
	int result;

	if (!cpu_mask || cpumask_empty(cpu_mask))
		cpu_mask = cpu_online_mask;

	for_each_cpu(cpuid, cpu_mask) {
		hartid = cpuid_to_hartid_map(cpuid);
		if (hmask) {
			if (hartid + BITS_PER_LONG <= htop ||
			    hbase + BITS_PER_LONG <= hartid) {
				result = __sbi_rfence_v02_call(fid, hmask,
						hbase, start, size, arg4, arg5);
				if (result)
					return result;
				hmask = 0;
			} else if (hartid < hbase) {
				/* shift the mask to fit lower hartid */
				hmask <<= hbase - hartid;
				hbase = hartid;
			}
		}
		if (!hmask) {
			hbase = hartid;
			htop = hartid;
		} else if (hartid > htop) {
			htop = hartid;
		}
		hmask |= BIT(hartid - hbase);
	}

	if (hmask) {
		result = __sbi_rfence_v02_call(fid, hmask, hbase,
					       start, size, arg4, arg5);
		if (result)
			return result;
	}

	return 0;
}

/**
 * sbi_set_timer() - Program the timer for next timer event.
 * @stime_value: The value after which next timer event should fire.
 *
 * Return: None.
 */
void sbi_set_timer(uint64_t stime_value)
{
	__sbi_set_timer(stime_value);
}

/**
 * sbi_send_ipi() - Send an IPI to the target hart.
 * @cpu: Logical id of the target CPU.
 */
void sbi_send_ipi(unsigned int cpu)
{
	__sbi_send_ipi(cpu);
}
EXPORT_SYMBOL(sbi_send_ipi);

/**
 * sbi_remote_fence_i() - Execute FENCE.I instruction on given remote harts.
 * @cpu_mask: A cpu mask containing all the target harts.
 *
 * Return: 0 on success, appropriate linux error code otherwise.
 */
int sbi_remote_fence_i(const struct cpumask *cpu_mask)
{
	return __sbi_rfence(SBI_EXT_RFENCE_REMOTE_FENCE_I,
			    cpu_mask, 0, 0, 0, 0);
}
EXPORT_SYMBOL(sbi_remote_fence_i);

/**
 * sbi_remote_sfence_vma_asid() - Execute SFENCE.VMA instructions on given
 * remote harts for a virtual address range, either for a specific ASID or
 * for all address spaces.
 *
 * @cpu_mask: A cpu mask containing all the target harts.
 * @start: Start of the virtual address
 * @size: Total size of the virtual address range.
 * @asid: The value of address space identifier (ASID), or FLUSH_TLB_NO_ASID
 * for flushing all address spaces.
 *
 * Return: 0 on success, appropriate linux error code otherwise.
 */
int sbi_remote_sfence_vma_asid(const struct cpumask *cpu_mask,
				unsigned long start,
				unsigned long size,
				unsigned long asid)
{
	if (asid == FLUSH_TLB_NO_ASID)
		return __sbi_rfence(SBI_EXT_RFENCE_REMOTE_SFENCE_VMA,
				    cpu_mask, start, size, 0, 0);
	else
		return __sbi_rfence(SBI_EXT_RFENCE_REMOTE_SFENCE_VMA_ASID,
				    cpu_mask, start, size, asid, 0);
}
EXPORT_SYMBOL(sbi_remote_sfence_vma_asid);

/**
 * sbi_remote_hfence_gvma() - Execute HFENCE.GVMA instructions on given remote
 *			   harts for the specified guest physical address range.
 * @cpu_mask: A cpu mask containing all the target harts.
 * @start: Start of the guest physical address
 * @size: Total size of the guest physical address range.
 *
 * Return: 0 on success, appropriate linux error code otherwise.
 */
int sbi_remote_hfence_gvma(const struct cpumask *cpu_mask,
			   unsigned long start,
			   unsigned long size)
{
	return __sbi_rfence(SBI_EXT_RFENCE_REMOTE_HFENCE_GVMA,
			    cpu_mask, start, size, 0, 0);
}
EXPORT_SYMBOL_GPL(sbi_remote_hfence_gvma);

/**
 * sbi_remote_hfence_gvma_vmid() - Execute HFENCE.GVMA instructions on given
 * remote harts for a guest physical address range belonging to a specific VMID.
 *
 * @cpu_mask: A cpu mask containing all the target harts.
 * @start: Start of the guest physical address
 * @size: Total size of the guest physical address range.
 * @vmid: The value of guest ID (VMID).
 *
 * Return: 0 on success, appropriate linux error code otherwise.
 */
int sbi_remote_hfence_gvma_vmid(const struct cpumask *cpu_mask,
				unsigned long start,
				unsigned long size,
				unsigned long vmid)
{
	return __sbi_rfence(SBI_EXT_RFENCE_REMOTE_HFENCE_GVMA_VMID,
			    cpu_mask, start, size, vmid, 0);
}
EXPORT_SYMBOL(sbi_remote_hfence_gvma_vmid);

/**
 * sbi_remote_hfence_vvma() - Execute HFENCE.VVMA instructions on given remote
 *			     harts for the current guest virtual address range.
 * @cpu_mask: A cpu mask containing all the target harts.
 * @start: Start of the current guest virtual address
 * @size: Total size of the current guest virtual address range.
 *
 * Return: 0 on success, appropriate linux error code otherwise.
 */
int sbi_remote_hfence_vvma(const struct cpumask *cpu_mask,
			   unsigned long start,
			   unsigned long size)
{
	return __sbi_rfence(SBI_EXT_RFENCE_REMOTE_HFENCE_VVMA,
			    cpu_mask, start, size, 0, 0);
}
EXPORT_SYMBOL(sbi_remote_hfence_vvma);

/**
 * sbi_remote_hfence_vvma_asid() - Execute HFENCE.VVMA instructions on given
 * remote harts for current guest virtual address range belonging to a specific
 * ASID.
 *
 * @cpu_mask: A cpu mask containing all the target harts.
 * @start: Start of the current guest virtual address
 * @size: Total size of the current guest virtual address range.
 * @asid: The value of address space identifier (ASID).
 *
 * Return: 0 on success, appropriate linux error code otherwise.
 */
int sbi_remote_hfence_vvma_asid(const struct cpumask *cpu_mask,
				unsigned long start,
				unsigned long size,
				unsigned long asid)
{
	return __sbi_rfence(SBI_EXT_RFENCE_REMOTE_HFENCE_VVMA_ASID,
			    cpu_mask, start, size, asid, 0);
}
EXPORT_SYMBOL(sbi_remote_hfence_vvma_asid);

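/*
 * Request a system reset through the SRST extension. A successful reset does
 * not return, so reaching the pr_warn() below means the request failed.
 */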
static void sbi_srst_reset(unsigned long type, unsigned long reason)
{
	sbi_ecall(SBI_EXT_SRST, SBI_EXT_SRST_RESET, type, reason,
		  0, 0, 0, 0);
	pr_warn("%s: type=0x%lx reason=0x%lx failed\n",
		__func__, type, reason);
}

static int sbi_srst_reboot(struct notifier_block *this,
			   unsigned long mode, void *cmd)
{
	sbi_srst_reset((mode == REBOOT_WARM || mode == REBOOT_SOFT) ?
		       SBI_SRST_RESET_TYPE_WARM_REBOOT :
		       SBI_SRST_RESET_TYPE_COLD_REBOOT,
		       SBI_SRST_RESET_REASON_NONE);
	return NOTIFY_DONE;
}

static struct notifier_block sbi_srst_reboot_nb;

static void sbi_srst_power_off(void)
{
	sbi_srst_reset(SBI_SRST_RESET_TYPE_SHUTDOWN,
		       SBI_SRST_RESET_REASON_NONE);
}

/**
 * sbi_probe_extension() - Check if an SBI extension ID is supported or not.
 * @extid: The extension ID to be probed.
 *
 * Return: 1 or an extension-specific nonzero value if yes, 0 otherwise.
 */
long sbi_probe_extension(int extid)
{
	struct sbiret ret;

	ret = sbi_ecall(SBI_EXT_BASE, SBI_EXT_BASE_PROBE_EXT, extid,
			0, 0, 0, 0, 0);
	if (!ret.error)
		return ret.value;

	return 0;
}
EXPORT_SYMBOL(sbi_probe_extension);
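/*
 * Illustrative use of sbi_probe_extension(), mirroring sbi_init() below:
 *
 *	if (sbi_probe_extension(SBI_EXT_RFENCE))
 *		pr_info("SBI RFENCE extension detected\n");
 */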

static long __sbi_base_ecall(int fid)
{
	struct sbiret ret;

	ret = sbi_ecall(SBI_EXT_BASE, fid, 0, 0, 0, 0, 0, 0);
	if (!ret.error)
		return ret.value;
	else
		return sbi_err_map_linux_errno(ret.error);
}

static inline long sbi_get_spec_version(void)
{
	return __sbi_base_ecall(SBI_EXT_BASE_GET_SPEC_VERSION);
}

static inline long sbi_get_firmware_id(void)
{
	return __sbi_base_ecall(SBI_EXT_BASE_GET_IMP_ID);
}

static inline long sbi_get_firmware_version(void)
{
	return __sbi_base_ecall(SBI_EXT_BASE_GET_IMP_VERSION);
}

long sbi_get_mvendorid(void)
{
	return __sbi_base_ecall(SBI_EXT_BASE_GET_MVENDORID);
}
EXPORT_SYMBOL_GPL(sbi_get_mvendorid);

long sbi_get_marchid(void)
{
	return __sbi_base_ecall(SBI_EXT_BASE_GET_MARCHID);
}
EXPORT_SYMBOL_GPL(sbi_get_marchid);

long sbi_get_mimpid(void)
{
	return __sbi_base_ecall(SBI_EXT_BASE_GET_MIMPID);
}
EXPORT_SYMBOL_GPL(sbi_get_mimpid);

bool sbi_debug_console_available;

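/*
 * Write @num_bytes starting at @bytes to the SBI debug console. The request
 * must reference physically contiguous memory, so the length is clamped at
 * the end of the page containing @bytes and the caller is expected to retry
 * with any remaining bytes. Returns the number of bytes written on success
 * or a negative error code otherwise.
 */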
int sbi_debug_console_write(const char *bytes, unsigned int num_bytes)
{
	phys_addr_t base_addr;
	struct sbiret ret;

	if (!sbi_debug_console_available)
		return -EOPNOTSUPP;

	if (is_vmalloc_addr(bytes))
		base_addr = page_to_phys(vmalloc_to_page(bytes)) +
			    offset_in_page(bytes);
	else
		base_addr = __pa(bytes);
	if (PAGE_SIZE < (offset_in_page(bytes) + num_bytes))
		num_bytes = PAGE_SIZE - offset_in_page(bytes);

	if (IS_ENABLED(CONFIG_32BIT))
		ret = sbi_ecall(SBI_EXT_DBCN, SBI_EXT_DBCN_CONSOLE_WRITE,
				num_bytes, lower_32_bits(base_addr),
				upper_32_bits(base_addr), 0, 0, 0);
	else
		ret = sbi_ecall(SBI_EXT_DBCN, SBI_EXT_DBCN_CONSOLE_WRITE,
				num_bytes, base_addr, 0, 0, 0, 0);

	if (ret.error == SBI_ERR_FAILURE)
		return -EIO;
	return ret.error ? sbi_err_map_linux_errno(ret.error) : ret.value;
}

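/*
 * Read up to @num_bytes from the SBI debug console into @bytes, clamped to a
 * single page for the same reason as sbi_debug_console_write(). Returns the
 * number of bytes read on success or a negative error code otherwise.
 */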
int sbi_debug_console_read(char *bytes, unsigned int num_bytes)
{
	phys_addr_t base_addr;
	struct sbiret ret;

	if (!sbi_debug_console_available)
		return -EOPNOTSUPP;

	if (is_vmalloc_addr(bytes))
		base_addr = page_to_phys(vmalloc_to_page(bytes)) +
			    offset_in_page(bytes);
	else
		base_addr = __pa(bytes);
	if (PAGE_SIZE < (offset_in_page(bytes) + num_bytes))
		num_bytes = PAGE_SIZE - offset_in_page(bytes);

	if (IS_ENABLED(CONFIG_32BIT))
		ret = sbi_ecall(SBI_EXT_DBCN, SBI_EXT_DBCN_CONSOLE_READ,
				num_bytes, lower_32_bits(base_addr),
				upper_32_bits(base_addr), 0, 0, 0);
	else
		ret = sbi_ecall(SBI_EXT_DBCN, SBI_EXT_DBCN_CONSOLE_READ,
				num_bytes, base_addr, 0, 0, 0, 0);

	if (ret.error == SBI_ERR_FAILURE)
		return -EIO;
	return ret.error ? sbi_err_map_linux_errno(ret.error) : ret.value;
}

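/*
 * Probe the SBI implementation at boot: detect the specification version,
 * select the v0.2+ TIME/IPI/RFENCE handlers when those extensions are present
 * (falling back to the legacy v0.1 calls otherwise), and register SRST based
 * power-off/restart handlers and the DBCN debug console when available.
 */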
void __init sbi_init(void)
{
	int ret;

	sbi_set_power_off();
	ret = sbi_get_spec_version();
	if (ret > 0)
		sbi_spec_version = ret;

	pr_info("SBI specification v%lu.%lu detected\n",
		sbi_major_version(), sbi_minor_version());

	if (!sbi_spec_is_0_1()) {
		pr_info("SBI implementation ID=0x%lx Version=0x%lx\n",
			sbi_get_firmware_id(), sbi_get_firmware_version());
		if (sbi_probe_extension(SBI_EXT_TIME)) {
			__sbi_set_timer = __sbi_set_timer_v02;
			pr_info("SBI TIME extension detected\n");
		} else {
			__sbi_set_timer = __sbi_set_timer_v01;
		}
		if (sbi_probe_extension(SBI_EXT_IPI)) {
			__sbi_send_ipi	= __sbi_send_ipi_v02;
			pr_info("SBI IPI extension detected\n");
		} else {
			__sbi_send_ipi	= __sbi_send_ipi_v01;
		}
		if (sbi_probe_extension(SBI_EXT_RFENCE)) {
			__sbi_rfence	= __sbi_rfence_v02;
			pr_info("SBI RFENCE extension detected\n");
		} else {
			__sbi_rfence	= __sbi_rfence_v01;
		}
		if ((sbi_spec_version >= sbi_mk_version(0, 3)) &&
		    sbi_probe_extension(SBI_EXT_SRST)) {
			pr_info("SBI SRST extension detected\n");
			pm_power_off = sbi_srst_power_off;
			sbi_srst_reboot_nb.notifier_call = sbi_srst_reboot;
			sbi_srst_reboot_nb.priority = 192;
			register_restart_handler(&sbi_srst_reboot_nb);
		}
		if ((sbi_spec_version >= sbi_mk_version(2, 0)) &&
		    (sbi_probe_extension(SBI_EXT_DBCN) > 0)) {
			pr_info("SBI DBCN extension detected\n");
			sbi_debug_console_available = true;
		}
	} else {
		__sbi_set_timer = __sbi_set_timer_v01;
		__sbi_send_ipi	= __sbi_send_ipi_v01;
		__sbi_rfence	= __sbi_rfence_v01;
	}
}