xref: /linux/arch/riscv/kernel/sbi.c (revision 4b2b7b1e8730d51542c62ba75dabeb52243dfb49)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * SBI initialization and all extension implementations.
 *
 * Copyright (c) 2020 Western Digital Corporation or its affiliates.
 */

#include <linux/bits.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/pm.h>
#include <linux/reboot.h>
#include <asm/sbi.h>
#include <asm/smp.h>
#include <asm/tlbflush.h>

/* default SBI version is 0.1 */
unsigned long sbi_spec_version __ro_after_init = SBI_SPEC_VERSION_DEFAULT;
EXPORT_SYMBOL(sbi_spec_version);

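/*
 * Per-operation handlers: sbi_init() points these at either the legacy v0.1
 * implementations or the v0.2+ ones, depending on the SBI version and the
 * extensions the firmware reports.
 */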
static void (*__sbi_set_timer)(uint64_t stime) __ro_after_init;
static void (*__sbi_send_ipi)(unsigned int cpu) __ro_after_init;
static int (*__sbi_rfence)(int fid, const struct cpumask *cpu_mask,
			   unsigned long start, unsigned long size,
			   unsigned long arg4, unsigned long arg5) __ro_after_init;

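/*
 * Generic SBI call wrapper. As the register assignments below show, the
 * extension ID is passed in a7, the function ID in a6 and the arguments in
 * a0-a5; the SBI implementation returns an error code in a0 and a value in
 * a1, which are copied into the returned struct sbiret.
 */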
struct sbiret sbi_ecall(int ext, int fid, unsigned long arg0,
			unsigned long arg1, unsigned long arg2,
			unsigned long arg3, unsigned long arg4,
			unsigned long arg5)
{
	struct sbiret ret;

	register uintptr_t a0 asm ("a0") = (uintptr_t)(arg0);
	register uintptr_t a1 asm ("a1") = (uintptr_t)(arg1);
	register uintptr_t a2 asm ("a2") = (uintptr_t)(arg2);
	register uintptr_t a3 asm ("a3") = (uintptr_t)(arg3);
	register uintptr_t a4 asm ("a4") = (uintptr_t)(arg4);
	register uintptr_t a5 asm ("a5") = (uintptr_t)(arg5);
	register uintptr_t a6 asm ("a6") = (uintptr_t)(fid);
	register uintptr_t a7 asm ("a7") = (uintptr_t)(ext);
	asm volatile ("ecall"
		      : "+r" (a0), "+r" (a1)
		      : "r" (a2), "r" (a3), "r" (a4), "r" (a5), "r" (a6), "r" (a7)
		      : "memory");
	ret.error = a0;
	ret.value = a1;

	return ret;
}
EXPORT_SYMBOL(sbi_ecall);

int sbi_err_map_linux_errno(int err)
{
	switch (err) {
	case SBI_SUCCESS:
		return 0;
	case SBI_ERR_DENIED:
		return -EPERM;
	case SBI_ERR_INVALID_PARAM:
		return -EINVAL;
	case SBI_ERR_INVALID_ADDRESS:
		return -EFAULT;
	case SBI_ERR_NOT_SUPPORTED:
	case SBI_ERR_FAILURE:
	default:
		return -ENOTSUPP;
	}
}
EXPORT_SYMBOL(sbi_err_map_linux_errno);

#ifdef CONFIG_RISCV_SBI_V01
static unsigned long __sbi_v01_cpumask_to_hartmask(const struct cpumask *cpu_mask)
{
	unsigned long cpuid, hartid;
	unsigned long hmask = 0;

	/*
	 * There is no maximum hartid concept in RISC-V and NR_CPUS must not be
	 * associated with hartid. As SBI v0.1 is only kept for backward compatibility
	 * and will be removed in the future, there is no point in supporting hartid
	 * greater than BITS_PER_LONG (32 for RV32 and 64 for RV64). Ideally, SBI v0.2
	 * should be used for platforms with hartid greater than BITS_PER_LONG.
	 */
	for_each_cpu(cpuid, cpu_mask) {
		hartid = cpuid_to_hartid_map(cpuid);
		if (hartid >= BITS_PER_LONG) {
			pr_warn("Unable to send any request to hartid > BITS_PER_LONG for SBI v0.1\n");
			break;
		}
		hmask |= BIT(hartid);
	}

	return hmask;
}

/**
 * sbi_console_putchar() - Writes the given character to the console device.
 * @ch: The data to be written to the console.
 *
 * Return: None
 */
void sbi_console_putchar(int ch)
{
	sbi_ecall(SBI_EXT_0_1_CONSOLE_PUTCHAR, 0, ch, 0, 0, 0, 0, 0);
}
EXPORT_SYMBOL(sbi_console_putchar);

/**
 * sbi_console_getchar() - Reads a byte from the console device.
 *
 * Return: The value read from the console.
 */
int sbi_console_getchar(void)
{
	struct sbiret ret;

	ret = sbi_ecall(SBI_EXT_0_1_CONSOLE_GETCHAR, 0, 0, 0, 0, 0, 0, 0);

	return ret.error;
}
EXPORT_SYMBOL(sbi_console_getchar);

/**
 * sbi_shutdown() - Remove all the harts from executing supervisor code.
 *
 * Return: None
 */
void sbi_shutdown(void)
{
	sbi_ecall(SBI_EXT_0_1_SHUTDOWN, 0, 0, 0, 0, 0, 0, 0);
}
EXPORT_SYMBOL(sbi_shutdown);

/**
 * __sbi_set_timer_v01() - Program the timer for the next timer event.
 * @stime_value: The value after which the next timer event should fire.
 *
 * Return: None
 */
static void __sbi_set_timer_v01(uint64_t stime_value)
{
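	/*
	 * On RV32 the 64-bit timer value does not fit in a single register,
	 * so it is split across the first two arguments (low 32 bits first,
	 * then the high 32 bits).
	 */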
#if __riscv_xlen == 32
	sbi_ecall(SBI_EXT_0_1_SET_TIMER, 0, stime_value,
		  stime_value >> 32, 0, 0, 0, 0);
#else
	sbi_ecall(SBI_EXT_0_1_SET_TIMER, 0, stime_value, 0, 0, 0, 0, 0);
#endif
}

static void __sbi_send_ipi_v01(unsigned int cpu)
{
	unsigned long hart_mask =
		__sbi_v01_cpumask_to_hartmask(cpumask_of(cpu));
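	/*
	 * The legacy v0.1 calls take a pointer to the hart mask rather than
	 * the mask value itself, hence &hart_mask below.
	 */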
	sbi_ecall(SBI_EXT_0_1_SEND_IPI, 0, (unsigned long)(&hart_mask),
		  0, 0, 0, 0, 0);
}

static int __sbi_rfence_v01(int fid, const struct cpumask *cpu_mask,
			    unsigned long start, unsigned long size,
			    unsigned long arg4, unsigned long arg5)
{
	int result = 0;
	unsigned long hart_mask;

	if (!cpu_mask || cpumask_empty(cpu_mask))
		cpu_mask = cpu_online_mask;
	hart_mask = __sbi_v01_cpumask_to_hartmask(cpu_mask);

	/* v0.2 function IDs are equivalent to v0.1 extension IDs */
	switch (fid) {
	case SBI_EXT_RFENCE_REMOTE_FENCE_I:
		sbi_ecall(SBI_EXT_0_1_REMOTE_FENCE_I, 0,
			  (unsigned long)&hart_mask, 0, 0, 0, 0, 0);
		break;
	case SBI_EXT_RFENCE_REMOTE_SFENCE_VMA:
		sbi_ecall(SBI_EXT_0_1_REMOTE_SFENCE_VMA, 0,
			  (unsigned long)&hart_mask, start, size,
			  0, 0, 0);
		break;
	case SBI_EXT_RFENCE_REMOTE_SFENCE_VMA_ASID:
		sbi_ecall(SBI_EXT_0_1_REMOTE_SFENCE_VMA_ASID, 0,
			  (unsigned long)&hart_mask, start, size,
			  arg4, 0, 0);
		break;
	default:
		pr_err("SBI call [%d] not supported in SBI v0.1\n", fid);
		result = -EINVAL;
	}

	return result;
}

static void sbi_set_power_off(void)
{
	pm_power_off = sbi_shutdown;
}
#else
static void __sbi_set_timer_v01(uint64_t stime_value)
{
	pr_warn("Timer extension is not available in SBI v%lu.%lu\n",
		sbi_major_version(), sbi_minor_version());
}

static void __sbi_send_ipi_v01(unsigned int cpu)
{
	pr_warn("IPI extension is not available in SBI v%lu.%lu\n",
		sbi_major_version(), sbi_minor_version());
}

static int __sbi_rfence_v01(int fid, const struct cpumask *cpu_mask,
			    unsigned long start, unsigned long size,
			    unsigned long arg4, unsigned long arg5)
{
	pr_warn("remote fence extension is not available in SBI v%lu.%lu\n",
		sbi_major_version(), sbi_minor_version());

	return 0;
}

static void sbi_set_power_off(void) {}
#endif /* CONFIG_RISCV_SBI_V01 */

static void __sbi_set_timer_v02(uint64_t stime_value)
{
#if __riscv_xlen == 32
	sbi_ecall(SBI_EXT_TIME, SBI_EXT_TIME_SET_TIMER, stime_value,
		  stime_value >> 32, 0, 0, 0, 0);
#else
	sbi_ecall(SBI_EXT_TIME, SBI_EXT_TIME_SET_TIMER, stime_value, 0,
		  0, 0, 0, 0);
#endif
}

static void __sbi_send_ipi_v02(unsigned int cpu)
{
	int result;
	struct sbiret ret = {0};

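	/*
	 * The hart mask base is set to the target hartid and the mask to
	 * 1UL, so bit 0 selects exactly that one hart for the IPI.
	 */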
	ret = sbi_ecall(SBI_EXT_IPI, SBI_EXT_IPI_SEND_IPI,
			1UL, cpuid_to_hartid_map(cpu), 0, 0, 0, 0);
	if (ret.error) {
		result = sbi_err_map_linux_errno(ret.error);
		pr_err("%s: hbase = [%lu] failed (error [%d])\n",
			__func__, cpuid_to_hartid_map(cpu), result);
	}
}

static int __sbi_rfence_v02_call(unsigned long fid, unsigned long hmask,
				 unsigned long hbase, unsigned long start,
				 unsigned long size, unsigned long arg4,
				 unsigned long arg5)
{
	struct sbiret ret = {0};
	int ext = SBI_EXT_RFENCE;
	int result = 0;

	switch (fid) {
	case SBI_EXT_RFENCE_REMOTE_FENCE_I:
		ret = sbi_ecall(ext, fid, hmask, hbase, 0, 0, 0, 0);
		break;
	case SBI_EXT_RFENCE_REMOTE_SFENCE_VMA:
		ret = sbi_ecall(ext, fid, hmask, hbase, start,
				size, 0, 0);
		break;
	case SBI_EXT_RFENCE_REMOTE_SFENCE_VMA_ASID:
		ret = sbi_ecall(ext, fid, hmask, hbase, start,
				size, arg4, 0);
		break;
	case SBI_EXT_RFENCE_REMOTE_HFENCE_GVMA:
		ret = sbi_ecall(ext, fid, hmask, hbase, start,
				size, 0, 0);
		break;
	case SBI_EXT_RFENCE_REMOTE_HFENCE_GVMA_VMID:
		ret = sbi_ecall(ext, fid, hmask, hbase, start,
				size, arg4, 0);
		break;
	case SBI_EXT_RFENCE_REMOTE_HFENCE_VVMA:
		ret = sbi_ecall(ext, fid, hmask, hbase, start,
				size, 0, 0);
		break;
	case SBI_EXT_RFENCE_REMOTE_HFENCE_VVMA_ASID:
		ret = sbi_ecall(ext, fid, hmask, hbase, start,
				size, arg4, 0);
		break;
	default:
		pr_err("unknown function ID [%lu] for SBI extension [%d]\n",
		       fid, ext);
		result = -EINVAL;
	}

	if (ret.error) {
		result = sbi_err_map_linux_errno(ret.error);
		pr_err("%s: hbase = [%lu] hmask = [0x%lx] failed (error [%d])\n",
		       __func__, hbase, hmask, result);
	}

	return result;
}

static int __sbi_rfence_v02(int fid, const struct cpumask *cpu_mask,
			    unsigned long start, unsigned long size,
			    unsigned long arg4, unsigned long arg5)
{
	unsigned long hartid, cpuid, hmask = 0, hbase = 0, htop = 0;
	int result;

	if (!cpu_mask || cpumask_empty(cpu_mask))
		cpu_mask = cpu_online_mask;

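	/*
	 * Batch the target harts into windows of at most BITS_PER_LONG
	 * hartids: hbase is the lowest hartid in the current window, hmask
	 * the bitmap relative to hbase and htop the highest hartid seen so
	 * far. Whenever the next hartid cannot be represented together with
	 * the harts already accumulated, the mask is flushed with one SBI
	 * call and a new window is started.
	 */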
	for_each_cpu(cpuid, cpu_mask) {
		hartid = cpuid_to_hartid_map(cpuid);
		if (hmask) {
			if (hartid + BITS_PER_LONG <= htop ||
			    hbase + BITS_PER_LONG <= hartid) {
				result = __sbi_rfence_v02_call(fid, hmask,
						hbase, start, size, arg4, arg5);
				if (result)
					return result;
				hmask = 0;
			} else if (hartid < hbase) {
				/* shift the mask to fit lower hartid */
				hmask <<= hbase - hartid;
				hbase = hartid;
			}
		}
		if (!hmask) {
			hbase = hartid;
			htop = hartid;
		} else if (hartid > htop) {
			htop = hartid;
		}
		hmask |= BIT(hartid - hbase);
	}

	if (hmask) {
		result = __sbi_rfence_v02_call(fid, hmask, hbase,
					       start, size, arg4, arg5);
		if (result)
			return result;
	}

	return 0;
}

/**
 * sbi_set_timer() - Program the timer for the next timer event.
 * @stime_value: The value after which the next timer event should fire.
 *
 * Return: None.
 */
void sbi_set_timer(uint64_t stime_value)
{
	__sbi_set_timer(stime_value);
}

/**
 * sbi_send_ipi() - Send an IPI to the hart of the given CPU.
 * @cpu: Logical id of the target CPU.
 */
void sbi_send_ipi(unsigned int cpu)
{
	__sbi_send_ipi(cpu);
}
EXPORT_SYMBOL(sbi_send_ipi);

/**
 * sbi_remote_fence_i() - Execute FENCE.I instruction on given remote harts.
 * @cpu_mask: A cpu mask containing all the target harts.
 *
 * Return: 0 on success, appropriate linux error code otherwise.
 */
int sbi_remote_fence_i(const struct cpumask *cpu_mask)
{
	return __sbi_rfence(SBI_EXT_RFENCE_REMOTE_FENCE_I,
			    cpu_mask, 0, 0, 0, 0);
}
EXPORT_SYMBOL(sbi_remote_fence_i);

/**
 * sbi_remote_sfence_vma_asid() - Execute SFENCE.VMA instructions on given
 * remote harts for a virtual address range, optionally limited to a specific
 * ASID.
 *
 * @cpu_mask: A cpu mask containing all the target harts.
 * @start: Start of the virtual address range.
 * @size: Total size of the virtual address range.
 * @asid: The value of address space identifier (ASID), or FLUSH_TLB_NO_ASID
 * for flushing all address spaces.
 *
 * Return: 0 on success, appropriate linux error code otherwise.
 */
int sbi_remote_sfence_vma_asid(const struct cpumask *cpu_mask,
				unsigned long start,
				unsigned long size,
				unsigned long asid)
{
	if (asid == FLUSH_TLB_NO_ASID)
		return __sbi_rfence(SBI_EXT_RFENCE_REMOTE_SFENCE_VMA,
				    cpu_mask, start, size, 0, 0);
	else
		return __sbi_rfence(SBI_EXT_RFENCE_REMOTE_SFENCE_VMA_ASID,
				    cpu_mask, start, size, asid, 0);
}
EXPORT_SYMBOL(sbi_remote_sfence_vma_asid);

/**
 * sbi_remote_hfence_gvma() - Execute HFENCE.GVMA instructions on given remote
 *			   harts for the specified guest physical address range.
 * @cpu_mask: A cpu mask containing all the target harts.
 * @start: Start of the guest physical address range.
 * @size: Total size of the guest physical address range.
 *
 * Return: 0 on success, appropriate linux error code otherwise.
 */
int sbi_remote_hfence_gvma(const struct cpumask *cpu_mask,
			   unsigned long start,
			   unsigned long size)
{
	return __sbi_rfence(SBI_EXT_RFENCE_REMOTE_HFENCE_GVMA,
			    cpu_mask, start, size, 0, 0);
}
EXPORT_SYMBOL_GPL(sbi_remote_hfence_gvma);

/**
 * sbi_remote_hfence_gvma_vmid() - Execute HFENCE.GVMA instructions on given
 * remote harts for a guest physical address range belonging to a specific VMID.
 *
 * @cpu_mask: A cpu mask containing all the target harts.
 * @start: Start of the guest physical address range.
 * @size: Total size of the guest physical address range.
 * @vmid: The value of guest ID (VMID).
 *
 * Return: 0 on success, appropriate linux error code otherwise.
 */
int sbi_remote_hfence_gvma_vmid(const struct cpumask *cpu_mask,
				unsigned long start,
				unsigned long size,
				unsigned long vmid)
{
	return __sbi_rfence(SBI_EXT_RFENCE_REMOTE_HFENCE_GVMA_VMID,
			    cpu_mask, start, size, vmid, 0);
}
EXPORT_SYMBOL(sbi_remote_hfence_gvma_vmid);

/**
 * sbi_remote_hfence_vvma() - Execute HFENCE.VVMA instructions on given remote
 *			     harts for the current guest virtual address range.
 * @cpu_mask: A cpu mask containing all the target harts.
 * @start: Start of the current guest virtual address range.
 * @size: Total size of the current guest virtual address range.
 *
 * Return: 0 on success, appropriate linux error code otherwise.
 */
int sbi_remote_hfence_vvma(const struct cpumask *cpu_mask,
			   unsigned long start,
			   unsigned long size)
{
	return __sbi_rfence(SBI_EXT_RFENCE_REMOTE_HFENCE_VVMA,
			    cpu_mask, start, size, 0, 0);
}
EXPORT_SYMBOL(sbi_remote_hfence_vvma);

/**
 * sbi_remote_hfence_vvma_asid() - Execute HFENCE.VVMA instructions on given
 * remote harts for current guest virtual address range belonging to a specific
 * ASID.
 *
 * @cpu_mask: A cpu mask containing all the target harts.
 * @start: Start of the current guest virtual address range.
 * @size: Total size of the current guest virtual address range.
 * @asid: The value of address space identifier (ASID).
 *
 * Return: 0 on success, appropriate linux error code otherwise.
 */
int sbi_remote_hfence_vvma_asid(const struct cpumask *cpu_mask,
				unsigned long start,
				unsigned long size,
				unsigned long asid)
{
	return __sbi_rfence(SBI_EXT_RFENCE_REMOTE_HFENCE_VVMA_ASID,
			    cpu_mask, start, size, asid, 0);
}
EXPORT_SYMBOL(sbi_remote_hfence_vvma_asid);

static void sbi_srst_reset(unsigned long type, unsigned long reason)
{
	sbi_ecall(SBI_EXT_SRST, SBI_EXT_SRST_RESET, type, reason,
		  0, 0, 0, 0);
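	/*
	 * A successful SRST reset call does not return, so reaching the
	 * warning below means the reset request failed.
	 */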
	pr_warn("%s: type=0x%lx reason=0x%lx failed\n",
		__func__, type, reason);
}

static int sbi_srst_reboot(struct notifier_block *this,
			   unsigned long mode, void *cmd)
{
	sbi_srst_reset((mode == REBOOT_WARM || mode == REBOOT_SOFT) ?
		       SBI_SRST_RESET_TYPE_WARM_REBOOT :
		       SBI_SRST_RESET_TYPE_COLD_REBOOT,
		       SBI_SRST_RESET_REASON_NONE);
	return NOTIFY_DONE;
}

static struct notifier_block sbi_srst_reboot_nb;

static void sbi_srst_power_off(void)
{
	sbi_srst_reset(SBI_SRST_RESET_TYPE_SHUTDOWN,
		       SBI_SRST_RESET_REASON_NONE);
}

/**
 * sbi_probe_extension() - Check if an SBI extension ID is supported or not.
 * @extid: The extension ID to be probed.
 *
 * Return: 1 or an extension-specific nonzero value if yes, 0 otherwise.
 */
long sbi_probe_extension(int extid)
{
	struct sbiret ret;

	ret = sbi_ecall(SBI_EXT_BASE, SBI_EXT_BASE_PROBE_EXT, extid,
			0, 0, 0, 0, 0);
	if (!ret.error)
		return ret.value;

	return 0;
}
EXPORT_SYMBOL(sbi_probe_extension);

static long __sbi_base_ecall(int fid)
{
	struct sbiret ret;

	ret = sbi_ecall(SBI_EXT_BASE, fid, 0, 0, 0, 0, 0, 0);
	if (!ret.error)
		return ret.value;
	else
		return sbi_err_map_linux_errno(ret.error);
}

static inline long sbi_get_spec_version(void)
{
	return __sbi_base_ecall(SBI_EXT_BASE_GET_SPEC_VERSION);
}

static inline long sbi_get_firmware_id(void)
{
	return __sbi_base_ecall(SBI_EXT_BASE_GET_IMP_ID);
}

static inline long sbi_get_firmware_version(void)
{
	return __sbi_base_ecall(SBI_EXT_BASE_GET_IMP_VERSION);
}

long sbi_get_mvendorid(void)
{
	return __sbi_base_ecall(SBI_EXT_BASE_GET_MVENDORID);
}
EXPORT_SYMBOL_GPL(sbi_get_mvendorid);

long sbi_get_marchid(void)
{
	return __sbi_base_ecall(SBI_EXT_BASE_GET_MARCHID);
}
EXPORT_SYMBOL_GPL(sbi_get_marchid);

long sbi_get_mimpid(void)
{
	return __sbi_base_ecall(SBI_EXT_BASE_GET_MIMPID);
}
EXPORT_SYMBOL_GPL(sbi_get_mimpid);

bool sbi_debug_console_available;

int sbi_debug_console_write(const char *bytes, unsigned int num_bytes)
{
	phys_addr_t base_addr;
	struct sbiret ret;

	if (!sbi_debug_console_available)
		return -EOPNOTSUPP;

	if (is_vmalloc_addr(bytes))
		base_addr = page_to_phys(vmalloc_to_page(bytes)) +
			    offset_in_page(bytes);
	else
		base_addr = __pa(bytes);
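	/*
	 * Only the physical address of the first page is handed to the
	 * firmware, so clamp the transfer at the page boundary; the returned
	 * byte count tells the caller how much was actually written.
	 */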
	if (PAGE_SIZE < (offset_in_page(bytes) + num_bytes))
		num_bytes = PAGE_SIZE - offset_in_page(bytes);

	if (IS_ENABLED(CONFIG_32BIT))
		ret = sbi_ecall(SBI_EXT_DBCN, SBI_EXT_DBCN_CONSOLE_WRITE,
				num_bytes, lower_32_bits(base_addr),
				upper_32_bits(base_addr), 0, 0, 0);
	else
		ret = sbi_ecall(SBI_EXT_DBCN, SBI_EXT_DBCN_CONSOLE_WRITE,
				num_bytes, base_addr, 0, 0, 0, 0);

	if (ret.error == SBI_ERR_FAILURE)
		return -EIO;
	return ret.error ? sbi_err_map_linux_errno(ret.error) : ret.value;
}

int sbi_debug_console_read(char *bytes, unsigned int num_bytes)
{
	phys_addr_t base_addr;
	struct sbiret ret;

	if (!sbi_debug_console_available)
		return -EOPNOTSUPP;

	if (is_vmalloc_addr(bytes))
		base_addr = page_to_phys(vmalloc_to_page(bytes)) +
			    offset_in_page(bytes);
	else
		base_addr = __pa(bytes);
	if (PAGE_SIZE < (offset_in_page(bytes) + num_bytes))
		num_bytes = PAGE_SIZE - offset_in_page(bytes);

	if (IS_ENABLED(CONFIG_32BIT))
		ret = sbi_ecall(SBI_EXT_DBCN, SBI_EXT_DBCN_CONSOLE_READ,
				num_bytes, lower_32_bits(base_addr),
				upper_32_bits(base_addr), 0, 0, 0);
	else
		ret = sbi_ecall(SBI_EXT_DBCN, SBI_EXT_DBCN_CONSOLE_READ,
				num_bytes, base_addr, 0, 0, 0, 0);

	if (ret.error == SBI_ERR_FAILURE)
		return -EIO;
	return ret.error ? sbi_err_map_linux_errno(ret.error) : ret.value;
}

void __init sbi_init(void)
{
	int ret;

	sbi_set_power_off();
	ret = sbi_get_spec_version();
	if (ret > 0)
		sbi_spec_version = ret;

	pr_info("SBI specification v%lu.%lu detected\n",
		sbi_major_version(), sbi_minor_version());

	if (!sbi_spec_is_0_1()) {
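		/*
		 * With SBI v0.2+ the Base extension is available, so each
		 * extension can be probed individually; the timer, IPI and
		 * rfence handlers fall back to the legacy v0.1 calls when a
		 * probe fails.
		 */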
		pr_info("SBI implementation ID=0x%lx Version=0x%lx\n",
			sbi_get_firmware_id(), sbi_get_firmware_version());
		if (sbi_probe_extension(SBI_EXT_TIME)) {
			__sbi_set_timer = __sbi_set_timer_v02;
			pr_info("SBI TIME extension detected\n");
		} else {
			__sbi_set_timer = __sbi_set_timer_v01;
		}
		if (sbi_probe_extension(SBI_EXT_IPI)) {
			__sbi_send_ipi	= __sbi_send_ipi_v02;
			pr_info("SBI IPI extension detected\n");
		} else {
			__sbi_send_ipi	= __sbi_send_ipi_v01;
		}
		if (sbi_probe_extension(SBI_EXT_RFENCE)) {
			__sbi_rfence	= __sbi_rfence_v02;
			pr_info("SBI RFENCE extension detected\n");
		} else {
			__sbi_rfence	= __sbi_rfence_v01;
		}
		if ((sbi_spec_version >= sbi_mk_version(0, 3)) &&
		    sbi_probe_extension(SBI_EXT_SRST)) {
			pr_info("SBI SRST extension detected\n");
			pm_power_off = sbi_srst_power_off;
			sbi_srst_reboot_nb.notifier_call = sbi_srst_reboot;
			sbi_srst_reboot_nb.priority = 192;
			register_restart_handler(&sbi_srst_reboot_nb);
		}
		if ((sbi_spec_version >= sbi_mk_version(2, 0)) &&
		    (sbi_probe_extension(SBI_EXT_DBCN) > 0)) {
			pr_info("SBI DBCN extension detected\n");
			sbi_debug_console_available = true;
		}
	} else {
		__sbi_set_timer = __sbi_set_timer_v01;
		__sbi_send_ipi	= __sbi_send_ipi_v01;
		__sbi_rfence	= __sbi_rfence_v01;
	}
}