xref: /linux/arch/riscv/kernel/sbi.c (revision 3f0a50f345f78183f6e9b39c2f45ca5dcaa511ca)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
 * SBI initialization and all extension implementation.
4  *
5  * Copyright (c) 2020 Western Digital Corporation or its affiliates.
6  */
7 
8 #include <linux/bits.h>
9 #include <linux/init.h>
10 #include <linux/pm.h>
11 #include <linux/reboot.h>
12 #include <asm/sbi.h>
13 #include <asm/smp.h>
14 
15 /* default SBI version is 0.1 */
16 unsigned long sbi_spec_version __ro_after_init = SBI_SPEC_VERSION_DEFAULT;
17 EXPORT_SYMBOL(sbi_spec_version);
18 
19 static void (*__sbi_set_timer)(uint64_t stime) __ro_after_init;
20 static int (*__sbi_send_ipi)(const struct cpumask *cpu_mask) __ro_after_init;
21 static int (*__sbi_rfence)(int fid, const struct cpumask *cpu_mask,
22 			   unsigned long start, unsigned long size,
23 			   unsigned long arg4, unsigned long arg5) __ro_after_init;
24 
/**
 * sbi_ecall() - Issue an SBI call to the firmware/hypervisor.
 * @ext: SBI extension ID (placed in a7 per the SBI calling convention).
 * @fid: SBI function ID within the extension (placed in a6).
 * @arg0: First call argument (a0).
 * @arg1: Second call argument (a1).
 * @arg2: Third call argument (a2).
 * @arg3: Fourth call argument (a3).
 * @arg4: Fifth call argument (a4).
 * @arg5: Sixth call argument (a5).
 *
 * Return: A struct sbiret with the error code (from a0) and the
 *	   return value (from a1) produced by the call.
 */
struct sbiret sbi_ecall(int ext, int fid, unsigned long arg0,
			unsigned long arg1, unsigned long arg2,
			unsigned long arg3, unsigned long arg4,
			unsigned long arg5)
{
	struct sbiret ret;

	/* Bind each argument to its ABI-mandated register for the ecall. */
	register uintptr_t a0 asm ("a0") = (uintptr_t)(arg0);
	register uintptr_t a1 asm ("a1") = (uintptr_t)(arg1);
	register uintptr_t a2 asm ("a2") = (uintptr_t)(arg2);
	register uintptr_t a3 asm ("a3") = (uintptr_t)(arg3);
	register uintptr_t a4 asm ("a4") = (uintptr_t)(arg4);
	register uintptr_t a5 asm ("a5") = (uintptr_t)(arg5);
	register uintptr_t a6 asm ("a6") = (uintptr_t)(fid);
	register uintptr_t a7 asm ("a7") = (uintptr_t)(ext);
	/*
	 * a0/a1 are read-write operands: the SBI implementation returns the
	 * error code in a0 and the value in a1.  The "memory" clobber stops
	 * the compiler from caching memory the callee may read or modify
	 * across the trap.
	 */
	asm volatile ("ecall"
		      : "+r" (a0), "+r" (a1)
		      : "r" (a2), "r" (a3), "r" (a4), "r" (a5), "r" (a6), "r" (a7)
		      : "memory");
	ret.error = a0;
	ret.value = a1;

	return ret;
}
EXPORT_SYMBOL(sbi_ecall);
50 
51 int sbi_err_map_linux_errno(int err)
52 {
53 	switch (err) {
54 	case SBI_SUCCESS:
55 		return 0;
56 	case SBI_ERR_DENIED:
57 		return -EPERM;
58 	case SBI_ERR_INVALID_PARAM:
59 		return -EINVAL;
60 	case SBI_ERR_INVALID_ADDRESS:
61 		return -EFAULT;
62 	case SBI_ERR_NOT_SUPPORTED:
63 	case SBI_ERR_FAILURE:
64 	default:
65 		return -ENOTSUPP;
66 	};
67 }
68 EXPORT_SYMBOL(sbi_err_map_linux_errno);
69 
70 #ifdef CONFIG_RISCV_SBI_V01
71 static unsigned long __sbi_v01_cpumask_to_hartmask(const struct cpumask *cpu_mask)
72 {
73 	unsigned long cpuid, hartid;
74 	unsigned long hmask = 0;
75 
76 	/*
77 	 * There is no maximum hartid concept in RISC-V and NR_CPUS must not be
78 	 * associated with hartid. As SBI v0.1 is only kept for backward compatibility
79 	 * and will be removed in the future, there is no point in supporting hartid
80 	 * greater than BITS_PER_LONG (32 for RV32 and 64 for RV64). Ideally, SBI v0.2
81 	 * should be used for platforms with hartid greater than BITS_PER_LONG.
82 	 */
83 	for_each_cpu(cpuid, cpu_mask) {
84 		hartid = cpuid_to_hartid_map(cpuid);
85 		if (hartid >= BITS_PER_LONG) {
86 			pr_warn("Unable to send any request to hartid > BITS_PER_LONG for SBI v0.1\n");
87 			break;
88 		}
89 		hmask |= BIT(hartid);
90 	}
91 
92 	return hmask;
93 }
94 
/**
 * sbi_console_putchar() - Writes given character to the console device.
 * @ch: The data to be written to the console.
 *
 * Uses the legacy SBI v0.1 CONSOLE_PUTCHAR call; any error is ignored.
 *
 * Return: None
 */
void sbi_console_putchar(int ch)
{
	sbi_ecall(SBI_EXT_0_1_CONSOLE_PUTCHAR, 0, ch, 0, 0, 0, 0, 0);
}
EXPORT_SYMBOL(sbi_console_putchar);
106 
/**
 * sbi_console_getchar() - Reads a byte from console device.
 *
 * Uses the legacy SBI v0.1 CONSOLE_GETCHAR call.  Note the v0.1
 * convention returns the character (or a failure code) in the error
 * field, not the value field.
 *
 * Returns the value read from console.
 */
int sbi_console_getchar(void)
{
	struct sbiret ret;

	ret = sbi_ecall(SBI_EXT_0_1_CONSOLE_GETCHAR, 0, 0, 0, 0, 0, 0, 0);

	return ret.error;
}
EXPORT_SYMBOL(sbi_console_getchar);
121 
/**
 * sbi_shutdown() - Remove all the harts from executing supervisor code.
 *
 * Issues the legacy SBI v0.1 SHUTDOWN call; on success this call is not
 * expected to return.
 *
 * Return: None
 */
void sbi_shutdown(void)
{
	sbi_ecall(SBI_EXT_0_1_SHUTDOWN, 0, 0, 0, 0, 0, 0, 0);
}
EXPORT_SYMBOL(sbi_shutdown);
132 
/**
 * sbi_clear_ipi() - Clear any pending IPIs for the calling hart.
 *
 * Uses the legacy SBI v0.1 CLEAR_IPI call; the result is ignored.
 *
 * Return: None
 */
void sbi_clear_ipi(void)
{
	sbi_ecall(SBI_EXT_0_1_CLEAR_IPI, 0, 0, 0, 0, 0, 0, 0);
}
EXPORT_SYMBOL(sbi_clear_ipi);
143 
/**
 * __sbi_set_timer_v01() - Program the timer for next timer event.
 * @stime_value: The value after which next timer event should fire.
 *
 * SBI v0.1 backend for sbi_set_timer().  On RV32 the 64-bit deadline is
 * split across two argument registers (low word first).
 *
 * Return: None
 */
static void __sbi_set_timer_v01(uint64_t stime_value)
{
#if __riscv_xlen == 32
	sbi_ecall(SBI_EXT_0_1_SET_TIMER, 0, stime_value,
		  stime_value >> 32, 0, 0, 0, 0);
#else
	sbi_ecall(SBI_EXT_0_1_SET_TIMER, 0, stime_value, 0, 0, 0, 0, 0);
#endif
}
159 
160 static int __sbi_send_ipi_v01(const struct cpumask *cpu_mask)
161 {
162 	unsigned long hart_mask;
163 
164 	if (!cpu_mask || cpumask_empty(cpu_mask))
165 		cpu_mask = cpu_online_mask;
166 	hart_mask = __sbi_v01_cpumask_to_hartmask(cpu_mask);
167 
168 	sbi_ecall(SBI_EXT_0_1_SEND_IPI, 0, (unsigned long)(&hart_mask),
169 		  0, 0, 0, 0, 0);
170 	return 0;
171 }
172 
173 static int __sbi_rfence_v01(int fid, const struct cpumask *cpu_mask,
174 			    unsigned long start, unsigned long size,
175 			    unsigned long arg4, unsigned long arg5)
176 {
177 	int result = 0;
178 	unsigned long hart_mask;
179 
180 	if (!cpu_mask || cpumask_empty(cpu_mask))
181 		cpu_mask = cpu_online_mask;
182 	hart_mask = __sbi_v01_cpumask_to_hartmask(cpu_mask);
183 
184 	/* v0.2 function IDs are equivalent to v0.1 extension IDs */
185 	switch (fid) {
186 	case SBI_EXT_RFENCE_REMOTE_FENCE_I:
187 		sbi_ecall(SBI_EXT_0_1_REMOTE_FENCE_I, 0,
188 			  (unsigned long)&hart_mask, 0, 0, 0, 0, 0);
189 		break;
190 	case SBI_EXT_RFENCE_REMOTE_SFENCE_VMA:
191 		sbi_ecall(SBI_EXT_0_1_REMOTE_SFENCE_VMA, 0,
192 			  (unsigned long)&hart_mask, start, size,
193 			  0, 0, 0);
194 		break;
195 	case SBI_EXT_RFENCE_REMOTE_SFENCE_VMA_ASID:
196 		sbi_ecall(SBI_EXT_0_1_REMOTE_SFENCE_VMA_ASID, 0,
197 			  (unsigned long)&hart_mask, start, size,
198 			  arg4, 0, 0);
199 		break;
200 	default:
201 		pr_err("SBI call [%d]not supported in SBI v0.1\n", fid);
202 		result = -EINVAL;
203 	}
204 
205 	return result;
206 }
207 
/* Route pm_power_off through the legacy SBI v0.1 shutdown call. */
static void sbi_set_power_off(void)
{
	pm_power_off = sbi_shutdown;
}
212 #else
/* Stub used when CONFIG_RISCV_SBI_V01 is disabled: warn and do nothing. */
static void __sbi_set_timer_v01(uint64_t stime_value)
{
	pr_warn("Timer extension is not available in SBI v%lu.%lu\n",
		sbi_major_version(), sbi_minor_version());
}
218 
/* Stub used when CONFIG_RISCV_SBI_V01 is disabled: warn and report success. */
static int __sbi_send_ipi_v01(const struct cpumask *cpu_mask)
{
	pr_warn("IPI extension is not available in SBI v%lu.%lu\n",
		sbi_major_version(), sbi_minor_version());

	return 0;
}
226 
/* Stub used when CONFIG_RISCV_SBI_V01 is disabled: warn and report success. */
static int __sbi_rfence_v01(int fid, const struct cpumask *cpu_mask,
			    unsigned long start, unsigned long size,
			    unsigned long arg4, unsigned long arg5)
{
	pr_warn("remote fence extension is not available in SBI v%lu.%lu\n",
		sbi_major_version(), sbi_minor_version());

	return 0;
}
236 
/* No legacy shutdown call available; leave pm_power_off untouched. */
static void sbi_set_power_off(void) {}
238 #endif /* CONFIG_RISCV_SBI_V01 */
239 
/*
 * SBI v0.2 TIME extension backend for sbi_set_timer().  On RV32 the
 * 64-bit deadline is split across two argument registers (low word first).
 */
static void __sbi_set_timer_v02(uint64_t stime_value)
{
#if __riscv_xlen == 32
	sbi_ecall(SBI_EXT_TIME, SBI_EXT_TIME_SET_TIMER, stime_value,
		  stime_value >> 32, 0, 0, 0, 0);
#else
	sbi_ecall(SBI_EXT_TIME, SBI_EXT_TIME_SET_TIMER, stime_value, 0,
		  0, 0, 0, 0);
#endif
}
250 
/*
 * SBI v0.2 IPI extension backend for sbi_send_ipi().
 *
 * Harts are batched into BITS_PER_LONG-wide (hmask, hbase) windows so one
 * SBI call covers up to BITS_PER_LONG harts: hbase is the lowest hartid in
 * the current window and htop the highest seen so far.
 *
 * Return: 0 on success, a negative Linux error code on ecall failure.
 */
static int __sbi_send_ipi_v02(const struct cpumask *cpu_mask)
{
	unsigned long hartid, cpuid, hmask = 0, hbase = 0, htop = 0;
	struct sbiret ret = {0};
	int result;

	/* A NULL or empty mask means "all online CPUs". */
	if (!cpu_mask || cpumask_empty(cpu_mask))
		cpu_mask = cpu_online_mask;

	for_each_cpu(cpuid, cpu_mask) {
		hartid = cpuid_to_hartid_map(cpuid);
		if (hmask) {
			/*
			 * Flush the current window when this hartid cannot
			 * fit into it (too far below htop or above hbase).
			 */
			if (hartid + BITS_PER_LONG <= htop ||
			    hbase + BITS_PER_LONG <= hartid) {
				ret = sbi_ecall(SBI_EXT_IPI,
						SBI_EXT_IPI_SEND_IPI, hmask,
						hbase, 0, 0, 0, 0);
				if (ret.error)
					goto ecall_failed;
				hmask = 0;
			} else if (hartid < hbase) {
				/* shift the mask to fit lower hartid */
				hmask <<= hbase - hartid;
				hbase = hartid;
			}
		}
		if (!hmask) {
			/* Start a fresh window anchored at this hartid. */
			hbase = hartid;
			htop = hartid;
		} else if (hartid > htop) {
			htop = hartid;
		}
		hmask |= BIT(hartid - hbase);
	}

	/* Send the IPI for the final, partially filled window. */
	if (hmask) {
		ret = sbi_ecall(SBI_EXT_IPI, SBI_EXT_IPI_SEND_IPI,
				hmask, hbase, 0, 0, 0, 0);
		if (ret.error)
			goto ecall_failed;
	}

	return 0;

ecall_failed:
	result = sbi_err_map_linux_errno(ret.error);
	pr_err("%s: hbase = [%lu] hmask = [0x%lx] failed (error [%d])\n",
	       __func__, hbase, hmask, result);
	return result;
}
301 
302 static int __sbi_rfence_v02_call(unsigned long fid, unsigned long hmask,
303 				 unsigned long hbase, unsigned long start,
304 				 unsigned long size, unsigned long arg4,
305 				 unsigned long arg5)
306 {
307 	struct sbiret ret = {0};
308 	int ext = SBI_EXT_RFENCE;
309 	int result = 0;
310 
311 	switch (fid) {
312 	case SBI_EXT_RFENCE_REMOTE_FENCE_I:
313 		ret = sbi_ecall(ext, fid, hmask, hbase, 0, 0, 0, 0);
314 		break;
315 	case SBI_EXT_RFENCE_REMOTE_SFENCE_VMA:
316 		ret = sbi_ecall(ext, fid, hmask, hbase, start,
317 				size, 0, 0);
318 		break;
319 	case SBI_EXT_RFENCE_REMOTE_SFENCE_VMA_ASID:
320 		ret = sbi_ecall(ext, fid, hmask, hbase, start,
321 				size, arg4, 0);
322 		break;
323 
324 	case SBI_EXT_RFENCE_REMOTE_HFENCE_GVMA:
325 		ret = sbi_ecall(ext, fid, hmask, hbase, start,
326 				size, 0, 0);
327 		break;
328 	case SBI_EXT_RFENCE_REMOTE_HFENCE_GVMA_VMID:
329 		ret = sbi_ecall(ext, fid, hmask, hbase, start,
330 				size, arg4, 0);
331 		break;
332 	case SBI_EXT_RFENCE_REMOTE_HFENCE_VVMA:
333 		ret = sbi_ecall(ext, fid, hmask, hbase, start,
334 				size, 0, 0);
335 		break;
336 	case SBI_EXT_RFENCE_REMOTE_HFENCE_VVMA_ASID:
337 		ret = sbi_ecall(ext, fid, hmask, hbase, start,
338 				size, arg4, 0);
339 		break;
340 	default:
341 		pr_err("unknown function ID [%lu] for SBI extension [%d]\n",
342 		       fid, ext);
343 		result = -EINVAL;
344 	}
345 
346 	if (ret.error) {
347 		result = sbi_err_map_linux_errno(ret.error);
348 		pr_err("%s: hbase = [%lu] hmask = [0x%lx] failed (error [%d])\n",
349 		       __func__, hbase, hmask, result);
350 	}
351 
352 	return result;
353 }
354 
/*
 * SBI v0.2 RFENCE extension backend for the remote fence operations.
 *
 * Uses the same BITS_PER_LONG-wide (hmask, hbase) hart windowing as
 * __sbi_send_ipi_v02(), issuing one __sbi_rfence_v02_call() per window.
 *
 * Return: 0 on success, a negative Linux error code otherwise.
 */
static int __sbi_rfence_v02(int fid, const struct cpumask *cpu_mask,
			    unsigned long start, unsigned long size,
			    unsigned long arg4, unsigned long arg5)
{
	unsigned long hartid, cpuid, hmask = 0, hbase = 0, htop = 0;
	int result;

	/* A NULL or empty mask means "all online CPUs". */
	if (!cpu_mask || cpumask_empty(cpu_mask))
		cpu_mask = cpu_online_mask;

	for_each_cpu(cpuid, cpu_mask) {
		hartid = cpuid_to_hartid_map(cpuid);
		if (hmask) {
			/*
			 * Flush the current window when this hartid cannot
			 * fit into it (too far below htop or above hbase).
			 */
			if (hartid + BITS_PER_LONG <= htop ||
			    hbase + BITS_PER_LONG <= hartid) {
				result = __sbi_rfence_v02_call(fid, hmask,
						hbase, start, size, arg4, arg5);
				if (result)
					return result;
				hmask = 0;
			} else if (hartid < hbase) {
				/* shift the mask to fit lower hartid */
				hmask <<= hbase - hartid;
				hbase = hartid;
			}
		}
		if (!hmask) {
			/* Start a fresh window anchored at this hartid. */
			hbase = hartid;
			htop = hartid;
		} else if (hartid > htop) {
			htop = hartid;
		}
		hmask |= BIT(hartid - hbase);
	}

	/* Fence the final, partially filled window. */
	if (hmask) {
		result = __sbi_rfence_v02_call(fid, hmask, hbase,
					       start, size, arg4, arg5);
		if (result)
			return result;
	}

	return 0;
}
399 
/**
 * sbi_set_timer() - Program the timer for next timer event.
 * @stime_value: The value after which next timer event should fire.
 *
 * Dispatches to the v0.1 or v0.2 backend selected during sbi_init().
 *
 * Return: None.
 */
void sbi_set_timer(uint64_t stime_value)
{
	__sbi_set_timer(stime_value);
}
410 
/**
 * sbi_send_ipi() - Send an IPI to any hart.
 * @cpu_mask: A cpu mask containing all the target harts.
 *
 * Dispatches to the v0.1 or v0.2 backend selected during sbi_init().
 *
 * Return: 0 on success, appropriate linux error code otherwise.
 */
int sbi_send_ipi(const struct cpumask *cpu_mask)
{
	return __sbi_send_ipi(cpu_mask);
}
EXPORT_SYMBOL(sbi_send_ipi);
422 
/**
 * sbi_remote_fence_i() - Execute FENCE.I instruction on given remote harts.
 * @cpu_mask: A cpu mask containing all the target harts.
 *
 * Return: 0 on success, appropriate linux error code otherwise.
 */
int sbi_remote_fence_i(const struct cpumask *cpu_mask)
{
	return __sbi_rfence(SBI_EXT_RFENCE_REMOTE_FENCE_I,
			    cpu_mask, 0, 0, 0, 0);
}
EXPORT_SYMBOL(sbi_remote_fence_i);
435 
/**
 * sbi_remote_sfence_vma() - Execute SFENCE.VMA instructions on given remote
 *			     harts for the specified virtual address range.
 * @cpu_mask: A cpu mask containing all the target harts.
 * @start: Start of the virtual address
 * @size: Total size of the virtual address range.
 *
 * Return: 0 on success, appropriate linux error code otherwise.
 */
int sbi_remote_sfence_vma(const struct cpumask *cpu_mask,
			   unsigned long start,
			   unsigned long size)
{
	return __sbi_rfence(SBI_EXT_RFENCE_REMOTE_SFENCE_VMA,
			    cpu_mask, start, size, 0, 0);
}
EXPORT_SYMBOL(sbi_remote_sfence_vma);
453 
/**
 * sbi_remote_sfence_vma_asid() - Execute SFENCE.VMA instructions on given
 * remote harts for a virtual address range belonging to a specific ASID.
 *
 * @cpu_mask: A cpu mask containing all the target harts.
 * @start: Start of the virtual address
 * @size: Total size of the virtual address range.
 * @asid: The value of address space identifier (ASID).
 *
 * Return: 0 on success, appropriate linux error code otherwise.
 */
int sbi_remote_sfence_vma_asid(const struct cpumask *cpu_mask,
				unsigned long start,
				unsigned long size,
				unsigned long asid)
{
	return __sbi_rfence(SBI_EXT_RFENCE_REMOTE_SFENCE_VMA_ASID,
			    cpu_mask, start, size, asid, 0);
}
EXPORT_SYMBOL(sbi_remote_sfence_vma_asid);
474 
/**
 * sbi_remote_hfence_gvma() - Execute HFENCE.GVMA instructions on given remote
 *			   harts for the specified guest physical address range.
 * @cpu_mask: A cpu mask containing all the target harts.
 * @start: Start of the guest physical address
 * @size: Total size of the guest physical address range.
 *
 * Return: 0 on success, appropriate linux error code otherwise.
 */
int sbi_remote_hfence_gvma(const struct cpumask *cpu_mask,
			   unsigned long start,
			   unsigned long size)
{
	return __sbi_rfence(SBI_EXT_RFENCE_REMOTE_HFENCE_GVMA,
			    cpu_mask, start, size, 0, 0);
}
EXPORT_SYMBOL_GPL(sbi_remote_hfence_gvma);
492 
/**
 * sbi_remote_hfence_gvma_vmid() - Execute HFENCE.GVMA instructions on given
 * remote harts for a guest physical address range belonging to a specific VMID.
 *
 * @cpu_mask: A cpu mask containing all the target harts.
 * @start: Start of the guest physical address
 * @size: Total size of the guest physical address range.
 * @vmid: The value of guest ID (VMID).
 *
 * Return: 0 on success, appropriate linux error code otherwise.
 */
int sbi_remote_hfence_gvma_vmid(const struct cpumask *cpu_mask,
				unsigned long start,
				unsigned long size,
				unsigned long vmid)
{
	return __sbi_rfence(SBI_EXT_RFENCE_REMOTE_HFENCE_GVMA_VMID,
			    cpu_mask, start, size, vmid, 0);
}
EXPORT_SYMBOL(sbi_remote_hfence_gvma_vmid);
513 
/**
 * sbi_remote_hfence_vvma() - Execute HFENCE.VVMA instructions on given remote
 *			     harts for the current guest virtual address range.
 * @cpu_mask: A cpu mask containing all the target harts.
 * @start: Start of the current guest virtual address
 * @size: Total size of the current guest virtual address range.
 *
 * Return: 0 on success, appropriate linux error code otherwise.
 */
int sbi_remote_hfence_vvma(const struct cpumask *cpu_mask,
			   unsigned long start,
			   unsigned long size)
{
	return __sbi_rfence(SBI_EXT_RFENCE_REMOTE_HFENCE_VVMA,
			    cpu_mask, start, size, 0, 0);
}
EXPORT_SYMBOL(sbi_remote_hfence_vvma);
531 
/**
 * sbi_remote_hfence_vvma_asid() - Execute HFENCE.VVMA instructions on given
 * remote harts for current guest virtual address range belonging to a specific
 * ASID.
 *
 * @cpu_mask: A cpu mask containing all the target harts.
 * @start: Start of the current guest virtual address
 * @size: Total size of the current guest virtual address range.
 * @asid: The value of address space identifier (ASID).
 *
 * Return: 0 on success, appropriate linux error code otherwise.
 */
int sbi_remote_hfence_vvma_asid(const struct cpumask *cpu_mask,
				unsigned long start,
				unsigned long size,
				unsigned long asid)
{
	return __sbi_rfence(SBI_EXT_RFENCE_REMOTE_HFENCE_VVMA_ASID,
			    cpu_mask, start, size, asid, 0);
}
EXPORT_SYMBOL(sbi_remote_hfence_vvma_asid);
553 
/*
 * Request a system reset via the SBI SRST extension.  A successful reset
 * is presumably not expected to return control here, so reaching the
 * pr_warn() indicates the request did not take effect.
 */
static void sbi_srst_reset(unsigned long type, unsigned long reason)
{
	sbi_ecall(SBI_EXT_SRST, SBI_EXT_SRST_RESET, type, reason,
		  0, 0, 0, 0);
	pr_warn("%s: type=0x%lx reason=0x%lx failed\n",
		__func__, type, reason);
}
561 
/*
 * Restart-handler callback: map the Linux reboot mode onto the SRST
 * warm/cold reboot types and request the reset.
 */
static int sbi_srst_reboot(struct notifier_block *this,
			   unsigned long mode, void *cmd)
{
	sbi_srst_reset((mode == REBOOT_WARM || mode == REBOOT_SOFT) ?
		       SBI_SRST_RESET_TYPE_WARM_REBOOT :
		       SBI_SRST_RESET_TYPE_COLD_REBOOT,
		       SBI_SRST_RESET_REASON_NONE);
	return NOTIFY_DONE;
}
571 
572 static struct notifier_block sbi_srst_reboot_nb;
573 
/* pm_power_off hook: shut the system down via the SRST extension. */
static void sbi_srst_power_off(void)
{
	sbi_srst_reset(SBI_SRST_RESET_TYPE_SHUTDOWN,
		       SBI_SRST_RESET_REASON_NONE);
}
579 
580 /**
581  * sbi_probe_extension() - Check if an SBI extension ID is supported or not.
582  * @extid: The extension ID to be probed.
583  *
584  * Return: Extension specific nonzero value f yes, -ENOTSUPP otherwise.
585  */
586 int sbi_probe_extension(int extid)
587 {
588 	struct sbiret ret;
589 
590 	ret = sbi_ecall(SBI_EXT_BASE, SBI_EXT_BASE_PROBE_EXT, extid,
591 			0, 0, 0, 0, 0);
592 	if (!ret.error)
593 		if (ret.value)
594 			return ret.value;
595 
596 	return -ENOTSUPP;
597 }
598 EXPORT_SYMBOL(sbi_probe_extension);
599 
600 static long __sbi_base_ecall(int fid)
601 {
602 	struct sbiret ret;
603 
604 	ret = sbi_ecall(SBI_EXT_BASE, fid, 0, 0, 0, 0, 0, 0);
605 	if (!ret.error)
606 		return ret.value;
607 	else
608 		return sbi_err_map_linux_errno(ret.error);
609 }
610 
/* Query the SBI specification version implemented by the firmware. */
static inline long sbi_get_spec_version(void)
{
	return __sbi_base_ecall(SBI_EXT_BASE_GET_SPEC_VERSION);
}
615 
/* Query the SBI implementation ID (which firmware provides the SBI). */
static inline long sbi_get_firmware_id(void)
{
	return __sbi_base_ecall(SBI_EXT_BASE_GET_IMP_ID);
}
620 
/* Query the version of the SBI implementation. */
static inline long sbi_get_firmware_version(void)
{
	return __sbi_base_ecall(SBI_EXT_BASE_GET_IMP_VERSION);
}
625 
/* Read the machine vendor ID (mvendorid CSR) via the SBI base extension. */
long sbi_get_mvendorid(void)
{
	return __sbi_base_ecall(SBI_EXT_BASE_GET_MVENDORID);
}
630 
/* Read the machine architecture ID (marchid CSR) via the SBI base extension. */
long sbi_get_marchid(void)
{
	return __sbi_base_ecall(SBI_EXT_BASE_GET_MARCHID);
}
635 
/* Read the machine implementation ID (mimpid CSR) via the SBI base extension. */
long sbi_get_mimpid(void)
{
	return __sbi_base_ecall(SBI_EXT_BASE_GET_MIMPID);
}
640 
/* Thin adapter matching the riscv_ipi_ops callback signature. */
static void sbi_send_cpumask_ipi(const struct cpumask *target)
{
	sbi_send_ipi(target);
}

/* IPI injection backed by the SBI IPI mechanism. */
static const struct riscv_ipi_ops sbi_ipi_ops = {
	.ipi_inject = sbi_send_cpumask_ipi
};
649 
/*
 * sbi_init() - Detect the SBI version and wire up the backend callbacks.
 *
 * Probes the firmware's spec version, then selects the v0.2 extension
 * implementations for timer/IPI/rfence where available, falling back to
 * the legacy v0.1 calls (or their warning stubs) otherwise.  Also hooks
 * system power-off and restart through SRST when supported.
 */
void __init sbi_init(void)
{
	int ret;

	/*
	 * Sets pm_power_off to the legacy shutdown when
	 * CONFIG_RISCV_SBI_V01 is enabled; no-op otherwise.  SRST below
	 * may override it.
	 */
	sbi_set_power_off();
	/* A non-positive result keeps the default (v0.1) spec version. */
	ret = sbi_get_spec_version();
	if (ret > 0)
		sbi_spec_version = ret;

	pr_info("SBI specification v%lu.%lu detected\n",
		sbi_major_version(), sbi_minor_version());

	if (!sbi_spec_is_0_1()) {
		pr_info("SBI implementation ID=0x%lx Version=0x%lx\n",
			sbi_get_firmware_id(), sbi_get_firmware_version());
		/* Prefer the v0.2 extensions; fall back to legacy calls. */
		if (sbi_probe_extension(SBI_EXT_TIME) > 0) {
			__sbi_set_timer = __sbi_set_timer_v02;
			pr_info("SBI TIME extension detected\n");
		} else {
			__sbi_set_timer = __sbi_set_timer_v01;
		}
		if (sbi_probe_extension(SBI_EXT_IPI) > 0) {
			__sbi_send_ipi	= __sbi_send_ipi_v02;
			pr_info("SBI IPI extension detected\n");
		} else {
			__sbi_send_ipi	= __sbi_send_ipi_v01;
		}
		if (sbi_probe_extension(SBI_EXT_RFENCE) > 0) {
			__sbi_rfence	= __sbi_rfence_v02;
			pr_info("SBI RFENCE extension detected\n");
		} else {
			__sbi_rfence	= __sbi_rfence_v01;
		}
		/* SRST was introduced in SBI v0.3. */
		if ((sbi_spec_version >= sbi_mk_version(0, 3)) &&
		    (sbi_probe_extension(SBI_EXT_SRST) > 0)) {
			pr_info("SBI SRST extension detected\n");
			pm_power_off = sbi_srst_power_off;
			sbi_srst_reboot_nb.notifier_call = sbi_srst_reboot;
			sbi_srst_reboot_nb.priority = 192;
			register_restart_handler(&sbi_srst_reboot_nb);
		}
	} else {
		__sbi_set_timer = __sbi_set_timer_v01;
		__sbi_send_ipi	= __sbi_send_ipi_v01;
		__sbi_rfence	= __sbi_rfence_v01;
	}

	riscv_set_ipi_ops(&sbi_ipi_ops);
}
699