1 /* SPDX-License-Identifier: GPL-2.0-only */
2 /*
3 * Copyright (C) 2015 Regents of the University of California
4 * Copyright (c) 2020 Western Digital Corporation or its affiliates.
5 */
6
7 #ifndef _ASM_RISCV_SBI_H
8 #define _ASM_RISCV_SBI_H
9
10 #include <linux/types.h>
11 #include <linux/cpumask.h>
12 #include <linux/jump_label.h>
13
14 #ifdef CONFIG_RISCV_SBI
/* SBI extension IDs (EIDs) understood by the kernel */
enum sbi_ext_id {
#ifdef CONFIG_RISCV_SBI_V01
	/* Legacy SBI v0.1 calls: each operation has its own extension ID */
	SBI_EXT_0_1_SET_TIMER = 0x0,
	SBI_EXT_0_1_CONSOLE_PUTCHAR = 0x1,
	SBI_EXT_0_1_CONSOLE_GETCHAR = 0x2,
	SBI_EXT_0_1_CLEAR_IPI = 0x3,
	SBI_EXT_0_1_SEND_IPI = 0x4,
	SBI_EXT_0_1_REMOTE_FENCE_I = 0x5,
	SBI_EXT_0_1_REMOTE_SFENCE_VMA = 0x6,
	SBI_EXT_0_1_REMOTE_SFENCE_VMA_ASID = 0x7,
	SBI_EXT_0_1_SHUTDOWN = 0x8,
#endif
	SBI_EXT_BASE = 0x10,
	/* Post-v0.1 extension IDs encode the extension name in ASCII */
	SBI_EXT_TIME = 0x54494D45,	/* "TIME" */
	SBI_EXT_IPI = 0x735049,		/* "sPI" */
	SBI_EXT_RFENCE = 0x52464E43,	/* "RFNC" */
	SBI_EXT_HSM = 0x48534D,		/* "HSM" */
	SBI_EXT_SRST = 0x53525354,	/* "SRST" */
	SBI_EXT_SUSP = 0x53555350,	/* "SUSP" */
	SBI_EXT_PMU = 0x504D55,		/* "PMU" */
	SBI_EXT_DBCN = 0x4442434E,	/* "DBCN" */
	SBI_EXT_STA = 0x535441,		/* "STA" */
	SBI_EXT_NACL = 0x4E41434C,	/* "NACL" */

	/* Experimental extensions must lie within this range */
	SBI_EXT_EXPERIMENTAL_START = 0x08000000,
	SBI_EXT_EXPERIMENTAL_END = 0x08FFFFFF,

	/* Vendor extensions must lie within this range */
	SBI_EXT_VENDOR_START = 0x09000000,
	SBI_EXT_VENDOR_END = 0x09FFFFFF,
};
47
/* Function IDs (FIDs) of the Base extension (SBI_EXT_BASE) */
enum sbi_ext_base_fid {
	SBI_EXT_BASE_GET_SPEC_VERSION = 0,
	SBI_EXT_BASE_GET_IMP_ID,
	SBI_EXT_BASE_GET_IMP_VERSION,
	SBI_EXT_BASE_PROBE_EXT,
	SBI_EXT_BASE_GET_MVENDORID,
	SBI_EXT_BASE_GET_MARCHID,
	SBI_EXT_BASE_GET_MIMPID,
};
57
/* Function IDs of the Timer extension (SBI_EXT_TIME) */
enum sbi_ext_time_fid {
	SBI_EXT_TIME_SET_TIMER = 0,
};

/* Function IDs of the IPI extension (SBI_EXT_IPI) */
enum sbi_ext_ipi_fid {
	SBI_EXT_IPI_SEND_IPI = 0,
};

/* Function IDs of the Remote Fence extension (SBI_EXT_RFENCE) */
enum sbi_ext_rfence_fid {
	SBI_EXT_RFENCE_REMOTE_FENCE_I = 0,
	SBI_EXT_RFENCE_REMOTE_SFENCE_VMA,
	SBI_EXT_RFENCE_REMOTE_SFENCE_VMA_ASID,
	SBI_EXT_RFENCE_REMOTE_HFENCE_GVMA_VMID,
	SBI_EXT_RFENCE_REMOTE_HFENCE_GVMA,
	SBI_EXT_RFENCE_REMOTE_HFENCE_VVMA_ASID,
	SBI_EXT_RFENCE_REMOTE_HFENCE_VVMA,
};
75
/* Function IDs of the Hart State Management extension (SBI_EXT_HSM) */
enum sbi_ext_hsm_fid {
	SBI_EXT_HSM_HART_START = 0,
	SBI_EXT_HSM_HART_STOP,
	SBI_EXT_HSM_HART_STATUS,
	SBI_EXT_HSM_HART_SUSPEND,
};

/* Hart states reported/managed by the HSM extension */
enum sbi_hsm_hart_state {
	SBI_HSM_STATE_STARTED = 0,
	SBI_HSM_STATE_STOPPED,
	SBI_HSM_STATE_START_PENDING,
	SBI_HSM_STATE_STOP_PENDING,
	SBI_HSM_STATE_SUSPENDED,
	SBI_HSM_STATE_SUSPEND_PENDING,
	SBI_HSM_STATE_RESUME_PENDING,
};

/*
 * HSM suspend type encoding: bit 31 selects non-retentive suspend,
 * the low 31 bits carry the suspend type; platform-specific types
 * start at SBI_HSM_SUSP_PLAT_BASE.
 */
#define SBI_HSM_SUSP_BASE_MASK			0x7fffffff
#define SBI_HSM_SUSP_NON_RET_BIT		0x80000000
#define SBI_HSM_SUSP_PLAT_BASE			0x10000000

#define SBI_HSM_SUSPEND_RET_DEFAULT		0x00000000
#define SBI_HSM_SUSPEND_RET_PLATFORM		SBI_HSM_SUSP_PLAT_BASE
#define SBI_HSM_SUSPEND_RET_LAST		SBI_HSM_SUSP_BASE_MASK
#define SBI_HSM_SUSPEND_NON_RET_DEFAULT		SBI_HSM_SUSP_NON_RET_BIT
#define SBI_HSM_SUSPEND_NON_RET_PLATFORM	(SBI_HSM_SUSP_NON_RET_BIT | \
						 SBI_HSM_SUSP_PLAT_BASE)
#define SBI_HSM_SUSPEND_NON_RET_LAST		(SBI_HSM_SUSP_NON_RET_BIT | \
						 SBI_HSM_SUSP_BASE_MASK)
105
/* Function IDs of the System Reset extension (SBI_EXT_SRST) */
enum sbi_ext_srst_fid {
	SBI_EXT_SRST_RESET = 0,
};

/* Reset types accepted by SBI_EXT_SRST_RESET */
enum sbi_srst_reset_type {
	SBI_SRST_RESET_TYPE_SHUTDOWN = 0,
	SBI_SRST_RESET_TYPE_COLD_REBOOT,
	SBI_SRST_RESET_TYPE_WARM_REBOOT,
};

/* Reset reasons accepted by SBI_EXT_SRST_RESET */
enum sbi_srst_reset_reason {
	SBI_SRST_RESET_REASON_NONE = 0,
	SBI_SRST_RESET_REASON_SYS_FAILURE,
};

/* Function IDs of the System Suspend extension (SBI_EXT_SUSP) */
enum sbi_ext_susp_fid {
	SBI_EXT_SUSP_SYSTEM_SUSPEND = 0,
};

/* Sleep types accepted by SBI_EXT_SUSP_SYSTEM_SUSPEND */
enum sbi_ext_susp_sleep_type {
	SBI_SUSP_SLEEP_TYPE_SUSPEND_TO_RAM = 0,
};
128
/* Function IDs of the Performance Monitoring Unit extension (SBI_EXT_PMU) */
enum sbi_ext_pmu_fid {
	SBI_EXT_PMU_NUM_COUNTERS = 0,
	SBI_EXT_PMU_COUNTER_GET_INFO,
	SBI_EXT_PMU_COUNTER_CFG_MATCH,
	SBI_EXT_PMU_COUNTER_START,
	SBI_EXT_PMU_COUNTER_STOP,
	SBI_EXT_PMU_COUNTER_FW_READ,
	SBI_EXT_PMU_COUNTER_FW_READ_HI,
	SBI_EXT_PMU_SNAPSHOT_SET_SHMEM,
};

/*
 * Counter description returned by SBI_EXT_PMU_COUNTER_GET_INFO,
 * packed into a single machine word.
 */
union sbi_pmu_ctr_info {
	unsigned long value;
	struct {
		unsigned long csr:12;	/* CSR number backing the counter */
		unsigned long width:6;	/* counter width - 1, in bits */
#if __riscv_xlen == 32
		unsigned long reserved:13;
#else
		unsigned long reserved:45;
#endif
		unsigned long type:1;	/* hardware (0) or firmware (1) counter */
	};
};

/* Data structure to contain the pmu snapshot data */
struct riscv_pmu_snapshot_data {
	u64 ctr_overflow_mask;
	u64 ctr_values[64];
	u64 reserved[447];	/* pads the structure to 8 * 512 = 4096 bytes */
};

/* Raw hardware events carry a 48-bit code, platform firmware events 62 bits */
#define RISCV_PMU_RAW_EVENT_MASK GENMASK_ULL(47, 0)
#define RISCV_PMU_PLAT_FW_EVENT_MASK GENMASK_ULL(61, 0)
#define RISCV_PMU_RAW_EVENT_IDX 0x20000
#define RISCV_PLAT_FW_EVENT	0xFFFF
165
/** General pmu event codes specified in SBI PMU extension */
enum sbi_pmu_hw_generic_events_t {
	SBI_PMU_HW_NO_EVENT			= 0,
	SBI_PMU_HW_CPU_CYCLES			= 1,
	SBI_PMU_HW_INSTRUCTIONS			= 2,
	SBI_PMU_HW_CACHE_REFERENCES		= 3,
	SBI_PMU_HW_CACHE_MISSES			= 4,
	SBI_PMU_HW_BRANCH_INSTRUCTIONS		= 5,
	SBI_PMU_HW_BRANCH_MISSES		= 6,
	SBI_PMU_HW_BUS_CYCLES			= 7,
	SBI_PMU_HW_STALLED_CYCLES_FRONTEND	= 8,
	SBI_PMU_HW_STALLED_CYCLES_BACKEND	= 9,
	SBI_PMU_HW_REF_CPU_CYCLES		= 10,

	SBI_PMU_HW_GENERAL_MAX,
};

/**
 * Special "firmware" events provided by the firmware, even if the hardware
 * does not support performance events. These events are encoded as a raw
 * event type in Linux kernel perf framework.
 */
enum sbi_pmu_fw_generic_events_t {
	SBI_PMU_FW_MISALIGNED_LOAD	= 0,
	SBI_PMU_FW_MISALIGNED_STORE	= 1,
	SBI_PMU_FW_ACCESS_LOAD		= 2,
	SBI_PMU_FW_ACCESS_STORE		= 3,
	SBI_PMU_FW_ILLEGAL_INSN		= 4,
	SBI_PMU_FW_SET_TIMER		= 5,
	SBI_PMU_FW_IPI_SENT		= 6,
	SBI_PMU_FW_IPI_RCVD		= 7,
	SBI_PMU_FW_FENCE_I_SENT		= 8,
	SBI_PMU_FW_FENCE_I_RCVD		= 9,
	SBI_PMU_FW_SFENCE_VMA_SENT	= 10,
	SBI_PMU_FW_SFENCE_VMA_RCVD	= 11,
	SBI_PMU_FW_SFENCE_VMA_ASID_SENT	= 12,
	SBI_PMU_FW_SFENCE_VMA_ASID_RCVD	= 13,

	/* Hypervisor fence events (hfence.gvma variants) */
	SBI_PMU_FW_HFENCE_GVMA_SENT	= 14,
	SBI_PMU_FW_HFENCE_GVMA_RCVD	= 15,
	SBI_PMU_FW_HFENCE_GVMA_VMID_SENT = 16,
	SBI_PMU_FW_HFENCE_GVMA_VMID_RCVD = 17,

	/* Hypervisor fence events (hfence.vvma variants) */
	SBI_PMU_FW_HFENCE_VVMA_SENT	= 18,
	SBI_PMU_FW_HFENCE_VVMA_RCVD	= 19,
	SBI_PMU_FW_HFENCE_VVMA_ASID_SENT = 20,
	SBI_PMU_FW_HFENCE_VVMA_ASID_RCVD = 21,
	SBI_PMU_FW_MAX,
};
215
/* SBI PMU event types */
enum sbi_pmu_event_type {
	SBI_PMU_EVENT_TYPE_HW = 0x0,
	SBI_PMU_EVENT_TYPE_CACHE = 0x1,
	SBI_PMU_EVENT_TYPE_RAW = 0x2,
	SBI_PMU_EVENT_TYPE_FW = 0xf,
};

/* SBI PMU counter types */
enum sbi_pmu_ctr_type {
	SBI_PMU_CTR_TYPE_HW = 0x0,
	SBI_PMU_CTR_TYPE_FW,
};

/*
 * Helper macros to decode event idx: a 20-bit field with the event type
 * in bits [19:16] and the event code in bits [15:0].
 */
#define SBI_PMU_EVENT_IDX_OFFSET 20
#define SBI_PMU_EVENT_IDX_MASK 0xFFFFF
#define SBI_PMU_EVENT_IDX_CODE_MASK 0xFFFF
#define SBI_PMU_EVENT_IDX_TYPE_MASK 0xF0000
#define SBI_PMU_EVENT_RAW_IDX 0x20000
#define SBI_PMU_FIXED_CTR_MASK 0x07

/* Cache event code layout: id in bits [15:3], op in [2:1], result in [0] */
#define SBI_PMU_EVENT_CACHE_ID_CODE_MASK 0xFFF8
#define SBI_PMU_EVENT_CACHE_OP_ID_CODE_MASK 0x06
#define SBI_PMU_EVENT_CACHE_RESULT_ID_CODE_MASK 0x01

#define SBI_PMU_EVENT_CACHE_ID_SHIFT 3
#define SBI_PMU_EVENT_CACHE_OP_SHIFT 1

#define SBI_PMU_EVENT_IDX_INVALID 0xFFFFFFFF

/* Flags defined for config matching function */
#define SBI_PMU_CFG_FLAG_SKIP_MATCH	BIT(0)
#define SBI_PMU_CFG_FLAG_CLEAR_VALUE	BIT(1)
#define SBI_PMU_CFG_FLAG_AUTO_START	BIT(2)
#define SBI_PMU_CFG_FLAG_SET_VUINH	BIT(3)
#define SBI_PMU_CFG_FLAG_SET_VSINH	BIT(4)
#define SBI_PMU_CFG_FLAG_SET_UINH	BIT(5)
#define SBI_PMU_CFG_FLAG_SET_SINH	BIT(6)
#define SBI_PMU_CFG_FLAG_SET_MINH	BIT(7)

/* Flags defined for counter start function */
#define SBI_PMU_START_FLAG_SET_INIT_VALUE BIT(0)
#define SBI_PMU_START_FLAG_INIT_SNAPSHOT BIT(1)

/* Flags defined for counter stop function */
#define SBI_PMU_STOP_FLAG_RESET BIT(0)
#define SBI_PMU_STOP_FLAG_TAKE_SNAPSHOT BIT(1)
264
/* Function IDs of the Debug Console extension (SBI_EXT_DBCN) */
enum sbi_ext_dbcn_fid {
	SBI_EXT_DBCN_CONSOLE_WRITE = 0,
	SBI_EXT_DBCN_CONSOLE_READ = 1,
	SBI_EXT_DBCN_CONSOLE_WRITE_BYTE = 2,
};

/* SBI STA (steal-time accounting) extension */
enum sbi_ext_sta_fid {
	SBI_EXT_STA_STEAL_TIME_SET_SHMEM = 0,
};

/*
 * Per-hart steal-time record shared with the SBI implementation.
 * Fields are little-endian; the layout is fixed at 64 bytes
 * (4 + 4 + 8 + 1 + 47), hence __packed.
 */
struct sbi_sta_struct {
	__le32 sequence;
	__le32 flags;
	__le64 steal;
	u8 preempted;
	u8 pad[47];
} __packed;

/* Passed as the shmem address to disable a previously set shared memory */
#define SBI_SHMEM_DISABLE		-1
285
/* Function IDs of the Nested Acceleration extension (SBI_EXT_NACL) */
enum sbi_ext_nacl_fid {
	SBI_EXT_NACL_PROBE_FEATURE = 0x0,
	SBI_EXT_NACL_SET_SHMEM = 0x1,
	SBI_EXT_NACL_SYNC_CSR = 0x2,
	SBI_EXT_NACL_SYNC_HFENCE = 0x3,
	SBI_EXT_NACL_SYNC_SRET = 0x4,
};

/* Feature IDs probed via SBI_EXT_NACL_PROBE_FEATURE */
enum sbi_ext_nacl_feature {
	SBI_NACL_FEAT_SYNC_CSR = 0x0,
	SBI_NACL_FEAT_SYNC_HFENCE = 0x1,
	SBI_NACL_FEAT_SYNC_SRET = 0x2,
	SBI_NACL_FEAT_AUTOSWAP_CSR = 0x3,
};
300
/*
 * Layout of the NACL shared-memory area. Each region's offset is
 * derived from the previous region's offset + size, in this order:
 * scratch/sret, autoswap, unused, hfence, dirty bitmap, CSR area.
 */
#define SBI_NACL_SHMEM_ADDR_SHIFT	12
#define SBI_NACL_SHMEM_SCRATCH_OFFSET	0x0000
#define SBI_NACL_SHMEM_SCRATCH_SIZE	0x1000
#define SBI_NACL_SHMEM_SRET_OFFSET	0x0000
#define SBI_NACL_SHMEM_SRET_SIZE	0x0200
#define SBI_NACL_SHMEM_AUTOSWAP_OFFSET	(SBI_NACL_SHMEM_SRET_OFFSET + \
					 SBI_NACL_SHMEM_SRET_SIZE)
#define SBI_NACL_SHMEM_AUTOSWAP_SIZE	0x0080
#define SBI_NACL_SHMEM_UNUSED_OFFSET	(SBI_NACL_SHMEM_AUTOSWAP_OFFSET + \
					 SBI_NACL_SHMEM_AUTOSWAP_SIZE)
#define SBI_NACL_SHMEM_UNUSED_SIZE	0x0580
#define SBI_NACL_SHMEM_HFENCE_OFFSET	(SBI_NACL_SHMEM_UNUSED_OFFSET + \
					 SBI_NACL_SHMEM_UNUSED_SIZE)
#define SBI_NACL_SHMEM_HFENCE_SIZE	0x0780
#define SBI_NACL_SHMEM_DBITMAP_OFFSET	(SBI_NACL_SHMEM_HFENCE_OFFSET + \
					 SBI_NACL_SHMEM_HFENCE_SIZE)
#define SBI_NACL_SHMEM_DBITMAP_SIZE	0x0080
#define SBI_NACL_SHMEM_CSR_OFFSET	(SBI_NACL_SHMEM_DBITMAP_OFFSET + \
					 SBI_NACL_SHMEM_DBITMAP_SIZE)
/* 1024 XLEN-bit slots for CSR values */
#define SBI_NACL_SHMEM_CSR_SIZE		((__riscv_xlen / 8) * 1024)
#define SBI_NACL_SHMEM_SIZE		(SBI_NACL_SHMEM_CSR_OFFSET + \
					 SBI_NACL_SHMEM_CSR_SIZE)

/* Compress a 12-bit CSR number into a 10-bit index in the CSR area */
#define SBI_NACL_SHMEM_CSR_INDEX(__csr_num)	\
		((((__csr_num) & 0xc00) >> 2) | ((__csr_num) & 0xff))

/* Each hfence entry occupies 4 XLEN-sized words: config, pnum, <resv>, pcount */
#define SBI_NACL_SHMEM_HFENCE_ENTRY_SZ		((__riscv_xlen / 8) * 4)
#define SBI_NACL_SHMEM_HFENCE_ENTRY_MAX		\
		(SBI_NACL_SHMEM_HFENCE_SIZE /	\
		 SBI_NACL_SHMEM_HFENCE_ENTRY_SZ)
#define SBI_NACL_SHMEM_HFENCE_ENTRY(__num)	\
		(SBI_NACL_SHMEM_HFENCE_OFFSET +	\
		 (__num) * SBI_NACL_SHMEM_HFENCE_ENTRY_SZ)
#define SBI_NACL_SHMEM_HFENCE_ENTRY_CONFIG(__num) \
		SBI_NACL_SHMEM_HFENCE_ENTRY(__num)
#define SBI_NACL_SHMEM_HFENCE_ENTRY_PNUM(__num)\
	(SBI_NACL_SHMEM_HFENCE_ENTRY(__num) + (__riscv_xlen / 8))
#define SBI_NACL_SHMEM_HFENCE_ENTRY_PCOUNT(__num)\
	(SBI_NACL_SHMEM_HFENCE_ENTRY(__num) + \
	 ((__riscv_xlen / 8) * 3))
341
/*
 * Bit layout of an hfence entry's config word, carved out from the
 * most-significant bit downwards: PEND | RSVD1 | TYPE | RSVD2 | ORDER |
 * VMID | ASID. Each field's shift is derived from the previous field's
 * shift minus its own width, so the layout adapts to __riscv_xlen.
 */
#define SBI_NACL_SHMEM_HFENCE_CONFIG_PEND_BITS	1
#define SBI_NACL_SHMEM_HFENCE_CONFIG_PEND_SHIFT	\
				(__riscv_xlen - SBI_NACL_SHMEM_HFENCE_CONFIG_PEND_BITS)
#define SBI_NACL_SHMEM_HFENCE_CONFIG_PEND_MASK	\
				((1UL << SBI_NACL_SHMEM_HFENCE_CONFIG_PEND_BITS) - 1)
#define SBI_NACL_SHMEM_HFENCE_CONFIG_PEND	\
				(SBI_NACL_SHMEM_HFENCE_CONFIG_PEND_MASK << \
				 SBI_NACL_SHMEM_HFENCE_CONFIG_PEND_SHIFT)

#define SBI_NACL_SHMEM_HFENCE_CONFIG_RSVD1_BITS	3
#define SBI_NACL_SHMEM_HFENCE_CONFIG_RSVD1_SHIFT	\
				(SBI_NACL_SHMEM_HFENCE_CONFIG_PEND_SHIFT - \
				 SBI_NACL_SHMEM_HFENCE_CONFIG_RSVD1_BITS)

#define SBI_NACL_SHMEM_HFENCE_CONFIG_TYPE_BITS	4
#define SBI_NACL_SHMEM_HFENCE_CONFIG_TYPE_SHIFT	\
				(SBI_NACL_SHMEM_HFENCE_CONFIG_RSVD1_SHIFT - \
				 SBI_NACL_SHMEM_HFENCE_CONFIG_TYPE_BITS)
#define SBI_NACL_SHMEM_HFENCE_CONFIG_TYPE_MASK	\
				((1UL << SBI_NACL_SHMEM_HFENCE_CONFIG_TYPE_BITS) - 1)

/* Values for the TYPE field above */
#define SBI_NACL_SHMEM_HFENCE_TYPE_GVMA		0x0
#define SBI_NACL_SHMEM_HFENCE_TYPE_GVMA_ALL	0x1
#define SBI_NACL_SHMEM_HFENCE_TYPE_GVMA_VMID	0x2
#define SBI_NACL_SHMEM_HFENCE_TYPE_GVMA_VMID_ALL 0x3
#define SBI_NACL_SHMEM_HFENCE_TYPE_VVMA		0x4
#define SBI_NACL_SHMEM_HFENCE_TYPE_VVMA_ALL	0x5
#define SBI_NACL_SHMEM_HFENCE_TYPE_VVMA_ASID	0x6
#define SBI_NACL_SHMEM_HFENCE_TYPE_VVMA_ASID_ALL 0x7

#define SBI_NACL_SHMEM_HFENCE_CONFIG_RSVD2_BITS	1
#define SBI_NACL_SHMEM_HFENCE_CONFIG_RSVD2_SHIFT	\
				(SBI_NACL_SHMEM_HFENCE_CONFIG_TYPE_SHIFT - \
				 SBI_NACL_SHMEM_HFENCE_CONFIG_RSVD2_BITS)

#define SBI_NACL_SHMEM_HFENCE_CONFIG_ORDER_BITS	7
#define SBI_NACL_SHMEM_HFENCE_CONFIG_ORDER_SHIFT	\
				(SBI_NACL_SHMEM_HFENCE_CONFIG_RSVD2_SHIFT - \
				 SBI_NACL_SHMEM_HFENCE_CONFIG_ORDER_BITS)
#define SBI_NACL_SHMEM_HFENCE_CONFIG_ORDER_MASK	\
				((1UL << SBI_NACL_SHMEM_HFENCE_CONFIG_ORDER_BITS) - 1)
#define SBI_NACL_SHMEM_HFENCE_ORDER_BASE	12

/* ASID/VMID field widths depend on XLEN */
#if __riscv_xlen == 32
#define SBI_NACL_SHMEM_HFENCE_CONFIG_ASID_BITS	9
#define SBI_NACL_SHMEM_HFENCE_CONFIG_VMID_BITS	7
#else
#define SBI_NACL_SHMEM_HFENCE_CONFIG_ASID_BITS	16
#define SBI_NACL_SHMEM_HFENCE_CONFIG_VMID_BITS	14
#endif
#define SBI_NACL_SHMEM_HFENCE_CONFIG_VMID_SHIFT	\
				SBI_NACL_SHMEM_HFENCE_CONFIG_ASID_BITS
#define SBI_NACL_SHMEM_HFENCE_CONFIG_ASID_MASK	\
				((1UL << SBI_NACL_SHMEM_HFENCE_CONFIG_ASID_BITS) - 1)
#define SBI_NACL_SHMEM_HFENCE_CONFIG_VMID_MASK	\
				((1UL << SBI_NACL_SHMEM_HFENCE_CONFIG_VMID_BITS) - 1)

/* Autoswap region: flags word followed by the hstatus slot */
#define SBI_NACL_SHMEM_AUTOSWAP_FLAG_HSTATUS	BIT(0)
#define SBI_NACL_SHMEM_AUTOSWAP_HSTATUS		((__riscv_xlen / 8) * 1)

/* Offset of GPR x(__i) in the sret region; 31 GPRs besides x0 */
#define SBI_NACL_SHMEM_SRET_X(__i)		((__riscv_xlen / 8) * (__i))
#define SBI_NACL_SHMEM_SRET_X_LAST		31
404
/*
 * SBI spec version fields: major version in bits [30:24],
 * minor version in bits [23:0].
 */
#define SBI_SPEC_VERSION_DEFAULT	0x1
#define SBI_SPEC_VERSION_MAJOR_SHIFT	24
#define SBI_SPEC_VERSION_MAJOR_MASK	0x7f
#define SBI_SPEC_VERSION_MINOR_MASK	0xffffff

/* SBI return error codes */
#define SBI_SUCCESS		0
#define SBI_ERR_FAILURE		-1
#define SBI_ERR_NOT_SUPPORTED	-2
#define SBI_ERR_INVALID_PARAM	-3
#define SBI_ERR_DENIED		-4
#define SBI_ERR_INVALID_ADDRESS	-5
#define SBI_ERR_ALREADY_AVAILABLE -6
#define SBI_ERR_ALREADY_STARTED -7
#define SBI_ERR_ALREADY_STOPPED -8
#define SBI_ERR_NO_SHMEM	-9
422
/* Detected SBI specification version; set during sbi_init() */
extern unsigned long sbi_spec_version;

/* Error/value pair returned by every SBI call */
struct sbiret {
	long error;
	long value;
};

void sbi_init(void);
long __sbi_base_ecall(int fid);
struct sbiret __sbi_ecall(unsigned long arg0, unsigned long arg1,
			  unsigned long arg2, unsigned long arg3,
			  unsigned long arg4, unsigned long arg5,
			  int fid, int ext);
/* Canonical calling order: extension ID first, then function ID and args */
#define sbi_ecall(e, f, a0, a1, a2, a3, a4, a5)	\
		__sbi_ecall(a0, a1, a2, a3, a4, a5, f, e)
437
#ifdef CONFIG_RISCV_SBI_V01
void sbi_console_putchar(int ch);
int sbi_console_getchar(void);
#else
/* Stubs when the legacy v0.1 console extensions are not built in */
static inline void sbi_console_putchar(int ch) { }
static inline int sbi_console_getchar(void) { return -ENOENT; }
#endif
long sbi_get_mvendorid(void);
long sbi_get_marchid(void);
long sbi_get_mimpid(void);
void sbi_set_timer(uint64_t stime_value);
void sbi_shutdown(void);
void sbi_send_ipi(unsigned int cpu);
int sbi_remote_fence_i(const struct cpumask *cpu_mask);

/* Remote fence helpers; a NULL/empty cpu_mask semantics is defined by the implementation */
int sbi_remote_sfence_vma_asid(const struct cpumask *cpu_mask,
				unsigned long start,
				unsigned long size,
				unsigned long asid);
int sbi_remote_hfence_gvma(const struct cpumask *cpu_mask,
			   unsigned long start,
			   unsigned long size);
int sbi_remote_hfence_gvma_vmid(const struct cpumask *cpu_mask,
				unsigned long start,
				unsigned long size,
				unsigned long vmid);
int sbi_remote_hfence_vvma(const struct cpumask *cpu_mask,
			   unsigned long start,
			   unsigned long size);
int sbi_remote_hfence_vvma_asid(const struct cpumask *cpu_mask,
				unsigned long start,
				unsigned long size,
				unsigned long asid);
long sbi_probe_extension(int ext);
472
473 /* Check if current SBI specification version is 0.1 or not */
sbi_spec_is_0_1(void)474 static inline int sbi_spec_is_0_1(void)
475 {
476 return (sbi_spec_version == SBI_SPEC_VERSION_DEFAULT) ? 1 : 0;
477 }
478
479 /* Get the major version of SBI */
sbi_major_version(void)480 static inline unsigned long sbi_major_version(void)
481 {
482 return (sbi_spec_version >> SBI_SPEC_VERSION_MAJOR_SHIFT) &
483 SBI_SPEC_VERSION_MAJOR_MASK;
484 }
485
486 /* Get the minor version of SBI */
sbi_minor_version(void)487 static inline unsigned long sbi_minor_version(void)
488 {
489 return sbi_spec_version & SBI_SPEC_VERSION_MINOR_MASK;
490 }
491
492 /* Make SBI version */
sbi_mk_version(unsigned long major,unsigned long minor)493 static inline unsigned long sbi_mk_version(unsigned long major,
494 unsigned long minor)
495 {
496 return ((major & SBI_SPEC_VERSION_MAJOR_MASK) << SBI_SPEC_VERSION_MAJOR_SHIFT)
497 | (minor & SBI_SPEC_VERSION_MINOR_MASK);
498 }
499
sbi_err_map_linux_errno(int err)500 static inline int sbi_err_map_linux_errno(int err)
501 {
502 switch (err) {
503 case SBI_SUCCESS:
504 return 0;
505 case SBI_ERR_DENIED:
506 return -EPERM;
507 case SBI_ERR_INVALID_PARAM:
508 return -EINVAL;
509 case SBI_ERR_INVALID_ADDRESS:
510 return -EFAULT;
511 case SBI_ERR_NOT_SUPPORTED:
512 case SBI_ERR_FAILURE:
513 default:
514 return -ENOTSUPP;
515 };
516 }
517
/* True once the DBCN extension has been probed successfully */
extern bool sbi_debug_console_available;
int sbi_debug_console_write(const char *bytes, unsigned int num_bytes);
int sbi_debug_console_read(char *bytes, unsigned int num_bytes);
521
#else /* CONFIG_RISCV_SBI */
/* Stubs for kernels built without SBI support */
static inline int sbi_remote_fence_i(const struct cpumask *cpu_mask) { return -1; }
static inline void sbi_init(void) {}
#endif /* CONFIG_RISCV_SBI */
526
unsigned long riscv_get_mvendorid(void);
unsigned long riscv_get_marchid(void);
unsigned long riscv_cached_mvendorid(unsigned int cpu_id);
unsigned long riscv_cached_marchid(unsigned int cpu_id);
unsigned long riscv_cached_mimpid(unsigned int cpu_id);

#if IS_ENABLED(CONFIG_SMP) && IS_ENABLED(CONFIG_RISCV_SBI)
/* Static key: true when remote fences should go through SBI */
DECLARE_STATIC_KEY_FALSE(riscv_sbi_for_rfence);
#define riscv_use_sbi_for_rfence() \
	static_branch_unlikely(&riscv_sbi_for_rfence)
void sbi_ipi_init(void);
#else
/* Stubs for !SMP or !SBI configurations */
static inline bool riscv_use_sbi_for_rfence(void) { return false; }
static inline void sbi_ipi_init(void) { }
#endif
542
543 #endif /* _ASM_RISCV_SBI_H */
544