/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2015 Regents of the University of California
 * Copyright (c) 2020 Western Digital Corporation or its affiliates.
 */

#ifndef _ASM_RISCV_SBI_H
#define _ASM_RISCV_SBI_H

#include <linux/types.h>
#include <linux/cpumask.h>
#include <linux/jump_label.h>

#ifdef CONFIG_RISCV_SBI
enum sbi_ext_id {
#ifdef CONFIG_RISCV_SBI_V01
	SBI_EXT_0_1_SET_TIMER = 0x0,
	SBI_EXT_0_1_CONSOLE_PUTCHAR = 0x1,
	SBI_EXT_0_1_CONSOLE_GETCHAR = 0x2,
	SBI_EXT_0_1_CLEAR_IPI = 0x3,
	SBI_EXT_0_1_SEND_IPI = 0x4,
	SBI_EXT_0_1_REMOTE_FENCE_I = 0x5,
	SBI_EXT_0_1_REMOTE_SFENCE_VMA = 0x6,
	SBI_EXT_0_1_REMOTE_SFENCE_VMA_ASID = 0x7,
	SBI_EXT_0_1_SHUTDOWN = 0x8,
#endif
	SBI_EXT_BASE = 0x10,
	SBI_EXT_TIME = 0x54494D45,
	SBI_EXT_IPI = 0x735049,
	SBI_EXT_RFENCE = 0x52464E43,
	SBI_EXT_HSM = 0x48534D,
	SBI_EXT_SRST = 0x53525354,
	SBI_EXT_SUSP = 0x53555350,
	SBI_EXT_PMU = 0x504D55,
	SBI_EXT_DBCN = 0x4442434E,
	SBI_EXT_STA = 0x535441,
	SBI_EXT_NACL = 0x4E41434C,
	SBI_EXT_FWFT = 0x46574654,

	/* Experimental extensions must lie within this range */
	SBI_EXT_EXPERIMENTAL_START = 0x08000000,
	SBI_EXT_EXPERIMENTAL_END = 0x08FFFFFF,

	/* Vendor extensions must lie within this range */
	SBI_EXT_VENDOR_START = 0x09000000,
	SBI_EXT_VENDOR_END = 0x09FFFFFF,
};

enum sbi_ext_base_fid {
	SBI_EXT_BASE_GET_SPEC_VERSION = 0,
	SBI_EXT_BASE_GET_IMP_ID,
	SBI_EXT_BASE_GET_IMP_VERSION,
	SBI_EXT_BASE_PROBE_EXT,
	SBI_EXT_BASE_GET_MVENDORID,
	SBI_EXT_BASE_GET_MARCHID,
	SBI_EXT_BASE_GET_MIMPID,
};

enum sbi_ext_time_fid {
	SBI_EXT_TIME_SET_TIMER = 0,
};

enum sbi_ext_ipi_fid {
	SBI_EXT_IPI_SEND_IPI = 0,
};

enum sbi_ext_rfence_fid {
	SBI_EXT_RFENCE_REMOTE_FENCE_I = 0,
	SBI_EXT_RFENCE_REMOTE_SFENCE_VMA,
	SBI_EXT_RFENCE_REMOTE_SFENCE_VMA_ASID,
	SBI_EXT_RFENCE_REMOTE_HFENCE_GVMA_VMID,
	SBI_EXT_RFENCE_REMOTE_HFENCE_GVMA,
	SBI_EXT_RFENCE_REMOTE_HFENCE_VVMA_ASID,
	SBI_EXT_RFENCE_REMOTE_HFENCE_VVMA,
};

enum sbi_ext_hsm_fid {
	SBI_EXT_HSM_HART_START = 0,
	SBI_EXT_HSM_HART_STOP,
	SBI_EXT_HSM_HART_STATUS,
	SBI_EXT_HSM_HART_SUSPEND,
};

enum sbi_hsm_hart_state {
	SBI_HSM_STATE_STARTED = 0,
	SBI_HSM_STATE_STOPPED,
	SBI_HSM_STATE_START_PENDING,
	SBI_HSM_STATE_STOP_PENDING,
	SBI_HSM_STATE_SUSPENDED,
	SBI_HSM_STATE_SUSPEND_PENDING,
	SBI_HSM_STATE_RESUME_PENDING,
};

#define SBI_HSM_SUSP_BASE_MASK		0x7fffffff
#define SBI_HSM_SUSP_NON_RET_BIT	0x80000000
#define SBI_HSM_SUSP_PLAT_BASE		0x10000000

#define SBI_HSM_SUSPEND_RET_DEFAULT	0x00000000
#define SBI_HSM_SUSPEND_RET_PLATFORM	SBI_HSM_SUSP_PLAT_BASE
#define SBI_HSM_SUSPEND_RET_LAST	SBI_HSM_SUSP_BASE_MASK
#define SBI_HSM_SUSPEND_NON_RET_DEFAULT	SBI_HSM_SUSP_NON_RET_BIT
#define SBI_HSM_SUSPEND_NON_RET_PLATFORM	(SBI_HSM_SUSP_NON_RET_BIT | \
						 SBI_HSM_SUSP_PLAT_BASE)
#define SBI_HSM_SUSPEND_NON_RET_LAST		(SBI_HSM_SUSP_NON_RET_BIT | \
						 SBI_HSM_SUSP_BASE_MASK)

enum sbi_ext_srst_fid {
	SBI_EXT_SRST_RESET = 0,
};

enum sbi_srst_reset_type {
	SBI_SRST_RESET_TYPE_SHUTDOWN = 0,
	SBI_SRST_RESET_TYPE_COLD_REBOOT,
	SBI_SRST_RESET_TYPE_WARM_REBOOT,
};

enum sbi_srst_reset_reason {
	SBI_SRST_RESET_REASON_NONE = 0,
	SBI_SRST_RESET_REASON_SYS_FAILURE,
};
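/*
 * Illustrative sketch (not a definition from this header): a System Reset
 * (SRST) request combines one reset type with one reset reason from the
 * enums above. Assuming the generic sbi_ecall() wrapper declared later in
 * this file, a cold reboot with no particular reason would look roughly
 * like:
 *
 *	sbi_ecall(SBI_EXT_SRST, SBI_EXT_SRST_RESET,
 *		  SBI_SRST_RESET_TYPE_COLD_REBOOT,
 *		  SBI_SRST_RESET_REASON_NONE, 0, 0, 0, 0);
 *
 * The trailing zeros only pad out the fixed six-argument calling convention.
 */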
enum sbi_ext_susp_fid {
	SBI_EXT_SUSP_SYSTEM_SUSPEND = 0,
};

enum sbi_ext_susp_sleep_type {
	SBI_SUSP_SLEEP_TYPE_SUSPEND_TO_RAM = 0,
};

enum sbi_ext_pmu_fid {
	SBI_EXT_PMU_NUM_COUNTERS = 0,
	SBI_EXT_PMU_COUNTER_GET_INFO,
	SBI_EXT_PMU_COUNTER_CFG_MATCH,
	SBI_EXT_PMU_COUNTER_START,
	SBI_EXT_PMU_COUNTER_STOP,
	SBI_EXT_PMU_COUNTER_FW_READ,
	SBI_EXT_PMU_COUNTER_FW_READ_HI,
	SBI_EXT_PMU_SNAPSHOT_SET_SHMEM,
};

union sbi_pmu_ctr_info {
	unsigned long value;
	struct {
		unsigned long csr:12;
		unsigned long width:6;
#if __riscv_xlen == 32
		unsigned long reserved:13;
#else
		unsigned long reserved:45;
#endif
		unsigned long type:1;
	};
};

/* Data structure to contain the PMU snapshot data */
struct riscv_pmu_snapshot_data {
	u64 ctr_overflow_mask;
	u64 ctr_values[64];
	u64 reserved[447];
};

#define RISCV_PMU_RAW_EVENT_MASK	GENMASK_ULL(47, 0)
#define RISCV_PMU_PLAT_FW_EVENT_MASK	GENMASK_ULL(61, 0)
#define RISCV_PMU_RAW_EVENT_IDX		0x20000
#define RISCV_PLAT_FW_EVENT		0xFFFF

/** General PMU event codes specified in the SBI PMU extension */
enum sbi_pmu_hw_generic_events_t {
	SBI_PMU_HW_NO_EVENT = 0,
	SBI_PMU_HW_CPU_CYCLES = 1,
	SBI_PMU_HW_INSTRUCTIONS = 2,
	SBI_PMU_HW_CACHE_REFERENCES = 3,
	SBI_PMU_HW_CACHE_MISSES = 4,
	SBI_PMU_HW_BRANCH_INSTRUCTIONS = 5,
	SBI_PMU_HW_BRANCH_MISSES = 6,
	SBI_PMU_HW_BUS_CYCLES = 7,
	SBI_PMU_HW_STALLED_CYCLES_FRONTEND = 8,
	SBI_PMU_HW_STALLED_CYCLES_BACKEND = 9,
	SBI_PMU_HW_REF_CPU_CYCLES = 10,

	SBI_PMU_HW_GENERAL_MAX,
};

/**
 * Special "firmware" events provided by the firmware, even if the hardware
 * does not support performance events. These events are encoded as a raw
 * event type in the Linux kernel perf framework.
 */
enum sbi_pmu_fw_generic_events_t {
	SBI_PMU_FW_MISALIGNED_LOAD = 0,
	SBI_PMU_FW_MISALIGNED_STORE = 1,
	SBI_PMU_FW_ACCESS_LOAD = 2,
	SBI_PMU_FW_ACCESS_STORE = 3,
	SBI_PMU_FW_ILLEGAL_INSN = 4,
	SBI_PMU_FW_SET_TIMER = 5,
	SBI_PMU_FW_IPI_SENT = 6,
	SBI_PMU_FW_IPI_RCVD = 7,
	SBI_PMU_FW_FENCE_I_SENT = 8,
	SBI_PMU_FW_FENCE_I_RCVD = 9,
	SBI_PMU_FW_SFENCE_VMA_SENT = 10,
	SBI_PMU_FW_SFENCE_VMA_RCVD = 11,
	SBI_PMU_FW_SFENCE_VMA_ASID_SENT = 12,
	SBI_PMU_FW_SFENCE_VMA_ASID_RCVD = 13,

	SBI_PMU_FW_HFENCE_GVMA_SENT = 14,
	SBI_PMU_FW_HFENCE_GVMA_RCVD = 15,
	SBI_PMU_FW_HFENCE_GVMA_VMID_SENT = 16,
	SBI_PMU_FW_HFENCE_GVMA_VMID_RCVD = 17,

	SBI_PMU_FW_HFENCE_VVMA_SENT = 18,
	SBI_PMU_FW_HFENCE_VVMA_RCVD = 19,
	SBI_PMU_FW_HFENCE_VVMA_ASID_SENT = 20,
	SBI_PMU_FW_HFENCE_VVMA_ASID_RCVD = 21,
	SBI_PMU_FW_MAX,
};

/* SBI PMU event types */
enum sbi_pmu_event_type {
	SBI_PMU_EVENT_TYPE_HW = 0x0,
	SBI_PMU_EVENT_TYPE_CACHE = 0x1,
	SBI_PMU_EVENT_TYPE_RAW = 0x2,
	SBI_PMU_EVENT_TYPE_FW = 0xf,
};

/* SBI PMU counter types */
enum sbi_pmu_ctr_type {
	SBI_PMU_CTR_TYPE_HW = 0x0,
	SBI_PMU_CTR_TYPE_FW,
};

/* Helper macros to decode event idx */
#define SBI_PMU_EVENT_IDX_OFFSET	20
#define SBI_PMU_EVENT_IDX_MASK		0xFFFFF
#define SBI_PMU_EVENT_IDX_CODE_MASK	0xFFFF
#define SBI_PMU_EVENT_IDX_TYPE_MASK	0xF0000
#define SBI_PMU_EVENT_RAW_IDX		0x20000
#define SBI_PMU_FIXED_CTR_MASK		0x07

#define SBI_PMU_EVENT_CACHE_ID_CODE_MASK	0xFFF8
#define SBI_PMU_EVENT_CACHE_OP_ID_CODE_MASK	0x06
#define SBI_PMU_EVENT_CACHE_RESULT_ID_CODE_MASK	0x01

#define SBI_PMU_EVENT_CACHE_ID_SHIFT	3
#define SBI_PMU_EVENT_CACHE_OP_SHIFT	1

#define SBI_PMU_EVENT_IDX_INVALID	0xFFFFFFFF
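/*
 * Worked example of the decode helpers above (derived from the masks, not
 * an additional definition): an event_idx is a 20-bit value whose upper
 * four bits [19:16] give the event type and whose low 16 bits give the
 * event code. For event_idx = 0x10003 (a cache event with code 3):
 *
 *	type = (0x10003 & SBI_PMU_EVENT_IDX_TYPE_MASK) >> 16;	// 0x1, CACHE
 *	code =  0x10003 & SBI_PMU_EVENT_IDX_CODE_MASK;		// 0x0003
 *
 * For cache events the code splits further into cache_id (code >> 3),
 * op_id ((code & 0x06) >> 1) and result_id (code & 0x01).
 */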
/* Flags defined for config matching function */
#define SBI_PMU_CFG_FLAG_SKIP_MATCH	BIT(0)
#define SBI_PMU_CFG_FLAG_CLEAR_VALUE	BIT(1)
#define SBI_PMU_CFG_FLAG_AUTO_START	BIT(2)
#define SBI_PMU_CFG_FLAG_SET_VUINH	BIT(3)
#define SBI_PMU_CFG_FLAG_SET_VSINH	BIT(4)
#define SBI_PMU_CFG_FLAG_SET_UINH	BIT(5)
#define SBI_PMU_CFG_FLAG_SET_SINH	BIT(6)
#define SBI_PMU_CFG_FLAG_SET_MINH	BIT(7)

/* Flags defined for counter start function */
#define SBI_PMU_START_FLAG_SET_INIT_VALUE	BIT(0)
#define SBI_PMU_START_FLAG_INIT_SNAPSHOT	BIT(1)

/* Flags defined for counter stop function */
#define SBI_PMU_STOP_FLAG_RESET		BIT(0)
#define SBI_PMU_STOP_FLAG_TAKE_SNAPSHOT	BIT(1)

enum sbi_ext_dbcn_fid {
	SBI_EXT_DBCN_CONSOLE_WRITE = 0,
	SBI_EXT_DBCN_CONSOLE_READ = 1,
	SBI_EXT_DBCN_CONSOLE_WRITE_BYTE = 2,
};

/* SBI STA (steal-time accounting) extension */
enum sbi_ext_sta_fid {
	SBI_EXT_STA_STEAL_TIME_SET_SHMEM = 0,
};

struct sbi_sta_struct {
	__le32 sequence;
	__le32 flags;
	__le64 steal;
	u8 preempted;
	u8 pad[47];
} __packed;

#define SBI_SHMEM_DISABLE	-1

enum sbi_ext_nacl_fid {
	SBI_EXT_NACL_PROBE_FEATURE = 0x0,
	SBI_EXT_NACL_SET_SHMEM = 0x1,
	SBI_EXT_NACL_SYNC_CSR = 0x2,
	SBI_EXT_NACL_SYNC_HFENCE = 0x3,
	SBI_EXT_NACL_SYNC_SRET = 0x4,
};

enum sbi_ext_nacl_feature {
	SBI_NACL_FEAT_SYNC_CSR = 0x0,
	SBI_NACL_FEAT_SYNC_HFENCE = 0x1,
	SBI_NACL_FEAT_SYNC_SRET = 0x2,
	SBI_NACL_FEAT_AUTOSWAP_CSR = 0x3,
};

#define SBI_NACL_SHMEM_ADDR_SHIFT	12
#define SBI_NACL_SHMEM_SCRATCH_OFFSET	0x0000
#define SBI_NACL_SHMEM_SCRATCH_SIZE	0x1000
#define SBI_NACL_SHMEM_SRET_OFFSET	0x0000
#define SBI_NACL_SHMEM_SRET_SIZE	0x0200
#define SBI_NACL_SHMEM_AUTOSWAP_OFFSET	(SBI_NACL_SHMEM_SRET_OFFSET + \
					 SBI_NACL_SHMEM_SRET_SIZE)
#define SBI_NACL_SHMEM_AUTOSWAP_SIZE	0x0080
#define SBI_NACL_SHMEM_UNUSED_OFFSET	(SBI_NACL_SHMEM_AUTOSWAP_OFFSET + \
					 SBI_NACL_SHMEM_AUTOSWAP_SIZE)
#define SBI_NACL_SHMEM_UNUSED_SIZE	0x0580
#define SBI_NACL_SHMEM_HFENCE_OFFSET	(SBI_NACL_SHMEM_UNUSED_OFFSET + \
					 SBI_NACL_SHMEM_UNUSED_SIZE)
#define SBI_NACL_SHMEM_HFENCE_SIZE	0x0780
#define SBI_NACL_SHMEM_DBITMAP_OFFSET	(SBI_NACL_SHMEM_HFENCE_OFFSET + \
					 SBI_NACL_SHMEM_HFENCE_SIZE)
#define SBI_NACL_SHMEM_DBITMAP_SIZE	0x0080
#define SBI_NACL_SHMEM_CSR_OFFSET	(SBI_NACL_SHMEM_DBITMAP_OFFSET + \
					 SBI_NACL_SHMEM_DBITMAP_SIZE)
#define SBI_NACL_SHMEM_CSR_SIZE		((__riscv_xlen / 8) * 1024)
#define SBI_NACL_SHMEM_SIZE		(SBI_NACL_SHMEM_CSR_OFFSET + \
					 SBI_NACL_SHMEM_CSR_SIZE)

#define SBI_NACL_SHMEM_CSR_INDEX(__csr_num) \
	((((__csr_num) & 0xc00) >> 2) | ((__csr_num) & 0xff))

#define SBI_NACL_SHMEM_HFENCE_ENTRY_SZ	((__riscv_xlen / 8) * 4)
#define SBI_NACL_SHMEM_HFENCE_ENTRY_MAX	\
	(SBI_NACL_SHMEM_HFENCE_SIZE / \
	 SBI_NACL_SHMEM_HFENCE_ENTRY_SZ)
#define SBI_NACL_SHMEM_HFENCE_ENTRY(__num) \
	(SBI_NACL_SHMEM_HFENCE_OFFSET + \
	 (__num) * SBI_NACL_SHMEM_HFENCE_ENTRY_SZ)
#define SBI_NACL_SHMEM_HFENCE_ENTRY_CONFIG(__num) \
	SBI_NACL_SHMEM_HFENCE_ENTRY(__num)
#define SBI_NACL_SHMEM_HFENCE_ENTRY_PNUM(__num) \
	(SBI_NACL_SHMEM_HFENCE_ENTRY(__num) + (__riscv_xlen / 8))
#define SBI_NACL_SHMEM_HFENCE_ENTRY_PCOUNT(__num) \
	(SBI_NACL_SHMEM_HFENCE_ENTRY(__num) + \
	 ((__riscv_xlen / 8) * 3))
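/*
 * Worked example (derived from the layout macros above, shown only for
 * clarity): on rv64 each HFENCE entry is four XLEN-sized words (32 bytes),
 * so the HFENCE area holds 0x780 / 0x20 = 60 entries starting at shared
 * memory offset 0x800. Entry 1 therefore has its config word at 0x820,
 * its pnum at 0x828 and its pcount at 0x838; the remaining word at 0x830
 * has no helper macro in this header.
 */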
#define SBI_NACL_SHMEM_HFENCE_CONFIG_PEND_BITS		1
#define SBI_NACL_SHMEM_HFENCE_CONFIG_PEND_SHIFT		\
	(__riscv_xlen - SBI_NACL_SHMEM_HFENCE_CONFIG_PEND_BITS)
#define SBI_NACL_SHMEM_HFENCE_CONFIG_PEND_MASK		\
	((1UL << SBI_NACL_SHMEM_HFENCE_CONFIG_PEND_BITS) - 1)
#define SBI_NACL_SHMEM_HFENCE_CONFIG_PEND		\
	(SBI_NACL_SHMEM_HFENCE_CONFIG_PEND_MASK << \
	 SBI_NACL_SHMEM_HFENCE_CONFIG_PEND_SHIFT)

#define SBI_NACL_SHMEM_HFENCE_CONFIG_RSVD1_BITS		3
#define SBI_NACL_SHMEM_HFENCE_CONFIG_RSVD1_SHIFT	\
	(SBI_NACL_SHMEM_HFENCE_CONFIG_PEND_SHIFT - \
	 SBI_NACL_SHMEM_HFENCE_CONFIG_RSVD1_BITS)

#define SBI_NACL_SHMEM_HFENCE_CONFIG_TYPE_BITS		4
#define SBI_NACL_SHMEM_HFENCE_CONFIG_TYPE_SHIFT		\
	(SBI_NACL_SHMEM_HFENCE_CONFIG_RSVD1_SHIFT - \
	 SBI_NACL_SHMEM_HFENCE_CONFIG_TYPE_BITS)
#define SBI_NACL_SHMEM_HFENCE_CONFIG_TYPE_MASK		\
	((1UL << SBI_NACL_SHMEM_HFENCE_CONFIG_TYPE_BITS) - 1)

#define SBI_NACL_SHMEM_HFENCE_TYPE_GVMA			0x0
#define SBI_NACL_SHMEM_HFENCE_TYPE_GVMA_ALL		0x1
#define SBI_NACL_SHMEM_HFENCE_TYPE_GVMA_VMID		0x2
#define SBI_NACL_SHMEM_HFENCE_TYPE_GVMA_VMID_ALL	0x3
#define SBI_NACL_SHMEM_HFENCE_TYPE_VVMA			0x4
#define SBI_NACL_SHMEM_HFENCE_TYPE_VVMA_ALL		0x5
#define SBI_NACL_SHMEM_HFENCE_TYPE_VVMA_ASID		0x6
#define SBI_NACL_SHMEM_HFENCE_TYPE_VVMA_ASID_ALL	0x7

#define SBI_NACL_SHMEM_HFENCE_CONFIG_RSVD2_BITS		1
#define SBI_NACL_SHMEM_HFENCE_CONFIG_RSVD2_SHIFT	\
	(SBI_NACL_SHMEM_HFENCE_CONFIG_TYPE_SHIFT - \
	 SBI_NACL_SHMEM_HFENCE_CONFIG_RSVD2_BITS)

#define SBI_NACL_SHMEM_HFENCE_CONFIG_ORDER_BITS		7
#define SBI_NACL_SHMEM_HFENCE_CONFIG_ORDER_SHIFT	\
	(SBI_NACL_SHMEM_HFENCE_CONFIG_RSVD2_SHIFT - \
	 SBI_NACL_SHMEM_HFENCE_CONFIG_ORDER_BITS)
#define SBI_NACL_SHMEM_HFENCE_CONFIG_ORDER_MASK		\
	((1UL << SBI_NACL_SHMEM_HFENCE_CONFIG_ORDER_BITS) - 1)
#define SBI_NACL_SHMEM_HFENCE_ORDER_BASE		12

#if __riscv_xlen == 32
#define SBI_NACL_SHMEM_HFENCE_CONFIG_ASID_BITS		9
#define SBI_NACL_SHMEM_HFENCE_CONFIG_VMID_BITS		7
#else
#define SBI_NACL_SHMEM_HFENCE_CONFIG_ASID_BITS		16
#define SBI_NACL_SHMEM_HFENCE_CONFIG_VMID_BITS		14
#endif
#define SBI_NACL_SHMEM_HFENCE_CONFIG_VMID_SHIFT	\
	SBI_NACL_SHMEM_HFENCE_CONFIG_ASID_BITS
#define SBI_NACL_SHMEM_HFENCE_CONFIG_ASID_MASK	\
	((1UL << SBI_NACL_SHMEM_HFENCE_CONFIG_ASID_BITS) - 1)
#define SBI_NACL_SHMEM_HFENCE_CONFIG_VMID_MASK	\
	((1UL << SBI_NACL_SHMEM_HFENCE_CONFIG_VMID_BITS) - 1)

#define SBI_NACL_SHMEM_AUTOSWAP_FLAG_HSTATUS	BIT(0)
#define SBI_NACL_SHMEM_AUTOSWAP_HSTATUS		((__riscv_xlen / 8) * 1)

#define SBI_NACL_SHMEM_SRET_X(__i)		((__riscv_xlen / 8) * (__i))
#define SBI_NACL_SHMEM_SRET_X_LAST		31

/* SBI function IDs for FW feature extension */
#define SBI_EXT_FWFT_SET	0x0
#define SBI_EXT_FWFT_GET	0x1

enum sbi_fwft_feature_t {
	SBI_FWFT_MISALIGNED_EXC_DELEG = 0x0,
	SBI_FWFT_LANDING_PAD = 0x1,
	SBI_FWFT_SHADOW_STACK = 0x2,
	SBI_FWFT_DOUBLE_TRAP = 0x3,
	SBI_FWFT_PTE_AD_HW_UPDATING = 0x4,
	SBI_FWFT_POINTER_MASKING_PMLEN = 0x5,
	SBI_FWFT_LOCAL_RESERVED_START = 0x6,
	SBI_FWFT_LOCAL_RESERVED_END = 0x3fffffff,
	SBI_FWFT_LOCAL_PLATFORM_START = 0x40000000,
	SBI_FWFT_LOCAL_PLATFORM_END = 0x7fffffff,

	SBI_FWFT_GLOBAL_RESERVED_START = 0x80000000,
	SBI_FWFT_GLOBAL_RESERVED_END = 0xbfffffff,
	SBI_FWFT_GLOBAL_PLATFORM_START = 0xc0000000,
	SBI_FWFT_GLOBAL_PLATFORM_END = 0xffffffff,
};
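/*
 * The feature ID space above is split by its two top bits: bit 31
 * distinguishes global features (0x80000000 and up) from local ones, and
 * bit 30 selects the platform-specific half of each range; the
 * SBI_FWFT_*_FEATURE_BIT macros below name those bits. As an illustrative
 * sketch (not a definition from this header), enabling hardware A/D bit
 * updates on all online harts via the helper declared later in this file
 * might look like:
 *
 *	sbi_fwft_set_online_cpus(SBI_FWFT_PTE_AD_HW_UPDATING, 1, 0);
 */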
#define SBI_FWFT_PLATFORM_FEATURE_BIT	BIT(30)
#define SBI_FWFT_GLOBAL_FEATURE_BIT	BIT(31)

#define SBI_FWFT_SET_FLAG_LOCK		BIT(0)

/* SBI spec version fields */
#define SBI_SPEC_VERSION_DEFAULT	0x1
#define SBI_SPEC_VERSION_MAJOR_SHIFT	24
#define SBI_SPEC_VERSION_MAJOR_MASK	0x7f
#define SBI_SPEC_VERSION_MINOR_MASK	0xffffff

/* SBI return error codes */
#define SBI_SUCCESS		0
#define SBI_ERR_FAILURE		-1
#define SBI_ERR_NOT_SUPPORTED	-2
#define SBI_ERR_INVALID_PARAM	-3
#define SBI_ERR_DENIED		-4
#define SBI_ERR_INVALID_ADDRESS	-5
#define SBI_ERR_ALREADY_AVAILABLE	-6
#define SBI_ERR_ALREADY_STARTED	-7
#define SBI_ERR_ALREADY_STOPPED	-8
#define SBI_ERR_NO_SHMEM	-9
#define SBI_ERR_INVALID_STATE	-10
#define SBI_ERR_BAD_RANGE	-11
#define SBI_ERR_TIMEOUT		-12
#define SBI_ERR_IO		-13
#define SBI_ERR_DENIED_LOCKED	-14

extern unsigned long sbi_spec_version;
struct sbiret {
	long error;
	long value;
};

void sbi_init(void);
long __sbi_base_ecall(int fid);
struct sbiret __sbi_ecall(unsigned long arg0, unsigned long arg1,
			  unsigned long arg2, unsigned long arg3,
			  unsigned long arg4, unsigned long arg5,
			  int fid, int ext);
#define sbi_ecall(e, f, a0, a1, a2, a3, a4, a5)	\
		__sbi_ecall(a0, a1, a2, a3, a4, a5, f, e)

#ifdef CONFIG_RISCV_SBI_V01
void sbi_console_putchar(int ch);
int sbi_console_getchar(void);
#else
static inline void sbi_console_putchar(int ch) { }
static inline int sbi_console_getchar(void) { return -ENOENT; }
#endif
long sbi_get_mvendorid(void);
long sbi_get_marchid(void);
long sbi_get_mimpid(void);
void sbi_set_timer(uint64_t stime_value);
void sbi_shutdown(void);
void sbi_send_ipi(unsigned int cpu);
int sbi_remote_fence_i(const struct cpumask *cpu_mask);

int sbi_remote_sfence_vma_asid(const struct cpumask *cpu_mask,
			       unsigned long start,
			       unsigned long size,
			       unsigned long asid);
int sbi_remote_hfence_gvma(const struct cpumask *cpu_mask,
			   unsigned long start,
			   unsigned long size);
int sbi_remote_hfence_gvma_vmid(const struct cpumask *cpu_mask,
				unsigned long start,
				unsigned long size,
				unsigned long vmid);
int sbi_remote_hfence_vvma(const struct cpumask *cpu_mask,
			   unsigned long start,
			   unsigned long size);
int sbi_remote_hfence_vvma_asid(const struct cpumask *cpu_mask,
				unsigned long start,
				unsigned long size,
				unsigned long asid);
long sbi_probe_extension(int ext);

int sbi_fwft_set(u32 feature, unsigned long value, unsigned long flags);
int sbi_fwft_set_cpumask(const cpumask_t *mask, u32 feature,
			 unsigned long value, unsigned long flags);
/**
 * sbi_fwft_set_online_cpus() - Set a feature on all online CPUs
 * @feature: The feature to be set
 * @value: The feature value to be set
 * @flags: FWFT feature set flags
 *
 * Return: 0 on success, appropriate Linux error code otherwise.
 */
static inline int sbi_fwft_set_online_cpus(u32 feature, unsigned long value,
					   unsigned long flags)
{
	return sbi_fwft_set_cpumask(cpu_online_mask, feature, value, flags);
}

/* Check if current SBI specification version is 0.1 */
static inline int sbi_spec_is_0_1(void)
{
	return (sbi_spec_version == SBI_SPEC_VERSION_DEFAULT) ? 1 : 0;
}

/* Get the major version of SBI */
static inline unsigned long sbi_major_version(void)
{
	return (sbi_spec_version >> SBI_SPEC_VERSION_MAJOR_SHIFT) &
		SBI_SPEC_VERSION_MAJOR_MASK;
}

/* Get the minor version of SBI */
static inline unsigned long sbi_minor_version(void)
{
	return sbi_spec_version & SBI_SPEC_VERSION_MINOR_MASK;
}

/* Make SBI version */
static inline unsigned long sbi_mk_version(unsigned long major,
					   unsigned long minor)
{
	return ((major & SBI_SPEC_VERSION_MAJOR_MASK) << SBI_SPEC_VERSION_MAJOR_SHIFT)
		| (minor & SBI_SPEC_VERSION_MINOR_MASK);
}
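/*
 * Worked example for the version helpers above (derived from the field
 * masks, not an additional definition): sbi_mk_version(2, 0) packs the
 * major version into bits [30:24] and the minor version into bits [23:0],
 * yielding 0x02000000; sbi_major_version() and sbi_minor_version() then
 * recover 2 and 0 from that value.
 */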
static inline int sbi_err_map_linux_errno(int err)
{
	switch (err) {
	case SBI_SUCCESS:
		return 0;
	case SBI_ERR_DENIED:
	case SBI_ERR_DENIED_LOCKED:
		return -EPERM;
	case SBI_ERR_INVALID_PARAM:
	case SBI_ERR_INVALID_STATE:
		return -EINVAL;
	case SBI_ERR_BAD_RANGE:
		return -ERANGE;
	case SBI_ERR_INVALID_ADDRESS:
		return -EFAULT;
	case SBI_ERR_NO_SHMEM:
		return -ENOMEM;
	case SBI_ERR_TIMEOUT:
		return -ETIMEDOUT;
	case SBI_ERR_IO:
		return -EIO;
	case SBI_ERR_NOT_SUPPORTED:
	case SBI_ERR_FAILURE:
	default:
		return -ENOTSUPP;
	}
}

extern bool sbi_debug_console_available;
int sbi_debug_console_write(const char *bytes, unsigned int num_bytes);
int sbi_debug_console_read(char *bytes, unsigned int num_bytes);

#else /* CONFIG_RISCV_SBI */
static inline int sbi_remote_fence_i(const struct cpumask *cpu_mask) { return -1; }
static inline void sbi_init(void) {}
#endif /* CONFIG_RISCV_SBI */

unsigned long riscv_get_mvendorid(void);
unsigned long riscv_get_marchid(void);
unsigned long riscv_cached_mvendorid(unsigned int cpu_id);
unsigned long riscv_cached_marchid(unsigned int cpu_id);
unsigned long riscv_cached_mimpid(unsigned int cpu_id);

#if IS_ENABLED(CONFIG_SMP) && IS_ENABLED(CONFIG_RISCV_SBI)
DECLARE_STATIC_KEY_FALSE(riscv_sbi_for_rfence);
#define riscv_use_sbi_for_rfence() \
	static_branch_unlikely(&riscv_sbi_for_rfence)
void sbi_ipi_init(void);
#else
static inline bool riscv_use_sbi_for_rfence(void) { return false; }
static inline void sbi_ipi_init(void) { }
#endif

#endif /* _ASM_RISCV_SBI_H */