// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012 Regents of the University of California
 * Copyright (C) 2014 Darius Rad <darius@bluespec.com>
 * Copyright (C) 2017 SiFive
 */

#include <linux/syscalls.h>
#include <asm/cacheflush.h>
#include <asm/cpufeature.h>
#include <asm/hwprobe.h>
#include <asm/sbi.h>
#include <asm/vector.h>
#include <asm/switch_to.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm-generic/mman-common.h>
#include <vdso/vsyscall.h>

static long riscv_sys_mmap(unsigned long addr, unsigned long len,
                           unsigned long prot, unsigned long flags,
                           unsigned long fd, off_t offset,
                           unsigned long page_shift_offset)
{
        if (unlikely(offset & (~PAGE_MASK >> page_shift_offset)))
                return -EINVAL;

        return ksys_mmap_pgoff(addr, len, prot, flags, fd,
                               offset >> (PAGE_SHIFT - page_shift_offset));
}

#ifdef CONFIG_64BIT
SYSCALL_DEFINE6(mmap, unsigned long, addr, unsigned long, len,
        unsigned long, prot, unsigned long, flags,
        unsigned long, fd, off_t, offset)
{
        return riscv_sys_mmap(addr, len, prot, flags, fd, offset, 0);
}
#endif

#if defined(CONFIG_32BIT) || defined(CONFIG_COMPAT)
SYSCALL_DEFINE6(mmap2, unsigned long, addr, unsigned long, len,
        unsigned long, prot, unsigned long, flags,
        unsigned long, fd, off_t, offset)
{
        /*
         * Note that the shift for mmap2 is constant (12),
         * regardless of PAGE_SIZE
         */
        return riscv_sys_mmap(addr, len, prot, flags, fd, offset, 12);
}
#endif

/*
 * Allows the instruction cache to be flushed from userspace. Despite RISC-V
 * having a direct 'fence.i' instruction available to userspace (which we
 * can't trap!), that's not actually viable when running on Linux because the
 * kernel might schedule a process on another hart. There is no way for
 * userspace to handle this without invoking the kernel (as it doesn't know the
 * thread->hart mappings), so we've defined a RISC-V specific system call to
 * flush the instruction cache.
 *
 * sys_riscv_flush_icache() is defined to flush the instruction cache over an
 * address range, with the flush applying to either all threads or just the
 * caller. We don't currently do anything with the address range, that's just
 * in there for forwards compatibility.
 */
SYSCALL_DEFINE3(riscv_flush_icache, uintptr_t, start, uintptr_t, end,
        uintptr_t, flags)
{
        /* Check the reserved flags. */
        if (unlikely(flags & ~SYS_RISCV_FLUSH_ICACHE_ALL))
                return -EINVAL;

        flush_icache_mm(current->mm, flags & SYS_RISCV_FLUSH_ICACHE_LOCAL);

        return 0;
}
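
/*
 * Illustrative sketch only (not part of the kernel sources): a userspace
 * caller would normally go through a libc wrapper where one exists, but the
 * raw syscall shows the contract. The helper name flush_jit_region() is made
 * up for this example, and it assumes the toolchain's uapi headers expose
 * __NR_riscv_flush_icache:
 *
 *      #include <unistd.h>
 *      #include <sys/syscall.h>
 *
 *      static void flush_jit_region(void *start, void *end)
 *      {
 *              // flags == 0: make the new instructions visible to all
 *              // threads of this process, on whichever hart they run
 *              syscall(__NR_riscv_flush_icache, start, end, 0UL);
 *      }
 */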

/*
 * The hwprobe interface, for allowing userspace to probe to see which features
 * are supported by the hardware. See Documentation/arch/riscv/hwprobe.rst for
 * more details.
 */
static void hwprobe_arch_id(struct riscv_hwprobe *pair,
                            const struct cpumask *cpus)
{
        u64 id = -1ULL;
        bool first = true;
        int cpu;

        for_each_cpu(cpu, cpus) {
                u64 cpu_id;

                switch (pair->key) {
                case RISCV_HWPROBE_KEY_MVENDORID:
                        cpu_id = riscv_cached_mvendorid(cpu);
                        break;
                case RISCV_HWPROBE_KEY_MIMPID:
                        cpu_id = riscv_cached_mimpid(cpu);
                        break;
                case RISCV_HWPROBE_KEY_MARCHID:
                        cpu_id = riscv_cached_marchid(cpu);
                        break;
                }

                if (first) {
                        id = cpu_id;
                        first = false;
                }

                /*
                 * If there's a mismatch for the given set, return -1 in the
                 * value.
                 */
                if (id != cpu_id) {
                        id = -1ULL;
                        break;
                }
        }

        pair->value = id;
}

static void hwprobe_isa_ext0(struct riscv_hwprobe *pair,
                             const struct cpumask *cpus)
{
        int cpu;
        u64 missing = 0;

        pair->value = 0;
        if (has_fpu())
                pair->value |= RISCV_HWPROBE_IMA_FD;

        if (riscv_isa_extension_available(NULL, c))
                pair->value |= RISCV_HWPROBE_IMA_C;

        if (has_vector())
                pair->value |= RISCV_HWPROBE_IMA_V;

        /*
         * Loop through and record extensions that 1) anyone has, and 2) anyone
         * doesn't have.
         */
        for_each_cpu(cpu, cpus) {
                struct riscv_isainfo *isainfo = &hart_isa[cpu];

#define EXT_KEY(ext)                                                            \
        do {                                                                    \
                if (__riscv_isa_extension_available(isainfo->isa, RISCV_ISA_EXT_##ext)) \
                        pair->value |= RISCV_HWPROBE_EXT_##ext;                 \
                else                                                            \
                        missing |= RISCV_HWPROBE_EXT_##ext;                     \
        } while (false)

                /*
                 * Only use EXT_KEY() for extensions which can be exposed to userspace,
                 * regardless of the kernel's configuration, as no other checks, besides
                 * presence in the hart_isa bitmap, are made.
                 */
                EXT_KEY(ZBA);
                EXT_KEY(ZBB);
                EXT_KEY(ZBS);
                EXT_KEY(ZICBOZ);
#undef EXT_KEY
        }

        /* Now turn off reporting features if any CPU is missing it. */
        pair->value &= ~missing;
}

static bool hwprobe_ext0_has(const struct cpumask *cpus, u64 ext)
{
        struct riscv_hwprobe pair;

        hwprobe_isa_ext0(&pair, cpus);
        return (pair.value & ext);
}

static u64 hwprobe_misaligned(const struct cpumask *cpus)
{
        int cpu;
        u64 perf = -1ULL;

        for_each_cpu(cpu, cpus) {
                int this_perf = per_cpu(misaligned_access_speed, cpu);

                if (perf == -1ULL)
                        perf = this_perf;

                if (perf != this_perf) {
                        perf = RISCV_HWPROBE_MISALIGNED_UNKNOWN;
                        break;
                }
        }

        if (perf == -1ULL)
                return RISCV_HWPROBE_MISALIGNED_UNKNOWN;

        return perf;
}

static void hwprobe_one_pair(struct riscv_hwprobe *pair,
                             const struct cpumask *cpus)
{
        switch (pair->key) {
        case RISCV_HWPROBE_KEY_MVENDORID:
        case RISCV_HWPROBE_KEY_MARCHID:
        case RISCV_HWPROBE_KEY_MIMPID:
                hwprobe_arch_id(pair, cpus);
                break;
        /*
         * The kernel already assumes that the base single-letter ISA
         * extensions are supported on all harts, and only supports the
         * IMA base, so just cheat a bit here and tell that to
         * userspace.
         */
        case RISCV_HWPROBE_KEY_BASE_BEHAVIOR:
                pair->value = RISCV_HWPROBE_BASE_BEHAVIOR_IMA;
                break;

        case RISCV_HWPROBE_KEY_IMA_EXT_0:
                hwprobe_isa_ext0(pair, cpus);
                break;

        case RISCV_HWPROBE_KEY_CPUPERF_0:
                pair->value = hwprobe_misaligned(cpus);
                break;

        case RISCV_HWPROBE_KEY_ZICBOZ_BLOCK_SIZE:
                pair->value = 0;
                if (hwprobe_ext0_has(cpus, RISCV_HWPROBE_EXT_ZICBOZ))
                        pair->value = riscv_cboz_block_size;
                break;

        /*
         * For forward compatibility, unknown keys don't fail the whole
         * call, but get their element key set to -1 and value set to 0
         * indicating they're unrecognized.
         */
        default:
                pair->key = -1;
                pair->value = 0;
                break;
        }
}
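
/*
 * Illustrative note (not part of the kernel sources): the contract of
 * hwprobe_one_pair() is "key in, value out". For example, a query such as
 *
 *      struct riscv_hwprobe pair = { .key = RISCV_HWPROBE_KEY_IMA_EXT_0 };
 *
 *      hwprobe_one_pair(&pair, cpu_online_mask);
 *
 * leaves pair.key untouched and fills pair.value with a bitmask such as
 * RISCV_HWPROBE_IMA_FD | RISCV_HWPROBE_IMA_C, containing only the bits that
 * every CPU in the mask supports, while an unrecognized key comes back as
 * pair.key == -1 and pair.value == 0.
 */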

static int do_riscv_hwprobe(struct riscv_hwprobe __user *pairs,
                            size_t pair_count, size_t cpu_count,
                            unsigned long __user *cpus_user,
                            unsigned int flags)
{
        size_t out;
        int ret;
        cpumask_t cpus;

        /* Check the reserved flags. */
        if (flags != 0)
                return -EINVAL;

        /*
         * The interface supports taking in a CPU mask, and returns values that
         * are consistent across that mask. Allow userspace to specify NULL and
         * 0 as a shortcut to all online CPUs.
         */
        cpumask_clear(&cpus);
        if (!cpu_count && !cpus_user) {
                cpumask_copy(&cpus, cpu_online_mask);
        } else {
                if (cpu_count > cpumask_size())
                        cpu_count = cpumask_size();

                ret = copy_from_user(&cpus, cpus_user, cpu_count);
                if (ret)
                        return -EFAULT;

                /*
                 * Userspace must provide at least one online CPU, without that
                 * there's no way to define what is supported.
                 */
                cpumask_and(&cpus, &cpus, cpu_online_mask);
                if (cpumask_empty(&cpus))
                        return -EINVAL;
        }

        for (out = 0; out < pair_count; out++, pairs++) {
                struct riscv_hwprobe pair;

                if (get_user(pair.key, &pairs->key))
                        return -EFAULT;

                pair.value = 0;
                hwprobe_one_pair(&pair, &cpus);
                ret = put_user(pair.key, &pairs->key);
                if (ret == 0)
                        ret = put_user(pair.value, &pairs->value);

                if (ret)
                        return -EFAULT;
        }

        return 0;
}

#ifdef CONFIG_MMU

static int __init init_hwprobe_vdso_data(void)
{
        struct vdso_data *vd = __arch_get_k_vdso_data();
        struct arch_vdso_data *avd = &vd->arch_data;
        u64 id_bitsmash = 0;
        struct riscv_hwprobe pair;
        int key;

        /*
         * Initialize vDSO data with the answers for the "all CPUs" case, to
         * save a syscall in the common case.
         */
        for (key = 0; key <= RISCV_HWPROBE_MAX_KEY; key++) {
                pair.key = key;
                hwprobe_one_pair(&pair, cpu_online_mask);

                WARN_ON_ONCE(pair.key < 0);

                avd->all_cpu_hwprobe_values[key] = pair.value;
                /*
                 * Smash together the vendor, arch, and impl IDs to see if
                 * they're all 0 or any negative.
                 */
                if (key <= RISCV_HWPROBE_KEY_MIMPID)
                        id_bitsmash |= pair.value;
        }

        /*
         * If the arch, vendor, and implementation ID are all the same across
         * all harts, then assume all CPUs are the same, and allow the vDSO to
         * answer queries for arbitrary masks. However if all values are 0 (not
         * populated) or any value returns -1 (varies across CPUs), then the
         * vDSO should defer to the kernel for exotic cpu masks.
         */
        avd->homogeneous_cpus = id_bitsmash != 0 && id_bitsmash != -1;
        return 0;
}

arch_initcall_sync(init_hwprobe_vdso_data);

#endif /* CONFIG_MMU */

SYSCALL_DEFINE5(riscv_hwprobe, struct riscv_hwprobe __user *, pairs,
                size_t, pair_count, size_t, cpu_count, unsigned long __user *,
                cpus, unsigned int, flags)
{
        return do_riscv_hwprobe(pairs, pair_count, cpu_count,
                                cpus, flags);
}

/* Not defined using SYSCALL_DEFINE0 to avoid error injection */
asmlinkage long __riscv_sys_ni_syscall(const struct pt_regs *__unused)
{
        return -ENOSYS;
}
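
/*
 * Illustrative sketch only (not part of the kernel sources): userspace
 * normally reaches this through the vDSO entry point, but the raw syscall
 * shows the contract most directly. The helper name probe_base_behavior()
 * is made up for this example, and it assumes the toolchain's uapi headers
 * provide __NR_riscv_hwprobe, struct riscv_hwprobe and the key constants:
 *
 *      #include <unistd.h>
 *      #include <sys/syscall.h>
 *      #include <asm/hwprobe.h>
 *
 *      static int probe_base_behavior(__u64 *value)
 *      {
 *              struct riscv_hwprobe pair = {
 *                      .key = RISCV_HWPROBE_KEY_BASE_BEHAVIOR,
 *              };
 *
 *              // cpu_count == 0 and cpus == NULL means "all online CPUs";
 *              // flags must be zero or the call fails with -EINVAL.
 *              if (syscall(__NR_riscv_hwprobe, &pair, 1, 0, NULL, 0))
 *                      return -1;
 *
 *              *value = pair.value;
 *              return 0;
 *      }
 */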