// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012 Regents of the University of California
 * Copyright (C) 2014 Darius Rad <darius@bluespec.com>
 * Copyright (C) 2017 SiFive
 */

#include <linux/syscalls.h>
#include <asm/cacheflush.h>
#include <asm/cpufeature.h>
#include <asm/hwprobe.h>
#include <asm/sbi.h>
#include <asm/vector.h>
#include <asm/switch_to.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm-generic/mman-common.h>
#include <vdso/vsyscall.h>

static long riscv_sys_mmap(unsigned long addr, unsigned long len,
			   unsigned long prot, unsigned long flags,
			   unsigned long fd, off_t offset,
			   unsigned long page_shift_offset)
{
	if (unlikely(offset & (~PAGE_MASK >> page_shift_offset)))
		return -EINVAL;

	return ksys_mmap_pgoff(addr, len, prot, flags, fd,
			       offset >> (PAGE_SHIFT - page_shift_offset));
}

#ifdef CONFIG_64BIT
SYSCALL_DEFINE6(mmap, unsigned long, addr, unsigned long, len,
		unsigned long, prot, unsigned long, flags,
		unsigned long, fd, off_t, offset)
{
	return riscv_sys_mmap(addr, len, prot, flags, fd, offset, 0);
}
#endif

#if defined(CONFIG_32BIT) || defined(CONFIG_COMPAT)
SYSCALL_DEFINE6(mmap2, unsigned long, addr, unsigned long, len,
		unsigned long, prot, unsigned long, flags,
		unsigned long, fd, off_t, offset)
{
	/*
	 * Note that the shift for mmap2 is constant (12),
	 * regardless of PAGE_SIZE
	 */
	return riscv_sys_mmap(addr, len, prot, flags, fd, offset, 12);
}
#endif

/*
 * Allows the instruction cache to be flushed from userspace. Despite RISC-V
 * having a direct 'fence.i' instruction available to userspace (which we
 * can't trap!), that's not actually viable when running on Linux because the
 * kernel might schedule a process on another hart. There is no way for
 * userspace to handle this without invoking the kernel (as it doesn't know
 * the thread->hart mappings), so we've defined a RISC-V specific system call
 * to flush the instruction cache.
 *
 * sys_riscv_flush_icache() is defined to flush the instruction cache over an
 * address range, with the flush applying to either all threads or just the
 * caller. We don't currently do anything with the address range; that's just
 * there for forwards compatibility.
 */
SYSCALL_DEFINE3(riscv_flush_icache, uintptr_t, start, uintptr_t, end,
	uintptr_t, flags)
{
	/* Check the reserved flags. */
	if (unlikely(flags & ~SYS_RISCV_FLUSH_ICACHE_ALL))
		return -EINVAL;

	flush_icache_mm(current->mm, flags & SYS_RISCV_FLUSH_ICACHE_LOCAL);

	return 0;
}
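
/*
 * A minimal userspace sketch (not part of the kernel build) of invoking the
 * syscall above after writing instructions to memory, assuming the uapi
 * headers expose __NR_riscv_flush_icache and the libc syscall(2) wrapper is
 * available:
 *
 *	#include <unistd.h>
 *	#include <sys/syscall.h>
 *	#include <asm/unistd.h>
 *
 *	static void sync_icache(void *start, void *end)
 *	{
 *		// flags == 0 flushes on behalf of all threads in the process;
 *		// SYS_RISCV_FLUSH_ICACHE_LOCAL would restrict it to the caller.
 *		syscall(__NR_riscv_flush_icache, start, end, 0UL);
 *	}
 */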

/*
 * The hwprobe interface, for allowing userspace to probe to see which features
 * are supported by the hardware. See Documentation/riscv/hwprobe.rst for more
 * details.
 */
static void hwprobe_arch_id(struct riscv_hwprobe *pair,
			    const struct cpumask *cpus)
{
	u64 id = -1ULL;
	bool first = true;
	int cpu;

	for_each_cpu(cpu, cpus) {
		u64 cpu_id;

		switch (pair->key) {
		case RISCV_HWPROBE_KEY_MVENDORID:
			cpu_id = riscv_cached_mvendorid(cpu);
			break;
		case RISCV_HWPROBE_KEY_MIMPID:
			cpu_id = riscv_cached_mimpid(cpu);
			break;
		case RISCV_HWPROBE_KEY_MARCHID:
			cpu_id = riscv_cached_marchid(cpu);
			break;
		}

		if (first) {
			id = cpu_id;
			first = false;
		}

		/*
		 * If there's a mismatch for the given set, return -1 in the
		 * value.
		 */
		if (id != cpu_id) {
			id = -1ULL;
			break;
		}
	}

	pair->value = id;
}

static void hwprobe_isa_ext0(struct riscv_hwprobe *pair,
			     const struct cpumask *cpus)
{
	int cpu;
	u64 missing = 0;

	pair->value = 0;
	if (has_fpu())
		pair->value |= RISCV_HWPROBE_IMA_FD;

	if (riscv_isa_extension_available(NULL, c))
		pair->value |= RISCV_HWPROBE_IMA_C;

	if (has_vector())
		pair->value |= RISCV_HWPROBE_IMA_V;

	/*
	 * Loop through and record extensions that 1) anyone has, and 2) anyone
	 * doesn't have.
	 */
	for_each_cpu(cpu, cpus) {
		struct riscv_isainfo *isainfo = &hart_isa[cpu];

		if (riscv_isa_extension_available(isainfo->isa, ZBA))
			pair->value |= RISCV_HWPROBE_EXT_ZBA;
		else
			missing |= RISCV_HWPROBE_EXT_ZBA;

		if (riscv_isa_extension_available(isainfo->isa, ZBB))
			pair->value |= RISCV_HWPROBE_EXT_ZBB;
		else
			missing |= RISCV_HWPROBE_EXT_ZBB;

		if (riscv_isa_extension_available(isainfo->isa, ZBS))
			pair->value |= RISCV_HWPROBE_EXT_ZBS;
		else
			missing |= RISCV_HWPROBE_EXT_ZBS;
	}

	/* Now turn off reporting features if any CPU is missing it. */
	pair->value &= ~missing;
}

static u64 hwprobe_misaligned(const struct cpumask *cpus)
{
	int cpu;
	u64 perf = -1ULL;

	for_each_cpu(cpu, cpus) {
		int this_perf = per_cpu(misaligned_access_speed, cpu);

		if (perf == -1ULL)
			perf = this_perf;

		if (perf != this_perf) {
			perf = RISCV_HWPROBE_MISALIGNED_UNKNOWN;
			break;
		}
	}

	if (perf == -1ULL)
		return RISCV_HWPROBE_MISALIGNED_UNKNOWN;

	return perf;
}

static void hwprobe_one_pair(struct riscv_hwprobe *pair,
			     const struct cpumask *cpus)
{
	switch (pair->key) {
	case RISCV_HWPROBE_KEY_MVENDORID:
	case RISCV_HWPROBE_KEY_MARCHID:
	case RISCV_HWPROBE_KEY_MIMPID:
		hwprobe_arch_id(pair, cpus);
		break;
	/*
	 * The kernel already assumes that the base single-letter ISA
	 * extensions are supported on all harts, and only supports the
	 * IMA base, so just cheat a bit here and tell that to
	 * userspace.
	 */
	case RISCV_HWPROBE_KEY_BASE_BEHAVIOR:
		pair->value = RISCV_HWPROBE_BASE_BEHAVIOR_IMA;
		break;

	case RISCV_HWPROBE_KEY_IMA_EXT_0:
		hwprobe_isa_ext0(pair, cpus);
		break;

	case RISCV_HWPROBE_KEY_CPUPERF_0:
		pair->value = hwprobe_misaligned(cpus);
		break;

	/*
	 * For forward compatibility, unknown keys don't fail the whole
	 * call, but get their element key set to -1 and value set to 0
	 * indicating they're unrecognized.
	 */
	default:
		pair->key = -1;
		pair->value = 0;
		break;
	}
}

static int do_riscv_hwprobe(struct riscv_hwprobe __user *pairs,
			    size_t pair_count, size_t cpu_count,
			    unsigned long __user *cpus_user,
			    unsigned int flags)
{
	size_t out;
	int ret;
	cpumask_t cpus;

	/* Check the reserved flags. */
	if (flags != 0)
		return -EINVAL;

	/*
	 * The interface supports taking in a CPU mask, and returns values that
	 * are consistent across that mask. Allow userspace to specify NULL and
	 * 0 as a shortcut to all online CPUs.
	 */
	cpumask_clear(&cpus);
	if (!cpu_count && !cpus_user) {
		cpumask_copy(&cpus, cpu_online_mask);
	} else {
		if (cpu_count > cpumask_size())
			cpu_count = cpumask_size();

		ret = copy_from_user(&cpus, cpus_user, cpu_count);
		if (ret)
			return -EFAULT;

		/*
		 * Userspace must provide at least one online CPU, without that
		 * there's no way to define what is supported.
		 */
		cpumask_and(&cpus, &cpus, cpu_online_mask);
		if (cpumask_empty(&cpus))
			return -EINVAL;
	}

	for (out = 0; out < pair_count; out++, pairs++) {
		struct riscv_hwprobe pair;

		if (get_user(pair.key, &pairs->key))
			return -EFAULT;

		pair.value = 0;
		hwprobe_one_pair(&pair, &cpus);
		ret = put_user(pair.key, &pairs->key);
		if (ret == 0)
			ret = put_user(pair.value, &pairs->value);

		if (ret)
			return -EFAULT;
	}

	return 0;
}
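
/*
 * A minimal userspace sketch (not part of the kernel build) of querying the
 * interface implemented above, assuming the uapi <asm/hwprobe.h> definitions
 * and an __NR_riscv_hwprobe syscall number are available:
 *
 *	#include <unistd.h>
 *	#include <sys/syscall.h>
 *	#include <asm/unistd.h>
 *	#include <asm/hwprobe.h>
 *
 *	static int have_zbb(void)
 *	{
 *		struct riscv_hwprobe pair = {
 *			.key = RISCV_HWPROBE_KEY_IMA_EXT_0,
 *		};
 *
 *		// cpu_count == 0 with cpus == NULL means "all online CPUs"
 *		if (syscall(__NR_riscv_hwprobe, &pair, 1, 0, NULL, 0))
 *			return 0;
 *
 *		return (pair.value & RISCV_HWPROBE_EXT_ZBB) != 0;
 *	}
 */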

#ifdef CONFIG_MMU

static int __init init_hwprobe_vdso_data(void)
{
	struct vdso_data *vd = __arch_get_k_vdso_data();
	struct arch_vdso_data *avd = &vd->arch_data;
	u64 id_bitsmash = 0;
	struct riscv_hwprobe pair;
	int key;

	/*
	 * Initialize vDSO data with the answers for the "all CPUs" case, to
	 * save a syscall in the common case.
	 */
	for (key = 0; key <= RISCV_HWPROBE_MAX_KEY; key++) {
		pair.key = key;
		hwprobe_one_pair(&pair, cpu_online_mask);

		WARN_ON_ONCE(pair.key < 0);

		avd->all_cpu_hwprobe_values[key] = pair.value;
		/*
		 * Smash together the vendor, arch, and impl IDs to see if
		 * they're all 0 or any negative.
		 */
		if (key <= RISCV_HWPROBE_KEY_MIMPID)
			id_bitsmash |= pair.value;
	}

	/*
	 * If the arch, vendor, and implementation ID are all the same across
	 * all harts, then assume all CPUs are the same, and allow the vDSO to
	 * answer queries for arbitrary masks. However, if all values are 0
	 * (not populated) or any value returns -1 (varies across CPUs), then
	 * the vDSO should defer to the kernel for exotic cpu masks.
	 */
	avd->homogeneous_cpus = id_bitsmash != 0 && id_bitsmash != -1;
	return 0;
}

arch_initcall_sync(init_hwprobe_vdso_data);

#endif /* CONFIG_MMU */

SYSCALL_DEFINE5(riscv_hwprobe, struct riscv_hwprobe __user *, pairs,
		size_t, pair_count, size_t, cpu_count, unsigned long __user *,
		cpus, unsigned int, flags)
{
	return do_riscv_hwprobe(pairs, pair_count, cpu_count,
				cpus, flags);
}
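
/*
 * Note that userspace does not necessarily enter the kernel for every probe:
 * the vDSO data initialized above backs the port's vDSO entry point (expected
 * to be exported as __vdso_riscv_hwprobe), which can answer queries from the
 * cached "all CPUs" values when homogeneous_cpus allows it and falls back to
 * this syscall otherwise.
 */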