// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012 Regents of the University of California
 * Copyright (C) 2014 Darius Rad <darius@bluespec.com>
 * Copyright (C) 2017 SiFive
 */

#include <linux/syscalls.h>
#include <asm/cacheflush.h>
#include <asm/cpufeature.h>
#include <asm/hwprobe.h>
#include <asm/sbi.h>
#include <asm/switch_to.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm-generic/mman-common.h>
#include <vdso/vsyscall.h>

/*
 * Common mmap/mmap2 back end. page_shift_offset is 0 for mmap (offset in
 * bytes) and 12 for mmap2 (offset in fixed 4 KiB units): the check rejects
 * offsets that are not page-aligned once scaled, and the shift converts the
 * offset into pages for ksys_mmap_pgoff().
 */
static long riscv_sys_mmap(unsigned long addr, unsigned long len,
			   unsigned long prot, unsigned long flags,
			   unsigned long fd, off_t offset,
			   unsigned long page_shift_offset)
{
	if (unlikely(offset & (~PAGE_MASK >> page_shift_offset)))
		return -EINVAL;

	return ksys_mmap_pgoff(addr, len, prot, flags, fd,
			       offset >> (PAGE_SHIFT - page_shift_offset));
}

#ifdef CONFIG_64BIT
SYSCALL_DEFINE6(mmap, unsigned long, addr, unsigned long, len,
	unsigned long, prot, unsigned long, flags,
	unsigned long, fd, off_t, offset)
{
	return riscv_sys_mmap(addr, len, prot, flags, fd, offset, 0);
}
#endif

#if defined(CONFIG_32BIT) || defined(CONFIG_COMPAT)
SYSCALL_DEFINE6(mmap2, unsigned long, addr, unsigned long, len,
	unsigned long, prot, unsigned long, flags,
	unsigned long, fd, off_t, offset)
{
	/*
	 * Note that the shift for mmap2 is constant (12),
	 * regardless of PAGE_SIZE
	 */
	return riscv_sys_mmap(addr, len, prot, flags, fd, offset, 12);
}
#endif

/*
 * Allows the instruction cache to be flushed from userspace. Despite RISC-V
 * having a direct 'fence.i' instruction available to userspace (which we
 * can't trap!), that's not actually viable when running on Linux because the
 * kernel might schedule a process on another hart. There is no way for
 * userspace to handle this without invoking the kernel (as it doesn't know
 * the thread->hart mappings), so we've defined a RISC-V specific system call
 * to flush the instruction cache.
 *
 * sys_riscv_flush_icache() is defined to flush the instruction cache over an
 * address range, with the flush applying to either all threads or just the
 * caller. We don't currently do anything with the address range; it's just
 * there for forwards compatibility.
 */
SYSCALL_DEFINE3(riscv_flush_icache, uintptr_t, start, uintptr_t, end,
	uintptr_t, flags)
{
	/* Check the reserved flags. */
	if (unlikely(flags & ~SYS_RISCV_FLUSH_ICACHE_ALL))
		return -EINVAL;

	flush_icache_mm(current->mm, flags & SYS_RISCV_FLUSH_ICACHE_LOCAL);

	return 0;
}
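/*
 * Illustrative sketch, not part of the kernel build: userspace normally
 * reaches this syscall through its C library. Assuming glibc, which wraps
 * it as __riscv_flush_icache() in <sys/cachectl.h>, a JIT that has just
 * written instructions might do:
 *
 *	#include <string.h>
 *	#include <sys/cachectl.h>
 *
 *	memcpy(jit_buf, emitted_insns, emitted_len);
 *	// flags == 0: flush on behalf of all threads, not just the caller
 *	__riscv_flush_icache(jit_buf, jit_buf + emitted_len, 0);
 *
 * jit_buf, emitted_insns, and emitted_len are hypothetical names used only
 * for this example.
 */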
/*
 * The hwprobe interface, for allowing userspace to probe which features are
 * supported by the hardware. See Documentation/riscv/hwprobe.rst for more
 * details.
 */
static void hwprobe_arch_id(struct riscv_hwprobe *pair,
			    const struct cpumask *cpus)
{
	u64 id = -1ULL;
	bool first = true;
	int cpu;

	for_each_cpu(cpu, cpus) {
		u64 cpu_id;

		switch (pair->key) {
		case RISCV_HWPROBE_KEY_MVENDORID:
			cpu_id = riscv_cached_mvendorid(cpu);
			break;
		case RISCV_HWPROBE_KEY_MIMPID:
			cpu_id = riscv_cached_mimpid(cpu);
			break;
		case RISCV_HWPROBE_KEY_MARCHID:
			cpu_id = riscv_cached_marchid(cpu);
			break;
		}

		/* Take the first CPU's ID as the reference value. */
		if (first) {
			id = cpu_id;
			first = false;
		}

		/*
		 * If there's a mismatch for the given set, return -1 in the
		 * value.
		 */
		if (id != cpu_id) {
			id = -1ULL;
			break;
		}
	}

	pair->value = id;
}

static u64 hwprobe_misaligned(const struct cpumask *cpus)
{
	int cpu;
	u64 perf = -1ULL;	/* -1 means no CPU has been seen yet */

	for_each_cpu(cpu, cpus) {
		int this_perf = per_cpu(misaligned_access_speed, cpu);

		if (perf == -1ULL)
			perf = this_perf;

		/* CPUs in the set disagree; report UNKNOWN. */
		if (perf != this_perf) {
			perf = RISCV_HWPROBE_MISALIGNED_UNKNOWN;
			break;
		}
	}

	if (perf == -1ULL)
		return RISCV_HWPROBE_MISALIGNED_UNKNOWN;

	return perf;
}

static void hwprobe_one_pair(struct riscv_hwprobe *pair,
			     const struct cpumask *cpus)
{
	switch (pair->key) {
	case RISCV_HWPROBE_KEY_MVENDORID:
	case RISCV_HWPROBE_KEY_MARCHID:
	case RISCV_HWPROBE_KEY_MIMPID:
		hwprobe_arch_id(pair, cpus);
		break;
	/*
	 * The kernel already assumes that the base single-letter ISA
	 * extensions are supported on all harts, and only supports the
	 * IMA base, so just cheat a bit here and tell that to
	 * userspace.
	 */
	case RISCV_HWPROBE_KEY_BASE_BEHAVIOR:
		pair->value = RISCV_HWPROBE_BASE_BEHAVIOR_IMA;
		break;

	case RISCV_HWPROBE_KEY_IMA_EXT_0:
		pair->value = 0;
		if (has_fpu())
			pair->value |= RISCV_HWPROBE_IMA_FD;

		if (riscv_isa_extension_available(NULL, c))
			pair->value |= RISCV_HWPROBE_IMA_C;

		break;

	case RISCV_HWPROBE_KEY_CPUPERF_0:
		pair->value = hwprobe_misaligned(cpus);
		break;

	/*
	 * For forward compatibility, unknown keys don't fail the whole
	 * call, but get their element key set to -1 and value set to 0
	 * indicating they're unrecognized.
	 */
	default:
		pair->key = -1;
		pair->value = 0;
		break;
	}
}

static int do_riscv_hwprobe(struct riscv_hwprobe __user *pairs,
			    size_t pair_count, size_t cpu_count,
			    unsigned long __user *cpus_user,
			    unsigned int flags)
{
	size_t out;
	int ret;
	cpumask_t cpus;

	/* Check the reserved flags. */
	if (flags != 0)
		return -EINVAL;

	/*
	 * The interface supports taking in a CPU mask, and returns values
	 * that are consistent across that mask. Allow userspace to specify
	 * NULL and 0 as a shortcut to all online CPUs.
	 */
	cpumask_clear(&cpus);
	if (!cpu_count && !cpus_user) {
		cpumask_copy(&cpus, cpu_online_mask);
	} else {
		if (cpu_count > cpumask_size())
			cpu_count = cpumask_size();

		ret = copy_from_user(&cpus, cpus_user, cpu_count);
		if (ret)
			return -EFAULT;

		/*
		 * Userspace must provide at least one online CPU, without
		 * that there's no way to define what is supported.
		 */
		cpumask_and(&cpus, &cpus, cpu_online_mask);
		if (cpumask_empty(&cpus))
			return -EINVAL;
	}

	for (out = 0; out < pair_count; out++, pairs++) {
		struct riscv_hwprobe pair;

		if (get_user(pair.key, &pairs->key))
			return -EFAULT;

		pair.value = 0;
		hwprobe_one_pair(&pair, &cpus);
		ret = put_user(pair.key, &pairs->key);
		if (ret == 0)
			ret = put_user(pair.value, &pairs->value);

		if (ret)
			return -EFAULT;
	}

	return 0;
}
#ifdef CONFIG_MMU

static int __init init_hwprobe_vdso_data(void)
{
	struct vdso_data *vd = __arch_get_k_vdso_data();
	struct arch_vdso_data *avd = &vd->arch_data;
	u64 id_bitsmash = 0;
	struct riscv_hwprobe pair;
	int key;

	/*
	 * Initialize vDSO data with the answers for the "all CPUs" case, to
	 * save a syscall in the common case.
	 */
	for (key = 0; key <= RISCV_HWPROBE_MAX_KEY; key++) {
		pair.key = key;
		hwprobe_one_pair(&pair, cpu_online_mask);

		WARN_ON_ONCE(pair.key < 0);

		avd->all_cpu_hwprobe_values[key] = pair.value;
		/*
		 * Smash together the vendor, arch, and impl IDs to see if
		 * they're all 0 or any negative.
		 */
		if (key <= RISCV_HWPROBE_KEY_MIMPID)
			id_bitsmash |= pair.value;
	}

	/*
	 * If the arch, vendor, and implementation ID are all the same across
	 * all harts, then assume all CPUs are the same, and allow the vDSO to
	 * answer queries for arbitrary masks. However if all values are 0 (not
	 * populated) or any value returns -1 (varies across CPUs), then the
	 * vDSO should defer to the kernel for exotic cpu masks.
	 */
	avd->homogeneous_cpus = id_bitsmash != 0 && id_bitsmash != -1;
	return 0;
}

arch_initcall_sync(init_hwprobe_vdso_data);

#endif /* CONFIG_MMU */

SYSCALL_DEFINE5(riscv_hwprobe, struct riscv_hwprobe __user *, pairs,
		size_t, pair_count, size_t, cpu_count, unsigned long __user *,
		cpus, unsigned int, flags)
{
	return do_riscv_hwprobe(pairs, pair_count, cpu_count,
				cpus, flags);
}
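/*
 * Illustrative sketch, not part of the kernel build: a minimal userspace
 * caller, assuming kernel headers that expose __NR_riscv_hwprobe and
 * struct riscv_hwprobe (<asm/hwprobe.h>). Passing cpu_count == 0 and
 * cpus == NULL takes the "all online CPUs" shortcut handled in
 * do_riscv_hwprobe() above:
 *
 *	#include <asm/hwprobe.h>
 *	#include <stdio.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		struct riscv_hwprobe pair = {
 *			.key = RISCV_HWPROBE_KEY_IMA_EXT_0,
 *		};
 *
 *		// One key/value pair, all online CPUs, no flags.
 *		if (syscall(__NR_riscv_hwprobe, &pair, 1, 0, NULL, 0))
 *			return 1;
 *
 *		printf("IMA_EXT_0: 0x%llx\n",
 *		       (unsigned long long)pair.value);
 *		return 0;
 *	}
 */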