// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 1992, 1998-2004 Linus Torvalds, Ingo Molnar
 *
 * This file contains the /proc/irq/ handling code.
 */

#include <linux/irq.h>
#include <linux/gfp.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/mutex.h>
#include <linux/string.h>

#include "internals.h"

/*
 * Access rules:
 *
 * procfs protects read/write of /proc/irq/N/ files against a
 * concurrent free of the interrupt descriptor. remove_proc_entry()
 * immediately prevents new read/writes to happen and waits for
 * already running read/write functions to complete.
 *
 * We remove the proc entries first and then delete the interrupt
 * descriptor from the radix tree and free it. So it is guaranteed
 * that irq_to_desc(N) is valid as long as the read/writes are
 * permitted by procfs.
 *
 * The read from /proc/interrupts is a different problem because there
 * is no protection. So the lookup and the access to irqdesc
 * information must be protected by sparse_irq_lock.
 */
static struct proc_dir_entry *root_irq_dir;

#ifdef CONFIG_SMP

enum {
	AFFINITY,
	AFFINITY_LIST,
	EFFECTIVE,
	EFFECTIVE_LIST,
};

static int show_irq_affinity(int type, struct seq_file *m)
{
	struct irq_desc *desc = irq_to_desc((long)m->private);
	const struct cpumask *mask;

	guard(raw_spinlock_irq)(&desc->lock);

	switch (type) {
	case AFFINITY:
	case AFFINITY_LIST:
		mask = desc->irq_common_data.affinity;
		if (irq_move_pending(&desc->irq_data))
			mask = irq_desc_get_pending_mask(desc);
		break;
	case EFFECTIVE:
	case EFFECTIVE_LIST:
#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
		mask = irq_data_get_effective_affinity_mask(&desc->irq_data);
		break;
#endif
	default:
		return -EINVAL;
	}

	switch (type) {
	case AFFINITY_LIST:
	case EFFECTIVE_LIST:
		seq_printf(m, "%*pbl\n", cpumask_pr_args(mask));
		break;
	case AFFINITY:
	case EFFECTIVE:
		seq_printf(m, "%*pb\n", cpumask_pr_args(mask));
		break;
	}
	return 0;
}

static int irq_affinity_hint_proc_show(struct seq_file *m, void *v)
{
	struct irq_desc *desc = irq_to_desc((long)m->private);
	cpumask_var_t mask;

	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	scoped_guard(raw_spinlock_irq, &desc->lock) {
		if (desc->affinity_hint)
			cpumask_copy(mask, desc->affinity_hint);
	}

	seq_printf(m, "%*pb\n", cpumask_pr_args(mask));
	free_cpumask_var(mask);
	return 0;
}

int no_irq_affinity;
static int irq_affinity_proc_show(struct seq_file *m, void *v)
{
	return show_irq_affinity(AFFINITY, m);
}

static int irq_affinity_list_proc_show(struct seq_file *m, void *v)
{
	return show_irq_affinity(AFFINITY_LIST, m);
}

#ifndef CONFIG_AUTO_IRQ_AFFINITY
static inline int irq_select_affinity_usr(unsigned int irq)
{
	/*
	 * If the interrupt is started up already then this fails. The
	 * interrupt is assigned to an online CPU already. There is no
	 * point to move it around randomly. Tell user space that the
	 * selected mask is bogus.
	 *
	 * If not then any change to the affinity is pointless because the
	 * startup code invokes irq_setup_affinity() which will select
	 * an online CPU anyway.
	 */
	return -EINVAL;
}
#else
/* ALPHA magic affinity auto selector. Keep it for historical reasons. */
static inline int irq_select_affinity_usr(unsigned int irq)
{
	return irq_select_affinity(irq);
}
#endif

static ssize_t write_irq_affinity(int type, struct file *file,
		const char __user *buffer, size_t count, loff_t *pos)
{
	unsigned int irq = (int)(long)pde_data(file_inode(file));
	cpumask_var_t new_value;
	int err;

	if (!irq_can_set_affinity_usr(irq) || no_irq_affinity)
		return -EPERM;

	if (!zalloc_cpumask_var(&new_value, GFP_KERNEL))
		return -ENOMEM;

	if (type)
		err = cpumask_parselist_user(buffer, count, new_value);
	else
		err = cpumask_parse_user(buffer, count, new_value);
	if (err)
		goto free_cpumask;

	/*
	 * Do not allow disabling IRQs completely - it's a too easy
	 * way to make the system unusable accidentally :-) At least
	 * one online CPU still has to be targeted.
	 */
	if (!cpumask_intersects(new_value, cpu_online_mask)) {
		/*
		 * Special case for empty set - allow the architecture code
		 * to set default SMP affinity.
		 */
		err = irq_select_affinity_usr(irq) ? -EINVAL : count;
	} else {
		err = irq_set_affinity(irq, new_value);
		if (!err)
			err = count;
	}

free_cpumask:
	free_cpumask_var(new_value);
	return err;
}

static ssize_t irq_affinity_proc_write(struct file *file,
		const char __user *buffer, size_t count, loff_t *pos)
{
	return write_irq_affinity(0, file, buffer, count, pos);
}

static ssize_t irq_affinity_list_proc_write(struct file *file,
		const char __user *buffer, size_t count, loff_t *pos)
{
	return write_irq_affinity(1, file, buffer, count, pos);
}

static int irq_affinity_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, irq_affinity_proc_show, pde_data(inode));
}

static int irq_affinity_list_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, irq_affinity_list_proc_show, pde_data(inode));
}

static const struct proc_ops irq_affinity_proc_ops = {
	.proc_open	= irq_affinity_proc_open,
	.proc_read	= seq_read,
	.proc_lseek	= seq_lseek,
	.proc_release	= single_release,
	.proc_write	= irq_affinity_proc_write,
};

static const struct proc_ops irq_affinity_list_proc_ops = {
	.proc_open	= irq_affinity_list_proc_open,
	.proc_read	= seq_read,
	.proc_lseek	= seq_lseek,
	.proc_release	= single_release,
	.proc_write	= irq_affinity_list_proc_write,
};

#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
static int irq_effective_aff_proc_show(struct seq_file *m, void *v)
{
	return show_irq_affinity(EFFECTIVE, m);
}

static int irq_effective_aff_list_proc_show(struct seq_file *m, void *v)
{
	return show_irq_affinity(EFFECTIVE_LIST, m);
}
#endif

static int default_affinity_show(struct seq_file *m, void *v)
{
	seq_printf(m, "%*pb\n", cpumask_pr_args(irq_default_affinity));
	return 0;
}

static ssize_t default_affinity_write(struct file *file,
		const char __user *buffer, size_t count, loff_t *ppos)
{
	cpumask_var_t new_value;
	int err;

	if (!zalloc_cpumask_var(&new_value, GFP_KERNEL))
		return -ENOMEM;

	err = cpumask_parse_user(buffer, count, new_value);
	if (err)
		goto out;

	/*
	 * Do not allow disabling IRQs completely - it's a too easy
	 * way to make the system unusable accidentally :-) At least
	 * one online CPU still has to be targeted.
	 */
	if (!cpumask_intersects(new_value, cpu_online_mask)) {
		err = -EINVAL;
		goto out;
	}

	cpumask_copy(irq_default_affinity, new_value);
	err = count;

out:
	free_cpumask_var(new_value);
	return err;
}

static int default_affinity_open(struct inode *inode, struct file *file)
{
	return single_open(file, default_affinity_show, pde_data(inode));
}

static const struct proc_ops default_affinity_proc_ops = {
	.proc_open	= default_affinity_open,
	.proc_read	= seq_read,
	.proc_lseek	= seq_lseek,
	.proc_release	= single_release,
	.proc_write	= default_affinity_write,
};

static int irq_node_proc_show(struct seq_file *m, void *v)
{
	struct irq_desc *desc = irq_to_desc((long) m->private);

	seq_printf(m, "%d\n", irq_desc_get_node(desc));
	return 0;
}
#endif

static int irq_spurious_proc_show(struct seq_file *m, void *v)
{
	struct irq_desc *desc = irq_to_desc((long) m->private);

	seq_printf(m, "count %u\n" "unhandled %u\n" "last_unhandled %u ms\n",
		   desc->irq_count, desc->irqs_unhandled,
		   jiffies_to_msecs(desc->last_unhandled));
	return 0;
}

#define MAX_NAMELEN 128

static bool name_unique(unsigned int irq, struct irqaction *new_action)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action;

	guard(raw_spinlock_irq)(&desc->lock);
	for_each_action_of_desc(desc, action) {
		if ((action != new_action) && action->name &&
		    !strcmp(new_action->name, action->name))
			return false;
	}
	return true;
}

void register_handler_proc(unsigned int irq, struct irqaction *action)
{
	char name[MAX_NAMELEN];
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc->dir || action->dir || !action->name || !name_unique(irq, action))
		return;

	strscpy(name, action->name);

	/* create /proc/irq/1234/handler/ */
	action->dir = proc_mkdir(name, desc->dir);
}

#undef MAX_NAMELEN

#define MAX_NAMELEN 10

void register_irq_proc(unsigned int irq, struct irq_desc *desc)
{
	static DEFINE_MUTEX(register_lock);
	void __maybe_unused *irqp = (void *)(unsigned long) irq;
	char name[MAX_NAMELEN];

	if (!root_irq_dir || (desc->irq_data.chip == &no_irq_chip))
		return;

	/*
	 * irq directories are registered only when a handler is
	 * added, not when the descriptor is created, so multiple
	 * tasks might try to register at the same time.
	 */
	guard(mutex)(&register_lock);

	if (desc->dir)
		return;

	/* create /proc/irq/1234 */
	sprintf(name, "%u", irq);
	desc->dir = proc_mkdir(name, root_irq_dir);
	if (!desc->dir)
		return;

#ifdef CONFIG_SMP
	umode_t umode = S_IRUGO;

	if (irq_can_set_affinity_usr(desc->irq_data.irq))
		umode |= S_IWUSR;

	/* create /proc/irq/<irq>/smp_affinity */
	proc_create_data("smp_affinity", umode, desc->dir, &irq_affinity_proc_ops, irqp);

	/* create /proc/irq/<irq>/affinity_hint */
	proc_create_single_data("affinity_hint", 0444, desc->dir,
				irq_affinity_hint_proc_show, irqp);

	/* create /proc/irq/<irq>/smp_affinity_list */
	proc_create_data("smp_affinity_list", umode, desc->dir,
			 &irq_affinity_list_proc_ops, irqp);

	proc_create_single_data("node", 0444, desc->dir, irq_node_proc_show, irqp);
# ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
	proc_create_single_data("effective_affinity", 0444, desc->dir,
				irq_effective_aff_proc_show, irqp);
	proc_create_single_data("effective_affinity_list", 0444, desc->dir,
				irq_effective_aff_list_proc_show, irqp);
# endif
#endif
	proc_create_single_data("spurious", 0444, desc->dir,
				irq_spurious_proc_show, (void *)(long)irq);
}

void unregister_irq_proc(unsigned int irq, struct irq_desc *desc)
{
	char name[MAX_NAMELEN];

	if (!root_irq_dir || !desc->dir)
		return;
#ifdef CONFIG_SMP
	remove_proc_entry("smp_affinity", desc->dir);
	remove_proc_entry("affinity_hint", desc->dir);
	remove_proc_entry("smp_affinity_list", desc->dir);
	remove_proc_entry("node", desc->dir);
# ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
	remove_proc_entry("effective_affinity", desc->dir);
	remove_proc_entry("effective_affinity_list", desc->dir);
# endif
#endif
	remove_proc_entry("spurious", desc->dir);

	sprintf(name, "%u", irq);
	remove_proc_entry(name, root_irq_dir);
}

#undef MAX_NAMELEN

void unregister_handler_proc(unsigned int irq, struct irqaction *action)
{
	proc_remove(action->dir);
}

static void register_default_affinity_proc(void)
{
#ifdef CONFIG_SMP
	proc_create("irq/default_smp_affinity", 0644, NULL,
		    &default_affinity_proc_ops);
#endif
}

void init_irq_proc(void)
{
	unsigned int irq;
	struct irq_desc *desc;

	/* create /proc/irq */
	root_irq_dir = proc_mkdir("irq", NULL);
	if (!root_irq_dir)
		return;

	register_default_affinity_proc();

	/*
	 * Create entries for all existing IRQs.
	 */
	for_each_irq_desc(irq, desc)
		register_irq_proc(irq, desc);
}

#ifdef CONFIG_GENERIC_IRQ_SHOW

int __weak arch_show_interrupts(struct seq_file *p, int prec)
{
	return 0;
}

#ifndef ACTUAL_NR_IRQS
# define ACTUAL_NR_IRQS irq_get_nr_irqs()
#endif

int show_interrupts(struct seq_file *p, void *v)
{
	const unsigned int nr_irqs = irq_get_nr_irqs();
	static int prec;

	int i = *(loff_t *) v, j;
	struct irqaction *action;
	struct irq_desc *desc;

	if (i > ACTUAL_NR_IRQS)
		return 0;

	if (i == ACTUAL_NR_IRQS)
		return arch_show_interrupts(p, prec);

	/* print header and calculate the width of the first column */
	if (i == 0) {
		for (prec = 3, j = 1000; prec < 10 && j <= nr_irqs; ++prec)
			j *= 10;

		seq_printf(p, "%*s", prec + 8, "");
		for_each_online_cpu(j)
			seq_printf(p, "CPU%-8d", j);
		seq_putc(p, '\n');
	}

	guard(rcu)();
	desc = irq_to_desc(i);
	if (!desc || irq_settings_is_hidden(desc))
		return 0;

	if (!desc->action || irq_desc_is_chained(desc) || !desc->kstat_irqs)
		return 0;

	seq_printf(p, "%*d:", prec, i);
	for_each_online_cpu(j) {
		unsigned int cnt = desc->kstat_irqs ? per_cpu(desc->kstat_irqs->cnt, j) : 0;

		seq_put_decimal_ull_width(p, " ", cnt, 10);
	}
	seq_putc(p, ' ');

	guard(raw_spinlock_irq)(&desc->lock);
	if (desc->irq_data.chip) {
		if (desc->irq_data.chip->irq_print_chip)
			desc->irq_data.chip->irq_print_chip(&desc->irq_data, p);
		else if (desc->irq_data.chip->name)
			seq_printf(p, "%8s", desc->irq_data.chip->name);
		else
			seq_printf(p, "%8s", "-");
	} else {
		seq_printf(p, "%8s", "None");
	}
	if (desc->irq_data.domain)
		seq_printf(p, " %*lu", prec, desc->irq_data.hwirq);
	else
		seq_printf(p, " %*s", prec, "");
#ifdef CONFIG_GENERIC_IRQ_SHOW_LEVEL
	seq_printf(p, " %-8s", irqd_is_level_type(&desc->irq_data) ? "Level" : "Edge");
#endif
	if (desc->name)
		seq_printf(p, "-%-8s", desc->name);

	action = desc->action;
	if (action) {
		seq_printf(p, " %s", action->name);
		while ((action = action->next) != NULL)
			seq_printf(p, ", %s", action->name);
	}

	seq_putc(p, '\n');
	return 0;
}
#endif