1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Copyright (C) 2020-2024 Loongson Technologies, Inc. 4 */ 5 6 #include <linux/cpuhotplug.h> 7 #include <linux/init.h> 8 #include <linux/interrupt.h> 9 #include <linux/irq.h> 10 #include <linux/irqchip.h> 11 #include <linux/irqchip/chained_irq.h> 12 #include <linux/irqdomain.h> 13 #include <linux/kernel.h> 14 #include <linux/msi.h> 15 #include <linux/radix-tree.h> 16 #include <linux/spinlock.h> 17 18 #include <asm/loongarch.h> 19 #include <asm/setup.h> 20 21 #include "irq-msi-lib.h" 22 #include "irq-loongson.h" 23 24 #define VECTORS_PER_REG 64 25 #define IRR_VECTOR_MASK 0xffUL 26 #define IRR_INVALID_MASK 0x80000000UL 27 #define AVEC_MSG_OFFSET 0x100000 28 29 #ifdef CONFIG_SMP 30 struct pending_list { 31 struct list_head head; 32 }; 33 34 static struct cpumask intersect_mask; 35 static DEFINE_PER_CPU(struct pending_list, pending_list); 36 #endif 37 38 static DEFINE_PER_CPU(struct irq_desc * [NR_VECTORS], irq_map); 39 40 struct avecintc_chip { 41 raw_spinlock_t lock; 42 struct fwnode_handle *fwnode; 43 struct irq_domain *domain; 44 struct irq_matrix *vector_matrix; 45 phys_addr_t msi_base_addr; 46 }; 47 48 static struct avecintc_chip loongarch_avec; 49 50 struct avecintc_data { 51 struct list_head entry; 52 unsigned int cpu; 53 unsigned int vec; 54 unsigned int prev_cpu; 55 unsigned int prev_vec; 56 unsigned int moving; 57 }; 58 59 static inline void avecintc_ack_irq(struct irq_data *d) 60 { 61 } 62 63 static inline void avecintc_mask_irq(struct irq_data *d) 64 { 65 } 66 67 static inline void avecintc_unmask_irq(struct irq_data *d) 68 { 69 } 70 71 #ifdef CONFIG_SMP 72 static inline void pending_list_init(int cpu) 73 { 74 struct pending_list *plist = per_cpu_ptr(&pending_list, cpu); 75 76 INIT_LIST_HEAD(&plist->head); 77 } 78 79 static void avecintc_sync(struct avecintc_data *adata) 80 { 81 struct pending_list *plist; 82 83 if (cpu_online(adata->prev_cpu)) { 84 plist = per_cpu_ptr(&pending_list, adata->prev_cpu); 85 list_add_tail(&adata->entry, &plist->head); 86 adata->moving = 1; 87 mp_ops.send_ipi_single(adata->prev_cpu, ACTION_CLEAR_VECTOR); 88 } 89 } 90 91 static int avecintc_set_affinity(struct irq_data *data, const struct cpumask *dest, bool force) 92 { 93 int cpu, ret, vector; 94 struct avecintc_data *adata; 95 96 scoped_guard(raw_spinlock, &loongarch_avec.lock) { 97 adata = irq_data_get_irq_chip_data(data); 98 99 if (adata->moving) 100 return -EBUSY; 101 102 if (cpu_online(adata->cpu) && cpumask_test_cpu(adata->cpu, dest)) 103 return 0; 104 105 cpumask_and(&intersect_mask, dest, cpu_online_mask); 106 107 ret = irq_matrix_alloc(loongarch_avec.vector_matrix, &intersect_mask, false, &cpu); 108 if (ret < 0) 109 return ret; 110 111 vector = ret; 112 adata->cpu = cpu; 113 adata->vec = vector; 114 per_cpu_ptr(irq_map, adata->cpu)[adata->vec] = irq_data_to_desc(data); 115 avecintc_sync(adata); 116 } 117 118 irq_data_update_effective_affinity(data, cpumask_of(cpu)); 119 120 return IRQ_SET_MASK_OK; 121 } 122 123 static int avecintc_cpu_online(unsigned int cpu) 124 { 125 if (!loongarch_avec.vector_matrix) 126 return 0; 127 128 guard(raw_spinlock)(&loongarch_avec.lock); 129 130 irq_matrix_online(loongarch_avec.vector_matrix); 131 132 pending_list_init(cpu); 133 134 return 0; 135 } 136 137 static int avecintc_cpu_offline(unsigned int cpu) 138 { 139 struct pending_list *plist = per_cpu_ptr(&pending_list, cpu); 140 141 if (!loongarch_avec.vector_matrix) 142 return 0; 143 144 guard(raw_spinlock)(&loongarch_avec.lock); 145 146 if (!list_empty(&plist->head)) 147 pr_warn("CPU#%d vector is busy\n", cpu); 148 149 irq_matrix_offline(loongarch_avec.vector_matrix); 150 151 return 0; 152 } 153 154 void complete_irq_moving(void) 155 { 156 struct pending_list *plist = this_cpu_ptr(&pending_list); 157 struct avecintc_data *adata, *tdata; 158 int cpu, vector, bias; 159 uint64_t isr; 160 161 guard(raw_spinlock)(&loongarch_avec.lock); 162 163 list_for_each_entry_safe(adata, tdata, &plist->head, entry) { 164 cpu = adata->prev_cpu; 165 vector = adata->prev_vec; 166 bias = vector / VECTORS_PER_REG; 167 switch (bias) { 168 case 0: 169 isr = csr_read64(LOONGARCH_CSR_ISR0); 170 break; 171 case 1: 172 isr = csr_read64(LOONGARCH_CSR_ISR1); 173 break; 174 case 2: 175 isr = csr_read64(LOONGARCH_CSR_ISR2); 176 break; 177 case 3: 178 isr = csr_read64(LOONGARCH_CSR_ISR3); 179 break; 180 } 181 182 if (isr & (1UL << (vector % VECTORS_PER_REG))) { 183 mp_ops.send_ipi_single(cpu, ACTION_CLEAR_VECTOR); 184 continue; 185 } 186 list_del(&adata->entry); 187 irq_matrix_free(loongarch_avec.vector_matrix, cpu, vector, false); 188 this_cpu_write(irq_map[vector], NULL); 189 adata->moving = 0; 190 adata->prev_cpu = adata->cpu; 191 adata->prev_vec = adata->vec; 192 } 193 } 194 #endif 195 196 static void avecintc_compose_msi_msg(struct irq_data *d, struct msi_msg *msg) 197 { 198 struct avecintc_data *adata = irq_data_get_irq_chip_data(d); 199 200 msg->address_hi = 0x0; 201 msg->address_lo = (loongarch_avec.msi_base_addr | (adata->vec & 0xff) << 4) 202 | ((cpu_logical_map(adata->cpu & 0xffff)) << 12); 203 msg->data = 0x0; 204 } 205 206 static struct irq_chip avec_irq_controller = { 207 .name = "AVECINTC", 208 .irq_ack = avecintc_ack_irq, 209 .irq_mask = avecintc_mask_irq, 210 .irq_unmask = avecintc_unmask_irq, 211 #ifdef CONFIG_SMP 212 .irq_set_affinity = avecintc_set_affinity, 213 #endif 214 .irq_compose_msi_msg = avecintc_compose_msi_msg, 215 }; 216 217 static void avecintc_irq_dispatch(struct irq_desc *desc) 218 { 219 struct irq_chip *chip = irq_desc_get_chip(desc); 220 struct irq_desc *d; 221 222 chained_irq_enter(chip, desc); 223 224 while (true) { 225 unsigned long vector = csr_read64(LOONGARCH_CSR_IRR); 226 if (vector & IRR_INVALID_MASK) 227 break; 228 229 vector &= IRR_VECTOR_MASK; 230 231 d = this_cpu_read(irq_map[vector]); 232 if (d) { 233 generic_handle_irq_desc(d); 234 } else { 235 spurious_interrupt(); 236 pr_warn("Unexpected IRQ occurs on CPU#%d [vector %ld]\n", smp_processor_id(), vector); 237 } 238 } 239 240 chained_irq_exit(chip, desc); 241 } 242 243 static int avecintc_alloc_vector(struct irq_data *irqd, struct avecintc_data *adata) 244 { 245 int cpu, ret; 246 247 guard(raw_spinlock_irqsave)(&loongarch_avec.lock); 248 249 ret = irq_matrix_alloc(loongarch_avec.vector_matrix, cpu_online_mask, false, &cpu); 250 if (ret < 0) 251 return ret; 252 253 adata->prev_cpu = adata->cpu = cpu; 254 adata->prev_vec = adata->vec = ret; 255 per_cpu_ptr(irq_map, adata->cpu)[adata->vec] = irq_data_to_desc(irqd); 256 257 return 0; 258 } 259 260 static int avecintc_domain_alloc(struct irq_domain *domain, unsigned int virq, 261 unsigned int nr_irqs, void *arg) 262 { 263 for (unsigned int i = 0; i < nr_irqs; i++) { 264 struct irq_data *irqd = irq_domain_get_irq_data(domain, virq + i); 265 struct avecintc_data *adata = kzalloc(sizeof(*adata), GFP_KERNEL); 266 int ret; 267 268 if (!adata) 269 return -ENOMEM; 270 271 ret = avecintc_alloc_vector(irqd, adata); 272 if (ret < 0) { 273 kfree(adata); 274 return ret; 275 } 276 277 irq_domain_set_info(domain, virq + i, virq + i, &avec_irq_controller, 278 adata, handle_edge_irq, NULL, NULL); 279 irqd_set_single_target(irqd); 280 irqd_set_affinity_on_activate(irqd); 281 } 282 283 return 0; 284 } 285 286 static void avecintc_free_vector(struct irq_data *irqd, struct avecintc_data *adata) 287 { 288 guard(raw_spinlock_irqsave)(&loongarch_avec.lock); 289 290 per_cpu(irq_map, adata->cpu)[adata->vec] = NULL; 291 irq_matrix_free(loongarch_avec.vector_matrix, adata->cpu, adata->vec, false); 292 293 #ifdef CONFIG_SMP 294 if (!adata->moving) 295 return; 296 297 per_cpu(irq_map, adata->prev_cpu)[adata->prev_vec] = NULL; 298 irq_matrix_free(loongarch_avec.vector_matrix, adata->prev_cpu, adata->prev_vec, false); 299 list_del_init(&adata->entry); 300 #endif 301 } 302 303 static void avecintc_domain_free(struct irq_domain *domain, unsigned int virq, 304 unsigned int nr_irqs) 305 { 306 for (unsigned int i = 0; i < nr_irqs; i++) { 307 struct irq_data *d = irq_domain_get_irq_data(domain, virq + i); 308 309 if (d) { 310 struct avecintc_data *adata = irq_data_get_irq_chip_data(d); 311 312 avecintc_free_vector(d, adata); 313 irq_domain_reset_irq_data(d); 314 kfree(adata); 315 } 316 } 317 } 318 319 static const struct irq_domain_ops avecintc_domain_ops = { 320 .alloc = avecintc_domain_alloc, 321 .free = avecintc_domain_free, 322 .select = msi_lib_irq_domain_select, 323 }; 324 325 static int __init irq_matrix_init(void) 326 { 327 loongarch_avec.vector_matrix = irq_alloc_matrix(NR_VECTORS, 0, NR_VECTORS); 328 if (!loongarch_avec.vector_matrix) 329 return -ENOMEM; 330 331 for (int i = 0; i < NR_LEGACY_VECTORS; i++) 332 irq_matrix_assign_system(loongarch_avec.vector_matrix, i, false); 333 334 irq_matrix_online(loongarch_avec.vector_matrix); 335 336 return 0; 337 } 338 339 static int __init avecintc_init(struct irq_domain *parent) 340 { 341 int ret, parent_irq; 342 unsigned long value; 343 344 raw_spin_lock_init(&loongarch_avec.lock); 345 346 loongarch_avec.fwnode = irq_domain_alloc_named_fwnode("AVECINTC"); 347 if (!loongarch_avec.fwnode) { 348 pr_err("Unable to allocate domain handle\n"); 349 ret = -ENOMEM; 350 goto out; 351 } 352 353 loongarch_avec.domain = irq_domain_create_tree(loongarch_avec.fwnode, 354 &avecintc_domain_ops, NULL); 355 if (!loongarch_avec.domain) { 356 pr_err("Unable to create IRQ domain\n"); 357 ret = -ENOMEM; 358 goto out_free_handle; 359 } 360 361 parent_irq = irq_create_mapping(parent, INT_AVEC); 362 if (!parent_irq) { 363 pr_err("Failed to mapping hwirq\n"); 364 ret = -EINVAL; 365 goto out_remove_domain; 366 } 367 368 ret = irq_matrix_init(); 369 if (ret < 0) { 370 pr_err("Failed to init irq matrix\n"); 371 goto out_remove_domain; 372 } 373 irq_set_chained_handler_and_data(parent_irq, avecintc_irq_dispatch, NULL); 374 375 #ifdef CONFIG_SMP 376 pending_list_init(0); 377 cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_AVECINTC_STARTING, 378 "irqchip/loongarch/avecintc:starting", 379 avecintc_cpu_online, avecintc_cpu_offline); 380 #endif 381 value = iocsr_read64(LOONGARCH_IOCSR_MISC_FUNC); 382 value |= IOCSR_MISC_FUNC_AVEC_EN; 383 iocsr_write64(value, LOONGARCH_IOCSR_MISC_FUNC); 384 385 return ret; 386 387 out_remove_domain: 388 irq_domain_remove(loongarch_avec.domain); 389 out_free_handle: 390 irq_domain_free_fwnode(loongarch_avec.fwnode); 391 out: 392 return ret; 393 } 394 395 static int __init pch_msi_parse_madt(union acpi_subtable_headers *header, 396 const unsigned long end) 397 { 398 struct acpi_madt_msi_pic *pchmsi_entry = (struct acpi_madt_msi_pic *)header; 399 400 loongarch_avec.msi_base_addr = pchmsi_entry->msg_address - AVEC_MSG_OFFSET; 401 402 return pch_msi_acpi_init_avec(loongarch_avec.domain); 403 } 404 405 static inline int __init acpi_cascade_irqdomain_init(void) 406 { 407 return acpi_table_parse_madt(ACPI_MADT_TYPE_MSI_PIC, pch_msi_parse_madt, 1); 408 } 409 410 int __init avecintc_acpi_init(struct irq_domain *parent) 411 { 412 int ret = avecintc_init(parent); 413 if (ret < 0) { 414 pr_err("Failed to init IRQ domain\n"); 415 return ret; 416 } 417 418 ret = acpi_cascade_irqdomain_init(); 419 if (ret < 0) { 420 pr_err("Failed to init cascade IRQ domain\n"); 421 return ret; 422 } 423 424 return ret; 425 } 426