Lines Matching +full:hart +full:- +full:index +full:- +full:bits

1 // SPDX-License-Identifier: GPL-2.0
7 #define pr_fmt(fmt) "riscv-imsic: " fmt
22 #include "irq-riscv-imsic-state.h"
63 return imsic ? &imsic->global : NULL; in imsic_get_global_config()
74 imask = BIT(id & (__riscv_xlen - 1)); in __imsic_eix_read_clear()
102 * are XLEN-wide and we must not touch IDs which in __imsic_eix_update()
106 for (i = id & (__riscv_xlen - 1); id < last_id && i < __riscv_xlen; i++) { in __imsic_eix_update()
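
The lines above index the EIE/EIP array registers, which are XLEN bits wide, so `id & (__riscv_xlen - 1)` picks the bit inside one register and the register index follows as id / XLEN. A minimal standalone sketch of that mapping (not driver code; XLEN is fixed to 64 here as an assumption):

#include <stdio.h>

#define XLEN 64 /* assumption: RV64; the driver uses __riscv_xlen */

int main(void)
{
	unsigned int id = 75;                /* arbitrary example interrupt ID */
	unsigned int reg = id / XLEN;        /* which XLEN-wide EIE/EIP register holds it */
	unsigned int bit = id & (XLEN - 1);  /* bit within that register, as in the line above */

	printf("ID %u -> register %u, bit %u\n", id, reg, bit);
	return 0;
}
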
134 lockdep_assert_held(&lpriv->lock); in __imsic_local_sync()
136 for_each_set_bit(i, lpriv->dirty_bitmap, imsic->global.nr_ids + 1) { in __imsic_local_sync()
139 vec = &lpriv->vectors[i]; in __imsic_local_sync()
141 if (READ_ONCE(vec->enable)) in __imsic_local_sync()
150 mvec = READ_ONCE(vec->move_prev); in __imsic_local_sync()
154 * try again in the next sync-up call. in __imsic_local_sync()
156 if (READ_ONCE(mvec->move_next)) { in __imsic_local_sync()
161 WRITE_ONCE(vec->move_prev, NULL); in __imsic_local_sync()
167 * ID pending bit and re-trigger the new ID on the other CPU using in __imsic_local_sync()
170 mvec = READ_ONCE(vec->move_next); in __imsic_local_sync()
173 * Devices having non-atomic MSI update might see in __imsic_local_sync()
179 tvec = vec->local_id == mvec->local_id ? in __imsic_local_sync()
180 NULL : &lpriv->vectors[mvec->local_id]; in __imsic_local_sync()
182 if (tvec && !irq_can_move_in_process_context(irq_get_irq_data(vec->irq)) && in __imsic_local_sync()
183 __imsic_id_read_clear_pending(tvec->local_id)) { in __imsic_local_sync()
184 /* Retrigger temporary vector if it was already in use */ in __imsic_local_sync()
185 if (READ_ONCE(tvec->enable)) { in __imsic_local_sync()
186 tlocal = per_cpu_ptr(imsic->global.local, tvec->cpu); in __imsic_local_sync()
187 writel_relaxed(tvec->local_id, tlocal->msi_va); in __imsic_local_sync()
190 mlocal = per_cpu_ptr(imsic->global.local, mvec->cpu); in __imsic_local_sync()
191 writel_relaxed(mvec->local_id, mlocal->msi_va); in __imsic_local_sync()
194 if (__imsic_id_read_clear_pending(vec->local_id)) { in __imsic_local_sync()
195 mlocal = per_cpu_ptr(imsic->global.local, mvec->cpu); in __imsic_local_sync()
196 writel_relaxed(mvec->local_id, mlocal->msi_va); in __imsic_local_sync()
199 WRITE_ONCE(vec->move_next, NULL); in __imsic_local_sync()
204 bitmap_clear(lpriv->dirty_bitmap, i, 1); in __imsic_local_sync()
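
The re-trigger writes above (lines 187, 191, 196) replay a pending ID on its new CPU simply by writing the local ID to that CPU's MMIO interrupt file. A hedged fragment capturing just that step, using only identifiers that already appear in this listing (the type name imsic_local_config is assumed from the driver's header; this is a sketch, not a new driver API):

static void imsic_retrigger_sketch(struct imsic_vector *mvec)
{
	/* Look up the destination CPU's interrupt-file mapping ... */
	struct imsic_local_config *mlocal =
		per_cpu_ptr(imsic->global.local, mvec->cpu);

	/* ... and write the new local ID there, exactly as the sync path does. */
	writel_relaxed(mvec->local_id, mlocal->msi_va);
}
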
213 lockdep_assert_held(&lpriv->lock); in __imsic_local_timer_start()
215 if (!timer_pending(&lpriv->timer)) { in __imsic_local_timer_start()
216 lpriv->timer.expires = jiffies + 1; in __imsic_local_timer_start()
217 add_timer_on(&lpriv->timer, cpu); in __imsic_local_timer_start()
228 struct imsic_local_priv *lpriv = this_cpu_ptr(imsic->lpriv); in imsic_local_sync_all()
231 raw_spin_lock_irqsave(&lpriv->lock, flags); in imsic_local_sync_all()
234 bitmap_fill(lpriv->dirty_bitmap, imsic->global.nr_ids + 1); in imsic_local_sync_all()
238 raw_spin_unlock_irqrestore(&lpriv->lock, flags); in imsic_local_sync_all()
261 lockdep_assert_held(&lpriv->lock); in __imsic_remote_sync()
287 lockdep_assert_held(&lpriv->lock); in __imsic_remote_sync()
296 lpriv = per_cpu_ptr(imsic->lpriv, vec->cpu); in imsic_vector_mask()
297 if (WARN_ON_ONCE(&lpriv->vectors[vec->local_id] != vec)) in imsic_vector_mask()
305 raw_spin_lock(&lpriv->lock); in imsic_vector_mask()
307 WRITE_ONCE(vec->enable, false); in imsic_vector_mask()
308 bitmap_set(lpriv->dirty_bitmap, vec->local_id, 1); in imsic_vector_mask()
309 __imsic_remote_sync(lpriv, vec->cpu); in imsic_vector_mask()
311 raw_spin_unlock(&lpriv->lock); in imsic_vector_mask()
318 lpriv = per_cpu_ptr(imsic->lpriv, vec->cpu); in imsic_vector_unmask()
319 if (WARN_ON_ONCE(&lpriv->vectors[vec->local_id] != vec)) in imsic_vector_unmask()
327 raw_spin_lock(&lpriv->lock); in imsic_vector_unmask()
329 WRITE_ONCE(vec->enable, true); in imsic_vector_unmask()
330 bitmap_set(lpriv->dirty_bitmap, vec->local_id, 1); in imsic_vector_unmask()
331 __imsic_remote_sync(lpriv, vec->cpu); in imsic_vector_unmask()
333 raw_spin_unlock(&lpriv->lock); in imsic_vector_unmask()
342 lpriv = per_cpu_ptr(imsic->lpriv, vec->cpu); in imsic_vector_force_move_cleanup()
343 raw_spin_lock_irqsave(&lpriv->lock, flags); in imsic_vector_force_move_cleanup()
345 mvec = READ_ONCE(vec->move_prev); in imsic_vector_force_move_cleanup()
346 WRITE_ONCE(vec->move_prev, NULL); in imsic_vector_force_move_cleanup()
350 raw_spin_unlock_irqrestore(&lpriv->lock, flags); in imsic_vector_force_move_cleanup()
360 raw_spin_lock_irqsave(&lpriv->lock, flags); in imsic_vector_move_update()
363 enabled = READ_ONCE(vec->enable); in imsic_vector_move_update()
364 WRITE_ONCE(vec->enable, new_enable); in imsic_vector_move_update()
366 WRITE_ONCE(vec->move_next, move_vec); in imsic_vector_move_update()
368 WRITE_ONCE(vec->move_prev, move_vec); in imsic_vector_move_update()
371 bitmap_set(lpriv->dirty_bitmap, vec->local_id, 1); in imsic_vector_move_update()
372 __imsic_remote_sync(lpriv, vec->cpu); in imsic_vector_move_update()
374 raw_spin_unlock_irqrestore(&lpriv->lock, flags); in imsic_vector_move_update()
384 if (WARN_ON_ONCE(old_vec->cpu == new_vec->cpu)) in imsic_vector_move()
387 old_lpriv = per_cpu_ptr(imsic->lpriv, old_vec->cpu); in imsic_vector_move()
388 if (WARN_ON_ONCE(&old_lpriv->vectors[old_vec->local_id] != old_vec)) in imsic_vector_move()
391 new_lpriv = per_cpu_ptr(imsic->lpriv, new_vec->cpu); in imsic_vector_move()
392 if (WARN_ON_ONCE(&new_lpriv->vectors[new_vec->local_id] != new_vec)) in imsic_vector_move()
396 * Move and re-trigger the new vector based on the pending in imsic_vector_move()
412 lpriv = per_cpu_ptr(imsic->lpriv, vec->cpu); in imsic_vector_debug_show()
413 if (WARN_ON_ONCE(&lpriv->vectors[vec->local_id] != vec)) in imsic_vector_debug_show()
419 seq_printf(m, "%*starget_cpu : %5u\n", ind, "", vec->cpu); in imsic_vector_debug_show()
420 seq_printf(m, "%*starget_local_id : %5u\n", ind, "", vec->local_id); in imsic_vector_debug_show()
422 (vec->local_id <= IMSIC_IPI_ID) ? 1 : 0); in imsic_vector_debug_show()
426 seq_printf(m, "%*smove_cpu : %5u\n", ind, "", mvec->cpu); in imsic_vector_debug_show()
427 seq_printf(m, "%*smove_local_id : %5u\n", ind, "", mvec->local_id); in imsic_vector_debug_show()
433 irq_matrix_debug_show(m, imsic->matrix, ind); in imsic_vector_debug_show_summary()
439 struct imsic_local_priv *lpriv = per_cpu_ptr(imsic->lpriv, cpu); in imsic_vector_from_local_id()
441 if (!lpriv || imsic->global.nr_ids < local_id) in imsic_vector_from_local_id()
444 return &lpriv->vectors[local_id]; in imsic_vector_from_local_id()
455 raw_spin_lock_irqsave(&imsic->matrix_lock, flags); in imsic_vector_alloc()
456 local_id = irq_matrix_alloc(imsic->matrix, mask, false, &cpu); in imsic_vector_alloc()
457 raw_spin_unlock_irqrestore(&imsic->matrix_lock, flags); in imsic_vector_alloc()
461 lpriv = per_cpu_ptr(imsic->lpriv, cpu); in imsic_vector_alloc()
462 vec = &lpriv->vectors[local_id]; in imsic_vector_alloc()
463 vec->irq = irq; in imsic_vector_alloc()
464 vec->enable = false; in imsic_vector_alloc()
465 vec->move_next = NULL; in imsic_vector_alloc()
466 vec->move_prev = NULL; in imsic_vector_alloc()
475 raw_spin_lock_irqsave(&imsic->matrix_lock, flags); in imsic_vector_free()
476 vec->irq = 0; in imsic_vector_free()
477 irq_matrix_free(imsic->matrix, vec->cpu, vec->local_id, false); in imsic_vector_free()
478 raw_spin_unlock_irqrestore(&imsic->matrix_lock, flags); in imsic_vector_free()
487 lpriv = per_cpu_ptr(imsic->lpriv, cpu); in imsic_local_cleanup()
489 bitmap_free(lpriv->dirty_bitmap); in imsic_local_cleanup()
490 kfree(lpriv->vectors); in imsic_local_cleanup()
493 free_percpu(imsic->lpriv); in imsic_local_cleanup()
498 struct imsic_global_config *global = &imsic->global; in imsic_local_init()
503 /* Allocate per-CPU private state */ in imsic_local_init()
504 imsic->lpriv = alloc_percpu(typeof(*imsic->lpriv)); in imsic_local_init()
505 if (!imsic->lpriv) in imsic_local_init()
506 return -ENOMEM; in imsic_local_init()
508 /* Setup per-CPU private state */ in imsic_local_init()
510 lpriv = per_cpu_ptr(imsic->lpriv, cpu); in imsic_local_init()
512 raw_spin_lock_init(&lpriv->lock); in imsic_local_init()
515 lpriv->dirty_bitmap = bitmap_zalloc(global->nr_ids + 1, GFP_KERNEL); in imsic_local_init()
516 if (!lpriv->dirty_bitmap) in imsic_local_init()
521 timer_setup(&lpriv->timer, imsic_local_timer_callback, TIMER_PINNED); in imsic_local_init()
525 lpriv->vectors = kcalloc(global->nr_ids + 1, sizeof(*lpriv->vectors), in imsic_local_init()
527 if (!lpriv->vectors) in imsic_local_init()
531 for (i = 0; i <= global->nr_ids; i++) { in imsic_local_init()
532 vec = &lpriv->vectors[i]; in imsic_local_init()
533 vec->cpu = cpu; in imsic_local_init()
534 vec->local_id = i; in imsic_local_init()
535 vec->irq = 0; in imsic_local_init()
543 return -ENOMEM; in imsic_local_init()
550 raw_spin_lock_irqsave(&imsic->matrix_lock, flags); in imsic_state_online()
551 irq_matrix_online(imsic->matrix); in imsic_state_online()
552 raw_spin_unlock_irqrestore(&imsic->matrix_lock, flags); in imsic_state_online()
559 raw_spin_lock_irqsave(&imsic->matrix_lock, flags); in imsic_state_offline()
560 irq_matrix_offline(imsic->matrix); in imsic_state_offline()
561 raw_spin_unlock_irqrestore(&imsic->matrix_lock, flags); in imsic_state_offline()
564 struct imsic_local_priv *lpriv = this_cpu_ptr(imsic->lpriv); in imsic_state_offline()
566 raw_spin_lock_irqsave(&lpriv->lock, flags); in imsic_state_offline()
567 WARN_ON_ONCE(timer_delete_sync_try(&lpriv->timer) < 0); in imsic_state_offline()
568 raw_spin_unlock_irqrestore(&lpriv->lock, flags); in imsic_state_offline()
574 struct imsic_global_config *global = &imsic->global; in imsic_matrix_init()
576 raw_spin_lock_init(&imsic->matrix_lock); in imsic_matrix_init()
577 imsic->matrix = irq_alloc_matrix(global->nr_ids + 1, in imsic_matrix_init()
578 0, global->nr_ids + 1); in imsic_matrix_init()
579 if (!imsic->matrix) in imsic_matrix_init()
580 return -ENOMEM; in imsic_matrix_init()
583 irq_matrix_assign_system(imsic->matrix, 0, false); in imsic_matrix_init()
586 irq_matrix_assign_system(imsic->matrix, IMSIC_IPI_ID, false); in imsic_matrix_init()
597 /* Find number of guest index bits in MSI address */ in imsic_populate_global_dt()
598 rc = of_property_read_u32(to_of_node(fwnode), "riscv,guest-index-bits", in imsic_populate_global_dt()
599 &global->guest_index_bits); in imsic_populate_global_dt()
601 global->guest_index_bits = 0; in imsic_populate_global_dt()
603 /* Find number of HART index bits */ in imsic_populate_global_dt()
604 rc = of_property_read_u32(to_of_node(fwnode), "riscv,hart-index-bits", in imsic_populate_global_dt()
605 &global->hart_index_bits); in imsic_populate_global_dt()
608 global->hart_index_bits = __fls(*nr_parent_irqs); in imsic_populate_global_dt()
609 if (BIT(global->hart_index_bits) < *nr_parent_irqs) in imsic_populate_global_dt()
610 global->hart_index_bits++; in imsic_populate_global_dt()
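
Lines 608-610 fall back to deriving hart-index-bits as the smallest number of bits that can encode all parent interrupts, i.e. ceil(log2(nr_parent_irqs)). A standalone sketch of that arithmetic (the local fls_u32() helper stands in for the kernel's __fls(); the irq counts are made-up examples):

#include <stdio.h>

static unsigned int fls_u32(unsigned int x)
{
	unsigned int r = 0;

	while (x >>= 1)
		r++;
	return r; /* index of the highest set bit, like the kernel's __fls() */
}

int main(void)
{
	unsigned int examples[] = { 1, 2, 4, 6, 8 };

	for (unsigned int i = 0; i < 5; i++) {
		unsigned int n = examples[i];
		unsigned int bits = fls_u32(n);

		/* Same fallback as lines 608-610: bump if 2^bits cannot cover n harts. */
		if ((1U << bits) < n)
			bits++;
		printf("%u parent irqs -> hart-index-bits = %u\n", n, bits);
	}
	return 0;
}
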
613 /* Find number of group index bits */ in imsic_populate_global_dt()
614 rc = of_property_read_u32(to_of_node(fwnode), "riscv,group-index-bits", in imsic_populate_global_dt()
615 &global->group_index_bits); in imsic_populate_global_dt()
617 global->group_index_bits = 0; in imsic_populate_global_dt()
620 * Find first bit position of group index. in imsic_populate_global_dt()
621 * If not specified, assume the default APLIC-IMSIC configuration. in imsic_populate_global_dt()
623 rc = of_property_read_u32(to_of_node(fwnode), "riscv,group-index-shift", in imsic_populate_global_dt()
624 &global->group_index_shift); in imsic_populate_global_dt()
626 global->group_index_shift = IMSIC_MMIO_PAGE_SHIFT * 2; in imsic_populate_global_dt()
629 rc = of_property_read_u32(to_of_node(fwnode), "riscv,num-ids", in imsic_populate_global_dt()
630 &global->nr_ids); in imsic_populate_global_dt()
637 rc = of_property_read_u32(to_of_node(fwnode), "riscv,num-guest-ids", in imsic_populate_global_dt()
638 &global->nr_guest_ids); in imsic_populate_global_dt()
640 global->nr_guest_ids = global->nr_ids; in imsic_populate_global_dt()
651 global->guest_index_bits = imsic->guest_index_bits; in imsic_populate_global_acpi()
652 global->hart_index_bits = imsic->hart_index_bits; in imsic_populate_global_acpi()
653 global->group_index_bits = imsic->group_index_bits; in imsic_populate_global_acpi()
654 global->group_index_shift = imsic->group_index_shift; in imsic_populate_global_acpi()
655 global->nr_ids = imsic->num_ids; in imsic_populate_global_acpi()
656 global->nr_guest_ids = imsic->num_guest_ids; in imsic_populate_global_acpi()
661 u32 index, unsigned long *hartid) in imsic_get_parent_hartid() argument
668 *hartid = acpi_rintc_index_to_hartid(index); in imsic_get_parent_hartid()
671 return -EINVAL; in imsic_get_parent_hartid()
676 rc = of_irq_parse_one(to_of_node(fwnode), index, &parent); in imsic_get_parent_hartid()
685 return -EINVAL; in imsic_get_parent_hartid()
691 u32 index, struct resource *res) in imsic_get_mmio_resource() argument
694 return acpi_rintc_get_imsic_mmio_info(index, res); in imsic_get_mmio_resource()
696 return of_address_to_resource(to_of_node(fwnode), index, res); in imsic_get_mmio_resource()
718 return -EINVAL; in imsic_parse_fwnode()
729 /* Sanity check guest index bits */ in imsic_parse_fwnode()
730 i = BITS_PER_LONG - IMSIC_MMIO_PAGE_SHIFT; in imsic_parse_fwnode()
731 if (i < global->guest_index_bits) { in imsic_parse_fwnode()
732 pr_err("%pfwP: guest index bits too big\n", fwnode); in imsic_parse_fwnode()
733 return -EINVAL; in imsic_parse_fwnode()
736 /* Sanity check HART index bits */ in imsic_parse_fwnode()
737 i = BITS_PER_LONG - IMSIC_MMIO_PAGE_SHIFT - global->guest_index_bits; in imsic_parse_fwnode()
738 if (i < global->hart_index_bits) { in imsic_parse_fwnode()
739 pr_err("%pfwP: HART index bits too big\n", fwnode); in imsic_parse_fwnode()
740 return -EINVAL; in imsic_parse_fwnode()
743 /* Sanity check group index bits */ in imsic_parse_fwnode()
744 i = BITS_PER_LONG - IMSIC_MMIO_PAGE_SHIFT - in imsic_parse_fwnode()
745 global->guest_index_bits - global->hart_index_bits; in imsic_parse_fwnode()
746 if (i < global->group_index_bits) { in imsic_parse_fwnode()
747 pr_err("%pfwP: group index bits too big\n", fwnode); in imsic_parse_fwnode()
748 return -EINVAL; in imsic_parse_fwnode()
751 /* Sanity check group index shift */ in imsic_parse_fwnode()
752 i = global->group_index_bits + global->group_index_shift - 1; in imsic_parse_fwnode()
754 pr_err("%pfwP: group index shift too big\n", fwnode); in imsic_parse_fwnode()
755 return -EINVAL; in imsic_parse_fwnode()
759 if (global->nr_ids < IMSIC_MIN_ID || in imsic_parse_fwnode()
760 global->nr_ids >= IMSIC_MAX_ID || in imsic_parse_fwnode()
761 (global->nr_ids & IMSIC_MIN_ID) != IMSIC_MIN_ID) { in imsic_parse_fwnode()
763 return -EINVAL; in imsic_parse_fwnode()
767 if (global->nr_guest_ids < IMSIC_MIN_ID || in imsic_parse_fwnode()
768 global->nr_guest_ids >= IMSIC_MAX_ID || in imsic_parse_fwnode()
769 (global->nr_guest_ids & IMSIC_MIN_ID) != IMSIC_MIN_ID) { in imsic_parse_fwnode()
771 return -EINVAL; in imsic_parse_fwnode()
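
The two ID-count checks above only accept values of the form 64*k - 1 within the supported range. A standalone sketch of the rule, with IMSIC_MIN_ID and IMSIC_MAX_ID assumed to be 63 and 2048 (their definitions are not visible in this excerpt):

#include <stdbool.h>
#include <stdio.h>

#define IMSIC_MIN_ID 63   /* assumption: matches the driver header */
#define IMSIC_MAX_ID 2048 /* assumption: matches the driver header */

static bool imsic_nr_ids_valid(unsigned int nr_ids)
{
	/* Mirrors the checks above: in range and the low six bits all set. */
	return nr_ids >= IMSIC_MIN_ID && nr_ids < IMSIC_MAX_ID &&
	       (nr_ids & IMSIC_MIN_ID) == IMSIC_MIN_ID;
}

int main(void)
{
	/* Valid: 63, 127, 191, ..., 2047. Invalid: 128, 2048, etc. */
	printf("63: %d, 127: %d, 128: %d, 2047: %d, 2048: %d\n",
	       imsic_nr_ids_valid(63), imsic_nr_ids_valid(127),
	       imsic_nr_ids_valid(128), imsic_nr_ids_valid(2047),
	       imsic_nr_ids_valid(2048));
	return 0;
}
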
778 return -EINVAL; in imsic_parse_fwnode()
780 global->base_addr = res.start; in imsic_parse_fwnode()
781 global->base_addr &= ~(BIT(global->guest_index_bits + in imsic_parse_fwnode()
782 global->hart_index_bits + in imsic_parse_fwnode()
783 IMSIC_MMIO_PAGE_SHIFT) - 1); in imsic_parse_fwnode()
784 global->base_addr &= ~((BIT(global->group_index_bits) - 1) << in imsic_parse_fwnode()
785 global->group_index_shift); in imsic_parse_fwnode()
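
The masking above strips the guest, hart, and group index fields out of the configured base address, which implies a per-interrupt-file page address is formed by OR-ing those fields back in. A standalone sketch of that composition (IMSIC_MMIO_PAGE_SHIFT assumed to be 12, i.e. 4 KiB interrupt-file pages; the helper name and example values are illustrative only):

#include <stdint.h>
#include <stdio.h>

#define IMSIC_MMIO_PAGE_SHIFT 12 /* assumption: 4 KiB interrupt-file pages */

static uint64_t imsic_file_addr(uint64_t base, uint32_t group_index_shift,
				uint32_t guest_index_bits,
				uint32_t group, uint32_t hart, uint32_t guest)
{
	/* Fields occupy the bit ranges cleared from base_addr above. */
	return base |
	       ((uint64_t)group << group_index_shift) |
	       ((uint64_t)hart << (guest_index_bits + IMSIC_MMIO_PAGE_SHIFT)) |
	       ((uint64_t)guest << IMSIC_MMIO_PAGE_SHIFT);
}

int main(void)
{
	/* Example: base 0x28000000, single group, 3 guest-index bits, hart 2. */
	printf("0x%llx\n",
	       (unsigned long long)imsic_file_addr(0x28000000ULL, 24, 3, 0, 2, 0));
	return 0;
}
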
796 u32 i, j, index, nr_parent_irqs, nr_mmios, nr_handlers = 0; in imsic_setup_state() local
807 * implementation of SMP IRQ affinity and per-CPU IPIs. in imsic_setup_state()
809 * This means on a multi-socket (or multi-die) platform we in imsic_setup_state()
814 return -EALREADY; in imsic_setup_state()
819 return -ENODEV; in imsic_setup_state()
824 return -ENOMEM; in imsic_setup_state()
825 imsic->fwnode = fwnode; in imsic_setup_state()
826 global = &imsic->global; in imsic_setup_state()
828 global->local = alloc_percpu(typeof(*global->local)); in imsic_setup_state()
829 if (!global->local) { in imsic_setup_state()
830 rc = -ENOMEM; in imsic_setup_state()
842 rc = -ENOMEM; in imsic_setup_state()
849 rc = -ENOMEM; in imsic_setup_state()
862 base_addr &= ~(BIT(global->guest_index_bits + in imsic_setup_state()
863 global->hart_index_bits + in imsic_setup_state()
864 IMSIC_MMIO_PAGE_SHIFT) - 1); in imsic_setup_state()
865 base_addr &= ~((BIT(global->group_index_bits) - 1) << in imsic_setup_state()
866 global->group_index_shift); in imsic_setup_state()
867 if (base_addr != global->base_addr) { in imsic_setup_state()
868 rc = -EINVAL; in imsic_setup_state()
875 rc = -EIO; in imsic_setup_state()
881 /* Initialize local (or per-CPU) state */ in imsic_setup_state()
893 pr_warn("%pfwP: hart ID for parent irq%d not found\n", fwnode, i); in imsic_setup_state()
904 index = nr_mmios; in imsic_setup_state()
905 reloff = i * BIT(global->guest_index_bits) * in imsic_setup_state()
909 index = j; in imsic_setup_state()
915 * BIT(global->guest_index_bits) * IMSIC_MMIO_PAGE_SZ in imsic_setup_state()
918 reloff -= ALIGN(resource_size(&mmios[j]), in imsic_setup_state()
919 BIT(global->guest_index_bits) * IMSIC_MMIO_PAGE_SZ); in imsic_setup_state()
921 if (index >= nr_mmios) { in imsic_setup_state()
926 local = per_cpu_ptr(global->local, cpu); in imsic_setup_state()
927 local->msi_pa = mmios[index].start + reloff; in imsic_setup_state()
928 local->msi_va = mmios_va[index] + reloff; in imsic_setup_state()
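
Lines 904-928 locate each hart's MSI page by starting from a per-hart offset of BIT(guest_index_bits) * IMSIC_MMIO_PAGE_SZ bytes and walking the MMIO regions until the offset falls inside one. A standalone sketch of the same walk (region sizes, the hart number, and IMSIC_MMIO_PAGE_SZ are assumed example values):

#include <stdint.h>
#include <stdio.h>

#define IMSIC_MMIO_PAGE_SZ 4096UL /* assumption: 4 KiB pages */

static uint64_t align_up(uint64_t x, uint64_t a)
{
	return (x + a - 1) & ~(a - 1);
}

int main(void)
{
	uint64_t sizes[] = { 0x8000, 0x8000 };  /* two example MMIO region sizes */
	unsigned int guest_index_bits = 1, hart = 5, j;
	uint64_t stride = (1UL << guest_index_bits) * IMSIC_MMIO_PAGE_SZ;
	uint64_t reloff = hart * stride;        /* hart's offset from the first region */

	for (j = 0; j < 2; j++) {
		if (reloff < sizes[j])
			break;
		/* Skip past this region, rounded up to the per-hart stride. */
		reloff -= align_up(sizes[j], stride);
	}
	printf("hart %u -> region %u, offset 0x%llx\n",
	       hart, j, (unsigned long long)reloff);
	return 0;
}
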
936 rc = -ENODEV; in imsic_setup_state()
947 /* We don't need the MMIO arrays anymore, so free them up */ in imsic_setup_state()
963 free_percpu(imsic->global.local); in imsic_setup_state()