Lines matching references to apicd
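For orientation, the members touched by the matches below can be collected into a sketch of struct apic_chip_data. This is a reconstruction from the accesses listed here, not a copy of the kernel source; the authoritative definition in arch/x86/kernel/apic/vector.c may differ in field types, widths and ordering.

/*
 * Sketch of struct apic_chip_data, reconstructed from the member
 * accesses in the matches below. Types and field order are assumptions.
 */
struct apic_chip_data {
	struct irq_cfg		hw_irq_cfg;	/* vector + destination APIC ID (see irqd_cfg()) */
	unsigned int		vector;		/* currently assigned vector */
	unsigned int		prev_vector;	/* old vector while an affinity move is pending */
	unsigned int		cpu;		/* current target CPU */
	unsigned int		prev_cpu;	/* previous target CPU */
	unsigned int		irq;		/* Linux interrupt number */
	struct hlist_node	clist;		/* entry in a per-CPU cleanup list */
	unsigned int		move_in_progress : 1,	/* old vector awaits cleanup */
				is_managed	 : 1,	/* affinity-managed interrupt */
				can_reserve	 : 1,	/* may fall back to a reserved vector */
				has_reserved	 : 1;	/* currently holds only a reservation */
};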
102 struct apic_chip_data *apicd = apic_chip_data(irqd); in irqd_cfg() local
104 return apicd ? &apicd->hw_irq_cfg : NULL; in irqd_cfg()
115 struct apic_chip_data *apicd; in alloc_apic_chip_data() local
117 apicd = kzalloc_node(sizeof(*apicd), GFP_KERNEL, node); in alloc_apic_chip_data()
118 if (apicd) in alloc_apic_chip_data()
119 INIT_HLIST_NODE(&apicd->clist); in alloc_apic_chip_data()
120 return apicd; in alloc_apic_chip_data()
123 static void free_apic_chip_data(struct apic_chip_data *apicd) in free_apic_chip_data() argument
125 kfree(apicd); in free_apic_chip_data()
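Assembled from the matched lines above, the allocation and release helpers amount to the following sketch (braces and the exact type of the node parameter are reconstructed; only the lines shown above are certain):

static struct apic_chip_data *alloc_apic_chip_data(int node)
{
	struct apic_chip_data *apicd;

	/* NUMA-node aware, zeroed allocation: all flags start out clear */
	apicd = kzalloc_node(sizeof(*apicd), GFP_KERNEL, node);
	if (apicd)
		INIT_HLIST_NODE(&apicd->clist);	/* not on any cleanup list yet */
	return apicd;
}

static void free_apic_chip_data(struct apic_chip_data *apicd)
{
	kfree(apicd);
}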
131 struct apic_chip_data *apicd = apic_chip_data(irqd); in apic_update_irq_cfg() local
135 apicd->hw_irq_cfg.vector = vector; in apic_update_irq_cfg()
136 apicd->hw_irq_cfg.dest_apicid = apic->calc_dest_apicid(cpu); in apic_update_irq_cfg()
141 trace_vector_config(irqd->irq, vector, cpu, apicd->hw_irq_cfg.dest_apicid); in apic_update_irq_cfg()
152 struct apic_chip_data *apicd = apic_chip_data(irqd); in chip_data_update() local
158 trace_vector_update(irqd->irq, newvec, newcpu, apicd->vector, in chip_data_update()
159 apicd->cpu); in chip_data_update()
167 apicd->prev_vector = 0; in chip_data_update()
168 if (!apicd->vector || apicd->vector == MANAGED_IRQ_SHUTDOWN_VECTOR) in chip_data_update()
178 if (cpu_online(apicd->cpu)) { in chip_data_update()
179 apicd->move_in_progress = true; in chip_data_update()
180 apicd->prev_vector = apicd->vector; in chip_data_update()
181 apicd->prev_cpu = apicd->cpu; in chip_data_update()
182 WARN_ON_ONCE(apicd->cpu == newcpu); in chip_data_update()
184 apic_free_vector(apicd->cpu, apicd->vector, managed); in chip_data_update()
188 apicd->vector = newvec; in chip_data_update()
189 apicd->cpu = newcpu; in chip_data_update()
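The chip_data_update() matches above capture the affinity-change bookkeeping: if the interrupt still has a live vector and its current target CPU is online, the old vector/CPU pair is parked in prev_vector/prev_cpu and move_in_progress is set so the old vector can be reclaimed after the move; if the old CPU is offline, the vector is freed immediately. A condensed sketch, with locking, tracing and the per-CPU vector_irq update elided:

	apicd->prev_vector = 0;
	if (apicd->vector && apicd->vector != MANAGED_IRQ_SHUTDOWN_VECTOR) {
		if (cpu_online(apicd->cpu)) {
			/* Old target still alive: defer releasing the old
			 * vector until the interrupt fires on the new one.
			 */
			apicd->move_in_progress = true;
			apicd->prev_vector = apicd->vector;
			apicd->prev_cpu = apicd->cpu;
			WARN_ON_ONCE(apicd->cpu == newcpu);
		} else {
			/* Old target CPU is gone: release the vector now */
			apic_free_vector(apicd->cpu, apicd->vector, managed);
		}
	}
	apicd->vector = newvec;
	apicd->cpu = newcpu;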
205 struct apic_chip_data *apicd = apic_chip_data(irqd); in reserve_managed_vector() local
210 apicd->is_managed = true; in reserve_managed_vector()
219 struct apic_chip_data *apicd = apic_chip_data(irqd); in reserve_irq_vector_locked() local
222 apicd->can_reserve = true; in reserve_irq_vector_locked()
223 apicd->has_reserved = true; in reserve_irq_vector_locked()
242 struct apic_chip_data *apicd = apic_chip_data(irqd); in assign_vector_locked() local
243 bool resvd = apicd->has_reserved; in assign_vector_locked()
244 unsigned int cpu = apicd->cpu; in assign_vector_locked()
245 int vector = apicd->vector; in assign_vector_locked()
263 if (apicd->move_in_progress || !hlist_unhashed(&apicd->clist)) in assign_vector_locked()
333 struct apic_chip_data *apicd = apic_chip_data(irqd); in assign_managed_vector() local
339 if (apicd->vector && cpumask_test_cpu(apicd->cpu, vector_searchmask)) in assign_managed_vector()
353 struct apic_chip_data *apicd = apic_chip_data(irqd); in clear_irq_vector() local
355 unsigned int vector = apicd->vector; in clear_irq_vector()
362 trace_vector_clear(irqd->irq, vector, apicd->cpu, apicd->prev_vector, in clear_irq_vector()
363 apicd->prev_cpu); in clear_irq_vector()
365 per_cpu(vector_irq, apicd->cpu)[vector] = VECTOR_SHUTDOWN; in clear_irq_vector()
366 apic_free_vector(apicd->cpu, vector, managed); in clear_irq_vector()
367 apicd->vector = 0; in clear_irq_vector()
370 vector = apicd->prev_vector; in clear_irq_vector()
374 per_cpu(vector_irq, apicd->prev_cpu)[vector] = VECTOR_SHUTDOWN; in clear_irq_vector()
375 apic_free_vector(apicd->prev_cpu, vector, managed); in clear_irq_vector()
376 apicd->prev_vector = 0; in clear_irq_vector()
377 apicd->move_in_progress = 0; in clear_irq_vector()
378 hlist_del_init(&apicd->clist); in clear_irq_vector()
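The clear_irq_vector() matches show that teardown may have to release two vectors: the currently assigned one and, if an affinity move was still in flight, the previous one, after which the entry is unhooked from the cleanup list. A condensed sketch (locking and tracing elided; only the operations listed above are assumed):

	/* Release the currently assigned vector */
	per_cpu(vector_irq, apicd->cpu)[vector] = VECTOR_SHUTDOWN;
	apic_free_vector(apicd->cpu, vector, managed);
	apicd->vector = 0;

	/* A pending move still holds the previous vector on the old CPU;
	 * release that one too and drop the cleanup-list entry.
	 */
	vector = apicd->prev_vector;
	if (vector) {
		per_cpu(vector_irq, apicd->prev_cpu)[vector] = VECTOR_SHUTDOWN;
		apic_free_vector(apicd->prev_cpu, vector, managed);
		apicd->prev_vector = 0;
		apicd->move_in_progress = 0;
		hlist_del_init(&apicd->clist);
	}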
383 struct apic_chip_data *apicd = apic_chip_data(irqd); in x86_vector_deactivate() local
386 trace_vector_deactivate(irqd->irq, apicd->is_managed, in x86_vector_deactivate()
387 apicd->can_reserve, false); in x86_vector_deactivate()
390 if (!apicd->is_managed && !apicd->can_reserve) in x86_vector_deactivate()
393 if (apicd->has_reserved) in x86_vector_deactivate()
398 if (apicd->can_reserve) in x86_vector_deactivate()
407 struct apic_chip_data *apicd = apic_chip_data(irqd); in activate_reserved() local
412 apicd->has_reserved = false; in activate_reserved()
421 apicd->can_reserve = false; in activate_reserved()
464 struct apic_chip_data *apicd = apic_chip_data(irqd); in x86_vector_activate() local
468 trace_vector_activate(irqd->irq, apicd->is_managed, in x86_vector_activate()
469 apicd->can_reserve, reserve); in x86_vector_activate()
472 if (!apicd->can_reserve && !apicd->is_managed) in x86_vector_activate()
476 else if (apicd->is_managed) in x86_vector_activate()
478 else if (apicd->has_reserved) in x86_vector_activate()
487 struct apic_chip_data *apicd = apic_chip_data(irqd); in vector_free_reserved_and_managed() local
489 trace_vector_teardown(irqd->irq, apicd->is_managed, in vector_free_reserved_and_managed()
490 apicd->has_reserved); in vector_free_reserved_and_managed()
492 if (apicd->has_reserved) in vector_free_reserved_and_managed()
494 if (apicd->is_managed) in vector_free_reserved_and_managed()
501 struct apic_chip_data *apicd; in x86_vector_free_irqs() local
512 apicd = irqd->chip_data; in x86_vector_free_irqs()
515 free_apic_chip_data(apicd); in x86_vector_free_irqs()
521 struct apic_chip_data *apicd) in vector_configure_legacy() argument
526 apicd->vector = ISA_IRQ_VECTOR(virq); in vector_configure_legacy()
527 apicd->cpu = 0; in vector_configure_legacy()
536 apic_update_irq_cfg(irqd, apicd->vector, apicd->cpu); in vector_configure_legacy()
539 apicd->can_reserve = true; in vector_configure_legacy()
552 struct apic_chip_data *apicd; in x86_vector_alloc_irqs() local
572 apicd = alloc_apic_chip_data(node); in x86_vector_alloc_irqs()
573 if (!apicd) { in x86_vector_alloc_irqs()
578 apicd->irq = virq + i; in x86_vector_alloc_irqs()
580 irqd->chip_data = apicd; in x86_vector_alloc_irqs()
601 if (!vector_configure_legacy(virq + i, irqd, apicd)) in x86_vector_alloc_irqs()
609 free_apic_chip_data(apicd); in x86_vector_alloc_irqs()
625 struct apic_chip_data apicd; in x86_vector_debug_show() local
647 memcpy(&apicd, irqd->chip_data, sizeof(apicd)); in x86_vector_debug_show()
650 seq_printf(m, "%*sVector: %5u\n", ind, "", apicd.vector); in x86_vector_debug_show()
651 seq_printf(m, "%*sTarget: %5u\n", ind, "", apicd.cpu); in x86_vector_debug_show()
652 if (apicd.prev_vector) { in x86_vector_debug_show()
653 seq_printf(m, "%*sPrevious vector: %5u\n", ind, "", apicd.prev_vector); in x86_vector_debug_show()
654 seq_printf(m, "%*sPrevious target: %5u\n", ind, "", apicd.prev_cpu); in x86_vector_debug_show()
656 seq_printf(m, "%*smove_in_progress: %u\n", ind, "", apicd.move_in_progress ? 1 : 0); in x86_vector_debug_show()
657 seq_printf(m, "%*sis_managed: %u\n", ind, "", apicd.is_managed ? 1 : 0); in x86_vector_debug_show()
658 seq_printf(m, "%*scan_reserve: %u\n", ind, "", apicd.can_reserve ? 1 : 0); in x86_vector_debug_show()
659 seq_printf(m, "%*shas_reserved: %u\n", ind, "", apicd.has_reserved ? 1 : 0); in x86_vector_debug_show()
660 seq_printf(m, "%*scleanup_pending: %u\n", ind, "", !hlist_unhashed(&apicd.clist)); in x86_vector_debug_show()
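Note that x86_vector_debug_show() declares struct apic_chip_data apicd as a value, not a pointer: the live chip data is memcpy()'d into this on-stack snapshot and the seq_printf() calls format only the copy, so the printout cannot race with a concurrent affinity update rewriting the fields. A sketch of the pattern (taking the snapshot under the vector lock is an assumption; it is not visible in these matches):

	struct apic_chip_data apicd;	/* on-stack snapshot, not a pointer */

	/* Assumed: copy taken while holding the vector lock */
	memcpy(&apicd, irqd->chip_data, sizeof(apicd));

	/* Print from the stable snapshot, no further locking needed */
	seq_printf(m, "%*sVector: %5u\n", ind, "", apicd.vector);
	seq_printf(m, "%*sTarget: %5u\n", ind, "", apicd.cpu);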
897 static void free_moved_vector(struct apic_chip_data *apicd) in free_moved_vector() argument
899 unsigned int vector = apicd->prev_vector; in free_moved_vector()
900 unsigned int cpu = apicd->prev_cpu; in free_moved_vector()
901 bool managed = apicd->is_managed; in free_moved_vector()
913 trace_vector_free_moved(apicd->irq, cpu, vector, managed); in free_moved_vector()
916 hlist_del_init(&apicd->clist); in free_moved_vector()
917 apicd->prev_vector = 0; in free_moved_vector()
918 apicd->move_in_progress = 0; in free_moved_vector()
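free_moved_vector() is the common tail of both cleanup paths: it clears the move bookkeeping for prev_vector/prev_cpu and takes the entry off the per-CPU cleanup list. Returning the old vector to the allocator is implied but not part of these matches. A sketch based only on the lines shown:

	unsigned int vector = apicd->prev_vector;
	unsigned int cpu = apicd->prev_cpu;
	bool managed = apicd->is_managed;

	/* Assumed, not shown in the matches: @vector is returned to the
	 * allocator for @cpu (managed or not) before the bookkeeping below.
	 */
	hlist_del_init(&apicd->clist);	/* off the per-CPU cleanup list */
	apicd->prev_vector = 0;
	apicd->move_in_progress = 0;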
927 struct apic_chip_data *apicd; in apic_force_complete_move() local
931 apicd = apic_chip_data(irqd); in apic_force_complete_move()
932 if (!apicd) in apic_force_complete_move()
939 vector = apicd->prev_vector; in apic_force_complete_move()
940 if (!vector || (apicd->cpu != cpu && apicd->prev_cpu != cpu)) in apic_force_complete_move()
958 if (apicd->move_in_progress) { in apic_force_complete_move()
994 free_moved_vector(apicd); in apic_force_complete_move()
1004 struct apic_chip_data *apicd = apic_chip_data(irqd); in apic_retrigger_irq() local
1008 __apic_send_IPI(apicd->cpu, apicd->vector); in apic_retrigger_irq()
1045 struct apic_chip_data *apicd; in __vector_cleanup() local
1051 hlist_for_each_entry_safe(apicd, tmp, &cl->head, clist) { in __vector_cleanup()
1052 unsigned int vector = apicd->prev_vector; in __vector_cleanup()
1067 pr_warn_once("Moved interrupt pending in old target APIC %u\n", apicd->irq); in __vector_cleanup()
1071 free_moved_vector(apicd); in __vector_cleanup()
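__vector_cleanup() walks the per-CPU cleanup list that __vector_schedule_cleanup() fills: every entry still owns a prev_vector on this CPU, and once that vector is quiescent the entry is handed to free_moved_vector(). A condensed sketch of the loop; vector_still_pending() is a hypothetical placeholder, since the actual pending check is not part of these matches:

	struct apic_chip_data *apicd;
	struct hlist_node *tmp;

	hlist_for_each_entry_safe(apicd, tmp, &cl->head, clist) {
		unsigned int vector = apicd->prev_vector;

		if (vector_still_pending(vector)) {	/* placeholder check */
			/* Old vector still pending in the local APIC: warn
			 * and leave the entry for a later cleanup pass.
			 */
			pr_warn_once("Moved interrupt pending in old target APIC %u\n",
				     apicd->irq);
			continue;
		}
		free_moved_vector(apicd);
	}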
1092 static void __vector_schedule_cleanup(struct apic_chip_data *apicd) in __vector_schedule_cleanup() argument
1094 unsigned int cpu = apicd->prev_cpu; in __vector_schedule_cleanup()
1097 apicd->move_in_progress = 0; in __vector_schedule_cleanup()
1101 hlist_add_head(&apicd->clist, &cl->head); in __vector_schedule_cleanup()
1122 pr_warn("IRQ %u schedule cleanup for offline CPU %u\n", apicd->irq, cpu); in __vector_schedule_cleanup()
1123 free_moved_vector(apicd); in __vector_schedule_cleanup()
1130 struct apic_chip_data *apicd; in vector_schedule_cleanup() local
1132 apicd = container_of(cfg, struct apic_chip_data, hw_irq_cfg); in vector_schedule_cleanup()
1133 if (apicd->move_in_progress) in vector_schedule_cleanup()
1134 __vector_schedule_cleanup(apicd); in vector_schedule_cleanup()
1139 struct apic_chip_data *apicd; in irq_complete_move() local
1141 apicd = container_of(cfg, struct apic_chip_data, hw_irq_cfg); in irq_complete_move()
1142 if (likely(!apicd->move_in_progress)) in irq_complete_move()
1151 if (apicd->cpu == smp_processor_id()) in irq_complete_move()
1152 __vector_schedule_cleanup(apicd); in irq_complete_move()
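The last two groups of matches show how the fast paths get from the hardware-facing struct irq_cfg back to the bookkeeping object: hw_irq_cfg is embedded in struct apic_chip_data, so container_of() recovers the container, and cleanup is only scheduled while move_in_progress is set; irq_complete_move() additionally requires that the interrupt arrived on the new target CPU. A sketch built from those lines (function signatures are reconstructed):

void vector_schedule_cleanup(struct irq_cfg *cfg)
{
	struct apic_chip_data *apicd;

	/* hw_irq_cfg is embedded in apic_chip_data */
	apicd = container_of(cfg, struct apic_chip_data, hw_irq_cfg);
	if (apicd->move_in_progress)
		__vector_schedule_cleanup(apicd);
}

void irq_complete_move(struct irq_cfg *cfg)
{
	struct apic_chip_data *apicd;

	apicd = container_of(cfg, struct apic_chip_data, hw_irq_cfg);
	if (likely(!apicd->move_in_progress))
		return;

	/* Only schedule cleanup once the interrupt has demonstrably
	 * arrived at its new destination CPU.
	 */
	if (apicd->cpu == smp_processor_id())
		__vector_schedule_cleanup(apicd);
}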