xref: /linux/drivers/net/ethernet/wangxun/txgbe/txgbe_irq.c (revision 7f71507851fc7764b36a3221839607d3a45c2025)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2015 - 2024 Beijing WangXun Technology Co., Ltd. */
3 
4 #include <linux/irqdomain.h>
5 #include <linux/pci.h>
6 
7 #include "../libwx/wx_type.h"
8 #include "../libwx/wx_lib.h"
9 #include "../libwx/wx_hw.h"
10 #include "txgbe_type.h"
11 #include "txgbe_phy.h"
12 #include "txgbe_irq.h"
13 
14 /**
15  * txgbe_irq_enable - Enable default interrupt generation settings
16  * @wx: pointer to private structure
17  * @queues: enable irqs for queues
18  **/
19 void txgbe_irq_enable(struct wx *wx, bool queues)
20 {
21 	wr32(wx, WX_PX_MISC_IEN, TXGBE_PX_MISC_IEN_MASK);
22 
23 	/* unmask interrupt */
24 	wx_intr_enable(wx, TXGBE_INTR_MISC);
25 	if (queues)
26 		wx_intr_enable(wx, TXGBE_INTR_QALL(wx));
27 }
28 
29 /**
30  * txgbe_request_queue_irqs - Initialize MSI-X queue interrupts
31  * @wx: board private structure
32  *
33  * Allocate MSI-X queue vectors and request interrupts from the kernel.
34  **/
35 int txgbe_request_queue_irqs(struct wx *wx)
36 {
37 	struct net_device *netdev = wx->netdev;
38 	int vector, err;
39 
40 	if (!wx->pdev->msix_enabled)
41 		return 0;
42 
43 	for (vector = 0; vector < wx->num_q_vectors; vector++) {
44 		struct wx_q_vector *q_vector = wx->q_vector[vector];
45 		struct msix_entry *entry = &wx->msix_q_entries[vector];
46 
47 		if (q_vector->tx.ring && q_vector->rx.ring)
48 			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
49 				 "%s-TxRx-%d", netdev->name, entry->entry);
50 		else
51 			/* skip this unused q_vector */
52 			continue;
53 
54 		err = request_irq(entry->vector, wx_msix_clean_rings, 0,
55 				  q_vector->name, q_vector);
56 		if (err) {
57 			wx_err(wx, "request_irq failed for MSIX interrupt %s Error: %d\n",
58 			       q_vector->name, err);
59 			goto free_queue_irqs;
60 		}
61 	}
62 
63 	return 0;
64 
65 free_queue_irqs:
66 	while (vector) {
67 		vector--;
68 		free_irq(wx->msix_q_entries[vector].vector,
69 			 wx->q_vector[vector]);
70 	}
71 	wx_reset_interrupt_capability(wx);
72 	return err;
73 }
74 
75 static int txgbe_request_link_irq(struct txgbe *txgbe)
76 {
77 	txgbe->link_irq = irq_find_mapping(txgbe->misc.domain, TXGBE_IRQ_LINK);
78 	return request_threaded_irq(txgbe->link_irq, NULL,
79 				    txgbe_link_irq_handler,
80 				    IRQF_ONESHOT, "txgbe-link-irq", txgbe);
81 }
82 
/* Minimal irq_chip for the misc sub-interrupt domain: the sub-IRQs need
 * no hardware mask/unmask callbacks, only a name for /proc/interrupts.
 */
static const struct irq_chip txgbe_irq_chip = {
	.name = "txgbe-misc-irq",
};
86 
/* .map callback for the misc IRQ domain: configure each newly created
 * virq with the driver's chip/chip-data and mark it for nested-thread
 * handling, since these sub-IRQs are dispatched via handle_nested_irq()
 * from the misc thread function rather than from hard IRQ context.
 */
static int txgbe_misc_irq_domain_map(struct irq_domain *d,
				     unsigned int irq,
				     irq_hw_number_t hwirq)
{
	struct txgbe *txgbe = d->host_data;

	irq_set_chip_data(irq, txgbe);
	irq_set_chip(irq, &txgbe->misc.chip);
	/* handlers run from the parent's threaded-IRQ context */
	irq_set_nested_thread(irq, true);
	/* exclude these virqs from IRQ autoprobing */
	irq_set_noprobe(irq);

	return 0;
}
100 
/* Domain ops for the misc sub-interrupt domain; only .map is needed. */
static const struct irq_domain_ops txgbe_misc_irq_domain_ops = {
	.map = txgbe_misc_irq_domain_map,
};
104 
/* Hard (primary) handler for the misc interrupt.
 *
 * Under MSI-X the vector carries only misc causes, so wake the thread
 * immediately. Under MSI/INTx the single vector covers the queues too:
 * read the vector-0 interrupt status block, detect a spurious shared
 * interrupt, ack INTx if needed, and schedule NAPI for queue work
 * before waking the thread to process the misc causes.
 */
static irqreturn_t txgbe_misc_irq_handle(int irq, void *data)
{
	struct wx_q_vector *q_vector;
	struct txgbe *txgbe = data;
	struct wx *wx = txgbe->wx;
	u32 eicr;

	/* MSI-X: queue work arrives on dedicated vectors, nothing to do here */
	if (wx->pdev->msix_enabled)
		return IRQ_WAKE_THREAD;

	eicr = wx_misc_isb(wx, WX_ISB_VEC0);
	if (!eicr) {
		/* shared interrupt alert!
		 * the interrupt that we masked before the ICR read.
		 */
		if (netif_running(wx->netdev))
			txgbe_irq_enable(wx, true);
		return IRQ_NONE;        /* Not our interrupt */
	}
	/* clear the consumed vector-0 status so the next cause is visible */
	wx->isb_mem[WX_ISB_VEC0] = 0;
	/* legacy INTx requires an explicit ack; MSI does not */
	if (!(wx->pdev->msi_enabled))
		wr32(wx, WX_PX_INTA, 1);

	/* would disable interrupts here but it is auto disabled */
	q_vector = wx->q_vector[0];
	napi_schedule_irqoff(&q_vector->napi);

	return IRQ_WAKE_THREAD;
}
134 
135 static irqreturn_t txgbe_misc_irq_thread_fn(int irq, void *data)
136 {
137 	struct txgbe *txgbe = data;
138 	struct wx *wx = txgbe->wx;
139 	unsigned int nhandled = 0;
140 	unsigned int sub_irq;
141 	u32 eicr;
142 
143 	eicr = wx_misc_isb(wx, WX_ISB_MISC);
144 	if (eicr & (TXGBE_PX_MISC_ETH_LK | TXGBE_PX_MISC_ETH_LKDN |
145 		    TXGBE_PX_MISC_ETH_AN)) {
146 		sub_irq = irq_find_mapping(txgbe->misc.domain, TXGBE_IRQ_LINK);
147 		handle_nested_irq(sub_irq);
148 		nhandled++;
149 	}
150 
151 	wx_intr_enable(wx, TXGBE_INTR_MISC);
152 	return (nhandled > 0 ? IRQ_HANDLED : IRQ_NONE);
153 }
154 
155 static void txgbe_del_irq_domain(struct txgbe *txgbe)
156 {
157 	int hwirq, virq;
158 
159 	for (hwirq = 0; hwirq < txgbe->misc.nirqs; hwirq++) {
160 		virq = irq_find_mapping(txgbe->misc.domain, hwirq);
161 		irq_dispose_mapping(virq);
162 	}
163 
164 	irq_domain_remove(txgbe->misc.domain);
165 }
166 
/* Reverse of txgbe_setup_misc_irq(): release the link sub-IRQ and the
 * misc vector itself, then tear down the IRQ domain. Order matters:
 * the domain must outlive the handlers that use its mappings.
 */
void txgbe_free_misc_irq(struct txgbe *txgbe)
{
	free_irq(txgbe->link_irq, txgbe);
	free_irq(txgbe->misc.irq, txgbe);
	txgbe_del_irq_domain(txgbe);
}
173 
174 int txgbe_setup_misc_irq(struct txgbe *txgbe)
175 {
176 	unsigned long flags = IRQF_ONESHOT;
177 	struct wx *wx = txgbe->wx;
178 	int hwirq, err;
179 
180 	txgbe->misc.nirqs = 1;
181 	txgbe->misc.domain = irq_domain_add_simple(NULL, txgbe->misc.nirqs, 0,
182 						   &txgbe_misc_irq_domain_ops, txgbe);
183 	if (!txgbe->misc.domain)
184 		return -ENOMEM;
185 
186 	for (hwirq = 0; hwirq < txgbe->misc.nirqs; hwirq++)
187 		irq_create_mapping(txgbe->misc.domain, hwirq);
188 
189 	txgbe->misc.chip = txgbe_irq_chip;
190 	if (wx->pdev->msix_enabled) {
191 		txgbe->misc.irq = wx->msix_entry->vector;
192 	} else {
193 		txgbe->misc.irq = wx->pdev->irq;
194 		if (!wx->pdev->msi_enabled)
195 			flags |= IRQF_SHARED;
196 	}
197 
198 	err = request_threaded_irq(txgbe->misc.irq, txgbe_misc_irq_handle,
199 				   txgbe_misc_irq_thread_fn,
200 				   flags,
201 				   wx->netdev->name, txgbe);
202 	if (err)
203 		goto del_misc_irq;
204 
205 	err = txgbe_request_link_irq(txgbe);
206 	if (err)
207 		goto free_msic_irq;
208 
209 	wx->misc_irq_domain = true;
210 
211 	return 0;
212 
213 free_msic_irq:
214 	free_irq(txgbe->misc.irq, txgbe);
215 del_misc_irq:
216 	txgbe_del_irq_domain(txgbe);
217 
218 	return err;
219 }
220