// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2015 - 2024 Beijing WangXun Technology Co., Ltd. */

#include <linux/irqdomain.h>
#include <linux/pci.h>

#include "../libwx/wx_type.h"
#include "../libwx/wx_lib.h"
#include "../libwx/wx_hw.h"
#include "txgbe_type.h"
#include "txgbe_phy.h"
#include "txgbe_irq.h"

/**
 * txgbe_irq_enable - Enable default interrupt generation settings
 * @wx: pointer to private structure
 * @queues: true to also unmask the queue interrupts
 **/
void txgbe_irq_enable(struct wx *wx, bool queues)
{
	wr32(wx, WX_PX_MISC_IEN, TXGBE_PX_MISC_IEN_MASK);

	/* unmask misc interrupt */
	wx_intr_enable(wx, TXGBE_INTR_MISC);
	if (queues)
		wx_intr_enable(wx, TXGBE_INTR_QALL(wx));
}

/**
 * txgbe_intr - MSI/legacy mode interrupt handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 **/
static irqreturn_t txgbe_intr(int __always_unused irq, void *data)
{
	struct wx_q_vector *q_vector;
	struct wx *wx = data;
	struct pci_dev *pdev;
	u32 eicr;

	q_vector = wx->q_vector[0];
	pdev = wx->pdev;

	eicr = wx_misc_isb(wx, WX_ISB_VEC0);
	if (!eicr) {
		/* shared interrupt alert!
		 * Nothing is pending in the ISB, so this interrupt is not
		 * ours; unmask the interrupts that were masked before the
		 * ISB read and let the sharing handler run.
		 */
		if (netif_running(wx->netdev))
			txgbe_irq_enable(wx, true);
		return IRQ_NONE;	/* Not our interrupt */
	}
	wx->isb_mem[WX_ISB_VEC0] = 0;
	if (!(pdev->msi_enabled))
		wr32(wx, WX_PX_INTA, 1);

	wx->isb_mem[WX_ISB_MISC] = 0;
	/* interrupts are auto-masked by hardware, so nothing to disable here */
	napi_schedule_irqoff(&q_vector->napi);

	/* re-enable link and other non-queue interrupts, no flush.
	 * txgbe_poll will re-enable the queue interrupts.
	 */
	if (netif_running(wx->netdev))
		txgbe_irq_enable(wx, false);

	return IRQ_HANDLED;
}

/**
 * txgbe_request_msix_irqs - Initialize MSI-X interrupts
 * @wx: board private structure
 *
 * Request one MSI-X interrupt from the kernel for each queue vector.
 **/
static int txgbe_request_msix_irqs(struct wx *wx)
{
	struct net_device *netdev = wx->netdev;
	int vector, err;

	for (vector = 0; vector < wx->num_q_vectors; vector++) {
		struct wx_q_vector *q_vector = wx->q_vector[vector];
		struct msix_entry *entry = &wx->msix_q_entries[vector];

		if (q_vector->tx.ring && q_vector->rx.ring)
			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
				 "%s-TxRx-%d", netdev->name, entry->entry);
		else
			/* skip this unused q_vector */
			continue;

		err = request_irq(entry->vector, wx_msix_clean_rings, 0,
				  q_vector->name, q_vector);
		if (err) {
			wx_err(wx, "request_irq failed for MSIX interrupt %s Error: %d\n",
			       q_vector->name, err);
			goto free_queue_irqs;
		}
	}

	return 0;

free_queue_irqs:
	while (vector) {
		vector--;
		free_irq(wx->msix_q_entries[vector].vector,
			 wx->q_vector[vector]);
	}
	wx_reset_interrupt_capability(wx);
	return err;
}

/**
 * txgbe_request_irq - initialize interrupts
 * @wx: board private structure
 *
 * Attempt to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
int txgbe_request_irq(struct wx *wx)
{
	struct net_device *netdev = wx->netdev;
	struct pci_dev *pdev = wx->pdev;
	int err;

	if (pdev->msix_enabled)
		err = txgbe_request_msix_irqs(wx);
	else if (pdev->msi_enabled)
		err = request_irq(wx->pdev->irq, &txgbe_intr, 0,
				  netdev->name, wx);
	else
		err = request_irq(wx->pdev->irq, &txgbe_intr, IRQF_SHARED,
				  netdev->name, wx);

	if (err)
		wx_err(wx, "request_irq failed, Error %d\n", err);

	return err;
}

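/* Request a threaded handler for the GPIO sub-interrupt that was mapped
 * into the software misc IRQ domain.
 */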
static int txgbe_request_gpio_irq(struct txgbe *txgbe)
{
	txgbe->gpio_irq = irq_find_mapping(txgbe->misc.domain, TXGBE_IRQ_GPIO);
	return request_threaded_irq(txgbe->gpio_irq, NULL,
				    txgbe_gpio_irq_handler,
				    IRQF_ONESHOT, "txgbe-gpio-irq", txgbe);
}

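/* Request a threaded handler for the link sub-interrupt that was mapped
 * into the software misc IRQ domain.
 */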
static int txgbe_request_link_irq(struct txgbe *txgbe)
{
	txgbe->link_irq = irq_find_mapping(txgbe->misc.domain, TXGBE_IRQ_LINK);
	return request_threaded_irq(txgbe->link_irq, NULL,
				    txgbe_link_irq_handler,
				    IRQF_ONESHOT, "txgbe-link-irq", txgbe);
}

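/* Dummy irq_chip for the software-created misc sub-interrupts; only a
 * name is provided.
 */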
static const struct irq_chip txgbe_irq_chip = {
	.name = "txgbe-misc-irq",
};

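/* .map callback for the misc IRQ domain: associate each software hwirq
 * with the txgbe irq_chip and mark it as a nested-thread interrupt.
 */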
static int txgbe_misc_irq_domain_map(struct irq_domain *d,
				     unsigned int irq,
				     irq_hw_number_t hwirq)
{
	struct txgbe *txgbe = d->host_data;

	irq_set_chip_data(irq, txgbe);
	irq_set_chip(irq, &txgbe->misc.chip);
	irq_set_nested_thread(irq, true);
	irq_set_noprobe(irq);

	return 0;
}

static const struct irq_domain_ops txgbe_misc_irq_domain_ops = {
	.map = txgbe_misc_irq_domain_map,
};

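/* Demultiplex the misc interrupt: read the misc ISB and dispatch the
 * GPIO and/or link sub-interrupts as nested threaded handlers, then
 * re-enable the misc interrupt.
 */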
static irqreturn_t txgbe_misc_irq_handle(int irq, void *data)
{
	struct txgbe *txgbe = data;
	struct wx *wx = txgbe->wx;
	unsigned int nhandled = 0;
	unsigned int sub_irq;
	u32 eicr;

	eicr = wx_misc_isb(wx, WX_ISB_MISC);
	if (eicr & TXGBE_PX_MISC_GPIO) {
		sub_irq = irq_find_mapping(txgbe->misc.domain, TXGBE_IRQ_GPIO);
		handle_nested_irq(sub_irq);
		nhandled++;
	}
	if (eicr & (TXGBE_PX_MISC_ETH_LK | TXGBE_PX_MISC_ETH_LKDN |
		    TXGBE_PX_MISC_ETH_AN)) {
		sub_irq = irq_find_mapping(txgbe->misc.domain, TXGBE_IRQ_LINK);
		handle_nested_irq(sub_irq);
		nhandled++;
	}

	wx_intr_enable(wx, TXGBE_INTR_MISC);
	return (nhandled > 0 ? IRQ_HANDLED : IRQ_NONE);
}

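/* Dispose of the software sub-interrupt mappings and remove the misc
 * IRQ domain.
 */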
static void txgbe_del_irq_domain(struct txgbe *txgbe)
{
	int hwirq, virq;

	for (hwirq = 0; hwirq < txgbe->misc.nirqs; hwirq++) {
		virq = irq_find_mapping(txgbe->misc.domain, hwirq);
		irq_dispose_mapping(virq);
	}

	irq_domain_remove(txgbe->misc.domain);
}

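/**
 * txgbe_free_misc_irq - free the misc and sub-interrupts
 * @txgbe: pointer to the txgbe structure
 *
 * Free the GPIO, link and misc interrupts and tear down the misc
 * IRQ domain.
 **/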
void txgbe_free_misc_irq(struct txgbe *txgbe)
{
	free_irq(txgbe->gpio_irq, txgbe);
	free_irq(txgbe->link_irq, txgbe);
	free_irq(txgbe->misc.irq, txgbe);
	txgbe_del_irq_domain(txgbe);
}

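/**
 * txgbe_setup_misc_irq - set up the misc interrupt and its IRQ domain
 * @txgbe: pointer to the txgbe structure
 *
 * Create a simple IRQ domain for the GPIO and link sub-interrupts,
 * then request the misc interrupt (the dedicated MSI-X vector if
 * available, otherwise the PCI device's IRQ) and the two
 * sub-interrupts.
 **/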
int txgbe_setup_misc_irq(struct txgbe *txgbe)
{
	struct wx *wx = txgbe->wx;
	int hwirq, err;

	txgbe->misc.nirqs = 2;
	txgbe->misc.domain = irq_domain_add_simple(NULL, txgbe->misc.nirqs, 0,
						   &txgbe_misc_irq_domain_ops, txgbe);
	if (!txgbe->misc.domain)
		return -ENOMEM;

	for (hwirq = 0; hwirq < txgbe->misc.nirqs; hwirq++)
		irq_create_mapping(txgbe->misc.domain, hwirq);

	txgbe->misc.chip = txgbe_irq_chip;
	if (wx->pdev->msix_enabled)
		txgbe->misc.irq = wx->msix_entry->vector;
	else
		txgbe->misc.irq = wx->pdev->irq;

	err = request_threaded_irq(txgbe->misc.irq, NULL,
				   txgbe_misc_irq_handle,
				   IRQF_ONESHOT,
				   wx->netdev->name, txgbe);
	if (err)
		goto del_misc_irq;

	err = txgbe_request_gpio_irq(txgbe);
	if (err)
		goto free_misc_irq;

	err = txgbe_request_link_irq(txgbe);
	if (err)
		goto free_gpio_irq;

	return 0;

free_gpio_irq:
	free_irq(txgbe->gpio_irq, txgbe);
free_misc_irq:
	free_irq(txgbe->misc.irq, txgbe);
del_misc_irq:
	txgbe_del_irq_domain(txgbe);

	return err;
}