// SPDX-License-Identifier: GPL-2.0
// Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved.

#include <linux/netdevice.h>

#include "hinic3_hw_comm.h"
#include "hinic3_hwdev.h"
#include "hinic3_hwif.h"
#include "hinic3_nic_dev.h"
#include "hinic3_rx.h"
#include "hinic3_tx.h"

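/* NAPI poll handler shared by the TX and RX rings of one queue pair. TX
 * completions are always serviced; a zero budget (e.g. netpoll) means
 * TX-only, so RX processing is skipped. The queue pair's MSI-X vector is
 * re-enabled only once both rings report no remaining work and
 * napi_complete_done() accepts the completion.
 */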
static int hinic3_poll(struct napi_struct *napi, int budget)
{
	struct hinic3_irq_cfg *irq_cfg =
		container_of(napi, struct hinic3_irq_cfg, napi);
	struct hinic3_nic_dev *nic_dev;
	bool busy = false;
	int work_done;

	nic_dev = netdev_priv(irq_cfg->netdev);

	busy |= hinic3_tx_poll(irq_cfg->txq, budget);

	if (unlikely(!budget))
		return 0;

	work_done = hinic3_rx_poll(irq_cfg->rxq, budget);
	busy |= work_done >= budget;

	if (busy)
		return budget;

	if (likely(napi_complete_done(napi, work_done)))
		hinic3_set_msix_state(nic_dev->hwdev, irq_cfg->msix_entry_idx,
				      HINIC3_MSIX_ENABLE);

	return work_done;
}

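/* Attach the queue pair's NAPI context to its RX and TX queues, register
 * the poll handler and enable NAPI.
 */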
static void qp_add_napi(struct hinic3_irq_cfg *irq_cfg)
{
	struct hinic3_nic_dev *nic_dev = netdev_priv(irq_cfg->netdev);

	netif_queue_set_napi(irq_cfg->netdev, irq_cfg->irq_id,
			     NETDEV_QUEUE_TYPE_RX, &irq_cfg->napi);
	netif_queue_set_napi(irq_cfg->netdev, irq_cfg->irq_id,
			     NETDEV_QUEUE_TYPE_TX, &irq_cfg->napi);
	netif_napi_add(nic_dev->netdev, &irq_cfg->napi, hinic3_poll);
	napi_enable(&irq_cfg->napi);
}

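/* Reverse qp_add_napi(): disable NAPI, detach it from the RX and TX
 * queues, stop the TX subqueue and delete the NAPI context.
 */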
static void qp_del_napi(struct hinic3_irq_cfg *irq_cfg)
{
	napi_disable(&irq_cfg->napi);
	netif_queue_set_napi(irq_cfg->netdev, irq_cfg->irq_id,
			     NETDEV_QUEUE_TYPE_RX, NULL);
	netif_queue_set_napi(irq_cfg->netdev, irq_cfg->irq_id,
			     NETDEV_QUEUE_TYPE_TX, NULL);
	netif_stop_subqueue(irq_cfg->netdev, irq_cfg->irq_id);
	netif_napi_del(&irq_cfg->napi);
}

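/* Queue pair interrupt handler: clear the vector's resend bit so the
 * interrupt is not replayed by hardware while it is being handled, then
 * defer the actual TX/RX work to NAPI.
 */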
static irqreturn_t qp_irq(int irq, void *data)
{
	struct hinic3_irq_cfg *irq_cfg = data;
	struct hinic3_nic_dev *nic_dev;

	nic_dev = netdev_priv(irq_cfg->netdev);
	hinic3_msix_intr_clear_resend_bit(nic_dev->hwdev,
					  irq_cfg->msix_entry_idx, 1);

	napi_schedule(&irq_cfg->napi);

	return IRQ_HANDLED;
}

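/* Enable NAPI for the queue pair, program its interrupt coalescing
 * parameters and request the IRQ line; the NAPI setup is torn down again
 * on any failure.
 */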
static int hinic3_request_irq(struct hinic3_irq_cfg *irq_cfg, u16 q_id)
{
	struct hinic3_interrupt_info info = {};
	struct hinic3_nic_dev *nic_dev;
	struct net_device *netdev;
	int err;

	netdev = irq_cfg->netdev;
	nic_dev = netdev_priv(netdev);
	qp_add_napi(irq_cfg);

	info.msix_index = irq_cfg->msix_entry_idx;
	info.interrupt_coalesc_set = 1;
	info.pending_limit = nic_dev->intr_coalesce[q_id].pending_limit;
	info.coalesc_timer_cfg =
		nic_dev->intr_coalesce[q_id].coalesce_timer_cfg;
	info.resend_timer_cfg = nic_dev->intr_coalesce[q_id].resend_timer_cfg;
	err = hinic3_set_interrupt_cfg_direct(nic_dev->hwdev, &info);
	if (err) {
		netdev_err(netdev, "Failed to set RX interrupt coalescing attribute.\n");
		qp_del_napi(irq_cfg);
		return err;
	}

	err = request_irq(irq_cfg->irq_id, qp_irq, 0, irq_cfg->irq_name,
			  irq_cfg);
	if (err) {
		qp_del_napi(irq_cfg);
		return err;
	}

	irq_set_affinity_hint(irq_cfg->irq_id, &irq_cfg->affinity_mask);

	return 0;
}

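/* Drop the CPU affinity hint and free the queue pair's IRQ line. */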
static void hinic3_release_irq(struct hinic3_irq_cfg *irq_cfg)
{
	irq_set_affinity_hint(irq_cfg->irq_id, NULL);
	free_irq(irq_cfg->irq_id, irq_cfg);
}

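/* Set up one IRQ per queue pair: record the MSI-X vector, spread the
 * affinity hints across CPUs local to the device's NUMA node, request
 * the IRQ and enable its vector with hardware auto-masking. Queue pairs
 * that were already initialized are unwound if a later one fails.
 */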
int hinic3_qps_irq_init(struct net_device *netdev)
{
	struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
	struct pci_dev *pdev = nic_dev->pdev;
	struct hinic3_irq_cfg *irq_cfg;
	struct msix_entry *msix_entry;
	u32 local_cpu;
	u16 q_id;
	int err;

	for (q_id = 0; q_id < nic_dev->q_params.num_qps; q_id++) {
		msix_entry = &nic_dev->qps_msix_entries[q_id];
		irq_cfg = &nic_dev->q_params.irq_cfg[q_id];

		irq_cfg->irq_id = msix_entry->vector;
		irq_cfg->msix_entry_idx = msix_entry->entry;
		irq_cfg->netdev = netdev;
		irq_cfg->txq = &nic_dev->txqs[q_id];
		irq_cfg->rxq = &nic_dev->rxqs[q_id];
		nic_dev->rxqs[q_id].irq_cfg = irq_cfg;

		local_cpu = cpumask_local_spread(q_id, dev_to_node(&pdev->dev));
		cpumask_set_cpu(local_cpu, &irq_cfg->affinity_mask);

		snprintf(irq_cfg->irq_name, sizeof(irq_cfg->irq_name),
			 "%s_qp%u", netdev->name, q_id);

		err = hinic3_request_irq(irq_cfg, q_id);
		if (err) {
			netdev_err(netdev, "Failed to request Rx irq\n");
			goto err_release_irqs;
		}

		hinic3_set_msix_auto_mask_state(nic_dev->hwdev,
						irq_cfg->msix_entry_idx,
						HINIC3_SET_MSIX_AUTO_MASK);
		hinic3_set_msix_state(nic_dev->hwdev, irq_cfg->msix_entry_idx,
				      HINIC3_MSIX_ENABLE);
	}

	return 0;

err_release_irqs:
	while (q_id > 0) {
		q_id--;
		irq_cfg = &nic_dev->q_params.irq_cfg[q_id];
		qp_del_napi(irq_cfg);
		hinic3_set_msix_state(nic_dev->hwdev, irq_cfg->msix_entry_idx,
				      HINIC3_MSIX_DISABLE);
		hinic3_set_msix_auto_mask_state(nic_dev->hwdev,
						irq_cfg->msix_entry_idx,
						HINIC3_CLR_MSIX_AUTO_MASK);
		hinic3_release_irq(irq_cfg);
	}

	return err;
}

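/* Reverse hinic3_qps_irq_init() for every queue pair: delete its NAPI
 * context, disable the MSI-X vector, clear auto-masking and free the
 * IRQ.
 */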
void hinic3_qps_irq_uninit(struct net_device *netdev)
{
	struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
	struct hinic3_irq_cfg *irq_cfg;
	u16 q_id;

	for (q_id = 0; q_id < nic_dev->q_params.num_qps; q_id++) {
		irq_cfg = &nic_dev->q_params.irq_cfg[q_id];
		qp_del_napi(irq_cfg);
		hinic3_set_msix_state(nic_dev->hwdev, irq_cfg->msix_entry_idx,
				      HINIC3_MSIX_DISABLE);
		hinic3_set_msix_auto_mask_state(nic_dev->hwdev,
						irq_cfg->msix_entry_idx,
						HINIC3_CLR_MSIX_AUTO_MASK);
		hinic3_release_irq(irq_cfg);
	}
}