/*
 * aQuantia Corporation Network Driver
 * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 */

/* File aq_vec.c: Definition of the common structure for a vector of Rx and
 * Tx rings, and of the functions that operate on those rings. A companion
 * module for aq_nic.
 */
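
/* A sketch of the intended vector lifecycle as driven by aq_nic (the exact
 * call sites live in aq_nic.c and may differ in detail):
 *
 *	vec = aq_vec_alloc(aq_nic, idx, aq_nic_cfg);
 *	err = aq_vec_init(vec, aq_hw_ops, aq_hw);
 *	err = aq_vec_start(vec);
 *	...traffic flows: aq_vec_isr()/aq_vec_isr_legacy() schedule NAPI,
 *	   which runs aq_vec_poll()...
 *	aq_vec_stop(vec);
 *	aq_vec_deinit(vec);
 *	aq_vec_free(vec);
 */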

#include "aq_vec.h"
#include "aq_nic.h"
#include "aq_ring.h"
#include "aq_hw.h"

#include <linux/netdevice.h>

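/* One interrupt vector: a NAPI context plus one Tx/Rx ring pair per traffic
 * class. ring[tc][AQ_VEC_TX_ID] is the Tx ring and ring[tc][AQ_VEC_RX_ID]
 * the Rx ring of traffic class tc.
 */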
struct aq_vec_s {
	const struct aq_hw_ops *aq_hw_ops;
	struct aq_hw_s *aq_hw;
	struct aq_nic_s *aq_nic;
	unsigned int tx_rings;
	unsigned int rx_rings;
	struct aq_ring_param_s aq_ring_param;
	struct napi_struct napi;
	struct aq_ring_s ring[AQ_CFG_TCS_MAX][2];
};

#define AQ_VEC_TX_ID 0
#define AQ_VEC_RX_ID 1

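/* NAPI poll callback: for every ring pair of the vector, reclaim completed
 * Tx descriptors, pass received packets up the stack (up to @budget), and
 * replenish the Rx ring. Re-enables the vector's interrupt once all the
 * work fits within the budget.
 */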
static int aq_vec_poll(struct napi_struct *napi, int budget)
{
	struct aq_vec_s *self = container_of(napi, struct aq_vec_s, napi);
	unsigned int sw_tail_old = 0U;
	struct aq_ring_s *ring = NULL;
	bool was_tx_cleaned = true;
	unsigned int i = 0U;
	int work_done = 0;
	int err = 0;

	if (!self) {
		err = -EINVAL;
	} else {
		for (i = 0U, ring = self->ring[0];
			self->tx_rings > i; ++i, ring = self->ring[i]) {
			/* Let the hardware report how far it has advanced
			 * the Tx ring, then reclaim the completed range.
			 */
			if (self->aq_hw_ops->hw_ring_tx_head_update) {
				err = self->aq_hw_ops->hw_ring_tx_head_update(
							self->aq_hw,
							&ring[AQ_VEC_TX_ID]);
				if (err < 0)
					goto err_exit;
			}

			if (ring[AQ_VEC_TX_ID].sw_head !=
			    ring[AQ_VEC_TX_ID].hw_head) {
				was_tx_cleaned = aq_ring_tx_clean(&ring[AQ_VEC_TX_ID]);
				aq_ring_update_queue_state(&ring[AQ_VEC_TX_ID]);
			}

			err = self->aq_hw_ops->hw_ring_rx_receive(self->aq_hw,
					    &ring[AQ_VEC_RX_ID]);
			if (err < 0)
				goto err_exit;

			if (ring[AQ_VEC_RX_ID].sw_head !=
				ring[AQ_VEC_RX_ID].hw_head) {
				err = aq_ring_rx_clean(&ring[AQ_VEC_RX_ID],
						       napi,
						       &work_done,
						       budget - work_done);
				if (err < 0)
					goto err_exit;

				sw_tail_old = ring[AQ_VEC_RX_ID].sw_tail;

				/* Replenish the Rx buffers consumed above and
				 * hand the new descriptors to the hardware.
				 */
				err = aq_ring_rx_fill(&ring[AQ_VEC_RX_ID]);
				if (err < 0)
					goto err_exit;

				err = self->aq_hw_ops->hw_ring_rx_fill(
					self->aq_hw,
					&ring[AQ_VEC_RX_ID], sw_tail_old);
				if (err < 0)
					goto err_exit;
			}
		}

		/* If Tx work remains, claim the whole budget so NAPI polls
		 * again instead of completing.
		 */
		if (!was_tx_cleaned)
			work_done = budget;

		if (work_done < budget) {
			napi_complete_done(napi, work_done);
			self->aq_hw_ops->hw_irq_enable(self->aq_hw,
					1U << self->aq_ring_param.vec_idx);
		}
	}
err_exit:
	return work_done;
}

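/* Allocate a vector and a Tx/Rx ring pair for each configured traffic class,
 * pin the vector to a CPU and register its NAPI context. Returns NULL on
 * failure; partially constructed state is torn down via aq_vec_free().
 */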
struct aq_vec_s *aq_vec_alloc(struct aq_nic_s *aq_nic, unsigned int idx,
			      struct aq_nic_cfg_s *aq_nic_cfg)
{
	struct aq_vec_s *self = NULL;
	struct aq_ring_s *ring = NULL;
	unsigned int i = 0U;
	int err = 0;

	self = kzalloc(sizeof(*self), GFP_KERNEL);
	if (!self) {
		err = -ENOMEM;
		goto err_exit;
	}

	self->aq_nic = aq_nic;
	self->aq_ring_param.vec_idx = idx;
	self->aq_ring_param.cpu =
		idx + aq_nic_cfg->aq_rss.base_cpu_number;

	cpumask_set_cpu(self->aq_ring_param.cpu,
			&self->aq_ring_param.affinity_mask);

	self->tx_rings = 0;
	self->rx_rings = 0;

	netif_napi_add(aq_nic_get_ndev(aq_nic), &self->napi,
		       aq_vec_poll, AQ_CFG_NAPI_WEIGHT);

	for (i = 0; i < aq_nic_cfg->tcs; ++i) {
		unsigned int idx_ring = AQ_NIC_TCVEC2RING(self->aq_nic,
						self->tx_rings,
						self->aq_ring_param.vec_idx);

		ring = aq_ring_tx_alloc(&self->ring[i][AQ_VEC_TX_ID], aq_nic,
					idx_ring, aq_nic_cfg);
		if (!ring) {
			err = -ENOMEM;
			goto err_exit;
		}

		++self->tx_rings;

		aq_nic_set_tx_ring(aq_nic, idx_ring, ring);

		ring = aq_ring_rx_alloc(&self->ring[i][AQ_VEC_RX_ID], aq_nic,
					idx_ring, aq_nic_cfg);
		if (!ring) {
			err = -ENOMEM;
			goto err_exit;
		}

		++self->rx_rings;
	}

err_exit:
	if (err < 0) {
		aq_vec_free(self);
		self = NULL;
	}
	return self;
}

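/* Bind the vector to the hardware: initialize the software state of every
 * ring, program the rings into the hardware and pre-fill the Rx rings with
 * buffers.
 */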
int aq_vec_init(struct aq_vec_s *self, const struct aq_hw_ops *aq_hw_ops,
		struct aq_hw_s *aq_hw)
{
	struct aq_ring_s *ring = NULL;
	unsigned int i = 0U;
	int err = 0;

	self->aq_hw_ops = aq_hw_ops;
	self->aq_hw = aq_hw;

	for (i = 0U, ring = self->ring[0];
		self->tx_rings > i; ++i, ring = self->ring[i]) {
		err = aq_ring_init(&ring[AQ_VEC_TX_ID]);
		if (err < 0)
			goto err_exit;

		err = self->aq_hw_ops->hw_ring_tx_init(self->aq_hw,
						       &ring[AQ_VEC_TX_ID],
						       &self->aq_ring_param);
		if (err < 0)
			goto err_exit;

		err = aq_ring_init(&ring[AQ_VEC_RX_ID]);
		if (err < 0)
			goto err_exit;

		err = self->aq_hw_ops->hw_ring_rx_init(self->aq_hw,
						       &ring[AQ_VEC_RX_ID],
						       &self->aq_ring_param);
		if (err < 0)
			goto err_exit;

		err = aq_ring_rx_fill(&ring[AQ_VEC_RX_ID]);
		if (err < 0)
			goto err_exit;

		err = self->aq_hw_ops->hw_ring_rx_fill(self->aq_hw,
						       &ring[AQ_VEC_RX_ID], 0U);
		if (err < 0)
			goto err_exit;
	}

err_exit:
	return err;
}

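/* Start all hardware rings of the vector and enable NAPI polling. */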
int aq_vec_start(struct aq_vec_s *self)
{
	struct aq_ring_s *ring = NULL;
	unsigned int i = 0U;
	int err = 0;

	for (i = 0U, ring = self->ring[0];
		self->tx_rings > i; ++i, ring = self->ring[i]) {
		err = self->aq_hw_ops->hw_ring_tx_start(self->aq_hw,
							&ring[AQ_VEC_TX_ID]);
		if (err < 0)
			goto err_exit;

		err = self->aq_hw_ops->hw_ring_rx_start(self->aq_hw,
							&ring[AQ_VEC_RX_ID]);
		if (err < 0)
			goto err_exit;
	}

	napi_enable(&self->napi);

err_exit:
	return err;
}

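/* Stop all hardware rings of the vector and disable NAPI polling. */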
void aq_vec_stop(struct aq_vec_s *self)
{
	struct aq_ring_s *ring = NULL;
	unsigned int i = 0U;

	for (i = 0U, ring = self->ring[0];
		self->tx_rings > i; ++i, ring = self->ring[i]) {
		self->aq_hw_ops->hw_ring_tx_stop(self->aq_hw,
						 &ring[AQ_VEC_TX_ID]);

		self->aq_hw_ops->hw_ring_rx_stop(self->aq_hw,
						 &ring[AQ_VEC_RX_ID]);
	}

	napi_disable(&self->napi);
}

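/* Release in-flight buffers: reclaim pending Tx descriptors and free the
 * buffers still sitting in the Rx rings. Tolerates a NULL vector.
 */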
void aq_vec_deinit(struct aq_vec_s *self)
{
	struct aq_ring_s *ring = NULL;
	unsigned int i = 0U;

	if (!self)
		goto err_exit;

	for (i = 0U, ring = self->ring[0];
		self->tx_rings > i; ++i, ring = self->ring[i]) {
		aq_ring_tx_clean(&ring[AQ_VEC_TX_ID]);
		aq_ring_rx_deinit(&ring[AQ_VEC_RX_ID]);
	}
err_exit:;
}

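/* Free the rings, unregister the NAPI context and free the vector itself.
 * Tolerates a NULL vector.
 */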
void aq_vec_free(struct aq_vec_s *self)
{
	struct aq_ring_s *ring = NULL;
	unsigned int i = 0U;

	if (!self)
		goto err_exit;

	for (i = 0U, ring = self->ring[0];
		self->tx_rings > i; ++i, ring = self->ring[i]) {
		aq_ring_free(&ring[AQ_VEC_TX_ID]);
		aq_ring_free(&ring[AQ_VEC_RX_ID]);
	}

	netif_napi_del(&self->napi);

	kfree(self);

err_exit:;
}

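/* MSI/MSI-X (per-vector) interrupt handler: all the work happens in NAPI
 * context, so just schedule the vector's NAPI instance.
 */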
irqreturn_t aq_vec_isr(int irq, void *private)
{
	struct aq_vec_s *self = private;
	int err = 0;

	if (!self) {
		err = -EINVAL;
		goto err_exit;
	}
	napi_schedule(&self->napi);

err_exit:
	return err >= 0 ? IRQ_HANDLED : IRQ_NONE;
}

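/* Legacy (INTx) interrupt handler: the line may be shared, so read the
 * interrupt status first. If our vector fired, mask it and schedule NAPI;
 * otherwise re-enable interrupts and report IRQ_NONE so the kernel can try
 * the other handlers sharing the line.
 */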
irqreturn_t aq_vec_isr_legacy(int irq, void *private)
{
	struct aq_vec_s *self = private;
	u64 irq_mask = 0U;
	int err;

	if (!self)
		return IRQ_NONE;

	err = self->aq_hw_ops->hw_irq_read(self->aq_hw, &irq_mask);
	if (err < 0)
		return IRQ_NONE;

	if (!irq_mask) {
		self->aq_hw_ops->hw_irq_enable(self->aq_hw, 1U);
		return IRQ_NONE;
	}

	self->aq_hw_ops->hw_irq_disable(self->aq_hw,
					1U << self->aq_ring_param.vec_idx);
	napi_schedule(&self->napi);

	return IRQ_HANDLED;
}

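/* Return the CPU affinity mask the vector was pinned to at allocation time. */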
cpumask_t *aq_vec_get_affinity_mask(struct aq_vec_s *self)
{
	return &self->aq_ring_param.affinity_mask;
}

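/* Accumulate the per-ring counters of the vector into the caller-supplied
 * totals. tx_rings always equals rx_rings here, so one loop covers both
 * halves of each ring pair.
 */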
void aq_vec_add_stats(struct aq_vec_s *self,
		      struct aq_ring_stats_rx_s *stats_rx,
		      struct aq_ring_stats_tx_s *stats_tx)
{
	struct aq_ring_s *ring = NULL;
	unsigned int r = 0U;

	for (r = 0U, ring = self->ring[0];
		self->tx_rings > r; ++r, ring = self->ring[r]) {
		struct aq_ring_stats_tx_s *tx = &ring[AQ_VEC_TX_ID].stats.tx;
		struct aq_ring_stats_rx_s *rx = &ring[AQ_VEC_RX_ID].stats.rx;

		stats_rx->packets += rx->packets;
		stats_rx->bytes += rx->bytes;
		stats_rx->errors += rx->errors;
		stats_rx->jumbo_packets += rx->jumbo_packets;
		stats_rx->lro_packets += rx->lro_packets;

		stats_tx->packets += tx->packets;
		stats_tx->bytes += tx->bytes;
		stats_tx->errors += tx->errors;
		stats_tx->queue_restarts += tx->queue_restarts;
	}
}

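/* Export the aggregated counters into the ethtool statistics array. The
 * counters are accumulated into @data (note the '+=' below), so the caller
 * is expected to hand in a zeroed array; if @p_count is set, it receives
 * the number of slots written.
 */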
int aq_vec_get_sw_stats(struct aq_vec_s *self, u64 *data, unsigned int *p_count)
{
	unsigned int count = 0U;
	struct aq_ring_stats_rx_s stats_rx;
	struct aq_ring_stats_tx_s stats_tx;

	memset(&stats_rx, 0U, sizeof(struct aq_ring_stats_rx_s));
	memset(&stats_tx, 0U, sizeof(struct aq_ring_stats_tx_s));
	aq_vec_add_stats(self, &stats_rx, &stats_tx);

	/* This data should mimic aq_ethtool_queue_stat_names structure */
	data[count] += stats_rx.packets;
	data[++count] += stats_tx.packets;
	data[++count] += stats_tx.queue_restarts;
	data[++count] += stats_rx.jumbo_packets;
	data[++count] += stats_rx.lro_packets;
	data[++count] += stats_rx.errors;

	if (p_count)
		*p_count = ++count;

	return 0;
}
388