// SPDX-License-Identifier: GPL-2.0-only
/*
 * aQuantia Corporation Network Driver
 * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
 */

/* File aq_vec.c: Definition of the common structure for a vector of Rx and Tx
 * rings. Definition of functions for Rx and Tx rings. Helper module for aq_nic.
 */

#include "aq_vec.h"
#include "aq_nic.h"
#include "aq_ring.h"
#include "aq_hw.h"

#include <linux/netdevice.h>

struct aq_vec_s {
	const struct aq_hw_ops *aq_hw_ops;
	struct aq_hw_s *aq_hw;
	struct aq_nic_s *aq_nic;
	unsigned int tx_rings;
	unsigned int rx_rings;
	struct aq_ring_param_s aq_ring_param;
	struct napi_struct napi;
	struct aq_ring_s ring[AQ_CFG_TCS_MAX][2];
};

#define AQ_VEC_TX_ID 0
#define AQ_VEC_RX_ID 1

static int aq_vec_poll(struct napi_struct *napi, int budget)
{
	struct aq_vec_s *self = container_of(napi, struct aq_vec_s, napi);
	unsigned int sw_tail_old = 0U;
	struct aq_ring_s *ring = NULL;
	bool was_tx_cleaned = true;
	unsigned int i = 0U;
	int work_done = 0;
	int err = 0;

	if (!self) {
		err = -EINVAL;
	} else {
		for (i = 0U, ring = self->ring[0];
		     self->tx_rings > i; ++i, ring = self->ring[i]) {
			if (self->aq_hw_ops->hw_ring_tx_head_update) {
				err = self->aq_hw_ops->hw_ring_tx_head_update(
							self->aq_hw,
							&ring[AQ_VEC_TX_ID]);
				if (err < 0)
					goto err_exit;
			}

			if (ring[AQ_VEC_TX_ID].sw_head !=
			    ring[AQ_VEC_TX_ID].hw_head) {
				was_tx_cleaned = aq_ring_tx_clean(&ring[AQ_VEC_TX_ID]);
				aq_ring_update_queue_state(&ring[AQ_VEC_TX_ID]);
			}

			err = self->aq_hw_ops->hw_ring_rx_receive(self->aq_hw,
						&ring[AQ_VEC_RX_ID]);
			if (err < 0)
				goto err_exit;

			if (ring[AQ_VEC_RX_ID].sw_head !=
			    ring[AQ_VEC_RX_ID].hw_head) {
				err = aq_ring_rx_clean(&ring[AQ_VEC_RX_ID],
						       napi,
						       &work_done,
						       budget - work_done);
				if (err < 0)
					goto err_exit;

				sw_tail_old = ring[AQ_VEC_RX_ID].sw_tail;

				err = aq_ring_rx_fill(&ring[AQ_VEC_RX_ID]);
				if (err < 0)
					goto err_exit;

				err = self->aq_hw_ops->hw_ring_rx_fill(
						self->aq_hw,
						&ring[AQ_VEC_RX_ID], sw_tail_old);
				if (err < 0)
					goto err_exit;
			}
		}

err_exit:
		if (!was_tx_cleaned)
			work_done = budget;

		if (work_done < budget) {
			napi_complete_done(napi, work_done);
			self->aq_hw_ops->hw_irq_enable(self->aq_hw,
					1U << self->aq_ring_param.vec_idx);
		}
	}

	return work_done;
}

struct aq_vec_s *aq_vec_alloc(struct aq_nic_s *aq_nic, unsigned int idx,
			      struct aq_nic_cfg_s *aq_nic_cfg)
{
	struct aq_ring_s *ring = NULL;
	struct aq_vec_s *self = NULL;
	unsigned int i = 0U;
	int err = 0;

	self = kzalloc(sizeof(*self), GFP_KERNEL);
	if (!self) {
		err = -ENOMEM;
		goto err_exit;
	}

	self->aq_nic = aq_nic;
	self->aq_ring_param.vec_idx = idx;
	self->aq_ring_param.cpu =
		idx + aq_nic_cfg->aq_rss.base_cpu_number;

	cpumask_set_cpu(self->aq_ring_param.cpu,
			&self->aq_ring_param.affinity_mask);

	self->tx_rings = 0;
	self->rx_rings = 0;

	netif_napi_add(aq_nic_get_ndev(aq_nic), &self->napi,
		       aq_vec_poll, AQ_CFG_NAPI_WEIGHT);

	for (i = 0; i < aq_nic_cfg->tcs; ++i) {
		unsigned int idx_ring = AQ_NIC_TCVEC2RING(self->aq_nic,
						self->tx_rings,
						self->aq_ring_param.vec_idx);

		ring = aq_ring_tx_alloc(&self->ring[i][AQ_VEC_TX_ID], aq_nic,
					idx_ring, aq_nic_cfg);
		if (!ring) {
			err = -ENOMEM;
			goto err_exit;
		}

		++self->tx_rings;

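		/* Register the Tx ring with the nic layer so the transmit
		 * path can look it up by queue index, then allocate the
		 * matching Rx ring for the same index.
		 */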
		aq_nic_set_tx_ring(aq_nic, idx_ring, ring);

		ring = aq_ring_rx_alloc(&self->ring[i][AQ_VEC_RX_ID], aq_nic,
					idx_ring, aq_nic_cfg);
		if (!ring) {
			err = -ENOMEM;
			goto err_exit;
		}

		++self->rx_rings;
	}

err_exit:
	if (err < 0) {
		aq_vec_free(self);
		self = NULL;
	}

	return self;
}

int aq_vec_init(struct aq_vec_s *self, const struct aq_hw_ops *aq_hw_ops,
		struct aq_hw_s *aq_hw)
{
	struct aq_ring_s *ring = NULL;
	unsigned int i = 0U;
	int err = 0;

	self->aq_hw_ops = aq_hw_ops;
	self->aq_hw = aq_hw;

	for (i = 0U, ring = self->ring[0];
	     self->tx_rings > i; ++i, ring = self->ring[i]) {
		err = aq_ring_init(&ring[AQ_VEC_TX_ID]);
		if (err < 0)
			goto err_exit;

		err = self->aq_hw_ops->hw_ring_tx_init(self->aq_hw,
						       &ring[AQ_VEC_TX_ID],
						       &self->aq_ring_param);
		if (err < 0)
			goto err_exit;

		err = aq_ring_init(&ring[AQ_VEC_RX_ID]);
		if (err < 0)
			goto err_exit;

		err = self->aq_hw_ops->hw_ring_rx_init(self->aq_hw,
						       &ring[AQ_VEC_RX_ID],
						       &self->aq_ring_param);
		if (err < 0)
			goto err_exit;

		err = aq_ring_rx_fill(&ring[AQ_VEC_RX_ID]);
		if (err < 0)
			goto err_exit;

		err = self->aq_hw_ops->hw_ring_rx_fill(self->aq_hw,
						       &ring[AQ_VEC_RX_ID], 0U);
		if (err < 0)
			goto err_exit;
	}

err_exit:
	return err;
}

int aq_vec_start(struct aq_vec_s *self)
{
	struct aq_ring_s *ring = NULL;
	unsigned int i = 0U;
	int err = 0;

	for (i = 0U, ring = self->ring[0];
	     self->tx_rings > i; ++i, ring = self->ring[i]) {
		err = self->aq_hw_ops->hw_ring_tx_start(self->aq_hw,
							&ring[AQ_VEC_TX_ID]);
		if (err < 0)
			goto err_exit;

		err = self->aq_hw_ops->hw_ring_rx_start(self->aq_hw,
							&ring[AQ_VEC_RX_ID]);
		if (err < 0)
			goto err_exit;
	}

	napi_enable(&self->napi);

err_exit:
	return err;
}

void aq_vec_stop(struct aq_vec_s *self)
{
	struct aq_ring_s *ring = NULL;
	unsigned int i = 0U;

	for (i = 0U, ring = self->ring[0];
	     self->tx_rings > i; ++i, ring = self->ring[i]) {
		self->aq_hw_ops->hw_ring_tx_stop(self->aq_hw,
						 &ring[AQ_VEC_TX_ID]);

		self->aq_hw_ops->hw_ring_rx_stop(self->aq_hw,
						 &ring[AQ_VEC_RX_ID]);
	}

	napi_disable(&self->napi);
}

void aq_vec_deinit(struct aq_vec_s *self)
{
	struct aq_ring_s *ring = NULL;
	unsigned int i = 0U;

	if (!self)
		goto err_exit;

	for (i = 0U, ring = self->ring[0];
	     self->tx_rings > i; ++i, ring = self->ring[i]) {
		aq_ring_tx_clean(&ring[AQ_VEC_TX_ID]);
		aq_ring_rx_deinit(&ring[AQ_VEC_RX_ID]);
	}

err_exit:;
}

void aq_vec_free(struct aq_vec_s *self)
{
	struct aq_ring_s *ring = NULL;
	unsigned int i = 0U;

	if (!self)
		goto err_exit;

	for (i = 0U, ring = self->ring[0];
	     self->tx_rings > i; ++i, ring = self->ring[i]) {
		aq_ring_free(&ring[AQ_VEC_TX_ID]);
		aq_ring_free(&ring[AQ_VEC_RX_ID]);
	}

	netif_napi_del(&self->napi);

	kfree(self);

err_exit:;
}

irqreturn_t aq_vec_isr(int irq, void *private)
{
	struct aq_vec_s *self = private;
	int err = 0;

	if (!self) {
		err = -EINVAL;
		goto err_exit;
	}
	napi_schedule(&self->napi);

err_exit:
	return err >= 0 ? IRQ_HANDLED : IRQ_NONE;
}

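/* Legacy (INTx) interrupt handler: the line can be shared with other
 * devices, so the interrupt status is read first and the IRQ is only
 * claimed (IRQ_HANDLED) when this vector actually has work pending.
 */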
irqreturn_t aq_vec_isr_legacy(int irq, void *private)
{
	struct aq_vec_s *self = private;
	u64 irq_mask = 0U;
	int err;

	if (!self)
		return IRQ_NONE;
	err = self->aq_hw_ops->hw_irq_read(self->aq_hw, &irq_mask);
	if (err < 0)
		return IRQ_NONE;

	if (irq_mask) {
		self->aq_hw_ops->hw_irq_disable(self->aq_hw,
				1U << self->aq_ring_param.vec_idx);
		napi_schedule(&self->napi);
	} else {
		self->aq_hw_ops->hw_irq_enable(self->aq_hw, 1U);
		return IRQ_NONE;
	}

	return IRQ_HANDLED;
}

cpumask_t *aq_vec_get_affinity_mask(struct aq_vec_s *self)
{
	return &self->aq_ring_param.affinity_mask;
}

void aq_vec_add_stats(struct aq_vec_s *self,
		      struct aq_ring_stats_rx_s *stats_rx,
		      struct aq_ring_stats_tx_s *stats_tx)
{
	struct aq_ring_s *ring = NULL;
	unsigned int r = 0U;

	for (r = 0U, ring = self->ring[0];
	     self->tx_rings > r; ++r, ring = self->ring[r]) {
		struct aq_ring_stats_tx_s *tx = &ring[AQ_VEC_TX_ID].stats.tx;
		struct aq_ring_stats_rx_s *rx = &ring[AQ_VEC_RX_ID].stats.rx;

		stats_rx->packets += rx->packets;
		stats_rx->bytes += rx->bytes;
		stats_rx->errors += rx->errors;
		stats_rx->jumbo_packets += rx->jumbo_packets;
		stats_rx->lro_packets += rx->lro_packets;
		stats_rx->pg_losts += rx->pg_losts;
		stats_rx->pg_flips += rx->pg_flips;
		stats_rx->pg_reuses += rx->pg_reuses;

		stats_tx->packets += tx->packets;
		stats_tx->bytes += tx->bytes;
		stats_tx->errors += tx->errors;
		stats_tx->queue_restarts += tx->queue_restarts;
	}
}

int aq_vec_get_sw_stats(struct aq_vec_s *self, u64 *data, unsigned int *p_count)
{
	struct aq_ring_stats_rx_s stats_rx;
	struct aq_ring_stats_tx_s stats_tx;
	unsigned int count = 0U;

	memset(&stats_rx, 0U, sizeof(struct aq_ring_stats_rx_s));
	memset(&stats_tx, 0U, sizeof(struct aq_ring_stats_tx_s));
	aq_vec_add_stats(self, &stats_rx, &stats_tx);

	/* This data should mimic aq_ethtool_queue_stat_names structure */
	data[count] += stats_rx.packets;
	data[++count] += stats_tx.packets;
	data[++count] += stats_tx.queue_restarts;
	data[++count] += stats_rx.jumbo_packets;
	data[++count] += stats_rx.lro_packets;
	data[++count] += stats_rx.errors;

	if (p_count)
		*p_count = ++count;

	return 0;
}

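/* Illustrative sketch only (not part of the original driver): the expected
 * per-vector lifecycle as it would be driven from the nic layer. The helper
 * name aq_vec_example_lifecycle() and its parameters are hypothetical; only
 * the aq_vec_*() calls refer to the functions defined above.
 */
static int __maybe_unused aq_vec_example_lifecycle(struct aq_nic_s *aq_nic,
						   const struct aq_hw_ops *ops,
						   struct aq_hw_s *hw,
						   struct aq_nic_cfg_s *cfg,
						   unsigned int idx)
{
	struct aq_vec_s *vec;
	int err;

	/* Allocate the vector and its per-TC Tx/Rx ring pairs. */
	vec = aq_vec_alloc(aq_nic, idx, cfg);
	if (!vec)
		return -ENOMEM;

	/* Bind the vector to the hardware ops and initialize the rings. */
	err = aq_vec_init(vec, ops, hw);
	if (err < 0)
		goto err_free;

	/* Start the rings and enable NAPI polling. */
	err = aq_vec_start(vec);
	if (err < 0)
		goto err_deinit;

	/* ... traffic is serviced by aq_vec_poll() from NAPI context ... */

	/* Orderly teardown mirrors the bring-up in reverse. */
	aq_vec_stop(vec);

err_deinit:
	aq_vec_deinit(vec);
err_free:
	aq_vec_free(vec);

	return err;
}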