/*
 * aQuantia Corporation Network Driver
 * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 */

/* File aq_vec.c: Definition of common structure for vector of Rx and Tx rings.
 * Definition of functions for Rx and Tx rings. Friendly module for aq_nic.
 */

#include "aq_vec.h"
#include "aq_nic.h"
#include "aq_ring.h"
#include "aq_hw.h"

#include <linux/netdevice.h>

/* Per-interrupt-vector context: one Tx/Rx ring pair per traffic class,
 * plus the NAPI instance that services them.
 */
struct aq_vec_s {
	struct aq_obj_s header;
	struct aq_hw_ops *aq_hw_ops;
	struct aq_hw_s *aq_hw;
	struct aq_nic_s *aq_nic;
	unsigned int tx_rings;
	unsigned int rx_rings;
	struct aq_ring_param_s aq_ring_param;
	struct napi_struct napi;
	struct aq_ring_s ring[AQ_CFG_TCS_MAX][2];
};

#define AQ_VEC_TX_ID 0
#define AQ_VEC_RX_ID 1

static int aq_vec_poll(struct napi_struct *napi, int budget)
__releases(&self->lock)
__acquires(&self->lock)
{
	struct aq_vec_s *self = container_of(napi, struct aq_vec_s, napi);
	struct aq_ring_s *ring = NULL;
	int work_done = 0;
	int err = 0;
	unsigned int i = 0U;
	unsigned int sw_tail_old = 0U;
	bool was_tx_cleaned = false;

	if (!self) {
		err = -EINVAL;
	} else if (spin_trylock(&self->header.lock)) {
		for (i = 0U, ring = self->ring[0];
			self->tx_rings > i; ++i, ring = self->ring[i]) {
			/* Let the HW layer refresh the Tx head pointer
			 * before deciding whether there is work to clean.
			 */
			if (self->aq_hw_ops->hw_ring_tx_head_update) {
				err = self->aq_hw_ops->hw_ring_tx_head_update(
					self->aq_hw,
					&ring[AQ_VEC_TX_ID]);
				if (err < 0)
					goto err_exit;
			}

			if (ring[AQ_VEC_TX_ID].sw_head !=
			    ring[AQ_VEC_TX_ID].hw_head) {
				aq_ring_tx_clean(&ring[AQ_VEC_TX_ID]);

				/* Restart the queue once enough descriptors
				 * are free for a maximally fragmented skb.
				 */
				if (aq_ring_avail_dx(&ring[AQ_VEC_TX_ID]) >
				    AQ_CFG_SKB_FRAGS_MAX) {
					aq_nic_ndev_queue_start(self->aq_nic,
						ring[AQ_VEC_TX_ID].idx);
				}
				was_tx_cleaned = true;
			}

			err = self->aq_hw_ops->hw_ring_rx_receive(self->aq_hw,
					&ring[AQ_VEC_RX_ID]);
			if (err < 0)
				goto err_exit;

			if (ring[AQ_VEC_RX_ID].sw_head !=
			    ring[AQ_VEC_RX_ID].hw_head) {
				err = aq_ring_rx_clean(&ring[AQ_VEC_RX_ID],
						       &work_done,
						       budget - work_done);
				if (err < 0)
					goto err_exit;

				sw_tail_old = ring[AQ_VEC_RX_ID].sw_tail;

				err = aq_ring_rx_fill(&ring[AQ_VEC_RX_ID]);
				if (err < 0)
					goto err_exit;

				err = self->aq_hw_ops->hw_ring_rx_fill(
					self->aq_hw,
					&ring[AQ_VEC_RX_ID], sw_tail_old);
				if (err < 0)
					goto err_exit;
			}
		}

		/* Tx progress counts as work too: claim the full budget so
		 * NAPI polls again instead of completing early.
		 */
		if (was_tx_cleaned)
			work_done = budget;

		if (work_done < budget) {
			napi_complete_done(napi, work_done);
			self->aq_hw_ops->hw_irq_enable(self->aq_hw,
					1U << self->aq_ring_param.vec_idx);
		}

err_exit:
		spin_unlock(&self->header.lock);
	}

	return work_done;
}

struct aq_vec_s *aq_vec_alloc(struct aq_nic_s *aq_nic, unsigned int idx,
			      struct aq_nic_cfg_s *aq_nic_cfg)
{
	struct aq_vec_s *self = NULL;
	struct aq_ring_s *ring = NULL;
	unsigned int i = 0U;
	int err = 0;

	self = kzalloc(sizeof(*self), GFP_KERNEL);
	if (!self) {
		err = -ENOMEM;
		goto err_exit;
	}

	self->aq_nic = aq_nic;
	self->aq_ring_param.vec_idx = idx;
	self->aq_ring_param.cpu =
		idx + aq_nic_cfg->aq_rss.base_cpu_number;

	cpumask_set_cpu(self->aq_ring_param.cpu,
			&self->aq_ring_param.affinity_mask);

	self->tx_rings = 0;
	self->rx_rings = 0;

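	/* Register this vector's NAPI context; the stack will invoke
	 * aq_vec_poll() from softirq context once the ISR schedules it.
	 */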
	netif_napi_add(aq_nic_get_ndev(aq_nic), &self->napi,
		       aq_vec_poll, AQ_CFG_NAPI_WEIGHT);

	for (i = 0; i < aq_nic_cfg->tcs; ++i) {
		/* Map (traffic class, vector) to a device ring index. */
		unsigned int idx_ring = AQ_NIC_TCVEC2RING(self->aq_nic,
						self->tx_rings,
						self->aq_ring_param.vec_idx);

		ring = aq_ring_tx_alloc(&self->ring[i][AQ_VEC_TX_ID], aq_nic,
					idx_ring, aq_nic_cfg);
		if (!ring) {
			err = -ENOMEM;
			goto err_exit;
		}

		++self->tx_rings;

		aq_nic_set_tx_ring(aq_nic, idx_ring, ring);

		ring = aq_ring_rx_alloc(&self->ring[i][AQ_VEC_RX_ID], aq_nic,
					idx_ring, aq_nic_cfg);
		if (!ring) {
			err = -ENOMEM;
			goto err_exit;
		}

		++self->rx_rings;
	}

err_exit:
	if (err < 0) {
		aq_vec_free(self);
		self = NULL;
	}
	return self;
}

int aq_vec_init(struct aq_vec_s *self, struct aq_hw_ops *aq_hw_ops,
		struct aq_hw_s *aq_hw)
{
	struct aq_ring_s *ring = NULL;
	unsigned int i = 0U;
	int err = 0;

	self->aq_hw_ops = aq_hw_ops;
	self->aq_hw = aq_hw;

	spin_lock_init(&self->header.lock);

	for (i = 0U, ring = self->ring[0];
		self->tx_rings > i; ++i, ring = self->ring[i]) {
		err = aq_ring_init(&ring[AQ_VEC_TX_ID]);
		if (err < 0)
			goto err_exit;

		err = self->aq_hw_ops->hw_ring_tx_init(self->aq_hw,
						       &ring[AQ_VEC_TX_ID],
						       &self->aq_ring_param);
		if (err < 0)
			goto err_exit;

		err = aq_ring_init(&ring[AQ_VEC_RX_ID]);
		if (err < 0)
			goto err_exit;

		err = self->aq_hw_ops->hw_ring_rx_init(self->aq_hw,
						       &ring[AQ_VEC_RX_ID],
						       &self->aq_ring_param);
		if (err < 0)
			goto err_exit;

		/* Prefill the Rx ring and publish the new tail to HW. */
		err = aq_ring_rx_fill(&ring[AQ_VEC_RX_ID]);
		if (err < 0)
			goto err_exit;

		err = self->aq_hw_ops->hw_ring_rx_fill(self->aq_hw,
						       &ring[AQ_VEC_RX_ID], 0U);
		if (err < 0)
			goto err_exit;
	}

err_exit:
	return err;
}

int aq_vec_start(struct aq_vec_s *self)
{
	struct aq_ring_s *ring = NULL;
	unsigned int i = 0U;
	int err = 0;

	for (i = 0U, ring = self->ring[0];
		self->tx_rings > i; ++i, ring = self->ring[i]) {
		err = self->aq_hw_ops->hw_ring_tx_start(self->aq_hw,
							&ring[AQ_VEC_TX_ID]);
		if (err < 0)
			goto err_exit;

		err = self->aq_hw_ops->hw_ring_rx_start(self->aq_hw,
							&ring[AQ_VEC_RX_ID]);
		if (err < 0)
			goto err_exit;
	}

	napi_enable(&self->napi);

err_exit:
	return err;
}

void aq_vec_stop(struct aq_vec_s *self)
{
	struct aq_ring_s *ring = NULL;
	unsigned int i = 0U;

	for (i = 0U, ring = self->ring[0];
		self->tx_rings > i; ++i, ring = self->ring[i]) {
		self->aq_hw_ops->hw_ring_tx_stop(self->aq_hw,
						 &ring[AQ_VEC_TX_ID]);

		self->aq_hw_ops->hw_ring_rx_stop(self->aq_hw,
						 &ring[AQ_VEC_RX_ID]);
	}

	napi_disable(&self->napi);
}

void aq_vec_deinit(struct aq_vec_s *self)
{
	struct aq_ring_s *ring = NULL;
	unsigned int i = 0U;

	if (!self)
		goto err_exit;

	for (i = 0U, ring = self->ring[0];
		self->tx_rings > i; ++i, ring = self->ring[i]) {
		aq_ring_tx_clean(&ring[AQ_VEC_TX_ID]);
		aq_ring_rx_deinit(&ring[AQ_VEC_RX_ID]);
	}
err_exit:;
}

void aq_vec_free(struct aq_vec_s *self)
{
	struct aq_ring_s *ring = NULL;
	unsigned int i = 0U;

	if (!self)
		goto err_exit;

	for (i = 0U, ring = self->ring[0];
		self->tx_rings > i; ++i, ring = self->ring[i]) {
		aq_ring_free(&ring[AQ_VEC_TX_ID]);
		aq_ring_free(&ring[AQ_VEC_RX_ID]);
	}

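	/* The napi_struct is embedded in this allocation, so unhook it
	 * from the stack before the kfree() below.
	 */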
	netif_napi_del(&self->napi);

	kfree(self);

err_exit:;
}

irqreturn_t aq_vec_isr(int irq, void *private)
{
	struct aq_vec_s *self = private;
	int err = 0;

	if (!self) {
		err = -EINVAL;
		goto err_exit;
	}
	/* Defer all Rx/Tx processing to aq_vec_poll() via NAPI. */
	napi_schedule(&self->napi);

err_exit:
	return err >= 0 ? IRQ_HANDLED : IRQ_NONE;
}

irqreturn_t aq_vec_isr_legacy(int irq, void *private)
{
	struct aq_vec_s *self = private;
	u64 irq_mask = 0U;
	int err = 0;

	if (!self) {
		err = -EINVAL;
		goto err_exit;
	}
	err = self->aq_hw_ops->hw_irq_read(self->aq_hw, &irq_mask);
	if (err < 0)
		goto err_exit;

	if (irq_mask) {
		self->aq_hw_ops->hw_irq_disable(self->aq_hw,
				1U << self->aq_ring_param.vec_idx);
		napi_schedule(&self->napi);
	} else {
		/* Shared line, not our interrupt: re-enable and return
		 * IRQ_NONE directly so the core can try other handlers.
		 */
		self->aq_hw_ops->hw_irq_enable(self->aq_hw, 1U);
		return IRQ_NONE;
	}

err_exit:
	return err >= 0 ? IRQ_HANDLED : IRQ_NONE;
}

cpumask_t *aq_vec_get_affinity_mask(struct aq_vec_s *self)
{
	return &self->aq_ring_param.affinity_mask;
}

void aq_vec_add_stats(struct aq_vec_s *self,
		      struct aq_ring_stats_rx_s *stats_rx,
		      struct aq_ring_stats_tx_s *stats_tx)
{
	struct aq_ring_s *ring = NULL;
	unsigned int r = 0U;

	for (r = 0U, ring = self->ring[0];
		self->tx_rings > r; ++r, ring = self->ring[r]) {
		struct aq_ring_stats_tx_s *tx = &ring[AQ_VEC_TX_ID].stats.tx;
		struct aq_ring_stats_rx_s *rx = &ring[AQ_VEC_RX_ID].stats.rx;

		stats_rx->packets += rx->packets;
		stats_rx->bytes += rx->bytes;
		stats_rx->errors += rx->errors;
		stats_rx->jumbo_packets += rx->jumbo_packets;
		stats_rx->lro_packets += rx->lro_packets;

		stats_tx->packets += tx->packets;
		stats_tx->bytes += tx->bytes;
		stats_tx->errors += tx->errors;
	}
}

int aq_vec_get_sw_stats(struct aq_vec_s *self, u64 *data, unsigned int *p_count)
{
	unsigned int count = 0U;
	struct aq_ring_stats_rx_s stats_rx;
	struct aq_ring_stats_tx_s stats_tx;

	memset(&stats_rx, 0, sizeof(struct aq_ring_stats_rx_s));
	memset(&stats_tx, 0, sizeof(struct aq_ring_stats_tx_s));
	aq_vec_add_stats(self, &stats_rx, &stats_tx);

	data[count] += stats_rx.packets;
	data[++count] += stats_tx.packets;
	data[++count] += stats_rx.jumbo_packets;
	data[++count] += stats_rx.lro_packets;
	data[++count] += stats_rx.errors;

	if (p_count)
		*p_count = ++count;

	return 0;
}
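
/* Usage sketch, not part of this translation unit: the driver's PCI glue
 * (aq_pci_func) is expected to hand aq_vec_isr()/aq_vec_isr_legacy() to
 * request_irq(), roughly along these lines:
 *
 *	err = request_irq(pci_irq_vector(pdev, i), aq_vec_isr,
 *			  0, ndev->name, aq_vec);
 *
 * request_irq() and pci_irq_vector() are standard kernel APIs; the exact
 * arguments shown here are illustrative only.
 */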