// SPDX-License-Identifier: GPL-2.0-only
/*
 * aQuantia Corporation Network Driver
 * Copyright (C) 2014-2019 aQuantia Corporation. All rights reserved
 */

/* File aq_ring.c: Definition of functions for Rx/Tx rings. */

#include "aq_ring.h"
#include "aq_nic.h"
#include "aq_hw.h"
#include "aq_hw_utils.h"
#include "aq_ptp.h"

#include <linux/netdevice.h>
#include <linux/etherdevice.h>

static inline void aq_free_rxpage(struct aq_rxpage *rxpage, struct device *dev)
{
	unsigned int len = PAGE_SIZE << rxpage->order;

	dma_unmap_page(dev, rxpage->daddr, len, DMA_FROM_DEVICE);

	/* Drop the ref for being in the ring. */
	__free_pages(rxpage->page, rxpage->order);
	rxpage->page = NULL;
}

static int aq_get_rxpage(struct aq_rxpage *rxpage, unsigned int order,
			 struct device *dev)
{
	struct page *page;
	int ret = -ENOMEM;
	dma_addr_t daddr;

	page = dev_alloc_pages(order);
	if (unlikely(!page))
		goto err_exit;

	daddr = dma_map_page(dev, page, 0, PAGE_SIZE << order,
			     DMA_FROM_DEVICE);

	if (unlikely(dma_mapping_error(dev, daddr)))
		goto free_page;

	rxpage->page = page;
	rxpage->daddr = daddr;
	rxpage->order = order;
	rxpage->pg_off = 0;

	return 0;

free_page:
	__free_pages(page, order);

err_exit:
	return ret;
}

static int aq_get_rxpages(struct aq_ring_s *self, struct aq_ring_buff_s *rxbuf,
			  int order)
{
	int ret;

	if (rxbuf->rxdata.page) {
		/* One means ring is the only user and can reuse */
		if (page_ref_count(rxbuf->rxdata.page) > 1) {
			/* Try reuse buffer */
			rxbuf->rxdata.pg_off += AQ_CFG_RX_FRAME_MAX;
			if (rxbuf->rxdata.pg_off + AQ_CFG_RX_FRAME_MAX <=
				(PAGE_SIZE << order)) {
				self->stats.rx.pg_flips++;
			} else {
				/* Buffer exhausted. We have other users and
				 * should release this page and realloc
				 */
				aq_free_rxpage(&rxbuf->rxdata,
					       aq_nic_get_dev(self->aq_nic));
				self->stats.rx.pg_losts++;
			}
		} else {
			rxbuf->rxdata.pg_off = 0;
			self->stats.rx.pg_reuses++;
		}
	}

	if (!rxbuf->rxdata.page) {
		ret = aq_get_rxpage(&rxbuf->rxdata, order,
				    aq_nic_get_dev(self->aq_nic));
		return ret;
	}

	return 0;
}

static struct aq_ring_s *aq_ring_alloc(struct aq_ring_s *self,
				       struct aq_nic_s *aq_nic)
{
	int err = 0;

	self->buff_ring =
		kcalloc(self->size, sizeof(struct aq_ring_buff_s), GFP_KERNEL);

	if (!self->buff_ring) {
		err = -ENOMEM;
		goto err_exit;
	}
	self->dx_ring = dma_alloc_coherent(aq_nic_get_dev(aq_nic),
					   self->size * self->dx_size,
					   &self->dx_ring_pa, GFP_KERNEL);
	if (!self->dx_ring) {
		err = -ENOMEM;
		goto err_exit;
	}

err_exit:
	if (err < 0) {
		aq_ring_free(self);
		self = NULL;
	}

	return self;
}

struct aq_ring_s *aq_ring_tx_alloc(struct aq_ring_s *self,
				   struct aq_nic_s *aq_nic,
				   unsigned int idx,
				   struct aq_nic_cfg_s *aq_nic_cfg)
{
	int err = 0;

	self->aq_nic = aq_nic;
	self->idx = idx;
	self->size = aq_nic_cfg->txds;
	self->dx_size = aq_nic_cfg->aq_hw_caps->txd_size;

	self = aq_ring_alloc(self, aq_nic);
	if (!self) {
		err = -ENOMEM;
		goto err_exit;
	}

err_exit:
	if (err < 0) {
		aq_ring_free(self);
		self = NULL;
	}

	return self;
}

struct aq_ring_s *aq_ring_rx_alloc(struct aq_ring_s *self,
				   struct aq_nic_s *aq_nic,
				   unsigned int idx,
				   struct aq_nic_cfg_s *aq_nic_cfg)
{
	int err = 0;

	self->aq_nic = aq_nic;
	self->idx = idx;
	self->size = aq_nic_cfg->rxds;
	self->dx_size = aq_nic_cfg->aq_hw_caps->rxd_size;
	self->page_order = fls(AQ_CFG_RX_FRAME_MAX / PAGE_SIZE +
			       (AQ_CFG_RX_FRAME_MAX % PAGE_SIZE ? 1 : 0)) - 1;

	if (aq_nic_cfg->rxpageorder > self->page_order)
		self->page_order = aq_nic_cfg->rxpageorder;

	self = aq_ring_alloc(self, aq_nic);
	if (!self) {
		err = -ENOMEM;
		goto err_exit;
	}

err_exit:
	if (err < 0) {
		aq_ring_free(self);
		self = NULL;
	}

	return self;
}

struct aq_ring_s *
aq_ring_hwts_rx_alloc(struct aq_ring_s *self, struct aq_nic_s *aq_nic,
		      unsigned int idx, unsigned int size, unsigned int dx_size)
{
	struct device *dev = aq_nic_get_dev(aq_nic);
	size_t sz = size * dx_size + AQ_CFG_RXDS_DEF;

	memset(self, 0, sizeof(*self));

	self->aq_nic = aq_nic;
	self->idx = idx;
	self->size = size;
	self->dx_size = dx_size;

	self->dx_ring = dma_alloc_coherent(dev, sz, &self->dx_ring_pa,
					   GFP_KERNEL);
	if (!self->dx_ring) {
		aq_ring_free(self);
		return NULL;
	}

	return self;
}

int aq_ring_init(struct aq_ring_s *self)
{
	self->hw_head = 0;
	self->sw_head = 0;
	self->sw_tail = 0;

	return 0;
}

/* Return true when descriptor index 'i' lies strictly between 'h' and 't',
 * taking ring wraparound into account.
 */
static inline bool aq_ring_dx_in_range(unsigned int h, unsigned int i,
				       unsigned int t)
{
	return (h < t) ? ((h < i) && (i < t)) : ((h < i) || (i < t));
}

void aq_ring_update_queue_state(struct aq_ring_s *ring)
{
	if (aq_ring_avail_dx(ring) <= AQ_CFG_SKB_FRAGS_MAX)
		aq_ring_queue_stop(ring);
	else if (aq_ring_avail_dx(ring) > AQ_CFG_RESTART_DESC_THRES)
		aq_ring_queue_wake(ring);
}

void aq_ring_queue_wake(struct aq_ring_s *ring)
{
	struct net_device *ndev = aq_nic_get_ndev(ring->aq_nic);

	if (__netif_subqueue_stopped(ndev, ring->idx)) {
		netif_wake_subqueue(ndev, ring->idx);
		ring->stats.tx.queue_restarts++;
	}
}

void aq_ring_queue_stop(struct aq_ring_s *ring)
{
	struct net_device *ndev = aq_nic_get_ndev(ring->aq_nic);

	if (!__netif_subqueue_stopped(ndev, ring->idx))
		netif_stop_subqueue(ndev, ring->idx);
}

bool aq_ring_tx_clean(struct aq_ring_s *self)
{
	struct device *dev = aq_nic_get_dev(self->aq_nic);
	unsigned int budget;

	for (budget = AQ_CFG_TX_CLEAN_BUDGET;
	     budget && self->sw_head != self->hw_head; budget--) {
		struct aq_ring_buff_s *buff = &self->buff_ring[self->sw_head];

		if (likely(buff->is_mapped)) {
			if (unlikely(buff->is_sop)) {
				/* Stop if the EOP descriptor of this packet
				 * has not been completed by hardware yet.
				 */
				if (!buff->is_eop &&
				    buff->eop_index != 0xffffU &&
				    (!aq_ring_dx_in_range(self->sw_head,
							  buff->eop_index,
							  self->hw_head)))
					break;

				dma_unmap_single(dev, buff->pa, buff->len,
						 DMA_TO_DEVICE);
			} else {
				dma_unmap_page(dev, buff->pa, buff->len,
					       DMA_TO_DEVICE);
			}
		}

		if (unlikely(buff->is_eop)) {
			++self->stats.tx.packets;
			self->stats.tx.bytes += buff->skb->len;

			dev_kfree_skb_any(buff->skb);
		}
		buff->pa = 0U;
		buff->eop_index = 0xffffU;
		self->sw_head = aq_ring_next_dx(self, self->sw_head);
	}

	return !!budget;
}

static void aq_rx_checksum(struct aq_ring_s *self,
			   struct aq_ring_buff_s *buff,
			   struct sk_buff *skb)
{
	if (!(self->aq_nic->ndev->features & NETIF_F_RXCSUM))
		return;

	if (unlikely(buff->is_cso_err)) {
		++self->stats.rx.errors;
		skb->ip_summed = CHECKSUM_NONE;
		return;
	}
	if (buff->is_ip_cso) {
		__skb_incr_checksum_unnecessary(skb);
	} else {
		skb->ip_summed = CHECKSUM_NONE;
	}

	if (buff->is_udp_cso || buff->is_tcp_cso)
		__skb_incr_checksum_unnecessary(skb);
}

#define AQ_SKB_ALIGN SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
int aq_ring_rx_clean(struct aq_ring_s *self,
		     struct napi_struct *napi,
		     int *work_done,
		     int budget)
{
	struct net_device *ndev = aq_nic_get_ndev(self->aq_nic);
	bool is_rsc_completed = true;
	int err = 0;

	for (; (self->sw_head != self->hw_head) && budget;
		self->sw_head = aq_ring_next_dx(self, self->sw_head),
		--budget, ++(*work_done)) {
		struct aq_ring_buff_s *buff = &self->buff_ring[self->sw_head];
		bool is_ptp_ring = aq_ptp_ring(self->aq_nic, self);
		struct aq_ring_buff_s *buff_ = NULL;
		struct sk_buff *skb = NULL;
		unsigned int next_ = 0U;
		unsigned int i = 0U;
		u16 hdr_len;

		if (buff->is_cleaned)
			continue;

		if (!buff->is_eop) {
			buff_ = buff;
			do {
				next_ = buff_->next,
				buff_ = &self->buff_ring[next_];
				is_rsc_completed =
					aq_ring_dx_in_range(self->sw_head,
							    next_,
							    self->hw_head);

				if (unlikely(!is_rsc_completed))
					break;

				buff->is_error |= buff_->is_error;
				buff->is_cso_err |= buff_->is_cso_err;

			} while (!buff_->is_eop);

			if (!is_rsc_completed) {
				err = 0;
				goto err_exit;
			}
			if (buff->is_error ||
			    (buff->is_lro && buff->is_cso_err)) {
				buff_ = buff;
				do {
					next_ = buff_->next,
					buff_ = &self->buff_ring[next_];

					buff_->is_cleaned = true;
				} while (!buff_->is_eop);

				++self->stats.rx.errors;
				continue;
			}
		}

		if (buff->is_error) {
			++self->stats.rx.errors;
			continue;
		}

		dma_sync_single_range_for_cpu(aq_nic_get_dev(self->aq_nic),
					      buff->rxdata.daddr,
					      buff->rxdata.pg_off,
					      buff->len, DMA_FROM_DEVICE);

		/* for single fragment packets use build_skb() */
		if (buff->is_eop &&
		    buff->len <= AQ_CFG_RX_FRAME_MAX - AQ_SKB_ALIGN) {
			skb = build_skb(aq_buf_vaddr(&buff->rxdata),
					AQ_CFG_RX_FRAME_MAX);
			if (unlikely(!skb)) {
				err = -ENOMEM;
				goto err_exit;
			}
			if (is_ptp_ring)
				buff->len -=
					aq_ptp_extract_ts(self->aq_nic, skb,
						aq_buf_vaddr(&buff->rxdata),
						buff->len);
			skb_put(skb, buff->len);
			page_ref_inc(buff->rxdata.page);
		} else {
			skb = napi_alloc_skb(napi, AQ_CFG_RX_HDR_SIZE);
			if (unlikely(!skb)) {
				err = -ENOMEM;
				goto err_exit;
			}
			if (is_ptp_ring)
				buff->len -=
					aq_ptp_extract_ts(self->aq_nic, skb,
						aq_buf_vaddr(&buff->rxdata),
						buff->len);

			hdr_len = buff->len;
			if (hdr_len > AQ_CFG_RX_HDR_SIZE)
				hdr_len = eth_get_headlen(skb->dev,
							  aq_buf_vaddr(&buff->rxdata),
							  AQ_CFG_RX_HDR_SIZE);

			memcpy(__skb_put(skb, hdr_len), aq_buf_vaddr(&buff->rxdata),
			       ALIGN(hdr_len, sizeof(long)));

			if (buff->len - hdr_len > 0) {
				skb_add_rx_frag(skb, 0, buff->rxdata.page,
						buff->rxdata.pg_off + hdr_len,
						buff->len - hdr_len,
						AQ_CFG_RX_FRAME_MAX);
				page_ref_inc(buff->rxdata.page);
			}

			if (!buff->is_eop) {
				buff_ = buff;
				i = 1U;
				do {
					next_ = buff_->next,
					buff_ = &self->buff_ring[next_];

					dma_sync_single_range_for_cpu(
							aq_nic_get_dev(self->aq_nic),
							buff_->rxdata.daddr,
							buff_->rxdata.pg_off,
							buff_->len,
							DMA_FROM_DEVICE);
					skb_add_rx_frag(skb, i++,
							buff_->rxdata.page,
							buff_->rxdata.pg_off,
							buff_->len,
							AQ_CFG_RX_FRAME_MAX);
					page_ref_inc(buff_->rxdata.page);
					buff_->is_cleaned = 1;

					buff->is_ip_cso &= buff_->is_ip_cso;
					buff->is_udp_cso &= buff_->is_udp_cso;
					buff->is_tcp_cso &= buff_->is_tcp_cso;
					buff->is_cso_err |= buff_->is_cso_err;

				} while (!buff_->is_eop);
			}
		}

		if (buff->is_vlan)
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
					       buff->vlan_rx_tag);

		skb->protocol = eth_type_trans(skb, ndev);

		aq_rx_checksum(self, buff, skb);

		skb_set_hash(skb, buff->rss_hash,
			     buff->is_hash_l4 ? PKT_HASH_TYPE_L4 :
			     PKT_HASH_TYPE_NONE);
		/* Send all PTP traffic to 0 queue */
		skb_record_rx_queue(skb, is_ptp_ring ? 0 : self->idx);

		++self->stats.rx.packets;
		self->stats.rx.bytes += skb->len;

		napi_gro_receive(napi, skb);
	}

err_exit:
	return err;
}

void aq_ring_hwts_rx_clean(struct aq_ring_s *self, struct aq_nic_s *aq_nic)
{
	while (self->sw_head != self->hw_head) {
		u64 ns;

		aq_nic->aq_hw_ops->extract_hwts(aq_nic->aq_hw,
						self->dx_ring +
						(self->sw_head * self->dx_size),
						self->dx_size, &ns);
		aq_ptp_tx_hwtstamp(aq_nic, ns);

		self->sw_head = aq_ring_next_dx(self, self->sw_head);
	}
}

int aq_ring_rx_fill(struct aq_ring_s *self)
{
	unsigned int page_order = self->page_order;
	struct aq_ring_buff_s *buff = NULL;
	int err = 0;
	int i = 0;

	if (aq_ring_avail_dx(self) < min_t(unsigned int, AQ_CFG_RX_REFILL_THRES,
					   self->size / 2))
		return err;

	for (i = aq_ring_avail_dx(self); i--;
		self->sw_tail = aq_ring_next_dx(self, self->sw_tail)) {
		buff = &self->buff_ring[self->sw_tail];

		buff->flags = 0U;
		buff->len = AQ_CFG_RX_FRAME_MAX;

		err = aq_get_rxpages(self, buff, page_order);
		if (err)
			goto err_exit;

		buff->pa = aq_buf_daddr(&buff->rxdata);
		buff = NULL;
	}

err_exit:
	return err;
}

void aq_ring_rx_deinit(struct aq_ring_s *self)
{
	if (!self)
		goto err_exit;

	for (; self->sw_head != self->sw_tail;
	     self->sw_head = aq_ring_next_dx(self, self->sw_head)) {
		struct aq_ring_buff_s *buff = &self->buff_ring[self->sw_head];

		aq_free_rxpage(&buff->rxdata, aq_nic_get_dev(self->aq_nic));
	}

err_exit:;
}

void aq_ring_free(struct aq_ring_s *self)
{
	if (!self)
		goto err_exit;

	kfree(self->buff_ring);

	if (self->dx_ring)
		dma_free_coherent(aq_nic_get_dev(self->aq_nic),
				  self->size * self->dx_size, self->dx_ring,
				  self->dx_ring_pa);

err_exit:;
}
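
/* Usage sketch (illustrative only, not part of the driver): a minimal NAPI
 * poll handler built on the ring helpers above would typically reclaim
 * completed Tx descriptors, process received packets up to the budget, and
 * then refill the Rx ring.  The struct and function names below
 * (aq_example_vec, aq_example_poll) are hypothetical; in the real driver the
 * equivalent logic lives in aq_vec.c and also updates the hardware head/tail
 * pointers through the aq_hw_ops callbacks, which this sketch omits.
 */
#if 0	/* example only, never compiled */
struct aq_example_vec {
	struct napi_struct napi;
	struct aq_ring_s *tx_ring;
	struct aq_ring_s *rx_ring;
};

static int aq_example_poll(struct napi_struct *napi, int budget)
{
	struct aq_example_vec *vec =
		container_of(napi, struct aq_example_vec, napi);
	int work_done = 0;

	/* Release skbs and DMA mappings for Tx descriptors the HW finished,
	 * then restart the Tx queue if enough descriptors became free.
	 */
	aq_ring_tx_clean(vec->tx_ring);
	aq_ring_update_queue_state(vec->tx_ring);

	/* Build skbs for completed Rx descriptors, up to the NAPI budget. */
	aq_ring_rx_clean(vec->rx_ring, napi, &work_done, budget);

	/* Re-arm the Rx ring with fresh pages for the consumed descriptors. */
	aq_ring_rx_fill(vec->rx_ring);

	if (work_done < budget)
		napi_complete_done(napi, work_done);

	return work_done;
}
#endif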