// SPDX-License-Identifier: (GPL-2.0 OR MIT)
/* Google virtual Ethernet (gve) driver
 *
 * Copyright (C) 2015-2021 Google, Inc.
 */

#include "gve.h"
#include "gve_dqo.h"
#include "gve_adminq.h"
#include "gve_utils.h"
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <net/ip6_checksum.h>
#include <net/ipv6.h>
#include <net/tcp.h>

static int gve_buf_ref_cnt(struct gve_rx_buf_state_dqo *bs)
{
	return page_count(bs->page_info.page) - bs->page_info.pagecnt_bias;
}

static void gve_free_page_dqo(struct gve_priv *priv,
			      struct gve_rx_buf_state_dqo *bs,
			      bool free_page)
{
	page_ref_sub(bs->page_info.page, bs->page_info.pagecnt_bias - 1);
	if (free_page)
		gve_free_page(&priv->pdev->dev, bs->page_info.page, bs->addr,
			      DMA_FROM_DEVICE);
	bs->page_info.page = NULL;
}

static struct gve_rx_buf_state_dqo *gve_alloc_buf_state(struct gve_rx_ring *rx)
{
	struct gve_rx_buf_state_dqo *buf_state;
	s16 buffer_id;

	buffer_id = rx->dqo.free_buf_states;
	if (unlikely(buffer_id == -1))
		return NULL;

	buf_state = &rx->dqo.buf_states[buffer_id];

	/* Remove buf_state from free list */
	rx->dqo.free_buf_states = buf_state->next;

	/* Point buf_state to itself to mark it as allocated */
	buf_state->next = buffer_id;

	return buf_state;
}

static bool gve_buf_state_is_allocated(struct gve_rx_ring *rx,
				       struct gve_rx_buf_state_dqo *buf_state)
{
	s16 buffer_id = buf_state - rx->dqo.buf_states;

	return buf_state->next == buffer_id;
}

static void gve_free_buf_state(struct gve_rx_ring *rx,
			       struct gve_rx_buf_state_dqo *buf_state)
{
	s16 buffer_id = buf_state - rx->dqo.buf_states;

	buf_state->next = rx->dqo.free_buf_states;
	rx->dqo.free_buf_states = buffer_id;
}

static struct gve_rx_buf_state_dqo *
gve_dequeue_buf_state(struct gve_rx_ring *rx, struct gve_index_list *list)
{
	struct gve_rx_buf_state_dqo *buf_state;
	s16 buffer_id;

	buffer_id = list->head;
	if (unlikely(buffer_id == -1))
		return NULL;

	buf_state = &rx->dqo.buf_states[buffer_id];

	/* Remove buf_state from list */
	list->head = buf_state->next;
	if (buf_state->next == -1)
		list->tail = -1;

	/* Point buf_state to itself to mark it as allocated */
	buf_state->next = buffer_id;

	return buf_state;
}

static void gve_enqueue_buf_state(struct gve_rx_ring *rx,
				  struct gve_index_list *list,
				  struct gve_rx_buf_state_dqo *buf_state)
{
	s16 buffer_id = buf_state - rx->dqo.buf_states;

	buf_state->next = -1;

	if (list->head == -1) {
		list->head = buffer_id;
		list->tail = buffer_id;
	} else {
		int tail = list->tail;

		rx->dqo.buf_states[tail].next = buffer_id;
		list->tail = buffer_id;
	}
}
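
/* Buffer state bookkeeping: every buf_state is either on exactly one list
 * threaded through ->next or is allocated, which is marked by pointing
 * ->next at the state's own index.  rx->dqo.free_buf_states is the
 * allocation free list, rx->dqo.recycled_buf_states holds buffers that can
 * be posted to the NIC again immediately, and rx->dqo.used_buf_states holds
 * buffers whose pages may still be referenced by SKBs and must wait for
 * gve_buf_ref_cnt() to drop to zero before they are reused.
 */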

static struct gve_rx_buf_state_dqo *
gve_get_recycled_buf_state(struct gve_rx_ring *rx)
{
	struct gve_rx_buf_state_dqo *buf_state;
	int i;

	/* Recycled buf states are immediately usable. */
	buf_state = gve_dequeue_buf_state(rx, &rx->dqo.recycled_buf_states);
	if (likely(buf_state))
		return buf_state;

	if (unlikely(rx->dqo.used_buf_states.head == -1))
		return NULL;

	/* Used buf states are only usable when ref count reaches 0, which means
	 * no SKBs refer to them.
	 *
	 * Search a limited number before giving up.
	 */
	for (i = 0; i < 5; i++) {
		buf_state = gve_dequeue_buf_state(rx, &rx->dqo.used_buf_states);
		if (gve_buf_ref_cnt(buf_state) == 0) {
			rx->dqo.used_buf_states_cnt--;
			return buf_state;
		}

		gve_enqueue_buf_state(rx, &rx->dqo.used_buf_states, buf_state);
	}

	/* For QPL, we cannot allocate any new buffers and must
	 * wait for the existing ones to be available.
	 */
	if (rx->dqo.qpl)
		return NULL;

	/* If there are no free buf states discard an entry from
	 * `used_buf_states` so it can be used.
	 */
	if (unlikely(rx->dqo.free_buf_states == -1)) {
		buf_state = gve_dequeue_buf_state(rx, &rx->dqo.used_buf_states);
		if (gve_buf_ref_cnt(buf_state) == 0)
			return buf_state;

		gve_free_page_dqo(rx->gve, buf_state, true);
		gve_free_buf_state(rx, buf_state);
	}

	return NULL;
}

static int gve_alloc_page_dqo(struct gve_rx_ring *rx,
			      struct gve_rx_buf_state_dqo *buf_state)
{
	struct gve_priv *priv = rx->gve;
	u32 idx;

	if (!rx->dqo.qpl) {
		int err;

		err = gve_alloc_page(priv, &priv->pdev->dev,
				     &buf_state->page_info.page,
				     &buf_state->addr,
				     DMA_FROM_DEVICE, GFP_ATOMIC);
		if (err)
			return err;
	} else {
		idx = rx->dqo.next_qpl_page_idx;
		if (idx >= gve_get_rx_pages_per_qpl_dqo(priv->rx_desc_cnt)) {
			net_err_ratelimited("%s: Out of QPL pages\n",
					    priv->dev->name);
			return -ENOMEM;
		}
		buf_state->page_info.page = rx->dqo.qpl->pages[idx];
		buf_state->addr = rx->dqo.qpl->page_buses[idx];
		rx->dqo.next_qpl_page_idx++;
	}
	buf_state->page_info.page_offset = 0;
	buf_state->page_info.page_address =
		page_address(buf_state->page_info.page);
	buf_state->last_single_ref_offset = 0;

	/* The page already has 1 ref. */
	page_ref_add(buf_state->page_info.page, INT_MAX - 1);
	buf_state->page_info.pagecnt_bias = INT_MAX;

	return 0;
}
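
/* Page reference counting: gve_alloc_page_dqo() raises the page's refcount
 * to INT_MAX and records that value as pagecnt_bias.  Handing a fragment to
 * the stack then only requires decrementing the bias
 * (gve_dec_pagecnt_bias()), and gve_buf_ref_cnt() (page_count() minus the
 * bias) gives the number of references still held outside the driver.
 * gve_free_page_dqo() returns the surplus references before the page itself
 * is released.
 */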

static void gve_rx_free_hdr_bufs(struct gve_priv *priv, struct gve_rx_ring *rx)
{
	struct device *hdev = &priv->pdev->dev;
	int buf_count = rx->dqo.bufq.mask + 1;

	if (rx->dqo.hdr_bufs.data) {
		dma_free_coherent(hdev, priv->header_buf_size * buf_count,
				  rx->dqo.hdr_bufs.data, rx->dqo.hdr_bufs.addr);
		rx->dqo.hdr_bufs.data = NULL;
	}
}

void gve_rx_stop_ring_dqo(struct gve_priv *priv, int idx)
{
	int ntfy_idx = gve_rx_idx_to_ntfy(priv, idx);

	if (!gve_rx_was_added_to_block(priv, idx))
		return;

	gve_remove_napi(priv, ntfy_idx);
	gve_rx_remove_from_block(priv, idx);
}

static void gve_rx_free_ring_dqo(struct gve_priv *priv, struct gve_rx_ring *rx,
				 struct gve_rx_alloc_rings_cfg *cfg)
{
	struct device *hdev = &priv->pdev->dev;
	size_t completion_queue_slots;
	size_t buffer_queue_slots;
	int idx = rx->q_num;
	size_t size;
	int i;

	completion_queue_slots = rx->dqo.complq.mask + 1;
	buffer_queue_slots = rx->dqo.bufq.mask + 1;

	if (rx->q_resources) {
		dma_free_coherent(hdev, sizeof(*rx->q_resources),
				  rx->q_resources, rx->q_resources_bus);
		rx->q_resources = NULL;
	}

	for (i = 0; i < rx->dqo.num_buf_states; i++) {
		struct gve_rx_buf_state_dqo *bs = &rx->dqo.buf_states[i];
		/* Only free page for RDA. QPL pages are freed in gve_main. */
		if (bs->page_info.page)
			gve_free_page_dqo(priv, bs, !rx->dqo.qpl);
	}

	rx->dqo.qpl = NULL;

	if (rx->dqo.bufq.desc_ring) {
		size = sizeof(rx->dqo.bufq.desc_ring[0]) * buffer_queue_slots;
		dma_free_coherent(hdev, size, rx->dqo.bufq.desc_ring,
				  rx->dqo.bufq.bus);
		rx->dqo.bufq.desc_ring = NULL;
	}

	if (rx->dqo.complq.desc_ring) {
		size = sizeof(rx->dqo.complq.desc_ring[0]) *
			completion_queue_slots;
		dma_free_coherent(hdev, size, rx->dqo.complq.desc_ring,
				  rx->dqo.complq.bus);
		rx->dqo.complq.desc_ring = NULL;
	}

	kvfree(rx->dqo.buf_states);
	rx->dqo.buf_states = NULL;

	gve_rx_free_hdr_bufs(priv, rx);

	netif_dbg(priv, drv, priv->dev, "freed rx ring %d\n", idx);
}

static int gve_rx_alloc_hdr_bufs(struct gve_priv *priv, struct gve_rx_ring *rx)
{
	struct device *hdev = &priv->pdev->dev;
	int buf_count = rx->dqo.bufq.mask + 1;

	rx->dqo.hdr_bufs.data = dma_alloc_coherent(hdev, priv->header_buf_size * buf_count,
						   &rx->dqo.hdr_bufs.addr, GFP_KERNEL);
	if (!rx->dqo.hdr_bufs.data)
		return -ENOMEM;

	return 0;
}
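
/* Header-split buffers are carved out of a single coherent allocation with
 * one header_buf_size slot per buffer queue entry.  gve_rx_post_buffers_dqo()
 * points each posted descriptor's header_buf_addr at
 * hdr_bufs.addr + header_buf_size * slot, and gve_rx_dqo() copies the header
 * for a completed descriptor out of the matching slot.
 */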

void gve_rx_start_ring_dqo(struct gve_priv *priv, int idx)
{
	int ntfy_idx = gve_rx_idx_to_ntfy(priv, idx);

	gve_rx_add_to_block(priv, idx);
	gve_add_napi(priv, ntfy_idx, gve_napi_poll_dqo);
}

static int gve_rx_alloc_ring_dqo(struct gve_priv *priv,
				 struct gve_rx_alloc_rings_cfg *cfg,
				 struct gve_rx_ring *rx,
				 int idx)
{
	struct device *hdev = &priv->pdev->dev;
	size_t size;
	int i;

	const u32 buffer_queue_slots = cfg->ring_size;
	const u32 completion_queue_slots = cfg->ring_size;

	netif_dbg(priv, drv, priv->dev, "allocating rx ring DQO\n");

	memset(rx, 0, sizeof(*rx));
	rx->gve = priv;
	rx->q_num = idx;
	rx->dqo.bufq.mask = buffer_queue_slots - 1;
	rx->dqo.complq.num_free_slots = completion_queue_slots;
	rx->dqo.complq.mask = completion_queue_slots - 1;
	rx->ctx.skb_head = NULL;
	rx->ctx.skb_tail = NULL;

	rx->dqo.num_buf_states = cfg->raw_addressing ?
		min_t(s16, S16_MAX, buffer_queue_slots * 4) :
		gve_get_rx_pages_per_qpl_dqo(cfg->ring_size);
	rx->dqo.buf_states = kvcalloc(rx->dqo.num_buf_states,
				      sizeof(rx->dqo.buf_states[0]),
				      GFP_KERNEL);
	if (!rx->dqo.buf_states)
		return -ENOMEM;

	/* Allocate header buffers for header-split */
	if (cfg->enable_header_split)
		if (gve_rx_alloc_hdr_bufs(priv, rx))
			goto err;

	/* Set up linked list of buffer IDs */
	for (i = 0; i < rx->dqo.num_buf_states - 1; i++)
		rx->dqo.buf_states[i].next = i + 1;

	rx->dqo.buf_states[rx->dqo.num_buf_states - 1].next = -1;
	rx->dqo.recycled_buf_states.head = -1;
	rx->dqo.recycled_buf_states.tail = -1;
	rx->dqo.used_buf_states.head = -1;
	rx->dqo.used_buf_states.tail = -1;

	/* Allocate RX completion queue */
	size = sizeof(rx->dqo.complq.desc_ring[0]) *
		completion_queue_slots;
	rx->dqo.complq.desc_ring =
		dma_alloc_coherent(hdev, size, &rx->dqo.complq.bus, GFP_KERNEL);
	if (!rx->dqo.complq.desc_ring)
		goto err;

	/* Allocate RX buffer queue */
	size = sizeof(rx->dqo.bufq.desc_ring[0]) * buffer_queue_slots;
	rx->dqo.bufq.desc_ring =
		dma_alloc_coherent(hdev, size, &rx->dqo.bufq.bus, GFP_KERNEL);
	if (!rx->dqo.bufq.desc_ring)
		goto err;

	if (!cfg->raw_addressing) {
		u32 qpl_id = gve_get_rx_qpl_id(cfg->qcfg_tx, rx->q_num);

		rx->dqo.qpl = &cfg->qpls[qpl_id];
		rx->dqo.next_qpl_page_idx = 0;
	}

	rx->q_resources = dma_alloc_coherent(hdev, sizeof(*rx->q_resources),
					     &rx->q_resources_bus, GFP_KERNEL);
	if (!rx->q_resources)
		goto err;

	return 0;

err:
	gve_rx_free_ring_dqo(priv, rx, cfg);
	return -ENOMEM;
}

void gve_rx_write_doorbell_dqo(const struct gve_priv *priv, int queue_idx)
{
	const struct gve_rx_ring *rx = &priv->rx[queue_idx];
	u64 index = be32_to_cpu(rx->q_resources->db_index);

	iowrite32(rx->dqo.bufq.tail, &priv->db_bar2[index]);
}

int gve_rx_alloc_rings_dqo(struct gve_priv *priv,
			   struct gve_rx_alloc_rings_cfg *cfg)
{
	struct gve_rx_ring *rx;
	int err;
	int i;

	if (!cfg->raw_addressing && !cfg->qpls) {
		netif_err(priv, drv, priv->dev,
			  "Cannot alloc QPL ring before allocing QPLs\n");
		return -EINVAL;
	}

	rx = kvcalloc(cfg->qcfg->max_queues, sizeof(struct gve_rx_ring),
		      GFP_KERNEL);
	if (!rx)
		return -ENOMEM;

	for (i = 0; i < cfg->qcfg->num_queues; i++) {
		err = gve_rx_alloc_ring_dqo(priv, cfg, &rx[i], i);
		if (err) {
			netif_err(priv, drv, priv->dev,
				  "Failed to alloc rx ring=%d: err=%d\n",
				  i, err);
			goto err;
		}
	}

	cfg->rx = rx;
	return 0;

err:
	for (i--; i >= 0; i--)
		gve_rx_free_ring_dqo(priv, &rx[i], cfg);
	kvfree(rx);
	return err;
}

void gve_rx_free_rings_dqo(struct gve_priv *priv,
			   struct gve_rx_alloc_rings_cfg *cfg)
{
	struct gve_rx_ring *rx = cfg->rx;
	int i;

	if (!rx)
		return;

	for (i = 0; i < cfg->qcfg->num_queues; i++)
		gve_rx_free_ring_dqo(priv, &rx[i], cfg);

	kvfree(rx);
	cfg->rx = NULL;
}
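
/* Buffer posting: gve_rx_post_buffers_dqo() below refills the buffer queue
 * from recycled buffer states first and only then allocates fresh states and
 * pages.  Each posted buffer reserves a completion queue slot
 * (complq->num_free_slots), and the doorbell is rung once per
 * GVE_RX_BUF_THRESH_DQO buffers rather than per descriptor so that MMIO
 * writes are batched.
 */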

void gve_rx_post_buffers_dqo(struct gve_rx_ring *rx)
{
	struct gve_rx_compl_queue_dqo *complq = &rx->dqo.complq;
	struct gve_rx_buf_queue_dqo *bufq = &rx->dqo.bufq;
	struct gve_priv *priv = rx->gve;
	u32 num_avail_slots;
	u32 num_full_slots;
	u32 num_posted = 0;

	num_full_slots = (bufq->tail - bufq->head) & bufq->mask;
	num_avail_slots = bufq->mask - num_full_slots;

	num_avail_slots = min_t(u32, num_avail_slots, complq->num_free_slots);
	while (num_posted < num_avail_slots) {
		struct gve_rx_desc_dqo *desc = &bufq->desc_ring[bufq->tail];
		struct gve_rx_buf_state_dqo *buf_state;

		buf_state = gve_get_recycled_buf_state(rx);
		if (unlikely(!buf_state)) {
			buf_state = gve_alloc_buf_state(rx);
			if (unlikely(!buf_state))
				break;

			if (unlikely(gve_alloc_page_dqo(rx, buf_state))) {
				u64_stats_update_begin(&rx->statss);
				rx->rx_buf_alloc_fail++;
				u64_stats_update_end(&rx->statss);
				gve_free_buf_state(rx, buf_state);
				break;
			}
		}

		desc->buf_id = cpu_to_le16(buf_state - rx->dqo.buf_states);
		desc->buf_addr = cpu_to_le64(buf_state->addr +
					     buf_state->page_info.page_offset);
		if (rx->dqo.hdr_bufs.data)
			desc->header_buf_addr =
				cpu_to_le64(rx->dqo.hdr_bufs.addr +
					    priv->header_buf_size * bufq->tail);

		bufq->tail = (bufq->tail + 1) & bufq->mask;
		complq->num_free_slots--;
		num_posted++;

		if ((bufq->tail & (GVE_RX_BUF_THRESH_DQO - 1)) == 0)
			gve_rx_write_doorbell_dqo(priv, rx->q_num);
	}

	rx->fill_cnt += num_posted;
}
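
/* Page reuse: when at least two data buffers fit in a page,
 * gve_try_recycle_buf() walks the page in data_buffer_size steps and keeps
 * handing out fresh offsets.  last_single_ref_offset remembers the offset at
 * which only a single outstanding reference remained (so every other chunk
 * of the page was free); wrapping back to that offset without the count
 * dropping to one again means some chunk may still be in flight, so the
 * buffer is parked on the used list instead of being recycled.
 */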

static void gve_try_recycle_buf(struct gve_priv *priv, struct gve_rx_ring *rx,
				struct gve_rx_buf_state_dqo *buf_state)
{
	const u16 data_buffer_size = priv->data_buffer_size_dqo;
	int pagecount;

	/* Can't reuse if we only fit one buffer per page */
	if (data_buffer_size * 2 > PAGE_SIZE)
		goto mark_used;

	pagecount = gve_buf_ref_cnt(buf_state);

	/* Record the offset when we have a single remaining reference.
	 *
	 * When this happens, we know all of the other offsets of the page are
	 * usable.
	 */
	if (pagecount == 1) {
		buf_state->last_single_ref_offset =
			buf_state->page_info.page_offset;
	}

	/* Use the next buffer sized chunk in the page. */
	buf_state->page_info.page_offset += data_buffer_size;
	buf_state->page_info.page_offset &= (PAGE_SIZE - 1);

	/* If we wrap around to the same offset without ever dropping to 1
	 * reference, then we don't know if this offset was ever freed.
	 */
	if (buf_state->page_info.page_offset ==
	    buf_state->last_single_ref_offset) {
		goto mark_used;
	}

	gve_enqueue_buf_state(rx, &rx->dqo.recycled_buf_states, buf_state);
	return;

mark_used:
	gve_enqueue_buf_state(rx, &rx->dqo.used_buf_states, buf_state);
	rx->dqo.used_buf_states_cnt++;
}

static void gve_rx_skb_csum(struct sk_buff *skb,
			    const struct gve_rx_compl_desc_dqo *desc,
			    struct gve_ptype ptype)
{
	skb->ip_summed = CHECKSUM_NONE;

	/* HW did not identify and process L3 and L4 headers. */
	if (unlikely(!desc->l3_l4_processed))
		return;

	if (ptype.l3_type == GVE_L3_TYPE_IPV4) {
		if (unlikely(desc->csum_ip_err || desc->csum_external_ip_err))
			return;
	} else if (ptype.l3_type == GVE_L3_TYPE_IPV6) {
		/* Checksum should be skipped if this flag is set. */
		if (unlikely(desc->ipv6_ex_add))
			return;
	}

	if (unlikely(desc->csum_l4_err))
		return;

	switch (ptype.l4_type) {
	case GVE_L4_TYPE_TCP:
	case GVE_L4_TYPE_UDP:
	case GVE_L4_TYPE_ICMP:
	case GVE_L4_TYPE_SCTP:
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		break;
	default:
		break;
	}
}

static void gve_rx_skb_hash(struct sk_buff *skb,
			    const struct gve_rx_compl_desc_dqo *compl_desc,
			    struct gve_ptype ptype)
{
	enum pkt_hash_types hash_type = PKT_HASH_TYPE_L2;

	if (ptype.l4_type != GVE_L4_TYPE_UNKNOWN)
		hash_type = PKT_HASH_TYPE_L4;
	else if (ptype.l3_type != GVE_L3_TYPE_UNKNOWN)
		hash_type = PKT_HASH_TYPE_L3;

	skb_set_hash(skb, le32_to_cpu(compl_desc->hash), hash_type);
}

static void gve_rx_free_skb(struct gve_rx_ring *rx)
{
	if (!rx->ctx.skb_head)
		return;

	dev_kfree_skb_any(rx->ctx.skb_head);
	rx->ctx.skb_head = NULL;
	rx->ctx.skb_tail = NULL;
}

static bool gve_rx_should_trigger_copy_ondemand(struct gve_rx_ring *rx)
{
	if (!rx->dqo.qpl)
		return false;
	if (rx->dqo.used_buf_states_cnt <
	    (rx->dqo.num_buf_states -
	     GVE_DQO_QPL_ONDEMAND_ALLOC_THRESHOLD))
		return false;
	return true;
}

static int gve_rx_copy_ondemand(struct gve_rx_ring *rx,
				struct gve_rx_buf_state_dqo *buf_state,
				u16 buf_len)
{
	struct page *page = alloc_page(GFP_ATOMIC);
	int num_frags;

	if (!page)
		return -ENOMEM;

	memcpy(page_address(page),
	       buf_state->page_info.page_address +
	       buf_state->page_info.page_offset,
	       buf_len);
	num_frags = skb_shinfo(rx->ctx.skb_tail)->nr_frags;
	skb_add_rx_frag(rx->ctx.skb_tail, num_frags, page,
			0, buf_len, PAGE_SIZE);

	u64_stats_update_begin(&rx->statss);
	rx->rx_frag_alloc_cnt++;
	u64_stats_update_end(&rx->statss);
	/* Return unused buffer. */
	gve_enqueue_buf_state(rx, &rx->dqo.recycled_buf_states, buf_state);
	return 0;
}

/* Chains multiple skbs for a single rx packet.
 * Returns 0 if the buffer is appended, a negative value otherwise.
 */
static int gve_rx_append_frags(struct napi_struct *napi,
			       struct gve_rx_buf_state_dqo *buf_state,
			       u16 buf_len, struct gve_rx_ring *rx,
			       struct gve_priv *priv)
{
	int num_frags = skb_shinfo(rx->ctx.skb_tail)->nr_frags;

	if (unlikely(num_frags == MAX_SKB_FRAGS)) {
		struct sk_buff *skb;

		skb = napi_alloc_skb(napi, 0);
		if (!skb)
			return -1;

		if (rx->ctx.skb_tail == rx->ctx.skb_head)
			skb_shinfo(rx->ctx.skb_head)->frag_list = skb;
		else
			rx->ctx.skb_tail->next = skb;
		rx->ctx.skb_tail = skb;
		num_frags = 0;
	}
	if (rx->ctx.skb_tail != rx->ctx.skb_head) {
		rx->ctx.skb_head->len += buf_len;
		rx->ctx.skb_head->data_len += buf_len;
		rx->ctx.skb_head->truesize += priv->data_buffer_size_dqo;
	}

	/* Trigger ondemand page allocation if we are running low on buffers */
	if (gve_rx_should_trigger_copy_ondemand(rx))
		return gve_rx_copy_ondemand(rx, buf_state, buf_len);

	skb_add_rx_frag(rx->ctx.skb_tail, num_frags,
			buf_state->page_info.page,
			buf_state->page_info.page_offset,
			buf_len, priv->data_buffer_size_dqo);
	gve_dec_pagecnt_bias(&buf_state->page_info);

	/* Advances buffer page-offset if page is partially used.
	 * Marks buffer as used if page is full.
	 */
	gve_try_recycle_buf(priv, rx, buf_state);
	return 0;
}
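
/* gve_rx_dqo() handles one buffer completion: the header (if split) is
 * copied out of the header buffer, small EOP packets up to rx_copybreak are
 * copied into a freshly allocated skb so the buffer can be recycled at once,
 * and everything else is attached to the napi frag skb by reference.  On QPL
 * rings that are running low on buffer states, payloads are copied into
 * on-demand pages instead so the QPL buffer can be returned immediately.
 */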

/* Returns 0 if descriptor is completed successfully.
 * Returns -EINVAL if descriptor is invalid.
 * Returns -ENOMEM if data cannot be copied to skb.
 */
static int gve_rx_dqo(struct napi_struct *napi, struct gve_rx_ring *rx,
		      const struct gve_rx_compl_desc_dqo *compl_desc,
		      u32 desc_idx, int queue_idx)
{
	const u16 buffer_id = le16_to_cpu(compl_desc->buf_id);
	const bool hbo = compl_desc->header_buffer_overflow;
	const bool eop = compl_desc->end_of_packet != 0;
	const bool hsplit = compl_desc->split_header;
	struct gve_rx_buf_state_dqo *buf_state;
	struct gve_priv *priv = rx->gve;
	u16 buf_len;
	u16 hdr_len;

	if (unlikely(buffer_id >= rx->dqo.num_buf_states)) {
		net_err_ratelimited("%s: Invalid RX buffer_id=%u\n",
				    priv->dev->name, buffer_id);
		return -EINVAL;
	}
	buf_state = &rx->dqo.buf_states[buffer_id];
	if (unlikely(!gve_buf_state_is_allocated(rx, buf_state))) {
		net_err_ratelimited("%s: RX buffer_id is not allocated: %u\n",
				    priv->dev->name, buffer_id);
		return -EINVAL;
	}

	if (unlikely(compl_desc->rx_error)) {
		gve_enqueue_buf_state(rx, &rx->dqo.recycled_buf_states,
				      buf_state);
		return -EINVAL;
	}

	buf_len = compl_desc->packet_len;
	hdr_len = compl_desc->header_len;

	/* Page might not have been used for a while and was likely last
	 * written by a different thread.
	 */
	prefetch(buf_state->page_info.page);

	/* Copy the header into the skb in the case of header split */
	if (hsplit) {
		int unsplit = 0;

		if (hdr_len && !hbo) {
			rx->ctx.skb_head = gve_rx_copy_data(priv->dev, napi,
							    rx->dqo.hdr_bufs.data +
							    desc_idx * priv->header_buf_size,
							    hdr_len);
			if (unlikely(!rx->ctx.skb_head))
				goto error;
			rx->ctx.skb_tail = rx->ctx.skb_head;
		} else {
			unsplit = 1;
		}
		u64_stats_update_begin(&rx->statss);
		rx->rx_hsplit_pkt++;
		rx->rx_hsplit_unsplit_pkt += unsplit;
		rx->rx_hsplit_bytes += hdr_len;
		u64_stats_update_end(&rx->statss);
	}

	/* Sync the portion of the dma buffer for the CPU to read. */
	dma_sync_single_range_for_cpu(&priv->pdev->dev, buf_state->addr,
				      buf_state->page_info.page_offset,
				      buf_len, DMA_FROM_DEVICE);

	/* Append to current skb if one exists. */
	if (rx->ctx.skb_head) {
		if (unlikely(gve_rx_append_frags(napi, buf_state, buf_len, rx,
						 priv)) != 0) {
			goto error;
		}
		return 0;
	}

	if (eop && buf_len <= priv->rx_copybreak) {
		rx->ctx.skb_head = gve_rx_copy(priv->dev, napi,
					       &buf_state->page_info, buf_len);
		if (unlikely(!rx->ctx.skb_head))
			goto error;
		rx->ctx.skb_tail = rx->ctx.skb_head;

		u64_stats_update_begin(&rx->statss);
		rx->rx_copied_pkt++;
		rx->rx_copybreak_pkt++;
		u64_stats_update_end(&rx->statss);

		gve_enqueue_buf_state(rx, &rx->dqo.recycled_buf_states,
				      buf_state);
		return 0;
	}

	rx->ctx.skb_head = napi_get_frags(napi);
	if (unlikely(!rx->ctx.skb_head))
		goto error;
	rx->ctx.skb_tail = rx->ctx.skb_head;

	if (gve_rx_should_trigger_copy_ondemand(rx)) {
		if (gve_rx_copy_ondemand(rx, buf_state, buf_len) < 0)
			goto error;
		return 0;
	}

	skb_add_rx_frag(rx->ctx.skb_head, 0, buf_state->page_info.page,
			buf_state->page_info.page_offset, buf_len,
			priv->data_buffer_size_dqo);
	gve_dec_pagecnt_bias(&buf_state->page_info);

	gve_try_recycle_buf(priv, rx, buf_state);
	return 0;

error:
	gve_enqueue_buf_state(rx, &rx->dqo.recycled_buf_states, buf_state);
	return -ENOMEM;
}

static int gve_rx_complete_rsc(struct sk_buff *skb,
			       const struct gve_rx_compl_desc_dqo *desc,
			       struct gve_ptype ptype)
{
	struct skb_shared_info *shinfo = skb_shinfo(skb);

	/* Only TCP is supported right now. */
	if (ptype.l4_type != GVE_L4_TYPE_TCP)
		return -EINVAL;

	switch (ptype.l3_type) {
	case GVE_L3_TYPE_IPV4:
		shinfo->gso_type = SKB_GSO_TCPV4;
		break;
	case GVE_L3_TYPE_IPV6:
		shinfo->gso_type = SKB_GSO_TCPV6;
		break;
	default:
		return -EINVAL;
	}

	shinfo->gso_size = le16_to_cpu(desc->rsc_seg_len);
	return 0;
}

/* Returns 0 if the skb is completed successfully, a negative error code
 * otherwise.
 */
static int gve_rx_complete_skb(struct gve_rx_ring *rx, struct napi_struct *napi,
			       const struct gve_rx_compl_desc_dqo *desc,
			       netdev_features_t feat)
{
	struct gve_ptype ptype =
		rx->gve->ptype_lut_dqo->ptypes[desc->packet_type];
	int err;

	skb_record_rx_queue(rx->ctx.skb_head, rx->q_num);

	if (feat & NETIF_F_RXHASH)
		gve_rx_skb_hash(rx->ctx.skb_head, desc, ptype);

	if (feat & NETIF_F_RXCSUM)
		gve_rx_skb_csum(rx->ctx.skb_head, desc, ptype);

	/* RSC packets must set gso_size, otherwise the TCP stack will complain
	 * that packets are larger than the MTU.
	 */
	if (desc->rsc) {
		err = gve_rx_complete_rsc(rx->ctx.skb_head, desc, ptype);
		if (err < 0)
			return err;
	}

	if (skb_headlen(rx->ctx.skb_head) == 0)
		napi_gro_frags(napi);
	else
		napi_gro_receive(napi, rx->ctx.skb_head);

	return 0;
}
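
/* NAPI poll: gve_rx_poll_dqo() consumes completion descriptors until the
 * budget is spent or the generation bit indicates the ring is empty, counts
 * a packet against the budget only on its end-of-packet descriptor, and then
 * reposts buffers via gve_rx_post_buffers_dqo() before returning.
 */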

int gve_rx_poll_dqo(struct gve_notify_block *block, int budget)
{
	struct napi_struct *napi = &block->napi;
	netdev_features_t feat = napi->dev->features;

	struct gve_rx_ring *rx = block->rx;
	struct gve_rx_compl_queue_dqo *complq = &rx->dqo.complq;

	u32 work_done = 0;
	u64 bytes = 0;
	int err;

	while (work_done < budget) {
		struct gve_rx_compl_desc_dqo *compl_desc =
			&complq->desc_ring[complq->head];
		u32 pkt_bytes;

		/* No more new packets */
		if (compl_desc->generation == complq->cur_gen_bit)
			break;

		/* Prefetch the next two descriptors. */
		prefetch(&complq->desc_ring[(complq->head + 1) & complq->mask]);
		prefetch(&complq->desc_ring[(complq->head + 2) & complq->mask]);

		/* Do not read data until we own the descriptor */
		dma_rmb();

		err = gve_rx_dqo(napi, rx, compl_desc, complq->head, rx->q_num);
		if (err < 0) {
			gve_rx_free_skb(rx);
			u64_stats_update_begin(&rx->statss);
			if (err == -ENOMEM)
				rx->rx_skb_alloc_fail++;
			else if (err == -EINVAL)
				rx->rx_desc_err_dropped_pkt++;
			u64_stats_update_end(&rx->statss);
		}

		complq->head = (complq->head + 1) & complq->mask;
		complq->num_free_slots++;

		/* When the ring wraps, the generation bit is flipped. */
		complq->cur_gen_bit ^= (complq->head == 0);

		/* Receiving a completion means we have space to post another
		 * buffer on the buffer queue.
		 */
		{
			struct gve_rx_buf_queue_dqo *bufq = &rx->dqo.bufq;

			bufq->head = (bufq->head + 1) & bufq->mask;
		}

		/* Free running counter of completed descriptors */
		rx->cnt++;

		if (!rx->ctx.skb_head)
			continue;

		if (!compl_desc->end_of_packet)
			continue;

		work_done++;
		pkt_bytes = rx->ctx.skb_head->len;
		/* The ethernet header (first ETH_HLEN bytes) is snipped off
		 * by eth_type_trans.
		 */
		if (skb_headlen(rx->ctx.skb_head))
			pkt_bytes += ETH_HLEN;

		/* gve_rx_complete_skb() will consume skb if successful */
		if (gve_rx_complete_skb(rx, napi, compl_desc, feat) != 0) {
			gve_rx_free_skb(rx);
			u64_stats_update_begin(&rx->statss);
			rx->rx_desc_err_dropped_pkt++;
			u64_stats_update_end(&rx->statss);
			continue;
		}

		bytes += pkt_bytes;
		rx->ctx.skb_head = NULL;
		rx->ctx.skb_tail = NULL;
	}

	gve_rx_post_buffers_dqo(rx);

	u64_stats_update_begin(&rx->statss);
	rx->rpackets += work_done;
	rx->rbytes += bytes;
	u64_stats_update_end(&rx->statss);

	return work_done;
}