/*
 * Copyright (C) 2015 Cavium, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License
 * as published by the Free Software Foundation.
 */

#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/ip.h>
#include <linux/etherdevice.h>
#include <net/ip.h>
#include <net/tso.h>

#include "nic_reg.h"
#include "nic.h"
#include "q_struct.h"
#include "nicvf_queues.h"

/* Poll a register for a specific value */
static int nicvf_poll_reg(struct nicvf *nic, int qidx,
			  u64 reg, int bit_pos, int bits, int val)
{
	u64 bit_mask;
	u64 reg_val;
	int timeout = 10;

	bit_mask = (1ULL << bits) - 1;
	bit_mask = (bit_mask << bit_pos);

	while (timeout) {
		reg_val = nicvf_queue_reg_read(nic, reg, qidx);
		if (((reg_val & bit_mask) >> bit_pos) == val)
			return 0;
		usleep_range(1000, 2000);
		timeout--;
	}
	netdev_err(nic->netdev, "Poll on reg 0x%llx failed\n", reg);
	return 1;
}

/* Allocate memory for a queue's descriptors */
static int nicvf_alloc_q_desc_mem(struct nicvf *nic, struct q_desc_mem *dmem,
				  int q_len, int desc_size, int align_bytes)
{
	dmem->q_len = q_len;
	dmem->size = (desc_size * q_len) + align_bytes;
	/* Save address, need it while freeing */
	dmem->unalign_base = dma_zalloc_coherent(&nic->pdev->dev, dmem->size,
						 &dmem->dma, GFP_KERNEL);
	if (!dmem->unalign_base)
		return -ENOMEM;

	/* Align memory address for 'align_bytes' */
	dmem->phys_base = NICVF_ALIGNED_ADDR((u64)dmem->dma, align_bytes);
	dmem->base = dmem->unalign_base + (dmem->phys_base - dmem->dma);
	return 0;
}

/* Free queue's descriptor memory */
static void nicvf_free_q_desc_mem(struct nicvf *nic, struct q_desc_mem *dmem)
{
	if (!dmem)
		return;

	dma_free_coherent(&nic->pdev->dev, dmem->size,
			  dmem->unalign_base, dmem->dma);
	dmem->unalign_base = NULL;
	dmem->base = NULL;
}

/* Allocate buffer for packet reception
 * HW returns memory address where packet is DMA'ed but not a pointer
 * into RBDR ring, so save buffer address at the start of fragment and
 * align the start address to a cache aligned address
 */
static inline int nicvf_alloc_rcv_buffer(struct nicvf *nic, gfp_t gfp,
					 u32 buf_len, u64 **rbuf)
{
	int order = get_order(buf_len);

	/* Check if request can be accommodated in the previously allocated page */
	if (nic->rb_page) {
		if ((nic->rb_page_offset + buf_len + buf_len) >
		    (PAGE_SIZE << order)) {
			nic->rb_page = NULL;
		} else {
			nic->rb_page_offset += buf_len;
			get_page(nic->rb_page);
		}
	}

	/* Allocate a new page */
	if (!nic->rb_page) {
		nic->rb_page = alloc_pages(gfp | __GFP_COMP | __GFP_NOWARN,
					   order);
		if (!nic->rb_page) {
			netdev_err(nic->netdev,
				   "Failed to allocate new rcv buffer\n");
			return -ENOMEM;
		}
		nic->rb_page_offset = 0;
	}

	*rbuf = (u64 *)((u64)page_address(nic->rb_page) + nic->rb_page_offset);

	return 0;
}

/* Build skb around receive buffer */
static struct sk_buff *nicvf_rb_ptr_to_skb(struct nicvf *nic,
					   u64 rb_ptr, int len)
{
	void *data;
	struct sk_buff *skb;

	data = phys_to_virt(rb_ptr);

	/* Now build an skb to give to stack */
	skb = build_skb(data, RCV_FRAG_LEN);
	if (!skb) {
		put_page(virt_to_page(data));
		return NULL;
	}

	prefetch(skb->data);
	return skb;
}
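/* Receive buffer carving, in brief: nicvf_alloc_rcv_buffer() hands out
 * RCV_FRAG_LEN-sized chunks from one higher-order page, taking an extra
 * page reference for every chunk after the first, and falls back to a
 * fresh alloc_pages() call only once the page cannot accommodate two
 * further chunks.  The RBDR is given the buffer's physical address
 * right-shifted by NICVF_RCV_BUF_ALIGN, so buffers must start on that
 * alignment; the skb built around the buffer (build_skb() above) also
 * expects RCV_FRAG_LEN to cover the skb_shared_info tail room.  The exact
 * sizes come from nicvf_queues.h, so treat any concrete numbers here as
 * illustrative rather than guaranteed.
 */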
/* Allocate RBDR ring and populate receive buffers */
static int nicvf_init_rbdr(struct nicvf *nic, struct rbdr *rbdr,
			   int ring_len, int buf_size)
{
	int idx;
	u64 *rbuf;
	struct rbdr_entry_t *desc;
	int err;

	err = nicvf_alloc_q_desc_mem(nic, &rbdr->dmem, ring_len,
				     sizeof(struct rbdr_entry_t),
				     NICVF_RCV_BUF_ALIGN_BYTES);
	if (err)
		return err;

	rbdr->desc = rbdr->dmem.base;
	/* Buffer size has to be in multiples of 128 bytes */
	rbdr->dma_size = buf_size;
	rbdr->enable = true;
	rbdr->thresh = RBDR_THRESH;

	nic->rb_page = NULL;
	for (idx = 0; idx < ring_len; idx++) {
		err = nicvf_alloc_rcv_buffer(nic, GFP_KERNEL, RCV_FRAG_LEN,
					     &rbuf);
		if (err)
			return err;

		desc = GET_RBDR_DESC(rbdr, idx);
		desc->buf_addr = virt_to_phys(rbuf) >> NICVF_RCV_BUF_ALIGN;
	}
	return 0;
}

/* Free RBDR ring and its receive buffers */
static void nicvf_free_rbdr(struct nicvf *nic, struct rbdr *rbdr)
{
	int head, tail;
	u64 buf_addr;
	struct rbdr_entry_t *desc;

	if (!rbdr)
		return;

	rbdr->enable = false;
	if (!rbdr->dmem.base)
		return;

	head = rbdr->head;
	tail = rbdr->tail;

	/* Release the receive buffer pages */
	while (head != tail) {
		desc = GET_RBDR_DESC(rbdr, head);
		buf_addr = desc->buf_addr << NICVF_RCV_BUF_ALIGN;
		put_page(virt_to_page(phys_to_virt(buf_addr)));
		head++;
		head &= (rbdr->dmem.q_len - 1);
	}
	/* Release the tail descriptor's buffer page */
	desc = GET_RBDR_DESC(rbdr, tail);
	buf_addr = desc->buf_addr << NICVF_RCV_BUF_ALIGN;
	put_page(virt_to_page(phys_to_virt(buf_addr)));

	/* Free RBDR ring */
	nicvf_free_q_desc_mem(nic, &rbdr->dmem);
}

/* Refill receive buffer descriptors with new buffers */
static void nicvf_refill_rbdr(struct nicvf *nic, gfp_t gfp)
{
	struct queue_set *qs = nic->qs;
	int rbdr_idx = qs->rbdr_cnt;
	int tail, qcount;
	int refill_rb_cnt;
	struct rbdr *rbdr;
	struct rbdr_entry_t *desc;
	u64 *rbuf;
	int new_rb = 0;

refill:
	if (!rbdr_idx)
		return;
	rbdr_idx--;
	rbdr = &qs->rbdr[rbdr_idx];
	/* Check if it's enabled */
	if (!rbdr->enable)
		goto next_rbdr;

	/* Get the number of descriptors to be refilled */
	qcount = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_STATUS0, rbdr_idx);
	qcount &= 0x7FFFF;
	/* Doorbell can be rung with at most ring size minus 1 */
	if (qcount >= (qs->rbdr_len - 1))
		goto next_rbdr;
	else
		refill_rb_cnt = qs->rbdr_len - qcount - 1;

	/* Start filling descs from tail */
	tail = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_TAIL, rbdr_idx) >> 3;
	while (refill_rb_cnt) {
		tail++;
		tail &= (rbdr->dmem.q_len - 1);

		if (nicvf_alloc_rcv_buffer(nic, gfp, RCV_FRAG_LEN, &rbuf))
			break;

		desc = GET_RBDR_DESC(rbdr, tail);
		desc->buf_addr = virt_to_phys(rbuf) >> NICVF_RCV_BUF_ALIGN;
		refill_rb_cnt--;
		new_rb++;
	}

	/* make sure all memory stores are done before ringing doorbell */
	smp_wmb();

	/* Check if buffer allocation failed */
	if (refill_rb_cnt)
		nic->rb_alloc_fail = true;
	else
		nic->rb_alloc_fail = false;

	/* Notify HW */
	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_DOOR,
			      rbdr_idx, new_rb);
next_rbdr:
	/* Re-enable RBDR interrupts only if buffer allocation succeeded */
	if (!nic->rb_alloc_fail && rbdr->enable)
		nicvf_enable_intr(nic, NICVF_INTR_RBDR, rbdr_idx);

	if (rbdr_idx)
		goto refill;
}
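/* Refill arithmetic, sketched: STATUS0 reports how many descriptors the
 * HW still owns (masked to the 19-bit count field above), and the
 * doorbell may only ever announce up to ring size minus one buffers, so
 * each pass tops the ring up by qs->rbdr_len - qcount - 1 entries
 * starting just past the current tail.  As a purely illustrative example,
 * a ring of 8192 descriptors with 8000 still queued gets at most 191 new
 * buffers; whatever the allocator cannot provide is retried later through
 * rb_alloc_fail and the delayed work below.
 */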
/* Allocate receive buffers in non-atomic mode for a better chance of success */
void nicvf_rbdr_work(struct work_struct *work)
{
	struct nicvf *nic = container_of(work, struct nicvf, rbdr_work.work);

	nicvf_refill_rbdr(nic, GFP_KERNEL);
	if (nic->rb_alloc_fail)
		schedule_delayed_work(&nic->rbdr_work, msecs_to_jiffies(10));
	else
		nic->rb_work_scheduled = false;
}

/* In softirq context, allocate receive buffers in atomic mode */
void nicvf_rbdr_task(unsigned long data)
{
	struct nicvf *nic = (struct nicvf *)data;

	nicvf_refill_rbdr(nic, GFP_ATOMIC);
	if (nic->rb_alloc_fail) {
		nic->rb_work_scheduled = true;
		schedule_delayed_work(&nic->rbdr_work, msecs_to_jiffies(10));
	}
}

/* Initialize completion queue */
static int nicvf_init_cmp_queue(struct nicvf *nic,
				struct cmp_queue *cq, int q_len)
{
	int err;

	err = nicvf_alloc_q_desc_mem(nic, &cq->dmem, q_len, CMP_QUEUE_DESC_SIZE,
				     NICVF_CQ_BASE_ALIGN_BYTES);
	if (err)
		return err;

	cq->desc = cq->dmem.base;
	cq->thresh = pass1_silicon(nic->pdev) ? 0 : CMP_QUEUE_CQE_THRESH;
	nic->cq_coalesce_usecs = (CMP_QUEUE_TIMER_THRESH * 0.05) - 1;

	return 0;
}

static void nicvf_free_cmp_queue(struct nicvf *nic, struct cmp_queue *cq)
{
	if (!cq)
		return;
	if (!cq->dmem.base)
		return;

	nicvf_free_q_desc_mem(nic, &cq->dmem);
}

/* Initialize transmit queue */
static int nicvf_init_snd_queue(struct nicvf *nic,
				struct snd_queue *sq, int q_len)
{
	int err;

	err = nicvf_alloc_q_desc_mem(nic, &sq->dmem, q_len, SND_QUEUE_DESC_SIZE,
				     NICVF_SQ_BASE_ALIGN_BYTES);
	if (err)
		return err;

	sq->desc = sq->dmem.base;
	sq->skbuff = kcalloc(q_len, sizeof(u64), GFP_KERNEL);
	if (!sq->skbuff)
		return -ENOMEM;
	sq->head = 0;
	sq->tail = 0;
	atomic_set(&sq->free_cnt, q_len - 1);
	sq->thresh = SND_QUEUE_THRESH;

	/* Preallocate memory for TSO segment's header */
	sq->tso_hdrs = dma_alloc_coherent(&nic->pdev->dev,
					  q_len * TSO_HEADER_SIZE,
					  &sq->tso_hdrs_phys, GFP_KERNEL);
	if (!sq->tso_hdrs)
		return -ENOMEM;

	return 0;
}

static void nicvf_free_snd_queue(struct nicvf *nic, struct snd_queue *sq)
{
	if (!sq)
		return;
	if (!sq->dmem.base)
		return;

	if (sq->tso_hdrs)
		dma_free_coherent(&nic->pdev->dev,
				  sq->dmem.q_len * TSO_HEADER_SIZE,
				  sq->tso_hdrs, sq->tso_hdrs_phys);

	kfree(sq->skbuff);
	nicvf_free_q_desc_mem(nic, &sq->dmem);
}

static void nicvf_reclaim_snd_queue(struct nicvf *nic,
				    struct queue_set *qs, int qidx)
{
	/* Disable send queue */
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, 0);
	/* Check if SQ is stopped */
	if (nicvf_poll_reg(nic, qidx, NIC_QSET_SQ_0_7_STATUS, 21, 1, 0x01))
		return;
	/* Reset send queue */
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, NICVF_SQ_RESET);
}

static void nicvf_reclaim_rcv_queue(struct nicvf *nic,
				    struct queue_set *qs, int qidx)
{
	union nic_mbx mbx = {};

	/* Make sure all packets in the pipeline are written back into mem */
	mbx.msg.msg = NIC_MBOX_MSG_RQ_SW_SYNC;
	nicvf_send_msg_to_pf(nic, &mbx);
}
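/* A note on the reclaim helpers: send, completion and RBDR reclaim all
 * follow roughly the same disable-then-reset pattern (write 0 to the CFG
 * register, poll the status register until the HW reports idle, then set
 * the queue's RESET bit).  The receive queue above is the exception: it
 * is drained indirectly by asking the PF for a software sync
 * (NIC_MBOX_MSG_RQ_SW_SYNC) so in-flight packets are written back to
 * memory first.
 */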
static void nicvf_reclaim_cmp_queue(struct nicvf *nic,
				    struct queue_set *qs, int qidx)
{
	/* Disable timer threshold (doesn't get reset upon CQ reset) */
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG2, qidx, 0);
	/* Disable completion queue */
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, 0);
	/* Reset completion queue */
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, NICVF_CQ_RESET);
}

static void nicvf_reclaim_rbdr(struct nicvf *nic,
			       struct rbdr *rbdr, int qidx)
{
	u64 tmp, fifo_state;
	int timeout = 10;

	/* Save head and tail pointers for freeing up buffers */
	rbdr->head = nicvf_queue_reg_read(nic,
					  NIC_QSET_RBDR_0_1_HEAD,
					  qidx) >> 3;
	rbdr->tail = nicvf_queue_reg_read(nic,
					  NIC_QSET_RBDR_0_1_TAIL,
					  qidx) >> 3;

	/* If RBDR FIFO is in 'FAIL' state then do a reset first
	 * before reclaiming.
	 */
	fifo_state = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_STATUS0, qidx);
	if (((fifo_state >> 62) & 0x03) == 0x3)
		nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG,
				      qidx, NICVF_RBDR_RESET);

	/* Disable RBDR */
	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, 0);
	if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x00))
		return;
	while (1) {
		tmp = nicvf_queue_reg_read(nic,
					   NIC_QSET_RBDR_0_1_PREFETCH_STATUS,
					   qidx);
		if ((tmp & 0xFFFFFFFF) == ((tmp >> 32) & 0xFFFFFFFF))
			break;
		usleep_range(1000, 2000);
		timeout--;
		if (!timeout) {
			netdev_err(nic->netdev,
				   "Failed polling on prefetch status\n");
			return;
		}
	}
	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG,
			      qidx, NICVF_RBDR_RESET);

	if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x02))
		return;
	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, 0x00);
	if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x00))
		return;
}

void nicvf_config_vlan_stripping(struct nicvf *nic, netdev_features_t features)
{
	u64 rq_cfg;
	int sqs;

	rq_cfg = nicvf_queue_reg_read(nic, NIC_QSET_RQ_GEN_CFG, 0);

	/* Enable stripping of the first VLAN tag */
	if (features & NETIF_F_HW_VLAN_CTAG_RX)
		rq_cfg |= (1ULL << 25);
	else
		rq_cfg &= ~(1ULL << 25);
	nicvf_queue_reg_write(nic, NIC_QSET_RQ_GEN_CFG, 0, rq_cfg);

	/* Configure Secondary Qsets, if any */
	for (sqs = 0; sqs < nic->sqs_count; sqs++)
		if (nic->snicvf[sqs])
			nicvf_queue_reg_write(nic->snicvf[sqs],
					      NIC_QSET_RQ_GEN_CFG, 0, rq_cfg);
}

/* Configures receive queue */
static void nicvf_rcv_queue_config(struct nicvf *nic, struct queue_set *qs,
				   int qidx, bool enable)
{
	union nic_mbx mbx = {};
	struct rcv_queue *rq;
	struct rq_cfg rq_cfg;

	rq = &qs->rq[qidx];
	rq->enable = enable;

	/* Disable receive queue */
	nicvf_queue_reg_write(nic, NIC_QSET_RQ_0_7_CFG, qidx, 0);

	if (!rq->enable) {
		nicvf_reclaim_rcv_queue(nic, qs, qidx);
		return;
	}

	rq->cq_qs = qs->vnic_id;
	rq->cq_idx = qidx;
	rq->start_rbdr_qs = qs->vnic_id;
	rq->start_qs_rbdr_idx = qs->rbdr_cnt - 1;
	rq->cont_rbdr_qs = qs->vnic_id;
	rq->cont_qs_rbdr_idx = qs->rbdr_cnt - 1;
	/* All writes of RBDR data are to be loaded into L2 cache as well */
	rq->caching = 1;

	/* Send a mailbox msg to PF to config RQ */
	mbx.rq.msg = NIC_MBOX_MSG_RQ_CFG;
	mbx.rq.qs_num = qs->vnic_id;
	mbx.rq.rq_num = qidx;
	mbx.rq.cfg = (rq->caching << 26) | (rq->cq_qs << 19) |
		     (rq->cq_idx << 16) | (rq->cont_rbdr_qs << 9) |
		     (rq->cont_qs_rbdr_idx << 8) |
		     (rq->start_rbdr_qs << 1) | (rq->start_qs_rbdr_idx);
	nicvf_send_msg_to_pf(nic, &mbx);
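	/* The RQ_CFG word sent above is packed exactly as the shifts read:
	 * caching at bit 26, CQ qset/index at bits 19 and 16, the
	 * continue-RBDR qset/index at bits 9 and 8, and the start-RBDR
	 * qset/index at bits 1 and 0.  The two messages that follow set up
	 * backpressure and drop levels; the top bits written there (63/62)
	 * look like per-resource enable flags, but the authoritative layout
	 * lives with the PF driver, so treat this note as a reading aid
	 * rather than a register specification.
	 */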
	mbx.rq.msg = NIC_MBOX_MSG_RQ_BP_CFG;
	mbx.rq.cfg = (1ULL << 63) | (1ULL << 62) | (qs->vnic_id << 0);
	nicvf_send_msg_to_pf(nic, &mbx);

	/* RQ drop config
	 * Enable CQ drop to reserve sufficient CQEs for all tx packets
	 */
	mbx.rq.msg = NIC_MBOX_MSG_RQ_DROP_CFG;
	mbx.rq.cfg = (1ULL << 62) | (RQ_CQ_DROP << 8);
	nicvf_send_msg_to_pf(nic, &mbx);

	nicvf_queue_reg_write(nic, NIC_QSET_RQ_GEN_CFG, 0, 0x00);
	if (!nic->sqs_mode)
		nicvf_config_vlan_stripping(nic, nic->netdev->features);

	/* Enable Receive queue */
	rq_cfg.ena = 1;
	rq_cfg.tcp_ena = 0;
	nicvf_queue_reg_write(nic, NIC_QSET_RQ_0_7_CFG, qidx, *(u64 *)&rq_cfg);
}

/* Configures completion queue */
void nicvf_cmp_queue_config(struct nicvf *nic, struct queue_set *qs,
			    int qidx, bool enable)
{
	struct cmp_queue *cq;
	struct cq_cfg cq_cfg;

	cq = &qs->cq[qidx];
	cq->enable = enable;

	if (!cq->enable) {
		nicvf_reclaim_cmp_queue(nic, qs, qidx);
		return;
	}

	/* Reset completion queue */
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, NICVF_CQ_RESET);

	if (!cq->enable)
		return;

	spin_lock_init(&cq->lock);
	/* Set completion queue base address */
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_BASE,
			      qidx, (u64)(cq->dmem.phys_base));

	/* Enable Completion queue */
	cq_cfg.ena = 1;
	cq_cfg.reset = 0;
	cq_cfg.caching = 0;
	cq_cfg.qsize = CMP_QSIZE;
	cq_cfg.avg_con = 0;
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, *(u64 *)&cq_cfg);

	/* Set threshold value for interrupt generation */
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_THRESH, qidx, cq->thresh);
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG2,
			      qidx, CMP_QUEUE_TIMER_THRESH);
}

/* Configures transmit queue */
static void nicvf_snd_queue_config(struct nicvf *nic, struct queue_set *qs,
				   int qidx, bool enable)
{
	union nic_mbx mbx = {};
	struct snd_queue *sq;
	struct sq_cfg sq_cfg;

	sq = &qs->sq[qidx];
	sq->enable = enable;

	if (!sq->enable) {
		nicvf_reclaim_snd_queue(nic, qs, qidx);
		return;
	}

	/* Reset send queue */
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, NICVF_SQ_RESET);

	sq->cq_qs = qs->vnic_id;
	sq->cq_idx = qidx;

	/* Send a mailbox msg to PF to config SQ */
	mbx.sq.msg = NIC_MBOX_MSG_SQ_CFG;
	mbx.sq.qs_num = qs->vnic_id;
	mbx.sq.sq_num = qidx;
	mbx.sq.sqs_mode = nic->sqs_mode;
	mbx.sq.cfg = (sq->cq_qs << 3) | sq->cq_idx;
	nicvf_send_msg_to_pf(nic, &mbx);

	/* Set queue base address */
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_BASE,
			      qidx, (u64)(sq->dmem.phys_base));

	/* Enable send queue & set queue size */
	sq_cfg.ena = 1;
	sq_cfg.reset = 0;
	sq_cfg.ldwb = 0;
	sq_cfg.qsize = SND_QSIZE;
	sq_cfg.tstmp_bgx_intf = 0;
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, *(u64 *)&sq_cfg);

	/* Set threshold value for interrupt generation */
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_THRESH, qidx, sq->thresh);

	/* Set queue:cpu affinity for better load distribution */
	if (cpu_online(qidx)) {
		cpumask_set_cpu(qidx, &sq->affinity_mask);
		netif_set_xps_queue(nic->netdev,
				    &sq->affinity_mask, qidx);
	}
}
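/* A note on the *_cfg writes in this file: cq_cfg, sq_cfg and rbdr_cfg
 * are bit-field structs from q_struct.h that mirror the hardware
 * registers and are pushed out with a single 64-bit store (*(u64 *)&cfg).
 * The qsize values (CMP_QSIZE, SND_QSIZE, RBDR_SIZE) are encoded sizes
 * defined alongside the queue lengths in nicvf_queues.h, so resizing a
 * queue means keeping the *_QSIZE encoding and the corresponding *_LEN
 * value in step.
 */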
/* Configures receive buffer descriptor ring */
static void nicvf_rbdr_config(struct nicvf *nic, struct queue_set *qs,
			      int qidx, bool enable)
{
	struct rbdr *rbdr;
	struct rbdr_cfg rbdr_cfg;

	rbdr = &qs->rbdr[qidx];
	nicvf_reclaim_rbdr(nic, rbdr, qidx);
	if (!enable)
		return;

	/* Set descriptor base address */
	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_BASE,
			      qidx, (u64)(rbdr->dmem.phys_base));

	/* Enable RBDR & set queue size */
	/* Buffer size should be in multiples of 128 bytes */
	rbdr_cfg.ena = 1;
	rbdr_cfg.reset = 0;
	rbdr_cfg.ldwb = 0;
	rbdr_cfg.qsize = RBDR_SIZE;
	rbdr_cfg.avg_con = 0;
	rbdr_cfg.lines = rbdr->dma_size / 128;
	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG,
			      qidx, *(u64 *)&rbdr_cfg);

	/* Notify HW */
	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_DOOR,
			      qidx, qs->rbdr_len - 1);

	/* Set threshold value for interrupt generation */
	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_THRESH,
			      qidx, rbdr->thresh - 1);
}

/* Requests PF to assign and enable Qset */
void nicvf_qset_config(struct nicvf *nic, bool enable)
{
	union nic_mbx mbx = {};
	struct queue_set *qs = nic->qs;
	struct qs_cfg *qs_cfg;

	if (!qs) {
		netdev_warn(nic->netdev,
			    "Qset is still not allocated, don't init queues\n");
		return;
	}

	qs->enable = enable;
	qs->vnic_id = nic->vf_id;

	/* Send a mailbox msg to PF to config Qset */
	mbx.qs.msg = NIC_MBOX_MSG_QS_CFG;
	mbx.qs.num = qs->vnic_id;
	mbx.qs.sqs_count = nic->sqs_count;

	mbx.qs.cfg = 0;
	qs_cfg = (struct qs_cfg *)&mbx.qs.cfg;
	if (qs->enable) {
		qs_cfg->ena = 1;
#ifdef __BIG_ENDIAN
		qs_cfg->be = 1;
#endif
		qs_cfg->vnic = qs->vnic_id;
	}
	nicvf_send_msg_to_pf(nic, &mbx);
}

static void nicvf_free_resources(struct nicvf *nic)
{
	int qidx;
	struct queue_set *qs = nic->qs;

	/* Free receive buffer descriptor ring */
	for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
		nicvf_free_rbdr(nic, &qs->rbdr[qidx]);

	/* Free completion queue */
	for (qidx = 0; qidx < qs->cq_cnt; qidx++)
		nicvf_free_cmp_queue(nic, &qs->cq[qidx]);

	/* Free send queue */
	for (qidx = 0; qidx < qs->sq_cnt; qidx++)
		nicvf_free_snd_queue(nic, &qs->sq[qidx]);
}

static int nicvf_alloc_resources(struct nicvf *nic)
{
	int qidx;
	struct queue_set *qs = nic->qs;

	/* Alloc receive buffer descriptor ring */
	for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) {
		if (nicvf_init_rbdr(nic, &qs->rbdr[qidx], qs->rbdr_len,
				    DMA_BUFFER_LEN))
			goto alloc_fail;
	}

	/* Alloc send queue */
	for (qidx = 0; qidx < qs->sq_cnt; qidx++) {
		if (nicvf_init_snd_queue(nic, &qs->sq[qidx], qs->sq_len))
			goto alloc_fail;
	}

	/* Alloc completion queue */
	for (qidx = 0; qidx < qs->cq_cnt; qidx++) {
		if (nicvf_init_cmp_queue(nic, &qs->cq[qidx], qs->cq_len))
			goto alloc_fail;
	}

	return 0;
alloc_fail:
	nicvf_free_resources(nic);
	return -ENOMEM;
}

int nicvf_set_qset_resources(struct nicvf *nic)
{
	struct queue_set *qs;

	qs = devm_kzalloc(&nic->pdev->dev, sizeof(*qs), GFP_KERNEL);
	if (!qs)
		return -ENOMEM;
	nic->qs = qs;

	/* Set count of each queue */
	qs->rbdr_cnt = RBDR_CNT;
	qs->rq_cnt = RCV_QUEUE_CNT;
	qs->sq_cnt = SND_QUEUE_CNT;
	qs->cq_cnt = CMP_QUEUE_CNT;

	/* Set queue lengths */
	qs->rbdr_len = RCV_BUF_COUNT;
	qs->sq_len = SND_QUEUE_LEN;
	qs->cq_len = CMP_QUEUE_LEN;

	nic->rx_queues = qs->rq_cnt;
	nic->tx_queues = qs->sq_cnt;

	return 0;
}
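/* Queue bring-up and teardown order matters: on enable, the send,
 * completion and buffer descriptor rings are configured before the
 * receive queues that reference them; on disable, the receive side is
 * torn down first so nothing new lands in rings that are about to be
 * reclaimed and freed.  nicvf_config_data_transfer() below encodes
 * exactly that ordering.
 */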
int nicvf_config_data_transfer(struct nicvf *nic, bool enable)
{
	bool disable = false;
	struct queue_set *qs = nic->qs;
	int qidx;

	if (!qs)
		return 0;

	if (enable) {
		if (nicvf_alloc_resources(nic))
			return -ENOMEM;

		for (qidx = 0; qidx < qs->sq_cnt; qidx++)
			nicvf_snd_queue_config(nic, qs, qidx, enable);
		for (qidx = 0; qidx < qs->cq_cnt; qidx++)
			nicvf_cmp_queue_config(nic, qs, qidx, enable);
		for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
			nicvf_rbdr_config(nic, qs, qidx, enable);
		for (qidx = 0; qidx < qs->rq_cnt; qidx++)
			nicvf_rcv_queue_config(nic, qs, qidx, enable);
	} else {
		for (qidx = 0; qidx < qs->rq_cnt; qidx++)
			nicvf_rcv_queue_config(nic, qs, qidx, disable);
		for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
			nicvf_rbdr_config(nic, qs, qidx, disable);
		for (qidx = 0; qidx < qs->sq_cnt; qidx++)
			nicvf_snd_queue_config(nic, qs, qidx, disable);
		for (qidx = 0; qidx < qs->cq_cnt; qidx++)
			nicvf_cmp_queue_config(nic, qs, qidx, disable);

		nicvf_free_resources(nic);
	}

	return 0;
}

/* Get a free descriptor from SQ
 * returns the index of the first allocated descriptor
 */
static inline int nicvf_get_sq_desc(struct snd_queue *sq, int desc_cnt)
{
	int qentry;

	qentry = sq->tail;
	atomic_sub(desc_cnt, &sq->free_cnt);
	sq->tail += desc_cnt;
	sq->tail &= (sq->dmem.q_len - 1);

	return qentry;
}

/* Free descriptor back to SQ for future use */
void nicvf_put_sq_desc(struct snd_queue *sq, int desc_cnt)
{
	atomic_add(desc_cnt, &sq->free_cnt);
	sq->head += desc_cnt;
	sq->head &= (sq->dmem.q_len - 1);
}

static inline int nicvf_get_nxt_sqentry(struct snd_queue *sq, int qentry)
{
	qentry++;
	qentry &= (sq->dmem.q_len - 1);
	return qentry;
}

void nicvf_sq_enable(struct nicvf *nic, struct snd_queue *sq, int qidx)
{
	u64 sq_cfg;

	sq_cfg = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CFG, qidx);
	sq_cfg |= NICVF_SQ_EN;
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, sq_cfg);
	/* Ring doorbell so that H/W restarts processing SQEs */
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR, qidx, 0);
}

void nicvf_sq_disable(struct nicvf *nic, int qidx)
{
	u64 sq_cfg;

	sq_cfg = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CFG, qidx);
	sq_cfg &= ~NICVF_SQ_EN;
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, sq_cfg);
}

void nicvf_sq_free_used_descs(struct net_device *netdev, struct snd_queue *sq,
			      int qidx)
{
	u64 head, tail;
	struct sk_buff *skb;
	struct nicvf *nic = netdev_priv(netdev);
	struct sq_hdr_subdesc *hdr;

	head = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_HEAD, qidx) >> 4;
	tail = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_TAIL, qidx) >> 4;
	while (sq->head != head) {
		hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, sq->head);
		if (hdr->subdesc_type != SQ_DESC_TYPE_HEADER) {
			nicvf_put_sq_desc(sq, 1);
			continue;
		}
		skb = (struct sk_buff *)sq->skbuff[sq->head];
		if (skb)
			dev_kfree_skb_any(skb);
		atomic64_add(1, (atomic64_t *)&netdev->stats.tx_packets);
		atomic64_add(hdr->tot_len,
			     (atomic64_t *)&netdev->stats.tx_bytes);
		nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1);
	}
}
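/* Send descriptor bookkeeping, in brief: every packet occupies one HEADER
 * subdescriptor followed by one GATHER subdescriptor per buffer, and
 * free_cnt/head/tail count those slots rather than packets.  For example,
 * a linear skb takes MIN_SQ_DESC_PER_PKT_XMIT slots (header plus one
 * gather), while an skb with three page fragments takes three more.  The
 * hardware HEAD/TAIL registers are read back with a ">> 4" above,
 * presumably because each subdescriptor is 16 bytes wide.
 */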
/* Calculate the number of SQ subdescriptors needed to transmit all
 * segments of this TSO packet.
 * Taken from 'Tilera network driver' with a minor modification.
 */
static int nicvf_tso_count_subdescs(struct sk_buff *skb)
{
	struct skb_shared_info *sh = skb_shinfo(skb);
	unsigned int sh_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
	unsigned int data_len = skb->len - sh_len;
	unsigned int p_len = sh->gso_size;
	long f_id = -1;	/* id of the current fragment */
	long f_size = skb_headlen(skb) - sh_len;  /* current fragment size */
	long f_used = 0;  /* bytes used from the current fragment */
	long n;  /* size of the current piece of payload */
	int num_edescs = 0;
	int segment;

	for (segment = 0; segment < sh->gso_segs; segment++) {
		unsigned int p_used = 0;

		/* One edesc for header and for each piece of the payload. */
		for (num_edescs++; p_used < p_len; num_edescs++) {
			/* Advance as needed. */
			while (f_used >= f_size) {
				f_id++;
				f_size = skb_frag_size(&sh->frags[f_id]);
				f_used = 0;
			}

			/* Use bytes from the current fragment. */
			n = p_len - p_used;
			if (n > f_size - f_used)
				n = f_size - f_used;
			f_used += n;
			p_used += n;
		}

		/* The last segment may be less than gso_size. */
		data_len -= p_len;
		if (data_len < p_len)
			p_len = data_len;
	}

	/* '+ gso_segs' for SQ_HDR_SUBDESCs for each segment */
	return num_edescs + sh->gso_segs;
}

/* Get the number of SQ descriptors needed to xmit this skb */
static int nicvf_sq_subdesc_required(struct nicvf *nic, struct sk_buff *skb)
{
	int subdesc_cnt = MIN_SQ_DESC_PER_PKT_XMIT;

	if (skb_shinfo(skb)->gso_size && !nic->hw_tso) {
		subdesc_cnt = nicvf_tso_count_subdescs(skb);
		return subdesc_cnt;
	}

	if (skb_shinfo(skb)->nr_frags)
		subdesc_cnt += skb_shinfo(skb)->nr_frags;

	return subdesc_cnt;
}
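/* A worked example for the software TSO path (illustrative numbers only):
 * an skb with gso_size 1400 and 5600 bytes of TCP payload spread over a
 * couple of fragments yields 4 segments; nicvf_tso_count_subdescs() then
 * charges each segment one header subdescriptor plus one gather entry for
 * the copied protocol header and one per payload piece it pulls from the
 * fragments.  When the silicon can do TSO itself (nic->hw_tso), the skb
 * is described once and the count falls back to the non-TSO formula.
 */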
/* Add SQ HEADER subdescriptor.
 * First subdescriptor for every send descriptor.
 */
static inline void
nicvf_sq_add_hdr_subdesc(struct nicvf *nic, struct snd_queue *sq, int qentry,
			 int subdesc_cnt, struct sk_buff *skb, int len)
{
	int proto;
	struct sq_hdr_subdesc *hdr;

	hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, qentry);
	sq->skbuff[qentry] = (u64)skb;

	memset(hdr, 0, SND_QUEUE_DESC_SIZE);
	hdr->subdesc_type = SQ_DESC_TYPE_HEADER;
	/* Enable notification via CQE after processing SQE */
	hdr->post_cqe = 1;
	/* No of subdescriptors following this */
	hdr->subdesc_cnt = subdesc_cnt;
	hdr->tot_len = len;

	/* Offload checksum calculation to HW */
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		hdr->csum_l3 = 1; /* Enable IP csum calculation */
		hdr->l3_offset = skb_network_offset(skb);
		hdr->l4_offset = skb_transport_offset(skb);

		proto = ip_hdr(skb)->protocol;
		switch (proto) {
		case IPPROTO_TCP:
			hdr->csum_l4 = SEND_L4_CSUM_TCP;
			break;
		case IPPROTO_UDP:
			hdr->csum_l4 = SEND_L4_CSUM_UDP;
			break;
		case IPPROTO_SCTP:
			hdr->csum_l4 = SEND_L4_CSUM_SCTP;
			break;
		}
	}

	if (nic->hw_tso && skb_shinfo(skb)->gso_size) {
		hdr->tso = 1;
		hdr->tso_start = skb_transport_offset(skb) + tcp_hdrlen(skb);
		hdr->tso_max_paysize = skb_shinfo(skb)->gso_size;
		/* For non-tunneled pkts, point this to L2 ethertype */
		hdr->inner_l3_offset = skb_network_offset(skb) - 2;
		nic->drv_stats.tx_tso++;
	}
}

/* SQ GATHER subdescriptor
 * Must follow HDR descriptor
 */
static inline void nicvf_sq_add_gather_subdesc(struct snd_queue *sq, int qentry,
					       int size, u64 data)
{
	struct sq_gather_subdesc *gather;

	qentry &= (sq->dmem.q_len - 1);
	gather = (struct sq_gather_subdesc *)GET_SQ_DESC(sq, qentry);

	memset(gather, 0, SND_QUEUE_DESC_SIZE);
	gather->subdesc_type = SQ_DESC_TYPE_GATHER;
	gather->ld_type = NIC_SEND_LD_TYPE_E_LDD;
	gather->size = size;
	gather->addr = data;
}

/* Segment a TSO packet into 'gso_size' segments and append
 * them to SQ for transfer
 */
static int nicvf_sq_append_tso(struct nicvf *nic, struct snd_queue *sq,
			       int sq_num, int qentry, struct sk_buff *skb)
{
	struct tso_t tso;
	int seg_subdescs = 0, desc_cnt = 0;
	int seg_len, total_len, data_left;
	int hdr_qentry = qentry;
	int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);

	tso_start(skb, &tso);
	total_len = skb->len - hdr_len;
	while (total_len > 0) {
		char *hdr;

		/* Save Qentry for adding HDR_SUBDESC at the end */
		hdr_qentry = qentry;

		data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len);
		total_len -= data_left;

		/* Add segment's header */
		qentry = nicvf_get_nxt_sqentry(sq, qentry);
		hdr = sq->tso_hdrs + qentry * TSO_HEADER_SIZE;
		tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0);
		nicvf_sq_add_gather_subdesc(sq, qentry, hdr_len,
					    sq->tso_hdrs_phys +
					    qentry * TSO_HEADER_SIZE);
		/* HDR_SUBDESC + GATHER */
		seg_subdescs = 2;
		seg_len = hdr_len;

		/* Add segment's payload fragments */
		while (data_left > 0) {
			int size;

			size = min_t(int, tso.size, data_left);

			qentry = nicvf_get_nxt_sqentry(sq, qentry);
			nicvf_sq_add_gather_subdesc(sq, qentry, size,
						    virt_to_phys(tso.data));
			seg_subdescs++;
			seg_len += size;

			data_left -= size;
			tso_build_data(skb, &tso, size);
		}
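		/* Close out this segment: the HEADER subdescriptor added
		 * below covers the gather entries queued above, and the
		 * per-segment skbuff slot is immediately cleared - only the
		 * last segment's slot keeps the skb pointer (set after the
		 * loop) so the skb is freed exactly once when its final
		 * completion arrives.
		 */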
		nicvf_sq_add_hdr_subdesc(nic, sq, hdr_qentry,
					 seg_subdescs - 1, skb, seg_len);
		sq->skbuff[hdr_qentry] = (u64)NULL;
		qentry = nicvf_get_nxt_sqentry(sq, qentry);

		desc_cnt += seg_subdescs;
	}
	/* Save SKB in the last segment for freeing */
	sq->skbuff[hdr_qentry] = (u64)skb;

	/* make sure all memory stores are done before ringing doorbell */
	smp_wmb();

	/* Inform HW to xmit all TSO segments */
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR,
			      sq_num, desc_cnt);
	nic->drv_stats.tx_tso++;
	return 1;
}

/* Append an skb to a SQ for packet transfer. */
int nicvf_sq_append_skb(struct nicvf *nic, struct sk_buff *skb)
{
	int i, size;
	int subdesc_cnt;
	int sq_num, qentry;
	struct queue_set *qs;
	struct snd_queue *sq;

	sq_num = skb_get_queue_mapping(skb);
	if (sq_num >= MAX_SND_QUEUES_PER_QS) {
		/* Get secondary Qset's SQ structure */
		i = sq_num / MAX_SND_QUEUES_PER_QS;
		if (!nic->snicvf[i - 1]) {
			netdev_warn(nic->netdev,
				    "Secondary Qset#%d's ptr not initialized\n",
				    i - 1);
			return 1;
		}
		nic = (struct nicvf *)nic->snicvf[i - 1];
		sq_num = sq_num % MAX_SND_QUEUES_PER_QS;
	}

	qs = nic->qs;
	sq = &qs->sq[sq_num];

	subdesc_cnt = nicvf_sq_subdesc_required(nic, skb);
	if (subdesc_cnt > atomic_read(&sq->free_cnt))
		goto append_fail;

	qentry = nicvf_get_sq_desc(sq, subdesc_cnt);

	/* Check if it's a TSO packet */
	if (skb_shinfo(skb)->gso_size && !nic->hw_tso)
		return nicvf_sq_append_tso(nic, sq, sq_num, qentry, skb);

	/* Add SQ header subdesc */
	nicvf_sq_add_hdr_subdesc(nic, sq, qentry, subdesc_cnt - 1,
				 skb, skb->len);

	/* Add SQ gather subdescs */
	qentry = nicvf_get_nxt_sqentry(sq, qentry);
	size = skb_is_nonlinear(skb) ? skb_headlen(skb) : skb->len;
	nicvf_sq_add_gather_subdesc(sq, qentry, size, virt_to_phys(skb->data));

	/* Check for scattered buffer */
	if (!skb_is_nonlinear(skb))
		goto doorbell;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag;

		frag = &skb_shinfo(skb)->frags[i];

		qentry = nicvf_get_nxt_sqentry(sq, qentry);
		size = skb_frag_size(frag);
		nicvf_sq_add_gather_subdesc(sq, qentry, size,
					    virt_to_phys(
					    skb_frag_address(frag)));
	}

doorbell:
	/* make sure all memory stores are done before ringing doorbell */
	smp_wmb();

	/* Inform HW to xmit new packet */
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR,
			      sq_num, subdesc_cnt);
	return 1;

append_fail:
	/* Use original PCI dev for debug log */
	nic = nic->pnicvf;
	netdev_dbg(nic->netdev, "Not enough SQ descriptors to xmit pkt\n");
	return 0;
}

static inline unsigned frag_num(unsigned i)
{
#ifdef __BIG_ENDIAN
	return (i & ~3) + 3 - (i & 3);
#else
	return i;
#endif
}

/* Returns SKB for a received packet */
struct sk_buff *nicvf_get_rcv_skb(struct nicvf *nic, struct cqe_rx_t *cqe_rx)
{
	int frag;
	int payload_len = 0;
	struct sk_buff *skb = NULL;
	struct sk_buff *skb_frag = NULL;
	struct sk_buff *prev_frag = NULL;
	u16 *rb_lens = NULL;
	u64 *rb_ptrs = NULL;

	rb_lens = (void *)cqe_rx + (3 * sizeof(u64));
	rb_ptrs = (void *)cqe_rx + (6 * sizeof(u64));

	netdev_dbg(nic->netdev, "%s rb_cnt %d rb0_ptr %llx rb0_sz %d\n",
		   __func__, cqe_rx->rb_cnt, cqe_rx->rb0_ptr, cqe_rx->rb0_sz);

	for (frag = 0; frag < cqe_rx->rb_cnt; frag++) {
		payload_len = rb_lens[frag_num(frag)];
		if (!frag) {
			/* First fragment */
			skb = nicvf_rb_ptr_to_skb(nic,
						  *rb_ptrs - cqe_rx->align_pad,
						  payload_len);
			if (!skb)
				return NULL;
			skb_reserve(skb, cqe_rx->align_pad);
			skb_put(skb, payload_len);
		} else {
			/* Add fragments */
			skb_frag = nicvf_rb_ptr_to_skb(nic, *rb_ptrs,
						       payload_len);
			if (!skb_frag) {
				dev_kfree_skb(skb);
				return NULL;
			}

			if (!skb_shinfo(skb)->frag_list)
				skb_shinfo(skb)->frag_list = skb_frag;
			else
				prev_frag->next = skb_frag;

			prev_frag = skb_frag;
			skb->len += payload_len;
			skb->data_len += payload_len;
			skb_frag->len = payload_len;
		}
		/* Next buffer pointer */
		rb_ptrs++;
	}
	return skb;
}

static u64 nicvf_int_type_to_mask(int int_type, int q_idx)
{
	u64 reg_val;

	switch (int_type) {
	case NICVF_INTR_CQ:
		reg_val = ((1ULL << q_idx) << NICVF_INTR_CQ_SHIFT);
		break;
	case NICVF_INTR_SQ:
		reg_val = ((1ULL << q_idx) << NICVF_INTR_SQ_SHIFT);
		break;
	case NICVF_INTR_RBDR:
		reg_val = ((1ULL << q_idx) << NICVF_INTR_RBDR_SHIFT);
		break;
	case NICVF_INTR_PKT_DROP:
		reg_val = (1ULL << NICVF_INTR_PKT_DROP_SHIFT);
		break;
	case NICVF_INTR_TCP_TIMER:
		reg_val = (1ULL << NICVF_INTR_TCP_TIMER_SHIFT);
		break;
	case NICVF_INTR_MBOX:
		reg_val = (1ULL << NICVF_INTR_MBOX_SHIFT);
		break;
	case NICVF_INTR_QS_ERR:
		reg_val = (1ULL << NICVF_INTR_QS_ERR_SHIFT);
		break;
	default:
		reg_val = 0;
	}

	return reg_val;
}
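/* The mask built above drives all of the interrupt helpers below:
 * writing it to ENA_W1S enables a source, ENA_W1C disables it, and
 * NIC_VF_INT acknowledges a pending bit.  CQ, SQ and RBDR interrupts get
 * one bit per queue at their respective shifts, while mailbox, packet
 * drop, TCP timer and qset error are single bits.
 */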
/* Enable interrupt */
void nicvf_enable_intr(struct nicvf *nic, int int_type, int q_idx)
{
	u64 mask = nicvf_int_type_to_mask(int_type, q_idx);

	if (!mask) {
		netdev_dbg(nic->netdev,
			   "Failed to enable interrupt: unknown type\n");
		return;
	}
	nicvf_reg_write(nic, NIC_VF_ENA_W1S,
			nicvf_reg_read(nic, NIC_VF_ENA_W1S) | mask);
}

/* Disable interrupt */
void nicvf_disable_intr(struct nicvf *nic, int int_type, int q_idx)
{
	u64 mask = nicvf_int_type_to_mask(int_type, q_idx);

	if (!mask) {
		netdev_dbg(nic->netdev,
			   "Failed to disable interrupt: unknown type\n");
		return;
	}

	nicvf_reg_write(nic, NIC_VF_ENA_W1C, mask);
}

/* Clear interrupt */
void nicvf_clear_intr(struct nicvf *nic, int int_type, int q_idx)
{
	u64 mask = nicvf_int_type_to_mask(int_type, q_idx);

	if (!mask) {
		netdev_dbg(nic->netdev,
			   "Failed to clear interrupt: unknown type\n");
		return;
	}

	nicvf_reg_write(nic, NIC_VF_INT, mask);
}

/* Check if interrupt is enabled */
int nicvf_is_intr_enabled(struct nicvf *nic, int int_type, int q_idx)
{
	u64 mask = nicvf_int_type_to_mask(int_type, q_idx);
	/* If the interrupt type is unknown, treat it as disabled. */
	if (!mask) {
		netdev_dbg(nic->netdev,
			   "Failed to check interrupt enable: unknown type\n");
		return 0;
	}

	return mask & nicvf_reg_read(nic, NIC_VF_ENA_W1S);
}

void nicvf_update_rq_stats(struct nicvf *nic, int rq_idx)
{
	struct rcv_queue *rq;

#define GET_RQ_STATS(reg) \
	nicvf_reg_read(nic, NIC_QSET_RQ_0_7_STAT_0_1 |\
			    (rq_idx << NIC_Q_NUM_SHIFT) | (reg << 3))

	rq = &nic->qs->rq[rq_idx];
	rq->stats.bytes = GET_RQ_STATS(RQ_SQ_STATS_OCTS);
	rq->stats.pkts = GET_RQ_STATS(RQ_SQ_STATS_PKTS);
}

void nicvf_update_sq_stats(struct nicvf *nic, int sq_idx)
{
	struct snd_queue *sq;

#define GET_SQ_STATS(reg) \
	nicvf_reg_read(nic, NIC_QSET_SQ_0_7_STAT_0_1 |\
			    (sq_idx << NIC_Q_NUM_SHIFT) | (reg << 3))

	sq = &nic->qs->sq[sq_idx];
	sq->stats.bytes = GET_SQ_STATS(RQ_SQ_STATS_OCTS);
	sq->stats.pkts = GET_SQ_STATS(RQ_SQ_STATS_PKTS);
}

/* Check for errors in the receive completion queue entry */
int nicvf_check_cqe_rx_errs(struct nicvf *nic, struct cqe_rx_t *cqe_rx)
{
	struct nicvf_hw_stats *stats = &nic->hw_stats;

	if (!cqe_rx->err_level && !cqe_rx->err_opcode)
		return 0;

	if (netif_msg_rx_err(nic))
		netdev_err(nic->netdev,
			   "%s: RX error CQE err_level 0x%x err_opcode 0x%x\n",
			   nic->netdev->name,
			   cqe_rx->err_level, cqe_rx->err_opcode);

	switch (cqe_rx->err_opcode) {
	case CQ_RX_ERROP_RE_PARTIAL:
		stats->rx_bgx_truncated_pkts++;
		break;
	case CQ_RX_ERROP_RE_JABBER:
		stats->rx_jabber_errs++;
		break;
	case CQ_RX_ERROP_RE_FCS:
		stats->rx_fcs_errs++;
		break;
	case CQ_RX_ERROP_RE_RX_CTL:
		stats->rx_bgx_errs++;
		break;
	case CQ_RX_ERROP_PREL2_ERR:
		stats->rx_prel2_errs++;
		break;
	case CQ_RX_ERROP_L2_MAL:
		stats->rx_l2_hdr_malformed++;
		break;
	case CQ_RX_ERROP_L2_OVERSIZE:
		stats->rx_oversize++;
		break;
	case CQ_RX_ERROP_L2_UNDERSIZE:
		stats->rx_undersize++;
		break;
	case CQ_RX_ERROP_L2_LENMISM:
		stats->rx_l2_len_mismatch++;
		break;
	case CQ_RX_ERROP_L2_PCLP:
		stats->rx_l2_pclp++;
		break;
	case CQ_RX_ERROP_IP_NOT:
		stats->rx_ip_ver_errs++;
		break;
	case CQ_RX_ERROP_IP_CSUM_ERR:
		stats->rx_ip_csum_errs++;
		break;
	case CQ_RX_ERROP_IP_MAL:
		stats->rx_ip_hdr_malformed++;
		break;
	case CQ_RX_ERROP_IP_MALD:
		stats->rx_ip_payload_malformed++;
		break;
	case CQ_RX_ERROP_IP_HOP:
		stats->rx_ip_ttl_errs++;
		break;
	case CQ_RX_ERROP_L3_PCLP:
		stats->rx_l3_pclp++;
		break;
	case CQ_RX_ERROP_L4_MAL:
		stats->rx_l4_malformed++;
		break;
	case CQ_RX_ERROP_L4_CHK:
		stats->rx_l4_csum_errs++;
		break;
	case CQ_RX_ERROP_UDP_LEN:
		stats->rx_udp_len_errs++;
		break;
	case CQ_RX_ERROP_L4_PORT:
		stats->rx_l4_port_errs++;
		break;
	case CQ_RX_ERROP_TCP_FLAG:
		stats->rx_tcp_flag_errs++;
		break;
	case CQ_RX_ERROP_TCP_OFFSET:
		stats->rx_tcp_offset_errs++;
		break;
	case CQ_RX_ERROP_L4_PCLP:
		stats->rx_l4_pclp++;
		break;
	case CQ_RX_ERROP_RBDR_TRUNC:
		stats->rx_truncated_pkts++;
		break;
	}

	return 1;
}

/* Check for errors in the send completion queue entry */
int nicvf_check_cqe_tx_errs(struct nicvf *nic,
			    struct cmp_queue *cq, struct cqe_send_t *cqe_tx)
{
	struct cmp_queue_stats *stats = &cq->stats;

	switch (cqe_tx->send_status) {
	case CQ_TX_ERROP_GOOD:
		stats->tx.good++;
		return 0;
	case CQ_TX_ERROP_DESC_FAULT:
		stats->tx.desc_fault++;
		break;
	case CQ_TX_ERROP_HDR_CONS_ERR:
		stats->tx.hdr_cons_err++;
		break;
	case CQ_TX_ERROP_SUBDC_ERR:
		stats->tx.subdesc_err++;
		break;
	case CQ_TX_ERROP_IMM_SIZE_OFLOW:
		stats->tx.imm_size_oflow++;
		break;
	case CQ_TX_ERROP_DATA_SEQUENCE_ERR:
		stats->tx.data_seq_err++;
		break;
	case CQ_TX_ERROP_MEM_SEQUENCE_ERR:
		stats->tx.mem_seq_err++;
		break;
	case CQ_TX_ERROP_LOCK_VIOL:
		stats->tx.lock_viol++;
		break;
	case CQ_TX_ERROP_DATA_FAULT:
		stats->tx.data_fault++;
		break;
	case CQ_TX_ERROP_TSTMP_CONFLICT:
		stats->tx.tstmp_conflict++;
		break;
	case CQ_TX_ERROP_TSTMP_TIMEOUT:
		stats->tx.tstmp_timeout++;
		break;
	case CQ_TX_ERROP_MEM_FAULT:
		stats->tx.mem_fault++;
		break;
	case CQ_TX_ERROP_CK_OVERLAP:
		stats->tx.csum_overlap++;
		break;
	case CQ_TX_ERROP_CK_OFLOW:
		stats->tx.csum_overflow++;
		break;
	}

	return 1;
}