1 /* 2 * Copyright (C) 2015 Cavium, Inc. 3 * 4 * This program is free software; you can redistribute it and/or modify it 5 * under the terms of version 2 of the GNU General Public License 6 * as published by the Free Software Foundation. 7 */ 8 9 #include <linux/pci.h> 10 #include <linux/netdevice.h> 11 #include <linux/ip.h> 12 #include <linux/etherdevice.h> 13 #include <net/ip.h> 14 #include <net/tso.h> 15 16 #include "nic_reg.h" 17 #include "nic.h" 18 #include "q_struct.h" 19 #include "nicvf_queues.h" 20 21 struct rbuf_info { 22 struct page *page; 23 void *data; 24 u64 offset; 25 }; 26 27 #define GET_RBUF_INFO(x) ((struct rbuf_info *)(x - NICVF_RCV_BUF_ALIGN_BYTES)) 28 29 /* Poll a register for a specific value */ 30 static int nicvf_poll_reg(struct nicvf *nic, int qidx, 31 u64 reg, int bit_pos, int bits, int val) 32 { 33 u64 bit_mask; 34 u64 reg_val; 35 int timeout = 10; 36 37 bit_mask = (1ULL << bits) - 1; 38 bit_mask = (bit_mask << bit_pos); 39 40 while (timeout) { 41 reg_val = nicvf_queue_reg_read(nic, reg, qidx); 42 if (((reg_val & bit_mask) >> bit_pos) == val) 43 return 0; 44 usleep_range(1000, 2000); 45 timeout--; 46 } 47 netdev_err(nic->netdev, "Poll on reg 0x%llx failed\n", reg); 48 return 1; 49 } 50 51 /* Allocate memory for a queue's descriptors */ 52 static int nicvf_alloc_q_desc_mem(struct nicvf *nic, struct q_desc_mem *dmem, 53 int q_len, int desc_size, int align_bytes) 54 { 55 dmem->q_len = q_len; 56 dmem->size = (desc_size * q_len) + align_bytes; 57 /* Save address, need it while freeing */ 58 dmem->unalign_base = dma_zalloc_coherent(&nic->pdev->dev, dmem->size, 59 &dmem->dma, GFP_KERNEL); 60 if (!dmem->unalign_base) 61 return -ENOMEM; 62 63 /* Align memory address for 'align_bytes' */ 64 dmem->phys_base = NICVF_ALIGNED_ADDR((u64)dmem->dma, align_bytes); 65 dmem->base = dmem->unalign_base + (dmem->phys_base - dmem->dma); 66 return 0; 67 } 68 69 /* Free queue's descriptor memory */ 70 static void nicvf_free_q_desc_mem(struct nicvf *nic, struct q_desc_mem *dmem) 71 { 72 if (!dmem) 73 return; 74 75 dma_free_coherent(&nic->pdev->dev, dmem->size, 76 dmem->unalign_base, dmem->dma); 77 dmem->unalign_base = NULL; 78 dmem->base = NULL; 79 } 80 81 /* Allocate buffer for packet reception 82 * HW returns memory address where packet is DMA'ed but not a pointer 83 * into RBDR ring, so save buffer address at the start of fragment and 84 * align the start address to a cache aligned address 85 */ 86 static inline int nicvf_alloc_rcv_buffer(struct nicvf *nic, gfp_t gfp, 87 u32 buf_len, u64 **rbuf) 88 { 89 u64 data; 90 struct rbuf_info *rinfo; 91 int order = get_order(buf_len); 92 93 /* Check if request can be accomodated in previous allocated page */ 94 if (nic->rb_page) { 95 if ((nic->rb_page_offset + buf_len + buf_len) > 96 (PAGE_SIZE << order)) { 97 nic->rb_page = NULL; 98 } else { 99 nic->rb_page_offset += buf_len; 100 get_page(nic->rb_page); 101 } 102 } 103 104 /* Allocate a new page */ 105 if (!nic->rb_page) { 106 nic->rb_page = alloc_pages(gfp | __GFP_COMP | __GFP_NOWARN, 107 order); 108 if (!nic->rb_page) { 109 netdev_err(nic->netdev, 110 "Failed to allocate new rcv buffer\n"); 111 return -ENOMEM; 112 } 113 nic->rb_page_offset = 0; 114 } 115 116 data = (u64)page_address(nic->rb_page) + nic->rb_page_offset; 117 118 /* Align buffer addr to cache line i.e 128 bytes */ 119 rinfo = (struct rbuf_info *)(data + NICVF_RCV_BUF_ALIGN_LEN(data)); 120 /* Save page address for reference updation */ 121 rinfo->page = nic->rb_page; 122 /* Store start address for later retrieval */ 123 rinfo->data = (void *)data; 124 /* Store alignment offset */ 125 rinfo->offset = NICVF_RCV_BUF_ALIGN_LEN(data); 126 127 data += rinfo->offset; 128 129 /* Give next aligned address to hw for DMA */ 130 *rbuf = (u64 *)(data + NICVF_RCV_BUF_ALIGN_BYTES); 131 return 0; 132 } 133 134 /* Retrieve actual buffer start address and build skb for received packet */ 135 static struct sk_buff *nicvf_rb_ptr_to_skb(struct nicvf *nic, 136 u64 rb_ptr, int len) 137 { 138 struct sk_buff *skb; 139 struct rbuf_info *rinfo; 140 141 rb_ptr = (u64)phys_to_virt(rb_ptr); 142 /* Get buffer start address and alignment offset */ 143 rinfo = GET_RBUF_INFO(rb_ptr); 144 145 /* Now build an skb to give to stack */ 146 skb = build_skb(rinfo->data, RCV_FRAG_LEN); 147 if (!skb) { 148 put_page(rinfo->page); 149 return NULL; 150 } 151 152 /* Set correct skb->data */ 153 skb_reserve(skb, rinfo->offset + NICVF_RCV_BUF_ALIGN_BYTES); 154 155 prefetch((void *)rb_ptr); 156 return skb; 157 } 158 159 /* Allocate RBDR ring and populate receive buffers */ 160 static int nicvf_init_rbdr(struct nicvf *nic, struct rbdr *rbdr, 161 int ring_len, int buf_size) 162 { 163 int idx; 164 u64 *rbuf; 165 struct rbdr_entry_t *desc; 166 int err; 167 168 err = nicvf_alloc_q_desc_mem(nic, &rbdr->dmem, ring_len, 169 sizeof(struct rbdr_entry_t), 170 NICVF_RCV_BUF_ALIGN_BYTES); 171 if (err) 172 return err; 173 174 rbdr->desc = rbdr->dmem.base; 175 /* Buffer size has to be in multiples of 128 bytes */ 176 rbdr->dma_size = buf_size; 177 rbdr->enable = true; 178 rbdr->thresh = RBDR_THRESH; 179 180 nic->rb_page = NULL; 181 for (idx = 0; idx < ring_len; idx++) { 182 err = nicvf_alloc_rcv_buffer(nic, GFP_KERNEL, RCV_FRAG_LEN, 183 &rbuf); 184 if (err) 185 return err; 186 187 desc = GET_RBDR_DESC(rbdr, idx); 188 desc->buf_addr = virt_to_phys(rbuf) >> NICVF_RCV_BUF_ALIGN; 189 } 190 return 0; 191 } 192 193 /* Free RBDR ring and its receive buffers */ 194 static void nicvf_free_rbdr(struct nicvf *nic, struct rbdr *rbdr) 195 { 196 int head, tail; 197 u64 buf_addr; 198 struct rbdr_entry_t *desc; 199 struct rbuf_info *rinfo; 200 201 if (!rbdr) 202 return; 203 204 rbdr->enable = false; 205 if (!rbdr->dmem.base) 206 return; 207 208 head = rbdr->head; 209 tail = rbdr->tail; 210 211 /* Free SKBs */ 212 while (head != tail) { 213 desc = GET_RBDR_DESC(rbdr, head); 214 buf_addr = desc->buf_addr << NICVF_RCV_BUF_ALIGN; 215 rinfo = GET_RBUF_INFO((u64)phys_to_virt(buf_addr)); 216 put_page(rinfo->page); 217 head++; 218 head &= (rbdr->dmem.q_len - 1); 219 } 220 /* Free SKB of tail desc */ 221 desc = GET_RBDR_DESC(rbdr, tail); 222 buf_addr = desc->buf_addr << NICVF_RCV_BUF_ALIGN; 223 rinfo = GET_RBUF_INFO((u64)phys_to_virt(buf_addr)); 224 put_page(rinfo->page); 225 226 /* Free RBDR ring */ 227 nicvf_free_q_desc_mem(nic, &rbdr->dmem); 228 } 229 230 /* Refill receive buffer descriptors with new buffers. 231 */ 232 static void nicvf_refill_rbdr(struct nicvf *nic, gfp_t gfp) 233 { 234 struct queue_set *qs = nic->qs; 235 int rbdr_idx = qs->rbdr_cnt; 236 int tail, qcount; 237 int refill_rb_cnt; 238 struct rbdr *rbdr; 239 struct rbdr_entry_t *desc; 240 u64 *rbuf; 241 int new_rb = 0; 242 243 refill: 244 if (!rbdr_idx) 245 return; 246 rbdr_idx--; 247 rbdr = &qs->rbdr[rbdr_idx]; 248 /* Check if it's enabled */ 249 if (!rbdr->enable) 250 goto next_rbdr; 251 252 /* Get no of desc's to be refilled */ 253 qcount = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_STATUS0, rbdr_idx); 254 qcount &= 0x7FFFF; 255 /* Doorbell can be ringed with a max of ring size minus 1 */ 256 if (qcount >= (qs->rbdr_len - 1)) 257 goto next_rbdr; 258 else 259 refill_rb_cnt = qs->rbdr_len - qcount - 1; 260 261 /* Start filling descs from tail */ 262 tail = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_TAIL, rbdr_idx) >> 3; 263 while (refill_rb_cnt) { 264 tail++; 265 tail &= (rbdr->dmem.q_len - 1); 266 267 if (nicvf_alloc_rcv_buffer(nic, gfp, RCV_FRAG_LEN, &rbuf)) 268 break; 269 270 desc = GET_RBDR_DESC(rbdr, tail); 271 desc->buf_addr = virt_to_phys(rbuf) >> NICVF_RCV_BUF_ALIGN; 272 refill_rb_cnt--; 273 new_rb++; 274 } 275 276 /* make sure all memory stores are done before ringing doorbell */ 277 smp_wmb(); 278 279 /* Check if buffer allocation failed */ 280 if (refill_rb_cnt) 281 nic->rb_alloc_fail = true; 282 else 283 nic->rb_alloc_fail = false; 284 285 /* Notify HW */ 286 nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_DOOR, 287 rbdr_idx, new_rb); 288 next_rbdr: 289 /* Re-enable RBDR interrupts only if buffer allocation is success */ 290 if (!nic->rb_alloc_fail && rbdr->enable) 291 nicvf_enable_intr(nic, NICVF_INTR_RBDR, rbdr_idx); 292 293 if (rbdr_idx) 294 goto refill; 295 } 296 297 /* Alloc rcv buffers in non-atomic mode for better success */ 298 void nicvf_rbdr_work(struct work_struct *work) 299 { 300 struct nicvf *nic = container_of(work, struct nicvf, rbdr_work.work); 301 302 nicvf_refill_rbdr(nic, GFP_KERNEL); 303 if (nic->rb_alloc_fail) 304 schedule_delayed_work(&nic->rbdr_work, msecs_to_jiffies(10)); 305 else 306 nic->rb_work_scheduled = false; 307 } 308 309 /* In Softirq context, alloc rcv buffers in atomic mode */ 310 void nicvf_rbdr_task(unsigned long data) 311 { 312 struct nicvf *nic = (struct nicvf *)data; 313 314 nicvf_refill_rbdr(nic, GFP_ATOMIC); 315 if (nic->rb_alloc_fail) { 316 nic->rb_work_scheduled = true; 317 schedule_delayed_work(&nic->rbdr_work, msecs_to_jiffies(10)); 318 } 319 } 320 321 /* Initialize completion queue */ 322 static int nicvf_init_cmp_queue(struct nicvf *nic, 323 struct cmp_queue *cq, int q_len) 324 { 325 int err; 326 327 err = nicvf_alloc_q_desc_mem(nic, &cq->dmem, q_len, CMP_QUEUE_DESC_SIZE, 328 NICVF_CQ_BASE_ALIGN_BYTES); 329 if (err) 330 return err; 331 332 cq->desc = cq->dmem.base; 333 cq->thresh = CMP_QUEUE_CQE_THRESH; 334 nic->cq_coalesce_usecs = (CMP_QUEUE_TIMER_THRESH * 0.05) - 1; 335 336 return 0; 337 } 338 339 static void nicvf_free_cmp_queue(struct nicvf *nic, struct cmp_queue *cq) 340 { 341 if (!cq) 342 return; 343 if (!cq->dmem.base) 344 return; 345 346 nicvf_free_q_desc_mem(nic, &cq->dmem); 347 } 348 349 /* Initialize transmit queue */ 350 static int nicvf_init_snd_queue(struct nicvf *nic, 351 struct snd_queue *sq, int q_len) 352 { 353 int err; 354 355 err = nicvf_alloc_q_desc_mem(nic, &sq->dmem, q_len, SND_QUEUE_DESC_SIZE, 356 NICVF_SQ_BASE_ALIGN_BYTES); 357 if (err) 358 return err; 359 360 sq->desc = sq->dmem.base; 361 sq->skbuff = kcalloc(q_len, sizeof(u64), GFP_KERNEL); 362 if (!sq->skbuff) 363 return -ENOMEM; 364 sq->head = 0; 365 sq->tail = 0; 366 atomic_set(&sq->free_cnt, q_len - 1); 367 sq->thresh = SND_QUEUE_THRESH; 368 369 /* Preallocate memory for TSO segment's header */ 370 sq->tso_hdrs = dma_alloc_coherent(&nic->pdev->dev, 371 q_len * TSO_HEADER_SIZE, 372 &sq->tso_hdrs_phys, GFP_KERNEL); 373 if (!sq->tso_hdrs) 374 return -ENOMEM; 375 376 return 0; 377 } 378 379 static void nicvf_free_snd_queue(struct nicvf *nic, struct snd_queue *sq) 380 { 381 if (!sq) 382 return; 383 if (!sq->dmem.base) 384 return; 385 386 if (sq->tso_hdrs) 387 dma_free_coherent(&nic->pdev->dev, 388 sq->dmem.q_len * TSO_HEADER_SIZE, 389 sq->tso_hdrs, sq->tso_hdrs_phys); 390 391 kfree(sq->skbuff); 392 nicvf_free_q_desc_mem(nic, &sq->dmem); 393 } 394 395 static void nicvf_reclaim_snd_queue(struct nicvf *nic, 396 struct queue_set *qs, int qidx) 397 { 398 /* Disable send queue */ 399 nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, 0); 400 /* Check if SQ is stopped */ 401 if (nicvf_poll_reg(nic, qidx, NIC_QSET_SQ_0_7_STATUS, 21, 1, 0x01)) 402 return; 403 /* Reset send queue */ 404 nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, NICVF_SQ_RESET); 405 } 406 407 static void nicvf_reclaim_rcv_queue(struct nicvf *nic, 408 struct queue_set *qs, int qidx) 409 { 410 union nic_mbx mbx = {}; 411 412 /* Make sure all packets in the pipeline are written back into mem */ 413 mbx.msg.msg = NIC_MBOX_MSG_RQ_SW_SYNC; 414 nicvf_send_msg_to_pf(nic, &mbx); 415 } 416 417 static void nicvf_reclaim_cmp_queue(struct nicvf *nic, 418 struct queue_set *qs, int qidx) 419 { 420 /* Disable timer threshold (doesn't get reset upon CQ reset */ 421 nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG2, qidx, 0); 422 /* Disable completion queue */ 423 nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, 0); 424 /* Reset completion queue */ 425 nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, NICVF_CQ_RESET); 426 } 427 428 static void nicvf_reclaim_rbdr(struct nicvf *nic, 429 struct rbdr *rbdr, int qidx) 430 { 431 u64 tmp, fifo_state; 432 int timeout = 10; 433 434 /* Save head and tail pointers for feeing up buffers */ 435 rbdr->head = nicvf_queue_reg_read(nic, 436 NIC_QSET_RBDR_0_1_HEAD, 437 qidx) >> 3; 438 rbdr->tail = nicvf_queue_reg_read(nic, 439 NIC_QSET_RBDR_0_1_TAIL, 440 qidx) >> 3; 441 442 /* If RBDR FIFO is in 'FAIL' state then do a reset first 443 * before relaiming. 444 */ 445 fifo_state = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_STATUS0, qidx); 446 if (((fifo_state >> 62) & 0x03) == 0x3) 447 nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, 448 qidx, NICVF_RBDR_RESET); 449 450 /* Disable RBDR */ 451 nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, 0); 452 if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x00)) 453 return; 454 while (1) { 455 tmp = nicvf_queue_reg_read(nic, 456 NIC_QSET_RBDR_0_1_PREFETCH_STATUS, 457 qidx); 458 if ((tmp & 0xFFFFFFFF) == ((tmp >> 32) & 0xFFFFFFFF)) 459 break; 460 usleep_range(1000, 2000); 461 timeout--; 462 if (!timeout) { 463 netdev_err(nic->netdev, 464 "Failed polling on prefetch status\n"); 465 return; 466 } 467 } 468 nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, 469 qidx, NICVF_RBDR_RESET); 470 471 if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x02)) 472 return; 473 nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, 0x00); 474 if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x00)) 475 return; 476 } 477 478 void nicvf_config_vlan_stripping(struct nicvf *nic, netdev_features_t features) 479 { 480 u64 rq_cfg; 481 int sqs; 482 483 rq_cfg = nicvf_queue_reg_read(nic, NIC_QSET_RQ_GEN_CFG, 0); 484 485 /* Enable first VLAN stripping */ 486 if (features & NETIF_F_HW_VLAN_CTAG_RX) 487 rq_cfg |= (1ULL << 25); 488 else 489 rq_cfg &= ~(1ULL << 25); 490 nicvf_queue_reg_write(nic, NIC_QSET_RQ_GEN_CFG, 0, rq_cfg); 491 492 /* Configure Secondary Qsets, if any */ 493 for (sqs = 0; sqs < nic->sqs_count; sqs++) 494 if (nic->snicvf[sqs]) 495 nicvf_queue_reg_write(nic->snicvf[sqs], 496 NIC_QSET_RQ_GEN_CFG, 0, rq_cfg); 497 } 498 499 /* Configures receive queue */ 500 static void nicvf_rcv_queue_config(struct nicvf *nic, struct queue_set *qs, 501 int qidx, bool enable) 502 { 503 union nic_mbx mbx = {}; 504 struct rcv_queue *rq; 505 struct rq_cfg rq_cfg; 506 507 rq = &qs->rq[qidx]; 508 rq->enable = enable; 509 510 /* Disable receive queue */ 511 nicvf_queue_reg_write(nic, NIC_QSET_RQ_0_7_CFG, qidx, 0); 512 513 if (!rq->enable) { 514 nicvf_reclaim_rcv_queue(nic, qs, qidx); 515 return; 516 } 517 518 rq->cq_qs = qs->vnic_id; 519 rq->cq_idx = qidx; 520 rq->start_rbdr_qs = qs->vnic_id; 521 rq->start_qs_rbdr_idx = qs->rbdr_cnt - 1; 522 rq->cont_rbdr_qs = qs->vnic_id; 523 rq->cont_qs_rbdr_idx = qs->rbdr_cnt - 1; 524 /* all writes of RBDR data to be loaded into L2 Cache as well*/ 525 rq->caching = 1; 526 527 /* Send a mailbox msg to PF to config RQ */ 528 mbx.rq.msg = NIC_MBOX_MSG_RQ_CFG; 529 mbx.rq.qs_num = qs->vnic_id; 530 mbx.rq.rq_num = qidx; 531 mbx.rq.cfg = (rq->caching << 26) | (rq->cq_qs << 19) | 532 (rq->cq_idx << 16) | (rq->cont_rbdr_qs << 9) | 533 (rq->cont_qs_rbdr_idx << 8) | 534 (rq->start_rbdr_qs << 1) | (rq->start_qs_rbdr_idx); 535 nicvf_send_msg_to_pf(nic, &mbx); 536 537 mbx.rq.msg = NIC_MBOX_MSG_RQ_BP_CFG; 538 mbx.rq.cfg = (1ULL << 63) | (1ULL << 62) | (qs->vnic_id << 0); 539 nicvf_send_msg_to_pf(nic, &mbx); 540 541 /* RQ drop config 542 * Enable CQ drop to reserve sufficient CQEs for all tx packets 543 */ 544 mbx.rq.msg = NIC_MBOX_MSG_RQ_DROP_CFG; 545 mbx.rq.cfg = (1ULL << 62) | (RQ_CQ_DROP << 8); 546 nicvf_send_msg_to_pf(nic, &mbx); 547 548 nicvf_queue_reg_write(nic, NIC_QSET_RQ_GEN_CFG, 0, 0x00); 549 if (!nic->sqs_mode) 550 nicvf_config_vlan_stripping(nic, nic->netdev->features); 551 552 /* Enable Receive queue */ 553 rq_cfg.ena = 1; 554 rq_cfg.tcp_ena = 0; 555 nicvf_queue_reg_write(nic, NIC_QSET_RQ_0_7_CFG, qidx, *(u64 *)&rq_cfg); 556 } 557 558 /* Configures completion queue */ 559 void nicvf_cmp_queue_config(struct nicvf *nic, struct queue_set *qs, 560 int qidx, bool enable) 561 { 562 struct cmp_queue *cq; 563 struct cq_cfg cq_cfg; 564 565 cq = &qs->cq[qidx]; 566 cq->enable = enable; 567 568 if (!cq->enable) { 569 nicvf_reclaim_cmp_queue(nic, qs, qidx); 570 return; 571 } 572 573 /* Reset completion queue */ 574 nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, NICVF_CQ_RESET); 575 576 if (!cq->enable) 577 return; 578 579 spin_lock_init(&cq->lock); 580 /* Set completion queue base address */ 581 nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_BASE, 582 qidx, (u64)(cq->dmem.phys_base)); 583 584 /* Enable Completion queue */ 585 cq_cfg.ena = 1; 586 cq_cfg.reset = 0; 587 cq_cfg.caching = 0; 588 cq_cfg.qsize = CMP_QSIZE; 589 cq_cfg.avg_con = 0; 590 nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, *(u64 *)&cq_cfg); 591 592 /* Set threshold value for interrupt generation */ 593 nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_THRESH, qidx, cq->thresh); 594 nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG2, 595 qidx, nic->cq_coalesce_usecs); 596 } 597 598 /* Configures transmit queue */ 599 static void nicvf_snd_queue_config(struct nicvf *nic, struct queue_set *qs, 600 int qidx, bool enable) 601 { 602 union nic_mbx mbx = {}; 603 struct snd_queue *sq; 604 struct sq_cfg sq_cfg; 605 606 sq = &qs->sq[qidx]; 607 sq->enable = enable; 608 609 if (!sq->enable) { 610 nicvf_reclaim_snd_queue(nic, qs, qidx); 611 return; 612 } 613 614 /* Reset send queue */ 615 nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, NICVF_SQ_RESET); 616 617 sq->cq_qs = qs->vnic_id; 618 sq->cq_idx = qidx; 619 620 /* Send a mailbox msg to PF to config SQ */ 621 mbx.sq.msg = NIC_MBOX_MSG_SQ_CFG; 622 mbx.sq.qs_num = qs->vnic_id; 623 mbx.sq.sq_num = qidx; 624 mbx.sq.sqs_mode = nic->sqs_mode; 625 mbx.sq.cfg = (sq->cq_qs << 3) | sq->cq_idx; 626 nicvf_send_msg_to_pf(nic, &mbx); 627 628 /* Set queue base address */ 629 nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_BASE, 630 qidx, (u64)(sq->dmem.phys_base)); 631 632 /* Enable send queue & set queue size */ 633 sq_cfg.ena = 1; 634 sq_cfg.reset = 0; 635 sq_cfg.ldwb = 0; 636 sq_cfg.qsize = SND_QSIZE; 637 sq_cfg.tstmp_bgx_intf = 0; 638 nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, *(u64 *)&sq_cfg); 639 640 /* Set threshold value for interrupt generation */ 641 nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_THRESH, qidx, sq->thresh); 642 643 /* Set queue:cpu affinity for better load distribution */ 644 if (cpu_online(qidx)) { 645 cpumask_set_cpu(qidx, &sq->affinity_mask); 646 netif_set_xps_queue(nic->netdev, 647 &sq->affinity_mask, qidx); 648 } 649 } 650 651 /* Configures receive buffer descriptor ring */ 652 static void nicvf_rbdr_config(struct nicvf *nic, struct queue_set *qs, 653 int qidx, bool enable) 654 { 655 struct rbdr *rbdr; 656 struct rbdr_cfg rbdr_cfg; 657 658 rbdr = &qs->rbdr[qidx]; 659 nicvf_reclaim_rbdr(nic, rbdr, qidx); 660 if (!enable) 661 return; 662 663 /* Set descriptor base address */ 664 nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_BASE, 665 qidx, (u64)(rbdr->dmem.phys_base)); 666 667 /* Enable RBDR & set queue size */ 668 /* Buffer size should be in multiples of 128 bytes */ 669 rbdr_cfg.ena = 1; 670 rbdr_cfg.reset = 0; 671 rbdr_cfg.ldwb = 0; 672 rbdr_cfg.qsize = RBDR_SIZE; 673 rbdr_cfg.avg_con = 0; 674 rbdr_cfg.lines = rbdr->dma_size / 128; 675 nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, 676 qidx, *(u64 *)&rbdr_cfg); 677 678 /* Notify HW */ 679 nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_DOOR, 680 qidx, qs->rbdr_len - 1); 681 682 /* Set threshold value for interrupt generation */ 683 nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_THRESH, 684 qidx, rbdr->thresh - 1); 685 } 686 687 /* Requests PF to assign and enable Qset */ 688 void nicvf_qset_config(struct nicvf *nic, bool enable) 689 { 690 union nic_mbx mbx = {}; 691 struct queue_set *qs = nic->qs; 692 struct qs_cfg *qs_cfg; 693 694 if (!qs) { 695 netdev_warn(nic->netdev, 696 "Qset is still not allocated, don't init queues\n"); 697 return; 698 } 699 700 qs->enable = enable; 701 qs->vnic_id = nic->vf_id; 702 703 /* Send a mailbox msg to PF to config Qset */ 704 mbx.qs.msg = NIC_MBOX_MSG_QS_CFG; 705 mbx.qs.num = qs->vnic_id; 706 mbx.qs.sqs_count = nic->sqs_count; 707 708 mbx.qs.cfg = 0; 709 qs_cfg = (struct qs_cfg *)&mbx.qs.cfg; 710 if (qs->enable) { 711 qs_cfg->ena = 1; 712 #ifdef __BIG_ENDIAN 713 qs_cfg->be = 1; 714 #endif 715 qs_cfg->vnic = qs->vnic_id; 716 } 717 nicvf_send_msg_to_pf(nic, &mbx); 718 } 719 720 static void nicvf_free_resources(struct nicvf *nic) 721 { 722 int qidx; 723 struct queue_set *qs = nic->qs; 724 725 /* Free receive buffer descriptor ring */ 726 for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) 727 nicvf_free_rbdr(nic, &qs->rbdr[qidx]); 728 729 /* Free completion queue */ 730 for (qidx = 0; qidx < qs->cq_cnt; qidx++) 731 nicvf_free_cmp_queue(nic, &qs->cq[qidx]); 732 733 /* Free send queue */ 734 for (qidx = 0; qidx < qs->sq_cnt; qidx++) 735 nicvf_free_snd_queue(nic, &qs->sq[qidx]); 736 } 737 738 static int nicvf_alloc_resources(struct nicvf *nic) 739 { 740 int qidx; 741 struct queue_set *qs = nic->qs; 742 743 /* Alloc receive buffer descriptor ring */ 744 for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) { 745 if (nicvf_init_rbdr(nic, &qs->rbdr[qidx], qs->rbdr_len, 746 DMA_BUFFER_LEN)) 747 goto alloc_fail; 748 } 749 750 /* Alloc send queue */ 751 for (qidx = 0; qidx < qs->sq_cnt; qidx++) { 752 if (nicvf_init_snd_queue(nic, &qs->sq[qidx], qs->sq_len)) 753 goto alloc_fail; 754 } 755 756 /* Alloc completion queue */ 757 for (qidx = 0; qidx < qs->cq_cnt; qidx++) { 758 if (nicvf_init_cmp_queue(nic, &qs->cq[qidx], qs->cq_len)) 759 goto alloc_fail; 760 } 761 762 return 0; 763 alloc_fail: 764 nicvf_free_resources(nic); 765 return -ENOMEM; 766 } 767 768 int nicvf_set_qset_resources(struct nicvf *nic) 769 { 770 struct queue_set *qs; 771 772 qs = devm_kzalloc(&nic->pdev->dev, sizeof(*qs), GFP_KERNEL); 773 if (!qs) 774 return -ENOMEM; 775 nic->qs = qs; 776 777 /* Set count of each queue */ 778 qs->rbdr_cnt = RBDR_CNT; 779 qs->rq_cnt = RCV_QUEUE_CNT; 780 qs->sq_cnt = SND_QUEUE_CNT; 781 qs->cq_cnt = CMP_QUEUE_CNT; 782 783 /* Set queue lengths */ 784 qs->rbdr_len = RCV_BUF_COUNT; 785 qs->sq_len = SND_QUEUE_LEN; 786 qs->cq_len = CMP_QUEUE_LEN; 787 788 nic->rx_queues = qs->rq_cnt; 789 nic->tx_queues = qs->sq_cnt; 790 791 return 0; 792 } 793 794 int nicvf_config_data_transfer(struct nicvf *nic, bool enable) 795 { 796 bool disable = false; 797 struct queue_set *qs = nic->qs; 798 int qidx; 799 800 if (!qs) 801 return 0; 802 803 if (enable) { 804 if (nicvf_alloc_resources(nic)) 805 return -ENOMEM; 806 807 for (qidx = 0; qidx < qs->sq_cnt; qidx++) 808 nicvf_snd_queue_config(nic, qs, qidx, enable); 809 for (qidx = 0; qidx < qs->cq_cnt; qidx++) 810 nicvf_cmp_queue_config(nic, qs, qidx, enable); 811 for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) 812 nicvf_rbdr_config(nic, qs, qidx, enable); 813 for (qidx = 0; qidx < qs->rq_cnt; qidx++) 814 nicvf_rcv_queue_config(nic, qs, qidx, enable); 815 } else { 816 for (qidx = 0; qidx < qs->rq_cnt; qidx++) 817 nicvf_rcv_queue_config(nic, qs, qidx, disable); 818 for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) 819 nicvf_rbdr_config(nic, qs, qidx, disable); 820 for (qidx = 0; qidx < qs->sq_cnt; qidx++) 821 nicvf_snd_queue_config(nic, qs, qidx, disable); 822 for (qidx = 0; qidx < qs->cq_cnt; qidx++) 823 nicvf_cmp_queue_config(nic, qs, qidx, disable); 824 825 nicvf_free_resources(nic); 826 } 827 828 return 0; 829 } 830 831 /* Get a free desc from SQ 832 * returns descriptor ponter & descriptor number 833 */ 834 static inline int nicvf_get_sq_desc(struct snd_queue *sq, int desc_cnt) 835 { 836 int qentry; 837 838 qentry = sq->tail; 839 atomic_sub(desc_cnt, &sq->free_cnt); 840 sq->tail += desc_cnt; 841 sq->tail &= (sq->dmem.q_len - 1); 842 843 return qentry; 844 } 845 846 /* Free descriptor back to SQ for future use */ 847 void nicvf_put_sq_desc(struct snd_queue *sq, int desc_cnt) 848 { 849 atomic_add(desc_cnt, &sq->free_cnt); 850 sq->head += desc_cnt; 851 sq->head &= (sq->dmem.q_len - 1); 852 } 853 854 static inline int nicvf_get_nxt_sqentry(struct snd_queue *sq, int qentry) 855 { 856 qentry++; 857 qentry &= (sq->dmem.q_len - 1); 858 return qentry; 859 } 860 861 void nicvf_sq_enable(struct nicvf *nic, struct snd_queue *sq, int qidx) 862 { 863 u64 sq_cfg; 864 865 sq_cfg = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CFG, qidx); 866 sq_cfg |= NICVF_SQ_EN; 867 nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, sq_cfg); 868 /* Ring doorbell so that H/W restarts processing SQEs */ 869 nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR, qidx, 0); 870 } 871 872 void nicvf_sq_disable(struct nicvf *nic, int qidx) 873 { 874 u64 sq_cfg; 875 876 sq_cfg = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CFG, qidx); 877 sq_cfg &= ~NICVF_SQ_EN; 878 nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, sq_cfg); 879 } 880 881 void nicvf_sq_free_used_descs(struct net_device *netdev, struct snd_queue *sq, 882 int qidx) 883 { 884 u64 head, tail; 885 struct sk_buff *skb; 886 struct nicvf *nic = netdev_priv(netdev); 887 struct sq_hdr_subdesc *hdr; 888 889 head = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_HEAD, qidx) >> 4; 890 tail = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_TAIL, qidx) >> 4; 891 while (sq->head != head) { 892 hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, sq->head); 893 if (hdr->subdesc_type != SQ_DESC_TYPE_HEADER) { 894 nicvf_put_sq_desc(sq, 1); 895 continue; 896 } 897 skb = (struct sk_buff *)sq->skbuff[sq->head]; 898 if (skb) 899 dev_kfree_skb_any(skb); 900 atomic64_add(1, (atomic64_t *)&netdev->stats.tx_packets); 901 atomic64_add(hdr->tot_len, 902 (atomic64_t *)&netdev->stats.tx_bytes); 903 nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1); 904 } 905 } 906 907 /* Calculate no of SQ subdescriptors needed to transmit all 908 * segments of this TSO packet. 909 * Taken from 'Tilera network driver' with a minor modification. 910 */ 911 static int nicvf_tso_count_subdescs(struct sk_buff *skb) 912 { 913 struct skb_shared_info *sh = skb_shinfo(skb); 914 unsigned int sh_len = skb_transport_offset(skb) + tcp_hdrlen(skb); 915 unsigned int data_len = skb->len - sh_len; 916 unsigned int p_len = sh->gso_size; 917 long f_id = -1; /* id of the current fragment */ 918 long f_size = skb_headlen(skb) - sh_len; /* current fragment size */ 919 long f_used = 0; /* bytes used from the current fragment */ 920 long n; /* size of the current piece of payload */ 921 int num_edescs = 0; 922 int segment; 923 924 for (segment = 0; segment < sh->gso_segs; segment++) { 925 unsigned int p_used = 0; 926 927 /* One edesc for header and for each piece of the payload. */ 928 for (num_edescs++; p_used < p_len; num_edescs++) { 929 /* Advance as needed. */ 930 while (f_used >= f_size) { 931 f_id++; 932 f_size = skb_frag_size(&sh->frags[f_id]); 933 f_used = 0; 934 } 935 936 /* Use bytes from the current fragment. */ 937 n = p_len - p_used; 938 if (n > f_size - f_used) 939 n = f_size - f_used; 940 f_used += n; 941 p_used += n; 942 } 943 944 /* The last segment may be less than gso_size. */ 945 data_len -= p_len; 946 if (data_len < p_len) 947 p_len = data_len; 948 } 949 950 /* '+ gso_segs' for SQ_HDR_SUDESCs for each segment */ 951 return num_edescs + sh->gso_segs; 952 } 953 954 /* Get the number of SQ descriptors needed to xmit this skb */ 955 static int nicvf_sq_subdesc_required(struct nicvf *nic, struct sk_buff *skb) 956 { 957 int subdesc_cnt = MIN_SQ_DESC_PER_PKT_XMIT; 958 959 if (skb_shinfo(skb)->gso_size) { 960 subdesc_cnt = nicvf_tso_count_subdescs(skb); 961 return subdesc_cnt; 962 } 963 964 if (skb_shinfo(skb)->nr_frags) 965 subdesc_cnt += skb_shinfo(skb)->nr_frags; 966 967 return subdesc_cnt; 968 } 969 970 /* Add SQ HEADER subdescriptor. 971 * First subdescriptor for every send descriptor. 972 */ 973 static inline void 974 nicvf_sq_add_hdr_subdesc(struct snd_queue *sq, int qentry, 975 int subdesc_cnt, struct sk_buff *skb, int len) 976 { 977 int proto; 978 struct sq_hdr_subdesc *hdr; 979 980 hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, qentry); 981 sq->skbuff[qentry] = (u64)skb; 982 983 memset(hdr, 0, SND_QUEUE_DESC_SIZE); 984 hdr->subdesc_type = SQ_DESC_TYPE_HEADER; 985 /* Enable notification via CQE after processing SQE */ 986 hdr->post_cqe = 1; 987 /* No of subdescriptors following this */ 988 hdr->subdesc_cnt = subdesc_cnt; 989 hdr->tot_len = len; 990 991 /* Offload checksum calculation to HW */ 992 if (skb->ip_summed == CHECKSUM_PARTIAL) { 993 hdr->csum_l3 = 1; /* Enable IP csum calculation */ 994 hdr->l3_offset = skb_network_offset(skb); 995 hdr->l4_offset = skb_transport_offset(skb); 996 997 proto = ip_hdr(skb)->protocol; 998 switch (proto) { 999 case IPPROTO_TCP: 1000 hdr->csum_l4 = SEND_L4_CSUM_TCP; 1001 break; 1002 case IPPROTO_UDP: 1003 hdr->csum_l4 = SEND_L4_CSUM_UDP; 1004 break; 1005 case IPPROTO_SCTP: 1006 hdr->csum_l4 = SEND_L4_CSUM_SCTP; 1007 break; 1008 } 1009 } 1010 } 1011 1012 /* SQ GATHER subdescriptor 1013 * Must follow HDR descriptor 1014 */ 1015 static inline void nicvf_sq_add_gather_subdesc(struct snd_queue *sq, int qentry, 1016 int size, u64 data) 1017 { 1018 struct sq_gather_subdesc *gather; 1019 1020 qentry &= (sq->dmem.q_len - 1); 1021 gather = (struct sq_gather_subdesc *)GET_SQ_DESC(sq, qentry); 1022 1023 memset(gather, 0, SND_QUEUE_DESC_SIZE); 1024 gather->subdesc_type = SQ_DESC_TYPE_GATHER; 1025 gather->ld_type = NIC_SEND_LD_TYPE_E_LDD; 1026 gather->size = size; 1027 gather->addr = data; 1028 } 1029 1030 /* Segment a TSO packet into 'gso_size' segments and append 1031 * them to SQ for transfer 1032 */ 1033 static int nicvf_sq_append_tso(struct nicvf *nic, struct snd_queue *sq, 1034 int sq_num, int qentry, struct sk_buff *skb) 1035 { 1036 struct tso_t tso; 1037 int seg_subdescs = 0, desc_cnt = 0; 1038 int seg_len, total_len, data_left; 1039 int hdr_qentry = qentry; 1040 int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); 1041 1042 tso_start(skb, &tso); 1043 total_len = skb->len - hdr_len; 1044 while (total_len > 0) { 1045 char *hdr; 1046 1047 /* Save Qentry for adding HDR_SUBDESC at the end */ 1048 hdr_qentry = qentry; 1049 1050 data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len); 1051 total_len -= data_left; 1052 1053 /* Add segment's header */ 1054 qentry = nicvf_get_nxt_sqentry(sq, qentry); 1055 hdr = sq->tso_hdrs + qentry * TSO_HEADER_SIZE; 1056 tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0); 1057 nicvf_sq_add_gather_subdesc(sq, qentry, hdr_len, 1058 sq->tso_hdrs_phys + 1059 qentry * TSO_HEADER_SIZE); 1060 /* HDR_SUDESC + GATHER */ 1061 seg_subdescs = 2; 1062 seg_len = hdr_len; 1063 1064 /* Add segment's payload fragments */ 1065 while (data_left > 0) { 1066 int size; 1067 1068 size = min_t(int, tso.size, data_left); 1069 1070 qentry = nicvf_get_nxt_sqentry(sq, qentry); 1071 nicvf_sq_add_gather_subdesc(sq, qentry, size, 1072 virt_to_phys(tso.data)); 1073 seg_subdescs++; 1074 seg_len += size; 1075 1076 data_left -= size; 1077 tso_build_data(skb, &tso, size); 1078 } 1079 nicvf_sq_add_hdr_subdesc(sq, hdr_qentry, 1080 seg_subdescs - 1, skb, seg_len); 1081 sq->skbuff[hdr_qentry] = (u64)NULL; 1082 qentry = nicvf_get_nxt_sqentry(sq, qentry); 1083 1084 desc_cnt += seg_subdescs; 1085 } 1086 /* Save SKB in the last segment for freeing */ 1087 sq->skbuff[hdr_qentry] = (u64)skb; 1088 1089 /* make sure all memory stores are done before ringing doorbell */ 1090 smp_wmb(); 1091 1092 /* Inform HW to xmit all TSO segments */ 1093 nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR, 1094 sq_num, desc_cnt); 1095 nic->drv_stats.tx_tso++; 1096 return 1; 1097 } 1098 1099 /* Append an skb to a SQ for packet transfer. */ 1100 int nicvf_sq_append_skb(struct nicvf *nic, struct sk_buff *skb) 1101 { 1102 int i, size; 1103 int subdesc_cnt; 1104 int sq_num, qentry; 1105 struct queue_set *qs; 1106 struct snd_queue *sq; 1107 1108 sq_num = skb_get_queue_mapping(skb); 1109 if (sq_num >= MAX_SND_QUEUES_PER_QS) { 1110 /* Get secondary Qset's SQ structure */ 1111 i = sq_num / MAX_SND_QUEUES_PER_QS; 1112 if (!nic->snicvf[i - 1]) { 1113 netdev_warn(nic->netdev, 1114 "Secondary Qset#%d's ptr not initialized\n", 1115 i - 1); 1116 return 1; 1117 } 1118 nic = (struct nicvf *)nic->snicvf[i - 1]; 1119 sq_num = sq_num % MAX_SND_QUEUES_PER_QS; 1120 } 1121 1122 qs = nic->qs; 1123 sq = &qs->sq[sq_num]; 1124 1125 subdesc_cnt = nicvf_sq_subdesc_required(nic, skb); 1126 if (subdesc_cnt > atomic_read(&sq->free_cnt)) 1127 goto append_fail; 1128 1129 qentry = nicvf_get_sq_desc(sq, subdesc_cnt); 1130 1131 /* Check if its a TSO packet */ 1132 if (skb_shinfo(skb)->gso_size) 1133 return nicvf_sq_append_tso(nic, sq, sq_num, qentry, skb); 1134 1135 /* Add SQ header subdesc */ 1136 nicvf_sq_add_hdr_subdesc(sq, qentry, subdesc_cnt - 1, skb, skb->len); 1137 1138 /* Add SQ gather subdescs */ 1139 qentry = nicvf_get_nxt_sqentry(sq, qentry); 1140 size = skb_is_nonlinear(skb) ? skb_headlen(skb) : skb->len; 1141 nicvf_sq_add_gather_subdesc(sq, qentry, size, virt_to_phys(skb->data)); 1142 1143 /* Check for scattered buffer */ 1144 if (!skb_is_nonlinear(skb)) 1145 goto doorbell; 1146 1147 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 1148 const struct skb_frag_struct *frag; 1149 1150 frag = &skb_shinfo(skb)->frags[i]; 1151 1152 qentry = nicvf_get_nxt_sqentry(sq, qentry); 1153 size = skb_frag_size(frag); 1154 nicvf_sq_add_gather_subdesc(sq, qentry, size, 1155 virt_to_phys( 1156 skb_frag_address(frag))); 1157 } 1158 1159 doorbell: 1160 /* make sure all memory stores are done before ringing doorbell */ 1161 smp_wmb(); 1162 1163 /* Inform HW to xmit new packet */ 1164 nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR, 1165 sq_num, subdesc_cnt); 1166 return 1; 1167 1168 append_fail: 1169 /* Use original PCI dev for debug log */ 1170 nic = nic->pnicvf; 1171 netdev_dbg(nic->netdev, "Not enough SQ descriptors to xmit pkt\n"); 1172 return 0; 1173 } 1174 1175 static inline unsigned frag_num(unsigned i) 1176 { 1177 #ifdef __BIG_ENDIAN 1178 return (i & ~3) + 3 - (i & 3); 1179 #else 1180 return i; 1181 #endif 1182 } 1183 1184 /* Returns SKB for a received packet */ 1185 struct sk_buff *nicvf_get_rcv_skb(struct nicvf *nic, struct cqe_rx_t *cqe_rx) 1186 { 1187 int frag; 1188 int payload_len = 0; 1189 struct sk_buff *skb = NULL; 1190 struct sk_buff *skb_frag = NULL; 1191 struct sk_buff *prev_frag = NULL; 1192 u16 *rb_lens = NULL; 1193 u64 *rb_ptrs = NULL; 1194 1195 rb_lens = (void *)cqe_rx + (3 * sizeof(u64)); 1196 rb_ptrs = (void *)cqe_rx + (6 * sizeof(u64)); 1197 1198 netdev_dbg(nic->netdev, "%s rb_cnt %d rb0_ptr %llx rb0_sz %d\n", 1199 __func__, cqe_rx->rb_cnt, cqe_rx->rb0_ptr, cqe_rx->rb0_sz); 1200 1201 for (frag = 0; frag < cqe_rx->rb_cnt; frag++) { 1202 payload_len = rb_lens[frag_num(frag)]; 1203 if (!frag) { 1204 /* First fragment */ 1205 skb = nicvf_rb_ptr_to_skb(nic, 1206 *rb_ptrs - cqe_rx->align_pad, 1207 payload_len); 1208 if (!skb) 1209 return NULL; 1210 skb_reserve(skb, cqe_rx->align_pad); 1211 skb_put(skb, payload_len); 1212 } else { 1213 /* Add fragments */ 1214 skb_frag = nicvf_rb_ptr_to_skb(nic, *rb_ptrs, 1215 payload_len); 1216 if (!skb_frag) { 1217 dev_kfree_skb(skb); 1218 return NULL; 1219 } 1220 1221 if (!skb_shinfo(skb)->frag_list) 1222 skb_shinfo(skb)->frag_list = skb_frag; 1223 else 1224 prev_frag->next = skb_frag; 1225 1226 prev_frag = skb_frag; 1227 skb->len += payload_len; 1228 skb->data_len += payload_len; 1229 skb_frag->len = payload_len; 1230 } 1231 /* Next buffer pointer */ 1232 rb_ptrs++; 1233 } 1234 return skb; 1235 } 1236 1237 /* Enable interrupt */ 1238 void nicvf_enable_intr(struct nicvf *nic, int int_type, int q_idx) 1239 { 1240 u64 reg_val; 1241 1242 reg_val = nicvf_reg_read(nic, NIC_VF_ENA_W1S); 1243 1244 switch (int_type) { 1245 case NICVF_INTR_CQ: 1246 reg_val |= ((1ULL << q_idx) << NICVF_INTR_CQ_SHIFT); 1247 break; 1248 case NICVF_INTR_SQ: 1249 reg_val |= ((1ULL << q_idx) << NICVF_INTR_SQ_SHIFT); 1250 break; 1251 case NICVF_INTR_RBDR: 1252 reg_val |= ((1ULL << q_idx) << NICVF_INTR_RBDR_SHIFT); 1253 break; 1254 case NICVF_INTR_PKT_DROP: 1255 reg_val |= (1ULL << NICVF_INTR_PKT_DROP_SHIFT); 1256 break; 1257 case NICVF_INTR_TCP_TIMER: 1258 reg_val |= (1ULL << NICVF_INTR_TCP_TIMER_SHIFT); 1259 break; 1260 case NICVF_INTR_MBOX: 1261 reg_val |= (1ULL << NICVF_INTR_MBOX_SHIFT); 1262 break; 1263 case NICVF_INTR_QS_ERR: 1264 reg_val |= (1ULL << NICVF_INTR_QS_ERR_SHIFT); 1265 break; 1266 default: 1267 netdev_err(nic->netdev, 1268 "Failed to enable interrupt: unknown type\n"); 1269 break; 1270 } 1271 1272 nicvf_reg_write(nic, NIC_VF_ENA_W1S, reg_val); 1273 } 1274 1275 /* Disable interrupt */ 1276 void nicvf_disable_intr(struct nicvf *nic, int int_type, int q_idx) 1277 { 1278 u64 reg_val = 0; 1279 1280 switch (int_type) { 1281 case NICVF_INTR_CQ: 1282 reg_val |= ((1ULL << q_idx) << NICVF_INTR_CQ_SHIFT); 1283 break; 1284 case NICVF_INTR_SQ: 1285 reg_val |= ((1ULL << q_idx) << NICVF_INTR_SQ_SHIFT); 1286 break; 1287 case NICVF_INTR_RBDR: 1288 reg_val |= ((1ULL << q_idx) << NICVF_INTR_RBDR_SHIFT); 1289 break; 1290 case NICVF_INTR_PKT_DROP: 1291 reg_val |= (1ULL << NICVF_INTR_PKT_DROP_SHIFT); 1292 break; 1293 case NICVF_INTR_TCP_TIMER: 1294 reg_val |= (1ULL << NICVF_INTR_TCP_TIMER_SHIFT); 1295 break; 1296 case NICVF_INTR_MBOX: 1297 reg_val |= (1ULL << NICVF_INTR_MBOX_SHIFT); 1298 break; 1299 case NICVF_INTR_QS_ERR: 1300 reg_val |= (1ULL << NICVF_INTR_QS_ERR_SHIFT); 1301 break; 1302 default: 1303 netdev_err(nic->netdev, 1304 "Failed to disable interrupt: unknown type\n"); 1305 break; 1306 } 1307 1308 nicvf_reg_write(nic, NIC_VF_ENA_W1C, reg_val); 1309 } 1310 1311 /* Clear interrupt */ 1312 void nicvf_clear_intr(struct nicvf *nic, int int_type, int q_idx) 1313 { 1314 u64 reg_val = 0; 1315 1316 switch (int_type) { 1317 case NICVF_INTR_CQ: 1318 reg_val = ((1ULL << q_idx) << NICVF_INTR_CQ_SHIFT); 1319 break; 1320 case NICVF_INTR_SQ: 1321 reg_val = ((1ULL << q_idx) << NICVF_INTR_SQ_SHIFT); 1322 break; 1323 case NICVF_INTR_RBDR: 1324 reg_val = ((1ULL << q_idx) << NICVF_INTR_RBDR_SHIFT); 1325 break; 1326 case NICVF_INTR_PKT_DROP: 1327 reg_val = (1ULL << NICVF_INTR_PKT_DROP_SHIFT); 1328 break; 1329 case NICVF_INTR_TCP_TIMER: 1330 reg_val = (1ULL << NICVF_INTR_TCP_TIMER_SHIFT); 1331 break; 1332 case NICVF_INTR_MBOX: 1333 reg_val = (1ULL << NICVF_INTR_MBOX_SHIFT); 1334 break; 1335 case NICVF_INTR_QS_ERR: 1336 reg_val |= (1ULL << NICVF_INTR_QS_ERR_SHIFT); 1337 break; 1338 default: 1339 netdev_err(nic->netdev, 1340 "Failed to clear interrupt: unknown type\n"); 1341 break; 1342 } 1343 1344 nicvf_reg_write(nic, NIC_VF_INT, reg_val); 1345 } 1346 1347 /* Check if interrupt is enabled */ 1348 int nicvf_is_intr_enabled(struct nicvf *nic, int int_type, int q_idx) 1349 { 1350 u64 reg_val; 1351 u64 mask = 0xff; 1352 1353 reg_val = nicvf_reg_read(nic, NIC_VF_ENA_W1S); 1354 1355 switch (int_type) { 1356 case NICVF_INTR_CQ: 1357 mask = ((1ULL << q_idx) << NICVF_INTR_CQ_SHIFT); 1358 break; 1359 case NICVF_INTR_SQ: 1360 mask = ((1ULL << q_idx) << NICVF_INTR_SQ_SHIFT); 1361 break; 1362 case NICVF_INTR_RBDR: 1363 mask = ((1ULL << q_idx) << NICVF_INTR_RBDR_SHIFT); 1364 break; 1365 case NICVF_INTR_PKT_DROP: 1366 mask = NICVF_INTR_PKT_DROP_MASK; 1367 break; 1368 case NICVF_INTR_TCP_TIMER: 1369 mask = NICVF_INTR_TCP_TIMER_MASK; 1370 break; 1371 case NICVF_INTR_MBOX: 1372 mask = NICVF_INTR_MBOX_MASK; 1373 break; 1374 case NICVF_INTR_QS_ERR: 1375 mask = NICVF_INTR_QS_ERR_MASK; 1376 break; 1377 default: 1378 netdev_err(nic->netdev, 1379 "Failed to check interrupt enable: unknown type\n"); 1380 break; 1381 } 1382 1383 return (reg_val & mask); 1384 } 1385 1386 void nicvf_update_rq_stats(struct nicvf *nic, int rq_idx) 1387 { 1388 struct rcv_queue *rq; 1389 1390 #define GET_RQ_STATS(reg) \ 1391 nicvf_reg_read(nic, NIC_QSET_RQ_0_7_STAT_0_1 |\ 1392 (rq_idx << NIC_Q_NUM_SHIFT) | (reg << 3)) 1393 1394 rq = &nic->qs->rq[rq_idx]; 1395 rq->stats.bytes = GET_RQ_STATS(RQ_SQ_STATS_OCTS); 1396 rq->stats.pkts = GET_RQ_STATS(RQ_SQ_STATS_PKTS); 1397 } 1398 1399 void nicvf_update_sq_stats(struct nicvf *nic, int sq_idx) 1400 { 1401 struct snd_queue *sq; 1402 1403 #define GET_SQ_STATS(reg) \ 1404 nicvf_reg_read(nic, NIC_QSET_SQ_0_7_STAT_0_1 |\ 1405 (sq_idx << NIC_Q_NUM_SHIFT) | (reg << 3)) 1406 1407 sq = &nic->qs->sq[sq_idx]; 1408 sq->stats.bytes = GET_SQ_STATS(RQ_SQ_STATS_OCTS); 1409 sq->stats.pkts = GET_SQ_STATS(RQ_SQ_STATS_PKTS); 1410 } 1411 1412 /* Check for errors in the receive cmp.queue entry */ 1413 int nicvf_check_cqe_rx_errs(struct nicvf *nic, 1414 struct cmp_queue *cq, struct cqe_rx_t *cqe_rx) 1415 { 1416 struct nicvf_hw_stats *stats = &nic->hw_stats; 1417 struct nicvf_drv_stats *drv_stats = &nic->drv_stats; 1418 1419 if (!cqe_rx->err_level && !cqe_rx->err_opcode) { 1420 drv_stats->rx_frames_ok++; 1421 return 0; 1422 } 1423 1424 if (netif_msg_rx_err(nic)) 1425 netdev_err(nic->netdev, 1426 "%s: RX error CQE err_level 0x%x err_opcode 0x%x\n", 1427 nic->netdev->name, 1428 cqe_rx->err_level, cqe_rx->err_opcode); 1429 1430 switch (cqe_rx->err_opcode) { 1431 case CQ_RX_ERROP_RE_PARTIAL: 1432 stats->rx_bgx_truncated_pkts++; 1433 break; 1434 case CQ_RX_ERROP_RE_JABBER: 1435 stats->rx_jabber_errs++; 1436 break; 1437 case CQ_RX_ERROP_RE_FCS: 1438 stats->rx_fcs_errs++; 1439 break; 1440 case CQ_RX_ERROP_RE_RX_CTL: 1441 stats->rx_bgx_errs++; 1442 break; 1443 case CQ_RX_ERROP_PREL2_ERR: 1444 stats->rx_prel2_errs++; 1445 break; 1446 case CQ_RX_ERROP_L2_MAL: 1447 stats->rx_l2_hdr_malformed++; 1448 break; 1449 case CQ_RX_ERROP_L2_OVERSIZE: 1450 stats->rx_oversize++; 1451 break; 1452 case CQ_RX_ERROP_L2_UNDERSIZE: 1453 stats->rx_undersize++; 1454 break; 1455 case CQ_RX_ERROP_L2_LENMISM: 1456 stats->rx_l2_len_mismatch++; 1457 break; 1458 case CQ_RX_ERROP_L2_PCLP: 1459 stats->rx_l2_pclp++; 1460 break; 1461 case CQ_RX_ERROP_IP_NOT: 1462 stats->rx_ip_ver_errs++; 1463 break; 1464 case CQ_RX_ERROP_IP_CSUM_ERR: 1465 stats->rx_ip_csum_errs++; 1466 break; 1467 case CQ_RX_ERROP_IP_MAL: 1468 stats->rx_ip_hdr_malformed++; 1469 break; 1470 case CQ_RX_ERROP_IP_MALD: 1471 stats->rx_ip_payload_malformed++; 1472 break; 1473 case CQ_RX_ERROP_IP_HOP: 1474 stats->rx_ip_ttl_errs++; 1475 break; 1476 case CQ_RX_ERROP_L3_PCLP: 1477 stats->rx_l3_pclp++; 1478 break; 1479 case CQ_RX_ERROP_L4_MAL: 1480 stats->rx_l4_malformed++; 1481 break; 1482 case CQ_RX_ERROP_L4_CHK: 1483 stats->rx_l4_csum_errs++; 1484 break; 1485 case CQ_RX_ERROP_UDP_LEN: 1486 stats->rx_udp_len_errs++; 1487 break; 1488 case CQ_RX_ERROP_L4_PORT: 1489 stats->rx_l4_port_errs++; 1490 break; 1491 case CQ_RX_ERROP_TCP_FLAG: 1492 stats->rx_tcp_flag_errs++; 1493 break; 1494 case CQ_RX_ERROP_TCP_OFFSET: 1495 stats->rx_tcp_offset_errs++; 1496 break; 1497 case CQ_RX_ERROP_L4_PCLP: 1498 stats->rx_l4_pclp++; 1499 break; 1500 case CQ_RX_ERROP_RBDR_TRUNC: 1501 stats->rx_truncated_pkts++; 1502 break; 1503 } 1504 1505 return 1; 1506 } 1507 1508 /* Check for errors in the send cmp.queue entry */ 1509 int nicvf_check_cqe_tx_errs(struct nicvf *nic, 1510 struct cmp_queue *cq, struct cqe_send_t *cqe_tx) 1511 { 1512 struct cmp_queue_stats *stats = &cq->stats; 1513 1514 switch (cqe_tx->send_status) { 1515 case CQ_TX_ERROP_GOOD: 1516 stats->tx.good++; 1517 return 0; 1518 case CQ_TX_ERROP_DESC_FAULT: 1519 stats->tx.desc_fault++; 1520 break; 1521 case CQ_TX_ERROP_HDR_CONS_ERR: 1522 stats->tx.hdr_cons_err++; 1523 break; 1524 case CQ_TX_ERROP_SUBDC_ERR: 1525 stats->tx.subdesc_err++; 1526 break; 1527 case CQ_TX_ERROP_IMM_SIZE_OFLOW: 1528 stats->tx.imm_size_oflow++; 1529 break; 1530 case CQ_TX_ERROP_DATA_SEQUENCE_ERR: 1531 stats->tx.data_seq_err++; 1532 break; 1533 case CQ_TX_ERROP_MEM_SEQUENCE_ERR: 1534 stats->tx.mem_seq_err++; 1535 break; 1536 case CQ_TX_ERROP_LOCK_VIOL: 1537 stats->tx.lock_viol++; 1538 break; 1539 case CQ_TX_ERROP_DATA_FAULT: 1540 stats->tx.data_fault++; 1541 break; 1542 case CQ_TX_ERROP_TSTMP_CONFLICT: 1543 stats->tx.tstmp_conflict++; 1544 break; 1545 case CQ_TX_ERROP_TSTMP_TIMEOUT: 1546 stats->tx.tstmp_timeout++; 1547 break; 1548 case CQ_TX_ERROP_MEM_FAULT: 1549 stats->tx.mem_fault++; 1550 break; 1551 case CQ_TX_ERROP_CK_OVERLAP: 1552 stats->tx.csum_overlap++; 1553 break; 1554 case CQ_TX_ERROP_CK_OFLOW: 1555 stats->tx.csum_overflow++; 1556 break; 1557 } 1558 1559 return 1; 1560 } 1561