1 /*
2 * Linux driver for VMware's vmxnet3 ethernet NIC.
3 *
4 * Copyright (C) 2008-2024, VMware, Inc. All Rights Reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the
8 * Free Software Foundation; version 2 of the License and no later version.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
13 * NON INFRINGEMENT. See the GNU General Public License for more
14 * details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 *
20 * The full GNU General Public License is included in this distribution in
21 * the file called "COPYING".
22 *
23 * Maintained by: pv-drivers@vmware.com
24 *
25 */
26
27 #include <linux/module.h>
28 #include <net/ip6_checksum.h>
29
30 #ifdef CONFIG_X86
31 #include <asm/msr.h>
32 #endif
33
34 #include "vmxnet3_int.h"
35 #include "vmxnet3_xdp.h"
36
37 char vmxnet3_driver_name[] = "vmxnet3";
38 #define VMXNET3_DRIVER_DESC "VMware vmxnet3 virtual NIC driver"
39
40 /*
41 * PCI Device ID Table
42 * Last entry must be all 0s
43 */
44 static const struct pci_device_id vmxnet3_pciid_table[] = {
45 {PCI_VDEVICE(VMWARE, PCI_DEVICE_ID_VMWARE_VMXNET3)},
46 {0}
47 };
48
49 MODULE_DEVICE_TABLE(pci, vmxnet3_pciid_table);
50
51 static int enable_mq = 1;
52
53 static void
54 vmxnet3_write_mac_addr(struct vmxnet3_adapter *adapter, const u8 *mac);
55
56 /*
57 * Enable/Disable the given intr
58 */
59 static void
vmxnet3_enable_intr(struct vmxnet3_adapter *adapter, unsigned intr_idx)
61 {
62 VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_IMR + intr_idx * 8, 0);
63 }
64
65
66 static void
vmxnet3_disable_intr(struct vmxnet3_adapter *adapter, unsigned intr_idx)
68 {
69 VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_IMR + intr_idx * 8, 1);
70 }
71
72
73 /*
74 * Enable/Disable all intrs used by the device
75 */
76 static void
vmxnet3_enable_all_intrs(struct vmxnet3_adapter *adapter)
78 {
79 int i;
80
81 for (i = 0; i < adapter->intr.num_intrs; i++)
82 vmxnet3_enable_intr(adapter, i);
83 if (!VMXNET3_VERSION_GE_6(adapter) ||
84 !adapter->queuesExtEnabled) {
85 adapter->shared->devRead.intrConf.intrCtrl &=
86 cpu_to_le32(~VMXNET3_IC_DISABLE_ALL);
87 } else {
88 adapter->shared->devReadExt.intrConfExt.intrCtrl &=
89 cpu_to_le32(~VMXNET3_IC_DISABLE_ALL);
90 }
91 }
92
93
94 static void
vmxnet3_disable_all_intrs(struct vmxnet3_adapter *adapter)
96 {
97 int i;
98
99 if (!VMXNET3_VERSION_GE_6(adapter) ||
100 !adapter->queuesExtEnabled) {
101 adapter->shared->devRead.intrConf.intrCtrl |=
102 cpu_to_le32(VMXNET3_IC_DISABLE_ALL);
103 } else {
104 adapter->shared->devReadExt.intrConfExt.intrCtrl |=
105 cpu_to_le32(VMXNET3_IC_DISABLE_ALL);
106 }
107 for (i = 0; i < adapter->intr.num_intrs; i++)
108 vmxnet3_disable_intr(adapter, i);
109 }
110
111
112 static void
vmxnet3_ack_events(struct vmxnet3_adapter *adapter, u32 events)
114 {
115 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_ECR, events);
116 }
117
118
119 static bool
vmxnet3_tq_stopped(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
121 {
122 return tq->stopped;
123 }
124
125
126 static void
vmxnet3_tq_start(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
128 {
129 tq->stopped = false;
130 netif_start_subqueue(adapter->netdev, tq - adapter->tx_queue);
131 }
132
133
134 static void
vmxnet3_tq_wake(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
136 {
137 tq->stopped = false;
138 netif_wake_subqueue(adapter->netdev, (tq - adapter->tx_queue));
139 }
140
141
142 static void
vmxnet3_tq_stop(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
144 {
145 tq->stopped = true;
146 tq->num_stop++;
147 netif_stop_subqueue(adapter->netdev, (tq - adapter->tx_queue));
148 }
149
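/*
 * Read the given performance-monitoring counter. On x86 this uses
 * native_read_pmc() (used below for the pseudo-TSC timestamp descriptors);
 * on other architectures it simply returns 0.
 */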
150 static u64
vmxnet3_get_cycles(int pmc)
152 {
153 #ifdef CONFIG_X86
154 return native_read_pmc(pmc);
155 #else
156 return 0;
157 #endif
158 }
159
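/*
 * Decide whether the current tx packet should carry a timestamp descriptor.
 * With a sample rate of @rate, roughly one in every @rate packets is
 * timestamped: the per-queue counter is reloaded with @rate when it reaches 1
 * and decremented otherwise. Always returns false on non-x86.
 */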
160 static bool
vmxnet3_apply_timestamp(struct vmxnet3_tx_queue *tq, u16 rate)
162 {
163 #ifdef CONFIG_X86
164 if (rate > 0) {
165 if (tq->tsPktCount == 1) {
166 if (rate != 1)
167 tq->tsPktCount = rate;
168 return true;
169 }
170 tq->tsPktCount--;
171 }
172 #endif
173 return false;
174 }
175
/* Check if the capability is supported by the UPT device, or
 * whether UPT is even requested
 */
179 bool
vmxnet3_check_ptcapability(u32 cap_supported, u32 cap)
181 {
182 if (cap_supported & (1UL << VMXNET3_DCR_ERROR) ||
183 cap_supported & (1UL << cap)) {
184 return true;
185 }
186
187 return false;
188 }
189
190
191 /*
192 * Check the link state. This may start or stop the tx queue.
193 */
194 static void
vmxnet3_check_link(struct vmxnet3_adapter *adapter, bool affectTxQueue)
196 {
197 u32 ret;
198 int i;
199 unsigned long flags;
200
201 spin_lock_irqsave(&adapter->cmd_lock, flags);
202 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_LINK);
203 ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
204 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
205
206 adapter->link_speed = ret >> 16;
207 if (ret & 1) { /* Link is up. */
		/*
		 * From vmxnet3 v9, the hypervisor reports the speed in Gbps.
		 * Convert the speed to Mbps before reporting it to the kernel.
		 * Max link speed supported is 10000 Gbps.
		 */
213 if (VMXNET3_VERSION_GE_9(adapter) &&
214 adapter->link_speed < 10000)
215 adapter->link_speed = adapter->link_speed * 1000;
216 netdev_info(adapter->netdev, "NIC Link is Up %d Mbps\n",
217 adapter->link_speed);
218 netif_carrier_on(adapter->netdev);
219
220 if (affectTxQueue) {
221 for (i = 0; i < adapter->num_tx_queues; i++)
222 vmxnet3_tq_start(&adapter->tx_queue[i],
223 adapter);
224 }
225 } else {
226 netdev_info(adapter->netdev, "NIC Link is Down\n");
227 netif_carrier_off(adapter->netdev);
228
229 if (affectTxQueue) {
230 for (i = 0; i < adapter->num_tx_queues; i++)
231 vmxnet3_tq_stop(&adapter->tx_queue[i], adapter);
232 }
233 }
234 }
235
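/*
 * Read and acknowledge the pending events from the shared event change
 * register (ECR). Link change events trigger a link re-check; tx/rx queue
 * errors are logged and the reset work is scheduled.
 */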
236 static void
vmxnet3_process_events(struct vmxnet3_adapter *adapter)
238 {
239 int i;
240 unsigned long flags;
241 u32 events = le32_to_cpu(adapter->shared->ecr);
242 if (!events)
243 return;
244
245 vmxnet3_ack_events(adapter, events);
246
247 /* Check if link state has changed */
248 if (events & VMXNET3_ECR_LINK)
249 vmxnet3_check_link(adapter, true);
250
251 /* Check if there is an error on xmit/recv queues */
252 if (events & (VMXNET3_ECR_TQERR | VMXNET3_ECR_RQERR)) {
253 spin_lock_irqsave(&adapter->cmd_lock, flags);
254 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
255 VMXNET3_CMD_GET_QUEUE_STATUS);
256 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
257
258 for (i = 0; i < adapter->num_tx_queues; i++)
259 if (adapter->tqd_start[i].status.stopped)
260 dev_err(&adapter->netdev->dev,
261 "%s: tq[%d] error 0x%x\n",
262 adapter->netdev->name, i, le32_to_cpu(
263 adapter->tqd_start[i].status.error));
264 for (i = 0; i < adapter->num_rx_queues; i++)
265 if (adapter->rqd_start[i].status.stopped)
266 dev_err(&adapter->netdev->dev,
267 "%s: rq[%d] error 0x%x\n",
268 adapter->netdev->name, i,
269 adapter->rqd_start[i].status.error);
270
271 schedule_work(&adapter->work);
272 }
273 }
274
275 #ifdef __BIG_ENDIAN_BITFIELD
/*
 * The device expects the bitfields in shared structures to be written in
 * little endian. When the CPU is big endian, the following routines are used
 * to correctly read from and write to the device ABI.
 * The general technique used here is: double-word bitfields are defined in
 * the opposite order for big endian architectures. Before the driver reads
 * them, the complete double word is translated using le32_to_cpu. Similarly,
 * after the driver writes into the bitfields, cpu_to_le32 is used to translate
 * the double words into the required format.
 * In order to avoid touching bits in the shared structure more than once,
 * temporary descriptors are used. These are passed as srcDesc to the
 * following functions.
 */
static void vmxnet3_RxDescToCPU(const struct Vmxnet3_RxDesc *srcDesc,
				struct Vmxnet3_RxDesc *dstDesc)
290 {
291 u32 *src = (u32 *)srcDesc + 2;
292 u32 *dst = (u32 *)dstDesc + 2;
293 dstDesc->addr = le64_to_cpu(srcDesc->addr);
294 *dst = le32_to_cpu(*src);
295 dstDesc->ext1 = le32_to_cpu(srcDesc->ext1);
296 }
297
static void vmxnet3_TxDescToLe(const struct Vmxnet3_TxDesc *srcDesc,
			       struct Vmxnet3_TxDesc *dstDesc)
300 {
301 int i;
302 u32 *src = (u32 *)(srcDesc + 1);
303 u32 *dst = (u32 *)(dstDesc + 1);
304
305 /* Working backwards so that the gen bit is set at the end. */
306 for (i = 2; i > 0; i--) {
307 src--;
308 dst--;
309 *dst = cpu_to_le32(*src);
310 }
311 }
312
313
static void vmxnet3_RxCompToCPU(const struct Vmxnet3_RxCompDesc *srcDesc,
				struct Vmxnet3_RxCompDesc *dstDesc)
316 {
317 int i = 0;
318 u32 *src = (u32 *)srcDesc;
319 u32 *dst = (u32 *)dstDesc;
320 for (i = 0; i < sizeof(struct Vmxnet3_RxCompDesc) / sizeof(u32); i++) {
321 *dst = le32_to_cpu(*src);
322 src++;
323 dst++;
324 }
325 }
326
327
328 /* Used to read bitfield values from double words. */
static u32 get_bitfield32(const __le32 *bitfield, u32 pos, u32 size)
330 {
331 u32 temp = le32_to_cpu(*bitfield);
332 u32 mask = ((1 << size) - 1) << pos;
333 temp &= mask;
334 temp >>= pos;
335 return temp;
336 }
337
338
339
340 #endif /* __BIG_ENDIAN_BITFIELD */
341
342 #ifdef __BIG_ENDIAN_BITFIELD
343
344 # define VMXNET3_TXDESC_GET_GEN(txdesc) get_bitfield32(((const __le32 *) \
345 txdesc) + VMXNET3_TXD_GEN_DWORD_SHIFT, \
346 VMXNET3_TXD_GEN_SHIFT, VMXNET3_TXD_GEN_SIZE)
347 # define VMXNET3_TXDESC_GET_EOP(txdesc) get_bitfield32(((const __le32 *) \
348 txdesc) + VMXNET3_TXD_EOP_DWORD_SHIFT, \
349 VMXNET3_TXD_EOP_SHIFT, VMXNET3_TXD_EOP_SIZE)
350 # define VMXNET3_TCD_GET_GEN(tcd) get_bitfield32(((const __le32 *)tcd) + \
351 VMXNET3_TCD_GEN_DWORD_SHIFT, VMXNET3_TCD_GEN_SHIFT, \
352 VMXNET3_TCD_GEN_SIZE)
353 # define VMXNET3_TCD_GET_TXIDX(tcd) get_bitfield32((const __le32 *)tcd, \
354 VMXNET3_TCD_TXIDX_SHIFT, VMXNET3_TCD_TXIDX_SIZE)
355 # define vmxnet3_getRxComp(dstrcd, rcd, tmp) do { \
356 (dstrcd) = (tmp); \
357 vmxnet3_RxCompToCPU((rcd), (tmp)); \
358 } while (0)
359 # define vmxnet3_getRxDesc(dstrxd, rxd, tmp) do { \
360 (dstrxd) = (tmp); \
361 vmxnet3_RxDescToCPU((rxd), (tmp)); \
362 } while (0)
363
364 #else
365
366 # define VMXNET3_TXDESC_GET_GEN(txdesc) ((txdesc)->gen)
367 # define VMXNET3_TXDESC_GET_EOP(txdesc) ((txdesc)->eop)
368 # define VMXNET3_TCD_GET_GEN(tcd) ((tcd)->gen)
369 # define VMXNET3_TCD_GET_TXIDX(tcd) ((tcd)->txdIdx)
370 # define vmxnet3_getRxComp(dstrcd, rcd, tmp) (dstrcd) = (rcd)
371 # define vmxnet3_getRxDesc(dstrxd, rxd, tmp) (dstrxd) = (rxd)
372
373 #endif /* __BIG_ENDIAN_BITFIELD */
374
375
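/*
 * Unmap a single tx buffer according to how it was mapped (single mapping or
 * page mapping). Buffers marked VMXNET3_MAP_XDP are not unmapped here; their
 * frames are returned via xdp_return_frame_bulk() by the callers.
 */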
376 static void
vmxnet3_unmap_tx_buf(struct vmxnet3_tx_buf_info *tbi,
		     struct pci_dev *pdev)
379 {
380 u32 map_type = tbi->map_type;
381
382 if (map_type & VMXNET3_MAP_SINGLE)
383 dma_unmap_single(&pdev->dev, tbi->dma_addr, tbi->len,
384 DMA_TO_DEVICE);
385 else if (map_type & VMXNET3_MAP_PAGE)
386 dma_unmap_page(&pdev->dev, tbi->dma_addr, tbi->len,
387 DMA_TO_DEVICE);
388 else
389 BUG_ON(map_type & ~VMXNET3_MAP_XDP);
390
391 tbi->map_type = VMXNET3_MAP_NONE; /* to help debugging */
392 }
393
394
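/*
 * Release all tx ring entries of one completed packet, i.e. from
 * tx_ring.next2comp up to and including @eop_idx. The buffers are unmapped,
 * the skb is freed (or the XDP frame queued on @bq), and the number of ring
 * entries reclaimed is returned.
 */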
395 static int
vmxnet3_unmap_pkt(u32 eop_idx, struct vmxnet3_tx_queue *tq,
		  struct pci_dev *pdev, struct vmxnet3_adapter *adapter,
		  struct xdp_frame_bulk *bq)
399 {
400 struct vmxnet3_tx_buf_info *tbi;
401 int entries = 0;
402 u32 map_type;
403
404 /* no out of order completion */
405 BUG_ON(tq->buf_info[eop_idx].sop_idx != tq->tx_ring.next2comp);
406 BUG_ON(VMXNET3_TXDESC_GET_EOP(&(tq->tx_ring.base[eop_idx].txd)) != 1);
407
408 tbi = &tq->buf_info[eop_idx];
409 BUG_ON(!tbi->skb);
410 map_type = tbi->map_type;
411 VMXNET3_INC_RING_IDX_ONLY(eop_idx, tq->tx_ring.size);
412
413 while (tq->tx_ring.next2comp != eop_idx) {
414 vmxnet3_unmap_tx_buf(tq->buf_info + tq->tx_ring.next2comp,
415 pdev);
416
417 /* update next2comp w/o tx_lock. Since we are marking more,
418 * instead of less, tx ring entries avail, the worst case is
419 * that the tx routine incorrectly re-queues a pkt due to
420 * insufficient tx ring entries.
421 */
422 vmxnet3_cmd_ring_adv_next2comp(&tq->tx_ring);
423 entries++;
424 }
425
426 if (map_type & VMXNET3_MAP_XDP)
427 xdp_return_frame_bulk(tbi->xdpf, bq);
428 else
429 dev_kfree_skb_any(tbi->skb);
430
431 /* xdpf and skb are in an anonymous union. */
432 tbi->skb = NULL;
433
434 return entries;
435 }
436
437
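/*
 * Process the tx completion ring: reclaim the descriptors of all completed
 * packets and wake the queue if it was stopped and enough descriptors have
 * become available again. Returns the number of completions handled.
 */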
438 static int
vmxnet3_tq_tx_complete(struct vmxnet3_tx_queue *tq,
		       struct vmxnet3_adapter *adapter)
441 {
442 union Vmxnet3_GenericDesc *gdesc;
443 struct xdp_frame_bulk bq;
444 int completed = 0;
445
446 xdp_frame_bulk_init(&bq);
447 rcu_read_lock();
448
449 gdesc = tq->comp_ring.base + tq->comp_ring.next2proc;
450 while (VMXNET3_TCD_GET_GEN(&gdesc->tcd) == tq->comp_ring.gen) {
451 /* Prevent any &gdesc->tcd field from being (speculatively)
452 * read before (&gdesc->tcd)->gen is read.
453 */
454 dma_rmb();
455
456 completed += vmxnet3_unmap_pkt(VMXNET3_TCD_GET_TXIDX(
457 &gdesc->tcd), tq, adapter->pdev,
458 adapter, &bq);
459
460 vmxnet3_comp_ring_adv_next2proc(&tq->comp_ring);
461 gdesc = tq->comp_ring.base + tq->comp_ring.next2proc;
462 }
463 xdp_flush_frame_bulk(&bq);
464 rcu_read_unlock();
465
466 if (completed) {
467 spin_lock(&tq->tx_lock);
468 if (unlikely(vmxnet3_tq_stopped(tq, adapter) &&
469 vmxnet3_cmd_ring_desc_avail(&tq->tx_ring) >
470 VMXNET3_WAKE_QUEUE_THRESHOLD(tq) &&
471 netif_carrier_ok(adapter->netdev))) {
472 vmxnet3_tq_wake(tq, adapter);
473 }
474 spin_unlock(&tq->tx_lock);
475 }
476 return completed;
477 }
478
479
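/*
 * Discard all tx buffers still pending in the ring (unmap them and free the
 * skbs / return the XDP frames) and reset the tx and completion ring state to
 * the initial generation and indices. Statistics are left untouched.
 */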
480 static void
vmxnet3_tq_cleanup(struct vmxnet3_tx_queue *tq,
		   struct vmxnet3_adapter *adapter)
483 {
484 struct xdp_frame_bulk bq;
485 u32 map_type;
486 int i;
487
488 xdp_frame_bulk_init(&bq);
489 rcu_read_lock();
490
491 while (tq->tx_ring.next2comp != tq->tx_ring.next2fill) {
492 struct vmxnet3_tx_buf_info *tbi;
493
494 tbi = tq->buf_info + tq->tx_ring.next2comp;
495 map_type = tbi->map_type;
496
497 vmxnet3_unmap_tx_buf(tbi, adapter->pdev);
498 if (tbi->skb) {
499 if (map_type & VMXNET3_MAP_XDP)
500 xdp_return_frame_bulk(tbi->xdpf, &bq);
501 else
502 dev_kfree_skb_any(tbi->skb);
503 tbi->skb = NULL;
504 }
505 vmxnet3_cmd_ring_adv_next2comp(&tq->tx_ring);
506 }
507
508 xdp_flush_frame_bulk(&bq);
509 rcu_read_unlock();
510
511 /* sanity check, verify all buffers are indeed unmapped */
512 for (i = 0; i < tq->tx_ring.size; i++)
513 BUG_ON(tq->buf_info[i].map_type != VMXNET3_MAP_NONE);
514
515 tq->tx_ring.gen = VMXNET3_INIT_GEN;
516 tq->tx_ring.next2fill = tq->tx_ring.next2comp = 0;
517
518 tq->comp_ring.gen = VMXNET3_INIT_GEN;
519 tq->comp_ring.next2proc = 0;
520 }
521
522
523 static void
vmxnet3_tq_destroy(struct vmxnet3_tx_queue *tq,
		   struct vmxnet3_adapter *adapter)
526 {
527 if (tq->tx_ring.base) {
528 dma_free_coherent(&adapter->pdev->dev, tq->tx_ring.size *
529 sizeof(struct Vmxnet3_TxDesc),
530 tq->tx_ring.base, tq->tx_ring.basePA);
531 tq->tx_ring.base = NULL;
532 }
533 if (tq->data_ring.base) {
534 dma_free_coherent(&adapter->pdev->dev,
535 tq->data_ring.size * tq->txdata_desc_size,
536 tq->data_ring.base, tq->data_ring.basePA);
537 tq->data_ring.base = NULL;
538 }
539 if (tq->ts_ring.base) {
540 dma_free_coherent(&adapter->pdev->dev,
541 tq->tx_ring.size * tq->tx_ts_desc_size,
542 tq->ts_ring.base, tq->ts_ring.basePA);
543 tq->ts_ring.base = NULL;
544 }
545 if (tq->comp_ring.base) {
546 dma_free_coherent(&adapter->pdev->dev, tq->comp_ring.size *
547 sizeof(struct Vmxnet3_TxCompDesc),
548 tq->comp_ring.base, tq->comp_ring.basePA);
549 tq->comp_ring.base = NULL;
550 }
551 kfree(tq->buf_info);
552 tq->buf_info = NULL;
553 }
554
555
556 /* Destroy all tx queues */
557 void
vmxnet3_tq_destroy_all(struct vmxnet3_adapter *adapter)
559 {
560 int i;
561
562 for (i = 0; i < adapter->num_tx_queues; i++)
563 vmxnet3_tq_destroy(&adapter->tx_queue[i], adapter);
564 }
565
566
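/*
 * Zero the tx, data, (optional) timestamp and completion rings, reset their
 * fill/completion indices and generation bits, and clear the buf_info
 * bookkeeping. Queue statistics are deliberately not reset.
 */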
567 static void
vmxnet3_tq_init(struct vmxnet3_tx_queue *tq,
		struct vmxnet3_adapter *adapter)
570 {
571 int i;
572
573 /* reset the tx ring contents to 0 and reset the tx ring states */
574 memset(tq->tx_ring.base, 0, tq->tx_ring.size *
575 sizeof(struct Vmxnet3_TxDesc));
576 tq->tx_ring.next2fill = tq->tx_ring.next2comp = 0;
577 tq->tx_ring.gen = VMXNET3_INIT_GEN;
578
579 memset(tq->data_ring.base, 0,
580 tq->data_ring.size * tq->txdata_desc_size);
581
582 if (tq->ts_ring.base)
583 memset(tq->ts_ring.base, 0,
584 tq->tx_ring.size * tq->tx_ts_desc_size);
585
586 /* reset the tx comp ring contents to 0 and reset comp ring states */
587 memset(tq->comp_ring.base, 0, tq->comp_ring.size *
588 sizeof(struct Vmxnet3_TxCompDesc));
589 tq->comp_ring.next2proc = 0;
590 tq->comp_ring.gen = VMXNET3_INIT_GEN;
591
592 /* reset the bookkeeping data */
593 memset(tq->buf_info, 0, sizeof(tq->buf_info[0]) * tq->tx_ring.size);
594 for (i = 0; i < tq->tx_ring.size; i++)
595 tq->buf_info[i].map_type = VMXNET3_MAP_NONE;
596
597 /* stats are not reset */
598 }
599
600
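/*
 * Allocate the DMA-coherent tx, data, (optional) timestamp and completion
 * rings plus the buf_info array for one tx queue. On any allocation failure
 * (other than the timestamp ring, which is simply disabled) everything
 * allocated so far is freed and -ENOMEM is returned.
 */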
601 static int
vmxnet3_tq_create(struct vmxnet3_tx_queue *tq,
		  struct vmxnet3_adapter *adapter)
604 {
605 BUG_ON(tq->tx_ring.base || tq->data_ring.base ||
606 tq->comp_ring.base || tq->buf_info);
607
608 tq->tx_ring.base = dma_alloc_coherent(&adapter->pdev->dev,
609 tq->tx_ring.size * sizeof(struct Vmxnet3_TxDesc),
610 &tq->tx_ring.basePA, GFP_KERNEL);
611 if (!tq->tx_ring.base) {
612 netdev_err(adapter->netdev, "failed to allocate tx ring\n");
613 goto err;
614 }
615
616 tq->data_ring.base = dma_alloc_coherent(&adapter->pdev->dev,
617 tq->data_ring.size * tq->txdata_desc_size,
618 &tq->data_ring.basePA, GFP_KERNEL);
619 if (!tq->data_ring.base) {
620 netdev_err(adapter->netdev, "failed to allocate tx data ring\n");
621 goto err;
622 }
623
624 if (tq->tx_ts_desc_size != 0) {
625 tq->ts_ring.base = dma_alloc_coherent(&adapter->pdev->dev,
626 tq->tx_ring.size * tq->tx_ts_desc_size,
627 &tq->ts_ring.basePA, GFP_KERNEL);
628 if (!tq->ts_ring.base) {
629 netdev_err(adapter->netdev, "failed to allocate tx ts ring\n");
630 tq->tx_ts_desc_size = 0;
631 }
632 } else {
633 tq->ts_ring.base = NULL;
634 }
635
636 tq->comp_ring.base = dma_alloc_coherent(&adapter->pdev->dev,
637 tq->comp_ring.size * sizeof(struct Vmxnet3_TxCompDesc),
638 &tq->comp_ring.basePA, GFP_KERNEL);
639 if (!tq->comp_ring.base) {
640 netdev_err(adapter->netdev, "failed to allocate tx comp ring\n");
641 goto err;
642 }
643
644 tq->buf_info = kcalloc_node(tq->tx_ring.size, sizeof(tq->buf_info[0]),
645 GFP_KERNEL,
646 dev_to_node(&adapter->pdev->dev));
647 if (!tq->buf_info)
648 goto err;
649
650 return 0;
651
652 err:
653 vmxnet3_tq_destroy(tq, adapter);
654 return -ENOMEM;
655 }
656
657 static void
vmxnet3_tq_cleanup_all(struct vmxnet3_adapter *adapter)
659 {
660 int i;
661
662 for (i = 0; i < adapter->num_tx_queues; i++)
663 vmxnet3_tq_cleanup(&adapter->tx_queue[i], adapter);
664 }
665
/*
 * Starting from ring->next2fill, allocate rx buffers for the given ring
 * of the rx queue and update the rx desc. Stop after @num_to_alloc buffers
 * are allocated or allocation fails.
 */
671
672 static int
vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx,
			int num_to_alloc, struct vmxnet3_adapter *adapter)
675 {
676 int num_allocated = 0;
677 struct vmxnet3_rx_buf_info *rbi_base = rq->buf_info[ring_idx];
678 struct vmxnet3_cmd_ring *ring = &rq->rx_ring[ring_idx];
679 u32 val;
680
681 while (num_allocated <= num_to_alloc) {
682 struct vmxnet3_rx_buf_info *rbi;
683 union Vmxnet3_GenericDesc *gd;
684
685 rbi = rbi_base + ring->next2fill;
686 gd = ring->base + ring->next2fill;
687 rbi->comp_state = VMXNET3_RXD_COMP_PENDING;
688
689 if (rbi->buf_type == VMXNET3_RX_BUF_XDP) {
690 void *data = vmxnet3_pp_get_buff(rq->page_pool,
691 &rbi->dma_addr,
692 GFP_KERNEL);
693 if (!data) {
694 rq->stats.rx_buf_alloc_failure++;
695 break;
696 }
697 rbi->page = virt_to_page(data);
698 val = VMXNET3_RXD_BTYPE_HEAD << VMXNET3_RXD_BTYPE_SHIFT;
699 } else if (rbi->buf_type == VMXNET3_RX_BUF_SKB) {
700 if (rbi->skb == NULL) {
701 rbi->skb = __netdev_alloc_skb_ip_align(adapter->netdev,
702 rbi->len,
703 GFP_KERNEL);
704 if (unlikely(rbi->skb == NULL)) {
705 rq->stats.rx_buf_alloc_failure++;
706 break;
707 }
708
709 rbi->dma_addr = dma_map_single(
710 &adapter->pdev->dev,
711 rbi->skb->data, rbi->len,
712 DMA_FROM_DEVICE);
713 if (dma_mapping_error(&adapter->pdev->dev,
714 rbi->dma_addr)) {
715 dev_kfree_skb_any(rbi->skb);
716 rbi->skb = NULL;
717 rq->stats.rx_buf_alloc_failure++;
718 break;
719 }
720 } else {
721 /* rx buffer skipped by the device */
722 }
723 val = VMXNET3_RXD_BTYPE_HEAD << VMXNET3_RXD_BTYPE_SHIFT;
724 } else {
725 BUG_ON(rbi->buf_type != VMXNET3_RX_BUF_PAGE ||
726 rbi->len != PAGE_SIZE);
727
728 if (rbi->page == NULL) {
729 rbi->page = alloc_page(GFP_ATOMIC);
730 if (unlikely(rbi->page == NULL)) {
731 rq->stats.rx_buf_alloc_failure++;
732 break;
733 }
734 rbi->dma_addr = dma_map_page(
735 &adapter->pdev->dev,
736 rbi->page, 0, PAGE_SIZE,
737 DMA_FROM_DEVICE);
738 if (dma_mapping_error(&adapter->pdev->dev,
739 rbi->dma_addr)) {
740 put_page(rbi->page);
741 rbi->page = NULL;
742 rq->stats.rx_buf_alloc_failure++;
743 break;
744 }
745 } else {
746 /* rx buffers skipped by the device */
747 }
748 val = VMXNET3_RXD_BTYPE_BODY << VMXNET3_RXD_BTYPE_SHIFT;
749 }
750
751 gd->rxd.addr = cpu_to_le64(rbi->dma_addr);
752 gd->dword[2] = cpu_to_le32((!ring->gen << VMXNET3_RXD_GEN_SHIFT)
753 | val | rbi->len);
754
		/* Fill the last buffer but don't mark it ready, or else the
		 * device will think that the queue is full */
757 if (num_allocated == num_to_alloc) {
758 rbi->comp_state = VMXNET3_RXD_COMP_DONE;
759 break;
760 }
761
762 gd->dword[2] |= cpu_to_le32(ring->gen << VMXNET3_RXD_GEN_SHIFT);
763 num_allocated++;
764 vmxnet3_cmd_ring_adv_next2fill(ring);
765 }
766
767 netdev_dbg(adapter->netdev,
768 "alloc_rx_buf: %d allocated, next2fill %u, next2comp %u\n",
769 num_allocated, ring->next2fill, ring->next2comp);
770
771 /* so that the device can distinguish a full ring and an empty ring */
772 BUG_ON(num_allocated != 0 && ring->next2fill == ring->next2comp);
773
774 return num_allocated;
775 }
776
777
778 static void
vmxnet3_append_frag(struct sk_buff *skb, struct Vmxnet3_RxCompDesc *rcd,
		    struct vmxnet3_rx_buf_info *rbi)
781 {
782 skb_frag_t *frag = skb_shinfo(skb)->frags + skb_shinfo(skb)->nr_frags;
783
784 BUG_ON(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS);
785
786 skb_frag_fill_page_desc(frag, rbi->page, 0, rcd->len);
787 skb->data_len += rcd->len;
788 skb->truesize += PAGE_SIZE;
789 skb_shinfo(skb)->nr_frags++;
790 }
791
792
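/*
 * Fill tx descriptors for the packet: an SOP descriptor pointing at the
 * copied headers in the data ring (if any), then descriptors for the rest of
 * the linear part and for each page fragment. Buffers larger than
 * VMXNET3_MAX_TX_BUF_SIZE are split across multiple descriptors. Returns 0 on
 * success or -EFAULT on a DMA mapping error.
 */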
793 static int
vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx,
		struct vmxnet3_tx_queue *tq, struct pci_dev *pdev,
		struct vmxnet3_adapter *adapter)
797 {
798 u32 dw2, len;
799 unsigned long buf_offset;
800 int i;
801 union Vmxnet3_GenericDesc *gdesc;
802 struct vmxnet3_tx_buf_info *tbi = NULL;
803
804 BUG_ON(ctx->copy_size > skb_headlen(skb));
805
806 /* use the previous gen bit for the SOP desc */
807 dw2 = (tq->tx_ring.gen ^ 0x1) << VMXNET3_TXD_GEN_SHIFT;
808
809 ctx->sop_txd = tq->tx_ring.base + tq->tx_ring.next2fill;
810 gdesc = ctx->sop_txd; /* both loops below can be skipped */
811
812 /* no need to map the buffer if headers are copied */
813 if (ctx->copy_size) {
814 ctx->sop_txd->txd.addr = cpu_to_le64(tq->data_ring.basePA +
815 tq->tx_ring.next2fill *
816 tq->txdata_desc_size);
817 ctx->sop_txd->dword[2] = cpu_to_le32(dw2 | ctx->copy_size);
818 ctx->sop_txd->dword[3] = 0;
819
820 tbi = tq->buf_info + tq->tx_ring.next2fill;
821 tbi->map_type = VMXNET3_MAP_NONE;
822
823 netdev_dbg(adapter->netdev,
824 "txd[%u]: 0x%Lx 0x%x 0x%x\n",
825 tq->tx_ring.next2fill,
826 le64_to_cpu(ctx->sop_txd->txd.addr),
827 ctx->sop_txd->dword[2], ctx->sop_txd->dword[3]);
828 vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
829
830 /* use the right gen for non-SOP desc */
831 dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT;
832 }
833
834 /* linear part can use multiple tx desc if it's big */
835 len = skb_headlen(skb) - ctx->copy_size;
836 buf_offset = ctx->copy_size;
837 while (len) {
838 u32 buf_size;
839
840 if (len < VMXNET3_MAX_TX_BUF_SIZE) {
841 buf_size = len;
842 dw2 |= len;
843 } else {
844 buf_size = VMXNET3_MAX_TX_BUF_SIZE;
845 /* spec says that for TxDesc.len, 0 == 2^14 */
846 }
847
848 tbi = tq->buf_info + tq->tx_ring.next2fill;
849 tbi->map_type = VMXNET3_MAP_SINGLE;
850 tbi->dma_addr = dma_map_single(&adapter->pdev->dev,
851 skb->data + buf_offset, buf_size,
852 DMA_TO_DEVICE);
853 if (dma_mapping_error(&adapter->pdev->dev, tbi->dma_addr))
854 return -EFAULT;
855
856 tbi->len = buf_size;
857
858 gdesc = tq->tx_ring.base + tq->tx_ring.next2fill;
859 BUG_ON(gdesc->txd.gen == tq->tx_ring.gen);
860
861 gdesc->txd.addr = cpu_to_le64(tbi->dma_addr);
862 gdesc->dword[2] = cpu_to_le32(dw2);
863 gdesc->dword[3] = 0;
864
865 netdev_dbg(adapter->netdev,
866 "txd[%u]: 0x%Lx 0x%x 0x%x\n",
867 tq->tx_ring.next2fill, le64_to_cpu(gdesc->txd.addr),
868 le32_to_cpu(gdesc->dword[2]), gdesc->dword[3]);
869 vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
870 dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT;
871
872 len -= buf_size;
873 buf_offset += buf_size;
874 }
875
876 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
877 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
878 u32 buf_size;
879
880 buf_offset = 0;
881 len = skb_frag_size(frag);
882 while (len) {
883 tbi = tq->buf_info + tq->tx_ring.next2fill;
884 if (len < VMXNET3_MAX_TX_BUF_SIZE) {
885 buf_size = len;
886 dw2 |= len;
887 } else {
888 buf_size = VMXNET3_MAX_TX_BUF_SIZE;
889 /* spec says that for TxDesc.len, 0 == 2^14 */
890 }
891 tbi->map_type = VMXNET3_MAP_PAGE;
892 tbi->dma_addr = skb_frag_dma_map(&adapter->pdev->dev, frag,
893 buf_offset, buf_size,
894 DMA_TO_DEVICE);
895 if (dma_mapping_error(&adapter->pdev->dev, tbi->dma_addr))
896 return -EFAULT;
897
898 tbi->len = buf_size;
899
900 gdesc = tq->tx_ring.base + tq->tx_ring.next2fill;
901 BUG_ON(gdesc->txd.gen == tq->tx_ring.gen);
902
903 gdesc->txd.addr = cpu_to_le64(tbi->dma_addr);
904 gdesc->dword[2] = cpu_to_le32(dw2);
905 gdesc->dword[3] = 0;
906
907 netdev_dbg(adapter->netdev,
908 "txd[%u]: 0x%llx %u %u\n",
909 tq->tx_ring.next2fill, le64_to_cpu(gdesc->txd.addr),
910 le32_to_cpu(gdesc->dword[2]), gdesc->dword[3]);
911 vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
912 dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT;
913
914 len -= buf_size;
915 buf_offset += buf_size;
916 }
917 }
918
919 ctx->eop_txd = gdesc;
920
921 /* set the last buf_info for the pkt */
922 tbi->skb = skb;
923 tbi->sop_idx = ctx->sop_txd - tq->tx_ring.base;
924 if (tq->tx_ts_desc_size != 0) {
925 ctx->ts_txd = (struct Vmxnet3_TxTSDesc *)((u8 *)tq->ts_ring.base +
926 tbi->sop_idx * tq->tx_ts_desc_size);
927 ctx->ts_txd->ts.tsi = 0;
928 }
929
930 return 0;
931 }
932
933
934 /* Init all tx queues */
935 static void
vmxnet3_tq_init_all(struct vmxnet3_adapter *adapter)
937 {
938 int i;
939
940 for (i = 0; i < adapter->num_tx_queues; i++)
941 vmxnet3_tq_init(&adapter->tx_queue[i], adapter);
942 }
943
944
945 /*
946 * parse relevant protocol headers:
947 * For a tso pkt, relevant headers are L2/3/4 including options
948 * For a pkt requesting csum offloading, they are L2/3 and may include L4
949 * if it's a TCP/UDP pkt
950 *
951 * Returns:
952 * -1: error happens during parsing
953 * 0: protocol headers parsed, but too big to be copied
954 * 1: protocol headers parsed and copied
955 *
956 * Other effects:
957 * 1. related *ctx fields are updated.
958 * 2. ctx->copy_size is # of bytes copied
959 * 3. the portion to be copied is guaranteed to be in the linear part
960 *
961 */
962 static int
vmxnet3_parse_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
		  struct vmxnet3_tx_ctx *ctx,
		  struct vmxnet3_adapter *adapter)
966 {
967 u8 protocol = 0;
968
969 if (ctx->mss) { /* TSO */
970 if (VMXNET3_VERSION_GE_4(adapter) && skb->encapsulation) {
971 ctx->l4_offset = skb_inner_transport_offset(skb);
972 ctx->l4_hdr_size = inner_tcp_hdrlen(skb);
973 ctx->copy_size = ctx->l4_offset + ctx->l4_hdr_size;
974 } else {
975 ctx->l4_offset = skb_transport_offset(skb);
976 ctx->l4_hdr_size = tcp_hdrlen(skb);
977 ctx->copy_size = ctx->l4_offset + ctx->l4_hdr_size;
978 }
979 } else {
980 if (skb->ip_summed == CHECKSUM_PARTIAL) {
981 /* For encap packets, skb_checksum_start_offset refers
982 * to inner L4 offset. Thus, below works for encap as
983 * well as non-encap case
984 */
985 ctx->l4_offset = skb_checksum_start_offset(skb);
986
987 if (VMXNET3_VERSION_GE_4(adapter) &&
988 skb->encapsulation) {
989 struct iphdr *iph = inner_ip_hdr(skb);
990
991 if (iph->version == 4) {
992 protocol = iph->protocol;
993 } else {
994 const struct ipv6hdr *ipv6h;
995
996 ipv6h = inner_ipv6_hdr(skb);
997 protocol = ipv6h->nexthdr;
998 }
999 } else {
1000 if (ctx->ipv4) {
1001 const struct iphdr *iph = ip_hdr(skb);
1002
1003 protocol = iph->protocol;
1004 } else if (ctx->ipv6) {
1005 const struct ipv6hdr *ipv6h;
1006
1007 ipv6h = ipv6_hdr(skb);
1008 protocol = ipv6h->nexthdr;
1009 }
1010 }
1011
1012 switch (protocol) {
1013 case IPPROTO_TCP:
1014 ctx->l4_hdr_size = skb->encapsulation ? inner_tcp_hdrlen(skb) :
1015 tcp_hdrlen(skb);
1016 break;
1017 case IPPROTO_UDP:
1018 ctx->l4_hdr_size = sizeof(struct udphdr);
1019 break;
1020 default:
1021 ctx->l4_hdr_size = 0;
1022 break;
1023 }
1024
1025 ctx->copy_size = min(ctx->l4_offset +
1026 ctx->l4_hdr_size, skb->len);
1027 } else {
1028 ctx->l4_offset = 0;
1029 ctx->l4_hdr_size = 0;
1030 /* copy as much as allowed */
1031 ctx->copy_size = min_t(unsigned int,
1032 tq->txdata_desc_size,
1033 skb_headlen(skb));
1034 }
1035
1036 if (skb->len <= tq->txdata_desc_size)
1037 ctx->copy_size = skb->len;
1038
1039 /* make sure headers are accessible directly */
1040 if (unlikely(!pskb_may_pull(skb, ctx->copy_size)))
1041 goto err;
1042 }
1043
1044 if (unlikely(ctx->copy_size > tq->txdata_desc_size)) {
1045 tq->stats.oversized_hdr++;
1046 ctx->copy_size = 0;
1047 return 0;
1048 }
1049
1050 return 1;
1051 err:
1052 return -1;
1053 }
1054
1055 /*
1056 * copy relevant protocol headers to the transmit ring:
1057 * For a tso pkt, relevant headers are L2/3/4 including options
1058 * For a pkt requesting csum offloading, they are L2/3 and may include L4
1059 * if it's a TCP/UDP pkt
 *
 * Note that this requires that vmxnet3_parse_hdr be called first to set the
 * appropriate bits in ctx
1064 */
1065 static void
vmxnet3_copy_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
		 struct vmxnet3_tx_ctx *ctx,
		 struct vmxnet3_adapter *adapter)
1069 {
1070 struct Vmxnet3_TxDataDesc *tdd;
1071
1072 tdd = (struct Vmxnet3_TxDataDesc *)((u8 *)tq->data_ring.base +
1073 tq->tx_ring.next2fill *
1074 tq->txdata_desc_size);
1075
1076 memcpy(tdd->data, skb->data, ctx->copy_size);
1077 netdev_dbg(adapter->netdev,
1078 "copy %u bytes to dataRing[%u]\n",
1079 ctx->copy_size, tq->tx_ring.next2fill);
1080 }
1081
1082
1083 static void
vmxnet3_prepare_inner_tso(struct sk_buff *skb,
			  struct vmxnet3_tx_ctx *ctx)
1086 {
1087 struct tcphdr *tcph = inner_tcp_hdr(skb);
1088 struct iphdr *iph = inner_ip_hdr(skb);
1089
1090 if (iph->version == 4) {
1091 iph->check = 0;
1092 tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, 0,
1093 IPPROTO_TCP, 0);
1094 } else {
1095 struct ipv6hdr *iph = inner_ipv6_hdr(skb);
1096
1097 tcph->check = ~csum_ipv6_magic(&iph->saddr, &iph->daddr, 0,
1098 IPPROTO_TCP, 0);
1099 }
1100 }
1101
1102 static void
vmxnet3_prepare_tso(struct sk_buff *skb,
		    struct vmxnet3_tx_ctx *ctx)
1105 {
1106 struct tcphdr *tcph = tcp_hdr(skb);
1107
1108 if (ctx->ipv4) {
1109 struct iphdr *iph = ip_hdr(skb);
1110
1111 iph->check = 0;
1112 tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, 0,
1113 IPPROTO_TCP, 0);
1114 } else if (ctx->ipv6) {
1115 tcp_v6_gso_csum_prep(skb);
1116 }
1117 }
1118
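/*
 * Estimate the number of tx descriptors the skb will consume: one per
 * VMXNET3_MAX_TX_BUF_SIZE chunk of the linear part and of each fragment,
 * plus one extra to cover the header/data-ring descriptor.
 */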
static int txd_estimate(const struct sk_buff *skb)
1120 {
1121 int count = VMXNET3_TXD_NEEDED(skb_headlen(skb)) + 1;
1122 int i;
1123
1124 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1125 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1126
1127 count += VMXNET3_TXD_NEEDED(skb_frag_size(frag));
1128 }
1129 return count;
1130 }
1131
1132 /*
 * Transmits a pkt through a given tq
 * Returns:
 *    NETDEV_TX_OK:      descriptors are set up successfully
 *    NETDEV_TX_OK:      error occurred, the pkt is dropped
 *    NETDEV_TX_BUSY:    tx ring is full, queue is stopped
1138 *
1139 * Side-effects:
1140 * 1. tx ring may be changed
1141 * 2. tq stats may be updated accordingly
1142 * 3. shared->txNumDeferred may be updated
1143 */
1144
1145 static int
vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
		struct vmxnet3_adapter *adapter, struct net_device *netdev)
1148 {
1149 int ret;
1150 u32 count;
1151 int num_pkts;
1152 int tx_num_deferred;
1153 unsigned long flags;
1154 struct vmxnet3_tx_ctx ctx;
1155 union Vmxnet3_GenericDesc *gdesc;
1156 #ifdef __BIG_ENDIAN_BITFIELD
1157 /* Use temporary descriptor to avoid touching bits multiple times */
1158 union Vmxnet3_GenericDesc tempTxDesc;
1159 #endif
1160
1161 count = txd_estimate(skb);
1162
1163 ctx.ipv4 = (vlan_get_protocol(skb) == cpu_to_be16(ETH_P_IP));
1164 ctx.ipv6 = (vlan_get_protocol(skb) == cpu_to_be16(ETH_P_IPV6));
1165
1166 ctx.mss = skb_shinfo(skb)->gso_size;
1167 if (ctx.mss) {
1168 if (skb_header_cloned(skb)) {
1169 if (unlikely(pskb_expand_head(skb, 0, 0,
1170 GFP_ATOMIC) != 0)) {
1171 tq->stats.drop_tso++;
1172 goto drop_pkt;
1173 }
1174 tq->stats.copy_skb_header++;
1175 }
1176 if (unlikely(count > VMXNET3_MAX_TSO_TXD_PER_PKT)) {
1177 /* tso pkts must not use more than
1178 * VMXNET3_MAX_TSO_TXD_PER_PKT entries
1179 */
1180 if (skb_linearize(skb) != 0) {
1181 tq->stats.drop_too_many_frags++;
1182 goto drop_pkt;
1183 }
1184 tq->stats.linearized++;
1185
1186 /* recalculate the # of descriptors to use */
1187 count = VMXNET3_TXD_NEEDED(skb_headlen(skb)) + 1;
1188 if (unlikely(count > VMXNET3_MAX_TSO_TXD_PER_PKT)) {
1189 tq->stats.drop_too_many_frags++;
1190 goto drop_pkt;
1191 }
1192 }
1193 if (skb->encapsulation) {
1194 vmxnet3_prepare_inner_tso(skb, &ctx);
1195 } else {
1196 vmxnet3_prepare_tso(skb, &ctx);
1197 }
1198 } else {
1199 if (unlikely(count > VMXNET3_MAX_TXD_PER_PKT)) {
1200
1201 /* non-tso pkts must not use more than
1202 * VMXNET3_MAX_TXD_PER_PKT entries
1203 */
1204 if (skb_linearize(skb) != 0) {
1205 tq->stats.drop_too_many_frags++;
1206 goto drop_pkt;
1207 }
1208 tq->stats.linearized++;
1209
1210 /* recalculate the # of descriptors to use */
1211 count = VMXNET3_TXD_NEEDED(skb_headlen(skb)) + 1;
1212 }
1213 }
1214
1215 ret = vmxnet3_parse_hdr(skb, tq, &ctx, adapter);
1216 if (ret >= 0) {
1217 BUG_ON(ret <= 0 && ctx.copy_size != 0);
1218 /* hdrs parsed, check against other limits */
1219 if (ctx.mss) {
1220 if (unlikely(ctx.l4_offset + ctx.l4_hdr_size >
1221 VMXNET3_MAX_TX_BUF_SIZE)) {
1222 tq->stats.drop_oversized_hdr++;
1223 goto drop_pkt;
1224 }
1225 } else {
1226 if (skb->ip_summed == CHECKSUM_PARTIAL) {
1227 if (unlikely(ctx.l4_offset +
1228 skb->csum_offset >
1229 VMXNET3_MAX_CSUM_OFFSET)) {
1230 tq->stats.drop_oversized_hdr++;
1231 goto drop_pkt;
1232 }
1233 }
1234 }
1235 } else {
1236 tq->stats.drop_hdr_inspect_err++;
1237 goto drop_pkt;
1238 }
1239
1240 spin_lock_irqsave(&tq->tx_lock, flags);
1241
1242 if (count > vmxnet3_cmd_ring_desc_avail(&tq->tx_ring)) {
1243 tq->stats.tx_ring_full++;
1244 netdev_dbg(adapter->netdev,
1245 "tx queue stopped on %s, next2comp %u"
1246 " next2fill %u\n", adapter->netdev->name,
1247 tq->tx_ring.next2comp, tq->tx_ring.next2fill);
1248
1249 vmxnet3_tq_stop(tq, adapter);
1250 spin_unlock_irqrestore(&tq->tx_lock, flags);
1251 return NETDEV_TX_BUSY;
1252 }
1253
1254
1255 vmxnet3_copy_hdr(skb, tq, &ctx, adapter);
1256
1257 /* fill tx descs related to addr & len */
1258 if (vmxnet3_map_pkt(skb, &ctx, tq, adapter->pdev, adapter))
1259 goto unlock_drop_pkt;
1260
1261 /* setup the EOP desc */
1262 ctx.eop_txd->dword[3] = cpu_to_le32(VMXNET3_TXD_CQ | VMXNET3_TXD_EOP);
1263
1264 /* setup the SOP desc */
1265 #ifdef __BIG_ENDIAN_BITFIELD
1266 gdesc = &tempTxDesc;
1267 gdesc->dword[2] = ctx.sop_txd->dword[2];
1268 gdesc->dword[3] = ctx.sop_txd->dword[3];
1269 #else
1270 gdesc = ctx.sop_txd;
1271 #endif
1272 tx_num_deferred = le32_to_cpu(tq->shared->txNumDeferred);
1273 if (ctx.mss) {
1274 if (VMXNET3_VERSION_GE_4(adapter) && skb->encapsulation) {
1275 gdesc->txd.hlen = ctx.l4_offset + ctx.l4_hdr_size;
1276 if (VMXNET3_VERSION_GE_7(adapter)) {
1277 gdesc->txd.om = VMXNET3_OM_TSO;
1278 gdesc->txd.ext1 = 1;
1279 } else {
1280 gdesc->txd.om = VMXNET3_OM_ENCAP;
1281 }
1282 gdesc->txd.msscof = ctx.mss;
1283
1284 if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)
1285 gdesc->txd.oco = 1;
1286 } else {
1287 gdesc->txd.hlen = ctx.l4_offset + ctx.l4_hdr_size;
1288 gdesc->txd.om = VMXNET3_OM_TSO;
1289 gdesc->txd.msscof = ctx.mss;
1290 }
1291 num_pkts = (skb->len - gdesc->txd.hlen + ctx.mss - 1) / ctx.mss;
1292 } else {
1293 if (skb->ip_summed == CHECKSUM_PARTIAL) {
1294 if (VMXNET3_VERSION_GE_4(adapter) &&
1295 skb->encapsulation) {
1296 gdesc->txd.hlen = ctx.l4_offset +
1297 ctx.l4_hdr_size;
1298 if (VMXNET3_VERSION_GE_7(adapter)) {
1299 gdesc->txd.om = VMXNET3_OM_CSUM;
1300 gdesc->txd.msscof = ctx.l4_offset +
1301 skb->csum_offset;
1302 gdesc->txd.ext1 = 1;
1303 } else {
1304 gdesc->txd.om = VMXNET3_OM_ENCAP;
1305 gdesc->txd.msscof = 0; /* Reserved */
1306 }
1307 } else {
1308 gdesc->txd.hlen = ctx.l4_offset;
1309 gdesc->txd.om = VMXNET3_OM_CSUM;
1310 gdesc->txd.msscof = ctx.l4_offset +
1311 skb->csum_offset;
1312 }
1313 } else {
1314 gdesc->txd.om = 0;
1315 gdesc->txd.msscof = 0;
1316 }
1317 num_pkts = 1;
1318 }
1319 le32_add_cpu(&tq->shared->txNumDeferred, num_pkts);
1320 tx_num_deferred += num_pkts;
1321
1322 if (skb_vlan_tag_present(skb)) {
1323 gdesc->txd.ti = 1;
1324 gdesc->txd.tci = skb_vlan_tag_get(skb);
1325 }
1326
1327 if (tq->tx_ts_desc_size != 0 &&
1328 adapter->latencyConf->sampleRate != 0) {
1329 if (vmxnet3_apply_timestamp(tq, adapter->latencyConf->sampleRate)) {
1330 ctx.ts_txd->ts.tsData = vmxnet3_get_cycles(VMXNET3_PMC_PSEUDO_TSC);
1331 ctx.ts_txd->ts.tsi = 1;
1332 }
1333 }
1334
1335 /* Ensure that the write to (&gdesc->txd)->gen will be observed after
1336 * all other writes to &gdesc->txd.
1337 */
1338 dma_wmb();
1339
1340 /* finally flips the GEN bit of the SOP desc. */
1341 gdesc->dword[2] = cpu_to_le32(le32_to_cpu(gdesc->dword[2]) ^
1342 VMXNET3_TXD_GEN);
1343 #ifdef __BIG_ENDIAN_BITFIELD
1344 /* Finished updating in bitfields of Tx Desc, so write them in original
1345 * place.
1346 */
1347 vmxnet3_TxDescToLe((struct Vmxnet3_TxDesc *)gdesc,
1348 (struct Vmxnet3_TxDesc *)ctx.sop_txd);
1349 gdesc = ctx.sop_txd;
1350 #endif
1351 netdev_dbg(adapter->netdev,
1352 "txd[%u]: SOP 0x%Lx 0x%x 0x%x\n",
1353 (u32)(ctx.sop_txd -
1354 tq->tx_ring.base), le64_to_cpu(gdesc->txd.addr),
1355 le32_to_cpu(gdesc->dword[2]), le32_to_cpu(gdesc->dword[3]));
1356
1357 spin_unlock_irqrestore(&tq->tx_lock, flags);
1358
1359 if (tx_num_deferred >= le32_to_cpu(tq->shared->txThreshold)) {
1360 tq->shared->txNumDeferred = 0;
1361 VMXNET3_WRITE_BAR0_REG(adapter,
1362 adapter->tx_prod_offset + tq->qid * 8,
1363 tq->tx_ring.next2fill);
1364 }
1365
1366 return NETDEV_TX_OK;
1367
1368 unlock_drop_pkt:
1369 spin_unlock_irqrestore(&tq->tx_lock, flags);
1370 drop_pkt:
1371 tq->stats.drop_total++;
1372 dev_kfree_skb_any(skb);
1373 return NETDEV_TX_OK;
1374 }
1375
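/*
 * Create a page pool of @size pages for the rx queue and register it as the
 * memory model of the queue's xdp_rxq_info. The DMA direction is
 * bidirectional when an XDP program is attached, DMA_FROM_DEVICE otherwise.
 */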
1376 static int
vmxnet3_create_pp(struct vmxnet3_adapter *adapter,
		  struct vmxnet3_rx_queue *rq, int size)
1379 {
1380 bool xdp_prog = vmxnet3_xdp_enabled(adapter);
1381 const struct page_pool_params pp_params = {
1382 .order = 0,
1383 .flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
1384 .pool_size = size,
1385 .nid = NUMA_NO_NODE,
1386 .dev = &adapter->pdev->dev,
1387 .offset = VMXNET3_XDP_RX_OFFSET,
1388 .max_len = VMXNET3_XDP_MAX_FRSIZE,
1389 .dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE,
1390 };
1391 struct page_pool *pp;
1392 int err;
1393
1394 pp = page_pool_create(&pp_params);
1395 if (IS_ERR(pp))
1396 return PTR_ERR(pp);
1397
1398 err = xdp_rxq_info_reg(&rq->xdp_rxq, adapter->netdev, rq->qid,
1399 rq->napi.napi_id);
1400 if (err < 0)
1401 goto err_free_pp;
1402
1403 err = xdp_rxq_info_reg_mem_model(&rq->xdp_rxq, MEM_TYPE_PAGE_POOL, pp);
1404 if (err)
1405 goto err_unregister_rxq;
1406
1407 rq->page_pool = pp;
1408
1409 return 0;
1410
1411 err_unregister_rxq:
1412 xdp_rxq_info_unreg(&rq->xdp_rxq);
1413 err_free_pp:
1414 page_pool_destroy(pp);
1415
1416 return err;
1417 }
1418
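/*
 * Allocate a receive buffer from the page pool. Returns the page's virtual
 * address and stores the offset-adjusted DMA address in @dma_addr, or returns
 * NULL if the pool is exhausted. Typical use in the rx refill path:
 *
 *	void *data = vmxnet3_pp_get_buff(rq->page_pool, &rbi->dma_addr,
 *					 GFP_KERNEL);
 *	if (data)
 *		rbi->page = virt_to_page(data);
 */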
1419 void *
vmxnet3_pp_get_buff(struct page_pool *pp, dma_addr_t *dma_addr,
		    gfp_t gfp_mask)
1422 {
1423 struct page *page;
1424
1425 page = page_pool_alloc_pages(pp, gfp_mask | __GFP_NOWARN);
1426 if (unlikely(!page))
1427 return NULL;
1428
1429 *dma_addr = page_pool_get_dma_addr(page) + pp->p.offset;
1430
1431 return page_address(page);
1432 }
1433
1434 static netdev_tx_t
vmxnet3_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
1436 {
1437 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
1438
1439 BUG_ON(skb->queue_mapping > adapter->num_tx_queues);
1440 return vmxnet3_tq_xmit(skb,
1441 &adapter->tx_queue[skb->queue_mapping],
1442 adapter, netdev);
1443 }
1444
1445
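/*
 * Translate the checksum bits of the rx completion descriptor into
 * skb->ip_summed: CHECKSUM_UNNECESSARY when the device validated the TCP/UDP
 * checksum (csum_level is raised for encapsulated packets), CHECKSUM_PARTIAL
 * when only a raw checksum value is supplied, and no checksum otherwise or
 * when RX checksum offload is disabled.
 */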
1446 static void
vmxnet3_rx_csum(struct vmxnet3_adapter *adapter,
		struct sk_buff *skb,
		union Vmxnet3_GenericDesc *gdesc)
1450 {
1451 if (!gdesc->rcd.cnc && adapter->netdev->features & NETIF_F_RXCSUM) {
1452 if (gdesc->rcd.v4 &&
1453 (le32_to_cpu(gdesc->dword[3]) &
1454 VMXNET3_RCD_CSUM_OK) == VMXNET3_RCD_CSUM_OK) {
1455 skb->ip_summed = CHECKSUM_UNNECESSARY;
1456 if ((le32_to_cpu(gdesc->dword[0]) &
1457 (1UL << VMXNET3_RCD_HDR_INNER_SHIFT))) {
1458 skb->csum_level = 1;
1459 }
1460 WARN_ON_ONCE(!(gdesc->rcd.tcp || gdesc->rcd.udp) &&
1461 !(le32_to_cpu(gdesc->dword[0]) &
1462 (1UL << VMXNET3_RCD_HDR_INNER_SHIFT)));
1463 WARN_ON_ONCE(gdesc->rcd.frg &&
1464 !(le32_to_cpu(gdesc->dword[0]) &
1465 (1UL << VMXNET3_RCD_HDR_INNER_SHIFT)));
1466 } else if (gdesc->rcd.v6 && (le32_to_cpu(gdesc->dword[3]) &
1467 (1 << VMXNET3_RCD_TUC_SHIFT))) {
1468 skb->ip_summed = CHECKSUM_UNNECESSARY;
1469 if ((le32_to_cpu(gdesc->dword[0]) &
1470 (1UL << VMXNET3_RCD_HDR_INNER_SHIFT))) {
1471 skb->csum_level = 1;
1472 }
1473 WARN_ON_ONCE(!(gdesc->rcd.tcp || gdesc->rcd.udp) &&
1474 !(le32_to_cpu(gdesc->dword[0]) &
1475 (1UL << VMXNET3_RCD_HDR_INNER_SHIFT)));
1476 WARN_ON_ONCE(gdesc->rcd.frg &&
1477 !(le32_to_cpu(gdesc->dword[0]) &
1478 (1UL << VMXNET3_RCD_HDR_INNER_SHIFT)));
1479 } else {
1480 if (gdesc->rcd.csum) {
1481 skb->csum = htons(gdesc->rcd.csum);
1482 skb->ip_summed = CHECKSUM_PARTIAL;
1483 } else {
1484 skb_checksum_none_assert(skb);
1485 }
1486 }
1487 } else {
1488 skb_checksum_none_assert(skb);
1489 }
1490 }
1491
1492
1493 static void
vmxnet3_rx_error(struct vmxnet3_rx_queue *rq, struct Vmxnet3_RxCompDesc *rcd,
		 struct vmxnet3_rx_ctx *ctx, struct vmxnet3_adapter *adapter)
1496 {
1497 rq->stats.drop_err++;
1498 if (!rcd->fcs)
1499 rq->stats.drop_fcs++;
1500
1501 rq->stats.drop_total++;
1502
1503 /*
1504 * We do not unmap and chain the rx buffer to the skb.
1505 * We basically pretend this buffer is not used and will be recycled
1506 * by vmxnet3_rq_alloc_rx_buf()
1507 */
1508
1509 /*
1510 * ctx->skb may be NULL if this is the first and the only one
1511 * desc for the pkt
1512 */
1513 if (ctx->skb)
1514 dev_kfree_skb_irq(ctx->skb);
1515
1516 ctx->skb = NULL;
1517 }
1518
1519
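/*
 * Estimate the combined L2+L3+L4 header length of an aggregated (LRO) TCP
 * packet so that gso_size can be derived. Returns 0 if the packet is not IP
 * or the headers do not fit in the linear part of the skb.
 */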
1520 static u32
vmxnet3_get_hdr_len(struct vmxnet3_adapter *adapter, struct sk_buff *skb,
		    union Vmxnet3_GenericDesc *gdesc)
1523 {
1524 u32 hlen, maplen;
1525 union {
1526 void *ptr;
1527 struct ethhdr *eth;
1528 struct vlan_ethhdr *veth;
1529 struct iphdr *ipv4;
1530 struct ipv6hdr *ipv6;
1531 struct tcphdr *tcp;
1532 } hdr;
1533 BUG_ON(gdesc->rcd.tcp == 0);
1534
1535 maplen = skb_headlen(skb);
1536 if (unlikely(sizeof(struct iphdr) + sizeof(struct tcphdr) > maplen))
1537 return 0;
1538
1539 if (skb->protocol == cpu_to_be16(ETH_P_8021Q) ||
1540 skb->protocol == cpu_to_be16(ETH_P_8021AD))
1541 hlen = sizeof(struct vlan_ethhdr);
1542 else
1543 hlen = sizeof(struct ethhdr);
1544
1545 hdr.eth = eth_hdr(skb);
1546 if (gdesc->rcd.v4) {
1547 BUG_ON(hdr.eth->h_proto != htons(ETH_P_IP) &&
1548 hdr.veth->h_vlan_encapsulated_proto != htons(ETH_P_IP));
1549 hdr.ptr += hlen;
1550 BUG_ON(hdr.ipv4->protocol != IPPROTO_TCP);
1551 hlen = hdr.ipv4->ihl << 2;
1552 hdr.ptr += hdr.ipv4->ihl << 2;
1553 } else if (gdesc->rcd.v6) {
1554 BUG_ON(hdr.eth->h_proto != htons(ETH_P_IPV6) &&
1555 hdr.veth->h_vlan_encapsulated_proto != htons(ETH_P_IPV6));
1556 hdr.ptr += hlen;
		/* Use an estimated value, since we also need to handle
		 * the TSO case.
		 */
1560 if (hdr.ipv6->nexthdr != IPPROTO_TCP)
1561 return sizeof(struct ipv6hdr) + sizeof(struct tcphdr);
1562 hlen = sizeof(struct ipv6hdr);
1563 hdr.ptr += sizeof(struct ipv6hdr);
1564 } else {
		/* Non-IP pkt, don't estimate header length */
1566 return 0;
1567 }
1568
1569 if (hlen + sizeof(struct tcphdr) > maplen)
1570 return 0;
1571
1572 return (hlen + (hdr.tcp->doff << 2));
1573 }
1574
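/*
 * For a UDP-tunnelled LRO packet, mark the skb with SKB_GSO_UDP_TUNNEL_CSUM
 * when the outer UDP checksum is present, or SKB_GSO_UDP_TUNNEL otherwise.
 */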
1575 static void
vmxnet3_lro_tunnel(struct sk_buff *skb, __be16 ip_proto)
1577 {
1578 struct udphdr *uh = NULL;
1579
1580 if (ip_proto == htons(ETH_P_IP)) {
1581 struct iphdr *iph = (struct iphdr *)skb->data;
1582
1583 if (iph->protocol == IPPROTO_UDP)
1584 uh = (struct udphdr *)(iph + 1);
1585 } else {
1586 struct ipv6hdr *iph = (struct ipv6hdr *)skb->data;
1587
1588 if (iph->nexthdr == IPPROTO_UDP)
1589 uh = (struct udphdr *)(iph + 1);
1590 }
1591 if (uh) {
1592 if (uh->check)
1593 skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL_CSUM;
1594 else
1595 skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
1596 }
1597 }
1598
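/*
 * Process up to @quota completed rx descriptors from the completion ring:
 * run XDP where applicable, build skbs (copying small packets from the rx
 * data ring), append page fragments, fill in RSS hash, checksum and LRO
 * metadata, and hand the packets to the stack via GRO. Buffers are refilled
 * in place as they are consumed.
 */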
1599 static int
vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
		       struct vmxnet3_adapter *adapter, int quota)
1602 {
1603 u32 rxprod_reg[2] = {
1604 adapter->rx_prod_offset, adapter->rx_prod2_offset
1605 };
1606 u32 num_pkts = 0;
1607 bool skip_page_frags = false;
1608 bool encap_lro = false;
1609 struct Vmxnet3_RxCompDesc *rcd;
1610 struct vmxnet3_rx_ctx *ctx = &rq->rx_ctx;
1611 u16 segCnt = 0, mss = 0;
1612 int comp_offset, fill_offset;
1613 #ifdef __BIG_ENDIAN_BITFIELD
1614 struct Vmxnet3_RxDesc rxCmdDesc;
1615 struct Vmxnet3_RxCompDesc rxComp;
1616 #endif
1617 bool need_flush = false;
1618
1619 vmxnet3_getRxComp(rcd, &rq->comp_ring.base[rq->comp_ring.next2proc].rcd,
1620 &rxComp);
1621 while (rcd->gen == rq->comp_ring.gen) {
1622 struct vmxnet3_rx_buf_info *rbi;
1623 struct sk_buff *skb, *new_skb = NULL;
1624 struct page *new_page = NULL;
1625 dma_addr_t new_dma_addr;
1626 int num_to_alloc;
1627 struct Vmxnet3_RxDesc *rxd;
1628 u32 idx, ring_idx;
1629 struct vmxnet3_cmd_ring *ring = NULL;
1630 if (num_pkts >= quota) {
1631 /* we may stop even before we see the EOP desc of
1632 * the current pkt
1633 */
1634 break;
1635 }
1636
1637 /* Prevent any rcd field from being (speculatively) read before
1638 * rcd->gen is read.
1639 */
1640 dma_rmb();
1641
1642 BUG_ON(rcd->rqID != rq->qid && rcd->rqID != rq->qid2 &&
1643 rcd->rqID != rq->dataRingQid);
1644 idx = rcd->rxdIdx;
1645 ring_idx = VMXNET3_GET_RING_IDX(adapter, rcd->rqID);
1646 ring = rq->rx_ring + ring_idx;
1647 vmxnet3_getRxDesc(rxd, &rq->rx_ring[ring_idx].base[idx].rxd,
1648 &rxCmdDesc);
1649 rbi = rq->buf_info[ring_idx] + idx;
1650
1651 BUG_ON(rxd->addr != rbi->dma_addr ||
1652 rxd->len != rbi->len);
1653
1654 if (unlikely(rcd->eop && rcd->err)) {
1655 vmxnet3_rx_error(rq, rcd, ctx, adapter);
1656 goto rcd_done;
1657 }
1658
1659 if (rcd->sop && rcd->eop && vmxnet3_xdp_enabled(adapter)) {
1660 struct sk_buff *skb_xdp_pass;
1661 int act;
1662
1663 if (VMXNET3_RX_DATA_RING(adapter, rcd->rqID)) {
1664 ctx->skb = NULL;
1665 goto skip_xdp; /* Handle it later. */
1666 }
1667
1668 if (rbi->buf_type != VMXNET3_RX_BUF_XDP)
1669 goto rcd_done;
1670
1671 act = vmxnet3_process_xdp(adapter, rq, rcd, rbi, rxd,
1672 &skb_xdp_pass);
1673 if (act == XDP_PASS) {
1674 ctx->skb = skb_xdp_pass;
1675 goto sop_done;
1676 }
1677 ctx->skb = NULL;
1678 need_flush |= act == XDP_REDIRECT;
1679
1680 goto rcd_done;
1681 }
1682 skip_xdp:
1683
1684 if (rcd->sop) { /* first buf of the pkt */
1685 bool rxDataRingUsed;
1686 u16 len;
1687
1688 BUG_ON(rxd->btype != VMXNET3_RXD_BTYPE_HEAD ||
1689 (rcd->rqID != rq->qid &&
1690 rcd->rqID != rq->dataRingQid));
1691
1692 BUG_ON(rbi->buf_type != VMXNET3_RX_BUF_SKB &&
1693 rbi->buf_type != VMXNET3_RX_BUF_XDP);
1694 BUG_ON(ctx->skb != NULL || rbi->skb == NULL);
1695
1696 if (unlikely(rcd->len == 0)) {
1697 /* Pretend the rx buffer is skipped. */
1698 BUG_ON(!(rcd->sop && rcd->eop));
1699 netdev_dbg(adapter->netdev,
1700 "rxRing[%u][%u] 0 length\n",
1701 ring_idx, idx);
1702 goto rcd_done;
1703 }
1704
1705 skip_page_frags = false;
1706 ctx->skb = rbi->skb;
1707
1708 if (rq->rx_ts_desc_size != 0 && rcd->ext2) {
1709 struct Vmxnet3_RxTSDesc *ts_rxd;
1710
1711 ts_rxd = (struct Vmxnet3_RxTSDesc *)((u8 *)rq->ts_ring.base +
1712 idx * rq->rx_ts_desc_size);
1713 ts_rxd->ts.tsData = vmxnet3_get_cycles(VMXNET3_PMC_PSEUDO_TSC);
1714 ts_rxd->ts.tsi = 1;
1715 }
1716
1717 rxDataRingUsed =
1718 VMXNET3_RX_DATA_RING(adapter, rcd->rqID);
1719 len = rxDataRingUsed ? rcd->len : rbi->len;
1720
1721 if (rxDataRingUsed && vmxnet3_xdp_enabled(adapter)) {
1722 struct sk_buff *skb_xdp_pass;
1723 size_t sz;
1724 int act;
1725
1726 sz = rcd->rxdIdx * rq->data_ring.desc_size;
1727 act = vmxnet3_process_xdp_small(adapter, rq,
1728 &rq->data_ring.base[sz],
1729 rcd->len,
1730 &skb_xdp_pass);
1731 if (act == XDP_PASS) {
1732 ctx->skb = skb_xdp_pass;
1733 goto sop_done;
1734 }
1735 need_flush |= act == XDP_REDIRECT;
1736
1737 goto rcd_done;
1738 }
1739 new_skb = netdev_alloc_skb_ip_align(adapter->netdev,
1740 len);
1741 if (new_skb == NULL) {
				/* Skb allocation failed, do not hand over this
				 * skb to the stack. Reuse it. Drop the existing
				 * pkt.
				 */
1745 rq->stats.rx_buf_alloc_failure++;
1746 ctx->skb = NULL;
1747 rq->stats.drop_total++;
1748 skip_page_frags = true;
1749 goto rcd_done;
1750 }
1751
1752 if (rxDataRingUsed && adapter->rxdataring_enabled) {
1753 size_t sz;
1754
1755 BUG_ON(rcd->len > rq->data_ring.desc_size);
1756
1757 ctx->skb = new_skb;
1758 sz = rcd->rxdIdx * rq->data_ring.desc_size;
1759 memcpy(new_skb->data,
1760 &rq->data_ring.base[sz], rcd->len);
1761 } else {
1762 ctx->skb = rbi->skb;
1763
1764 new_dma_addr =
1765 dma_map_single(&adapter->pdev->dev,
1766 new_skb->data, rbi->len,
1767 DMA_FROM_DEVICE);
1768 if (dma_mapping_error(&adapter->pdev->dev,
1769 new_dma_addr)) {
1770 dev_kfree_skb(new_skb);
					/* Skb allocation failed, do not
					 * hand over this skb to the stack.
					 * Reuse it. Drop the existing pkt.
					 */
1775 rq->stats.rx_buf_alloc_failure++;
1776 ctx->skb = NULL;
1777 rq->stats.drop_total++;
1778 skip_page_frags = true;
1779 goto rcd_done;
1780 }
1781
1782 dma_unmap_single(&adapter->pdev->dev,
1783 rbi->dma_addr,
1784 rbi->len,
1785 DMA_FROM_DEVICE);
1786
1787 /* Immediate refill */
1788 rbi->skb = new_skb;
1789 rbi->dma_addr = new_dma_addr;
1790 rxd->addr = cpu_to_le64(rbi->dma_addr);
1791 rxd->len = rbi->len;
1792 }
1793
1794 skb_record_rx_queue(ctx->skb, rq->qid);
1795 skb_put(ctx->skb, rcd->len);
1796
1797 if (VMXNET3_VERSION_GE_2(adapter) &&
1798 rcd->type == VMXNET3_CDTYPE_RXCOMP_LRO) {
1799 struct Vmxnet3_RxCompDescExt *rcdlro;
1800 union Vmxnet3_GenericDesc *gdesc;
1801
1802 rcdlro = (struct Vmxnet3_RxCompDescExt *)rcd;
1803 gdesc = (union Vmxnet3_GenericDesc *)rcd;
1804
1805 segCnt = rcdlro->segCnt;
1806 WARN_ON_ONCE(segCnt == 0);
1807 mss = rcdlro->mss;
1808 if (unlikely(segCnt <= 1))
1809 segCnt = 0;
1810 encap_lro = (le32_to_cpu(gdesc->dword[0]) &
1811 (1UL << VMXNET3_RCD_HDR_INNER_SHIFT));
1812 } else {
1813 segCnt = 0;
1814 }
1815 } else {
1816 BUG_ON(ctx->skb == NULL && !skip_page_frags);
1817
1818 /* non SOP buffer must be type 1 in most cases */
1819 BUG_ON(rbi->buf_type != VMXNET3_RX_BUF_PAGE);
1820 BUG_ON(rxd->btype != VMXNET3_RXD_BTYPE_BODY);
1821
1822 /* If an sop buffer was dropped, skip all
1823 * following non-sop fragments. They will be reused.
1824 */
1825 if (skip_page_frags)
1826 goto rcd_done;
1827
1828 if (rcd->len) {
1829 new_page = alloc_page(GFP_ATOMIC);
1830 /* Replacement page frag could not be allocated.
1831 * Reuse this page. Drop the pkt and free the
1832 * skb which contained this page as a frag. Skip
1833 * processing all the following non-sop frags.
1834 */
1835 if (unlikely(!new_page)) {
1836 rq->stats.rx_buf_alloc_failure++;
1837 dev_kfree_skb(ctx->skb);
1838 ctx->skb = NULL;
1839 skip_page_frags = true;
1840 goto rcd_done;
1841 }
1842 new_dma_addr = dma_map_page(&adapter->pdev->dev,
1843 new_page,
1844 0, PAGE_SIZE,
1845 DMA_FROM_DEVICE);
1846 if (dma_mapping_error(&adapter->pdev->dev,
1847 new_dma_addr)) {
1848 put_page(new_page);
1849 rq->stats.rx_buf_alloc_failure++;
1850 dev_kfree_skb(ctx->skb);
1851 ctx->skb = NULL;
1852 skip_page_frags = true;
1853 goto rcd_done;
1854 }
1855
1856 dma_unmap_page(&adapter->pdev->dev,
1857 rbi->dma_addr, rbi->len,
1858 DMA_FROM_DEVICE);
1859
1860 vmxnet3_append_frag(ctx->skb, rcd, rbi);
1861
1862 /* Immediate refill */
1863 rbi->page = new_page;
1864 rbi->dma_addr = new_dma_addr;
1865 rxd->addr = cpu_to_le64(rbi->dma_addr);
1866 rxd->len = rbi->len;
1867 }
1868 }
1869
1870
1871 sop_done:
1872 skb = ctx->skb;
1873 if (rcd->eop) {
1874 u32 mtu = adapter->netdev->mtu;
1875 skb->len += skb->data_len;
1876
1877 #ifdef VMXNET3_RSS
1878 if (rcd->rssType != VMXNET3_RCD_RSS_TYPE_NONE &&
1879 (adapter->netdev->features & NETIF_F_RXHASH)) {
1880 enum pkt_hash_types hash_type;
1881
1882 switch (rcd->rssType) {
1883 case VMXNET3_RCD_RSS_TYPE_IPV4:
1884 case VMXNET3_RCD_RSS_TYPE_IPV6:
1885 hash_type = PKT_HASH_TYPE_L3;
1886 break;
1887 case VMXNET3_RCD_RSS_TYPE_TCPIPV4:
1888 case VMXNET3_RCD_RSS_TYPE_TCPIPV6:
1889 case VMXNET3_RCD_RSS_TYPE_UDPIPV4:
1890 case VMXNET3_RCD_RSS_TYPE_UDPIPV6:
1891 hash_type = PKT_HASH_TYPE_L4;
1892 break;
1893 default:
1894 hash_type = PKT_HASH_TYPE_L3;
1895 break;
1896 }
1897 skb_set_hash(skb,
1898 le32_to_cpu(rcd->rssHash),
1899 hash_type);
1900 }
1901 #endif
1902 vmxnet3_rx_csum(adapter, skb,
1903 (union Vmxnet3_GenericDesc *)rcd);
1904 skb->protocol = eth_type_trans(skb, adapter->netdev);
1905 if ((!rcd->tcp && !encap_lro) ||
1906 !(adapter->netdev->features & NETIF_F_LRO))
1907 goto not_lro;
1908
1909 if (segCnt != 0 && mss != 0) {
1910 skb_shinfo(skb)->gso_type = rcd->v4 ?
1911 SKB_GSO_TCPV4 : SKB_GSO_TCPV6;
1912 if (encap_lro)
1913 vmxnet3_lro_tunnel(skb, skb->protocol);
1914 skb_shinfo(skb)->gso_size = mss;
1915 skb_shinfo(skb)->gso_segs = segCnt;
1916 } else if ((segCnt != 0 || skb->len > mtu) && !encap_lro) {
1917 u32 hlen;
1918
1919 hlen = vmxnet3_get_hdr_len(adapter, skb,
1920 (union Vmxnet3_GenericDesc *)rcd);
1921 if (hlen == 0)
1922 goto not_lro;
1923
1924 skb_shinfo(skb)->gso_type =
1925 rcd->v4 ? SKB_GSO_TCPV4 : SKB_GSO_TCPV6;
1926 if (segCnt != 0) {
1927 skb_shinfo(skb)->gso_segs = segCnt;
1928 skb_shinfo(skb)->gso_size =
1929 DIV_ROUND_UP(skb->len -
1930 hlen, segCnt);
1931 } else {
1932 skb_shinfo(skb)->gso_size = mtu - hlen;
1933 }
1934 }
1935 not_lro:
1936 if (unlikely(rcd->ts))
1937 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rcd->tci);
1938
1939 /* Use GRO callback if UPT is enabled */
1940 if ((adapter->netdev->features & NETIF_F_LRO) &&
1941 !rq->shared->updateRxProd)
1942 netif_receive_skb(skb);
1943 else
1944 napi_gro_receive(&rq->napi, skb);
1945
1946 ctx->skb = NULL;
1947 encap_lro = false;
1948 num_pkts++;
1949 }
1950
1951 rcd_done:
1952 /* device may have skipped some rx descs */
1953 ring = rq->rx_ring + ring_idx;
1954 rbi->comp_state = VMXNET3_RXD_COMP_DONE;
1955
1956 comp_offset = vmxnet3_cmd_ring_desc_avail(ring);
1957 fill_offset = (idx > ring->next2fill ? 0 : ring->size) +
1958 idx - ring->next2fill - 1;
1959 if (!ring->isOutOfOrder || fill_offset >= comp_offset)
1960 ring->next2comp = idx;
1961 num_to_alloc = vmxnet3_cmd_ring_desc_avail(ring);
1962
1963 /* Ensure that the writes to rxd->gen bits will be observed
1964 * after all other writes to rxd objects.
1965 */
1966 dma_wmb();
1967
1968 while (num_to_alloc) {
1969 rbi = rq->buf_info[ring_idx] + ring->next2fill;
1970 if (!(adapter->dev_caps[0] & (1UL << VMXNET3_CAP_OOORX_COMP)))
1971 goto refill_buf;
1972 if (ring_idx == 0) {
1973 /* ring0 Type1 buffers can get skipped; re-fill them */
1974 if (rbi->buf_type != VMXNET3_RX_BUF_SKB)
1975 goto refill_buf;
1976 }
1977 if (rbi->comp_state == VMXNET3_RXD_COMP_DONE) {
1978 refill_buf:
1979 vmxnet3_getRxDesc(rxd, &ring->base[ring->next2fill].rxd,
1980 &rxCmdDesc);
1981 WARN_ON(!rxd->addr);
1982
1983 /* Recv desc is ready to be used by the device */
1984 rxd->gen = ring->gen;
1985 vmxnet3_cmd_ring_adv_next2fill(ring);
1986 rbi->comp_state = VMXNET3_RXD_COMP_PENDING;
1987 num_to_alloc--;
1988 } else {
1989 /* rx completion hasn't occurred */
1990 ring->isOutOfOrder = 1;
1991 break;
1992 }
1993 }
1994
1995 if (num_to_alloc == 0) {
1996 ring->isOutOfOrder = 0;
1997 }
1998
1999 /* if needed, update the register */
2000 if (unlikely(rq->shared->updateRxProd) && (ring->next2fill & 0xf) == 0) {
2001 VMXNET3_WRITE_BAR0_REG(adapter,
2002 rxprod_reg[ring_idx] + rq->qid * 8,
2003 ring->next2fill);
2004 }
2005
2006 vmxnet3_comp_ring_adv_next2proc(&rq->comp_ring);
2007 vmxnet3_getRxComp(rcd,
2008 &rq->comp_ring.base[rq->comp_ring.next2proc].rcd, &rxComp);
2009 }
2010 if (need_flush)
2011 xdp_do_flush();
2012
2013 return num_pkts;
2014 }
2015
2016
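/* Unmap and free all rx buffers still held by the two command rings of
 * this rx queue, reset the ring generation bits and fill/completion
 * indices, and release the queue's page pool and XDP rxq registration.
 */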
2017 static void
2018 vmxnet3_rq_cleanup(struct vmxnet3_rx_queue *rq,
2019 struct vmxnet3_adapter *adapter)
2020 {
2021 u32 i, ring_idx;
2022 struct Vmxnet3_RxDesc *rxd;
2023
2024 /* ring has already been cleaned up */
2025 if (!rq->rx_ring[0].base)
2026 return;
2027
2028 for (ring_idx = 0; ring_idx < 2; ring_idx++) {
2029 for (i = 0; i < rq->rx_ring[ring_idx].size; i++) {
2030 struct vmxnet3_rx_buf_info *rbi;
2031 #ifdef __BIG_ENDIAN_BITFIELD
2032 struct Vmxnet3_RxDesc rxDesc;
2033 #endif
2034
2035 rbi = &rq->buf_info[ring_idx][i];
2036 vmxnet3_getRxDesc(rxd,
2037 &rq->rx_ring[ring_idx].base[i].rxd, &rxDesc);
2038
2039 if (rxd->btype == VMXNET3_RXD_BTYPE_HEAD &&
2040 rbi->page && rbi->buf_type == VMXNET3_RX_BUF_XDP) {
2041 page_pool_recycle_direct(rq->page_pool,
2042 rbi->page);
2043 rbi->page = NULL;
2044 } else if (rxd->btype == VMXNET3_RXD_BTYPE_HEAD &&
2045 rbi->skb) {
2046 dma_unmap_single(&adapter->pdev->dev, rxd->addr,
2047 rxd->len, DMA_FROM_DEVICE);
2048 dev_kfree_skb(rbi->skb);
2049 rbi->skb = NULL;
2050 } else if (rxd->btype == VMXNET3_RXD_BTYPE_BODY &&
2051 rbi->page) {
2052 dma_unmap_page(&adapter->pdev->dev, rxd->addr,
2053 rxd->len, DMA_FROM_DEVICE);
2054 put_page(rbi->page);
2055 rbi->page = NULL;
2056 }
2057 }
2058
2059 rq->rx_ring[ring_idx].gen = VMXNET3_INIT_GEN;
2060 rq->rx_ring[ring_idx].next2fill =
2061 rq->rx_ring[ring_idx].next2comp = 0;
2062 }
2063
2064 rq->comp_ring.gen = VMXNET3_INIT_GEN;
2065 rq->comp_ring.next2proc = 0;
2066
2067 if (xdp_rxq_info_is_reg(&rq->xdp_rxq))
2068 xdp_rxq_info_unreg(&rq->xdp_rxq);
2069 page_pool_destroy(rq->page_pool);
2070 rq->page_pool = NULL;
2071 }
2072
2073
2074 static void
2075 vmxnet3_rq_cleanup_all(struct vmxnet3_adapter *adapter)
2076 {
2077 int i;
2078
2079 for (i = 0; i < adapter->num_rx_queues; i++)
2080 vmxnet3_rq_cleanup(&adapter->rx_queue[i], adapter);
2081 rcu_assign_pointer(adapter->xdp_bpf_prog, NULL);
2082 }
2083
2084
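/* Free the DMA-coherent memory backing the two rx command rings, the rx
 * data ring, the timestamp ring and the completion ring, along with the
 * buf_info array. All rx buffers must already have been freed.
 */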
2085 static void vmxnet3_rq_destroy(struct vmxnet3_rx_queue *rq,
2086 struct vmxnet3_adapter *adapter)
2087 {
2088 int i;
2089 int j;
2090
2091 /* all rx buffers must have already been freed */
2092 for (i = 0; i < 2; i++) {
2093 if (rq->buf_info[i]) {
2094 for (j = 0; j < rq->rx_ring[i].size; j++)
2095 BUG_ON(rq->buf_info[i][j].page != NULL);
2096 }
2097 }
2098
2099
2100 for (i = 0; i < 2; i++) {
2101 if (rq->rx_ring[i].base) {
2102 dma_free_coherent(&adapter->pdev->dev,
2103 rq->rx_ring[i].size
2104 * sizeof(struct Vmxnet3_RxDesc),
2105 rq->rx_ring[i].base,
2106 rq->rx_ring[i].basePA);
2107 rq->rx_ring[i].base = NULL;
2108 }
2109 }
2110
2111 if (rq->data_ring.base) {
2112 dma_free_coherent(&adapter->pdev->dev,
2113 rq->rx_ring[0].size * rq->data_ring.desc_size,
2114 rq->data_ring.base, rq->data_ring.basePA);
2115 rq->data_ring.base = NULL;
2116 }
2117
2118 if (rq->ts_ring.base) {
2119 dma_free_coherent(&adapter->pdev->dev,
2120 rq->rx_ring[0].size * rq->rx_ts_desc_size,
2121 rq->ts_ring.base, rq->ts_ring.basePA);
2122 rq->ts_ring.base = NULL;
2123 }
2124
2125 if (rq->comp_ring.base) {
2126 dma_free_coherent(&adapter->pdev->dev, rq->comp_ring.size
2127 * sizeof(struct Vmxnet3_RxCompDesc),
2128 rq->comp_ring.base, rq->comp_ring.basePA);
2129 rq->comp_ring.base = NULL;
2130 }
2131
2132 kfree(rq->buf_info[0]);
2133 rq->buf_info[0] = NULL;
2134 rq->buf_info[1] = NULL;
2135 }
2136
2137 static void
2138 vmxnet3_rq_destroy_all_rxdataring(struct vmxnet3_adapter *adapter)
2139 {
2140 int i;
2141
2142 for (i = 0; i < adapter->num_rx_queues; i++) {
2143 struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
2144
2145 if (rq->data_ring.base) {
2146 dma_free_coherent(&adapter->pdev->dev,
2147 (rq->rx_ring[0].size *
2148 rq->data_ring.desc_size),
2149 rq->data_ring.base,
2150 rq->data_ring.basePA);
2151 rq->data_ring.base = NULL;
2152 }
2153 rq->data_ring.desc_size = 0;
2154 }
2155 }
2156
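/* Prepare an rx queue for use: assign buffer types and lengths in
 * buf_info, reset both command rings and the completion ring, create the
 * page pool, and pre-fill the rings with rx buffers. Stats are preserved.
 */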
2157 static int
2158 vmxnet3_rq_init(struct vmxnet3_rx_queue *rq,
2159 struct vmxnet3_adapter *adapter)
2160 {
2161 int i, err;
2162
2163 /* initialize buf_info */
2164 for (i = 0; i < rq->rx_ring[0].size; i++) {
2165
2166 /* 1st buf for a pkt is skbuff or xdp page */
2167 if (i % adapter->rx_buf_per_pkt == 0) {
2168 rq->buf_info[0][i].buf_type = vmxnet3_xdp_enabled(adapter) ?
2169 VMXNET3_RX_BUF_XDP :
2170 VMXNET3_RX_BUF_SKB;
2171 rq->buf_info[0][i].len = adapter->skb_buf_size;
2172 } else { /* subsequent bufs for a pkt are frags */
2173 rq->buf_info[0][i].buf_type = VMXNET3_RX_BUF_PAGE;
2174 rq->buf_info[0][i].len = PAGE_SIZE;
2175 }
2176 }
2177 for (i = 0; i < rq->rx_ring[1].size; i++) {
2178 rq->buf_info[1][i].buf_type = VMXNET3_RX_BUF_PAGE;
2179 rq->buf_info[1][i].len = PAGE_SIZE;
2180 }
2181
2182 /* reset internal state and allocate buffers for both rings */
2183 for (i = 0; i < 2; i++) {
2184 rq->rx_ring[i].next2fill = rq->rx_ring[i].next2comp = 0;
2185
2186 memset(rq->rx_ring[i].base, 0, rq->rx_ring[i].size *
2187 sizeof(struct Vmxnet3_RxDesc));
2188 rq->rx_ring[i].gen = VMXNET3_INIT_GEN;
2189 rq->rx_ring[i].isOutOfOrder = 0;
2190 }
2191
2192 err = vmxnet3_create_pp(adapter, rq,
2193 rq->rx_ring[0].size + rq->rx_ring[1].size);
2194 if (err)
2195 return err;
2196
2197 if (vmxnet3_rq_alloc_rx_buf(rq, 0, rq->rx_ring[0].size - 1,
2198 adapter) == 0) {
2199 xdp_rxq_info_unreg(&rq->xdp_rxq);
2200 page_pool_destroy(rq->page_pool);
2201 rq->page_pool = NULL;
2202
2203 /* need at least 1 rx buffer for the 1st ring */
2204 return -ENOMEM;
2205 }
2206 vmxnet3_rq_alloc_rx_buf(rq, 1, rq->rx_ring[1].size - 1, adapter);
2207
2208 if (rq->ts_ring.base)
2209 memset(rq->ts_ring.base, 0,
2210 rq->rx_ring[0].size * rq->rx_ts_desc_size);
2211
2212 /* reset the comp ring */
2213 rq->comp_ring.next2proc = 0;
2214 memset(rq->comp_ring.base, 0, rq->comp_ring.size *
2215 sizeof(struct Vmxnet3_RxCompDesc));
2216 rq->comp_ring.gen = VMXNET3_INIT_GEN;
2217
2218 /* reset rxctx */
2219 rq->rx_ctx.skb = NULL;
2220
2221 /* stats are not reset */
2222 return 0;
2223 }
2224
2225
2226 static int
2227 vmxnet3_rq_init_all(struct vmxnet3_adapter *adapter)
2228 {
2229 int i, err = 0;
2230
2231 for (i = 0; i < adapter->num_rx_queues; i++) {
2232 err = vmxnet3_rq_init(&adapter->rx_queue[i], adapter);
2233 if (unlikely(err)) {
2234 dev_err(&adapter->netdev->dev, "%s: failed to "
2235 "initialize rx queue%i\n",
2236 adapter->netdev->name, i);
2237 break;
2238 }
2239 }
2240 return err;
2241
2242 }
2243
2244
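/* Allocate the DMA-coherent descriptor memory for an rx queue: both
 * command rings, the optional rx data and timestamp rings, the completion
 * ring, and one buf_info array shared by the two command rings.
 */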
2245 static int
2246 vmxnet3_rq_create(struct vmxnet3_rx_queue *rq, struct vmxnet3_adapter *adapter)
2247 {
2248 int i;
2249 size_t sz;
2250 struct vmxnet3_rx_buf_info *bi;
2251
2252 for (i = 0; i < 2; i++) {
2253
2254 sz = rq->rx_ring[i].size * sizeof(struct Vmxnet3_RxDesc);
2255 rq->rx_ring[i].base = dma_alloc_coherent(
2256 &adapter->pdev->dev, sz,
2257 &rq->rx_ring[i].basePA,
2258 GFP_KERNEL);
2259 if (!rq->rx_ring[i].base) {
2260 netdev_err(adapter->netdev,
2261 "failed to allocate rx ring %d\n", i);
2262 goto err;
2263 }
2264 }
2265
2266 if ((adapter->rxdataring_enabled) && (rq->data_ring.desc_size != 0)) {
2267 sz = rq->rx_ring[0].size * rq->data_ring.desc_size;
2268 rq->data_ring.base =
2269 dma_alloc_coherent(&adapter->pdev->dev, sz,
2270 &rq->data_ring.basePA,
2271 GFP_KERNEL);
2272 if (!rq->data_ring.base) {
2273 netdev_err(adapter->netdev,
2274 "rx data ring will be disabled\n");
2275 adapter->rxdataring_enabled = false;
2276 }
2277 } else {
2278 rq->data_ring.base = NULL;
2279 rq->data_ring.desc_size = 0;
2280 }
2281
2282 if (rq->rx_ts_desc_size != 0) {
2283 sz = rq->rx_ring[0].size * rq->rx_ts_desc_size;
2284 rq->ts_ring.base =
2285 dma_alloc_coherent(&adapter->pdev->dev, sz,
2286 &rq->ts_ring.basePA,
2287 GFP_KERNEL);
2288 if (!rq->ts_ring.base) {
2289 netdev_err(adapter->netdev,
2290 "rx ts ring will be disabled\n");
2291 rq->rx_ts_desc_size = 0;
2292 }
2293 } else {
2294 rq->ts_ring.base = NULL;
2295 }
2296
2297 sz = rq->comp_ring.size * sizeof(struct Vmxnet3_RxCompDesc);
2298 rq->comp_ring.base = dma_alloc_coherent(&adapter->pdev->dev, sz,
2299 &rq->comp_ring.basePA,
2300 GFP_KERNEL);
2301 if (!rq->comp_ring.base) {
2302 netdev_err(adapter->netdev, "failed to allocate rx comp ring\n");
2303 goto err;
2304 }
2305
2306 bi = kcalloc_node(rq->rx_ring[0].size + rq->rx_ring[1].size,
2307 sizeof(rq->buf_info[0][0]), GFP_KERNEL,
2308 dev_to_node(&adapter->pdev->dev));
2309 if (!bi)
2310 goto err;
2311
2312 rq->buf_info[0] = bi;
2313 rq->buf_info[1] = bi + rq->rx_ring[0].size;
2314
2315 return 0;
2316
2317 err:
2318 vmxnet3_rq_destroy(rq, adapter);
2319 return -ENOMEM;
2320 }
2321
2322
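/* Create all rx queues. The rx data ring is only kept if the device is
 * version 3 or newer and every queue managed to allocate one; otherwise
 * the data rings are torn down so the feature stays uniformly disabled.
 */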
2323 int
2324 vmxnet3_rq_create_all(struct vmxnet3_adapter *adapter)
2325 {
2326 int i, err = 0;
2327
2328 adapter->rxdataring_enabled = VMXNET3_VERSION_GE_3(adapter);
2329
2330 for (i = 0; i < adapter->num_rx_queues; i++) {
2331 err = vmxnet3_rq_create(&adapter->rx_queue[i], adapter);
2332 if (unlikely(err)) {
2333 dev_err(&adapter->netdev->dev,
2334 "%s: failed to create rx queue%i\n",
2335 adapter->netdev->name, i);
2336 goto err_out;
2337 }
2338 }
2339
2340 if (!adapter->rxdataring_enabled)
2341 vmxnet3_rq_destroy_all_rxdataring(adapter);
2342
2343 return err;
2344 err_out:
2345 vmxnet3_rq_destroy_all(adapter);
2346 return err;
2347
2348 }
2349
2350 /* Multiple queue aware polling function for tx and rx */
2351
2352 static int
2353 vmxnet3_do_poll(struct vmxnet3_adapter *adapter, int budget)
2354 {
2355 int rcd_done = 0, i;
2356 if (unlikely(adapter->shared->ecr))
2357 vmxnet3_process_events(adapter);
2358 for (i = 0; i < adapter->num_tx_queues; i++)
2359 vmxnet3_tq_tx_complete(&adapter->tx_queue[i], adapter);
2360
2361 for (i = 0; i < adapter->num_rx_queues; i++)
2362 rcd_done += vmxnet3_rq_rx_complete(&adapter->rx_queue[i],
2363 adapter, budget);
2364 return rcd_done;
2365 }
2366
2367
2368 static int
2369 vmxnet3_poll(struct napi_struct *napi, int budget)
2370 {
2371 struct vmxnet3_rx_queue *rx_queue = container_of(napi,
2372 struct vmxnet3_rx_queue, napi);
2373 int rxd_done;
2374
2375 rxd_done = vmxnet3_do_poll(rx_queue->adapter, budget);
2376
2377 if (rxd_done < budget) {
2378 napi_complete_done(napi, rxd_done);
2379 vmxnet3_enable_all_intrs(rx_queue->adapter);
2380 }
2381 return rxd_done;
2382 }
2383
2384 /*
2385 * NAPI polling function for MSI-X mode with multiple Rx queues
2386 * Returns the # of the NAPI credit consumed (# of rx descriptors processed)
2387 */
2388
2389 static int
2390 vmxnet3_poll_rx_only(struct napi_struct *napi, int budget)
2391 {
2392 struct vmxnet3_rx_queue *rq = container_of(napi,
2393 struct vmxnet3_rx_queue, napi);
2394 struct vmxnet3_adapter *adapter = rq->adapter;
2395 int rxd_done;
2396
2397 /* When sharing interrupt with corresponding tx queue, process
2398 * tx completions in that queue as well
2399 */
2400 if (adapter->share_intr == VMXNET3_INTR_BUDDYSHARE) {
2401 struct vmxnet3_tx_queue *tq =
2402 &adapter->tx_queue[rq - adapter->rx_queue];
2403 vmxnet3_tq_tx_complete(tq, adapter);
2404 }
2405
2406 rxd_done = vmxnet3_rq_rx_complete(rq, adapter, budget);
2407
2408 if (rxd_done < budget) {
2409 napi_complete_done(napi, rxd_done);
2410 vmxnet3_enable_intr(adapter, rq->comp_ring.intr_idx);
2411 }
2412 return rxd_done;
2413 }
2414
2415
2416 #ifdef CONFIG_PCI_MSI
2417
2418 /*
2419 * Handle completion interrupts on tx queues
2420 * Returns whether or not the intr is handled
2421 */
2422
2423 static irqreturn_t
2424 vmxnet3_msix_tx(int irq, void *data)
2425 {
2426 struct vmxnet3_tx_queue *tq = data;
2427 struct vmxnet3_adapter *adapter = tq->adapter;
2428
2429 if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
2430 vmxnet3_disable_intr(adapter, tq->comp_ring.intr_idx);
2431
2432 /* Handle the case where only one irq is allocated for all tx queues */
2433 if (adapter->share_intr == VMXNET3_INTR_TXSHARE) {
2434 int i;
2435 for (i = 0; i < adapter->num_tx_queues; i++) {
2436 struct vmxnet3_tx_queue *txq = &adapter->tx_queue[i];
2437 vmxnet3_tq_tx_complete(txq, adapter);
2438 }
2439 } else {
2440 vmxnet3_tq_tx_complete(tq, adapter);
2441 }
2442 vmxnet3_enable_intr(adapter, tq->comp_ring.intr_idx);
2443
2444 return IRQ_HANDLED;
2445 }
2446
2447
2448 /*
2449 * Handle completion interrupts on rx queues. Returns whether or not the
2450 * intr is handled
2451 */
2452
2453 static irqreturn_t
2454 vmxnet3_msix_rx(int irq, void *data)
2455 {
2456 struct vmxnet3_rx_queue *rq = data;
2457 struct vmxnet3_adapter *adapter = rq->adapter;
2458
2459 /* disable intr if needed */
2460 if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
2461 vmxnet3_disable_intr(adapter, rq->comp_ring.intr_idx);
2462 napi_schedule(&rq->napi);
2463
2464 return IRQ_HANDLED;
2465 }
2466
2467 /*
2468 *----------------------------------------------------------------------------
2469 *
2470 * vmxnet3_msix_event --
2471 *
2472 * vmxnet3 msix event intr handler
2473 *
2474 * Result:
2475 * whether or not the intr is handled
2476 *
2477 *----------------------------------------------------------------------------
2478 */
2479
2480 static irqreturn_t
2481 vmxnet3_msix_event(int irq, void *data)
2482 {
2483 struct net_device *dev = data;
2484 struct vmxnet3_adapter *adapter = netdev_priv(dev);
2485
2486 /* disable intr if needed */
2487 if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
2488 vmxnet3_disable_intr(adapter, adapter->intr.event_intr_idx);
2489
2490 if (adapter->shared->ecr)
2491 vmxnet3_process_events(adapter);
2492
2493 vmxnet3_enable_intr(adapter, adapter->intr.event_intr_idx);
2494
2495 return IRQ_HANDLED;
2496 }
2497
2498 #endif /* CONFIG_PCI_MSI */
2499
2500
2501 /* Interrupt handler for vmxnet3 */
2502 static irqreturn_t
2503 vmxnet3_intr(int irq, void *dev_id)
2504 {
2505 struct net_device *dev = dev_id;
2506 struct vmxnet3_adapter *adapter = netdev_priv(dev);
2507
2508 if (adapter->intr.type == VMXNET3_IT_INTX) {
2509 u32 icr = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_ICR);
2510 if (unlikely(icr == 0))
2511 /* not ours */
2512 return IRQ_NONE;
2513 }
2514
2515
2516 /* disable intr if needed */
2517 if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
2518 vmxnet3_disable_all_intrs(adapter);
2519
2520 napi_schedule(&adapter->rx_queue[0].napi);
2521
2522 return IRQ_HANDLED;
2523 }
2524
2525 #ifdef CONFIG_NET_POLL_CONTROLLER
2526
2527 /* netpoll callback. */
2528 static void
2529 vmxnet3_netpoll(struct net_device *netdev)
2530 {
2531 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
2532
2533 switch (adapter->intr.type) {
2534 #ifdef CONFIG_PCI_MSI
2535 case VMXNET3_IT_MSIX: {
2536 int i;
2537 for (i = 0; i < adapter->num_rx_queues; i++)
2538 vmxnet3_msix_rx(0, &adapter->rx_queue[i]);
2539 break;
2540 }
2541 #endif
2542 case VMXNET3_IT_MSI:
2543 default:
2544 vmxnet3_intr(0, adapter->netdev);
2545 break;
2546 }
2547
2548 }
2549 #endif /* CONFIG_NET_POLL_CONTROLLER */
2550
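/* Request irqs for the configured interrupt mode. With MSI-X, each tx and
 * rx queue gets its own vector (unless tx queues share one, or tx and rx
 * buddy-share), plus a dedicated event vector. With MSI or INTx a single
 * irq is used and the driver falls back to one rx queue.
 */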
2551 static int
2552 vmxnet3_request_irqs(struct vmxnet3_adapter *adapter)
2553 {
2554 struct vmxnet3_intr *intr = &adapter->intr;
2555 int err = 0, i;
2556 int vector = 0;
2557
2558 #ifdef CONFIG_PCI_MSI
2559 if (adapter->intr.type == VMXNET3_IT_MSIX) {
2560 for (i = 0; i < adapter->num_tx_queues; i++) {
2561 if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE) {
2562 sprintf(adapter->tx_queue[i].name, "%s-tx-%d",
2563 adapter->netdev->name, vector);
2564 err = request_irq(
2565 intr->msix_entries[vector].vector,
2566 vmxnet3_msix_tx, 0,
2567 adapter->tx_queue[i].name,
2568 &adapter->tx_queue[i]);
2569 } else {
2570 sprintf(adapter->tx_queue[i].name, "%s-rxtx-%d",
2571 adapter->netdev->name, vector);
2572 }
2573 if (err) {
2574 dev_err(&adapter->netdev->dev,
2575 "Failed to request irq for MSIX, %s, "
2576 "error %d\n",
2577 adapter->tx_queue[i].name, err);
2578 return err;
2579 }
2580
2581 /* Handle the case where only 1 MSIx was allocated for
2582 * all tx queues */
2583 if (adapter->share_intr == VMXNET3_INTR_TXSHARE) {
2584 for (; i < adapter->num_tx_queues; i++)
2585 adapter->tx_queue[i].comp_ring.intr_idx
2586 = vector;
2587 vector++;
2588 break;
2589 } else {
2590 adapter->tx_queue[i].comp_ring.intr_idx
2591 = vector++;
2592 }
2593 }
2594 if (adapter->share_intr == VMXNET3_INTR_BUDDYSHARE)
2595 vector = 0;
2596
2597 for (i = 0; i < adapter->num_rx_queues; i++) {
2598 if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE)
2599 sprintf(adapter->rx_queue[i].name, "%s-rx-%d",
2600 adapter->netdev->name, vector);
2601 else
2602 sprintf(adapter->rx_queue[i].name, "%s-rxtx-%d",
2603 adapter->netdev->name, vector);
2604 err = request_irq(intr->msix_entries[vector].vector,
2605 vmxnet3_msix_rx, 0,
2606 adapter->rx_queue[i].name,
2607 &(adapter->rx_queue[i]));
2608 if (err) {
2609 netdev_err(adapter->netdev,
2610 "Failed to request irq for MSIX, "
2611 "%s, error %d\n",
2612 adapter->rx_queue[i].name, err);
2613 return err;
2614 }
2615
2616 adapter->rx_queue[i].comp_ring.intr_idx = vector++;
2617 }
2618
2619 sprintf(intr->event_msi_vector_name, "%s-event-%d",
2620 adapter->netdev->name, vector);
2621 err = request_irq(intr->msix_entries[vector].vector,
2622 vmxnet3_msix_event, 0,
2623 intr->event_msi_vector_name, adapter->netdev);
2624 intr->event_intr_idx = vector;
2625
2626 } else if (intr->type == VMXNET3_IT_MSI) {
2627 adapter->num_rx_queues = 1;
2628 err = request_irq(adapter->pdev->irq, vmxnet3_intr, 0,
2629 adapter->netdev->name, adapter->netdev);
2630 } else {
2631 #endif
2632 adapter->num_rx_queues = 1;
2633 err = request_irq(adapter->pdev->irq, vmxnet3_intr,
2634 IRQF_SHARED, adapter->netdev->name,
2635 adapter->netdev);
2636 #ifdef CONFIG_PCI_MSI
2637 }
2638 #endif
2639 intr->num_intrs = vector + 1;
2640 if (err) {
2641 netdev_err(adapter->netdev,
2642 "Failed to request irq (intr type:%d), error %d\n",
2643 intr->type, err);
2644 } else {
2645 /* Number of rx queues will not change after this */
2646 for (i = 0; i < adapter->num_rx_queues; i++) {
2647 struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
2648 rq->qid = i;
2649 rq->qid2 = i + adapter->num_rx_queues;
2650 rq->dataRingQid = i + 2 * adapter->num_rx_queues;
2651 }
2652
2653 /* init our intr settings */
2654 for (i = 0; i < intr->num_intrs; i++)
2655 intr->mod_levels[i] = UPT1_IML_ADAPTIVE;
2656 if (adapter->intr.type != VMXNET3_IT_MSIX) {
2657 adapter->intr.event_intr_idx = 0;
2658 for (i = 0; i < adapter->num_tx_queues; i++)
2659 adapter->tx_queue[i].comp_ring.intr_idx = 0;
2660 adapter->rx_queue[0].comp_ring.intr_idx = 0;
2661 }
2662
2663 netdev_info(adapter->netdev,
2664 "intr type %u, mode %u, %u vectors allocated\n",
2665 intr->type, intr->mask_mode, intr->num_intrs);
2666 }
2667
2668 return err;
2669 }
2670
2671
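/* Release the irqs acquired in vmxnet3_request_irqs(), walking the same
 * per-queue vector layout that was used when requesting them.
 */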
2672 static void
2673 vmxnet3_free_irqs(struct vmxnet3_adapter *adapter)
2674 {
2675 struct vmxnet3_intr *intr = &adapter->intr;
2676 BUG_ON(intr->type == VMXNET3_IT_AUTO || intr->num_intrs <= 0);
2677
2678 switch (intr->type) {
2679 #ifdef CONFIG_PCI_MSI
2680 case VMXNET3_IT_MSIX:
2681 {
2682 int i, vector = 0;
2683
2684 if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE) {
2685 for (i = 0; i < adapter->num_tx_queues; i++) {
2686 free_irq(intr->msix_entries[vector++].vector,
2687 &(adapter->tx_queue[i]));
2688 if (adapter->share_intr == VMXNET3_INTR_TXSHARE)
2689 break;
2690 }
2691 }
2692
2693 for (i = 0; i < adapter->num_rx_queues; i++) {
2694 free_irq(intr->msix_entries[vector++].vector,
2695 &(adapter->rx_queue[i]));
2696 }
2697
2698 free_irq(intr->msix_entries[vector].vector,
2699 adapter->netdev);
2700 BUG_ON(vector >= intr->num_intrs);
2701 break;
2702 }
2703 #endif
2704 case VMXNET3_IT_MSI:
2705 free_irq(adapter->pdev->irq, adapter->netdev);
2706 break;
2707 case VMXNET3_IT_INTX:
2708 free_irq(adapter->pdev->irq, adapter->netdev);
2709 break;
2710 default:
2711 BUG();
2712 }
2713 }
2714
2715
2716 static void
2717 vmxnet3_restore_vlan(struct vmxnet3_adapter *adapter)
2718 {
2719 u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
2720 u16 vid;
2721
2722 /* allow untagged pkts */
2723 VMXNET3_SET_VFTABLE_ENTRY(vfTable, 0);
2724
2725 for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
2726 VMXNET3_SET_VFTABLE_ENTRY(vfTable, vid);
2727 }
2728
2729
2730 static int
2731 vmxnet3_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
2732 {
2733 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
2734
2735 if (!(netdev->flags & IFF_PROMISC)) {
2736 u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
2737 unsigned long flags;
2738
2739 VMXNET3_SET_VFTABLE_ENTRY(vfTable, vid);
2740 spin_lock_irqsave(&adapter->cmd_lock, flags);
2741 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2742 VMXNET3_CMD_UPDATE_VLAN_FILTERS);
2743 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
2744 }
2745
2746 set_bit(vid, adapter->active_vlans);
2747
2748 return 0;
2749 }
2750
2751
2752 static int
2753 vmxnet3_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
2754 {
2755 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
2756
2757 if (!(netdev->flags & IFF_PROMISC)) {
2758 u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
2759 unsigned long flags;
2760
2761 VMXNET3_CLEAR_VFTABLE_ENTRY(vfTable, vid);
2762 spin_lock_irqsave(&adapter->cmd_lock, flags);
2763 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2764 VMXNET3_CMD_UPDATE_VLAN_FILTERS);
2765 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
2766 }
2767
2768 clear_bit(vid, adapter->active_vlans);
2769
2770 return 0;
2771 }
2772
2773
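/* Copy the netdev multicast list into a flat array of MAC addresses for
 * the device filter table. Returns NULL if the list does not fit in the
 * 16-bit table length or the allocation fails.
 */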
2774 static u8 *
2775 vmxnet3_copy_mc(struct net_device *netdev)
2776 {
2777 u8 *buf = NULL;
2778 u32 sz = netdev_mc_count(netdev) * ETH_ALEN;
2779
2780 /* struct Vmxnet3_RxFilterConf.mfTableLen is u16. */
2781 if (sz <= 0xffff) {
2782 /* We may be called with BH disabled */
2783 buf = kmalloc(sz, GFP_ATOMIC);
2784 if (buf) {
2785 struct netdev_hw_addr *ha;
2786 int i = 0;
2787
2788 netdev_for_each_mc_addr(ha, netdev)
2789 memcpy(buf + i++ * ETH_ALEN, ha->addr,
2790 ETH_ALEN);
2791 }
2792 }
2793 return buf;
2794 }
2795
2796
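/* Program the device rx filter: unicast/broadcast/all-multi/promiscuous
 * mode bits and, when the multicast list can be copied and DMA-mapped, an
 * exact multicast table. Falls back to ALL_MULTI if the table cannot be
 * set up.
 */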
2797 static void
2798 vmxnet3_set_mc(struct net_device *netdev)
2799 {
2800 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
2801 unsigned long flags;
2802 struct Vmxnet3_RxFilterConf *rxConf =
2803 &adapter->shared->devRead.rxFilterConf;
2804 u8 *new_table = NULL;
2805 dma_addr_t new_table_pa = 0;
2806 bool new_table_pa_valid = false;
2807 u32 new_mode = VMXNET3_RXM_UCAST;
2808
2809 if (netdev->flags & IFF_PROMISC) {
2810 u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
2811 memset(vfTable, 0, VMXNET3_VFT_SIZE * sizeof(*vfTable));
2812
2813 new_mode |= VMXNET3_RXM_PROMISC;
2814 } else {
2815 vmxnet3_restore_vlan(adapter);
2816 }
2817
2818 if (netdev->flags & IFF_BROADCAST)
2819 new_mode |= VMXNET3_RXM_BCAST;
2820
2821 if (netdev->flags & IFF_ALLMULTI)
2822 new_mode |= VMXNET3_RXM_ALL_MULTI;
2823 else
2824 if (!netdev_mc_empty(netdev)) {
2825 new_table = vmxnet3_copy_mc(netdev);
2826 if (new_table) {
2827 size_t sz = netdev_mc_count(netdev) * ETH_ALEN;
2828
2829 rxConf->mfTableLen = cpu_to_le16(sz);
2830 new_table_pa = dma_map_single(
2831 &adapter->pdev->dev,
2832 new_table,
2833 sz,
2834 DMA_TO_DEVICE);
2835 if (!dma_mapping_error(&adapter->pdev->dev,
2836 new_table_pa)) {
2837 new_mode |= VMXNET3_RXM_MCAST;
2838 new_table_pa_valid = true;
2839 rxConf->mfTablePA = cpu_to_le64(
2840 new_table_pa);
2841 }
2842 }
2843 if (!new_table_pa_valid) {
2844 netdev_info(netdev,
2845 "failed to copy mcast list, setting ALL_MULTI\n");
2846 new_mode |= VMXNET3_RXM_ALL_MULTI;
2847 }
2848 }
2849
2850 if (!(new_mode & VMXNET3_RXM_MCAST)) {
2851 rxConf->mfTableLen = 0;
2852 rxConf->mfTablePA = 0;
2853 }
2854
2855 spin_lock_irqsave(&adapter->cmd_lock, flags);
2856 if (new_mode != rxConf->rxMode) {
2857 rxConf->rxMode = cpu_to_le32(new_mode);
2858 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2859 VMXNET3_CMD_UPDATE_RX_MODE);
2860 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2861 VMXNET3_CMD_UPDATE_VLAN_FILTERS);
2862 }
2863
2864 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2865 VMXNET3_CMD_UPDATE_MAC_FILTERS);
2866 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
2867
2868 if (new_table_pa_valid)
2869 dma_unmap_single(&adapter->pdev->dev, new_table_pa,
2870 rxConf->mfTableLen, DMA_TO_DEVICE);
2871 kfree(new_table);
2872 }
2873
2874 void
2875 vmxnet3_rq_destroy_all(struct vmxnet3_adapter *adapter)
2876 {
2877 int i;
2878
2879 for (i = 0; i < adapter->num_rx_queues; i++)
2880 vmxnet3_rq_destroy(&adapter->rx_queue[i], adapter);
2881 }
2882
2883
2884 /*
2885 * Set up driver_shared based on settings in adapter.
2886 */
2887
2888 static void
2889 vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter)
2890 {
2891 struct Vmxnet3_DriverShared *shared = adapter->shared;
2892 struct Vmxnet3_DSDevRead *devRead = &shared->devRead;
2893 struct Vmxnet3_DSDevReadExt *devReadExt = &shared->devReadExt;
2894 struct Vmxnet3_TxQueueConf *tqc;
2895 struct Vmxnet3_RxQueueConf *rqc;
2896 struct Vmxnet3_TxQueueTSConf *tqtsc;
2897 struct Vmxnet3_RxQueueTSConf *rqtsc;
2898 int i;
2899
2900 memset(shared, 0, sizeof(*shared));
2901
2902 /* driver settings */
2903 shared->magic = cpu_to_le32(VMXNET3_REV1_MAGIC);
2904 devRead->misc.driverInfo.version = cpu_to_le32(
2905 VMXNET3_DRIVER_VERSION_NUM);
2906 devRead->misc.driverInfo.gos.gosBits = (sizeof(void *) == 4 ?
2907 VMXNET3_GOS_BITS_32 : VMXNET3_GOS_BITS_64);
2908 devRead->misc.driverInfo.gos.gosType = VMXNET3_GOS_TYPE_LINUX;
2909 *((u32 *)&devRead->misc.driverInfo.gos) = cpu_to_le32(
2910 *((u32 *)&devRead->misc.driverInfo.gos));
2911 devRead->misc.driverInfo.vmxnet3RevSpt = cpu_to_le32(1);
2912 devRead->misc.driverInfo.uptVerSpt = cpu_to_le32(1);
2913
2914 devRead->misc.ddPA = cpu_to_le64(adapter->adapter_pa);
2915 devRead->misc.ddLen = cpu_to_le32(sizeof(struct vmxnet3_adapter));
2916
2917 /* set up feature flags */
2918 if (adapter->netdev->features & NETIF_F_RXCSUM)
2919 devRead->misc.uptFeatures |= UPT1_F_RXCSUM;
2920
2921 if (adapter->netdev->features & NETIF_F_LRO) {
2922 devRead->misc.uptFeatures |= UPT1_F_LRO;
2923 devRead->misc.maxNumRxSG = cpu_to_le16(1 + MAX_SKB_FRAGS);
2924 }
2925 if (adapter->netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
2926 devRead->misc.uptFeatures |= UPT1_F_RXVLAN;
2927
2928 if (adapter->netdev->features & (NETIF_F_GSO_UDP_TUNNEL |
2929 NETIF_F_GSO_UDP_TUNNEL_CSUM))
2930 devRead->misc.uptFeatures |= UPT1_F_RXINNEROFLD;
2931
2932 devRead->misc.mtu = cpu_to_le32(adapter->netdev->mtu);
2933 devRead->misc.queueDescPA = cpu_to_le64(adapter->queue_desc_pa);
2934 devRead->misc.queueDescLen = cpu_to_le32(
2935 adapter->num_tx_queues * sizeof(struct Vmxnet3_TxQueueDesc) +
2936 adapter->num_rx_queues * sizeof(struct Vmxnet3_RxQueueDesc));
2937
2938 /* tx queue settings */
2939 devRead->misc.numTxQueues = adapter->num_tx_queues;
2940 for (i = 0; i < adapter->num_tx_queues; i++) {
2941 struct vmxnet3_tx_queue *tq = &adapter->tx_queue[i];
2942 BUG_ON(adapter->tx_queue[i].tx_ring.base == NULL);
2943 tqc = &adapter->tqd_start[i].conf;
2944 tqc->txRingBasePA = cpu_to_le64(tq->tx_ring.basePA);
2945 tqc->dataRingBasePA = cpu_to_le64(tq->data_ring.basePA);
2946 tqc->compRingBasePA = cpu_to_le64(tq->comp_ring.basePA);
2947 tqc->ddPA = cpu_to_le64(~0ULL);
2948 tqc->txRingSize = cpu_to_le32(tq->tx_ring.size);
2949 tqc->dataRingSize = cpu_to_le32(tq->data_ring.size);
2950 tqc->txDataRingDescSize = cpu_to_le32(tq->txdata_desc_size);
2951 tqc->compRingSize = cpu_to_le32(tq->comp_ring.size);
2952 tqc->ddLen = cpu_to_le32(0);
2953 tqc->intrIdx = tq->comp_ring.intr_idx;
2954 if (VMXNET3_VERSION_GE_9(adapter)) {
2955 tqtsc = &adapter->tqd_start[i].tsConf;
2956 tqtsc->txTSRingBasePA = cpu_to_le64(tq->ts_ring.basePA);
2957 tqtsc->txTSRingDescSize = cpu_to_le16(tq->tx_ts_desc_size);
2958 }
2959 }
2960
2961 /* rx queue settings */
2962 devRead->misc.numRxQueues = adapter->num_rx_queues;
2963 for (i = 0; i < adapter->num_rx_queues; i++) {
2964 struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
2965 rqc = &adapter->rqd_start[i].conf;
2966 rqc->rxRingBasePA[0] = cpu_to_le64(rq->rx_ring[0].basePA);
2967 rqc->rxRingBasePA[1] = cpu_to_le64(rq->rx_ring[1].basePA);
2968 rqc->compRingBasePA = cpu_to_le64(rq->comp_ring.basePA);
2969 rqc->ddPA = cpu_to_le64(~0ULL);
2970 rqc->rxRingSize[0] = cpu_to_le32(rq->rx_ring[0].size);
2971 rqc->rxRingSize[1] = cpu_to_le32(rq->rx_ring[1].size);
2972 rqc->compRingSize = cpu_to_le32(rq->comp_ring.size);
2973 rqc->ddLen = cpu_to_le32(0);
2974 rqc->intrIdx = rq->comp_ring.intr_idx;
2975 if (VMXNET3_VERSION_GE_3(adapter)) {
2976 rqc->rxDataRingBasePA =
2977 cpu_to_le64(rq->data_ring.basePA);
2978 rqc->rxDataRingDescSize =
2979 cpu_to_le16(rq->data_ring.desc_size);
2980 }
2981 if (VMXNET3_VERSION_GE_9(adapter)) {
2982 rqtsc = &adapter->rqd_start[i].tsConf;
2983 rqtsc->rxTSRingBasePA = cpu_to_le64(rq->ts_ring.basePA);
2984 rqtsc->rxTSRingDescSize = cpu_to_le16(rq->rx_ts_desc_size);
2985 }
2986 }
2987
2988 #ifdef VMXNET3_RSS
2989 memset(adapter->rss_conf, 0, sizeof(*adapter->rss_conf));
2990
2991 if (adapter->rss) {
2992 struct UPT1_RSSConf *rssConf = adapter->rss_conf;
2993
2994 devRead->misc.uptFeatures |= UPT1_F_RSS;
2995 devRead->misc.numRxQueues = adapter->num_rx_queues;
2996 rssConf->hashType = UPT1_RSS_HASH_TYPE_TCP_IPV4 |
2997 UPT1_RSS_HASH_TYPE_IPV4 |
2998 UPT1_RSS_HASH_TYPE_TCP_IPV6 |
2999 UPT1_RSS_HASH_TYPE_IPV6;
3000 rssConf->hashFunc = UPT1_RSS_HASH_FUNC_TOEPLITZ;
3001 rssConf->hashKeySize = UPT1_RSS_MAX_KEY_SIZE;
3002 rssConf->indTableSize = VMXNET3_RSS_IND_TABLE_SIZE;
3003 netdev_rss_key_fill(rssConf->hashKey, sizeof(rssConf->hashKey));
3004
3005 for (i = 0; i < rssConf->indTableSize; i++)
3006 rssConf->indTable[i] = ethtool_rxfh_indir_default(
3007 i, adapter->num_rx_queues);
3008
3009 devRead->rssConfDesc.confVer = 1;
3010 devRead->rssConfDesc.confLen = cpu_to_le32(sizeof(*rssConf));
3011 devRead->rssConfDesc.confPA =
3012 cpu_to_le64(adapter->rss_conf_pa);
3013 }
3014
3015 #endif /* VMXNET3_RSS */
3016
3017 /* intr settings */
3018 if (!VMXNET3_VERSION_GE_6(adapter) ||
3019 !adapter->queuesExtEnabled) {
3020 devRead->intrConf.autoMask = adapter->intr.mask_mode ==
3021 VMXNET3_IMM_AUTO;
3022 devRead->intrConf.numIntrs = adapter->intr.num_intrs;
3023 for (i = 0; i < adapter->intr.num_intrs; i++)
3024 devRead->intrConf.modLevels[i] = adapter->intr.mod_levels[i];
3025
3026 devRead->intrConf.eventIntrIdx = adapter->intr.event_intr_idx;
3027 devRead->intrConf.intrCtrl |= cpu_to_le32(VMXNET3_IC_DISABLE_ALL);
3028 } else {
3029 devReadExt->intrConfExt.autoMask = adapter->intr.mask_mode ==
3030 VMXNET3_IMM_AUTO;
3031 devReadExt->intrConfExt.numIntrs = adapter->intr.num_intrs;
3032 for (i = 0; i < adapter->intr.num_intrs; i++)
3033 devReadExt->intrConfExt.modLevels[i] = adapter->intr.mod_levels[i];
3034
3035 devReadExt->intrConfExt.eventIntrIdx = adapter->intr.event_intr_idx;
3036 devReadExt->intrConfExt.intrCtrl |= cpu_to_le32(VMXNET3_IC_DISABLE_ALL);
3037 }
3038
3039 /* rx filter settings */
3040 devRead->rxFilterConf.rxMode = 0;
3041 vmxnet3_restore_vlan(adapter);
3042 vmxnet3_write_mac_addr(adapter, adapter->netdev->dev_addr);
3043
3044 /* the rest are already zeroed */
3045 }
3046
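/* Tell a version 7+ device the per-ring rx buffer sizes computed in
 * vmxnet3_adjust_rx_ring_size() using the SET_RING_BUFFER_SIZE command.
 */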
3047 static void
3048 vmxnet3_init_bufsize(struct vmxnet3_adapter *adapter)
3049 {
3050 struct Vmxnet3_DriverShared *shared = adapter->shared;
3051 union Vmxnet3_CmdInfo *cmdInfo = &shared->cu.cmdInfo;
3052 unsigned long flags;
3053
3054 if (!VMXNET3_VERSION_GE_7(adapter))
3055 return;
3056
3057 cmdInfo->ringBufSize = adapter->ringBufSize;
3058 spin_lock_irqsave(&adapter->cmd_lock, flags);
3059 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
3060 VMXNET3_CMD_SET_RING_BUFFER_SIZE);
3061 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
3062 }
3063
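/* Sync interrupt coalescing with a version 3+ device: read back the device
 * default when default_coal_mode is set, otherwise push the configuration
 * in adapter->coal_conf.
 */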
3064 static void
3065 vmxnet3_init_coalesce(struct vmxnet3_adapter *adapter)
3066 {
3067 struct Vmxnet3_DriverShared *shared = adapter->shared;
3068 union Vmxnet3_CmdInfo *cmdInfo = &shared->cu.cmdInfo;
3069 unsigned long flags;
3070
3071 if (!VMXNET3_VERSION_GE_3(adapter))
3072 return;
3073
3074 spin_lock_irqsave(&adapter->cmd_lock, flags);
3075 cmdInfo->varConf.confVer = 1;
3076 cmdInfo->varConf.confLen =
3077 cpu_to_le32(sizeof(*adapter->coal_conf));
3078 cmdInfo->varConf.confPA = cpu_to_le64(adapter->coal_conf_pa);
3079
3080 if (adapter->default_coal_mode) {
3081 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
3082 VMXNET3_CMD_GET_COALESCE);
3083 } else {
3084 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
3085 VMXNET3_CMD_SET_COALESCE);
3086 }
3087
3088 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
3089 }
3090
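/* Configure the RSS hash fields on a version 4+ device: either read the
 * device defaults or request the configured fields (adjusting UDP/ESP RSS
 * capability bits on version 7+ first) and cache what was actually applied.
 */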
3091 static void
3092 vmxnet3_init_rssfields(struct vmxnet3_adapter *adapter)
3093 {
3094 struct Vmxnet3_DriverShared *shared = adapter->shared;
3095 union Vmxnet3_CmdInfo *cmdInfo = &shared->cu.cmdInfo;
3096 unsigned long flags;
3097
3098 if (!VMXNET3_VERSION_GE_4(adapter))
3099 return;
3100
3101 spin_lock_irqsave(&adapter->cmd_lock, flags);
3102
3103 if (adapter->default_rss_fields) {
3104 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
3105 VMXNET3_CMD_GET_RSS_FIELDS);
3106 adapter->rss_fields =
3107 VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
3108 } else {
3109 if (VMXNET3_VERSION_GE_7(adapter)) {
3110 if ((adapter->rss_fields & VMXNET3_RSS_FIELDS_UDPIP4 ||
3111 adapter->rss_fields & VMXNET3_RSS_FIELDS_UDPIP6) &&
3112 vmxnet3_check_ptcapability(adapter->ptcap_supported[0],
3113 VMXNET3_CAP_UDP_RSS)) {
3114 adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_UDP_RSS;
3115 } else {
3116 adapter->dev_caps[0] &= ~(1UL << VMXNET3_CAP_UDP_RSS);
3117 }
3118
3119 if ((adapter->rss_fields & VMXNET3_RSS_FIELDS_ESPIP4) &&
3120 vmxnet3_check_ptcapability(adapter->ptcap_supported[0],
3121 VMXNET3_CAP_ESP_RSS_IPV4)) {
3122 adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_ESP_RSS_IPV4;
3123 } else {
3124 adapter->dev_caps[0] &= ~(1UL << VMXNET3_CAP_ESP_RSS_IPV4);
3125 }
3126
3127 if ((adapter->rss_fields & VMXNET3_RSS_FIELDS_ESPIP6) &&
3128 vmxnet3_check_ptcapability(adapter->ptcap_supported[0],
3129 VMXNET3_CAP_ESP_RSS_IPV6)) {
3130 adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_ESP_RSS_IPV6;
3131 } else {
3132 adapter->dev_caps[0] &= ~(1UL << VMXNET3_CAP_ESP_RSS_IPV6);
3133 }
3134
3135 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DCR, adapter->dev_caps[0]);
3136 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_DCR0_REG);
3137 adapter->dev_caps[0] = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
3138 }
3139 cmdInfo->setRssFields = adapter->rss_fields;
3140 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
3141 VMXNET3_CMD_SET_RSS_FIELDS);
3142 /* Not all requested RSS may get applied, so get and
3143 * cache what was actually applied.
3144 */
3145 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
3146 VMXNET3_CMD_GET_RSS_FIELDS);
3147 adapter->rss_fields =
3148 VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
3149 }
3150
3151 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
3152 }
3153
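/* Bring the device up: initialize the tx/rx queues, request irqs, fill in
 * the shared area, issue ACTIVATE_DEV, write the rx producer registers,
 * apply the rx filter, and finally enable NAPI and interrupts.
 */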
3154 int
3155 vmxnet3_activate_dev(struct vmxnet3_adapter *adapter)
3156 {
3157 int err, i;
3158 u32 ret;
3159 unsigned long flags;
3160
3161 netdev_dbg(adapter->netdev, "%s: skb_buf_size %d, rx_buf_per_pkt %d,"
3162 " ring sizes %u %u %u\n", adapter->netdev->name,
3163 adapter->skb_buf_size, adapter->rx_buf_per_pkt,
3164 adapter->tx_queue[0].tx_ring.size,
3165 adapter->rx_queue[0].rx_ring[0].size,
3166 adapter->rx_queue[0].rx_ring[1].size);
3167
3168 vmxnet3_tq_init_all(adapter);
3169 err = vmxnet3_rq_init_all(adapter);
3170 if (err) {
3171 netdev_err(adapter->netdev,
3172 "Failed to init rx queue error %d\n", err);
3173 goto rq_err;
3174 }
3175
3176 err = vmxnet3_request_irqs(adapter);
3177 if (err) {
3178 netdev_err(adapter->netdev,
3179 "Failed to setup irq for error %d\n", err);
3180 goto irq_err;
3181 }
3182
3183 vmxnet3_setup_driver_shared(adapter);
3184
3185 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAL, VMXNET3_GET_ADDR_LO(
3186 adapter->shared_pa));
3187 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAH, VMXNET3_GET_ADDR_HI(
3188 adapter->shared_pa));
3189 spin_lock_irqsave(&adapter->cmd_lock, flags);
3190 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
3191 VMXNET3_CMD_ACTIVATE_DEV);
3192 ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
3193 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
3194
3195 if (ret != 0) {
3196 netdev_err(adapter->netdev,
3197 "Failed to activate dev: error %u\n", ret);
3198 err = -EINVAL;
3199 goto activate_err;
3200 }
3201
3202 vmxnet3_init_bufsize(adapter);
3203 vmxnet3_init_coalesce(adapter);
3204 vmxnet3_init_rssfields(adapter);
3205
3206 for (i = 0; i < adapter->num_rx_queues; i++) {
3207 VMXNET3_WRITE_BAR0_REG(adapter,
3208 adapter->rx_prod_offset + i * VMXNET3_REG_ALIGN,
3209 adapter->rx_queue[i].rx_ring[0].next2fill);
3210 VMXNET3_WRITE_BAR0_REG(adapter, (adapter->rx_prod2_offset +
3211 (i * VMXNET3_REG_ALIGN)),
3212 adapter->rx_queue[i].rx_ring[1].next2fill);
3213 }
3214
3215 /* Apply the rx filter settings last. */
3216 vmxnet3_set_mc(adapter->netdev);
3217
3218 /*
3219 * Check link state when first activating device. It will start the
3220 * tx queue if the link is up.
3221 */
3222 vmxnet3_check_link(adapter, true);
3223 netif_tx_wake_all_queues(adapter->netdev);
3224 for (i = 0; i < adapter->num_rx_queues; i++)
3225 napi_enable(&adapter->rx_queue[i].napi);
3226 vmxnet3_enable_all_intrs(adapter);
3227 clear_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state);
3228 return 0;
3229
3230 activate_err:
3231 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAL, 0);
3232 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAH, 0);
3233 vmxnet3_free_irqs(adapter);
3234 irq_err:
3235 rq_err:
3236 /* free up buffers we allocated */
3237 vmxnet3_rq_cleanup_all(adapter);
3238 return err;
3239 }
3240
3241
3242 void
3243 vmxnet3_reset_dev(struct vmxnet3_adapter *adapter)
3244 {
3245 unsigned long flags;
3246 spin_lock_irqsave(&adapter->cmd_lock, flags);
3247 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_RESET_DEV);
3248 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
3249 }
3250
3251
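/* Quiesce the device unless it is already quiesced: issue QUIESCE_DEV,
 * disable interrupts and NAPI, stop the tx queues, and release the queue
 * buffers and irqs.
 */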
3252 int
3253 vmxnet3_quiesce_dev(struct vmxnet3_adapter *adapter)
3254 {
3255 int i;
3256 unsigned long flags;
3257 if (test_and_set_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state))
3258 return 0;
3259
3260
3261 spin_lock_irqsave(&adapter->cmd_lock, flags);
3262 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
3263 VMXNET3_CMD_QUIESCE_DEV);
3264 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
3265 vmxnet3_disable_all_intrs(adapter);
3266
3267 for (i = 0; i < adapter->num_rx_queues; i++)
3268 napi_disable(&adapter->rx_queue[i].napi);
3269 netif_tx_disable(adapter->netdev);
3270 adapter->link_speed = 0;
3271 netif_carrier_off(adapter->netdev);
3272
3273 vmxnet3_tq_cleanup_all(adapter);
3274 vmxnet3_rq_cleanup_all(adapter);
3275 vmxnet3_free_irqs(adapter);
3276 return 0;
3277 }
3278
3279
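/* Write the MAC address to the device: the low four bytes go to MACL and
 * the remaining two bytes to MACH.
 */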
3280 static void
3281 vmxnet3_write_mac_addr(struct vmxnet3_adapter *adapter, const u8 *mac)
3282 {
3283 u32 tmp;
3284
3285 tmp = *(u32 *)mac;
3286 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_MACL, tmp);
3287
3288 tmp = (mac[5] << 8) | mac[4];
3289 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_MACH, tmp);
3290 }
3291
3292
3293 static int
3294 vmxnet3_set_mac_addr(struct net_device *netdev, void *p)
3295 {
3296 struct sockaddr *addr = p;
3297 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
3298
3299 dev_addr_set(netdev, addr->sa_data);
3300 vmxnet3_write_mac_addr(adapter, addr->sa_data);
3301
3302 return 0;
3303 }
3304
3305
3306 /* ==================== initialization and cleanup routines ============ */
3307
3308 static int
3309 vmxnet3_alloc_pci_resources(struct vmxnet3_adapter *adapter)
3310 {
3311 int err;
3312 unsigned long mmio_start, mmio_len;
3313 struct pci_dev *pdev = adapter->pdev;
3314
3315 err = pci_enable_device(pdev);
3316 if (err) {
3317 dev_err(&pdev->dev, "Failed to enable adapter: error %d\n", err);
3318 return err;
3319 }
3320
3321 err = pci_request_selected_regions(pdev, (1 << 2) - 1,
3322 vmxnet3_driver_name);
3323 if (err) {
3324 dev_err(&pdev->dev,
3325 "Failed to request region for adapter: error %d\n", err);
3326 goto err_enable_device;
3327 }
3328
3329 pci_set_master(pdev);
3330
3331 mmio_start = pci_resource_start(pdev, 0);
3332 mmio_len = pci_resource_len(pdev, 0);
3333 adapter->hw_addr0 = ioremap(mmio_start, mmio_len);
3334 if (!adapter->hw_addr0) {
3335 dev_err(&pdev->dev, "Failed to map bar0\n");
3336 err = -EIO;
3337 goto err_ioremap;
3338 }
3339
3340 mmio_start = pci_resource_start(pdev, 1);
3341 mmio_len = pci_resource_len(pdev, 1);
3342 adapter->hw_addr1 = ioremap(mmio_start, mmio_len);
3343 if (!adapter->hw_addr1) {
3344 dev_err(&pdev->dev, "Failed to map bar1\n");
3345 err = -EIO;
3346 goto err_bar1;
3347 }
3348 return 0;
3349
3350 err_bar1:
3351 iounmap(adapter->hw_addr0);
3352 err_ioremap:
3353 pci_release_selected_regions(pdev, (1 << 2) - 1);
3354 err_enable_device:
3355 pci_disable_device(pdev);
3356 return err;
3357 }
3358
3359
3360 static void
3361 vmxnet3_free_pci_resources(struct vmxnet3_adapter *adapter)
3362 {
3363 BUG_ON(!adapter->pdev);
3364
3365 iounmap(adapter->hw_addr0);
3366 iounmap(adapter->hw_addr1);
3367 pci_release_selected_regions(adapter->pdev, (1 << 2) - 1);
3368 pci_disable_device(adapter->pdev);
3369 }
3370
3371
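/* Compute skb_buf_size and rx_buf_per_pkt from the MTU and round the rx
 * ring sizes to a multiple of rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN
 * (rounded down to a power of two on version 7+). The completion ring must
 * be large enough to cover both command rings.
 */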
3372 void
3373 vmxnet3_adjust_rx_ring_size(struct vmxnet3_adapter *adapter)
3374 {
3375 size_t sz, i, ring0_size, ring1_size, comp_size;
3376 /* With version 7, ring1 will have only T0 buffers */
3377 if (!VMXNET3_VERSION_GE_7(adapter)) {
3378 if (adapter->netdev->mtu <= VMXNET3_MAX_SKB_BUF_SIZE -
3379 VMXNET3_MAX_ETH_HDR_SIZE) {
3380 adapter->skb_buf_size = adapter->netdev->mtu +
3381 VMXNET3_MAX_ETH_HDR_SIZE;
3382 if (adapter->skb_buf_size < VMXNET3_MIN_T0_BUF_SIZE)
3383 adapter->skb_buf_size = VMXNET3_MIN_T0_BUF_SIZE;
3384
3385 adapter->rx_buf_per_pkt = 1;
3386 } else {
3387 adapter->skb_buf_size = VMXNET3_MAX_SKB_BUF_SIZE;
3388 sz = adapter->netdev->mtu - VMXNET3_MAX_SKB_BUF_SIZE +
3389 VMXNET3_MAX_ETH_HDR_SIZE;
3390 adapter->rx_buf_per_pkt = 1 + (sz + PAGE_SIZE - 1) / PAGE_SIZE;
3391 }
3392 } else {
3393 adapter->skb_buf_size = min((int)adapter->netdev->mtu + VMXNET3_MAX_ETH_HDR_SIZE,
3394 VMXNET3_MAX_SKB_BUF_SIZE);
3395 adapter->rx_buf_per_pkt = 1;
3396 adapter->ringBufSize.ring1BufSizeType0 = cpu_to_le16(adapter->skb_buf_size);
3397 adapter->ringBufSize.ring1BufSizeType1 = 0;
3398 adapter->ringBufSize.ring2BufSizeType1 = cpu_to_le16(PAGE_SIZE);
3399 }
3400
3401 /*
3402 * for simplicity, force the ring0 size to be a multiple of
3403 * rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN
3404 */
3405 sz = adapter->rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN;
3406 ring0_size = adapter->rx_queue[0].rx_ring[0].size;
3407 ring0_size = (ring0_size + sz - 1) / sz * sz;
3408 ring0_size = min_t(u32, ring0_size, VMXNET3_RX_RING_MAX_SIZE /
3409 sz * sz);
3410 ring1_size = adapter->rx_queue[0].rx_ring[1].size;
3411 ring1_size = (ring1_size + sz - 1) / sz * sz;
3412 ring1_size = min_t(u32, ring1_size, VMXNET3_RX_RING2_MAX_SIZE /
3413 sz * sz);
3414 /* For v7 and later, keep ring size power of 2 for UPT */
3415 if (VMXNET3_VERSION_GE_7(adapter)) {
3416 ring0_size = rounddown_pow_of_two(ring0_size);
3417 ring1_size = rounddown_pow_of_two(ring1_size);
3418 }
3419 comp_size = ring0_size + ring1_size;
3420
3421 for (i = 0; i < adapter->num_rx_queues; i++) {
3422 struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
3423
3424 rq->rx_ring[0].size = ring0_size;
3425 rq->rx_ring[1].size = ring1_size;
3426 rq->comp_ring.size = comp_size;
3427 }
3428 }
3429
3430
3431 int
3432 vmxnet3_create_queues(struct vmxnet3_adapter *adapter, u32 tx_ring_size,
3433 u32 rx_ring_size, u32 rx_ring2_size,
3434 u16 txdata_desc_size, u16 rxdata_desc_size)
3435 {
3436 int err = 0, i;
3437
3438 for (i = 0; i < adapter->num_tx_queues; i++) {
3439 struct vmxnet3_tx_queue *tq = &adapter->tx_queue[i];
3440 tq->tx_ring.size = tx_ring_size;
3441 tq->data_ring.size = tx_ring_size;
3442 tq->comp_ring.size = tx_ring_size;
3443 tq->txdata_desc_size = txdata_desc_size;
3444 tq->shared = &adapter->tqd_start[i].ctrl;
3445 tq->stopped = true;
3446 tq->adapter = adapter;
3447 tq->qid = i;
3448 tq->tx_ts_desc_size = adapter->tx_ts_desc_size;
3449 tq->tsPktCount = 1;
3450 err = vmxnet3_tq_create(tq, adapter);
3451 /*
3452 * Too late to change num_tx_queues. We cannot make do with
3453 * fewer queues than we asked for.
3454 */
3455 if (err)
3456 goto queue_err;
3457 }
3458
3459 adapter->rx_queue[0].rx_ring[0].size = rx_ring_size;
3460 adapter->rx_queue[0].rx_ring[1].size = rx_ring2_size;
3461 vmxnet3_adjust_rx_ring_size(adapter);
3462
3463 adapter->rxdataring_enabled = VMXNET3_VERSION_GE_3(adapter);
3464 for (i = 0; i < adapter->num_rx_queues; i++) {
3465 struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
3466 /* qid and qid2 for rx queues will be assigned later when num
3467 * of rx queues is finalized after allocating intrs */
3468 rq->shared = &adapter->rqd_start[i].ctrl;
3469 rq->adapter = adapter;
3470 rq->data_ring.desc_size = rxdata_desc_size;
3471 rq->rx_ts_desc_size = adapter->rx_ts_desc_size;
3472 err = vmxnet3_rq_create(rq, adapter);
3473 if (err) {
3474 if (i == 0) {
3475 netdev_err(adapter->netdev,
3476 "Could not allocate any rx queues. "
3477 "Aborting.\n");
3478 goto queue_err;
3479 } else {
3480 netdev_info(adapter->netdev,
3481 "Number of rx queues changed "
3482 "to : %d.\n", i);
3483 adapter->num_rx_queues = i;
3484 err = 0;
3485 break;
3486 }
3487 }
3488 }
3489
3490 if (!adapter->rxdataring_enabled)
3491 vmxnet3_rq_destroy_all_rxdataring(adapter);
3492
3493 return err;
3494 queue_err:
3495 vmxnet3_tq_destroy_all(adapter);
3496 return err;
3497 }
3498
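/* ndo_open handler: query the tx/rx data and timestamp descriptor sizes
 * supported by the device, create the tx and rx queues, and activate the
 * device.
 */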
3499 static int
3500 vmxnet3_open(struct net_device *netdev)
3501 {
3502 struct vmxnet3_adapter *adapter;
3503 int err, i;
3504
3505 adapter = netdev_priv(netdev);
3506
3507 for (i = 0; i < adapter->num_tx_queues; i++)
3508 spin_lock_init(&adapter->tx_queue[i].tx_lock);
3509
3510 if (VMXNET3_VERSION_GE_3(adapter)) {
3511 unsigned long flags;
3512 u16 txdata_desc_size;
3513 u32 ret;
3514
3515 spin_lock_irqsave(&adapter->cmd_lock, flags);
3516 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
3517 VMXNET3_CMD_GET_TXDATA_DESC_SIZE);
3518 ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
3519 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
3520
3521 txdata_desc_size = ret & 0xffff;
3522 if ((txdata_desc_size < VMXNET3_TXDATA_DESC_MIN_SIZE) ||
3523 (txdata_desc_size > VMXNET3_TXDATA_DESC_MAX_SIZE) ||
3524 (txdata_desc_size & VMXNET3_TXDATA_DESC_SIZE_MASK)) {
3525 adapter->txdata_desc_size =
3526 sizeof(struct Vmxnet3_TxDataDesc);
3527 } else {
3528 adapter->txdata_desc_size = txdata_desc_size;
3529 }
3530 if (VMXNET3_VERSION_GE_9(adapter))
3531 adapter->rxdata_desc_size = (ret >> 16) & 0xffff;
3532 } else {
3533 adapter->txdata_desc_size = sizeof(struct Vmxnet3_TxDataDesc);
3534 }
3535
3536 if (VMXNET3_VERSION_GE_9(adapter)) {
3537 unsigned long flags;
3538 u16 tx_ts_desc_size = 0;
3539 u16 rx_ts_desc_size = 0;
3540 u32 ret;
3541
3542 spin_lock_irqsave(&adapter->cmd_lock, flags);
3543 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
3544 VMXNET3_CMD_GET_TSRING_DESC_SIZE);
3545 ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
3546 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
3547 if (ret > 0) {
3548 tx_ts_desc_size = (ret & 0xff);
3549 rx_ts_desc_size = ((ret >> 16) & 0xff);
3550 }
3551 if (tx_ts_desc_size > VMXNET3_TXTS_DESC_MAX_SIZE ||
3552 tx_ts_desc_size & VMXNET3_TXTS_DESC_SIZE_MASK)
3553 tx_ts_desc_size = 0;
3554 if (rx_ts_desc_size > VMXNET3_RXTS_DESC_MAX_SIZE ||
3555 rx_ts_desc_size & VMXNET3_RXTS_DESC_SIZE_MASK)
3556 rx_ts_desc_size = 0;
3557 adapter->tx_ts_desc_size = tx_ts_desc_size;
3558 adapter->rx_ts_desc_size = rx_ts_desc_size;
3559 } else {
3560 adapter->tx_ts_desc_size = 0;
3561 adapter->rx_ts_desc_size = 0;
3562 }
3563
3564 err = vmxnet3_create_queues(adapter,
3565 adapter->tx_ring_size,
3566 adapter->rx_ring_size,
3567 adapter->rx_ring2_size,
3568 adapter->txdata_desc_size,
3569 adapter->rxdata_desc_size);
3570 if (err)
3571 goto queue_err;
3572
3573 err = vmxnet3_activate_dev(adapter);
3574 if (err)
3575 goto activate_err;
3576
3577 return 0;
3578
3579 activate_err:
3580 vmxnet3_rq_destroy_all(adapter);
3581 vmxnet3_tq_destroy_all(adapter);
3582 queue_err:
3583 return err;
3584 }
3585
3586
3587 static int
3588 vmxnet3_close(struct net_device *netdev)
3589 {
3590 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
3591
3592 /*
3593 * Reset_work may be in the middle of resetting the device, wait for its
3594 * completion.
3595 */
3596 while (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
3597 usleep_range(1000, 2000);
3598
3599 vmxnet3_quiesce_dev(adapter);
3600
3601 vmxnet3_rq_destroy_all(adapter);
3602 vmxnet3_tq_destroy_all(adapter);
3603
3604 clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
3605
3606
3607 return 0;
3608 }
3609
3610
3611 void
3612 vmxnet3_force_close(struct vmxnet3_adapter *adapter)
3613 {
3614 int i;
3615
3616 /*
3617 * we must clear VMXNET3_STATE_BIT_RESETTING, otherwise
3618 * vmxnet3_close() will deadlock.
3619 */
3620 BUG_ON(test_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state));
3621
3622 /* we need to enable NAPI, otherwise dev_close will deadlock */
3623 for (i = 0; i < adapter->num_rx_queues; i++)
3624 napi_enable(&adapter->rx_queue[i].napi);
3625 /*
3626 * Need to clear the quiesce bit to ensure that vmxnet3_close
3627 * can quiesce the device properly
3628 */
3629 clear_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state);
3630 dev_close(adapter->netdev);
3631 }
3632
3633
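/* ndo_change_mtu handler: when the interface is running, quiesce and reset
 * the device, rebuild the rx queues for the new MTU and re-activate; on any
 * failure the device is force-closed.
 */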
3634 static int
3635 vmxnet3_change_mtu(struct net_device *netdev, int new_mtu)
3636 {
3637 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
3638 int err = 0;
3639
3640 /*
3641 * Reset_work may be in the middle of resetting the device, wait for its
3642 * completion.
3643 */
3644 while (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
3645 usleep_range(1000, 2000);
3646
3647 if (netif_running(netdev)) {
3648 vmxnet3_quiesce_dev(adapter);
3649 vmxnet3_reset_dev(adapter);
3650
3651 /* we need to re-create the rx queue based on the new mtu */
3652 vmxnet3_rq_destroy_all(adapter);
3653 WRITE_ONCE(netdev->mtu, new_mtu);
3654 vmxnet3_adjust_rx_ring_size(adapter);
3655 err = vmxnet3_rq_create_all(adapter);
3656 if (err) {
3657 netdev_err(netdev,
3658 "failed to re-create rx queues, "
3659 " error %d. Closing it.\n", err);
3660 goto out;
3661 }
3662
3663 err = vmxnet3_activate_dev(adapter);
3664 if (err) {
3665 netdev_err(netdev,
3666 "failed to re-activate, error %d. "
3667 "Closing it\n", err);
3668 goto out;
3669 }
3670 } else {
3671 WRITE_ONCE(netdev->mtu, new_mtu);
3672 }
3673
3674 out:
3675 clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
3676 if (err)
3677 vmxnet3_force_close(adapter);
3678
3679 return err;
3680 }
3681
3682
3683 static void
3684 vmxnet3_declare_features(struct vmxnet3_adapter *adapter)
3685 {
3686 struct net_device *netdev = adapter->netdev;
3687 unsigned long flags;
3688
3689 if (VMXNET3_VERSION_GE_9(adapter)) {
3690 spin_lock_irqsave(&adapter->cmd_lock, flags);
3691 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
3692 VMXNET3_CMD_GET_DISABLED_OFFLOADS);
3693 adapter->disabledOffloads = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
3694 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
3695 }
3696
3697 netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
3698 NETIF_F_HW_CSUM | NETIF_F_HW_VLAN_CTAG_TX |
3699 NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_TSO | NETIF_F_TSO6 |
3700 NETIF_F_LRO | NETIF_F_HIGHDMA;
3701
3702 if (VMXNET3_VERSION_GE_4(adapter)) {
3703 netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL |
3704 NETIF_F_GSO_UDP_TUNNEL_CSUM;
3705
3706 netdev->hw_enc_features = NETIF_F_SG | NETIF_F_RXCSUM |
3707 NETIF_F_HW_CSUM | NETIF_F_HW_VLAN_CTAG_TX |
3708 NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_TSO | NETIF_F_TSO6 |
3709 NETIF_F_LRO | NETIF_F_GSO_UDP_TUNNEL |
3710 NETIF_F_GSO_UDP_TUNNEL_CSUM;
3711 }
3712
3713 if (adapter->disabledOffloads & VMXNET3_OFFLOAD_TSO) {
3714 netdev->hw_features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
3715 netdev->hw_enc_features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
3716 }
3717
3718 if (adapter->disabledOffloads & VMXNET3_OFFLOAD_LRO) {
3719 netdev->hw_features &= ~(NETIF_F_LRO);
3720 netdev->hw_enc_features &= ~(NETIF_F_LRO);
3721 }
3722
3723 if (VMXNET3_VERSION_GE_7(adapter)) {
3724 unsigned long flags;
3725
3726 if (vmxnet3_check_ptcapability(adapter->ptcap_supported[0],
3727 VMXNET3_CAP_GENEVE_CHECKSUM_OFFLOAD)) {
3728 adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_GENEVE_CHECKSUM_OFFLOAD;
3729 }
3730 if (vmxnet3_check_ptcapability(adapter->ptcap_supported[0],
3731 VMXNET3_CAP_VXLAN_CHECKSUM_OFFLOAD)) {
3732 adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_VXLAN_CHECKSUM_OFFLOAD;
3733 }
3734 if (vmxnet3_check_ptcapability(adapter->ptcap_supported[0],
3735 VMXNET3_CAP_GENEVE_TSO)) {
3736 adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_GENEVE_TSO;
3737 }
3738 if (vmxnet3_check_ptcapability(adapter->ptcap_supported[0],
3739 VMXNET3_CAP_VXLAN_TSO)) {
3740 adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_VXLAN_TSO;
3741 }
3742 if (vmxnet3_check_ptcapability(adapter->ptcap_supported[0],
3743 VMXNET3_CAP_GENEVE_OUTER_CHECKSUM_OFFLOAD)) {
3744 adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_GENEVE_OUTER_CHECKSUM_OFFLOAD;
3745 }
3746 if (vmxnet3_check_ptcapability(adapter->ptcap_supported[0],
3747 VMXNET3_CAP_VXLAN_OUTER_CHECKSUM_OFFLOAD)) {
3748 adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_VXLAN_OUTER_CHECKSUM_OFFLOAD;
3749 }
3750
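/* Register the capabilities selected above, then read back the set
 * the device actually granted before trimming the offload flags.
 */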
3751 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DCR, adapter->dev_caps[0]);
3752 spin_lock_irqsave(&adapter->cmd_lock, flags);
3753 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_DCR0_REG);
3754 adapter->dev_caps[0] = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
3755 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
3756
3757 if (!(adapter->dev_caps[0] & (1UL << VMXNET3_CAP_GENEVE_CHECKSUM_OFFLOAD)) &&
3758 !(adapter->dev_caps[0] & (1UL << VMXNET3_CAP_VXLAN_CHECKSUM_OFFLOAD)) &&
3759 !(adapter->dev_caps[0] & (1UL << VMXNET3_CAP_GENEVE_TSO)) &&
3760 !(adapter->dev_caps[0] & (1UL << VMXNET3_CAP_VXLAN_TSO))) {
3761 netdev->hw_enc_features &= ~NETIF_F_GSO_UDP_TUNNEL;
3762 netdev->hw_features &= ~NETIF_F_GSO_UDP_TUNNEL;
3763 }
3764 if (!(adapter->dev_caps[0] & (1UL << VMXNET3_CAP_GENEVE_OUTER_CHECKSUM_OFFLOAD)) &&
3765 !(adapter->dev_caps[0] & (1UL << VMXNET3_CAP_VXLAN_OUTER_CHECKSUM_OFFLOAD))) {
3766 netdev->hw_enc_features &= ~NETIF_F_GSO_UDP_TUNNEL_CSUM;
3767 netdev->hw_features &= ~NETIF_F_GSO_UDP_TUNNEL_CSUM;
3768 }
3769 }
3770
3771 netdev->vlan_features = netdev->hw_features &
3772 ~(NETIF_F_HW_VLAN_CTAG_TX |
3773 NETIF_F_HW_VLAN_CTAG_RX);
3774 netdev->features = netdev->hw_features | NETIF_F_HW_VLAN_CTAG_FILTER;
3775 }
3776
3777
3778 static void
3779 vmxnet3_read_mac_addr(struct vmxnet3_adapter *adapter, u8 *mac)
3780 {
3781 u32 tmp;
3782
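/* MACL holds the low four bytes of the MAC address; the remaining
 * two bytes sit in the low half of MACH.
 */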
3783 tmp = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_MACL);
3784 *(u32 *)mac = tmp;
3785
3786 tmp = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_MACH);
3787 mac[4] = tmp & 0xff;
3788 mac[5] = (tmp >> 8) & 0xff;
3789 }
3790
3791 #ifdef CONFIG_PCI_MSI
3792
3793 /*
3794 * Enable MSI-X vectors.
3795 * Returns:
3796 *	VMXNET3_LINUX_MIN_MSIX_VECT when only the minimum required number of
3797 *	vectors could be enabled,
3798 *	the number of enabled vectors otherwise (greater than
3799 *	VMXNET3_LINUX_MIN_MSIX_VECT), or a negative error code on failure.
3800 */
3801
3802 static int
3803 vmxnet3_acquire_msix_vectors(struct vmxnet3_adapter *adapter, int nvec)
3804 {
3805 int ret = pci_enable_msix_range(adapter->pdev,
3806 adapter->intr.msix_entries, nvec, nvec);
3807
3808 if (ret == -ENOSPC && nvec > VMXNET3_LINUX_MIN_MSIX_VECT) {
3809 dev_err(&adapter->netdev->dev,
3810 "Failed to enable %d MSI-X, trying %d\n",
3811 nvec, VMXNET3_LINUX_MIN_MSIX_VECT);
3812
3813 ret = pci_enable_msix_range(adapter->pdev,
3814 adapter->intr.msix_entries,
3815 VMXNET3_LINUX_MIN_MSIX_VECT,
3816 VMXNET3_LINUX_MIN_MSIX_VECT);
3817 }
3818
3819 if (ret < 0) {
3820 dev_err(&adapter->netdev->dev,
3821 "Failed to enable MSI-X, error: %d\n", ret);
3822 }
3823
3824 return ret;
3825 }
3826
3827
3828 #endif /* CONFIG_PCI_MSI */
3829
3830 static void
3831 vmxnet3_alloc_intr_resources(struct vmxnet3_adapter *adapter)
3832 {
3833 u32 cfg;
3834 unsigned long flags;
3835
3836 /* intr settings */
3837 spin_lock_irqsave(&adapter->cmd_lock, flags);
3838 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
3839 VMXNET3_CMD_GET_CONF_INTR);
3840 cfg = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
3841 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
3842 adapter->intr.type = cfg & 0x3;
3843 adapter->intr.mask_mode = (cfg >> 2) & 0x3;
3844
3845 if (adapter->intr.type == VMXNET3_IT_AUTO) {
3846 adapter->intr.type = VMXNET3_IT_MSIX;
3847 }
3848
3849 #ifdef CONFIG_PCI_MSI
3850 if (adapter->intr.type == VMXNET3_IT_MSIX) {
3851 int i, nvec, nvec_allocated;
3852
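/* One vector per tx queue (or a single shared one), one per rx
 * queue unless tx/rx pairs buddy-share a vector, plus one for link
 * events; never request fewer than the driver minimum.
 */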
3853 nvec = adapter->share_intr == VMXNET3_INTR_TXSHARE ?
3854 1 : adapter->num_tx_queues;
3855 nvec += adapter->share_intr == VMXNET3_INTR_BUDDYSHARE ?
3856 0 : adapter->num_rx_queues;
3857 nvec += 1; /* for link event */
3858 nvec = nvec > VMXNET3_LINUX_MIN_MSIX_VECT ?
3859 nvec : VMXNET3_LINUX_MIN_MSIX_VECT;
3860
3861 for (i = 0; i < nvec; i++)
3862 adapter->intr.msix_entries[i].entry = i;
3863
3864 nvec_allocated = vmxnet3_acquire_msix_vectors(adapter, nvec);
3865 if (nvec_allocated < 0)
3866 goto msix_err;
3867
3868 /* If we cannot allocate one MSIx vector per queue
3869 * then limit the number of rx queues to 1
3870 */
3871 if (nvec_allocated == VMXNET3_LINUX_MIN_MSIX_VECT &&
3872 nvec != VMXNET3_LINUX_MIN_MSIX_VECT) {
3873 if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE
3874 || adapter->num_rx_queues != 1) {
3875 adapter->share_intr = VMXNET3_INTR_TXSHARE;
3876 netdev_err(adapter->netdev,
3877 "Number of rx queues : 1\n");
3878 adapter->num_rx_queues = 1;
3879 }
3880 }
3881
3882 adapter->intr.num_intrs = nvec_allocated;
3883 return;
3884
3885 msix_err:
3886 /* If we cannot allocate MSIx vectors use only one rx queue */
3887 dev_info(&adapter->pdev->dev,
3888 "Failed to enable MSI-X, error %d. "
3889 "Limiting #rx queues to 1, try MSI.\n", nvec_allocated);
3890
3891 adapter->intr.type = VMXNET3_IT_MSI;
3892 }
3893
3894 if (adapter->intr.type == VMXNET3_IT_MSI) {
3895 if (!pci_enable_msi(adapter->pdev)) {
3896 adapter->num_rx_queues = 1;
3897 adapter->intr.num_intrs = 1;
3898 return;
3899 }
3900 }
3901 #endif /* CONFIG_PCI_MSI */
3902
3903 adapter->num_rx_queues = 1;
3904 dev_info(&adapter->netdev->dev,
3905 "Using INTx interrupt, #Rx queues: 1.\n");
3906 adapter->intr.type = VMXNET3_IT_INTX;
3907
3908 /* INT-X related setting */
3909 adapter->intr.num_intrs = 1;
3910 }
3911
3912
3913 static void
3914 vmxnet3_free_intr_resources(struct vmxnet3_adapter *adapter)
3915 {
3916 if (adapter->intr.type == VMXNET3_IT_MSIX)
3917 pci_disable_msix(adapter->pdev);
3918 else if (adapter->intr.type == VMXNET3_IT_MSI)
3919 pci_disable_msi(adapter->pdev);
3920 else
3921 BUG_ON(adapter->intr.type != VMXNET3_IT_INTX);
3922 }
3923
3924
3925 static void
3926 vmxnet3_tx_timeout(struct net_device *netdev, unsigned int txqueue)
3927 {
3928 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
3929 adapter->tx_timeout_count++;
3930
3931 netdev_err(adapter->netdev, "tx hang\n");
3932 schedule_work(&adapter->work);
3933 }
3934
3935
3936 static void
3937 vmxnet3_reset_work(struct work_struct *data)
3938 {
3939 struct vmxnet3_adapter *adapter;
3940
3941 adapter = container_of(data, struct vmxnet3_adapter, work);
3942
3943 /* if another thread is resetting the device, no need to proceed */
3944 if (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
3945 return;
3946
3947 /* if the device is closed, we must leave it alone */
3948 rtnl_lock();
3949 if (netif_running(adapter->netdev)) {
3950 netdev_notice(adapter->netdev, "resetting\n");
3951 vmxnet3_quiesce_dev(adapter);
3952 vmxnet3_reset_dev(adapter);
3953 vmxnet3_activate_dev(adapter);
3954 } else {
3955 netdev_info(adapter->netdev, "already closed\n");
3956 }
3957 rtnl_unlock();
3958
3959 netif_wake_queue(adapter->netdev);
3960 clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
3961 }
3962
3963
3964 static int
3965 vmxnet3_probe_device(struct pci_dev *pdev,
3966 const struct pci_device_id *id)
3967 {
3968 static const struct net_device_ops vmxnet3_netdev_ops = {
3969 .ndo_open = vmxnet3_open,
3970 .ndo_stop = vmxnet3_close,
3971 .ndo_start_xmit = vmxnet3_xmit_frame,
3972 .ndo_set_mac_address = vmxnet3_set_mac_addr,
3973 .ndo_change_mtu = vmxnet3_change_mtu,
3974 .ndo_fix_features = vmxnet3_fix_features,
3975 .ndo_set_features = vmxnet3_set_features,
3976 .ndo_features_check = vmxnet3_features_check,
3977 .ndo_get_stats64 = vmxnet3_get_stats64,
3978 .ndo_tx_timeout = vmxnet3_tx_timeout,
3979 .ndo_set_rx_mode = vmxnet3_set_mc,
3980 .ndo_vlan_rx_add_vid = vmxnet3_vlan_rx_add_vid,
3981 .ndo_vlan_rx_kill_vid = vmxnet3_vlan_rx_kill_vid,
3982 #ifdef CONFIG_NET_POLL_CONTROLLER
3983 .ndo_poll_controller = vmxnet3_netpoll,
3984 #endif
3985 .ndo_bpf = vmxnet3_xdp,
3986 .ndo_xdp_xmit = vmxnet3_xdp_xmit,
3987 };
3988 int err;
3989 u32 ver;
3990 struct net_device *netdev;
3991 struct vmxnet3_adapter *adapter;
3992 u8 mac[ETH_ALEN];
3993 int size, i;
3994 int num_tx_queues;
3995 int num_rx_queues;
3996 int queues;
3997 unsigned long flags;
3998
3999 if (!pci_msi_enabled())
4000 enable_mq = 0;
4001
4002 #ifdef VMXNET3_RSS
4003 if (enable_mq)
4004 num_rx_queues = min(VMXNET3_DEVICE_MAX_RX_QUEUES,
4005 (int)num_online_cpus());
4006 else
4007 #endif
4008 num_rx_queues = 1;
4009
4010 if (enable_mq)
4011 num_tx_queues = min(VMXNET3_DEVICE_MAX_TX_QUEUES,
4012 (int)num_online_cpus());
4013 else
4014 num_tx_queues = 1;
4015
4016 netdev = alloc_etherdev_mq(sizeof(struct vmxnet3_adapter),
4017 max(num_tx_queues, num_rx_queues));
4018 if (!netdev)
4019 return -ENOMEM;
4020
4021 pci_set_drvdata(pdev, netdev);
4022 adapter = netdev_priv(netdev);
4023 adapter->netdev = netdev;
4024 adapter->pdev = pdev;
4025
4026 adapter->tx_ring_size = VMXNET3_DEF_TX_RING_SIZE;
4027 adapter->rx_ring_size = VMXNET3_DEF_RX_RING_SIZE;
4028 adapter->rx_ring2_size = VMXNET3_DEF_RX_RING2_SIZE;
4029
4030 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
4031 if (err) {
4032 dev_err(&pdev->dev, "dma_set_mask failed\n");
4033 goto err_set_mask;
4034 }
4035
4036 spin_lock_init(&adapter->cmd_lock);
4037 adapter->adapter_pa = dma_map_single(&adapter->pdev->dev, adapter,
4038 sizeof(struct vmxnet3_adapter),
4039 DMA_TO_DEVICE);
4040 if (dma_mapping_error(&adapter->pdev->dev, adapter->adapter_pa)) {
4041 dev_err(&pdev->dev, "Failed to map dma\n");
4042 err = -EFAULT;
4043 goto err_set_mask;
4044 }
4045 adapter->shared = dma_alloc_coherent(
4046 &adapter->pdev->dev,
4047 sizeof(struct Vmxnet3_DriverShared),
4048 &adapter->shared_pa, GFP_KERNEL);
4049 if (!adapter->shared) {
4050 dev_err(&pdev->dev, "Failed to allocate memory\n");
4051 err = -ENOMEM;
4052 goto err_alloc_shared;
4053 }
4054
4055 err = vmxnet3_alloc_pci_resources(adapter);
4056 if (err < 0)
4057 goto err_alloc_pci;
4058
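/* Pick the highest device revision advertised in VRRS that this
 * driver knows about and acknowledge it by writing that bit back.
 */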
4059 ver = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_VRRS);
4060 for (i = VMXNET3_REV_9; i >= VMXNET3_REV_1; i--) {
4061 if (ver & (1 << i)) {
4062 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_VRRS, 1 << i);
4063 adapter->version = i + 1;
4064 break;
4065 }
4066 }
4067 if (i < VMXNET3_REV_1) {
4068 dev_err(&pdev->dev,
4069 "Incompatible h/w version (0x%x) for adapter\n", ver);
4070 err = -EBUSY;
4071 goto err_ver;
4072 }
4073 dev_dbg(&pdev->dev, "Using device version %d\n", adapter->version);
4074
4075 ver = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_UVRS);
4076 if (ver & 1) {
4077 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_UVRS, 1);
4078 } else {
4079 dev_err(&pdev->dev,
4080 "Incompatible upt version (0x%x) for adapter\n", ver);
4081 err = -EBUSY;
4082 goto err_ver;
4083 }
4084
4085 if (VMXNET3_VERSION_GE_7(adapter)) {
4086 adapter->devcap_supported[0] = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_DCR);
4087 adapter->ptcap_supported[0] = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_PTCR);
4088 if (adapter->devcap_supported[0] & (1UL << VMXNET3_CAP_LARGE_BAR)) {
4089 adapter->dev_caps[0] = adapter->devcap_supported[0] &
4090 (1UL << VMXNET3_CAP_LARGE_BAR);
4091 }
4092 if (!(adapter->ptcap_supported[0] & (1UL << VMXNET3_DCR_ERROR)) &&
4093 adapter->ptcap_supported[0] & (1UL << VMXNET3_CAP_OOORX_COMP) &&
4094 adapter->devcap_supported[0] & (1UL << VMXNET3_CAP_OOORX_COMP)) {
4095 adapter->dev_caps[0] |= adapter->devcap_supported[0] &
4096 (1UL << VMXNET3_CAP_OOORX_COMP);
4097 }
4098 if (adapter->dev_caps[0])
4099 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DCR, adapter->dev_caps[0]);
4100
4101 spin_lock_irqsave(&adapter->cmd_lock, flags);
4102 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_DCR0_REG);
4103 adapter->dev_caps[0] = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
4104 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
4105 }
4106
4107 if (VMXNET3_VERSION_GE_7(adapter) &&
4108 adapter->dev_caps[0] & (1UL << VMXNET3_CAP_LARGE_BAR)) {
4109 adapter->tx_prod_offset = VMXNET3_REG_LB_TXPROD;
4110 adapter->rx_prod_offset = VMXNET3_REG_LB_RXPROD;
4111 adapter->rx_prod2_offset = VMXNET3_REG_LB_RXPROD2;
4112 } else {
4113 adapter->tx_prod_offset = VMXNET3_REG_TXPROD;
4114 adapter->rx_prod_offset = VMXNET3_REG_RXPROD;
4115 adapter->rx_prod2_offset = VMXNET3_REG_RXPROD2;
4116 }
4117
4118 if (VMXNET3_VERSION_GE_6(adapter)) {
4119 spin_lock_irqsave(&adapter->cmd_lock, flags);
4120 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
4121 VMXNET3_CMD_GET_MAX_QUEUES_CONF);
4122 queues = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
4123 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
4124 if (queues > 0) {
4125 adapter->num_rx_queues = min(num_rx_queues, ((queues >> 8) & 0xff));
4126 adapter->num_tx_queues = min(num_tx_queues, (queues & 0xff));
4127 } else {
4128 adapter->num_rx_queues = min(num_rx_queues,
4129 VMXNET3_DEVICE_DEFAULT_RX_QUEUES);
4130 adapter->num_tx_queues = min(num_tx_queues,
4131 VMXNET3_DEVICE_DEFAULT_TX_QUEUES);
4132 }
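/* The extended queue description is only needed when the negotiated
 * counts exceed the legacy VMXNET3_MAX_*_QUEUES limits.
 */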
4133 if (adapter->num_rx_queues > VMXNET3_MAX_RX_QUEUES ||
4134 adapter->num_tx_queues > VMXNET3_MAX_TX_QUEUES) {
4135 adapter->queuesExtEnabled = true;
4136 } else {
4137 adapter->queuesExtEnabled = false;
4138 }
4139 } else {
4140 adapter->queuesExtEnabled = false;
4141 num_rx_queues = rounddown_pow_of_two(num_rx_queues);
4142 num_tx_queues = rounddown_pow_of_two(num_tx_queues);
4143 adapter->num_rx_queues = min(num_rx_queues,
4144 VMXNET3_DEVICE_DEFAULT_RX_QUEUES);
4145 adapter->num_tx_queues = min(num_tx_queues,
4146 VMXNET3_DEVICE_DEFAULT_TX_QUEUES);
4147 }
4148 dev_info(&pdev->dev,
4149 "# of Tx queues : %d, # of Rx queues : %d\n",
4150 adapter->num_tx_queues, adapter->num_rx_queues);
4151
4152 adapter->rx_buf_per_pkt = 1;
4153
4154 size = sizeof(struct Vmxnet3_TxQueueDesc) * adapter->num_tx_queues;
4155 size += sizeof(struct Vmxnet3_RxQueueDesc) * adapter->num_rx_queues;
4156 adapter->tqd_start = dma_alloc_coherent(&adapter->pdev->dev, size,
4157 &adapter->queue_desc_pa,
4158 GFP_KERNEL);
4159
4160 if (!adapter->tqd_start) {
4161 dev_err(&pdev->dev, "Failed to allocate memory\n");
4162 err = -ENOMEM;
4163 goto err_ver;
4164 }
4165 adapter->rqd_start = (struct Vmxnet3_RxQueueDesc *)(adapter->tqd_start +
4166 adapter->num_tx_queues);
4167 if (VMXNET3_VERSION_GE_9(adapter))
4168 adapter->latencyConf = &adapter->tqd_start->tsConf.latencyConf;
4169
4170 adapter->pm_conf = dma_alloc_coherent(&adapter->pdev->dev,
4171 sizeof(struct Vmxnet3_PMConf),
4172 &adapter->pm_conf_pa,
4173 GFP_KERNEL);
4174 if (adapter->pm_conf == NULL) {
4175 err = -ENOMEM;
4176 goto err_alloc_pm;
4177 }
4178
4179 #ifdef VMXNET3_RSS
4180
4181 adapter->rss_conf = dma_alloc_coherent(&adapter->pdev->dev,
4182 sizeof(struct UPT1_RSSConf),
4183 &adapter->rss_conf_pa,
4184 GFP_KERNEL);
4185 if (adapter->rss_conf == NULL) {
4186 err = -ENOMEM;
4187 goto err_alloc_rss;
4188 }
4189 #endif /* VMXNET3_RSS */
4190
4191 if (VMXNET3_VERSION_GE_3(adapter)) {
4192 adapter->coal_conf =
4193 dma_alloc_coherent(&adapter->pdev->dev,
4194 sizeof(struct Vmxnet3_CoalesceScheme),
4196 &adapter->coal_conf_pa,
4197 GFP_KERNEL);
4198 if (!adapter->coal_conf) {
4199 err = -ENOMEM;
4200 goto err_coal_conf;
4201 }
4202 adapter->coal_conf->coalMode = VMXNET3_COALESCE_DISABLED;
4203 adapter->default_coal_mode = true;
4204 }
4205
4206 if (VMXNET3_VERSION_GE_4(adapter)) {
4207 adapter->default_rss_fields = true;
4208 adapter->rss_fields = VMXNET3_RSS_FIELDS_DEFAULT;
4209 }
4210
4211 SET_NETDEV_DEV(netdev, &pdev->dev);
4212 vmxnet3_declare_features(adapter);
4213 netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
4214 NETDEV_XDP_ACT_NDO_XMIT;
4215
4216 adapter->rxdata_desc_size = VMXNET3_VERSION_GE_3(adapter) ?
4217 VMXNET3_DEF_RXDATA_DESC_SIZE : 0;
4218
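/* Tx/rx queue pairs can buddy-share an interrupt vector only when
 * the two counts match; otherwise keep their vectors separate.
 */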
4219 if (adapter->num_tx_queues == adapter->num_rx_queues)
4220 adapter->share_intr = VMXNET3_INTR_BUDDYSHARE;
4221 else
4222 adapter->share_intr = VMXNET3_INTR_DONTSHARE;
4223
4224 vmxnet3_alloc_intr_resources(adapter);
4225
4226 #ifdef VMXNET3_RSS
4227 if (adapter->num_rx_queues > 1 &&
4228 adapter->intr.type == VMXNET3_IT_MSIX) {
4229 adapter->rss = true;
4230 netdev->hw_features |= NETIF_F_RXHASH;
4231 netdev->features |= NETIF_F_RXHASH;
4232 dev_dbg(&pdev->dev, "RSS is enabled.\n");
4233 } else {
4234 adapter->rss = false;
4235 }
4236 #endif
4237
4238 vmxnet3_read_mac_addr(adapter, mac);
4239 dev_addr_set(netdev, mac);
4240
4241 netdev->netdev_ops = &vmxnet3_netdev_ops;
4242 vmxnet3_set_ethtool_ops(netdev);
4243 netdev->watchdog_timeo = 5 * HZ;
4244
4245 /* MTU range: 60 - 9190 */
4246 netdev->min_mtu = VMXNET3_MIN_MTU;
4247 if (VMXNET3_VERSION_GE_6(adapter))
4248 netdev->max_mtu = VMXNET3_V6_MAX_MTU;
4249 else
4250 netdev->max_mtu = VMXNET3_MAX_MTU;
4251
4252 INIT_WORK(&adapter->work, vmxnet3_reset_work);
4253 set_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state);
4254
4255 if (adapter->intr.type == VMXNET3_IT_MSIX) {
4256 int i;
4257 for (i = 0; i < adapter->num_rx_queues; i++) {
4258 netif_napi_add(adapter->netdev,
4259 &adapter->rx_queue[i].napi,
4260 vmxnet3_poll_rx_only);
4261 }
4262 } else {
4263 netif_napi_add(adapter->netdev, &adapter->rx_queue[0].napi,
4264 vmxnet3_poll);
4265 }
4266
4267 netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues);
4268 netif_set_real_num_rx_queues(adapter->netdev, adapter->num_rx_queues);
4269
4270 netif_carrier_off(netdev);
4271 err = register_netdev(netdev);
4272
4273 if (err) {
4274 dev_err(&pdev->dev, "Failed to register adapter\n");
4275 goto err_register;
4276 }
4277
4278 vmxnet3_check_link(adapter, false);
4279 return 0;
4280
4281 err_register:
4282 if (VMXNET3_VERSION_GE_3(adapter)) {
4283 dma_free_coherent(&adapter->pdev->dev,
4284 sizeof(struct Vmxnet3_CoalesceScheme),
4285 adapter->coal_conf, adapter->coal_conf_pa);
4286 }
4287 vmxnet3_free_intr_resources(adapter);
4288 err_coal_conf:
4289 #ifdef VMXNET3_RSS
4290 dma_free_coherent(&adapter->pdev->dev, sizeof(struct UPT1_RSSConf),
4291 adapter->rss_conf, adapter->rss_conf_pa);
4292 err_alloc_rss:
4293 #endif
4294 dma_free_coherent(&adapter->pdev->dev, sizeof(struct Vmxnet3_PMConf),
4295 adapter->pm_conf, adapter->pm_conf_pa);
4296 err_alloc_pm:
4297 dma_free_coherent(&adapter->pdev->dev, size, adapter->tqd_start,
4298 adapter->queue_desc_pa);
4299 err_ver:
4300 vmxnet3_free_pci_resources(adapter);
4301 err_alloc_pci:
4302 dma_free_coherent(&adapter->pdev->dev,
4303 sizeof(struct Vmxnet3_DriverShared),
4304 adapter->shared, adapter->shared_pa);
4305 err_alloc_shared:
4306 dma_unmap_single(&adapter->pdev->dev, adapter->adapter_pa,
4307 sizeof(struct vmxnet3_adapter), DMA_TO_DEVICE);
4308 err_set_mask:
4309 free_netdev(netdev);
4310 return err;
4311 }
4312
4313
4314 static void
4315 vmxnet3_remove_device(struct pci_dev *pdev)
4316 {
4317 struct net_device *netdev = pci_get_drvdata(pdev);
4318 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
4319 int size = 0;
4320 int num_rx_queues, rx_queues;
4321 unsigned long flags;
4322
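/* Recompute the rx queue count the same way probe did, so the queue
 * descriptor area below is freed with the size it was allocated with.
 */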
4323 #ifdef VMXNET3_RSS
4324 if (enable_mq)
4325 num_rx_queues = min(VMXNET3_DEVICE_MAX_RX_QUEUES,
4326 (int)num_online_cpus());
4327 else
4328 #endif
4329 num_rx_queues = 1;
4330 if (!VMXNET3_VERSION_GE_6(adapter)) {
4331 num_rx_queues = rounddown_pow_of_two(num_rx_queues);
4332 }
4333 if (VMXNET3_VERSION_GE_6(adapter)) {
4334 spin_lock_irqsave(&adapter->cmd_lock, flags);
4335 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
4336 VMXNET3_CMD_GET_MAX_QUEUES_CONF);
4337 rx_queues = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
4338 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
4339 if (rx_queues > 0)
4340 rx_queues = (rx_queues >> 8) & 0xff;
4341 else
4342 rx_queues = min(num_rx_queues, VMXNET3_DEVICE_DEFAULT_RX_QUEUES);
4343 num_rx_queues = min(num_rx_queues, rx_queues);
4344 } else {
4345 num_rx_queues = min(num_rx_queues,
4346 VMXNET3_DEVICE_DEFAULT_RX_QUEUES);
4347 }
4348
4349 cancel_work_sync(&adapter->work);
4350
4351 unregister_netdev(netdev);
4352
4353 vmxnet3_free_intr_resources(adapter);
4354 vmxnet3_free_pci_resources(adapter);
4355 if (VMXNET3_VERSION_GE_3(adapter)) {
4356 dma_free_coherent(&adapter->pdev->dev,
4357 sizeof(struct Vmxnet3_CoalesceScheme),
4358 adapter->coal_conf, adapter->coal_conf_pa);
4359 }
4360 #ifdef VMXNET3_RSS
4361 dma_free_coherent(&adapter->pdev->dev, sizeof(struct UPT1_RSSConf),
4362 adapter->rss_conf, adapter->rss_conf_pa);
4363 #endif
4364 dma_free_coherent(&adapter->pdev->dev, sizeof(struct Vmxnet3_PMConf),
4365 adapter->pm_conf, adapter->pm_conf_pa);
4366
4367 size = sizeof(struct Vmxnet3_TxQueueDesc) * adapter->num_tx_queues;
4368 size += sizeof(struct Vmxnet3_RxQueueDesc) * num_rx_queues;
4369 dma_free_coherent(&adapter->pdev->dev, size, adapter->tqd_start,
4370 adapter->queue_desc_pa);
4371 dma_free_coherent(&adapter->pdev->dev,
4372 sizeof(struct Vmxnet3_DriverShared),
4373 adapter->shared, adapter->shared_pa);
4374 dma_unmap_single(&adapter->pdev->dev, adapter->adapter_pa,
4375 sizeof(struct vmxnet3_adapter), DMA_TO_DEVICE);
4376 free_netdev(netdev);
4377 }
4378
4379 static void vmxnet3_shutdown_device(struct pci_dev *pdev)
4380 {
4381 struct net_device *netdev = pci_get_drvdata(pdev);
4382 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
4383 unsigned long flags;
4384
4385 /* Reset_work may be in the middle of resetting the device, wait for its
4386 * completion.
4387 */
4388 while (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
4389 usleep_range(1000, 2000);
4390
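/* Nothing more to do if the device is already quiesced. */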
4391 if (test_and_set_bit(VMXNET3_STATE_BIT_QUIESCED,
4392 &adapter->state)) {
4393 clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
4394 return;
4395 }
4396 spin_lock_irqsave(&adapter->cmd_lock, flags);
4397 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
4398 VMXNET3_CMD_QUIESCE_DEV);
4399 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
4400 vmxnet3_disable_all_intrs(adapter);
4401
4402 clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
4403 }
4404
4405
4406 #ifdef CONFIG_PM
4407
4408 static int
4409 vmxnet3_suspend(struct device *device)
4410 {
4411 struct pci_dev *pdev = to_pci_dev(device);
4412 struct net_device *netdev = pci_get_drvdata(pdev);
4413 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
4414 struct Vmxnet3_PMConf *pmConf;
4415 struct ethhdr *ehdr;
4416 struct arphdr *ahdr;
4417 u8 *arpreq;
4418 struct in_device *in_dev;
4419 struct in_ifaddr *ifa;
4420 unsigned long flags;
4421 int i = 0;
4422
4423 if (!netif_running(netdev))
4424 return 0;
4425
4426 for (i = 0; i < adapter->num_rx_queues; i++)
4427 napi_disable(&adapter->rx_queue[i].napi);
4428
4429 vmxnet3_disable_all_intrs(adapter);
4430 vmxnet3_free_irqs(adapter);
4431 vmxnet3_free_intr_resources(adapter);
4432
4433 netif_device_detach(netdev);
4434
4435 /* Create wake-up filters. */
4436 pmConf = adapter->pm_conf;
4437 memset(pmConf, 0, sizeof(*pmConf));
4438
4439 if (adapter->wol & WAKE_UCAST) {
4440 pmConf->filters[i].patternSize = ETH_ALEN;
4441 pmConf->filters[i].maskSize = 1;
4442 memcpy(pmConf->filters[i].pattern, netdev->dev_addr, ETH_ALEN);
4443 pmConf->filters[i].mask[0] = 0x3F; /* LSB ETH_ALEN bits */
4444
4445 pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_FILTER;
4446 i++;
4447 }
4448
4449 if (adapter->wol & WAKE_ARP) {
4450 rcu_read_lock();
4451
4452 in_dev = __in_dev_get_rcu(netdev);
4453 if (!in_dev) {
4454 rcu_read_unlock();
4455 goto skip_arp;
4456 }
4457
4458 ifa = rcu_dereference(in_dev->ifa_list);
4459 if (!ifa) {
4460 rcu_read_unlock();
4461 goto skip_arp;
4462 }
4463
4464 pmConf->filters[i].patternSize = ETH_HLEN + /* Ethernet header*/
4465 sizeof(struct arphdr) + /* ARP header */
4466 2 * ETH_ALEN + /* 2 Ethernet addresses*/
4467 2 * sizeof(u32); /*2 IPv4 addresses */
4468 pmConf->filters[i].maskSize =
4469 (pmConf->filters[i].patternSize - 1) / 8 + 1;
4470
4471 /* ETH_P_ARP in Ethernet header. */
4472 ehdr = (struct ethhdr *)pmConf->filters[i].pattern;
4473 ehdr->h_proto = htons(ETH_P_ARP);
4474
4475 /* ARPOP_REQUEST in ARP header. */
4476 ahdr = (struct arphdr *)&pmConf->filters[i].pattern[ETH_HLEN];
4477 ahdr->ar_op = htons(ARPOP_REQUEST);
4478 arpreq = (u8 *)(ahdr + 1);
4479
4480 /* The Unicast IPv4 address in 'tip' field. */
4481 arpreq += 2 * ETH_ALEN + sizeof(u32);
4482 *(__be32 *)arpreq = ifa->ifa_address;
4483
4484 rcu_read_unlock();
4485
4486 /* The mask for the relevant bits. */
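/* Each mask bit enables one pattern byte: mask[1] covers the
 * EtherType bytes (12-13), mask[2] the ARP opcode (20-21), and
 * mask[4]/mask[5] the target IP address (38-41).
 */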
4487 pmConf->filters[i].mask[0] = 0x00;
4488 pmConf->filters[i].mask[1] = 0x30; /* ETH_P_ARP */
4489 pmConf->filters[i].mask[2] = 0x30; /* ARPOP_REQUEST */
4490 pmConf->filters[i].mask[3] = 0x00;
4491 pmConf->filters[i].mask[4] = 0xC0; /* IPv4 TIP */
4492 pmConf->filters[i].mask[5] = 0x03; /* IPv4 TIP */
4493
4494 pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_FILTER;
4495 i++;
4496 }
4497
4498 skip_arp:
4499 if (adapter->wol & WAKE_MAGIC)
4500 pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_MAGIC;
4501
4502 pmConf->numFilters = i;
4503
4504 adapter->shared->devRead.pmConfDesc.confVer = cpu_to_le32(1);
4505 adapter->shared->devRead.pmConfDesc.confLen = cpu_to_le32(sizeof(
4506 *pmConf));
4507 adapter->shared->devRead.pmConfDesc.confPA =
4508 cpu_to_le64(adapter->pm_conf_pa);
4509
4510 spin_lock_irqsave(&adapter->cmd_lock, flags);
4511 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
4512 VMXNET3_CMD_UPDATE_PMCFG);
4513 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
4514
4515 pci_save_state(pdev);
4516 pci_enable_wake(pdev, pci_choose_state(pdev, PMSG_SUSPEND),
4517 adapter->wol);
4518 pci_disable_device(pdev);
4519 pci_set_power_state(pdev, pci_choose_state(pdev, PMSG_SUSPEND));
4520
4521 return 0;
4522 }
4523
4524
4525 static int
4526 vmxnet3_resume(struct device *device)
4527 {
4528 int err;
4529 unsigned long flags;
4530 struct pci_dev *pdev = to_pci_dev(device);
4531 struct net_device *netdev = pci_get_drvdata(pdev);
4532 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
4533
4534 if (!netif_running(netdev))
4535 return 0;
4536
4537 pci_set_power_state(pdev, PCI_D0);
4538 pci_restore_state(pdev);
4539 err = pci_enable_device_mem(pdev);
4540 if (err != 0)
4541 return err;
4542
4543 pci_enable_wake(pdev, PCI_D0, 0);
4544
4545 vmxnet3_alloc_intr_resources(adapter);
4546
4547 /* During hibernate and suspend, the device has to be reinitialized
4548 * because its state is not guaranteed to be preserved.
4549 */
4550
4551 /* Need not check adapter state as other reset tasks cannot run during
4552 * device resume.
4553 */
4554 spin_lock_irqsave(&adapter->cmd_lock, flags);
4555 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
4556 VMXNET3_CMD_QUIESCE_DEV);
4557 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
4558 vmxnet3_tq_cleanup_all(adapter);
4559 vmxnet3_rq_cleanup_all(adapter);
4560
4561 vmxnet3_reset_dev(adapter);
4562 err = vmxnet3_activate_dev(adapter);
4563 if (err != 0) {
4564 netdev_err(netdev,
4565 "failed to re-activate on resume, error: %d", err);
4566 vmxnet3_force_close(adapter);
4567 return err;
4568 }
4569 netif_device_attach(netdev);
4570
4571 return 0;
4572 }
4573
4574 static const struct dev_pm_ops vmxnet3_pm_ops = {
4575 .suspend = vmxnet3_suspend,
4576 .resume = vmxnet3_resume,
4577 .freeze = vmxnet3_suspend,
4578 .restore = vmxnet3_resume,
4579 };
4580 #endif
4581
4582 static struct pci_driver vmxnet3_driver = {
4583 .name = vmxnet3_driver_name,
4584 .id_table = vmxnet3_pciid_table,
4585 .probe = vmxnet3_probe_device,
4586 .remove = vmxnet3_remove_device,
4587 .shutdown = vmxnet3_shutdown_device,
4588 #ifdef CONFIG_PM
4589 .driver.pm = &vmxnet3_pm_ops,
4590 #endif
4591 };
4592
4593
4594 static int __init
4595 vmxnet3_init_module(void)
4596 {
4597 pr_info("%s - version %s\n", VMXNET3_DRIVER_DESC,
4598 VMXNET3_DRIVER_VERSION_REPORT);
4599 return pci_register_driver(&vmxnet3_driver);
4600 }
4601
4602 module_init(vmxnet3_init_module);
4603
4604
4605 static void
4606 vmxnet3_exit_module(void)
4607 {
4608 pci_unregister_driver(&vmxnet3_driver);
4609 }
4610
4611 module_exit(vmxnet3_exit_module);
4612
4613 MODULE_AUTHOR("VMware, Inc.");
4614 MODULE_DESCRIPTION(VMXNET3_DRIVER_DESC);
4615 MODULE_LICENSE("GPL v2");
4616 MODULE_VERSION(VMXNET3_DRIVER_VERSION_STRING);
4617