1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * IBM Power Virtual Ethernet Device Driver
4 *
5 * Copyright (C) IBM Corporation, 2003, 2010
6 *
7 * Authors: Dave Larson <larson1@us.ibm.com>
8 * Santiago Leon <santil@linux.vnet.ibm.com>
9 * Brian King <brking@linux.vnet.ibm.com>
10 * Robert Jennings <rcj@linux.vnet.ibm.com>
11 * Anton Blanchard <anton@au.ibm.com>
12 */
13
14 #include <linux/module.h>
15 #include <linux/types.h>
16 #include <linux/errno.h>
17 #include <linux/dma-mapping.h>
18 #include <linux/kernel.h>
19 #include <linux/netdevice.h>
20 #include <linux/etherdevice.h>
21 #include <linux/skbuff.h>
22 #include <linux/init.h>
23 #include <linux/interrupt.h>
24 #include <linux/mm.h>
25 #include <linux/pm.h>
26 #include <linux/ethtool.h>
27 #include <linux/in.h>
28 #include <linux/ip.h>
29 #include <linux/ipv6.h>
30 #include <linux/slab.h>
31 #include <asm/hvcall.h>
32 #include <linux/atomic.h>
33 #include <asm/vio.h>
34 #include <asm/iommu.h>
35 #include <asm/firmware.h>
36 #include <net/tcp.h>
37 #include <net/ip6_checksum.h>
38
39 #include "ibmveth.h"
40
41 static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance);
42 static unsigned long ibmveth_get_desired_dma(struct vio_dev *vdev);
43
44 static struct kobj_type ktype_veth_pool;
45
46
47 static const char ibmveth_driver_name[] = "ibmveth";
48 static const char ibmveth_driver_string[] = "IBM Power Virtual Ethernet Driver";
49 #define ibmveth_driver_version "1.06"
50
51 MODULE_AUTHOR("Santiago Leon <santil@linux.vnet.ibm.com>");
52 MODULE_DESCRIPTION("IBM Power Virtual Ethernet Driver");
53 MODULE_LICENSE("GPL");
54 MODULE_VERSION(ibmveth_driver_version);
55
56 static unsigned int tx_copybreak __read_mostly = 128;
57 module_param(tx_copybreak, uint, 0644);
58 MODULE_PARM_DESC(tx_copybreak,
59 "Maximum size of packet that is copied to a new buffer on transmit");
60
61 static unsigned int rx_copybreak __read_mostly = 128;
62 module_param(rx_copybreak, uint, 0644);
63 MODULE_PARM_DESC(rx_copybreak,
64 "Maximum size of packet that is copied to a new buffer on receive");
65
66 static unsigned int rx_flush __read_mostly = 0;
67 module_param(rx_flush, uint, 0644);
68 MODULE_PARM_DESC(rx_flush, "Flush receive buffers before use");
69
70 static bool old_large_send __read_mostly;
71 module_param(old_large_send, bool, 0444);
72 MODULE_PARM_DESC(old_large_send,
73 "Use old large send method on firmware that supports the new method");
74
75 struct ibmveth_stat {
76 char name[ETH_GSTRING_LEN];
77 int offset;
78 };
79
80 #define IBMVETH_STAT_OFF(stat) offsetof(struct ibmveth_adapter, stat)
81 #define IBMVETH_GET_STAT(a, off) *((u64 *)(((unsigned long)(a)) + off))
82
83 static struct ibmveth_stat ibmveth_stats[] = {
84 { "replenish_task_cycles", IBMVETH_STAT_OFF(replenish_task_cycles) },
85 { "replenish_no_mem", IBMVETH_STAT_OFF(replenish_no_mem) },
86 { "replenish_add_buff_failure",
87 IBMVETH_STAT_OFF(replenish_add_buff_failure) },
88 { "replenish_add_buff_success",
89 IBMVETH_STAT_OFF(replenish_add_buff_success) },
90 { "rx_invalid_buffer", IBMVETH_STAT_OFF(rx_invalid_buffer) },
91 { "rx_no_buffer", IBMVETH_STAT_OFF(rx_no_buffer) },
92 { "tx_map_failed", IBMVETH_STAT_OFF(tx_map_failed) },
93 { "tx_send_failed", IBMVETH_STAT_OFF(tx_send_failed) },
94 { "fw_enabled_ipv4_csum", IBMVETH_STAT_OFF(fw_ipv4_csum_support) },
95 { "fw_enabled_ipv6_csum", IBMVETH_STAT_OFF(fw_ipv6_csum_support) },
96 { "tx_large_packets", IBMVETH_STAT_OFF(tx_large_packets) },
97 { "rx_large_packets", IBMVETH_STAT_OFF(rx_large_packets) },
98 { "fw_enabled_large_send", IBMVETH_STAT_OFF(fw_large_send_support) }
99 };
100
101 /* simple methods of getting data from the current rxq entry */
102 static inline u32 ibmveth_rxq_flags(struct ibmveth_adapter *adapter)
103 {
104 return be32_to_cpu(adapter->rx_queue.queue_addr[adapter->rx_queue.index].flags_off);
105 }
106
107 static inline int ibmveth_rxq_toggle(struct ibmveth_adapter *adapter)
108 {
109 return (ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_TOGGLE) >>
110 IBMVETH_RXQ_TOGGLE_SHIFT;
111 }
112
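/* An rx queue entry belongs to the driver when its toggle bit matches the
 * driver's expected toggle value; the expected value flips each time the
 * queue index wraps (see ibmveth_rxq_harvest_buffer()).
 */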
113 static inline int ibmveth_rxq_pending_buffer(struct ibmveth_adapter *adapter)
114 {
115 return ibmveth_rxq_toggle(adapter) == adapter->rx_queue.toggle;
116 }
117
118 static inline int ibmveth_rxq_buffer_valid(struct ibmveth_adapter *adapter)
119 {
120 return ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_VALID;
121 }
122
123 static inline int ibmveth_rxq_frame_offset(struct ibmveth_adapter *adapter)
124 {
125 return ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_OFF_MASK;
126 }
127
128 static inline int ibmveth_rxq_large_packet(struct ibmveth_adapter *adapter)
129 {
130 return ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_LRG_PKT;
131 }
132
133 static inline int ibmveth_rxq_frame_length(struct ibmveth_adapter *adapter)
134 {
135 return be32_to_cpu(adapter->rx_queue.queue_addr[adapter->rx_queue.index].length);
136 }
137
138 static inline int ibmveth_rxq_csum_good(struct ibmveth_adapter *adapter)
139 {
140 return ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_CSUM_GOOD;
141 }
142
143 static unsigned int ibmveth_real_max_tx_queues(void)
144 {
145 unsigned int n_cpu = num_online_cpus();
146
147 return min(n_cpu, IBMVETH_MAX_QUEUES);
148 }
149
150 /* set up the initial settings for a buffer pool */
151 static void ibmveth_init_buffer_pool(struct ibmveth_buff_pool *pool,
152 u32 pool_index, u32 pool_size,
153 u32 buff_size, u32 pool_active)
154 {
155 pool->size = pool_size;
156 pool->index = pool_index;
157 pool->buff_size = buff_size;
158 pool->threshold = pool_size * 7 / 8;
159 pool->active = pool_active;
160 }
161
162 /* allocate and set up a buffer pool - called during open */
163 static int ibmveth_alloc_buffer_pool(struct ibmveth_buff_pool *pool)
164 {
165 int i;
166
167 pool->free_map = kmalloc_array(pool->size, sizeof(u16), GFP_KERNEL);
168
169 if (!pool->free_map)
170 return -1;
171
172 pool->dma_addr = kcalloc(pool->size, sizeof(dma_addr_t), GFP_KERNEL);
173 if (!pool->dma_addr) {
174 kfree(pool->free_map);
175 pool->free_map = NULL;
176 return -1;
177 }
178
179 pool->skbuff = kcalloc(pool->size, sizeof(void *), GFP_KERNEL);
180
181 if (!pool->skbuff) {
182 kfree(pool->dma_addr);
183 pool->dma_addr = NULL;
184
185 kfree(pool->free_map);
186 pool->free_map = NULL;
187 return -1;
188 }
189
190 for (i = 0; i < pool->size; ++i)
191 pool->free_map[i] = i;
192
193 atomic_set(&pool->available, 0);
194 pool->producer_index = 0;
195 pool->consumer_index = 0;
196
197 return 0;
198 }
199
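/* Flush each cache line covering the buffer back to memory with the dcbf
 * instruction; only used when the rx_flush module parameter is set.
 */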
200 static inline void ibmveth_flush_buffer(void *addr, unsigned long length)
201 {
202 unsigned long offset;
203
204 for (offset = 0; offset < length; offset += SMP_CACHE_BYTES)
205 asm("dcbf %0,%1,1" :: "b" (addr), "r" (offset));
206 }
207
208 /* replenish the buffers for a pool. note that we don't need to
209 * skb_reserve these since they are used for incoming...
210 */
211 static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter,
212 struct ibmveth_buff_pool *pool)
213 {
214 union ibmveth_buf_desc descs[IBMVETH_MAX_RX_PER_HCALL] = {0};
215 u32 remaining = pool->size - atomic_read(&pool->available);
216 u64 correlators[IBMVETH_MAX_RX_PER_HCALL] = {0};
217 unsigned long lpar_rc;
218 u32 buffers_added = 0;
219 u32 i, filled, batch;
220 struct vio_dev *vdev;
221 dma_addr_t dma_addr;
222 struct device *dev;
223 u32 index;
224
225 vdev = adapter->vdev;
226 dev = &vdev->dev;
227
228 mb();
229
230 batch = adapter->rx_buffers_per_hcall;
231
232 while (remaining > 0) {
233 unsigned int free_index = pool->consumer_index;
234
235 /* Fill a batch of descriptors */
236 for (filled = 0; filled < min(remaining, batch); filled++) {
237 index = pool->free_map[free_index];
238 if (WARN_ON(index == IBM_VETH_INVALID_MAP)) {
239 adapter->replenish_add_buff_failure++;
240 netdev_info(adapter->netdev,
241 "Invalid map index %u, reset\n",
242 index);
243 schedule_work(&adapter->work);
244 break;
245 }
246
247 if (!pool->skbuff[index]) {
248 struct sk_buff *skb = NULL;
249
250 skb = netdev_alloc_skb(adapter->netdev,
251 pool->buff_size);
252 if (!skb) {
253 adapter->replenish_no_mem++;
254 adapter->replenish_add_buff_failure++;
255 break;
256 }
257
258 dma_addr = dma_map_single(dev, skb->data,
259 pool->buff_size,
260 DMA_FROM_DEVICE);
261 if (dma_mapping_error(dev, dma_addr)) {
262 dev_kfree_skb_any(skb);
263 adapter->replenish_add_buff_failure++;
264 break;
265 }
266
267 pool->dma_addr[index] = dma_addr;
268 pool->skbuff[index] = skb;
269 } else {
270 /* re-use case */
271 dma_addr = pool->dma_addr[index];
272 }
273
274 if (rx_flush) {
275 unsigned int len;
276
277 len = adapter->netdev->mtu + IBMVETH_BUFF_OH;
278 len = min(pool->buff_size, len);
279 ibmveth_flush_buffer(pool->skbuff[index]->data,
280 len);
281 }
282
283 descs[filled].fields.flags_len = IBMVETH_BUF_VALID |
284 pool->buff_size;
285 descs[filled].fields.address = dma_addr;
286
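/* The correlator packs the pool index in the upper 32 bits and the
 * buffer index in the lower 32 bits; it is also written to the start
 * of the buffer so the buffer can be located again on completion.
 */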
287 correlators[filled] = ((u64)pool->index << 32) | index;
288 *(u64 *)pool->skbuff[index]->data = correlators[filled];
289
290 free_index++;
291 if (free_index >= pool->size)
292 free_index = 0;
293 }
294
295 if (!filled)
296 break;
297
298 /* single buffer case */
299 if (filled == 1)
300 lpar_rc = h_add_logical_lan_buffer(vdev->unit_address,
301 descs[0].desc);
302 else
303 /* Multi-buffer hcall */
304 lpar_rc = h_add_logical_lan_buffers(vdev->unit_address,
305 descs[0].desc,
306 descs[1].desc,
307 descs[2].desc,
308 descs[3].desc,
309 descs[4].desc,
310 descs[5].desc,
311 descs[6].desc,
312 descs[7].desc);
313 if (lpar_rc != H_SUCCESS) {
314 dev_warn_ratelimited(dev,
315 "RX h_add_logical_lan failed: filled=%u, rc=%lu, batch=%u\n",
316 filled, lpar_rc, batch);
317 goto hcall_failure;
318 }
319
320 /* Only update pool state after hcall succeeds */
321 for (i = 0; i < filled; i++) {
322 free_index = pool->consumer_index;
323 pool->free_map[free_index] = IBM_VETH_INVALID_MAP;
324
325 pool->consumer_index++;
326 if (pool->consumer_index >= pool->size)
327 pool->consumer_index = 0;
328 }
329
330 buffers_added += filled;
331 adapter->replenish_add_buff_success += filled;
332 remaining -= filled;
333
334 memset(&descs, 0, sizeof(descs));
335 memset(&correlators, 0, sizeof(correlators));
336 continue;
337
338 hcall_failure:
339 for (i = 0; i < filled; i++) {
340 index = correlators[i] & 0xffffffffUL;
341 dma_addr = pool->dma_addr[index];
342
343 if (pool->skbuff[index]) {
344 if (dma_addr &&
345 !dma_mapping_error(dev, dma_addr))
346 dma_unmap_single(dev, dma_addr,
347 pool->buff_size,
348 DMA_FROM_DEVICE);
349
350 dev_kfree_skb_any(pool->skbuff[index]);
351 pool->skbuff[index] = NULL;
352 }
353 }
354 adapter->replenish_add_buff_failure += filled;
355
356 /*
357 * If the multi rx buffer hcall is no longer supported by the FW,
358 * e.g. in the case of Live Partition Migration
359 */
360 if (batch > 1 && lpar_rc == H_FUNCTION) {
361 /*
362 * Instead of retrying each buffer individually here,
363 * just set the max rx buffers per hcall to 1; the
364 * buffers will be replenished the next time
365 * ibmveth_replenish_buffer_pool() is called, using
366 * the single-buffer case
367 */
368 netdev_info(adapter->netdev,
369 "RX Multi buffers not supported by FW, rc=%lu\n",
370 lpar_rc);
371 adapter->rx_buffers_per_hcall = 1;
372 netdev_info(adapter->netdev,
373 "Next rx replenish will fall back to single-buffer hcall\n");
374 }
375 break;
376 }
377
378 mb();
379 atomic_add(buffers_added, &(pool->available));
380 }
381
382 /*
383 * The final 8 bytes of the buffer list is a counter of frames dropped
384 * because there was not a buffer in the buffer list capable of holding
385 * the frame.
386 */
387 static void ibmveth_update_rx_no_buffer(struct ibmveth_adapter *adapter)
388 {
389 __be64 *p = adapter->buffer_list_addr + 4096 - 8;
390
391 adapter->rx_no_buffer = be64_to_cpup(p);
392 }
393
394 /* replenish every active pool that has dropped below its threshold */
395 static void ibmveth_replenish_task(struct ibmveth_adapter *adapter)
396 {
397 int i;
398
399 adapter->replenish_task_cycles++;
400
401 for (i = (IBMVETH_NUM_BUFF_POOLS - 1); i >= 0; i--) {
402 struct ibmveth_buff_pool *pool = &adapter->rx_buff_pool[i];
403
404 if (pool->active &&
405 (atomic_read(&pool->available) < pool->threshold))
406 ibmveth_replenish_buffer_pool(adapter, pool);
407 }
408
409 ibmveth_update_rx_no_buffer(adapter);
410 }
411
412 /* empty and free a buffer pool - also used to do cleanup in error paths */
413 static void ibmveth_free_buffer_pool(struct ibmveth_adapter *adapter,
414 struct ibmveth_buff_pool *pool)
415 {
416 int i;
417
418 kfree(pool->free_map);
419 pool->free_map = NULL;
420
421 if (pool->skbuff && pool->dma_addr) {
422 for (i = 0; i < pool->size; ++i) {
423 struct sk_buff *skb = pool->skbuff[i];
424 if (skb) {
425 dma_unmap_single(&adapter->vdev->dev,
426 pool->dma_addr[i],
427 pool->buff_size,
428 DMA_FROM_DEVICE);
429 dev_kfree_skb_any(skb);
430 pool->skbuff[i] = NULL;
431 }
432 }
433 }
434
435 if (pool->dma_addr) {
436 kfree(pool->dma_addr);
437 pool->dma_addr = NULL;
438 }
439
440 if (pool->skbuff) {
441 kfree(pool->skbuff);
442 pool->skbuff = NULL;
443 }
444 }
445
446 /**
447 * ibmveth_remove_buffer_from_pool - remove a buffer from a pool
448 * @adapter: adapter instance
449 * @correlator: identifies pool and index
450 * @reuse: whether to reuse buffer
451 *
452 * Return:
453 * * %0 - success
454 * * %-EINVAL - correlator maps to a pool or index that is out of range
455 * * %-EFAULT - pool and index map to null skb
456 */
457 static int ibmveth_remove_buffer_from_pool(struct ibmveth_adapter *adapter,
458 u64 correlator, bool reuse)
459 {
460 unsigned int pool = correlator >> 32;
461 unsigned int index = correlator & 0xffffffffUL;
462 unsigned int free_index;
463 struct sk_buff *skb;
464
465 if (WARN_ON(pool >= IBMVETH_NUM_BUFF_POOLS) ||
466 WARN_ON(index >= adapter->rx_buff_pool[pool].size)) {
467 schedule_work(&adapter->work);
468 return -EINVAL;
469 }
470
471 skb = adapter->rx_buff_pool[pool].skbuff[index];
472 if (WARN_ON(!skb)) {
473 schedule_work(&adapter->work);
474 return -EFAULT;
475 }
476
477 /* if we are going to reuse the buffer then keep the pointers around
478 * but mark index as available. replenish will see the skb pointer and
479 * assume it is to be recycled.
480 */
481 if (!reuse) {
482 /* remove the skb pointer to mark free. actual freeing is done
483 * by upper level networking after gro_receive
484 */
485 adapter->rx_buff_pool[pool].skbuff[index] = NULL;
486
487 dma_unmap_single(&adapter->vdev->dev,
488 adapter->rx_buff_pool[pool].dma_addr[index],
489 adapter->rx_buff_pool[pool].buff_size,
490 DMA_FROM_DEVICE);
491 }
492
493 free_index = adapter->rx_buff_pool[pool].producer_index;
494 adapter->rx_buff_pool[pool].producer_index++;
495 if (adapter->rx_buff_pool[pool].producer_index >=
496 adapter->rx_buff_pool[pool].size)
497 adapter->rx_buff_pool[pool].producer_index = 0;
498 adapter->rx_buff_pool[pool].free_map[free_index] = index;
499
500 mb();
501
502 atomic_dec(&(adapter->rx_buff_pool[pool].available));
503
504 return 0;
505 }
506
507 /* get the current buffer on the rx queue */
508 static inline struct sk_buff *ibmveth_rxq_get_buffer(struct ibmveth_adapter *adapter)
509 {
510 u64 correlator = adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator;
511 unsigned int pool = correlator >> 32;
512 unsigned int index = correlator & 0xffffffffUL;
513
514 if (WARN_ON(pool >= IBMVETH_NUM_BUFF_POOLS) ||
515 WARN_ON(index >= adapter->rx_buff_pool[pool].size)) {
516 schedule_work(&adapter->work);
517 return NULL;
518 }
519
520 return adapter->rx_buff_pool[pool].skbuff[index];
521 }
522
523 /**
524 * ibmveth_rxq_harvest_buffer - Harvest buffer from pool
525 *
526 * @adapter: pointer to adapter
527 * @reuse: whether to reuse buffer
528 *
529 * Context: called from ibmveth_poll
530 *
531 * Return:
532 * * %0 - success
533 * * other - non-zero return from ibmveth_remove_buffer_from_pool
534 */
535 static int ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter,
536 bool reuse)
537 {
538 u64 cor;
539 int rc;
540
541 cor = adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator;
542 rc = ibmveth_remove_buffer_from_pool(adapter, cor, reuse);
543 if (unlikely(rc))
544 return rc;
545
546 if (++adapter->rx_queue.index == adapter->rx_queue.num_slots) {
547 adapter->rx_queue.index = 0;
548 adapter->rx_queue.toggle = !adapter->rx_queue.toggle;
549 }
550
551 return 0;
552 }
553
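/* unmap and free a queue's long term tx buffer */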
554 static void ibmveth_free_tx_ltb(struct ibmveth_adapter *adapter, int idx)
555 {
556 dma_unmap_single(&adapter->vdev->dev, adapter->tx_ltb_dma[idx],
557 adapter->tx_ltb_size, DMA_TO_DEVICE);
558 kfree(adapter->tx_ltb_ptr[idx]);
559 adapter->tx_ltb_ptr[idx] = NULL;
560 }
561
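/* Allocate and DMA-map one long term tx buffer for a queue; transmit
 * frames are copied into this buffer before the send hcall is issued
 * (see ibmveth_start_xmit()).
 */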
562 static int ibmveth_allocate_tx_ltb(struct ibmveth_adapter *adapter, int idx)
563 {
564 adapter->tx_ltb_ptr[idx] = kzalloc(adapter->tx_ltb_size,
565 GFP_KERNEL);
566 if (!adapter->tx_ltb_ptr[idx]) {
567 netdev_err(adapter->netdev,
568 "unable to allocate tx long term buffer\n");
569 return -ENOMEM;
570 }
571 adapter->tx_ltb_dma[idx] = dma_map_single(&adapter->vdev->dev,
572 adapter->tx_ltb_ptr[idx],
573 adapter->tx_ltb_size,
574 DMA_TO_DEVICE);
575 if (dma_mapping_error(&adapter->vdev->dev, adapter->tx_ltb_dma[idx])) {
576 netdev_err(adapter->netdev,
577 "unable to DMA map tx long term buffer\n");
578 kfree(adapter->tx_ltb_ptr[idx]);
579 adapter->tx_ltb_ptr[idx] = NULL;
580 return -ENOMEM;
581 }
582
583 return 0;
584 }
585
586 static int ibmveth_register_logical_lan(struct ibmveth_adapter *adapter,
587 union ibmveth_buf_desc rxq_desc, u64 mac_address)
588 {
589 int rc, try_again = 1;
590
591 /*
592 * After a kexec the adapter will still be open, so our attempt to
593 * open it will fail. So if we get a failure we free the adapter and
594 * try again, but only once.
595 */
596 retry:
597 rc = h_register_logical_lan(adapter->vdev->unit_address,
598 adapter->buffer_list_dma, rxq_desc.desc,
599 adapter->filter_list_dma, mac_address);
600
601 if (rc != H_SUCCESS && try_again) {
602 do {
603 rc = h_free_logical_lan(adapter->vdev->unit_address);
604 } while (H_IS_LONG_BUSY(rc) || (rc == H_BUSY));
605
606 try_again = 0;
607 goto retry;
608 }
609
610 return rc;
611 }
612
613 static int ibmveth_open(struct net_device *netdev)
614 {
615 struct ibmveth_adapter *adapter = netdev_priv(netdev);
616 u64 mac_address;
617 int rxq_entries = 1;
618 unsigned long lpar_rc;
619 int rc;
620 union ibmveth_buf_desc rxq_desc;
621 int i;
622 struct device *dev;
623
624 netdev_dbg(netdev, "open starting\n");
625
626 napi_enable(&adapter->napi);
627
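/* size the rx queue to hold an entry for every buffer in every pool,
 * plus one extra slot
 */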
628 for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
629 rxq_entries += adapter->rx_buff_pool[i].size;
630
631 rc = -ENOMEM;
632 adapter->buffer_list_addr = (void*) get_zeroed_page(GFP_KERNEL);
633 if (!adapter->buffer_list_addr) {
634 netdev_err(netdev, "unable to allocate list pages\n");
635 goto out;
636 }
637
638 adapter->filter_list_addr = (void*) get_zeroed_page(GFP_KERNEL);
639 if (!adapter->filter_list_addr) {
640 netdev_err(netdev, "unable to allocate filter pages\n");
641 goto out_free_buffer_list;
642 }
643
644 dev = &adapter->vdev->dev;
645
646 adapter->rx_queue.queue_len = sizeof(struct ibmveth_rx_q_entry) *
647 rxq_entries;
648 adapter->rx_queue.queue_addr =
649 dma_alloc_coherent(dev, adapter->rx_queue.queue_len,
650 &adapter->rx_queue.queue_dma, GFP_KERNEL);
651 if (!adapter->rx_queue.queue_addr)
652 goto out_free_filter_list;
653
654 adapter->buffer_list_dma = dma_map_single(dev,
655 adapter->buffer_list_addr, 4096, DMA_BIDIRECTIONAL);
656 if (dma_mapping_error(dev, adapter->buffer_list_dma)) {
657 netdev_err(netdev, "unable to map buffer list pages\n");
658 goto out_free_queue_mem;
659 }
660
661 adapter->filter_list_dma = dma_map_single(dev,
662 adapter->filter_list_addr, 4096, DMA_BIDIRECTIONAL);
663 if (dma_mapping_error(dev, adapter->filter_list_dma)) {
664 netdev_err(netdev, "unable to map filter list pages\n");
665 goto out_unmap_buffer_list;
666 }
667
668 for (i = 0; i < netdev->real_num_tx_queues; i++) {
669 if (ibmveth_allocate_tx_ltb(adapter, i))
670 goto out_free_tx_ltb;
671 }
672
673 adapter->rx_queue.index = 0;
674 adapter->rx_queue.num_slots = rxq_entries;
675 adapter->rx_queue.toggle = 1;
676
677 mac_address = ether_addr_to_u64(netdev->dev_addr);
678
679 rxq_desc.fields.flags_len = IBMVETH_BUF_VALID |
680 adapter->rx_queue.queue_len;
681 rxq_desc.fields.address = adapter->rx_queue.queue_dma;
682
683 netdev_dbg(netdev, "buffer list @ 0x%p\n", adapter->buffer_list_addr);
684 netdev_dbg(netdev, "filter list @ 0x%p\n", adapter->filter_list_addr);
685 netdev_dbg(netdev, "receive q @ 0x%p\n", adapter->rx_queue.queue_addr);
686
687 h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_DISABLE);
688
689 lpar_rc = ibmveth_register_logical_lan(adapter, rxq_desc, mac_address);
690
691 if (lpar_rc != H_SUCCESS) {
692 netdev_err(netdev, "h_register_logical_lan failed with %ld\n",
693 lpar_rc);
694 netdev_err(netdev, "buffer TCE:0x%llx filter TCE:0x%llx rxq "
695 "desc:0x%llx MAC:0x%llx\n",
696 adapter->buffer_list_dma,
697 adapter->filter_list_dma,
698 rxq_desc.desc,
699 mac_address);
700 rc = -ENONET;
701 goto out_unmap_filter_list;
702 }
703
704 for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
705 if (!adapter->rx_buff_pool[i].active)
706 continue;
707 if (ibmveth_alloc_buffer_pool(&adapter->rx_buff_pool[i])) {
708 netdev_err(netdev, "unable to alloc pool\n");
709 adapter->rx_buff_pool[i].active = 0;
710 rc = -ENOMEM;
711 goto out_free_buffer_pools;
712 }
713 }
714
715 netdev_dbg(netdev, "registering irq 0x%x\n", netdev->irq);
716 rc = request_irq(netdev->irq, ibmveth_interrupt, 0, netdev->name,
717 netdev);
718 if (rc != 0) {
719 netdev_err(netdev, "unable to request irq 0x%x, rc %d\n",
720 netdev->irq, rc);
721 do {
722 lpar_rc = h_free_logical_lan(adapter->vdev->unit_address);
723 } while (H_IS_LONG_BUSY(lpar_rc) || (lpar_rc == H_BUSY));
724
725 goto out_free_buffer_pools;
726 }
727
728 rc = -ENOMEM;
729
730 netdev_dbg(netdev, "initial replenish cycle\n");
731 ibmveth_interrupt(netdev->irq, netdev);
732
733 netif_tx_start_all_queues(netdev);
734
735 netdev_dbg(netdev, "open complete\n");
736
737 return 0;
738
739 out_free_buffer_pools:
740 while (--i >= 0) {
741 if (adapter->rx_buff_pool[i].active)
742 ibmveth_free_buffer_pool(adapter,
743 &adapter->rx_buff_pool[i]);
744 }
745 out_unmap_filter_list:
746 dma_unmap_single(dev, adapter->filter_list_dma, 4096,
747 DMA_BIDIRECTIONAL);
748
749 out_free_tx_ltb:
750 while (--i >= 0) {
751 ibmveth_free_tx_ltb(adapter, i);
752 }
753
754 out_unmap_buffer_list:
755 dma_unmap_single(dev, adapter->buffer_list_dma, 4096,
756 DMA_BIDIRECTIONAL);
757 out_free_queue_mem:
758 dma_free_coherent(dev, adapter->rx_queue.queue_len,
759 adapter->rx_queue.queue_addr,
760 adapter->rx_queue.queue_dma);
761 out_free_filter_list:
762 free_page((unsigned long)adapter->filter_list_addr);
763 out_free_buffer_list:
764 free_page((unsigned long)adapter->buffer_list_addr);
765 out:
766 napi_disable(&adapter->napi);
767 return rc;
768 }
769
770 static int ibmveth_close(struct net_device *netdev)
771 {
772 struct ibmveth_adapter *adapter = netdev_priv(netdev);
773 struct device *dev = &adapter->vdev->dev;
774 long lpar_rc;
775 int i;
776
777 netdev_dbg(netdev, "close starting\n");
778
779 napi_disable(&adapter->napi);
780
781 netif_tx_stop_all_queues(netdev);
782
783 h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_DISABLE);
784
785 do {
786 lpar_rc = h_free_logical_lan(adapter->vdev->unit_address);
787 } while (H_IS_LONG_BUSY(lpar_rc) || (lpar_rc == H_BUSY));
788
789 if (lpar_rc != H_SUCCESS) {
790 netdev_err(netdev, "h_free_logical_lan failed with %lx, "
791 "continuing with close\n", lpar_rc);
792 }
793
794 free_irq(netdev->irq, netdev);
795
796 ibmveth_update_rx_no_buffer(adapter);
797
798 dma_unmap_single(dev, adapter->buffer_list_dma, 4096,
799 DMA_BIDIRECTIONAL);
800 free_page((unsigned long)adapter->buffer_list_addr);
801
802 dma_unmap_single(dev, adapter->filter_list_dma, 4096,
803 DMA_BIDIRECTIONAL);
804 free_page((unsigned long)adapter->filter_list_addr);
805
806 dma_free_coherent(dev, adapter->rx_queue.queue_len,
807 adapter->rx_queue.queue_addr,
808 adapter->rx_queue.queue_dma);
809
810 for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
811 if (adapter->rx_buff_pool[i].active)
812 ibmveth_free_buffer_pool(adapter,
813 &adapter->rx_buff_pool[i]);
814
815 for (i = 0; i < netdev->real_num_tx_queues; i++)
816 ibmveth_free_tx_ltb(adapter, i);
817
818 netdev_dbg(netdev, "close complete\n");
819
820 return 0;
821 }
822
823 /**
824 * ibmveth_reset - Handle scheduled reset work
825 *
826 * @w: pointer to work_struct embedded in adapter structure
827 *
828 * Context: This routine acquires rtnl_mutex and disables its NAPI through
829 * ibmveth_close. It can't be called directly in a context that has
830 * already acquired rtnl_mutex or disabled its NAPI, or directly from
831 * a poll routine.
832 *
833 * Return: void
834 */
835 static void ibmveth_reset(struct work_struct *w)
836 {
837 struct ibmveth_adapter *adapter = container_of(w, struct ibmveth_adapter, work);
838 struct net_device *netdev = adapter->netdev;
839
840 netdev_dbg(netdev, "reset starting\n");
841
842 rtnl_lock();
843
844 dev_close(adapter->netdev);
845 dev_open(adapter->netdev, NULL);
846
847 rtnl_unlock();
848
849 netdev_dbg(netdev, "reset complete\n");
850 }
851
852 static int ibmveth_set_link_ksettings(struct net_device *dev,
853 const struct ethtool_link_ksettings *cmd)
854 {
855 struct ibmveth_adapter *adapter = netdev_priv(dev);
856
857 return ethtool_virtdev_set_link_ksettings(dev, cmd,
858 &adapter->speed,
859 &adapter->duplex);
860 }
861
862 static int ibmveth_get_link_ksettings(struct net_device *dev,
863 struct ethtool_link_ksettings *cmd)
864 {
865 struct ibmveth_adapter *adapter = netdev_priv(dev);
866
867 cmd->base.speed = adapter->speed;
868 cmd->base.duplex = adapter->duplex;
869 cmd->base.port = PORT_OTHER;
870
871 return 0;
872 }
873
874 static void ibmveth_init_link_settings(struct net_device *dev)
875 {
876 struct ibmveth_adapter *adapter = netdev_priv(dev);
877
878 adapter->speed = SPEED_1000;
879 adapter->duplex = DUPLEX_FULL;
880 }
881
882 static void netdev_get_drvinfo(struct net_device *dev,
883 struct ethtool_drvinfo *info)
884 {
885 strscpy(info->driver, ibmveth_driver_name, sizeof(info->driver));
886 strscpy(info->version, ibmveth_driver_version, sizeof(info->version));
887 }
888
889 static netdev_features_t ibmveth_fix_features(struct net_device *dev,
890 netdev_features_t features)
891 {
892 /*
893 * Since the ibmveth firmware interface does not have the
894 * concept of separate tx/rx checksum offload enable, if rx
895 * checksum is disabled we also have to disable tx checksum
896 * offload. Once we disable rx checksum offload, we are no
897 * longer allowed to send tx buffers that are not properly
898 * checksummed.
899 */
900
901 if (!(features & NETIF_F_RXCSUM))
902 features &= ~NETIF_F_CSUM_MASK;
903
904 return features;
905 }
906
907 static int ibmveth_set_csum_offload(struct net_device *dev, u32 data)
908 {
909 struct ibmveth_adapter *adapter = netdev_priv(dev);
910 unsigned long set_attr, clr_attr, ret_attr;
911 unsigned long set_attr6, clr_attr6;
912 long ret, ret4, ret6;
913 int rc1 = 0, rc2 = 0;
914 int restart = 0;
915
916 if (netif_running(dev)) {
917 restart = 1;
918 ibmveth_close(dev);
919 }
920
921 set_attr = 0;
922 clr_attr = 0;
923 set_attr6 = 0;
924 clr_attr6 = 0;
925
926 if (data) {
927 set_attr = IBMVETH_ILLAN_IPV4_TCP_CSUM;
928 set_attr6 = IBMVETH_ILLAN_IPV6_TCP_CSUM;
929 } else {
930 clr_attr = IBMVETH_ILLAN_IPV4_TCP_CSUM;
931 clr_attr6 = IBMVETH_ILLAN_IPV6_TCP_CSUM;
932 }
933
934 ret = h_illan_attributes(adapter->vdev->unit_address, 0, 0, &ret_attr);
935
936 if (ret == H_SUCCESS &&
937 (ret_attr & IBMVETH_ILLAN_PADDED_PKT_CSUM)) {
938 ret4 = h_illan_attributes(adapter->vdev->unit_address, clr_attr,
939 set_attr, &ret_attr);
940
941 if (ret4 != H_SUCCESS) {
942 netdev_err(dev, "unable to change IPv4 checksum "
943 "offload settings. %d rc=%ld\n",
944 data, ret4);
945
946 h_illan_attributes(adapter->vdev->unit_address,
947 set_attr, clr_attr, &ret_attr);
948
949 if (data == 1)
950 dev->features &= ~NETIF_F_IP_CSUM;
951
952 } else {
953 adapter->fw_ipv4_csum_support = data;
954 }
955
956 ret6 = h_illan_attributes(adapter->vdev->unit_address,
957 clr_attr6, set_attr6, &ret_attr);
958
959 if (ret6 != H_SUCCESS) {
960 netdev_err(dev, "unable to change IPv6 checksum "
961 "offload settings. %d rc=%ld\n",
962 data, ret6);
963
964 h_illan_attributes(adapter->vdev->unit_address,
965 set_attr6, clr_attr6, &ret_attr);
966
967 if (data == 1)
968 dev->features &= ~NETIF_F_IPV6_CSUM;
969
970 } else
971 adapter->fw_ipv6_csum_support = data;
972
973 if (ret4 == H_SUCCESS || ret6 == H_SUCCESS)
974 adapter->rx_csum = data;
975 else
976 rc1 = -EIO;
977 } else {
978 rc1 = -EIO;
979 netdev_err(dev, "unable to change checksum offload settings."
980 " %d rc=%ld ret_attr=%lx\n", data, ret,
981 ret_attr);
982 }
983
984 if (restart)
985 rc2 = ibmveth_open(dev);
986
987 return rc1 ? rc1 : rc2;
988 }
989
990 static int ibmveth_set_tso(struct net_device *dev, u32 data)
991 {
992 struct ibmveth_adapter *adapter = netdev_priv(dev);
993 unsigned long set_attr, clr_attr, ret_attr;
994 long ret1, ret2;
995 int rc1 = 0, rc2 = 0;
996 int restart = 0;
997
998 if (netif_running(dev)) {
999 restart = 1;
1000 ibmveth_close(dev);
1001 }
1002
1003 set_attr = 0;
1004 clr_attr = 0;
1005
1006 if (data)
1007 set_attr = IBMVETH_ILLAN_LRG_SR_ENABLED;
1008 else
1009 clr_attr = IBMVETH_ILLAN_LRG_SR_ENABLED;
1010
1011 ret1 = h_illan_attributes(adapter->vdev->unit_address, 0, 0, &ret_attr);
1012
1013 if (ret1 == H_SUCCESS && (ret_attr & IBMVETH_ILLAN_LRG_SND_SUPPORT) &&
1014 !old_large_send) {
1015 ret2 = h_illan_attributes(adapter->vdev->unit_address, clr_attr,
1016 set_attr, &ret_attr);
1017
1018 if (ret2 != H_SUCCESS) {
1019 netdev_err(dev, "unable to change tso settings. %d rc=%ld\n",
1020 data, ret2);
1021
1022 h_illan_attributes(adapter->vdev->unit_address,
1023 set_attr, clr_attr, &ret_attr);
1024
1025 if (data == 1)
1026 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
1027 rc1 = -EIO;
1028
1029 } else {
1030 adapter->fw_large_send_support = data;
1031 adapter->large_send = data;
1032 }
1033 } else {
1034 /* Older firmware versions of large send offload do not
1035 * support TCP over IPv6
1036 */
1037 if (data == 1) {
1038 dev->features &= ~NETIF_F_TSO6;
1039 netdev_info(dev, "TSO feature requires all partitions to have an updated driver");
1040 }
1041 adapter->large_send = data;
1042 }
1043
1044 if (restart)
1045 rc2 = ibmveth_open(dev);
1046
1047 return rc1 ? rc1 : rc2;
1048 }
1049
1050 static int ibmveth_set_features(struct net_device *dev,
1051 netdev_features_t features)
1052 {
1053 struct ibmveth_adapter *adapter = netdev_priv(dev);
1054 int rx_csum = !!(features & NETIF_F_RXCSUM);
1055 int large_send = !!(features & (NETIF_F_TSO | NETIF_F_TSO6));
1056 int rc1 = 0, rc2 = 0;
1057
1058 if (rx_csum != adapter->rx_csum) {
1059 rc1 = ibmveth_set_csum_offload(dev, rx_csum);
1060 if (rc1 && !adapter->rx_csum)
1061 dev->features =
1062 features & ~(NETIF_F_CSUM_MASK |
1063 NETIF_F_RXCSUM);
1064 }
1065
1066 if (large_send != adapter->large_send) {
1067 rc2 = ibmveth_set_tso(dev, large_send);
1068 if (rc2 && !adapter->large_send)
1069 dev->features =
1070 features & ~(NETIF_F_TSO | NETIF_F_TSO6);
1071 }
1072
1073 return rc1 ? rc1 : rc2;
1074 }
1075
1076 static void ibmveth_get_strings(struct net_device *dev, u32 stringset, u8 *data)
1077 {
1078 int i;
1079
1080 if (stringset != ETH_SS_STATS)
1081 return;
1082
1083 for (i = 0; i < ARRAY_SIZE(ibmveth_stats); i++, data += ETH_GSTRING_LEN)
1084 memcpy(data, ibmveth_stats[i].name, ETH_GSTRING_LEN);
1085 }
1086
1087 static int ibmveth_get_sset_count(struct net_device *dev, int sset)
1088 {
1089 switch (sset) {
1090 case ETH_SS_STATS:
1091 return ARRAY_SIZE(ibmveth_stats);
1092 default:
1093 return -EOPNOTSUPP;
1094 }
1095 }
1096
1097 static void ibmveth_get_ethtool_stats(struct net_device *dev,
1098 struct ethtool_stats *stats, u64 *data)
1099 {
1100 int i;
1101 struct ibmveth_adapter *adapter = netdev_priv(dev);
1102
1103 for (i = 0; i < ARRAY_SIZE(ibmveth_stats); i++)
1104 data[i] = IBMVETH_GET_STAT(adapter, ibmveth_stats[i].offset);
1105 }
1106
1107 static void ibmveth_get_channels(struct net_device *netdev,
1108 struct ethtool_channels *channels)
1109 {
1110 channels->max_tx = ibmveth_real_max_tx_queues();
1111 channels->tx_count = netdev->real_num_tx_queues;
1112
1113 channels->max_rx = netdev->real_num_rx_queues;
1114 channels->rx_count = netdev->real_num_rx_queues;
1115 }
1116
1117 static int ibmveth_set_channels(struct net_device *netdev,
1118 struct ethtool_channels *channels)
1119 {
1120 struct ibmveth_adapter *adapter = netdev_priv(netdev);
1121 unsigned int old = netdev->real_num_tx_queues,
1122 goal = channels->tx_count;
1123 int rc, i;
1124
1125 /* If ndo_open has not been called yet then don't allocate, just set
1126 * the desired number of netdev_queues and return
1127 */
1128 if (!(netdev->flags & IFF_UP))
1129 return netif_set_real_num_tx_queues(netdev, goal);
1130
1131 /* We have IBMVETH_MAX_QUEUES netdev_queue's allocated
1132 * but we may need to alloc/free the ltb's.
1133 */
1134 netif_tx_stop_all_queues(netdev);
1135
1136 /* Allocate any queue that we need */
1137 for (i = old; i < goal; i++) {
1138 if (adapter->tx_ltb_ptr[i])
1139 continue;
1140
1141 rc = ibmveth_allocate_tx_ltb(adapter, i);
1142 if (!rc)
1143 continue;
1144
1145 /* if something goes wrong, free everything we just allocated */
1146 netdev_err(netdev, "Failed to allocate more tx queues, returning to %d queues\n",
1147 old);
1148 goal = old;
1149 old = i;
1150 break;
1151 }
1152 rc = netif_set_real_num_tx_queues(netdev, goal);
1153 if (rc) {
1154 netdev_err(netdev, "Failed to set real tx queues, returning to %d queues\n",
1155 old);
1156 goal = old;
1157 old = i;
1158 }
1159 /* Free any that are no longer needed */
1160 for (i = old; i > goal; i--) {
1161 if (adapter->tx_ltb_ptr[i - 1])
1162 ibmveth_free_tx_ltb(adapter, i - 1);
1163 }
1164
1165 netif_tx_wake_all_queues(netdev);
1166
1167 return rc;
1168 }
1169
1170 static const struct ethtool_ops netdev_ethtool_ops = {
1171 .get_drvinfo = netdev_get_drvinfo,
1172 .get_link = ethtool_op_get_link,
1173 .get_strings = ibmveth_get_strings,
1174 .get_sset_count = ibmveth_get_sset_count,
1175 .get_ethtool_stats = ibmveth_get_ethtool_stats,
1176 .get_link_ksettings = ibmveth_get_link_ksettings,
1177 .set_link_ksettings = ibmveth_set_link_ksettings,
1178 .get_channels = ibmveth_get_channels,
1179 .set_channels = ibmveth_set_channels
1180 };
1181
1182 static int ibmveth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1183 {
1184 return -EOPNOTSUPP;
1185 }
1186
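/* Issue the send hcall for one descriptor, retrying while the hypervisor
 * reports H_BUSY; returns nonzero on failure.
 */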
1187 static int ibmveth_send(struct ibmveth_adapter *adapter,
1188 unsigned long desc, unsigned long mss)
1189 {
1190 unsigned long correlator;
1191 unsigned int retry_count;
1192 unsigned long ret;
1193
1194 /*
1195 * The retry count sets a maximum for the number of broadcast and
1196 * multicast destinations within the system.
1197 */
1198 retry_count = 1024;
1199 correlator = 0;
1200 do {
1201 ret = h_send_logical_lan(adapter->vdev->unit_address, desc,
1202 correlator, &correlator, mss,
1203 adapter->fw_large_send_support);
1204 } while ((ret == H_BUSY) && (retry_count--));
1205
1206 if (ret != H_SUCCESS && ret != H_DROPPED) {
1207 netdev_err(adapter->netdev, "tx: h_send_logical_lan failed "
1208 "with rc=%ld\n", ret);
1209 return 1;
1210 }
1211
1212 return 0;
1213 }
1214
1215 static int ibmveth_is_packet_unsupported(struct sk_buff *skb,
1216 struct net_device *netdev)
1217 {
1218 struct ethhdr *ether_header;
1219 int ret = 0;
1220
1221 ether_header = eth_hdr(skb);
1222
1223 if (ether_addr_equal(ether_header->h_dest, netdev->dev_addr)) {
1224 netdev_dbg(netdev, "veth doesn't support loopback packets, dropping packet.\n");
1225 netdev->stats.tx_dropped++;
1226 ret = -EOPNOTSUPP;
1227 }
1228
1229 return ret;
1230 }
1231
1232 static netdev_tx_t ibmveth_start_xmit(struct sk_buff *skb,
1233 struct net_device *netdev)
1234 {
1235 struct ibmveth_adapter *adapter = netdev_priv(netdev);
1236 unsigned int desc_flags, total_bytes;
1237 union ibmveth_buf_desc desc;
1238 int i, queue_num = skb_get_queue_mapping(skb);
1239 unsigned long mss = 0;
1240
1241 if (ibmveth_is_packet_unsupported(skb, netdev))
1242 goto out;
1243 /* veth can't checksum offload UDP */
1244 if (skb->ip_summed == CHECKSUM_PARTIAL &&
1245 ((skb->protocol == htons(ETH_P_IP) &&
1246 ip_hdr(skb)->protocol != IPPROTO_TCP) ||
1247 (skb->protocol == htons(ETH_P_IPV6) &&
1248 ipv6_hdr(skb)->nexthdr != IPPROTO_TCP)) &&
1249 skb_checksum_help(skb)) {
1250
1251 netdev_err(netdev, "tx: failed to checksum packet\n");
1252 netdev->stats.tx_dropped++;
1253 goto out;
1254 }
1255
1256 desc_flags = IBMVETH_BUF_VALID;
1257
1258 if (skb->ip_summed == CHECKSUM_PARTIAL) {
1259 unsigned char *buf = skb_transport_header(skb) +
1260 skb->csum_offset;
1261
1262 desc_flags |= (IBMVETH_BUF_NO_CSUM | IBMVETH_BUF_CSUM_GOOD);
1263
1264 /* Need to zero out the checksum */
1265 buf[0] = 0;
1266 buf[1] = 0;
1267
1268 if (skb_is_gso(skb) && adapter->fw_large_send_support)
1269 desc_flags |= IBMVETH_BUF_LRG_SND;
1270 }
1271
1272 if (skb->ip_summed == CHECKSUM_PARTIAL && skb_is_gso(skb)) {
1273 if (adapter->fw_large_send_support) {
1274 mss = (unsigned long)skb_shinfo(skb)->gso_size;
1275 adapter->tx_large_packets++;
1276 } else if (!skb_is_gso_v6(skb)) {
1277 /* Put -1 in the IP checksum to tell phyp it
1278 * is a largesend packet. Put the mss in
1279 * the TCP checksum.
1280 */
1281 ip_hdr(skb)->check = 0xffff;
1282 tcp_hdr(skb)->check =
1283 cpu_to_be16(skb_shinfo(skb)->gso_size);
1284 adapter->tx_large_packets++;
1285 }
1286 }
1287
1288 /* Copy header into mapped buffer */
1289 if (unlikely(skb->len > adapter->tx_ltb_size)) {
1290 netdev_err(adapter->netdev, "tx: packet size (%u) exceeds ltb (%u)\n",
1291 skb->len, adapter->tx_ltb_size);
1292 netdev->stats.tx_dropped++;
1293 goto out;
1294 }
1295 memcpy(adapter->tx_ltb_ptr[queue_num], skb->data, skb_headlen(skb));
1296 total_bytes = skb_headlen(skb);
1297 /* Copy frags into mapped buffers */
1298 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1299 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1300
1301 memcpy(adapter->tx_ltb_ptr[queue_num] + total_bytes,
1302 skb_frag_address_safe(frag), skb_frag_size(frag));
1303 total_bytes += skb_frag_size(frag);
1304 }
1305
1306 if (unlikely(total_bytes != skb->len)) {
1307 netdev_err(adapter->netdev, "tx: incorrect packet len copied into ltb (%u != %u)\n",
1308 skb->len, total_bytes);
1309 netdev->stats.tx_dropped++;
1310 goto out;
1311 }
1312 desc.fields.flags_len = desc_flags | skb->len;
1313 desc.fields.address = adapter->tx_ltb_dma[queue_num];
1314 /* finish writing to long_term_buff before VIOS accessing it */
1315 dma_wmb();
1316
1317 if (ibmveth_send(adapter, desc.desc, mss)) {
1318 adapter->tx_send_failed++;
1319 netdev->stats.tx_dropped++;
1320 } else {
1321 netdev->stats.tx_packets++;
1322 netdev->stats.tx_bytes += skb->len;
1323 }
1324
1325 out:
1326 dev_consume_skb_any(skb);
1327 return NETDEV_TX_OK;
1328
1329
1330 }
1331
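/* Populate GSO metadata (type, size, segment count) on a received large
 * packet so the stack can resegment it correctly.
 */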
1332 static void ibmveth_rx_mss_helper(struct sk_buff *skb, u16 mss, int lrg_pkt)
1333 {
1334 struct tcphdr *tcph;
1335 int offset = 0;
1336 int hdr_len;
1337
1338 /* only TCP packets will be aggregated */
1339 if (skb->protocol == htons(ETH_P_IP)) {
1340 struct iphdr *iph = (struct iphdr *)skb->data;
1341
1342 if (iph->protocol == IPPROTO_TCP) {
1343 offset = iph->ihl * 4;
1344 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
1345 } else {
1346 return;
1347 }
1348 } else if (skb->protocol == htons(ETH_P_IPV6)) {
1349 struct ipv6hdr *iph6 = (struct ipv6hdr *)skb->data;
1350
1351 if (iph6->nexthdr == IPPROTO_TCP) {
1352 offset = sizeof(struct ipv6hdr);
1353 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
1354 } else {
1355 return;
1356 }
1357 } else {
1358 return;
1359 }
1360 /* if mss is not set through Large Packet bit/mss in rx buffer,
1361 * expect that the mss will be written to the tcp header checksum.
1362 */
1363 tcph = (struct tcphdr *)(skb->data + offset);
1364 if (lrg_pkt) {
1365 skb_shinfo(skb)->gso_size = mss;
1366 } else if (offset) {
1367 skb_shinfo(skb)->gso_size = ntohs(tcph->check);
1368 tcph->check = 0;
1369 }
1370
1371 if (skb_shinfo(skb)->gso_size) {
1372 hdr_len = offset + tcph->doff * 4;
1373 skb_shinfo(skb)->gso_segs =
1374 DIV_ROUND_UP(skb->len - hdr_len,
1375 skb_shinfo(skb)->gso_size);
1376 }
1377 }
1378
1379 static void ibmveth_rx_csum_helper(struct sk_buff *skb,
1380 struct ibmveth_adapter *adapter)
1381 {
1382 struct iphdr *iph = NULL;
1383 struct ipv6hdr *iph6 = NULL;
1384 __be16 skb_proto = 0;
1385 u16 iphlen = 0;
1386 u16 iph_proto = 0;
1387 u16 tcphdrlen = 0;
1388
1389 skb_proto = be16_to_cpu(skb->protocol);
1390
1391 if (skb_proto == ETH_P_IP) {
1392 iph = (struct iphdr *)skb->data;
1393
1394 /* If the IP checksum is not offloaded and if the packet
1395 * is large send, the checksum must be rebuilt.
1396 */
1397 if (iph->check == 0xffff) {
1398 iph->check = 0;
1399 iph->check = ip_fast_csum((unsigned char *)iph,
1400 iph->ihl);
1401 }
1402
1403 iphlen = iph->ihl * 4;
1404 iph_proto = iph->protocol;
1405 } else if (skb_proto == ETH_P_IPV6) {
1406 iph6 = (struct ipv6hdr *)skb->data;
1407 iphlen = sizeof(struct ipv6hdr);
1408 iph_proto = iph6->nexthdr;
1409 }
1410
1411 /* When CSO is enabled the TCP checksum may have been set to zero by
1412 * the sender given that we zeroed out TCP checksum field in
1413 * transmit path (refer ibmveth_start_xmit routine). In this case set
1414 * up CHECKSUM_PARTIAL. If the packet is forwarded, the checksum will
1415 * then be recalculated by the destination NIC (CSO must be enabled
1416 * on the destination NIC).
1417 *
1418 * In an OVS environment, when a flow is not cached, specifically for a
1419 * new TCP connection, the first packet information is passed up to
1420 * the user space for finding a flow. During this process, OVS computes
1421 * checksum on the first packet when CHECKSUM_PARTIAL flag is set.
1422 *
1423 * So, re-compute TCP pseudo header checksum.
1424 */
1425
1426 if (iph_proto == IPPROTO_TCP) {
1427 struct tcphdr *tcph = (struct tcphdr *)(skb->data + iphlen);
1428
1429 if (tcph->check == 0x0000) {
1430 /* Recompute TCP pseudo header checksum */
1431 tcphdrlen = skb->len - iphlen;
1432 if (skb_proto == ETH_P_IP)
1433 tcph->check =
1434 ~csum_tcpudp_magic(iph->saddr,
1435 iph->daddr, tcphdrlen, iph_proto, 0);
1436 else if (skb_proto == ETH_P_IPV6)
1437 tcph->check =
1438 ~csum_ipv6_magic(&iph6->saddr,
1439 &iph6->daddr, tcphdrlen, iph_proto, 0);
1440 /* Setup SKB fields for checksum offload */
1441 skb_partial_csum_set(skb, iphlen,
1442 offsetof(struct tcphdr, check));
1443 skb_reset_network_header(skb);
1444 }
1445 }
1446 }
1447
1448 static int ibmveth_poll(struct napi_struct *napi, int budget)
1449 {
1450 struct ibmveth_adapter *adapter =
1451 container_of(napi, struct ibmveth_adapter, napi);
1452 struct net_device *netdev = adapter->netdev;
1453 int frames_processed = 0;
1454 unsigned long lpar_rc;
1455 u16 mss = 0;
1456
1457 restart_poll:
1458 while (frames_processed < budget) {
1459 if (!ibmveth_rxq_pending_buffer(adapter))
1460 break;
1461
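/* read the descriptor contents only after it has been seen as pending */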
1462 smp_rmb();
1463 if (!ibmveth_rxq_buffer_valid(adapter)) {
1464 wmb(); /* suggested by larson1 */
1465 adapter->rx_invalid_buffer++;
1466 netdev_dbg(netdev, "recycling invalid buffer\n");
1467 if (unlikely(ibmveth_rxq_harvest_buffer(adapter, true)))
1468 break;
1469 } else {
1470 struct sk_buff *skb, *new_skb;
1471 int length = ibmveth_rxq_frame_length(adapter);
1472 int offset = ibmveth_rxq_frame_offset(adapter);
1473 int csum_good = ibmveth_rxq_csum_good(adapter);
1474 int lrg_pkt = ibmveth_rxq_large_packet(adapter);
1475 __sum16 iph_check = 0;
1476
1477 skb = ibmveth_rxq_get_buffer(adapter);
1478 if (unlikely(!skb))
1479 break;
1480
1481 /* if the large packet bit is set in the rx queue
1482 * descriptor, the mss will be written by PHYP eight
1483 * bytes from the start of the rx buffer, which is
1484 * skb->data at this stage
1485 */
1486 if (lrg_pkt) {
1487 __be64 *rxmss = (__be64 *)(skb->data + 8);
1488
1489 mss = (u16)be64_to_cpu(*rxmss);
1490 }
1491
1492 new_skb = NULL;
1493 if (length < rx_copybreak)
1494 new_skb = netdev_alloc_skb(netdev, length);
1495
1496 if (new_skb) {
1497 skb_copy_to_linear_data(new_skb,
1498 skb->data + offset,
1499 length);
1500 if (rx_flush)
1501 ibmveth_flush_buffer(skb->data,
1502 length + offset);
1503 if (unlikely(ibmveth_rxq_harvest_buffer(adapter, true)))
1504 break;
1505 skb = new_skb;
1506 } else {
1507 if (unlikely(ibmveth_rxq_harvest_buffer(adapter, false)))
1508 break;
1509 skb_reserve(skb, offset);
1510 }
1511
1512 skb_put(skb, length);
1513 skb->protocol = eth_type_trans(skb, netdev);
1514
1515 /* PHYP without PLSO support places a -1 in the ip
1516 * checksum for large send frames.
1517 */
1518 if (skb->protocol == cpu_to_be16(ETH_P_IP)) {
1519 struct iphdr *iph = (struct iphdr *)skb->data;
1520
1521 iph_check = iph->check;
1522 }
1523
1524 if ((length > netdev->mtu + ETH_HLEN) ||
1525 lrg_pkt || iph_check == 0xffff) {
1526 ibmveth_rx_mss_helper(skb, mss, lrg_pkt);
1527 adapter->rx_large_packets++;
1528 }
1529
1530 if (csum_good) {
1531 skb->ip_summed = CHECKSUM_UNNECESSARY;
1532 ibmveth_rx_csum_helper(skb, adapter);
1533 }
1534
1535 napi_gro_receive(napi, skb); /* send it up */
1536
1537 netdev->stats.rx_packets++;
1538 netdev->stats.rx_bytes += length;
1539 frames_processed++;
1540 }
1541 }
1542
1543 ibmveth_replenish_task(adapter);
1544
1545 if (frames_processed == budget)
1546 goto out;
1547
1548 if (!napi_complete_done(napi, frames_processed))
1549 goto out;
1550
1551 /* We think we are done - reenable interrupts,
1552 * then check once more to make sure we are done.
1553 */
1554 lpar_rc = h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_ENABLE);
1555 if (WARN_ON(lpar_rc != H_SUCCESS)) {
1556 schedule_work(&adapter->work);
1557 goto out;
1558 }
1559
1560 if (ibmveth_rxq_pending_buffer(adapter) && napi_schedule(napi)) {
1561 lpar_rc = h_vio_signal(adapter->vdev->unit_address,
1562 VIO_IRQ_DISABLE);
1563 goto restart_poll;
1564 }
1565
1566 out:
1567 return frames_processed;
1568 }
1569
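/* Device interrupt handler: disable further device interrupts and schedule
 * NAPI; interrupts are re-enabled from ibmveth_poll() once the receive
 * queue has been drained.
 */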
1570 static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance)
1571 {
1572 struct net_device *netdev = dev_instance;
1573 struct ibmveth_adapter *adapter = netdev_priv(netdev);
1574 unsigned long lpar_rc;
1575
1576 if (napi_schedule_prep(&adapter->napi)) {
1577 lpar_rc = h_vio_signal(adapter->vdev->unit_address,
1578 VIO_IRQ_DISABLE);
1579 WARN_ON(lpar_rc != H_SUCCESS);
1580 __napi_schedule(&adapter->napi);
1581 }
1582 return IRQ_HANDLED;
1583 }
1584
1585 static void ibmveth_set_multicast_list(struct net_device *netdev)
1586 {
1587 struct ibmveth_adapter *adapter = netdev_priv(netdev);
1588 unsigned long lpar_rc;
1589
1590 if ((netdev->flags & IFF_PROMISC) ||
1591 (netdev_mc_count(netdev) > adapter->mcastFilterSize)) {
1592 lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
1593 IbmVethMcastEnableRecv |
1594 IbmVethMcastDisableFiltering,
1595 0);
1596 if (lpar_rc != H_SUCCESS) {
1597 netdev_err(netdev, "h_multicast_ctrl rc=%ld when "
1598 "entering promisc mode\n", lpar_rc);
1599 }
1600 } else {
1601 struct netdev_hw_addr *ha;
1602 /* clear the filter table & disable filtering */
1603 lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
1604 IbmVethMcastEnableRecv |
1605 IbmVethMcastDisableFiltering |
1606 IbmVethMcastClearFilterTable,
1607 0);
1608 if (lpar_rc != H_SUCCESS) {
1609 netdev_err(netdev, "h_multicast_ctrl rc=%ld when "
1610 "attempting to clear filter table\n",
1611 lpar_rc);
1612 }
1613 /* add the addresses to the filter table */
1614 netdev_for_each_mc_addr(ha, netdev) {
1615 /* add the multicast address to the filter table */
1616 u64 mcast_addr;
1617 mcast_addr = ether_addr_to_u64(ha->addr);
1618 lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
1619 IbmVethMcastAddFilter,
1620 mcast_addr);
1621 if (lpar_rc != H_SUCCESS) {
1622 netdev_err(netdev, "h_multicast_ctrl rc=%ld "
1623 "when adding an entry to the filter "
1624 "table\n", lpar_rc);
1625 }
1626 }
1627
1628 /* re-enable filtering */
1629 lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
1630 IbmVethMcastEnableFiltering,
1631 0);
1632 if (lpar_rc != H_SUCCESS) {
1633 netdev_err(netdev, "h_multicast_ctrl rc=%ld when "
1634 "enabling filtering\n", lpar_rc);
1635 }
1636 }
1637 }
1638
1639 static int ibmveth_change_mtu(struct net_device *dev, int new_mtu)
1640 {
1641 struct ibmveth_adapter *adapter = netdev_priv(dev);
1642 struct vio_dev *viodev = adapter->vdev;
1643 int new_mtu_oh = new_mtu + IBMVETH_BUFF_OH;
1644 int i, rc;
1645 int need_restart = 0;
1646
1647 for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
1648 if (new_mtu_oh <= adapter->rx_buff_pool[i].buff_size)
1649 break;
1650
1651 if (i == IBMVETH_NUM_BUFF_POOLS)
1652 return -EINVAL;
1653
1654 /* Deactivate all the buffer pools so that the next loop can activate
1655 * only the buffer pools necessary to hold the new MTU */
1656 if (netif_running(adapter->netdev)) {
1657 need_restart = 1;
1658 ibmveth_close(adapter->netdev);
1659 }
1660
1661 /* Look for an active buffer pool that can hold the new MTU */
1662 for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
1663 adapter->rx_buff_pool[i].active = 1;
1664
1665 if (new_mtu_oh <= adapter->rx_buff_pool[i].buff_size) {
1666 WRITE_ONCE(dev->mtu, new_mtu);
1667 vio_cmo_set_dev_desired(viodev,
1668 ibmveth_get_desired_dma
1669 (viodev));
1670 if (need_restart) {
1671 return ibmveth_open(adapter->netdev);
1672 }
1673 return 0;
1674 }
1675 }
1676
1677 if (need_restart && (rc = ibmveth_open(adapter->netdev)))
1678 return rc;
1679
1680 return -EINVAL;
1681 }
1682
1683 #ifdef CONFIG_NET_POLL_CONTROLLER
1684 static void ibmveth_poll_controller(struct net_device *dev)
1685 {
1686 ibmveth_replenish_task(netdev_priv(dev));
1687 ibmveth_interrupt(dev->irq, dev);
1688 }
1689 #endif
1690
1691 /**
1692 * ibmveth_get_desired_dma - Calculate IO memory desired by the driver
1693 *
1694 * @vdev: struct vio_dev for the device whose desired IO mem is to be returned
1695 *
1696 * Return value:
1697 * Number of bytes of IO data the driver will need to perform well.
1698 */
1699 static unsigned long ibmveth_get_desired_dma(struct vio_dev *vdev)
1700 {
1701 struct net_device *netdev = dev_get_drvdata(&vdev->dev);
1702 struct ibmveth_adapter *adapter;
1703 struct iommu_table *tbl;
1704 unsigned long ret;
1705 int i;
1706 int rxqentries = 1;
1707
1708 tbl = get_iommu_table_base(&vdev->dev);
1709
1710 /* netdev inits at probe time along with the structures we need below*/
1711 if (netdev == NULL)
1712 return IOMMU_PAGE_ALIGN(IBMVETH_IO_ENTITLEMENT_DEFAULT, tbl);
1713
1714 adapter = netdev_priv(netdev);
1715
1716 ret = IBMVETH_BUFF_LIST_SIZE + IBMVETH_FILT_LIST_SIZE;
1717 ret += IOMMU_PAGE_ALIGN(netdev->mtu, tbl);
1718 /* add size of mapped tx buffers */
1719 ret += IOMMU_PAGE_ALIGN(IBMVETH_MAX_TX_BUF_SIZE, tbl);
1720
1721 for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
1722 /* add the size of the active receive buffers */
1723 if (adapter->rx_buff_pool[i].active)
1724 ret +=
1725 adapter->rx_buff_pool[i].size *
1726 IOMMU_PAGE_ALIGN(adapter->rx_buff_pool[i].
1727 buff_size, tbl);
1728 rxqentries += adapter->rx_buff_pool[i].size;
1729 }
1730 /* add the size of the receive queue entries */
1731 ret += IOMMU_PAGE_ALIGN(
1732 rxqentries * sizeof(struct ibmveth_rx_q_entry), tbl);
1733
1734 return ret;
1735 }
1736
1737 static int ibmveth_set_mac_addr(struct net_device *dev, void *p)
1738 {
1739 struct ibmveth_adapter *adapter = netdev_priv(dev);
1740 struct sockaddr *addr = p;
1741 u64 mac_address;
1742 int rc;
1743
1744 if (!is_valid_ether_addr(addr->sa_data))
1745 return -EADDRNOTAVAIL;
1746
1747 mac_address = ether_addr_to_u64(addr->sa_data);
1748 rc = h_change_logical_lan_mac(adapter->vdev->unit_address, mac_address);
1749 if (rc) {
1750 netdev_err(adapter->netdev, "h_change_logical_lan_mac failed with rc=%d\n", rc);
1751 return rc;
1752 }
1753
1754 eth_hw_addr_set(dev, addr->sa_data);
1755
1756 return 0;
1757 }
1758
1759 static const struct net_device_ops ibmveth_netdev_ops = {
1760 .ndo_open = ibmveth_open,
1761 .ndo_stop = ibmveth_close,
1762 .ndo_start_xmit = ibmveth_start_xmit,
1763 .ndo_set_rx_mode = ibmveth_set_multicast_list,
1764 .ndo_eth_ioctl = ibmveth_ioctl,
1765 .ndo_change_mtu = ibmveth_change_mtu,
1766 .ndo_fix_features = ibmveth_fix_features,
1767 .ndo_set_features = ibmveth_set_features,
1768 .ndo_validate_addr = eth_validate_addr,
1769 .ndo_set_mac_address = ibmveth_set_mac_addr,
1770 #ifdef CONFIG_NET_POLL_CONTROLLER
1771 .ndo_poll_controller = ibmveth_poll_controller,
1772 #endif
1773 };
1774
1775 static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
1776 {
1777 int rc, i, mac_len;
1778 struct net_device *netdev;
1779 struct ibmveth_adapter *adapter;
1780 unsigned char *mac_addr_p;
1781 __be32 *mcastFilterSize_p;
1782 long ret;
1783 unsigned long ret_attr;
1784
1785 dev_dbg(&dev->dev, "entering ibmveth_probe for UA 0x%x\n",
1786 dev->unit_address);
1787
1788 mac_addr_p = (unsigned char *)vio_get_attribute(dev, VETH_MAC_ADDR,
1789 &mac_len);
1790 if (!mac_addr_p) {
1791 dev_err(&dev->dev, "Can't find VETH_MAC_ADDR attribute\n");
1792 return -EINVAL;
1793 }
1794 /* Workaround for old/broken pHyp */
1795 if (mac_len == 8)
1796 mac_addr_p += 2;
1797 else if (mac_len != 6) {
1798 dev_err(&dev->dev, "VETH_MAC_ADDR attribute wrong len %d\n",
1799 mac_len);
1800 return -EINVAL;
1801 }
1802
1803 mcastFilterSize_p = (__be32 *)vio_get_attribute(dev,
1804 VETH_MCAST_FILTER_SIZE,
1805 NULL);
1806 if (!mcastFilterSize_p) {
1807 dev_err(&dev->dev, "Can't find VETH_MCAST_FILTER_SIZE "
1808 "attribute\n");
1809 return -EINVAL;
1810 }
1811
1812 netdev = alloc_etherdev_mqs(sizeof(struct ibmveth_adapter), IBMVETH_MAX_QUEUES, 1);
1813 if (!netdev)
1814 return -ENOMEM;
1815
1816 adapter = netdev_priv(netdev);
1817 dev_set_drvdata(&dev->dev, netdev);
1818
1819 adapter->vdev = dev;
1820 adapter->netdev = netdev;
1821 INIT_WORK(&adapter->work, ibmveth_reset);
1822 adapter->mcastFilterSize = be32_to_cpu(*mcastFilterSize_p);
1823 ibmveth_init_link_settings(netdev);
1824
1825 netif_napi_add_weight(netdev, &adapter->napi, ibmveth_poll, 16);
1826
1827 netdev->irq = dev->irq;
1828 netdev->netdev_ops = &ibmveth_netdev_ops;
1829 netdev->ethtool_ops = &netdev_ethtool_ops;
1830 SET_NETDEV_DEV(netdev, &dev->dev);
1831 netdev->hw_features = NETIF_F_SG;
1832 if (vio_get_attribute(dev, "ibm,illan-options", NULL) != NULL) {
1833 netdev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
1834 NETIF_F_RXCSUM;
1835 }
1836
1837 netdev->features |= netdev->hw_features;
1838
1839 ret = h_illan_attributes(adapter->vdev->unit_address, 0, 0, &ret_attr);
1840
1841 /* If running older firmware, TSO should not be enabled by default */
1842 if (ret == H_SUCCESS && (ret_attr & IBMVETH_ILLAN_LRG_SND_SUPPORT) &&
1843 !old_large_send) {
1844 netdev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
1845 netdev->features |= netdev->hw_features;
1846 } else {
1847 netdev->hw_features |= NETIF_F_TSO;
1848 }
1849
1850 adapter->is_active_trunk = false;
1851 if (ret == H_SUCCESS && (ret_attr & IBMVETH_ILLAN_ACTIVE_TRUNK)) {
1852 adapter->is_active_trunk = true;
1853 netdev->hw_features |= NETIF_F_FRAGLIST;
1854 netdev->features |= NETIF_F_FRAGLIST;
1855 }
1856
1857 if (ret == H_SUCCESS &&
1858 (ret_attr & IBMVETH_ILLAN_RX_MULTI_BUFF_SUPPORT)) {
1859 adapter->rx_buffers_per_hcall = IBMVETH_MAX_RX_PER_HCALL;
1860 netdev_dbg(netdev,
1861 "RX Multi-buffer hcall supported by FW, batch set to %u\n",
1862 adapter->rx_buffers_per_hcall);
1863 } else {
1864 adapter->rx_buffers_per_hcall = 1;
1865 netdev_dbg(netdev,
1866 "RX Single-buffer hcall mode, batch set to %u\n",
1867 adapter->rx_buffers_per_hcall);
1868 }
1869
1870 netdev->min_mtu = IBMVETH_MIN_MTU;
1871 netdev->max_mtu = ETH_MAX_MTU - IBMVETH_BUFF_OH;
1872
1873 eth_hw_addr_set(netdev, mac_addr_p);
1874
1875 if (firmware_has_feature(FW_FEATURE_CMO))
1876 memcpy(pool_count, pool_count_cmo, sizeof(pool_count));
1877
1878 for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
1879 struct kobject *kobj = &adapter->rx_buff_pool[i].kobj;
1880 int error;
1881
1882 ibmveth_init_buffer_pool(&adapter->rx_buff_pool[i], i,
1883 pool_count[i], pool_size[i],
1884 pool_active[i]);
1885 error = kobject_init_and_add(kobj, &ktype_veth_pool,
1886 &dev->dev.kobj, "pool%d", i);
1887 if (!error)
1888 kobject_uevent(kobj, KOBJ_ADD);
1889 }
1890
1891 rc = netif_set_real_num_tx_queues(netdev, min(num_online_cpus(),
1892 IBMVETH_DEFAULT_QUEUES));
1893 if (rc) {
1894 netdev_dbg(netdev, "failed to set number of tx queues rc=%d\n",
1895 rc);
1896 free_netdev(netdev);
1897 return rc;
1898 }
1899 adapter->tx_ltb_size = PAGE_ALIGN(IBMVETH_MAX_TX_BUF_SIZE);
1900 for (i = 0; i < IBMVETH_MAX_QUEUES; i++)
1901 adapter->tx_ltb_ptr[i] = NULL;
1902
1903 netdev_dbg(netdev, "adapter @ 0x%p\n", adapter);
1904 netdev_dbg(netdev, "registering netdev...\n");
1905
1906 ibmveth_set_features(netdev, netdev->features);
1907
1908 rc = register_netdev(netdev);
1909
1910 if (rc) {
1911 netdev_dbg(netdev, "failed to register netdev rc=%d\n", rc);
1912 free_netdev(netdev);
1913 return rc;
1914 }
1915
1916 netdev_dbg(netdev, "registered\n");
1917
1918 return 0;
1919 }
1920
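/**
 * ibmveth_remove - remove routine, undoes ibmveth_probe
 * @dev: VIO device being removed
 *
 * Cancels any pending adapter reset work, drops the buffer pool sysfs
 * kobject references, then unregisters and frees the net device.
 */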
static void ibmveth_remove(struct vio_dev *dev)
{
	struct net_device *netdev = dev_get_drvdata(&dev->dev);
	struct ibmveth_adapter *adapter = netdev_priv(netdev);
	int i;

	cancel_work_sync(&adapter->work);

	for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
		kobject_put(&adapter->rx_buff_pool[i].kobj);

	unregister_netdev(netdev);

	free_netdev(netdev);
	dev_set_drvdata(&dev->dev, NULL);
}

static struct attribute veth_active_attr;
static struct attribute veth_num_attr;
static struct attribute veth_size_attr;

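/**
 * veth_pool_show - sysfs show handler for pool attributes
 * @kobj: kobject embedded in pool
 * @attr: attribute being read
 * @buf: output buffer
 *
 * Return: number of bytes written to @buf, or 0 for an unknown attribute.
 */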
static ssize_t veth_pool_show(struct kobject *kobj,
			      struct attribute *attr, char *buf)
{
	struct ibmveth_buff_pool *pool = container_of(kobj,
						      struct ibmveth_buff_pool,
						      kobj);

	if (attr == &veth_active_attr)
		return sprintf(buf, "%d\n", pool->active);
	else if (attr == &veth_num_attr)
		return sprintf(buf, "%d\n", pool->size);
	else if (attr == &veth_size_attr)
		return sprintf(buf, "%d\n", pool->buff_size);
	return 0;
}

/**
 * veth_pool_store - sysfs store handler for pool attributes
 * @kobj: kobject embedded in pool
 * @attr: attribute being changed
 * @buf: value being stored
 * @count: length of @buf in bytes
 *
 * Stores the new value in the pool attribute. Verifies the range of the new
 * value for size and buff_size. Verifies that at least one pool remains
 * available to receive MTU-sized packets.
 *
 * Context: Process context.
 *          Takes and releases rtnl_mutex to ensure correct ordering of close
 *          and open calls.
 * Return:
 * * %-EPERM  - Not allowed to disable all MTU-sized buffer pools
 * * %-EINVAL - New pool size or buffer size is out of range
 * * count    - Return count for success
 * * other    - Return value from a failed ibmveth_open call
 */
static ssize_t veth_pool_store(struct kobject *kobj, struct attribute *attr,
			       const char *buf, size_t count)
{
	struct ibmveth_buff_pool *pool = container_of(kobj,
						      struct ibmveth_buff_pool,
						      kobj);
	struct net_device *netdev = dev_get_drvdata(kobj_to_dev(kobj->parent));
	struct ibmveth_adapter *adapter = netdev_priv(netdev);
	long value = simple_strtol(buf, NULL, 10);
	bool change = false;
	u32 newbuff_size;
	u32 oldbuff_size;
	int newactive;
	int oldactive;
	u32 newsize;
	u32 oldsize;
	long rc;

	rtnl_lock();

	oldbuff_size = pool->buff_size;
	oldactive = pool->active;
	oldsize = pool->size;

	newbuff_size = oldbuff_size;
	newactive = oldactive;
	newsize = oldsize;

	if (attr == &veth_active_attr) {
		if (value && !oldactive) {
			newactive = 1;
			change = true;
		} else if (!value && oldactive) {
			int mtu = netdev->mtu + IBMVETH_BUFF_OH;
			int i;

			/* Make sure there is a buffer pool with buffers that
			 * can hold a packet of the size of the MTU
			 */
			for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
				if (pool == &adapter->rx_buff_pool[i])
					continue;
				if (!adapter->rx_buff_pool[i].active)
					continue;
				if (mtu <= adapter->rx_buff_pool[i].buff_size)
					break;
			}

			if (i == IBMVETH_NUM_BUFF_POOLS) {
				netdev_err(netdev, "no active pool >= MTU\n");
				rc = -EPERM;
				goto unlock_err;
			}

			newactive = 0;
			change = true;
		}
	} else if (attr == &veth_num_attr) {
		if (value <= 0 || value > IBMVETH_MAX_POOL_COUNT) {
			rc = -EINVAL;
			goto unlock_err;
		}
		if (value != oldsize) {
			newsize = value;
			change = true;
		}
	} else if (attr == &veth_size_attr) {
		if (value <= IBMVETH_BUFF_OH || value > IBMVETH_MAX_BUF_SIZE) {
			rc = -EINVAL;
			goto unlock_err;
		}
		if (value != oldbuff_size) {
			newbuff_size = value;
			change = true;
		}
	}

	if (change) {
		if (netif_running(netdev))
			ibmveth_close(netdev);

		pool->active = newactive;
		pool->buff_size = newbuff_size;
		pool->size = newsize;

		if (netif_running(netdev)) {
			rc = ibmveth_open(netdev);
			if (rc) {
				pool->active = oldactive;
				pool->buff_size = oldbuff_size;
				pool->size = oldsize;
				goto unlock_err;
			}
		}
	}
	rtnl_unlock();

	/* kick the interrupt handler to allocate/deallocate pools */
	ibmveth_interrupt(netdev->irq, netdev);
	return count;

unlock_err:
	rtnl_unlock();
	return rc;
}
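
/*
 * The pool attributes handled above are exposed as pool<N>/{active,num,size}
 * under the VIO device's sysfs directory. As an illustrative sketch (the unit
 * address below is hypothetical, not taken from any particular system),
 * resizing and re-activating pool0 from userspace could look like:
 *
 *   echo 1024 > /sys/devices/vio/30000002/pool0/num
 *   echo 4096 > /sys/devices/vio/30000002/pool0/size
 *   echo 1    > /sys/devices/vio/30000002/pool0/active
 *
 * Each write goes through veth_pool_store(), which closes and re-opens the
 * running device under the RTNL lock when a change is applied.
 */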

#define ATTR(_name, _mode)				\
	struct attribute veth_##_name##_attr = {	\
		.name = __stringify(_name), .mode = _mode, \
	};

static ATTR(active, 0644);
static ATTR(num, 0644);
static ATTR(size, 0644);

static struct attribute *veth_pool_attrs[] = {
	&veth_active_attr,
	&veth_num_attr,
	&veth_size_attr,
	NULL,
};
ATTRIBUTE_GROUPS(veth_pool);

static const struct sysfs_ops veth_pool_ops = {
	.show	= veth_pool_show,
	.store	= veth_pool_store,
};

static struct kobj_type ktype_veth_pool = {
	.release	= NULL,
	.sysfs_ops	= &veth_pool_ops,
	.default_groups	= veth_pool_groups,
};

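/**
 * ibmveth_resume - dev_pm_ops resume handler
 * @dev: generic device embedded in the VIO device
 *
 * Kicks the interrupt handler after resume so that pending receive queue
 * processing and buffer pool replenishment are picked up again.
 *
 * Return: 0 always.
 */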
static int ibmveth_resume(struct device *dev)
{
	struct net_device *netdev = dev_get_drvdata(dev);

	ibmveth_interrupt(netdev->irq, netdev);
	return 0;
}

static const struct vio_device_id ibmveth_device_table[] = {
	{ "network", "IBM,l-lan"},
	{ "", "" }
};
MODULE_DEVICE_TABLE(vio, ibmveth_device_table);

static const struct dev_pm_ops ibmveth_pm_ops = {
	.resume = ibmveth_resume
};

static struct vio_driver ibmveth_driver = {
	.id_table	= ibmveth_device_table,
	.probe		= ibmveth_probe,
	.remove		= ibmveth_remove,
	.get_desired_dma = ibmveth_get_desired_dma,
	.name		= ibmveth_driver_name,
	.pm		= &ibmveth_pm_ops,
};

static int __init ibmveth_module_init(void)
{
	printk(KERN_DEBUG "%s: %s %s\n", ibmveth_driver_name,
	       ibmveth_driver_string, ibmveth_driver_version);

	return vio_register_driver(&ibmveth_driver);
}

static void __exit ibmveth_module_exit(void)
{
	vio_unregister_driver(&ibmveth_driver);
}

module_init(ibmveth_module_init);
module_exit(ibmveth_module_exit);

#ifdef CONFIG_IBMVETH_KUNIT_TEST
#include <kunit/test.h>

/**
 * ibmveth_reset_kunit - reset routine for running in KUnit environment
 * @w: pointer to work_struct embedded in adapter structure
 *
 * Context: Called in the KUnit environment. Does nothing.
 *
 * Return: void
 */
static void ibmveth_reset_kunit(struct work_struct *w)
{
	netdev_dbg(NULL, "reset_kunit starting\n");
	netdev_dbg(NULL, "reset_kunit complete\n");
}

/**
 * ibmveth_remove_buffer_from_pool_test - unit test for some of
 *                                        ibmveth_remove_buffer_from_pool
 * @test: pointer to kunit structure
 *
 * Tests the error returns from ibmveth_remove_buffer_from_pool.
 * ibmveth_remove_buffer_from_pool also calls WARN_ON, so dmesg should be
 * checked to see that these warnings happened.
 *
 * Return: void
 */
static void ibmveth_remove_buffer_from_pool_test(struct kunit *test)
{
	struct ibmveth_adapter *adapter = kunit_kzalloc(test, sizeof(*adapter), GFP_KERNEL);
	struct ibmveth_buff_pool *pool;
	u64 correlator;

	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, adapter);

	INIT_WORK(&adapter->work, ibmveth_reset_kunit);

	/* Set sane values for buffer pools */
	for (int i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
		ibmveth_init_buffer_pool(&adapter->rx_buff_pool[i], i,
					 pool_count[i], pool_size[i],
					 pool_active[i]);

	pool = &adapter->rx_buff_pool[0];
	pool->skbuff = kunit_kcalloc(test, pool->size, sizeof(void *), GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, pool->skbuff);

	correlator = ((u64)IBMVETH_NUM_BUFF_POOLS << 32) | 0;
	KUNIT_EXPECT_EQ(test, -EINVAL, ibmveth_remove_buffer_from_pool(adapter, correlator, false));
	KUNIT_EXPECT_EQ(test, -EINVAL, ibmveth_remove_buffer_from_pool(adapter, correlator, true));

	correlator = ((u64)0 << 32) | adapter->rx_buff_pool[0].size;
	KUNIT_EXPECT_EQ(test, -EINVAL, ibmveth_remove_buffer_from_pool(adapter, correlator, false));
	KUNIT_EXPECT_EQ(test, -EINVAL, ibmveth_remove_buffer_from_pool(adapter, correlator, true));

	correlator = (u64)0 | 0;
	pool->skbuff[0] = NULL;
	KUNIT_EXPECT_EQ(test, -EFAULT, ibmveth_remove_buffer_from_pool(adapter, correlator, false));
	KUNIT_EXPECT_EQ(test, -EFAULT, ibmveth_remove_buffer_from_pool(adapter, correlator, true));

	flush_work(&adapter->work);
}

/**
 * ibmveth_rxq_get_buffer_test - unit test for ibmveth_rxq_get_buffer
 * @test: pointer to kunit structure
 *
 * Tests ibmveth_rxq_get_buffer. ibmveth_rxq_get_buffer also calls WARN_ON for
 * the NULL returns, so dmesg should be checked to see that these warnings
 * happened.
 *
 * Return: void
 */
static void ibmveth_rxq_get_buffer_test(struct kunit *test)
{
	struct ibmveth_adapter *adapter = kunit_kzalloc(test, sizeof(*adapter), GFP_KERNEL);
	struct sk_buff *skb = kunit_kzalloc(test, sizeof(*skb), GFP_KERNEL);
	struct ibmveth_buff_pool *pool;

	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, adapter);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, skb);

	INIT_WORK(&adapter->work, ibmveth_reset_kunit);

	adapter->rx_queue.queue_len = 1;
	adapter->rx_queue.index = 0;
	adapter->rx_queue.queue_addr = kunit_kzalloc(test, sizeof(struct ibmveth_rx_q_entry),
						     GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, adapter->rx_queue.queue_addr);

	/* Set sane values for buffer pools */
	for (int i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
		ibmveth_init_buffer_pool(&adapter->rx_buff_pool[i], i,
					 pool_count[i], pool_size[i],
					 pool_active[i]);

	pool = &adapter->rx_buff_pool[0];
	pool->skbuff = kunit_kcalloc(test, pool->size, sizeof(void *), GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, pool->skbuff);

	adapter->rx_queue.queue_addr[0].correlator = (u64)IBMVETH_NUM_BUFF_POOLS << 32 | 0;
	KUNIT_EXPECT_PTR_EQ(test, NULL, ibmveth_rxq_get_buffer(adapter));

	adapter->rx_queue.queue_addr[0].correlator = (u64)0 << 32 | adapter->rx_buff_pool[0].size;
	KUNIT_EXPECT_PTR_EQ(test, NULL, ibmveth_rxq_get_buffer(adapter));

	pool->skbuff[0] = skb;
	adapter->rx_queue.queue_addr[0].correlator = (u64)0 << 32 | 0;
	KUNIT_EXPECT_PTR_EQ(test, skb, ibmveth_rxq_get_buffer(adapter));

	flush_work(&adapter->work);
}

static struct kunit_case ibmveth_test_cases[] = {
	KUNIT_CASE(ibmveth_remove_buffer_from_pool_test),
	KUNIT_CASE(ibmveth_rxq_get_buffer_test),
	{}
};

static struct kunit_suite ibmveth_test_suite = {
	.name = "ibmveth-kunit-test",
	.test_cases = ibmveth_test_cases,
};

kunit_test_suite(ibmveth_test_suite);
#endif /* CONFIG_IBMVETH_KUNIT_TEST */