// SPDX-License-Identifier: GPL-2.0-or-later
/**************************************************************************/
/* */
/* IBM System i and System p Virtual NIC Device Driver */
/* Copyright (C) 2014 IBM Corp. */
/* Santiago Leon (santi_leon@yahoo.com) */
/* Thomas Falcon (tlfalcon@linux.vnet.ibm.com) */
/* John Allen (jallen@linux.vnet.ibm.com) */
/* */
/* */
/* This module contains the implementation of a virtual ethernet device */
/* for use with IBM i/p Series LPAR Linux. It utilizes the logical LAN */
/* option of the RS/6000 Platform Architecture to interface with virtual */
/* ethernet NICs that are presented to the partition by the hypervisor. */
/* */
/* Messages are passed between the VNIC driver and the VNIC server using */
/* Command/Response Queues (CRQs) and sub CRQs (sCRQs). CRQs are used to */
/* issue and receive commands that initiate communication with the server */
/* on driver initialization. Sub CRQs (sCRQs) are similar to CRQs, but */
/* are used by the driver to notify the server that a packet is */
/* ready for transmission or that a buffer has been added to receive a */
/* packet. Subsequently, sCRQs are used by the server to notify the */
/* driver that a packet transmission has been completed or that a packet */
/* has been received and placed in a waiting buffer. */
/* */
/* In lieu of a more conventional "on-the-fly" DMA mapping strategy in */
/* which skbs are DMA mapped and immediately unmapped when the transmit */
/* or receive has been completed, the VNIC driver is required to use */
/* "long term mapping". This entails that large, contiguous DMA mapped */
/* buffers are allocated on driver initialization and these buffers are */
/* then continuously reused to pass skbs to and from the VNIC server. */
/* */
/**************************************************************************/
34
35 #include <linux/module.h>
36 #include <linux/moduleparam.h>
37 #include <linux/types.h>
38 #include <linux/errno.h>
39 #include <linux/completion.h>
40 #include <linux/ioport.h>
41 #include <linux/dma-mapping.h>
42 #include <linux/kernel.h>
43 #include <linux/netdevice.h>
44 #include <linux/etherdevice.h>
45 #include <linux/skbuff.h>
46 #include <linux/init.h>
47 #include <linux/delay.h>
48 #include <linux/mm.h>
49 #include <linux/ethtool.h>
50 #include <linux/proc_fs.h>
51 #include <linux/if_arp.h>
52 #include <linux/in.h>
53 #include <linux/ip.h>
54 #include <linux/ipv6.h>
55 #include <linux/irq.h>
56 #include <linux/irqdomain.h>
57 #include <linux/kthread.h>
58 #include <linux/seq_file.h>
59 #include <linux/interrupt.h>
60 #include <net/net_namespace.h>
61 #include <asm/hvcall.h>
62 #include <linux/atomic.h>
63 #include <asm/vio.h>
64 #include <asm/xive.h>
65 #include <asm/iommu.h>
66 #include <linux/uaccess.h>
67 #include <asm/firmware.h>
68 #include <linux/workqueue.h>
69 #include <linux/if_vlan.h>
70 #include <linux/utsname.h>
71 #include <linux/cpu.h>
72
73 #include "ibmvnic.h"
74
75 static const char ibmvnic_driver_name[] = "ibmvnic";
76 static const char ibmvnic_driver_string[] = "IBM System i/p Virtual NIC Driver";
77
78 MODULE_AUTHOR("Santiago Leon");
79 MODULE_DESCRIPTION("IBM System i/p Virtual NIC Driver");
80 MODULE_LICENSE("GPL");
81 MODULE_VERSION(IBMVNIC_DRIVER_VERSION);
82
83 static int ibmvnic_version = IBMVNIC_INITIAL_VERSION;
84 static void release_sub_crqs(struct ibmvnic_adapter *, bool);
85 static int ibmvnic_reset_crq(struct ibmvnic_adapter *);
86 static int ibmvnic_send_crq_init(struct ibmvnic_adapter *);
87 static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *);
88 static int ibmvnic_send_crq(struct ibmvnic_adapter *, union ibmvnic_crq *);
89 static int send_subcrq_indirect(struct ibmvnic_adapter *, u64, u64, u64);
90 static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance);
91 static int enable_scrq_irq(struct ibmvnic_adapter *,
92 struct ibmvnic_sub_crq_queue *);
93 static int disable_scrq_irq(struct ibmvnic_adapter *,
94 struct ibmvnic_sub_crq_queue *);
95 static int pending_scrq(struct ibmvnic_adapter *,
96 struct ibmvnic_sub_crq_queue *);
97 static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *,
98 struct ibmvnic_sub_crq_queue *);
99 static int ibmvnic_poll(struct napi_struct *napi, int data);
100 static int reset_sub_crq_queues(struct ibmvnic_adapter *adapter);
101 static inline void reinit_init_done(struct ibmvnic_adapter *adapter);
102 static void send_query_map(struct ibmvnic_adapter *adapter);
103 static int send_request_map(struct ibmvnic_adapter *, dma_addr_t, u32, u8);
104 static int send_request_unmap(struct ibmvnic_adapter *, u8);
105 static int send_login(struct ibmvnic_adapter *adapter);
106 static void send_query_cap(struct ibmvnic_adapter *adapter);
107 static int init_sub_crqs(struct ibmvnic_adapter *);
108 static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter);
109 static int ibmvnic_reset_init(struct ibmvnic_adapter *, bool reset);
110 static void release_crq_queue(struct ibmvnic_adapter *);
111 static int __ibmvnic_set_mac(struct net_device *, u8 *);
112 static int init_crq_queue(struct ibmvnic_adapter *adapter);
113 static int send_query_phys_parms(struct ibmvnic_adapter *adapter);
114 static void ibmvnic_tx_scrq_clean_buffer(struct ibmvnic_adapter *adapter,
115 struct ibmvnic_sub_crq_queue *tx_scrq);
116 static void free_long_term_buff(struct ibmvnic_adapter *adapter,
117 struct ibmvnic_long_term_buff *ltb);
118 static void ibmvnic_disable_irqs(struct ibmvnic_adapter *adapter);
119 static void flush_reset_queue(struct ibmvnic_adapter *adapter);
120 static void print_subcrq_error(struct device *dev, int rc, const char *func);
121
122 struct ibmvnic_stat {
123 char name[ETH_GSTRING_LEN];
124 int offset;
125 };
126
127 #define IBMVNIC_STAT_OFF(stat) (offsetof(struct ibmvnic_adapter, stats) + \
128 offsetof(struct ibmvnic_statistics, stat))
129 #define IBMVNIC_GET_STAT(a, off) (*((u64 *)(((unsigned long)(a)) + (off))))
130
131 static const struct ibmvnic_stat ibmvnic_stats[] = {
132 {"rx_packets", IBMVNIC_STAT_OFF(rx_packets)},
133 {"rx_bytes", IBMVNIC_STAT_OFF(rx_bytes)},
134 {"tx_packets", IBMVNIC_STAT_OFF(tx_packets)},
135 {"tx_bytes", IBMVNIC_STAT_OFF(tx_bytes)},
136 {"ucast_tx_packets", IBMVNIC_STAT_OFF(ucast_tx_packets)},
137 {"ucast_rx_packets", IBMVNIC_STAT_OFF(ucast_rx_packets)},
138 {"mcast_tx_packets", IBMVNIC_STAT_OFF(mcast_tx_packets)},
139 {"mcast_rx_packets", IBMVNIC_STAT_OFF(mcast_rx_packets)},
140 {"bcast_tx_packets", IBMVNIC_STAT_OFF(bcast_tx_packets)},
141 {"bcast_rx_packets", IBMVNIC_STAT_OFF(bcast_rx_packets)},
142 {"align_errors", IBMVNIC_STAT_OFF(align_errors)},
143 {"fcs_errors", IBMVNIC_STAT_OFF(fcs_errors)},
144 {"single_collision_frames", IBMVNIC_STAT_OFF(single_collision_frames)},
145 {"multi_collision_frames", IBMVNIC_STAT_OFF(multi_collision_frames)},
146 {"sqe_test_errors", IBMVNIC_STAT_OFF(sqe_test_errors)},
147 {"deferred_tx", IBMVNIC_STAT_OFF(deferred_tx)},
148 {"late_collisions", IBMVNIC_STAT_OFF(late_collisions)},
149 {"excess_collisions", IBMVNIC_STAT_OFF(excess_collisions)},
150 {"internal_mac_tx_errors", IBMVNIC_STAT_OFF(internal_mac_tx_errors)},
151 {"carrier_sense", IBMVNIC_STAT_OFF(carrier_sense)},
152 {"too_long_frames", IBMVNIC_STAT_OFF(too_long_frames)},
153 {"internal_mac_rx_errors", IBMVNIC_STAT_OFF(internal_mac_rx_errors)},
154 };
155
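/**
 * send_crq_init_complete() - Send a CRQ INIT COMPLETE message to the server
 * @adapter: ibmvnic adapter
 *
 * Completes the CRQ initialization handshake with the VNIC server.
 *
 * Return: the result of ibmvnic_send_crq().
 */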
static int send_crq_init_complete(struct ibmvnic_adapter *adapter)
157 {
158 union ibmvnic_crq crq;
159
160 memset(&crq, 0, sizeof(crq));
161 crq.generic.first = IBMVNIC_CRQ_INIT_CMD;
162 crq.generic.cmd = IBMVNIC_CRQ_INIT_COMPLETE;
163
164 return ibmvnic_send_crq(adapter, &crq);
165 }
166
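/**
 * send_version_xchg() - Advertise the driver's CRQ protocol version
 * @adapter: ibmvnic adapter
 *
 * Sends a VERSION_EXCHANGE request carrying ibmvnic_version.
 *
 * Return: the result of ibmvnic_send_crq().
 */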
static int send_version_xchg(struct ibmvnic_adapter *adapter)
168 {
169 union ibmvnic_crq crq;
170
171 memset(&crq, 0, sizeof(crq));
172 crq.version_exchange.first = IBMVNIC_CRQ_CMD;
173 crq.version_exchange.cmd = VERSION_EXCHANGE;
174 crq.version_exchange.version = cpu_to_be16(ibmvnic_version);
175
176 return ibmvnic_send_crq(adapter, &crq);
177 }
178
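/**
 * ibmvnic_clean_queue_affinity() - Clear the IRQ affinity hint for a sub-CRQ
 * @adapter: ibmvnic adapter owning the queue
 * @queue: sub-CRQ queue whose affinity mask and IRQ hint should be cleared
 */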
static void ibmvnic_clean_queue_affinity(struct ibmvnic_adapter *adapter,
					 struct ibmvnic_sub_crq_queue *queue)
181 {
182 if (!(queue && queue->irq))
183 return;
184
185 cpumask_clear(queue->affinity_mask);
186
187 if (irq_set_affinity_and_hint(queue->irq, NULL))
188 netdev_warn(adapter->netdev,
189 "%s: Clear affinity failed, queue addr = %p, IRQ = %d\n",
190 __func__, queue, queue->irq);
191 }
192
static void ibmvnic_clean_affinity(struct ibmvnic_adapter *adapter)
194 {
195 struct ibmvnic_sub_crq_queue **rxqs;
196 struct ibmvnic_sub_crq_queue **txqs;
197 int num_rxqs, num_txqs;
198 int i;
199
200 rxqs = adapter->rx_scrq;
201 txqs = adapter->tx_scrq;
202 num_txqs = adapter->num_active_tx_scrqs;
203 num_rxqs = adapter->num_active_rx_scrqs;
204
205 netdev_dbg(adapter->netdev, "%s: Cleaning irq affinity hints", __func__);
206 if (txqs) {
207 for (i = 0; i < num_txqs; i++)
208 ibmvnic_clean_queue_affinity(adapter, txqs[i]);
209 }
210 if (rxqs) {
211 for (i = 0; i < num_rxqs; i++)
212 ibmvnic_clean_queue_affinity(adapter, rxqs[i]);
213 }
214 }
215
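/**
 * ibmvnic_set_queue_affinity() - Assign a range of CPUs to a sub-CRQ IRQ
 * @queue: sub-CRQ queue to configure
 * @cpu: first CPU to assign; updated to the first CPU left for the next queue
 * @stragglers: count of leftover CPUs to hand out one extra per queue
 * @stride: number of CPUs to assign to this queue
 *
 * Return: 0 on success or if the queue has no IRQ, -ENOMEM if a temporary
 * cpumask cannot be allocated, otherwise the result of
 * irq_set_affinity_and_hint().
 */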
static int ibmvnic_set_queue_affinity(struct ibmvnic_sub_crq_queue *queue,
				      unsigned int *cpu, int *stragglers,
				      int stride)
219 {
220 cpumask_var_t mask;
221 int i;
222 int rc = 0;
223
224 if (!(queue && queue->irq))
225 return rc;
226
227 /* cpumask_var_t is either a pointer or array, allocation works here */
228 if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
229 return -ENOMEM;
230
	/* while we have extra CPUs, give one extra to this IRQ */
232 if (*stragglers) {
233 stride++;
234 (*stragglers)--;
235 }
236 /* atomic write is safer than writing bit by bit directly */
237 for_each_online_cpu_wrap(i, *cpu) {
238 if (!stride--) {
239 /* For the next queue we start from the first
240 * unused CPU in this queue
241 */
242 *cpu = i;
243 break;
244 }
245 cpumask_set_cpu(i, mask);
246 }
247
248 /* set queue affinity mask */
249 cpumask_copy(queue->affinity_mask, mask);
250 rc = irq_set_affinity_and_hint(queue->irq, queue->affinity_mask);
251 free_cpumask_var(mask);
252
253 return rc;
254 }
255
256 /* assumes cpu read lock is held */
static void ibmvnic_set_affinity(struct ibmvnic_adapter *adapter)
258 {
259 struct ibmvnic_sub_crq_queue **rxqs = adapter->rx_scrq;
260 struct ibmvnic_sub_crq_queue **txqs = adapter->tx_scrq;
261 struct ibmvnic_sub_crq_queue *queue;
262 int num_rxqs = adapter->num_active_rx_scrqs, i_rxqs = 0;
263 int num_txqs = adapter->num_active_tx_scrqs, i_txqs = 0;
264 int total_queues, stride, stragglers, i;
265 unsigned int num_cpu, cpu = 0;
266 bool is_rx_queue;
267 int rc = 0;
268
269 netdev_dbg(adapter->netdev, "%s: Setting irq affinity hints", __func__);
270 if (!(adapter->rx_scrq && adapter->tx_scrq)) {
271 netdev_warn(adapter->netdev,
272 "%s: Set affinity failed, queues not allocated\n",
273 __func__);
274 return;
275 }
276
277 total_queues = num_rxqs + num_txqs;
278 num_cpu = num_online_cpus();
279 /* number of cpu's assigned per irq */
280 stride = max_t(int, num_cpu / total_queues, 1);
281 /* number of leftover cpu's */
282 stragglers = num_cpu >= total_queues ? num_cpu % total_queues : 0;
283
284 for (i = 0; i < total_queues; i++) {
285 is_rx_queue = false;
286 /* balance core load by alternating rx and tx assignments
287 * ex: TX0 -> RX0 -> TX1 -> RX1 etc.
288 */
289 if ((i % 2 == 1 && i_rxqs < num_rxqs) || i_txqs == num_txqs) {
290 queue = rxqs[i_rxqs++];
291 is_rx_queue = true;
292 } else {
293 queue = txqs[i_txqs++];
294 }
295
296 rc = ibmvnic_set_queue_affinity(queue, &cpu, &stragglers,
297 stride);
298 if (rc)
299 goto out;
300
301 if (!queue || is_rx_queue)
302 continue;
303
304 rc = __netif_set_xps_queue(adapter->netdev,
305 cpumask_bits(queue->affinity_mask),
306 i_txqs - 1, XPS_CPUS);
307 if (rc)
308 netdev_warn(adapter->netdev, "%s: Set XPS on queue %d failed, rc = %d.\n",
309 __func__, i_txqs - 1, rc);
310 }
311
312 out:
313 if (rc) {
314 netdev_warn(adapter->netdev,
315 "%s: Set affinity failed, queue addr = %p, IRQ = %d, rc = %d.\n",
316 __func__, queue, queue->irq, rc);
317 ibmvnic_clean_affinity(adapter);
318 }
319 }
320
static int ibmvnic_cpu_online(unsigned int cpu, struct hlist_node *node)
322 {
323 struct ibmvnic_adapter *adapter;
324
325 adapter = hlist_entry_safe(node, struct ibmvnic_adapter, node);
326 ibmvnic_set_affinity(adapter);
327 return 0;
328 }
329
static int ibmvnic_cpu_dead(unsigned int cpu, struct hlist_node *node)
331 {
332 struct ibmvnic_adapter *adapter;
333
334 adapter = hlist_entry_safe(node, struct ibmvnic_adapter, node_dead);
335 ibmvnic_set_affinity(adapter);
336 return 0;
337 }
338
static int ibmvnic_cpu_down_prep(unsigned int cpu, struct hlist_node *node)
340 {
341 struct ibmvnic_adapter *adapter;
342
343 adapter = hlist_entry_safe(node, struct ibmvnic_adapter, node);
344 ibmvnic_clean_affinity(adapter);
345 return 0;
346 }
347
348 static enum cpuhp_state ibmvnic_online;
349
static int ibmvnic_cpu_notif_add(struct ibmvnic_adapter *adapter)
351 {
352 int ret;
353
354 ret = cpuhp_state_add_instance_nocalls(ibmvnic_online, &adapter->node);
355 if (ret)
356 return ret;
357 ret = cpuhp_state_add_instance_nocalls(CPUHP_IBMVNIC_DEAD,
358 &adapter->node_dead);
359 if (!ret)
360 return ret;
361 cpuhp_state_remove_instance_nocalls(ibmvnic_online, &adapter->node);
362 return ret;
363 }
364
static void ibmvnic_cpu_notif_remove(struct ibmvnic_adapter *adapter)
366 {
367 cpuhp_state_remove_instance_nocalls(ibmvnic_online, &adapter->node);
368 cpuhp_state_remove_instance_nocalls(CPUHP_IBMVNIC_DEAD,
369 &adapter->node_dead);
370 }
371
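/* Wrapper for the H_REG_SUB_CRQ hcall: register a sub-CRQ buffer with the
 * hypervisor and return the new queue's number and IRQ through @number
 * and @irq.
 */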
static long h_reg_sub_crq(unsigned long unit_address, unsigned long token,
			  unsigned long length, unsigned long *number,
			  unsigned long *irq)
375 {
376 unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
377 long rc;
378
379 rc = plpar_hcall(H_REG_SUB_CRQ, retbuf, unit_address, token, length);
380 *number = retbuf[0];
381 *irq = retbuf[1];
382
383 return rc;
384 }
385
386 /**
387 * ibmvnic_wait_for_completion - Check device state and wait for completion
388 * @adapter: private device data
389 * @comp_done: completion structure to wait for
390 * @timeout: time to wait in milliseconds
391 *
392 * Wait for a completion signal or until the timeout limit is reached
393 * while checking that the device is still active.
394 */
static int ibmvnic_wait_for_completion(struct ibmvnic_adapter *adapter,
				       struct completion *comp_done,
				       unsigned long timeout)
398 {
399 struct net_device *netdev;
400 unsigned long div_timeout;
401 u8 retry;
402
403 netdev = adapter->netdev;
404 retry = 5;
405 div_timeout = msecs_to_jiffies(timeout / retry);
406 while (true) {
407 if (!adapter->crq.active) {
408 netdev_err(netdev, "Device down!\n");
409 return -ENODEV;
410 }
411 if (!retry--)
412 break;
413 if (wait_for_completion_timeout(comp_done, div_timeout))
414 return 0;
415 }
416 netdev_err(netdev, "Operation timed out.\n");
417 return -ETIMEDOUT;
418 }
419
420 /**
421 * reuse_ltb() - Check if a long term buffer can be reused
422 * @ltb: The long term buffer to be checked
423 * @size: The size of the long term buffer.
424 *
425 * An LTB can be reused unless its size has changed.
426 *
427 * Return: Return true if the LTB can be reused, false otherwise.
428 */
static bool reuse_ltb(struct ibmvnic_long_term_buff *ltb, int size)
430 {
431 return (ltb->buff && ltb->size == size);
432 }
433
434 /**
435 * alloc_long_term_buff() - Allocate a long term buffer (LTB)
436 *
437 * @adapter: ibmvnic adapter associated to the LTB
438 * @ltb: container object for the LTB
439 * @size: size of the LTB
440 *
441 * Allocate an LTB of the specified size and notify VIOS.
442 *
443 * If the given @ltb already has the correct size, reuse it. Otherwise if
 * it's non-NULL, free it. Then allocate a new one of the correct size.
445 * Notify the VIOS either way since we may now be working with a new VIOS.
446 *
 * Allocating larger chunks of memory during resets, especially during LPM
 * or in low-memory situations, can cause resets to fail or time out and the
 * LPAR to lose connectivity. So hold onto the LTB even if we fail to
 * communicate with the VIOS and reuse it on the next open. Free the LTB
 * when the adapter is closed.
451 *
452 * Return: 0 if we were able to allocate the LTB and notify the VIOS and
453 * a negative value otherwise.
454 */
static int alloc_long_term_buff(struct ibmvnic_adapter *adapter,
				struct ibmvnic_long_term_buff *ltb, int size)
457 {
458 struct device *dev = &adapter->vdev->dev;
459 u64 prev = 0;
460 int rc;
461
462 if (!reuse_ltb(ltb, size)) {
463 dev_dbg(dev,
464 "LTB size changed from 0x%llx to 0x%x, reallocating\n",
465 ltb->size, size);
466 prev = ltb->size;
467 free_long_term_buff(adapter, ltb);
468 }
469
470 if (ltb->buff) {
471 dev_dbg(dev, "Reusing LTB [map %d, size 0x%llx]\n",
472 ltb->map_id, ltb->size);
473 } else {
		ltb->buff = dma_alloc_coherent(dev, size, &ltb->addr,
475 GFP_KERNEL);
476 if (!ltb->buff) {
477 dev_err(dev, "Couldn't alloc long term buffer\n");
478 return -ENOMEM;
479 }
480 ltb->size = size;
481
482 ltb->map_id = find_first_zero_bit(adapter->map_ids,
483 MAX_MAP_ID);
484 bitmap_set(adapter->map_ids, ltb->map_id, 1);
485
486 dev_dbg(dev,
487 "Allocated new LTB [map %d, size 0x%llx was 0x%llx]\n",
488 ltb->map_id, ltb->size, prev);
489 }
490
	/* Ensure ltb is zeroed - especially when reusing it. */
492 memset(ltb->buff, 0, ltb->size);
493
494 mutex_lock(&adapter->fw_lock);
495 adapter->fw_done_rc = 0;
496 reinit_completion(&adapter->fw_done);
497
498 rc = send_request_map(adapter, ltb->addr, ltb->size, ltb->map_id);
499 if (rc) {
500 dev_err(dev, "send_request_map failed, rc = %d\n", rc);
501 goto out;
502 }
503
504 rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
505 if (rc) {
506 dev_err(dev, "LTB map request aborted or timed out, rc = %d\n",
507 rc);
508 goto out;
509 }
510
511 if (adapter->fw_done_rc) {
512 dev_err(dev, "Couldn't map LTB, rc = %d\n",
513 adapter->fw_done_rc);
514 rc = -EIO;
515 goto out;
516 }
517 rc = 0;
518 out:
519 /* don't free LTB on communication error - see function header */
520 mutex_unlock(&adapter->fw_lock);
521 return rc;
522 }
523
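/**
 * free_long_term_buff() - Free a long term buffer (LTB)
 * @adapter: ibmvnic adapter associated to the LTB
 * @ltb: LTB to free
 *
 * Ask the VIOS to unmap the buffer, unless the reset in progress (FAILOVER,
 * MOBILITY or TIMEOUT) means the remote end has already unmapped it, then
 * free the DMA memory and release the map id.
 */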
static void free_long_term_buff(struct ibmvnic_adapter *adapter,
				struct ibmvnic_long_term_buff *ltb)
526 {
527 struct device *dev = &adapter->vdev->dev;
528
529 if (!ltb->buff)
530 return;
531
532 /* VIOS automatically unmaps the long term buffer at remote
533 * end for the following resets:
534 * FAILOVER, MOBILITY, TIMEOUT.
535 */
536 if (adapter->reset_reason != VNIC_RESET_FAILOVER &&
537 adapter->reset_reason != VNIC_RESET_MOBILITY &&
538 adapter->reset_reason != VNIC_RESET_TIMEOUT)
539 send_request_unmap(adapter, ltb->map_id);
540
541 dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
542
543 ltb->buff = NULL;
544 /* mark this map_id free */
545 bitmap_clear(adapter->map_ids, ltb->map_id, 1);
546 ltb->map_id = 0;
547 }
548
549 /**
550 * free_ltb_set - free the given set of long term buffers (LTBS)
551 * @adapter: The ibmvnic adapter containing this ltb set
552 * @ltb_set: The ltb_set to be freed
553 *
554 * Free the set of LTBs in the given set.
555 */
556
static void free_ltb_set(struct ibmvnic_adapter *adapter,
			 struct ibmvnic_ltb_set *ltb_set)
559 {
560 int i;
561
562 for (i = 0; i < ltb_set->num_ltbs; i++)
		free_long_term_buff(adapter, &ltb_set->ltbs[i]);
564
565 kfree(ltb_set->ltbs);
566 ltb_set->ltbs = NULL;
567 ltb_set->num_ltbs = 0;
568 }
569
570 /**
571 * alloc_ltb_set() - Allocate a set of long term buffers (LTBs)
572 *
573 * @adapter: ibmvnic adapter associated to the LTB
574 * @ltb_set: container object for the set of LTBs
575 * @num_buffs: Number of buffers in the LTB
576 * @buff_size: Size of each buffer in the LTB
577 *
578 * Allocate a set of LTBs to accommodate @num_buffs buffers of @buff_size
 * each. We currently cap the size of each LTB at IBMVNIC_ONE_LTB_SIZE. If
 * the new set of LTBs has fewer LTBs than the old set, free the excess LTBs.
 * If the new set needs more than the old set had, allocate the remaining
 * ones. Try to reuse as many LTBs as possible and avoid reallocation.
583 *
584 * Any changes to this allocation strategy must be reflected in
585 * map_rxpool_buff_to_ltb() and map_txpool_buff_to_ltb().
586 */
static int alloc_ltb_set(struct ibmvnic_adapter *adapter,
			 struct ibmvnic_ltb_set *ltb_set, int num_buffs,
			 int buff_size)
590 {
591 struct device *dev = &adapter->vdev->dev;
592 struct ibmvnic_ltb_set old_set;
593 struct ibmvnic_ltb_set new_set;
594 int rem_size;
595 int tot_size; /* size of all ltbs */
596 int ltb_size; /* size of one ltb */
597 int nltbs;
598 int rc;
599 int n;
600 int i;
601
602 dev_dbg(dev, "%s() num_buffs %d, buff_size %d\n", __func__, num_buffs,
603 buff_size);
604
605 ltb_size = rounddown(IBMVNIC_ONE_LTB_SIZE, buff_size);
606 tot_size = num_buffs * buff_size;
607
608 if (ltb_size > tot_size)
609 ltb_size = tot_size;
610
611 nltbs = tot_size / ltb_size;
612 if (tot_size % ltb_size)
613 nltbs++;
614
615 old_set = *ltb_set;
616
617 if (old_set.num_ltbs == nltbs) {
618 new_set = old_set;
619 } else {
620 int tmp = nltbs * sizeof(struct ibmvnic_long_term_buff);
621
622 new_set.ltbs = kzalloc(tmp, GFP_KERNEL);
623 if (!new_set.ltbs)
624 return -ENOMEM;
625
626 new_set.num_ltbs = nltbs;
627
628 /* Free any excess ltbs in old set */
629 for (i = new_set.num_ltbs; i < old_set.num_ltbs; i++)
630 free_long_term_buff(adapter, &old_set.ltbs[i]);
631
632 /* Copy remaining ltbs to new set. All LTBs except the
633 * last one are of the same size. alloc_long_term_buff()
634 * will realloc if the size changes.
635 */
636 n = min(old_set.num_ltbs, new_set.num_ltbs);
637 for (i = 0; i < n; i++)
638 new_set.ltbs[i] = old_set.ltbs[i];
639
640 /* Any additional ltbs in new set will have NULL ltbs for
641 * now and will be allocated in alloc_long_term_buff().
642 */
643
		/* We no longer need the old_set so free it. Note that we
		 * may have reused some ltbs from the old set and freed the
		 * excess ltbs above. So we only need to free the container
		 * now, not the LTBs themselves. (i.e. don't free_ltb_set()!)
		 */
648 */
649 kfree(old_set.ltbs);
650 old_set.ltbs = NULL;
651 old_set.num_ltbs = 0;
652
653 /* Install the new set. If allocations fail below, we will
654 * retry later and know what size LTBs we need.
655 */
656 *ltb_set = new_set;
657 }
658
659 i = 0;
660 rem_size = tot_size;
661 while (rem_size) {
662 if (ltb_size > rem_size)
663 ltb_size = rem_size;
664
665 rem_size -= ltb_size;
666
667 rc = alloc_long_term_buff(adapter, &new_set.ltbs[i], ltb_size);
668 if (rc)
669 goto out;
670 i++;
671 }
672
673 WARN_ON(i != new_set.num_ltbs);
674
675 return 0;
676 out:
677 /* We may have allocated one/more LTBs before failing and we
678 * want to try and reuse on next reset. So don't free ltb set.
679 */
680 return rc;
681 }
682
683 /**
684 * map_rxpool_buf_to_ltb - Map given rxpool buffer to offset in an LTB.
685 * @rxpool: The receive buffer pool containing buffer
686 * @bufidx: Index of buffer in rxpool
687 * @ltbp: (Output) pointer to the long term buffer containing the buffer
688 * @offset: (Output) offset of buffer in the LTB from @ltbp
689 *
690 * Map the given buffer identified by [rxpool, bufidx] to an LTB in the
 * pool and its corresponding offset. For now, assume each LTB may be a
 * different size; this could be optimized based on the allocation
 * strategy in alloc_ltb_set().
694 */
static void map_rxpool_buf_to_ltb(struct ibmvnic_rx_pool *rxpool,
				  unsigned int bufidx,
				  struct ibmvnic_long_term_buff **ltbp,
				  unsigned int *offset)
699 {
700 struct ibmvnic_long_term_buff *ltb;
701 int nbufs; /* # of buffers in one ltb */
702 int i;
703
704 WARN_ON(bufidx >= rxpool->size);
705
706 for (i = 0; i < rxpool->ltb_set.num_ltbs; i++) {
707 ltb = &rxpool->ltb_set.ltbs[i];
708 nbufs = ltb->size / rxpool->buff_size;
709 if (bufidx < nbufs)
710 break;
711 bufidx -= nbufs;
712 }
713
714 *ltbp = ltb;
715 *offset = bufidx * rxpool->buff_size;
716 }
717
718 /**
719 * map_txpool_buf_to_ltb - Map given txpool buffer to offset in an LTB.
720 * @txpool: The transmit buffer pool containing buffer
721 * @bufidx: Index of buffer in txpool
722 * @ltbp: (Output) pointer to the long term buffer (LTB) containing the buffer
723 * @offset: (Output) offset of buffer in the LTB from @ltbp
724 *
725 * Map the given buffer identified by [txpool, bufidx] to an LTB in the
726 * pool and its corresponding offset.
727 */
static void map_txpool_buf_to_ltb(struct ibmvnic_tx_pool *txpool,
				  unsigned int bufidx,
				  struct ibmvnic_long_term_buff **ltbp,
				  unsigned int *offset)
732 {
733 struct ibmvnic_long_term_buff *ltb;
734 int nbufs; /* # of buffers in one ltb */
735 int i;
736
737 WARN_ON_ONCE(bufidx >= txpool->num_buffers);
738
739 for (i = 0; i < txpool->ltb_set.num_ltbs; i++) {
740 ltb = &txpool->ltb_set.ltbs[i];
741 nbufs = ltb->size / txpool->buf_size;
742 if (bufidx < nbufs)
743 break;
744 bufidx -= nbufs;
745 }
746
747 *ltbp = ltb;
748 *offset = bufidx * txpool->buf_size;
749 }
750
static void deactivate_rx_pools(struct ibmvnic_adapter *adapter)
752 {
753 int i;
754
755 for (i = 0; i < adapter->num_active_rx_pools; i++)
756 adapter->rx_pool[i].active = 0;
757 }
758
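/**
 * replenish_rx_pool() - Refill an rx pool and hand new buffers to the VIOS
 * @adapter: ibmvnic adapter
 * @pool: rx pool to replenish
 *
 * Allocate (or reuse) an skb for every free slot in the pool, map each one
 * to its spot in the pool's long term buffer, and post the buffers to the
 * VIOS in batches via send_subcrq_indirect(). On a transport failure, the
 * queued buffers are returned to the free map and, if the queue is closed
 * or a failover is pending, the pools are deactivated and the carrier is
 * turned off.
 */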
static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
			      struct ibmvnic_rx_pool *pool)
761 {
762 int count = pool->size - atomic_read(&pool->available);
763 u64 handle = adapter->rx_scrq[pool->index]->handle;
764 struct device *dev = &adapter->vdev->dev;
765 struct ibmvnic_ind_xmit_queue *ind_bufp;
766 struct ibmvnic_sub_crq_queue *rx_scrq;
767 struct ibmvnic_long_term_buff *ltb;
768 union sub_crq *sub_crq;
769 int buffers_added = 0;
770 unsigned long lpar_rc;
771 struct sk_buff *skb;
772 unsigned int offset;
773 dma_addr_t dma_addr;
774 unsigned char *dst;
775 int shift = 0;
776 int bufidx;
777 int i;
778
779 if (!pool->active)
780 return;
781
782 rx_scrq = adapter->rx_scrq[pool->index];
783 ind_bufp = &rx_scrq->ind_buf;
784
	/* netdev_alloc_skb() could have failed after we saved a few skbs
786 * in the indir_buf and we would not have sent them to VIOS yet.
787 * To account for them, start the loop at ind_bufp->index rather
788 * than 0. If we pushed all the skbs to VIOS, ind_bufp->index will
789 * be 0.
790 */
791 for (i = ind_bufp->index; i < count; ++i) {
792 bufidx = pool->free_map[pool->next_free];
793
		/* We may be reusing the skb from earlier resets. Allocate
795 * only if necessary. But since the LTB may have changed
796 * during reset (see init_rx_pools()), update LTB below
797 * even if reusing skb.
798 */
799 skb = pool->rx_buff[bufidx].skb;
800 if (!skb) {
801 skb = netdev_alloc_skb(adapter->netdev,
802 pool->buff_size);
803 if (!skb) {
804 dev_err(dev, "Couldn't replenish rx buff\n");
805 adapter->replenish_no_mem++;
806 break;
807 }
808 }
809
810 pool->free_map[pool->next_free] = IBMVNIC_INVALID_MAP;
811 pool->next_free = (pool->next_free + 1) % pool->size;
812
813 /* Copy the skb to the long term mapped DMA buffer */
		map_rxpool_buf_to_ltb(pool, bufidx, &ltb, &offset);
815 dst = ltb->buff + offset;
816 memset(dst, 0, pool->buff_size);
817 dma_addr = ltb->addr + offset;
818
819 /* add the skb to an rx_buff in the pool */
820 pool->rx_buff[bufidx].data = dst;
821 pool->rx_buff[bufidx].dma = dma_addr;
822 pool->rx_buff[bufidx].skb = skb;
823 pool->rx_buff[bufidx].pool_index = pool->index;
824 pool->rx_buff[bufidx].size = pool->buff_size;
825
826 /* queue the rx_buff for the next send_subcrq_indirect */
827 sub_crq = &ind_bufp->indir_arr[ind_bufp->index++];
828 memset(sub_crq, 0, sizeof(*sub_crq));
829 sub_crq->rx_add.first = IBMVNIC_CRQ_CMD;
830 sub_crq->rx_add.correlator =
831 cpu_to_be64((u64)&pool->rx_buff[bufidx]);
832 sub_crq->rx_add.ioba = cpu_to_be32(dma_addr);
833 sub_crq->rx_add.map_id = ltb->map_id;
834
835 /* The length field of the sCRQ is defined to be 24 bits so the
836 * buffer size needs to be left shifted by a byte before it is
837 * converted to big endian to prevent the last byte from being
838 * truncated.
839 */
840 #ifdef __LITTLE_ENDIAN__
841 shift = 8;
842 #endif
843 sub_crq->rx_add.len = cpu_to_be32(pool->buff_size << shift);
844
845 /* if send_subcrq_indirect queue is full, flush to VIOS */
846 if (ind_bufp->index == IBMVNIC_MAX_IND_DESCS ||
847 i == count - 1) {
848 lpar_rc =
849 send_subcrq_indirect(adapter, handle,
850 (u64)ind_bufp->indir_dma,
851 (u64)ind_bufp->index);
852 if (lpar_rc != H_SUCCESS)
853 goto failure;
854 buffers_added += ind_bufp->index;
855 adapter->replenish_add_buff_success += ind_bufp->index;
856 ind_bufp->index = 0;
857 }
858 }
859 atomic_add(buffers_added, &pool->available);
860 return;
861
862 failure:
863 if (lpar_rc != H_PARAMETER && lpar_rc != H_CLOSED)
864 dev_err_ratelimited(dev, "rx: replenish packet buffer failed\n");
865 for (i = ind_bufp->index - 1; i >= 0; --i) {
866 struct ibmvnic_rx_buff *rx_buff;
867
868 pool->next_free = pool->next_free == 0 ?
869 pool->size - 1 : pool->next_free - 1;
870 sub_crq = &ind_bufp->indir_arr[i];
871 rx_buff = (struct ibmvnic_rx_buff *)
872 be64_to_cpu(sub_crq->rx_add.correlator);
873 bufidx = (int)(rx_buff - pool->rx_buff);
874 pool->free_map[pool->next_free] = bufidx;
875 dev_kfree_skb_any(pool->rx_buff[bufidx].skb);
876 pool->rx_buff[bufidx].skb = NULL;
877 }
878 adapter->replenish_add_buff_failure += ind_bufp->index;
879 atomic_add(buffers_added, &pool->available);
880 ind_bufp->index = 0;
881 if (lpar_rc == H_CLOSED || adapter->failover_pending) {
882 /* Disable buffer pool replenishment and report carrier off if
883 * queue is closed or pending failover.
884 * Firmware guarantees that a signal will be sent to the
885 * driver, triggering a reset.
886 */
887 deactivate_rx_pools(adapter);
888 netif_carrier_off(adapter->netdev);
889 }
890 }
891
static void replenish_pools(struct ibmvnic_adapter *adapter)
893 {
894 int i;
895
896 adapter->replenish_task_cycles++;
897 for (i = 0; i < adapter->num_active_rx_pools; i++) {
898 if (adapter->rx_pool[i].active)
899 replenish_rx_pool(adapter, &adapter->rx_pool[i]);
900 }
901
902 netdev_dbg(adapter->netdev, "Replenished %d pools\n", i);
903 }
904
static void release_stats_buffers(struct ibmvnic_adapter *adapter)
906 {
907 kfree(adapter->tx_stats_buffers);
908 kfree(adapter->rx_stats_buffers);
909 adapter->tx_stats_buffers = NULL;
910 adapter->rx_stats_buffers = NULL;
911 }
912
static int init_stats_buffers(struct ibmvnic_adapter *adapter)
914 {
915 adapter->tx_stats_buffers =
916 kcalloc(IBMVNIC_MAX_QUEUES,
917 sizeof(struct ibmvnic_tx_queue_stats),
918 GFP_KERNEL);
919 if (!adapter->tx_stats_buffers)
920 return -ENOMEM;
921
922 adapter->rx_stats_buffers =
923 kcalloc(IBMVNIC_MAX_QUEUES,
924 sizeof(struct ibmvnic_rx_queue_stats),
925 GFP_KERNEL);
926 if (!adapter->rx_stats_buffers)
927 return -ENOMEM;
928
929 return 0;
930 }
931
static void release_stats_token(struct ibmvnic_adapter *adapter)
933 {
934 struct device *dev = &adapter->vdev->dev;
935
936 if (!adapter->stats_token)
937 return;
938
939 dma_unmap_single(dev, adapter->stats_token,
940 sizeof(struct ibmvnic_statistics),
941 DMA_FROM_DEVICE);
942 adapter->stats_token = 0;
943 }
944
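/* DMA map the adapter statistics buffer and save the resulting handle as
 * the stats token used in later statistics requests to the VNIC server.
 */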
static int init_stats_token(struct ibmvnic_adapter *adapter)
946 {
947 struct device *dev = &adapter->vdev->dev;
948 dma_addr_t stok;
949 int rc;
950
951 stok = dma_map_single(dev, &adapter->stats,
952 sizeof(struct ibmvnic_statistics),
953 DMA_FROM_DEVICE);
954 rc = dma_mapping_error(dev, stok);
955 if (rc) {
956 dev_err(dev, "Couldn't map stats buffer, rc = %d\n", rc);
957 return rc;
958 }
959
960 adapter->stats_token = stok;
961 netdev_dbg(adapter->netdev, "Stats token initialized (%llx)\n", stok);
962 return 0;
963 }
964
965 /**
966 * release_rx_pools() - Release any rx pools attached to @adapter.
967 * @adapter: ibmvnic adapter
968 *
969 * Safe to call this multiple times - even if no pools are attached.
970 */
static void release_rx_pools(struct ibmvnic_adapter *adapter)
972 {
973 struct ibmvnic_rx_pool *rx_pool;
974 int i, j;
975
976 if (!adapter->rx_pool)
977 return;
978
979 for (i = 0; i < adapter->num_active_rx_pools; i++) {
980 rx_pool = &adapter->rx_pool[i];
981
982 netdev_dbg(adapter->netdev, "Releasing rx_pool[%d]\n", i);
983
984 kfree(rx_pool->free_map);
985
986 free_ltb_set(adapter, &rx_pool->ltb_set);
987
988 if (!rx_pool->rx_buff)
989 continue;
990
991 for (j = 0; j < rx_pool->size; j++) {
992 if (rx_pool->rx_buff[j].skb) {
993 dev_kfree_skb_any(rx_pool->rx_buff[j].skb);
994 rx_pool->rx_buff[j].skb = NULL;
995 }
996 }
997
998 kfree(rx_pool->rx_buff);
999 }
1000
1001 kfree(adapter->rx_pool);
1002 adapter->rx_pool = NULL;
1003 adapter->num_active_rx_pools = 0;
1004 adapter->prev_rx_pool_size = 0;
1005 }
1006
1007 /**
1008 * reuse_rx_pools() - Check if the existing rx pools can be reused.
1009 * @adapter: ibmvnic adapter
1010 *
1011 * Check if the existing rx pools in the adapter can be reused. The
1012 * pools can be reused if the pool parameters (number of pools,
1013 * number of buffers in the pool and size of each buffer) have not
1014 * changed.
1015 *
1016 * NOTE: This assumes that all pools have the same number of buffers
1017 * which is the case currently. If that changes, we must fix this.
1018 *
1019 * Return: true if the rx pools can be reused, false otherwise.
1020 */
static bool reuse_rx_pools(struct ibmvnic_adapter *adapter)
1022 {
1023 u64 old_num_pools, new_num_pools;
1024 u64 old_pool_size, new_pool_size;
1025 u64 old_buff_size, new_buff_size;
1026
1027 if (!adapter->rx_pool)
1028 return false;
1029
1030 old_num_pools = adapter->num_active_rx_pools;
1031 new_num_pools = adapter->req_rx_queues;
1032
1033 old_pool_size = adapter->prev_rx_pool_size;
1034 new_pool_size = adapter->req_rx_add_entries_per_subcrq;
1035
1036 old_buff_size = adapter->prev_rx_buf_sz;
1037 new_buff_size = adapter->cur_rx_buf_sz;
1038
1039 if (old_buff_size != new_buff_size ||
1040 old_num_pools != new_num_pools ||
1041 old_pool_size != new_pool_size)
1042 return false;
1043
1044 return true;
1045 }
1046
1047 /**
1048 * init_rx_pools(): Initialize the set of receiver pools in the adapter.
1049 * @netdev: net device associated with the vnic interface
1050 *
1051 * Initialize the set of receiver pools in the ibmvnic adapter associated
1052 * with the net_device @netdev. If possible, reuse the existing rx pools.
1053 * Otherwise free any existing pools and allocate a new set of pools
1054 * before initializing them.
1055 *
1056 * Return: 0 on success and negative value on error.
1057 */
static int init_rx_pools(struct net_device *netdev)
1059 {
1060 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1061 struct device *dev = &adapter->vdev->dev;
1062 struct ibmvnic_rx_pool *rx_pool;
1063 u64 num_pools;
1064 u64 pool_size; /* # of buffers in one pool */
1065 u64 buff_size;
1066 int i, j, rc;
1067
1068 pool_size = adapter->req_rx_add_entries_per_subcrq;
1069 num_pools = adapter->req_rx_queues;
1070 buff_size = adapter->cur_rx_buf_sz;
1071
1072 if (reuse_rx_pools(adapter)) {
1073 dev_dbg(dev, "Reusing rx pools\n");
1074 goto update_ltb;
1075 }
1076
1077 /* Allocate/populate the pools. */
1078 release_rx_pools(adapter);
1079
1080 adapter->rx_pool = kcalloc(num_pools,
1081 sizeof(struct ibmvnic_rx_pool),
1082 GFP_KERNEL);
1083 if (!adapter->rx_pool) {
1084 dev_err(dev, "Failed to allocate rx pools\n");
1085 return -ENOMEM;
1086 }
1087
1088 /* Set num_active_rx_pools early. If we fail below after partial
1089 * allocation, release_rx_pools() will know how many to look for.
1090 */
1091 adapter->num_active_rx_pools = num_pools;
1092
1093 for (i = 0; i < num_pools; i++) {
1094 rx_pool = &adapter->rx_pool[i];
1095
1096 netdev_dbg(adapter->netdev,
1097 "Initializing rx_pool[%d], %lld buffs, %lld bytes each\n",
1098 i, pool_size, buff_size);
1099
1100 rx_pool->size = pool_size;
1101 rx_pool->index = i;
1102 rx_pool->buff_size = ALIGN(buff_size, L1_CACHE_BYTES);
1103
1104 rx_pool->free_map = kcalloc(rx_pool->size, sizeof(int),
1105 GFP_KERNEL);
1106 if (!rx_pool->free_map) {
1107 dev_err(dev, "Couldn't alloc free_map %d\n", i);
1108 rc = -ENOMEM;
1109 goto out_release;
1110 }
1111
1112 rx_pool->rx_buff = kcalloc(rx_pool->size,
1113 sizeof(struct ibmvnic_rx_buff),
1114 GFP_KERNEL);
1115 if (!rx_pool->rx_buff) {
1116 dev_err(dev, "Couldn't alloc rx buffers\n");
1117 rc = -ENOMEM;
1118 goto out_release;
1119 }
1120 }
1121
1122 adapter->prev_rx_pool_size = pool_size;
1123 adapter->prev_rx_buf_sz = adapter->cur_rx_buf_sz;
1124
1125 update_ltb:
1126 for (i = 0; i < num_pools; i++) {
1127 rx_pool = &adapter->rx_pool[i];
1128 dev_dbg(dev, "Updating LTB for rx pool %d [%d, %d]\n",
1129 i, rx_pool->size, rx_pool->buff_size);
1130
1131 rc = alloc_ltb_set(adapter, &rx_pool->ltb_set,
1132 rx_pool->size, rx_pool->buff_size);
1133 if (rc)
1134 goto out;
1135
1136 for (j = 0; j < rx_pool->size; ++j) {
1137 struct ibmvnic_rx_buff *rx_buff;
1138
1139 rx_pool->free_map[j] = j;
1140
1141 /* NOTE: Don't clear rx_buff->skb here - will leak
1142 * memory! replenish_rx_pool() will reuse skbs or
1143 * allocate as necessary.
1144 */
1145 rx_buff = &rx_pool->rx_buff[j];
1146 rx_buff->dma = 0;
1147 rx_buff->data = 0;
1148 rx_buff->size = 0;
1149 rx_buff->pool_index = 0;
1150 }
1151
1152 /* Mark pool "empty" so replenish_rx_pools() will
1153 * update the LTB info for each buffer
1154 */
1155 atomic_set(&rx_pool->available, 0);
1156 rx_pool->next_alloc = 0;
1157 rx_pool->next_free = 0;
1158 /* replenish_rx_pool() may have called deactivate_rx_pools()
1159 * on failover. Ensure pool is active now.
1160 */
1161 rx_pool->active = 1;
1162 }
1163 return 0;
1164 out_release:
1165 release_rx_pools(adapter);
1166 out:
1167 /* We failed to allocate one or more LTBs or map them on the VIOS.
1168 * Hold onto the pools and any LTBs that we did allocate/map.
1169 */
1170 return rc;
1171 }
1172
static void release_vpd_data(struct ibmvnic_adapter *adapter)
1174 {
1175 if (!adapter->vpd)
1176 return;
1177
1178 kfree(adapter->vpd->buff);
1179 kfree(adapter->vpd);
1180
1181 adapter->vpd = NULL;
1182 }
1183
static void release_one_tx_pool(struct ibmvnic_adapter *adapter,
				struct ibmvnic_tx_pool *tx_pool)
1186 {
1187 kfree(tx_pool->tx_buff);
1188 kfree(tx_pool->free_map);
1189 free_ltb_set(adapter, &tx_pool->ltb_set);
1190 }
1191
1192 /**
1193 * release_tx_pools() - Release any tx pools attached to @adapter.
1194 * @adapter: ibmvnic adapter
1195 *
1196 * Safe to call this multiple times - even if no pools are attached.
1197 */
static void release_tx_pools(struct ibmvnic_adapter *adapter)
1199 {
1200 int i;
1201
1202 /* init_tx_pools() ensures that ->tx_pool and ->tso_pool are
1203 * both NULL or both non-NULL. So we only need to check one.
1204 */
1205 if (!adapter->tx_pool)
1206 return;
1207
1208 for (i = 0; i < adapter->num_active_tx_pools; i++) {
1209 release_one_tx_pool(adapter, &adapter->tx_pool[i]);
1210 release_one_tx_pool(adapter, &adapter->tso_pool[i]);
1211 }
1212
1213 kfree(adapter->tx_pool);
1214 adapter->tx_pool = NULL;
1215 kfree(adapter->tso_pool);
1216 adapter->tso_pool = NULL;
1217 adapter->num_active_tx_pools = 0;
1218 adapter->prev_tx_pool_size = 0;
1219 }
1220
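/**
 * init_one_tx_pool() - Allocate the buffer tracking arrays for one tx pool
 * @netdev: net device associated with the vnic interface
 * @tx_pool: tx pool to initialize
 * @pool_size: number of buffers in the pool
 * @buf_size: size of each buffer
 *
 * Note: the long term buffers backing the pool are allocated separately,
 * in init_tx_pools() via alloc_ltb_set().
 *
 * Return: 0 on success, -ENOMEM on allocation failure.
 */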
static int init_one_tx_pool(struct net_device *netdev,
			    struct ibmvnic_tx_pool *tx_pool,
			    int pool_size, int buf_size)
1224 {
1225 int i;
1226
1227 tx_pool->tx_buff = kcalloc(pool_size,
1228 sizeof(struct ibmvnic_tx_buff),
1229 GFP_KERNEL);
1230 if (!tx_pool->tx_buff)
1231 return -ENOMEM;
1232
1233 tx_pool->free_map = kcalloc(pool_size, sizeof(int), GFP_KERNEL);
1234 if (!tx_pool->free_map) {
1235 kfree(tx_pool->tx_buff);
1236 tx_pool->tx_buff = NULL;
1237 return -ENOMEM;
1238 }
1239
1240 for (i = 0; i < pool_size; i++)
1241 tx_pool->free_map[i] = i;
1242
1243 tx_pool->consumer_index = 0;
1244 tx_pool->producer_index = 0;
1245 tx_pool->num_buffers = pool_size;
1246 tx_pool->buf_size = buf_size;
1247
1248 return 0;
1249 }
1250
1251 /**
1252 * reuse_tx_pools() - Check if the existing tx pools can be reused.
1253 * @adapter: ibmvnic adapter
1254 *
1255 * Check if the existing tx pools in the adapter can be reused. The
1256 * pools can be reused if the pool parameters (number of pools,
1257 * number of buffers in the pool and mtu) have not changed.
1258 *
1259 * NOTE: This assumes that all pools have the same number of buffers
1260 * which is the case currently. If that changes, we must fix this.
1261 *
1262 * Return: true if the tx pools can be reused, false otherwise.
1263 */
static bool reuse_tx_pools(struct ibmvnic_adapter *adapter)
1265 {
1266 u64 old_num_pools, new_num_pools;
1267 u64 old_pool_size, new_pool_size;
1268 u64 old_mtu, new_mtu;
1269
1270 if (!adapter->tx_pool)
1271 return false;
1272
1273 old_num_pools = adapter->num_active_tx_pools;
1274 new_num_pools = adapter->num_active_tx_scrqs;
1275 old_pool_size = adapter->prev_tx_pool_size;
1276 new_pool_size = adapter->req_tx_entries_per_subcrq;
1277 old_mtu = adapter->prev_mtu;
1278 new_mtu = adapter->req_mtu;
1279
1280 if (old_mtu != new_mtu ||
1281 old_num_pools != new_num_pools ||
1282 old_pool_size != new_pool_size)
1283 return false;
1284
1285 return true;
1286 }
1287
1288 /**
1289 * init_tx_pools(): Initialize the set of transmit pools in the adapter.
1290 * @netdev: net device associated with the vnic interface
1291 *
1292 * Initialize the set of transmit pools in the ibmvnic adapter associated
1293 * with the net_device @netdev. If possible, reuse the existing tx pools.
1294 * Otherwise free any existing pools and allocate a new set of pools
1295 * before initializing them.
1296 *
1297 * Return: 0 on success and negative value on error.
1298 */
static int init_tx_pools(struct net_device *netdev)
1300 {
1301 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1302 struct device *dev = &adapter->vdev->dev;
1303 int num_pools;
1304 u64 pool_size; /* # of buffers in pool */
1305 u64 buff_size;
1306 int i, j, rc;
1307
1308 num_pools = adapter->req_tx_queues;
1309
1310 /* We must notify the VIOS about the LTB on all resets - but we only
1311 * need to alloc/populate pools if either the number of buffers or
1312 * size of each buffer in the pool has changed.
1313 */
1314 if (reuse_tx_pools(adapter)) {
1315 netdev_dbg(netdev, "Reusing tx pools\n");
1316 goto update_ltb;
1317 }
1318
1319 /* Allocate/populate the pools. */
1320 release_tx_pools(adapter);
1321
1322 pool_size = adapter->req_tx_entries_per_subcrq;
1323 num_pools = adapter->num_active_tx_scrqs;
1324
1325 adapter->tx_pool = kcalloc(num_pools,
1326 sizeof(struct ibmvnic_tx_pool), GFP_KERNEL);
1327 if (!adapter->tx_pool)
1328 return -ENOMEM;
1329
1330 adapter->tso_pool = kcalloc(num_pools,
1331 sizeof(struct ibmvnic_tx_pool), GFP_KERNEL);
1332 /* To simplify release_tx_pools() ensure that ->tx_pool and
1333 * ->tso_pool are either both NULL or both non-NULL.
1334 */
1335 if (!adapter->tso_pool) {
1336 kfree(adapter->tx_pool);
1337 adapter->tx_pool = NULL;
1338 return -ENOMEM;
1339 }
1340
1341 /* Set num_active_tx_pools early. If we fail below after partial
1342 * allocation, release_tx_pools() will know how many to look for.
1343 */
1344 adapter->num_active_tx_pools = num_pools;
1345
1346 buff_size = adapter->req_mtu + VLAN_HLEN;
1347 buff_size = ALIGN(buff_size, L1_CACHE_BYTES);
1348
1349 for (i = 0; i < num_pools; i++) {
1350 dev_dbg(dev, "Init tx pool %d [%llu, %llu]\n",
1351 i, adapter->req_tx_entries_per_subcrq, buff_size);
1352
1353 rc = init_one_tx_pool(netdev, &adapter->tx_pool[i],
1354 pool_size, buff_size);
1355 if (rc)
1356 goto out_release;
1357
1358 rc = init_one_tx_pool(netdev, &adapter->tso_pool[i],
1359 IBMVNIC_TSO_BUFS,
1360 IBMVNIC_TSO_BUF_SZ);
1361 if (rc)
1362 goto out_release;
1363 }
1364
1365 adapter->prev_tx_pool_size = pool_size;
1366 adapter->prev_mtu = adapter->req_mtu;
1367
1368 update_ltb:
1369 /* NOTE: All tx_pools have the same number of buffers (which is
1370 * same as pool_size). All tso_pools have IBMVNIC_TSO_BUFS
 * buffers (see the calls to init_one_tx_pool() for these).
1372 * For consistency, we use tx_pool->num_buffers and
1373 * tso_pool->num_buffers below.
1374 */
1375 rc = -1;
1376 for (i = 0; i < num_pools; i++) {
1377 struct ibmvnic_tx_pool *tso_pool;
1378 struct ibmvnic_tx_pool *tx_pool;
1379
1380 tx_pool = &adapter->tx_pool[i];
1381
1382 dev_dbg(dev, "Updating LTB for tx pool %d [%d, %d]\n",
1383 i, tx_pool->num_buffers, tx_pool->buf_size);
1384
1385 rc = alloc_ltb_set(adapter, &tx_pool->ltb_set,
1386 tx_pool->num_buffers, tx_pool->buf_size);
1387 if (rc)
1388 goto out;
1389
1390 tx_pool->consumer_index = 0;
1391 tx_pool->producer_index = 0;
1392
1393 for (j = 0; j < tx_pool->num_buffers; j++)
1394 tx_pool->free_map[j] = j;
1395
1396 tso_pool = &adapter->tso_pool[i];
1397
1398 dev_dbg(dev, "Updating LTB for tso pool %d [%d, %d]\n",
1399 i, tso_pool->num_buffers, tso_pool->buf_size);
1400
1401 rc = alloc_ltb_set(adapter, &tso_pool->ltb_set,
1402 tso_pool->num_buffers, tso_pool->buf_size);
1403 if (rc)
1404 goto out;
1405
1406 tso_pool->consumer_index = 0;
1407 tso_pool->producer_index = 0;
1408
1409 for (j = 0; j < tso_pool->num_buffers; j++)
1410 tso_pool->free_map[j] = j;
1411 }
1412
1413 return 0;
1414 out_release:
1415 release_tx_pools(adapter);
1416 out:
1417 /* We failed to allocate one or more LTBs or map them on the VIOS.
1418 * Hold onto the pools and any LTBs that we did allocate/map.
1419 */
1420 return rc;
1421 }
1422
static void ibmvnic_napi_enable(struct ibmvnic_adapter *adapter)
1424 {
1425 int i;
1426
1427 if (adapter->napi_enabled)
1428 return;
1429
1430 for (i = 0; i < adapter->req_rx_queues; i++)
1431 napi_enable(&adapter->napi[i]);
1432
1433 adapter->napi_enabled = true;
1434 }
1435
static void ibmvnic_napi_disable(struct ibmvnic_adapter *adapter)
1437 {
1438 int i;
1439
1440 if (!adapter->napi_enabled)
1441 return;
1442
1443 for (i = 0; i < adapter->req_rx_queues; i++) {
1444 netdev_dbg(adapter->netdev, "Disabling napi[%d]\n", i);
1445 napi_disable(&adapter->napi[i]);
1446 }
1447
1448 adapter->napi_enabled = false;
1449 }
1450
static int init_napi(struct ibmvnic_adapter *adapter)
1452 {
1453 int i;
1454
1455 adapter->napi = kcalloc(adapter->req_rx_queues,
1456 sizeof(struct napi_struct), GFP_KERNEL);
1457 if (!adapter->napi)
1458 return -ENOMEM;
1459
1460 for (i = 0; i < adapter->req_rx_queues; i++) {
1461 netdev_dbg(adapter->netdev, "Adding napi[%d]\n", i);
1462 netif_napi_add(adapter->netdev, &adapter->napi[i],
1463 ibmvnic_poll);
1464 }
1465
1466 adapter->num_active_rx_napi = adapter->req_rx_queues;
1467 return 0;
1468 }
1469
static void release_napi(struct ibmvnic_adapter *adapter)
1471 {
1472 int i;
1473
1474 if (!adapter->napi)
1475 return;
1476
1477 for (i = 0; i < adapter->num_active_rx_napi; i++) {
1478 netdev_dbg(adapter->netdev, "Releasing napi[%d]\n", i);
1479 netif_napi_del(&adapter->napi[i]);
1480 }
1481
1482 kfree(adapter->napi);
1483 adapter->napi = NULL;
1484 adapter->num_active_rx_napi = 0;
1485 adapter->napi_enabled = false;
1486 }
1487
static const char *adapter_state_to_string(enum vnic_state state)
1489 {
1490 switch (state) {
1491 case VNIC_PROBING:
1492 return "PROBING";
1493 case VNIC_PROBED:
1494 return "PROBED";
1495 case VNIC_OPENING:
1496 return "OPENING";
1497 case VNIC_OPEN:
1498 return "OPEN";
1499 case VNIC_CLOSING:
1500 return "CLOSING";
1501 case VNIC_CLOSED:
1502 return "CLOSED";
1503 case VNIC_REMOVING:
1504 return "REMOVING";
1505 case VNIC_REMOVED:
1506 return "REMOVED";
1507 case VNIC_DOWN:
1508 return "DOWN";
1509 }
1510 return "UNKNOWN";
1511 }
1512
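/**
 * ibmvnic_login() - Log in to the VNIC server
 * @netdev: net device associated with the vnic interface
 *
 * Send a login request and wait for the response. Retries on ABORTED and
 * PARTIALSUCCESS responses; on other failures or timeouts, free and
 * re-register the CRQ before trying again so the VIOS does not treat the
 * driver as already logged in.
 *
 * Return: 0 on success and negative value on error.
 */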
static int ibmvnic_login(struct net_device *netdev)
1514 {
1515 unsigned long flags, timeout = msecs_to_jiffies(20000);
1516 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1517 int retry_count = 0;
1518 int retries = 10;
1519 bool retry;
1520 int rc;
1521
1522 do {
1523 retry = false;
1524 if (retry_count > retries) {
1525 netdev_warn(netdev, "Login attempts exceeded\n");
1526 return -EACCES;
1527 }
1528
1529 adapter->init_done_rc = 0;
1530 reinit_completion(&adapter->init_done);
1531 rc = send_login(adapter);
1532 if (rc)
1533 return rc;
1534
1535 if (!wait_for_completion_timeout(&adapter->init_done,
1536 timeout)) {
1537 netdev_warn(netdev, "Login timed out\n");
1538 adapter->login_pending = false;
1539 goto partial_reset;
1540 }
1541
1542 if (adapter->init_done_rc == ABORTED) {
1543 netdev_warn(netdev, "Login aborted, retrying...\n");
1544 retry = true;
1545 adapter->init_done_rc = 0;
1546 retry_count++;
1547 /* FW or device may be busy, so
1548 * wait a bit before retrying login
1549 */
1550 msleep(500);
1551 } else if (adapter->init_done_rc == PARTIALSUCCESS) {
1552 retry_count++;
1553 release_sub_crqs(adapter, 1);
1554
1555 retry = true;
1556 netdev_dbg(netdev,
1557 "Received partial success, retrying...\n");
1558 adapter->init_done_rc = 0;
1559 reinit_completion(&adapter->init_done);
1560 send_query_cap(adapter);
1561 if (!wait_for_completion_timeout(&adapter->init_done,
1562 timeout)) {
1563 netdev_warn(netdev,
1564 "Capabilities query timed out\n");
1565 return -ETIMEDOUT;
1566 }
1567
1568 rc = init_sub_crqs(adapter);
1569 if (rc) {
1570 netdev_warn(netdev,
1571 "SCRQ initialization failed\n");
1572 return rc;
1573 }
1574
1575 rc = init_sub_crq_irqs(adapter);
1576 if (rc) {
1577 netdev_warn(netdev,
1578 "SCRQ irq initialization failed\n");
1579 return rc;
1580 }
1581 /* Default/timeout error handling, reset and start fresh */
1582 } else if (adapter->init_done_rc) {
1583 netdev_warn(netdev, "Adapter login failed, init_done_rc = %d\n",
1584 adapter->init_done_rc);
1585
1586 partial_reset:
1587 /* adapter login failed, so free any CRQs or sub-CRQs
1588 * and register again before attempting to login again.
1589 * If we don't do this then the VIOS may think that
1590 * we are already logged in and reject any subsequent
1591 * attempts
1592 */
1593 netdev_warn(netdev,
1594 "Freeing and re-registering CRQs before attempting to login again\n");
1595 retry = true;
1596 adapter->init_done_rc = 0;
1597 release_sub_crqs(adapter, true);
1598 /* Much of this is similar logic as ibmvnic_probe(),
1599 * we are essentially re-initializing communication
1600 * with the server. We really should not run any
1601 * resets/failovers here because this is already a form
1602 * of reset and we do not want parallel resets occurring
1603 */
1604 do {
1605 reinit_init_done(adapter);
1606 /* Clear any failovers we got in the previous
1607 * pass since we are re-initializing the CRQ
1608 */
1609 adapter->failover_pending = false;
1610 release_crq_queue(adapter);
1611 /* If we don't sleep here then we risk an
1612 * unnecessary failover event from the VIOS.
1613 * This is a known VIOS issue caused by a vnic
1614 * device freeing and registering a CRQ too
1615 * quickly.
1616 */
1617 msleep(1500);
1618 /* Avoid any resets, since we are currently
1619 * resetting.
1620 */
1621 spin_lock_irqsave(&adapter->rwi_lock, flags);
1622 flush_reset_queue(adapter);
1623 spin_unlock_irqrestore(&adapter->rwi_lock,
1624 flags);
1625
1626 rc = init_crq_queue(adapter);
1627 if (rc) {
1628 netdev_err(netdev, "login recovery: init CRQ failed %d\n",
1629 rc);
1630 return -EIO;
1631 }
1632
1633 rc = ibmvnic_reset_init(adapter, false);
1634 if (rc)
1635 netdev_err(netdev, "login recovery: Reset init failed %d\n",
1636 rc);
				/* IBMVNIC_CRQ_INIT will return EAGAIN if it
				 * fails. Since ibmvnic_reset_init() frees
				 * IRQs on failure, we won't be able to
				 * receive new CRQs, so we need to keep
				 * trying. probe() handles this similarly.
				 */
1643 } while (rc == -EAGAIN && retry_count++ < retries);
1644 }
1645 } while (retry);
1646
1647 __ibmvnic_set_mac(netdev, adapter->mac_addr);
1648
1649 netdev_dbg(netdev, "[S:%s] Login succeeded\n", adapter_state_to_string(adapter->state));
1650 return 0;
1651 }
1652
static void release_login_buffer(struct ibmvnic_adapter *adapter)
1654 {
1655 if (!adapter->login_buf)
1656 return;
1657
1658 dma_unmap_single(&adapter->vdev->dev, adapter->login_buf_token,
1659 adapter->login_buf_sz, DMA_TO_DEVICE);
1660 kfree(adapter->login_buf);
1661 adapter->login_buf = NULL;
1662 }
1663
static void release_login_rsp_buffer(struct ibmvnic_adapter *adapter)
1665 {
1666 if (!adapter->login_rsp_buf)
1667 return;
1668
1669 dma_unmap_single(&adapter->vdev->dev, adapter->login_rsp_buf_token,
1670 adapter->login_rsp_buf_sz, DMA_FROM_DEVICE);
1671 kfree(adapter->login_rsp_buf);
1672 adapter->login_rsp_buf = NULL;
1673 }
1674
static void release_resources(struct ibmvnic_adapter *adapter)
1676 {
1677 release_vpd_data(adapter);
1678
1679 release_napi(adapter);
1680 release_login_buffer(adapter);
1681 release_login_rsp_buffer(adapter);
1682 }
1683
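/**
 * set_link_state() - Ask the VNIC server to change the logical link state
 * @adapter: ibmvnic adapter
 * @link_state: logical link state to request
 *
 * The request is re-sent after a delay if the server reports partial
 * success.
 *
 * Return: 0 on success and negative value on error or timeout.
 */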
static int set_link_state(struct ibmvnic_adapter *adapter, u8 link_state)
1685 {
1686 struct net_device *netdev = adapter->netdev;
1687 unsigned long timeout = msecs_to_jiffies(20000);
1688 union ibmvnic_crq crq;
1689 bool resend;
1690 int rc;
1691
1692 netdev_dbg(netdev, "setting link state %d\n", link_state);
1693
1694 memset(&crq, 0, sizeof(crq));
1695 crq.logical_link_state.first = IBMVNIC_CRQ_CMD;
1696 crq.logical_link_state.cmd = LOGICAL_LINK_STATE;
1697 crq.logical_link_state.link_state = link_state;
1698
1699 do {
1700 resend = false;
1701
1702 reinit_completion(&adapter->init_done);
1703 rc = ibmvnic_send_crq(adapter, &crq);
1704 if (rc) {
1705 netdev_err(netdev, "Failed to set link state\n");
1706 return rc;
1707 }
1708
1709 if (!wait_for_completion_timeout(&adapter->init_done,
1710 timeout)) {
1711 netdev_err(netdev, "timeout setting link state\n");
1712 return -ETIMEDOUT;
1713 }
1714
1715 if (adapter->init_done_rc == PARTIALSUCCESS) {
			/* Partial success, delay and re-send */
1717 mdelay(1000);
1718 resend = true;
1719 } else if (adapter->init_done_rc) {
1720 netdev_warn(netdev, "Unable to set link state, rc=%d\n",
1721 adapter->init_done_rc);
1722 return adapter->init_done_rc;
1723 }
1724 } while (resend);
1725
1726 return 0;
1727 }
1728
1729 static int set_real_num_queues(struct net_device *netdev)
1730 {
1731 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1732 int rc;
1733
1734 netdev_dbg(netdev, "Setting real tx/rx queues (%llx/%llx)\n",
1735 adapter->req_tx_queues, adapter->req_rx_queues);
1736
1737 rc = netif_set_real_num_tx_queues(netdev, adapter->req_tx_queues);
1738 if (rc) {
1739 netdev_err(netdev, "failed to set the number of tx queues\n");
1740 return rc;
1741 }
1742
1743 rc = netif_set_real_num_rx_queues(netdev, adapter->req_rx_queues);
1744 if (rc)
1745 netdev_err(netdev, "failed to set the number of rx queues\n");
1746
1747 return rc;
1748 }
1749
1750 static int ibmvnic_get_vpd(struct ibmvnic_adapter *adapter)
1751 {
1752 struct device *dev = &adapter->vdev->dev;
1753 union ibmvnic_crq crq;
1754 int len = 0;
1755 int rc;
1756
1757 if (adapter->vpd->buff)
1758 len = adapter->vpd->len;
1759
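/* VPD retrieval is a two-step exchange: first ask the VNIC server for
 * the VPD size (GET_VPD_SIZE), then DMA-map a buffer of that size and
 * request the contents (GET_VPD) below.
 */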
1760 mutex_lock(&adapter->fw_lock);
1761 adapter->fw_done_rc = 0;
1762 reinit_completion(&adapter->fw_done);
1763
1764 crq.get_vpd_size.first = IBMVNIC_CRQ_CMD;
1765 crq.get_vpd_size.cmd = GET_VPD_SIZE;
1766 rc = ibmvnic_send_crq(adapter, &crq);
1767 if (rc) {
1768 mutex_unlock(&adapter->fw_lock);
1769 return rc;
1770 }
1771
1772 rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
1773 if (rc) {
1774 dev_err(dev, "Could not retrieve VPD size, rc = %d\n", rc);
1775 mutex_unlock(&adapter->fw_lock);
1776 return rc;
1777 }
1778 mutex_unlock(&adapter->fw_lock);
1779
1780 if (!adapter->vpd->len)
1781 return -ENODATA;
1782
1783 if (!adapter->vpd->buff)
1784 adapter->vpd->buff = kzalloc(adapter->vpd->len, GFP_KERNEL);
1785 else if (adapter->vpd->len != len)
1786 adapter->vpd->buff =
1787 krealloc(adapter->vpd->buff,
1788 adapter->vpd->len, GFP_KERNEL);
1789
1790 if (!adapter->vpd->buff) {
1791 dev_err(dev, "Could not allocate VPD buffer\n");
1792 return -ENOMEM;
1793 }
1794
1795 adapter->vpd->dma_addr =
1796 dma_map_single(dev, adapter->vpd->buff, adapter->vpd->len,
1797 DMA_FROM_DEVICE);
1798 if (dma_mapping_error(dev, adapter->vpd->dma_addr)) {
1799 dev_err(dev, "Could not map VPD buffer\n");
1800 kfree(adapter->vpd->buff);
1801 adapter->vpd->buff = NULL;
1802 return -ENOMEM;
1803 }
1804
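/* Second step: hand the mapped buffer to the VNIC server so it can DMA
 * the VPD contents into it.
 */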
1805 mutex_lock(&adapter->fw_lock);
1806 adapter->fw_done_rc = 0;
1807 reinit_completion(&adapter->fw_done);
1808
1809 crq.get_vpd.first = IBMVNIC_CRQ_CMD;
1810 crq.get_vpd.cmd = GET_VPD;
1811 crq.get_vpd.ioba = cpu_to_be32(adapter->vpd->dma_addr);
1812 crq.get_vpd.len = cpu_to_be32((u32)adapter->vpd->len);
1813 rc = ibmvnic_send_crq(adapter, &crq);
1814 if (rc) {
1815 kfree(adapter->vpd->buff);
1816 adapter->vpd->buff = NULL;
1817 mutex_unlock(&adapter->fw_lock);
1818 return rc;
1819 }
1820
1821 rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
1822 if (rc) {
1823 dev_err(dev, "Unable to retrieve VPD, rc = %d\n", rc);
1824 kfree(adapter->vpd->buff);
1825 adapter->vpd->buff = NULL;
1826 mutex_unlock(&adapter->fw_lock);
1827 return rc;
1828 }
1829
1830 mutex_unlock(&adapter->fw_lock);
1831 return 0;
1832 }
1833
1834 static int init_resources(struct ibmvnic_adapter *adapter)
1835 {
1836 struct net_device *netdev = adapter->netdev;
1837 int rc;
1838
1839 rc = set_real_num_queues(netdev);
1840 if (rc)
1841 return rc;
1842
1843 adapter->vpd = kzalloc(sizeof(*adapter->vpd), GFP_KERNEL);
1844 if (!adapter->vpd)
1845 return -ENOMEM;
1846
1847 /* Vital Product Data (VPD) */
1848 rc = ibmvnic_get_vpd(adapter);
1849 if (rc) {
1850 netdev_err(netdev, "failed to initialize Vital Product Data (VPD)\n");
1851 return rc;
1852 }
1853
1854 rc = init_napi(adapter);
1855 if (rc)
1856 return rc;
1857
1858 send_query_map(adapter);
1859
1860 rc = init_rx_pools(netdev);
1861 if (rc)
1862 return rc;
1863
1864 rc = init_tx_pools(netdev);
1865 return rc;
1866 }
1867
1868 static int __ibmvnic_open(struct net_device *netdev)
1869 {
1870 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1871 enum vnic_state prev_state = adapter->state;
1872 int i, rc;
1873
1874 adapter->state = VNIC_OPENING;
1875 replenish_pools(adapter);
1876 ibmvnic_napi_enable(adapter);
1877
1878 /* We're ready to receive frames, enable the sub-crq interrupts and
1879 * set the logical link state to up
1880 */
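/* enable_irq() is only needed when resuming from the CLOSED state,
 * where ibmvnic_disable_irqs() left the hard IRQs disabled; on a first
 * open the freshly requested IRQs are already enabled.
 */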
1881 for (i = 0; i < adapter->req_rx_queues; i++) {
1882 netdev_dbg(netdev, "Enabling rx_scrq[%d] irq\n", i);
1883 if (prev_state == VNIC_CLOSED)
1884 enable_irq(adapter->rx_scrq[i]->irq);
1885 enable_scrq_irq(adapter, adapter->rx_scrq[i]);
1886 }
1887
1888 for (i = 0; i < adapter->req_tx_queues; i++) {
1889 netdev_dbg(netdev, "Enabling tx_scrq[%d] irq\n", i);
1890 if (prev_state == VNIC_CLOSED)
1891 enable_irq(adapter->tx_scrq[i]->irq);
1892 enable_scrq_irq(adapter, adapter->tx_scrq[i]);
1893 /* netdev_tx_reset_queue will reset dql stats. During NON_FATAL
1894 * resets, don't reset the stats because there could be batched
1895 * skb's waiting to be sent. If we reset dql stats, we risk
1896 * num_completed being greater than num_queued. This will cause
1897 * a BUG_ON in dql_completed().
1898 */
1899 if (adapter->reset_reason != VNIC_RESET_NON_FATAL)
1900 netdev_tx_reset_queue(netdev_get_tx_queue(netdev, i));
1901 }
1902
1903 rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_UP);
1904 if (rc) {
1905 ibmvnic_napi_disable(adapter);
1906 ibmvnic_disable_irqs(adapter);
1907 return rc;
1908 }
1909
1910 adapter->tx_queues_active = true;
1911
1912 /* Since queues were stopped until now, there shouldn't be any
1913 * one in ibmvnic_complete_tx() or ibmvnic_xmit() so maybe we
1914 * don't need the synchronize_rcu()? Leaving it for consistency
1915 * with setting ->tx_queues_active = false.
1916 */
1917 synchronize_rcu();
1918
1919 netif_tx_start_all_queues(netdev);
1920
1921 if (prev_state == VNIC_CLOSED) {
1922 for (i = 0; i < adapter->req_rx_queues; i++)
1923 napi_schedule(&adapter->napi[i]);
1924 }
1925
1926 adapter->state = VNIC_OPEN;
1927 return rc;
1928 }
1929
1930 static int ibmvnic_open(struct net_device *netdev)
1931 {
1932 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1933 int rc;
1934
1935 ASSERT_RTNL();
1936
1937 /* If device failover is pending or we are about to reset, just set
1938 * device state and return. Device operation will be handled by reset
1939 * routine.
1940 *
1941 * It should be safe to overwrite the adapter->state here. Since
1942 * we hold the rtnl, either the reset has not actually started or
1943 * the rtnl got dropped during the set_link_state() in do_reset().
1944 * In the former case, no one else is changing the state (again we
1945 * have the rtnl) and in the latter case, do_reset() will detect and
1946 * honor our setting below.
1947 */
1948 if (adapter->failover_pending || (test_bit(0, &adapter->resetting))) {
1949 netdev_dbg(netdev, "[S:%s FOP:%d] Resetting, deferring open\n",
1950 adapter_state_to_string(adapter->state),
1951 adapter->failover_pending);
1952 adapter->state = VNIC_OPEN;
1953 rc = 0;
1954 goto out;
1955 }
1956
1957 if (adapter->state != VNIC_CLOSED) {
1958 rc = ibmvnic_login(netdev);
1959 if (rc)
1960 goto out;
1961
1962 rc = init_resources(adapter);
1963 if (rc) {
1964 netdev_err(netdev, "failed to initialize resources\n");
1965 goto out;
1966 }
1967 }
1968
1969 rc = __ibmvnic_open(netdev);
1970
1971 out:
1972 /* If open failed and there is a pending failover or in-progress reset,
1973 * set device state and return. Device operation will be handled by
1974 * reset routine. See also comments above regarding rtnl.
1975 */
1976 if (rc &&
1977 (adapter->failover_pending || (test_bit(0, &adapter->resetting)))) {
1978 adapter->state = VNIC_OPEN;
1979 rc = 0;
1980 }
1981
1982 if (rc) {
1983 release_resources(adapter);
1984 release_rx_pools(adapter);
1985 release_tx_pools(adapter);
1986 }
1987
1988 return rc;
1989 }
1990
1991 static void clean_rx_pools(struct ibmvnic_adapter *adapter)
1992 {
1993 struct ibmvnic_rx_pool *rx_pool;
1994 struct ibmvnic_rx_buff *rx_buff;
1995 u64 rx_entries;
1996 int rx_scrqs;
1997 int i, j;
1998
1999 if (!adapter->rx_pool)
2000 return;
2001
2002 rx_scrqs = adapter->num_active_rx_pools;
2003 rx_entries = adapter->req_rx_add_entries_per_subcrq;
2004
2005 /* Free any remaining skbs in the rx buffer pools */
2006 for (i = 0; i < rx_scrqs; i++) {
2007 rx_pool = &adapter->rx_pool[i];
2008 if (!rx_pool || !rx_pool->rx_buff)
2009 continue;
2010
2011 netdev_dbg(adapter->netdev, "Cleaning rx_pool[%d]\n", i);
2012 for (j = 0; j < rx_entries; j++) {
2013 rx_buff = &rx_pool->rx_buff[j];
2014 if (rx_buff && rx_buff->skb) {
2015 dev_kfree_skb_any(rx_buff->skb);
2016 rx_buff->skb = NULL;
2017 }
2018 }
2019 }
2020 }
2021
2022 static void clean_one_tx_pool(struct ibmvnic_adapter *adapter,
2023 struct ibmvnic_tx_pool *tx_pool)
2024 {
2025 struct ibmvnic_tx_buff *tx_buff;
2026 u64 tx_entries;
2027 int i;
2028
2029 if (!tx_pool || !tx_pool->tx_buff)
2030 return;
2031
2032 tx_entries = tx_pool->num_buffers;
2033
2034 for (i = 0; i < tx_entries; i++) {
2035 tx_buff = &tx_pool->tx_buff[i];
2036 if (tx_buff && tx_buff->skb) {
2037 dev_kfree_skb_any(tx_buff->skb);
2038 tx_buff->skb = NULL;
2039 }
2040 }
2041 }
2042
2043 static void clean_tx_pools(struct ibmvnic_adapter *adapter)
2044 {
2045 int tx_scrqs;
2046 int i;
2047
2048 if (!adapter->tx_pool || !adapter->tso_pool)
2049 return;
2050
2051 tx_scrqs = adapter->num_active_tx_pools;
2052
2053 /* Free any remaining skbs in the tx buffer pools */
2054 for (i = 0; i < tx_scrqs; i++) {
2055 netdev_dbg(adapter->netdev, "Cleaning tx_pool[%d]\n", i);
2056 clean_one_tx_pool(adapter, &adapter->tx_pool[i]);
2057 clean_one_tx_pool(adapter, &adapter->tso_pool[i]);
2058 }
2059 }
2060
2061 static void ibmvnic_disable_irqs(struct ibmvnic_adapter *adapter)
2062 {
2063 struct net_device *netdev = adapter->netdev;
2064 int i;
2065
2066 if (adapter->tx_scrq) {
2067 for (i = 0; i < adapter->req_tx_queues; i++)
2068 if (adapter->tx_scrq[i]->irq) {
2069 netdev_dbg(netdev,
2070 "Disabling tx_scrq[%d] irq\n", i);
2071 disable_scrq_irq(adapter, adapter->tx_scrq[i]);
2072 disable_irq(adapter->tx_scrq[i]->irq);
2073 }
2074 }
2075
2076 if (adapter->rx_scrq) {
2077 for (i = 0; i < adapter->req_rx_queues; i++) {
2078 if (adapter->rx_scrq[i]->irq) {
2079 netdev_dbg(netdev,
2080 "Disabling rx_scrq[%d] irq\n", i);
2081 disable_scrq_irq(adapter, adapter->rx_scrq[i]);
2082 disable_irq(adapter->rx_scrq[i]->irq);
2083 }
2084 }
2085 }
2086 }
2087
2088 static void ibmvnic_cleanup(struct net_device *netdev)
2089 {
2090 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2091
2092 /* ensure that transmissions are stopped if called by do_reset */
2093
2094 adapter->tx_queues_active = false;
2095
2096 /* Ensure complete_tx() and ibmvnic_xmit() see ->tx_queues_active
2097 * update so they don't restart a queue after we stop it below.
2098 */
2099 synchronize_rcu();
2100
2101 if (test_bit(0, &adapter->resetting))
2102 netif_tx_disable(netdev);
2103 else
2104 netif_tx_stop_all_queues(netdev);
2105
2106 ibmvnic_napi_disable(adapter);
2107 ibmvnic_disable_irqs(adapter);
2108 }
2109
2110 static int __ibmvnic_close(struct net_device *netdev)
2111 {
2112 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2113 int rc = 0;
2114
2115 adapter->state = VNIC_CLOSING;
2116 rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_DN);
2117 adapter->state = VNIC_CLOSED;
2118 return rc;
2119 }
2120
2121 static int ibmvnic_close(struct net_device *netdev)
2122 {
2123 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2124 int rc;
2125
2126 netdev_dbg(netdev, "[S:%s FOP:%d FRR:%d] Closing\n",
2127 adapter_state_to_string(adapter->state),
2128 adapter->failover_pending,
2129 adapter->force_reset_recovery);
2130
2131 /* If device failover is pending, just set device state and return.
2132 * Device operation will be handled by reset routine.
2133 */
2134 if (adapter->failover_pending) {
2135 adapter->state = VNIC_CLOSED;
2136 return 0;
2137 }
2138
2139 rc = __ibmvnic_close(netdev);
2140 ibmvnic_cleanup(netdev);
2141 clean_rx_pools(adapter);
2142 clean_tx_pools(adapter);
2143
2144 return rc;
2145 }
2146
2147 /**
2148 * get_hdr_lens - fills list of L2/L3/L4 hdr lens
2149 * @hdr_field: bitfield determining needed headers
2150 * @skb: socket buffer
2151 * @hdr_len: array of header lengths to be filled
2152 *
2153 * Reads hdr_field to determine which headers are needed by firmware and
2154 * saves the individual header lengths in hdr_len. The total length of the
2155 * requested headers is returned and later used to build the descriptors.
2156 *
2157 * Return: total len of all headers
2158 */
2159 static int get_hdr_lens(u8 hdr_field, struct sk_buff *skb,
2160 int *hdr_len)
2161 {
2162 int len = 0;
2163
2165 if ((hdr_field >> 6) & 1) {
2166 hdr_len[0] = skb_mac_header_len(skb);
2167 len += hdr_len[0];
2168 }
2169
2170 if ((hdr_field >> 5) & 1) {
2171 hdr_len[1] = skb_network_header_len(skb);
2172 len += hdr_len[1];
2173 }
2174
2175 if (!((hdr_field >> 4) & 1))
2176 return len;
2177
2178 if (skb->protocol == htons(ETH_P_IP)) {
2179 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
2180 hdr_len[2] = tcp_hdrlen(skb);
2181 else if (ip_hdr(skb)->protocol == IPPROTO_UDP)
2182 hdr_len[2] = sizeof(struct udphdr);
2183 } else if (skb->protocol == htons(ETH_P_IPV6)) {
2184 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
2185 hdr_len[2] = tcp_hdrlen(skb);
2186 else if (ipv6_hdr(skb)->nexthdr == IPPROTO_UDP)
2187 hdr_len[2] = sizeof(struct udphdr);
2188 }
2189
2190 return len + hdr_len[2];
2191 }
2192
2193 /**
2194 * create_hdr_descs - create header and header extension descriptors
2195 * @hdr_field: bitfield determining needed headers
2196 * @hdr_data: buffer containing header data
2197 * @len: length of data buffer
2198 * @hdr_len: array of individual header lengths
2199 * @scrq_arr: descriptor array
2200 *
2201 * Creates header and, if needed, header extension descriptors and
2202 * places them in a descriptor array, scrq_arr
2203 *
2204 * Return: Number of header descs
2205 */
2206
2207 static int create_hdr_descs(u8 hdr_field, u8 *hdr_data, int len, int *hdr_len,
2208 union sub_crq *scrq_arr)
2209 {
2210 union sub_crq *hdr_desc;
2211 int tmp_len = len;
2212 int num_descs = 0;
2213 u8 *data, *cur;
2214 int tmp;
2215
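/* The first descriptor carries up to 24 bytes of header data plus the
 * L2/L3/L4 lengths; any remainder is split across extension descriptors
 * of up to 29 bytes each.
 */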
2216 while (tmp_len > 0) {
2217 cur = hdr_data + len - tmp_len;
2218
2219 hdr_desc = &scrq_arr[num_descs];
2220 if (num_descs) {
2221 data = hdr_desc->hdr_ext.data;
2222 tmp = tmp_len > 29 ? 29 : tmp_len;
2223 hdr_desc->hdr_ext.first = IBMVNIC_CRQ_CMD;
2224 hdr_desc->hdr_ext.type = IBMVNIC_HDR_EXT_DESC;
2225 hdr_desc->hdr_ext.len = tmp;
2226 } else {
2227 data = hdr_desc->hdr.data;
2228 tmp = tmp_len > 24 ? 24 : tmp_len;
2229 hdr_desc->hdr.first = IBMVNIC_CRQ_CMD;
2230 hdr_desc->hdr.type = IBMVNIC_HDR_DESC;
2231 hdr_desc->hdr.len = tmp;
2232 hdr_desc->hdr.l2_len = (u8)hdr_len[0];
2233 hdr_desc->hdr.l3_len = cpu_to_be16((u16)hdr_len[1]);
2234 hdr_desc->hdr.l4_len = (u8)hdr_len[2];
2235 hdr_desc->hdr.flag = hdr_field << 1;
2236 }
2237 memcpy(data, cur, tmp);
2238 tmp_len -= tmp;
2239 num_descs++;
2240 }
2241
2242 return num_descs;
2243 }
2244
2245 /**
2246 * build_hdr_descs_arr - build a header descriptor array
2247 * @skb: tx socket buffer
2248 * @indir_arr: indirect array
2249 * @num_entries: number of descriptors to be sent
2250 * @hdr_field: bit field determining which headers will be sent
2251 *
2252 * This function will build a TX descriptor array with applicable
2253 * L2/L3/L4 packet header descriptors to be sent by send_subcrq_indirect.
2254 */
2255
2256 static void build_hdr_descs_arr(struct sk_buff *skb,
2257 union sub_crq *indir_arr,
2258 int *num_entries, u8 hdr_field)
2259 {
2260 int hdr_len[3] = {0, 0, 0};
2261 int tot_len;
2262
2263 tot_len = get_hdr_lens(hdr_field, skb, hdr_len);
2264 *num_entries += create_hdr_descs(hdr_field, skb_mac_header(skb),
2265 tot_len, hdr_len, indir_arr + 1);
2266 }
2267
2268 static int ibmvnic_xmit_workarounds(struct sk_buff *skb,
2269 struct net_device *netdev)
2270 {
2271 /* For some backing devices, mishandling of small packets
2272 * can result in a loss of connection or TX stall. Device
2273 * architects recommend that no packet should be smaller
2274 * than the minimum MTU value provided to the driver, so
2275 * pad any packets to that length
2276 */
2277 if (skb->len < netdev->min_mtu)
2278 return skb_put_padto(skb, netdev->min_mtu);
2279
2280 return 0;
2281 }
2282
2283 static void ibmvnic_tx_scrq_clean_buffer(struct ibmvnic_adapter *adapter,
2284 struct ibmvnic_sub_crq_queue *tx_scrq)
2285 {
2286 struct ibmvnic_ind_xmit_queue *ind_bufp;
2287 struct ibmvnic_tx_buff *tx_buff;
2288 struct ibmvnic_tx_pool *tx_pool;
2289 union sub_crq tx_scrq_entry;
2290 int queue_num;
2291 int entries;
2292 int index;
2293 int i;
2294
2295 ind_bufp = &tx_scrq->ind_buf;
2296 entries = (u64)ind_bufp->index;
2297 queue_num = tx_scrq->pool_index;
2298
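/* A flush of the indirect buffer failed: walk the queued entries
 * backwards, return each TX descriptor's buffer index to its pool's
 * free map, free the associated skb and count the packet as dropped.
 */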
2299 for (i = entries - 1; i >= 0; --i) {
2300 tx_scrq_entry = ind_bufp->indir_arr[i];
2301 if (tx_scrq_entry.v1.type != IBMVNIC_TX_DESC)
2302 continue;
2303 index = be32_to_cpu(tx_scrq_entry.v1.correlator);
2304 if (index & IBMVNIC_TSO_POOL_MASK) {
2305 tx_pool = &adapter->tso_pool[queue_num];
2306 index &= ~IBMVNIC_TSO_POOL_MASK;
2307 } else {
2308 tx_pool = &adapter->tx_pool[queue_num];
2309 }
2310 tx_pool->free_map[tx_pool->consumer_index] = index;
2311 tx_pool->consumer_index = tx_pool->consumer_index == 0 ?
2312 tx_pool->num_buffers - 1 :
2313 tx_pool->consumer_index - 1;
2314 tx_buff = &tx_pool->tx_buff[index];
2315 adapter->netdev->stats.tx_packets--;
2316 adapter->netdev->stats.tx_bytes -= tx_buff->skb->len;
2317 adapter->tx_stats_buffers[queue_num].batched_packets--;
2318 adapter->tx_stats_buffers[queue_num].bytes -=
2319 tx_buff->skb->len;
2320 dev_kfree_skb_any(tx_buff->skb);
2321 tx_buff->skb = NULL;
2322 adapter->netdev->stats.tx_dropped++;
2323 }
2324
2325 ind_bufp->index = 0;
2326
2327 if (atomic_sub_return(entries, &tx_scrq->used) <=
2328 (adapter->req_tx_entries_per_subcrq / 2) &&
2329 __netif_subqueue_stopped(adapter->netdev, queue_num)) {
2330 rcu_read_lock();
2331
2332 if (adapter->tx_queues_active) {
2333 netif_wake_subqueue(adapter->netdev, queue_num);
2334 netdev_dbg(adapter->netdev, "Started queue %d\n",
2335 queue_num);
2336 }
2337
2338 rcu_read_unlock();
2339 }
2340 }
2341
2342 static int send_subcrq_direct(struct ibmvnic_adapter *adapter,
2343 u64 remote_handle, u64 *entry)
2344 {
2345 unsigned int ua = adapter->vdev->unit_address;
2346 struct device *dev = &adapter->vdev->dev;
2347 int rc;
2348
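/* H_SEND_SUB_CRQ passes a single four-element sub-CRQ entry to the
 * hypervisor inline as hcall arguments, avoiding the DMA-mapped
 * descriptor buffer used by send_subcrq_indirect().
 */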
2349 /* Make sure the hypervisor sees the complete request */
2350 dma_wmb();
2351 rc = plpar_hcall_norets(H_SEND_SUB_CRQ, ua,
2352 cpu_to_be64(remote_handle),
2353 cpu_to_be64(entry[0]), cpu_to_be64(entry[1]),
2354 cpu_to_be64(entry[2]), cpu_to_be64(entry[3]));
2355
2356 if (rc)
2357 print_subcrq_error(dev, rc, __func__);
2358
2359 return rc;
2360 }
2361
2362 static int ibmvnic_tx_scrq_flush(struct ibmvnic_adapter *adapter,
2363 struct ibmvnic_sub_crq_queue *tx_scrq,
2364 bool indirect)
2365 {
2366 struct ibmvnic_ind_xmit_queue *ind_bufp;
2367 u64 dma_addr;
2368 u64 entries;
2369 u64 handle;
2370 int rc;
2371
2372 ind_bufp = &tx_scrq->ind_buf;
2373 dma_addr = (u64)ind_bufp->indir_dma;
2374 entries = (u64)ind_bufp->index;
2375 handle = tx_scrq->handle;
2376
2377 if (!entries)
2378 return 0;
2379
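/* Flush everything queued in the indirect buffer via
 * send_subcrq_indirect(), or send the lone descriptor with
 * send_subcrq_direct(). On failure, clean up the buffered entries so
 * their skbs and pool slots are reclaimed.
 */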
2380 if (indirect)
2381 rc = send_subcrq_indirect(adapter, handle, dma_addr, entries);
2382 else
2383 rc = send_subcrq_direct(adapter, handle,
2384 (u64 *)ind_bufp->indir_arr);
2385
2386 if (rc)
2387 ibmvnic_tx_scrq_clean_buffer(adapter, tx_scrq);
2388 else
2389 ind_bufp->index = 0;
2390 return rc;
2391 }
2392
2393 static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
2394 {
2395 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2396 int queue_num = skb_get_queue_mapping(skb);
2397 u8 *hdrs = (u8 *)&adapter->tx_rx_desc_req;
2398 struct device *dev = &adapter->vdev->dev;
2399 struct ibmvnic_ind_xmit_queue *ind_bufp;
2400 struct ibmvnic_tx_buff *tx_buff = NULL;
2401 struct ibmvnic_sub_crq_queue *tx_scrq;
2402 struct ibmvnic_long_term_buff *ltb;
2403 struct ibmvnic_tx_pool *tx_pool;
2404 unsigned int tx_send_failed = 0;
2405 netdev_tx_t ret = NETDEV_TX_OK;
2406 unsigned int tx_map_failed = 0;
2407 union sub_crq indir_arr[16];
2408 unsigned int tx_dropped = 0;
2409 unsigned int tx_dpackets = 0;
2410 unsigned int tx_bpackets = 0;
2411 unsigned int tx_bytes = 0;
2412 dma_addr_t data_dma_addr;
2413 struct netdev_queue *txq;
2414 unsigned long lpar_rc;
2415 unsigned int skblen;
2416 union sub_crq tx_crq;
2417 unsigned int offset;
2418 bool use_scrq_send_direct = false;
2419 int num_entries = 1;
2420 unsigned char *dst;
2421 int bufidx = 0;
2422 u8 proto = 0;
2423
2424 /* If a reset is in progress, drop the packet since
2425 * the scrqs may get torn down. Otherwise use the
2426 * rcu to ensure reset waits for us to complete.
2427 */
2428 rcu_read_lock();
2429 if (!adapter->tx_queues_active) {
2430 dev_kfree_skb_any(skb);
2431
2432 tx_send_failed++;
2433 tx_dropped++;
2434 ret = NETDEV_TX_OK;
2435 goto out;
2436 }
2437
2438 tx_scrq = adapter->tx_scrq[queue_num];
2439 txq = netdev_get_tx_queue(netdev, queue_num);
2440 ind_bufp = &tx_scrq->ind_buf;
2441
2442 if (ibmvnic_xmit_workarounds(skb, netdev)) {
2443 tx_dropped++;
2444 tx_send_failed++;
2445 ret = NETDEV_TX_OK;
2446 lpar_rc = ibmvnic_tx_scrq_flush(adapter, tx_scrq, true);
2447 if (lpar_rc != H_SUCCESS)
2448 goto tx_err;
2449 goto out;
2450 }
2451
2452 if (skb_is_gso(skb))
2453 tx_pool = &adapter->tso_pool[queue_num];
2454 else
2455 tx_pool = &adapter->tx_pool[queue_num];
2456
2457 bufidx = tx_pool->free_map[tx_pool->consumer_index];
2458
2459 if (bufidx == IBMVNIC_INVALID_MAP) {
2460 dev_kfree_skb_any(skb);
2461 tx_send_failed++;
2462 tx_dropped++;
2463 ret = NETDEV_TX_OK;
2464 lpar_rc = ibmvnic_tx_scrq_flush(adapter, tx_scrq, true);
2465 if (lpar_rc != H_SUCCESS)
2466 goto tx_err;
2467 goto out;
2468 }
2469
2470 tx_pool->free_map[tx_pool->consumer_index] = IBMVNIC_INVALID_MAP;
2471
2472 map_txpool_buf_to_ltb(tx_pool, bufidx, <b, &offset);
2473
2474 dst = ltb->buff + offset;
2475 memset(dst, 0, tx_pool->buf_size);
2476 data_dma_addr = ltb->addr + offset;
2477
2478 /* If we are going to send this via send_subcrq_direct(), we need to
2479 * resolve the checksum before copying the data into the LTB. Essentially
2480 * these packets force-disable CSO so that we can guarantee that the
2481 * firmware does not need header info and we can send direct. The VNIC
2482 * server must also be able to xmit standard packets without header data.
2483 */
2484 if (*hdrs == 0 && !skb_is_gso(skb) &&
2485 !ind_bufp->index && !netdev_xmit_more()) {
2486 use_scrq_send_direct = true;
2487 if (skb->ip_summed == CHECKSUM_PARTIAL &&
2488 skb_checksum_help(skb))
2489 use_scrq_send_direct = false;
2490 }
2491
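/* Copy the skb (head and any frags) into the long term buffer; the
 * descriptor built below references the LTB with a single
 * scatter-gather element.
 */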
2492 if (skb_shinfo(skb)->nr_frags) {
2493 int cur, i;
2494
2495 /* Copy the head */
2496 skb_copy_from_linear_data(skb, dst, skb_headlen(skb));
2497 cur = skb_headlen(skb);
2498
2499 /* Copy the frags */
2500 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2501 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2502
2503 memcpy(dst + cur, skb_frag_address(frag),
2504 skb_frag_size(frag));
2505 cur += skb_frag_size(frag);
2506 }
2507 } else {
2508 skb_copy_from_linear_data(skb, dst, skb->len);
2509 }
2510
2511 tx_pool->consumer_index =
2512 (tx_pool->consumer_index + 1) % tx_pool->num_buffers;
2513
2514 tx_buff = &tx_pool->tx_buff[bufidx];
2515
2516 /* Sanity checks on our free map to make sure it points to an index
2517 * that is not being occupied by another skb. If skb memory is
2518 * not freed then we see congestion control kick in and halt tx.
2519 */
2520 if (unlikely(tx_buff->skb)) {
2521 dev_warn_ratelimited(dev, "TX free map points to untracked skb (%s %d idx=%d)\n",
2522 skb_is_gso(skb) ? "tso_pool" : "tx_pool",
2523 queue_num, bufidx);
2524 dev_kfree_skb_any(tx_buff->skb);
2525 }
2526
2527 tx_buff->skb = skb;
2528 tx_buff->index = bufidx;
2529 tx_buff->pool_index = queue_num;
2530 skblen = skb->len;
2531
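/* Build the version 1 TX descriptor: one CRQ element, one SGE pointing
 * at the copied data, completion requested, with the correlator
 * encoding the buffer index (and the TSO pool bit when applicable).
 */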
2532 memset(&tx_crq, 0, sizeof(tx_crq));
2533 tx_crq.v1.first = IBMVNIC_CRQ_CMD;
2534 tx_crq.v1.type = IBMVNIC_TX_DESC;
2535 tx_crq.v1.n_crq_elem = 1;
2536 tx_crq.v1.n_sge = 1;
2537 tx_crq.v1.flags1 = IBMVNIC_TX_COMP_NEEDED;
2538
2539 if (skb_is_gso(skb))
2540 tx_crq.v1.correlator =
2541 cpu_to_be32(bufidx | IBMVNIC_TSO_POOL_MASK);
2542 else
2543 tx_crq.v1.correlator = cpu_to_be32(bufidx);
2544 tx_crq.v1.dma_reg = cpu_to_be16(ltb->map_id);
2545 tx_crq.v1.sge_len = cpu_to_be32(skb->len);
2546 tx_crq.v1.ioba = cpu_to_be64(data_dma_addr);
2547
2548 if (adapter->vlan_header_insertion && skb_vlan_tag_present(skb)) {
2549 tx_crq.v1.flags2 |= IBMVNIC_TX_VLAN_INSERT;
2550 tx_crq.v1.vlan_id = cpu_to_be16(skb->vlan_tci);
2551 }
2552
2553 if (skb->protocol == htons(ETH_P_IP)) {
2554 tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV4;
2555 proto = ip_hdr(skb)->protocol;
2556 } else if (skb->protocol == htons(ETH_P_IPV6)) {
2557 tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV6;
2558 proto = ipv6_hdr(skb)->nexthdr;
2559 }
2560
2561 if (proto == IPPROTO_TCP)
2562 tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_TCP;
2563 else if (proto == IPPROTO_UDP)
2564 tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_UDP;
2565
2566 if (skb->ip_summed == CHECKSUM_PARTIAL) {
2567 tx_crq.v1.flags1 |= IBMVNIC_TX_CHKSUM_OFFLOAD;
2568 hdrs += 2;
2569 }
2570 if (skb_is_gso(skb)) {
2571 tx_crq.v1.flags1 |= IBMVNIC_TX_LSO;
2572 tx_crq.v1.mss = cpu_to_be16(skb_shinfo(skb)->gso_size);
2573 hdrs += 2;
2574 } else if (use_scrq_send_direct) {
2575 /* See above comment, CSO disabled with direct xmit */
2576 tx_crq.v1.flags1 &= ~(IBMVNIC_TX_CHKSUM_OFFLOAD);
2577 ind_bufp->index = 1;
2578 tx_buff->num_entries = 1;
2579 netdev_tx_sent_queue(txq, skb->len);
2580 ind_bufp->indir_arr[0] = tx_crq;
2581 lpar_rc = ibmvnic_tx_scrq_flush(adapter, tx_scrq, false);
2582 if (lpar_rc != H_SUCCESS)
2583 goto tx_err;
2584
2585 tx_dpackets++;
2586 goto early_exit;
2587 }
2588
2589 if ((*hdrs >> 7) & 1)
2590 build_hdr_descs_arr(skb, indir_arr, &num_entries, *hdrs);
2591
2592 tx_crq.v1.n_crq_elem = num_entries;
2593 tx_buff->num_entries = num_entries;
2594 /* flush buffer if current entry can not fit */
2595 if (num_entries + ind_bufp->index > IBMVNIC_MAX_IND_DESCS) {
2596 lpar_rc = ibmvnic_tx_scrq_flush(adapter, tx_scrq, true);
2597 if (lpar_rc != H_SUCCESS)
2598 goto tx_flush_err;
2599 }
2600
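/* Queue this descriptor (and any header descriptors) in the per-queue
 * indirect buffer. It is flushed either here, when xmit_more is not set
 * or the buffer is full, or by a later transmit.
 */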
2601 indir_arr[0] = tx_crq;
2602 memcpy(&ind_bufp->indir_arr[ind_bufp->index], &indir_arr[0],
2603 num_entries * sizeof(struct ibmvnic_generic_scrq));
2604
2605 ind_bufp->index += num_entries;
2606 if (__netdev_tx_sent_queue(txq, skb->len,
2607 netdev_xmit_more() &&
2608 ind_bufp->index < IBMVNIC_MAX_IND_DESCS)) {
2609 lpar_rc = ibmvnic_tx_scrq_flush(adapter, tx_scrq, true);
2610 if (lpar_rc != H_SUCCESS)
2611 goto tx_err;
2612 }
2613
2614 tx_bpackets++;
2615
2616 early_exit:
2617 if (atomic_add_return(num_entries, &tx_scrq->used)
2618 >= adapter->req_tx_entries_per_subcrq) {
2619 netdev_dbg(netdev, "Stopping queue %d\n", queue_num);
2620 netif_stop_subqueue(netdev, queue_num);
2621 }
2622
2623 tx_bytes += skblen;
2624 txq_trans_cond_update(txq);
2625 ret = NETDEV_TX_OK;
2626 goto out;
2627
2628 tx_flush_err:
2629 dev_kfree_skb_any(skb);
2630 tx_buff->skb = NULL;
2631 tx_pool->consumer_index = tx_pool->consumer_index == 0 ?
2632 tx_pool->num_buffers - 1 :
2633 tx_pool->consumer_index - 1;
2634 tx_dropped++;
2635 tx_err:
2636 if (lpar_rc != H_CLOSED && lpar_rc != H_PARAMETER)
2637 dev_err_ratelimited(dev, "tx: send failed\n");
2638
2639 if (lpar_rc == H_CLOSED || adapter->failover_pending) {
2640 /* Disable TX and report carrier off if queue is closed
2641 * or pending failover.
2642 * Firmware guarantees that a signal will be sent to the
2643 * driver, triggering a reset or some other action.
2644 */
2645 netif_tx_stop_all_queues(netdev);
2646 netif_carrier_off(netdev);
2647 }
2648 out:
2649 rcu_read_unlock();
2650 netdev->stats.tx_dropped += tx_dropped;
2651 netdev->stats.tx_bytes += tx_bytes;
2652 netdev->stats.tx_packets += tx_bpackets + tx_dpackets;
2653 adapter->tx_send_failed += tx_send_failed;
2654 adapter->tx_map_failed += tx_map_failed;
2655 adapter->tx_stats_buffers[queue_num].batched_packets += tx_bpackets;
2656 adapter->tx_stats_buffers[queue_num].direct_packets += tx_dpackets;
2657 adapter->tx_stats_buffers[queue_num].bytes += tx_bytes;
2658 adapter->tx_stats_buffers[queue_num].dropped_packets += tx_dropped;
2659
2660 return ret;
2661 }
2662
2663 static void ibmvnic_set_multi(struct net_device *netdev)
2664 {
2665 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2666 struct netdev_hw_addr *ha;
2667 union ibmvnic_crq crq;
2668
2669 memset(&crq, 0, sizeof(crq));
2670 crq.request_capability.first = IBMVNIC_CRQ_CMD;
2671 crq.request_capability.cmd = REQUEST_CAPABILITY;
2672
2673 if (netdev->flags & IFF_PROMISC) {
2674 if (!adapter->promisc_supported)
2675 return;
2676 } else {
2677 if (netdev->flags & IFF_ALLMULTI) {
2678 /* Accept all multicast */
2679 memset(&crq, 0, sizeof(crq));
2680 crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
2681 crq.multicast_ctrl.cmd = MULTICAST_CTRL;
2682 crq.multicast_ctrl.flags = IBMVNIC_ENABLE_ALL;
2683 ibmvnic_send_crq(adapter, &crq);
2684 } else if (netdev_mc_empty(netdev)) {
2685 /* Reject all multicast */
2686 memset(&crq, 0, sizeof(crq));
2687 crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
2688 crq.multicast_ctrl.cmd = MULTICAST_CTRL;
2689 crq.multicast_ctrl.flags = IBMVNIC_DISABLE_ALL;
2690 ibmvnic_send_crq(adapter, &crq);
2691 } else {
2692 /* Accept one or more multicast(s) */
2693 netdev_for_each_mc_addr(ha, netdev) {
2694 memset(&crq, 0, sizeof(crq));
2695 crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
2696 crq.multicast_ctrl.cmd = MULTICAST_CTRL;
2697 crq.multicast_ctrl.flags = IBMVNIC_ENABLE_MC;
2698 ether_addr_copy(&crq.multicast_ctrl.mac_addr[0],
2699 ha->addr);
2700 ibmvnic_send_crq(adapter, &crq);
2701 }
2702 }
2703 }
2704 }
2705
2706 static int __ibmvnic_set_mac(struct net_device *netdev, u8 *dev_addr)
2707 {
2708 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2709 union ibmvnic_crq crq;
2710 int rc;
2711
2712 if (!is_valid_ether_addr(dev_addr)) {
2713 rc = -EADDRNOTAVAIL;
2714 goto err;
2715 }
2716
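/* Send a CHANGE_MAC_ADDR request and wait for fw_done; the response
 * handler updates netdev->dev_addr on success.
 */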
2717 memset(&crq, 0, sizeof(crq));
2718 crq.change_mac_addr.first = IBMVNIC_CRQ_CMD;
2719 crq.change_mac_addr.cmd = CHANGE_MAC_ADDR;
2720 ether_addr_copy(&crq.change_mac_addr.mac_addr[0], dev_addr);
2721
2722 mutex_lock(&adapter->fw_lock);
2723 adapter->fw_done_rc = 0;
2724 reinit_completion(&adapter->fw_done);
2725
2726 rc = ibmvnic_send_crq(adapter, &crq);
2727 if (rc) {
2728 rc = -EIO;
2729 mutex_unlock(&adapter->fw_lock);
2730 goto err;
2731 }
2732
2733 rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
2734 /* netdev->dev_addr is changed in handle_change_mac_rsp function */
2735 if (rc || adapter->fw_done_rc) {
2736 rc = -EIO;
2737 mutex_unlock(&adapter->fw_lock);
2738 goto err;
2739 }
2740 mutex_unlock(&adapter->fw_lock);
2741 return 0;
2742 err:
2743 ether_addr_copy(adapter->mac_addr, netdev->dev_addr);
2744 return rc;
2745 }
2746
2747 static int ibmvnic_set_mac(struct net_device *netdev, void *p)
2748 {
2749 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2750 struct sockaddr *addr = p;
2751 int rc;
2752
2753 rc = 0;
2754 if (!is_valid_ether_addr(addr->sa_data))
2755 return -EADDRNOTAVAIL;
2756
2757 ether_addr_copy(adapter->mac_addr, addr->sa_data);
2758 if (adapter->state != VNIC_PROBED)
2759 rc = __ibmvnic_set_mac(netdev, addr->sa_data);
2760
2761 return rc;
2762 }
2763
2764 static const char *reset_reason_to_string(enum ibmvnic_reset_reason reason)
2765 {
2766 switch (reason) {
2767 case VNIC_RESET_FAILOVER:
2768 return "FAILOVER";
2769 case VNIC_RESET_MOBILITY:
2770 return "MOBILITY";
2771 case VNIC_RESET_FATAL:
2772 return "FATAL";
2773 case VNIC_RESET_NON_FATAL:
2774 return "NON_FATAL";
2775 case VNIC_RESET_TIMEOUT:
2776 return "TIMEOUT";
2777 case VNIC_RESET_CHANGE_PARAM:
2778 return "CHANGE_PARAM";
2779 case VNIC_RESET_PASSIVE_INIT:
2780 return "PASSIVE_INIT";
2781 }
2782 return "UNKNOWN";
2783 }
2784
2785 /*
2786 * Initialize the init_done completion and return code values. We
2787 * can get a transport event just after registering the CRQ and the
2788 * tasklet will use this to communicate the transport event. To ensure
2789 * we don't miss the notification/error, initialize these _before_
2790 * registering the CRQ.
2791 */
2792 static inline void reinit_init_done(struct ibmvnic_adapter *adapter)
2793 {
2794 reinit_completion(&adapter->init_done);
2795 adapter->init_done_rc = 0;
2796 }
2797
2798 /*
2799 * do_reset returns zero if we are able to keep processing reset events, or
2800 * non-zero if we hit a fatal error and must halt.
2801 */
2802 static int do_reset(struct ibmvnic_adapter *adapter,
2803 struct ibmvnic_rwi *rwi, u32 reset_state)
2804 {
2805 struct net_device *netdev = adapter->netdev;
2806 u64 old_num_rx_queues, old_num_tx_queues;
2807 u64 old_num_rx_slots, old_num_tx_slots;
2808 int rc;
2809
2810 netdev_dbg(adapter->netdev,
2811 "[S:%s FOP:%d] Reset reason: %s, reset_state: %s\n",
2812 adapter_state_to_string(adapter->state),
2813 adapter->failover_pending,
2814 reset_reason_to_string(rwi->reset_reason),
2815 adapter_state_to_string(reset_state));
2816
2817 adapter->reset_reason = rwi->reset_reason;
2818 /* requestor of VNIC_RESET_CHANGE_PARAM already has the rtnl lock */
2819 if (!(adapter->reset_reason == VNIC_RESET_CHANGE_PARAM))
2820 rtnl_lock();
2821
2822 /* Now that we have the rtnl lock, clear any pending failover.
2823 * This will ensure ibmvnic_open() has either completed or will
2824 * block until failover is complete.
2825 */
2826 if (rwi->reset_reason == VNIC_RESET_FAILOVER)
2827 adapter->failover_pending = false;
2828
2829 /* read the state and check (again) after getting rtnl */
2830 reset_state = adapter->state;
2831
2832 if (reset_state == VNIC_REMOVING || reset_state == VNIC_REMOVED) {
2833 rc = -EBUSY;
2834 goto out;
2835 }
2836
2837 netif_carrier_off(netdev);
2838
2839 old_num_rx_queues = adapter->req_rx_queues;
2840 old_num_tx_queues = adapter->req_tx_queues;
2841 old_num_rx_slots = adapter->req_rx_add_entries_per_subcrq;
2842 old_num_tx_slots = adapter->req_tx_entries_per_subcrq;
2843
2844 ibmvnic_cleanup(netdev);
2845
2846 if (reset_state == VNIC_OPEN &&
2847 adapter->reset_reason != VNIC_RESET_MOBILITY &&
2848 adapter->reset_reason != VNIC_RESET_FAILOVER) {
2849 if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM) {
2850 rc = __ibmvnic_close(netdev);
2851 if (rc)
2852 goto out;
2853 } else {
2854 adapter->state = VNIC_CLOSING;
2855
2856 /* Release the RTNL lock before link state change and
2857 * re-acquire after the link state change to allow
2858 * linkwatch_event to grab the RTNL lock and run during
2859 * a reset.
2860 */
2861 rtnl_unlock();
2862 rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_DN);
2863 rtnl_lock();
2864 if (rc)
2865 goto out;
2866
2867 if (adapter->state == VNIC_OPEN) {
2868 /* When we dropped rtnl, ibmvnic_open() got
2869 * it and noticed that we are resetting and
2870 * set the adapter state to OPEN. Update our
2871 * new "target" state, and resume the reset
2872 * from VNIC_CLOSING state.
2873 */
2874 netdev_dbg(netdev,
2875 "Open changed state from %s, updating.\n",
2876 adapter_state_to_string(reset_state));
2877 reset_state = VNIC_OPEN;
2878 adapter->state = VNIC_CLOSING;
2879 }
2880
2881 if (adapter->state != VNIC_CLOSING) {
2882 /* If someone else changed the adapter state
2883 * when we dropped the rtnl, fail the reset
2884 */
2885 rc = -EAGAIN;
2886 goto out;
2887 }
2888 adapter->state = VNIC_CLOSED;
2889 }
2890 }
2891
2892 if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM) {
2893 release_resources(adapter);
2894 release_sub_crqs(adapter, 1);
2895 release_crq_queue(adapter);
2896 }
2897
2898 if (adapter->reset_reason != VNIC_RESET_NON_FATAL) {
2899 /* remove the closed state so when we call open it appears
2900 * we are coming from the probed state.
2901 */
2902 adapter->state = VNIC_PROBED;
2903
2904 reinit_init_done(adapter);
2905
2906 if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM) {
2907 rc = init_crq_queue(adapter);
2908 } else if (adapter->reset_reason == VNIC_RESET_MOBILITY) {
2909 rc = ibmvnic_reenable_crq_queue(adapter);
2910 release_sub_crqs(adapter, 1);
2911 } else {
2912 rc = ibmvnic_reset_crq(adapter);
2913 if (rc == H_CLOSED || rc == H_SUCCESS) {
2914 rc = vio_enable_interrupts(adapter->vdev);
2915 if (rc)
2916 netdev_err(adapter->netdev,
2917 "Reset failed to enable interrupts. rc=%d\n",
2918 rc);
2919 }
2920 }
2921
2922 if (rc) {
2923 netdev_err(adapter->netdev,
2924 "Reset couldn't initialize crq. rc=%d\n", rc);
2925 goto out;
2926 }
2927
2928 rc = ibmvnic_reset_init(adapter, true);
2929 if (rc)
2930 goto out;
2931
2932 /* If the adapter was in PROBE or DOWN state prior to the reset,
2933 * exit here.
2934 */
2935 if (reset_state == VNIC_PROBED || reset_state == VNIC_DOWN) {
2936 rc = 0;
2937 goto out;
2938 }
2939
2940 rc = ibmvnic_login(netdev);
2941 if (rc)
2942 goto out;
2943
2944 if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM) {
2945 rc = init_resources(adapter);
2946 if (rc)
2947 goto out;
2948 } else if (adapter->req_rx_queues != old_num_rx_queues ||
2949 adapter->req_tx_queues != old_num_tx_queues ||
2950 adapter->req_rx_add_entries_per_subcrq !=
2951 old_num_rx_slots ||
2952 adapter->req_tx_entries_per_subcrq !=
2953 old_num_tx_slots ||
2954 !adapter->rx_pool ||
2955 !adapter->tso_pool ||
2956 !adapter->tx_pool) {
2957 release_napi(adapter);
2958 release_vpd_data(adapter);
2959
2960 rc = init_resources(adapter);
2961 if (rc)
2962 goto out;
2963
2964 } else {
2965 rc = init_tx_pools(netdev);
2966 if (rc) {
2967 netdev_dbg(netdev,
2968 "init tx pools failed (%d)\n",
2969 rc);
2970 goto out;
2971 }
2972
2973 rc = init_rx_pools(netdev);
2974 if (rc) {
2975 netdev_dbg(netdev,
2976 "init rx pools failed (%d)\n",
2977 rc);
2978 goto out;
2979 }
2980 }
2981 ibmvnic_disable_irqs(adapter);
2982 }
2983 adapter->state = VNIC_CLOSED;
2984
2985 if (reset_state == VNIC_CLOSED) {
2986 rc = 0;
2987 goto out;
2988 }
2989
2990 rc = __ibmvnic_open(netdev);
2991 if (rc) {
2992 rc = IBMVNIC_OPEN_FAILED;
2993 goto out;
2994 }
2995
2996 /* refresh device's multicast list */
2997 ibmvnic_set_multi(netdev);
2998
2999 if (adapter->reset_reason == VNIC_RESET_FAILOVER ||
3000 adapter->reset_reason == VNIC_RESET_MOBILITY)
3001 __netdev_notify_peers(netdev);
3002
3003 rc = 0;
3004
3005 out:
3006 /* restore the adapter state if reset failed */
3007 if (rc)
3008 adapter->state = reset_state;
3009 /* requestor of VNIC_RESET_CHANGE_PARAM should still hold the rtnl lock */
3010 if (!(adapter->reset_reason == VNIC_RESET_CHANGE_PARAM))
3011 rtnl_unlock();
3012
3013 netdev_dbg(adapter->netdev, "[S:%s FOP:%d] Reset done, rc %d\n",
3014 adapter_state_to_string(adapter->state),
3015 adapter->failover_pending, rc);
3016 return rc;
3017 }
3018
3019 static int do_hard_reset(struct ibmvnic_adapter *adapter,
3020 struct ibmvnic_rwi *rwi, u32 reset_state)
3021 {
3022 struct net_device *netdev = adapter->netdev;
3023 int rc;
3024
3025 netdev_dbg(adapter->netdev, "Hard resetting driver (%s)\n",
3026 reset_reason_to_string(rwi->reset_reason));
3027
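/* A hard reset releases all resources, the sub-CRQs and the CRQ itself,
 * then re-registers and re-initializes from the probed state before
 * logging in again.
 */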
3028 /* read the state and check (again) after getting rtnl */
3029 reset_state = adapter->state;
3030
3031 if (reset_state == VNIC_REMOVING || reset_state == VNIC_REMOVED) {
3032 rc = -EBUSY;
3033 goto out;
3034 }
3035
3036 netif_carrier_off(netdev);
3037 adapter->reset_reason = rwi->reset_reason;
3038
3039 ibmvnic_cleanup(netdev);
3040 release_resources(adapter);
3041 release_sub_crqs(adapter, 0);
3042 release_crq_queue(adapter);
3043
3044 /* remove the closed state so when we call open it appears
3045 * we are coming from the probed state.
3046 */
3047 adapter->state = VNIC_PROBED;
3048
3049 reinit_init_done(adapter);
3050
3051 rc = init_crq_queue(adapter);
3052 if (rc) {
3053 netdev_err(adapter->netdev,
3054 "Couldn't initialize crq. rc=%d\n", rc);
3055 goto out;
3056 }
3057
3058 rc = ibmvnic_reset_init(adapter, false);
3059 if (rc)
3060 goto out;
3061
3062 /* If the adapter was in PROBE or DOWN state prior to the reset,
3063 * exit here.
3064 */
3065 if (reset_state == VNIC_PROBED || reset_state == VNIC_DOWN)
3066 goto out;
3067
3068 rc = ibmvnic_login(netdev);
3069 if (rc)
3070 goto out;
3071
3072 rc = init_resources(adapter);
3073 if (rc)
3074 goto out;
3075
3076 ibmvnic_disable_irqs(adapter);
3077 adapter->state = VNIC_CLOSED;
3078
3079 if (reset_state == VNIC_CLOSED)
3080 goto out;
3081
3082 rc = __ibmvnic_open(netdev);
3083 if (rc) {
3084 rc = IBMVNIC_OPEN_FAILED;
3085 goto out;
3086 }
3087
3088 __netdev_notify_peers(netdev);
3089 out:
3090 /* restore adapter state if reset failed */
3091 if (rc)
3092 adapter->state = reset_state;
3093 netdev_dbg(adapter->netdev, "[S:%s FOP:%d] Hard reset done, rc %d\n",
3094 adapter_state_to_string(adapter->state),
3095 adapter->failover_pending, rc);
3096 return rc;
3097 }
3098
3099 static struct ibmvnic_rwi *get_next_rwi(struct ibmvnic_adapter *adapter)
3100 {
3101 struct ibmvnic_rwi *rwi;
3102 unsigned long flags;
3103
3104 spin_lock_irqsave(&adapter->rwi_lock, flags);
3105
3106 if (!list_empty(&adapter->rwi_list)) {
3107 rwi = list_first_entry(&adapter->rwi_list, struct ibmvnic_rwi,
3108 list);
3109 list_del(&rwi->list);
3110 } else {
3111 rwi = NULL;
3112 }
3113
3114 spin_unlock_irqrestore(&adapter->rwi_lock, flags);
3115 return rwi;
3116 }
3117
3118 /**
3119 * do_passive_init - complete probing when partner device is detected.
3120 * @adapter: ibmvnic_adapter struct
3121 *
3122 * If the ibmvnic device does not have a partner device to communicate with at boot
3123 * and that partner device comes online at a later time, this function is called
3124 * to complete the initialization process of the ibmvnic device.
3125 * Caller is expected to hold rtnl_lock().
3126 *
3127 * Returns non-zero if sub-CRQs are not initialized properly leaving the device
3128 * in the down state.
3129 * Returns 0 upon success and the device is in PROBED state.
3130 */
3131
3132 static int do_passive_init(struct ibmvnic_adapter *adapter)
3133 {
3134 unsigned long timeout = msecs_to_jiffies(30000);
3135 struct net_device *netdev = adapter->netdev;
3136 struct device *dev = &adapter->vdev->dev;
3137 int rc;
3138
3139 netdev_dbg(netdev, "Partner device found, probing.\n");
3140
3141 adapter->state = VNIC_PROBING;
3142 reinit_completion(&adapter->init_done);
3143 adapter->init_done_rc = 0;
3144 adapter->crq.active = true;
3145
3146 rc = send_crq_init_complete(adapter);
3147 if (rc)
3148 goto out;
3149
3150 rc = send_version_xchg(adapter);
3151 if (rc)
3152 netdev_dbg(adapter->netdev, "send_version_xchg failed, rc=%d\n", rc);
3153
3154 if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
3155 dev_err(dev, "Initialization sequence timed out\n");
3156 rc = -ETIMEDOUT;
3157 goto out;
3158 }
3159
3160 rc = init_sub_crqs(adapter);
3161 if (rc) {
3162 dev_err(dev, "Initialization of sub crqs failed, rc=%d\n", rc);
3163 goto out;
3164 }
3165
3166 rc = init_sub_crq_irqs(adapter);
3167 if (rc) {
3168 dev_err(dev, "Failed to initialize sub crq irqs, rc=%d\n", rc);
3169 goto init_failed;
3170 }
3171
3172 netdev->mtu = adapter->req_mtu - ETH_HLEN;
3173 netdev->min_mtu = adapter->min_mtu - ETH_HLEN;
3174 netdev->max_mtu = adapter->max_mtu - ETH_HLEN;
3175
3176 adapter->state = VNIC_PROBED;
3177 netdev_dbg(netdev, "Probed successfully. Waiting for signal from partner device.\n");
3178
3179 return 0;
3180
3181 init_failed:
3182 release_sub_crqs(adapter, 1);
3183 out:
3184 adapter->state = VNIC_DOWN;
3185 return rc;
3186 }
3187
3188 static void __ibmvnic_reset(struct work_struct *work)
3189 {
3190 struct ibmvnic_adapter *adapter;
3191 unsigned int timeout = 5000;
3192 struct ibmvnic_rwi *tmprwi;
3193 bool saved_state = false;
3194 struct ibmvnic_rwi *rwi;
3195 unsigned long flags;
3196 struct device *dev;
3197 bool need_reset;
3198 int num_fails = 0;
3199 u32 reset_state;
3200 int rc = 0;
3201
3202 adapter = container_of(work, struct ibmvnic_adapter, ibmvnic_reset);
3203 dev = &adapter->vdev->dev;
3204
3205 /* Wait for ibmvnic_probe() to complete. If probe is taking too long
3206 * or if another reset is in progress, defer work for now. If probe
3207 * eventually fails it will flush and terminate our work.
3208 *
3209 * Three possibilities here:
3210 * 1. Adapter being removed - just return
3211 * 2. Timed out on probe or another reset in progress - delay the work
3212 * 3. Completed probe - perform any resets in queue
3213 */
3214 if (adapter->state == VNIC_PROBING &&
3215 !wait_for_completion_timeout(&adapter->probe_done, timeout)) {
3216 dev_err(dev, "Reset thread timed out on probe");
3217 queue_delayed_work(system_long_wq,
3218 &adapter->ibmvnic_delayed_reset,
3219 IBMVNIC_RESET_DELAY);
3220 return;
3221 }
3222
3223 /* adapter is done with probe (i.e state is never VNIC_PROBING now) */
3224 if (adapter->state == VNIC_REMOVING)
3225 return;
3226
3227 /* ->rwi_list is stable now (no one else is removing entries) */
3228
3229 /* ibmvnic_probe() may have purged the reset queue after we were
3230 * scheduled to process a reset, so there may be no resets to process.
3231 * Before setting the ->resetting bit though, we have to make sure
3232 * that there is in fact a reset to process. Otherwise we may race
3233 * with ibmvnic_open() and end up leaving the vnic down:
3234 *
3235 * __ibmvnic_reset() ibmvnic_open()
3236 * ----------------- --------------
3237 *
3238 * set ->resetting bit
3239 * find ->resetting bit is set
3240 * set ->state to IBMVNIC_OPEN (i.e
3241 * assume reset will open device)
3242 * return
3243 * find reset queue empty
3244 * return
3245 *
3246 * Neither performed vnic login/open and vnic stays down
3247 *
3248 * If we hold the lock and conditionally set the bit, either we
3249 * or ibmvnic_open() will complete the open.
3250 */
3251 need_reset = false;
3252 spin_lock(&adapter->rwi_lock);
3253 if (!list_empty(&adapter->rwi_list)) {
3254 if (test_and_set_bit_lock(0, &adapter->resetting)) {
3255 queue_delayed_work(system_long_wq,
3256 &adapter->ibmvnic_delayed_reset,
3257 IBMVNIC_RESET_DELAY);
3258 } else {
3259 need_reset = true;
3260 }
3261 }
3262 spin_unlock(&adapter->rwi_lock);
3263
3264 if (!need_reset)
3265 return;
3266
3267 rwi = get_next_rwi(adapter);
3268 while (rwi) {
3269 spin_lock_irqsave(&adapter->state_lock, flags);
3270
3271 if (adapter->state == VNIC_REMOVING ||
3272 adapter->state == VNIC_REMOVED) {
3273 spin_unlock_irqrestore(&adapter->state_lock, flags);
3274 kfree(rwi);
3275 rc = EBUSY;
3276 break;
3277 }
3278
3279 if (!saved_state) {
3280 reset_state = adapter->state;
3281 saved_state = true;
3282 }
3283 spin_unlock_irqrestore(&adapter->state_lock, flags);
3284
3285 if (rwi->reset_reason == VNIC_RESET_PASSIVE_INIT) {
3286 rtnl_lock();
3287 rc = do_passive_init(adapter);
3288 rtnl_unlock();
3289 if (!rc)
3290 netif_carrier_on(adapter->netdev);
3291 } else if (adapter->force_reset_recovery) {
3292 /* Since we are doing a hard reset now, clear the
3293 * failover_pending flag so we don't ignore any
3294 * future MOBILITY or other resets.
3295 */
3296 adapter->failover_pending = false;
3297
3298 /* Transport event occurred during previous reset */
3299 if (adapter->wait_for_reset) {
3300 /* Previous was CHANGE_PARAM; caller locked */
3301 adapter->force_reset_recovery = false;
3302 rc = do_hard_reset(adapter, rwi, reset_state);
3303 } else {
3304 rtnl_lock();
3305 adapter->force_reset_recovery = false;
3306 rc = do_hard_reset(adapter, rwi, reset_state);
3307 rtnl_unlock();
3308 }
3309 if (rc)
3310 num_fails++;
3311 else
3312 num_fails = 0;
3313
3314 /* If auto-priority-failover is enabled we can get
3315 * back to back failovers during resets, resulting
3316 * in at least two failed resets (from high-priority
3317 * backing device to low-priority one and then back)
3318 * If resets continue to fail beyond that, give the
3319 * adapter some time to settle down before retrying.
3320 */
3321 if (num_fails >= 3) {
3322 netdev_dbg(adapter->netdev,
3323 "[S:%s] Hard reset failed %d times, waiting 60 secs\n",
3324 adapter_state_to_string(adapter->state),
3325 num_fails);
3326 set_current_state(TASK_UNINTERRUPTIBLE);
3327 schedule_timeout(60 * HZ);
3328 }
3329 } else {
3330 rc = do_reset(adapter, rwi, reset_state);
3331 }
3332 tmprwi = rwi;
3333 adapter->last_reset_time = jiffies;
3334
3335 if (rc)
3336 netdev_dbg(adapter->netdev, "Reset failed, rc=%d\n", rc);
3337
3338 rwi = get_next_rwi(adapter);
3339
3340 /*
3341 * If there are no resets queued and the previous reset failed,
3342 * the adapter would be in an undefined state. So retry the
3343 * previous reset as a hard reset.
3344 *
3345 * Else, free the previous rwi and, if there is another reset
3346 * queued, process the new reset even if previous reset failed
3347 * (the previous reset could have failed because of a fail
3348 * over for instance, so process the fail over).
3349 */
3350 if (!rwi && rc)
3351 rwi = tmprwi;
3352 else
3353 kfree(tmprwi);
3354
3355 if (rwi && (rwi->reset_reason == VNIC_RESET_FAILOVER ||
3356 rwi->reset_reason == VNIC_RESET_MOBILITY || rc))
3357 adapter->force_reset_recovery = true;
3358 }
3359
3360 if (adapter->wait_for_reset) {
3361 adapter->reset_done_rc = rc;
3362 complete(&adapter->reset_done);
3363 }
3364
3365 clear_bit_unlock(0, &adapter->resetting);
3366
3367 netdev_dbg(adapter->netdev,
3368 "[S:%s FRR:%d WFR:%d] Done processing resets\n",
3369 adapter_state_to_string(adapter->state),
3370 adapter->force_reset_recovery,
3371 adapter->wait_for_reset);
3372 }
3373
3374 static void __ibmvnic_delayed_reset(struct work_struct *work)
3375 {
3376 struct ibmvnic_adapter *adapter;
3377
3378 adapter = container_of(work, struct ibmvnic_adapter,
3379 ibmvnic_delayed_reset.work);
3380 __ibmvnic_reset(&adapter->ibmvnic_reset);
3381 }
3382
3383 static void flush_reset_queue(struct ibmvnic_adapter *adapter)
3384 {
3385 struct list_head *entry, *tmp_entry;
3386
3387 if (!list_empty(&adapter->rwi_list)) {
3388 list_for_each_safe(entry, tmp_entry, &adapter->rwi_list) {
3389 list_del(entry);
3390 kfree(list_entry(entry, struct ibmvnic_rwi, list));
3391 }
3392 }
3393 }
3394
3395 static int ibmvnic_reset(struct ibmvnic_adapter *adapter,
3396 enum ibmvnic_reset_reason reason)
3397 {
3398 struct net_device *netdev = adapter->netdev;
3399 struct ibmvnic_rwi *rwi, *tmp;
3400 unsigned long flags;
3401 int ret;
3402
3403 spin_lock_irqsave(&adapter->rwi_lock, flags);
3404
3405 /* If failover is pending don't schedule any other reset.
3406 * Instead let the failover complete. If there is already
3407 * a failover reset scheduled, we will detect and drop the
3408 * duplicate reset when walking the ->rwi_list below.
3409 */
3410 if (adapter->state == VNIC_REMOVING ||
3411 adapter->state == VNIC_REMOVED ||
3412 (adapter->failover_pending && reason != VNIC_RESET_FAILOVER)) {
3413 ret = EBUSY;
3414 netdev_dbg(netdev, "Adapter removing or pending failover, skipping reset\n");
3415 goto err;
3416 }
3417
3418 list_for_each_entry(tmp, &adapter->rwi_list, list) {
3419 if (tmp->reset_reason == reason) {
3420 netdev_dbg(netdev, "Skipping matching reset, reason=%s\n",
3421 reset_reason_to_string(reason));
3422 ret = EBUSY;
3423 goto err;
3424 }
3425 }
3426
3427 rwi = kzalloc(sizeof(*rwi), GFP_ATOMIC);
3428 if (!rwi) {
3429 ret = ENOMEM;
3430 goto err;
3431 }
3432 /* if we just received a transport event,
3433 * flush reset queue and process this reset
3434 */
3435 if (adapter->force_reset_recovery)
3436 flush_reset_queue(adapter);
3437
3438 rwi->reset_reason = reason;
3439 list_add_tail(&rwi->list, &adapter->rwi_list);
3440 netdev_dbg(adapter->netdev, "Scheduling reset (reason %s)\n",
3441 reset_reason_to_string(reason));
3442 queue_work(system_long_wq, &adapter->ibmvnic_reset);
3443
3444 ret = 0;
3445 err:
3446 /* ibmvnic_close() below can block, so drop the lock first */
3447 spin_unlock_irqrestore(&adapter->rwi_lock, flags);
3448
3449 if (ret == ENOMEM)
3450 ibmvnic_close(netdev);
3451
3452 return -ret;
3453 }
3454
3455 static void ibmvnic_tx_timeout(struct net_device *dev, unsigned int txqueue)
3456 {
3457 struct ibmvnic_adapter *adapter = netdev_priv(dev);
3458
3459 if (test_bit(0, &adapter->resetting)) {
3460 netdev_err(adapter->netdev,
3461 "Adapter is resetting, skip timeout reset\n");
3462 return;
3463 }
3464 /* Do not queue up a reset until at least 5 seconds (the default
3465 * watchdog value) have passed since the last reset
3466 */
3467 if (time_before(jiffies, (adapter->last_reset_time + dev->watchdog_timeo))) {
3468 netdev_dbg(dev, "Not yet time to tx timeout.\n");
3469 return;
3470 }
3471 ibmvnic_reset(adapter, VNIC_RESET_TIMEOUT);
3472 }
3473
3474 static void remove_buff_from_pool(struct ibmvnic_adapter *adapter,
3475 struct ibmvnic_rx_buff *rx_buff)
3476 {
3477 struct ibmvnic_rx_pool *pool = &adapter->rx_pool[rx_buff->pool_index];
3478
3479 rx_buff->skb = NULL;
3480
3481 pool->free_map[pool->next_alloc] = (int)(rx_buff - pool->rx_buff);
3482 pool->next_alloc = (pool->next_alloc + 1) % pool->size;
3483
3484 atomic_dec(&pool->available);
3485 }
3486
3487 static int ibmvnic_poll(struct napi_struct *napi, int budget)
3488 {
3489 struct ibmvnic_sub_crq_queue *rx_scrq;
3490 struct ibmvnic_adapter *adapter;
3491 struct net_device *netdev;
3492 int frames_processed;
3493 int scrq_num;
3494
3495 netdev = napi->dev;
3496 adapter = netdev_priv(netdev);
3497 scrq_num = (int)(napi - adapter->napi);
3498 frames_processed = 0;
3499 rx_scrq = adapter->rx_scrq[scrq_num];
3500
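/* Process up to budget RX completions: copy each frame out of the long
 * term buffer into its skb, hand it to the stack via napi_gro_receive(),
 * and return the buffer to the pool.
 */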
3501 restart_poll:
3502 while (frames_processed < budget) {
3503 struct sk_buff *skb;
3504 struct ibmvnic_rx_buff *rx_buff;
3505 union sub_crq *next;
3506 u32 length;
3507 u16 offset;
3508 u8 flags = 0;
3509
3510 if (unlikely(test_bit(0, &adapter->resetting) &&
3511 adapter->reset_reason != VNIC_RESET_NON_FATAL)) {
3512 enable_scrq_irq(adapter, rx_scrq);
3513 napi_complete_done(napi, frames_processed);
3514 return frames_processed;
3515 }
3516
3517 if (!pending_scrq(adapter, rx_scrq))
3518 break;
3519 next = ibmvnic_next_scrq(adapter, rx_scrq);
3520 rx_buff = (struct ibmvnic_rx_buff *)
3521 be64_to_cpu(next->rx_comp.correlator);
3522 /* do error checking */
3523 if (next->rx_comp.rc) {
3524 netdev_dbg(netdev, "rx buffer returned with rc %x\n",
3525 be16_to_cpu(next->rx_comp.rc));
3526 /* free the entry */
3527 next->rx_comp.first = 0;
3528 dev_kfree_skb_any(rx_buff->skb);
3529 remove_buff_from_pool(adapter, rx_buff);
3530 continue;
3531 } else if (!rx_buff->skb) {
3532 /* free the entry */
3533 next->rx_comp.first = 0;
3534 remove_buff_from_pool(adapter, rx_buff);
3535 continue;
3536 }
3537
3538 length = be32_to_cpu(next->rx_comp.len);
3539 offset = be16_to_cpu(next->rx_comp.off_frame_data);
3540 flags = next->rx_comp.flags;
3541 skb = rx_buff->skb;
3542 /* load long_term_buff before copying to skb */
3543 dma_rmb();
3544 skb_copy_to_linear_data(skb, rx_buff->data + offset,
3545 length);
3546
3547 /* VLAN Header has been stripped by the system firmware and
3548 * needs to be inserted by the driver
3549 */
3550 if (adapter->rx_vlan_header_insertion &&
3551 (flags & IBMVNIC_VLAN_STRIPPED))
3552 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
3553 ntohs(next->rx_comp.vlan_tci));
3554
3555 /* free the entry */
3556 next->rx_comp.first = 0;
3557 remove_buff_from_pool(adapter, rx_buff);
3558
3559 skb_put(skb, length);
3560 skb->protocol = eth_type_trans(skb, netdev);
3561 skb_record_rx_queue(skb, scrq_num);
3562
3563 if (flags & IBMVNIC_IP_CHKSUM_GOOD &&
3564 flags & IBMVNIC_TCP_UDP_CHKSUM_GOOD) {
3565 skb->ip_summed = CHECKSUM_UNNECESSARY;
3566 }
3567
3568 length = skb->len;
3569 napi_gro_receive(napi, skb); /* send it up */
3570 netdev->stats.rx_packets++;
3571 netdev->stats.rx_bytes += length;
3572 adapter->rx_stats_buffers[scrq_num].packets++;
3573 adapter->rx_stats_buffers[scrq_num].bytes += length;
3574 frames_processed++;
3575 }
3576
3577 if (adapter->state != VNIC_CLOSING &&
3578 (atomic_read(&adapter->rx_pool[scrq_num].available) <
3579 adapter->req_rx_add_entries_per_subcrq / 2))
3580 replenish_rx_pool(adapter, &adapter->rx_pool[scrq_num]);
3581 if (frames_processed < budget) {
3582 if (napi_complete_done(napi, frames_processed)) {
3583 enable_scrq_irq(adapter, rx_scrq);
3584 if (pending_scrq(adapter, rx_scrq)) {
3585 if (napi_schedule(napi)) {
3586 disable_scrq_irq(adapter, rx_scrq);
3587 goto restart_poll;
3588 }
3589 }
3590 }
3591 }
3592 return frames_processed;
3593 }
3594
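/* Request a CHANGE_PARAM reset to apply the values in adapter->desired
 * and wait up to 60 seconds for it to complete. The current settings
 * are saved as a fallback; if the first reset fails, the fallback
 * values are restored and a second reset is attempted before giving up.
 */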
3595 static int wait_for_reset(struct ibmvnic_adapter *adapter)
3596 {
3597 int rc, ret;
3598
3599 adapter->fallback.mtu = adapter->req_mtu;
3600 adapter->fallback.rx_queues = adapter->req_rx_queues;
3601 adapter->fallback.tx_queues = adapter->req_tx_queues;
3602 adapter->fallback.rx_entries = adapter->req_rx_add_entries_per_subcrq;
3603 adapter->fallback.tx_entries = adapter->req_tx_entries_per_subcrq;
3604
3605 reinit_completion(&adapter->reset_done);
3606 adapter->wait_for_reset = true;
3607 rc = ibmvnic_reset(adapter, VNIC_RESET_CHANGE_PARAM);
3608
3609 if (rc) {
3610 ret = rc;
3611 goto out;
3612 }
3613 rc = ibmvnic_wait_for_completion(adapter, &adapter->reset_done, 60000);
3614 if (rc) {
3615 ret = -ENODEV;
3616 goto out;
3617 }
3618
3619 ret = 0;
3620 if (adapter->reset_done_rc) {
3621 ret = -EIO;
3622 adapter->desired.mtu = adapter->fallback.mtu;
3623 adapter->desired.rx_queues = adapter->fallback.rx_queues;
3624 adapter->desired.tx_queues = adapter->fallback.tx_queues;
3625 adapter->desired.rx_entries = adapter->fallback.rx_entries;
3626 adapter->desired.tx_entries = adapter->fallback.tx_entries;
3627
3628 reinit_completion(&adapter->reset_done);
3629 adapter->wait_for_reset = true;
3630 rc = ibmvnic_reset(adapter, VNIC_RESET_CHANGE_PARAM);
3631 if (rc) {
3632 ret = rc;
3633 goto out;
3634 }
3635 rc = ibmvnic_wait_for_completion(adapter, &adapter->reset_done,
3636 60000);
3637 if (rc) {
3638 ret = -ENODEV;
3639 goto out;
3640 }
3641 }
3642 out:
3643 adapter->wait_for_reset = false;
3644
3645 return ret;
3646 }
3647
3648 static int ibmvnic_change_mtu(struct net_device *netdev, int new_mtu)
3649 {
3650 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
3651
3652 adapter->desired.mtu = new_mtu + ETH_HLEN;
3653
3654 return wait_for_reset(adapter);
3655 }
3656
3657 static netdev_features_t ibmvnic_features_check(struct sk_buff *skb,
3658 struct net_device *dev,
3659 netdev_features_t features)
3660 {
3661 /* Some backing hardware adapters cannot
3662 * handle packets with an MSS less than 224
3663 * or with only one segment.
3664 */
3665 if (skb_is_gso(skb)) {
3666 if (skb_shinfo(skb)->gso_size < 224 ||
3667 skb_shinfo(skb)->gso_segs == 1)
3668 features &= ~NETIF_F_GSO_MASK;
3669 }
3670
3671 return features;
3672 }
3673
3674 static const struct net_device_ops ibmvnic_netdev_ops = {
3675 .ndo_open = ibmvnic_open,
3676 .ndo_stop = ibmvnic_close,
3677 .ndo_start_xmit = ibmvnic_xmit,
3678 .ndo_set_rx_mode = ibmvnic_set_multi,
3679 .ndo_set_mac_address = ibmvnic_set_mac,
3680 .ndo_validate_addr = eth_validate_addr,
3681 .ndo_tx_timeout = ibmvnic_tx_timeout,
3682 .ndo_change_mtu = ibmvnic_change_mtu,
3683 .ndo_features_check = ibmvnic_features_check,
3684 };
3685
3686 /* ethtool functions */
3687
3688 static int ibmvnic_get_link_ksettings(struct net_device *netdev,
3689 struct ethtool_link_ksettings *cmd)
3690 {
3691 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
3692 int rc;
3693
3694 rc = send_query_phys_parms(adapter);
3695 if (rc) {
3696 adapter->speed = SPEED_UNKNOWN;
3697 adapter->duplex = DUPLEX_UNKNOWN;
3698 }
3699 cmd->base.speed = adapter->speed;
3700 cmd->base.duplex = adapter->duplex;
3701 cmd->base.port = PORT_FIBRE;
3702 cmd->base.phy_address = 0;
3703 cmd->base.autoneg = AUTONEG_ENABLE;
3704
3705 return 0;
3706 }
3707
3708 static void ibmvnic_get_drvinfo(struct net_device *netdev,
3709 struct ethtool_drvinfo *info)
3710 {
3711 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
3712
3713 strscpy(info->driver, ibmvnic_driver_name, sizeof(info->driver));
3714 strscpy(info->version, IBMVNIC_DRIVER_VERSION, sizeof(info->version));
3715 strscpy(info->fw_version, adapter->fw_version,
3716 sizeof(info->fw_version));
3717 }
3718
3719 static u32 ibmvnic_get_msglevel(struct net_device *netdev)
3720 {
3721 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
3722
3723 return adapter->msg_enable;
3724 }
3725
3726 static void ibmvnic_set_msglevel(struct net_device *netdev, u32 data)
3727 {
3728 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
3729
3730 adapter->msg_enable = data;
3731 }
3732
3733 static u32 ibmvnic_get_link(struct net_device *netdev)
3734 {
3735 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
3736
3737 /* Don't need to send a query because we request a logical link up at
3738 * init and then we wait for link state indications
3739 */
3740 return adapter->logical_link_state;
3741 }
3742
3743 static void ibmvnic_get_ringparam(struct net_device *netdev,
3744 struct ethtool_ringparam *ring,
3745 struct kernel_ethtool_ringparam *kernel_ring,
3746 struct netlink_ext_ack *extack)
3747 {
3748 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
3749
3750 ring->rx_max_pending = adapter->max_rx_add_entries_per_subcrq;
3751 ring->tx_max_pending = adapter->max_tx_entries_per_subcrq;
3752 ring->rx_mini_max_pending = 0;
3753 ring->rx_jumbo_max_pending = 0;
3754 ring->rx_pending = adapter->req_rx_add_entries_per_subcrq;
3755 ring->tx_pending = adapter->req_tx_entries_per_subcrq;
3756 ring->rx_mini_pending = 0;
3757 ring->rx_jumbo_pending = 0;
3758 }
3759
3760 static int ibmvnic_set_ringparam(struct net_device *netdev,
3761 struct ethtool_ringparam *ring,
3762 struct kernel_ethtool_ringparam *kernel_ring,
3763 struct netlink_ext_ack *extack)
3764 {
3765 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
3766
3767 if (ring->rx_pending > adapter->max_rx_add_entries_per_subcrq ||
3768 ring->tx_pending > adapter->max_tx_entries_per_subcrq) {
3769 netdev_err(netdev, "Invalid request.\n");
3770 netdev_err(netdev, "Max tx buffers = %llu\n",
3771 adapter->max_tx_entries_per_subcrq);
3772 netdev_err(netdev, "Max rx buffers = %llu\n",
3773 adapter->max_rx_add_entries_per_subcrq);
3774 return -EINVAL;
3775 }
3776
3777 adapter->desired.rx_entries = ring->rx_pending;
3778 adapter->desired.tx_entries = ring->tx_pending;
3779
3780 return wait_for_reset(adapter);
3781 }
3782
3783 static void ibmvnic_get_channels(struct net_device *netdev,
3784 struct ethtool_channels *channels)
3785 {
3786 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
3787
3788 channels->max_rx = adapter->max_rx_queues;
3789 channels->max_tx = adapter->max_tx_queues;
3790 channels->max_other = 0;
3791 channels->max_combined = 0;
3792 channels->rx_count = adapter->req_rx_queues;
3793 channels->tx_count = adapter->req_tx_queues;
3794 channels->other_count = 0;
3795 channels->combined_count = 0;
3796 }
3797
3798 static int ibmvnic_set_channels(struct net_device *netdev,
3799 struct ethtool_channels *channels)
3800 {
3801 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
3802
3803 adapter->desired.rx_queues = channels->rx_count;
3804 adapter->desired.tx_queues = channels->tx_count;
3805
3806 return wait_for_reset(adapter);
3807 }
3808
3809 static void ibmvnic_get_strings(struct net_device *dev, u32 stringset, u8 *data)
3810 {
3811 struct ibmvnic_adapter *adapter = netdev_priv(dev);
3812 int i;
3813
3814 if (stringset != ETH_SS_STATS)
3815 return;
3816
3817 for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++)
3818 ethtool_puts(&data, ibmvnic_stats[i].name);
3819
3820 for (i = 0; i < adapter->req_tx_queues; i++) {
3821 ethtool_sprintf(&data, "tx%d_batched_packets", i);
3822 ethtool_sprintf(&data, "tx%d_direct_packets", i);
3823 ethtool_sprintf(&data, "tx%d_bytes", i);
3824 ethtool_sprintf(&data, "tx%d_dropped_packets", i);
3825 }
3826
3827 for (i = 0; i < adapter->req_rx_queues; i++) {
3828 ethtool_sprintf(&data, "rx%d_packets", i);
3829 ethtool_sprintf(&data, "rx%d_bytes", i);
3830 ethtool_sprintf(&data, "rx%d_interrupts", i);
3831 }
3832 }
3833
3834 static int ibmvnic_get_sset_count(struct net_device *dev, int sset)
3835 {
3836 struct ibmvnic_adapter *adapter = netdev_priv(dev);
3837
3838 switch (sset) {
3839 case ETH_SS_STATS:
3840 return ARRAY_SIZE(ibmvnic_stats) +
3841 adapter->req_tx_queues * NUM_TX_STATS +
3842 adapter->req_rx_queues * NUM_RX_STATS;
3843 default:
3844 return -EOPNOTSUPP;
3845 }
3846 }
3847
3848 static void ibmvnic_get_ethtool_stats(struct net_device *dev,
3849 struct ethtool_stats *stats, u64 *data)
3850 {
3851 struct ibmvnic_adapter *adapter = netdev_priv(dev);
3852 union ibmvnic_crq crq;
3853 int i, j;
3854 int rc;
3855
3856 memset(&crq, 0, sizeof(crq));
3857 crq.request_statistics.first = IBMVNIC_CRQ_CMD;
3858 crq.request_statistics.cmd = REQUEST_STATISTICS;
3859 crq.request_statistics.ioba = cpu_to_be32(adapter->stats_token);
3860 crq.request_statistics.len =
3861 cpu_to_be32(sizeof(struct ibmvnic_statistics));
3862
3863 /* Wait for data to be written */
3864 reinit_completion(&adapter->stats_done);
3865 rc = ibmvnic_send_crq(adapter, &crq);
3866 if (rc)
3867 return;
3868 rc = ibmvnic_wait_for_completion(adapter, &adapter->stats_done, 10000);
3869 if (rc)
3870 return;
3871
3872 for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++)
3873 data[i] = be64_to_cpu(IBMVNIC_GET_STAT
3874 (adapter, ibmvnic_stats[i].offset));
3875
3876 for (j = 0; j < adapter->req_tx_queues; j++) {
3877 data[i] = adapter->tx_stats_buffers[j].batched_packets;
3878 i++;
3879 data[i] = adapter->tx_stats_buffers[j].direct_packets;
3880 i++;
3881 data[i] = adapter->tx_stats_buffers[j].bytes;
3882 i++;
3883 data[i] = adapter->tx_stats_buffers[j].dropped_packets;
3884 i++;
3885 }
3886
3887 for (j = 0; j < adapter->req_rx_queues; j++) {
3888 data[i] = adapter->rx_stats_buffers[j].packets;
3889 i++;
3890 data[i] = adapter->rx_stats_buffers[j].bytes;
3891 i++;
3892 data[i] = adapter->rx_stats_buffers[j].interrupts;
3893 i++;
3894 }
3895 }
3896
3897 static const struct ethtool_ops ibmvnic_ethtool_ops = {
3898 .get_drvinfo = ibmvnic_get_drvinfo,
3899 .get_msglevel = ibmvnic_get_msglevel,
3900 .set_msglevel = ibmvnic_set_msglevel,
3901 .get_link = ibmvnic_get_link,
3902 .get_ringparam = ibmvnic_get_ringparam,
3903 .set_ringparam = ibmvnic_set_ringparam,
3904 .get_channels = ibmvnic_get_channels,
3905 .set_channels = ibmvnic_set_channels,
3906 .get_strings = ibmvnic_get_strings,
3907 .get_sset_count = ibmvnic_get_sset_count,
3908 .get_ethtool_stats = ibmvnic_get_ethtool_stats,
3909 .get_link_ksettings = ibmvnic_get_link_ksettings,
3910 };
3911
3912 /* Routines for managing CRQs/sCRQs */
3913
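/* Re-register a single sub-CRQ with the hypervisor after a reset:
 * release its IRQ mapping, zero the queue pages and indices, then
 * re-register the queue via h_reg_sub_crq() using the existing DMA
 * mapping.
 */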
3914 static int reset_one_sub_crq_queue(struct ibmvnic_adapter *adapter,
3915 struct ibmvnic_sub_crq_queue *scrq)
3916 {
3917 int rc;
3918
3919 if (!scrq) {
3920 netdev_dbg(adapter->netdev, "Invalid scrq reset.\n");
3921 return -EINVAL;
3922 }
3923
3924 if (scrq->irq) {
3925 free_irq(scrq->irq, scrq);
3926 irq_dispose_mapping(scrq->irq);
3927 scrq->irq = 0;
3928 }
3929
3930 if (scrq->msgs) {
3931 memset(scrq->msgs, 0, 4 * PAGE_SIZE);
3932 atomic_set(&scrq->used, 0);
3933 scrq->cur = 0;
3934 scrq->ind_buf.index = 0;
3935 } else {
3936 netdev_dbg(adapter->netdev, "Invalid scrq reset\n");
3937 return -EINVAL;
3938 }
3939
3940 rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token,
3941 4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq);
3942 return rc;
3943 }
3944
3945 static int reset_sub_crq_queues(struct ibmvnic_adapter *adapter)
3946 {
3947 int i, rc;
3948
3949 if (!adapter->tx_scrq || !adapter->rx_scrq)
3950 return -EINVAL;
3951
3952 ibmvnic_clean_affinity(adapter);
3953
3954 for (i = 0; i < adapter->req_tx_queues; i++) {
3955 netdev_dbg(adapter->netdev, "Re-setting tx_scrq[%d]\n", i);
3956 rc = reset_one_sub_crq_queue(adapter, adapter->tx_scrq[i]);
3957 if (rc)
3958 return rc;
3959 }
3960
3961 for (i = 0; i < adapter->req_rx_queues; i++) {
3962 netdev_dbg(adapter->netdev, "Re-setting rx_scrq[%d]\n", i);
3963 rc = reset_one_sub_crq_queue(adapter, adapter->rx_scrq[i]);
3964 if (rc)
3965 return rc;
3966 }
3967
3968 return rc;
3969 }
3970
3971 static void release_sub_crq_queue(struct ibmvnic_adapter *adapter,
3972 struct ibmvnic_sub_crq_queue *scrq,
3973 bool do_h_free)
3974 {
3975 struct device *dev = &adapter->vdev->dev;
3976 long rc;
3977
3978 netdev_dbg(adapter->netdev, "Releasing sub-CRQ\n");
3979
3980 if (do_h_free) {
3981 /* Close the sub-crqs */
3982 do {
3983 rc = plpar_hcall_norets(H_FREE_SUB_CRQ,
3984 adapter->vdev->unit_address,
3985 scrq->crq_num);
3986 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
3987
3988 if (rc) {
3989 netdev_err(adapter->netdev,
3990 "Failed to release sub-CRQ %16lx, rc = %ld\n",
3991 scrq->crq_num, rc);
3992 }
3993 }
3994
3995 dma_free_coherent(dev,
3996 IBMVNIC_IND_ARR_SZ,
3997 scrq->ind_buf.indir_arr,
3998 scrq->ind_buf.indir_dma);
3999
4000 dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
4001 DMA_BIDIRECTIONAL);
4002 free_pages((unsigned long)scrq->msgs, 2);
4003 free_cpumask_var(scrq->affinity_mask);
4004 kfree(scrq);
4005 }
4006
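/* Allocate and register one sub-CRQ: four pages of queue messages
 * (DMA mapped bidirectionally), an indirect descriptor buffer, and
 * the hypervisor registration that provides the queue number and
 * hardware IRQ. Returns NULL and unwinds on any failure.
 */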
4007 static struct ibmvnic_sub_crq_queue *init_sub_crq_queue(struct ibmvnic_adapter
4008 *adapter)
4009 {
4010 struct device *dev = &adapter->vdev->dev;
4011 struct ibmvnic_sub_crq_queue *scrq;
4012 int rc;
4013
4014 scrq = kzalloc(sizeof(*scrq), GFP_KERNEL);
4015 if (!scrq)
4016 return NULL;
4017
4018 scrq->msgs =
4019 (union sub_crq *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 2);
4020 if (!scrq->msgs) {
4021 dev_warn(dev, "Couldn't allocate crq queue messages page\n");
4022 goto zero_page_failed;
4023 }
4024 if (!zalloc_cpumask_var(&scrq->affinity_mask, GFP_KERNEL))
4025 goto cpumask_alloc_failed;
4026
4027 scrq->msg_token = dma_map_single(dev, scrq->msgs, 4 * PAGE_SIZE,
4028 DMA_BIDIRECTIONAL);
4029 if (dma_mapping_error(dev, scrq->msg_token)) {
4030 dev_warn(dev, "Couldn't map crq queue messages page\n");
4031 goto map_failed;
4032 }
4033
4034 rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token,
4035 4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq);
4036
4037 if (rc == H_RESOURCE)
4038 rc = ibmvnic_reset_crq(adapter);
4039
4040 if (rc == H_CLOSED) {
4041 dev_warn(dev, "Partner adapter not ready, waiting.\n");
4042 } else if (rc) {
4043 dev_warn(dev, "Error %d registering sub-crq\n", rc);
4044 goto reg_failed;
4045 }
4046
4047 scrq->adapter = adapter;
4048 scrq->size = 4 * PAGE_SIZE / sizeof(*scrq->msgs);
4049 scrq->ind_buf.index = 0;
4050
4051 scrq->ind_buf.indir_arr =
4052 dma_alloc_coherent(dev,
4053 IBMVNIC_IND_ARR_SZ,
4054 &scrq->ind_buf.indir_dma,
4055 GFP_KERNEL);
4056
4057 if (!scrq->ind_buf.indir_arr)
4058 goto indir_failed;
4059
4060 spin_lock_init(&scrq->lock);
4061
4062 netdev_dbg(adapter->netdev,
4063 "sub-crq initialized, num %lx, hw_irq=%lx, irq=%x\n",
4064 scrq->crq_num, scrq->hw_irq, scrq->irq);
4065
4066 return scrq;
4067
4068 indir_failed:
4069 do {
4070 rc = plpar_hcall_norets(H_FREE_SUB_CRQ,
4071 adapter->vdev->unit_address,
4072 scrq->crq_num);
4073 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
4074 reg_failed:
4075 dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
4076 DMA_BIDIRECTIONAL);
4077 map_failed:
4078 free_cpumask_var(scrq->affinity_mask);
4079 cpumask_alloc_failed:
4080 free_pages((unsigned long)scrq->msgs, 2);
4081 zero_page_failed:
4082 kfree(scrq);
4083
4084 return NULL;
4085 }
4086
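/* Tear down all tx and rx sub-CRQs: release their IRQs, optionally
 * free the queues in the hypervisor (do_h_free), and drop any skbs
 * still sitting in the tx pools, since no further completions will
 * arrive once the IRQs are gone.
 */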
4087 static void release_sub_crqs(struct ibmvnic_adapter *adapter, bool do_h_free)
4088 {
4089 int i;
4090
4091 ibmvnic_clean_affinity(adapter);
4092 if (adapter->tx_scrq) {
4093 for (i = 0; i < adapter->num_active_tx_scrqs; i++) {
4094 if (!adapter->tx_scrq[i])
4095 continue;
4096
4097 netdev_dbg(adapter->netdev, "Releasing tx_scrq[%d]\n",
4098 i);
4099 ibmvnic_tx_scrq_clean_buffer(adapter, adapter->tx_scrq[i]);
4100 if (adapter->tx_scrq[i]->irq) {
4101 free_irq(adapter->tx_scrq[i]->irq,
4102 adapter->tx_scrq[i]);
4103 irq_dispose_mapping(adapter->tx_scrq[i]->irq);
4104 adapter->tx_scrq[i]->irq = 0;
4105 }
4106
4107 release_sub_crq_queue(adapter, adapter->tx_scrq[i],
4108 do_h_free);
4109 }
4110
4111 kfree(adapter->tx_scrq);
4112 adapter->tx_scrq = NULL;
4113 adapter->num_active_tx_scrqs = 0;
4114 }
4115
4116 /* Clean any remaining outstanding SKBs;
4117 * we freed the IRQs, so we won't be hearing
4118 * from them
4119 */
4120 clean_tx_pools(adapter);
4121
4122 if (adapter->rx_scrq) {
4123 for (i = 0; i < adapter->num_active_rx_scrqs; i++) {
4124 if (!adapter->rx_scrq[i])
4125 continue;
4126
4127 netdev_dbg(adapter->netdev, "Releasing rx_scrq[%d]\n",
4128 i);
4129 if (adapter->rx_scrq[i]->irq) {
4130 free_irq(adapter->rx_scrq[i]->irq,
4131 adapter->rx_scrq[i]);
4132 irq_dispose_mapping(adapter->rx_scrq[i]->irq);
4133 adapter->rx_scrq[i]->irq = 0;
4134 }
4135
4136 release_sub_crq_queue(adapter, adapter->rx_scrq[i],
4137 do_h_free);
4138 }
4139
4140 kfree(adapter->rx_scrq);
4141 adapter->rx_scrq = NULL;
4142 adapter->num_active_rx_scrqs = 0;
4143 }
4144 }
4145
4146 static int disable_scrq_irq(struct ibmvnic_adapter *adapter,
4147 struct ibmvnic_sub_crq_queue *scrq)
4148 {
4149 struct device *dev = &adapter->vdev->dev;
4150 unsigned long rc;
4151
4152 rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
4153 H_DISABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0);
4154 if (rc)
4155 dev_err(dev, "Couldn't disable scrq irq 0x%lx. rc=%ld\n",
4156 scrq->hw_irq, rc);
4157 return rc;
4158 }
4159
4160 /* We cannot use the IRQ chip EOI handler because that has the
4161 * unintended effect of changing the interrupt priority.
4162 */
4163 static void ibmvnic_xics_eoi(struct device *dev, struct ibmvnic_sub_crq_queue *scrq)
4164 {
4165 u64 val = 0xff000000 | scrq->hw_irq;
4166 unsigned long rc;
4167
4168 rc = plpar_hcall_norets(H_EOI, val);
4169 if (rc)
4170 dev_err(dev, "H_EOI FAILED irq 0x%llx. rc=%ld\n", val, rc);
4171 }
4172
4173 /* Due to a firmware bug, the hypervisor can send an interrupt to a
4174 * transmit or receive queue just prior to a partition migration.
4175 * Force an EOI after migration.
4176 */
4177 static void ibmvnic_clear_pending_interrupt(struct device *dev,
4178 struct ibmvnic_sub_crq_queue *scrq)
4179 {
4180 if (!xive_enabled())
4181 ibmvnic_xics_eoi(dev, scrq);
4182 }
4183
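/* Re-arm the interrupt for a sub-CRQ via H_VIOCTL. During a mobility
 * reset, any interrupt left pending by the firmware is cleared first
 * (see ibmvnic_clear_pending_interrupt() above).
 */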
4184 static int enable_scrq_irq(struct ibmvnic_adapter *adapter,
4185 struct ibmvnic_sub_crq_queue *scrq)
4186 {
4187 struct device *dev = &adapter->vdev->dev;
4188 unsigned long rc;
4189
4190 if (scrq->hw_irq > 0x100000000ULL) {
4191 dev_err(dev, "bad hw_irq = %lx\n", scrq->hw_irq);
4192 return 1;
4193 }
4194
4195 if (test_bit(0, &adapter->resetting) &&
4196 adapter->reset_reason == VNIC_RESET_MOBILITY) {
4197 ibmvnic_clear_pending_interrupt(dev, scrq);
4198 }
4199
4200 rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
4201 H_ENABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0);
4202 if (rc)
4203 dev_err(dev, "Couldn't enable scrq irq 0x%lx. rc=%ld\n",
4204 scrq->hw_irq, rc);
4205 return rc;
4206 }
4207
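/* Process tx completions on a sub-CRQ: return finished buffers to the
 * tx (or TSO) pool, free or consume their skbs, wake the subqueue once
 * it has drained to half full, and report the completed work to the
 * stack via netdev_tx_completed_queue(). Re-checks for pending work
 * after re-enabling the interrupt to avoid missing late completions.
 */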
4208 static int ibmvnic_complete_tx(struct ibmvnic_adapter *adapter,
4209 struct ibmvnic_sub_crq_queue *scrq)
4210 {
4211 struct device *dev = &adapter->vdev->dev;
4212 int num_packets = 0, total_bytes = 0;
4213 struct ibmvnic_tx_pool *tx_pool;
4214 struct ibmvnic_tx_buff *txbuff;
4215 struct netdev_queue *txq;
4216 union sub_crq *next;
4217 int index, i;
4218
4219 restart_loop:
4220 while (pending_scrq(adapter, scrq)) {
4221 unsigned int pool = scrq->pool_index;
4222 int num_entries = 0;
4223 next = ibmvnic_next_scrq(adapter, scrq);
4224 for (i = 0; i < next->tx_comp.num_comps; i++) {
4225 index = be32_to_cpu(next->tx_comp.correlators[i]);
4226 if (index & IBMVNIC_TSO_POOL_MASK) {
4227 tx_pool = &adapter->tso_pool[pool];
4228 index &= ~IBMVNIC_TSO_POOL_MASK;
4229 } else {
4230 tx_pool = &adapter->tx_pool[pool];
4231 }
4232
4233 txbuff = &tx_pool->tx_buff[index];
4234 num_packets++;
4235 num_entries += txbuff->num_entries;
4236 if (txbuff->skb) {
4237 total_bytes += txbuff->skb->len;
4238 if (next->tx_comp.rcs[i]) {
4239 dev_err(dev, "tx error %x\n",
4240 next->tx_comp.rcs[i]);
4241 dev_kfree_skb_irq(txbuff->skb);
4242 } else {
4243 dev_consume_skb_irq(txbuff->skb);
4244 }
4245 txbuff->skb = NULL;
4246 } else {
4247 netdev_warn(adapter->netdev,
4248 "TX completion received with NULL socket buffer\n");
4249 }
4250 tx_pool->free_map[tx_pool->producer_index] = index;
4251 tx_pool->producer_index =
4252 (tx_pool->producer_index + 1) %
4253 tx_pool->num_buffers;
4254 }
4255 /* remove tx_comp scrq */
4256 next->tx_comp.first = 0;
4257
4259 if (atomic_sub_return(num_entries, &scrq->used) <=
4260 (adapter->req_tx_entries_per_subcrq / 2) &&
4261 __netif_subqueue_stopped(adapter->netdev,
4262 scrq->pool_index)) {
4263 rcu_read_lock();
4264 if (adapter->tx_queues_active) {
4265 netif_wake_subqueue(adapter->netdev,
4266 scrq->pool_index);
4267 netdev_dbg(adapter->netdev,
4268 "Started queue %d\n",
4269 scrq->pool_index);
4270 }
4271 rcu_read_unlock();
4272 }
4273 }
4274
4275 enable_scrq_irq(adapter, scrq);
4276
4277 if (pending_scrq(adapter, scrq)) {
4278 disable_scrq_irq(adapter, scrq);
4279 goto restart_loop;
4280 }
4281
4282 txq = netdev_get_tx_queue(adapter->netdev, scrq->pool_index);
4283 netdev_tx_completed_queue(txq, num_packets, total_bytes);
4284
4285 return 0;
4286 }
4287
4288 static irqreturn_t ibmvnic_interrupt_tx(int irq, void *instance)
4289 {
4290 struct ibmvnic_sub_crq_queue *scrq = instance;
4291 struct ibmvnic_adapter *adapter = scrq->adapter;
4292
4293 disable_scrq_irq(adapter, scrq);
4294 ibmvnic_complete_tx(adapter, scrq);
4295
4296 return IRQ_HANDLED;
4297 }
4298
4299 static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance)
4300 {
4301 struct ibmvnic_sub_crq_queue *scrq = instance;
4302 struct ibmvnic_adapter *adapter = scrq->adapter;
4303
4304 /* When booting a kdump kernel we can hit pending interrupts
4305 * prior to completing driver initialization.
4306 */
4307 if (unlikely(adapter->state != VNIC_OPEN))
4308 return IRQ_NONE;
4309
4310 adapter->rx_stats_buffers[scrq->scrq_num].interrupts++;
4311
4312 if (napi_schedule_prep(&adapter->napi[scrq->scrq_num])) {
4313 disable_scrq_irq(adapter, scrq);
4314 __napi_schedule(&adapter->napi[scrq->scrq_num]);
4315 }
4316
4317 return IRQ_HANDLED;
4318 }
4319
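/* Create IRQ mappings and request handlers for every tx and rx
 * sub-CRQ, then spread them across CPUs with ibmvnic_set_affinity().
 * On failure, all previously requested IRQs and the sub-CRQs
 * themselves are released.
 */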
4320 static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter)
4321 {
4322 struct device *dev = &adapter->vdev->dev;
4323 struct ibmvnic_sub_crq_queue *scrq;
4324 int i = 0, j = 0;
4325 int rc = 0;
4326
4327 for (i = 0; i < adapter->req_tx_queues; i++) {
4328 netdev_dbg(adapter->netdev, "Initializing tx_scrq[%d] irq\n",
4329 i);
4330 scrq = adapter->tx_scrq[i];
4331 scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
4332
4333 if (!scrq->irq) {
4334 rc = -EINVAL;
4335 dev_err(dev, "Error mapping irq\n");
4336 goto req_tx_irq_failed;
4337 }
4338
4339 snprintf(scrq->name, sizeof(scrq->name), "ibmvnic-%x-tx%d",
4340 adapter->vdev->unit_address, i);
4341 rc = request_irq(scrq->irq, ibmvnic_interrupt_tx,
4342 0, scrq->name, scrq);
4343
4344 if (rc) {
4345 dev_err(dev, "Couldn't register tx irq 0x%x. rc=%d\n",
4346 scrq->irq, rc);
4347 irq_dispose_mapping(scrq->irq);
4348 goto req_tx_irq_failed;
4349 }
4350 }
4351
4352 for (i = 0; i < adapter->req_rx_queues; i++) {
4353 netdev_dbg(adapter->netdev, "Initializing rx_scrq[%d] irq\n",
4354 i);
4355 scrq = adapter->rx_scrq[i];
4356 scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
4357 if (!scrq->irq) {
4358 rc = -EINVAL;
4359 dev_err(dev, "Error mapping irq\n");
4360 goto req_rx_irq_failed;
4361 }
4362 snprintf(scrq->name, sizeof(scrq->name), "ibmvnic-%x-rx%d",
4363 adapter->vdev->unit_address, i);
4364 rc = request_irq(scrq->irq, ibmvnic_interrupt_rx,
4365 0, scrq->name, scrq);
4366 if (rc) {
4367 dev_err(dev, "Couldn't register rx irq 0x%x. rc=%d\n",
4368 scrq->irq, rc);
4369 irq_dispose_mapping(scrq->irq);
4370 goto req_rx_irq_failed;
4371 }
4372 }
4373
4374 cpus_read_lock();
4375 ibmvnic_set_affinity(adapter);
4376 cpus_read_unlock();
4377
4378 return rc;
4379
4380 req_rx_irq_failed:
4381 for (j = 0; j < i; j++) {
4382 free_irq(adapter->rx_scrq[j]->irq, adapter->rx_scrq[j]);
4383 irq_dispose_mapping(adapter->rx_scrq[j]->irq);
4384 }
4385 i = adapter->req_tx_queues;
4386 req_tx_irq_failed:
4387 for (j = 0; j < i; j++) {
4388 free_irq(adapter->tx_scrq[j]->irq, adapter->tx_scrq[j]);
4389 irq_dispose_mapping(adapter->tx_scrq[j]->irq);
4390 }
4391 release_sub_crqs(adapter, 1);
4392 return rc;
4393 }
4394
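/* Allocate the full set of tx and rx sub-CRQs. If fewer queues could
 * be registered than requested, the rx and tx queue requests are
 * reduced to absorb the shortfall as long as they stay above the
 * negotiated minimums; otherwise initialization fails.
 */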
4395 static int init_sub_crqs(struct ibmvnic_adapter *adapter)
4396 {
4397 struct device *dev = &adapter->vdev->dev;
4398 struct ibmvnic_sub_crq_queue **allqueues;
4399 int registered_queues = 0;
4400 int total_queues;
4401 int more = 0;
4402 int i;
4403
4404 total_queues = adapter->req_tx_queues + adapter->req_rx_queues;
4405
4406 allqueues = kcalloc(total_queues, sizeof(*allqueues), GFP_KERNEL);
4407 if (!allqueues)
4408 return -ENOMEM;
4409
4410 for (i = 0; i < total_queues; i++) {
4411 allqueues[i] = init_sub_crq_queue(adapter);
4412 if (!allqueues[i]) {
4413 dev_warn(dev, "Couldn't allocate all sub-crqs\n");
4414 break;
4415 }
4416 registered_queues++;
4417 }
4418
4419 /* Make sure we were able to register the minimum number of queues */
4420 if (registered_queues <
4421 adapter->min_tx_queues + adapter->min_rx_queues) {
4422 dev_err(dev, "Fatal: Couldn't init min number of sub-crqs\n");
4423 goto tx_failed;
4424 }
4425
4426 /* Spread the shortfall from failed queue allocations across rx and tx */
4427 for (i = 0; i < total_queues - registered_queues + more ; i++) {
4428 netdev_dbg(adapter->netdev, "Reducing number of queues\n");
4429 switch (i % 3) {
4430 case 0:
4431 if (adapter->req_rx_queues > adapter->min_rx_queues)
4432 adapter->req_rx_queues--;
4433 else
4434 more++;
4435 break;
4436 case 1:
4437 if (adapter->req_tx_queues > adapter->min_tx_queues)
4438 adapter->req_tx_queues--;
4439 else
4440 more++;
4441 break;
4442 }
4443 }
4444
4445 adapter->tx_scrq = kcalloc(adapter->req_tx_queues,
4446 sizeof(*adapter->tx_scrq), GFP_KERNEL);
4447 if (!adapter->tx_scrq)
4448 goto tx_failed;
4449
4450 for (i = 0; i < adapter->req_tx_queues; i++) {
4451 adapter->tx_scrq[i] = allqueues[i];
4452 adapter->tx_scrq[i]->pool_index = i;
4453 adapter->num_active_tx_scrqs++;
4454 }
4455
4456 adapter->rx_scrq = kcalloc(adapter->req_rx_queues,
4457 sizeof(*adapter->rx_scrq), GFP_KERNEL);
4458 if (!adapter->rx_scrq)
4459 goto rx_failed;
4460
4461 for (i = 0; i < adapter->req_rx_queues; i++) {
4462 adapter->rx_scrq[i] = allqueues[i + adapter->req_tx_queues];
4463 adapter->rx_scrq[i]->scrq_num = i;
4464 adapter->num_active_rx_scrqs++;
4465 }
4466
4467 kfree(allqueues);
4468 return 0;
4469
4470 rx_failed:
4471 kfree(adapter->tx_scrq);
4472 adapter->tx_scrq = NULL;
4473 tx_failed:
4474 for (i = 0; i < registered_queues; i++)
4475 release_sub_crq_queue(adapter, allqueues[i], 1);
4476 kfree(allqueues);
4477 return -ENOMEM;
4478 }
4479
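/* Send the REQUEST_CAPABILITY CRQs that commit the negotiated queue,
 * ring and MTU settings. On the first (non-retry) call the requested
 * values are derived from adapter->desired, clamped so that a full
 * ring of buffers still fits within one long term buffer set.
 */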
4480 static void send_request_cap(struct ibmvnic_adapter *adapter, int retry)
4481 {
4482 struct device *dev = &adapter->vdev->dev;
4483 union ibmvnic_crq crq;
4484 int max_entries;
4485 int cap_reqs;
4486
4487 /* We send out 6 or 7 REQUEST_CAPABILITY CRQs below (depending on
4488 * the PROMISC flag). Initialize this count upfront. When the tasklet
4489 * receives a response to all of these, it will send the next protocol
4490 * message (QUERY_IP_OFFLOAD).
4491 */
4492 if (!(adapter->netdev->flags & IFF_PROMISC) ||
4493 adapter->promisc_supported)
4494 cap_reqs = 7;
4495 else
4496 cap_reqs = 6;
4497
4498 if (!retry) {
4499 /* Sub-CRQ entries are 32 bytes long */
4500 int entries_page = 4 * PAGE_SIZE / (sizeof(u64) * 4);
4501
4502 atomic_set(&adapter->running_cap_crqs, cap_reqs);
4503
4504 if (adapter->min_tx_entries_per_subcrq > entries_page ||
4505 adapter->min_rx_add_entries_per_subcrq > entries_page) {
4506 dev_err(dev, "Fatal, invalid entries per sub-crq\n");
4507 return;
4508 }
4509
4510 if (adapter->desired.mtu)
4511 adapter->req_mtu = adapter->desired.mtu;
4512 else
4513 adapter->req_mtu = adapter->netdev->mtu + ETH_HLEN;
4514
4515 if (!adapter->desired.tx_entries)
4516 adapter->desired.tx_entries =
4517 adapter->max_tx_entries_per_subcrq;
4518 if (!adapter->desired.rx_entries)
4519 adapter->desired.rx_entries =
4520 adapter->max_rx_add_entries_per_subcrq;
4521
4522 max_entries = IBMVNIC_LTB_SET_SIZE /
4523 (adapter->req_mtu + IBMVNIC_BUFFER_HLEN);
4524
4525 if ((adapter->req_mtu + IBMVNIC_BUFFER_HLEN) *
4526 adapter->desired.tx_entries > IBMVNIC_LTB_SET_SIZE) {
4527 adapter->desired.tx_entries = max_entries;
4528 }
4529
4530 if ((adapter->req_mtu + IBMVNIC_BUFFER_HLEN) *
4531 adapter->desired.rx_entries > IBMVNIC_LTB_SET_SIZE) {
4532 adapter->desired.rx_entries = max_entries;
4533 }
4534
4535 if (adapter->desired.tx_entries)
4536 adapter->req_tx_entries_per_subcrq =
4537 adapter->desired.tx_entries;
4538 else
4539 adapter->req_tx_entries_per_subcrq =
4540 adapter->max_tx_entries_per_subcrq;
4541
4542 if (adapter->desired.rx_entries)
4543 adapter->req_rx_add_entries_per_subcrq =
4544 adapter->desired.rx_entries;
4545 else
4546 adapter->req_rx_add_entries_per_subcrq =
4547 adapter->max_rx_add_entries_per_subcrq;
4548
4549 if (adapter->desired.tx_queues)
4550 adapter->req_tx_queues =
4551 adapter->desired.tx_queues;
4552 else
4553 adapter->req_tx_queues =
4554 adapter->opt_tx_comp_sub_queues;
4555
4556 if (adapter->desired.rx_queues)
4557 adapter->req_rx_queues =
4558 adapter->desired.rx_queues;
4559 else
4560 adapter->req_rx_queues =
4561 adapter->opt_rx_comp_queues;
4562
4563 adapter->req_rx_add_queues = adapter->max_rx_add_queues;
4564 } else {
4565 atomic_add(cap_reqs, &adapter->running_cap_crqs);
4566 }
4567 memset(&crq, 0, sizeof(crq));
4568 crq.request_capability.first = IBMVNIC_CRQ_CMD;
4569 crq.request_capability.cmd = REQUEST_CAPABILITY;
4570
4571 crq.request_capability.capability = cpu_to_be16(REQ_TX_QUEUES);
4572 crq.request_capability.number = cpu_to_be64(adapter->req_tx_queues);
4573 cap_reqs--;
4574 ibmvnic_send_crq(adapter, &crq);
4575
4576 crq.request_capability.capability = cpu_to_be16(REQ_RX_QUEUES);
4577 crq.request_capability.number = cpu_to_be64(adapter->req_rx_queues);
4578 cap_reqs--;
4579 ibmvnic_send_crq(adapter, &crq);
4580
4581 crq.request_capability.capability = cpu_to_be16(REQ_RX_ADD_QUEUES);
4582 crq.request_capability.number = cpu_to_be64(adapter->req_rx_add_queues);
4583 cap_reqs--;
4584 ibmvnic_send_crq(adapter, &crq);
4585
4586 crq.request_capability.capability =
4587 cpu_to_be16(REQ_TX_ENTRIES_PER_SUBCRQ);
4588 crq.request_capability.number =
4589 cpu_to_be64(adapter->req_tx_entries_per_subcrq);
4590 cap_reqs--;
4591 ibmvnic_send_crq(adapter, &crq);
4592
4593 crq.request_capability.capability =
4594 cpu_to_be16(REQ_RX_ADD_ENTRIES_PER_SUBCRQ);
4595 crq.request_capability.number =
4596 cpu_to_be64(adapter->req_rx_add_entries_per_subcrq);
4597 cap_reqs--;
4598 ibmvnic_send_crq(adapter, &crq);
4599
4600 crq.request_capability.capability = cpu_to_be16(REQ_MTU);
4601 crq.request_capability.number = cpu_to_be64(adapter->req_mtu);
4602 cap_reqs--;
4603 ibmvnic_send_crq(adapter, &crq);
4604
4605 if (adapter->netdev->flags & IFF_PROMISC) {
4606 if (adapter->promisc_supported) {
4607 crq.request_capability.capability =
4608 cpu_to_be16(PROMISC_REQUESTED);
4609 crq.request_capability.number = cpu_to_be64(1);
4610 cap_reqs--;
4611 ibmvnic_send_crq(adapter, &crq);
4612 }
4613 } else {
4614 crq.request_capability.capability =
4615 cpu_to_be16(PROMISC_REQUESTED);
4616 crq.request_capability.number = cpu_to_be64(0);
4617 cap_reqs--;
4618 ibmvnic_send_crq(adapter, &crq);
4619 }
4620
4621 /* Keep at end to catch any discrepancy between expected and actual
4622 * CRQs sent.
4623 */
4624 WARN_ON(cap_reqs != 0);
4625 }
4626
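/* Return nonzero if the descriptor at the current position of @scrq
 * has been marked valid by the server. The dma_rmb() orders the valid
 * flag read before any later reads of the descriptor contents.
 */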
4627 static int pending_scrq(struct ibmvnic_adapter *adapter,
4628 struct ibmvnic_sub_crq_queue *scrq)
4629 {
4630 union sub_crq *entry = &scrq->msgs[scrq->cur];
4631 int rc;
4632
4633 rc = !!(entry->generic.first & IBMVNIC_CRQ_CMD_RSP);
4634
4635 /* Ensure that the SCRQ valid flag is loaded prior to loading the
4636 * contents of the SCRQ descriptor
4637 */
4638 dma_rmb();
4639
4640 return rc;
4641 }
4642
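/* Consume and return the next valid descriptor from @scrq, advancing
 * the cursor under the queue lock, or NULL if the queue is empty.
 */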
4643 static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *adapter,
4644 struct ibmvnic_sub_crq_queue *scrq)
4645 {
4646 union sub_crq *entry;
4647 unsigned long flags;
4648
4649 spin_lock_irqsave(&scrq->lock, flags);
4650 entry = &scrq->msgs[scrq->cur];
4651 if (entry->generic.first & IBMVNIC_CRQ_CMD_RSP) {
4652 if (++scrq->cur == scrq->size)
4653 scrq->cur = 0;
4654 } else {
4655 entry = NULL;
4656 }
4657 spin_unlock_irqrestore(&scrq->lock, flags);
4658
4659 /* Ensure that the SCRQ valid flag is loaded prior to loading the
4660 * contents of the SCRQ descriptor
4661 */
4662 dma_rmb();
4663
4664 return entry;
4665 }
4666
4667 static union ibmvnic_crq *ibmvnic_next_crq(struct ibmvnic_adapter *adapter)
4668 {
4669 struct ibmvnic_crq_queue *queue = &adapter->crq;
4670 union ibmvnic_crq *crq;
4671
4672 crq = &queue->msgs[queue->cur];
4673 if (crq->generic.first & IBMVNIC_CRQ_CMD_RSP) {
4674 if (++queue->cur == queue->size)
4675 queue->cur = 0;
4676 } else {
4677 crq = NULL;
4678 }
4679
4680 return crq;
4681 }
4682
4683 static void print_subcrq_error(struct device *dev, int rc, const char *func)
4684 {
4685 switch (rc) {
4686 case H_PARAMETER:
4687 dev_warn_ratelimited(dev,
4688 "%s failed: Send request is malformed or adapter failover pending. (rc=%d)\n",
4689 func, rc);
4690 break;
4691 case H_CLOSED:
4692 dev_warn_ratelimited(dev,
4693 "%s failed: Backing queue closed. Adapter is down or failover pending. (rc=%d)\n",
4694 func, rc);
4695 break;
4696 default:
4697 dev_err_ratelimited(dev, "%s failed: (rc=%d)\n", func, rc);
4698 break;
4699 }
4700 }
4701
4702 static int send_subcrq_indirect(struct ibmvnic_adapter *adapter,
4703 u64 remote_handle, u64 ioba, u64 num_entries)
4704 {
4705 unsigned int ua = adapter->vdev->unit_address;
4706 struct device *dev = &adapter->vdev->dev;
4707 int rc;
4708
4709 /* Make sure the hypervisor sees the complete request */
4710 dma_wmb();
4711 rc = plpar_hcall_norets(H_SEND_SUB_CRQ_INDIRECT, ua,
4712 cpu_to_be64(remote_handle),
4713 ioba, num_entries);
4714
4715 if (rc)
4716 print_subcrq_error(dev, rc, __func__);
4717
4718 return rc;
4719 }
4720
4721 static int ibmvnic_send_crq(struct ibmvnic_adapter *adapter,
4722 union ibmvnic_crq *crq)
4723 {
4724 unsigned int ua = adapter->vdev->unit_address;
4725 struct device *dev = &adapter->vdev->dev;
4726 u64 *u64_crq = (u64 *)crq;
4727 int rc;
4728
4729 netdev_dbg(adapter->netdev, "Sending CRQ: %016lx %016lx\n",
4730 (unsigned long)cpu_to_be64(u64_crq[0]),
4731 (unsigned long)cpu_to_be64(u64_crq[1]));
4732
4733 if (!adapter->crq.active &&
4734 crq->generic.first != IBMVNIC_CRQ_INIT_CMD) {
4735 dev_warn(dev, "Invalid request detected while CRQ is inactive, possible device state change during reset\n");
4736 return -EINVAL;
4737 }
4738
4739 /* Make sure the hypervisor sees the complete request */
4740 dma_wmb();
4741
4742 rc = plpar_hcall_norets(H_SEND_CRQ, ua,
4743 cpu_to_be64(u64_crq[0]),
4744 cpu_to_be64(u64_crq[1]));
4745
4746 if (rc) {
4747 if (rc == H_CLOSED) {
4748 dev_warn(dev, "CRQ Queue closed\n");
4749 /* do not reset; report the failure and wait for passive init from the server */
4750 }
4751
4752 dev_warn(dev, "Send error (rc=%d)\n", rc);
4753 }
4754
4755 return rc;
4756 }
4757
4758 static int ibmvnic_send_crq_init(struct ibmvnic_adapter *adapter)
4759 {
4760 struct device *dev = &adapter->vdev->dev;
4761 union ibmvnic_crq crq;
4762 int retries = 100;
4763 int rc;
4764
4765 memset(&crq, 0, sizeof(crq));
4766 crq.generic.first = IBMVNIC_CRQ_INIT_CMD;
4767 crq.generic.cmd = IBMVNIC_CRQ_INIT;
4768 netdev_dbg(adapter->netdev, "Sending CRQ init\n");
4769
4770 do {
4771 rc = ibmvnic_send_crq(adapter, &crq);
4772 if (rc != H_CLOSED)
4773 break;
4774 retries--;
4775 msleep(50);
4776
4777 } while (retries > 0);
4778
4779 if (rc) {
4780 dev_err(dev, "Failed to send init request, rc = %d\n", rc);
4781 return rc;
4782 }
4783
4784 return 0;
4785 }
4786
4787 struct vnic_login_client_data {
4788 u8 type;
4789 __be16 len;
4790 char name[];
4791 } __packed;
4792
4793 static int vnic_client_data_len(struct ibmvnic_adapter *adapter)
4794 {
4795 int len;
4796
4797 /* Calculate the amount of buffer space needed for the
4798 * vnic client data in the login buffer. There are four entries,
4799 * OS name, LPAR name, device name, and a null last entry.
4800 */
4801 len = 4 * sizeof(struct vnic_login_client_data);
4802 len += 6; /* "Linux" plus NULL */
4803 len += strlen(utsname()->nodename) + 1;
4804 len += strlen(adapter->netdev->name) + 1;
4805
4806 return len;
4807 }
4808
4809 static void vnic_add_client_data(struct ibmvnic_adapter *adapter,
4810 struct vnic_login_client_data *vlcd)
4811 {
4812 const char *os_name = "Linux";
4813 int len;
4814
4815 /* Type 1 - LPAR OS */
4816 vlcd->type = 1;
4817 len = strlen(os_name) + 1;
4818 vlcd->len = cpu_to_be16(len);
4819 strscpy(vlcd->name, os_name, len);
4820 vlcd = (struct vnic_login_client_data *)(vlcd->name + len);
4821
4822 /* Type 2 - LPAR name */
4823 vlcd->type = 2;
4824 len = strlen(utsname()->nodename) + 1;
4825 vlcd->len = cpu_to_be16(len);
4826 strscpy(vlcd->name, utsname()->nodename, len);
4827 vlcd = (struct vnic_login_client_data *)(vlcd->name + len);
4828
4829 /* Type 3 - device name */
4830 vlcd->type = 3;
4831 len = strlen(adapter->netdev->name) + 1;
4832 vlcd->len = cpu_to_be16(len);
4833 strscpy(vlcd->name, adapter->netdev->name, len);
4834 }
4835
4836 static void ibmvnic_print_hex_dump(struct net_device *dev, void *buf,
4837 size_t len)
4838 {
4839 unsigned char hex_str[16 * 3];
4840
4841 for (size_t i = 0; i < len; i += 16) {
4842 hex_dump_to_buffer((unsigned char *)buf + i, len - i, 16, 8,
4843 hex_str, sizeof(hex_str), false);
4844 netdev_dbg(dev, "%s\n", hex_str);
4845 }
4846 }
4847
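/* Build and send the LOGIN request. The login buffer carries the CRQ
 * numbers of all tx and rx sub-CRQs plus the client data (OS, LPAR
 * and device names); a separate DMA-mapped buffer is set aside for
 * the server's login response.
 */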
4848 static int send_login(struct ibmvnic_adapter *adapter)
4849 {
4850 struct ibmvnic_login_rsp_buffer *login_rsp_buffer;
4851 struct ibmvnic_login_buffer *login_buffer;
4852 struct device *dev = &adapter->vdev->dev;
4853 struct vnic_login_client_data *vlcd;
4854 dma_addr_t rsp_buffer_token;
4855 dma_addr_t buffer_token;
4856 size_t rsp_buffer_size;
4857 union ibmvnic_crq crq;
4858 int client_data_len;
4859 size_t buffer_size;
4860 __be64 *tx_list_p;
4861 __be64 *rx_list_p;
4862 int rc;
4863 int i;
4864
4865 if (!adapter->tx_scrq || !adapter->rx_scrq) {
4866 netdev_err(adapter->netdev,
4867 "RX or TX queues are not allocated, device login failed\n");
4868 return -ENOMEM;
4869 }
4870
4871 release_login_buffer(adapter);
4872 release_login_rsp_buffer(adapter);
4873
4874 client_data_len = vnic_client_data_len(adapter);
4875
4876 buffer_size =
4877 sizeof(struct ibmvnic_login_buffer) +
4878 sizeof(u64) * (adapter->req_tx_queues + adapter->req_rx_queues) +
4879 client_data_len;
4880
4881 login_buffer = kzalloc(buffer_size, GFP_ATOMIC);
4882 if (!login_buffer)
4883 goto buf_alloc_failed;
4884
4885 buffer_token = dma_map_single(dev, login_buffer, buffer_size,
4886 DMA_TO_DEVICE);
4887 if (dma_mapping_error(dev, buffer_token)) {
4888 dev_err(dev, "Couldn't map login buffer\n");
4889 goto buf_map_failed;
4890 }
4891
4892 rsp_buffer_size = sizeof(struct ibmvnic_login_rsp_buffer) +
4893 sizeof(u64) * adapter->req_tx_queues +
4894 sizeof(u64) * adapter->req_rx_queues +
4895 sizeof(u64) * adapter->req_rx_queues +
4896 sizeof(u8) * IBMVNIC_TX_DESC_VERSIONS;
4897
4898 login_rsp_buffer = kmalloc(rsp_buffer_size, GFP_ATOMIC);
4899 if (!login_rsp_buffer)
4900 goto buf_rsp_alloc_failed;
4901
4902 rsp_buffer_token = dma_map_single(dev, login_rsp_buffer,
4903 rsp_buffer_size, DMA_FROM_DEVICE);
4904 if (dma_mapping_error(dev, rsp_buffer_token)) {
4905 dev_err(dev, "Couldn't map login rsp buffer\n");
4906 goto buf_rsp_map_failed;
4907 }
4908
4909 adapter->login_buf = login_buffer;
4910 adapter->login_buf_token = buffer_token;
4911 adapter->login_buf_sz = buffer_size;
4912 adapter->login_rsp_buf = login_rsp_buffer;
4913 adapter->login_rsp_buf_token = rsp_buffer_token;
4914 adapter->login_rsp_buf_sz = rsp_buffer_size;
4915
4916 login_buffer->len = cpu_to_be32(buffer_size);
4917 login_buffer->version = cpu_to_be32(INITIAL_VERSION_LB);
4918 login_buffer->num_txcomp_subcrqs = cpu_to_be32(adapter->req_tx_queues);
4919 login_buffer->off_txcomp_subcrqs =
4920 cpu_to_be32(sizeof(struct ibmvnic_login_buffer));
4921 login_buffer->num_rxcomp_subcrqs = cpu_to_be32(adapter->req_rx_queues);
4922 login_buffer->off_rxcomp_subcrqs =
4923 cpu_to_be32(sizeof(struct ibmvnic_login_buffer) +
4924 sizeof(u64) * adapter->req_tx_queues);
4925 login_buffer->login_rsp_ioba = cpu_to_be32(rsp_buffer_token);
4926 login_buffer->login_rsp_len = cpu_to_be32(rsp_buffer_size);
4927
4928 tx_list_p = (__be64 *)((char *)login_buffer +
4929 sizeof(struct ibmvnic_login_buffer));
4930 rx_list_p = (__be64 *)((char *)login_buffer +
4931 sizeof(struct ibmvnic_login_buffer) +
4932 sizeof(u64) * adapter->req_tx_queues);
4933
4934 for (i = 0; i < adapter->req_tx_queues; i++) {
4935 if (adapter->tx_scrq[i]) {
4936 tx_list_p[i] =
4937 cpu_to_be64(adapter->tx_scrq[i]->crq_num);
4938 }
4939 }
4940
4941 for (i = 0; i < adapter->req_rx_queues; i++) {
4942 if (adapter->rx_scrq[i]) {
4943 rx_list_p[i] =
4944 cpu_to_be64(adapter->rx_scrq[i]->crq_num);
4945 }
4946 }
4947
4948 /* Insert vNIC login client data */
4949 vlcd = (struct vnic_login_client_data *)
4950 ((char *)rx_list_p + (sizeof(u64) * adapter->req_rx_queues));
4951 login_buffer->client_data_offset =
4952 cpu_to_be32((char *)vlcd - (char *)login_buffer);
4953 login_buffer->client_data_len = cpu_to_be32(client_data_len);
4954
4955 vnic_add_client_data(adapter, vlcd);
4956
4957 netdev_dbg(adapter->netdev, "Login Buffer:\n");
4958 ibmvnic_print_hex_dump(adapter->netdev, adapter->login_buf,
4959 adapter->login_buf_sz);
4960
4961 memset(&crq, 0, sizeof(crq));
4962 crq.login.first = IBMVNIC_CRQ_CMD;
4963 crq.login.cmd = LOGIN;
4964 crq.login.ioba = cpu_to_be32(buffer_token);
4965 crq.login.len = cpu_to_be32(buffer_size);
4966
4967 adapter->login_pending = true;
4968 rc = ibmvnic_send_crq(adapter, &crq);
4969 if (rc) {
4970 adapter->login_pending = false;
4971 netdev_err(adapter->netdev, "Failed to send login, rc=%d\n", rc);
4972 goto buf_send_failed;
4973 }
4974
4975 return 0;
4976
4977 buf_send_failed:
4978 dma_unmap_single(dev, rsp_buffer_token, rsp_buffer_size,
4979 DMA_FROM_DEVICE);
4980 buf_rsp_map_failed:
4981 kfree(login_rsp_buffer);
4982 adapter->login_rsp_buf = NULL;
4983 buf_rsp_alloc_failed:
4984 dma_unmap_single(dev, buffer_token, buffer_size, DMA_TO_DEVICE);
4985 buf_map_failed:
4986 kfree(login_buffer);
4987 adapter->login_buf = NULL;
4988 buf_alloc_failed:
4989 return -ENOMEM;
4990 }
4991
4992 static int send_request_map(struct ibmvnic_adapter *adapter, dma_addr_t addr,
4993 u32 len, u8 map_id)
4994 {
4995 union ibmvnic_crq crq;
4996
4997 memset(&crq, 0, sizeof(crq));
4998 crq.request_map.first = IBMVNIC_CRQ_CMD;
4999 crq.request_map.cmd = REQUEST_MAP;
5000 crq.request_map.map_id = map_id;
5001 crq.request_map.ioba = cpu_to_be32(addr);
5002 crq.request_map.len = cpu_to_be32(len);
5003 return ibmvnic_send_crq(adapter, &crq);
5004 }
5005
5006 static int send_request_unmap(struct ibmvnic_adapter *adapter, u8 map_id)
5007 {
5008 union ibmvnic_crq crq;
5009
5010 memset(&crq, 0, sizeof(crq));
5011 crq.request_unmap.first = IBMVNIC_CRQ_CMD;
5012 crq.request_unmap.cmd = REQUEST_UNMAP;
5013 crq.request_unmap.map_id = map_id;
5014 return ibmvnic_send_crq(adapter, &crq);
5015 }
5016
5017 static void send_query_map(struct ibmvnic_adapter *adapter)
5018 {
5019 union ibmvnic_crq crq;
5020
5021 memset(&crq, 0, sizeof(crq));
5022 crq.query_map.first = IBMVNIC_CRQ_CMD;
5023 crq.query_map.cmd = QUERY_MAP;
5024 ibmvnic_send_crq(adapter, &crq);
5025 }
5026
5027 /* Send a series of CRQs requesting various capabilities of the VNIC server */
5028 static void send_query_cap(struct ibmvnic_adapter *adapter)
5029 {
5030 union ibmvnic_crq crq;
5031 int cap_reqs;
5032
5033 /* We send out 25 QUERY_CAPABILITY CRQs below. Initialize this count
5034 * upfront. When the tasklet receives a response to all of these, it
5035 * can send out the next protocol message (REQUEST_CAPABILITY).
5036 */
5037 cap_reqs = 25;
5038
5039 atomic_set(&adapter->running_cap_crqs, cap_reqs);
5040
5041 memset(&crq, 0, sizeof(crq));
5042 crq.query_capability.first = IBMVNIC_CRQ_CMD;
5043 crq.query_capability.cmd = QUERY_CAPABILITY;
5044
5045 crq.query_capability.capability = cpu_to_be16(MIN_TX_QUEUES);
5046 ibmvnic_send_crq(adapter, &crq);
5047 cap_reqs--;
5048
5049 crq.query_capability.capability = cpu_to_be16(MIN_RX_QUEUES);
5050 ibmvnic_send_crq(adapter, &crq);
5051 cap_reqs--;
5052
5053 crq.query_capability.capability = cpu_to_be16(MIN_RX_ADD_QUEUES);
5054 ibmvnic_send_crq(adapter, &crq);
5055 cap_reqs--;
5056
5057 crq.query_capability.capability = cpu_to_be16(MAX_TX_QUEUES);
5058 ibmvnic_send_crq(adapter, &crq);
5059 cap_reqs--;
5060
5061 crq.query_capability.capability = cpu_to_be16(MAX_RX_QUEUES);
5062 ibmvnic_send_crq(adapter, &crq);
5063 cap_reqs--;
5064
5065 crq.query_capability.capability = cpu_to_be16(MAX_RX_ADD_QUEUES);
5066 ibmvnic_send_crq(adapter, &crq);
5067 cap_reqs--;
5068
5069 crq.query_capability.capability =
5070 cpu_to_be16(MIN_TX_ENTRIES_PER_SUBCRQ);
5071 ibmvnic_send_crq(adapter, &crq);
5072 cap_reqs--;
5073
5074 crq.query_capability.capability =
5075 cpu_to_be16(MIN_RX_ADD_ENTRIES_PER_SUBCRQ);
5076 ibmvnic_send_crq(adapter, &crq);
5077 cap_reqs--;
5078
5079 crq.query_capability.capability =
5080 cpu_to_be16(MAX_TX_ENTRIES_PER_SUBCRQ);
5081 ibmvnic_send_crq(adapter, &crq);
5082 cap_reqs--;
5083
5084 crq.query_capability.capability =
5085 cpu_to_be16(MAX_RX_ADD_ENTRIES_PER_SUBCRQ);
5086 ibmvnic_send_crq(adapter, &crq);
5087 cap_reqs--;
5088
5089 crq.query_capability.capability = cpu_to_be16(TCP_IP_OFFLOAD);
5090 ibmvnic_send_crq(adapter, &crq);
5091 cap_reqs--;
5092
5093 crq.query_capability.capability = cpu_to_be16(PROMISC_SUPPORTED);
5094 ibmvnic_send_crq(adapter, &crq);
5095 cap_reqs--;
5096
5097 crq.query_capability.capability = cpu_to_be16(MIN_MTU);
5098 ibmvnic_send_crq(adapter, &crq);
5099 cap_reqs--;
5100
5101 crq.query_capability.capability = cpu_to_be16(MAX_MTU);
5102 ibmvnic_send_crq(adapter, &crq);
5103 cap_reqs--;
5104
5105 crq.query_capability.capability = cpu_to_be16(MAX_MULTICAST_FILTERS);
5106 ibmvnic_send_crq(adapter, &crq);
5107 cap_reqs--;
5108
5109 crq.query_capability.capability = cpu_to_be16(VLAN_HEADER_INSERTION);
5110 ibmvnic_send_crq(adapter, &crq);
5111 cap_reqs--;
5112
5113 crq.query_capability.capability = cpu_to_be16(RX_VLAN_HEADER_INSERTION);
5114 ibmvnic_send_crq(adapter, &crq);
5115 cap_reqs--;
5116
5117 crq.query_capability.capability = cpu_to_be16(MAX_TX_SG_ENTRIES);
5118 ibmvnic_send_crq(adapter, &crq);
5119 cap_reqs--;
5120
5121 crq.query_capability.capability = cpu_to_be16(RX_SG_SUPPORTED);
5122 ibmvnic_send_crq(adapter, &crq);
5123 cap_reqs--;
5124
5125 crq.query_capability.capability = cpu_to_be16(OPT_TX_COMP_SUB_QUEUES);
5126 ibmvnic_send_crq(adapter, &crq);
5127 cap_reqs--;
5128
5129 crq.query_capability.capability = cpu_to_be16(OPT_RX_COMP_QUEUES);
5130 ibmvnic_send_crq(adapter, &crq);
5131 cap_reqs--;
5132
5133 crq.query_capability.capability =
5134 cpu_to_be16(OPT_RX_BUFADD_Q_PER_RX_COMP_Q);
5135 ibmvnic_send_crq(adapter, &crq);
5136 cap_reqs--;
5137
5138 crq.query_capability.capability =
5139 cpu_to_be16(OPT_TX_ENTRIES_PER_SUBCRQ);
5140 ibmvnic_send_crq(adapter, &crq);
5141 cap_reqs--;
5142
5143 crq.query_capability.capability =
5144 cpu_to_be16(OPT_RXBA_ENTRIES_PER_SUBCRQ);
5145 ibmvnic_send_crq(adapter, &crq);
5146 cap_reqs--;
5147
5148 crq.query_capability.capability = cpu_to_be16(TX_RX_DESC_REQ);
5149
5150 ibmvnic_send_crq(adapter, &crq);
5151 cap_reqs--;
5152
5153 /* Keep at end to catch any discrepancy between expected and actual
5154 * CRQs sent.
5155 */
5156 WARN_ON(cap_reqs != 0);
5157 }
5158
5159 static void send_query_ip_offload(struct ibmvnic_adapter *adapter)
5160 {
5161 int buf_sz = sizeof(struct ibmvnic_query_ip_offload_buffer);
5162 struct device *dev = &adapter->vdev->dev;
5163 union ibmvnic_crq crq;
5164
5165 adapter->ip_offload_tok =
5166 dma_map_single(dev,
5167 &adapter->ip_offload_buf,
5168 buf_sz,
5169 DMA_FROM_DEVICE);
5170
5171 if (dma_mapping_error(dev, adapter->ip_offload_tok)) {
5172 if (!firmware_has_feature(FW_FEATURE_CMO))
5173 dev_err(dev, "Couldn't map offload buffer\n");
5174 return;
5175 }
5176
5177 memset(&crq, 0, sizeof(crq));
5178 crq.query_ip_offload.first = IBMVNIC_CRQ_CMD;
5179 crq.query_ip_offload.cmd = QUERY_IP_OFFLOAD;
5180 crq.query_ip_offload.len = cpu_to_be32(buf_sz);
5181 crq.query_ip_offload.ioba =
5182 cpu_to_be32(adapter->ip_offload_tok);
5183
5184 ibmvnic_send_crq(adapter, &crq);
5185 }
5186
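/* Push the checksum and TSO offload settings accepted from the
 * QUERY_IP_OFFLOAD response back to the server with a
 * CONTROL_IP_OFFLOAD CRQ, and update the netdev feature flags to
 * match what the server actually supports.
 */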
5187 static void send_control_ip_offload(struct ibmvnic_adapter *adapter)
5188 {
5189 struct ibmvnic_control_ip_offload_buffer *ctrl_buf = &adapter->ip_offload_ctrl;
5190 struct ibmvnic_query_ip_offload_buffer *buf = &adapter->ip_offload_buf;
5191 struct device *dev = &adapter->vdev->dev;
5192 netdev_features_t old_hw_features = 0;
5193 union ibmvnic_crq crq;
5194
5195 adapter->ip_offload_ctrl_tok =
5196 dma_map_single(dev,
5197 ctrl_buf,
5198 sizeof(adapter->ip_offload_ctrl),
5199 DMA_TO_DEVICE);
5200
5201 if (dma_mapping_error(dev, adapter->ip_offload_ctrl_tok)) {
5202 dev_err(dev, "Couldn't map ip offload control buffer\n");
5203 return;
5204 }
5205
5206 ctrl_buf->len = cpu_to_be32(sizeof(adapter->ip_offload_ctrl));
5207 ctrl_buf->version = cpu_to_be32(INITIAL_VERSION_IOB);
5208 ctrl_buf->ipv4_chksum = buf->ipv4_chksum;
5209 ctrl_buf->ipv6_chksum = buf->ipv6_chksum;
5210 ctrl_buf->tcp_ipv4_chksum = buf->tcp_ipv4_chksum;
5211 ctrl_buf->udp_ipv4_chksum = buf->udp_ipv4_chksum;
5212 ctrl_buf->tcp_ipv6_chksum = buf->tcp_ipv6_chksum;
5213 ctrl_buf->udp_ipv6_chksum = buf->udp_ipv6_chksum;
5214 ctrl_buf->large_tx_ipv4 = buf->large_tx_ipv4;
5215 ctrl_buf->large_tx_ipv6 = buf->large_tx_ipv6;
5216
5217 /* large_rx disabled for now, additional features needed */
5218 ctrl_buf->large_rx_ipv4 = 0;
5219 ctrl_buf->large_rx_ipv6 = 0;
5220
5221 if (adapter->state != VNIC_PROBING) {
5222 old_hw_features = adapter->netdev->hw_features;
5223 adapter->netdev->hw_features = 0;
5224 }
5225
5226 adapter->netdev->hw_features = NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO;
5227
5228 if (buf->tcp_ipv4_chksum || buf->udp_ipv4_chksum)
5229 adapter->netdev->hw_features |= NETIF_F_IP_CSUM;
5230
5231 if (buf->tcp_ipv6_chksum || buf->udp_ipv6_chksum)
5232 adapter->netdev->hw_features |= NETIF_F_IPV6_CSUM;
5233
5234 if ((adapter->netdev->features &
5235 (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)))
5236 adapter->netdev->hw_features |= NETIF_F_RXCSUM;
5237
5238 if (buf->large_tx_ipv4)
5239 adapter->netdev->hw_features |= NETIF_F_TSO;
5240 if (buf->large_tx_ipv6)
5241 adapter->netdev->hw_features |= NETIF_F_TSO6;
5242
5243 if (adapter->state == VNIC_PROBING) {
5244 adapter->netdev->features |= adapter->netdev->hw_features;
5245 } else if (old_hw_features != adapter->netdev->hw_features) {
5246 netdev_features_t tmp = 0;
5247
5248 /* disable features no longer supported */
5249 adapter->netdev->features &= adapter->netdev->hw_features;
5250 /* turn on features now supported if previously enabled */
5251 tmp = (old_hw_features ^ adapter->netdev->hw_features) &
5252 adapter->netdev->hw_features;
5253 adapter->netdev->features |=
5254 tmp & adapter->netdev->wanted_features;
5255 }
5256
5257 memset(&crq, 0, sizeof(crq));
5258 crq.control_ip_offload.first = IBMVNIC_CRQ_CMD;
5259 crq.control_ip_offload.cmd = CONTROL_IP_OFFLOAD;
5260 crq.control_ip_offload.len =
5261 cpu_to_be32(sizeof(adapter->ip_offload_ctrl));
5262 crq.control_ip_offload.ioba = cpu_to_be32(adapter->ip_offload_ctrl_tok);
5263 ibmvnic_send_crq(adapter, &crq);
5264 }
5265
5266 static void handle_vpd_size_rsp(union ibmvnic_crq *crq,
5267 struct ibmvnic_adapter *adapter)
5268 {
5269 struct device *dev = &adapter->vdev->dev;
5270
5271 if (crq->get_vpd_size_rsp.rc.code) {
5272 dev_err(dev, "Error retrieving VPD size, rc=%x\n",
5273 crq->get_vpd_size_rsp.rc.code);
5274 complete(&adapter->fw_done);
5275 return;
5276 }
5277
5278 adapter->vpd->len = be64_to_cpu(crq->get_vpd_size_rsp.len);
5279 complete(&adapter->fw_done);
5280 }
5281
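/* Parse the GET_VPD response: unmap the VPD buffer, look for the "RM"
 * keyword and copy the firmware level string that follows it into
 * adapter->fw_version, falling back to "N/A" if none was provided.
 */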
5282 static void handle_vpd_rsp(union ibmvnic_crq *crq,
5283 struct ibmvnic_adapter *adapter)
5284 {
5285 struct device *dev = &adapter->vdev->dev;
5286 unsigned char *substr = NULL;
5287 u8 fw_level_len = 0;
5288
5289 memset(adapter->fw_version, 0, 32);
5290
5291 dma_unmap_single(dev, adapter->vpd->dma_addr, adapter->vpd->len,
5292 DMA_FROM_DEVICE);
5293
5294 if (crq->get_vpd_rsp.rc.code) {
5295 dev_err(dev, "Error retrieving VPD from device, rc=%x\n",
5296 crq->get_vpd_rsp.rc.code);
5297 goto complete;
5298 }
5299
5300 /* get the position of the firmware version info
5301 * located after the ASCII 'RM' substring in the buffer
5302 */
5303 substr = strnstr(adapter->vpd->buff, "RM", adapter->vpd->len);
5304 if (!substr) {
5305 dev_info(dev, "Warning - No FW level has been provided in the VPD buffer by the VIOS Server\n");
5306 goto complete;
5307 }
5308
5309 /* get length of firmware level ASCII substring */
5310 if ((substr + 2) < (adapter->vpd->buff + adapter->vpd->len)) {
5311 fw_level_len = *(substr + 2);
5312 } else {
5313 dev_info(dev, "Length of FW substr extends beyond VPD buff\n");
5314 goto complete;
5315 }
5316
5317 /* copy firmware version string from vpd into adapter */
5318 if ((substr + 3 + fw_level_len) <
5319 (adapter->vpd->buff + adapter->vpd->len)) {
5320 strscpy(adapter->fw_version, substr + 3,
5321 sizeof(adapter->fw_version));
5322 } else {
5323 dev_info(dev, "FW substr extends beyond VPD buff\n");
5324 }
5325
5326 complete:
5327 if (adapter->fw_version[0] == '\0')
5328 strscpy((char *)adapter->fw_version, "N/A", sizeof(adapter->fw_version));
5329 complete(&adapter->fw_done);
5330 }
5331
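/* QUERY_IP_OFFLOAD completed: unmap the response buffer, log the
 * offload capabilities the server advertised and follow up with the
 * CONTROL_IP_OFFLOAD request built from them.
 */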
5332 static void handle_query_ip_offload_rsp(struct ibmvnic_adapter *adapter)
5333 {
5334 struct device *dev = &adapter->vdev->dev;
5335 struct ibmvnic_query_ip_offload_buffer *buf = &adapter->ip_offload_buf;
5336
5337 dma_unmap_single(dev, adapter->ip_offload_tok,
5338 sizeof(adapter->ip_offload_buf), DMA_FROM_DEVICE);
5339
5340 netdev_dbg(adapter->netdev, "Query IP Offload Buffer:\n");
5341 ibmvnic_print_hex_dump(adapter->netdev, buf,
5342 sizeof(adapter->ip_offload_buf));
5343
5344 netdev_dbg(adapter->netdev, "ipv4_chksum = %d\n", buf->ipv4_chksum);
5345 netdev_dbg(adapter->netdev, "ipv6_chksum = %d\n", buf->ipv6_chksum);
5346 netdev_dbg(adapter->netdev, "tcp_ipv4_chksum = %d\n",
5347 buf->tcp_ipv4_chksum);
5348 netdev_dbg(adapter->netdev, "tcp_ipv6_chksum = %d\n",
5349 buf->tcp_ipv6_chksum);
5350 netdev_dbg(adapter->netdev, "udp_ipv4_chksum = %d\n",
5351 buf->udp_ipv4_chksum);
5352 netdev_dbg(adapter->netdev, "udp_ipv6_chksum = %d\n",
5353 buf->udp_ipv6_chksum);
5354 netdev_dbg(adapter->netdev, "large_tx_ipv4 = %d\n",
5355 buf->large_tx_ipv4);
5356 netdev_dbg(adapter->netdev, "large_tx_ipv6 = %d\n",
5357 buf->large_tx_ipv6);
5358 netdev_dbg(adapter->netdev, "large_rx_ipv4 = %d\n",
5359 buf->large_rx_ipv4);
5360 netdev_dbg(adapter->netdev, "large_rx_ipv6 = %d\n",
5361 buf->large_rx_ipv6);
5362 netdev_dbg(adapter->netdev, "max_ipv4_hdr_sz = %d\n",
5363 buf->max_ipv4_header_size);
5364 netdev_dbg(adapter->netdev, "max_ipv6_hdr_sz = %d\n",
5365 buf->max_ipv6_header_size);
5366 netdev_dbg(adapter->netdev, "max_tcp_hdr_size = %d\n",
5367 buf->max_tcp_header_size);
5368 netdev_dbg(adapter->netdev, "max_udp_hdr_size = %d\n",
5369 buf->max_udp_header_size);
5370 netdev_dbg(adapter->netdev, "max_large_tx_size = %d\n",
5371 buf->max_large_tx_size);
5372 netdev_dbg(adapter->netdev, "max_large_rx_size = %d\n",
5373 buf->max_large_rx_size);
5374 netdev_dbg(adapter->netdev, "ipv6_ext_hdr = %d\n",
5375 buf->ipv6_extension_header);
5376 netdev_dbg(adapter->netdev, "tcp_pseudosum_req = %d\n",
5377 buf->tcp_pseudosum_req);
5378 netdev_dbg(adapter->netdev, "num_ipv6_ext_hd = %d\n",
5379 buf->num_ipv6_ext_headers);
5380 netdev_dbg(adapter->netdev, "off_ipv6_ext_hd = %d\n",
5381 buf->off_ipv6_ext_headers);
5382
5383 send_control_ip_offload(adapter);
5384 }
5385
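/* Translate a firmware error-indication cause code into a printable string. */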
5386 static const char *ibmvnic_fw_err_cause(u16 cause)
5387 {
5388 switch (cause) {
5389 case ADAPTER_PROBLEM:
5390 return "adapter problem";
5391 case BUS_PROBLEM:
5392 return "bus problem";
5393 case FW_PROBLEM:
5394 return "firmware problem";
5395 case DD_PROBLEM:
5396 return "device driver problem";
5397 case EEH_RECOVERY:
5398 return "EEH recovery";
5399 case FW_UPDATED:
5400 return "firmware updated";
5401 case LOW_MEMORY:
5402 return "low memory";
5403 default:
5404 return "unknown";
5405 }
5406 }
5407
5408 static void handle_error_indication(union ibmvnic_crq *crq,
5409 struct ibmvnic_adapter *adapter)
5410 {
5411 struct device *dev = &adapter->vdev->dev;
5412 u16 cause;
5413
5414 cause = be16_to_cpu(crq->error_indication.error_cause);
5415
5416 dev_warn_ratelimited(dev,
5417 "Firmware reports %serror, cause: %s. Starting recovery...\n",
5418 crq->error_indication.flags
5419 & IBMVNIC_FATAL_ERROR ? "FATAL " : "",
5420 ibmvnic_fw_err_cause(cause));
5421
5422 if (crq->error_indication.flags & IBMVNIC_FATAL_ERROR)
5423 ibmvnic_reset(adapter, VNIC_RESET_FATAL);
5424 else
5425 ibmvnic_reset(adapter, VNIC_RESET_NON_FATAL);
5426 }
5427
5428 static int handle_change_mac_rsp(union ibmvnic_crq *crq,
5429 struct ibmvnic_adapter *adapter)
5430 {
5431 struct net_device *netdev = adapter->netdev;
5432 struct device *dev = &adapter->vdev->dev;
5433 long rc;
5434
5435 rc = crq->change_mac_addr_rsp.rc.code;
5436 if (rc) {
5437 dev_err(dev, "Error %ld in CHANGE_MAC_ADDR_RSP\n", rc);
5438 goto out;
5439 }
5440 /* crq->change_mac_addr.mac_addr is the requested one
5441 * crq->change_mac_addr_rsp.mac_addr is the returned valid one.
5442 */
5443 eth_hw_addr_set(netdev, &crq->change_mac_addr_rsp.mac_addr[0]);
5444 ether_addr_copy(adapter->mac_addr,
5445 &crq->change_mac_addr_rsp.mac_addr[0]);
5446 out:
5447 complete(&adapter->fw_done);
5448 return rc;
5449 }
5450
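/* Handle one REQUEST_CAPABILITY response. On PARTIALSUCCESS the server
 * indicates what it can actually provide, so the requested value is
 * adjusted (or the MTU reverted to the fallback) and the request is
 * resent. Once all outstanding capability CRQs have been answered,
 * query IP offload support.
 */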
5451 static void handle_request_cap_rsp(union ibmvnic_crq *crq,
5452 struct ibmvnic_adapter *adapter)
5453 {
5454 struct device *dev = &adapter->vdev->dev;
5455 u64 *req_value;
5456 char *name;
5457
5458 atomic_dec(&adapter->running_cap_crqs);
5459 netdev_dbg(adapter->netdev, "Outstanding request-caps: %d\n",
5460 atomic_read(&adapter->running_cap_crqs));
5461 switch (be16_to_cpu(crq->request_capability_rsp.capability)) {
5462 case REQ_TX_QUEUES:
5463 req_value = &adapter->req_tx_queues;
5464 name = "tx";
5465 break;
5466 case REQ_RX_QUEUES:
5467 req_value = &adapter->req_rx_queues;
5468 name = "rx";
5469 break;
5470 case REQ_RX_ADD_QUEUES:
5471 req_value = &adapter->req_rx_add_queues;
5472 name = "rx_add";
5473 break;
5474 case REQ_TX_ENTRIES_PER_SUBCRQ:
5475 req_value = &adapter->req_tx_entries_per_subcrq;
5476 name = "tx_entries_per_subcrq";
5477 break;
5478 case REQ_RX_ADD_ENTRIES_PER_SUBCRQ:
5479 req_value = &adapter->req_rx_add_entries_per_subcrq;
5480 name = "rx_add_entries_per_subcrq";
5481 break;
5482 case REQ_MTU:
5483 req_value = &adapter->req_mtu;
5484 name = "mtu";
5485 break;
5486 case PROMISC_REQUESTED:
5487 req_value = &adapter->promisc;
5488 name = "promisc";
5489 break;
5490 default:
5491 dev_err(dev, "Got invalid cap request rsp %d\n",
5492 crq->request_capability.capability);
5493 return;
5494 }
5495
5496 switch (crq->request_capability_rsp.rc.code) {
5497 case SUCCESS:
5498 break;
5499 case PARTIALSUCCESS:
5500 dev_info(dev, "req=%lld, rsp=%ld in %s queue, retrying.\n",
5501 *req_value,
5502 (long)be64_to_cpu(crq->request_capability_rsp.number),
5503 name);
5504
5505 if (be16_to_cpu(crq->request_capability_rsp.capability) ==
5506 REQ_MTU) {
5507 pr_err("mtu of %llu is not supported. Reverting.\n",
5508 *req_value);
5509 *req_value = adapter->fallback.mtu;
5510 } else {
5511 *req_value =
5512 be64_to_cpu(crq->request_capability_rsp.number);
5513 }
5514
5515 send_request_cap(adapter, 1);
5516 return;
5517 default:
5518 dev_err(dev, "Error %d in request cap rsp\n",
5519 crq->request_capability_rsp.rc.code);
5520 return;
5521 }
5522
5523 /* Done receiving requested capabilities, query IP offload support */
5524 if (atomic_read(&adapter->running_cap_crqs) == 0)
5525 send_query_ip_offload(adapter);
5526 }
5527
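/* Process the LOGIN response: sanity-check it against the login
 * request, record the negotiated rx buffer size and copy the tx/rx
 * sub-CRQ handles out of the response buffer, then complete init_done.
 */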
5528 static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
5529 struct ibmvnic_adapter *adapter)
5530 {
5531 struct device *dev = &adapter->vdev->dev;
5532 struct net_device *netdev = adapter->netdev;
5533 struct ibmvnic_login_rsp_buffer *login_rsp = adapter->login_rsp_buf;
5534 struct ibmvnic_login_buffer *login = adapter->login_buf;
5535 u64 *tx_handle_array;
5536 u64 *rx_handle_array;
5537 int num_tx_pools;
5538 int num_rx_pools;
5539 u64 *size_array;
5540 u32 rsp_len;
5541 int i;
5542
5543 /* CHECK: Test/set of login_pending does not need to be atomic
5544 * because only ibmvnic_tasklet tests/clears this.
5545 */
5546 if (!adapter->login_pending) {
5547 netdev_warn(netdev, "Ignoring unexpected login response\n");
5548 return 0;
5549 }
5550 adapter->login_pending = false;
5551
5552 /* If the number of queues requested can't be allocated by the
5553 * server, the login response will return with code 1. We will need
5554 * to resend the login buffer with fewer queues requested.
5555 */
5556 if (login_rsp_crq->generic.rc.code) {
5557 adapter->init_done_rc = login_rsp_crq->generic.rc.code;
5558 complete(&adapter->init_done);
5559 return 0;
5560 }
5561
5562 if (adapter->failover_pending) {
5563 adapter->init_done_rc = -EAGAIN;
5564 netdev_dbg(netdev, "Failover pending, ignoring login response\n");
5565 complete(&adapter->init_done);
5566 /* login response buffer will be released on reset */
5567 return 0;
5568 }
5569
5570 netdev->mtu = adapter->req_mtu - ETH_HLEN;
5571
5572 netdev_dbg(adapter->netdev, "Login Response Buffer:\n");
5573 ibmvnic_print_hex_dump(netdev, adapter->login_rsp_buf,
5574 adapter->login_rsp_buf_sz);
5575
5576 /* Sanity checks */
5577 if (login->num_txcomp_subcrqs != login_rsp->num_txsubm_subcrqs ||
5578 (be32_to_cpu(login->num_rxcomp_subcrqs) *
5579 adapter->req_rx_add_queues !=
5580 be32_to_cpu(login_rsp->num_rxadd_subcrqs))) {
5581 dev_err(dev, "FATAL: Inconsistent login and login rsp\n");
5582 ibmvnic_reset(adapter, VNIC_RESET_FATAL);
5583 return -EIO;
5584 }
5585
5586 rsp_len = be32_to_cpu(login_rsp->len);
5587 if (be32_to_cpu(login->login_rsp_len) < rsp_len ||
5588 rsp_len <= be32_to_cpu(login_rsp->off_txsubm_subcrqs) ||
5589 rsp_len <= be32_to_cpu(login_rsp->off_rxadd_subcrqs) ||
5590 rsp_len <= be32_to_cpu(login_rsp->off_rxadd_buff_size) ||
5591 rsp_len <= be32_to_cpu(login_rsp->off_supp_tx_desc)) {
5592 /* This can happen if a login request times out and there are
5593 * 2 outstanding login requests sent; the LOGIN_RSP crq
5594 * could have been for the older login request, so we would
5595 * be parsing the newer response buffer, which may be incomplete
5596 */
5597 dev_err(dev, "FATAL: Login rsp offsets/lengths invalid\n");
5598 ibmvnic_reset(adapter, VNIC_RESET_FATAL);
5599 return -EIO;
5600 }
5601
5602 size_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
5603 be32_to_cpu(adapter->login_rsp_buf->off_rxadd_buff_size));
5604 /* variable buffer sizes are not supported, so just read the
5605 * first entry.
5606 */
5607 adapter->cur_rx_buf_sz = be64_to_cpu(size_array[0]);
5608
5609 num_tx_pools = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
5610 num_rx_pools = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
5611
5612 tx_handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
5613 be32_to_cpu(adapter->login_rsp_buf->off_txsubm_subcrqs));
5614 rx_handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
5615 be32_to_cpu(adapter->login_rsp_buf->off_rxadd_subcrqs));
5616
5617 for (i = 0; i < num_tx_pools; i++)
5618 adapter->tx_scrq[i]->handle = tx_handle_array[i];
5619
5620 for (i = 0; i < num_rx_pools; i++)
5621 adapter->rx_scrq[i]->handle = rx_handle_array[i];
5622
5623 adapter->num_active_tx_scrqs = num_tx_pools;
5624 adapter->num_active_rx_scrqs = num_rx_pools;
5625 release_login_rsp_buffer(adapter);
5626 release_login_buffer(adapter);
5627 complete(&adapter->init_done);
5628
5629 return 0;
5630 }
5631
5632 static void handle_request_unmap_rsp(union ibmvnic_crq *crq,
5633 struct ibmvnic_adapter *adapter)
5634 {
5635 struct device *dev = &adapter->vdev->dev;
5636 long rc;
5637
5638 rc = crq->request_unmap_rsp.rc.code;
5639 if (rc)
5640 dev_err(dev, "Error %ld in REQUEST_UNMAP_RSP\n", rc);
5641 }
5642
5643 static void handle_query_map_rsp(union ibmvnic_crq *crq,
5644 struct ibmvnic_adapter *adapter)
5645 {
5646 struct net_device *netdev = adapter->netdev;
5647 struct device *dev = &adapter->vdev->dev;
5648 long rc;
5649
5650 rc = crq->query_map_rsp.rc.code;
5651 if (rc) {
5652 dev_err(dev, "Error %ld in QUERY_MAP_RSP\n", rc);
5653 return;
5654 }
5655 netdev_dbg(netdev, "page_size = %d\ntot_pages = %u\nfree_pages = %u\n",
5656 crq->query_map_rsp.page_size,
5657 __be32_to_cpu(crq->query_map_rsp.tot_pages),
5658 __be32_to_cpu(crq->query_map_rsp.free_pages));
5659 }
5660
5661 static void handle_query_cap_rsp(union ibmvnic_crq *crq,
5662 struct ibmvnic_adapter *adapter)
5663 {
5664 struct net_device *netdev = adapter->netdev;
5665 struct device *dev = &adapter->vdev->dev;
5666 long rc;
5667
5668 atomic_dec(&adapter->running_cap_crqs);
5669 netdev_dbg(netdev, "Outstanding queries: %d\n",
5670 atomic_read(&adapter->running_cap_crqs));
5671 rc = crq->query_capability.rc.code;
5672 if (rc) {
5673 dev_err(dev, "Error %ld in QUERY_CAP_RSP\n", rc);
5674 goto out;
5675 }
5676
5677 switch (be16_to_cpu(crq->query_capability.capability)) {
5678 case MIN_TX_QUEUES:
5679 adapter->min_tx_queues =
5680 be64_to_cpu(crq->query_capability.number);
5681 netdev_dbg(netdev, "min_tx_queues = %lld\n",
5682 adapter->min_tx_queues);
5683 break;
5684 case MIN_RX_QUEUES:
5685 adapter->min_rx_queues =
5686 be64_to_cpu(crq->query_capability.number);
5687 netdev_dbg(netdev, "min_rx_queues = %lld\n",
5688 adapter->min_rx_queues);
5689 break;
5690 case MIN_RX_ADD_QUEUES:
5691 adapter->min_rx_add_queues =
5692 be64_to_cpu(crq->query_capability.number);
5693 netdev_dbg(netdev, "min_rx_add_queues = %lld\n",
5694 adapter->min_rx_add_queues);
5695 break;
5696 case MAX_TX_QUEUES:
5697 adapter->max_tx_queues =
5698 be64_to_cpu(crq->query_capability.number);
5699 netdev_dbg(netdev, "max_tx_queues = %lld\n",
5700 adapter->max_tx_queues);
5701 break;
5702 case MAX_RX_QUEUES:
5703 adapter->max_rx_queues =
5704 be64_to_cpu(crq->query_capability.number);
5705 netdev_dbg(netdev, "max_rx_queues = %lld\n",
5706 adapter->max_rx_queues);
5707 break;
5708 case MAX_RX_ADD_QUEUES:
5709 adapter->max_rx_add_queues =
5710 be64_to_cpu(crq->query_capability.number);
5711 netdev_dbg(netdev, "max_rx_add_queues = %lld\n",
5712 adapter->max_rx_add_queues);
5713 break;
5714 case MIN_TX_ENTRIES_PER_SUBCRQ:
5715 adapter->min_tx_entries_per_subcrq =
5716 be64_to_cpu(crq->query_capability.number);
5717 netdev_dbg(netdev, "min_tx_entries_per_subcrq = %lld\n",
5718 adapter->min_tx_entries_per_subcrq);
5719 break;
5720 case MIN_RX_ADD_ENTRIES_PER_SUBCRQ:
5721 adapter->min_rx_add_entries_per_subcrq =
5722 be64_to_cpu(crq->query_capability.number);
5723 netdev_dbg(netdev, "min_rx_add_entrs_per_subcrq = %lld\n",
5724 adapter->min_rx_add_entries_per_subcrq);
5725 break;
5726 case MAX_TX_ENTRIES_PER_SUBCRQ:
5727 adapter->max_tx_entries_per_subcrq =
5728 be64_to_cpu(crq->query_capability.number);
5729 netdev_dbg(netdev, "max_tx_entries_per_subcrq = %lld\n",
5730 adapter->max_tx_entries_per_subcrq);
5731 break;
5732 case MAX_RX_ADD_ENTRIES_PER_SUBCRQ:
5733 adapter->max_rx_add_entries_per_subcrq =
5734 be64_to_cpu(crq->query_capability.number);
5735 netdev_dbg(netdev, "max_rx_add_entrs_per_subcrq = %lld\n",
5736 adapter->max_rx_add_entries_per_subcrq);
5737 break;
5738 case TCP_IP_OFFLOAD:
5739 adapter->tcp_ip_offload =
5740 be64_to_cpu(crq->query_capability.number);
5741 netdev_dbg(netdev, "tcp_ip_offload = %lld\n",
5742 adapter->tcp_ip_offload);
5743 break;
5744 case PROMISC_SUPPORTED:
5745 adapter->promisc_supported =
5746 be64_to_cpu(crq->query_capability.number);
5747 netdev_dbg(netdev, "promisc_supported = %lld\n",
5748 adapter->promisc_supported);
5749 break;
5750 case MIN_MTU:
5751 adapter->min_mtu = be64_to_cpu(crq->query_capability.number);
5752 netdev->min_mtu = adapter->min_mtu - ETH_HLEN;
5753 netdev_dbg(netdev, "min_mtu = %lld\n", adapter->min_mtu);
5754 break;
5755 case MAX_MTU:
5756 adapter->max_mtu = be64_to_cpu(crq->query_capability.number);
5757 netdev->max_mtu = adapter->max_mtu - ETH_HLEN;
5758 netdev_dbg(netdev, "max_mtu = %lld\n", adapter->max_mtu);
5759 break;
5760 case MAX_MULTICAST_FILTERS:
5761 adapter->max_multicast_filters =
5762 be64_to_cpu(crq->query_capability.number);
5763 netdev_dbg(netdev, "max_multicast_filters = %lld\n",
5764 adapter->max_multicast_filters);
5765 break;
5766 case VLAN_HEADER_INSERTION:
5767 adapter->vlan_header_insertion =
5768 be64_to_cpu(crq->query_capability.number);
5769 if (adapter->vlan_header_insertion)
5770 netdev->features |= NETIF_F_HW_VLAN_STAG_TX;
5771 netdev_dbg(netdev, "vlan_header_insertion = %lld\n",
5772 adapter->vlan_header_insertion);
5773 break;
5774 case RX_VLAN_HEADER_INSERTION:
5775 adapter->rx_vlan_header_insertion =
5776 be64_to_cpu(crq->query_capability.number);
5777 netdev_dbg(netdev, "rx_vlan_header_insertion = %lld\n",
5778 adapter->rx_vlan_header_insertion);
5779 break;
5780 case MAX_TX_SG_ENTRIES:
5781 adapter->max_tx_sg_entries =
5782 be64_to_cpu(crq->query_capability.number);
5783 netdev_dbg(netdev, "max_tx_sg_entries = %lld\n",
5784 adapter->max_tx_sg_entries);
5785 break;
5786 case RX_SG_SUPPORTED:
5787 adapter->rx_sg_supported =
5788 be64_to_cpu(crq->query_capability.number);
5789 netdev_dbg(netdev, "rx_sg_supported = %lld\n",
5790 adapter->rx_sg_supported);
5791 break;
5792 case OPT_TX_COMP_SUB_QUEUES:
5793 adapter->opt_tx_comp_sub_queues =
5794 be64_to_cpu(crq->query_capability.number);
5795 netdev_dbg(netdev, "opt_tx_comp_sub_queues = %lld\n",
5796 adapter->opt_tx_comp_sub_queues);
5797 break;
5798 case OPT_RX_COMP_QUEUES:
5799 adapter->opt_rx_comp_queues =
5800 be64_to_cpu(crq->query_capability.number);
5801 netdev_dbg(netdev, "opt_rx_comp_queues = %lld\n",
5802 adapter->opt_rx_comp_queues);
5803 break;
5804 case OPT_RX_BUFADD_Q_PER_RX_COMP_Q:
5805 adapter->opt_rx_bufadd_q_per_rx_comp_q =
5806 be64_to_cpu(crq->query_capability.number);
5807 netdev_dbg(netdev, "opt_rx_bufadd_q_per_rx_comp_q = %lld\n",
5808 adapter->opt_rx_bufadd_q_per_rx_comp_q);
5809 break;
5810 case OPT_TX_ENTRIES_PER_SUBCRQ:
5811 adapter->opt_tx_entries_per_subcrq =
5812 be64_to_cpu(crq->query_capability.number);
5813 netdev_dbg(netdev, "opt_tx_entries_per_subcrq = %lld\n",
5814 adapter->opt_tx_entries_per_subcrq);
5815 break;
5816 case OPT_RXBA_ENTRIES_PER_SUBCRQ:
5817 adapter->opt_rxba_entries_per_subcrq =
5818 be64_to_cpu(crq->query_capability.number);
5819 netdev_dbg(netdev, "opt_rxba_entries_per_subcrq = %lld\n",
5820 adapter->opt_rxba_entries_per_subcrq);
5821 break;
5822 case TX_RX_DESC_REQ:
5823 adapter->tx_rx_desc_req = crq->query_capability.number;
5824 netdev_dbg(netdev, "tx_rx_desc_req = %llx\n",
5825 adapter->tx_rx_desc_req);
5826 break;
5827
5828 default:
5829 netdev_err(netdev, "Got invalid cap rsp %d\n",
5830 crq->query_capability.capability);
5831 }
5832
5833 out:
5834 if (atomic_read(&adapter->running_cap_crqs) == 0)
5835 send_request_cap(adapter, 0);
5836 }
5837
5838 static int send_query_phys_parms(struct ibmvnic_adapter *adapter)
5839 {
5840 union ibmvnic_crq crq;
5841 int rc;
5842
5843 memset(&crq, 0, sizeof(crq));
5844 crq.query_phys_parms.first = IBMVNIC_CRQ_CMD;
5845 crq.query_phys_parms.cmd = QUERY_PHYS_PARMS;
5846
5847 mutex_lock(&adapter->fw_lock);
5848 adapter->fw_done_rc = 0;
5849 reinit_completion(&adapter->fw_done);
5850
5851 rc = ibmvnic_send_crq(adapter, &crq);
5852 if (rc) {
5853 mutex_unlock(&adapter->fw_lock);
5854 return rc;
5855 }
5856
5857 rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
5858 if (rc) {
5859 mutex_unlock(&adapter->fw_lock);
5860 return rc;
5861 }
5862
5863 mutex_unlock(&adapter->fw_lock);
5864 return adapter->fw_done_rc ? -EIO : 0;
5865 }
5866
5867 static int handle_query_phys_parms_rsp(union ibmvnic_crq *crq,
5868 struct ibmvnic_adapter *adapter)
5869 {
5870 struct net_device *netdev = adapter->netdev;
5871 int rc;
5872 __be32 rspeed = cpu_to_be32(crq->query_phys_parms_rsp.speed);
5873
5874 rc = crq->query_phys_parms_rsp.rc.code;
5875 if (rc) {
5876 netdev_err(netdev, "Error %d in QUERY_PHYS_PARMS\n", rc);
5877 return rc;
5878 }
5879 switch (rspeed) {
5880 case IBMVNIC_10MBPS:
5881 adapter->speed = SPEED_10;
5882 break;
5883 case IBMVNIC_100MBPS:
5884 adapter->speed = SPEED_100;
5885 break;
5886 case IBMVNIC_1GBPS:
5887 adapter->speed = SPEED_1000;
5888 break;
5889 case IBMVNIC_10GBPS:
5890 adapter->speed = SPEED_10000;
5891 break;
5892 case IBMVNIC_25GBPS:
5893 adapter->speed = SPEED_25000;
5894 break;
5895 case IBMVNIC_40GBPS:
5896 adapter->speed = SPEED_40000;
5897 break;
5898 case IBMVNIC_50GBPS:
5899 adapter->speed = SPEED_50000;
5900 break;
5901 case IBMVNIC_100GBPS:
5902 adapter->speed = SPEED_100000;
5903 break;
5904 case IBMVNIC_200GBPS:
5905 adapter->speed = SPEED_200000;
5906 break;
5907 default:
5908 if (netif_carrier_ok(netdev))
5909 netdev_warn(netdev, "Unknown speed 0x%08x\n", rspeed);
5910 adapter->speed = SPEED_UNKNOWN;
5911 }
5912 if (crq->query_phys_parms_rsp.flags1 & IBMVNIC_FULL_DUPLEX)
5913 adapter->duplex = DUPLEX_FULL;
5914 else if (crq->query_phys_parms_rsp.flags1 & IBMVNIC_HALF_DUPLEX)
5915 adapter->duplex = DUPLEX_HALF;
5916 else
5917 adapter->duplex = DUPLEX_UNKNOWN;
5918
5919 return rc;
5920 }
5921
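/* Top-level CRQ dispatcher, run from the tasklet for every message
 * pulled off the queue: partner init and transport events are handled
 * here directly, command responses are fanned out to the handlers above.
 */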
5922 static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
5923 struct ibmvnic_adapter *adapter)
5924 {
5925 struct ibmvnic_generic_crq *gen_crq = &crq->generic;
5926 struct net_device *netdev = adapter->netdev;
5927 struct device *dev = &adapter->vdev->dev;
5928 u64 *u64_crq = (u64 *)crq;
5929 long rc;
5930
5931 netdev_dbg(netdev, "Handling CRQ: %016lx %016lx\n",
5932 (unsigned long)cpu_to_be64(u64_crq[0]),
5933 (unsigned long)cpu_to_be64(u64_crq[1]));
5934 switch (gen_crq->first) {
5935 case IBMVNIC_CRQ_INIT_RSP:
5936 switch (gen_crq->cmd) {
5937 case IBMVNIC_CRQ_INIT:
5938 dev_info(dev, "Partner initialized\n");
5939 adapter->from_passive_init = true;
5940 /* Discard any stale login responses from prev reset.
5941 * CHECK: should we clear even on INIT_COMPLETE?
5942 */
5943 adapter->login_pending = false;
5944
5945 if (adapter->state == VNIC_DOWN)
5946 rc = ibmvnic_reset(adapter, VNIC_RESET_PASSIVE_INIT);
5947 else
5948 rc = ibmvnic_reset(adapter, VNIC_RESET_FAILOVER);
5949
5950 if (rc && rc != -EBUSY) {
5951 /* We were unable to schedule the failover
5952 * reset either because the adapter was still
5953 * probing (eg: during kexec) or we could not
5954 * allocate memory. Clear the failover_pending
5955 * flag since no one else will. We ignore
5956 * EBUSY because it means either FAILOVER reset
5957 * is already scheduled or the adapter is
5958 * being removed.
5959 */
5960 netdev_err(netdev,
5961 "Error %ld scheduling failover reset\n",
5962 rc);
5963 adapter->failover_pending = false;
5964 }
5965
5966 if (!completion_done(&adapter->init_done)) {
5967 if (!adapter->init_done_rc)
5968 adapter->init_done_rc = -EAGAIN;
5969 complete(&adapter->init_done);
5970 }
5971
5972 break;
5973 case IBMVNIC_CRQ_INIT_COMPLETE:
5974 dev_info(dev, "Partner initialization complete\n");
5975 adapter->crq.active = true;
5976 send_version_xchg(adapter);
5977 break;
5978 default:
5979 dev_err(dev, "Unknown crq cmd: %d\n", gen_crq->cmd);
5980 }
5981 return;
5982 case IBMVNIC_CRQ_XPORT_EVENT:
5983 netif_carrier_off(netdev);
5984 adapter->crq.active = false;
5985 /* terminate any thread waiting for a response
5986 * from the device
5987 */
5988 if (!completion_done(&adapter->fw_done)) {
5989 adapter->fw_done_rc = -EIO;
5990 complete(&adapter->fw_done);
5991 }
5992
5993 /* if we got here during crq-init, retry crq-init */
5994 if (!completion_done(&adapter->init_done)) {
5995 adapter->init_done_rc = -EAGAIN;
5996 complete(&adapter->init_done);
5997 }
5998
5999 if (!completion_done(&adapter->stats_done))
6000 complete(&adapter->stats_done);
6001 if (test_bit(0, &adapter->resetting))
6002 adapter->force_reset_recovery = true;
6003 if (gen_crq->cmd == IBMVNIC_PARTITION_MIGRATED) {
6004 dev_info(dev, "Migrated, re-enabling adapter\n");
6005 ibmvnic_reset(adapter, VNIC_RESET_MOBILITY);
6006 } else if (gen_crq->cmd == IBMVNIC_DEVICE_FAILOVER) {
6007 dev_info(dev, "Backing device failover detected\n");
6008 adapter->failover_pending = true;
6009 } else {
6010 /* The adapter lost the connection */
6011 dev_err(dev, "Virtual Adapter failed (rc=%d)\n",
6012 gen_crq->cmd);
6013 ibmvnic_reset(adapter, VNIC_RESET_FATAL);
6014 }
6015 return;
6016 case IBMVNIC_CRQ_CMD_RSP:
6017 break;
6018 default:
6019 dev_err(dev, "Got an invalid msg type 0x%02x\n",
6020 gen_crq->first);
6021 return;
6022 }
6023
6024 switch (gen_crq->cmd) {
6025 case VERSION_EXCHANGE_RSP:
6026 rc = crq->version_exchange_rsp.rc.code;
6027 if (rc) {
6028 dev_err(dev, "Error %ld in VERSION_EXCHG_RSP\n", rc);
6029 break;
6030 }
6031 ibmvnic_version =
6032 be16_to_cpu(crq->version_exchange_rsp.version);
6033 dev_info(dev, "Partner protocol version is %d\n",
6034 ibmvnic_version);
6035 send_query_cap(adapter);
6036 break;
6037 case QUERY_CAPABILITY_RSP:
6038 handle_query_cap_rsp(crq, adapter);
6039 break;
6040 case QUERY_MAP_RSP:
6041 handle_query_map_rsp(crq, adapter);
6042 break;
6043 case REQUEST_MAP_RSP:
6044 adapter->fw_done_rc = crq->request_map_rsp.rc.code;
6045 complete(&adapter->fw_done);
6046 break;
6047 case REQUEST_UNMAP_RSP:
6048 handle_request_unmap_rsp(crq, adapter);
6049 break;
6050 case REQUEST_CAPABILITY_RSP:
6051 handle_request_cap_rsp(crq, adapter);
6052 break;
6053 case LOGIN_RSP:
6054 netdev_dbg(netdev, "Got Login Response\n");
6055 handle_login_rsp(crq, adapter);
6056 break;
6057 case LOGICAL_LINK_STATE_RSP:
6058 netdev_dbg(netdev,
6059 "Got Logical Link State Response, state: %d rc: %d\n",
6060 crq->logical_link_state_rsp.link_state,
6061 crq->logical_link_state_rsp.rc.code);
6062 adapter->logical_link_state =
6063 crq->logical_link_state_rsp.link_state;
6064 adapter->init_done_rc = crq->logical_link_state_rsp.rc.code;
6065 complete(&adapter->init_done);
6066 break;
6067 case LINK_STATE_INDICATION:
6068 netdev_dbg(netdev, "Got Logical Link State Indication\n");
6069 adapter->phys_link_state =
6070 crq->link_state_indication.phys_link_state;
6071 adapter->logical_link_state =
6072 crq->link_state_indication.logical_link_state;
6073 if (adapter->phys_link_state && adapter->logical_link_state)
6074 netif_carrier_on(netdev);
6075 else
6076 netif_carrier_off(netdev);
6077 break;
6078 case CHANGE_MAC_ADDR_RSP:
6079 netdev_dbg(netdev, "Got MAC address change Response\n");
6080 adapter->fw_done_rc = handle_change_mac_rsp(crq, adapter);
6081 break;
6082 case ERROR_INDICATION:
6083 netdev_dbg(netdev, "Got Error Indication\n");
6084 handle_error_indication(crq, adapter);
6085 break;
6086 case REQUEST_STATISTICS_RSP:
6087 netdev_dbg(netdev, "Got Statistics Response\n");
6088 complete(&adapter->stats_done);
6089 break;
6090 case QUERY_IP_OFFLOAD_RSP:
6091 netdev_dbg(netdev, "Got Query IP offload Response\n");
6092 handle_query_ip_offload_rsp(adapter);
6093 break;
6094 case MULTICAST_CTRL_RSP:
6095 netdev_dbg(netdev, "Got multicast control Response\n");
6096 break;
6097 case CONTROL_IP_OFFLOAD_RSP:
6098 netdev_dbg(netdev, "Got Control IP offload Response\n");
6099 dma_unmap_single(dev, adapter->ip_offload_ctrl_tok,
6100 sizeof(adapter->ip_offload_ctrl),
6101 DMA_TO_DEVICE);
6102 complete(&adapter->init_done);
6103 break;
6104 case COLLECT_FW_TRACE_RSP:
6105 netdev_dbg(netdev, "Got Collect firmware trace Response\n");
6106 complete(&adapter->fw_done);
6107 break;
6108 case GET_VPD_SIZE_RSP:
6109 handle_vpd_size_rsp(crq, adapter);
6110 break;
6111 case GET_VPD_RSP:
6112 handle_vpd_rsp(crq, adapter);
6113 break;
6114 case QUERY_PHYS_PARMS_RSP:
6115 adapter->fw_done_rc = handle_query_phys_parms_rsp(crq, adapter);
6116 complete(&adapter->fw_done);
6117 break;
6118 default:
6119 netdev_err(netdev, "Got an invalid cmd type 0x%02x\n",
6120 gen_crq->cmd);
6121 }
6122 }
6123
6124 static irqreturn_t ibmvnic_interrupt(int irq, void *instance)
6125 {
6126 struct ibmvnic_adapter *adapter = instance;
6127
6128 tasklet_schedule(&adapter->tasklet);
6129 return IRQ_HANDLED;
6130 }
6131
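/* Tasklet body: drain the CRQ under the queue lock, passing each valid
 * entry to ibmvnic_handle_crq() and clearing it so the slot can be
 * reused.
 */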
6132 static void ibmvnic_tasklet(struct tasklet_struct *t)
6133 {
6134 struct ibmvnic_adapter *adapter = from_tasklet(adapter, t, tasklet);
6135 struct ibmvnic_crq_queue *queue = &adapter->crq;
6136 union ibmvnic_crq *crq;
6137 unsigned long flags;
6138
6139 spin_lock_irqsave(&queue->lock, flags);
6140
6141 /* Pull all the valid messages off the CRQ */
6142 while ((crq = ibmvnic_next_crq(adapter)) != NULL) {
6143 /* This barrier makes sure ibmvnic_next_crq()'s
6144 * crq->generic.first & IBMVNIC_CRQ_CMD_RSP is loaded
6145 * before ibmvnic_handle_crq()'s
6146 * switch(gen_crq->first) and switch(gen_crq->cmd).
6147 */
6148 dma_rmb();
6149 ibmvnic_handle_crq(crq, adapter);
6150 crq->generic.first = 0;
6151 }
6152
6153 spin_unlock_irqrestore(&queue->lock, flags);
6154 }
6155
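/* Ask the hypervisor to re-enable the main CRQ, retrying while it
 * reports that the operation is busy or still in progress.
 */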
6156 static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *adapter)
6157 {
6158 struct vio_dev *vdev = adapter->vdev;
6159 int rc;
6160
6161 do {
6162 rc = plpar_hcall_norets(H_ENABLE_CRQ, vdev->unit_address);
6163 } while (rc == H_IN_PROGRESS || rc == H_BUSY || H_IS_LONG_BUSY(rc));
6164
6165 if (rc)
6166 dev_err(&vdev->dev, "Error enabling adapter (rc=%d)\n", rc);
6167
6168 return rc;
6169 }
6170
6171 static int ibmvnic_reset_crq(struct ibmvnic_adapter *adapter)
6172 {
6173 struct ibmvnic_crq_queue *crq = &adapter->crq;
6174 struct device *dev = &adapter->vdev->dev;
6175 struct vio_dev *vdev = adapter->vdev;
6176 int rc;
6177
6178 /* Close the CRQ */
6179 do {
6180 rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
6181 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
6182
6183 /* Clean out the queue */
6184 if (!crq->msgs)
6185 return -EINVAL;
6186
6187 memset(crq->msgs, 0, PAGE_SIZE);
6188 crq->cur = 0;
6189 crq->active = false;
6190
6191 /* And re-open it again */
6192 rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
6193 crq->msg_token, PAGE_SIZE);
6194
6195 if (rc == H_CLOSED)
6196 /* Adapter is good, but other end is not ready */
6197 dev_warn(dev, "Partner adapter not ready\n");
6198 else if (rc != 0)
6199 dev_warn(dev, "Couldn't register crq (rc=%d)\n", rc);
6200
6201 return rc;
6202 }
6203
6204 static void release_crq_queue(struct ibmvnic_adapter *adapter)
6205 {
6206 struct ibmvnic_crq_queue *crq = &adapter->crq;
6207 struct vio_dev *vdev = adapter->vdev;
6208 long rc;
6209
6210 if (!crq->msgs)
6211 return;
6212
6213 netdev_dbg(adapter->netdev, "Releasing CRQ\n");
6214 free_irq(vdev->irq, adapter);
6215 tasklet_kill(&adapter->tasklet);
6216 do {
6217 rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
6218 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
6219
6220 dma_unmap_single(&vdev->dev, crq->msg_token, PAGE_SIZE,
6221 DMA_BIDIRECTIONAL);
6222 free_page((unsigned long)crq->msgs);
6223 crq->msgs = NULL;
6224 crq->active = false;
6225 }
6226
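/* Allocate and register the main CRQ: map a single zeroed page for the
 * message queue, register it with the hypervisor (falling back to a CRQ
 * reset if the resource is still held, e.g. across kexec), then hook up
 * the interrupt handler and tasklet and enable VIO interrupts.
 */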
6227 static int init_crq_queue(struct ibmvnic_adapter *adapter)
6228 {
6229 struct ibmvnic_crq_queue *crq = &adapter->crq;
6230 struct device *dev = &adapter->vdev->dev;
6231 struct vio_dev *vdev = adapter->vdev;
6232 int rc, retrc = -ENOMEM;
6233
6234 if (crq->msgs)
6235 return 0;
6236
6237 crq->msgs = (union ibmvnic_crq *)get_zeroed_page(GFP_KERNEL);
6238 /* Should we allocate more than one page? */
6239
6240 if (!crq->msgs)
6241 return -ENOMEM;
6242
6243 crq->size = PAGE_SIZE / sizeof(*crq->msgs);
6244 crq->msg_token = dma_map_single(dev, crq->msgs, PAGE_SIZE,
6245 DMA_BIDIRECTIONAL);
6246 if (dma_mapping_error(dev, crq->msg_token))
6247 goto map_failed;
6248
6249 rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
6250 crq->msg_token, PAGE_SIZE);
6251
6252 if (rc == H_RESOURCE)
6253 /* maybe kexecing and resource is busy. try a reset */
6254 rc = ibmvnic_reset_crq(adapter);
6255 retrc = rc;
6256
6257 if (rc == H_CLOSED) {
6258 dev_warn(dev, "Partner adapter not ready\n");
6259 } else if (rc) {
6260 dev_warn(dev, "Error %d opening adapter\n", rc);
6261 goto reg_crq_failed;
6262 }
6263
6264 retrc = 0;
6265
6266 tasklet_setup(&adapter->tasklet, (void *)ibmvnic_tasklet);
6267
6268 netdev_dbg(adapter->netdev, "registering irq 0x%x\n", vdev->irq);
6269 snprintf(crq->name, sizeof(crq->name), "ibmvnic-%x",
6270 adapter->vdev->unit_address);
6271 rc = request_irq(vdev->irq, ibmvnic_interrupt, 0, crq->name, adapter);
6272 if (rc) {
6273 dev_err(dev, "Couldn't register irq 0x%x. rc=%d\n",
6274 vdev->irq, rc);
6275 goto req_irq_failed;
6276 }
6277
6278 rc = vio_enable_interrupts(vdev);
6279 if (rc) {
6280 dev_err(dev, "Error %d enabling interrupts\n", rc);
6281 goto req_irq_failed;
6282 }
6283
6284 crq->cur = 0;
6285 spin_lock_init(&crq->lock);
6286
6287 /* process any CRQs that were queued before we enabled interrupts */
6288 tasklet_schedule(&adapter->tasklet);
6289
6290 return retrc;
6291
6292 req_irq_failed:
6293 tasklet_kill(&adapter->tasklet);
6294 do {
6295 rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
6296 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
6297 reg_crq_failed:
6298 dma_unmap_single(dev, crq->msg_token, PAGE_SIZE, DMA_BIDIRECTIONAL);
6299 map_failed:
6300 free_page((unsigned long)crq->msgs);
6301 crq->msgs = NULL;
6302 return retrc;
6303 }
6304
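/* Run the CRQ init handshake with the server and set up the sub-CRQs.
 * During a reset the sub-CRQs are only released and reallocated if the
 * negotiated queue counts changed; otherwise in-flight transmits are
 * cleaned up and the existing queues are simply reset.
 */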
6305 static int ibmvnic_reset_init(struct ibmvnic_adapter *adapter, bool reset)
6306 {
6307 struct device *dev = &adapter->vdev->dev;
6308 unsigned long timeout = msecs_to_jiffies(20000);
6309 u64 old_num_rx_queues = adapter->req_rx_queues;
6310 u64 old_num_tx_queues = adapter->req_tx_queues;
6311 int rc;
6312
6313 adapter->from_passive_init = false;
6314
6315 rc = ibmvnic_send_crq_init(adapter);
6316 if (rc) {
6317 dev_err(dev, "Send crq init failed with error %d\n", rc);
6318 return rc;
6319 }
6320
6321 if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
6322 dev_err(dev, "Initialization sequence timed out\n");
6323 return -ETIMEDOUT;
6324 }
6325
6326 if (adapter->init_done_rc) {
6327 release_crq_queue(adapter);
6328 dev_err(dev, "CRQ-init failed, %d\n", adapter->init_done_rc);
6329 return adapter->init_done_rc;
6330 }
6331
6332 if (adapter->from_passive_init) {
6333 adapter->state = VNIC_OPEN;
6334 adapter->from_passive_init = false;
6335 dev_err(dev, "CRQ-init failed, passive-init\n");
6336 return -EINVAL;
6337 }
6338
6339 if (reset &&
6340 test_bit(0, &adapter->resetting) && !adapter->wait_for_reset &&
6341 adapter->reset_reason != VNIC_RESET_MOBILITY) {
6342 if (adapter->req_rx_queues != old_num_rx_queues ||
6343 adapter->req_tx_queues != old_num_tx_queues) {
6344 release_sub_crqs(adapter, 0);
6345 rc = init_sub_crqs(adapter);
6346 } else {
6347 /* no need to reinitialize completely, but we do
6348 * need to clean up transmits that were in flight
6349 * when we processed the reset. Failure to do so
6350 * will confound the upper layer, usually TCP, by
6351 * creating the illusion of transmits that are
6352 * awaiting completion.
6353 */
6354 clean_tx_pools(adapter);
6355
6356 rc = reset_sub_crq_queues(adapter);
6357 }
6358 } else {
6359 rc = init_sub_crqs(adapter);
6360 }
6361
6362 if (rc) {
6363 dev_err(dev, "Initialization of sub crqs failed\n");
6364 release_crq_queue(adapter);
6365 return rc;
6366 }
6367
6368 rc = init_sub_crq_irqs(adapter);
6369 if (rc) {
6370 dev_err(dev, "Failed to initialize sub crq irqs\n");
6371 release_crq_queue(adapter);
6372 }
6373
6374 return rc;
6375 }
6376
6377 static struct device_attribute dev_attr_failover;
6378
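/* Device probe: allocate the netdev and adapter, bring up the CRQ and
 * negotiate capabilities (retrying while the init sequence returns
 * -EAGAIN), then register the netdev and the CPU hotplug notifier.
 */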
6379 static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
6380 {
6381 struct ibmvnic_adapter *adapter;
6382 struct net_device *netdev;
6383 unsigned char *mac_addr_p;
6384 unsigned long flags;
6385 bool init_success;
6386 int rc;
6387
6388 dev_dbg(&dev->dev, "entering ibmvnic_probe for UA 0x%x\n",
6389 dev->unit_address);
6390
6391 mac_addr_p = (unsigned char *)vio_get_attribute(dev,
6392 VETH_MAC_ADDR, NULL);
6393 if (!mac_addr_p) {
6394 dev_err(&dev->dev,
6395 "(%s:%3.3d) ERROR: Can't find MAC_ADDR attribute\n",
6396 __FILE__, __LINE__);
6397 return 0;
6398 }
6399
6400 netdev = alloc_etherdev_mq(sizeof(struct ibmvnic_adapter),
6401 IBMVNIC_MAX_QUEUES);
6402 if (!netdev)
6403 return -ENOMEM;
6404
6405 adapter = netdev_priv(netdev);
6406 adapter->state = VNIC_PROBING;
6407 dev_set_drvdata(&dev->dev, netdev);
6408 adapter->vdev = dev;
6409 adapter->netdev = netdev;
6410 adapter->login_pending = false;
6411 memset(&adapter->map_ids, 0, sizeof(adapter->map_ids));
6412 /* map_ids start at 1, so ensure map_id 0 is always "in-use" */
6413 bitmap_set(adapter->map_ids, 0, 1);
6414
6415 ether_addr_copy(adapter->mac_addr, mac_addr_p);
6416 eth_hw_addr_set(netdev, adapter->mac_addr);
6417 netdev->irq = dev->irq;
6418 netdev->netdev_ops = &ibmvnic_netdev_ops;
6419 netdev->ethtool_ops = &ibmvnic_ethtool_ops;
6420 SET_NETDEV_DEV(netdev, &dev->dev);
6421
6422 INIT_WORK(&adapter->ibmvnic_reset, __ibmvnic_reset);
6423 INIT_DELAYED_WORK(&adapter->ibmvnic_delayed_reset,
6424 __ibmvnic_delayed_reset);
6425 INIT_LIST_HEAD(&adapter->rwi_list);
6426 spin_lock_init(&adapter->rwi_lock);
6427 spin_lock_init(&adapter->state_lock);
6428 mutex_init(&adapter->fw_lock);
6429 init_completion(&adapter->probe_done);
6430 init_completion(&adapter->init_done);
6431 init_completion(&adapter->fw_done);
6432 init_completion(&adapter->reset_done);
6433 init_completion(&adapter->stats_done);
6434 clear_bit(0, &adapter->resetting);
6435 adapter->prev_rx_buf_sz = 0;
6436 adapter->prev_mtu = 0;
6437
6438 init_success = false;
6439 do {
6440 reinit_init_done(adapter);
6441
6442 /* clear any failovers we got in the previous pass
6443 * since we are reinitializing the CRQ
6444 */
6445 adapter->failover_pending = false;
6446
6447 /* If we had already initialized CRQ, we may have one or
6448 * more resets queued already. Discard those and release
6449 * the CRQ before initializing the CRQ again.
6450 */
6451 release_crq_queue(adapter);
6452
6453 /* Since we are still in PROBING state, __ibmvnic_reset()
6454 * will not access the ->rwi_list and since we released CRQ,
6455 * we won't get _new_ transport events. But there may be an
6456 * ongoing ibmvnic_reset() call. So serialize access to
6457 * rwi_list. If we win the race, ibmvnic_reset() could add
6458 * a reset after we purged but that's ok - we just may end
6459 * up with an extra reset (i.e. similar to having two or more
6460 * resets in the queue at once).
6461 * CHECK.
6462 */
6463 spin_lock_irqsave(&adapter->rwi_lock, flags);
6464 flush_reset_queue(adapter);
6465 spin_unlock_irqrestore(&adapter->rwi_lock, flags);
6466
6467 rc = init_crq_queue(adapter);
6468 if (rc) {
6469 dev_err(&dev->dev, "Couldn't initialize crq. rc=%d\n",
6470 rc);
6471 goto ibmvnic_init_fail;
6472 }
6473
6474 rc = ibmvnic_reset_init(adapter, false);
6475 } while (rc == -EAGAIN);
6476
6477 /* We are ignoring the error from ibmvnic_reset_init() assuming that the
6478 * partner is not ready. CRQ is not active. When the partner becomes
6479 * ready, we will do the passive init reset.
6480 */
6481
6482 if (!rc)
6483 init_success = true;
6484
6485 rc = init_stats_buffers(adapter);
6486 if (rc)
6487 goto ibmvnic_init_fail;
6488
6489 rc = init_stats_token(adapter);
6490 if (rc)
6491 goto ibmvnic_stats_fail;
6492
6493 rc = device_create_file(&dev->dev, &dev_attr_failover);
6494 if (rc)
6495 goto ibmvnic_dev_file_err;
6496
6497 netif_carrier_off(netdev);
6498
6499 if (init_success) {
6500 adapter->state = VNIC_PROBED;
6501 netdev->mtu = adapter->req_mtu - ETH_HLEN;
6502 netdev->min_mtu = adapter->min_mtu - ETH_HLEN;
6503 netdev->max_mtu = adapter->max_mtu - ETH_HLEN;
6504 } else {
6505 adapter->state = VNIC_DOWN;
6506 }
6507
6508 adapter->wait_for_reset = false;
6509 adapter->last_reset_time = jiffies;
6510
6511 rc = register_netdev(netdev);
6512 if (rc) {
6513 dev_err(&dev->dev, "failed to register netdev rc=%d\n", rc);
6514 goto ibmvnic_register_fail;
6515 }
6516 dev_info(&dev->dev, "ibmvnic registered\n");
6517
6518 rc = ibmvnic_cpu_notif_add(adapter);
6519 if (rc) {
6520 netdev_err(netdev, "Registering cpu notifier failed\n");
6521 goto cpu_notif_add_failed;
6522 }
6523
6524 complete(&adapter->probe_done);
6525
6526 return 0;
6527
6528 cpu_notif_add_failed:
6529 unregister_netdev(netdev);
6530
6531 ibmvnic_register_fail:
6532 device_remove_file(&dev->dev, &dev_attr_failover);
6533
6534 ibmvnic_dev_file_err:
6535 release_stats_token(adapter);
6536
6537 ibmvnic_stats_fail:
6538 release_stats_buffers(adapter);
6539
6540 ibmvnic_init_fail:
6541 release_sub_crqs(adapter, 1);
6542 release_crq_queue(adapter);
6543
6544 /* clean up the worker thread after releasing the CRQ so we don't get
6545 * transport events (i.e. new work items for the worker thread).
6546 */
6547 adapter->state = VNIC_REMOVING;
6548 complete(&adapter->probe_done);
6549 flush_work(&adapter->ibmvnic_reset);
6550 flush_delayed_work(&adapter->ibmvnic_delayed_reset);
6551
6552 flush_reset_queue(adapter);
6553
6554 mutex_destroy(&adapter->fw_lock);
6555 free_netdev(netdev);
6556
6557 return rc;
6558 }
6559
6560 static void ibmvnic_remove(struct vio_dev *dev)
6561 {
6562 struct net_device *netdev = dev_get_drvdata(&dev->dev);
6563 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
6564 unsigned long flags;
6565
6566 spin_lock_irqsave(&adapter->state_lock, flags);
6567
6568 /* If ibmvnic_reset() is scheduling a reset, wait for it to
6569 * finish. Then, set the state to REMOVING to prevent it from
6570 * scheduling any more work and to have reset functions ignore
6571 * any resets that have already been scheduled. Drop the lock
6572 * after setting state, so __ibmvnic_reset() which is called
6573 * from the flush_work() below, can make progress.
6574 */
6575 spin_lock(&adapter->rwi_lock);
6576 adapter->state = VNIC_REMOVING;
6577 spin_unlock(&adapter->rwi_lock);
6578
6579 spin_unlock_irqrestore(&adapter->state_lock, flags);
6580
6581 ibmvnic_cpu_notif_remove(adapter);
6582
6583 flush_work(&adapter->ibmvnic_reset);
6584 flush_delayed_work(&adapter->ibmvnic_delayed_reset);
6585
6586 rtnl_lock();
6587 unregister_netdevice(netdev);
6588
6589 release_resources(adapter);
6590 release_rx_pools(adapter);
6591 release_tx_pools(adapter);
6592 release_sub_crqs(adapter, 1);
6593 release_crq_queue(adapter);
6594
6595 release_stats_token(adapter);
6596 release_stats_buffers(adapter);
6597
6598 adapter->state = VNIC_REMOVED;
6599
6600 rtnl_unlock();
6601 mutex_destroy(&adapter->fw_lock);
6602 device_remove_file(&dev->dev, &dev_attr_failover);
6603 free_netdev(netdev);
6604 dev_set_drvdata(&dev->dev, NULL);
6605 }
6606
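/* sysfs "failover" attribute: writing 1 requests a session failover via
 * H_VIOCTL (H_GET_SESSION_TOKEN followed by H_SESSION_ERR_DETECTED); if
 * either hcall fails, fall back to scheduling a FAILOVER reset.
 */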
6607 static ssize_t failover_store(struct device *dev, struct device_attribute *attr,
6608 const char *buf, size_t count)
6609 {
6610 struct net_device *netdev = dev_get_drvdata(dev);
6611 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
6612 unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
6613 __be64 session_token;
6614 long rc;
6615
6616 if (!sysfs_streq(buf, "1"))
6617 return -EINVAL;
6618
6619 rc = plpar_hcall(H_VIOCTL, retbuf, adapter->vdev->unit_address,
6620 H_GET_SESSION_TOKEN, 0, 0, 0);
6621 if (rc) {
6622 netdev_err(netdev, "Couldn't retrieve session token, rc %ld\n",
6623 rc);
6624 goto last_resort;
6625 }
6626
6627 session_token = (__be64)retbuf[0];
6628 netdev_dbg(netdev, "Initiating client failover, session id %llx\n",
6629 be64_to_cpu(session_token));
6630 rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
6631 H_SESSION_ERR_DETECTED, session_token, 0, 0);
6632 if (rc) {
6633 netdev_err(netdev,
6634 "H_VIOCTL initiated failover failed, rc %ld\n",
6635 rc);
6636 goto last_resort;
6637 }
6638
6639 return count;
6640
6641 last_resort:
6642 netdev_dbg(netdev, "Trying to send CRQ_CMD, the last resort\n");
6643 ibmvnic_reset(adapter, VNIC_RESET_FAILOVER);
6644
6645 return count;
6646 }
6647 static DEVICE_ATTR_WO(failover);
6648
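/* vio_driver get_desired_dma callback: estimate the IO entitlement the
 * adapter needs - one page for the CRQ, the aligned statistics buffer,
 * four pages per sub-CRQ and the long term mapped buffers of each
 * active rx pool.
 */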
6649 static unsigned long ibmvnic_get_desired_dma(struct vio_dev *vdev)
6650 {
6651 struct net_device *netdev = dev_get_drvdata(&vdev->dev);
6652 struct ibmvnic_adapter *adapter;
6653 struct iommu_table *tbl;
6654 unsigned long ret = 0;
6655 int i;
6656
6657 tbl = get_iommu_table_base(&vdev->dev);
6658
6659 /* netdev is initialized at probe time along with the structures we need below */
6660 if (!netdev)
6661 return IOMMU_PAGE_ALIGN(IBMVNIC_IO_ENTITLEMENT_DEFAULT, tbl);
6662
6663 adapter = netdev_priv(netdev);
6664
6665 ret += PAGE_SIZE; /* the crq message queue */
6666 ret += IOMMU_PAGE_ALIGN(sizeof(struct ibmvnic_statistics), tbl);
6667
6668 for (i = 0; i < adapter->req_tx_queues + adapter->req_rx_queues; i++)
6669 ret += 4 * PAGE_SIZE; /* the scrq message queue */
6670
6671 for (i = 0; i < adapter->num_active_rx_pools; i++)
6672 ret += adapter->rx_pool[i].size *
6673 IOMMU_PAGE_ALIGN(adapter->rx_pool[i].buff_size, tbl);
6674
6675 return ret;
6676 }
6677
6678 static int ibmvnic_resume(struct device *dev)
6679 {
6680 struct net_device *netdev = dev_get_drvdata(dev);
6681 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
6682
6683 if (adapter->state != VNIC_OPEN)
6684 return 0;
6685
6686 tasklet_schedule(&adapter->tasklet);
6687
6688 return 0;
6689 }
6690
6691 static const struct vio_device_id ibmvnic_device_table[] = {
6692 {"network", "IBM,vnic"},
6693 {"", "" }
6694 };
6695 MODULE_DEVICE_TABLE(vio, ibmvnic_device_table);
6696
6697 static const struct dev_pm_ops ibmvnic_pm_ops = {
6698 .resume = ibmvnic_resume
6699 };
6700
6701 static struct vio_driver ibmvnic_driver = {
6702 .id_table = ibmvnic_device_table,
6703 .probe = ibmvnic_probe,
6704 .remove = ibmvnic_remove,
6705 .get_desired_dma = ibmvnic_get_desired_dma,
6706 .name = ibmvnic_driver_name,
6707 .pm = &ibmvnic_pm_ops,
6708 };
6709
6710 /* module functions */
6711 static int __init ibmvnic_module_init(void)
6712 {
6713 int ret;
6714
6715 ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "net/ibmvnic:online",
6716 ibmvnic_cpu_online,
6717 ibmvnic_cpu_down_prep);
6718 if (ret < 0)
6719 goto out;
6720 ibmvnic_online = ret;
6721 ret = cpuhp_setup_state_multi(CPUHP_IBMVNIC_DEAD, "net/ibmvnic:dead",
6722 NULL, ibmvnic_cpu_dead);
6723 if (ret)
6724 goto err_dead;
6725
6726 ret = vio_register_driver(&ibmvnic_driver);
6727 if (ret)
6728 goto err_vio_register;
6729
6730 pr_info("%s: %s %s\n", ibmvnic_driver_name, ibmvnic_driver_string,
6731 IBMVNIC_DRIVER_VERSION);
6732
6733 return 0;
6734 err_vio_register:
6735 cpuhp_remove_multi_state(CPUHP_IBMVNIC_DEAD);
6736 err_dead:
6737 cpuhp_remove_multi_state(ibmvnic_online);
6738 out:
6739 return ret;
6740 }
6741
6742 static void __exit ibmvnic_module_exit(void)
6743 {
6744 vio_unregister_driver(&ibmvnic_driver);
6745 cpuhp_remove_multi_state(CPUHP_IBMVNIC_DEAD);
6746 cpuhp_remove_multi_state(ibmvnic_online);
6747 }
6748
6749 module_init(ibmvnic_module_init);
6750 module_exit(ibmvnic_module_exit);
6751