1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /**************************************************************************/
3 /* */
4 /* IBM System i and System p Virtual NIC Device Driver */
5 /* Copyright (C) 2014 IBM Corp. */
6 /* Santiago Leon (santi_leon@yahoo.com) */
7 /* Thomas Falcon (tlfalcon@linux.vnet.ibm.com) */
8 /* John Allen (jallen@linux.vnet.ibm.com) */
9 /* */
10 /* */
11 /* This module contains the implementation of a virtual ethernet device */
12 /* for use with IBM i/p Series LPAR Linux. It utilizes the logical LAN */
13 /* option of the RS/6000 Platform Architecture to interface with virtual */
14 /* ethernet NICs that are presented to the partition by the hypervisor. */
15 /* */
16 /* Messages are passed between the VNIC driver and the VNIC server using */
17 /* Command/Response Queues (CRQs) and sub CRQs (sCRQs). CRQs are used to */
18 /* issue and receive commands that initiate communication with the server */
19 /* on driver initialization. Sub CRQs (sCRQs) are similar to CRQs, but */
20 /* are used by the driver to notify the server that a packet is */
21 /* ready for transmission or that a buffer has been added to receive a */
22 /* packet. Subsequently, sCRQs are used by the server to notify the */
23 /* driver that a packet transmission has been completed or that a packet */
24 /* has been received and placed in a waiting buffer. */
25 /* */
26 /* In lieu of a more conventional "on-the-fly" DMA mapping strategy in */
27 /* which skbs are DMA mapped and immediately unmapped when the transmit */
28 /* or receive has been completed, the VNIC driver is required to use */
29 /* "long term mapping". This entails that large, continuous DMA mapped */
30 /* buffers are allocated on driver initialization and these buffers are */
31 /* then continuously reused to pass skbs to and from the VNIC server. */
32 /* */
33 /**************************************************************************/
34
35 #include <linux/module.h>
36 #include <linux/moduleparam.h>
37 #include <linux/types.h>
38 #include <linux/errno.h>
39 #include <linux/completion.h>
40 #include <linux/ioport.h>
41 #include <linux/dma-mapping.h>
42 #include <linux/kernel.h>
43 #include <linux/netdevice.h>
44 #include <linux/etherdevice.h>
45 #include <linux/skbuff.h>
46 #include <linux/init.h>
47 #include <linux/delay.h>
48 #include <linux/mm.h>
49 #include <linux/ethtool.h>
50 #include <linux/proc_fs.h>
51 #include <linux/if_arp.h>
52 #include <linux/in.h>
53 #include <linux/ip.h>
54 #include <linux/ipv6.h>
55 #include <linux/irq.h>
56 #include <linux/irqdomain.h>
57 #include <linux/kthread.h>
58 #include <linux/seq_file.h>
59 #include <linux/interrupt.h>
60 #include <net/net_namespace.h>
61 #include <asm/hvcall.h>
62 #include <linux/atomic.h>
63 #include <asm/vio.h>
64 #include <asm/xive.h>
65 #include <asm/iommu.h>
66 #include <linux/uaccess.h>
67 #include <asm/firmware.h>
68 #include <linux/workqueue.h>
69 #include <linux/if_vlan.h>
70 #include <linux/utsname.h>
71 #include <linux/cpu.h>
72
73 #include "ibmvnic.h"
74
75 static const char ibmvnic_driver_name[] = "ibmvnic";
76 static const char ibmvnic_driver_string[] = "IBM System i/p Virtual NIC Driver";
77
78 MODULE_AUTHOR("Santiago Leon");
79 MODULE_DESCRIPTION("IBM System i/p Virtual NIC Driver");
80 MODULE_LICENSE("GPL");
81 MODULE_VERSION(IBMVNIC_DRIVER_VERSION);
82
83 static int ibmvnic_version = IBMVNIC_INITIAL_VERSION;
84 static void release_sub_crqs(struct ibmvnic_adapter *, bool);
85 static int ibmvnic_reset_crq(struct ibmvnic_adapter *);
86 static int ibmvnic_send_crq_init(struct ibmvnic_adapter *);
87 static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *);
88 static int ibmvnic_send_crq(struct ibmvnic_adapter *, union ibmvnic_crq *);
89 static int send_subcrq_indirect(struct ibmvnic_adapter *, u64, u64, u64);
90 static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance);
91 static int enable_scrq_irq(struct ibmvnic_adapter *,
92 struct ibmvnic_sub_crq_queue *);
93 static int disable_scrq_irq(struct ibmvnic_adapter *,
94 struct ibmvnic_sub_crq_queue *);
95 static int pending_scrq(struct ibmvnic_adapter *,
96 struct ibmvnic_sub_crq_queue *);
97 static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *,
98 struct ibmvnic_sub_crq_queue *);
99 static int ibmvnic_poll(struct napi_struct *napi, int data);
100 static int reset_sub_crq_queues(struct ibmvnic_adapter *adapter);
101 static inline void reinit_init_done(struct ibmvnic_adapter *adapter);
102 static void send_query_map(struct ibmvnic_adapter *adapter);
103 static int send_request_map(struct ibmvnic_adapter *, dma_addr_t, u32, u8);
104 static int send_request_unmap(struct ibmvnic_adapter *, u8);
105 static int send_login(struct ibmvnic_adapter *adapter);
106 static void send_query_cap(struct ibmvnic_adapter *adapter);
107 static int init_sub_crqs(struct ibmvnic_adapter *);
108 static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter);
109 static int ibmvnic_reset_init(struct ibmvnic_adapter *, bool reset);
110 static void release_crq_queue(struct ibmvnic_adapter *);
111 static int __ibmvnic_set_mac(struct net_device *, u8 *);
112 static int init_crq_queue(struct ibmvnic_adapter *adapter);
113 static int send_query_phys_parms(struct ibmvnic_adapter *adapter);
114 static void ibmvnic_tx_scrq_clean_buffer(struct ibmvnic_adapter *adapter,
115 struct ibmvnic_sub_crq_queue *tx_scrq);
116 static void free_long_term_buff(struct ibmvnic_adapter *adapter,
117 struct ibmvnic_long_term_buff *ltb);
118 static void ibmvnic_disable_irqs(struct ibmvnic_adapter *adapter);
119 static void flush_reset_queue(struct ibmvnic_adapter *adapter);
120 static void print_subcrq_error(struct device *dev, int rc, const char *func);
121
122 struct ibmvnic_stat {
123 char name[ETH_GSTRING_LEN];
124 int offset;
125 };
126
127 #define IBMVNIC_STAT_OFF(stat) (offsetof(struct ibmvnic_adapter, stats) + \
128 offsetof(struct ibmvnic_statistics, stat))
129 #define IBMVNIC_GET_STAT(a, off) (*((u64 *)(((unsigned long)(a)) + (off))))
130
131 static const struct ibmvnic_stat ibmvnic_stats[] = {
132 {"rx_packets", IBMVNIC_STAT_OFF(rx_packets)},
133 {"rx_bytes", IBMVNIC_STAT_OFF(rx_bytes)},
134 {"tx_packets", IBMVNIC_STAT_OFF(tx_packets)},
135 {"tx_bytes", IBMVNIC_STAT_OFF(tx_bytes)},
136 {"ucast_tx_packets", IBMVNIC_STAT_OFF(ucast_tx_packets)},
137 {"ucast_rx_packets", IBMVNIC_STAT_OFF(ucast_rx_packets)},
138 {"mcast_tx_packets", IBMVNIC_STAT_OFF(mcast_tx_packets)},
139 {"mcast_rx_packets", IBMVNIC_STAT_OFF(mcast_rx_packets)},
140 {"bcast_tx_packets", IBMVNIC_STAT_OFF(bcast_tx_packets)},
141 {"bcast_rx_packets", IBMVNIC_STAT_OFF(bcast_rx_packets)},
142 {"align_errors", IBMVNIC_STAT_OFF(align_errors)},
143 {"fcs_errors", IBMVNIC_STAT_OFF(fcs_errors)},
144 {"single_collision_frames", IBMVNIC_STAT_OFF(single_collision_frames)},
145 {"multi_collision_frames", IBMVNIC_STAT_OFF(multi_collision_frames)},
146 {"sqe_test_errors", IBMVNIC_STAT_OFF(sqe_test_errors)},
147 {"deferred_tx", IBMVNIC_STAT_OFF(deferred_tx)},
148 {"late_collisions", IBMVNIC_STAT_OFF(late_collisions)},
149 {"excess_collisions", IBMVNIC_STAT_OFF(excess_collisions)},
150 {"internal_mac_tx_errors", IBMVNIC_STAT_OFF(internal_mac_tx_errors)},
151 {"carrier_sense", IBMVNIC_STAT_OFF(carrier_sense)},
152 {"too_long_frames", IBMVNIC_STAT_OFF(too_long_frames)},
153 {"internal_mac_rx_errors", IBMVNIC_STAT_OFF(internal_mac_rx_errors)},
154 };
155
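/* Send an INIT_COMPLETE message on the main CRQ to tell the VNIC server
 * that CRQ initialization on the driver side is complete.
 */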
156 static int send_crq_init_complete(struct ibmvnic_adapter *adapter)
157 {
158 union ibmvnic_crq crq;
159
160 memset(&crq, 0, sizeof(crq));
161 crq.generic.first = IBMVNIC_CRQ_INIT_CMD;
162 crq.generic.cmd = IBMVNIC_CRQ_INIT_COMPLETE;
163
164 return ibmvnic_send_crq(adapter, &crq);
165 }
166
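/* Send the driver's supported protocol version (ibmvnic_version) to the
 * VNIC server as part of the initial CRQ handshake.
 */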
167 static int send_version_xchg(struct ibmvnic_adapter *adapter)
168 {
169 union ibmvnic_crq crq;
170
171 memset(&crq, 0, sizeof(crq));
172 crq.version_exchange.first = IBMVNIC_CRQ_CMD;
173 crq.version_exchange.cmd = VERSION_EXCHANGE;
174 crq.version_exchange.version = cpu_to_be16(ibmvnic_version);
175
176 return ibmvnic_send_crq(adapter, &crq);
177 }
178
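/* Clear the affinity mask and remove the affinity hint for a single
 * sub-CRQ queue's IRQ.
 */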
179 static void ibmvnic_clean_queue_affinity(struct ibmvnic_adapter *adapter,
180 struct ibmvnic_sub_crq_queue *queue)
181 {
182 if (!(queue && queue->irq))
183 return;
184
185 cpumask_clear(queue->affinity_mask);
186
187 if (irq_set_affinity_and_hint(queue->irq, NULL))
188 netdev_warn(adapter->netdev,
189 "%s: Clear affinity failed, queue addr = %p, IRQ = %d\n",
190 __func__, queue, queue->irq);
191 }
192
193 static void ibmvnic_clean_affinity(struct ibmvnic_adapter *adapter)
194 {
195 struct ibmvnic_sub_crq_queue **rxqs;
196 struct ibmvnic_sub_crq_queue **txqs;
197 int num_rxqs, num_txqs;
198 int i;
199
200 rxqs = adapter->rx_scrq;
201 txqs = adapter->tx_scrq;
202 num_txqs = adapter->num_active_tx_scrqs;
203 num_rxqs = adapter->num_active_rx_scrqs;
204
205 netdev_dbg(adapter->netdev, "%s: Cleaning irq affinity hints", __func__);
206 if (txqs) {
207 for (i = 0; i < num_txqs; i++)
208 ibmvnic_clean_queue_affinity(adapter, txqs[i]);
209 }
210 if (rxqs) {
211 for (i = 0; i < num_rxqs; i++)
212 ibmvnic_clean_queue_affinity(adapter, rxqs[i]);
213 }
214 }
215
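/* Assign the next @stride online CPUs to this queue's IRQ affinity mask
 * and hint. While *stragglers is non-zero, the queue gets one extra CPU
 * so that leftover CPUs are spread across the first few queues.
 */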
216 static int ibmvnic_set_queue_affinity(struct ibmvnic_sub_crq_queue *queue,
217 unsigned int *cpu, int *stragglers,
218 int stride)
219 {
220 cpumask_var_t mask;
221 int i;
222 int rc = 0;
223
224 if (!(queue && queue->irq))
225 return rc;
226
227 /* cpumask_var_t is either a pointer or array, allocation works here */
228 if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
229 return -ENOMEM;
230
231 /* while we have extra cpu give one extra to this irq */
232 if (*stragglers) {
233 stride++;
234 (*stragglers)--;
235 }
236 /* atomic write is safer than writing bit by bit directly */
237 for (i = 0; i < stride; i++) {
238 cpumask_set_cpu(*cpu, mask);
239 *cpu = cpumask_next_wrap(*cpu, cpu_online_mask,
240 nr_cpu_ids, false);
241 }
242 /* set queue affinity mask */
243 cpumask_copy(queue->affinity_mask, mask);
244 rc = irq_set_affinity_and_hint(queue->irq, queue->affinity_mask);
245 free_cpumask_var(mask);
246
247 return rc;
248 }
249
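/* Spread the online CPUs across all tx and rx sub-CRQ IRQs, alternating
 * tx and rx queues to balance load, and program XPS for each tx queue.
 */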
250 /* assumes cpu read lock is held */
251 static void ibmvnic_set_affinity(struct ibmvnic_adapter *adapter)
252 {
253 struct ibmvnic_sub_crq_queue **rxqs = adapter->rx_scrq;
254 struct ibmvnic_sub_crq_queue **txqs = adapter->tx_scrq;
255 struct ibmvnic_sub_crq_queue *queue;
256 int num_rxqs = adapter->num_active_rx_scrqs, i_rxqs = 0;
257 int num_txqs = adapter->num_active_tx_scrqs, i_txqs = 0;
258 int total_queues, stride, stragglers, i;
259 unsigned int num_cpu, cpu;
260 bool is_rx_queue;
261 int rc = 0;
262
263 netdev_dbg(adapter->netdev, "%s: Setting irq affinity hints", __func__);
264 if (!(adapter->rx_scrq && adapter->tx_scrq)) {
265 netdev_warn(adapter->netdev,
266 "%s: Set affinity failed, queues not allocated\n",
267 __func__);
268 return;
269 }
270
271 total_queues = num_rxqs + num_txqs;
272 num_cpu = num_online_cpus();
273 /* number of cpu's assigned per irq */
274 stride = max_t(int, num_cpu / total_queues, 1);
275 /* number of leftover cpu's */
276 stragglers = num_cpu >= total_queues ? num_cpu % total_queues : 0;
277 /* next available cpu to assign irq to */
278 cpu = cpumask_next(-1, cpu_online_mask);
279
280 for (i = 0; i < total_queues; i++) {
281 is_rx_queue = false;
282 /* balance core load by alternating rx and tx assignments
283 * ex: TX0 -> RX0 -> TX1 -> RX1 etc.
284 */
285 if ((i % 2 == 1 && i_rxqs < num_rxqs) || i_txqs == num_txqs) {
286 queue = rxqs[i_rxqs++];
287 is_rx_queue = true;
288 } else {
289 queue = txqs[i_txqs++];
290 }
291
292 rc = ibmvnic_set_queue_affinity(queue, &cpu, &stragglers,
293 stride);
294 if (rc)
295 goto out;
296
297 if (!queue || is_rx_queue)
298 continue;
299
300 rc = __netif_set_xps_queue(adapter->netdev,
301 cpumask_bits(queue->affinity_mask),
302 i_txqs - 1, XPS_CPUS);
303 if (rc)
304 netdev_warn(adapter->netdev, "%s: Set XPS on queue %d failed, rc = %d.\n",
305 __func__, i_txqs - 1, rc);
306 }
307
308 out:
309 if (rc) {
310 netdev_warn(adapter->netdev,
311 "%s: Set affinity failed, queue addr = %p, IRQ = %d, rc = %d.\n",
312 __func__, queue, queue->irq, rc);
313 ibmvnic_clean_affinity(adapter);
314 }
315 }
316
317 static int ibmvnic_cpu_online(unsigned int cpu, struct hlist_node *node)
318 {
319 struct ibmvnic_adapter *adapter;
320
321 adapter = hlist_entry_safe(node, struct ibmvnic_adapter, node);
322 ibmvnic_set_affinity(adapter);
323 return 0;
324 }
325
326 static int ibmvnic_cpu_dead(unsigned int cpu, struct hlist_node *node)
327 {
328 struct ibmvnic_adapter *adapter;
329
330 adapter = hlist_entry_safe(node, struct ibmvnic_adapter, node_dead);
331 ibmvnic_set_affinity(adapter);
332 return 0;
333 }
334
335 static int ibmvnic_cpu_down_prep(unsigned int cpu, struct hlist_node *node)
336 {
337 struct ibmvnic_adapter *adapter;
338
339 adapter = hlist_entry_safe(node, struct ibmvnic_adapter, node);
340 ibmvnic_clean_affinity(adapter);
341 return 0;
342 }
343
344 static enum cpuhp_state ibmvnic_online;
345
346 static int ibmvnic_cpu_notif_add(struct ibmvnic_adapter *adapter)
347 {
348 int ret;
349
350 ret = cpuhp_state_add_instance_nocalls(ibmvnic_online, &adapter->node);
351 if (ret)
352 return ret;
353 ret = cpuhp_state_add_instance_nocalls(CPUHP_IBMVNIC_DEAD,
354 &adapter->node_dead);
355 if (!ret)
356 return ret;
357 cpuhp_state_remove_instance_nocalls(ibmvnic_online, &adapter->node);
358 return ret;
359 }
360
361 static void ibmvnic_cpu_notif_remove(struct ibmvnic_adapter *adapter)
362 {
363 cpuhp_state_remove_instance_nocalls(ibmvnic_online, &adapter->node);
364 cpuhp_state_remove_instance_nocalls(CPUHP_IBMVNIC_DEAD,
365 &adapter->node_dead);
366 }
367
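/* Register a sub-CRQ with the hypervisor using the H_REG_SUB_CRQ hcall.
 * On return, @number holds the sub-CRQ number and @irq the interrupt
 * assigned by the hypervisor.
 */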
368 static long h_reg_sub_crq(unsigned long unit_address, unsigned long token,
369 unsigned long length, unsigned long *number,
370 unsigned long *irq)
371 {
372 unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
373 long rc;
374
375 rc = plpar_hcall(H_REG_SUB_CRQ, retbuf, unit_address, token, length);
376 *number = retbuf[0];
377 *irq = retbuf[1];
378
379 return rc;
380 }
381
382 /**
383 * ibmvnic_wait_for_completion - Check device state and wait for completion
384 * @adapter: private device data
385 * @comp_done: completion structure to wait for
386 * @timeout: time to wait in milliseconds
387 *
388 * Wait for a completion signal or until the timeout limit is reached
389 * while checking that the device is still active.
390 */
391 static int ibmvnic_wait_for_completion(struct ibmvnic_adapter *adapter,
392 struct completion *comp_done,
393 unsigned long timeout)
394 {
395 struct net_device *netdev;
396 unsigned long div_timeout;
397 u8 retry;
398
399 netdev = adapter->netdev;
400 retry = 5;
401 div_timeout = msecs_to_jiffies(timeout / retry);
402 while (true) {
403 if (!adapter->crq.active) {
404 netdev_err(netdev, "Device down!\n");
405 return -ENODEV;
406 }
407 if (!retry--)
408 break;
409 if (wait_for_completion_timeout(comp_done, div_timeout))
410 return 0;
411 }
412 netdev_err(netdev, "Operation timed out.\n");
413 return -ETIMEDOUT;
414 }
415
416 /**
417 * reuse_ltb() - Check if a long term buffer can be reused
418 * @ltb: The long term buffer to be checked
419 * @size: The size of the long term buffer.
420 *
421 * An LTB can be reused unless its size has changed.
422 *
423 * Return: Return true if the LTB can be reused, false otherwise.
424 */
425 static bool reuse_ltb(struct ibmvnic_long_term_buff *ltb, int size)
426 {
427 return (ltb->buff && ltb->size == size);
428 }
429
430 /**
431 * alloc_long_term_buff() - Allocate a long term buffer (LTB)
432 *
433 * @adapter: ibmvnic adapter associated to the LTB
434 * @ltb: container object for the LTB
435 * @size: size of the LTB
436 *
437 * Allocate an LTB of the specified size and notify VIOS.
438 *
439 * If the given @ltb already has the correct size, reuse it. Otherwise if
440 * it's non-NULL, free it. Then allocate a new one of the correct size.
441 * Notify the VIOS either way since we may now be working with a new VIOS.
442 *
443 * Allocating larger chunks of memory during resets, especially during LPM or
444 * under low-memory situations, can cause resets to fail/time out and the LPAR
445 * to lose connectivity. So hold onto the LTB even if we fail to communicate
446 * with the VIOS and reuse it on the next open. Free the LTB when the adapter is closed.
447 *
448 * Return: 0 if we were able to allocate the LTB and notify the VIOS and
449 * a negative value otherwise.
450 */
451 static int alloc_long_term_buff(struct ibmvnic_adapter *adapter,
452 struct ibmvnic_long_term_buff *ltb, int size)
453 {
454 struct device *dev = &adapter->vdev->dev;
455 u64 prev = 0;
456 int rc;
457
458 if (!reuse_ltb(ltb, size)) {
459 dev_dbg(dev,
460 "LTB size changed from 0x%llx to 0x%x, reallocating\n",
461 ltb->size, size);
462 prev = ltb->size;
463 free_long_term_buff(adapter, ltb);
464 }
465
466 if (ltb->buff) {
467 dev_dbg(dev, "Reusing LTB [map %d, size 0x%llx]\n",
468 ltb->map_id, ltb->size);
469 } else {
470 ltb->buff = dma_alloc_coherent(dev, size, <b->addr,
471 GFP_KERNEL);
472 if (!ltb->buff) {
473 dev_err(dev, "Couldn't alloc long term buffer\n");
474 return -ENOMEM;
475 }
476 ltb->size = size;
477
478 ltb->map_id = find_first_zero_bit(adapter->map_ids,
479 MAX_MAP_ID);
480 bitmap_set(adapter->map_ids, ltb->map_id, 1);
481
482 dev_dbg(dev,
483 "Allocated new LTB [map %d, size 0x%llx was 0x%llx]\n",
484 ltb->map_id, ltb->size, prev);
485 }
486
487 /* Ensure ltb is zeroed - especially when reusing it. */
488 memset(ltb->buff, 0, ltb->size);
489
490 mutex_lock(&adapter->fw_lock);
491 adapter->fw_done_rc = 0;
492 reinit_completion(&adapter->fw_done);
493
494 rc = send_request_map(adapter, ltb->addr, ltb->size, ltb->map_id);
495 if (rc) {
496 dev_err(dev, "send_request_map failed, rc = %d\n", rc);
497 goto out;
498 }
499
500 rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
501 if (rc) {
502 dev_err(dev, "LTB map request aborted or timed out, rc = %d\n",
503 rc);
504 goto out;
505 }
506
507 if (adapter->fw_done_rc) {
508 dev_err(dev, "Couldn't map LTB, rc = %d\n",
509 adapter->fw_done_rc);
510 rc = -EIO;
511 goto out;
512 }
513 rc = 0;
514 out:
515 /* don't free LTB on communication error - see function header */
516 mutex_unlock(&adapter->fw_lock);
517 return rc;
518 }
519
520 static void free_long_term_buff(struct ibmvnic_adapter *adapter,
521 struct ibmvnic_long_term_buff *ltb)
522 {
523 struct device *dev = &adapter->vdev->dev;
524
525 if (!ltb->buff)
526 return;
527
528 /* VIOS automatically unmaps the long term buffer at remote
529 * end for the following resets:
530 * FAILOVER, MOBILITY, TIMEOUT.
531 */
532 if (adapter->reset_reason != VNIC_RESET_FAILOVER &&
533 adapter->reset_reason != VNIC_RESET_MOBILITY &&
534 adapter->reset_reason != VNIC_RESET_TIMEOUT)
535 send_request_unmap(adapter, ltb->map_id);
536
537 dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
538
539 ltb->buff = NULL;
540 /* mark this map_id free */
541 bitmap_clear(adapter->map_ids, ltb->map_id, 1);
542 ltb->map_id = 0;
543 }
544
545 /**
546 * free_ltb_set - free the given set of long term buffers (LTBS)
547 * @adapter: The ibmvnic adapter containing this ltb set
548 * @ltb_set: The ltb_set to be freed
549 *
550 * Free the set of LTBs in the given set.
551 */
552
553 static void free_ltb_set(struct ibmvnic_adapter *adapter,
554 struct ibmvnic_ltb_set *ltb_set)
555 {
556 int i;
557
558 for (i = 0; i < ltb_set->num_ltbs; i++)
559 free_long_term_buff(adapter, <b_set->ltbs[i]);
560
561 kfree(ltb_set->ltbs);
562 ltb_set->ltbs = NULL;
563 ltb_set->num_ltbs = 0;
564 }
565
566 /**
567 * alloc_ltb_set() - Allocate a set of long term buffers (LTBs)
568 *
569 * @adapter: ibmvnic adapter associated to the LTB
570 * @ltb_set: container object for the set of LTBs
571 * @num_buffs: Number of buffers in the LTB
572 * @buff_size: Size of each buffer in the LTB
573 *
574 * Allocate a set of LTBs to accommodate @num_buffs buffers of @buff_size
575 * each. We currently cap the size of each LTB to IBMVNIC_ONE_LTB_SIZE. If the
576 * new set has fewer LTBs than the old set, free the excess LTBs. If the new
577 * set needs more LTBs than the old set had, allocate the remaining ones.
578 * Try and reuse as many LTBs as possible and avoid reallocation.
579 *
580 * Any changes to this allocation strategy must be reflected in
581 * map_rxpool_buff_to_ltb() and map_txpool_buff_to_ltb().
582 */
583 static int alloc_ltb_set(struct ibmvnic_adapter *adapter,
584 struct ibmvnic_ltb_set *ltb_set, int num_buffs,
585 int buff_size)
586 {
587 struct device *dev = &adapter->vdev->dev;
588 struct ibmvnic_ltb_set old_set;
589 struct ibmvnic_ltb_set new_set;
590 int rem_size;
591 int tot_size; /* size of all ltbs */
592 int ltb_size; /* size of one ltb */
593 int nltbs;
594 int rc;
595 int n;
596 int i;
597
598 dev_dbg(dev, "%s() num_buffs %d, buff_size %d\n", __func__, num_buffs,
599 buff_size);
600
601 ltb_size = rounddown(IBMVNIC_ONE_LTB_SIZE, buff_size);
602 tot_size = num_buffs * buff_size;
603
604 if (ltb_size > tot_size)
605 ltb_size = tot_size;
606
607 nltbs = tot_size / ltb_size;
608 if (tot_size % ltb_size)
609 nltbs++;
610
611 old_set = *ltb_set;
612
613 if (old_set.num_ltbs == nltbs) {
614 new_set = old_set;
615 } else {
616 int tmp = nltbs * sizeof(struct ibmvnic_long_term_buff);
617
618 new_set.ltbs = kzalloc(tmp, GFP_KERNEL);
619 if (!new_set.ltbs)
620 return -ENOMEM;
621
622 new_set.num_ltbs = nltbs;
623
624 /* Free any excess ltbs in old set */
625 for (i = new_set.num_ltbs; i < old_set.num_ltbs; i++)
626 free_long_term_buff(adapter, &old_set.ltbs[i]);
627
628 /* Copy remaining ltbs to new set. All LTBs except the
629 * last one are of the same size. alloc_long_term_buff()
630 * will realloc if the size changes.
631 */
632 n = min(old_set.num_ltbs, new_set.num_ltbs);
633 for (i = 0; i < n; i++)
634 new_set.ltbs[i] = old_set.ltbs[i];
635
636 /* Any additional ltbs in new set will have NULL ltbs for
637 * now and will be allocated in alloc_long_term_buff().
638 */
639
640 /* We no longer need the old_set so free it. Note that we
641 * may have reused some ltbs from old set and freed excess
642 * ltbs above. So we only need to free the container now
643 * not the LTBs themselves. (i.e. don't free_ltb_set()!)
644 */
645 kfree(old_set.ltbs);
646 old_set.ltbs = NULL;
647 old_set.num_ltbs = 0;
648
649 /* Install the new set. If allocations fail below, we will
650 * retry later and know what size LTBs we need.
651 */
652 *ltb_set = new_set;
653 }
654
655 i = 0;
656 rem_size = tot_size;
657 while (rem_size) {
658 if (ltb_size > rem_size)
659 ltb_size = rem_size;
660
661 rem_size -= ltb_size;
662
663 rc = alloc_long_term_buff(adapter, &new_set.ltbs[i], ltb_size);
664 if (rc)
665 goto out;
666 i++;
667 }
668
669 WARN_ON(i != new_set.num_ltbs);
670
671 return 0;
672 out:
673 /* We may have allocated one/more LTBs before failing and we
674 * want to try and reuse on next reset. So don't free ltb set.
675 */
676 return rc;
677 }
678
679 /**
680 * map_rxpool_buf_to_ltb - Map given rxpool buffer to offset in an LTB.
681 * @rxpool: The receive buffer pool containing buffer
682 * @bufidx: Index of buffer in rxpool
683 * @ltbp: (Output) pointer to the long term buffer containing the buffer
684 * @offset: (Output) offset of buffer in the LTB from @ltbp
685 *
686 * Map the given buffer identified by [rxpool, bufidx] to an LTB in the
687 * pool and its corresponding offset. Assume for now that each LTB may be a
688 * different size; this could possibly be optimized based on the allocation
689 * strategy in alloc_ltb_set().
690 */
691 static void map_rxpool_buf_to_ltb(struct ibmvnic_rx_pool *rxpool,
692 unsigned int bufidx,
693 struct ibmvnic_long_term_buff **ltbp,
694 unsigned int *offset)
695 {
696 struct ibmvnic_long_term_buff *ltb;
697 int nbufs; /* # of buffers in one ltb */
698 int i;
699
700 WARN_ON(bufidx >= rxpool->size);
701
702 for (i = 0; i < rxpool->ltb_set.num_ltbs; i++) {
703 ltb = &rxpool->ltb_set.ltbs[i];
704 nbufs = ltb->size / rxpool->buff_size;
705 if (bufidx < nbufs)
706 break;
707 bufidx -= nbufs;
708 }
709
710 *ltbp = ltb;
711 *offset = bufidx * rxpool->buff_size;
712 }
713
714 /**
715 * map_txpool_buf_to_ltb - Map given txpool buffer to offset in an LTB.
716 * @txpool: The transmit buffer pool containing buffer
717 * @bufidx: Index of buffer in txpool
718 * @ltbp: (Output) pointer to the long term buffer (LTB) containing the buffer
719 * @offset: (Output) offset of buffer in the LTB from @ltbp
720 *
721 * Map the given buffer identified by [txpool, bufidx] to an LTB in the
722 * pool and its corresponding offset.
723 */
724 static void map_txpool_buf_to_ltb(struct ibmvnic_tx_pool *txpool,
725 unsigned int bufidx,
726 struct ibmvnic_long_term_buff **ltbp,
727 unsigned int *offset)
728 {
729 struct ibmvnic_long_term_buff *ltb;
730 int nbufs; /* # of buffers in one ltb */
731 int i;
732
733 WARN_ON_ONCE(bufidx >= txpool->num_buffers);
734
735 for (i = 0; i < txpool->ltb_set.num_ltbs; i++) {
736 ltb = &txpool->ltb_set.ltbs[i];
737 nbufs = ltb->size / txpool->buf_size;
738 if (bufidx < nbufs)
739 break;
740 bufidx -= nbufs;
741 }
742
743 *ltbp = ltb;
744 *offset = bufidx * txpool->buf_size;
745 }
746
747 static void deactivate_rx_pools(struct ibmvnic_adapter *adapter)
748 {
749 int i;
750
751 for (i = 0; i < adapter->num_active_rx_pools; i++)
752 adapter->rx_pool[i].active = 0;
753 }
754
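/* Refill the given rx pool with skbs and queue the corresponding
 * descriptors to the VIOS in batches via send_subcrq_indirect(). On hcall
 * failure the queued descriptors are unwound; if the queue is closed or a
 * failover is pending, replenishment is disabled and carrier is turned off.
 */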
755 static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
756 struct ibmvnic_rx_pool *pool)
757 {
758 int count = pool->size - atomic_read(&pool->available);
759 u64 handle = adapter->rx_scrq[pool->index]->handle;
760 struct device *dev = &adapter->vdev->dev;
761 struct ibmvnic_ind_xmit_queue *ind_bufp;
762 struct ibmvnic_sub_crq_queue *rx_scrq;
763 struct ibmvnic_long_term_buff *ltb;
764 union sub_crq *sub_crq;
765 int buffers_added = 0;
766 unsigned long lpar_rc;
767 struct sk_buff *skb;
768 unsigned int offset;
769 dma_addr_t dma_addr;
770 unsigned char *dst;
771 int shift = 0;
772 int bufidx;
773 int i;
774
775 if (!pool->active)
776 return;
777
778 rx_scrq = adapter->rx_scrq[pool->index];
779 ind_bufp = &rx_scrq->ind_buf;
780
781 /* netdev_alloc_skb() could have failed after we saved a few skbs
782 * in the indir_buf and we would not have sent them to VIOS yet.
783 * To account for them, start the loop at ind_bufp->index rather
784 * than 0. If we pushed all the skbs to VIOS, ind_bufp->index will
785 * be 0.
786 */
787 for (i = ind_bufp->index; i < count; ++i) {
788 bufidx = pool->free_map[pool->next_free];
789
790 /* We may be reusing the skb from earlier resets. Allocate
791 * only if necessary. But since the LTB may have changed
792 * during reset (see init_rx_pools()), update LTB below
793 * even if reusing skb.
794 */
795 skb = pool->rx_buff[bufidx].skb;
796 if (!skb) {
797 skb = netdev_alloc_skb(adapter->netdev,
798 pool->buff_size);
799 if (!skb) {
800 dev_err(dev, "Couldn't replenish rx buff\n");
801 adapter->replenish_no_mem++;
802 break;
803 }
804 }
805
806 pool->free_map[pool->next_free] = IBMVNIC_INVALID_MAP;
807 pool->next_free = (pool->next_free + 1) % pool->size;
808
809 /* Copy the skb to the long term mapped DMA buffer */
810 map_rxpool_buf_to_ltb(pool, bufidx, <b, &offset);
811 dst = ltb->buff + offset;
812 memset(dst, 0, pool->buff_size);
813 dma_addr = ltb->addr + offset;
814
815 /* add the skb to an rx_buff in the pool */
816 pool->rx_buff[bufidx].data = dst;
817 pool->rx_buff[bufidx].dma = dma_addr;
818 pool->rx_buff[bufidx].skb = skb;
819 pool->rx_buff[bufidx].pool_index = pool->index;
820 pool->rx_buff[bufidx].size = pool->buff_size;
821
822 /* queue the rx_buff for the next send_subcrq_indirect */
823 sub_crq = &ind_bufp->indir_arr[ind_bufp->index++];
824 memset(sub_crq, 0, sizeof(*sub_crq));
825 sub_crq->rx_add.first = IBMVNIC_CRQ_CMD;
826 sub_crq->rx_add.correlator =
827 cpu_to_be64((u64)&pool->rx_buff[bufidx]);
828 sub_crq->rx_add.ioba = cpu_to_be32(dma_addr);
829 sub_crq->rx_add.map_id = ltb->map_id;
830
831 /* The length field of the sCRQ is defined to be 24 bits so the
832 * buffer size needs to be left shifted by a byte before it is
833 * converted to big endian to prevent the last byte from being
834 * truncated.
835 */
836 #ifdef __LITTLE_ENDIAN__
837 shift = 8;
838 #endif
839 sub_crq->rx_add.len = cpu_to_be32(pool->buff_size << shift);
840
841 /* if send_subcrq_indirect queue is full, flush to VIOS */
842 if (ind_bufp->index == IBMVNIC_MAX_IND_DESCS ||
843 i == count - 1) {
844 lpar_rc =
845 send_subcrq_indirect(adapter, handle,
846 (u64)ind_bufp->indir_dma,
847 (u64)ind_bufp->index);
848 if (lpar_rc != H_SUCCESS)
849 goto failure;
850 buffers_added += ind_bufp->index;
851 adapter->replenish_add_buff_success += ind_bufp->index;
852 ind_bufp->index = 0;
853 }
854 }
855 atomic_add(buffers_added, &pool->available);
856 return;
857
858 failure:
859 if (lpar_rc != H_PARAMETER && lpar_rc != H_CLOSED)
860 dev_err_ratelimited(dev, "rx: replenish packet buffer failed\n");
861 for (i = ind_bufp->index - 1; i >= 0; --i) {
862 struct ibmvnic_rx_buff *rx_buff;
863
864 pool->next_free = pool->next_free == 0 ?
865 pool->size - 1 : pool->next_free - 1;
866 sub_crq = &ind_bufp->indir_arr[i];
867 rx_buff = (struct ibmvnic_rx_buff *)
868 be64_to_cpu(sub_crq->rx_add.correlator);
869 bufidx = (int)(rx_buff - pool->rx_buff);
870 pool->free_map[pool->next_free] = bufidx;
871 dev_kfree_skb_any(pool->rx_buff[bufidx].skb);
872 pool->rx_buff[bufidx].skb = NULL;
873 }
874 adapter->replenish_add_buff_failure += ind_bufp->index;
875 atomic_add(buffers_added, &pool->available);
876 ind_bufp->index = 0;
877 if (lpar_rc == H_CLOSED || adapter->failover_pending) {
878 /* Disable buffer pool replenishment and report carrier off if
879 * queue is closed or pending failover.
880 * Firmware guarantees that a signal will be sent to the
881 * driver, triggering a reset.
882 */
883 deactivate_rx_pools(adapter);
884 netif_carrier_off(adapter->netdev);
885 }
886 }
887
888 static void replenish_pools(struct ibmvnic_adapter *adapter)
889 {
890 int i;
891
892 adapter->replenish_task_cycles++;
893 for (i = 0; i < adapter->num_active_rx_pools; i++) {
894 if (adapter->rx_pool[i].active)
895 replenish_rx_pool(adapter, &adapter->rx_pool[i]);
896 }
897
898 netdev_dbg(adapter->netdev, "Replenished %d pools\n", i);
899 }
900
901 static void release_stats_buffers(struct ibmvnic_adapter *adapter)
902 {
903 kfree(adapter->tx_stats_buffers);
904 kfree(adapter->rx_stats_buffers);
905 adapter->tx_stats_buffers = NULL;
906 adapter->rx_stats_buffers = NULL;
907 }
908
909 static int init_stats_buffers(struct ibmvnic_adapter *adapter)
910 {
911 adapter->tx_stats_buffers =
912 kcalloc(IBMVNIC_MAX_QUEUES,
913 sizeof(struct ibmvnic_tx_queue_stats),
914 GFP_KERNEL);
915 if (!adapter->tx_stats_buffers)
916 return -ENOMEM;
917
918 adapter->rx_stats_buffers =
919 kcalloc(IBMVNIC_MAX_QUEUES,
920 sizeof(struct ibmvnic_rx_queue_stats),
921 GFP_KERNEL);
922 if (!adapter->rx_stats_buffers)
923 return -ENOMEM;
924
925 return 0;
926 }
927
928 static void release_stats_token(struct ibmvnic_adapter *adapter)
929 {
930 struct device *dev = &adapter->vdev->dev;
931
932 if (!adapter->stats_token)
933 return;
934
935 dma_unmap_single(dev, adapter->stats_token,
936 sizeof(struct ibmvnic_statistics),
937 DMA_FROM_DEVICE);
938 adapter->stats_token = 0;
939 }
940
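/* DMA-map the adapter statistics buffer and store the resulting token,
 * which is handed to firmware when device statistics are requested.
 */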
941 static int init_stats_token(struct ibmvnic_adapter *adapter)
942 {
943 struct device *dev = &adapter->vdev->dev;
944 dma_addr_t stok;
945 int rc;
946
947 stok = dma_map_single(dev, &adapter->stats,
948 sizeof(struct ibmvnic_statistics),
949 DMA_FROM_DEVICE);
950 rc = dma_mapping_error(dev, stok);
951 if (rc) {
952 dev_err(dev, "Couldn't map stats buffer, rc = %d\n", rc);
953 return rc;
954 }
955
956 adapter->stats_token = stok;
957 netdev_dbg(adapter->netdev, "Stats token initialized (%llx)\n", stok);
958 return 0;
959 }
960
961 /**
962 * release_rx_pools() - Release any rx pools attached to @adapter.
963 * @adapter: ibmvnic adapter
964 *
965 * Safe to call this multiple times - even if no pools are attached.
966 */
967 static void release_rx_pools(struct ibmvnic_adapter *adapter)
968 {
969 struct ibmvnic_rx_pool *rx_pool;
970 int i, j;
971
972 if (!adapter->rx_pool)
973 return;
974
975 for (i = 0; i < adapter->num_active_rx_pools; i++) {
976 rx_pool = &adapter->rx_pool[i];
977
978 netdev_dbg(adapter->netdev, "Releasing rx_pool[%d]\n", i);
979
980 kfree(rx_pool->free_map);
981
982 free_ltb_set(adapter, &rx_pool->ltb_set);
983
984 if (!rx_pool->rx_buff)
985 continue;
986
987 for (j = 0; j < rx_pool->size; j++) {
988 if (rx_pool->rx_buff[j].skb) {
989 dev_kfree_skb_any(rx_pool->rx_buff[j].skb);
990 rx_pool->rx_buff[j].skb = NULL;
991 }
992 }
993
994 kfree(rx_pool->rx_buff);
995 }
996
997 kfree(adapter->rx_pool);
998 adapter->rx_pool = NULL;
999 adapter->num_active_rx_pools = 0;
1000 adapter->prev_rx_pool_size = 0;
1001 }
1002
1003 /**
1004 * reuse_rx_pools() - Check if the existing rx pools can be reused.
1005 * @adapter: ibmvnic adapter
1006 *
1007 * Check if the existing rx pools in the adapter can be reused. The
1008 * pools can be reused if the pool parameters (number of pools,
1009 * number of buffers in the pool and size of each buffer) have not
1010 * changed.
1011 *
1012 * NOTE: This assumes that all pools have the same number of buffers
1013 * which is the case currently. If that changes, we must fix this.
1014 *
1015 * Return: true if the rx pools can be reused, false otherwise.
1016 */
1017 static bool reuse_rx_pools(struct ibmvnic_adapter *adapter)
1018 {
1019 u64 old_num_pools, new_num_pools;
1020 u64 old_pool_size, new_pool_size;
1021 u64 old_buff_size, new_buff_size;
1022
1023 if (!adapter->rx_pool)
1024 return false;
1025
1026 old_num_pools = adapter->num_active_rx_pools;
1027 new_num_pools = adapter->req_rx_queues;
1028
1029 old_pool_size = adapter->prev_rx_pool_size;
1030 new_pool_size = adapter->req_rx_add_entries_per_subcrq;
1031
1032 old_buff_size = adapter->prev_rx_buf_sz;
1033 new_buff_size = adapter->cur_rx_buf_sz;
1034
1035 if (old_buff_size != new_buff_size ||
1036 old_num_pools != new_num_pools ||
1037 old_pool_size != new_pool_size)
1038 return false;
1039
1040 return true;
1041 }
1042
1043 /**
1044 * init_rx_pools(): Initialize the set of receiver pools in the adapter.
1045 * @netdev: net device associated with the vnic interface
1046 *
1047 * Initialize the set of receiver pools in the ibmvnic adapter associated
1048 * with the net_device @netdev. If possible, reuse the existing rx pools.
1049 * Otherwise free any existing pools and allocate a new set of pools
1050 * before initializing them.
1051 *
1052 * Return: 0 on success and negative value on error.
1053 */
1054 static int init_rx_pools(struct net_device *netdev)
1055 {
1056 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1057 struct device *dev = &adapter->vdev->dev;
1058 struct ibmvnic_rx_pool *rx_pool;
1059 u64 num_pools;
1060 u64 pool_size; /* # of buffers in one pool */
1061 u64 buff_size;
1062 int i, j, rc;
1063
1064 pool_size = adapter->req_rx_add_entries_per_subcrq;
1065 num_pools = adapter->req_rx_queues;
1066 buff_size = adapter->cur_rx_buf_sz;
1067
1068 if (reuse_rx_pools(adapter)) {
1069 dev_dbg(dev, "Reusing rx pools\n");
1070 goto update_ltb;
1071 }
1072
1073 /* Allocate/populate the pools. */
1074 release_rx_pools(adapter);
1075
1076 adapter->rx_pool = kcalloc(num_pools,
1077 sizeof(struct ibmvnic_rx_pool),
1078 GFP_KERNEL);
1079 if (!adapter->rx_pool) {
1080 dev_err(dev, "Failed to allocate rx pools\n");
1081 return -ENOMEM;
1082 }
1083
1084 /* Set num_active_rx_pools early. If we fail below after partial
1085 * allocation, release_rx_pools() will know how many to look for.
1086 */
1087 adapter->num_active_rx_pools = num_pools;
1088
1089 for (i = 0; i < num_pools; i++) {
1090 rx_pool = &adapter->rx_pool[i];
1091
1092 netdev_dbg(adapter->netdev,
1093 "Initializing rx_pool[%d], %lld buffs, %lld bytes each\n",
1094 i, pool_size, buff_size);
1095
1096 rx_pool->size = pool_size;
1097 rx_pool->index = i;
1098 rx_pool->buff_size = ALIGN(buff_size, L1_CACHE_BYTES);
1099
1100 rx_pool->free_map = kcalloc(rx_pool->size, sizeof(int),
1101 GFP_KERNEL);
1102 if (!rx_pool->free_map) {
1103 dev_err(dev, "Couldn't alloc free_map %d\n", i);
1104 rc = -ENOMEM;
1105 goto out_release;
1106 }
1107
1108 rx_pool->rx_buff = kcalloc(rx_pool->size,
1109 sizeof(struct ibmvnic_rx_buff),
1110 GFP_KERNEL);
1111 if (!rx_pool->rx_buff) {
1112 dev_err(dev, "Couldn't alloc rx buffers\n");
1113 rc = -ENOMEM;
1114 goto out_release;
1115 }
1116 }
1117
1118 adapter->prev_rx_pool_size = pool_size;
1119 adapter->prev_rx_buf_sz = adapter->cur_rx_buf_sz;
1120
1121 update_ltb:
1122 for (i = 0; i < num_pools; i++) {
1123 rx_pool = &adapter->rx_pool[i];
1124 dev_dbg(dev, "Updating LTB for rx pool %d [%d, %d]\n",
1125 i, rx_pool->size, rx_pool->buff_size);
1126
1127 rc = alloc_ltb_set(adapter, &rx_pool->ltb_set,
1128 rx_pool->size, rx_pool->buff_size);
1129 if (rc)
1130 goto out;
1131
1132 for (j = 0; j < rx_pool->size; ++j) {
1133 struct ibmvnic_rx_buff *rx_buff;
1134
1135 rx_pool->free_map[j] = j;
1136
1137 /* NOTE: Don't clear rx_buff->skb here - will leak
1138 * memory! replenish_rx_pool() will reuse skbs or
1139 * allocate as necessary.
1140 */
1141 rx_buff = &rx_pool->rx_buff[j];
1142 rx_buff->dma = 0;
1143 rx_buff->data = 0;
1144 rx_buff->size = 0;
1145 rx_buff->pool_index = 0;
1146 }
1147
1148 /* Mark pool "empty" so replenish_rx_pools() will
1149 * update the LTB info for each buffer
1150 */
1151 atomic_set(&rx_pool->available, 0);
1152 rx_pool->next_alloc = 0;
1153 rx_pool->next_free = 0;
1154 /* replenish_rx_pool() may have called deactivate_rx_pools()
1155 * on failover. Ensure pool is active now.
1156 */
1157 rx_pool->active = 1;
1158 }
1159 return 0;
1160 out_release:
1161 release_rx_pools(adapter);
1162 out:
1163 /* We failed to allocate one or more LTBs or map them on the VIOS.
1164 * Hold onto the pools and any LTBs that we did allocate/map.
1165 */
1166 return rc;
1167 }
1168
1169 static void release_vpd_data(struct ibmvnic_adapter *adapter)
1170 {
1171 if (!adapter->vpd)
1172 return;
1173
1174 kfree(adapter->vpd->buff);
1175 kfree(adapter->vpd);
1176
1177 adapter->vpd = NULL;
1178 }
1179
1180 static void release_one_tx_pool(struct ibmvnic_adapter *adapter,
1181 struct ibmvnic_tx_pool *tx_pool)
1182 {
1183 kfree(tx_pool->tx_buff);
1184 kfree(tx_pool->free_map);
1185 free_ltb_set(adapter, &tx_pool->ltb_set);
1186 }
1187
1188 /**
1189 * release_tx_pools() - Release any tx pools attached to @adapter.
1190 * @adapter: ibmvnic adapter
1191 *
1192 * Safe to call this multiple times - even if no pools are attached.
1193 */
1194 static void release_tx_pools(struct ibmvnic_adapter *adapter)
1195 {
1196 int i;
1197
1198 /* init_tx_pools() ensures that ->tx_pool and ->tso_pool are
1199 * both NULL or both non-NULL. So we only need to check one.
1200 */
1201 if (!adapter->tx_pool)
1202 return;
1203
1204 for (i = 0; i < adapter->num_active_tx_pools; i++) {
1205 release_one_tx_pool(adapter, &adapter->tx_pool[i]);
1206 release_one_tx_pool(adapter, &adapter->tso_pool[i]);
1207 }
1208
1209 kfree(adapter->tx_pool);
1210 adapter->tx_pool = NULL;
1211 kfree(adapter->tso_pool);
1212 adapter->tso_pool = NULL;
1213 adapter->num_active_tx_pools = 0;
1214 adapter->prev_tx_pool_size = 0;
1215 }
1216
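/* Allocate the tx_buff array and free_map for a single tx pool and reset
 * its producer/consumer indexes. The LTBs backing the pool are allocated
 * separately (see init_tx_pools()).
 */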
1217 static int init_one_tx_pool(struct net_device *netdev,
1218 struct ibmvnic_tx_pool *tx_pool,
1219 int pool_size, int buf_size)
1220 {
1221 int i;
1222
1223 tx_pool->tx_buff = kcalloc(pool_size,
1224 sizeof(struct ibmvnic_tx_buff),
1225 GFP_KERNEL);
1226 if (!tx_pool->tx_buff)
1227 return -ENOMEM;
1228
1229 tx_pool->free_map = kcalloc(pool_size, sizeof(int), GFP_KERNEL);
1230 if (!tx_pool->free_map) {
1231 kfree(tx_pool->tx_buff);
1232 tx_pool->tx_buff = NULL;
1233 return -ENOMEM;
1234 }
1235
1236 for (i = 0; i < pool_size; i++)
1237 tx_pool->free_map[i] = i;
1238
1239 tx_pool->consumer_index = 0;
1240 tx_pool->producer_index = 0;
1241 tx_pool->num_buffers = pool_size;
1242 tx_pool->buf_size = buf_size;
1243
1244 return 0;
1245 }
1246
1247 /**
1248 * reuse_tx_pools() - Check if the existing tx pools can be reused.
1249 * @adapter: ibmvnic adapter
1250 *
1251 * Check if the existing tx pools in the adapter can be reused. The
1252 * pools can be reused if the pool parameters (number of pools,
1253 * number of buffers in the pool and mtu) have not changed.
1254 *
1255 * NOTE: This assumes that all pools have the same number of buffers
1256 * which is the case currently. If that changes, we must fix this.
1257 *
1258 * Return: true if the tx pools can be reused, false otherwise.
1259 */
1260 static bool reuse_tx_pools(struct ibmvnic_adapter *adapter)
1261 {
1262 u64 old_num_pools, new_num_pools;
1263 u64 old_pool_size, new_pool_size;
1264 u64 old_mtu, new_mtu;
1265
1266 if (!adapter->tx_pool)
1267 return false;
1268
1269 old_num_pools = adapter->num_active_tx_pools;
1270 new_num_pools = adapter->num_active_tx_scrqs;
1271 old_pool_size = adapter->prev_tx_pool_size;
1272 new_pool_size = adapter->req_tx_entries_per_subcrq;
1273 old_mtu = adapter->prev_mtu;
1274 new_mtu = adapter->req_mtu;
1275
1276 if (old_mtu != new_mtu ||
1277 old_num_pools != new_num_pools ||
1278 old_pool_size != new_pool_size)
1279 return false;
1280
1281 return true;
1282 }
1283
1284 /**
1285 * init_tx_pools(): Initialize the set of transmit pools in the adapter.
1286 * @netdev: net device associated with the vnic interface
1287 *
1288 * Initialize the set of transmit pools in the ibmvnic adapter associated
1289 * with the net_device @netdev. If possible, reuse the existing tx pools.
1290 * Otherwise free any existing pools and allocate a new set of pools
1291 * before initializing them.
1292 *
1293 * Return: 0 on success and negative value on error.
1294 */
1295 static int init_tx_pools(struct net_device *netdev)
1296 {
1297 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1298 struct device *dev = &adapter->vdev->dev;
1299 int num_pools;
1300 u64 pool_size; /* # of buffers in pool */
1301 u64 buff_size;
1302 int i, j, rc;
1303
1304 num_pools = adapter->req_tx_queues;
1305
1306 /* We must notify the VIOS about the LTB on all resets - but we only
1307 * need to alloc/populate pools if either the number of buffers or
1308 * size of each buffer in the pool has changed.
1309 */
1310 if (reuse_tx_pools(adapter)) {
1311 netdev_dbg(netdev, "Reusing tx pools\n");
1312 goto update_ltb;
1313 }
1314
1315 /* Allocate/populate the pools. */
1316 release_tx_pools(adapter);
1317
1318 pool_size = adapter->req_tx_entries_per_subcrq;
1319 num_pools = adapter->num_active_tx_scrqs;
1320
1321 adapter->tx_pool = kcalloc(num_pools,
1322 sizeof(struct ibmvnic_tx_pool), GFP_KERNEL);
1323 if (!adapter->tx_pool)
1324 return -ENOMEM;
1325
1326 adapter->tso_pool = kcalloc(num_pools,
1327 sizeof(struct ibmvnic_tx_pool), GFP_KERNEL);
1328 /* To simplify release_tx_pools() ensure that ->tx_pool and
1329 * ->tso_pool are either both NULL or both non-NULL.
1330 */
1331 if (!adapter->tso_pool) {
1332 kfree(adapter->tx_pool);
1333 adapter->tx_pool = NULL;
1334 return -ENOMEM;
1335 }
1336
1337 /* Set num_active_tx_pools early. If we fail below after partial
1338 * allocation, release_tx_pools() will know how many to look for.
1339 */
1340 adapter->num_active_tx_pools = num_pools;
1341
1342 buff_size = adapter->req_mtu + VLAN_HLEN;
1343 buff_size = ALIGN(buff_size, L1_CACHE_BYTES);
1344
1345 for (i = 0; i < num_pools; i++) {
1346 dev_dbg(dev, "Init tx pool %d [%llu, %llu]\n",
1347 i, adapter->req_tx_entries_per_subcrq, buff_size);
1348
1349 rc = init_one_tx_pool(netdev, &adapter->tx_pool[i],
1350 pool_size, buff_size);
1351 if (rc)
1352 goto out_release;
1353
1354 rc = init_one_tx_pool(netdev, &adapter->tso_pool[i],
1355 IBMVNIC_TSO_BUFS,
1356 IBMVNIC_TSO_BUF_SZ);
1357 if (rc)
1358 goto out_release;
1359 }
1360
1361 adapter->prev_tx_pool_size = pool_size;
1362 adapter->prev_mtu = adapter->req_mtu;
1363
1364 update_ltb:
1365 /* NOTE: All tx_pools have the same number of buffers (which is
1366 * same as pool_size). All tso_pools have IBMVNIC_TSO_BUFS
1367 * buffers (see the calls to init_one_tx_pool() for these).
1368 * For consistency, we use tx_pool->num_buffers and
1369 * tso_pool->num_buffers below.
1370 */
1371 rc = -1;
1372 for (i = 0; i < num_pools; i++) {
1373 struct ibmvnic_tx_pool *tso_pool;
1374 struct ibmvnic_tx_pool *tx_pool;
1375
1376 tx_pool = &adapter->tx_pool[i];
1377
1378 dev_dbg(dev, "Updating LTB for tx pool %d [%d, %d]\n",
1379 i, tx_pool->num_buffers, tx_pool->buf_size);
1380
1381 rc = alloc_ltb_set(adapter, &tx_pool->ltb_set,
1382 tx_pool->num_buffers, tx_pool->buf_size);
1383 if (rc)
1384 goto out;
1385
1386 tx_pool->consumer_index = 0;
1387 tx_pool->producer_index = 0;
1388
1389 for (j = 0; j < tx_pool->num_buffers; j++)
1390 tx_pool->free_map[j] = j;
1391
1392 tso_pool = &adapter->tso_pool[i];
1393
1394 dev_dbg(dev, "Updating LTB for tso pool %d [%d, %d]\n",
1395 i, tso_pool->num_buffers, tso_pool->buf_size);
1396
1397 rc = alloc_ltb_set(adapter, &tso_pool->ltb_set,
1398 tso_pool->num_buffers, tso_pool->buf_size);
1399 if (rc)
1400 goto out;
1401
1402 tso_pool->consumer_index = 0;
1403 tso_pool->producer_index = 0;
1404
1405 for (j = 0; j < tso_pool->num_buffers; j++)
1406 tso_pool->free_map[j] = j;
1407 }
1408
1409 return 0;
1410 out_release:
1411 release_tx_pools(adapter);
1412 out:
1413 /* We failed to allocate one or more LTBs or map them on the VIOS.
1414 * Hold onto the pools and any LTBs that we did allocate/map.
1415 */
1416 return rc;
1417 }
1418
1419 static void ibmvnic_napi_enable(struct ibmvnic_adapter *adapter)
1420 {
1421 int i;
1422
1423 if (adapter->napi_enabled)
1424 return;
1425
1426 for (i = 0; i < adapter->req_rx_queues; i++)
1427 napi_enable(&adapter->napi[i]);
1428
1429 adapter->napi_enabled = true;
1430 }
1431
1432 static void ibmvnic_napi_disable(struct ibmvnic_adapter *adapter)
1433 {
1434 int i;
1435
1436 if (!adapter->napi_enabled)
1437 return;
1438
1439 for (i = 0; i < adapter->req_rx_queues; i++) {
1440 netdev_dbg(adapter->netdev, "Disabling napi[%d]\n", i);
1441 napi_disable(&adapter->napi[i]);
1442 }
1443
1444 adapter->napi_enabled = false;
1445 }
1446
1447 static int init_napi(struct ibmvnic_adapter *adapter)
1448 {
1449 int i;
1450
1451 adapter->napi = kcalloc(adapter->req_rx_queues,
1452 sizeof(struct napi_struct), GFP_KERNEL);
1453 if (!adapter->napi)
1454 return -ENOMEM;
1455
1456 for (i = 0; i < adapter->req_rx_queues; i++) {
1457 netdev_dbg(adapter->netdev, "Adding napi[%d]\n", i);
1458 netif_napi_add(adapter->netdev, &adapter->napi[i],
1459 ibmvnic_poll);
1460 }
1461
1462 adapter->num_active_rx_napi = adapter->req_rx_queues;
1463 return 0;
1464 }
1465
1466 static void release_napi(struct ibmvnic_adapter *adapter)
1467 {
1468 int i;
1469
1470 if (!adapter->napi)
1471 return;
1472
1473 for (i = 0; i < adapter->num_active_rx_napi; i++) {
1474 netdev_dbg(adapter->netdev, "Releasing napi[%d]\n", i);
1475 netif_napi_del(&adapter->napi[i]);
1476 }
1477
1478 kfree(adapter->napi);
1479 adapter->napi = NULL;
1480 adapter->num_active_rx_napi = 0;
1481 adapter->napi_enabled = false;
1482 }
1483
1484 static const char *adapter_state_to_string(enum vnic_state state)
1485 {
1486 switch (state) {
1487 case VNIC_PROBING:
1488 return "PROBING";
1489 case VNIC_PROBED:
1490 return "PROBED";
1491 case VNIC_OPENING:
1492 return "OPENING";
1493 case VNIC_OPEN:
1494 return "OPEN";
1495 case VNIC_CLOSING:
1496 return "CLOSING";
1497 case VNIC_CLOSED:
1498 return "CLOSED";
1499 case VNIC_REMOVING:
1500 return "REMOVING";
1501 case VNIC_REMOVED:
1502 return "REMOVED";
1503 case VNIC_DOWN:
1504 return "DOWN";
1505 }
1506 return "UNKNOWN";
1507 }
1508
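/* Log in to the VNIC server. ABORTED and PARTIALSUCCESS responses are
 * retried (re-querying capabilities and re-initializing sub-CRQs for the
 * latter); on other failures or a timeout, the CRQ is freed and
 * re-registered before retrying so the VIOS does not treat the driver as
 * already logged in.
 */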
1509 static int ibmvnic_login(struct net_device *netdev)
1510 {
1511 unsigned long flags, timeout = msecs_to_jiffies(20000);
1512 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1513 int retry_count = 0;
1514 int retries = 10;
1515 bool retry;
1516 int rc;
1517
1518 do {
1519 retry = false;
1520 if (retry_count > retries) {
1521 netdev_warn(netdev, "Login attempts exceeded\n");
1522 return -EACCES;
1523 }
1524
1525 adapter->init_done_rc = 0;
1526 reinit_completion(&adapter->init_done);
1527 rc = send_login(adapter);
1528 if (rc)
1529 return rc;
1530
1531 if (!wait_for_completion_timeout(&adapter->init_done,
1532 timeout)) {
1533 netdev_warn(netdev, "Login timed out\n");
1534 adapter->login_pending = false;
1535 goto partial_reset;
1536 }
1537
1538 if (adapter->init_done_rc == ABORTED) {
1539 netdev_warn(netdev, "Login aborted, retrying...\n");
1540 retry = true;
1541 adapter->init_done_rc = 0;
1542 retry_count++;
1543 /* FW or device may be busy, so
1544 * wait a bit before retrying login
1545 */
1546 msleep(500);
1547 } else if (adapter->init_done_rc == PARTIALSUCCESS) {
1548 retry_count++;
1549 release_sub_crqs(adapter, 1);
1550
1551 retry = true;
1552 netdev_dbg(netdev,
1553 "Received partial success, retrying...\n");
1554 adapter->init_done_rc = 0;
1555 reinit_completion(&adapter->init_done);
1556 send_query_cap(adapter);
1557 if (!wait_for_completion_timeout(&adapter->init_done,
1558 timeout)) {
1559 netdev_warn(netdev,
1560 "Capabilities query timed out\n");
1561 return -ETIMEDOUT;
1562 }
1563
1564 rc = init_sub_crqs(adapter);
1565 if (rc) {
1566 netdev_warn(netdev,
1567 "SCRQ initialization failed\n");
1568 return rc;
1569 }
1570
1571 rc = init_sub_crq_irqs(adapter);
1572 if (rc) {
1573 netdev_warn(netdev,
1574 "SCRQ irq initialization failed\n");
1575 return rc;
1576 }
1577 /* Default/timeout error handling, reset and start fresh */
1578 } else if (adapter->init_done_rc) {
1579 netdev_warn(netdev, "Adapter login failed, init_done_rc = %d\n",
1580 adapter->init_done_rc);
1581
1582 partial_reset:
1583 /* adapter login failed, so free any CRQs or sub-CRQs
1584 * and register again before attempting to login again.
1585 * If we don't do this then the VIOS may think that
1586 * we are already logged in and reject any subsequent
1587 * attempts
1588 */
1589 netdev_warn(netdev,
1590 "Freeing and re-registering CRQs before attempting to login again\n");
1591 retry = true;
1592 adapter->init_done_rc = 0;
1593 release_sub_crqs(adapter, true);
1594 /* Much of this is similar to the logic in ibmvnic_probe();
1595 * we are essentially re-initializing communication
1596 * with the server. We really should not run any
1597 * resets/failovers here because this is already a form
1598 * of reset and we do not want parallel resets occurring
1599 */
1600 do {
1601 reinit_init_done(adapter);
1602 /* Clear any failovers we got in the previous
1603 * pass since we are re-initializing the CRQ
1604 */
1605 adapter->failover_pending = false;
1606 release_crq_queue(adapter);
1607 /* If we don't sleep here then we risk an
1608 * unnecessary failover event from the VIOS.
1609 * This is a known VIOS issue caused by a vnic
1610 * device freeing and registering a CRQ too
1611 * quickly.
1612 */
1613 msleep(1500);
1614 /* Avoid any resets, since we are currently
1615 * resetting.
1616 */
1617 spin_lock_irqsave(&adapter->rwi_lock, flags);
1618 flush_reset_queue(adapter);
1619 spin_unlock_irqrestore(&adapter->rwi_lock,
1620 flags);
1621
1622 rc = init_crq_queue(adapter);
1623 if (rc) {
1624 netdev_err(netdev, "login recovery: init CRQ failed %d\n",
1625 rc);
1626 return -EIO;
1627 }
1628
1629 rc = ibmvnic_reset_init(adapter, false);
1630 if (rc)
1631 netdev_err(netdev, "login recovery: Reset init failed %d\n",
1632 rc);
1633 /* IBMVNIC_CRQ_INIT will return EAGAIN if it
1634 * fails. Since ibmvnic_reset_init() frees the
1635 * IRQs on failure, we won't be able to receive
1636 * new CRQs, so we need to keep trying. probe()
1637 * handles this similarly.
1638 */
1639 } while (rc == -EAGAIN && retry_count++ < retries);
1640 }
1641 } while (retry);
1642
1643 __ibmvnic_set_mac(netdev, adapter->mac_addr);
1644
1645 netdev_dbg(netdev, "[S:%s] Login succeeded\n", adapter_state_to_string(adapter->state));
1646 return 0;
1647 }
1648
1649 static void release_login_buffer(struct ibmvnic_adapter *adapter)
1650 {
1651 if (!adapter->login_buf)
1652 return;
1653
1654 dma_unmap_single(&adapter->vdev->dev, adapter->login_buf_token,
1655 adapter->login_buf_sz, DMA_TO_DEVICE);
1656 kfree(adapter->login_buf);
1657 adapter->login_buf = NULL;
1658 }
1659
1660 static void release_login_rsp_buffer(struct ibmvnic_adapter *adapter)
1661 {
1662 if (!adapter->login_rsp_buf)
1663 return;
1664
1665 dma_unmap_single(&adapter->vdev->dev, adapter->login_rsp_buf_token,
1666 adapter->login_rsp_buf_sz, DMA_FROM_DEVICE);
1667 kfree(adapter->login_rsp_buf);
1668 adapter->login_rsp_buf = NULL;
1669 }
1670
1671 static void release_resources(struct ibmvnic_adapter *adapter)
1672 {
1673 release_vpd_data(adapter);
1674
1675 release_napi(adapter);
1676 release_login_buffer(adapter);
1677 release_login_rsp_buffer(adapter);
1678 }
1679
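/* Issue a LOGICAL_LINK_STATE CRQ to set the requested link state and wait
 * for the response, re-sending after a delay if the server returns
 * PARTIALSUCCESS.
 */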
1680 static int set_link_state(struct ibmvnic_adapter *adapter, u8 link_state)
1681 {
1682 struct net_device *netdev = adapter->netdev;
1683 unsigned long timeout = msecs_to_jiffies(20000);
1684 union ibmvnic_crq crq;
1685 bool resend;
1686 int rc;
1687
1688 netdev_dbg(netdev, "setting link state %d\n", link_state);
1689
1690 memset(&crq, 0, sizeof(crq));
1691 crq.logical_link_state.first = IBMVNIC_CRQ_CMD;
1692 crq.logical_link_state.cmd = LOGICAL_LINK_STATE;
1693 crq.logical_link_state.link_state = link_state;
1694
1695 do {
1696 resend = false;
1697
1698 reinit_completion(&adapter->init_done);
1699 rc = ibmvnic_send_crq(adapter, &crq);
1700 if (rc) {
1701 netdev_err(netdev, "Failed to set link state\n");
1702 return rc;
1703 }
1704
1705 if (!wait_for_completion_timeout(&adapter->init_done,
1706 timeout)) {
1707 netdev_err(netdev, "timeout setting link state\n");
1708 return -ETIMEDOUT;
1709 }
1710
1711 if (adapter->init_done_rc == PARTIALSUCCESS) {
1712 /* Partial success, delay and re-send */
1713 mdelay(1000);
1714 resend = true;
1715 } else if (adapter->init_done_rc) {
1716 netdev_warn(netdev, "Unable to set link state, rc=%d\n",
1717 adapter->init_done_rc);
1718 return adapter->init_done_rc;
1719 }
1720 } while (resend);
1721
1722 return 0;
1723 }
1724
1725 static int set_real_num_queues(struct net_device *netdev)
1726 {
1727 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1728 int rc;
1729
1730 netdev_dbg(netdev, "Setting real tx/rx queues (%llx/%llx)\n",
1731 adapter->req_tx_queues, adapter->req_rx_queues);
1732
1733 rc = netif_set_real_num_tx_queues(netdev, adapter->req_tx_queues);
1734 if (rc) {
1735 netdev_err(netdev, "failed to set the number of tx queues\n");
1736 return rc;
1737 }
1738
1739 rc = netif_set_real_num_rx_queues(netdev, adapter->req_rx_queues);
1740 if (rc)
1741 netdev_err(netdev, "failed to set the number of rx queues\n");
1742
1743 return rc;
1744 }
1745
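/* Retrieve Vital Product Data from the VNIC server: query the VPD size
 * (GET_VPD_SIZE), allocate and DMA-map a buffer of that size, then
 * request the data itself (GET_VPD). Both requests complete
 * asynchronously via the fw_done completion.
 */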
1746 static int ibmvnic_get_vpd(struct ibmvnic_adapter *adapter)
1747 {
1748 struct device *dev = &adapter->vdev->dev;
1749 union ibmvnic_crq crq;
1750 int len = 0;
1751 int rc;
1752
1753 if (adapter->vpd->buff)
1754 len = adapter->vpd->len;
1755
1756 mutex_lock(&adapter->fw_lock);
1757 adapter->fw_done_rc = 0;
1758 reinit_completion(&adapter->fw_done);
1759
1760 crq.get_vpd_size.first = IBMVNIC_CRQ_CMD;
1761 crq.get_vpd_size.cmd = GET_VPD_SIZE;
1762 rc = ibmvnic_send_crq(adapter, &crq);
1763 if (rc) {
1764 mutex_unlock(&adapter->fw_lock);
1765 return rc;
1766 }
1767
1768 rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
1769 if (rc) {
1770 dev_err(dev, "Could not retrieve VPD size, rc = %d\n", rc);
1771 mutex_unlock(&adapter->fw_lock);
1772 return rc;
1773 }
1774 mutex_unlock(&adapter->fw_lock);
1775
1776 if (!adapter->vpd->len)
1777 return -ENODATA;
1778
1779 if (!adapter->vpd->buff)
1780 adapter->vpd->buff = kzalloc(adapter->vpd->len, GFP_KERNEL);
1781 else if (adapter->vpd->len != len)
1782 adapter->vpd->buff =
1783 krealloc(adapter->vpd->buff,
1784 adapter->vpd->len, GFP_KERNEL);
1785
1786 if (!adapter->vpd->buff) {
1787 dev_err(dev, "Could not allocate VPD buffer\n");
1788 return -ENOMEM;
1789 }
1790
1791 adapter->vpd->dma_addr =
1792 dma_map_single(dev, adapter->vpd->buff, adapter->vpd->len,
1793 DMA_FROM_DEVICE);
1794 if (dma_mapping_error(dev, adapter->vpd->dma_addr)) {
1795 dev_err(dev, "Could not map VPD buffer\n");
1796 kfree(adapter->vpd->buff);
1797 adapter->vpd->buff = NULL;
1798 return -ENOMEM;
1799 }
1800
1801 mutex_lock(&adapter->fw_lock);
1802 adapter->fw_done_rc = 0;
1803 reinit_completion(&adapter->fw_done);
1804
1805 crq.get_vpd.first = IBMVNIC_CRQ_CMD;
1806 crq.get_vpd.cmd = GET_VPD;
1807 crq.get_vpd.ioba = cpu_to_be32(adapter->vpd->dma_addr);
1808 crq.get_vpd.len = cpu_to_be32((u32)adapter->vpd->len);
1809 rc = ibmvnic_send_crq(adapter, &crq);
1810 if (rc) {
1811 kfree(adapter->vpd->buff);
1812 adapter->vpd->buff = NULL;
1813 mutex_unlock(&adapter->fw_lock);
1814 return rc;
1815 }
1816
1817 rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
1818 if (rc) {
1819 dev_err(dev, "Unable to retrieve VPD, rc = %d\n", rc);
1820 kfree(adapter->vpd->buff);
1821 adapter->vpd->buff = NULL;
1822 mutex_unlock(&adapter->fw_lock);
1823 return rc;
1824 }
1825
1826 mutex_unlock(&adapter->fw_lock);
1827 return 0;
1828 }
1829
1830 static int init_resources(struct ibmvnic_adapter *adapter)
1831 {
1832 struct net_device *netdev = adapter->netdev;
1833 int rc;
1834
1835 rc = set_real_num_queues(netdev);
1836 if (rc)
1837 return rc;
1838
1839 adapter->vpd = kzalloc(sizeof(*adapter->vpd), GFP_KERNEL);
1840 if (!adapter->vpd)
1841 return -ENOMEM;
1842
1843 /* Vital Product Data (VPD) */
1844 rc = ibmvnic_get_vpd(adapter);
1845 if (rc) {
1846 netdev_err(netdev, "failed to initialize Vital Product Data (VPD)\n");
1847 return rc;
1848 }
1849
1850 rc = init_napi(adapter);
1851 if (rc)
1852 return rc;
1853
1854 send_query_map(adapter);
1855
1856 rc = init_rx_pools(netdev);
1857 if (rc)
1858 return rc;
1859
1860 rc = init_tx_pools(netdev);
1861 return rc;
1862 }
1863
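/* Bring the device up: replenish the rx pools, enable NAPI and the
 * sub-CRQ interrupts, set the logical link state to up and start the
 * tx queues. When coming from VNIC_CLOSED, the per-queue irqs are
 * re-enabled and NAPI is kicked to drain any pending completions.
 */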
1864 static int __ibmvnic_open(struct net_device *netdev)
1865 {
1866 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1867 enum vnic_state prev_state = adapter->state;
1868 int i, rc;
1869
1870 adapter->state = VNIC_OPENING;
1871 replenish_pools(adapter);
1872 ibmvnic_napi_enable(adapter);
1873
1874 /* We're ready to receive frames, enable the sub-crq interrupts and
1875 * set the logical link state to up
1876 */
1877 for (i = 0; i < adapter->req_rx_queues; i++) {
1878 netdev_dbg(netdev, "Enabling rx_scrq[%d] irq\n", i);
1879 if (prev_state == VNIC_CLOSED)
1880 enable_irq(adapter->rx_scrq[i]->irq);
1881 enable_scrq_irq(adapter, adapter->rx_scrq[i]);
1882 }
1883
1884 for (i = 0; i < adapter->req_tx_queues; i++) {
1885 netdev_dbg(netdev, "Enabling tx_scrq[%d] irq\n", i);
1886 if (prev_state == VNIC_CLOSED)
1887 enable_irq(adapter->tx_scrq[i]->irq);
1888 enable_scrq_irq(adapter, adapter->tx_scrq[i]);
1889 /* netdev_tx_reset_queue will reset dql stats. During NON_FATAL
1890 * resets, don't reset the stats because there could be batched
1891 * skb's waiting to be sent. If we reset dql stats, we risk
1892 * num_completed being greater than num_queued. This will cause
1893 * a BUG_ON in dql_completed().
1894 */
1895 if (adapter->reset_reason != VNIC_RESET_NON_FATAL)
1896 netdev_tx_reset_queue(netdev_get_tx_queue(netdev, i));
1897 }
1898
1899 rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_UP);
1900 if (rc) {
1901 ibmvnic_napi_disable(adapter);
1902 ibmvnic_disable_irqs(adapter);
1903 return rc;
1904 }
1905
1906 adapter->tx_queues_active = true;
1907
1908 /* Since queues were stopped until now, there shouldn't be any
1909 * one in ibmvnic_complete_tx() or ibmvnic_xmit() so maybe we
1910 * don't need the synchronize_rcu()? Leaving it for consistency
1911 * with setting ->tx_queues_active = false.
1912 */
1913 synchronize_rcu();
1914
1915 netif_tx_start_all_queues(netdev);
1916
1917 if (prev_state == VNIC_CLOSED) {
1918 for (i = 0; i < adapter->req_rx_queues; i++)
1919 napi_schedule(&adapter->napi[i]);
1920 }
1921
1922 adapter->state = VNIC_OPEN;
1923 return rc;
1924 }
1925
1926 static int ibmvnic_open(struct net_device *netdev)
1927 {
1928 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1929 int rc;
1930
1931 ASSERT_RTNL();
1932
1933 /* If device failover is pending or we are about to reset, just set
1934 * device state and return. Device operation will be handled by reset
1935 * routine.
1936 *
1937 * It should be safe to overwrite the adapter->state here. Since
1938 * we hold the rtnl, either the reset has not actually started or
1939 * the rtnl got dropped during the set_link_state() in do_reset().
1940 * In the former case, no one else is changing the state (again we
1941 * have the rtnl) and in the latter case, do_reset() will detect and
1942 * honor our setting below.
1943 */
1944 if (adapter->failover_pending || (test_bit(0, &adapter->resetting))) {
1945 netdev_dbg(netdev, "[S:%s FOP:%d] Resetting, deferring open\n",
1946 adapter_state_to_string(adapter->state),
1947 adapter->failover_pending);
1948 adapter->state = VNIC_OPEN;
1949 rc = 0;
1950 goto out;
1951 }
1952
1953 if (adapter->state != VNIC_CLOSED) {
1954 rc = ibmvnic_login(netdev);
1955 if (rc)
1956 goto out;
1957
1958 rc = init_resources(adapter);
1959 if (rc) {
1960 netdev_err(netdev, "failed to initialize resources\n");
1961 goto out;
1962 }
1963 }
1964
1965 rc = __ibmvnic_open(netdev);
1966
1967 out:
1968 /* If open failed and there is a pending failover or in-progress reset,
1969 * set device state and return. Device operation will be handled by
1970 * reset routine. See also comments above regarding rtnl.
1971 */
1972 if (rc &&
1973 (adapter->failover_pending || (test_bit(0, &adapter->resetting)))) {
1974 adapter->state = VNIC_OPEN;
1975 rc = 0;
1976 }
1977
1978 if (rc) {
1979 release_resources(adapter);
1980 release_rx_pools(adapter);
1981 release_tx_pools(adapter);
1982 }
1983
1984 return rc;
1985 }
1986
1987 static void clean_rx_pools(struct ibmvnic_adapter *adapter)
1988 {
1989 struct ibmvnic_rx_pool *rx_pool;
1990 struct ibmvnic_rx_buff *rx_buff;
1991 u64 rx_entries;
1992 int rx_scrqs;
1993 int i, j;
1994
1995 if (!adapter->rx_pool)
1996 return;
1997
1998 rx_scrqs = adapter->num_active_rx_pools;
1999 rx_entries = adapter->req_rx_add_entries_per_subcrq;
2000
2001 /* Free any remaining skbs in the rx buffer pools */
2002 for (i = 0; i < rx_scrqs; i++) {
2003 rx_pool = &adapter->rx_pool[i];
2004 if (!rx_pool || !rx_pool->rx_buff)
2005 continue;
2006
2007 netdev_dbg(adapter->netdev, "Cleaning rx_pool[%d]\n", i);
2008 for (j = 0; j < rx_entries; j++) {
2009 rx_buff = &rx_pool->rx_buff[j];
2010 if (rx_buff && rx_buff->skb) {
2011 dev_kfree_skb_any(rx_buff->skb);
2012 rx_buff->skb = NULL;
2013 }
2014 }
2015 }
2016 }
2017
2018 static void clean_one_tx_pool(struct ibmvnic_adapter *adapter,
2019 struct ibmvnic_tx_pool *tx_pool)
2020 {
2021 struct ibmvnic_tx_buff *tx_buff;
2022 u64 tx_entries;
2023 int i;
2024
2025 if (!tx_pool || !tx_pool->tx_buff)
2026 return;
2027
2028 tx_entries = tx_pool->num_buffers;
2029
2030 for (i = 0; i < tx_entries; i++) {
2031 tx_buff = &tx_pool->tx_buff[i];
2032 if (tx_buff && tx_buff->skb) {
2033 dev_kfree_skb_any(tx_buff->skb);
2034 tx_buff->skb = NULL;
2035 }
2036 }
2037 }
2038
2039 static void clean_tx_pools(struct ibmvnic_adapter *adapter)
2040 {
2041 int tx_scrqs;
2042 int i;
2043
2044 if (!adapter->tx_pool || !adapter->tso_pool)
2045 return;
2046
2047 tx_scrqs = adapter->num_active_tx_pools;
2048
2049 /* Free any remaining skbs in the tx buffer pools */
2050 for (i = 0; i < tx_scrqs; i++) {
2051 netdev_dbg(adapter->netdev, "Cleaning tx_pool[%d]\n", i);
2052 clean_one_tx_pool(adapter, &adapter->tx_pool[i]);
2053 clean_one_tx_pool(adapter, &adapter->tso_pool[i]);
2054 }
2055 }
2056
2057 static void ibmvnic_disable_irqs(struct ibmvnic_adapter *adapter)
2058 {
2059 struct net_device *netdev = adapter->netdev;
2060 int i;
2061
2062 if (adapter->tx_scrq) {
2063 for (i = 0; i < adapter->req_tx_queues; i++)
2064 if (adapter->tx_scrq[i]->irq) {
2065 netdev_dbg(netdev,
2066 "Disabling tx_scrq[%d] irq\n", i);
2067 disable_scrq_irq(adapter, adapter->tx_scrq[i]);
2068 disable_irq(adapter->tx_scrq[i]->irq);
2069 }
2070 }
2071
2072 if (adapter->rx_scrq) {
2073 for (i = 0; i < adapter->req_rx_queues; i++) {
2074 if (adapter->rx_scrq[i]->irq) {
2075 netdev_dbg(netdev,
2076 "Disabling rx_scrq[%d] irq\n", i);
2077 disable_scrq_irq(adapter, adapter->rx_scrq[i]);
2078 disable_irq(adapter->rx_scrq[i]->irq);
2079 }
2080 }
2081 }
2082 }
2083
2084 static void ibmvnic_cleanup(struct net_device *netdev)
2085 {
2086 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2087
2088 /* ensure that transmissions are stopped if called by do_reset */
2089
2090 adapter->tx_queues_active = false;
2091
2092 /* Ensure complete_tx() and ibmvnic_xmit() see ->tx_queues_active
2093 * update so they don't restart a queue after we stop it below.
2094 */
2095 synchronize_rcu();
2096
2097 if (test_bit(0, &adapter->resetting))
2098 netif_tx_disable(netdev);
2099 else
2100 netif_tx_stop_all_queues(netdev);
2101
2102 ibmvnic_napi_disable(adapter);
2103 ibmvnic_disable_irqs(adapter);
2104 }
2105
2106 static int __ibmvnic_close(struct net_device *netdev)
2107 {
2108 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2109 int rc = 0;
2110
2111 adapter->state = VNIC_CLOSING;
2112 rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_DN);
2113 adapter->state = VNIC_CLOSED;
2114 return rc;
2115 }
2116
2117 static int ibmvnic_close(struct net_device *netdev)
2118 {
2119 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2120 int rc;
2121
2122 netdev_dbg(netdev, "[S:%s FOP:%d FRR:%d] Closing\n",
2123 adapter_state_to_string(adapter->state),
2124 adapter->failover_pending,
2125 adapter->force_reset_recovery);
2126
2127 /* If device failover is pending, just set device state and return.
2128 * Device operation will be handled by reset routine.
2129 */
2130 if (adapter->failover_pending) {
2131 adapter->state = VNIC_CLOSED;
2132 return 0;
2133 }
2134
2135 rc = __ibmvnic_close(netdev);
2136 ibmvnic_cleanup(netdev);
2137 clean_rx_pools(adapter);
2138 clean_tx_pools(adapter);
2139
2140 return rc;
2141 }
2142
2143 /**
2144 * get_hdr_lens - fills list of L2/L3/L4 hdr lens
2145 * @hdr_field: bitfield determining needed headers
2146 * @skb: socket buffer
2147 * @hdr_len: array of header lengths to be filled
2148 *
2149 * Reads hdr_field to determine which headers are needed by firmware.
2150 * Builds a buffer containing these headers. Saves individual header
2151 * lengths and total buffer length to be used to build descriptors.
2152 *
2153 * Return: total len of all headers
2154 */
2155 static int get_hdr_lens(u8 hdr_field, struct sk_buff *skb,
2156 int *hdr_len)
2157 {
2158 int len = 0;
2159
2160
2161 if ((hdr_field >> 6) & 1) {
2162 hdr_len[0] = skb_mac_header_len(skb);
2163 len += hdr_len[0];
2164 }
2165
2166 if ((hdr_field >> 5) & 1) {
2167 hdr_len[1] = skb_network_header_len(skb);
2168 len += hdr_len[1];
2169 }
2170
2171 if (!((hdr_field >> 4) & 1))
2172 return len;
2173
2174 if (skb->protocol == htons(ETH_P_IP)) {
2175 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
2176 hdr_len[2] = tcp_hdrlen(skb);
2177 else if (ip_hdr(skb)->protocol == IPPROTO_UDP)
2178 hdr_len[2] = sizeof(struct udphdr);
2179 } else if (skb->protocol == htons(ETH_P_IPV6)) {
2180 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
2181 hdr_len[2] = tcp_hdrlen(skb);
2182 else if (ipv6_hdr(skb)->nexthdr == IPPROTO_UDP)
2183 hdr_len[2] = sizeof(struct udphdr);
2184 }
2185
2186 return len + hdr_len[2];
2187 }
2188
2189 /**
2190 * create_hdr_descs - create header and header extension descriptors
2191 * @hdr_field: bitfield determining needed headers
2192 * @hdr_data: buffer containing header data
2193 * @len: length of data buffer
2194 * @hdr_len: array of individual header lengths
2195 * @scrq_arr: descriptor array
2196 *
2197 * Creates header and, if needed, header extension descriptors and
2198 * places them in a descriptor array, scrq_arr
2199 *
2200 * Return: Number of header descs
2201 */
2202
2203 static int create_hdr_descs(u8 hdr_field, u8 *hdr_data, int len, int *hdr_len,
2204 union sub_crq *scrq_arr)
2205 {
2206 union sub_crq *hdr_desc;
2207 int tmp_len = len;
2208 int num_descs = 0;
2209 u8 *data, *cur;
2210 int tmp;
2211
2212 while (tmp_len > 0) {
2213 cur = hdr_data + len - tmp_len;
2214
2215 hdr_desc = &scrq_arr[num_descs];
2216 if (num_descs) {
2217 data = hdr_desc->hdr_ext.data;
2218 tmp = tmp_len > 29 ? 29 : tmp_len;
2219 hdr_desc->hdr_ext.first = IBMVNIC_CRQ_CMD;
2220 hdr_desc->hdr_ext.type = IBMVNIC_HDR_EXT_DESC;
2221 hdr_desc->hdr_ext.len = tmp;
2222 } else {
2223 data = hdr_desc->hdr.data;
2224 tmp = tmp_len > 24 ? 24 : tmp_len;
2225 hdr_desc->hdr.first = IBMVNIC_CRQ_CMD;
2226 hdr_desc->hdr.type = IBMVNIC_HDR_DESC;
2227 hdr_desc->hdr.len = tmp;
2228 hdr_desc->hdr.l2_len = (u8)hdr_len[0];
2229 hdr_desc->hdr.l3_len = cpu_to_be16((u16)hdr_len[1]);
2230 hdr_desc->hdr.l4_len = (u8)hdr_len[2];
2231 hdr_desc->hdr.flag = hdr_field << 1;
2232 }
2233 memcpy(data, cur, tmp);
2234 tmp_len -= tmp;
2235 num_descs++;
2236 }
2237
2238 return num_descs;
2239 }
2240
2241 /**
2242 * build_hdr_descs_arr - build a header descriptor array
2243 * @skb: tx socket buffer
2244 * @indir_arr: indirect array
2245 * @num_entries: number of descriptors to be sent
2246 * @hdr_field: bit field determining which headers will be sent
2247 *
2248 * This function will build a TX descriptor array with applicable
2249 * L2/L3/L4 packet header descriptors to be sent by send_subcrq_indirect.
2250 */
2251
2252 static void build_hdr_descs_arr(struct sk_buff *skb,
2253 union sub_crq *indir_arr,
2254 int *num_entries, u8 hdr_field)
2255 {
2256 int hdr_len[3] = {0, 0, 0};
2257 int tot_len;
2258
2259 tot_len = get_hdr_lens(hdr_field, skb, hdr_len);
2260 *num_entries += create_hdr_descs(hdr_field, skb_mac_header(skb),
2261 tot_len, hdr_len, indir_arr + 1);
2262 }
2263
2264 static int ibmvnic_xmit_workarounds(struct sk_buff *skb,
2265 struct net_device *netdev)
2266 {
2267 /* For some backing devices, mishandling of small packets
2268 * can result in a loss of connection or TX stall. Device
2269 * architects recommend that no packet should be smaller
2270 * than the minimum MTU value provided to the driver, so
2271 * pad any packets to that length
2272 */
2273 if (skb->len < netdev->min_mtu)
2274 return skb_put_padto(skb, netdev->min_mtu);
2275
2276 return 0;
2277 }
2278
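/* Undo descriptors staged in a tx queue's indirect buffer after a
 * failed flush: return the buffers to the pool free map, free the
 * associated skbs, account them as dropped and wake the subqueue if
 * enough entries were released.
 */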
2279 static void ibmvnic_tx_scrq_clean_buffer(struct ibmvnic_adapter *adapter,
2280 struct ibmvnic_sub_crq_queue *tx_scrq)
2281 {
2282 struct ibmvnic_ind_xmit_queue *ind_bufp;
2283 struct ibmvnic_tx_buff *tx_buff;
2284 struct ibmvnic_tx_pool *tx_pool;
2285 union sub_crq tx_scrq_entry;
2286 int queue_num;
2287 int entries;
2288 int index;
2289 int i;
2290
2291 ind_bufp = &tx_scrq->ind_buf;
2292 entries = (u64)ind_bufp->index;
2293 queue_num = tx_scrq->pool_index;
2294
2295 for (i = entries - 1; i >= 0; --i) {
2296 tx_scrq_entry = ind_bufp->indir_arr[i];
2297 if (tx_scrq_entry.v1.type != IBMVNIC_TX_DESC)
2298 continue;
2299 index = be32_to_cpu(tx_scrq_entry.v1.correlator);
2300 if (index & IBMVNIC_TSO_POOL_MASK) {
2301 tx_pool = &adapter->tso_pool[queue_num];
2302 index &= ~IBMVNIC_TSO_POOL_MASK;
2303 } else {
2304 tx_pool = &adapter->tx_pool[queue_num];
2305 }
2306 tx_pool->free_map[tx_pool->consumer_index] = index;
2307 tx_pool->consumer_index = tx_pool->consumer_index == 0 ?
2308 tx_pool->num_buffers - 1 :
2309 tx_pool->consumer_index - 1;
2310 tx_buff = &tx_pool->tx_buff[index];
2311 adapter->netdev->stats.tx_packets--;
2312 adapter->netdev->stats.tx_bytes -= tx_buff->skb->len;
2313 adapter->tx_stats_buffers[queue_num].packets--;
2314 adapter->tx_stats_buffers[queue_num].bytes -=
2315 tx_buff->skb->len;
2316 dev_kfree_skb_any(tx_buff->skb);
2317 tx_buff->skb = NULL;
2318 adapter->netdev->stats.tx_dropped++;
2319 }
2320
2321 ind_bufp->index = 0;
2322
2323 if (atomic_sub_return(entries, &tx_scrq->used) <=
2324 (adapter->req_tx_entries_per_subcrq / 2) &&
2325 __netif_subqueue_stopped(adapter->netdev, queue_num)) {
2326 rcu_read_lock();
2327
2328 if (adapter->tx_queues_active) {
2329 netif_wake_subqueue(adapter->netdev, queue_num);
2330 netdev_dbg(adapter->netdev, "Started queue %d\n",
2331 queue_num);
2332 }
2333
2334 rcu_read_unlock();
2335 }
2336 }
2337
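/* Post a single descriptor (four 8-byte words) to the sub-CRQ with the
 * H_SEND_SUB_CRQ hcall, bypassing the indirect buffer path.
 */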
2338 static int send_subcrq_direct(struct ibmvnic_adapter *adapter,
2339 u64 remote_handle, u64 *entry)
2340 {
2341 unsigned int ua = adapter->vdev->unit_address;
2342 struct device *dev = &adapter->vdev->dev;
2343 int rc;
2344
2345 /* Make sure the hypervisor sees the complete request */
2346 dma_wmb();
2347 rc = plpar_hcall_norets(H_SEND_SUB_CRQ, ua,
2348 cpu_to_be64(remote_handle),
2349 cpu_to_be64(entry[0]), cpu_to_be64(entry[1]),
2350 cpu_to_be64(entry[2]), cpu_to_be64(entry[3]));
2351
2352 if (rc)
2353 print_subcrq_error(dev, rc, __func__);
2354
2355 return rc;
2356 }
2357
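/* Flush descriptors staged in the tx queue's indirect buffer, either as
 * one indirect hcall (send_subcrq_indirect) or as a single direct
 * descriptor (send_subcrq_direct). On failure the staged buffers are
 * cleaned up and accounted as dropped.
 */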
2358 static int ibmvnic_tx_scrq_flush(struct ibmvnic_adapter *adapter,
2359 struct ibmvnic_sub_crq_queue *tx_scrq,
2360 bool indirect)
2361 {
2362 struct ibmvnic_ind_xmit_queue *ind_bufp;
2363 u64 dma_addr;
2364 u64 entries;
2365 u64 handle;
2366 int rc;
2367
2368 ind_bufp = &tx_scrq->ind_buf;
2369 dma_addr = (u64)ind_bufp->indir_dma;
2370 entries = (u64)ind_bufp->index;
2371 handle = tx_scrq->handle;
2372
2373 if (!entries)
2374 return 0;
2375
2376 if (indirect)
2377 rc = send_subcrq_indirect(adapter, handle, dma_addr, entries);
2378 else
2379 rc = send_subcrq_direct(adapter, handle,
2380 (u64 *)ind_bufp->indir_arr);
2381
2382 if (rc)
2383 ibmvnic_tx_scrq_clean_buffer(adapter, tx_scrq);
2384 else
2385 ind_bufp->index = 0;
2386 return rc;
2387 }
2388
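/* Main transmit path: copy the skb (linear data plus any frags) into a
 * slot of the long term mapped tx buffer, build a tx descriptor (with
 * optional header descriptors for checksum/TSO offload), stage it in
 * the queue's indirect buffer and flush to the VNIC server when the
 * buffer fills or no further packets are pending.
 */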
2389 static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
2390 {
2391 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2392 int queue_num = skb_get_queue_mapping(skb);
2393 u8 *hdrs = (u8 *)&adapter->tx_rx_desc_req;
2394 struct device *dev = &adapter->vdev->dev;
2395 struct ibmvnic_ind_xmit_queue *ind_bufp;
2396 struct ibmvnic_tx_buff *tx_buff = NULL;
2397 struct ibmvnic_sub_crq_queue *tx_scrq;
2398 struct ibmvnic_long_term_buff *ltb;
2399 struct ibmvnic_tx_pool *tx_pool;
2400 unsigned int tx_send_failed = 0;
2401 netdev_tx_t ret = NETDEV_TX_OK;
2402 unsigned int tx_map_failed = 0;
2403 union sub_crq indir_arr[16];
2404 unsigned int tx_dropped = 0;
2405 unsigned int tx_packets = 0;
2406 unsigned int tx_bytes = 0;
2407 dma_addr_t data_dma_addr;
2408 struct netdev_queue *txq;
2409 unsigned long lpar_rc;
2410 union sub_crq tx_crq;
2411 unsigned int offset;
2412 bool use_scrq_send_direct = false;
2413 int num_entries = 1;
2414 unsigned char *dst;
2415 int bufidx = 0;
2416 u8 proto = 0;
2417
2418 /* If a reset is in progress, drop the packet since
2419 * the scrqs may get torn down. Otherwise use the
2420 * rcu to ensure reset waits for us to complete.
2421 */
2422 rcu_read_lock();
2423 if (!adapter->tx_queues_active) {
2424 dev_kfree_skb_any(skb);
2425
2426 tx_send_failed++;
2427 tx_dropped++;
2428 ret = NETDEV_TX_OK;
2429 goto out;
2430 }
2431
2432 tx_scrq = adapter->tx_scrq[queue_num];
2433 txq = netdev_get_tx_queue(netdev, queue_num);
2434 ind_bufp = &tx_scrq->ind_buf;
2435
2436 if (ibmvnic_xmit_workarounds(skb, netdev)) {
2437 tx_dropped++;
2438 tx_send_failed++;
2439 ret = NETDEV_TX_OK;
2440 lpar_rc = ibmvnic_tx_scrq_flush(adapter, tx_scrq, true);
2441 if (lpar_rc != H_SUCCESS)
2442 goto tx_err;
2443 goto out;
2444 }
2445
2446 if (skb_is_gso(skb))
2447 tx_pool = &adapter->tso_pool[queue_num];
2448 else
2449 tx_pool = &adapter->tx_pool[queue_num];
2450
2451 bufidx = tx_pool->free_map[tx_pool->consumer_index];
2452
2453 if (bufidx == IBMVNIC_INVALID_MAP) {
2454 dev_kfree_skb_any(skb);
2455 tx_send_failed++;
2456 tx_dropped++;
2457 ret = NETDEV_TX_OK;
2458 lpar_rc = ibmvnic_tx_scrq_flush(adapter, tx_scrq, true);
2459 if (lpar_rc != H_SUCCESS)
2460 goto tx_err;
2461 goto out;
2462 }
2463
2464 tx_pool->free_map[tx_pool->consumer_index] = IBMVNIC_INVALID_MAP;
2465
2466 map_txpool_buf_to_ltb(tx_pool, bufidx, <b, &offset);
2467
2468 dst = ltb->buff + offset;
2469 memset(dst, 0, tx_pool->buf_size);
2470 data_dma_addr = ltb->addr + offset;
2471
2472 /* If we are going to send this via send_subcrq_direct then we need to
2473 * update the checksum before copying the data into the LTB. Essentially
2474 * these packets force-disable CSO so that we can guarantee that
2475 * FW does not need header info and we can send direct.
2476 */
2477 if (!skb_is_gso(skb) && !ind_bufp->index && !netdev_xmit_more()) {
2478 use_scrq_send_direct = true;
2479 if (skb->ip_summed == CHECKSUM_PARTIAL &&
2480 skb_checksum_help(skb))
2481 use_scrq_send_direct = false;
2482 }
2483
2484 if (skb_shinfo(skb)->nr_frags) {
2485 int cur, i;
2486
2487 /* Copy the head */
2488 skb_copy_from_linear_data(skb, dst, skb_headlen(skb));
2489 cur = skb_headlen(skb);
2490
2491 /* Copy the frags */
2492 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2493 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2494
2495 memcpy(dst + cur, skb_frag_address(frag),
2496 skb_frag_size(frag));
2497 cur += skb_frag_size(frag);
2498 }
2499 } else {
2500 skb_copy_from_linear_data(skb, dst, skb->len);
2501 }
2502
2503 tx_pool->consumer_index =
2504 (tx_pool->consumer_index + 1) % tx_pool->num_buffers;
2505
2506 tx_buff = &tx_pool->tx_buff[bufidx];
2507
2508 /* Sanity checks on our free map to make sure it points to an index
2509 * that is not being occupied by another skb. If skb memory is
2510 * not freed then we see congestion control kick in and halt tx.
2511 */
2512 if (unlikely(tx_buff->skb)) {
2513 dev_warn_ratelimited(dev, "TX free map points to untracked skb (%s %d idx=%d)\n",
2514 skb_is_gso(skb) ? "tso_pool" : "tx_pool",
2515 queue_num, bufidx);
2516 dev_kfree_skb_any(tx_buff->skb);
2517 }
2518
2519 tx_buff->skb = skb;
2520 tx_buff->index = bufidx;
2521 tx_buff->pool_index = queue_num;
2522
2523 memset(&tx_crq, 0, sizeof(tx_crq));
2524 tx_crq.v1.first = IBMVNIC_CRQ_CMD;
2525 tx_crq.v1.type = IBMVNIC_TX_DESC;
2526 tx_crq.v1.n_crq_elem = 1;
2527 tx_crq.v1.n_sge = 1;
2528 tx_crq.v1.flags1 = IBMVNIC_TX_COMP_NEEDED;
2529
2530 if (skb_is_gso(skb))
2531 tx_crq.v1.correlator =
2532 cpu_to_be32(bufidx | IBMVNIC_TSO_POOL_MASK);
2533 else
2534 tx_crq.v1.correlator = cpu_to_be32(bufidx);
2535 tx_crq.v1.dma_reg = cpu_to_be16(ltb->map_id);
2536 tx_crq.v1.sge_len = cpu_to_be32(skb->len);
2537 tx_crq.v1.ioba = cpu_to_be64(data_dma_addr);
2538
2539 if (adapter->vlan_header_insertion && skb_vlan_tag_present(skb)) {
2540 tx_crq.v1.flags2 |= IBMVNIC_TX_VLAN_INSERT;
2541 tx_crq.v1.vlan_id = cpu_to_be16(skb->vlan_tci);
2542 }
2543
2544 if (skb->protocol == htons(ETH_P_IP)) {
2545 tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV4;
2546 proto = ip_hdr(skb)->protocol;
2547 } else if (skb->protocol == htons(ETH_P_IPV6)) {
2548 tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV6;
2549 proto = ipv6_hdr(skb)->nexthdr;
2550 }
2551
2552 if (proto == IPPROTO_TCP)
2553 tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_TCP;
2554 else if (proto == IPPROTO_UDP)
2555 tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_UDP;
2556
2557 if (skb->ip_summed == CHECKSUM_PARTIAL) {
2558 tx_crq.v1.flags1 |= IBMVNIC_TX_CHKSUM_OFFLOAD;
2559 hdrs += 2;
2560 }
2561 if (skb_is_gso(skb)) {
2562 tx_crq.v1.flags1 |= IBMVNIC_TX_LSO;
2563 tx_crq.v1.mss = cpu_to_be16(skb_shinfo(skb)->gso_size);
2564 hdrs += 2;
2565 } else if (use_scrq_send_direct) {
2566 /* See above comment, CSO disabled with direct xmit */
2567 tx_crq.v1.flags1 &= ~(IBMVNIC_TX_CHKSUM_OFFLOAD);
2568 ind_bufp->index = 1;
2569 tx_buff->num_entries = 1;
2570 netdev_tx_sent_queue(txq, skb->len);
2571 ind_bufp->indir_arr[0] = tx_crq;
2572 lpar_rc = ibmvnic_tx_scrq_flush(adapter, tx_scrq, false);
2573 if (lpar_rc != H_SUCCESS)
2574 goto tx_err;
2575
2576 goto early_exit;
2577 }
2578
2579 if ((*hdrs >> 7) & 1)
2580 build_hdr_descs_arr(skb, indir_arr, &num_entries, *hdrs);
2581
2582 tx_crq.v1.n_crq_elem = num_entries;
2583 tx_buff->num_entries = num_entries;
2584 /* flush buffer if current entry can not fit */
2585 if (num_entries + ind_bufp->index > IBMVNIC_MAX_IND_DESCS) {
2586 lpar_rc = ibmvnic_tx_scrq_flush(adapter, tx_scrq, true);
2587 if (lpar_rc != H_SUCCESS)
2588 goto tx_flush_err;
2589 }
2590
2591 indir_arr[0] = tx_crq;
2592 memcpy(&ind_bufp->indir_arr[ind_bufp->index], &indir_arr[0],
2593 num_entries * sizeof(struct ibmvnic_generic_scrq));
2594
2595 ind_bufp->index += num_entries;
2596 if (__netdev_tx_sent_queue(txq, skb->len,
2597 netdev_xmit_more() &&
2598 ind_bufp->index < IBMVNIC_MAX_IND_DESCS)) {
2599 lpar_rc = ibmvnic_tx_scrq_flush(adapter, tx_scrq, true);
2600 if (lpar_rc != H_SUCCESS)
2601 goto tx_err;
2602 }
2603
2604 early_exit:
2605 if (atomic_add_return(num_entries, &tx_scrq->used)
2606 >= adapter->req_tx_entries_per_subcrq) {
2607 netdev_dbg(netdev, "Stopping queue %d\n", queue_num);
2608 netif_stop_subqueue(netdev, queue_num);
2609 }
2610
2611 tx_packets++;
2612 tx_bytes += skb->len;
2613 txq_trans_cond_update(txq);
2614 ret = NETDEV_TX_OK;
2615 goto out;
2616
2617 tx_flush_err:
2618 dev_kfree_skb_any(skb);
2619 tx_buff->skb = NULL;
2620 tx_pool->consumer_index = tx_pool->consumer_index == 0 ?
2621 tx_pool->num_buffers - 1 :
2622 tx_pool->consumer_index - 1;
2623 tx_dropped++;
2624 tx_err:
2625 if (lpar_rc != H_CLOSED && lpar_rc != H_PARAMETER)
2626 dev_err_ratelimited(dev, "tx: send failed\n");
2627
2628 if (lpar_rc == H_CLOSED || adapter->failover_pending) {
2629 /* Disable TX and report carrier off if queue is closed
2630 * or pending failover.
2631 * Firmware guarantees that a signal will be sent to the
2632 * driver, triggering a reset or some other action.
2633 */
2634 netif_tx_stop_all_queues(netdev);
2635 netif_carrier_off(netdev);
2636 }
2637 out:
2638 rcu_read_unlock();
2639 netdev->stats.tx_dropped += tx_dropped;
2640 netdev->stats.tx_bytes += tx_bytes;
2641 netdev->stats.tx_packets += tx_packets;
2642 adapter->tx_send_failed += tx_send_failed;
2643 adapter->tx_map_failed += tx_map_failed;
2644 adapter->tx_stats_buffers[queue_num].packets += tx_packets;
2645 adapter->tx_stats_buffers[queue_num].bytes += tx_bytes;
2646 adapter->tx_stats_buffers[queue_num].dropped_packets += tx_dropped;
2647
2648 return ret;
2649 }
2650
2651 static void ibmvnic_set_multi(struct net_device *netdev)
2652 {
2653 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2654 struct netdev_hw_addr *ha;
2655 union ibmvnic_crq crq;
2656
2657 memset(&crq, 0, sizeof(crq));
2658 crq.request_capability.first = IBMVNIC_CRQ_CMD;
2659 crq.request_capability.cmd = REQUEST_CAPABILITY;
2660
2661 if (netdev->flags & IFF_PROMISC) {
2662 if (!adapter->promisc_supported)
2663 return;
2664 } else {
2665 if (netdev->flags & IFF_ALLMULTI) {
2666 /* Accept all multicast */
2667 memset(&crq, 0, sizeof(crq));
2668 crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
2669 crq.multicast_ctrl.cmd = MULTICAST_CTRL;
2670 crq.multicast_ctrl.flags = IBMVNIC_ENABLE_ALL;
2671 ibmvnic_send_crq(adapter, &crq);
2672 } else if (netdev_mc_empty(netdev)) {
2673 /* Reject all multicast */
2674 memset(&crq, 0, sizeof(crq));
2675 crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
2676 crq.multicast_ctrl.cmd = MULTICAST_CTRL;
2677 crq.multicast_ctrl.flags = IBMVNIC_DISABLE_ALL;
2678 ibmvnic_send_crq(adapter, &crq);
2679 } else {
2680 /* Accept one or more multicast(s) */
2681 netdev_for_each_mc_addr(ha, netdev) {
2682 memset(&crq, 0, sizeof(crq));
2683 crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
2684 crq.multicast_ctrl.cmd = MULTICAST_CTRL;
2685 crq.multicast_ctrl.flags = IBMVNIC_ENABLE_MC;
2686 ether_addr_copy(&crq.multicast_ctrl.mac_addr[0],
2687 ha->addr);
2688 ibmvnic_send_crq(adapter, &crq);
2689 }
2690 }
2691 }
2692 }
2693
2694 static int __ibmvnic_set_mac(struct net_device *netdev, u8 *dev_addr)
2695 {
2696 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2697 union ibmvnic_crq crq;
2698 int rc;
2699
2700 if (!is_valid_ether_addr(dev_addr)) {
2701 rc = -EADDRNOTAVAIL;
2702 goto err;
2703 }
2704
2705 memset(&crq, 0, sizeof(crq));
2706 crq.change_mac_addr.first = IBMVNIC_CRQ_CMD;
2707 crq.change_mac_addr.cmd = CHANGE_MAC_ADDR;
2708 ether_addr_copy(&crq.change_mac_addr.mac_addr[0], dev_addr);
2709
2710 mutex_lock(&adapter->fw_lock);
2711 adapter->fw_done_rc = 0;
2712 reinit_completion(&adapter->fw_done);
2713
2714 rc = ibmvnic_send_crq(adapter, &crq);
2715 if (rc) {
2716 rc = -EIO;
2717 mutex_unlock(&adapter->fw_lock);
2718 goto err;
2719 }
2720
2721 rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
2722 /* netdev->dev_addr is changed in handle_change_mac_rsp function */
2723 if (rc || adapter->fw_done_rc) {
2724 rc = -EIO;
2725 mutex_unlock(&adapter->fw_lock);
2726 goto err;
2727 }
2728 mutex_unlock(&adapter->fw_lock);
2729 return 0;
2730 err:
2731 ether_addr_copy(adapter->mac_addr, netdev->dev_addr);
2732 return rc;
2733 }
2734
2735 static int ibmvnic_set_mac(struct net_device *netdev, void *p)
2736 {
2737 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2738 struct sockaddr *addr = p;
2739 int rc;
2740
2741 rc = 0;
2742 if (!is_valid_ether_addr(addr->sa_data))
2743 return -EADDRNOTAVAIL;
2744
2745 ether_addr_copy(adapter->mac_addr, addr->sa_data);
2746 if (adapter->state != VNIC_PROBED)
2747 rc = __ibmvnic_set_mac(netdev, addr->sa_data);
2748
2749 return rc;
2750 }
2751
2752 static const char *reset_reason_to_string(enum ibmvnic_reset_reason reason)
2753 {
2754 switch (reason) {
2755 case VNIC_RESET_FAILOVER:
2756 return "FAILOVER";
2757 case VNIC_RESET_MOBILITY:
2758 return "MOBILITY";
2759 case VNIC_RESET_FATAL:
2760 return "FATAL";
2761 case VNIC_RESET_NON_FATAL:
2762 return "NON_FATAL";
2763 case VNIC_RESET_TIMEOUT:
2764 return "TIMEOUT";
2765 case VNIC_RESET_CHANGE_PARAM:
2766 return "CHANGE_PARAM";
2767 case VNIC_RESET_PASSIVE_INIT:
2768 return "PASSIVE_INIT";
2769 }
2770 return "UNKNOWN";
2771 }
2772
2773 /*
2774 * Initialize the init_done completion and return code values. We
2775 * can get a transport event just after registering the CRQ and the
2776 * tasklet will use this to communicate the transport event. To ensure
2777 * we don't miss the notification/error, initialize these _before_
2778 * registering the CRQ.
2779 */
2780 static inline void reinit_init_done(struct ibmvnic_adapter *adapter)
2781 {
2782 reinit_completion(&adapter->init_done);
2783 adapter->init_done_rc = 0;
2784 }
2785
2786 /*
2787 * do_reset returns zero if we are able to keep processing reset events, or
2788 * non-zero if we hit a fatal error and must halt.
2789 */
2790 static int do_reset(struct ibmvnic_adapter *adapter,
2791 struct ibmvnic_rwi *rwi, u32 reset_state)
2792 {
2793 struct net_device *netdev = adapter->netdev;
2794 u64 old_num_rx_queues, old_num_tx_queues;
2795 u64 old_num_rx_slots, old_num_tx_slots;
2796 int rc;
2797
2798 netdev_dbg(adapter->netdev,
2799 "[S:%s FOP:%d] Reset reason: %s, reset_state: %s\n",
2800 adapter_state_to_string(adapter->state),
2801 adapter->failover_pending,
2802 reset_reason_to_string(rwi->reset_reason),
2803 adapter_state_to_string(reset_state));
2804
2805 adapter->reset_reason = rwi->reset_reason;
2806 /* requestor of VNIC_RESET_CHANGE_PARAM already has the rtnl lock */
2807 if (!(adapter->reset_reason == VNIC_RESET_CHANGE_PARAM))
2808 rtnl_lock();
2809
2810 /* Now that we have the rtnl lock, clear any pending failover.
2811 * This will ensure ibmvnic_open() has either completed or will
2812 * block until failover is complete.
2813 */
2814 if (rwi->reset_reason == VNIC_RESET_FAILOVER)
2815 adapter->failover_pending = false;
2816
2817 /* read the state and check (again) after getting rtnl */
2818 reset_state = adapter->state;
2819
2820 if (reset_state == VNIC_REMOVING || reset_state == VNIC_REMOVED) {
2821 rc = -EBUSY;
2822 goto out;
2823 }
2824
2825 netif_carrier_off(netdev);
2826
2827 old_num_rx_queues = adapter->req_rx_queues;
2828 old_num_tx_queues = adapter->req_tx_queues;
2829 old_num_rx_slots = adapter->req_rx_add_entries_per_subcrq;
2830 old_num_tx_slots = adapter->req_tx_entries_per_subcrq;
2831
2832 ibmvnic_cleanup(netdev);
2833
2834 if (reset_state == VNIC_OPEN &&
2835 adapter->reset_reason != VNIC_RESET_MOBILITY &&
2836 adapter->reset_reason != VNIC_RESET_FAILOVER) {
2837 if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM) {
2838 rc = __ibmvnic_close(netdev);
2839 if (rc)
2840 goto out;
2841 } else {
2842 adapter->state = VNIC_CLOSING;
2843
2844 /* Release the RTNL lock before link state change and
2845 * re-acquire after the link state change to allow
2846 * linkwatch_event to grab the RTNL lock and run during
2847 * a reset.
2848 */
2849 rtnl_unlock();
2850 rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_DN);
2851 rtnl_lock();
2852 if (rc)
2853 goto out;
2854
2855 if (adapter->state == VNIC_OPEN) {
2856 /* When we dropped rtnl, ibmvnic_open() got
2857 * it and noticed that we are resetting and
2858 * set the adapter state to OPEN. Update our
2859 * new "target" state, and resume the reset
2860 * from VNIC_CLOSING state.
2861 */
2862 netdev_dbg(netdev,
2863 "Open changed state from %s, updating.\n",
2864 adapter_state_to_string(reset_state));
2865 reset_state = VNIC_OPEN;
2866 adapter->state = VNIC_CLOSING;
2867 }
2868
2869 if (adapter->state != VNIC_CLOSING) {
2870 /* If someone else changed the adapter state
2871 * when we dropped the rtnl, fail the reset
2872 */
2873 rc = -EAGAIN;
2874 goto out;
2875 }
2876 adapter->state = VNIC_CLOSED;
2877 }
2878 }
2879
2880 if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM) {
2881 release_resources(adapter);
2882 release_sub_crqs(adapter, 1);
2883 release_crq_queue(adapter);
2884 }
2885
2886 if (adapter->reset_reason != VNIC_RESET_NON_FATAL) {
2887 /* remove the closed state so when we call open it appears
2888 * we are coming from the probed state.
2889 */
2890 adapter->state = VNIC_PROBED;
2891
2892 reinit_init_done(adapter);
2893
2894 if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM) {
2895 rc = init_crq_queue(adapter);
2896 } else if (adapter->reset_reason == VNIC_RESET_MOBILITY) {
2897 rc = ibmvnic_reenable_crq_queue(adapter);
2898 release_sub_crqs(adapter, 1);
2899 } else {
2900 rc = ibmvnic_reset_crq(adapter);
2901 if (rc == H_CLOSED || rc == H_SUCCESS) {
2902 rc = vio_enable_interrupts(adapter->vdev);
2903 if (rc)
2904 netdev_err(adapter->netdev,
2905 "Reset failed to enable interrupts. rc=%d\n",
2906 rc);
2907 }
2908 }
2909
2910 if (rc) {
2911 netdev_err(adapter->netdev,
2912 "Reset couldn't initialize crq. rc=%d\n", rc);
2913 goto out;
2914 }
2915
2916 rc = ibmvnic_reset_init(adapter, true);
2917 if (rc)
2918 goto out;
2919
2920 /* If the adapter was in PROBE or DOWN state prior to the reset,
2921 * exit here.
2922 */
2923 if (reset_state == VNIC_PROBED || reset_state == VNIC_DOWN) {
2924 rc = 0;
2925 goto out;
2926 }
2927
2928 rc = ibmvnic_login(netdev);
2929 if (rc)
2930 goto out;
2931
2932 if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM) {
2933 rc = init_resources(adapter);
2934 if (rc)
2935 goto out;
2936 } else if (adapter->req_rx_queues != old_num_rx_queues ||
2937 adapter->req_tx_queues != old_num_tx_queues ||
2938 adapter->req_rx_add_entries_per_subcrq !=
2939 old_num_rx_slots ||
2940 adapter->req_tx_entries_per_subcrq !=
2941 old_num_tx_slots ||
2942 !adapter->rx_pool ||
2943 !adapter->tso_pool ||
2944 !adapter->tx_pool) {
2945 release_napi(adapter);
2946 release_vpd_data(adapter);
2947
2948 rc = init_resources(adapter);
2949 if (rc)
2950 goto out;
2951
2952 } else {
2953 rc = init_tx_pools(netdev);
2954 if (rc) {
2955 netdev_dbg(netdev,
2956 "init tx pools failed (%d)\n",
2957 rc);
2958 goto out;
2959 }
2960
2961 rc = init_rx_pools(netdev);
2962 if (rc) {
2963 netdev_dbg(netdev,
2964 "init rx pools failed (%d)\n",
2965 rc);
2966 goto out;
2967 }
2968 }
2969 ibmvnic_disable_irqs(adapter);
2970 }
2971 adapter->state = VNIC_CLOSED;
2972
2973 if (reset_state == VNIC_CLOSED) {
2974 rc = 0;
2975 goto out;
2976 }
2977
2978 rc = __ibmvnic_open(netdev);
2979 if (rc) {
2980 rc = IBMVNIC_OPEN_FAILED;
2981 goto out;
2982 }
2983
2984 /* refresh device's multicast list */
2985 ibmvnic_set_multi(netdev);
2986
2987 if (adapter->reset_reason == VNIC_RESET_FAILOVER ||
2988 adapter->reset_reason == VNIC_RESET_MOBILITY)
2989 __netdev_notify_peers(netdev);
2990
2991 rc = 0;
2992
2993 out:
2994 /* restore the adapter state if reset failed */
2995 if (rc)
2996 adapter->state = reset_state;
2997 /* requestor of VNIC_RESET_CHANGE_PARAM should still hold the rtnl lock */
2998 if (!(adapter->reset_reason == VNIC_RESET_CHANGE_PARAM))
2999 rtnl_unlock();
3000
3001 netdev_dbg(adapter->netdev, "[S:%s FOP:%d] Reset done, rc %d\n",
3002 adapter_state_to_string(adapter->state),
3003 adapter->failover_pending, rc);
3004 return rc;
3005 }
3006
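/* Hard reset: tear down all resources including the CRQ and sub-CRQs,
 * re-register the CRQ from scratch and re-initialize, logging in and
 * reopening the device as dictated by its pre-reset state.
 */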
3007 static int do_hard_reset(struct ibmvnic_adapter *adapter,
3008 struct ibmvnic_rwi *rwi, u32 reset_state)
3009 {
3010 struct net_device *netdev = adapter->netdev;
3011 int rc;
3012
3013 netdev_dbg(adapter->netdev, "Hard resetting driver (%s)\n",
3014 reset_reason_to_string(rwi->reset_reason));
3015
3016 /* read the state and check (again) after getting rtnl */
3017 reset_state = adapter->state;
3018
3019 if (reset_state == VNIC_REMOVING || reset_state == VNIC_REMOVED) {
3020 rc = -EBUSY;
3021 goto out;
3022 }
3023
3024 netif_carrier_off(netdev);
3025 adapter->reset_reason = rwi->reset_reason;
3026
3027 ibmvnic_cleanup(netdev);
3028 release_resources(adapter);
3029 release_sub_crqs(adapter, 0);
3030 release_crq_queue(adapter);
3031
3032 /* remove the closed state so when we call open it appears
3033 * we are coming from the probed state.
3034 */
3035 adapter->state = VNIC_PROBED;
3036
3037 reinit_init_done(adapter);
3038
3039 rc = init_crq_queue(adapter);
3040 if (rc) {
3041 netdev_err(adapter->netdev,
3042 "Couldn't initialize crq. rc=%d\n", rc);
3043 goto out;
3044 }
3045
3046 rc = ibmvnic_reset_init(adapter, false);
3047 if (rc)
3048 goto out;
3049
3050 /* If the adapter was in PROBE or DOWN state prior to the reset,
3051 * exit here.
3052 */
3053 if (reset_state == VNIC_PROBED || reset_state == VNIC_DOWN)
3054 goto out;
3055
3056 rc = ibmvnic_login(netdev);
3057 if (rc)
3058 goto out;
3059
3060 rc = init_resources(adapter);
3061 if (rc)
3062 goto out;
3063
3064 ibmvnic_disable_irqs(adapter);
3065 adapter->state = VNIC_CLOSED;
3066
3067 if (reset_state == VNIC_CLOSED)
3068 goto out;
3069
3070 rc = __ibmvnic_open(netdev);
3071 if (rc) {
3072 rc = IBMVNIC_OPEN_FAILED;
3073 goto out;
3074 }
3075
3076 __netdev_notify_peers(netdev);
3077 out:
3078 /* restore adapter state if reset failed */
3079 if (rc)
3080 adapter->state = reset_state;
3081 netdev_dbg(adapter->netdev, "[S:%s FOP:%d] Hard reset done, rc %d\n",
3082 adapter_state_to_string(adapter->state),
3083 adapter->failover_pending, rc);
3084 return rc;
3085 }
3086
3087 static struct ibmvnic_rwi *get_next_rwi(struct ibmvnic_adapter *adapter)
3088 {
3089 struct ibmvnic_rwi *rwi;
3090 unsigned long flags;
3091
3092 spin_lock_irqsave(&adapter->rwi_lock, flags);
3093
3094 if (!list_empty(&adapter->rwi_list)) {
3095 rwi = list_first_entry(&adapter->rwi_list, struct ibmvnic_rwi,
3096 list);
3097 list_del(&rwi->list);
3098 } else {
3099 rwi = NULL;
3100 }
3101
3102 spin_unlock_irqrestore(&adapter->rwi_lock, flags);
3103 return rwi;
3104 }
3105
3106 /**
3107 * do_passive_init - complete probing when partner device is detected.
3108 * @adapter: ibmvnic_adapter struct
3109 *
3110 * If the ibmvnic device does not have a partner device to communicate with at boot
3111 * and that partner device comes online at a later time, this function is called
3112 * to complete the initialization process of the ibmvnic device.
3113 * Caller is expected to hold rtnl_lock().
3114 *
3115 * Returns non-zero if sub-CRQs are not initialized properly leaving the device
3116 * in the down state.
3117 * Returns 0 upon success and the device is in PROBED state.
3118 */
3119
3120 static int do_passive_init(struct ibmvnic_adapter *adapter)
3121 {
3122 unsigned long timeout = msecs_to_jiffies(30000);
3123 struct net_device *netdev = adapter->netdev;
3124 struct device *dev = &adapter->vdev->dev;
3125 int rc;
3126
3127 netdev_dbg(netdev, "Partner device found, probing.\n");
3128
3129 adapter->state = VNIC_PROBING;
3130 reinit_completion(&adapter->init_done);
3131 adapter->init_done_rc = 0;
3132 adapter->crq.active = true;
3133
3134 rc = send_crq_init_complete(adapter);
3135 if (rc)
3136 goto out;
3137
3138 rc = send_version_xchg(adapter);
3139 if (rc)
3140 netdev_dbg(adapter->netdev, "send_version_xchg failed, rc=%d\n", rc);
3141
3142 if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
3143 dev_err(dev, "Initialization sequence timed out\n");
3144 rc = -ETIMEDOUT;
3145 goto out;
3146 }
3147
3148 rc = init_sub_crqs(adapter);
3149 if (rc) {
3150 dev_err(dev, "Initialization of sub crqs failed, rc=%d\n", rc);
3151 goto out;
3152 }
3153
3154 rc = init_sub_crq_irqs(adapter);
3155 if (rc) {
3156 dev_err(dev, "Failed to initialize sub crq irqs, rc=%d\n", rc);
3157 goto init_failed;
3158 }
3159
3160 netdev->mtu = adapter->req_mtu - ETH_HLEN;
3161 netdev->min_mtu = adapter->min_mtu - ETH_HLEN;
3162 netdev->max_mtu = adapter->max_mtu - ETH_HLEN;
3163
3164 adapter->state = VNIC_PROBED;
3165 netdev_dbg(netdev, "Probed successfully. Waiting for signal from partner device.\n");
3166
3167 return 0;
3168
3169 init_failed:
3170 release_sub_crqs(adapter, 1);
3171 out:
3172 adapter->state = VNIC_DOWN;
3173 return rc;
3174 }
3175
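/* Reset worker: drain the reset work-item list, dispatching each entry
 * to do_passive_init(), do_hard_reset() or do_reset() as appropriate.
 * If the last reset failed and the queue is empty, the failed work item
 * is retried as a hard reset.
 */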
3176 static void __ibmvnic_reset(struct work_struct *work)
3177 {
3178 struct ibmvnic_adapter *adapter;
3179 unsigned int timeout = 5000;
3180 struct ibmvnic_rwi *tmprwi;
3181 bool saved_state = false;
3182 struct ibmvnic_rwi *rwi;
3183 unsigned long flags;
3184 struct device *dev;
3185 bool need_reset;
3186 int num_fails = 0;
3187 u32 reset_state;
3188 int rc = 0;
3189
3190 adapter = container_of(work, struct ibmvnic_adapter, ibmvnic_reset);
3191 dev = &adapter->vdev->dev;
3192
3193 /* Wait for ibmvnic_probe() to complete. If probe is taking too long
3194 * or if another reset is in progress, defer work for now. If probe
3195 * eventually fails it will flush and terminate our work.
3196 *
3197 * Three possibilities here:
3198 * 1. Adapter being removed - just return
3199 * 2. Timed out on probe or another reset in progress - delay the work
3200 * 3. Completed probe - perform any resets in queue
3201 */
3202 if (adapter->state == VNIC_PROBING &&
3203 !wait_for_completion_timeout(&adapter->probe_done, timeout)) {
3204 dev_err(dev, "Reset thread timed out on probe");
3205 queue_delayed_work(system_long_wq,
3206 &adapter->ibmvnic_delayed_reset,
3207 IBMVNIC_RESET_DELAY);
3208 return;
3209 }
3210
3211 /* adapter is done with probe (i.e state is never VNIC_PROBING now) */
3212 if (adapter->state == VNIC_REMOVING)
3213 return;
3214
3215 /* ->rwi_list is stable now (no one else is removing entries) */
3216
3217 /* ibmvnic_probe() may have purged the reset queue after we were
3218 * scheduled to process a reset, so there may be no resets to process.
3219 * Before setting the ->resetting bit though, we have to make sure
3220 * that there is in fact a reset to process. Otherwise we may race
3221 * with ibmvnic_open() and end up leaving the vnic down:
3222 *
3223 * __ibmvnic_reset() ibmvnic_open()
3224 * ----------------- --------------
3225 *
3226 * set ->resetting bit
3227 * find ->resetting bit is set
3228 * set ->state to IBMVNIC_OPEN (i.e
3229 * assume reset will open device)
3230 * return
3231 * find reset queue empty
3232 * return
3233 *
3234 * Neither performed vnic login/open and vnic stays down
3235 *
3236 * If we hold the lock and conditionally set the bit, either we
3237 * or ibmvnic_open() will complete the open.
3238 */
3239 need_reset = false;
3240 spin_lock(&adapter->rwi_lock);
3241 if (!list_empty(&adapter->rwi_list)) {
3242 if (test_and_set_bit_lock(0, &adapter->resetting)) {
3243 queue_delayed_work(system_long_wq,
3244 &adapter->ibmvnic_delayed_reset,
3245 IBMVNIC_RESET_DELAY);
3246 } else {
3247 need_reset = true;
3248 }
3249 }
3250 spin_unlock(&adapter->rwi_lock);
3251
3252 if (!need_reset)
3253 return;
3254
3255 rwi = get_next_rwi(adapter);
3256 while (rwi) {
3257 spin_lock_irqsave(&adapter->state_lock, flags);
3258
3259 if (adapter->state == VNIC_REMOVING ||
3260 adapter->state == VNIC_REMOVED) {
3261 spin_unlock_irqrestore(&adapter->state_lock, flags);
3262 kfree(rwi);
3263 rc = EBUSY;
3264 break;
3265 }
3266
3267 if (!saved_state) {
3268 reset_state = adapter->state;
3269 saved_state = true;
3270 }
3271 spin_unlock_irqrestore(&adapter->state_lock, flags);
3272
3273 if (rwi->reset_reason == VNIC_RESET_PASSIVE_INIT) {
3274 rtnl_lock();
3275 rc = do_passive_init(adapter);
3276 rtnl_unlock();
3277 if (!rc)
3278 netif_carrier_on(adapter->netdev);
3279 } else if (adapter->force_reset_recovery) {
3280 /* Since we are doing a hard reset now, clear the
3281 * failover_pending flag so we don't ignore any
3282 * future MOBILITY or other resets.
3283 */
3284 adapter->failover_pending = false;
3285
3286 /* Transport event occurred during previous reset */
3287 if (adapter->wait_for_reset) {
3288 /* Previous was CHANGE_PARAM; caller locked */
3289 adapter->force_reset_recovery = false;
3290 rc = do_hard_reset(adapter, rwi, reset_state);
3291 } else {
3292 rtnl_lock();
3293 adapter->force_reset_recovery = false;
3294 rc = do_hard_reset(adapter, rwi, reset_state);
3295 rtnl_unlock();
3296 }
3297 if (rc)
3298 num_fails++;
3299 else
3300 num_fails = 0;
3301
3302 /* If auto-priority-failover is enabled we can get
3303 * back to back failovers during resets, resulting
3304 * in at least two failed resets (from high-priority
3305 * backing device to low-priority one and then back)
3306 * If resets continue to fail beyond that, give the
3307 * adapter some time to settle down before retrying.
3308 */
3309 if (num_fails >= 3) {
3310 netdev_dbg(adapter->netdev,
3311 "[S:%s] Hard reset failed %d times, waiting 60 secs\n",
3312 adapter_state_to_string(adapter->state),
3313 num_fails);
3314 set_current_state(TASK_UNINTERRUPTIBLE);
3315 schedule_timeout(60 * HZ);
3316 }
3317 } else {
3318 rc = do_reset(adapter, rwi, reset_state);
3319 }
3320 tmprwi = rwi;
3321 adapter->last_reset_time = jiffies;
3322
3323 if (rc)
3324 netdev_dbg(adapter->netdev, "Reset failed, rc=%d\n", rc);
3325
3326 rwi = get_next_rwi(adapter);
3327
3328 /*
3329 * If there are no resets queued and the previous reset failed,
3330 * the adapter would be in an undefined state. So retry the
3331 * previous reset as a hard reset.
3332 *
3333 * Else, free the previous rwi and, if there is another reset
3334 * queued, process the new reset even if previous reset failed
3335 * (the previous reset could have failed because of a fail
3336 * over for instance, so process the fail over).
3337 */
3338 if (!rwi && rc)
3339 rwi = tmprwi;
3340 else
3341 kfree(tmprwi);
3342
3343 if (rwi && (rwi->reset_reason == VNIC_RESET_FAILOVER ||
3344 rwi->reset_reason == VNIC_RESET_MOBILITY || rc))
3345 adapter->force_reset_recovery = true;
3346 }
3347
3348 if (adapter->wait_for_reset) {
3349 adapter->reset_done_rc = rc;
3350 complete(&adapter->reset_done);
3351 }
3352
3353 clear_bit_unlock(0, &adapter->resetting);
3354
3355 netdev_dbg(adapter->netdev,
3356 "[S:%s FRR:%d WFR:%d] Done processing resets\n",
3357 adapter_state_to_string(adapter->state),
3358 adapter->force_reset_recovery,
3359 adapter->wait_for_reset);
3360 }
3361
3362 static void __ibmvnic_delayed_reset(struct work_struct *work)
3363 {
3364 struct ibmvnic_adapter *adapter;
3365
3366 adapter = container_of(work, struct ibmvnic_adapter,
3367 ibmvnic_delayed_reset.work);
3368 __ibmvnic_reset(&adapter->ibmvnic_reset);
3369 }
3370
3371 static void flush_reset_queue(struct ibmvnic_adapter *adapter)
3372 {
3373 struct list_head *entry, *tmp_entry;
3374
3375 if (!list_empty(&adapter->rwi_list)) {
3376 list_for_each_safe(entry, tmp_entry, &adapter->rwi_list) {
3377 list_del(entry);
3378 kfree(list_entry(entry, struct ibmvnic_rwi, list));
3379 }
3380 }
3381 }
3382
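/* Queue a reset work item for the given reason and schedule the reset
 * worker. Requests are skipped with EBUSY if the adapter is being
 * removed, a failover is pending (unless this is the failover itself),
 * or a reset with the same reason is already queued.
 */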
3383 static int ibmvnic_reset(struct ibmvnic_adapter *adapter,
3384 enum ibmvnic_reset_reason reason)
3385 {
3386 struct net_device *netdev = adapter->netdev;
3387 struct ibmvnic_rwi *rwi, *tmp;
3388 unsigned long flags;
3389 int ret;
3390
3391 spin_lock_irqsave(&adapter->rwi_lock, flags);
3392
3393 /* If failover is pending don't schedule any other reset.
3394 * Instead let the failover complete. If there is already a
3395 * failover reset scheduled, we will detect and drop the
3396 * duplicate reset when walking the ->rwi_list below.
3397 */
3398 if (adapter->state == VNIC_REMOVING ||
3399 adapter->state == VNIC_REMOVED ||
3400 (adapter->failover_pending && reason != VNIC_RESET_FAILOVER)) {
3401 ret = EBUSY;
3402 netdev_dbg(netdev, "Adapter removing or pending failover, skipping reset\n");
3403 goto err;
3404 }
3405
3406 list_for_each_entry(tmp, &adapter->rwi_list, list) {
3407 if (tmp->reset_reason == reason) {
3408 netdev_dbg(netdev, "Skipping matching reset, reason=%s\n",
3409 reset_reason_to_string(reason));
3410 ret = EBUSY;
3411 goto err;
3412 }
3413 }
3414
3415 rwi = kzalloc(sizeof(*rwi), GFP_ATOMIC);
3416 if (!rwi) {
3417 ret = ENOMEM;
3418 goto err;
3419 }
3420 /* if we just received a transport event,
3421 * flush reset queue and process this reset
3422 */
3423 if (adapter->force_reset_recovery)
3424 flush_reset_queue(adapter);
3425
3426 rwi->reset_reason = reason;
3427 list_add_tail(&rwi->list, &adapter->rwi_list);
3428 netdev_dbg(adapter->netdev, "Scheduling reset (reason %s)\n",
3429 reset_reason_to_string(reason));
3430 queue_work(system_long_wq, &adapter->ibmvnic_reset);
3431
3432 ret = 0;
3433 err:
3434 /* ibmvnic_close() below can block, so drop the lock first */
3435 spin_unlock_irqrestore(&adapter->rwi_lock, flags);
3436
3437 if (ret == ENOMEM)
3438 ibmvnic_close(netdev);
3439
3440 return -ret;
3441 }
3442
3443 static void ibmvnic_tx_timeout(struct net_device *dev, unsigned int txqueue)
3444 {
3445 struct ibmvnic_adapter *adapter = netdev_priv(dev);
3446
3447 if (test_bit(0, &adapter->resetting)) {
3448 netdev_err(adapter->netdev,
3449 "Adapter is resetting, skip timeout reset\n");
3450 return;
3451 }
3452 /* Don't queue up a reset until at least 5 seconds (default watchdog val)
3453 * after the last reset
3454 */
3455 if (time_before(jiffies, (adapter->last_reset_time + dev->watchdog_timeo))) {
3456 netdev_dbg(dev, "Not yet time to tx timeout.\n");
3457 return;
3458 }
3459 ibmvnic_reset(adapter, VNIC_RESET_TIMEOUT);
3460 }
3461
3462 static void remove_buff_from_pool(struct ibmvnic_adapter *adapter,
3463 struct ibmvnic_rx_buff *rx_buff)
3464 {
3465 struct ibmvnic_rx_pool *pool = &adapter->rx_pool[rx_buff->pool_index];
3466
3467 rx_buff->skb = NULL;
3468
3469 pool->free_map[pool->next_alloc] = (int)(rx_buff - pool->rx_buff);
3470 pool->next_alloc = (pool->next_alloc + 1) % pool->size;
3471
3472 atomic_dec(&pool->available);
3473 }
3474
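/* NAPI poll: pull completed rx sub-CRQ entries, copy the frame data out
 * of the long term buffer into the skb, hand it to the stack via
 * napi_gro_receive() and replenish the rx pool once it drops below half
 * of the requested entries.
 */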
3475 static int ibmvnic_poll(struct napi_struct *napi, int budget)
3476 {
3477 struct ibmvnic_sub_crq_queue *rx_scrq;
3478 struct ibmvnic_adapter *adapter;
3479 struct net_device *netdev;
3480 int frames_processed;
3481 int scrq_num;
3482
3483 netdev = napi->dev;
3484 adapter = netdev_priv(netdev);
3485 scrq_num = (int)(napi - adapter->napi);
3486 frames_processed = 0;
3487 rx_scrq = adapter->rx_scrq[scrq_num];
3488
3489 restart_poll:
3490 while (frames_processed < budget) {
3491 struct sk_buff *skb;
3492 struct ibmvnic_rx_buff *rx_buff;
3493 union sub_crq *next;
3494 u32 length;
3495 u16 offset;
3496 u8 flags = 0;
3497
3498 if (unlikely(test_bit(0, &adapter->resetting) &&
3499 adapter->reset_reason != VNIC_RESET_NON_FATAL)) {
3500 enable_scrq_irq(adapter, rx_scrq);
3501 napi_complete_done(napi, frames_processed);
3502 return frames_processed;
3503 }
3504
3505 if (!pending_scrq(adapter, rx_scrq))
3506 break;
3507 next = ibmvnic_next_scrq(adapter, rx_scrq);
3508 rx_buff = (struct ibmvnic_rx_buff *)
3509 be64_to_cpu(next->rx_comp.correlator);
3510 /* do error checking */
3511 if (next->rx_comp.rc) {
3512 netdev_dbg(netdev, "rx buffer returned with rc %x\n",
3513 be16_to_cpu(next->rx_comp.rc));
3514 /* free the entry */
3515 next->rx_comp.first = 0;
3516 dev_kfree_skb_any(rx_buff->skb);
3517 remove_buff_from_pool(adapter, rx_buff);
3518 continue;
3519 } else if (!rx_buff->skb) {
3520 /* free the entry */
3521 next->rx_comp.first = 0;
3522 remove_buff_from_pool(adapter, rx_buff);
3523 continue;
3524 }
3525
3526 length = be32_to_cpu(next->rx_comp.len);
3527 offset = be16_to_cpu(next->rx_comp.off_frame_data);
3528 flags = next->rx_comp.flags;
3529 skb = rx_buff->skb;
3530 /* load long_term_buff before copying to skb */
3531 dma_rmb();
3532 skb_copy_to_linear_data(skb, rx_buff->data + offset,
3533 length);
3534
3535 /* VLAN Header has been stripped by the system firmware and
3536 * needs to be inserted by the driver
3537 */
3538 if (adapter->rx_vlan_header_insertion &&
3539 (flags & IBMVNIC_VLAN_STRIPPED))
3540 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
3541 ntohs(next->rx_comp.vlan_tci));
3542
3543 /* free the entry */
3544 next->rx_comp.first = 0;
3545 remove_buff_from_pool(adapter, rx_buff);
3546
3547 skb_put(skb, length);
3548 skb->protocol = eth_type_trans(skb, netdev);
3549 skb_record_rx_queue(skb, scrq_num);
3550
3551 if (flags & IBMVNIC_IP_CHKSUM_GOOD &&
3552 flags & IBMVNIC_TCP_UDP_CHKSUM_GOOD) {
3553 skb->ip_summed = CHECKSUM_UNNECESSARY;
3554 }
3555
3556 length = skb->len;
3557 napi_gro_receive(napi, skb); /* send it up */
3558 netdev->stats.rx_packets++;
3559 netdev->stats.rx_bytes += length;
3560 adapter->rx_stats_buffers[scrq_num].packets++;
3561 adapter->rx_stats_buffers[scrq_num].bytes += length;
3562 frames_processed++;
3563 }
3564
3565 if (adapter->state != VNIC_CLOSING &&
3566 (atomic_read(&adapter->rx_pool[scrq_num].available) <
3567 adapter->req_rx_add_entries_per_subcrq / 2))
3568 replenish_rx_pool(adapter, &adapter->rx_pool[scrq_num]);
3569 if (frames_processed < budget) {
3570 if (napi_complete_done(napi, frames_processed)) {
3571 enable_scrq_irq(adapter, rx_scrq);
3572 if (pending_scrq(adapter, rx_scrq)) {
3573 if (napi_schedule(napi)) {
3574 disable_scrq_irq(adapter, rx_scrq);
3575 goto restart_poll;
3576 }
3577 }
3578 }
3579 }
3580 return frames_processed;
3581 }
3582
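/* Request a CHANGE_PARAM reset and block until it completes. The current
 * request values are saved as a fallback first; if the reset fails, the
 * fallback values are restored to adapter->desired and the reset is retried
 * once. Each wait is bounded at 60 seconds before giving up with -ENODEV.
 */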
3583 static int wait_for_reset(struct ibmvnic_adapter *adapter)
3584 {
3585 int rc, ret;
3586
3587 adapter->fallback.mtu = adapter->req_mtu;
3588 adapter->fallback.rx_queues = adapter->req_rx_queues;
3589 adapter->fallback.tx_queues = adapter->req_tx_queues;
3590 adapter->fallback.rx_entries = adapter->req_rx_add_entries_per_subcrq;
3591 adapter->fallback.tx_entries = adapter->req_tx_entries_per_subcrq;
3592
3593 reinit_completion(&adapter->reset_done);
3594 adapter->wait_for_reset = true;
3595 rc = ibmvnic_reset(adapter, VNIC_RESET_CHANGE_PARAM);
3596
3597 if (rc) {
3598 ret = rc;
3599 goto out;
3600 }
3601 rc = ibmvnic_wait_for_completion(adapter, &adapter->reset_done, 60000);
3602 if (rc) {
3603 ret = -ENODEV;
3604 goto out;
3605 }
3606
3607 ret = 0;
3608 if (adapter->reset_done_rc) {
3609 ret = -EIO;
3610 adapter->desired.mtu = adapter->fallback.mtu;
3611 adapter->desired.rx_queues = adapter->fallback.rx_queues;
3612 adapter->desired.tx_queues = adapter->fallback.tx_queues;
3613 adapter->desired.rx_entries = adapter->fallback.rx_entries;
3614 adapter->desired.tx_entries = adapter->fallback.tx_entries;
3615
3616 reinit_completion(&adapter->reset_done);
3617 adapter->wait_for_reset = true;
3618 rc = ibmvnic_reset(adapter, VNIC_RESET_CHANGE_PARAM);
3619 if (rc) {
3620 ret = rc;
3621 goto out;
3622 }
3623 rc = ibmvnic_wait_for_completion(adapter, &adapter->reset_done,
3624 60000);
3625 if (rc) {
3626 ret = -ENODEV;
3627 goto out;
3628 }
3629 }
3630 out:
3631 adapter->wait_for_reset = false;
3632
3633 return ret;
3634 }
3635
3636 static int ibmvnic_change_mtu(struct net_device *netdev, int new_mtu)
3637 {
3638 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
3639
3640 adapter->desired.mtu = new_mtu + ETH_HLEN;
3641
3642 return wait_for_reset(adapter);
3643 }
3644
3645 static netdev_features_t ibmvnic_features_check(struct sk_buff *skb,
3646 struct net_device *dev,
3647 netdev_features_t features)
3648 {
3649 /* Some backing hardware adapters cannot
3650 * handle packets with an MSS less than 224
3651 * bytes or with only one segment.
3652 */
3653 if (skb_is_gso(skb)) {
3654 if (skb_shinfo(skb)->gso_size < 224 ||
3655 skb_shinfo(skb)->gso_segs == 1)
3656 features &= ~NETIF_F_GSO_MASK;
3657 }
3658
3659 return features;
3660 }
3661
3662 static const struct net_device_ops ibmvnic_netdev_ops = {
3663 .ndo_open = ibmvnic_open,
3664 .ndo_stop = ibmvnic_close,
3665 .ndo_start_xmit = ibmvnic_xmit,
3666 .ndo_set_rx_mode = ibmvnic_set_multi,
3667 .ndo_set_mac_address = ibmvnic_set_mac,
3668 .ndo_validate_addr = eth_validate_addr,
3669 .ndo_tx_timeout = ibmvnic_tx_timeout,
3670 .ndo_change_mtu = ibmvnic_change_mtu,
3671 .ndo_features_check = ibmvnic_features_check,
3672 };
3673
3674 /* ethtool functions */
3675
3676 static int ibmvnic_get_link_ksettings(struct net_device *netdev,
3677 struct ethtool_link_ksettings *cmd)
3678 {
3679 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
3680 int rc;
3681
3682 rc = send_query_phys_parms(adapter);
3683 if (rc) {
3684 adapter->speed = SPEED_UNKNOWN;
3685 adapter->duplex = DUPLEX_UNKNOWN;
3686 }
3687 cmd->base.speed = adapter->speed;
3688 cmd->base.duplex = adapter->duplex;
3689 cmd->base.port = PORT_FIBRE;
3690 cmd->base.phy_address = 0;
3691 cmd->base.autoneg = AUTONEG_ENABLE;
3692
3693 return 0;
3694 }
3695
3696 static void ibmvnic_get_drvinfo(struct net_device *netdev,
3697 struct ethtool_drvinfo *info)
3698 {
3699 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
3700
3701 strscpy(info->driver, ibmvnic_driver_name, sizeof(info->driver));
3702 strscpy(info->version, IBMVNIC_DRIVER_VERSION, sizeof(info->version));
3703 strscpy(info->fw_version, adapter->fw_version,
3704 sizeof(info->fw_version));
3705 }
3706
3707 static u32 ibmvnic_get_msglevel(struct net_device *netdev)
3708 {
3709 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
3710
3711 return adapter->msg_enable;
3712 }
3713
3714 static void ibmvnic_set_msglevel(struct net_device *netdev, u32 data)
3715 {
3716 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
3717
3718 adapter->msg_enable = data;
3719 }
3720
3721 static u32 ibmvnic_get_link(struct net_device *netdev)
3722 {
3723 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
3724
3725 /* Don't need to send a query because we request a logical link up at
3726 * init and then we wait for link state indications
3727 */
3728 return adapter->logical_link_state;
3729 }
3730
3731 static void ibmvnic_get_ringparam(struct net_device *netdev,
3732 struct ethtool_ringparam *ring,
3733 struct kernel_ethtool_ringparam *kernel_ring,
3734 struct netlink_ext_ack *extack)
3735 {
3736 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
3737
3738 ring->rx_max_pending = adapter->max_rx_add_entries_per_subcrq;
3739 ring->tx_max_pending = adapter->max_tx_entries_per_subcrq;
3740 ring->rx_mini_max_pending = 0;
3741 ring->rx_jumbo_max_pending = 0;
3742 ring->rx_pending = adapter->req_rx_add_entries_per_subcrq;
3743 ring->tx_pending = adapter->req_tx_entries_per_subcrq;
3744 ring->rx_mini_pending = 0;
3745 ring->rx_jumbo_pending = 0;
3746 }
3747
3748 static int ibmvnic_set_ringparam(struct net_device *netdev,
3749 struct ethtool_ringparam *ring,
3750 struct kernel_ethtool_ringparam *kernel_ring,
3751 struct netlink_ext_ack *extack)
3752 {
3753 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
3754
3755 if (ring->rx_pending > adapter->max_rx_add_entries_per_subcrq ||
3756 ring->tx_pending > adapter->max_tx_entries_per_subcrq) {
3757 netdev_err(netdev, "Invalid request.\n");
3758 netdev_err(netdev, "Max tx buffers = %llu\n",
3759 adapter->max_tx_entries_per_subcrq);
3760 netdev_err(netdev, "Max rx buffers = %llu\n",
3761 adapter->max_rx_add_entries_per_subcrq);
3762 return -EINVAL;
3763 }
3764
3765 adapter->desired.rx_entries = ring->rx_pending;
3766 adapter->desired.tx_entries = ring->tx_pending;
3767
3768 return wait_for_reset(adapter);
3769 }
3770
3771 static void ibmvnic_get_channels(struct net_device *netdev,
3772 struct ethtool_channels *channels)
3773 {
3774 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
3775
3776 channels->max_rx = adapter->max_rx_queues;
3777 channels->max_tx = adapter->max_tx_queues;
3778 channels->max_other = 0;
3779 channels->max_combined = 0;
3780 channels->rx_count = adapter->req_rx_queues;
3781 channels->tx_count = adapter->req_tx_queues;
3782 channels->other_count = 0;
3783 channels->combined_count = 0;
3784 }
3785
3786 static int ibmvnic_set_channels(struct net_device *netdev,
3787 struct ethtool_channels *channels)
3788 {
3789 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
3790
3791 adapter->desired.rx_queues = channels->rx_count;
3792 adapter->desired.tx_queues = channels->tx_count;
3793
3794 return wait_for_reset(adapter);
3795 }
3796
3797 static void ibmvnic_get_strings(struct net_device *dev, u32 stringset, u8 *data)
3798 {
3799 struct ibmvnic_adapter *adapter = netdev_priv(dev);
3800 int i;
3801
3802 if (stringset != ETH_SS_STATS)
3803 return;
3804
3805 for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++, data += ETH_GSTRING_LEN)
3806 memcpy(data, ibmvnic_stats[i].name, ETH_GSTRING_LEN);
3807
3808 for (i = 0; i < adapter->req_tx_queues; i++) {
3809 snprintf(data, ETH_GSTRING_LEN, "tx%d_packets", i);
3810 data += ETH_GSTRING_LEN;
3811
3812 snprintf(data, ETH_GSTRING_LEN, "tx%d_bytes", i);
3813 data += ETH_GSTRING_LEN;
3814
3815 snprintf(data, ETH_GSTRING_LEN, "tx%d_dropped_packets", i);
3816 data += ETH_GSTRING_LEN;
3817 }
3818
3819 for (i = 0; i < adapter->req_rx_queues; i++) {
3820 snprintf(data, ETH_GSTRING_LEN, "rx%d_packets", i);
3821 data += ETH_GSTRING_LEN;
3822
3823 snprintf(data, ETH_GSTRING_LEN, "rx%d_bytes", i);
3824 data += ETH_GSTRING_LEN;
3825
3826 snprintf(data, ETH_GSTRING_LEN, "rx%d_interrupts", i);
3827 data += ETH_GSTRING_LEN;
3828 }
3829 }
3830
3831 static int ibmvnic_get_sset_count(struct net_device *dev, int sset)
3832 {
3833 struct ibmvnic_adapter *adapter = netdev_priv(dev);
3834
3835 switch (sset) {
3836 case ETH_SS_STATS:
3837 return ARRAY_SIZE(ibmvnic_stats) +
3838 adapter->req_tx_queues * NUM_TX_STATS +
3839 adapter->req_rx_queues * NUM_RX_STATS;
3840 default:
3841 return -EOPNOTSUPP;
3842 }
3843 }
3844
3845 static void ibmvnic_get_ethtool_stats(struct net_device *dev,
3846 struct ethtool_stats *stats, u64 *data)
3847 {
3848 struct ibmvnic_adapter *adapter = netdev_priv(dev);
3849 union ibmvnic_crq crq;
3850 int i, j;
3851 int rc;
3852
3853 memset(&crq, 0, sizeof(crq));
3854 crq.request_statistics.first = IBMVNIC_CRQ_CMD;
3855 crq.request_statistics.cmd = REQUEST_STATISTICS;
3856 crq.request_statistics.ioba = cpu_to_be32(adapter->stats_token);
3857 crq.request_statistics.len =
3858 cpu_to_be32(sizeof(struct ibmvnic_statistics));
3859
3860 /* Wait for data to be written */
3861 reinit_completion(&adapter->stats_done);
3862 rc = ibmvnic_send_crq(adapter, &crq);
3863 if (rc)
3864 return;
3865 rc = ibmvnic_wait_for_completion(adapter, &adapter->stats_done, 10000);
3866 if (rc)
3867 return;
3868
3869 for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++)
3870 data[i] = be64_to_cpu(IBMVNIC_GET_STAT
3871 (adapter, ibmvnic_stats[i].offset));
3872
3873 for (j = 0; j < adapter->req_tx_queues; j++) {
3874 data[i] = adapter->tx_stats_buffers[j].packets;
3875 i++;
3876 data[i] = adapter->tx_stats_buffers[j].bytes;
3877 i++;
3878 data[i] = adapter->tx_stats_buffers[j].dropped_packets;
3879 i++;
3880 }
3881
3882 for (j = 0; j < adapter->req_rx_queues; j++) {
3883 data[i] = adapter->rx_stats_buffers[j].packets;
3884 i++;
3885 data[i] = adapter->rx_stats_buffers[j].bytes;
3886 i++;
3887 data[i] = adapter->rx_stats_buffers[j].interrupts;
3888 i++;
3889 }
3890 }
3891
3892 static const struct ethtool_ops ibmvnic_ethtool_ops = {
3893 .get_drvinfo = ibmvnic_get_drvinfo,
3894 .get_msglevel = ibmvnic_get_msglevel,
3895 .set_msglevel = ibmvnic_set_msglevel,
3896 .get_link = ibmvnic_get_link,
3897 .get_ringparam = ibmvnic_get_ringparam,
3898 .set_ringparam = ibmvnic_set_ringparam,
3899 .get_channels = ibmvnic_get_channels,
3900 .set_channels = ibmvnic_set_channels,
3901 .get_strings = ibmvnic_get_strings,
3902 .get_sset_count = ibmvnic_get_sset_count,
3903 .get_ethtool_stats = ibmvnic_get_ethtool_stats,
3904 .get_link_ksettings = ibmvnic_get_link_ksettings,
3905 };
3906
3907 /* Routines for managing CRQs/sCRQs */
3908
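/* Re-register a single sub-CRQ with the hypervisor after a reset: release
 * the queue's IRQ mapping, zero the 4-page message area so stale descriptors
 * are not replayed, and register the same DMA-mapped area again with
 * H_REG_SUB_CRQ to obtain a fresh queue number and hardware IRQ.
 */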
3909 static int reset_one_sub_crq_queue(struct ibmvnic_adapter *adapter,
3910 struct ibmvnic_sub_crq_queue *scrq)
3911 {
3912 int rc;
3913
3914 if (!scrq) {
3915 netdev_dbg(adapter->netdev, "Invalid scrq reset.\n");
3916 return -EINVAL;
3917 }
3918
3919 if (scrq->irq) {
3920 free_irq(scrq->irq, scrq);
3921 irq_dispose_mapping(scrq->irq);
3922 scrq->irq = 0;
3923 }
3924
3925 if (scrq->msgs) {
3926 memset(scrq->msgs, 0, 4 * PAGE_SIZE);
3927 atomic_set(&scrq->used, 0);
3928 scrq->cur = 0;
3929 scrq->ind_buf.index = 0;
3930 } else {
3931 netdev_dbg(adapter->netdev, "Invalid scrq reset\n");
3932 return -EINVAL;
3933 }
3934
3935 rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token,
3936 4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq);
3937 return rc;
3938 }
3939
3940 static int reset_sub_crq_queues(struct ibmvnic_adapter *adapter)
3941 {
3942 int i, rc;
3943
3944 if (!adapter->tx_scrq || !adapter->rx_scrq)
3945 return -EINVAL;
3946
3947 ibmvnic_clean_affinity(adapter);
3948
3949 for (i = 0; i < adapter->req_tx_queues; i++) {
3950 netdev_dbg(adapter->netdev, "Re-setting tx_scrq[%d]\n", i);
3951 rc = reset_one_sub_crq_queue(adapter, adapter->tx_scrq[i]);
3952 if (rc)
3953 return rc;
3954 }
3955
3956 for (i = 0; i < adapter->req_rx_queues; i++) {
3957 netdev_dbg(adapter->netdev, "Re-setting rx_scrq[%d]\n", i);
3958 rc = reset_one_sub_crq_queue(adapter, adapter->rx_scrq[i]);
3959 if (rc)
3960 return rc;
3961 }
3962
3963 return rc;
3964 }
3965
3966 static void release_sub_crq_queue(struct ibmvnic_adapter *adapter,
3967 struct ibmvnic_sub_crq_queue *scrq,
3968 bool do_h_free)
3969 {
3970 struct device *dev = &adapter->vdev->dev;
3971 long rc;
3972
3973 netdev_dbg(adapter->netdev, "Releasing sub-CRQ\n");
3974
3975 if (do_h_free) {
3976 /* Close the sub-crqs */
3977 do {
3978 rc = plpar_hcall_norets(H_FREE_SUB_CRQ,
3979 adapter->vdev->unit_address,
3980 scrq->crq_num);
3981 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
3982
3983 if (rc) {
3984 netdev_err(adapter->netdev,
3985 "Failed to release sub-CRQ %16lx, rc = %ld\n",
3986 scrq->crq_num, rc);
3987 }
3988 }
3989
3990 dma_free_coherent(dev,
3991 IBMVNIC_IND_ARR_SZ,
3992 scrq->ind_buf.indir_arr,
3993 scrq->ind_buf.indir_dma);
3994
3995 dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
3996 DMA_BIDIRECTIONAL);
3997 free_pages((unsigned long)scrq->msgs, 2);
3998 free_cpumask_var(scrq->affinity_mask);
3999 kfree(scrq);
4000 }
4001
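/* Allocate and register one sub-CRQ. The message area is four contiguous
 * pages (an order-2 allocation) that are DMA mapped and handed to the
 * hypervisor via H_REG_SUB_CRQ; an indirect-descriptor buffer of
 * IBMVNIC_IND_ARR_SZ bytes is also allocated so multiple descriptors can be
 * posted with a single H_SEND_SUB_CRQ_INDIRECT call. Returns NULL and
 * unwinds all allocations on any failure.
 */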
4002 static struct ibmvnic_sub_crq_queue *init_sub_crq_queue(struct ibmvnic_adapter
4003 *adapter)
4004 {
4005 struct device *dev = &adapter->vdev->dev;
4006 struct ibmvnic_sub_crq_queue *scrq;
4007 int rc;
4008
4009 scrq = kzalloc(sizeof(*scrq), GFP_KERNEL);
4010 if (!scrq)
4011 return NULL;
4012
4013 scrq->msgs =
4014 (union sub_crq *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 2);
4015 if (!scrq->msgs) {
4016 dev_warn(dev, "Couldn't allocate crq queue messages page\n");
4017 goto zero_page_failed;
4018 }
4019 if (!zalloc_cpumask_var(&scrq->affinity_mask, GFP_KERNEL))
4020 goto cpumask_alloc_failed;
4021
4022 scrq->msg_token = dma_map_single(dev, scrq->msgs, 4 * PAGE_SIZE,
4023 DMA_BIDIRECTIONAL);
4024 if (dma_mapping_error(dev, scrq->msg_token)) {
4025 dev_warn(dev, "Couldn't map crq queue messages page\n");
4026 goto map_failed;
4027 }
4028
4029 rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token,
4030 4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq);
4031
4032 if (rc == H_RESOURCE)
4033 rc = ibmvnic_reset_crq(adapter);
4034
4035 if (rc == H_CLOSED) {
4036 dev_warn(dev, "Partner adapter not ready, waiting.\n");
4037 } else if (rc) {
4038 dev_warn(dev, "Error %d registering sub-crq\n", rc);
4039 goto reg_failed;
4040 }
4041
4042 scrq->adapter = adapter;
4043 scrq->size = 4 * PAGE_SIZE / sizeof(*scrq->msgs);
4044 scrq->ind_buf.index = 0;
4045
4046 scrq->ind_buf.indir_arr =
4047 dma_alloc_coherent(dev,
4048 IBMVNIC_IND_ARR_SZ,
4049 &scrq->ind_buf.indir_dma,
4050 GFP_KERNEL);
4051
4052 if (!scrq->ind_buf.indir_arr)
4053 goto indir_failed;
4054
4055 spin_lock_init(&scrq->lock);
4056
4057 netdev_dbg(adapter->netdev,
4058 "sub-crq initialized, num %lx, hw_irq=%lx, irq=%x\n",
4059 scrq->crq_num, scrq->hw_irq, scrq->irq);
4060
4061 return scrq;
4062
4063 indir_failed:
4064 do {
4065 rc = plpar_hcall_norets(H_FREE_SUB_CRQ,
4066 adapter->vdev->unit_address,
4067 scrq->crq_num);
4068 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
4069 reg_failed:
4070 dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
4071 DMA_BIDIRECTIONAL);
4072 map_failed:
4073 free_cpumask_var(scrq->affinity_mask);
4074 cpumask_alloc_failed:
4075 free_pages((unsigned long)scrq->msgs, 2);
4076 zero_page_failed:
4077 kfree(scrq);
4078
4079 return NULL;
4080 }
4081
4082 static void release_sub_crqs(struct ibmvnic_adapter *adapter, bool do_h_free)
4083 {
4084 int i;
4085
4086 ibmvnic_clean_affinity(adapter);
4087 if (adapter->tx_scrq) {
4088 for (i = 0; i < adapter->num_active_tx_scrqs; i++) {
4089 if (!adapter->tx_scrq[i])
4090 continue;
4091
4092 netdev_dbg(adapter->netdev, "Releasing tx_scrq[%d]\n",
4093 i);
4094 ibmvnic_tx_scrq_clean_buffer(adapter, adapter->tx_scrq[i]);
4095 if (adapter->tx_scrq[i]->irq) {
4096 free_irq(adapter->tx_scrq[i]->irq,
4097 adapter->tx_scrq[i]);
4098 irq_dispose_mapping(adapter->tx_scrq[i]->irq);
4099 adapter->tx_scrq[i]->irq = 0;
4100 }
4101
4102 release_sub_crq_queue(adapter, adapter->tx_scrq[i],
4103 do_h_free);
4104 }
4105
4106 kfree(adapter->tx_scrq);
4107 adapter->tx_scrq = NULL;
4108 adapter->num_active_tx_scrqs = 0;
4109 }
4110
4111 /* Clean any remaining outstanding SKBs.
4112 * We freed the IRQs above, so we won't be hearing
4113 * from them again.
4114 */
4115 clean_tx_pools(adapter);
4116
4117 if (adapter->rx_scrq) {
4118 for (i = 0; i < adapter->num_active_rx_scrqs; i++) {
4119 if (!adapter->rx_scrq[i])
4120 continue;
4121
4122 netdev_dbg(adapter->netdev, "Releasing rx_scrq[%d]\n",
4123 i);
4124 if (adapter->rx_scrq[i]->irq) {
4125 free_irq(adapter->rx_scrq[i]->irq,
4126 adapter->rx_scrq[i]);
4127 irq_dispose_mapping(adapter->rx_scrq[i]->irq);
4128 adapter->rx_scrq[i]->irq = 0;
4129 }
4130
4131 release_sub_crq_queue(adapter, adapter->rx_scrq[i],
4132 do_h_free);
4133 }
4134
4135 kfree(adapter->rx_scrq);
4136 adapter->rx_scrq = NULL;
4137 adapter->num_active_rx_scrqs = 0;
4138 }
4139 }
4140
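/* Sub-CRQ interrupts are controlled through the hypervisor rather than the
 * generic IRQ chip: H_VIOCTL with H_DISABLE_VIO_INTERRUPT or
 * H_ENABLE_VIO_INTERRUPT masks or unmasks the queue's hardware interrupt
 * identified by scrq->hw_irq.
 */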
4141 static int disable_scrq_irq(struct ibmvnic_adapter *adapter,
4142 struct ibmvnic_sub_crq_queue *scrq)
4143 {
4144 struct device *dev = &adapter->vdev->dev;
4145 unsigned long rc;
4146
4147 rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
4148 H_DISABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0);
4149 if (rc)
4150 dev_err(dev, "Couldn't disable scrq irq 0x%lx. rc=%ld\n",
4151 scrq->hw_irq, rc);
4152 return rc;
4153 }
4154
4155 /* We cannot use the IRQ chip EOI handler because that has the
4156 * unintended effect of changing the interrupt priority.
4157 */
4158 static void ibmvnic_xics_eoi(struct device *dev, struct ibmvnic_sub_crq_queue *scrq)
4159 {
4160 u64 val = 0xff000000 | scrq->hw_irq;
4161 unsigned long rc;
4162
4163 rc = plpar_hcall_norets(H_EOI, val);
4164 if (rc)
4165 dev_err(dev, "H_EOI FAILED irq 0x%llx. rc=%ld\n", val, rc);
4166 }
4167
4168 /* Due to a firmware bug, the hypervisor can send an interrupt to a
4169 * transmit or receive queue just prior to a partition migration.
4170 * Force an EOI after migration.
4171 */
4172 static void ibmvnic_clear_pending_interrupt(struct device *dev,
4173 struct ibmvnic_sub_crq_queue *scrq)
4174 {
4175 if (!xive_enabled())
4176 ibmvnic_xics_eoi(dev, scrq);
4177 }
4178
4179 static int enable_scrq_irq(struct ibmvnic_adapter *adapter,
4180 struct ibmvnic_sub_crq_queue *scrq)
4181 {
4182 struct device *dev = &adapter->vdev->dev;
4183 unsigned long rc;
4184
4185 if (scrq->hw_irq > 0x100000000ULL) {
4186 dev_err(dev, "bad hw_irq = %lx\n", scrq->hw_irq);
4187 return 1;
4188 }
4189
4190 if (test_bit(0, &adapter->resetting) &&
4191 adapter->reset_reason == VNIC_RESET_MOBILITY) {
4192 ibmvnic_clear_pending_interrupt(dev, scrq);
4193 }
4194
4195 rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
4196 H_ENABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0);
4197 if (rc)
4198 dev_err(dev, "Couldn't enable scrq irq 0x%lx. rc=%ld\n",
4199 scrq->hw_irq, rc);
4200 return rc;
4201 }
4202
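/* Process TX completions for one sub-CRQ. Each completion descriptor can
 * carry several correlators; each correlator indexes a tx_buff in either the
 * TSO pool or the regular pool (distinguished by IBMVNIC_TSO_POOL_MASK).
 * Completed skbs are freed, their slots are returned to the pool's free map,
 * and a stopped subqueue is woken once the in-flight entries drop to half or
 * fewer of the requested ring size. The IRQ is re-enabled only once the
 * queue has been drained.
 */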
4203 static int ibmvnic_complete_tx(struct ibmvnic_adapter *adapter,
4204 struct ibmvnic_sub_crq_queue *scrq)
4205 {
4206 struct device *dev = &adapter->vdev->dev;
4207 int num_packets = 0, total_bytes = 0;
4208 struct ibmvnic_tx_pool *tx_pool;
4209 struct ibmvnic_tx_buff *txbuff;
4210 struct netdev_queue *txq;
4211 union sub_crq *next;
4212 int index, i;
4213
4214 restart_loop:
4215 while (pending_scrq(adapter, scrq)) {
4216 unsigned int pool = scrq->pool_index;
4217 int num_entries = 0;
4218 next = ibmvnic_next_scrq(adapter, scrq);
4219 for (i = 0; i < next->tx_comp.num_comps; i++) {
4220 index = be32_to_cpu(next->tx_comp.correlators[i]);
4221 if (index & IBMVNIC_TSO_POOL_MASK) {
4222 tx_pool = &adapter->tso_pool[pool];
4223 index &= ~IBMVNIC_TSO_POOL_MASK;
4224 } else {
4225 tx_pool = &adapter->tx_pool[pool];
4226 }
4227
4228 txbuff = &tx_pool->tx_buff[index];
4229 num_packets++;
4230 num_entries += txbuff->num_entries;
4231 if (txbuff->skb) {
4232 total_bytes += txbuff->skb->len;
4233 if (next->tx_comp.rcs[i]) {
4234 dev_err(dev, "tx error %x\n",
4235 next->tx_comp.rcs[i]);
4236 dev_kfree_skb_irq(txbuff->skb);
4237 } else {
4238 dev_consume_skb_irq(txbuff->skb);
4239 }
4240 txbuff->skb = NULL;
4241 } else {
4242 netdev_warn(adapter->netdev,
4243 "TX completion received with NULL socket buffer\n");
4244 }
4245 tx_pool->free_map[tx_pool->producer_index] = index;
4246 tx_pool->producer_index =
4247 (tx_pool->producer_index + 1) %
4248 tx_pool->num_buffers;
4249 }
4250 /* remove tx_comp scrq */
4251 next->tx_comp.first = 0;
4252
4253
4254 if (atomic_sub_return(num_entries, &scrq->used) <=
4255 (adapter->req_tx_entries_per_subcrq / 2) &&
4256 __netif_subqueue_stopped(adapter->netdev,
4257 scrq->pool_index)) {
4258 rcu_read_lock();
4259 if (adapter->tx_queues_active) {
4260 netif_wake_subqueue(adapter->netdev,
4261 scrq->pool_index);
4262 netdev_dbg(adapter->netdev,
4263 "Started queue %d\n",
4264 scrq->pool_index);
4265 }
4266 rcu_read_unlock();
4267 }
4268 }
4269
4270 enable_scrq_irq(adapter, scrq);
4271
4272 if (pending_scrq(adapter, scrq)) {
4273 disable_scrq_irq(adapter, scrq);
4274 goto restart_loop;
4275 }
4276
4277 txq = netdev_get_tx_queue(adapter->netdev, scrq->pool_index);
4278 netdev_tx_completed_queue(txq, num_packets, total_bytes);
4279
4280 return 0;
4281 }
4282
4283 static irqreturn_t ibmvnic_interrupt_tx(int irq, void *instance)
4284 {
4285 struct ibmvnic_sub_crq_queue *scrq = instance;
4286 struct ibmvnic_adapter *adapter = scrq->adapter;
4287
4288 disable_scrq_irq(adapter, scrq);
4289 ibmvnic_complete_tx(adapter, scrq);
4290
4291 return IRQ_HANDLED;
4292 }
4293
4294 static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance)
4295 {
4296 struct ibmvnic_sub_crq_queue *scrq = instance;
4297 struct ibmvnic_adapter *adapter = scrq->adapter;
4298
4299 /* When booting a kdump kernel we can hit pending interrupts
4300 * prior to completing driver initialization.
4301 */
4302 if (unlikely(adapter->state != VNIC_OPEN))
4303 return IRQ_NONE;
4304
4305 adapter->rx_stats_buffers[scrq->scrq_num].interrupts++;
4306
4307 if (napi_schedule_prep(&adapter->napi[scrq->scrq_num])) {
4308 disable_scrq_irq(adapter, scrq);
4309 __napi_schedule(&adapter->napi[scrq->scrq_num]);
4310 }
4311
4312 return IRQ_HANDLED;
4313 }
4314
4315 static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter)
4316 {
4317 struct device *dev = &adapter->vdev->dev;
4318 struct ibmvnic_sub_crq_queue *scrq;
4319 int i = 0, j = 0;
4320 int rc = 0;
4321
4322 for (i = 0; i < adapter->req_tx_queues; i++) {
4323 netdev_dbg(adapter->netdev, "Initializing tx_scrq[%d] irq\n",
4324 i);
4325 scrq = adapter->tx_scrq[i];
4326 scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
4327
4328 if (!scrq->irq) {
4329 rc = -EINVAL;
4330 dev_err(dev, "Error mapping irq\n");
4331 goto req_tx_irq_failed;
4332 }
4333
4334 snprintf(scrq->name, sizeof(scrq->name), "ibmvnic-%x-tx%d",
4335 adapter->vdev->unit_address, i);
4336 rc = request_irq(scrq->irq, ibmvnic_interrupt_tx,
4337 0, scrq->name, scrq);
4338
4339 if (rc) {
4340 dev_err(dev, "Couldn't register tx irq 0x%x. rc=%d\n",
4341 scrq->irq, rc);
4342 irq_dispose_mapping(scrq->irq);
4343 goto req_tx_irq_failed;
4344 }
4345 }
4346
4347 for (i = 0; i < adapter->req_rx_queues; i++) {
4348 netdev_dbg(adapter->netdev, "Initializing rx_scrq[%d] irq\n",
4349 i);
4350 scrq = adapter->rx_scrq[i];
4351 scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
4352 if (!scrq->irq) {
4353 rc = -EINVAL;
4354 dev_err(dev, "Error mapping irq\n");
4355 goto req_rx_irq_failed;
4356 }
4357 snprintf(scrq->name, sizeof(scrq->name), "ibmvnic-%x-rx%d",
4358 adapter->vdev->unit_address, i);
4359 rc = request_irq(scrq->irq, ibmvnic_interrupt_rx,
4360 0, scrq->name, scrq);
4361 if (rc) {
4362 dev_err(dev, "Couldn't register rx irq 0x%x. rc=%d\n",
4363 scrq->irq, rc);
4364 irq_dispose_mapping(scrq->irq);
4365 goto req_rx_irq_failed;
4366 }
4367 }
4368
4369 cpus_read_lock();
4370 ibmvnic_set_affinity(adapter);
4371 cpus_read_unlock();
4372
4373 return rc;
4374
4375 req_rx_irq_failed:
4376 for (j = 0; j < i; j++) {
4377 free_irq(adapter->rx_scrq[j]->irq, adapter->rx_scrq[j]);
4378 irq_dispose_mapping(adapter->rx_scrq[j]->irq);
4379 }
4380 i = adapter->req_tx_queues;
4381 req_tx_irq_failed:
4382 for (j = 0; j < i; j++) {
4383 free_irq(adapter->tx_scrq[j]->irq, adapter->tx_scrq[j]);
4384 irq_dispose_mapping(adapter->tx_scrq[j]->irq);
4385 }
4386 release_sub_crqs(adapter, 1);
4387 return rc;
4388 }
4389
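/* Allocate the full set of TX and RX sub-CRQs. If fewer queues register than
 * were requested (but at least the driver minimums), the shortfall is
 * absorbed by alternately reducing the requested RX and TX queue counts,
 * never below their minimums, and the successfully registered queues are
 * then split in order between adapter->tx_scrq and adapter->rx_scrq.
 */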
4390 static int init_sub_crqs(struct ibmvnic_adapter *adapter)
4391 {
4392 struct device *dev = &adapter->vdev->dev;
4393 struct ibmvnic_sub_crq_queue **allqueues;
4394 int registered_queues = 0;
4395 int total_queues;
4396 int more = 0;
4397 int i;
4398
4399 total_queues = adapter->req_tx_queues + adapter->req_rx_queues;
4400
4401 allqueues = kcalloc(total_queues, sizeof(*allqueues), GFP_KERNEL);
4402 if (!allqueues)
4403 return -ENOMEM;
4404
4405 for (i = 0; i < total_queues; i++) {
4406 allqueues[i] = init_sub_crq_queue(adapter);
4407 if (!allqueues[i]) {
4408 dev_warn(dev, "Couldn't allocate all sub-crqs\n");
4409 break;
4410 }
4411 registered_queues++;
4412 }
4413
4414 /* Make sure we were able to register the minimum number of queues */
4415 if (registered_queues <
4416 adapter->min_tx_queues + adapter->min_rx_queues) {
4417 dev_err(dev, "Fatal: Couldn't init min number of sub-crqs\n");
4418 goto tx_failed;
4419 }
4420
4421 /* Absorb the allocation shortfall by reducing the requested queue counts */
4422 for (i = 0; i < total_queues - registered_queues + more; i++) {
4423 netdev_dbg(adapter->netdev, "Reducing number of queues\n");
4424 switch (i % 3) {
4425 case 0:
4426 if (adapter->req_rx_queues > adapter->min_rx_queues)
4427 adapter->req_rx_queues--;
4428 else
4429 more++;
4430 break;
4431 case 1:
4432 if (adapter->req_tx_queues > adapter->min_tx_queues)
4433 adapter->req_tx_queues--;
4434 else
4435 more++;
4436 break;
4437 }
4438 }
4439
4440 adapter->tx_scrq = kcalloc(adapter->req_tx_queues,
4441 sizeof(*adapter->tx_scrq), GFP_KERNEL);
4442 if (!adapter->tx_scrq)
4443 goto tx_failed;
4444
4445 for (i = 0; i < adapter->req_tx_queues; i++) {
4446 adapter->tx_scrq[i] = allqueues[i];
4447 adapter->tx_scrq[i]->pool_index = i;
4448 adapter->num_active_tx_scrqs++;
4449 }
4450
4451 adapter->rx_scrq = kcalloc(adapter->req_rx_queues,
4452 sizeof(*adapter->rx_scrq), GFP_KERNEL);
4453 if (!adapter->rx_scrq)
4454 goto rx_failed;
4455
4456 for (i = 0; i < adapter->req_rx_queues; i++) {
4457 adapter->rx_scrq[i] = allqueues[i + adapter->req_tx_queues];
4458 adapter->rx_scrq[i]->scrq_num = i;
4459 adapter->num_active_rx_scrqs++;
4460 }
4461
4462 kfree(allqueues);
4463 return 0;
4464
4465 rx_failed:
4466 kfree(adapter->tx_scrq);
4467 adapter->tx_scrq = NULL;
4468 tx_failed:
4469 for (i = 0; i < registered_queues; i++)
4470 release_sub_crq_queue(adapter, allqueues[i], 1);
4471 kfree(allqueues);
4472 return -ENOMEM;
4473 }
4474
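/* Send the REQUEST_CAPABILITY CRQs that pin down the negotiated settings.
 * On the first (non-retry) pass the requested values are derived from
 * adapter->desired where set, otherwise from the server-provided maximums,
 * and ring sizes are clamped so that one ring's buffers fit within
 * IBMVNIC_LTB_SET_SIZE at the requested MTU. running_cap_crqs counts the
 * outstanding requests so the tasklet knows when to send QUERY_IP_OFFLOAD.
 */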
4475 static void send_request_cap(struct ibmvnic_adapter *adapter, int retry)
4476 {
4477 struct device *dev = &adapter->vdev->dev;
4478 union ibmvnic_crq crq;
4479 int max_entries;
4480 int cap_reqs;
4481
4482 /* We send out 6 or 7 REQUEST_CAPABILITY CRQs below (depending on
4483 * the PROMISC flag). Initialize this count upfront. When the tasklet
4484 * receives a response to all of these, it will send the next protocol
4485 * message (QUERY_IP_OFFLOAD).
4486 */
4487 if (!(adapter->netdev->flags & IFF_PROMISC) ||
4488 adapter->promisc_supported)
4489 cap_reqs = 7;
4490 else
4491 cap_reqs = 6;
4492
4493 if (!retry) {
4494 /* Sub-CRQ entries are 32 bytes long */
4495 int entries_page = 4 * PAGE_SIZE / (sizeof(u64) * 4);
4496
4497 atomic_set(&adapter->running_cap_crqs, cap_reqs);
4498
4499 if (adapter->min_tx_entries_per_subcrq > entries_page ||
4500 adapter->min_rx_add_entries_per_subcrq > entries_page) {
4501 dev_err(dev, "Fatal, invalid entries per sub-crq\n");
4502 return;
4503 }
4504
4505 if (adapter->desired.mtu)
4506 adapter->req_mtu = adapter->desired.mtu;
4507 else
4508 adapter->req_mtu = adapter->netdev->mtu + ETH_HLEN;
4509
4510 if (!adapter->desired.tx_entries)
4511 adapter->desired.tx_entries =
4512 adapter->max_tx_entries_per_subcrq;
4513 if (!adapter->desired.rx_entries)
4514 adapter->desired.rx_entries =
4515 adapter->max_rx_add_entries_per_subcrq;
4516
4517 max_entries = IBMVNIC_LTB_SET_SIZE /
4518 (adapter->req_mtu + IBMVNIC_BUFFER_HLEN);
4519
4520 if ((adapter->req_mtu + IBMVNIC_BUFFER_HLEN) *
4521 adapter->desired.tx_entries > IBMVNIC_LTB_SET_SIZE) {
4522 adapter->desired.tx_entries = max_entries;
4523 }
4524
4525 if ((adapter->req_mtu + IBMVNIC_BUFFER_HLEN) *
4526 adapter->desired.rx_entries > IBMVNIC_LTB_SET_SIZE) {
4527 adapter->desired.rx_entries = max_entries;
4528 }
4529
4530 if (adapter->desired.tx_entries)
4531 adapter->req_tx_entries_per_subcrq =
4532 adapter->desired.tx_entries;
4533 else
4534 adapter->req_tx_entries_per_subcrq =
4535 adapter->max_tx_entries_per_subcrq;
4536
4537 if (adapter->desired.rx_entries)
4538 adapter->req_rx_add_entries_per_subcrq =
4539 adapter->desired.rx_entries;
4540 else
4541 adapter->req_rx_add_entries_per_subcrq =
4542 adapter->max_rx_add_entries_per_subcrq;
4543
4544 if (adapter->desired.tx_queues)
4545 adapter->req_tx_queues =
4546 adapter->desired.tx_queues;
4547 else
4548 adapter->req_tx_queues =
4549 adapter->opt_tx_comp_sub_queues;
4550
4551 if (adapter->desired.rx_queues)
4552 adapter->req_rx_queues =
4553 adapter->desired.rx_queues;
4554 else
4555 adapter->req_rx_queues =
4556 adapter->opt_rx_comp_queues;
4557
4558 adapter->req_rx_add_queues = adapter->max_rx_add_queues;
4559 } else {
4560 atomic_add(cap_reqs, &adapter->running_cap_crqs);
4561 }
4562 memset(&crq, 0, sizeof(crq));
4563 crq.request_capability.first = IBMVNIC_CRQ_CMD;
4564 crq.request_capability.cmd = REQUEST_CAPABILITY;
4565
4566 crq.request_capability.capability = cpu_to_be16(REQ_TX_QUEUES);
4567 crq.request_capability.number = cpu_to_be64(adapter->req_tx_queues);
4568 cap_reqs--;
4569 ibmvnic_send_crq(adapter, &crq);
4570
4571 crq.request_capability.capability = cpu_to_be16(REQ_RX_QUEUES);
4572 crq.request_capability.number = cpu_to_be64(adapter->req_rx_queues);
4573 cap_reqs--;
4574 ibmvnic_send_crq(adapter, &crq);
4575
4576 crq.request_capability.capability = cpu_to_be16(REQ_RX_ADD_QUEUES);
4577 crq.request_capability.number = cpu_to_be64(adapter->req_rx_add_queues);
4578 cap_reqs--;
4579 ibmvnic_send_crq(adapter, &crq);
4580
4581 crq.request_capability.capability =
4582 cpu_to_be16(REQ_TX_ENTRIES_PER_SUBCRQ);
4583 crq.request_capability.number =
4584 cpu_to_be64(adapter->req_tx_entries_per_subcrq);
4585 cap_reqs--;
4586 ibmvnic_send_crq(adapter, &crq);
4587
4588 crq.request_capability.capability =
4589 cpu_to_be16(REQ_RX_ADD_ENTRIES_PER_SUBCRQ);
4590 crq.request_capability.number =
4591 cpu_to_be64(adapter->req_rx_add_entries_per_subcrq);
4592 cap_reqs--;
4593 ibmvnic_send_crq(adapter, &crq);
4594
4595 crq.request_capability.capability = cpu_to_be16(REQ_MTU);
4596 crq.request_capability.number = cpu_to_be64(adapter->req_mtu);
4597 cap_reqs--;
4598 ibmvnic_send_crq(adapter, &crq);
4599
4600 if (adapter->netdev->flags & IFF_PROMISC) {
4601 if (adapter->promisc_supported) {
4602 crq.request_capability.capability =
4603 cpu_to_be16(PROMISC_REQUESTED);
4604 crq.request_capability.number = cpu_to_be64(1);
4605 cap_reqs--;
4606 ibmvnic_send_crq(adapter, &crq);
4607 }
4608 } else {
4609 crq.request_capability.capability =
4610 cpu_to_be16(PROMISC_REQUESTED);
4611 crq.request_capability.number = cpu_to_be64(0);
4612 cap_reqs--;
4613 ibmvnic_send_crq(adapter, &crq);
4614 }
4615
4616 /* Keep at end to catch any discrepancy between expected and actual
4617 * CRQs sent.
4618 */
4619 WARN_ON(cap_reqs != 0);
4620 }
4621
4622 static int pending_scrq(struct ibmvnic_adapter *adapter,
4623 struct ibmvnic_sub_crq_queue *scrq)
4624 {
4625 union sub_crq *entry = &scrq->msgs[scrq->cur];
4626 int rc;
4627
4628 rc = !!(entry->generic.first & IBMVNIC_CRQ_CMD_RSP);
4629
4630 /* Ensure that the SCRQ valid flag is loaded prior to loading the
4631 * contents of the SCRQ descriptor
4632 */
4633 dma_rmb();
4634
4635 return rc;
4636 }
4637
4638 static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *adapter,
4639 struct ibmvnic_sub_crq_queue *scrq)
4640 {
4641 union sub_crq *entry;
4642 unsigned long flags;
4643
4644 spin_lock_irqsave(&scrq->lock, flags);
4645 entry = &scrq->msgs[scrq->cur];
4646 if (entry->generic.first & IBMVNIC_CRQ_CMD_RSP) {
4647 if (++scrq->cur == scrq->size)
4648 scrq->cur = 0;
4649 } else {
4650 entry = NULL;
4651 }
4652 spin_unlock_irqrestore(&scrq->lock, flags);
4653
4654 /* Ensure that the SCRQ valid flag is loaded prior to loading the
4655 * contents of the SCRQ descriptor
4656 */
4657 dma_rmb();
4658
4659 return entry;
4660 }
4661
4662 static union ibmvnic_crq *ibmvnic_next_crq(struct ibmvnic_adapter *adapter)
4663 {
4664 struct ibmvnic_crq_queue *queue = &adapter->crq;
4665 union ibmvnic_crq *crq;
4666
4667 crq = &queue->msgs[queue->cur];
4668 if (crq->generic.first & IBMVNIC_CRQ_CMD_RSP) {
4669 if (++queue->cur == queue->size)
4670 queue->cur = 0;
4671 } else {
4672 crq = NULL;
4673 }
4674
4675 return crq;
4676 }
4677
4678 static void print_subcrq_error(struct device *dev, int rc, const char *func)
4679 {
4680 switch (rc) {
4681 case H_PARAMETER:
4682 dev_warn_ratelimited(dev,
4683 "%s failed: Send request is malformed or adapter failover pending. (rc=%d)\n",
4684 func, rc);
4685 break;
4686 case H_CLOSED:
4687 dev_warn_ratelimited(dev,
4688 "%s failed: Backing queue closed. Adapter is down or failover pending. (rc=%d)\n",
4689 func, rc);
4690 break;
4691 default:
4692 dev_err_ratelimited(dev, "%s failed: (rc=%d)\n", func, rc);
4693 break;
4694 }
4695 }
4696
4697 static int send_subcrq_indirect(struct ibmvnic_adapter *adapter,
4698 u64 remote_handle, u64 ioba, u64 num_entries)
4699 {
4700 unsigned int ua = adapter->vdev->unit_address;
4701 struct device *dev = &adapter->vdev->dev;
4702 int rc;
4703
4704 /* Make sure the hypervisor sees the complete request */
4705 dma_wmb();
4706 rc = plpar_hcall_norets(H_SEND_SUB_CRQ_INDIRECT, ua,
4707 cpu_to_be64(remote_handle),
4708 ioba, num_entries);
4709
4710 if (rc)
4711 print_subcrq_error(dev, rc, __func__);
4712
4713 return rc;
4714 }
4715
4716 static int ibmvnic_send_crq(struct ibmvnic_adapter *adapter,
4717 union ibmvnic_crq *crq)
4718 {
4719 unsigned int ua = adapter->vdev->unit_address;
4720 struct device *dev = &adapter->vdev->dev;
4721 u64 *u64_crq = (u64 *)crq;
4722 int rc;
4723
4724 netdev_dbg(adapter->netdev, "Sending CRQ: %016lx %016lx\n",
4725 (unsigned long)cpu_to_be64(u64_crq[0]),
4726 (unsigned long)cpu_to_be64(u64_crq[1]));
4727
4728 if (!adapter->crq.active &&
4729 crq->generic.first != IBMVNIC_CRQ_INIT_CMD) {
4730 dev_warn(dev, "Invalid request detected while CRQ is inactive, possible device state change during reset\n");
4731 return -EINVAL;
4732 }
4733
4734 /* Make sure the hypervisor sees the complete request */
4735 dma_wmb();
4736
4737 rc = plpar_hcall_norets(H_SEND_CRQ, ua,
4738 cpu_to_be64(u64_crq[0]),
4739 cpu_to_be64(u64_crq[1]));
4740
4741 if (rc) {
4742 if (rc == H_CLOSED) {
4743 dev_warn(dev, "CRQ Queue closed\n");
4744 /* do not reset, report the fail, wait for passive init from server */
4745 }
4746
4747 dev_warn(dev, "Send error (rc=%d)\n", rc);
4748 }
4749
4750 return rc;
4751 }
4752
4753 static int ibmvnic_send_crq_init(struct ibmvnic_adapter *adapter)
4754 {
4755 struct device *dev = &adapter->vdev->dev;
4756 union ibmvnic_crq crq;
4757 int retries = 100;
4758 int rc;
4759
4760 memset(&crq, 0, sizeof(crq));
4761 crq.generic.first = IBMVNIC_CRQ_INIT_CMD;
4762 crq.generic.cmd = IBMVNIC_CRQ_INIT;
4763 netdev_dbg(adapter->netdev, "Sending CRQ init\n");
4764
4765 do {
4766 rc = ibmvnic_send_crq(adapter, &crq);
4767 if (rc != H_CLOSED)
4768 break;
4769 retries--;
4770 msleep(50);
4771
4772 } while (retries > 0);
4773
4774 if (rc) {
4775 dev_err(dev, "Failed to send init request, rc = %d\n", rc);
4776 return rc;
4777 }
4778
4779 return 0;
4780 }
4781
4782 struct vnic_login_client_data {
4783 u8 type;
4784 __be16 len;
4785 char name[];
4786 } __packed;
4787
4788 static int vnic_client_data_len(struct ibmvnic_adapter *adapter)
4789 {
4790 int len;
4791
4792 /* Calculate the amount of buffer space needed for the
4793 * vnic client data in the login buffer. There are four entries,
4794 * OS name, LPAR name, device name, and a null last entry.
4795 */
4796 len = 4 * sizeof(struct vnic_login_client_data);
4797 len += 6; /* "Linux" plus NULL */
4798 len += strlen(utsname()->nodename) + 1;
4799 len += strlen(adapter->netdev->name) + 1;
4800
4801 return len;
4802 }
4803
4804 static void vnic_add_client_data(struct ibmvnic_adapter *adapter,
4805 struct vnic_login_client_data *vlcd)
4806 {
4807 const char *os_name = "Linux";
4808 int len;
4809
4810 /* Type 1 - LPAR OS */
4811 vlcd->type = 1;
4812 len = strlen(os_name) + 1;
4813 vlcd->len = cpu_to_be16(len);
4814 strscpy(vlcd->name, os_name, len);
4815 vlcd = (struct vnic_login_client_data *)(vlcd->name + len);
4816
4817 /* Type 2 - LPAR name */
4818 vlcd->type = 2;
4819 len = strlen(utsname()->nodename) + 1;
4820 vlcd->len = cpu_to_be16(len);
4821 strscpy(vlcd->name, utsname()->nodename, len);
4822 vlcd = (struct vnic_login_client_data *)(vlcd->name + len);
4823
4824 /* Type 3 - device name */
4825 vlcd->type = 3;
4826 len = strlen(adapter->netdev->name) + 1;
4827 vlcd->len = cpu_to_be16(len);
4828 strscpy(vlcd->name, adapter->netdev->name, len);
4829 }
4830
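/* Build and send the LOGIN request. The login buffer is laid out as a fixed
 * header, an array of req_tx_queues TX sub-CRQ numbers, an array of
 * req_rx_queues RX sub-CRQ numbers, and finally the client data records
 * (OS name, LPAR name, device name) filled in by vnic_add_client_data().
 * The separately mapped response buffer is described to the server through
 * login_rsp_ioba/login_rsp_len in the header.
 */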
4831 static int send_login(struct ibmvnic_adapter *adapter)
4832 {
4833 struct ibmvnic_login_rsp_buffer *login_rsp_buffer;
4834 struct ibmvnic_login_buffer *login_buffer;
4835 struct device *dev = &adapter->vdev->dev;
4836 struct vnic_login_client_data *vlcd;
4837 dma_addr_t rsp_buffer_token;
4838 dma_addr_t buffer_token;
4839 size_t rsp_buffer_size;
4840 union ibmvnic_crq crq;
4841 int client_data_len;
4842 size_t buffer_size;
4843 __be64 *tx_list_p;
4844 __be64 *rx_list_p;
4845 int rc;
4846 int i;
4847
4848 if (!adapter->tx_scrq || !adapter->rx_scrq) {
4849 netdev_err(adapter->netdev,
4850 "RX or TX queues are not allocated, device login failed\n");
4851 return -ENOMEM;
4852 }
4853
4854 release_login_buffer(adapter);
4855 release_login_rsp_buffer(adapter);
4856
4857 client_data_len = vnic_client_data_len(adapter);
4858
4859 buffer_size =
4860 sizeof(struct ibmvnic_login_buffer) +
4861 sizeof(u64) * (adapter->req_tx_queues + adapter->req_rx_queues) +
4862 client_data_len;
4863
4864 login_buffer = kzalloc(buffer_size, GFP_ATOMIC);
4865 if (!login_buffer)
4866 goto buf_alloc_failed;
4867
4868 buffer_token = dma_map_single(dev, login_buffer, buffer_size,
4869 DMA_TO_DEVICE);
4870 if (dma_mapping_error(dev, buffer_token)) {
4871 dev_err(dev, "Couldn't map login buffer\n");
4872 goto buf_map_failed;
4873 }
4874
4875 rsp_buffer_size = sizeof(struct ibmvnic_login_rsp_buffer) +
4876 sizeof(u64) * adapter->req_tx_queues +
4877 sizeof(u64) * adapter->req_rx_queues +
4878 sizeof(u64) * adapter->req_rx_queues +
4879 sizeof(u8) * IBMVNIC_TX_DESC_VERSIONS;
4880
4881 login_rsp_buffer = kmalloc(rsp_buffer_size, GFP_ATOMIC);
4882 if (!login_rsp_buffer)
4883 goto buf_rsp_alloc_failed;
4884
4885 rsp_buffer_token = dma_map_single(dev, login_rsp_buffer,
4886 rsp_buffer_size, DMA_FROM_DEVICE);
4887 if (dma_mapping_error(dev, rsp_buffer_token)) {
4888 dev_err(dev, "Couldn't map login rsp buffer\n");
4889 goto buf_rsp_map_failed;
4890 }
4891
4892 adapter->login_buf = login_buffer;
4893 adapter->login_buf_token = buffer_token;
4894 adapter->login_buf_sz = buffer_size;
4895 adapter->login_rsp_buf = login_rsp_buffer;
4896 adapter->login_rsp_buf_token = rsp_buffer_token;
4897 adapter->login_rsp_buf_sz = rsp_buffer_size;
4898
4899 login_buffer->len = cpu_to_be32(buffer_size);
4900 login_buffer->version = cpu_to_be32(INITIAL_VERSION_LB);
4901 login_buffer->num_txcomp_subcrqs = cpu_to_be32(adapter->req_tx_queues);
4902 login_buffer->off_txcomp_subcrqs =
4903 cpu_to_be32(sizeof(struct ibmvnic_login_buffer));
4904 login_buffer->num_rxcomp_subcrqs = cpu_to_be32(adapter->req_rx_queues);
4905 login_buffer->off_rxcomp_subcrqs =
4906 cpu_to_be32(sizeof(struct ibmvnic_login_buffer) +
4907 sizeof(u64) * adapter->req_tx_queues);
4908 login_buffer->login_rsp_ioba = cpu_to_be32(rsp_buffer_token);
4909 login_buffer->login_rsp_len = cpu_to_be32(rsp_buffer_size);
4910
4911 tx_list_p = (__be64 *)((char *)login_buffer +
4912 sizeof(struct ibmvnic_login_buffer));
4913 rx_list_p = (__be64 *)((char *)login_buffer +
4914 sizeof(struct ibmvnic_login_buffer) +
4915 sizeof(u64) * adapter->req_tx_queues);
4916
4917 for (i = 0; i < adapter->req_tx_queues; i++) {
4918 if (adapter->tx_scrq[i]) {
4919 tx_list_p[i] =
4920 cpu_to_be64(adapter->tx_scrq[i]->crq_num);
4921 }
4922 }
4923
4924 for (i = 0; i < adapter->req_rx_queues; i++) {
4925 if (adapter->rx_scrq[i]) {
4926 rx_list_p[i] =
4927 cpu_to_be64(adapter->rx_scrq[i]->crq_num);
4928 }
4929 }
4930
4931 /* Insert vNIC login client data */
4932 vlcd = (struct vnic_login_client_data *)
4933 ((char *)rx_list_p + (sizeof(u64) * adapter->req_rx_queues));
4934 login_buffer->client_data_offset =
4935 cpu_to_be32((char *)vlcd - (char *)login_buffer);
4936 login_buffer->client_data_len = cpu_to_be32(client_data_len);
4937
4938 vnic_add_client_data(adapter, vlcd);
4939
4940 netdev_dbg(adapter->netdev, "Login Buffer:\n");
4941 for (i = 0; i < (adapter->login_buf_sz - 1) / 8 + 1; i++) {
4942 netdev_dbg(adapter->netdev, "%016lx\n",
4943 ((unsigned long *)(adapter->login_buf))[i]);
4944 }
4945
4946 memset(&crq, 0, sizeof(crq));
4947 crq.login.first = IBMVNIC_CRQ_CMD;
4948 crq.login.cmd = LOGIN;
4949 crq.login.ioba = cpu_to_be32(buffer_token);
4950 crq.login.len = cpu_to_be32(buffer_size);
4951
4952 adapter->login_pending = true;
4953 rc = ibmvnic_send_crq(adapter, &crq);
4954 if (rc) {
4955 adapter->login_pending = false;
4956 netdev_err(adapter->netdev, "Failed to send login, rc=%d\n", rc);
4957 goto buf_send_failed;
4958 }
4959
4960 return 0;
4961
4962 buf_send_failed:
4963 dma_unmap_single(dev, rsp_buffer_token, rsp_buffer_size,
4964 DMA_FROM_DEVICE);
4965 buf_rsp_map_failed:
4966 kfree(login_rsp_buffer);
4967 adapter->login_rsp_buf = NULL;
4968 buf_rsp_alloc_failed:
4969 dma_unmap_single(dev, buffer_token, buffer_size, DMA_TO_DEVICE);
4970 buf_map_failed:
4971 kfree(login_buffer);
4972 adapter->login_buf = NULL;
4973 buf_alloc_failed:
4974 return -ENOMEM;
4975 }
4976
4977 static int send_request_map(struct ibmvnic_adapter *adapter, dma_addr_t addr,
4978 u32 len, u8 map_id)
4979 {
4980 union ibmvnic_crq crq;
4981
4982 memset(&crq, 0, sizeof(crq));
4983 crq.request_map.first = IBMVNIC_CRQ_CMD;
4984 crq.request_map.cmd = REQUEST_MAP;
4985 crq.request_map.map_id = map_id;
4986 crq.request_map.ioba = cpu_to_be32(addr);
4987 crq.request_map.len = cpu_to_be32(len);
4988 return ibmvnic_send_crq(adapter, &crq);
4989 }
4990
4991 static int send_request_unmap(struct ibmvnic_adapter *adapter, u8 map_id)
4992 {
4993 union ibmvnic_crq crq;
4994
4995 memset(&crq, 0, sizeof(crq));
4996 crq.request_unmap.first = IBMVNIC_CRQ_CMD;
4997 crq.request_unmap.cmd = REQUEST_UNMAP;
4998 crq.request_unmap.map_id = map_id;
4999 return ibmvnic_send_crq(adapter, &crq);
5000 }
5001
5002 static void send_query_map(struct ibmvnic_adapter *adapter)
5003 {
5004 union ibmvnic_crq crq;
5005
5006 memset(&crq, 0, sizeof(crq));
5007 crq.query_map.first = IBMVNIC_CRQ_CMD;
5008 crq.query_map.cmd = QUERY_MAP;
5009 ibmvnic_send_crq(adapter, &crq);
5010 }
5011
5012 /* Send a series of CRQs requesting various capabilities of the VNIC server */
5013 static void send_query_cap(struct ibmvnic_adapter *adapter)
5014 {
5015 union ibmvnic_crq crq;
5016 int cap_reqs;
5017
5018 /* We send out 25 QUERY_CAPABILITY CRQs below. Initialize this count
5019 * upfront. When the tasklet receives a response to all of these, it
5020 * can send out the next protocol message (REQUEST_CAPABILITY).
5021 */
5022 cap_reqs = 25;
5023
5024 atomic_set(&adapter->running_cap_crqs, cap_reqs);
5025
5026 memset(&crq, 0, sizeof(crq));
5027 crq.query_capability.first = IBMVNIC_CRQ_CMD;
5028 crq.query_capability.cmd = QUERY_CAPABILITY;
5029
5030 crq.query_capability.capability = cpu_to_be16(MIN_TX_QUEUES);
5031 ibmvnic_send_crq(adapter, &crq);
5032 cap_reqs--;
5033
5034 crq.query_capability.capability = cpu_to_be16(MIN_RX_QUEUES);
5035 ibmvnic_send_crq(adapter, &crq);
5036 cap_reqs--;
5037
5038 crq.query_capability.capability = cpu_to_be16(MIN_RX_ADD_QUEUES);
5039 ibmvnic_send_crq(adapter, &crq);
5040 cap_reqs--;
5041
5042 crq.query_capability.capability = cpu_to_be16(MAX_TX_QUEUES);
5043 ibmvnic_send_crq(adapter, &crq);
5044 cap_reqs--;
5045
5046 crq.query_capability.capability = cpu_to_be16(MAX_RX_QUEUES);
5047 ibmvnic_send_crq(adapter, &crq);
5048 cap_reqs--;
5049
5050 crq.query_capability.capability = cpu_to_be16(MAX_RX_ADD_QUEUES);
5051 ibmvnic_send_crq(adapter, &crq);
5052 cap_reqs--;
5053
5054 crq.query_capability.capability =
5055 cpu_to_be16(MIN_TX_ENTRIES_PER_SUBCRQ);
5056 ibmvnic_send_crq(adapter, &crq);
5057 cap_reqs--;
5058
5059 crq.query_capability.capability =
5060 cpu_to_be16(MIN_RX_ADD_ENTRIES_PER_SUBCRQ);
5061 ibmvnic_send_crq(adapter, &crq);
5062 cap_reqs--;
5063
5064 crq.query_capability.capability =
5065 cpu_to_be16(MAX_TX_ENTRIES_PER_SUBCRQ);
5066 ibmvnic_send_crq(adapter, &crq);
5067 cap_reqs--;
5068
5069 crq.query_capability.capability =
5070 cpu_to_be16(MAX_RX_ADD_ENTRIES_PER_SUBCRQ);
5071 ibmvnic_send_crq(adapter, &crq);
5072 cap_reqs--;
5073
5074 crq.query_capability.capability = cpu_to_be16(TCP_IP_OFFLOAD);
5075 ibmvnic_send_crq(adapter, &crq);
5076 cap_reqs--;
5077
5078 crq.query_capability.capability = cpu_to_be16(PROMISC_SUPPORTED);
5079 ibmvnic_send_crq(adapter, &crq);
5080 cap_reqs--;
5081
5082 crq.query_capability.capability = cpu_to_be16(MIN_MTU);
5083 ibmvnic_send_crq(adapter, &crq);
5084 cap_reqs--;
5085
5086 crq.query_capability.capability = cpu_to_be16(MAX_MTU);
5087 ibmvnic_send_crq(adapter, &crq);
5088 cap_reqs--;
5089
5090 crq.query_capability.capability = cpu_to_be16(MAX_MULTICAST_FILTERS);
5091 ibmvnic_send_crq(adapter, &crq);
5092 cap_reqs--;
5093
5094 crq.query_capability.capability = cpu_to_be16(VLAN_HEADER_INSERTION);
5095 ibmvnic_send_crq(adapter, &crq);
5096 cap_reqs--;
5097
5098 crq.query_capability.capability = cpu_to_be16(RX_VLAN_HEADER_INSERTION);
5099 ibmvnic_send_crq(adapter, &crq);
5100 cap_reqs--;
5101
5102 crq.query_capability.capability = cpu_to_be16(MAX_TX_SG_ENTRIES);
5103 ibmvnic_send_crq(adapter, &crq);
5104 cap_reqs--;
5105
5106 crq.query_capability.capability = cpu_to_be16(RX_SG_SUPPORTED);
5107 ibmvnic_send_crq(adapter, &crq);
5108 cap_reqs--;
5109
5110 crq.query_capability.capability = cpu_to_be16(OPT_TX_COMP_SUB_QUEUES);
5111 ibmvnic_send_crq(adapter, &crq);
5112 cap_reqs--;
5113
5114 crq.query_capability.capability = cpu_to_be16(OPT_RX_COMP_QUEUES);
5115 ibmvnic_send_crq(adapter, &crq);
5116 cap_reqs--;
5117
5118 crq.query_capability.capability =
5119 cpu_to_be16(OPT_RX_BUFADD_Q_PER_RX_COMP_Q);
5120 ibmvnic_send_crq(adapter, &crq);
5121 cap_reqs--;
5122
5123 crq.query_capability.capability =
5124 cpu_to_be16(OPT_TX_ENTRIES_PER_SUBCRQ);
5125 ibmvnic_send_crq(adapter, &crq);
5126 cap_reqs--;
5127
5128 crq.query_capability.capability =
5129 cpu_to_be16(OPT_RXBA_ENTRIES_PER_SUBCRQ);
5130 ibmvnic_send_crq(adapter, &crq);
5131 cap_reqs--;
5132
5133 crq.query_capability.capability = cpu_to_be16(TX_RX_DESC_REQ);
5134
5135 ibmvnic_send_crq(adapter, &crq);
5136 cap_reqs--;
5137
5138 /* Keep at end to catch any discrepancy between expected and actual
5139 * CRQs sent.
5140 */
5141 WARN_ON(cap_reqs != 0);
5142 }
5143
5144 static void send_query_ip_offload(struct ibmvnic_adapter *adapter)
5145 {
5146 int buf_sz = sizeof(struct ibmvnic_query_ip_offload_buffer);
5147 struct device *dev = &adapter->vdev->dev;
5148 union ibmvnic_crq crq;
5149
5150 adapter->ip_offload_tok =
5151 dma_map_single(dev,
5152 &adapter->ip_offload_buf,
5153 buf_sz,
5154 DMA_FROM_DEVICE);
5155
5156 if (dma_mapping_error(dev, adapter->ip_offload_tok)) {
5157 if (!firmware_has_feature(FW_FEATURE_CMO))
5158 dev_err(dev, "Couldn't map offload buffer\n");
5159 return;
5160 }
5161
5162 memset(&crq, 0, sizeof(crq));
5163 crq.query_ip_offload.first = IBMVNIC_CRQ_CMD;
5164 crq.query_ip_offload.cmd = QUERY_IP_OFFLOAD;
5165 crq.query_ip_offload.len = cpu_to_be32(buf_sz);
5166 crq.query_ip_offload.ioba =
5167 cpu_to_be32(adapter->ip_offload_tok);
5168
5169 ibmvnic_send_crq(adapter, &crq);
5170 }
5171
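/* Translate the offload capabilities reported in the QUERY_IP_OFFLOAD
 * response into both a CONTROL_IP_OFFLOAD request for the server and the
 * netdev hw_features/features advertised to the stack (checksum offload,
 * TSO/TSO6); large receive remains disabled. When this runs outside of
 * probe, features that are no longer supported are cleared and previously
 * wanted features that became available are re-enabled.
 */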
5172 static void send_control_ip_offload(struct ibmvnic_adapter *adapter)
5173 {
5174 struct ibmvnic_control_ip_offload_buffer *ctrl_buf = &adapter->ip_offload_ctrl;
5175 struct ibmvnic_query_ip_offload_buffer *buf = &adapter->ip_offload_buf;
5176 struct device *dev = &adapter->vdev->dev;
5177 netdev_features_t old_hw_features = 0;
5178 union ibmvnic_crq crq;
5179
5180 adapter->ip_offload_ctrl_tok =
5181 dma_map_single(dev,
5182 ctrl_buf,
5183 sizeof(adapter->ip_offload_ctrl),
5184 DMA_TO_DEVICE);
5185
5186 if (dma_mapping_error(dev, adapter->ip_offload_ctrl_tok)) {
5187 dev_err(dev, "Couldn't map ip offload control buffer\n");
5188 return;
5189 }
5190
5191 ctrl_buf->len = cpu_to_be32(sizeof(adapter->ip_offload_ctrl));
5192 ctrl_buf->version = cpu_to_be32(INITIAL_VERSION_IOB);
5193 ctrl_buf->ipv4_chksum = buf->ipv4_chksum;
5194 ctrl_buf->ipv6_chksum = buf->ipv6_chksum;
5195 ctrl_buf->tcp_ipv4_chksum = buf->tcp_ipv4_chksum;
5196 ctrl_buf->udp_ipv4_chksum = buf->udp_ipv4_chksum;
5197 ctrl_buf->tcp_ipv6_chksum = buf->tcp_ipv6_chksum;
5198 ctrl_buf->udp_ipv6_chksum = buf->udp_ipv6_chksum;
5199 ctrl_buf->large_tx_ipv4 = buf->large_tx_ipv4;
5200 ctrl_buf->large_tx_ipv6 = buf->large_tx_ipv6;
5201
5202 /* large_rx disabled for now, additional features needed */
5203 ctrl_buf->large_rx_ipv4 = 0;
5204 ctrl_buf->large_rx_ipv6 = 0;
5205
5206 if (adapter->state != VNIC_PROBING) {
5207 old_hw_features = adapter->netdev->hw_features;
5208 adapter->netdev->hw_features = 0;
5209 }
5210
5211 adapter->netdev->hw_features = NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO;
5212
5213 if (buf->tcp_ipv4_chksum || buf->udp_ipv4_chksum)
5214 adapter->netdev->hw_features |= NETIF_F_IP_CSUM;
5215
5216 if (buf->tcp_ipv6_chksum || buf->udp_ipv6_chksum)
5217 adapter->netdev->hw_features |= NETIF_F_IPV6_CSUM;
5218
5219 if ((adapter->netdev->features &
5220 (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)))
5221 adapter->netdev->hw_features |= NETIF_F_RXCSUM;
5222
5223 if (buf->large_tx_ipv4)
5224 adapter->netdev->hw_features |= NETIF_F_TSO;
5225 if (buf->large_tx_ipv6)
5226 adapter->netdev->hw_features |= NETIF_F_TSO6;
5227
5228 if (adapter->state == VNIC_PROBING) {
5229 adapter->netdev->features |= adapter->netdev->hw_features;
5230 } else if (old_hw_features != adapter->netdev->hw_features) {
5231 netdev_features_t tmp = 0;
5232
5233 /* disable features no longer supported */
5234 adapter->netdev->features &= adapter->netdev->hw_features;
5235 /* turn on features now supported if previously enabled */
5236 tmp = (old_hw_features ^ adapter->netdev->hw_features) &
5237 adapter->netdev->hw_features;
5238 adapter->netdev->features |=
5239 tmp & adapter->netdev->wanted_features;
5240 }
5241
5242 memset(&crq, 0, sizeof(crq));
5243 crq.control_ip_offload.first = IBMVNIC_CRQ_CMD;
5244 crq.control_ip_offload.cmd = CONTROL_IP_OFFLOAD;
5245 crq.control_ip_offload.len =
5246 cpu_to_be32(sizeof(adapter->ip_offload_ctrl));
5247 crq.control_ip_offload.ioba = cpu_to_be32(adapter->ip_offload_ctrl_tok);
5248 ibmvnic_send_crq(adapter, &crq);
5249 }
5250
5251 static void handle_vpd_size_rsp(union ibmvnic_crq *crq,
5252 struct ibmvnic_adapter *adapter)
5253 {
5254 struct device *dev = &adapter->vdev->dev;
5255
5256 if (crq->get_vpd_size_rsp.rc.code) {
5257 dev_err(dev, "Error retrieving VPD size, rc=%x\n",
5258 crq->get_vpd_size_rsp.rc.code);
5259 complete(&adapter->fw_done);
5260 return;
5261 }
5262
5263 adapter->vpd->len = be64_to_cpu(crq->get_vpd_size_rsp.len);
5264 complete(&adapter->fw_done);
5265 }
5266
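/* Parse the VPD response to extract the firmware level. The firmware string
 * is located by finding the "RM" keyword in the VPD buffer: the byte at
 * offset +2 from the keyword gives the string length and the string itself
 * starts at offset +3. Bounds are checked against the reported VPD length
 * before copying into adapter->fw_version, which otherwise falls back to
 * "N/A".
 */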
5267 static void handle_vpd_rsp(union ibmvnic_crq *crq,
5268 struct ibmvnic_adapter *adapter)
5269 {
5270 struct device *dev = &adapter->vdev->dev;
5271 unsigned char *substr = NULL;
5272 u8 fw_level_len = 0;
5273
5274 memset(adapter->fw_version, 0, 32);
5275
5276 dma_unmap_single(dev, adapter->vpd->dma_addr, adapter->vpd->len,
5277 DMA_FROM_DEVICE);
5278
5279 if (crq->get_vpd_rsp.rc.code) {
5280 dev_err(dev, "Error retrieving VPD from device, rc=%x\n",
5281 crq->get_vpd_rsp.rc.code);
5282 goto complete;
5283 }
5284
5285 /* get the position of the firmware version info
5286 * located after the ASCII 'RM' substring in the buffer
5287 */
5288 substr = strnstr(adapter->vpd->buff, "RM", adapter->vpd->len);
5289 if (!substr) {
5290 dev_info(dev, "Warning - No FW level has been provided in the VPD buffer by the VIOS Server\n");
5291 goto complete;
5292 }
5293
5294 /* get length of firmware level ASCII substring */
5295 if ((substr + 2) < (adapter->vpd->buff + adapter->vpd->len)) {
5296 fw_level_len = *(substr + 2);
5297 } else {
5298 dev_info(dev, "Length of FW substr extrapolated VPD buff\n");
5299 goto complete;
5300 }
5301
5302 /* copy firmware version string from vpd into adapter */
5303 if ((substr + 3 + fw_level_len) <
5304 (adapter->vpd->buff + adapter->vpd->len)) {
5305 strscpy(adapter->fw_version, substr + 3,
5306 sizeof(adapter->fw_version));
5307 } else {
5308 dev_info(dev, "FW substr extrapolated VPD buff\n");
5309 }
5310
5311 complete:
5312 if (adapter->fw_version[0] == '\0')
5313 strscpy((char *)adapter->fw_version, "N/A", sizeof(adapter->fw_version));
5314 complete(&adapter->fw_done);
5315 }
5316
5317 static void handle_query_ip_offload_rsp(struct ibmvnic_adapter *adapter)
5318 {
5319 struct device *dev = &adapter->vdev->dev;
5320 struct ibmvnic_query_ip_offload_buffer *buf = &adapter->ip_offload_buf;
5321 int i;
5322
5323 dma_unmap_single(dev, adapter->ip_offload_tok,
5324 sizeof(adapter->ip_offload_buf), DMA_FROM_DEVICE);
5325
5326 netdev_dbg(adapter->netdev, "Query IP Offload Buffer:\n");
5327 for (i = 0; i < (sizeof(adapter->ip_offload_buf) - 1) / 8 + 1; i++)
5328 netdev_dbg(adapter->netdev, "%016lx\n",
5329 ((unsigned long *)(buf))[i]);
5330
5331 netdev_dbg(adapter->netdev, "ipv4_chksum = %d\n", buf->ipv4_chksum);
5332 netdev_dbg(adapter->netdev, "ipv6_chksum = %d\n", buf->ipv6_chksum);
5333 netdev_dbg(adapter->netdev, "tcp_ipv4_chksum = %d\n",
5334 buf->tcp_ipv4_chksum);
5335 netdev_dbg(adapter->netdev, "tcp_ipv6_chksum = %d\n",
5336 buf->tcp_ipv6_chksum);
5337 netdev_dbg(adapter->netdev, "udp_ipv4_chksum = %d\n",
5338 buf->udp_ipv4_chksum);
5339 netdev_dbg(adapter->netdev, "udp_ipv6_chksum = %d\n",
5340 buf->udp_ipv6_chksum);
5341 netdev_dbg(adapter->netdev, "large_tx_ipv4 = %d\n",
5342 buf->large_tx_ipv4);
5343 netdev_dbg(adapter->netdev, "large_tx_ipv6 = %d\n",
5344 buf->large_tx_ipv6);
5345 netdev_dbg(adapter->netdev, "large_rx_ipv4 = %d\n",
5346 buf->large_rx_ipv4);
5347 netdev_dbg(adapter->netdev, "large_rx_ipv6 = %d\n",
5348 buf->large_rx_ipv6);
5349 netdev_dbg(adapter->netdev, "max_ipv4_hdr_sz = %d\n",
5350 buf->max_ipv4_header_size);
5351 netdev_dbg(adapter->netdev, "max_ipv6_hdr_sz = %d\n",
5352 buf->max_ipv6_header_size);
5353 netdev_dbg(adapter->netdev, "max_tcp_hdr_size = %d\n",
5354 buf->max_tcp_header_size);
5355 netdev_dbg(adapter->netdev, "max_udp_hdr_size = %d\n",
5356 buf->max_udp_header_size);
5357 netdev_dbg(adapter->netdev, "max_large_tx_size = %d\n",
5358 buf->max_large_tx_size);
5359 netdev_dbg(adapter->netdev, "max_large_rx_size = %d\n",
5360 buf->max_large_rx_size);
5361 netdev_dbg(adapter->netdev, "ipv6_ext_hdr = %d\n",
5362 buf->ipv6_extension_header);
5363 netdev_dbg(adapter->netdev, "tcp_pseudosum_req = %d\n",
5364 buf->tcp_pseudosum_req);
5365 netdev_dbg(adapter->netdev, "num_ipv6_ext_hd = %d\n",
5366 buf->num_ipv6_ext_headers);
5367 netdev_dbg(adapter->netdev, "off_ipv6_ext_hd = %d\n",
5368 buf->off_ipv6_ext_headers);
5369
5370 send_control_ip_offload(adapter);
5371 }
5372
5373 static const char *ibmvnic_fw_err_cause(u16 cause)
5374 {
5375 switch (cause) {
5376 case ADAPTER_PROBLEM:
5377 return "adapter problem";
5378 case BUS_PROBLEM:
5379 return "bus problem";
5380 case FW_PROBLEM:
5381 return "firmware problem";
5382 case DD_PROBLEM:
5383 return "device driver problem";
5384 case EEH_RECOVERY:
5385 return "EEH recovery";
5386 case FW_UPDATED:
5387 return "firmware updated";
5388 case LOW_MEMORY:
5389 return "low Memory";
5390 default:
5391 return "unknown";
5392 }
5393 }
5394
5395 static void handle_error_indication(union ibmvnic_crq *crq,
5396 struct ibmvnic_adapter *adapter)
5397 {
5398 struct device *dev = &adapter->vdev->dev;
5399 u16 cause;
5400
5401 cause = be16_to_cpu(crq->error_indication.error_cause);
5402
5403 dev_warn_ratelimited(dev,
5404 "Firmware reports %serror, cause: %s. Starting recovery...\n",
5405 crq->error_indication.flags
5406 & IBMVNIC_FATAL_ERROR ? "FATAL " : "",
5407 ibmvnic_fw_err_cause(cause));
5408
5409 if (crq->error_indication.flags & IBMVNIC_FATAL_ERROR)
5410 ibmvnic_reset(adapter, VNIC_RESET_FATAL);
5411 else
5412 ibmvnic_reset(adapter, VNIC_RESET_NON_FATAL);
5413 }
5414
5415 static int handle_change_mac_rsp(union ibmvnic_crq *crq,
5416 struct ibmvnic_adapter *adapter)
5417 {
5418 struct net_device *netdev = adapter->netdev;
5419 struct device *dev = &adapter->vdev->dev;
5420 long rc;
5421
5422 rc = crq->change_mac_addr_rsp.rc.code;
5423 if (rc) {
5424 dev_err(dev, "Error %ld in CHANGE_MAC_ADDR_RSP\n", rc);
5425 goto out;
5426 }
5427 /* crq->change_mac_addr.mac_addr is the requested one
5428 * crq->change_mac_addr_rsp.mac_addr is the returned valid one.
5429 */
5430 eth_hw_addr_set(netdev, &crq->change_mac_addr_rsp.mac_addr[0]);
5431 ether_addr_copy(adapter->mac_addr,
5432 &crq->change_mac_addr_rsp.mac_addr[0]);
5433 out:
5434 complete(&adapter->fw_done);
5435 return rc;
5436 }
5437
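/* Response handler for REQUEST_CAPABILITY: on PARTIALSUCCESS adopt the
 * server-suggested value (reverting to the fallback MTU when the MTU was
 * rejected) and resend the request; once all outstanding capability
 * requests have completed, move on to querying IP offload support.
 */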
5438 static void handle_request_cap_rsp(union ibmvnic_crq *crq,
5439 struct ibmvnic_adapter *adapter)
5440 {
5441 struct device *dev = &adapter->vdev->dev;
5442 u64 *req_value;
5443 char *name;
5444
5445 atomic_dec(&adapter->running_cap_crqs);
5446 netdev_dbg(adapter->netdev, "Outstanding request-caps: %d\n",
5447 atomic_read(&adapter->running_cap_crqs));
5448 switch (be16_to_cpu(crq->request_capability_rsp.capability)) {
5449 case REQ_TX_QUEUES:
5450 req_value = &adapter->req_tx_queues;
5451 name = "tx";
5452 break;
5453 case REQ_RX_QUEUES:
5454 req_value = &adapter->req_rx_queues;
5455 name = "rx";
5456 break;
5457 case REQ_RX_ADD_QUEUES:
5458 req_value = &adapter->req_rx_add_queues;
5459 name = "rx_add";
5460 break;
5461 case REQ_TX_ENTRIES_PER_SUBCRQ:
5462 req_value = &adapter->req_tx_entries_per_subcrq;
5463 name = "tx_entries_per_subcrq";
5464 break;
5465 case REQ_RX_ADD_ENTRIES_PER_SUBCRQ:
5466 req_value = &adapter->req_rx_add_entries_per_subcrq;
5467 name = "rx_add_entries_per_subcrq";
5468 break;
5469 case REQ_MTU:
5470 req_value = &adapter->req_mtu;
5471 name = "mtu";
5472 break;
5473 case PROMISC_REQUESTED:
5474 req_value = &adapter->promisc;
5475 name = "promisc";
5476 break;
5477 default:
5478 dev_err(dev, "Got invalid cap request rsp %d\n",
5479 crq->request_capability.capability);
5480 return;
5481 }
5482
5483 switch (crq->request_capability_rsp.rc.code) {
5484 case SUCCESS:
5485 break;
5486 case PARTIALSUCCESS:
5487 dev_info(dev, "req=%lld, rsp=%ld in %s queue, retrying.\n",
5488 *req_value,
5489 (long)be64_to_cpu(crq->request_capability_rsp.number),
5490 name);
5491
5492 if (be16_to_cpu(crq->request_capability_rsp.capability) ==
5493 REQ_MTU) {
5494 pr_err("mtu of %llu is not supported. Reverting.\n",
5495 *req_value);
5496 *req_value = adapter->fallback.mtu;
5497 } else {
5498 *req_value =
5499 be64_to_cpu(crq->request_capability_rsp.number);
5500 }
5501
5502 send_request_cap(adapter, 1);
5503 return;
5504 default:
5505 dev_err(dev, "Error %d in request cap rsp\n",
5506 crq->request_capability_rsp.rc.code);
5507 return;
5508 }
5509
5510 /* Done receiving requested capabilities, query IP offload support */
5511 if (atomic_read(&adapter->running_cap_crqs) == 0)
5512 send_query_ip_offload(adapter);
5513 }
5514
5515 static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
5516 struct ibmvnic_adapter *adapter)
5517 {
5518 struct device *dev = &adapter->vdev->dev;
5519 struct net_device *netdev = adapter->netdev;
5520 struct ibmvnic_login_rsp_buffer *login_rsp = adapter->login_rsp_buf;
5521 struct ibmvnic_login_buffer *login = adapter->login_buf;
5522 u64 *tx_handle_array;
5523 u64 *rx_handle_array;
5524 int num_tx_pools;
5525 int num_rx_pools;
5526 u64 *size_array;
5527 u32 rsp_len;
5528 int i;
5529
5530 /* CHECK: Test/set of login_pending does not need to be atomic
5531 * because only ibmvnic_tasklet tests/clears this.
5532 */
5533 if (!adapter->login_pending) {
5534 netdev_warn(netdev, "Ignoring unexpected login response\n");
5535 return 0;
5536 }
5537 adapter->login_pending = false;
5538
5539 /* If the number of queues requested can't be allocated by the
5540 * server, the login response will return with code 1. We will need
5541 * to resend the login buffer with fewer queues requested.
5542 */
5543 if (login_rsp_crq->generic.rc.code) {
5544 adapter->init_done_rc = login_rsp_crq->generic.rc.code;
5545 complete(&adapter->init_done);
5546 return 0;
5547 }
5548
5549 if (adapter->failover_pending) {
5550 adapter->init_done_rc = -EAGAIN;
5551 netdev_dbg(netdev, "Failover pending, ignoring login response\n");
5552 complete(&adapter->init_done);
5553 /* login response buffer will be released on reset */
5554 return 0;
5555 }
5556
5557 netdev->mtu = adapter->req_mtu - ETH_HLEN;
5558
5559 netdev_dbg(adapter->netdev, "Login Response Buffer:\n");
5560 for (i = 0; i < (adapter->login_rsp_buf_sz - 1) / 8 + 1; i++) {
5561 netdev_dbg(adapter->netdev, "%016lx\n",
5562 ((unsigned long *)(adapter->login_rsp_buf))[i]);
5563 }
5564
5565 /* Sanity checks */
5566 if (login->num_txcomp_subcrqs != login_rsp->num_txsubm_subcrqs ||
5567 (be32_to_cpu(login->num_rxcomp_subcrqs) *
5568 adapter->req_rx_add_queues !=
5569 be32_to_cpu(login_rsp->num_rxadd_subcrqs))) {
5570 dev_err(dev, "FATAL: Inconsistent login and login rsp\n");
5571 ibmvnic_reset(adapter, VNIC_RESET_FATAL);
5572 return -EIO;
5573 }
5574
5575 rsp_len = be32_to_cpu(login_rsp->len);
5576 if (be32_to_cpu(login->login_rsp_len) < rsp_len ||
5577 rsp_len <= be32_to_cpu(login_rsp->off_txsubm_subcrqs) ||
5578 rsp_len <= be32_to_cpu(login_rsp->off_rxadd_subcrqs) ||
5579 rsp_len <= be32_to_cpu(login_rsp->off_rxadd_buff_size) ||
5580 rsp_len <= be32_to_cpu(login_rsp->off_supp_tx_desc)) {
5581 /* This can happen if a login request times out and there are
5582 * two outstanding login requests: the LOGIN_RSP CRQ could have
5583 * been for the older request, so we would be parsing the newer
5584 * response buffer, which may still be incomplete.
5585 */
5586 dev_err(dev, "FATAL: Login rsp offsets/lengths invalid\n");
5587 ibmvnic_reset(adapter, VNIC_RESET_FATAL);
5588 return -EIO;
5589 }
5590
5591 size_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
5592 be32_to_cpu(adapter->login_rsp_buf->off_rxadd_buff_size));
5593 /* variable buffer sizes are not supported, so just read the
5594 * first entry.
5595 */
5596 adapter->cur_rx_buf_sz = be64_to_cpu(size_array[0]);
5597
5598 num_tx_pools = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
5599 num_rx_pools = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
5600
5601 tx_handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
5602 be32_to_cpu(adapter->login_rsp_buf->off_txsubm_subcrqs));
5603 rx_handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
5604 be32_to_cpu(adapter->login_rsp_buf->off_rxadd_subcrqs));
5605
5606 for (i = 0; i < num_tx_pools; i++)
5607 adapter->tx_scrq[i]->handle = tx_handle_array[i];
5608
5609 for (i = 0; i < num_rx_pools; i++)
5610 adapter->rx_scrq[i]->handle = rx_handle_array[i];
5611
5612 adapter->num_active_tx_scrqs = num_tx_pools;
5613 adapter->num_active_rx_scrqs = num_rx_pools;
5614 release_login_rsp_buffer(adapter);
5615 release_login_buffer(adapter);
5616 complete(&adapter->init_done);
5617
5618 return 0;
5619 }
5620
5621 static void handle_request_unmap_rsp(union ibmvnic_crq *crq,
5622 struct ibmvnic_adapter *adapter)
5623 {
5624 struct device *dev = &adapter->vdev->dev;
5625 long rc;
5626
5627 rc = crq->request_unmap_rsp.rc.code;
5628 if (rc)
5629 dev_err(dev, "Error %ld in REQUEST_UNMAP_RSP\n", rc);
5630 }
5631
5632 static void handle_query_map_rsp(union ibmvnic_crq *crq,
5633 struct ibmvnic_adapter *adapter)
5634 {
5635 struct net_device *netdev = adapter->netdev;
5636 struct device *dev = &adapter->vdev->dev;
5637 long rc;
5638
5639 rc = crq->query_map_rsp.rc.code;
5640 if (rc) {
5641 dev_err(dev, "Error %ld in QUERY_MAP_RSP\n", rc);
5642 return;
5643 }
5644 netdev_dbg(netdev, "page_size = %d\ntot_pages = %u\nfree_pages = %u\n",
5645 crq->query_map_rsp.page_size,
5646 __be32_to_cpu(crq->query_map_rsp.tot_pages),
5647 __be32_to_cpu(crq->query_map_rsp.free_pages));
5648 }
5649
5650 static void handle_query_cap_rsp(union ibmvnic_crq *crq,
5651 struct ibmvnic_adapter *adapter)
5652 {
5653 struct net_device *netdev = adapter->netdev;
5654 struct device *dev = &adapter->vdev->dev;
5655 long rc;
5656
5657 atomic_dec(&adapter->running_cap_crqs);
5658 netdev_dbg(netdev, "Outstanding queries: %d\n",
5659 atomic_read(&adapter->running_cap_crqs));
5660 rc = crq->query_capability.rc.code;
5661 if (rc) {
5662 dev_err(dev, "Error %ld in QUERY_CAP_RSP\n", rc);
5663 goto out;
5664 }
5665
5666 switch (be16_to_cpu(crq->query_capability.capability)) {
5667 case MIN_TX_QUEUES:
5668 adapter->min_tx_queues =
5669 be64_to_cpu(crq->query_capability.number);
5670 netdev_dbg(netdev, "min_tx_queues = %lld\n",
5671 adapter->min_tx_queues);
5672 break;
5673 case MIN_RX_QUEUES:
5674 adapter->min_rx_queues =
5675 be64_to_cpu(crq->query_capability.number);
5676 netdev_dbg(netdev, "min_rx_queues = %lld\n",
5677 adapter->min_rx_queues);
5678 break;
5679 case MIN_RX_ADD_QUEUES:
5680 adapter->min_rx_add_queues =
5681 be64_to_cpu(crq->query_capability.number);
5682 netdev_dbg(netdev, "min_rx_add_queues = %lld\n",
5683 adapter->min_rx_add_queues);
5684 break;
5685 case MAX_TX_QUEUES:
5686 adapter->max_tx_queues =
5687 be64_to_cpu(crq->query_capability.number);
5688 netdev_dbg(netdev, "max_tx_queues = %lld\n",
5689 adapter->max_tx_queues);
5690 break;
5691 case MAX_RX_QUEUES:
5692 adapter->max_rx_queues =
5693 be64_to_cpu(crq->query_capability.number);
5694 netdev_dbg(netdev, "max_rx_queues = %lld\n",
5695 adapter->max_rx_queues);
5696 break;
5697 case MAX_RX_ADD_QUEUES:
5698 adapter->max_rx_add_queues =
5699 be64_to_cpu(crq->query_capability.number);
5700 netdev_dbg(netdev, "max_rx_add_queues = %lld\n",
5701 adapter->max_rx_add_queues);
5702 break;
5703 case MIN_TX_ENTRIES_PER_SUBCRQ:
5704 adapter->min_tx_entries_per_subcrq =
5705 be64_to_cpu(crq->query_capability.number);
5706 netdev_dbg(netdev, "min_tx_entries_per_subcrq = %lld\n",
5707 adapter->min_tx_entries_per_subcrq);
5708 break;
5709 case MIN_RX_ADD_ENTRIES_PER_SUBCRQ:
5710 adapter->min_rx_add_entries_per_subcrq =
5711 be64_to_cpu(crq->query_capability.number);
5712 netdev_dbg(netdev, "min_rx_add_entrs_per_subcrq = %lld\n",
5713 adapter->min_rx_add_entries_per_subcrq);
5714 break;
5715 case MAX_TX_ENTRIES_PER_SUBCRQ:
5716 adapter->max_tx_entries_per_subcrq =
5717 be64_to_cpu(crq->query_capability.number);
5718 netdev_dbg(netdev, "max_tx_entries_per_subcrq = %lld\n",
5719 adapter->max_tx_entries_per_subcrq);
5720 break;
5721 case MAX_RX_ADD_ENTRIES_PER_SUBCRQ:
5722 adapter->max_rx_add_entries_per_subcrq =
5723 be64_to_cpu(crq->query_capability.number);
5724 netdev_dbg(netdev, "max_rx_add_entrs_per_subcrq = %lld\n",
5725 adapter->max_rx_add_entries_per_subcrq);
5726 break;
5727 case TCP_IP_OFFLOAD:
5728 adapter->tcp_ip_offload =
5729 be64_to_cpu(crq->query_capability.number);
5730 netdev_dbg(netdev, "tcp_ip_offload = %lld\n",
5731 adapter->tcp_ip_offload);
5732 break;
5733 case PROMISC_SUPPORTED:
5734 adapter->promisc_supported =
5735 be64_to_cpu(crq->query_capability.number);
5736 netdev_dbg(netdev, "promisc_supported = %lld\n",
5737 adapter->promisc_supported);
5738 break;
5739 case MIN_MTU:
5740 adapter->min_mtu = be64_to_cpu(crq->query_capability.number);
5741 netdev->min_mtu = adapter->min_mtu - ETH_HLEN;
5742 netdev_dbg(netdev, "min_mtu = %lld\n", adapter->min_mtu);
5743 break;
5744 case MAX_MTU:
5745 adapter->max_mtu = be64_to_cpu(crq->query_capability.number);
5746 netdev->max_mtu = adapter->max_mtu - ETH_HLEN;
5747 netdev_dbg(netdev, "max_mtu = %lld\n", adapter->max_mtu);
5748 break;
5749 case MAX_MULTICAST_FILTERS:
5750 adapter->max_multicast_filters =
5751 be64_to_cpu(crq->query_capability.number);
5752 netdev_dbg(netdev, "max_multicast_filters = %lld\n",
5753 adapter->max_multicast_filters);
5754 break;
5755 case VLAN_HEADER_INSERTION:
5756 adapter->vlan_header_insertion =
5757 be64_to_cpu(crq->query_capability.number);
5758 if (adapter->vlan_header_insertion)
5759 netdev->features |= NETIF_F_HW_VLAN_STAG_TX;
5760 netdev_dbg(netdev, "vlan_header_insertion = %lld\n",
5761 adapter->vlan_header_insertion);
5762 break;
5763 case RX_VLAN_HEADER_INSERTION:
5764 adapter->rx_vlan_header_insertion =
5765 be64_to_cpu(crq->query_capability.number);
5766 netdev_dbg(netdev, "rx_vlan_header_insertion = %lld\n",
5767 adapter->rx_vlan_header_insertion);
5768 break;
5769 case MAX_TX_SG_ENTRIES:
5770 adapter->max_tx_sg_entries =
5771 be64_to_cpu(crq->query_capability.number);
5772 netdev_dbg(netdev, "max_tx_sg_entries = %lld\n",
5773 adapter->max_tx_sg_entries);
5774 break;
5775 case RX_SG_SUPPORTED:
5776 adapter->rx_sg_supported =
5777 be64_to_cpu(crq->query_capability.number);
5778 netdev_dbg(netdev, "rx_sg_supported = %lld\n",
5779 adapter->rx_sg_supported);
5780 break;
5781 case OPT_TX_COMP_SUB_QUEUES:
5782 adapter->opt_tx_comp_sub_queues =
5783 be64_to_cpu(crq->query_capability.number);
5784 netdev_dbg(netdev, "opt_tx_comp_sub_queues = %lld\n",
5785 adapter->opt_tx_comp_sub_queues);
5786 break;
5787 case OPT_RX_COMP_QUEUES:
5788 adapter->opt_rx_comp_queues =
5789 be64_to_cpu(crq->query_capability.number);
5790 netdev_dbg(netdev, "opt_rx_comp_queues = %lld\n",
5791 adapter->opt_rx_comp_queues);
5792 break;
5793 case OPT_RX_BUFADD_Q_PER_RX_COMP_Q:
5794 adapter->opt_rx_bufadd_q_per_rx_comp_q =
5795 be64_to_cpu(crq->query_capability.number);
5796 netdev_dbg(netdev, "opt_rx_bufadd_q_per_rx_comp_q = %lld\n",
5797 adapter->opt_rx_bufadd_q_per_rx_comp_q);
5798 break;
5799 case OPT_TX_ENTRIES_PER_SUBCRQ:
5800 adapter->opt_tx_entries_per_subcrq =
5801 be64_to_cpu(crq->query_capability.number);
5802 netdev_dbg(netdev, "opt_tx_entries_per_subcrq = %lld\n",
5803 adapter->opt_tx_entries_per_subcrq);
5804 break;
5805 case OPT_RXBA_ENTRIES_PER_SUBCRQ:
5806 adapter->opt_rxba_entries_per_subcrq =
5807 be64_to_cpu(crq->query_capability.number);
5808 netdev_dbg(netdev, "opt_rxba_entries_per_subcrq = %lld\n",
5809 adapter->opt_rxba_entries_per_subcrq);
5810 break;
5811 case TX_RX_DESC_REQ:
5812 adapter->tx_rx_desc_req = crq->query_capability.number;
5813 netdev_dbg(netdev, "tx_rx_desc_req = %llx\n",
5814 adapter->tx_rx_desc_req);
5815 break;
5816
5817 default:
5818 netdev_err(netdev, "Got invalid cap rsp %d\n",
5819 crq->query_capability.capability);
5820 }
5821
5822 out:
5823 if (atomic_read(&adapter->running_cap_crqs) == 0)
5824 send_request_cap(adapter, 0);
5825 }
5826
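/* Send QUERY_PHYS_PARMS and wait (up to 10s) for the CRQ response handler
 * to complete fw_done; fw_lock serializes users of fw_done/fw_done_rc.
 */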
5827 static int send_query_phys_parms(struct ibmvnic_adapter *adapter)
5828 {
5829 union ibmvnic_crq crq;
5830 int rc;
5831
5832 memset(&crq, 0, sizeof(crq));
5833 crq.query_phys_parms.first = IBMVNIC_CRQ_CMD;
5834 crq.query_phys_parms.cmd = QUERY_PHYS_PARMS;
5835
5836 mutex_lock(&adapter->fw_lock);
5837 adapter->fw_done_rc = 0;
5838 reinit_completion(&adapter->fw_done);
5839
5840 rc = ibmvnic_send_crq(adapter, &crq);
5841 if (rc) {
5842 mutex_unlock(&adapter->fw_lock);
5843 return rc;
5844 }
5845
5846 rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
5847 if (rc) {
5848 mutex_unlock(&adapter->fw_lock);
5849 return rc;
5850 }
5851
5852 mutex_unlock(&adapter->fw_lock);
5853 return adapter->fw_done_rc ? -EIO : 0;
5854 }
5855
5856 static int handle_query_phys_parms_rsp(union ibmvnic_crq *crq,
5857 struct ibmvnic_adapter *adapter)
5858 {
5859 struct net_device *netdev = adapter->netdev;
5860 int rc;
5861 __be32 rspeed = cpu_to_be32(crq->query_phys_parms_rsp.speed);
5862
5863 rc = crq->query_phys_parms_rsp.rc.code;
5864 if (rc) {
5865 netdev_err(netdev, "Error %d in QUERY_PHYS_PARMS\n", rc);
5866 return rc;
5867 }
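/* Translate the reported link speed into the corresponding ethtool SPEED_* value. */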
5868 switch (rspeed) {
5869 case IBMVNIC_10MBPS:
5870 adapter->speed = SPEED_10;
5871 break;
5872 case IBMVNIC_100MBPS:
5873 adapter->speed = SPEED_100;
5874 break;
5875 case IBMVNIC_1GBPS:
5876 adapter->speed = SPEED_1000;
5877 break;
5878 case IBMVNIC_10GBPS:
5879 adapter->speed = SPEED_10000;
5880 break;
5881 case IBMVNIC_25GBPS:
5882 adapter->speed = SPEED_25000;
5883 break;
5884 case IBMVNIC_40GBPS:
5885 adapter->speed = SPEED_40000;
5886 break;
5887 case IBMVNIC_50GBPS:
5888 adapter->speed = SPEED_50000;
5889 break;
5890 case IBMVNIC_100GBPS:
5891 adapter->speed = SPEED_100000;
5892 break;
5893 case IBMVNIC_200GBPS:
5894 adapter->speed = SPEED_200000;
5895 break;
5896 default:
5897 if (netif_carrier_ok(netdev))
5898 netdev_warn(netdev, "Unknown speed 0x%08x\n", rspeed);
5899 adapter->speed = SPEED_UNKNOWN;
5900 }
5901 if (crq->query_phys_parms_rsp.flags1 & IBMVNIC_FULL_DUPLEX)
5902 adapter->duplex = DUPLEX_FULL;
5903 else if (crq->query_phys_parms_rsp.flags1 & IBMVNIC_HALF_DUPLEX)
5904 adapter->duplex = DUPLEX_HALF;
5905 else
5906 adapter->duplex = DUPLEX_UNKNOWN;
5907
5908 return rc;
5909 }
5910
5911 static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
5912 struct ibmvnic_adapter *adapter)
5913 {
5914 struct ibmvnic_generic_crq *gen_crq = &crq->generic;
5915 struct net_device *netdev = adapter->netdev;
5916 struct device *dev = &adapter->vdev->dev;
5917 u64 *u64_crq = (u64 *)crq;
5918 long rc;
5919
5920 netdev_dbg(netdev, "Handling CRQ: %016lx %016lx\n",
5921 (unsigned long)cpu_to_be64(u64_crq[0]),
5922 (unsigned long)cpu_to_be64(u64_crq[1]));
5923 switch (gen_crq->first) {
5924 case IBMVNIC_CRQ_INIT_RSP:
5925 switch (gen_crq->cmd) {
5926 case IBMVNIC_CRQ_INIT:
5927 dev_info(dev, "Partner initialized\n");
5928 adapter->from_passive_init = true;
5929 /* Discard any stale login responses from prev reset.
5930 * CHECK: should we clear even on INIT_COMPLETE?
5931 */
5932 adapter->login_pending = false;
5933
5934 if (adapter->state == VNIC_DOWN)
5935 rc = ibmvnic_reset(adapter, VNIC_RESET_PASSIVE_INIT);
5936 else
5937 rc = ibmvnic_reset(adapter, VNIC_RESET_FAILOVER);
5938
5939 if (rc && rc != -EBUSY) {
5940 /* We were unable to schedule the failover
5941 * reset either because the adapter was still
5942 * probing (eg: during kexec) or we could not
5943 * allocate memory. Clear the failover_pending
5944 * flag since no one else will. We ignore
5945 * EBUSY because it means either FAILOVER reset
5946 * is already scheduled or the adapter is
5947 * being removed.
5948 */
5949 netdev_err(netdev,
5950 "Error %ld scheduling failover reset\n",
5951 rc);
5952 adapter->failover_pending = false;
5953 }
5954
5955 if (!completion_done(&adapter->init_done)) {
5956 if (!adapter->init_done_rc)
5957 adapter->init_done_rc = -EAGAIN;
5958 complete(&adapter->init_done);
5959 }
5960
5961 break;
5962 case IBMVNIC_CRQ_INIT_COMPLETE:
5963 dev_info(dev, "Partner initialization complete\n");
5964 adapter->crq.active = true;
5965 send_version_xchg(adapter);
5966 break;
5967 default:
5968 dev_err(dev, "Unknown crq cmd: %d\n", gen_crq->cmd);
5969 }
5970 return;
5971 case IBMVNIC_CRQ_XPORT_EVENT:
5972 netif_carrier_off(netdev);
5973 adapter->crq.active = false;
5974 /* terminate any thread waiting for a response
5975 * from the device
5976 */
5977 if (!completion_done(&adapter->fw_done)) {
5978 adapter->fw_done_rc = -EIO;
5979 complete(&adapter->fw_done);
5980 }
5981
5982 /* if we got here during crq-init, retry crq-init */
5983 if (!completion_done(&adapter->init_done)) {
5984 adapter->init_done_rc = -EAGAIN;
5985 complete(&adapter->init_done);
5986 }
5987
5988 if (!completion_done(&adapter->stats_done))
5989 complete(&adapter->stats_done);
5990 if (test_bit(0, &adapter->resetting))
5991 adapter->force_reset_recovery = true;
5992 if (gen_crq->cmd == IBMVNIC_PARTITION_MIGRATED) {
5993 dev_info(dev, "Migrated, re-enabling adapter\n");
5994 ibmvnic_reset(adapter, VNIC_RESET_MOBILITY);
5995 } else if (gen_crq->cmd == IBMVNIC_DEVICE_FAILOVER) {
5996 dev_info(dev, "Backing device failover detected\n");
5997 adapter->failover_pending = true;
5998 } else {
5999 /* The adapter lost the connection */
6000 dev_err(dev, "Virtual Adapter failed (rc=%d)\n",
6001 gen_crq->cmd);
6002 ibmvnic_reset(adapter, VNIC_RESET_FATAL);
6003 }
6004 return;
6005 case IBMVNIC_CRQ_CMD_RSP:
6006 break;
6007 default:
6008 dev_err(dev, "Got an invalid msg type 0x%02x\n",
6009 gen_crq->first);
6010 return;
6011 }
6012
6013 switch (gen_crq->cmd) {
6014 case VERSION_EXCHANGE_RSP:
6015 rc = crq->version_exchange_rsp.rc.code;
6016 if (rc) {
6017 dev_err(dev, "Error %ld in VERSION_EXCHG_RSP\n", rc);
6018 break;
6019 }
6020 ibmvnic_version =
6021 be16_to_cpu(crq->version_exchange_rsp.version);
6022 dev_info(dev, "Partner protocol version is %d\n",
6023 ibmvnic_version);
6024 send_query_cap(adapter);
6025 break;
6026 case QUERY_CAPABILITY_RSP:
6027 handle_query_cap_rsp(crq, adapter);
6028 break;
6029 case QUERY_MAP_RSP:
6030 handle_query_map_rsp(crq, adapter);
6031 break;
6032 case REQUEST_MAP_RSP:
6033 adapter->fw_done_rc = crq->request_map_rsp.rc.code;
6034 complete(&adapter->fw_done);
6035 break;
6036 case REQUEST_UNMAP_RSP:
6037 handle_request_unmap_rsp(crq, adapter);
6038 break;
6039 case REQUEST_CAPABILITY_RSP:
6040 handle_request_cap_rsp(crq, adapter);
6041 break;
6042 case LOGIN_RSP:
6043 netdev_dbg(netdev, "Got Login Response\n");
6044 handle_login_rsp(crq, adapter);
6045 break;
6046 case LOGICAL_LINK_STATE_RSP:
6047 netdev_dbg(netdev,
6048 "Got Logical Link State Response, state: %d rc: %d\n",
6049 crq->logical_link_state_rsp.link_state,
6050 crq->logical_link_state_rsp.rc.code);
6051 adapter->logical_link_state =
6052 crq->logical_link_state_rsp.link_state;
6053 adapter->init_done_rc = crq->logical_link_state_rsp.rc.code;
6054 complete(&adapter->init_done);
6055 break;
6056 case LINK_STATE_INDICATION:
6057 netdev_dbg(netdev, "Got Logical Link State Indication\n");
6058 adapter->phys_link_state =
6059 crq->link_state_indication.phys_link_state;
6060 adapter->logical_link_state =
6061 crq->link_state_indication.logical_link_state;
6062 if (adapter->phys_link_state && adapter->logical_link_state)
6063 netif_carrier_on(netdev);
6064 else
6065 netif_carrier_off(netdev);
6066 break;
6067 case CHANGE_MAC_ADDR_RSP:
6068 netdev_dbg(netdev, "Got MAC address change Response\n");
6069 adapter->fw_done_rc = handle_change_mac_rsp(crq, adapter);
6070 break;
6071 case ERROR_INDICATION:
6072 netdev_dbg(netdev, "Got Error Indication\n");
6073 handle_error_indication(crq, adapter);
6074 break;
6075 case REQUEST_STATISTICS_RSP:
6076 netdev_dbg(netdev, "Got Statistics Response\n");
6077 complete(&adapter->stats_done);
6078 break;
6079 case QUERY_IP_OFFLOAD_RSP:
6080 netdev_dbg(netdev, "Got Query IP offload Response\n");
6081 handle_query_ip_offload_rsp(adapter);
6082 break;
6083 case MULTICAST_CTRL_RSP:
6084 netdev_dbg(netdev, "Got multicast control Response\n");
6085 break;
6086 case CONTROL_IP_OFFLOAD_RSP:
6087 netdev_dbg(netdev, "Got Control IP offload Response\n");
6088 dma_unmap_single(dev, adapter->ip_offload_ctrl_tok,
6089 sizeof(adapter->ip_offload_ctrl),
6090 DMA_TO_DEVICE);
6091 complete(&adapter->init_done);
6092 break;
6093 case COLLECT_FW_TRACE_RSP:
6094 netdev_dbg(netdev, "Got Collect firmware trace Response\n");
6095 complete(&adapter->fw_done);
6096 break;
6097 case GET_VPD_SIZE_RSP:
6098 handle_vpd_size_rsp(crq, adapter);
6099 break;
6100 case GET_VPD_RSP:
6101 handle_vpd_rsp(crq, adapter);
6102 break;
6103 case QUERY_PHYS_PARMS_RSP:
6104 adapter->fw_done_rc = handle_query_phys_parms_rsp(crq, adapter);
6105 complete(&adapter->fw_done);
6106 break;
6107 default:
6108 netdev_err(netdev, "Got an invalid cmd type 0x%02x\n",
6109 gen_crq->cmd);
6110 }
6111 }
6112
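/* CRQ interrupt handler: all CRQ processing is deferred to the tasklet. */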
6113 static irqreturn_t ibmvnic_interrupt(int irq, void *instance)
6114 {
6115 struct ibmvnic_adapter *adapter = instance;
6116
6117 tasklet_schedule(&adapter->tasklet);
6118 return IRQ_HANDLED;
6119 }
6120
6121 static void ibmvnic_tasklet(struct tasklet_struct *t)
6122 {
6123 struct ibmvnic_adapter *adapter = from_tasklet(adapter, t, tasklet);
6124 struct ibmvnic_crq_queue *queue = &adapter->crq;
6125 union ibmvnic_crq *crq;
6126 unsigned long flags;
6127
6128 spin_lock_irqsave(&queue->lock, flags);
6129
6130 /* Pull all the valid messages off the CRQ */
6131 while ((crq = ibmvnic_next_crq(adapter)) != NULL) {
6132 /* This barrier makes sure ibmvnic_next_crq()'s
6133 * crq->generic.first & IBMVNIC_CRQ_CMD_RSP is loaded
6134 * before ibmvnic_handle_crq()'s
6135 * switch(gen_crq->first) and switch(gen_crq->cmd).
6136 */
6137 dma_rmb();
6138 ibmvnic_handle_crq(crq, adapter);
6139 crq->generic.first = 0;
6140 }
6141
6142 spin_unlock_irqrestore(&queue->lock, flags);
6143 }
6144
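/* Ask the hypervisor to re-enable the CRQ, retrying while the hcall reports busy. */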
6145 static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *adapter)
6146 {
6147 struct vio_dev *vdev = adapter->vdev;
6148 int rc;
6149
6150 do {
6151 rc = plpar_hcall_norets(H_ENABLE_CRQ, vdev->unit_address);
6152 } while (rc == H_IN_PROGRESS || rc == H_BUSY || H_IS_LONG_BUSY(rc));
6153
6154 if (rc)
6155 dev_err(&vdev->dev, "Error enabling adapter (rc=%d)\n", rc);
6156
6157 return rc;
6158 }
6159
6160 static int ibmvnic_reset_crq(struct ibmvnic_adapter *adapter)
6161 {
6162 struct ibmvnic_crq_queue *crq = &adapter->crq;
6163 struct device *dev = &adapter->vdev->dev;
6164 struct vio_dev *vdev = adapter->vdev;
6165 int rc;
6166
6167 /* Close the CRQ */
6168 do {
6169 rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
6170 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
6171
6172 /* Clean out the queue */
6173 if (!crq->msgs)
6174 return -EINVAL;
6175
6176 memset(crq->msgs, 0, PAGE_SIZE);
6177 crq->cur = 0;
6178 crq->active = false;
6179
6180 /* And re-open it again */
6181 rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
6182 crq->msg_token, PAGE_SIZE);
6183
6184 if (rc == H_CLOSED)
6185 /* Adapter is good, but other end is not ready */
6186 dev_warn(dev, "Partner adapter not ready\n");
6187 else if (rc != 0)
6188 dev_warn(dev, "Couldn't register crq (rc=%d)\n", rc);
6189
6190 return rc;
6191 }
6192
6193 static void release_crq_queue(struct ibmvnic_adapter *adapter)
6194 {
6195 struct ibmvnic_crq_queue *crq = &adapter->crq;
6196 struct vio_dev *vdev = adapter->vdev;
6197 long rc;
6198
6199 if (!crq->msgs)
6200 return;
6201
6202 netdev_dbg(adapter->netdev, "Releasing CRQ\n");
6203 free_irq(vdev->irq, adapter);
6204 tasklet_kill(&adapter->tasklet);
6205 do {
6206 rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
6207 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
6208
6209 dma_unmap_single(&vdev->dev, crq->msg_token, PAGE_SIZE,
6210 DMA_BIDIRECTIONAL);
6211 free_page((unsigned long)crq->msgs);
6212 crq->msgs = NULL;
6213 crq->active = false;
6214 }
6215
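/* Allocate and DMA-map a single page of CRQ messages, register it with the
 * hypervisor (H_REG_CRQ), then set up the tasklet and the CRQ interrupt.
 */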
6216 static int init_crq_queue(struct ibmvnic_adapter *adapter)
6217 {
6218 struct ibmvnic_crq_queue *crq = &adapter->crq;
6219 struct device *dev = &adapter->vdev->dev;
6220 struct vio_dev *vdev = adapter->vdev;
6221 int rc, retrc = -ENOMEM;
6222
6223 if (crq->msgs)
6224 return 0;
6225
6226 crq->msgs = (union ibmvnic_crq *)get_zeroed_page(GFP_KERNEL);
6227 /* Should we allocate more than one page? */
6228
6229 if (!crq->msgs)
6230 return -ENOMEM;
6231
6232 crq->size = PAGE_SIZE / sizeof(*crq->msgs);
6233 crq->msg_token = dma_map_single(dev, crq->msgs, PAGE_SIZE,
6234 DMA_BIDIRECTIONAL);
6235 if (dma_mapping_error(dev, crq->msg_token))
6236 goto map_failed;
6237
6238 rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
6239 crq->msg_token, PAGE_SIZE);
6240
6241 if (rc == H_RESOURCE)
6242 /* maybe kexecing and resource is busy. try a reset */
6243 rc = ibmvnic_reset_crq(adapter);
6244 retrc = rc;
6245
6246 if (rc == H_CLOSED) {
6247 dev_warn(dev, "Partner adapter not ready\n");
6248 } else if (rc) {
6249 dev_warn(dev, "Error %d opening adapter\n", rc);
6250 goto reg_crq_failed;
6251 }
6252
6253 retrc = 0;
6254
6255 tasklet_setup(&adapter->tasklet, (void *)ibmvnic_tasklet);
6256
6257 netdev_dbg(adapter->netdev, "registering irq 0x%x\n", vdev->irq);
6258 snprintf(crq->name, sizeof(crq->name), "ibmvnic-%x",
6259 adapter->vdev->unit_address);
6260 rc = request_irq(vdev->irq, ibmvnic_interrupt, 0, crq->name, adapter);
6261 if (rc) {
6262 dev_err(dev, "Couldn't register irq 0x%x. rc=%d\n",
6263 vdev->irq, rc);
6264 goto req_irq_failed;
6265 }
6266
6267 rc = vio_enable_interrupts(vdev);
6268 if (rc) {
6269 dev_err(dev, "Error %d enabling interrupts\n", rc);
6270 goto req_irq_failed;
6271 }
6272
6273 crq->cur = 0;
6274 spin_lock_init(&crq->lock);
6275
6276 /* process any CRQs that were queued before we enabled interrupts */
6277 tasklet_schedule(&adapter->tasklet);
6278
6279 return retrc;
6280
6281 req_irq_failed:
6282 tasklet_kill(&adapter->tasklet);
6283 do {
6284 rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
6285 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
6286 reg_crq_failed:
6287 dma_unmap_single(dev, crq->msg_token, PAGE_SIZE, DMA_BIDIRECTIONAL);
6288 map_failed:
6289 free_page((unsigned long)crq->msgs);
6290 crq->msgs = NULL;
6291 return retrc;
6292 }
6293
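/* (Re)initialize the CRQ protocol: send CRQ_INIT and wait for the handshake
 * driven from the CRQ tasklet to complete, then set up sub-CRQs and their
 * IRQs (reusing or resetting existing queues on a reset where possible).
 */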
6294 static int ibmvnic_reset_init(struct ibmvnic_adapter *adapter, bool reset)
6295 {
6296 struct device *dev = &adapter->vdev->dev;
6297 unsigned long timeout = msecs_to_jiffies(20000);
6298 u64 old_num_rx_queues = adapter->req_rx_queues;
6299 u64 old_num_tx_queues = adapter->req_tx_queues;
6300 int rc;
6301
6302 adapter->from_passive_init = false;
6303
6304 rc = ibmvnic_send_crq_init(adapter);
6305 if (rc) {
6306 dev_err(dev, "Send crq init failed with error %d\n", rc);
6307 return rc;
6308 }
6309
6310 if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
6311 dev_err(dev, "Initialization sequence timed out\n");
6312 return -ETIMEDOUT;
6313 }
6314
6315 if (adapter->init_done_rc) {
6316 release_crq_queue(adapter);
6317 dev_err(dev, "CRQ-init failed, %d\n", adapter->init_done_rc);
6318 return adapter->init_done_rc;
6319 }
6320
6321 if (adapter->from_passive_init) {
6322 adapter->state = VNIC_OPEN;
6323 adapter->from_passive_init = false;
6324 dev_err(dev, "CRQ-init failed, passive-init\n");
6325 return -EINVAL;
6326 }
6327
6328 if (reset &&
6329 test_bit(0, &adapter->resetting) && !adapter->wait_for_reset &&
6330 adapter->reset_reason != VNIC_RESET_MOBILITY) {
6331 if (adapter->req_rx_queues != old_num_rx_queues ||
6332 adapter->req_tx_queues != old_num_tx_queues) {
6333 release_sub_crqs(adapter, 0);
6334 rc = init_sub_crqs(adapter);
6335 } else {
6336 /* no need to reinitialize completely, but we do
6337 * need to clean up transmits that were in flight
6338 * when we processed the reset. Failure to do so
6339 * will confound the upper layer, usually TCP, by
6340 * creating the illusion of transmits that are
6341 * awaiting completion.
6342 */
6343 clean_tx_pools(adapter);
6344
6345 rc = reset_sub_crq_queues(adapter);
6346 }
6347 } else {
6348 rc = init_sub_crqs(adapter);
6349 }
6350
6351 if (rc) {
6352 dev_err(dev, "Initialization of sub crqs failed\n");
6353 release_crq_queue(adapter);
6354 return rc;
6355 }
6356
6357 rc = init_sub_crq_irqs(adapter);
6358 if (rc) {
6359 dev_err(dev, "Failed to initialize sub crq irqs\n");
6360 release_crq_queue(adapter);
6361 }
6362
6363 return rc;
6364 }
6365
6366 static struct device_attribute dev_attr_failover;
6367
6368 static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
6369 {
6370 struct ibmvnic_adapter *adapter;
6371 struct net_device *netdev;
6372 unsigned char *mac_addr_p;
6373 unsigned long flags;
6374 bool init_success;
6375 int rc;
6376
6377 dev_dbg(&dev->dev, "entering ibmvnic_probe for UA 0x%x\n",
6378 dev->unit_address);
6379
6380 mac_addr_p = (unsigned char *)vio_get_attribute(dev,
6381 VETH_MAC_ADDR, NULL);
6382 if (!mac_addr_p) {
6383 dev_err(&dev->dev,
6384 "(%s:%3.3d) ERROR: Can't find MAC_ADDR attribute\n",
6385 __FILE__, __LINE__);
6386 return 0;
6387 }
6388
6389 netdev = alloc_etherdev_mq(sizeof(struct ibmvnic_adapter),
6390 IBMVNIC_MAX_QUEUES);
6391 if (!netdev)
6392 return -ENOMEM;
6393
6394 adapter = netdev_priv(netdev);
6395 adapter->state = VNIC_PROBING;
6396 dev_set_drvdata(&dev->dev, netdev);
6397 adapter->vdev = dev;
6398 adapter->netdev = netdev;
6399 adapter->login_pending = false;
6400 memset(&adapter->map_ids, 0, sizeof(adapter->map_ids));
6401 /* map_ids start at 1, so ensure map_id 0 is always "in-use" */
6402 bitmap_set(adapter->map_ids, 0, 1);
6403
6404 ether_addr_copy(adapter->mac_addr, mac_addr_p);
6405 eth_hw_addr_set(netdev, adapter->mac_addr);
6406 netdev->irq = dev->irq;
6407 netdev->netdev_ops = &ibmvnic_netdev_ops;
6408 netdev->ethtool_ops = &ibmvnic_ethtool_ops;
6409 SET_NETDEV_DEV(netdev, &dev->dev);
6410
6411 INIT_WORK(&adapter->ibmvnic_reset, __ibmvnic_reset);
6412 INIT_DELAYED_WORK(&adapter->ibmvnic_delayed_reset,
6413 __ibmvnic_delayed_reset);
6414 INIT_LIST_HEAD(&adapter->rwi_list);
6415 spin_lock_init(&adapter->rwi_lock);
6416 spin_lock_init(&adapter->state_lock);
6417 mutex_init(&adapter->fw_lock);
6418 init_completion(&adapter->probe_done);
6419 init_completion(&adapter->init_done);
6420 init_completion(&adapter->fw_done);
6421 init_completion(&adapter->reset_done);
6422 init_completion(&adapter->stats_done);
6423 clear_bit(0, &adapter->resetting);
6424 adapter->prev_rx_buf_sz = 0;
6425 adapter->prev_mtu = 0;
6426
6427 init_success = false;
6428 do {
6429 reinit_init_done(adapter);
6430
6431 /* clear any failovers we got in the previous pass
6432 * since we are reinitializing the CRQ
6433 */
6434 adapter->failover_pending = false;
6435
6436 /* If we had already initialized CRQ, we may have one or
6437 * more resets queued already. Discard those and release
6438 * the CRQ before initializing the CRQ again.
6439 */
6440 release_crq_queue(adapter);
6441
6442 /* Since we are still in PROBING state, __ibmvnic_reset()
6443 * will not access the ->rwi_list and since we released CRQ,
6444 * we won't get _new_ transport events. But there maybe an
6445 * ongoing ibmvnic_reset() call. So serialize access to
6446 * rwi_list. If we win the race, ibmvnic_reset() could add
6447 * a reset after we purged but that's ok - we just may end
6448 * up with an extra reset (i.e similar to having two or more
6449 * resets in the queue at once).
6450 * CHECK.
6451 */
6452 spin_lock_irqsave(&adapter->rwi_lock, flags);
6453 flush_reset_queue(adapter);
6454 spin_unlock_irqrestore(&adapter->rwi_lock, flags);
6455
6456 rc = init_crq_queue(adapter);
6457 if (rc) {
6458 dev_err(&dev->dev, "Couldn't initialize crq. rc=%d\n",
6459 rc);
6460 goto ibmvnic_init_fail;
6461 }
6462
6463 rc = ibmvnic_reset_init(adapter, false);
6464 } while (rc == -EAGAIN);
6465
6466 /* We are ignoring the error from ibmvnic_reset_init() assuming that the
6467 * partner is not ready. CRQ is not active. When the partner becomes
6468 * ready, we will do the passive init reset.
6469 */
6470
6471 if (!rc)
6472 init_success = true;
6473
6474 rc = init_stats_buffers(adapter);
6475 if (rc)
6476 goto ibmvnic_init_fail;
6477
6478 rc = init_stats_token(adapter);
6479 if (rc)
6480 goto ibmvnic_stats_fail;
6481
6482 rc = device_create_file(&dev->dev, &dev_attr_failover);
6483 if (rc)
6484 goto ibmvnic_dev_file_err;
6485
6486 netif_carrier_off(netdev);
6487
6488 if (init_success) {
6489 adapter->state = VNIC_PROBED;
6490 netdev->mtu = adapter->req_mtu - ETH_HLEN;
6491 netdev->min_mtu = adapter->min_mtu - ETH_HLEN;
6492 netdev->max_mtu = adapter->max_mtu - ETH_HLEN;
6493 } else {
6494 adapter->state = VNIC_DOWN;
6495 }
6496
6497 adapter->wait_for_reset = false;
6498 adapter->last_reset_time = jiffies;
6499
6500 rc = register_netdev(netdev);
6501 if (rc) {
6502 dev_err(&dev->dev, "failed to register netdev rc=%d\n", rc);
6503 goto ibmvnic_register_fail;
6504 }
6505 dev_info(&dev->dev, "ibmvnic registered\n");
6506
6507 rc = ibmvnic_cpu_notif_add(adapter);
6508 if (rc) {
6509 netdev_err(netdev, "Registering cpu notifier failed\n");
6510 goto cpu_notif_add_failed;
6511 }
6512
6513 complete(&adapter->probe_done);
6514
6515 return 0;
6516
6517 cpu_notif_add_failed:
6518 unregister_netdev(netdev);
6519
6520 ibmvnic_register_fail:
6521 device_remove_file(&dev->dev, &dev_attr_failover);
6522
6523 ibmvnic_dev_file_err:
6524 release_stats_token(adapter);
6525
6526 ibmvnic_stats_fail:
6527 release_stats_buffers(adapter);
6528
6529 ibmvnic_init_fail:
6530 release_sub_crqs(adapter, 1);
6531 release_crq_queue(adapter);
6532
6533 /* cleanup worker thread after releasing CRQ so we don't get
6534 * transport events (i.e new work items for the worker thread).
6535 */
6536 adapter->state = VNIC_REMOVING;
6537 complete(&adapter->probe_done);
6538 flush_work(&adapter->ibmvnic_reset);
6539 flush_delayed_work(&adapter->ibmvnic_delayed_reset);
6540
6541 flush_reset_queue(adapter);
6542
6543 mutex_destroy(&adapter->fw_lock);
6544 free_netdev(netdev);
6545
6546 return rc;
6547 }
6548
6549 static void ibmvnic_remove(struct vio_dev *dev)
6550 {
6551 struct net_device *netdev = dev_get_drvdata(&dev->dev);
6552 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
6553 unsigned long flags;
6554
6555 spin_lock_irqsave(&adapter->state_lock, flags);
6556
6557 /* If ibmvnic_reset() is scheduling a reset, wait for it to
6558 * finish. Then, set the state to REMOVING to prevent it from
6559 * scheduling any more work and to have reset functions ignore
6560 * any resets that have already been scheduled. Drop the lock
6561 * after setting state, so __ibmvnic_reset() which is called
6562 * from the flush_work() below, can make progress.
6563 */
6564 spin_lock(&adapter->rwi_lock);
6565 adapter->state = VNIC_REMOVING;
6566 spin_unlock(&adapter->rwi_lock);
6567
6568 spin_unlock_irqrestore(&adapter->state_lock, flags);
6569
6570 ibmvnic_cpu_notif_remove(adapter);
6571
6572 flush_work(&adapter->ibmvnic_reset);
6573 flush_delayed_work(&adapter->ibmvnic_delayed_reset);
6574
6575 rtnl_lock();
6576 unregister_netdevice(netdev);
6577
6578 release_resources(adapter);
6579 release_rx_pools(adapter);
6580 release_tx_pools(adapter);
6581 release_sub_crqs(adapter, 1);
6582 release_crq_queue(adapter);
6583
6584 release_stats_token(adapter);
6585 release_stats_buffers(adapter);
6586
6587 adapter->state = VNIC_REMOVED;
6588
6589 rtnl_unlock();
6590 mutex_destroy(&adapter->fw_lock);
6591 device_remove_file(&dev->dev, &dev_attr_failover);
6592 free_netdev(netdev);
6593 dev_set_drvdata(&dev->dev, NULL);
6594 }
6595
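/* sysfs "failover" hook: fetch the session token via H_VIOCTL
 * (H_GET_SESSION_TOKEN) and signal H_SESSION_ERR_DETECTED to ask the
 * platform to fail over; if either hcall fails, fall back to a
 * client-initiated FAILOVER reset.
 */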
6596 static ssize_t failover_store(struct device *dev, struct device_attribute *attr,
6597 const char *buf, size_t count)
6598 {
6599 struct net_device *netdev = dev_get_drvdata(dev);
6600 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
6601 unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
6602 __be64 session_token;
6603 long rc;
6604
6605 if (!sysfs_streq(buf, "1"))
6606 return -EINVAL;
6607
6608 rc = plpar_hcall(H_VIOCTL, retbuf, adapter->vdev->unit_address,
6609 H_GET_SESSION_TOKEN, 0, 0, 0);
6610 if (rc) {
6611 netdev_err(netdev, "Couldn't retrieve session token, rc %ld\n",
6612 rc);
6613 goto last_resort;
6614 }
6615
6616 session_token = (__be64)retbuf[0];
6617 netdev_dbg(netdev, "Initiating client failover, session id %llx\n",
6618 be64_to_cpu(session_token));
6619 rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
6620 H_SESSION_ERR_DETECTED, session_token, 0, 0);
6621 if (rc) {
6622 netdev_err(netdev,
6623 "H_VIOCTL initiated failover failed, rc %ld\n",
6624 rc);
6625 goto last_resort;
6626 }
6627
6628 return count;
6629
6630 last_resort:
6631 netdev_dbg(netdev, "Trying to send CRQ_CMD, the last resort\n");
6632 ibmvnic_reset(adapter, VNIC_RESET_FAILOVER);
6633
6634 return count;
6635 }
6636 static DEVICE_ATTR_WO(failover);
6637
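/* Estimate the DMA (IO entitlement) this device needs: the CRQ page, the
 * statistics buffer, the sub-CRQ message queues, and the long-term-mapped
 * receive pool buffers.
 */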
6638 static unsigned long ibmvnic_get_desired_dma(struct vio_dev *vdev)
6639 {
6640 struct net_device *netdev = dev_get_drvdata(&vdev->dev);
6641 struct ibmvnic_adapter *adapter;
6642 struct iommu_table *tbl;
6643 unsigned long ret = 0;
6644 int i;
6645
6646 tbl = get_iommu_table_base(&vdev->dev);
6647
6648 /* netdev inits at probe time along with the structures we need below */
6649 if (!netdev)
6650 return IOMMU_PAGE_ALIGN(IBMVNIC_IO_ENTITLEMENT_DEFAULT, tbl);
6651
6652 adapter = netdev_priv(netdev);
6653
6654 ret += PAGE_SIZE; /* the crq message queue */
6655 ret += IOMMU_PAGE_ALIGN(sizeof(struct ibmvnic_statistics), tbl);
6656
6657 for (i = 0; i < adapter->req_tx_queues + adapter->req_rx_queues; i++)
6658 ret += 4 * PAGE_SIZE; /* the scrq message queue */
6659
6660 for (i = 0; i < adapter->num_active_rx_pools; i++)
6661 ret += adapter->rx_pool[i].size *
6662 IOMMU_PAGE_ALIGN(adapter->rx_pool[i].buff_size, tbl);
6663
6664 return ret;
6665 }
6666
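/* On resume, schedule the CRQ tasklet (if the adapter was open) to process
 * any messages that may have arrived while suspended.
 */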
6667 static int ibmvnic_resume(struct device *dev)
6668 {
6669 struct net_device *netdev = dev_get_drvdata(dev);
6670 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
6671
6672 if (adapter->state != VNIC_OPEN)
6673 return 0;
6674
6675 tasklet_schedule(&adapter->tasklet);
6676
6677 return 0;
6678 }
6679
6680 static const struct vio_device_id ibmvnic_device_table[] = {
6681 {"network", "IBM,vnic"},
6682 {"", "" }
6683 };
6684 MODULE_DEVICE_TABLE(vio, ibmvnic_device_table);
6685
6686 static const struct dev_pm_ops ibmvnic_pm_ops = {
6687 .resume = ibmvnic_resume
6688 };
6689
6690 static struct vio_driver ibmvnic_driver = {
6691 .id_table = ibmvnic_device_table,
6692 .probe = ibmvnic_probe,
6693 .remove = ibmvnic_remove,
6694 .get_desired_dma = ibmvnic_get_desired_dma,
6695 .name = ibmvnic_driver_name,
6696 .pm = &ibmvnic_pm_ops,
6697 };
6698
6699 /* module functions */
6700 static int __init ibmvnic_module_init(void)
6701 {
6702 int ret;
6703
6704 ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "net/ibmvnic:online",
6705 ibmvnic_cpu_online,
6706 ibmvnic_cpu_down_prep);
6707 if (ret < 0)
6708 goto out;
6709 ibmvnic_online = ret;
6710 ret = cpuhp_setup_state_multi(CPUHP_IBMVNIC_DEAD, "net/ibmvnic:dead",
6711 NULL, ibmvnic_cpu_dead);
6712 if (ret)
6713 goto err_dead;
6714
6715 ret = vio_register_driver(&ibmvnic_driver);
6716 if (ret)
6717 goto err_vio_register;
6718
6719 pr_info("%s: %s %s\n", ibmvnic_driver_name, ibmvnic_driver_string,
6720 IBMVNIC_DRIVER_VERSION);
6721
6722 return 0;
6723 err_vio_register:
6724 cpuhp_remove_multi_state(CPUHP_IBMVNIC_DEAD);
6725 err_dead:
6726 cpuhp_remove_multi_state(ibmvnic_online);
6727 out:
6728 return ret;
6729 }
6730
6731 static void __exit ibmvnic_module_exit(void)
6732 {
6733 vio_unregister_driver(&ibmvnic_driver);
6734 cpuhp_remove_multi_state(CPUHP_IBMVNIC_DEAD);
6735 cpuhp_remove_multi_state(ibmvnic_online);
6736 }
6737
6738 module_init(ibmvnic_module_init);
6739 module_exit(ibmvnic_module_exit);
6740