// SPDX-License-Identifier: GPL-2.0-or-later
/**************************************************************************/
/*                                                                        */
/*  IBM System i and System p Virtual NIC Device Driver                   */
/*  Copyright (C) 2014 IBM Corp.                                          */
/*  Santiago Leon (santi_leon@yahoo.com)                                  */
/*  Thomas Falcon (tlfalcon@linux.vnet.ibm.com)                           */
/*  John Allen (jallen@linux.vnet.ibm.com)                                */
/*                                                                        */
/*                                                                        */
/* This module contains the implementation of a virtual ethernet device   */
/* for use with IBM i/p Series LPAR Linux. It utilizes the logical LAN    */
/* option of the RS/6000 Platform Architecture to interface with virtual  */
/* ethernet NICs that are presented to the partition by the hypervisor.   */
/*                                                                        */
/* Messages are passed between the VNIC driver and the VNIC server using  */
/* Command/Response Queues (CRQs) and sub CRQs (sCRQs). CRQs are used to  */
/* issue and receive commands that initiate communication with the server */
/* on driver initialization. Sub CRQs (sCRQs) are similar to CRQs, but    */
/* are used by the driver to notify the server that a packet is           */
/* ready for transmission or that a buffer has been added to receive a    */
/* packet. Subsequently, sCRQs are used by the server to notify the       */
/* driver that a packet transmission has been completed or that a packet  */
/* has been received and placed in a waiting buffer.                      */
/*                                                                        */
/* In lieu of a more conventional "on-the-fly" DMA mapping strategy in    */
/* which skbs are DMA mapped and immediately unmapped when the transmit   */
/* or receive has been completed, the VNIC driver is required to use      */
/* "long term mapping". This entails that large, contiguous DMA mapped    */
/* buffers are allocated on driver initialization and these buffers are   */
/* then continuously reused to pass skbs to and from the VNIC server.     */
/*                                                                        */
/**************************************************************************/
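/* Illustrative sketch (not part of the driver): with long term mapping,
 * per-packet "mapping" reduces to computing a slot inside a buffer that
 * was DMA mapped once at init time. The hypothetical helper below shows
 * the address arithmetic that replenish_rx_pool() and ibmvnic_xmit()
 * perform on their long term buffers; 'index' and 'slot_size' are
 * assumed names for this sketch, not fields of the real structures.
 *
 *	static void *ltb_slot(struct ibmvnic_long_term_buff *ltb,
 *			      unsigned int index, unsigned int slot_size,
 *			      dma_addr_t *dma)
 *	{
 *		unsigned int offset = index * slot_size;
 *
 *		*dma = ltb->addr + offset;	// device-visible address
 *		return ltb->buff + offset;	// CPU-visible address
 *	}
 */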

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/completion.h>
#include <linux/ioport.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/ethtool.h>
#include <linux/proc_fs.h>
#include <linux/if_arp.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/irq.h>
#include <linux/kthread.h>
#include <linux/seq_file.h>
#include <linux/interrupt.h>
#include <net/net_namespace.h>
#include <asm/hvcall.h>
#include <linux/atomic.h>
#include <asm/vio.h>
#include <asm/iommu.h>
#include <linux/uaccess.h>
#include <asm/firmware.h>
#include <linux/workqueue.h>
#include <linux/if_vlan.h>
#include <linux/utsname.h>

#include "ibmvnic.h"

static const char ibmvnic_driver_name[] = "ibmvnic";
static const char ibmvnic_driver_string[] = "IBM System i/p Virtual NIC Driver";

MODULE_AUTHOR("Santiago Leon");
MODULE_DESCRIPTION("IBM System i/p Virtual NIC Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(IBMVNIC_DRIVER_VERSION);

static int ibmvnic_version = IBMVNIC_INITIAL_VERSION;
static int ibmvnic_remove(struct vio_dev *);
static void release_sub_crqs(struct ibmvnic_adapter *, bool);
static int ibmvnic_reset_crq(struct ibmvnic_adapter *);
static int ibmvnic_send_crq_init(struct ibmvnic_adapter *);
static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *);
static int ibmvnic_send_crq(struct ibmvnic_adapter *, union ibmvnic_crq *);
static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle,
		       union sub_crq *sub_crq);
static int send_subcrq_indirect(struct ibmvnic_adapter *, u64, u64, u64);
static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance);
static int enable_scrq_irq(struct ibmvnic_adapter *,
			   struct ibmvnic_sub_crq_queue *);
static int disable_scrq_irq(struct ibmvnic_adapter *,
			    struct ibmvnic_sub_crq_queue *);
static int pending_scrq(struct ibmvnic_adapter *,
			struct ibmvnic_sub_crq_queue *);
static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *,
					struct ibmvnic_sub_crq_queue *);
static int ibmvnic_poll(struct napi_struct *napi, int data);
static void send_map_query(struct ibmvnic_adapter *adapter);
static int send_request_map(struct ibmvnic_adapter *, dma_addr_t, __be32, u8);
static int send_request_unmap(struct ibmvnic_adapter *, u8);
static int send_login(struct ibmvnic_adapter *adapter);
static void send_cap_queries(struct ibmvnic_adapter *adapter);
static int init_sub_crqs(struct ibmvnic_adapter *);
static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter);
static int ibmvnic_init(struct ibmvnic_adapter *);
static int ibmvnic_reset_init(struct ibmvnic_adapter *);
static void release_crq_queue(struct ibmvnic_adapter *);
static int __ibmvnic_set_mac(struct net_device *, u8 *);
static int init_crq_queue(struct ibmvnic_adapter *adapter);
static int send_query_phys_parms(struct ibmvnic_adapter *adapter);

struct ibmvnic_stat {
	char name[ETH_GSTRING_LEN];
	int offset;
};

#define IBMVNIC_STAT_OFF(stat) (offsetof(struct ibmvnic_adapter, stats) + \
			     offsetof(struct ibmvnic_statistics, stat))
#define IBMVNIC_GET_STAT(a, off) (*((u64 *)(((unsigned long)(a)) + (off))))
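/* For illustration: IBMVNIC_GET_STAT(adapter, IBMVNIC_STAT_OFF(rx_packets))
 * reads adapter->stats.rx_packets as a u64, which is how the ethtool code
 * walks the ibmvnic_stats[] table below.
 */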

static const struct ibmvnic_stat ibmvnic_stats[] = {
	{"rx_packets", IBMVNIC_STAT_OFF(rx_packets)},
	{"rx_bytes", IBMVNIC_STAT_OFF(rx_bytes)},
	{"tx_packets", IBMVNIC_STAT_OFF(tx_packets)},
	{"tx_bytes", IBMVNIC_STAT_OFF(tx_bytes)},
	{"ucast_tx_packets", IBMVNIC_STAT_OFF(ucast_tx_packets)},
	{"ucast_rx_packets", IBMVNIC_STAT_OFF(ucast_rx_packets)},
	{"mcast_tx_packets", IBMVNIC_STAT_OFF(mcast_tx_packets)},
	{"mcast_rx_packets", IBMVNIC_STAT_OFF(mcast_rx_packets)},
	{"bcast_tx_packets", IBMVNIC_STAT_OFF(bcast_tx_packets)},
	{"bcast_rx_packets", IBMVNIC_STAT_OFF(bcast_rx_packets)},
	{"align_errors", IBMVNIC_STAT_OFF(align_errors)},
	{"fcs_errors", IBMVNIC_STAT_OFF(fcs_errors)},
	{"single_collision_frames", IBMVNIC_STAT_OFF(single_collision_frames)},
	{"multi_collision_frames", IBMVNIC_STAT_OFF(multi_collision_frames)},
	{"sqe_test_errors", IBMVNIC_STAT_OFF(sqe_test_errors)},
	{"deferred_tx", IBMVNIC_STAT_OFF(deferred_tx)},
	{"late_collisions", IBMVNIC_STAT_OFF(late_collisions)},
	{"excess_collisions", IBMVNIC_STAT_OFF(excess_collisions)},
	{"internal_mac_tx_errors", IBMVNIC_STAT_OFF(internal_mac_tx_errors)},
	{"carrier_sense", IBMVNIC_STAT_OFF(carrier_sense)},
	{"too_long_frames", IBMVNIC_STAT_OFF(too_long_frames)},
	{"internal_mac_rx_errors", IBMVNIC_STAT_OFF(internal_mac_rx_errors)},
};

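/* Register a sub-CRQ page with the hypervisor. On success the hypervisor
 * returns the new sub-CRQ's number and its interrupt source in retbuf,
 * which are handed back to the caller through *number and *irq.
 */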
static long h_reg_sub_crq(unsigned long unit_address, unsigned long token,
			  unsigned long length, unsigned long *number,
			  unsigned long *irq)
{
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
	long rc;

	rc = plpar_hcall(H_REG_SUB_CRQ, retbuf, unit_address, token, length);
	*number = retbuf[0];
	*irq = retbuf[1];

	return rc;
}

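/* Firmware commands that complete asynchronously (such as the map request
 * below) follow a simple handshake: the caller (re)initializes
 * adapter->fw_done, sends the CRQ, and sleeps in wait_for_completion();
 * the CRQ response handler stores the result in adapter->fw_done_rc and
 * completes fw_done.
 */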
static int alloc_long_term_buff(struct ibmvnic_adapter *adapter,
				struct ibmvnic_long_term_buff *ltb, int size)
{
	struct device *dev = &adapter->vdev->dev;
	int rc;

	ltb->size = size;
	ltb->buff = dma_alloc_coherent(dev, ltb->size, &ltb->addr,
				       GFP_KERNEL);

	if (!ltb->buff) {
		dev_err(dev, "Couldn't alloc long term buffer\n");
		return -ENOMEM;
	}
	ltb->map_id = adapter->map_id;
	adapter->map_id++;

	init_completion(&adapter->fw_done);
	rc = send_request_map(adapter, ltb->addr,
			      ltb->size, ltb->map_id);
	if (rc) {
		dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
		return rc;
	}
	wait_for_completion(&adapter->fw_done);

	if (adapter->fw_done_rc) {
		dev_err(dev, "Couldn't map long term buffer, rc = %d\n",
			adapter->fw_done_rc);
		dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
		return -1;
	}
	return 0;
}

static void free_long_term_buff(struct ibmvnic_adapter *adapter,
				struct ibmvnic_long_term_buff *ltb)
{
	struct device *dev = &adapter->vdev->dev;

	if (!ltb->buff)
		return;

	if (adapter->reset_reason != VNIC_RESET_FAILOVER &&
	    adapter->reset_reason != VNIC_RESET_MOBILITY)
		send_request_unmap(adapter, ltb->map_id);
	dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
}

static int reset_long_term_buff(struct ibmvnic_adapter *adapter,
				struct ibmvnic_long_term_buff *ltb)
{
	int rc;

	memset(ltb->buff, 0, ltb->size);

	init_completion(&adapter->fw_done);
	rc = send_request_map(adapter, ltb->addr, ltb->size, ltb->map_id);
	if (rc)
		return rc;
	wait_for_completion(&adapter->fw_done);

	if (adapter->fw_done_rc) {
		dev_info(&adapter->vdev->dev,
			 "Reset failed, attempting to free and reallocate buffer\n");
		free_long_term_buff(adapter, ltb);
		return alloc_long_term_buff(adapter, ltb, ltb->size);
	}
	return 0;
}

static void deactivate_rx_pools(struct ibmvnic_adapter *adapter)
{
	int i;

	for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
	     i++)
		adapter->rx_pool[i].active = 0;
}

static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
			      struct ibmvnic_rx_pool *pool)
{
	int count = pool->size - atomic_read(&pool->available);
	struct device *dev = &adapter->vdev->dev;
	int buffers_added = 0;
	unsigned long lpar_rc;
	union sub_crq sub_crq;
	struct sk_buff *skb;
	unsigned int offset;
	dma_addr_t dma_addr;
	unsigned char *dst;
	u64 *handle_array;
	int shift = 0;
	int index;
	int i;

	if (!pool->active)
		return;

	handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
				      be32_to_cpu(adapter->login_rsp_buf->
				      off_rxadd_subcrqs));

	for (i = 0; i < count; ++i) {
		skb = alloc_skb(pool->buff_size, GFP_ATOMIC);
		if (!skb) {
			dev_err(dev, "Couldn't replenish rx buff\n");
			adapter->replenish_no_mem++;
			break;
		}

		index = pool->free_map[pool->next_free];

		if (pool->rx_buff[index].skb)
			dev_err(dev, "Inconsistent free_map!\n");

		/* Copy the skb to the long term mapped DMA buffer */
		offset = index * pool->buff_size;
		dst = pool->long_term_buff.buff + offset;
		memset(dst, 0, pool->buff_size);
		dma_addr = pool->long_term_buff.addr + offset;
		pool->rx_buff[index].data = dst;

		pool->free_map[pool->next_free] = IBMVNIC_INVALID_MAP;
		pool->rx_buff[index].dma = dma_addr;
		pool->rx_buff[index].skb = skb;
		pool->rx_buff[index].pool_index = pool->index;
		pool->rx_buff[index].size = pool->buff_size;

		memset(&sub_crq, 0, sizeof(sub_crq));
		sub_crq.rx_add.first = IBMVNIC_CRQ_CMD;
		sub_crq.rx_add.correlator =
		    cpu_to_be64((u64)&pool->rx_buff[index]);
		sub_crq.rx_add.ioba = cpu_to_be32(dma_addr);
		sub_crq.rx_add.map_id = pool->long_term_buff.map_id;

		/* The length field of the sCRQ is defined to be 24 bits so the
		 * buffer size needs to be left shifted by a byte before it is
		 * converted to big endian to prevent the last byte from being
		 * truncated.
		 */
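		/* Worked example: for a 4096-byte (0x1000) buffer,
		 * 0x1000 << 8 = 0x100000, and cpu_to_be32(0x100000) lays
		 * out in memory as 00 10 00 00, so the first three bytes
		 * carry the 24-bit length 0x001000 intact.
		 */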
#ifdef __LITTLE_ENDIAN__
		shift = 8;
#endif
		sub_crq.rx_add.len = cpu_to_be32(pool->buff_size << shift);

		lpar_rc = send_subcrq(adapter, handle_array[pool->index],
				      &sub_crq);
		if (lpar_rc != H_SUCCESS)
			goto failure;

		buffers_added++;
		adapter->replenish_add_buff_success++;
		pool->next_free = (pool->next_free + 1) % pool->size;
	}
	atomic_add(buffers_added, &pool->available);
	return;

failure:
	if (lpar_rc != H_PARAMETER && lpar_rc != H_CLOSED)
		dev_err_ratelimited(dev, "rx: replenish packet buffer failed\n");
	pool->free_map[pool->next_free] = index;
	pool->rx_buff[index].skb = NULL;

	dev_kfree_skb_any(skb);
	adapter->replenish_add_buff_failure++;
	atomic_add(buffers_added, &pool->available);

	if (lpar_rc == H_CLOSED || adapter->failover_pending) {
		/* Disable buffer pool replenishment and report carrier off if
		 * queue is closed or pending failover.
		 * Firmware guarantees that a signal will be sent to the
		 * driver, triggering a reset.
		 */
		deactivate_rx_pools(adapter);
		netif_carrier_off(adapter->netdev);
	}
}

static void replenish_pools(struct ibmvnic_adapter *adapter)
{
	int i;

	adapter->replenish_task_cycles++;
	for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
	     i++) {
		if (adapter->rx_pool[i].active)
			replenish_rx_pool(adapter, &adapter->rx_pool[i]);
	}
}

static void release_stats_buffers(struct ibmvnic_adapter *adapter)
{
	kfree(adapter->tx_stats_buffers);
	kfree(adapter->rx_stats_buffers);
	adapter->tx_stats_buffers = NULL;
	adapter->rx_stats_buffers = NULL;
}

static int init_stats_buffers(struct ibmvnic_adapter *adapter)
{
	adapter->tx_stats_buffers =
				kcalloc(IBMVNIC_MAX_QUEUES,
					sizeof(struct ibmvnic_tx_queue_stats),
					GFP_KERNEL);
	if (!adapter->tx_stats_buffers)
		return -ENOMEM;

	adapter->rx_stats_buffers =
				kcalloc(IBMVNIC_MAX_QUEUES,
					sizeof(struct ibmvnic_rx_queue_stats),
					GFP_KERNEL);
	if (!adapter->rx_stats_buffers)
		return -ENOMEM;

	return 0;
}

static void release_stats_token(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;

	if (!adapter->stats_token)
		return;

	dma_unmap_single(dev, adapter->stats_token,
			 sizeof(struct ibmvnic_statistics),
			 DMA_FROM_DEVICE);
	adapter->stats_token = 0;
}

static int init_stats_token(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	dma_addr_t stok;

	stok = dma_map_single(dev, &adapter->stats,
			      sizeof(struct ibmvnic_statistics),
			      DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, stok)) {
		dev_err(dev, "Couldn't map stats buffer\n");
		return -1;
	}

	adapter->stats_token = stok;
	netdev_dbg(adapter->netdev, "Stats token initialized (%llx)\n", stok);
	return 0;
}

static int reset_rx_pools(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_rx_pool *rx_pool;
	int rx_scrqs;
	int i, j, rc;
	u64 *size_array;

	size_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
		be32_to_cpu(adapter->login_rsp_buf->off_rxadd_buff_size));

	rx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
	for (i = 0; i < rx_scrqs; i++) {
		rx_pool = &adapter->rx_pool[i];

		netdev_dbg(adapter->netdev, "Re-setting rx_pool[%d]\n", i);

		if (rx_pool->buff_size != be64_to_cpu(size_array[i])) {
			free_long_term_buff(adapter, &rx_pool->long_term_buff);
			rx_pool->buff_size = be64_to_cpu(size_array[i]);
			rc = alloc_long_term_buff(adapter,
						  &rx_pool->long_term_buff,
						  rx_pool->size *
						  rx_pool->buff_size);
		} else {
			rc = reset_long_term_buff(adapter,
						  &rx_pool->long_term_buff);
		}

		if (rc)
			return rc;

		for (j = 0; j < rx_pool->size; j++)
			rx_pool->free_map[j] = j;

		memset(rx_pool->rx_buff, 0,
		       rx_pool->size * sizeof(struct ibmvnic_rx_buff));

		atomic_set(&rx_pool->available, 0);
		rx_pool->next_alloc = 0;
		rx_pool->next_free = 0;
		rx_pool->active = 1;
	}

	return 0;
}

static void release_rx_pools(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_rx_pool *rx_pool;
	int i, j;

	if (!adapter->rx_pool)
		return;

	for (i = 0; i < adapter->num_active_rx_pools; i++) {
		rx_pool = &adapter->rx_pool[i];

		netdev_dbg(adapter->netdev, "Releasing rx_pool[%d]\n", i);

		kfree(rx_pool->free_map);
		free_long_term_buff(adapter, &rx_pool->long_term_buff);

		if (!rx_pool->rx_buff)
			continue;

		for (j = 0; j < rx_pool->size; j++) {
			if (rx_pool->rx_buff[j].skb) {
				dev_kfree_skb_any(rx_pool->rx_buff[j].skb);
				rx_pool->rx_buff[j].skb = NULL;
			}
		}

		kfree(rx_pool->rx_buff);
	}

	kfree(adapter->rx_pool);
	adapter->rx_pool = NULL;
	adapter->num_active_rx_pools = 0;
}

static int init_rx_pools(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_rx_pool *rx_pool;
	int rxadd_subcrqs;
	u64 *size_array;
	int i, j;

	rxadd_subcrqs =
		be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
	size_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
		be32_to_cpu(adapter->login_rsp_buf->off_rxadd_buff_size));

	adapter->rx_pool = kcalloc(rxadd_subcrqs,
				   sizeof(struct ibmvnic_rx_pool),
				   GFP_KERNEL);
	if (!adapter->rx_pool) {
		dev_err(dev, "Failed to allocate rx pools\n");
		return -1;
	}

	adapter->num_active_rx_pools = rxadd_subcrqs;

	for (i = 0; i < rxadd_subcrqs; i++) {
		rx_pool = &adapter->rx_pool[i];

		netdev_dbg(adapter->netdev,
			   "Initializing rx_pool[%d], %lld buffs, %lld bytes each\n",
			   i, adapter->req_rx_add_entries_per_subcrq,
			   be64_to_cpu(size_array[i]));

		rx_pool->size = adapter->req_rx_add_entries_per_subcrq;
		rx_pool->index = i;
		rx_pool->buff_size = be64_to_cpu(size_array[i]);
		rx_pool->active = 1;

		rx_pool->free_map = kcalloc(rx_pool->size, sizeof(int),
					    GFP_KERNEL);
		if (!rx_pool->free_map) {
			release_rx_pools(adapter);
			return -1;
		}

		rx_pool->rx_buff = kcalloc(rx_pool->size,
					   sizeof(struct ibmvnic_rx_buff),
					   GFP_KERNEL);
		if (!rx_pool->rx_buff) {
			dev_err(dev, "Couldn't alloc rx buffers\n");
			release_rx_pools(adapter);
			return -1;
		}

		if (alloc_long_term_buff(adapter, &rx_pool->long_term_buff,
					 rx_pool->size * rx_pool->buff_size)) {
			release_rx_pools(adapter);
			return -1;
		}

		for (j = 0; j < rx_pool->size; ++j)
			rx_pool->free_map[j] = j;

		atomic_set(&rx_pool->available, 0);
		rx_pool->next_alloc = 0;
		rx_pool->next_free = 0;
	}

	return 0;
}

static int reset_one_tx_pool(struct ibmvnic_adapter *adapter,
			     struct ibmvnic_tx_pool *tx_pool)
{
	int rc, i;

	rc = reset_long_term_buff(adapter, &tx_pool->long_term_buff);
	if (rc)
		return rc;

	memset(tx_pool->tx_buff, 0,
	       tx_pool->num_buffers *
	       sizeof(struct ibmvnic_tx_buff));

	for (i = 0; i < tx_pool->num_buffers; i++)
		tx_pool->free_map[i] = i;

	tx_pool->consumer_index = 0;
	tx_pool->producer_index = 0;

	return 0;
}

static int reset_tx_pools(struct ibmvnic_adapter *adapter)
{
	int tx_scrqs;
	int i, rc;

	tx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
	for (i = 0; i < tx_scrqs; i++) {
		rc = reset_one_tx_pool(adapter, &adapter->tso_pool[i]);
		if (rc)
			return rc;
		rc = reset_one_tx_pool(adapter, &adapter->tx_pool[i]);
		if (rc)
			return rc;
	}

	return 0;
}

static void release_vpd_data(struct ibmvnic_adapter *adapter)
{
	if (!adapter->vpd)
		return;

	kfree(adapter->vpd->buff);
	kfree(adapter->vpd);

	adapter->vpd = NULL;
}

static void release_one_tx_pool(struct ibmvnic_adapter *adapter,
				struct ibmvnic_tx_pool *tx_pool)
{
	kfree(tx_pool->tx_buff);
	kfree(tx_pool->free_map);
	free_long_term_buff(adapter, &tx_pool->long_term_buff);
}

static void release_tx_pools(struct ibmvnic_adapter *adapter)
{
	int i;

	if (!adapter->tx_pool)
		return;

	for (i = 0; i < adapter->num_active_tx_pools; i++) {
		release_one_tx_pool(adapter, &adapter->tx_pool[i]);
		release_one_tx_pool(adapter, &adapter->tso_pool[i]);
	}

	kfree(adapter->tx_pool);
	adapter->tx_pool = NULL;
	kfree(adapter->tso_pool);
	adapter->tso_pool = NULL;
	adapter->num_active_tx_pools = 0;
}

static int init_one_tx_pool(struct net_device *netdev,
			    struct ibmvnic_tx_pool *tx_pool,
			    int num_entries, int buf_size)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int i;

	tx_pool->tx_buff = kcalloc(num_entries,
				   sizeof(struct ibmvnic_tx_buff),
				   GFP_KERNEL);
	if (!tx_pool->tx_buff)
		return -1;

	if (alloc_long_term_buff(adapter, &tx_pool->long_term_buff,
				 num_entries * buf_size))
		return -1;

	tx_pool->free_map = kcalloc(num_entries, sizeof(int), GFP_KERNEL);
	if (!tx_pool->free_map)
		return -1;

	for (i = 0; i < num_entries; i++)
		tx_pool->free_map[i] = i;

	tx_pool->consumer_index = 0;
	tx_pool->producer_index = 0;
	tx_pool->num_buffers = num_entries;
	tx_pool->buf_size = buf_size;

	return 0;
}

static int init_tx_pools(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int tx_subcrqs;
	int i, rc;

	tx_subcrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
	adapter->tx_pool = kcalloc(tx_subcrqs,
				   sizeof(struct ibmvnic_tx_pool), GFP_KERNEL);
	if (!adapter->tx_pool)
		return -1;

	adapter->tso_pool = kcalloc(tx_subcrqs,
				    sizeof(struct ibmvnic_tx_pool), GFP_KERNEL);
	if (!adapter->tso_pool)
		return -1;

	adapter->num_active_tx_pools = tx_subcrqs;

	for (i = 0; i < tx_subcrqs; i++) {
		rc = init_one_tx_pool(netdev, &adapter->tx_pool[i],
				      adapter->req_tx_entries_per_subcrq,
				      adapter->req_mtu + VLAN_HLEN);
		if (rc) {
			release_tx_pools(adapter);
			return rc;
		}

		rc = init_one_tx_pool(netdev, &adapter->tso_pool[i],
				      IBMVNIC_TSO_BUFS,
				      IBMVNIC_TSO_BUF_SZ);
		if (rc) {
			release_tx_pools(adapter);
			return rc;
		}
	}

	return 0;
}

static void ibmvnic_napi_enable(struct ibmvnic_adapter *adapter)
{
	int i;

	if (adapter->napi_enabled)
		return;

	for (i = 0; i < adapter->req_rx_queues; i++)
		napi_enable(&adapter->napi[i]);

	adapter->napi_enabled = true;
}

static void ibmvnic_napi_disable(struct ibmvnic_adapter *adapter)
{
	int i;

	if (!adapter->napi_enabled)
		return;

	for (i = 0; i < adapter->req_rx_queues; i++) {
		netdev_dbg(adapter->netdev, "Disabling napi[%d]\n", i);
		napi_disable(&adapter->napi[i]);
	}

	adapter->napi_enabled = false;
}

static int init_napi(struct ibmvnic_adapter *adapter)
{
	int i;

	adapter->napi = kcalloc(adapter->req_rx_queues,
				sizeof(struct napi_struct), GFP_KERNEL);
	if (!adapter->napi)
		return -ENOMEM;

	for (i = 0; i < adapter->req_rx_queues; i++) {
		netdev_dbg(adapter->netdev, "Adding napi[%d]\n", i);
		netif_napi_add(adapter->netdev, &adapter->napi[i],
			       ibmvnic_poll, NAPI_POLL_WEIGHT);
	}

	adapter->num_active_rx_napi = adapter->req_rx_queues;
	return 0;
}

static void release_napi(struct ibmvnic_adapter *adapter)
{
	int i;

	if (!adapter->napi)
		return;

	for (i = 0; i < adapter->num_active_rx_napi; i++) {
		netdev_dbg(adapter->netdev, "Releasing napi[%d]\n", i);
		netif_napi_del(&adapter->napi[i]);
	}

	kfree(adapter->napi);
	adapter->napi = NULL;
	adapter->num_active_rx_napi = 0;
	adapter->napi_enabled = false;
}

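/* Log in to the VNIC server. A PARTIALSUCCESS response means the server
 * could not honor the negotiated capabilities; in that case the sub-CRQs
 * are released and capability negotiation is redone before the login is
 * retried, giving up once more than IBMVNIC_MAX_QUEUES attempts fail.
 */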
static int ibmvnic_login(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	unsigned long timeout = msecs_to_jiffies(30000);
	int retry_count = 0;
	bool retry;
	int rc;

	do {
		retry = false;
		if (retry_count > IBMVNIC_MAX_QUEUES) {
			netdev_warn(netdev, "Login attempts exceeded\n");
			return -1;
		}

		adapter->init_done_rc = 0;
		reinit_completion(&adapter->init_done);
		rc = send_login(adapter);
		if (rc) {
			netdev_warn(netdev, "Unable to login\n");
			return rc;
		}

		if (!wait_for_completion_timeout(&adapter->init_done,
						 timeout)) {
			netdev_warn(netdev, "Login timed out\n");
			return -1;
		}

		if (adapter->init_done_rc == PARTIALSUCCESS) {
			retry_count++;
			release_sub_crqs(adapter, 1);

			retry = true;
			netdev_dbg(netdev,
				   "Received partial success, retrying...\n");
			adapter->init_done_rc = 0;
			reinit_completion(&adapter->init_done);
			send_cap_queries(adapter);
			if (!wait_for_completion_timeout(&adapter->init_done,
							 timeout)) {
				netdev_warn(netdev,
					    "Capabilities query timed out\n");
				return -1;
			}

			rc = init_sub_crqs(adapter);
			if (rc) {
				netdev_warn(netdev,
					    "SCRQ initialization failed\n");
				return -1;
			}

			rc = init_sub_crq_irqs(adapter);
			if (rc) {
				netdev_warn(netdev,
					    "SCRQ irq initialization failed\n");
				return -1;
			}
		} else if (adapter->init_done_rc) {
			netdev_warn(netdev, "Adapter login failed\n");
			return -1;
		}
	} while (retry);

	__ibmvnic_set_mac(netdev, adapter->mac_addr);

	return 0;
}

static void release_login_buffer(struct ibmvnic_adapter *adapter)
{
	kfree(adapter->login_buf);
	adapter->login_buf = NULL;
}

static void release_login_rsp_buffer(struct ibmvnic_adapter *adapter)
{
	kfree(adapter->login_rsp_buf);
	adapter->login_rsp_buf = NULL;
}

static void release_resources(struct ibmvnic_adapter *adapter)
{
	release_vpd_data(adapter);

	release_tx_pools(adapter);
	release_rx_pools(adapter);

	release_napi(adapter);
	release_login_rsp_buffer(adapter);
}

static int set_link_state(struct ibmvnic_adapter *adapter, u8 link_state)
{
	struct net_device *netdev = adapter->netdev;
	unsigned long timeout = msecs_to_jiffies(30000);
	union ibmvnic_crq crq;
	bool resend;
	int rc;

	netdev_dbg(netdev, "setting link state %d\n", link_state);

	memset(&crq, 0, sizeof(crq));
	crq.logical_link_state.first = IBMVNIC_CRQ_CMD;
	crq.logical_link_state.cmd = LOGICAL_LINK_STATE;
	crq.logical_link_state.link_state = link_state;

	do {
		resend = false;

		reinit_completion(&adapter->init_done);
		rc = ibmvnic_send_crq(adapter, &crq);
		if (rc) {
			netdev_err(netdev, "Failed to set link state\n");
			return rc;
		}

		if (!wait_for_completion_timeout(&adapter->init_done,
						 timeout)) {
			netdev_err(netdev, "timeout setting link state\n");
			return -1;
		}

		if (adapter->init_done_rc == 1) {
			/* Partial success, delay and re-send */
			mdelay(1000);
			resend = true;
		} else if (adapter->init_done_rc) {
			netdev_warn(netdev, "Unable to set link state, rc=%d\n",
				    adapter->init_done_rc);
			return adapter->init_done_rc;
		}
	} while (resend);

	return 0;
}

static int set_real_num_queues(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int rc;

	netdev_dbg(netdev, "Setting real tx/rx queues (%llx/%llx)\n",
		   adapter->req_tx_queues, adapter->req_rx_queues);

	rc = netif_set_real_num_tx_queues(netdev, adapter->req_tx_queues);
	if (rc) {
		netdev_err(netdev, "failed to set the number of tx queues\n");
		return rc;
	}

	rc = netif_set_real_num_rx_queues(netdev, adapter->req_rx_queues);
	if (rc)
		netdev_err(netdev, "failed to set the number of rx queues\n");

	return rc;
}

static int ibmvnic_get_vpd(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	union ibmvnic_crq crq;
	int len = 0;
	int rc;

	if (adapter->vpd->buff)
		len = adapter->vpd->len;

	init_completion(&adapter->fw_done);
	crq.get_vpd_size.first = IBMVNIC_CRQ_CMD;
	crq.get_vpd_size.cmd = GET_VPD_SIZE;
	rc = ibmvnic_send_crq(adapter, &crq);
	if (rc)
		return rc;
	wait_for_completion(&adapter->fw_done);

	if (!adapter->vpd->len)
		return -ENODATA;

	if (!adapter->vpd->buff) {
		adapter->vpd->buff = kzalloc(adapter->vpd->len, GFP_KERNEL);
	} else if (adapter->vpd->len != len) {
		/* Use a temporary so the old buffer is not leaked if
		 * krealloc fails.
		 */
		void *tmp = krealloc(adapter->vpd->buff,
				     adapter->vpd->len, GFP_KERNEL);

		if (!tmp) {
			kfree(adapter->vpd->buff);
			adapter->vpd->buff = NULL;
		} else {
			adapter->vpd->buff = tmp;
		}
	}

	if (!adapter->vpd->buff) {
		dev_err(dev, "Couldn't allocate VPD buffer\n");
		return -ENOMEM;
	}

	adapter->vpd->dma_addr =
		dma_map_single(dev, adapter->vpd->buff, adapter->vpd->len,
			       DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, adapter->vpd->dma_addr)) {
		dev_err(dev, "Could not map VPD buffer\n");
		kfree(adapter->vpd->buff);
		adapter->vpd->buff = NULL;
		return -ENOMEM;
	}

	reinit_completion(&adapter->fw_done);
	crq.get_vpd.first = IBMVNIC_CRQ_CMD;
	crq.get_vpd.cmd = GET_VPD;
	crq.get_vpd.ioba = cpu_to_be32(adapter->vpd->dma_addr);
	crq.get_vpd.len = cpu_to_be32((u32)adapter->vpd->len);
	rc = ibmvnic_send_crq(adapter, &crq);
	if (rc) {
		kfree(adapter->vpd->buff);
		adapter->vpd->buff = NULL;
		return rc;
	}
	wait_for_completion(&adapter->fw_done);

	return 0;
}

static int init_resources(struct ibmvnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int rc;

	rc = set_real_num_queues(netdev);
	if (rc)
		return rc;

	adapter->vpd = kzalloc(sizeof(*adapter->vpd), GFP_KERNEL);
	if (!adapter->vpd)
		return -ENOMEM;

	/* Vital Product Data (VPD) */
	rc = ibmvnic_get_vpd(adapter);
	if (rc) {
		netdev_err(netdev, "failed to initialize Vital Product Data (VPD)\n");
		return rc;
	}

	adapter->map_id = 1;

	rc = init_napi(adapter);
	if (rc)
		return rc;

	send_map_query(adapter);

	rc = init_rx_pools(netdev);
	if (rc)
		return rc;

	rc = init_tx_pools(netdev);
	return rc;
}

static int __ibmvnic_open(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	enum vnic_state prev_state = adapter->state;
	int i, rc;

	adapter->state = VNIC_OPENING;
	replenish_pools(adapter);
	ibmvnic_napi_enable(adapter);

	/* We're ready to receive frames, enable the sub-crq interrupts and
	 * set the logical link state to up
	 */
	for (i = 0; i < adapter->req_rx_queues; i++) {
		netdev_dbg(netdev, "Enabling rx_scrq[%d] irq\n", i);
		if (prev_state == VNIC_CLOSED)
			enable_irq(adapter->rx_scrq[i]->irq);
		enable_scrq_irq(adapter, adapter->rx_scrq[i]);
	}

	for (i = 0; i < adapter->req_tx_queues; i++) {
		netdev_dbg(netdev, "Enabling tx_scrq[%d] irq\n", i);
		if (prev_state == VNIC_CLOSED)
			enable_irq(adapter->tx_scrq[i]->irq);
		enable_scrq_irq(adapter, adapter->tx_scrq[i]);
	}

	rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_UP);
	if (rc) {
		for (i = 0; i < adapter->req_rx_queues; i++)
			napi_disable(&adapter->napi[i]);
		release_resources(adapter);
		return rc;
	}

	netif_tx_start_all_queues(netdev);

	if (prev_state == VNIC_CLOSED) {
		for (i = 0; i < adapter->req_rx_queues; i++)
			napi_schedule(&adapter->napi[i]);
	}

	adapter->state = VNIC_OPEN;
	return rc;
}

static int ibmvnic_open(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int rc;

	/* If device failover is pending, just set device state and return.
	 * Device operation will be handled by reset routine.
	 */
	if (adapter->failover_pending) {
		adapter->state = VNIC_OPEN;
		return 0;
	}

	if (adapter->state != VNIC_CLOSED) {
		rc = ibmvnic_login(netdev);
		if (rc)
			return rc;

		rc = init_resources(adapter);
		if (rc) {
			netdev_err(netdev, "failed to initialize resources\n");
			release_resources(adapter);
			return rc;
		}
	}

	rc = __ibmvnic_open(netdev);

	return rc;
}

static void clean_rx_pools(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_rx_pool *rx_pool;
	struct ibmvnic_rx_buff *rx_buff;
	u64 rx_entries;
	int rx_scrqs;
	int i, j;

	if (!adapter->rx_pool)
		return;

	rx_scrqs = adapter->num_active_rx_pools;
	rx_entries = adapter->req_rx_add_entries_per_subcrq;

	/* Free any remaining skbs in the rx buffer pools */
	for (i = 0; i < rx_scrqs; i++) {
		rx_pool = &adapter->rx_pool[i];
		if (!rx_pool || !rx_pool->rx_buff)
			continue;

		netdev_dbg(adapter->netdev, "Cleaning rx_pool[%d]\n", i);
		for (j = 0; j < rx_entries; j++) {
			rx_buff = &rx_pool->rx_buff[j];
			if (rx_buff && rx_buff->skb) {
				dev_kfree_skb_any(rx_buff->skb);
				rx_buff->skb = NULL;
			}
		}
	}
}

static void clean_one_tx_pool(struct ibmvnic_adapter *adapter,
			      struct ibmvnic_tx_pool *tx_pool)
{
	struct ibmvnic_tx_buff *tx_buff;
	u64 tx_entries;
	int i;

	if (!tx_pool || !tx_pool->tx_buff)
		return;

	tx_entries = tx_pool->num_buffers;

	for (i = 0; i < tx_entries; i++) {
		tx_buff = &tx_pool->tx_buff[i];
		if (tx_buff && tx_buff->skb) {
			dev_kfree_skb_any(tx_buff->skb);
			tx_buff->skb = NULL;
		}
	}
}

static void clean_tx_pools(struct ibmvnic_adapter *adapter)
{
	int tx_scrqs;
	int i;

	if (!adapter->tx_pool || !adapter->tso_pool)
		return;

	tx_scrqs = adapter->num_active_tx_pools;

	/* Free any remaining skbs in the tx buffer pools */
	for (i = 0; i < tx_scrqs; i++) {
		netdev_dbg(adapter->netdev, "Cleaning tx_pool[%d]\n", i);
		clean_one_tx_pool(adapter, &adapter->tx_pool[i]);
		clean_one_tx_pool(adapter, &adapter->tso_pool[i]);
	}
}

static void ibmvnic_disable_irqs(struct ibmvnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int i;

	if (adapter->tx_scrq) {
		for (i = 0; i < adapter->req_tx_queues; i++)
			if (adapter->tx_scrq[i]->irq) {
				netdev_dbg(netdev,
					   "Disabling tx_scrq[%d] irq\n", i);
				disable_scrq_irq(adapter, adapter->tx_scrq[i]);
				disable_irq(adapter->tx_scrq[i]->irq);
			}
	}

	if (adapter->rx_scrq) {
		for (i = 0; i < adapter->req_rx_queues; i++) {
			if (adapter->rx_scrq[i]->irq) {
				netdev_dbg(netdev,
					   "Disabling rx_scrq[%d] irq\n", i);
				disable_scrq_irq(adapter, adapter->rx_scrq[i]);
				disable_irq(adapter->rx_scrq[i]->irq);
			}
		}
	}
}

static void ibmvnic_cleanup(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	/* ensure that transmissions are stopped if called by do_reset */
	if (adapter->resetting)
		netif_tx_disable(netdev);
	else
		netif_tx_stop_all_queues(netdev);

	ibmvnic_napi_disable(adapter);
	ibmvnic_disable_irqs(adapter);

	clean_rx_pools(adapter);
	clean_tx_pools(adapter);
}

static int __ibmvnic_close(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int rc = 0;

	adapter->state = VNIC_CLOSING;
	rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_DN);
	if (rc)
		return rc;
	adapter->state = VNIC_CLOSED;
	return 0;
}

static int ibmvnic_close(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int rc;

	/* If device failover is pending, just set device state and return.
	 * Device operation will be handled by reset routine.
	 */
	if (adapter->failover_pending) {
		adapter->state = VNIC_CLOSED;
		return 0;
	}

	rc = __ibmvnic_close(netdev);
	ibmvnic_cleanup(netdev);

	return rc;
}

/**
 * build_hdr_data - creates L2/L3/L4 header data buffer
 * @hdr_field: bitfield determining needed headers
 * @skb: socket buffer
 * @hdr_len: array of header lengths to be filled in
 * @hdr_data: buffer to write the headers into
 *
 * Reads hdr_field to determine which headers are needed by firmware.
 * Builds a buffer containing these headers.  Saves individual header
 * lengths to be used to build descriptors.
 *
 * Return: total length of the header data placed in @hdr_data.
 */
static int build_hdr_data(u8 hdr_field, struct sk_buff *skb,
			  int *hdr_len, u8 *hdr_data)
{
	int len = 0;
	u8 *hdr;

	if (skb_vlan_tagged(skb) && !skb_vlan_tag_present(skb))
		hdr_len[0] = sizeof(struct vlan_ethhdr);
	else
		hdr_len[0] = sizeof(struct ethhdr);

	if (skb->protocol == htons(ETH_P_IP)) {
		hdr_len[1] = ip_hdr(skb)->ihl * 4;
		if (ip_hdr(skb)->protocol == IPPROTO_TCP)
			hdr_len[2] = tcp_hdrlen(skb);
		else if (ip_hdr(skb)->protocol == IPPROTO_UDP)
			hdr_len[2] = sizeof(struct udphdr);
	} else if (skb->protocol == htons(ETH_P_IPV6)) {
		hdr_len[1] = sizeof(struct ipv6hdr);
		if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
			hdr_len[2] = tcp_hdrlen(skb);
		else if (ipv6_hdr(skb)->nexthdr == IPPROTO_UDP)
			hdr_len[2] = sizeof(struct udphdr);
	} else if (skb->protocol == htons(ETH_P_ARP)) {
		hdr_len[1] = arp_hdr_len(skb->dev);
		hdr_len[2] = 0;
	}

	memset(hdr_data, 0, 120);
	if ((hdr_field >> 6) & 1) {
		hdr = skb_mac_header(skb);
		memcpy(hdr_data, hdr, hdr_len[0]);
		len += hdr_len[0];
	}

	if ((hdr_field >> 5) & 1) {
		hdr = skb_network_header(skb);
		memcpy(hdr_data + len, hdr, hdr_len[1]);
		len += hdr_len[1];
	}

	if ((hdr_field >> 4) & 1) {
		hdr = skb_transport_header(skb);
		memcpy(hdr_data + len, hdr, hdr_len[2]);
		len += hdr_len[2];
	}
	return len;
}

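/* For example, a hdr_field of 0x70 (bits 6, 5 and 4 set) asks for the
 * L2, L3 and L4 headers; for a plain TCP/IPv4 frame without options that
 * copies 14 + 20 + 20 = 54 bytes into hdr_data.
 */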
/**
 * create_hdr_descs - create header and header extension descriptors
 * @hdr_field: bitfield determining needed headers
 * @hdr_data: buffer containing header data
 * @len: length of data buffer
 * @hdr_len: array of individual header lengths
 * @scrq_arr: descriptor array
 *
 * Creates header and, if needed, header extension descriptors and
 * places them in a descriptor array, scrq_arr.
 *
 * Return: number of descriptors written to @scrq_arr.
 */
static int create_hdr_descs(u8 hdr_field, u8 *hdr_data, int len, int *hdr_len,
			    union sub_crq *scrq_arr)
{
	union sub_crq hdr_desc;
	int tmp_len = len;
	int num_descs = 0;
	u8 *data, *cur;
	int tmp;

	while (tmp_len > 0) {
		cur = hdr_data + len - tmp_len;

		memset(&hdr_desc, 0, sizeof(hdr_desc));
		if (cur != hdr_data) {
			data = hdr_desc.hdr_ext.data;
			tmp = tmp_len > 29 ? 29 : tmp_len;
			hdr_desc.hdr_ext.first = IBMVNIC_CRQ_CMD;
			hdr_desc.hdr_ext.type = IBMVNIC_HDR_EXT_DESC;
			hdr_desc.hdr_ext.len = tmp;
		} else {
			data = hdr_desc.hdr.data;
			tmp = tmp_len > 24 ? 24 : tmp_len;
			hdr_desc.hdr.first = IBMVNIC_CRQ_CMD;
			hdr_desc.hdr.type = IBMVNIC_HDR_DESC;
			hdr_desc.hdr.len = tmp;
			hdr_desc.hdr.l2_len = (u8)hdr_len[0];
			hdr_desc.hdr.l3_len = cpu_to_be16((u16)hdr_len[1]);
			hdr_desc.hdr.l4_len = (u8)hdr_len[2];
			hdr_desc.hdr.flag = hdr_field << 1;
		}
		memcpy(data, cur, tmp);
		tmp_len -= tmp;
		*scrq_arr = hdr_desc;
		scrq_arr++;
		num_descs++;
	}

	return num_descs;
}
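
/* Worked example: 54 bytes of TCP/IPv4 headers produce three
 * descriptors: the first header descriptor carries 24 bytes, and two
 * extension descriptors carry 29 and 1 bytes respectively.
 */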

/**
 * build_hdr_descs_arr - build a header descriptor array
 * @txbuff: tx buffer containing the skb and the descriptor array
 * @num_entries: pointer to the descriptor count, incremented for each
 *		 header descriptor added
 * @hdr_field: bit field determining which headers will be sent
 *
 * This function will build a TX descriptor array with applicable
 * L2/L3/L4 packet header descriptors to be sent by send_subcrq_indirect.
 */
static void build_hdr_descs_arr(struct ibmvnic_tx_buff *txbuff,
				int *num_entries, u8 hdr_field)
{
	int hdr_len[3] = {0, 0, 0};
	int tot_len;
	u8 *hdr_data = txbuff->hdr_data;

	tot_len = build_hdr_data(hdr_field, txbuff->skb, hdr_len,
				 txbuff->hdr_data);
	*num_entries += create_hdr_descs(hdr_field, hdr_data, tot_len, hdr_len,
			 txbuff->indir_arr + 1);
}

static int ibmvnic_xmit_workarounds(struct sk_buff *skb,
				    struct net_device *netdev)
{
	/* For some backing devices, mishandling of small packets
	 * can result in a loss of connection or TX stall. Device
	 * architects recommend that no packet should be smaller
	 * than the minimum MTU value provided to the driver, so
	 * pad any packets to that length.
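	 * Note that skb_put_padto() consumes (frees) the skb if extending
	 * it fails, so a non-zero return from this helper means the caller
	 * must not touch the skb again.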
	 */
	if (skb->len < netdev->min_mtu)
		return skb_put_padto(skb, netdev->min_mtu);

	return 0;
}

static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int queue_num = skb_get_queue_mapping(skb);
	u8 *hdrs = (u8 *)&adapter->tx_rx_desc_req;
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_tx_buff *tx_buff = NULL;
	struct ibmvnic_sub_crq_queue *tx_scrq;
	struct ibmvnic_tx_pool *tx_pool;
	unsigned int tx_send_failed = 0;
	unsigned int tx_map_failed = 0;
	unsigned int tx_dropped = 0;
	unsigned int tx_packets = 0;
	unsigned int tx_bytes = 0;
	dma_addr_t data_dma_addr;
	struct netdev_queue *txq;
	unsigned long lpar_rc;
	union sub_crq tx_crq;
	unsigned int offset;
	int num_entries = 1;
	unsigned char *dst;
	u64 *handle_array;
	int index = 0;
	u8 proto = 0;
	netdev_tx_t ret = NETDEV_TX_OK;

	if (adapter->resetting) {
		if (!netif_subqueue_stopped(netdev, skb))
			netif_stop_subqueue(netdev, queue_num);
		dev_kfree_skb_any(skb);

		tx_send_failed++;
		tx_dropped++;
		ret = NETDEV_TX_OK;
		goto out;
	}

	if (ibmvnic_xmit_workarounds(skb, netdev)) {
		tx_dropped++;
		tx_send_failed++;
		ret = NETDEV_TX_OK;
		goto out;
	}
	if (skb_is_gso(skb))
		tx_pool = &adapter->tso_pool[queue_num];
	else
		tx_pool = &adapter->tx_pool[queue_num];

	tx_scrq = adapter->tx_scrq[queue_num];
	txq = netdev_get_tx_queue(netdev, skb_get_queue_mapping(skb));
	handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
		be32_to_cpu(adapter->login_rsp_buf->off_txsubm_subcrqs));

	index = tx_pool->free_map[tx_pool->consumer_index];

	if (index == IBMVNIC_INVALID_MAP) {
		dev_kfree_skb_any(skb);
		tx_send_failed++;
		tx_dropped++;
		ret = NETDEV_TX_OK;
		goto out;
	}

	tx_pool->free_map[tx_pool->consumer_index] = IBMVNIC_INVALID_MAP;

	offset = index * tx_pool->buf_size;
	dst = tx_pool->long_term_buff.buff + offset;
	memset(dst, 0, tx_pool->buf_size);
	data_dma_addr = tx_pool->long_term_buff.addr + offset;

	if (skb_shinfo(skb)->nr_frags) {
		int cur, i;

		/* Copy the head */
		skb_copy_from_linear_data(skb, dst, skb_headlen(skb));
		cur = skb_headlen(skb);

		/* Copy the frags */
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			memcpy(dst + cur,
			       page_address(skb_frag_page(frag)) +
			       frag->page_offset, skb_frag_size(frag));
			cur += skb_frag_size(frag);
		}
	} else {
		skb_copy_from_linear_data(skb, dst, skb->len);
	}

	tx_pool->consumer_index =
	    (tx_pool->consumer_index + 1) % tx_pool->num_buffers;

	tx_buff = &tx_pool->tx_buff[index];
	tx_buff->skb = skb;
	tx_buff->data_dma[0] = data_dma_addr;
	tx_buff->data_len[0] = skb->len;
	tx_buff->index = index;
	tx_buff->pool_index = queue_num;
	tx_buff->last_frag = true;

	memset(&tx_crq, 0, sizeof(tx_crq));
	tx_crq.v1.first = IBMVNIC_CRQ_CMD;
	tx_crq.v1.type = IBMVNIC_TX_DESC;
	tx_crq.v1.n_crq_elem = 1;
	tx_crq.v1.n_sge = 1;
	tx_crq.v1.flags1 = IBMVNIC_TX_COMP_NEEDED;

	if (skb_is_gso(skb))
		tx_crq.v1.correlator =
			cpu_to_be32(index | IBMVNIC_TSO_POOL_MASK);
	else
		tx_crq.v1.correlator = cpu_to_be32(index);
	tx_crq.v1.dma_reg = cpu_to_be16(tx_pool->long_term_buff.map_id);
	tx_crq.v1.sge_len = cpu_to_be32(skb->len);
	tx_crq.v1.ioba = cpu_to_be64(data_dma_addr);

	if (adapter->vlan_header_insertion && skb_vlan_tag_present(skb)) {
		tx_crq.v1.flags2 |= IBMVNIC_TX_VLAN_INSERT;
		tx_crq.v1.vlan_id = cpu_to_be16(skb->vlan_tci);
	}

	if (skb->protocol == htons(ETH_P_IP)) {
		tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV4;
		proto = ip_hdr(skb)->protocol;
	} else if (skb->protocol == htons(ETH_P_IPV6)) {
		tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV6;
		proto = ipv6_hdr(skb)->nexthdr;
	}

	if (proto == IPPROTO_TCP)
		tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_TCP;
	else if (proto == IPPROTO_UDP)
		tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_UDP;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		tx_crq.v1.flags1 |= IBMVNIC_TX_CHKSUM_OFFLOAD;
		hdrs += 2;
	}
	if (skb_is_gso(skb)) {
		tx_crq.v1.flags1 |= IBMVNIC_TX_LSO;
		tx_crq.v1.mss = cpu_to_be16(skb_shinfo(skb)->gso_size);
		hdrs += 2;
	}
	/* determine if l2/3/4 headers are sent to firmware */
	if ((*hdrs >> 7) & 1) {
		build_hdr_descs_arr(tx_buff, &num_entries, *hdrs);
		tx_crq.v1.n_crq_elem = num_entries;
		tx_buff->num_entries = num_entries;
		tx_buff->indir_arr[0] = tx_crq;
		tx_buff->indir_dma = dma_map_single(dev, tx_buff->indir_arr,
						    sizeof(tx_buff->indir_arr),
						    DMA_TO_DEVICE);
		if (dma_mapping_error(dev, tx_buff->indir_dma)) {
			dev_kfree_skb_any(skb);
			tx_buff->skb = NULL;
			if (!firmware_has_feature(FW_FEATURE_CMO))
				dev_err(dev, "tx: unable to map descriptor array\n");
			tx_map_failed++;
			tx_dropped++;
			ret = NETDEV_TX_OK;
			goto tx_err_out;
		}
		lpar_rc = send_subcrq_indirect(adapter, handle_array[queue_num],
					       (u64)tx_buff->indir_dma,
					       (u64)num_entries);
		dma_unmap_single(dev, tx_buff->indir_dma,
				 sizeof(tx_buff->indir_arr), DMA_TO_DEVICE);
	} else {
		tx_buff->num_entries = num_entries;
		lpar_rc = send_subcrq(adapter, handle_array[queue_num],
				      &tx_crq);
	}
	if (lpar_rc != H_SUCCESS) {
		if (lpar_rc != H_CLOSED && lpar_rc != H_PARAMETER)
			dev_err_ratelimited(dev, "tx: send failed\n");
		dev_kfree_skb_any(skb);
		tx_buff->skb = NULL;

		if (lpar_rc == H_CLOSED || adapter->failover_pending) {
			/* Disable TX and report carrier off if queue is closed
			 * or pending failover.
			 * Firmware guarantees that a signal will be sent to the
			 * driver, triggering a reset or some other action.
			 */
			netif_tx_stop_all_queues(netdev);
			netif_carrier_off(netdev);
		}

		tx_send_failed++;
		tx_dropped++;
		ret = NETDEV_TX_OK;
		goto tx_err_out;
	}

	if (atomic_add_return(num_entries, &tx_scrq->used)
					>= adapter->req_tx_entries_per_subcrq) {
		netdev_dbg(netdev, "Stopping queue %d\n", queue_num);
		netif_stop_subqueue(netdev, queue_num);
	}

	tx_packets++;
	tx_bytes += skb->len;
	txq->trans_start = jiffies;
	ret = NETDEV_TX_OK;
	goto out;

tx_err_out:
	/* roll back consumer index and map array */
	if (tx_pool->consumer_index == 0)
		tx_pool->consumer_index =
			tx_pool->num_buffers - 1;
	else
		tx_pool->consumer_index--;
	tx_pool->free_map[tx_pool->consumer_index] = index;
out:
	netdev->stats.tx_dropped += tx_dropped;
	netdev->stats.tx_bytes += tx_bytes;
	netdev->stats.tx_packets += tx_packets;
	adapter->tx_send_failed += tx_send_failed;
	adapter->tx_map_failed += tx_map_failed;
	adapter->tx_stats_buffers[queue_num].packets += tx_packets;
	adapter->tx_stats_buffers[queue_num].bytes += tx_bytes;
	adapter->tx_stats_buffers[queue_num].dropped_packets += tx_dropped;

	return ret;
}

static void ibmvnic_set_multi(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	struct netdev_hw_addr *ha;
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.request_capability.first = IBMVNIC_CRQ_CMD;
	crq.request_capability.cmd = REQUEST_CAPABILITY;

	if (netdev->flags & IFF_PROMISC) {
		if (!adapter->promisc_supported)
			return;
	} else {
		if (netdev->flags & IFF_ALLMULTI) {
			/* Accept all multicast */
			memset(&crq, 0, sizeof(crq));
			crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
			crq.multicast_ctrl.cmd = MULTICAST_CTRL;
			crq.multicast_ctrl.flags = IBMVNIC_ENABLE_ALL;
			ibmvnic_send_crq(adapter, &crq);
		} else if (netdev_mc_empty(netdev)) {
			/* Reject all multicast */
			memset(&crq, 0, sizeof(crq));
			crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
			crq.multicast_ctrl.cmd = MULTICAST_CTRL;
			crq.multicast_ctrl.flags = IBMVNIC_DISABLE_ALL;
			ibmvnic_send_crq(adapter, &crq);
		} else {
			/* Accept one or more multicast(s) */
			netdev_for_each_mc_addr(ha, netdev) {
				memset(&crq, 0, sizeof(crq));
				crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
				crq.multicast_ctrl.cmd = MULTICAST_CTRL;
				crq.multicast_ctrl.flags = IBMVNIC_ENABLE_MC;
				ether_addr_copy(&crq.multicast_ctrl.mac_addr[0],
						ha->addr);
				ibmvnic_send_crq(adapter, &crq);
			}
		}
	}
}

static int __ibmvnic_set_mac(struct net_device *netdev, u8 *dev_addr)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	union ibmvnic_crq crq;
	int rc;

	if (!is_valid_ether_addr(dev_addr)) {
		rc = -EADDRNOTAVAIL;
		goto err;
	}

	memset(&crq, 0, sizeof(crq));
	crq.change_mac_addr.first = IBMVNIC_CRQ_CMD;
	crq.change_mac_addr.cmd = CHANGE_MAC_ADDR;
	ether_addr_copy(&crq.change_mac_addr.mac_addr[0], dev_addr);

	init_completion(&adapter->fw_done);
	rc = ibmvnic_send_crq(adapter, &crq);
	if (rc) {
		rc = -EIO;
		goto err;
	}

	wait_for_completion(&adapter->fw_done);
	/* netdev->dev_addr is changed in handle_change_mac_rsp function */
	if (adapter->fw_done_rc) {
		rc = -EIO;
		goto err;
	}

	return 0;
err:
	ether_addr_copy(adapter->mac_addr, netdev->dev_addr);
	return rc;
}

static int ibmvnic_set_mac(struct net_device *netdev, void *p)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	int rc;

	rc = 0;
	ether_addr_copy(adapter->mac_addr, addr->sa_data);
	if (adapter->state != VNIC_PROBED)
		rc = __ibmvnic_set_mac(netdev, addr->sa_data);

	return rc;
}

/**
 * do_reset - process one reset work item
 * @adapter: adapter being reset
 * @rwi: reset work item describing the reset reason
 * @reset_state: adapter state at the time the reset was scheduled
 *
 * Returns zero if we are able to keep processing reset events, or
 * non-zero if we hit a fatal error and must halt.
 */
1730 static int do_reset(struct ibmvnic_adapter *adapter,
1731 		    struct ibmvnic_rwi *rwi, u32 reset_state)
1732 {
1733 	u64 old_num_rx_queues, old_num_tx_queues;
1734 	u64 old_num_rx_slots, old_num_tx_slots;
1735 	struct net_device *netdev = adapter->netdev;
1736 	int i, rc;
1737 
1738 	netdev_dbg(adapter->netdev, "Re-setting driver (%d)\n",
1739 		   rwi->reset_reason);
1740 
1741 	netif_carrier_off(netdev);
1742 	adapter->reset_reason = rwi->reset_reason;
1743 
1744 	old_num_rx_queues = adapter->req_rx_queues;
1745 	old_num_tx_queues = adapter->req_tx_queues;
1746 	old_num_rx_slots = adapter->req_rx_add_entries_per_subcrq;
1747 	old_num_tx_slots = adapter->req_tx_entries_per_subcrq;
1748 
1749 	ibmvnic_cleanup(netdev);
1750 
1751 	if (reset_state == VNIC_OPEN &&
1752 	    adapter->reset_reason != VNIC_RESET_MOBILITY &&
1753 	    adapter->reset_reason != VNIC_RESET_FAILOVER) {
1754 		rc = __ibmvnic_close(netdev);
1755 		if (rc)
1756 			return rc;
1757 	}
1758 
1759 	if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM ||
1760 	    adapter->wait_for_reset) {
1761 		release_resources(adapter);
1762 		release_sub_crqs(adapter, 1);
1763 		release_crq_queue(adapter);
1764 	}
1765 
1766 	if (adapter->reset_reason != VNIC_RESET_NON_FATAL) {
1767 		/* remove the closed state so when we call open it appears
1768 		 * we are coming from the probed state.
1769 		 */
1770 		adapter->state = VNIC_PROBED;
1771 
1772 		if (adapter->wait_for_reset) {
1773 			rc = init_crq_queue(adapter);
1774 		} else if (adapter->reset_reason == VNIC_RESET_MOBILITY) {
1775 			rc = ibmvnic_reenable_crq_queue(adapter);
1776 			release_sub_crqs(adapter, 1);
1777 		} else {
1778 			rc = ibmvnic_reset_crq(adapter);
1779 			if (!rc)
1780 				rc = vio_enable_interrupts(adapter->vdev);
1781 		}
1782 
1783 		if (rc) {
1784 			netdev_err(adapter->netdev,
1785 				   "Couldn't initialize crq. rc=%d\n", rc);
1786 			return rc;
1787 		}
1788 
1789 		rc = ibmvnic_reset_init(adapter);
1790 		if (rc)
1791 			return IBMVNIC_INIT_FAILED;
1792 
1793 		/* If the adapter was in PROBE state prior to the reset,
1794 		 * exit here.
1795 		 */
1796 		if (reset_state == VNIC_PROBED)
1797 			return 0;
1798 
1799 		rc = ibmvnic_login(netdev);
1800 		if (rc) {
1801 			adapter->state = reset_state;
1802 			return rc;
1803 		}
1804 
1805 		if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM ||
1806 		    adapter->wait_for_reset) {
1807 			rc = init_resources(adapter);
1808 			if (rc)
1809 				return rc;
1810 		} else if (adapter->req_rx_queues != old_num_rx_queues ||
1811 			   adapter->req_tx_queues != old_num_tx_queues ||
1812 			   adapter->req_rx_add_entries_per_subcrq !=
1813 							old_num_rx_slots ||
1814 			   adapter->req_tx_entries_per_subcrq !=
1815 							old_num_tx_slots) {
1816 			release_rx_pools(adapter);
1817 			release_tx_pools(adapter);
1818 			release_napi(adapter);
1819 			release_vpd_data(adapter);
1820 
1821 			rc = init_resources(adapter);
1822 			if (rc)
1823 				return rc;
1824 
1825 		} else {
1826 			rc = reset_tx_pools(adapter);
1827 			if (rc)
1828 				return rc;
1829 
1830 			rc = reset_rx_pools(adapter);
1831 			if (rc)
1832 				return rc;
1833 		}
1834 		ibmvnic_disable_irqs(adapter);
1835 	}
1836 	adapter->state = VNIC_CLOSED;
1837 
1838 	if (reset_state == VNIC_CLOSED)
1839 		return 0;
1840 
1841 	rc = __ibmvnic_open(netdev);
1842 	if (rc) {
1843 		if (list_empty(&adapter->rwi_list))
1844 			adapter->state = VNIC_CLOSED;
1845 		else
1846 			adapter->state = reset_state;
1847 
1848 		return 0;
1849 	}
1850 
1851 	/* refresh device's multicast list */
1852 	ibmvnic_set_multi(netdev);
1853 
1854 	/* kick napi */
1855 	for (i = 0; i < adapter->req_rx_queues; i++)
1856 		napi_schedule(&adapter->napi[i]);
1857 
1858 	if (adapter->reset_reason != VNIC_RESET_FAILOVER &&
1859 	    adapter->reset_reason != VNIC_RESET_CHANGE_PARAM)
1860 		call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, netdev);
1861 
1862 	return 0;
1863 }
1864 
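/* do_hard_reset is the heavyweight counterpart of do_reset: rather than
 * reusing existing queues and buffers where possible, it tears down the
 * CRQ, all sub-CRQs, and all driver resources, then rebuilds everything
 * as if the device were freshly probed. Return semantics match
 * do_reset.
 */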
1865 static int do_hard_reset(struct ibmvnic_adapter *adapter,
1866 			 struct ibmvnic_rwi *rwi, u32 reset_state)
1867 {
1868 	struct net_device *netdev = adapter->netdev;
1869 	int rc;
1870 
1871 	netdev_dbg(adapter->netdev, "Hard resetting driver (%d)\n",
1872 		   rwi->reset_reason);
1873 
1874 	netif_carrier_off(netdev);
1875 	adapter->reset_reason = rwi->reset_reason;
1876 
1877 	ibmvnic_cleanup(netdev);
1878 	release_resources(adapter);
1879 	release_sub_crqs(adapter, 0);
1880 	release_crq_queue(adapter);
1881 
1882 	/* return to the probed state so that when open is called it
1883 	 * appears we are coming from a fresh probe
1884 	 */
1885 	adapter->state = VNIC_PROBED;
1886 
1887 	reinit_completion(&adapter->init_done);
1888 	rc = init_crq_queue(adapter);
1889 	if (rc) {
1890 		netdev_err(adapter->netdev,
1891 			   "Couldn't initialize crq. rc=%d\n", rc);
1892 		return rc;
1893 	}
1894 
1895 	rc = ibmvnic_init(adapter);
1896 	if (rc)
1897 		return rc;
1898 
1899 	/* If the adapter was in PROBE state prior to the reset,
1900 	 * exit here.
1901 	 */
1902 	if (reset_state == VNIC_PROBED)
1903 		return 0;
1904 
1905 	rc = ibmvnic_login(netdev);
1906 	if (rc) {
1907 		adapter->state = VNIC_PROBED;
1908 		return 0;
1909 	}
1910 
1911 	rc = init_resources(adapter);
1912 	if (rc)
1913 		return rc;
1914 
1915 	ibmvnic_disable_irqs(adapter);
1916 	adapter->state = VNIC_CLOSED;
1917 
1918 	if (reset_state == VNIC_CLOSED)
1919 		return 0;
1920 
1921 	rc = __ibmvnic_open(netdev);
1922 	if (rc) {
1923 		if (list_empty(&adapter->rwi_list))
1924 			adapter->state = VNIC_CLOSED;
1925 		else
1926 			adapter->state = reset_state;
1927 
1928 		return 0;
1929 	}
1930 
1931 	return 0;
1932 }
1933 
1934 static struct ibmvnic_rwi *get_next_rwi(struct ibmvnic_adapter *adapter)
1935 {
1936 	struct ibmvnic_rwi *rwi;
1937 	unsigned long flags;
1938 
1939 	spin_lock_irqsave(&adapter->rwi_lock, flags);
1940 
1941 	if (!list_empty(&adapter->rwi_list)) {
1942 		rwi = list_first_entry(&adapter->rwi_list, struct ibmvnic_rwi,
1943 				       list);
1944 		list_del(&rwi->list);
1945 	} else {
1946 		rwi = NULL;
1947 	}
1948 
1949 	spin_unlock_irqrestore(&adapter->rwi_lock, flags);
1950 	return rwi;
1951 }
1952 
1953 static void free_all_rwi(struct ibmvnic_adapter *adapter)
1954 {
1955 	struct ibmvnic_rwi *rwi;
1956 
1957 	rwi = get_next_rwi(adapter);
1958 	while (rwi) {
1959 		kfree(rwi);
1960 		rwi = get_next_rwi(adapter);
1961 	}
1962 }
1963 
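/* Reset worker, scheduled through adapter->ibmvnic_reset. It drains the
 * reset work item (rwi) queue one entry at a time, running either
 * do_hard_reset() (when a transport event has forced full recovery) or
 * do_reset() for each item, and discards any remaining items if a reset
 * fails fatally. The rtnl lock is taken here unless the reset came in
 * through wait_for_reset(), which already holds it.
 */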
1964 static void __ibmvnic_reset(struct work_struct *work)
1965 {
1966 	struct ibmvnic_rwi *rwi;
1967 	struct ibmvnic_adapter *adapter;
1968 	bool we_lock_rtnl = false;
1969 	u32 reset_state;
1970 	int rc = 0;
1971 
1972 	adapter = container_of(work, struct ibmvnic_adapter, ibmvnic_reset);
1973 
1974 	/* netif_set_real_num_xx_queues needs to take rtnl lock here
1975 	 * unless wait_for_reset is set, in which case the rtnl lock
1976 	 * has already been taken before initializing the reset
1977 	 */
1978 	if (!adapter->wait_for_reset) {
1979 		rtnl_lock();
1980 		we_lock_rtnl = true;
1981 	}
1982 	reset_state = adapter->state;
1983 
1984 	rwi = get_next_rwi(adapter);
1985 	while (rwi) {
1986 		if (adapter->force_reset_recovery) {
1987 			adapter->force_reset_recovery = false;
1988 			rc = do_hard_reset(adapter, rwi, reset_state);
1989 		} else {
1990 			rc = do_reset(adapter, rwi, reset_state);
1991 		}
1992 		kfree(rwi);
1993 		if (rc && rc != IBMVNIC_INIT_FAILED &&
1994 		    !adapter->force_reset_recovery)
1995 			break;
1996 
1997 		rwi = get_next_rwi(adapter);
1998 	}
1999 
2000 	if (adapter->wait_for_reset) {
2001 		adapter->wait_for_reset = false;
2002 		adapter->reset_done_rc = rc;
2003 		complete(&adapter->reset_done);
2004 	}
2005 
2006 	if (rc) {
2007 		netdev_dbg(adapter->netdev, "Reset failed\n");
2008 		free_all_rwi(adapter);
2009 	}
2010 
2011 	adapter->resetting = false;
2012 	if (we_lock_rtnl)
2013 		rtnl_unlock();
2014 }
2015 
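/* Queue a reset work item for the worker above. The request is rejected
 * with EBUSY if the adapter is being removed, a failover is pending, or
 * an rwi with the same reason is already queued, and with EAGAIN if it
 * arrives while the adapter is still probing. On a forced recovery
 * (transport event) all queued items are flushed first so only the new
 * reset is processed. Note that the positive errno is negated on
 * return.
 */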
2016 static int ibmvnic_reset(struct ibmvnic_adapter *adapter,
2017 			 enum ibmvnic_reset_reason reason)
2018 {
2019 	struct list_head *entry, *tmp_entry;
2020 	struct ibmvnic_rwi *rwi, *tmp;
2021 	struct net_device *netdev = adapter->netdev;
2022 	unsigned long flags;
2023 	int ret;
2024 
2025 	if (adapter->state == VNIC_REMOVING ||
2026 	    adapter->state == VNIC_REMOVED ||
2027 	    adapter->failover_pending) {
2028 		ret = EBUSY;
2029 		netdev_dbg(netdev, "Adapter removing or pending failover, skipping reset\n");
2030 		goto err;
2031 	}
2032 
2033 	if (adapter->state == VNIC_PROBING) {
2034 		netdev_warn(netdev, "Adapter reset during probe\n");
2035 		ret = adapter->init_done_rc = EAGAIN;
2036 		goto err;
2037 	}
2038 
2039 	spin_lock_irqsave(&adapter->rwi_lock, flags);
2040 
2041 	list_for_each(entry, &adapter->rwi_list) {
2042 		tmp = list_entry(entry, struct ibmvnic_rwi, list);
2043 		if (tmp->reset_reason == reason) {
2044 			netdev_dbg(netdev, "Skipping matching reset\n");
2045 			spin_unlock_irqrestore(&adapter->rwi_lock, flags);
2046 			ret = EBUSY;
2047 			goto err;
2048 		}
2049 	}
2050 
2051 	rwi = kzalloc(sizeof(*rwi), GFP_ATOMIC);
2052 	if (!rwi) {
2053 		spin_unlock_irqrestore(&adapter->rwi_lock, flags);
2054 		ibmvnic_close(netdev);
2055 		ret = ENOMEM;
2056 		goto err;
2057 	}
2058 	/* if we just received a transport event,
2059 	 * flush the reset queue and process only this reset
2060 	 */
2061 	if (adapter->force_reset_recovery && !list_empty(&adapter->rwi_list)) {
2062 		list_for_each_safe(entry, tmp_entry, &adapter->rwi_list)
2063 			list_del(entry);
2064 	}
2065 	rwi->reset_reason = reason;
2066 	list_add_tail(&rwi->list, &adapter->rwi_list);
2067 	spin_unlock_irqrestore(&adapter->rwi_lock, flags);
2068 	adapter->resetting = true;
2069 	netdev_dbg(adapter->netdev, "Scheduling reset (reason %d)\n", reason);
2070 	schedule_work(&adapter->ibmvnic_reset);
2071 
2072 	return 0;
2073 err:
2074 	if (adapter->wait_for_reset)
2075 		adapter->wait_for_reset = false;
2076 	return -ret;
2077 }
2078 
2079 static void ibmvnic_tx_timeout(struct net_device *dev)
2080 {
2081 	struct ibmvnic_adapter *adapter = netdev_priv(dev);
2082 
2083 	ibmvnic_reset(adapter, VNIC_RESET_TIMEOUT);
2084 }
2085 
2086 static void remove_buff_from_pool(struct ibmvnic_adapter *adapter,
2087 				  struct ibmvnic_rx_buff *rx_buff)
2088 {
2089 	struct ibmvnic_rx_pool *pool = &adapter->rx_pool[rx_buff->pool_index];
2090 
2091 	rx_buff->skb = NULL;
2092 
2093 	pool->free_map[pool->next_alloc] = (int)(rx_buff - pool->rx_buff);
2094 	pool->next_alloc = (pool->next_alloc + 1) % pool->size;
2095 
2096 	atomic_dec(&pool->available);
2097 }
2098 
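/* NAPI poll handler for one rx sub-CRQ. Each completion's correlator
 * holds a pointer to the ibmvnic_rx_buff that carried the frame; the
 * frame data is copied out of that still-mapped buffer into its skb,
 * the buffer is returned to the pool, and the skb is passed up via
 * napi_gro_receive(). The pool is replenished after the loop (unless
 * the adapter is closing), and the queue interrupt is re-enabled only
 * when we finish under budget.
 */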
2099 static int ibmvnic_poll(struct napi_struct *napi, int budget)
2100 {
2101 	struct net_device *netdev = napi->dev;
2102 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2103 	int scrq_num = (int)(napi - adapter->napi);
2104 	int frames_processed = 0;
2105 
2106 restart_poll:
2107 	while (frames_processed < budget) {
2108 		struct sk_buff *skb;
2109 		struct ibmvnic_rx_buff *rx_buff;
2110 		union sub_crq *next;
2111 		u32 length;
2112 		u16 offset;
2113 		u8 flags = 0;
2114 
2115 		if (unlikely(adapter->resetting &&
2116 			     adapter->reset_reason != VNIC_RESET_NON_FATAL)) {
2117 			enable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
2118 			napi_complete_done(napi, frames_processed);
2119 			return frames_processed;
2120 		}
2121 
2122 		if (!pending_scrq(adapter, adapter->rx_scrq[scrq_num]))
2123 			break;
2124 		next = ibmvnic_next_scrq(adapter, adapter->rx_scrq[scrq_num]);
2125 		rx_buff = (struct ibmvnic_rx_buff *)
2126 			  be64_to_cpu(next->rx_comp.correlator);
2128 		/* do error checking */
2129 		if (next->rx_comp.rc) {
2130 			netdev_dbg(netdev, "rx buffer returned with rc %x\n",
2131 				   be16_to_cpu(next->rx_comp.rc));
2132 			/* free the entry */
2133 			next->rx_comp.first = 0;
2134 			dev_kfree_skb_any(rx_buff->skb);
2135 			remove_buff_from_pool(adapter, rx_buff);
2136 			continue;
2137 		} else if (!rx_buff->skb) {
2138 			/* free the entry */
2139 			next->rx_comp.first = 0;
2140 			remove_buff_from_pool(adapter, rx_buff);
2141 			continue;
2142 		}
2143 
2144 		length = be32_to_cpu(next->rx_comp.len);
2145 		offset = be16_to_cpu(next->rx_comp.off_frame_data);
2146 		flags = next->rx_comp.flags;
2147 		skb = rx_buff->skb;
2148 		skb_copy_to_linear_data(skb, rx_buff->data + offset, length);
2150 
2151 		/* VLAN Header has been stripped by the system firmware and
2152 		 * needs to be inserted by the driver
2153 		 */
2154 		if (adapter->rx_vlan_header_insertion &&
2155 		    (flags & IBMVNIC_VLAN_STRIPPED))
2156 			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
2157 					       ntohs(next->rx_comp.vlan_tci));
2158 
2159 		/* free the entry */
2160 		next->rx_comp.first = 0;
2161 		remove_buff_from_pool(adapter, rx_buff);
2162 
2163 		skb_put(skb, length);
2164 		skb->protocol = eth_type_trans(skb, netdev);
2165 		skb_record_rx_queue(skb, scrq_num);
2166 
2167 		if (flags & IBMVNIC_IP_CHKSUM_GOOD &&
2168 		    flags & IBMVNIC_TCP_UDP_CHKSUM_GOOD) {
2169 			skb->ip_summed = CHECKSUM_UNNECESSARY;
2170 		}
2171 
2172 		length = skb->len;
2173 		napi_gro_receive(napi, skb); /* send it up */
2174 		netdev->stats.rx_packets++;
2175 		netdev->stats.rx_bytes += length;
2176 		adapter->rx_stats_buffers[scrq_num].packets++;
2177 		adapter->rx_stats_buffers[scrq_num].bytes += length;
2178 		frames_processed++;
2179 	}
2180 
2181 	if (adapter->state != VNIC_CLOSING)
2182 		replenish_rx_pool(adapter, &adapter->rx_pool[scrq_num]);
2183 
2184 	if (frames_processed < budget) {
2185 		enable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
2186 		napi_complete_done(napi, frames_processed);
2187 		if (pending_scrq(adapter, adapter->rx_scrq[scrq_num]) &&
2188 		    napi_reschedule(napi)) {
2189 			disable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
2190 			goto restart_poll;
2191 		}
2192 	}
2193 	return frames_processed;
2194 }
2195 
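/* Apply the values in adapter->desired by scheduling a CHANGE_PARAM
 * reset and blocking until the reset worker completes it. The current
 * settings are snapshotted into adapter->fallback first; if the reset
 * fails, a second CHANGE_PARAM reset is issued to restore the fallback
 * values and -EIO is returned.
 */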
2196 static int wait_for_reset(struct ibmvnic_adapter *adapter)
2197 {
2198 	int rc, ret;
2199 
2200 	adapter->fallback.mtu = adapter->req_mtu;
2201 	adapter->fallback.rx_queues = adapter->req_rx_queues;
2202 	adapter->fallback.tx_queues = adapter->req_tx_queues;
2203 	adapter->fallback.rx_entries = adapter->req_rx_add_entries_per_subcrq;
2204 	adapter->fallback.tx_entries = adapter->req_tx_entries_per_subcrq;
2205 
2206 	init_completion(&adapter->reset_done);
2207 	adapter->wait_for_reset = true;
2208 	rc = ibmvnic_reset(adapter, VNIC_RESET_CHANGE_PARAM);
2209 	if (rc)
2210 		return rc;
2211 	wait_for_completion(&adapter->reset_done);
2212 
2213 	ret = 0;
2214 	if (adapter->reset_done_rc) {
2215 		ret = -EIO;
2216 		adapter->desired.mtu = adapter->fallback.mtu;
2217 		adapter->desired.rx_queues = adapter->fallback.rx_queues;
2218 		adapter->desired.tx_queues = adapter->fallback.tx_queues;
2219 		adapter->desired.rx_entries = adapter->fallback.rx_entries;
2220 		adapter->desired.tx_entries = adapter->fallback.tx_entries;
2221 
2222 		init_completion(&adapter->reset_done);
2223 		adapter->wait_for_reset = true;
2224 		rc = ibmvnic_reset(adapter, VNIC_RESET_CHANGE_PARAM);
2225 		if (rc)
2226 			return ret;
2227 		wait_for_completion(&adapter->reset_done);
2228 	}
2229 	adapter->wait_for_reset = false;
2230 
2231 	return ret;
2232 }
2233 
2234 static int ibmvnic_change_mtu(struct net_device *netdev, int new_mtu)
2235 {
2236 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2237 
2238 	adapter->desired.mtu = new_mtu + ETH_HLEN;
2239 
2240 	return wait_for_reset(adapter);
2241 }
2242 
2243 static netdev_features_t ibmvnic_features_check(struct sk_buff *skb,
2244 						struct net_device *dev,
2245 						netdev_features_t features)
2246 {
2247 	/* Some backing hardware adapters cannot
2248 	 * handle packets with an MSS less than 224
2249 	 * or with only one segment.
2250 	 */
2251 	if (skb_is_gso(skb)) {
2252 		if (skb_shinfo(skb)->gso_size < 224 ||
2253 		    skb_shinfo(skb)->gso_segs == 1)
2254 			features &= ~NETIF_F_GSO_MASK;
2255 	}
2256 
2257 	return features;
2258 }
2259 
2260 static const struct net_device_ops ibmvnic_netdev_ops = {
2261 	.ndo_open		= ibmvnic_open,
2262 	.ndo_stop		= ibmvnic_close,
2263 	.ndo_start_xmit		= ibmvnic_xmit,
2264 	.ndo_set_rx_mode	= ibmvnic_set_multi,
2265 	.ndo_set_mac_address	= ibmvnic_set_mac,
2266 	.ndo_validate_addr	= eth_validate_addr,
2267 	.ndo_tx_timeout		= ibmvnic_tx_timeout,
2268 	.ndo_change_mtu		= ibmvnic_change_mtu,
2269 	.ndo_features_check     = ibmvnic_features_check,
2270 };
2271 
2272 /* ethtool functions */
2273 
2274 static int ibmvnic_get_link_ksettings(struct net_device *netdev,
2275 				      struct ethtool_link_ksettings *cmd)
2276 {
2277 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2278 	int rc;
2279 
2280 	rc = send_query_phys_parms(adapter);
2281 	if (rc) {
2282 		adapter->speed = SPEED_UNKNOWN;
2283 		adapter->duplex = DUPLEX_UNKNOWN;
2284 	}
2285 	cmd->base.speed = adapter->speed;
2286 	cmd->base.duplex = adapter->duplex;
2287 	cmd->base.port = PORT_FIBRE;
2288 	cmd->base.phy_address = 0;
2289 	cmd->base.autoneg = AUTONEG_ENABLE;
2290 
2291 	return 0;
2292 }
2293 
2294 static void ibmvnic_get_drvinfo(struct net_device *netdev,
2295 				struct ethtool_drvinfo *info)
2296 {
2297 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2298 
2299 	strlcpy(info->driver, ibmvnic_driver_name, sizeof(info->driver));
2300 	strlcpy(info->version, IBMVNIC_DRIVER_VERSION, sizeof(info->version));
2301 	strlcpy(info->fw_version, adapter->fw_version,
2302 		sizeof(info->fw_version));
2303 }
2304 
2305 static u32 ibmvnic_get_msglevel(struct net_device *netdev)
2306 {
2307 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2308 
2309 	return adapter->msg_enable;
2310 }
2311 
2312 static void ibmvnic_set_msglevel(struct net_device *netdev, u32 data)
2313 {
2314 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2315 
2316 	adapter->msg_enable = data;
2317 }
2318 
2319 static u32 ibmvnic_get_link(struct net_device *netdev)
2320 {
2321 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2322 
2323 	/* Don't need to send a query because we request a logical link up at
2324 	 * init and then we wait for link state indications
2325 	 */
2326 	return adapter->logical_link_state;
2327 }
2328 
2329 static void ibmvnic_get_ringparam(struct net_device *netdev,
2330 				  struct ethtool_ringparam *ring)
2331 {
2332 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2333 
2334 	if (adapter->priv_flags & IBMVNIC_USE_SERVER_MAXES) {
2335 		ring->rx_max_pending = adapter->max_rx_add_entries_per_subcrq;
2336 		ring->tx_max_pending = adapter->max_tx_entries_per_subcrq;
2337 	} else {
2338 		ring->rx_max_pending = IBMVNIC_MAX_QUEUE_SZ;
2339 		ring->tx_max_pending = IBMVNIC_MAX_QUEUE_SZ;
2340 	}
2341 	ring->rx_mini_max_pending = 0;
2342 	ring->rx_jumbo_max_pending = 0;
2343 	ring->rx_pending = adapter->req_rx_add_entries_per_subcrq;
2344 	ring->tx_pending = adapter->req_tx_entries_per_subcrq;
2345 	ring->rx_mini_pending = 0;
2346 	ring->rx_jumbo_pending = 0;
2347 }
2348 
2349 static int ibmvnic_set_ringparam(struct net_device *netdev,
2350 				 struct ethtool_ringparam *ring)
2351 {
2352 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2353 	int ret;
2354 
2355 	ret = 0;
2356 	adapter->desired.rx_entries = ring->rx_pending;
2357 	adapter->desired.tx_entries = ring->tx_pending;
2358 
2359 	ret = wait_for_reset(adapter);
2360 
2361 	if (!ret &&
2362 	    (adapter->req_rx_add_entries_per_subcrq != ring->rx_pending ||
2363 	     adapter->req_tx_entries_per_subcrq != ring->tx_pending))
2364 		netdev_info(netdev,
2365 			    "Could not match full ringsize request. Requested: RX %d, TX %d; Allowed: RX %llu, TX %llu\n",
2366 			    ring->rx_pending, ring->tx_pending,
2367 			    adapter->req_rx_add_entries_per_subcrq,
2368 			    adapter->req_tx_entries_per_subcrq);
2369 	return ret;
2370 }
2371 
2372 static void ibmvnic_get_channels(struct net_device *netdev,
2373 				 struct ethtool_channels *channels)
2374 {
2375 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2376 
2377 	if (adapter->priv_flags & IBMVNIC_USE_SERVER_MAXES) {
2378 		channels->max_rx = adapter->max_rx_queues;
2379 		channels->max_tx = adapter->max_tx_queues;
2380 	} else {
2381 		channels->max_rx = IBMVNIC_MAX_QUEUES;
2382 		channels->max_tx = IBMVNIC_MAX_QUEUES;
2383 	}
2384 
2385 	channels->max_other = 0;
2386 	channels->max_combined = 0;
2387 	channels->rx_count = adapter->req_rx_queues;
2388 	channels->tx_count = adapter->req_tx_queues;
2389 	channels->other_count = 0;
2390 	channels->combined_count = 0;
2391 }
2392 
2393 static int ibmvnic_set_channels(struct net_device *netdev,
2394 				struct ethtool_channels *channels)
2395 {
2396 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2397 	int ret;
2398 
2399 	ret = 0;
2400 	adapter->desired.rx_queues = channels->rx_count;
2401 	adapter->desired.tx_queues = channels->tx_count;
2402 
2403 	ret = wait_for_reset(adapter);
2404 
2405 	if (!ret &&
2406 	    (adapter->req_rx_queues != channels->rx_count ||
2407 	     adapter->req_tx_queues != channels->tx_count))
2408 		netdev_info(netdev,
2409 			    "Could not match full channels request. Requested: RX %d, TX %d; Allowed: RX %llu, TX %llu\n",
2410 			    channels->rx_count, channels->tx_count,
2411 			    adapter->req_rx_queues, adapter->req_tx_queues);
2412 	return ret;
2414 }
2415 
2416 static void ibmvnic_get_strings(struct net_device *dev, u32 stringset, u8 *data)
2417 {
2418 	struct ibmvnic_adapter *adapter = netdev_priv(dev);
2419 	int i;
2420 
2421 	switch (stringset) {
2422 	case ETH_SS_STATS:
2423 		for (i = 0; i < ARRAY_SIZE(ibmvnic_stats);
2424 				i++, data += ETH_GSTRING_LEN)
2425 			memcpy(data, ibmvnic_stats[i].name, ETH_GSTRING_LEN);
2426 
2427 		for (i = 0; i < adapter->req_tx_queues; i++) {
2428 			snprintf(data, ETH_GSTRING_LEN, "tx%d_packets", i);
2429 			data += ETH_GSTRING_LEN;
2430 
2431 			snprintf(data, ETH_GSTRING_LEN, "tx%d_bytes", i);
2432 			data += ETH_GSTRING_LEN;
2433 
2434 			snprintf(data, ETH_GSTRING_LEN,
2435 				 "tx%d_dropped_packets", i);
2436 			data += ETH_GSTRING_LEN;
2437 		}
2438 
2439 		for (i = 0; i < adapter->req_rx_queues; i++) {
2440 			snprintf(data, ETH_GSTRING_LEN, "rx%d_packets", i);
2441 			data += ETH_GSTRING_LEN;
2442 
2443 			snprintf(data, ETH_GSTRING_LEN, "rx%d_bytes", i);
2444 			data += ETH_GSTRING_LEN;
2445 
2446 			snprintf(data, ETH_GSTRING_LEN, "rx%d_interrupts", i);
2447 			data += ETH_GSTRING_LEN;
2448 		}
2449 		break;
2450 
2451 	case ETH_SS_PRIV_FLAGS:
2452 		for (i = 0; i < ARRAY_SIZE(ibmvnic_priv_flags); i++)
2453 			strcpy(data + i * ETH_GSTRING_LEN,
2454 			       ibmvnic_priv_flags[i]);
2455 		break;
2456 	default:
2457 		return;
2458 	}
2459 }
2460 
2461 static int ibmvnic_get_sset_count(struct net_device *dev, int sset)
2462 {
2463 	struct ibmvnic_adapter *adapter = netdev_priv(dev);
2464 
2465 	switch (sset) {
2466 	case ETH_SS_STATS:
2467 		return ARRAY_SIZE(ibmvnic_stats) +
2468 		       adapter->req_tx_queues * NUM_TX_STATS +
2469 		       adapter->req_rx_queues * NUM_RX_STATS;
2470 	case ETH_SS_PRIV_FLAGS:
2471 		return ARRAY_SIZE(ibmvnic_priv_flags);
2472 	default:
2473 		return -EOPNOTSUPP;
2474 	}
2475 }
2476 
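/* The device statistics live on the VNIC server side: a
 * REQUEST_STATISTICS CRQ asks the server to write them into the DMA
 * buffer identified by adapter->stats_token, and we block on stats_done
 * until the response handler signals completion before appending the
 * driver's own per-queue counters.
 */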
2477 static void ibmvnic_get_ethtool_stats(struct net_device *dev,
2478 				      struct ethtool_stats *stats, u64 *data)
2479 {
2480 	struct ibmvnic_adapter *adapter = netdev_priv(dev);
2481 	union ibmvnic_crq crq;
2482 	int i, j;
2483 	int rc;
2484 
2485 	memset(&crq, 0, sizeof(crq));
2486 	crq.request_statistics.first = IBMVNIC_CRQ_CMD;
2487 	crq.request_statistics.cmd = REQUEST_STATISTICS;
2488 	crq.request_statistics.ioba = cpu_to_be32(adapter->stats_token);
2489 	crq.request_statistics.len =
2490 	    cpu_to_be32(sizeof(struct ibmvnic_statistics));
2491 
2492 	/* Wait for data to be written */
2493 	init_completion(&adapter->stats_done);
2494 	rc = ibmvnic_send_crq(adapter, &crq);
2495 	if (rc)
2496 		return;
2497 	wait_for_completion(&adapter->stats_done);
2498 
2499 	for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++)
2500 		data[i] = be64_to_cpu(IBMVNIC_GET_STAT(adapter,
2501 						ibmvnic_stats[i].offset));
2502 
2503 	for (j = 0; j < adapter->req_tx_queues; j++) {
2504 		data[i] = adapter->tx_stats_buffers[j].packets;
2505 		i++;
2506 		data[i] = adapter->tx_stats_buffers[j].bytes;
2507 		i++;
2508 		data[i] = adapter->tx_stats_buffers[j].dropped_packets;
2509 		i++;
2510 	}
2511 
2512 	for (j = 0; j < adapter->req_rx_queues; j++) {
2513 		data[i] = adapter->rx_stats_buffers[j].packets;
2514 		i++;
2515 		data[i] = adapter->rx_stats_buffers[j].bytes;
2516 		i++;
2517 		data[i] = adapter->rx_stats_buffers[j].interrupts;
2518 		i++;
2519 	}
2520 }
2521 
2522 static u32 ibmvnic_get_priv_flags(struct net_device *netdev)
2523 {
2524 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2525 
2526 	return adapter->priv_flags;
2527 }
2528 
2529 static int ibmvnic_set_priv_flags(struct net_device *netdev, u32 flags)
2530 {
2531 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2532 	bool which_maxes = !!(flags & IBMVNIC_USE_SERVER_MAXES);
2533 
2534 	if (which_maxes)
2535 		adapter->priv_flags |= IBMVNIC_USE_SERVER_MAXES;
2536 	else
2537 		adapter->priv_flags &= ~IBMVNIC_USE_SERVER_MAXES;
2538 
2539 	return 0;
2540 }

2541 static const struct ethtool_ops ibmvnic_ethtool_ops = {
2542 	.get_drvinfo		= ibmvnic_get_drvinfo,
2543 	.get_msglevel		= ibmvnic_get_msglevel,
2544 	.set_msglevel		= ibmvnic_set_msglevel,
2545 	.get_link		= ibmvnic_get_link,
2546 	.get_ringparam		= ibmvnic_get_ringparam,
2547 	.set_ringparam		= ibmvnic_set_ringparam,
2548 	.get_channels		= ibmvnic_get_channels,
2549 	.set_channels		= ibmvnic_set_channels,
2550 	.get_strings            = ibmvnic_get_strings,
2551 	.get_sset_count         = ibmvnic_get_sset_count,
2552 	.get_ethtool_stats	= ibmvnic_get_ethtool_stats,
2553 	.get_link_ksettings	= ibmvnic_get_link_ksettings,
2554 	.get_priv_flags		= ibmvnic_get_priv_flags,
2555 	.set_priv_flags		= ibmvnic_set_priv_flags,
2556 };
2557 
2558 /* Routines for managing CRQs/sCRQs  */
2559 
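/* Re-register one sub-CRQ with the hypervisor after a reset. The irq
 * mapping is torn down (to be re-created once irqs are re-initialized),
 * the four queue pages are zeroed, and h_reg_sub_crq registers the same
 * DMA token again, returning fresh crq_num and hw_irq values.
 */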
2560 static int reset_one_sub_crq_queue(struct ibmvnic_adapter *adapter,
2561 				   struct ibmvnic_sub_crq_queue *scrq)
2562 {
2563 	int rc;
2564 
2565 	if (scrq->irq) {
2566 		free_irq(scrq->irq, scrq);
2567 		irq_dispose_mapping(scrq->irq);
2568 		scrq->irq = 0;
2569 	}
2570 
2571 	memset(scrq->msgs, 0, 4 * PAGE_SIZE);
2572 	atomic_set(&scrq->used, 0);
2573 	scrq->cur = 0;
2574 
2575 	rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token,
2576 			   4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq);
2577 	return rc;
2578 }
2579 
2580 static int reset_sub_crq_queues(struct ibmvnic_adapter *adapter)
2581 {
2582 	int i, rc;
2583 
2584 	for (i = 0; i < adapter->req_tx_queues; i++) {
2585 		netdev_dbg(adapter->netdev, "Re-setting tx_scrq[%d]\n", i);
2586 		rc = reset_one_sub_crq_queue(adapter, adapter->tx_scrq[i]);
2587 		if (rc)
2588 			return rc;
2589 	}
2590 
2591 	for (i = 0; i < adapter->req_rx_queues; i++) {
2592 		netdev_dbg(adapter->netdev, "Re-setting rx_scrq[%d]\n", i);
2593 		rc = reset_one_sub_crq_queue(adapter, adapter->rx_scrq[i]);
2594 		if (rc)
2595 			return rc;
2596 	}
2597 
2598 	return rc;
2599 }
2600 
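/* Release one sub-CRQ. When do_h_free is set, the queue is first
 * deregistered with H_FREE_SUB_CRQ, retrying while the hypervisor
 * reports a busy or long-busy state; the message area is then unmapped
 * and freed either way. Callers pass do_h_free = false when the
 * hypervisor side is expected to be gone already, as in a hard reset.
 */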
2601 static void release_sub_crq_queue(struct ibmvnic_adapter *adapter,
2602 				  struct ibmvnic_sub_crq_queue *scrq,
2603 				  bool do_h_free)
2604 {
2605 	struct device *dev = &adapter->vdev->dev;
2606 	long rc;
2607 
2608 	netdev_dbg(adapter->netdev, "Releasing sub-CRQ\n");
2609 
2610 	if (do_h_free) {
2611 		/* Close the sub-crqs */
2612 		do {
2613 			rc = plpar_hcall_norets(H_FREE_SUB_CRQ,
2614 						adapter->vdev->unit_address,
2615 						scrq->crq_num);
2616 		} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
2617 
2618 		if (rc) {
2619 			netdev_err(adapter->netdev,
2620 				   "Failed to release sub-CRQ %16lx, rc = %ld\n",
2621 				   scrq->crq_num, rc);
2622 		}
2623 	}
2624 
2625 	dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
2626 			 DMA_BIDIRECTIONAL);
2627 	free_pages((unsigned long)scrq->msgs, 2);
2628 	kfree(scrq);
2629 }
2630 
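/* Allocate and register a single sub-CRQ. An order-2 (four page)
 * message area is allocated, DMA mapped, and registered with the
 * hypervisor via h_reg_sub_crq. On H_RESOURCE the main CRQ is reset;
 * H_CLOSED (partner adapter not ready) is logged but not treated as
 * fatal here.
 */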
2631 static struct ibmvnic_sub_crq_queue *init_sub_crq_queue(struct ibmvnic_adapter
2632 							*adapter)
2633 {
2634 	struct device *dev = &adapter->vdev->dev;
2635 	struct ibmvnic_sub_crq_queue *scrq;
2636 	int rc;
2637 
2638 	scrq = kzalloc(sizeof(*scrq), GFP_KERNEL);
2639 	if (!scrq)
2640 		return NULL;
2641 
2642 	scrq->msgs =
2643 		(union sub_crq *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 2);
2644 	if (!scrq->msgs) {
2645 		dev_warn(dev, "Couldn't allocate crq queue messages page\n");
2646 		goto zero_page_failed;
2647 	}
2648 
2649 	scrq->msg_token = dma_map_single(dev, scrq->msgs, 4 * PAGE_SIZE,
2650 					 DMA_BIDIRECTIONAL);
2651 	if (dma_mapping_error(dev, scrq->msg_token)) {
2652 		dev_warn(dev, "Couldn't map crq queue messages page\n");
2653 		goto map_failed;
2654 	}
2655 
2656 	rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token,
2657 			   4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq);
2658 
2659 	if (rc == H_RESOURCE)
2660 		rc = ibmvnic_reset_crq(adapter);
2661 
2662 	if (rc == H_CLOSED) {
2663 		dev_warn(dev, "Partner adapter not ready, waiting.\n");
2664 	} else if (rc) {
2665 		dev_warn(dev, "Error %d registering sub-crq\n", rc);
2666 		goto reg_failed;
2667 	}
2668 
2669 	scrq->adapter = adapter;
2670 	scrq->size = 4 * PAGE_SIZE / sizeof(*scrq->msgs);
2671 	spin_lock_init(&scrq->lock);
2672 
2673 	netdev_dbg(adapter->netdev,
2674 		   "sub-crq initialized, num %lx, hw_irq=%lx, irq=%x\n",
2675 		   scrq->crq_num, scrq->hw_irq, scrq->irq);
2676 
2677 	return scrq;
2678 
2679 reg_failed:
2680 	dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
2681 			 DMA_BIDIRECTIONAL);
2682 map_failed:
2683 	free_pages((unsigned long)scrq->msgs, 2);
2684 zero_page_failed:
2685 	kfree(scrq);
2686 
2687 	return NULL;
2688 }
2689 
2690 static void release_sub_crqs(struct ibmvnic_adapter *adapter, bool do_h_free)
2691 {
2692 	int i;
2693 
2694 	if (adapter->tx_scrq) {
2695 		for (i = 0; i < adapter->num_active_tx_scrqs; i++) {
2696 			if (!adapter->tx_scrq[i])
2697 				continue;
2698 
2699 			netdev_dbg(adapter->netdev, "Releasing tx_scrq[%d]\n",
2700 				   i);
2701 			if (adapter->tx_scrq[i]->irq) {
2702 				free_irq(adapter->tx_scrq[i]->irq,
2703 					 adapter->tx_scrq[i]);
2704 				irq_dispose_mapping(adapter->tx_scrq[i]->irq);
2705 				adapter->tx_scrq[i]->irq = 0;
2706 			}
2707 
2708 			release_sub_crq_queue(adapter, adapter->tx_scrq[i],
2709 					      do_h_free);
2710 		}
2711 
2712 		kfree(adapter->tx_scrq);
2713 		adapter->tx_scrq = NULL;
2714 		adapter->num_active_tx_scrqs = 0;
2715 	}
2716 
2717 	if (adapter->rx_scrq) {
2718 		for (i = 0; i < adapter->num_active_rx_scrqs; i++) {
2719 			if (!adapter->rx_scrq[i])
2720 				continue;
2721 
2722 			netdev_dbg(adapter->netdev, "Releasing rx_scrq[%d]\n",
2723 				   i);
2724 			if (adapter->rx_scrq[i]->irq) {
2725 				free_irq(adapter->rx_scrq[i]->irq,
2726 					 adapter->rx_scrq[i]);
2727 				irq_dispose_mapping(adapter->rx_scrq[i]->irq);
2728 				adapter->rx_scrq[i]->irq = 0;
2729 			}
2730 
2731 			release_sub_crq_queue(adapter, adapter->rx_scrq[i],
2732 					      do_h_free);
2733 		}
2734 
2735 		kfree(adapter->rx_scrq);
2736 		adapter->rx_scrq = NULL;
2737 		adapter->num_active_rx_scrqs = 0;
2738 	}
2739 }
2740 
2741 static int disable_scrq_irq(struct ibmvnic_adapter *adapter,
2742 			    struct ibmvnic_sub_crq_queue *scrq)
2743 {
2744 	struct device *dev = &adapter->vdev->dev;
2745 	unsigned long rc;
2746 
2747 	rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
2748 				H_DISABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0);
2749 	if (rc)
2750 		dev_err(dev, "Couldn't disable scrq irq 0x%lx. rc=%ld\n",
2751 			scrq->hw_irq, rc);
2752 	return rc;
2753 }
2754 
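/* Enable a sub-CRQ interrupt source via H_VIOCTL. After a mobility
 * reset an interrupt may have been left pending by the source system,
 * so an H_EOI is issued first to clear it before the source is
 * re-enabled.
 */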
2755 static int enable_scrq_irq(struct ibmvnic_adapter *adapter,
2756 			   struct ibmvnic_sub_crq_queue *scrq)
2757 {
2758 	struct device *dev = &adapter->vdev->dev;
2759 	unsigned long rc;
2760 
2761 	if (scrq->hw_irq > 0x100000000ULL) {
2762 		dev_err(dev, "bad hw_irq = %lx\n", scrq->hw_irq);
2763 		return 1;
2764 	}
2765 
2766 	if (adapter->resetting &&
2767 	    adapter->reset_reason == VNIC_RESET_MOBILITY) {
2768 		u64 val = (0xff000000) | scrq->hw_irq;
2769 
2770 		rc = plpar_hcall_norets(H_EOI, val);
2771 		if (rc)
2772 			dev_err(dev, "H_EOI FAILED irq 0x%llx. rc=%ld\n",
2773 				val, rc);
2774 	}
2775 
2776 	rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
2777 				H_ENABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0);
2778 	if (rc)
2779 		dev_err(dev, "Couldn't enable scrq irq 0x%lx. rc=%ld\n",
2780 			scrq->hw_irq, rc);
2781 	return rc;
2782 }
2783 
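/* Process tx completions for one sub-CRQ. Each completion's correlator
 * is an index into either the tso_pool (flagged by
 * IBMVNIC_TSO_POOL_MASK) or the regular tx_pool for this queue; the
 * buffer is returned to the pool's free_map, and the skb is freed on
 * its last fragment. The subqueue is woken once the used count drops to
 * half the ring or less. Re-checking pending_scrq() after re-enabling
 * the irq closes the race with completions that arrive in between.
 */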
2784 static int ibmvnic_complete_tx(struct ibmvnic_adapter *adapter,
2785 			       struct ibmvnic_sub_crq_queue *scrq)
2786 {
2787 	struct device *dev = &adapter->vdev->dev;
2788 	struct ibmvnic_tx_pool *tx_pool;
2789 	struct ibmvnic_tx_buff *txbuff;
2790 	union sub_crq *next;
2791 	int index;
2792 	int i, j;
2793 
2794 restart_loop:
2795 	while (pending_scrq(adapter, scrq)) {
2796 		unsigned int pool = scrq->pool_index;
2797 		int num_entries = 0;
2798 
2799 		next = ibmvnic_next_scrq(adapter, scrq);
2800 		for (i = 0; i < next->tx_comp.num_comps; i++) {
2801 			if (next->tx_comp.rcs[i]) {
2802 				dev_err(dev, "tx error %x\n",
2803 					next->tx_comp.rcs[i]);
2804 				continue;
2805 			}
2806 			index = be32_to_cpu(next->tx_comp.correlators[i]);
2807 			if (index & IBMVNIC_TSO_POOL_MASK) {
2808 				tx_pool = &adapter->tso_pool[pool];
2809 				index &= ~IBMVNIC_TSO_POOL_MASK;
2810 			} else {
2811 				tx_pool = &adapter->tx_pool[pool];
2812 			}
2813 
2814 			txbuff = &tx_pool->tx_buff[index];
2815 
2816 			for (j = 0; j < IBMVNIC_MAX_FRAGS_PER_CRQ; j++) {
2817 				if (!txbuff->data_dma[j])
2818 					continue;
2819 
2820 				txbuff->data_dma[j] = 0;
2821 			}
2822 
2823 			if (txbuff->last_frag) {
2824 				dev_kfree_skb_any(txbuff->skb);
2825 				txbuff->skb = NULL;
2826 			}
2827 
2828 			num_entries += txbuff->num_entries;
2829 
2830 			tx_pool->free_map[tx_pool->producer_index] = index;
2831 			tx_pool->producer_index =
2832 				(tx_pool->producer_index + 1) %
2833 					tx_pool->num_buffers;
2834 		}
2835 		/* remove tx_comp scrq entry */
2836 		next->tx_comp.first = 0;
2837 
2838 		if (atomic_sub_return(num_entries, &scrq->used) <=
2839 		    (adapter->req_tx_entries_per_subcrq / 2) &&
2840 		    __netif_subqueue_stopped(adapter->netdev,
2841 					     scrq->pool_index)) {
2842 			netif_wake_subqueue(adapter->netdev, scrq->pool_index);
2843 			netdev_dbg(adapter->netdev, "Started queue %d\n",
2844 				   scrq->pool_index);
2845 		}
2846 	}
2847 
2848 	enable_scrq_irq(adapter, scrq);
2849 
2850 	if (pending_scrq(adapter, scrq)) {
2851 		disable_scrq_irq(adapter, scrq);
2852 		goto restart_loop;
2853 	}
2854 
2855 	return 0;
2856 }
2857 
2858 static irqreturn_t ibmvnic_interrupt_tx(int irq, void *instance)
2859 {
2860 	struct ibmvnic_sub_crq_queue *scrq = instance;
2861 	struct ibmvnic_adapter *adapter = scrq->adapter;
2862 
2863 	disable_scrq_irq(adapter, scrq);
2864 	ibmvnic_complete_tx(adapter, scrq);
2865 
2866 	return IRQ_HANDLED;
2867 }
2868 
2869 static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance)
2870 {
2871 	struct ibmvnic_sub_crq_queue *scrq = instance;
2872 	struct ibmvnic_adapter *adapter = scrq->adapter;
2873 
2874 	/* When booting a kdump kernel we can hit pending interrupts
2875 	 * prior to completing driver initialization.
2876 	 */
2877 	if (unlikely(adapter->state != VNIC_OPEN))
2878 		return IRQ_NONE;
2879 
2880 	adapter->rx_stats_buffers[scrq->scrq_num].interrupts++;
2881 
2882 	if (napi_schedule_prep(&adapter->napi[scrq->scrq_num])) {
2883 		disable_scrq_irq(adapter, scrq);
2884 		__napi_schedule(&adapter->napi[scrq->scrq_num]);
2885 	}
2886 
2887 	return IRQ_HANDLED;
2888 }
2889 
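/* Map each sub-CRQ's hypervisor interrupt (hw_irq) to a Linux irq and
 * install the tx/rx handlers. On failure, every irq mapped so far is
 * freed and disposed: the rx unwind covers rx queues 0..i-1 and then
 * falls through to unwind all tx queues as well.
 */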
2890 static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter)
2891 {
2892 	struct device *dev = &adapter->vdev->dev;
2893 	struct ibmvnic_sub_crq_queue *scrq;
2894 	int i = 0, j = 0;
2895 	int rc = 0;
2896 
2897 	for (i = 0; i < adapter->req_tx_queues; i++) {
2898 		netdev_dbg(adapter->netdev, "Initializing tx_scrq[%d] irq\n",
2899 			   i);
2900 		scrq = adapter->tx_scrq[i];
2901 		scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
2902 
2903 		if (!scrq->irq) {
2904 			rc = -EINVAL;
2905 			dev_err(dev, "Error mapping irq\n");
2906 			goto req_tx_irq_failed;
2907 		}
2908 
2909 		snprintf(scrq->name, sizeof(scrq->name), "ibmvnic-%x-tx%d",
2910 			 adapter->vdev->unit_address, i);
2911 		rc = request_irq(scrq->irq, ibmvnic_interrupt_tx,
2912 				 0, scrq->name, scrq);
2913 
2914 		if (rc) {
2915 			dev_err(dev, "Couldn't register tx irq 0x%x. rc=%d\n",
2916 				scrq->irq, rc);
2917 			irq_dispose_mapping(scrq->irq);
2918 			goto req_tx_irq_failed;
2919 		}
2920 	}
2921 
2922 	for (i = 0; i < adapter->req_rx_queues; i++) {
2923 		netdev_dbg(adapter->netdev, "Initializing rx_scrq[%d] irq\n",
2924 			   i);
2925 		scrq = adapter->rx_scrq[i];
2926 		scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
2927 		if (!scrq->irq) {
2928 			rc = -EINVAL;
2929 			dev_err(dev, "Error mapping irq\n");
2930 			goto req_rx_irq_failed;
2931 		}
2932 		snprintf(scrq->name, sizeof(scrq->name), "ibmvnic-%x-rx%d",
2933 			 adapter->vdev->unit_address, i);
2934 		rc = request_irq(scrq->irq, ibmvnic_interrupt_rx,
2935 				 0, scrq->name, scrq);
2936 		if (rc) {
2937 			dev_err(dev, "Couldn't register rx irq 0x%x. rc=%d\n",
2938 				scrq->irq, rc);
2939 			irq_dispose_mapping(scrq->irq);
2940 			goto req_rx_irq_failed;
2941 		}
2942 	}
2943 	return rc;
2944 
2945 req_rx_irq_failed:
2946 	for (j = 0; j < i; j++) {
2947 		free_irq(adapter->rx_scrq[j]->irq, adapter->rx_scrq[j]);
2948 		irq_dispose_mapping(adapter->rx_scrq[j]->irq);
2949 	}
2950 	i = adapter->req_tx_queues;
2951 req_tx_irq_failed:
2952 	for (j = 0; j < i; j++) {
2953 		free_irq(adapter->tx_scrq[j]->irq, adapter->tx_scrq[j]);
2954 		irq_dispose_mapping(adapter->tx_scrq[j]->irq);
2955 	}
2956 	release_sub_crqs(adapter, 1);
2957 	return rc;
2958 }
2959 
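/* Allocate every requested tx and rx sub-CRQ in one array, then divide
 * it up. If fewer queues were registered than requested (but at least
 * the minimums), the shortfall is spread across the tx and rx requests:
 * e.g. a shortfall of two reduces req_rx_queues and req_tx_queues by
 * one each, unless one side is already at its minimum, in which case
 * 'more' extends the loop so the other side absorbs the difference.
 */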
2960 static int init_sub_crqs(struct ibmvnic_adapter *adapter)
2961 {
2962 	struct device *dev = &adapter->vdev->dev;
2963 	struct ibmvnic_sub_crq_queue **allqueues;
2964 	int registered_queues = 0;
2965 	int total_queues;
2966 	int more = 0;
2967 	int i;
2968 
2969 	total_queues = adapter->req_tx_queues + adapter->req_rx_queues;
2970 
2971 	allqueues = kcalloc(total_queues, sizeof(*allqueues), GFP_KERNEL);
2972 	if (!allqueues)
2973 		return -1;
2974 
2975 	for (i = 0; i < total_queues; i++) {
2976 		allqueues[i] = init_sub_crq_queue(adapter);
2977 		if (!allqueues[i]) {
2978 			dev_warn(dev, "Couldn't allocate all sub-crqs\n");
2979 			break;
2980 		}
2981 		registered_queues++;
2982 	}
2983 
2984 	/* Make sure we were able to register the minimum number of queues */
2985 	if (registered_queues <
2986 	    adapter->min_tx_queues + adapter->min_rx_queues) {
2987 		dev_err(dev, "Fatal: Couldn't init min number of sub-crqs\n");
2988 		goto tx_failed;
2989 	}
2990 
2991 	/* Distribute the queues we failed to allocate between rx and tx */
2992 	for (i = 0; i < total_queues - registered_queues + more; i++) {
2993 		netdev_dbg(adapter->netdev, "Reducing number of queues\n");
2994 		switch (i % 3) {
2995 		case 0:
2996 			if (adapter->req_rx_queues > adapter->min_rx_queues)
2997 				adapter->req_rx_queues--;
2998 			else
2999 				more++;
3000 			break;
3001 		case 1:
3002 			if (adapter->req_tx_queues > adapter->min_tx_queues)
3003 				adapter->req_tx_queues--;
3004 			else
3005 				more++;
3006 			break;
3007 		}
3008 	}
3009 
3010 	adapter->tx_scrq = kcalloc(adapter->req_tx_queues,
3011 				   sizeof(*adapter->tx_scrq), GFP_KERNEL);
3012 	if (!adapter->tx_scrq)
3013 		goto tx_failed;
3014 
3015 	for (i = 0; i < adapter->req_tx_queues; i++) {
3016 		adapter->tx_scrq[i] = allqueues[i];
3017 		adapter->tx_scrq[i]->pool_index = i;
3018 		adapter->num_active_tx_scrqs++;
3019 	}
3020 
3021 	adapter->rx_scrq = kcalloc(adapter->req_rx_queues,
3022 				   sizeof(*adapter->rx_scrq), GFP_KERNEL);
3023 	if (!adapter->rx_scrq)
3024 		goto rx_failed;
3025 
3026 	for (i = 0; i < adapter->req_rx_queues; i++) {
3027 		adapter->rx_scrq[i] = allqueues[i + adapter->req_tx_queues];
3028 		adapter->rx_scrq[i]->scrq_num = i;
3029 		adapter->num_active_rx_scrqs++;
3030 	}
3031 
3032 	kfree(allqueues);
3033 	return 0;
3034 
3035 rx_failed:
3036 	kfree(adapter->tx_scrq);
3037 	adapter->tx_scrq = NULL;
3038 tx_failed:
3039 	for (i = 0; i < registered_queues; i++)
3040 		release_sub_crq_queue(adapter, allqueues[i], 1);
3041 	kfree(allqueues);
3042 	return -1;
3043 }
3044 
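/* Compute the capabilities to request from the VNIC server and send one
 * REQUEST_CAPABILITY CRQ per setting, counting each in running_cap_crqs
 * to track the outstanding responses. On the first (non-retry) pass the
 * desired ring sizes are clamped so that a single long term mapped
 * buffer can hold a full ring, i.e. at most
 *
 *	IBMVNIC_MAX_LTB_SIZE / (req_mtu + IBMVNIC_BUFFER_HLEN)
 *
 * entries per queue. On retries the current request values are re-sent
 * unchanged.
 */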
3045 static void ibmvnic_send_req_caps(struct ibmvnic_adapter *adapter, int retry)
3046 {
3047 	struct device *dev = &adapter->vdev->dev;
3048 	union ibmvnic_crq crq;
3049 	int max_entries;
3050 
3051 	if (!retry) {
3052 		/* Sub-CRQ entries are 32 bytes long */
3053 		int entries_page = 4 * PAGE_SIZE / (sizeof(u64) * 4);
3054 
3055 		if (adapter->min_tx_entries_per_subcrq > entries_page ||
3056 		    adapter->min_rx_add_entries_per_subcrq > entries_page) {
3057 			dev_err(dev, "Fatal: invalid entries per sub-crq\n");
3058 			return;
3059 		}
3060 
3061 		if (adapter->desired.mtu)
3062 			adapter->req_mtu = adapter->desired.mtu;
3063 		else
3064 			adapter->req_mtu = adapter->netdev->mtu + ETH_HLEN;
3065 
3066 		if (!adapter->desired.tx_entries)
3067 			adapter->desired.tx_entries =
3068 					adapter->max_tx_entries_per_subcrq;
3069 		if (!adapter->desired.rx_entries)
3070 			adapter->desired.rx_entries =
3071 					adapter->max_rx_add_entries_per_subcrq;
3072 
3073 		max_entries = IBMVNIC_MAX_LTB_SIZE /
3074 			      (adapter->req_mtu + IBMVNIC_BUFFER_HLEN);
3075 
3076 		if ((adapter->req_mtu + IBMVNIC_BUFFER_HLEN) *
3077 			adapter->desired.tx_entries > IBMVNIC_MAX_LTB_SIZE) {
3078 			adapter->desired.tx_entries = max_entries;
3079 		}
3080 
3081 		if ((adapter->req_mtu + IBMVNIC_BUFFER_HLEN) *
3082 			adapter->desired.rx_entries > IBMVNIC_MAX_LTB_SIZE) {
3083 			adapter->desired.rx_entries = max_entries;
3084 		}
3085 
3086 		if (adapter->desired.tx_entries)
3087 			adapter->req_tx_entries_per_subcrq =
3088 					adapter->desired.tx_entries;
3089 		else
3090 			adapter->req_tx_entries_per_subcrq =
3091 					adapter->max_tx_entries_per_subcrq;
3092 
3093 		if (adapter->desired.rx_entries)
3094 			adapter->req_rx_add_entries_per_subcrq =
3095 					adapter->desired.rx_entries;
3096 		else
3097 			adapter->req_rx_add_entries_per_subcrq =
3098 					adapter->max_rx_add_entries_per_subcrq;
3099 
3100 		if (adapter->desired.tx_queues)
3101 			adapter->req_tx_queues =
3102 					adapter->desired.tx_queues;
3103 		else
3104 			adapter->req_tx_queues =
3105 					adapter->opt_tx_comp_sub_queues;
3106 
3107 		if (adapter->desired.rx_queues)
3108 			adapter->req_rx_queues =
3109 					adapter->desired.rx_queues;
3110 		else
3111 			adapter->req_rx_queues =
3112 					adapter->opt_rx_comp_queues;
3113 
3114 		adapter->req_rx_add_queues = adapter->max_rx_add_queues;
3115 	}
3116 
3117 	memset(&crq, 0, sizeof(crq));
3118 	crq.request_capability.first = IBMVNIC_CRQ_CMD;
3119 	crq.request_capability.cmd = REQUEST_CAPABILITY;
3120 
3121 	crq.request_capability.capability = cpu_to_be16(REQ_TX_QUEUES);
3122 	crq.request_capability.number = cpu_to_be64(adapter->req_tx_queues);
3123 	atomic_inc(&adapter->running_cap_crqs);
3124 	ibmvnic_send_crq(adapter, &crq);
3125 
3126 	crq.request_capability.capability = cpu_to_be16(REQ_RX_QUEUES);
3127 	crq.request_capability.number = cpu_to_be64(adapter->req_rx_queues);
3128 	atomic_inc(&adapter->running_cap_crqs);
3129 	ibmvnic_send_crq(adapter, &crq);
3130 
3131 	crq.request_capability.capability = cpu_to_be16(REQ_RX_ADD_QUEUES);
3132 	crq.request_capability.number = cpu_to_be64(adapter->req_rx_add_queues);
3133 	atomic_inc(&adapter->running_cap_crqs);
3134 	ibmvnic_send_crq(adapter, &crq);
3135 
3136 	crq.request_capability.capability =
3137 	    cpu_to_be16(REQ_TX_ENTRIES_PER_SUBCRQ);
3138 	crq.request_capability.number =
3139 	    cpu_to_be64(adapter->req_tx_entries_per_subcrq);
3140 	atomic_inc(&adapter->running_cap_crqs);
3141 	ibmvnic_send_crq(adapter, &crq);
3142 
3143 	crq.request_capability.capability =
3144 	    cpu_to_be16(REQ_RX_ADD_ENTRIES_PER_SUBCRQ);
3145 	crq.request_capability.number =
3146 	    cpu_to_be64(adapter->req_rx_add_entries_per_subcrq);
3147 	atomic_inc(&adapter->running_cap_crqs);
3148 	ibmvnic_send_crq(adapter, &crq);
3149 
3150 	crq.request_capability.capability = cpu_to_be16(REQ_MTU);
3151 	crq.request_capability.number = cpu_to_be64(adapter->req_mtu);
3152 	atomic_inc(&adapter->running_cap_crqs);
3153 	ibmvnic_send_crq(adapter, &crq);
3154 
3155 	if (adapter->netdev->flags & IFF_PROMISC) {
3156 		if (adapter->promisc_supported) {
3157 			crq.request_capability.capability =
3158 			    cpu_to_be16(PROMISC_REQUESTED);
3159 			crq.request_capability.number = cpu_to_be64(1);
3160 			atomic_inc(&adapter->running_cap_crqs);
3161 			ibmvnic_send_crq(adapter, &crq);
3162 		}
3163 	} else {
3164 		crq.request_capability.capability =
3165 		    cpu_to_be16(PROMISC_REQUESTED);
3166 		crq.request_capability.number = cpu_to_be64(0);
3167 		atomic_inc(&adapter->running_cap_crqs);
3168 		ibmvnic_send_crq(adapter, &crq);
3169 	}
3170 }
3171 
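/* Ring-consumption helpers. A slot in the message area is valid once
 * the producer has set IBMVNIC_CRQ_CMD_RSP in its first byte.
 * pending_scrq() peeks at the current slot without consuming it;
 * ibmvnic_next_scrq() consumes it under scrq->lock, advancing and
 * wrapping scrq->cur. The consumer clears the first byte (e.g.
 * tx_comp.first = 0) when it is done with the entry.
 */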
3172 static int pending_scrq(struct ibmvnic_adapter *adapter,
3173 			struct ibmvnic_sub_crq_queue *scrq)
3174 {
3175 	union sub_crq *entry = &scrq->msgs[scrq->cur];
3176 
3177 	if (entry->generic.first & IBMVNIC_CRQ_CMD_RSP)
3178 		return 1;
3179 	else
3180 		return 0;
3181 }
3182 
3183 static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *adapter,
3184 					struct ibmvnic_sub_crq_queue *scrq)
3185 {
3186 	union sub_crq *entry;
3187 	unsigned long flags;
3188 
3189 	spin_lock_irqsave(&scrq->lock, flags);
3190 	entry = &scrq->msgs[scrq->cur];
3191 	if (entry->generic.first & IBMVNIC_CRQ_CMD_RSP) {
3192 		if (++scrq->cur == scrq->size)
3193 			scrq->cur = 0;
3194 	} else {
3195 		entry = NULL;
3196 	}
3197 	spin_unlock_irqrestore(&scrq->lock, flags);
3198 
3199 	return entry;
3200 }
3201 
3202 static union ibmvnic_crq *ibmvnic_next_crq(struct ibmvnic_adapter *adapter)
3203 {
3204 	struct ibmvnic_crq_queue *queue = &adapter->crq;
3205 	union ibmvnic_crq *crq;
3206 
3207 	crq = &queue->msgs[queue->cur];
3208 	if (crq->generic.first & IBMVNIC_CRQ_CMD_RSP) {
3209 		if (++queue->cur == queue->size)
3210 			queue->cur = 0;
3211 	} else {
3212 		crq = NULL;
3213 	}
3214 
3215 	return crq;
3216 }
3217 
3218 static void print_subcrq_error(struct device *dev, int rc, const char *func)
3219 {
3220 	switch (rc) {
3221 	case H_PARAMETER:
3222 		dev_warn_ratelimited(dev,
3223 				     "%s failed: Send request is malformed or adapter failover pending. (rc=%d)\n",
3224 				     func, rc);
3225 		break;
3226 	case H_CLOSED:
3227 		dev_warn_ratelimited(dev,
3228 				     "%s failed: Backing queue closed. Adapter is down or failover pending. (rc=%d)\n",
3229 				     func, rc);
3230 		break;
3231 	default:
3232 		dev_err_ratelimited(dev, "%s failed: (rc=%d)\n", func, rc);
3233 		break;
3234 	}
3235 }
3236 
3237 static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle,
3238 		       union sub_crq *sub_crq)
3239 {
3240 	unsigned int ua = adapter->vdev->unit_address;
3241 	struct device *dev = &adapter->vdev->dev;
3242 	u64 *u64_crq = (u64 *)sub_crq;
3243 	int rc;
3244 
3245 	netdev_dbg(adapter->netdev,
3246 		   "Sending sCRQ %016lx: %016lx %016lx %016lx %016lx\n",
3247 		   (unsigned long int)cpu_to_be64(remote_handle),
3248 		   (unsigned long int)cpu_to_be64(u64_crq[0]),
3249 		   (unsigned long int)cpu_to_be64(u64_crq[1]),
3250 		   (unsigned long int)cpu_to_be64(u64_crq[2]),
3251 		   (unsigned long int)cpu_to_be64(u64_crq[3]));
3252 
3253 	/* Make sure the hypervisor sees the complete request */
3254 	mb();
3255 
3256 	rc = plpar_hcall_norets(H_SEND_SUB_CRQ, ua,
3257 				cpu_to_be64(remote_handle),
3258 				cpu_to_be64(u64_crq[0]),
3259 				cpu_to_be64(u64_crq[1]),
3260 				cpu_to_be64(u64_crq[2]),
3261 				cpu_to_be64(u64_crq[3]));
3262 
3263 	if (rc)
3264 		print_subcrq_error(dev, rc, __func__);
3265 
3266 	return rc;
3267 }
3268 
3269 static int send_subcrq_indirect(struct ibmvnic_adapter *adapter,
3270 				u64 remote_handle, u64 ioba, u64 num_entries)
3271 {
3272 	unsigned int ua = adapter->vdev->unit_address;
3273 	struct device *dev = &adapter->vdev->dev;
3274 	int rc;
3275 
3276 	/* Make sure the hypervisor sees the complete request */
3277 	mb();
3278 	rc = plpar_hcall_norets(H_SEND_SUB_CRQ_INDIRECT, ua,
3279 				cpu_to_be64(remote_handle),
3280 				ioba, num_entries);
3281 
3282 	if (rc)
3283 		print_subcrq_error(dev, rc, __func__);
3284 
3285 	return rc;
3286 }
3287 
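/* Send one 16-byte CRQ descriptor to the VNIC server with the
 * H_SEND_CRQ hcall, as two big-endian 8-byte words. Only the INIT
 * command may be sent while the CRQ is marked inactive. H_CLOSED means
 * the partner queue is gone; if that happens mid-reset, a fatal reset
 * is scheduled to recover.
 */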
3288 static int ibmvnic_send_crq(struct ibmvnic_adapter *adapter,
3289 			    union ibmvnic_crq *crq)
3290 {
3291 	unsigned int ua = adapter->vdev->unit_address;
3292 	struct device *dev = &adapter->vdev->dev;
3293 	u64 *u64_crq = (u64 *)crq;
3294 	int rc;
3295 
3296 	netdev_dbg(adapter->netdev, "Sending CRQ: %016lx %016lx\n",
3297 		   (unsigned long int)cpu_to_be64(u64_crq[0]),
3298 		   (unsigned long int)cpu_to_be64(u64_crq[1]));
3299 
3300 	if (!adapter->crq.active &&
3301 	    crq->generic.first != IBMVNIC_CRQ_INIT_CMD) {
3302 		dev_warn(dev, "Invalid request detected while CRQ is inactive, possible device state change during reset\n");
3303 		return -EINVAL;
3304 	}
3305 
3306 	/* Make sure the hypervisor sees the complete request */
3307 	mb();
3308 
3309 	rc = plpar_hcall_norets(H_SEND_CRQ, ua,
3310 				cpu_to_be64(u64_crq[0]),
3311 				cpu_to_be64(u64_crq[1]));
3312 
3313 	if (rc) {
3314 		if (rc == H_CLOSED) {
3315 			dev_warn(dev, "CRQ Queue closed\n");
3316 			if (adapter->resetting)
3317 				ibmvnic_reset(adapter, VNIC_RESET_FATAL);
3318 		}
3319 
3320 		dev_warn(dev, "Send error (rc=%d)\n", rc);
3321 	}
3322 
3323 	return rc;
3324 }
3325 
3326 static int ibmvnic_send_crq_init(struct ibmvnic_adapter *adapter)
3327 {
3328 	union ibmvnic_crq crq;
3329 
3330 	memset(&crq, 0, sizeof(crq));
3331 	crq.generic.first = IBMVNIC_CRQ_INIT_CMD;
3332 	crq.generic.cmd = IBMVNIC_CRQ_INIT;
3333 	netdev_dbg(adapter->netdev, "Sending CRQ init\n");
3334 
3335 	return ibmvnic_send_crq(adapter, &crq);
3336 }
3337 
3338 static int send_version_xchg(struct ibmvnic_adapter *adapter)
3339 {
3340 	union ibmvnic_crq crq;
3341 
3342 	memset(&crq, 0, sizeof(crq));
3343 	crq.version_exchange.first = IBMVNIC_CRQ_CMD;
3344 	crq.version_exchange.cmd = VERSION_EXCHANGE;
3345 	crq.version_exchange.version = cpu_to_be16(ibmvnic_version);
3346 
3347 	return ibmvnic_send_crq(adapter, &crq);
3348 }
3349 
3350 struct vnic_login_client_data {
3351 	u8	type;
3352 	__be16	len;
3353 	char	name[];
3354 } __packed;
3355 
3356 static int vnic_client_data_len(struct ibmvnic_adapter *adapter)
3357 {
3358 	int len;
3359 
3360 	/* Calculate the amount of buffer space needed for the
3361 	 * vnic client data in the login buffer. There are four entries,
3362 	 * OS name, LPAR name, device name, and a null last entry.
3363 	 */
3364 	len = 4 * sizeof(struct vnic_login_client_data);
3365 	len += 6; /* "Linux" plus NUL terminator */
3366 	len += strlen(utsname()->nodename) + 1;
3367 	len += strlen(adapter->netdev->name) + 1;
3368 
3369 	return len;
3370 }
3371 
3372 static void vnic_add_client_data(struct ibmvnic_adapter *adapter,
3373 				 struct vnic_login_client_data *vlcd)
3374 {
3375 	const char *os_name = "Linux";
3376 	int len;
3377 
3378 	/* Type 1 - LPAR OS */
3379 	vlcd->type = 1;
3380 	len = strlen(os_name) + 1;
3381 	vlcd->len = cpu_to_be16(len);
3382 	strncpy(vlcd->name, os_name, len);
3383 	vlcd = (struct vnic_login_client_data *)(vlcd->name + len);
3384 
3385 	/* Type 2 - LPAR name */
3386 	vlcd->type = 2;
3387 	len = strlen(utsname()->nodename) + 1;
3388 	vlcd->len = cpu_to_be16(len);
3389 	strncpy(vlcd->name, utsname()->nodename, len);
3390 	vlcd = (struct vnic_login_client_data *)(vlcd->name + len);
3391 
3392 	/* Type 3 - device name */
3393 	vlcd->type = 3;
3394 	len = strlen(adapter->netdev->name) + 1;
3395 	vlcd->len = cpu_to_be16(len);
3396 	strncpy(vlcd->name, adapter->netdev->name, len);
3397 }
3398 
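/* Build and send the LOGIN request. The login buffer is laid out as a
 * header, the list of tx sub-CRQ handles, the list of rx sub-CRQ
 * handles, and finally the client data records built by
 * vnic_add_client_data(); the offsets stored in the header are byte
 * offsets from the start of the buffer. A separate response buffer is
 * DMA mapped and its address passed along so the server can write the
 * login response directly into it.
 */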
3399 static int send_login(struct ibmvnic_adapter *adapter)
3400 {
3401 	struct ibmvnic_login_rsp_buffer *login_rsp_buffer;
3402 	struct ibmvnic_login_buffer *login_buffer;
3403 	struct device *dev = &adapter->vdev->dev;
3404 	dma_addr_t rsp_buffer_token;
3405 	dma_addr_t buffer_token;
3406 	size_t rsp_buffer_size;
3407 	union ibmvnic_crq crq;
3408 	size_t buffer_size;
3409 	__be64 *tx_list_p;
3410 	__be64 *rx_list_p;
3411 	int client_data_len;
3412 	struct vnic_login_client_data *vlcd;
3413 	int i;
3414 
3415 	if (!adapter->tx_scrq || !adapter->rx_scrq) {
3416 		netdev_err(adapter->netdev,
3417 			   "RX or TX queues are not allocated, device login failed\n");
3418 		return -1;
3419 	}
3420 
3421 	release_login_rsp_buffer(adapter);
3422 	client_data_len = vnic_client_data_len(adapter);
3423 
3424 	buffer_size =
3425 	    sizeof(struct ibmvnic_login_buffer) +
3426 	    sizeof(u64) * (adapter->req_tx_queues + adapter->req_rx_queues) +
3427 	    client_data_len;
3428 
3429 	login_buffer = kzalloc(buffer_size, GFP_ATOMIC);
3430 	if (!login_buffer)
3431 		goto buf_alloc_failed;
3432 
3433 	buffer_token = dma_map_single(dev, login_buffer, buffer_size,
3434 				      DMA_TO_DEVICE);
3435 	if (dma_mapping_error(dev, buffer_token)) {
3436 		dev_err(dev, "Couldn't map login buffer\n");
3437 		goto buf_map_failed;
3438 	}
3439 
3440 	rsp_buffer_size = sizeof(struct ibmvnic_login_rsp_buffer) +
3441 			  sizeof(u64) * adapter->req_tx_queues +
3442 			  sizeof(u64) * adapter->req_rx_queues +
3443 			  sizeof(u64) * adapter->req_rx_queues +
3444 			  sizeof(u8) * IBMVNIC_TX_DESC_VERSIONS;
3445 
3446 	login_rsp_buffer = kmalloc(rsp_buffer_size, GFP_ATOMIC);
3447 	if (!login_rsp_buffer)
3448 		goto buf_rsp_alloc_failed;
3449 
3450 	rsp_buffer_token = dma_map_single(dev, login_rsp_buffer,
3451 					  rsp_buffer_size, DMA_FROM_DEVICE);
3452 	if (dma_mapping_error(dev, rsp_buffer_token)) {
3453 		dev_err(dev, "Couldn't map login rsp buffer\n");
3454 		goto buf_rsp_map_failed;
3455 	}
3456 
3457 	adapter->login_buf = login_buffer;
3458 	adapter->login_buf_token = buffer_token;
3459 	adapter->login_buf_sz = buffer_size;
3460 	adapter->login_rsp_buf = login_rsp_buffer;
3461 	adapter->login_rsp_buf_token = rsp_buffer_token;
3462 	adapter->login_rsp_buf_sz = rsp_buffer_size;
3463 
3464 	login_buffer->len = cpu_to_be32(buffer_size);
3465 	login_buffer->version = cpu_to_be32(INITIAL_VERSION_LB);
3466 	login_buffer->num_txcomp_subcrqs = cpu_to_be32(adapter->req_tx_queues);
3467 	login_buffer->off_txcomp_subcrqs =
3468 	    cpu_to_be32(sizeof(struct ibmvnic_login_buffer));
3469 	login_buffer->num_rxcomp_subcrqs = cpu_to_be32(adapter->req_rx_queues);
3470 	login_buffer->off_rxcomp_subcrqs =
3471 	    cpu_to_be32(sizeof(struct ibmvnic_login_buffer) +
3472 			sizeof(u64) * adapter->req_tx_queues);
3473 	login_buffer->login_rsp_ioba = cpu_to_be32(rsp_buffer_token);
3474 	login_buffer->login_rsp_len = cpu_to_be32(rsp_buffer_size);
3475 
3476 	tx_list_p = (__be64 *)((char *)login_buffer +
3477 				      sizeof(struct ibmvnic_login_buffer));
3478 	rx_list_p = (__be64 *)((char *)login_buffer +
3479 				      sizeof(struct ibmvnic_login_buffer) +
3480 				      sizeof(u64) * adapter->req_tx_queues);
3481 
3482 	for (i = 0; i < adapter->req_tx_queues; i++) {
3483 		if (adapter->tx_scrq[i]) {
3484 			tx_list_p[i] = cpu_to_be64(adapter->tx_scrq[i]->
3485 						   crq_num);
3486 		}
3487 	}
3488 
3489 	for (i = 0; i < adapter->req_rx_queues; i++) {
3490 		if (adapter->rx_scrq[i]) {
3491 			rx_list_p[i] = cpu_to_be64(adapter->rx_scrq[i]->
3492 						   crq_num);
3493 		}
3494 	}
3495 
3496 	/* Insert vNIC login client data */
3497 	vlcd = (struct vnic_login_client_data *)
3498 		((char *)rx_list_p + (sizeof(u64) * adapter->req_rx_queues));
3499 	login_buffer->client_data_offset =
3500 			cpu_to_be32((char *)vlcd - (char *)login_buffer);
3501 	login_buffer->client_data_len = cpu_to_be32(client_data_len);
3502 
3503 	vnic_add_client_data(adapter, vlcd);
3504 
3505 	netdev_dbg(adapter->netdev, "Login Buffer:\n");
3506 	for (i = 0; i < (adapter->login_buf_sz - 1) / 8 + 1; i++) {
3507 		netdev_dbg(adapter->netdev, "%016lx\n",
3508 			   ((unsigned long int *)(adapter->login_buf))[i]);
3509 	}
3510 
3511 	memset(&crq, 0, sizeof(crq));
3512 	crq.login.first = IBMVNIC_CRQ_CMD;
3513 	crq.login.cmd = LOGIN;
3514 	crq.login.ioba = cpu_to_be32(buffer_token);
3515 	crq.login.len = cpu_to_be32(buffer_size);
3516 	ibmvnic_send_crq(adapter, &crq);
3517 
3518 	return 0;
3519 
3520 buf_rsp_map_failed:
3521 	kfree(login_rsp_buffer);
3522 buf_rsp_alloc_failed:
3523 	dma_unmap_single(dev, buffer_token, buffer_size, DMA_TO_DEVICE);
3524 buf_map_failed:
3525 	kfree(login_buffer);
3526 buf_alloc_failed:
3527 	return -1;
3528 }
3529 
3530 static int send_request_map(struct ibmvnic_adapter *adapter, dma_addr_t addr,
3531 			    u32 len, u8 map_id)
3532 {
3533 	union ibmvnic_crq crq;
3534 
3535 	memset(&crq, 0, sizeof(crq));
3536 	crq.request_map.first = IBMVNIC_CRQ_CMD;
3537 	crq.request_map.cmd = REQUEST_MAP;
3538 	crq.request_map.map_id = map_id;
3539 	crq.request_map.ioba = cpu_to_be32(addr);
3540 	crq.request_map.len = cpu_to_be32(len);
3541 	return ibmvnic_send_crq(adapter, &crq);
3542 }
3543 
3544 static int send_request_unmap(struct ibmvnic_adapter *adapter, u8 map_id)
3545 {
3546 	union ibmvnic_crq crq;
3547 
3548 	memset(&crq, 0, sizeof(crq));
3549 	crq.request_unmap.first = IBMVNIC_CRQ_CMD;
3550 	crq.request_unmap.cmd = REQUEST_UNMAP;
3551 	crq.request_unmap.map_id = map_id;
3552 	return ibmvnic_send_crq(adapter, &crq);
3553 }
3554 
3555 static void send_map_query(struct ibmvnic_adapter *adapter)
3556 {
3557 	union ibmvnic_crq crq;
3558 
3559 	memset(&crq, 0, sizeof(crq));
3560 	crq.query_map.first = IBMVNIC_CRQ_CMD;
3561 	crq.query_map.cmd = QUERY_MAP;
3562 	ibmvnic_send_crq(adapter, &crq);
3563 }
3564 
3565 /* Send a series of CRQs requesting various capabilities of the VNIC server */
3566 static void send_cap_queries(struct ibmvnic_adapter *adapter)
3567 {
3568 	union ibmvnic_crq crq;
3569 
3570 	atomic_set(&adapter->running_cap_crqs, 0);
3571 	memset(&crq, 0, sizeof(crq));
3572 	crq.query_capability.first = IBMVNIC_CRQ_CMD;
3573 	crq.query_capability.cmd = QUERY_CAPABILITY;
3574 
3575 	crq.query_capability.capability = cpu_to_be16(MIN_TX_QUEUES);
3576 	atomic_inc(&adapter->running_cap_crqs);
3577 	ibmvnic_send_crq(adapter, &crq);
3578 
3579 	crq.query_capability.capability = cpu_to_be16(MIN_RX_QUEUES);
3580 	atomic_inc(&adapter->running_cap_crqs);
3581 	ibmvnic_send_crq(adapter, &crq);
3582 
3583 	crq.query_capability.capability = cpu_to_be16(MIN_RX_ADD_QUEUES);
3584 	atomic_inc(&adapter->running_cap_crqs);
3585 	ibmvnic_send_crq(adapter, &crq);
3586 
3587 	crq.query_capability.capability = cpu_to_be16(MAX_TX_QUEUES);
3588 	atomic_inc(&adapter->running_cap_crqs);
3589 	ibmvnic_send_crq(adapter, &crq);
3590 
3591 	crq.query_capability.capability = cpu_to_be16(MAX_RX_QUEUES);
3592 	atomic_inc(&adapter->running_cap_crqs);
3593 	ibmvnic_send_crq(adapter, &crq);
3594 
3595 	crq.query_capability.capability = cpu_to_be16(MAX_RX_ADD_QUEUES);
3596 	atomic_inc(&adapter->running_cap_crqs);
3597 	ibmvnic_send_crq(adapter, &crq);
3598 
3599 	crq.query_capability.capability =
3600 	    cpu_to_be16(MIN_TX_ENTRIES_PER_SUBCRQ);
3601 	atomic_inc(&adapter->running_cap_crqs);
3602 	ibmvnic_send_crq(adapter, &crq);
3603 
3604 	crq.query_capability.capability =
3605 	    cpu_to_be16(MIN_RX_ADD_ENTRIES_PER_SUBCRQ);
3606 	atomic_inc(&adapter->running_cap_crqs);
3607 	ibmvnic_send_crq(adapter, &crq);
3608 
3609 	crq.query_capability.capability =
3610 	    cpu_to_be16(MAX_TX_ENTRIES_PER_SUBCRQ);
3611 	atomic_inc(&adapter->running_cap_crqs);
3612 	ibmvnic_send_crq(adapter, &crq);
3613 
3614 	crq.query_capability.capability =
3615 	    cpu_to_be16(MAX_RX_ADD_ENTRIES_PER_SUBCRQ);
3616 	atomic_inc(&adapter->running_cap_crqs);
3617 	ibmvnic_send_crq(adapter, &crq);
3618 
3619 	crq.query_capability.capability = cpu_to_be16(TCP_IP_OFFLOAD);
3620 	atomic_inc(&adapter->running_cap_crqs);
3621 	ibmvnic_send_crq(adapter, &crq);
3622 
3623 	crq.query_capability.capability = cpu_to_be16(PROMISC_SUPPORTED);
3624 	atomic_inc(&adapter->running_cap_crqs);
3625 	ibmvnic_send_crq(adapter, &crq);
3626 
3627 	crq.query_capability.capability = cpu_to_be16(MIN_MTU);
3628 	atomic_inc(&adapter->running_cap_crqs);
3629 	ibmvnic_send_crq(adapter, &crq);
3630 
3631 	crq.query_capability.capability = cpu_to_be16(MAX_MTU);
3632 	atomic_inc(&adapter->running_cap_crqs);
3633 	ibmvnic_send_crq(adapter, &crq);
3634 
3635 	crq.query_capability.capability = cpu_to_be16(MAX_MULTICAST_FILTERS);
3636 	atomic_inc(&adapter->running_cap_crqs);
3637 	ibmvnic_send_crq(adapter, &crq);
3638 
3639 	crq.query_capability.capability = cpu_to_be16(VLAN_HEADER_INSERTION);
3640 	atomic_inc(&adapter->running_cap_crqs);
3641 	ibmvnic_send_crq(adapter, &crq);
3642 
3643 	crq.query_capability.capability = cpu_to_be16(RX_VLAN_HEADER_INSERTION);
3644 	atomic_inc(&adapter->running_cap_crqs);
3645 	ibmvnic_send_crq(adapter, &crq);
3646 
3647 	crq.query_capability.capability = cpu_to_be16(MAX_TX_SG_ENTRIES);
3648 	atomic_inc(&adapter->running_cap_crqs);
3649 	ibmvnic_send_crq(adapter, &crq);
3650 
3651 	crq.query_capability.capability = cpu_to_be16(RX_SG_SUPPORTED);
3652 	atomic_inc(&adapter->running_cap_crqs);
3653 	ibmvnic_send_crq(adapter, &crq);
3654 
3655 	crq.query_capability.capability = cpu_to_be16(OPT_TX_COMP_SUB_QUEUES);
3656 	atomic_inc(&adapter->running_cap_crqs);
3657 	ibmvnic_send_crq(adapter, &crq);
3658 
3659 	crq.query_capability.capability = cpu_to_be16(OPT_RX_COMP_QUEUES);
3660 	atomic_inc(&adapter->running_cap_crqs);
3661 	ibmvnic_send_crq(adapter, &crq);
3662 
3663 	crq.query_capability.capability =
3664 			cpu_to_be16(OPT_RX_BUFADD_Q_PER_RX_COMP_Q);
3665 	atomic_inc(&adapter->running_cap_crqs);
3666 	ibmvnic_send_crq(adapter, &crq);
3667 
3668 	crq.query_capability.capability =
3669 			cpu_to_be16(OPT_TX_ENTRIES_PER_SUBCRQ);
3670 	atomic_inc(&adapter->running_cap_crqs);
3671 	ibmvnic_send_crq(adapter, &crq);
3672 
3673 	crq.query_capability.capability =
3674 			cpu_to_be16(OPT_RXBA_ENTRIES_PER_SUBCRQ);
3675 	atomic_inc(&adapter->running_cap_crqs);
3676 	ibmvnic_send_crq(adapter, &crq);
3677 
3678 	crq.query_capability.capability = cpu_to_be16(TX_RX_DESC_REQ);
3679 	atomic_inc(&adapter->running_cap_crqs);
3680 	ibmvnic_send_crq(adapter, &crq);
3681 }
3682 
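/* Record the length of the device's VPD buffer and wake the fw_done
 * waiter so the VPD data itself can be requested next.
 */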
3683 static void handle_vpd_size_rsp(union ibmvnic_crq *crq,
3684 				struct ibmvnic_adapter *adapter)
3685 {
3686 	struct device *dev = &adapter->vdev->dev;
3687 
3688 	if (crq->get_vpd_size_rsp.rc.code) {
3689 		dev_err(dev, "Error retrieving VPD size, rc=%x\n",
3690 			crq->get_vpd_size_rsp.rc.code);
3691 		complete(&adapter->fw_done);
3692 		return;
3693 	}
3694 
3695 	adapter->vpd->len = be64_to_cpu(crq->get_vpd_size_rsp.len);
3696 	complete(&adapter->fw_done);
3697 }
3698 
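/* Parse the VPD returned by the device and extract the firmware version
 * string that follows the ASCII "RM" keyword; fall back to "N/A" when no
 * firmware level was provided.
 */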
3699 static void handle_vpd_rsp(union ibmvnic_crq *crq,
3700 			   struct ibmvnic_adapter *adapter)
3701 {
3702 	struct device *dev = &adapter->vdev->dev;
3703 	unsigned char *substr = NULL;
3704 	u8 fw_level_len = 0;
3705 
3706 	memset(adapter->fw_version, 0, sizeof(adapter->fw_version));
3707 
3708 	dma_unmap_single(dev, adapter->vpd->dma_addr, adapter->vpd->len,
3709 			 DMA_FROM_DEVICE);
3710 
3711 	if (crq->get_vpd_rsp.rc.code) {
3712 		dev_err(dev, "Error retrieving VPD from device, rc=%x\n",
3713 			crq->get_vpd_rsp.rc.code);
3714 		goto complete;
3715 	}
3716 
3717 	/* get the position of the firmware version info
3718 	 * located after the ASCII 'RM' substring in the buffer
3719 	 */
3720 	substr = strnstr(adapter->vpd->buff, "RM", adapter->vpd->len);
3721 	if (!substr) {
3722 		dev_info(dev, "Warning - No FW level has been provided in the VPD buffer by the VIOS Server\n");
3723 		goto complete;
3724 	}
3725 
3726 	/* get length of firmware level ASCII substring, clamped to fit fw_version */
3727 	if ((substr + 2) < (adapter->vpd->buff + adapter->vpd->len)) {
3728 		fw_level_len = min_t(u8, *(substr + 2), sizeof(adapter->fw_version) - 1);
3729 	} else {
3730 		dev_info(dev, "Length of FW substr extends beyond VPD buff\n");
3731 		goto complete;
3732 	}
3733 
3734 	/* copy firmware version string from vpd into adapter */
3735 	if ((substr + 3 + fw_level_len) <
3736 	    (adapter->vpd->buff + adapter->vpd->len)) {
3737 		strncpy((char *)adapter->fw_version, substr + 3, fw_level_len);
3738 	} else {
3739 		dev_info(dev, "FW substr extends beyond VPD buff\n");
3740 	}
3741 
3742 complete:
3743 	if (adapter->fw_version[0] == '\0')
3744 		strncpy((char *)adapter->fw_version, "N/A", 3);
3745 	complete(&adapter->fw_done);
3746 }
3747 
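/* Process the server's IP offload capabilities: log them, build the
 * control buffer selecting the offloads to enable, mirror them into the
 * netdev feature flags, and send CONTROL_IP_OFFLOAD to commit the choice.
 */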
3748 static void handle_query_ip_offload_rsp(struct ibmvnic_adapter *adapter)
3749 {
3750 	struct device *dev = &adapter->vdev->dev;
3751 	struct ibmvnic_query_ip_offload_buffer *buf = &adapter->ip_offload_buf;
3752 	netdev_features_t old_hw_features = 0;
3753 	union ibmvnic_crq crq;
3754 	int i;
3755 
3756 	dma_unmap_single(dev, adapter->ip_offload_tok,
3757 			 sizeof(adapter->ip_offload_buf), DMA_FROM_DEVICE);
3758 
3759 	netdev_dbg(adapter->netdev, "Query IP Offload Buffer:\n");
3760 	for (i = 0; i < (sizeof(adapter->ip_offload_buf) - 1) / 8 + 1; i++)
3761 		netdev_dbg(adapter->netdev, "%016lx\n",
3762 			   ((unsigned long *)(buf))[i]);
3763 
3764 	netdev_dbg(adapter->netdev, "ipv4_chksum = %d\n", buf->ipv4_chksum);
3765 	netdev_dbg(adapter->netdev, "ipv6_chksum = %d\n", buf->ipv6_chksum);
3766 	netdev_dbg(adapter->netdev, "tcp_ipv4_chksum = %d\n",
3767 		   buf->tcp_ipv4_chksum);
3768 	netdev_dbg(adapter->netdev, "tcp_ipv6_chksum = %d\n",
3769 		   buf->tcp_ipv6_chksum);
3770 	netdev_dbg(adapter->netdev, "udp_ipv4_chksum = %d\n",
3771 		   buf->udp_ipv4_chksum);
3772 	netdev_dbg(adapter->netdev, "udp_ipv6_chksum = %d\n",
3773 		   buf->udp_ipv6_chksum);
3774 	netdev_dbg(adapter->netdev, "large_tx_ipv4 = %d\n",
3775 		   buf->large_tx_ipv4);
3776 	netdev_dbg(adapter->netdev, "large_tx_ipv6 = %d\n",
3777 		   buf->large_tx_ipv6);
3778 	netdev_dbg(adapter->netdev, "large_rx_ipv4 = %d\n",
3779 		   buf->large_rx_ipv4);
3780 	netdev_dbg(adapter->netdev, "large_rx_ipv6 = %d\n",
3781 		   buf->large_rx_ipv6);
3782 	netdev_dbg(adapter->netdev, "max_ipv4_hdr_sz = %d\n",
3783 		   buf->max_ipv4_header_size);
3784 	netdev_dbg(adapter->netdev, "max_ipv6_hdr_sz = %d\n",
3785 		   buf->max_ipv6_header_size);
3786 	netdev_dbg(adapter->netdev, "max_tcp_hdr_size = %d\n",
3787 		   buf->max_tcp_header_size);
3788 	netdev_dbg(adapter->netdev, "max_udp_hdr_size = %d\n",
3789 		   buf->max_udp_header_size);
3790 	netdev_dbg(adapter->netdev, "max_large_tx_size = %d\n",
3791 		   buf->max_large_tx_size);
3792 	netdev_dbg(adapter->netdev, "max_large_rx_size = %d\n",
3793 		   buf->max_large_rx_size);
3794 	netdev_dbg(adapter->netdev, "ipv6_ext_hdr = %d\n",
3795 		   buf->ipv6_extension_header);
3796 	netdev_dbg(adapter->netdev, "tcp_pseudosum_req = %d\n",
3797 		   buf->tcp_pseudosum_req);
3798 	netdev_dbg(adapter->netdev, "num_ipv6_ext_hd = %d\n",
3799 		   buf->num_ipv6_ext_headers);
3800 	netdev_dbg(adapter->netdev, "off_ipv6_ext_hd = %d\n",
3801 		   buf->off_ipv6_ext_headers);
3802 
3803 	adapter->ip_offload_ctrl_tok =
3804 	    dma_map_single(dev, &adapter->ip_offload_ctrl,
3805 			   sizeof(adapter->ip_offload_ctrl), DMA_TO_DEVICE);
3806 
3807 	if (dma_mapping_error(dev, adapter->ip_offload_ctrl_tok)) {
3808 		dev_err(dev, "Couldn't map ip offload control buffer\n");
3809 		return;
3810 	}
3811 
3812 	adapter->ip_offload_ctrl.len =
3813 	    cpu_to_be32(sizeof(adapter->ip_offload_ctrl));
3814 	adapter->ip_offload_ctrl.version = cpu_to_be32(INITIAL_VERSION_IOB);
3815 	adapter->ip_offload_ctrl.ipv4_chksum = buf->ipv4_chksum;
3816 	adapter->ip_offload_ctrl.ipv6_chksum = buf->ipv6_chksum;
3817 	adapter->ip_offload_ctrl.tcp_ipv4_chksum = buf->tcp_ipv4_chksum;
3818 	adapter->ip_offload_ctrl.udp_ipv4_chksum = buf->udp_ipv4_chksum;
3819 	adapter->ip_offload_ctrl.tcp_ipv6_chksum = buf->tcp_ipv6_chksum;
3820 	adapter->ip_offload_ctrl.udp_ipv6_chksum = buf->udp_ipv6_chksum;
3821 	adapter->ip_offload_ctrl.large_tx_ipv4 = buf->large_tx_ipv4;
3822 	adapter->ip_offload_ctrl.large_tx_ipv6 = buf->large_tx_ipv6;
3823 
3824 	/* large_rx disabled for now, additional features needed */
3825 	adapter->ip_offload_ctrl.large_rx_ipv4 = 0;
3826 	adapter->ip_offload_ctrl.large_rx_ipv6 = 0;
3827 
3828 	if (adapter->state != VNIC_PROBING) {
3829 		old_hw_features = adapter->netdev->hw_features;
3830 		adapter->netdev->hw_features = 0;
3831 	}
3832 
3833 	adapter->netdev->hw_features = NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO;
3834 
3835 	if (buf->tcp_ipv4_chksum || buf->udp_ipv4_chksum)
3836 		adapter->netdev->hw_features |= NETIF_F_IP_CSUM;
3837 
3838 	if (buf->tcp_ipv6_chksum || buf->udp_ipv6_chksum)
3839 		adapter->netdev->hw_features |= NETIF_F_IPV6_CSUM;
3840 
3841 	if ((adapter->netdev->features &
3842 	    (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)))
3843 		adapter->netdev->hw_features |= NETIF_F_RXCSUM;
3844 
3845 	if (buf->large_tx_ipv4)
3846 		adapter->netdev->hw_features |= NETIF_F_TSO;
3847 	if (buf->large_tx_ipv6)
3848 		adapter->netdev->hw_features |= NETIF_F_TSO6;
3849 
3850 	if (adapter->state == VNIC_PROBING) {
3851 		adapter->netdev->features |= adapter->netdev->hw_features;
3852 	} else if (old_hw_features != adapter->netdev->hw_features) {
3853 		netdev_features_t tmp = 0;
3854 
3855 		/* disable features no longer supported */
3856 		adapter->netdev->features &= adapter->netdev->hw_features;
3857 		/* turn on features now supported if previously enabled */
3858 		tmp = (old_hw_features ^ adapter->netdev->hw_features) &
3859 			adapter->netdev->hw_features;
3860 		adapter->netdev->features |=
3861 				tmp & adapter->netdev->wanted_features;
3862 	}
3863 
3864 	memset(&crq, 0, sizeof(crq));
3865 	crq.control_ip_offload.first = IBMVNIC_CRQ_CMD;
3866 	crq.control_ip_offload.cmd = CONTROL_IP_OFFLOAD;
3867 	crq.control_ip_offload.len =
3868 	    cpu_to_be32(sizeof(adapter->ip_offload_ctrl));
3869 	crq.control_ip_offload.ioba = cpu_to_be32(adapter->ip_offload_ctrl_tok);
3870 	ibmvnic_send_crq(adapter, &crq);
3871 }
3872 
3873 static const char *ibmvnic_fw_err_cause(u16 cause)
3874 {
3875 	switch (cause) {
3876 	case ADAPTER_PROBLEM:
3877 		return "adapter problem";
3878 	case BUS_PROBLEM:
3879 		return "bus problem";
3880 	case FW_PROBLEM:
3881 		return "firmware problem";
3882 	case DD_PROBLEM:
3883 		return "device driver problem";
3884 	case EEH_RECOVERY:
3885 		return "EEH recovery";
3886 	case FW_UPDATED:
3887 		return "firmware updated";
3888 	case LOW_MEMORY:
3889 		return "low Memory";
3890 	default:
3891 		return "unknown";
3892 	}
3893 }
3894 
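/* Log a firmware error indication and start a fatal or non-fatal reset
 * depending on the severity the firmware reports.
 */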
3895 static void handle_error_indication(union ibmvnic_crq *crq,
3896 				    struct ibmvnic_adapter *adapter)
3897 {
3898 	struct device *dev = &adapter->vdev->dev;
3899 	u16 cause;
3900 
3901 	cause = be16_to_cpu(crq->error_indication.error_cause);
3902 
3903 	dev_warn_ratelimited(dev,
3904 			     "Firmware reports %serror, cause: %s. Starting recovery...\n",
3905 			     crq->error_indication.flags
3906 				& IBMVNIC_FATAL_ERROR ? "FATAL " : "",
3907 			     ibmvnic_fw_err_cause(cause));
3908 
3909 	if (crq->error_indication.flags & IBMVNIC_FATAL_ERROR)
3910 		ibmvnic_reset(adapter, VNIC_RESET_FATAL);
3911 	else
3912 		ibmvnic_reset(adapter, VNIC_RESET_NON_FATAL);
3913 }
3914 
3915 static int handle_change_mac_rsp(union ibmvnic_crq *crq,
3916 				 struct ibmvnic_adapter *adapter)
3917 {
3918 	struct net_device *netdev = adapter->netdev;
3919 	struct device *dev = &adapter->vdev->dev;
3920 	long rc;
3921 
3922 	rc = crq->change_mac_addr_rsp.rc.code;
3923 	if (rc) {
3924 		dev_err(dev, "Error %ld in CHANGE_MAC_ADDR_RSP\n", rc);
3925 		goto out;
3926 	}
3927 	ether_addr_copy(netdev->dev_addr,
3928 			&crq->change_mac_addr_rsp.mac_addr[0]);
3929 out:
3930 	complete(&adapter->fw_done);
3931 	return rc;
3932 }
3933 
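/* Handle the response to a single capability request. On PARTIALSUCCESS
 * the server proposes a lower value, so the requests are resent with that
 * value; once all responses have arrived, query IP offload support.
 */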
3934 static void handle_request_cap_rsp(union ibmvnic_crq *crq,
3935 				   struct ibmvnic_adapter *adapter)
3936 {
3937 	struct device *dev = &adapter->vdev->dev;
3938 	u64 *req_value;
3939 	char *name;
3940 
3941 	atomic_dec(&adapter->running_cap_crqs);
3942 	switch (be16_to_cpu(crq->request_capability_rsp.capability)) {
3943 	case REQ_TX_QUEUES:
3944 		req_value = &adapter->req_tx_queues;
3945 		name = "tx";
3946 		break;
3947 	case REQ_RX_QUEUES:
3948 		req_value = &adapter->req_rx_queues;
3949 		name = "rx";
3950 		break;
3951 	case REQ_RX_ADD_QUEUES:
3952 		req_value = &adapter->req_rx_add_queues;
3953 		name = "rx_add";
3954 		break;
3955 	case REQ_TX_ENTRIES_PER_SUBCRQ:
3956 		req_value = &adapter->req_tx_entries_per_subcrq;
3957 		name = "tx_entries_per_subcrq";
3958 		break;
3959 	case REQ_RX_ADD_ENTRIES_PER_SUBCRQ:
3960 		req_value = &adapter->req_rx_add_entries_per_subcrq;
3961 		name = "rx_add_entries_per_subcrq";
3962 		break;
3963 	case REQ_MTU:
3964 		req_value = &adapter->req_mtu;
3965 		name = "mtu";
3966 		break;
3967 	case PROMISC_REQUESTED:
3968 		req_value = &adapter->promisc;
3969 		name = "promisc";
3970 		break;
3971 	default:
3972 		dev_err(dev, "Got invalid cap request rsp %d\n",
3973 			be16_to_cpu(crq->request_capability.capability));
3974 		return;
3975 	}
3976 
3977 	switch (crq->request_capability_rsp.rc.code) {
3978 	case SUCCESS:
3979 		break;
3980 	case PARTIALSUCCESS:
3981 		dev_info(dev, "req=%llu, rsp=%ld in %s queue, retrying.\n",
3982 			 *req_value,
3983 			 (long)be64_to_cpu(crq->request_capability_rsp.
3984 					   number), name);
3985 
3986 		if (be16_to_cpu(crq->request_capability_rsp.capability) ==
3987 		    REQ_MTU) {
3988 			pr_err("mtu of %llu is not supported. Reverting.\n",
3989 			       *req_value);
3990 			*req_value = adapter->fallback.mtu;
3991 		} else {
3992 			*req_value =
3993 				be64_to_cpu(crq->request_capability_rsp.number);
3994 		}
3995 
3996 		ibmvnic_send_req_caps(adapter, 1);
3997 		return;
3998 	default:
3999 		dev_err(dev, "Error %d in request cap rsp\n",
4000 			crq->request_capability_rsp.rc.code);
4001 		return;
4002 	}
4003 
4004 	/* Done receiving requested capabilities, query IP offload support */
4005 	if (atomic_read(&adapter->running_cap_crqs) == 0) {
4006 		union ibmvnic_crq newcrq;
4007 		int buf_sz = sizeof(struct ibmvnic_query_ip_offload_buffer);
4008 		struct ibmvnic_query_ip_offload_buffer *ip_offload_buf =
4009 		    &adapter->ip_offload_buf;
4010 
4011 		adapter->wait_capability = false;
4012 		adapter->ip_offload_tok = dma_map_single(dev, ip_offload_buf,
4013 							 buf_sz,
4014 							 DMA_FROM_DEVICE);
4015 
4016 		if (dma_mapping_error(dev, adapter->ip_offload_tok)) {
4017 			if (!firmware_has_feature(FW_FEATURE_CMO))
4018 				dev_err(dev, "Couldn't map offload buffer\n");
4019 			return;
4020 		}
4021 
4022 		memset(&newcrq, 0, sizeof(newcrq));
4023 		newcrq.query_ip_offload.first = IBMVNIC_CRQ_CMD;
4024 		newcrq.query_ip_offload.cmd = QUERY_IP_OFFLOAD;
4025 		newcrq.query_ip_offload.len = cpu_to_be32(buf_sz);
4026 		newcrq.query_ip_offload.ioba =
4027 		    cpu_to_be32(adapter->ip_offload_tok);
4028 
4029 		ibmvnic_send_crq(adapter, &newcrq);
4030 	}
4031 }
4032 
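/* Complete the login handshake: unmap the login buffers, sanity check
 * the response against what was requested, and wake the initialization
 * waiter. A non-zero rc from the server means the requested queues could
 * not be granted and the login must be retried with fewer resources.
 */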
4033 static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
4034 			    struct ibmvnic_adapter *adapter)
4035 {
4036 	struct device *dev = &adapter->vdev->dev;
4037 	struct net_device *netdev = adapter->netdev;
4038 	struct ibmvnic_login_rsp_buffer *login_rsp = adapter->login_rsp_buf;
4039 	struct ibmvnic_login_buffer *login = adapter->login_buf;
4040 	int i;
4041 
4042 	dma_unmap_single(dev, adapter->login_buf_token, adapter->login_buf_sz,
4043 			 DMA_TO_DEVICE);
4044 	dma_unmap_single(dev, adapter->login_rsp_buf_token,
4045 			 adapter->login_rsp_buf_sz, DMA_FROM_DEVICE);
4046 
4047 	/* If the number of queues requested can't be allocated by the
4048 	 * server, the login response will return with code 1. We will need
4049 	 * to resend the login buffer with fewer queues requested.
4050 	 */
4051 	if (login_rsp_crq->generic.rc.code) {
4052 		adapter->init_done_rc = login_rsp_crq->generic.rc.code;
4053 		complete(&adapter->init_done);
4054 		return 0;
4055 	}
4056 
4057 	netdev->mtu = adapter->req_mtu - ETH_HLEN;
4058 
4059 	netdev_dbg(adapter->netdev, "Login Response Buffer:\n");
4060 	for (i = 0; i < (adapter->login_rsp_buf_sz - 1) / 8 + 1; i++) {
4061 		netdev_dbg(adapter->netdev, "%016lx\n",
4062 			   ((unsigned long *)(adapter->login_rsp_buf))[i]);
4063 	}
4064 
4065 	/* Sanity checks */
4066 	if (login->num_txcomp_subcrqs != login_rsp->num_txsubm_subcrqs ||
4067 	    (be32_to_cpu(login->num_rxcomp_subcrqs) *
4068 	     adapter->req_rx_add_queues !=
4069 	     be32_to_cpu(login_rsp->num_rxadd_subcrqs))) {
4070 		dev_err(dev, "FATAL: Inconsistent login and login rsp\n");
4071 		ibmvnic_remove(adapter->vdev);
4072 		return -EIO;
4073 	}
4074 	release_login_buffer(adapter);
4075 	complete(&adapter->init_done);
4076 
4077 	return 0;
4078 }
4079 
4080 static void handle_request_unmap_rsp(union ibmvnic_crq *crq,
4081 				     struct ibmvnic_adapter *adapter)
4082 {
4083 	struct device *dev = &adapter->vdev->dev;
4084 	long rc;
4085 
4086 	rc = crq->request_unmap_rsp.rc.code;
4087 	if (rc)
4088 		dev_err(dev, "Error %ld in REQUEST_UNMAP_RSP\n", rc);
4089 }
4090 
4091 static void handle_query_map_rsp(union ibmvnic_crq *crq,
4092 				 struct ibmvnic_adapter *adapter)
4093 {
4094 	struct net_device *netdev = adapter->netdev;
4095 	struct device *dev = &adapter->vdev->dev;
4096 	long rc;
4097 
4098 	rc = crq->query_map_rsp.rc.code;
4099 	if (rc) {
4100 		dev_err(dev, "Error %ld in QUERY_MAP_RSP\n", rc);
4101 		return;
4102 	}
4103 	netdev_dbg(netdev, "page_size = %d\ntot_pages = %u\nfree_pages = %u\n",
4104 		   crq->query_map_rsp.page_size, be32_to_cpu(crq->query_map_rsp.tot_pages),
4105 		   be32_to_cpu(crq->query_map_rsp.free_pages));
4106 }
4107 
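/* Store a single queried capability in the adapter. When the last
 * outstanding query response arrives, proceed to requesting our own
 * capability values.
 */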
4108 static void handle_query_cap_rsp(union ibmvnic_crq *crq,
4109 				 struct ibmvnic_adapter *adapter)
4110 {
4111 	struct net_device *netdev = adapter->netdev;
4112 	struct device *dev = &adapter->vdev->dev;
4113 	long rc;
4114 
4115 	atomic_dec(&adapter->running_cap_crqs);
4116 	netdev_dbg(netdev, "Outstanding queries: %d\n",
4117 		   atomic_read(&adapter->running_cap_crqs));
4118 	rc = crq->query_capability.rc.code;
4119 	if (rc) {
4120 		dev_err(dev, "Error %ld in QUERY_CAP_RSP\n", rc);
4121 		goto out;
4122 	}
4123 
4124 	switch (be16_to_cpu(crq->query_capability.capability)) {
4125 	case MIN_TX_QUEUES:
4126 		adapter->min_tx_queues =
4127 		    be64_to_cpu(crq->query_capability.number);
4128 		netdev_dbg(netdev, "min_tx_queues = %lld\n",
4129 			   adapter->min_tx_queues);
4130 		break;
4131 	case MIN_RX_QUEUES:
4132 		adapter->min_rx_queues =
4133 		    be64_to_cpu(crq->query_capability.number);
4134 		netdev_dbg(netdev, "min_rx_queues = %lld\n",
4135 			   adapter->min_rx_queues);
4136 		break;
4137 	case MIN_RX_ADD_QUEUES:
4138 		adapter->min_rx_add_queues =
4139 		    be64_to_cpu(crq->query_capability.number);
4140 		netdev_dbg(netdev, "min_rx_add_queues = %lld\n",
4141 			   adapter->min_rx_add_queues);
4142 		break;
4143 	case MAX_TX_QUEUES:
4144 		adapter->max_tx_queues =
4145 		    be64_to_cpu(crq->query_capability.number);
4146 		netdev_dbg(netdev, "max_tx_queues = %lld\n",
4147 			   adapter->max_tx_queues);
4148 		break;
4149 	case MAX_RX_QUEUES:
4150 		adapter->max_rx_queues =
4151 		    be64_to_cpu(crq->query_capability.number);
4152 		netdev_dbg(netdev, "max_rx_queues = %lld\n",
4153 			   adapter->max_rx_queues);
4154 		break;
4155 	case MAX_RX_ADD_QUEUES:
4156 		adapter->max_rx_add_queues =
4157 		    be64_to_cpu(crq->query_capability.number);
4158 		netdev_dbg(netdev, "max_rx_add_queues = %lld\n",
4159 			   adapter->max_rx_add_queues);
4160 		break;
4161 	case MIN_TX_ENTRIES_PER_SUBCRQ:
4162 		adapter->min_tx_entries_per_subcrq =
4163 		    be64_to_cpu(crq->query_capability.number);
4164 		netdev_dbg(netdev, "min_tx_entries_per_subcrq = %lld\n",
4165 			   adapter->min_tx_entries_per_subcrq);
4166 		break;
4167 	case MIN_RX_ADD_ENTRIES_PER_SUBCRQ:
4168 		adapter->min_rx_add_entries_per_subcrq =
4169 		    be64_to_cpu(crq->query_capability.number);
4170 		netdev_dbg(netdev, "min_rx_add_entrs_per_subcrq = %lld\n",
4171 			   adapter->min_rx_add_entries_per_subcrq);
4172 		break;
4173 	case MAX_TX_ENTRIES_PER_SUBCRQ:
4174 		adapter->max_tx_entries_per_subcrq =
4175 		    be64_to_cpu(crq->query_capability.number);
4176 		netdev_dbg(netdev, "max_tx_entries_per_subcrq = %lld\n",
4177 			   adapter->max_tx_entries_per_subcrq);
4178 		break;
4179 	case MAX_RX_ADD_ENTRIES_PER_SUBCRQ:
4180 		adapter->max_rx_add_entries_per_subcrq =
4181 		    be64_to_cpu(crq->query_capability.number);
4182 		netdev_dbg(netdev, "max_rx_add_entrs_per_subcrq = %lld\n",
4183 			   adapter->max_rx_add_entries_per_subcrq);
4184 		break;
4185 	case TCP_IP_OFFLOAD:
4186 		adapter->tcp_ip_offload =
4187 		    be64_to_cpu(crq->query_capability.number);
4188 		netdev_dbg(netdev, "tcp_ip_offload = %lld\n",
4189 			   adapter->tcp_ip_offload);
4190 		break;
4191 	case PROMISC_SUPPORTED:
4192 		adapter->promisc_supported =
4193 		    be64_to_cpu(crq->query_capability.number);
4194 		netdev_dbg(netdev, "promisc_supported = %lld\n",
4195 			   adapter->promisc_supported);
4196 		break;
4197 	case MIN_MTU:
4198 		adapter->min_mtu = be64_to_cpu(crq->query_capability.number);
4199 		netdev->min_mtu = adapter->min_mtu - ETH_HLEN;
4200 		netdev_dbg(netdev, "min_mtu = %lld\n", adapter->min_mtu);
4201 		break;
4202 	case MAX_MTU:
4203 		adapter->max_mtu = be64_to_cpu(crq->query_capability.number);
4204 		netdev->max_mtu = adapter->max_mtu - ETH_HLEN;
4205 		netdev_dbg(netdev, "max_mtu = %lld\n", adapter->max_mtu);
4206 		break;
4207 	case MAX_MULTICAST_FILTERS:
4208 		adapter->max_multicast_filters =
4209 		    be64_to_cpu(crq->query_capability.number);
4210 		netdev_dbg(netdev, "max_multicast_filters = %lld\n",
4211 			   adapter->max_multicast_filters);
4212 		break;
4213 	case VLAN_HEADER_INSERTION:
4214 		adapter->vlan_header_insertion =
4215 		    be64_to_cpu(crq->query_capability.number);
4216 		if (adapter->vlan_header_insertion)
4217 			netdev->features |= NETIF_F_HW_VLAN_STAG_TX;
4218 		netdev_dbg(netdev, "vlan_header_insertion = %lld\n",
4219 			   adapter->vlan_header_insertion);
4220 		break;
4221 	case RX_VLAN_HEADER_INSERTION:
4222 		adapter->rx_vlan_header_insertion =
4223 		    be64_to_cpu(crq->query_capability.number);
4224 		netdev_dbg(netdev, "rx_vlan_header_insertion = %lld\n",
4225 			   adapter->rx_vlan_header_insertion);
4226 		break;
4227 	case MAX_TX_SG_ENTRIES:
4228 		adapter->max_tx_sg_entries =
4229 		    be64_to_cpu(crq->query_capability.number);
4230 		netdev_dbg(netdev, "max_tx_sg_entries = %lld\n",
4231 			   adapter->max_tx_sg_entries);
4232 		break;
4233 	case RX_SG_SUPPORTED:
4234 		adapter->rx_sg_supported =
4235 		    be64_to_cpu(crq->query_capability.number);
4236 		netdev_dbg(netdev, "rx_sg_supported = %lld\n",
4237 			   adapter->rx_sg_supported);
4238 		break;
4239 	case OPT_TX_COMP_SUB_QUEUES:
4240 		adapter->opt_tx_comp_sub_queues =
4241 		    be64_to_cpu(crq->query_capability.number);
4242 		netdev_dbg(netdev, "opt_tx_comp_sub_queues = %lld\n",
4243 			   adapter->opt_tx_comp_sub_queues);
4244 		break;
4245 	case OPT_RX_COMP_QUEUES:
4246 		adapter->opt_rx_comp_queues =
4247 		    be64_to_cpu(crq->query_capability.number);
4248 		netdev_dbg(netdev, "opt_rx_comp_queues = %lld\n",
4249 			   adapter->opt_rx_comp_queues);
4250 		break;
4251 	case OPT_RX_BUFADD_Q_PER_RX_COMP_Q:
4252 		adapter->opt_rx_bufadd_q_per_rx_comp_q =
4253 		    be64_to_cpu(crq->query_capability.number);
4254 		netdev_dbg(netdev, "opt_rx_bufadd_q_per_rx_comp_q = %lld\n",
4255 			   adapter->opt_rx_bufadd_q_per_rx_comp_q);
4256 		break;
4257 	case OPT_TX_ENTRIES_PER_SUBCRQ:
4258 		adapter->opt_tx_entries_per_subcrq =
4259 		    be64_to_cpu(crq->query_capability.number);
4260 		netdev_dbg(netdev, "opt_tx_entries_per_subcrq = %lld\n",
4261 			   adapter->opt_tx_entries_per_subcrq);
4262 		break;
4263 	case OPT_RXBA_ENTRIES_PER_SUBCRQ:
4264 		adapter->opt_rxba_entries_per_subcrq =
4265 		    be64_to_cpu(crq->query_capability.number);
4266 		netdev_dbg(netdev, "opt_rxba_entries_per_subcrq = %lld\n",
4267 			   adapter->opt_rxba_entries_per_subcrq);
4268 		break;
4269 	case TX_RX_DESC_REQ:
4270 		adapter->tx_rx_desc_req = crq->query_capability.number;
4271 		netdev_dbg(netdev, "tx_rx_desc_req = %llx\n",
4272 			   adapter->tx_rx_desc_req);
4273 		break;
4274 
4275 	default:
4276 		netdev_err(netdev, "Got invalid cap rsp %d\n",
4277 			   be16_to_cpu(crq->query_capability.capability));
4278 	}
4279 
4280 out:
4281 	if (atomic_read(&adapter->running_cap_crqs) == 0) {
4282 		adapter->wait_capability = false;
4283 		ibmvnic_send_req_caps(adapter, 0);
4284 	}
4285 }
4286 
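/* Query the physical port parameters (speed and duplex) and wait for the
 * response, which is parsed by handle_query_phys_parms_rsp().
 */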
4287 static int send_query_phys_parms(struct ibmvnic_adapter *adapter)
4288 {
4289 	union ibmvnic_crq crq;
4290 	int rc;
4291 
4292 	memset(&crq, 0, sizeof(crq));
4293 	crq.query_phys_parms.first = IBMVNIC_CRQ_CMD;
4294 	crq.query_phys_parms.cmd = QUERY_PHYS_PARMS;
4295 	init_completion(&adapter->fw_done);
4296 	rc = ibmvnic_send_crq(adapter, &crq);
4297 	if (rc)
4298 		return rc;
4299 	wait_for_completion(&adapter->fw_done);
4300 	return adapter->fw_done_rc ? -EIO : 0;
4301 }
4302 
4303 static int handle_query_phys_parms_rsp(union ibmvnic_crq *crq,
4304 				       struct ibmvnic_adapter *adapter)
4305 {
4306 	struct net_device *netdev = adapter->netdev;
4307 	int rc;
4308 
4309 	rc = crq->query_phys_parms_rsp.rc.code;
4310 	if (rc) {
4311 		netdev_err(netdev, "Error %d in QUERY_PHYS_PARMS\n", rc);
4312 		return rc;
4313 	}
4314 	switch (be32_to_cpu(crq->query_phys_parms_rsp.speed)) {
4315 	case IBMVNIC_10MBPS:
4316 		adapter->speed = SPEED_10;
4317 		break;
4318 	case IBMVNIC_100MBPS:
4319 		adapter->speed = SPEED_100;
4320 		break;
4321 	case IBMVNIC_1GBPS:
4322 		adapter->speed = SPEED_1000;
4323 		break;
4324 	case IBMVNIC_10GBP:
4325 		adapter->speed = SPEED_10000;
4326 		break;
4327 	case IBMVNIC_25GBPS:
4328 		adapter->speed = SPEED_25000;
4329 		break;
4330 	case IBMVNIC_40GBPS:
4331 		adapter->speed = SPEED_40000;
4332 		break;
4333 	case IBMVNIC_50GBPS:
4334 		adapter->speed = SPEED_50000;
4335 		break;
4336 	case IBMVNIC_100GBPS:
4337 		adapter->speed = SPEED_100000;
4338 		break;
4339 	default:
4340 		netdev_warn(netdev, "Unknown speed 0x%08x\n",
4341 			    be32_to_cpu(crq->query_phys_parms_rsp.speed));
4342 		adapter->speed = SPEED_UNKNOWN;
4343 	}
4344 	if (crq->query_phys_parms_rsp.flags1 & IBMVNIC_FULL_DUPLEX)
4345 		adapter->duplex = DUPLEX_FULL;
4346 	else if (crq->query_phys_parms_rsp.flags1 & IBMVNIC_HALF_DUPLEX)
4347 		adapter->duplex = DUPLEX_HALF;
4348 	else
4349 		adapter->duplex = DUPLEX_UNKNOWN;
4350 
4351 	return rc;
4352 }
4353 
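/* Top-level dispatcher for incoming CRQ messages: transport events
 * (partner init, failover, migration) are handled directly, command
 * responses are fanned out to the handlers above.
 */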
4354 static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
4355 			       struct ibmvnic_adapter *adapter)
4356 {
4357 	struct ibmvnic_generic_crq *gen_crq = &crq->generic;
4358 	struct net_device *netdev = adapter->netdev;
4359 	struct device *dev = &adapter->vdev->dev;
4360 	u64 *u64_crq = (u64 *)crq;
4361 	long rc;
4362 
4363 	netdev_dbg(netdev, "Handling CRQ: %016lx %016lx\n",
4364 		   (unsigned long)be64_to_cpu(u64_crq[0]),
4365 		   (unsigned long)be64_to_cpu(u64_crq[1]));
4366 	switch (gen_crq->first) {
4367 	case IBMVNIC_CRQ_INIT_RSP:
4368 		switch (gen_crq->cmd) {
4369 		case IBMVNIC_CRQ_INIT:
4370 			dev_info(dev, "Partner initialized\n");
4371 			adapter->from_passive_init = true;
4372 			adapter->failover_pending = false;
4373 			if (!completion_done(&adapter->init_done)) {
4374 				adapter->init_done_rc = -EIO;
4375 				complete(&adapter->init_done);
4376 			}
4377 			ibmvnic_reset(adapter, VNIC_RESET_FAILOVER);
4378 			break;
4379 		case IBMVNIC_CRQ_INIT_COMPLETE:
4380 			dev_info(dev, "Partner initialization complete\n");
4381 			adapter->crq.active = true;
4382 			send_version_xchg(adapter);
4383 			break;
4384 		default:
4385 			dev_err(dev, "Unknown crq cmd: %d\n", gen_crq->cmd);
4386 		}
4387 		return;
4388 	case IBMVNIC_CRQ_XPORT_EVENT:
4389 		netif_carrier_off(netdev);
4390 		adapter->crq.active = false;
4391 		if (adapter->resetting)
4392 			adapter->force_reset_recovery = true;
4393 		if (gen_crq->cmd == IBMVNIC_PARTITION_MIGRATED) {
4394 			dev_info(dev, "Migrated, re-enabling adapter\n");
4395 			ibmvnic_reset(adapter, VNIC_RESET_MOBILITY);
4396 		} else if (gen_crq->cmd == IBMVNIC_DEVICE_FAILOVER) {
4397 			dev_info(dev, "Backing device failover detected\n");
4398 			adapter->failover_pending = true;
4399 		} else {
4400 			/* The adapter lost the connection */
4401 			dev_err(dev, "Virtual Adapter failed (rc=%d)\n",
4402 				gen_crq->cmd);
4403 			ibmvnic_reset(adapter, VNIC_RESET_FATAL);
4404 		}
4405 		return;
4406 	case IBMVNIC_CRQ_CMD_RSP:
4407 		break;
4408 	default:
4409 		dev_err(dev, "Got an invalid msg type 0x%02x\n",
4410 			gen_crq->first);
4411 		return;
4412 	}
4413 
4414 	switch (gen_crq->cmd) {
4415 	case VERSION_EXCHANGE_RSP:
4416 		rc = crq->version_exchange_rsp.rc.code;
4417 		if (rc) {
4418 			dev_err(dev, "Error %ld in VERSION_EXCHG_RSP\n", rc);
4419 			break;
4420 		}
4421 		dev_info(dev, "Partner protocol version is %d\n",
4422 			 be16_to_cpu(crq->version_exchange_rsp.version));
4423 		if (be16_to_cpu(crq->version_exchange_rsp.version) <
4424 		    ibmvnic_version)
4425 			ibmvnic_version =
4426 			    be16_to_cpu(crq->version_exchange_rsp.version);
4427 		send_cap_queries(adapter);
4428 		break;
4429 	case QUERY_CAPABILITY_RSP:
4430 		handle_query_cap_rsp(crq, adapter);
4431 		break;
4432 	case QUERY_MAP_RSP:
4433 		handle_query_map_rsp(crq, adapter);
4434 		break;
4435 	case REQUEST_MAP_RSP:
4436 		adapter->fw_done_rc = crq->request_map_rsp.rc.code;
4437 		complete(&adapter->fw_done);
4438 		break;
4439 	case REQUEST_UNMAP_RSP:
4440 		handle_request_unmap_rsp(crq, adapter);
4441 		break;
4442 	case REQUEST_CAPABILITY_RSP:
4443 		handle_request_cap_rsp(crq, adapter);
4444 		break;
4445 	case LOGIN_RSP:
4446 		netdev_dbg(netdev, "Got Login Response\n");
4447 		handle_login_rsp(crq, adapter);
4448 		break;
4449 	case LOGICAL_LINK_STATE_RSP:
4450 		netdev_dbg(netdev,
4451 			   "Got Logical Link State Response, state: %d rc: %d\n",
4452 			   crq->logical_link_state_rsp.link_state,
4453 			   crq->logical_link_state_rsp.rc.code);
4454 		adapter->logical_link_state =
4455 		    crq->logical_link_state_rsp.link_state;
4456 		adapter->init_done_rc = crq->logical_link_state_rsp.rc.code;
4457 		complete(&adapter->init_done);
4458 		break;
4459 	case LINK_STATE_INDICATION:
4460 		netdev_dbg(netdev, "Got Logical Link State Indication\n");
4461 		adapter->phys_link_state =
4462 		    crq->link_state_indication.phys_link_state;
4463 		adapter->logical_link_state =
4464 		    crq->link_state_indication.logical_link_state;
4465 		if (adapter->phys_link_state && adapter->logical_link_state)
4466 			netif_carrier_on(netdev);
4467 		else
4468 			netif_carrier_off(netdev);
4469 		break;
4470 	case CHANGE_MAC_ADDR_RSP:
4471 		netdev_dbg(netdev, "Got MAC address change Response\n");
4472 		adapter->fw_done_rc = handle_change_mac_rsp(crq, adapter);
4473 		break;
4474 	case ERROR_INDICATION:
4475 		netdev_dbg(netdev, "Got Error Indication\n");
4476 		handle_error_indication(crq, adapter);
4477 		break;
4478 	case REQUEST_STATISTICS_RSP:
4479 		netdev_dbg(netdev, "Got Statistics Response\n");
4480 		complete(&adapter->stats_done);
4481 		break;
4482 	case QUERY_IP_OFFLOAD_RSP:
4483 		netdev_dbg(netdev, "Got Query IP offload Response\n");
4484 		handle_query_ip_offload_rsp(adapter);
4485 		break;
4486 	case MULTICAST_CTRL_RSP:
4487 		netdev_dbg(netdev, "Got multicast control Response\n");
4488 		break;
4489 	case CONTROL_IP_OFFLOAD_RSP:
4490 		netdev_dbg(netdev, "Got Control IP offload Response\n");
4491 		dma_unmap_single(dev, adapter->ip_offload_ctrl_tok,
4492 				 sizeof(adapter->ip_offload_ctrl),
4493 				 DMA_TO_DEVICE);
4494 		complete(&adapter->init_done);
4495 		break;
4496 	case COLLECT_FW_TRACE_RSP:
4497 		netdev_dbg(netdev, "Got Collect firmware trace Response\n");
4498 		complete(&adapter->fw_done);
4499 		break;
4500 	case GET_VPD_SIZE_RSP:
4501 		handle_vpd_size_rsp(crq, adapter);
4502 		break;
4503 	case GET_VPD_RSP:
4504 		handle_vpd_rsp(crq, adapter);
4505 		break;
4506 	case QUERY_PHYS_PARMS_RSP:
4507 		adapter->fw_done_rc = handle_query_phys_parms_rsp(crq, adapter);
4508 		complete(&adapter->fw_done);
4509 		break;
4510 	default:
4511 		netdev_err(netdev, "Got an invalid cmd type 0x%02x\n",
4512 			   gen_crq->cmd);
4513 	}
4514 }
4515 
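/* CRQ interrupt handler; all processing is deferred to the tasklet */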
4516 static irqreturn_t ibmvnic_interrupt(int irq, void *instance)
4517 {
4518 	struct ibmvnic_adapter *adapter = instance;
4519 
4520 	tasklet_schedule(&adapter->tasklet);
4521 	return IRQ_HANDLED;
4522 }
4523 
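/* Drain the CRQ under the queue lock and handle each message, staying in
 * the tasklet while capability responses are still outstanding.
 */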
4524 static void ibmvnic_tasklet(unsigned long data)
4525 {
4526 	struct ibmvnic_adapter *adapter = (struct ibmvnic_adapter *)data;
4527 	struct ibmvnic_crq_queue *queue = &adapter->crq;
4528 	union ibmvnic_crq *crq;
4529 	unsigned long flags;
4530 	bool done = false;
4531 
4532 	spin_lock_irqsave(&queue->lock, flags);
4533 	while (!done) {
4534 		/* Pull all the valid messages off the CRQ */
4535 		while ((crq = ibmvnic_next_crq(adapter)) != NULL) {
4536 			ibmvnic_handle_crq(crq, adapter);
4537 			crq->generic.first = 0;
4538 		}
4539 
4540 		/* remain in tasklet until all
4541 		 * capability responses are received
4542 		 */
4543 		if (!adapter->wait_capability)
4544 			done = true;
4545 	}
4546 	/* if capability CRQs were sent in this tasklet, the next
4547 	 * tasklet run must wait until all responses are received
4548 	 */
4549 	if (atomic_read(&adapter->running_cap_crqs) != 0)
4550 		adapter->wait_capability = true;
4551 	spin_unlock_irqrestore(&queue->lock, flags);
4552 }
4553 
4554 static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *adapter)
4555 {
4556 	struct vio_dev *vdev = adapter->vdev;
4557 	int rc;
4558 
4559 	do {
4560 		rc = plpar_hcall_norets(H_ENABLE_CRQ, vdev->unit_address);
4561 	} while (rc == H_IN_PROGRESS || rc == H_BUSY || H_IS_LONG_BUSY(rc));
4562 
4563 	if (rc)
4564 		dev_err(&vdev->dev, "Error enabling adapter (rc=%d)\n", rc);
4565 
4566 	return rc;
4567 }
4568 
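/* Close, clear and re-register the main CRQ with the hypervisor, used
 * when the queue must be bounced without a full release and re-init.
 */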
4569 static int ibmvnic_reset_crq(struct ibmvnic_adapter *adapter)
4570 {
4571 	struct ibmvnic_crq_queue *crq = &adapter->crq;
4572 	struct device *dev = &adapter->vdev->dev;
4573 	struct vio_dev *vdev = adapter->vdev;
4574 	int rc;
4575 
4576 	/* Close the CRQ */
4577 	do {
4578 		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
4579 	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
4580 
4581 	/* Clean out the queue */
4582 	memset(crq->msgs, 0, PAGE_SIZE);
4583 	crq->cur = 0;
4584 	crq->active = false;
4585 
4586 	/* And re-open it again */
4587 	rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
4588 				crq->msg_token, PAGE_SIZE);
4589 
4590 	if (rc == H_CLOSED)
4591 		/* Adapter is good, but other end is not ready */
4592 		dev_warn(dev, "Partner adapter not ready\n");
4593 	else if (rc != 0)
4594 		dev_warn(dev, "Couldn't register crq (rc=%d)\n", rc);
4595 
4596 	return rc;
4597 }
4598 
4599 static void release_crq_queue(struct ibmvnic_adapter *adapter)
4600 {
4601 	struct ibmvnic_crq_queue *crq = &adapter->crq;
4602 	struct vio_dev *vdev = adapter->vdev;
4603 	long rc;
4604 
4605 	if (!crq->msgs)
4606 		return;
4607 
4608 	netdev_dbg(adapter->netdev, "Releasing CRQ\n");
4609 	free_irq(vdev->irq, adapter);
4610 	tasklet_kill(&adapter->tasklet);
4611 	do {
4612 		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
4613 	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
4614 
4615 	dma_unmap_single(&vdev->dev, crq->msg_token, PAGE_SIZE,
4616 			 DMA_BIDIRECTIONAL);
4617 	free_page((unsigned long)crq->msgs);
4618 	crq->msgs = NULL;
4619 	crq->active = false;
4620 }
4621 
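/* Allocate and DMA-map the CRQ page, register it with the hypervisor,
 * and wire up the interrupt and tasklet that service it.
 */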
4622 static int init_crq_queue(struct ibmvnic_adapter *adapter)
4623 {
4624 	struct ibmvnic_crq_queue *crq = &adapter->crq;
4625 	struct device *dev = &adapter->vdev->dev;
4626 	struct vio_dev *vdev = adapter->vdev;
4627 	int rc, retrc = -ENOMEM;
4628 
4629 	if (crq->msgs)
4630 		return 0;
4631 
4632 	crq->msgs = (union ibmvnic_crq *)get_zeroed_page(GFP_KERNEL);
4633 	/* Should we allocate more than one page? */
4634 
4635 	if (!crq->msgs)
4636 		return -ENOMEM;
4637 
4638 	crq->size = PAGE_SIZE / sizeof(*crq->msgs);
4639 	crq->msg_token = dma_map_single(dev, crq->msgs, PAGE_SIZE,
4640 					DMA_BIDIRECTIONAL);
4641 	if (dma_mapping_error(dev, crq->msg_token))
4642 		goto map_failed;
4643 
4644 	rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
4645 				crq->msg_token, PAGE_SIZE);
4646 
4647 	if (rc == H_RESOURCE)
4648 		/* maybe kexecing and resource is busy. try a reset */
4649 		rc = ibmvnic_reset_crq(adapter);
4650 	retrc = rc;
4651 
4652 	if (rc == H_CLOSED) {
4653 		dev_warn(dev, "Partner adapter not ready\n");
4654 	} else if (rc) {
4655 		dev_warn(dev, "Error %d opening adapter\n", rc);
4656 		goto reg_crq_failed;
4657 	}
4658 
4659 	retrc = 0;
4660 
4661 	tasklet_init(&adapter->tasklet, ibmvnic_tasklet,
4662 		     (unsigned long)adapter);
4663 
4664 	netdev_dbg(adapter->netdev, "registering irq 0x%x\n", vdev->irq);
4665 	snprintf(crq->name, sizeof(crq->name), "ibmvnic-%x",
4666 		 adapter->vdev->unit_address);
4667 	rc = request_irq(vdev->irq, ibmvnic_interrupt, 0, crq->name, adapter);
4668 	if (rc) {
4669 		dev_err(dev, "Couldn't register irq 0x%x. rc=%d\n",
4670 			vdev->irq, rc);
4671 		goto req_irq_failed;
4672 	}
4673 
4674 	rc = vio_enable_interrupts(vdev);
4675 	if (rc) {
4676 		dev_err(dev, "Error %d enabling interrupts\n", rc);
4677 		goto req_irq_failed;
4678 	}
4679 
4680 	crq->cur = 0;
4681 	spin_lock_init(&crq->lock);
4682 
4683 	return retrc;
4684 
4685 req_irq_failed:
4686 	tasklet_kill(&adapter->tasklet);
4687 	do {
4688 		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
4689 	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
4690 reg_crq_failed:
4691 	dma_unmap_single(dev, crq->msg_token, PAGE_SIZE, DMA_BIDIRECTIONAL);
4692 map_failed:
4693 	free_page((unsigned long)crq->msgs);
4694 	crq->msgs = NULL;
4695 	return retrc;
4696 }
4697 
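/* Re-run the CRQ initialization handshake during a reset, reusing the
 * existing sub-CRQs when the requested queue counts have not changed.
 */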
4698 static int ibmvnic_reset_init(struct ibmvnic_adapter *adapter)
4699 {
4700 	struct device *dev = &adapter->vdev->dev;
4701 	unsigned long timeout = msecs_to_jiffies(30000);
4702 	u64 old_num_rx_queues, old_num_tx_queues;
4703 	int rc;
4704 
4705 	adapter->from_passive_init = false;
4706 
4707 	old_num_rx_queues = adapter->req_rx_queues;
4708 	old_num_tx_queues = adapter->req_tx_queues;
4709 
4710 	reinit_completion(&adapter->init_done);
4711 	adapter->init_done_rc = 0;
4712 	ibmvnic_send_crq_init(adapter);
4713 	if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
4714 		dev_err(dev, "Initialization sequence timed out\n");
4715 		return -ETIMEDOUT;
4716 	}
4717 
4718 	if (adapter->init_done_rc) {
4719 		release_crq_queue(adapter);
4720 		return adapter->init_done_rc;
4721 	}
4722 
4723 	if (adapter->from_passive_init) {
4724 		adapter->state = VNIC_OPEN;
4725 		adapter->from_passive_init = false;
4726 		return -1;
4727 	}
4728 
4729 	if (adapter->resetting && !adapter->wait_for_reset &&
4730 	    adapter->reset_reason != VNIC_RESET_MOBILITY) {
4731 		if (adapter->req_rx_queues != old_num_rx_queues ||
4732 		    adapter->req_tx_queues != old_num_tx_queues) {
4733 			release_sub_crqs(adapter, 0);
4734 			rc = init_sub_crqs(adapter);
4735 		} else {
4736 			rc = reset_sub_crq_queues(adapter);
4737 		}
4738 	} else {
4739 		rc = init_sub_crqs(adapter);
4740 	}
4741 
4742 	if (rc) {
4743 		dev_err(dev, "Initialization of sub crqs failed\n");
4744 		release_crq_queue(adapter);
4745 		return rc;
4746 	}
4747 
4748 	rc = init_sub_crq_irqs(adapter);
4749 	if (rc) {
4750 		dev_err(dev, "Failed to initialize sub crq irqs\n");
4751 		release_crq_queue(adapter);
4752 	}
4753 
4754 	return rc;
4755 }
4756 
4757 static int ibmvnic_init(struct ibmvnic_adapter *adapter)
4758 {
4759 	struct device *dev = &adapter->vdev->dev;
4760 	unsigned long timeout = msecs_to_jiffies(30000);
4761 	int rc;
4762 
4763 	adapter->from_passive_init = false;
4764 
4765 	adapter->init_done_rc = 0;
4766 	ibmvnic_send_crq_init(adapter);
4767 	if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
4768 		dev_err(dev, "Initialization sequence timed out\n");
4769 		return -ETIMEDOUT;
4770 	}
4771 
4772 	if (adapter->init_done_rc) {
4773 		release_crq_queue(adapter);
4774 		return adapter->init_done_rc;
4775 	}
4776 
4777 	if (adapter->from_passive_init) {
4778 		adapter->state = VNIC_OPEN;
4779 		adapter->from_passive_init = false;
4780 		return -1;
4781 	}
4782 
4783 	rc = init_sub_crqs(adapter);
4784 	if (rc) {
4785 		dev_err(dev, "Initialization of sub crqs failed\n");
4786 		release_crq_queue(adapter);
4787 		return rc;
4788 	}
4789 
4790 	rc = init_sub_crq_irqs(adapter);
4791 	if (rc) {
4792 		dev_err(dev, "Failed to initialize sub crq irqs\n");
4793 		release_crq_queue(adapter);
4794 	}
4795 
4796 	return rc;
4797 }
4798 
4799 static struct device_attribute dev_attr_failover;
4800 
4801 static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
4802 {
4803 	struct ibmvnic_adapter *adapter;
4804 	struct net_device *netdev;
4805 	unsigned char *mac_addr_p;
4806 	int rc;
4807 
4808 	dev_dbg(&dev->dev, "entering ibmvnic_probe for UA 0x%x\n",
4809 		dev->unit_address);
4810 
4811 	mac_addr_p = (unsigned char *)vio_get_attribute(dev,
4812 							VETH_MAC_ADDR, NULL);
4813 	if (!mac_addr_p) {
4814 		dev_err(&dev->dev,
4815 			"(%s:%3.3d) ERROR: Can't find MAC_ADDR attribute\n",
4816 			__FILE__, __LINE__);
4817 		return 0;
4818 	}
4819 
4820 	netdev = alloc_etherdev_mq(sizeof(struct ibmvnic_adapter),
4821 				   IBMVNIC_MAX_QUEUES);
4822 	if (!netdev)
4823 		return -ENOMEM;
4824 
4825 	adapter = netdev_priv(netdev);
4826 	adapter->state = VNIC_PROBING;
4827 	dev_set_drvdata(&dev->dev, netdev);
4828 	adapter->vdev = dev;
4829 	adapter->netdev = netdev;
4830 
4831 	ether_addr_copy(adapter->mac_addr, mac_addr_p);
4832 	ether_addr_copy(netdev->dev_addr, adapter->mac_addr);
4833 	netdev->irq = dev->irq;
4834 	netdev->netdev_ops = &ibmvnic_netdev_ops;
4835 	netdev->ethtool_ops = &ibmvnic_ethtool_ops;
4836 	SET_NETDEV_DEV(netdev, &dev->dev);
4837 
4838 	spin_lock_init(&adapter->stats_lock);
4839 
4840 	INIT_WORK(&adapter->ibmvnic_reset, __ibmvnic_reset);
4841 	INIT_LIST_HEAD(&adapter->rwi_list);
4842 	spin_lock_init(&adapter->rwi_lock);
4843 	init_completion(&adapter->init_done);
4844 	adapter->resetting = false;
4845 
4846 	do {
4847 		rc = init_crq_queue(adapter);
4848 		if (rc) {
4849 			dev_err(&dev->dev, "Couldn't initialize crq. rc=%d\n",
4850 				rc);
4851 			goto ibmvnic_init_fail;
4852 		}
4853 
4854 		rc = ibmvnic_init(adapter);
4855 		if (rc && rc != EAGAIN)
4856 			goto ibmvnic_init_fail;
4857 	} while (rc == EAGAIN);
4858 
4859 	rc = init_stats_buffers(adapter);
4860 	if (rc)
4861 		goto ibmvnic_init_fail;
4862 
4863 	rc = init_stats_token(adapter);
4864 	if (rc)
4865 		goto ibmvnic_stats_fail;
4866 
4867 	netdev->mtu = adapter->req_mtu - ETH_HLEN;
4868 	netdev->min_mtu = adapter->min_mtu - ETH_HLEN;
4869 	netdev->max_mtu = adapter->max_mtu - ETH_HLEN;
4870 
4871 	rc = device_create_file(&dev->dev, &dev_attr_failover);
4872 	if (rc)
4873 		goto ibmvnic_dev_file_err;
4874 
4875 	netif_carrier_off(netdev);
4876 	rc = register_netdev(netdev);
4877 	if (rc) {
4878 		dev_err(&dev->dev, "failed to register netdev rc=%d\n", rc);
4879 		goto ibmvnic_register_fail;
4880 	}
4881 	dev_info(&dev->dev, "ibmvnic registered\n");
4882 
4883 	adapter->state = VNIC_PROBED;
4884 
4885 	adapter->wait_for_reset = false;
4886 
4887 	return 0;
4888 
4889 ibmvnic_register_fail:
4890 	device_remove_file(&dev->dev, &dev_attr_failover);
4891 
4892 ibmvnic_dev_file_err:
4893 	release_stats_token(adapter);
4894 
4895 ibmvnic_stats_fail:
4896 	release_stats_buffers(adapter);
4897 
4898 ibmvnic_init_fail:
4899 	release_sub_crqs(adapter, 1);
4900 	release_crq_queue(adapter);
4901 	free_netdev(netdev);
4902 
4903 	return rc;
4904 }
4905 
4906 static int ibmvnic_remove(struct vio_dev *dev)
4907 {
4908 	struct net_device *netdev = dev_get_drvdata(&dev->dev);
4909 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
4910 
4911 	adapter->state = VNIC_REMOVING;
4912 	rtnl_lock();
4913 	unregister_netdevice(netdev);
4914 
4915 	release_resources(adapter);
4916 	release_sub_crqs(adapter, 1);
4917 	release_crq_queue(adapter);
4918 
4919 	release_stats_token(adapter);
4920 	release_stats_buffers(adapter);
4921 
4922 	adapter->state = VNIC_REMOVED;
4923 
4924 	rtnl_unlock();
4925 	device_remove_file(&dev->dev, &dev_attr_failover);
4926 	free_netdev(netdev);
4927 	dev_set_drvdata(&dev->dev, NULL);
4928 
4929 	return 0;
4930 }
4931 
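/* sysfs hook: writing "1" to the failover attribute fetches the session
 * token and asks the hypervisor to initiate a client failover.
 */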
4932 static ssize_t failover_store(struct device *dev, struct device_attribute *attr,
4933 			      const char *buf, size_t count)
4934 {
4935 	struct net_device *netdev = dev_get_drvdata(dev);
4936 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
4937 	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
4938 	__be64 session_token;
4939 	long rc;
4940 
4941 	if (!sysfs_streq(buf, "1"))
4942 		return -EINVAL;
4943 
4944 	rc = plpar_hcall(H_VIOCTL, retbuf, adapter->vdev->unit_address,
4945 			 H_GET_SESSION_TOKEN, 0, 0, 0);
4946 	if (rc) {
4947 		netdev_err(netdev, "Couldn't retrieve session token, rc %ld\n",
4948 			   rc);
4949 		return -EINVAL;
4950 	}
4951 
4952 	session_token = (__be64)retbuf[0];
4953 	netdev_dbg(netdev, "Initiating client failover, session id %llx\n",
4954 		   be64_to_cpu(session_token));
4955 	rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
4956 				H_SESSION_ERR_DETECTED, session_token, 0, 0);
4957 	if (rc) {
4958 		netdev_err(netdev, "Client initiated failover failed, rc %ld\n",
4959 			   rc);
4960 		return -EINVAL;
4961 	}
4962 
4963 	return count;
4964 }
4965 
4966 static DEVICE_ATTR_WO(failover);
4967 
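/* Estimate the IO entitlement (DMA window space) this device wants: the
 * CRQ page, the statistics buffer, the sub-CRQ queues and the rx buffer
 * pools.
 */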
4968 static unsigned long ibmvnic_get_desired_dma(struct vio_dev *vdev)
4969 {
4970 	struct net_device *netdev = dev_get_drvdata(&vdev->dev);
4971 	struct ibmvnic_adapter *adapter;
4972 	struct iommu_table *tbl;
4973 	unsigned long ret = 0;
4974 	int i;
4975 
4976 	tbl = get_iommu_table_base(&vdev->dev);
4977 
4978 	/* netdev inits at probe time along with the structures we need below */
4979 	if (!netdev)
4980 		return IOMMU_PAGE_ALIGN(IBMVNIC_IO_ENTITLEMENT_DEFAULT, tbl);
4981 
4982 	adapter = netdev_priv(netdev);
4983 
4984 	ret += PAGE_SIZE; /* the crq message queue */
4985 	ret += IOMMU_PAGE_ALIGN(sizeof(struct ibmvnic_statistics), tbl);
4986 
4987 	for (i = 0; i < adapter->req_tx_queues + adapter->req_rx_queues; i++)
4988 		ret += 4 * PAGE_SIZE; /* the scrq message queue */
4989 
4990 	for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
4991 	     i++)
4992 		ret += adapter->rx_pool[i].size *
4993 		    IOMMU_PAGE_ALIGN(adapter->rx_pool[i].buff_size, tbl);
4994 
4995 	return ret;
4996 }
4997 
4998 static int ibmvnic_resume(struct device *dev)
4999 {
5000 	struct net_device *netdev = dev_get_drvdata(dev);
5001 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
5002 
5003 	if (adapter->state != VNIC_OPEN)
5004 		return 0;
5005 
5006 	tasklet_schedule(&adapter->tasklet);
5007 
5008 	return 0;
5009 }
5010 
5011 static const struct vio_device_id ibmvnic_device_table[] = {
5012 	{"network", "IBM,vnic"},
5013 	{"", "" }
5014 };
5015 MODULE_DEVICE_TABLE(vio, ibmvnic_device_table);
5016 
5017 static const struct dev_pm_ops ibmvnic_pm_ops = {
5018 	.resume = ibmvnic_resume
5019 };
5020 
5021 static struct vio_driver ibmvnic_driver = {
5022 	.id_table       = ibmvnic_device_table,
5023 	.probe          = ibmvnic_probe,
5024 	.remove         = ibmvnic_remove,
5025 	.get_desired_dma = ibmvnic_get_desired_dma,
5026 	.name		= ibmvnic_driver_name,
5027 	.pm		= &ibmvnic_pm_ops,
5028 };
5029 
5030 /* module functions */
5031 static int __init ibmvnic_module_init(void)
5032 {
5033 	pr_info("%s: %s %s\n", ibmvnic_driver_name, ibmvnic_driver_string,
5034 		IBMVNIC_DRIVER_VERSION);
5035 
5036 	return vio_register_driver(&ibmvnic_driver);
5037 }
5038 
5039 static void __exit ibmvnic_module_exit(void)
5040 {
5041 	vio_unregister_driver(&ibmvnic_driver);
5042 }
5043 
5044 module_init(ibmvnic_module_init);
5045 module_exit(ibmvnic_module_exit);
5046