xref: /linux/drivers/net/ethernet/ibm/ibmvnic.c (revision 0427612cddef07568ba80596a02089181092783d)
1 /**************************************************************************/
2 /*                                                                        */
3 /*  IBM System i and System p Virtual NIC Device Driver                   */
4 /*  Copyright (C) 2014 IBM Corp.                                          */
5 /*  Santiago Leon (santi_leon@yahoo.com)                                  */
6 /*  Thomas Falcon (tlfalcon@linux.vnet.ibm.com)                           */
7 /*  John Allen (jallen@linux.vnet.ibm.com)                                */
8 /*                                                                        */
9 /*  This program is free software; you can redistribute it and/or modify  */
10 /*  it under the terms of the GNU General Public License as published by  */
11 /*  the Free Software Foundation; either version 2 of the License, or     */
12 /*  (at your option) any later version.                                   */
13 /*                                                                        */
14 /*  This program is distributed in the hope that it will be useful,       */
15 /*  but WITHOUT ANY WARRANTY; without even the implied warranty of        */
16 /*  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the         */
17 /*  GNU General Public License for more details.                          */
18 /*                                                                        */
19 /*  You should have received a copy of the GNU General Public License     */
20 /*  along with this program.                                              */
21 /*                                                                        */
22 /* This module contains the implementation of a virtual ethernet device   */
23 /* for use with IBM i/p Series LPAR Linux. It utilizes the logical LAN    */
24 /* option of the RS/6000 Platform Architecture to interface with virtual  */
25 /* ethernet NICs that are presented to the partition by the hypervisor.   */
26 /*                                                                        */
27 /* Messages are passed between the VNIC driver and the VNIC server using  */
28 /* Command/Response Queues (CRQs) and sub CRQs (sCRQs). CRQs are used to  */
29 /* issue and receive commands that initiate communication with the server */
30 /* on driver initialization. Sub CRQs (sCRQs) are similar to CRQs, but    */
31 /* are used by the driver to notify the server that a packet is           */
32 /* ready for transmission or that a buffer has been added to receive a    */
33 /* packet. Subsequently, sCRQs are used by the server to notify the       */
34 /* driver that a packet transmission has been completed or that a packet  */
35 /* has been received and placed in a waiting buffer.                      */
36 /*                                                                        */
37 /* In lieu of a more conventional "on-the-fly" DMA mapping strategy in    */
38 /* which skbs are DMA mapped and immediately unmapped when the transmit   */
39 /* or receive has been completed, the VNIC driver is required to use      */
40 /* "long term mapping". This entails that large, contiguous DMA mapped    */
41 /* buffers are allocated on driver initialization and these buffers are   */
42 /* then continuously reused to pass skbs to and from the VNIC server.     */
43 /*                                                                        */
44 /**************************************************************************/
45 
46 #include <linux/module.h>
47 #include <linux/moduleparam.h>
48 #include <linux/types.h>
49 #include <linux/errno.h>
50 #include <linux/completion.h>
51 #include <linux/ioport.h>
52 #include <linux/dma-mapping.h>
53 #include <linux/kernel.h>
54 #include <linux/netdevice.h>
55 #include <linux/etherdevice.h>
56 #include <linux/skbuff.h>
57 #include <linux/init.h>
58 #include <linux/delay.h>
59 #include <linux/mm.h>
60 #include <linux/ethtool.h>
61 #include <linux/proc_fs.h>
62 #include <linux/if_arp.h>
63 #include <linux/in.h>
64 #include <linux/ip.h>
65 #include <linux/ipv6.h>
66 #include <linux/irq.h>
67 #include <linux/kthread.h>
68 #include <linux/seq_file.h>
69 #include <linux/interrupt.h>
70 #include <net/net_namespace.h>
71 #include <asm/hvcall.h>
72 #include <linux/atomic.h>
73 #include <asm/vio.h>
74 #include <asm/iommu.h>
75 #include <linux/uaccess.h>
76 #include <asm/firmware.h>
77 #include <linux/workqueue.h>
78 #include <linux/if_vlan.h>
79 #include <linux/utsname.h>
80 
81 #include "ibmvnic.h"
82 
83 static const char ibmvnic_driver_name[] = "ibmvnic";
84 static const char ibmvnic_driver_string[] = "IBM System i/p Virtual NIC Driver";
85 
86 MODULE_AUTHOR("Santiago Leon");
87 MODULE_DESCRIPTION("IBM System i/p Virtual NIC Driver");
88 MODULE_LICENSE("GPL");
89 MODULE_VERSION(IBMVNIC_DRIVER_VERSION);
90 
91 static int ibmvnic_version = IBMVNIC_INITIAL_VERSION;
92 static int ibmvnic_remove(struct vio_dev *);
93 static void release_sub_crqs(struct ibmvnic_adapter *, bool);
94 static int ibmvnic_reset_crq(struct ibmvnic_adapter *);
95 static int ibmvnic_send_crq_init(struct ibmvnic_adapter *);
96 static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *);
97 static int ibmvnic_send_crq(struct ibmvnic_adapter *, union ibmvnic_crq *);
98 static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle,
99 		       union sub_crq *sub_crq);
100 static int send_subcrq_indirect(struct ibmvnic_adapter *, u64, u64, u64);
101 static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance);
102 static int enable_scrq_irq(struct ibmvnic_adapter *,
103 			   struct ibmvnic_sub_crq_queue *);
104 static int disable_scrq_irq(struct ibmvnic_adapter *,
105 			    struct ibmvnic_sub_crq_queue *);
106 static int pending_scrq(struct ibmvnic_adapter *,
107 			struct ibmvnic_sub_crq_queue *);
108 static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *,
109 					struct ibmvnic_sub_crq_queue *);
110 static int ibmvnic_poll(struct napi_struct *napi, int data);
111 static void send_map_query(struct ibmvnic_adapter *adapter);
112 static int send_request_map(struct ibmvnic_adapter *, dma_addr_t, __be32, u8);
113 static int send_request_unmap(struct ibmvnic_adapter *, u8);
114 static int send_login(struct ibmvnic_adapter *adapter);
115 static void send_cap_queries(struct ibmvnic_adapter *adapter);
116 static int init_sub_crqs(struct ibmvnic_adapter *);
117 static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter);
118 static int ibmvnic_init(struct ibmvnic_adapter *);
119 static int ibmvnic_reset_init(struct ibmvnic_adapter *);
120 static void release_crq_queue(struct ibmvnic_adapter *);
121 static int __ibmvnic_set_mac(struct net_device *netdev, struct sockaddr *p);
122 static int init_crq_queue(struct ibmvnic_adapter *adapter);
123 
124 struct ibmvnic_stat {
125 	char name[ETH_GSTRING_LEN];
126 	int offset;
127 };
128 
129 #define IBMVNIC_STAT_OFF(stat) (offsetof(struct ibmvnic_adapter, stats) + \
130 			     offsetof(struct ibmvnic_statistics, stat))
131 #define IBMVNIC_GET_STAT(a, off) (*((u64 *)(((unsigned long)(a)) + off)))
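/* Example: IBMVNIC_GET_STAT(adapter, ibmvnic_stats[i].offset) reads the u64
 * counter located 'offset' bytes into the adapter, i.e. one of the fields of
 * adapter->stats that firmware fills in.
 */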
132 
133 static const struct ibmvnic_stat ibmvnic_stats[] = {
134 	{"rx_packets", IBMVNIC_STAT_OFF(rx_packets)},
135 	{"rx_bytes", IBMVNIC_STAT_OFF(rx_bytes)},
136 	{"tx_packets", IBMVNIC_STAT_OFF(tx_packets)},
137 	{"tx_bytes", IBMVNIC_STAT_OFF(tx_bytes)},
138 	{"ucast_tx_packets", IBMVNIC_STAT_OFF(ucast_tx_packets)},
139 	{"ucast_rx_packets", IBMVNIC_STAT_OFF(ucast_rx_packets)},
140 	{"mcast_tx_packets", IBMVNIC_STAT_OFF(mcast_tx_packets)},
141 	{"mcast_rx_packets", IBMVNIC_STAT_OFF(mcast_rx_packets)},
142 	{"bcast_tx_packets", IBMVNIC_STAT_OFF(bcast_tx_packets)},
143 	{"bcast_rx_packets", IBMVNIC_STAT_OFF(bcast_rx_packets)},
144 	{"align_errors", IBMVNIC_STAT_OFF(align_errors)},
145 	{"fcs_errors", IBMVNIC_STAT_OFF(fcs_errors)},
146 	{"single_collision_frames", IBMVNIC_STAT_OFF(single_collision_frames)},
147 	{"multi_collision_frames", IBMVNIC_STAT_OFF(multi_collision_frames)},
148 	{"sqe_test_errors", IBMVNIC_STAT_OFF(sqe_test_errors)},
149 	{"deferred_tx", IBMVNIC_STAT_OFF(deferred_tx)},
150 	{"late_collisions", IBMVNIC_STAT_OFF(late_collisions)},
151 	{"excess_collisions", IBMVNIC_STAT_OFF(excess_collisions)},
152 	{"internal_mac_tx_errors", IBMVNIC_STAT_OFF(internal_mac_tx_errors)},
153 	{"carrier_sense", IBMVNIC_STAT_OFF(carrier_sense)},
154 	{"too_long_frames", IBMVNIC_STAT_OFF(too_long_frames)},
155 	{"internal_mac_rx_errors", IBMVNIC_STAT_OFF(internal_mac_rx_errors)},
156 };
157 
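/* Register a sub-CRQ with the hypervisor. On success, *number receives the
 * sub-CRQ number and *irq the interrupt source returned by firmware.
 */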
158 static long h_reg_sub_crq(unsigned long unit_address, unsigned long token,
159 			  unsigned long length, unsigned long *number,
160 			  unsigned long *irq)
161 {
162 	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
163 	long rc;
164 
165 	rc = plpar_hcall(H_REG_SUB_CRQ, retbuf, unit_address, token, length);
166 	*number = retbuf[0];
167 	*irq = retbuf[1];
168 
169 	return rc;
170 }
171 
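/* Allocate a DMA-coherent long term buffer and register it with firmware via
 * a REQUEST_MAP CRQ (send_request_map()); completion is signaled through
 * adapter->fw_done by the CRQ response handler.
 */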
172 static int alloc_long_term_buff(struct ibmvnic_adapter *adapter,
173 				struct ibmvnic_long_term_buff *ltb, int size)
174 {
175 	struct device *dev = &adapter->vdev->dev;
176 	int rc;
177 
178 	ltb->size = size;
179 	ltb->buff = dma_alloc_coherent(dev, ltb->size, &ltb->addr,
180 				       GFP_KERNEL);
181 
182 	if (!ltb->buff) {
183 		dev_err(dev, "Couldn't alloc long term buffer\n");
184 		return -ENOMEM;
185 	}
186 	ltb->map_id = adapter->map_id;
187 	adapter->map_id++;
188 
189 	init_completion(&adapter->fw_done);
190 	rc = send_request_map(adapter, ltb->addr,
191 			      ltb->size, ltb->map_id);
192 	if (rc) {
193 		dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
194 		return rc;
195 	}
196 	wait_for_completion(&adapter->fw_done);
197 
198 	if (adapter->fw_done_rc) {
199 		dev_err(dev, "Couldn't map long term buffer, rc = %d\n",
200 			adapter->fw_done_rc);
201 		dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
202 		return -1;
203 	}
204 	return 0;
205 }
206 
207 static void free_long_term_buff(struct ibmvnic_adapter *adapter,
208 				struct ibmvnic_long_term_buff *ltb)
209 {
210 	struct device *dev = &adapter->vdev->dev;
211 
212 	if (!ltb->buff)
213 		return;
214 
215 	if (adapter->reset_reason != VNIC_RESET_FAILOVER &&
216 	    adapter->reset_reason != VNIC_RESET_MOBILITY)
217 		send_request_unmap(adapter, ltb->map_id);
218 	dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
219 }
220 
221 static int reset_long_term_buff(struct ibmvnic_adapter *adapter,
222 				struct ibmvnic_long_term_buff *ltb)
223 {
224 	int rc;
225 
226 	memset(ltb->buff, 0, ltb->size);
227 
228 	init_completion(&adapter->fw_done);
229 	rc = send_request_map(adapter, ltb->addr, ltb->size, ltb->map_id);
230 	if (rc)
231 		return rc;
232 	wait_for_completion(&adapter->fw_done);
233 
234 	if (adapter->fw_done_rc) {
235 		dev_info(&adapter->vdev->dev,
236 			 "Reset failed, attempting to free and reallocate buffer\n");
237 		free_long_term_buff(adapter, ltb);
238 		return alloc_long_term_buff(adapter, ltb, ltb->size);
239 	}
240 	return 0;
241 }
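
/* Minimal usage sketch for the long term buffer helpers above (illustrative
 * only, not driver code). A pool sizes its buffer as nbufs * buf_size and
 * addresses slot 'index' in both CPU and DMA space:
 *
 *	struct ibmvnic_long_term_buff ltb = { 0 };
 *
 *	if (!alloc_long_term_buff(adapter, &ltb, nbufs * buf_size)) {
 *		u8 *dst = ltb.buff + index * buf_size;          (CPU view)
 *		dma_addr_t ioba = ltb.addr + index * buf_size;  (device view)
 *		...
 *		free_long_term_buff(adapter, &ltb);
 *	}
 */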
242 
243 static void deactivate_rx_pools(struct ibmvnic_adapter *adapter)
244 {
245 	int i;
246 
247 	for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
248 	     i++)
249 		adapter->rx_pool[i].active = 0;
250 }
251 
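/* Top up an rx pool to its configured size: each buffer is a slot in the
 * pool's long term buffer, described to firmware by one rx_add sub-CRQ
 * descriptor per buffer.
 */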
252 static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
253 			      struct ibmvnic_rx_pool *pool)
254 {
255 	int count = pool->size - atomic_read(&pool->available);
256 	struct device *dev = &adapter->vdev->dev;
257 	int buffers_added = 0;
258 	unsigned long lpar_rc;
259 	union sub_crq sub_crq;
260 	struct sk_buff *skb;
261 	unsigned int offset;
262 	dma_addr_t dma_addr;
263 	unsigned char *dst;
264 	u64 *handle_array;
265 	int shift = 0;
266 	int index;
267 	int i;
268 
269 	if (!pool->active)
270 		return;
271 
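	/* Sub-CRQ handles live in the login response buffer at the offset
	 * reported by the server; index by pool to get this queue's handle.
	 */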
272 	handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
273 				      be32_to_cpu(adapter->login_rsp_buf->
274 				      off_rxadd_subcrqs));
275 
276 	for (i = 0; i < count; ++i) {
277 		skb = alloc_skb(pool->buff_size, GFP_ATOMIC);
278 		if (!skb) {
279 			dev_err(dev, "Couldn't replenish rx buff\n");
280 			adapter->replenish_no_mem++;
281 			break;
282 		}
283 
284 		index = pool->free_map[pool->next_free];
285 
286 		if (pool->rx_buff[index].skb)
287 			dev_err(dev, "Inconsistent free_map!\n");
288 
289 		/* Copy the skb to the long term mapped DMA buffer */
290 		offset = index * pool->buff_size;
291 		dst = pool->long_term_buff.buff + offset;
292 		memset(dst, 0, pool->buff_size);
293 		dma_addr = pool->long_term_buff.addr + offset;
294 		pool->rx_buff[index].data = dst;
295 
296 		pool->free_map[pool->next_free] = IBMVNIC_INVALID_MAP;
297 		pool->rx_buff[index].dma = dma_addr;
298 		pool->rx_buff[index].skb = skb;
299 		pool->rx_buff[index].pool_index = pool->index;
300 		pool->rx_buff[index].size = pool->buff_size;
301 
302 		memset(&sub_crq, 0, sizeof(sub_crq));
303 		sub_crq.rx_add.first = IBMVNIC_CRQ_CMD;
304 		sub_crq.rx_add.correlator =
305 		    cpu_to_be64((u64)&pool->rx_buff[index]);
306 		sub_crq.rx_add.ioba = cpu_to_be32(dma_addr);
307 		sub_crq.rx_add.map_id = pool->long_term_buff.map_id;
308 
309 		/* The length field of the sCRQ is defined to be 24 bits so the
310 		 * buffer size needs to be left shifted by a byte before it is
311 		 * converted to big endian to prevent the last byte from being
312 		 * truncated.
313 		 */
314 #ifdef __LITTLE_ENDIAN__
315 		shift = 8;
316 #endif
317 		sub_crq.rx_add.len = cpu_to_be32(pool->buff_size << shift);
318 
319 		lpar_rc = send_subcrq(adapter, handle_array[pool->index],
320 				      &sub_crq);
321 		if (lpar_rc != H_SUCCESS)
322 			goto failure;
323 
324 		buffers_added++;
325 		adapter->replenish_add_buff_success++;
326 		pool->next_free = (pool->next_free + 1) % pool->size;
327 	}
328 	atomic_add(buffers_added, &pool->available);
329 	return;
330 
331 failure:
332 	if (lpar_rc != H_PARAMETER && lpar_rc != H_CLOSED)
333 		dev_err_ratelimited(dev, "rx: replenish packet buffer failed\n");
334 	pool->free_map[pool->next_free] = index;
335 	pool->rx_buff[index].skb = NULL;
336 
337 	dev_kfree_skb_any(skb);
338 	adapter->replenish_add_buff_failure++;
339 	atomic_add(buffers_added, &pool->available);
340 
341 	if (lpar_rc == H_CLOSED || adapter->failover_pending) {
342 		/* Disable buffer pool replenishment and report carrier off if
343 		 * queue is closed or pending failover.
344 		 * Firmware guarantees that a signal will be sent to the
345 		 * driver, triggering a reset.
346 		 */
347 		deactivate_rx_pools(adapter);
348 		netif_carrier_off(adapter->netdev);
349 	}
350 }
351 
352 static void replenish_pools(struct ibmvnic_adapter *adapter)
353 {
354 	int i;
355 
356 	adapter->replenish_task_cycles++;
357 	for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
358 	     i++) {
359 		if (adapter->rx_pool[i].active)
360 			replenish_rx_pool(adapter, &adapter->rx_pool[i]);
361 	}
362 }
363 
364 static void release_stats_buffers(struct ibmvnic_adapter *adapter)
365 {
366 	kfree(adapter->tx_stats_buffers);
367 	kfree(adapter->rx_stats_buffers);
368 	adapter->tx_stats_buffers = NULL;
369 	adapter->rx_stats_buffers = NULL;
370 }
371 
372 static int init_stats_buffers(struct ibmvnic_adapter *adapter)
373 {
374 	adapter->tx_stats_buffers =
375 				kcalloc(IBMVNIC_MAX_QUEUES,
376 					sizeof(struct ibmvnic_tx_queue_stats),
377 					GFP_KERNEL);
378 	if (!adapter->tx_stats_buffers)
379 		return -ENOMEM;
380 
381 	adapter->rx_stats_buffers =
382 				kcalloc(IBMVNIC_MAX_QUEUES,
383 					sizeof(struct ibmvnic_rx_queue_stats),
384 					GFP_KERNEL);
385 	if (!adapter->rx_stats_buffers)
386 		return -ENOMEM;
387 
388 	return 0;
389 }
390 
391 static void release_stats_token(struct ibmvnic_adapter *adapter)
392 {
393 	struct device *dev = &adapter->vdev->dev;
394 
395 	if (!adapter->stats_token)
396 		return;
397 
398 	dma_unmap_single(dev, adapter->stats_token,
399 			 sizeof(struct ibmvnic_statistics),
400 			 DMA_FROM_DEVICE);
401 	adapter->stats_token = 0;
402 }
403 
404 static int init_stats_token(struct ibmvnic_adapter *adapter)
405 {
406 	struct device *dev = &adapter->vdev->dev;
407 	dma_addr_t stok;
408 
409 	stok = dma_map_single(dev, &adapter->stats,
410 			      sizeof(struct ibmvnic_statistics),
411 			      DMA_FROM_DEVICE);
412 	if (dma_mapping_error(dev, stok)) {
413 		dev_err(dev, "Couldn't map stats buffer\n");
414 		return -1;
415 	}
416 
417 	adapter->stats_token = stok;
418 	netdev_dbg(adapter->netdev, "Stats token initialized (%llx)\n", stok);
419 	return 0;
420 }
421 
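/* Re-initialize all rx pools after a reset. If the server now advertises a
 * different buffer size, the long term buffer is reallocated; otherwise it
 * is simply cleared and re-registered.
 */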
422 static int reset_rx_pools(struct ibmvnic_adapter *adapter)
423 {
424 	struct ibmvnic_rx_pool *rx_pool;
425 	int rx_scrqs;
426 	int i, j, rc;
427 	u64 *size_array;
428 
429 	size_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
430 		be32_to_cpu(adapter->login_rsp_buf->off_rxadd_buff_size));
431 
432 	rx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
433 	for (i = 0; i < rx_scrqs; i++) {
434 		rx_pool = &adapter->rx_pool[i];
435 
436 		netdev_dbg(adapter->netdev, "Re-setting rx_pool[%d]\n", i);
437 
438 		if (rx_pool->buff_size != be64_to_cpu(size_array[i])) {
439 			free_long_term_buff(adapter, &rx_pool->long_term_buff);
440 			rx_pool->buff_size = be64_to_cpu(size_array[i]);
441 			rc = alloc_long_term_buff(adapter,
442 						  &rx_pool->long_term_buff,
443 						  rx_pool->size * rx_pool->buff_size);
444 		} else {
445 			rc = reset_long_term_buff(adapter,
446 						  &rx_pool->long_term_buff);
447 		}
448 
449 		if (rc)
450 			return rc;
451 
452 		for (j = 0; j < rx_pool->size; j++)
453 			rx_pool->free_map[j] = j;
454 
455 		memset(rx_pool->rx_buff, 0,
456 		       rx_pool->size * sizeof(struct ibmvnic_rx_buff));
457 
458 		atomic_set(&rx_pool->available, 0);
459 		rx_pool->next_alloc = 0;
460 		rx_pool->next_free = 0;
461 		rx_pool->active = 1;
462 	}
463 
464 	return 0;
465 }
466 
467 static void release_rx_pools(struct ibmvnic_adapter *adapter)
468 {
469 	struct ibmvnic_rx_pool *rx_pool;
470 	int i, j;
471 
472 	if (!adapter->rx_pool)
473 		return;
474 
475 	for (i = 0; i < adapter->num_active_rx_pools; i++) {
476 		rx_pool = &adapter->rx_pool[i];
477 
478 		netdev_dbg(adapter->netdev, "Releasing rx_pool[%d]\n", i);
479 
480 		kfree(rx_pool->free_map);
481 		free_long_term_buff(adapter, &rx_pool->long_term_buff);
482 
483 		if (!rx_pool->rx_buff)
484 			continue;
485 
486 		for (j = 0; j < rx_pool->size; j++) {
487 			if (rx_pool->rx_buff[j].skb) {
488 				dev_kfree_skb_any(rx_pool->rx_buff[j].skb);
489 				rx_pool->rx_buff[j].skb = NULL;
490 			}
491 		}
492 
493 		kfree(rx_pool->rx_buff);
494 	}
495 
496 	kfree(adapter->rx_pool);
497 	adapter->rx_pool = NULL;
498 	adapter->num_active_rx_pools = 0;
499 }
500 
501 static int init_rx_pools(struct net_device *netdev)
502 {
503 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
504 	struct device *dev = &adapter->vdev->dev;
505 	struct ibmvnic_rx_pool *rx_pool;
506 	int rxadd_subcrqs;
507 	u64 *size_array;
508 	int i, j;
509 
510 	rxadd_subcrqs =
511 		be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
512 	size_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
513 		be32_to_cpu(adapter->login_rsp_buf->off_rxadd_buff_size));
514 
515 	adapter->rx_pool = kcalloc(rxadd_subcrqs,
516 				   sizeof(struct ibmvnic_rx_pool),
517 				   GFP_KERNEL);
518 	if (!adapter->rx_pool) {
519 		dev_err(dev, "Failed to allocate rx pools\n");
520 		return -1;
521 	}
522 
523 	adapter->num_active_rx_pools = rxadd_subcrqs;
524 
525 	for (i = 0; i < rxadd_subcrqs; i++) {
526 		rx_pool = &adapter->rx_pool[i];
527 
528 		netdev_dbg(adapter->netdev,
529 			   "Initializing rx_pool[%d], %lld buffs, %lld bytes each\n",
530 			   i, adapter->req_rx_add_entries_per_subcrq,
531 			   be64_to_cpu(size_array[i]));
532 
533 		rx_pool->size = adapter->req_rx_add_entries_per_subcrq;
534 		rx_pool->index = i;
535 		rx_pool->buff_size = be64_to_cpu(size_array[i]);
536 		rx_pool->active = 1;
537 
538 		rx_pool->free_map = kcalloc(rx_pool->size, sizeof(int),
539 					    GFP_KERNEL);
540 		if (!rx_pool->free_map) {
541 			release_rx_pools(adapter);
542 			return -1;
543 		}
544 
545 		rx_pool->rx_buff = kcalloc(rx_pool->size,
546 					   sizeof(struct ibmvnic_rx_buff),
547 					   GFP_KERNEL);
548 		if (!rx_pool->rx_buff) {
549 			dev_err(dev, "Couldn't alloc rx buffers\n");
550 			release_rx_pools(adapter);
551 			return -1;
552 		}
553 
554 		if (alloc_long_term_buff(adapter, &rx_pool->long_term_buff,
555 					 rx_pool->size * rx_pool->buff_size)) {
556 			release_rx_pools(adapter);
557 			return -1;
558 		}
559 
560 		for (j = 0; j < rx_pool->size; ++j)
561 			rx_pool->free_map[j] = j;
562 
563 		atomic_set(&rx_pool->available, 0);
564 		rx_pool->next_alloc = 0;
565 		rx_pool->next_free = 0;
566 	}
567 
568 	return 0;
569 }
570 
571 static int reset_one_tx_pool(struct ibmvnic_adapter *adapter,
572 			     struct ibmvnic_tx_pool *tx_pool)
573 {
574 	int rc, i;
575 
576 	rc = reset_long_term_buff(adapter, &tx_pool->long_term_buff);
577 	if (rc)
578 		return rc;
579 
580 	memset(tx_pool->tx_buff, 0,
581 	       tx_pool->num_buffers *
582 	       sizeof(struct ibmvnic_tx_buff));
583 
584 	for (i = 0; i < tx_pool->num_buffers; i++)
585 		tx_pool->free_map[i] = i;
586 
587 	tx_pool->consumer_index = 0;
588 	tx_pool->producer_index = 0;
589 
590 	return 0;
591 }
592 
593 static int reset_tx_pools(struct ibmvnic_adapter *adapter)
594 {
595 	int tx_scrqs;
596 	int i, rc;
597 
598 	tx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
599 	for (i = 0; i < tx_scrqs; i++) {
600 		rc = reset_one_tx_pool(adapter, &adapter->tso_pool[i]);
601 		if (rc)
602 			return rc;
603 		rc = reset_one_tx_pool(adapter, &adapter->tx_pool[i]);
604 		if (rc)
605 			return rc;
606 	}
607 
608 	return 0;
609 }
610 
611 static void release_vpd_data(struct ibmvnic_adapter *adapter)
612 {
613 	if (!adapter->vpd)
614 		return;
615 
616 	kfree(adapter->vpd->buff);
617 	kfree(adapter->vpd);
618 
619 	adapter->vpd = NULL;
620 }
621 
622 static void release_one_tx_pool(struct ibmvnic_adapter *adapter,
623 				struct ibmvnic_tx_pool *tx_pool)
624 {
625 	kfree(tx_pool->tx_buff);
626 	kfree(tx_pool->free_map);
627 	free_long_term_buff(adapter, &tx_pool->long_term_buff);
628 }
629 
630 static void release_tx_pools(struct ibmvnic_adapter *adapter)
631 {
632 	int i;
633 
634 	if (!adapter->tx_pool)
635 		return;
636 
637 	for (i = 0; i < adapter->num_active_tx_pools; i++) {
638 		release_one_tx_pool(adapter, &adapter->tx_pool[i]);
639 		release_one_tx_pool(adapter, &adapter->tso_pool[i]);
640 	}
641 
642 	kfree(adapter->tx_pool);
643 	adapter->tx_pool = NULL;
644 	kfree(adapter->tso_pool);
645 	adapter->tso_pool = NULL;
646 	adapter->num_active_tx_pools = 0;
647 }
648 
649 static int init_one_tx_pool(struct net_device *netdev,
650 			    struct ibmvnic_tx_pool *tx_pool,
651 			    int num_entries, int buf_size)
652 {
653 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
654 	int i;
655 
656 	tx_pool->tx_buff = kcalloc(num_entries,
657 				   sizeof(struct ibmvnic_tx_buff),
658 				   GFP_KERNEL);
659 	if (!tx_pool->tx_buff)
660 		return -1;
661 
662 	if (alloc_long_term_buff(adapter, &tx_pool->long_term_buff,
663 				 num_entries * buf_size))
664 		return -1;
665 
666 	tx_pool->free_map = kcalloc(num_entries, sizeof(int), GFP_KERNEL);
667 	if (!tx_pool->free_map)
668 		return -1;
669 
670 	for (i = 0; i < num_entries; i++)
671 		tx_pool->free_map[i] = i;
672 
673 	tx_pool->consumer_index = 0;
674 	tx_pool->producer_index = 0;
675 	tx_pool->num_buffers = num_entries;
676 	tx_pool->buf_size = buf_size;
677 
678 	return 0;
679 }
680 
681 static int init_tx_pools(struct net_device *netdev)
682 {
683 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
684 	int tx_subcrqs;
685 	int i, rc;
686 
687 	tx_subcrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
688 	adapter->tx_pool = kcalloc(tx_subcrqs,
689 				   sizeof(struct ibmvnic_tx_pool), GFP_KERNEL);
690 	if (!adapter->tx_pool)
691 		return -1;
692 
693 	adapter->tso_pool = kcalloc(tx_subcrqs,
694 				    sizeof(struct ibmvnic_tx_pool), GFP_KERNEL);
695 	if (!adapter->tso_pool)
696 		return -1;
697 
698 	adapter->num_active_tx_pools = tx_subcrqs;
699 
700 	for (i = 0; i < tx_subcrqs; i++) {
701 		rc = init_one_tx_pool(netdev, &adapter->tx_pool[i],
702 				      adapter->req_tx_entries_per_subcrq,
703 				      adapter->req_mtu + VLAN_HLEN);
704 		if (rc) {
705 			release_tx_pools(adapter);
706 			return rc;
707 		}
708 
709 		rc = init_one_tx_pool(netdev, &adapter->tso_pool[i],
710 				 IBMVNIC_TSO_BUFS,
711 				 IBMVNIC_TSO_BUF_SZ);
712 		if (rc) {
713 			release_tx_pools(adapter);
714 			return rc;
715 		}
716 	}
717 
718 	return 0;
719 }
720 
721 static void ibmvnic_napi_enable(struct ibmvnic_adapter *adapter)
722 {
723 	int i;
724 
725 	if (adapter->napi_enabled)
726 		return;
727 
728 	for (i = 0; i < adapter->req_rx_queues; i++)
729 		napi_enable(&adapter->napi[i]);
730 
731 	adapter->napi_enabled = true;
732 }
733 
734 static void ibmvnic_napi_disable(struct ibmvnic_adapter *adapter)
735 {
736 	int i;
737 
738 	if (!adapter->napi_enabled)
739 		return;
740 
741 	for (i = 0; i < adapter->req_rx_queues; i++) {
742 		netdev_dbg(adapter->netdev, "Disabling napi[%d]\n", i);
743 		napi_disable(&adapter->napi[i]);
744 	}
745 
746 	adapter->napi_enabled = false;
747 }
748 
749 static int init_napi(struct ibmvnic_adapter *adapter)
750 {
751 	int i;
752 
753 	adapter->napi = kcalloc(adapter->req_rx_queues,
754 				sizeof(struct napi_struct), GFP_KERNEL);
755 	if (!adapter->napi)
756 		return -ENOMEM;
757 
758 	for (i = 0; i < adapter->req_rx_queues; i++) {
759 		netdev_dbg(adapter->netdev, "Adding napi[%d]\n", i);
760 		netif_napi_add(adapter->netdev, &adapter->napi[i],
761 			       ibmvnic_poll, NAPI_POLL_WEIGHT);
762 	}
763 
764 	adapter->num_active_rx_napi = adapter->req_rx_queues;
765 	return 0;
766 }
767 
768 static void release_napi(struct ibmvnic_adapter *adapter)
769 {
770 	int i;
771 
772 	if (!adapter->napi)
773 		return;
774 
775 	for (i = 0; i < adapter->num_active_rx_napi; i++) {
776 		netdev_dbg(adapter->netdev, "Releasing napi[%d]\n", i);
777 		netif_napi_del(&adapter->napi[i]);
781 	}
782 
783 	kfree(adapter->napi);
784 	adapter->napi = NULL;
785 	adapter->num_active_rx_napi = 0;
786 	adapter->napi_enabled = false;
787 }
788 
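/* Log in to the VNIC server. A PARTIALSUCCESS response means the server
 * could not honor the requested capability values; the driver then
 * re-queries capabilities, rebuilds its sub-CRQs and retries, giving up
 * after IBMVNIC_MAX_QUEUES attempts.
 */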
789 static int ibmvnic_login(struct net_device *netdev)
790 {
791 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
792 	unsigned long timeout = msecs_to_jiffies(30000);
793 	int retry_count = 0;
794 	bool retry;
795 	int rc;
796 
797 	do {
798 		retry = false;
799 		if (retry_count > IBMVNIC_MAX_QUEUES) {
800 			netdev_warn(netdev, "Login attempts exceeded\n");
801 			return -1;
802 		}
803 
804 		adapter->init_done_rc = 0;
805 		reinit_completion(&adapter->init_done);
806 		rc = send_login(adapter);
807 		if (rc) {
808 			netdev_warn(netdev, "Unable to login\n");
809 			return rc;
810 		}
811 
812 		if (!wait_for_completion_timeout(&adapter->init_done,
813 						 timeout)) {
814 			netdev_warn(netdev, "Login timed out\n");
815 			return -1;
816 		}
817 
818 		if (adapter->init_done_rc == PARTIALSUCCESS) {
819 			retry_count++;
820 			release_sub_crqs(adapter, 1);
821 
822 			retry = true;
823 			netdev_dbg(netdev,
824 				   "Received partial success, retrying...\n");
825 			adapter->init_done_rc = 0;
826 			reinit_completion(&adapter->init_done);
827 			send_cap_queries(adapter);
828 			if (!wait_for_completion_timeout(&adapter->init_done,
829 							 timeout)) {
830 				netdev_warn(netdev,
831 					    "Capabilities query timed out\n");
832 				return -1;
833 			}
834 
835 			rc = init_sub_crqs(adapter);
836 			if (rc) {
837 				netdev_warn(netdev,
838 					    "SCRQ initialization failed\n");
839 				return -1;
840 			}
841 
842 			rc = init_sub_crq_irqs(adapter);
843 			if (rc) {
844 				netdev_warn(netdev,
845 					    "SCRQ irq initialization failed\n");
846 				return -1;
847 			}
848 		} else if (adapter->init_done_rc) {
849 			netdev_warn(netdev, "Adapter login failed\n");
850 			return -1;
851 		}
852 	} while (retry);
853 
854 	/* handle pending MAC address changes after successful login */
855 	if (adapter->mac_change_pending) {
856 		__ibmvnic_set_mac(netdev, &adapter->desired.mac);
857 		adapter->mac_change_pending = false;
858 	}
859 
860 	return 0;
861 }
862 
863 static void release_login_buffer(struct ibmvnic_adapter *adapter)
864 {
865 	kfree(adapter->login_buf);
866 	adapter->login_buf = NULL;
867 }
868 
869 static void release_login_rsp_buffer(struct ibmvnic_adapter *adapter)
870 {
871 	kfree(adapter->login_rsp_buf);
872 	adapter->login_rsp_buf = NULL;
873 }
874 
875 static void release_resources(struct ibmvnic_adapter *adapter)
876 {
877 	release_vpd_data(adapter);
878 
879 	release_tx_pools(adapter);
880 	release_rx_pools(adapter);
881 
882 	release_napi(adapter);
883 	release_login_rsp_buffer(adapter);
884 }
885 
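/* Send a LOGICAL_LINK_STATE CRQ and wait for the response. A partial-success
 * response (init_done_rc == 1) is retried after a one second delay.
 */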
886 static int set_link_state(struct ibmvnic_adapter *adapter, u8 link_state)
887 {
888 	struct net_device *netdev = adapter->netdev;
889 	unsigned long timeout = msecs_to_jiffies(30000);
890 	union ibmvnic_crq crq;
891 	bool resend;
892 	int rc;
893 
894 	netdev_dbg(netdev, "setting link state %d\n", link_state);
895 
896 	memset(&crq, 0, sizeof(crq));
897 	crq.logical_link_state.first = IBMVNIC_CRQ_CMD;
898 	crq.logical_link_state.cmd = LOGICAL_LINK_STATE;
899 	crq.logical_link_state.link_state = link_state;
900 
901 	do {
902 		resend = false;
903 
904 		reinit_completion(&adapter->init_done);
905 		rc = ibmvnic_send_crq(adapter, &crq);
906 		if (rc) {
907 			netdev_err(netdev, "Failed to set link state\n");
908 			return rc;
909 		}
910 
911 		if (!wait_for_completion_timeout(&adapter->init_done,
912 						 timeout)) {
913 			netdev_err(netdev, "timeout setting link state\n");
914 			return -1;
915 		}
916 
917 		if (adapter->init_done_rc == 1) {
918 			/* Partial success, delay and re-send */
919 			mdelay(1000);
920 			resend = true;
921 		} else if (adapter->init_done_rc) {
922 			netdev_warn(netdev, "Unable to set link state, rc=%d\n",
923 				    adapter->init_done_rc);
924 			return adapter->init_done_rc;
925 		}
926 	} while (resend);
927 
928 	return 0;
929 }
930 
931 static int set_real_num_queues(struct net_device *netdev)
932 {
933 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
934 	int rc;
935 
936 	netdev_dbg(netdev, "Setting real tx/rx queues (%llx/%llx)\n",
937 		   adapter->req_tx_queues, adapter->req_rx_queues);
938 
939 	rc = netif_set_real_num_tx_queues(netdev, adapter->req_tx_queues);
940 	if (rc) {
941 		netdev_err(netdev, "failed to set the number of tx queues\n");
942 		return rc;
943 	}
944 
945 	rc = netif_set_real_num_rx_queues(netdev, adapter->req_rx_queues);
946 	if (rc)
947 		netdev_err(netdev, "failed to set the number of rx queues\n");
948 
949 	return rc;
950 }
951 
952 static int ibmvnic_get_vpd(struct ibmvnic_adapter *adapter)
953 {
954 	struct device *dev = &adapter->vdev->dev;
955 	union ibmvnic_crq crq;
956 	int len = 0;
957 	int rc;
958 
959 	if (adapter->vpd->buff)
960 		len = adapter->vpd->len;
961 
962 	init_completion(&adapter->fw_done);
963 	crq.get_vpd_size.first = IBMVNIC_CRQ_CMD;
964 	crq.get_vpd_size.cmd = GET_VPD_SIZE;
965 	rc = ibmvnic_send_crq(adapter, &crq);
966 	if (rc)
967 		return rc;
968 	wait_for_completion(&adapter->fw_done);
969 
970 	if (!adapter->vpd->len)
971 		return -ENODATA;
972 
973 	if (!adapter->vpd->buff)
974 		adapter->vpd->buff = kzalloc(adapter->vpd->len, GFP_KERNEL);
975 	else if (adapter->vpd->len != len)
976 		adapter->vpd->buff =
977 			krealloc(adapter->vpd->buff,
978 				 adapter->vpd->len, GFP_KERNEL);
979 
980 	if (!adapter->vpd->buff) {
981 		dev_err(dev, "Could not allocate VPD buffer\n");
982 		return -ENOMEM;
983 	}
984 
985 	adapter->vpd->dma_addr =
986 		dma_map_single(dev, adapter->vpd->buff, adapter->vpd->len,
987 			       DMA_FROM_DEVICE);
988 	if (dma_mapping_error(dev, adapter->vpd->dma_addr)) {
989 		dev_err(dev, "Could not map VPD buffer\n");
990 		kfree(adapter->vpd->buff);
991 		adapter->vpd->buff = NULL;
992 		return -ENOMEM;
993 	}
994 
995 	reinit_completion(&adapter->fw_done);
996 	crq.get_vpd.first = IBMVNIC_CRQ_CMD;
997 	crq.get_vpd.cmd = GET_VPD;
998 	crq.get_vpd.ioba = cpu_to_be32(adapter->vpd->dma_addr);
999 	crq.get_vpd.len = cpu_to_be32((u32)adapter->vpd->len);
1000 	rc = ibmvnic_send_crq(adapter, &crq);
1001 	if (rc) {
1002 		kfree(adapter->vpd->buff);
1003 		adapter->vpd->buff = NULL;
1004 		return rc;
1005 	}
1006 	wait_for_completion(&adapter->fw_done);
1007 
1008 	return 0;
1009 }
1010 
1011 static int init_resources(struct ibmvnic_adapter *adapter)
1012 {
1013 	struct net_device *netdev = adapter->netdev;
1014 	int rc;
1015 
1016 	rc = set_real_num_queues(netdev);
1017 	if (rc)
1018 		return rc;
1019 
1020 	adapter->vpd = kzalloc(sizeof(*adapter->vpd), GFP_KERNEL);
1021 	if (!adapter->vpd)
1022 		return -ENOMEM;
1023 
1024 	/* Vital Product Data (VPD) */
1025 	rc = ibmvnic_get_vpd(adapter);
1026 	if (rc) {
1027 		netdev_err(netdev, "failed to initialize Vital Product Data (VPD)\n");
1028 		return rc;
1029 	}
1030 
1031 	adapter->map_id = 1;
1032 
1033 	rc = init_napi(adapter);
1034 	if (rc)
1035 		return rc;
1036 
1037 	send_map_query(adapter);
1038 
1039 	rc = init_rx_pools(netdev);
1040 	if (rc)
1041 		return rc;
1042 
1043 	rc = init_tx_pools(netdev);
1044 	return rc;
1045 }
1046 
1047 static int __ibmvnic_open(struct net_device *netdev)
1048 {
1049 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1050 	enum vnic_state prev_state = adapter->state;
1051 	int i, rc;
1052 
1053 	adapter->state = VNIC_OPENING;
1054 	replenish_pools(adapter);
1055 	ibmvnic_napi_enable(adapter);
1056 
1057 	/* We're ready to receive frames, enable the sub-crq interrupts and
1058 	 * set the logical link state to up
1059 	 */
1060 	for (i = 0; i < adapter->req_rx_queues; i++) {
1061 		netdev_dbg(netdev, "Enabling rx_scrq[%d] irq\n", i);
1062 		if (prev_state == VNIC_CLOSED)
1063 			enable_irq(adapter->rx_scrq[i]->irq);
1064 		enable_scrq_irq(adapter, adapter->rx_scrq[i]);
1065 	}
1066 
1067 	for (i = 0; i < adapter->req_tx_queues; i++) {
1068 		netdev_dbg(netdev, "Enabling tx_scrq[%d] irq\n", i);
1069 		if (prev_state == VNIC_CLOSED)
1070 			enable_irq(adapter->tx_scrq[i]->irq);
1071 		enable_scrq_irq(adapter, adapter->tx_scrq[i]);
1072 	}
1073 
1074 	rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_UP);
1075 	if (rc) {
1076 		for (i = 0; i < adapter->req_rx_queues; i++)
1077 			napi_disable(&adapter->napi[i]);
1078 		release_resources(adapter);
1079 		return rc;
1080 	}
1081 
1082 	netif_tx_start_all_queues(netdev);
1083 
1084 	if (prev_state == VNIC_CLOSED) {
1085 		for (i = 0; i < adapter->req_rx_queues; i++)
1086 			napi_schedule(&adapter->napi[i]);
1087 	}
1088 
1089 	adapter->state = VNIC_OPEN;
1090 	return rc;
1091 }
1092 
1093 static int ibmvnic_open(struct net_device *netdev)
1094 {
1095 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1096 	int rc;
1097 
1098 	/* If device failover is pending, just set device state and return.
1099 	 * Device operation will be handled by reset routine.
1100 	 */
1101 	if (adapter->failover_pending) {
1102 		adapter->state = VNIC_OPEN;
1103 		return 0;
1104 	}
1105 
1106 	mutex_lock(&adapter->reset_lock);
1107 
1108 	if (adapter->state != VNIC_CLOSED) {
1109 		rc = ibmvnic_login(netdev);
1110 		if (rc) {
1111 			mutex_unlock(&adapter->reset_lock);
1112 			return rc;
1113 		}
1114 
1115 		rc = init_resources(adapter);
1116 		if (rc) {
1117 			netdev_err(netdev, "failed to initialize resources\n");
1118 			release_resources(adapter);
1119 			mutex_unlock(&adapter->reset_lock);
1120 			return rc;
1121 		}
1122 	}
1123 
1124 	rc = __ibmvnic_open(netdev);
1125 	netif_carrier_on(netdev);
1126 
1127 	mutex_unlock(&adapter->reset_lock);
1128 
1129 	return rc;
1130 }
1131 
1132 static void clean_rx_pools(struct ibmvnic_adapter *adapter)
1133 {
1134 	struct ibmvnic_rx_pool *rx_pool;
1135 	struct ibmvnic_rx_buff *rx_buff;
1136 	u64 rx_entries;
1137 	int rx_scrqs;
1138 	int i, j;
1139 
1140 	if (!adapter->rx_pool)
1141 		return;
1142 
1143 	rx_scrqs = adapter->num_active_rx_pools;
1144 	rx_entries = adapter->req_rx_add_entries_per_subcrq;
1145 
1146 	/* Free any remaining skbs in the rx buffer pools */
1147 	for (i = 0; i < rx_scrqs; i++) {
1148 		rx_pool = &adapter->rx_pool[i];
1149 		if (!rx_pool || !rx_pool->rx_buff)
1150 			continue;
1151 
1152 		netdev_dbg(adapter->netdev, "Cleaning rx_pool[%d]\n", i);
1153 		for (j = 0; j < rx_entries; j++) {
1154 			rx_buff = &rx_pool->rx_buff[j];
1155 			if (rx_buff && rx_buff->skb) {
1156 				dev_kfree_skb_any(rx_buff->skb);
1157 				rx_buff->skb = NULL;
1158 			}
1159 		}
1160 	}
1161 }
1162 
1163 static void clean_one_tx_pool(struct ibmvnic_adapter *adapter,
1164 			      struct ibmvnic_tx_pool *tx_pool)
1165 {
1166 	struct ibmvnic_tx_buff *tx_buff;
1167 	u64 tx_entries;
1168 	int i;
1169 
1170 	if (!tx_pool || !tx_pool->tx_buff)
1171 		return;
1172 
1173 	tx_entries = tx_pool->num_buffers;
1174 
1175 	for (i = 0; i < tx_entries; i++) {
1176 		tx_buff = &tx_pool->tx_buff[i];
1177 		if (tx_buff && tx_buff->skb) {
1178 			dev_kfree_skb_any(tx_buff->skb);
1179 			tx_buff->skb = NULL;
1180 		}
1181 	}
1182 }
1183 
1184 static void clean_tx_pools(struct ibmvnic_adapter *adapter)
1185 {
1186 	int tx_scrqs;
1187 	int i;
1188 
1189 	if (!adapter->tx_pool || !adapter->tso_pool)
1190 		return;
1191 
1192 	tx_scrqs = adapter->num_active_tx_pools;
1193 
1194 	/* Free any remaining skbs in the tx buffer pools */
1195 	for (i = 0; i < tx_scrqs; i++) {
1196 		netdev_dbg(adapter->netdev, "Cleaning tx_pool[%d]\n", i);
1197 		clean_one_tx_pool(adapter, &adapter->tx_pool[i]);
1198 		clean_one_tx_pool(adapter, &adapter->tso_pool[i]);
1199 	}
1200 }
1201 
1202 static void ibmvnic_disable_irqs(struct ibmvnic_adapter *adapter)
1203 {
1204 	struct net_device *netdev = adapter->netdev;
1205 	int i;
1206 
1207 	if (adapter->tx_scrq) {
1208 		for (i = 0; i < adapter->req_tx_queues; i++)
1209 			if (adapter->tx_scrq[i]->irq) {
1210 				netdev_dbg(netdev,
1211 					   "Disabling tx_scrq[%d] irq\n", i);
1212 				disable_scrq_irq(adapter, adapter->tx_scrq[i]);
1213 				disable_irq(adapter->tx_scrq[i]->irq);
1214 			}
1215 	}
1216 
1217 	if (adapter->rx_scrq) {
1218 		for (i = 0; i < adapter->req_rx_queues; i++) {
1219 			if (adapter->rx_scrq[i]->irq) {
1220 				netdev_dbg(netdev,
1221 					   "Disabling rx_scrq[%d] irq\n", i);
1222 				disable_scrq_irq(adapter, adapter->rx_scrq[i]);
1223 				disable_irq(adapter->rx_scrq[i]->irq);
1224 			}
1225 		}
1226 	}
1227 }
1228 
1229 static void ibmvnic_cleanup(struct net_device *netdev)
1230 {
1231 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1232 
1233 	/* ensure that transmissions are stopped if called by do_reset */
1234 	if (adapter->resetting)
1235 		netif_tx_disable(netdev);
1236 	else
1237 		netif_tx_stop_all_queues(netdev);
1238 
1239 	ibmvnic_napi_disable(adapter);
1240 	ibmvnic_disable_irqs(adapter);
1241 
1242 	clean_rx_pools(adapter);
1243 	clean_tx_pools(adapter);
1244 }
1245 
1246 static int __ibmvnic_close(struct net_device *netdev)
1247 {
1248 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1249 	int rc = 0;
1250 
1251 	adapter->state = VNIC_CLOSING;
1252 	rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_DN);
1253 	if (rc)
1254 		return rc;
1255 	adapter->state = VNIC_CLOSED;
1256 	return 0;
1257 }
1258 
1259 static int ibmvnic_close(struct net_device *netdev)
1260 {
1261 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1262 	int rc;
1263 
1264 	/* If device failover is pending, just set device state and return.
1265 	 * Device operation will be handled by reset routine.
1266 	 */
1267 	if (adapter->failover_pending) {
1268 		adapter->state = VNIC_CLOSED;
1269 		return 0;
1270 	}
1271 
1272 	mutex_lock(&adapter->reset_lock);
1273 	rc = __ibmvnic_close(netdev);
1274 	ibmvnic_cleanup(netdev);
1275 	mutex_unlock(&adapter->reset_lock);
1276 
1277 	return rc;
1278 }
1279 
1280 /**
1281  * build_hdr_data - creates L2/L3/L4 header data buffer
1282  * @hdr_field - bitfield determining needed headers
1283  * @skb - socket buffer
1284  * @hdr_len - array of header lengths
1285  * @hdr_data - buffer to store the built header data
1286  *
1287  * Reads hdr_field to determine which headers are needed by firmware.
1288  * Builds a buffer containing these headers.  Saves individual header
1289  * lengths and total buffer length to be used to build descriptors.
1290  */
1291 static int build_hdr_data(u8 hdr_field, struct sk_buff *skb,
1292 			  int *hdr_len, u8 *hdr_data)
1293 {
1294 	int len = 0;
1295 	u8 *hdr;
1296 
1297 	if (skb_vlan_tagged(skb) && !skb_vlan_tag_present(skb))
1298 		hdr_len[0] = sizeof(struct vlan_ethhdr);
1299 	else
1300 		hdr_len[0] = sizeof(struct ethhdr);
1301 
1302 	if (skb->protocol == htons(ETH_P_IP)) {
1303 		hdr_len[1] = ip_hdr(skb)->ihl * 4;
1304 		if (ip_hdr(skb)->protocol == IPPROTO_TCP)
1305 			hdr_len[2] = tcp_hdrlen(skb);
1306 		else if (ip_hdr(skb)->protocol == IPPROTO_UDP)
1307 			hdr_len[2] = sizeof(struct udphdr);
1308 	} else if (skb->protocol == htons(ETH_P_IPV6)) {
1309 		hdr_len[1] = sizeof(struct ipv6hdr);
1310 		if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
1311 			hdr_len[2] = tcp_hdrlen(skb);
1312 		else if (ipv6_hdr(skb)->nexthdr == IPPROTO_UDP)
1313 			hdr_len[2] = sizeof(struct udphdr);
1314 	} else if (skb->protocol == htons(ETH_P_ARP)) {
1315 		hdr_len[1] = arp_hdr_len(skb->dev);
1316 		hdr_len[2] = 0;
1317 	}
1318 
1319 	memset(hdr_data, 0, 120);
1320 	if ((hdr_field >> 6) & 1) {
1321 		hdr = skb_mac_header(skb);
1322 		memcpy(hdr_data, hdr, hdr_len[0]);
1323 		len += hdr_len[0];
1324 	}
1325 
1326 	if ((hdr_field >> 5) & 1) {
1327 		hdr = skb_network_header(skb);
1328 		memcpy(hdr_data + len, hdr, hdr_len[1]);
1329 		len += hdr_len[1];
1330 	}
1331 
1332 	if ((hdr_field >> 4) & 1) {
1333 		hdr = skb_transport_header(skb);
1334 		memcpy(hdr_data + len, hdr, hdr_len[2]);
1335 		len += hdr_len[2];
1336 	}
1337 	return len;
1338 }
1339 
1340 /**
1341  * create_hdr_descs - create header and header extension descriptors
1342  * @hdr_field - bitfield determining needed headers
1343  * @data - buffer containing header data
1344  * @len - length of data buffer
1345  * @hdr_len - array of individual header lengths
1346  * @scrq_arr - descriptor array
1347  *
1348  * Creates header and, if needed, header extension descriptors and
1349  * places them in a descriptor array, scrq_arr
1350  */
1351 
1352 static int create_hdr_descs(u8 hdr_field, u8 *hdr_data, int len, int *hdr_len,
1353 			    union sub_crq *scrq_arr)
1354 {
1355 	union sub_crq hdr_desc;
1356 	int tmp_len = len;
1357 	int num_descs = 0;
1358 	u8 *data, *cur;
1359 	int tmp;
1360 
1361 	while (tmp_len > 0) {
1362 		cur = hdr_data + len - tmp_len;
1363 
1364 		memset(&hdr_desc, 0, sizeof(hdr_desc));
1365 		if (cur != hdr_data) {
1366 			data = hdr_desc.hdr_ext.data;
1367 			tmp = tmp_len > 29 ? 29 : tmp_len;
1368 			hdr_desc.hdr_ext.first = IBMVNIC_CRQ_CMD;
1369 			hdr_desc.hdr_ext.type = IBMVNIC_HDR_EXT_DESC;
1370 			hdr_desc.hdr_ext.len = tmp;
1371 		} else {
1372 			data = hdr_desc.hdr.data;
1373 			tmp = tmp_len > 24 ? 24 : tmp_len;
1374 			hdr_desc.hdr.first = IBMVNIC_CRQ_CMD;
1375 			hdr_desc.hdr.type = IBMVNIC_HDR_DESC;
1376 			hdr_desc.hdr.len = tmp;
1377 			hdr_desc.hdr.l2_len = (u8)hdr_len[0];
1378 			hdr_desc.hdr.l3_len = cpu_to_be16((u16)hdr_len[1]);
1379 			hdr_desc.hdr.l4_len = (u8)hdr_len[2];
1380 			hdr_desc.hdr.flag = hdr_field << 1;
1381 		}
1382 		memcpy(data, cur, tmp);
1383 		tmp_len -= tmp;
1384 		*scrq_arr = hdr_desc;
1385 		scrq_arr++;
1386 		num_descs++;
1387 	}
1388 
1389 	return num_descs;
1390 }
1391 
1392 /**
1393  * build_hdr_descs_arr - build a header descriptor array
1394  * @txbuff - tx buffer containing the skb and header descriptor array
1395  * @num_entries - number of descriptors to be sent
1396  * @hdr_field - bit field determining which headers will be sent
1398  *
1399  * This function will build a TX descriptor array with applicable
1400  * L2/L3/L4 packet header descriptors to be sent by send_subcrq_indirect.
1401  */
1402 
1403 static void build_hdr_descs_arr(struct ibmvnic_tx_buff *txbuff,
1404 				int *num_entries, u8 hdr_field)
1405 {
1406 	int hdr_len[3] = {0, 0, 0};
1407 	int tot_len;
1408 	u8 *hdr_data = txbuff->hdr_data;
1409 
1410 	tot_len = build_hdr_data(hdr_field, txbuff->skb, hdr_len,
1411 				 txbuff->hdr_data);
1412 	*num_entries += create_hdr_descs(hdr_field, hdr_data, tot_len, hdr_len,
1413 			 txbuff->indir_arr + 1);
1414 }
1415 
1416 static int ibmvnic_xmit_workarounds(struct sk_buff *skb,
1417 				    struct net_device *netdev)
1418 {
1419 	/* For some backing devices, mishandling of small packets
1420 	 * can result in a loss of connection or TX stall. Device
1421 	 * architects recommend that no packet should be smaller
1422 	 * than the minimum MTU value provided to the driver, so
1423 	 * pad any packets to that length
1424 	 */
1425 	if (skb->len < netdev->min_mtu)
1426 		return skb_put_padto(skb, netdev->min_mtu);
1427 
1428 	return 0;
1429 }
1430 
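/* Transmit path. The skb is copied into a free slot of the queue's long term
 * mapped buffer and a V1 tx descriptor pointing at that slot is built. When
 * firmware has asked for L2/L3/L4 headers (*hdrs bit 7), header descriptors
 * are appended and the array is posted with send_subcrq_indirect();
 * otherwise the single descriptor goes out via send_subcrq().
 */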
1431 static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
1432 {
1433 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1434 	int queue_num = skb_get_queue_mapping(skb);
1435 	u8 *hdrs = (u8 *)&adapter->tx_rx_desc_req;
1436 	struct device *dev = &adapter->vdev->dev;
1437 	struct ibmvnic_tx_buff *tx_buff = NULL;
1438 	struct ibmvnic_sub_crq_queue *tx_scrq;
1439 	struct ibmvnic_tx_pool *tx_pool;
1440 	unsigned int tx_send_failed = 0;
1441 	unsigned int tx_map_failed = 0;
1442 	unsigned int tx_dropped = 0;
1443 	unsigned int tx_packets = 0;
1444 	unsigned int tx_bytes = 0;
1445 	dma_addr_t data_dma_addr;
1446 	struct netdev_queue *txq;
1447 	unsigned long lpar_rc;
1448 	union sub_crq tx_crq;
1449 	unsigned int offset;
1450 	int num_entries = 1;
1451 	unsigned char *dst;
1452 	u64 *handle_array;
1453 	int index = 0;
1454 	u8 proto = 0;
1455 	netdev_tx_t ret = NETDEV_TX_OK;
1456 
1457 	if (adapter->resetting) {
1458 		if (!netif_subqueue_stopped(netdev, skb))
1459 			netif_stop_subqueue(netdev, queue_num);
1460 		dev_kfree_skb_any(skb);
1461 
1462 		tx_send_failed++;
1463 		tx_dropped++;
1464 		ret = NETDEV_TX_OK;
1465 		goto out;
1466 	}
1467 
1468 	if (ibmvnic_xmit_workarounds(skb, netdev)) {
1469 		tx_dropped++;
1470 		tx_send_failed++;
1471 		ret = NETDEV_TX_OK;
1472 		goto out;
1473 	}
1474 	if (skb_is_gso(skb))
1475 		tx_pool = &adapter->tso_pool[queue_num];
1476 	else
1477 		tx_pool = &adapter->tx_pool[queue_num];
1478 
1479 	tx_scrq = adapter->tx_scrq[queue_num];
1480 	txq = netdev_get_tx_queue(netdev, skb_get_queue_mapping(skb));
1481 	handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
1482 		be32_to_cpu(adapter->login_rsp_buf->off_txsubm_subcrqs));
1483 
1484 	index = tx_pool->free_map[tx_pool->consumer_index];
1485 
1486 	if (index == IBMVNIC_INVALID_MAP) {
1487 		dev_kfree_skb_any(skb);
1488 		tx_send_failed++;
1489 		tx_dropped++;
1490 		ret = NETDEV_TX_OK;
1491 		goto out;
1492 	}
1493 
1494 	tx_pool->free_map[tx_pool->consumer_index] = IBMVNIC_INVALID_MAP;
1495 
1496 	offset = index * tx_pool->buf_size;
1497 	dst = tx_pool->long_term_buff.buff + offset;
1498 	memset(dst, 0, tx_pool->buf_size);
1499 	data_dma_addr = tx_pool->long_term_buff.addr + offset;
1500 
1501 	if (skb_shinfo(skb)->nr_frags) {
1502 		int cur, i;
1503 
1504 		/* Copy the head */
1505 		skb_copy_from_linear_data(skb, dst, skb_headlen(skb));
1506 		cur = skb_headlen(skb);
1507 
1508 		/* Copy the frags */
1509 		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1510 			const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1511 
1512 			memcpy(dst + cur,
1513 			       page_address(skb_frag_page(frag)) +
1514 			       frag->page_offset, skb_frag_size(frag));
1515 			cur += skb_frag_size(frag);
1516 		}
1517 	} else {
1518 		skb_copy_from_linear_data(skb, dst, skb->len);
1519 	}
1520 
1521 	tx_pool->consumer_index =
1522 	    (tx_pool->consumer_index + 1) % tx_pool->num_buffers;
1523 
1524 	tx_buff = &tx_pool->tx_buff[index];
1525 	tx_buff->skb = skb;
1526 	tx_buff->data_dma[0] = data_dma_addr;
1527 	tx_buff->data_len[0] = skb->len;
1528 	tx_buff->index = index;
1529 	tx_buff->pool_index = queue_num;
1530 	tx_buff->last_frag = true;
1531 
1532 	memset(&tx_crq, 0, sizeof(tx_crq));
1533 	tx_crq.v1.first = IBMVNIC_CRQ_CMD;
1534 	tx_crq.v1.type = IBMVNIC_TX_DESC;
1535 	tx_crq.v1.n_crq_elem = 1;
1536 	tx_crq.v1.n_sge = 1;
1537 	tx_crq.v1.flags1 = IBMVNIC_TX_COMP_NEEDED;
1538 
1539 	if (skb_is_gso(skb))
1540 		tx_crq.v1.correlator =
1541 			cpu_to_be32(index | IBMVNIC_TSO_POOL_MASK);
1542 	else
1543 		tx_crq.v1.correlator = cpu_to_be32(index);
1544 	tx_crq.v1.dma_reg = cpu_to_be16(tx_pool->long_term_buff.map_id);
1545 	tx_crq.v1.sge_len = cpu_to_be32(skb->len);
1546 	tx_crq.v1.ioba = cpu_to_be64(data_dma_addr);
1547 
1548 	if (adapter->vlan_header_insertion) {
1549 		tx_crq.v1.flags2 |= IBMVNIC_TX_VLAN_INSERT;
1550 		tx_crq.v1.vlan_id = cpu_to_be16(skb->vlan_tci);
1551 	}
1552 
1553 	if (skb->protocol == htons(ETH_P_IP)) {
1554 		tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV4;
1555 		proto = ip_hdr(skb)->protocol;
1556 	} else if (skb->protocol == htons(ETH_P_IPV6)) {
1557 		tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV6;
1558 		proto = ipv6_hdr(skb)->nexthdr;
1559 	}
1560 
1561 	if (proto == IPPROTO_TCP)
1562 		tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_TCP;
1563 	else if (proto == IPPROTO_UDP)
1564 		tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_UDP;
1565 
1566 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
1567 		tx_crq.v1.flags1 |= IBMVNIC_TX_CHKSUM_OFFLOAD;
1568 		hdrs += 2;
1569 	}
1570 	if (skb_is_gso(skb)) {
1571 		tx_crq.v1.flags1 |= IBMVNIC_TX_LSO;
1572 		tx_crq.v1.mss = cpu_to_be16(skb_shinfo(skb)->gso_size);
1573 		hdrs += 2;
1574 	}
1575 	/* determine if l2/3/4 headers are sent to firmware */
1576 	if ((*hdrs >> 7) & 1) {
1577 		build_hdr_descs_arr(tx_buff, &num_entries, *hdrs);
1578 		tx_crq.v1.n_crq_elem = num_entries;
1579 		tx_buff->num_entries = num_entries;
1580 		tx_buff->indir_arr[0] = tx_crq;
1581 		tx_buff->indir_dma = dma_map_single(dev, tx_buff->indir_arr,
1582 						    sizeof(tx_buff->indir_arr),
1583 						    DMA_TO_DEVICE);
1584 		if (dma_mapping_error(dev, tx_buff->indir_dma)) {
1585 			dev_kfree_skb_any(skb);
1586 			tx_buff->skb = NULL;
1587 			if (!firmware_has_feature(FW_FEATURE_CMO))
1588 				dev_err(dev, "tx: unable to map descriptor array\n");
1589 			tx_map_failed++;
1590 			tx_dropped++;
1591 			ret = NETDEV_TX_OK;
1592 			goto tx_err_out;
1593 		}
1594 		lpar_rc = send_subcrq_indirect(adapter, handle_array[queue_num],
1595 					       (u64)tx_buff->indir_dma,
1596 					       (u64)num_entries);
1597 	} else {
1598 		tx_buff->num_entries = num_entries;
1599 		lpar_rc = send_subcrq(adapter, handle_array[queue_num],
1600 				      &tx_crq);
1601 	}
1602 	if (lpar_rc != H_SUCCESS) {
1603 		if (lpar_rc != H_CLOSED && lpar_rc != H_PARAMETER)
1604 			dev_err_ratelimited(dev, "tx: send failed\n");
1605 		dev_kfree_skb_any(skb);
1606 		tx_buff->skb = NULL;
1607 
1608 		if (lpar_rc == H_CLOSED || adapter->failover_pending) {
1609 			/* Disable TX and report carrier off if queue is closed
1610 			 * or pending failover.
1611 			 * Firmware guarantees that a signal will be sent to the
1612 			 * driver, triggering a reset or some other action.
1613 			 */
1614 			netif_tx_stop_all_queues(netdev);
1615 			netif_carrier_off(netdev);
1616 		}
1617 
1618 		tx_send_failed++;
1619 		tx_dropped++;
1620 		ret = NETDEV_TX_OK;
1621 		goto tx_err_out;
1622 	}
1623 
1624 	if (atomic_add_return(num_entries, &tx_scrq->used)
1625 					>= adapter->req_tx_entries_per_subcrq) {
1626 		netdev_dbg(netdev, "Stopping queue %d\n", queue_num);
1627 		netif_stop_subqueue(netdev, queue_num);
1628 	}
1629 
1630 	tx_packets++;
1631 	tx_bytes += skb->len;
1632 	txq->trans_start = jiffies;
1633 	ret = NETDEV_TX_OK;
1634 	goto out;
1635 
1636 tx_err_out:
1637 	/* roll back consumer index and map array */
1638 	if (tx_pool->consumer_index == 0)
1639 		tx_pool->consumer_index =
1640 			tx_pool->num_buffers - 1;
1641 	else
1642 		tx_pool->consumer_index--;
1643 	tx_pool->free_map[tx_pool->consumer_index] = index;
1644 out:
1645 	netdev->stats.tx_dropped += tx_dropped;
1646 	netdev->stats.tx_bytes += tx_bytes;
1647 	netdev->stats.tx_packets += tx_packets;
1648 	adapter->tx_send_failed += tx_send_failed;
1649 	adapter->tx_map_failed += tx_map_failed;
1650 	adapter->tx_stats_buffers[queue_num].packets += tx_packets;
1651 	adapter->tx_stats_buffers[queue_num].bytes += tx_bytes;
1652 	adapter->tx_stats_buffers[queue_num].dropped_packets += tx_dropped;
1653 
1654 	return ret;
1655 }
1656 
1657 static void ibmvnic_set_multi(struct net_device *netdev)
1658 {
1659 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1660 	struct netdev_hw_addr *ha;
1661 	union ibmvnic_crq crq;
1662 
1663 	memset(&crq, 0, sizeof(crq));
1664 	crq.request_capability.first = IBMVNIC_CRQ_CMD;
1665 	crq.request_capability.cmd = REQUEST_CAPABILITY;
1666 
1667 	if (netdev->flags & IFF_PROMISC) {
1668 		if (!adapter->promisc_supported)
1669 			return;
1670 	} else {
1671 		if (netdev->flags & IFF_ALLMULTI) {
1672 			/* Accept all multicast */
1673 			memset(&crq, 0, sizeof(crq));
1674 			crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
1675 			crq.multicast_ctrl.cmd = MULTICAST_CTRL;
1676 			crq.multicast_ctrl.flags = IBMVNIC_ENABLE_ALL;
1677 			ibmvnic_send_crq(adapter, &crq);
1678 		} else if (netdev_mc_empty(netdev)) {
1679 			/* Reject all multicast */
1680 			memset(&crq, 0, sizeof(crq));
1681 			crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
1682 			crq.multicast_ctrl.cmd = MULTICAST_CTRL;
1683 			crq.multicast_ctrl.flags = IBMVNIC_DISABLE_ALL;
1684 			ibmvnic_send_crq(adapter, &crq);
1685 		} else {
1686 			/* Accept one or more multicast(s) */
1687 			netdev_for_each_mc_addr(ha, netdev) {
1688 				memset(&crq, 0, sizeof(crq));
1689 				crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
1690 				crq.multicast_ctrl.cmd = MULTICAST_CTRL;
1691 				crq.multicast_ctrl.flags = IBMVNIC_ENABLE_MC;
1692 				ether_addr_copy(&crq.multicast_ctrl.mac_addr[0],
1693 						ha->addr);
1694 				ibmvnic_send_crq(adapter, &crq);
1695 			}
1696 		}
1697 	}
1698 }
1699 
1700 static int __ibmvnic_set_mac(struct net_device *netdev, struct sockaddr *p)
1701 {
1702 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1703 	struct sockaddr *addr = p;
1704 	union ibmvnic_crq crq;
1705 	int rc;
1706 
1707 	if (!is_valid_ether_addr(addr->sa_data))
1708 		return -EADDRNOTAVAIL;
1709 
1710 	memset(&crq, 0, sizeof(crq));
1711 	crq.change_mac_addr.first = IBMVNIC_CRQ_CMD;
1712 	crq.change_mac_addr.cmd = CHANGE_MAC_ADDR;
1713 	ether_addr_copy(&crq.change_mac_addr.mac_addr[0], addr->sa_data);
1714 
1715 	init_completion(&adapter->fw_done);
1716 	rc = ibmvnic_send_crq(adapter, &crq);
1717 	if (rc)
1718 		return rc;
1719 	wait_for_completion(&adapter->fw_done);
1720 	/* netdev->dev_addr is changed in handle_change_mac_rsp function */
1721 	return adapter->fw_done_rc ? -EIO : 0;
1722 }
1723 
1724 static int ibmvnic_set_mac(struct net_device *netdev, void *p)
1725 {
1726 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1727 	struct sockaddr *addr = p;
1728 	int rc;
1729 
1730 	if (adapter->state == VNIC_PROBED) {
1731 		memcpy(&adapter->desired.mac, addr, sizeof(struct sockaddr));
1732 		adapter->mac_change_pending = true;
1733 		return 0;
1734 	}
1735 
1736 	rc = __ibmvnic_set_mac(netdev, addr);
1737 
1738 	return rc;
1739 }
1740 
1741 /**
1742  * do_reset returns zero if we are able to keep processing reset events, or
1743  * non-zero if we hit a fatal error and must halt.
1744  */
1745 static int do_reset(struct ibmvnic_adapter *adapter,
1746 		    struct ibmvnic_rwi *rwi, u32 reset_state)
1747 {
1748 	u64 old_num_rx_queues, old_num_tx_queues;
1749 	struct net_device *netdev = adapter->netdev;
1750 	int i, rc;
1751 
1752 	netdev_dbg(adapter->netdev, "Re-setting driver (%d)\n",
1753 		   rwi->reset_reason);
1754 
1755 	netif_carrier_off(netdev);
1756 	adapter->reset_reason = rwi->reset_reason;
1757 
1758 	old_num_rx_queues = adapter->req_rx_queues;
1759 	old_num_tx_queues = adapter->req_tx_queues;
1760 
1761 	ibmvnic_cleanup(netdev);
1762 
1763 	if (adapter->reset_reason != VNIC_RESET_MOBILITY &&
1764 	    adapter->reset_reason != VNIC_RESET_FAILOVER) {
1765 		rc = __ibmvnic_close(netdev);
1766 		if (rc)
1767 			return rc;
1768 	}
1769 
1770 	if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM ||
1771 	    adapter->wait_for_reset) {
1772 		release_resources(adapter);
1773 		release_sub_crqs(adapter, 1);
1774 		release_crq_queue(adapter);
1775 	}
1776 
1777 	if (adapter->reset_reason != VNIC_RESET_NON_FATAL) {
		/* Remove the closed state so that when we call open, it
		 * appears we are coming from the probed state.
		 */
1781 		adapter->state = VNIC_PROBED;
1782 
1783 		if (adapter->wait_for_reset) {
1784 			rc = init_crq_queue(adapter);
1785 		} else if (adapter->reset_reason == VNIC_RESET_MOBILITY) {
1786 			rc = ibmvnic_reenable_crq_queue(adapter);
1787 			release_sub_crqs(adapter, 1);
1788 		} else {
1789 			rc = ibmvnic_reset_crq(adapter);
1790 			if (!rc)
1791 				rc = vio_enable_interrupts(adapter->vdev);
1792 		}
1793 
1794 		if (rc) {
1795 			netdev_err(adapter->netdev,
1796 				   "Couldn't initialize crq. rc=%d\n", rc);
1797 			return rc;
1798 		}
1799 
1800 		rc = ibmvnic_reset_init(adapter);
1801 		if (rc)
1802 			return IBMVNIC_INIT_FAILED;
1803 
1804 		/* If the adapter was in PROBE state prior to the reset,
1805 		 * exit here.
1806 		 */
1807 		if (reset_state == VNIC_PROBED)
1808 			return 0;
1809 
1810 		rc = ibmvnic_login(netdev);
1811 		if (rc) {
1812 			adapter->state = reset_state;
1813 			return rc;
1814 		}
1815 
1816 		if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM ||
1817 		    adapter->wait_for_reset) {
1818 			rc = init_resources(adapter);
1819 			if (rc)
1820 				return rc;
1821 		} else if (adapter->req_rx_queues != old_num_rx_queues ||
1822 			   adapter->req_tx_queues != old_num_tx_queues) {
1823 			adapter->map_id = 1;
1824 			release_rx_pools(adapter);
1825 			release_tx_pools(adapter);
1826 			rc = init_rx_pools(netdev);
1827 			if (rc)
1828 				return rc;
1829 			rc = init_tx_pools(netdev);
1830 			if (rc)
1831 				return rc;
1832 
1833 			release_napi(adapter);
1834 			rc = init_napi(adapter);
1835 			if (rc)
1836 				return rc;
1837 		} else {
1838 			rc = reset_tx_pools(adapter);
1839 			if (rc)
1840 				return rc;
1841 
1842 			rc = reset_rx_pools(adapter);
1843 			if (rc)
1844 				return rc;
1845 		}
1846 		ibmvnic_disable_irqs(adapter);
1847 	}
1848 	adapter->state = VNIC_CLOSED;
1849 
1850 	if (reset_state == VNIC_CLOSED)
1851 		return 0;
1852 
1853 	rc = __ibmvnic_open(netdev);
1854 	if (rc) {
1855 		if (list_empty(&adapter->rwi_list))
1856 			adapter->state = VNIC_CLOSED;
1857 		else
1858 			adapter->state = reset_state;
1859 
1860 		return 0;
1861 	}
1862 
1863 	/* kick napi */
1864 	for (i = 0; i < adapter->req_rx_queues; i++)
1865 		napi_schedule(&adapter->napi[i]);
1866 
1867 	if (adapter->reset_reason != VNIC_RESET_FAILOVER &&
1868 	    adapter->reset_reason != VNIC_RESET_CHANGE_PARAM)
1869 		netdev_notify_peers(netdev);
1870 
1871 	netif_carrier_on(netdev);
1872 
1873 	return 0;
1874 }
1875 
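/* Hard reset: tear down the CRQ, sub-CRQs and driver resources
 * unconditionally and rebuild everything from scratch, as if the
 * device had just been probed.
 */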
1876 static int do_hard_reset(struct ibmvnic_adapter *adapter,
1877 			 struct ibmvnic_rwi *rwi, u32 reset_state)
1878 {
1879 	struct net_device *netdev = adapter->netdev;
1880 	int rc;
1881 
1882 	netdev_dbg(adapter->netdev, "Hard resetting driver (%d)\n",
1883 		   rwi->reset_reason);
1884 
1885 	netif_carrier_off(netdev);
1886 	adapter->reset_reason = rwi->reset_reason;
1887 
1888 	ibmvnic_cleanup(netdev);
1889 	release_resources(adapter);
1890 	release_sub_crqs(adapter, 0);
1891 	release_crq_queue(adapter);
1892 
	/* Remove the closed state so that when we call open, it appears
	 * we are coming from the probed state.
	 */
1896 	adapter->state = VNIC_PROBED;
1897 
1898 	rc = init_crq_queue(adapter);
1899 	if (rc) {
1900 		netdev_err(adapter->netdev,
1901 			   "Couldn't initialize crq. rc=%d\n", rc);
1902 		return rc;
1903 	}
1904 
1905 	rc = ibmvnic_init(adapter);
1906 	if (rc)
1907 		return rc;
1908 
1909 	/* If the adapter was in PROBE state prior to the reset,
1910 	 * exit here.
1911 	 */
1912 	if (reset_state == VNIC_PROBED)
1913 		return 0;
1914 
1915 	rc = ibmvnic_login(netdev);
1916 	if (rc) {
1917 		adapter->state = VNIC_PROBED;
1918 		return 0;
1919 	}
1920 	/* netif_set_real_num_xx_queues needs to take rtnl lock here
1921 	 * unless wait_for_reset is set, in which case the rtnl lock
1922 	 * has already been taken before initializing the reset
1923 	 */
1924 	if (!adapter->wait_for_reset) {
1925 		rtnl_lock();
1926 		rc = init_resources(adapter);
1927 		rtnl_unlock();
1928 	} else {
1929 		rc = init_resources(adapter);
1930 	}
1931 	if (rc)
1932 		return rc;
1933 
1934 	ibmvnic_disable_irqs(adapter);
1935 	adapter->state = VNIC_CLOSED;
1936 
1937 	if (reset_state == VNIC_CLOSED)
1938 		return 0;
1939 
1940 	rc = __ibmvnic_open(netdev);
1941 	if (rc) {
1942 		if (list_empty(&adapter->rwi_list))
1943 			adapter->state = VNIC_CLOSED;
1944 		else
1945 			adapter->state = reset_state;
1946 
1947 		return 0;
1948 	}
1949 
1950 	netif_carrier_on(netdev);
1951 
1952 	return 0;
1953 }
1954 
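/* Pop the next reset work item (rwi) off the adapter's reset queue,
 * or return NULL if the queue is empty. The caller must kfree() the
 * returned item when done with it.
 */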
1955 static struct ibmvnic_rwi *get_next_rwi(struct ibmvnic_adapter *adapter)
1956 {
1957 	struct ibmvnic_rwi *rwi;
1958 
1959 	mutex_lock(&adapter->rwi_lock);
1960 
1961 	if (!list_empty(&adapter->rwi_list)) {
1962 		rwi = list_first_entry(&adapter->rwi_list, struct ibmvnic_rwi,
1963 				       list);
1964 		list_del(&rwi->list);
1965 	} else {
1966 		rwi = NULL;
1967 	}
1968 
1969 	mutex_unlock(&adapter->rwi_lock);
1970 	return rwi;
1971 }
1972 
1973 static void free_all_rwi(struct ibmvnic_adapter *adapter)
1974 {
1975 	struct ibmvnic_rwi *rwi;
1976 
1977 	rwi = get_next_rwi(adapter);
1978 	while (rwi) {
1979 		kfree(rwi);
1980 		rwi = get_next_rwi(adapter);
1981 	}
1982 }
1983 
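/* Reset worker: drain the queue of pending reset work items, doing a
 * hard reset when a transport event has forced full recovery and a
 * normal reset otherwise. Processing stops on the first fatal error.
 */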
1984 static void __ibmvnic_reset(struct work_struct *work)
1985 {
1986 	struct ibmvnic_rwi *rwi;
1987 	struct ibmvnic_adapter *adapter;
1988 	struct net_device *netdev;
1989 	u32 reset_state;
1990 	int rc = 0;
1991 
1992 	adapter = container_of(work, struct ibmvnic_adapter, ibmvnic_reset);
1993 	netdev = adapter->netdev;
1994 
1995 	mutex_lock(&adapter->reset_lock);
1996 	reset_state = adapter->state;
1997 
1998 	rwi = get_next_rwi(adapter);
1999 	while (rwi) {
2000 		if (adapter->force_reset_recovery) {
2001 			adapter->force_reset_recovery = false;
2002 			rc = do_hard_reset(adapter, rwi, reset_state);
2003 		} else {
2004 			rc = do_reset(adapter, rwi, reset_state);
2005 		}
2006 		kfree(rwi);
2007 		if (rc && rc != IBMVNIC_INIT_FAILED &&
2008 		    !adapter->force_reset_recovery)
2009 			break;
2010 
2011 		rwi = get_next_rwi(adapter);
2012 	}
2013 
2014 	if (adapter->wait_for_reset) {
2015 		adapter->wait_for_reset = false;
2016 		adapter->reset_done_rc = rc;
2017 		complete(&adapter->reset_done);
2018 	}
2019 
2020 	if (rc) {
2021 		netdev_dbg(adapter->netdev, "Reset failed\n");
2022 		free_all_rwi(adapter);
2023 		mutex_unlock(&adapter->reset_lock);
2024 		return;
2025 	}
2026 
2027 	adapter->resetting = false;
2028 	mutex_unlock(&adapter->reset_lock);
2029 }
2030 
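/* Queue a reset work item for the given reason and schedule the reset
 * worker. A request whose reason is already queued is dropped, and the
 * whole queue is flushed when a transport event forces full recovery.
 */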
2031 static int ibmvnic_reset(struct ibmvnic_adapter *adapter,
2032 			 enum ibmvnic_reset_reason reason)
2033 {
2034 	struct list_head *entry, *tmp_entry;
2035 	struct ibmvnic_rwi *rwi, *tmp;
2036 	struct net_device *netdev = adapter->netdev;
2037 	int ret;
2038 
2039 	if (adapter->state == VNIC_REMOVING ||
2040 	    adapter->state == VNIC_REMOVED ||
2041 	    adapter->failover_pending) {
2042 		ret = EBUSY;
2043 		netdev_dbg(netdev, "Adapter removing or pending failover, skipping reset\n");
2044 		goto err;
2045 	}
2046 
2047 	if (adapter->state == VNIC_PROBING) {
2048 		netdev_warn(netdev, "Adapter reset during probe\n");
2049 		ret = adapter->init_done_rc = EAGAIN;
2050 		goto err;
2051 	}
2052 
2053 	mutex_lock(&adapter->rwi_lock);
2054 
2055 	list_for_each(entry, &adapter->rwi_list) {
2056 		tmp = list_entry(entry, struct ibmvnic_rwi, list);
2057 		if (tmp->reset_reason == reason) {
2058 			netdev_dbg(netdev, "Skipping matching reset\n");
2059 			mutex_unlock(&adapter->rwi_lock);
2060 			ret = EBUSY;
2061 			goto err;
2062 		}
2063 	}
2064 
2065 	rwi = kzalloc(sizeof(*rwi), GFP_KERNEL);
2066 	if (!rwi) {
2067 		mutex_unlock(&adapter->rwi_lock);
2068 		ibmvnic_close(netdev);
2069 		ret = ENOMEM;
2070 		goto err;
2071 	}
2072 	/* if we just received a transport event,
2073 	 * flush reset queue and process this reset
2074 	 */
2075 	if (adapter->force_reset_recovery && !list_empty(&adapter->rwi_list)) {
2076 		list_for_each_safe(entry, tmp_entry, &adapter->rwi_list)
2077 			list_del(entry);
2078 	}
2079 	rwi->reset_reason = reason;
2080 	list_add_tail(&rwi->list, &adapter->rwi_list);
2081 	mutex_unlock(&adapter->rwi_lock);
2082 	adapter->resetting = true;
2083 	netdev_dbg(adapter->netdev, "Scheduling reset (reason %d)\n", reason);
2084 	schedule_work(&adapter->ibmvnic_reset);
2085 
2086 	return 0;
2087 err:
2088 	if (adapter->wait_for_reset)
2089 		adapter->wait_for_reset = false;
2090 	return -ret;
2091 }
2092 
2093 static void ibmvnic_tx_timeout(struct net_device *dev)
2094 {
2095 	struct ibmvnic_adapter *adapter = netdev_priv(dev);
2096 
2097 	ibmvnic_reset(adapter, VNIC_RESET_TIMEOUT);
2098 }
2099 
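/* Return an rx buffer's slot to its pool's free map so that
 * replenish_rx_pool() can map a new skb into it later.
 */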
2100 static void remove_buff_from_pool(struct ibmvnic_adapter *adapter,
2101 				  struct ibmvnic_rx_buff *rx_buff)
2102 {
2103 	struct ibmvnic_rx_pool *pool = &adapter->rx_pool[rx_buff->pool_index];
2104 
2105 	rx_buff->skb = NULL;
2106 
2107 	pool->free_map[pool->next_alloc] = (int)(rx_buff - pool->rx_buff);
2108 	pool->next_alloc = (pool->next_alloc + 1) % pool->size;
2109 
2110 	atomic_dec(&pool->available);
2111 }
2112 
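/* NAPI poll routine: pull completed frames off this queue's rx
 * sub-CRQ up to the budget, replenish the rx pool, and re-enable the
 * queue's interrupt once fewer frames than the budget were processed.
 */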
2113 static int ibmvnic_poll(struct napi_struct *napi, int budget)
2114 {
2115 	struct net_device *netdev = napi->dev;
2116 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2117 	int scrq_num = (int)(napi - adapter->napi);
2118 	int frames_processed = 0;
2119 
2120 restart_poll:
2121 	while (frames_processed < budget) {
2122 		struct sk_buff *skb;
2123 		struct ibmvnic_rx_buff *rx_buff;
2124 		union sub_crq *next;
2125 		u32 length;
2126 		u16 offset;
2127 		u8 flags = 0;
2128 
2129 		if (unlikely(adapter->resetting &&
2130 			     adapter->reset_reason != VNIC_RESET_NON_FATAL)) {
2131 			enable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
2132 			napi_complete_done(napi, frames_processed);
2133 			return frames_processed;
2134 		}
2135 
2136 		if (!pending_scrq(adapter, adapter->rx_scrq[scrq_num]))
2137 			break;
2138 		next = ibmvnic_next_scrq(adapter, adapter->rx_scrq[scrq_num]);
		rx_buff = (struct ibmvnic_rx_buff *)
			  be64_to_cpu(next->rx_comp.correlator);
2142 		/* do error checking */
2143 		if (next->rx_comp.rc) {
2144 			netdev_dbg(netdev, "rx buffer returned with rc %x\n",
2145 				   be16_to_cpu(next->rx_comp.rc));
2146 			/* free the entry */
2147 			next->rx_comp.first = 0;
2148 			dev_kfree_skb_any(rx_buff->skb);
2149 			remove_buff_from_pool(adapter, rx_buff);
2150 			continue;
2151 		} else if (!rx_buff->skb) {
2152 			/* free the entry */
2153 			next->rx_comp.first = 0;
2154 			remove_buff_from_pool(adapter, rx_buff);
2155 			continue;
2156 		}
2157 
2158 		length = be32_to_cpu(next->rx_comp.len);
2159 		offset = be16_to_cpu(next->rx_comp.off_frame_data);
2160 		flags = next->rx_comp.flags;
2161 		skb = rx_buff->skb;
2162 		skb_copy_to_linear_data(skb, rx_buff->data + offset,
2163 					length);
2164 
2165 		/* VLAN Header has been stripped by the system firmware and
2166 		 * needs to be inserted by the driver
2167 		 */
2168 		if (adapter->rx_vlan_header_insertion &&
2169 		    (flags & IBMVNIC_VLAN_STRIPPED))
2170 			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
2171 					       ntohs(next->rx_comp.vlan_tci));
2172 
2173 		/* free the entry */
2174 		next->rx_comp.first = 0;
2175 		remove_buff_from_pool(adapter, rx_buff);
2176 
2177 		skb_put(skb, length);
2178 		skb->protocol = eth_type_trans(skb, netdev);
2179 		skb_record_rx_queue(skb, scrq_num);
2180 
2181 		if (flags & IBMVNIC_IP_CHKSUM_GOOD &&
2182 		    flags & IBMVNIC_TCP_UDP_CHKSUM_GOOD) {
2183 			skb->ip_summed = CHECKSUM_UNNECESSARY;
2184 		}
2185 
2186 		length = skb->len;
2187 		napi_gro_receive(napi, skb); /* send it up */
2188 		netdev->stats.rx_packets++;
2189 		netdev->stats.rx_bytes += length;
2190 		adapter->rx_stats_buffers[scrq_num].packets++;
2191 		adapter->rx_stats_buffers[scrq_num].bytes += length;
2192 		frames_processed++;
2193 	}
2194 
2195 	if (adapter->state != VNIC_CLOSING)
2196 		replenish_rx_pool(adapter, &adapter->rx_pool[scrq_num]);
2197 
2198 	if (frames_processed < budget) {
2199 		enable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
2200 		napi_complete_done(napi, frames_processed);
2201 		if (pending_scrq(adapter, adapter->rx_scrq[scrq_num]) &&
2202 		    napi_reschedule(napi)) {
2203 			disable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
2204 			goto restart_poll;
2205 		}
2206 	}
2207 	return frames_processed;
2208 }
2209 
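/* Trigger a CHANGE_PARAM reset to apply the adapter->desired.*
 * settings and wait for it to complete. If that reset fails, fall
 * back to the previous settings with a second reset.
 */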
2210 static int wait_for_reset(struct ibmvnic_adapter *adapter)
2211 {
2212 	int rc, ret;
2213 
2214 	adapter->fallback.mtu = adapter->req_mtu;
2215 	adapter->fallback.rx_queues = adapter->req_rx_queues;
2216 	adapter->fallback.tx_queues = adapter->req_tx_queues;
2217 	adapter->fallback.rx_entries = adapter->req_rx_add_entries_per_subcrq;
2218 	adapter->fallback.tx_entries = adapter->req_tx_entries_per_subcrq;
2219 
2220 	init_completion(&adapter->reset_done);
2221 	adapter->wait_for_reset = true;
2222 	rc = ibmvnic_reset(adapter, VNIC_RESET_CHANGE_PARAM);
2223 	if (rc)
2224 		return rc;
2225 	wait_for_completion(&adapter->reset_done);
2226 
2227 	ret = 0;
2228 	if (adapter->reset_done_rc) {
2229 		ret = -EIO;
2230 		adapter->desired.mtu = adapter->fallback.mtu;
2231 		adapter->desired.rx_queues = adapter->fallback.rx_queues;
2232 		adapter->desired.tx_queues = adapter->fallback.tx_queues;
2233 		adapter->desired.rx_entries = adapter->fallback.rx_entries;
2234 		adapter->desired.tx_entries = adapter->fallback.tx_entries;
2235 
2236 		init_completion(&adapter->reset_done);
2237 		adapter->wait_for_reset = true;
2238 		rc = ibmvnic_reset(adapter, VNIC_RESET_CHANGE_PARAM);
2239 		if (rc)
2240 			return ret;
2241 		wait_for_completion(&adapter->reset_done);
2242 	}
2243 	adapter->wait_for_reset = false;
2244 
2245 	return ret;
2246 }
2247 
2248 static int ibmvnic_change_mtu(struct net_device *netdev, int new_mtu)
2249 {
2250 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2251 
2252 	adapter->desired.mtu = new_mtu + ETH_HLEN;
2253 
2254 	return wait_for_reset(adapter);
2255 }
2256 
2257 static netdev_features_t ibmvnic_features_check(struct sk_buff *skb,
2258 						struct net_device *dev,
2259 						netdev_features_t features)
2260 {
	/* Some backing hardware adapters cannot handle packets with an
	 * MSS less than 224 or with only one segment.
	 */
2265 	if (skb_is_gso(skb)) {
2266 		if (skb_shinfo(skb)->gso_size < 224 ||
2267 		    skb_shinfo(skb)->gso_segs == 1)
2268 			features &= ~NETIF_F_GSO_MASK;
2269 	}
2270 
2271 	return features;
2272 }
2273 
2274 static const struct net_device_ops ibmvnic_netdev_ops = {
2275 	.ndo_open		= ibmvnic_open,
2276 	.ndo_stop		= ibmvnic_close,
2277 	.ndo_start_xmit		= ibmvnic_xmit,
2278 	.ndo_set_rx_mode	= ibmvnic_set_multi,
2279 	.ndo_set_mac_address	= ibmvnic_set_mac,
2280 	.ndo_validate_addr	= eth_validate_addr,
2281 	.ndo_tx_timeout		= ibmvnic_tx_timeout,
2282 	.ndo_change_mtu		= ibmvnic_change_mtu,
2283 	.ndo_features_check     = ibmvnic_features_check,
2284 };
2285 
2286 /* ethtool functions */
2287 
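/* The link settings are not queried from the server; report a fixed
 * 1 Gb/s, full duplex, fibre link.
 */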
2288 static int ibmvnic_get_link_ksettings(struct net_device *netdev,
2289 				      struct ethtool_link_ksettings *cmd)
2290 {
2291 	u32 supported, advertising;
2292 
2293 	supported = (SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg |
2294 			  SUPPORTED_FIBRE);
2295 	advertising = (ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg |
2296 			    ADVERTISED_FIBRE);
2297 	cmd->base.speed = SPEED_1000;
2298 	cmd->base.duplex = DUPLEX_FULL;
2299 	cmd->base.port = PORT_FIBRE;
2300 	cmd->base.phy_address = 0;
2301 	cmd->base.autoneg = AUTONEG_ENABLE;
2302 
2303 	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
2304 						supported);
2305 	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
2306 						advertising);
2307 
2308 	return 0;
2309 }
2310 
2311 static void ibmvnic_get_drvinfo(struct net_device *netdev,
2312 				struct ethtool_drvinfo *info)
2313 {
2314 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2315 
2316 	strlcpy(info->driver, ibmvnic_driver_name, sizeof(info->driver));
2317 	strlcpy(info->version, IBMVNIC_DRIVER_VERSION, sizeof(info->version));
2318 	strlcpy(info->fw_version, adapter->fw_version,
2319 		sizeof(info->fw_version));
2320 }
2321 
2322 static u32 ibmvnic_get_msglevel(struct net_device *netdev)
2323 {
2324 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2325 
2326 	return adapter->msg_enable;
2327 }
2328 
2329 static void ibmvnic_set_msglevel(struct net_device *netdev, u32 data)
2330 {
2331 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2332 
2333 	adapter->msg_enable = data;
2334 }
2335 
2336 static u32 ibmvnic_get_link(struct net_device *netdev)
2337 {
2338 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2339 
2340 	/* Don't need to send a query because we request a logical link up at
2341 	 * init and then we wait for link state indications
2342 	 */
2343 	return adapter->logical_link_state;
2344 }
2345 
2346 static void ibmvnic_get_ringparam(struct net_device *netdev,
2347 				  struct ethtool_ringparam *ring)
2348 {
2349 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2350 
2351 	if (adapter->priv_flags & IBMVNIC_USE_SERVER_MAXES) {
2352 		ring->rx_max_pending = adapter->max_rx_add_entries_per_subcrq;
2353 		ring->tx_max_pending = adapter->max_tx_entries_per_subcrq;
2354 	} else {
2355 		ring->rx_max_pending = IBMVNIC_MAX_QUEUE_SZ;
2356 		ring->tx_max_pending = IBMVNIC_MAX_QUEUE_SZ;
2357 	}
2358 	ring->rx_mini_max_pending = 0;
2359 	ring->rx_jumbo_max_pending = 0;
2360 	ring->rx_pending = adapter->req_rx_add_entries_per_subcrq;
2361 	ring->tx_pending = adapter->req_tx_entries_per_subcrq;
2362 	ring->rx_mini_pending = 0;
2363 	ring->rx_jumbo_pending = 0;
2364 }
2365 
2366 static int ibmvnic_set_ringparam(struct net_device *netdev,
2367 				 struct ethtool_ringparam *ring)
2368 {
2369 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int ret;

2373 	adapter->desired.rx_entries = ring->rx_pending;
2374 	adapter->desired.tx_entries = ring->tx_pending;
2375 
2376 	ret = wait_for_reset(adapter);
2377 
2378 	if (!ret &&
2379 	    (adapter->req_rx_add_entries_per_subcrq != ring->rx_pending ||
2380 	     adapter->req_tx_entries_per_subcrq != ring->tx_pending))
2381 		netdev_info(netdev,
2382 			    "Could not match full ringsize request. Requested: RX %d, TX %d; Allowed: RX %llu, TX %llu\n",
2383 			    ring->rx_pending, ring->tx_pending,
2384 			    adapter->req_rx_add_entries_per_subcrq,
2385 			    adapter->req_tx_entries_per_subcrq);
2386 	return ret;
2387 }
2388 
2389 static void ibmvnic_get_channels(struct net_device *netdev,
2390 				 struct ethtool_channels *channels)
2391 {
2392 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2393 
2394 	if (adapter->priv_flags & IBMVNIC_USE_SERVER_MAXES) {
2395 		channels->max_rx = adapter->max_rx_queues;
2396 		channels->max_tx = adapter->max_tx_queues;
2397 	} else {
2398 		channels->max_rx = IBMVNIC_MAX_QUEUES;
2399 		channels->max_tx = IBMVNIC_MAX_QUEUES;
2400 	}
2401 
2402 	channels->max_other = 0;
2403 	channels->max_combined = 0;
2404 	channels->rx_count = adapter->req_rx_queues;
2405 	channels->tx_count = adapter->req_tx_queues;
2406 	channels->other_count = 0;
2407 	channels->combined_count = 0;
2408 }
2409 
2410 static int ibmvnic_set_channels(struct net_device *netdev,
2411 				struct ethtool_channels *channels)
2412 {
2413 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int ret;

2417 	adapter->desired.rx_queues = channels->rx_count;
2418 	adapter->desired.tx_queues = channels->tx_count;
2419 
2420 	ret = wait_for_reset(adapter);
2421 
2422 	if (!ret &&
2423 	    (adapter->req_rx_queues != channels->rx_count ||
2424 	     adapter->req_tx_queues != channels->tx_count))
2425 		netdev_info(netdev,
2426 			    "Could not match full channels request. Requested: RX %d, TX %d; Allowed: RX %llu, TX %llu\n",
2427 			    channels->rx_count, channels->tx_count,
2428 			    adapter->req_rx_queues, adapter->req_tx_queues);
	return ret;
}
2432 
2433 static void ibmvnic_get_strings(struct net_device *dev, u32 stringset, u8 *data)
2434 {
2435 	struct ibmvnic_adapter *adapter = netdev_priv(dev);
2436 	int i;
2437 
2438 	switch (stringset) {
2439 	case ETH_SS_STATS:
2440 		for (i = 0; i < ARRAY_SIZE(ibmvnic_stats);
2441 				i++, data += ETH_GSTRING_LEN)
2442 			memcpy(data, ibmvnic_stats[i].name, ETH_GSTRING_LEN);
2443 
2444 		for (i = 0; i < adapter->req_tx_queues; i++) {
2445 			snprintf(data, ETH_GSTRING_LEN, "tx%d_packets", i);
2446 			data += ETH_GSTRING_LEN;
2447 
2448 			snprintf(data, ETH_GSTRING_LEN, "tx%d_bytes", i);
2449 			data += ETH_GSTRING_LEN;
2450 
2451 			snprintf(data, ETH_GSTRING_LEN,
2452 				 "tx%d_dropped_packets", i);
2453 			data += ETH_GSTRING_LEN;
2454 		}
2455 
2456 		for (i = 0; i < adapter->req_rx_queues; i++) {
2457 			snprintf(data, ETH_GSTRING_LEN, "rx%d_packets", i);
2458 			data += ETH_GSTRING_LEN;
2459 
2460 			snprintf(data, ETH_GSTRING_LEN, "rx%d_bytes", i);
2461 			data += ETH_GSTRING_LEN;
2462 
2463 			snprintf(data, ETH_GSTRING_LEN, "rx%d_interrupts", i);
2464 			data += ETH_GSTRING_LEN;
2465 		}
2466 		break;
2467 
2468 	case ETH_SS_PRIV_FLAGS:
2469 		for (i = 0; i < ARRAY_SIZE(ibmvnic_priv_flags); i++)
2470 			strcpy(data + i * ETH_GSTRING_LEN,
2471 			       ibmvnic_priv_flags[i]);
2472 		break;
2473 	default:
2474 		return;
2475 	}
2476 }
2477 
2478 static int ibmvnic_get_sset_count(struct net_device *dev, int sset)
2479 {
2480 	struct ibmvnic_adapter *adapter = netdev_priv(dev);
2481 
2482 	switch (sset) {
2483 	case ETH_SS_STATS:
2484 		return ARRAY_SIZE(ibmvnic_stats) +
2485 		       adapter->req_tx_queues * NUM_TX_STATS +
2486 		       adapter->req_rx_queues * NUM_RX_STATS;
2487 	case ETH_SS_PRIV_FLAGS:
2488 		return ARRAY_SIZE(ibmvnic_priv_flags);
2489 	default:
2490 		return -EOPNOTSUPP;
2491 	}
2492 }
2493 
2494 static void ibmvnic_get_ethtool_stats(struct net_device *dev,
2495 				      struct ethtool_stats *stats, u64 *data)
2496 {
2497 	struct ibmvnic_adapter *adapter = netdev_priv(dev);
2498 	union ibmvnic_crq crq;
2499 	int i, j;
2500 	int rc;
2501 
2502 	memset(&crq, 0, sizeof(crq));
2503 	crq.request_statistics.first = IBMVNIC_CRQ_CMD;
2504 	crq.request_statistics.cmd = REQUEST_STATISTICS;
2505 	crq.request_statistics.ioba = cpu_to_be32(adapter->stats_token);
2506 	crq.request_statistics.len =
2507 	    cpu_to_be32(sizeof(struct ibmvnic_statistics));
2508 
2509 	/* Wait for data to be written */
2510 	init_completion(&adapter->stats_done);
2511 	rc = ibmvnic_send_crq(adapter, &crq);
2512 	if (rc)
2513 		return;
2514 	wait_for_completion(&adapter->stats_done);
2515 
2516 	for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++)
2517 		data[i] = be64_to_cpu(IBMVNIC_GET_STAT(adapter,
2518 						ibmvnic_stats[i].offset));
2519 
2520 	for (j = 0; j < adapter->req_tx_queues; j++) {
2521 		data[i] = adapter->tx_stats_buffers[j].packets;
2522 		i++;
2523 		data[i] = adapter->tx_stats_buffers[j].bytes;
2524 		i++;
2525 		data[i] = adapter->tx_stats_buffers[j].dropped_packets;
2526 		i++;
2527 	}
2528 
2529 	for (j = 0; j < adapter->req_rx_queues; j++) {
2530 		data[i] = adapter->rx_stats_buffers[j].packets;
2531 		i++;
2532 		data[i] = adapter->rx_stats_buffers[j].bytes;
2533 		i++;
2534 		data[i] = adapter->rx_stats_buffers[j].interrupts;
2535 		i++;
2536 	}
2537 }
2538 
2539 static u32 ibmvnic_get_priv_flags(struct net_device *netdev)
2540 {
2541 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2542 
2543 	return adapter->priv_flags;
2544 }
2545 
2546 static int ibmvnic_set_priv_flags(struct net_device *netdev, u32 flags)
2547 {
2548 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2549 	bool which_maxes = !!(flags & IBMVNIC_USE_SERVER_MAXES);
2550 
2551 	if (which_maxes)
2552 		adapter->priv_flags |= IBMVNIC_USE_SERVER_MAXES;
2553 	else
2554 		adapter->priv_flags &= ~IBMVNIC_USE_SERVER_MAXES;
2555 
2556 	return 0;
2557 }
2558 static const struct ethtool_ops ibmvnic_ethtool_ops = {
2559 	.get_drvinfo		= ibmvnic_get_drvinfo,
2560 	.get_msglevel		= ibmvnic_get_msglevel,
2561 	.set_msglevel		= ibmvnic_set_msglevel,
2562 	.get_link		= ibmvnic_get_link,
2563 	.get_ringparam		= ibmvnic_get_ringparam,
2564 	.set_ringparam		= ibmvnic_set_ringparam,
2565 	.get_channels		= ibmvnic_get_channels,
2566 	.set_channels		= ibmvnic_set_channels,
2567 	.get_strings            = ibmvnic_get_strings,
2568 	.get_sset_count         = ibmvnic_get_sset_count,
2569 	.get_ethtool_stats	= ibmvnic_get_ethtool_stats,
2570 	.get_link_ksettings	= ibmvnic_get_link_ksettings,
2571 	.get_priv_flags		= ibmvnic_get_priv_flags,
2572 	.set_priv_flags		= ibmvnic_set_priv_flags,
2573 };
2574 
/* Routines for managing CRQs/sCRQs */
2576 
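/* Quiesce a single sub-CRQ: release its interrupt, zero its queue
 * pages, and re-register it with the hypervisor.
 */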
2577 static int reset_one_sub_crq_queue(struct ibmvnic_adapter *adapter,
2578 				   struct ibmvnic_sub_crq_queue *scrq)
2579 {
2580 	int rc;
2581 
2582 	if (scrq->irq) {
2583 		free_irq(scrq->irq, scrq);
2584 		irq_dispose_mapping(scrq->irq);
2585 		scrq->irq = 0;
2586 	}
2587 
2588 	memset(scrq->msgs, 0, 4 * PAGE_SIZE);
2589 	atomic_set(&scrq->used, 0);
2590 	scrq->cur = 0;
2591 
2592 	rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token,
2593 			   4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq);
2594 	return rc;
2595 }
2596 
2597 static int reset_sub_crq_queues(struct ibmvnic_adapter *adapter)
2598 {
2599 	int i, rc;
2600 
2601 	for (i = 0; i < adapter->req_tx_queues; i++) {
2602 		netdev_dbg(adapter->netdev, "Re-setting tx_scrq[%d]\n", i);
2603 		rc = reset_one_sub_crq_queue(adapter, adapter->tx_scrq[i]);
2604 		if (rc)
2605 			return rc;
2606 	}
2607 
2608 	for (i = 0; i < adapter->req_rx_queues; i++) {
2609 		netdev_dbg(adapter->netdev, "Re-setting rx_scrq[%d]\n", i);
2610 		rc = reset_one_sub_crq_queue(adapter, adapter->rx_scrq[i]);
2611 		if (rc)
2612 			return rc;
2613 	}
2614 
	return 0;
2616 }
2617 
2618 static void release_sub_crq_queue(struct ibmvnic_adapter *adapter,
2619 				  struct ibmvnic_sub_crq_queue *scrq,
2620 				  bool do_h_free)
2621 {
2622 	struct device *dev = &adapter->vdev->dev;
2623 	long rc;
2624 
2625 	netdev_dbg(adapter->netdev, "Releasing sub-CRQ\n");
2626 
2627 	if (do_h_free) {
2628 		/* Close the sub-crqs */
2629 		do {
2630 			rc = plpar_hcall_norets(H_FREE_SUB_CRQ,
2631 						adapter->vdev->unit_address,
2632 						scrq->crq_num);
2633 		} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
2634 
2635 		if (rc) {
2636 			netdev_err(adapter->netdev,
2637 				   "Failed to release sub-CRQ %16lx, rc = %ld\n",
2638 				   scrq->crq_num, rc);
2639 		}
2640 	}
2641 
2642 	dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
2643 			 DMA_BIDIRECTIONAL);
2644 	free_pages((unsigned long)scrq->msgs, 2);
2645 	kfree(scrq);
2646 }
2647 
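/* Allocate one sub-CRQ: four pages of queue memory, DMA mapped and
 * registered with the hypervisor. Returns NULL on failure.
 */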
2648 static struct ibmvnic_sub_crq_queue *init_sub_crq_queue(struct ibmvnic_adapter
2649 							*adapter)
2650 {
2651 	struct device *dev = &adapter->vdev->dev;
2652 	struct ibmvnic_sub_crq_queue *scrq;
2653 	int rc;
2654 
2655 	scrq = kzalloc(sizeof(*scrq), GFP_KERNEL);
2656 	if (!scrq)
2657 		return NULL;
2658 
2659 	scrq->msgs =
2660 		(union sub_crq *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 2);
2661 	if (!scrq->msgs) {
2662 		dev_warn(dev, "Couldn't allocate crq queue messages page\n");
2663 		goto zero_page_failed;
2664 	}
2665 
2666 	scrq->msg_token = dma_map_single(dev, scrq->msgs, 4 * PAGE_SIZE,
2667 					 DMA_BIDIRECTIONAL);
2668 	if (dma_mapping_error(dev, scrq->msg_token)) {
2669 		dev_warn(dev, "Couldn't map crq queue messages page\n");
2670 		goto map_failed;
2671 	}
2672 
2673 	rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token,
2674 			   4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq);
2675 
2676 	if (rc == H_RESOURCE)
2677 		rc = ibmvnic_reset_crq(adapter);
2678 
2679 	if (rc == H_CLOSED) {
2680 		dev_warn(dev, "Partner adapter not ready, waiting.\n");
2681 	} else if (rc) {
2682 		dev_warn(dev, "Error %d registering sub-crq\n", rc);
2683 		goto reg_failed;
2684 	}
2685 
2686 	scrq->adapter = adapter;
2687 	scrq->size = 4 * PAGE_SIZE / sizeof(*scrq->msgs);
2688 	spin_lock_init(&scrq->lock);
2689 
2690 	netdev_dbg(adapter->netdev,
2691 		   "sub-crq initialized, num %lx, hw_irq=%lx, irq=%x\n",
2692 		   scrq->crq_num, scrq->hw_irq, scrq->irq);
2693 
2694 	return scrq;
2695 
2696 reg_failed:
2697 	dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
2698 			 DMA_BIDIRECTIONAL);
2699 map_failed:
2700 	free_pages((unsigned long)scrq->msgs, 2);
2701 zero_page_failed:
2702 	kfree(scrq);
2703 
2704 	return NULL;
2705 }
2706 
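/* Free all tx and rx sub-CRQs, optionally (do_h_free) asking the
 * hypervisor to close them with H_FREE_SUB_CRQ first.
 */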
2707 static void release_sub_crqs(struct ibmvnic_adapter *adapter, bool do_h_free)
2708 {
2709 	int i;
2710 
2711 	if (adapter->tx_scrq) {
2712 		for (i = 0; i < adapter->num_active_tx_scrqs; i++) {
2713 			if (!adapter->tx_scrq[i])
2714 				continue;
2715 
2716 			netdev_dbg(adapter->netdev, "Releasing tx_scrq[%d]\n",
2717 				   i);
2718 			if (adapter->tx_scrq[i]->irq) {
2719 				free_irq(adapter->tx_scrq[i]->irq,
2720 					 adapter->tx_scrq[i]);
2721 				irq_dispose_mapping(adapter->tx_scrq[i]->irq);
2722 				adapter->tx_scrq[i]->irq = 0;
2723 			}
2724 
2725 			release_sub_crq_queue(adapter, adapter->tx_scrq[i],
2726 					      do_h_free);
2727 		}
2728 
2729 		kfree(adapter->tx_scrq);
2730 		adapter->tx_scrq = NULL;
2731 		adapter->num_active_tx_scrqs = 0;
2732 	}
2733 
2734 	if (adapter->rx_scrq) {
2735 		for (i = 0; i < adapter->num_active_rx_scrqs; i++) {
2736 			if (!adapter->rx_scrq[i])
2737 				continue;
2738 
2739 			netdev_dbg(adapter->netdev, "Releasing rx_scrq[%d]\n",
2740 				   i);
2741 			if (adapter->rx_scrq[i]->irq) {
2742 				free_irq(adapter->rx_scrq[i]->irq,
2743 					 adapter->rx_scrq[i]);
2744 				irq_dispose_mapping(adapter->rx_scrq[i]->irq);
2745 				adapter->rx_scrq[i]->irq = 0;
2746 			}
2747 
2748 			release_sub_crq_queue(adapter, adapter->rx_scrq[i],
2749 					      do_h_free);
2750 		}
2751 
2752 		kfree(adapter->rx_scrq);
2753 		adapter->rx_scrq = NULL;
2754 		adapter->num_active_rx_scrqs = 0;
2755 	}
2756 }
2757 
2758 static int disable_scrq_irq(struct ibmvnic_adapter *adapter,
2759 			    struct ibmvnic_sub_crq_queue *scrq)
2760 {
2761 	struct device *dev = &adapter->vdev->dev;
2762 	unsigned long rc;
2763 
2764 	rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
2765 				H_DISABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0);
2766 	if (rc)
2767 		dev_err(dev, "Couldn't disable scrq irq 0x%lx. rc=%ld\n",
2768 			scrq->hw_irq, rc);
2769 	return rc;
2770 }
2771 
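/* Unmask a sub-CRQ's interrupt. During a mobility reset, first issue
 * an H_EOI to clear any interrupt left pending across the migration.
 */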
2772 static int enable_scrq_irq(struct ibmvnic_adapter *adapter,
2773 			   struct ibmvnic_sub_crq_queue *scrq)
2774 {
2775 	struct device *dev = &adapter->vdev->dev;
2776 	unsigned long rc;
2777 
2778 	if (scrq->hw_irq > 0x100000000ULL) {
2779 		dev_err(dev, "bad hw_irq = %lx\n", scrq->hw_irq);
2780 		return 1;
2781 	}
2782 
2783 	if (adapter->resetting &&
2784 	    adapter->reset_reason == VNIC_RESET_MOBILITY) {
2785 		u64 val = (0xff000000) | scrq->hw_irq;
2786 
2787 		rc = plpar_hcall_norets(H_EOI, val);
2788 		if (rc)
2789 			dev_err(dev, "H_EOI FAILED irq 0x%llx. rc=%ld\n",
2790 				val, rc);
2791 	}
2792 
2793 	rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
2794 				H_ENABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0);
2795 	if (rc)
2796 		dev_err(dev, "Couldn't enable scrq irq 0x%lx. rc=%ld\n",
2797 			scrq->hw_irq, rc);
2798 	return rc;
2799 }
2800 
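/* Process tx completions from a sub-CRQ: unmap any indirect
 * descriptor buffers, free completed skbs, return tx buffers to the
 * pool's free map, and wake the subqueue once usage drops to half of
 * the requested ring entries.
 */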
2801 static int ibmvnic_complete_tx(struct ibmvnic_adapter *adapter,
2802 			       struct ibmvnic_sub_crq_queue *scrq)
2803 {
2804 	struct device *dev = &adapter->vdev->dev;
2805 	struct ibmvnic_tx_pool *tx_pool;
2806 	struct ibmvnic_tx_buff *txbuff;
2807 	union sub_crq *next;
2808 	int index;
2809 	int i, j;
2810 	u8 *first;
2811 
2812 restart_loop:
2813 	while (pending_scrq(adapter, scrq)) {
2814 		unsigned int pool = scrq->pool_index;
2815 		int num_entries = 0;
2816 
2817 		next = ibmvnic_next_scrq(adapter, scrq);
2818 		for (i = 0; i < next->tx_comp.num_comps; i++) {
2819 			if (next->tx_comp.rcs[i]) {
2820 				dev_err(dev, "tx error %x\n",
2821 					next->tx_comp.rcs[i]);
2822 				continue;
2823 			}
2824 			index = be32_to_cpu(next->tx_comp.correlators[i]);
2825 			if (index & IBMVNIC_TSO_POOL_MASK) {
2826 				tx_pool = &adapter->tso_pool[pool];
2827 				index &= ~IBMVNIC_TSO_POOL_MASK;
2828 			} else {
2829 				tx_pool = &adapter->tx_pool[pool];
2830 			}
2831 
2832 			txbuff = &tx_pool->tx_buff[index];
2833 
2834 			for (j = 0; j < IBMVNIC_MAX_FRAGS_PER_CRQ; j++) {
2835 				if (!txbuff->data_dma[j])
2836 					continue;
2837 
2838 				txbuff->data_dma[j] = 0;
2839 			}
2840 			/* if sub_crq was sent indirectly */
2841 			first = &txbuff->indir_arr[0].generic.first;
2842 			if (*first == IBMVNIC_CRQ_CMD) {
2843 				dma_unmap_single(dev, txbuff->indir_dma,
2844 						 sizeof(txbuff->indir_arr),
2845 						 DMA_TO_DEVICE);
2846 				*first = 0;
2847 			}
2848 
2849 			if (txbuff->last_frag) {
2850 				dev_kfree_skb_any(txbuff->skb);
2851 				txbuff->skb = NULL;
2852 			}
2853 
2854 			num_entries += txbuff->num_entries;
2855 
2856 			tx_pool->free_map[tx_pool->producer_index] = index;
2857 			tx_pool->producer_index =
2858 				(tx_pool->producer_index + 1) %
2859 					tx_pool->num_buffers;
2860 		}
		/* remove tx_comp scrq */
2862 		next->tx_comp.first = 0;
2863 
2864 		if (atomic_sub_return(num_entries, &scrq->used) <=
2865 		    (adapter->req_tx_entries_per_subcrq / 2) &&
2866 		    __netif_subqueue_stopped(adapter->netdev,
2867 					     scrq->pool_index)) {
2868 			netif_wake_subqueue(adapter->netdev, scrq->pool_index);
2869 			netdev_dbg(adapter->netdev, "Started queue %d\n",
2870 				   scrq->pool_index);
2871 		}
2872 	}
2873 
2874 	enable_scrq_irq(adapter, scrq);
2875 
2876 	if (pending_scrq(adapter, scrq)) {
2877 		disable_scrq_irq(adapter, scrq);
2878 		goto restart_loop;
2879 	}
2880 
2881 	return 0;
2882 }
2883 
2884 static irqreturn_t ibmvnic_interrupt_tx(int irq, void *instance)
2885 {
2886 	struct ibmvnic_sub_crq_queue *scrq = instance;
2887 	struct ibmvnic_adapter *adapter = scrq->adapter;
2888 
2889 	disable_scrq_irq(adapter, scrq);
2890 	ibmvnic_complete_tx(adapter, scrq);
2891 
2892 	return IRQ_HANDLED;
2893 }
2894 
2895 static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance)
2896 {
2897 	struct ibmvnic_sub_crq_queue *scrq = instance;
2898 	struct ibmvnic_adapter *adapter = scrq->adapter;
2899 
2900 	/* When booting a kdump kernel we can hit pending interrupts
2901 	 * prior to completing driver initialization.
2902 	 */
2903 	if (unlikely(adapter->state != VNIC_OPEN))
2904 		return IRQ_NONE;
2905 
2906 	adapter->rx_stats_buffers[scrq->scrq_num].interrupts++;
2907 
2908 	if (napi_schedule_prep(&adapter->napi[scrq->scrq_num])) {
2909 		disable_scrq_irq(adapter, scrq);
2910 		__napi_schedule(&adapter->napi[scrq->scrq_num]);
2911 	}
2912 
2913 	return IRQ_HANDLED;
2914 }
2915 
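/* Map and request the tx and rx sub-CRQ interrupts. On failure, undo
 * the mappings acquired so far and release all sub-CRQs.
 */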
2916 static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter)
2917 {
2918 	struct device *dev = &adapter->vdev->dev;
2919 	struct ibmvnic_sub_crq_queue *scrq;
2920 	int i = 0, j = 0;
2921 	int rc = 0;
2922 
2923 	for (i = 0; i < adapter->req_tx_queues; i++) {
2924 		netdev_dbg(adapter->netdev, "Initializing tx_scrq[%d] irq\n",
2925 			   i);
2926 		scrq = adapter->tx_scrq[i];
2927 		scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
2928 
2929 		if (!scrq->irq) {
2930 			rc = -EINVAL;
2931 			dev_err(dev, "Error mapping irq\n");
2932 			goto req_tx_irq_failed;
2933 		}
2934 
2935 		rc = request_irq(scrq->irq, ibmvnic_interrupt_tx,
2936 				 0, "ibmvnic_tx", scrq);
2937 
2938 		if (rc) {
2939 			dev_err(dev, "Couldn't register tx irq 0x%x. rc=%d\n",
2940 				scrq->irq, rc);
2941 			irq_dispose_mapping(scrq->irq);
2942 			goto req_tx_irq_failed;
2943 		}
2944 	}
2945 
2946 	for (i = 0; i < adapter->req_rx_queues; i++) {
2947 		netdev_dbg(adapter->netdev, "Initializing rx_scrq[%d] irq\n",
2948 			   i);
2949 		scrq = adapter->rx_scrq[i];
2950 		scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
2951 		if (!scrq->irq) {
2952 			rc = -EINVAL;
2953 			dev_err(dev, "Error mapping irq\n");
2954 			goto req_rx_irq_failed;
2955 		}
2956 		rc = request_irq(scrq->irq, ibmvnic_interrupt_rx,
2957 				 0, "ibmvnic_rx", scrq);
2958 		if (rc) {
2959 			dev_err(dev, "Couldn't register rx irq 0x%x. rc=%d\n",
2960 				scrq->irq, rc);
2961 			irq_dispose_mapping(scrq->irq);
2962 			goto req_rx_irq_failed;
2963 		}
2964 	}
2965 	return rc;
2966 
2967 req_rx_irq_failed:
2968 	for (j = 0; j < i; j++) {
2969 		free_irq(adapter->rx_scrq[j]->irq, adapter->rx_scrq[j]);
2970 		irq_dispose_mapping(adapter->rx_scrq[j]->irq);
2971 	}
2972 	i = adapter->req_tx_queues;
2973 req_tx_irq_failed:
2974 	for (j = 0; j < i; j++) {
2975 		free_irq(adapter->tx_scrq[j]->irq, adapter->tx_scrq[j]);
		irq_dispose_mapping(adapter->tx_scrq[j]->irq);
2977 	}
2978 	release_sub_crqs(adapter, 1);
2979 	return rc;
2980 }
2981 
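/* Allocate the full complement of tx and rx sub-CRQs. If fewer than
 * requested could be registered, scale the requested queue counts
 * down toward the advertised minimums.
 */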
2982 static int init_sub_crqs(struct ibmvnic_adapter *adapter)
2983 {
2984 	struct device *dev = &adapter->vdev->dev;
2985 	struct ibmvnic_sub_crq_queue **allqueues;
2986 	int registered_queues = 0;
2987 	int total_queues;
2988 	int more = 0;
2989 	int i;
2990 
2991 	total_queues = adapter->req_tx_queues + adapter->req_rx_queues;
2992 
2993 	allqueues = kcalloc(total_queues, sizeof(*allqueues), GFP_KERNEL);
2994 	if (!allqueues)
2995 		return -1;
2996 
2997 	for (i = 0; i < total_queues; i++) {
2998 		allqueues[i] = init_sub_crq_queue(adapter);
2999 		if (!allqueues[i]) {
3000 			dev_warn(dev, "Couldn't allocate all sub-crqs\n");
3001 			break;
3002 		}
3003 		registered_queues++;
3004 	}
3005 
3006 	/* Make sure we were able to register the minimum number of queues */
3007 	if (registered_queues <
3008 	    adapter->min_tx_queues + adapter->min_rx_queues) {
3009 		dev_err(dev, "Fatal: Couldn't init  min number of sub-crqs\n");
3010 		goto tx_failed;
3011 	}
3012 
	/* Distribute the shortfall of queues that failed to allocate */
	for (i = 0; i < total_queues - registered_queues + more; i++) {
3015 		netdev_dbg(adapter->netdev, "Reducing number of queues\n");
3016 		switch (i % 3) {
3017 		case 0:
3018 			if (adapter->req_rx_queues > adapter->min_rx_queues)
3019 				adapter->req_rx_queues--;
3020 			else
3021 				more++;
3022 			break;
3023 		case 1:
3024 			if (adapter->req_tx_queues > adapter->min_tx_queues)
3025 				adapter->req_tx_queues--;
3026 			else
3027 				more++;
3028 			break;
3029 		}
3030 	}
3031 
3032 	adapter->tx_scrq = kcalloc(adapter->req_tx_queues,
3033 				   sizeof(*adapter->tx_scrq), GFP_KERNEL);
3034 	if (!adapter->tx_scrq)
3035 		goto tx_failed;
3036 
3037 	for (i = 0; i < adapter->req_tx_queues; i++) {
3038 		adapter->tx_scrq[i] = allqueues[i];
3039 		adapter->tx_scrq[i]->pool_index = i;
3040 		adapter->num_active_tx_scrqs++;
3041 	}
3042 
3043 	adapter->rx_scrq = kcalloc(adapter->req_rx_queues,
3044 				   sizeof(*adapter->rx_scrq), GFP_KERNEL);
3045 	if (!adapter->rx_scrq)
3046 		goto rx_failed;
3047 
3048 	for (i = 0; i < adapter->req_rx_queues; i++) {
3049 		adapter->rx_scrq[i] = allqueues[i + adapter->req_tx_queues];
3050 		adapter->rx_scrq[i]->scrq_num = i;
3051 		adapter->num_active_rx_scrqs++;
3052 	}
3053 
3054 	kfree(allqueues);
3055 	return 0;
3056 
3057 rx_failed:
3058 	kfree(adapter->tx_scrq);
3059 	adapter->tx_scrq = NULL;
3060 tx_failed:
3061 	for (i = 0; i < registered_queues; i++)
3062 		release_sub_crq_queue(adapter, allqueues[i], 1);
3063 	kfree(allqueues);
3064 	return -1;
3065 }
3066 
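/* Send the REQUEST_CAPABILITY CRQs that negotiate queue counts, ring
 * sizes, MTU and promiscuous mode with the VNIC server. On the first
 * attempt the requested values are derived from the desired.*
 * settings, clamped so that a long term buffer never exceeds
 * IBMVNIC_MAX_LTB_SIZE.
 */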
3067 static void ibmvnic_send_req_caps(struct ibmvnic_adapter *adapter, int retry)
3068 {
3069 	struct device *dev = &adapter->vdev->dev;
3070 	union ibmvnic_crq crq;
3071 	int max_entries;
3072 
3073 	if (!retry) {
		/* Sub-CRQ entries are 32 bytes long */
3075 		int entries_page = 4 * PAGE_SIZE / (sizeof(u64) * 4);
3076 
3077 		if (adapter->min_tx_entries_per_subcrq > entries_page ||
3078 		    adapter->min_rx_add_entries_per_subcrq > entries_page) {
3079 			dev_err(dev, "Fatal, invalid entries per sub-crq\n");
3080 			return;
3081 		}
3082 
3083 		if (adapter->desired.mtu)
3084 			adapter->req_mtu = adapter->desired.mtu;
3085 		else
3086 			adapter->req_mtu = adapter->netdev->mtu + ETH_HLEN;
3087 
3088 		if (!adapter->desired.tx_entries)
3089 			adapter->desired.tx_entries =
3090 					adapter->max_tx_entries_per_subcrq;
3091 		if (!adapter->desired.rx_entries)
3092 			adapter->desired.rx_entries =
3093 					adapter->max_rx_add_entries_per_subcrq;
3094 
3095 		max_entries = IBMVNIC_MAX_LTB_SIZE /
3096 			      (adapter->req_mtu + IBMVNIC_BUFFER_HLEN);
3097 
3098 		if ((adapter->req_mtu + IBMVNIC_BUFFER_HLEN) *
3099 			adapter->desired.tx_entries > IBMVNIC_MAX_LTB_SIZE) {
3100 			adapter->desired.tx_entries = max_entries;
3101 		}
3102 
3103 		if ((adapter->req_mtu + IBMVNIC_BUFFER_HLEN) *
3104 			adapter->desired.rx_entries > IBMVNIC_MAX_LTB_SIZE) {
3105 			adapter->desired.rx_entries = max_entries;
3106 		}
3107 
3108 		if (adapter->desired.tx_entries)
3109 			adapter->req_tx_entries_per_subcrq =
3110 					adapter->desired.tx_entries;
3111 		else
3112 			adapter->req_tx_entries_per_subcrq =
3113 					adapter->max_tx_entries_per_subcrq;
3114 
3115 		if (adapter->desired.rx_entries)
3116 			adapter->req_rx_add_entries_per_subcrq =
3117 					adapter->desired.rx_entries;
3118 		else
3119 			adapter->req_rx_add_entries_per_subcrq =
3120 					adapter->max_rx_add_entries_per_subcrq;
3121 
3122 		if (adapter->desired.tx_queues)
3123 			adapter->req_tx_queues =
3124 					adapter->desired.tx_queues;
3125 		else
3126 			adapter->req_tx_queues =
3127 					adapter->opt_tx_comp_sub_queues;
3128 
3129 		if (adapter->desired.rx_queues)
3130 			adapter->req_rx_queues =
3131 					adapter->desired.rx_queues;
3132 		else
3133 			adapter->req_rx_queues =
3134 					adapter->opt_rx_comp_queues;
3135 
3136 		adapter->req_rx_add_queues = adapter->max_rx_add_queues;
3137 	}
3138 
3139 	memset(&crq, 0, sizeof(crq));
3140 	crq.request_capability.first = IBMVNIC_CRQ_CMD;
3141 	crq.request_capability.cmd = REQUEST_CAPABILITY;
3142 
3143 	crq.request_capability.capability = cpu_to_be16(REQ_TX_QUEUES);
3144 	crq.request_capability.number = cpu_to_be64(adapter->req_tx_queues);
3145 	atomic_inc(&adapter->running_cap_crqs);
3146 	ibmvnic_send_crq(adapter, &crq);
3147 
3148 	crq.request_capability.capability = cpu_to_be16(REQ_RX_QUEUES);
3149 	crq.request_capability.number = cpu_to_be64(adapter->req_rx_queues);
3150 	atomic_inc(&adapter->running_cap_crqs);
3151 	ibmvnic_send_crq(adapter, &crq);
3152 
3153 	crq.request_capability.capability = cpu_to_be16(REQ_RX_ADD_QUEUES);
3154 	crq.request_capability.number = cpu_to_be64(adapter->req_rx_add_queues);
3155 	atomic_inc(&adapter->running_cap_crqs);
3156 	ibmvnic_send_crq(adapter, &crq);
3157 
3158 	crq.request_capability.capability =
3159 	    cpu_to_be16(REQ_TX_ENTRIES_PER_SUBCRQ);
3160 	crq.request_capability.number =
3161 	    cpu_to_be64(adapter->req_tx_entries_per_subcrq);
3162 	atomic_inc(&adapter->running_cap_crqs);
3163 	ibmvnic_send_crq(adapter, &crq);
3164 
3165 	crq.request_capability.capability =
3166 	    cpu_to_be16(REQ_RX_ADD_ENTRIES_PER_SUBCRQ);
3167 	crq.request_capability.number =
3168 	    cpu_to_be64(adapter->req_rx_add_entries_per_subcrq);
3169 	atomic_inc(&adapter->running_cap_crqs);
3170 	ibmvnic_send_crq(adapter, &crq);
3171 
3172 	crq.request_capability.capability = cpu_to_be16(REQ_MTU);
3173 	crq.request_capability.number = cpu_to_be64(adapter->req_mtu);
3174 	atomic_inc(&adapter->running_cap_crqs);
3175 	ibmvnic_send_crq(adapter, &crq);
3176 
3177 	if (adapter->netdev->flags & IFF_PROMISC) {
3178 		if (adapter->promisc_supported) {
3179 			crq.request_capability.capability =
3180 			    cpu_to_be16(PROMISC_REQUESTED);
3181 			crq.request_capability.number = cpu_to_be64(1);
3182 			atomic_inc(&adapter->running_cap_crqs);
3183 			ibmvnic_send_crq(adapter, &crq);
3184 		}
3185 	} else {
3186 		crq.request_capability.capability =
3187 		    cpu_to_be16(PROMISC_REQUESTED);
3188 		crq.request_capability.number = cpu_to_be64(0);
3189 		atomic_inc(&adapter->running_cap_crqs);
3190 		ibmvnic_send_crq(adapter, &crq);
3191 	}
3192 }
3193 
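/* Return non-zero if the current slot of the sub-CRQ holds a valid
 * entry, i.e. the IBMVNIC_CRQ_CMD_RSP bit is set in its first byte.
 */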
3194 static int pending_scrq(struct ibmvnic_adapter *adapter,
3195 			struct ibmvnic_sub_crq_queue *scrq)
3196 {
3197 	union sub_crq *entry = &scrq->msgs[scrq->cur];
3198 
3199 	if (entry->generic.first & IBMVNIC_CRQ_CMD_RSP)
3200 		return 1;
3201 	else
3202 		return 0;
3203 }
3204 
3205 static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *adapter,
3206 					struct ibmvnic_sub_crq_queue *scrq)
3207 {
3208 	union sub_crq *entry;
3209 	unsigned long flags;
3210 
3211 	spin_lock_irqsave(&scrq->lock, flags);
3212 	entry = &scrq->msgs[scrq->cur];
3213 	if (entry->generic.first & IBMVNIC_CRQ_CMD_RSP) {
3214 		if (++scrq->cur == scrq->size)
3215 			scrq->cur = 0;
3216 	} else {
3217 		entry = NULL;
3218 	}
3219 	spin_unlock_irqrestore(&scrq->lock, flags);
3220 
3221 	return entry;
3222 }
3223 
3224 static union ibmvnic_crq *ibmvnic_next_crq(struct ibmvnic_adapter *adapter)
3225 {
3226 	struct ibmvnic_crq_queue *queue = &adapter->crq;
3227 	union ibmvnic_crq *crq;
3228 
3229 	crq = &queue->msgs[queue->cur];
3230 	if (crq->generic.first & IBMVNIC_CRQ_CMD_RSP) {
3231 		if (++queue->cur == queue->size)
3232 			queue->cur = 0;
3233 	} else {
3234 		crq = NULL;
3235 	}
3236 
3237 	return crq;
3238 }
3239 
3240 static void print_subcrq_error(struct device *dev, int rc, const char *func)
3241 {
3242 	switch (rc) {
3243 	case H_PARAMETER:
3244 		dev_warn_ratelimited(dev,
3245 				     "%s failed: Send request is malformed or adapter failover pending. (rc=%d)\n",
3246 				     func, rc);
3247 		break;
3248 	case H_CLOSED:
3249 		dev_warn_ratelimited(dev,
3250 				     "%s failed: Backing queue closed. Adapter is down or failover pending. (rc=%d)\n",
3251 				     func, rc);
3252 		break;
3253 	default:
3254 		dev_err_ratelimited(dev, "%s failed: (rc=%d)\n", func, rc);
3255 		break;
3256 	}
3257 }
3258 
3259 static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle,
3260 		       union sub_crq *sub_crq)
3261 {
3262 	unsigned int ua = adapter->vdev->unit_address;
3263 	struct device *dev = &adapter->vdev->dev;
3264 	u64 *u64_crq = (u64 *)sub_crq;
3265 	int rc;
3266 
3267 	netdev_dbg(adapter->netdev,
3268 		   "Sending sCRQ %016lx: %016lx %016lx %016lx %016lx\n",
3269 		   (unsigned long int)cpu_to_be64(remote_handle),
3270 		   (unsigned long int)cpu_to_be64(u64_crq[0]),
3271 		   (unsigned long int)cpu_to_be64(u64_crq[1]),
3272 		   (unsigned long int)cpu_to_be64(u64_crq[2]),
3273 		   (unsigned long int)cpu_to_be64(u64_crq[3]));
3274 
3275 	/* Make sure the hypervisor sees the complete request */
3276 	mb();
3277 
3278 	rc = plpar_hcall_norets(H_SEND_SUB_CRQ, ua,
3279 				cpu_to_be64(remote_handle),
3280 				cpu_to_be64(u64_crq[0]),
3281 				cpu_to_be64(u64_crq[1]),
3282 				cpu_to_be64(u64_crq[2]),
3283 				cpu_to_be64(u64_crq[3]));
3284 
3285 	if (rc)
3286 		print_subcrq_error(dev, rc, __func__);
3287 
3288 	return rc;
3289 }
3290 
3291 static int send_subcrq_indirect(struct ibmvnic_adapter *adapter,
3292 				u64 remote_handle, u64 ioba, u64 num_entries)
3293 {
3294 	unsigned int ua = adapter->vdev->unit_address;
3295 	struct device *dev = &adapter->vdev->dev;
3296 	int rc;
3297 
3298 	/* Make sure the hypervisor sees the complete request */
3299 	mb();
3300 	rc = plpar_hcall_norets(H_SEND_SUB_CRQ_INDIRECT, ua,
3301 				cpu_to_be64(remote_handle),
3302 				ioba, num_entries);
3303 
3304 	if (rc)
3305 		print_subcrq_error(dev, rc, __func__);
3306 
3307 	return rc;
3308 }
3309 
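/* Post one CRQ message to the hypervisor via H_SEND_CRQ. Only an INIT
 * command may be sent while the CRQ is inactive, and a fatal reset is
 * scheduled if the queue turns out to be closed mid-reset.
 */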
3310 static int ibmvnic_send_crq(struct ibmvnic_adapter *adapter,
3311 			    union ibmvnic_crq *crq)
3312 {
3313 	unsigned int ua = adapter->vdev->unit_address;
3314 	struct device *dev = &adapter->vdev->dev;
3315 	u64 *u64_crq = (u64 *)crq;
3316 	int rc;
3317 
3318 	netdev_dbg(adapter->netdev, "Sending CRQ: %016lx %016lx\n",
3319 		   (unsigned long int)cpu_to_be64(u64_crq[0]),
3320 		   (unsigned long int)cpu_to_be64(u64_crq[1]));
3321 
3322 	if (!adapter->crq.active &&
3323 	    crq->generic.first != IBMVNIC_CRQ_INIT_CMD) {
3324 		dev_warn(dev, "Invalid request detected while CRQ is inactive, possible device state change during reset\n");
3325 		return -EINVAL;
3326 	}
3327 
3328 	/* Make sure the hypervisor sees the complete request */
3329 	mb();
3330 
3331 	rc = plpar_hcall_norets(H_SEND_CRQ, ua,
3332 				cpu_to_be64(u64_crq[0]),
3333 				cpu_to_be64(u64_crq[1]));
3334 
3335 	if (rc) {
3336 		if (rc == H_CLOSED) {
3337 			dev_warn(dev, "CRQ Queue closed\n");
3338 			if (adapter->resetting)
3339 				ibmvnic_reset(adapter, VNIC_RESET_FATAL);
3340 		}
3341 
3342 		dev_warn(dev, "Send error (rc=%d)\n", rc);
3343 	}
3344 
3345 	return rc;
3346 }
3347 
3348 static int ibmvnic_send_crq_init(struct ibmvnic_adapter *adapter)
3349 {
3350 	union ibmvnic_crq crq;
3351 
3352 	memset(&crq, 0, sizeof(crq));
3353 	crq.generic.first = IBMVNIC_CRQ_INIT_CMD;
3354 	crq.generic.cmd = IBMVNIC_CRQ_INIT;
3355 	netdev_dbg(adapter->netdev, "Sending CRQ init\n");
3356 
3357 	return ibmvnic_send_crq(adapter, &crq);
3358 }
3359 
3360 static int send_version_xchg(struct ibmvnic_adapter *adapter)
3361 {
3362 	union ibmvnic_crq crq;
3363 
3364 	memset(&crq, 0, sizeof(crq));
3365 	crq.version_exchange.first = IBMVNIC_CRQ_CMD;
3366 	crq.version_exchange.cmd = VERSION_EXCHANGE;
3367 	crq.version_exchange.version = cpu_to_be16(ibmvnic_version);
3368 
3369 	return ibmvnic_send_crq(adapter, &crq);
3370 }
3371 
3372 struct vnic_login_client_data {
3373 	u8	type;
3374 	__be16	len;
3375 	char	name[];
3376 } __packed;
3377 
3378 static int vnic_client_data_len(struct ibmvnic_adapter *adapter)
3379 {
3380 	int len;
3381 
	/* Calculate the amount of buffer space needed for the
	 * vnic client data in the login buffer. There are four entries:
	 * OS name, LPAR name, device name, and a null last entry.
	 */
3386 	len = 4 * sizeof(struct vnic_login_client_data);
3387 	len += 6; /* "Linux" plus NULL */
3388 	len += strlen(utsname()->nodename) + 1;
3389 	len += strlen(adapter->netdev->name) + 1;
3390 
3391 	return len;
3392 }
3393 
3394 static void vnic_add_client_data(struct ibmvnic_adapter *adapter,
3395 				 struct vnic_login_client_data *vlcd)
3396 {
3397 	const char *os_name = "Linux";
3398 	int len;
3399 
3400 	/* Type 1 - LPAR OS */
3401 	vlcd->type = 1;
3402 	len = strlen(os_name) + 1;
3403 	vlcd->len = cpu_to_be16(len);
3404 	strncpy(vlcd->name, os_name, len);
3405 	vlcd = (struct vnic_login_client_data *)(vlcd->name + len);
3406 
3407 	/* Type 2 - LPAR name */
3408 	vlcd->type = 2;
3409 	len = strlen(utsname()->nodename) + 1;
3410 	vlcd->len = cpu_to_be16(len);
3411 	strncpy(vlcd->name, utsname()->nodename, len);
3412 	vlcd = (struct vnic_login_client_data *)(vlcd->name + len);
3413 
3414 	/* Type 3 - device name */
3415 	vlcd->type = 3;
3416 	len = strlen(adapter->netdev->name) + 1;
3417 	vlcd->len = cpu_to_be16(len);
3418 	strncpy(vlcd->name, adapter->netdev->name, len);
3419 }
3420 
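/* Build and send the LOGIN request. The login buffer carries the
 * tx/rx sub-CRQ handles plus client identification data; the response
 * buffer is DMA mapped so that the server can fill it in directly.
 */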
3421 static int send_login(struct ibmvnic_adapter *adapter)
3422 {
3423 	struct ibmvnic_login_rsp_buffer *login_rsp_buffer;
3424 	struct ibmvnic_login_buffer *login_buffer;
3425 	struct device *dev = &adapter->vdev->dev;
3426 	dma_addr_t rsp_buffer_token;
3427 	dma_addr_t buffer_token;
3428 	size_t rsp_buffer_size;
3429 	union ibmvnic_crq crq;
3430 	size_t buffer_size;
3431 	__be64 *tx_list_p;
3432 	__be64 *rx_list_p;
3433 	int client_data_len;
3434 	struct vnic_login_client_data *vlcd;
3435 	int i;
3436 
3437 	if (!adapter->tx_scrq || !adapter->rx_scrq) {
3438 		netdev_err(adapter->netdev,
3439 			   "RX or TX queues are not allocated, device login failed\n");
3440 		return -1;
3441 	}
3442 
3443 	release_login_rsp_buffer(adapter);
3444 	client_data_len = vnic_client_data_len(adapter);
3445 
3446 	buffer_size =
3447 	    sizeof(struct ibmvnic_login_buffer) +
3448 	    sizeof(u64) * (adapter->req_tx_queues + adapter->req_rx_queues) +
3449 	    client_data_len;
3450 
3451 	login_buffer = kzalloc(buffer_size, GFP_ATOMIC);
3452 	if (!login_buffer)
3453 		goto buf_alloc_failed;
3454 
3455 	buffer_token = dma_map_single(dev, login_buffer, buffer_size,
3456 				      DMA_TO_DEVICE);
3457 	if (dma_mapping_error(dev, buffer_token)) {
3458 		dev_err(dev, "Couldn't map login buffer\n");
3459 		goto buf_map_failed;
3460 	}
3461 
3462 	rsp_buffer_size = sizeof(struct ibmvnic_login_rsp_buffer) +
3463 			  sizeof(u64) * adapter->req_tx_queues +
3464 			  sizeof(u64) * adapter->req_rx_queues +
3465 			  sizeof(u64) * adapter->req_rx_queues +
3466 			  sizeof(u8) * IBMVNIC_TX_DESC_VERSIONS;
3467 
3468 	login_rsp_buffer = kmalloc(rsp_buffer_size, GFP_ATOMIC);
3469 	if (!login_rsp_buffer)
3470 		goto buf_rsp_alloc_failed;
3471 
3472 	rsp_buffer_token = dma_map_single(dev, login_rsp_buffer,
3473 					  rsp_buffer_size, DMA_FROM_DEVICE);
3474 	if (dma_mapping_error(dev, rsp_buffer_token)) {
3475 		dev_err(dev, "Couldn't map login rsp buffer\n");
3476 		goto buf_rsp_map_failed;
3477 	}
3478 
3479 	adapter->login_buf = login_buffer;
3480 	adapter->login_buf_token = buffer_token;
3481 	adapter->login_buf_sz = buffer_size;
3482 	adapter->login_rsp_buf = login_rsp_buffer;
3483 	adapter->login_rsp_buf_token = rsp_buffer_token;
3484 	adapter->login_rsp_buf_sz = rsp_buffer_size;
3485 
3486 	login_buffer->len = cpu_to_be32(buffer_size);
3487 	login_buffer->version = cpu_to_be32(INITIAL_VERSION_LB);
3488 	login_buffer->num_txcomp_subcrqs = cpu_to_be32(adapter->req_tx_queues);
3489 	login_buffer->off_txcomp_subcrqs =
3490 	    cpu_to_be32(sizeof(struct ibmvnic_login_buffer));
3491 	login_buffer->num_rxcomp_subcrqs = cpu_to_be32(adapter->req_rx_queues);
3492 	login_buffer->off_rxcomp_subcrqs =
3493 	    cpu_to_be32(sizeof(struct ibmvnic_login_buffer) +
3494 			sizeof(u64) * adapter->req_tx_queues);
3495 	login_buffer->login_rsp_ioba = cpu_to_be32(rsp_buffer_token);
3496 	login_buffer->login_rsp_len = cpu_to_be32(rsp_buffer_size);
3497 
3498 	tx_list_p = (__be64 *)((char *)login_buffer +
3499 				      sizeof(struct ibmvnic_login_buffer));
3500 	rx_list_p = (__be64 *)((char *)login_buffer +
3501 				      sizeof(struct ibmvnic_login_buffer) +
3502 				      sizeof(u64) * adapter->req_tx_queues);
3503 
3504 	for (i = 0; i < adapter->req_tx_queues; i++) {
3505 		if (adapter->tx_scrq[i]) {
3506 			tx_list_p[i] = cpu_to_be64(adapter->tx_scrq[i]->
3507 						   crq_num);
3508 		}
3509 	}
3510 
3511 	for (i = 0; i < adapter->req_rx_queues; i++) {
3512 		if (adapter->rx_scrq[i]) {
3513 			rx_list_p[i] = cpu_to_be64(adapter->rx_scrq[i]->
3514 						   crq_num);
3515 		}
3516 	}
3517 
3518 	/* Insert vNIC login client data */
3519 	vlcd = (struct vnic_login_client_data *)
3520 		((char *)rx_list_p + (sizeof(u64) * adapter->req_rx_queues));
3521 	login_buffer->client_data_offset =
3522 			cpu_to_be32((char *)vlcd - (char *)login_buffer);
3523 	login_buffer->client_data_len = cpu_to_be32(client_data_len);
3524 
3525 	vnic_add_client_data(adapter, vlcd);
3526 
3527 	netdev_dbg(adapter->netdev, "Login Buffer:\n");
3528 	for (i = 0; i < (adapter->login_buf_sz - 1) / 8 + 1; i++) {
3529 		netdev_dbg(adapter->netdev, "%016lx\n",
3530 			   ((unsigned long int *)(adapter->login_buf))[i]);
3531 	}
3532 
3533 	memset(&crq, 0, sizeof(crq));
3534 	crq.login.first = IBMVNIC_CRQ_CMD;
3535 	crq.login.cmd = LOGIN;
3536 	crq.login.ioba = cpu_to_be32(buffer_token);
3537 	crq.login.len = cpu_to_be32(buffer_size);
3538 	ibmvnic_send_crq(adapter, &crq);
3539 
3540 	return 0;
3541 
3542 buf_rsp_map_failed:
3543 	kfree(login_rsp_buffer);
3544 buf_rsp_alloc_failed:
3545 	dma_unmap_single(dev, buffer_token, buffer_size, DMA_TO_DEVICE);
3546 buf_map_failed:
3547 	kfree(login_buffer);
3548 buf_alloc_failed:
3549 	return -1;
3550 }
3551 
3552 static int send_request_map(struct ibmvnic_adapter *adapter, dma_addr_t addr,
3553 			    u32 len, u8 map_id)
3554 {
3555 	union ibmvnic_crq crq;
3556 
3557 	memset(&crq, 0, sizeof(crq));
3558 	crq.request_map.first = IBMVNIC_CRQ_CMD;
3559 	crq.request_map.cmd = REQUEST_MAP;
3560 	crq.request_map.map_id = map_id;
3561 	crq.request_map.ioba = cpu_to_be32(addr);
3562 	crq.request_map.len = cpu_to_be32(len);
3563 	return ibmvnic_send_crq(adapter, &crq);
3564 }
3565 
3566 static int send_request_unmap(struct ibmvnic_adapter *adapter, u8 map_id)
3567 {
3568 	union ibmvnic_crq crq;
3569 
3570 	memset(&crq, 0, sizeof(crq));
3571 	crq.request_unmap.first = IBMVNIC_CRQ_CMD;
3572 	crq.request_unmap.cmd = REQUEST_UNMAP;
3573 	crq.request_unmap.map_id = map_id;
3574 	return ibmvnic_send_crq(adapter, &crq);
3575 }
3576 
3577 static void send_map_query(struct ibmvnic_adapter *adapter)
3578 {
3579 	union ibmvnic_crq crq;
3580 
3581 	memset(&crq, 0, sizeof(crq));
3582 	crq.query_map.first = IBMVNIC_CRQ_CMD;
3583 	crq.query_map.cmd = QUERY_MAP;
3584 	ibmvnic_send_crq(adapter, &crq);
3585 }
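
/* The three helpers above manage buffer registrations with the VNIC
 * server: REQUEST_MAP associates an already DMA-mapped buffer with a
 * small map_id, REQUEST_UNMAP retires that id, and QUERY_MAP asks the
 * server about its mapping capacity.  A sketch of the expected pairing
 * (hypothetical buf, len and map_id; error handling elided):
 *
 *	dma_addr_t addr;
 *
 *	addr = dma_map_single(dev, buf, len, DMA_BIDIRECTIONAL);
 *	if (!dma_mapping_error(dev, addr)) {
 *		send_request_map(adapter, addr, len, map_id);
 *		...	(the server refers to the buffer by map_id)
 *		send_request_unmap(adapter, map_id);
 *		dma_unmap_single(dev, addr, len, DMA_BIDIRECTIONAL);
 *	}
 */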
3586 
3587 /* Send a series of CRQs requesting various capabilities of the VNIC server */
3588 static void send_cap_queries(struct ibmvnic_adapter *adapter)
3589 {
3590 	union ibmvnic_crq crq;
3591 
3592 	atomic_set(&adapter->running_cap_crqs, 0);
3593 	memset(&crq, 0, sizeof(crq));
3594 	crq.query_capability.first = IBMVNIC_CRQ_CMD;
3595 	crq.query_capability.cmd = QUERY_CAPABILITY;
3596 
3597 	crq.query_capability.capability = cpu_to_be16(MIN_TX_QUEUES);
3598 	atomic_inc(&adapter->running_cap_crqs);
3599 	ibmvnic_send_crq(adapter, &crq);
3600 
3601 	crq.query_capability.capability = cpu_to_be16(MIN_RX_QUEUES);
3602 	atomic_inc(&adapter->running_cap_crqs);
3603 	ibmvnic_send_crq(adapter, &crq);
3604 
3605 	crq.query_capability.capability = cpu_to_be16(MIN_RX_ADD_QUEUES);
3606 	atomic_inc(&adapter->running_cap_crqs);
3607 	ibmvnic_send_crq(adapter, &crq);
3608 
3609 	crq.query_capability.capability = cpu_to_be16(MAX_TX_QUEUES);
3610 	atomic_inc(&adapter->running_cap_crqs);
3611 	ibmvnic_send_crq(adapter, &crq);
3612 
3613 	crq.query_capability.capability = cpu_to_be16(MAX_RX_QUEUES);
3614 	atomic_inc(&adapter->running_cap_crqs);
3615 	ibmvnic_send_crq(adapter, &crq);
3616 
3617 	crq.query_capability.capability = cpu_to_be16(MAX_RX_ADD_QUEUES);
3618 	atomic_inc(&adapter->running_cap_crqs);
3619 	ibmvnic_send_crq(adapter, &crq);
3620 
3621 	crq.query_capability.capability =
3622 	    cpu_to_be16(MIN_TX_ENTRIES_PER_SUBCRQ);
3623 	atomic_inc(&adapter->running_cap_crqs);
3624 	ibmvnic_send_crq(adapter, &crq);
3625 
3626 	crq.query_capability.capability =
3627 	    cpu_to_be16(MIN_RX_ADD_ENTRIES_PER_SUBCRQ);
3628 	atomic_inc(&adapter->running_cap_crqs);
3629 	ibmvnic_send_crq(adapter, &crq);
3630 
3631 	crq.query_capability.capability =
3632 	    cpu_to_be16(MAX_TX_ENTRIES_PER_SUBCRQ);
3633 	atomic_inc(&adapter->running_cap_crqs);
3634 	ibmvnic_send_crq(adapter, &crq);
3635 
3636 	crq.query_capability.capability =
3637 	    cpu_to_be16(MAX_RX_ADD_ENTRIES_PER_SUBCRQ);
3638 	atomic_inc(&adapter->running_cap_crqs);
3639 	ibmvnic_send_crq(adapter, &crq);
3640 
3641 	crq.query_capability.capability = cpu_to_be16(TCP_IP_OFFLOAD);
3642 	atomic_inc(&adapter->running_cap_crqs);
3643 	ibmvnic_send_crq(adapter, &crq);
3644 
3645 	crq.query_capability.capability = cpu_to_be16(PROMISC_SUPPORTED);
3646 	atomic_inc(&adapter->running_cap_crqs);
3647 	ibmvnic_send_crq(adapter, &crq);
3648 
3649 	crq.query_capability.capability = cpu_to_be16(MIN_MTU);
3650 	atomic_inc(&adapter->running_cap_crqs);
3651 	ibmvnic_send_crq(adapter, &crq);
3652 
3653 	crq.query_capability.capability = cpu_to_be16(MAX_MTU);
3654 	atomic_inc(&adapter->running_cap_crqs);
3655 	ibmvnic_send_crq(adapter, &crq);
3656 
3657 	crq.query_capability.capability = cpu_to_be16(MAX_MULTICAST_FILTERS);
3658 	atomic_inc(&adapter->running_cap_crqs);
3659 	ibmvnic_send_crq(adapter, &crq);
3660 
3661 	crq.query_capability.capability = cpu_to_be16(VLAN_HEADER_INSERTION);
3662 	atomic_inc(&adapter->running_cap_crqs);
3663 	ibmvnic_send_crq(adapter, &crq);
3664 
3665 	crq.query_capability.capability = cpu_to_be16(RX_VLAN_HEADER_INSERTION);
3666 	atomic_inc(&adapter->running_cap_crqs);
3667 	ibmvnic_send_crq(adapter, &crq);
3668 
3669 	crq.query_capability.capability = cpu_to_be16(MAX_TX_SG_ENTRIES);
3670 	atomic_inc(&adapter->running_cap_crqs);
3671 	ibmvnic_send_crq(adapter, &crq);
3672 
3673 	crq.query_capability.capability = cpu_to_be16(RX_SG_SUPPORTED);
3674 	atomic_inc(&adapter->running_cap_crqs);
3675 	ibmvnic_send_crq(adapter, &crq);
3676 
3677 	crq.query_capability.capability = cpu_to_be16(OPT_TX_COMP_SUB_QUEUES);
3678 	atomic_inc(&adapter->running_cap_crqs);
3679 	ibmvnic_send_crq(adapter, &crq);
3680 
3681 	crq.query_capability.capability = cpu_to_be16(OPT_RX_COMP_QUEUES);
3682 	atomic_inc(&adapter->running_cap_crqs);
3683 	ibmvnic_send_crq(adapter, &crq);
3684 
3685 	crq.query_capability.capability =
3686 			cpu_to_be16(OPT_RX_BUFADD_Q_PER_RX_COMP_Q);
3687 	atomic_inc(&adapter->running_cap_crqs);
3688 	ibmvnic_send_crq(adapter, &crq);
3689 
3690 	crq.query_capability.capability =
3691 			cpu_to_be16(OPT_TX_ENTRIES_PER_SUBCRQ);
3692 	atomic_inc(&adapter->running_cap_crqs);
3693 	ibmvnic_send_crq(adapter, &crq);
3694 
3695 	crq.query_capability.capability =
3696 			cpu_to_be16(OPT_RXBA_ENTRIES_PER_SUBCRQ);
3697 	atomic_inc(&adapter->running_cap_crqs);
3698 	ibmvnic_send_crq(adapter, &crq);
3699 
3700 	crq.query_capability.capability = cpu_to_be16(TX_RX_DESC_REQ);
3701 	atomic_inc(&adapter->running_cap_crqs);
3702 	ibmvnic_send_crq(adapter, &crq);
3703 }
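
/* Every query above follows the same pattern: running_cap_crqs is
 * incremented before each send so that handle_query_cap_rsp() can tell,
 * by the counter reaching zero, when the last outstanding response has
 * arrived.  A minimal sketch of the pattern, using a hypothetical
 * helper name:
 *
 *	static void send_one_cap_query(struct ibmvnic_adapter *adapter,
 *				       union ibmvnic_crq *crq, u16 cap)
 *	{
 *		crq->query_capability.capability = cpu_to_be16(cap);
 *		atomic_inc(&adapter->running_cap_crqs);
 *		ibmvnic_send_crq(adapter, crq);
 *	}
 */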
3704 
3705 static void handle_vpd_size_rsp(union ibmvnic_crq *crq,
3706 				struct ibmvnic_adapter *adapter)
3707 {
3708 	struct device *dev = &adapter->vdev->dev;
3709 
3710 	if (crq->get_vpd_size_rsp.rc.code) {
3711 		dev_err(dev, "Error retrieving VPD size, rc=%x\n",
3712 			crq->get_vpd_size_rsp.rc.code);
3713 		complete(&adapter->fw_done);
3714 		return;
3715 	}
3716 
3717 	adapter->vpd->len = be64_to_cpu(crq->get_vpd_size_rsp.len);
3718 	complete(&adapter->fw_done);
3719 }
3720 
3721 static void handle_vpd_rsp(union ibmvnic_crq *crq,
3722 			   struct ibmvnic_adapter *adapter)
3723 {
3724 	struct device *dev = &adapter->vdev->dev;
3725 	unsigned char *substr = NULL;
3726 	u8 fw_level_len = 0;
3727 
3728 	memset(adapter->fw_version, 0, 32);
3729 
3730 	dma_unmap_single(dev, adapter->vpd->dma_addr, adapter->vpd->len,
3731 			 DMA_FROM_DEVICE);
3732 
3733 	if (crq->get_vpd_rsp.rc.code) {
3734 		dev_err(dev, "Error retrieving VPD from device, rc=%x\n",
3735 			crq->get_vpd_rsp.rc.code);
3736 		goto complete;
3737 	}
3738 
3739 	/* get the position of the firmware version info
3740 	 * located after the ASCII 'RM' substring in the buffer
3741 	 */
3742 	substr = strnstr(adapter->vpd->buff, "RM", adapter->vpd->len);
3743 	if (!substr) {
3744 		dev_info(dev, "Warning - No FW level has been provided in the VPD buffer by the VIOS Server\n");
3745 		goto complete;
3746 	}
3747 
3748 	/* get length of firmware level ASCII substring */
3749 	if ((substr + 2) < (adapter->vpd->buff + adapter->vpd->len)) {
3750 		fw_level_len = *(substr + 2);
3751 	} else {
3752 		dev_info(dev, "FW level length byte lies beyond the end of the VPD buff\n");
3753 		goto complete;
3754 	}
3755 
3756 	/* copy firmware version string from vpd into adapter */
3757 	if ((substr + 3 + fw_level_len) <
3758 	    (adapter->vpd->buff + adapter->vpd->len)) {
3759 		strncpy((char *)adapter->fw_version, substr + 3, fw_level_len);
3760 	} else {
3761 		dev_info(dev, "FW level substr extends beyond the end of the VPD buff\n");
3762 	}
3763 
3764 complete:
3765 	if (adapter->fw_version[0] == '\0')
3766 		strncpy((char *)adapter->fw_version, "N/A", 3);
3767 	complete(&adapter->fw_done);
3768 }
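
/* As parsed above, the firmware level entry in the VPD buffer is
 * assumed to have the form (inferred from this code, not a complete
 * VPD keyword grammar):
 *
 *	'R' 'M' <one length byte> <length bytes of ASCII FW level>
 */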
3769 
3770 static void handle_query_ip_offload_rsp(struct ibmvnic_adapter *adapter)
3771 {
3772 	struct device *dev = &adapter->vdev->dev;
3773 	struct ibmvnic_query_ip_offload_buffer *buf = &adapter->ip_offload_buf;
3774 	union ibmvnic_crq crq;
3775 	int i;
3776 
3777 	dma_unmap_single(dev, adapter->ip_offload_tok,
3778 			 sizeof(adapter->ip_offload_buf), DMA_FROM_DEVICE);
3779 
3780 	netdev_dbg(adapter->netdev, "Query IP Offload Buffer:\n");
3781 	for (i = 0; i < DIV_ROUND_UP(sizeof(adapter->ip_offload_buf), 8); i++)
3782 		netdev_dbg(adapter->netdev, "%016lx\n",
3783 			   ((unsigned long *)buf)[i]);
3784 
3785 	netdev_dbg(adapter->netdev, "ipv4_chksum = %d\n", buf->ipv4_chksum);
3786 	netdev_dbg(adapter->netdev, "ipv6_chksum = %d\n", buf->ipv6_chksum);
3787 	netdev_dbg(adapter->netdev, "tcp_ipv4_chksum = %d\n",
3788 		   buf->tcp_ipv4_chksum);
3789 	netdev_dbg(adapter->netdev, "tcp_ipv6_chksum = %d\n",
3790 		   buf->tcp_ipv6_chksum);
3791 	netdev_dbg(adapter->netdev, "udp_ipv4_chksum = %d\n",
3792 		   buf->udp_ipv4_chksum);
3793 	netdev_dbg(adapter->netdev, "udp_ipv6_chksum = %d\n",
3794 		   buf->udp_ipv6_chksum);
3795 	netdev_dbg(adapter->netdev, "large_tx_ipv4 = %d\n",
3796 		   buf->large_tx_ipv4);
3797 	netdev_dbg(adapter->netdev, "large_tx_ipv6 = %d\n",
3798 		   buf->large_tx_ipv6);
3799 	netdev_dbg(adapter->netdev, "large_rx_ipv4 = %d\n",
3800 		   buf->large_rx_ipv4);
3801 	netdev_dbg(adapter->netdev, "large_rx_ipv6 = %d\n",
3802 		   buf->large_rx_ipv6);
3803 	netdev_dbg(adapter->netdev, "max_ipv4_hdr_sz = %d\n",
3804 		   buf->max_ipv4_header_size);
3805 	netdev_dbg(adapter->netdev, "max_ipv6_hdr_sz = %d\n",
3806 		   buf->max_ipv6_header_size);
3807 	netdev_dbg(adapter->netdev, "max_tcp_hdr_size = %d\n",
3808 		   buf->max_tcp_header_size);
3809 	netdev_dbg(adapter->netdev, "max_udp_hdr_size = %d\n",
3810 		   buf->max_udp_header_size);
3811 	netdev_dbg(adapter->netdev, "max_large_tx_size = %d\n",
3812 		   buf->max_large_tx_size);
3813 	netdev_dbg(adapter->netdev, "max_large_rx_size = %d\n",
3814 		   buf->max_large_rx_size);
3815 	netdev_dbg(adapter->netdev, "ipv6_ext_hdr = %d\n",
3816 		   buf->ipv6_extension_header);
3817 	netdev_dbg(adapter->netdev, "tcp_pseudosum_req = %d\n",
3818 		   buf->tcp_pseudosum_req);
3819 	netdev_dbg(adapter->netdev, "num_ipv6_ext_hd = %d\n",
3820 		   buf->num_ipv6_ext_headers);
3821 	netdev_dbg(adapter->netdev, "off_ipv6_ext_hd = %d\n",
3822 		   buf->off_ipv6_ext_headers);
3823 
3824 	adapter->ip_offload_ctrl_tok =
3825 	    dma_map_single(dev, &adapter->ip_offload_ctrl,
3826 			   sizeof(adapter->ip_offload_ctrl), DMA_TO_DEVICE);
3827 
3828 	if (dma_mapping_error(dev, adapter->ip_offload_ctrl_tok)) {
3829 		dev_err(dev, "Couldn't map ip offload control buffer\n");
3830 		return;
3831 	}
3832 
3833 	adapter->ip_offload_ctrl.len =
3834 	    cpu_to_be32(sizeof(adapter->ip_offload_ctrl));
3835 	adapter->ip_offload_ctrl.version = cpu_to_be32(INITIAL_VERSION_IOB);
3836 	adapter->ip_offload_ctrl.ipv4_chksum = buf->ipv4_chksum;
3837 	adapter->ip_offload_ctrl.ipv6_chksum = buf->ipv6_chksum;
3838 	adapter->ip_offload_ctrl.tcp_ipv4_chksum = buf->tcp_ipv4_chksum;
3839 	adapter->ip_offload_ctrl.udp_ipv4_chksum = buf->udp_ipv4_chksum;
3840 	adapter->ip_offload_ctrl.tcp_ipv6_chksum = buf->tcp_ipv6_chksum;
3841 	adapter->ip_offload_ctrl.udp_ipv6_chksum = buf->udp_ipv6_chksum;
3842 	adapter->ip_offload_ctrl.large_tx_ipv4 = buf->large_tx_ipv4;
3843 	adapter->ip_offload_ctrl.large_tx_ipv6 = buf->large_tx_ipv6;
3844 
3845 	/* large_rx disabled for now, additional features needed */
3846 	adapter->ip_offload_ctrl.large_rx_ipv4 = 0;
3847 	adapter->ip_offload_ctrl.large_rx_ipv6 = 0;
3848 
3849 	adapter->netdev->features = NETIF_F_SG | NETIF_F_GSO;
3850 
3851 	if (buf->tcp_ipv4_chksum || buf->udp_ipv4_chksum)
3852 		adapter->netdev->features |= NETIF_F_IP_CSUM;
3853 
3854 	if (buf->tcp_ipv6_chksum || buf->udp_ipv6_chksum)
3855 		adapter->netdev->features |= NETIF_F_IPV6_CSUM;
3856 
3857 	if ((adapter->netdev->features &
3858 	    (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)))
3859 		adapter->netdev->features |= NETIF_F_RXCSUM;
3860 
3861 	if (buf->large_tx_ipv4)
3862 		adapter->netdev->features |= NETIF_F_TSO;
3863 	if (buf->large_tx_ipv6)
3864 		adapter->netdev->features |= NETIF_F_TSO6;
3865 
3866 	adapter->netdev->hw_features |= adapter->netdev->features;
3867 
3868 	memset(&crq, 0, sizeof(crq));
3869 	crq.control_ip_offload.first = IBMVNIC_CRQ_CMD;
3870 	crq.control_ip_offload.cmd = CONTROL_IP_OFFLOAD;
3871 	crq.control_ip_offload.len =
3872 	    cpu_to_be32(sizeof(adapter->ip_offload_ctrl));
3873 	crq.control_ip_offload.ioba = cpu_to_be32(adapter->ip_offload_ctrl_tok);
3874 	ibmvnic_send_crq(adapter, &crq);
3875 }
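
/* Offload negotiation is a two-step exchange: the QUERY_IP_OFFLOAD
 * response parsed above reports what the server can do, and the
 * CONTROL_IP_OFFLOAD request sent at the end of the function tells the
 * server which of those offloads the driver will actually use, with
 * the same choices mirrored into netdev->features.
 */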
3876 
3877 static const char *ibmvnic_fw_err_cause(u16 cause)
3878 {
3879 	switch (cause) {
3880 	case ADAPTER_PROBLEM:
3881 		return "adapter problem";
3882 	case BUS_PROBLEM:
3883 		return "bus problem";
3884 	case FW_PROBLEM:
3885 		return "firmware problem";
3886 	case DD_PROBLEM:
3887 		return "device driver problem";
3888 	case EEH_RECOVERY:
3889 		return "EEH recovery";
3890 	case FW_UPDATED:
3891 		return "firmware updated";
3892 	case LOW_MEMORY:
3893 		return "low memory";
3894 	default:
3895 		return "unknown";
3896 	}
3897 }
3898 
3899 static void handle_error_indication(union ibmvnic_crq *crq,
3900 				    struct ibmvnic_adapter *adapter)
3901 {
3902 	struct device *dev = &adapter->vdev->dev;
3903 	u16 cause;
3904 
3905 	cause = be16_to_cpu(crq->error_indication.error_cause);
3906 
3907 	dev_warn_ratelimited(dev,
3908 			     "Firmware reports %serror, cause: %s. Starting recovery...\n",
3909 			     crq->error_indication.flags
3910 				& IBMVNIC_FATAL_ERROR ? "FATAL " : "",
3911 			     ibmvnic_fw_err_cause(cause));
3912 
3913 	if (crq->error_indication.flags & IBMVNIC_FATAL_ERROR)
3914 		ibmvnic_reset(adapter, VNIC_RESET_FATAL);
3915 	else
3916 		ibmvnic_reset(adapter, VNIC_RESET_NON_FATAL);
3917 }
3918 
3919 static int handle_change_mac_rsp(union ibmvnic_crq *crq,
3920 				 struct ibmvnic_adapter *adapter)
3921 {
3922 	struct net_device *netdev = adapter->netdev;
3923 	struct device *dev = &adapter->vdev->dev;
3924 	long rc;
3925 
3926 	rc = crq->change_mac_addr_rsp.rc.code;
3927 	if (rc) {
3928 		dev_err(dev, "Error %ld in CHANGE_MAC_ADDR_RSP\n", rc);
3929 		goto out;
3930 	}
3931 	memcpy(netdev->dev_addr, &crq->change_mac_addr_rsp.mac_addr[0],
3932 	       ETH_ALEN);
3933 out:
3934 	complete(&adapter->fw_done);
3935 	return rc;
3936 }
3937 
3938 static void handle_request_cap_rsp(union ibmvnic_crq *crq,
3939 				   struct ibmvnic_adapter *adapter)
3940 {
3941 	struct device *dev = &adapter->vdev->dev;
3942 	u64 *req_value;
3943 	char *name;
3944 
3945 	atomic_dec(&adapter->running_cap_crqs);
3946 	switch (be16_to_cpu(crq->request_capability_rsp.capability)) {
3947 	case REQ_TX_QUEUES:
3948 		req_value = &adapter->req_tx_queues;
3949 		name = "tx";
3950 		break;
3951 	case REQ_RX_QUEUES:
3952 		req_value = &adapter->req_rx_queues;
3953 		name = "rx";
3954 		break;
3955 	case REQ_RX_ADD_QUEUES:
3956 		req_value = &adapter->req_rx_add_queues;
3957 		name = "rx_add";
3958 		break;
3959 	case REQ_TX_ENTRIES_PER_SUBCRQ:
3960 		req_value = &adapter->req_tx_entries_per_subcrq;
3961 		name = "tx_entries_per_subcrq";
3962 		break;
3963 	case REQ_RX_ADD_ENTRIES_PER_SUBCRQ:
3964 		req_value = &adapter->req_rx_add_entries_per_subcrq;
3965 		name = "rx_add_entries_per_subcrq";
3966 		break;
3967 	case REQ_MTU:
3968 		req_value = &adapter->req_mtu;
3969 		name = "mtu";
3970 		break;
3971 	case PROMISC_REQUESTED:
3972 		req_value = &adapter->promisc;
3973 		name = "promisc";
3974 		break;
3975 	default:
3976 		dev_err(dev, "Got invalid cap request rsp %d\n",
3977 			be16_to_cpu(crq->request_capability.capability));
3978 		return;
3979 	}
3980 
3981 	switch (crq->request_capability_rsp.rc.code) {
3982 	case SUCCESS:
3983 		break;
3984 	case PARTIALSUCCESS:
3985 		dev_info(dev, "req=%lld, rsp=%ld in %s queue, retrying.\n",
3986 			 *req_value,
3987 			 (long)be64_to_cpu(crq->request_capability_rsp.number),
3988 			 name);
3989 
3990 		if (be16_to_cpu(crq->request_capability_rsp.capability) ==
3991 		    REQ_MTU) {
3992 			dev_err(dev, "mtu of %llu is not supported. Reverting.\n",
3993 				*req_value);
3994 			*req_value = adapter->fallback.mtu;
3995 		} else {
3996 			*req_value =
3997 				be64_to_cpu(crq->request_capability_rsp.number);
3998 		}
3999 
4000 		ibmvnic_send_req_caps(adapter, 1);
4001 		return;
4002 	default:
4003 		dev_err(dev, "Error %d in request cap rsp\n",
4004 			crq->request_capability_rsp.rc.code);
4005 		return;
4006 	}
4007 
4008 	/* Done receiving requested capabilities, query IP offload support */
4009 	if (atomic_read(&adapter->running_cap_crqs) == 0) {
4010 		union ibmvnic_crq newcrq;
4011 		int buf_sz = sizeof(struct ibmvnic_query_ip_offload_buffer);
4012 		struct ibmvnic_query_ip_offload_buffer *ip_offload_buf =
4013 		    &adapter->ip_offload_buf;
4014 
4015 		adapter->wait_capability = false;
4016 		adapter->ip_offload_tok = dma_map_single(dev, ip_offload_buf,
4017 							 buf_sz,
4018 							 DMA_FROM_DEVICE);
4019 
4020 		if (dma_mapping_error(dev, adapter->ip_offload_tok)) {
4021 			if (!firmware_has_feature(FW_FEATURE_CMO))
4022 				dev_err(dev, "Couldn't map offload buffer\n");
4023 			return;
4024 		}
4025 
4026 		memset(&newcrq, 0, sizeof(newcrq));
4027 		newcrq.query_ip_offload.first = IBMVNIC_CRQ_CMD;
4028 		newcrq.query_ip_offload.cmd = QUERY_IP_OFFLOAD;
4029 		newcrq.query_ip_offload.len = cpu_to_be32(buf_sz);
4030 		newcrq.query_ip_offload.ioba =
4031 		    cpu_to_be32(adapter->ip_offload_tok);
4032 
4033 		ibmvnic_send_crq(adapter, &newcrq);
4034 	}
4035 }
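
/* Capability negotiation is a loop: on PARTIALSUCCESS the server's
 * counter-proposal (or the fallback MTU) is written back into the
 * affected req_* field and ibmvnic_send_req_caps() is re-invoked with
 * its retry argument set, so this handler runs again until every
 * request has either succeeded or failed outright.
 */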
4036 
4037 static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
4038 			    struct ibmvnic_adapter *adapter)
4039 {
4040 	struct device *dev = &adapter->vdev->dev;
4041 	struct net_device *netdev = adapter->netdev;
4042 	struct ibmvnic_login_rsp_buffer *login_rsp = adapter->login_rsp_buf;
4043 	struct ibmvnic_login_buffer *login = adapter->login_buf;
4044 	int i;
4045 
4046 	dma_unmap_single(dev, adapter->login_buf_token, adapter->login_buf_sz,
4047 			 DMA_TO_DEVICE);
4048 	dma_unmap_single(dev, adapter->login_rsp_buf_token,
4049 			 adapter->login_rsp_buf_sz, DMA_FROM_DEVICE);
4050 
4051 	/* If the number of queues requested can't be allocated by the
4052 	 * server, the login response will return with code 1. We will need
4053 	 * to resend the login buffer with fewer queues requested.
4054 	 */
4055 	if (login_rsp_crq->generic.rc.code) {
4056 		adapter->init_done_rc = login_rsp_crq->generic.rc.code;
4057 		complete(&adapter->init_done);
4058 		return 0;
4059 	}
4060 
4061 	netdev->mtu = adapter->req_mtu - ETH_HLEN;
4062 
4063 	netdev_dbg(adapter->netdev, "Login Response Buffer:\n");
4064 	for (i = 0; i < DIV_ROUND_UP(adapter->login_rsp_buf_sz, 8); i++) {
4065 		netdev_dbg(adapter->netdev, "%016lx\n",
4066 			   ((unsigned long *)(adapter->login_rsp_buf))[i]);
4067 	}
4068 
4069 	/* Sanity checks */
4070 	if (login->num_txcomp_subcrqs != login_rsp->num_txsubm_subcrqs ||
4071 	    (be32_to_cpu(login->num_rxcomp_subcrqs) *
4072 	     adapter->req_rx_add_queues !=
4073 	     be32_to_cpu(login_rsp->num_rxadd_subcrqs))) {
4074 		dev_err(dev, "FATAL: Inconsistent login and login rsp\n");
4075 		ibmvnic_remove(adapter->vdev);
4076 		return -EIO;
4077 	}
4078 	release_login_buffer(adapter);
4079 	complete(&adapter->init_done);
4080 
4081 	return 0;
4082 }
4083 
4084 static void handle_request_unmap_rsp(union ibmvnic_crq *crq,
4085 				     struct ibmvnic_adapter *adapter)
4086 {
4087 	struct device *dev = &adapter->vdev->dev;
4088 	long rc;
4089 
4090 	rc = crq->request_unmap_rsp.rc.code;
4091 	if (rc)
4092 		dev_err(dev, "Error %ld in REQUEST_UNMAP_RSP\n", rc);
4093 }
4094 
4095 static void handle_query_map_rsp(union ibmvnic_crq *crq,
4096 				 struct ibmvnic_adapter *adapter)
4097 {
4098 	struct net_device *netdev = adapter->netdev;
4099 	struct device *dev = &adapter->vdev->dev;
4100 	long rc;
4101 
4102 	rc = crq->query_map_rsp.rc.code;
4103 	if (rc) {
4104 		dev_err(dev, "Error %ld in QUERY_MAP_RSP\n", rc);
4105 		return;
4106 	}
4107 	netdev_dbg(netdev, "page_size = %d\ntot_pages = %d\nfree_pages = %d\n",
4108 		   crq->query_map_rsp.page_size, crq->query_map_rsp.tot_pages,
4109 		   crq->query_map_rsp.free_pages);
4110 }
4111 
4112 static void handle_query_cap_rsp(union ibmvnic_crq *crq,
4113 				 struct ibmvnic_adapter *adapter)
4114 {
4115 	struct net_device *netdev = adapter->netdev;
4116 	struct device *dev = &adapter->vdev->dev;
4117 	long rc;
4118 
4119 	atomic_dec(&adapter->running_cap_crqs);
4120 	netdev_dbg(netdev, "Outstanding queries: %d\n",
4121 		   atomic_read(&adapter->running_cap_crqs));
4122 	rc = crq->query_capability.rc.code;
4123 	if (rc) {
4124 		dev_err(dev, "Error %ld in QUERY_CAP_RSP\n", rc);
4125 		goto out;
4126 	}
4127 
4128 	switch (be16_to_cpu(crq->query_capability.capability)) {
4129 	case MIN_TX_QUEUES:
4130 		adapter->min_tx_queues =
4131 		    be64_to_cpu(crq->query_capability.number);
4132 		netdev_dbg(netdev, "min_tx_queues = %lld\n",
4133 			   adapter->min_tx_queues);
4134 		break;
4135 	case MIN_RX_QUEUES:
4136 		adapter->min_rx_queues =
4137 		    be64_to_cpu(crq->query_capability.number);
4138 		netdev_dbg(netdev, "min_rx_queues = %lld\n",
4139 			   adapter->min_rx_queues);
4140 		break;
4141 	case MIN_RX_ADD_QUEUES:
4142 		adapter->min_rx_add_queues =
4143 		    be64_to_cpu(crq->query_capability.number);
4144 		netdev_dbg(netdev, "min_rx_add_queues = %lld\n",
4145 			   adapter->min_rx_add_queues);
4146 		break;
4147 	case MAX_TX_QUEUES:
4148 		adapter->max_tx_queues =
4149 		    be64_to_cpu(crq->query_capability.number);
4150 		netdev_dbg(netdev, "max_tx_queues = %lld\n",
4151 			   adapter->max_tx_queues);
4152 		break;
4153 	case MAX_RX_QUEUES:
4154 		adapter->max_rx_queues =
4155 		    be64_to_cpu(crq->query_capability.number);
4156 		netdev_dbg(netdev, "max_rx_queues = %lld\n",
4157 			   adapter->max_rx_queues);
4158 		break;
4159 	case MAX_RX_ADD_QUEUES:
4160 		adapter->max_rx_add_queues =
4161 		    be64_to_cpu(crq->query_capability.number);
4162 		netdev_dbg(netdev, "max_rx_add_queues = %lld\n",
4163 			   adapter->max_rx_add_queues);
4164 		break;
4165 	case MIN_TX_ENTRIES_PER_SUBCRQ:
4166 		adapter->min_tx_entries_per_subcrq =
4167 		    be64_to_cpu(crq->query_capability.number);
4168 		netdev_dbg(netdev, "min_tx_entries_per_subcrq = %lld\n",
4169 			   adapter->min_tx_entries_per_subcrq);
4170 		break;
4171 	case MIN_RX_ADD_ENTRIES_PER_SUBCRQ:
4172 		adapter->min_rx_add_entries_per_subcrq =
4173 		    be64_to_cpu(crq->query_capability.number);
4174 		netdev_dbg(netdev, "min_rx_add_entrs_per_subcrq = %lld\n",
4175 			   adapter->min_rx_add_entries_per_subcrq);
4176 		break;
4177 	case MAX_TX_ENTRIES_PER_SUBCRQ:
4178 		adapter->max_tx_entries_per_subcrq =
4179 		    be64_to_cpu(crq->query_capability.number);
4180 		netdev_dbg(netdev, "max_tx_entries_per_subcrq = %lld\n",
4181 			   adapter->max_tx_entries_per_subcrq);
4182 		break;
4183 	case MAX_RX_ADD_ENTRIES_PER_SUBCRQ:
4184 		adapter->max_rx_add_entries_per_subcrq =
4185 		    be64_to_cpu(crq->query_capability.number);
4186 		netdev_dbg(netdev, "max_rx_add_entrs_per_subcrq = %lld\n",
4187 			   adapter->max_rx_add_entries_per_subcrq);
4188 		break;
4189 	case TCP_IP_OFFLOAD:
4190 		adapter->tcp_ip_offload =
4191 		    be64_to_cpu(crq->query_capability.number);
4192 		netdev_dbg(netdev, "tcp_ip_offload = %lld\n",
4193 			   adapter->tcp_ip_offload);
4194 		break;
4195 	case PROMISC_SUPPORTED:
4196 		adapter->promisc_supported =
4197 		    be64_to_cpu(crq->query_capability.number);
4198 		netdev_dbg(netdev, "promisc_supported = %lld\n",
4199 			   adapter->promisc_supported);
4200 		break;
4201 	case MIN_MTU:
4202 		adapter->min_mtu = be64_to_cpu(crq->query_capability.number);
4203 		netdev->min_mtu = adapter->min_mtu - ETH_HLEN;
4204 		netdev_dbg(netdev, "min_mtu = %lld\n", adapter->min_mtu);
4205 		break;
4206 	case MAX_MTU:
4207 		adapter->max_mtu = be64_to_cpu(crq->query_capability.number);
4208 		netdev->max_mtu = adapter->max_mtu - ETH_HLEN;
4209 		netdev_dbg(netdev, "max_mtu = %lld\n", adapter->max_mtu);
4210 		break;
4211 	case MAX_MULTICAST_FILTERS:
4212 		adapter->max_multicast_filters =
4213 		    be64_to_cpu(crq->query_capability.number);
4214 		netdev_dbg(netdev, "max_multicast_filters = %lld\n",
4215 			   adapter->max_multicast_filters);
4216 		break;
4217 	case VLAN_HEADER_INSERTION:
4218 		adapter->vlan_header_insertion =
4219 		    be64_to_cpu(crq->query_capability.number);
4220 		if (adapter->vlan_header_insertion)
4221 			netdev->features |= NETIF_F_HW_VLAN_STAG_TX;
4222 		netdev_dbg(netdev, "vlan_header_insertion = %lld\n",
4223 			   adapter->vlan_header_insertion);
4224 		break;
4225 	case RX_VLAN_HEADER_INSERTION:
4226 		adapter->rx_vlan_header_insertion =
4227 		    be64_to_cpu(crq->query_capability.number);
4228 		netdev_dbg(netdev, "rx_vlan_header_insertion = %lld\n",
4229 			   adapter->rx_vlan_header_insertion);
4230 		break;
4231 	case MAX_TX_SG_ENTRIES:
4232 		adapter->max_tx_sg_entries =
4233 		    be64_to_cpu(crq->query_capability.number);
4234 		netdev_dbg(netdev, "max_tx_sg_entries = %lld\n",
4235 			   adapter->max_tx_sg_entries);
4236 		break;
4237 	case RX_SG_SUPPORTED:
4238 		adapter->rx_sg_supported =
4239 		    be64_to_cpu(crq->query_capability.number);
4240 		netdev_dbg(netdev, "rx_sg_supported = %lld\n",
4241 			   adapter->rx_sg_supported);
4242 		break;
4243 	case OPT_TX_COMP_SUB_QUEUES:
4244 		adapter->opt_tx_comp_sub_queues =
4245 		    be64_to_cpu(crq->query_capability.number);
4246 		netdev_dbg(netdev, "opt_tx_comp_sub_queues = %lld\n",
4247 			   adapter->opt_tx_comp_sub_queues);
4248 		break;
4249 	case OPT_RX_COMP_QUEUES:
4250 		adapter->opt_rx_comp_queues =
4251 		    be64_to_cpu(crq->query_capability.number);
4252 		netdev_dbg(netdev, "opt_rx_comp_queues = %lld\n",
4253 			   adapter->opt_rx_comp_queues);
4254 		break;
4255 	case OPT_RX_BUFADD_Q_PER_RX_COMP_Q:
4256 		adapter->opt_rx_bufadd_q_per_rx_comp_q =
4257 		    be64_to_cpu(crq->query_capability.number);
4258 		netdev_dbg(netdev, "opt_rx_bufadd_q_per_rx_comp_q = %lld\n",
4259 			   adapter->opt_rx_bufadd_q_per_rx_comp_q);
4260 		break;
4261 	case OPT_TX_ENTRIES_PER_SUBCRQ:
4262 		adapter->opt_tx_entries_per_subcrq =
4263 		    be64_to_cpu(crq->query_capability.number);
4264 		netdev_dbg(netdev, "opt_tx_entries_per_subcrq = %lld\n",
4265 			   adapter->opt_tx_entries_per_subcrq);
4266 		break;
4267 	case OPT_RXBA_ENTRIES_PER_SUBCRQ:
4268 		adapter->opt_rxba_entries_per_subcrq =
4269 		    be64_to_cpu(crq->query_capability.number);
4270 		netdev_dbg(netdev, "opt_rxba_entries_per_subcrq = %lld\n",
4271 			   adapter->opt_rxba_entries_per_subcrq);
4272 		break;
4273 	case TX_RX_DESC_REQ:
4274 		adapter->tx_rx_desc_req = crq->query_capability.number;
4275 		netdev_dbg(netdev, "tx_rx_desc_req = %llx\n",
4276 			   adapter->tx_rx_desc_req);
4277 		break;
4278 
4279 	default:
4280 		netdev_err(netdev, "Got invalid cap rsp %d\n",
4281 			   be16_to_cpu(crq->query_capability.capability));
4282 	}
4283 
4284 out:
4285 	if (atomic_read(&adapter->running_cap_crqs) == 0) {
4286 		adapter->wait_capability = false;
4287 		ibmvnic_send_req_caps(adapter, 0);
4288 	}
4289 }
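
/* Once running_cap_crqs reaches zero here, the query phase is complete
 * and ibmvnic_send_req_caps() starts the request phase (retry not set),
 * whose responses are then handled by handle_request_cap_rsp() above.
 */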
4290 
4291 static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
4292 			       struct ibmvnic_adapter *adapter)
4293 {
4294 	struct ibmvnic_generic_crq *gen_crq = &crq->generic;
4295 	struct net_device *netdev = adapter->netdev;
4296 	struct device *dev = &adapter->vdev->dev;
4297 	u64 *u64_crq = (u64 *)crq;
4298 	long rc;
4299 
4300 	netdev_dbg(netdev, "Handling CRQ: %016lx %016lx\n",
4301 		   (unsigned long)cpu_to_be64(u64_crq[0]),
4302 		   (unsigned long)cpu_to_be64(u64_crq[1]));
4303 	switch (gen_crq->first) {
4304 	case IBMVNIC_CRQ_INIT_RSP:
4305 		switch (gen_crq->cmd) {
4306 		case IBMVNIC_CRQ_INIT:
4307 			dev_info(dev, "Partner initialized\n");
4308 			adapter->from_passive_init = true;
4309 			adapter->failover_pending = false;
4310 			if (!completion_done(&adapter->init_done)) {
4311 				complete(&adapter->init_done);
4312 				adapter->init_done_rc = -EIO;
4313 			}
4314 			ibmvnic_reset(adapter, VNIC_RESET_FAILOVER);
4315 			break;
4316 		case IBMVNIC_CRQ_INIT_COMPLETE:
4317 			dev_info(dev, "Partner initialization complete\n");
4318 			adapter->crq.active = true;
4319 			send_version_xchg(adapter);
4320 			break;
4321 		default:
4322 			dev_err(dev, "Unknown crq cmd: %d\n", gen_crq->cmd);
4323 		}
4324 		return;
4325 	case IBMVNIC_CRQ_XPORT_EVENT:
4326 		netif_carrier_off(netdev);
4327 		adapter->crq.active = false;
4328 		if (adapter->resetting)
4329 			adapter->force_reset_recovery = true;
4330 		if (gen_crq->cmd == IBMVNIC_PARTITION_MIGRATED) {
4331 			dev_info(dev, "Migrated, re-enabling adapter\n");
4332 			ibmvnic_reset(adapter, VNIC_RESET_MOBILITY);
4333 		} else if (gen_crq->cmd == IBMVNIC_DEVICE_FAILOVER) {
4334 			dev_info(dev, "Backing device failover detected\n");
4335 			adapter->failover_pending = true;
4336 		} else {
4337 			/* The adapter lost the connection */
4338 			dev_err(dev, "Virtual Adapter failed (rc=%d)\n",
4339 				gen_crq->cmd);
4340 			ibmvnic_reset(adapter, VNIC_RESET_FATAL);
4341 		}
4342 		return;
4343 	case IBMVNIC_CRQ_CMD_RSP:
4344 		break;
4345 	default:
4346 		dev_err(dev, "Got an invalid msg type 0x%02x\n",
4347 			gen_crq->first);
4348 		return;
4349 	}
4350 
4351 	switch (gen_crq->cmd) {
4352 	case VERSION_EXCHANGE_RSP:
4353 		rc = crq->version_exchange_rsp.rc.code;
4354 		if (rc) {
4355 			dev_err(dev, "Error %ld in VERSION_EXCHG_RSP\n", rc);
4356 			break;
4357 		}
4358 		dev_info(dev, "Partner protocol version is %d\n",
4359 			 be16_to_cpu(crq->version_exchange_rsp.version));
4360 		if (be16_to_cpu(crq->version_exchange_rsp.version) <
4361 		    ibmvnic_version)
4362 			ibmvnic_version =
4363 			    be16_to_cpu(crq->version_exchange_rsp.version);
4364 		send_cap_queries(adapter);
4365 		break;
4366 	case QUERY_CAPABILITY_RSP:
4367 		handle_query_cap_rsp(crq, adapter);
4368 		break;
4369 	case QUERY_MAP_RSP:
4370 		handle_query_map_rsp(crq, adapter);
4371 		break;
4372 	case REQUEST_MAP_RSP:
4373 		adapter->fw_done_rc = crq->request_map_rsp.rc.code;
4374 		complete(&adapter->fw_done);
4375 		break;
4376 	case REQUEST_UNMAP_RSP:
4377 		handle_request_unmap_rsp(crq, adapter);
4378 		break;
4379 	case REQUEST_CAPABILITY_RSP:
4380 		handle_request_cap_rsp(crq, adapter);
4381 		break;
4382 	case LOGIN_RSP:
4383 		netdev_dbg(netdev, "Got Login Response\n");
4384 		handle_login_rsp(crq, adapter);
4385 		break;
4386 	case LOGICAL_LINK_STATE_RSP:
4387 		netdev_dbg(netdev,
4388 			   "Got Logical Link State Response, state: %d rc: %d\n",
4389 			   crq->logical_link_state_rsp.link_state,
4390 			   crq->logical_link_state_rsp.rc.code);
4391 		adapter->logical_link_state =
4392 		    crq->logical_link_state_rsp.link_state;
4393 		adapter->init_done_rc = crq->logical_link_state_rsp.rc.code;
4394 		complete(&adapter->init_done);
4395 		break;
4396 	case LINK_STATE_INDICATION:
4397 		netdev_dbg(netdev, "Got Logical Link State Indication\n");
4398 		adapter->phys_link_state =
4399 		    crq->link_state_indication.phys_link_state;
4400 		adapter->logical_link_state =
4401 		    crq->link_state_indication.logical_link_state;
4402 		break;
4403 	case CHANGE_MAC_ADDR_RSP:
4404 		netdev_dbg(netdev, "Got MAC address change Response\n");
4405 		adapter->fw_done_rc = handle_change_mac_rsp(crq, adapter);
4406 		break;
4407 	case ERROR_INDICATION:
4408 		netdev_dbg(netdev, "Got Error Indication\n");
4409 		handle_error_indication(crq, adapter);
4410 		break;
4411 	case REQUEST_STATISTICS_RSP:
4412 		netdev_dbg(netdev, "Got Statistics Response\n");
4413 		complete(&adapter->stats_done);
4414 		break;
4415 	case QUERY_IP_OFFLOAD_RSP:
4416 		netdev_dbg(netdev, "Got Query IP offload Response\n");
4417 		handle_query_ip_offload_rsp(adapter);
4418 		break;
4419 	case MULTICAST_CTRL_RSP:
4420 		netdev_dbg(netdev, "Got multicast control Response\n");
4421 		break;
4422 	case CONTROL_IP_OFFLOAD_RSP:
4423 		netdev_dbg(netdev, "Got Control IP offload Response\n");
4424 		dma_unmap_single(dev, adapter->ip_offload_ctrl_tok,
4425 				 sizeof(adapter->ip_offload_ctrl),
4426 				 DMA_TO_DEVICE);
4427 		complete(&adapter->init_done);
4428 		break;
4429 	case COLLECT_FW_TRACE_RSP:
4430 		netdev_dbg(netdev, "Got Collect firmware trace Response\n");
4431 		complete(&adapter->fw_done);
4432 		break;
4433 	case GET_VPD_SIZE_RSP:
4434 		handle_vpd_size_rsp(crq, adapter);
4435 		break;
4436 	case GET_VPD_RSP:
4437 		handle_vpd_rsp(crq, adapter);
4438 		break;
4439 	default:
4440 		netdev_err(netdev, "Got an invalid cmd type 0x%02x\n",
4441 			   gen_crq->cmd);
4442 	}
4443 }
4444 
4445 static irqreturn_t ibmvnic_interrupt(int irq, void *instance)
4446 {
4447 	struct ibmvnic_adapter *adapter = instance;
4448 
4449 	tasklet_schedule(&adapter->tasklet);
4450 	return IRQ_HANDLED;
4451 }
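
/* The hard interrupt handler above does nothing but schedule the
 * tasklet; all CRQ processing happens in ibmvnic_tasklet() below, where
 * the queue lock can be taken and many messages drained per interrupt.
 */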
4452 
4453 static void ibmvnic_tasklet(unsigned long data)
4454 {
4455 	struct ibmvnic_adapter *adapter = (struct ibmvnic_adapter *)data;
4456 	struct ibmvnic_crq_queue *queue = &adapter->crq;
4457 	union ibmvnic_crq *crq;
4458 	unsigned long flags;
4459 	bool done = false;
4460 
4461 	spin_lock_irqsave(&queue->lock, flags);
4462 	while (!done) {
4463 		/* Pull all the valid messages off the CRQ */
4464 		while ((crq = ibmvnic_next_crq(adapter)) != NULL) {
4465 			ibmvnic_handle_crq(crq, adapter);
4466 			crq->generic.first = 0;
4467 		}
4468 
4469 		/* remain in tasklet until all
4470 		 * capabilities responses are received
4471 		 */
4472 		if (!adapter->wait_capability)
4473 			done = true;
4474 	}
4475 	/* if capability CRQs were sent in this tasklet, the next tasklet run
4476 	 * must wait until all responses are received
4477 	 */
4478 	if (atomic_read(&adapter->running_cap_crqs) != 0)
4479 		adapter->wait_capability = true;
4480 	spin_unlock_irqrestore(&queue->lock, flags);
4481 }
4482 
4483 static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *adapter)
4484 {
4485 	struct vio_dev *vdev = adapter->vdev;
4486 	int rc;
4487 
4488 	do {
4489 		rc = plpar_hcall_norets(H_ENABLE_CRQ, vdev->unit_address);
4490 	} while (rc == H_IN_PROGRESS || rc == H_BUSY || H_IS_LONG_BUSY(rc));
4491 
4492 	if (rc)
4493 		dev_err(&vdev->dev, "Error enabling adapter (rc=%d)\n", rc);
4494 
4495 	return rc;
4496 }
4497 
4498 static int ibmvnic_reset_crq(struct ibmvnic_adapter *adapter)
4499 {
4500 	struct ibmvnic_crq_queue *crq = &adapter->crq;
4501 	struct device *dev = &adapter->vdev->dev;
4502 	struct vio_dev *vdev = adapter->vdev;
4503 	int rc;
4504 
4505 	/* Close the CRQ */
4506 	do {
4507 		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
4508 	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
4509 
4510 	/* Clean out the queue */
4511 	memset(crq->msgs, 0, PAGE_SIZE);
4512 	crq->cur = 0;
4513 	crq->active = false;
4514 
4515 	/* And re-open it again */
4516 	rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
4517 				crq->msg_token, PAGE_SIZE);
4518 
4519 	if (rc == H_CLOSED)
4520 		/* Adapter is good, but other end is not ready */
4521 		dev_warn(dev, "Partner adapter not ready\n");
4522 	else if (rc != 0)
4523 		dev_warn(dev, "Couldn't register crq (rc=%d)\n", rc);
4524 
4525 	return rc;
4526 }
4527 
4528 static void release_crq_queue(struct ibmvnic_adapter *adapter)
4529 {
4530 	struct ibmvnic_crq_queue *crq = &adapter->crq;
4531 	struct vio_dev *vdev = adapter->vdev;
4532 	long rc;
4533 
4534 	if (!crq->msgs)
4535 		return;
4536 
4537 	netdev_dbg(adapter->netdev, "Releasing CRQ\n");
4538 	free_irq(vdev->irq, adapter);
4539 	tasklet_kill(&adapter->tasklet);
4540 	do {
4541 		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
4542 	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
4543 
4544 	dma_unmap_single(&vdev->dev, crq->msg_token, PAGE_SIZE,
4545 			 DMA_BIDIRECTIONAL);
4546 	free_page((unsigned long)crq->msgs);
4547 	crq->msgs = NULL;
4548 	crq->active = false;
4549 }
4550 
4551 static int init_crq_queue(struct ibmvnic_adapter *adapter)
4552 {
4553 	struct ibmvnic_crq_queue *crq = &adapter->crq;
4554 	struct device *dev = &adapter->vdev->dev;
4555 	struct vio_dev *vdev = adapter->vdev;
4556 	int rc, retrc = -ENOMEM;
4557 
4558 	if (crq->msgs)
4559 		return 0;
4560 
4561 	crq->msgs = (union ibmvnic_crq *)get_zeroed_page(GFP_KERNEL);
4562 	/* Should we allocate more than one page? */
4563 
4564 	if (!crq->msgs)
4565 		return -ENOMEM;
4566 
4567 	crq->size = PAGE_SIZE / sizeof(*crq->msgs);
4568 	crq->msg_token = dma_map_single(dev, crq->msgs, PAGE_SIZE,
4569 					DMA_BIDIRECTIONAL);
4570 	if (dma_mapping_error(dev, crq->msg_token))
4571 		goto map_failed;
4572 
4573 	rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
4574 				crq->msg_token, PAGE_SIZE);
4575 
4576 	if (rc == H_RESOURCE)
4577 		/* maybe kexecing and resource is busy. try a reset */
4578 		rc = ibmvnic_reset_crq(adapter);
4579 	retrc = rc;
4580 
4581 	if (rc == H_CLOSED) {
4582 		dev_warn(dev, "Partner adapter not ready\n");
4583 	} else if (rc) {
4584 		dev_warn(dev, "Error %d opening adapter\n", rc);
4585 		goto reg_crq_failed;
4586 	}
4587 
4588 	retrc = 0;
4589 
4590 	tasklet_init(&adapter->tasklet, ibmvnic_tasklet,
4591 		     (unsigned long)adapter);
4592 
4593 	netdev_dbg(adapter->netdev, "registering irq 0x%x\n", vdev->irq);
4594 	rc = request_irq(vdev->irq, ibmvnic_interrupt, 0, IBMVNIC_NAME,
4595 			 adapter);
4596 	if (rc) {
4597 		dev_err(dev, "Couldn't register irq 0x%x. rc=%d\n",
4598 			vdev->irq, rc);
4599 		goto req_irq_failed;
4600 	}
4601 
4602 	rc = vio_enable_interrupts(vdev);
4603 	if (rc) {
4604 		dev_err(dev, "Error %d enabling interrupts\n", rc);
4605 		goto req_irq_failed;
4606 	}
4607 
4608 	crq->cur = 0;
4609 	spin_lock_init(&crq->lock);
4610 
4611 	return retrc;
4612 
4613 req_irq_failed:
4614 	tasklet_kill(&adapter->tasklet);
4615 	do {
4616 		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
4617 	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
4618 reg_crq_failed:
4619 	dma_unmap_single(dev, crq->msg_token, PAGE_SIZE, DMA_BIDIRECTIONAL);
4620 map_failed:
4621 	free_page((unsigned long)crq->msgs);
4622 	crq->msgs = NULL;
4623 	return retrc;
4624 }
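
/* A single zeroed page backs the queue, so crq->size comes out to
 * PAGE_SIZE / sizeof(union ibmvnic_crq) entries, e.g. 256 16-byte
 * entries with 4K pages (illustrative; pseries kernels commonly use
 * 64K pages).
 */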
4625 
4626 static int ibmvnic_reset_init(struct ibmvnic_adapter *adapter)
4627 {
4628 	struct device *dev = &adapter->vdev->dev;
4629 	unsigned long timeout = msecs_to_jiffies(30000);
4630 	u64 old_num_rx_queues, old_num_tx_queues;
4631 	int rc;
4632 
4633 	adapter->from_passive_init = false;
4634 
4635 	old_num_rx_queues = adapter->req_rx_queues;
4636 	old_num_tx_queues = adapter->req_tx_queues;
4637 
4638 	init_completion(&adapter->init_done);
4639 	adapter->init_done_rc = 0;
4640 	ibmvnic_send_crq_init(adapter);
4641 	if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
4642 		dev_err(dev, "Initialization sequence timed out\n");
4643 		return -1;
4644 	}
4645 
4646 	if (adapter->init_done_rc) {
4647 		release_crq_queue(adapter);
4648 		return adapter->init_done_rc;
4649 	}
4650 
4651 	if (adapter->from_passive_init) {
4652 		adapter->state = VNIC_OPEN;
4653 		adapter->from_passive_init = false;
4654 		return -1;
4655 	}
4656 
4657 	if (adapter->resetting && !adapter->wait_for_reset &&
4658 	    adapter->reset_reason != VNIC_RESET_MOBILITY) {
4659 		if (adapter->req_rx_queues != old_num_rx_queues ||
4660 		    adapter->req_tx_queues != old_num_tx_queues) {
4661 			release_sub_crqs(adapter, 0);
4662 			rc = init_sub_crqs(adapter);
4663 		} else {
4664 			rc = reset_sub_crq_queues(adapter);
4665 		}
4666 	} else {
4667 		rc = init_sub_crqs(adapter);
4668 	}
4669 
4670 	if (rc) {
4671 		dev_err(dev, "Initialization of sub crqs failed\n");
4672 		release_crq_queue(adapter);
4673 		return rc;
4674 	}
4675 
4676 	rc = init_sub_crq_irqs(adapter);
4677 	if (rc) {
4678 		dev_err(dev, "Failed to initialize sub crq irqs\n");
4679 		release_crq_queue(adapter);
4680 	}
4681 
4682 	return rc;
4683 }
4684 
4685 static int ibmvnic_init(struct ibmvnic_adapter *adapter)
4686 {
4687 	struct device *dev = &adapter->vdev->dev;
4688 	unsigned long timeout = msecs_to_jiffies(30000);
4689 	int rc;
4690 
4691 	adapter->from_passive_init = false;
4692 
4693 	init_completion(&adapter->init_done);
4694 	adapter->init_done_rc = 0;
4695 	ibmvnic_send_crq_init(adapter);
4696 	if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
4697 		dev_err(dev, "Initialization sequence timed out\n");
4698 		return -1;
4699 	}
4700 
4701 	if (adapter->init_done_rc) {
4702 		release_crq_queue(adapter);
4703 		return adapter->init_done_rc;
4704 	}
4705 
4706 	if (adapter->from_passive_init) {
4707 		adapter->state = VNIC_OPEN;
4708 		adapter->from_passive_init = false;
4709 		return -1;
4710 	}
4711 
4712 	rc = init_sub_crqs(adapter);
4713 	if (rc) {
4714 		dev_err(dev, "Initialization of sub crqs failed\n");
4715 		release_crq_queue(adapter);
4716 		return rc;
4717 	}
4718 
4719 	rc = init_sub_crq_irqs(adapter);
4720 	if (rc) {
4721 		dev_err(dev, "Failed to initialize sub crq irqs\n");
4722 		release_crq_queue(adapter);
4723 	}
4724 
4725 	return rc;
4726 }
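
/* ibmvnic_init() is the probe-time counterpart of ibmvnic_reset_init()
 * above: the CRQ handshake is identical, but sub-CRQs are always
 * allocated fresh since there is no previous queue configuration to
 * compare against or reuse.
 */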
4727 
4728 static struct device_attribute dev_attr_failover;
4729 
4730 static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
4731 {
4732 	struct ibmvnic_adapter *adapter;
4733 	struct net_device *netdev;
4734 	unsigned char *mac_addr_p;
4735 	int rc;
4736 
4737 	dev_dbg(&dev->dev, "entering ibmvnic_probe for UA 0x%x\n",
4738 		dev->unit_address);
4739 
4740 	mac_addr_p = (unsigned char *)vio_get_attribute(dev,
4741 							VETH_MAC_ADDR, NULL);
4742 	if (!mac_addr_p) {
4743 		dev_err(&dev->dev,
4744 			"(%s:%3.3d) ERROR: Can't find MAC_ADDR attribute\n",
4745 			__FILE__, __LINE__);
4746 		return 0;
4747 		return -EINVAL;
4748 
4749 	netdev = alloc_etherdev_mq(sizeof(struct ibmvnic_adapter),
4750 				   IBMVNIC_MAX_QUEUES);
4751 	if (!netdev)
4752 		return -ENOMEM;
4753 
4754 	adapter = netdev_priv(netdev);
4755 	adapter->state = VNIC_PROBING;
4756 	dev_set_drvdata(&dev->dev, netdev);
4757 	adapter->vdev = dev;
4758 	adapter->netdev = netdev;
4759 
4760 	ether_addr_copy(adapter->mac_addr, mac_addr_p);
4761 	ether_addr_copy(netdev->dev_addr, adapter->mac_addr);
4762 	netdev->irq = dev->irq;
4763 	netdev->netdev_ops = &ibmvnic_netdev_ops;
4764 	netdev->ethtool_ops = &ibmvnic_ethtool_ops;
4765 	SET_NETDEV_DEV(netdev, &dev->dev);
4766 
4767 	spin_lock_init(&adapter->stats_lock);
4768 
4769 	INIT_WORK(&adapter->ibmvnic_reset, __ibmvnic_reset);
4770 	INIT_LIST_HEAD(&adapter->rwi_list);
4771 	mutex_init(&adapter->reset_lock);
4772 	mutex_init(&adapter->rwi_lock);
4773 	adapter->resetting = false;
4774 
4775 	adapter->mac_change_pending = false;
4776 
4777 	do {
4778 		rc = init_crq_queue(adapter);
4779 		if (rc) {
4780 			dev_err(&dev->dev, "Couldn't initialize crq. rc=%d\n",
4781 				rc);
4782 			goto ibmvnic_init_fail;
4783 		}
4784 
4785 		rc = ibmvnic_init(adapter);
4786 		if (rc && rc != EAGAIN)
4787 			goto ibmvnic_init_fail;
4788 	} while (rc == EAGAIN);
4789 
4790 	rc = init_stats_buffers(adapter);
4791 	if (rc)
4792 		goto ibmvnic_init_fail;
4793 
4794 	rc = init_stats_token(adapter);
4795 	if (rc)
4796 		goto ibmvnic_stats_fail;
4797 
4798 	netdev->mtu = adapter->req_mtu - ETH_HLEN;
4799 	netdev->min_mtu = adapter->min_mtu - ETH_HLEN;
4800 	netdev->max_mtu = adapter->max_mtu - ETH_HLEN;
4801 
4802 	rc = device_create_file(&dev->dev, &dev_attr_failover);
4803 	if (rc)
4804 		goto ibmvnic_dev_file_err;
4805 
4806 	netif_carrier_off(netdev);
4807 	rc = register_netdev(netdev);
4808 	if (rc) {
4809 		dev_err(&dev->dev, "failed to register netdev rc=%d\n", rc);
4810 		goto ibmvnic_register_fail;
4811 	}
4812 	dev_info(&dev->dev, "ibmvnic registered\n");
4813 
4814 	adapter->state = VNIC_PROBED;
4815 
4816 	adapter->wait_for_reset = false;
4817 
4818 	return 0;
4819 
4820 ibmvnic_register_fail:
4821 	device_remove_file(&dev->dev, &dev_attr_failover);
4822 
4823 ibmvnic_dev_file_err:
4824 	release_stats_token(adapter);
4825 
4826 ibmvnic_stats_fail:
4827 	release_stats_buffers(adapter);
4828 
4829 ibmvnic_init_fail:
4830 	release_sub_crqs(adapter, 1);
4831 	release_crq_queue(adapter);
4832 	free_netdev(netdev);
4833 
4834 	return rc;
4835 }
4836 
4837 static int ibmvnic_remove(struct vio_dev *dev)
4838 {
4839 	struct net_device *netdev = dev_get_drvdata(&dev->dev);
4840 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
4841 
4842 	adapter->state = VNIC_REMOVING;
4843 	unregister_netdev(netdev);
4844 	mutex_lock(&adapter->reset_lock);
4845 
4846 	release_resources(adapter);
4847 	release_sub_crqs(adapter, 1);
4848 	release_crq_queue(adapter);
4849 
4850 	release_stats_token(adapter);
4851 	release_stats_buffers(adapter);
4852 
4853 	adapter->state = VNIC_REMOVED;
4854 
4855 	mutex_unlock(&adapter->reset_lock);
4856 	device_remove_file(&dev->dev, &dev_attr_failover);
4857 	free_netdev(netdev);
4858 	dev_set_drvdata(&dev->dev, NULL);
4859 
4860 	return 0;
4861 }
4862 
4863 static ssize_t failover_store(struct device *dev, struct device_attribute *attr,
4864 			      const char *buf, size_t count)
4865 {
4866 	struct net_device *netdev = dev_get_drvdata(dev);
4867 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
4868 	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
4869 	__be64 session_token;
4870 	long rc;
4871 
4872 	if (!sysfs_streq(buf, "1"))
4873 		return -EINVAL;
4874 
4875 	rc = plpar_hcall(H_VIOCTL, retbuf, adapter->vdev->unit_address,
4876 			 H_GET_SESSION_TOKEN, 0, 0, 0);
4877 	if (rc) {
4878 		netdev_err(netdev, "Couldn't retrieve session token, rc %ld\n",
4879 			   rc);
4880 		return -EINVAL;
4881 	}
4882 
4883 	session_token = (__be64)retbuf[0];
4884 	netdev_dbg(netdev, "Initiating client failover, session id %llx\n",
4885 		   be64_to_cpu(session_token));
4886 	rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
4887 				H_SESSION_ERR_DETECTED, session_token, 0, 0);
4888 	if (rc) {
4889 		netdev_err(netdev, "Client initiated failover failed, rc %ld\n",
4890 			   rc);
4891 		return -EINVAL;
4892 	}
4893 
4894 	return count;
4895 }
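
/* With dev_attr_failover registered in ibmvnic_probe(), a failover can
 * be requested from user space by writing "1" to the device's sysfs
 * attribute, e.g. (illustrative path; the unit address varies):
 *
 *	echo 1 > /sys/bus/vio/devices/30000003/failover
 */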
4896 
4897 static DEVICE_ATTR_WO(failover);
4898 
4899 static unsigned long ibmvnic_get_desired_dma(struct vio_dev *vdev)
4900 {
4901 	struct net_device *netdev = dev_get_drvdata(&vdev->dev);
4902 	struct ibmvnic_adapter *adapter;
4903 	struct iommu_table *tbl;
4904 	unsigned long ret = 0;
4905 	int i;
4906 
4907 	tbl = get_iommu_table_base(&vdev->dev);
4908 
4909 	/* netdev inits at probe time along with the structures we need below */
4910 	if (!netdev)
4911 		return IOMMU_PAGE_ALIGN(IBMVNIC_IO_ENTITLEMENT_DEFAULT, tbl);
4912 
4913 	adapter = netdev_priv(netdev);
4914 
4915 	ret += PAGE_SIZE; /* the crq message queue */
4916 	ret += IOMMU_PAGE_ALIGN(sizeof(struct ibmvnic_statistics), tbl);
4917 
4918 	for (i = 0; i < adapter->req_tx_queues + adapter->req_rx_queues; i++)
4919 		ret += 4 * PAGE_SIZE; /* the scrq message queue */
4920 
4921 	for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
4922 	     i++)
4923 		ret += adapter->rx_pool[i].size *
4924 		    IOMMU_PAGE_ALIGN(adapter->rx_pool[i].buff_size, tbl);
4925 
4926 	return ret;
4927 }
4928 
4929 static int ibmvnic_resume(struct device *dev)
4930 {
4931 	struct net_device *netdev = dev_get_drvdata(dev);
4932 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
4933 
4934 	if (adapter->state != VNIC_OPEN)
4935 		return 0;
4936 
4937 	tasklet_schedule(&adapter->tasklet);
4938 
4939 	return 0;
4940 }
4941 
4942 static const struct vio_device_id ibmvnic_device_table[] = {
4943 	{"network", "IBM,vnic"},
4944 	{"", "" }
4945 };
4946 MODULE_DEVICE_TABLE(vio, ibmvnic_device_table);
4947 
4948 static const struct dev_pm_ops ibmvnic_pm_ops = {
4949 	.resume = ibmvnic_resume
4950 };
4951 
4952 static struct vio_driver ibmvnic_driver = {
4953 	.id_table       = ibmvnic_device_table,
4954 	.probe          = ibmvnic_probe,
4955 	.remove         = ibmvnic_remove,
4956 	.get_desired_dma = ibmvnic_get_desired_dma,
4957 	.name		= ibmvnic_driver_name,
4958 	.pm		= &ibmvnic_pm_ops,
4959 };
4960 
4961 /* module functions */
4962 static int __init ibmvnic_module_init(void)
4963 {
4964 	pr_info("%s: %s %s\n", ibmvnic_driver_name, ibmvnic_driver_string,
4965 		IBMVNIC_DRIVER_VERSION);
4966 
4967 	return vio_register_driver(&ibmvnic_driver);
4968 }
4969 
4970 static void __exit ibmvnic_module_exit(void)
4971 {
4972 	vio_unregister_driver(&ibmvnic_driver);
4973 }
4974 
4975 module_init(ibmvnic_module_init);
4976 module_exit(ibmvnic_module_exit);
4977