xref: /linux/drivers/net/ethernet/ibm/ibmvnic.c (revision f7308991bfeea3f6a4c6281c64fc1ba9dc6e56b3)
1 /**************************************************************************/
2 /*                                                                        */
3 /*  IBM System i and System p Virtual NIC Device Driver                   */
4 /*  Copyright (C) 2014 IBM Corp.                                          */
5 /*  Santiago Leon (santi_leon@yahoo.com)                                  */
6 /*  Thomas Falcon (tlfalcon@linux.vnet.ibm.com)                           */
7 /*  John Allen (jallen@linux.vnet.ibm.com)                                */
8 /*                                                                        */
9 /*  This program is free software; you can redistribute it and/or modify  */
10 /*  it under the terms of the GNU General Public License as published by  */
11 /*  the Free Software Foundation; either version 2 of the License, or     */
12 /*  (at your option) any later version.                                   */
13 /*                                                                        */
14 /*  This program is distributed in the hope that it will be useful,       */
15 /*  but WITHOUT ANY WARRANTY; without even the implied warranty of        */
16 /*  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the         */
17 /*  GNU General Public License for more details.                          */
18 /*                                                                        */
19 /*  You should have received a copy of the GNU General Public License     */
20 /*  along with this program.                                              */
21 /*                                                                        */
22 /* This module contains the implementation of a virtual ethernet device   */
23 /* for use with IBM i/p Series LPAR Linux. It utilizes the logical LAN    */
24 /* option of the RS/6000 Platform Architecture to interface with virtual  */
25 /* ethernet NICs that are presented to the partition by the hypervisor.   */
26 /*                                                                        */
27 /* Messages are passed between the VNIC driver and the VNIC server using  */
28 /* Command/Response Queues (CRQs) and sub CRQs (sCRQs). CRQs are used to  */
29 /* issue and receive commands that initiate communication with the server */
30 /* on driver initialization. Sub CRQs (sCRQs) are similar to CRQs, but    */
31 /* are used by the driver to notify the server that a packet is           */
32 /* ready for transmission or that a buffer has been added to receive a    */
33 /* packet. Subsequently, sCRQs are used by the server to notify the       */
34 /* driver that a packet transmission has been completed or that a packet  */
35 /* has been received and placed in a waiting buffer.                      */
36 /*                                                                        */
37 /* In lieu of a more conventional "on-the-fly" DMA mapping strategy in    */
38 /* which skbs are DMA mapped and immediately unmapped when the transmit   */
39 /* or receive has been completed, the VNIC driver is required to use      */
40 /*  "long term mapping". This means that large, contiguous, DMA-mapped    */
41 /* buffers are allocated on driver initialization and these buffers are   */
42 /* then continuously reused to pass skbs to and from the VNIC server.     */
43 /*                                                                        */
44 /**************************************************************************/
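/* A minimal sketch of the long term mapping flow described above, using
 * the helpers defined later in this file (error handling omitted):
 *
 *	struct ibmvnic_long_term_buff ltb;
 *
 *	alloc_long_term_buff(adapter, &ltb, size);	// dma_alloc_coherent()
 *							// + REQUEST_MAP command
 *	// skbs are copied to/from fixed offsets within ltb.buff
 *	free_long_term_buff(adapter, &ltb);		// REQUEST_UNMAP + free
 */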
45 
46 #include <linux/module.h>
47 #include <linux/moduleparam.h>
48 #include <linux/types.h>
49 #include <linux/errno.h>
50 #include <linux/completion.h>
51 #include <linux/ioport.h>
52 #include <linux/dma-mapping.h>
53 #include <linux/kernel.h>
54 #include <linux/netdevice.h>
55 #include <linux/etherdevice.h>
56 #include <linux/skbuff.h>
57 #include <linux/init.h>
58 #include <linux/delay.h>
59 #include <linux/mm.h>
60 #include <linux/ethtool.h>
61 #include <linux/proc_fs.h>
62 #include <linux/if_arp.h>
63 #include <linux/in.h>
64 #include <linux/ip.h>
65 #include <linux/ipv6.h>
66 #include <linux/irq.h>
67 #include <linux/kthread.h>
68 #include <linux/seq_file.h>
69 #include <linux/interrupt.h>
70 #include <net/net_namespace.h>
71 #include <asm/hvcall.h>
72 #include <linux/atomic.h>
73 #include <asm/vio.h>
74 #include <asm/iommu.h>
75 #include <linux/uaccess.h>
76 #include <asm/firmware.h>
77 #include <linux/workqueue.h>
78 #include <linux/if_vlan.h>
79 #include <linux/utsname.h>
80 
81 #include "ibmvnic.h"
82 
83 static const char ibmvnic_driver_name[] = "ibmvnic";
84 static const char ibmvnic_driver_string[] = "IBM System i/p Virtual NIC Driver";
85 
86 MODULE_AUTHOR("Santiago Leon");
87 MODULE_DESCRIPTION("IBM System i/p Virtual NIC Driver");
88 MODULE_LICENSE("GPL");
89 MODULE_VERSION(IBMVNIC_DRIVER_VERSION);
90 
91 static int ibmvnic_version = IBMVNIC_INITIAL_VERSION;
92 static int ibmvnic_remove(struct vio_dev *);
93 static void release_sub_crqs(struct ibmvnic_adapter *, bool);
94 static int ibmvnic_reset_crq(struct ibmvnic_adapter *);
95 static int ibmvnic_send_crq_init(struct ibmvnic_adapter *);
96 static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *);
97 static int ibmvnic_send_crq(struct ibmvnic_adapter *, union ibmvnic_crq *);
98 static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle,
99 		       union sub_crq *sub_crq);
100 static int send_subcrq_indirect(struct ibmvnic_adapter *, u64, u64, u64);
101 static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance);
102 static int enable_scrq_irq(struct ibmvnic_adapter *,
103 			   struct ibmvnic_sub_crq_queue *);
104 static int disable_scrq_irq(struct ibmvnic_adapter *,
105 			    struct ibmvnic_sub_crq_queue *);
106 static int pending_scrq(struct ibmvnic_adapter *,
107 			struct ibmvnic_sub_crq_queue *);
108 static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *,
109 					struct ibmvnic_sub_crq_queue *);
110 static int ibmvnic_poll(struct napi_struct *napi, int budget);
111 static void send_map_query(struct ibmvnic_adapter *adapter);
112 static void send_request_map(struct ibmvnic_adapter *, dma_addr_t, __be32, u8);
113 static void send_request_unmap(struct ibmvnic_adapter *, u8);
114 static void send_login(struct ibmvnic_adapter *adapter);
115 static void send_cap_queries(struct ibmvnic_adapter *adapter);
116 static int init_sub_crqs(struct ibmvnic_adapter *);
117 static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter);
118 static int ibmvnic_init(struct ibmvnic_adapter *);
119 static void release_crq_queue(struct ibmvnic_adapter *);
120 static int __ibmvnic_set_mac(struct net_device *netdev, struct sockaddr *p);
121 
122 struct ibmvnic_stat {
123 	char name[ETH_GSTRING_LEN];
124 	int offset;
125 };
126 
127 #define IBMVNIC_STAT_OFF(stat) (offsetof(struct ibmvnic_adapter, stats) + \
128 			     offsetof(struct ibmvnic_statistics, stat))
129 #define IBMVNIC_GET_STAT(a, off) (*((u64 *)(((unsigned long)(a)) + off)))
130 
131 static const struct ibmvnic_stat ibmvnic_stats[] = {
132 	{"rx_packets", IBMVNIC_STAT_OFF(rx_packets)},
133 	{"rx_bytes", IBMVNIC_STAT_OFF(rx_bytes)},
134 	{"tx_packets", IBMVNIC_STAT_OFF(tx_packets)},
135 	{"tx_bytes", IBMVNIC_STAT_OFF(tx_bytes)},
136 	{"ucast_tx_packets", IBMVNIC_STAT_OFF(ucast_tx_packets)},
137 	{"ucast_rx_packets", IBMVNIC_STAT_OFF(ucast_rx_packets)},
138 	{"mcast_tx_packets", IBMVNIC_STAT_OFF(mcast_tx_packets)},
139 	{"mcast_rx_packets", IBMVNIC_STAT_OFF(mcast_rx_packets)},
140 	{"bcast_tx_packets", IBMVNIC_STAT_OFF(bcast_tx_packets)},
141 	{"bcast_rx_packets", IBMVNIC_STAT_OFF(bcast_rx_packets)},
142 	{"align_errors", IBMVNIC_STAT_OFF(align_errors)},
143 	{"fcs_errors", IBMVNIC_STAT_OFF(fcs_errors)},
144 	{"single_collision_frames", IBMVNIC_STAT_OFF(single_collision_frames)},
145 	{"multi_collision_frames", IBMVNIC_STAT_OFF(multi_collision_frames)},
146 	{"sqe_test_errors", IBMVNIC_STAT_OFF(sqe_test_errors)},
147 	{"deferred_tx", IBMVNIC_STAT_OFF(deferred_tx)},
148 	{"late_collisions", IBMVNIC_STAT_OFF(late_collisions)},
149 	{"excess_collisions", IBMVNIC_STAT_OFF(excess_collisions)},
150 	{"internal_mac_tx_errors", IBMVNIC_STAT_OFF(internal_mac_tx_errors)},
151 	{"carrier_sense", IBMVNIC_STAT_OFF(carrier_sense)},
152 	{"too_long_frames", IBMVNIC_STAT_OFF(too_long_frames)},
153 	{"internal_mac_rx_errors", IBMVNIC_STAT_OFF(internal_mac_rx_errors)},
154 };
155 
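/* Thin wrapper around the H_REG_SUB_CRQ hypervisor call: registers the page
 * at @token (@length bytes) as a sub-CRQ for @unit_address, and returns the
 * assigned sub-CRQ number and interrupt source from the hcall return buffer
 * through @number and @irq.
 */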
156 static long h_reg_sub_crq(unsigned long unit_address, unsigned long token,
157 			  unsigned long length, unsigned long *number,
158 			  unsigned long *irq)
159 {
160 	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
161 	long rc;
162 
163 	rc = plpar_hcall(H_REG_SUB_CRQ, retbuf, unit_address, token, length);
164 	*number = retbuf[0];
165 	*irq = retbuf[1];
166 
167 	return rc;
168 }
169 
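/* Allocate a long term buffer: reserve a coherent DMA region, assign it the
 * adapter's next free map id, and register it with the VNIC server via a
 * REQUEST_MAP command, waiting on fw_done for the server's response.
 */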
170 static int alloc_long_term_buff(struct ibmvnic_adapter *adapter,
171 				struct ibmvnic_long_term_buff *ltb, int size)
172 {
173 	struct device *dev = &adapter->vdev->dev;
174 
175 	ltb->size = size;
176 	ltb->buff = dma_alloc_coherent(dev, ltb->size, &ltb->addr,
177 				       GFP_KERNEL);
178 
179 	if (!ltb->buff) {
180 		dev_err(dev, "Couldn't alloc long term buffer\n");
181 		return -ENOMEM;
182 	}
183 	ltb->map_id = adapter->map_id;
184 	adapter->map_id++;
185 
186 	init_completion(&adapter->fw_done);
187 	send_request_map(adapter, ltb->addr,
188 			 ltb->size, ltb->map_id);
189 	wait_for_completion(&adapter->fw_done);
190 
191 	if (adapter->fw_done_rc) {
192 		dev_err(dev, "Couldn't map long term buffer, rc = %d\n",
193 			adapter->fw_done_rc);
194 		return -1;
195 	}
196 	return 0;
197 }
198 
199 static void free_long_term_buff(struct ibmvnic_adapter *adapter,
200 				struct ibmvnic_long_term_buff *ltb)
201 {
202 	struct device *dev = &adapter->vdev->dev;
203 
204 	if (!ltb->buff)
205 		return;
206 
207 	if (adapter->reset_reason != VNIC_RESET_FAILOVER &&
208 	    adapter->reset_reason != VNIC_RESET_MOBILITY)
209 		send_request_unmap(adapter, ltb->map_id);
210 	dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
211 }
212 
213 static int reset_long_term_buff(struct ibmvnic_adapter *adapter,
214 				struct ibmvnic_long_term_buff *ltb)
215 {
216 	memset(ltb->buff, 0, ltb->size);
217 
218 	init_completion(&adapter->fw_done);
219 	send_request_map(adapter, ltb->addr, ltb->size, ltb->map_id);
220 	wait_for_completion(&adapter->fw_done);
221 
222 	if (adapter->fw_done_rc) {
223 		dev_info(&adapter->vdev->dev,
224 			 "Reset failed, attempting to free and reallocate buffer\n");
225 		free_long_term_buff(adapter, ltb);
226 		return alloc_long_term_buff(adapter, ltb, ltb->size);
227 	}
228 	return 0;
229 }
230 
231 static void deactivate_rx_pools(struct ibmvnic_adapter *adapter)
232 {
233 	int i;
234 
235 	for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
236 	     i++)
237 		adapter->rx_pool[i].active = 0;
238 }
239 
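/* Fill the free slots of an rx pool: each slot gets a freshly allocated skb,
 * a fixed-offset slice of the pool's long term buffer, and an rx_add
 * descriptor posted to the pool's sub-CRQ via send_subcrq().  On H_CLOSED
 * the pools are deactivated and the carrier is reported off; firmware then
 * signals the driver to reset.
 */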
240 static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
241 			      struct ibmvnic_rx_pool *pool)
242 {
243 	int count = pool->size - atomic_read(&pool->available);
244 	struct device *dev = &adapter->vdev->dev;
245 	int buffers_added = 0;
246 	unsigned long lpar_rc;
247 	union sub_crq sub_crq;
248 	struct sk_buff *skb;
249 	unsigned int offset;
250 	dma_addr_t dma_addr;
251 	unsigned char *dst;
252 	u64 *handle_array;
253 	int shift = 0;
254 	int index;
255 	int i;
256 
257 	if (!pool->active)
258 		return;
259 
260 	handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
261 				      be32_to_cpu(adapter->login_rsp_buf->
262 				      off_rxadd_subcrqs));
263 
264 	for (i = 0; i < count; ++i) {
265 		skb = alloc_skb(pool->buff_size, GFP_ATOMIC);
266 		if (!skb) {
267 			dev_err(dev, "Couldn't replenish rx buff\n");
268 			adapter->replenish_no_mem++;
269 			break;
270 		}
271 
272 		index = pool->free_map[pool->next_free];
273 
274 		if (pool->rx_buff[index].skb)
275 			dev_err(dev, "Inconsistent free_map!\n");
276 
277 		/* Copy the skb to the long term mapped DMA buffer */
278 		offset = index * pool->buff_size;
279 		dst = pool->long_term_buff.buff + offset;
280 		memset(dst, 0, pool->buff_size);
281 		dma_addr = pool->long_term_buff.addr + offset;
282 		pool->rx_buff[index].data = dst;
283 
284 		pool->free_map[pool->next_free] = IBMVNIC_INVALID_MAP;
285 		pool->rx_buff[index].dma = dma_addr;
286 		pool->rx_buff[index].skb = skb;
287 		pool->rx_buff[index].pool_index = pool->index;
288 		pool->rx_buff[index].size = pool->buff_size;
289 
290 		memset(&sub_crq, 0, sizeof(sub_crq));
291 		sub_crq.rx_add.first = IBMVNIC_CRQ_CMD;
292 		sub_crq.rx_add.correlator =
293 		    cpu_to_be64((u64)&pool->rx_buff[index]);
294 		sub_crq.rx_add.ioba = cpu_to_be32(dma_addr);
295 		sub_crq.rx_add.map_id = pool->long_term_buff.map_id;
296 
297 		/* The length field of the sCRQ is defined to be 24 bits so the
298 		 * buffer size needs to be left shifted by a byte before it is
299 		 * converted to big endian to prevent the last byte from being
300 		 * truncated.
301 		 */
302 #ifdef __LITTLE_ENDIAN__
303 		shift = 8;
304 #endif
305 		sub_crq.rx_add.len = cpu_to_be32(pool->buff_size << shift);
306 
307 		lpar_rc = send_subcrq(adapter, handle_array[pool->index],
308 				      &sub_crq);
309 		if (lpar_rc != H_SUCCESS)
310 			goto failure;
311 
312 		buffers_added++;
313 		adapter->replenish_add_buff_success++;
314 		pool->next_free = (pool->next_free + 1) % pool->size;
315 	}
316 	atomic_add(buffers_added, &pool->available);
317 	return;
318 
319 failure:
320 	dev_info(dev, "replenish pools failure\n");
321 	pool->free_map[pool->next_free] = index;
322 	pool->rx_buff[index].skb = NULL;
323 	if (!dma_mapping_error(dev, dma_addr))
324 		dma_unmap_single(dev, dma_addr, pool->buff_size,
325 				 DMA_FROM_DEVICE);
326 
327 	dev_kfree_skb_any(skb);
328 	adapter->replenish_add_buff_failure++;
329 	atomic_add(buffers_added, &pool->available);
330 
331 	if (lpar_rc == H_CLOSED) {
332 		/* Disable buffer pool replenishment and report carrier off if
333 		 * queue is closed. Firmware guarantees that a signal will
334 		 * be sent to the driver, triggering a reset.
335 		 */
336 		deactivate_rx_pools(adapter);
337 		netif_carrier_off(adapter->netdev);
338 	}
339 }
340 
341 static void replenish_pools(struct ibmvnic_adapter *adapter)
342 {
343 	int i;
344 
345 	adapter->replenish_task_cycles++;
346 	for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
347 	     i++) {
348 		if (adapter->rx_pool[i].active)
349 			replenish_rx_pool(adapter, &adapter->rx_pool[i]);
350 	}
351 }
352 
353 static void release_stats_buffers(struct ibmvnic_adapter *adapter)
354 {
355 	kfree(adapter->tx_stats_buffers);
356 	kfree(adapter->rx_stats_buffers);
357 	adapter->tx_stats_buffers = NULL;
358 	adapter->rx_stats_buffers = NULL;
359 }
360 
361 static int init_stats_buffers(struct ibmvnic_adapter *adapter)
362 {
363 	adapter->tx_stats_buffers =
364 				kcalloc(IBMVNIC_MAX_QUEUES,
365 					sizeof(struct ibmvnic_tx_queue_stats),
366 					GFP_KERNEL);
367 	if (!adapter->tx_stats_buffers)
368 		return -ENOMEM;
369 
370 	adapter->rx_stats_buffers =
371 				kcalloc(IBMVNIC_MAX_QUEUES,
372 					sizeof(struct ibmvnic_rx_queue_stats),
373 					GFP_KERNEL);
374 	if (!adapter->rx_stats_buffers)
375 		return -ENOMEM;
376 
377 	return 0;
378 }
379 
380 static void release_stats_token(struct ibmvnic_adapter *adapter)
381 {
382 	struct device *dev = &adapter->vdev->dev;
383 
384 	if (!adapter->stats_token)
385 		return;
386 
387 	dma_unmap_single(dev, adapter->stats_token,
388 			 sizeof(struct ibmvnic_statistics),
389 			 DMA_FROM_DEVICE);
390 	adapter->stats_token = 0;
391 }
392 
393 static int init_stats_token(struct ibmvnic_adapter *adapter)
394 {
395 	struct device *dev = &adapter->vdev->dev;
396 	dma_addr_t stok;
397 
398 	stok = dma_map_single(dev, &adapter->stats,
399 			      sizeof(struct ibmvnic_statistics),
400 			      DMA_FROM_DEVICE);
401 	if (dma_mapping_error(dev, stok)) {
402 		dev_err(dev, "Couldn't map stats buffer\n");
403 		return -1;
404 	}
405 
406 	adapter->stats_token = stok;
407 	netdev_dbg(adapter->netdev, "Stats token initialized (%llx)\n", stok);
408 	return 0;
409 }
410 
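/* Re-initialize every rx pool after a reset: reallocate the long term buffer
 * if the server reported a new buffer size in the login response, otherwise
 * just re-map the existing one, then rebuild the free map and counters.
 */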
411 static int reset_rx_pools(struct ibmvnic_adapter *adapter)
412 {
413 	struct ibmvnic_rx_pool *rx_pool;
414 	int rx_scrqs;
415 	int i, j, rc;
416 	u64 *size_array;
417 
418 	size_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
419 		be32_to_cpu(adapter->login_rsp_buf->off_rxadd_buff_size));
420 
421 	rx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
422 	for (i = 0; i < rx_scrqs; i++) {
423 		rx_pool = &adapter->rx_pool[i];
424 
425 		netdev_dbg(adapter->netdev, "Re-setting rx_pool[%d]\n", i);
426 
427 		if (rx_pool->buff_size != be64_to_cpu(size_array[i])) {
428 			free_long_term_buff(adapter, &rx_pool->long_term_buff);
429 			rx_pool->buff_size = be64_to_cpu(size_array[i]);
430 			rc = alloc_long_term_buff(adapter,
431 						  &rx_pool->long_term_buff,
432 						  rx_pool->size * rx_pool->buff_size);
433 		} else {
434 			rc = reset_long_term_buff(adapter,
435 						  &rx_pool->long_term_buff);
436 		}
437 
438 		if (rc)
439 			return rc;
440 
441 		for (j = 0; j < rx_pool->size; j++)
442 			rx_pool->free_map[j] = j;
443 
444 		memset(rx_pool->rx_buff, 0,
445 		       rx_pool->size * sizeof(struct ibmvnic_rx_buff));
446 
447 		atomic_set(&rx_pool->available, 0);
448 		rx_pool->next_alloc = 0;
449 		rx_pool->next_free = 0;
450 		rx_pool->active = 1;
451 	}
452 
453 	return 0;
454 }
455 
456 static void release_rx_pools(struct ibmvnic_adapter *adapter)
457 {
458 	struct ibmvnic_rx_pool *rx_pool;
459 	int i, j;
460 
461 	if (!adapter->rx_pool)
462 		return;
463 
464 	for (i = 0; i < adapter->num_active_rx_pools; i++) {
465 		rx_pool = &adapter->rx_pool[i];
466 
467 		netdev_dbg(adapter->netdev, "Releasing rx_pool[%d]\n", i);
468 
469 		kfree(rx_pool->free_map);
470 		free_long_term_buff(adapter, &rx_pool->long_term_buff);
471 
472 		if (!rx_pool->rx_buff)
473 			continue;
474 
475 		for (j = 0; j < rx_pool->size; j++) {
476 			if (rx_pool->rx_buff[j].skb) {
477 				dev_kfree_skb_any(rx_pool->rx_buff[j].skb);
478 				rx_pool->rx_buff[j].skb = NULL;
479 			}
480 		}
481 
482 		kfree(rx_pool->rx_buff);
483 	}
484 
485 	kfree(adapter->rx_pool);
486 	adapter->rx_pool = NULL;
487 	adapter->num_active_rx_pools = 0;
488 }
489 
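/* Allocate one rx pool per rx-add sub-CRQ reported in the login response.
 * Buffer sizes come from the response's off_rxadd_buff_size array, and each
 * pool is backed by a single long term buffer of size * buff_size bytes.
 */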
490 static int init_rx_pools(struct net_device *netdev)
491 {
492 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
493 	struct device *dev = &adapter->vdev->dev;
494 	struct ibmvnic_rx_pool *rx_pool;
495 	int rxadd_subcrqs;
496 	u64 *size_array;
497 	int i, j;
498 
499 	rxadd_subcrqs =
500 		be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
501 	size_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
502 		be32_to_cpu(adapter->login_rsp_buf->off_rxadd_buff_size));
503 
504 	adapter->rx_pool = kcalloc(rxadd_subcrqs,
505 				   sizeof(struct ibmvnic_rx_pool),
506 				   GFP_KERNEL);
507 	if (!adapter->rx_pool) {
508 		dev_err(dev, "Failed to allocate rx pools\n");
509 		return -1;
510 	}
511 
512 	adapter->num_active_rx_pools = rxadd_subcrqs;
513 
514 	for (i = 0; i < rxadd_subcrqs; i++) {
515 		rx_pool = &adapter->rx_pool[i];
516 
517 		netdev_dbg(adapter->netdev,
518 			   "Initializing rx_pool[%d], %lld buffs, %lld bytes each\n",
519 			   i, adapter->req_rx_add_entries_per_subcrq,
520 			   be64_to_cpu(size_array[i]));
521 
522 		rx_pool->size = adapter->req_rx_add_entries_per_subcrq;
523 		rx_pool->index = i;
524 		rx_pool->buff_size = be64_to_cpu(size_array[i]);
525 		rx_pool->active = 1;
526 
527 		rx_pool->free_map = kcalloc(rx_pool->size, sizeof(int),
528 					    GFP_KERNEL);
529 		if (!rx_pool->free_map) {
530 			release_rx_pools(adapter);
531 			return -1;
532 		}
533 
534 		rx_pool->rx_buff = kcalloc(rx_pool->size,
535 					   sizeof(struct ibmvnic_rx_buff),
536 					   GFP_KERNEL);
537 		if (!rx_pool->rx_buff) {
538 			dev_err(dev, "Couldn't alloc rx buffers\n");
539 			release_rx_pools(adapter);
540 			return -1;
541 		}
542 
543 		if (alloc_long_term_buff(adapter, &rx_pool->long_term_buff,
544 					 rx_pool->size * rx_pool->buff_size)) {
545 			release_rx_pools(adapter);
546 			return -1;
547 		}
548 
549 		for (j = 0; j < rx_pool->size; ++j)
550 			rx_pool->free_map[j] = j;
551 
552 		atomic_set(&rx_pool->available, 0);
553 		rx_pool->next_alloc = 0;
554 		rx_pool->next_free = 0;
555 	}
556 
557 	return 0;
558 }
559 
560 static int reset_tx_pools(struct ibmvnic_adapter *adapter)
561 {
562 	struct ibmvnic_tx_pool *tx_pool;
563 	int tx_scrqs;
564 	int i, j, rc;
565 
566 	tx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
567 	for (i = 0; i < tx_scrqs; i++) {
568 		netdev_dbg(adapter->netdev, "Re-setting tx_pool[%d]\n", i);
569 
570 		tx_pool = &adapter->tx_pool[i];
571 
572 		rc = reset_long_term_buff(adapter, &tx_pool->long_term_buff);
573 		if (rc)
574 			return rc;
575 
576 		rc = reset_long_term_buff(adapter, &tx_pool->tso_ltb);
577 		if (rc)
578 			return rc;
579 
580 		memset(tx_pool->tx_buff, 0,
581 		       adapter->req_tx_entries_per_subcrq *
582 		       sizeof(struct ibmvnic_tx_buff));
583 
584 		for (j = 0; j < adapter->req_tx_entries_per_subcrq; j++)
585 			tx_pool->free_map[j] = j;
586 
587 		tx_pool->consumer_index = 0;
588 		tx_pool->producer_index = 0;
589 		tx_pool->tso_index = 0;
590 	}
591 
592 	return 0;
593 }
594 
595 static void release_vpd_data(struct ibmvnic_adapter *adapter)
596 {
597 	if (!adapter->vpd)
598 		return;
599 
600 	kfree(adapter->vpd->buff);
601 	kfree(adapter->vpd);
602 
603 	adapter->vpd = NULL;
604 }
605 
606 static void release_tx_pools(struct ibmvnic_adapter *adapter)
607 {
608 	struct ibmvnic_tx_pool *tx_pool;
609 	int i;
610 
611 	if (!adapter->tx_pool)
612 		return;
613 
614 	for (i = 0; i < adapter->num_active_tx_pools; i++) {
615 		netdev_dbg(adapter->netdev, "Releasing tx_pool[%d]\n", i);
616 		tx_pool = &adapter->tx_pool[i];
617 		kfree(tx_pool->tx_buff);
618 		free_long_term_buff(adapter, &tx_pool->long_term_buff);
619 		free_long_term_buff(adapter, &tx_pool->tso_ltb);
620 		kfree(tx_pool->free_map);
621 	}
622 
623 	kfree(adapter->tx_pool);
624 	adapter->tx_pool = NULL;
625 	adapter->num_active_tx_pools = 0;
626 }
627 
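/* Allocate one tx pool per tx sub-CRQ from the login response.  Each pool
 * carries two long term buffers: one holding req_tx_entries_per_subcrq
 * MTU-sized slots and a separate TSO buffer of
 * IBMVNIC_TSO_BUFS * IBMVNIC_TSO_BUF_SZ bytes.
 */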
628 static int init_tx_pools(struct net_device *netdev)
629 {
630 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
631 	struct device *dev = &adapter->vdev->dev;
632 	struct ibmvnic_tx_pool *tx_pool;
633 	int tx_subcrqs;
634 	int i, j;
635 
636 	tx_subcrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
637 	adapter->tx_pool = kcalloc(tx_subcrqs,
638 				   sizeof(struct ibmvnic_tx_pool), GFP_KERNEL);
639 	if (!adapter->tx_pool)
640 		return -1;
641 
642 	adapter->num_active_tx_pools = tx_subcrqs;
643 
644 	for (i = 0; i < tx_subcrqs; i++) {
645 		tx_pool = &adapter->tx_pool[i];
646 
647 		netdev_dbg(adapter->netdev,
648 			   "Initializing tx_pool[%d], %lld buffs\n",
649 			   i, adapter->req_tx_entries_per_subcrq);
650 
651 		tx_pool->tx_buff = kcalloc(adapter->req_tx_entries_per_subcrq,
652 					   sizeof(struct ibmvnic_tx_buff),
653 					   GFP_KERNEL);
654 		if (!tx_pool->tx_buff) {
655 			dev_err(dev, "tx pool buffer allocation failed\n");
656 			release_tx_pools(adapter);
657 			return -1;
658 		}
659 
660 		if (alloc_long_term_buff(adapter, &tx_pool->long_term_buff,
661 					 adapter->req_tx_entries_per_subcrq *
662 					 adapter->req_mtu)) {
663 			release_tx_pools(adapter);
664 			return -1;
665 		}
666 
667 		/* alloc TSO ltb */
668 		if (alloc_long_term_buff(adapter, &tx_pool->tso_ltb,
669 					 IBMVNIC_TSO_BUFS *
670 					 IBMVNIC_TSO_BUF_SZ)) {
671 			release_tx_pools(adapter);
672 			return -1;
673 		}
674 
675 		tx_pool->tso_index = 0;
676 
677 		tx_pool->free_map = kcalloc(adapter->req_tx_entries_per_subcrq,
678 					    sizeof(int), GFP_KERNEL);
679 		if (!tx_pool->free_map) {
680 			release_tx_pools(adapter);
681 			return -1;
682 		}
683 
684 		for (j = 0; j < adapter->req_tx_entries_per_subcrq; j++)
685 			tx_pool->free_map[j] = j;
686 
687 		tx_pool->consumer_index = 0;
688 		tx_pool->producer_index = 0;
689 	}
690 
691 	return 0;
692 }
693 
694 static void release_error_buffers(struct ibmvnic_adapter *adapter)
695 {
696 	struct device *dev = &adapter->vdev->dev;
697 	struct ibmvnic_error_buff *error_buff, *tmp;
698 	unsigned long flags;
699 
700 	spin_lock_irqsave(&adapter->error_list_lock, flags);
701 	list_for_each_entry_safe(error_buff, tmp, &adapter->errors, list) {
702 		list_del(&error_buff->list);
703 		dma_unmap_single(dev, error_buff->dma, error_buff->len,
704 				 DMA_FROM_DEVICE);
705 		kfree(error_buff->buff);
706 		kfree(error_buff);
707 	}
708 	spin_unlock_irqrestore(&adapter->error_list_lock, flags);
709 }
710 
711 static void ibmvnic_napi_enable(struct ibmvnic_adapter *adapter)
712 {
713 	int i;
714 
715 	if (adapter->napi_enabled)
716 		return;
717 
718 	for (i = 0; i < adapter->req_rx_queues; i++)
719 		napi_enable(&adapter->napi[i]);
720 
721 	adapter->napi_enabled = true;
722 }
723 
724 static void ibmvnic_napi_disable(struct ibmvnic_adapter *adapter)
725 {
726 	int i;
727 
728 	if (!adapter->napi_enabled)
729 		return;
730 
731 	for (i = 0; i < adapter->req_rx_queues; i++) {
732 		netdev_dbg(adapter->netdev, "Disabling napi[%d]\n", i);
733 		napi_disable(&adapter->napi[i]);
734 	}
735 
736 	adapter->napi_enabled = false;
737 }
738 
739 static int init_napi(struct ibmvnic_adapter *adapter)
740 {
741 	int i;
742 
743 	adapter->napi = kcalloc(adapter->req_rx_queues,
744 				sizeof(struct napi_struct), GFP_KERNEL);
745 	if (!adapter->napi)
746 		return -ENOMEM;
747 
748 	for (i = 0; i < adapter->req_rx_queues; i++) {
749 		netdev_dbg(adapter->netdev, "Adding napi[%d]\n", i);
750 		netif_napi_add(adapter->netdev, &adapter->napi[i],
751 			       ibmvnic_poll, NAPI_POLL_WEIGHT);
752 	}
753 
754 	adapter->num_active_rx_napi = adapter->req_rx_queues;
755 	return 0;
756 }
757 
758 static void release_napi(struct ibmvnic_adapter *adapter)
759 {
760 	int i;
761 
762 	if (!adapter->napi)
763 		return;
764 
765 	for (i = 0; i < adapter->num_active_rx_napi; i++) {
766 		netdev_dbg(adapter->netdev, "Releasing napi[%d]\n", i);
767 		netif_napi_del(&adapter->napi[i]);
768 	}
772 
773 	kfree(adapter->napi);
774 	adapter->napi = NULL;
775 	adapter->num_active_rx_napi = 0;
776 }
777 
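/* Log in to the VNIC server.  If the server requests renegotiation, release
 * the sub-CRQs, re-query capabilities and rebuild the sub-CRQs and their
 * irqs before retrying.  Each step waits on init_done with a 30 second
 * timeout.
 */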
778 static int ibmvnic_login(struct net_device *netdev)
779 {
780 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
781 	unsigned long timeout = msecs_to_jiffies(30000);
782 	struct device *dev = &adapter->vdev->dev;
783 	int rc;
784 
785 	do {
786 		if (adapter->renegotiate) {
787 			adapter->renegotiate = false;
788 			release_sub_crqs(adapter, 1);
789 
790 			reinit_completion(&adapter->init_done);
791 			send_cap_queries(adapter);
792 			if (!wait_for_completion_timeout(&adapter->init_done,
793 							 timeout)) {
794 				dev_err(dev, "Capabilities query timeout\n");
795 				return -1;
796 			}
797 			rc = init_sub_crqs(adapter);
798 			if (rc) {
799 				dev_err(dev,
800 					"Initialization of SCRQs failed\n");
801 				return -1;
802 			}
803 			rc = init_sub_crq_irqs(adapter);
804 			if (rc) {
805 				dev_err(dev,
806 					"Initialization of SCRQ irqs failed\n");
807 				return -1;
808 			}
809 		}
810 
811 		reinit_completion(&adapter->init_done);
812 		send_login(adapter);
813 		if (!wait_for_completion_timeout(&adapter->init_done,
814 						 timeout)) {
815 			dev_err(dev, "Login timeout\n");
816 			return -1;
817 		}
818 	} while (adapter->renegotiate);
819 
820 	/* handle pending MAC address changes after successful login */
821 	if (adapter->mac_change_pending) {
822 		__ibmvnic_set_mac(netdev, &adapter->desired.mac);
823 		adapter->mac_change_pending = false;
824 	}
825 
826 	return 0;
827 }
828 
829 static void release_login_buffer(struct ibmvnic_adapter *adapter)
830 {
831 	kfree(adapter->login_buf);
832 	adapter->login_buf = NULL;
833 }
834 
835 static void release_login_rsp_buffer(struct ibmvnic_adapter *adapter)
836 {
837 	kfree(adapter->login_rsp_buf);
838 	adapter->login_rsp_buf = NULL;
839 }
840 
841 static void release_resources(struct ibmvnic_adapter *adapter)
842 {
843 	release_vpd_data(adapter);
844 
845 	release_tx_pools(adapter);
846 	release_rx_pools(adapter);
847 
848 	release_stats_token(adapter);
849 	release_stats_buffers(adapter);
850 	release_error_buffers(adapter);
851 	release_napi(adapter);
852 	release_login_rsp_buffer(adapter);
853 }
854 
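/* Issue a LOGICAL_LINK_STATE command and wait for the response.  A response
 * code of 1 indicates partial success, in which case the command is re-sent
 * after a one second delay.
 */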
855 static int set_link_state(struct ibmvnic_adapter *adapter, u8 link_state)
856 {
857 	struct net_device *netdev = adapter->netdev;
858 	unsigned long timeout = msecs_to_jiffies(30000);
859 	union ibmvnic_crq crq;
860 	bool resend;
861 	int rc;
862 
863 	netdev_dbg(netdev, "setting link state %d\n", link_state);
864 
865 	memset(&crq, 0, sizeof(crq));
866 	crq.logical_link_state.first = IBMVNIC_CRQ_CMD;
867 	crq.logical_link_state.cmd = LOGICAL_LINK_STATE;
868 	crq.logical_link_state.link_state = link_state;
869 
870 	do {
871 		resend = false;
872 
873 		reinit_completion(&adapter->init_done);
874 		rc = ibmvnic_send_crq(adapter, &crq);
875 		if (rc) {
876 			netdev_err(netdev, "Failed to set link state\n");
877 			return rc;
878 		}
879 
880 		if (!wait_for_completion_timeout(&adapter->init_done,
881 						 timeout)) {
882 			netdev_err(netdev, "timeout setting link state\n");
883 			return -1;
884 		}
885 
886 		if (adapter->init_done_rc == 1) {
887 			/* Partial success, delay and re-send */
888 			mdelay(1000);
889 			resend = true;
890 		}
891 	} while (resend);
892 
893 	return 0;
894 }
895 
896 static int set_real_num_queues(struct net_device *netdev)
897 {
898 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
899 	int rc;
900 
901 	netdev_dbg(netdev, "Setting real tx/rx queues (%llx/%llx)\n",
902 		   adapter->req_tx_queues, adapter->req_rx_queues);
903 
904 	rc = netif_set_real_num_tx_queues(netdev, adapter->req_tx_queues);
905 	if (rc) {
906 		netdev_err(netdev, "failed to set the number of tx queues\n");
907 		return rc;
908 	}
909 
910 	rc = netif_set_real_num_rx_queues(netdev, adapter->req_rx_queues);
911 	if (rc)
912 		netdev_err(netdev, "failed to set the number of rx queues\n");
913 
914 	return rc;
915 }
916 
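/* Retrieve Vital Product Data: query the size with GET_VPD_SIZE, (re)size
 * the VPD buffer to match, DMA-map it, and fetch the contents with GET_VPD.
 * Both commands wait on the fw_done completion.
 */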
917 static int ibmvnic_get_vpd(struct ibmvnic_adapter *adapter)
918 {
919 	struct device *dev = &adapter->vdev->dev;
920 	union ibmvnic_crq crq;
921 	int len = 0;
922 
923 	if (adapter->vpd->buff)
924 		len = adapter->vpd->len;
925 
926 	init_completion(&adapter->fw_done);
927 	crq.get_vpd_size.first = IBMVNIC_CRQ_CMD;
928 	crq.get_vpd_size.cmd = GET_VPD_SIZE;
929 	ibmvnic_send_crq(adapter, &crq);
930 	wait_for_completion(&adapter->fw_done);
931 
932 	if (!adapter->vpd->len)
933 		return -ENODATA;
934 
935 	if (!adapter->vpd->buff)
936 		adapter->vpd->buff = kzalloc(adapter->vpd->len, GFP_KERNEL);
937 	else if (adapter->vpd->len != len)
938 		adapter->vpd->buff =
939 			krealloc(adapter->vpd->buff,
940 				 adapter->vpd->len, GFP_KERNEL);
941 
942 	if (!adapter->vpd->buff) {
943 		dev_err(dev, "Could not allocate VPD buffer\n");
944 		return -ENOMEM;
945 	}
946 
947 	adapter->vpd->dma_addr =
948 		dma_map_single(dev, adapter->vpd->buff, adapter->vpd->len,
949 			       DMA_FROM_DEVICE);
950 	if (dma_mapping_error(dev, adapter->vpd->dma_addr)) {
951 		dev_err(dev, "Could not map VPD buffer\n");
952 		kfree(adapter->vpd->buff);
953 		adapter->vpd->buff = NULL;
954 		return -ENOMEM;
955 	}
956 
957 	reinit_completion(&adapter->fw_done);
958 	crq.get_vpd.first = IBMVNIC_CRQ_CMD;
959 	crq.get_vpd.cmd = GET_VPD;
960 	crq.get_vpd.ioba = cpu_to_be32(adapter->vpd->dma_addr);
961 	crq.get_vpd.len = cpu_to_be32((u32)adapter->vpd->len);
962 	ibmvnic_send_crq(adapter, &crq);
963 	wait_for_completion(&adapter->fw_done);
964 
965 	return 0;
966 }
967 
968 static int init_resources(struct ibmvnic_adapter *adapter)
969 {
970 	struct net_device *netdev = adapter->netdev;
971 	int rc;
972 
973 	rc = set_real_num_queues(netdev);
974 	if (rc)
975 		return rc;
976 
977 	rc = init_stats_buffers(adapter);
978 	if (rc)
979 		return rc;
980 
981 	rc = init_stats_token(adapter);
982 	if (rc)
983 		return rc;
984 
985 	adapter->vpd = kzalloc(sizeof(*adapter->vpd), GFP_KERNEL);
986 	if (!adapter->vpd)
987 		return -ENOMEM;
988 
989 	/* Vital Product Data (VPD) */
990 	rc = ibmvnic_get_vpd(adapter);
991 	if (rc) {
992 		netdev_err(netdev, "failed to initialize Vital Product Data (VPD)\n");
993 		return rc;
994 	}
995 
996 	adapter->map_id = 1;
997 
998 	rc = init_napi(adapter);
999 	if (rc)
1000 		return rc;
1001 
1002 	send_map_query(adapter);
1003 
1004 	rc = init_rx_pools(netdev);
1005 	if (rc)
1006 		return rc;
1007 
1008 	rc = init_tx_pools(netdev);
1009 	return rc;
1010 }
1011 
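/* Bring the interface up after login and resource setup: replenish the rx
 * pools, enable napi and the sub-CRQ interrupts, set the logical link state
 * to up and start the tx queues.  When coming from VNIC_CLOSED the irqs were
 * merely disabled, so enable_irq() is used instead of re-arming them through
 * enable_scrq_irq().
 */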
1012 static int __ibmvnic_open(struct net_device *netdev)
1013 {
1014 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1015 	enum vnic_state prev_state = adapter->state;
1016 	int i, rc;
1017 
1018 	adapter->state = VNIC_OPENING;
1019 	replenish_pools(adapter);
1020 	ibmvnic_napi_enable(adapter);
1021 
1022 	/* We're ready to receive frames, enable the sub-crq interrupts and
1023 	 * set the logical link state to up
1024 	 */
1025 	for (i = 0; i < adapter->req_rx_queues; i++) {
1026 		netdev_dbg(netdev, "Enabling rx_scrq[%d] irq\n", i);
1027 		if (prev_state == VNIC_CLOSED)
1028 			enable_irq(adapter->rx_scrq[i]->irq);
1029 		else
1030 			enable_scrq_irq(adapter, adapter->rx_scrq[i]);
1031 	}
1032 
1033 	for (i = 0; i < adapter->req_tx_queues; i++) {
1034 		netdev_dbg(netdev, "Enabling tx_scrq[%d] irq\n", i);
1035 		if (prev_state == VNIC_CLOSED)
1036 			enable_irq(adapter->tx_scrq[i]->irq);
1037 		else
1038 			enable_scrq_irq(adapter, adapter->tx_scrq[i]);
1039 	}
1040 
1041 	rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_UP);
1042 	if (rc) {
1043 		for (i = 0; i < adapter->req_rx_queues; i++)
1044 			napi_disable(&adapter->napi[i]);
1045 		release_resources(adapter);
1046 		return rc;
1047 	}
1048 
1049 	netif_tx_start_all_queues(netdev);
1050 
1051 	if (prev_state == VNIC_CLOSED) {
1052 		for (i = 0; i < adapter->req_rx_queues; i++)
1053 			napi_schedule(&adapter->napi[i]);
1054 	}
1055 
1056 	adapter->state = VNIC_OPEN;
1057 	return rc;
1058 }
1059 
1060 static int ibmvnic_open(struct net_device *netdev)
1061 {
1062 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1063 	int rc;
1064 
1065 	mutex_lock(&adapter->reset_lock);
1066 
1067 	if (adapter->state != VNIC_CLOSED) {
1068 		rc = ibmvnic_login(netdev);
1069 		if (rc) {
1070 			mutex_unlock(&adapter->reset_lock);
1071 			return rc;
1072 		}
1073 
1074 		rc = init_resources(adapter);
1075 		if (rc) {
1076 			netdev_err(netdev, "failed to initialize resources\n");
1077 			release_resources(adapter);
1078 			mutex_unlock(&adapter->reset_lock);
1079 			return rc;
1080 		}
1081 	}
1082 
1083 	rc = __ibmvnic_open(netdev);
1084 	netif_carrier_on(netdev);
1085 
1086 	mutex_unlock(&adapter->reset_lock);
1087 
1088 	return rc;
1089 }
1090 
1091 static void clean_rx_pools(struct ibmvnic_adapter *adapter)
1092 {
1093 	struct ibmvnic_rx_pool *rx_pool;
1094 	u64 rx_entries;
1095 	int rx_scrqs;
1096 	int i, j;
1097 
1098 	if (!adapter->rx_pool)
1099 		return;
1100 
1101 	rx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
1102 	rx_entries = adapter->req_rx_add_entries_per_subcrq;
1103 
1104 	/* Free any remaining skbs in the rx buffer pools */
1105 	for (i = 0; i < rx_scrqs; i++) {
1106 		rx_pool = &adapter->rx_pool[i];
1107 		if (!rx_pool)
1108 			continue;
1109 
1110 		netdev_dbg(adapter->netdev, "Cleaning rx_pool[%d]\n", i);
1111 		for (j = 0; j < rx_entries; j++) {
1112 			if (rx_pool->rx_buff[j].skb) {
1113 				dev_kfree_skb_any(rx_pool->rx_buff[j].skb);
1114 				rx_pool->rx_buff[j].skb = NULL;
1115 			}
1116 		}
1117 	}
1118 }
1119 
1120 static void clean_tx_pools(struct ibmvnic_adapter *adapter)
1121 {
1122 	struct ibmvnic_tx_pool *tx_pool;
1123 	u64 tx_entries;
1124 	int tx_scrqs;
1125 	int i, j;
1126 
1127 	if (!adapter->tx_pool)
1128 		return;
1129 
1130 	tx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
1131 	tx_entries = adapter->req_tx_entries_per_subcrq;
1132 
1133 	/* Free any remaining skbs in the tx buffer pools */
1134 	for (i = 0; i < tx_scrqs; i++) {
1135 		tx_pool = &adapter->tx_pool[i];
1136 		if (!tx_pool)
1137 			continue;
1138 
1139 		netdev_dbg(adapter->netdev, "Cleaning tx_pool[%d]\n", i);
1140 		for (j = 0; j < tx_entries; j++) {
1141 			if (tx_pool->tx_buff[j].skb) {
1142 				dev_kfree_skb_any(tx_pool->tx_buff[j].skb);
1143 				tx_pool->tx_buff[j].skb = NULL;
1144 			}
1145 		}
1146 	}
1147 }
1148 
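/* Tear the interface down: stop tx, disable napi and the sub-CRQ irqs, set
 * the logical link state to down, then drain any skbs left in the rx and tx
 * pools.  Each rx queue is given up to one second (10 x 100ms) to quiesce
 * before its irq is disabled.
 */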
1149 static int __ibmvnic_close(struct net_device *netdev)
1150 {
1151 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1152 	int rc = 0;
1153 	int i;
1154 
1155 	adapter->state = VNIC_CLOSING;
1156 
1157 	/* ensure that transmissions are stopped if called by do_reset */
1158 	if (adapter->resetting)
1159 		netif_tx_disable(netdev);
1160 	else
1161 		netif_tx_stop_all_queues(netdev);
1162 
1163 	ibmvnic_napi_disable(adapter);
1164 
1165 	if (adapter->tx_scrq) {
1166 		for (i = 0; i < adapter->req_tx_queues; i++)
1167 			if (adapter->tx_scrq[i]->irq) {
1168 				netdev_dbg(adapter->netdev,
1169 					   "Disabling tx_scrq[%d] irq\n", i);
1170 				disable_irq(adapter->tx_scrq[i]->irq);
1171 			}
1172 	}
1173 
1174 	rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_DN);
1175 	if (rc)
1176 		return rc;
1177 
1178 	if (adapter->rx_scrq) {
1179 		for (i = 0; i < adapter->req_rx_queues; i++) {
1180 			int retries = 10;
1181 
1182 			while (pending_scrq(adapter, adapter->rx_scrq[i])) {
1183 				retries--;
1184 				mdelay(100);
1185 
1186 				if (retries == 0)
1187 					break;
1188 			}
1189 
1190 			if (adapter->rx_scrq[i]->irq) {
1191 				netdev_dbg(adapter->netdev,
1192 					   "Disabling rx_scrq[%d] irq\n", i);
1193 				disable_irq(adapter->rx_scrq[i]->irq);
1194 			}
1195 		}
1196 	}
1197 	clean_rx_pools(adapter);
1198 	clean_tx_pools(adapter);
1199 	adapter->state = VNIC_CLOSED;
1200 	return rc;
1201 }
1202 
1203 static int ibmvnic_close(struct net_device *netdev)
1204 {
1205 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1206 	int rc;
1207 
1208 	mutex_lock(&adapter->reset_lock);
1209 	rc = __ibmvnic_close(netdev);
1210 	mutex_unlock(&adapter->reset_lock);
1211 
1212 	return rc;
1213 }
1214 
1215 /**
1216  * build_hdr_data - creates L2/L3/L4 header data buffer
1217  * @hdr_field: bitfield determining needed headers
1218  * @skb: socket buffer
1219  * @hdr_len: array of header lengths to be filled in
1220  * @hdr_data: buffer to write the header data into
1221  *
1222  * Reads hdr_field to determine which headers are needed by firmware.
1223  * Builds a buffer containing these headers.  Saves individual header
1224  * lengths and returns the total buffer length, used to build descriptors.
1225  */
1226 static int build_hdr_data(u8 hdr_field, struct sk_buff *skb,
1227 			  int *hdr_len, u8 *hdr_data)
1228 {
1229 	int len = 0;
1230 	u8 *hdr;
1231 
1232 	hdr_len[0] = sizeof(struct ethhdr);
1233 
1234 	if (skb->protocol == htons(ETH_P_IP)) {
1235 		hdr_len[1] = ip_hdr(skb)->ihl * 4;
1236 		if (ip_hdr(skb)->protocol == IPPROTO_TCP)
1237 			hdr_len[2] = tcp_hdrlen(skb);
1238 		else if (ip_hdr(skb)->protocol == IPPROTO_UDP)
1239 			hdr_len[2] = sizeof(struct udphdr);
1240 	} else if (skb->protocol == htons(ETH_P_IPV6)) {
1241 		hdr_len[1] = sizeof(struct ipv6hdr);
1242 		if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
1243 			hdr_len[2] = tcp_hdrlen(skb);
1244 		else if (ipv6_hdr(skb)->nexthdr == IPPROTO_UDP)
1245 			hdr_len[2] = sizeof(struct udphdr);
1246 	} else if (skb->protocol == htons(ETH_P_ARP)) {
1247 		hdr_len[1] = arp_hdr_len(skb->dev);
1248 		hdr_len[2] = 0;
1249 	}
1250 
1251 	memset(hdr_data, 0, 120);
1252 	if ((hdr_field >> 6) & 1) {
1253 		hdr = skb_mac_header(skb);
1254 		memcpy(hdr_data, hdr, hdr_len[0]);
1255 		len += hdr_len[0];
1256 	}
1257 
1258 	if ((hdr_field >> 5) & 1) {
1259 		hdr = skb_network_header(skb);
1260 		memcpy(hdr_data + len, hdr, hdr_len[1]);
1261 		len += hdr_len[1];
1262 	}
1263 
1264 	if ((hdr_field >> 4) & 1) {
1265 		hdr = skb_transport_header(skb);
1266 		memcpy(hdr_data + len, hdr, hdr_len[2]);
1267 		len += hdr_len[2];
1268 	}
1269 	return len;
1270 }
1271 
1272 /**
1273  * create_hdr_descs - create header and header extension descriptors
1274  * @hdr_field: bitfield determining needed headers
1275  * @hdr_data: buffer containing header data
1276  * @len: length of header data buffer
1277  * @hdr_len: array of individual header lengths
1278  * @scrq_arr: descriptor array to fill
1279  *
1280  * Creates header and, if needed, header extension descriptors and places
1281  * them in the descriptor array, scrq_arr.  Returns the descriptor count.
1282  */
1283 
1284 static int create_hdr_descs(u8 hdr_field, u8 *hdr_data, int len, int *hdr_len,
1285 			    union sub_crq *scrq_arr)
1286 {
1287 	union sub_crq hdr_desc;
1288 	int tmp_len = len;
1289 	int num_descs = 0;
1290 	u8 *data, *cur;
1291 	int tmp;
1292 
1293 	while (tmp_len > 0) {
1294 		cur = hdr_data + len - tmp_len;
1295 
1296 		memset(&hdr_desc, 0, sizeof(hdr_desc));
1297 		if (cur != hdr_data) {
1298 			data = hdr_desc.hdr_ext.data;
1299 			tmp = tmp_len > 29 ? 29 : tmp_len;
1300 			hdr_desc.hdr_ext.first = IBMVNIC_CRQ_CMD;
1301 			hdr_desc.hdr_ext.type = IBMVNIC_HDR_EXT_DESC;
1302 			hdr_desc.hdr_ext.len = tmp;
1303 		} else {
1304 			data = hdr_desc.hdr.data;
1305 			tmp = tmp_len > 24 ? 24 : tmp_len;
1306 			hdr_desc.hdr.first = IBMVNIC_CRQ_CMD;
1307 			hdr_desc.hdr.type = IBMVNIC_HDR_DESC;
1308 			hdr_desc.hdr.len = tmp;
1309 			hdr_desc.hdr.l2_len = (u8)hdr_len[0];
1310 			hdr_desc.hdr.l3_len = cpu_to_be16((u16)hdr_len[1]);
1311 			hdr_desc.hdr.l4_len = (u8)hdr_len[2];
1312 			hdr_desc.hdr.flag = hdr_field << 1;
1313 		}
1314 		memcpy(data, cur, tmp);
1315 		tmp_len -= tmp;
1316 		*scrq_arr = hdr_desc;
1317 		scrq_arr++;
1318 		num_descs++;
1319 	}
1320 
1321 	return num_descs;
1322 }
1323 
1324 /**
1325  * build_hdr_descs_arr - build a header descriptor array
1326  * @txbuff: tx buffer containing the skb and indirect descriptor array
1327  * @num_entries: number of descriptors to be sent, incremented by the
1328  *		 number of header descriptors created here
1329  * @hdr_field: bit field determining which headers will be sent
1330  *
1331  * This function will build a TX descriptor array with applicable
1332  * L2/L3/L4 packet header descriptors to be sent by send_subcrq_indirect.
1333  */
1334 
1335 static void build_hdr_descs_arr(struct ibmvnic_tx_buff *txbuff,
1336 				int *num_entries, u8 hdr_field)
1337 {
1338 	int hdr_len[3] = {0, 0, 0};
1339 	int tot_len;
1340 	u8 *hdr_data = txbuff->hdr_data;
1341 
1342 	tot_len = build_hdr_data(hdr_field, txbuff->skb, hdr_len,
1343 				 txbuff->hdr_data);
1344 	*num_entries += create_hdr_descs(hdr_field, hdr_data, tot_len, hdr_len,
1345 			 txbuff->indir_arr + 1);
1346 }
1347 
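/* Transmit path.  The skb is copied into a slot of the queue's long term
 * buffer (or the TSO buffer for GSO skbs) and described by a v1 tx sub-CRQ
 * descriptor.  If firmware requested L2/L3/L4 headers, a descriptor array is
 * built and sent with send_subcrq_indirect(); otherwise the single
 * descriptor is sent with send_subcrq().
 */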
1348 static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
1349 {
1350 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1351 	int queue_num = skb_get_queue_mapping(skb);
1352 	u8 *hdrs = (u8 *)&adapter->tx_rx_desc_req;
1353 	struct device *dev = &adapter->vdev->dev;
1354 	struct ibmvnic_tx_buff *tx_buff = NULL;
1355 	struct ibmvnic_sub_crq_queue *tx_scrq;
1356 	struct ibmvnic_tx_pool *tx_pool;
1357 	unsigned int tx_send_failed = 0;
1358 	unsigned int tx_map_failed = 0;
1359 	unsigned int tx_dropped = 0;
1360 	unsigned int tx_packets = 0;
1361 	unsigned int tx_bytes = 0;
1362 	dma_addr_t data_dma_addr;
1363 	struct netdev_queue *txq;
1364 	unsigned long lpar_rc;
1365 	union sub_crq tx_crq;
1366 	unsigned int offset;
1367 	int num_entries = 1;
1368 	unsigned char *dst;
1369 	u64 *handle_array;
1370 	int index = 0;
1371 	u8 proto = 0;
1372 	int ret = 0;
1373 
1374 	if (adapter->resetting) {
1375 		if (!netif_subqueue_stopped(netdev, skb))
1376 			netif_stop_subqueue(netdev, queue_num);
1377 		dev_kfree_skb_any(skb);
1378 
1379 		tx_send_failed++;
1380 		tx_dropped++;
1381 		ret = NETDEV_TX_OK;
1382 		goto out;
1383 	}
1384 
1385 	tx_pool = &adapter->tx_pool[queue_num];
1386 	tx_scrq = adapter->tx_scrq[queue_num];
1387 	txq = netdev_get_tx_queue(netdev, skb_get_queue_mapping(skb));
1388 	handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
1389 		be32_to_cpu(adapter->login_rsp_buf->off_txsubm_subcrqs));
1390 
1391 	index = tx_pool->free_map[tx_pool->consumer_index];
1392 
1393 	if (skb_is_gso(skb)) {
1394 		offset = tx_pool->tso_index * IBMVNIC_TSO_BUF_SZ;
1395 		dst = tx_pool->tso_ltb.buff + offset;
1396 		memset(dst, 0, IBMVNIC_TSO_BUF_SZ);
1397 		data_dma_addr = tx_pool->tso_ltb.addr + offset;
1398 		tx_pool->tso_index++;
1399 		if (tx_pool->tso_index == IBMVNIC_TSO_BUFS)
1400 			tx_pool->tso_index = 0;
1401 	} else {
1402 		offset = index * adapter->req_mtu;
1403 		dst = tx_pool->long_term_buff.buff + offset;
1404 		memset(dst, 0, adapter->req_mtu);
1405 		data_dma_addr = tx_pool->long_term_buff.addr + offset;
1406 	}
1407 
1408 	if (skb_shinfo(skb)->nr_frags) {
1409 		int cur, i;
1410 
1411 		/* Copy the head */
1412 		skb_copy_from_linear_data(skb, dst, skb_headlen(skb));
1413 		cur = skb_headlen(skb);
1414 
1415 		/* Copy the frags */
1416 		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1417 			const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1418 
1419 			memcpy(dst + cur,
1420 			       page_address(skb_frag_page(frag)) +
1421 			       frag->page_offset, skb_frag_size(frag));
1422 			cur += skb_frag_size(frag);
1423 		}
1424 	} else {
1425 		skb_copy_from_linear_data(skb, dst, skb->len);
1426 	}
1427 
1428 	tx_pool->consumer_index =
1429 	    (tx_pool->consumer_index + 1) %
1430 		adapter->req_tx_entries_per_subcrq;
1431 
1432 	tx_buff = &tx_pool->tx_buff[index];
1433 	tx_buff->skb = skb;
1434 	tx_buff->data_dma[0] = data_dma_addr;
1435 	tx_buff->data_len[0] = skb->len;
1436 	tx_buff->index = index;
1437 	tx_buff->pool_index = queue_num;
1438 	tx_buff->last_frag = true;
1439 
1440 	memset(&tx_crq, 0, sizeof(tx_crq));
1441 	tx_crq.v1.first = IBMVNIC_CRQ_CMD;
1442 	tx_crq.v1.type = IBMVNIC_TX_DESC;
1443 	tx_crq.v1.n_crq_elem = 1;
1444 	tx_crq.v1.n_sge = 1;
1445 	tx_crq.v1.flags1 = IBMVNIC_TX_COMP_NEEDED;
1446 	tx_crq.v1.correlator = cpu_to_be32(index);
1447 	if (skb_is_gso(skb))
1448 		tx_crq.v1.dma_reg = cpu_to_be16(tx_pool->tso_ltb.map_id);
1449 	else
1450 		tx_crq.v1.dma_reg = cpu_to_be16(tx_pool->long_term_buff.map_id);
1451 	tx_crq.v1.sge_len = cpu_to_be32(skb->len);
1452 	tx_crq.v1.ioba = cpu_to_be64(data_dma_addr);
1453 
1454 	if (adapter->vlan_header_insertion) {
1455 		tx_crq.v1.flags2 |= IBMVNIC_TX_VLAN_INSERT;
1456 		tx_crq.v1.vlan_id = cpu_to_be16(skb->vlan_tci);
1457 	}
1458 
1459 	if (skb->protocol == htons(ETH_P_IP)) {
1460 		tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV4;
1461 		proto = ip_hdr(skb)->protocol;
1462 	} else if (skb->protocol == htons(ETH_P_IPV6)) {
1463 		tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV6;
1464 		proto = ipv6_hdr(skb)->nexthdr;
1465 	}
1466 
1467 	if (proto == IPPROTO_TCP)
1468 		tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_TCP;
1469 	else if (proto == IPPROTO_UDP)
1470 		tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_UDP;
1471 
1472 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
1473 		tx_crq.v1.flags1 |= IBMVNIC_TX_CHKSUM_OFFLOAD;
1474 		hdrs += 2;
1475 	}
1476 	if (skb_is_gso(skb)) {
1477 		tx_crq.v1.flags1 |= IBMVNIC_TX_LSO;
1478 		tx_crq.v1.mss = cpu_to_be16(skb_shinfo(skb)->gso_size);
1479 		hdrs += 2;
1480 	}
1481 	/* determine if l2/3/4 headers are sent to firmware */
1482 	if ((*hdrs >> 7) & 1) {
1483 		build_hdr_descs_arr(tx_buff, &num_entries, *hdrs);
1484 		tx_crq.v1.n_crq_elem = num_entries;
1485 		tx_buff->indir_arr[0] = tx_crq;
1486 		tx_buff->indir_dma = dma_map_single(dev, tx_buff->indir_arr,
1487 						    sizeof(tx_buff->indir_arr),
1488 						    DMA_TO_DEVICE);
1489 		if (dma_mapping_error(dev, tx_buff->indir_dma)) {
1490 			dev_kfree_skb_any(skb);
1491 			tx_buff->skb = NULL;
1492 			if (!firmware_has_feature(FW_FEATURE_CMO))
1493 				dev_err(dev, "tx: unable to map descriptor array\n");
1494 			tx_map_failed++;
1495 			tx_dropped++;
1496 			ret = NETDEV_TX_OK;
1497 			goto out;
1498 		}
1499 		lpar_rc = send_subcrq_indirect(adapter, handle_array[queue_num],
1500 					       (u64)tx_buff->indir_dma,
1501 					       (u64)num_entries);
1502 	} else {
1503 		lpar_rc = send_subcrq(adapter, handle_array[queue_num],
1504 				      &tx_crq);
1505 	}
1506 	if (lpar_rc != H_SUCCESS) {
1507 		dev_err(dev, "tx failed with code %ld\n", lpar_rc);
1508 
1509 		if (tx_pool->consumer_index == 0)
1510 			tx_pool->consumer_index =
1511 				adapter->req_tx_entries_per_subcrq - 1;
1512 		else
1513 			tx_pool->consumer_index--;
1514 
1515 		dev_kfree_skb_any(skb);
1516 		tx_buff->skb = NULL;
1517 
1518 		if (lpar_rc == H_CLOSED) {
1519 			/* Disable TX and report carrier off if queue is closed.
1520 			 * Firmware guarantees that a signal will be sent to the
1521 			 * driver, triggering a reset or some other action.
1522 			 */
1523 			netif_tx_stop_all_queues(netdev);
1524 			netif_carrier_off(netdev);
1525 		}
1526 
1527 		tx_send_failed++;
1528 		tx_dropped++;
1529 		ret = NETDEV_TX_OK;
1530 		goto out;
1531 	}
1532 
1533 	if (atomic_add_return(num_entries, &tx_scrq->used)
1534 					>= adapter->req_tx_entries_per_subcrq) {
1535 		netdev_info(netdev, "Stopping queue %d\n", queue_num);
1536 		netif_stop_subqueue(netdev, queue_num);
1537 	}
1538 
1539 	tx_buff->num_entries = num_entries;
1540 	tx_packets++;
1541 	tx_bytes += skb->len;
1542 	txq->trans_start = jiffies;
1543 	ret = NETDEV_TX_OK;
1544 
1545 out:
1546 	netdev->stats.tx_dropped += tx_dropped;
1547 	netdev->stats.tx_bytes += tx_bytes;
1548 	netdev->stats.tx_packets += tx_packets;
1549 	adapter->tx_send_failed += tx_send_failed;
1550 	adapter->tx_map_failed += tx_map_failed;
1551 	adapter->tx_stats_buffers[queue_num].packets += tx_packets;
1552 	adapter->tx_stats_buffers[queue_num].bytes += tx_bytes;
1553 	adapter->tx_stats_buffers[queue_num].dropped_packets += tx_dropped;
1554 
1555 	return ret;
1556 }
1557 
1558 static void ibmvnic_set_multi(struct net_device *netdev)
1559 {
1560 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1561 	struct netdev_hw_addr *ha;
1562 	union ibmvnic_crq crq;
1563 
1564 	memset(&crq, 0, sizeof(crq));
1565 	crq.request_capability.first = IBMVNIC_CRQ_CMD;
1566 	crq.request_capability.cmd = REQUEST_CAPABILITY;
1567 
1568 	if (netdev->flags & IFF_PROMISC) {
1569 		if (!adapter->promisc_supported)
1570 			return;
1571 	} else {
1572 		if (netdev->flags & IFF_ALLMULTI) {
1573 			/* Accept all multicast */
1574 			memset(&crq, 0, sizeof(crq));
1575 			crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
1576 			crq.multicast_ctrl.cmd = MULTICAST_CTRL;
1577 			crq.multicast_ctrl.flags = IBMVNIC_ENABLE_ALL;
1578 			ibmvnic_send_crq(adapter, &crq);
1579 		} else if (netdev_mc_empty(netdev)) {
1580 			/* Reject all multicast */
1581 			memset(&crq, 0, sizeof(crq));
1582 			crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
1583 			crq.multicast_ctrl.cmd = MULTICAST_CTRL;
1584 			crq.multicast_ctrl.flags = IBMVNIC_DISABLE_ALL;
1585 			ibmvnic_send_crq(adapter, &crq);
1586 		} else {
1587 			/* Accept one or more multicast(s) */
1588 			netdev_for_each_mc_addr(ha, netdev) {
1589 				memset(&crq, 0, sizeof(crq));
1590 				crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
1591 				crq.multicast_ctrl.cmd = MULTICAST_CTRL;
1592 				crq.multicast_ctrl.flags = IBMVNIC_ENABLE_MC;
1593 				ether_addr_copy(&crq.multicast_ctrl.mac_addr[0],
1594 						ha->addr);
1595 				ibmvnic_send_crq(adapter, &crq);
1596 			}
1597 		}
1598 	}
1599 }
1600 
1601 static int __ibmvnic_set_mac(struct net_device *netdev, struct sockaddr *p)
1602 {
1603 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1604 	struct sockaddr *addr = p;
1605 	union ibmvnic_crq crq;
1606 
1607 	if (!is_valid_ether_addr(addr->sa_data))
1608 		return -EADDRNOTAVAIL;
1609 
1610 	memset(&crq, 0, sizeof(crq));
1611 	crq.change_mac_addr.first = IBMVNIC_CRQ_CMD;
1612 	crq.change_mac_addr.cmd = CHANGE_MAC_ADDR;
1613 	ether_addr_copy(&crq.change_mac_addr.mac_addr[0], addr->sa_data);
1614 
1615 	init_completion(&adapter->fw_done);
1616 	ibmvnic_send_crq(adapter, &crq);
1617 	wait_for_completion(&adapter->fw_done);
1618 	/* netdev->dev_addr is changed in handle_change_mac_rsp function */
1619 	return adapter->fw_done_rc ? -EIO : 0;
1620 }
1621 
1622 static int ibmvnic_set_mac(struct net_device *netdev, void *p)
1623 {
1624 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1625 	struct sockaddr *addr = p;
1626 	int rc;
1627 
1628 	if (adapter->state == VNIC_PROBED) {
1629 		memcpy(&adapter->desired.mac, addr, sizeof(struct sockaddr));
1630 		adapter->mac_change_pending = true;
1631 		return 0;
1632 	}
1633 
1634 	rc = __ibmvnic_set_mac(netdev, addr);
1635 
1636 	return rc;
1637 }
1638 
1639 /**
1640  * do_reset returns zero if we are able to keep processing reset events, or
1641  * non-zero if we hit a fatal error and must halt.
1642  */
1643 static int do_reset(struct ibmvnic_adapter *adapter,
1644 		    struct ibmvnic_rwi *rwi, u32 reset_state)
1645 {
1646 	u64 old_num_rx_queues, old_num_tx_queues;
1647 	struct net_device *netdev = adapter->netdev;
1648 	int i, rc;
1649 
1650 	netdev_dbg(adapter->netdev, "Re-setting driver (%d)\n",
1651 		   rwi->reset_reason);
1652 
1653 	netif_carrier_off(netdev);
1654 	adapter->reset_reason = rwi->reset_reason;
1655 
1656 	old_num_rx_queues = adapter->req_rx_queues;
1657 	old_num_tx_queues = adapter->req_tx_queues;
1658 
1659 	if (rwi->reset_reason == VNIC_RESET_MOBILITY) {
1660 		rc = ibmvnic_reenable_crq_queue(adapter);
1661 		if (rc)
1662 			return 0;
1663 	}
1664 
1665 	rc = __ibmvnic_close(netdev);
1666 	if (rc)
1667 		return rc;
1668 
1669 	if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM ||
1670 	    adapter->wait_for_reset) {
1671 		release_resources(adapter);
1672 		release_sub_crqs(adapter, 1);
1673 		release_crq_queue(adapter);
1674 	}
1675 
1676 	if (adapter->reset_reason != VNIC_RESET_NON_FATAL) {
1677 		/* remove the closed state so when we call open it appears
1678 		 * we are coming from the probed state.
1679 		 */
1680 		adapter->state = VNIC_PROBED;
1681 
1682 		rc = ibmvnic_init(adapter);
1683 		if (rc)
1684 			return IBMVNIC_INIT_FAILED;
1685 
1686 		/* If the adapter was in PROBE state prior to the reset,
1687 		 * exit here.
1688 		 */
1689 		if (reset_state == VNIC_PROBED)
1690 			return 0;
1691 
1692 		rc = ibmvnic_login(netdev);
1693 		if (rc) {
1694 			adapter->state = VNIC_PROBED;
1695 			return 0;
1696 		}
1697 
1698 		if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM ||
1699 		    adapter->wait_for_reset) {
1700 			rc = init_resources(adapter);
1701 			if (rc)
1702 				return rc;
1703 		} else if (adapter->req_rx_queues != old_num_rx_queues ||
1704 			   adapter->req_tx_queues != old_num_tx_queues) {
1705 			adapter->map_id = 1;
1706 			release_rx_pools(adapter);
1707 			release_tx_pools(adapter);
1708 			init_rx_pools(netdev);
1709 			init_tx_pools(netdev);
1710 
1711 			release_napi(adapter);
1712 			init_napi(adapter);
1713 		} else {
1714 			rc = reset_tx_pools(adapter);
1715 			if (rc)
1716 				return rc;
1717 
1718 			rc = reset_rx_pools(adapter);
1719 			if (rc)
1720 				return rc;
1721 
1722 			if (reset_state == VNIC_CLOSED)
1723 				return 0;
1724 		}
1725 	}
1726 
1727 	rc = __ibmvnic_open(netdev);
1728 	if (rc) {
1729 		if (list_empty(&adapter->rwi_list))
1730 			adapter->state = VNIC_CLOSED;
1731 		else
1732 			adapter->state = reset_state;
1733 
1734 		return 0;
1735 	}
1736 
1737 	/* kick napi */
1738 	for (i = 0; i < adapter->req_rx_queues; i++)
1739 		napi_schedule(&adapter->napi[i]);
1740 
1741 	if (adapter->reset_reason != VNIC_RESET_FAILOVER)
1742 		netdev_notify_peers(netdev);
1743 
1744 	netif_carrier_on(netdev);
1745 
1746 	return 0;
1747 }
1748 
1749 static struct ibmvnic_rwi *get_next_rwi(struct ibmvnic_adapter *adapter)
1750 {
1751 	struct ibmvnic_rwi *rwi;
1752 
1753 	mutex_lock(&adapter->rwi_lock);
1754 
1755 	if (!list_empty(&adapter->rwi_list)) {
1756 		rwi = list_first_entry(&adapter->rwi_list, struct ibmvnic_rwi,
1757 				       list);
1758 		list_del(&rwi->list);
1759 	} else {
1760 		rwi = NULL;
1761 	}
1762 
1763 	mutex_unlock(&adapter->rwi_lock);
1764 	return rwi;
1765 }
1766 
1767 static void free_all_rwi(struct ibmvnic_adapter *adapter)
1768 {
1769 	struct ibmvnic_rwi *rwi;
1770 
1771 	rwi = get_next_rwi(adapter);
1772 	while (rwi) {
1773 		kfree(rwi);
1774 		rwi = get_next_rwi(adapter);
1775 	}
1776 }
1777 
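/* Reset worker: drain the reset work item (rwi) list, invoking do_reset()
 * for each entry and stopping at the first fatal error.  Any caller blocked
 * in wait_for_reset is completed with the final return code.
 */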
1778 static void __ibmvnic_reset(struct work_struct *work)
1779 {
1780 	struct ibmvnic_rwi *rwi;
1781 	struct ibmvnic_adapter *adapter;
1782 	struct net_device *netdev;
1783 	u32 reset_state;
1784 	int rc = 0;
1785 
1786 	adapter = container_of(work, struct ibmvnic_adapter, ibmvnic_reset);
1787 	netdev = adapter->netdev;
1788 
1789 	mutex_lock(&adapter->reset_lock);
1790 	adapter->resetting = true;
1791 	reset_state = adapter->state;
1792 
1793 	rwi = get_next_rwi(adapter);
1794 	while (rwi) {
1795 		rc = do_reset(adapter, rwi, reset_state);
1796 		kfree(rwi);
1797 		if (rc && rc != IBMVNIC_INIT_FAILED)
1798 			break;
1799 
1800 		rwi = get_next_rwi(adapter);
1801 	}
1802 
1803 	if (adapter->wait_for_reset) {
1804 		adapter->wait_for_reset = false;
1805 		adapter->reset_done_rc = rc;
1806 		complete(&adapter->reset_done);
1807 	}
1808 
1809 	if (rc) {
1810 		netdev_dbg(adapter->netdev, "Reset failed\n");
1811 		free_all_rwi(adapter);
1812 		mutex_unlock(&adapter->reset_lock);
1813 		return;
1814 	}
1815 
1816 	adapter->resetting = false;
1817 	mutex_unlock(&adapter->reset_lock);
1818 }
1819 
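/* Queue a reset request for the given reason and schedule the reset
 * task. The request is dropped if the adapter is being removed or if
 * a reset with the same reason is already pending.
 */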
1820 static void ibmvnic_reset(struct ibmvnic_adapter *adapter,
1821 			  enum ibmvnic_reset_reason reason)
1822 {
1823 	struct ibmvnic_rwi *rwi, *tmp;
1824 	struct net_device *netdev = adapter->netdev;
1825 	struct list_head *entry;
1826 
1827 	if (adapter->state == VNIC_REMOVING ||
1828 	    adapter->state == VNIC_REMOVED) {
1829 		netdev_dbg(netdev, "Adapter removing, skipping reset\n");
1830 		return;
1831 	}
1832 
1833 	if (adapter->state == VNIC_PROBING) {
1834 		netdev_warn(netdev, "Adapter reset during probe\n");
1835 		adapter->init_done_rc = EAGAIN;
1836 		return;
1837 	}
1838 
1839 	mutex_lock(&adapter->rwi_lock);
1840 
1841 	list_for_each(entry, &adapter->rwi_list) {
1842 		tmp = list_entry(entry, struct ibmvnic_rwi, list);
1843 		if (tmp->reset_reason == reason) {
1844 			netdev_dbg(netdev, "Skipping matching reset\n");
1845 			mutex_unlock(&adapter->rwi_lock);
1846 			return;
1847 		}
1848 	}
1849 
1850 	rwi = kzalloc(sizeof(*rwi), GFP_KERNEL);
1851 	if (!rwi) {
1852 		mutex_unlock(&adapter->rwi_lock);
1853 		ibmvnic_close(netdev);
1854 		return;
1855 	}
1856 
1857 	rwi->reset_reason = reason;
1858 	list_add_tail(&rwi->list, &adapter->rwi_list);
1859 	mutex_unlock(&adapter->rwi_lock);
1860 
1861 	netdev_dbg(adapter->netdev, "Scheduling reset (reason %d)\n", reason);
1862 	schedule_work(&adapter->ibmvnic_reset);
1863 }
1864 
1865 static void ibmvnic_tx_timeout(struct net_device *dev)
1866 {
1867 	struct ibmvnic_adapter *adapter = netdev_priv(dev);
1868 
1869 	ibmvnic_reset(adapter, VNIC_RESET_TIMEOUT);
1870 }
1871 
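/* Return an rx buffer's slot to its pool's free map so the slot can
 * be refilled on the next replenish cycle.
 */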
1872 static void remove_buff_from_pool(struct ibmvnic_adapter *adapter,
1873 				  struct ibmvnic_rx_buff *rx_buff)
1874 {
1875 	struct ibmvnic_rx_pool *pool = &adapter->rx_pool[rx_buff->pool_index];
1876 
1877 	rx_buff->skb = NULL;
1878 
1879 	pool->free_map[pool->next_alloc] = (int)(rx_buff - pool->rx_buff);
1880 	pool->next_alloc = (pool->next_alloc + 1) % pool->size;
1881 
1882 	atomic_dec(&pool->available);
1883 }
1884 
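/* NAPI poll function. Consumes up to @budget receive completions
 * from this queue's sub-CRQ, copies each frame out of its long term
 * mapped buffer into the waiting skb, and passes it up via GRO.
 */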
1885 static int ibmvnic_poll(struct napi_struct *napi, int budget)
1886 {
1887 	struct net_device *netdev = napi->dev;
1888 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1889 	int scrq_num = (int)(napi - adapter->napi);
1890 	int frames_processed = 0;
1891 
1892 restart_poll:
1893 	while (frames_processed < budget) {
1894 		struct sk_buff *skb;
1895 		struct ibmvnic_rx_buff *rx_buff;
1896 		union sub_crq *next;
1897 		u32 length;
1898 		u16 offset;
1899 		u8 flags = 0;
1900 
1901 		if (unlikely(adapter->resetting &&
1902 			     adapter->reset_reason != VNIC_RESET_NON_FATAL)) {
1903 			enable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
1904 			napi_complete_done(napi, frames_processed);
1905 			return frames_processed;
1906 		}
1907 
1908 		if (!pending_scrq(adapter, adapter->rx_scrq[scrq_num]))
1909 			break;
1910 		next = ibmvnic_next_scrq(adapter, adapter->rx_scrq[scrq_num]);
		rx_buff = (struct ibmvnic_rx_buff *)
			  be64_to_cpu(next->rx_comp.correlator);
1914 		/* do error checking */
1915 		if (next->rx_comp.rc) {
1916 			netdev_dbg(netdev, "rx buffer returned with rc %x\n",
1917 				   be16_to_cpu(next->rx_comp.rc));
1918 			/* free the entry */
1919 			next->rx_comp.first = 0;
1920 			dev_kfree_skb_any(rx_buff->skb);
1921 			remove_buff_from_pool(adapter, rx_buff);
1922 			continue;
1923 		}
1924 
1925 		length = be32_to_cpu(next->rx_comp.len);
1926 		offset = be16_to_cpu(next->rx_comp.off_frame_data);
1927 		flags = next->rx_comp.flags;
1928 		skb = rx_buff->skb;
1929 		skb_copy_to_linear_data(skb, rx_buff->data + offset,
1930 					length);
1931 
1932 		/* VLAN Header has been stripped by the system firmware and
1933 		 * needs to be inserted by the driver
1934 		 */
1935 		if (adapter->rx_vlan_header_insertion &&
1936 		    (flags & IBMVNIC_VLAN_STRIPPED))
1937 			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
1938 					       ntohs(next->rx_comp.vlan_tci));
1939 
1940 		/* free the entry */
1941 		next->rx_comp.first = 0;
1942 		remove_buff_from_pool(adapter, rx_buff);
1943 
1944 		skb_put(skb, length);
1945 		skb->protocol = eth_type_trans(skb, netdev);
1946 		skb_record_rx_queue(skb, scrq_num);
1947 
1948 		if (flags & IBMVNIC_IP_CHKSUM_GOOD &&
1949 		    flags & IBMVNIC_TCP_UDP_CHKSUM_GOOD) {
1950 			skb->ip_summed = CHECKSUM_UNNECESSARY;
1951 		}
1952 
1953 		length = skb->len;
1954 		napi_gro_receive(napi, skb); /* send it up */
1955 		netdev->stats.rx_packets++;
1956 		netdev->stats.rx_bytes += length;
1957 		adapter->rx_stats_buffers[scrq_num].packets++;
1958 		adapter->rx_stats_buffers[scrq_num].bytes += length;
1959 		frames_processed++;
1960 	}
1961 
1962 	if (adapter->state != VNIC_CLOSING)
1963 		replenish_rx_pool(adapter, &adapter->rx_pool[scrq_num]);
1964 
1965 	if (frames_processed < budget) {
1966 		enable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
1967 		napi_complete_done(napi, frames_processed);
1968 		if (pending_scrq(adapter, adapter->rx_scrq[scrq_num]) &&
1969 		    napi_reschedule(napi)) {
1970 			disable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
1971 			goto restart_poll;
1972 		}
1973 	}
1974 	return frames_processed;
1975 }
1976 
1977 #ifdef CONFIG_NET_POLL_CONTROLLER
1978 static void ibmvnic_netpoll_controller(struct net_device *dev)
1979 {
1980 	struct ibmvnic_adapter *adapter = netdev_priv(dev);
1981 	int i;
1982 
	replenish_pools(adapter);
1984 	for (i = 0; i < adapter->req_rx_queues; i++)
1985 		ibmvnic_interrupt_rx(adapter->rx_scrq[i]->irq,
1986 				     adapter->rx_scrq[i]);
1987 }
1988 #endif
1989 
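/* Request a CHANGE_PARAM reset and block until it completes. If the
 * reset fails, restore the saved fallback parameters and retry once.
 */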
1990 static int wait_for_reset(struct ibmvnic_adapter *adapter)
1991 {
1992 	adapter->fallback.mtu = adapter->req_mtu;
1993 	adapter->fallback.rx_queues = adapter->req_rx_queues;
1994 	adapter->fallback.tx_queues = adapter->req_tx_queues;
1995 	adapter->fallback.rx_entries = adapter->req_rx_add_entries_per_subcrq;
1996 	adapter->fallback.tx_entries = adapter->req_tx_entries_per_subcrq;
1997 
1998 	init_completion(&adapter->reset_done);
1999 	ibmvnic_reset(adapter, VNIC_RESET_CHANGE_PARAM);
2000 	adapter->wait_for_reset = true;
2001 	wait_for_completion(&adapter->reset_done);
2002 
2003 	if (adapter->reset_done_rc) {
2004 		adapter->desired.mtu = adapter->fallback.mtu;
2005 		adapter->desired.rx_queues = adapter->fallback.rx_queues;
2006 		adapter->desired.tx_queues = adapter->fallback.tx_queues;
2007 		adapter->desired.rx_entries = adapter->fallback.rx_entries;
2008 		adapter->desired.tx_entries = adapter->fallback.tx_entries;
2009 
2010 		init_completion(&adapter->reset_done);
2011 		ibmvnic_reset(adapter, VNIC_RESET_CHANGE_PARAM);
2012 		wait_for_completion(&adapter->reset_done);
2013 	}
2014 	adapter->wait_for_reset = false;
2015 
2016 	return adapter->reset_done_rc;
2017 }
2018 
2019 static int ibmvnic_change_mtu(struct net_device *netdev, int new_mtu)
2020 {
2021 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2022 
2023 	adapter->desired.mtu = new_mtu + ETH_HLEN;
2024 
2025 	return wait_for_reset(adapter);
2026 }
2027 
2028 static const struct net_device_ops ibmvnic_netdev_ops = {
2029 	.ndo_open		= ibmvnic_open,
2030 	.ndo_stop		= ibmvnic_close,
2031 	.ndo_start_xmit		= ibmvnic_xmit,
2032 	.ndo_set_rx_mode	= ibmvnic_set_multi,
2033 	.ndo_set_mac_address	= ibmvnic_set_mac,
2034 	.ndo_validate_addr	= eth_validate_addr,
2035 	.ndo_tx_timeout		= ibmvnic_tx_timeout,
2036 #ifdef CONFIG_NET_POLL_CONTROLLER
2037 	.ndo_poll_controller	= ibmvnic_netpoll_controller,
2038 #endif
2039 	.ndo_change_mtu		= ibmvnic_change_mtu,
2040 };
2041 
2042 /* ethtool functions */
2043 
2044 static int ibmvnic_get_link_ksettings(struct net_device *netdev,
2045 				      struct ethtool_link_ksettings *cmd)
2046 {
2047 	u32 supported, advertising;
2048 
2049 	supported = (SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg |
2050 			  SUPPORTED_FIBRE);
2051 	advertising = (ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg |
2052 			    ADVERTISED_FIBRE);
2053 	cmd->base.speed = SPEED_1000;
2054 	cmd->base.duplex = DUPLEX_FULL;
2055 	cmd->base.port = PORT_FIBRE;
2056 	cmd->base.phy_address = 0;
2057 	cmd->base.autoneg = AUTONEG_ENABLE;
2058 
2059 	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
2060 						supported);
2061 	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
2062 						advertising);
2063 
2064 	return 0;
2065 }
2066 
2067 static void ibmvnic_get_drvinfo(struct net_device *netdev,
2068 				struct ethtool_drvinfo *info)
2069 {
2070 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2071 
2072 	strlcpy(info->driver, ibmvnic_driver_name, sizeof(info->driver));
2073 	strlcpy(info->version, IBMVNIC_DRIVER_VERSION, sizeof(info->version));
2074 	strlcpy(info->fw_version, adapter->fw_version,
2075 		sizeof(info->fw_version));
2076 }
2077 
2078 static u32 ibmvnic_get_msglevel(struct net_device *netdev)
2079 {
2080 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2081 
2082 	return adapter->msg_enable;
2083 }
2084 
2085 static void ibmvnic_set_msglevel(struct net_device *netdev, u32 data)
2086 {
2087 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2088 
2089 	adapter->msg_enable = data;
2090 }
2091 
2092 static u32 ibmvnic_get_link(struct net_device *netdev)
2093 {
2094 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2095 
2096 	/* Don't need to send a query because we request a logical link up at
2097 	 * init and then we wait for link state indications
2098 	 */
2099 	return adapter->logical_link_state;
2100 }
2101 
2102 static void ibmvnic_get_ringparam(struct net_device *netdev,
2103 				  struct ethtool_ringparam *ring)
2104 {
2105 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2106 
2107 	ring->rx_max_pending = adapter->max_rx_add_entries_per_subcrq;
2108 	ring->tx_max_pending = adapter->max_tx_entries_per_subcrq;
2109 	ring->rx_mini_max_pending = 0;
2110 	ring->rx_jumbo_max_pending = 0;
2111 	ring->rx_pending = adapter->req_rx_add_entries_per_subcrq;
2112 	ring->tx_pending = adapter->req_tx_entries_per_subcrq;
2113 	ring->rx_mini_pending = 0;
2114 	ring->rx_jumbo_pending = 0;
2115 }
2116 
2117 static int ibmvnic_set_ringparam(struct net_device *netdev,
2118 				 struct ethtool_ringparam *ring)
2119 {
2120 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2121 
	if (ring->rx_pending > adapter->max_rx_add_entries_per_subcrq ||
	    ring->tx_pending > adapter->max_tx_entries_per_subcrq) {
		netdev_err(netdev, "Invalid request.\n");
		netdev_err(netdev, "Max tx buffers = %llu\n",
			   adapter->max_tx_entries_per_subcrq);
		netdev_err(netdev, "Max rx buffers = %llu\n",
			   adapter->max_rx_add_entries_per_subcrq);
2129 		return -EINVAL;
2130 	}
2131 
2132 	adapter->desired.rx_entries = ring->rx_pending;
2133 	adapter->desired.tx_entries = ring->tx_pending;
2134 
2135 	return wait_for_reset(adapter);
2136 }
2137 
2138 static void ibmvnic_get_channels(struct net_device *netdev,
2139 				 struct ethtool_channels *channels)
2140 {
2141 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2142 
2143 	channels->max_rx = adapter->max_rx_queues;
2144 	channels->max_tx = adapter->max_tx_queues;
2145 	channels->max_other = 0;
2146 	channels->max_combined = 0;
2147 	channels->rx_count = adapter->req_rx_queues;
2148 	channels->tx_count = adapter->req_tx_queues;
2149 	channels->other_count = 0;
2150 	channels->combined_count = 0;
2151 }
2152 
2153 static int ibmvnic_set_channels(struct net_device *netdev,
2154 				struct ethtool_channels *channels)
2155 {
2156 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2157 
2158 	adapter->desired.rx_queues = channels->rx_count;
2159 	adapter->desired.tx_queues = channels->tx_count;
2160 
2161 	return wait_for_reset(adapter);
2162 }
2163 
2164 static void ibmvnic_get_strings(struct net_device *dev, u32 stringset, u8 *data)
2165 {
2166 	struct ibmvnic_adapter *adapter = netdev_priv(dev);
2167 	int i;
2168 
2169 	if (stringset != ETH_SS_STATS)
2170 		return;
2171 
2172 	for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++, data += ETH_GSTRING_LEN)
2173 		memcpy(data, ibmvnic_stats[i].name, ETH_GSTRING_LEN);
2174 
2175 	for (i = 0; i < adapter->req_tx_queues; i++) {
2176 		snprintf(data, ETH_GSTRING_LEN, "tx%d_packets", i);
2177 		data += ETH_GSTRING_LEN;
2178 
2179 		snprintf(data, ETH_GSTRING_LEN, "tx%d_bytes", i);
2180 		data += ETH_GSTRING_LEN;
2181 
2182 		snprintf(data, ETH_GSTRING_LEN, "tx%d_dropped_packets", i);
2183 		data += ETH_GSTRING_LEN;
2184 	}
2185 
2186 	for (i = 0; i < adapter->req_rx_queues; i++) {
2187 		snprintf(data, ETH_GSTRING_LEN, "rx%d_packets", i);
2188 		data += ETH_GSTRING_LEN;
2189 
2190 		snprintf(data, ETH_GSTRING_LEN, "rx%d_bytes", i);
2191 		data += ETH_GSTRING_LEN;
2192 
2193 		snprintf(data, ETH_GSTRING_LEN, "rx%d_interrupts", i);
2194 		data += ETH_GSTRING_LEN;
2195 	}
2196 }
2197 
2198 static int ibmvnic_get_sset_count(struct net_device *dev, int sset)
2199 {
2200 	struct ibmvnic_adapter *adapter = netdev_priv(dev);
2201 
2202 	switch (sset) {
2203 	case ETH_SS_STATS:
2204 		return ARRAY_SIZE(ibmvnic_stats) +
2205 		       adapter->req_tx_queues * NUM_TX_STATS +
2206 		       adapter->req_rx_queues * NUM_RX_STATS;
2207 	default:
2208 		return -EOPNOTSUPP;
2209 	}
2210 }
2211 
2212 static void ibmvnic_get_ethtool_stats(struct net_device *dev,
2213 				      struct ethtool_stats *stats, u64 *data)
2214 {
2215 	struct ibmvnic_adapter *adapter = netdev_priv(dev);
2216 	union ibmvnic_crq crq;
2217 	int i, j;
2218 
2219 	memset(&crq, 0, sizeof(crq));
2220 	crq.request_statistics.first = IBMVNIC_CRQ_CMD;
2221 	crq.request_statistics.cmd = REQUEST_STATISTICS;
2222 	crq.request_statistics.ioba = cpu_to_be32(adapter->stats_token);
2223 	crq.request_statistics.len =
2224 	    cpu_to_be32(sizeof(struct ibmvnic_statistics));
2225 
2226 	/* Wait for data to be written */
2227 	init_completion(&adapter->stats_done);
2228 	ibmvnic_send_crq(adapter, &crq);
2229 	wait_for_completion(&adapter->stats_done);
2230 
2231 	for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++)
2232 		data[i] = be64_to_cpu(IBMVNIC_GET_STAT(adapter,
2233 						ibmvnic_stats[i].offset));
2234 
2235 	for (j = 0; j < adapter->req_tx_queues; j++) {
2236 		data[i] = adapter->tx_stats_buffers[j].packets;
2237 		i++;
2238 		data[i] = adapter->tx_stats_buffers[j].bytes;
2239 		i++;
2240 		data[i] = adapter->tx_stats_buffers[j].dropped_packets;
2241 		i++;
2242 	}
2243 
2244 	for (j = 0; j < adapter->req_rx_queues; j++) {
2245 		data[i] = adapter->rx_stats_buffers[j].packets;
2246 		i++;
2247 		data[i] = adapter->rx_stats_buffers[j].bytes;
2248 		i++;
2249 		data[i] = adapter->rx_stats_buffers[j].interrupts;
2250 		i++;
2251 	}
2252 }
2253 
2254 static const struct ethtool_ops ibmvnic_ethtool_ops = {
2255 	.get_drvinfo		= ibmvnic_get_drvinfo,
2256 	.get_msglevel		= ibmvnic_get_msglevel,
2257 	.set_msglevel		= ibmvnic_set_msglevel,
2258 	.get_link		= ibmvnic_get_link,
2259 	.get_ringparam		= ibmvnic_get_ringparam,
2260 	.set_ringparam		= ibmvnic_set_ringparam,
2261 	.get_channels		= ibmvnic_get_channels,
2262 	.set_channels		= ibmvnic_set_channels,
2263 	.get_strings            = ibmvnic_get_strings,
2264 	.get_sset_count         = ibmvnic_get_sset_count,
2265 	.get_ethtool_stats	= ibmvnic_get_ethtool_stats,
2266 	.get_link_ksettings	= ibmvnic_get_link_ksettings,
2267 };
2268 
2269 /* Routines for managing CRQs/sCRQs  */
2270 
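/* Re-register a single sub-CRQ with the hypervisor: release its IRQ
 * mapping, clear its message page, and issue H_REG_SUB_CRQ again
 * using the existing DMA mapping.
 */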
2271 static int reset_one_sub_crq_queue(struct ibmvnic_adapter *adapter,
2272 				   struct ibmvnic_sub_crq_queue *scrq)
2273 {
2274 	int rc;
2275 
2276 	if (scrq->irq) {
2277 		free_irq(scrq->irq, scrq);
2278 		irq_dispose_mapping(scrq->irq);
2279 		scrq->irq = 0;
2280 	}
2281 
2282 	memset(scrq->msgs, 0, 4 * PAGE_SIZE);
2283 	scrq->cur = 0;
2284 
2285 	rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token,
2286 			   4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq);
2287 	return rc;
2288 }
2289 
2290 static int reset_sub_crq_queues(struct ibmvnic_adapter *adapter)
2291 {
2292 	int i, rc;
2293 
2294 	for (i = 0; i < adapter->req_tx_queues; i++) {
2295 		netdev_dbg(adapter->netdev, "Re-setting tx_scrq[%d]\n", i);
2296 		rc = reset_one_sub_crq_queue(adapter, adapter->tx_scrq[i]);
2297 		if (rc)
2298 			return rc;
2299 	}
2300 
2301 	for (i = 0; i < adapter->req_rx_queues; i++) {
2302 		netdev_dbg(adapter->netdev, "Re-setting rx_scrq[%d]\n", i);
2303 		rc = reset_one_sub_crq_queue(adapter, adapter->rx_scrq[i]);
2304 		if (rc)
2305 			return rc;
2306 	}
2307 
	return 0;
2309 }
2310 
2311 static void release_sub_crq_queue(struct ibmvnic_adapter *adapter,
2312 				  struct ibmvnic_sub_crq_queue *scrq,
2313 				  bool do_h_free)
2314 {
2315 	struct device *dev = &adapter->vdev->dev;
2316 	long rc;
2317 
2318 	netdev_dbg(adapter->netdev, "Releasing sub-CRQ\n");
2319 
2320 	if (do_h_free) {
2321 		/* Close the sub-crqs */
2322 		do {
2323 			rc = plpar_hcall_norets(H_FREE_SUB_CRQ,
2324 						adapter->vdev->unit_address,
2325 						scrq->crq_num);
2326 		} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
2327 
2328 		if (rc) {
2329 			netdev_err(adapter->netdev,
2330 				   "Failed to release sub-CRQ %16lx, rc = %ld\n",
2331 				   scrq->crq_num, rc);
2332 		}
2333 	}
2334 
2335 	dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
2336 			 DMA_BIDIRECTIONAL);
2337 	free_pages((unsigned long)scrq->msgs, 2);
2338 	kfree(scrq);
2339 }
2340 
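/* Allocate and register one sub-CRQ. A four-page message area is DMA
 * mapped and registered via H_REG_SUB_CRQ, which returns the queue
 * number and the hardware IRQ that will signal completions.
 */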
2341 static struct ibmvnic_sub_crq_queue *init_sub_crq_queue(struct ibmvnic_adapter
2342 							*adapter)
2343 {
2344 	struct device *dev = &adapter->vdev->dev;
2345 	struct ibmvnic_sub_crq_queue *scrq;
2346 	int rc;
2347 
2348 	scrq = kzalloc(sizeof(*scrq), GFP_KERNEL);
2349 	if (!scrq)
2350 		return NULL;
2351 
2352 	scrq->msgs =
2353 		(union sub_crq *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 2);
2354 	if (!scrq->msgs) {
2355 		dev_warn(dev, "Couldn't allocate crq queue messages page\n");
2356 		goto zero_page_failed;
2357 	}
2358 
2359 	scrq->msg_token = dma_map_single(dev, scrq->msgs, 4 * PAGE_SIZE,
2360 					 DMA_BIDIRECTIONAL);
2361 	if (dma_mapping_error(dev, scrq->msg_token)) {
2362 		dev_warn(dev, "Couldn't map crq queue messages page\n");
2363 		goto map_failed;
2364 	}
2365 
2366 	rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token,
2367 			   4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq);
2368 
2369 	if (rc == H_RESOURCE)
2370 		rc = ibmvnic_reset_crq(adapter);
2371 
2372 	if (rc == H_CLOSED) {
2373 		dev_warn(dev, "Partner adapter not ready, waiting.\n");
2374 	} else if (rc) {
2375 		dev_warn(dev, "Error %d registering sub-crq\n", rc);
2376 		goto reg_failed;
2377 	}
2378 
2379 	scrq->adapter = adapter;
2380 	scrq->size = 4 * PAGE_SIZE / sizeof(*scrq->msgs);
2381 	spin_lock_init(&scrq->lock);
2382 
2383 	netdev_dbg(adapter->netdev,
2384 		   "sub-crq initialized, num %lx, hw_irq=%lx, irq=%x\n",
2385 		   scrq->crq_num, scrq->hw_irq, scrq->irq);
2386 
2387 	return scrq;
2388 
2389 reg_failed:
2390 	dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
2391 			 DMA_BIDIRECTIONAL);
2392 map_failed:
2393 	free_pages((unsigned long)scrq->msgs, 2);
2394 zero_page_failed:
2395 	kfree(scrq);
2396 
2397 	return NULL;
2398 }
2399 
2400 static void release_sub_crqs(struct ibmvnic_adapter *adapter, bool do_h_free)
2401 {
2402 	int i;
2403 
2404 	if (adapter->tx_scrq) {
2405 		for (i = 0; i < adapter->num_active_tx_scrqs; i++) {
2406 			if (!adapter->tx_scrq[i])
2407 				continue;
2408 
2409 			netdev_dbg(adapter->netdev, "Releasing tx_scrq[%d]\n",
2410 				   i);
2411 			if (adapter->tx_scrq[i]->irq) {
2412 				free_irq(adapter->tx_scrq[i]->irq,
2413 					 adapter->tx_scrq[i]);
2414 				irq_dispose_mapping(adapter->tx_scrq[i]->irq);
2415 				adapter->tx_scrq[i]->irq = 0;
2416 			}
2417 
2418 			release_sub_crq_queue(adapter, adapter->tx_scrq[i],
2419 					      do_h_free);
2420 		}
2421 
2422 		kfree(adapter->tx_scrq);
2423 		adapter->tx_scrq = NULL;
2424 		adapter->num_active_tx_scrqs = 0;
2425 	}
2426 
2427 	if (adapter->rx_scrq) {
2428 		for (i = 0; i < adapter->num_active_rx_scrqs; i++) {
2429 			if (!adapter->rx_scrq[i])
2430 				continue;
2431 
2432 			netdev_dbg(adapter->netdev, "Releasing rx_scrq[%d]\n",
2433 				   i);
2434 			if (adapter->rx_scrq[i]->irq) {
2435 				free_irq(adapter->rx_scrq[i]->irq,
2436 					 adapter->rx_scrq[i]);
2437 				irq_dispose_mapping(adapter->rx_scrq[i]->irq);
2438 				adapter->rx_scrq[i]->irq = 0;
2439 			}
2440 
2441 			release_sub_crq_queue(adapter, adapter->rx_scrq[i],
2442 					      do_h_free);
2443 		}
2444 
2445 		kfree(adapter->rx_scrq);
2446 		adapter->rx_scrq = NULL;
2447 		adapter->num_active_rx_scrqs = 0;
2448 	}
2449 }
2450 
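/* Sub-CRQ interrupts are masked and unmasked with the H_VIOCTL hcall
 * (H_DISABLE_VIO_INTERRUPT/H_ENABLE_VIO_INTERRUPT) rather than the
 * generic IRQ machinery, keyed by the queue's hardware IRQ number.
 */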
2451 static int disable_scrq_irq(struct ibmvnic_adapter *adapter,
2452 			    struct ibmvnic_sub_crq_queue *scrq)
2453 {
2454 	struct device *dev = &adapter->vdev->dev;
2455 	unsigned long rc;
2456 
2457 	rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
2458 				H_DISABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0);
2459 	if (rc)
2460 		dev_err(dev, "Couldn't disable scrq irq 0x%lx. rc=%ld\n",
2461 			scrq->hw_irq, rc);
2462 	return rc;
2463 }
2464 
2465 static int enable_scrq_irq(struct ibmvnic_adapter *adapter,
2466 			   struct ibmvnic_sub_crq_queue *scrq)
2467 {
2468 	struct device *dev = &adapter->vdev->dev;
2469 	unsigned long rc;
2470 
2471 	if (scrq->hw_irq > 0x100000000ULL) {
2472 		dev_err(dev, "bad hw_irq = %lx\n", scrq->hw_irq);
2473 		return 1;
2474 	}
2475 
2476 	rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
2477 				H_ENABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0);
2478 	if (rc)
2479 		dev_err(dev, "Couldn't enable scrq irq 0x%lx. rc=%ld\n",
2480 			scrq->hw_irq, rc);
2481 	return rc;
2482 }
2483 
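/* Reap transmit completions from a tx sub-CRQ: unmap indirect
 * descriptor buffers, free completed skbs, return slots to the tx
 * pool, and wake the subqueue once it drains to half capacity.
 */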
2484 static int ibmvnic_complete_tx(struct ibmvnic_adapter *adapter,
2485 			       struct ibmvnic_sub_crq_queue *scrq)
2486 {
2487 	struct device *dev = &adapter->vdev->dev;
2488 	struct ibmvnic_tx_buff *txbuff;
2489 	union sub_crq *next;
2490 	int index;
2491 	int i, j;
2492 	u8 first;
2493 
2494 restart_loop:
2495 	while (pending_scrq(adapter, scrq)) {
2496 		unsigned int pool = scrq->pool_index;
2497 		int num_entries = 0;
2498 
2499 		next = ibmvnic_next_scrq(adapter, scrq);
2500 		for (i = 0; i < next->tx_comp.num_comps; i++) {
2501 			if (next->tx_comp.rcs[i]) {
2502 				dev_err(dev, "tx error %x\n",
2503 					next->tx_comp.rcs[i]);
2504 				continue;
2505 			}
2506 			index = be32_to_cpu(next->tx_comp.correlators[i]);
2507 			txbuff = &adapter->tx_pool[pool].tx_buff[index];
2508 
2509 			for (j = 0; j < IBMVNIC_MAX_FRAGS_PER_CRQ; j++) {
2510 				if (!txbuff->data_dma[j])
2511 					continue;
2512 
2513 				txbuff->data_dma[j] = 0;
2514 			}
2515 			/* if sub_crq was sent indirectly */
2516 			first = txbuff->indir_arr[0].generic.first;
2517 			if (first == IBMVNIC_CRQ_CMD) {
2518 				dma_unmap_single(dev, txbuff->indir_dma,
2519 						 sizeof(txbuff->indir_arr),
2520 						 DMA_TO_DEVICE);
2521 			}
2522 
2523 			if (txbuff->last_frag) {
2524 				dev_kfree_skb_any(txbuff->skb);
2525 				txbuff->skb = NULL;
2526 			}
2527 
2528 			num_entries += txbuff->num_entries;
2529 
2530 			adapter->tx_pool[pool].free_map[adapter->tx_pool[pool].
2531 						     producer_index] = index;
2532 			adapter->tx_pool[pool].producer_index =
2533 			    (adapter->tx_pool[pool].producer_index + 1) %
2534 			    adapter->req_tx_entries_per_subcrq;
2535 		}
		/* mark the tx_comp descriptor as consumed */
2537 		next->tx_comp.first = 0;
2538 
2539 		if (atomic_sub_return(num_entries, &scrq->used) <=
2540 		    (adapter->req_tx_entries_per_subcrq / 2) &&
2541 		    __netif_subqueue_stopped(adapter->netdev,
2542 					     scrq->pool_index)) {
2543 			netif_wake_subqueue(adapter->netdev, scrq->pool_index);
2544 			netdev_info(adapter->netdev, "Started queue %d\n",
2545 				    scrq->pool_index);
2546 		}
2547 	}
2548 
2549 	enable_scrq_irq(adapter, scrq);
2550 
2551 	if (pending_scrq(adapter, scrq)) {
2552 		disable_scrq_irq(adapter, scrq);
2553 		goto restart_loop;
2554 	}
2555 
2556 	return 0;
2557 }
2558 
2559 static irqreturn_t ibmvnic_interrupt_tx(int irq, void *instance)
2560 {
2561 	struct ibmvnic_sub_crq_queue *scrq = instance;
2562 	struct ibmvnic_adapter *adapter = scrq->adapter;
2563 
2564 	disable_scrq_irq(adapter, scrq);
2565 	ibmvnic_complete_tx(adapter, scrq);
2566 
2567 	return IRQ_HANDLED;
2568 }
2569 
2570 static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance)
2571 {
2572 	struct ibmvnic_sub_crq_queue *scrq = instance;
2573 	struct ibmvnic_adapter *adapter = scrq->adapter;
2574 
2575 	/* When booting a kdump kernel we can hit pending interrupts
2576 	 * prior to completing driver initialization.
2577 	 */
2578 	if (unlikely(adapter->state != VNIC_OPEN))
2579 		return IRQ_NONE;
2580 
2581 	adapter->rx_stats_buffers[scrq->scrq_num].interrupts++;
2582 
2583 	if (napi_schedule_prep(&adapter->napi[scrq->scrq_num])) {
2584 		disable_scrq_irq(adapter, scrq);
2585 		__napi_schedule(&adapter->napi[scrq->scrq_num]);
2586 	}
2587 
2588 	return IRQ_HANDLED;
2589 }
2590 
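/* Create IRQ mappings for all tx and rx sub-CRQs and request their
 * interrupt handlers. On failure, IRQs registered so far are
 * released and every sub-CRQ is freed.
 */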
2591 static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter)
2592 {
2593 	struct device *dev = &adapter->vdev->dev;
2594 	struct ibmvnic_sub_crq_queue *scrq;
2595 	int i = 0, j = 0;
2596 	int rc = 0;
2597 
2598 	for (i = 0; i < adapter->req_tx_queues; i++) {
2599 		netdev_dbg(adapter->netdev, "Initializing tx_scrq[%d] irq\n",
2600 			   i);
2601 		scrq = adapter->tx_scrq[i];
2602 		scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
2603 
2604 		if (!scrq->irq) {
2605 			rc = -EINVAL;
2606 			dev_err(dev, "Error mapping irq\n");
2607 			goto req_tx_irq_failed;
2608 		}
2609 
2610 		rc = request_irq(scrq->irq, ibmvnic_interrupt_tx,
2611 				 0, "ibmvnic_tx", scrq);
2612 
2613 		if (rc) {
2614 			dev_err(dev, "Couldn't register tx irq 0x%x. rc=%d\n",
2615 				scrq->irq, rc);
2616 			irq_dispose_mapping(scrq->irq);
2617 			goto req_tx_irq_failed;
2618 		}
2619 	}
2620 
2621 	for (i = 0; i < adapter->req_rx_queues; i++) {
2622 		netdev_dbg(adapter->netdev, "Initializing rx_scrq[%d] irq\n",
2623 			   i);
2624 		scrq = adapter->rx_scrq[i];
2625 		scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
2626 		if (!scrq->irq) {
2627 			rc = -EINVAL;
2628 			dev_err(dev, "Error mapping irq\n");
2629 			goto req_rx_irq_failed;
2630 		}
2631 		rc = request_irq(scrq->irq, ibmvnic_interrupt_rx,
2632 				 0, "ibmvnic_rx", scrq);
2633 		if (rc) {
2634 			dev_err(dev, "Couldn't register rx irq 0x%x. rc=%d\n",
2635 				scrq->irq, rc);
2636 			irq_dispose_mapping(scrq->irq);
2637 			goto req_rx_irq_failed;
2638 		}
2639 	}
2640 	return rc;
2641 
2642 req_rx_irq_failed:
2643 	for (j = 0; j < i; j++) {
2644 		free_irq(adapter->rx_scrq[j]->irq, adapter->rx_scrq[j]);
2645 		irq_dispose_mapping(adapter->rx_scrq[j]->irq);
2646 	}
2647 	i = adapter->req_tx_queues;
2648 req_tx_irq_failed:
2649 	for (j = 0; j < i; j++) {
2650 		free_irq(adapter->tx_scrq[j]->irq, adapter->tx_scrq[j]);
		irq_dispose_mapping(adapter->tx_scrq[j]->irq);
2652 	}
2653 	release_sub_crqs(adapter, 1);
2654 	return rc;
2655 }
2656 
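/* Allocate the requested number of tx and rx sub-CRQs. If fewer
 * queues could be registered than requested, spread the shortfall
 * across the tx and rx counts without going below the minimums
 * advertised by the server.
 */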
2657 static int init_sub_crqs(struct ibmvnic_adapter *adapter)
2658 {
2659 	struct device *dev = &adapter->vdev->dev;
2660 	struct ibmvnic_sub_crq_queue **allqueues;
2661 	int registered_queues = 0;
2662 	int total_queues;
2663 	int more = 0;
2664 	int i;
2665 
2666 	total_queues = adapter->req_tx_queues + adapter->req_rx_queues;
2667 
2668 	allqueues = kcalloc(total_queues, sizeof(*allqueues), GFP_KERNEL);
2669 	if (!allqueues)
2670 		return -1;
2671 
2672 	for (i = 0; i < total_queues; i++) {
2673 		allqueues[i] = init_sub_crq_queue(adapter);
2674 		if (!allqueues[i]) {
2675 			dev_warn(dev, "Couldn't allocate all sub-crqs\n");
2676 			break;
2677 		}
2678 		registered_queues++;
2679 	}
2680 
2681 	/* Make sure we were able to register the minimum number of queues */
2682 	if (registered_queues <
2683 	    adapter->min_tx_queues + adapter->min_rx_queues) {
		dev_err(dev, "Fatal: Couldn't init min number of sub-crqs\n");
2685 		goto tx_failed;
2686 	}
2687 
	/* Distribute the shortfall from failed queue allocations */
	for (i = 0; i < total_queues - registered_queues + more; i++) {
2690 		netdev_dbg(adapter->netdev, "Reducing number of queues\n");
2691 		switch (i % 3) {
2692 		case 0:
2693 			if (adapter->req_rx_queues > adapter->min_rx_queues)
2694 				adapter->req_rx_queues--;
2695 			else
2696 				more++;
2697 			break;
2698 		case 1:
2699 			if (adapter->req_tx_queues > adapter->min_tx_queues)
2700 				adapter->req_tx_queues--;
2701 			else
2702 				more++;
2703 			break;
2704 		}
2705 	}
2706 
2707 	adapter->tx_scrq = kcalloc(adapter->req_tx_queues,
2708 				   sizeof(*adapter->tx_scrq), GFP_KERNEL);
2709 	if (!adapter->tx_scrq)
2710 		goto tx_failed;
2711 
2712 	for (i = 0; i < adapter->req_tx_queues; i++) {
2713 		adapter->tx_scrq[i] = allqueues[i];
2714 		adapter->tx_scrq[i]->pool_index = i;
2715 		adapter->num_active_tx_scrqs++;
2716 	}
2717 
2718 	adapter->rx_scrq = kcalloc(adapter->req_rx_queues,
2719 				   sizeof(*adapter->rx_scrq), GFP_KERNEL);
2720 	if (!adapter->rx_scrq)
2721 		goto rx_failed;
2722 
2723 	for (i = 0; i < adapter->req_rx_queues; i++) {
2724 		adapter->rx_scrq[i] = allqueues[i + adapter->req_tx_queues];
2725 		adapter->rx_scrq[i]->scrq_num = i;
2726 		adapter->num_active_rx_scrqs++;
2727 	}
2728 
2729 	kfree(allqueues);
2730 	return 0;
2731 
2732 rx_failed:
2733 	kfree(adapter->tx_scrq);
2734 	adapter->tx_scrq = NULL;
2735 tx_failed:
2736 	for (i = 0; i < registered_queues; i++)
2737 		release_sub_crq_queue(adapter, allqueues[i], 1);
2738 	kfree(allqueues);
2739 	return -1;
2740 }
2741 
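/* Choose the capabilities to request (queue counts, ring sizes, MTU,
 * promiscuous mode) from the desired values, device defaults, and
 * the limits imposed by the long term buffer size, then send one
 * REQUEST_CAPABILITY CRQ per capability.
 */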
2742 static void ibmvnic_send_req_caps(struct ibmvnic_adapter *adapter, int retry)
2743 {
2744 	struct device *dev = &adapter->vdev->dev;
2745 	union ibmvnic_crq crq;
2746 	int max_entries;
2747 
2748 	if (!retry) {
		/* Sub-CRQ entries are 32 bytes long */
2750 		int entries_page = 4 * PAGE_SIZE / (sizeof(u64) * 4);
2751 
2752 		if (adapter->min_tx_entries_per_subcrq > entries_page ||
2753 		    adapter->min_rx_add_entries_per_subcrq > entries_page) {
2754 			dev_err(dev, "Fatal, invalid entries per sub-crq\n");
2755 			return;
2756 		}
2757 
2758 		if (adapter->desired.mtu)
2759 			adapter->req_mtu = adapter->desired.mtu;
2760 		else
2761 			adapter->req_mtu = adapter->netdev->mtu + ETH_HLEN;
2762 
2763 		if (!adapter->desired.tx_entries)
2764 			adapter->desired.tx_entries =
2765 					adapter->max_tx_entries_per_subcrq;
2766 		if (!adapter->desired.rx_entries)
2767 			adapter->desired.rx_entries =
2768 					adapter->max_rx_add_entries_per_subcrq;
2769 
2770 		max_entries = IBMVNIC_MAX_LTB_SIZE /
2771 			      (adapter->req_mtu + IBMVNIC_BUFFER_HLEN);
2772 
2773 		if ((adapter->req_mtu + IBMVNIC_BUFFER_HLEN) *
2774 			adapter->desired.tx_entries > IBMVNIC_MAX_LTB_SIZE) {
2775 			adapter->desired.tx_entries = max_entries;
2776 		}
2777 
2778 		if ((adapter->req_mtu + IBMVNIC_BUFFER_HLEN) *
2779 			adapter->desired.rx_entries > IBMVNIC_MAX_LTB_SIZE) {
2780 			adapter->desired.rx_entries = max_entries;
2781 		}
2782 
2783 		if (adapter->desired.tx_entries)
2784 			adapter->req_tx_entries_per_subcrq =
2785 					adapter->desired.tx_entries;
2786 		else
2787 			adapter->req_tx_entries_per_subcrq =
2788 					adapter->max_tx_entries_per_subcrq;
2789 
2790 		if (adapter->desired.rx_entries)
2791 			adapter->req_rx_add_entries_per_subcrq =
2792 					adapter->desired.rx_entries;
2793 		else
2794 			adapter->req_rx_add_entries_per_subcrq =
2795 					adapter->max_rx_add_entries_per_subcrq;
2796 
2797 		if (adapter->desired.tx_queues)
2798 			adapter->req_tx_queues =
2799 					adapter->desired.tx_queues;
2800 		else
2801 			adapter->req_tx_queues =
2802 					adapter->opt_tx_comp_sub_queues;
2803 
2804 		if (adapter->desired.rx_queues)
2805 			adapter->req_rx_queues =
2806 					adapter->desired.rx_queues;
2807 		else
2808 			adapter->req_rx_queues =
2809 					adapter->opt_rx_comp_queues;
2810 
2811 		adapter->req_rx_add_queues = adapter->max_rx_add_queues;
2812 	}
2813 
2814 	memset(&crq, 0, sizeof(crq));
2815 	crq.request_capability.first = IBMVNIC_CRQ_CMD;
2816 	crq.request_capability.cmd = REQUEST_CAPABILITY;
2817 
2818 	crq.request_capability.capability = cpu_to_be16(REQ_TX_QUEUES);
2819 	crq.request_capability.number = cpu_to_be64(adapter->req_tx_queues);
2820 	atomic_inc(&adapter->running_cap_crqs);
2821 	ibmvnic_send_crq(adapter, &crq);
2822 
2823 	crq.request_capability.capability = cpu_to_be16(REQ_RX_QUEUES);
2824 	crq.request_capability.number = cpu_to_be64(adapter->req_rx_queues);
2825 	atomic_inc(&adapter->running_cap_crqs);
2826 	ibmvnic_send_crq(adapter, &crq);
2827 
2828 	crq.request_capability.capability = cpu_to_be16(REQ_RX_ADD_QUEUES);
2829 	crq.request_capability.number = cpu_to_be64(adapter->req_rx_add_queues);
2830 	atomic_inc(&adapter->running_cap_crqs);
2831 	ibmvnic_send_crq(adapter, &crq);
2832 
2833 	crq.request_capability.capability =
2834 	    cpu_to_be16(REQ_TX_ENTRIES_PER_SUBCRQ);
2835 	crq.request_capability.number =
2836 	    cpu_to_be64(adapter->req_tx_entries_per_subcrq);
2837 	atomic_inc(&adapter->running_cap_crqs);
2838 	ibmvnic_send_crq(adapter, &crq);
2839 
2840 	crq.request_capability.capability =
2841 	    cpu_to_be16(REQ_RX_ADD_ENTRIES_PER_SUBCRQ);
2842 	crq.request_capability.number =
2843 	    cpu_to_be64(adapter->req_rx_add_entries_per_subcrq);
2844 	atomic_inc(&adapter->running_cap_crqs);
2845 	ibmvnic_send_crq(adapter, &crq);
2846 
2847 	crq.request_capability.capability = cpu_to_be16(REQ_MTU);
2848 	crq.request_capability.number = cpu_to_be64(adapter->req_mtu);
2849 	atomic_inc(&adapter->running_cap_crqs);
2850 	ibmvnic_send_crq(adapter, &crq);
2851 
2852 	if (adapter->netdev->flags & IFF_PROMISC) {
2853 		if (adapter->promisc_supported) {
2854 			crq.request_capability.capability =
2855 			    cpu_to_be16(PROMISC_REQUESTED);
2856 			crq.request_capability.number = cpu_to_be64(1);
2857 			atomic_inc(&adapter->running_cap_crqs);
2858 			ibmvnic_send_crq(adapter, &crq);
2859 		}
2860 	} else {
2861 		crq.request_capability.capability =
2862 		    cpu_to_be16(PROMISC_REQUESTED);
2863 		crq.request_capability.number = cpu_to_be64(0);
2864 		atomic_inc(&adapter->running_cap_crqs);
2865 		ibmvnic_send_crq(adapter, &crq);
2866 	}
2867 }
2868 
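/* Return 1 if the entry at the sub-CRQ's current index has been
 * filled in by the server, 0 otherwise.
 */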
2869 static int pending_scrq(struct ibmvnic_adapter *adapter,
2870 			struct ibmvnic_sub_crq_queue *scrq)
2871 {
2872 	union sub_crq *entry = &scrq->msgs[scrq->cur];
2873 
2874 	if (entry->generic.first & IBMVNIC_CRQ_CMD_RSP)
2875 		return 1;
2876 	else
2877 		return 0;
2878 }
2879 
2880 static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *adapter,
2881 					struct ibmvnic_sub_crq_queue *scrq)
2882 {
2883 	union sub_crq *entry;
2884 	unsigned long flags;
2885 
2886 	spin_lock_irqsave(&scrq->lock, flags);
2887 	entry = &scrq->msgs[scrq->cur];
2888 	if (entry->generic.first & IBMVNIC_CRQ_CMD_RSP) {
2889 		if (++scrq->cur == scrq->size)
2890 			scrq->cur = 0;
2891 	} else {
2892 		entry = NULL;
2893 	}
2894 	spin_unlock_irqrestore(&scrq->lock, flags);
2895 
2896 	return entry;
2897 }
2898 
2899 static union ibmvnic_crq *ibmvnic_next_crq(struct ibmvnic_adapter *adapter)
2900 {
2901 	struct ibmvnic_crq_queue *queue = &adapter->crq;
2902 	union ibmvnic_crq *crq;
2903 
2904 	crq = &queue->msgs[queue->cur];
2905 	if (crq->generic.first & IBMVNIC_CRQ_CMD_RSP) {
2906 		if (++queue->cur == queue->size)
2907 			queue->cur = 0;
2908 	} else {
2909 		crq = NULL;
2910 	}
2911 
2912 	return crq;
2913 }
2914 
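/* Pass one sub-CRQ descriptor to the hypervisor for the remote queue
 * identified by remote_handle. The memory barrier makes the
 * descriptor globally visible before the hcall is issued.
 */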
2915 static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle,
2916 		       union sub_crq *sub_crq)
2917 {
2918 	unsigned int ua = adapter->vdev->unit_address;
2919 	struct device *dev = &adapter->vdev->dev;
2920 	u64 *u64_crq = (u64 *)sub_crq;
2921 	int rc;
2922 
2923 	netdev_dbg(adapter->netdev,
2924 		   "Sending sCRQ %016lx: %016lx %016lx %016lx %016lx\n",
2925 		   (unsigned long int)cpu_to_be64(remote_handle),
2926 		   (unsigned long int)cpu_to_be64(u64_crq[0]),
2927 		   (unsigned long int)cpu_to_be64(u64_crq[1]),
2928 		   (unsigned long int)cpu_to_be64(u64_crq[2]),
2929 		   (unsigned long int)cpu_to_be64(u64_crq[3]));
2930 
2931 	/* Make sure the hypervisor sees the complete request */
2932 	mb();
2933 
2934 	rc = plpar_hcall_norets(H_SEND_SUB_CRQ, ua,
2935 				cpu_to_be64(remote_handle),
2936 				cpu_to_be64(u64_crq[0]),
2937 				cpu_to_be64(u64_crq[1]),
2938 				cpu_to_be64(u64_crq[2]),
2939 				cpu_to_be64(u64_crq[3]));
2940 
2941 	if (rc) {
2942 		if (rc == H_CLOSED)
2943 			dev_warn(dev, "CRQ Queue closed\n");
2944 		dev_err(dev, "Send error (rc=%d)\n", rc);
2945 	}
2946 
2947 	return rc;
2948 }
2949 
2950 static int send_subcrq_indirect(struct ibmvnic_adapter *adapter,
2951 				u64 remote_handle, u64 ioba, u64 num_entries)
2952 {
2953 	unsigned int ua = adapter->vdev->unit_address;
2954 	struct device *dev = &adapter->vdev->dev;
2955 	int rc;
2956 
2957 	/* Make sure the hypervisor sees the complete request */
2958 	mb();
2959 	rc = plpar_hcall_norets(H_SEND_SUB_CRQ_INDIRECT, ua,
2960 				cpu_to_be64(remote_handle),
2961 				ioba, num_entries);
2962 
2963 	if (rc) {
2964 		if (rc == H_CLOSED)
2965 			dev_warn(dev, "CRQ Queue closed\n");
2966 		dev_err(dev, "Send (indirect) error (rc=%d)\n", rc);
2967 	}
2968 
2969 	return rc;
2970 }
2971 
2972 static int ibmvnic_send_crq(struct ibmvnic_adapter *adapter,
2973 			    union ibmvnic_crq *crq)
2974 {
2975 	unsigned int ua = adapter->vdev->unit_address;
2976 	struct device *dev = &adapter->vdev->dev;
2977 	u64 *u64_crq = (u64 *)crq;
2978 	int rc;
2979 
2980 	netdev_dbg(adapter->netdev, "Sending CRQ: %016lx %016lx\n",
2981 		   (unsigned long int)cpu_to_be64(u64_crq[0]),
2982 		   (unsigned long int)cpu_to_be64(u64_crq[1]));
2983 
2984 	/* Make sure the hypervisor sees the complete request */
2985 	mb();
2986 
2987 	rc = plpar_hcall_norets(H_SEND_CRQ, ua,
2988 				cpu_to_be64(u64_crq[0]),
2989 				cpu_to_be64(u64_crq[1]));
2990 
2991 	if (rc) {
2992 		if (rc == H_CLOSED) {
2993 			dev_warn(dev, "CRQ Queue closed\n");
2994 			if (adapter->resetting)
2995 				ibmvnic_reset(adapter, VNIC_RESET_FATAL);
2996 		}
2997 
2998 		dev_warn(dev, "Send error (rc=%d)\n", rc);
2999 	}
3000 
3001 	return rc;
3002 }
3003 
3004 static int ibmvnic_send_crq_init(struct ibmvnic_adapter *adapter)
3005 {
3006 	union ibmvnic_crq crq;
3007 
3008 	memset(&crq, 0, sizeof(crq));
3009 	crq.generic.first = IBMVNIC_CRQ_INIT_CMD;
3010 	crq.generic.cmd = IBMVNIC_CRQ_INIT;
3011 	netdev_dbg(adapter->netdev, "Sending CRQ init\n");
3012 
3013 	return ibmvnic_send_crq(adapter, &crq);
3014 }
3015 
3016 static int send_version_xchg(struct ibmvnic_adapter *adapter)
3017 {
3018 	union ibmvnic_crq crq;
3019 
3020 	memset(&crq, 0, sizeof(crq));
3021 	crq.version_exchange.first = IBMVNIC_CRQ_CMD;
3022 	crq.version_exchange.cmd = VERSION_EXCHANGE;
3023 	crq.version_exchange.version = cpu_to_be16(ibmvnic_version);
3024 
3025 	return ibmvnic_send_crq(adapter, &crq);
3026 }
3027 
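/* TLV-style entry used to carry client identification strings (OS
 * name, LPAR name, device name) in the login buffer. @name is the
 * first byte of a NUL-terminated string of @len bytes.
 */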
3028 struct vnic_login_client_data {
3029 	u8	type;
3030 	__be16	len;
3031 	char	name;
3032 } __packed;
3033 
3034 static int vnic_client_data_len(struct ibmvnic_adapter *adapter)
3035 {
3036 	int len;
3037 
3038 	/* Calculate the amount of buffer space needed for the
3039 	 * vnic client data in the login buffer. There are four entries,
3040 	 * OS name, LPAR name, device name, and a null last entry.
3041 	 */
3042 	len = 4 * sizeof(struct vnic_login_client_data);
3043 	len += 6; /* "Linux" plus NULL */
3044 	len += strlen(utsname()->nodename) + 1;
3045 	len += strlen(adapter->netdev->name) + 1;
3046 
3047 	return len;
3048 }
3049 
3050 static void vnic_add_client_data(struct ibmvnic_adapter *adapter,
3051 				 struct vnic_login_client_data *vlcd)
3052 {
3053 	const char *os_name = "Linux";
3054 	int len;
3055 
3056 	/* Type 1 - LPAR OS */
3057 	vlcd->type = 1;
3058 	len = strlen(os_name) + 1;
3059 	vlcd->len = cpu_to_be16(len);
3060 	strncpy(&vlcd->name, os_name, len);
3061 	vlcd = (struct vnic_login_client_data *)((char *)&vlcd->name + len);
3062 
3063 	/* Type 2 - LPAR name */
3064 	vlcd->type = 2;
3065 	len = strlen(utsname()->nodename) + 1;
3066 	vlcd->len = cpu_to_be16(len);
3067 	strncpy(&vlcd->name, utsname()->nodename, len);
3068 	vlcd = (struct vnic_login_client_data *)((char *)&vlcd->name + len);
3069 
3070 	/* Type 3 - device name */
3071 	vlcd->type = 3;
3072 	len = strlen(adapter->netdev->name) + 1;
3073 	vlcd->len = cpu_to_be16(len);
3074 	strncpy(&vlcd->name, adapter->netdev->name, len);
3075 }
3076 
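/* Build and send the LOGIN request. The login buffer carries the
 * sub-CRQ numbers of every tx and rx queue plus the client data
 * strings; a response buffer is mapped so the server can DMA the
 * login response back to the driver.
 */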
3077 static void send_login(struct ibmvnic_adapter *adapter)
3078 {
3079 	struct ibmvnic_login_rsp_buffer *login_rsp_buffer;
3080 	struct ibmvnic_login_buffer *login_buffer;
3081 	struct device *dev = &adapter->vdev->dev;
3082 	dma_addr_t rsp_buffer_token;
3083 	dma_addr_t buffer_token;
3084 	size_t rsp_buffer_size;
3085 	union ibmvnic_crq crq;
3086 	size_t buffer_size;
3087 	__be64 *tx_list_p;
3088 	__be64 *rx_list_p;
3089 	int client_data_len;
3090 	struct vnic_login_client_data *vlcd;
3091 	int i;
3092 
3093 	release_login_rsp_buffer(adapter);
3094 	client_data_len = vnic_client_data_len(adapter);
3095 
3096 	buffer_size =
3097 	    sizeof(struct ibmvnic_login_buffer) +
3098 	    sizeof(u64) * (adapter->req_tx_queues + adapter->req_rx_queues) +
3099 	    client_data_len;
3100 
3101 	login_buffer = kzalloc(buffer_size, GFP_ATOMIC);
3102 	if (!login_buffer)
3103 		goto buf_alloc_failed;
3104 
3105 	buffer_token = dma_map_single(dev, login_buffer, buffer_size,
3106 				      DMA_TO_DEVICE);
3107 	if (dma_mapping_error(dev, buffer_token)) {
3108 		dev_err(dev, "Couldn't map login buffer\n");
3109 		goto buf_map_failed;
3110 	}
3111 
3112 	rsp_buffer_size = sizeof(struct ibmvnic_login_rsp_buffer) +
3113 			  sizeof(u64) * adapter->req_tx_queues +
3114 			  sizeof(u64) * adapter->req_rx_queues +
3115 			  sizeof(u64) * adapter->req_rx_queues +
3116 			  sizeof(u8) * IBMVNIC_TX_DESC_VERSIONS;
3117 
3118 	login_rsp_buffer = kmalloc(rsp_buffer_size, GFP_ATOMIC);
3119 	if (!login_rsp_buffer)
3120 		goto buf_rsp_alloc_failed;
3121 
3122 	rsp_buffer_token = dma_map_single(dev, login_rsp_buffer,
3123 					  rsp_buffer_size, DMA_FROM_DEVICE);
3124 	if (dma_mapping_error(dev, rsp_buffer_token)) {
3125 		dev_err(dev, "Couldn't map login rsp buffer\n");
3126 		goto buf_rsp_map_failed;
3127 	}
3128 
3129 	adapter->login_buf = login_buffer;
3130 	adapter->login_buf_token = buffer_token;
3131 	adapter->login_buf_sz = buffer_size;
3132 	adapter->login_rsp_buf = login_rsp_buffer;
3133 	adapter->login_rsp_buf_token = rsp_buffer_token;
3134 	adapter->login_rsp_buf_sz = rsp_buffer_size;
3135 
3136 	login_buffer->len = cpu_to_be32(buffer_size);
3137 	login_buffer->version = cpu_to_be32(INITIAL_VERSION_LB);
3138 	login_buffer->num_txcomp_subcrqs = cpu_to_be32(adapter->req_tx_queues);
3139 	login_buffer->off_txcomp_subcrqs =
3140 	    cpu_to_be32(sizeof(struct ibmvnic_login_buffer));
3141 	login_buffer->num_rxcomp_subcrqs = cpu_to_be32(adapter->req_rx_queues);
3142 	login_buffer->off_rxcomp_subcrqs =
3143 	    cpu_to_be32(sizeof(struct ibmvnic_login_buffer) +
3144 			sizeof(u64) * adapter->req_tx_queues);
3145 	login_buffer->login_rsp_ioba = cpu_to_be32(rsp_buffer_token);
3146 	login_buffer->login_rsp_len = cpu_to_be32(rsp_buffer_size);
3147 
3148 	tx_list_p = (__be64 *)((char *)login_buffer +
3149 				      sizeof(struct ibmvnic_login_buffer));
3150 	rx_list_p = (__be64 *)((char *)login_buffer +
3151 				      sizeof(struct ibmvnic_login_buffer) +
3152 				      sizeof(u64) * adapter->req_tx_queues);
3153 
3154 	for (i = 0; i < adapter->req_tx_queues; i++) {
3155 		if (adapter->tx_scrq[i]) {
3156 			tx_list_p[i] = cpu_to_be64(adapter->tx_scrq[i]->
3157 						   crq_num);
3158 		}
3159 	}
3160 
3161 	for (i = 0; i < adapter->req_rx_queues; i++) {
3162 		if (adapter->rx_scrq[i]) {
3163 			rx_list_p[i] = cpu_to_be64(adapter->rx_scrq[i]->
3164 						   crq_num);
3165 		}
3166 	}
3167 
3168 	/* Insert vNIC login client data */
3169 	vlcd = (struct vnic_login_client_data *)
3170 		((char *)rx_list_p + (sizeof(u64) * adapter->req_rx_queues));
3171 	login_buffer->client_data_offset =
3172 			cpu_to_be32((char *)vlcd - (char *)login_buffer);
3173 	login_buffer->client_data_len = cpu_to_be32(client_data_len);
3174 
3175 	vnic_add_client_data(adapter, vlcd);
3176 
3177 	netdev_dbg(adapter->netdev, "Login Buffer:\n");
3178 	for (i = 0; i < (adapter->login_buf_sz - 1) / 8 + 1; i++) {
3179 		netdev_dbg(adapter->netdev, "%016lx\n",
3180 			   ((unsigned long int *)(adapter->login_buf))[i]);
3181 	}
3182 
3183 	memset(&crq, 0, sizeof(crq));
3184 	crq.login.first = IBMVNIC_CRQ_CMD;
3185 	crq.login.cmd = LOGIN;
3186 	crq.login.ioba = cpu_to_be32(buffer_token);
3187 	crq.login.len = cpu_to_be32(buffer_size);
3188 	ibmvnic_send_crq(adapter, &crq);
3189 
3190 	return;
3191 
3192 buf_rsp_map_failed:
3193 	kfree(login_rsp_buffer);
3194 buf_rsp_alloc_failed:
3195 	dma_unmap_single(dev, buffer_token, buffer_size, DMA_TO_DEVICE);
3196 buf_map_failed:
3197 	kfree(login_buffer);
3198 buf_alloc_failed:
3199 	return;
3200 }
3201 
3202 static void send_request_map(struct ibmvnic_adapter *adapter, dma_addr_t addr,
3203 			     u32 len, u8 map_id)
3204 {
3205 	union ibmvnic_crq crq;
3206 
3207 	memset(&crq, 0, sizeof(crq));
3208 	crq.request_map.first = IBMVNIC_CRQ_CMD;
3209 	crq.request_map.cmd = REQUEST_MAP;
3210 	crq.request_map.map_id = map_id;
3211 	crq.request_map.ioba = cpu_to_be32(addr);
3212 	crq.request_map.len = cpu_to_be32(len);
3213 	ibmvnic_send_crq(adapter, &crq);
3214 }
3215 
3216 static void send_request_unmap(struct ibmvnic_adapter *adapter, u8 map_id)
3217 {
3218 	union ibmvnic_crq crq;
3219 
3220 	memset(&crq, 0, sizeof(crq));
3221 	crq.request_unmap.first = IBMVNIC_CRQ_CMD;
3222 	crq.request_unmap.cmd = REQUEST_UNMAP;
3223 	crq.request_unmap.map_id = map_id;
3224 	ibmvnic_send_crq(adapter, &crq);
3225 }
3226 
3227 static void send_map_query(struct ibmvnic_adapter *adapter)
3228 {
3229 	union ibmvnic_crq crq;
3230 
3231 	memset(&crq, 0, sizeof(crq));
3232 	crq.query_map.first = IBMVNIC_CRQ_CMD;
3233 	crq.query_map.cmd = QUERY_MAP;
3234 	ibmvnic_send_crq(adapter, &crq);
3235 }
3236 
3237 /* Send a series of CRQs requesting various capabilities of the VNIC server */
3238 static void send_cap_queries(struct ibmvnic_adapter *adapter)
3239 {
3240 	union ibmvnic_crq crq;
3241 
3242 	atomic_set(&adapter->running_cap_crqs, 0);
3243 	memset(&crq, 0, sizeof(crq));
3244 	crq.query_capability.first = IBMVNIC_CRQ_CMD;
3245 	crq.query_capability.cmd = QUERY_CAPABILITY;
3246 
3247 	crq.query_capability.capability = cpu_to_be16(MIN_TX_QUEUES);
3248 	atomic_inc(&adapter->running_cap_crqs);
3249 	ibmvnic_send_crq(adapter, &crq);
3250 
3251 	crq.query_capability.capability = cpu_to_be16(MIN_RX_QUEUES);
3252 	atomic_inc(&adapter->running_cap_crqs);
3253 	ibmvnic_send_crq(adapter, &crq);
3254 
3255 	crq.query_capability.capability = cpu_to_be16(MIN_RX_ADD_QUEUES);
3256 	atomic_inc(&adapter->running_cap_crqs);
3257 	ibmvnic_send_crq(adapter, &crq);
3258 
3259 	crq.query_capability.capability = cpu_to_be16(MAX_TX_QUEUES);
3260 	atomic_inc(&adapter->running_cap_crqs);
3261 	ibmvnic_send_crq(adapter, &crq);
3262 
3263 	crq.query_capability.capability = cpu_to_be16(MAX_RX_QUEUES);
3264 	atomic_inc(&adapter->running_cap_crqs);
3265 	ibmvnic_send_crq(adapter, &crq);
3266 
3267 	crq.query_capability.capability = cpu_to_be16(MAX_RX_ADD_QUEUES);
3268 	atomic_inc(&adapter->running_cap_crqs);
3269 	ibmvnic_send_crq(adapter, &crq);
3270 
3271 	crq.query_capability.capability =
3272 	    cpu_to_be16(MIN_TX_ENTRIES_PER_SUBCRQ);
3273 	atomic_inc(&adapter->running_cap_crqs);
3274 	ibmvnic_send_crq(adapter, &crq);
3275 
3276 	crq.query_capability.capability =
3277 	    cpu_to_be16(MIN_RX_ADD_ENTRIES_PER_SUBCRQ);
3278 	atomic_inc(&adapter->running_cap_crqs);
3279 	ibmvnic_send_crq(adapter, &crq);
3280 
3281 	crq.query_capability.capability =
3282 	    cpu_to_be16(MAX_TX_ENTRIES_PER_SUBCRQ);
3283 	atomic_inc(&adapter->running_cap_crqs);
3284 	ibmvnic_send_crq(adapter, &crq);
3285 
3286 	crq.query_capability.capability =
3287 	    cpu_to_be16(MAX_RX_ADD_ENTRIES_PER_SUBCRQ);
3288 	atomic_inc(&adapter->running_cap_crqs);
3289 	ibmvnic_send_crq(adapter, &crq);
3290 
3291 	crq.query_capability.capability = cpu_to_be16(TCP_IP_OFFLOAD);
3292 	atomic_inc(&adapter->running_cap_crqs);
3293 	ibmvnic_send_crq(adapter, &crq);
3294 
3295 	crq.query_capability.capability = cpu_to_be16(PROMISC_SUPPORTED);
3296 	atomic_inc(&adapter->running_cap_crqs);
3297 	ibmvnic_send_crq(adapter, &crq);
3298 
3299 	crq.query_capability.capability = cpu_to_be16(MIN_MTU);
3300 	atomic_inc(&adapter->running_cap_crqs);
3301 	ibmvnic_send_crq(adapter, &crq);
3302 
3303 	crq.query_capability.capability = cpu_to_be16(MAX_MTU);
3304 	atomic_inc(&adapter->running_cap_crqs);
3305 	ibmvnic_send_crq(adapter, &crq);
3306 
3307 	crq.query_capability.capability = cpu_to_be16(MAX_MULTICAST_FILTERS);
3308 	atomic_inc(&adapter->running_cap_crqs);
3309 	ibmvnic_send_crq(adapter, &crq);
3310 
3311 	crq.query_capability.capability = cpu_to_be16(VLAN_HEADER_INSERTION);
3312 	atomic_inc(&adapter->running_cap_crqs);
3313 	ibmvnic_send_crq(adapter, &crq);
3314 
3315 	crq.query_capability.capability = cpu_to_be16(RX_VLAN_HEADER_INSERTION);
3316 	atomic_inc(&adapter->running_cap_crqs);
3317 	ibmvnic_send_crq(adapter, &crq);
3318 
3319 	crq.query_capability.capability = cpu_to_be16(MAX_TX_SG_ENTRIES);
3320 	atomic_inc(&adapter->running_cap_crqs);
3321 	ibmvnic_send_crq(adapter, &crq);
3322 
3323 	crq.query_capability.capability = cpu_to_be16(RX_SG_SUPPORTED);
3324 	atomic_inc(&adapter->running_cap_crqs);
3325 	ibmvnic_send_crq(adapter, &crq);
3326 
3327 	crq.query_capability.capability = cpu_to_be16(OPT_TX_COMP_SUB_QUEUES);
3328 	atomic_inc(&adapter->running_cap_crqs);
3329 	ibmvnic_send_crq(adapter, &crq);
3330 
3331 	crq.query_capability.capability = cpu_to_be16(OPT_RX_COMP_QUEUES);
3332 	atomic_inc(&adapter->running_cap_crqs);
3333 	ibmvnic_send_crq(adapter, &crq);
3334 
3335 	crq.query_capability.capability =
3336 			cpu_to_be16(OPT_RX_BUFADD_Q_PER_RX_COMP_Q);
3337 	atomic_inc(&adapter->running_cap_crqs);
3338 	ibmvnic_send_crq(adapter, &crq);
3339 
3340 	crq.query_capability.capability =
3341 			cpu_to_be16(OPT_TX_ENTRIES_PER_SUBCRQ);
3342 	atomic_inc(&adapter->running_cap_crqs);
3343 	ibmvnic_send_crq(adapter, &crq);
3344 
3345 	crq.query_capability.capability =
3346 			cpu_to_be16(OPT_RXBA_ENTRIES_PER_SUBCRQ);
3347 	atomic_inc(&adapter->running_cap_crqs);
3348 	ibmvnic_send_crq(adapter, &crq);
3349 
3350 	crq.query_capability.capability = cpu_to_be16(TX_RX_DESC_REQ);
3351 	atomic_inc(&adapter->running_cap_crqs);
3352 	ibmvnic_send_crq(adapter, &crq);
3353 }
3354 
3355 static void handle_vpd_size_rsp(union ibmvnic_crq *crq,
3356 				struct ibmvnic_adapter *adapter)
3357 {
3358 	struct device *dev = &adapter->vdev->dev;
3359 
3360 	if (crq->get_vpd_size_rsp.rc.code) {
3361 		dev_err(dev, "Error retrieving VPD size, rc=%x\n",
3362 			crq->get_vpd_size_rsp.rc.code);
3363 		complete(&adapter->fw_done);
3364 		return;
3365 	}
3366 
3367 	adapter->vpd->len = be64_to_cpu(crq->get_vpd_size_rsp.len);
3368 	complete(&adapter->fw_done);
3369 }
3370 
3371 static void handle_vpd_rsp(union ibmvnic_crq *crq,
3372 			   struct ibmvnic_adapter *adapter)
3373 {
3374 	struct device *dev = &adapter->vdev->dev;
3375 	unsigned char *substr = NULL;
3376 	u8 fw_level_len = 0;
3377 
3378 	memset(adapter->fw_version, 0, 32);
3379 
3380 	dma_unmap_single(dev, adapter->vpd->dma_addr, adapter->vpd->len,
3381 			 DMA_FROM_DEVICE);
3382 
3383 	if (crq->get_vpd_rsp.rc.code) {
3384 		dev_err(dev, "Error retrieving VPD from device, rc=%x\n",
3385 			crq->get_vpd_rsp.rc.code);
3386 		goto complete;
3387 	}
3388 
3389 	/* get the position of the firmware version info
3390 	 * located after the ASCII 'RM' substring in the buffer
3391 	 */
3392 	substr = strnstr(adapter->vpd->buff, "RM", adapter->vpd->len);
3393 	if (!substr) {
3394 		dev_info(dev, "Warning - No FW level has been provided in the VPD buffer by the VIOS Server\n");
3395 		goto complete;
3396 	}
3397 
3398 	/* get length of firmware level ASCII substring */
3399 	if ((substr + 2) < (adapter->vpd->buff + adapter->vpd->len)) {
3400 		fw_level_len = *(substr + 2);
3401 	} else {
		dev_info(dev, "Length of FW substr extends beyond VPD buff\n");
3403 		goto complete;
3404 	}
3405 
3406 	/* copy firmware version string from vpd into adapter */
3407 	if ((substr + 3 + fw_level_len) <
3408 	    (adapter->vpd->buff + adapter->vpd->len)) {
3409 		strncpy((char *)adapter->fw_version, substr + 3, fw_level_len);
3410 	} else {
		dev_info(dev, "FW substr extends beyond VPD buff\n");
3412 	}
3413 
3414 complete:
3415 	if (adapter->fw_version[0] == '\0')
3416 		strncpy((char *)adapter->fw_version, "N/A", 3 * sizeof(char));
3417 	complete(&adapter->fw_done);
3418 }
3419 
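/* Process the server's QUERY_IP_OFFLOAD response: translate the
 * advertised checksum and TSO capabilities into netdev feature
 * flags, then send CONTROL_IP_OFFLOAD to enable the chosen subset.
 */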
3420 static void handle_query_ip_offload_rsp(struct ibmvnic_adapter *adapter)
3421 {
3422 	struct device *dev = &adapter->vdev->dev;
3423 	struct ibmvnic_query_ip_offload_buffer *buf = &adapter->ip_offload_buf;
3424 	union ibmvnic_crq crq;
3425 	int i;
3426 
3427 	dma_unmap_single(dev, adapter->ip_offload_tok,
3428 			 sizeof(adapter->ip_offload_buf), DMA_FROM_DEVICE);
3429 
3430 	netdev_dbg(adapter->netdev, "Query IP Offload Buffer:\n");
3431 	for (i = 0; i < (sizeof(adapter->ip_offload_buf) - 1) / 8 + 1; i++)
3432 		netdev_dbg(adapter->netdev, "%016lx\n",
3433 			   ((unsigned long int *)(buf))[i]);
3434 
3435 	netdev_dbg(adapter->netdev, "ipv4_chksum = %d\n", buf->ipv4_chksum);
3436 	netdev_dbg(adapter->netdev, "ipv6_chksum = %d\n", buf->ipv6_chksum);
3437 	netdev_dbg(adapter->netdev, "tcp_ipv4_chksum = %d\n",
3438 		   buf->tcp_ipv4_chksum);
3439 	netdev_dbg(adapter->netdev, "tcp_ipv6_chksum = %d\n",
3440 		   buf->tcp_ipv6_chksum);
3441 	netdev_dbg(adapter->netdev, "udp_ipv4_chksum = %d\n",
3442 		   buf->udp_ipv4_chksum);
3443 	netdev_dbg(adapter->netdev, "udp_ipv6_chksum = %d\n",
3444 		   buf->udp_ipv6_chksum);
3445 	netdev_dbg(adapter->netdev, "large_tx_ipv4 = %d\n",
3446 		   buf->large_tx_ipv4);
3447 	netdev_dbg(adapter->netdev, "large_tx_ipv6 = %d\n",
3448 		   buf->large_tx_ipv6);
3449 	netdev_dbg(adapter->netdev, "large_rx_ipv4 = %d\n",
3450 		   buf->large_rx_ipv4);
3451 	netdev_dbg(adapter->netdev, "large_rx_ipv6 = %d\n",
3452 		   buf->large_rx_ipv6);
3453 	netdev_dbg(adapter->netdev, "max_ipv4_hdr_sz = %d\n",
3454 		   buf->max_ipv4_header_size);
3455 	netdev_dbg(adapter->netdev, "max_ipv6_hdr_sz = %d\n",
3456 		   buf->max_ipv6_header_size);
3457 	netdev_dbg(adapter->netdev, "max_tcp_hdr_size = %d\n",
3458 		   buf->max_tcp_header_size);
3459 	netdev_dbg(adapter->netdev, "max_udp_hdr_size = %d\n",
3460 		   buf->max_udp_header_size);
3461 	netdev_dbg(adapter->netdev, "max_large_tx_size = %d\n",
3462 		   buf->max_large_tx_size);
3463 	netdev_dbg(adapter->netdev, "max_large_rx_size = %d\n",
3464 		   buf->max_large_rx_size);
3465 	netdev_dbg(adapter->netdev, "ipv6_ext_hdr = %d\n",
3466 		   buf->ipv6_extension_header);
3467 	netdev_dbg(adapter->netdev, "tcp_pseudosum_req = %d\n",
3468 		   buf->tcp_pseudosum_req);
3469 	netdev_dbg(adapter->netdev, "num_ipv6_ext_hd = %d\n",
3470 		   buf->num_ipv6_ext_headers);
3471 	netdev_dbg(adapter->netdev, "off_ipv6_ext_hd = %d\n",
3472 		   buf->off_ipv6_ext_headers);
3473 
3474 	adapter->ip_offload_ctrl_tok =
3475 	    dma_map_single(dev, &adapter->ip_offload_ctrl,
3476 			   sizeof(adapter->ip_offload_ctrl), DMA_TO_DEVICE);
3477 
3478 	if (dma_mapping_error(dev, adapter->ip_offload_ctrl_tok)) {
3479 		dev_err(dev, "Couldn't map ip offload control buffer\n");
3480 		return;
3481 	}
3482 
3483 	adapter->ip_offload_ctrl.len =
3484 	    cpu_to_be32(sizeof(adapter->ip_offload_ctrl));
3485 	adapter->ip_offload_ctrl.version = cpu_to_be32(INITIAL_VERSION_IOB);
3486 	adapter->ip_offload_ctrl.ipv4_chksum = buf->ipv4_chksum;
3487 	adapter->ip_offload_ctrl.ipv6_chksum = buf->ipv6_chksum;
3488 	adapter->ip_offload_ctrl.tcp_ipv4_chksum = buf->tcp_ipv4_chksum;
3489 	adapter->ip_offload_ctrl.udp_ipv4_chksum = buf->udp_ipv4_chksum;
3490 	adapter->ip_offload_ctrl.tcp_ipv6_chksum = buf->tcp_ipv6_chksum;
3491 	adapter->ip_offload_ctrl.udp_ipv6_chksum = buf->udp_ipv6_chksum;
3492 	adapter->ip_offload_ctrl.large_tx_ipv4 = buf->large_tx_ipv4;
3493 	adapter->ip_offload_ctrl.large_tx_ipv6 = buf->large_tx_ipv6;
3494 
3495 	/* large_rx disabled for now, additional features needed */
3496 	adapter->ip_offload_ctrl.large_rx_ipv4 = 0;
3497 	adapter->ip_offload_ctrl.large_rx_ipv6 = 0;
3498 
3499 	adapter->netdev->features = NETIF_F_SG | NETIF_F_GSO;
3500 
3501 	if (buf->tcp_ipv4_chksum || buf->udp_ipv4_chksum)
3502 		adapter->netdev->features |= NETIF_F_IP_CSUM;
3503 
3504 	if (buf->tcp_ipv6_chksum || buf->udp_ipv6_chksum)
3505 		adapter->netdev->features |= NETIF_F_IPV6_CSUM;
3506 
3507 	if ((adapter->netdev->features &
3508 	    (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)))
3509 		adapter->netdev->features |= NETIF_F_RXCSUM;
3510 
3511 	if (buf->large_tx_ipv4)
3512 		adapter->netdev->features |= NETIF_F_TSO;
3513 	if (buf->large_tx_ipv6)
3514 		adapter->netdev->features |= NETIF_F_TSO6;
3515 
3516 	adapter->netdev->hw_features |= adapter->netdev->features;
3517 
3518 	memset(&crq, 0, sizeof(crq));
3519 	crq.control_ip_offload.first = IBMVNIC_CRQ_CMD;
3520 	crq.control_ip_offload.cmd = CONTROL_IP_OFFLOAD;
3521 	crq.control_ip_offload.len =
3522 	    cpu_to_be32(sizeof(adapter->ip_offload_ctrl));
3523 	crq.control_ip_offload.ioba = cpu_to_be32(adapter->ip_offload_ctrl_tok);
3524 	ibmvnic_send_crq(adapter, &crq);
3525 }
3526 
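/* Error reporting flow: the server sends ERROR_INDICATION naming an
 * error id and the size of the available detail data;
 * request_error_information() below maps a buffer of that size and
 * sends REQUEST_ERROR_INFO; the server fills the buffer and answers
 * with REQUEST_ERROR_RSP, which this handler matches against
 * adapter->errors by error id before dumping and freeing the buffer.
 */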
3527 static void handle_error_info_rsp(union ibmvnic_crq *crq,
3528 				  struct ibmvnic_adapter *adapter)
3529 {
3530 	struct device *dev = &adapter->vdev->dev;
3531 	struct ibmvnic_error_buff *error_buff, *tmp;
3532 	unsigned long flags;
3533 	bool found = false;
3534 	int i;
3535 
3536 	if (crq->request_error_rsp.rc.code) {
3537 		dev_err(dev, "Request Error Rsp returned with rc=%x\n",
3538 			crq->request_error_rsp.rc.code);
3539 		return;
3540 	}
3541 
3542 	spin_lock_irqsave(&adapter->error_list_lock, flags);
3543 	list_for_each_entry_safe(error_buff, tmp, &adapter->errors, list)
3544 		if (error_buff->error_id == crq->request_error_rsp.error_id) {
3545 			found = true;
3546 			list_del(&error_buff->list);
3547 			break;
3548 		}
3549 	spin_unlock_irqrestore(&adapter->error_list_lock, flags);
3550 
3551 	if (!found) {
3552 		dev_err(dev, "Couldn't find error id %x\n",
3553 			be32_to_cpu(crq->request_error_rsp.error_id));
3554 		return;
3555 	}
3556 
3557 	dev_err(dev, "Detailed info for error id %x:",
3558 		be32_to_cpu(crq->request_error_rsp.error_id));
3559 
3560 	for (i = 0; i < error_buff->len; i++) {
3561 		pr_cont("%02x", (int)error_buff->buff[i]);
3562 		if (i % 8 == 7)
3563 			pr_cont(" ");
3564 	}
3565 	pr_cont("\n");
3566 
3567 	dma_unmap_single(dev, error_buff->dma, error_buff->len,
3568 			 DMA_FROM_DEVICE);
3569 	kfree(error_buff->buff);
3570 	kfree(error_buff);
3571 }
3572 
3573 static void request_error_information(struct ibmvnic_adapter *adapter,
3574 				      union ibmvnic_crq *err_crq)
3575 {
3576 	struct device *dev = &adapter->vdev->dev;
3577 	struct net_device *netdev = adapter->netdev;
3578 	struct ibmvnic_error_buff *error_buff;
3579 	unsigned long timeout = msecs_to_jiffies(30000);
3580 	union ibmvnic_crq crq;
3581 	unsigned long flags;
3582 	int rc, detail_len;
3583 
3584 	error_buff = kmalloc(sizeof(*error_buff), GFP_ATOMIC);
3585 	if (!error_buff)
3586 		return;
3587 
3588 	detail_len = be32_to_cpu(err_crq->error_indication.detail_error_sz);
3589 	error_buff->buff = kmalloc(detail_len, GFP_ATOMIC);
3590 	if (!error_buff->buff) {
3591 		kfree(error_buff);
3592 		return;
3593 	}
3594 
3595 	error_buff->dma = dma_map_single(dev, error_buff->buff, detail_len,
3596 					 DMA_FROM_DEVICE);
3597 	if (dma_mapping_error(dev, error_buff->dma)) {
3598 		netdev_err(netdev, "Couldn't map error buffer\n");
3599 		kfree(error_buff->buff);
3600 		kfree(error_buff);
3601 		return;
3602 	}
3603 
3604 	error_buff->len = detail_len;
3605 	error_buff->error_id = err_crq->error_indication.error_id;
3606 
3607 	spin_lock_irqsave(&adapter->error_list_lock, flags);
3608 	list_add_tail(&error_buff->list, &adapter->errors);
3609 	spin_unlock_irqrestore(&adapter->error_list_lock, flags);
3610 
3611 	memset(&crq, 0, sizeof(crq));
3612 	crq.request_error_info.first = IBMVNIC_CRQ_CMD;
3613 	crq.request_error_info.cmd = REQUEST_ERROR_INFO;
3614 	crq.request_error_info.ioba = cpu_to_be32(error_buff->dma);
3615 	crq.request_error_info.len = cpu_to_be32(detail_len);
3616 	crq.request_error_info.error_id = err_crq->error_indication.error_id;
3617 
3618 	rc = ibmvnic_send_crq(adapter, &crq);
3619 	if (rc) {
3620 		netdev_err(netdev, "failed to request error information\n");
3621 		goto err_info_fail;
3622 	}
3623 
3624 	if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
3625 		netdev_err(netdev, "timeout waiting for error information\n");
3626 		goto err_info_fail;
3627 	}
3628 
3629 	return;
3630 
3631 err_info_fail:
3632 	spin_lock_irqsave(&adapter->error_list_lock, flags);
3633 	list_del(&error_buff->list);
3634 	spin_unlock_irqrestore(&adapter->error_list_lock, flags);
3635 
3636 	kfree(error_buff->buff);
3637 	kfree(error_buff);
3638 }
3639 
3640 static void handle_error_indication(union ibmvnic_crq *crq,
3641 				    struct ibmvnic_adapter *adapter)
3642 {
3643 	struct device *dev = &adapter->vdev->dev;
3644 
3645 	dev_err(dev, "Firmware reports %serror id %x, cause %d\n",
3646 		crq->error_indication.flags
3647 			& IBMVNIC_FATAL_ERROR ? "FATAL " : "",
3648 		be32_to_cpu(crq->error_indication.error_id),
3649 		be16_to_cpu(crq->error_indication.error_cause));
3650 
3651 	if (be32_to_cpu(crq->error_indication.error_id))
3652 		request_error_information(adapter, crq);
3653 
3654 	if (crq->error_indication.flags & IBMVNIC_FATAL_ERROR)
3655 		ibmvnic_reset(adapter, VNIC_RESET_FATAL);
3656 	else
3657 		ibmvnic_reset(adapter, VNIC_RESET_NON_FATAL);
3658 }
3659 
3660 static int handle_change_mac_rsp(union ibmvnic_crq *crq,
3661 				 struct ibmvnic_adapter *adapter)
3662 {
3663 	struct net_device *netdev = adapter->netdev;
3664 	struct device *dev = &adapter->vdev->dev;
3665 	long rc;
3666 
3667 	rc = crq->change_mac_addr_rsp.rc.code;
3668 	if (rc) {
3669 		dev_err(dev, "Error %ld in CHANGE_MAC_ADDR_RSP\n", rc);
3670 		goto out;
3671 	}
3672 	memcpy(netdev->dev_addr, &crq->change_mac_addr_rsp.mac_addr[0],
3673 	       ETH_ALEN);
3674 out:
3675 	complete(&adapter->fw_done);
3676 	return rc;
3677 }
3678 
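/* Each capability requested by ibmvnic_send_req_caps() increments
 * running_cap_crqs, and every response handled here decrements it.
 * A PARTIALSUCCESS response restarts negotiation with the value the
 * server can actually supply (or the fallback MTU), so several rounds
 * may be needed before the IP offload query below can be issued.
 */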
3679 static void handle_request_cap_rsp(union ibmvnic_crq *crq,
3680 				   struct ibmvnic_adapter *adapter)
3681 {
3682 	struct device *dev = &adapter->vdev->dev;
3683 	u64 *req_value;
3684 	char *name;
3685 
3686 	atomic_dec(&adapter->running_cap_crqs);
3687 	switch (be16_to_cpu(crq->request_capability_rsp.capability)) {
3688 	case REQ_TX_QUEUES:
3689 		req_value = &adapter->req_tx_queues;
3690 		name = "tx";
3691 		break;
3692 	case REQ_RX_QUEUES:
3693 		req_value = &adapter->req_rx_queues;
3694 		name = "rx";
3695 		break;
3696 	case REQ_RX_ADD_QUEUES:
3697 		req_value = &adapter->req_rx_add_queues;
3698 		name = "rx_add";
3699 		break;
3700 	case REQ_TX_ENTRIES_PER_SUBCRQ:
3701 		req_value = &adapter->req_tx_entries_per_subcrq;
3702 		name = "tx_entries_per_subcrq";
3703 		break;
3704 	case REQ_RX_ADD_ENTRIES_PER_SUBCRQ:
3705 		req_value = &adapter->req_rx_add_entries_per_subcrq;
3706 		name = "rx_add_entries_per_subcrq";
3707 		break;
3708 	case REQ_MTU:
3709 		req_value = &adapter->req_mtu;
3710 		name = "mtu";
3711 		break;
3712 	case PROMISC_REQUESTED:
3713 		req_value = &adapter->promisc;
3714 		name = "promisc";
3715 		break;
3716 	default:
3717 		dev_err(dev, "Got invalid cap request rsp %d\n",
3718 			be16_to_cpu(crq->request_capability_rsp.capability));
3719 		return;
3720 	}
3721 
3722 	switch (crq->request_capability_rsp.rc.code) {
3723 	case SUCCESS:
3724 		break;
3725 	case PARTIALSUCCESS:
3726 		dev_info(dev, "req=%lld, rsp=%ld in %s queue, retrying.\n",
3727 			 *req_value,
3728 			 (long int)be64_to_cpu(crq->request_capability_rsp.
3729 					       number), name);
3730 
3731 		if (be16_to_cpu(crq->request_capability_rsp.capability) ==
3732 		    REQ_MTU) {
3733 			pr_err("mtu of %llu is not supported. Reverting.\n",
3734 			       *req_value);
3735 			*req_value = adapter->fallback.mtu;
3736 		} else {
3737 			*req_value =
3738 				be64_to_cpu(crq->request_capability_rsp.number);
3739 		}
3740 
3741 		ibmvnic_send_req_caps(adapter, 1);
3742 		return;
3743 	default:
3744 		dev_err(dev, "Error %d in request cap rsp\n",
3745 			crq->request_capability_rsp.rc.code);
3746 		return;
3747 	}
3748 
3749 	/* Done receiving requested capabilities, query IP offload support */
3750 	if (atomic_read(&adapter->running_cap_crqs) == 0) {
3751 		union ibmvnic_crq newcrq;
3752 		int buf_sz = sizeof(struct ibmvnic_query_ip_offload_buffer);
3753 		struct ibmvnic_query_ip_offload_buffer *ip_offload_buf =
3754 		    &adapter->ip_offload_buf;
3755 
3756 		adapter->wait_capability = false;
3757 		adapter->ip_offload_tok = dma_map_single(dev, ip_offload_buf,
3758 							 buf_sz,
3759 							 DMA_FROM_DEVICE);
3760 
3761 		if (dma_mapping_error(dev, adapter->ip_offload_tok)) {
3762 			if (!firmware_has_feature(FW_FEATURE_CMO))
3763 				dev_err(dev, "Couldn't map offload buffer\n");
3764 			return;
3765 		}
3766 
3767 		memset(&newcrq, 0, sizeof(newcrq));
3768 		newcrq.query_ip_offload.first = IBMVNIC_CRQ_CMD;
3769 		newcrq.query_ip_offload.cmd = QUERY_IP_OFFLOAD;
3770 		newcrq.query_ip_offload.len = cpu_to_be32(buf_sz);
3771 		newcrq.query_ip_offload.ioba =
3772 		    cpu_to_be32(adapter->ip_offload_tok);
3773 
3774 		ibmvnic_send_crq(adapter, &newcrq);
3775 	}
3776 }
3777 
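/* Login commits the negotiated configuration. A nonzero return code
 * means the server could not allocate the requested queues; the
 * renegotiate flag makes the caller shrink the request and resend
 * the login buffer.
 */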
3778 static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
3779 			    struct ibmvnic_adapter *adapter)
3780 {
3781 	struct device *dev = &adapter->vdev->dev;
3782 	struct net_device *netdev = adapter->netdev;
3783 	struct ibmvnic_login_rsp_buffer *login_rsp = adapter->login_rsp_buf;
3784 	struct ibmvnic_login_buffer *login = adapter->login_buf;
3785 	int i;
3786 
3787 	dma_unmap_single(dev, adapter->login_buf_token, adapter->login_buf_sz,
3788 			 DMA_BIDIRECTIONAL);
3789 	release_login_buffer(adapter);
3790 	dma_unmap_single(dev, adapter->login_rsp_buf_token,
3791 			 adapter->login_rsp_buf_sz, DMA_BIDIRECTIONAL);
3792 
3793 	/* If the number of queues requested can't be allocated by the
3794 	 * server, the login response will return with code 1. We will need
3795 	 * to resend the login buffer with fewer queues requested.
3796 	 */
3797 	if (login_rsp_crq->generic.rc.code) {
3798 		adapter->renegotiate = true;
3799 		complete(&adapter->init_done);
3800 		return 0;
3801 	}
3802 
3803 	netdev->mtu = adapter->req_mtu - ETH_HLEN;
3804 
3805 	netdev_dbg(adapter->netdev, "Login Response Buffer:\n");
3806 	for (i = 0; i < (adapter->login_rsp_buf_sz - 1) / 8 + 1; i++) {
3807 		netdev_dbg(adapter->netdev, "%016lx\n",
3808 			   ((unsigned long int *)(adapter->login_rsp_buf))[i]);
3809 	}
3810 
3811 	/* Sanity checks */
3812 	if (login->num_txcomp_subcrqs != login_rsp->num_txsubm_subcrqs ||
3813 	    (be32_to_cpu(login->num_rxcomp_subcrqs) *
3814 	     adapter->req_rx_add_queues !=
3815 	     be32_to_cpu(login_rsp->num_rxadd_subcrqs))) {
3816 		dev_err(dev, "FATAL: Inconsistent login and login rsp\n");
3817 		ibmvnic_remove(adapter->vdev);
3818 		return -EIO;
3819 	}
3820 	complete(&adapter->init_done);
3821 
3822 	return 0;
3823 }
3824 
3825 static void handle_request_unmap_rsp(union ibmvnic_crq *crq,
3826 				     struct ibmvnic_adapter *adapter)
3827 {
3828 	struct device *dev = &adapter->vdev->dev;
3829 	long rc;
3830 
3831 	rc = crq->request_unmap_rsp.rc.code;
3832 	if (rc)
3833 		dev_err(dev, "Error %ld in REQUEST_UNMAP_RSP\n", rc);
3834 }
3835 
3836 static void handle_query_map_rsp(union ibmvnic_crq *crq,
3837 				 struct ibmvnic_adapter *adapter)
3838 {
3839 	struct net_device *netdev = adapter->netdev;
3840 	struct device *dev = &adapter->vdev->dev;
3841 	long rc;
3842 
3843 	rc = crq->query_map_rsp.rc.code;
3844 	if (rc) {
3845 		dev_err(dev, "Error %ld in QUERY_MAP_RSP\n", rc);
3846 		return;
3847 	}
3848 	netdev_dbg(netdev, "page_size = %d\ntot_pages = %d\nfree_pages = %d\n",
3849 		   crq->query_map_rsp.page_size, crq->query_map_rsp.tot_pages,
3850 		   crq->query_map_rsp.free_pages);
3851 }
3852 
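/* One QUERY_CAPABILITY response arrives per query sent by
 * send_cap_queries(). Once the last outstanding response is counted,
 * the driver requests its preferred values via
 * ibmvnic_send_req_caps().
 */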
3853 static void handle_query_cap_rsp(union ibmvnic_crq *crq,
3854 				 struct ibmvnic_adapter *adapter)
3855 {
3856 	struct net_device *netdev = adapter->netdev;
3857 	struct device *dev = &adapter->vdev->dev;
3858 	long rc;
3859 
3860 	atomic_dec(&adapter->running_cap_crqs);
3861 	netdev_dbg(netdev, "Outstanding queries: %d\n",
3862 		   atomic_read(&adapter->running_cap_crqs));
3863 	rc = crq->query_capability.rc.code;
3864 	if (rc) {
3865 		dev_err(dev, "Error %ld in QUERY_CAP_RSP\n", rc);
3866 		goto out;
3867 	}
3868 
3869 	switch (be16_to_cpu(crq->query_capability.capability)) {
3870 	case MIN_TX_QUEUES:
3871 		adapter->min_tx_queues =
3872 		    be64_to_cpu(crq->query_capability.number);
3873 		netdev_dbg(netdev, "min_tx_queues = %lld\n",
3874 			   adapter->min_tx_queues);
3875 		break;
3876 	case MIN_RX_QUEUES:
3877 		adapter->min_rx_queues =
3878 		    be64_to_cpu(crq->query_capability.number);
3879 		netdev_dbg(netdev, "min_rx_queues = %lld\n",
3880 			   adapter->min_rx_queues);
3881 		break;
3882 	case MIN_RX_ADD_QUEUES:
3883 		adapter->min_rx_add_queues =
3884 		    be64_to_cpu(crq->query_capability.number);
3885 		netdev_dbg(netdev, "min_rx_add_queues = %lld\n",
3886 			   adapter->min_rx_add_queues);
3887 		break;
3888 	case MAX_TX_QUEUES:
3889 		adapter->max_tx_queues =
3890 		    be64_to_cpu(crq->query_capability.number);
3891 		netdev_dbg(netdev, "max_tx_queues = %lld\n",
3892 			   adapter->max_tx_queues);
3893 		break;
3894 	case MAX_RX_QUEUES:
3895 		adapter->max_rx_queues =
3896 		    be64_to_cpu(crq->query_capability.number);
3897 		netdev_dbg(netdev, "max_rx_queues = %lld\n",
3898 			   adapter->max_rx_queues);
3899 		break;
3900 	case MAX_RX_ADD_QUEUES:
3901 		adapter->max_rx_add_queues =
3902 		    be64_to_cpu(crq->query_capability.number);
3903 		netdev_dbg(netdev, "max_rx_add_queues = %lld\n",
3904 			   adapter->max_rx_add_queues);
3905 		break;
3906 	case MIN_TX_ENTRIES_PER_SUBCRQ:
3907 		adapter->min_tx_entries_per_subcrq =
3908 		    be64_to_cpu(crq->query_capability.number);
3909 		netdev_dbg(netdev, "min_tx_entries_per_subcrq = %lld\n",
3910 			   adapter->min_tx_entries_per_subcrq);
3911 		break;
3912 	case MIN_RX_ADD_ENTRIES_PER_SUBCRQ:
3913 		adapter->min_rx_add_entries_per_subcrq =
3914 		    be64_to_cpu(crq->query_capability.number);
3915 		netdev_dbg(netdev, "min_rx_add_entrs_per_subcrq = %lld\n",
3916 			   adapter->min_rx_add_entries_per_subcrq);
3917 		break;
3918 	case MAX_TX_ENTRIES_PER_SUBCRQ:
3919 		adapter->max_tx_entries_per_subcrq =
3920 		    be64_to_cpu(crq->query_capability.number);
3921 		netdev_dbg(netdev, "max_tx_entries_per_subcrq = %lld\n",
3922 			   adapter->max_tx_entries_per_subcrq);
3923 		break;
3924 	case MAX_RX_ADD_ENTRIES_PER_SUBCRQ:
3925 		adapter->max_rx_add_entries_per_subcrq =
3926 		    be64_to_cpu(crq->query_capability.number);
3927 		netdev_dbg(netdev, "max_rx_add_entrs_per_subcrq = %lld\n",
3928 			   adapter->max_rx_add_entries_per_subcrq);
3929 		break;
3930 	case TCP_IP_OFFLOAD:
3931 		adapter->tcp_ip_offload =
3932 		    be64_to_cpu(crq->query_capability.number);
3933 		netdev_dbg(netdev, "tcp_ip_offload = %lld\n",
3934 			   adapter->tcp_ip_offload);
3935 		break;
3936 	case PROMISC_SUPPORTED:
3937 		adapter->promisc_supported =
3938 		    be64_to_cpu(crq->query_capability.number);
3939 		netdev_dbg(netdev, "promisc_supported = %lld\n",
3940 			   adapter->promisc_supported);
3941 		break;
3942 	case MIN_MTU:
3943 		adapter->min_mtu = be64_to_cpu(crq->query_capability.number);
3944 		netdev->min_mtu = adapter->min_mtu - ETH_HLEN;
3945 		netdev_dbg(netdev, "min_mtu = %lld\n", adapter->min_mtu);
3946 		break;
3947 	case MAX_MTU:
3948 		adapter->max_mtu = be64_to_cpu(crq->query_capability.number);
3949 		netdev->max_mtu = adapter->max_mtu - ETH_HLEN;
3950 		netdev_dbg(netdev, "max_mtu = %lld\n", adapter->max_mtu);
3951 		break;
3952 	case MAX_MULTICAST_FILTERS:
3953 		adapter->max_multicast_filters =
3954 		    be64_to_cpu(crq->query_capability.number);
3955 		netdev_dbg(netdev, "max_multicast_filters = %lld\n",
3956 			   adapter->max_multicast_filters);
3957 		break;
3958 	case VLAN_HEADER_INSERTION:
3959 		adapter->vlan_header_insertion =
3960 		    be64_to_cpu(crq->query_capability.number);
3961 		if (adapter->vlan_header_insertion)
3962 			netdev->features |= NETIF_F_HW_VLAN_STAG_TX;
3963 		netdev_dbg(netdev, "vlan_header_insertion = %lld\n",
3964 			   adapter->vlan_header_insertion);
3965 		break;
3966 	case RX_VLAN_HEADER_INSERTION:
3967 		adapter->rx_vlan_header_insertion =
3968 		    be64_to_cpu(crq->query_capability.number);
3969 		netdev_dbg(netdev, "rx_vlan_header_insertion = %lld\n",
3970 			   adapter->rx_vlan_header_insertion);
3971 		break;
3972 	case MAX_TX_SG_ENTRIES:
3973 		adapter->max_tx_sg_entries =
3974 		    be64_to_cpu(crq->query_capability.number);
3975 		netdev_dbg(netdev, "max_tx_sg_entries = %lld\n",
3976 			   adapter->max_tx_sg_entries);
3977 		break;
3978 	case RX_SG_SUPPORTED:
3979 		adapter->rx_sg_supported =
3980 		    be64_to_cpu(crq->query_capability.number);
3981 		netdev_dbg(netdev, "rx_sg_supported = %lld\n",
3982 			   adapter->rx_sg_supported);
3983 		break;
3984 	case OPT_TX_COMP_SUB_QUEUES:
3985 		adapter->opt_tx_comp_sub_queues =
3986 		    be64_to_cpu(crq->query_capability.number);
3987 		netdev_dbg(netdev, "opt_tx_comp_sub_queues = %lld\n",
3988 			   adapter->opt_tx_comp_sub_queues);
3989 		break;
3990 	case OPT_RX_COMP_QUEUES:
3991 		adapter->opt_rx_comp_queues =
3992 		    be64_to_cpu(crq->query_capability.number);
3993 		netdev_dbg(netdev, "opt_rx_comp_queues = %lld\n",
3994 			   adapter->opt_rx_comp_queues);
3995 		break;
3996 	case OPT_RX_BUFADD_Q_PER_RX_COMP_Q:
3997 		adapter->opt_rx_bufadd_q_per_rx_comp_q =
3998 		    be64_to_cpu(crq->query_capability.number);
3999 		netdev_dbg(netdev, "opt_rx_bufadd_q_per_rx_comp_q = %lld\n",
4000 			   adapter->opt_rx_bufadd_q_per_rx_comp_q);
4001 		break;
4002 	case OPT_TX_ENTRIES_PER_SUBCRQ:
4003 		adapter->opt_tx_entries_per_subcrq =
4004 		    be64_to_cpu(crq->query_capability.number);
4005 		netdev_dbg(netdev, "opt_tx_entries_per_subcrq = %lld\n",
4006 			   adapter->opt_tx_entries_per_subcrq);
4007 		break;
4008 	case OPT_RXBA_ENTRIES_PER_SUBCRQ:
4009 		adapter->opt_rxba_entries_per_subcrq =
4010 		    be64_to_cpu(crq->query_capability.number);
4011 		netdev_dbg(netdev, "opt_rxba_entries_per_subcrq = %lld\n",
4012 			   adapter->opt_rxba_entries_per_subcrq);
4013 		break;
4014 	case TX_RX_DESC_REQ:
4015 		adapter->tx_rx_desc_req = crq->query_capability.number;
4016 		netdev_dbg(netdev, "tx_rx_desc_req = %llx\n",
4017 			   adapter->tx_rx_desc_req);
4018 		break;
4019 
4020 	default:
4021 		netdev_err(netdev, "Got invalid cap rsp %d\n",
4022 			   be16_to_cpu(crq->query_capability.capability));
4023 	}
4024 
4025 out:
4026 	if (atomic_read(&adapter->running_cap_crqs) == 0) {
4027 		adapter->wait_capability = false;
4028 		ibmvnic_send_req_caps(adapter, 0);
4029 	}
4030 }
4031 
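/* Top-level CRQ dispatcher. The first byte classifies the message:
 * IBMVNIC_CRQ_INIT_RSP for the initialization handshake,
 * IBMVNIC_CRQ_XPORT_EVENT for transport events such as partition
 * migration or failover, and IBMVNIC_CRQ_CMD_RSP for command
 * responses, which are demultiplexed on gen_crq->cmd below.
 */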
4032 static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
4033 			       struct ibmvnic_adapter *adapter)
4034 {
4035 	struct ibmvnic_generic_crq *gen_crq = &crq->generic;
4036 	struct net_device *netdev = adapter->netdev;
4037 	struct device *dev = &adapter->vdev->dev;
4038 	u64 *u64_crq = (u64 *)crq;
4039 	long rc;
4040 
4041 	netdev_dbg(netdev, "Handling CRQ: %016lx %016lx\n",
4042 		   (unsigned long int)be64_to_cpu(u64_crq[0]),
4043 		   (unsigned long int)be64_to_cpu(u64_crq[1]));
4044 	switch (gen_crq->first) {
4045 	case IBMVNIC_CRQ_INIT_RSP:
4046 		switch (gen_crq->cmd) {
4047 		case IBMVNIC_CRQ_INIT:
4048 			dev_info(dev, "Partner initialized\n");
4049 			adapter->from_passive_init = true;
4050 			complete(&adapter->init_done);
4051 			break;
4052 		case IBMVNIC_CRQ_INIT_COMPLETE:
4053 			dev_info(dev, "Partner initialization complete\n");
4054 			send_version_xchg(adapter);
4055 			break;
4056 		default:
4057 			dev_err(dev, "Unknown crq cmd: %d\n", gen_crq->cmd);
4058 		}
4059 		return;
4060 	case IBMVNIC_CRQ_XPORT_EVENT:
4061 		netif_carrier_off(netdev);
4062 		if (gen_crq->cmd == IBMVNIC_PARTITION_MIGRATED) {
4063 			dev_info(dev, "Migrated, re-enabling adapter\n");
4064 			ibmvnic_reset(adapter, VNIC_RESET_MOBILITY);
4065 		} else if (gen_crq->cmd == IBMVNIC_DEVICE_FAILOVER) {
4066 			dev_info(dev, "Backing device failover detected\n");
4067 			ibmvnic_reset(adapter, VNIC_RESET_FAILOVER);
4068 		} else {
4069 			/* The adapter lost the connection */
4070 			dev_err(dev, "Virtual Adapter failed (rc=%d)\n",
4071 				gen_crq->cmd);
4072 			ibmvnic_reset(adapter, VNIC_RESET_FATAL);
4073 		}
4074 		return;
4075 	case IBMVNIC_CRQ_CMD_RSP:
4076 		break;
4077 	default:
4078 		dev_err(dev, "Got an invalid msg type 0x%02x\n",
4079 			gen_crq->first);
4080 		return;
4081 	}
4082 
4083 	switch (gen_crq->cmd) {
4084 	case VERSION_EXCHANGE_RSP:
4085 		rc = crq->version_exchange_rsp.rc.code;
4086 		if (rc) {
4087 			dev_err(dev, "Error %ld in VERSION_EXCHG_RSP\n", rc);
4088 			break;
4089 		}
4090 		dev_info(dev, "Partner protocol version is %d\n",
4091 			 be16_to_cpu(crq->version_exchange_rsp.version));
4092 		if (be16_to_cpu(crq->version_exchange_rsp.version) <
4093 		    ibmvnic_version)
4094 			ibmvnic_version =
4095 			    be16_to_cpu(crq->version_exchange_rsp.version);
4096 		send_cap_queries(adapter);
4097 		break;
4098 	case QUERY_CAPABILITY_RSP:
4099 		handle_query_cap_rsp(crq, adapter);
4100 		break;
4101 	case QUERY_MAP_RSP:
4102 		handle_query_map_rsp(crq, adapter);
4103 		break;
4104 	case REQUEST_MAP_RSP:
4105 		adapter->fw_done_rc = crq->request_map_rsp.rc.code;
4106 		complete(&adapter->fw_done);
4107 		break;
4108 	case REQUEST_UNMAP_RSP:
4109 		handle_request_unmap_rsp(crq, adapter);
4110 		break;
4111 	case REQUEST_CAPABILITY_RSP:
4112 		handle_request_cap_rsp(crq, adapter);
4113 		break;
4114 	case LOGIN_RSP:
4115 		netdev_dbg(netdev, "Got Login Response\n");
4116 		handle_login_rsp(crq, adapter);
4117 		break;
4118 	case LOGICAL_LINK_STATE_RSP:
4119 		netdev_dbg(netdev,
4120 			   "Got Logical Link State Response, state: %d rc: %d\n",
4121 			   crq->logical_link_state_rsp.link_state,
4122 			   crq->logical_link_state_rsp.rc.code);
4123 		adapter->logical_link_state =
4124 		    crq->logical_link_state_rsp.link_state;
4125 		adapter->init_done_rc = crq->logical_link_state_rsp.rc.code;
4126 		complete(&adapter->init_done);
4127 		break;
4128 	case LINK_STATE_INDICATION:
4129 		netdev_dbg(netdev, "Got Logical Link State Indication\n");
4130 		adapter->phys_link_state =
4131 		    crq->link_state_indication.phys_link_state;
4132 		adapter->logical_link_state =
4133 		    crq->link_state_indication.logical_link_state;
4134 		break;
4135 	case CHANGE_MAC_ADDR_RSP:
4136 		netdev_dbg(netdev, "Got MAC address change Response\n");
4137 		adapter->fw_done_rc = handle_change_mac_rsp(crq, adapter);
4138 		break;
4139 	case ERROR_INDICATION:
4140 		netdev_dbg(netdev, "Got Error Indication\n");
4141 		handle_error_indication(crq, adapter);
4142 		break;
4143 	case REQUEST_ERROR_RSP:
4144 		netdev_dbg(netdev, "Got Error Detail Response\n");
4145 		handle_error_info_rsp(crq, adapter);
4146 		break;
4147 	case REQUEST_STATISTICS_RSP:
4148 		netdev_dbg(netdev, "Got Statistics Response\n");
4149 		complete(&adapter->stats_done);
4150 		break;
4151 	case QUERY_IP_OFFLOAD_RSP:
4152 		netdev_dbg(netdev, "Got Query IP offload Response\n");
4153 		handle_query_ip_offload_rsp(adapter);
4154 		break;
4155 	case MULTICAST_CTRL_RSP:
4156 		netdev_dbg(netdev, "Got multicast control Response\n");
4157 		break;
4158 	case CONTROL_IP_OFFLOAD_RSP:
4159 		netdev_dbg(netdev, "Got Control IP offload Response\n");
4160 		dma_unmap_single(dev, adapter->ip_offload_ctrl_tok,
4161 				 sizeof(adapter->ip_offload_ctrl),
4162 				 DMA_TO_DEVICE);
4163 		complete(&adapter->init_done);
4164 		break;
4165 	case COLLECT_FW_TRACE_RSP:
4166 		netdev_dbg(netdev, "Got Collect firmware trace Response\n");
4167 		complete(&adapter->fw_done);
4168 		break;
4169 	case GET_VPD_SIZE_RSP:
4170 		handle_vpd_size_rsp(crq, adapter);
4171 		break;
4172 	case GET_VPD_RSP:
4173 		handle_vpd_rsp(crq, adapter);
4174 		break;
4175 	default:
4176 		netdev_err(netdev, "Got an invalid cmd type 0x%02x\n",
4177 			   gen_crq->cmd);
4178 	}
4179 }
4180 
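/* The CRQ interrupt handler only schedules the tasklet; the messages
 * themselves are pulled off the queue and dispatched in
 * ibmvnic_tasklet(), which drains the CRQ under crq->lock.
 */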
4181 static irqreturn_t ibmvnic_interrupt(int irq, void *instance)
4182 {
4183 	struct ibmvnic_adapter *adapter = instance;
4184 
4185 	tasklet_schedule(&adapter->tasklet);
4186 	return IRQ_HANDLED;
4187 }
4188 
4189 static void ibmvnic_tasklet(unsigned long data)
4190 {
4191 	struct ibmvnic_adapter *adapter = (struct ibmvnic_adapter *)data;
4192 	struct ibmvnic_crq_queue *queue = &adapter->crq;
4193 	union ibmvnic_crq *crq;
4194 	unsigned long flags;
4195 	bool done = false;
4196 
4197 	spin_lock_irqsave(&queue->lock, flags);
4198 	while (!done) {
4199 		/* Pull all the valid messages off the CRQ */
4200 		while ((crq = ibmvnic_next_crq(adapter)) != NULL) {
4201 			ibmvnic_handle_crq(crq, adapter);
4202 			crq->generic.first = 0;
4203 		}
4204 
4205 		/* remain in tasklet until all
4206 		 * capabilities responses are received
4207 		 */
4208 		if (!adapter->wait_capability)
4209 			done = true;
4210 	}
4211 	/* if capabilities CRQs were sent in this tasklet, the next
4212 	 * tasklet run must wait until all responses are received
4213 	 */
4214 	if (atomic_read(&adapter->running_cap_crqs) != 0)
4215 		adapter->wait_capability = true;
4216 	spin_unlock_irqrestore(&queue->lock, flags);
4217 }
4218 
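/* H_ENABLE_CRQ can report that the hypervisor is still quiescing the
 * queue (e.g. right after a partition migration), so keep retrying
 * until a definitive return code comes back.
 */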
4219 static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *adapter)
4220 {
4221 	struct vio_dev *vdev = adapter->vdev;
4222 	int rc;
4223 
4224 	do {
4225 		rc = plpar_hcall_norets(H_ENABLE_CRQ, vdev->unit_address);
4226 	} while (rc == H_IN_PROGRESS || rc == H_BUSY || H_IS_LONG_BUSY(rc));
4227 
4228 	if (rc)
4229 		dev_err(&vdev->dev, "Error enabling adapter (rc=%d)\n", rc);
4230 
4231 	return rc;
4232 }
4233 
4234 static int ibmvnic_reset_crq(struct ibmvnic_adapter *adapter)
4235 {
4236 	struct ibmvnic_crq_queue *crq = &adapter->crq;
4237 	struct device *dev = &adapter->vdev->dev;
4238 	struct vio_dev *vdev = adapter->vdev;
4239 	int rc;
4240 
4241 	/* Close the CRQ */
4242 	do {
4243 		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
4244 	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
4245 
4246 	/* Clean out the queue */
4247 	memset(crq->msgs, 0, PAGE_SIZE);
4248 	crq->cur = 0;
4249 
4250 	/* And re-open it */
4251 	rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
4252 				crq->msg_token, PAGE_SIZE);
4253 
4254 	if (rc == H_CLOSED)
4255 		/* Adapter is good, but other end is not ready */
4256 		dev_warn(dev, "Partner adapter not ready\n");
4257 	else if (rc != 0)
4258 		dev_warn(dev, "Couldn't register crq (rc=%d)\n", rc);
4259 
4260 	return rc;
4261 }
4262 
4263 static void release_crq_queue(struct ibmvnic_adapter *adapter)
4264 {
4265 	struct ibmvnic_crq_queue *crq = &adapter->crq;
4266 	struct vio_dev *vdev = adapter->vdev;
4267 	long rc;
4268 
4269 	if (!crq->msgs)
4270 		return;
4271 
4272 	netdev_dbg(adapter->netdev, "Releasing CRQ\n");
4273 	free_irq(vdev->irq, adapter);
4274 	tasklet_kill(&adapter->tasklet);
4275 	do {
4276 		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
4277 	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
4278 
4279 	dma_unmap_single(&vdev->dev, crq->msg_token, PAGE_SIZE,
4280 			 DMA_BIDIRECTIONAL);
4281 	free_page((unsigned long)crq->msgs);
4282 	crq->msgs = NULL;
4283 }
4284 
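/* Registering the CRQ can fail in recoverable ways: H_RESOURCE
 * usually means a previous kernel (e.g. the kexec source) still owns
 * the queue, so a free/re-register cycle is attempted via
 * ibmvnic_reset_crq(); H_CLOSED means our end is registered but the
 * partner has not opened its side yet, in which case the
 * IBMVNIC_CRQ_INIT handshake completes later and the open is treated
 * as a success.
 */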
4285 static int init_crq_queue(struct ibmvnic_adapter *adapter)
4286 {
4287 	struct ibmvnic_crq_queue *crq = &adapter->crq;
4288 	struct device *dev = &adapter->vdev->dev;
4289 	struct vio_dev *vdev = adapter->vdev;
4290 	int rc, retrc = -ENOMEM;
4291 
4292 	if (crq->msgs)
4293 		return 0;
4294 
4295 	crq->msgs = (union ibmvnic_crq *)get_zeroed_page(GFP_KERNEL);
4296 	/* Should we allocate more than one page? */
4297 
4298 	if (!crq->msgs)
4299 		return -ENOMEM;
4300 
4301 	crq->size = PAGE_SIZE / sizeof(*crq->msgs);
4302 	crq->msg_token = dma_map_single(dev, crq->msgs, PAGE_SIZE,
4303 					DMA_BIDIRECTIONAL);
4304 	if (dma_mapping_error(dev, crq->msg_token))
4305 		goto map_failed;
4306 
4307 	rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
4308 				crq->msg_token, PAGE_SIZE);
4309 
4310 	if (rc == H_RESOURCE)
4311 		/* maybe kexecing and resource is busy. try a reset */
4312 		rc = ibmvnic_reset_crq(adapter);
4313 	retrc = rc;
4314 
4315 	if (rc == H_CLOSED) {
4316 		dev_warn(dev, "Partner adapter not ready\n");
4317 	} else if (rc) {
4318 		dev_warn(dev, "Error %d opening adapter\n", rc);
4319 		goto reg_crq_failed;
4320 	}
4321 
4322 	retrc = 0;
4323 
4324 	tasklet_init(&adapter->tasklet, ibmvnic_tasklet,
4325 		     (unsigned long)adapter);
4326 
4327 	netdev_dbg(adapter->netdev, "registering irq 0x%x\n", vdev->irq);
4328 	rc = request_irq(vdev->irq, ibmvnic_interrupt, 0, IBMVNIC_NAME,
4329 			 adapter);
4330 	if (rc) {
4331 		dev_err(dev, "Couldn't register irq 0x%x. rc=%d\n",
4332 			vdev->irq, rc);
4333 		goto req_irq_failed;
4334 	}
4335 
4336 	rc = vio_enable_interrupts(vdev);
4337 	if (rc) {
4338 		dev_err(dev, "Error %d enabling interrupts\n", rc);
4339 		goto req_irq_failed;
4340 	}
4341 
4342 	crq->cur = 0;
4343 	spin_lock_init(&crq->lock);
4344 
4345 	return retrc;
4346 
4347 req_irq_failed:
4348 	tasklet_kill(&adapter->tasklet);
4349 	do {
4350 		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
4351 	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
4352 reg_crq_failed:
4353 	dma_unmap_single(dev, crq->msg_token, PAGE_SIZE, DMA_BIDIRECTIONAL);
4354 map_failed:
4355 	free_page((unsigned long)crq->msgs);
4356 	crq->msgs = NULL;
4357 	return retrc;
4358 }
4359 
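/* Drive the initialization sequence: (re)open the CRQ, send the CRQ
 * init request, and wait for capability negotiation to finish
 * (init_done). During a reset the existing sub-CRQs are reused if the
 * negotiated queue counts are unchanged; otherwise they are released
 * and reallocated to match the new configuration.
 */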
4360 static int ibmvnic_init(struct ibmvnic_adapter *adapter)
4361 {
4362 	struct device *dev = &adapter->vdev->dev;
4363 	unsigned long timeout = msecs_to_jiffies(30000);
4364 	u64 old_num_rx_queues, old_num_tx_queues;
4365 	int rc;
4366 
4367 	if (adapter->resetting && !adapter->wait_for_reset) {
4368 		rc = ibmvnic_reset_crq(adapter);
4369 		if (!rc)
4370 			rc = vio_enable_interrupts(adapter->vdev);
4371 	} else {
4372 		rc = init_crq_queue(adapter);
4373 	}
4374 
4375 	if (rc) {
4376 		dev_err(dev, "Couldn't initialize crq. rc=%d\n", rc);
4377 		return rc;
4378 	}
4379 
4380 	adapter->from_passive_init = false;
4381 
4382 	old_num_rx_queues = adapter->req_rx_queues;
4383 	old_num_tx_queues = adapter->req_tx_queues;
4384 
4385 	init_completion(&adapter->init_done);
4386 	adapter->init_done_rc = 0;
4387 	ibmvnic_send_crq_init(adapter);
4388 	if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
4389 		dev_err(dev, "Initialization sequence timed out\n");
4390 		return -1;
4391 	}
4392 
4393 	if (adapter->init_done_rc) {
4394 		release_crq_queue(adapter);
4395 		return adapter->init_done_rc;
4396 	}
4397 
4398 	if (adapter->from_passive_init) {
4399 		adapter->state = VNIC_OPEN;
4400 		adapter->from_passive_init = false;
4401 		return -1;
4402 	}
4403 
4404 	if (adapter->resetting && !adapter->wait_for_reset) {
4405 		if (adapter->req_rx_queues != old_num_rx_queues ||
4406 		    adapter->req_tx_queues != old_num_tx_queues) {
4407 			release_sub_crqs(adapter, 0);
4408 			rc = init_sub_crqs(adapter);
4409 		} else {
4410 			rc = reset_sub_crq_queues(adapter);
4411 		}
4412 	} else {
4413 		rc = init_sub_crqs(adapter);
4414 	}
4415 
4416 	if (rc) {
4417 		dev_err(dev, "Initialization of sub crqs failed\n");
4418 		release_crq_queue(adapter);
4419 		return rc;
4420 	}
4421 
4422 	rc = init_sub_crq_irqs(adapter);
4423 	if (rc) {
4424 		dev_err(dev, "Failed to initialize sub crq irqs\n");
4425 		release_crq_queue(adapter);
4426 	}
4427 
4428 	return rc;
4429 }
4430 
4431 static struct device_attribute dev_attr_failover;
4432 
4433 static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
4434 {
4435 	struct ibmvnic_adapter *adapter;
4436 	struct net_device *netdev;
4437 	unsigned char *mac_addr_p;
4438 	int rc;
4439 
4440 	dev_dbg(&dev->dev, "entering ibmvnic_probe for UA 0x%x\n",
4441 		dev->unit_address);
4442 
4443 	mac_addr_p = (unsigned char *)vio_get_attribute(dev,
4444 							VETH_MAC_ADDR, NULL);
4445 	if (!mac_addr_p) {
4446 		dev_err(&dev->dev,
4447 			"(%s:%3.3d) ERROR: Can't find MAC_ADDR attribute\n",
4448 			__FILE__, __LINE__);
4449 		return -EINVAL;
4450 	}
4451 
4452 	netdev = alloc_etherdev_mq(sizeof(struct ibmvnic_adapter),
4453 				   IBMVNIC_MAX_QUEUES);
4454 	if (!netdev)
4455 		return -ENOMEM;
4456 
4457 	adapter = netdev_priv(netdev);
4458 	adapter->state = VNIC_PROBING;
4459 	dev_set_drvdata(&dev->dev, netdev);
4460 	adapter->vdev = dev;
4461 	adapter->netdev = netdev;
4462 
4463 	ether_addr_copy(adapter->mac_addr, mac_addr_p);
4464 	ether_addr_copy(netdev->dev_addr, adapter->mac_addr);
4465 	netdev->irq = dev->irq;
4466 	netdev->netdev_ops = &ibmvnic_netdev_ops;
4467 	netdev->ethtool_ops = &ibmvnic_ethtool_ops;
4468 	SET_NETDEV_DEV(netdev, &dev->dev);
4469 
4470 	spin_lock_init(&adapter->stats_lock);
4471 
4472 	INIT_LIST_HEAD(&adapter->errors);
4473 	spin_lock_init(&adapter->error_list_lock);
4474 
4475 	INIT_WORK(&adapter->ibmvnic_reset, __ibmvnic_reset);
4476 	INIT_LIST_HEAD(&adapter->rwi_list);
4477 	mutex_init(&adapter->reset_lock);
4478 	mutex_init(&adapter->rwi_lock);
4479 	adapter->resetting = false;
4480 
4481 	adapter->mac_change_pending = false;
4482 
4483 	do {
4484 		rc = ibmvnic_init(adapter);
4485 		if (rc && rc != EAGAIN)
4486 			goto ibmvnic_init_fail;
4487 	} while (rc == EAGAIN);
4488 
4489 	netdev->mtu = adapter->req_mtu - ETH_HLEN;
4490 	netdev->min_mtu = adapter->min_mtu - ETH_HLEN;
4491 	netdev->max_mtu = adapter->max_mtu - ETH_HLEN;
4492 
4493 	rc = device_create_file(&dev->dev, &dev_attr_failover);
4494 	if (rc)
4495 		goto ibmvnic_init_fail;
4496 
4497 	netif_carrier_off(netdev);
4498 	rc = register_netdev(netdev);
4499 	if (rc) {
4500 		dev_err(&dev->dev, "failed to register netdev rc=%d\n", rc);
4501 		goto ibmvnic_register_fail;
4502 	}
4503 	dev_info(&dev->dev, "ibmvnic registered\n");
4504 
4505 	adapter->state = VNIC_PROBED;
4506 
4507 	adapter->wait_for_reset = false;
4508 
4509 	return 0;
4510 
4511 ibmvnic_register_fail:
4512 	device_remove_file(&dev->dev, &dev_attr_failover);
4513 
4514 ibmvnic_init_fail:
4515 	release_sub_crqs(adapter, 1);
4516 	release_crq_queue(adapter);
4517 	free_netdev(netdev);
4518 
4519 	return rc;
4520 }
4521 
4522 static int ibmvnic_remove(struct vio_dev *dev)
4523 {
4524 	struct net_device *netdev = dev_get_drvdata(&dev->dev);
4525 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
4526 
4527 	adapter->state = VNIC_REMOVING;
4528 	unregister_netdev(netdev);
4529 	mutex_lock(&adapter->reset_lock);
4530 
4531 	release_resources(adapter);
4532 	release_sub_crqs(adapter, 1);
4533 	release_crq_queue(adapter);
4534 
4535 	adapter->state = VNIC_REMOVED;
4536 
4537 	mutex_unlock(&adapter->reset_lock);
4538 	device_remove_file(&dev->dev, &dev_attr_failover);
4539 	free_netdev(netdev);
4540 	dev_set_drvdata(&dev->dev, NULL);
4541 
4542 	return 0;
4543 }
4544 
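/* Writing "1" to the failover sysfs attribute asks the firmware to
 * fail the session over to a backup device: H_GET_SESSION_TOKEN
 * returns a token identifying the current session, and handing that
 * token back with H_SESSION_ERR_DETECTED makes the hypervisor treat
 * the session as failed, which triggers the failover reset path.
 */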
4545 static ssize_t failover_store(struct device *dev, struct device_attribute *attr,
4546 			      const char *buf, size_t count)
4547 {
4548 	struct net_device *netdev = dev_get_drvdata(dev);
4549 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
4550 	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
4551 	__be64 session_token;
4552 	long rc;
4553 
4554 	if (!sysfs_streq(buf, "1"))
4555 		return -EINVAL;
4556 
4557 	rc = plpar_hcall(H_VIOCTL, retbuf, adapter->vdev->unit_address,
4558 			 H_GET_SESSION_TOKEN, 0, 0, 0);
4559 	if (rc) {
4560 		netdev_err(netdev, "Couldn't retrieve session token, rc %ld\n",
4561 			   rc);
4562 		return -EINVAL;
4563 	}
4564 
4565 	session_token = (__be64)retbuf[0];
4566 	netdev_dbg(netdev, "Initiating client failover, session id %llx\n",
4567 		   be64_to_cpu(session_token));
4568 	rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
4569 				H_SESSION_ERR_DETECTED, session_token, 0, 0);
4570 	if (rc) {
4571 		netdev_err(netdev, "Client initiated failover failed, rc %ld\n",
4572 			   rc);
4573 		return -EINVAL;
4574 	}
4575 
4576 	return count;
4577 }
4578 
4579 static DEVICE_ATTR_WO(failover);
4580 
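/* Estimate the IO entitlement this device wants: one page for the CRQ,
 * the DMA mapped statistics buffer, four pages per sub-CRQ, and the
 * long term mapped buffers of every rx pool. As a purely illustrative
 * example (the real numbers depend on the negotiated configuration):
 * with 4K pages, 4 rx pools of 1024 buffers whose 2K buffers each
 * round up to one IOMMU page, the pools alone account for
 * 4 * 1024 * 4K = 16M, dwarfing the per-queue overhead.
 */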
4581 static unsigned long ibmvnic_get_desired_dma(struct vio_dev *vdev)
4582 {
4583 	struct net_device *netdev = dev_get_drvdata(&vdev->dev);
4584 	struct ibmvnic_adapter *adapter;
4585 	struct iommu_table *tbl;
4586 	unsigned long ret = 0;
4587 	int i;
4588 
4589 	tbl = get_iommu_table_base(&vdev->dev);
4590 
4591 	/* netdev inits at probe time along with the structures we need below */
4592 	if (!netdev)
4593 		return IOMMU_PAGE_ALIGN(IBMVNIC_IO_ENTITLEMENT_DEFAULT, tbl);
4594 
4595 	adapter = netdev_priv(netdev);
4596 
4597 	ret += PAGE_SIZE; /* the crq message queue */
4598 	ret += IOMMU_PAGE_ALIGN(sizeof(struct ibmvnic_statistics), tbl);
4599 
4600 	for (i = 0; i < adapter->req_tx_queues + adapter->req_rx_queues; i++)
4601 		ret += 4 * PAGE_SIZE; /* the scrq message queue */
4602 
4603 	for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
4604 	     i++)
4605 		ret += adapter->rx_pool[i].size *
4606 		    IOMMU_PAGE_ALIGN(adapter->rx_pool[i].buff_size, tbl);
4607 
4608 	return ret;
4609 }
4610 
4611 static int ibmvnic_resume(struct device *dev)
4612 {
4613 	struct net_device *netdev = dev_get_drvdata(dev);
4614 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
4615 
4616 	if (adapter->state != VNIC_OPEN)
4617 		return 0;
4618 
4619 	tasklet_schedule(&adapter->tasklet);
4620 
4621 	return 0;
4622 }
4623 
4624 static const struct vio_device_id ibmvnic_device_table[] = {
4625 	{"network", "IBM,vnic"},
4626 	{"", "" }
4627 };
4628 MODULE_DEVICE_TABLE(vio, ibmvnic_device_table);
4629 
4630 static const struct dev_pm_ops ibmvnic_pm_ops = {
4631 	.resume = ibmvnic_resume
4632 };
4633 
4634 static struct vio_driver ibmvnic_driver = {
4635 	.id_table       = ibmvnic_device_table,
4636 	.probe          = ibmvnic_probe,
4637 	.remove         = ibmvnic_remove,
4638 	.get_desired_dma = ibmvnic_get_desired_dma,
4639 	.name		= ibmvnic_driver_name,
4640 	.pm		= &ibmvnic_pm_ops,
4641 };
4642 
4643 /* module functions */
4644 static int __init ibmvnic_module_init(void)
4645 {
4646 	pr_info("%s: %s %s\n", ibmvnic_driver_name, ibmvnic_driver_string,
4647 		IBMVNIC_DRIVER_VERSION);
4648 
4649 	return vio_register_driver(&ibmvnic_driver);
4650 }
4651 
4652 static void __exit ibmvnic_module_exit(void)
4653 {
4654 	vio_unregister_driver(&ibmvnic_driver);
4655 }
4656 
4657 module_init(ibmvnic_module_init);
4658 module_exit(ibmvnic_module_exit);
4659