xref: /linux/drivers/net/ethernet/ibm/ibmvnic.c (revision 0883c2c06fb5bcf5b9e008270827e63c09a88c1e)
1 /**************************************************************************/
2 /*                                                                        */
3 /*  IBM System i and System p Virtual NIC Device Driver                   */
4 /*  Copyright (C) 2014 IBM Corp.                                          */
5 /*  Santiago Leon (santi_leon@yahoo.com)                                  */
6 /*  Thomas Falcon (tlfalcon@linux.vnet.ibm.com)                           */
7 /*  John Allen (jallen@linux.vnet.ibm.com)                                */
8 /*                                                                        */
9 /*  This program is free software; you can redistribute it and/or modify  */
10 /*  it under the terms of the GNU General Public License as published by  */
11 /*  the Free Software Foundation; either version 2 of the License, or     */
12 /*  (at your option) any later version.                                   */
13 /*                                                                        */
14 /*  This program is distributed in the hope that it will be useful,       */
15 /*  but WITHOUT ANY WARRANTY; without even the implied warranty of        */
16 /*  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the         */
17 /*  GNU General Public License for more details.                          */
18 /*                                                                        */
19 /*  You should have received a copy of the GNU General Public License     */
20 /*  along with this program.                                              */
21 /*                                                                        */
22 /* This module contains the implementation of a virtual ethernet device   */
23 /* for use with IBM i/p Series LPAR Linux. It utilizes the logical LAN    */
24 /* option of the RS/6000 Platform Architecture to interface with virtual  */
25 /* ethernet NICs that are presented to the partition by the hypervisor.   */
26 /*									   */
27 /* Messages are passed between the VNIC driver and the VNIC server using  */
28 /* Command/Response Queues (CRQs) and sub CRQs (sCRQs). CRQs are used to  */
29 /* issue and receive commands that initiate communication with the server */
30 /* on driver initialization. Sub CRQs (sCRQs) are similar to CRQs, but    */
31 /* are used by the driver to notify the server that a packet is           */
32 /* ready for transmission or that a buffer has been added to receive a    */
33 /* packet. Subsequently, sCRQs are used by the server to notify the       */
34 /* driver that a packet transmission has been completed or that a packet  */
35 /* has been received and placed in a waiting buffer.                      */
36 /*                                                                        */
37 /* In lieu of a more conventional "on-the-fly" DMA mapping strategy in    */
38 /* which skbs are DMA mapped and immediately unmapped when the transmit   */
39 /* or receive has been completed, the VNIC driver is required to use      */
40 /* "long term mapping". This entails that large, continuous DMA mapped    */
41 /*  "long term mapping". This entails that large, contiguous DMA mapped    */
42 /* then continuously reused to pass skbs to and from the VNIC server.     */
43 /*                                                                        */
44 /**************************************************************************/
45 
46 #include <linux/module.h>
47 #include <linux/moduleparam.h>
48 #include <linux/types.h>
49 #include <linux/errno.h>
50 #include <linux/completion.h>
51 #include <linux/ioport.h>
52 #include <linux/dma-mapping.h>
53 #include <linux/kernel.h>
54 #include <linux/netdevice.h>
55 #include <linux/etherdevice.h>
56 #include <linux/skbuff.h>
57 #include <linux/init.h>
58 #include <linux/delay.h>
59 #include <linux/mm.h>
60 #include <linux/ethtool.h>
61 #include <linux/proc_fs.h>
62 #include <linux/in.h>
63 #include <linux/ip.h>
64 #include <linux/ipv6.h>
65 #include <linux/irq.h>
66 #include <linux/kthread.h>
67 #include <linux/seq_file.h>
68 #include <linux/debugfs.h>
69 #include <linux/interrupt.h>
70 #include <net/net_namespace.h>
71 #include <asm/hvcall.h>
72 #include <linux/atomic.h>
73 #include <asm/vio.h>
74 #include <asm/iommu.h>
75 #include <linux/uaccess.h>
76 #include <asm/firmware.h>
78 
79 #include "ibmvnic.h"
80 
81 static const char ibmvnic_driver_name[] = "ibmvnic";
82 static const char ibmvnic_driver_string[] = "IBM System i/p Virtual NIC Driver";
83 
84 MODULE_AUTHOR("Santiago Leon <santi_leon@yahoo.com>");
85 MODULE_DESCRIPTION("IBM System i/p Virtual NIC Driver");
86 MODULE_LICENSE("GPL");
87 MODULE_VERSION(IBMVNIC_DRIVER_VERSION);
88 
89 static int ibmvnic_version = IBMVNIC_INITIAL_VERSION;
90 static int ibmvnic_remove(struct vio_dev *);
91 static void release_sub_crqs(struct ibmvnic_adapter *);
92 static int ibmvnic_reset_crq(struct ibmvnic_adapter *);
93 static int ibmvnic_send_crq_init(struct ibmvnic_adapter *);
94 static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *);
95 static int ibmvnic_send_crq(struct ibmvnic_adapter *, union ibmvnic_crq *);
96 static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle,
97 		       union sub_crq *sub_crq);
98 static int send_subcrq_indirect(struct ibmvnic_adapter *, u64, u64, u64);
99 static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance);
100 static int enable_scrq_irq(struct ibmvnic_adapter *,
101 			   struct ibmvnic_sub_crq_queue *);
102 static int disable_scrq_irq(struct ibmvnic_adapter *,
103 			    struct ibmvnic_sub_crq_queue *);
104 static int pending_scrq(struct ibmvnic_adapter *,
105 			struct ibmvnic_sub_crq_queue *);
106 static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *,
107 					struct ibmvnic_sub_crq_queue *);
108 static int ibmvnic_poll(struct napi_struct *napi, int budget);
109 static void send_map_query(struct ibmvnic_adapter *adapter);
110 static void send_request_map(struct ibmvnic_adapter *, dma_addr_t, __be32, u8);
111 static void send_request_unmap(struct ibmvnic_adapter *, u8);
112 
113 struct ibmvnic_stat {
114 	char name[ETH_GSTRING_LEN];
115 	int offset;
116 };
117 
118 #define IBMVNIC_STAT_OFF(stat) (offsetof(struct ibmvnic_adapter, stats) + \
119 			     offsetof(struct ibmvnic_statistics, stat))
120 #define IBMVNIC_GET_STAT(a, off) (*((u64 *)(((unsigned long)(a)) + off)))
121 
122 static const struct ibmvnic_stat ibmvnic_stats[] = {
123 	{"rx_packets", IBMVNIC_STAT_OFF(rx_packets)},
124 	{"rx_bytes", IBMVNIC_STAT_OFF(rx_bytes)},
125 	{"tx_packets", IBMVNIC_STAT_OFF(tx_packets)},
126 	{"tx_bytes", IBMVNIC_STAT_OFF(tx_bytes)},
127 	{"ucast_tx_packets", IBMVNIC_STAT_OFF(ucast_tx_packets)},
128 	{"ucast_rx_packets", IBMVNIC_STAT_OFF(ucast_rx_packets)},
129 	{"mcast_tx_packets", IBMVNIC_STAT_OFF(mcast_tx_packets)},
130 	{"mcast_rx_packets", IBMVNIC_STAT_OFF(mcast_rx_packets)},
131 	{"bcast_tx_packets", IBMVNIC_STAT_OFF(bcast_tx_packets)},
132 	{"bcast_rx_packets", IBMVNIC_STAT_OFF(bcast_rx_packets)},
133 	{"align_errors", IBMVNIC_STAT_OFF(align_errors)},
134 	{"fcs_errors", IBMVNIC_STAT_OFF(fcs_errors)},
135 	{"single_collision_frames", IBMVNIC_STAT_OFF(single_collision_frames)},
136 	{"multi_collision_frames", IBMVNIC_STAT_OFF(multi_collision_frames)},
137 	{"sqe_test_errors", IBMVNIC_STAT_OFF(sqe_test_errors)},
138 	{"deferred_tx", IBMVNIC_STAT_OFF(deferred_tx)},
139 	{"late_collisions", IBMVNIC_STAT_OFF(late_collisions)},
140 	{"excess_collisions", IBMVNIC_STAT_OFF(excess_collisions)},
141 	{"internal_mac_tx_errors", IBMVNIC_STAT_OFF(internal_mac_tx_errors)},
142 	{"carrier_sense", IBMVNIC_STAT_OFF(carrier_sense)},
143 	{"too_long_frames", IBMVNIC_STAT_OFF(too_long_frames)},
144 	{"internal_mac_rx_errors", IBMVNIC_STAT_OFF(internal_mac_rx_errors)},
145 };
146 
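/* Register a sub-CRQ page with the hypervisor via H_REG_SUB_CRQ; on
 * success the new queue's number and interrupt source are returned
 * through @number and @irq.
 */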
147 static long h_reg_sub_crq(unsigned long unit_address, unsigned long token,
148 			  unsigned long length, unsigned long *number,
149 			  unsigned long *irq)
150 {
151 	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
152 	long rc;
153 
154 	rc = plpar_hcall(H_REG_SUB_CRQ, retbuf, unit_address, token, length);
155 	*number = retbuf[0];
156 	*irq = retbuf[1];
157 
158 	return rc;
159 }
160 
161 /* net_device_ops functions */
162 
163 static void init_rx_pool(struct ibmvnic_adapter *adapter,
164 			 struct ibmvnic_rx_pool *rx_pool, int num, int index,
165 			 int buff_size, int active)
166 {
167 	netdev_dbg(adapter->netdev,
168 		   "Initializing rx_pool %d, %d buffs, %d bytes each\n",
169 		   index, num, buff_size);
170 	rx_pool->size = num;
171 	rx_pool->index = index;
172 	rx_pool->buff_size = buff_size;
173 	rx_pool->active = active;
174 }
175 
176 static int alloc_long_term_buff(struct ibmvnic_adapter *adapter,
177 				struct ibmvnic_long_term_buff *ltb, int size)
178 {
179 	struct device *dev = &adapter->vdev->dev;
180 
181 	ltb->size = size;
182 	ltb->buff = dma_alloc_coherent(dev, ltb->size, &ltb->addr,
183 				       GFP_KERNEL);
184 
185 	if (!ltb->buff) {
186 		dev_err(dev, "Couldn't alloc long term buffer\n");
187 		return -ENOMEM;
188 	}
189 	ltb->map_id = adapter->map_id;
190 	adapter->map_id++;
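	/* Register the buffer with firmware and wait for the map response,
	 * which signals fw_done, before the buffer is used.
	 */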
191 	init_completion(&adapter->fw_done);
192 	send_request_map(adapter, ltb->addr,
193 			 ltb->size, ltb->map_id);
194 	wait_for_completion(&adapter->fw_done);
195 	return 0;
196 }
197 
198 static void free_long_term_buff(struct ibmvnic_adapter *adapter,
199 				struct ibmvnic_long_term_buff *ltb)
200 {
201 	struct device *dev = &adapter->vdev->dev;
202 
203 	dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
204 	send_request_unmap(adapter, ltb->map_id);
205 }
206 
207 static int alloc_rx_pool(struct ibmvnic_adapter *adapter,
208 			 struct ibmvnic_rx_pool *pool)
209 {
210 	struct device *dev = &adapter->vdev->dev;
211 	int i;
212 
213 	pool->free_map = kcalloc(pool->size, sizeof(int), GFP_KERNEL);
214 	if (!pool->free_map)
215 		return -ENOMEM;
216 
217 	pool->rx_buff = kcalloc(pool->size, sizeof(struct ibmvnic_rx_buff),
218 				GFP_KERNEL);
219 
220 	if (!pool->rx_buff) {
221 		dev_err(dev, "Couldn't alloc rx buffers\n");
222 		kfree(pool->free_map);
223 		return -ENOMEM;
224 	}
225 
226 	if (alloc_long_term_buff(adapter, &pool->long_term_buff,
227 				 pool->size * pool->buff_size)) {
228 		kfree(pool->free_map);
229 		kfree(pool->rx_buff);
230 		return -ENOMEM;
231 	}
232 
233 	for (i = 0; i < pool->size; ++i)
234 		pool->free_map[i] = i;
235 
236 	atomic_set(&pool->available, 0);
237 	pool->next_alloc = 0;
238 	pool->next_free = 0;
239 
240 	return 0;
241 }
242 
243 static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
244 			      struct ibmvnic_rx_pool *pool)
245 {
246 	int count = pool->size - atomic_read(&pool->available);
247 	struct device *dev = &adapter->vdev->dev;
248 	int buffers_added = 0;
249 	unsigned long lpar_rc;
250 	union sub_crq sub_crq;
251 	struct sk_buff *skb;
252 	unsigned int offset;
253 	dma_addr_t dma_addr;
254 	unsigned char *dst;
255 	u64 *handle_array;
256 	int shift = 0;
257 	int index;
258 	int i;
259 
260 	handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
261 				      be32_to_cpu(adapter->login_rsp_buf->
262 				      off_rxadd_subcrqs));
263 
264 	for (i = 0; i < count; ++i) {
265 		skb = alloc_skb(pool->buff_size, GFP_ATOMIC);
266 		if (!skb) {
267 			dev_err(dev, "Couldn't replenish rx buff\n");
268 			adapter->replenish_no_mem++;
269 			break;
270 		}
271 
272 		index = pool->free_map[pool->next_free];
273 
274 		if (pool->rx_buff[index].skb)
275 			dev_err(dev, "Inconsistent free_map!\n");
276 
277 		/* Copy the skb to the long term mapped DMA buffer */
278 		offset = index * pool->buff_size;
279 		dst = pool->long_term_buff.buff + offset;
280 		memset(dst, 0, pool->buff_size);
281 		dma_addr = pool->long_term_buff.addr + offset;
282 		pool->rx_buff[index].data = dst;
283 
284 		pool->free_map[pool->next_free] = IBMVNIC_INVALID_MAP;
285 		pool->rx_buff[index].dma = dma_addr;
286 		pool->rx_buff[index].skb = skb;
287 		pool->rx_buff[index].pool_index = pool->index;
288 		pool->rx_buff[index].size = pool->buff_size;
289 
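		/* The address of this rx_buff entry is sent as the correlator
		 * so the completion handler can find the buffer again.
		 */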
290 		memset(&sub_crq, 0, sizeof(sub_crq));
291 		sub_crq.rx_add.first = IBMVNIC_CRQ_CMD;
292 		sub_crq.rx_add.correlator =
293 		    cpu_to_be64((u64)&pool->rx_buff[index]);
294 		sub_crq.rx_add.ioba = cpu_to_be32(dma_addr);
295 		sub_crq.rx_add.map_id = pool->long_term_buff.map_id;
296 
297 		/* The length field of the sCRQ is defined to be 24 bits so the
298 		 * buffer size needs to be left shifted by a byte before it is
299 		 * converted to big endian to prevent the last byte from being
300 		 * truncated.
301 		 */
302 #ifdef __LITTLE_ENDIAN__
303 		shift = 8;
304 #endif
305 		sub_crq.rx_add.len = cpu_to_be32(pool->buff_size << shift);
306 
307 		lpar_rc = send_subcrq(adapter, handle_array[pool->index],
308 				      &sub_crq);
309 		if (lpar_rc != H_SUCCESS)
310 			goto failure;
311 
312 		buffers_added++;
313 		adapter->replenish_add_buff_success++;
314 		pool->next_free = (pool->next_free + 1) % pool->size;
315 	}
316 	atomic_add(buffers_added, &pool->available);
317 	return;
318 
319 failure:
320 	dev_info(dev, "replenish pools failure\n");
321 	pool->free_map[pool->next_free] = index;
322 	pool->rx_buff[index].skb = NULL;
323 	if (!dma_mapping_error(dev, dma_addr))
324 		dma_unmap_single(dev, dma_addr, pool->buff_size,
325 				 DMA_FROM_DEVICE);
326 
327 	dev_kfree_skb_any(skb);
328 	adapter->replenish_add_buff_failure++;
329 	atomic_add(buffers_added, &pool->available);
330 }
331 
332 static void replenish_pools(struct ibmvnic_adapter *adapter)
333 {
334 	int i;
335 
336 	if (adapter->migrated)
337 		return;
338 
339 	adapter->replenish_task_cycles++;
340 	for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
341 	     i++) {
342 		if (adapter->rx_pool[i].active)
343 			replenish_rx_pool(adapter, &adapter->rx_pool[i]);
344 	}
345 }
346 
347 static void free_rx_pool(struct ibmvnic_adapter *adapter,
348 			 struct ibmvnic_rx_pool *pool)
349 {
350 	int i;
351 
352 	kfree(pool->free_map);
353 	pool->free_map = NULL;
354 
355 	if (!pool->rx_buff)
356 		return;
357 
358 	for (i = 0; i < pool->size; i++) {
359 		if (pool->rx_buff[i].skb) {
360 			dev_kfree_skb_any(pool->rx_buff[i].skb);
361 			pool->rx_buff[i].skb = NULL;
362 		}
363 	}
364 	kfree(pool->rx_buff);
365 	pool->rx_buff = NULL;
366 }
367 
368 static int ibmvnic_open(struct net_device *netdev)
369 {
370 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
371 	struct device *dev = &adapter->vdev->dev;
372 	struct ibmvnic_tx_pool *tx_pool;
373 	union ibmvnic_crq crq;
374 	int rxadd_subcrqs;
375 	u64 *size_array;
376 	int tx_subcrqs;
377 	int i, j;
378 
379 	rxadd_subcrqs =
380 	    be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
381 	tx_subcrqs =
382 	    be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
383 	size_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
384 				  be32_to_cpu(adapter->login_rsp_buf->
385 					      off_rxadd_buff_size));
386 	adapter->map_id = 1;
387 	adapter->napi = kcalloc(adapter->req_rx_queues,
388 				sizeof(struct napi_struct), GFP_KERNEL);
389 	if (!adapter->napi)
390 		goto alloc_napi_failed;
391 	for (i = 0; i < adapter->req_rx_queues; i++) {
392 		netif_napi_add(netdev, &adapter->napi[i], ibmvnic_poll,
393 			       NAPI_POLL_WEIGHT);
394 		napi_enable(&adapter->napi[i]);
395 	}
396 	adapter->rx_pool =
397 	    kcalloc(rxadd_subcrqs, sizeof(struct ibmvnic_rx_pool), GFP_KERNEL);
398 
399 	if (!adapter->rx_pool)
400 		goto rx_pool_arr_alloc_failed;
401 	send_map_query(adapter);
402 	for (i = 0; i < rxadd_subcrqs; i++) {
403 		init_rx_pool(adapter, &adapter->rx_pool[i],
404 			     IBMVNIC_BUFFS_PER_POOL, i,
405 			     be64_to_cpu(size_array[i]), 1);
406 		if (alloc_rx_pool(adapter, &adapter->rx_pool[i])) {
407 			dev_err(dev, "Couldn't alloc rx pool\n");
408 			goto rx_pool_alloc_failed;
409 		}
410 	}
411 	adapter->tx_pool =
412 	    kcalloc(tx_subcrqs, sizeof(struct ibmvnic_tx_pool), GFP_KERNEL);
413 
414 	if (!adapter->tx_pool)
415 		goto tx_pool_arr_alloc_failed;
416 	for (i = 0; i < tx_subcrqs; i++) {
417 		tx_pool = &adapter->tx_pool[i];
418 		tx_pool->tx_buff =
419 		    kcalloc(adapter->max_tx_entries_per_subcrq,
420 			    sizeof(struct ibmvnic_tx_buff), GFP_KERNEL);
421 		if (!tx_pool->tx_buff)
422 			goto tx_pool_alloc_failed;
423 
424 		if (alloc_long_term_buff(adapter, &tx_pool->long_term_buff,
425 					 adapter->max_tx_entries_per_subcrq *
426 					 adapter->req_mtu))
427 			goto tx_ltb_alloc_failed;
428 
429 		tx_pool->free_map =
430 		    kcalloc(adapter->max_tx_entries_per_subcrq,
431 			    sizeof(int), GFP_KERNEL);
432 		if (!tx_pool->free_map)
433 			goto tx_fm_alloc_failed;
434 
435 		for (j = 0; j < adapter->max_tx_entries_per_subcrq; j++)
436 			tx_pool->free_map[j] = j;
437 
438 		tx_pool->consumer_index = 0;
439 		tx_pool->producer_index = 0;
440 	}
441 	adapter->bounce_buffer_size =
442 	    (netdev->mtu + ETH_HLEN - 1) / PAGE_SIZE + 1;
443 	adapter->bounce_buffer = kmalloc(adapter->bounce_buffer_size,
444 					 GFP_KERNEL);
445 	if (!adapter->bounce_buffer)
446 		goto bounce_alloc_failed;
447 
448 	adapter->bounce_buffer_dma = dma_map_single(dev, adapter->bounce_buffer,
449 						    adapter->bounce_buffer_size,
450 						    DMA_TO_DEVICE);
451 	if (dma_mapping_error(dev, adapter->bounce_buffer_dma)) {
452 		dev_err(dev, "Couldn't map tx bounce buffer\n");
453 		goto bounce_map_failed;
454 	}
455 	replenish_pools(adapter);
456 
457 	/* We're ready to receive frames, enable the sub-crq interrupts and
458 	 * set the logical link state to up
459 	 */
460 	for (i = 0; i < adapter->req_rx_queues; i++)
461 		enable_scrq_irq(adapter, adapter->rx_scrq[i]);
462 
463 	for (i = 0; i < adapter->req_tx_queues; i++)
464 		enable_scrq_irq(adapter, adapter->tx_scrq[i]);
465 
466 	memset(&crq, 0, sizeof(crq));
467 	crq.logical_link_state.first = IBMVNIC_CRQ_CMD;
468 	crq.logical_link_state.cmd = LOGICAL_LINK_STATE;
469 	crq.logical_link_state.link_state = IBMVNIC_LOGICAL_LNK_UP;
470 	ibmvnic_send_crq(adapter, &crq);
471 
472 	netif_start_queue(netdev);
473 	return 0;
474 
475 bounce_map_failed:
476 	kfree(adapter->bounce_buffer);
477 bounce_alloc_failed:
478 	i = tx_subcrqs - 1;
479 	kfree(adapter->tx_pool[i].free_map);
480 tx_fm_alloc_failed:
481 	free_long_term_buff(adapter, &adapter->tx_pool[i].long_term_buff);
482 tx_ltb_alloc_failed:
483 	kfree(adapter->tx_pool[i].tx_buff);
484 tx_pool_alloc_failed:
485 	for (j = 0; j < i; j++) {
486 		kfree(adapter->tx_pool[j].tx_buff);
487 		free_long_term_buff(adapter,
488 				    &adapter->tx_pool[j].long_term_buff);
489 		kfree(adapter->tx_pool[j].free_map);
490 	}
491 	kfree(adapter->tx_pool);
492 	adapter->tx_pool = NULL;
493 tx_pool_arr_alloc_failed:
494 	i = rxadd_subcrqs;
495 rx_pool_alloc_failed:
496 	for (j = 0; j < i; j++) {
497 		free_rx_pool(adapter, &adapter->rx_pool[j]);
498 		free_long_term_buff(adapter,
499 				    &adapter->rx_pool[j].long_term_buff);
500 	}
501 	kfree(adapter->rx_pool);
502 	adapter->rx_pool = NULL;
503 rx_pool_arr_alloc_failed:
504 	for (i = 0; i < adapter->req_rx_queues; i++)
505 		napi_disable(&adapter->napi[i]);
506 alloc_napi_failed:
507 	return -ENOMEM;
508 }
509 
510 static int ibmvnic_close(struct net_device *netdev)
511 {
512 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
513 	struct device *dev = &adapter->vdev->dev;
514 	union ibmvnic_crq crq;
515 	int i;
516 
517 	adapter->closing = true;
518 
519 	for (i = 0; i < adapter->req_rx_queues; i++)
520 		napi_disable(&adapter->napi[i]);
521 
522 	netif_stop_queue(netdev);
523 
524 	if (adapter->bounce_buffer) {
525 		if (!dma_mapping_error(dev, adapter->bounce_buffer_dma)) {
526 			dma_unmap_single(&adapter->vdev->dev,
527 					 adapter->bounce_buffer_dma,
528 					 adapter->bounce_buffer_size,
529 					 DMA_TO_DEVICE);
530 			adapter->bounce_buffer_dma = DMA_ERROR_CODE;
531 		}
532 		kfree(adapter->bounce_buffer);
533 		adapter->bounce_buffer = NULL;
534 	}
535 
536 	memset(&crq, 0, sizeof(crq));
537 	crq.logical_link_state.first = IBMVNIC_CRQ_CMD;
538 	crq.logical_link_state.cmd = LOGICAL_LINK_STATE;
539 	crq.logical_link_state.link_state = IBMVNIC_LOGICAL_LNK_DN;
540 	ibmvnic_send_crq(adapter, &crq);
541 
542 	for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
543 	     i++) {
544 		kfree(adapter->tx_pool[i].tx_buff);
545 		free_long_term_buff(adapter,
546 				    &adapter->tx_pool[i].long_term_buff);
547 		kfree(adapter->tx_pool[i].free_map);
548 	}
549 	kfree(adapter->tx_pool);
550 	adapter->tx_pool = NULL;
551 
552 	for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
553 	     i++) {
554 		free_rx_pool(adapter, &adapter->rx_pool[i]);
555 		free_long_term_buff(adapter,
556 				    &adapter->rx_pool[i].long_term_buff);
557 	}
558 	kfree(adapter->rx_pool);
559 	adapter->rx_pool = NULL;
560 
561 	adapter->closing = false;
562 
563 	return 0;
564 }
565 
566 /**
567  * build_hdr_data - creates L2/L3/L4 header data buffer
568  * @hdr_field: bitfield determining needed headers
569  * @skb: socket buffer
570  * @hdr_len: array of header lengths to be filled in
571  * @hdr_data: buffer to store the built header data
572  *
573  * Reads hdr_field to determine which headers are needed by firmware.
574  * Builds a buffer containing these headers.  Saves individual header
575  * lengths and total buffer length to be used to build descriptors.
576  */
577 static int build_hdr_data(u8 hdr_field, struct sk_buff *skb,
578 			  int *hdr_len, u8 *hdr_data)
579 {
580 	int len = 0;
581 	u8 *hdr;
582 
583 	hdr_len[0] = sizeof(struct ethhdr);
584 
585 	if (skb->protocol == htons(ETH_P_IP)) {
586 		hdr_len[1] = ip_hdr(skb)->ihl * 4;
587 		if (ip_hdr(skb)->protocol == IPPROTO_TCP)
588 			hdr_len[2] = tcp_hdrlen(skb);
589 		else if (ip_hdr(skb)->protocol == IPPROTO_UDP)
590 			hdr_len[2] = sizeof(struct udphdr);
591 	} else if (skb->protocol == htons(ETH_P_IPV6)) {
592 		hdr_len[1] = sizeof(struct ipv6hdr);
593 		if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
594 			hdr_len[2] = tcp_hdrlen(skb);
595 		else if (ipv6_hdr(skb)->nexthdr == IPPROTO_UDP)
596 			hdr_len[2] = sizeof(struct udphdr);
597 	}
598 
599 	memset(hdr_data, 0, 120);
600 	if ((hdr_field >> 6) & 1) {
601 		hdr = skb_mac_header(skb);
602 		memcpy(hdr_data, hdr, hdr_len[0]);
603 		len += hdr_len[0];
604 	}
605 
606 	if ((hdr_field >> 5) & 1) {
607 		hdr = skb_network_header(skb);
608 		memcpy(hdr_data + len, hdr, hdr_len[1]);
609 		len += hdr_len[1];
610 	}
611 
612 	if ((hdr_field >> 4) & 1) {
613 		hdr = skb_transport_header(skb);
614 		memcpy(hdr_data + len, hdr, hdr_len[2]);
615 		len += hdr_len[2];
616 	}
617 	return len;
618 }
619 
620 /**
621  * create_hdr_descs - create header and header extension descriptors
622  * @hdr_field: bitfield determining needed headers
623  * @hdr_data: buffer containing header data
624  * @len: length of hdr_data buffer
625  * @hdr_len: array of individual header lengths
626  * @scrq_arr: descriptor array
627  *
628  * Creates header and, if needed, header extension descriptors and
629  * places them in a descriptor array, scrq_arr
630  */
631 
632 static void create_hdr_descs(u8 hdr_field, u8 *hdr_data, int len, int *hdr_len,
633 			     union sub_crq *scrq_arr)
634 {
635 	union sub_crq hdr_desc;
636 	int tmp_len = len;
637 	u8 *data, *cur;
638 	int tmp;
639 
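
	/* The header descriptor carries up to 24 bytes of header data; any
	 * remainder is placed in extension descriptors of 29 bytes each.
	 */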
640 	while (tmp_len > 0) {
641 		cur = hdr_data + len - tmp_len;
642 
643 		memset(&hdr_desc, 0, sizeof(hdr_desc));
644 		if (cur != hdr_data) {
645 			data = hdr_desc.hdr_ext.data;
646 			tmp = tmp_len > 29 ? 29 : tmp_len;
647 			hdr_desc.hdr_ext.first = IBMVNIC_CRQ_CMD;
648 			hdr_desc.hdr_ext.type = IBMVNIC_HDR_EXT_DESC;
649 			hdr_desc.hdr_ext.len = tmp;
650 		} else {
651 			data = hdr_desc.hdr.data;
652 			tmp = tmp_len > 24 ? 24 : tmp_len;
653 			hdr_desc.hdr.first = IBMVNIC_CRQ_CMD;
654 			hdr_desc.hdr.type = IBMVNIC_HDR_DESC;
655 			hdr_desc.hdr.len = tmp;
656 			hdr_desc.hdr.l2_len = (u8)hdr_len[0];
657 			hdr_desc.hdr.l3_len = cpu_to_be16((u16)hdr_len[1]);
658 			hdr_desc.hdr.l4_len = (u8)hdr_len[2];
659 			hdr_desc.hdr.flag = hdr_field << 1;
660 		}
661 		memcpy(data, cur, tmp);
662 		tmp_len -= tmp;
663 		*scrq_arr = hdr_desc;
664 		scrq_arr++;
665 	}
666 }
667 
668 /**
669  * build_hdr_descs_arr - build a header descriptor array
670  * @txbuff: tx buffer containing the socket buffer and the header and
671  *	descriptor arrays to be filled in
672  * @num_entries: number of descriptors to be sent
673  * @hdr_field: bit field determining which headers will be sent
674  *
675  * This function will build a TX descriptor array with applicable
676  * L2/L3/L4 packet header descriptors to be sent by send_subcrq_indirect.
677  */
678 
679 static void build_hdr_descs_arr(struct ibmvnic_tx_buff *txbuff,
680 				int *num_entries, u8 hdr_field)
681 {
682 	int hdr_len[3] = {0, 0, 0};
683 	int tot_len, len;
684 	u8 *hdr_data = txbuff->hdr_data;
685 
686 	tot_len = build_hdr_data(hdr_field, txbuff->skb, hdr_len,
687 				 txbuff->hdr_data);
688 	len = tot_len;
689 	len -= 24;
690 	if (len > 0)
691 		*num_entries += len % 29 ? len / 29 + 1 : len / 29;
692 	create_hdr_descs(hdr_field, hdr_data, tot_len, hdr_len,
693 			 txbuff->indir_arr + 1);
694 }
695 
696 static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
697 {
698 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
699 	int queue_num = skb_get_queue_mapping(skb);
700 	u8 *hdrs = (u8 *)&adapter->tx_rx_desc_req;
701 	struct device *dev = &adapter->vdev->dev;
702 	struct ibmvnic_tx_buff *tx_buff = NULL;
703 	struct ibmvnic_tx_pool *tx_pool;
704 	unsigned int tx_send_failed = 0;
705 	unsigned int tx_map_failed = 0;
706 	unsigned int tx_dropped = 0;
707 	unsigned int tx_packets = 0;
708 	unsigned int tx_bytes = 0;
709 	dma_addr_t data_dma_addr;
710 	struct netdev_queue *txq;
711 	bool used_bounce = false;
712 	unsigned long lpar_rc;
713 	union sub_crq tx_crq;
714 	unsigned int offset;
715 	int num_entries = 1;
716 	unsigned char *dst;
717 	u64 *handle_array;
718 	int index = 0;
719 	int ret = 0;
720 
721 	tx_pool = &adapter->tx_pool[queue_num];
722 	txq = netdev_get_tx_queue(netdev, skb_get_queue_mapping(skb));
723 	handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
724 				   be32_to_cpu(adapter->login_rsp_buf->
725 					       off_txsubm_subcrqs));
726 	if (adapter->migrated) {
727 		tx_send_failed++;
728 		tx_dropped++;
729 		ret = NETDEV_TX_BUSY;
730 		goto out;
731 	}
732 
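	/* Copy the frame into this descriptor's slot in the long term
	 * mapped tx buffer; no per-skb DMA mapping is required.
	 */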
733 	index = tx_pool->free_map[tx_pool->consumer_index];
734 	offset = index * adapter->req_mtu;
735 	dst = tx_pool->long_term_buff.buff + offset;
736 	memset(dst, 0, adapter->req_mtu);
737 	skb_copy_from_linear_data(skb, dst, skb->len);
738 	data_dma_addr = tx_pool->long_term_buff.addr + offset;
739 
740 	tx_pool->consumer_index =
741 	    (tx_pool->consumer_index + 1) %
742 		adapter->max_tx_entries_per_subcrq;
743 
744 	tx_buff = &tx_pool->tx_buff[index];
745 	tx_buff->skb = skb;
746 	tx_buff->data_dma[0] = data_dma_addr;
747 	tx_buff->data_len[0] = skb->len;
748 	tx_buff->index = index;
749 	tx_buff->pool_index = queue_num;
750 	tx_buff->last_frag = true;
751 	tx_buff->used_bounce = used_bounce;
752 
753 	memset(&tx_crq, 0, sizeof(tx_crq));
754 	tx_crq.v1.first = IBMVNIC_CRQ_CMD;
755 	tx_crq.v1.type = IBMVNIC_TX_DESC;
756 	tx_crq.v1.n_crq_elem = 1;
757 	tx_crq.v1.n_sge = 1;
758 	tx_crq.v1.flags1 = IBMVNIC_TX_COMP_NEEDED;
759 	tx_crq.v1.correlator = cpu_to_be32(index);
760 	tx_crq.v1.dma_reg = cpu_to_be16(tx_pool->long_term_buff.map_id);
761 	tx_crq.v1.sge_len = cpu_to_be32(skb->len);
762 	tx_crq.v1.ioba = cpu_to_be64(data_dma_addr);
763 
764 	if (adapter->vlan_header_insertion) {
765 		tx_crq.v1.flags2 |= IBMVNIC_TX_VLAN_INSERT;
766 		tx_crq.v1.vlan_id = cpu_to_be16(skb->vlan_tci);
767 	}
768 
769 	if (skb->protocol == htons(ETH_P_IP)) {
770 		if (ip_hdr(skb)->version == 4)
771 			tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV4;
772 		else if (ip_hdr(skb)->version == 6)
773 			tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV6;
774 
775 		if (ip_hdr(skb)->protocol == IPPROTO_TCP)
776 			tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_TCP;
777 		else if (ip_hdr(skb)->protocol == IPPROTO_UDP)
778 			tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_UDP;
779 	}
780 
781 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
782 		tx_crq.v1.flags1 |= IBMVNIC_TX_CHKSUM_OFFLOAD;
783 		hdrs += 2;
784 	}
785 	/* determine if l2/3/4 headers are sent to firmware */
786 	if ((*hdrs >> 7) & 1 &&
787 	    (skb->protocol == htons(ETH_P_IP) ||
788 	     skb->protocol == htons(ETH_P_IPV6))) {
789 		build_hdr_descs_arr(tx_buff, &num_entries, *hdrs);
790 		tx_crq.v1.n_crq_elem = num_entries;
791 		tx_buff->indir_arr[0] = tx_crq;
792 		tx_buff->indir_dma = dma_map_single(dev, tx_buff->indir_arr,
793 						    sizeof(tx_buff->indir_arr),
794 						    DMA_TO_DEVICE);
795 		if (dma_mapping_error(dev, tx_buff->indir_dma)) {
796 			if (!firmware_has_feature(FW_FEATURE_CMO))
797 				dev_err(dev, "tx: unable to map descriptor array\n");
798 			tx_map_failed++;
799 			tx_dropped++;
800 			ret = NETDEV_TX_BUSY;
801 			goto out;
802 		}
803 		lpar_rc = send_subcrq_indirect(adapter, handle_array[queue_num],
804 					       (u64)tx_buff->indir_dma,
805 					       (u64)num_entries);
806 	} else {
807 		lpar_rc = send_subcrq(adapter, handle_array[queue_num],
808 				      &tx_crq);
809 	}
810 	if (lpar_rc != H_SUCCESS) {
811 		dev_err(dev, "tx failed with code %ld\n", lpar_rc);
812 
813 		if (tx_pool->consumer_index == 0)
814 			tx_pool->consumer_index =
815 				adapter->max_tx_entries_per_subcrq - 1;
816 		else
817 			tx_pool->consumer_index--;
818 
819 		tx_send_failed++;
820 		tx_dropped++;
821 		ret = NETDEV_TX_BUSY;
822 		goto out;
823 	}
824 	tx_packets++;
825 	tx_bytes += skb->len;
826 	txq->trans_start = jiffies;
827 	ret = NETDEV_TX_OK;
828 
829 out:
830 	netdev->stats.tx_dropped += tx_dropped;
831 	netdev->stats.tx_bytes += tx_bytes;
832 	netdev->stats.tx_packets += tx_packets;
833 	adapter->tx_send_failed += tx_send_failed;
834 	adapter->tx_map_failed += tx_map_failed;
835 
836 	return ret;
837 }
838 
839 static void ibmvnic_set_multi(struct net_device *netdev)
840 {
841 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
842 	struct netdev_hw_addr *ha;
843 	union ibmvnic_crq crq;
844 
845 	memset(&crq, 0, sizeof(crq));
846 	crq.request_capability.first = IBMVNIC_CRQ_CMD;
847 	crq.request_capability.cmd = REQUEST_CAPABILITY;
848 
849 	if (netdev->flags & IFF_PROMISC) {
850 		if (!adapter->promisc_supported)
851 			return;
852 	} else {
853 		if (netdev->flags & IFF_ALLMULTI) {
854 			/* Accept all multicast */
855 			memset(&crq, 0, sizeof(crq));
856 			crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
857 			crq.multicast_ctrl.cmd = MULTICAST_CTRL;
858 			crq.multicast_ctrl.flags = IBMVNIC_ENABLE_ALL;
859 			ibmvnic_send_crq(adapter, &crq);
860 		} else if (netdev_mc_empty(netdev)) {
861 			/* Reject all multicast */
862 			memset(&crq, 0, sizeof(crq));
863 			crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
864 			crq.multicast_ctrl.cmd = MULTICAST_CTRL;
865 			crq.multicast_ctrl.flags = IBMVNIC_DISABLE_ALL;
866 			ibmvnic_send_crq(adapter, &crq);
867 		} else {
868 			/* Accept one or more multicast(s) */
869 			netdev_for_each_mc_addr(ha, netdev) {
870 				memset(&crq, 0, sizeof(crq));
871 				crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
872 				crq.multicast_ctrl.cmd = MULTICAST_CTRL;
873 				crq.multicast_ctrl.flags = IBMVNIC_ENABLE_MC;
874 				ether_addr_copy(&crq.multicast_ctrl.mac_addr[0],
875 						ha->addr);
876 				ibmvnic_send_crq(adapter, &crq);
877 			}
878 		}
879 	}
880 }
881 
882 static int ibmvnic_set_mac(struct net_device *netdev, void *p)
883 {
884 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
885 	struct sockaddr *addr = p;
886 	union ibmvnic_crq crq;
887 
888 	if (!is_valid_ether_addr(addr->sa_data))
889 		return -EADDRNOTAVAIL;
890 
891 	memset(&crq, 0, sizeof(crq));
892 	crq.change_mac_addr.first = IBMVNIC_CRQ_CMD;
893 	crq.change_mac_addr.cmd = CHANGE_MAC_ADDR;
894 	ether_addr_copy(&crq.change_mac_addr.mac_addr[0], addr->sa_data);
895 	ibmvnic_send_crq(adapter, &crq);
896 	/* netdev->dev_addr is changed in handle_change_mac_rsp function */
897 	return 0;
898 }
899 
900 static int ibmvnic_change_mtu(struct net_device *netdev, int new_mtu)
901 {
902 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
903 
904 	if (new_mtu > adapter->req_mtu || new_mtu < adapter->min_mtu)
905 		return -EINVAL;
906 
907 	netdev->mtu = new_mtu;
908 	return 0;
909 }
910 
911 static void ibmvnic_tx_timeout(struct net_device *dev)
912 {
913 	struct ibmvnic_adapter *adapter = netdev_priv(dev);
914 	int rc;
915 
916 	/* Adapter timed out, resetting it */
917 	release_sub_crqs(adapter);
918 	rc = ibmvnic_reset_crq(adapter);
919 	if (rc)
920 		dev_err(&adapter->vdev->dev, "Adapter timeout, reset failed\n");
921 	else
922 		ibmvnic_send_crq_init(adapter);
923 }
924 
925 static void remove_buff_from_pool(struct ibmvnic_adapter *adapter,
926 				  struct ibmvnic_rx_buff *rx_buff)
927 {
928 	struct ibmvnic_rx_pool *pool = &adapter->rx_pool[rx_buff->pool_index];
929 
930 	rx_buff->skb = NULL;
931 
932 	pool->free_map[pool->next_alloc] = (int)(rx_buff - pool->rx_buff);
933 	pool->next_alloc = (pool->next_alloc + 1) % pool->size;
934 
935 	atomic_dec(&pool->available);
936 }
937 
938 static int ibmvnic_poll(struct napi_struct *napi, int budget)
939 {
940 	struct net_device *netdev = napi->dev;
941 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
942 	int scrq_num = (int)(napi - adapter->napi);
943 	int frames_processed = 0;
944 restart_poll:
945 	while (frames_processed < budget) {
946 		struct sk_buff *skb;
947 		struct ibmvnic_rx_buff *rx_buff;
948 		union sub_crq *next;
949 		u32 length;
950 		u16 offset;
951 		u8 flags = 0;
952 
953 		if (!pending_scrq(adapter, adapter->rx_scrq[scrq_num]))
954 			break;
955 		next = ibmvnic_next_scrq(adapter, adapter->rx_scrq[scrq_num]);
956 		rx_buff =
957 		    (struct ibmvnic_rx_buff *)be64_to_cpu(next->
958 							  rx_comp.correlator);
959 		/* do error checking */
960 		if (next->rx_comp.rc) {
961 			netdev_err(netdev, "rx error %x\n", next->rx_comp.rc);
962 			/* free the entry */
963 			next->rx_comp.first = 0;
964 			remove_buff_from_pool(adapter, rx_buff);
965 			break;
966 		}
967 
968 		length = be32_to_cpu(next->rx_comp.len);
969 		offset = be16_to_cpu(next->rx_comp.off_frame_data);
970 		flags = next->rx_comp.flags;
971 		skb = rx_buff->skb;
972 		skb_copy_to_linear_data(skb, rx_buff->data + offset,
973 					length);
974 		skb->vlan_tci = be16_to_cpu(next->rx_comp.vlan_tci);
975 		/* free the entry */
976 		next->rx_comp.first = 0;
977 		remove_buff_from_pool(adapter, rx_buff);
978 
979 		skb_put(skb, length);
980 		skb->protocol = eth_type_trans(skb, netdev);
981 
982 		if (flags & IBMVNIC_IP_CHKSUM_GOOD &&
983 		    flags & IBMVNIC_TCP_UDP_CHKSUM_GOOD) {
984 			skb->ip_summed = CHECKSUM_UNNECESSARY;
985 		}
986 
987 		length = skb->len;
988 		napi_gro_receive(napi, skb); /* send it up */
989 		netdev->stats.rx_packets++;
990 		netdev->stats.rx_bytes += length;
991 		frames_processed++;
992 	}
993 	replenish_rx_pool(adapter, &adapter->rx_pool[scrq_num]);
994 
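	/* If the budget was not exhausted, re-enable the queue interrupt and
	 * complete NAPI, but restart polling if more frames arrived in the
	 * meantime.
	 */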
995 	if (frames_processed < budget) {
996 		enable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
997 		napi_complete(napi);
998 		if (pending_scrq(adapter, adapter->rx_scrq[scrq_num]) &&
999 		    napi_reschedule(napi)) {
1000 			disable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
1001 			goto restart_poll;
1002 		}
1003 	}
1004 	return frames_processed;
1005 }
1006 
1007 #ifdef CONFIG_NET_POLL_CONTROLLER
1008 static void ibmvnic_netpoll_controller(struct net_device *dev)
1009 {
1010 	struct ibmvnic_adapter *adapter = netdev_priv(dev);
1011 	int i;
1012 
1013 	replenish_pools(netdev_priv(dev));
1014 	for (i = 0; i < adapter->req_rx_queues; i++)
1015 		ibmvnic_interrupt_rx(adapter->rx_scrq[i]->irq,
1016 				     adapter->rx_scrq[i]);
1017 }
1018 #endif
1019 
1020 static const struct net_device_ops ibmvnic_netdev_ops = {
1021 	.ndo_open		= ibmvnic_open,
1022 	.ndo_stop		= ibmvnic_close,
1023 	.ndo_start_xmit		= ibmvnic_xmit,
1024 	.ndo_set_rx_mode	= ibmvnic_set_multi,
1025 	.ndo_set_mac_address	= ibmvnic_set_mac,
1026 	.ndo_validate_addr	= eth_validate_addr,
1027 	.ndo_change_mtu		= ibmvnic_change_mtu,
1028 	.ndo_tx_timeout		= ibmvnic_tx_timeout,
1029 #ifdef CONFIG_NET_POLL_CONTROLLER
1030 	.ndo_poll_controller	= ibmvnic_netpoll_controller,
1031 #endif
1032 };
1033 
1034 /* ethtool functions */
1035 
1036 static int ibmvnic_get_settings(struct net_device *netdev,
1037 				struct ethtool_cmd *cmd)
1038 {
1039 	cmd->supported = (SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg |
1040 			  SUPPORTED_FIBRE);
1041 	cmd->advertising = (ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg |
1042 			    ADVERTISED_FIBRE);
1043 	ethtool_cmd_speed_set(cmd, SPEED_1000);
1044 	cmd->duplex = DUPLEX_FULL;
1045 	cmd->port = PORT_FIBRE;
1046 	cmd->phy_address = 0;
1047 	cmd->transceiver = XCVR_INTERNAL;
1048 	cmd->autoneg = AUTONEG_ENABLE;
1049 	cmd->maxtxpkt = 0;
1050 	cmd->maxrxpkt = 1;
1051 	return 0;
1052 }
1053 
1054 static void ibmvnic_get_drvinfo(struct net_device *dev,
1055 				struct ethtool_drvinfo *info)
1056 {
1057 	strlcpy(info->driver, ibmvnic_driver_name, sizeof(info->driver));
1058 	strlcpy(info->version, IBMVNIC_DRIVER_VERSION, sizeof(info->version));
1059 }
1060 
1061 static u32 ibmvnic_get_msglevel(struct net_device *netdev)
1062 {
1063 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1064 
1065 	return adapter->msg_enable;
1066 }
1067 
1068 static void ibmvnic_set_msglevel(struct net_device *netdev, u32 data)
1069 {
1070 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1071 
1072 	adapter->msg_enable = data;
1073 }
1074 
1075 static u32 ibmvnic_get_link(struct net_device *netdev)
1076 {
1077 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1078 
1079 	/* Don't need to send a query because we request a logical link up at
1080 	 * init and then we wait for link state indications
1081 	 */
1082 	return adapter->logical_link_state;
1083 }
1084 
1085 static void ibmvnic_get_ringparam(struct net_device *netdev,
1086 				  struct ethtool_ringparam *ring)
1087 {
1088 	ring->rx_max_pending = 0;
1089 	ring->tx_max_pending = 0;
1090 	ring->rx_mini_max_pending = 0;
1091 	ring->rx_jumbo_max_pending = 0;
1092 	ring->rx_pending = 0;
1093 	ring->tx_pending = 0;
1094 	ring->rx_mini_pending = 0;
1095 	ring->rx_jumbo_pending = 0;
1096 }
1097 
1098 static void ibmvnic_get_strings(struct net_device *dev, u32 stringset, u8 *data)
1099 {
1100 	int i;
1101 
1102 	if (stringset != ETH_SS_STATS)
1103 		return;
1104 
1105 	for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++, data += ETH_GSTRING_LEN)
1106 		memcpy(data, ibmvnic_stats[i].name, ETH_GSTRING_LEN);
1107 }
1108 
1109 static int ibmvnic_get_sset_count(struct net_device *dev, int sset)
1110 {
1111 	switch (sset) {
1112 	case ETH_SS_STATS:
1113 		return ARRAY_SIZE(ibmvnic_stats);
1114 	default:
1115 		return -EOPNOTSUPP;
1116 	}
1117 }
1118 
1119 static void ibmvnic_get_ethtool_stats(struct net_device *dev,
1120 				      struct ethtool_stats *stats, u64 *data)
1121 {
1122 	struct ibmvnic_adapter *adapter = netdev_priv(dev);
1123 	union ibmvnic_crq crq;
1124 	int i;
1125 
1126 	memset(&crq, 0, sizeof(crq));
1127 	crq.request_statistics.first = IBMVNIC_CRQ_CMD;
1128 	crq.request_statistics.cmd = REQUEST_STATISTICS;
1129 	crq.request_statistics.ioba = cpu_to_be32(adapter->stats_token);
1130 	crq.request_statistics.len =
1131 	    cpu_to_be32(sizeof(struct ibmvnic_statistics));
1132 	init_completion(&adapter->stats_done);
1133 	ibmvnic_send_crq(adapter, &crq);
1134 
1135 	/* Wait for data to be written */
1136 	wait_for_completion(&adapter->stats_done);
1137 
1138 	for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++)
1139 		data[i] = IBMVNIC_GET_STAT(adapter, ibmvnic_stats[i].offset);
1140 }
1141 
1142 static const struct ethtool_ops ibmvnic_ethtool_ops = {
1143 	.get_settings		= ibmvnic_get_settings,
1144 	.get_drvinfo		= ibmvnic_get_drvinfo,
1145 	.get_msglevel		= ibmvnic_get_msglevel,
1146 	.set_msglevel		= ibmvnic_set_msglevel,
1147 	.get_link		= ibmvnic_get_link,
1148 	.get_ringparam		= ibmvnic_get_ringparam,
1149 	.get_strings            = ibmvnic_get_strings,
1150 	.get_sset_count         = ibmvnic_get_sset_count,
1151 	.get_ethtool_stats	= ibmvnic_get_ethtool_stats,
1152 };
1153 
1154 /* Routines for managing CRQs/sCRQs  */
1155 
1156 static void release_sub_crq_queue(struct ibmvnic_adapter *adapter,
1157 				  struct ibmvnic_sub_crq_queue *scrq)
1158 {
1159 	struct device *dev = &adapter->vdev->dev;
1160 	long rc;
1161 
1162 	netdev_dbg(adapter->netdev, "Releasing sub-CRQ\n");
1163 
1164 	/* Close the sub-crqs */
1165 	do {
1166 		rc = plpar_hcall_norets(H_FREE_SUB_CRQ,
1167 					adapter->vdev->unit_address,
1168 					scrq->crq_num);
1169 	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
1170 
1171 	dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
1172 			 DMA_BIDIRECTIONAL);
1173 	free_pages((unsigned long)scrq->msgs, 2);
1174 	kfree(scrq);
1175 }
1176 
1177 static struct ibmvnic_sub_crq_queue *init_sub_crq_queue(struct ibmvnic_adapter
1178 							*adapter)
1179 {
1180 	struct device *dev = &adapter->vdev->dev;
1181 	struct ibmvnic_sub_crq_queue *scrq;
1182 	int rc;
1183 
1184 	scrq = kmalloc(sizeof(*scrq), GFP_ATOMIC);
1185 	if (!scrq)
1186 		return NULL;
1187 
1188 	scrq->msgs = (union sub_crq *)__get_free_pages(GFP_KERNEL, 2);
1189 	if (!scrq->msgs) {
1190 		dev_warn(dev, "Couldn't allocate crq queue messages page\n");
1191 		goto zero_page_failed;
1192 	}
1193 	memset(scrq->msgs, 0, 4 * PAGE_SIZE);
1194 
1195 	scrq->msg_token = dma_map_single(dev, scrq->msgs, 4 * PAGE_SIZE,
1196 					 DMA_BIDIRECTIONAL);
1197 	if (dma_mapping_error(dev, scrq->msg_token)) {
1198 		dev_warn(dev, "Couldn't map crq queue messages page\n");
1199 		goto map_failed;
1200 	}
1201 
1202 	rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token,
1203 			   4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq);
1204 
1205 	if (rc == H_RESOURCE)
1206 		rc = ibmvnic_reset_crq(adapter);
1207 
1208 	if (rc == H_CLOSED) {
1209 		dev_warn(dev, "Partner adapter not ready, waiting.\n");
1210 	} else if (rc) {
1211 		dev_warn(dev, "Error %d registering sub-crq\n", rc);
1212 		goto reg_failed;
1213 	}
1214 
1215 	scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
1216 	if (scrq->irq == NO_IRQ) {
1217 		dev_err(dev, "Error mapping irq\n");
1218 		goto map_irq_failed;
1219 	}
1220 
1221 	scrq->adapter = adapter;
1222 	scrq->size = 4 * PAGE_SIZE / sizeof(*scrq->msgs);
1223 	scrq->cur = 0;
1224 	scrq->rx_skb_top = NULL;
1225 	spin_lock_init(&scrq->lock);
1226 
1227 	netdev_dbg(adapter->netdev,
1228 		   "sub-crq initialized, num %lx, hw_irq=%lx, irq=%x\n",
1229 		   scrq->crq_num, scrq->hw_irq, scrq->irq);
1230 
1231 	return scrq;
1232 
1233 map_irq_failed:
1234 	do {
1235 		rc = plpar_hcall_norets(H_FREE_SUB_CRQ,
1236 					adapter->vdev->unit_address,
1237 					scrq->crq_num);
1238 	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
1239 reg_failed:
1240 	dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
1241 			 DMA_BIDIRECTIONAL);
1242 map_failed:
1243 	free_pages((unsigned long)scrq->msgs, 2);
1244 zero_page_failed:
1245 	kfree(scrq);
1246 
1247 	return NULL;
1248 }
1249 
1250 static void release_sub_crqs(struct ibmvnic_adapter *adapter)
1251 {
1252 	int i;
1253 
1254 	if (adapter->tx_scrq) {
1255 		for (i = 0; i < adapter->req_tx_queues; i++)
1256 			if (adapter->tx_scrq[i]) {
1257 				free_irq(adapter->tx_scrq[i]->irq,
1258 					 adapter->tx_scrq[i]);
1259 				release_sub_crq_queue(adapter,
1260 						      adapter->tx_scrq[i]);
1261 			}
1262 		adapter->tx_scrq = NULL;
1263 	}
1264 
1265 	if (adapter->rx_scrq) {
1266 		for (i = 0; i < adapter->req_rx_queues; i++)
1267 			if (adapter->rx_scrq[i]) {
1268 				free_irq(adapter->rx_scrq[i]->irq,
1269 					 adapter->rx_scrq[i]);
1270 				release_sub_crq_queue(adapter,
1271 						      adapter->rx_scrq[i]);
1272 			}
1273 		adapter->rx_scrq = NULL;
1274 	}
1275 
1276 	adapter->requested_caps = 0;
1277 }
1278 
1279 static int disable_scrq_irq(struct ibmvnic_adapter *adapter,
1280 			    struct ibmvnic_sub_crq_queue *scrq)
1281 {
1282 	struct device *dev = &adapter->vdev->dev;
1283 	unsigned long rc;
1284 
1285 	rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
1286 				H_DISABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0);
1287 	if (rc)
1288 		dev_err(dev, "Couldn't disable scrq irq 0x%lx. rc=%ld\n",
1289 			scrq->hw_irq, rc);
1290 	return rc;
1291 }
1292 
1293 static int enable_scrq_irq(struct ibmvnic_adapter *adapter,
1294 			   struct ibmvnic_sub_crq_queue *scrq)
1295 {
1296 	struct device *dev = &adapter->vdev->dev;
1297 	unsigned long rc;
1298 
1299 	if (scrq->hw_irq > 0x100000000ULL) {
1300 		dev_err(dev, "bad hw_irq = %lx\n", scrq->hw_irq);
1301 		return 1;
1302 	}
1303 
1304 	rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
1305 				H_ENABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0);
1306 	if (rc)
1307 		dev_err(dev, "Couldn't enable scrq irq 0x%lx. rc=%ld\n",
1308 			scrq->hw_irq, rc);
1309 	return rc;
1310 }
1311 
1312 static int ibmvnic_complete_tx(struct ibmvnic_adapter *adapter,
1313 			       struct ibmvnic_sub_crq_queue *scrq)
1314 {
1315 	struct device *dev = &adapter->vdev->dev;
1316 	struct ibmvnic_tx_buff *txbuff;
1317 	union sub_crq *next;
1318 	int index;
1319 	int i, j;
1320 	u8 first;
1321 
1322 restart_loop:
1323 	while (pending_scrq(adapter, scrq)) {
1324 		unsigned int pool = scrq->pool_index;
1325 
1326 		next = ibmvnic_next_scrq(adapter, scrq);
1327 		for (i = 0; i < next->tx_comp.num_comps; i++) {
1328 			if (next->tx_comp.rcs[i]) {
1329 				dev_err(dev, "tx error %x\n",
1330 					next->tx_comp.rcs[i]);
1331 				continue;
1332 			}
1333 			index = be32_to_cpu(next->tx_comp.correlators[i]);
1334 			txbuff = &adapter->tx_pool[pool].tx_buff[index];
1335 
1336 			for (j = 0; j < IBMVNIC_MAX_FRAGS_PER_CRQ; j++) {
1337 				if (!txbuff->data_dma[j])
1338 					continue;
1339 
1340 				txbuff->data_dma[j] = 0;
1341 				txbuff->used_bounce = false;
1342 			}
1343 			/* if sub_crq was sent indirectly */
1344 			first = txbuff->indir_arr[0].generic.first;
1345 			if (first == IBMVNIC_CRQ_CMD) {
1346 				dma_unmap_single(dev, txbuff->indir_dma,
1347 						 sizeof(txbuff->indir_arr),
1348 						 DMA_TO_DEVICE);
1349 			}
1350 
1351 			if (txbuff->last_frag)
1352 				dev_kfree_skb_any(txbuff->skb);
1353 
1354 			adapter->tx_pool[pool].free_map[adapter->tx_pool[pool].
1355 						     producer_index] = index;
1356 			adapter->tx_pool[pool].producer_index =
1357 			    (adapter->tx_pool[pool].producer_index + 1) %
1358 			    adapter->max_tx_entries_per_subcrq;
1359 		}
1360 		/* remove tx_comp scrq*/
1361 		next->tx_comp.first = 0;
1362 	}
1363 
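	/* Re-enable the interrupt and check again: completions that arrived
	 * while it was disabled are handled by restarting the loop.
	 */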
1364 	enable_scrq_irq(adapter, scrq);
1365 
1366 	if (pending_scrq(adapter, scrq)) {
1367 		disable_scrq_irq(adapter, scrq);
1368 		goto restart_loop;
1369 	}
1370 
1371 	return 0;
1372 }
1373 
1374 static irqreturn_t ibmvnic_interrupt_tx(int irq, void *instance)
1375 {
1376 	struct ibmvnic_sub_crq_queue *scrq = instance;
1377 	struct ibmvnic_adapter *adapter = scrq->adapter;
1378 
1379 	disable_scrq_irq(adapter, scrq);
1380 	ibmvnic_complete_tx(adapter, scrq);
1381 
1382 	return IRQ_HANDLED;
1383 }
1384 
1385 static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance)
1386 {
1387 	struct ibmvnic_sub_crq_queue *scrq = instance;
1388 	struct ibmvnic_adapter *adapter = scrq->adapter;
1389 
1390 	if (napi_schedule_prep(&adapter->napi[scrq->scrq_num])) {
1391 		disable_scrq_irq(adapter, scrq);
1392 		__napi_schedule(&adapter->napi[scrq->scrq_num]);
1393 	}
1394 
1395 	return IRQ_HANDLED;
1396 }
1397 
1398 static void init_sub_crqs(struct ibmvnic_adapter *adapter, int retry)
1399 {
1400 	struct device *dev = &adapter->vdev->dev;
1401 	struct ibmvnic_sub_crq_queue **allqueues;
1402 	int registered_queues = 0;
1403 	union ibmvnic_crq crq;
1404 	int total_queues;
1405 	int more = 0;
1406 	int i, j;
1407 	int rc;
1408 
1409 	if (!retry) {
1410 		/* Sub-CRQ entries are 32 bytes long */
1411 		int entries_page = 4 * PAGE_SIZE / (sizeof(u64) * 4);
1412 
1413 		if (adapter->min_tx_entries_per_subcrq > entries_page ||
1414 		    adapter->min_rx_add_entries_per_subcrq > entries_page) {
1415 			dev_err(dev, "Fatal, invalid entries per sub-crq\n");
1416 			goto allqueues_failed;
1417 		}
1418 
1419 		/* Get the minimum between the queried max and the entries
1420 		 * that fit in our PAGE_SIZE
1421 		 */
1422 		adapter->req_tx_entries_per_subcrq =
1423 		    adapter->max_tx_entries_per_subcrq > entries_page ?
1424 		    entries_page : adapter->max_tx_entries_per_subcrq;
1425 		adapter->req_rx_add_entries_per_subcrq =
1426 		    adapter->max_rx_add_entries_per_subcrq > entries_page ?
1427 		    entries_page : adapter->max_rx_add_entries_per_subcrq;
1428 
1429 		/* Choose the maximum number of queues supported by firmware */
1430 		adapter->req_tx_queues = adapter->max_tx_queues;
1431 		adapter->req_rx_queues = adapter->max_rx_queues;
1432 		adapter->req_rx_add_queues = adapter->max_rx_add_queues;
1433 
1434 		adapter->req_mtu = adapter->max_mtu;
1435 	}
1436 
1437 	total_queues = adapter->req_tx_queues + adapter->req_rx_queues;
1438 
1439 	allqueues = kcalloc(total_queues, sizeof(*allqueues), GFP_ATOMIC);
1440 	if (!allqueues)
1441 		goto allqueues_failed;
1442 
1443 	for (i = 0; i < total_queues; i++) {
1444 		allqueues[i] = init_sub_crq_queue(adapter);
1445 		if (!allqueues[i]) {
1446 			dev_warn(dev, "Couldn't allocate all sub-crqs\n");
1447 			break;
1448 		}
1449 		registered_queues++;
1450 	}
1451 
1452 	/* Make sure we were able to register the minimum number of queues */
1453 	if (registered_queues <
1454 	    adapter->min_tx_queues + adapter->min_rx_queues) {
1455 		dev_err(dev, "Fatal: Couldn't init min number of sub-crqs\n");
1456 		goto tx_failed;
1457 	}
1458 
1459 	/* Spread the shortfall from failed allocations across rx and tx */
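	/* 'more' extends this loop when one queue type is already at its
	 * minimum, so the remaining reduction is taken from the other type.
	 */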
1460 	for (i = 0; i < total_queues - registered_queues + more ; i++) {
1461 		netdev_dbg(adapter->netdev, "Reducing number of queues\n");
1462 		switch (i % 3) {
1463 		case 0:
1464 			if (adapter->req_rx_queues > adapter->min_rx_queues)
1465 				adapter->req_rx_queues--;
1466 			else
1467 				more++;
1468 			break;
1469 		case 1:
1470 			if (adapter->req_tx_queues > adapter->min_tx_queues)
1471 				adapter->req_tx_queues--;
1472 			else
1473 				more++;
1474 			break;
1475 		}
1476 	}
1477 
1478 	adapter->tx_scrq = kcalloc(adapter->req_tx_queues,
1479 				   sizeof(*adapter->tx_scrq), GFP_ATOMIC);
1480 	if (!adapter->tx_scrq)
1481 		goto tx_failed;
1482 
1483 	for (i = 0; i < adapter->req_tx_queues; i++) {
1484 		adapter->tx_scrq[i] = allqueues[i];
1485 		adapter->tx_scrq[i]->pool_index = i;
1486 		rc = request_irq(adapter->tx_scrq[i]->irq, ibmvnic_interrupt_tx,
1487 				 0, "ibmvnic_tx", adapter->tx_scrq[i]);
1488 		if (rc) {
1489 			dev_err(dev, "Couldn't register tx irq 0x%x. rc=%d\n",
1490 				adapter->tx_scrq[i]->irq, rc);
1491 			goto req_tx_irq_failed;
1492 		}
1493 	}
1494 
1495 	adapter->rx_scrq = kcalloc(adapter->req_rx_queues,
1496 				   sizeof(*adapter->rx_scrq), GFP_ATOMIC);
1497 	if (!adapter->rx_scrq)
1498 		goto rx_failed;
1499 
1500 	for (i = 0; i < adapter->req_rx_queues; i++) {
1501 		adapter->rx_scrq[i] = allqueues[i + adapter->req_tx_queues];
1502 		adapter->rx_scrq[i]->scrq_num = i;
1503 		rc = request_irq(adapter->rx_scrq[i]->irq, ibmvnic_interrupt_rx,
1504 				 0, "ibmvnic_rx", adapter->rx_scrq[i]);
1505 		if (rc) {
1506 			dev_err(dev, "Couldn't register rx irq 0x%x. rc=%d\n",
1507 				adapter->rx_scrq[i]->irq, rc);
1508 			goto req_rx_irq_failed;
1509 		}
1510 	}
1511 
1512 	memset(&crq, 0, sizeof(crq));
1513 	crq.request_capability.first = IBMVNIC_CRQ_CMD;
1514 	crq.request_capability.cmd = REQUEST_CAPABILITY;
1515 
1516 	crq.request_capability.capability = cpu_to_be16(REQ_TX_QUEUES);
1517 	crq.request_capability.number = cpu_to_be64(adapter->req_tx_queues);
1518 	ibmvnic_send_crq(adapter, &crq);
1519 
1520 	crq.request_capability.capability = cpu_to_be16(REQ_RX_QUEUES);
1521 	crq.request_capability.number = cpu_to_be64(adapter->req_rx_queues);
1522 	ibmvnic_send_crq(adapter, &crq);
1523 
1524 	crq.request_capability.capability = cpu_to_be16(REQ_RX_ADD_QUEUES);
1525 	crq.request_capability.number = cpu_to_be64(adapter->req_rx_add_queues);
1526 	ibmvnic_send_crq(adapter, &crq);
1527 
1528 	crq.request_capability.capability =
1529 	    cpu_to_be16(REQ_TX_ENTRIES_PER_SUBCRQ);
1530 	crq.request_capability.number =
1531 	    cpu_to_be64(adapter->req_tx_entries_per_subcrq);
1532 	ibmvnic_send_crq(adapter, &crq);
1533 
1534 	crq.request_capability.capability =
1535 	    cpu_to_be16(REQ_RX_ADD_ENTRIES_PER_SUBCRQ);
1536 	crq.request_capability.number =
1537 	    cpu_to_be64(adapter->req_rx_add_entries_per_subcrq);
1538 	ibmvnic_send_crq(adapter, &crq);
1539 
1540 	crq.request_capability.capability = cpu_to_be16(REQ_MTU);
1541 	crq.request_capability.number = cpu_to_be64(adapter->req_mtu);
1542 	ibmvnic_send_crq(adapter, &crq);
1543 
1544 	if (adapter->netdev->flags & IFF_PROMISC) {
1545 		if (adapter->promisc_supported) {
1546 			crq.request_capability.capability =
1547 			    cpu_to_be16(PROMISC_REQUESTED);
1548 			crq.request_capability.number = cpu_to_be64(1);
1549 			ibmvnic_send_crq(adapter, &crq);
1550 		}
1551 	} else {
1552 		crq.request_capability.capability =
1553 		    cpu_to_be16(PROMISC_REQUESTED);
1554 		crq.request_capability.number = cpu_to_be64(0);
1555 		ibmvnic_send_crq(adapter, &crq);
1556 	}
1557 
1558 	kfree(allqueues);
1559 
1560 	return;
1561 
1562 req_rx_irq_failed:
1563 	for (j = 0; j < i; j++)
1564 		free_irq(adapter->rx_scrq[j]->irq, adapter->rx_scrq[j]);
1565 	i = adapter->req_tx_queues;
1566 req_tx_irq_failed:
1567 	for (j = 0; j < i; j++)
1568 		free_irq(adapter->tx_scrq[j]->irq, adapter->tx_scrq[j]);
1569 	kfree(adapter->rx_scrq);
1570 	adapter->rx_scrq = NULL;
1571 rx_failed:
1572 	kfree(adapter->tx_scrq);
1573 	adapter->tx_scrq = NULL;
1574 tx_failed:
1575 	for (i = 0; i < registered_queues; i++)
1576 		release_sub_crq_queue(adapter, allqueues[i]);
1577 	kfree(allqueues);
1578 allqueues_failed:
1579 	ibmvnic_remove(adapter->vdev);
1580 }
1581 
1582 static int pending_scrq(struct ibmvnic_adapter *adapter,
1583 			struct ibmvnic_sub_crq_queue *scrq)
1584 {
1585 	union sub_crq *entry = &scrq->msgs[scrq->cur];
1586 
1587 	if (entry->generic.first & IBMVNIC_CRQ_CMD_RSP || adapter->closing)
1588 		return 1;
1589 	else
1590 		return 0;
1591 }
1592 
1593 static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *adapter,
1594 					struct ibmvnic_sub_crq_queue *scrq)
1595 {
1596 	union sub_crq *entry;
1597 	unsigned long flags;
1598 
1599 	spin_lock_irqsave(&scrq->lock, flags);
1600 	entry = &scrq->msgs[scrq->cur];
1601 	if (entry->generic.first & IBMVNIC_CRQ_CMD_RSP) {
1602 		if (++scrq->cur == scrq->size)
1603 			scrq->cur = 0;
1604 	} else {
1605 		entry = NULL;
1606 	}
1607 	spin_unlock_irqrestore(&scrq->lock, flags);
1608 
1609 	return entry;
1610 }
1611 
1612 static union ibmvnic_crq *ibmvnic_next_crq(struct ibmvnic_adapter *adapter)
1613 {
1614 	struct ibmvnic_crq_queue *queue = &adapter->crq;
1615 	union ibmvnic_crq *crq;
1616 
1617 	crq = &queue->msgs[queue->cur];
1618 	if (crq->generic.first & IBMVNIC_CRQ_CMD_RSP) {
1619 		if (++queue->cur == queue->size)
1620 			queue->cur = 0;
1621 	} else {
1622 		crq = NULL;
1623 	}
1624 
1625 	return crq;
1626 }
1627 
1628 static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle,
1629 		       union sub_crq *sub_crq)
1630 {
1631 	unsigned int ua = adapter->vdev->unit_address;
1632 	struct device *dev = &adapter->vdev->dev;
1633 	u64 *u64_crq = (u64 *)sub_crq;
1634 	int rc;
1635 
1636 	netdev_dbg(adapter->netdev,
1637 		   "Sending sCRQ %016lx: %016lx %016lx %016lx %016lx\n",
1638 		   (unsigned long int)cpu_to_be64(remote_handle),
1639 		   (unsigned long int)cpu_to_be64(u64_crq[0]),
1640 		   (unsigned long int)cpu_to_be64(u64_crq[1]),
1641 		   (unsigned long int)cpu_to_be64(u64_crq[2]),
1642 		   (unsigned long int)cpu_to_be64(u64_crq[3]));
1643 
1644 	/* Make sure the hypervisor sees the complete request */
1645 	mb();
1646 
1647 	rc = plpar_hcall_norets(H_SEND_SUB_CRQ, ua,
1648 				cpu_to_be64(remote_handle),
1649 				cpu_to_be64(u64_crq[0]),
1650 				cpu_to_be64(u64_crq[1]),
1651 				cpu_to_be64(u64_crq[2]),
1652 				cpu_to_be64(u64_crq[3]));
1653 
1654 	if (rc) {
1655 		if (rc == H_CLOSED)
1656 			dev_warn(dev, "CRQ Queue closed\n");
1657 		dev_err(dev, "Send error (rc=%d)\n", rc);
1658 	}
1659 
1660 	return rc;
1661 }
1662 
1663 static int send_subcrq_indirect(struct ibmvnic_adapter *adapter,
1664 				u64 remote_handle, u64 ioba, u64 num_entries)
1665 {
1666 	unsigned int ua = adapter->vdev->unit_address;
1667 	struct device *dev = &adapter->vdev->dev;
1668 	int rc;
1669 
1670 	/* Make sure the hypervisor sees the complete request */
1671 	mb();
1672 	rc = plpar_hcall_norets(H_SEND_SUB_CRQ_INDIRECT, ua,
1673 				cpu_to_be64(remote_handle),
1674 				ioba, num_entries);
1675 
1676 	if (rc) {
1677 		if (rc == H_CLOSED)
1678 			dev_warn(dev, "CRQ Queue closed\n");
1679 		dev_err(dev, "Send (indirect) error (rc=%d)\n", rc);
1680 	}
1681 
1682 	return rc;
1683 }
1684 
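/* Post a 16-byte command to the main CRQ with H_SEND_CRQ. A memory
 * barrier is issued first so the hypervisor sees a fully written entry.
 */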
1685 static int ibmvnic_send_crq(struct ibmvnic_adapter *adapter,
1686 			    union ibmvnic_crq *crq)
1687 {
1688 	unsigned int ua = adapter->vdev->unit_address;
1689 	struct device *dev = &adapter->vdev->dev;
1690 	u64 *u64_crq = (u64 *)crq;
1691 	int rc;
1692 
1693 	netdev_dbg(adapter->netdev, "Sending CRQ: %016lx %016lx\n",
1694 		   (unsigned long int)cpu_to_be64(u64_crq[0]),
1695 		   (unsigned long int)cpu_to_be64(u64_crq[1]));
1696 
1697 	/* Make sure the hypervisor sees the complete request */
1698 	mb();
1699 
1700 	rc = plpar_hcall_norets(H_SEND_CRQ, ua,
1701 				cpu_to_be64(u64_crq[0]),
1702 				cpu_to_be64(u64_crq[1]));
1703 
1704 	if (rc) {
1705 		if (rc == H_CLOSED)
1706 			dev_warn(dev, "CRQ Queue closed\n");
1707 		dev_warn(dev, "Send error (rc=%d)\n", rc);
1708 	}
1709 
1710 	return rc;
1711 }
1712 
1713 static int ibmvnic_send_crq_init(struct ibmvnic_adapter *adapter)
1714 {
1715 	union ibmvnic_crq crq;
1716 
1717 	memset(&crq, 0, sizeof(crq));
1718 	crq.generic.first = IBMVNIC_CRQ_INIT_CMD;
1719 	crq.generic.cmd = IBMVNIC_CRQ_INIT;
1720 	netdev_dbg(adapter->netdev, "Sending CRQ init\n");
1721 
1722 	return ibmvnic_send_crq(adapter, &crq);
1723 }
1724 
1725 static int ibmvnic_send_crq_init_complete(struct ibmvnic_adapter *adapter)
1726 {
1727 	union ibmvnic_crq crq;
1728 
1729 	memset(&crq, 0, sizeof(crq));
1730 	crq.generic.first = IBMVNIC_CRQ_INIT_CMD;
1731 	crq.generic.cmd = IBMVNIC_CRQ_INIT_COMPLETE;
1732 	netdev_dbg(adapter->netdev, "Sending CRQ init complete\n");
1733 
1734 	return ibmvnic_send_crq(adapter, &crq);
1735 }
1736 
1737 static int send_version_xchg(struct ibmvnic_adapter *adapter)
1738 {
1739 	union ibmvnic_crq crq;
1740 
1741 	memset(&crq, 0, sizeof(crq));
1742 	crq.version_exchange.first = IBMVNIC_CRQ_CMD;
1743 	crq.version_exchange.cmd = VERSION_EXCHANGE;
1744 	crq.version_exchange.version = cpu_to_be16(ibmvnic_version);
1745 
1746 	return ibmvnic_send_crq(adapter, &crq);
1747 }
1748 
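/* Build and send the LOGIN request: a DMA-mapped login buffer listing the
 * tx/rx sub-CRQ numbers to use, plus a response buffer for the server to
 * fill in. The command is also queued on the inflight list so it can be
 * cleaned up if the connection is lost before a response arrives.
 */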
1749 static void send_login(struct ibmvnic_adapter *adapter)
1750 {
1751 	struct ibmvnic_login_rsp_buffer *login_rsp_buffer;
1752 	struct ibmvnic_login_buffer *login_buffer;
1753 	struct ibmvnic_inflight_cmd *inflight_cmd;
1754 	struct device *dev = &adapter->vdev->dev;
1755 	dma_addr_t rsp_buffer_token;
1756 	dma_addr_t buffer_token;
1757 	size_t rsp_buffer_size;
1758 	union ibmvnic_crq crq;
1759 	unsigned long flags;
1760 	size_t buffer_size;
1761 	__be64 *tx_list_p;
1762 	__be64 *rx_list_p;
1763 	int i;
1764 
1765 	buffer_size =
1766 	    sizeof(struct ibmvnic_login_buffer) +
1767 	    sizeof(u64) * (adapter->req_tx_queues + adapter->req_rx_queues);
1768 
1769 	login_buffer = kmalloc(buffer_size, GFP_ATOMIC);
1770 	if (!login_buffer)
1771 		goto buf_alloc_failed;
1772 
1773 	buffer_token = dma_map_single(dev, login_buffer, buffer_size,
1774 				      DMA_TO_DEVICE);
1775 	if (dma_mapping_error(dev, buffer_token)) {
1776 		dev_err(dev, "Couldn't map login buffer\n");
1777 		goto buf_map_failed;
1778 	}
1779 
1780 	rsp_buffer_size = sizeof(struct ibmvnic_login_rsp_buffer) +
1781 			  sizeof(u64) * adapter->req_tx_queues +
1782 			  sizeof(u64) * adapter->req_rx_queues +
1783 			  sizeof(u64) * adapter->req_rx_queues +
1784 			  sizeof(u8) * IBMVNIC_TX_DESC_VERSIONS;
1785 
1786 	login_rsp_buffer = kmalloc(rsp_buffer_size, GFP_ATOMIC);
1787 	if (!login_rsp_buffer)
1788 		goto buf_rsp_alloc_failed;
1789 
1790 	rsp_buffer_token = dma_map_single(dev, login_rsp_buffer,
1791 					  rsp_buffer_size, DMA_FROM_DEVICE);
1792 	if (dma_mapping_error(dev, rsp_buffer_token)) {
1793 		dev_err(dev, "Couldn't map login rsp buffer\n");
1794 		goto buf_rsp_map_failed;
1795 	}
1796 	inflight_cmd = kmalloc(sizeof(*inflight_cmd), GFP_ATOMIC);
1797 	if (!inflight_cmd) {
1798 		dev_err(dev, "Couldn't allocate inflight_cmd\n");
1799 		goto inflight_alloc_failed;
1800 	}
1801 	adapter->login_buf = login_buffer;
1802 	adapter->login_buf_token = buffer_token;
1803 	adapter->login_buf_sz = buffer_size;
1804 	adapter->login_rsp_buf = login_rsp_buffer;
1805 	adapter->login_rsp_buf_token = rsp_buffer_token;
1806 	adapter->login_rsp_buf_sz = rsp_buffer_size;
1807 
1808 	login_buffer->len = cpu_to_be32(buffer_size);
1809 	login_buffer->version = cpu_to_be32(INITIAL_VERSION_LB);
1810 	login_buffer->num_txcomp_subcrqs = cpu_to_be32(adapter->req_tx_queues);
1811 	login_buffer->off_txcomp_subcrqs =
1812 	    cpu_to_be32(sizeof(struct ibmvnic_login_buffer));
1813 	login_buffer->num_rxcomp_subcrqs = cpu_to_be32(adapter->req_rx_queues);
1814 	login_buffer->off_rxcomp_subcrqs =
1815 	    cpu_to_be32(sizeof(struct ibmvnic_login_buffer) +
1816 			sizeof(u64) * adapter->req_tx_queues);
1817 	login_buffer->login_rsp_ioba = cpu_to_be32(rsp_buffer_token);
1818 	login_buffer->login_rsp_len = cpu_to_be32(rsp_buffer_size);
1819 
1820 	tx_list_p = (__be64 *)((char *)login_buffer +
1821 				      sizeof(struct ibmvnic_login_buffer));
1822 	rx_list_p = (__be64 *)((char *)login_buffer +
1823 				      sizeof(struct ibmvnic_login_buffer) +
1824 				      sizeof(u64) * adapter->req_tx_queues);
1825 
1826 	for (i = 0; i < adapter->req_tx_queues; i++) {
1827 		if (adapter->tx_scrq[i]) {
1828 			tx_list_p[i] = cpu_to_be64(adapter->tx_scrq[i]->
1829 						   crq_num);
1830 		}
1831 	}
1832 
1833 	for (i = 0; i < adapter->req_rx_queues; i++) {
1834 		if (adapter->rx_scrq[i]) {
1835 			rx_list_p[i] = cpu_to_be64(adapter->rx_scrq[i]->
1836 						   crq_num);
1837 		}
1838 	}
1839 
1840 	netdev_dbg(adapter->netdev, "Login Buffer:\n");
1841 	for (i = 0; i < (adapter->login_buf_sz - 1) / 8 + 1; i++) {
1842 		netdev_dbg(adapter->netdev, "%016lx\n",
1843 			   ((unsigned long int *)(adapter->login_buf))[i]);
1844 	}
1845 
1846 	memset(&crq, 0, sizeof(crq));
1847 	crq.login.first = IBMVNIC_CRQ_CMD;
1848 	crq.login.cmd = LOGIN;
1849 	crq.login.ioba = cpu_to_be32(buffer_token);
1850 	crq.login.len = cpu_to_be32(buffer_size);
1851 
1852 	memcpy(&inflight_cmd->crq, &crq, sizeof(crq));
1853 
1854 	spin_lock_irqsave(&adapter->inflight_lock, flags);
1855 	list_add_tail(&inflight_cmd->list, &adapter->inflight);
1856 	spin_unlock_irqrestore(&adapter->inflight_lock, flags);
1857 
1858 	ibmvnic_send_crq(adapter, &crq);
1859 
1860 	return;
1861 
1862 inflight_alloc_failed:
1863 	dma_unmap_single(dev, rsp_buffer_token, rsp_buffer_size,
1864 			 DMA_FROM_DEVICE);
1865 buf_rsp_map_failed:
1866 	kfree(login_rsp_buffer);
1867 buf_rsp_alloc_failed:
1868 	dma_unmap_single(dev, buffer_token, buffer_size, DMA_TO_DEVICE);
1869 buf_map_failed:
1870 	kfree(login_buffer);
1871 buf_alloc_failed:
1872 	return;
1873 }
1874 
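/* Send REQUEST_MAP, asking the VNIC server to register the DMA buffer at
 * the given address and length under the supplied map_id.
 */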
1875 static void send_request_map(struct ibmvnic_adapter *adapter, dma_addr_t addr,
1876 			     u32 len, u8 map_id)
1877 {
1878 	union ibmvnic_crq crq;
1879 
1880 	memset(&crq, 0, sizeof(crq));
1881 	crq.request_map.first = IBMVNIC_CRQ_CMD;
1882 	crq.request_map.cmd = REQUEST_MAP;
1883 	crq.request_map.map_id = map_id;
1884 	crq.request_map.ioba = cpu_to_be32(addr);
1885 	crq.request_map.len = cpu_to_be32(len);
1886 	ibmvnic_send_crq(adapter, &crq);
1887 }
1888 
1889 static void send_request_unmap(struct ibmvnic_adapter *adapter, u8 map_id)
1890 {
1891 	union ibmvnic_crq crq;
1892 
1893 	memset(&crq, 0, sizeof(crq));
1894 	crq.request_unmap.first = IBMVNIC_CRQ_CMD;
1895 	crq.request_unmap.cmd = REQUEST_UNMAP;
1896 	crq.request_unmap.map_id = map_id;
1897 	ibmvnic_send_crq(adapter, &crq);
1898 }
1899 
1900 static void send_map_query(struct ibmvnic_adapter *adapter)
1901 {
1902 	union ibmvnic_crq crq;
1903 
1904 	memset(&crq, 0, sizeof(crq));
1905 	crq.query_map.first = IBMVNIC_CRQ_CMD;
1906 	crq.query_map.cmd = QUERY_MAP;
1907 	ibmvnic_send_crq(adapter, &crq);
1908 }
1909 
1910 /* Send a series of CRQs requesting various capabilities of the VNIC server */
1911 static void send_cap_queries(struct ibmvnic_adapter *adapter)
1912 {
1913 	union ibmvnic_crq crq;
1914 
1915 	atomic_set(&adapter->running_cap_queries, 0);
1916 	memset(&crq, 0, sizeof(crq));
1917 	crq.query_capability.first = IBMVNIC_CRQ_CMD;
1918 	crq.query_capability.cmd = QUERY_CAPABILITY;
1919 
1920 	crq.query_capability.capability = cpu_to_be16(MIN_TX_QUEUES);
1921 	atomic_inc(&adapter->running_cap_queries);
1922 	ibmvnic_send_crq(adapter, &crq);
1923 
1924 	crq.query_capability.capability = cpu_to_be16(MIN_RX_QUEUES);
1925 	atomic_inc(&adapter->running_cap_queries);
1926 	ibmvnic_send_crq(adapter, &crq);
1927 
1928 	crq.query_capability.capability = cpu_to_be16(MIN_RX_ADD_QUEUES);
1929 	atomic_inc(&adapter->running_cap_queries);
1930 	ibmvnic_send_crq(adapter, &crq);
1931 
1932 	crq.query_capability.capability = cpu_to_be16(MAX_TX_QUEUES);
1933 	atomic_inc(&adapter->running_cap_queries);
1934 	ibmvnic_send_crq(adapter, &crq);
1935 
1936 	crq.query_capability.capability = cpu_to_be16(MAX_RX_QUEUES);
1937 	atomic_inc(&adapter->running_cap_queries);
1938 	ibmvnic_send_crq(adapter, &crq);
1939 
1940 	crq.query_capability.capability = cpu_to_be16(MAX_RX_ADD_QUEUES);
1941 	atomic_inc(&adapter->running_cap_queries);
1942 	ibmvnic_send_crq(adapter, &crq);
1943 
1944 	crq.query_capability.capability =
1945 	    cpu_to_be16(MIN_TX_ENTRIES_PER_SUBCRQ);
1946 	atomic_inc(&adapter->running_cap_queries);
1947 	ibmvnic_send_crq(adapter, &crq);
1948 
1949 	crq.query_capability.capability =
1950 	    cpu_to_be16(MIN_RX_ADD_ENTRIES_PER_SUBCRQ);
1951 	atomic_inc(&adapter->running_cap_queries);
1952 	ibmvnic_send_crq(adapter, &crq);
1953 
1954 	crq.query_capability.capability =
1955 	    cpu_to_be16(MAX_TX_ENTRIES_PER_SUBCRQ);
1956 	atomic_inc(&adapter->running_cap_queries);
1957 	ibmvnic_send_crq(adapter, &crq);
1958 
1959 	crq.query_capability.capability =
1960 	    cpu_to_be16(MAX_RX_ADD_ENTRIES_PER_SUBCRQ);
1961 	atomic_inc(&adapter->running_cap_queries);
1962 	ibmvnic_send_crq(adapter, &crq);
1963 
1964 	crq.query_capability.capability = cpu_to_be16(TCP_IP_OFFLOAD);
1965 	atomic_inc(&adapter->running_cap_queries);
1966 	ibmvnic_send_crq(adapter, &crq);
1967 
1968 	crq.query_capability.capability = cpu_to_be16(PROMISC_SUPPORTED);
1969 	atomic_inc(&adapter->running_cap_queries);
1970 	ibmvnic_send_crq(adapter, &crq);
1971 
1972 	crq.query_capability.capability = cpu_to_be16(MIN_MTU);
1973 	atomic_inc(&adapter->running_cap_queries);
1974 	ibmvnic_send_crq(adapter, &crq);
1975 
1976 	crq.query_capability.capability = cpu_to_be16(MAX_MTU);
1977 	atomic_inc(&adapter->running_cap_queries);
1978 	ibmvnic_send_crq(adapter, &crq);
1979 
1980 	crq.query_capability.capability = cpu_to_be16(MAX_MULTICAST_FILTERS);
1981 	atomic_inc(&adapter->running_cap_queries);
1982 	ibmvnic_send_crq(adapter, &crq);
1983 
1984 	crq.query_capability.capability = cpu_to_be16(VLAN_HEADER_INSERTION);
1985 	atomic_inc(&adapter->running_cap_queries);
1986 	ibmvnic_send_crq(adapter, &crq);
1987 
1988 	crq.query_capability.capability = cpu_to_be16(MAX_TX_SG_ENTRIES);
1989 	atomic_inc(&adapter->running_cap_queries);
1990 	ibmvnic_send_crq(adapter, &crq);
1991 
1992 	crq.query_capability.capability = cpu_to_be16(RX_SG_SUPPORTED);
1993 	atomic_inc(&adapter->running_cap_queries);
1994 	ibmvnic_send_crq(adapter, &crq);
1995 
1996 	crq.query_capability.capability = cpu_to_be16(OPT_TX_COMP_SUB_QUEUES);
1997 	atomic_inc(&adapter->running_cap_queries);
1998 	ibmvnic_send_crq(adapter, &crq);
1999 
2000 	crq.query_capability.capability = cpu_to_be16(OPT_RX_COMP_QUEUES);
2001 	atomic_inc(&adapter->running_cap_queries);
2002 	ibmvnic_send_crq(adapter, &crq);
2003 
2004 	crq.query_capability.capability =
2005 			cpu_to_be16(OPT_RX_BUFADD_Q_PER_RX_COMP_Q);
2006 	atomic_inc(&adapter->running_cap_queries);
2007 	ibmvnic_send_crq(adapter, &crq);
2008 
2009 	crq.query_capability.capability =
2010 			cpu_to_be16(OPT_TX_ENTRIES_PER_SUBCRQ);
2011 	atomic_inc(&adapter->running_cap_queries);
2012 	ibmvnic_send_crq(adapter, &crq);
2013 
2014 	crq.query_capability.capability =
2015 			cpu_to_be16(OPT_RXBA_ENTRIES_PER_SUBCRQ);
2016 	atomic_inc(&adapter->running_cap_queries);
2017 	ibmvnic_send_crq(adapter, &crq);
2018 
2019 	crq.query_capability.capability = cpu_to_be16(TX_RX_DESC_REQ);
2020 	atomic_inc(&adapter->running_cap_queries);
2021 	ibmvnic_send_crq(adapter, &crq);
2022 }
2023 
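/* Process the QUERY_IP_OFFLOAD response: log the capabilities returned by
 * firmware, enable the corresponding checksum features on the net device,
 * and send CONTROL_IP_OFFLOAD to select which offloads will be used.
 */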
2024 static void handle_query_ip_offload_rsp(struct ibmvnic_adapter *adapter)
2025 {
2026 	struct device *dev = &adapter->vdev->dev;
2027 	struct ibmvnic_query_ip_offload_buffer *buf = &adapter->ip_offload_buf;
2028 	union ibmvnic_crq crq;
2029 	int i;
2030 
2031 	dma_unmap_single(dev, adapter->ip_offload_tok,
2032 			 sizeof(adapter->ip_offload_buf), DMA_FROM_DEVICE);
2033 
2034 	netdev_dbg(adapter->netdev, "Query IP Offload Buffer:\n");
2035 	for (i = 0; i < (sizeof(adapter->ip_offload_buf) - 1) / 8 + 1; i++)
2036 		netdev_dbg(adapter->netdev, "%016lx\n",
2037 			   ((unsigned long int *)(buf))[i]);
2038 
2039 	netdev_dbg(adapter->netdev, "ipv4_chksum = %d\n", buf->ipv4_chksum);
2040 	netdev_dbg(adapter->netdev, "ipv6_chksum = %d\n", buf->ipv6_chksum);
2041 	netdev_dbg(adapter->netdev, "tcp_ipv4_chksum = %d\n",
2042 		   buf->tcp_ipv4_chksum);
2043 	netdev_dbg(adapter->netdev, "tcp_ipv6_chksum = %d\n",
2044 		   buf->tcp_ipv6_chksum);
2045 	netdev_dbg(adapter->netdev, "udp_ipv4_chksum = %d\n",
2046 		   buf->udp_ipv4_chksum);
2047 	netdev_dbg(adapter->netdev, "udp_ipv6_chksum = %d\n",
2048 		   buf->udp_ipv6_chksum);
2049 	netdev_dbg(adapter->netdev, "large_tx_ipv4 = %d\n",
2050 		   buf->large_tx_ipv4);
2051 	netdev_dbg(adapter->netdev, "large_tx_ipv6 = %d\n",
2052 		   buf->large_tx_ipv6);
2053 	netdev_dbg(adapter->netdev, "large_rx_ipv4 = %d\n",
2054 		   buf->large_rx_ipv4);
2055 	netdev_dbg(adapter->netdev, "large_rx_ipv6 = %d\n",
2056 		   buf->large_rx_ipv6);
2057 	netdev_dbg(adapter->netdev, "max_ipv4_hdr_sz = %d\n",
2058 		   buf->max_ipv4_header_size);
2059 	netdev_dbg(adapter->netdev, "max_ipv6_hdr_sz = %d\n",
2060 		   buf->max_ipv6_header_size);
2061 	netdev_dbg(adapter->netdev, "max_tcp_hdr_size = %d\n",
2062 		   buf->max_tcp_header_size);
2063 	netdev_dbg(adapter->netdev, "max_udp_hdr_size = %d\n",
2064 		   buf->max_udp_header_size);
2065 	netdev_dbg(adapter->netdev, "max_large_tx_size = %d\n",
2066 		   buf->max_large_tx_size);
2067 	netdev_dbg(adapter->netdev, "max_large_rx_size = %d\n",
2068 		   buf->max_large_rx_size);
2069 	netdev_dbg(adapter->netdev, "ipv6_ext_hdr = %d\n",
2070 		   buf->ipv6_extension_header);
2071 	netdev_dbg(adapter->netdev, "tcp_pseudosum_req = %d\n",
2072 		   buf->tcp_pseudosum_req);
2073 	netdev_dbg(adapter->netdev, "num_ipv6_ext_hd = %d\n",
2074 		   buf->num_ipv6_ext_headers);
2075 	netdev_dbg(adapter->netdev, "off_ipv6_ext_hd = %d\n",
2076 		   buf->off_ipv6_ext_headers);
2077 
2078 	adapter->ip_offload_ctrl_tok =
2079 	    dma_map_single(dev, &adapter->ip_offload_ctrl,
2080 			   sizeof(adapter->ip_offload_ctrl), DMA_TO_DEVICE);
2081 
2082 	if (dma_mapping_error(dev, adapter->ip_offload_ctrl_tok)) {
2083 		dev_err(dev, "Couldn't map ip offload control buffer\n");
2084 		return;
2085 	}
2086 
2087 	adapter->ip_offload_ctrl.version = cpu_to_be32(INITIAL_VERSION_IOB);
2088 	adapter->ip_offload_ctrl.tcp_ipv4_chksum = buf->tcp_ipv4_chksum;
2089 	adapter->ip_offload_ctrl.udp_ipv4_chksum = buf->udp_ipv4_chksum;
2090 	adapter->ip_offload_ctrl.tcp_ipv6_chksum = buf->tcp_ipv6_chksum;
2091 	adapter->ip_offload_ctrl.udp_ipv6_chksum = buf->udp_ipv6_chksum;
2092 
2093 	/* large_tx/rx disabled for now, additional features needed */
2094 	adapter->ip_offload_ctrl.large_tx_ipv4 = 0;
2095 	adapter->ip_offload_ctrl.large_tx_ipv6 = 0;
2096 	adapter->ip_offload_ctrl.large_rx_ipv4 = 0;
2097 	adapter->ip_offload_ctrl.large_rx_ipv6 = 0;
2098 
2099 	adapter->netdev->features = NETIF_F_GSO;
2100 
2101 	if (buf->tcp_ipv4_chksum || buf->udp_ipv4_chksum)
2102 		adapter->netdev->features |= NETIF_F_IP_CSUM;
2103 
2104 	if (buf->tcp_ipv6_chksum || buf->udp_ipv6_chksum)
2105 		adapter->netdev->features |= NETIF_F_IPV6_CSUM;
2106 
2107 	if ((adapter->netdev->features &
2108 	    (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)))
2109 		adapter->netdev->features |= NETIF_F_RXCSUM;
2110 
2111 	memset(&crq, 0, sizeof(crq));
2112 	crq.control_ip_offload.first = IBMVNIC_CRQ_CMD;
2113 	crq.control_ip_offload.cmd = CONTROL_IP_OFFLOAD;
2114 	crq.control_ip_offload.len =
2115 	    cpu_to_be32(sizeof(adapter->ip_offload_ctrl));
2116 	crq.control_ip_offload.ioba = cpu_to_be32(adapter->ip_offload_ctrl_tok);
2117 	ibmvnic_send_crq(adapter, &crq);
2118 }
2119 
2120 static void handle_error_info_rsp(union ibmvnic_crq *crq,
2121 				  struct ibmvnic_adapter *adapter)
2122 {
2123 	struct device *dev = &adapter->vdev->dev;
2124 	struct ibmvnic_error_buff *error_buff;
2125 	unsigned long flags;
2126 	bool found = false;
2127 	int i;
2128 
	if (crq->request_error_rsp.rc.code) {
2130 		dev_info(dev, "Request Error Rsp returned with rc=%x\n",
2131 			 crq->request_error_rsp.rc.code);
2132 		return;
2133 	}
2134 
2135 	spin_lock_irqsave(&adapter->error_list_lock, flags);
2136 	list_for_each_entry(error_buff, &adapter->errors, list)
2137 		if (error_buff->error_id == crq->request_error_rsp.error_id) {
2138 			found = true;
2139 			list_del(&error_buff->list);
2140 			break;
2141 		}
2142 	spin_unlock_irqrestore(&adapter->error_list_lock, flags);
2143 
2144 	if (!found) {
2145 		dev_err(dev, "Couldn't find error id %x\n",
2146 			crq->request_error_rsp.error_id);
2147 		return;
2148 	}
2149 
2150 	dev_err(dev, "Detailed info for error id %x:",
2151 		crq->request_error_rsp.error_id);
2152 
2153 	for (i = 0; i < error_buff->len; i++) {
2154 		pr_cont("%02x", (int)error_buff->buff[i]);
2155 		if (i % 8 == 7)
2156 			pr_cont(" ");
2157 	}
2158 	pr_cont("\n");
2159 
2160 	dma_unmap_single(dev, error_buff->dma, error_buff->len,
2161 			 DMA_FROM_DEVICE);
2162 	kfree(error_buff->buff);
2163 	kfree(error_buff);
2164 }
2165 
2166 static void handle_dump_size_rsp(union ibmvnic_crq *crq,
2167 				 struct ibmvnic_adapter *adapter)
2168 {
2169 	int len = be32_to_cpu(crq->request_dump_size_rsp.len);
2170 	struct ibmvnic_inflight_cmd *inflight_cmd;
2171 	struct device *dev = &adapter->vdev->dev;
2172 	union ibmvnic_crq newcrq;
2173 	unsigned long flags;
2174 
2175 	/* allocate and map buffer */
2176 	adapter->dump_data = kmalloc(len, GFP_KERNEL);
2177 	if (!adapter->dump_data) {
2178 		complete(&adapter->fw_done);
2179 		return;
2180 	}
2181 
2182 	adapter->dump_data_token = dma_map_single(dev, adapter->dump_data, len,
2183 						  DMA_FROM_DEVICE);
2184 
2185 	if (dma_mapping_error(dev, adapter->dump_data_token)) {
2186 		if (!firmware_has_feature(FW_FEATURE_CMO))
2187 			dev_err(dev, "Couldn't map dump data\n");
2188 		kfree(adapter->dump_data);
2189 		complete(&adapter->fw_done);
2190 		return;
2191 	}
2192 
2193 	inflight_cmd = kmalloc(sizeof(*inflight_cmd), GFP_ATOMIC);
2194 	if (!inflight_cmd) {
2195 		dma_unmap_single(dev, adapter->dump_data_token, len,
2196 				 DMA_FROM_DEVICE);
2197 		kfree(adapter->dump_data);
2198 		complete(&adapter->fw_done);
2199 		return;
2200 	}
2201 
2202 	memset(&newcrq, 0, sizeof(newcrq));
2203 	newcrq.request_dump.first = IBMVNIC_CRQ_CMD;
2204 	newcrq.request_dump.cmd = REQUEST_DUMP;
2205 	newcrq.request_dump.ioba = cpu_to_be32(adapter->dump_data_token);
2206 	newcrq.request_dump.len = cpu_to_be32(adapter->dump_data_size);
2207 
2208 	memcpy(&inflight_cmd->crq, &newcrq, sizeof(newcrq));
2209 
2210 	spin_lock_irqsave(&adapter->inflight_lock, flags);
2211 	list_add_tail(&inflight_cmd->list, &adapter->inflight);
2212 	spin_unlock_irqrestore(&adapter->inflight_lock, flags);
2213 
2214 	ibmvnic_send_crq(adapter, &newcrq);
2215 }
2216 
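/* Firmware has reported an error. Allocate and map a buffer for the
 * detailed error data, track it on the adapter's error list, and send
 * REQUEST_ERROR_INFO so the server can fill the buffer in.
 */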
2217 static void handle_error_indication(union ibmvnic_crq *crq,
2218 				    struct ibmvnic_adapter *adapter)
2219 {
2220 	int detail_len = be32_to_cpu(crq->error_indication.detail_error_sz);
2221 	struct ibmvnic_inflight_cmd *inflight_cmd;
2222 	struct device *dev = &adapter->vdev->dev;
2223 	struct ibmvnic_error_buff *error_buff;
2224 	union ibmvnic_crq new_crq;
2225 	unsigned long flags;
2226 
2227 	dev_err(dev, "Firmware reports %serror id %x, cause %d\n",
2228 		crq->error_indication.
2229 		    flags & IBMVNIC_FATAL_ERROR ? "FATAL " : "",
2230 		crq->error_indication.error_id,
2231 		crq->error_indication.error_cause);
2232 
2233 	error_buff = kmalloc(sizeof(*error_buff), GFP_ATOMIC);
2234 	if (!error_buff)
2235 		return;
2236 
2237 	error_buff->buff = kmalloc(detail_len, GFP_ATOMIC);
2238 	if (!error_buff->buff) {
2239 		kfree(error_buff);
2240 		return;
2241 	}
2242 
2243 	error_buff->dma = dma_map_single(dev, error_buff->buff, detail_len,
2244 					 DMA_FROM_DEVICE);
2245 	if (dma_mapping_error(dev, error_buff->dma)) {
2246 		if (!firmware_has_feature(FW_FEATURE_CMO))
2247 			dev_err(dev, "Couldn't map error buffer\n");
2248 		kfree(error_buff->buff);
2249 		kfree(error_buff);
2250 		return;
2251 	}
2252 
2253 	inflight_cmd = kmalloc(sizeof(*inflight_cmd), GFP_ATOMIC);
2254 	if (!inflight_cmd) {
2255 		dma_unmap_single(dev, error_buff->dma, detail_len,
2256 				 DMA_FROM_DEVICE);
2257 		kfree(error_buff->buff);
2258 		kfree(error_buff);
2259 		return;
2260 	}
2261 
2262 	error_buff->len = detail_len;
2263 	error_buff->error_id = crq->error_indication.error_id;
2264 
2265 	spin_lock_irqsave(&adapter->error_list_lock, flags);
2266 	list_add_tail(&error_buff->list, &adapter->errors);
2267 	spin_unlock_irqrestore(&adapter->error_list_lock, flags);
2268 
2269 	memset(&new_crq, 0, sizeof(new_crq));
2270 	new_crq.request_error_info.first = IBMVNIC_CRQ_CMD;
2271 	new_crq.request_error_info.cmd = REQUEST_ERROR_INFO;
2272 	new_crq.request_error_info.ioba = cpu_to_be32(error_buff->dma);
2273 	new_crq.request_error_info.len = cpu_to_be32(detail_len);
2274 	new_crq.request_error_info.error_id = crq->error_indication.error_id;
2275 
	memcpy(&inflight_cmd->crq, &new_crq, sizeof(new_crq));
2277 
2278 	spin_lock_irqsave(&adapter->inflight_lock, flags);
2279 	list_add_tail(&inflight_cmd->list, &adapter->inflight);
2280 	spin_unlock_irqrestore(&adapter->inflight_lock, flags);
2281 
2282 	ibmvnic_send_crq(adapter, &new_crq);
2283 }
2284 
2285 static void handle_change_mac_rsp(union ibmvnic_crq *crq,
2286 				  struct ibmvnic_adapter *adapter)
2287 {
2288 	struct net_device *netdev = adapter->netdev;
2289 	struct device *dev = &adapter->vdev->dev;
2290 	long rc;
2291 
2292 	rc = crq->change_mac_addr_rsp.rc.code;
2293 	if (rc) {
2294 		dev_err(dev, "Error %ld in CHANGE_MAC_ADDR_RSP\n", rc);
2295 		return;
2296 	}
2297 	memcpy(netdev->dev_addr, &crq->change_mac_addr_rsp.mac_addr[0],
2298 	       ETH_ALEN);
2299 }
2300 
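/* Process a REQUEST_CAPABILITY response. On partial success the server's
 * counter-offer is adopted and the sub-CRQs are released so the request
 * can be retried; once all capability responses are in, kick off the
 * QUERY_IP_OFFLOAD exchange.
 */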
2301 static void handle_request_cap_rsp(union ibmvnic_crq *crq,
2302 				   struct ibmvnic_adapter *adapter)
2303 {
2304 	struct device *dev = &adapter->vdev->dev;
2305 	u64 *req_value;
2306 	char *name;
2307 
2308 	switch (be16_to_cpu(crq->request_capability_rsp.capability)) {
2309 	case REQ_TX_QUEUES:
2310 		req_value = &adapter->req_tx_queues;
2311 		name = "tx";
2312 		break;
2313 	case REQ_RX_QUEUES:
2314 		req_value = &adapter->req_rx_queues;
2315 		name = "rx";
2316 		break;
2317 	case REQ_RX_ADD_QUEUES:
2318 		req_value = &adapter->req_rx_add_queues;
2319 		name = "rx_add";
2320 		break;
2321 	case REQ_TX_ENTRIES_PER_SUBCRQ:
2322 		req_value = &adapter->req_tx_entries_per_subcrq;
2323 		name = "tx_entries_per_subcrq";
2324 		break;
2325 	case REQ_RX_ADD_ENTRIES_PER_SUBCRQ:
2326 		req_value = &adapter->req_rx_add_entries_per_subcrq;
2327 		name = "rx_add_entries_per_subcrq";
2328 		break;
2329 	case REQ_MTU:
2330 		req_value = &adapter->req_mtu;
2331 		name = "mtu";
2332 		break;
2333 	case PROMISC_REQUESTED:
2334 		req_value = &adapter->promisc;
2335 		name = "promisc";
2336 		break;
2337 	default:
2338 		dev_err(dev, "Got invalid cap request rsp %d\n",
2339 			crq->request_capability.capability);
2340 		return;
2341 	}
2342 
2343 	switch (crq->request_capability_rsp.rc.code) {
2344 	case SUCCESS:
2345 		break;
2346 	case PARTIALSUCCESS:
2347 		dev_info(dev, "req=%lld, rsp=%ld in %s queue, retrying.\n",
2348 			 *req_value,
			 (long int)be64_to_cpu(crq->request_capability_rsp.
2350 					       number), name);
2351 		release_sub_crqs(adapter);
		*req_value = be64_to_cpu(crq->request_capability_rsp.number);
2353 		complete(&adapter->init_done);
2354 		return;
2355 	default:
2356 		dev_err(dev, "Error %d in request cap rsp\n",
2357 			crq->request_capability_rsp.rc.code);
2358 		return;
2359 	}
2360 
2361 	/* Done receiving requested capabilities, query IP offload support */
2362 	if (++adapter->requested_caps == 7) {
2363 		union ibmvnic_crq newcrq;
2364 		int buf_sz = sizeof(struct ibmvnic_query_ip_offload_buffer);
2365 		struct ibmvnic_query_ip_offload_buffer *ip_offload_buf =
2366 		    &adapter->ip_offload_buf;
2367 
2368 		adapter->ip_offload_tok = dma_map_single(dev, ip_offload_buf,
2369 							 buf_sz,
2370 							 DMA_FROM_DEVICE);
2371 
2372 		if (dma_mapping_error(dev, adapter->ip_offload_tok)) {
2373 			if (!firmware_has_feature(FW_FEATURE_CMO))
2374 				dev_err(dev, "Couldn't map offload buffer\n");
2375 			return;
2376 		}
2377 
2378 		memset(&newcrq, 0, sizeof(newcrq));
2379 		newcrq.query_ip_offload.first = IBMVNIC_CRQ_CMD;
2380 		newcrq.query_ip_offload.cmd = QUERY_IP_OFFLOAD;
2381 		newcrq.query_ip_offload.len = cpu_to_be32(buf_sz);
2382 		newcrq.query_ip_offload.ioba =
2383 		    cpu_to_be32(adapter->ip_offload_tok);
2384 
2385 		ibmvnic_send_crq(adapter, &newcrq);
2386 	}
2387 }
2388 
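/* Process the LOGIN response. A non-zero return code means the server
 * could not grant the requested queues, so renegotiation is flagged;
 * otherwise the response is sanity checked against the request and the
 * RAS component exchange is started.
 */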
2389 static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
2390 			    struct ibmvnic_adapter *adapter)
2391 {
2392 	struct device *dev = &adapter->vdev->dev;
2393 	struct ibmvnic_login_rsp_buffer *login_rsp = adapter->login_rsp_buf;
2394 	struct ibmvnic_login_buffer *login = adapter->login_buf;
2395 	union ibmvnic_crq crq;
2396 	int i;
2397 
2398 	dma_unmap_single(dev, adapter->login_buf_token, adapter->login_buf_sz,
2399 			 DMA_BIDIRECTIONAL);
2400 	dma_unmap_single(dev, adapter->login_rsp_buf_token,
2401 			 adapter->login_rsp_buf_sz, DMA_BIDIRECTIONAL);
2402 
2403 	/* If the number of queues requested can't be allocated by the
2404 	 * server, the login response will return with code 1. We will need
2405 	 * to resend the login buffer with fewer queues requested.
2406 	 */
2407 	if (login_rsp_crq->generic.rc.code) {
2408 		adapter->renegotiate = true;
2409 		complete(&adapter->init_done);
2410 		return 0;
2411 	}
2412 
2413 	netdev_dbg(adapter->netdev, "Login Response Buffer:\n");
2414 	for (i = 0; i < (adapter->login_rsp_buf_sz - 1) / 8 + 1; i++) {
2415 		netdev_dbg(adapter->netdev, "%016lx\n",
2416 			   ((unsigned long int *)(adapter->login_rsp_buf))[i]);
2417 	}
2418 
2419 	/* Sanity checks */
2420 	if (login->num_txcomp_subcrqs != login_rsp->num_txsubm_subcrqs ||
2421 	    (be32_to_cpu(login->num_rxcomp_subcrqs) *
2422 	     adapter->req_rx_add_queues !=
2423 	     be32_to_cpu(login_rsp->num_rxadd_subcrqs))) {
2424 		dev_err(dev, "FATAL: Inconsistent login and login rsp\n");
2425 		ibmvnic_remove(adapter->vdev);
2426 		return -EIO;
2427 	}
2428 	complete(&adapter->init_done);
2429 
2430 	memset(&crq, 0, sizeof(crq));
2431 	crq.request_ras_comp_num.first = IBMVNIC_CRQ_CMD;
2432 	crq.request_ras_comp_num.cmd = REQUEST_RAS_COMP_NUM;
2433 	ibmvnic_send_crq(adapter, &crq);
2434 
2435 	return 0;
2436 }
2437 
2438 static void handle_request_map_rsp(union ibmvnic_crq *crq,
2439 				   struct ibmvnic_adapter *adapter)
2440 {
2441 	struct device *dev = &adapter->vdev->dev;
2442 	u8 map_id = crq->request_map_rsp.map_id;
2443 	int tx_subcrqs;
2444 	int rx_subcrqs;
2445 	long rc;
2446 	int i;
2447 
2448 	tx_subcrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
2449 	rx_subcrqs = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
2450 
2451 	rc = crq->request_map_rsp.rc.code;
2452 	if (rc) {
2453 		dev_err(dev, "Error %ld in REQUEST_MAP_RSP\n", rc);
2454 		adapter->map_id--;
2455 		/* need to find and zero tx/rx_pool map_id */
2456 		for (i = 0; i < tx_subcrqs; i++) {
2457 			if (adapter->tx_pool[i].long_term_buff.map_id == map_id)
2458 				adapter->tx_pool[i].long_term_buff.map_id = 0;
2459 		}
2460 		for (i = 0; i < rx_subcrqs; i++) {
2461 			if (adapter->rx_pool[i].long_term_buff.map_id == map_id)
2462 				adapter->rx_pool[i].long_term_buff.map_id = 0;
2463 		}
2464 	}
2465 	complete(&adapter->fw_done);
2466 }
2467 
2468 static void handle_request_unmap_rsp(union ibmvnic_crq *crq,
2469 				     struct ibmvnic_adapter *adapter)
2470 {
2471 	struct device *dev = &adapter->vdev->dev;
2472 	long rc;
2473 
2474 	rc = crq->request_unmap_rsp.rc.code;
2475 	if (rc)
2476 		dev_err(dev, "Error %ld in REQUEST_UNMAP_RSP\n", rc);
2477 }
2478 
2479 static void handle_query_map_rsp(union ibmvnic_crq *crq,
2480 				 struct ibmvnic_adapter *adapter)
2481 {
2482 	struct net_device *netdev = adapter->netdev;
2483 	struct device *dev = &adapter->vdev->dev;
2484 	long rc;
2485 
2486 	rc = crq->query_map_rsp.rc.code;
2487 	if (rc) {
2488 		dev_err(dev, "Error %ld in QUERY_MAP_RSP\n", rc);
2489 		return;
2490 	}
2491 	netdev_dbg(netdev, "page_size = %d\ntot_pages = %d\nfree_pages = %d\n",
2492 		   crq->query_map_rsp.page_size, crq->query_map_rsp.tot_pages,
2493 		   crq->query_map_rsp.free_pages);
2494 }
2495 
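/* Record a single QUERY_CAPABILITY response in the adapter structure and,
 * once the last outstanding query has been answered, signal init_done.
 */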
2496 static void handle_query_cap_rsp(union ibmvnic_crq *crq,
2497 				 struct ibmvnic_adapter *adapter)
2498 {
2499 	struct net_device *netdev = adapter->netdev;
2500 	struct device *dev = &adapter->vdev->dev;
2501 	long rc;
2502 
2503 	atomic_dec(&adapter->running_cap_queries);
2504 	netdev_dbg(netdev, "Outstanding queries: %d\n",
2505 		   atomic_read(&adapter->running_cap_queries));
2506 	rc = crq->query_capability.rc.code;
2507 	if (rc) {
2508 		dev_err(dev, "Error %ld in QUERY_CAP_RSP\n", rc);
2509 		goto out;
2510 	}
2511 
2512 	switch (be16_to_cpu(crq->query_capability.capability)) {
2513 	case MIN_TX_QUEUES:
2514 		adapter->min_tx_queues =
2515 		    be64_to_cpu(crq->query_capability.number);
2516 		netdev_dbg(netdev, "min_tx_queues = %lld\n",
2517 			   adapter->min_tx_queues);
2518 		break;
2519 	case MIN_RX_QUEUES:
2520 		adapter->min_rx_queues =
2521 		    be64_to_cpu(crq->query_capability.number);
2522 		netdev_dbg(netdev, "min_rx_queues = %lld\n",
2523 			   adapter->min_rx_queues);
2524 		break;
2525 	case MIN_RX_ADD_QUEUES:
2526 		adapter->min_rx_add_queues =
2527 		    be64_to_cpu(crq->query_capability.number);
2528 		netdev_dbg(netdev, "min_rx_add_queues = %lld\n",
2529 			   adapter->min_rx_add_queues);
2530 		break;
2531 	case MAX_TX_QUEUES:
2532 		adapter->max_tx_queues =
2533 		    be64_to_cpu(crq->query_capability.number);
2534 		netdev_dbg(netdev, "max_tx_queues = %lld\n",
2535 			   adapter->max_tx_queues);
2536 		break;
2537 	case MAX_RX_QUEUES:
2538 		adapter->max_rx_queues =
2539 		    be64_to_cpu(crq->query_capability.number);
2540 		netdev_dbg(netdev, "max_rx_queues = %lld\n",
2541 			   adapter->max_rx_queues);
2542 		break;
2543 	case MAX_RX_ADD_QUEUES:
2544 		adapter->max_rx_add_queues =
2545 		    be64_to_cpu(crq->query_capability.number);
2546 		netdev_dbg(netdev, "max_rx_add_queues = %lld\n",
2547 			   adapter->max_rx_add_queues);
2548 		break;
2549 	case MIN_TX_ENTRIES_PER_SUBCRQ:
2550 		adapter->min_tx_entries_per_subcrq =
2551 		    be64_to_cpu(crq->query_capability.number);
2552 		netdev_dbg(netdev, "min_tx_entries_per_subcrq = %lld\n",
2553 			   adapter->min_tx_entries_per_subcrq);
2554 		break;
2555 	case MIN_RX_ADD_ENTRIES_PER_SUBCRQ:
2556 		adapter->min_rx_add_entries_per_subcrq =
2557 		    be64_to_cpu(crq->query_capability.number);
2558 		netdev_dbg(netdev, "min_rx_add_entrs_per_subcrq = %lld\n",
2559 			   adapter->min_rx_add_entries_per_subcrq);
2560 		break;
2561 	case MAX_TX_ENTRIES_PER_SUBCRQ:
2562 		adapter->max_tx_entries_per_subcrq =
2563 		    be64_to_cpu(crq->query_capability.number);
2564 		netdev_dbg(netdev, "max_tx_entries_per_subcrq = %lld\n",
2565 			   adapter->max_tx_entries_per_subcrq);
2566 		break;
2567 	case MAX_RX_ADD_ENTRIES_PER_SUBCRQ:
2568 		adapter->max_rx_add_entries_per_subcrq =
2569 		    be64_to_cpu(crq->query_capability.number);
2570 		netdev_dbg(netdev, "max_rx_add_entrs_per_subcrq = %lld\n",
2571 			   adapter->max_rx_add_entries_per_subcrq);
2572 		break;
2573 	case TCP_IP_OFFLOAD:
2574 		adapter->tcp_ip_offload =
2575 		    be64_to_cpu(crq->query_capability.number);
2576 		netdev_dbg(netdev, "tcp_ip_offload = %lld\n",
2577 			   adapter->tcp_ip_offload);
2578 		break;
2579 	case PROMISC_SUPPORTED:
2580 		adapter->promisc_supported =
2581 		    be64_to_cpu(crq->query_capability.number);
2582 		netdev_dbg(netdev, "promisc_supported = %lld\n",
2583 			   adapter->promisc_supported);
2584 		break;
2585 	case MIN_MTU:
2586 		adapter->min_mtu = be64_to_cpu(crq->query_capability.number);
2587 		netdev_dbg(netdev, "min_mtu = %lld\n", adapter->min_mtu);
2588 		break;
2589 	case MAX_MTU:
2590 		adapter->max_mtu = be64_to_cpu(crq->query_capability.number);
2591 		netdev_dbg(netdev, "max_mtu = %lld\n", adapter->max_mtu);
2592 		break;
2593 	case MAX_MULTICAST_FILTERS:
2594 		adapter->max_multicast_filters =
2595 		    be64_to_cpu(crq->query_capability.number);
2596 		netdev_dbg(netdev, "max_multicast_filters = %lld\n",
2597 			   adapter->max_multicast_filters);
2598 		break;
2599 	case VLAN_HEADER_INSERTION:
2600 		adapter->vlan_header_insertion =
2601 		    be64_to_cpu(crq->query_capability.number);
2602 		if (adapter->vlan_header_insertion)
2603 			netdev->features |= NETIF_F_HW_VLAN_STAG_TX;
2604 		netdev_dbg(netdev, "vlan_header_insertion = %lld\n",
2605 			   adapter->vlan_header_insertion);
2606 		break;
2607 	case MAX_TX_SG_ENTRIES:
2608 		adapter->max_tx_sg_entries =
2609 		    be64_to_cpu(crq->query_capability.number);
2610 		netdev_dbg(netdev, "max_tx_sg_entries = %lld\n",
2611 			   adapter->max_tx_sg_entries);
2612 		break;
2613 	case RX_SG_SUPPORTED:
2614 		adapter->rx_sg_supported =
2615 		    be64_to_cpu(crq->query_capability.number);
2616 		netdev_dbg(netdev, "rx_sg_supported = %lld\n",
2617 			   adapter->rx_sg_supported);
2618 		break;
2619 	case OPT_TX_COMP_SUB_QUEUES:
2620 		adapter->opt_tx_comp_sub_queues =
2621 		    be64_to_cpu(crq->query_capability.number);
2622 		netdev_dbg(netdev, "opt_tx_comp_sub_queues = %lld\n",
2623 			   adapter->opt_tx_comp_sub_queues);
2624 		break;
2625 	case OPT_RX_COMP_QUEUES:
2626 		adapter->opt_rx_comp_queues =
2627 		    be64_to_cpu(crq->query_capability.number);
2628 		netdev_dbg(netdev, "opt_rx_comp_queues = %lld\n",
2629 			   adapter->opt_rx_comp_queues);
2630 		break;
2631 	case OPT_RX_BUFADD_Q_PER_RX_COMP_Q:
2632 		adapter->opt_rx_bufadd_q_per_rx_comp_q =
2633 		    be64_to_cpu(crq->query_capability.number);
2634 		netdev_dbg(netdev, "opt_rx_bufadd_q_per_rx_comp_q = %lld\n",
2635 			   adapter->opt_rx_bufadd_q_per_rx_comp_q);
2636 		break;
2637 	case OPT_TX_ENTRIES_PER_SUBCRQ:
2638 		adapter->opt_tx_entries_per_subcrq =
2639 		    be64_to_cpu(crq->query_capability.number);
2640 		netdev_dbg(netdev, "opt_tx_entries_per_subcrq = %lld\n",
2641 			   adapter->opt_tx_entries_per_subcrq);
2642 		break;
2643 	case OPT_RXBA_ENTRIES_PER_SUBCRQ:
2644 		adapter->opt_rxba_entries_per_subcrq =
2645 		    be64_to_cpu(crq->query_capability.number);
2646 		netdev_dbg(netdev, "opt_rxba_entries_per_subcrq = %lld\n",
2647 			   adapter->opt_rxba_entries_per_subcrq);
2648 		break;
2649 	case TX_RX_DESC_REQ:
2650 		adapter->tx_rx_desc_req = crq->query_capability.number;
2651 		netdev_dbg(netdev, "tx_rx_desc_req = %llx\n",
2652 			   adapter->tx_rx_desc_req);
2653 		break;
2654 
2655 	default:
2656 		netdev_err(netdev, "Got invalid cap rsp %d\n",
2657 			   crq->query_capability.capability);
2658 	}
2659 
2660 out:
2661 	if (atomic_read(&adapter->running_cap_queries) == 0)
	/* We're done querying the capabilities, initialize sub-crqs */
	if (atomic_read(&adapter->running_cap_queries) == 0)
		complete(&adapter->init_done);
2665 
2666 static void handle_control_ras_rsp(union ibmvnic_crq *crq,
2667 				   struct ibmvnic_adapter *adapter)
2668 {
2669 	u8 correlator = crq->control_ras_rsp.correlator;
2670 	struct device *dev = &adapter->vdev->dev;
2671 	bool found = false;
2672 	int i;
2673 
2674 	if (crq->control_ras_rsp.rc.code) {
2675 		dev_warn(dev, "Control ras failed rc=%d\n",
2676 			 crq->control_ras_rsp.rc.code);
2677 		return;
2678 	}
2679 
2680 	for (i = 0; i < adapter->ras_comp_num; i++) {
2681 		if (adapter->ras_comps[i].correlator == correlator) {
2682 			found = true;
2683 			break;
2684 		}
2685 	}
2686 
2687 	if (!found) {
2688 		dev_warn(dev, "Correlator not found on control_ras_rsp\n");
2689 		return;
2690 	}
2691 
2692 	switch (crq->control_ras_rsp.op) {
2693 	case IBMVNIC_TRACE_LEVEL:
2694 		adapter->ras_comps[i].trace_level = crq->control_ras.level;
2695 		break;
2696 	case IBMVNIC_ERROR_LEVEL:
2697 		adapter->ras_comps[i].error_check_level =
2698 		    crq->control_ras.level;
2699 		break;
2700 	case IBMVNIC_TRACE_PAUSE:
2701 		adapter->ras_comp_int[i].paused = 1;
2702 		break;
2703 	case IBMVNIC_TRACE_RESUME:
2704 		adapter->ras_comp_int[i].paused = 0;
2705 		break;
2706 	case IBMVNIC_TRACE_ON:
2707 		adapter->ras_comps[i].trace_on = 1;
2708 		break;
2709 	case IBMVNIC_TRACE_OFF:
2710 		adapter->ras_comps[i].trace_on = 0;
2711 		break;
2712 	case IBMVNIC_CHG_TRACE_BUFF_SZ:
2713 		/* trace_buff_sz is 3 bytes, stuff it into an int */
2714 		((u8 *)(&adapter->ras_comps[i].trace_buff_size))[0] = 0;
2715 		((u8 *)(&adapter->ras_comps[i].trace_buff_size))[1] =
2716 		    crq->control_ras_rsp.trace_buff_sz[0];
2717 		((u8 *)(&adapter->ras_comps[i].trace_buff_size))[2] =
2718 		    crq->control_ras_rsp.trace_buff_sz[1];
2719 		((u8 *)(&adapter->ras_comps[i].trace_buff_size))[3] =
2720 		    crq->control_ras_rsp.trace_buff_sz[2];
2721 		break;
2722 	default:
2723 		dev_err(dev, "invalid op %d on control_ras_rsp",
2724 			crq->control_ras_rsp.op);
2725 	}
2726 }
2727 
2728 static int ibmvnic_fw_comp_open(struct inode *inode, struct file *file)
2729 {
2730 	file->private_data = inode->i_private;
2731 	return 0;
2732 }
2733 
2734 static ssize_t trace_read(struct file *file, char __user *user_buf, size_t len,
2735 			  loff_t *ppos)
2736 {
2737 	struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
2738 	struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
2739 	struct device *dev = &adapter->vdev->dev;
2740 	struct ibmvnic_fw_trace_entry *trace;
2741 	int num = ras_comp_int->num;
2742 	union ibmvnic_crq crq;
2743 	dma_addr_t trace_tok;
2744 
2745 	if (*ppos >= be32_to_cpu(adapter->ras_comps[num].trace_buff_size))
2746 		return 0;
2747 
2748 	trace =
2749 	    dma_alloc_coherent(dev,
2750 			       be32_to_cpu(adapter->ras_comps[num].
2751 					   trace_buff_size), &trace_tok,
2752 			       GFP_KERNEL);
2753 	if (!trace) {
2754 		dev_err(dev, "Couldn't alloc trace buffer\n");
2755 		return 0;
2756 	}
2757 
2758 	memset(&crq, 0, sizeof(crq));
2759 	crq.collect_fw_trace.first = IBMVNIC_CRQ_CMD;
2760 	crq.collect_fw_trace.cmd = COLLECT_FW_TRACE;
2761 	crq.collect_fw_trace.correlator = adapter->ras_comps[num].correlator;
2762 	crq.collect_fw_trace.ioba = cpu_to_be32(trace_tok);
2763 	crq.collect_fw_trace.len = adapter->ras_comps[num].trace_buff_size;
	/* Initialize fw_done before sending so the response cannot race
	 * the wait below.
	 */
	init_completion(&adapter->fw_done);
	ibmvnic_send_crq(adapter, &crq);
	wait_for_completion(&adapter->fw_done);
2768 
2769 	if (*ppos + len > be32_to_cpu(adapter->ras_comps[num].trace_buff_size))
2770 		len =
2771 		    be32_to_cpu(adapter->ras_comps[num].trace_buff_size) -
2772 		    *ppos;
2773 
2774 	copy_to_user(user_buf, &((u8 *)trace)[*ppos], len);
2775 
2776 	dma_free_coherent(dev,
2777 			  be32_to_cpu(adapter->ras_comps[num].trace_buff_size),
2778 			  trace, trace_tok);
2779 	*ppos += len;
2780 	return len;
2781 }
2782 
2783 static const struct file_operations trace_ops = {
2784 	.owner		= THIS_MODULE,
2785 	.open		= ibmvnic_fw_comp_open,
2786 	.read		= trace_read,
2787 };
2788 
2789 static ssize_t paused_read(struct file *file, char __user *user_buf, size_t len,
2790 			   loff_t *ppos)
2791 {
2792 	struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
2793 	struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
2794 	int num = ras_comp_int->num;
2795 	char buff[5]; /*  1 or 0 plus \n and \0 */
2796 	int size;
2797 
2798 	size = sprintf(buff, "%d\n", adapter->ras_comp_int[num].paused);
2799 
2800 	if (*ppos >= size)
2801 		return 0;
2802 
	if (copy_to_user(user_buf, buff, size))
		return -EFAULT;
2804 	*ppos += size;
2805 	return size;
2806 }
2807 
2808 static ssize_t paused_write(struct file *file, const char __user *user_buf,
2809 			    size_t len, loff_t *ppos)
2810 {
2811 	struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
2812 	struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
2813 	int num = ras_comp_int->num;
2814 	union ibmvnic_crq crq;
2815 	unsigned long val;
2816 	char buff[9]; /* decimal max int plus \n and \0 */
2817 
	memset(buff, 0, sizeof(buff));
	if (copy_from_user(buff, user_buf, min(len, sizeof(buff) - 1)))
		return -EFAULT;
	if (kstrtoul(buff, 10, &val))
		return -EINVAL;
2820 
2821 	adapter->ras_comp_int[num].paused = val ? 1 : 0;
2822 
2823 	memset(&crq, 0, sizeof(crq));
2824 	crq.control_ras.first = IBMVNIC_CRQ_CMD;
2825 	crq.control_ras.cmd = CONTROL_RAS;
2826 	crq.control_ras.correlator = adapter->ras_comps[num].correlator;
2827 	crq.control_ras.op = val ? IBMVNIC_TRACE_PAUSE : IBMVNIC_TRACE_RESUME;
2828 	ibmvnic_send_crq(adapter, &crq);
2829 
2830 	return len;
2831 }
2832 
2833 static const struct file_operations paused_ops = {
2834 	.owner		= THIS_MODULE,
2835 	.open		= ibmvnic_fw_comp_open,
2836 	.read		= paused_read,
2837 	.write		= paused_write,
2838 };
2839 
2840 static ssize_t tracing_read(struct file *file, char __user *user_buf,
2841 			    size_t len, loff_t *ppos)
2842 {
2843 	struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
2844 	struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
2845 	int num = ras_comp_int->num;
2846 	char buff[5]; /*  1 or 0 plus \n and \0 */
2847 	int size;
2848 
2849 	size = sprintf(buff, "%d\n", adapter->ras_comps[num].trace_on);
2850 
2851 	if (*ppos >= size)
2852 		return 0;
2853 
	if (copy_to_user(user_buf, buff, size))
		return -EFAULT;
2855 	*ppos += size;
2856 	return size;
2857 }
2858 
2859 static ssize_t tracing_write(struct file *file, const char __user *user_buf,
2860 			     size_t len, loff_t *ppos)
2861 {
2862 	struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
2863 	struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
2864 	int num = ras_comp_int->num;
2865 	union ibmvnic_crq crq;
2866 	unsigned long val;
2867 	char buff[9]; /* decimal max int plus \n and \0 */
2868 
	memset(buff, 0, sizeof(buff));
	if (copy_from_user(buff, user_buf, min(len, sizeof(buff) - 1)))
		return -EFAULT;
	if (kstrtoul(buff, 10, &val))
		return -EINVAL;
2871 
2872 	memset(&crq, 0, sizeof(crq));
2873 	crq.control_ras.first = IBMVNIC_CRQ_CMD;
2874 	crq.control_ras.cmd = CONTROL_RAS;
2875 	crq.control_ras.correlator = adapter->ras_comps[num].correlator;
	crq.control_ras.op = val ? IBMVNIC_TRACE_ON : IBMVNIC_TRACE_OFF;
	ibmvnic_send_crq(adapter, &crq);

	return len;
2879 }
2880 
2881 static const struct file_operations tracing_ops = {
2882 	.owner		= THIS_MODULE,
2883 	.open		= ibmvnic_fw_comp_open,
2884 	.read		= tracing_read,
2885 	.write		= tracing_write,
2886 };
2887 
2888 static ssize_t error_level_read(struct file *file, char __user *user_buf,
2889 				size_t len, loff_t *ppos)
2890 {
2891 	struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
2892 	struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
2893 	int num = ras_comp_int->num;
2894 	char buff[5]; /* decimal max char plus \n and \0 */
2895 	int size;
2896 
2897 	size = sprintf(buff, "%d\n", adapter->ras_comps[num].error_check_level);
2898 
2899 	if (*ppos >= size)
2900 		return 0;
2901 
	if (copy_to_user(user_buf, buff, size))
		return -EFAULT;
2903 	*ppos += size;
2904 	return size;
2905 }
2906 
2907 static ssize_t error_level_write(struct file *file, const char __user *user_buf,
2908 				 size_t len, loff_t *ppos)
2909 {
2910 	struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
2911 	struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
2912 	int num = ras_comp_int->num;
2913 	union ibmvnic_crq crq;
2914 	unsigned long val;
2915 	char buff[9]; /* decimal max int plus \n and \0 */
2916 
	memset(buff, 0, sizeof(buff));
	if (copy_from_user(buff, user_buf, min(len, sizeof(buff) - 1)))
		return -EFAULT;
	if (kstrtoul(buff, 10, &val))
		return -EINVAL;
2919 
2920 	if (val > 9)
2921 		val = 9;
2922 
2923 	memset(&crq, 0, sizeof(crq));
2924 	crq.control_ras.first = IBMVNIC_CRQ_CMD;
2925 	crq.control_ras.cmd = CONTROL_RAS;
2926 	crq.control_ras.correlator = adapter->ras_comps[num].correlator;
2927 	crq.control_ras.op = IBMVNIC_ERROR_LEVEL;
2928 	crq.control_ras.level = val;
2929 	ibmvnic_send_crq(adapter, &crq);
2930 
2931 	return len;
2932 }
2933 
2934 static const struct file_operations error_level_ops = {
2935 	.owner		= THIS_MODULE,
2936 	.open		= ibmvnic_fw_comp_open,
2937 	.read		= error_level_read,
2938 	.write		= error_level_write,
2939 };
2940 
2941 static ssize_t trace_level_read(struct file *file, char __user *user_buf,
2942 				size_t len, loff_t *ppos)
2943 {
2944 	struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
2945 	struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
2946 	int num = ras_comp_int->num;
2947 	char buff[5]; /* decimal max char plus \n and \0 */
2948 	int size;
2949 
2950 	size = sprintf(buff, "%d\n", adapter->ras_comps[num].trace_level);
2951 	if (*ppos >= size)
2952 		return 0;
2953 
	if (copy_to_user(user_buf, buff, size))
		return -EFAULT;
2955 	*ppos += size;
2956 	return size;
2957 }
2958 
2959 static ssize_t trace_level_write(struct file *file, const char __user *user_buf,
2960 				 size_t len, loff_t *ppos)
2961 {
2962 	struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
2963 	struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
2964 	union ibmvnic_crq crq;
2965 	unsigned long val;
2966 	char buff[9]; /* decimal max int plus \n and \0 */
2967 
	memset(buff, 0, sizeof(buff));
	if (copy_from_user(buff, user_buf, min(len, sizeof(buff) - 1)))
		return -EFAULT;
	if (kstrtoul(buff, 10, &val))
		return -EINVAL;
2970 	if (val > 9)
2971 		val = 9;
2972 
2973 	memset(&crq, 0, sizeof(crq));
2974 	crq.control_ras.first = IBMVNIC_CRQ_CMD;
2975 	crq.control_ras.cmd = CONTROL_RAS;
2976 	crq.control_ras.correlator =
2977 	    adapter->ras_comps[ras_comp_int->num].correlator;
2978 	crq.control_ras.op = IBMVNIC_TRACE_LEVEL;
2979 	crq.control_ras.level = val;
2980 	ibmvnic_send_crq(adapter, &crq);
2981 
2982 	return len;
2983 }
2984 
2985 static const struct file_operations trace_level_ops = {
2986 	.owner		= THIS_MODULE,
2987 	.open		= ibmvnic_fw_comp_open,
2988 	.read		= trace_level_read,
2989 	.write		= trace_level_write,
2990 };
2991 
2992 static ssize_t trace_buff_size_read(struct file *file, char __user *user_buf,
2993 				    size_t len, loff_t *ppos)
2994 {
2995 	struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
2996 	struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
2997 	int num = ras_comp_int->num;
2998 	char buff[9]; /* decimal max int plus \n and \0 */
2999 	int size;
3000 
	size = scnprintf(buff, sizeof(buff), "%u\n",
			 be32_to_cpu(adapter->ras_comps[num].trace_buff_size));
3002 	if (*ppos >= size)
3003 		return 0;
3004 
	if (copy_to_user(user_buf, buff, size))
		return -EFAULT;
3006 	*ppos += size;
3007 	return size;
3008 }
3009 
3010 static ssize_t trace_buff_size_write(struct file *file,
3011 				     const char __user *user_buf, size_t len,
3012 				     loff_t *ppos)
3013 {
3014 	struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
3015 	struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
3016 	union ibmvnic_crq crq;
3017 	unsigned long val;
3018 	char buff[9]; /* decimal max int plus \n and \0 */
3019 
	memset(buff, 0, sizeof(buff));
	if (copy_from_user(buff, user_buf, min(len, sizeof(buff) - 1)))
		return -EFAULT;
	if (kstrtoul(buff, 10, &val))
		return -EINVAL;
3022 
3023 	memset(&crq, 0, sizeof(crq));
3024 	crq.control_ras.first = IBMVNIC_CRQ_CMD;
3025 	crq.control_ras.cmd = CONTROL_RAS;
3026 	crq.control_ras.correlator =
3027 	    adapter->ras_comps[ras_comp_int->num].correlator;
3028 	crq.control_ras.op = IBMVNIC_CHG_TRACE_BUFF_SZ;
3029 	/* trace_buff_sz is 3 bytes, stuff an int into it */
3030 	crq.control_ras.trace_buff_sz[0] = ((u8 *)(&val))[5];
3031 	crq.control_ras.trace_buff_sz[1] = ((u8 *)(&val))[6];
3032 	crq.control_ras.trace_buff_sz[2] = ((u8 *)(&val))[7];
3033 	ibmvnic_send_crq(adapter, &crq);
3034 
3035 	return len;
3036 }
3037 
3038 static const struct file_operations trace_size_ops = {
3039 	.owner		= THIS_MODULE,
3040 	.open		= ibmvnic_fw_comp_open,
3041 	.read		= trace_buff_size_read,
3042 	.write		= trace_buff_size_write,
3043 };
3044 
3045 static void handle_request_ras_comps_rsp(union ibmvnic_crq *crq,
3046 					 struct ibmvnic_adapter *adapter)
3047 {
3048 	struct device *dev = &adapter->vdev->dev;
3049 	struct dentry *dir_ent;
3050 	struct dentry *ent;
3051 	int i;
3052 
3053 	debugfs_remove_recursive(adapter->ras_comps_ent);
3054 
3055 	adapter->ras_comps_ent = debugfs_create_dir("ras_comps",
3056 						    adapter->debugfs_dir);
3057 	if (!adapter->ras_comps_ent || IS_ERR(adapter->ras_comps_ent)) {
3058 		dev_info(dev, "debugfs create ras_comps dir failed\n");
3059 		return;
3060 	}
3061 
3062 	for (i = 0; i < adapter->ras_comp_num; i++) {
3063 		dir_ent = debugfs_create_dir(adapter->ras_comps[i].name,
3064 					     adapter->ras_comps_ent);
3065 		if (!dir_ent || IS_ERR(dir_ent)) {
3066 			dev_info(dev, "debugfs create %s dir failed\n",
3067 				 adapter->ras_comps[i].name);
3068 			continue;
3069 		}
3070 
3071 		adapter->ras_comp_int[i].adapter = adapter;
3072 		adapter->ras_comp_int[i].num = i;
3073 		adapter->ras_comp_int[i].desc_blob.data =
3074 		    &adapter->ras_comps[i].description;
3075 		adapter->ras_comp_int[i].desc_blob.size =
3076 		    sizeof(adapter->ras_comps[i].description);
3077 
3078 		/* Don't need to remember the dentry's because the debugfs dir
3079 		 * gets removed recursively
3080 		 */
3081 		ent = debugfs_create_blob("description", S_IRUGO, dir_ent,
3082 					  &adapter->ras_comp_int[i].desc_blob);
3083 		ent = debugfs_create_file("trace_buf_size", S_IRUGO | S_IWUSR,
3084 					  dir_ent, &adapter->ras_comp_int[i],
3085 					  &trace_size_ops);
3086 		ent = debugfs_create_file("trace_level",
3087 					  S_IRUGO |
3088 					  (adapter->ras_comps[i].trace_level !=
3089 					   0xFF  ? S_IWUSR : 0),
3090 					   dir_ent, &adapter->ras_comp_int[i],
3091 					   &trace_level_ops);
3092 		ent = debugfs_create_file("error_level",
3093 					  S_IRUGO |
3094 					  (adapter->
3095 					   ras_comps[i].error_check_level !=
3096 					   0xFF ? S_IWUSR : 0),
3097 					  dir_ent, &adapter->ras_comp_int[i],
					  &error_level_ops);
3099 		ent = debugfs_create_file("tracing", S_IRUGO | S_IWUSR,
3100 					  dir_ent, &adapter->ras_comp_int[i],
3101 					  &tracing_ops);
3102 		ent = debugfs_create_file("paused", S_IRUGO | S_IWUSR,
3103 					  dir_ent, &adapter->ras_comp_int[i],
3104 					  &paused_ops);
3105 		ent = debugfs_create_file("trace", S_IRUGO, dir_ent,
3106 					  &adapter->ras_comp_int[i],
3107 					  &trace_ops);
3108 	}
3109 }
3110 
3111 static void handle_request_ras_comp_num_rsp(union ibmvnic_crq *crq,
3112 					    struct ibmvnic_adapter *adapter)
3113 {
3114 	int len = adapter->ras_comp_num * sizeof(struct ibmvnic_fw_component);
3115 	struct device *dev = &adapter->vdev->dev;
3116 	union ibmvnic_crq newcrq;
3117 
3118 	adapter->ras_comps = dma_alloc_coherent(dev, len,
3119 						&adapter->ras_comps_tok,
3120 						GFP_KERNEL);
3121 	if (!adapter->ras_comps) {
3122 		if (!firmware_has_feature(FW_FEATURE_CMO))
3123 			dev_err(dev, "Couldn't alloc fw comps buffer\n");
3124 		return;
3125 	}
3126 
3127 	adapter->ras_comp_int = kmalloc(adapter->ras_comp_num *
3128 					sizeof(struct ibmvnic_fw_comp_internal),
3129 					GFP_KERNEL);
	if (!adapter->ras_comp_int) {
		dma_free_coherent(dev, len, adapter->ras_comps,
				  adapter->ras_comps_tok);
		return;
	}
3133 
3134 	memset(&newcrq, 0, sizeof(newcrq));
3135 	newcrq.request_ras_comps.first = IBMVNIC_CRQ_CMD;
3136 	newcrq.request_ras_comps.cmd = REQUEST_RAS_COMPS;
3137 	newcrq.request_ras_comps.ioba = cpu_to_be32(adapter->ras_comps_tok);
3138 	newcrq.request_ras_comps.len = cpu_to_be32(len);
3139 	ibmvnic_send_crq(adapter, &newcrq);
3140 }
3141 
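/* Drop every command still on the inflight list, undoing the DMA mappings
 * and allocations associated with it. Called when the CRQ connection is
 * lost and outstanding commands will never be answered.
 */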
3142 static void ibmvnic_free_inflight(struct ibmvnic_adapter *adapter)
3143 {
3144 	struct ibmvnic_inflight_cmd *inflight_cmd;
3145 	struct device *dev = &adapter->vdev->dev;
3146 	struct ibmvnic_error_buff *error_buff;
3147 	unsigned long flags;
3148 	unsigned long flags2;
3149 
3150 	spin_lock_irqsave(&adapter->inflight_lock, flags);
3151 	list_for_each_entry(inflight_cmd, &adapter->inflight, list) {
3152 		switch (inflight_cmd->crq.generic.cmd) {
3153 		case LOGIN:
3154 			dma_unmap_single(dev, adapter->login_buf_token,
3155 					 adapter->login_buf_sz,
3156 					 DMA_BIDIRECTIONAL);
3157 			dma_unmap_single(dev, adapter->login_rsp_buf_token,
3158 					 adapter->login_rsp_buf_sz,
3159 					 DMA_BIDIRECTIONAL);
3160 			kfree(adapter->login_rsp_buf);
3161 			kfree(adapter->login_buf);
3162 			break;
3163 		case REQUEST_DUMP:
3164 			complete(&adapter->fw_done);
3165 			break;
3166 		case REQUEST_ERROR_INFO:
3167 			spin_lock_irqsave(&adapter->error_list_lock, flags2);
3168 			list_for_each_entry(error_buff, &adapter->errors,
3169 					    list) {
3170 				dma_unmap_single(dev, error_buff->dma,
3171 						 error_buff->len,
3172 						 DMA_FROM_DEVICE);
3173 				kfree(error_buff->buff);
3174 				list_del(&error_buff->list);
3175 				kfree(error_buff);
3176 			}
3177 			spin_unlock_irqrestore(&adapter->error_list_lock,
3178 					       flags2);
3179 			break;
3180 		}
3181 		list_del(&inflight_cmd->list);
3182 		kfree(inflight_cmd);
3183 	}
3184 	spin_unlock_irqrestore(&adapter->inflight_lock, flags);
3185 }
3186 
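/* Dispatch a single CRQ message: transport events (partner init, partition
 * migration, adapter failure) are handled directly, and command responses
 * are routed to their respective handlers.
 */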
3187 static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
3188 			       struct ibmvnic_adapter *adapter)
3189 {
3190 	struct ibmvnic_generic_crq *gen_crq = &crq->generic;
3191 	struct net_device *netdev = adapter->netdev;
3192 	struct device *dev = &adapter->vdev->dev;
3193 	long rc;
3194 
3195 	netdev_dbg(netdev, "Handling CRQ: %016lx %016lx\n",
3196 		   ((unsigned long int *)crq)[0],
3197 		   ((unsigned long int *)crq)[1]);
3198 	switch (gen_crq->first) {
3199 	case IBMVNIC_CRQ_INIT_RSP:
3200 		switch (gen_crq->cmd) {
3201 		case IBMVNIC_CRQ_INIT:
3202 			dev_info(dev, "Partner initialized\n");
3203 			/* Send back a response */
3204 			rc = ibmvnic_send_crq_init_complete(adapter);
3205 			if (rc == 0)
3206 				send_version_xchg(adapter);
3207 			else
3208 				dev_err(dev, "Can't send initrsp rc=%ld\n", rc);
3209 			break;
3210 		case IBMVNIC_CRQ_INIT_COMPLETE:
3211 			dev_info(dev, "Partner initialization complete\n");
3212 			send_version_xchg(adapter);
3213 			break;
3214 		default:
3215 			dev_err(dev, "Unknown crq cmd: %d\n", gen_crq->cmd);
3216 		}
3217 		return;
3218 	case IBMVNIC_CRQ_XPORT_EVENT:
3219 		if (gen_crq->cmd == IBMVNIC_PARTITION_MIGRATED) {
3220 			dev_info(dev, "Re-enabling adapter\n");
3221 			adapter->migrated = true;
3222 			ibmvnic_free_inflight(adapter);
3223 			release_sub_crqs(adapter);
3224 			rc = ibmvnic_reenable_crq_queue(adapter);
3225 			if (rc)
3226 				dev_err(dev, "Error after enable rc=%ld\n", rc);
3227 			adapter->migrated = false;
3228 			rc = ibmvnic_send_crq_init(adapter);
3229 			if (rc)
3230 				dev_err(dev, "Error sending init rc=%ld\n", rc);
3231 		} else {
3232 			/* The adapter lost the connection */
3233 			dev_err(dev, "Virtual Adapter failed (rc=%d)\n",
3234 				gen_crq->cmd);
3235 			ibmvnic_free_inflight(adapter);
3236 			release_sub_crqs(adapter);
3237 		}
3238 		return;
3239 	case IBMVNIC_CRQ_CMD_RSP:
3240 		break;
3241 	default:
3242 		dev_err(dev, "Got an invalid msg type 0x%02x\n",
3243 			gen_crq->first);
3244 		return;
3245 	}
3246 
3247 	switch (gen_crq->cmd) {
3248 	case VERSION_EXCHANGE_RSP:
3249 		rc = crq->version_exchange_rsp.rc.code;
3250 		if (rc) {
3251 			dev_err(dev, "Error %ld in VERSION_EXCHG_RSP\n", rc);
3252 			break;
3253 		}
3254 		dev_info(dev, "Partner protocol version is %d\n",
3255 			 crq->version_exchange_rsp.version);
3256 		if (be16_to_cpu(crq->version_exchange_rsp.version) <
3257 		    ibmvnic_version)
3258 			ibmvnic_version =
3259 			    be16_to_cpu(crq->version_exchange_rsp.version);
3260 		send_cap_queries(adapter);
3261 		break;
3262 	case QUERY_CAPABILITY_RSP:
3263 		handle_query_cap_rsp(crq, adapter);
3264 		break;
3265 	case QUERY_MAP_RSP:
3266 		handle_query_map_rsp(crq, adapter);
3267 		break;
3268 	case REQUEST_MAP_RSP:
3269 		handle_request_map_rsp(crq, adapter);
3270 		break;
3271 	case REQUEST_UNMAP_RSP:
3272 		handle_request_unmap_rsp(crq, adapter);
3273 		break;
3274 	case REQUEST_CAPABILITY_RSP:
3275 		handle_request_cap_rsp(crq, adapter);
3276 		break;
3277 	case LOGIN_RSP:
3278 		netdev_dbg(netdev, "Got Login Response\n");
3279 		handle_login_rsp(crq, adapter);
3280 		break;
3281 	case LOGICAL_LINK_STATE_RSP:
3282 		netdev_dbg(netdev, "Got Logical Link State Response\n");
3283 		adapter->logical_link_state =
3284 		    crq->logical_link_state_rsp.link_state;
3285 		break;
3286 	case LINK_STATE_INDICATION:
3287 		netdev_dbg(netdev, "Got Link State Indication\n");
3288 		adapter->phys_link_state =
3289 		    crq->link_state_indication.phys_link_state;
3290 		adapter->logical_link_state =
3291 		    crq->link_state_indication.logical_link_state;
3292 		break;
3293 	case CHANGE_MAC_ADDR_RSP:
3294 		netdev_dbg(netdev, "Got MAC address change Response\n");
3295 		handle_change_mac_rsp(crq, adapter);
3296 		break;
3297 	case ERROR_INDICATION:
3298 		netdev_dbg(netdev, "Got Error Indication\n");
3299 		handle_error_indication(crq, adapter);
3300 		break;
3301 	case REQUEST_ERROR_RSP:
3302 		netdev_dbg(netdev, "Got Error Detail Response\n");
3303 		handle_error_info_rsp(crq, adapter);
3304 		break;
3305 	case REQUEST_STATISTICS_RSP:
3306 		netdev_dbg(netdev, "Got Statistics Response\n");
3307 		complete(&adapter->stats_done);
3308 		break;
3309 	case REQUEST_DUMP_SIZE_RSP:
3310 		netdev_dbg(netdev, "Got Request Dump Size Response\n");
3311 		handle_dump_size_rsp(crq, adapter);
3312 		break;
3313 	case REQUEST_DUMP_RSP:
3314 		netdev_dbg(netdev, "Got Request Dump Response\n");
3315 		complete(&adapter->fw_done);
3316 		break;
3317 	case QUERY_IP_OFFLOAD_RSP:
3318 		netdev_dbg(netdev, "Got Query IP offload Response\n");
3319 		handle_query_ip_offload_rsp(adapter);
3320 		break;
3321 	case MULTICAST_CTRL_RSP:
3322 		netdev_dbg(netdev, "Got multicast control Response\n");
3323 		break;
3324 	case CONTROL_IP_OFFLOAD_RSP:
3325 		netdev_dbg(netdev, "Got Control IP offload Response\n");
3326 		dma_unmap_single(dev, adapter->ip_offload_ctrl_tok,
3327 				 sizeof(adapter->ip_offload_ctrl),
3328 				 DMA_TO_DEVICE);
3329 		/* We're done with the queries, perform the login */
3330 		send_login(adapter);
3331 		break;
3332 	case REQUEST_RAS_COMP_NUM_RSP:
3333 		netdev_dbg(netdev, "Got Request RAS Comp Num Response\n");
3334 		if (crq->request_ras_comp_num_rsp.rc.code == 10) {
3335 			netdev_dbg(netdev, "Request RAS Comp Num not supported\n");
3336 			break;
3337 		}
3338 		adapter->ras_comp_num =
3339 		    be32_to_cpu(crq->request_ras_comp_num_rsp.num_components);
3340 		handle_request_ras_comp_num_rsp(crq, adapter);
3341 		break;
3342 	case REQUEST_RAS_COMPS_RSP:
3343 		netdev_dbg(netdev, "Got Request RAS Comps Response\n");
3344 		handle_request_ras_comps_rsp(crq, adapter);
3345 		break;
3346 	case CONTROL_RAS_RSP:
3347 		netdev_dbg(netdev, "Got Control RAS Response\n");
3348 		handle_control_ras_rsp(crq, adapter);
3349 		break;
3350 	case COLLECT_FW_TRACE_RSP:
3351 		netdev_dbg(netdev, "Got Collect firmware trace Response\n");
3352 		complete(&adapter->fw_done);
3353 		break;
3354 	default:
3355 		netdev_err(netdev, "Got an invalid cmd type 0x%02x\n",
3356 			   gen_crq->cmd);
3357 	}
3358 }
3359 
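/* Main CRQ interrupt handler. With the queue lock held, drain all pending
 * messages, then re-enable the VIO interrupt and poll once more so that a
 * message arriving in the re-enable window is not left unserviced.
 */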
3360 static irqreturn_t ibmvnic_interrupt(int irq, void *instance)
3361 {
3362 	struct ibmvnic_adapter *adapter = instance;
3363 	struct ibmvnic_crq_queue *queue = &adapter->crq;
3364 	struct vio_dev *vdev = adapter->vdev;
3365 	union ibmvnic_crq *crq;
3366 	unsigned long flags;
3367 	bool done = false;
3368 
3369 	spin_lock_irqsave(&queue->lock, flags);
3370 	vio_disable_interrupts(vdev);
3371 	while (!done) {
3372 		/* Pull all the valid messages off the CRQ */
3373 		while ((crq = ibmvnic_next_crq(adapter)) != NULL) {
3374 			ibmvnic_handle_crq(crq, adapter);
3375 			crq->generic.first = 0;
3376 		}
3377 		vio_enable_interrupts(vdev);
3378 		crq = ibmvnic_next_crq(adapter);
3379 		if (crq) {
3380 			vio_disable_interrupts(vdev);
3381 			ibmvnic_handle_crq(crq, adapter);
3382 			crq->generic.first = 0;
3383 		} else {
3384 			done = true;
3385 		}
3386 	}
3387 	spin_unlock_irqrestore(&queue->lock, flags);
3388 	return IRQ_HANDLED;
3389 }
3390 
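/* Re-enable the main CRQ after a transport event, retrying the
 * H_ENABLE_CRQ hcall while the hypervisor reports it busy or in progress.
 */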
3391 static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *adapter)
3392 {
3393 	struct vio_dev *vdev = adapter->vdev;
3394 	int rc;
3395 
3396 	do {
3397 		rc = plpar_hcall_norets(H_ENABLE_CRQ, vdev->unit_address);
3398 	} while (rc == H_IN_PROGRESS || rc == H_BUSY || H_IS_LONG_BUSY(rc));
3399 
3400 	if (rc)
3401 		dev_err(&vdev->dev, "Error enabling adapter (rc=%d)\n", rc);
3402 
3403 	return rc;
3404 }
3405 
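/* Reset the main CRQ: deregister it from the hypervisor (retrying while
 * busy), clear the queue page, and register it again.
 */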
3406 static int ibmvnic_reset_crq(struct ibmvnic_adapter *adapter)
3407 {
3408 	struct ibmvnic_crq_queue *crq = &adapter->crq;
3409 	struct device *dev = &adapter->vdev->dev;
3410 	struct vio_dev *vdev = adapter->vdev;
3411 	int rc;
3412 
3413 	/* Close the CRQ */
3414 	do {
3415 		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
3416 	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
3417 
3418 	/* Clean out the queue */
3419 	memset(crq->msgs, 0, PAGE_SIZE);
3420 	crq->cur = 0;
3421 
3422 	/* And re-open it */
3423 	rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
3424 				crq->msg_token, PAGE_SIZE);
3425 
3426 	if (rc == H_CLOSED)
3427 		/* Adapter is good, but other end is not ready */
3428 		dev_warn(dev, "Partner adapter not ready\n");
3429 	else if (rc != 0)
3430 		dev_warn(dev, "Couldn't register crq (rc=%d)\n", rc);
3431 
3432 	return rc;
3433 }
3434 
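/* Release the main CRQ: free its interrupt, deregister the queue from the
 * hypervisor, and unmap and free the queue page.
 */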
3435 static void ibmvnic_release_crq_queue(struct ibmvnic_adapter *adapter)
3436 {
3437 	struct ibmvnic_crq_queue *crq = &adapter->crq;
3438 	struct vio_dev *vdev = adapter->vdev;
3439 	long rc;
3440 
3441 	netdev_dbg(adapter->netdev, "Releasing CRQ\n");
3442 	free_irq(vdev->irq, adapter);
3443 	do {
3444 		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
3445 	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
3446 
3447 	dma_unmap_single(&vdev->dev, crq->msg_token, PAGE_SIZE,
3448 			 DMA_BIDIRECTIONAL);
3449 	free_page((unsigned long)crq->msgs);
3450 }
3451 
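/* Allocate a page for the main CRQ, DMA-map it, and register it with the
 * hypervisor, falling back to a CRQ reset if a stale queue is still
 * registered (e.g. after a kexec). Then request and enable the CRQ
 * interrupt.
 */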
3452 static int ibmvnic_init_crq_queue(struct ibmvnic_adapter *adapter)
3453 {
3454 	struct ibmvnic_crq_queue *crq = &adapter->crq;
3455 	struct device *dev = &adapter->vdev->dev;
3456 	struct vio_dev *vdev = adapter->vdev;
3457 	int rc, retrc = -ENOMEM;
3458 
3459 	crq->msgs = (union ibmvnic_crq *)get_zeroed_page(GFP_KERNEL);
3460 	/* Should we allocate more than one page? */
3461 
3462 	if (!crq->msgs)
3463 		return -ENOMEM;
3464 
3465 	crq->size = PAGE_SIZE / sizeof(*crq->msgs);
3466 	crq->msg_token = dma_map_single(dev, crq->msgs, PAGE_SIZE,
3467 					DMA_BIDIRECTIONAL);
3468 	if (dma_mapping_error(dev, crq->msg_token))
3469 		goto map_failed;
3470 
3471 	rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
3472 				crq->msg_token, PAGE_SIZE);
3473 
3474 	if (rc == H_RESOURCE)
3475 		/* maybe kexecing and resource is busy. try a reset */
3476 		rc = ibmvnic_reset_crq(adapter);
3477 	retrc = rc;
3478 
3479 	if (rc == H_CLOSED) {
3480 		dev_warn(dev, "Partner adapter not ready\n");
3481 	} else if (rc) {
3482 		dev_warn(dev, "Error %d opening adapter\n", rc);
3483 		goto reg_crq_failed;
3484 	}
3485 
3486 	retrc = 0;
3487 
3488 	netdev_dbg(adapter->netdev, "registering irq 0x%x\n", vdev->irq);
3489 	rc = request_irq(vdev->irq, ibmvnic_interrupt, 0, IBMVNIC_NAME,
3490 			 adapter);
3491 	if (rc) {
3492 		dev_err(dev, "Couldn't register irq 0x%x. rc=%d\n", vdev->irq, rc);
3493 		retrc = rc;
3494 		goto req_irq_failed;
3495 	}
3496 	rc = vio_enable_interrupts(vdev);
3497 	if (rc) {
3498 		dev_err(dev, "Error %d enabling interrupts\n", rc);
3499 		retrc = rc;
3500 		goto req_irq_failed;
3501 	}
3502 
3503 	crq->cur = 0;
3504 	spin_lock_init(&crq->lock);
3505 
3506 	return retrc;
3507 
3508 req_irq_failed:
3509 	do {
3510 		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
3511 	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
3512 reg_crq_failed:
3513 	dma_unmap_single(dev, crq->msg_token, PAGE_SIZE, DMA_BIDIRECTIONAL);
3514 map_failed:
3515 	free_page((unsigned long)crq->msgs);
3516 	return retrc;
3517 }
3518 
3519 /* debugfs for dump */
3520 static int ibmvnic_dump_show(struct seq_file *seq, void *v)
3521 {
3522 	struct net_device *netdev = seq->private;
3523 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
3524 	struct device *dev = &adapter->vdev->dev;
3525 	union ibmvnic_crq crq;
3526 
3527 	memset(&crq, 0, sizeof(crq));
3528 	crq.request_dump_size.first = IBMVNIC_CRQ_CMD;
3529 	crq.request_dump_size.cmd = REQUEST_DUMP_SIZE;
3530 	init_completion(&adapter->fw_done);
3531 	ibmvnic_send_crq(adapter, &crq);
3532 
3533 	wait_for_completion(&adapter->fw_done);
3534 
3535 	seq_write(seq, adapter->dump_data, adapter->dump_data_size);
3536 
3537 	dma_unmap_single(dev, adapter->dump_data_token, adapter->dump_data_size,
3538 			 DMA_BIDIRECTIONAL);
3539 
3540 	kfree(adapter->dump_data);
3541 
3542 	return 0;
3543 }
3544 
3545 static int ibmvnic_dump_open(struct inode *inode, struct file *file)
3546 {
3547 	return single_open(file, ibmvnic_dump_show, inode->i_private);
3548 }
3549 
3550 static const struct file_operations ibmvnic_dump_ops = {
3551 	.owner          = THIS_MODULE,
3552 	.open           = ibmvnic_dump_open,
3553 	.read           = seq_read,
3554 	.llseek         = seq_lseek,
3555 	.release        = single_release,
3556 };
3557 
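/* Probe: read the MAC address from the device tree, allocate and set up
 * the net_device, bring up the main CRQ, perform the init and capability
 * handshake with the VNIC server (renegotiating and retrying sub-CRQ
 * setup as needed), create the debugfs dump entry, and register the
 * netdev.
 */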
3558 static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
3559 {
3560 	struct ibmvnic_adapter *adapter;
3561 	struct net_device *netdev;
3562 	unsigned char *mac_addr_p;
3563 	struct dentry *ent;
3564 	char buf[16]; /* debugfs name buf */
3565 	int rc;
3566 
3567 	dev_dbg(&dev->dev, "entering ibmvnic_probe for UA 0x%x\n",
3568 		dev->unit_address);
3569 
3570 	mac_addr_p = (unsigned char *)vio_get_attribute(dev,
3571 							VETH_MAC_ADDR, NULL);
3572 	if (!mac_addr_p) {
3573 		dev_err(&dev->dev,
3574 			"(%s:%3.3d) ERROR: Can't find MAC_ADDR attribute\n",
3575 			__FILE__, __LINE__);
3576 		return 0;
3577 	}
3578 
3579 	netdev = alloc_etherdev_mq(sizeof(struct ibmvnic_adapter),
3580 				   IBMVNIC_MAX_TX_QUEUES);
3581 	if (!netdev)
3582 		return -ENOMEM;
3583 
3584 	adapter = netdev_priv(netdev);
3585 	dev_set_drvdata(&dev->dev, netdev);
3586 	adapter->vdev = dev;
3587 	adapter->netdev = netdev;
3588 
3589 	ether_addr_copy(adapter->mac_addr, mac_addr_p);
3590 	ether_addr_copy(netdev->dev_addr, adapter->mac_addr);
3591 	netdev->irq = dev->irq;
3592 	netdev->netdev_ops = &ibmvnic_netdev_ops;
3593 	netdev->ethtool_ops = &ibmvnic_ethtool_ops;
3594 	SET_NETDEV_DEV(netdev, &dev->dev);
3595 
3596 	spin_lock_init(&adapter->stats_lock);
3597 
3598 	rc = ibmvnic_init_crq_queue(adapter);
3599 	if (rc) {
3600 		dev_err(&dev->dev, "Couldn't initialize crq. rc=%d\n", rc);
3601 		goto free_netdev;
3602 	}
3603 
3604 	INIT_LIST_HEAD(&adapter->errors);
3605 	INIT_LIST_HEAD(&adapter->inflight);
3606 	spin_lock_init(&adapter->error_list_lock);
3607 	spin_lock_init(&adapter->inflight_lock);
3608 
3609 	adapter->stats_token = dma_map_single(&dev->dev, &adapter->stats,
3610 					      sizeof(struct ibmvnic_statistics),
3611 					      DMA_FROM_DEVICE);
3612 	if (dma_mapping_error(&dev->dev, adapter->stats_token)) {
3613 		if (!firmware_has_feature(FW_FEATURE_CMO))
3614 			dev_err(&dev->dev, "Couldn't map stats buffer\n");
3615 		goto free_crq;
3616 	}
3617 
3618 	snprintf(buf, sizeof(buf), "ibmvnic_%x", dev->unit_address);
3619 	ent = debugfs_create_dir(buf, NULL);
3620 	if (!ent || IS_ERR(ent)) {
3621 		dev_info(&dev->dev, "debugfs create directory failed\n");
3622 		adapter->debugfs_dir = NULL;
3623 	} else {
3624 		adapter->debugfs_dir = ent;
3625 		ent = debugfs_create_file("dump", S_IRUGO, adapter->debugfs_dir,
3626 					  netdev, &ibmvnic_dump_ops);
3627 		if (!ent || IS_ERR(ent)) {
3628 			dev_info(&dev->dev,
3629 				 "debugfs create dump file failed\n");
3630 			adapter->debugfs_dump = NULL;
3631 		} else {
3632 			adapter->debugfs_dump = ent;
3633 		}
3634 	}
3635 	init_completion(&adapter->init_done);
3636 	ibmvnic_send_crq_init(adapter);
3637 
3638 	wait_for_completion(&adapter->init_done);
3639 
3640 	do {
3641 		adapter->renegotiate = false;
3642 
3643 		init_sub_crqs(adapter, 0);
3644 		reinit_completion(&adapter->init_done);
3645 		wait_for_completion(&adapter->init_done);
3646 
3647 		if (adapter->renegotiate) {
3648 			release_sub_crqs(adapter);
3649 			send_cap_queries(adapter);
3650 
3651 			reinit_completion(&adapter->init_done);
3652 			wait_for_completion(&adapter->init_done);
3653 		}
3654 	} while (adapter->renegotiate);
3655 
3656 	/* if init_sub_crqs is partially successful, retry */
3657 	while (!adapter->tx_scrq || !adapter->rx_scrq) {
3658 		init_sub_crqs(adapter, 1);
3659 
3660 		reinit_completion(&adapter->init_done);
3661 		wait_for_completion(&adapter->init_done);
3662 	}
3663 
3664 	netdev->real_num_tx_queues = adapter->req_tx_queues;
3665 
3666 	rc = register_netdev(netdev);
3667 	if (rc) {
3668 		dev_err(&dev->dev, "failed to register netdev rc=%d\n", rc);
3669 		goto free_debugfs;
3670 	}
3671 	dev_info(&dev->dev, "ibmvnic registered\n");
3672 
3673 	return 0;
3674 
3675 free_debugfs:
3676 	if (adapter->debugfs_dir && !IS_ERR(adapter->debugfs_dir))
3677 		debugfs_remove_recursive(adapter->debugfs_dir);
3678 free_crq:
3679 	ibmvnic_release_crq_queue(adapter);
3680 free_netdev:
3681 	free_netdev(netdev);
3682 	return rc;
3683 }
3684 
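/* Remove: unregister the netdev, tear down the sub-CRQs and the main CRQ,
 * remove the debugfs entries, and free the RAS component buffers and the
 * netdev itself.
 */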
3685 static int ibmvnic_remove(struct vio_dev *dev)
3686 {
3687 	struct net_device *netdev = dev_get_drvdata(&dev->dev);
3688 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
3689 
3690 	unregister_netdev(netdev);
3691 
3692 	release_sub_crqs(adapter);
3693 
3694 	ibmvnic_release_crq_queue(adapter);
3695 
3696 	if (adapter->debugfs_dir && !IS_ERR(adapter->debugfs_dir))
3697 		debugfs_remove_recursive(adapter->debugfs_dir);
3698 
3699 	if (adapter->ras_comps)
3700 		dma_free_coherent(&dev->dev,
3701 				  adapter->ras_comp_num *
3702 				  sizeof(struct ibmvnic_fw_component),
3703 				  adapter->ras_comps, adapter->ras_comps_tok);
3704 
3705 	kfree(adapter->ras_comp_int);
3706 
3707 	free_netdev(netdev);
3708 	dev_set_drvdata(&dev->dev, NULL);
3709 
3710 	return 0;
3711 }
3712 
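/* vio callback reporting how much IO entitlement (DMA mapping space) the
 * device wants: the CRQ page, bounce buffer, statistics buffer, sub-CRQ
 * pages, and rx pool buffers. A default is returned before probe has
 * populated the adapter.
 */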
3713 static unsigned long ibmvnic_get_desired_dma(struct vio_dev *vdev)
3714 {
3715 	struct net_device *netdev = dev_get_drvdata(&vdev->dev);
3716 	struct ibmvnic_adapter *adapter;
3717 	struct iommu_table *tbl;
3718 	unsigned long ret = 0;
3719 	int i;
3720 
3721 	tbl = get_iommu_table_base(&vdev->dev);
3722 
3723 	/* netdev inits at probe time along with the structures we need below*/
3724 	/* netdev inits at probe time along with the structures we need below */
3725 		return IOMMU_PAGE_ALIGN(IBMVNIC_IO_ENTITLEMENT_DEFAULT, tbl);
3726 
3727 	adapter = netdev_priv(netdev);
3728 
3729 	ret += PAGE_SIZE; /* the crq message queue */
3730 	ret += adapter->bounce_buffer_size;
3731 	ret += IOMMU_PAGE_ALIGN(sizeof(struct ibmvnic_statistics), tbl);
3732 
3733 	for (i = 0; i < adapter->req_tx_queues + adapter->req_rx_queues; i++)
3734 		ret += 4 * PAGE_SIZE; /* the scrq message queue */
3735 
3736 	for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
3737 	     i++)
3738 		ret += adapter->rx_pool[i].size *
3739 		    IOMMU_PAGE_ALIGN(adapter->rx_pool[i].buff_size, tbl);
3740 
3741 	return ret;
3742 }
3743 
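/* PM resume callback: re-run the rx sub-CRQ interrupt handlers in case an
 * interrupt was lost while the partition was suspended.
 */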
3744 static int ibmvnic_resume(struct device *dev)
3745 {
3746 	struct net_device *netdev = dev_get_drvdata(dev);
3747 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
3748 	int i;
3749 
3750 	/* kick the interrupt handlers just in case we lost an interrupt */
3751 	for (i = 0; i < adapter->req_rx_queues; i++)
3752 		ibmvnic_interrupt_rx(adapter->rx_scrq[i]->irq,
3753 				     adapter->rx_scrq[i]);
3754 
3755 	return 0;
3756 }
3757 
3758 static struct vio_device_id ibmvnic_device_table[] = {
3759 	{"network", "IBM,vnic"},
3760 	{"", "" }
3761 };
3762 MODULE_DEVICE_TABLE(vio, ibmvnic_device_table);
3763 
3764 static const struct dev_pm_ops ibmvnic_pm_ops = {
3765 	.resume = ibmvnic_resume
3766 };
3767 
3768 static struct vio_driver ibmvnic_driver = {
3769 	.id_table       = ibmvnic_device_table,
3770 	.probe          = ibmvnic_probe,
3771 	.remove         = ibmvnic_remove,
3772 	.get_desired_dma = ibmvnic_get_desired_dma,
3773 	.name		= ibmvnic_driver_name,
3774 	.pm		= &ibmvnic_pm_ops,
3775 };
3776 
3777 /* module functions */
3778 static int __init ibmvnic_module_init(void)
3779 {
3780 	pr_info("%s: %s %s\n", ibmvnic_driver_name, ibmvnic_driver_string,
3781 		IBMVNIC_DRIVER_VERSION);
3782 
3783 	return vio_register_driver(&ibmvnic_driver);
3784 }
3785 
3786 static void __exit ibmvnic_module_exit(void)
3787 {
3788 	vio_unregister_driver(&ibmvnic_driver);
3789 }
3790 
3791 module_init(ibmvnic_module_init);
3792 module_exit(ibmvnic_module_exit);
3793