xref: /linux/drivers/net/hyperv/netvsc.c (revision 97f0b13452198290799fd6780f05fbaa74f927d3)
1 /*
2  * Copyright (c) 2009, Microsoft Corporation.
3  *
4  * This program is free software; you can redistribute it and/or modify it
5  * under the terms and conditions of the GNU General Public License,
6  * version 2, as published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope it will be useful, but WITHOUT
9  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
11  * more details.
12  *
13  * You should have received a copy of the GNU General Public License along with
14  * this program; if not, see <http://www.gnu.org/licenses/>.
15  *
16  * Authors:
17  *   Haiyang Zhang <haiyangz@microsoft.com>
18  *   Hank Janssen  <hjanssen@microsoft.com>
19  */
20 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
21 
22 #include <linux/kernel.h>
23 #include <linux/sched.h>
24 #include <linux/wait.h>
25 #include <linux/mm.h>
26 #include <linux/delay.h>
27 #include <linux/io.h>
28 #include <linux/slab.h>
29 #include <linux/netdevice.h>
30 #include <linux/if_ether.h>
31 #include <linux/vmalloc.h>
32 #include <asm/sync_bitops.h>
33 
34 #include "hyperv_net.h"
35 
36 
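/*
 * alloc_net_device - Allocate and initialize a netvsc_device for @device:
 * set up the channel callback buffer, the drain waitqueue and the
 * multi-send (batching) locks, then register it as the hv_device
 * driver data.
 */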
37 static struct netvsc_device *alloc_net_device(struct hv_device *device)
38 {
39 	struct netvsc_device *net_device;
40 	struct net_device *ndev = hv_get_drvdata(device);
41 	int i;
42 
43 	net_device = kzalloc(sizeof(struct netvsc_device), GFP_KERNEL);
44 	if (!net_device)
45 		return NULL;
46 
47 	net_device->cb_buffer = kzalloc(NETVSC_PACKET_SIZE, GFP_KERNEL);
48 	if (!net_device->cb_buffer) {
49 		kfree(net_device);
50 		return NULL;
51 	}
52 
53 	init_waitqueue_head(&net_device->wait_drain);
54 	net_device->start_remove = false;
55 	net_device->destroy = false;
56 	net_device->dev = device;
57 	net_device->ndev = ndev;
58 	net_device->max_pkt = RNDIS_MAX_PKT_DEFAULT;
59 	net_device->pkt_align = RNDIS_PKT_ALIGN_DEFAULT;
60 
61 	for (i = 0; i < num_online_cpus(); i++)
62 		spin_lock_init(&net_device->msd[i].lock);
63 
64 	hv_set_drvdata(device, net_device);
65 	return net_device;
66 }
67 
68 static void free_netvsc_device(struct netvsc_device *nvdev)
69 {
70 	kfree(nvdev->cb_buffer);
71 	kfree(nvdev);
72 }
73 
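/*
 * get_outbound_net_device - Return the netvsc_device for the send path;
 * NULL once teardown has started (net_device->destroy is set).
 */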
74 static struct netvsc_device *get_outbound_net_device(struct hv_device *device)
75 {
76 	struct netvsc_device *net_device;
77 
78 	net_device = hv_get_drvdata(device);
79 	if (net_device && net_device->destroy)
80 		net_device = NULL;
81 
82 	return net_device;
83 }
84 
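/*
 * get_inbound_net_device - Return the netvsc_device for the receive path.
 * Unlike the outbound variant it stays valid while sends are still
 * outstanding, so their completions can be processed during teardown.
 */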
85 static struct netvsc_device *get_inbound_net_device(struct hv_device *device)
86 {
87 	struct netvsc_device *net_device;
88 
89 	net_device = hv_get_drvdata(device);
90 
91 	if (!net_device)
92 		goto get_in_err;
93 
94 	if (net_device->destroy &&
95 		atomic_read(&net_device->num_outstanding_sends) == 0)
96 		net_device = NULL;
97 
98 get_in_err:
99 	return net_device;
100 }
101 
102 
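/*
 * netvsc_destroy_buf - Revoke the receive and send buffers from the host,
 * tear down their GPADLs and free the guest-side allocations.
 */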
103 static int netvsc_destroy_buf(struct netvsc_device *net_device)
104 {
105 	struct nvsp_message *revoke_packet;
106 	int ret = 0;
107 	struct net_device *ndev = net_device->ndev;
108 
109 	/*
110 	 * If we got a section count, it means we received a
111 	 * SendReceiveBufferComplete msg (i.e. we sent a
112 	 * NvspMessage1TypeSendReceiveBuffer msg), so we need
113 	 * to send a revoke msg here.
114 	 */
115 	if (net_device->recv_section_cnt) {
116 		/* Send the revoke receive buffer */
117 		revoke_packet = &net_device->revoke_packet;
118 		memset(revoke_packet, 0, sizeof(struct nvsp_message));
119 
120 		revoke_packet->hdr.msg_type =
121 			NVSP_MSG1_TYPE_REVOKE_RECV_BUF;
122 		revoke_packet->msg.v1_msg.
123 		revoke_recv_buf.id = NETVSC_RECEIVE_BUFFER_ID;
124 
125 		ret = vmbus_sendpacket(net_device->dev->channel,
126 				       revoke_packet,
127 				       sizeof(struct nvsp_message),
128 				       (unsigned long)revoke_packet,
129 				       VM_PKT_DATA_INBAND, 0);
130 		/*
131 		 * If we failed here, we might as well return and
132 		 * have a leak rather than continue and risk a bugcheck.
133 		 */
134 		if (ret != 0) {
135 			netdev_err(ndev, "unable to send "
136 				"revoke receive buffer to netvsp\n");
137 			return ret;
138 		}
139 	}
140 
141 	/* Teardown the gpadl on the vsp end */
142 	if (net_device->recv_buf_gpadl_handle) {
143 		ret = vmbus_teardown_gpadl(net_device->dev->channel,
144 			   net_device->recv_buf_gpadl_handle);
145 
146 		/* If we failed here, we might as well return and have a leak
147 		 * rather than continue and risk a bugcheck.
148 		 */
149 		if (ret != 0) {
150 			netdev_err(ndev,
151 				   "unable to teardown receive buffer's gpadl\n");
152 			return ret;
153 		}
154 		net_device->recv_buf_gpadl_handle = 0;
155 	}
156 
157 	if (net_device->recv_buf) {
158 		/* Free up the receive buffer */
159 		vfree(net_device->recv_buf);
160 		net_device->recv_buf = NULL;
161 	}
162 
163 	if (net_device->recv_section) {
164 		net_device->recv_section_cnt = 0;
165 		kfree(net_device->recv_section);
166 		net_device->recv_section = NULL;
167 	}
168 
169 	/* Deal with the send buffer we may have set up.
170 	 * If we got a send section size, it means we received a
171 	 * NVSP_MSG1_TYPE_SEND_SEND_BUF_COMPLETE msg (i.e. we sent a
172 	 * NVSP_MSG1_TYPE_SEND_SEND_BUF msg), so we need to
173 	 * send a revoke msg here.
174 	 */
175 	if (net_device->send_section_size) {
176 		/* Send the revoke send buffer */
177 		revoke_packet = &net_device->revoke_packet;
178 		memset(revoke_packet, 0, sizeof(struct nvsp_message));
179 
180 		revoke_packet->hdr.msg_type =
181 			NVSP_MSG1_TYPE_REVOKE_SEND_BUF;
182 		revoke_packet->msg.v1_msg.revoke_send_buf.id =
183 			NETVSC_SEND_BUFFER_ID;
184 
185 		ret = vmbus_sendpacket(net_device->dev->channel,
186 				       revoke_packet,
187 				       sizeof(struct nvsp_message),
188 				       (unsigned long)revoke_packet,
189 				       VM_PKT_DATA_INBAND, 0);
190 		/* If we failed here, we might as well return and
191 		 * have a leak rather than continue and risk a bugcheck.
192 		 */
193 		if (ret != 0) {
194 			netdev_err(ndev, "unable to send "
195 				   "revoke send buffer to netvsp\n");
196 			return ret;
197 		}
198 	}
199 	/* Teardown the gpadl on the vsp end */
200 	if (net_device->send_buf_gpadl_handle) {
201 		ret = vmbus_teardown_gpadl(net_device->dev->channel,
202 					   net_device->send_buf_gpadl_handle);
203 
204 		/* If we failed here, we might as well return and have a leak
205 		 * rather than continue and risk a bugcheck.
206 		 */
207 		if (ret != 0) {
208 			netdev_err(ndev,
209 				   "unable to teardown send buffer's gpadl\n");
210 			return ret;
211 		}
212 		net_device->send_buf_gpadl_handle = 0;
213 	}
214 	if (net_device->send_buf) {
215 		/* Free up the send buffer */
216 		vfree(net_device->send_buf);
217 		net_device->send_buf = NULL;
218 	}
219 	kfree(net_device->send_section_map);
220 
221 	return ret;
222 }
223 
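/*
 * netvsc_init_buf - Allocate the receive and send buffers, establish their
 * GPADLs on the channel and hand them to the NetVSP, then record the
 * section layout reported back by the host.
 */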
224 static int netvsc_init_buf(struct hv_device *device)
225 {
226 	int ret = 0;
227 	unsigned long t;
228 	struct netvsc_device *net_device;
229 	struct nvsp_message *init_packet;
230 	struct net_device *ndev;
231 
232 	net_device = get_outbound_net_device(device);
233 	if (!net_device)
234 		return -ENODEV;
235 	ndev = net_device->ndev;
236 
237 	net_device->recv_buf = vzalloc(net_device->recv_buf_size);
238 	if (!net_device->recv_buf) {
239 		netdev_err(ndev, "unable to allocate receive "
240 			"buffer of size %d\n", net_device->recv_buf_size);
241 		ret = -ENOMEM;
242 		goto cleanup;
243 	}
244 
245 	/*
246 	 * Establish the gpadl handle for this buffer on this
247 	 * channel.  Note: This call uses the vmbus connection rather
248 	 * than the channel to establish the gpadl handle.
249 	 */
250 	ret = vmbus_establish_gpadl(device->channel, net_device->recv_buf,
251 				    net_device->recv_buf_size,
252 				    &net_device->recv_buf_gpadl_handle);
253 	if (ret != 0) {
254 		netdev_err(ndev,
255 			"unable to establish receive buffer's gpadl\n");
256 		goto cleanup;
257 	}
258 
259 
260 	/* Notify the NetVsp of the gpadl handle */
261 	init_packet = &net_device->channel_init_pkt;
262 
263 	memset(init_packet, 0, sizeof(struct nvsp_message));
264 
265 	init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_RECV_BUF;
266 	init_packet->msg.v1_msg.send_recv_buf.
267 		gpadl_handle = net_device->recv_buf_gpadl_handle;
268 	init_packet->msg.v1_msg.
269 		send_recv_buf.id = NETVSC_RECEIVE_BUFFER_ID;
270 
271 	/* Send the gpadl notification request */
272 	ret = vmbus_sendpacket(device->channel, init_packet,
273 			       sizeof(struct nvsp_message),
274 			       (unsigned long)init_packet,
275 			       VM_PKT_DATA_INBAND,
276 			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
277 	if (ret != 0) {
278 		netdev_err(ndev,
279 			"unable to send receive buffer's gpadl to netvsp\n");
280 		goto cleanup;
281 	}
282 
283 	t = wait_for_completion_timeout(&net_device->channel_init_wait, 5*HZ);
284 	BUG_ON(t == 0);
285 
286 
287 	/* Check the response */
288 	if (init_packet->msg.v1_msg.
289 	    send_recv_buf_complete.status != NVSP_STAT_SUCCESS) {
290 		netdev_err(ndev, "Unable to complete receive buffer "
291 			   "initialization with NetVsp - status %d\n",
292 			   init_packet->msg.v1_msg.
293 			   send_recv_buf_complete.status);
294 		ret = -EINVAL;
295 		goto cleanup;
296 	}
297 
298 	/* Parse the response */
299 
300 	net_device->recv_section_cnt = init_packet->msg.
301 		v1_msg.send_recv_buf_complete.num_sections;
302 
303 	net_device->recv_section = kmemdup(
304 		init_packet->msg.v1_msg.send_recv_buf_complete.sections,
305 		net_device->recv_section_cnt *
306 		sizeof(struct nvsp_1_receive_buffer_section),
307 		GFP_KERNEL);
308 	if (net_device->recv_section == NULL) {
309 		ret = -EINVAL;
310 		goto cleanup;
311 	}
312 
313 	/*
314 	 * For 1st release, there should only be 1 section that represents the
315 	 * entire receive buffer
316 	 */
317 	if (net_device->recv_section_cnt != 1 ||
318 	    net_device->recv_section->offset != 0) {
319 		ret = -EINVAL;
320 		goto cleanup;
321 	}
322 
323 	/* Now set up the send buffer.
324 	 */
325 	net_device->send_buf = vzalloc(net_device->send_buf_size);
326 	if (!net_device->send_buf) {
327 		netdev_err(ndev, "unable to allocate send "
328 			   "buffer of size %d\n", net_device->send_buf_size);
329 		ret = -ENOMEM;
330 		goto cleanup;
331 	}
332 
333 	/* Establish the gpadl handle for this buffer on this
334 	 * channel.  Note: This call uses the vmbus connection rather
335 	 * than the channel to establish the gpadl handle.
336 	 */
337 	ret = vmbus_establish_gpadl(device->channel, net_device->send_buf,
338 				    net_device->send_buf_size,
339 				    &net_device->send_buf_gpadl_handle);
340 	if (ret != 0) {
341 		netdev_err(ndev,
342 			   "unable to establish send buffer's gpadl\n");
343 		goto cleanup;
344 	}
345 
346 	/* Notify the NetVsp of the gpadl handle */
347 	init_packet = &net_device->channel_init_pkt;
348 	memset(init_packet, 0, sizeof(struct nvsp_message));
349 	init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_SEND_BUF;
350 	init_packet->msg.v1_msg.send_send_buf.gpadl_handle =
351 		net_device->send_buf_gpadl_handle;
352 	init_packet->msg.v1_msg.send_send_buf.id = NETVSC_SEND_BUFFER_ID;
353 
354 	/* Send the gpadl notification request */
355 	ret = vmbus_sendpacket(device->channel, init_packet,
356 			       sizeof(struct nvsp_message),
357 			       (unsigned long)init_packet,
358 			       VM_PKT_DATA_INBAND,
359 			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
360 	if (ret != 0) {
361 		netdev_err(ndev,
362 			   "unable to send send buffer's gpadl to netvsp\n");
363 		goto cleanup;
364 	}
365 
366 	t = wait_for_completion_timeout(&net_device->channel_init_wait, 5*HZ);
367 	BUG_ON(t == 0);
368 
369 	/* Check the response */
370 	if (init_packet->msg.v1_msg.
371 	    send_send_buf_complete.status != NVSP_STAT_SUCCESS) {
372 		netdev_err(ndev, "Unable to complete send buffer "
373 			   "initialization with NetVsp - status %d\n",
374 			   init_packet->msg.v1_msg.
375 			   send_send_buf_complete.status);
376 		ret = -EINVAL;
377 		goto cleanup;
378 	}
379 
380 	/* Parse the response */
381 	net_device->send_section_size = init_packet->msg.
382 				v1_msg.send_send_buf_complete.section_size;
383 
384 	/* Section count is simply the size divided by the section size.
385 	 */
386 	net_device->send_section_cnt =
387 		net_device->send_buf_size/net_device->send_section_size;
388 
389 	dev_info(&device->device, "Send section size: %d, Section count:%d\n",
390 		 net_device->send_section_size, net_device->send_section_cnt);
391 
392 	/* Setup state for managing the send buffer. */
393 	net_device->map_words = DIV_ROUND_UP(net_device->send_section_cnt,
394 					     BITS_PER_LONG);
395 
396 	net_device->send_section_map =
397 		kzalloc(net_device->map_words * sizeof(ulong), GFP_KERNEL);
398 	if (net_device->send_section_map == NULL) {
399 		ret = -ENOMEM;
400 		goto cleanup;
401 	}
402 
403 	goto exit;
404 
405 cleanup:
406 	netvsc_destroy_buf(net_device);
407 
408 exit:
409 	return ret;
410 }
411 
412 
413 /* Negotiate NVSP protocol version */
414 static int negotiate_nvsp_ver(struct hv_device *device,
415 			      struct netvsc_device *net_device,
416 			      struct nvsp_message *init_packet,
417 			      u32 nvsp_ver)
418 {
419 	int ret;
420 	unsigned long t;
421 
422 	memset(init_packet, 0, sizeof(struct nvsp_message));
423 	init_packet->hdr.msg_type = NVSP_MSG_TYPE_INIT;
424 	init_packet->msg.init_msg.init.min_protocol_ver = nvsp_ver;
425 	init_packet->msg.init_msg.init.max_protocol_ver = nvsp_ver;
426 
427 	/* Send the init request */
428 	ret = vmbus_sendpacket(device->channel, init_packet,
429 			       sizeof(struct nvsp_message),
430 			       (unsigned long)init_packet,
431 			       VM_PKT_DATA_INBAND,
432 			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
433 
434 	if (ret != 0)
435 		return ret;
436 
437 	t = wait_for_completion_timeout(&net_device->channel_init_wait, 5*HZ);
438 
439 	if (t == 0)
440 		return -ETIMEDOUT;
441 
442 	if (init_packet->msg.init_msg.init_complete.status !=
443 	    NVSP_STAT_SUCCESS)
444 		return -EINVAL;
445 
446 	if (nvsp_ver == NVSP_PROTOCOL_VERSION_1)
447 		return 0;
448 
449 	/* NVSPv2 or later: Send NDIS config */
450 	memset(init_packet, 0, sizeof(struct nvsp_message));
451 	init_packet->hdr.msg_type = NVSP_MSG2_TYPE_SEND_NDIS_CONFIG;
452 	init_packet->msg.v2_msg.send_ndis_config.mtu = net_device->ndev->mtu +
453 						       ETH_HLEN;
454 	init_packet->msg.v2_msg.send_ndis_config.capability.ieee8021q = 1;
455 
456 	ret = vmbus_sendpacket(device->channel, init_packet,
457 				sizeof(struct nvsp_message),
458 				(unsigned long)init_packet,
459 				VM_PKT_DATA_INBAND, 0);
460 
461 	return ret;
462 }
463 
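/*
 * netvsc_connect_vsp - Negotiate the NVSP protocol version, send the NDIS
 * version, and set up the receive and send buffers with the NetVSP.
 */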
464 static int netvsc_connect_vsp(struct hv_device *device)
465 {
466 	int ret;
467 	struct netvsc_device *net_device;
468 	struct nvsp_message *init_packet;
469 	int ndis_version;
470 	struct net_device *ndev;
471 	u32 ver_list[] = { NVSP_PROTOCOL_VERSION_1, NVSP_PROTOCOL_VERSION_2,
472 		NVSP_PROTOCOL_VERSION_4, NVSP_PROTOCOL_VERSION_5 };
473 	int i, num_ver = ARRAY_SIZE(ver_list); /* number of NVSP versions */
474 
475 	net_device = get_outbound_net_device(device);
476 	if (!net_device)
477 		return -ENODEV;
478 	ndev = net_device->ndev;
479 
480 	init_packet = &net_device->channel_init_pkt;
481 
482 	/* Negotiate the latest NVSP protocol supported */
483 	for (i = num_ver - 1; i >= 0; i--)
484 		if (negotiate_nvsp_ver(device, net_device, init_packet,
485 				       ver_list[i])  == 0) {
486 			net_device->nvsp_version = ver_list[i];
487 			break;
488 		}
489 
490 	if (i < 0) {
491 		ret = -EPROTO;
492 		goto cleanup;
493 	}
494 
495 	pr_debug("Negotiated NVSP version:%x\n", net_device->nvsp_version);
496 
497 	/* Send the ndis version */
498 	memset(init_packet, 0, sizeof(struct nvsp_message));
499 
500 	if (net_device->nvsp_version <= NVSP_PROTOCOL_VERSION_4)
501 		ndis_version = 0x00060001;
502 	else
503 		ndis_version = 0x0006001e;
504 
505 	init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_NDIS_VER;
506 	init_packet->msg.v1_msg.
507 		send_ndis_ver.ndis_major_ver =
508 				(ndis_version & 0xFFFF0000) >> 16;
509 	init_packet->msg.v1_msg.
510 		send_ndis_ver.ndis_minor_ver =
511 				ndis_version & 0xFFFF;
512 
513 	/* Send the init request */
514 	ret = vmbus_sendpacket(device->channel, init_packet,
515 				sizeof(struct nvsp_message),
516 				(unsigned long)init_packet,
517 				VM_PKT_DATA_INBAND, 0);
518 	if (ret != 0)
519 		goto cleanup;
520 
521 	/* Post the big receive buffer to NetVSP */
522 	if (net_device->nvsp_version <= NVSP_PROTOCOL_VERSION_2)
523 		net_device->recv_buf_size = NETVSC_RECEIVE_BUFFER_SIZE_LEGACY;
524 	else
525 		net_device->recv_buf_size = NETVSC_RECEIVE_BUFFER_SIZE;
526 	net_device->send_buf_size = NETVSC_SEND_BUFFER_SIZE;
527 
528 	ret = netvsc_init_buf(device);
529 
530 cleanup:
531 	return ret;
532 }
533 
534 static void netvsc_disconnect_vsp(struct netvsc_device *net_device)
535 {
536 	netvsc_destroy_buf(net_device);
537 }
538 
539 /*
540  * netvsc_device_remove - Callback when the root bus device is removed
541  */
542 int netvsc_device_remove(struct hv_device *device)
543 {
544 	struct netvsc_device *net_device;
545 	unsigned long flags;
546 
547 	net_device = hv_get_drvdata(device);
548 
549 	netvsc_disconnect_vsp(net_device);
550 
551 	/*
552 	 * Since we have already drained, we don't need to busy wait
553 	 * as was done in final_release_stor_device().
554 	 * Note that we cannot set the ext pointer to NULL until
555 	 * we have drained - to drain the outgoing packets, we need to
556 	 * allow incoming packets.
557 	 */
558 
559 	spin_lock_irqsave(&device->channel->inbound_lock, flags);
560 	hv_set_drvdata(device, NULL);
561 	spin_unlock_irqrestore(&device->channel->inbound_lock, flags);
562 
563 	/*
564 	 * At this point, no one should be accessing net_device
565 	 * except in here
566 	 */
567 	dev_notice(&device->device, "net device safe to remove\n");
568 
569 	/* Now, we can close the channel safely */
570 	vmbus_close(device->channel);
571 
572 	/* Release all resources */
573 	vfree(net_device->sub_cb_buf);
574 	free_netvsc_device(net_device);
575 	return 0;
576 }
577 
578 
579 #define RING_AVAIL_PERCENT_HIWATER 20
580 #define RING_AVAIL_PERCENT_LOWATER 10
581 
582 /*
583  * Get the percentage of available bytes to write in the ring.
584  * The return value is in the range 0 to 100.
585  */
586 static inline u32 hv_ringbuf_avail_percent(
587 		struct hv_ring_buffer_info *ring_info)
588 {
589 	u32 avail_read, avail_write;
590 
591 	hv_get_ringbuffer_availbytes(ring_info, &avail_read, &avail_write);
592 
593 	return avail_write * 100 / ring_info->ring_datasize;
594 }
595 
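/* Release a previously claimed send-buffer section back to the bitmap. */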
596 static inline void netvsc_free_send_slot(struct netvsc_device *net_device,
597 					 u32 index)
598 {
599 	sync_change_bit(index, net_device->send_section_map);
600 }
601 
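/*
 * netvsc_send_completion - Handle a VM_PKT_COMP packet: either complete a
 * pending channel-init style request or finish an RNDIS send, releasing
 * its send-buffer section and waking the transmit queue if needed.
 */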
602 static void netvsc_send_completion(struct netvsc_device *net_device,
603 				   struct hv_device *device,
604 				   struct vmpacket_descriptor *packet)
605 {
606 	struct nvsp_message *nvsp_packet;
607 	struct hv_netvsc_packet *nvsc_packet;
608 	struct net_device *ndev;
609 	u32 send_index;
610 
611 	ndev = net_device->ndev;
612 
613 	nvsp_packet = (struct nvsp_message *)((unsigned long)packet +
614 			(packet->offset8 << 3));
615 
616 	if ((nvsp_packet->hdr.msg_type == NVSP_MSG_TYPE_INIT_COMPLETE) ||
617 	    (nvsp_packet->hdr.msg_type ==
618 	     NVSP_MSG1_TYPE_SEND_RECV_BUF_COMPLETE) ||
619 	    (nvsp_packet->hdr.msg_type ==
620 	     NVSP_MSG1_TYPE_SEND_SEND_BUF_COMPLETE) ||
621 	    (nvsp_packet->hdr.msg_type ==
622 	     NVSP_MSG5_TYPE_SUBCHANNEL)) {
623 		/* Copy the response back */
624 		memcpy(&net_device->channel_init_pkt, nvsp_packet,
625 		       sizeof(struct nvsp_message));
626 		complete(&net_device->channel_init_wait);
627 	} else if (nvsp_packet->hdr.msg_type ==
628 		   NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE) {
629 		int num_outstanding_sends;
630 		u16 q_idx = 0;
631 		struct vmbus_channel *channel = device->channel;
632 		int queue_sends;
633 
634 		/* Get the send context */
635 		nvsc_packet = (struct hv_netvsc_packet *)(unsigned long)
636 			packet->trans_id;
637 
638 		/* Notify the layer above us */
639 		if (nvsc_packet) {
640 			send_index = nvsc_packet->send_buf_index;
641 			if (send_index != NETVSC_INVALID_INDEX)
642 				netvsc_free_send_slot(net_device, send_index);
643 			q_idx = nvsc_packet->q_idx;
644 			channel = nvsc_packet->channel;
645 			nvsc_packet->send_completion(nvsc_packet->
646 						     send_completion_ctx);
647 		}
648 
649 		num_outstanding_sends =
650 			atomic_dec_return(&net_device->num_outstanding_sends);
651 		queue_sends = atomic_dec_return(&net_device->
652 						queue_sends[q_idx]);
653 
654 		if (net_device->destroy && num_outstanding_sends == 0)
655 			wake_up(&net_device->wait_drain);
656 
657 		if (netif_tx_queue_stopped(netdev_get_tx_queue(ndev, q_idx)) &&
658 		    !net_device->start_remove &&
659 		    (hv_ringbuf_avail_percent(&channel->outbound) >
660 		     RING_AVAIL_PERCENT_HIWATER || queue_sends < 1))
661 				netif_tx_wake_queue(netdev_get_tx_queue(
662 						    ndev, q_idx));
663 	} else {
664 		netdev_err(ndev, "Unknown send completion packet type %d received\n",
665 			   nvsp_packet->hdr.msg_type);
666 	}
667 
668 }
669 
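/*
 * netvsc_get_next_send_section - Find and atomically claim a free section
 * in the send-buffer bitmap; returns NETVSC_INVALID_INDEX if none is free.
 */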
670 static u32 netvsc_get_next_send_section(struct netvsc_device *net_device)
671 {
672 	unsigned long index;
673 	u32 max_words = net_device->map_words;
674 	unsigned long *map_addr = (unsigned long *)net_device->send_section_map;
675 	u32 section_cnt = net_device->send_section_cnt;
676 	int ret_val = NETVSC_INVALID_INDEX;
677 	int i;
678 	int prev_val;
679 
680 	for (i = 0; i < max_words; i++) {
681 		if (!~(map_addr[i]))
682 			continue;
683 		index = ffz(map_addr[i]);
684 		prev_val = sync_test_and_set_bit(index, &map_addr[i]);
685 		if (prev_val)
686 			continue;
687 		if ((index + (i * BITS_PER_LONG)) >= section_cnt)
688 			break;
689 		ret_val = (index + (i * BITS_PER_LONG));
690 		break;
691 	}
692 	return ret_val;
693 }
694 
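/*
 * netvsc_copy_to_send_buf - Copy the packet's page buffers into the chosen
 * send-buffer section at offset @pend_size, padding the RNDIS message to
 * the packet alignment when more packets may be batched behind it.
 */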
695 static u32 netvsc_copy_to_send_buf(struct netvsc_device *net_device,
696 				   unsigned int section_index,
697 				   u32 pend_size,
698 				   struct hv_netvsc_packet *packet)
699 {
700 	char *start = net_device->send_buf;
701 	char *dest = start + (section_index * net_device->send_section_size)
702 		     + pend_size;
703 	int i;
704 	u32 msg_size = 0;
705 	u32 padding = 0;
706 	u32 remain = packet->total_data_buflen % net_device->pkt_align;
707 	u32 page_count = packet->cp_partial ? packet->rmsg_pgcnt :
708 		packet->page_buf_cnt;
709 
710 	/* Add padding */
711 	if (packet->is_data_pkt && packet->xmit_more && remain &&
712 	    !packet->cp_partial) {
713 		padding = net_device->pkt_align - remain;
714 		packet->rndis_msg->msg_len += padding;
715 		packet->total_data_buflen += padding;
716 	}
717 
718 	for (i = 0; i < page_count; i++) {
719 		char *src = phys_to_virt(packet->page_buf[i].pfn << PAGE_SHIFT);
720 		u32 offset = packet->page_buf[i].offset;
721 		u32 len = packet->page_buf[i].len;
722 
723 		memcpy(dest, (src + offset), len);
724 		msg_size += len;
725 		dest += len;
726 	}
727 
728 	if (padding) {
729 		memset(dest, 0, padding);
730 		msg_size += padding;
731 	}
732 
733 	return msg_size;
734 }
735 
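/*
 * netvsc_send_pkt - Post a single RNDIS packet to the host, either inband
 * or via page buffers, and apply ring-buffer based flow control on the
 * corresponding transmit queue.
 */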
736 static inline int netvsc_send_pkt(
737 	struct hv_netvsc_packet *packet,
738 	struct netvsc_device *net_device)
739 {
740 	struct nvsp_message nvmsg;
741 	struct vmbus_channel *out_channel = packet->channel;
742 	u16 q_idx = packet->q_idx;
743 	struct net_device *ndev = net_device->ndev;
744 	u64 req_id;
745 	int ret;
746 	struct hv_page_buffer *pgbuf;
747 
748 	nvmsg.hdr.msg_type = NVSP_MSG1_TYPE_SEND_RNDIS_PKT;
749 	if (packet->is_data_pkt) {
750 		/* 0 is RMC_DATA; */
751 		nvmsg.msg.v1_msg.send_rndis_pkt.channel_type = 0;
752 	} else {
753 		/* 1 is RMC_CONTROL; */
754 		nvmsg.msg.v1_msg.send_rndis_pkt.channel_type = 1;
755 	}
756 
757 	nvmsg.msg.v1_msg.send_rndis_pkt.send_buf_section_index =
758 		packet->send_buf_index;
759 	if (packet->send_buf_index == NETVSC_INVALID_INDEX)
760 		nvmsg.msg.v1_msg.send_rndis_pkt.send_buf_section_size = 0;
761 	else
762 		nvmsg.msg.v1_msg.send_rndis_pkt.send_buf_section_size =
763 			packet->total_data_buflen;
764 
765 	if (packet->send_completion)
766 		req_id = (ulong)packet;
767 	else
768 		req_id = 0;
769 
770 	if (out_channel->rescind)
771 		return -ENODEV;
772 
773 	if (packet->page_buf_cnt) {
774 		pgbuf = packet->cp_partial ? packet->page_buf +
775 			packet->rmsg_pgcnt : packet->page_buf;
776 		ret = vmbus_sendpacket_pagebuffer(out_channel,
777 						  pgbuf,
778 						  packet->page_buf_cnt,
779 						  &nvmsg,
780 						  sizeof(struct nvsp_message),
781 						  req_id);
782 	} else {
783 		ret = vmbus_sendpacket(
784 				out_channel, &nvmsg,
785 				sizeof(struct nvsp_message),
786 				req_id,
787 				VM_PKT_DATA_INBAND,
788 				VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
789 	}
790 
791 	if (ret == 0) {
792 		atomic_inc(&net_device->num_outstanding_sends);
793 		atomic_inc(&net_device->queue_sends[q_idx]);
794 
795 		if (hv_ringbuf_avail_percent(&out_channel->outbound) <
796 			RING_AVAIL_PERCENT_LOWATER) {
797 			netif_tx_stop_queue(netdev_get_tx_queue(
798 					    ndev, q_idx));
799 
800 			if (atomic_read(&net_device->
801 				queue_sends[q_idx]) < 1)
802 				netif_tx_wake_queue(netdev_get_tx_queue(
803 						    ndev, q_idx));
804 		}
805 	} else if (ret == -EAGAIN) {
806 		netif_tx_stop_queue(netdev_get_tx_queue(
807 				    ndev, q_idx));
808 		if (atomic_read(&net_device->queue_sends[q_idx]) < 1) {
809 			netif_tx_wake_queue(netdev_get_tx_queue(
810 					    ndev, q_idx));
811 			ret = -ENOSPC;
812 		}
813 	} else {
814 		netdev_err(ndev, "Unable to send packet %p ret %d\n",
815 			   packet, ret);
816 	}
817 
818 	return ret;
819 }
820 
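/*
 * netvsc_send - Transmit entry point: pick the channel for the queue
 * (falling back to the primary channel), batch small data packets into a
 * send-buffer section when possible, and flush any previously batched
 * packet as needed.
 */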
821 int netvsc_send(struct hv_device *device,
822 		struct hv_netvsc_packet *packet)
823 {
824 	struct netvsc_device *net_device;
825 	int ret = 0, m_ret = 0;
826 	struct vmbus_channel *out_channel;
827 	u16 q_idx = packet->q_idx;
828 	u32 pktlen = packet->total_data_buflen, msd_len = 0;
829 	unsigned int section_index = NETVSC_INVALID_INDEX;
830 	unsigned long flag;
831 	struct multi_send_data *msdp;
832 	struct hv_netvsc_packet *msd_send = NULL, *cur_send = NULL;
833 	bool try_batch;
834 
835 	net_device = get_outbound_net_device(device);
836 	if (!net_device)
837 		return -ENODEV;
838 
839 	out_channel = net_device->chn_table[q_idx];
840 	if (!out_channel) {
841 		out_channel = device->channel;
842 		q_idx = 0;
843 		packet->q_idx = 0;
844 	}
845 	packet->channel = out_channel;
846 	packet->send_buf_index = NETVSC_INVALID_INDEX;
847 	packet->cp_partial = false;
848 
849 	msdp = &net_device->msd[q_idx];
850 
851 	/* batch packets in send buffer if possible */
852 	spin_lock_irqsave(&msdp->lock, flag);
853 	if (msdp->pkt)
854 		msd_len = msdp->pkt->total_data_buflen;
855 
856 	try_batch = packet->is_data_pkt && msd_len > 0 && msdp->count <
857 		    net_device->max_pkt;
858 
859 	if (try_batch && msd_len + pktlen + net_device->pkt_align <
860 	    net_device->send_section_size) {
861 		section_index = msdp->pkt->send_buf_index;
862 
863 	} else if (try_batch && msd_len + packet->rmsg_size <
864 		   net_device->send_section_size) {
865 		section_index = msdp->pkt->send_buf_index;
866 		packet->cp_partial = true;
867 
868 	} else if (packet->is_data_pkt && pktlen + net_device->pkt_align <
869 		   net_device->send_section_size) {
870 		section_index = netvsc_get_next_send_section(net_device);
871 		if (section_index != NETVSC_INVALID_INDEX) {
872 				msd_send = msdp->pkt;
873 				msdp->pkt = NULL;
874 				msdp->count = 0;
875 				msd_len = 0;
876 		}
877 	}
878 
879 	if (section_index != NETVSC_INVALID_INDEX) {
880 		netvsc_copy_to_send_buf(net_device,
881 					section_index, msd_len,
882 					packet);
883 
884 		packet->send_buf_index = section_index;
885 
886 		if (packet->cp_partial) {
887 			packet->page_buf_cnt -= packet->rmsg_pgcnt;
888 			packet->total_data_buflen = msd_len + packet->rmsg_size;
889 		} else {
890 			packet->page_buf_cnt = 0;
891 			packet->total_data_buflen += msd_len;
892 		}
893 
894 		if (msdp->pkt)
895 			netvsc_xmit_completion(msdp->pkt);
896 
897 		if (packet->xmit_more && !packet->cp_partial) {
898 			msdp->pkt = packet;
899 			msdp->count++;
900 		} else {
901 			cur_send = packet;
902 			msdp->pkt = NULL;
903 			msdp->count = 0;
904 		}
905 	} else {
906 		msd_send = msdp->pkt;
907 		msdp->pkt = NULL;
908 		msdp->count = 0;
909 		cur_send = packet;
910 	}
911 
912 	spin_unlock_irqrestore(&msdp->lock, flag);
913 
914 	if (msd_send) {
915 		m_ret = netvsc_send_pkt(msd_send, net_device);
916 
917 		if (m_ret != 0) {
918 			netvsc_free_send_slot(net_device,
919 					      msd_send->send_buf_index);
920 			netvsc_xmit_completion(msd_send);
921 		}
922 	}
923 
924 	if (cur_send)
925 		ret = netvsc_send_pkt(cur_send, net_device);
926 
927 	if (ret != 0 && section_index != NETVSC_INVALID_INDEX)
928 		netvsc_free_send_slot(net_device, section_index);
929 
930 	return ret;
931 }
932 
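/*
 * netvsc_send_recv_completion - Return a receive completion (with status)
 * for a transfer-page packet, retrying briefly if the ring is full.
 */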
933 static void netvsc_send_recv_completion(struct hv_device *device,
934 					struct vmbus_channel *channel,
935 					struct netvsc_device *net_device,
936 					u64 transaction_id, u32 status)
937 {
938 	struct nvsp_message recvcompMessage;
939 	int retries = 0;
940 	int ret;
941 	struct net_device *ndev;
942 
943 	ndev = net_device->ndev;
944 
945 	recvcompMessage.hdr.msg_type =
946 				NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE;
947 
948 	recvcompMessage.msg.v1_msg.send_rndis_pkt_complete.status = status;
949 
950 retry_send_cmplt:
951 	/* Send the completion */
952 	ret = vmbus_sendpacket(channel, &recvcompMessage,
953 			       sizeof(struct nvsp_message), transaction_id,
954 			       VM_PKT_COMP, 0);
955 	if (ret == 0) {
956 		/* success */
957 		/* no-op */
958 	} else if (ret == -EAGAIN) {
959 		/* no more room...wait a bit and attempt to retry 3 times */
960 		retries++;
961 		netdev_err(ndev, "unable to send receive completion pkt"
962 			" (tid %llx)...retrying %d\n", transaction_id, retries);
963 
964 		if (retries < 4) {
965 			udelay(100);
966 			goto retry_send_cmplt;
967 		} else {
968 			netdev_err(ndev, "unable to send receive "
969 				"completion pkt (tid %llx)...give up retrying\n",
970 				transaction_id);
971 		}
972 	} else {
973 		netdev_err(ndev, "unable to send receive "
974 			"completion pkt - %llx\n", transaction_id);
975 	}
976 }
977 
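/*
 * netvsc_receive - Process a transfer-page packet: hand each contained
 * RNDIS packet to the RNDIS filter, then send one receive completion
 * covering the whole transfer.
 */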
978 static void netvsc_receive(struct netvsc_device *net_device,
979 			struct vmbus_channel *channel,
980 			struct hv_device *device,
981 			struct vmpacket_descriptor *packet)
982 {
983 	struct vmtransfer_page_packet_header *vmxferpage_packet;
984 	struct nvsp_message *nvsp_packet;
985 	struct hv_netvsc_packet nv_pkt;
986 	struct hv_netvsc_packet *netvsc_packet = &nv_pkt;
987 	u32 status = NVSP_STAT_SUCCESS;
988 	int i;
989 	int count = 0;
990 	struct net_device *ndev;
991 
992 	ndev = net_device->ndev;
993 
994 	/*
995 	 * All inbound packets other than send completion should be xfer page
996 	 * packets.
997 	 */
998 	if (packet->type != VM_PKT_DATA_USING_XFER_PAGES) {
999 		netdev_err(ndev, "Unknown packet type received - %d\n",
1000 			   packet->type);
1001 		return;
1002 	}
1003 
1004 	nvsp_packet = (struct nvsp_message *)((unsigned long)packet +
1005 			(packet->offset8 << 3));
1006 
1007 	/* Make sure this is a valid nvsp packet */
1008 	if (nvsp_packet->hdr.msg_type !=
1009 	    NVSP_MSG1_TYPE_SEND_RNDIS_PKT) {
1010 		netdev_err(ndev, "Unknown nvsp packet type received-"
1011 			" %d\n", nvsp_packet->hdr.msg_type);
1012 		return;
1013 	}
1014 
1015 	vmxferpage_packet = (struct vmtransfer_page_packet_header *)packet;
1016 
1017 	if (vmxferpage_packet->xfer_pageset_id != NETVSC_RECEIVE_BUFFER_ID) {
1018 		netdev_err(ndev, "Invalid xfer page set id - "
1019 			   "expecting %x got %x\n", NETVSC_RECEIVE_BUFFER_ID,
1020 			   vmxferpage_packet->xfer_pageset_id);
1021 		return;
1022 	}
1023 
1024 	count = vmxferpage_packet->range_cnt;
1025 	netvsc_packet->channel = channel;
1026 
1027 	/* Each range represents 1 RNDIS pkt that contains 1 ethernet frame */
1028 	for (i = 0; i < count; i++) {
1029 		/* Initialize the netvsc packet */
1030 		netvsc_packet->status = NVSP_STAT_SUCCESS;
1031 		netvsc_packet->data = (void *)((unsigned long)net_device->
1032 			recv_buf + vmxferpage_packet->ranges[i].byte_offset);
1033 		netvsc_packet->total_data_buflen =
1034 					vmxferpage_packet->ranges[i].byte_count;
1035 
1036 		/* Pass it to the upper layer */
1037 		rndis_filter_receive(device, netvsc_packet);
1038 
1039 		if (netvsc_packet->status != NVSP_STAT_SUCCESS)
1040 			status = NVSP_STAT_FAIL;
1041 	}
1042 
1043 	netvsc_send_recv_completion(device, channel, net_device,
1044 				    vmxferpage_packet->d.trans_id, status);
1045 }
1046 
1047 
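/*
 * netvsc_send_table - Cache the VRSS send indirection table announced by
 * the host in an NVSP_MSG5_TYPE_SEND_INDIRECTION_TABLE message.
 */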
1048 static void netvsc_send_table(struct hv_device *hdev,
1049 			      struct vmpacket_descriptor *vmpkt)
1050 {
1051 	struct netvsc_device *nvscdev;
1052 	struct net_device *ndev;
1053 	struct nvsp_message *nvmsg;
1054 	int i;
1055 	u32 count, *tab;
1056 
1057 	nvscdev = get_outbound_net_device(hdev);
1058 	if (!nvscdev)
1059 		return;
1060 	ndev = nvscdev->ndev;
1061 
1062 	nvmsg = (struct nvsp_message *)((unsigned long)vmpkt +
1063 					(vmpkt->offset8 << 3));
1064 
1065 	if (nvmsg->hdr.msg_type != NVSP_MSG5_TYPE_SEND_INDIRECTION_TABLE)
1066 		return;
1067 
1068 	count = nvmsg->msg.v5_msg.send_table.count;
1069 	if (count != VRSS_SEND_TAB_SIZE) {
1070 		netdev_err(ndev, "Received wrong send-table size:%u\n", count);
1071 		return;
1072 	}
1073 
1074 	tab = (u32 *)((unsigned long)&nvmsg->msg.v5_msg.send_table +
1075 		      nvmsg->msg.v5_msg.send_table.offset);
1076 
1077 	for (i = 0; i < count; i++)
1078 		nvscdev->send_table[i] = tab[i];
1079 }
1080 
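/*
 * netvsc_channel_cb - VMBus channel callback: drain the inbound ring and
 * dispatch completions, transfer-page data and inband messages, growing
 * the receive scratch buffer when a packet does not fit.
 */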
1081 void netvsc_channel_cb(void *context)
1082 {
1083 	int ret;
1084 	struct vmbus_channel *channel = (struct vmbus_channel *)context;
1085 	struct hv_device *device;
1086 	struct netvsc_device *net_device;
1087 	u32 bytes_recvd;
1088 	u64 request_id;
1089 	struct vmpacket_descriptor *desc;
1090 	unsigned char *buffer;
1091 	int bufferlen = NETVSC_PACKET_SIZE;
1092 	struct net_device *ndev;
1093 
1094 	if (channel->primary_channel != NULL)
1095 		device = channel->primary_channel->device_obj;
1096 	else
1097 		device = channel->device_obj;
1098 
1099 	net_device = get_inbound_net_device(device);
1100 	if (!net_device)
1101 		return;
1102 	ndev = net_device->ndev;
1103 	buffer = get_per_channel_state(channel);
1104 
1105 	do {
1106 		ret = vmbus_recvpacket_raw(channel, buffer, bufferlen,
1107 					   &bytes_recvd, &request_id);
1108 		if (ret == 0) {
1109 			if (bytes_recvd > 0) {
1110 				desc = (struct vmpacket_descriptor *)buffer;
1111 				switch (desc->type) {
1112 				case VM_PKT_COMP:
1113 					netvsc_send_completion(net_device,
1114 								device, desc);
1115 					break;
1116 
1117 				case VM_PKT_DATA_USING_XFER_PAGES:
1118 					netvsc_receive(net_device, channel,
1119 						       device, desc);
1120 					break;
1121 
1122 				case VM_PKT_DATA_INBAND:
1123 					netvsc_send_table(device, desc);
1124 					break;
1125 
1126 				default:
1127 					netdev_err(ndev,
1128 						   "unhandled packet type %d, "
1129 						   "tid %llx len %d\n",
1130 						   desc->type, request_id,
1131 						   bytes_recvd);
1132 					break;
1133 				}
1134 
1135 			} else {
1136 				/*
1137 				 * We are done for this pass.
1138 				 */
1139 				break;
1140 			}
1141 
1142 		} else if (ret == -ENOBUFS) {
1143 			if (bufferlen > NETVSC_PACKET_SIZE)
1144 				kfree(buffer);
1145 			/* Handle large packet */
1146 			buffer = kmalloc(bytes_recvd, GFP_ATOMIC);
1147 			if (buffer == NULL) {
1148 				/* Try again next time around */
1149 				netdev_err(ndev,
1150 					   "unable to allocate buffer of size "
1151 					   "(%d)!!\n", bytes_recvd);
1152 				break;
1153 			}
1154 
1155 			bufferlen = bytes_recvd;
1156 		}
1157 	} while (1);
1158 
1159 	if (bufferlen > NETVSC_PACKET_SIZE)
1160 		kfree(buffer);
1161 	return;
1162 }
1163 
1164 /*
1165  * netvsc_device_add - Callback when the device belonging to this
1166  * driver is added
1167  */
1168 int netvsc_device_add(struct hv_device *device, void *additional_info)
1169 {
1170 	int ret = 0;
1171 	int ring_size =
1172 	((struct netvsc_device_info *)additional_info)->ring_size;
1173 	struct netvsc_device *net_device;
1174 	struct net_device *ndev;
1175 
1176 	net_device = alloc_net_device(device);
1177 	if (!net_device)
1178 		return -ENOMEM;
1179 
1180 	net_device->ring_size = ring_size;
1181 
1182 	/*
1183 	 * Coming into this function, struct net_device * is
1184 	 * registered as the driver private data.
1185 	 * In alloc_net_device(), we register struct netvsc_device *
1186 	 * as the driver private data and stash away struct net_device *
1187 	 * in struct netvsc_device *.
1188 	 */
1189 	ndev = net_device->ndev;
1190 
1191 	/* Stash the net_device_context (netdev private data) in the netvsc_device */
1192 	net_device->nd_ctx = netdev_priv(ndev);
1193 
1194 	/* Initialize the NetVSC channel extension */
1195 	init_completion(&net_device->channel_init_wait);
1196 
1197 	set_per_channel_state(device->channel, net_device->cb_buffer);
1198 
1199 	/* Open the channel */
1200 	ret = vmbus_open(device->channel, ring_size * PAGE_SIZE,
1201 			 ring_size * PAGE_SIZE, NULL, 0,
1202 			 netvsc_channel_cb, device->channel);
1203 
1204 	if (ret != 0) {
1205 		netdev_err(ndev, "unable to open channel: %d\n", ret);
1206 		goto cleanup;
1207 	}
1208 
1209 	/* Channel is opened */
1210 	pr_info("hv_netvsc channel opened successfully\n");
1211 
1212 	net_device->chn_table[0] = device->channel;
1213 
1214 	/* Connect with the NetVsp */
1215 	ret = netvsc_connect_vsp(device);
1216 	if (ret != 0) {
1217 		netdev_err(ndev,
1218 			"unable to connect to NetVSP - %d\n", ret);
1219 		goto close;
1220 	}
1221 
1222 	return ret;
1223 
1224 close:
1225 	/* Now, we can close the channel safely */
1226 	vmbus_close(device->channel);
1227 
1228 cleanup:
1229 	free_netvsc_device(net_device);
1230 
1231 	return ret;
1232 }
1233