1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Copyright (c) 2009, Microsoft Corporation.
4 *
5 * Authors:
6 * Haiyang Zhang <haiyangz@microsoft.com>
7 * Hank Janssen <hjanssen@microsoft.com>
8 */
9 #include <linux/ethtool.h>
10 #include <linux/kernel.h>
11 #include <linux/sched.h>
12 #include <linux/wait.h>
13 #include <linux/highmem.h>
14 #include <linux/slab.h>
15 #include <linux/io.h>
16 #include <linux/if_ether.h>
17 #include <linux/netdevice.h>
18 #include <linux/if_vlan.h>
19 #include <linux/nls.h>
20 #include <linux/vmalloc.h>
21 #include <linux/rtnetlink.h>
22 #include <linux/ucs2_string.h>
23 #include <linux/string.h>
24
25 #include "hyperv_net.h"
26 #include "netvsc_trace.h"
27
28 static void rndis_set_multicast(struct work_struct *w);
29
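/* Room reserved after each request/response message for extended info
 * (OID payloads, RSS parameters, etc.); one Hyper-V page is plenty for
 * the messages exchanged here.
 */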
30 #define RNDIS_EXT_LEN HV_HYP_PAGE_SIZE
31 struct rndis_request {
32 struct list_head list_ent;
33 struct completion wait_event;
34
35 struct rndis_message response_msg;
36 /*
37 * The buffer for extended info after the RNDIS response message. It's
38 * referenced based on the data offset in the RNDIS message. Its size
39 * is enough for current needs, and should be sufficient for the near
40 * future.
41 */
42 u8 response_ext[RNDIS_EXT_LEN];
43
44 /* Simplify allocation by having a netvsc packet inline */
45 struct hv_netvsc_packet pkt;
46
47 struct rndis_message request_msg;
48 /*
49 * The buffer for the extended info after the RNDIS request message.
50 * It is referenced and sized in a similar way as response_ext.
51 */
52 u8 request_ext[RNDIS_EXT_LEN];
53 };
54
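/* Default Toeplitz hash key used for RSS when no key has been
 * configured for the device (e.g. via ethtool -X ... hkey).
 */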
55 static const u8 netvsc_hash_key[NETVSC_HASH_KEYLEN] = {
56 0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
57 0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
58 0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
59 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
60 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa
61 };
62
63 static struct rndis_device *get_rndis_device(void)
64 {
65 struct rndis_device *device;
66
67 device = kzalloc(sizeof(struct rndis_device), GFP_KERNEL);
68 if (!device)
69 return NULL;
70
71 spin_lock_init(&device->request_lock);
72
73 INIT_LIST_HEAD(&device->req_list);
74 INIT_WORK(&device->mcast_work, rndis_set_multicast);
75
76 device->state = RNDIS_DEV_UNINITIALIZED;
77
78 return device;
79 }
80
81 static struct rndis_request *get_rndis_request(struct rndis_device *dev,
82 u32 msg_type,
83 u32 msg_len)
84 {
85 struct rndis_request *request;
86 struct rndis_message *rndis_msg;
87 struct rndis_set_request *set;
88 unsigned long flags;
89
90 request = kzalloc(sizeof(struct rndis_request), GFP_KERNEL);
91 if (!request)
92 return NULL;
93
94 init_completion(&request->wait_event);
95
96 rndis_msg = &request->request_msg;
97 rndis_msg->ndis_msg_type = msg_type;
98 rndis_msg->msg_len = msg_len;
99
100 request->pkt.q_idx = 0;
101
102 /*
103 * Set the request id. This field is always after the rndis header for
104 * request/response packet types, so we just use the SetRequest as a
105 * template
106 */
107 set = &rndis_msg->msg.set_req;
108 set->req_id = atomic_inc_return(&dev->new_req_id);
109
110 /* Add to the request list */
111 spin_lock_irqsave(&dev->request_lock, flags);
112 list_add_tail(&request->list_ent, &dev->req_list);
113 spin_unlock_irqrestore(&dev->request_lock, flags);
114
115 return request;
116 }
117
118 static void put_rndis_request(struct rndis_device *dev,
119 struct rndis_request *req)
120 {
121 unsigned long flags;
122
123 spin_lock_irqsave(&dev->request_lock, flags);
124 list_del(&req->list_ent);
125 spin_unlock_irqrestore(&dev->request_lock, flags);
126
127 kfree(req);
128 }
129
130 static void dump_rndis_message(struct net_device *netdev,
131 const struct rndis_message *rndis_msg,
132 const void *data)
133 {
134 switch (rndis_msg->ndis_msg_type) {
135 case RNDIS_MSG_PACKET:
136 if (rndis_msg->msg_len - RNDIS_HEADER_SIZE >= sizeof(struct rndis_packet)) {
137 const struct rndis_packet *pkt = data + RNDIS_HEADER_SIZE;
138 netdev_dbg(netdev, "RNDIS_MSG_PACKET (len %u, "
139 "data offset %u data len %u, # oob %u, "
140 "oob offset %u, oob len %u, pkt offset %u, "
141 "pkt len %u\n",
142 rndis_msg->msg_len,
143 pkt->data_offset,
144 pkt->data_len,
145 pkt->num_oob_data_elements,
146 pkt->oob_data_offset,
147 pkt->oob_data_len,
148 pkt->per_pkt_info_offset,
149 pkt->per_pkt_info_len);
150 }
151 break;
152
153 case RNDIS_MSG_INIT_C:
154 if (rndis_msg->msg_len - RNDIS_HEADER_SIZE >=
155 sizeof(struct rndis_initialize_complete)) {
156 const struct rndis_initialize_complete *init_complete =
157 data + RNDIS_HEADER_SIZE;
158 netdev_dbg(netdev, "RNDIS_MSG_INIT_C "
159 "(len %u, id 0x%x, status 0x%x, major %d, minor %d, "
160 "device flags %d, max xfer size 0x%x, max pkts %u, "
161 "pkt aligned %u)\n",
162 rndis_msg->msg_len,
163 init_complete->req_id,
164 init_complete->status,
165 init_complete->major_ver,
166 init_complete->minor_ver,
167 init_complete->dev_flags,
168 init_complete->max_xfer_size,
169 init_complete->max_pkt_per_msg,
170 init_complete->pkt_alignment_factor);
171 }
172 break;
173
174 case RNDIS_MSG_QUERY_C:
175 if (rndis_msg->msg_len - RNDIS_HEADER_SIZE >=
176 sizeof(struct rndis_query_complete)) {
177 const struct rndis_query_complete *query_complete =
178 data + RNDIS_HEADER_SIZE;
179 netdev_dbg(netdev, "RNDIS_MSG_QUERY_C "
180 "(len %u, id 0x%x, status 0x%x, buf len %u, "
181 "buf offset %u)\n",
182 rndis_msg->msg_len,
183 query_complete->req_id,
184 query_complete->status,
185 query_complete->info_buflen,
186 query_complete->info_buf_offset);
187 }
188 break;
189
190 case RNDIS_MSG_SET_C:
191 if (rndis_msg->msg_len - RNDIS_HEADER_SIZE >= sizeof(struct rndis_set_complete)) {
192 const struct rndis_set_complete *set_complete =
193 data + RNDIS_HEADER_SIZE;
194 netdev_dbg(netdev,
195 "RNDIS_MSG_SET_C (len %u, id 0x%x, status 0x%x)\n",
196 rndis_msg->msg_len,
197 set_complete->req_id,
198 set_complete->status);
199 }
200 break;
201
202 case RNDIS_MSG_INDICATE:
203 if (rndis_msg->msg_len - RNDIS_HEADER_SIZE >=
204 sizeof(struct rndis_indicate_status)) {
205 const struct rndis_indicate_status *indicate_status =
206 data + RNDIS_HEADER_SIZE;
207 netdev_dbg(netdev, "RNDIS_MSG_INDICATE "
208 "(len %u, status 0x%x, buf len %u, buf offset %u)\n",
209 rndis_msg->msg_len,
210 indicate_status->status,
211 indicate_status->status_buflen,
212 indicate_status->status_buf_offset);
213 }
214 break;
215
216 default:
217 netdev_dbg(netdev, "0x%x (len %u)\n",
218 rndis_msg->ndis_msg_type,
219 rndis_msg->msg_len);
220 break;
221 }
222 }
223
224 static int rndis_filter_send_request(struct rndis_device *dev,
225 struct rndis_request *req)
226 {
227 struct hv_netvsc_packet *packet;
228 struct hv_page_buffer pb;
229 int ret;
230
231 /* Setup the packet to send it */
232 packet = &req->pkt;
233
234 packet->total_data_buflen = req->request_msg.msg_len;
235 packet->page_buf_cnt = 1;
236
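/* Describe the guest physical location of the request message with a
 * single page buffer; netvsc_send() passes this to the host along with
 * the packet.
 */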
237 pb.pfn = virt_to_phys(&req->request_msg) >> HV_HYP_PAGE_SHIFT;
238 pb.len = req->request_msg.msg_len;
239 pb.offset = offset_in_hvpage(&req->request_msg);
240
241 trace_rndis_send(dev->ndev, 0, &req->request_msg);
242
243 rcu_read_lock_bh();
244 ret = netvsc_send(dev->ndev, packet, NULL, &pb, NULL, false);
245 rcu_read_unlock_bh();
246
247 return ret;
248 }
249
250 static void rndis_set_link_state(struct rndis_device *rdev,
251 struct rndis_request *request)
252 {
253 u32 link_status;
254 struct rndis_query_complete *query_complete;
255 u32 msg_len = request->response_msg.msg_len;
256
257 /* Ensure the packet is big enough to access its fields */
258 if (msg_len - RNDIS_HEADER_SIZE < sizeof(struct rndis_query_complete))
259 return;
260
261 query_complete = &request->response_msg.msg.query_complete;
262
263 if (query_complete->status == RNDIS_STATUS_SUCCESS &&
264 query_complete->info_buflen >= sizeof(u32) &&
265 query_complete->info_buf_offset >= sizeof(*query_complete) &&
266 msg_len - RNDIS_HEADER_SIZE >= query_complete->info_buf_offset &&
267 msg_len - RNDIS_HEADER_SIZE - query_complete->info_buf_offset
268 >= query_complete->info_buflen) {
269 memcpy(&link_status, (void *)((unsigned long)query_complete +
270 query_complete->info_buf_offset), sizeof(u32));
271 rdev->link_state = link_status != 0;
272 }
273 }
274
275 static void rndis_filter_receive_response(struct net_device *ndev,
276 struct netvsc_device *nvdev,
277 struct rndis_message *resp,
278 void *data)
279 {
280 u32 *req_id = &resp->msg.init_complete.req_id;
281 struct rndis_device *dev = nvdev->extension;
282 struct rndis_request *request = NULL;
283 bool found = false;
284 unsigned long flags;
285
286 /* This should never happen; it means a control message
287 * response was received after the device was removed.
288 */
289 if (dev->state == RNDIS_DEV_UNINITIALIZED) {
290 netdev_err(ndev,
291 "got rndis message uninitialized\n");
292 return;
293 }
294
295 /* Ensure the packet is big enough to read req_id. Req_id is the 1st
296 * field in any request/response message, so the payload should have at
297 * least sizeof(u32) bytes
298 */
299 if (resp->msg_len - RNDIS_HEADER_SIZE < sizeof(u32)) {
300 netdev_err(ndev, "rndis msg_len too small: %u\n",
301 resp->msg_len);
302 return;
303 }
304
305 /* Copy the request ID into nvchan->recv_buf */
306 *req_id = *(u32 *)(data + RNDIS_HEADER_SIZE);
307
308 spin_lock_irqsave(&dev->request_lock, flags);
309 list_for_each_entry(request, &dev->req_list, list_ent) {
310 /*
311 * All request/response messages contain the RequestId as the
312 * 1st field
313 */
314 if (request->request_msg.msg.init_req.req_id == *req_id) {
315 found = true;
316 break;
317 }
318 }
319 spin_unlock_irqrestore(&dev->request_lock, flags);
320
321 if (found) {
322 if (resp->msg_len <=
323 sizeof(struct rndis_message) + RNDIS_EXT_LEN) {
324 memcpy(&request->response_msg, resp, RNDIS_HEADER_SIZE + sizeof(*req_id));
325 unsafe_memcpy((void *)&request->response_msg + RNDIS_HEADER_SIZE + sizeof(*req_id),
326 data + RNDIS_HEADER_SIZE + sizeof(*req_id),
327 resp->msg_len - RNDIS_HEADER_SIZE - sizeof(*req_id),
328 "request->response_msg is followed by a padding of RNDIS_EXT_LEN inside rndis_request");
329 if (request->request_msg.ndis_msg_type ==
330 RNDIS_MSG_QUERY && request->request_msg.msg.
331 query_req.oid == RNDIS_OID_GEN_MEDIA_CONNECT_STATUS)
332 rndis_set_link_state(dev, request);
333 } else {
334 netdev_err(ndev,
335 "rndis response buffer overflow "
336 "detected (size %u max %zu)\n",
337 resp->msg_len,
338 sizeof(struct rndis_message));
339
340 if (resp->ndis_msg_type ==
341 RNDIS_MSG_RESET_C) {
342 /* does not have a request id field */
343 request->response_msg.msg.reset_complete.
344 status = RNDIS_STATUS_BUFFER_OVERFLOW;
345 } else {
346 request->response_msg.msg.
347 init_complete.status =
348 RNDIS_STATUS_BUFFER_OVERFLOW;
349 }
350 }
351
352 netvsc_dma_unmap(((struct net_device_context *)
353 netdev_priv(ndev))->device_ctx, &request->pkt);
354 complete(&request->wait_event);
355 } else {
356 netdev_err(ndev,
357 "no rndis request found for this response "
358 "(id 0x%x res type 0x%x)\n",
359 *req_id,
360 resp->ndis_msg_type);
361 }
362 }
363
364 /*
365 * Get the Per-Packet-Info with the specified type;
366 * return NULL if not found.
367 */
368 static inline void *rndis_get_ppi(struct net_device *ndev,
369 struct rndis_packet *rpkt,
370 u32 rpkt_len, u32 type, u8 internal,
371 u32 ppi_size, void *data)
372 {
373 struct rndis_per_packet_info *ppi;
374 int len;
375
376 if (rpkt->per_pkt_info_offset == 0)
377 return NULL;
378
379 /* Validate info_offset and info_len */
380 if (rpkt->per_pkt_info_offset < sizeof(struct rndis_packet) ||
381 rpkt->per_pkt_info_offset > rpkt_len) {
382 netdev_err(ndev, "Invalid per_pkt_info_offset: %u\n",
383 rpkt->per_pkt_info_offset);
384 return NULL;
385 }
386
387 if (rpkt->per_pkt_info_len < sizeof(*ppi) ||
388 rpkt->per_pkt_info_len > rpkt_len - rpkt->per_pkt_info_offset) {
389 netdev_err(ndev, "Invalid per_pkt_info_len: %u\n",
390 rpkt->per_pkt_info_len);
391 return NULL;
392 }
393
394 ppi = (struct rndis_per_packet_info *)((ulong)rpkt +
395 rpkt->per_pkt_info_offset);
396 /* Copy the PPIs into nvchan->recv_buf */
397 memcpy(ppi, data + RNDIS_HEADER_SIZE + rpkt->per_pkt_info_offset, rpkt->per_pkt_info_len);
398 len = rpkt->per_pkt_info_len;
399
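/* Walk the per-packet-info list: each entry is ppi->size bytes long and
 * its payload starts at ppi->ppi_offset within the entry.
 */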
400 while (len > 0) {
401 /* Validate ppi_offset and ppi_size */
402 if (ppi->size > len) {
403 netdev_err(ndev, "Invalid ppi size: %u\n", ppi->size);
404 continue;
405 }
406
407 if (ppi->ppi_offset >= ppi->size) {
408 netdev_err(ndev, "Invalid ppi_offset: %u\n", ppi->ppi_offset);
409 continue;
410 }
411
412 if (ppi->type == type && ppi->internal == internal) {
413 /* ppi->size should be big enough to hold the returned object. */
414 if (ppi->size - ppi->ppi_offset < ppi_size) {
415 netdev_err(ndev, "Invalid ppi: size %u ppi_offset %u\n",
416 ppi->size, ppi->ppi_offset);
417 continue;
418 }
419 return (void *)((ulong)ppi + ppi->ppi_offset);
420 }
421 len -= ppi->size;
422 ppi = (struct rndis_per_packet_info *)((ulong)ppi + ppi->size);
423 }
424
425 return NULL;
426 }
427
428 static inline
429 void rsc_add_data(struct netvsc_channel *nvchan,
430 const struct ndis_pkt_8021q_info *vlan,
431 const struct ndis_tcp_ip_checksum_info *csum_info,
432 const u32 *hash_info,
433 void *data, u32 len)
434 {
435 u32 cnt = nvchan->rsc.cnt;
436
437 if (cnt) {
438 nvchan->rsc.pktlen += len;
439 } else {
440 /* The data/values pointed to by vlan, csum_info and hash_info are shared
441 * across the different 'fragments' of the RSC packet; store them into
442 * the packet itself.
443 */
444 if (vlan != NULL) {
445 memcpy(&nvchan->rsc.vlan, vlan, sizeof(*vlan));
446 nvchan->rsc.ppi_flags |= NVSC_RSC_VLAN;
447 } else {
448 nvchan->rsc.ppi_flags &= ~NVSC_RSC_VLAN;
449 }
450 if (csum_info != NULL) {
451 memcpy(&nvchan->rsc.csum_info, csum_info, sizeof(*csum_info));
452 nvchan->rsc.ppi_flags |= NVSC_RSC_CSUM_INFO;
453 } else {
454 nvchan->rsc.ppi_flags &= ~NVSC_RSC_CSUM_INFO;
455 }
456 nvchan->rsc.pktlen = len;
457 if (hash_info != NULL) {
458 nvchan->rsc.hash_info = *hash_info;
459 nvchan->rsc.ppi_flags |= NVSC_RSC_HASH_INFO;
460 } else {
461 nvchan->rsc.ppi_flags &= ~NVSC_RSC_HASH_INFO;
462 }
463 }
464
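/* Append this fragment to the per-channel RSC state; once the last
 * fragment arrives, the coalesced packet is handed to
 * netvsc_recv_callback() in one go.
 */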
465 nvchan->rsc.data[cnt] = data;
466 nvchan->rsc.len[cnt] = len;
467 nvchan->rsc.cnt++;
468 }
469
470 static int rndis_filter_receive_data(struct net_device *ndev,
471 struct netvsc_device *nvdev,
472 struct netvsc_channel *nvchan,
473 struct rndis_message *msg,
474 void *data, u32 data_buflen)
475 {
476 struct rndis_packet *rndis_pkt = &msg->msg.pkt;
477 const struct ndis_tcp_ip_checksum_info *csum_info;
478 const struct ndis_pkt_8021q_info *vlan;
479 const struct rndis_pktinfo_id *pktinfo_id;
480 const u32 *hash_info;
481 u32 data_offset, rpkt_len;
482 bool rsc_more = false;
483 int ret;
484
485 /* Ensure data_buflen is big enough to read header fields */
486 if (data_buflen < RNDIS_HEADER_SIZE + sizeof(struct rndis_packet)) {
487 netdev_err(ndev, "invalid rndis pkt, data_buflen too small: %u\n",
488 data_buflen);
489 return NVSP_STAT_FAIL;
490 }
491
492 /* Copy the RNDIS packet into nvchan->recv_buf */
493 memcpy(rndis_pkt, data + RNDIS_HEADER_SIZE, sizeof(*rndis_pkt));
494
495 /* Validate rndis_pkt offset */
496 if (rndis_pkt->data_offset >= data_buflen - RNDIS_HEADER_SIZE) {
497 netdev_err(ndev, "invalid rndis packet offset: %u\n",
498 rndis_pkt->data_offset);
499 return NVSP_STAT_FAIL;
500 }
501
502 /* Remove the rndis header and pass it back up the stack */
503 data_offset = RNDIS_HEADER_SIZE + rndis_pkt->data_offset;
504
505 rpkt_len = data_buflen - RNDIS_HEADER_SIZE;
506 data_buflen -= data_offset;
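/* rpkt_len covers everything after the RNDIS header; data_buflen now
 * counts only the packet data plus any trailer padding.
 */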
507
508 /*
509 * Make sure we got a valid RNDIS message; the remaining buffer
510 * should be the data packet size plus the trailer padding size
511 */
512 if (unlikely(data_buflen < rndis_pkt->data_len)) {
513 netdev_err(ndev, "rndis message buffer "
514 "overflow detected (got %u, min %u)"
515 "...dropping this message!\n",
516 data_buflen, rndis_pkt->data_len);
517 return NVSP_STAT_FAIL;
518 }
519
520 vlan = rndis_get_ppi(ndev, rndis_pkt, rpkt_len, IEEE_8021Q_INFO, 0, sizeof(*vlan),
521 data);
522
523 csum_info = rndis_get_ppi(ndev, rndis_pkt, rpkt_len, TCPIP_CHKSUM_PKTINFO, 0,
524 sizeof(*csum_info), data);
525
526 hash_info = rndis_get_ppi(ndev, rndis_pkt, rpkt_len, NBL_HASH_VALUE, 0,
527 sizeof(*hash_info), data);
528
529 pktinfo_id = rndis_get_ppi(ndev, rndis_pkt, rpkt_len, RNDIS_PKTINFO_ID, 1,
530 sizeof(*pktinfo_id), data);
531
532 /* Identify RSC frags, drop erroneous packets */
533 if (pktinfo_id && (pktinfo_id->flag & RNDIS_PKTINFO_SUBALLOC)) {
534 if (pktinfo_id->flag & RNDIS_PKTINFO_1ST_FRAG)
535 nvchan->rsc.cnt = 0;
536 else if (nvchan->rsc.cnt == 0)
537 goto drop;
538
539 rsc_more = true;
540
541 if (pktinfo_id->flag & RNDIS_PKTINFO_LAST_FRAG)
542 rsc_more = false;
543
544 if (rsc_more && nvchan->rsc.is_last)
545 goto drop;
546 } else {
547 nvchan->rsc.cnt = 0;
548 }
549
550 if (unlikely(nvchan->rsc.cnt >= NVSP_RSC_MAX))
551 goto drop;
552
553 /* Put data into per channel structure.
554 * Also, remove the rndis trailer padding from the rndis packet message;
555 * rndis_pkt->data_len tells us the real data length, so we only copy
556 * the data packet to the stack, without the rndis trailer padding
557 */
558 rsc_add_data(nvchan, vlan, csum_info, hash_info,
559 data + data_offset, rndis_pkt->data_len);
560
561 if (rsc_more)
562 return NVSP_STAT_SUCCESS;
563
564 ret = netvsc_recv_callback(ndev, nvdev, nvchan);
565 nvchan->rsc.cnt = 0;
566
567 return ret;
568
569 drop:
570 return NVSP_STAT_FAIL;
571 }
572
573 int rndis_filter_receive(struct net_device *ndev,
574 struct netvsc_device *net_dev,
575 struct netvsc_channel *nvchan,
576 void *data, u32 buflen)
577 {
578 struct net_device_context *net_device_ctx = netdev_priv(ndev);
579 struct rndis_message *rndis_msg = nvchan->recv_buf;
580
581 if (buflen < RNDIS_HEADER_SIZE) {
582 netdev_err(ndev, "Invalid rndis_msg (buflen: %u)\n", buflen);
583 return NVSP_STAT_FAIL;
584 }
585
586 /* Copy the RNDIS msg header into nvchan->recv_buf */
587 memcpy(rndis_msg, data, RNDIS_HEADER_SIZE);
588
589 /* Validate incoming rndis_message packet */
590 if (rndis_msg->msg_len < RNDIS_HEADER_SIZE ||
591 buflen < rndis_msg->msg_len) {
592 netdev_err(ndev, "Invalid rndis_msg (buflen: %u, msg_len: %u)\n",
593 buflen, rndis_msg->msg_len);
594 return NVSP_STAT_FAIL;
595 }
596
597 if (netif_msg_rx_status(net_device_ctx))
598 dump_rndis_message(ndev, rndis_msg, data);
599
600 switch (rndis_msg->ndis_msg_type) {
601 case RNDIS_MSG_PACKET:
602 return rndis_filter_receive_data(ndev, net_dev, nvchan,
603 rndis_msg, data, buflen);
604 case RNDIS_MSG_INIT_C:
605 case RNDIS_MSG_QUERY_C:
606 case RNDIS_MSG_SET_C:
607 /* completion msgs */
608 rndis_filter_receive_response(ndev, net_dev, rndis_msg, data);
609 break;
610
611 case RNDIS_MSG_INDICATE:
612 /* notification msgs */
613 netvsc_linkstatus_callback(ndev, rndis_msg, data, buflen);
614 break;
615 default:
616 netdev_err(ndev,
617 "unhandled rndis message (type %u len %u)\n",
618 rndis_msg->ndis_msg_type,
619 rndis_msg->msg_len);
620 return NVSP_STAT_FAIL;
621 }
622
623 return NVSP_STAT_SUCCESS;
624 }
625
626 static int rndis_filter_query_device(struct rndis_device *dev,
627 struct netvsc_device *nvdev,
628 u32 oid, void *result, u32 *result_size)
629 {
630 struct rndis_request *request;
631 u32 inresult_size = *result_size;
632 struct rndis_query_request *query;
633 struct rndis_query_complete *query_complete;
634 u32 msg_len;
635 int ret = 0;
636
637 if (!result)
638 return -EINVAL;
639
640 *result_size = 0;
641 request = get_rndis_request(dev, RNDIS_MSG_QUERY,
642 RNDIS_MESSAGE_SIZE(struct rndis_query_request));
643 if (!request) {
644 ret = -ENOMEM;
645 goto cleanup;
646 }
647
648 /* Setup the rndis query */
649 query = &request->request_msg.msg.query_req;
650 query->oid = oid;
651 query->info_buf_offset = sizeof(struct rndis_query_request);
652 query->info_buflen = 0;
653 query->dev_vc_handle = 0;
654
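/* For the offload-capabilities OID the host expects an ndis_offload
 * header in the info buffer whose revision and size match the
 * negotiated NVSP version; it is filled in below.
 */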
655 if (oid == OID_TCP_OFFLOAD_HARDWARE_CAPABILITIES) {
656 struct ndis_offload *hwcaps;
657 u32 nvsp_version = nvdev->nvsp_version;
658 u8 ndis_rev;
659 size_t size;
660
661 if (nvsp_version >= NVSP_PROTOCOL_VERSION_5) {
662 ndis_rev = NDIS_OFFLOAD_PARAMETERS_REVISION_3;
663 size = NDIS_OFFLOAD_SIZE;
664 } else if (nvsp_version >= NVSP_PROTOCOL_VERSION_4) {
665 ndis_rev = NDIS_OFFLOAD_PARAMETERS_REVISION_2;
666 size = NDIS_OFFLOAD_SIZE_6_1;
667 } else {
668 ndis_rev = NDIS_OFFLOAD_PARAMETERS_REVISION_1;
669 size = NDIS_OFFLOAD_SIZE_6_0;
670 }
671
672 request->request_msg.msg_len += size;
673 query->info_buflen = size;
674 hwcaps = (struct ndis_offload *)
675 ((unsigned long)query + query->info_buf_offset);
676
677 hwcaps->header.type = NDIS_OBJECT_TYPE_OFFLOAD;
678 hwcaps->header.revision = ndis_rev;
679 hwcaps->header.size = size;
680
681 } else if (oid == OID_GEN_RECEIVE_SCALE_CAPABILITIES) {
682 struct ndis_recv_scale_cap *cap;
683
684 request->request_msg.msg_len +=
685 sizeof(struct ndis_recv_scale_cap);
686 query->info_buflen = sizeof(struct ndis_recv_scale_cap);
687 cap = (struct ndis_recv_scale_cap *)((unsigned long)query +
688 query->info_buf_offset);
689 cap->hdr.type = NDIS_OBJECT_TYPE_RSS_CAPABILITIES;
690 cap->hdr.rev = NDIS_RECEIVE_SCALE_CAPABILITIES_REVISION_2;
691 cap->hdr.size = sizeof(struct ndis_recv_scale_cap);
692 }
693
694 ret = rndis_filter_send_request(dev, request);
695 if (ret != 0)
696 goto cleanup;
697
698 wait_for_completion(&request->wait_event);
699
700 /* Copy the response back */
701 query_complete = &request->response_msg.msg.query_complete;
702 msg_len = request->response_msg.msg_len;
703
704 /* Ensure the packet is big enough to access its fields */
705 if (msg_len - RNDIS_HEADER_SIZE < sizeof(struct rndis_query_complete)) {
706 ret = -1;
707 goto cleanup;
708 }
709
710 if (query_complete->info_buflen > inresult_size ||
711 query_complete->info_buf_offset < sizeof(*query_complete) ||
712 msg_len - RNDIS_HEADER_SIZE < query_complete->info_buf_offset ||
713 msg_len - RNDIS_HEADER_SIZE - query_complete->info_buf_offset
714 < query_complete->info_buflen) {
715 ret = -1;
716 goto cleanup;
717 }
718
719 memcpy(result,
720 (void *)((unsigned long)query_complete +
721 query_complete->info_buf_offset),
722 query_complete->info_buflen);
723
724 *result_size = query_complete->info_buflen;
725
726 cleanup:
727 if (request)
728 put_rndis_request(dev, request);
729
730 return ret;
731 }
732
733 /* Get the hardware offload capabilities */
734 static int
735 rndis_query_hwcaps(struct rndis_device *dev, struct netvsc_device *net_device,
736 struct ndis_offload *caps)
737 {
738 u32 caps_len = sizeof(*caps);
739 int ret;
740
741 memset(caps, 0, sizeof(*caps));
742
743 ret = rndis_filter_query_device(dev, net_device,
744 OID_TCP_OFFLOAD_HARDWARE_CAPABILITIES,
745 caps, &caps_len);
746 if (ret)
747 return ret;
748
749 if (caps->header.type != NDIS_OBJECT_TYPE_OFFLOAD) {
750 netdev_warn(dev->ndev, "invalid NDIS objtype %#x\n",
751 caps->header.type);
752 return -EINVAL;
753 }
754
755 if (caps->header.revision < NDIS_OFFLOAD_PARAMETERS_REVISION_1) {
756 netdev_warn(dev->ndev, "invalid NDIS objrev %x\n",
757 caps->header.revision);
758 return -EINVAL;
759 }
760
761 if (caps->header.size > caps_len ||
762 caps->header.size < NDIS_OFFLOAD_SIZE_6_0) {
763 netdev_warn(dev->ndev,
764 "invalid NDIS objsize %u, data size %u\n",
765 caps->header.size, caps_len);
766 return -EINVAL;
767 }
768
769 return 0;
770 }
771
772 static int rndis_filter_query_device_mac(struct rndis_device *dev,
773 struct netvsc_device *net_device)
774 {
775 u32 size = ETH_ALEN;
776
777 return rndis_filter_query_device(dev, net_device,
778 RNDIS_OID_802_3_PERMANENT_ADDRESS,
779 dev->hw_mac_adr, &size);
780 }
781
782 #define NWADR_STR "NetworkAddress"
783 #define NWADR_STRLEN 14
784
785 int rndis_filter_set_device_mac(struct netvsc_device *nvdev,
786 const char *mac)
787 {
788 struct rndis_device *rdev = nvdev->extension;
789 struct rndis_request *request;
790 struct rndis_set_request *set;
791 struct rndis_config_parameter_info *cpi;
792 wchar_t *cfg_nwadr, *cfg_mac;
793 struct rndis_set_complete *set_complete;
794 char macstr[2*ETH_ALEN+1];
795 u32 extlen = sizeof(struct rndis_config_parameter_info) +
796 2*NWADR_STRLEN + 4*ETH_ALEN;
797 int ret;
798
799 request = get_rndis_request(rdev, RNDIS_MSG_SET,
800 RNDIS_MESSAGE_SIZE(struct rndis_set_request) + extlen);
801 if (!request)
802 return -ENOMEM;
803
804 set = &request->request_msg.msg.set_req;
805 set->oid = RNDIS_OID_GEN_RNDIS_CONFIG_PARAMETER;
806 set->info_buflen = extlen;
807 set->info_buf_offset = sizeof(struct rndis_set_request);
808 set->dev_vc_handle = 0;
809
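/* The MAC is changed via the RNDIS config-parameter OID: a
 * rndis_config_parameter_info header, followed by the UTF-16 parameter
 * name ("NetworkAddress") and the UTF-16 value (12 hex characters).
 */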
810 cpi = (struct rndis_config_parameter_info *)((ulong)set +
811 set->info_buf_offset);
812 cpi->parameter_name_offset =
813 sizeof(struct rndis_config_parameter_info);
814 /* Multiply by 2 because host needs 2 bytes (utf16) for each char */
815 cpi->parameter_name_length = 2*NWADR_STRLEN;
816 cpi->parameter_type = RNDIS_CONFIG_PARAM_TYPE_STRING;
817 cpi->parameter_value_offset =
818 cpi->parameter_name_offset + cpi->parameter_name_length;
819 /* Multiply by 4 because each MAC byte displayed as 2 utf16 chars */
820 cpi->parameter_value_length = 4*ETH_ALEN;
821
822 cfg_nwadr = (wchar_t *)((ulong)cpi + cpi->parameter_name_offset);
823 cfg_mac = (wchar_t *)((ulong)cpi + cpi->parameter_value_offset);
824 ret = utf8s_to_utf16s(NWADR_STR, NWADR_STRLEN, UTF16_HOST_ENDIAN,
825 cfg_nwadr, NWADR_STRLEN);
826 if (ret < 0)
827 goto cleanup;
828 snprintf(macstr, 2*ETH_ALEN+1, "%pm", mac);
829 ret = utf8s_to_utf16s(macstr, 2*ETH_ALEN, UTF16_HOST_ENDIAN,
830 cfg_mac, 2*ETH_ALEN);
831 if (ret < 0)
832 goto cleanup;
833
834 ret = rndis_filter_send_request(rdev, request);
835 if (ret != 0)
836 goto cleanup;
837
838 wait_for_completion(&request->wait_event);
839
840 set_complete = &request->response_msg.msg.set_complete;
841 if (set_complete->status != RNDIS_STATUS_SUCCESS)
842 ret = -EIO;
843
844 cleanup:
845 put_rndis_request(rdev, request);
846 return ret;
847 }
848
849 int
850 rndis_filter_set_offload_params(struct net_device *ndev,
851 struct netvsc_device *nvdev,
852 struct ndis_offload_params *req_offloads)
853 {
854 struct rndis_device *rdev = nvdev->extension;
855 struct rndis_request *request;
856 struct rndis_set_request *set;
857 struct ndis_offload_params *offload_params;
858 struct rndis_set_complete *set_complete;
859 u32 extlen = sizeof(struct ndis_offload_params);
860 int ret;
861 u32 vsp_version = nvdev->nvsp_version;
862
863 if (vsp_version <= NVSP_PROTOCOL_VERSION_4) {
864 extlen = VERSION_4_OFFLOAD_SIZE;
865 /* On NVSP_PROTOCOL_VERSION_4 and below, we do not support
866 * UDP checksum offload.
867 */
868 req_offloads->udp_ip_v4_csum = 0;
869 req_offloads->udp_ip_v6_csum = 0;
870 }
871
872 request = get_rndis_request(rdev, RNDIS_MSG_SET,
873 RNDIS_MESSAGE_SIZE(struct rndis_set_request) + extlen);
874 if (!request)
875 return -ENOMEM;
876
877 set = &request->request_msg.msg.set_req;
878 set->oid = OID_TCP_OFFLOAD_PARAMETERS;
879 set->info_buflen = extlen;
880 set->info_buf_offset = sizeof(struct rndis_set_request);
881 set->dev_vc_handle = 0;
882
883 offload_params = (struct ndis_offload_params *)((ulong)set +
884 set->info_buf_offset);
885 *offload_params = *req_offloads;
886 offload_params->header.type = NDIS_OBJECT_TYPE_DEFAULT;
887 offload_params->header.revision = NDIS_OFFLOAD_PARAMETERS_REVISION_3;
888 offload_params->header.size = extlen;
889
890 ret = rndis_filter_send_request(rdev, request);
891 if (ret != 0)
892 goto cleanup;
893
894 wait_for_completion(&request->wait_event);
895 set_complete = &request->response_msg.msg.set_complete;
896 if (set_complete->status != RNDIS_STATUS_SUCCESS) {
897 netdev_err(ndev, "Fail to set offload on host side:0x%x\n",
898 set_complete->status);
899 ret = -EINVAL;
900 }
901
902 cleanup:
903 put_rndis_request(rdev, request);
904 return ret;
905 }
906
907 static int rndis_set_rss_param_msg(struct rndis_device *rdev,
908 const u8 *rss_key, u16 flag)
909 {
910 struct net_device *ndev = rdev->ndev;
911 struct net_device_context *ndc = netdev_priv(ndev);
912 struct rndis_request *request;
913 struct rndis_set_request *set;
914 struct rndis_set_complete *set_complete;
915 u32 extlen = sizeof(struct ndis_recv_scale_param) +
916 4 * ndc->rx_table_sz + NETVSC_HASH_KEYLEN;
917 struct ndis_recv_scale_param *rssp;
918 u32 *itab;
919 u8 *keyp;
920 int i, ret;
921
922 request = get_rndis_request(
923 rdev, RNDIS_MSG_SET,
924 RNDIS_MESSAGE_SIZE(struct rndis_set_request) + extlen);
925 if (!request)
926 return -ENOMEM;
927
928 set = &request->request_msg.msg.set_req;
929 set->oid = OID_GEN_RECEIVE_SCALE_PARAMETERS;
930 set->info_buflen = extlen;
931 set->info_buf_offset = sizeof(struct rndis_set_request);
932 set->dev_vc_handle = 0;
933
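/* Info buffer layout: ndis_recv_scale_param header, then the 4-byte
 * indirection table entries, then the Toeplitz hash key.
 */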
934 rssp = (struct ndis_recv_scale_param *)(set + 1);
935 rssp->hdr.type = NDIS_OBJECT_TYPE_RSS_PARAMETERS;
936 rssp->hdr.rev = NDIS_RECEIVE_SCALE_PARAMETERS_REVISION_2;
937 rssp->hdr.size = sizeof(struct ndis_recv_scale_param);
938 rssp->flag = flag;
939 rssp->hashinfo = NDIS_HASH_FUNC_TOEPLITZ | NDIS_HASH_IPV4 |
940 NDIS_HASH_TCP_IPV4 | NDIS_HASH_IPV6 |
941 NDIS_HASH_TCP_IPV6;
942 rssp->indirect_tabsize = 4 * ndc->rx_table_sz;
943 rssp->indirect_taboffset = sizeof(struct ndis_recv_scale_param);
944 rssp->hashkey_size = NETVSC_HASH_KEYLEN;
945 rssp->hashkey_offset = rssp->indirect_taboffset +
946 rssp->indirect_tabsize;
947
948 /* Set indirection table entries */
949 itab = (u32 *)(rssp + 1);
950 for (i = 0; i < ndc->rx_table_sz; i++)
951 itab[i] = ndc->rx_table[i];
952
953 /* Set hash key values */
954 keyp = (u8 *)((unsigned long)rssp + rssp->hashkey_offset);
955 memcpy(keyp, rss_key, NETVSC_HASH_KEYLEN);
956
957 ret = rndis_filter_send_request(rdev, request);
958 if (ret != 0)
959 goto cleanup;
960
961 wait_for_completion(&request->wait_event);
962 set_complete = &request->response_msg.msg.set_complete;
963 if (set_complete->status == RNDIS_STATUS_SUCCESS) {
964 if (!(flag & NDIS_RSS_PARAM_FLAG_DISABLE_RSS) &&
965 !(flag & NDIS_RSS_PARAM_FLAG_HASH_KEY_UNCHANGED))
966 memcpy(rdev->rss_key, rss_key, NETVSC_HASH_KEYLEN);
967
968 } else {
969 netdev_err(ndev, "Fail to set RSS parameters:0x%x\n",
970 set_complete->status);
971 ret = -EINVAL;
972 }
973
974 cleanup:
975 put_rndis_request(rdev, request);
976 return ret;
977 }
978
979 int rndis_filter_set_rss_param(struct rndis_device *rdev,
980 const u8 *rss_key)
981 {
982 /* Disable RSS before change */
983 rndis_set_rss_param_msg(rdev, rss_key,
984 NDIS_RSS_PARAM_FLAG_DISABLE_RSS);
985
986 return rndis_set_rss_param_msg(rdev, rss_key, 0);
987 }
988
989 static int rndis_filter_query_device_link_status(struct rndis_device *dev,
990 struct netvsc_device *net_device)
991 {
992 u32 size = sizeof(u32);
993 u32 link_status;
994
995 return rndis_filter_query_device(dev, net_device,
996 RNDIS_OID_GEN_MEDIA_CONNECT_STATUS,
997 &link_status, &size);
998 }
999
1000 static int rndis_filter_query_link_speed(struct rndis_device *dev,
1001 struct netvsc_device *net_device)
1002 {
1003 u32 size = sizeof(u32);
1004 u32 link_speed;
1005 struct net_device_context *ndc;
1006 int ret;
1007
1008 ret = rndis_filter_query_device(dev, net_device,
1009 RNDIS_OID_GEN_LINK_SPEED,
1010 &link_speed, &size);
1011
1012 if (!ret) {
1013 ndc = netdev_priv(dev->ndev);
1014
1015 /* The link speed reported from host is in 100bps unit, so
1016 * we convert it to Mbps here.
1017 */
1018 ndc->speed = link_speed / 10000;
1019 }
1020
1021 return ret;
1022 }
1023
1024 static int rndis_filter_set_packet_filter(struct rndis_device *dev,
1025 u32 new_filter)
1026 {
1027 struct rndis_request *request;
1028 struct rndis_set_request *set;
1029 int ret;
1030
1031 if (dev->filter == new_filter)
1032 return 0;
1033
1034 request = get_rndis_request(dev, RNDIS_MSG_SET,
1035 RNDIS_MESSAGE_SIZE(struct rndis_set_request) +
1036 sizeof(u32));
1037 if (!request)
1038 return -ENOMEM;
1039
1040 /* Setup the rndis set */
1041 set = &request->request_msg.msg.set_req;
1042 set->oid = RNDIS_OID_GEN_CURRENT_PACKET_FILTER;
1043 set->info_buflen = sizeof(u32);
1044 set->info_buf_offset = offsetof(typeof(*set), info_buf);
1045 memcpy(set->info_buf, &new_filter, sizeof(u32));
1046
1047 ret = rndis_filter_send_request(dev, request);
1048 if (ret == 0) {
1049 wait_for_completion(&request->wait_event);
1050 dev->filter = new_filter;
1051 }
1052
1053 put_rndis_request(dev, request);
1054
1055 return ret;
1056 }
1057
1058 static void rndis_set_multicast(struct work_struct *w)
1059 {
1060 struct rndis_device *rdev
1061 = container_of(w, struct rndis_device, mcast_work);
1062 u32 filter = NDIS_PACKET_TYPE_DIRECTED;
1063 unsigned int flags = rdev->ndev->flags;
1064
1065 if (flags & IFF_PROMISC) {
1066 filter = NDIS_PACKET_TYPE_PROMISCUOUS;
1067 } else {
1068 if (!netdev_mc_empty(rdev->ndev) || (flags & IFF_ALLMULTI))
1069 filter |= NDIS_PACKET_TYPE_ALL_MULTICAST;
1070 if (flags & IFF_BROADCAST)
1071 filter |= NDIS_PACKET_TYPE_BROADCAST;
1072 }
1073
1074 rndis_filter_set_packet_filter(rdev, filter);
1075 }
1076
1077 void rndis_filter_update(struct netvsc_device *nvdev)
1078 {
1079 struct rndis_device *rdev = nvdev->extension;
1080
1081 schedule_work(&rdev->mcast_work);
1082 }
1083
1084 static int rndis_filter_init_device(struct rndis_device *dev,
1085 struct netvsc_device *nvdev)
1086 {
1087 struct rndis_request *request;
1088 struct rndis_initialize_request *init;
1089 struct rndis_initialize_complete *init_complete;
1090 u32 status;
1091 int ret;
1092
1093 request = get_rndis_request(dev, RNDIS_MSG_INIT,
1094 RNDIS_MESSAGE_SIZE(struct rndis_initialize_request));
1095 if (!request) {
1096 ret = -ENOMEM;
1097 goto cleanup;
1098 }
1099
1100 /* Setup the rndis initialize request */
1101 init = &request->request_msg.msg.init_req;
1102 init->major_ver = RNDIS_MAJOR_VERSION;
1103 init->minor_ver = RNDIS_MINOR_VERSION;
1104 init->max_xfer_size = 0x4000;
1105
1106 dev->state = RNDIS_DEV_INITIALIZING;
1107
1108 ret = rndis_filter_send_request(dev, request);
1109 if (ret != 0) {
1110 dev->state = RNDIS_DEV_UNINITIALIZED;
1111 goto cleanup;
1112 }
1113
1114 wait_for_completion(&request->wait_event);
1115
1116 init_complete = &request->response_msg.msg.init_complete;
1117 status = init_complete->status;
1118 if (status == RNDIS_STATUS_SUCCESS) {
1119 dev->state = RNDIS_DEV_INITIALIZED;
1120 nvdev->max_pkt = init_complete->max_pkt_per_msg;
1121 nvdev->pkt_align = 1 << init_complete->pkt_alignment_factor;
1122 ret = 0;
1123 } else {
1124 dev->state = RNDIS_DEV_UNINITIALIZED;
1125 ret = -EINVAL;
1126 }
1127
1128 cleanup:
1129 if (request)
1130 put_rndis_request(dev, request);
1131
1132 return ret;
1133 }
1134
1135 static bool netvsc_device_idle(const struct netvsc_device *nvdev)
1136 {
1137 int i;
1138
1139 for (i = 0; i < nvdev->num_chn; i++) {
1140 const struct netvsc_channel *nvchan = &nvdev->chan_table[i];
1141
1142 if (nvchan->mrc.first != nvchan->mrc.next)
1143 return false;
1144
1145 if (atomic_read(&nvchan->queue_sends) > 0)
1146 return false;
1147 }
1148
1149 return true;
1150 }
1151
1152 static void rndis_filter_halt_device(struct netvsc_device *nvdev,
1153 struct rndis_device *dev)
1154 {
1155 struct rndis_request *request;
1156 struct rndis_halt_request *halt;
1157
1158 /* Attempt to do a rndis device halt */
1159 request = get_rndis_request(dev, RNDIS_MSG_HALT,
1160 RNDIS_MESSAGE_SIZE(struct rndis_halt_request));
1161 if (!request)
1162 goto cleanup;
1163
1164 /* Setup the rndis halt request */
1165 halt = &request->request_msg.msg.halt_req;
1166 halt->req_id = atomic_inc_return(&dev->new_req_id);
1167
1168 /* Ignore return since this msg is optional. */
1169 rndis_filter_send_request(dev, request);
1170
1171 dev->state = RNDIS_DEV_UNINITIALIZED;
1172
1173 cleanup:
1174 nvdev->destroy = true;
1175
1176 /* Force flag to be ordered before waiting */
1177 wmb();
1178
1179 /* Wait for all send completions */
1180 wait_event(nvdev->wait_drain, netvsc_device_idle(nvdev));
1181
1182 if (request)
1183 put_rndis_request(dev, request);
1184 }
1185
1186 static int rndis_filter_open_device(struct rndis_device *dev)
1187 {
1188 int ret;
1189
1190 if (dev->state != RNDIS_DEV_INITIALIZED)
1191 return 0;
1192
1193 ret = rndis_filter_set_packet_filter(dev,
1194 NDIS_PACKET_TYPE_BROADCAST |
1195 NDIS_PACKET_TYPE_ALL_MULTICAST |
1196 NDIS_PACKET_TYPE_DIRECTED);
1197 if (ret == 0)
1198 dev->state = RNDIS_DEV_DATAINITIALIZED;
1199
1200 return ret;
1201 }
1202
1203 static int rndis_filter_close_device(struct rndis_device *dev)
1204 {
1205 int ret;
1206
1207 if (dev->state != RNDIS_DEV_DATAINITIALIZED)
1208 return 0;
1209
1210 /* Make sure rndis_set_multicast doesn't re-enable filter! */
1211 cancel_work_sync(&dev->mcast_work);
1212
1213 ret = rndis_filter_set_packet_filter(dev, 0);
1214 if (ret == -ENODEV)
1215 ret = 0;
1216
1217 if (ret == 0)
1218 dev->state = RNDIS_DEV_INITIALIZED;
1219
1220 return ret;
1221 }
1222
1223 static void netvsc_sc_open(struct vmbus_channel *new_sc)
1224 {
1225 struct net_device *ndev =
1226 hv_get_drvdata(new_sc->primary_channel->device_obj);
1227 struct net_device_context *ndev_ctx = netdev_priv(ndev);
1228 struct netvsc_device *nvscdev;
1229 u16 chn_index = new_sc->offermsg.offer.sub_channel_index;
1230 struct netvsc_channel *nvchan;
1231 int ret;
1232
1233 /* This is safe because this callback only happens when
1234 * the new device is being set up and waiting on the channel_init_wait.
1235 */
1236 nvscdev = rcu_dereference_raw(ndev_ctx->nvdev);
1237 if (!nvscdev || chn_index >= nvscdev->num_chn)
1238 return;
1239
1240 nvchan = nvscdev->chan_table + chn_index;
1241
1242 /* Because the device uses NAPI, all the interrupt batching and
1243 * control is done via Net softirq, not the channel handling
1244 */
1245 set_channel_read_mode(new_sc, HV_CALL_ISR);
1246
1247 /* Set the channel before opening. */
1248 nvchan->channel = new_sc;
1249
1250 new_sc->next_request_id_callback = vmbus_next_request_id;
1251 new_sc->request_addr_callback = vmbus_request_addr;
1252 new_sc->rqstor_size = netvsc_rqstor_size(netvsc_ring_bytes);
1253 new_sc->max_pkt_size = NETVSC_MAX_PKT_SIZE;
1254
1255 ret = vmbus_open(new_sc, netvsc_ring_bytes,
1256 netvsc_ring_bytes, NULL, 0,
1257 netvsc_channel_cb, nvchan);
1258 if (ret == 0) {
1259 napi_enable(&nvchan->napi);
1260 netif_queue_set_napi(ndev, chn_index, NETDEV_QUEUE_TYPE_RX,
1261 &nvchan->napi);
1262 netif_queue_set_napi(ndev, chn_index, NETDEV_QUEUE_TYPE_TX,
1263 &nvchan->napi);
1264 } else {
1265 netdev_notice(ndev, "sub channel open failed: %d\n", ret);
1266 }
1267
1268 if (atomic_inc_return(&nvscdev->open_chn) == nvscdev->num_chn)
1269 wake_up(&nvscdev->subchan_open);
1270 }
1271
1272 /* Open sub-channels after completing the handling of the device probe.
1273 * This breaks overlap of processing the host message for the
1274 * new primary channel with the initialization of sub-channels.
1275 */
1276 int rndis_set_subchannel(struct net_device *ndev,
1277 struct netvsc_device *nvdev,
1278 struct netvsc_device_info *dev_info)
1279 {
1280 struct nvsp_message *init_packet = &nvdev->channel_init_pkt;
1281 struct net_device_context *ndev_ctx = netdev_priv(ndev);
1282 struct hv_device *hv_dev = ndev_ctx->device_ctx;
1283 struct rndis_device *rdev = nvdev->extension;
1284 int i, ret;
1285
1286 ASSERT_RTNL();
1287
1288 memset(init_packet, 0, sizeof(struct nvsp_message));
1289 init_packet->hdr.msg_type = NVSP_MSG5_TYPE_SUBCHANNEL;
1290 init_packet->msg.v5_msg.subchn_req.op = NVSP_SUBCHANNEL_ALLOCATE;
1291 init_packet->msg.v5_msg.subchn_req.num_subchannels =
1292 nvdev->num_chn - 1;
1293 trace_nvsp_send(ndev, init_packet);
1294
1295 ret = vmbus_sendpacket(hv_dev->channel, init_packet,
1296 sizeof(struct nvsp_message),
1297 (unsigned long)init_packet,
1298 VM_PKT_DATA_INBAND,
1299 VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
1300 if (ret) {
1301 netdev_err(ndev, "sub channel allocate send failed: %d\n", ret);
1302 return ret;
1303 }
1304
1305 wait_for_completion(&nvdev->channel_init_wait);
1306 if (init_packet->msg.v5_msg.subchn_comp.status != NVSP_STAT_SUCCESS) {
1307 netdev_err(ndev, "sub channel request failed\n");
1308 return -EIO;
1309 }
1310
1311 /* Check that the number of allocated sub channels is within the expected range */
1312 if (init_packet->msg.v5_msg.subchn_comp.num_subchannels > nvdev->num_chn - 1) {
1313 netdev_err(ndev, "invalid number of allocated sub channel\n");
1314 return -EINVAL;
1315 }
1316 nvdev->num_chn = 1 +
1317 init_packet->msg.v5_msg.subchn_comp.num_subchannels;
1318
1319 /* wait for all sub channels to open */
1320 wait_event(nvdev->subchan_open,
1321 atomic_read(&nvdev->open_chn) == nvdev->num_chn);
1322
1323 for (i = 0; i < VRSS_SEND_TAB_SIZE; i++)
1324 ndev_ctx->tx_table[i] = i % nvdev->num_chn;
1325
1326 /* ignore failures from setting rss parameters, still have channels */
1327 if (dev_info)
1328 rndis_filter_set_rss_param(rdev, dev_info->rss_key);
1329 else
1330 rndis_filter_set_rss_param(rdev, netvsc_hash_key);
1331
1332 netif_set_real_num_tx_queues(ndev, nvdev->num_chn);
1333 netif_set_real_num_rx_queues(ndev, nvdev->num_chn);
1334
1335 return 0;
1336 }
1337
1338 static int rndis_netdev_set_hwcaps(struct rndis_device *rndis_device,
1339 struct netvsc_device *nvdev)
1340 {
1341 struct net_device *net = rndis_device->ndev;
1342 struct net_device_context *net_device_ctx = netdev_priv(net);
1343 struct ndis_offload hwcaps;
1344 struct ndis_offload_params offloads;
1345 int ret;
1346
1347 nvdev->netvsc_gso_max_size = GSO_LEGACY_MAX_SIZE;
1348
1349 /* Find HW offload capabilities */
1350 ret = rndis_query_hwcaps(rndis_device, nvdev, &hwcaps);
1351 if (ret != 0)
1352 return ret;
1353
1354 /* A value of zero means "no change"; now turn on what we want. */
1355 memset(&offloads, 0, sizeof(struct ndis_offload_params));
1356
1357 /* Linux does not use IP header checksum offload; the kernel always computes it */
1358 offloads.ip_v4_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_DISABLED;
1359
1360 /* Reset previously set hw_features flags */
1361 net->hw_features &= ~NETVSC_SUPPORTED_HW_FEATURES;
1362 net_device_ctx->tx_checksum_mask = 0;
1363
1364 /* Compute tx offload settings based on hw capabilities */
1365 net->hw_features |= NETIF_F_RXCSUM;
1366 net->hw_features |= NETIF_F_SG;
1367 net->hw_features |= NETIF_F_RXHASH;
1368
1369 if ((hwcaps.csum.ip4_txcsum & NDIS_TXCSUM_ALL_TCP4) == NDIS_TXCSUM_ALL_TCP4) {
1370 /* Can checksum TCP */
1371 net->hw_features |= NETIF_F_IP_CSUM;
1372 net_device_ctx->tx_checksum_mask |= TRANSPORT_INFO_IPV4_TCP;
1373
1374 offloads.tcp_ip_v4_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_ENABLED;
1375
1376 if (hwcaps.lsov2.ip4_encap & NDIS_OFFLOAD_ENCAP_8023) {
1377 offloads.lso_v2_ipv4 = NDIS_OFFLOAD_PARAMETERS_LSOV2_ENABLED;
1378 net->hw_features |= NETIF_F_TSO;
1379
1380 if (hwcaps.lsov2.ip4_maxsz < nvdev->netvsc_gso_max_size)
1381 nvdev->netvsc_gso_max_size = hwcaps.lsov2.ip4_maxsz;
1382 }
1383
1384 if (hwcaps.csum.ip4_txcsum & NDIS_TXCSUM_CAP_UDP4) {
1385 offloads.udp_ip_v4_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_ENABLED;
1386 net_device_ctx->tx_checksum_mask |= TRANSPORT_INFO_IPV4_UDP;
1387 }
1388 }
1389
1390 if ((hwcaps.csum.ip6_txcsum & NDIS_TXCSUM_ALL_TCP6) == NDIS_TXCSUM_ALL_TCP6) {
1391 net->hw_features |= NETIF_F_IPV6_CSUM;
1392
1393 offloads.tcp_ip_v6_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_ENABLED;
1394 net_device_ctx->tx_checksum_mask |= TRANSPORT_INFO_IPV6_TCP;
1395
1396 if ((hwcaps.lsov2.ip6_encap & NDIS_OFFLOAD_ENCAP_8023) &&
1397 (hwcaps.lsov2.ip6_opts & NDIS_LSOV2_CAP_IP6) == NDIS_LSOV2_CAP_IP6) {
1398 offloads.lso_v2_ipv6 = NDIS_OFFLOAD_PARAMETERS_LSOV2_ENABLED;
1399 net->hw_features |= NETIF_F_TSO6;
1400
1401 if (hwcaps.lsov2.ip6_maxsz < nvdev->netvsc_gso_max_size)
1402 nvdev->netvsc_gso_max_size = hwcaps.lsov2.ip6_maxsz;
1403 }
1404
1405 if (hwcaps.csum.ip6_txcsum & NDIS_TXCSUM_CAP_UDP6) {
1406 offloads.udp_ip_v6_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_ENABLED;
1407 net_device_ctx->tx_checksum_mask |= TRANSPORT_INFO_IPV6_UDP;
1408 }
1409 }
1410
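/* Only advertise LRO (RSC) when the host can coalesce both IPv4 and
 * IPv6 streams; the offload is then enabled or disabled to match the
 * current NETIF_F_LRO setting.
 */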
1411 if (hwcaps.rsc.ip4 && hwcaps.rsc.ip6) {
1412 net->hw_features |= NETIF_F_LRO;
1413
1414 if (net->features & NETIF_F_LRO) {
1415 offloads.rsc_ip_v4 = NDIS_OFFLOAD_PARAMETERS_RSC_ENABLED;
1416 offloads.rsc_ip_v6 = NDIS_OFFLOAD_PARAMETERS_RSC_ENABLED;
1417 } else {
1418 offloads.rsc_ip_v4 = NDIS_OFFLOAD_PARAMETERS_RSC_DISABLED;
1419 offloads.rsc_ip_v6 = NDIS_OFFLOAD_PARAMETERS_RSC_DISABLED;
1420 }
1421 }
1422
1423 /* In case some hw_features disappeared we need to remove them from
1424 * net->features list as they're no longer supported.
1425 */
1426 net->features &= ~NETVSC_SUPPORTED_HW_FEATURES | net->hw_features;
1427
1428 netif_set_tso_max_size(net, nvdev->netvsc_gso_max_size);
1429
1430 ret = rndis_filter_set_offload_params(net, nvdev, &offloads);
1431
1432 return ret;
1433 }
1434
1435 static void rndis_get_friendly_name(struct net_device *net,
1436 struct rndis_device *rndis_device,
1437 struct netvsc_device *net_device)
1438 {
1439 ucs2_char_t wname[256];
1440 unsigned long len;
1441 u8 ifalias[256];
1442 u32 size;
1443
1444 size = sizeof(wname);
1445 if (rndis_filter_query_device(rndis_device, net_device,
1446 RNDIS_OID_GEN_FRIENDLY_NAME,
1447 wname, &size) != 0)
1448 return; /* ignore if host does not support */
1449
1450 if (size == 0)
1451 return; /* name not set */
1452
1453 /* Convert Windows Unicode string to UTF-8 */
1454 len = ucs2_as_utf8(ifalias, wname, sizeof(ifalias));
1455
1456 /* ignore the default value from host */
1457 if (strcmp(ifalias, "Network Adapter") != 0)
1458 dev_set_alias(net, ifalias, len);
1459 }
1460
1461 struct netvsc_device *rndis_filter_device_add(struct hv_device *dev,
1462 struct netvsc_device_info *device_info)
1463 {
1464 struct net_device *net = hv_get_drvdata(dev);
1465 struct net_device_context *ndc = netdev_priv(net);
1466 struct netvsc_device *net_device;
1467 struct rndis_device *rndis_device;
1468 struct ndis_recv_scale_cap rsscap;
1469 u32 rsscap_size = sizeof(struct ndis_recv_scale_cap);
1470 u32 mtu, size;
1471 u32 num_possible_rss_qs;
1472 int i, ret;
1473
1474 rndis_device = get_rndis_device();
1475 if (!rndis_device)
1476 return ERR_PTR(-ENODEV);
1477
1478 /* Let the inner driver handle this first to create the netvsc channel
1479 * NOTE! Once the channel is created, we may get a receive callback
1480 * (RndisFilterOnReceive()) before this call is completed
1481 */
1482 net_device = netvsc_device_add(dev, device_info);
1483 if (IS_ERR(net_device)) {
1484 kfree(rndis_device);
1485 return net_device;
1486 }
1487
1488 /* Initialize the rndis device */
1489 net_device->max_chn = 1;
1490 net_device->num_chn = 1;
1491
1492 net_device->extension = rndis_device;
1493 rndis_device->ndev = net;
1494
1495 /* Send the rndis initialization message */
1496 ret = rndis_filter_init_device(rndis_device, net_device);
1497 if (ret != 0)
1498 goto err_dev_remv;
1499
1500 /* Get the MTU from the host */
1501 size = sizeof(u32);
1502 ret = rndis_filter_query_device(rndis_device, net_device,
1503 RNDIS_OID_GEN_MAXIMUM_FRAME_SIZE,
1504 &mtu, &size);
1505 if (ret == 0 && size == sizeof(u32) && mtu < net->mtu)
1506 net->mtu = mtu;
1507
1508 /* Get the mac address */
1509 ret = rndis_filter_query_device_mac(rndis_device, net_device);
1510 if (ret != 0)
1511 goto err_dev_remv;
1512
1513 memcpy(device_info->mac_adr, rndis_device->hw_mac_adr, ETH_ALEN);
1514
1515 /* Get friendly name as ifalias */
1516 if (!net->ifalias)
1517 rndis_get_friendly_name(net, rndis_device, net_device);
1518
1519 /* Query and set hardware capabilities */
1520 ret = rndis_netdev_set_hwcaps(rndis_device, net_device);
1521 if (ret != 0)
1522 goto err_dev_remv;
1523
1524 rndis_filter_query_device_link_status(rndis_device, net_device);
1525
1526 netdev_dbg(net, "Device MAC %pM link state %s\n",
1527 rndis_device->hw_mac_adr,
1528 rndis_device->link_state ? "down" : "up");
1529
1530 if (net_device->nvsp_version < NVSP_PROTOCOL_VERSION_5)
1531 goto out;
1532
1533 rndis_filter_query_link_speed(rndis_device, net_device);
1534
1535 /* vRSS setup */
1536 memset(&rsscap, 0, rsscap_size);
1537 ret = rndis_filter_query_device(rndis_device, net_device,
1538 OID_GEN_RECEIVE_SCALE_CAPABILITIES,
1539 &rsscap, &rsscap_size);
1540 if (ret || rsscap.num_recv_que < 2)
1541 goto out;
1542
1543 if (rsscap.num_indirect_tabent &&
1544 rsscap.num_indirect_tabent <= ITAB_NUM_MAX)
1545 ndc->rx_table_sz = rsscap.num_indirect_tabent;
1546 else
1547 ndc->rx_table_sz = ITAB_NUM;
1548
1549 ndc->rx_table = kcalloc(ndc->rx_table_sz, sizeof(u16), GFP_KERNEL);
1550 if (!ndc->rx_table) {
1551 ret = -ENOMEM;
1552 goto err_dev_remv;
1553 }
1554
1555 /* This guarantees that num_possible_rss_qs <= num_online_cpus */
1556 num_possible_rss_qs = min_t(u32, num_online_cpus(),
1557 rsscap.num_recv_que);
1558
1559 net_device->max_chn = min_t(u32, VRSS_CHANNEL_MAX, num_possible_rss_qs);
1560
1561 /* We will use the given number of channels if available. */
1562 net_device->num_chn = min(net_device->max_chn, device_info->num_chn);
1563
1564 if (!netif_is_rxfh_configured(net)) {
1565 for (i = 0; i < ndc->rx_table_sz; i++)
1566 ndc->rx_table[i] = ethtool_rxfh_indir_default(
1567 i, net_device->num_chn);
1568 }
1569
1570 atomic_set(&net_device->open_chn, 1);
1571 vmbus_set_sc_create_callback(dev->channel, netvsc_sc_open);
1572
1573 for (i = 1; i < net_device->num_chn; i++) {
1574 ret = netvsc_alloc_recv_comp_ring(net_device, i);
1575 if (ret) {
1576 while (--i != 0)
1577 vfree(net_device->chan_table[i].mrc.slots);
1578 goto out;
1579 }
1580 }
1581
1582 for (i = 1; i < net_device->num_chn; i++)
1583 netif_napi_add(net, &net_device->chan_table[i].napi,
1584 netvsc_poll);
1585
1586 return net_device;
1587
1588 out:
1589 /* setting up multiple channels failed */
1590 net_device->max_chn = 1;
1591 net_device->num_chn = 1;
1592 return net_device;
1593
1594 err_dev_remv:
1595 rndis_filter_device_remove(dev, net_device);
1596 return ERR_PTR(ret);
1597 }
1598
1599 void rndis_filter_device_remove(struct hv_device *dev,
1600 struct netvsc_device *net_dev)
1601 {
1602 struct rndis_device *rndis_dev = net_dev->extension;
1603 struct net_device *net = hv_get_drvdata(dev);
1604 struct net_device_context *ndc;
1605
1606 ndc = netdev_priv(net);
1607
1608 /* Halt and release the rndis device */
1609 rndis_filter_halt_device(net_dev, rndis_dev);
1610
1611 netvsc_device_remove(dev);
1612
1613 ndc->rx_table_sz = 0;
1614 kfree(ndc->rx_table);
1615 ndc->rx_table = NULL;
1616 }
1617
1618 int rndis_filter_open(struct netvsc_device *nvdev)
1619 {
1620 if (!nvdev)
1621 return -EINVAL;
1622
1623 return rndis_filter_open_device(nvdev->extension);
1624 }
1625
1626 int rndis_filter_close(struct netvsc_device *nvdev)
1627 {
1628 if (!nvdev)
1629 return -EINVAL;
1630
1631 return rndis_filter_close_device(nvdev->extension);
1632 }
1633