Lines matching references to skb in the WireGuard receive path (drivers/net/wireguard/receive.c). The number at the start of each line is the source file's own line number; the trailing "in function()" notes which function contains the match, and "argument"/"local" marks how skb is declared there.

26 #define SKB_TYPE_LE32(skb) (((struct message_header *)(skb)->data)->type)  argument
28 static size_t validate_header_len(struct sk_buff *skb) in validate_header_len() argument
30 if (unlikely(skb->len < sizeof(struct message_header))) in validate_header_len()
32 if (SKB_TYPE_LE32(skb) == cpu_to_le32(MESSAGE_DATA) && in validate_header_len()
33 skb->len >= MESSAGE_MINIMUM_LENGTH) in validate_header_len()
35 if (SKB_TYPE_LE32(skb) == cpu_to_le32(MESSAGE_HANDSHAKE_INITIATION) && in validate_header_len()
36 skb->len == sizeof(struct message_handshake_initiation)) in validate_header_len()
38 if (SKB_TYPE_LE32(skb) == cpu_to_le32(MESSAGE_HANDSHAKE_RESPONSE) && in validate_header_len()
39 skb->len == sizeof(struct message_handshake_response)) in validate_header_len()
41 if (SKB_TYPE_LE32(skb) == cpu_to_le32(MESSAGE_HANDSHAKE_COOKIE) && in validate_header_len()
42 skb->len == sizeof(struct message_handshake_cookie)) in validate_header_len()
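The matches above come from the first sanity check on an incoming packet: the leading four bytes of the payload are read as a little-endian message type, and the packet is accepted only if its total length fits that type. Below is a minimal sketch of the same pattern. The length constants are illustrative stand-ins (the real driver derives them with sizeof() on its message structs), and validate_type_and_len is a hypothetical name used only here.

#include <linux/skbuff.h>
#include <linux/types.h>

struct message_header { __le32 type; };

enum message_type {
	MESSAGE_HANDSHAKE_INITIATION = 1,
	MESSAGE_HANDSHAKE_RESPONSE   = 2,
	MESSAGE_HANDSHAKE_COOKIE     = 3,
	MESSAGE_DATA                 = 4,
};

/* Illustrative wire sizes; the real code uses sizeof() on its message structs. */
#define LEN_INITIATION	148
#define LEN_RESPONSE	92
#define LEN_COOKIE	64
#define LEN_DATA_HDR	16
#define LEN_DATA_MIN	32

#define SKB_TYPE_LE32(skb) (((struct message_header *)(skb)->data)->type)

/* Return the header length callers may rely on, or 0 to drop the packet.
 * Handshake messages must match their size exactly; data messages only have
 * a minimum, since they carry a variable-length encrypted payload. */
static size_t validate_type_and_len(struct sk_buff *skb)
{
	if (unlikely(skb->len < sizeof(struct message_header)))
		return 0;
	if (SKB_TYPE_LE32(skb) == cpu_to_le32(MESSAGE_DATA) &&
	    skb->len >= LEN_DATA_MIN)
		return LEN_DATA_HDR;
	if (SKB_TYPE_LE32(skb) == cpu_to_le32(MESSAGE_HANDSHAKE_INITIATION) &&
	    skb->len == LEN_INITIATION)
		return LEN_INITIATION;
	if (SKB_TYPE_LE32(skb) == cpu_to_le32(MESSAGE_HANDSHAKE_RESPONSE) &&
	    skb->len == LEN_RESPONSE)
		return LEN_RESPONSE;
	if (SKB_TYPE_LE32(skb) == cpu_to_le32(MESSAGE_HANDSHAKE_COOKIE) &&
	    skb->len == LEN_COOKIE)
		return LEN_COOKIE;
	return 0;
}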
47 static int prepare_skb_header(struct sk_buff *skb, struct wg_device *wg) in prepare_skb_header() argument
52 if (unlikely(!wg_check_packet_protocol(skb) || in prepare_skb_header()
53 skb_transport_header(skb) < skb->head || in prepare_skb_header()
54 (skb_transport_header(skb) + sizeof(struct udphdr)) > in prepare_skb_header()
55 skb_tail_pointer(skb))) in prepare_skb_header()
57 udp = udp_hdr(skb); in prepare_skb_header()
58 data_offset = (u8 *)udp - skb->data; in prepare_skb_header()
60 data_offset + sizeof(struct udphdr) > skb->len)) in prepare_skb_header()
67 data_len > skb->len - data_offset)) in prepare_skb_header()
73 data_offset = (u8 *)udp + sizeof(struct udphdr) - skb->data; in prepare_skb_header()
74 if (unlikely(!pskb_may_pull(skb, in prepare_skb_header()
76 pskb_trim(skb, data_len + data_offset) < 0)) in prepare_skb_header()
78 skb_pull(skb, data_offset); in prepare_skb_header()
79 if (unlikely(skb->len != data_len)) in prepare_skb_header()
82 header_len = validate_header_len(skb); in prepare_skb_header()
85 __skb_push(skb, data_offset); in prepare_skb_header()
86 if (unlikely(!pskb_may_pull(skb, data_offset + header_len))) in prepare_skb_header()
88 __skb_pull(skb, data_offset); in prepare_skb_header()
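These matches cover the preparation step: locate the UDP header, verify the UDP length field against what the skb actually holds, trim any trailing padding, and pull skb->data forward to the first byte of the tunnel message so that the whole type-specific header is linear. A reduced sketch of that sequence follows; prepare_payload is a hypothetical name, the protocol check on the outer IP header is omitted, and struct message_header plus validate_type_and_len come from the sketch above.

#include <linux/skbuff.h>
#include <linux/udp.h>

static int prepare_payload(struct sk_buff *skb)
{
	size_t data_offset, data_len, header_len;
	struct udphdr *udp;

	if (unlikely(skb_transport_header(skb) < skb->head ||
		     skb_transport_header(skb) + sizeof(struct udphdr) >
			     skb_tail_pointer(skb)))
		return -EINVAL;	/* transport header is not inside the buffer */

	udp = udp_hdr(skb);
	data_offset = (u8 *)udp - skb->data;
	if (unlikely(data_offset > U16_MAX ||
		     data_offset + sizeof(struct udphdr) > skb->len))
		return -EINVAL;	/* not enough room for the UDP header */

	data_len = ntohs(udp->len);
	if (unlikely(data_len < sizeof(struct udphdr) ||
		     data_len > skb->len - data_offset))
		return -EINVAL;	/* UDP length field disagrees with the skb */
	data_len -= sizeof(struct udphdr);

	data_offset = (u8 *)udp + sizeof(struct udphdr) - skb->data;
	if (unlikely(!pskb_may_pull(skb, data_offset + sizeof(struct message_header)) ||
		     pskb_trim(skb, data_len + data_offset) < 0))
		return -EINVAL;
	skb_pull(skb, data_offset);
	if (unlikely(skb->len != data_len))
		return -EINVAL;	/* trim and pull must agree with the UDP length */

	header_len = validate_type_and_len(skb);
	if (unlikely(!header_len))
		return -EINVAL;

	/* Make the full, type-specific header linear, then restore skb->data. */
	__skb_push(skb, data_offset);
	if (unlikely(!pskb_may_pull(skb, data_offset + header_len)))
		return -EINVAL;
	__skb_pull(skb, data_offset);
	return 0;
}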
93 struct sk_buff *skb) in wg_receive_handshake_packet() argument
104 if (SKB_TYPE_LE32(skb) == cpu_to_le32(MESSAGE_HANDSHAKE_COOKIE)) { in wg_receive_handshake_packet()
106 wg->dev->name, skb); in wg_receive_handshake_packet()
108 (struct message_handshake_cookie *)skb->data, wg); in wg_receive_handshake_packet()
121 mac_state = wg_cookie_validate_packet(&wg->cookie_checker, skb, in wg_receive_handshake_packet()
130 wg->dev->name, skb); in wg_receive_handshake_packet()
134 switch (SKB_TYPE_LE32(skb)) { in wg_receive_handshake_packet()
137 (struct message_handshake_initiation *)skb->data; in wg_receive_handshake_packet()
140 wg_packet_send_handshake_cookie(wg, skb, in wg_receive_handshake_packet()
147 wg->dev->name, skb); in wg_receive_handshake_packet()
150 wg_socket_set_peer_endpoint_from_skb(peer, skb); in wg_receive_handshake_packet()
159 (struct message_handshake_response *)skb->data; in wg_receive_handshake_packet()
162 wg_packet_send_handshake_cookie(wg, skb, in wg_receive_handshake_packet()
169 wg->dev->name, skb); in wg_receive_handshake_packet()
172 wg_socket_set_peer_endpoint_from_skb(peer, skb); in wg_receive_handshake_packet()
198 update_rx_stats(peer, skb->len); in wg_receive_handshake_packet()
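The matches from wg_receive_handshake_packet trace its control flow: cookie replies are consumed directly, other handshake messages are first checked against their MACs, a device under load answers with a cookie instead of doing expensive handshake crypto, and a successfully consumed handshake updates the peer's endpoint from the packet's source address. The sketch below shows only that flow; the context structs and every helper in it (consume_cookie, validate_macs, under_load, send_cookie_reply, consume_initiation, consume_response, set_endpoint_from_skb, update_rx_stats) are hypothetical stand-ins for the driver's internals, and SKB_TYPE_LE32 plus the message types come from the first sketch.

#include <linux/skbuff.h>

enum mac_state { MAC_INVALID, MAC1_VALID, MAC1_AND_MAC2_VALID };

struct device_ctx;	/* stand-in for the device state */
struct peer_ctx;	/* stand-in for a peer */

void consume_cookie(struct device_ctx *dev, struct sk_buff *skb);
enum mac_state validate_macs(struct device_ctx *dev, struct sk_buff *skb);
bool under_load(struct device_ctx *dev);
void send_cookie_reply(struct device_ctx *dev, struct sk_buff *skb);
struct peer_ctx *consume_initiation(struct device_ctx *dev, struct sk_buff *skb);
struct peer_ctx *consume_response(struct device_ctx *dev, struct sk_buff *skb);
void set_endpoint_from_skb(struct peer_ctx *peer, struct sk_buff *skb);
void update_rx_stats(struct peer_ctx *peer, size_t len);

static void receive_handshake(struct device_ctx *dev, struct sk_buff *skb)
{
	struct peer_ctx *peer = NULL;

	if (SKB_TYPE_LE32(skb) == cpu_to_le32(MESSAGE_HANDSHAKE_COOKIE)) {
		consume_cookie(dev, skb);	/* decrypt and store the cookie */
		return;
	}

	/* Check mac1 (and mac2 when present) before any expensive crypto. */
	switch (validate_macs(dev, skb)) {
	case MAC_INVALID:
		return;
	case MAC1_VALID:
		if (under_load(dev)) {
			send_cookie_reply(dev, skb);	/* too busy: demand a cookie */
			return;
		}
		break;
	case MAC1_AND_MAC2_VALID:
		break;
	}

	switch (SKB_TYPE_LE32(skb)) {
	case cpu_to_le32(MESSAGE_HANDSHAKE_INITIATION):
		peer = consume_initiation(dev, skb);
		break;
	case cpu_to_le32(MESSAGE_HANDSHAKE_RESPONSE):
		peer = consume_response(dev, skb);
		break;
	}
	if (!peer)
		return;
	set_endpoint_from_skb(peer, skb);	/* roaming: remember the sender */
	update_rx_stats(peer, skb->len);
}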
210 struct sk_buff *skb; in wg_packet_handshake_receive_worker() local
212 while ((skb = ptr_ring_consume_bh(&queue->ring)) != NULL) { in wg_packet_handshake_receive_worker()
213 wg_receive_handshake_packet(wg, skb); in wg_packet_handshake_receive_worker()
214 dev_kfree_skb(skb); in wg_packet_handshake_receive_worker()
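The handshake worker drains its queue with ptr_ring_consume_bh() and frees each skb once the handshake logic has seen it; the decrypt worker further down follows the same consume loop. A minimal sketch of that pattern, with handle_packet as a hypothetical stand-in for the per-packet work:

#include <linux/netdevice.h>
#include <linux/ptr_ring.h>
#include <linux/skbuff.h>

void handle_packet(struct sk_buff *skb);	/* stand-in for the real work */

static void receive_worker(struct ptr_ring *ring)
{
	struct sk_buff *skb;

	/* _bh variant: takes the consumer lock with bottom halves disabled,
	 * since the producer side runs from softirq context. */
	while ((skb = ptr_ring_consume_bh(ring)) != NULL) {
		handle_packet(skb);
		dev_kfree_skb(skb);	/* the queue owns the skb until here */
	}
}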
242 static bool decrypt_packet(struct sk_buff *skb, struct noise_keypair *keypair) in decrypt_packet() argument
259 PACKET_CB(skb)->nonce = in decrypt_packet()
260 le64_to_cpu(((struct message_data *)skb->data)->counter); in decrypt_packet()
266 offset = -skb_network_offset(skb); in decrypt_packet()
267 skb_push(skb, offset); in decrypt_packet()
268 num_frags = skb_cow_data(skb, 0, &trailer); in decrypt_packet()
270 skb_pull(skb, offset); in decrypt_packet()
275 if (skb_to_sgvec(skb, sg, 0, skb->len) <= 0) in decrypt_packet()
278 if (!chacha20poly1305_decrypt_sg_inplace(sg, skb->len, NULL, 0, in decrypt_packet()
279 PACKET_CB(skb)->nonce, in decrypt_packet()
286 skb_push(skb, offset); in decrypt_packet()
287 if (pskb_trim(skb, skb->len - noise_encrypted_len(0))) in decrypt_packet()
289 skb_pull(skb, offset); in decrypt_packet()
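The decrypt_packet matches show in-place AEAD decryption: the 8-byte little-endian counter in the data header becomes the nonce, the payload (including the trailing auth tag) is mapped into a scatterlist, and ChaCha20-Poly1305 decrypts it in place so no copy is needed. A sketch of that sequence is below; message_data_hdr and decrypt_in_place are illustrative names, and the keypair validity and expiry checks done beforehand in the real code are omitted.

#include <crypto/chacha20poly1305.h>
#include <linux/scatterlist.h>
#include <linux/skbuff.h>

/* Illustrative flattening of the data-message header: type, receiver index,
 * then the 64-bit counter used as the AEAD nonce. */
struct message_data_hdr {
	__le32 type;
	__le32 key_idx;
	__le64 counter;
};

static bool decrypt_in_place(struct sk_buff *skb,
			     const u8 key[CHACHA20POLY1305_KEY_SIZE], u64 *nonce)
{
	struct scatterlist sg[MAX_SKB_FRAGS + 8];
	struct sk_buff *trailer;
	unsigned int offset;
	int num_frags;

	*nonce = le64_to_cpu(((struct message_data_hdr *)skb->data)->counter);

	/* Temporarily include the outer headers so skb_cow_data() cannot move
	 * them, then strip the data header before building the scatterlist. */
	offset = -skb_network_offset(skb);
	skb_push(skb, offset);
	num_frags = skb_cow_data(skb, 0, &trailer);
	offset += sizeof(struct message_data_hdr);
	skb_pull(skb, offset);
	if (unlikely(num_frags < 0 || num_frags > ARRAY_SIZE(sg)))
		return false;

	sg_init_table(sg, num_frags);
	if (skb_to_sgvec(skb, sg, 0, skb->len) <= 0)
		return false;

	if (!chacha20poly1305_decrypt_sg_inplace(sg, skb->len, NULL, 0, *nonce, key))
		return false;

	/* Drop the Poly1305 tag, again preserving the outer headers. */
	skb_push(skb, offset);
	if (pskb_trim(skb, skb->len - CHACHA20POLY1305_AUTHTAG_SIZE))
		return false;
	skb_pull(skb, offset);
	return true;
}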
336 struct sk_buff *skb, in wg_packet_consume_data_done() argument
346 PACKET_CB(skb)->keypair))) { in wg_packet_consume_data_done()
357 if (unlikely(!skb->len)) { in wg_packet_consume_data_done()
367 if (unlikely(skb_network_header(skb) < skb->head)) in wg_packet_consume_data_done()
369 if (unlikely(!(pskb_network_may_pull(skb, sizeof(struct iphdr)) && in wg_packet_consume_data_done()
370 (ip_hdr(skb)->version == 4 || in wg_packet_consume_data_done()
371 (ip_hdr(skb)->version == 6 && in wg_packet_consume_data_done()
372 pskb_network_may_pull(skb, sizeof(struct ipv6hdr))))))) in wg_packet_consume_data_done()
375 skb->dev = dev; in wg_packet_consume_data_done()
382 skb->ip_summed = CHECKSUM_UNNECESSARY; in wg_packet_consume_data_done()
383 skb->csum_level = ~0; /* All levels */ in wg_packet_consume_data_done()
384 skb->protocol = ip_tunnel_parse_protocol(skb); in wg_packet_consume_data_done()
385 if (skb->protocol == htons(ETH_P_IP)) { in wg_packet_consume_data_done()
386 len = ntohs(ip_hdr(skb)->tot_len); in wg_packet_consume_data_done()
389 INET_ECN_decapsulate(skb, PACKET_CB(skb)->ds, ip_hdr(skb)->tos); in wg_packet_consume_data_done()
390 } else if (skb->protocol == htons(ETH_P_IPV6)) { in wg_packet_consume_data_done()
391 len = ntohs(ipv6_hdr(skb)->payload_len) + in wg_packet_consume_data_done()
393 INET_ECN_decapsulate(skb, PACKET_CB(skb)->ds, ipv6_get_dsfield(ipv6_hdr(skb))); in wg_packet_consume_data_done()
398 if (unlikely(len > skb->len)) in wg_packet_consume_data_done()
400 len_before_trim = skb->len; in wg_packet_consume_data_done()
401 if (unlikely(pskb_trim(skb, len))) in wg_packet_consume_data_done()
405 skb); in wg_packet_consume_data_done()
411 napi_gro_receive(&peer->napi, skb); in wg_packet_consume_data_done()
417 dev->name, skb, peer->internal_id, in wg_packet_consume_data_done()
435 dev_kfree_skb(skb); in wg_packet_consume_data_done()
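The wg_packet_consume_data_done matches show the post-decryption fix-up: a zero-length plaintext is a keepalive, otherwise the inner IP version is checked, the checksum is marked already-verified (the AEAD authenticated the whole payload), ECN is decapsulated, and the skb is trimmed to the length the inner IP header claims before being handed to napi_gro_receive(). A sketch of that fix-up, with finish_rx as an illustrative name and the allowed-IPs routing check and statistics omitted:

#include <linux/if_ether.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/skbuff.h>
#include <net/dsfield.h>
#include <net/inet_ecn.h>
#include <net/ip_tunnels.h>

static bool finish_rx(struct sk_buff *skb, struct net_device *dev, u8 outer_ds)
{
	unsigned int len;

	if (unlikely(!skb->len))
		return false;	/* zero-length keepalive: nothing to deliver */

	if (unlikely(!(pskb_network_may_pull(skb, sizeof(struct iphdr)) &&
		       (ip_hdr(skb)->version == 4 ||
			(ip_hdr(skb)->version == 6 &&
			 pskb_network_may_pull(skb, sizeof(struct ipv6hdr)))))))
		return false;

	skb->dev = dev;
	skb->ip_summed = CHECKSUM_UNNECESSARY;	/* AEAD already authenticated it */
	skb->protocol = ip_tunnel_parse_protocol(skb);

	if (skb->protocol == htons(ETH_P_IP)) {
		len = ntohs(ip_hdr(skb)->tot_len);
		if (unlikely(len < sizeof(struct iphdr)))
			return false;
		INET_ECN_decapsulate(skb, outer_ds, ip_hdr(skb)->tos);
	} else if (skb->protocol == htons(ETH_P_IPV6)) {
		len = ntohs(ipv6_hdr(skb)->payload_len) + sizeof(struct ipv6hdr);
		INET_ECN_decapsulate(skb, outer_ds, ipv6_get_dsfield(ipv6_hdr(skb)));
	} else {
		return false;
	}

	/* Trim sender padding; an inner header claiming more than we
	 * decrypted means the packet is lying about its size. */
	if (unlikely(len > skb->len || pskb_trim(skb, len)))
		return false;
	return true;	/* caller hands the skb to napi_gro_receive() */
}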
444 struct sk_buff *skb; in wg_packet_rx_poll() local
451 while ((skb = wg_prev_queue_peek(&peer->rx_queue)) != NULL && in wg_packet_rx_poll()
452 (state = atomic_read_acquire(&PACKET_CB(skb)->state)) != in wg_packet_rx_poll()
455 keypair = PACKET_CB(skb)->keypair; in wg_packet_rx_poll()
462 PACKET_CB(skb)->nonce))) { in wg_packet_rx_poll()
465 PACKET_CB(skb)->nonce, in wg_packet_rx_poll()
470 if (unlikely(wg_socket_endpoint_from_skb(&endpoint, skb))) in wg_packet_rx_poll()
473 wg_reset_packet(skb, false); in wg_packet_rx_poll()
474 wg_packet_consume_data_done(peer, skb, &endpoint); in wg_packet_rx_poll()
481 dev_kfree_skb(skb); in wg_packet_rx_poll()
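The wg_packet_rx_poll matches show how ordering is preserved even though decryption runs in parallel: the per-peer NAPI poll peeks at the head of the receive queue and only advances once that packet's state has been published, pairing an acquire read here with a release store from whichever worker finished the decryption; counter/replay validation then happens serially in queue order. A minimal sketch of that handoff is below, using the skb control buffer for the state word; pkt_state, the state values, and the helper names are illustrative.

#include <linux/atomic.h>
#include <linux/build_bug.h>
#include <linux/skbuff.h>

enum pkt_state { PKT_UNFINISHED = 0, PKT_OK, PKT_DEAD };

static inline atomic_t *pkt_state(struct sk_buff *skb)
{
	BUILD_BUG_ON(sizeof(atomic_t) > sizeof(skb->cb));
	return (atomic_t *)skb->cb;
}

/* Producer side: initialize the state before the skb enters the queue. */
static void mark_queued(struct sk_buff *skb)
{
	atomic_set(pkt_state(skb), PKT_UNFINISHED);
}

/* Worker side: publish the result with release semantics once decryption
 * (possibly out of order, on another CPU) has finished. */
static void mark_done(struct sk_buff *skb, bool ok)
{
	atomic_set_release(pkt_state(skb), ok ? PKT_OK : PKT_DEAD);
}

/* NAPI side: consume the queue head only once its state is visible. */
static struct sk_buff *take_if_ready(struct sk_buff *head, enum pkt_state *state)
{
	if (!head)
		return NULL;
	*state = atomic_read_acquire(pkt_state(head));
	return *state == PKT_UNFINISHED ? NULL : head;
}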
497 struct sk_buff *skb; in wg_packet_decrypt_worker() local
499 while ((skb = ptr_ring_consume_bh(&queue->ring)) != NULL) { in wg_packet_decrypt_worker()
501 likely(decrypt_packet(skb, PACKET_CB(skb)->keypair)) ? in wg_packet_decrypt_worker()
503 wg_queue_enqueue_per_peer_rx(skb, state); in wg_packet_decrypt_worker()
509 static void wg_packet_consume_data(struct wg_device *wg, struct sk_buff *skb) in wg_packet_consume_data() argument
511 __le32 idx = ((struct message_data *)skb->data)->key_idx; in wg_packet_consume_data()
516 PACKET_CB(skb)->keypair = in wg_packet_consume_data()
520 if (unlikely(!wg_noise_keypair_get(PACKET_CB(skb)->keypair))) in wg_packet_consume_data()
526 ret = wg_queue_enqueue_per_device_and_peer(&wg->decrypt_queue, &peer->rx_queue, skb, in wg_packet_consume_data()
529 wg_queue_enqueue_per_peer_rx(skb, PACKET_STATE_DEAD); in wg_packet_consume_data()
535 wg_noise_keypair_put(PACKET_CB(skb)->keypair, false); in wg_packet_consume_data()
539 dev_kfree_skb(skb); in wg_packet_consume_data()
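The wg_packet_consume_data matches show the dispatch of a data packet into the parallel decryption machinery: the receiver index in the data header selects the keypair, the lookup runs under an RCU bottom-half read lock so the keypair can be torn down concurrently, and the skb is queued for decryption only once a reference on the keypair is held. A sketch of that shape, reusing message_data_hdr from the decryption sketch; the lookup, refcount, and enqueue helpers are hypothetical stand-ins.

#include <linux/netdevice.h>
#include <linux/rcupdate.h>
#include <linux/skbuff.h>

struct keypair;
struct keypair *lookup_keypair_by_index(__le32 idx);	/* RCU-protected lookup */
bool keypair_get(struct keypair *kp);			/* false if being freed  */
int enqueue_for_decryption(struct sk_buff *skb, struct keypair *kp);

static void consume_data(struct sk_buff *skb)
{
	__le32 idx = ((struct message_data_hdr *)skb->data)->key_idx;
	struct keypair *kp;

	rcu_read_lock_bh();
	kp = lookup_keypair_by_index(idx);
	if (unlikely(!kp || !keypair_get(kp)))
		goto drop;	/* unknown index, or keypair already going away */
	rcu_read_unlock_bh();

	if (unlikely(enqueue_for_decryption(skb, kp)))
		dev_kfree_skb(skb);	/* queues full: drop rather than block */
	return;

drop:
	rcu_read_unlock_bh();
	dev_kfree_skb(skb);
}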
542 void wg_packet_receive(struct wg_device *wg, struct sk_buff *skb) in wg_packet_receive() argument
544 if (unlikely(prepare_skb_header(skb, wg) < 0)) in wg_packet_receive()
546 switch (SKB_TYPE_LE32(skb)) { in wg_packet_receive()
556 ret = __ptr_ring_produce(&wg->handshake_queue.ring, skb); in wg_packet_receive()
560 ret = ptr_ring_produce_bh(&wg->handshake_queue.ring, skb); in wg_packet_receive()
564 wg->dev->name, skb); in wg_packet_receive()
575 PACKET_CB(skb)->ds = ip_tunnel_get_dsfield(ip_hdr(skb), skb); in wg_packet_receive()
576 wg_packet_consume_data(wg, skb); in wg_packet_receive()
585 dev_kfree_skb(skb); in wg_packet_receive()
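Finally, the wg_packet_receive matches show the top-level dispatch after the header has been prepared and validated: handshake messages are pushed onto a ptr_ring and handled later by a workqueue, since they involve expensive public-key operations, while data messages record the outer DS field for later ECN decapsulation and enter the decryption path immediately. A reduced sketch of that dispatch, reusing SKB_TYPE_LE32 and the message types from the first sketch; packet_receive, queue_handshake_work, and begin_data_decryption are illustrative names, and the real code's under-load ring handling and CPU selection are omitted.

#include <linux/ip.h>
#include <linux/netdevice.h>
#include <linux/ptr_ring.h>
#include <linux/skbuff.h>
#include <net/ip_tunnels.h>

void queue_handshake_work(void);
void begin_data_decryption(struct sk_buff *skb, u8 outer_ds);

static void packet_receive(struct ptr_ring *handshake_ring, struct sk_buff *skb)
{
	switch (SKB_TYPE_LE32(skb)) {
	case cpu_to_le32(MESSAGE_HANDSHAKE_INITIATION):
	case cpu_to_le32(MESSAGE_HANDSHAKE_RESPONSE):
	case cpu_to_le32(MESSAGE_HANDSHAKE_COOKIE):
		/* _bh producer: this can be reached from softirq context. */
		if (ptr_ring_produce_bh(handshake_ring, skb)) {
			dev_kfree_skb(skb);	/* ring full under handshake load */
			return;
		}
		queue_handshake_work();	/* defer the expensive crypto */
		break;
	case cpu_to_le32(MESSAGE_DATA):
		/* Remember the outer TOS/traffic class for ECN decapsulation. */
		begin_data_decryption(skb, ip_tunnel_get_dsfield(ip_hdr(skb), skb));
		break;
	default:
		dev_kfree_skb(skb);	/* validated earlier, so unreachable */
	}
}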