// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2011 Instituto Nokia de Tecnologia
 *
 * Authors:
 *    Aloisio Almeida Jr <aloisio.almeida@openbossa.org>
 *    Lauro Ramos Venancio <lauro.venancio@openbossa.org>
 */
9
10 #define pr_fmt(fmt) KBUILD_MODNAME ": %s: " fmt, __func__
11
12 #include <net/tcp_states.h>
13 #include <linux/nfc.h>
14 #include <linux/export.h>
15 #include <linux/kcov.h>
16
17 #include "nfc.h"
18
/* All live SOCK_RAW sniffer sockets (linked in rawsock_create(), walked
 * by nfc_send_to_raw_sock()), protected by the embedded rwlock.
 */
static struct nfc_sock_list raw_sk_list = {
	.lock = __RW_LOCK_UNLOCKED(raw_sk_list.lock)
};
22
nfc_sock_link(struct nfc_sock_list * l,struct sock * sk)23 static void nfc_sock_link(struct nfc_sock_list *l, struct sock *sk)
24 {
25 write_lock(&l->lock);
26 sk_add_node(sk, &l->head);
27 write_unlock(&l->lock);
28 }
29
nfc_sock_unlink(struct nfc_sock_list * l,struct sock * sk)30 static void nfc_sock_unlink(struct nfc_sock_list *l, struct sock *sk)
31 {
32 write_lock(&l->lock);
33 sk_del_node_init(sk);
34 write_unlock(&l->lock);
35 }
36
/* Drop every queued TX skb and mark the TX worker as idle.
 *
 * tx_work_scheduled is cleared under the write queue lock (_bh
 * flavour), the same lock rawsock_sendmsg() and
 * rawsock_data_exchange_complete() hold when they test/set it.
 */
static void rawsock_write_queue_purge(struct sock *sk)
{
	pr_debug("sk=%p\n", sk);

	spin_lock_bh(&sk->sk_write_queue.lock);
	__skb_queue_purge(&sk->sk_write_queue);
	nfc_rawsock(sk)->tx_work_scheduled = false;
	spin_unlock_bh(&sk->sk_write_queue.lock);
}
46
/* Put the socket into an error state.
 *
 * @err is a negative errno; sk_err stores its positive value.  Both
 * directions are shut down, waiters are woken via sk_error_report()
 * and any pending TX data is discarded.
 */
static void rawsock_report_error(struct sock *sk, int err)
{
	pr_debug("sk=%p err=%d\n", sk, err);

	sk->sk_shutdown = SHUTDOWN_MASK;
	sk->sk_err = -err;
	sk_error_report(sk);

	rawsock_write_queue_purge(sk);
}
57
/* Socket ->release() handler.
 *
 * Unlinks SOCK_RAW sockets from the sniffer list and, for connected
 * sockets, quiesces the TX machinery before dropping the reference.
 * Final cleanup (target deactivation, receive queue purge) happens in
 * rawsock_destruct(), installed by rawsock_create().
 */
static int rawsock_release(struct socket *sock)
{
	struct sock *sk = sock->sk;

	pr_debug("sock=%p sk=%p\n", sock, sk);

	if (!sk)
		return 0;

	if (sock->type == SOCK_RAW)
		nfc_sock_unlink(&raw_sk_list, sk);

	if (sk->sk_state == TCP_ESTABLISHED) {
		/* Prevent rawsock_tx_work from starting new transmits and
		 * wait for any in-progress work to finish. This must happen
		 * before the socket is orphaned to avoid a race where
		 * rawsock_tx_work runs after the NCI device has been freed.
		 */
		sk->sk_shutdown |= SEND_SHUTDOWN;
		cancel_work_sync(&nfc_rawsock(sk)->tx_work);
		rawsock_write_queue_purge(sk);
	}

	sock_orphan(sk);
	sock_put(sk);

	return 0;
}
86
/* Socket ->connect() handler: bind this socket to one NFC target.
 *
 * Validates that @_addr is a big-enough AF_NFC sockaddr, takes a
 * reference on the device identified by dev_idx, checks target_idx
 * against the window of currently valid indices, and activates the
 * target for the requested protocol.  On success the device reference
 * is stashed in nfc_rawsock(sk)->dev and released later by
 * rawsock_destruct().  Returns 0 or a negative errno.
 */
static int rawsock_connect(struct socket *sock, struct sockaddr_unsized *_addr,
			   int len, int flags)
{
	struct sock *sk = sock->sk;
	struct sockaddr_nfc *addr = (struct sockaddr_nfc *)_addr;
	struct nfc_dev *dev;
	int rc = 0;

	pr_debug("sock=%p sk=%p flags=%d\n", sock, sk, flags);

	if (!addr || len < sizeof(struct sockaddr_nfc) ||
	    addr->sa_family != AF_NFC)
		return -EINVAL;

	pr_debug("addr dev_idx=%u target_idx=%u protocol=%u\n",
		 addr->dev_idx, addr->target_idx, addr->nfc_protocol);

	lock_sock(sk);

	if (sock->state == SS_CONNECTED) {
		rc = -EISCONN;
		goto error;
	}

	dev = nfc_get_device(addr->dev_idx);
	if (!dev) {
		rc = -ENODEV;
		goto error;
	}

	/* target_idx must fall inside
	 * [target_next_idx - n_targets, target_next_idx - 1],
	 * i.e. be one of the device's currently assigned target indices.
	 */
	if (addr->target_idx > dev->target_next_idx - 1 ||
	    addr->target_idx < dev->target_next_idx - dev->n_targets) {
		rc = -EINVAL;
		goto put_dev;
	}

	rc = nfc_activate_target(dev, addr->target_idx, addr->nfc_protocol);
	if (rc)
		goto put_dev;

	nfc_rawsock(sk)->dev = dev;
	nfc_rawsock(sk)->target_idx = addr->target_idx;
	sock->state = SS_CONNECTED;
	sk->sk_state = TCP_ESTABLISHED;
	sk->sk_state_change(sk);

	release_sock(sk);
	return 0;

put_dev:
	nfc_put_device(dev);
error:
	release_sock(sk);
	return rc;
}
142
rawsock_add_header(struct sk_buff * skb)143 static int rawsock_add_header(struct sk_buff *skb)
144 {
145 *(u8 *)skb_push(skb, NFC_HEADER_SIZE) = 0;
146
147 return 0;
148 }
149
/* Completion callback for nfc_data_exchange(); @context is the socket.
 * The socket reference taken by rawsock_tx_work() is dropped here on
 * every path.
 *
 * On success the response skb gets a header prepended and is queued on
 * the receive queue; then, under the write queue lock, the TX worker
 * is rescheduled if more data is pending, otherwise it is marked
 * idle.  Any failure puts the socket into an error state.
 */
static void rawsock_data_exchange_complete(void *context, struct sk_buff *skb,
					   int err)
{
	struct sock *sk = (struct sock *) context;

	/* Must not run in hard-IRQ context: _bh locks are taken below. */
	BUG_ON(in_hardirq());

	pr_debug("sk=%p err=%d\n", sk, err);

	if (err)
		goto error;

	err = rawsock_add_header(skb);
	if (err)
		goto error_skb;

	err = sock_queue_rcv_skb(sk, skb);
	if (err)
		goto error_skb;

	/* Keep the pipeline moving: one data exchange in flight at a time. */
	spin_lock_bh(&sk->sk_write_queue.lock);
	if (!skb_queue_empty(&sk->sk_write_queue))
		schedule_work(&nfc_rawsock(sk)->tx_work);
	else
		nfc_rawsock(sk)->tx_work_scheduled = false;
	spin_unlock_bh(&sk->sk_write_queue.lock);

	sock_put(sk);
	return;

error_skb:
	kfree_skb(skb);

error:
	rawsock_report_error(sk, err);
	sock_put(sk);
}
187
/* Workqueue handler: transmit one queued skb to the connected target
 * via nfc_data_exchange().
 *
 * Honors SEND_SHUTDOWN by purging the queue instead of transmitting.
 * A socket reference is taken before starting the exchange; it is
 * released by rawsock_data_exchange_complete(), or here if the
 * exchange could not be started.
 */
static void rawsock_tx_work(struct work_struct *work)
{
	struct sock *sk = to_rawsock_sk(work);
	struct nfc_dev *dev = nfc_rawsock(sk)->dev;
	u32 target_idx = nfc_rawsock(sk)->target_idx;
	struct sk_buff *skb;
	int rc;

	pr_debug("sk=%p target_idx=%u\n", sk, target_idx);

	if (sk->sk_shutdown & SEND_SHUTDOWN) {
		rawsock_write_queue_purge(sk);
		return;
	}

	skb = skb_dequeue(&sk->sk_write_queue);
	/* Attribute KCOV coverage of this exchange to the remote handle
	 * recorded in the skb when it was queued.
	 */
	kcov_remote_start_common(skb_get_kcov_handle(skb));

	sock_hold(sk);
	rc = nfc_data_exchange(dev, target_idx, skb,
			       rawsock_data_exchange_complete, sk);
	if (rc) {
		rawsock_report_error(sk, rc);
		sock_put(sk);
	}
	kcov_remote_stop();
}
215
/* Socket ->sendmsg() handler: queue @len bytes for transmission.
 *
 * Destination addresses are not supported (the socket is bound to a
 * target at connect time) and the socket must be connected.  The
 * payload is copied into a new skb, appended to the write queue, and
 * the TX worker is scheduled if it is not already pending
 * (tx_work_scheduled is tested and set under the write queue lock).
 * Returns @len on success or a negative errno.
 */
static int rawsock_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	struct nfc_dev *dev = nfc_rawsock(sk)->dev;
	struct sk_buff *skb;
	int rc;

	pr_debug("sock=%p sk=%p len=%zu\n", sock, sk, len);

	if (msg->msg_namelen)
		return -EOPNOTSUPP;

	if (sock->state != SS_CONNECTED)
		return -ENOTCONN;

	skb = nfc_alloc_send_skb(dev, sk, msg->msg_flags, len, &rc);
	if (skb == NULL)
		return rc;

	rc = memcpy_from_msg(skb_put(skb, len), msg, len);
	if (rc < 0) {
		kfree_skb(skb);
		return rc;
	}

	spin_lock_bh(&sk->sk_write_queue.lock);
	__skb_queue_tail(&sk->sk_write_queue, skb);
	if (!nfc_rawsock(sk)->tx_work_scheduled) {
		schedule_work(&nfc_rawsock(sk)->tx_work);
		nfc_rawsock(sk)->tx_work_scheduled = true;
	}
	spin_unlock_bh(&sk->sk_write_queue.lock);

	return len;
}
251
/* Socket ->recvmsg() handler: hand one queued datagram to user space.
 *
 * If the datagram is larger than the caller's buffer it is truncated
 * and MSG_TRUNC is set.  Returns the number of bytes copied or a
 * negative errno.
 */
static int rawsock_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
			   int flags)
{
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	int chunk, err;

	pr_debug("sock=%p sk=%p len=%zu flags=%d\n", sock, sk, len, flags);

	skb = skb_recv_datagram(sk, flags, &err);
	if (!skb)
		return err;

	chunk = skb->len;
	if (chunk > len) {
		chunk = len;
		msg->msg_flags |= MSG_TRUNC;
	}

	err = skb_copy_datagram_msg(skb, 0, msg, chunk);
	skb_free_datagram(sk, skb);

	if (err)
		return err;

	return chunk;
}
278
/* proto_ops for SOCK_SEQPACKET sockets: connect, sendmsg and recvmsg
 * are implemented; everything else is the sock_no_* stub.
 */
static const struct proto_ops rawsock_ops = {
	.family = PF_NFC,
	.owner = THIS_MODULE,
	.release = rawsock_release,
	.bind = sock_no_bind,
	.connect = rawsock_connect,
	.socketpair = sock_no_socketpair,
	.accept = sock_no_accept,
	.getname = sock_no_getname,
	.poll = datagram_poll,
	.ioctl = sock_no_ioctl,
	.listen = sock_no_listen,
	.shutdown = sock_no_shutdown,
	.sendmsg = rawsock_sendmsg,
	.recvmsg = rawsock_recvmsg,
	.mmap = sock_no_mmap,
};
296
/* proto_ops for SOCK_RAW sniffer sockets: receive-only — connect and
 * sendmsg are the sock_no_* stubs.
 */
static const struct proto_ops rawsock_raw_ops = {
	.family = PF_NFC,
	.owner = THIS_MODULE,
	.release = rawsock_release,
	.bind = sock_no_bind,
	.connect = sock_no_connect,
	.socketpair = sock_no_socketpair,
	.accept = sock_no_accept,
	.getname = sock_no_getname,
	.poll = datagram_poll,
	.ioctl = sock_no_ioctl,
	.listen = sock_no_listen,
	.shutdown = sock_no_shutdown,
	.sendmsg = sock_no_sendmsg,
	.recvmsg = rawsock_recvmsg,
	.mmap = sock_no_mmap,
};
314
/* sk->sk_destruct handler, run when the last socket reference is
 * dropped: deactivate the target and release the device reference
 * taken in rawsock_connect() (held only by connected sockets), then
 * free any skbs still sitting on the receive queue.
 */
static void rawsock_destruct(struct sock *sk)
{
	pr_debug("sk=%p\n", sk);

	if (sk->sk_state == TCP_ESTABLISHED) {
		nfc_deactivate_target(nfc_rawsock(sk)->dev,
				      nfc_rawsock(sk)->target_idx,
				      NFC_TARGET_MODE_IDLE);
		nfc_put_device(nfc_rawsock(sk)->dev);
	}

	skb_queue_purge(&sk->sk_receive_queue);

	if (!sock_flag(sk, SOCK_DEAD)) {
		/* Should never happen: destructing a still-live socket. */
		pr_err("Freeing alive NFC raw socket %p\n", sk);
		return;
	}
}
333
/* The raw NFC protocol's ->create() hook: set up a new socket.
 *
 * SOCK_SEQPACKET sockets get the full ops and a TX worker.  SOCK_RAW
 * sockets require CAP_NET_RAW, use the receive-only sniffer ops and
 * are linked into raw_sk_list so nfc_send_to_raw_sock() can deliver
 * traffic to them.  Returns 0 or a negative errno.
 */
static int rawsock_create(struct net *net, struct socket *sock,
			  const struct nfc_protocol *nfc_proto, int kern)
{
	struct sock *sk;

	pr_debug("sock=%p\n", sock);

	if ((sock->type != SOCK_SEQPACKET) && (sock->type != SOCK_RAW))
		return -ESOCKTNOSUPPORT;

	if (sock->type == SOCK_RAW) {
		if (!ns_capable(net->user_ns, CAP_NET_RAW))
			return -EPERM;
		sock->ops = &rawsock_raw_ops;
	} else {
		sock->ops = &rawsock_ops;
	}

	sk = sk_alloc(net, PF_NFC, GFP_ATOMIC, nfc_proto->proto, kern);
	if (!sk)
		return -ENOMEM;

	sock_init_data(sock, sk);
	sk->sk_protocol = nfc_proto->id;
	sk->sk_destruct = rawsock_destruct;
	sock->state = SS_UNCONNECTED;
	if (sock->type == SOCK_RAW)
		nfc_sock_link(&raw_sk_list, sk);
	else {
		INIT_WORK(&nfc_rawsock(sk)->tx_work, rawsock_tx_work);
		nfc_rawsock(sk)->tx_work_scheduled = false;
	}

	return 0;
}
369
/* Deliver a copy of @skb to every SOCK_RAW sniffer socket.
 *
 * The copy is made lazily — only once the first listener is found —
 * with NFC_RAW_HEADER_SIZE bytes of extra headroom for a raw header:
 * byte 0 is the device index (0xFF when @dev is NULL), byte 1 packs
 * the direction in bit 0 and the payload type in the remaining bits.
 * Each listener then gets its own clone; clones rejected by a
 * socket's receive queue are dropped silently.
 */
void nfc_send_to_raw_sock(struct nfc_dev *dev, struct sk_buff *skb,
			  u8 payload_type, u8 direction)
{
	struct sk_buff *skb_copy = NULL, *nskb;
	struct sock *sk;
	u8 *data;

	read_lock(&raw_sk_list.lock);

	sk_for_each(sk, &raw_sk_list.head) {
		if (!skb_copy) {
			skb_copy = __pskb_copy_fclone(skb, NFC_RAW_HEADER_SIZE,
						      GFP_ATOMIC, true);
			if (!skb_copy)
				continue;

			data = skb_push(skb_copy, NFC_RAW_HEADER_SIZE);

			data[0] = dev ? dev->idx : 0xFF;
			data[1] = direction & 0x01;
			data[1] |= (payload_type << 1);
		}

		nskb = skb_clone(skb_copy, GFP_ATOMIC);
		if (!nskb)
			continue;

		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}

	read_unlock(&raw_sk_list.lock);

	kfree_skb(skb_copy);
}
EXPORT_SYMBOL(nfc_send_to_raw_sock);
406
/* Per-socket memory layout: each sock is a struct nfc_rawsock. */
static struct proto rawsock_proto = {
	.name = "NFC_RAW",
	.owner = THIS_MODULE,
	.obj_size = sizeof(struct nfc_rawsock),
};
412
/* Descriptor registered with the NFC core; ->create is called for each
 * new NFC_SOCKPROTO_RAW socket.
 */
static const struct nfc_protocol rawsock_nfc_proto = {
	.id = NFC_SOCKPROTO_RAW,
	.proto = &rawsock_proto,
	.owner = THIS_MODULE,
	.create = rawsock_create
};
419
rawsock_init(void)420 int __init rawsock_init(void)
421 {
422 int rc;
423
424 rc = nfc_proto_register(&rawsock_nfc_proto);
425
426 return rc;
427 }
428
/* Unregister the raw socket protocol on module exit. */
void rawsock_exit(void)
{
	nfc_proto_unregister(&rawsock_nfc_proto);
}
433