xref: /linux/net/nfc/nci/core.c (revision 3932b9ca55b0be314a36d3e84faff3e823c081f5)
1 /*
2  *  The NFC Controller Interface is the communication protocol between an
3  *  NFC Controller (NFCC) and a Device Host (DH).
4  *
5  *  Copyright (C) 2011 Texas Instruments, Inc.
6  *
7  *  Written by Ilan Elias <ilane@ti.com>
8  *
9  *  Acknowledgements:
10  *  This file is based on hci_core.c, which was written
11  *  by Maxim Krasnyansky.
12  *
13  *  This program is free software; you can redistribute it and/or modify
14  *  it under the terms of the GNU General Public License version 2
15  *  as published by the Free Software Foundation
16  *
17  *  This program is distributed in the hope that it will be useful,
18  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
19  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
20  *  GNU General Public License for more details.
21  *
22  *  You should have received a copy of the GNU General Public License
23  *  along with this program; if not, see <http://www.gnu.org/licenses/>.
24  *
25  */
26 
27 #define pr_fmt(fmt) KBUILD_MODNAME ": %s: " fmt, __func__
28 
29 #include <linux/module.h>
30 #include <linux/types.h>
31 #include <linux/workqueue.h>
32 #include <linux/completion.h>
33 #include <linux/export.h>
34 #include <linux/sched.h>
35 #include <linux/bitops.h>
36 #include <linux/skbuff.h>
37 
38 #include "../nfc.h"
39 #include <net/nfc/nci.h>
40 #include <net/nfc/nci_core.h>
41 #include <linux/nfc.h>
42 
43 static void nci_cmd_work(struct work_struct *work);
44 static void nci_rx_work(struct work_struct *work);
45 static void nci_tx_work(struct work_struct *work);
46 
47 /* ---- NCI requests ---- */
48 
49 void nci_req_complete(struct nci_dev *ndev, int result)
50 {
51 	if (ndev->req_status == NCI_REQ_PEND) {
52 		ndev->req_result = result;
53 		ndev->req_status = NCI_REQ_DONE;
54 		complete(&ndev->req_completion);
55 	}
56 }
57 
58 static void nci_req_cancel(struct nci_dev *ndev, int err)
59 {
60 	if (ndev->req_status == NCI_REQ_PEND) {
61 		ndev->req_result = err;
62 		ndev->req_status = NCI_REQ_CANCELED;
63 		complete(&ndev->req_completion);
64 	}
65 }
66 
67 /* Execute request and wait for completion. */
68 static int __nci_request(struct nci_dev *ndev,
69 			 void (*req)(struct nci_dev *ndev, unsigned long opt),
70 			 unsigned long opt, __u32 timeout)
71 {
72 	int rc = 0;
73 	long completion_rc;
74 
75 	ndev->req_status = NCI_REQ_PEND;
76 
77 	reinit_completion(&ndev->req_completion);
78 	req(ndev, opt);
79 	completion_rc =
80 		wait_for_completion_interruptible_timeout(&ndev->req_completion,
81 							  timeout);
82 
83 	pr_debug("wait_for_completion return %ld\n", completion_rc);
84 
85 	if (completion_rc > 0) {
86 		switch (ndev->req_status) {
87 		case NCI_REQ_DONE:
88 			rc = nci_to_errno(ndev->req_result);
89 			break;
90 
91 		case NCI_REQ_CANCELED:
92 			rc = -ndev->req_result;
93 			break;
94 
95 		default:
96 			rc = -ETIMEDOUT;
97 			break;
98 		}
99 	} else {
100 		pr_err("wait_for_completion_interruptible_timeout failed %ld\n",
101 		       completion_rc);
102 
103 		rc = ((completion_rc == 0) ? (-ETIMEDOUT) : (completion_rc));
104 	}
105 
106 	ndev->req_status = ndev->req_result = 0;
107 
108 	return rc;
109 }
110 
111 static inline int nci_request(struct nci_dev *ndev,
112 			      void (*req)(struct nci_dev *ndev,
113 					  unsigned long opt),
114 			      unsigned long opt, __u32 timeout)
115 {
116 	int rc;
117 
118 	if (!test_bit(NCI_UP, &ndev->flags))
119 		return -ENETDOWN;
120 
121 	/* Serialize all requests */
122 	mutex_lock(&ndev->req_lock);
123 	rc = __nci_request(ndev, req, opt, timeout);
124 	mutex_unlock(&ndev->req_lock);
125 
126 	return rc;
127 }
128 
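/* Illustrative sketch (not part of the original file): the request
 * machinery above is driven by small "req" callbacks such as the ones
 * below.  The callback queues a command with nci_send_cmd(), the matching
 * response handler eventually calls nci_req_complete(), and nci_request()
 * blocks the caller until that happens or the timeout fires.
 * foo_init_req is a hypothetical name used only for this example.
 *
 *	static void foo_init_req(struct nci_dev *ndev, unsigned long opt)
 *	{
 *		nci_send_cmd(ndev, NCI_OP_CORE_INIT_CMD, 0, NULL);
 *	}
 *
 *	rc = nci_request(ndev, foo_init_req, 0,
 *			 msecs_to_jiffies(NCI_INIT_TIMEOUT));
 */
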
129 static void nci_reset_req(struct nci_dev *ndev, unsigned long opt)
130 {
131 	struct nci_core_reset_cmd cmd;
132 
133 	cmd.reset_type = NCI_RESET_TYPE_RESET_CONFIG;
134 	nci_send_cmd(ndev, NCI_OP_CORE_RESET_CMD, 1, &cmd);
135 }
136 
137 static void nci_init_req(struct nci_dev *ndev, unsigned long opt)
138 {
139 	nci_send_cmd(ndev, NCI_OP_CORE_INIT_CMD, 0, NULL);
140 }
141 
142 static void nci_init_complete_req(struct nci_dev *ndev, unsigned long opt)
143 {
144 	struct nci_rf_disc_map_cmd cmd;
145 	struct disc_map_config *cfg = cmd.mapping_configs;
146 	__u8 *num = &cmd.num_mapping_configs;
147 	int i;
148 
149 	/* set rf mapping configurations */
150 	*num = 0;
151 
152 	/* by default mapping is set to NCI_RF_INTERFACE_FRAME */
153 	for (i = 0; i < ndev->num_supported_rf_interfaces; i++) {
154 		if (ndev->supported_rf_interfaces[i] ==
155 		    NCI_RF_INTERFACE_ISO_DEP) {
156 			cfg[*num].rf_protocol = NCI_RF_PROTOCOL_ISO_DEP;
157 			cfg[*num].mode = NCI_DISC_MAP_MODE_POLL |
158 				NCI_DISC_MAP_MODE_LISTEN;
159 			cfg[*num].rf_interface = NCI_RF_INTERFACE_ISO_DEP;
160 			(*num)++;
161 		} else if (ndev->supported_rf_interfaces[i] ==
162 			   NCI_RF_INTERFACE_NFC_DEP) {
163 			cfg[*num].rf_protocol = NCI_RF_PROTOCOL_NFC_DEP;
164 			cfg[*num].mode = NCI_DISC_MAP_MODE_POLL |
165 				NCI_DISC_MAP_MODE_LISTEN;
166 			cfg[*num].rf_interface = NCI_RF_INTERFACE_NFC_DEP;
167 			(*num)++;
168 		}
169 
170 		if (*num == NCI_MAX_NUM_MAPPING_CONFIGS)
171 			break;
172 	}
173 
174 	nci_send_cmd(ndev, NCI_OP_RF_DISCOVER_MAP_CMD,
175 		     (1 + ((*num) * sizeof(struct disc_map_config))), &cmd);
176 }
177 
178 struct nci_set_config_param {
179 	__u8	id;
180 	size_t	len;
181 	__u8	*val;
182 };
183 
184 static void nci_set_config_req(struct nci_dev *ndev, unsigned long opt)
185 {
186 	struct nci_set_config_param *param = (struct nci_set_config_param *)opt;
187 	struct nci_core_set_config_cmd cmd;
188 
189 	BUG_ON(param->len > NCI_MAX_PARAM_LEN);
190 
191 	cmd.num_params = 1;
192 	cmd.param.id = param->id;
193 	cmd.param.len = param->len;
194 	memcpy(cmd.param.val, param->val, param->len);
195 
196 	nci_send_cmd(ndev, NCI_OP_CORE_SET_CONFIG_CMD, (3 + param->len), &cmd);
197 }
198 
199 static void nci_rf_discover_req(struct nci_dev *ndev, unsigned long opt)
200 {
201 	struct nci_rf_disc_cmd cmd;
202 	__u32 protocols = opt;
203 
204 	cmd.num_disc_configs = 0;
205 
206 	if ((cmd.num_disc_configs < NCI_MAX_NUM_RF_CONFIGS) &&
207 	    (protocols & NFC_PROTO_JEWEL_MASK ||
208 	     protocols & NFC_PROTO_MIFARE_MASK ||
209 	     protocols & NFC_PROTO_ISO14443_MASK ||
210 	     protocols & NFC_PROTO_NFC_DEP_MASK)) {
211 		cmd.disc_configs[cmd.num_disc_configs].rf_tech_and_mode =
212 			NCI_NFC_A_PASSIVE_POLL_MODE;
213 		cmd.disc_configs[cmd.num_disc_configs].frequency = 1;
214 		cmd.num_disc_configs++;
215 	}
216 
217 	if ((cmd.num_disc_configs < NCI_MAX_NUM_RF_CONFIGS) &&
218 	    (protocols & NFC_PROTO_ISO14443_B_MASK)) {
219 		cmd.disc_configs[cmd.num_disc_configs].rf_tech_and_mode =
220 			NCI_NFC_B_PASSIVE_POLL_MODE;
221 		cmd.disc_configs[cmd.num_disc_configs].frequency = 1;
222 		cmd.num_disc_configs++;
223 	}
224 
225 	if ((cmd.num_disc_configs < NCI_MAX_NUM_RF_CONFIGS) &&
226 	    (protocols & NFC_PROTO_FELICA_MASK ||
227 	     protocols & NFC_PROTO_NFC_DEP_MASK)) {
228 		cmd.disc_configs[cmd.num_disc_configs].rf_tech_and_mode =
229 			NCI_NFC_F_PASSIVE_POLL_MODE;
230 		cmd.disc_configs[cmd.num_disc_configs].frequency = 1;
231 		cmd.num_disc_configs++;
232 	}
233 
234 	nci_send_cmd(ndev, NCI_OP_RF_DISCOVER_CMD,
235 		     (1 + (cmd.num_disc_configs * sizeof(struct disc_config))),
236 		     &cmd);
237 }
238 
239 struct nci_rf_discover_select_param {
240 	__u8	rf_discovery_id;
241 	__u8	rf_protocol;
242 };
243 
244 static void nci_rf_discover_select_req(struct nci_dev *ndev, unsigned long opt)
245 {
246 	struct nci_rf_discover_select_param *param =
247 		(struct nci_rf_discover_select_param *)opt;
248 	struct nci_rf_discover_select_cmd cmd;
249 
250 	cmd.rf_discovery_id = param->rf_discovery_id;
251 	cmd.rf_protocol = param->rf_protocol;
252 
253 	switch (cmd.rf_protocol) {
254 	case NCI_RF_PROTOCOL_ISO_DEP:
255 		cmd.rf_interface = NCI_RF_INTERFACE_ISO_DEP;
256 		break;
257 
258 	case NCI_RF_PROTOCOL_NFC_DEP:
259 		cmd.rf_interface = NCI_RF_INTERFACE_NFC_DEP;
260 		break;
261 
262 	default:
263 		cmd.rf_interface = NCI_RF_INTERFACE_FRAME;
264 		break;
265 	}
266 
267 	nci_send_cmd(ndev, NCI_OP_RF_DISCOVER_SELECT_CMD,
268 		     sizeof(struct nci_rf_discover_select_cmd), &cmd);
269 }
270 
271 static void nci_rf_deactivate_req(struct nci_dev *ndev, unsigned long opt)
272 {
273 	struct nci_rf_deactivate_cmd cmd;
274 
275 	cmd.type = NCI_DEACTIVATE_TYPE_IDLE_MODE;
276 
277 	nci_send_cmd(ndev, NCI_OP_RF_DEACTIVATE_CMD,
278 		     sizeof(struct nci_rf_deactivate_cmd), &cmd);
279 }
280 
281 static int nci_open_device(struct nci_dev *ndev)
282 {
283 	int rc = 0;
284 
285 	mutex_lock(&ndev->req_lock);
286 
287 	if (test_bit(NCI_UP, &ndev->flags)) {
288 		rc = -EALREADY;
289 		goto done;
290 	}
291 
292 	if (ndev->ops->open(ndev)) {
293 		rc = -EIO;
294 		goto done;
295 	}
296 
297 	atomic_set(&ndev->cmd_cnt, 1);
298 
299 	set_bit(NCI_INIT, &ndev->flags);
300 
301 	rc = __nci_request(ndev, nci_reset_req, 0,
302 			   msecs_to_jiffies(NCI_RESET_TIMEOUT));
303 
304 	if (ndev->ops->setup)
305 		ndev->ops->setup(ndev);
306 
307 	if (!rc) {
308 		rc = __nci_request(ndev, nci_init_req, 0,
309 				   msecs_to_jiffies(NCI_INIT_TIMEOUT));
310 	}
311 
312 	if (!rc) {
313 		rc = __nci_request(ndev, nci_init_complete_req, 0,
314 				   msecs_to_jiffies(NCI_INIT_TIMEOUT));
315 	}
316 
317 	clear_bit(NCI_INIT, &ndev->flags);
318 
319 	if (!rc) {
320 		set_bit(NCI_UP, &ndev->flags);
321 		nci_clear_target_list(ndev);
322 		atomic_set(&ndev->state, NCI_IDLE);
323 	} else {
324 		/* Init failed, cleanup */
325 		skb_queue_purge(&ndev->cmd_q);
326 		skb_queue_purge(&ndev->rx_q);
327 		skb_queue_purge(&ndev->tx_q);
328 
329 		ndev->ops->close(ndev);
330 		ndev->flags = 0;
331 	}
332 
333 done:
334 	mutex_unlock(&ndev->req_lock);
335 	return rc;
336 }
337 
338 static int nci_close_device(struct nci_dev *ndev)
339 {
340 	nci_req_cancel(ndev, ENODEV);
341 	mutex_lock(&ndev->req_lock);
342 
343 	if (!test_and_clear_bit(NCI_UP, &ndev->flags)) {
344 		del_timer_sync(&ndev->cmd_timer);
345 		del_timer_sync(&ndev->data_timer);
346 		mutex_unlock(&ndev->req_lock);
347 		return 0;
348 	}
349 
350 	/* Drop RX and TX queues */
351 	skb_queue_purge(&ndev->rx_q);
352 	skb_queue_purge(&ndev->tx_q);
353 
354 	/* Flush RX and TX wq */
355 	flush_workqueue(ndev->rx_wq);
356 	flush_workqueue(ndev->tx_wq);
357 
358 	/* Reset device */
359 	skb_queue_purge(&ndev->cmd_q);
360 	atomic_set(&ndev->cmd_cnt, 1);
361 
362 	set_bit(NCI_INIT, &ndev->flags);
363 	__nci_request(ndev, nci_reset_req, 0,
364 		      msecs_to_jiffies(NCI_RESET_TIMEOUT));
365 	clear_bit(NCI_INIT, &ndev->flags);
366 
367 	del_timer_sync(&ndev->cmd_timer);
368 
369 	/* Flush cmd wq */
370 	flush_workqueue(ndev->cmd_wq);
371 
372 	/* After this point our queues are empty
373 	 * and no work is scheduled. */
374 	ndev->ops->close(ndev);
375 
376 	/* Clear flags */
377 	ndev->flags = 0;
378 
379 	mutex_unlock(&ndev->req_lock);
380 
381 	return 0;
382 }
383 
384 /* NCI command timer function */
385 static void nci_cmd_timer(unsigned long arg)
386 {
387 	struct nci_dev *ndev = (void *) arg;
388 
389 	atomic_set(&ndev->cmd_cnt, 1);
390 	queue_work(ndev->cmd_wq, &ndev->cmd_work);
391 }
392 
393 /* NCI data exchange timer function */
394 static void nci_data_timer(unsigned long arg)
395 {
396 	struct nci_dev *ndev = (void *) arg;
397 
398 	set_bit(NCI_DATA_EXCHANGE_TO, &ndev->flags);
399 	queue_work(ndev->rx_wq, &ndev->rx_work);
400 }
401 
402 static int nci_dev_up(struct nfc_dev *nfc_dev)
403 {
404 	struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
405 
406 	return nci_open_device(ndev);
407 }
408 
409 static int nci_dev_down(struct nfc_dev *nfc_dev)
410 {
411 	struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
412 
413 	return nci_close_device(ndev);
414 }
415 
416 int nci_set_config(struct nci_dev *ndev, __u8 id, size_t len, __u8 *val)
417 {
418 	struct nci_set_config_param param;
419 
420 	if (!val || !len)
421 		return 0;
422 
423 	param.id = id;
424 	param.len = len;
425 	param.val = val;
426 
427 	return __nci_request(ndev, nci_set_config_req, (unsigned long)&param,
428 			     msecs_to_jiffies(NCI_SET_CONFIG_TIMEOUT));
429 }
430 EXPORT_SYMBOL(nci_set_config);
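
/* Illustrative driver-side use of nci_set_config() (a sketch, not from the
 * original file; the parameter ID 0xa0 and its value are hypothetical):
 *
 *	__u8 val[] = { 0x01 };
 *	int rc;
 *
 *	rc = nci_set_config(ndev, 0xa0, sizeof(val), val);
 *	if (rc)
 *		pr_err("failed to set config param: %d\n", rc);
 */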
431 
432 static int nci_set_local_general_bytes(struct nfc_dev *nfc_dev)
433 {
434 	struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
435 	struct nci_set_config_param param;
436 
437 	param.val = nfc_get_local_general_bytes(nfc_dev, &param.len);
438 	if ((param.val == NULL) || (param.len == 0))
439 		return 0;
440 
441 	if (param.len > NFC_MAX_GT_LEN)
442 		return -EINVAL;
443 
444 	param.id = NCI_PN_ATR_REQ_GEN_BYTES;
445 
446 	return nci_request(ndev, nci_set_config_req, (unsigned long)&param,
447 			   msecs_to_jiffies(NCI_SET_CONFIG_TIMEOUT));
448 }
449 
450 static int nci_start_poll(struct nfc_dev *nfc_dev,
451 			  __u32 im_protocols, __u32 tm_protocols)
452 {
453 	struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
454 	int rc;
455 
456 	if ((atomic_read(&ndev->state) == NCI_DISCOVERY) ||
457 	    (atomic_read(&ndev->state) == NCI_W4_ALL_DISCOVERIES)) {
458 		pr_err("unable to start poll, since poll is already active\n");
459 		return -EBUSY;
460 	}
461 
462 	if (ndev->target_active_prot) {
463 		pr_err("there is an active target\n");
464 		return -EBUSY;
465 	}
466 
467 	if ((atomic_read(&ndev->state) == NCI_W4_HOST_SELECT) ||
468 	    (atomic_read(&ndev->state) == NCI_POLL_ACTIVE)) {
469 		pr_debug("target active or w4 select, implicitly deactivate\n");
470 
471 		rc = nci_request(ndev, nci_rf_deactivate_req, 0,
472 				 msecs_to_jiffies(NCI_RF_DEACTIVATE_TIMEOUT));
473 		if (rc)
474 			return -EBUSY;
475 	}
476 
477 	if (im_protocols & NFC_PROTO_NFC_DEP_MASK) {
478 		rc = nci_set_local_general_bytes(nfc_dev);
479 		if (rc) {
480 			pr_err("failed to set local general bytes\n");
481 			return rc;
482 		}
483 	}
484 
485 	rc = nci_request(ndev, nci_rf_discover_req, im_protocols,
486 			 msecs_to_jiffies(NCI_RF_DISC_TIMEOUT));
487 
488 	if (!rc)
489 		ndev->poll_prots = im_protocols;
490 
491 	return rc;
492 }
493 
494 static void nci_stop_poll(struct nfc_dev *nfc_dev)
495 {
496 	struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
497 
498 	if ((atomic_read(&ndev->state) != NCI_DISCOVERY) &&
499 	    (atomic_read(&ndev->state) != NCI_W4_ALL_DISCOVERIES)) {
500 		pr_err("unable to stop poll, since poll is not active\n");
501 		return;
502 	}
503 
504 	nci_request(ndev, nci_rf_deactivate_req, 0,
505 		    msecs_to_jiffies(NCI_RF_DEACTIVATE_TIMEOUT));
506 }
507 
508 static int nci_activate_target(struct nfc_dev *nfc_dev,
509 			       struct nfc_target *target, __u32 protocol)
510 {
511 	struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
512 	struct nci_rf_discover_select_param param;
513 	struct nfc_target *nci_target = NULL;
514 	int i;
515 	int rc = 0;
516 
517 	pr_debug("target_idx %d, protocol 0x%x\n", target->idx, protocol);
518 
519 	if ((atomic_read(&ndev->state) != NCI_W4_HOST_SELECT) &&
520 	    (atomic_read(&ndev->state) != NCI_POLL_ACTIVE)) {
521 		pr_err("there is no available target to activate\n");
522 		return -EINVAL;
523 	}
524 
525 	if (ndev->target_active_prot) {
526 		pr_err("there is already an active target\n");
527 		return -EBUSY;
528 	}
529 
530 	for (i = 0; i < ndev->n_targets; i++) {
531 		if (ndev->targets[i].idx == target->idx) {
532 			nci_target = &ndev->targets[i];
533 			break;
534 		}
535 	}
536 
537 	if (!nci_target) {
538 		pr_err("unable to find the selected target\n");
539 		return -EINVAL;
540 	}
541 
542 	if (!(nci_target->supported_protocols & (1 << protocol))) {
543 		pr_err("target does not support the requested protocol 0x%x\n",
544 		       protocol);
545 		return -EINVAL;
546 	}
547 
548 	if (atomic_read(&ndev->state) == NCI_W4_HOST_SELECT) {
549 		param.rf_discovery_id = nci_target->logical_idx;
550 
551 		if (protocol == NFC_PROTO_JEWEL)
552 			param.rf_protocol = NCI_RF_PROTOCOL_T1T;
553 		else if (protocol == NFC_PROTO_MIFARE)
554 			param.rf_protocol = NCI_RF_PROTOCOL_T2T;
555 		else if (protocol == NFC_PROTO_FELICA)
556 			param.rf_protocol = NCI_RF_PROTOCOL_T3T;
557 		else if (protocol == NFC_PROTO_ISO14443 ||
558 			 protocol == NFC_PROTO_ISO14443_B)
559 			param.rf_protocol = NCI_RF_PROTOCOL_ISO_DEP;
560 		else
561 			param.rf_protocol = NCI_RF_PROTOCOL_NFC_DEP;
562 
563 		rc = nci_request(ndev, nci_rf_discover_select_req,
564 				 (unsigned long)&param,
565 				 msecs_to_jiffies(NCI_RF_DISC_SELECT_TIMEOUT));
566 	}
567 
568 	if (!rc)
569 		ndev->target_active_prot = protocol;
570 
571 	return rc;
572 }
573 
574 static void nci_deactivate_target(struct nfc_dev *nfc_dev,
575 				  struct nfc_target *target)
576 {
577 	struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
578 
579 	pr_debug("entry\n");
580 
581 	if (!ndev->target_active_prot) {
582 		pr_err("unable to deactivate target, no active target\n");
583 		return;
584 	}
585 
586 	ndev->target_active_prot = 0;
587 
588 	if (atomic_read(&ndev->state) == NCI_POLL_ACTIVE) {
589 		nci_request(ndev, nci_rf_deactivate_req, 0,
590 			    msecs_to_jiffies(NCI_RF_DEACTIVATE_TIMEOUT));
591 	}
592 }
593 
594 static int nci_dep_link_up(struct nfc_dev *nfc_dev, struct nfc_target *target,
595 			   __u8 comm_mode, __u8 *gb, size_t gb_len)
596 {
597 	struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
598 	int rc;
599 
600 	pr_debug("target_idx %d, comm_mode %d\n", target->idx, comm_mode);
601 
602 	rc = nci_activate_target(nfc_dev, target, NFC_PROTO_NFC_DEP);
603 	if (rc)
604 		return rc;
605 
606 	rc = nfc_set_remote_general_bytes(nfc_dev, ndev->remote_gb,
607 					  ndev->remote_gb_len);
608 	if (!rc)
609 		rc = nfc_dep_link_is_up(nfc_dev, target->idx, NFC_COMM_PASSIVE,
610 					NFC_RF_INITIATOR);
611 
612 	return rc;
613 }
614 
615 static int nci_dep_link_down(struct nfc_dev *nfc_dev)
616 {
617 	pr_debug("entry\n");
618 
619 	nci_deactivate_target(nfc_dev, NULL);
620 
621 	return 0;
622 }
623 
624 
625 static int nci_transceive(struct nfc_dev *nfc_dev, struct nfc_target *target,
626 			  struct sk_buff *skb,
627 			  data_exchange_cb_t cb, void *cb_context)
628 {
629 	struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
630 	int rc;
631 
632 	pr_debug("target_idx %d, len %d\n", target->idx, skb->len);
633 
634 	if (!ndev->target_active_prot) {
635 		pr_err("unable to exchange data, no active target\n");
636 		return -EINVAL;
637 	}
638 
639 	if (test_and_set_bit(NCI_DATA_EXCHANGE, &ndev->flags))
640 		return -EBUSY;
641 
642 	/* store cb and context to be used on receiving data */
643 	ndev->data_exchange_cb = cb;
644 	ndev->data_exchange_cb_context = cb_context;
645 
646 	rc = nci_send_data(ndev, NCI_STATIC_RF_CONN_ID, skb);
647 	if (rc)
648 		clear_bit(NCI_DATA_EXCHANGE, &ndev->flags);
649 
650 	return rc;
651 }
652 
653 static int nci_enable_se(struct nfc_dev *nfc_dev, u32 se_idx)
654 {
655 	return 0;
656 }
657 
658 static int nci_disable_se(struct nfc_dev *nfc_dev, u32 se_idx)
659 {
660 	return 0;
661 }
662 
663 static int nci_discover_se(struct nfc_dev *nfc_dev)
664 {
665 	return 0;
666 }
667 
668 static struct nfc_ops nci_nfc_ops = {
669 	.dev_up = nci_dev_up,
670 	.dev_down = nci_dev_down,
671 	.start_poll = nci_start_poll,
672 	.stop_poll = nci_stop_poll,
673 	.dep_link_up = nci_dep_link_up,
674 	.dep_link_down = nci_dep_link_down,
675 	.activate_target = nci_activate_target,
676 	.deactivate_target = nci_deactivate_target,
677 	.im_transceive = nci_transceive,
678 	.enable_se = nci_enable_se,
679 	.disable_se = nci_disable_se,
680 	.discover_se = nci_discover_se,
681 };
682 
683 /* ---- Interface to NCI drivers ---- */
684 
685 /**
686  * nci_allocate_device - allocate a new nci device
687  *
688  * @ops: device operations
689  * @supported_protocols: NFC protocols supported by the device
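 * @tx_headroom: Reserved space at the beginning of outgoing skbs
 * @tx_tailroom: Reserved space at the end of outgoing skbs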
690  */
691 struct nci_dev *nci_allocate_device(struct nci_ops *ops,
692 				    __u32 supported_protocols,
693 				    int tx_headroom, int tx_tailroom)
694 {
695 	struct nci_dev *ndev;
696 
697 	pr_debug("supported_protocols 0x%x\n", supported_protocols);
698 
699 	if (!ops->open || !ops->close || !ops->send)
700 		return NULL;
701 
702 	if (!supported_protocols)
703 		return NULL;
704 
705 	ndev = kzalloc(sizeof(struct nci_dev), GFP_KERNEL);
706 	if (!ndev)
707 		return NULL;
708 
709 	ndev->ops = ops;
710 	ndev->tx_headroom = tx_headroom;
711 	ndev->tx_tailroom = tx_tailroom;
712 	init_completion(&ndev->req_completion);
713 
714 	ndev->nfc_dev = nfc_allocate_device(&nci_nfc_ops,
715 					    supported_protocols,
716 					    tx_headroom + NCI_DATA_HDR_SIZE,
717 					    tx_tailroom);
718 	if (!ndev->nfc_dev)
719 		goto free_exit;
720 
721 	nfc_set_drvdata(ndev->nfc_dev, ndev);
722 
723 	return ndev;
724 
725 free_exit:
726 	kfree(ndev);
727 	return NULL;
728 }
729 EXPORT_SYMBOL(nci_allocate_device);
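
/* Illustrative sketch of the driver side of nci_allocate_device() (not part
 * of the original file; foo_open/foo_close/foo_send and the headroom value
 * are hypothetical driver code):
 *
 *	static struct nci_ops foo_nci_ops = {
 *		.open  = foo_open,
 *		.close = foo_close,
 *		.send  = foo_send,
 *	};
 *
 *	ndev = nci_allocate_device(&foo_nci_ops, NFC_PROTO_ISO14443_MASK,
 *				   4, 0);
 *	if (!ndev)
 *		return -ENOMEM;
 */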
730 
731 /**
732  * nci_free_device - deallocate nci device
733  *
734  * @ndev: The nci device to deallocate
735  */
736 void nci_free_device(struct nci_dev *ndev)
737 {
738 	nfc_free_device(ndev->nfc_dev);
739 	kfree(ndev);
740 }
741 EXPORT_SYMBOL(nci_free_device);
742 
743 /**
744  * nci_register_device - register an nci device with the nfc subsystem
745  *
746  * @ndev: The nci device to register
747  */
748 int nci_register_device(struct nci_dev *ndev)
749 {
750 	int rc;
751 	struct device *dev = &ndev->nfc_dev->dev;
752 	char name[32];
753 
754 	rc = nfc_register_device(ndev->nfc_dev);
755 	if (rc)
756 		goto exit;
757 
758 	ndev->flags = 0;
759 
760 	INIT_WORK(&ndev->cmd_work, nci_cmd_work);
761 	snprintf(name, sizeof(name), "%s_nci_cmd_wq", dev_name(dev));
762 	ndev->cmd_wq = create_singlethread_workqueue(name);
763 	if (!ndev->cmd_wq) {
764 		rc = -ENOMEM;
765 		goto unreg_exit;
766 	}
767 
768 	INIT_WORK(&ndev->rx_work, nci_rx_work);
769 	snprintf(name, sizeof(name), "%s_nci_rx_wq", dev_name(dev));
770 	ndev->rx_wq = create_singlethread_workqueue(name);
771 	if (!ndev->rx_wq) {
772 		rc = -ENOMEM;
773 		goto destroy_cmd_wq_exit;
774 	}
775 
776 	INIT_WORK(&ndev->tx_work, nci_tx_work);
777 	snprintf(name, sizeof(name), "%s_nci_tx_wq", dev_name(dev));
778 	ndev->tx_wq = create_singlethread_workqueue(name);
779 	if (!ndev->tx_wq) {
780 		rc = -ENOMEM;
781 		goto destroy_rx_wq_exit;
782 	}
783 
784 	skb_queue_head_init(&ndev->cmd_q);
785 	skb_queue_head_init(&ndev->rx_q);
786 	skb_queue_head_init(&ndev->tx_q);
787 
788 	setup_timer(&ndev->cmd_timer, nci_cmd_timer,
789 		    (unsigned long) ndev);
790 	setup_timer(&ndev->data_timer, nci_data_timer,
791 		    (unsigned long) ndev);
792 
793 	mutex_init(&ndev->req_lock);
794 
795 	goto exit;
796 
797 destroy_rx_wq_exit:
798 	destroy_workqueue(ndev->rx_wq);
799 
800 destroy_cmd_wq_exit:
801 	destroy_workqueue(ndev->cmd_wq);
802 
803 unreg_exit:
804 	nfc_unregister_device(ndev->nfc_dev);
805 
806 exit:
807 	return rc;
808 }
809 EXPORT_SYMBOL(nci_register_device);
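
/* Illustrative sketch (not from the original file): typical probe/remove
 * ordering in a driver built on this interface.  The surrounding driver
 * code is hypothetical:
 *
 *	rc = nci_register_device(ndev);
 *	if (rc) {
 *		nci_free_device(ndev);
 *		return rc;
 *	}
 *
 *	...
 *
 *	nci_unregister_device(ndev);
 *	nci_free_device(ndev);
 */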
810 
811 /**
812  * nci_unregister_device - unregister an nci device from the nfc subsystem
813  *
814  * @ndev: The nci device to unregister
815  */
816 void nci_unregister_device(struct nci_dev *ndev)
817 {
818 	nci_close_device(ndev);
819 
820 	destroy_workqueue(ndev->cmd_wq);
821 	destroy_workqueue(ndev->rx_wq);
822 	destroy_workqueue(ndev->tx_wq);
823 
824 	nfc_unregister_device(ndev->nfc_dev);
825 }
826 EXPORT_SYMBOL(nci_unregister_device);
827 
828 /**
829  * nci_recv_frame - receive frame from NCI drivers
830  *
831  * @ndev: The nci device
832  * @skb: The sk_buff to receive
833  */
834 int nci_recv_frame(struct nci_dev *ndev, struct sk_buff *skb)
835 {
836 	pr_debug("len %d\n", skb->len);
837 
838 	if (!ndev || (!test_bit(NCI_UP, &ndev->flags) &&
839 	    !test_bit(NCI_INIT, &ndev->flags))) {
840 		kfree_skb(skb);
841 		return -ENXIO;
842 	}
843 
844 	/* Queue frame for rx worker thread */
845 	skb_queue_tail(&ndev->rx_q, skb);
846 	queue_work(ndev->rx_wq, &ndev->rx_work);
847 
848 	return 0;
849 }
850 EXPORT_SYMBOL(nci_recv_frame);
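
/* Illustrative sketch of a driver RX path handing a frame to
 * nci_recv_frame() (not part of the original file; the transport buffer
 * and frame_len handling are hypothetical):
 *
 *	skb = nci_skb_alloc(ndev, frame_len, GFP_KERNEL);
 *	if (!skb)
 *		return -ENOMEM;
 *
 *	memcpy(skb_put(skb, frame_len), buf, frame_len);
 *	nci_recv_frame(ndev, skb);
 *
 * nci_recv_frame() takes ownership of the skb: it frees it itself when the
 * device is neither up nor initializing.
 */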
851 
852 static int nci_send_frame(struct nci_dev *ndev, struct sk_buff *skb)
853 {
854 	pr_debug("len %d\n", skb->len);
855 
856 	if (!ndev) {
857 		kfree_skb(skb);
858 		return -ENODEV;
859 	}
860 
861 	/* Get rid of skb owner, prior to sending to the driver. */
862 	skb_orphan(skb);
863 
864 	/* Send copy to sniffer */
865 	nfc_send_to_raw_sock(ndev->nfc_dev, skb,
866 			     RAW_PAYLOAD_NCI, NFC_DIRECTION_TX);
867 
868 	return ndev->ops->send(ndev, skb);
869 }
870 
871 /* Send NCI command */
872 int nci_send_cmd(struct nci_dev *ndev, __u16 opcode, __u8 plen, void *payload)
873 {
874 	struct nci_ctrl_hdr *hdr;
875 	struct sk_buff *skb;
876 
877 	pr_debug("opcode 0x%x, plen %d\n", opcode, plen);
878 
879 	skb = nci_skb_alloc(ndev, (NCI_CTRL_HDR_SIZE + plen), GFP_KERNEL);
880 	if (!skb) {
881 		pr_err("no memory for command\n");
882 		return -ENOMEM;
883 	}
884 
885 	hdr = (struct nci_ctrl_hdr *) skb_put(skb, NCI_CTRL_HDR_SIZE);
886 	hdr->gid = nci_opcode_gid(opcode);
887 	hdr->oid = nci_opcode_oid(opcode);
888 	hdr->plen = plen;
889 
890 	nci_mt_set((__u8 *)hdr, NCI_MT_CMD_PKT);
891 	nci_pbf_set((__u8 *)hdr, NCI_PBF_LAST);
892 
893 	if (plen)
894 		memcpy(skb_put(skb, plen), payload, plen);
895 
896 	skb_queue_tail(&ndev->cmd_q, skb);
897 	queue_work(ndev->cmd_wq, &ndev->cmd_work);
898 
899 	return 0;
900 }
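
/* Illustrative sketch: queueing a vendor-specific command with
 * nci_send_cmd() (not from the original file).  The OID and payload are
 * hypothetical; NCI_GID_PROPRIETARY and nci_opcode_pack() are assumed to
 * be provided by <net/nfc/nci.h>:
 *
 *	__u8 payload[] = { 0x01, 0x02 };
 *
 *	rc = nci_send_cmd(ndev, nci_opcode_pack(NCI_GID_PROPRIETARY, 0x00),
 *			  sizeof(payload), payload);
 *
 * The command is only queued here; nci_cmd_work() below transmits it once
 * the command window (cmd_cnt) allows.
 */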
901 
902 /* ---- NCI TX Data worker thread ---- */
903 
904 static void nci_tx_work(struct work_struct *work)
905 {
906 	struct nci_dev *ndev = container_of(work, struct nci_dev, tx_work);
907 	struct sk_buff *skb;
908 
909 	pr_debug("credits_cnt %d\n", atomic_read(&ndev->credits_cnt));
910 
911 	/* Send queued tx data */
912 	while (atomic_read(&ndev->credits_cnt)) {
913 		skb = skb_dequeue(&ndev->tx_q);
914 		if (!skb)
915 			return;
916 
917 		/* Check if data flow control is used */
918 		if (atomic_read(&ndev->credits_cnt) !=
919 		    NCI_DATA_FLOW_CONTROL_NOT_USED)
920 			atomic_dec(&ndev->credits_cnt);
921 
922 		pr_debug("NCI TX: MT=data, PBF=%d, conn_id=%d, plen=%d\n",
923 			 nci_pbf(skb->data),
924 			 nci_conn_id(skb->data),
925 			 nci_plen(skb->data));
926 
927 		nci_send_frame(ndev, skb);
928 
929 		mod_timer(&ndev->data_timer,
930 			  jiffies + msecs_to_jiffies(NCI_DATA_TIMEOUT));
931 	}
932 }
933 
934 /* ----- NCI RX worker thread (data & control) ----- */
935 
936 static void nci_rx_work(struct work_struct *work)
937 {
938 	struct nci_dev *ndev = container_of(work, struct nci_dev, rx_work);
939 	struct sk_buff *skb;
940 
941 	while ((skb = skb_dequeue(&ndev->rx_q))) {
942 
943 		/* Send copy to sniffer */
944 		nfc_send_to_raw_sock(ndev->nfc_dev, skb,
945 				     RAW_PAYLOAD_NCI, NFC_DIRECTION_RX);
946 
947 		/* Process frame */
948 		switch (nci_mt(skb->data)) {
949 		case NCI_MT_RSP_PKT:
950 			nci_rsp_packet(ndev, skb);
951 			break;
952 
953 		case NCI_MT_NTF_PKT:
954 			nci_ntf_packet(ndev, skb);
955 			break;
956 
957 		case NCI_MT_DATA_PKT:
958 			nci_rx_data_packet(ndev, skb);
959 			break;
960 
961 		default:
962 			pr_err("unknown MT 0x%x\n", nci_mt(skb->data));
963 			kfree_skb(skb);
964 			break;
965 		}
966 	}
967 
968 	/* check if a data exchange timeout has occurred */
969 	if (test_bit(NCI_DATA_EXCHANGE_TO, &ndev->flags)) {
970 		/* complete the data exchange transaction, if one exists */
971 		if (test_bit(NCI_DATA_EXCHANGE, &ndev->flags))
972 			nci_data_exchange_complete(ndev, NULL, -ETIMEDOUT);
973 
974 		clear_bit(NCI_DATA_EXCHANGE_TO, &ndev->flags);
975 	}
976 }
977 
978 /* ----- NCI TX CMD worker thread ----- */
979 
980 static void nci_cmd_work(struct work_struct *work)
981 {
982 	struct nci_dev *ndev = container_of(work, struct nci_dev, cmd_work);
983 	struct sk_buff *skb;
984 
985 	pr_debug("cmd_cnt %d\n", atomic_read(&ndev->cmd_cnt));
986 
987 	/* Send queued command */
988 	if (atomic_read(&ndev->cmd_cnt)) {
989 		skb = skb_dequeue(&ndev->cmd_q);
990 		if (!skb)
991 			return;
992 
993 		atomic_dec(&ndev->cmd_cnt);
994 
995 		pr_debug("NCI TX: MT=cmd, PBF=%d, GID=0x%x, OID=0x%x, plen=%d\n",
996 			 nci_pbf(skb->data),
997 			 nci_opcode_gid(nci_opcode(skb->data)),
998 			 nci_opcode_oid(nci_opcode(skb->data)),
999 			 nci_plen(skb->data));
1000 
1001 		nci_send_frame(ndev, skb);
1002 
1003 		mod_timer(&ndev->cmd_timer,
1004 			  jiffies + msecs_to_jiffies(NCI_CMD_TIMEOUT));
1005 	}
1006 }
1007 
1008 MODULE_LICENSE("GPL");
1009