/*
 *  The NFC Controller Interface is the communication protocol between an
 *  NFC Controller (NFCC) and a Device Host (DH).
 *
 *  Copyright (C) 2011 Texas Instruments, Inc.
 *
 *  Written by Ilan Elias <ilane@ti.com>
 *
 *  Acknowledgements:
 *  This file is based on hci_core.c, which was written
 *  by Maxim Krasnyansky.
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License version 2
 *  as published by the Free Software Foundation
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": %s: " fmt, __func__

#include <linux/types.h>
#include <linux/workqueue.h>
#include <linux/completion.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/bitops.h>
#include <linux/skbuff.h>

#include "../nfc.h"
#include <net/nfc/nci.h>
#include <net/nfc/nci_core.h>
#include <linux/nfc.h>

static void nci_cmd_work(struct work_struct *work);
static void nci_rx_work(struct work_struct *work);
static void nci_tx_work(struct work_struct *work);

/* ---- NCI requests ---- */

void nci_req_complete(struct nci_dev *ndev, int result)
{
	if (ndev->req_status == NCI_REQ_PEND) {
		ndev->req_result = result;
		ndev->req_status = NCI_REQ_DONE;
		complete(&ndev->req_completion);
	}
}

static void nci_req_cancel(struct nci_dev *ndev, int err)
{
	if (ndev->req_status == NCI_REQ_PEND) {
		ndev->req_result = err;
		ndev->req_status = NCI_REQ_CANCELED;
		complete(&ndev->req_completion);
	}
}

/* Execute request and wait for completion. */
static int __nci_request(struct nci_dev *ndev,
			 void (*req)(struct nci_dev *ndev, unsigned long opt),
			 unsigned long opt, __u32 timeout)
{
	int rc = 0;
	long completion_rc;

	ndev->req_status = NCI_REQ_PEND;

	init_completion(&ndev->req_completion);
	req(ndev, opt);
	completion_rc =
		wait_for_completion_interruptible_timeout(&ndev->req_completion,
							  timeout);

	pr_debug("wait_for_completion returned %ld\n", completion_rc);

	if (completion_rc > 0) {
		switch (ndev->req_status) {
		case NCI_REQ_DONE:
			rc = nci_to_errno(ndev->req_result);
			break;

		case NCI_REQ_CANCELED:
			rc = -ndev->req_result;
			break;

		default:
			rc = -ETIMEDOUT;
			break;
		}
	} else {
		pr_err("wait_for_completion_interruptible_timeout failed %ld\n",
		       completion_rc);

		rc = ((completion_rc == 0) ? (-ETIMEDOUT) : (completion_rc));
	}

	ndev->req_status = ndev->req_result = 0;

	return rc;
}

static inline int nci_request(struct nci_dev *ndev,
			      void (*req)(struct nci_dev *ndev,
					  unsigned long opt),
			      unsigned long opt, __u32 timeout)
{
	int rc;

	if (!test_bit(NCI_UP, &ndev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	mutex_lock(&ndev->req_lock);
	rc = __nci_request(ndev, req, opt, timeout);
	mutex_unlock(&ndev->req_lock);

	return rc;
}
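
/*
 * Note on the request flow: each req() callback below only builds and
 * queues an NCI command via nci_send_cmd().  The matching response or
 * notification handler (see nci_rsp.c / nci_ntf.c) calls
 * nci_req_complete(), which wakes up __nci_request() waiting on
 * req_completion.  nci_req_cancel() is used on close to abort a request
 * that is still pending.
 */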

static void nci_reset_req(struct nci_dev *ndev, unsigned long opt)
{
	struct nci_core_reset_cmd cmd;

	cmd.reset_type = NCI_RESET_TYPE_RESET_CONFIG;
	nci_send_cmd(ndev, NCI_OP_CORE_RESET_CMD, 1, &cmd);
}

static void nci_init_req(struct nci_dev *ndev, unsigned long opt)
{
	nci_send_cmd(ndev, NCI_OP_CORE_INIT_CMD, 0, NULL);
}

static void nci_init_complete_req(struct nci_dev *ndev, unsigned long opt)
{
	struct nci_rf_disc_map_cmd cmd;
	struct disc_map_config *cfg = cmd.mapping_configs;
	__u8 *num = &cmd.num_mapping_configs;
	int i;

	/* set rf mapping configurations */
	*num = 0;

	/* by default mapping is set to NCI_RF_INTERFACE_FRAME */
	for (i = 0; i < ndev->num_supported_rf_interfaces; i++) {
		if (ndev->supported_rf_interfaces[i] ==
		    NCI_RF_INTERFACE_ISO_DEP) {
			cfg[*num].rf_protocol = NCI_RF_PROTOCOL_ISO_DEP;
			cfg[*num].mode = NCI_DISC_MAP_MODE_POLL |
				NCI_DISC_MAP_MODE_LISTEN;
			cfg[*num].rf_interface = NCI_RF_INTERFACE_ISO_DEP;
			(*num)++;
		} else if (ndev->supported_rf_interfaces[i] ==
			   NCI_RF_INTERFACE_NFC_DEP) {
			cfg[*num].rf_protocol = NCI_RF_PROTOCOL_NFC_DEP;
			cfg[*num].mode = NCI_DISC_MAP_MODE_POLL |
				NCI_DISC_MAP_MODE_LISTEN;
			cfg[*num].rf_interface = NCI_RF_INTERFACE_NFC_DEP;
			(*num)++;
		}

		if (*num == NCI_MAX_NUM_MAPPING_CONFIGS)
			break;
	}

	nci_send_cmd(ndev, NCI_OP_RF_DISCOVER_MAP_CMD,
		     (1 + ((*num) * sizeof(struct disc_map_config))), &cmd);
}

static void nci_rf_discover_req(struct nci_dev *ndev, unsigned long opt)
{
	struct nci_rf_disc_cmd cmd;
	__u32 protocols = opt;

	cmd.num_disc_configs = 0;

	if ((cmd.num_disc_configs < NCI_MAX_NUM_RF_CONFIGS) &&
	    (protocols & NFC_PROTO_JEWEL_MASK
	     || protocols & NFC_PROTO_MIFARE_MASK
	     || protocols & NFC_PROTO_ISO14443_MASK
	     || protocols & NFC_PROTO_NFC_DEP_MASK)) {
		cmd.disc_configs[cmd.num_disc_configs].rf_tech_and_mode =
			NCI_NFC_A_PASSIVE_POLL_MODE;
		cmd.disc_configs[cmd.num_disc_configs].frequency = 1;
		cmd.num_disc_configs++;
	}

	if ((cmd.num_disc_configs < NCI_MAX_NUM_RF_CONFIGS) &&
	    (protocols & NFC_PROTO_ISO14443_MASK)) {
		cmd.disc_configs[cmd.num_disc_configs].rf_tech_and_mode =
			NCI_NFC_B_PASSIVE_POLL_MODE;
		cmd.disc_configs[cmd.num_disc_configs].frequency = 1;
		cmd.num_disc_configs++;
	}

	if ((cmd.num_disc_configs < NCI_MAX_NUM_RF_CONFIGS) &&
	    (protocols & NFC_PROTO_FELICA_MASK
	     || protocols & NFC_PROTO_NFC_DEP_MASK)) {
		cmd.disc_configs[cmd.num_disc_configs].rf_tech_and_mode =
			NCI_NFC_F_PASSIVE_POLL_MODE;
		cmd.disc_configs[cmd.num_disc_configs].frequency = 1;
		cmd.num_disc_configs++;
	}

	nci_send_cmd(ndev, NCI_OP_RF_DISCOVER_CMD,
		     (1 + (cmd.num_disc_configs * sizeof(struct disc_config))),
		     &cmd);
}

struct nci_rf_discover_select_param {
	__u8	rf_discovery_id;
	__u8	rf_protocol;
};

static void nci_rf_discover_select_req(struct nci_dev *ndev, unsigned long opt)
{
	struct nci_rf_discover_select_param *param =
		(struct nci_rf_discover_select_param *)opt;
	struct nci_rf_discover_select_cmd cmd;

	cmd.rf_discovery_id = param->rf_discovery_id;
	cmd.rf_protocol = param->rf_protocol;

	switch (cmd.rf_protocol) {
	case NCI_RF_PROTOCOL_ISO_DEP:
		cmd.rf_interface = NCI_RF_INTERFACE_ISO_DEP;
		break;

	case NCI_RF_PROTOCOL_NFC_DEP:
		cmd.rf_interface = NCI_RF_INTERFACE_NFC_DEP;
		break;

	default:
		cmd.rf_interface = NCI_RF_INTERFACE_FRAME;
		break;
	}

	nci_send_cmd(ndev, NCI_OP_RF_DISCOVER_SELECT_CMD,
		     sizeof(struct nci_rf_discover_select_cmd), &cmd);
}

static void nci_rf_deactivate_req(struct nci_dev *ndev, unsigned long opt)
{
	struct nci_rf_deactivate_cmd cmd;

	cmd.type = NCI_DEACTIVATE_TYPE_IDLE_MODE;

	nci_send_cmd(ndev, NCI_OP_RF_DEACTIVATE_CMD,
		     sizeof(struct nci_rf_deactivate_cmd), &cmd);
}

static int nci_open_device(struct nci_dev *ndev)
{
	int rc = 0;

	mutex_lock(&ndev->req_lock);

	if (test_bit(NCI_UP, &ndev->flags)) {
		rc = -EALREADY;
		goto done;
	}

	if (ndev->ops->open(ndev)) {
		rc = -EIO;
		goto done;
	}

	atomic_set(&ndev->cmd_cnt, 1);

	set_bit(NCI_INIT, &ndev->flags);

	rc = __nci_request(ndev, nci_reset_req, 0,
			   msecs_to_jiffies(NCI_RESET_TIMEOUT));

	if (!rc) {
		rc = __nci_request(ndev, nci_init_req, 0,
				   msecs_to_jiffies(NCI_INIT_TIMEOUT));
	}

	if (!rc) {
		rc = __nci_request(ndev, nci_init_complete_req, 0,
				   msecs_to_jiffies(NCI_INIT_TIMEOUT));
	}

	clear_bit(NCI_INIT, &ndev->flags);

	if (!rc) {
		set_bit(NCI_UP, &ndev->flags);
		nci_clear_target_list(ndev);
		atomic_set(&ndev->state, NCI_IDLE);
	} else {
		/* Init failed, cleanup */
		skb_queue_purge(&ndev->cmd_q);
		skb_queue_purge(&ndev->rx_q);
		skb_queue_purge(&ndev->tx_q);

		ndev->ops->close(ndev);
		ndev->flags = 0;
	}

done:
	mutex_unlock(&ndev->req_lock);
	return rc;
}

static int nci_close_device(struct nci_dev *ndev)
{
	nci_req_cancel(ndev, ENODEV);
	mutex_lock(&ndev->req_lock);

	if (!test_and_clear_bit(NCI_UP, &ndev->flags)) {
		del_timer_sync(&ndev->cmd_timer);
		del_timer_sync(&ndev->data_timer);
		mutex_unlock(&ndev->req_lock);
		return 0;
	}

	/* Drop RX and TX queues */
	skb_queue_purge(&ndev->rx_q);
	skb_queue_purge(&ndev->tx_q);

	/* Flush RX and TX wq */
	flush_workqueue(ndev->rx_wq);
	flush_workqueue(ndev->tx_wq);

	/* Reset device */
	skb_queue_purge(&ndev->cmd_q);
	atomic_set(&ndev->cmd_cnt, 1);

	set_bit(NCI_INIT, &ndev->flags);
	__nci_request(ndev, nci_reset_req, 0,
		      msecs_to_jiffies(NCI_RESET_TIMEOUT));
	clear_bit(NCI_INIT, &ndev->flags);

	/* Flush cmd wq */
	flush_workqueue(ndev->cmd_wq);

	/* After this point our queues are empty
	 * and no work is scheduled.
	 */
	ndev->ops->close(ndev);

	/* Clear flags */
	ndev->flags = 0;

	mutex_unlock(&ndev->req_lock);

	return 0;
}

/* NCI command timer function */
static void nci_cmd_timer(unsigned long arg)
{
	struct nci_dev *ndev = (void *) arg;

	atomic_set(&ndev->cmd_cnt, 1);
	queue_work(ndev->cmd_wq, &ndev->cmd_work);
}

/* NCI data exchange timer function */
static void nci_data_timer(unsigned long arg)
{
	struct nci_dev *ndev = (void *) arg;

	set_bit(NCI_DATA_EXCHANGE_TO, &ndev->flags);
	queue_work(ndev->rx_wq, &ndev->rx_work);
}

static int nci_dev_up(struct nfc_dev *nfc_dev)
{
	struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);

	return nci_open_device(ndev);
}

static int nci_dev_down(struct nfc_dev *nfc_dev)
{
	struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);

	return nci_close_device(ndev);
}

static int nci_start_poll(struct nfc_dev *nfc_dev, __u32 protocols)
{
	struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
	int rc;

	if ((atomic_read(&ndev->state) == NCI_DISCOVERY) ||
	    (atomic_read(&ndev->state) == NCI_W4_ALL_DISCOVERIES)) {
		pr_err("unable to start poll, since poll is already active\n");
		return -EBUSY;
	}

	if (ndev->target_active_prot) {
		pr_err("there is an active target\n");
		return -EBUSY;
	}

	if ((atomic_read(&ndev->state) == NCI_W4_HOST_SELECT) ||
	    (atomic_read(&ndev->state) == NCI_POLL_ACTIVE)) {
		pr_debug("target active or w4 select, implicitly deactivate\n");

		rc = nci_request(ndev, nci_rf_deactivate_req, 0,
				 msecs_to_jiffies(NCI_RF_DEACTIVATE_TIMEOUT));
		if (rc)
			return -EBUSY;
	}

	rc = nci_request(ndev, nci_rf_discover_req, protocols,
			 msecs_to_jiffies(NCI_RF_DISC_TIMEOUT));

	if (!rc)
		ndev->poll_prots = protocols;

	return rc;
}

static void nci_stop_poll(struct nfc_dev *nfc_dev)
{
	struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);

	if ((atomic_read(&ndev->state) != NCI_DISCOVERY) &&
	    (atomic_read(&ndev->state) != NCI_W4_ALL_DISCOVERIES)) {
		pr_err("unable to stop poll, since poll is not active\n");
		return;
	}

	nci_request(ndev, nci_rf_deactivate_req, 0,
		    msecs_to_jiffies(NCI_RF_DEACTIVATE_TIMEOUT));
}

static int nci_activate_target(struct nfc_dev *nfc_dev,
			       struct nfc_target *target, __u32 protocol)
{
	struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
	struct nci_rf_discover_select_param param;
	struct nfc_target *nci_target = NULL;
	int i;
	int rc = 0;

	pr_debug("target_idx %d, protocol 0x%x\n", target->idx, protocol);

	if ((atomic_read(&ndev->state) != NCI_W4_HOST_SELECT) &&
	    (atomic_read(&ndev->state) != NCI_POLL_ACTIVE)) {
		pr_err("there is no available target to activate\n");
		return -EINVAL;
	}

	if (ndev->target_active_prot) {
		pr_err("there is already an active target\n");
		return -EBUSY;
	}

	for (i = 0; i < ndev->n_targets; i++) {
		if (ndev->targets[i].idx == target->idx) {
			nci_target = &ndev->targets[i];
			break;
		}
	}

	if (!nci_target) {
		pr_err("unable to find the selected target\n");
		return -EINVAL;
	}

	if (!(nci_target->supported_protocols & (1 << protocol))) {
		pr_err("target does not support the requested protocol 0x%x\n",
		       protocol);
		return -EINVAL;
	}

	if (atomic_read(&ndev->state) == NCI_W4_HOST_SELECT) {
		param.rf_discovery_id = nci_target->logical_idx;

		if (protocol == NFC_PROTO_JEWEL)
			param.rf_protocol = NCI_RF_PROTOCOL_T1T;
		else if (protocol == NFC_PROTO_MIFARE)
			param.rf_protocol = NCI_RF_PROTOCOL_T2T;
		else if (protocol == NFC_PROTO_FELICA)
			param.rf_protocol = NCI_RF_PROTOCOL_T3T;
		else if (protocol == NFC_PROTO_ISO14443)
			param.rf_protocol = NCI_RF_PROTOCOL_ISO_DEP;
		else
			param.rf_protocol = NCI_RF_PROTOCOL_NFC_DEP;

		rc = nci_request(ndev, nci_rf_discover_select_req,
				 (unsigned long)&param,
				 msecs_to_jiffies(NCI_RF_DISC_SELECT_TIMEOUT));
	}

	if (!rc)
		ndev->target_active_prot = protocol;

	return rc;
}

static void nci_deactivate_target(struct nfc_dev *nfc_dev,
				  struct nfc_target *target)
{
	struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);

	pr_debug("target_idx %d\n", target->idx);

	if (!ndev->target_active_prot) {
		pr_err("unable to deactivate target, no active target\n");
		return;
	}

	ndev->target_active_prot = 0;

	if (atomic_read(&ndev->state) == NCI_POLL_ACTIVE) {
		nci_request(ndev, nci_rf_deactivate_req, 0,
			    msecs_to_jiffies(NCI_RF_DEACTIVATE_TIMEOUT));
	}
}

static int nci_data_exchange(struct nfc_dev *nfc_dev, struct nfc_target *target,
			     struct sk_buff *skb,
			     data_exchange_cb_t cb, void *cb_context)
{
	struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
	int rc;

	pr_debug("target_idx %d, len %d\n", target->idx, skb->len);

	if (!ndev->target_active_prot) {
		pr_err("unable to exchange data, no active target\n");
		return -EINVAL;
	}

	if (test_and_set_bit(NCI_DATA_EXCHANGE, &ndev->flags))
		return -EBUSY;

	/* store cb and context to be used on receiving data */
	ndev->data_exchange_cb = cb;
	ndev->data_exchange_cb_context = cb_context;

	rc = nci_send_data(ndev, NCI_STATIC_RF_CONN_ID, skb);
	if (rc)
		clear_bit(NCI_DATA_EXCHANGE, &ndev->flags);

	return rc;
}

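/*
 * These hooks are handed to the NFC core via nfc_allocate_device() in
 * nci_allocate_device() below; the core calls them (dev_up, start_poll,
 * data_exchange, ...) to drive the controller on behalf of user space.
 */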
static struct nfc_ops nci_nfc_ops = {
	.dev_up = nci_dev_up,
	.dev_down = nci_dev_down,
	.start_poll = nci_start_poll,
	.stop_poll = nci_stop_poll,
	.activate_target = nci_activate_target,
	.deactivate_target = nci_deactivate_target,
	.data_exchange = nci_data_exchange,
};

/* ---- Interface to NCI drivers ---- */

/**
 * nci_allocate_device - allocate a new nci device
 *
 * @ops: device operations
 * @supported_protocols: NFC protocols supported by the device
 * @tx_headroom: Reserved driver headroom for outgoing frames
 * @tx_tailroom: Reserved driver tailroom for outgoing frames
 */
struct nci_dev *nci_allocate_device(struct nci_ops *ops,
				    __u32 supported_protocols,
				    int tx_headroom, int tx_tailroom)
{
	struct nci_dev *ndev;

	pr_debug("supported_protocols 0x%x\n", supported_protocols);

	if (!ops->open || !ops->close || !ops->send)
		return NULL;

	if (!supported_protocols)
		return NULL;

	ndev = kzalloc(sizeof(struct nci_dev), GFP_KERNEL);
	if (!ndev)
		return NULL;

	ndev->ops = ops;
	ndev->tx_headroom = tx_headroom;
	ndev->tx_tailroom = tx_tailroom;

	ndev->nfc_dev = nfc_allocate_device(&nci_nfc_ops,
					    supported_protocols,
					    tx_headroom + NCI_DATA_HDR_SIZE,
					    tx_tailroom);
	if (!ndev->nfc_dev)
		goto free_exit;

	nfc_set_drvdata(ndev->nfc_dev, ndev);

	return ndev;

free_exit:
	kfree(ndev);
	return NULL;
}
EXPORT_SYMBOL(nci_allocate_device);
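
/*
 * Typical driver usage (illustrative sketch only; my_nci_open/close/send
 * and the MY_* lengths are made-up placeholders, not part of this API):
 *
 *	static struct nci_ops my_nci_ops = {
 *		.open  = my_nci_open,
 *		.close = my_nci_close,
 *		.send  = my_nci_send,
 *	};
 *
 *	ndev = nci_allocate_device(&my_nci_ops, NFC_PROTO_ISO14443_MASK,
 *				   MY_HDR_LEN, MY_TAIL_LEN);
 *	if (!ndev)
 *		return -ENOMEM;
 *	rc = nci_register_device(ndev);
 *	if (rc)
 *		nci_free_device(ndev);
 */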

/**
 * nci_free_device - deallocate nci device
 *
 * @ndev: The nci device to deallocate
 */
void nci_free_device(struct nci_dev *ndev)
{
	nfc_free_device(ndev->nfc_dev);
	kfree(ndev);
}
EXPORT_SYMBOL(nci_free_device);

/**
 * nci_register_device - register an nci device in the nfc subsystem
 *
 * @ndev: The nci device to register
 */
int nci_register_device(struct nci_dev *ndev)
{
	int rc;
	struct device *dev = &ndev->nfc_dev->dev;
	char name[32];

	rc = nfc_register_device(ndev->nfc_dev);
	if (rc)
		goto exit;

	ndev->flags = 0;

	INIT_WORK(&ndev->cmd_work, nci_cmd_work);
	snprintf(name, sizeof(name), "%s_nci_cmd_wq", dev_name(dev));
	ndev->cmd_wq = create_singlethread_workqueue(name);
	if (!ndev->cmd_wq) {
		rc = -ENOMEM;
		goto unreg_exit;
	}

	INIT_WORK(&ndev->rx_work, nci_rx_work);
	snprintf(name, sizeof(name), "%s_nci_rx_wq", dev_name(dev));
	ndev->rx_wq = create_singlethread_workqueue(name);
	if (!ndev->rx_wq) {
		rc = -ENOMEM;
		goto destroy_cmd_wq_exit;
	}

	INIT_WORK(&ndev->tx_work, nci_tx_work);
	snprintf(name, sizeof(name), "%s_nci_tx_wq", dev_name(dev));
	ndev->tx_wq = create_singlethread_workqueue(name);
	if (!ndev->tx_wq) {
		rc = -ENOMEM;
		goto destroy_rx_wq_exit;
	}

	skb_queue_head_init(&ndev->cmd_q);
	skb_queue_head_init(&ndev->rx_q);
	skb_queue_head_init(&ndev->tx_q);

	setup_timer(&ndev->cmd_timer, nci_cmd_timer,
		    (unsigned long) ndev);
	setup_timer(&ndev->data_timer, nci_data_timer,
		    (unsigned long) ndev);

	mutex_init(&ndev->req_lock);

	goto exit;

destroy_rx_wq_exit:
	destroy_workqueue(ndev->rx_wq);

destroy_cmd_wq_exit:
	destroy_workqueue(ndev->cmd_wq);

unreg_exit:
	nfc_unregister_device(ndev->nfc_dev);

exit:
	return rc;
}
EXPORT_SYMBOL(nci_register_device);

/**
 * nci_unregister_device - unregister an nci device from the nfc subsystem
 *
 * @ndev: The nci device to unregister
 */
void nci_unregister_device(struct nci_dev *ndev)
{
	nci_close_device(ndev);

	destroy_workqueue(ndev->cmd_wq);
	destroy_workqueue(ndev->rx_wq);
	destroy_workqueue(ndev->tx_wq);

	nfc_unregister_device(ndev->nfc_dev);
}
EXPORT_SYMBOL(nci_unregister_device);

/**
 * nci_recv_frame - receive frame from NCI drivers
 *
 * @skb: The sk_buff to receive
 */
int nci_recv_frame(struct sk_buff *skb)
{
	struct nci_dev *ndev = (struct nci_dev *) skb->dev;

	pr_debug("len %d\n", skb->len);

	if (!ndev || (!test_bit(NCI_UP, &ndev->flags)
		      && !test_bit(NCI_INIT, &ndev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Queue frame for rx worker thread */
	skb_queue_tail(&ndev->rx_q, skb);
	queue_work(ndev->rx_wq, &ndev->rx_work);

	return 0;
}
EXPORT_SYMBOL(nci_recv_frame);
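
/*
 * A driver delivering a frame is expected to stash its struct nci_dev
 * pointer in skb->dev before calling nci_recv_frame(), e.g. (sketch,
 * with drv->ndev standing in for however the driver stores the pointer):
 *
 *	skb->dev = (void *) drv->ndev;
 *	rc = nci_recv_frame(skb);
 */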

static int nci_send_frame(struct sk_buff *skb)
{
	struct nci_dev *ndev = (struct nci_dev *) skb->dev;

	pr_debug("len %d\n", skb->len);

	if (!ndev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	return ndev->ops->send(skb);
}

/* Send NCI command */
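/*
 * The 3-byte control header built below follows the NCI control packet
 * layout: octet 0 carries MT and PBF together with the GID, octet 1 the
 * OID, and octet 2 the payload length.
 */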
int nci_send_cmd(struct nci_dev *ndev, __u16 opcode, __u8 plen, void *payload)
{
	struct nci_ctrl_hdr *hdr;
	struct sk_buff *skb;

	pr_debug("opcode 0x%x, plen %d\n", opcode, plen);

	skb = nci_skb_alloc(ndev, (NCI_CTRL_HDR_SIZE + plen), GFP_KERNEL);
	if (!skb) {
		pr_err("no memory for command\n");
		return -ENOMEM;
	}

	hdr = (struct nci_ctrl_hdr *) skb_put(skb, NCI_CTRL_HDR_SIZE);
	hdr->gid = nci_opcode_gid(opcode);
	hdr->oid = nci_opcode_oid(opcode);
	hdr->plen = plen;

	nci_mt_set((__u8 *)hdr, NCI_MT_CMD_PKT);
	nci_pbf_set((__u8 *)hdr, NCI_PBF_LAST);

	if (plen)
		memcpy(skb_put(skb, plen), payload, plen);

	skb->dev = (void *) ndev;

	skb_queue_tail(&ndev->cmd_q, skb);
	queue_work(ndev->cmd_wq, &ndev->cmd_work);

	return 0;
}

/* ---- NCI TX Data worker thread ---- */

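/*
 * Data packets are sent only while credits are available.  Credits are
 * granted by the NFCC (CORE_CONN_CREDITS_NTF, handled in nci_ntf.c),
 * which re-queues this work; the data timer armed below acts as a
 * watchdog for exchanges that never complete.
 */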
static void nci_tx_work(struct work_struct *work)
{
	struct nci_dev *ndev = container_of(work, struct nci_dev, tx_work);
	struct sk_buff *skb;

	pr_debug("credits_cnt %d\n", atomic_read(&ndev->credits_cnt));

	/* Send queued tx data */
	while (atomic_read(&ndev->credits_cnt)) {
		skb = skb_dequeue(&ndev->tx_q);
		if (!skb)
			return;

		/* Check if data flow control is used */
		if (atomic_read(&ndev->credits_cnt) !=
		    NCI_DATA_FLOW_CONTROL_NOT_USED)
			atomic_dec(&ndev->credits_cnt);

		pr_debug("NCI TX: MT=data, PBF=%d, conn_id=%d, plen=%d\n",
			 nci_pbf(skb->data),
			 nci_conn_id(skb->data),
			 nci_plen(skb->data));

		nci_send_frame(skb);

		mod_timer(&ndev->data_timer,
			  jiffies + msecs_to_jiffies(NCI_DATA_TIMEOUT));
	}
}

/* ----- NCI RX worker thread (data & control) ----- */

static void nci_rx_work(struct work_struct *work)
{
	struct nci_dev *ndev = container_of(work, struct nci_dev, rx_work);
	struct sk_buff *skb;

	while ((skb = skb_dequeue(&ndev->rx_q))) {
		/* Process frame */
		switch (nci_mt(skb->data)) {
		case NCI_MT_RSP_PKT:
			nci_rsp_packet(ndev, skb);
			break;

		case NCI_MT_NTF_PKT:
			nci_ntf_packet(ndev, skb);
			break;

		case NCI_MT_DATA_PKT:
			nci_rx_data_packet(ndev, skb);
			break;

		default:
			pr_err("unknown MT 0x%x\n", nci_mt(skb->data));
			kfree_skb(skb);
			break;
		}
	}

	/* check if a data exchange timeout has occurred */
	if (test_bit(NCI_DATA_EXCHANGE_TO, &ndev->flags)) {
		/* complete the data exchange transaction, if one exists */
		if (test_bit(NCI_DATA_EXCHANGE, &ndev->flags))
			nci_data_exchange_complete(ndev, NULL, -ETIMEDOUT);

		clear_bit(NCI_DATA_EXCHANGE_TO, &ndev->flags);
	}
}

/* ----- NCI TX CMD worker thread ----- */

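/*
 * Only one NCI command may be outstanding at a time: cmd_cnt acts as a
 * single credit, consumed here and restored when the response arrives
 * (or when the command watchdog timer above fires).
 */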
static void nci_cmd_work(struct work_struct *work)
{
	struct nci_dev *ndev = container_of(work, struct nci_dev, cmd_work);
	struct sk_buff *skb;

	pr_debug("cmd_cnt %d\n", atomic_read(&ndev->cmd_cnt));

	/* Send queued command */
	if (atomic_read(&ndev->cmd_cnt)) {
		skb = skb_dequeue(&ndev->cmd_q);
		if (!skb)
			return;

		atomic_dec(&ndev->cmd_cnt);

		pr_debug("NCI TX: MT=cmd, PBF=%d, GID=0x%x, OID=0x%x, plen=%d\n",
			 nci_pbf(skb->data),
			 nci_opcode_gid(nci_opcode(skb->data)),
			 nci_opcode_oid(nci_opcode(skb->data)),
			 nci_plen(skb->data));

		nci_send_frame(skb);

		mod_timer(&ndev->cmd_timer,
			  jiffies + msecs_to_jiffies(NCI_CMD_TIMEOUT));
	}
}
881