/* xref: /linux/net/nfc/nci/core.c (revision f2ee442115c9b6219083c019939a9cc0c9abb2f8) */
/*
 *  The NFC Controller Interface is the communication protocol between an
 *  NFC Controller (NFCC) and a Device Host (DH).
 *
 *  Copyright (C) 2011 Texas Instruments, Inc.
 *
 *  Written by Ilan Elias <ilane@ti.com>
 *
 *  Acknowledgements:
 *  This file is based on hci_core.c, which was written
 *  by Maxim Krasnyansky.
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License version 2
 *  as published by the Free Software Foundation
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */

#include <linux/types.h>
#include <linux/workqueue.h>
#include <linux/completion.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/bitops.h>
#include <linux/skbuff.h>

#include "../nfc.h"
#include <net/nfc/nci.h>
#include <net/nfc/nci_core.h>
#include <linux/nfc.h>

static void nci_cmd_work(struct work_struct *work);
static void nci_rx_work(struct work_struct *work);
static void nci_tx_work(struct work_struct *work);

/* ---- NCI requests ---- */

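/* Called from the response/notification handlers once the command issued
 * by a pending request has been answered: record the NCI status and wake
 * up the waiter in __nci_request().
 */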
void nci_req_complete(struct nci_dev *ndev, int result)
{
	if (ndev->req_status == NCI_REQ_PEND) {
		ndev->req_result = result;
		ndev->req_status = NCI_REQ_DONE;
		complete(&ndev->req_completion);
	}
}

static void nci_req_cancel(struct nci_dev *ndev, int err)
{
	if (ndev->req_status == NCI_REQ_PEND) {
		ndev->req_result = err;
		ndev->req_status = NCI_REQ_CANCELED;
		complete(&ndev->req_completion);
	}
}

/* Execute request and wait for completion. */
static int __nci_request(struct nci_dev *ndev,
	void (*req)(struct nci_dev *ndev, unsigned long opt),
	unsigned long opt,
	__u32 timeout)
{
	int rc = 0;
	long completion_rc;

	ndev->req_status = NCI_REQ_PEND;

	init_completion(&ndev->req_completion);
	req(ndev, opt);
	completion_rc = wait_for_completion_interruptible_timeout(
							&ndev->req_completion,
							timeout);

	nfc_dbg("wait_for_completion return %ld", completion_rc);

	if (completion_rc > 0) {
		switch (ndev->req_status) {
		case NCI_REQ_DONE:
			rc = nci_to_errno(ndev->req_result);
			break;

		case NCI_REQ_CANCELED:
			rc = -ndev->req_result;
			break;

		default:
			rc = -ETIMEDOUT;
			break;
		}
	} else {
		nfc_err("wait_for_completion_interruptible_timeout failed %ld",
			completion_rc);

		rc = ((completion_rc == 0) ? (-ETIMEDOUT) : (completion_rc));
	}

	ndev->req_status = ndev->req_result = 0;

	return rc;
}

static inline int nci_request(struct nci_dev *ndev,
		void (*req)(struct nci_dev *ndev, unsigned long opt),
		unsigned long opt, __u32 timeout)
{
	int rc;

	if (!test_bit(NCI_UP, &ndev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	mutex_lock(&ndev->req_lock);
	rc = __nci_request(ndev, req, opt, timeout);
	mutex_unlock(&ndev->req_lock);

	return rc;
}

static void nci_reset_req(struct nci_dev *ndev, unsigned long opt)
{
	nci_send_cmd(ndev, NCI_OP_CORE_RESET_CMD, 0, NULL);
}

static void nci_init_req(struct nci_dev *ndev, unsigned long opt)
{
	nci_send_cmd(ndev, NCI_OP_CORE_INIT_CMD, 0, NULL);
}

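/* Final step of device initialization: create the static RF connection and
 * set up the RF discovery mapping for the ISO-DEP and NFC-DEP interfaces
 * reported by the NFCC (anything else keeps the default
 * NCI_RF_INTERFACE_FRAME mapping).
 */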
static void nci_init_complete_req(struct nci_dev *ndev, unsigned long opt)
{
	struct nci_core_conn_create_cmd conn_cmd;
	struct nci_rf_disc_map_cmd cmd;
	struct disc_map_config *cfg = cmd.mapping_configs;
	__u8 *num = &cmd.num_mapping_configs;
	int i;

	/* create static rf connection */
	conn_cmd.target_handle = 0;
	conn_cmd.num_target_specific_params = 0;
	nci_send_cmd(ndev, NCI_OP_CORE_CONN_CREATE_CMD, 2, &conn_cmd);

	/* set rf mapping configurations */
	*num = 0;

	/* by default mapping is set to NCI_RF_INTERFACE_FRAME */
	for (i = 0; i < ndev->num_supported_rf_interfaces; i++) {
		if (ndev->supported_rf_interfaces[i] ==
			NCI_RF_INTERFACE_ISO_DEP) {
			cfg[*num].rf_protocol = NCI_RF_PROTOCOL_ISO_DEP;
			cfg[*num].mode = NCI_DISC_MAP_MODE_BOTH;
			cfg[*num].rf_interface_type = NCI_RF_INTERFACE_ISO_DEP;
			(*num)++;
		} else if (ndev->supported_rf_interfaces[i] ==
			NCI_RF_INTERFACE_NFC_DEP) {
			cfg[*num].rf_protocol = NCI_RF_PROTOCOL_NFC_DEP;
			cfg[*num].mode = NCI_DISC_MAP_MODE_BOTH;
			cfg[*num].rf_interface_type = NCI_RF_INTERFACE_NFC_DEP;
			(*num)++;
		}

		if (*num == NCI_MAX_NUM_MAPPING_CONFIGS)
			break;
	}

	nci_send_cmd(ndev, NCI_OP_RF_DISCOVER_MAP_CMD,
		(1 + ((*num)*sizeof(struct disc_map_config))),
		&cmd);
}

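/* Translate the NFC protocol mask requested by the core into NCI RF
 * discovery configurations (poll A/B/F passive) and send RF_DISCOVER_CMD.
 */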
static void nci_rf_discover_req(struct nci_dev *ndev, unsigned long opt)
{
	struct nci_rf_disc_cmd cmd;
	__u32 protocols = opt;

	cmd.num_disc_configs = 0;

	if ((cmd.num_disc_configs < NCI_MAX_NUM_RF_CONFIGS) &&
		(protocols & NFC_PROTO_JEWEL_MASK
		|| protocols & NFC_PROTO_MIFARE_MASK
		|| protocols & NFC_PROTO_ISO14443_MASK
		|| protocols & NFC_PROTO_NFC_DEP_MASK)) {
		cmd.disc_configs[cmd.num_disc_configs].type =
		NCI_DISCOVERY_TYPE_POLL_A_PASSIVE;
		cmd.disc_configs[cmd.num_disc_configs].frequency = 1;
		cmd.num_disc_configs++;
	}

	if ((cmd.num_disc_configs < NCI_MAX_NUM_RF_CONFIGS) &&
		(protocols & NFC_PROTO_ISO14443_MASK)) {
		cmd.disc_configs[cmd.num_disc_configs].type =
		NCI_DISCOVERY_TYPE_POLL_B_PASSIVE;
		cmd.disc_configs[cmd.num_disc_configs].frequency = 1;
		cmd.num_disc_configs++;
	}

	if ((cmd.num_disc_configs < NCI_MAX_NUM_RF_CONFIGS) &&
		(protocols & NFC_PROTO_FELICA_MASK
		|| protocols & NFC_PROTO_NFC_DEP_MASK)) {
		cmd.disc_configs[cmd.num_disc_configs].type =
		NCI_DISCOVERY_TYPE_POLL_F_PASSIVE;
		cmd.disc_configs[cmd.num_disc_configs].frequency = 1;
		cmd.num_disc_configs++;
	}

	nci_send_cmd(ndev, NCI_OP_RF_DISCOVER_CMD,
		(1 + (cmd.num_disc_configs*sizeof(struct disc_config))),
		&cmd);
}

static void nci_rf_deactivate_req(struct nci_dev *ndev, unsigned long opt)
{
	struct nci_rf_deactivate_cmd cmd;

	cmd.type = NCI_DEACTIVATE_TYPE_IDLE_MODE;

	nci_send_cmd(ndev, NCI_OP_RF_DEACTIVATE_CMD,
			sizeof(struct nci_rf_deactivate_cmd),
			&cmd);
}

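/* Bring the device up: open the driver transport, then run the
 * CORE_RESET -> CORE_INIT -> init-complete request chain with the command
 * flow control primed for a single outstanding command.
 */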
static int nci_open_device(struct nci_dev *ndev)
{
	int rc = 0;

	mutex_lock(&ndev->req_lock);

	if (test_bit(NCI_UP, &ndev->flags)) {
		rc = -EALREADY;
		goto done;
	}

	if (ndev->ops->open(ndev)) {
		rc = -EIO;
		goto done;
	}

	atomic_set(&ndev->cmd_cnt, 1);

	set_bit(NCI_INIT, &ndev->flags);

	rc = __nci_request(ndev, nci_reset_req, 0,
				msecs_to_jiffies(NCI_RESET_TIMEOUT));

	if (!rc) {
		rc = __nci_request(ndev, nci_init_req, 0,
				msecs_to_jiffies(NCI_INIT_TIMEOUT));
	}

	if (!rc) {
		rc = __nci_request(ndev, nci_init_complete_req, 0,
				msecs_to_jiffies(NCI_INIT_TIMEOUT));
	}

	clear_bit(NCI_INIT, &ndev->flags);

	if (!rc) {
		set_bit(NCI_UP, &ndev->flags);
	} else {
		/* Init failed, cleanup */
		skb_queue_purge(&ndev->cmd_q);
		skb_queue_purge(&ndev->rx_q);
		skb_queue_purge(&ndev->tx_q);

		ndev->ops->close(ndev);
		ndev->flags = 0;
	}

done:
	mutex_unlock(&ndev->req_lock);
	return rc;
}

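/* Bring the device down: cancel any pending request, drop the queues,
 * flush the work queues, send a final CORE_RESET and close the driver
 * transport.
 */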
static int nci_close_device(struct nci_dev *ndev)
{
	nci_req_cancel(ndev, ENODEV);
	mutex_lock(&ndev->req_lock);

	if (!test_and_clear_bit(NCI_UP, &ndev->flags)) {
		del_timer_sync(&ndev->cmd_timer);
		mutex_unlock(&ndev->req_lock);
		return 0;
	}

	/* Drop RX and TX queues */
	skb_queue_purge(&ndev->rx_q);
	skb_queue_purge(&ndev->tx_q);

	/* Flush RX and TX wq */
	flush_workqueue(ndev->rx_wq);
	flush_workqueue(ndev->tx_wq);

	/* Reset device */
	skb_queue_purge(&ndev->cmd_q);
	atomic_set(&ndev->cmd_cnt, 1);

	set_bit(NCI_INIT, &ndev->flags);
	__nci_request(ndev, nci_reset_req, 0,
				msecs_to_jiffies(NCI_RESET_TIMEOUT));
	clear_bit(NCI_INIT, &ndev->flags);

	/* Flush cmd wq */
	flush_workqueue(ndev->cmd_wq);

	/* After this point our queues are empty
	 * and no work is scheduled.
	 */
	ndev->ops->close(ndev);

	/* Clear flags */
	ndev->flags = 0;

	mutex_unlock(&ndev->req_lock);

	return 0;
}

/* NCI command timer function */
static void nci_cmd_timer(unsigned long arg)
{
	struct nci_dev *ndev = (void *) arg;

	nfc_dbg("entry");

	atomic_set(&ndev->cmd_cnt, 1);
	queue_work(ndev->cmd_wq, &ndev->cmd_work);
}

static int nci_dev_up(struct nfc_dev *nfc_dev)
{
	struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);

	nfc_dbg("entry");

	return nci_open_device(ndev);
}

static int nci_dev_down(struct nfc_dev *nfc_dev)
{
	struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);

	nfc_dbg("entry");

	return nci_close_device(ndev);
}

static int nci_start_poll(struct nfc_dev *nfc_dev, __u32 protocols)
{
	struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
	int rc;

	nfc_dbg("entry");

	if (test_bit(NCI_DISCOVERY, &ndev->flags)) {
		nfc_err("unable to start poll, since poll is already active");
		return -EBUSY;
	}

	if (ndev->target_active_prot) {
		nfc_err("there is an active target");
		return -EBUSY;
	}

	if (test_bit(NCI_POLL_ACTIVE, &ndev->flags)) {
		nfc_dbg("target is active, implicitly deactivate...");

		rc = nci_request(ndev, nci_rf_deactivate_req, 0,
			msecs_to_jiffies(NCI_RF_DEACTIVATE_TIMEOUT));
		if (rc)
			return -EBUSY;
	}

	rc = nci_request(ndev, nci_rf_discover_req, protocols,
		msecs_to_jiffies(NCI_RF_DISC_TIMEOUT));

	if (!rc)
		ndev->poll_prots = protocols;

	return rc;
}

static void nci_stop_poll(struct nfc_dev *nfc_dev)
{
	struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);

	nfc_dbg("entry");

	if (!test_bit(NCI_DISCOVERY, &ndev->flags)) {
		nfc_err("unable to stop poll, since poll is not active");
		return;
	}

	nci_request(ndev, nci_rf_deactivate_req, 0,
		msecs_to_jiffies(NCI_RF_DEACTIVATE_TIMEOUT));
}

static int nci_activate_target(struct nfc_dev *nfc_dev, __u32 target_idx,
				__u32 protocol)
{
	struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);

	nfc_dbg("entry, target_idx %d, protocol 0x%x", target_idx, protocol);

	if (!test_bit(NCI_POLL_ACTIVE, &ndev->flags)) {
		nfc_err("there is no available target to activate");
		return -EINVAL;
	}

	if (ndev->target_active_prot) {
		nfc_err("there is already an active target");
		return -EBUSY;
	}

	if (!(ndev->target_available_prots & (1 << protocol))) {
		nfc_err("target does not support the requested protocol 0x%x",
			protocol);
		return -EINVAL;
	}

	ndev->target_active_prot = protocol;
	ndev->target_available_prots = 0;

	return 0;
}

static void nci_deactivate_target(struct nfc_dev *nfc_dev, __u32 target_idx)
{
	struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);

	nfc_dbg("entry, target_idx %d", target_idx);

	if (!ndev->target_active_prot) {
		nfc_err("unable to deactivate target, no active target");
		return;
	}

	ndev->target_active_prot = 0;

	if (test_bit(NCI_POLL_ACTIVE, &ndev->flags)) {
		nci_request(ndev, nci_rf_deactivate_req, 0,
			msecs_to_jiffies(NCI_RF_DEACTIVATE_TIMEOUT));
	}
}

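/* Queue one data frame for the active target on the static RF connection.
 * The supplied callback is stored and invoked from the RX data path once
 * the response (or an error) arrives; only one exchange may be in flight,
 * which is what the NCI_DATA_EXCHANGE flag enforces.
 */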
static int nci_data_exchange(struct nfc_dev *nfc_dev, __u32 target_idx,
						struct sk_buff *skb,
						data_exchange_cb_t cb,
						void *cb_context)
{
	struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
	int rc;

	nfc_dbg("entry, target_idx %d, len %d", target_idx, skb->len);

	if (!ndev->target_active_prot) {
		nfc_err("unable to exchange data, no active target");
		return -EINVAL;
	}

	if (test_and_set_bit(NCI_DATA_EXCHANGE, &ndev->flags))
		return -EBUSY;

	/* store cb and context to be used on receiving data */
	ndev->data_exchange_cb = cb;
	ndev->data_exchange_cb_context = cb_context;

	rc = nci_send_data(ndev, ndev->conn_id, skb);
	if (rc)
		clear_bit(NCI_DATA_EXCHANGE, &ndev->flags);

	return rc;
}

static struct nfc_ops nci_nfc_ops = {
	.dev_up = nci_dev_up,
	.dev_down = nci_dev_down,
	.start_poll = nci_start_poll,
	.stop_poll = nci_stop_poll,
	.activate_target = nci_activate_target,
	.deactivate_target = nci_deactivate_target,
	.data_exchange = nci_data_exchange,
};

/* ---- Interface to NCI drivers ---- */

/**
 * nci_allocate_device - allocate a new nci device
 *
 * @ops: device operations
 * @supported_protocols: NFC protocols supported by the device
 * @tx_headroom: reserved driver headroom for outgoing frames
 * @tx_tailroom: reserved driver tailroom for outgoing frames
 */
struct nci_dev *nci_allocate_device(struct nci_ops *ops,
					__u32 supported_protocols,
					int tx_headroom,
					int tx_tailroom)
{
	struct nci_dev *ndev;

	nfc_dbg("entry, supported_protocols 0x%x", supported_protocols);

	if (!ops->open || !ops->close || !ops->send)
		return NULL;

	if (!supported_protocols)
		return NULL;

	ndev = kzalloc(sizeof(struct nci_dev), GFP_KERNEL);
	if (!ndev)
		return NULL;

	ndev->ops = ops;
	ndev->tx_headroom = tx_headroom;
	ndev->tx_tailroom = tx_tailroom;

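	/* the NFC core is given room for the NCI data header on top of the
	 * driver headroom, so outgoing data packets can be built without
	 * reallocation
	 */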
	ndev->nfc_dev = nfc_allocate_device(&nci_nfc_ops,
						supported_protocols,
						tx_headroom + NCI_DATA_HDR_SIZE,
						tx_tailroom);
	if (!ndev->nfc_dev)
		goto free_exit;

	nfc_set_drvdata(ndev->nfc_dev, ndev);

	return ndev;

free_exit:
	kfree(ndev);
	return NULL;
}
EXPORT_SYMBOL(nci_allocate_device);

/**
 * nci_free_device - deallocate nci device
 *
 * @ndev: The nci device to deallocate
 */
void nci_free_device(struct nci_dev *ndev)
{
	nfc_dbg("entry");

	nfc_free_device(ndev->nfc_dev);
	kfree(ndev);
}
EXPORT_SYMBOL(nci_free_device);

/**
 * nci_register_device - register an NCI device in the NFC subsystem
 *
 * @ndev: The nci device to register
 */
int nci_register_device(struct nci_dev *ndev)
{
	int rc;
	struct device *dev = &ndev->nfc_dev->dev;
	char name[32];

	nfc_dbg("entry");

	rc = nfc_register_device(ndev->nfc_dev);
	if (rc)
		goto exit;

	ndev->flags = 0;

	INIT_WORK(&ndev->cmd_work, nci_cmd_work);
	snprintf(name, sizeof(name), "%s_nci_cmd_wq", dev_name(dev));
	ndev->cmd_wq = create_singlethread_workqueue(name);
	if (!ndev->cmd_wq) {
		rc = -ENOMEM;
		goto unreg_exit;
	}

	INIT_WORK(&ndev->rx_work, nci_rx_work);
	snprintf(name, sizeof(name), "%s_nci_rx_wq", dev_name(dev));
	ndev->rx_wq = create_singlethread_workqueue(name);
	if (!ndev->rx_wq) {
		rc = -ENOMEM;
		goto destroy_cmd_wq_exit;
	}

	INIT_WORK(&ndev->tx_work, nci_tx_work);
	snprintf(name, sizeof(name), "%s_nci_tx_wq", dev_name(dev));
	ndev->tx_wq = create_singlethread_workqueue(name);
	if (!ndev->tx_wq) {
		rc = -ENOMEM;
		goto destroy_rx_wq_exit;
	}

	skb_queue_head_init(&ndev->cmd_q);
	skb_queue_head_init(&ndev->rx_q);
	skb_queue_head_init(&ndev->tx_q);

	setup_timer(&ndev->cmd_timer, nci_cmd_timer,
			(unsigned long) ndev);

	mutex_init(&ndev->req_lock);

	goto exit;

destroy_rx_wq_exit:
	destroy_workqueue(ndev->rx_wq);

destroy_cmd_wq_exit:
	destroy_workqueue(ndev->cmd_wq);

unreg_exit:
	nfc_unregister_device(ndev->nfc_dev);

exit:
	return rc;
}
EXPORT_SYMBOL(nci_register_device);

/**
 * nci_unregister_device - unregister an NCI device from the NFC subsystem
 *
 * @ndev: The nci device to unregister
 */
void nci_unregister_device(struct nci_dev *ndev)
{
	nfc_dbg("entry");

	nci_close_device(ndev);

	destroy_workqueue(ndev->cmd_wq);
	destroy_workqueue(ndev->rx_wq);
	destroy_workqueue(ndev->tx_wq);

	nfc_unregister_device(ndev->nfc_dev);
}
EXPORT_SYMBOL(nci_unregister_device);

/**
 * nci_recv_frame - receive frame from NCI drivers
 *
 * @skb: The sk_buff to receive
 */
int nci_recv_frame(struct sk_buff *skb)
{
	struct nci_dev *ndev = (struct nci_dev *) skb->dev;

	nfc_dbg("entry, len %d", skb->len);

	if (!ndev || (!test_bit(NCI_UP, &ndev->flags)
		&& !test_bit(NCI_INIT, &ndev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Queue frame for rx worker thread */
	skb_queue_tail(&ndev->rx_q, skb);
	queue_work(ndev->rx_wq, &ndev->rx_work);

	return 0;
}
EXPORT_SYMBOL(nci_recv_frame);

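/*
 * Rough usage sketch for NCI driver authors (illustrative only; names
 * prefixed with "foo_"/"FOO_" are hypothetical and not part of this file).
 * A transport driver allocates and registers an nci_dev, and feeds
 * incoming frames to nci_recv_frame() with skb->dev pointing back at the
 * nci_dev:
 *
 *	static struct nci_ops foo_nci_ops = {
 *		.open  = foo_open,	// power up / open the transport
 *		.close = foo_close,	// power down / close the transport
 *		.send  = foo_send,	// write one NCI frame to the NFCC
 *	};
 *
 *	ndev = nci_allocate_device(&foo_nci_ops, protocols,
 *				   FOO_HDR_LEN, FOO_TAIL_LEN);
 *	if (!ndev)
 *		return -ENOMEM;
 *	rc = nci_register_device(ndev);
 *
 *	// in the driver's receive path:
 *	skb->dev = (void *) ndev;
 *	nci_recv_frame(skb);
 *
 * Teardown is nci_unregister_device() followed by nci_free_device().
 */
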
static int nci_send_frame(struct sk_buff *skb)
{
	struct nci_dev *ndev = (struct nci_dev *) skb->dev;

	nfc_dbg("entry, len %d", skb->len);

	if (!ndev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	return ndev->ops->send(skb);
}

/* Send NCI command */
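/* Builds the 3-octet NCI control packet header (MT/PBF and GID in the
 * first octet, OID in the second, payload length in the third), appends
 * the payload and hands the skb to the cmd worker, which enforces the
 * one-outstanding-command flow control.
 */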
int nci_send_cmd(struct nci_dev *ndev, __u16 opcode, __u8 plen, void *payload)
{
	struct nci_ctrl_hdr *hdr;
	struct sk_buff *skb;

	nfc_dbg("entry, opcode 0x%x, plen %d", opcode, plen);

	skb = nci_skb_alloc(ndev, (NCI_CTRL_HDR_SIZE + plen), GFP_KERNEL);
	if (!skb) {
		nfc_err("no memory for command");
		return -ENOMEM;
	}

	hdr = (struct nci_ctrl_hdr *) skb_put(skb, NCI_CTRL_HDR_SIZE);
	hdr->gid = nci_opcode_gid(opcode);
	hdr->oid = nci_opcode_oid(opcode);
	hdr->plen = plen;

	nci_mt_set((__u8 *)hdr, NCI_MT_CMD_PKT);
	nci_pbf_set((__u8 *)hdr, NCI_PBF_LAST);

	if (plen)
		memcpy(skb_put(skb, plen), payload, plen);

	skb->dev = (void *) ndev;

	skb_queue_tail(&ndev->cmd_q, skb);
	queue_work(ndev->cmd_wq, &ndev->cmd_work);

	return 0;
}

/* ---- NCI TX Data worker thread ---- */

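/* Drains the tx queue as long as the NFCC has data credits available;
 * credits are consumed here per packet and given back by the
 * CORE_CONN_CREDITS notification handler, which re-queues this work.
 */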
static void nci_tx_work(struct work_struct *work)
{
	struct nci_dev *ndev = container_of(work, struct nci_dev, tx_work);
	struct sk_buff *skb;

	nfc_dbg("entry, credits_cnt %d", atomic_read(&ndev->credits_cnt));

	/* Send queued tx data */
	while (atomic_read(&ndev->credits_cnt)) {
		skb = skb_dequeue(&ndev->tx_q);
		if (!skb)
			return;

		atomic_dec(&ndev->credits_cnt);

		nfc_dbg("NCI TX: MT=data, PBF=%d, conn_id=%d, plen=%d",
				nci_pbf(skb->data),
				nci_conn_id(skb->data),
				nci_plen(skb->data));

		nci_send_frame(skb);
	}
}

/* ----- NCI RX worker thread (data & control) ----- */

static void nci_rx_work(struct work_struct *work)
{
	struct nci_dev *ndev = container_of(work, struct nci_dev, rx_work);
	struct sk_buff *skb;

	while ((skb = skb_dequeue(&ndev->rx_q))) {
		/* Process frame */
		switch (nci_mt(skb->data)) {
		case NCI_MT_RSP_PKT:
			nci_rsp_packet(ndev, skb);
			break;

		case NCI_MT_NTF_PKT:
			nci_ntf_packet(ndev, skb);
			break;

		case NCI_MT_DATA_PKT:
			nci_rx_data_packet(ndev, skb);
			break;

		default:
			nfc_err("unknown MT 0x%x", nci_mt(skb->data));
			kfree_skb(skb);
			break;
		}
	}
}

/* ----- NCI TX CMD worker thread ----- */

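/* Sends at most one queued command at a time: cmd_cnt acts as the flow
 * control credit and is restored either by the matching response or, if
 * none arrives, by the command timer above.
 */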
static void nci_cmd_work(struct work_struct *work)
{
	struct nci_dev *ndev = container_of(work, struct nci_dev, cmd_work);
	struct sk_buff *skb;

	nfc_dbg("entry, cmd_cnt %d", atomic_read(&ndev->cmd_cnt));

	/* Send queued command */
	if (atomic_read(&ndev->cmd_cnt)) {
		skb = skb_dequeue(&ndev->cmd_q);
		if (!skb)
			return;

		atomic_dec(&ndev->cmd_cnt);

		nfc_dbg("NCI TX: MT=cmd, PBF=%d, GID=0x%x, OID=0x%x, plen=%d",
				nci_pbf(skb->data),
				nci_opcode_gid(nci_opcode(skb->data)),
				nci_opcode_oid(nci_opcode(skb->data)),
				nci_plen(skb->data));

		nci_send_frame(skb);

		mod_timer(&ndev->cmd_timer,
			jiffies + msecs_to_jiffies(NCI_CMD_TIMEOUT));
	}
}
799