// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2007-2012 Siemens AG
 *
 * Written by:
 * Dmitry Eremin-Solenikov <dbaryshkov@gmail.com>
 * Sergey Lapin <slapin@ossfans.org>
 * Maxim Gorbachyov <maxim.gorbachev@siemens.com>
 * Alexander Smirnov <alex.bluesman.smirnov@gmail.com>
 */

#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/crc-ccitt.h>
#include <linux/unaligned.h>

#include <net/rtnetlink.h>
#include <net/ieee802154_netdev.h>
#include <net/mac802154.h>
#include <net/cfg802154.h>

#include "ieee802154_i.h"
#include "driver-ops.h"

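/* Transmission worker for drivers that only implement the synchronous
 * xmit_sync callback. Queued from ieee802154_tx() on local->workqueue;
 * on failure it must release the netif queues and free the skb itself,
 * since no TX completion will be reported.
 */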
void ieee802154_xmit_sync_worker(struct work_struct *work)
{
	struct ieee802154_local *local =
		container_of(work, struct ieee802154_local, sync_tx_work);
	struct sk_buff *skb = local->tx_skb;
	struct net_device *dev = skb->dev;
	int res;

	res = drv_xmit_sync(local, skb);
	if (res)
		goto err_tx;

	DEV_STATS_INC(dev, tx_packets);
	DEV_STATS_ADD(dev, tx_bytes, skb->len);

	ieee802154_xmit_complete(&local->hw, skb, false);

	return;

err_tx:
	/* Restart the netif queue on each sub_if_data object. */
	ieee802154_release_queue(local);
	if (atomic_dec_and_test(&local->phy->ongoing_txs))
		wake_up(&local->phy->sync_txq);
	kfree_skb(skb);
	netdev_dbg(dev, "transmission failed\n");
}

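/* Common transmit path for data and MLME frames: append the FCS when the
 * hardware does not add it, hold the netif queues for the duration of the
 * transmission and hand the skb to the driver, preferring the async
 * xmit callback over the synchronous worker fallback.
 */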
static netdev_tx_t
ieee802154_tx(struct ieee802154_local *local, struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	int ret;

	if (!(local->hw.flags & IEEE802154_HW_TX_OMIT_CKSUM)) {
		struct sk_buff *nskb;
		u16 crc;

		if (unlikely(skb_tailroom(skb) < IEEE802154_FCS_LEN)) {
			nskb = skb_copy_expand(skb, 0, IEEE802154_FCS_LEN,
					       GFP_ATOMIC);
			if (likely(nskb)) {
				consume_skb(skb);
				skb = nskb;
			} else {
				goto err_free_skb;
			}
		}

		crc = crc_ccitt(0, skb->data, skb->len);
		put_unaligned_le16(crc, skb_put(skb, 2));
	}

	/* Stop the netif queue on each sub_if_data object. */
	ieee802154_hold_queue(local);
	atomic_inc(&local->phy->ongoing_txs);

	/* Drivers should preferably implement the async callback. In some rare
	 * cases they only provide a sync callback which we will use as a
	 * fallback.
	 */
	if (local->ops->xmit_async) {
		unsigned int len = skb->len;

		ret = drv_xmit_async(local, skb);
		if (ret)
			goto err_wake_netif_queue;

		DEV_STATS_INC(dev, tx_packets);
		DEV_STATS_ADD(dev, tx_bytes, len);
	} else {
		local->tx_skb = skb;
		queue_work(local->workqueue, &local->sync_tx_work);
	}

	return NETDEV_TX_OK;

err_wake_netif_queue:
	ieee802154_release_queue(local);
	if (atomic_dec_and_test(&local->phy->ongoing_txs))
		wake_up(&local->phy->sync_txq);
err_free_skb:
	kfree_skb(skb);
	return NETDEV_TX_OK;
}

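/* Hold and disable the netif queues, then wait until every ongoing
 * transmission has completed before reporting the last TX result.
 */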
static int ieee802154_sync_queue(struct ieee802154_local *local)
{
	int ret;

	ieee802154_hold_queue(local);
	ieee802154_disable_queue(local);
	wait_event(local->phy->sync_txq, !atomic_read(&local->phy->ongoing_txs));
	ret = local->tx_result;
	ieee802154_release_queue(local);

	return ret;
}

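/* Stop the queues and wait for ongoing transmissions to finish. Unlike
 * ieee802154_sync_queue(), the extra hold taken here is kept, and the PHY
 * is flagged as stopped so hot paths can detect the situation.
 */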
int ieee802154_sync_and_hold_queue(struct ieee802154_local *local)
{
	int ret;

	ieee802154_hold_queue(local);
	ret = ieee802154_sync_queue(local);
	set_bit(WPAN_PHY_FLAG_STATE_QUEUE_STOPPED, &local->phy->flags);

	return ret;
}

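/* Prepare an MLME operation by stopping and syncing the transmit path. */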
int ieee802154_mlme_op_pre(struct ieee802154_local *local)
{
	return ieee802154_sync_and_hold_queue(local);
}

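/* Transmit an MLME frame and wait for it to complete. Must be called with
 * the RTNL held to avoid racing against ->ndo_stop().
 */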
int ieee802154_mlme_tx_locked(struct ieee802154_local *local,
			      struct ieee802154_sub_if_data *sdata,
			      struct sk_buff *skb)
{
	/* Avoid possible calls to ->ndo_stop() when we asynchronously perform
	 * MLME transmissions.
	 */
	ASSERT_RTNL();

	/* Ensure the device was not stopped, otherwise error out */
	if (!local->open_count)
		return -ENETDOWN;

	/* Warn if the ieee802154 core thinks MLME frames can be sent while the
	 * net interface expects this cannot happen.
	 */
	if (WARN_ON_ONCE(!netif_running(sdata->dev)))
		return -ENETDOWN;

	ieee802154_tx(local, skb);
	return ieee802154_sync_queue(local);
}

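/* Same as ieee802154_mlme_tx_locked(), but acquires the RTNL itself. */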
int ieee802154_mlme_tx(struct ieee802154_local *local,
		       struct ieee802154_sub_if_data *sdata,
		       struct sk_buff *skb)
{
	int ret;

	rtnl_lock();
	ret = ieee802154_mlme_tx_locked(local, sdata, skb);
	rtnl_unlock();

	return ret;
}

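/* Finish an MLME operation, dropping the queue hold taken in
 * ieee802154_mlme_op_pre().
 */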
void ieee802154_mlme_op_post(struct ieee802154_local *local)
{
	ieee802154_release_queue(local);
}

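/* Convenience helper bundling the pre/tx/post sequence for one MLME frame. */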
int ieee802154_mlme_tx_one(struct ieee802154_local *local,
			   struct ieee802154_sub_if_data *sdata,
			   struct sk_buff *skb)
{
	int ret;

	ieee802154_mlme_op_pre(local);
	ret = ieee802154_mlme_tx(local, sdata, skb);
	ieee802154_mlme_op_post(local);

	return ret;
}

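/* Same as ieee802154_mlme_tx_one(), for callers already holding the RTNL. */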
int ieee802154_mlme_tx_one_locked(struct ieee802154_local *local,
				  struct ieee802154_sub_if_data *sdata,
				  struct sk_buff *skb)
{
	int ret;

	ieee802154_mlme_op_pre(local);
	ret = ieee802154_mlme_tx_locked(local, sdata, skb);
	ieee802154_mlme_op_post(local);

	return ret;
}

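/* Tell whether the transmit queues are currently flagged as stopped. */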
static bool ieee802154_queue_is_stopped(struct ieee802154_local *local)
{
	return test_bit(WPAN_PHY_FLAG_STATE_QUEUE_STOPPED, &local->phy->flags);
}

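/* Hot transmit path shared by the ndo_start_xmit() handlers below. */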
static netdev_tx_t
ieee802154_hot_tx(struct ieee802154_local *local, struct sk_buff *skb)
{
	/* Warn if the net interface tries to transmit frames while the
	 * ieee802154 core assumes the queue is stopped.
	 */
	WARN_ON_ONCE(ieee802154_queue_is_stopped(local));

	return ieee802154_tx(local, skb);
}

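/* ndo_start_xmit() handler for monitor interfaces: no link-layer security
 * processing, the frame is transmitted as-is.
 */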
netdev_tx_t
ieee802154_monitor_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev);

	skb->skb_iif = dev->ifindex;

	return ieee802154_hot_tx(sdata->local, skb);
}

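/* ndo_start_xmit() handler for data interfaces: apply link-layer security
 * (encryption) before handing the frame to the transmit path.
 */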
netdev_tx_t
ieee802154_subif_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev);
	int rc;

	/* TODO we should move this to the wpan_dev_hard_header and
	 * dev_hard_header functions. As it stands, wireshark shows a MAC
	 * header carrying security fields while the payload is not actually
	 * encrypted.
	 */
	rc = mac802154_llsec_encrypt(&sdata->sec, skb);
	if (rc) {
		netdev_warn(dev, "encryption failed: %i\n", rc);
		kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	skb->skb_iif = dev->ifindex;

	return ieee802154_hot_tx(sdata->local, skb);
}
254