xref: /linux/net/caif/caif_dev.c (revision 0dd9ac63ce26ec87b080ca9c3e6efed33c23ace6)
/*
 * CAIF Interface registration.
 * Copyright (C) ST-Ericsson AB 2010
 * Author:	Sjur Brendeland/sjur.brandeland@stericsson.com
 * License terms: GNU General Public License (GPL) version 2
 *
 * Borrowed heavily from file: pn_dev.c. Thanks to
 *  Remi Denis-Courmont <remi.denis-courmont@nokia.com>
 *  and Sakari Ailus <sakari.ailus@nokia.com>
 */

#include <linux/version.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/if_arp.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <net/netns/generic.h>
#include <net/net_namespace.h>
#include <net/pkt_sched.h>
#include <net/caif/caif_device.h>
#include <net/caif/caif_dev.h>
#include <net/caif/caif_layer.h>
#include <net/caif/cfpkt.h>
#include <net/caif/cfcnfg.h>

MODULE_LICENSE("GPL");
#define TIMEOUT (HZ*5)

/* Used for local tracking of the CAIF net devices */
struct caif_device_entry {
	struct cflayer layer;		/* CAIF physical layer for this device */
	struct list_head list;		/* node in the per-namespace device list */
	atomic_t in_use;		/* non-zero while the CAIF stack uses this PHY */
	atomic_t state;			/* last netdev notifier event seen */
	u16 phyid;			/* physical layer id assigned by cfcnfg */
	struct net_device *netdev;
	wait_queue_head_t event;	/* woken when in_use changes */
};

struct caif_device_entry_list {
	struct list_head list;
	/* Protects simultaneous deletes in list */
	spinlock_t lock;
};

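/* Per-network-namespace CAIF state, kept in the net_generic() area. */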
struct caif_net {
	struct caif_device_entry_list caifdevs;
};

static int caif_net_id;
static struct cfcnfg *cfg;

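/*
 * Return the per-network-namespace list of CAIF devices, stored in the
 * net_generic() area registered through caif_net_ops below.
 */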
static struct caif_device_entry_list *caif_device_list(struct net *net)
{
	struct caif_net *caifn;
	BUG_ON(!net);
	caifn = net_generic(net, caif_net_id);
	BUG_ON(!caifn);
	return &caifn->caifdevs;
}

/* Allocate a new CAIF device entry and add it to the per-namespace list. */
static struct caif_device_entry *caif_device_alloc(struct net_device *dev)
{
	struct caif_device_entry_list *caifdevs;
	struct caif_device_entry *caifd;
	caifdevs = caif_device_list(dev_net(dev));
	BUG_ON(!caifdevs);
	caifd = kzalloc(sizeof(*caifd), GFP_ATOMIC);
	if (!caifd)
		return NULL;
	caifd->netdev = dev;
	list_add(&caifd->list, &caifdevs->list);
	init_waitqueue_head(&caifd->event);
	return caifd;
}

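/*
 * Find the device entry matching a given net_device, or NULL if the
 * device has not been registered with CAIF.
 */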
static struct caif_device_entry *caif_get(struct net_device *dev)
{
	struct caif_device_entry_list *caifdevs =
	    caif_device_list(dev_net(dev));
	struct caif_device_entry *caifd;
	BUG_ON(!caifdevs);
	list_for_each_entry(caifd, &caifdevs->list, list) {
		if (caifd->netdev == dev)
			return caifd;
	}
	return NULL;
}

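/*
 * Unlink the entry for a CAIF net_device from the per-namespace list and
 * free it. Only ARPHRD_CAIF devices are handled; must run under RTNL.
 */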
static void caif_device_destroy(struct net_device *dev)
{
	struct caif_device_entry_list *caifdevs =
	    caif_device_list(dev_net(dev));
	struct caif_device_entry *caifd;
	ASSERT_RTNL();
	if (dev->type != ARPHRD_CAIF)
		return;

	spin_lock_bh(&caifdevs->lock);
	caifd = caif_get(dev);
	if (caifd == NULL) {
		spin_unlock_bh(&caifdevs->lock);
		return;
	}

	list_del(&caifd->list);
	spin_unlock_bh(&caifdevs->lock);

	kfree(caifd);
}

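/*
 * Transmit hook installed as the physical layer's transmit function:
 * convert the CAIF packet back to an sk_buff and queue it on the
 * underlying net_device. Returns -EAGAIN so the caller can retry when
 * the device queue is stopped or dev_queue_xmit() reports an error.
 */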
static int transmit(struct cflayer *layer, struct cfpkt *pkt)
{
	struct caif_device_entry *caifd =
	    container_of(layer, struct caif_device_entry, layer);
	struct sk_buff *skb, *skb2;
	int ret = -EINVAL;
	skb = cfpkt_tonative(pkt);
	skb->dev = caifd->netdev;
	/*
	 * Don't allow the SKB to be destroyed upon error, but signal a resend
	 * notification to clients. We can't rely on the return value as
	 * congestion (NET_XMIT_CN) sometimes drops the packet and sometimes
	 * doesn't.
	 */
	if (netif_queue_stopped(caifd->netdev))
		return -EAGAIN;
	skb2 = skb_get(skb);

	ret = dev_queue_xmit(skb2);

	if (!ret)
		kfree_skb(skb);
	else
		return -EAGAIN;

	return 0;
}

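/*
 * Handle modem commands from the CAIF stack: track whether the physical
 * interface is in use and wake up caif_device_notify(), which may be
 * waiting for the interface to become idle before taking it down.
 */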
static int modemcmd(struct cflayer *layr, enum caif_modemcmd ctrl)
{
	struct caif_device_entry *caifd;
	struct caif_dev_common *caifdev;
	caifd = container_of(layr, struct caif_device_entry, layer);
	caifdev = netdev_priv(caifd->netdev);
	if (ctrl == _CAIF_MODEMCMD_PHYIF_USEFULL) {
		atomic_set(&caifd->in_use, 1);
		wake_up_interruptible(&caifd->event);

	} else if (ctrl == _CAIF_MODEMCMD_PHYIF_USELESS) {
		atomic_set(&caifd->in_use, 0);
		wake_up_interruptible(&caifd->event);
	}
	return 0;
}

/*
 * Push received packets up to the associated sockets.
 * On error, returns non-zero and releases the skb.
 */
static int receive(struct sk_buff *skb, struct net_device *dev,
		   struct packet_type *pkttype, struct net_device *orig_dev)
{
	struct net *net;
	struct cfpkt *pkt;
	struct caif_device_entry *caifd;
	net = dev_net(dev);
	pkt = cfpkt_fromnative(CAIF_DIR_IN, skb);
	caifd = caif_get(dev);
	if (!caifd || !caifd->layer.up || !caifd->layer.up->ctrlcmd)
		return NET_RX_DROP;

	if (caifd->layer.up->receive(caifd->layer.up, pkt))
		return NET_RX_DROP;

	return 0;
}

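/*
 * All CAIF frames carry protocol ETH_P_CAIF; register receive() as the
 * handler for that packet type.
 */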
static struct packet_type caif_packet_type __read_mostly = {
	.type = cpu_to_be16(ETH_P_CAIF),
	.func = receive,
};

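/*
 * Flow-control callback handed to the link-layer driver through struct
 * caif_dev_common: translate on/off into FLOW_ON/FLOW_OFF indications
 * towards the layer above the physical interface.
 */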
static void dev_flowctrl(struct net_device *dev, int on)
{
	struct caif_device_entry *caifd = caif_get(dev);
	if (!caifd || !caifd->layer.up || !caifd->layer.up->ctrlcmd)
		return;

	caifd->layer.up->ctrlcmd(caifd->layer.up,
				 on ?
				 _CAIF_CTRLCMD_PHYIF_FLOW_ON_IND :
				 _CAIF_CTRLCMD_PHYIF_FLOW_OFF_IND,
				 caifd->layer.id);
}

/*
 * Notify CAIF of net device events. The notifier drives the life cycle of
 * a CAIF physical interface: NETDEV_REGISTER allocates the tracking entry,
 * NETDEV_UP plugs it into the CAIF stack as a physical layer,
 * NETDEV_GOING_DOWN signals PHYIF_DOWN and waits (up to TIMEOUT) for
 * clients to release the interface, NETDEV_DOWN removes the physical layer
 * again, and NETDEV_UNREGISTER frees the tracking entry.
 */
static int caif_device_notify(struct notifier_block *me, unsigned long what,
			      void *arg)
{
	struct net_device *dev = arg;
	struct caif_device_entry *caifd = NULL;
	struct caif_dev_common *caifdev;
	enum cfcnfg_phy_preference pref;
	int res = -EINVAL;
	enum cfcnfg_phy_type phy_type;

	if (dev->type != ARPHRD_CAIF)
		return 0;

	switch (what) {
	case NETDEV_REGISTER:
		pr_info("CAIF: %s():register %s\n", __func__, dev->name);
		caifd = caif_device_alloc(dev);
		if (caifd == NULL)
			break;
		caifdev = netdev_priv(dev);
		caifdev->flowctrl = dev_flowctrl;
		atomic_set(&caifd->state, what);
		res = 0;
		break;

	case NETDEV_UP:
		pr_info("CAIF: %s(): up %s\n", __func__, dev->name);
		caifd = caif_get(dev);
		if (caifd == NULL)
			break;
		caifdev = netdev_priv(dev);
		if (atomic_read(&caifd->state) == NETDEV_UP) {
			pr_info("CAIF: %s():%s already up\n",
				__func__, dev->name);
			break;
		}
		atomic_set(&caifd->state, what);
		caifd->layer.transmit = transmit;
		caifd->layer.modemcmd = modemcmd;

		if (caifdev->use_frag)
			phy_type = CFPHYTYPE_FRAG;
		else
			phy_type = CFPHYTYPE_CAIF;

		switch (caifdev->link_select) {
		case CAIF_LINK_HIGH_BANDW:
			pref = CFPHYPREF_HIGH_BW;
			break;
		case CAIF_LINK_LOW_LATENCY:
			pref = CFPHYPREF_LOW_LAT;
			break;
		default:
			pref = CFPHYPREF_HIGH_BW;
			break;
		}

		cfcnfg_add_phy_layer(get_caif_conf(),
				     phy_type,
				     dev,
				     &caifd->layer,
				     &caifd->phyid,
				     pref,
				     caifdev->use_fcs,
				     caifdev->use_stx);
		strncpy(caifd->layer.name, dev->name,
			sizeof(caifd->layer.name) - 1);
		caifd->layer.name[sizeof(caifd->layer.name) - 1] = 0;
		break;

	case NETDEV_GOING_DOWN:
		caifd = caif_get(dev);
		if (caifd == NULL)
			break;
		pr_info("CAIF: %s():going down %s\n", __func__, dev->name);

		if (atomic_read(&caifd->state) == NETDEV_GOING_DOWN ||
			atomic_read(&caifd->state) == NETDEV_DOWN)
			break;

		atomic_set(&caifd->state, what);
		if (!caifd || !caifd->layer.up || !caifd->layer.up->ctrlcmd)
			return -EINVAL;
		caifd->layer.up->ctrlcmd(caifd->layer.up,
					 _CAIF_CTRLCMD_PHYIF_DOWN_IND,
					 caifd->layer.id);
		res = wait_event_interruptible_timeout(caifd->event,
					atomic_read(&caifd->in_use) == 0,
					TIMEOUT);
		break;

	case NETDEV_DOWN:
		caifd = caif_get(dev);
		if (caifd == NULL)
			break;
		pr_info("CAIF: %s(): down %s\n", __func__, dev->name);
		if (atomic_read(&caifd->in_use))
			pr_warning("CAIF: %s(): "
				   "Unregistering an active CAIF device: %s\n",
				   __func__, dev->name);
		cfcnfg_del_phy_layer(get_caif_conf(), &caifd->layer);
		atomic_set(&caifd->state, what);
		break;

	case NETDEV_UNREGISTER:
		caifd = caif_get(dev);
		if (caifd == NULL)
			break;
		pr_info("CAIF: %s(): unregister %s\n", __func__, dev->name);
		atomic_set(&caifd->state, what);
		caif_device_destroy(dev);
		break;
	}
	return 0;
}

static struct notifier_block caif_device_notifier = {
	.notifier_call = caif_device_notify,
	.priority = 0,
};


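/*
 * Return the global CAIF configuration object created in
 * caif_device_init().
 */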
struct cfcnfg *get_caif_conf(void)
{
	return cfg;
}
EXPORT_SYMBOL(get_caif_conf);

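/*
 * Connect a client (adaptation) layer to the CAIF stack: the connect
 * request is first translated into link-setup parameters and then handed
 * to the configuration layer.
 *
 * A minimal usage sketch (from e.g. the CAIF socket layer); how the
 * request fields are filled in is omitted here and depends on the caller:
 *
 *	struct caif_connect_request req;
 *	struct cflayer *adap_layer = ...;	client's adaptation layer
 *
 *	memset(&req, 0, sizeof(req));
 *	... fill in req (channel type, priority, link selector, ...) ...
 *	err = caif_connect_client(&req, adap_layer);
 *	...
 *	caif_disconnect_client(adap_layer);
 */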
int caif_connect_client(struct caif_connect_request *conn_req,
			   struct cflayer *client_layer)
{
	struct cfctrl_link_param param;
	int ret;
	ret = connect_req_to_link_param(get_caif_conf(), conn_req, &param);
	if (ret)
		return ret;
	/* Hook up the adaptation layer. */
	return cfcnfg_add_adaptation_layer(get_caif_conf(),
						&param, client_layer);
}
EXPORT_SYMBOL(caif_connect_client);

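/* Disconnect a client's adaptation layer from the CAIF stack. */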
int caif_disconnect_client(struct cflayer *adap_layer)
{
	return cfcnfg_disconn_adapt_layer(get_caif_conf(), adap_layer);
}
EXPORT_SYMBOL(caif_disconnect_client);

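/* Release the adaptation layer when the client no longer needs it. */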
void caif_release_client(struct cflayer *adap_layer)
{
	cfcnfg_release_adap_layer(adap_layer);
}
EXPORT_SYMBOL(caif_release_client);

/* Per-namespace Caif devices handling */
static int caif_init_net(struct net *net)
{
	struct caif_net *caifn = net_generic(net, caif_net_id);
	INIT_LIST_HEAD(&caifn->caifdevs.list);
	spin_lock_init(&caifn->caifdevs.lock);
	return 0;
}

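/*
 * On namespace exit, close and destroy any remaining CAIF devices under
 * the RTNL lock.
 */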
static void caif_exit_net(struct net *net)
{
	struct net_device *dev;
	int res;
	rtnl_lock();
	for_each_netdev(net, dev) {
		if (dev->type != ARPHRD_CAIF)
			continue;
		res = dev_close(dev);
		caif_device_destroy(dev);
	}
	rtnl_unlock();
}

static struct pernet_operations caif_net_ops = {
	.init = caif_init_net,
	.exit = caif_exit_net,
	.id   = &caif_net_id,
	.size = sizeof(struct caif_net),
};

/*
 * Module init: create the global CAIF configuration, then register the
 * per-namespace operations, the CAIF packet type and the netdevice
 * notifier.
 */
static int __init caif_device_init(void)
{
	int result;
	cfg = cfcnfg_create();
	if (!cfg) {
		pr_warning("CAIF: %s(): can't create cfcnfg.\n", __func__);
		goto err_cfcnfg_create_failed;
	}
	result = register_pernet_device(&caif_net_ops);

	if (result) {
		kfree(cfg);
		cfg = NULL;
		return result;
	}
	dev_add_pack(&caif_packet_type);
	register_netdevice_notifier(&caif_device_notifier);

	return result;
err_cfcnfg_create_failed:
	return -ENODEV;
}

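/*
 * Module exit: unhook the packet type, per-namespace operations and
 * netdevice notifier, then free the global configuration.
 */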
static void __exit caif_device_exit(void)
{
	dev_remove_pack(&caif_packet_type);
	unregister_pernet_device(&caif_net_ops);
	unregister_netdevice_notifier(&caif_device_notifier);
	cfcnfg_remove(cfg);
}

module_init(caif_device_init);
module_exit(caif_device_exit);
418