xref: /linux/net/caif/cfrfml.c (revision f2ee442115c9b6219083c019939a9cc0c9abb2f8)
1 /*
2  * Copyright (C) ST-Ericsson AB 2010
3  * Author:	Sjur Brendeland/sjur.brandeland@stericsson.com
4  * License terms: GNU General Public License (GPL) version 2
5  */
6 
7 #define pr_fmt(fmt) KBUILD_MODNAME ":%s(): " fmt, __func__
8 
9 #include <linux/stddef.h>
10 #include <linux/spinlock.h>
11 #include <linux/slab.h>
12 #include <asm/unaligned.h>
13 #include <net/caif/caif_layer.h>
14 #include <net/caif/cfsrvl.h>
15 #include <net/caif/cfpkt.h>
16 
17 #define container_obj(layr) container_of(layr, struct cfrfml, serv.layer)
18 #define RFM_SEGMENTATION_BIT 0x01
19 #define RFM_HEAD_SIZE 7
20 
21 static int cfrfml_receive(struct cflayer *layr, struct cfpkt *pkt);
22 static int cfrfml_transmit(struct cflayer *layr, struct cfpkt *pkt);
23 
/* Per-channel state for the RFM (Remote File Manager) service layer. */
struct cfrfml {
	struct cfsrvl serv;		/* Embedded generic service layer (must stay first) */
	struct cfpkt *incomplete_frm;	/* Partially reassembled PDU, NULL when idle */
	int fragment_size;		/* Max payload bytes per segment, set in cfrfml_create() */
	u8  seghead[6];			/* RFM header of the initial segment; later segments must match */
	u16 pdu_size;			/* Expected PDU length, little-endian field at seghead+4 */
	/* Protects serialized processing of packets */
	spinlock_t sync;
};
33 
34 static void cfrfml_release(struct cflayer *layer)
35 {
36 	struct cfsrvl *srvl = container_of(layer, struct cfsrvl, layer);
37 	struct cfrfml *rfml = container_obj(&srvl->layer);
38 
39 	if (rfml->incomplete_frm)
40 		cfpkt_destroy(rfml->incomplete_frm);
41 
42 	kfree(srvl);
43 }
44 
45 struct cflayer *cfrfml_create(u8 channel_id, struct dev_info *dev_info,
46 					int mtu_size)
47 {
48 	int tmp;
49 	struct cfrfml *this = kzalloc(sizeof(struct cfrfml), GFP_ATOMIC);
50 
51 	if (!this)
52 		return NULL;
53 
54 	cfsrvl_init(&this->serv, channel_id, dev_info, false);
55 	this->serv.release = cfrfml_release;
56 	this->serv.layer.receive = cfrfml_receive;
57 	this->serv.layer.transmit = cfrfml_transmit;
58 
59 	/* Round down to closest multiple of 16 */
60 	tmp = (mtu_size - RFM_HEAD_SIZE - 6) / 16;
61 	tmp *= 16;
62 
63 	this->fragment_size = tmp;
64 	spin_lock_init(&this->sync);
65 	snprintf(this->serv.layer.name, CAIF_LAYER_NAME_SZ,
66 		"rfm%d", channel_id);
67 
68 	return &this->serv.layer;
69 }
70 
71 static struct cfpkt *rfm_append(struct cfrfml *rfml, char *seghead,
72 			struct cfpkt *pkt, int *err)
73 {
74 	struct cfpkt *tmppkt;
75 	*err = -EPROTO;
76 	/* n-th but not last segment */
77 
78 	if (cfpkt_extr_head(pkt, seghead, 6) < 0)
79 		return NULL;
80 
81 	/* Verify correct header */
82 	if (memcmp(seghead, rfml->seghead, 6) != 0)
83 		return NULL;
84 
85 	tmppkt = cfpkt_append(rfml->incomplete_frm, pkt,
86 			rfml->pdu_size + RFM_HEAD_SIZE);
87 
88 	/* If cfpkt_append failes input pkts are not freed */
89 	*err = -ENOMEM;
90 	if (tmppkt == NULL)
91 		return NULL;
92 
93 	*err = 0;
94 	return tmppkt;
95 }
96 
97 static int cfrfml_receive(struct cflayer *layr, struct cfpkt *pkt)
98 {
99 	u8 tmp;
100 	bool segmented;
101 	int err;
102 	u8 seghead[6];
103 	struct cfrfml *rfml;
104 	struct cfpkt *tmppkt = NULL;
105 
106 	caif_assert(layr->up != NULL);
107 	caif_assert(layr->receive != NULL);
108 	rfml = container_obj(layr);
109 	spin_lock(&rfml->sync);
110 
111 	err = -EPROTO;
112 	if (cfpkt_extr_head(pkt, &tmp, 1) < 0)
113 		goto out;
114 	segmented = tmp & RFM_SEGMENTATION_BIT;
115 
116 	if (segmented) {
117 		if (rfml->incomplete_frm == NULL) {
118 			/* Initial Segment */
119 			if (cfpkt_peek_head(pkt, rfml->seghead, 6) < 0)
120 				goto out;
121 
122 			rfml->pdu_size = get_unaligned_le16(rfml->seghead+4);
123 
124 			if (cfpkt_erroneous(pkt))
125 				goto out;
126 			rfml->incomplete_frm = pkt;
127 			pkt = NULL;
128 		} else {
129 
130 			tmppkt = rfm_append(rfml, seghead, pkt, &err);
131 			if (tmppkt == NULL)
132 				goto out;
133 
134 			if (cfpkt_erroneous(tmppkt))
135 				goto out;
136 
137 			rfml->incomplete_frm = tmppkt;
138 
139 
140 			if (cfpkt_erroneous(tmppkt))
141 				goto out;
142 		}
143 		err = 0;
144 		goto out;
145 	}
146 
147 	if (rfml->incomplete_frm) {
148 
149 		/* Last Segment */
150 		tmppkt = rfm_append(rfml, seghead, pkt, &err);
151 		if (tmppkt == NULL)
152 			goto out;
153 
154 		if (cfpkt_erroneous(tmppkt))
155 			goto out;
156 
157 		rfml->incomplete_frm = NULL;
158 		pkt = tmppkt;
159 		tmppkt = NULL;
160 
161 		/* Verify that length is correct */
162 		err = EPROTO;
163 		if (rfml->pdu_size != cfpkt_getlen(pkt) - RFM_HEAD_SIZE + 1)
164 			goto out;
165 	}
166 
167 	err = rfml->serv.layer.up->receive(rfml->serv.layer.up, pkt);
168 
169 out:
170 
171 	if (err != 0) {
172 		if (tmppkt)
173 			cfpkt_destroy(tmppkt);
174 		if (pkt)
175 			cfpkt_destroy(pkt);
176 		if (rfml->incomplete_frm)
177 			cfpkt_destroy(rfml->incomplete_frm);
178 		rfml->incomplete_frm = NULL;
179 
180 		pr_info("Connection error %d triggered on RFM link\n", err);
181 
182 		/* Trigger connection error upon failure.*/
183 		layr->up->ctrlcmd(layr->up, CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND,
184 					rfml->serv.dev_info.id);
185 	}
186 	spin_unlock(&rfml->sync);
187 	return err;
188 }
189 
190 
191 static int cfrfml_transmit_segment(struct cfrfml *rfml, struct cfpkt *pkt)
192 {
193 	caif_assert(cfpkt_getlen(pkt) < rfml->fragment_size);
194 
195 	/* Add info for MUX-layer to route the packet out. */
196 	cfpkt_info(pkt)->channel_id = rfml->serv.layer.id;
197 
198 	/*
199 	 * To optimize alignment, we add up the size of CAIF header before
200 	 * payload.
201 	 */
202 	cfpkt_info(pkt)->hdr_len = RFM_HEAD_SIZE;
203 	cfpkt_info(pkt)->dev_info = &rfml->serv.dev_info;
204 
205 	return rfml->serv.layer.dn->transmit(rfml->serv.layer.dn, pkt);
206 }
207 
/*
 * Transmit a PDU, segmenting it when it exceeds the fragment size.
 *
 * Large packets are split into fragment_size chunks: each chunk gets a
 * one-byte segmentation flag (1 = more segments follow, 0 = last) and
 * every chunk after the first gets a copy of the original 6-byte head.
 *
 * Packet ownership is subtle: 'frontpkt' is the chunk currently being
 * prepared, 'rearpkt' the remainder after a split.  Once a chunk has
 * been handed to cfrfml_transmit_segment() it is no longer ours; on a
 * later failure only packets we still own may be destroyed, and err is
 * forced to 0 so the socket layer does not free an already-sent packet.
 *
 * Returns 0 on success or a negative errno.
 */
static int cfrfml_transmit(struct cflayer *layr, struct cfpkt *pkt)
{
	int err;
	u8 seg;
	u8 head[6];
	struct cfpkt *rearpkt = NULL;
	struct cfpkt *frontpkt = pkt;
	struct cfrfml *rfml = container_obj(layr);

	caif_assert(layr->dn != NULL);
	caif_assert(layr->dn->transmit != NULL);

	if (!cfsrvl_ready(&rfml->serv, &err))
		return err;

	/* Packet must at least hold the RFM header. */
	err = -EPROTO;
	if (cfpkt_getlen(pkt) <= RFM_HEAD_SIZE-1)
		goto out;

	/* Save the 6-byte head for replication onto later segments. */
	err = 0;
	if (cfpkt_getlen(pkt) > rfml->fragment_size + RFM_HEAD_SIZE)
		err = cfpkt_peek_head(pkt, head, 6);

	if (err < 0)
		goto out;

	while (cfpkt_getlen(frontpkt) > rfml->fragment_size + RFM_HEAD_SIZE) {

		/* seg = 1: more segments follow. */
		seg = 1;
		err = -EPROTO;

		if (cfpkt_add_head(frontpkt, &seg, 1) < 0)
			goto out;
		/*
		 * On OOM error cfpkt_split returns NULL.
		 *
		 * NOTE: Segmented pdu is not correctly aligned.
		 * This has negative performance impact.
		 */

		rearpkt = cfpkt_split(frontpkt, rfml->fragment_size);
		if (rearpkt == NULL)
			goto out;

		err = cfrfml_transmit_segment(rfml, frontpkt);

		/* frontpkt now belongs to the lower layer, even on error. */
		if (err != 0)
			goto out;
		frontpkt = rearpkt;
		rearpkt = NULL;

		err = -ENOMEM;
		if (frontpkt == NULL)
			goto out;
		/* Replicate the saved head onto the next segment. */
		err = -EPROTO;
		if (cfpkt_add_head(frontpkt, head, 6) < 0)
			goto out;

	}

	/* seg = 0: this is the last (or only) segment. */
	seg = 0;
	err = -EPROTO;

	if (cfpkt_add_head(frontpkt, &seg, 1) < 0)
		goto out;

	err = cfrfml_transmit_segment(rfml, frontpkt);

	frontpkt = NULL;
out:

	if (err != 0) {
		pr_info("Connection error %d triggered on RFM link\n", err);
		/* Trigger connection error upon failure.*/

		layr->up->ctrlcmd(layr->up, CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND,
					rfml->serv.dev_info.id);

		if (rearpkt)
			cfpkt_destroy(rearpkt);

		if (frontpkt && frontpkt != pkt) {

			cfpkt_destroy(frontpkt);
			/*
			 * Socket layer will free the original packet,
			 * but this packet may already be sent and
			 * freed. So we have to return 0 in this case
			 * to avoid socket layer to re-free this packet.
			 * The return of shutdown indication will
			 * cause connection to be invalidated anyhow.
			 */
			err = 0;
		}
	}

	return err;
}
306