/*
 * Copyright (C) ST-Ericsson AB 2010
 * Author: Sjur Brendeland/sjur.brandeland@stericsson.com
 * License terms: GNU General Public License (GPL) version 2
 */

#include <linux/stddef.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <net/caif/caif_layer.h>
#include <net/caif/cfpkt.h>
#include <net/caif/cfserl.h>

#define container_obj(layr) ((struct cfserl *) layr)

#define CFSERL_STX 0x02
#define CAIF_MINIMUM_PACKET_SIZE 4

struct cfserl {
	struct cflayer layer;
	struct cfpkt *incomplete_frm;
	/* Protects parallel processing of incoming packets */
	spinlock_t sync;
	bool usestx;
};

#define STXLEN(layr) (layr->usestx ? 1 : 0)

static int cfserl_receive(struct cflayer *layr, struct cfpkt *pkt);
static int cfserl_transmit(struct cflayer *layr, struct cfpkt *pkt);
static void cfserl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
			   int phyid);

struct cflayer *cfserl_create(int type, int instance, bool use_stx)
{
	struct cfserl *this = kmalloc(sizeof(struct cfserl), GFP_ATOMIC);

	if (!this) {
		pr_warning("CAIF: %s(): Out of memory\n", __func__);
		return NULL;
	}
	caif_assert(offsetof(struct cfserl, layer) == 0);
	memset(this, 0, sizeof(struct cfserl));
	this->layer.receive = cfserl_receive;
	this->layer.transmit = cfserl_transmit;
	this->layer.ctrlcmd = cfserl_ctrlcmd;
	this->layer.type = type;
	this->usestx = use_stx;
	spin_lock_init(&this->sync);
	snprintf(this->layer.name, CAIF_LAYER_NAME_SZ, "ser1");
	return &this->layer;
}

static int cfserl_receive(struct cflayer *l, struct cfpkt *newpkt)
{
	struct cfserl *layr = container_obj(l);
	u16 pkt_len;
	struct cfpkt *pkt = NULL;
	struct cfpkt *tail_pkt = NULL;
	u8 tmp8;
	u16 tmp;
	u8 stx = CFSERL_STX;
	int ret;
	u16 expectlen = 0;

	caif_assert(newpkt != NULL);
	spin_lock(&layr->sync);

	/* Merge the new data with any partial frame left over from the
	 * previous call before parsing. */
	if (layr->incomplete_frm != NULL) {
		layr->incomplete_frm =
		    cfpkt_append(layr->incomplete_frm, newpkt, expectlen);
		pkt = layr->incomplete_frm;
		if (pkt == NULL) {
			spin_unlock(&layr->sync);
			return -ENOMEM;
		}
	} else {
		pkt = newpkt;
	}
	layr->incomplete_frm = NULL;

	do {
		/* Search for STX at start of pkt if STX is used */
		if (layr->usestx) {
			cfpkt_extr_head(pkt, &tmp8, 1);
			if (tmp8 != CFSERL_STX) {
				while (cfpkt_more(pkt)
				       && tmp8 != CFSERL_STX) {
					cfpkt_extr_head(pkt, &tmp8, 1);
				}
				if (!cfpkt_more(pkt)) {
					cfpkt_destroy(pkt);
					layr->incomplete_frm = NULL;
					spin_unlock(&layr->sync);
					return -EPROTO;
				}
			}
		}

		pkt_len = cfpkt_getlen(pkt);

		/*
		 * pkt_len is the accumulated length of the packet data
		 * we have received so far.
		 * Exit if the frame does not yet hold the length field.
		 */
		if (pkt_len < 2) {
			if (layr->usestx)
				cfpkt_add_head(pkt, &stx, 1);
			layr->incomplete_frm = pkt;
			spin_unlock(&layr->sync);
			return 0;
		}

		/*
		 * Find the length of the frame.
		 * expectlen is the length we need for a full frame.
		 */
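		/*
		 * On-the-wire layout, as implied by the parsing below
		 * (the STX byte, when used, has already been stripped
		 * at this point):
		 *
		 *   [STX 0x02] [length, 16-bit little-endian] [payload]
		 *
		 * The length field counts the bytes that follow it, so
		 * a complete frame occupies length + 2 bytes from here;
		 * hence the "+ 2" below.
		 */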
		cfpkt_peek_head(pkt, &tmp, 2);
		expectlen = le16_to_cpu(tmp) + 2;

		/*
		 * Frame error handling: an out-of-range length means we
		 * have lost synchronization. Without STX there is no way
		 * to resynchronize, so drop everything; with STX, scan
		 * on for the next STX byte.
		 */
		if (expectlen < CAIF_MINIMUM_PACKET_SIZE
		    || expectlen > CAIF_MAX_FRAMESIZE) {
			if (!layr->usestx) {
				if (pkt != NULL)
					cfpkt_destroy(pkt);
				layr->incomplete_frm = NULL;
				expectlen = 0;
				spin_unlock(&layr->sync);
				return -EPROTO;
			}
			continue;
		}

		if (pkt_len < expectlen) {
			/* Too little received data */
			if (layr->usestx)
				cfpkt_add_head(pkt, &stx, 1);
			layr->incomplete_frm = pkt;
			spin_unlock(&layr->sync);
			return 0;
		}

		/*
		 * Enough data for at least one frame.
		 * Split the frame, if too long.
		 */
		if (pkt_len > expectlen)
			tail_pkt = cfpkt_split(pkt, expectlen);
		else
			tail_pkt = NULL;

		/* Send the first part of the packet upwards. */
		spin_unlock(&layr->sync);
		ret = layr->layer.up->receive(layr->layer.up, pkt);
		spin_lock(&layr->sync);
		if (ret == -EILSEQ) {
			if (layr->usestx) {
				if (tail_pkt != NULL)
					pkt = cfpkt_append(pkt, tail_pkt, 0);
				/* Start search for next STX if frame failed */
				continue;
			} else {
				cfpkt_destroy(pkt);
				pkt = NULL;
			}
		}

		pkt = tail_pkt;

	} while (pkt != NULL);

	spin_unlock(&layr->sync);
	return 0;
}

static int cfserl_transmit(struct cflayer *layer, struct cfpkt *newpkt)
{
	struct cfserl *layr = container_obj(layer);
	int ret;
	u8 tmp8 = CFSERL_STX;

	if (layr->usestx)
		cfpkt_add_head(newpkt, &tmp8, 1);
	ret = layer->dn->transmit(layer->dn, newpkt);
	/* Strip the STX again if the lower layer rejected the packet,
	 * but only when one was actually added above. */
	if (ret < 0 && layr->usestx)
		cfpkt_extr_head(newpkt, &tmp8, 1);

	return ret;
}

static void cfserl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
			   int phyid)
{
	/* Pass control commands straight through to the layer above. */
	layr->up->ctrlcmd(layr->up, ctrl, phyid);
}
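/*
 * Usage sketch (illustrative only, not taken from this file): the
 * serial layer is created with cfserl_create() and spliced between a
 * physical layer below and a protocol layer above. The variables
 * mux_layer and phy_layer are hypothetical stand-ins for whatever the
 * CAIF configuration code actually wires up; layer_set_up() and
 * layer_set_dn() are the helpers from caif_layer.h.
 *
 *	struct cflayer *serl = cfserl_create(CFPHYTYPE_FRAG, 0, true);
 *
 *	if (serl != NULL) {
 *		layer_set_up(serl, mux_layer);
 *		layer_set_dn(serl, phy_layer);
 *	}
 */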