xref: /linux/net/caif/cfmuxl.c (revision 87c9c16317882dd6dbbc07e349bc3223e14f3244)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) ST-Ericsson AB 2010
 * Author:	Sjur Brendeland
 */

#define pr_fmt(fmt) KBUILD_MODNAME ":%s(): " fmt, __func__

#include <linux/stddef.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/rculist.h>
#include <net/caif/cfpkt.h>
#include <net/caif/cfmuxl.h>
#include <net/caif/cfsrvl.h>
#include <net/caif/cffrml.h>

#define container_obj(layr) container_of(layr, struct cfmuxl, layer)

#define CAIF_CTRL_CHANNEL 0
#define UP_CACHE_SIZE 8
#define DN_CACHE_SIZE 8

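/*
 * The mux layer demultiplexes received CAIF packets to the service
 * layers in srvl_list (keyed by channel id) and multiplexes outgoing
 * packets onto the framing layers in frml_list (keyed by physical
 * interface id). Lookups are cached in up_cache/dn_cache and the lists
 * are traversed under RCU.
 */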
struct cfmuxl {
	struct cflayer layer;
	struct list_head srvl_list;
	struct list_head frml_list;
	struct cflayer *up_cache[UP_CACHE_SIZE];
	struct cflayer *dn_cache[DN_CACHE_SIZE];
	/*
	 * Taken when inserting or removing downwards layers.
	 */
	spinlock_t transmit_lock;

	/*
	 * Taken when inserting or removing upwards layers.
	 */
	spinlock_t receive_lock;
};

static int cfmuxl_receive(struct cflayer *layr, struct cfpkt *pkt);
static int cfmuxl_transmit(struct cflayer *layr, struct cfpkt *pkt);
static void cfmuxl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
			   int phyid);
static struct cflayer *get_up(struct cfmuxl *muxl, u16 id);

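/*
 * Allocate and initialise a mux layer instance. Returns the embedded
 * cflayer, or NULL if allocation fails.
 */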
struct cflayer *cfmuxl_create(void)
{
	struct cfmuxl *this = kzalloc(sizeof(struct cfmuxl), GFP_ATOMIC);

	if (!this)
		return NULL;
	this->layer.receive = cfmuxl_receive;
	this->layer.transmit = cfmuxl_transmit;
	this->layer.ctrlcmd = cfmuxl_ctrlcmd;
	INIT_LIST_HEAD(&this->srvl_list);
	INIT_LIST_HEAD(&this->frml_list);
	spin_lock_init(&this->transmit_lock);
	spin_lock_init(&this->receive_lock);
	snprintf(this->layer.name, CAIF_LAYER_NAME_SZ, "mux");
	return &this->layer;
}

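/*
 * Register a downwards (framing) layer. Updates to frml_list are
 * serialised by transmit_lock.
 */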
int cfmuxl_set_dnlayer(struct cflayer *layr, struct cflayer *dn, u8 phyid)
{
	struct cfmuxl *muxl = container_obj(layr);

	spin_lock_bh(&muxl->transmit_lock);
	list_add_rcu(&dn->node, &muxl->frml_list);
	spin_unlock_bh(&muxl->transmit_lock);
	return 0;
}

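/*
 * Look up a layer by id in an RCU-protected list. The caller must hold
 * the RCU read lock or the spinlock guarding the list.
 */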
static struct cflayer *get_from_id(struct list_head *list, u16 id)
{
	struct cflayer *lyr;

	list_for_each_entry_rcu(lyr, list, node) {
		if (lyr->id == id)
			return lyr;
	}

	return NULL;
}

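/*
 * Register an upwards (service) layer for a link id, replacing any
 * layer already registered with the same id.
 */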
int cfmuxl_set_uplayer(struct cflayer *layr, struct cflayer *up, u8 linkid)
{
	struct cfmuxl *muxl = container_obj(layr);
	struct cflayer *old;

	spin_lock_bh(&muxl->receive_lock);

	/* Two entries with the same id are wrong, so remove the old layer */
	old = get_from_id(&muxl->srvl_list, linkid);
	if (old != NULL)
		list_del_rcu(&old->node);

	list_add_rcu(&up->node, &muxl->srvl_list);
	spin_unlock_bh(&muxl->receive_lock);

	return 0;
}

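/*
 * Remove and return the downwards layer registered for phyid. Its
 * cache slot is cleared first so new lookups cannot return it.
 */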
struct cflayer *cfmuxl_remove_dnlayer(struct cflayer *layr, u8 phyid)
{
	struct cfmuxl *muxl = container_obj(layr);
	struct cflayer *dn;
	int idx = phyid % DN_CACHE_SIZE;

	spin_lock_bh(&muxl->transmit_lock);
	RCU_INIT_POINTER(muxl->dn_cache[idx], NULL);
	dn = get_from_id(&muxl->frml_list, phyid);
	if (dn == NULL)
		goto out;

	list_del_rcu(&dn->node);
out:
	spin_unlock_bh(&muxl->transmit_lock);
	return dn;
}

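/*
 * Resolve a channel id to its upwards layer, preferring the cache and
 * falling back to a list search under receive_lock on a miss.
 */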
static struct cflayer *get_up(struct cfmuxl *muxl, u16 id)
{
	struct cflayer *up;
	int idx = id % UP_CACHE_SIZE;

	up = rcu_dereference(muxl->up_cache[idx]);
	if (up == NULL || up->id != id) {
		spin_lock_bh(&muxl->receive_lock);
		up = get_from_id(&muxl->srvl_list, id);
		rcu_assign_pointer(muxl->up_cache[idx], up);
		spin_unlock_bh(&muxl->receive_lock);
	}
	return up;
}

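/*
 * Resolve a physical interface id to its downwards layer, preferring
 * the cache and falling back to a list search under transmit_lock on a
 * miss.
 */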
static struct cflayer *get_dn(struct cfmuxl *muxl, struct dev_info *dev_info)
{
	struct cflayer *dn;
	int idx = dev_info->id % DN_CACHE_SIZE;

	dn = rcu_dereference(muxl->dn_cache[idx]);
	if (dn == NULL || dn->id != dev_info->id) {
		spin_lock_bh(&muxl->transmit_lock);
		dn = get_from_id(&muxl->frml_list, dev_info->id);
		rcu_assign_pointer(muxl->dn_cache[idx], dn);
		spin_unlock_bh(&muxl->transmit_lock);
	}
	return dn;
}

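/*
 * Remove and return the upwards layer registered for a link id. The
 * control channel (id 0) is never removed this way.
 */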
struct cflayer *cfmuxl_remove_uplayer(struct cflayer *layr, u8 id)
{
	struct cflayer *up;
	struct cfmuxl *muxl = container_obj(layr);
	int idx = id % UP_CACHE_SIZE;

	if (id == CAIF_CTRL_CHANNEL) {
		pr_warn("Trying to remove control layer\n");
		return NULL;
	}

	spin_lock_bh(&muxl->receive_lock);
	up = get_from_id(&muxl->srvl_list, id);
	if (up == NULL)
		goto out;

	RCU_INIT_POINTER(muxl->up_cache[idx], NULL);
	list_del_rcu(&up->node);
out:
	spin_unlock_bh(&muxl->receive_lock);
	return up;
}

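/*
 * Receive path: strip the one byte link id from the packet head and
 * hand the packet to the matching upwards layer. A reference is taken
 * on the service layer because the RCU read lock cannot be held across
 * the receive call.
 */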
static int cfmuxl_receive(struct cflayer *layr, struct cfpkt *pkt)
{
	int ret;
	struct cfmuxl *muxl = container_obj(layr);
	u8 id;
	struct cflayer *up;

	if (cfpkt_extr_head(pkt, &id, 1) < 0) {
		pr_err("erroneous CAIF packet\n");
		cfpkt_destroy(pkt);
		return -EPROTO;
	}

	rcu_read_lock();
	up = get_up(muxl, id);

	if (up == NULL) {
		pr_debug("Received data on unknown link ID = %d (0x%x)\n",
			 id, id);
		cfpkt_destroy(pkt);
		/*
		 * Don't return an error, since the modem misbehaves and
		 * sends flow-on before the link setup response.
		 */
		rcu_read_unlock();
		return 0;
	}

	/* Can't hold the RCU read lock across receive, so take a refcount instead */
	cfsrvl_get(up);
	rcu_read_unlock();

	ret = up->receive(up, pkt);

	cfsrvl_put(up);
	return ret;
}

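/*
 * Transmit path: prepend the one byte channel id and pass the packet to
 * the downwards layer for the destination physical interface. A
 * reference is taken on the framing layer because the RCU read lock
 * cannot be held across the transmit call.
 */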
static int cfmuxl_transmit(struct cflayer *layr, struct cfpkt *pkt)
{
	struct cfmuxl *muxl = container_obj(layr);
	int err;
	u8 linkid;
	struct cflayer *dn;
	struct caif_payload_info *info = cfpkt_info(pkt);

	BUG_ON(!info);

	rcu_read_lock();

	dn = get_dn(muxl, info->dev_info);
	if (dn == NULL) {
		pr_debug("Sending data on unknown phy ID = %d (0x%x)\n",
			 info->dev_info->id, info->dev_info->id);
		rcu_read_unlock();
		cfpkt_destroy(pkt);
		return -ENOTCONN;
	}

	info->hdr_len += 1;
	linkid = info->channel_id;
	cfpkt_add_head(pkt, &linkid, 1);

	/* Can't hold the RCU read lock across transmit, so take a refcount instead */
	cffrml_hold(dn);

	rcu_read_unlock();

	err = dn->transmit(dn, pkt);

	cffrml_put(dn);
	return err;
}

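/*
 * Propagate a control command to every service layer matching phyid.
 * On a physical interface going down or a remote shutdown, the layer is
 * removed from the mux before its ctrlcmd is invoked.
 */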
static void cfmuxl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
			   int phyid)
{
	struct cfmuxl *muxl = container_obj(layr);
	struct cflayer *layer;

	rcu_read_lock();
	list_for_each_entry_rcu(layer, &muxl->srvl_list, node) {

		if (cfsrvl_phyid_match(layer, phyid) && layer->ctrlcmd) {

			if ((ctrl == _CAIF_CTRLCMD_PHYIF_DOWN_IND ||
				ctrl == CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND) &&
					layer->id != CAIF_CTRL_CHANNEL)
				cfmuxl_remove_uplayer(layr, layer->id);

			/* NOTE: ctrlcmd is not allowed to block */
			layer->ctrlcmd(layer, ctrl, phyid);
		}
	}
	rcu_read_unlock();
}