xref: /linux/drivers/net/ethernet/netronome/nfp/ccm_mbox.c (revision a6cdeeb16bff89c8486324f53577db058cbe81ba)
1 // SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
2 /* Copyright (C) 2019 Netronome Systems, Inc. */
3 
4 #include <linux/bitfield.h>
5 #include <linux/io.h>
6 #include <linux/skbuff.h>
7 
8 #include "ccm.h"
9 #include "nfp_net.h"
10 
11 /* CCM messages via the mailbox.  CMSGs get wrapped into simple TLVs
12  * and copied into the mailbox.  Multiple messages can be copied to
13  * form a batch.  Threads come in with CMSG formed in an skb, then
14  * enqueue that skb onto the request queue.  If a thread's skb is first
15  * in queue this thread will handle the mailbox operation.  It copies
16  * up to 16 messages into the mailbox (making sure that both requests
17  * and replies will fit).  After FW is done processing the batch it
18  * copies the data out and wakes waiting threads.
19  * If a thread is waiting it either gets its message completed
20  * (response is copied into the same skb as the request, overwriting
21  * it), or becomes the first in queue.
22  * Completions and next-to-run are signaled via the control buffer
23  * to limit potential cache line bounces.
24  */
25 
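/* Illustrative caller sketch (not part of this driver: the my_* names and
 * MY_CCM_TYPE are hypothetical, only the two entry points defined below
 * are real):
 *
 *	struct sk_buff *skb;
 *	int err;
 *
 *	skb = nfp_ccm_mbox_alloc(nn, sizeof(struct my_req),
 *				 sizeof(struct my_reply), GFP_KERNEL);
 *	if (!skb)
 *		return -ENOMEM;
 *
 *	// The request starts with a struct nfp_ccm_hdr; version, type and
 *	// tag are filled in by the enqueue path, the caller fills the payload.
 *	my_fill_request((struct my_req *)skb->data);
 *
 *	err = nfp_ccm_mbox_communicate(nn, skb, MY_CCM_TYPE,
 *				       sizeof(struct my_reply),
 *				       sizeof(struct my_reply));
 *	if (err)
 *		return err;	// skb has already been freed on error
 *
 *	my_handle_reply((struct my_reply *)skb->data);
 *	dev_consume_skb_any(skb);
 */
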
26 #define NFP_CCM_MBOX_BATCH_LIMIT	16
27 #define NFP_CCM_TIMEOUT			(NFP_NET_POLL_TIMEOUT * 1000)
28 #define NFP_CCM_MAX_QLEN		256
29 
30 enum nfp_net_mbox_cmsg_state {
31 	NFP_NET_MBOX_CMSG_STATE_QUEUED,
32 	NFP_NET_MBOX_CMSG_STATE_NEXT,
33 	NFP_NET_MBOX_CMSG_STATE_BUSY,
34 	NFP_NET_MBOX_CMSG_STATE_REPLY_FOUND,
35 	NFP_NET_MBOX_CMSG_STATE_DONE,
36 };
37 
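/* Rough lifecycle of a message, as driven by the helpers below:
 *
 *	QUEUED      - sits on mbox_cmsg.queue waiting for a runner
 *	NEXT        - first in queue, its waiting thread should run the batch
 *	BUSY        - included in a batch currently owned by another runner
 *	REPLY_FOUND - a matching reply was copied back into the skb
 *	DONE        - processing finished, the waiter collects cb->err
 */
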
38 /**
39  * struct nfp_ccm_mbox_skb_cb - CCM mailbox specific info
40  * @state:	processing state (/stage) of the message
41  * @err:	error encountered during processing if any
42  * @max_len:	max(request_len, reply_len)
43  * @exp_reply:	expected reply length (0 means don't validate)
44  */
45 struct nfp_ccm_mbox_cmsg_cb {
46 	enum nfp_net_mbox_cmsg_state state;
47 	int err;
48 	unsigned int max_len;
49 	unsigned int exp_reply;
50 };
51 
52 static u32 nfp_ccm_mbox_max_msg(struct nfp_net *nn)
53 {
54 	return round_down(nn->tlv_caps.mbox_len, 4) -
55 		NFP_NET_CFG_MBOX_SIMPLE_VAL - /* common mbox command header */
56 		4 * 2; /* Msg TLV plus End TLV headers */
57 }
58 
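/* Illustrative arithmetic (the mailbox size is device dependent, and
 * NFP_NET_CFG_MBOX_SIMPLE_VAL == 8 is an assumption here): a 4096 byte
 * mailbox would give 4096 - 8 - 8 = 4080 bytes for a single message,
 * i.e. the mailbox minus the common command header area and the Msg TLV
 * plus End TLV headers (4 bytes each).
 */
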
59 static void
60 nfp_ccm_mbox_msg_init(struct sk_buff *skb, unsigned int exp_reply, int max_len)
61 {
62 	struct nfp_ccm_mbox_cmsg_cb *cb = (void *)skb->cb;
63 
64 	cb->state = NFP_NET_MBOX_CMSG_STATE_QUEUED;
65 	cb->err = 0;
66 	cb->max_len = max_len;
67 	cb->exp_reply = exp_reply;
68 }
69 
70 static int nfp_ccm_mbox_maxlen(const struct sk_buff *skb)
71 {
72 	struct nfp_ccm_mbox_cmsg_cb *cb = (void *)skb->cb;
73 
74 	return cb->max_len;
75 }
76 
77 static bool nfp_ccm_mbox_done(struct sk_buff *skb)
78 {
79 	struct nfp_ccm_mbox_cmsg_cb *cb = (void *)skb->cb;
80 
81 	return cb->state == NFP_NET_MBOX_CMSG_STATE_DONE;
82 }
83 
84 static bool nfp_ccm_mbox_in_progress(struct sk_buff *skb)
85 {
86 	struct nfp_ccm_mbox_cmsg_cb *cb = (void *)skb->cb;
87 
88 	return cb->state != NFP_NET_MBOX_CMSG_STATE_QUEUED &&
89 	       cb->state != NFP_NET_MBOX_CMSG_STATE_NEXT;
90 }
91 
92 static void nfp_ccm_mbox_set_busy(struct sk_buff *skb)
93 {
94 	struct nfp_ccm_mbox_cmsg_cb *cb = (void *)skb->cb;
95 
96 	cb->state = NFP_NET_MBOX_CMSG_STATE_BUSY;
97 }
98 
99 static bool nfp_ccm_mbox_is_first(struct nfp_net *nn, struct sk_buff *skb)
100 {
101 	return skb_queue_is_first(&nn->mbox_cmsg.queue, skb);
102 }
103 
104 static bool nfp_ccm_mbox_should_run(struct nfp_net *nn, struct sk_buff *skb)
105 {
106 	struct nfp_ccm_mbox_cmsg_cb *cb = (void *)skb->cb;
107 
108 	return cb->state == NFP_NET_MBOX_CMSG_STATE_NEXT;
109 }
110 
111 static void nfp_ccm_mbox_mark_next_runner(struct nfp_net *nn)
112 {
113 	struct nfp_ccm_mbox_cmsg_cb *cb;
114 	struct sk_buff *skb;
115 
116 	skb = skb_peek(&nn->mbox_cmsg.queue);
117 	if (!skb)
118 		return;
119 
120 	cb = (void *)skb->cb;
121 	cb->state = NFP_NET_MBOX_CMSG_STATE_NEXT;
122 }
123 
124 static void
125 nfp_ccm_mbox_write_tlv(struct nfp_net *nn, u32 off, u32 type, u32 len)
126 {
127 	nn_writel(nn, off,
128 		  FIELD_PREP(NFP_NET_MBOX_TLV_TYPE, type) |
129 		  FIELD_PREP(NFP_NET_MBOX_TLV_LEN, len));
130 }
131 
132 static void nfp_ccm_mbox_copy_in(struct nfp_net *nn, struct sk_buff *last)
133 {
134 	struct sk_buff *skb;
135 	int reserve, i, cnt;
136 	__be32 *data;
137 	u32 off, len;
138 
139 	off = nn->tlv_caps.mbox_off + NFP_NET_CFG_MBOX_SIMPLE_VAL;
140 	skb = __skb_peek(&nn->mbox_cmsg.queue);
141 	while (true) {
142 		nfp_ccm_mbox_write_tlv(nn, off, NFP_NET_MBOX_TLV_TYPE_MSG,
143 				       skb->len);
144 		off += 4;
145 
146 		/* Write data word by word, skb->data should be aligned */
147 		data = (__be32 *)skb->data;
148 		cnt = skb->len / 4;
149 		for (i = 0 ; i < cnt; i++) {
150 			nn_writel(nn, off, be32_to_cpu(data[i]));
151 			off += 4;
152 		}
153 		if (skb->len & 3) {
154 			__be32 tmp = 0;
155 
156 			memcpy(&tmp, &data[i], skb->len & 3);
157 			nn_writel(nn, off, be32_to_cpu(tmp));
158 			off += 4;
159 		}
160 
161 		/* Reserve space if reply is bigger */
162 		len = round_up(skb->len, 4);
163 		reserve = nfp_ccm_mbox_maxlen(skb) - len;
164 		if (reserve > 0) {
165 			nfp_ccm_mbox_write_tlv(nn, off,
166 					       NFP_NET_MBOX_TLV_TYPE_RESV,
167 					       reserve);
168 			off += 4 + reserve;
169 		}
170 
171 		if (skb == last)
172 			break;
173 		skb = skb_queue_next(&nn->mbox_cmsg.queue, skb);
174 	}
175 
176 	nfp_ccm_mbox_write_tlv(nn, off, NFP_NET_MBOX_TLV_TYPE_END, 0);
177 }
178 
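/* Illustrative mailbox contents after nfp_ccm_mbox_copy_in() for a batch
 * of two requests (starting at mbox_off + NFP_NET_CFG_MBOX_SIMPLE_VAL,
 * everything 4 byte aligned):
 *
 *	| MSG TLV | request A ... | RESV TLV | reserved ... |
 *	| MSG TLV | request B ... | END TLV |
 *
 * A RESV TLV is written only when the expected reply is larger than the
 * rounded-up request, so the firmware has room to overwrite the request
 * with its (larger) reply in place.
 */
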
179 static struct sk_buff *
180 nfp_ccm_mbox_find_req(struct nfp_net *nn, __be16 tag, struct sk_buff *last)
181 {
182 	struct sk_buff *skb;
183 
184 	skb = __skb_peek(&nn->mbox_cmsg.queue);
185 	while (true) {
186 		if (__nfp_ccm_get_tag(skb) == tag)
187 			return skb;
188 
189 		if (skb == last)
190 			return NULL;
191 		skb = skb_queue_next(&nn->mbox_cmsg.queue, skb);
192 	}
193 }
194 
195 static void nfp_ccm_mbox_copy_out(struct nfp_net *nn, struct sk_buff *last)
196 {
197 	struct nfp_ccm_mbox_cmsg_cb *cb;
198 	u8 __iomem *data, *end;
199 	struct sk_buff *skb;
200 
201 	data = nn->dp.ctrl_bar + nn->tlv_caps.mbox_off +
202 		NFP_NET_CFG_MBOX_SIMPLE_VAL;
203 	end = data + nn->tlv_caps.mbox_len;
204 
205 	while (true) {
206 		unsigned int length, offset, type;
207 		struct nfp_ccm_hdr hdr;
208 		__be32 *skb_data;
209 		u32 tlv_hdr;
210 		int i, cnt;
211 
212 		tlv_hdr = readl(data);
213 		type = FIELD_GET(NFP_NET_MBOX_TLV_TYPE, tlv_hdr);
214 		length = FIELD_GET(NFP_NET_MBOX_TLV_LEN, tlv_hdr);
215 		offset = data - nn->dp.ctrl_bar;
216 
217 		/* Advance past the header */
218 		data += 4;
219 
220 		if (data + length > end) {
221 			nn_dp_warn(&nn->dp, "mailbox oversized TLV type:%d offset:%u len:%u\n",
222 				   type, offset, length);
223 			break;
224 		}
225 
226 		if (type == NFP_NET_MBOX_TLV_TYPE_END)
227 			break;
228 		if (type == NFP_NET_MBOX_TLV_TYPE_RESV)
229 			goto next_tlv;
230 		if (type != NFP_NET_MBOX_TLV_TYPE_MSG &&
231 		    type != NFP_NET_MBOX_TLV_TYPE_MSG_NOSUP) {
232 			nn_dp_warn(&nn->dp, "mailbox unknown TLV type:%d offset:%u len:%u\n",
233 				   type, offset, length);
234 			break;
235 		}
236 
237 		if (length < 4) {
238 			nn_dp_warn(&nn->dp, "mailbox msg too short to contain header TLV type:%d offset:%u len:%u\n",
239 				   type, offset, length);
240 			break;
241 		}
242 
243 		hdr.raw = cpu_to_be32(readl(data));
244 
245 		skb = nfp_ccm_mbox_find_req(nn, hdr.tag, last);
246 		if (!skb) {
247 			nn_dp_warn(&nn->dp, "mailbox request not found:%u\n",
248 				   be16_to_cpu(hdr.tag));
249 			break;
250 		}
251 		cb = (void *)skb->cb;
252 
253 		if (type == NFP_NET_MBOX_TLV_TYPE_MSG_NOSUP) {
254 			nn_dp_warn(&nn->dp,
255 				   "mailbox msg not supported type:%d\n",
256 				   nfp_ccm_get_type(skb));
257 			cb->err = -EIO;
258 			goto next_tlv;
259 		}
260 
261 		if (hdr.type != __NFP_CCM_REPLY(nfp_ccm_get_type(skb))) {
262 			nn_dp_warn(&nn->dp, "mailbox msg reply wrong type:%u expected:%lu\n",
263 				   hdr.type,
264 				   __NFP_CCM_REPLY(nfp_ccm_get_type(skb)));
265 			cb->err = -EIO;
266 			goto next_tlv;
267 		}
268 		if (cb->exp_reply && length != cb->exp_reply) {
269 			nn_dp_warn(&nn->dp, "mailbox msg reply wrong size type:%u expected:%u have:%u\n",
270 				   hdr.type, cb->exp_reply, length);
271 			cb->err = -EIO;
272 			goto next_tlv;
273 		}
274 		if (length > cb->max_len) {
275 			nn_dp_warn(&nn->dp, "mailbox msg oversized reply type:%u max:%u have:%u\n",
276 				   hdr.type, cb->max_len, length);
277 			cb->err = -EIO;
278 			goto next_tlv;
279 		}
280 
281 		if (length <= skb->len)
282 			__skb_trim(skb, length);
283 		else
284 			skb_put(skb, length - skb->len);
285 
286 		/* We overcopy here slightly, but that's okay, the skb is large
287 		 * enough, and the garbage will be ignored (beyond skb->len).
288 		 */
289 		skb_data = (__be32 *)skb->data;
290 		memcpy(skb_data, &hdr, 4);
291 
292 		cnt = DIV_ROUND_UP(length, 4);
293 		for (i = 1 ; i < cnt; i++)
294 			skb_data[i] = cpu_to_be32(readl(data + i * 4));
295 
296 		cb->state = NFP_NET_MBOX_CMSG_STATE_REPLY_FOUND;
297 next_tlv:
298 		data += round_up(length, 4);
299 		if (data + 4 > end) {
300 			nn_dp_warn(&nn->dp,
301 				   "reached end of MBOX without END TLV\n");
302 			break;
303 		}
304 	}
305 
306 	smp_wmb(); /* order the skb->data vs. cb->state */
307 	spin_lock_bh(&nn->mbox_cmsg.queue.lock);
308 	do {
309 		skb = __skb_dequeue(&nn->mbox_cmsg.queue);
310 		cb = (void *)skb->cb;
311 
312 		if (cb->state != NFP_NET_MBOX_CMSG_STATE_REPLY_FOUND) {
313 			cb->err = -ENOENT;
314 			smp_wmb(); /* order the cb->err vs. cb->state */
315 		}
316 		cb->state = NFP_NET_MBOX_CMSG_STATE_DONE;
317 	} while (skb != last);
318 
319 	nfp_ccm_mbox_mark_next_runner(nn);
320 	spin_unlock_bh(&nn->mbox_cmsg.queue.lock);
321 }
322 
323 static void
324 nfp_ccm_mbox_mark_all_err(struct nfp_net *nn, struct sk_buff *last, int err)
325 {
326 	struct nfp_ccm_mbox_cmsg_cb *cb;
327 	struct sk_buff *skb;
328 
329 	spin_lock_bh(&nn->mbox_cmsg.queue.lock);
330 	do {
331 		skb = __skb_dequeue(&nn->mbox_cmsg.queue);
332 		cb = (void *)skb->cb;
333 
334 		cb->err = err;
335 		smp_wmb(); /* order the cb->err vs. cb->state */
336 		cb->state = NFP_NET_MBOX_CMSG_STATE_DONE;
337 	} while (skb != last);
338 
339 	nfp_ccm_mbox_mark_next_runner(nn);
340 	spin_unlock_bh(&nn->mbox_cmsg.queue.lock);
341 }
342 
343 static void nfp_ccm_mbox_run_queue_unlock(struct nfp_net *nn)
344 	__releases(&nn->mbox_cmsg.queue.lock)
345 {
346 	int space = nn->tlv_caps.mbox_len - NFP_NET_CFG_MBOX_SIMPLE_VAL;
347 	struct sk_buff *skb, *last;
348 	int cnt, err;
349 
350 	space -= 4; /* for End TLV */
351 
352 	/* First skb must fit, because it's ours and we checked it fits */
353 	cnt = 1;
354 	last = skb = __skb_peek(&nn->mbox_cmsg.queue);
355 	space -= 4 + nfp_ccm_mbox_maxlen(skb);
356 
357 	while (!skb_queue_is_last(&nn->mbox_cmsg.queue, last)) {
358 		skb = skb_queue_next(&nn->mbox_cmsg.queue, last);
359 		space -= 4 + nfp_ccm_mbox_maxlen(skb);
360 		if (space < 0)
361 			break;
362 		last = skb;
363 		nfp_ccm_mbox_set_busy(skb);
364 		cnt++;
365 		if (cnt == NFP_CCM_MBOX_BATCH_LIMIT)
366 			break;
367 	}
368 	spin_unlock_bh(&nn->mbox_cmsg.queue.lock);
369 
370 	/* Now we own all skbs marked in progress; new requests may arrive
371 	 * at the end of the queue.
372 	 */
373 
374 	nn_ctrl_bar_lock(nn);
375 
376 	nfp_ccm_mbox_copy_in(nn, last);
377 
378 	err = nfp_net_mbox_reconfig(nn, NFP_NET_CFG_MBOX_CMD_TLV_CMSG);
379 	if (!err)
380 		nfp_ccm_mbox_copy_out(nn, last);
381 	else
382 		nfp_ccm_mbox_mark_all_err(nn, last, -EIO);
383 
384 	nn_ctrl_bar_unlock(nn);
385 
386 	wake_up_all(&nn->mbox_cmsg.wq);
387 }
388 
389 static int nfp_ccm_mbox_skb_return(struct sk_buff *skb)
390 {
391 	struct nfp_ccm_mbox_cmsg_cb *cb = (void *)skb->cb;
392 
393 	if (cb->err)
394 		dev_kfree_skb_any(skb);
395 	return cb->err;
396 }
397 
398 /* If the wait timed out but the command is already in progress we have
399  * to wait until it finishes.  Runners have ownership of the skbs marked
400  * as busy.
401  */
402 static int
403 nfp_ccm_mbox_unlink_unlock(struct nfp_net *nn, struct sk_buff *skb,
404 			   enum nfp_ccm_type type)
405 	__releases(&nn->mbox_cmsg.queue.lock)
406 {
407 	bool was_first;
408 
409 	if (nfp_ccm_mbox_in_progress(skb)) {
410 		spin_unlock_bh(&nn->mbox_cmsg.queue.lock);
411 
412 		wait_event(nn->mbox_cmsg.wq, nfp_ccm_mbox_done(skb));
413 		smp_rmb(); /* pairs with smp_wmb() after data is written */
414 		return nfp_ccm_mbox_skb_return(skb);
415 	}
416 
417 	was_first = nfp_ccm_mbox_should_run(nn, skb);
418 	__skb_unlink(skb, &nn->mbox_cmsg.queue);
419 	if (was_first)
420 		nfp_ccm_mbox_mark_next_runner(nn);
421 
422 	spin_unlock_bh(&nn->mbox_cmsg.queue.lock);
423 
424 	if (was_first)
425 		wake_up_all(&nn->mbox_cmsg.wq);
426 
427 	nn_dp_warn(&nn->dp, "time out waiting for mbox response to 0x%02x\n",
428 		   type);
429 	return -ETIMEDOUT;
430 }
431 
432 static int
433 nfp_ccm_mbox_msg_prepare(struct nfp_net *nn, struct sk_buff *skb,
434 			 enum nfp_ccm_type type,
435 			 unsigned int reply_size, unsigned int max_reply_size,
436 			 gfp_t flags)
437 {
438 	const unsigned int mbox_max = nfp_ccm_mbox_max_msg(nn);
439 	unsigned int max_len;
440 	ssize_t undersize;
441 	int err;
442 
443 	if (unlikely(!(nn->tlv_caps.mbox_cmsg_types & BIT(type)))) {
444 		nn_dp_warn(&nn->dp,
445 			   "message type %d not supported by mailbox\n", type);
446 		return -EINVAL;
447 	}
448 
449 	/* If the reply size is unknown assume it will take the entire
450 	 * mailbox; the callers should do their best for this to never
451 	 * happen.
452 	 */
453 	if (!max_reply_size)
454 		max_reply_size = mbox_max;
455 	max_reply_size = round_up(max_reply_size, 4);
456 
457 	/* Make sure we can fit the entire reply into the skb,
458 	 * and that we don't have to slow down the mbox handler
459 	 * with allocations.
460 	 */
461 	undersize = max_reply_size - (skb_end_pointer(skb) - skb->data);
462 	if (undersize > 0) {
463 		err = pskb_expand_head(skb, 0, undersize, flags);
464 		if (err) {
465 			nn_dp_warn(&nn->dp,
466 				   "can't allocate reply buffer for mailbox\n");
467 			return err;
468 		}
469 	}
470 
471 	/* Make sure that request and response both fit into the mailbox */
472 	max_len = max(max_reply_size, round_up(skb->len, 4));
473 	if (max_len > mbox_max) {
474 		nn_dp_warn(&nn->dp,
475 			   "message too big for the mailbox: %u/%u vs %u\n",
476 			   skb->len, max_reply_size, mbox_max);
477 		return -EMSGSIZE;
478 	}
479 
480 	nfp_ccm_mbox_msg_init(skb, reply_size, max_len);
481 
482 	return 0;
483 }
484 
485 static int
486 nfp_ccm_mbox_msg_enqueue(struct nfp_net *nn, struct sk_buff *skb,
487 			 enum nfp_ccm_type type)
488 {
489 	struct nfp_ccm_hdr *hdr;
490 
491 	assert_spin_locked(&nn->mbox_cmsg.queue.lock);
492 
493 	if (nn->mbox_cmsg.queue.qlen >= NFP_CCM_MAX_QLEN) {
494 		nn_dp_warn(&nn->dp, "mailbox request queue too long\n");
495 		return -EBUSY;
496 	}
497 
498 	hdr = (void *)skb->data;
499 	hdr->ver = NFP_CCM_ABI_VERSION;
500 	hdr->type = type;
501 	hdr->tag = cpu_to_be16(nn->mbox_cmsg.tag++);
502 
503 	__skb_queue_tail(&nn->mbox_cmsg.queue, skb);
504 
505 	return 0;
506 }
507 
508 int nfp_ccm_mbox_communicate(struct nfp_net *nn, struct sk_buff *skb,
509 			     enum nfp_ccm_type type,
510 			     unsigned int reply_size,
511 			     unsigned int max_reply_size)
512 {
513 	int err;
514 
515 	err = nfp_ccm_mbox_msg_prepare(nn, skb, type, reply_size,
516 				       max_reply_size, GFP_KERNEL);
517 	if (err)
518 		goto err_free_skb;
519 
520 	spin_lock_bh(&nn->mbox_cmsg.queue.lock);
521 
522 	err = nfp_ccm_mbox_msg_enqueue(nn, skb, type);
523 	if (err)
524 		goto err_unlock;
525 
526 	/* First in queue takes the mailbox lock and processes the batch */
527 	if (!nfp_ccm_mbox_is_first(nn, skb)) {
528 		bool to;
529 
530 		spin_unlock_bh(&nn->mbox_cmsg.queue.lock);
531 
532 		to = !wait_event_timeout(nn->mbox_cmsg.wq,
533 					 nfp_ccm_mbox_done(skb) ||
534 					 nfp_ccm_mbox_should_run(nn, skb),
535 					 msecs_to_jiffies(NFP_CCM_TIMEOUT));
536 
537 		/* fast path for those completed by another thread */
538 		if (nfp_ccm_mbox_done(skb)) {
539 			smp_rmb(); /* pairs with wmb after data is written */
540 			return nfp_ccm_mbox_skb_return(skb);
541 		}
542 
543 		spin_lock_bh(&nn->mbox_cmsg.queue.lock);
544 
545 		if (!nfp_ccm_mbox_is_first(nn, skb)) {
546 			WARN_ON(!to);
547 
548 			err = nfp_ccm_mbox_unlink_unlock(nn, skb, type);
549 			if (err)
550 				goto err_free_skb;
551 			return 0;
552 		}
553 	}
554 
555 	/* run queue expects the lock held */
556 	nfp_ccm_mbox_run_queue_unlock(nn);
557 	return nfp_ccm_mbox_skb_return(skb);
558 
559 err_unlock:
560 	spin_unlock_bh(&nn->mbox_cmsg.queue.lock);
561 err_free_skb:
562 	dev_kfree_skb_any(skb);
563 	return err;
564 }
565 
566 struct sk_buff *
567 nfp_ccm_mbox_alloc(struct nfp_net *nn, unsigned int req_size,
568 		   unsigned int reply_size, gfp_t flags)
569 {
570 	unsigned int max_size;
571 	struct sk_buff *skb;
572 
573 	if (!reply_size)
574 		max_size = nfp_ccm_mbox_max_msg(nn);
575 	else
576 		max_size = max(req_size, reply_size);
577 	max_size = round_up(max_size, 4);
578 
579 	skb = alloc_skb(max_size, flags);
580 	if (!skb)
581 		return NULL;
582 
583 	skb_put(skb, req_size);
584 
585 	return skb;
586 }
587 
588 bool nfp_ccm_mbox_fits(struct nfp_net *nn, unsigned int size)
589 {
590 	return nfp_ccm_mbox_max_msg(nn) >= size;
591 }
592