xref: /linux/drivers/firmware/tegra/bpmp.c (revision 3fd6c59042dbba50391e30862beac979491145fe)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (c) 2016, NVIDIA CORPORATION.  All rights reserved.
4  */
5 
6 #include <linux/clk/tegra.h>
7 #include <linux/genalloc.h>
8 #include <linux/mailbox_client.h>
9 #include <linux/module.h>
10 #include <linux/of.h>
11 #include <linux/of_platform.h>
12 #include <linux/platform_device.h>
13 #include <linux/pm.h>
14 #include <linux/semaphore.h>
15 #include <linux/sched/clock.h>
16 
17 #include <soc/tegra/bpmp.h>
18 #include <soc/tegra/bpmp-abi.h>
19 #include <soc/tegra/ivc.h>
20 
21 #include "bpmp-private.h"
22 
23 #define MSG_ACK		BIT(0)
24 #define MSG_RING	BIT(1)
25 #define TAG_SZ		32
26 
27 static inline const struct tegra_bpmp_ops *
28 channel_to_ops(struct tegra_bpmp_channel *channel)
29 {
30 	struct tegra_bpmp *bpmp = channel->bpmp;
31 
32 	return bpmp->soc->ops;
33 }
34 
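/*
 * Resolve the "nvidia,bpmp" phandle in @dev's device tree node and return
 * the corresponding BPMP instance. A reference is taken on the BPMP device;
 * callers release it with tegra_bpmp_put(). Returns an ERR_PTR on failure,
 * including -EPROBE_DEFER while the BPMP driver has not finished probing.
 */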
35 struct tegra_bpmp *tegra_bpmp_get(struct device *dev)
36 {
37 	struct platform_device *pdev;
38 	struct tegra_bpmp *bpmp;
39 	struct device_node *np;
40 
41 	np = of_parse_phandle(dev->of_node, "nvidia,bpmp", 0);
42 	if (!np)
43 		return ERR_PTR(-ENOENT);
44 
45 	pdev = of_find_device_by_node(np);
46 	if (!pdev) {
47 		bpmp = ERR_PTR(-ENODEV);
48 		goto put;
49 	}
50 
51 	bpmp = platform_get_drvdata(pdev);
52 	if (!bpmp) {
53 		bpmp = ERR_PTR(-EPROBE_DEFER);
54 		put_device(&pdev->dev);
55 		goto put;
56 	}
57 
58 put:
59 	of_node_put(np);
60 	return bpmp;
61 }
62 EXPORT_SYMBOL_GPL(tegra_bpmp_get);
63 
64 void tegra_bpmp_put(struct tegra_bpmp *bpmp)
65 {
66 	if (bpmp)
67 		put_device(bpmp->dev);
68 }
69 EXPORT_SYMBOL_GPL(tegra_bpmp_put);
70 
71 static int
72 tegra_bpmp_channel_get_thread_index(struct tegra_bpmp_channel *channel)
73 {
74 	struct tegra_bpmp *bpmp = channel->bpmp;
75 	unsigned int count;
76 	int index;
77 
78 	count = bpmp->soc->channels.thread.count;
79 
80 	index = channel - channel->bpmp->threaded_channels;
81 	if (index < 0 || index >= count)
82 		return -EINVAL;
83 
84 	return index;
85 }
86 
87 static bool tegra_bpmp_message_valid(const struct tegra_bpmp_message *msg)
88 {
89 	return (msg->tx.size <= MSG_DATA_MIN_SZ) &&
90 	       (msg->rx.size <= MSG_DATA_MIN_SZ) &&
91 	       (msg->tx.size == 0 || msg->tx.data) &&
92 	       (msg->rx.size == 0 || msg->rx.data);
93 }
94 
95 static bool tegra_bpmp_is_response_ready(struct tegra_bpmp_channel *channel)
96 {
97 	const struct tegra_bpmp_ops *ops = channel_to_ops(channel);
98 
99 	return ops->is_response_ready(channel);
100 }
101 
102 static bool tegra_bpmp_is_request_ready(struct tegra_bpmp_channel *channel)
103 {
104 	const struct tegra_bpmp_ops *ops = channel_to_ops(channel);
105 
106 	return ops->is_request_ready(channel);
107 }
108 
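/*
 * Busy-wait for the BPMP to post a response on @channel, bounded by the
 * cpu_tx channel timeout (in microseconds). Used on the atomic TX path.
 */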
109 static int tegra_bpmp_wait_response(struct tegra_bpmp_channel *channel)
110 {
111 	unsigned long timeout = channel->bpmp->soc->channels.cpu_tx.timeout;
112 	ktime_t end;
113 
114 	end = ktime_add_us(ktime_get(), timeout);
115 
116 	do {
117 		if (tegra_bpmp_is_response_ready(channel))
118 			return 0;
119 	} while (ktime_before(ktime_get(), end));
120 
121 	return -ETIMEDOUT;
122 }
123 
124 static int tegra_bpmp_ack_response(struct tegra_bpmp_channel *channel)
125 {
126 	const struct tegra_bpmp_ops *ops = channel_to_ops(channel);
127 
128 	return ops->ack_response(channel);
129 }
130 
131 static int tegra_bpmp_ack_request(struct tegra_bpmp_channel *channel)
132 {
133 	const struct tegra_bpmp_ops *ops = channel_to_ops(channel);
134 
135 	return ops->ack_request(channel);
136 }
137 
138 static bool
139 tegra_bpmp_is_request_channel_free(struct tegra_bpmp_channel *channel)
140 {
141 	const struct tegra_bpmp_ops *ops = channel_to_ops(channel);
142 
143 	return ops->is_request_channel_free(channel);
144 }
145 
146 static bool
147 tegra_bpmp_is_response_channel_free(struct tegra_bpmp_channel *channel)
148 {
149 	const struct tegra_bpmp_ops *ops = channel_to_ops(channel);
150 
151 	return ops->is_response_channel_free(channel);
152 }
153 
154 static int
155 tegra_bpmp_wait_request_channel_free(struct tegra_bpmp_channel *channel)
156 {
157 	unsigned long timeout = channel->bpmp->soc->channels.cpu_tx.timeout;
158 	ktime_t start, now;
159 
160 	start = ns_to_ktime(local_clock());
161 
162 	do {
163 		if (tegra_bpmp_is_request_channel_free(channel))
164 			return 0;
165 
166 		now = ns_to_ktime(local_clock());
167 	} while (ktime_us_delta(now, start) < timeout);
168 
169 	return -ETIMEDOUT;
170 }
171 
172 static int tegra_bpmp_post_request(struct tegra_bpmp_channel *channel)
173 {
174 	const struct tegra_bpmp_ops *ops = channel_to_ops(channel);
175 
176 	return ops->post_request(channel);
177 }
178 
179 static int tegra_bpmp_post_response(struct tegra_bpmp_channel *channel)
180 {
181 	const struct tegra_bpmp_ops *ops = channel_to_ops(channel);
182 
183 	return ops->post_response(channel);
184 }
185 
186 static int tegra_bpmp_ring_doorbell(struct tegra_bpmp *bpmp)
187 {
188 	return bpmp->soc->ops->ring_doorbell(bpmp);
189 }
190 
191 static ssize_t __tegra_bpmp_channel_read(struct tegra_bpmp_channel *channel,
192 					 void *data, size_t size, int *ret)
193 {
194 	int err;
195 
196 	if (data && size > 0)
197 		tegra_bpmp_mb_read(data, &channel->ib, size);
198 
199 	err = tegra_bpmp_ack_response(channel);
200 	if (err < 0)
201 		return err;
202 
203 	*ret = tegra_bpmp_mb_read_field(&channel->ib, code);
204 
205 	return 0;
206 }
207 
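/*
 * Read the response to a threaded request: copy the payload out of the
 * inbound frame, clear the channel's "allocated" bit under bpmp->lock and
 * return the threaded-channel semaphore so other requesters can proceed.
 */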
208 static ssize_t tegra_bpmp_channel_read(struct tegra_bpmp_channel *channel,
209 				       void *data, size_t size, int *ret)
210 {
211 	struct tegra_bpmp *bpmp = channel->bpmp;
212 	unsigned long flags;
213 	ssize_t err;
214 	int index;
215 
216 	index = tegra_bpmp_channel_get_thread_index(channel);
217 	if (index < 0) {
218 		err = index;
219 		goto unlock;
220 	}
221 
222 	spin_lock_irqsave(&bpmp->lock, flags);
223 	err = __tegra_bpmp_channel_read(channel, data, size, ret);
224 	clear_bit(index, bpmp->threaded.allocated);
225 	spin_unlock_irqrestore(&bpmp->lock, flags);
226 
227 unlock:
228 	up(&bpmp->threaded.lock);
229 
230 	return err;
231 }
232 
233 static ssize_t __tegra_bpmp_channel_write(struct tegra_bpmp_channel *channel,
234 					  unsigned int mrq, unsigned long flags,
235 					  const void *data, size_t size)
236 {
237 	tegra_bpmp_mb_write_field(&channel->ob, code, mrq);
238 	tegra_bpmp_mb_write_field(&channel->ob, flags, flags);
239 
240 	if (data && size > 0)
241 		tegra_bpmp_mb_write(&channel->ob, data, size);
242 
243 	return tegra_bpmp_post_request(channel);
244 }
245 
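/*
 * Claim a free threaded channel and write @mrq plus its payload into it.
 * The request is posted with MSG_ACK | MSG_RING so that the firmware
 * acknowledges it and rings the doorbell on completion. Returns the claimed
 * channel, or an ERR_PTR if no channel becomes available within the thread
 * timeout.
 */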
246 static struct tegra_bpmp_channel *
247 tegra_bpmp_write_threaded(struct tegra_bpmp *bpmp, unsigned int mrq,
248 			  const void *data, size_t size)
249 {
250 	unsigned long timeout = bpmp->soc->channels.thread.timeout;
251 	unsigned int count = bpmp->soc->channels.thread.count;
252 	struct tegra_bpmp_channel *channel;
253 	unsigned long flags;
254 	unsigned int index;
255 	int err;
256 
257 	err = down_timeout(&bpmp->threaded.lock, usecs_to_jiffies(timeout));
258 	if (err < 0)
259 		return ERR_PTR(err);
260 
261 	spin_lock_irqsave(&bpmp->lock, flags);
262 
263 	index = find_first_zero_bit(bpmp->threaded.allocated, count);
264 	if (index == count) {
265 		err = -EBUSY;
266 		goto unlock;
267 	}
268 
269 	channel = &bpmp->threaded_channels[index];
270 
271 	if (!tegra_bpmp_is_request_channel_free(channel)) {
272 		err = -EBUSY;
273 		goto unlock;
274 	}
275 
276 	set_bit(index, bpmp->threaded.allocated);
277 
278 	err = __tegra_bpmp_channel_write(channel, mrq, MSG_ACK | MSG_RING,
279 					 data, size);
280 	if (err < 0)
281 		goto clear_allocated;
282 
283 	set_bit(index, bpmp->threaded.busy);
284 
285 	spin_unlock_irqrestore(&bpmp->lock, flags);
286 	return channel;
287 
288 clear_allocated:
289 	clear_bit(index, bpmp->threaded.allocated);
290 unlock:
291 	spin_unlock_irqrestore(&bpmp->lock, flags);
292 	up(&bpmp->threaded.lock);
293 
294 	return ERR_PTR(err);
295 }
296 
297 static ssize_t tegra_bpmp_channel_write(struct tegra_bpmp_channel *channel,
298 					unsigned int mrq, unsigned long flags,
299 					const void *data, size_t size)
300 {
301 	int err;
302 
303 	err = tegra_bpmp_wait_request_channel_free(channel);
304 	if (err < 0)
305 		return err;
306 
307 	return __tegra_bpmp_channel_write(channel, mrq, flags, data, size);
308 }
309 
310 static int __maybe_unused tegra_bpmp_resume(struct device *dev);
311 
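/*
 * Issue an MRQ on the dedicated atomic TX channel and spin until the
 * response arrives. Must be called with interrupts disabled; use
 * tegra_bpmp_transfer() from sleeping context instead.
 */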
312 int tegra_bpmp_transfer_atomic(struct tegra_bpmp *bpmp,
313 			       struct tegra_bpmp_message *msg)
314 {
315 	struct tegra_bpmp_channel *channel;
316 	int err;
317 
318 	if (WARN_ON(!irqs_disabled()))
319 		return -EPERM;
320 
321 	if (!tegra_bpmp_message_valid(msg))
322 		return -EINVAL;
323 
324 	if (bpmp->suspended) {
325 		/* Reset BPMP IPC channels during resume based on flags passed */
326 		if (msg->flags & TEGRA_BPMP_MESSAGE_RESET)
327 			tegra_bpmp_resume(bpmp->dev);
328 		else
329 			return -EAGAIN;
330 	}
331 
332 	channel = bpmp->tx_channel;
333 
334 	spin_lock(&bpmp->atomic_tx_lock);
335 
336 	err = tegra_bpmp_channel_write(channel, msg->mrq, MSG_ACK,
337 				       msg->tx.data, msg->tx.size);
338 	if (err < 0) {
339 		spin_unlock(&bpmp->atomic_tx_lock);
340 		return err;
341 	}
342 
343 	spin_unlock(&bpmp->atomic_tx_lock);
344 
345 	err = tegra_bpmp_ring_doorbell(bpmp);
346 	if (err < 0)
347 		return err;
348 
349 	err = tegra_bpmp_wait_response(channel);
350 	if (err < 0)
351 		return err;
352 
353 	return __tegra_bpmp_channel_read(channel, msg->rx.data, msg->rx.size,
354 					 &msg->rx.ret);
355 }
356 EXPORT_SYMBOL_GPL(tegra_bpmp_transfer_atomic);
357 
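/*
 * Issue an MRQ on one of the threaded channels and sleep until the firmware
 * completes it or the thread timeout expires. Must not be called with
 * interrupts disabled.
 *
 * Illustrative consumer usage (a sketch only; MRQ_PING is used here purely
 * as an example, mirroring tegra_bpmp_ping() below):
 *
 *	struct mrq_ping_request req = { .challenge = 1 };
 *	struct mrq_ping_response resp;
 *	struct tegra_bpmp_message msg = {
 *		.mrq = MRQ_PING,
 *		.tx = { .data = &req, .size = sizeof(req) },
 *		.rx = { .data = &resp, .size = sizeof(resp) },
 *	};
 *	int err = tegra_bpmp_transfer(bpmp, &msg);
 *
 * A negative return value indicates a transport error; msg.rx.ret carries
 * the firmware's own result code for the request.
 */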
358 int tegra_bpmp_transfer(struct tegra_bpmp *bpmp,
359 			struct tegra_bpmp_message *msg)
360 {
361 	struct tegra_bpmp_channel *channel;
362 	unsigned long timeout;
363 	int err;
364 
365 	if (WARN_ON(irqs_disabled()))
366 		return -EPERM;
367 
368 	if (!tegra_bpmp_message_valid(msg))
369 		return -EINVAL;
370 
371 	if (bpmp->suspended) {
372 		/* Reset BPMP IPC channels during resume based on flags passed */
373 		if (msg->flags & TEGRA_BPMP_MESSAGE_RESET)
374 			tegra_bpmp_resume(bpmp->dev);
375 		else
376 			return -EAGAIN;
377 	}
378 
379 	channel = tegra_bpmp_write_threaded(bpmp, msg->mrq, msg->tx.data,
380 					    msg->tx.size);
381 	if (IS_ERR(channel))
382 		return PTR_ERR(channel);
383 
384 	err = tegra_bpmp_ring_doorbell(bpmp);
385 	if (err < 0)
386 		return err;
387 
388 	timeout = usecs_to_jiffies(bpmp->soc->channels.thread.timeout);
389 
390 	err = wait_for_completion_timeout(&channel->completion, timeout);
391 	if (err == 0)
392 		return -ETIMEDOUT;
393 
394 	return tegra_bpmp_channel_read(channel, msg->rx.data, msg->rx.size,
395 				       &msg->rx.ret);
396 }
397 EXPORT_SYMBOL_GPL(tegra_bpmp_transfer);
398 
399 static struct tegra_bpmp_mrq *tegra_bpmp_find_mrq(struct tegra_bpmp *bpmp,
400 						  unsigned int mrq)
401 {
402 	struct tegra_bpmp_mrq *entry;
403 
404 	list_for_each_entry(entry, &bpmp->mrqs, list)
405 		if (entry->mrq == mrq)
406 			return entry;
407 
408 	return NULL;
409 }
410 
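/*
 * Send the reply for an incoming MRQ handled on @channel. The response is
 * only posted if the requester set MSG_ACK, and the doorbell is only rung
 * if the requester also set MSG_RING.
 */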
411 void tegra_bpmp_mrq_return(struct tegra_bpmp_channel *channel, int code,
412 			   const void *data, size_t size)
413 {
414 	unsigned long flags = tegra_bpmp_mb_read_field(&channel->ib, flags);
415 	struct tegra_bpmp *bpmp = channel->bpmp;
416 	int err;
417 
418 	if (WARN_ON(size > MSG_DATA_MIN_SZ))
419 		return;
420 
421 	err = tegra_bpmp_ack_request(channel);
422 	if (WARN_ON(err < 0))
423 		return;
424 
425 	if ((flags & MSG_ACK) == 0)
426 		return;
427 
428 	if (WARN_ON(!tegra_bpmp_is_response_channel_free(channel)))
429 		return;
430 
431 	tegra_bpmp_mb_write_field(&channel->ob, code, code);
432 
433 	if (data && size > 0)
434 		tegra_bpmp_mb_write(&channel->ob, data, size);
435 
436 	err = tegra_bpmp_post_response(channel);
437 	if (WARN_ON(err < 0))
438 		return;
439 
440 	if (flags & MSG_RING) {
441 		err = tegra_bpmp_ring_doorbell(bpmp);
442 		if (WARN_ON(err < 0))
443 			return;
444 	}
445 }
446 EXPORT_SYMBOL_GPL(tegra_bpmp_mrq_return);
447 
448 static void tegra_bpmp_handle_mrq(struct tegra_bpmp *bpmp,
449 				  unsigned int mrq,
450 				  struct tegra_bpmp_channel *channel)
451 {
452 	struct tegra_bpmp_mrq *entry;
453 	u32 zero = 0;
454 
455 	spin_lock(&bpmp->lock);
456 
457 	entry = tegra_bpmp_find_mrq(bpmp, mrq);
458 	if (!entry) {
459 		spin_unlock(&bpmp->lock);
460 		tegra_bpmp_mrq_return(channel, -EINVAL, &zero, sizeof(zero));
461 		return;
462 	}
463 
464 	entry->handler(mrq, channel, entry->data);
465 
466 	spin_unlock(&bpmp->lock);
467 }
468 
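/*
 * Register @handler to service incoming requests for @mrq originating from
 * the BPMP firmware. The handler runs with bpmp->lock held and replies via
 * tegra_bpmp_mrq_return().
 */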
469 int tegra_bpmp_request_mrq(struct tegra_bpmp *bpmp, unsigned int mrq,
470 			   tegra_bpmp_mrq_handler_t handler, void *data)
471 {
472 	struct tegra_bpmp_mrq *entry;
473 	unsigned long flags;
474 
475 	if (!handler)
476 		return -EINVAL;
477 
478 	entry = devm_kzalloc(bpmp->dev, sizeof(*entry), GFP_KERNEL);
479 	if (!entry)
480 		return -ENOMEM;
481 
482 	spin_lock_irqsave(&bpmp->lock, flags);
483 
484 	entry->mrq = mrq;
485 	entry->handler = handler;
486 	entry->data = data;
487 	list_add(&entry->list, &bpmp->mrqs);
488 
489 	spin_unlock_irqrestore(&bpmp->lock, flags);
490 
491 	return 0;
492 }
493 EXPORT_SYMBOL_GPL(tegra_bpmp_request_mrq);
494 
495 void tegra_bpmp_free_mrq(struct tegra_bpmp *bpmp, unsigned int mrq, void *data)
496 {
497 	struct tegra_bpmp_mrq *entry;
498 	unsigned long flags;
499 
500 	spin_lock_irqsave(&bpmp->lock, flags);
501 
502 	entry = tegra_bpmp_find_mrq(bpmp, mrq);
503 	if (!entry)
504 		goto unlock;
505 
506 	list_del(&entry->list);
507 	devm_kfree(bpmp->dev, entry);
508 
509 unlock:
510 	spin_unlock_irqrestore(&bpmp->lock, flags);
511 }
512 EXPORT_SYMBOL_GPL(tegra_bpmp_free_mrq);
513 
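/*
 * Ask the firmware, via MRQ_QUERY_ABI, whether it implements @mrq. Returns
 * false on any transport or firmware error.
 */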
514 bool tegra_bpmp_mrq_is_supported(struct tegra_bpmp *bpmp, unsigned int mrq)
515 {
516 	struct mrq_query_abi_request req = { .mrq = mrq };
517 	struct mrq_query_abi_response resp;
518 	struct tegra_bpmp_message msg = {
519 		.mrq = MRQ_QUERY_ABI,
520 		.tx = {
521 			.data = &req,
522 			.size = sizeof(req),
523 		},
524 		.rx = {
525 			.data = &resp,
526 			.size = sizeof(resp),
527 		},
528 	};
529 	int err;
530 
531 	err = tegra_bpmp_transfer(bpmp, &msg);
532 	if (err || msg.rx.ret)
533 		return false;
534 
535 	return resp.status == 0;
536 }
537 EXPORT_SYMBOL_GPL(tegra_bpmp_mrq_is_supported);
538 
539 static void tegra_bpmp_mrq_handle_ping(unsigned int mrq,
540 				       struct tegra_bpmp_channel *channel,
541 				       void *data)
542 {
543 	struct mrq_ping_request request;
544 	struct mrq_ping_response response;
545 
546 	tegra_bpmp_mb_read(&request, &channel->ib, sizeof(request));
547 
548 	memset(&response, 0, sizeof(response));
549 	response.reply = request.challenge << 1;
550 
551 	tegra_bpmp_mrq_return(channel, 0, &response, sizeof(response));
552 }
553 
554 static int tegra_bpmp_ping(struct tegra_bpmp *bpmp)
555 {
556 	struct mrq_ping_response response;
557 	struct mrq_ping_request request;
558 	struct tegra_bpmp_message msg;
559 	unsigned long flags;
560 	ktime_t start, end;
561 	int err;
562 
563 	memset(&request, 0, sizeof(request));
564 	request.challenge = 1;
565 
566 	memset(&response, 0, sizeof(response));
567 
568 	memset(&msg, 0, sizeof(msg));
569 	msg.mrq = MRQ_PING;
570 	msg.tx.data = &request;
571 	msg.tx.size = sizeof(request);
572 	msg.rx.data = &response;
573 	msg.rx.size = sizeof(response);
574 
575 	local_irq_save(flags);
576 	start = ktime_get();
577 	err = tegra_bpmp_transfer_atomic(bpmp, &msg);
578 	end = ktime_get();
579 	local_irq_restore(flags);
580 
581 	if (!err)
582 		dev_dbg(bpmp->dev,
583 			"ping ok: challenge: %u, response: %u, time: %lld\n",
584 			request.challenge, response.reply,
585 			ktime_to_us(ktime_sub(end, start)));
586 
587 	return err;
588 }
589 
590 /* deprecated version of tag query */
591 static int tegra_bpmp_get_firmware_tag_old(struct tegra_bpmp *bpmp, char *tag,
592 					   size_t size)
593 {
594 	struct mrq_query_tag_request request;
595 	struct tegra_bpmp_message msg;
596 	unsigned long flags;
597 	dma_addr_t phys;
598 	void *virt;
599 	int err;
600 
601 	if (size != TAG_SZ)
602 		return -EINVAL;
603 
604 	virt = dma_alloc_coherent(bpmp->dev, TAG_SZ, &phys,
605 				  GFP_KERNEL | GFP_DMA32);
606 	if (!virt)
607 		return -ENOMEM;
608 
609 	memset(&request, 0, sizeof(request));
610 	request.addr = phys;
611 
612 	memset(&msg, 0, sizeof(msg));
613 	msg.mrq = MRQ_QUERY_TAG;
614 	msg.tx.data = &request;
615 	msg.tx.size = sizeof(request);
616 
617 	local_irq_save(flags);
618 	err = tegra_bpmp_transfer_atomic(bpmp, &msg);
619 	local_irq_restore(flags);
620 
621 	if (err == 0)
622 		memcpy(tag, virt, TAG_SZ);
623 
624 	dma_free_coherent(bpmp->dev, TAG_SZ, virt, phys);
625 
626 	return err;
627 }
628 
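/*
 * Query the firmware identification tag, preferring MRQ_QUERY_FW_TAG and
 * falling back to the deprecated MRQ_QUERY_TAG interface on older firmware.
 */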
629 static int tegra_bpmp_get_firmware_tag(struct tegra_bpmp *bpmp, char *tag,
630 				       size_t size)
631 {
632 	if (tegra_bpmp_mrq_is_supported(bpmp, MRQ_QUERY_FW_TAG)) {
633 		struct mrq_query_fw_tag_response resp;
634 		struct tegra_bpmp_message msg = {
635 			.mrq = MRQ_QUERY_FW_TAG,
636 			.rx = {
637 				.data = &resp,
638 				.size = sizeof(resp),
639 			},
640 		};
641 		int err;
642 
643 		if (size != sizeof(resp.tag))
644 			return -EINVAL;
645 
646 		err = tegra_bpmp_transfer(bpmp, &msg);
647 
648 		if (err)
649 			return err;
650 		if (msg.rx.ret < 0)
651 			return -EINVAL;
652 
653 		memcpy(tag, resp.tag, sizeof(resp.tag));
654 		return 0;
655 	}
656 
657 	return tegra_bpmp_get_firmware_tag_old(bpmp, tag, size);
658 }
659 
660 static void tegra_bpmp_channel_signal(struct tegra_bpmp_channel *channel)
661 {
662 	unsigned long flags = tegra_bpmp_mb_read_field(&channel->ob, flags);
663 
664 	if ((flags & MSG_RING) == 0)
665 		return;
666 
667 	complete(&channel->completion);
668 }
669 
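/*
 * Entry point for the SoC-specific transport when the BPMP signals the CPU:
 * dispatch any incoming request on the RX channel and complete whichever
 * busy threaded channels now have a response pending.
 */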
670 void tegra_bpmp_handle_rx(struct tegra_bpmp *bpmp)
671 {
672 	struct tegra_bpmp_channel *channel;
673 	unsigned int i, count;
674 	unsigned long *busy;
675 
676 	channel = bpmp->rx_channel;
677 	count = bpmp->soc->channels.thread.count;
678 	busy = bpmp->threaded.busy;
679 
680 	if (tegra_bpmp_is_request_ready(channel)) {
681 		unsigned int mrq = tegra_bpmp_mb_read_field(&channel->ib, code);
682 
683 		tegra_bpmp_handle_mrq(bpmp, mrq, channel);
684 	}
685 
686 	spin_lock(&bpmp->lock);
687 
688 	for_each_set_bit(i, busy, count) {
689 		struct tegra_bpmp_channel *channel;
690 
691 		channel = &bpmp->threaded_channels[i];
692 
693 		if (tegra_bpmp_is_response_ready(channel)) {
694 			tegra_bpmp_channel_signal(channel);
695 			clear_bit(i, busy);
696 		}
697 	}
698 
699 	spin_unlock(&bpmp->lock);
700 }
701 
702 static int tegra_bpmp_probe(struct platform_device *pdev)
703 {
704 	struct tegra_bpmp *bpmp;
705 	char tag[TAG_SZ];
706 	size_t size;
707 	int err;
708 
709 	bpmp = devm_kzalloc(&pdev->dev, sizeof(*bpmp), GFP_KERNEL);
710 	if (!bpmp)
711 		return -ENOMEM;
712 
713 	bpmp->soc = of_device_get_match_data(&pdev->dev);
714 	bpmp->dev = &pdev->dev;
715 
716 	INIT_LIST_HEAD(&bpmp->mrqs);
717 	spin_lock_init(&bpmp->lock);
718 
719 	bpmp->threaded.count = bpmp->soc->channels.thread.count;
720 	sema_init(&bpmp->threaded.lock, bpmp->threaded.count);
721 
722 	size = BITS_TO_LONGS(bpmp->threaded.count) * sizeof(long);
723 
724 	bpmp->threaded.allocated = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
725 	if (!bpmp->threaded.allocated)
726 		return -ENOMEM;
727 
728 	bpmp->threaded.busy = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
729 	if (!bpmp->threaded.busy)
730 		return -ENOMEM;
731 
732 	spin_lock_init(&bpmp->atomic_tx_lock);
733 	bpmp->tx_channel = devm_kzalloc(&pdev->dev, sizeof(*bpmp->tx_channel),
734 					GFP_KERNEL);
735 	if (!bpmp->tx_channel)
736 		return -ENOMEM;
737 
738 	bpmp->rx_channel = devm_kzalloc(&pdev->dev, sizeof(*bpmp->rx_channel),
739 	                                GFP_KERNEL);
740 	if (!bpmp->rx_channel)
741 		return -ENOMEM;
742 
743 	bpmp->threaded_channels = devm_kcalloc(&pdev->dev, bpmp->threaded.count,
744 					       sizeof(*bpmp->threaded_channels),
745 					       GFP_KERNEL);
746 	if (!bpmp->threaded_channels)
747 		return -ENOMEM;
748 
749 	platform_set_drvdata(pdev, bpmp);
750 
751 	err = bpmp->soc->ops->init(bpmp);
752 	if (err < 0)
753 		return err;
754 
755 	err = tegra_bpmp_request_mrq(bpmp, MRQ_PING,
756 				     tegra_bpmp_mrq_handle_ping, bpmp);
757 	if (err < 0)
758 		goto deinit;
759 
760 	err = tegra_bpmp_ping(bpmp);
761 	if (err < 0) {
762 		dev_err(&pdev->dev, "failed to ping BPMP: %d\n", err);
763 		goto free_mrq;
764 	}
765 
766 	err = tegra_bpmp_get_firmware_tag(bpmp, tag, sizeof(tag));
767 	if (err < 0) {
768 		dev_err(&pdev->dev, "failed to get firmware tag: %d\n", err);
769 		goto free_mrq;
770 	}
771 
772 	dev_info(&pdev->dev, "firmware: %.*s\n", (int)sizeof(tag), tag);
773 
774 	err = of_platform_default_populate(pdev->dev.of_node, NULL, &pdev->dev);
775 	if (err < 0)
776 		goto free_mrq;
777 
778 	if (of_property_present(pdev->dev.of_node, "#clock-cells")) {
779 		err = tegra_bpmp_init_clocks(bpmp);
780 		if (err < 0)
781 			goto free_mrq;
782 	}
783 
784 	if (of_property_present(pdev->dev.of_node, "#reset-cells")) {
785 		err = tegra_bpmp_init_resets(bpmp);
786 		if (err < 0)
787 			goto free_mrq;
788 	}
789 
790 	if (of_property_present(pdev->dev.of_node, "#power-domain-cells")) {
791 		err = tegra_bpmp_init_powergates(bpmp);
792 		if (err < 0)
793 			goto free_mrq;
794 	}
795 
796 	err = tegra_bpmp_init_debugfs(bpmp);
797 	if (err < 0)
798 		dev_err(&pdev->dev, "debugfs initialization failed: %d\n", err);
799 
800 	return 0;
801 
802 free_mrq:
803 	tegra_bpmp_free_mrq(bpmp, MRQ_PING, bpmp);
804 deinit:
805 	if (bpmp->soc->ops->deinit)
806 		bpmp->soc->ops->deinit(bpmp);
807 
808 	return err;
809 }
810 
811 static int __maybe_unused tegra_bpmp_suspend(struct device *dev)
812 {
813 	struct tegra_bpmp *bpmp = dev_get_drvdata(dev);
814 
815 	bpmp->suspended = true;
816 
817 	return 0;
818 }
819 
820 static int __maybe_unused tegra_bpmp_resume(struct device *dev)
821 {
822 	struct tegra_bpmp *bpmp = dev_get_drvdata(dev);
823 
824 	bpmp->suspended = false;
825 
826 	if (bpmp->soc->ops->resume)
827 		return bpmp->soc->ops->resume(bpmp);
828 	else
829 		return 0;
830 }
831 
832 static const struct dev_pm_ops tegra_bpmp_pm_ops = {
833 	.suspend_noirq = tegra_bpmp_suspend,
834 	.resume_noirq = tegra_bpmp_resume,
835 };
836 
837 #if IS_ENABLED(CONFIG_ARCH_TEGRA_186_SOC) || \
838     IS_ENABLED(CONFIG_ARCH_TEGRA_194_SOC) || \
839     IS_ENABLED(CONFIG_ARCH_TEGRA_234_SOC)
840 static const struct tegra_bpmp_soc tegra186_soc = {
841 	.channels = {
842 		.cpu_tx = {
843 			.offset = 3,
844 			.timeout = 60 * USEC_PER_SEC,
845 		},
846 		.thread = {
847 			.offset = 0,
848 			.count = 3,
849 			.timeout = 600 * USEC_PER_SEC,
850 		},
851 		.cpu_rx = {
852 			.offset = 13,
853 			.timeout = 0,
854 		},
855 	},
856 	.ops = &tegra186_bpmp_ops,
857 	.num_resets = 193,
858 };
859 #endif
860 
861 #if IS_ENABLED(CONFIG_ARCH_TEGRA_210_SOC)
862 static const struct tegra_bpmp_soc tegra210_soc = {
863 	.channels = {
864 		.cpu_tx = {
865 			.offset = 0,
866 			.count = 1,
867 			.timeout = 60 * USEC_PER_SEC,
868 		},
869 		.thread = {
870 			.offset = 4,
871 			.count = 1,
872 			.timeout = 600 * USEC_PER_SEC,
873 		},
874 		.cpu_rx = {
875 			.offset = 8,
876 			.count = 1,
877 			.timeout = 0,
878 		},
879 	},
880 	.ops = &tegra210_bpmp_ops,
881 };
882 #endif
883 
884 static const struct of_device_id tegra_bpmp_match[] = {
885 #if IS_ENABLED(CONFIG_ARCH_TEGRA_186_SOC) || \
886     IS_ENABLED(CONFIG_ARCH_TEGRA_194_SOC) || \
887     IS_ENABLED(CONFIG_ARCH_TEGRA_234_SOC)
888 	{ .compatible = "nvidia,tegra186-bpmp", .data = &tegra186_soc },
889 #endif
890 #if IS_ENABLED(CONFIG_ARCH_TEGRA_210_SOC)
891 	{ .compatible = "nvidia,tegra210-bpmp", .data = &tegra210_soc },
892 #endif
893 	{ }
894 };
895 
896 static struct platform_driver tegra_bpmp_driver = {
897 	.driver = {
898 		.name = "tegra-bpmp",
899 		.of_match_table = tegra_bpmp_match,
900 		.pm = &tegra_bpmp_pm_ops,
901 		.suppress_bind_attrs = true,
902 	},
903 	.probe = tegra_bpmp_probe,
904 };
905 builtin_platform_driver(tegra_bpmp_driver);
906