xref: /linux/drivers/gpu/host1x/intr.c (revision 40d269c000bda9fcd276a0412a9cebd3f6e344c5)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Tegra host1x Interrupt Management
 *
 * Copyright (c) 2010-2021, NVIDIA Corporation.
 */

#include <linux/clk.h>

#include "dev.h"
#include "fence.h"
#include "intr.h"

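/*
 * Insert the fence into the syncpoint's pending list, keeping the list
 * sorted by threshold. The list is walked from the tail so a fence with the
 * largest threshold is placed without scanning every entry, and the (s32)
 * difference keeps the ordering correct across 32-bit syncpoint wraparound.
 */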
static void host1x_intr_add_fence_to_list(struct host1x_fence_list *list,
					  struct host1x_syncpt_fence *fence)
{
	struct host1x_syncpt_fence *fence_in_list;

	list_for_each_entry_reverse(fence_in_list, &list->list, list) {
		if ((s32)(fence_in_list->threshold - fence->threshold) <= 0) {
			/* Fence in list is before us, we can insert here */
			list_add(&fence->list, &fence_in_list->list);
			return;
		}
	}

	/* Add as first in list */
	list_add(&fence->list, &list->list);
}

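/*
 * Program the hardware threshold for the earliest pending fence on the
 * syncpoint and enable its interrupt, or disable the interrupt entirely
 * when no fences are waiting. Callers hold the fence list lock.
 */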
static void host1x_intr_update_hw_state(struct host1x *host, struct host1x_syncpt *sp)
{
	struct host1x_syncpt_fence *fence;

	if (!list_empty(&sp->fences.list)) {
		fence = list_first_entry(&sp->fences.list, struct host1x_syncpt_fence, list);

		host1x_hw_intr_set_syncpt_threshold(host, sp->id, fence->threshold);
		host1x_hw_intr_enable_syncpt_intr(host, sp->id);
	} else {
		host1x_hw_intr_disable_syncpt_intr(host, sp->id);
	}
}

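/*
 * Queue a fence for signalling and update the interrupt state accordingly.
 * The _locked suffix indicates the caller is expected to hold
 * fence->sp->fences.lock.
 */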
void host1x_intr_add_fence_locked(struct host1x *host, struct host1x_syncpt_fence *fence)
{
	struct host1x_fence_list *fence_list = &fence->sp->fences;

	INIT_LIST_HEAD(&fence->list);

	host1x_intr_add_fence_to_list(fence_list, fence);
	host1x_intr_update_hw_state(host, fence->sp);
}

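/*
 * Remove a fence from its syncpoint's pending list. Returns true if the
 * fence was still queued and has been removed, false if it was no longer
 * on the list (for example because it has already been signalled).
 */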
bool host1x_intr_remove_fence(struct host1x *host, struct host1x_syncpt_fence *fence)
{
	struct host1x_fence_list *fence_list = &fence->sp->fences;
	unsigned long irqflags;

	spin_lock_irqsave(&fence_list->lock, irqflags);

	if (list_empty(&fence->list)) {
		spin_unlock_irqrestore(&fence_list->lock, irqflags);
		return false;
	}

	list_del_init(&fence->list);
	host1x_intr_update_hw_state(host, fence->sp);

	spin_unlock_irqrestore(&fence_list->lock, irqflags);

	return true;
}

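/*
 * Threshold interrupt handler for one syncpoint: signal every fence whose
 * threshold has been reached by the current syncpoint value (the masked
 * subtraction is a wraparound-safe "value >= threshold" check), then
 * reprogram the threshold for the next pending fence or disable the
 * interrupt if none remain.
 */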
void host1x_intr_handle_interrupt(struct host1x *host, unsigned int id)
{
	struct host1x_syncpt *sp = &host->syncpt[id];
	struct host1x_syncpt_fence *fence, *tmp;
	unsigned int value;

	value = host1x_syncpt_load(sp);

	spin_lock(&sp->fences.lock);

	list_for_each_entry_safe(fence, tmp, &sp->fences.list, list) {
		if (((value - fence->threshold) & 0x80000000U) != 0U) {
			/* Fence is not yet expired, we are done */
			break;
		}

		list_del_init(&fence->list);
		host1x_fence_signal(fence);
	}

	/* Re-enable interrupt if necessary */
	host1x_intr_update_hw_state(host, sp);

	spin_unlock(&sp->fences.lock);
}

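/*
 * Initialize the software side of interrupt handling: an empty,
 * spinlock-protected fence list per syncpoint, plus the mutex that
 * serializes the hardware setup done in host1x_intr_start().
 */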
int host1x_intr_init(struct host1x *host)
{
	unsigned int id;

	mutex_init(&host->intr_mutex);

	for (id = 0; id < host1x_syncpt_nb_pts(host); ++id) {
		struct host1x_syncpt *syncpt = &host->syncpt[id];

		spin_lock_init(&syncpt->fences.lock);
		INIT_LIST_HEAD(&syncpt->fences.list);
	}

	return 0;
}

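/* Currently a no-op: there is no interrupt state that needs explicit teardown. */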
void host1x_intr_deinit(struct host1x *host)
{
}

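/*
 * Initialize the host1x sync hardware. The second argument to
 * host1x_hw_intr_init_host_sync() is the host1x clock rate expressed in
 * cycles per microsecond, which the hardware presumably uses for its
 * internal timing; a failure is not propagated since the function
 * returns void.
 */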
void host1x_intr_start(struct host1x *host)
{
	u32 hz = clk_get_rate(host->clk);
	int err;

	mutex_lock(&host->intr_mutex);
	err = host1x_hw_intr_init_host_sync(host, DIV_ROUND_UP(hz, 1000000));
	if (err) {
		mutex_unlock(&host->intr_mutex);
		return;
	}
	mutex_unlock(&host->intr_mutex);
}

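/* Mask all syncpoint interrupts at the hardware level. */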
void host1x_intr_stop(struct host1x *host)
{
	host1x_hw_intr_disable_all_syncpt_intrs(host);
}
139