xref: /linux/drivers/gpu/drm/nouveau/nvkm/core/uevent.c (revision 3a39d672e7f48b8d6b91a09afa4b55352773b4b5)
/*
 * Copyright 2021 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#define nvkm_uevent(p) container_of((p), struct nvkm_uevent, object)
#include <core/event.h>
#include <core/client.h>

#include <nvif/if000e.h>

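/*
 * Userspace-visible event object.  Wraps an nvkm_event_ntfy that is
 * registered against a parent object's event source; "allowed" mirrors
 * the current allow/block state so it can be restored across init/fini.
 */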
struct nvkm_uevent {
	struct nvkm_object object;
	struct nvkm_object *parent;
	nvkm_uevent_func func;
	bool wait;

	struct nvkm_event_ntfy ntfy;
	atomic_t allowed;
};

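/*
 * NVIF_EVENT_V0_BLOCK: stop delivery of notifications and remember the
 * state so nvkm_uevent_init() won't re-enable the notifier.
 */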
static int
nvkm_uevent_mthd_block(struct nvkm_uevent *uevent, union nvif_event_block_args *args, u32 argc)
{
	if (argc != sizeof(args->vn))
		return -ENOSYS;

	nvkm_event_ntfy_block(&uevent->ntfy);
	atomic_set(&uevent->allowed, 0);
	return 0;
}

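/*
 * NVIF_EVENT_V0_ALLOW: enable delivery of notifications and remember the
 * state so nvkm_uevent_init() re-enables the notifier after suspend.
 */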
static int
nvkm_uevent_mthd_allow(struct nvkm_uevent *uevent, union nvif_event_allow_args *args, u32 argc)
{
	if (argc != sizeof(args->vn))
		return -ENOSYS;

	nvkm_event_ntfy_allow(&uevent->ntfy);
	atomic_set(&uevent->allowed, 1);
	return 0;
}

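/* Method dispatch: route NVIF ALLOW/BLOCK requests to the handlers above. */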
static int
nvkm_uevent_mthd(struct nvkm_object *object, u32 mthd, void *argv, u32 argc)
{
	struct nvkm_uevent *uevent = nvkm_uevent(object);

	switch (mthd) {
	case NVIF_EVENT_V0_ALLOW: return nvkm_uevent_mthd_allow(uevent, argv, argc);
	case NVIF_EVENT_V0_BLOCK: return nvkm_uevent_mthd_block(uevent, argv, argc);
	default:
		break;
	}

	return -EINVAL;
}

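/* Suspend/teardown: quiesce the notifier, leaving "allowed" untouched. */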
static int
nvkm_uevent_fini(struct nvkm_object *object, bool suspend)
{
	struct nvkm_uevent *uevent = nvkm_uevent(object);

	nvkm_event_ntfy_block(&uevent->ntfy);
	return 0;
}

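/* Resume: re-arm the notifier if it was allowed prior to fini. */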
static int
nvkm_uevent_init(struct nvkm_object *object)
{
	struct nvkm_uevent *uevent = nvkm_uevent(object);

	if (atomic_read(&uevent->allowed))
		nvkm_event_ntfy_allow(&uevent->ntfy);

	return 0;
}

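/*
 * Destructor: unregister the notifier from its event.  The returned
 * pointer is freed by the object core.
 */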
static void *
nvkm_uevent_dtor(struct nvkm_object *object)
{
	struct nvkm_uevent *uevent = nvkm_uevent(object);

	nvkm_event_ntfy_del(&uevent->ntfy);
	return uevent;
}

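/* nvkm_object hooks for the uevent class. */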
static const struct nvkm_object_func
nvkm_uevent = {
	.dtor = nvkm_uevent_dtor,
	.init = nvkm_uevent_init,
	.fini = nvkm_uevent_fini,
	.mthd = nvkm_uevent_mthd,
};

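/*
 * Notifier callback, run by the event core when subscribed bits trigger.
 * Forwards to the handler installed by nvkm_uevent_add(), or falls back
 * to sending an empty event message to the client.
 */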
static int
nvkm_uevent_ntfy(struct nvkm_event_ntfy *ntfy, u32 bits)
{
	struct nvkm_uevent *uevent = container_of(ntfy, typeof(*uevent), ntfy);
	struct nvkm_client *client = uevent->object.client;

	if (uevent->func)
		return uevent->func(uevent->parent, uevent->object.object, bits);

	return client->event(uevent->object.object, NULL, 0);
}

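/*
 * Called from a parent object's uevent() hook to bind this object to a
 * specific nvkm_event source/id/bits, optionally with a custom handler.
 */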
int
nvkm_uevent_add(struct nvkm_uevent *uevent, struct nvkm_event *event, int id, u32 bits,
		nvkm_uevent_func func)
{
	if (WARN_ON(uevent->func))
		return -EBUSY;

	nvkm_event_ntfy_add(event, id, bits, uevent->wait, nvkm_uevent_ntfy, &uevent->ntfy);
	uevent->func = func;
	return 0;
}

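/*
 * Constructor: validate the nvif_event_args, allocate the object, and
 * ask the parent object to attach it to the relevant event source via
 * its uevent() method.
 */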
int
nvkm_uevent_new(const struct nvkm_oclass *oclass, void *argv, u32 argc,
		struct nvkm_object **pobject)
{
	struct nvkm_object *parent = oclass->parent;
	struct nvkm_uevent *uevent;
	union nvif_event_args *args = argv;

	if (argc < sizeof(args->v0) || args->v0.version != 0)
		return -ENOSYS;

	if (!(uevent = kzalloc(sizeof(*uevent), GFP_KERNEL)))
		return -ENOMEM;
	*pobject = &uevent->object;

	nvkm_object_ctor(&nvkm_uevent, oclass, &uevent->object);
	uevent->parent = parent;
	uevent->func = NULL;
	uevent->wait = args->v0.wait;
	uevent->ntfy.event = NULL;
	return parent->func->uevent(parent, &args->v0.data, argc - sizeof(args->v0), uevent);
}