/*
 * Copyright 2021 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include "cgrp.h"
#include "chan.h"
#include "chid.h"
#include "runl.h"
#include "priv.h"

#include <core/gpuobj.h>
#include <subdev/mmu.h>

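/* Drop a channel group's reference on an engine context, destroying the
 * context (and its backing object) once the last reference is released.
 */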
static void
nvkm_cgrp_ectx_put(struct nvkm_cgrp *cgrp, struct nvkm_ectx **pectx)
{
	struct nvkm_ectx *ectx = *pectx;

	if (ectx) {
		struct nvkm_engn *engn = ectx->engn;

		if (refcount_dec_and_test(&ectx->refs)) {
			CGRP_TRACE(cgrp, "dtor ectx %d[%s]", engn->id, engn->engine->subdev.name);
			nvkm_object_del(&ectx->object);
			list_del(&ectx->head);
			kfree(ectx);
		}

		*pectx = NULL;
	}
}

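/* Find the channel group's existing engine context for @engn and take a
 * reference on it, or construct a fresh one if none exists yet.
 */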
static int
nvkm_cgrp_ectx_get(struct nvkm_cgrp *cgrp, struct nvkm_engn *engn, struct nvkm_ectx **pectx,
		   struct nvkm_chan *chan, struct nvkm_client *client)
{
	struct nvkm_engine *engine = engn->engine;
	struct nvkm_oclass cclass = {
		.client = client,
		.engine = engine,
	};
	struct nvkm_ectx *ectx;
	int ret = 0;

	/* Look for an existing context for this engine in the channel group. */
	ectx = nvkm_list_find(ectx, &cgrp->ectxs, head, ectx->engn == engn);
	if (ectx) {
		refcount_inc(&ectx->refs);
		*pectx = ectx;
		return 0;
	}

	/* Nope - create a fresh one. */
	CGRP_TRACE(cgrp, "ctor ectx %d[%s]", engn->id, engn->engine->subdev.name);
	if (!(ectx = *pectx = kzalloc(sizeof(*ectx), GFP_KERNEL)))
		return -ENOMEM;

	ectx->engn = engn;
	refcount_set(&ectx->refs, 1);
	refcount_set(&ectx->uses, 0);
	list_add_tail(&ectx->head, &cgrp->ectxs);

	/* Allocate the HW structures. */
	if (engine->func->fifo.cclass)
		ret = engine->func->fifo.cclass(chan, &cclass, &ectx->object);
	else if (engine->func->cclass)
		ret = nvkm_object_new_(engine->func->cclass, &cclass, NULL, 0, &ectx->object);

	if (ret)
		nvkm_cgrp_ectx_put(cgrp, pectx);

	return ret;
}

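/* Drop a reference on a sub-context (an engine context instance bound to a
 * specific VMM).  On the final reference, tear down its instance memory,
 * VMA and VMM reference, and release the underlying engine context.
 */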
void
nvkm_cgrp_vctx_put(struct nvkm_cgrp *cgrp, struct nvkm_vctx **pvctx)
{
	struct nvkm_vctx *vctx = *pvctx;

	if (vctx) {
		struct nvkm_engn *engn = vctx->ectx->engn;

		if (refcount_dec_and_test(&vctx->refs)) {
			CGRP_TRACE(cgrp, "dtor vctx %d[%s]", engn->id, engn->engine->subdev.name);
			nvkm_vmm_put(vctx->vmm, &vctx->vma);
			nvkm_gpuobj_del(&vctx->inst);

			nvkm_cgrp_ectx_put(cgrp, &vctx->ectx);
			if (vctx->vmm) {
				atomic_dec(&vctx->vmm->engref[engn->engine->subdev.type]);
				nvkm_vmm_unref(&vctx->vmm);
			}
			list_del(&vctx->head);
			kfree(vctx);
		}

		*pvctx = NULL;
	}
}

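/* Find the channel group's existing sub-context for @engn and @chan's VMM
 * and take a reference on it, or construct a fresh one (creating the
 * underlying engine context first, if necessary).
 */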
int
nvkm_cgrp_vctx_get(struct nvkm_cgrp *cgrp, struct nvkm_engn *engn, struct nvkm_chan *chan,
		   struct nvkm_vctx **pvctx, struct nvkm_client *client)
{
	struct nvkm_ectx *ectx;
	struct nvkm_vctx *vctx;
	int ret;

	/* Look for an existing sub-context for this engine+VEID in the channel group. */
	vctx = nvkm_list_find(vctx, &cgrp->vctxs, head,
			      vctx->ectx->engn == engn && vctx->vmm == chan->vmm);
	if (vctx) {
		refcount_inc(&vctx->refs);
		*pvctx = vctx;
		return 0;
	}

	/* Nope - create a fresh one. But, context first. */
	ret = nvkm_cgrp_ectx_get(cgrp, engn, &ectx, chan, client);
	if (ret) {
		CGRP_ERROR(cgrp, "ectx %d[%s]: %d", engn->id, engn->engine->subdev.name, ret);
		return ret;
	}

	/* Now, create the sub-context. */
	CGRP_TRACE(cgrp, "ctor vctx %d[%s]", engn->id, engn->engine->subdev.name);
	if (!(vctx = *pvctx = kzalloc(sizeof(*vctx), GFP_KERNEL))) {
		nvkm_cgrp_ectx_put(cgrp, &ectx);
		return -ENOMEM;
	}

	vctx->ectx = ectx;
	vctx->vmm = nvkm_vmm_ref(chan->vmm);
	refcount_set(&vctx->refs, 1);
	list_add_tail(&vctx->head, &cgrp->vctxs);

	/* MMU on some GPUs needs to know engine usage for TLB invalidation. */
	if (vctx->vmm)
		atomic_inc(&vctx->vmm->engref[engn->engine->subdev.type]);

	/* Allocate the HW structures. */
	if (engn->func->ctor2) {
		ret = engn->func->ctor2(engn, vctx, chan);
	} else
	if (engn->func->bind) {
		ret = nvkm_object_bind(vctx->ectx->object, NULL, 0, &vctx->inst);
		if (ret == 0 && engn->func->ctor)
			ret = engn->func->ctor(engn, vctx);
	}

	if (ret)
		nvkm_cgrp_vctx_put(cgrp, pvctx);

	return ret;
}

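/* Final destructor, invoked once the last kref on the channel group is
 * dropped: returns the channel group ID and frees the structure.
 */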
static void
nvkm_cgrp_del(struct kref *kref)
{
	struct nvkm_cgrp *cgrp = container_of(kref, typeof(*cgrp), kref);
	struct nvkm_runl *runl = cgrp->runl;

	if (runl->cgid)
		nvkm_chid_put(runl->cgid, cgrp->id, &cgrp->lock);

	mutex_destroy(&cgrp->mutex);
	nvkm_vmm_unref(&cgrp->vmm);
	kfree(cgrp);
}

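/* Drop a reference on the channel group and clear the caller's pointer. */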
void
nvkm_cgrp_unref(struct nvkm_cgrp **pcgrp)
{
	struct nvkm_cgrp *cgrp = *pcgrp;

	if (!cgrp)
		return;

	kref_put(&cgrp->kref, nvkm_cgrp_del);
	*pcgrp = NULL;
}

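/* Take an additional reference on the channel group (NULL is tolerated). */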
struct nvkm_cgrp *
nvkm_cgrp_ref(struct nvkm_cgrp *cgrp)
{
	if (cgrp)
		kref_get(&cgrp->kref);

	return cgrp;
}

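/* Clear the caller's channel-group pointer and release cgrp->lock with the
 * saved IRQ flags.
 */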
void
nvkm_cgrp_put(struct nvkm_cgrp **pcgrp, unsigned long irqflags)
{
	struct nvkm_cgrp *cgrp = *pcgrp;

	if (!cgrp)
		return;

	*pcgrp = NULL;
	spin_unlock_irqrestore(&cgrp->lock, irqflags);
}

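/* Allocate and initialise a channel group on @runl, reserving a channel
 * group ID for it where the runlist provides them.
 */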
int
nvkm_cgrp_new(struct nvkm_runl *runl, const char *name, struct nvkm_vmm *vmm, bool hw,
	      struct nvkm_cgrp **pcgrp)
{
	struct nvkm_cgrp *cgrp;

	if (!(cgrp = *pcgrp = kmalloc(sizeof(*cgrp), GFP_KERNEL)))
		return -ENOMEM;

	cgrp->func = runl->fifo->func->cgrp.func;
	strscpy(cgrp->name, name, sizeof(cgrp->name));
	cgrp->runl = runl;
	cgrp->vmm = nvkm_vmm_ref(vmm);
	cgrp->hw = hw;
	cgrp->id = -1;
	kref_init(&cgrp->kref);
	INIT_LIST_HEAD(&cgrp->chans);
	cgrp->chan_nr = 0;
	spin_lock_init(&cgrp->lock);
	INIT_LIST_HEAD(&cgrp->ectxs);
	INIT_LIST_HEAD(&cgrp->vctxs);
	mutex_init(&cgrp->mutex);
	atomic_set(&cgrp->rc, NVKM_CGRP_RC_NONE);

	if (runl->cgid) {
		cgrp->id = nvkm_chid_get(runl->cgid, cgrp);
		if (cgrp->id < 0) {
			RUNL_ERROR(runl, "!cgids");
			nvkm_cgrp_unref(pcgrp);
			return -ENOSPC;
		}
	}

	return 0;
}