xref: /linux/drivers/gpu/drm/nouveau/nvkm/engine/sw/chan.c (revision a940daa52167e9db8ecce82213813b735a9d9f23)
1 /*
2  * Copyright 2015 Red Hat Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: Ben Skeggs <bskeggs@redhat.com>
23  */
24 #include "chan.h"
25 
26 #include <engine/fifo.h>
27 
28 #include <nvif/event.h>
29 #include <nvif/unpack.h>
30 
31 bool
nvkm_sw_chan_mthd(struct nvkm_sw_chan * chan,int subc,u32 mthd,u32 data)32 nvkm_sw_chan_mthd(struct nvkm_sw_chan *chan, int subc, u32 mthd, u32 data)
33 {
34 	switch (mthd) {
35 	case 0x0000:
36 		return true;
37 	case 0x0500:
38 		nvkm_event_ntfy(&chan->event, 0, NVKM_SW_CHAN_EVENT_PAGE_FLIP);
39 		return true;
40 	default:
41 		if (chan->func->mthd)
42 			return chan->func->mthd(chan, subc, mthd, data);
43 		break;
44 	}
45 	return false;
46 }
47 
/* Event source for SW channel notifications (e.g. page-flip); no
 * init/fini hooks are required, so the func table is intentionally empty.
 */
static const struct nvkm_event_func
nvkm_sw_chan_event = {
};
51 
/* Destructor for a SW channel object: tears down in reverse order of
 * nvkm_sw_chan_ctor(), and returns the pointer that the caller should free.
 */
static void *
nvkm_sw_chan_dtor(struct nvkm_object *object)
{
	struct nvkm_sw_chan *chan = nvkm_sw_chan(object);
	struct nvkm_sw *sw = chan->sw;
	unsigned long flags;
	void *data = chan;

	/* Give the chipset-specific destructor first crack; it may return a
	 * different pointer to free (e.g. a larger containing structure).
	 */
	if (chan->func->dtor)
		data = chan->func->dtor(chan);
	nvkm_event_fini(&chan->event);

	/* Unlink from the engine's channel list under the engine lock,
	 * mirroring the list_add() in nvkm_sw_chan_ctor().
	 */
	spin_lock_irqsave(&sw->engine.lock, flags);
	list_del(&chan->head);
	spin_unlock_irqrestore(&sw->engine.lock, flags);
	return data;
}
69 
/* Base object vtable for SW channels; only destruction needs handling here,
 * other hooks come from the chipset-specific nvkm_sw_chan_func.
 */
static const struct nvkm_object_func
nvkm_sw_chan = {
	.dtor = nvkm_sw_chan_dtor,
};
74 
75 int
nvkm_sw_chan_ctor(const struct nvkm_sw_chan_func * func,struct nvkm_sw * sw,struct nvkm_chan * fifo,const struct nvkm_oclass * oclass,struct nvkm_sw_chan * chan)76 nvkm_sw_chan_ctor(const struct nvkm_sw_chan_func *func, struct nvkm_sw *sw,
77 		  struct nvkm_chan *fifo, const struct nvkm_oclass *oclass,
78 		  struct nvkm_sw_chan *chan)
79 {
80 	unsigned long flags;
81 
82 	nvkm_object_ctor(&nvkm_sw_chan, oclass, &chan->object);
83 	chan->func = func;
84 	chan->sw = sw;
85 	chan->fifo = fifo;
86 	spin_lock_irqsave(&sw->engine.lock, flags);
87 	list_add(&chan->head, &sw->chan);
88 	spin_unlock_irqrestore(&sw->engine.lock, flags);
89 
90 	return nvkm_event_init(&nvkm_sw_chan_event, &sw->engine.subdev, 1, 1, &chan->event);
91 }
92