xref: /linux/drivers/gpu/drm/nouveau/nvkm/subdev/bus/hwsq.h (revision 0898782247ae533d1f4e47a06bc5d4870931b284)
/* SPDX-License-Identifier: MIT */
#ifndef __NVKM_BUS_HWSQ_H__
#define __NVKM_BUS_HWSQ_H__
#include <subdev/bus.h>

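/*
 * Context for building a HWSQ (hardware sequencer) script.  "sequence"
 * is bumped by each hwsq_init() and is used to invalidate the shadowed
 * register values held in struct hwsq_reg below.
 */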
struct hwsq {
	struct nvkm_subdev *subdev;
	struct nvkm_hwsq *hwsq;
	int sequence;
};

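/*
 * Shadow state for one register, or for a strided group of mirrors:
 * bit n of "mask" selects the mirror at addr + n * stride.  "sequence"
 * records which script last touched "data", and "force" makes the next
 * hwsq_mask() emit a write even when the value is unchanged.
 */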
struct hwsq_reg {
	int sequence;
	bool force;
	u32 addr;
	u32 stride; /* in bytes */
	u32 mask;
	u32 data;
};

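/*
 * Describe a strided group of register mirrors.  E.g. (with a
 * placeholder address) hwsq_stride(0x100200, 4, 0x3) covers 0x100200
 * and 0x100204.  0xdeadbeef is a sentinel: the shadow is never trusted
 * until its sequence matches the current script's.
 */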
static inline struct hwsq_reg
hwsq_stride(u32 addr, u32 stride, u32 mask)
{
	return (struct hwsq_reg) {
		.sequence = 0,
		.force = 0,
		.addr = addr,
		.stride = stride,
		.mask = mask,
		.data = 0xdeadbeef,
	};
}

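/*
 * Two mirrors at arbitrary addresses; equivalent to
 * hwsq_stride(addr1, addr2 - addr1, 0x3).
 */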
static inline struct hwsq_reg
hwsq_reg2(u32 addr1, u32 addr2)
{
	return (struct hwsq_reg) {
		.sequence = 0,
		.force = 0,
		.addr = addr1,
		.stride = addr2 - addr1,
		.mask = 0x3,
		.data = 0xdeadbeef,
	};
}

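/* A single register: one mask bit, no stride. */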
static inline struct hwsq_reg
hwsq_reg(u32 addr)
{
	return (struct hwsq_reg) {
		.sequence = 0,
		.force = 0,
		.addr = addr,
		.stride = 0,
		.mask = 0x1,
		.data = 0xdeadbeef,
	};
}

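/*
 * Begin assembling a new script.  Bumping "sequence" invalidates every
 * hwsq_reg shadow, so the first access of each register re-reads the
 * hardware.
 */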
static inline int
hwsq_init(struct hwsq *ram, struct nvkm_subdev *subdev)
{
	int ret;

	ret = nvkm_hwsq_init(subdev, &ram->hwsq);
	if (ret)
		return ret;

	ram->sequence++;
	ram->subdev = subdev;
	return 0;
}

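/*
 * Finish the script.  The exec flag is passed through to
 * nvkm_hwsq_fini(), which either runs the assembled script or just
 * releases it.
 */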
static inline int
hwsq_exec(struct hwsq *ram, bool exec)
{
	int ret = 0;
	if (ram->subdev) {
		ret = nvkm_hwsq_fini(&ram->hwsq, exec);
		ram->subdev = NULL;
	}
	return ret;
}

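/*
 * Read through the shadow: a stale shadow (sequence mismatch) is
 * refreshed from the hardware with nvkm_rd32().
 */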
static inline u32
hwsq_rd32(struct hwsq *ram, struct hwsq_reg *reg)
{
	struct nvkm_device *device = ram->subdev->device;
	if (reg->sequence != ram->sequence)
		reg->data = nvkm_rd32(device, reg->addr);
	return reg->data;
}

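/*
 * Update the shadow and queue one script write per set mask bit, with
 * bit n landing at addr + n * stride.  E.g. mask 0x5 with stride 4
 * writes addr + 0x0 and addr + 0x8.
 */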
static inline void
hwsq_wr32(struct hwsq *ram, struct hwsq_reg *reg, u32 data)
{
	u32 mask, off = 0;

	reg->sequence = ram->sequence;
	reg->data = data;

	for (mask = reg->mask; mask > 0; mask = (mask & ~1) >> 1) {
		if (mask & 1)
			nvkm_hwsq_wr32(ram->hwsq, reg->addr+off, reg->data);

		off += reg->stride;
	}
}

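/*
 * Force the next hwsq_mask() on this register to emit a write.  The
 * "ram" argument is unused, kept for symmetry with the other helpers.
 */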
static inline void
hwsq_nuke(struct hwsq *ram, struct hwsq_reg *reg)
{
	reg->force = true;
}

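/*
 * Read-modify-write: clear "mask", OR in "data", and queue the write
 * only if the value changed (or the register was nuked).  Returns the
 * previous value.  Note "data" is expected to already be shifted into
 * position; it is not masked here.
 */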
static inline u32
hwsq_mask(struct hwsq *ram, struct hwsq_reg *reg, u32 mask, u32 data)
{
	u32 temp = hwsq_rd32(ram, reg);
	if (temp != ((temp & ~mask) | data) || reg->force)
		hwsq_wr32(ram, reg, (temp & ~mask) | data);
	return temp;
}

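/* Queue an opcode that sets sequencer flag "flag" to "data". */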
static inline void
hwsq_setf(struct hwsq *ram, u8 flag, int data)
{
	nvkm_hwsq_setf(ram->hwsq, flag, data);
}

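/* Queue an opcode that waits for flag "flag" to equal "data". */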
static inline void
hwsq_wait(struct hwsq *ram, u8 flag, u8 data)
{
	nvkm_hwsq_wait(ram->hwsq, flag, data);
}

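/* Queue a wait for the next vertical blanking period. */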
static inline void
hwsq_wait_vblank(struct hwsq *ram)
{
	nvkm_hwsq_wait_vblank(ram->hwsq);
}

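/* Queue a delay of "nsec" nanoseconds. */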
static inline void
hwsq_nsec(struct hwsq *ram, u32 nsec)
{
	nvkm_hwsq_nsec(ram->hwsq, nsec);
}
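
/*
 * Illustrative sketch of how a caller might assemble and run a script
 * with these helpers; the register address and mask below are
 * placeholders, not real hardware values.
 *
 *	struct hwsq ram = {};
 *	struct hwsq_reg r_ctrl = hwsq_reg(0x100200);
 *	int ret = hwsq_init(&ram, subdev);
 *	if (ret)
 *		return ret;
 *	hwsq_wait_vblank(&ram);
 *	hwsq_mask(&ram, &r_ctrl, 0x00000001, 0x00000001);
 *	hwsq_nsec(&ram, 2000);
 *	return hwsq_exec(&ram, true);
 */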
#endif