// SPDX-License-Identifier: GPL-2.0-or-later
/* hw_ops.c - query/set operations on active SPU context.
 *
 * Copyright (C) IBM 2005
 * Author: Mark Nutter <mnutter@us.ibm.com>
 */

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/poll.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/unistd.h>

#include <asm/io.h>
#include <asm/spu.h>
#include <asm/spu_priv1.h>
#include <asm/spu_csa.h>
#include <asm/mmu_context.h>
#include "spufs.h"

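/*
 * Read one word from the SPU-to-PPE mailbox. The low byte of mb_stat_R
 * (the 0x0000ff mask below) counts entries pending in the outbound
 * mailbox; pu_mb_R is only read when that count is non-zero. Returns
 * the number of bytes read (4), or 0 if the mailbox was empty.
 */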
static int spu_hw_mbox_read(struct spu_context *ctx, u32 *data)
{
	struct spu *spu = ctx->spu;
	struct spu_problem __iomem *prob = spu->problem;
	u32 mbox_stat;
	int ret = 0;

	spin_lock_irq(&spu->register_lock);
	mbox_stat = in_be32(&prob->mb_stat_R);
	if (mbox_stat & 0x0000ff) {
		*data = in_be32(&prob->pu_mb_R);
		ret = 4;
	}
	spin_unlock_irq(&spu->register_lock);
	return ret;
}

static u32 spu_hw_mbox_stat_read(struct spu_context *ctx)
{
	return in_be32(&ctx->spu->problem->mb_stat_R);
}

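/*
 * Poll the mailbox status register. EPOLLIN maps onto the interrupt
 * mailbox (count in bits 16-23 of mb_stat_R), EPOLLOUT onto free space
 * in the inbound mailbox (bits 8-15). When a requested condition is
 * not yet met, the corresponding class 2 interrupt is enabled so the
 * waiter is woken once it becomes true.
 */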
static __poll_t spu_hw_mbox_stat_poll(struct spu_context *ctx, __poll_t events)
{
	struct spu *spu = ctx->spu;
	__poll_t ret = 0;
	u32 stat;

	spin_lock_irq(&spu->register_lock);
	stat = in_be32(&spu->problem->mb_stat_R);

	/*
	 * If the requested event is there, return the poll mask;
	 * otherwise enable the interrupt to get notified, but first
	 * mark any pending interrupts as done so we don't get woken
	 * up unnecessarily.
	 */
	if (events & (EPOLLIN | EPOLLRDNORM)) {
		if (stat & 0xff0000)
			ret |= EPOLLIN | EPOLLRDNORM;
		else {
			spu_int_stat_clear(spu, 2, CLASS2_MAILBOX_INTR);
			spu_int_mask_or(spu, 2, CLASS2_ENABLE_MAILBOX_INTR);
		}
	}
	if (events & (EPOLLOUT | EPOLLWRNORM)) {
		if (stat & 0x00ff00)
			ret |= EPOLLOUT | EPOLLWRNORM;
		else {
			spu_int_stat_clear(spu, 2,
					CLASS2_MAILBOX_THRESHOLD_INTR);
			spu_int_mask_or(spu, 2,
					CLASS2_ENABLE_MAILBOX_THRESHOLD_INTR);
		}
	}
	spin_unlock_irq(&spu->register_lock);
	return ret;
}

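/*
 * Read one word from the SPU interrupt mailbox. puint_mb_R is a 64-bit
 * register; the assignment to *data keeps only the low 32 bits, which
 * carry the mailbox word. If the mailbox is empty, the class 2 mailbox
 * interrupt is enabled so the caller can sleep until data arrives.
 */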
static int spu_hw_ibox_read(struct spu_context *ctx, u32 *data)
{
	struct spu *spu = ctx->spu;
	struct spu_problem __iomem *prob = spu->problem;
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	int ret;

	spin_lock_irq(&spu->register_lock);
	if (in_be32(&prob->mb_stat_R) & 0xff0000) {
		/* read the first available word */
		*data = in_be64(&priv2->puint_mb_R);
		ret = 4;
	} else {
		/* make sure we get woken up by the interrupt */
		spu_int_mask_or(spu, 2, CLASS2_ENABLE_MAILBOX_INTR);
		ret = 0;
	}
	spin_unlock_irq(&spu->register_lock);
	return ret;
}

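/*
 * Write one word into the PPE-to-SPU ("wbox") mailbox. Bits 8-15 of
 * mb_stat_R count free slots; when the queue is full, the mailbox
 * threshold interrupt is enabled so the writer is woken once space
 * frees up. Returns 4 on success, 0 if the queue was full.
 */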
static int spu_hw_wbox_write(struct spu_context *ctx, u32 data)
{
	struct spu *spu = ctx->spu;
	struct spu_problem __iomem *prob = spu->problem;
	int ret;

	spin_lock_irq(&spu->register_lock);
	if (in_be32(&prob->mb_stat_R) & 0x00ff00) {
		/* we have space to write wbox_data to */
		out_be32(&prob->spu_mb_W, data);
		ret = 4;
	} else {
		/*
		 * Make sure we get woken up by the interrupt when space
		 * becomes available.
		 */
		spu_int_mask_or(spu, 2, CLASS2_ENABLE_MAILBOX_THRESHOLD_INTR);
		ret = 0;
	}
	spin_unlock_irq(&spu->register_lock);
	return ret;
}

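/* Post a value to one of the two SPU signal notification channels. */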
static void spu_hw_signal1_write(struct spu_context *ctx, u32 data)
{
	out_be32(&ctx->spu->problem->signal_notify1, data);
}

static void spu_hw_signal2_write(struct spu_context *ctx, u32 data)
{
	out_be32(&ctx->spu->problem->signal_notify2, data);
}

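/*
 * Bits 0 and 1 of spu_cfg_RW select the mode of signal notification
 * channels 1 and 2 respectively: when set, incoming signals are OR-ed
 * into the channel; when clear, each write overwrites the previous
 * value.
 */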
static void spu_hw_signal1_type_set(struct spu_context *ctx, u64 val)
{
	struct spu *spu = ctx->spu;
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	u64 tmp;

	spin_lock_irq(&spu->register_lock);
	tmp = in_be64(&priv2->spu_cfg_RW);
	if (val)
		tmp |= 1;
	else
		tmp &= ~1;
	out_be64(&priv2->spu_cfg_RW, tmp);
	spin_unlock_irq(&spu->register_lock);
}

static u64 spu_hw_signal1_type_get(struct spu_context *ctx)
{
	return ((in_be64(&ctx->spu->priv2->spu_cfg_RW) & 1) != 0);
}

static void spu_hw_signal2_type_set(struct spu_context *ctx, u64 val)
{
	struct spu *spu = ctx->spu;
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	u64 tmp;

	spin_lock_irq(&spu->register_lock);
	tmp = in_be64(&priv2->spu_cfg_RW);
	if (val)
		tmp |= 2;
	else
		tmp &= ~2;
	out_be64(&priv2->spu_cfg_RW, tmp);
	spin_unlock_irq(&spu->register_lock);
}

static u64 spu_hw_signal2_type_get(struct spu_context *ctx)
{
	return ((in_be64(&ctx->spu->priv2->spu_cfg_RW) & 2) != 0);
}

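/*
 * Accessors for the SPU next program counter, the status register, and
 * the kernel mapping of local store.
 */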
static u32 spu_hw_npc_read(struct spu_context *ctx)
{
	return in_be32(&ctx->spu->problem->spu_npc_RW);
}

static void spu_hw_npc_write(struct spu_context *ctx, u32 val)
{
	out_be32(&ctx->spu->problem->spu_npc_RW, val);
}

static u32 spu_hw_status_read(struct spu_context *ctx)
{
	return in_be32(&ctx->spu->problem->spu_status_R);
}

static char *spu_hw_get_ls(struct spu_context *ctx)
{
	return ctx->spu->local_store;
}

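/*
 * spu_privcntl_RW is the privileged control register; among other
 * things it carries the load-request bits used when starting an SPU in
 * isolation mode (see spu_hw_runcntl_write below).
 */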
static void spu_hw_privcntl_write(struct spu_context *ctx, u64 val)
{
	out_be64(&ctx->spu->priv2->spu_privcntl_RW, val);
}

static u32 spu_hw_runcntl_read(struct spu_context *ctx)
{
	return in_be32(&ctx->spu->problem->spu_runcntl_RW);
}

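/*
 * Set the SPU run state. Entering isolation mode additionally requires
 * the load-request enable bits to be set in the privileged control
 * register before the run control write.
 */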
static void spu_hw_runcntl_write(struct spu_context *ctx, u32 val)
{
	spin_lock_irq(&ctx->spu->register_lock);
	if (val & SPU_RUNCNTL_ISOLATE)
		spu_hw_privcntl_write(ctx,
			SPU_PRIVCNT_LOAD_REQUEST_ENABLE_MASK);
	out_be32(&ctx->spu->problem->spu_runcntl_RW, val);
	spin_unlock_irq(&ctx->spu->register_lock);
}

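/*
 * Request a stop and busy-wait, with the register lock held, until the
 * status register confirms that the SPU is no longer running.
 */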
static void spu_hw_runcntl_stop(struct spu_context *ctx)
{
	spin_lock_irq(&ctx->spu->register_lock);
	out_be32(&ctx->spu->problem->spu_runcntl_RW, SPU_RUNCNTL_STOP);
	while (in_be32(&ctx->spu->problem->spu_status_R) & SPU_STATUS_RUNNING)
		cpu_relax();
	spin_unlock_irq(&ctx->spu->register_lock);
}

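/*
 * The master run control bit in MFC state register 1 gates whether the
 * SPU may run at all; clearing it stops the SPU regardless of the
 * per-context run control setting.
 */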
static void spu_hw_master_start(struct spu_context *ctx)
{
	struct spu *spu = ctx->spu;
	u64 sr1;

	spin_lock_irq(&spu->register_lock);
	sr1 = spu_mfc_sr1_get(spu) | MFC_STATE1_MASTER_RUN_CONTROL_MASK;
	spu_mfc_sr1_set(spu, sr1);
	spin_unlock_irq(&spu->register_lock);
}

static void spu_hw_master_stop(struct spu_context *ctx)
{
	struct spu *spu = ctx->spu;
	u64 sr1;

	spin_lock_irq(&spu->register_lock);
	sr1 = spu_mfc_sr1_get(spu) & ~MFC_STATE1_MASTER_RUN_CONTROL_MASK;
	spu_mfc_sr1_set(spu, sr1);
	spin_unlock_irq(&spu->register_lock);
}

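/*
 * Program a proxy tag-group query: dma_querymask_RW selects the tag
 * groups of interest and dma_querytype_RW the completion condition.
 * Fails with -EAGAIN if a previous query is still in flight.
 */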
static int spu_hw_set_mfc_query(struct spu_context *ctx, u32 mask, u32 mode)
{
	struct spu_problem __iomem *prob = ctx->spu->problem;
	int ret;

	spin_lock_irq(&ctx->spu->register_lock);
	ret = -EAGAIN;
	if (in_be32(&prob->dma_querytype_RW))
		goto out;
	ret = 0;
	out_be32(&prob->dma_querymask_RW, mask);
	out_be32(&prob->dma_querytype_RW, mode);
out:
	spin_unlock_irq(&ctx->spu->register_lock);
	return ret;
}

static u32 spu_hw_read_mfc_tagstatus(struct spu_context *ctx)
{
	return in_be32(&ctx->spu->problem->dma_tagstatus_R);
}

static u32 spu_hw_get_mfc_free_elements(struct spu_context *ctx)
{
	return in_be32(&ctx->spu->problem->dma_qstatus_R);
}

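/*
 * Issue a DMA command through the MFC proxy command queue. The local
 * store address, effective address, size/tag and class/opcode words
 * are written in sequence; reading the command register back returns
 * the enqueue status in its low 16 bits (0 = accepted, 2 = queue
 * full).
 */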
static int spu_hw_send_mfc_command(struct spu_context *ctx,
					struct mfc_dma_command *cmd)
{
	u32 status;
	struct spu_problem __iomem *prob = ctx->spu->problem;

	spin_lock_irq(&ctx->spu->register_lock);
	out_be32(&prob->mfc_lsa_W, cmd->lsa);
	out_be64(&prob->mfc_ea_W, cmd->ea);
	out_be32(&prob->mfc_union_W.by32.mfc_size_tag32,
				cmd->size << 16 | cmd->tag);
	out_be32(&prob->mfc_union_W.by32.mfc_class_cmd32,
				cmd->class << 16 | cmd->cmd);
	status = in_be32(&prob->mfc_union_W.by32.mfc_class_cmd32);
	spin_unlock_irq(&ctx->spu->register_lock);

	switch (status & 0xffff) {
	case 0:
		return 0;
	case 2:
		return -EAGAIN;
	default:
		return -EINVAL;
	}
}

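/*
 * Restart a DMA that was suspended by a page fault, unless a context
 * switch is pending and the MFC state is about to be saved and
 * restored anyway.
 */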
static void spu_hw_restart_dma(struct spu_context *ctx)
{
	struct spu_priv2 __iomem *priv2 = ctx->spu->priv2;

	if (!test_bit(SPU_CONTEXT_SWITCH_PENDING, &ctx->spu->flags))
		out_be64(&priv2->mfc_control_RW, MFC_CNTL_RESTART_DMA_COMMAND);
}

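/*
 * Backend operations used by spufs while the context is loaded onto
 * physical SPU hardware; the backing-store variant in backing_ops.c
 * operates on the saved context image instead.
 */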
struct spu_context_ops spu_hw_ops = {
	.mbox_read = spu_hw_mbox_read,
	.mbox_stat_read = spu_hw_mbox_stat_read,
	.mbox_stat_poll = spu_hw_mbox_stat_poll,
	.ibox_read = spu_hw_ibox_read,
	.wbox_write = spu_hw_wbox_write,
	.signal1_write = spu_hw_signal1_write,
	.signal2_write = spu_hw_signal2_write,
	.signal1_type_set = spu_hw_signal1_type_set,
	.signal1_type_get = spu_hw_signal1_type_get,
	.signal2_type_set = spu_hw_signal2_type_set,
	.signal2_type_get = spu_hw_signal2_type_get,
	.npc_read = spu_hw_npc_read,
	.npc_write = spu_hw_npc_write,
	.status_read = spu_hw_status_read,
	.get_ls = spu_hw_get_ls,
	.privcntl_write = spu_hw_privcntl_write,
	.runcntl_read = spu_hw_runcntl_read,
	.runcntl_write = spu_hw_runcntl_write,
	.runcntl_stop = spu_hw_runcntl_stop,
	.master_start = spu_hw_master_start,
	.master_stop = spu_hw_master_stop,
	.set_mfc_query = spu_hw_set_mfc_query,
	.read_mfc_tagstatus = spu_hw_read_mfc_tagstatus,
	.get_mfc_free_elements = spu_hw_get_mfc_free_elements,
	.send_mfc_command = spu_hw_send_mfc_command,
	.restart_dma = spu_hw_restart_dma,
};