xref: /linux/arch/powerpc/platforms/cell/spufs/run.c (revision c537b994505099b7197e7d3125b942ecbcc51eb6)
#define DEBUG

#include <linux/wait.h>
#include <linux/ptrace.h>

#include <asm/spu.h>
#include <asm/spu_priv1.h>
#include <asm/io.h>
#include <asm/unistd.h>

#include "spufs.h"

/*
 * Interrupt-level stop callback: the SPU has stopped, so wake up all
 * threads waiting for this context to stop.
 */
void spufs_stop_callback(struct spu *spu)
{
	struct spu_context *ctx = spu->ctx;

	wake_up_all(&ctx->stop_wq);
}
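
/*
 * Interrupt-level DMA-error callback: if user space asked for events,
 * record the event type and wake the stop queue; otherwise translate
 * the error into a SIGBUS or SIGILL for the controlling thread.
 */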
void spufs_dma_callback(struct spu *spu, int type)
{
	struct spu_context *ctx = spu->ctx;

	if (ctx->flags & SPU_CREATE_EVENTS_ENABLED) {
		ctx->event_return |= type;
		wake_up_all(&ctx->stop_wq);
	} else {
		switch (type) {
		case SPE_EVENT_DMA_ALIGNMENT:
		case SPE_EVENT_SPE_DATA_STORAGE:
		case SPE_EVENT_INVALID_DMA:
			force_sig(SIGBUS, /* info */ current);
			break;
		case SPE_EVENT_SPE_ERROR:
			force_sig(SIGILL, /* info */ current);
			break;
		}
	}
}
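
/*
 * Check whether the SPU has stopped: read the current status word and
 * report the context as stopped when it is no longer runnable, when the
 * running bit is clear, or when a page fault or class 0 exception is
 * pending.
 */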
static inline int spu_stopped(struct spu_context *ctx, u32 *stat)
{
	struct spu *spu;
	u64 pte_fault;

	*stat = ctx->ops->status_read(ctx);
	if (ctx->state != SPU_STATE_RUNNABLE)
		return 1;
	spu = ctx->spu;
	pte_fault = spu->dsisr &
	    (MFC_DSISR_PTE_NOT_FOUND | MFC_DSISR_ACCESS_DENIED);
	return (!(*stat & SPU_STATUS_RUNNING) || pte_fault ||
		spu->class_0_pending) ? 1 : 0;
}
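
/*
 * Bring the SPU into isolated mode: purge the MFC DMA queue, drop the
 * SPE into privileged ("kernel") mode so the isolated loader is
 * accessible, pass the loader's address through the two signal
 * notification registers, start the SPU with the isolate request bit
 * set and wait for the loader to finish.
 */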
static int spu_setup_isolated(struct spu_context *ctx)
{
	int ret;
	u64 __iomem *mfc_cntl;
	u64 sr1;
	u32 status;
	unsigned long timeout;
	const u32 status_loading = SPU_STATUS_RUNNING
		| SPU_STATUS_ISOLATED_STATE | SPU_STATUS_ISOLATED_LOAD_STATUS;

	if (!isolated_loader)
		return -ENODEV;

	ret = spu_acquire_exclusive(ctx);
	if (ret)
		goto out;

	mfc_cntl = &ctx->spu->priv2->mfc_control_RW;

	/*
	 * Purge the MFC DMA queue to ensure no spurious accesses before we
	 * enter kernel mode.
	 */
	timeout = jiffies + HZ;
	out_be64(mfc_cntl, MFC_CNTL_PURGE_DMA_REQUEST);
	while ((in_be64(mfc_cntl) & MFC_CNTL_PURGE_DMA_STATUS_MASK)
			!= MFC_CNTL_PURGE_DMA_COMPLETE) {
		if (time_after(jiffies, timeout)) {
			printk(KERN_ERR "%s: timeout flushing MFC DMA queue\n",
					__func__);
			ret = -EIO;
			goto out_unlock;
		}
		cond_resched();
	}

	/* put the SPE in kernel mode to allow access to the loader */
	sr1 = spu_mfc_sr1_get(ctx->spu);
	sr1 &= ~MFC_STATE1_PROBLEM_STATE_MASK;
	spu_mfc_sr1_set(ctx->spu, sr1);

	/* start the loader */
	ctx->ops->signal1_write(ctx, (unsigned long)isolated_loader >> 32);
	ctx->ops->signal2_write(ctx,
			(unsigned long)isolated_loader & 0xffffffff);

	ctx->ops->runcntl_write(ctx,
			SPU_RUNCNTL_RUNNABLE | SPU_RUNCNTL_ISOLATE);

	ret = 0;
	timeout = jiffies + HZ;
	while (((status = ctx->ops->status_read(ctx)) & status_loading) ==
				status_loading) {
		if (time_after(jiffies, timeout)) {
			printk(KERN_ERR "%s: timeout waiting for loader\n",
					__func__);
			ret = -EIO;
			goto out_drop_priv;
		}
		cond_resched();
	}

	if (!(status & SPU_STATUS_RUNNING)) {
		/*
		 * If the isolated load has failed, run the SPU anyway; we
		 * will get a stop-and-signal event later.
		 */
		pr_debug("%s: isolated LOAD failed\n", __func__);
		ctx->ops->runcntl_write(ctx, SPU_RUNCNTL_RUNNABLE);
		ret = -EACCES;

	} else if (!(status & SPU_STATUS_ISOLATED_STATE)) {
		/* This isn't allowed by the CBEA, but check anyway */
		pr_debug("%s: SPU fell out of isolated mode?\n", __func__);
		ctx->ops->runcntl_write(ctx, SPU_RUNCNTL_STOP);
		ret = -EINVAL;
	}

out_drop_priv:
	/* Finished accessing the loader. Drop kernel mode */
	sr1 |= MFC_STATE1_PROBLEM_STATE_MASK;
	spu_mfc_sr1_set(ctx->spu, sr1);

out_unlock:
	spu_release(ctx);
out:
	return ret;
}
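
/*
 * Prepare a context for running: acquire it in the runnable state, set
 * up isolated mode when requested, write the initial program counter
 * (non-isolated mode only) and kick off execution through the run
 * control register.
 */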
static inline int spu_run_init(struct spu_context *ctx, u32 *npc)
{
	int ret;
	unsigned long runcntl = SPU_RUNCNTL_RUNNABLE;

	ret = spu_acquire_runnable(ctx, 0);
	if (ret)
		return ret;

	if (ctx->flags & SPU_CREATE_ISOLATE) {
		if (!(ctx->ops->status_read(ctx) & SPU_STATUS_ISOLATED_STATE)) {
			/*
			 * Need to release ctx, because spu_setup_isolated
			 * will acquire it exclusively.
			 */
			spu_release(ctx);
			ret = spu_setup_isolated(ctx);
			if (!ret)
				ret = spu_acquire_runnable(ctx, 0);
		}

		/*
		 * If userspace has set the runcntl register (e.g., to issue
		 * an isolated exit), we need to re-set it here.
		 */
		runcntl = ctx->ops->runcntl_read(ctx) &
			(SPU_RUNCNTL_RUNNABLE | SPU_RUNCNTL_ISOLATE);
		if (runcntl == 0)
			runcntl = SPU_RUNCNTL_RUNNABLE;
	} else {
		spu_start_tick(ctx);
		ctx->ops->npc_write(ctx, *npc);
	}

	ctx->ops->runcntl_write(ctx, runcntl);
	return ret;
}
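
/*
 * Finish a run: stop the scheduler tick, read back status and program
 * counter for user space, release the context and report any pending
 * signal as -ERESTARTSYS.
 */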
static inline int spu_run_fini(struct spu_context *ctx, u32 *npc,
			       u32 *status)
{
	int ret = 0;

	spu_stop_tick(ctx);
	*status = ctx->ops->status_read(ctx);
	*npc = ctx->ops->npc_read(ctx);
	spu_release(ctx);

	if (signal_pending(current))
		ret = -ERESTARTSYS;

	return ret;
}
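
/*
 * Re-acquire a context that was scheduled away while running: finish
 * the current run, return early if the SPU stopped or halted on its
 * own, and otherwise restart it from the saved program counter.
 */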
static inline int spu_reacquire_runnable(struct spu_context *ctx, u32 *npc,
				         u32 *status)
{
	int ret;

	if ((ret = spu_run_fini(ctx, npc, status)) != 0)
		return ret;
	if (*status & (SPU_STATUS_STOPPED_BY_STOP |
		       SPU_STATUS_STOPPED_BY_HALT)) {
		return *status;
	}
	if ((ret = spu_run_init(ctx, npc)) != 0)
		return ret;
	return 0;
}

/*
 * SPU syscall restarting is tricky because we violate the basic
 * assumption that the signal handler is running on the interrupted
 * thread. Here, the signal handler runs in PowerPC user space code,
 * while the system call was issued from the SPU.
 * This means we can only do a very rough approximation of POSIX
 * signal semantics.
 */
int spu_handle_restartsys(struct spu_context *ctx, long *spu_ret,
			  unsigned int *npc)
{
	int ret;

	switch (*spu_ret) {
	case -ERESTARTSYS:
	case -ERESTARTNOINTR:
		/*
		 * Enter the regular syscall restarting for
		 * sys_spu_run, then restart the SPU syscall
		 * callback.
		 */
		*npc -= 8;
		ret = -ERESTARTSYS;
		break;
	case -ERESTARTNOHAND:
	case -ERESTART_RESTARTBLOCK:
		/*
		 * Restart block is too hard for now; just return -EINTR
		 * to the SPU.
		 * ERESTARTNOHAND comes from sys_pause(); we also return
		 * -EINTR from there.
		 * Assume that we need to be restarted ourselves though.
		 */
		*spu_ret = -EINTR;
		ret = -ERESTARTSYS;
		break;
	default:
		printk(KERN_WARNING "%s: unexpected return code %ld\n",
			__func__, *spu_ret);
		ret = 0;
	}
	return ret;
}
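
/*
 * Handle a syscall callback from SPU code: fetch the syscall block from
 * the SPU's local store, run the system call on the PowerPC side
 * without keeping the SPU pinned, write the result back into local
 * store and resume the SPU behind the indirect pointer word.
 */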
int spu_process_callback(struct spu_context *ctx)
{
	struct spu_syscall_block s;
	u32 ls_pointer, npc;
	char *ls;
	long spu_ret;
	int ret;

	/* get syscall block from local store */
	npc = ctx->ops->npc_read(ctx);
	ls = ctx->ops->get_ls(ctx);
	ls_pointer = *(u32 *)(ls + npc);
	if (ls_pointer > (LS_SIZE - sizeof(s)))
		return -EFAULT;
	memcpy(&s, ls + ls_pointer, sizeof(s));

	/* do actual syscall without pinning the spu */
	ret = 0;
	spu_ret = -ENOSYS;
	npc += 4;

	if (s.nr_ret < __NR_syscalls) {
		spu_release(ctx);
		/* do actual system call from here */
		spu_ret = spu_sys_callback(&s);
		if (spu_ret <= -ERESTARTSYS)
			ret = spu_handle_restartsys(ctx, &spu_ret, &npc);
		spu_acquire(ctx);
		if (ret == -ERESTARTSYS)
			return ret;
	}

	/* write result, jump over indirect pointer */
	memcpy(ls + ls_pointer, &spu_ret, sizeof(spu_ret));
	ctx->ops->npc_write(ctx, npc);
	ctx->ops->runcntl_write(ctx, SPU_RUNCNTL_RUNNABLE);
	return ret;
}
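
/*
 * Handle pending exception events for a running context: let the
 * bottom-half handlers resolve page faults and class 0 exceptions, and
 * report a pending signal as -ERESTARTSYS.
 */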
static inline int spu_process_events(struct spu_context *ctx)
{
	struct spu *spu = ctx->spu;
	u64 pte_fault = MFC_DSISR_PTE_NOT_FOUND | MFC_DSISR_ACCESS_DENIED;
	int ret = 0;

	if (spu->dsisr & pte_fault)
		ret = spu_irq_class_1_bottom(spu);
	if (spu->class_0_pending)
		ret = spu_irq_class_0_bottom(spu);
	if (!ret && signal_pending(current))
		ret = -ERESTARTSYS;
	return ret;
}
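
/*
 * Main loop behind the spu_run system call: start the context, then
 * wait for the SPU to stop, handling syscall callbacks, rescheduling
 * and exception events until the SPU stops for good or the controlling
 * thread is interrupted.
 */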
long spufs_run_spu(struct file *file, struct spu_context *ctx,
		   u32 *npc, u32 *event)
{
	int ret;
	u32 status;

	if (down_interruptible(&ctx->run_sema))
		return -ERESTARTSYS;

	ctx->ops->master_start(ctx);
	ctx->event_return = 0;
	ret = spu_run_init(ctx, npc);
	if (ret)
		goto out;

	do {
		ret = spufs_wait(ctx->stop_wq, spu_stopped(ctx, &status));
		if (unlikely(ret))
			break;
		/* stop code 0x2104 signals a syscall request from the SPU */
		if ((status & SPU_STATUS_STOPPED_BY_STOP) &&
		    (status >> SPU_STOP_STATUS_SHIFT == 0x2104)) {
			ret = spu_process_callback(ctx);
			if (ret)
				break;
			status &= ~SPU_STATUS_STOPPED_BY_STOP;
		}
		if (unlikely(ctx->state != SPU_STATE_RUNNABLE)) {
			ret = spu_reacquire_runnable(ctx, npc, &status);
			if (ret) {
				spu_stop_tick(ctx);
				goto out2;
			}
			continue;
		}
		ret = spu_process_events(ctx);

	} while (!ret && !(status & (SPU_STATUS_STOPPED_BY_STOP |
				      SPU_STATUS_STOPPED_BY_HALT)));

	ctx->ops->master_stop(ctx);
	ret = spu_run_fini(ctx, npc, &status);
	spu_yield(ctx);

out2:
	if ((ret == 0) ||
	    ((ret == -ERESTARTSYS) &&
	     ((status & SPU_STATUS_STOPPED_BY_HALT) ||
	      ((status & SPU_STATUS_STOPPED_BY_STOP) &&
	       (status >> SPU_STOP_STATUS_SHIFT != 0x2104)))))
		ret = status;

	/* stop code 0x3fff is used by the debugger; deliver a SIGTRAP */
	if ((status & SPU_STATUS_STOPPED_BY_STOP)
	    && (status >> SPU_STOP_STATUS_SHIFT) == 0x3fff) {
		force_sig(SIGTRAP, current);
		ret = -ERESTARTSYS;
	}

out:
	*event = ctx->event_return;
	up(&ctx->run_sema);
	return ret;
}
369