xref: /linux/arch/powerpc/platforms/cell/spufs/run.c (revision ccea15f45eb0ab12d658f88b5d4be005cb2bb1a7)
1 #include <linux/wait.h>
2 #include <linux/ptrace.h>
3 
4 #include <asm/spu.h>
5 #include <asm/unistd.h>
6 
7 #include "spufs.h"
8 
9 /* interrupt-level stop callback function. */
10 void spufs_stop_callback(struct spu *spu)
11 {
12 	struct spu_context *ctx = spu->ctx;
13 
14 	wake_up_all(&ctx->stop_wq);
15 }
16 
17 static inline int spu_stopped(struct spu_context *ctx, u32 * stat)
18 {
19 	struct spu *spu;
20 	u64 pte_fault;
21 
22 	*stat = ctx->ops->status_read(ctx);
23 	if (ctx->state != SPU_STATE_RUNNABLE)
24 		return 1;
25 	spu = ctx->spu;
26 	pte_fault = spu->dsisr &
27 	    (MFC_DSISR_PTE_NOT_FOUND | MFC_DSISR_ACCESS_DENIED);
28 	return (!(*stat & 0x1) || pte_fault || spu->class_0_pending) ? 1 : 0;
29 }
30 
31 static inline int spu_run_init(struct spu_context *ctx, u32 * npc,
32 			       u32 * status)
33 {
34 	int ret;
35 
36 	if ((ret = spu_acquire_runnable(ctx)) != 0)
37 		return ret;
38 	ctx->ops->npc_write(ctx, *npc);
39 	ctx->ops->runcntl_write(ctx, SPU_RUNCNTL_RUNNABLE);
40 	return 0;
41 }
42 
43 static inline int spu_run_fini(struct spu_context *ctx, u32 * npc,
44 			       u32 * status)
45 {
46 	int ret = 0;
47 
48 	*status = ctx->ops->status_read(ctx);
49 	*npc = ctx->ops->npc_read(ctx);
50 	spu_release(ctx);
51 
52 	if (signal_pending(current))
53 		ret = -ERESTARTSYS;
54 	if (unlikely(current->ptrace & PT_PTRACED)) {
55 		if ((*status & SPU_STATUS_STOPPED_BY_STOP)
56 		    && (*status >> SPU_STOP_STATUS_SHIFT) == 0x3fff) {
57 			force_sig(SIGTRAP, current);
58 			ret = -ERESTARTSYS;
59 		}
60 	}
61 	return ret;
62 }
63 
64 static inline int spu_reacquire_runnable(struct spu_context *ctx, u32 *npc,
65 				         u32 *status)
66 {
67 	int ret;
68 
69 	if ((ret = spu_run_fini(ctx, npc, status)) != 0)
70 		return ret;
71 	if (*status & (SPU_STATUS_STOPPED_BY_STOP |
72 		       SPU_STATUS_STOPPED_BY_HALT)) {
73 		return *status;
74 	}
75 	if ((ret = spu_run_init(ctx, npc, status)) != 0)
76 		return ret;
77 	return 0;
78 }
79 
80 /*
81  * SPU syscall restarting is tricky because we violate the basic
82  * assumption that the signal handler is running on the interrupted
83  * thread. Here instead, the handler runs on PowerPC user space code,
84  * while the syscall was called from the SPU.
85  * This means we can only do a very rough approximation of POSIX
86  * signal semantics.
87  */
88 int spu_handle_restartsys(struct spu_context *ctx, long *spu_ret,
89 			  unsigned int *npc)
90 {
91 	int ret;
92 
93 	switch (*spu_ret) {
94 	case -ERESTARTSYS:
95 	case -ERESTARTNOINTR:
96 		/*
97 		 * Enter the regular syscall restarting for
98 		 * sys_spu_run, then restart the SPU syscall
99 		 * callback.
100 		 */
101 		*npc -= 8;
102 		ret = -ERESTARTSYS;
103 		break;
104 	case -ERESTARTNOHAND:
105 	case -ERESTART_RESTARTBLOCK:
106 		/*
107 		 * Restart block is too hard for now, just return -EINTR
108 		 * to the SPU.
109 		 * ERESTARTNOHAND comes from sys_pause, we also return
110 		 * -EINTR from there.
111 		 * Assume that we need to be restarted ourselves though.
112 		 */
113 		*spu_ret = -EINTR;
114 		ret = -ERESTARTSYS;
115 		break;
116 	default:
117 		printk(KERN_WARNING "%s: unexpected return code %ld\n",
118 			__FUNCTION__, *spu_ret);
119 		ret = 0;
120 	}
121 	return ret;
122 }
123 
/*
 * Service a syscall callback raised by SPU code via a stop-and-signal.
 * The word at the stop instruction's NPC in local store holds an LS
 * offset to a struct spu_syscall_block; the syscall is executed on the
 * PPU with the context released, and the result is written back into
 * the same LS location before restarting the SPU past the pointer word.
 *
 * Returns 0 on success, -EFAULT for an out-of-range LS pointer, or
 * -ERESTARTSYS when sys_spu_run itself must be restarted (in which case
 * the SPU is NOT restarted here).
 */
int spu_process_callback(struct spu_context *ctx)
{
	struct spu_syscall_block s;
	u32 ls_pointer, npc;
	char *ls;
	long spu_ret;
	int ret;

	/* get syscall block from local store */
	npc = ctx->ops->npc_read(ctx);
	ls = ctx->ops->get_ls(ctx);
	/*
	 * NOTE(review): npc itself is not bounds-checked or alignment-
	 * checked here before dereferencing ls + npc — presumably the
	 * hardware constrains NPC to a valid, aligned LS address; verify.
	 */
	ls_pointer = *(u32*)(ls + npc);
	if (ls_pointer > (LS_SIZE - sizeof(s)))
		return -EFAULT;
	memcpy(&s, ls + ls_pointer, sizeof (s));

	/* do actual syscall without pinning the spu */
	ret = 0;
	spu_ret = -ENOSYS;
	/* step over the 4-byte indirect pointer word for the restart NPC */
	npc += 4;

	if (s.nr_ret < __NR_syscalls) {
		spu_release(ctx);
		/* do actual system call from here */
		spu_ret = spu_sys_callback(&s);
		/* any value <= -ERESTARTSYS is a restart-family code */
		if (spu_ret <= -ERESTARTSYS) {
			ret = spu_handle_restartsys(ctx, &spu_ret, &npc);
		}
		spu_acquire(ctx);
		/* restart sys_spu_run; SPU stays stopped at the callback */
		if (ret == -ERESTARTSYS)
			return ret;
	}

	/* write result, jump over indirect pointer */
	memcpy(ls + ls_pointer, &spu_ret, sizeof (spu_ret));
	ctx->ops->npc_write(ctx, npc);
	ctx->ops->runcntl_write(ctx, SPU_RUNCNTL_RUNNABLE);
	return ret;
}
163 
164 static inline int spu_process_events(struct spu_context *ctx)
165 {
166 	struct spu *spu = ctx->spu;
167 	u64 pte_fault = MFC_DSISR_PTE_NOT_FOUND | MFC_DSISR_ACCESS_DENIED;
168 	int ret = 0;
169 
170 	if (spu->dsisr & pte_fault)
171 		ret = spu_irq_class_1_bottom(spu);
172 	if (spu->class_0_pending)
173 		ret = spu_irq_class_0_bottom(spu);
174 	if (!ret && signal_pending(current))
175 		ret = -ERESTARTSYS;
176 	return ret;
177 }
178 
/*
 * Main entry point for sys_spu_run: start the SPU at *npc and loop,
 * sleeping on ctx->stop_wq, until the SPU stops or halts for good, a
 * signal arrives, or an event handler fails.  run_sema serializes
 * concurrent runs of the same context.
 *
 * On success returns the final SPU status word (also stored in *status)
 * with *npc updated to the final program counter; otherwise returns a
 * negative error such as -ERESTARTSYS.
 */
long spufs_run_spu(struct file *file, struct spu_context *ctx,
		   u32 * npc, u32 * status)
{
	int ret;

	if (down_interruptible(&ctx->run_sema))
		return -ERESTARTSYS;

	ret = spu_run_init(ctx, npc, status);
	if (ret)
		goto out;

	do {
		/* sleep until spu_stopped() reports a stop condition */
		ret = spufs_wait(ctx->stop_wq, spu_stopped(ctx, status));
		if (unlikely(ret))
			break;
		/*
		 * Stop code 0x2104 is the stop-and-signal value used for
		 * SPU-side syscall callbacks (see spu_process_callback).
		 */
		if ((*status & SPU_STATUS_STOPPED_BY_STOP) &&
		    (*status >> SPU_STOP_STATUS_SHIFT == 0x2104)) {
			ret = spu_process_callback(ctx);
			if (ret)
				break;
			/* callback restarted the SPU; don't treat as stop */
			*status &= ~SPU_STATUS_STOPPED_BY_STOP;
		}
		/* lost the physical SPU to the scheduler: get it back */
		if (unlikely(ctx->state != SPU_STATE_RUNNABLE)) {
			ret = spu_reacquire_runnable(ctx, npc, status);
			if (ret)
				goto out;
			continue;
		}
		ret = spu_process_events(ctx);

	} while (!ret && !(*status & (SPU_STATUS_STOPPED_BY_STOP |
				      SPU_STATUS_STOPPED_BY_HALT)));

	ctx->ops->runcntl_stop(ctx);
	ret = spu_run_fini(ctx, npc, status);
	if (!ret)
		ret = *status;
	/* give the SPU up voluntarily so other contexts can be scheduled */
	spu_yield(ctx);

out:
	up(&ctx->run_sema);
	return ret;
}
223 
224