xref: /linux/arch/powerpc/platforms/cell/spufs/context.c (revision de2fe5e07d58424bc286fff3fd3c1b0bf933cd58)
/*
 * SPU file system -- SPU context management
 *
 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
 *
 * Author: Arnd Bergmann <arndb@de.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <asm/spu.h>
#include <asm/spu_csa.h>
#include "spufs.h"

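/*
 * Allocate and initialize a new SPU context.  The context starts out
 * in the saved state and uses the backing-store operations; it is only
 * bound to a physical SPU later, in spu_activate().  A reference to
 * the current task's mm is taken to identify the owner.
 */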
struct spu_context *alloc_spu_context(void)
{
	struct spu_context *ctx;

	ctx = kmalloc(sizeof *ctx, GFP_KERNEL);
	if (!ctx)
		goto out;
	/* Binding to physical processor deferred
	 * until spu_activate().
	 */
	spu_init_csa(&ctx->csa);
	if (!ctx->csa.lscsa)
		goto out_free;
	spin_lock_init(&ctx->mmio_lock);
	kref_init(&ctx->kref);
	init_rwsem(&ctx->state_sema);
	init_MUTEX(&ctx->run_sema);
	init_waitqueue_head(&ctx->ibox_wq);
	init_waitqueue_head(&ctx->wbox_wq);
	init_waitqueue_head(&ctx->stop_wq);
	init_waitqueue_head(&ctx->mfc_wq);
	ctx->ibox_fasync = NULL;
	ctx->wbox_fasync = NULL;
	ctx->mfc_fasync = NULL;
	ctx->mfc = NULL;
	ctx->tagwait = 0;
	ctx->state = SPU_STATE_SAVED;
	ctx->local_store = NULL;
	ctx->cntl = NULL;
	ctx->signal1 = NULL;
	ctx->signal2 = NULL;
	ctx->spu = NULL;
	ctx->ops = &spu_backing_ops;
	ctx->owner = get_task_mm(current);
	goto out;
out_free:
	kfree(ctx);
	ctx = NULL;
out:
	return ctx;
}

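/*
 * Called via kref_put() when the last reference to the context is
 * dropped: deactivate it, then free the saved context area and the
 * structure itself.
 */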
void destroy_spu_context(struct kref *kref)
{
	struct spu_context *ctx;

	ctx = container_of(kref, struct spu_context, kref);
	down_write(&ctx->state_sema);
	spu_deactivate(ctx);
	up_write(&ctx->state_sema);
	spu_fini_csa(&ctx->csa);
	kfree(ctx);
}

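/* Take an additional reference on a context. */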
struct spu_context *get_spu_context(struct spu_context *ctx)
{
	kref_get(&ctx->kref);
	return ctx;
}

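/*
 * Drop a reference on a context.  Returns nonzero when this was the
 * last reference and the context has been destroyed.
 */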
int put_spu_context(struct spu_context *ctx)
{
	return kref_put(&ctx->kref, &destroy_spu_context);
}

/* give up the mm reference when the context is about to be destroyed */
void spu_forget(struct spu_context *ctx)
{
	struct mm_struct *mm;

	spu_acquire_saved(ctx);
	mm = ctx->owner;
	ctx->owner = NULL;
	mmput(mm);
	spu_release(ctx);
}

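/* Lock the context against state changes (shared/read lock). */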
void spu_acquire(struct spu_context *ctx)
{
	down_read(&ctx->state_sema);
}

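/* Drop the lock taken by spu_acquire() or one of the spu_acquire_*() variants. */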
void spu_release(struct spu_context *ctx)
{
	up_read(&ctx->state_sema);
}

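/*
 * Zap all user-space mappings of the context's local store and of its
 * problem-state register areas.
 */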
void spu_unmap_mappings(struct spu_context *ctx)
{
	if (ctx->local_store)
		unmap_mapping_range(ctx->local_store, 0, LS_SIZE, 1);
	if (ctx->mfc)
		unmap_mapping_range(ctx->mfc, 0, 0x4000, 1);
	if (ctx->cntl)
		unmap_mapping_range(ctx->cntl, 0, 0x4000, 1);
	if (ctx->signal1)
		unmap_mapping_range(ctx->signal1, 0, 0x4000, 1);
	if (ctx->signal2)
		unmap_mapping_range(ctx->signal2, 0, 0x4000, 1);
}

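/*
 * Lock the context and make sure it is loaded on a physical SPU.  If it
 * is already runnable, only the read lock is taken and the SPU priority
 * is updated; otherwise the write lock is taken, the context is
 * activated, and the lock is downgraded again.  On success the context
 * is returned with the state semaphore held for reading.
 */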
int spu_acquire_runnable(struct spu_context *ctx)
{
	int ret = 0;

	down_read(&ctx->state_sema);
	if (ctx->state == SPU_STATE_RUNNABLE) {
		ctx->spu->prio = current->prio;
		return 0;
	}
	up_read(&ctx->state_sema);

	down_write(&ctx->state_sema);
	/* ctx is about to be freed, can't acquire any more */
	if (!ctx->owner) {
		ret = -EINVAL;
		goto out;
	}

	if (ctx->state == SPU_STATE_SAVED) {
		ret = spu_activate(ctx, 0);
		if (ret)
			goto out;
		ctx->state = SPU_STATE_RUNNABLE;
	}

	downgrade_write(&ctx->state_sema);
	/* On success, we return holding the lock */

	return ret;
out:
	/* Release here, to simplify calling code. */
	up_write(&ctx->state_sema);

	return ret;
}

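/*
 * Lock the context and make sure its state has been saved, deactivating
 * it from its physical SPU if necessary.  Returns with the state
 * semaphore held for reading.
 */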
void spu_acquire_saved(struct spu_context *ctx)
{
	down_read(&ctx->state_sema);

	if (ctx->state == SPU_STATE_SAVED)
		return;

	up_read(&ctx->state_sema);
	down_write(&ctx->state_sema);

	if (ctx->state == SPU_STATE_RUNNABLE) {
		spu_deactivate(ctx);
		ctx->state = SPU_STATE_SAVED;
	}

	downgrade_write(&ctx->state_sema);
}