/*
 * SPU file system -- SPU context management
 *
 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
 *
 * Author: Arnd Bergmann <arndb@de.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <asm/spu.h>
#include <asm/spu_csa.h>
#include "spufs.h"

/**
 * alloc_spu_context - allocate and initialize a new SPU context
 * @gang:	gang to add the new context to, or NULL for a standalone one
 *
 * Allocates a zeroed context, sets up its context save area (csa),
 * locks and wait queues, takes a reference on the current task's mm
 * (dropped again in spu_forget()) and snapshots the current task's
 * scheduling parameters for the SPU scheduler.
 *
 * The context starts out in SPU_STATE_SAVED using the backing-store
 * ops; it is not bound to a physical SPU here.
 *
 * Returns the new context, or NULL if the allocation or the csa
 * setup failed.
 */
struct spu_context *alloc_spu_context(struct spu_gang *gang)
{
	struct spu_context *ctx;
	ctx = kzalloc(sizeof *ctx, GFP_KERNEL);
	if (!ctx)
		goto out;
	/* Binding to physical processor deferred
	 * until spu_activate().
	 */
	spu_init_csa(&ctx->csa);
	/* a NULL lscsa means spu_init_csa() could not set up the
	 * local-store save area */
	if (!ctx->csa.lscsa) {
		goto out_free;
	}
	spin_lock_init(&ctx->mmio_lock);
	kref_init(&ctx->kref);
	mutex_init(&ctx->state_mutex);
	init_MUTEX(&ctx->run_sema);
	init_waitqueue_head(&ctx->ibox_wq);
	init_waitqueue_head(&ctx->wbox_wq);
	init_waitqueue_head(&ctx->stop_wq);
	init_waitqueue_head(&ctx->mfc_wq);
	ctx->state = SPU_STATE_SAVED;
	ctx->ops = &spu_backing_ops;
	/* the owning mm pins the context's address space; released
	 * via mmput() in spu_forget() */
	ctx->owner = get_task_mm(current);
	if (gang)
		spu_gang_add_ctx(gang, ctx);
	/* inherit the creating task's scheduling parameters */
	ctx->rt_priority = current->rt_priority;
	ctx->policy = current->policy;
	ctx->prio = current->prio;
	INIT_DELAYED_WORK(&ctx->sched_work, spu_sched_tick);
	goto out;
out_free:
	kfree(ctx);
	ctx = NULL;
out:
	return ctx;
}

/**
 * destroy_spu_context - kref release callback for an SPU context
 * @kref:	embedded reference count of the context to tear down
 *
 * Runs when the last reference is dropped: deactivates the context
 * while holding state_mutex, frees the save area, unlinks the context
 * from its gang (if any) and frees it.
 *
 * Do not call directly; drop references with put_spu_context().
 */
void destroy_spu_context(struct kref *kref)
{
	struct spu_context *ctx;
	ctx = container_of(kref, struct spu_context, kref);
	mutex_lock(&ctx->state_mutex);
	spu_deactivate(ctx);
	mutex_unlock(&ctx->state_mutex);
	spu_fini_csa(&ctx->csa);
	if (ctx->gang)
		spu_gang_remove_ctx(ctx->gang, ctx);
	kfree(ctx);
}

/* take an additional reference on @ctx; returns @ctx for convenience */
struct spu_context * get_spu_context(struct spu_context *ctx)
{
	kref_get(&ctx->kref);
	return ctx;
}

/*
 * Drop a reference on @ctx.  Returns nonzero if this was the last
 * reference and the context has been destroyed.
 */
int put_spu_context(struct spu_context *ctx)
{
	return kref_put(&ctx->kref, &destroy_spu_context);
}

/* give up the mm reference when the context is about to be destroyed */
void spu_forget(struct spu_context *ctx)
{
	struct mm_struct *mm;
	/* lock the context in saved state while detaching the mm */
	spu_acquire_saved(ctx);
	mm = ctx->owner;
	ctx->owner = NULL;
	/* drop the reference taken by get_task_mm() at creation */
	mmput(mm);
	spu_release(ctx);
}

/*
 * Zap all user-space mappings of the context's register areas so that
 * subsequent accesses fault and go through the spufs fault handlers.
 * Only areas that have actually been mapped (non-NULL address_space
 * pointers) are touched.
 */
void spu_unmap_mappings(struct spu_context *ctx)
{
	if (ctx->local_store)
		unmap_mapping_range(ctx->local_store, 0, LS_SIZE, 1);
	if (ctx->mfc)
		unmap_mapping_range(ctx->mfc, 0, 0x1000, 1);
	if (ctx->cntl)
		unmap_mapping_range(ctx->cntl, 0, 0x1000, 1);
	if (ctx->signal1)
		unmap_mapping_range(ctx->signal1, 0, PAGE_SIZE, 1);
	if (ctx->signal2)
		unmap_mapping_range(ctx->signal2, 0, PAGE_SIZE, 1);
	if (ctx->mss)
		unmap_mapping_range(ctx->mss, 0, 0x1000, 1);
	if (ctx->psmap)
		unmap_mapping_range(ctx->psmap, 0, 0x20000, 1);
}

/**
 * spu_acquire_exclusive - lock spu context and protect against userspace access
 * @ctx:	spu context to lock
 *
 * Note:
 *	Returns 0 and with the context locked on success
 *	Returns negative error and with the context _unlocked_ on failure.
 */
int spu_acquire_exclusive(struct spu_context *ctx)
{
	int ret = -EINVAL;

	spu_acquire(ctx);
	/*
	 * Context is about to be freed, so we can't acquire it anymore.
	 */
	if (!ctx->owner)
		goto out_unlock;

	if (ctx->state == SPU_STATE_SAVED) {
		ret = spu_activate(ctx, 0);
		if (ret)
			goto out_unlock;
	} else {
		/*
		 * We need to exclude userspace access to the context.
		 *
		 * To protect against memory access we invalidate all ptes
		 * and make sure the pagefault handlers block on the mutex.
		 */
		spu_unmap_mappings(ctx);
	}

	return 0;

out_unlock:
	spu_release(ctx);
	return ret;
}

/**
 * spu_acquire_runnable - lock spu context and make sure it is in runnable state
 * @ctx:	spu context to lock
 * @flags:	flags passed through to spu_activate()
 *
 * Note:
 *	Returns 0 and with the context locked on success
 *	Returns negative error and with the context _unlocked_ on failure.
 */
int spu_acquire_runnable(struct spu_context *ctx, unsigned long flags)
{
	int ret = -EINVAL;

	spu_acquire(ctx);
	if (ctx->state == SPU_STATE_SAVED) {
		/*
		 * Context is about to be freed, so we can't acquire it anymore.
		 */
		if (!ctx->owner)
			goto out_unlock;
		ret = spu_activate(ctx, flags);
		if (ret)
			goto out_unlock;
	}

	return 0;

out_unlock:
	spu_release(ctx);
	return ret;
}

/**
 * spu_acquire_saved - lock spu context and make sure it is in saved state
 * @ctx:	spu context to lock
 *
 * Returns with the context locked; the caller releases it with
 * spu_release().
 */
void spu_acquire_saved(struct spu_context *ctx)
{
	spu_acquire(ctx);
	if (ctx->state != SPU_STATE_SAVED)
		spu_deactivate(ctx);
}