/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/mutex.h>
#include <linux/log2.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/amd-iommu.h>
#include <linux/notifier.h>
#include <linux/compat.h>

struct mm_struct;

#include "kfd_priv.h"

/*
 * Initial size for the array of queues.
 * The allocated size is doubled each time
 * it is exceeded up to MAX_PROCESS_QUEUES.
 */
#define INITIAL_QUEUE_ARRAY_SIZE 16

/*
 * Hash table of struct kfd_process (linked by field kfd_processes).
 * Unique/indexed by mm_struct*.
 */
#define KFD_PROCESS_TABLE_SIZE 5 /* bits: 32 entries */
static DEFINE_HASHTABLE(kfd_processes_table, KFD_PROCESS_TABLE_SIZE);
static DEFINE_MUTEX(kfd_processes_mutex);

DEFINE_STATIC_SRCU(kfd_processes_srcu);

static struct workqueue_struct *kfd_process_wq;

struct kfd_process_release_work {
	struct work_struct kfd_work;
	struct kfd_process *p;
};

static struct kfd_process *find_process(const struct task_struct *thread);
static struct kfd_process *create_process(const struct task_struct *thread);

void kfd_process_create_wq(void)
{
	if (!kfd_process_wq)
		kfd_process_wq = create_workqueue("kfd_process_wq");
}

void kfd_process_destroy_wq(void)
{
	if (kfd_process_wq) {
		flush_workqueue(kfd_process_wq);
		destroy_workqueue(kfd_process_wq);
		kfd_process_wq = NULL;
	}
}
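/*
 * Sketch of the intended workqueue lifecycle (hypothetical caller; in
 * the driver, the module init/exit path plays this role):
 *
 *	kfd_process_create_wq();	// once, at driver load
 *	...
 *	kfd_process_destroy_wq();	// once, at driver unload
 *
 * Note that create_workqueue() can fail and leave kfd_process_wq NULL;
 * later users guard against that with BUG_ON(!kfd_process_wq).
 */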
struct kfd_process *kfd_create_process(const struct task_struct *thread)
{
	struct kfd_process *process;

	BUG_ON(!kfd_process_wq);

	if (thread->mm == NULL)
		return ERR_PTR(-EINVAL);

	/* Only the pthreads threading model is supported. */
	if (thread->group_leader->mm != thread->mm)
		return ERR_PTR(-EINVAL);

	/* Take mmap_sem because we call __mmu_notifier_register inside */
	down_write(&thread->mm->mmap_sem);

	/*
	 * Take the KFD process mutex before starting process creation so
	 * that two threads of the same process can't each create a
	 * kfd_process structure.
	 */
	mutex_lock(&kfd_processes_mutex);

	/* A prior open of /dev/kfd could have already created the process. */
	process = find_process(thread);
	if (process)
		pr_debug("kfd: process already found\n");

	if (!process)
		process = create_process(thread);

	mutex_unlock(&kfd_processes_mutex);

	up_write(&thread->mm->mmap_sem);

	return process;
}

struct kfd_process *kfd_get_process(const struct task_struct *thread)
{
	struct kfd_process *process;

	if (thread->mm == NULL)
		return ERR_PTR(-EINVAL);

	/* Only the pthreads threading model is supported. */
	if (thread->group_leader->mm != thread->mm)
		return ERR_PTR(-EINVAL);

	process = find_process(thread);

	return process;
}

static struct kfd_process *find_process_by_mm(const struct mm_struct *mm)
{
	struct kfd_process *process;

	hash_for_each_possible_rcu(kfd_processes_table, process,
				   kfd_processes, (uintptr_t)mm)
		if (process->mm == mm)
			return process;

	return NULL;
}

static struct kfd_process *find_process(const struct task_struct *thread)
{
	struct kfd_process *p;
	int idx;

	idx = srcu_read_lock(&kfd_processes_srcu);
	p = find_process_by_mm(thread->mm);
	srcu_read_unlock(&kfd_processes_srcu, idx);

	return p;
}

static void kfd_process_wq_release(struct work_struct *work)
{
	struct kfd_process_release_work *my_work;
	struct kfd_process_device *pdd, *temp;
	struct kfd_process *p;

	my_work = (struct kfd_process_release_work *) work;

	p = my_work->p;

	mutex_lock(&p->mutex);

	list_for_each_entry_safe(pdd, temp, &p->per_device_data,
				 per_device_list) {
		amd_iommu_unbind_pasid(pdd->dev->pdev, p->pasid);
		list_del(&pdd->per_device_list);

		kfree(pdd);
	}

	kfd_pasid_free(p->pasid);

	mutex_unlock(&p->mutex);

	mutex_destroy(&p->mutex);

	kfree(p->queues);

	kfree(p);

	kfree((void *)work);
}

static void kfd_process_destroy_delayed(struct rcu_head *rcu)
{
	struct kfd_process_release_work *work;
	struct kfd_process *p;

	BUG_ON(!kfd_process_wq);

	p = container_of(rcu, struct kfd_process, rcu);
	BUG_ON(atomic_read(&p->mm->mm_count) <= 0);

	mmdrop(p->mm);

	work = (struct kfd_process_release_work *)
		kmalloc(sizeof(struct kfd_process_release_work), GFP_ATOMIC);

	if (work) {
		INIT_WORK((struct work_struct *) work, kfd_process_wq_release);
		work->p = p;
		queue_work(kfd_process_wq, (struct work_struct *) work);
	}
}
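/*
 * Teardown sequence, as implemented below: when the process's address
 * space goes away, kfd_process_notifier_release() unhashes the process
 * and hands p->rcu to mmu_notifier_call_srcu(). After the SRCU grace
 * period, kfd_process_destroy_delayed() runs in RCU callback context
 * (hence the GFP_ATOMIC allocation above), drops the mm reference, and
 * queues kfd_process_wq_release() on kfd_process_wq. The work item runs
 * in process context, so it may sleep while taking p->mutex and calling
 * into the IOMMU driver to unbind each PASID before freeing everything.
 */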
static void kfd_process_notifier_release(struct mmu_notifier *mn,
					 struct mm_struct *mm)
{
	struct kfd_process *p;

	/*
	 * The kfd_process structure cannot be freed here because the
	 * mmu_notifier srcu is read locked.
	 */
	p = container_of(mn, struct kfd_process, mmu_notifier);
	BUG_ON(p->mm != mm);

	mutex_lock(&kfd_processes_mutex);
	hash_del_rcu(&p->kfd_processes);
	mutex_unlock(&kfd_processes_mutex);
	synchronize_srcu(&kfd_processes_srcu);

	mutex_lock(&p->mutex);

	/* In case our notifier is called before the IOMMU notifier */
	pqm_uninit(&p->pqm);

	mutex_unlock(&p->mutex);

	/*
	 * Because we drop mm_count inside kfd_process_destroy_delayed
	 * and because the mmu_notifier_unregister function also drops
	 * mm_count, we need to take an extra count here.
	 */
	atomic_inc(&p->mm->mm_count);
	mmu_notifier_unregister_no_release(&p->mmu_notifier, p->mm);
	mmu_notifier_call_srcu(&p->rcu, &kfd_process_destroy_delayed);
}

static const struct mmu_notifier_ops kfd_process_mmu_notifier_ops = {
	.release = kfd_process_notifier_release,
};

static struct kfd_process *create_process(const struct task_struct *thread)
{
	struct kfd_process *process;
	int err = -ENOMEM;

	process = kzalloc(sizeof(*process), GFP_KERNEL);

	if (!process)
		goto err_alloc_process;

	process->queues = kmalloc_array(INITIAL_QUEUE_ARRAY_SIZE,
					sizeof(process->queues[0]), GFP_KERNEL);
	if (!process->queues)
		goto err_alloc_queues;

	process->pasid = kfd_pasid_alloc();
	if (process->pasid == 0)
		goto err_alloc_pasid;

	mutex_init(&process->mutex);

	process->mm = thread->mm;

	/* register notifier */
	process->mmu_notifier.ops = &kfd_process_mmu_notifier_ops;
	err = __mmu_notifier_register(&process->mmu_notifier, process->mm);
	if (err)
		goto err_mmu_notifier;

	hash_add_rcu(kfd_processes_table, &process->kfd_processes,
		     (uintptr_t)process->mm);

	process->lead_thread = thread->group_leader;

	process->queue_array_size = INITIAL_QUEUE_ARRAY_SIZE;

	INIT_LIST_HEAD(&process->per_device_data);

	err = pqm_init(&process->pqm, process);
	if (err != 0)
		goto err_process_pqm_init;

	/* init process apertures */
	process->is_32bit_user_mode = is_compat_task();
	if (kfd_init_apertures(process) != 0)
		goto err_init_apertures;

	return process;

err_init_apertures:
	pqm_uninit(&process->pqm);
err_process_pqm_init:
	hash_del_rcu(&process->kfd_processes);
	synchronize_rcu();
	mmu_notifier_unregister_no_release(&process->mmu_notifier, process->mm);
err_mmu_notifier:
	kfd_pasid_free(process->pasid);
err_alloc_pasid:
	kfree(process->queues);
err_alloc_queues:
	kfree(process);
err_alloc_process:
	return ERR_PTR(err);
}

struct kfd_process_device *kfd_get_process_device_data(struct kfd_dev *dev,
						       struct kfd_process *p)
{
	struct kfd_process_device *pdd;

	list_for_each_entry(pdd, &p->per_device_data, per_device_list)
		if (pdd->dev == dev)
			return pdd;

	/*
	 * Return NULL explicitly: after a completed list_for_each_entry
	 * the cursor points at the list head, not at a valid entry.
	 */
	return NULL;
}

struct kfd_process_device *kfd_create_process_device_data(struct kfd_dev *dev,
							  struct kfd_process *p)
{
	struct kfd_process_device *pdd = NULL;

	pdd = kzalloc(sizeof(*pdd), GFP_KERNEL);
	if (pdd != NULL) {
		pdd->dev = dev;
		INIT_LIST_HEAD(&pdd->qpd.queues_list);
		INIT_LIST_HEAD(&pdd->qpd.priv_queue_list);
		pdd->qpd.dqm = dev->dqm;
		list_add(&pdd->per_device_list, &p->per_device_data);
	}

	return pdd;
}
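/*
 * A plausible call order for attaching a process to a device (a sketch;
 * the actual callers live in the /dev/kfd ioctl paths and may differ):
 *
 *	mutex_lock(&p->mutex);
 *	pdd = kfd_create_process_device_data(dev, p);
 *	...
 *	pdd = kfd_bind_process_to_device(dev, p);	// binds the PASID
 *	mutex_unlock(&p->mutex);
 *
 * kfd_bind_process_to_device() expects the pdd to exist already and is
 * idempotent once pdd->bound is set.
 */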
/*
 * Direct the IOMMU to bind the process (specifically the pasid->mm)
 * to the device.
 * Unbinding occurs when the process dies or the device is removed.
 *
 * Assumes that the process lock is held.
 */
struct kfd_process_device *kfd_bind_process_to_device(struct kfd_dev *dev,
						      struct kfd_process *p)
{
	struct kfd_process_device *pdd;
	int err;

	pdd = kfd_get_process_device_data(dev, p);
	if (!pdd) {
		pr_err("Process device data doesn't exist\n");
		return ERR_PTR(-ENOMEM);
	}

	if (pdd->bound)
		return pdd;

	err = amd_iommu_bind_pasid(dev->pdev, p->pasid, p->lead_thread);
	if (err < 0)
		return ERR_PTR(err);

	pdd->bound = true;

	return pdd;
}

void kfd_unbind_process_from_device(struct kfd_dev *dev, unsigned int pasid)
{
	struct kfd_process *p;
	struct kfd_process_device *pdd;
	int idx, i;

	BUG_ON(dev == NULL);

	idx = srcu_read_lock(&kfd_processes_srcu);

	hash_for_each_rcu(kfd_processes_table, i, p, kfd_processes)
		if (p->pasid == pasid)
			break;

	srcu_read_unlock(&kfd_processes_srcu, idx);

	BUG_ON(p->pasid != pasid);

	mutex_lock(&p->mutex);

	pqm_uninit(&p->pqm);

	pdd = kfd_get_process_device_data(dev, p);

	/*
	 * Just mark pdd as unbound, because we still need it to call
	 * amd_iommu_unbind_pasid() when the process exits.
	 * We don't call amd_iommu_unbind_pasid() here
	 * because the IOMMU called us.
	 */
	if (pdd)
		pdd->bound = false;

	mutex_unlock(&p->mutex);
}

struct kfd_process_device *kfd_get_first_process_device_data(struct kfd_process *p)
{
	return list_first_entry(&p->per_device_data,
				struct kfd_process_device,
				per_device_list);
}

struct kfd_process_device *kfd_get_next_process_device_data(struct kfd_process *p,
						struct kfd_process_device *pdd)
{
	if (list_is_last(&pdd->per_device_list, &p->per_device_data))
		return NULL;
	return list_next_entry(pdd, per_device_list);
}

bool kfd_has_process_device_data(struct kfd_process *p)
{
	return !(list_empty(&p->per_device_data));
}
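/*
 * Example of walking every device a process is attached to with the
 * iterators above (a sketch; do_something() is a hypothetical helper):
 *
 *	struct kfd_process_device *pdd;
 *
 *	for (pdd = kfd_get_first_process_device_data(p); pdd != NULL;
 *	     pdd = kfd_get_next_process_device_data(p, pdd))
 *		do_something(pdd);
 *
 * Callers should check kfd_has_process_device_data(p) first:
 * kfd_get_first_process_device_data() uses list_first_entry(), which
 * returns an invalid pointer for an empty list rather than NULL.
 */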