xref: /linux/arch/powerpc/kvm/book3s_64_vio.c (revision 2b0cfa6e49566c8fa6759734cf821aa6e8271a9e)
// SPDX-License-Identifier: GPL-2.0-only
/*
 *
 * Copyright 2010 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 * Copyright 2011 David Gibson, IBM Corporation <dwg@au1.ibm.com>
 * Copyright 2016 Alexey Kardashevskiy, IBM Corporation <aik@au1.ibm.com>
 */

#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/highmem.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/sched/signal.h>
#include <linux/hugetlb.h>
#include <linux/list.h>
#include <linux/anon_inodes.h>
#include <linux/iommu.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/rcupdate_wait.h>

#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/book3s/64/mmu-hash.h>
#include <asm/hvcall.h>
#include <asm/synch.h>
#include <asm/ppc-opcode.h>
#include <asm/udbg.h>
#include <asm/iommu.h>
#include <asm/tce.h>
#include <asm/mmu_context.h>

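/* Find the guest TCE table registered for @liobn; NULL if there is none */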
static struct kvmppc_spapr_tce_table *kvmppc_find_table(struct kvm *kvm,
	unsigned long liobn)
{
	struct kvmppc_spapr_tce_table *stt;

	list_for_each_entry_lockless(stt, &kvm->arch.spapr_tce_tables, list)
		if (stt->liobn == liobn)
			return stt;

	return NULL;
}

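/* Number of system pages needed to store @iommu_pages 64-bit TCE entries */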
static unsigned long kvmppc_tce_pages(unsigned long iommu_pages)
{
	return ALIGN(iommu_pages * sizeof(u64), PAGE_SIZE) / PAGE_SIZE;
}

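/*
 * Pages charged against the locked_vm limit for a table: the TCE pages
 * themselves plus the descriptor and its array of page pointers.
 */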
static unsigned long kvmppc_stt_pages(unsigned long tce_pages)
{
	unsigned long stt_bytes = sizeof(struct kvmppc_spapr_tce_table) +
			(tce_pages * sizeof(struct page *));

	return tce_pages + ALIGN(stt_bytes, PAGE_SIZE) / PAGE_SIZE;
}

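/* RCU callback: drop the iommu_table reference and free the binding */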
static void kvm_spapr_tce_iommu_table_free(struct rcu_head *head)
{
	struct kvmppc_spapr_tce_iommu_table *stit = container_of(head,
			struct kvmppc_spapr_tce_iommu_table, rcu);

	iommu_tce_table_put(stit->tbl);

	kfree(stit);
}

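/* kref release: unlink the binding and free it after an RCU grace period */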
static void kvm_spapr_tce_liobn_put(struct kref *kref)
{
	struct kvmppc_spapr_tce_iommu_table *stit = container_of(kref,
			struct kvmppc_spapr_tce_iommu_table, kref);

	list_del_rcu(&stit->next);

	call_rcu(&stit->rcu, kvm_spapr_tce_iommu_table_free);
}

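/*
 * Drop every reference the VM's TCE tables hold on the hardware tables
 * of @grp.
 */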
void kvm_spapr_tce_release_iommu_group(struct kvm *kvm,
				       struct iommu_group *grp)
{
	int i;
	struct kvmppc_spapr_tce_table *stt;
	struct kvmppc_spapr_tce_iommu_table *stit, *tmp;
	struct iommu_table_group *table_group = NULL;

	rcu_read_lock();
	list_for_each_entry_rcu(stt, &kvm->arch.spapr_tce_tables, list) {

		table_group = iommu_group_get_iommudata(grp);
		if (WARN_ON(!table_group))
			continue;

		list_for_each_entry_safe(stit, tmp, &stt->iommu_tables, next) {
			for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
				if (table_group->tables[i] != stit->tbl)
					continue;

				kref_put(&stit->kref, kvm_spapr_tce_liobn_put);
			}
		}
		cond_resched_rcu();
	}
	rcu_read_unlock();
}

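/*
 * Attach a hardware DMA window from @grp to the guest TCE table referred
 * to by @tablefd: check that the window geometry is compatible, then take
 * a reference on the iommu_table (or bump the refcount of an existing
 * binding).
 */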
long kvm_spapr_tce_attach_iommu_group(struct kvm *kvm, int tablefd,
				      struct iommu_group *grp)
{
	struct kvmppc_spapr_tce_table *stt = NULL;
	bool found = false;
	struct iommu_table *tbl = NULL;
	struct iommu_table_group *table_group;
	long i;
	struct kvmppc_spapr_tce_iommu_table *stit;
	struct fd f;

	f = fdget(tablefd);
	if (!f.file)
		return -EBADF;

	rcu_read_lock();
	list_for_each_entry_rcu(stt, &kvm->arch.spapr_tce_tables, list) {
		if (stt == f.file->private_data) {
			found = true;
			break;
		}
	}
	rcu_read_unlock();

	fdput(f);

	if (!found)
		return -EINVAL;

	table_group = iommu_group_get_iommudata(grp);
	if (WARN_ON(!table_group))
		return -EFAULT;

	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
		struct iommu_table *tbltmp = table_group->tables[i];

		if (!tbltmp)
			continue;
		/* Make sure hardware table parameters are compatible */
		if ((tbltmp->it_page_shift <= stt->page_shift) &&
				(tbltmp->it_offset << tbltmp->it_page_shift ==
				 stt->offset << stt->page_shift) &&
				(tbltmp->it_size << tbltmp->it_page_shift >=
				 stt->size << stt->page_shift)) {
			/*
			 * Reference the table to avoid races with
			 * add/remove DMA windows.
			 */
			tbl = iommu_tce_table_get(tbltmp);
			break;
		}
	}
	if (!tbl)
		return -EINVAL;

	rcu_read_lock();
	list_for_each_entry_rcu(stit, &stt->iommu_tables, next) {
		if (tbl != stit->tbl)
			continue;

		if (!kref_get_unless_zero(&stit->kref)) {
			/* stit is being destroyed */
			iommu_tce_table_put(tbl);
			rcu_read_unlock();
			return -ENOTTY;
		}
		/*
		 * The table is already known to this KVM, we just increased
		 * its KVM reference counter and can return.
		 */
		rcu_read_unlock();
		return 0;
	}
	rcu_read_unlock();

	stit = kzalloc(sizeof(*stit), GFP_KERNEL);
	if (!stit) {
		iommu_tce_table_put(tbl);
		return -ENOMEM;
	}

	stit->tbl = tbl;
	kref_init(&stit->kref);

	list_add_rcu(&stit->next, &stt->iommu_tables);

	return 0;
}

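/* RCU callback: free the table's backing pages and the descriptor itself */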
static void release_spapr_tce_table(struct rcu_head *head)
{
	struct kvmppc_spapr_tce_table *stt = container_of(head,
			struct kvmppc_spapr_tce_table, rcu);
	unsigned long i, npages = kvmppc_tce_pages(stt->size);

	for (i = 0; i < npages; i++)
		if (stt->pages[i])
			__free_page(stt->pages[i]);

	kfree(stt);
}

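/* Return the backing page for TCE page @sttpage, allocating it on demand */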
static struct page *kvm_spapr_get_tce_page(struct kvmppc_spapr_tce_table *stt,
		unsigned long sttpage)
{
	struct page *page = stt->pages[sttpage];

	if (page)
		return page;

	mutex_lock(&stt->alloc_lock);
	page = stt->pages[sttpage];
	if (!page) {
		page = alloc_page(GFP_KERNEL | __GFP_ZERO);
		WARN_ON_ONCE(!page);
		if (page)
			stt->pages[sttpage] = page;
	}
	mutex_unlock(&stt->alloc_lock);

	return page;
}

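/* Fault handler for mmap()ed TCE tables: get the backing page on demand */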
static vm_fault_t kvm_spapr_tce_fault(struct vm_fault *vmf)
{
	struct kvmppc_spapr_tce_table *stt = vmf->vma->vm_file->private_data;
	struct page *page;

	if (vmf->pgoff >= kvmppc_tce_pages(stt->size))
		return VM_FAULT_SIGBUS;

	page = kvm_spapr_get_tce_page(stt, vmf->pgoff);
	if (!page)
		return VM_FAULT_OOM;

	get_page(page);
	vmf->page = page;
	return 0;
}

static const struct vm_operations_struct kvm_spapr_tce_vm_ops = {
	.fault = kvm_spapr_tce_fault,
};

static int kvm_spapr_tce_mmap(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_ops = &kvm_spapr_tce_vm_ops;
	return 0;
}

static int kvm_spapr_tce_release(struct inode *inode, struct file *filp)
{
	struct kvmppc_spapr_tce_table *stt = filp->private_data;
	struct kvmppc_spapr_tce_iommu_table *stit, *tmp;
	struct kvm *kvm = stt->kvm;

	mutex_lock(&kvm->lock);
	list_del_rcu(&stt->list);
	mutex_unlock(&kvm->lock);

	list_for_each_entry_safe(stit, tmp, &stt->iommu_tables, next) {
		WARN_ON(!kref_read(&stit->kref));
		while (1) {
			if (kref_put(&stit->kref, kvm_spapr_tce_liobn_put))
				break;
		}
	}

	account_locked_vm(kvm->mm,
		kvmppc_stt_pages(kvmppc_tce_pages(stt->size)), false);

	kvm_put_kvm(stt->kvm);

	call_rcu(&stt->rcu, release_spapr_tce_table);

	return 0;
}

static const struct file_operations kvm_spapr_tce_fops = {
	.mmap           = kvm_spapr_tce_mmap,
	.release	= kvm_spapr_tce_release,
};

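/*
 * Create a guest TCE table (the KVM_CREATE_SPAPR_TCE_64 ioctl path):
 * validate the requested window, account the locked memory, allocate the
 * descriptor and expose it to userspace via an anonymous file descriptor.
 */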
int kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
				  struct kvm_create_spapr_tce_64 *args)
{
	struct kvmppc_spapr_tce_table *stt = NULL;
	struct kvmppc_spapr_tce_table *siter;
	struct mm_struct *mm = kvm->mm;
	unsigned long npages;
	int ret;

	if (!args->size || args->page_shift < 12 || args->page_shift > 34 ||
		(args->offset + args->size > (ULLONG_MAX >> args->page_shift)))
		return -EINVAL;

	npages = kvmppc_tce_pages(args->size);
	ret = account_locked_vm(mm, kvmppc_stt_pages(npages), true);
	if (ret)
		return ret;

	ret = -ENOMEM;
	stt = kzalloc(struct_size(stt, pages, npages), GFP_KERNEL | __GFP_NOWARN);
	if (!stt)
		goto fail_acct;

	stt->liobn = args->liobn;
	stt->page_shift = args->page_shift;
	stt->offset = args->offset;
	stt->size = args->size;
	stt->kvm = kvm;
	mutex_init(&stt->alloc_lock);
	INIT_LIST_HEAD_RCU(&stt->iommu_tables);

	mutex_lock(&kvm->lock);

	/* Check this LIOBN hasn't been previously allocated */
	ret = 0;
	list_for_each_entry(siter, &kvm->arch.spapr_tce_tables, list) {
		if (siter->liobn == args->liobn) {
			ret = -EBUSY;
			break;
		}
	}

	kvm_get_kvm(kvm);
	if (!ret)
		ret = anon_inode_getfd("kvm-spapr-tce", &kvm_spapr_tce_fops,
				       stt, O_RDWR | O_CLOEXEC);

	if (ret >= 0)
		list_add_rcu(&stt->list, &kvm->arch.spapr_tce_tables);
	else
		kvm_put_kvm_no_destroy(kvm);

	mutex_unlock(&kvm->lock);

	if (ret >= 0)
		return ret;

	kfree(stt);
 fail_acct:
	account_locked_vm(mm, kvmppc_stt_pages(npages), false);
	return ret;
}

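/* Translate the guest physical address in @tce to a host userspace address */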
static long kvmppc_tce_to_ua(struct kvm *kvm, unsigned long tce,
		unsigned long *ua)
{
	unsigned long gfn = tce >> PAGE_SHIFT;
	struct kvm_memory_slot *memslot;

	memslot = __gfn_to_memslot(kvm_memslots(kvm), gfn);
	if (!memslot)
		return -EINVAL;

	*ua = __gfn_to_hva_memslot(memslot, gfn) |
		(tce & ~(PAGE_MASK | TCE_PCI_READ | TCE_PCI_WRITE));

	return 0;
}

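/*
 * Validate a guest TCE before it is stored: values with no permission
 * bits ("poison") are always allowed; otherwise the target page must be
 * a valid guest address preregistered for every attached hardware table.
 */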
static long kvmppc_tce_validate(struct kvmppc_spapr_tce_table *stt,
		unsigned long tce)
{
	unsigned long gpa = tce & ~(TCE_PCI_READ | TCE_PCI_WRITE);
	enum dma_data_direction dir = iommu_tce_direction(tce);
	struct kvmppc_spapr_tce_iommu_table *stit;
	unsigned long ua = 0;

	/* Allow userspace to poison TCE table */
	if (dir == DMA_NONE)
		return H_SUCCESS;

	if (iommu_tce_check_gpa(stt->page_shift, gpa))
		return H_TOO_HARD;

	if (kvmppc_tce_to_ua(stt->kvm, tce, &ua))
		return H_TOO_HARD;

	rcu_read_lock();
	list_for_each_entry_rcu(stit, &stt->iommu_tables, next) {
		unsigned long hpa = 0;
		struct mm_iommu_table_group_mem_t *mem;
		long shift = stit->tbl->it_page_shift;

		mem = mm_iommu_lookup(stt->kvm->mm, ua, 1ULL << shift);
		if (!mem || mm_iommu_ua_to_hpa(mem, ua, shift, &hpa)) {
			rcu_read_unlock();
			return H_TOO_HARD;
		}
	}
	rcu_read_unlock();

	return H_SUCCESS;
}

/*
 * Handles TCE requests for emulated devices.
 * Puts guest TCE values to the table and expects user space to convert them.
 * Cannot fail so kvmppc_tce_validate must be called before it.
 */
static void kvmppc_tce_put(struct kvmppc_spapr_tce_table *stt,
		unsigned long idx, unsigned long tce)
{
	struct page *page;
	u64 *tbl;
	unsigned long sttpage;

	idx -= stt->offset;
	sttpage = idx / TCES_PER_PAGE;
	page = stt->pages[sttpage];

	if (!page) {
		/* We allow any TCE, not just with read|write permissions */
		if (!tce)
			return;

		page = kvm_spapr_get_tce_page(stt, sttpage);
		if (!page)
			return;
	}
	tbl = page_to_virt(page);

	tbl[idx % TCES_PER_PAGE] = tce;
}

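/* Clear the hardware TCEs (all IOMMU subpages) backing guest entry @entry */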
static void kvmppc_clear_tce(struct mm_struct *mm, struct kvmppc_spapr_tce_table *stt,
		struct iommu_table *tbl, unsigned long entry)
{
	unsigned long i;
	unsigned long subpages = 1ULL << (stt->page_shift - tbl->it_page_shift);
	unsigned long io_entry = entry << (stt->page_shift - tbl->it_page_shift);

	for (i = 0; i < subpages; ++i) {
		unsigned long hpa = 0;
		enum dma_data_direction dir = DMA_NONE;

		iommu_tce_xchg_no_kill(mm, tbl, io_entry + i, &hpa, &dir);
	}
}

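/*
 * Decrement the "mapped" count of the preregistered memory region behind
 * @entry and clear the recorded userspace address.
 */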
static long kvmppc_tce_iommu_mapped_dec(struct kvm *kvm,
		struct iommu_table *tbl, unsigned long entry)
{
	struct mm_iommu_table_group_mem_t *mem = NULL;
	const unsigned long pgsize = 1ULL << tbl->it_page_shift;
	__be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY_RO(tbl, entry);

	if (!pua)
		return H_SUCCESS;

	mem = mm_iommu_lookup(kvm->mm, be64_to_cpu(*pua), pgsize);
	if (!mem)
		return H_TOO_HARD;

	mm_iommu_mapped_dec(mem);

	*pua = cpu_to_be64(0);

	return H_SUCCESS;
}

static long kvmppc_tce_iommu_do_unmap(struct kvm *kvm,
		struct iommu_table *tbl, unsigned long entry)
{
	enum dma_data_direction dir = DMA_NONE;
	unsigned long hpa = 0;
	long ret;

	if (WARN_ON_ONCE(iommu_tce_xchg_no_kill(kvm->mm, tbl, entry, &hpa,
					&dir)))
		return H_TOO_HARD;

	if (dir == DMA_NONE)
		return H_SUCCESS;

	ret = kvmppc_tce_iommu_mapped_dec(kvm, tbl, entry);
	if (ret != H_SUCCESS)
		iommu_tce_xchg_no_kill(kvm->mm, tbl, entry, &hpa, &dir);

	return ret;
}

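/* Unmap one guest TCE entry, covering every IOMMU subpage it spans */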
static long kvmppc_tce_iommu_unmap(struct kvm *kvm,
		struct kvmppc_spapr_tce_table *stt, struct iommu_table *tbl,
		unsigned long entry)
{
	unsigned long i, ret = H_SUCCESS;
	unsigned long subpages = 1ULL << (stt->page_shift - tbl->it_page_shift);
	unsigned long io_entry = entry * subpages;

	for (i = 0; i < subpages; ++i) {
		ret = kvmppc_tce_iommu_do_unmap(kvm, tbl, io_entry + i);
		if (ret != H_SUCCESS)
			break;
	}

	iommu_tce_kill(tbl, io_entry, subpages);

	return ret;
}

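/*
 * Map one IOMMU page: look up the preregistered region for @ua, take a
 * mapped reference, install the host physical address into @tbl and
 * remember @ua so the entry can be torn down later.
 */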
static long kvmppc_tce_iommu_do_map(struct kvm *kvm, struct iommu_table *tbl,
		unsigned long entry, unsigned long ua,
		enum dma_data_direction dir)
{
	long ret;
	unsigned long hpa;
	__be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry);
	struct mm_iommu_table_group_mem_t *mem;

	if (!pua)
		/* it_userspace allocation might be delayed */
		return H_TOO_HARD;

	mem = mm_iommu_lookup(kvm->mm, ua, 1ULL << tbl->it_page_shift);
	if (!mem)
		/* This only handles v2 IOMMU type, v1 is handled via ioctl() */
		return H_TOO_HARD;

	if (WARN_ON_ONCE(mm_iommu_ua_to_hpa(mem, ua, tbl->it_page_shift, &hpa)))
		return H_TOO_HARD;

	if (mm_iommu_mapped_inc(mem))
		return H_TOO_HARD;

	ret = iommu_tce_xchg_no_kill(kvm->mm, tbl, entry, &hpa, &dir);
	if (WARN_ON_ONCE(ret)) {
		mm_iommu_mapped_dec(mem);
		return H_TOO_HARD;
	}

	if (dir != DMA_NONE)
		kvmppc_tce_iommu_mapped_dec(kvm, tbl, entry);

	*pua = cpu_to_be64(ua);

	return 0;
}

static long kvmppc_tce_iommu_map(struct kvm *kvm,
		struct kvmppc_spapr_tce_table *stt, struct iommu_table *tbl,
		unsigned long entry, unsigned long ua,
		enum dma_data_direction dir)
{
	unsigned long i, pgoff, ret = H_SUCCESS;
	unsigned long subpages = 1ULL << (stt->page_shift - tbl->it_page_shift);
	unsigned long io_entry = entry * subpages;

	for (i = 0, pgoff = 0; i < subpages;
			++i, pgoff += IOMMU_PAGE_SIZE(tbl)) {

		ret = kvmppc_tce_iommu_do_map(kvm, tbl,
				io_entry + i, ua + pgoff, dir);
		if (ret != H_SUCCESS)
			break;
	}

	iommu_tce_kill(tbl, io_entry, subpages);

	return ret;
}

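/*
 * H_PUT_TCE hcall: validate and store a single TCE, updating the emulated
 * table and every hardware table attached to this LIOBN.
 */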
long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
		      unsigned long ioba, unsigned long tce)
{
	struct kvmppc_spapr_tce_table *stt;
	long ret, idx;
	struct kvmppc_spapr_tce_iommu_table *stit;
	unsigned long entry, ua = 0;
	enum dma_data_direction dir;

	/* udbg_printf("H_PUT_TCE(): liobn=0x%lx ioba=0x%lx, tce=0x%lx\n", */
	/* 	    liobn, ioba, tce); */

	stt = kvmppc_find_table(vcpu->kvm, liobn);
	if (!stt)
		return H_TOO_HARD;

	ret = kvmppc_ioba_validate(stt, ioba, 1);
	if (ret != H_SUCCESS)
		return ret;

	idx = srcu_read_lock(&vcpu->kvm->srcu);

	ret = kvmppc_tce_validate(stt, tce);
	if (ret != H_SUCCESS)
		goto unlock_exit;

	dir = iommu_tce_direction(tce);

	if ((dir != DMA_NONE) && kvmppc_tce_to_ua(vcpu->kvm, tce, &ua)) {
		ret = H_PARAMETER;
		goto unlock_exit;
	}

	entry = ioba >> stt->page_shift;

	list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
		if (dir == DMA_NONE)
			ret = kvmppc_tce_iommu_unmap(vcpu->kvm, stt,
					stit->tbl, entry);
		else
			ret = kvmppc_tce_iommu_map(vcpu->kvm, stt, stit->tbl,
					entry, ua, dir);

		if (ret != H_SUCCESS) {
			kvmppc_clear_tce(vcpu->kvm->mm, stt, stit->tbl, entry);
			goto unlock_exit;
		}
	}
	kvmppc_tce_put(stt, entry, tce);

unlock_exit:
	srcu_read_unlock(&vcpu->kvm->srcu, idx);

	return ret;
}
EXPORT_SYMBOL_GPL(kvmppc_h_put_tce);

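/*
 * H_PUT_TCE_INDIRECT hcall: store up to 512 TCEs read from a 4K-aligned
 * list page provided by the guest.
 */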
long kvmppc_h_put_tce_indirect(struct kvm_vcpu *vcpu,
		unsigned long liobn, unsigned long ioba,
		unsigned long tce_list, unsigned long npages)
{
	struct kvmppc_spapr_tce_table *stt;
	long i, ret = H_SUCCESS, idx;
	unsigned long entry, ua = 0;
	u64 __user *tces;
	u64 tce;
	struct kvmppc_spapr_tce_iommu_table *stit;

	stt = kvmppc_find_table(vcpu->kvm, liobn);
	if (!stt)
		return H_TOO_HARD;

	entry = ioba >> stt->page_shift;
	/*
	 * The SPAPR spec says that the maximum size of the list is 512 TCEs,
	 * so the whole list fits in a single 4K page.
	 */
	if (npages > 512)
		return H_PARAMETER;

	if (tce_list & (SZ_4K - 1))
		return H_PARAMETER;

	ret = kvmppc_ioba_validate(stt, ioba, npages);
	if (ret != H_SUCCESS)
		return ret;

	idx = srcu_read_lock(&vcpu->kvm->srcu);
	if (kvmppc_tce_to_ua(vcpu->kvm, tce_list, &ua)) {
		ret = H_TOO_HARD;
		goto unlock_exit;
	}
	tces = (u64 __user *) ua;

	for (i = 0; i < npages; ++i) {
		if (get_user(tce, tces + i)) {
			ret = H_TOO_HARD;
			goto unlock_exit;
		}
		tce = be64_to_cpu(tce);

		ret = kvmppc_tce_validate(stt, tce);
		if (ret != H_SUCCESS)
			goto unlock_exit;
	}

	for (i = 0; i < npages; ++i) {
		/*
		 * This looks unsafe, because we validate, then regrab
		 * the TCE from userspace which could have been changed by
		 * another thread.
		 *
		 * But it actually is safe, because the relevant checks will be
		 * re-executed in the following code.  If userspace tries to
		 * change this dodgily it will result in a messier failure mode
		 * but won't threaten the host.
		 */
		if (get_user(tce, tces + i)) {
			ret = H_TOO_HARD;
			goto unlock_exit;
		}
		tce = be64_to_cpu(tce);

		if (kvmppc_tce_to_ua(vcpu->kvm, tce, &ua)) {
			ret = H_PARAMETER;
			goto unlock_exit;
		}

		list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
			ret = kvmppc_tce_iommu_map(vcpu->kvm, stt,
					stit->tbl, entry + i, ua,
					iommu_tce_direction(tce));

			if (ret != H_SUCCESS) {
				kvmppc_clear_tce(vcpu->kvm->mm, stt, stit->tbl,
						 entry + i);
				goto unlock_exit;
			}
		}

		kvmppc_tce_put(stt, entry + i, tce);
	}

unlock_exit:
	srcu_read_unlock(&vcpu->kvm->srcu, idx);

	return ret;
}
EXPORT_SYMBOL_GPL(kvmppc_h_put_tce_indirect);

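/*
 * H_STUFF_TCE hcall: set @npages consecutive entries to @tce_value,
 * tearing down any corresponding hardware mappings.
 */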
long kvmppc_h_stuff_tce(struct kvm_vcpu *vcpu,
		unsigned long liobn, unsigned long ioba,
		unsigned long tce_value, unsigned long npages)
{
	struct kvmppc_spapr_tce_table *stt;
	long i, ret;
	struct kvmppc_spapr_tce_iommu_table *stit;

	stt = kvmppc_find_table(vcpu->kvm, liobn);
	if (!stt)
		return H_TOO_HARD;

	ret = kvmppc_ioba_validate(stt, ioba, npages);
	if (ret != H_SUCCESS)
		return ret;

	/* Check permission bits only, to let userspace poison the TCE for debug */
	if (tce_value & (TCE_PCI_WRITE | TCE_PCI_READ))
		return H_PARAMETER;

	list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
		unsigned long entry = ioba >> stt->page_shift;

		for (i = 0; i < npages; ++i) {
			ret = kvmppc_tce_iommu_unmap(vcpu->kvm, stt,
					stit->tbl, entry + i);

			if (ret == H_SUCCESS)
				continue;

			if (ret == H_TOO_HARD)
				return ret;

			WARN_ON_ONCE(1);
			kvmppc_clear_tce(vcpu->kvm->mm, stt, stit->tbl, entry + i);
		}
	}

	for (i = 0; i < npages; ++i, ioba += (1ULL << stt->page_shift))
		kvmppc_tce_put(stt, ioba >> stt->page_shift, tce_value);

	return ret;
}
EXPORT_SYMBOL_GPL(kvmppc_h_stuff_tce);

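/* H_GET_TCE hcall: return the current TCE value for @ioba in GPR4 */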
long kvmppc_h_get_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
		      unsigned long ioba)
{
	struct kvmppc_spapr_tce_table *stt;
	long ret;
	unsigned long idx;
	struct page *page;
	u64 *tbl;

	stt = kvmppc_find_table(vcpu->kvm, liobn);
	if (!stt)
		return H_TOO_HARD;

	ret = kvmppc_ioba_validate(stt, ioba, 1);
	if (ret != H_SUCCESS)
		return ret;

	idx = (ioba >> stt->page_shift) - stt->offset;
	page = stt->pages[idx / TCES_PER_PAGE];
	if (!page) {
		kvmppc_set_gpr(vcpu, 4, 0);
		return H_SUCCESS;
	}
	tbl = (u64 *)page_address(page);

	kvmppc_set_gpr(vcpu, 4, tbl[idx % TCES_PER_PAGE]);

	return H_SUCCESS;
}
EXPORT_SYMBOL_GPL(kvmppc_h_get_tce);