xref: /linux/arch/powerpc/platforms/pseries/lpar.c (revision ca55b2fef3a9373fcfc30f82fd26bc7fccbda732)
/*
 * pSeries_lpar.c
 * Copyright (C) 2001 Todd Inglett, IBM Corporation
 *
 * pSeries LPAR support.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */

/* Define DEBUG to enable debugging of low-level hash table routines - careful! */
#undef DEBUG

#include <linux/kernel.h>
#include <linux/dma-mapping.h>
#include <linux/console.h>
#include <linux/export.h>
#include <linux/jump_label.h>
#include <asm/processor.h>
#include <asm/mmu.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/machdep.h>
#include <asm/mmu_context.h>
#include <asm/iommu.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include <asm/prom.h>
#include <asm/cputable.h>
#include <asm/udbg.h>
#include <asm/smp.h>
#include <asm/trace.h>
#include <asm/firmware.h>
#include <asm/plpar_wrappers.h>
#include <asm/kexec.h>
#include <asm/fadump.h>

#include "pseries.h"

/* Flag bits for H_BULK_REMOVE */
#define HBR_REQUEST	0x4000000000000000UL
#define HBR_RESPONSE	0x8000000000000000UL
#define HBR_END		0xc000000000000000UL
#define HBR_AVPN	0x0200000000000000UL
#define HBR_ANDCOND	0x0100000000000000UL

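/*
 * As used below, H_BULK_REMOVE consumes up to four (control, AVPN)
 * parameter pairs per call: each control word is HBR_REQUEST | HBR_AVPN
 * or'ed with the HPTE slot number, the following parameter carries the
 * encoded AVPN to match, and a trailing HBR_END control word terminates
 * a partially filled batch.
 */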

/* in hvCall.S */
EXPORT_SYMBOL(plpar_hcall);
EXPORT_SYMBOL(plpar_hcall9);
EXPORT_SYMBOL(plpar_hcall_norets);

void vpa_init(int cpu)
{
	int hwcpu = get_hard_smp_processor_id(cpu);
	unsigned long addr;
	long ret;
	struct paca_struct *pp;
	struct dtl_entry *dtl;

	/*
	 * The spec says it "may be problematic" if CPU x registers the VPA of
	 * CPU y. We should never do that, but wail if we ever do.
	 */
	WARN_ON(cpu != smp_processor_id());

	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		lppaca_of(cpu).vmxregs_in_use = 1;

	if (cpu_has_feature(CPU_FTR_ARCH_207S))
		lppaca_of(cpu).ebb_regs_in_use = 1;

	addr = __pa(&lppaca_of(cpu));
	ret = register_vpa(hwcpu, addr);

	if (ret) {
		pr_err("WARNING: VPA registration for cpu %d (hw %d) of area "
		       "%lx failed with %ld\n", cpu, hwcpu, addr, ret);
		return;
	}
	/*
	 * PAPR calls this feature "SLB-Buffer", but firmware never reports
	 * it; all SPLPAR partitions support the SLB shadow buffer.
	 */
	addr = __pa(paca[cpu].slb_shadow_ptr);
	if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
		ret = register_slb_shadow(hwcpu, addr);
		if (ret)
			pr_err("WARNING: SLB shadow buffer registration for "
			       "cpu %d (hw %d) of area %lx failed with %ld\n",
			       cpu, hwcpu, addr, ret);
	}

	/*
	 * Register dispatch trace log, if one has been allocated.
	 */
	pp = &paca[cpu];
	dtl = pp->dispatch_log;
	if (dtl) {
		pp->dtl_ridx = 0;
		pp->dtl_curr = dtl;
		lppaca_of(cpu).dtl_idx = 0;

		/* hypervisor reads buffer length from this field */
		dtl->enqueue_to_dispatch_time = cpu_to_be32(DISPATCH_LOG_BYTES);
		ret = register_dtl(hwcpu, __pa(dtl));
		if (ret)
			pr_err("WARNING: DTL registration of cpu %d (hw %d) "
			       "failed with %ld\n", cpu, hwcpu, ret);
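		/* 2 == DTL_LOG_PREEMPT: log preemption events only */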
		lppaca_of(cpu).dtl_enable_mask = 2;
	}
}

static long pSeries_lpar_hpte_insert(unsigned long hpte_group,
				     unsigned long vpn, unsigned long pa,
				     unsigned long rflags, unsigned long vflags,
				     int psize, int apsize, int ssize)
{
	unsigned long lpar_rc;
	unsigned long flags;
	unsigned long slot;
	unsigned long hpte_v, hpte_r;

	if (!(vflags & HPTE_V_BOLTED))
		pr_devel("hpte_insert(group=%lx, vpn=%016lx, "
			 "pa=%016lx, rflags=%lx, vflags=%lx, psize=%d)\n",
			 hpte_group, vpn, pa, rflags, vflags, psize);

	hpte_v = hpte_encode_v(vpn, psize, apsize, ssize) | vflags | HPTE_V_VALID;
	hpte_r = hpte_encode_r(pa, psize, apsize) | rflags;

	if (!(vflags & HPTE_V_BOLTED))
		pr_devel(" hpte_v=%016lx, hpte_r=%016lx\n", hpte_v, hpte_r);

	/* Now fill in the actual HPTE */
	/* Set CEC cookie to 0         */
	/* Zero page = 0               */
	/* I-cache Invalidate = 0      */
	/* I-cache synchronize = 0     */
	/* Exact = 0                   */
	flags = 0;

	/*
	 * Make pHyp happy: it does not accept the Memory coherence (M)
	 * bit on cache-inhibited, non-write-through mappings, so clear it.
	 */
	if ((rflags & _PAGE_NO_CACHE) && !(rflags & _PAGE_WRITETHRU))
		hpte_r &= ~HPTE_R_M;

	if (firmware_has_feature(FW_FEATURE_XCMO) && !(hpte_r & HPTE_R_N))
		flags |= H_COALESCE_CAND;

	lpar_rc = plpar_pte_enter(flags, hpte_group, hpte_v, hpte_r, &slot);
	if (unlikely(lpar_rc == H_PTEG_FULL)) {
		if (!(vflags & HPTE_V_BOLTED))
			pr_devel(" full\n");
		return -1;
	}

	/*
	 * Since we try and ioremap PHBs we don't own, the pte insert
	 * will fail. However we must catch the failure in hash_page
	 * or we will loop forever, so return -2 in this case.
	 */
	if (unlikely(lpar_rc != H_SUCCESS)) {
		if (!(vflags & HPTE_V_BOLTED))
			pr_devel(" lpar err %ld\n", lpar_rc);
		return -2;
	}
	if (!(vflags & HPTE_V_BOLTED))
		pr_devel(" -> slot: %lu\n", slot & 7);

	/* Because of iSeries, we have to pass down the secondary
	 * bucket bit here as well
	 */
	return (slot & 7) | (!!(vflags & HPTE_V_SECONDARY) << 3);
}

static DEFINE_SPINLOCK(pSeries_lpar_tlbie_lock);

static long pSeries_lpar_hpte_remove(unsigned long hpte_group)
{
	unsigned long slot_offset;
	unsigned long lpar_rc;
	int i;
	unsigned long dummy1, dummy2;

	/* pick a random slot to start at */
	slot_offset = mftb() & 0x7;

	for (i = 0; i < HPTES_PER_GROUP; i++) {

		/* don't remove a bolted entry */
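		/* (0x1UL << 4) is HPTE_V_BOLTED: with H_ANDCOND the
		 * hypervisor refuses the remove (H_NOT_FOUND) if any of
		 * these bits are set in the HPTE.
		 */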
		lpar_rc = plpar_pte_remove(H_ANDCOND, hpte_group + slot_offset,
					   (0x1UL << 4), &dummy1, &dummy2);
		if (lpar_rc == H_SUCCESS)
			return i;

		/*
		 * The test for adjunct partition is performed before the
		 * ANDCOND test.  H_RESOURCE may be returned, so we need to
		 * check for that as well.
		 */
		BUG_ON(lpar_rc != H_NOT_FOUND && lpar_rc != H_RESOURCE);

		slot_offset++;
		slot_offset &= 0x7;
	}

	return -1;
}

static void pSeries_lpar_hptab_clear(void)
{
	unsigned long size_bytes = 1UL << ppc64_pft_size;
	unsigned long hpte_count = size_bytes >> 4;
	struct {
		unsigned long pteh;
		unsigned long ptel;
	} ptes[4];
	long lpar_rc;
	unsigned long i, j;

	/*
	 * Read the table in batches of 4 entries and invalidate only the
	 * valid entries that are not in the VRMA. hpte_count is always a
	 * multiple of 4.
	 */
	for (i = 0; i < hpte_count; i += 4) {
		lpar_rc = plpar_pte_read_4_raw(0, i, (void *)ptes);
		if (lpar_rc != H_SUCCESS)
			continue;
		for (j = 0; j < 4; j++) {
			if ((ptes[j].pteh & HPTE_V_VRMA_MASK) ==
				HPTE_V_VRMA_MASK)
				continue;
			if (ptes[j].pteh & HPTE_V_VALID)
				plpar_pte_remove_raw(0, i + j, 0,
					&(ptes[j].pteh), &(ptes[j].ptel));
		}
	}

#ifdef __LITTLE_ENDIAN__
	/*
	 * Reset exceptions to big endian.
	 *
	 * FIXME this is a hack for kexec, we need to reset the exception
	 * endian before starting the new kernel and this is a convenient place
	 * to do it.
	 *
	 * This is also called on boot when a fadump happens. In that case we
	 * must not change the exception endian mode.
	 */
	if (firmware_has_feature(FW_FEATURE_SET_MODE) && !is_fadump_active()) {
		long rc;

		rc = pseries_big_endian_exceptions();
		/*
		 * At this point it is unlikely panic() will get anything
		 * out to the user, but at least this will stop us from
		 * continuing on further and creating an even more
		 * difficult to debug situation.
		 *
		 * There is a known problem when kdump'ing, if cpus are offline
		 * the above call will fail. Rather than panicking again, keep
		 * going and hope the kdump kernel is also little endian, which
		 * it usually is.
		 */
		if (rc && !kdump_in_progress())
			panic("Could not enable big endian exceptions");
	}
#endif
}

/*
 * NOTE: for updatepp ops we are fortunate that the linux "newpp" bits and
 * the low 3 bits of flags happen to line up.  So no transform is needed.
 * We can probably optimize here and assume the high bits of newpp are
 * already zero.  For now I am paranoid.
 */
static long pSeries_lpar_hpte_updatepp(unsigned long slot,
				       unsigned long newpp,
				       unsigned long vpn,
				       int psize, int apsize,
				       int ssize, unsigned long inv_flags)
{
	unsigned long lpar_rc;
	unsigned long flags = (newpp & 7) | H_AVPN;
	unsigned long want_v;

	want_v = hpte_encode_avpn(vpn, psize, ssize);

	pr_devel("    update: avpnv=%016lx, hash=%016lx, f=%lx, psize: %d ...",
		 want_v, slot, flags, psize);

	lpar_rc = plpar_pte_protect(flags, slot, want_v);

	if (lpar_rc == H_NOT_FOUND) {
		pr_devel("not found !\n");
		return -1;
	}

	pr_devel("ok\n");

	BUG_ON(lpar_rc != H_SUCCESS);

	return 0;
}

static unsigned long pSeries_lpar_hpte_getword0(unsigned long slot)
{
	unsigned long dword0;
	unsigned long lpar_rc;
	unsigned long dummy_word1;
	unsigned long flags;

	/* Read 1 pte at a time                        */
	/* Do not need RPN to logical page translation */
	/* No cross CEC PFT access                     */
	flags = 0;

	lpar_rc = plpar_pte_read(flags, slot, &dword0, &dummy_word1);

	BUG_ON(lpar_rc != H_SUCCESS);

	return dword0;
}

static long pSeries_lpar_hpte_find(unsigned long vpn, int psize, int ssize)
{
	unsigned long hash;
	unsigned long i;
	long slot;
	unsigned long want_v, hpte_v;

	hash = hpt_hash(vpn, mmu_psize_defs[psize].shift, ssize);
	want_v = hpte_encode_avpn(vpn, psize, ssize);

	/* Bolted entries are always in the primary group */
	slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
	for (i = 0; i < HPTES_PER_GROUP; i++) {
		hpte_v = pSeries_lpar_hpte_getword0(slot);

		if (HPTE_V_COMPARE(hpte_v, want_v) && (hpte_v & HPTE_V_VALID))
			/* HPTE matches */
			return slot;
		++slot;
	}

	return -1;
}

static void pSeries_lpar_hpte_updateboltedpp(unsigned long newpp,
					     unsigned long ea,
					     int psize, int ssize)
{
	unsigned long vpn;
	unsigned long lpar_rc, slot, vsid, flags;

	vsid = get_kernel_vsid(ea, ssize);
	vpn = hpt_vpn(ea, vsid, ssize);

	slot = pSeries_lpar_hpte_find(vpn, psize, ssize);
	BUG_ON(slot == -1);

	flags = newpp & 7;
	lpar_rc = plpar_pte_protect(flags, slot, 0);

	BUG_ON(lpar_rc != H_SUCCESS);
}

static void pSeries_lpar_hpte_invalidate(unsigned long slot, unsigned long vpn,
					 int psize, int apsize,
					 int ssize, int local)
{
	unsigned long want_v;
	unsigned long lpar_rc;
	unsigned long dummy1, dummy2;

	pr_devel("    inval : slot=%lx, vpn=%016lx, psize: %d, local: %d\n",
		 slot, vpn, psize, local);

	want_v = hpte_encode_avpn(vpn, psize, ssize);
	lpar_rc = plpar_pte_remove(H_AVPN, slot, want_v, &dummy1, &dummy2);
	if (lpar_rc == H_NOT_FOUND)
		return;

	BUG_ON(lpar_rc != H_SUCCESS);
}

/*
 * Limit iterations holding pSeries_lpar_tlbie_lock to 3: H_BULK_REMOVE
 * takes two parameters per HPTE and at most 8 parameters per call, i.e.
 * 4 HPTEs per hcall, so a batch of 12 means at most 3 hcalls under the
 * lock. We also need to make sure that we avoid bouncing the hypervisor
 * tlbie lock.
 */
#define PPC64_HUGE_HPTE_BATCH 12

static void __pSeries_lpar_hugepage_invalidate(unsigned long *slot,
					     unsigned long *vpn, int count,
					     int psize, int ssize)
{
	unsigned long param[8];
	int i = 0, pix = 0, rc;
	unsigned long flags = 0;
	int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);

	if (lock_tlbie)
		spin_lock_irqsave(&pSeries_lpar_tlbie_lock, flags);

	for (i = 0; i < count; i++) {

		if (!firmware_has_feature(FW_FEATURE_BULK_REMOVE)) {
			pSeries_lpar_hpte_invalidate(slot[i], vpn[i], psize, 0,
						     ssize, 0);
		} else {
			param[pix] = HBR_REQUEST | HBR_AVPN | slot[i];
			param[pix+1] = hpte_encode_avpn(vpn[i], psize, ssize);
			pix += 2;
			if (pix == 8) {
				rc = plpar_hcall9(H_BULK_REMOVE, param,
						  param[0], param[1], param[2],
						  param[3], param[4], param[5],
						  param[6], param[7]);
				BUG_ON(rc != H_SUCCESS);
				pix = 0;
			}
		}
	}
	if (pix) {
		param[pix] = HBR_END;
		rc = plpar_hcall9(H_BULK_REMOVE, param, param[0], param[1],
				  param[2], param[3], param[4], param[5],
				  param[6], param[7]);
		BUG_ON(rc != H_SUCCESS);
	}

	if (lock_tlbie)
		spin_unlock_irqrestore(&pSeries_lpar_tlbie_lock, flags);
}

static void pSeries_lpar_hugepage_invalidate(unsigned long vsid,
					     unsigned long addr,
					     unsigned char *hpte_slot_array,
					     int psize, int ssize, int local)
{
	int i, index = 0;
	unsigned long s_addr = addr;
	unsigned int max_hpte_count, valid;
	unsigned long vpn_array[PPC64_HUGE_HPTE_BATCH];
	unsigned long slot_array[PPC64_HUGE_HPTE_BATCH];
	unsigned long shift, hidx, vpn = 0, hash, slot;

	shift = mmu_psize_defs[psize].shift;
	max_hpte_count = 1U << (PMD_SHIFT - shift);

	for (i = 0; i < max_hpte_count; i++) {
		valid = hpte_valid(hpte_slot_array, i);
		if (!valid)
			continue;
		hidx = hpte_hash_index(hpte_slot_array, i);

		/* get the vpn */
		addr = s_addr + (i * (1ul << shift));
		vpn = hpt_vpn(addr, vsid, ssize);
		hash = hpt_hash(vpn, shift, ssize);
		if (hidx & _PTEIDX_SECONDARY)
			hash = ~hash;

		slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
		slot += hidx & _PTEIDX_GROUP_IX;

		slot_array[index] = slot;
		vpn_array[index] = vpn;
		if (index == PPC64_HUGE_HPTE_BATCH - 1) {
			/*
			 * Now do a bulk invalidate
			 */
			__pSeries_lpar_hugepage_invalidate(slot_array,
							   vpn_array,
							   PPC64_HUGE_HPTE_BATCH,
							   psize, ssize);
			index = 0;
		} else {
			index++;
		}
	}
	if (index)
		__pSeries_lpar_hugepage_invalidate(slot_array, vpn_array,
						   index, psize, ssize);
}

static void pSeries_lpar_hpte_removebolted(unsigned long ea,
					   int psize, int ssize)
{
	unsigned long vpn;
	unsigned long slot, vsid;

	vsid = get_kernel_vsid(ea, ssize);
	vpn = hpt_vpn(ea, vsid, ssize);

	slot = pSeries_lpar_hpte_find(vpn, psize, ssize);
	BUG_ON(slot == -1);
	/*
	 * lpar doesn't use the passed actual page size
	 */
	pSeries_lpar_hpte_invalidate(slot, vpn, psize, 0, ssize, 0);
}

/*
 * Take a spinlock around flushes to avoid bouncing the hypervisor tlbie
 * lock.
 */
static void pSeries_lpar_flush_hash_range(unsigned long number, int local)
{
	unsigned long vpn;
	unsigned long i, pix, rc;
	unsigned long flags = 0;
	struct ppc64_tlb_batch *batch = this_cpu_ptr(&ppc64_tlb_batch);
	int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
	unsigned long param[9];
	unsigned long hash, index, shift, hidx, slot;
	real_pte_t pte;
	int psize, ssize;

	if (lock_tlbie)
		spin_lock_irqsave(&pSeries_lpar_tlbie_lock, flags);

	psize = batch->psize;
	ssize = batch->ssize;
	pix = 0;
	for (i = 0; i < number; i++) {
		vpn = batch->vpn[i];
		pte = batch->pte[i];
		pte_iterate_hashed_subpages(pte, psize, vpn, index, shift) {
			hash = hpt_hash(vpn, shift, ssize);
			hidx = __rpte_to_hidx(pte, index);
			if (hidx & _PTEIDX_SECONDARY)
				hash = ~hash;
			slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
			slot += hidx & _PTEIDX_GROUP_IX;
			if (!firmware_has_feature(FW_FEATURE_BULK_REMOVE)) {
				/*
				 * lpar doesn't use the passed actual page size
				 */
				pSeries_lpar_hpte_invalidate(slot, vpn, psize,
							     0, ssize, local);
			} else {
				param[pix] = HBR_REQUEST | HBR_AVPN | slot;
				param[pix+1] = hpte_encode_avpn(vpn, psize,
								ssize);
				pix += 2;
				if (pix == 8) {
					rc = plpar_hcall9(H_BULK_REMOVE, param,
						param[0], param[1], param[2],
						param[3], param[4], param[5],
						param[6], param[7]);
					BUG_ON(rc != H_SUCCESS);
					pix = 0;
				}
			}
		} pte_iterate_hashed_end();
	}
	if (pix) {
		param[pix] = HBR_END;
		rc = plpar_hcall9(H_BULK_REMOVE, param, param[0], param[1],
				  param[2], param[3], param[4], param[5],
				  param[6], param[7]);
		BUG_ON(rc != H_SUCCESS);
	}

	if (lock_tlbie)
		spin_unlock_irqrestore(&pSeries_lpar_tlbie_lock, flags);
}

static int __init disable_bulk_remove(char *str)
{
	if (strcmp(str, "off") == 0 &&
	    firmware_has_feature(FW_FEATURE_BULK_REMOVE)) {
		printk(KERN_INFO "Disabling BULK_REMOVE firmware feature\n");
		powerpc_firmware_features &= ~FW_FEATURE_BULK_REMOVE;
	}
	return 1;
}

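/* Boot with "bulk_remove=off" to fall back to one-by-one H_REMOVE calls. */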
__setup("bulk_remove=", disable_bulk_remove);

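/*
 * Install the hcall-based HPT operations above into ppc_md: on a
 * pseries LPAR the hash page table is owned by the hypervisor, so every
 * update has to go through an hcall instead of a direct store.
 */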
void __init hpte_init_lpar(void)
{
	ppc_md.hpte_invalidate	= pSeries_lpar_hpte_invalidate;
	ppc_md.hpte_updatepp	= pSeries_lpar_hpte_updatepp;
	ppc_md.hpte_updateboltedpp = pSeries_lpar_hpte_updateboltedpp;
	ppc_md.hpte_insert	= pSeries_lpar_hpte_insert;
	ppc_md.hpte_remove	= pSeries_lpar_hpte_remove;
	ppc_md.hpte_removebolted = pSeries_lpar_hpte_removebolted;
	ppc_md.flush_hash_range	= pSeries_lpar_flush_hash_range;
	ppc_md.hpte_clear_all   = pSeries_lpar_hptab_clear;
	ppc_md.hugepage_invalidate = pSeries_lpar_hugepage_invalidate;
}

#ifdef CONFIG_PPC_SMLPAR
#define CMO_FREE_HINT_DEFAULT 1
static int cmo_free_hint_flag = CMO_FREE_HINT_DEFAULT;

static int __init cmo_free_hint(char *str)
{
	char *parm;
	parm = strstrip(str);

	if (strcasecmp(parm, "no") == 0 || strcasecmp(parm, "off") == 0) {
		printk(KERN_INFO "cmo_free_hint: CMO free page hinting is not active.\n");
		cmo_free_hint_flag = 0;
		return 1;
	}

	cmo_free_hint_flag = 1;
	printk(KERN_INFO "cmo_free_hint: CMO free page hinting is active.\n");

	if (strcasecmp(parm, "yes") == 0 || strcasecmp(parm, "on") == 0)
		return 1;

	return 0;
}

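/* "cmo_free_hint=no" (or "off") disables the unused-page hints below. */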
__setup("cmo_free_hint=", cmo_free_hint);

static void pSeries_set_page_state(struct page *page, int order,
				   unsigned long state)
{
	int i, j;
	unsigned long cmo_page_sz, addr;

	cmo_page_sz = cmo_get_page_size();
	addr = __pa((unsigned long)page_address(page));

	for (i = 0; i < (1 << order); i++, addr += PAGE_SIZE) {
		for (j = 0; j < PAGE_SIZE; j += cmo_page_sz)
			plpar_hcall_norets(H_PAGE_INIT, state, addr + j, 0);
	}
}

void arch_free_page(struct page *page, int order)
{
	if (!cmo_free_hint_flag || !firmware_has_feature(FW_FEATURE_CMO))
		return;

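	/* Hint that the freed pages are unused so the hypervisor (CMO)
	 * can reclaim the memory backing them.
	 */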
	pSeries_set_page_state(page, order, H_PAGE_SET_UNUSED);
}
EXPORT_SYMBOL(arch_free_page);

#endif

#ifdef CONFIG_TRACEPOINTS
#ifdef HAVE_JUMP_LABEL
struct static_key hcall_tracepoint_key = STATIC_KEY_INIT;

void hcall_tracepoint_regfunc(void)
{
	static_key_slow_inc(&hcall_tracepoint_key);
}

void hcall_tracepoint_unregfunc(void)
{
	static_key_slow_dec(&hcall_tracepoint_key);
}
#else
/*
 * We optimise our hcall path by placing hcall_tracepoint_refcount
 * directly in the TOC so we can check if the hcall tracepoints are
 * enabled via a single load.
 */

/* NB: reg/unreg are called while guarded with the tracepoints_mutex */
extern long hcall_tracepoint_refcount;

void hcall_tracepoint_regfunc(void)
{
	hcall_tracepoint_refcount++;
}

void hcall_tracepoint_unregfunc(void)
{
	hcall_tracepoint_refcount--;
}
#endif

/*
 * Since the tracing code might execute hcalls we need to guard against
 * recursion. One example of this is spinlocks calling H_YIELD on
 * shared processor partitions.
 */
static DEFINE_PER_CPU(unsigned int, hcall_trace_depth);


void __trace_hcall_entry(unsigned long opcode, unsigned long *args)
{
	unsigned long flags;
	unsigned int *depth;

	/*
	 * We cannot call tracepoints inside RCU idle regions which
	 * means we must not trace H_CEDE.
	 */
	if (opcode == H_CEDE)
		return;

	local_irq_save(flags);

	depth = this_cpu_ptr(&hcall_trace_depth);

	if (*depth)
		goto out;

	(*depth)++;
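	/*
	 * The preempt_disable() here pairs with the preempt_enable() in
	 * __trace_hcall_exit(): staying on one CPU across the hcall keeps
	 * this per-cpu depth counter balanced.
	 */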
	preempt_disable();
	trace_hcall_entry(opcode, args);
	(*depth)--;

out:
	local_irq_restore(flags);
}

void __trace_hcall_exit(long opcode, unsigned long retval,
			unsigned long *retbuf)
{
	unsigned long flags;
	unsigned int *depth;

	if (opcode == H_CEDE)
		return;

	local_irq_save(flags);

	depth = this_cpu_ptr(&hcall_trace_depth);

	if (*depth)
		goto out;

	(*depth)++;
	trace_hcall_exit(opcode, retval, retbuf);
	preempt_enable();
	(*depth)--;

out:
	local_irq_restore(flags);
}
#endif

/**
 * h_get_mpp() - The H_GET_MPP hcall returns its info in seven return
 *               parameters, unpacked into @mpp_data below.
 */
int h_get_mpp(struct hvcall_mpp_data *mpp_data)
{
	int rc;
	unsigned long retbuf[PLPAR_HCALL9_BUFSIZE];

	rc = plpar_hcall9(H_GET_MPP, retbuf);

	mpp_data->entitled_mem = retbuf[0];
	mpp_data->mapped_mem = retbuf[1];

	mpp_data->group_num = (retbuf[2] >> 2 * 8) & 0xffff;
	mpp_data->pool_num = retbuf[2] & 0xffff;

	mpp_data->mem_weight = (retbuf[3] >> 7 * 8) & 0xff;
	mpp_data->unallocated_mem_weight = (retbuf[3] >> 6 * 8) & 0xff;
	mpp_data->unallocated_entitlement = retbuf[3] & 0xffffffffffffUL;

	mpp_data->pool_size = retbuf[4];
	mpp_data->loan_request = retbuf[5];
	mpp_data->backing_mem = retbuf[6];

	return rc;
}
EXPORT_SYMBOL(h_get_mpp);

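/*
 * h_get_mpp_x() - Fetch the extended MPP data (H_GET_MPP_X): coalesced
 * memory byte counts plus pool PURR/SPURR cycles.
 */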
int h_get_mpp_x(struct hvcall_mpp_x_data *mpp_x_data)
{
	int rc;
	unsigned long retbuf[PLPAR_HCALL9_BUFSIZE] = { 0 };

	rc = plpar_hcall9(H_GET_MPP_X, retbuf);

	mpp_x_data->coalesced_bytes = retbuf[0];
	mpp_x_data->pool_coalesced_bytes = retbuf[1];
	mpp_x_data->pool_purr_cycles = retbuf[2];
	mpp_x_data->pool_spurr_cycles = retbuf[3];

	return rc;
}