1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef _ASM_POWERPC_PLPAR_WRAPPERS_H
3 #define _ASM_POWERPC_PLPAR_WRAPPERS_H
4
5 #ifdef CONFIG_PPC_PSERIES
6
7 #include <linux/string.h>
8 #include <linux/irqflags.h>
9 #include <linux/delay.h>
10
11 #include <asm/hvcall.h>
12 #include <asm/paca.h>
13 #include <asm/lppaca.h>
14 #include <asm/page.h>
15
/* Thin wrapper around the H_POLL_PENDING hcall; returns the hcall status. */
static inline long poll_pending(void)
{
	return plpar_hcall_norets(H_POLL_PENDING);
}
20
/*
 * Cede this processor to the hypervisor (H_CEDE).
 *
 * We cannot call tracepoints inside RCU idle regions which
 * means we must not trace H_CEDE — hence the _notrace variant.
 */
static inline long cede_processor(void)
{
	return plpar_hcall_norets_notrace(H_CEDE);
}
29
vpa_call(unsigned long flags,unsigned long cpu,unsigned long vpa)30 static inline long vpa_call(unsigned long flags, unsigned long cpu,
31 unsigned long vpa)
32 {
33 flags = flags << H_VPA_FUNC_SHIFT;
34
35 return plpar_hcall_norets(H_REGISTER_VPA, flags, cpu, vpa);
36 }
37
/* Deregister @cpu's VPA (H_VPA_DEREG_VPA sub-function of H_REGISTER_VPA). */
static inline long unregister_vpa(unsigned long cpu)
{
	return vpa_call(H_VPA_DEREG_VPA, cpu, 0);
}
42
/* Register buffer @vpa as @cpu's VPA (H_VPA_REG_VPA sub-function). */
static inline long register_vpa(unsigned long cpu, unsigned long vpa)
{
	return vpa_call(H_VPA_REG_VPA, cpu, vpa);
}
47
/* Deregister @cpu's SLB shadow buffer (H_VPA_DEREG_SLB sub-function). */
static inline long unregister_slb_shadow(unsigned long cpu)
{
	return vpa_call(H_VPA_DEREG_SLB, cpu, 0);
}
52
/* Register buffer @vpa as @cpu's SLB shadow (H_VPA_REG_SLB sub-function). */
static inline long register_slb_shadow(unsigned long cpu, unsigned long vpa)
{
	return vpa_call(H_VPA_REG_SLB, cpu, vpa);
}
57
/* Deregister @cpu's dispatch trace log (H_VPA_DEREG_DTL sub-function). */
static inline long unregister_dtl(unsigned long cpu)
{
	return vpa_call(H_VPA_DEREG_DTL, cpu, 0);
}
62
/* Register buffer @vpa as @cpu's dispatch trace log (H_VPA_REG_DTL). */
static inline long register_dtl(unsigned long cpu, unsigned long vpa)
{
	return vpa_call(H_VPA_REG_DTL, cpu, vpa);
}
67
68 extern void vpa_init(int cpu);
69
plpar_pte_enter(unsigned long flags,unsigned long hpte_group,unsigned long hpte_v,unsigned long hpte_r,unsigned long * slot)70 static inline long plpar_pte_enter(unsigned long flags,
71 unsigned long hpte_group, unsigned long hpte_v,
72 unsigned long hpte_r, unsigned long *slot)
73 {
74 long rc;
75 unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
76
77 rc = plpar_hcall(H_ENTER, retbuf, flags, hpte_group, hpte_v, hpte_r);
78
79 *slot = retbuf[0];
80
81 return rc;
82 }
83
plpar_pte_remove(unsigned long flags,unsigned long ptex,unsigned long avpn,unsigned long * old_pteh_ret,unsigned long * old_ptel_ret)84 static inline long plpar_pte_remove(unsigned long flags, unsigned long ptex,
85 unsigned long avpn, unsigned long *old_pteh_ret,
86 unsigned long *old_ptel_ret)
87 {
88 long rc;
89 unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
90
91 rc = plpar_hcall(H_REMOVE, retbuf, flags, ptex, avpn);
92
93 *old_pteh_ret = retbuf[0];
94 *old_ptel_ret = retbuf[1];
95
96 return rc;
97 }
98
99 /* plpar_pte_remove_raw can be called in real mode. It calls plpar_hcall_raw */
plpar_pte_remove_raw(unsigned long flags,unsigned long ptex,unsigned long avpn,unsigned long * old_pteh_ret,unsigned long * old_ptel_ret)100 static inline long plpar_pte_remove_raw(unsigned long flags, unsigned long ptex,
101 unsigned long avpn, unsigned long *old_pteh_ret,
102 unsigned long *old_ptel_ret)
103 {
104 long rc;
105 unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
106
107 rc = plpar_hcall_raw(H_REMOVE, retbuf, flags, ptex, avpn);
108
109 *old_pteh_ret = retbuf[0];
110 *old_ptel_ret = retbuf[1];
111
112 return rc;
113 }
114
plpar_pte_read(unsigned long flags,unsigned long ptex,unsigned long * old_pteh_ret,unsigned long * old_ptel_ret)115 static inline long plpar_pte_read(unsigned long flags, unsigned long ptex,
116 unsigned long *old_pteh_ret, unsigned long *old_ptel_ret)
117 {
118 long rc;
119 unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
120
121 rc = plpar_hcall(H_READ, retbuf, flags, ptex);
122
123 *old_pteh_ret = retbuf[0];
124 *old_ptel_ret = retbuf[1];
125
126 return rc;
127 }
128
129 /* plpar_pte_read_raw can be called in real mode. It calls plpar_hcall_raw */
plpar_pte_read_raw(unsigned long flags,unsigned long ptex,unsigned long * old_pteh_ret,unsigned long * old_ptel_ret)130 static inline long plpar_pte_read_raw(unsigned long flags, unsigned long ptex,
131 unsigned long *old_pteh_ret, unsigned long *old_ptel_ret)
132 {
133 long rc;
134 unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
135
136 rc = plpar_hcall_raw(H_READ, retbuf, flags, ptex);
137
138 *old_pteh_ret = retbuf[0];
139 *old_ptel_ret = retbuf[1];
140
141 return rc;
142 }
143
144 /*
145 * ptes must be 8*sizeof(unsigned long)
146 */
plpar_pte_read_4(unsigned long flags,unsigned long ptex,unsigned long * ptes)147 static inline long plpar_pte_read_4(unsigned long flags, unsigned long ptex,
148 unsigned long *ptes)
149
150 {
151 long rc;
152 unsigned long retbuf[PLPAR_HCALL9_BUFSIZE];
153
154 rc = plpar_hcall9(H_READ, retbuf, flags | H_READ_4, ptex);
155
156 memcpy(ptes, retbuf, 8*sizeof(unsigned long));
157
158 return rc;
159 }
160
161 /*
162 * plpar_pte_read_4_raw can be called in real mode.
163 * ptes must be 8*sizeof(unsigned long)
164 */
plpar_pte_read_4_raw(unsigned long flags,unsigned long ptex,unsigned long * ptes)165 static inline long plpar_pte_read_4_raw(unsigned long flags, unsigned long ptex,
166 unsigned long *ptes)
167
168 {
169 long rc;
170 unsigned long retbuf[PLPAR_HCALL9_BUFSIZE];
171
172 rc = plpar_hcall9_raw(H_READ, retbuf, flags | H_READ_4, ptex);
173
174 memcpy(ptes, retbuf, 8*sizeof(unsigned long));
175
176 return rc;
177 }
178
/* Change protection on an existing HPTE via H_PROTECT. */
static inline long plpar_pte_protect(unsigned long flags, unsigned long ptex,
		unsigned long avpn)
{
	return plpar_hcall_norets(H_PROTECT, flags, ptex, avpn);
}
184
/* Prepare an HPT resize to 2^@shift bytes (H_RESIZE_HPT_PREPARE). */
static inline long plpar_resize_hpt_prepare(unsigned long flags,
		unsigned long shift)
{
	return plpar_hcall_norets(H_RESIZE_HPT_PREPARE, flags, shift);
}
190
/* Commit a previously prepared HPT resize (H_RESIZE_HPT_COMMIT). */
static inline long plpar_resize_hpt_commit(unsigned long flags,
		unsigned long shift)
{
	return plpar_hcall_norets(H_RESIZE_HPT_COMMIT, flags, shift);
}
196
plpar_tce_get(unsigned long liobn,unsigned long ioba,unsigned long * tce_ret)197 static inline long plpar_tce_get(unsigned long liobn, unsigned long ioba,
198 unsigned long *tce_ret)
199 {
200 long rc;
201 unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
202
203 rc = plpar_hcall(H_GET_TCE, retbuf, liobn, ioba);
204
205 *tce_ret = retbuf[0];
206
207 return rc;
208 }
209
/* Write a single TCE entry (H_PUT_TCE). */
static inline long plpar_tce_put(unsigned long liobn, unsigned long ioba,
		unsigned long tceval)
{
	return plpar_hcall_norets(H_PUT_TCE, liobn, ioba, tceval);
}
215
/* Write @count TCE entries from a page of values (H_PUT_TCE_INDIRECT). */
static inline long plpar_tce_put_indirect(unsigned long liobn,
		unsigned long ioba, unsigned long page, unsigned long count)
{
	return plpar_hcall_norets(H_PUT_TCE_INDIRECT, liobn, ioba, page, count);
}
221
/* Fill @count consecutive TCE entries with @tceval (H_STUFF_TCE). */
static inline long plpar_tce_stuff(unsigned long liobn, unsigned long ioba,
		unsigned long tceval, unsigned long count)
{
	return plpar_hcall_norets(H_STUFF_TCE, liobn, ioba, tceval, count);
}
227
/* Set various resource mode parameters (H_SET_MODE). */
static inline long plpar_set_mode(unsigned long mflags, unsigned long resource,
		unsigned long value1, unsigned long value2)
{
	return plpar_hcall_norets(H_SET_MODE, mflags, resource, value1, value2);
}
234
/*
 * Enable relocation on exceptions on this partition
 *
 * Note: this call has a partition wide scope and can take a while to complete.
 * If it returns H_LONG_BUSY_* it should be retried periodically until it
 * returns H_SUCCESS.
 */
static inline long enable_reloc_on_exceptions(void)
{
	/* mflags = 3: Exceptions at 0xC000000000004000 */
	return plpar_set_mode(3, H_SET_MODE_RESOURCE_ADDR_TRANS_MODE, 0, 0);
}
247
248 /*
249 * Disable relocation on exceptions on this partition
250 *
251 * Note: this call has a partition wide scope and can take a while to complete.
252 * If it returns H_LONG_BUSY_* it should be retried periodically until it
253 * returns H_SUCCESS.
254 */
disable_reloc_on_exceptions(void)255 static inline long disable_reloc_on_exceptions(void) {
256 return plpar_set_mode(0, H_SET_MODE_RESOURCE_ADDR_TRANS_MODE, 0, 0);
257 }
258
/*
 * Take exceptions in big endian mode on this partition
 *
 * Note: this call has a partition wide scope and can take a while to complete.
 * If it returns H_LONG_BUSY_* it should be retried periodically until it
 * returns H_SUCCESS.
 */
static inline long enable_big_endian_exceptions(void)
{
	/* mflags = 0: big endian exceptions */
	return plpar_set_mode(0, H_SET_MODE_RESOURCE_LE, 0, 0);
}
271
/*
 * Take exceptions in little endian mode on this partition
 *
 * Note: this call has a partition wide scope and can take a while to complete.
 * If it returns H_LONG_BUSY_* it should be retried periodically until it
 * returns H_SUCCESS.
 */
static inline long enable_little_endian_exceptions(void)
{
	/* mflags = 1: little endian exceptions */
	return plpar_set_mode(1, H_SET_MODE_RESOURCE_LE, 0, 0);
}
284
/* Set the CIABR register value via H_SET_MODE. */
static inline long plpar_set_ciabr(unsigned long ciabr)
{
	return plpar_set_mode(0, H_SET_MODE_RESOURCE_SET_CIABR, ciabr, 0);
}
289
/* Program hardware watchpoint 0 (DAWR0/DAWRX0) via H_SET_MODE. */
static inline long plpar_set_watchpoint0(unsigned long dawr0, unsigned long dawrx0)
{
	return plpar_set_mode(0, H_SET_MODE_RESOURCE_SET_DAWR0, dawr0, dawrx0);
}
294
/* Program hardware watchpoint 1 (DAWR1/DAWRX1) via H_SET_MODE. */
static inline long plpar_set_watchpoint1(unsigned long dawr1, unsigned long dawrx1)
{
	return plpar_set_mode(0, H_SET_MODE_RESOURCE_SET_DAWR1, dawr1, dawrx1);
}
299
/* Send a system reset signal to @cpu via H_SIGNAL_SYS_RESET. */
static inline long plpar_signal_sys_reset(long cpu)
{
	return plpar_hcall_norets(H_SIGNAL_SYS_RESET, cpu);
}
304
plpar_get_cpu_characteristics(struct h_cpu_char_result * p)305 static inline long plpar_get_cpu_characteristics(struct h_cpu_char_result *p)
306 {
307 unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
308 long rc;
309
310 rc = plpar_hcall(H_GET_CPU_CHARACTERISTICS, retbuf);
311 if (rc == H_SUCCESS) {
312 p->character = retbuf[0];
313 p->behaviour = retbuf[1];
314 }
315
316 return rc;
317 }
318
/*
 * Create a guest via H_GUEST_CREATE, retrying while the hypervisor is
 * busy.  On H_BUSY / long-busy returns, retbuf[0] carries a continuation
 * token that must be passed back on the retry (initially -1UL).  On
 * success the new guest id is stored through @guest_id.
 */
static inline long plpar_guest_create(unsigned long flags, unsigned long *guest_id)
{
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
	unsigned long token;
	long rc;

	token = -1UL;
	do {
		rc = plpar_hcall(H_GUEST_CREATE, retbuf, flags, token);
		if (rc == H_SUCCESS)
			*guest_id = retbuf[0];

		if (rc == H_BUSY) {
			/* Pick up the continuation token before retrying. */
			token = retbuf[0];
			cond_resched();
		}

		if (H_IS_LONG_BUSY(rc)) {
			token = retbuf[0];
			/* Sleep for the hint encoded in the return code. */
			msleep(get_longbusy_msecs(rc));
			rc = H_BUSY;
		}

	} while (rc == H_BUSY);

	return rc;
}
346
/*
 * Create vCPU @vcpu_id in guest @guest_id via H_GUEST_CREATE_VCPU,
 * retrying while the hypervisor is busy.
 *
 * NOTE(review): @flags is currently unused — a literal 0 is passed as the
 * hcall flags word; presumably reserved for future use — confirm intended.
 */
static inline long plpar_guest_create_vcpu(unsigned long flags,
					   unsigned long guest_id,
					   unsigned long vcpu_id)
{
	long rc;

	do {
		rc = plpar_hcall_norets(H_GUEST_CREATE_VCPU, 0, guest_id, vcpu_id);

		if (rc == H_BUSY)
			cond_resched();

		if (H_IS_LONG_BUSY(rc)) {
			/* Sleep for the hint encoded in the return code. */
			msleep(get_longbusy_msecs(rc));
			rc = H_BUSY;
		}

	} while (rc == H_BUSY);

	return rc;
}
368
plpar_guest_set_state(unsigned long flags,unsigned long guest_id,unsigned long vcpu_id,unsigned long data_buffer,unsigned long data_size,unsigned long * failed_index)369 static inline long plpar_guest_set_state(unsigned long flags,
370 unsigned long guest_id,
371 unsigned long vcpu_id,
372 unsigned long data_buffer,
373 unsigned long data_size,
374 unsigned long *failed_index)
375 {
376 unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
377 long rc;
378
379 while (true) {
380 rc = plpar_hcall(H_GUEST_SET_STATE, retbuf, flags, guest_id,
381 vcpu_id, data_buffer, data_size);
382
383 if (rc == H_BUSY) {
384 cpu_relax();
385 continue;
386 }
387
388 if (H_IS_LONG_BUSY(rc)) {
389 mdelay(get_longbusy_msecs(rc));
390 continue;
391 }
392
393 if (rc == H_INVALID_ELEMENT_ID)
394 *failed_index = retbuf[0];
395 else if (rc == H_INVALID_ELEMENT_SIZE)
396 *failed_index = retbuf[0];
397 else if (rc == H_INVALID_ELEMENT_VALUE)
398 *failed_index = retbuf[0];
399
400 break;
401 }
402
403 return rc;
404 }
405
plpar_guest_get_state(unsigned long flags,unsigned long guest_id,unsigned long vcpu_id,unsigned long data_buffer,unsigned long data_size,unsigned long * failed_index)406 static inline long plpar_guest_get_state(unsigned long flags,
407 unsigned long guest_id,
408 unsigned long vcpu_id,
409 unsigned long data_buffer,
410 unsigned long data_size,
411 unsigned long *failed_index)
412 {
413 unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
414 long rc;
415
416 while (true) {
417 rc = plpar_hcall(H_GUEST_GET_STATE, retbuf, flags, guest_id,
418 vcpu_id, data_buffer, data_size);
419
420 if (rc == H_BUSY) {
421 cpu_relax();
422 continue;
423 }
424
425 if (H_IS_LONG_BUSY(rc)) {
426 mdelay(get_longbusy_msecs(rc));
427 continue;
428 }
429
430 if (rc == H_INVALID_ELEMENT_ID)
431 *failed_index = retbuf[0];
432 else if (rc == H_INVALID_ELEMENT_SIZE)
433 *failed_index = retbuf[0];
434 else if (rc == H_INVALID_ELEMENT_VALUE)
435 *failed_index = retbuf[0];
436
437 break;
438 }
439
440 return rc;
441 }
442
plpar_guest_run_vcpu(unsigned long flags,unsigned long guest_id,unsigned long vcpu_id,int * trap,unsigned long * failed_index)443 static inline long plpar_guest_run_vcpu(unsigned long flags, unsigned long guest_id,
444 unsigned long vcpu_id, int *trap,
445 unsigned long *failed_index)
446 {
447 unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
448 long rc;
449
450 rc = plpar_hcall(H_GUEST_RUN_VCPU, retbuf, flags, guest_id, vcpu_id);
451 if (rc == H_SUCCESS)
452 *trap = retbuf[0];
453 else if (rc == H_INVALID_ELEMENT_ID)
454 *failed_index = retbuf[0];
455 else if (rc == H_INVALID_ELEMENT_SIZE)
456 *failed_index = retbuf[0];
457 else if (rc == H_INVALID_ELEMENT_VALUE)
458 *failed_index = retbuf[0];
459
460 return rc;
461 }
462
/*
 * Delete guest @guest_id via H_GUEST_DELETE, retrying while the
 * hypervisor is busy (sleeping on long-busy returns).
 */
static inline long plpar_guest_delete(unsigned long flags, u64 guest_id)
{
	long rc;

	do {
		rc = plpar_hcall_norets(H_GUEST_DELETE, flags, guest_id);
		if (rc == H_BUSY)
			cond_resched();

		if (H_IS_LONG_BUSY(rc)) {
			/* Sleep for the hint encoded in the return code. */
			msleep(get_longbusy_msecs(rc));
			rc = H_BUSY;
		}

	} while (rc == H_BUSY);

	return rc;
}
481
/*
 * Set guest capabilities via H_GUEST_SET_CAPABILITIES, retrying while
 * the hypervisor is busy (sleeping on long-busy returns).
 */
static inline long plpar_guest_set_capabilities(unsigned long flags,
						unsigned long capabilities)
{
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
	long rc;

	do {
		rc = plpar_hcall(H_GUEST_SET_CAPABILITIES, retbuf, flags, capabilities);
		if (rc == H_BUSY)
			cond_resched();

		if (H_IS_LONG_BUSY(rc)) {
			msleep(get_longbusy_msecs(rc));
			rc = H_BUSY;
		}
	} while (rc == H_BUSY);

	return rc;
}
501
/*
 * Query guest capabilities via H_GUEST_GET_CAPABILITIES, retrying while
 * the hypervisor is busy.  On success the capability word is stored
 * through @capabilities.
 */
static inline long plpar_guest_get_capabilities(unsigned long flags,
						unsigned long *capabilities)
{
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
	long rc;

	do {
		rc = plpar_hcall(H_GUEST_GET_CAPABILITIES, retbuf, flags);
		if (rc == H_BUSY)
			cond_resched();

		if (H_IS_LONG_BUSY(rc)) {
			msleep(get_longbusy_msecs(rc));
			rc = H_BUSY;
		}
	} while (rc == H_BUSY);

	if (rc == H_SUCCESS)
		*capabilities = retbuf[0];

	return rc;
}
524
525 /*
526 * Wrapper to H_RPT_INVALIDATE hcall that handles return values appropriately
527 *
528 * - Returns H_SUCCESS on success
529 * - For H_BUSY return value, we retry the hcall.
530 * - For any other hcall failures, attempt a full flush once before
531 * resorting to BUG().
532 *
533 * Note: This hcall is expected to fail only very rarely. The correct
534 * error recovery of killing the process/guest will be eventually
535 * needed.
536 */
pseries_rpt_invalidate(u64 pid,u64 target,u64 type,u64 page_sizes,u64 start,u64 end)537 static inline long pseries_rpt_invalidate(u64 pid, u64 target, u64 type,
538 u64 page_sizes, u64 start, u64 end)
539 {
540 long rc;
541 unsigned long all;
542
543 while (true) {
544 rc = plpar_hcall_norets(H_RPT_INVALIDATE, pid, target, type,
545 page_sizes, start, end);
546 if (rc == H_BUSY) {
547 cpu_relax();
548 continue;
549 } else if (rc == H_SUCCESS)
550 return rc;
551
552 /* Flush request failed, try with a full flush once */
553 if (type & H_RPTI_TYPE_NESTED)
554 all = H_RPTI_TYPE_NESTED | H_RPTI_TYPE_NESTED_ALL;
555 else
556 all = H_RPTI_TYPE_ALL;
557 retry:
558 rc = plpar_hcall_norets(H_RPT_INVALIDATE, pid, target,
559 all, page_sizes, 0, -1UL);
560 if (rc == H_BUSY) {
561 cpu_relax();
562 goto retry;
563 } else if (rc == H_SUCCESS)
564 return rc;
565
566 BUG();
567 }
568 }
569
570 #else /* !CONFIG_PPC_PSERIES */
571
/* No-op stub when CONFIG_PPC_PSERIES is disabled. */
static inline long plpar_set_ciabr(unsigned long ciabr)
{
	return 0;
}
576
/* No-op stub when CONFIG_PPC_PSERIES is disabled. */
static inline long plpar_pte_read_4(unsigned long flags, unsigned long ptex,
				    unsigned long *ptes)
{
	return 0;
}
582
/* No-op stub when CONFIG_PPC_PSERIES is disabled. */
static inline long pseries_rpt_invalidate(u64 pid, u64 target, u64 type,
					  u64 page_sizes, u64 start, u64 end)
{
	return 0;
}
588
/* No-op stub when CONFIG_PPC_PSERIES is disabled. */
static inline long plpar_guest_create_vcpu(unsigned long flags,
					   unsigned long guest_id,
					   unsigned long vcpu_id)
{
	return 0;
}
595
/* No-op stub when CONFIG_PPC_PSERIES is disabled. */
static inline long plpar_guest_get_state(unsigned long flags,
					 unsigned long guest_id,
					 unsigned long vcpu_id,
					 unsigned long data_buffer,
					 unsigned long data_size,
					 unsigned long *failed_index)
{
	return 0;
}
605
/* No-op stub when CONFIG_PPC_PSERIES is disabled. */
static inline long plpar_guest_set_state(unsigned long flags,
					 unsigned long guest_id,
					 unsigned long vcpu_id,
					 unsigned long data_buffer,
					 unsigned long data_size,
					 unsigned long *failed_index)
{
	return 0;
}
615
/* No-op stub when CONFIG_PPC_PSERIES is disabled. */
static inline long plpar_guest_run_vcpu(unsigned long flags, unsigned long guest_id,
					unsigned long vcpu_id, int *trap,
					unsigned long *failed_index)
{
	return 0;
}
622
/* No-op stub when CONFIG_PPC_PSERIES is disabled. */
static inline long plpar_guest_create(unsigned long flags, unsigned long *guest_id)
{
	return 0;
}
627
/* No-op stub when CONFIG_PPC_PSERIES is disabled. */
static inline long plpar_guest_delete(unsigned long flags, u64 guest_id)
{
	return 0;
}
632
/* No-op stub when CONFIG_PPC_PSERIES is disabled. */
static inline long plpar_guest_get_capabilities(unsigned long flags,
						unsigned long *capabilities)
{
	return 0;
}
638
/* No-op stub when CONFIG_PPC_PSERIES is disabled. */
static inline long plpar_guest_set_capabilities(unsigned long flags,
						unsigned long capabilities)
{
	return 0;
}
644
645 #endif /* CONFIG_PPC_PSERIES */
646
647 #endif /* _ASM_POWERPC_PLPAR_WRAPPERS_H */
648