/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * lppaca.h
 * Copyright (C) 2001  Mike Corrigan IBM Corporation
 */
#ifndef _ASM_POWERPC_LPPACA_H
#define _ASM_POWERPC_LPPACA_H

/*
 * The below VPHN macros are outside the __KERNEL__ check since these are
 * used for compiling the vphn selftest in userspace
 */

/* The H_HOME_NODE_ASSOCIATIVITY h_call returns 6 64-bit registers. */
#define VPHN_REGISTER_COUNT 6

/*
 * 6 64-bit registers unpacked into up to 24 be32 associativity values. To
 * form the complete property we have to add the length in the first cell.
 */
#define VPHN_ASSOC_BUFSIZE (VPHN_REGISTER_COUNT*sizeof(u64)/sizeof(u16) + 1)

/*
 * The H_HOME_NODE_ASSOCIATIVITY hcall takes two values for flags:
 * 1 for retrieving associativity information for a guest cpu
 * 2 for retrieving associativity information for a host/hypervisor cpu
 */
#define VPHN_FLAG_VCPU	1
#define VPHN_FLAG_PCPU	2
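
/*
 * Illustrative sketch (not part of the upstream header): a caller might size
 * its result buffer with VPHN_ASSOC_BUFSIZE and pick a flag depending on
 * whether it wants guest or host associativity. hcall_vphn() is declared
 * further down, under __KERNEL__/CONFIG_PPC_BOOK3S, and fills the buffer
 * with the unpacked associativity property on success:
 *
 *	__be32 assoc[VPHN_ASSOC_BUFSIZE] = { 0 };
 *	long rc = hcall_vphn(cpu, VPHN_FLAG_VCPU, assoc);
 *	if (rc == H_SUCCESS)
 *		// assoc[0] holds the length cell, values follow
 */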

#ifdef __KERNEL__

/*
 * These definitions relate to hypervisors that only exist when using
 * a server type processor
 */
#ifdef CONFIG_PPC_BOOK3S

/*
 * This control block contains the data that is shared between the
 * hypervisor and the OS.
 */
#include <linux/cache.h>
#include <linux/threads.h>
#include <linux/spinlock_types.h>
#include <asm/types.h>
#include <asm/mmu.h>
#include <asm/firmware.h>

/*
 * The lppaca is the "virtual processor area" registered with the hypervisor,
 * H_REGISTER_VPA etc.
 *
 * According to PAPR, the structure is 640 bytes long, must be L1 cache line
 * aligned, and must not cross a 4kB boundary. Its size field must be at
 * least 640 bytes (but may be more).
 *
 * Pre-v4.14 KVM hypervisors reject the VPA if its size field is smaller than
 * 1kB, so we dynamically allocate 1kB and advertise size as 1kB, but keep
 * this structure as the canonical 640 byte size.
 */
struct lppaca {
	/* cacheline 1 contains read-only data */

	__be32	desc;			/* Eye catcher 0xD397D781 */
	__be16	size;			/* Size of this struct */
	u8	reserved1[3];
	u8	__old_status;		/* Old status, including shared proc */
	u8	reserved3[14];
	volatile __be32 dyn_hw_node_id;	/* Dynamic hardware node id */
	volatile __be32 dyn_hw_proc_id;	/* Dynamic hardware proc id */
	u8	reserved4[56];
	volatile u8 vphn_assoc_counts[8]; /* Virtual processor home node */
					  /* associativity change counters */
	u8	reserved5[32];

	/* cacheline 2 contains local read-write data */

	u8	reserved6[48];
	u8	cede_latency_hint;
	u8	ebb_regs_in_use;
	u8	reserved7[6];
	u8	dtl_enable_mask;	/* Dispatch Trace Log mask */
	u8	donate_dedicated_cpu;	/* Donate dedicated CPU cycles */
	u8	fpregs_in_use;
	u8	pmcregs_in_use;
	u8	reserved8[28];
	__be64	wait_state_cycles;	/* Wait cycles for this proc */
	u8	reserved9[28];
	__be16	slb_count;		/* # of SLBs to maintain */
	u8	idle;			/* Indicate OS is idle */
	u8	vmxregs_in_use;

	/* cacheline 3 is shared with other processors */

	/*
	 * This is the yield_count.  An "odd" value (low bit on) means that
	 * the processor is yielded (either because of an OS yield or a
	 * hypervisor preempt).  An even value implies that the processor is
	 * currently executing.
	 * NOTE: Even dedicated processor partitions can yield so this
	 * field cannot be used to determine if we are shared or dedicated.
	 */
	volatile __be32 yield_count;
	volatile __be32 dispersion_count; /* dispatch changed physical cpu */
	volatile __be64 cmo_faults;	/* CMO page fault count */
	volatile __be64 cmo_fault_time;	/* CMO page fault time */
	u8	reserved10[104];

	/* cacheline 4-5 */

	__be32	page_ins;		/* CMO Hint - # page ins by OS */
	u8	reserved11[148];
	volatile __be64 dtl_idx;	/* Dispatch Trace Log head index */
	u8	reserved12[96];
} ____cacheline_aligned;
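
/*
 * Illustrative sketch (not part of the upstream header): the PAPR rules in
 * the comment above can be checked and satisfied roughly as follows. The
 * 1kB (0x400) value advertised in the size field is the pre-v4.14 KVM
 * workaround described above; the structure itself stays 640 bytes.
 *
 *	BUILD_BUG_ON(sizeof(struct lppaca) != 640);
 *	lp->desc = cpu_to_be32(0xd397d781);	// eye catcher
 *	lp->size = cpu_to_be16(0x400);		// >= 640 bytes, here 1kB
 */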

#define lppaca_of(cpu)	(*paca_ptrs[cpu]->lppaca_ptr)
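
/*
 * Illustrative sketch (not part of the upstream header): the yield_count
 * semantics documented in struct lppaca can be used to tell whether a
 * virtual processor is currently dispatched. An odd count means the vCPU
 * is yielded or preempted; an even count means it is running:
 *
 *	u32 yc = be32_to_cpu(lppaca_of(cpu).yield_count);
 *	bool preempted = yc & 1;
 */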

/*
 * We are using a non architected field to determine if a partition is
 * shared or dedicated. This currently works on both KVM and PHYP, but
 * we will have to transition to something better.
 */
#define LPPACA_OLD_SHARED_PROC		2

static inline bool lppaca_shared_proc(struct lppaca *l)
{
	if (!firmware_has_feature(FW_FEATURE_SPLPAR))
		return false;
	return !!(l->__old_status & LPPACA_OLD_SHARED_PROC);
}
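
/*
 * Illustrative sketch (not part of the upstream header): a typical caller
 * passes the current CPU's VPA, e.g. to report shared-processor mode:
 *
 *	if (lppaca_shared_proc(get_lppaca()))
 *		// partition runs on shared processors
 *
 * get_lppaca() is assumed here to come from asm/paca.h; for a specific CPU,
 * &lppaca_of(cpu) gives an equivalent pointer.
 */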

/*
 * SLB shadow buffer structure as defined in the PAPR.  The save_area
 * contains adjacent ESID and VSID pairs for each shadowed SLB.  The
 * ESID is stored in the lower 64bits, then the VSID.
 */
struct slb_shadow {
	__be32	persistent;		/* Number of persistent SLBs */
	__be32	buffer_length;		/* Total shadow buffer length */
	__be64	reserved;
	struct	{
		__be64	esid;
		__be64	vsid;
	} save_area[SLB_NUM_BOLTED];
} ____cacheline_aligned;
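
/*
 * Illustrative sketch (not part of the upstream header): when a bolted SLB
 * entry changes, its shadow pair is updated with the ESID cleared first so
 * the hypervisor never sees a half-written but valid-looking entry:
 *
 *	struct slb_shadow *p = ...;	// this CPU's shadow buffer
 *	p->save_area[index].esid = cpu_to_be64(0);
 *	p->save_area[index].vsid = cpu_to_be64(new_vsid_data);
 *	p->save_area[index].esid = cpu_to_be64(new_esid_data);
 */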

/*
 * Layout of entries in the hypervisor's dispatch trace log buffer.
 */
struct dtl_entry {
	u8	dispatch_reason;
	u8	preempt_reason;
	__be16	processor_id;
	__be32	enqueue_to_dispatch_time;
	__be32	ready_to_enqueue_time;
	__be32	waiting_to_ready_time;
	__be64	timebase;
	__be64	fault_addr;
	__be64	srr0;
	__be64	srr1;
};

#define DISPATCH_LOG_BYTES	4096	/* bytes per cpu */
#define N_DISPATCH_LOG		(DISPATCH_LOG_BYTES / sizeof(struct dtl_entry))
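
/*
 * Illustrative sketch (not part of the upstream header): the DTL is a ring
 * of N_DISPATCH_LOG entries per CPU. dtl_idx in the lppaca is a running
 * count of entries ever written, so the slot the hypervisor fills next is:
 *
 *	u64 idx = be64_to_cpu(lppaca_of(cpu).dtl_idx);
 *	struct dtl_entry *next = &dtl_ring[idx % N_DISPATCH_LOG];
 *
 * dtl_ring here stands for the per-CPU buffer registered with the
 * hypervisor (see register_dtl_buffer() below).
 */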

/*
 * Dispatch trace log event enable mask:
 *   0x1: voluntary virtual processor waits
 *   0x2: time-slice preempts
 *   0x4: virtual partition memory page faults
 */
#define DTL_LOG_CEDE		0x1
#define DTL_LOG_PREEMPT		0x2
#define DTL_LOG_FAULT		0x4
#define DTL_LOG_ALL		(DTL_LOG_CEDE | DTL_LOG_PREEMPT | DTL_LOG_FAULT)

extern struct kmem_cache *dtl_cache;
extern rwlock_t dtl_access_lock;

/*
 * When CONFIG_VIRT_CPU_ACCOUNTING_NATIVE = y, the cpu accounting code controls
 * reading from the dispatch trace log.  If other code wants to consume
 * DTL entries, it can set this pointer to a function that will get
 * called once for each DTL entry that gets processed.
 */
extern void (*dtl_consumer)(struct dtl_entry *entry, u64 index);
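
/*
 * Illustrative sketch (not part of the upstream header): a hypothetical
 * consumer hooks dtl_consumer and enables the events it cares about via the
 * dtl_enable_mask byte in the lppaca. The per-entry times are reported by
 * the hypervisor in timebase units.
 *
 *	static void my_dtl_consumer(struct dtl_entry *e, u64 index)
 *	{
 *		u32 latency_tb = be32_to_cpu(e->enqueue_to_dispatch_time);
 *		// record latency_tb, e->dispatch_reason, etc.
 *	}
 *	...
 *	dtl_consumer = my_dtl_consumer;
 *	lppaca_of(cpu).dtl_enable_mask = DTL_LOG_ALL;
 */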

extern void register_dtl_buffer(int cpu);
extern void alloc_dtl_buffers(unsigned long *time_limit);
extern long hcall_vphn(unsigned long cpu, u64 flags, __be32 *associativity);

#endif /* CONFIG_PPC_BOOK3S */
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_LPPACA_H */