1b8b572e1SStephen Rothwell /*
2b8b572e1SStephen Rothwell  * lppaca.h
3b8b572e1SStephen Rothwell  * Copyright (C) 2001 Mike Corrigan IBM Corporation
4b8b572e1SStephen Rothwell  *
5b8b572e1SStephen Rothwell  * This program is free software; you can redistribute it and/or modify
6b8b572e1SStephen Rothwell  * it under the terms of the GNU General Public License as published by
7b8b572e1SStephen Rothwell  * the Free Software Foundation; either version 2 of the License, or
8b8b572e1SStephen Rothwell  * (at your option) any later version.
9b8b572e1SStephen Rothwell  *
10b8b572e1SStephen Rothwell  * This program is distributed in the hope that it will be useful,
11b8b572e1SStephen Rothwell  * but WITHOUT ANY WARRANTY; without even the implied warranty of
12b8b572e1SStephen Rothwell  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13b8b572e1SStephen Rothwell  * GNU General Public License for more details.
14b8b572e1SStephen Rothwell  *
15b8b572e1SStephen Rothwell  * You should have received a copy of the GNU General Public License
16b8b572e1SStephen Rothwell  * along with this program; if not, write to the Free Software
17b8b572e1SStephen Rothwell  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18b8b572e1SStephen Rothwell  */
19b8b572e1SStephen Rothwell #ifndef _ASM_POWERPC_LPPACA_H
20b8b572e1SStephen Rothwell #define _ASM_POWERPC_LPPACA_H
21b8b572e1SStephen Rothwell #ifdef __KERNEL__
22b8b572e1SStephen Rothwell 
2359c19cb2SAnton Blanchard /*
2459c19cb2SAnton Blanchard  * These definitions relate to hypervisors that only exist when using
2594491685SBenjamin Herrenschmidt  * a server type processor
2694491685SBenjamin Herrenschmidt  */
2794491685SBenjamin Herrenschmidt #ifdef CONFIG_PPC_BOOK3S
2894491685SBenjamin Herrenschmidt 
2959c19cb2SAnton Blanchard /*
3059c19cb2SAnton Blanchard  * This control block contains the data that is shared between the
3159c19cb2SAnton Blanchard  * hypervisor and the OS.
3259c19cb2SAnton Blanchard  */
33b8b572e1SStephen Rothwell #include <linux/cache.h>
34f2f6dad6SBenjamin Herrenschmidt #include <linux/threads.h>
35b8b572e1SStephen Rothwell #include <asm/types.h>
36b8b572e1SStephen Rothwell #include <asm/mmu.h>
37b8b572e1SStephen Rothwell 
38f2f6dad6SBenjamin Herrenschmidt /*
39f2f6dad6SBenjamin Herrenschmidt  * We only have to have statically allocated lppaca structs on
40f2f6dad6SBenjamin Herrenschmidt  * legacy iSeries, which supports at most 64 cpus.
41f2f6dad6SBenjamin Herrenschmidt  */
42f2f6dad6SBenjamin Herrenschmidt #define NR_LPPACAS	1
/* NOTE(review): the comment above still says "at most 64 cpus" while
 * NR_LPPACAS is 1 — presumably the comment is stale (legacy iSeries
 * removal); confirm against the platform code and update the comment. */
43f2f6dad6SBenjamin Herrenschmidt 
4459c19cb2SAnton Blanchard /*
4559c19cb2SAnton Blanchard  * The Hypervisor barfs if the lppaca crosses a page boundary. A 1k
4659c19cb2SAnton Blanchard  * alignment is sufficient to prevent this
4759c19cb2SAnton Blanchard  */
/* NOTE(review): this struct's field widths and reserved[] padding form the
 * layout shared with the hypervisor (see "shared between the hypervisor and
 * the OS" above) — do not reorder, resize, or repack fields. Multi-byte
 * fields are big-endian (__be16/__be32/__be64) as seen by the hypervisor. */
48b8b572e1SStephen Rothwell struct lppaca {
4959c19cb2SAnton Blanchard 	/* cacheline 1 contains read-only data */
50b8b572e1SStephen Rothwell 
51c72cd555SAnton Blanchard 	__be32	desc;			/* Eye catcher 0xD397D781 */
52c72cd555SAnton Blanchard 	__be16	size;			/* Size of this struct */
53f13c13a0SAnton Blanchard 	u8	reserved1[3];
54f13c13a0SAnton Blanchard 	u8	__old_status;		/* Old status, including shared proc */
5559c19cb2SAnton Blanchard 	u8	reserved3[14];
56c72cd555SAnton Blanchard 	volatile __be32 dyn_hw_node_id;	/* Dynamic hardware node id */
57c72cd555SAnton Blanchard 	volatile __be32 dyn_hw_proc_id;	/* Dynamic hardware proc id */
5859c19cb2SAnton Blanchard 	u8	reserved4[56];
5959c19cb2SAnton Blanchard 	volatile u8 vphn_assoc_counts[8]; /* Virtual processor home node */
6059c19cb2SAnton Blanchard 					  /* associativity change counters */
6159c19cb2SAnton Blanchard 	u8	reserved5[32];
62b8b572e1SStephen Rothwell 
6359c19cb2SAnton Blanchard 	/* cacheline 2 contains local read-write data */
64b8b572e1SStephen Rothwell 
6559c19cb2SAnton Blanchard 	u8	reserved6[48];
6659c19cb2SAnton Blanchard 	u8	cede_latency_hint;
676e0b8bc9SMichael Ellerman 	u8	ebb_regs_in_use;
686e0b8bc9SMichael Ellerman 	u8	reserved7[6];
6959c19cb2SAnton Blanchard 	u8	dtl_enable_mask;	/* Dispatch Trace Log mask */
7059c19cb2SAnton Blanchard 	u8	donate_dedicated_cpu;	/* Donate dedicated CPU cycles */
7159c19cb2SAnton Blanchard 	u8	fpregs_in_use;
7259c19cb2SAnton Blanchard 	u8	pmcregs_in_use;
7359c19cb2SAnton Blanchard 	u8	reserved8[28];
74c72cd555SAnton Blanchard 	__be64	wait_state_cycles;	/* Wait cycles for this proc */
7559c19cb2SAnton Blanchard 	u8	reserved9[28];
76c72cd555SAnton Blanchard 	__be16	slb_count;		/* # of SLBs to maintain */
7759c19cb2SAnton Blanchard 	u8	idle;			/* Indicate OS is idle */
7859c19cb2SAnton Blanchard 	u8	vmxregs_in_use;
79b8b572e1SStephen Rothwell 
8059c19cb2SAnton Blanchard 	/* cacheline 3 is shared with other processors */
8159c19cb2SAnton Blanchard 
8259c19cb2SAnton Blanchard 	/*
8359c19cb2SAnton Blanchard 	 * This is the yield_count. An "odd" value (low bit on) means that
8459c19cb2SAnton Blanchard 	 * the processor is yielded (either because of an OS yield or a
8559c19cb2SAnton Blanchard 	 * hypervisor preempt). An even value implies that the processor is
8659c19cb2SAnton Blanchard 	 * currently executing.
87733187e2SAnton Blanchard 	 * NOTE: Even dedicated processor partitions can yield so this
88733187e2SAnton Blanchard 	 * field cannot be used to determine if we are shared or dedicated.
8959c19cb2SAnton Blanchard 	 */
90c72cd555SAnton Blanchard 	volatile __be32 yield_count;
91c72cd555SAnton Blanchard 	volatile __be32 dispersion_count; /* dispatch changed physical cpu */
92c72cd555SAnton Blanchard 	volatile __be64 cmo_faults;	/* CMO page fault count */
93c72cd555SAnton Blanchard 	volatile __be64 cmo_fault_time;	/* CMO page fault time */
9459c19cb2SAnton Blanchard 	u8	reserved10[104];
9559c19cb2SAnton Blanchard 
9659c19cb2SAnton Blanchard 	/* cacheline 4-5 */
9759c19cb2SAnton Blanchard 
98c72cd555SAnton Blanchard 	__be32	page_ins;		/* CMO Hint - # page ins by OS */
9959c19cb2SAnton Blanchard 	u8	reserved11[148];
100c72cd555SAnton Blanchard 	volatile __be64 dtl_idx;	/* Dispatch Trace Log head index */
10159c19cb2SAnton Blanchard 	u8	reserved12[96];
102b8b572e1SStephen Rothwell } __attribute__((__aligned__(0x400)));
103b8b572e1SStephen Rothwell 
104b8b572e1SStephen Rothwell extern struct lppaca lppaca[];
105b8b572e1SStephen Rothwell 
/* Per-CPU lppaca, reached through the paca pointer array entry for 'cpu'. */
106*d2e60075SNicholas Piggin #define lppaca_of(cpu)	(*paca_ptrs[cpu]->lppaca_ptr)
1078154c5d2SPaul Mackerras 
108b8b572e1SStephen Rothwell /*
109733187e2SAnton Blanchard  * We are using a non architected field to determine if a partition is
110733187e2SAnton Blanchard  * shared or dedicated. This currently works on both KVM and PHYP, but
111733187e2SAnton Blanchard  * we will have to transition to something better.
112f13c13a0SAnton Blanchard  */
113f13c13a0SAnton Blanchard #define LPPACA_OLD_SHARED_PROC	2
114f13c13a0SAnton Blanchard 
/* Returns true when the shared-processor bit is set in the non-architected
 * __old_status byte (see comment above re: KVM/PHYP caveat). */
115f13c13a0SAnton Blanchard static inline bool lppaca_shared_proc(struct lppaca *l)
116f13c13a0SAnton Blanchard {
117733187e2SAnton Blanchard 	return !!(l->__old_status & LPPACA_OLD_SHARED_PROC);
118f13c13a0SAnton Blanchard }
119f13c13a0SAnton Blanchard 
120f13c13a0SAnton Blanchard /*
121b8b572e1SStephen Rothwell  * SLB shadow buffer structure as defined in the PAPR. The save_area
122b8b572e1SStephen Rothwell  * contains adjacent ESID and VSID pairs for each shadowed SLB. The
123b8b572e1SStephen Rothwell  * ESID is stored in the lower 64bits, then the VSID.
124b8b572e1SStephen Rothwell  */
125b8b572e1SStephen Rothwell struct slb_shadow {
126c72cd555SAnton Blanchard 	__be32	persistent;		/* Number of persistent SLBs */
127c72cd555SAnton Blanchard 	__be32	buffer_length;		/* Total shadow buffer length */
128c72cd555SAnton Blanchard 	__be64	reserved;
129b8b572e1SStephen Rothwell 	struct {
130c72cd555SAnton Blanchard 		__be64     esid;
131c72cd555SAnton Blanchard 		__be64	vsid;
13259c19cb2SAnton Blanchard 	} save_area[SLB_NUM_BOLTED];
133b8b572e1SStephen Rothwell } ____cacheline_aligned;
134b8b572e1SStephen Rothwell 
135cf9efce0SPaul Mackerras /*
136cf9efce0SPaul Mackerras  * Layout of entries in the hypervisor's dispatch trace log buffer.
137cf9efce0SPaul Mackerras  */
138cf9efce0SPaul Mackerras struct dtl_entry {
139cf9efce0SPaul Mackerras 	u8	dispatch_reason;
140cf9efce0SPaul Mackerras 	u8	preempt_reason;
141c72cd555SAnton Blanchard 	__be16	processor_id;
142c72cd555SAnton Blanchard 	__be32	enqueue_to_dispatch_time;
143c72cd555SAnton Blanchard 	__be32	ready_to_enqueue_time;
144c72cd555SAnton Blanchard 	__be32	waiting_to_ready_time;
145c72cd555SAnton Blanchard 	__be64	timebase;
146c72cd555SAnton Blanchard 	__be64	fault_addr;
147c72cd555SAnton Blanchard 	__be64	srr0;
148c72cd555SAnton Blanchard 	__be64	srr1;
149cf9efce0SPaul Mackerras };
150cf9efce0SPaul Mackerras 
151cf9efce0SPaul Mackerras #define DISPATCH_LOG_BYTES	4096	/* bytes per cpu */
152cf9efce0SPaul Mackerras #define N_DISPATCH_LOG		(DISPATCH_LOG_BYTES / sizeof(struct dtl_entry))
153cf9efce0SPaul Mackerras 
154af442a1bSNishanth Aravamudan extern struct kmem_cache *dtl_cache;
155af442a1bSNishanth Aravamudan 
156872e439aSPaul Mackerras /*
157abf917cdSFrederic Weisbecker  * When CONFIG_VIRT_CPU_ACCOUNTING_NATIVE = y, the cpu accounting code controls
158872e439aSPaul Mackerras  * reading from the dispatch trace log. If other code wants to consume
159872e439aSPaul Mackerras  * DTL entries, it can set this pointer to a function that will get
160872e439aSPaul Mackerras  * called once for each DTL entry that gets processed.
161872e439aSPaul Mackerras  */
162872e439aSPaul Mackerras extern void (*dtl_consumer)(struct dtl_entry *entry, u64 index);
163872e439aSPaul Mackerras 
16494491685SBenjamin Herrenschmidt #endif /* CONFIG_PPC_BOOK3S */
165b8b572e1SStephen Rothwell #endif /* __KERNEL__ */
166b8b572e1SStephen Rothwell #endif /* _ASM_POWERPC_LPPACA_H */
167