Lines Matching defs:cpu
82 typedef struct cpu {
83 processorid_t cpu_id; /* CPU number */
84 processorid_t cpu_seqid; /* sequential CPU id (0..ncpus-1) */
85 volatile cpu_flag_t cpu_flags; /* flags indicating CPU state */
86 struct cpu *cpu_self; /* pointer to itself */
87 kthread_t *cpu_thread; /* current thread */
88 kthread_t *cpu_idle_thread; /* idle thread for this CPU */
89 kthread_t *cpu_pause_thread; /* pause thread for this CPU */
90 klwp_id_t cpu_lwp; /* current lwp (if any) */
91 klwp_id_t cpu_fpowner; /* currently loaded fpu owner */
92 struct cpupart *cpu_part; /* partition with this CPU */
93 struct lgrp_ld *cpu_lpl; /* pointer to this cpu's load */
94 int cpu_cache_offset; /* see kmem.c for details */
105 struct cpu *cpu_next; /* next existing CPU */
106 struct cpu *cpu_prev; /* prev existing CPU */
107 struct cpu *cpu_next_onln; /* next online (enabled) CPU */
108 struct cpu *cpu_prev_onln; /* prev online (enabled) CPU */
109 struct cpu *cpu_next_part; /* next CPU in partition */
110 struct cpu *cpu_prev_part; /* prev CPU in partition */
111 struct cpu *cpu_next_lgrp; /* next CPU in latency group */
112 struct cpu *cpu_prev_lgrp; /* prev CPU in latency group */
113 struct cpu *cpu_next_lpl; /* next CPU in lgrp partition */
114 struct cpu *cpu_prev_lpl; /* prev CPU in lgrp partition */
116 struct cpu_pg *cpu_pg; /* cpu's processor groups */
118 void *cpu_reserved[4]; /* reserved for future use */
123 disp_t *cpu_disp; /* dispatch queue data */
129 char cpu_runrun; /* scheduling flag - set to preempt */
130 char cpu_kprunrun; /* force kernel preemption */
131 pri_t cpu_chosen_level; /* priority at which cpu was chosen for scheduling */
133 kthread_t *cpu_dispthread; /* thread selected for dispatch */
134 disp_lock_t cpu_thread_lock; /* dispatcher lock on current thread */
135 uint8_t cpu_disp_flags; /* flags used by dispatcher */
141 pri_t cpu_dispatch_pri; /* priority of cpu_dispthread */
142 clock_t cpu_last_swtch; /* last time switched to new thread */
147 caddr_t cpu_intr_stack; /* interrupt stack */
148 kthread_t *cpu_intr_thread; /* interrupt thread list */
149 uint_t cpu_intr_actv; /* interrupt levels active (bitmask) */
150 int cpu_base_spl; /* priority for highest interrupt active */
155 cpu_stats_t cpu_stats; /* per-CPU statistics */
156 struct kstat *cpu_info_kstat; /* kstat for cpu info */
158 uintptr_t cpu_profile_pc; /* kernel PC in profile interrupt */
159 uintptr_t cpu_profile_upc; /* user PC in profile interrupt */
160 uintptr_t cpu_profile_pil; /* PIL when profile interrupted */
162 ftrace_data_t cpu_ftrace; /* per cpu ftrace data */
164 clock_t cpu_deadman_counter; /* used by deadman() */
165 uint_t cpu_deadman_countdown; /* used by deadman() */
167 kmutex_t cpu_cpc_ctxlock; /* protects context for idle thread */
168 kcpc_ctx_t *cpu_cpc_ctx; /* performance counter context */
173 processor_info_t cpu_type_info; /* config info */
174 time_t cpu_state_begin; /* when CPU entered current state */
175 char cpu_cpr_flags; /* CPR related info */
176 struct cyc_cpu *cpu_cyclic; /* per cpu cyclic subsystem data */
177 struct squeue_set_s *cpu_squeue_set; /* per cpu squeue set */
178 struct nvlist *cpu_props; /* pool-related properties */
180 krwlock_t cpu_ft_lock; /* DTrace: fasttrap lock */
181 uintptr_t cpu_dtrace_caller; /* DTrace: caller, if any */
182 hrtime_t cpu_dtrace_chillmark; /* DTrace: chill mark time */
183 hrtime_t cpu_dtrace_chilled; /* DTrace: total chill time */
184 uint64_t cpu_dtrace_probes; /* DTrace: total probes fired */
185 hrtime_t cpu_dtrace_nsec; /* DTrace: ns in dtrace_probe */
187 volatile uint16_t cpu_mstate; /* cpu microstate */
188 volatile uint16_t cpu_mstate_gen; /* generation counter */
189 volatile hrtime_t cpu_mstate_start; /* cpu microstate start time */
190 volatile hrtime_t cpu_acct[NCMSTATES]; /* cpu microstate data */
191 hrtime_t cpu_intracct[NCMSTATES]; /* interrupt mstate data */
192 hrtime_t cpu_waitrq; /* cpu run-queue wait time */
193 struct loadavg_s cpu_loadavg; /* loadavg info for this cpu */
195 char *cpu_idstr; /* for printing and debugging */
196 char *cpu_brandstr; /* for printing */
202 int32_t cpu_intr_weight; /* interrupt weight */
203 void *cpu_vm_data;
205 struct cpu_physid *cpu_physid; /* physical associations */
207 uint64_t cpu_curr_clock; /* current clock freq in Hz */
208 char *cpu_supp_freqs; /* supported freqs in Hz */
210 uintptr_t cpu_cpcprofile_pc; /* kernel PC in cpc interrupt */
211 uintptr_t cpu_cpcprofile_upc; /* user PC in cpc interrupt */
216 hrtime_t cpu_intrlast; /* total interrupt time (nsec) */
217 int cpu_intrload; /* interrupt load factor (0-99%) */
219 uint_t cpu_rotor; /* for cheap pseudo-random numbers */
221 struct cu_cpu_info *cpu_cu_info; /* capacity & util. info */
229 volatile uint_t cpu_generation; /* tracking on/off-line */
238 uintptr_t cpu_m_pad; /* padding before the machine-dependent part */
241 struct machcpu cpu_m; /* per architecture info */
349 #define CPU_ACTIVE(cpu) (((cpu)->cpu_flags & CPU_OFFLINE) == 0)
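
The cpu_next/cpu_prev members link every cpu_t into a circular list, with parallel rings for online CPUs, partition members, latency-group members, and lgrp partitions. A minimal kernel-side sketch of walking the main ring, assuming the usual illumos cpu_list and cpu_lock globals declared elsewhere in this header (not part of the listing above):

/*
 * Sketch only: count online CPUs by walking the circular cpu_next ring.
 * Assumes cpu_list (head of the ring) and cpu_lock (which protects the
 * CPU lists), both declared outside this excerpt.
 */
#include <sys/types.h>
#include <sys/mutex.h>
#include <sys/cpuvar.h>

static int
count_active_cpus(void)
{
	cpu_t *cp;
	int n = 0;

	mutex_enter(&cpu_lock);		/* keep the CPU lists stable while we walk */
	cp = cpu_list;
	do {
		if (CPU_ACTIVE(cp))	/* i.e. not CPU_OFFLINE, per the macro above */
			n++;
		cp = cp->cpu_next;	/* circular: eventually wraps back to cpu_list */
	} while (cp != cpu_list);
	mutex_exit(&cpu_lock);

	return (n);
}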
428 #define CPUSET_ALL_BUT(set, cpu) cpuset_all_but(&(set), cpu)
429 #define CPUSET_ONLY(set, cpu) cpuset_only(&(set), cpu)
430 #define CPU_IN_SET(set, cpu) BT_TEST((set).cpub, cpu)
431 #define CPUSET_ADD(set, cpu) BT_SET((set).cpub, cpu)
432 #define CPUSET_DEL(set, cpu) BT_CLEAR((set).cpub, cpu)
441 #define CPUSET_FIND(set, cpu) { \
462 #define CPUSET_ATOMIC_DEL(set, cpu) BT_ATOMIC_CLEAR((set).cpub, (cpu))
463 #define CPUSET_ATOMIC_ADD(set, cpu) BT_ATOMIC_SET((set).cpub, (cpu))
465 #define CPUSET_ATOMIC_XADD(set, cpu, result) \
468 #define CPUSET_ATOMIC_XDEL(set, cpu, result) \
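
These CPUSET_* macros (this is the multi-word, BT_*-based variant) all take the set by name. A minimal usage sketch, assuming cpuset_t is the set type these macros manipulate, defined earlier in the same header, and that CPUSET_FIND stores the lowest CPU id present in the set:

/*
 * Sketch only: basic cpuset_t manipulation using the macros above.
 */
static void
cpuset_example(processorid_t a, processorid_t b)
{
	cpuset_t set;
	uint_t found;

	CPUSET_ONLY(set, a);		/* set = { a } */
	CPUSET_ADD(set, b);		/* set = { a, b } */

	if (CPU_IN_SET(set, a))
		CPUSET_DEL(set, a);	/* set = { b } */

	CPUSET_ATOMIC_ADD(set, a);	/* add safely against concurrent updaters */

	CPUSET_FIND(set, found);	/* lowest CPU id now in the set */
}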
500 #define CPUSET(cpu) (1UL << (cpu))
503 #define CPUSET_ALL_BUT(set, cpu) ((void)((set) = ~CPUSET(cpu)))
504 #define CPUSET_ONLY(set, cpu) ((void)((set) = CPUSET(cpu)))
505 #define CPU_IN_SET(set, cpu) ((set) & CPUSET(cpu))
506 #define CPUSET_ADD(set, cpu) ((void)((set) |= CPUSET(cpu)))
507 #define CPUSET_DEL(set, cpu) ((void)((set) &= ~CPUSET(cpu)))
515 #define CPUSET_FIND(set, cpu) { \
524 #define CPUSET_ATOMIC_DEL(set, cpu) atomic_and_ulong(&(set), ~CPUSET(cpu))
525 #define CPUSET_ATOMIC_ADD(set, cpu) atomic_or_ulong(&(set), CPUSET(cpu))
527 #define CPUSET_ATOMIC_XADD(set, cpu, result) \
530 #define CPUSET_ATOMIC_XDEL(set, cpu, result) \
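
Both groups define the same macro names. The second group is the single-word variant: when the configured CPU count fits in one unsigned long, CPUSET(cpu) is a plain shifted bit, CPU_IN_SET and friends compile down to masks, and the atomic forms use the atomic_and_ulong/atomic_or_ulong primitives; otherwise the earlier BT_*-based definitions operate on the cpub[] bit array inside cpuset_t. Callers write the same CPUSET_ADD/CPU_IN_SET/CPUSET_FIND code either way, so they are insulated from which representation was configured.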
545 #define CPU_CPR_IS_OFFLINE(cpu) (((cpu)->cpu_cpr_flags & CPU_CPR_ONLINE) == 0)
546 #define CPU_CPR_IS_ONLINE(cpu) ((cpu)->cpu_cpr_flags & CPU_CPR_ONLINE)
547 #define CPU_SET_CPR_FLAGS(cpu, flag) ((cpu)->cpu_cpr_flags |= flag)
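
The CPU_CPR_* macros test and set per-CPU checkpoint/resume (CPR) state kept in cpu_cpr_flags (line 175 of the struct above). A minimal sketch of how suspend/resume code might use them; CPU_CPR_ONLINE is the flag bit the macros themselves reference, everything else here is illustrative:

/*
 * Sketch only: remember whether a CPU was online before suspend so the
 * resume path knows whether to bring it back online.
 */
static void
note_cpu_cpr_state(cpu_t *cp)
{
	if (CPU_ACTIVE(cp))
		CPU_SET_CPR_FLAGS(cp, CPU_CPR_ONLINE);
}

static boolean_t
cpu_needs_restart(cpu_t *cp)
{
	/* only CPUs marked online before suspend are restarted on resume */
	return (CPU_CPR_IS_ONLINE(cp) ? B_TRUE : B_FALSE);
}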