/titanic_50/usr/src/uts/i86pc/sys/
machcpuvar.h
  156  #define MWAIT_WAKEUP(cpu) (*((cpu)->cpu_m.mcpu_mwait) = MWAIT_RUNNING)
  162  #define cpu_nodeid cpu_m.mcpu_nodeid
  163  #define cpu_pri cpu_m.mcpu_pri
  164  #define cpu_current_hat cpu_m.mcpu_current_hat
  165  #define cpu_hat_info cpu_m.mcpu_hat_info
  166  #define cpu_ppaddr_mutex cpu_m.mcpu_ppaddr_mutex
  167  #define cpu_gdt cpu_m.mcpu_gdt
  168  #define cpu_idt cpu_m.mcpu_idt
  169  #define cpu_tss cpu_m.mcpu_tss
  170  #define cpu_ldt cpu_m.mcpu_ldt
  [all …]

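For orientation, these machcpuvar.h hits show the pattern behind every other result in this list: cpu_t embeds a machine-dependent struct machcpu as cpu_m, and the header then aliases common members back to short names. A minimal compilable sketch of that layout follows; the field types and the tiny main() are illustrative stand-ins, not the real illumos definitions.

```c
#include <stdio.h>

/* Illustrative stand-ins, not the real illumos definitions. */
struct machcpu {
	int	mcpu_pri;		/* current interrupt priority level */
	void	*mcpu_current_hat;	/* active address translation state */
};

typedef struct cpu {
	int		cpu_id;
	struct machcpu	cpu_m;		/* machine-dependent state */
} cpu_t;

/* Alias macros, as in machcpuvar.h: cp->cpu_pri means cp->cpu_m.mcpu_pri. */
#define	cpu_pri		cpu_m.mcpu_pri
#define	cpu_current_hat	cpu_m.mcpu_current_hat

int
main(void)
{
	cpu_t c = { 0 };

	c.cpu_pri = 10;			/* expands to c.cpu_m.mcpu_pri */
	printf("cpu %d pri %d\n", c.cpu_id, c.cpu_pri);
	return (0);
}
```
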
/titanic_50/usr/src/uts/i86pc/os/
x_call.c
  167  queue == &cpu[msg->xc_master]->cpu_m.xc_free); in xc_insert()
  219  xc_insert(&cpup->cpu_m.xc_free, msg); in xc_init_cpu()
  231  xc_insert(&cpu[c]->cpu_m.xc_free, msg); in xc_init_cpu()
  236  xc_insert(&cpup->cpu_m.xc_free, msg); in xc_init_cpu()
  247  xc_insert(&cpup->cpu_m.xc_free, msg); in xc_init_cpu()
  260  ASSERT(cpup->cpu_m.xc_msgbox == NULL); in xc_fini_cpu()
  261  ASSERT(cpup->cpu_m.xc_work_cnt == 0); in xc_fini_cpu()
  263  while ((msg = xc_extract(&cpup->cpu_m.xc_free)) != NULL) { in xc_fini_cpu()
  288  if (cpup->cpu_m.xc_work_cnt == 0) { in xc_flush_cpu()
  314  struct machcpu *mcpup = &(CPU->cpu_m); in xc_serv()
  [all …]

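The x_call.c hits all touch the per-CPU free list of cross-call messages kept in cpu_m.xc_free. A hedged sketch of the insert/extract idea is below; the real xc_insert()/xc_extract() use atomic compare-and-swap so other CPUs can post messages lock-free, while this single-threaded toy only shows the list handling, with made-up type names.

```c
#include <stdio.h>

/* Stand-in types; the kernel's xc_msg_t and machcpu differ. */
typedef struct xc_msg {
	struct xc_msg	*xc_next;	/* free-list linkage */
	int		xc_master;	/* CPU that owns this message */
} xc_msg_t;

typedef struct machcpu_sketch {
	xc_msg_t	*xc_free;	/* per-CPU free message list */
} machcpu_sketch_t;

static void
xc_insert_sketch(machcpu_sketch_t *mcpu, xc_msg_t *msg)
{
	msg->xc_next = mcpu->xc_free;	/* push onto the free list */
	mcpu->xc_free = msg;
}

static xc_msg_t *
xc_extract_sketch(machcpu_sketch_t *mcpu)
{
	xc_msg_t *msg = mcpu->xc_free;	/* pop the head, if any */

	if (msg != NULL)
		mcpu->xc_free = msg->xc_next;
	return (msg);
}

int
main(void)
{
	machcpu_sketch_t mcpu = { NULL };
	xc_msg_t a = { NULL, 0 }, b = { NULL, 0 };

	xc_insert_sketch(&mcpu, &a);
	xc_insert_sketch(&mcpu, &b);
	while (xc_extract_sketch(&mcpu) != NULL)	/* drain, like xc_fini_cpu() */
		printf("freed one message\n");
	return (0);
}
```
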
cpuid.c
  584  ASSERT(cpu->cpu_m.mcpu_cpi == NULL); in cpuid_alloc_space()
  585  cpu->cpu_m.mcpu_cpi = in cpuid_alloc_space()
  586  kmem_zalloc(sizeof (*cpu->cpu_m.mcpu_cpi), KM_SLEEP); in cpuid_alloc_space()
  592  struct cpuid_info *cpi = cpu->cpu_m.mcpu_cpi; in cpuid_free_space()
  608  cpu->cpu_m.mcpu_cpi = NULL; in cpuid_free_space()
  741  struct cpuid_info *cpi = cpu->cpu_m.mcpu_cpi; in cpuid_intel_getids()
  802  struct cpuid_info *cpi = cpu->cpu_m.mcpu_cpi; in cpuid_amd_getids()
  962  if (cpu->cpu_m.mcpu_cpi == NULL) in cpuid_pass1()
  963  cpu->cpu_m.mcpu_cpi = &cpuid_info0; in cpuid_pass1()
  968  cpi = cpu->cpu_m.mcpu_cpi; in cpuid_pass1()
  [all …]

intr.c
  529  struct machcpu *mcpu = &cpu->cpu_m; in hilevel_intr_prolog()
  625  struct machcpu *mcpu = &cpu->cpu_m; in hilevel_intr_epilog()
  706  struct machcpu *mcpu = &cpu->cpu_m; in intr_thread_prolog()
  772  struct machcpu *mcpu = &cpu->cpu_m; in intr_thread_epilog()
  915  mcpu = &cpu->cpu_m; in intr_get_time()
  945  struct machcpu *mcpu = &cpu->cpu_m; in dosoftint_prolog()
  1051  struct machcpu *mcpu = &cpu->cpu_m; in dosoftint_epilog()
  1190  hrt = (hrtime_t)cpup->cpu_m.intrstat[i + 1][0]; in cpu_kstat_intrstat_update()
  1232  cpu->cpu_m.intrstat[t->t_pil][0] += interval; in cpu_intr_swtch_enter()
  1356  ++*(uint16_t *)&cpu->cpu_m.mcpu_istamp; in do_interrupt()
  [all …]

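Several intr.c hits update cpu_m.intrstat, the per-PIL array of accumulated interrupt time. A small hedged sketch of that accounting step follows, assuming a simplified timestamp source and stand-in types rather than the kernel's gethrtime() and machcpu.

```c
#include <stdio.h>
#include <stdint.h>
#include <time.h>

#define	PIL_MAX	15

/* Stand-in for the machcpu member the hits above index. */
typedef struct machcpu_sketch {
	uint64_t	intrstat[PIL_MAX + 1][2];	/* [pil][0] = total ns */
} machcpu_sketch_t;

static uint64_t
hrtime_sketch(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ((uint64_t)ts.tv_sec * 1000000000ULL + ts.tv_nsec);
}

/*
 * Charge the time since 'start' to the given PIL, the same shape as
 * cpu->cpu_m.intrstat[t->t_pil][0] += interval in cpu_intr_swtch_enter().
 */
static void
intr_charge(machcpu_sketch_t *mcpu, int pil, uint64_t start)
{
	mcpu->intrstat[pil][0] += hrtime_sketch() - start;
}

int
main(void)
{
	machcpu_sketch_t mcpu = { { { 0 } } };
	uint64_t start = hrtime_sketch();

	intr_charge(&mcpu, 10, start);
	printf("pil 10 time: %llu ns\n",
	    (unsigned long long)mcpu.intrstat[10][0]);
	return (0);
}
```
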
mp_pc.c
  279  return (cp->cpu_m.mcpu_mach_ctx_ptr); in mach_cpucontext_xalloc()
  344  cp->cpu_m.mcpu_mach_ctx_ptr = arg; in mach_cpucontext_xfree()
  352  cp->cpu_m.mcpu_mach_ctx_ptr = arg; in mach_cpucontext_xfree()
  370  cp->cpu_m.mcpu_mach_ctx_ptr = NULL; in mach_cpucontext_xfree()
  595  if (cp->cpu_m.mcpu_cmi_hdl != NULL) { in mp_cpu_poweroff()
  596  cmi_fini(cp->cpu_m.mcpu_cmi_hdl); in mp_cpu_poweroff()
  597  cp->cpu_m.mcpu_cmi_hdl = NULL; in mp_cpu_poweroff()
  630  cp->cpu_m.mcpu_cmi_hdl = hdl; in mp_cpu_poweroff()

microcode.c
  147  ASSERT(cp->cpu_m.mcpu_ucode_info == NULL); in ucode_alloc_space()
  148  cp->cpu_m.mcpu_ucode_info = in ucode_alloc_space()
  149  kmem_zalloc(sizeof (*cp->cpu_m.mcpu_ucode_info), KM_SLEEP); in ucode_alloc_space()
  155  ASSERT(cp->cpu_m.mcpu_ucode_info != NULL); in ucode_free_space()
  156  ASSERT(cp->cpu_m.mcpu_ucode_info != &cpu_ucode_info0); in ucode_free_space()
  157  kmem_free(cp->cpu_m.mcpu_ucode_info, in ucode_free_space()
  158  sizeof (*cp->cpu_m.mcpu_ucode_info)); in ucode_free_space()
  159  cp->cpu_m.mcpu_ucode_info = NULL; in ucode_free_space()
  736  cpu_ucode_info_t *uinfop = CPU->cpu_m.mcpu_ucode_info; in ucode_write()
  1069  bcopy(cpu->cpu_m.mcpu_ucode_info, &uusp->info, in ucode_update()
  [all …]

/titanic_50/usr/src/cmd/mdb/sun4v/modules/errh/
errh.c
  69  if (cpu.cpu_m.cpu_rq_lastre == 0) in resumable()
  73  cpu.cpu_m.cpu_rq_lastre); in resumable()
  74  } else if (cpu.cpu_m.cpu_rq_lastre != 0) in resumable()
  75  mdb_printf("%lx\n", cpu.cpu_m.cpu_rq_lastre); in resumable()
  112  if (cpu.cpu_m.cpu_nrq_lastnre == 0) in nonresumable()
  116  cpu.cpu_m.cpu_nrq_lastnre); in nonresumable()
  117  } else if (cpu.cpu_m.cpu_nrq_lastnre != 0) in nonresumable()
  118  mdb_printf("%lx\n", cpu.cpu_m.cpu_nrq_lastnre); in nonresumable()
  144  ao = (uintptr_t)cpu.cpu_m.cpu_rq_lastre; /* beginning and end */ in rqueue()
  145  lower = (uintptr_t)cpu.cpu_m.cpu_rq_va + CPU_RQ_SIZE; in rqueue()
  [all …]

/titanic_50/usr/src/uts/sun4v/cpu/
generic.c
  163  cp->cpu_m.cpu_ipipe = cpunodes[cp->cpu_id].exec_unit_mapping; in cpu_map_exec_units()
  164  if (cp->cpu_m.cpu_ipipe == NO_EU_MAPPING_FOUND) in cpu_map_exec_units()
  165  cp->cpu_m.cpu_ipipe = (id_t)(cp->cpu_id); in cpu_map_exec_units()
  167  cp->cpu_m.cpu_fpu = cpunodes[cp->cpu_id].fpu_mapping; in cpu_map_exec_units()
  168  if (cp->cpu_m.cpu_fpu == NO_EU_MAPPING_FOUND) in cpu_map_exec_units()
  169  cp->cpu_m.cpu_fpu = (id_t)(cp->cpu_id); in cpu_map_exec_units()
  177  cp->cpu_m.cpu_mpipe = cpunodes[cp->cpu_id].l2_cache_mapping; in cpu_map_exec_units()
  178  if (cp->cpu_m.cpu_mpipe == NO_L2_CACHE_MAPPING_FOUND) in cpu_map_exec_units()
  179  cp->cpu_m.cpu_mpipe = CPU_L2_CACHEID_INVALID; in cpu_map_exec_units()
  181  cp->cpu_m.cpu_core = (id_t)(cp->cpu_id); in cpu_map_exec_units()
  [all …]

niagara.c
  182  cp->cpu_m.cpu_ipipe = cpunodes[cp->cpu_id].exec_unit_mapping; in cpu_map_exec_units()
  183  if (cp->cpu_m.cpu_ipipe == NO_EU_MAPPING_FOUND) in cpu_map_exec_units()
  184  cp->cpu_m.cpu_ipipe = (id_t)(cp->cpu_id); in cpu_map_exec_units()
  186  cp->cpu_m.cpu_fpu = cpunodes[cp->cpu_id].fpu_mapping; in cpu_map_exec_units()
  187  if (cp->cpu_m.cpu_fpu == NO_EU_MAPPING_FOUND) in cpu_map_exec_units()
  188  cp->cpu_m.cpu_fpu = (id_t)(cp->cpu_id); in cpu_map_exec_units()
  193  cp->cpu_m.cpu_core = cp->cpu_m.cpu_ipipe; in cpu_map_exec_units()
  199  cp->cpu_m.cpu_chip = 0; in cpu_map_exec_units()
  200  cp->cpu_m.cpu_mpipe = 0; in cpu_map_exec_units()

niagara2.c
  209  cp->cpu_m.cpu_ipipe = cpunodes[cp->cpu_id].exec_unit_mapping; in cpu_map_exec_units()
  210  if (cp->cpu_m.cpu_ipipe == NO_EU_MAPPING_FOUND) in cpu_map_exec_units()
  211  cp->cpu_m.cpu_ipipe = (id_t)(cp->cpu_id); in cpu_map_exec_units()
  213  cp->cpu_m.cpu_fpu = cpunodes[cp->cpu_id].fpu_mapping; in cpu_map_exec_units()
  214  if (cp->cpu_m.cpu_fpu == NO_EU_MAPPING_FOUND) in cpu_map_exec_units()
  215  cp->cpu_m.cpu_fpu = (id_t)(cp->cpu_id); in cpu_map_exec_units()
  220  cp->cpu_m.cpu_core = cp->cpu_m.cpu_fpu; in cpu_map_exec_units()
  228  cp->cpu_m.cpu_mpipe = cpunodes[cp->cpu_id].l2_cache_mapping; in cpu_map_exec_units()
  229  if (cp->cpu_m.cpu_mpipe == NO_L2_CACHE_MAPPING_FOUND) in cpu_map_exec_units()
  230  cp->cpu_m.cpu_mpipe = CPU_L2_CACHEID_INVALID; in cpu_map_exec_units()
  [all …]

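The three sun4v cpu_map_exec_units() variants above share one shape: copy an execution-unit mapping out of the machine-description node into cpu_m, and fall back to the CPU's own id when no mapping exists. A hedged sketch of that fallback logic, with stand-in structures and constants rather than the kernel's:

```c
#include <stdio.h>

#define	NO_EU_MAPPING_FOUND	(-1)	/* stand-in sentinel value */

struct cpunode_sketch {
	int	exec_unit_mapping;	/* integer pipeline id from the MD */
	int	fpu_mapping;		/* FPU id from the MD */
};

struct machcpu_sketch {
	int	cpu_ipipe;		/* integer pipeline shared with siblings */
	int	cpu_fpu;		/* floating-point unit id */
};

static void
map_exec_units(struct machcpu_sketch *m, const struct cpunode_sketch *node,
    int cpu_id)
{
	m->cpu_ipipe = node->exec_unit_mapping;
	if (m->cpu_ipipe == NO_EU_MAPPING_FOUND)
		m->cpu_ipipe = cpu_id;		/* fall back to own id */

	m->cpu_fpu = node->fpu_mapping;
	if (m->cpu_fpu == NO_EU_MAPPING_FOUND)
		m->cpu_fpu = cpu_id;
}

int
main(void)
{
	struct cpunode_sketch node = { 3, NO_EU_MAPPING_FOUND };
	struct machcpu_sketch m;

	map_exec_units(&m, &node, 7);
	printf("ipipe %d fpu %d\n", m.cpu_ipipe, m.cpu_fpu);	/* 3 and 7 */
	return (0);
}
```
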
/titanic_50/usr/src/uts/sun4v/os/
cmp.c
  86  return (cpu[cpuid]->cpu_m.cpu_chip); in cmp_cpu_to_chip()
  120  return (cpu->cpu_m.cpu_ipipe); in pg_plat_hw_instance_id()
  122  return (cpu->cpu_m.cpu_chip); in pg_plat_hw_instance_id()
  124  return (cpu->cpu_m.cpu_mpipe); in pg_plat_hw_instance_id()
  126  return (cpu->cpu_m.cpu_fpu); in pg_plat_hw_instance_id()
  176  return (cpu->cpu_m.cpu_core); in pg_plat_get_core_id()

/titanic_50/usr/src/uts/sun4/ml/
offsets.in
  359  cpu_m CPU_MCPU
  360  cpu_m.divisor CPU_DIVISOR
  361  cpu_m.intrstat CPU_INTRSTAT
  362  cpu_m.pil_high_start CPU_PIL_HIGH_START
  363  cpu_m.intr_tail INTR_TAIL
  364  cpu_m.intr_head INTR_HEAD
  365  cpu_m.tl1_hdlr CPU_TL1_HDLR
  366  cpu_m.intrcnt CPU_INTRCNT
  367  cpu_m.tmp1 CPU_TMP1
  368  cpu_m.tmp2 CPU_TMP2
  [all …]

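offsets.in is not C source; each line pairs a member expression (for example cpu_m.intrstat) with the assembler symbol it should generate (CPU_INTRSTAT), and a build-time tool emits the byte offsets so assembly code can index into cpu_t. A hedged genassym-style sketch of how such offsets could be produced, using a toy cpu_t rather than the real structure:

```c
#include <stdio.h>
#include <stddef.h>

/* Toy structures standing in for the real cpu_t/machcpu. */
struct machcpu_sketch {
	int		divisor;
	unsigned long	intrstat[16][2];
};

struct cpu_sketch {
	int			cpu_id;
	struct machcpu_sketch	cpu_m;
};

int
main(void)
{
	/* Emit #defines for an assembler to include, as a genassym tool would. */
	printf("#define\tCPU_MCPU\t0x%zx\n",
	    offsetof(struct cpu_sketch, cpu_m));
	printf("#define\tCPU_DIVISOR\t0x%zx\n",
	    offsetof(struct cpu_sketch, cpu_m.divisor));
	printf("#define\tCPU_INTRSTAT\t0x%zx\n",
	    offsetof(struct cpu_sketch, cpu_m.intrstat));
	return (0);
}
```
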
/titanic_50/usr/src/uts/i86pc/os/cpupm/
cpu_idle.c
  177  struct machcpu *mcpu = &(cp->cpu_m); in cstate_wakeup()
  252  mcpu_mwait = cpu_seq[cpu_found]->cpu_m.mcpu_mwait; in cstate_wakeup()
  334  volatile uint32_t *mcpu_mwait = CPU->cpu_m.mcpu_mwait; in acpi_cpu_cstate()
  565  (cpupm_mach_state_t *)cp->cpu_m.mcpu_pm_mach_state; in cpu_acpi_idle()
  653  (cpupm_mach_state_t *)cp->cpu_m.mcpu_pm_mach_state; in cpu_idle_init()
  738  (cpupm_mach_state_t *)(cp->cpu_m.mcpu_pm_mach_state); in cpu_idle_fini()
  746  idle_cpu = cp->cpu_m.mcpu_idle_cpu = non_deep_idle_cpu; in cpu_idle_fini()
  786  CPU->cpu_m.mcpu_idle_cpu = non_deep_idle_cpu; in cpu_idle_stop_sync()
  793  (cpupm_mach_state_t *)(cp->cpu_m.mcpu_pm_mach_state); in cpu_idle_stop()
  803  cp->cpu_m.mcpu_idle_cpu = cpu_idle_stop_sync; in cpu_idle_stop()
  [all …]

cpupm_mach.c
  149  struct machcpu *mcpu = &(cp->cpu_m); in cpupm_init()
  155  mach_state = cp->cpu_m.mcpu_pm_mach_state = in cpupm_init()
  243  cp->cpu_m.mcpu_idle_cpu = cpu_acpi_idle; in cpupm_init()
  300  (cpupm_mach_state_t *)cp->cpu_m.mcpu_pm_mach_state; in cpupm_free()
  339  cp->cpu_m.mcpu_pm_mach_state = NULL; in cpupm_free()
  378  (cpupm_mach_state_t *)cp->cpu_m.mcpu_pm_mach_state; in cpupm_is_ready()
  428  (cpupm_mach_state_t *)(cp->cpu_m.mcpu_pm_mach_state); in cpupm_alloc_domains()
  538  (cpupm_mach_state_t *)(cp->cpu_m.mcpu_pm_mach_state); in cpupm_remove_domains()
  589  mach_state = (cpupm_mach_state_t *)(cp->cpu_m.mcpu_pm_mach_state); in cpupm_alloc_ms_cstate()
  601  (cpupm_mach_state_t *)(cp->cpu_m.mcpu_pm_mach_state); in cpupm_free_ms_cstate()
  [all …]

cpupm_throttle.c
  160  (cpupm_mach_state_t *)CPU->cpu_m.mcpu_pm_mach_state; in cpupm_tstate_transition()
  226  (cpupm_mach_state_t *)cp->cpu_m.mcpu_pm_mach_state; in cpupm_throttle_init()
  267  (cpupm_mach_state_t *)cp->cpu_m.mcpu_pm_mach_state; in cpupm_throttle_fini()
  278  (cpupm_mach_state_t *)cp->cpu_m.mcpu_pm_mach_state; in cpupm_throttle_stop()
  294  (cpupm_mach_state_t *)(cp->cpu_m.mcpu_pm_mach_state); in cpupm_throttle_get_max()
  331  (cpupm_mach_state_t *)cp->cpu_m.mcpu_pm_mach_state; in cpupm_throttle_manage_notification()

pwrnow.c
  117  (cpupm_mach_state_t *)CPU->cpu_m.mcpu_pm_mach_state; in pwrnow_pstate_transition()
  170  (cpupm_mach_state_t *)cp->cpu_m.mcpu_pm_mach_state; in pwrnow_init()
  223  (cpupm_mach_state_t *)(cp->cpu_m.mcpu_pm_mach_state); in pwrnow_fini()
  301  (cpupm_mach_state_t *)(cp->cpu_m.mcpu_pm_mach_state); in pwrnow_stop()

speedstep.c
  133  (cpupm_mach_state_t *)CPU->cpu_m.mcpu_pm_mach_state; in speedstep_pstate_transition()
  185  (cpupm_mach_state_t *)cp->cpu_m.mcpu_pm_mach_state; in speedstep_init()
  238  (cpupm_mach_state_t *)(cp->cpu_m.mcpu_pm_mach_state); in speedstep_fini()
  253  (cpupm_mach_state_t *)(cp->cpu_m.mcpu_pm_mach_state); in speedstep_stop()

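The cpupm, pwrnow, and speedstep hits above repeat one idiom: per-CPU power-management state hangs off cpu_m.mcpu_pm_mach_state as an opaque pointer that each consumer casts back to its own type and NULL-checks before use. A hedged sketch with illustrative types, not the kernel's cpupm_mach_state_t:

```c
#include <stdio.h>

/* Stand-in for cpupm_mach_state_t. */
typedef struct cpupm_mach_state_sketch {
	int	ms_caps;		/* supported PM features */
} cpupm_mach_state_sketch_t;

/* Stand-in for the machcpu member used above. */
typedef struct machcpu_sketch {
	void	*mcpu_pm_mach_state;	/* opaque, owned by cpupm */
} machcpu_sketch_t;

static int
pm_is_ready(machcpu_sketch_t *mcpu)
{
	cpupm_mach_state_sketch_t *ms =
	    (cpupm_mach_state_sketch_t *)mcpu->mcpu_pm_mach_state;

	return (ms != NULL && ms->ms_caps != 0);	/* bail if uninitialized */
}

int
main(void)
{
	machcpu_sketch_t mcpu = { NULL };
	cpupm_mach_state_sketch_t ms = { 1 };

	printf("ready before init: %d\n", pm_is_ready(&mcpu));
	mcpu.mcpu_pm_mach_state = &ms;	/* roughly what cpupm_init() sets up */
	printf("ready after init:  %d\n", pm_is_ready(&mcpu));
	return (0);
}
```
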
/titanic_50/usr/src/uts/sun4/os/
mp_states.c
  67  CPU->cpu_m.in_prom = 1; in cpu_idle_self()
  77  CPU->cpu_m.in_prom = 0; in cpu_idle_self()
  112  while (!cpu[i]->cpu_m.in_prom && ntries) { in idle_other_cpus()
  121  if (!cpu[i]->cpu_m.in_prom) { in idle_other_cpus()
  159  while (cpu[i]->cpu_m.in_prom && ntries) { in resume_other_cpus()
  168  if (cpu[i]->cpu_m.in_prom) { in resume_other_cpus()

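mp_states.c uses cpu_m.in_prom as a handshake flag: a CPU entering the PROM sets it, and idle_other_cpus()/resume_other_cpus() spin on it with a bounded retry count. A hedged user-space sketch of that polling shape, with made-up constants and usleep() standing in for the kernel's DELAY():

```c
#include <stdio.h>
#include <unistd.h>

#define	NCPU_SKETCH	4
#define	IDLE_RETRIES	100000

static volatile int in_prom[NCPU_SKETCH];	/* stands in for cpu_m.in_prom */

static int
wait_for_cpu_idle(int i)
{
	int ntries = IDLE_RETRIES;

	while (!in_prom[i] && ntries) {		/* same shape as idle_other_cpus() */
		usleep(10);			/* real code uses DELAY() */
		ntries--;
	}
	if (!in_prom[i]) {
		fprintf(stderr, "cpu %d failed to idle\n", i);
		return (-1);
	}
	return (0);
}

int
main(void)
{
	int i;

	for (i = 0; i < NCPU_SKETCH; i++)
		in_prom[i] = 1;			/* pretend everyone checked in */
	for (i = 1; i < NCPU_SKETCH; i++)
		(void) wait_for_cpu_idle(i);
	printf("all other cpus idle\n");
	return (0);
}
```
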
/titanic_50/usr/src/uts/i86pc/io/
cpudrv_mach.c
  67  (cpupm_mach_state_t *)cp->cpu_m.mcpu_pm_mach_state; in cpudrv_change_speed()
  106  cpudsp->cp->cpu_m.mcpu_pm_mach_state != NULL) { in cpudrv_is_enabled()
  108  (cpupm_mach_state_t *)cpudsp->cp->cpu_m.mcpu_pm_mach_state; in cpudrv_is_enabled()
  225  mach_state = (cpupm_mach_state_t *)cp->cpu_m.mcpu_pm_mach_state; in cpudrv_notify_handler()
  253  (cpupm_mach_state_t *)cp->cpu_m.mcpu_pm_mach_state; in cpudrv_uninstall_notify_handler()
  301  (cpudsp->cp->cpu_m.mcpu_pm_mach_state); in cpudrv_mach_init()

/titanic_50/usr/src/uts/sun4u/sys/
machcpuvar.h
  145  #define cpu_startup_thread cpu_m.startup_thread
  146  #define CPU_MMU_IDX(cp) ((cp)->cpu_m.cpu_mmu_idx)
  147  #define CPU_MMU_CTXP(cp) ((cp)->cpu_m.cpu_mmu_ctxp)
  153  #define CPU_PRIVATE(cp) ((cp)->cpu_m.cpu_private)

/titanic_50/usr/src/uts/i86pc/ml/
offsets.in
  219  cpu_m.pil_high_start CPU_PIL_HIGH_START
  220  cpu_m.intrstat CPU_INTRSTAT
  221  cpu_m.mcpu_current_hat CPU_CURRENT_HAT
  222  cpu_m.mcpu_gdt CPU_GDT
  223  cpu_m.mcpu_idt CPU_IDT
  224  cpu_m.mcpu_tss CPU_TSS
  225  cpu_m.mcpu_softinfo CPU_SOFTINFO
  226  cpu_m.mcpu_pri CPU_PRI
  228  cpu_m.mcpu_vcpu_info CPU_VCPU_INFO

/titanic_50/usr/src/uts/sun4v/sys/
machcpuvar.h
  188  #define cpu_startup_thread cpu_m.startup_thread
  189  #define CPU_MMU_IDX(cp) ((cp)->cpu_m.cpu_mmu_idx)
  190  #define CPU_MMU_CTXP(cp) ((cp)->cpu_m.cpu_mmu_ctxp)
  196  #define CPU_PRIVATE(cp) ((cp)->cpu_m.cpu_private)

/titanic_50/usr/src/uts/i86xpv/os/
xpv_timestamp.c
  100  src = &CPU->cpu_m.mcpu_vcpu_info->time; in dtrace_xpv_getsystime()
  108  stamp = CPU->cpu_m.mcpu_istamp; in dtrace_xpv_getsystime()
  128  CPU->cpu_m.mcpu_istamp != stamp); in dtrace_xpv_getsystime()

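The xpv_timestamp.c hits show a consistency check built on cpu_m.mcpu_istamp, which do_interrupt() increments: snapshot the stamp, copy the hypervisor-shared time fields, and retry if the stamp changed underneath the copy. A hedged sketch of that retry loop, with stand-in data in place of the Xen vcpu_info time block:

```c
#include <stdint.h>
#include <stdio.h>

static volatile uint16_t istamp;	/* stands in for cpu_m.mcpu_istamp */
static volatile uint64_t shared_time;	/* stands in for vcpu_info->time */

static uint64_t
read_time_consistent(void)
{
	uint16_t stamp;
	uint64_t t;

	do {
		stamp = istamp;		/* snapshot before the read */
		t = shared_time;	/* multi-field copy in the real code */
	} while (istamp != stamp);	/* interrupted: the copy may be torn */

	return (t);
}

int
main(void)
{
	shared_time = 123456789;
	printf("time %llu\n", (unsigned long long)read_time_consistent());
	return (0);
}
```
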
mp_xen.c
  271  &cp->cpu_m.mcpu_idt[vec], ti) == 0) { in mp_set_cpu_context()
  286  vgc->gdt_frames[0] = pa_to_ma(mmu_btop(cp->cpu_m.mcpu_gdtpa)); in mp_set_cpu_context()
  288  vgc->gdt_frames[0] = pfn_to_mfn(mmu_btop(cp->cpu_m.mcpu_gdtpa)); in mp_set_cpu_context()
  348  cp->cpu_m.mcpu_vcpu_info = in mach_cpucontext_alloc()
  350  cp->cpu_m.mcpu_evt_pend = kmem_zalloc( in mach_cpucontext_alloc()
  352  cp->cpu_m.mcpu_gdtpa = in mach_cpucontext_alloc()
  428  kmem_free(cp->cpu_m.mcpu_evt_pend, in mach_cpucontext_free()
  441  bzero(cp->cpu_m.mcpu_evt_pend, sizeof (struct xen_evt_data)); in mach_cpucontext_reset()

/titanic_50/usr/src/uts/sun4u/os/
mach_cpu_states.c
  233  CPU->cpu_m.in_prom = 1; in panic_idle()
  261  while (!cpu[i]->cpu_m.in_prom && ntries) { in panic_stopcpus()
  266  if (!cpu[i]->cpu_m.in_prom) in panic_stopcpus()
  401  ptl1_state_t *pstate = &cpu->cpu_m.ptl1_state; in ptl1_init_cpu()