Lines Matching defs:cp

Cross-reference hits for the local symbol cp, a struct cpu (cpu_t) pointer, in the x86 MP CPU startup and teardown code. Each entry is the source line number followed by the matching line.

123 init_cpu_info(struct cpu *cp)
125 processor_info_t *pi = &cp->cpu_type_info;
135 cp->cpu_curr_clock = cpu_freq_hz;
140 if (cp->cpu_supp_freqs == NULL) {
141 cpu_set_supp_freqs(cp, NULL);
148 cp->cpu_idstr = kmem_zalloc(CPU_IDSTRLEN, KM_SLEEP);
149 cp->cpu_brandstr = kmem_zalloc(CPU_IDSTRLEN, KM_SLEEP);
152 * If called for the BSP, cp is equal to current CPU.
153 * For non-BSPs, cpuid info of cp is not ready yet, so use cpuid info
158 (void) cpuid_getidstr(CPU, cp->cpu_idstr, CPU_IDSTRLEN);
159 (void) cpuid_getbrandstr(CPU, cp->cpu_brandstr, CPU_IDSTRLEN);
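
The init_cpu_info() fragments above allocate zeroed id and brand buffers for the new CPU and seed them from the current (boot) CPU, because the target CPU has not run its cpuid passes yet; mp_startup_common() later overwrites them with the CPU's own strings (lines 1786-1787 below). A minimal user-space sketch of that seed-now-fix-up-later pattern; toy_cpu, ID_LEN and the placeholder strings are illustrative, not the kernel's names:

    /* User-space sketch; error checks elided; all names are placeholders. */
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    #define ID_LEN 100   /* stand-in for CPU_IDSTRLEN */

    struct toy_cpu {
        char *idstr;
        char *brandstr;
    };

    static void
    toy_init_cpu_info(struct toy_cpu *cp, const struct toy_cpu *boot)
    {
        /* zeroed allocations, like kmem_zalloc(CPU_IDSTRLEN, KM_SLEEP) */
        cp->idstr = calloc(1, ID_LEN);
        cp->brandstr = calloc(1, ID_LEN);

        /* the new CPU has not run cpuid yet: borrow the boot CPU's strings */
        (void) strncpy(cp->idstr, boot->idstr, ID_LEN - 1);
        (void) strncpy(cp->brandstr, boot->brandstr, ID_LEN - 1);
    }

    int
    main(void)
    {
        struct toy_cpu boot = { "placeholder idstr", "placeholder brandstr" };
        struct toy_cpu cp;

        toy_init_cpu_info(&cp, &boot);
        printf("%s / %s\n", cp.idstr, cp.brandstr);
        free(cp.idstr);
        free(cp.brandstr);
        return (0);
    }
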
167 init_cpu_syscall(struct cpu *cp)
251 init_cpu_id_gdt(struct cpu *cp)
255 set_usegd(&cp->cpu_gdt[GDT_CPUID], SDP_SHORT, NULL, cp->cpu_id,
258 set_usegd(&cp->cpu_gdt[GDT_CPUID], NULL, cp->cpu_id, SDT_MEMRODA,
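
init_cpu_id_gdt() (lines 251-258) installs a user-visible GDT descriptor whose segment limit is set to cp->cpu_id, which makes the CPU id readable with a single lsl instruction. A sketch of that consumer side, assuming x86; GDT_CPUID_SEL is a made-up placeholder for the real selector derived from GDT_CPUID:

    /* x86-only sketch; GDT_CPUID_SEL is a hypothetical selector value. */
    #include <stdint.h>
    #include <stdio.h>

    #define GDT_CPUID_SEL   0x53   /* placeholder: (index << 3) | RPL bits */

    static uint32_t
    cpu_id_via_lsl(uint16_t sel, int *ok)
    {
        uint32_t limit = 0;
        uint8_t zf;

        /* lsl loads the descriptor's segment limit; ZF=1 means it succeeded */
        __asm__ volatile ("lsl %2, %0; setz %1"
            : "=r" (limit), "=q" (zf)
            : "r" ((uint32_t)sel)
            : "cc");
        *ok = zf;
        return (limit);
    }

    int
    main(void)
    {
        int ok;
        uint32_t id = cpu_id_via_lsl(GDT_CPUID_SEL, &ok);

        if (ok)
            printf("cpu id %u\n", id);
        else
            printf("selector not valid in this environment\n");
        return (0);
    }
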
275 struct cpu *cp;
294 cp = kmem_zalloc(sizeof (*cp), KM_SLEEP);
296 cp = cpu_free_list;
297 cpu_free_list = cp->cpu_next_free;
300 cp->cpu_m.mcpu_istamp = cpun << 16;
308 disp_cpu_init(cp);
310 cpu_vm_data_init(cp);
327 THREAD_ONPROC(tp, cp);
329 tp->t_bound_cpu = cp;
331 tp->t_cpu = cp;
332 tp->t_disp_queue = cp->cpu_disp;
351 cp->cpu_id = cpun;
352 cp->cpu_self = cp;
353 cp->cpu_thread = tp;
354 cp->cpu_lwp = NULL;
355 cp->cpu_dispthread = tp;
356 cp->cpu_dispatch_pri = DISP_PRIO(tp);
368 cp->cpu_base_spl = ipltospl(LOCK_LEVEL);
375 cp->cpu_idle_thread = tp;
378 tp->t_bound_cpu = cp;
380 tp->t_cpu = cp;
381 tp->t_disp_queue = cp->cpu_disp;
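
Lines 327-381 wire the newly created startup and idle threads and the new cpu structure to each other before the CPU ever runs: each thread is hard-bound to the cpu, and the cpu's thread pointers are pointed back at it. A toy user-space sketch of that mutual linking; every type and field name below is illustrative:

    /* Toy user-space sketch; none of these types are the kernel's. */
    #include <stdio.h>

    struct toy_thread;

    struct toy_cpu {
        int                 id;
        struct toy_thread  *thread;       /* like cpu_thread / cpu_dispthread */
        struct toy_thread  *idle_thread;  /* like cpu_idle_thread */
    };

    struct toy_thread {
        struct toy_cpu  *bound_cpu;   /* like t_bound_cpu: hard binding */
        struct toy_cpu  *cpu;         /* like t_cpu */
    };

    static void
    toy_wire(struct toy_cpu *cp, struct toy_thread *tp, int is_idle)
    {
        cp->thread = tp;
        if (is_idle)
            cp->idle_thread = tp;
        tp->bound_cpu = cp;   /* the thread may only ever run on cp */
        tp->cpu = cp;
    }

    int
    main(void)
    {
        struct toy_cpu cp = { 1, NULL, NULL };
        struct toy_thread startup = { NULL, NULL }, idle = { NULL, NULL };

        toy_wire(&cp, &startup, 0);
        toy_wire(&cp, &idle, 1);
        printf("cpu%d wired: %d\n", cp.id,
            idle.bound_cpu == &cp && cp.idle_thread == &idle);
        return (0);
    }
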
386 pg_cpu_bootstrap(cp);
391 kcpc_hw_init(cp);
397 setup_vaddr_for_ppcopy(cp);
403 ASSERT((sizeof (*cp->cpu_gdt) * NGDT) <= PAGESIZE);
405 cp->cpu_gdt = kmem_zalloc(PAGESIZE, KM_SLEEP);
406 bcopy(CPU->cpu_gdt, cp->cpu_gdt, (sizeof (*cp->cpu_gdt) * NGDT));
412 set_usegd(&cp->cpu_gdt[GDT_GS], cp, sizeof (struct cpu) -1, SDT_MEMRWA,
426 cp->cpu_idt = kmem_zalloc(PAGESIZE, KM_SLEEP);
427 bcopy(CPU->cpu_idt, cp->cpu_idt, PAGESIZE);
429 cp->cpu_idt = CPU->cpu_idt;
435 cpuid_alloc_space(cp);
439 cp->cpu_m.mcpu_mwait = cpuid_mwait_alloc(cp);
440 cp->cpu_m.mcpu_idle_cpu = cpu_idle_mwait;
443 cp->cpu_m.mcpu_idle_cpu = cpu_idle;
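
Lines 439-443 choose the idle routine for the new CPU through a per-cpu function pointer: when MONITOR/MWAIT support is present (the guarding test is not among the matches above) an mwait buffer is allocated and cpu_idle_mwait is installed, otherwise plain cpu_idle is used. A minimal sketch of that feature-gated selection with placeholder stubs:

    /* Placeholder stubs; has_mwait stands in for the real feature test. */
    #include <stdbool.h>
    #include <stdio.h>

    static void idle_mwait(void) { puts("idle via MONITOR/MWAIT"); }
    static void idle_halt(void)  { puts("idle via HLT"); }

    struct toy_cpu {
        void  (*idle)(void);   /* like mcpu_idle_cpu */
        void   *mwait_buf;     /* like mcpu_mwait */
    };

    static void
    toy_select_idle(struct toy_cpu *cp, bool has_mwait)
    {
        if (has_mwait) {
            static char buf[64];   /* stand-in for cpuid_mwait_alloc() */
            cp->mwait_buf = buf;
            cp->idle = idle_mwait;
        } else {
            cp->idle = idle_halt;
        }
    }

    int
    main(void)
    {
        struct toy_cpu cp = { 0 };

        toy_select_idle(&cp, true);
        cp.idle();
        return (0);
    }
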
445 init_cpu_info(cp);
448 init_cpu_id_gdt(cp);
454 ucode_alloc_space(cp);
455 xc_init_cpu(cp);
456 hat_cpu_online(cp);
473 cpu_intr_alloc(cp, NINTR_THREADS);
475 cp->cpu_flags = CPU_OFFLINE | CPU_QUIESCED | CPU_POWEROFF;
476 cpu_set_state(cp);
482 cpu_add_unit(cp);
484 return (cp);
491 mp_cpu_unconfigure_common(struct cpu *cp, int error)
498 cpu_del_unit(cp->cpu_id);
511 cp->cpu_flags = 0;
520 cpu_destroy_bound_threads(cp);
521 cp->cpu_idle_thread = NULL;
527 cp->cpu_intr_stack - (INTR_STACK_SIZE - SA(MINFRAME)));
528 cp->cpu_intr_stack = NULL;
535 trap_trace_ctl_t *ttc = &trap_trace_ctl[cp->cpu_id];
542 hat_cpu_offline(cp);
544 ucode_free_space(cp);
547 if (cp->cpu_idstr) {
548 kmem_free(cp->cpu_idstr, CPU_IDSTRLEN);
549 cp->cpu_idstr = NULL;
551 if (cp->cpu_brandstr) {
552 kmem_free(cp->cpu_brandstr, CPU_IDSTRLEN);
553 cp->cpu_brandstr = NULL;
557 if (cp->cpu_m.mcpu_mwait != NULL) {
558 cpuid_mwait_free(cp);
559 cp->cpu_m.mcpu_mwait = NULL;
562 cpuid_free_space(cp);
564 if (cp->cpu_idt != CPU->cpu_idt)
565 kmem_free(cp->cpu_idt, PAGESIZE);
566 cp->cpu_idt = NULL;
568 kmem_free(cp->cpu_gdt, PAGESIZE);
569 cp->cpu_gdt = NULL;
571 if (cp->cpu_supp_freqs != NULL) {
572 size_t len = strlen(cp->cpu_supp_freqs) + 1;
573 kmem_free(cp->cpu_supp_freqs, len);
574 cp->cpu_supp_freqs = NULL;
577 teardown_vaddr_for_ppcopy(cp);
579 kcpc_hw_fini(cp);
581 cp->cpu_dispthread = NULL;
582 cp->cpu_thread = NULL; /* discarded by cpu_destroy_bound_threads() */
584 cpu_vm_data_destroy(cp);
586 xc_fini_cpu(cp);
587 disp_cpu_fini(cp);
589 ASSERT(cp != CPU0);
590 bzero(cp, sizeof (*cp));
591 cp->cpu_next_free = cpu_free_list;
592 cpu_free_list = cp;
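
The configure path (lines 294-297) allocates a fresh cpu_t only when cpu_free_list is empty and otherwise reuses a recycled one, while the unconfigure path (lines 589-592) zeroes the structure and pushes it back on the list instead of freeing it. The same intrusive free-list shape in ordinary user-space C, with toy names:

    /* Ordinary C sketch of the intrusive free list; toy names throughout. */
    #include <stdlib.h>
    #include <string.h>

    struct toy_cpu {
        int             id;
        struct toy_cpu *next_free;   /* like cpu_next_free */
    };

    static struct toy_cpu *free_list;   /* like cpu_free_list */

    static struct toy_cpu *
    toy_cpu_get(void)
    {
        struct toy_cpu *cp;

        if (free_list == NULL) {
            cp = calloc(1, sizeof (*cp));   /* like kmem_zalloc(..., KM_SLEEP) */
        } else {
            cp = free_list;                 /* reuse a previously released one */
            free_list = cp->next_free;
        }
        return (cp);
    }

    static void
    toy_cpu_release(struct toy_cpu *cp)
    {
        memset(cp, 0, sizeof (*cp));   /* like bzero(cp, sizeof (*cp)) */
        cp->next_free = free_list;     /* push back, never freed */
        free_list = cp;
    }

    int
    main(void)
    {
        struct toy_cpu *cp = toy_cpu_get();

        cp->id = 1;
        toy_cpu_release(cp);
        return (0);
    }
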
679 workaround_warning(cpu_t *cp, uint_t erratum)
682 cp->cpu_id, erratum);
697 msr_warning(cpu_t *cp, const char *rw, uint_t msr, int error)
700 cp->cpu_id, rw, msr, error);
1349 mp_start_cpu_common(cpu_t *cp, boolean_t boot)
1362 ASSERT(cp != NULL);
1363 cpuid = cp->cpu_id;
1364 ctx = mach_cpucontext_alloc(cp);
1367 "cpu%d: failed to allocate context", cp->cpu_id);
1370 error = mach_cpu_start(cp, ctx);
1373 "cpu%d: failed to start, error %d", cp->cpu_id, error);
1374 mach_cpucontext_free(cp, ctx, error);
1393 mach_cpucontext_free(cp, ctx, error);
1405 mach_cpucontext_free(cp, ctx, 0);
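
mp_start_cpu_common() (lines 1349-1405) allocates a machine-specific startup context, attempts to start the CPU, and hands the context back to mach_cpucontext_free() with the error code on failure or with 0 once the CPU has come up. The control flow, sketched with hypothetical stand-ins; ctx_alloc(), start_cpu() and ctx_free() are not the real mach_* entry points:

    /* Hypothetical stand-ins only; not the real mach_* interfaces. */
    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct ctx { int unused; };

    static struct ctx *ctx_alloc(int cpuid) { (void)cpuid; return calloc(1, sizeof (struct ctx)); }
    static int start_cpu(int cpuid, struct ctx *ctx) { (void)cpuid; (void)ctx; return 0; }
    static void ctx_free(struct ctx *ctx, int error) { (void)error; free(ctx); }

    static int
    toy_start_cpu_common(int cpuid)
    {
        struct ctx *ctx;
        int error;

        if ((ctx = ctx_alloc(cpuid)) == NULL) {
            printf("cpu%d: failed to allocate context\n", cpuid);
            return (EAGAIN);
        }
        if ((error = start_cpu(cpuid, ctx)) != 0) {
            printf("cpu%d: failed to start, error %d\n", cpuid, error);
            ctx_free(ctx, error);   /* the error code lets cleanup differ */
            return (error);
        }
        /* ... wait for the new CPU to check in, then release the context */
        ctx_free(ctx, 0);
        return (0);
    }

    int
    main(void)
    {
        return (toy_start_cpu_common(1));
    }
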
1430 cpupm_init(cp);
1432 (void) pg_cpu_init(cp, B_FALSE);
1433 cpu_set_state(cp);
1449 cpu_t *cp;
1471 cp = mp_cpu_configure_common(who, B_TRUE);
1472 ASSERT(cp != NULL);
1477 error = mp_start_cpu_common(cp, B_TRUE);
1479 mp_cpu_unconfigure_common(cp, error);
1596 cpu_t *cp;
1602 cp = cpu_get(cpuid);
1603 if (cp != NULL) {
1620 cp = mp_cpu_configure_common(cpuid, B_FALSE);
1621 ASSERT(cp != NULL && cpu_get(cpuid) == cp);
1623 return (cp != NULL ? 0 : EAGAIN);
1629 cpu_t *cp;
1637 cp = cpu_get(cpuid);
1638 if (cp == NULL) {
1641 mp_cpu_unconfigure_common(cp, 0);
1657 cpu_t *cp = CPU;
1670 cpuid_pass1(cp, new_x86_featureset);
1681 mp_startup_signal(&procset_slave, cp->cpu_id);
1708 (void) wrmsr(MSR_AMD_TSCAUX, cp->cpu_id);
1713 init_cpu_syscall(cp);
1737 cmn_err(CE_CONT, "cpu%d: featureset\n", cp->cpu_id);
1739 cmn_err(CE_WARN, "cpu%d feature mismatch", cp->cpu_id);
1756 if (workaround_errata(cp) != 0)
1757 panic("critical workaround(s) missing for cpu%d", cp->cpu_id);
1769 cp->cpu_flags &= ~(CPU_POWEROFF | CPU_QUIESCED);
1775 xsave_setup_msr(cp);
1778 cpuid_pass2(cp);
1779 cpuid_pass3(cp);
1780 cpuid_pass4(cp, NULL);
1786 (void) cpuid_getidstr(cp, cp->cpu_idstr, CPU_IDSTRLEN);
1787 (void) cpuid_getbrandstr(cp, cp->cpu_brandstr, CPU_IDSTRLEN);
1789 cp->cpu_flags |= CPU_RUNNING | CPU_READY | CPU_EXISTS;
1793 cpu_event_init_cpu(cp);
1806 ASSERT(cp->cpu_base_spl == ipltospl(LOCK_LEVEL));
1809 pghw_physid_create(cp);
1815 mp_startup_signal(&procset_slave, cp->cpu_id);
1816 mp_startup_wait(&procset_master, cp->cpu_id);
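
Lines 1681 and 1815-1816 are the slave side of the boot rendezvous: the new CPU signals the master through procset_slave and then blocks in mp_startup_wait() until the master answers through procset_master. A plain pthreads sketch of that signal-then-wait handshake; the kernel's procset bitmask mechanism is not reproduced and every name below is illustrative:

    /* pthreads sketch of the signal-then-wait rendezvous; illustrative only. */
    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t  cv   = PTHREAD_COND_INITIALIZER;
    static unsigned slave_set, master_set;   /* one bit per toy cpu id */

    static void
    signal_set(unsigned *set, int id)
    {
        pthread_mutex_lock(&lock);
        *set |= 1U << id;
        pthread_cond_broadcast(&cv);
        pthread_mutex_unlock(&lock);
    }

    static void
    wait_set(unsigned *set, int id)
    {
        pthread_mutex_lock(&lock);
        while ((*set & (1U << id)) == 0)
            pthread_cond_wait(&cv, &lock);
        pthread_mutex_unlock(&lock);
    }

    static void *
    slave(void *arg)
    {
        int id = (int)(long)arg;

        signal_set(&slave_set, id);   /* "I am up and initialized" */
        wait_set(&master_set, id);    /* block until the master releases us */
        printf("cpu%d: released by master\n", id);
        return (NULL);
    }

    int
    main(void)
    {
        pthread_t t;

        pthread_create(&t, NULL, slave, (void *)1L);
        wait_set(&slave_set, 1);      /* master: wait for the slave's signal */
        signal_set(&master_set, 1);   /* master: let the slave continue */
        pthread_join(t, NULL);
        return (0);
    }
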
1817 pg_cmt_cpu_startup(cp);
1821 cp->cpu_flags &= ~CPU_OFFLINE;
1822 cpu_enable_intr(cp);
1823 cpu_add_active(cp);
1833 ucode_check(cp);
1850 cp->cpu_m.mcpu_cmi_hdl = hdl;
1858 (void) mach_cpu_create_device_node(cp, NULL);
1865 CPUSET_ATOMIC_ADD(cpu_ready_set, cp->cpu_id);
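
Line 1865 publishes the fully started CPU in cpu_ready_set via CPUSET_ATOMIC_ADD. A single-word sketch of an atomic set-bit of that kind using C11 atomics; the real CPUSET_* macros handle sets wider than one word:

    /* C11 atomics sketch; a real cpuset spans multiple words. */
    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    static _Atomic uint64_t ready_set;   /* toy single-word cpu_ready_set */

    static void
    toy_cpuset_atomic_add(_Atomic uint64_t *set, int cpuid)
    {
        atomic_fetch_or(set, (uint64_t)1 << cpuid);
    }

    int
    main(void)
    {
        toy_cpuset_atomic_add(&ready_set, 3);
        printf("ready_set = 0x%llx\n",
            (unsigned long long)atomic_load(&ready_set));
        return (0);
    }
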
1867 cmn_err(CE_CONT, "?cpu%d: %s\n", cp->cpu_id, cp->cpu_idstr);
1868 cmn_err(CE_CONT, "?cpu%d: %s\n", cp->cpu_id, cp->cpu_brandstr);
1870 cp->cpu_id);
1903 mp_cpu_start(struct cpu *cp)
1913 mp_cpu_stop(struct cpu *cp)
1922 if (cp->cpu_id == 0)
1931 if ((cbe_psm_timer_mode == TIMER_PERIODIC) && (cp->cpu_id == 0))
1941 cpu_disable_intr(struct cpu *cp)
1943 if (psm_disable_intr(cp->cpu_id) != DDI_SUCCESS)
1946 cp->cpu_flags &= ~CPU_ENABLE;
1954 cpu_enable_intr(struct cpu *cp)
1957 cp->cpu_flags |= CPU_ENABLE;
1958 psm_enable_intr(cp->cpu_id);
1962 mp_cpu_faulted_enter(struct cpu *cp)
1965 _NOTE(ARGUNUSED(cp));
1967 cmi_hdl_t hdl = cp->cpu_m.mcpu_cmi_hdl;
1972 hdl = cmi_hdl_lookup(CMI_HDL_NATIVE, cmi_ntv_hwchipid(cp),
1973 cmi_ntv_hwcoreid(cp), cmi_ntv_hwstrandid(cp));
1983 mp_cpu_faulted_exit(struct cpu *cp)
1986 _NOTE(ARGUNUSED(cp));
1988 cmi_hdl_t hdl = cp->cpu_m.mcpu_cmi_hdl;
1993 hdl = cmi_hdl_lookup(CMI_HDL_NATIVE, cmi_ntv_hwchipid(cp),
1994 cmi_ntv_hwcoreid(cp), cmi_ntv_hwstrandid(cp));