Lines Matching defs:cpu

35 #include <sys/cpu.h>
68 #include <sys/fm/cpu/UltraSPARC-III.h>
127 int cpu_ecache_set_size(struct cpu *cp);
140 static void cpu_uninit_ecache_scrub_dr(struct cpu *cp);
159 static void cheetah_nudge_onln(void *arg, cpu_t *cpu, cyc_handler_t *hdlr,
570 * Declare that this architecture/cpu combination does fpRAS.
613 int cpuid; /* fallen cpu */
614 int buddy; /* cpu that ran recovery */
645 int proc_cpu; /* null cpu */
669 * Attempt to recover a cpu by claiming every cache line as saved
670 * in the TSB that the non-responsive cpu is using. Since we can't
706 if ((cp = cpu[cpuid]) == NULL) {
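The comment at 669-670 describes a displacement trick: by touching every cache line that the hung cpu's TSB maps, the recovering cpu claims those lines for its own E$, which can force out (and write back) whatever stale state the non-responsive cpu holds. A minimal sketch of such a claiming loop, assuming a 64-byte line size and that base/len describe the region the hung cpu's TSB covers (both are assumptions, not the file's actual code):

    #include <sys/types.h>

    /*
     * Sketch only: load one word per (assumed) 64-byte E$ line so the
     * running cpu takes ownership of each line in [base, base + len).
     */
    static void
    claim_lines_sketch(caddr_t base, size_t len)
    {
            caddr_t va;

            for (va = base; va < base + len; va += 64)
                    (void) *(volatile uint64_t *)va;
    }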
883 * non-responsive cpu might be using. This might kick that cpu out of
998 cheetah_nudge_onln(void *arg, cpu_t *cpu, cyc_handler_t *hdlr, cyc_time_t *when)
1008 when->cyt_when = cpu->cpu_id * (NANOSEC / NCPU);
1016 * Create a low level cyclic to send an xtrap to the next cpu online.
1044 * Disable kernel preemption to protect the cpu list
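Lines 998-1044 set up the "nudge" machinery: a cyclic whose per-cpu online handler staggers each cpu's start time by cpu_id, so the cross-traps to the next online cpu do not all fire at once. A sketch of what such an online handler could look like, following the cyt_when computation visible at line 1008 (the handler name nudge_buddy_sketch and the one-second interval are assumptions):

    #include <sys/cyclic.h>
    #include <sys/time.h>

    static void nudge_buddy_sketch(void *arg);   /* hypothetical xtrap sender */

    static void
    nudge_onln_sketch(void *arg, cpu_t *cpu, cyc_handler_t *hdlr, cyc_time_t *when)
    {
            hdlr->cyh_func = nudge_buddy_sketch;
            hdlr->cyh_arg = NULL;
            hdlr->cyh_level = CY_LOW_LEVEL;

            /* Stagger start times: cpu N first fires N/NCPU of a second in. */
            when->cyt_when = cpu->cpu_id * (NANOSEC / NCPU);
            when->cyt_interval = NANOSEC;        /* assumed: once per second */
    }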
1175 * Tunable to disable the checking of other cpu logout areas during panic for
1256 struct cpu *cp;
1261 cp = cpu[i];
1336 * cpu logout structure pointer.
1350 * If no cpu logout data, then we will have to make do without
2756 * cpu later.
2778 * If we've come from a cpu CE trap then this info already exists
2779 * in the cpu logout area.
2788 * no line data capture. In this case we logout to the cpu logout
2789 * area - that's appropriate since it's the cpu cache data we need
2790 * for classification. We thus borrow the cpu logout area for a
2794 * If called from the partner check xcall handler then this cpu
2826 * another cpu.
2972 * Must be called with kernel preemption disabled (to stop the cpu list
2973 * from changing). The detecting cpu we are partnering has cpuid
2974 * aflt->flt_inst; we might not be running on the detecting cpu.
2976 * Restrict choice to active cpus in the same cpu partition as ourselves in
2989 * We keep a cache of the last partner selected for a cpu, and we'll try to
2993 * requiring frequent cpu list traversals.
2997 #define PTNR_SELFOK 0x2 /* Allow selection of cpu to "partner" itself */
3007 dtcr = cpu[aflt->flt_inst];
3012 * . there is just one cpu present
3017 * . there is just one cpu left online in the cpu partition
3021 * again next time; this is the case where the only other cpu online
3047 * Start the scan at the next online cpu in the same cpu
3054 * . is still a valid cpu,
3060 sp = cpu[CPU_PRIVATE_VAL(dtcr, chpr_ceptnr_id)];
3082 * valid cpu then start the scan at the next cpu in the
3084 * is no longer a valid cpu then go with our default. In
3088 sp = cpu[CPU_PRIVATE_VAL(dtcr, chpr_ceptnr_id)];
3100 * cpu is offline then its cpu_next_part will point to itself
3135 * If locptnr is not NULL it is a cpu in the same lgroup as the
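Lines 2972-3135 describe partner selection for CE checking: the detector caches its last partner in chpr_ceptnr_id so the common case avoids a full cpu list walk, falling back to a scan of the partition ring only when the cached cpu has gone away or offline. A condensed sketch of that shape; the caller is assumed to hold kpreempt_disable(), the validity tests are illustrative, and the real function weighs lgroups and more cases (per lines 3012-3135):

    /*
     * Sketch, not the file's partner-selection code: try the cached
     * partner, else scan cpu_next_part from the detecting cpu.
     */
    static cpu_t *
    ce_ptnr_sketch(cpu_t *dtcr, int flags)
    {
            cpu_t *sp;

            if (ncpus == 1)
                    return ((flags & PTNR_SELFOK) ? dtcr : NULL);

            /* Cached partner still valid, online, and in our partition? */
            sp = cpu[CPU_PRIVATE_VAL(dtcr, chpr_ceptnr_id)];
            if (sp != NULL && sp != dtcr && cpu_is_active(sp) &&
                sp->cpu_part == dtcr->cpu_part)
                    return (sp);

            /* Fall back: walk the partition ring from the next cpu. */
            sp = dtcr->cpu_next_part;
            do {
                    if (sp != dtcr && cpu_is_active(sp)) {
                            CPU_PRIVATE_VAL(dtcr, chpr_ceptnr_id) = sp->cpu_id;
                            return (sp);
                    }
                    if (sp->cpu_next_part == sp)
                            break;  /* offline: points to itself (line 3100) */
                    sp = sp->cpu_next_part;
            } while (sp != dtcr->cpu_next_part);

            return ((flags & PTNR_SELFOK) ? dtcr : NULL);
    }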
3166 * a cpu that experienced a possibly sticky or possibly persistent CE.
3179 * cpu has been DR'd out then ereport detector info will not be able to
3215 * on the detecting cpu, and that the async_flt structure will not persist on
3228 * panic context. We can assume that the cpu we are running on is online.
3311 kpreempt_disable(); /* stop cpu list changing */
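Line 3311 shows the caller's side of the preemption contract stated at line 2972. A hedged usage fragment around the sketch above:

    kpreempt_disable();                     /* stop cpu list changing */
    ptnr = ce_ptnr_sketch(dtcr, PTNR_SELFOK);
    /* ... run the partner check via the chosen cpu ... */
    kpreempt_enable();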
3768 * Turn off all cpu error detection, normally only used for panics.
3819 cpu_ecache_set_size(struct cpu *cp)
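cpu_ecache_set_size() (declared at line 127, defined at 3819) reduces to a division in the common case: an N-way set-associative E$ has set size total_size / N. A one-liner sketch under that assumption; cpunodes[].ecache_size as the size source and ecache_nways_sketch() as the associativity lookup are placeholders, not the file's exact code:

    static int ecache_nways_sketch(void);   /* hypothetical: 1, 2, or 4 ways */

    int
    ecache_set_size_sketch(struct cpu *cp)
    {
            /* Set size = total E$ size / associativity. */
            return (cpunodes[cp->cpu_id].ecache_size / ecache_nways_sketch());
    }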
4486 * Initialize cpu scheme for specified cpu.
4712 * On-board processor array error, add cpu resource.
5365 * odd case where the cpu isn't cooperating we'll keep trying. A cpu
5475 * in cpu_async_panic_callb, each cpu checks for CPU events on its way to
5490 cpu_uninit_private(struct cpu *cp)
5541 * With all other cpu types, E$ scrubbing is on by default
5680 cpu_scrub_cyclic_setup(void *arg, cpu_t *cpu, cyc_handler_t *hdlr,
5777 * Indicate that the specified cpu is idle.
5780 cpu_idle_ecache_scrub(struct cpu *cp)
5789 * Indicate that the specified cpu is busy.
5792 cpu_busy_ecache_scrub(struct cpu *cp)
5801 * Initialization for cache scrubbing for the specified cpu.
5804 cpu_init_ecache_scrub_dr(struct cpu *cp)
5817 * ?cache_scrub_enable and this per-cpu enable variable. All scrubbers
5831 * Un-initialization for cache scrubbing for the specified cpu.
5834 cpu_uninit_ecache_scrub_dr(struct cpu *cp)
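Lines 5777-5834 pair idle/busy notifications with the scrubber so it can run harder on an idle cpu and back off on a busy one. A sketch of the two hooks, assuming the scrubber keeps a busy flag in the per-cpu cheetah private area; the chsm_ecache_busy field and the ECACHE_CPU_IDLE/ECACHE_CPU_BUSY values are assumed names:

    void
    idle_ecache_scrub_sketch(struct cpu *cp)
    {
            ch_scrub_misc_t *csmp = CPU_PRIVATE_PTR(cp, chpr_scrub_misc);

            csmp->chsm_ecache_busy = ECACHE_CPU_IDLE;   /* scrub aggressively */
    }

    void
    busy_ecache_scrub_sketch(struct cpu *cp)
    {
            ch_scrub_misc_t *csmp = CPU_PRIVATE_PTR(cp, chpr_scrub_misc);

            csmp->chsm_ecache_busy = ECACHE_CPU_BUSY;   /* throttle back */
    }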
6109 * verify that the cpu is still around, DR
6194 * - binding to a cpu, eg with thread_affinity_set(). This is used
6289 * Attempt a cpu logout for an error that we did not trap for, such
6291 * on the cpu that took the error and that we cannot migrate. Returns
6687 * Create the scheme "cpu" FMRI.
6808 * anything in this cpu module. The SERD algorithm is handled by
7087 * Called when a cpu enters the CPU_FAULTED state (by the cpu placing the
7088 * faulted cpu into that state). Cross-trap to the faulted cpu to clear
7090 * on that cpu. We could cross-call instead, but that has a larger
7091 * instruction and data footprint than cross-trapping, and the cpu is known
7096 cpu_faulted_enter(struct cpu *cp)
7102 * Called when a cpu leaves the CPU_FAULTED state to return to one of
7103 * offline, spare, or online (by the cpu requesting this state change).
7109 cpu_faulted_exit(struct cpu *cp)
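Lines 7087-7109 explain the CPU_FAULTED transitions: a cross-trap (smaller instruction and data footprint than a cross-call, and safe because the target cpu is known idle) clears the faulted cpu's error state and toggles its error detection. A sketch using the sun4u xt_one() interface; the TL1 handler name and the meaning of its arguments are hypothetical:

    extern uint_t ce_toggle_tl1_sketch(uint64_t, uint64_t);  /* hypothetical */

    void
    faulted_enter_sketch(struct cpu *cp)
    {
            /* Cross-trap, not cross-call: tiny footprint, target is idle. */
            xt_one(cp->cpu_id, (xcfunc_t *)ce_toggle_tl1_sketch, B_FALSE, 0);
    }

    void
    faulted_exit_sketch(struct cpu *cp)
    {
            /* Re-enable detection on the way to offline, spare, or online. */
            xt_one(cp->cpu_id, (xcfunc_t *)ce_toggle_tl1_sketch, B_TRUE, 0);
    }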