cpu_switch.S: diff between revisions 48a09cf2760da35e089ae3e2d56578b730fc7047 ($Id rev 1.58) and 9a3b3e8bce8e8c8bbec663229e16bebb3cfc5d53 ($Id rev 1.59). Lines removed in the new revision are prefixed with '-', lines added with '+'; unchanged context is unprefixed.
/*-
 * Copyright (c) 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without

--- 19 unchanged lines hidden ---

 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
- * $Id: swtch.s,v 1.58 1997/08/04 17:31:43 fsmp Exp $
+ * $Id: swtch.s,v 1.59 1997/08/09 00:02:47 dyson Exp $
 */

#include "npx.h"
#include "opt_user_ldt.h"

#include <sys/rtprio.h>

#include <machine/asmacros.h>

--- 190 unchanged lines hidden ---

rem3:	.asciz	"remrq"
rem3rt:	.asciz	"remrq.rt"
rem3id:	.asciz	"remrq.id"

/*
 * When no processes are on the runq, cpu_switch() branches to _idle
 * to wait for something to come ready.
- *
- * NOTE: on an SMP system this routine is a startup-only code path.
- * once initialization is over, meaning the idle procs have been
- * created, we should NEVER branch here.
 */
	ALIGN_TEXT
_idle:
-#if defined(SMP) && defined(DIAGNOSTIC)
-	cmpl	$0, _smp_active
-	jnz	badsw3
-#endif /* SMP && DIAGNOSTIC */
+#ifdef SMP
+	/* when called, we have the mplock, intr disabled */
+
	xorl	%ebp,%ebp
+
+	/* use our idleproc's "context" */
+	movl	_my_idlePTD,%ecx
+	movl	%ecx,%cr3
+	movl	$_idlestack_top,%ecx
+	movl	%ecx,%esp
+
+	/* update common_tss.tss_esp0 pointer */
+	movl	$_common_tss, %eax
+	movl	%ecx, TSS_ESP0(%eax)
+
+	sti
+
+	/*
+	 * XXX callers of cpu_switch() do a bogus splclock().  Locking should
+	 * be left to cpu_switch().
+	 */
+	call	_spl0
+
+	cli
+
+	/*
+	 * _REALLY_ free the lock, no matter how deep the prior nesting.
+	 * We will recover the nesting on the way out when we have a new
+	 * proc to load.
+	 *
+	 * XXX: we had damn well better be sure we had it before doing this!
+	 */
+	movl	$FREE_LOCK, %eax
+	movl	%eax, _mp_lock
+
+	/* do NOT have lock, intrs disabled */
+	.globl	idle_loop
+idle_loop:
+
+	movl	%cr3,%eax	/* ouch! */
+	movl	%eax,%cr3
+
+	cmpl	$0,_smp_active
+	jne	1f
+	cmpl	$0,_cpuid
+	je	1f
+	jmp	2f
+
+1:	cmpl	$0,_whichrtqs	/* real-time queue */
+	jne	3f
+	cmpl	$0,_whichqs	/* normal queue */
+	jne	3f
+	cmpl	$0,_whichidqs	/* 'idle' queue */
+	jne	3f
+
+	cmpl	$0,_do_page_zero_idle
+	je	2f
+	/* XXX appears to cause panics */
+	/*
+	 * Inside zero_idle we enable interrupts and grab the mplock
+	 * as needed.  It needs to be careful about entry/exit mutexes.
+	 */
+	call	_vm_page_zero_idle	/* internal locking */
+	testl	%eax, %eax
+	jnz	idle_loop
+2:
+
+	/* enable intrs for a halt */
+	sti
+	call	*_hlt_vector	/* wait for interrupt */
+	cli
+	jmp	idle_loop
+
+3:
+	call	_get_mplock
+	cmpl	$0,_whichrtqs	/* real-time queue */
+	CROSSJUMP(jne, sw1a, je)
+	cmpl	$0,_whichqs	/* normal queue */
+	CROSSJUMP(jne, nortqr, je)
+	cmpl	$0,_whichidqs	/* 'idle' queue */
+	CROSSJUMP(jne, idqr, je)
+	call	_rel_mplock
+	jmp	idle_loop
+
+#else
+	xorl	%ebp,%ebp
	movl	$HIDENAME(tmpstk),%esp
	movl	_IdlePTD,%ecx
	movl	%ecx,%cr3

	/* update common_tss.tss_esp0 pointer */
#ifdef VM86
	movl	$GPROC0_SEL, %esi
#endif /* VM86 */

--- 34 unchanged lines hidden ---

	cmpl	$0,_whichidqs	/* 'idle' queue */
	CROSSJUMP(jne, idqr, je)
	call	_vm_page_zero_idle
	testl	%eax, %eax
	jnz	idle_loop
	sti
	call	*_hlt_vector	/* wait for interrupt */
	jmp	idle_loop
+#endif
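
Taken together, the added SMP branch above implements a per-CPU idle loop. In rough C it has the shape below; the symbols smp_active, cpuid, whichrtqs, whichqs, whichidqs, do_page_zero_idle, vm_page_zero_idle, get_mplock, rel_mplock and hlt_vector are the listing's own, while idle_loop_sketch() and switch_to_runnable_proc() are illustrative placeholders, not kernel symbols.

extern volatile int smp_active, cpuid;
extern volatile unsigned int whichrtqs, whichqs, whichidqs;
extern int do_page_zero_idle;
extern void (*hlt_vector)(void);
extern int vm_page_zero_idle(void);
extern void get_mplock(void), rel_mplock(void);
extern void enable_intr(void), disable_intr(void);
extern void switch_to_runnable_proc(void);	/* placeholder for the sw1a path */

static void
idle_loop_sketch(void)
{
	for (;;) {
		/* (each pass of the asm loop also reloads %cr3: a TLB flush) */
		if (smp_active != 0 || cpuid == 0) {
			if (whichrtqs != 0 || whichqs != 0 || whichidqs != 0) {
				get_mplock();	/* must hold the mplock to switch */
				if (whichrtqs != 0 || whichqs != 0 || whichidqs != 0)
					switch_to_runnable_proc(); /* leaves the loop */
				else
					rel_mplock();	/* raced: queues drained */
				continue;
			}
			if (do_page_zero_idle && vm_page_zero_idle())
				continue;	/* zeroed a page; rescan the queues */
		}
		enable_intr();		/* sti */
		(*hlt_vector)();	/* halt until the next interrupt */
		disable_intr();		/* cli */
	}
}

Note that the loop runs with the mplock fully released (the FREE_LOCK store above discards any prior nesting) and interrupts disabled except across the halt; the nesting is recovered only when a new process is finally loaded.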

CROSSJUMPTARGET(_idle)

ENTRY(default_halt)
	hlt
	ret
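
The indirect "call *_hlt_vector" in both idle loops goes through a function pointer, so the halt routine can be replaced at run time; ENTRY(default_halt) is the default target. A minimal C analogue of that indirection (sketch only; the _sketch names are hypothetical):

static void default_halt_sketch(void);

/* Halt hook: the idle loop calls through this pointer, so a different
 * idle/halt routine can be installed without touching the loop itself. */
static void (*hlt_vector_sketch)(void) = default_halt_sketch;

static void
default_halt_sketch(void)
{
	__asm __volatile("hlt");	/* sleep until the next interrupt */
}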

/*

--- 49 unchanged lines hidden ---

1:
#endif	/* NNPX > 0 */

	movl	$0,_curproc	/* out of process */

	/* save is done, now choose a new process or idle */
sw1:
	cli
+
+#ifdef SMP
+	/* Stop scheduling if smp_active goes zero and we are not BSP */
+	cmpl	$0,_smp_active
+	jne	1f
+	cmpl	$0,_cpuid
+	je	1f
+	CROSSJUMP(je, _idle, jne)	/* wind down */
+1:
+#endif
+
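The block just added under #ifdef SMP keeps an application processor from scheduling while the system winds down. As a C predicate (illustrative wrapper; smp_active and cpuid are the listing's symbols):

/* An AP (cpuid != 0) must stop scheduling and park in _idle once
 * smp_active has been cleared; the BSP (cpuid 0) keeps going. */
static int
ap_should_wind_down(int smp_active, int cpuid)
{
	return (smp_active == 0 && cpuid != 0);
}
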
sw1a:
	movl	_whichrtqs,%edi	/* pick next p. from rtqs */
	testl	%edi,%edi
	jz	nortqr	/* no realtime procs */

	/* XXX - bsf is sloow */
	bsfl	%edi,%ebx	/* find a full q */
	jz	nortqr	/* no proc on rt q - try normal ... */
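
whichrtqs is a bitmask with bit n set when real-time run queue n is non-empty, so bsfl (bit scan forward) yields the lowest-numbered, i.e. highest-priority, occupied queue. The C equivalent of the scan, assuming only that bitmask convention (pick_rt_queue() is an illustrative name):

#include <strings.h>	/* ffs() */

/* Return the highest-priority non-empty queue, or -1 if none:
 * ffs() is 1-based and returns 0 for an all-zero mask. */
static int
pick_rt_queue(unsigned int whichrtqs)
{
	int bit = ffs((int)whichrtqs);

	return (bit == 0 ? -1 : bit - 1);
}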

--- 211 unchanged lines hidden ---

badsw2:
	pushl	$sw0_2
	call	_panic

sw0_2:	.asciz	"cpu_switch: not SRUN"
#endif

#if defined(SMP) && defined(DIAGNOSTIC)
-badsw3:
-	pushl	$sw0_3
-	call	_panic
-
-sw0_3:	.asciz	"cpu_switch: went idle with smp_active"
-
badsw4:
	pushl	$sw0_4
	call	_panic

sw0_4:	.asciz	"cpu_switch: do not have lock"
#endif /* SMP && DIAGNOSTIC */

/*

--- 56 unchanged lines hidden ---