| /linux/arch/csky/ | 
| H A D | Kconfig | 14 	select ARCH_INLINE_READ_LOCK if !PREEMPTION
 15 	select ARCH_INLINE_READ_LOCK_BH if !PREEMPTION
 16 	select ARCH_INLINE_READ_LOCK_IRQ if !PREEMPTION
 17 	select ARCH_INLINE_READ_LOCK_IRQSAVE if !PREEMPTION
 18 	select ARCH_INLINE_READ_UNLOCK if !PREEMPTION
 19 	select ARCH_INLINE_READ_UNLOCK_BH if !PREEMPTION
 20 	select ARCH_INLINE_READ_UNLOCK_IRQ if !PREEMPTION
 21 	select ARCH_INLINE_READ_UNLOCK_IRQRESTORE if !PREEMPTION
 22 	select ARCH_INLINE_WRITE_LOCK if !PREEMPTION
 23 	select ARCH_INLINE_WRITE_LOCK_BH if !PREEMPTION
 [all …]
 
 | 
| /linux/Documentation/locking/ | 
| H A D | preempt-locking.rst | 35 protect these situations by disabling preemption around them.
 37 You can also use put_cpu() and get_cpu(), which will disable preemption.
 44 Under preemption, the state of the CPU must be protected.  This is arch-
 47 section that must occur while preemption is disabled.  Think what would happen
 50 upon preemption, the FPU registers will be sold to the lowest bidder.  Thus,
 51 preemption must be disabled around such regions.
 54 kernel_fpu_begin and kernel_fpu_end will disable and enable preemption.
 72 Data protection under preemption is achieved by disabling preemption for the
 84 n-times in a code path, and preemption will not be reenabled until the n-th
 86 preemption is not enabled.
 [all …]
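The excerpt above describes the basic pattern: disable preemption (nestable, counted) around per-CPU state, or use get_cpu()/put_cpu(), which bundle the disable with fetching the CPU id. A minimal sketch of that pattern, assuming only the standard APIs the text names:

    #include <linux/preempt.h>
    #include <linux/smp.h>

    static void touch_per_cpu_state(void)
    {
            int cpu;

            preempt_disable();      /* increments preempt_count; nests n deep */
            /* ... state that must not migrate to another CPU ... */
            preempt_enable();       /* preemption resumes only at count zero */

            cpu = get_cpu();        /* disables preemption, returns this CPU */
            /* ... use per-CPU data belonging to 'cpu' ... */
            put_cpu();
    }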
 
 | 
| /linux/kernel/ | 
| H A D | Kconfig.preempt | 11 	select PREEMPTION
 18 	prompt "Preemption Model"
 22 	bool "No Forced Preemption (Server)"
 26 	  This is the traditional Linux preemption model, geared towards
 37 	bool "Voluntary Kernel Preemption (Desktop)"
 43 	  "explicit preemption points" to the kernel code. These new
 44 	  preemption points have been selected to reduce the maximum
 66 	  otherwise not be about to reach a natural preemption point.
 76 	bool "Scheduler controlled preemption model"
 81 	  This option provides a scheduler driven preemption model that
 [all …]
 
 | 
| H A D | Kconfig.locks | 104 #   - DEBUG_SPINLOCK=n and PREEMPTION=n
 142 	depends on !PREEMPTION || ARCH_INLINE_SPIN_UNLOCK_IRQ
 171 	depends on !PREEMPTION || ARCH_INLINE_READ_UNLOCK
 179 	depends on !PREEMPTION || ARCH_INLINE_READ_UNLOCK_IRQ
 208 	depends on !PREEMPTION || ARCH_INLINE_WRITE_UNLOCK
 216 	depends on !PREEMPTION || ARCH_INLINE_WRITE_UNLOCK_IRQ
 
 | 
| /linux/drivers/gpu/drm/msm/adreno/ | 
| H A D | a5xx_gpu.h | 58  * In order to do lockless preemption we use a simple state machine to progress
 61  * PREEMPT_NONE - no preemption in progress.  Next state START.
 62  * PREEMPT_START - The trigger is evaluating if preemption is possible. Next
 66  * PREEMPT_TRIGGERED: A preemption has been executed on the hardware. Next
 68  * PREEMPT_FAULTED: A preemption timed out (never completed). This will trigger
 70  * PREEMPT_PENDING: Preemption complete interrupt fired - the callback is
 85  * CPU to store the state for preemption. The record itself is much larger
 88  * There is a preemption record assigned per ringbuffer. When the CPU triggers a
 89  * preemption, it fills out the record with the useful information (wptr, ring
 91  * the preemption.  When a ring is switched out, the CP will save the ringbuffer
 [all …]
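A hedged reconstruction of the state machine those comments describe, limited to the states named in the excerpt (the real driver may define more):

    enum preempt_state {
            PREEMPT_NONE,       /* no preemption in progress; next state START */
            PREEMPT_START,      /* trigger is evaluating if preemption is possible */
            PREEMPT_TRIGGERED,  /* a preemption has been executed on the hardware */
            PREEMPT_FAULTED,    /* a preemption timed out (never completed) */
            PREEMPT_PENDING,    /* completion interrupt fired; callback running */
    };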
 
 | 
| H A D | a5xx_preempt.c | 9  * Try to transition the preemption state from old to new. Return
 22  * Force the preemption state to the specified state.  This is used in cases
 30 	 * preemption or in the interrupt handler so barriers are needed  in set_preempt_state()
 90 	DRM_DEV_ERROR(dev->dev, "%s: preemption timed out\n", gpu->name);  in a5xx_preempt_timer()
 94 /* Try to trigger a preemption switch */
 106 	 * Serialize preemption start to ensure that we always make  in a5xx_preempt_trigger()
 113 	 * Try to start preemption by moving from NONE to START. If  in a5xx_preempt_trigger()
 114 	 * unsuccessful, a preemption is already in flight  in a5xx_preempt_trigger()
 128 		 * It's possible that while a preemption request is in progress  in a5xx_preempt_trigger()
 152 	/* Set the address of the incoming preemption record */  in a5xx_preempt_trigger()
 [all …]
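The "try to transition the preemption state from old to new" comment suggests a compare-and-swap: the move succeeds only if no one raced us. A minimal sketch, assuming the state lives in an atomic_t:

    #include <linux/atomic.h>
    #include <linux/types.h>

    /* Returns true iff the state was 'old' and is now 'new'. */
    static bool try_preempt_state(atomic_t *state, int old, int new)
    {
            /* atomic_cmpxchg() returns the prior value */
            return atomic_cmpxchg(state, old, new) == old;
    }

With this, "moving from NONE to START" fails exactly when a preemption is already in flight, as the excerpt says.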
 
 | 
| /linux/arch/loongarch/ | 
| H A D | Kconfig | 34 	select ARCH_INLINE_READ_LOCK if !PREEMPTION
 35 	select ARCH_INLINE_READ_LOCK_BH if !PREEMPTION
 36 	select ARCH_INLINE_READ_LOCK_IRQ if !PREEMPTION
 37 	select ARCH_INLINE_READ_LOCK_IRQSAVE if !PREEMPTION
 38 	select ARCH_INLINE_READ_UNLOCK if !PREEMPTION
 39 	select ARCH_INLINE_READ_UNLOCK_BH if !PREEMPTION
 40 	select ARCH_INLINE_READ_UNLOCK_IRQ if !PREEMPTION
 41 	select ARCH_INLINE_READ_UNLOCK_IRQRESTORE if !PREEMPTION
 42 	select ARCH_INLINE_WRITE_LOCK if !PREEMPTION
 43 	select ARCH_INLINE_WRITE_LOCK_BH if !PREEMPTION
 [all …]
 
 | 
| /linux/Documentation/trace/rv/ | 
| H A D | monitor_sched.rst | 88 The schedule called with preemption disabled (scpd) monitor ensures schedule is
 89 called with preemption disabled::
 110 does not enable preemption::
 180 The need resched preempts (nrp) monitor ensures preemption requires
 181 ``need_resched``. Only kernel preemption is considered, since preemption
 184 A kernel preemption is whenever ``__schedule`` is called with the preemption
 186 type of preemption occurs after the need for ``rescheduling`` has been set.
 188 userspace preemption.
 190 case, a task goes through the scheduler from a preemption context but it is
 195 In theory, a preemption can only occur after the ``need_resched`` flag is set. In
 [all …]
 
 | 
| H A D | monitor_wip.rst | 13 preemption disabled::
 30 The wakeup event always takes place with preemption disabled because
 
 | 
| /linux/include/linux/ | 
| H A D | preempt.h | 7  * preempt_count (used for kernel preemption, interrupt count, etc.)
 15  * We put the hardirq and softirq counter into the preemption
 18  * - bits 0-7 are the preemption count (max preemption depth: 256)
 60  * Disable preemption until the scheduler is running -- use an unconditional
 160 /* Locks on RT do not disable preemption */
 169  * Which need to disable both preemption (CONFIG_PREEMPT_COUNT) and
 281  * Even if we don't have any preemption, we need preempt disable/enable
 301  * Modules have no business playing preemption tricks.
 345  * preempt_notifier - key for installing preemption notifiers
 429  * preempt_disable_nested - Disable preemption inside a normally preempt disabled section
 [all …]
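The excerpt confirms that bits 0-7 of preempt_count hold the preemption-disable depth (max 256), with the softirq and hardirq counters in higher bits. A sketch of extracting the depth; only the 8-bit field is taken from the text, the rest is an assumption:

    #define PREEMPT_BITS    8
    #define PREEMPT_SHIFT   0
    #define PREEMPT_MASK    (((1U << PREEMPT_BITS) - 1) << PREEMPT_SHIFT)

    static inline unsigned int preempt_depth(unsigned int count)
    {
            return (count & PREEMPT_MASK) >> PREEMPT_SHIFT; /* 0..255 */
    }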
 
 | 
| /linux/Documentation/core-api/ | 
| H A D | local_ops.rst | 42 making sure that we modify it from within a preemption safe context. It is
 70 * Preemption (or interrupts) must be disabled when using local ops in
 76   preemption already disabled. I suggest, however, to explicitly
 77   disable preemption anyway to make sure it will still work correctly on
 104 local atomic operations: it makes sure that preemption is disabled around write
 110 If you are already in a preemption-safe context, you can use
 161              * preemptible context (it disables preemption) :
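A minimal sketch of the rule the excerpt states, using the local_t API the document covers: get_cpu_var() disables preemption, so the local op cannot be split across CPUs:

    #include <asm/local.h>
    #include <linux/percpu.h>

    static DEFINE_PER_CPU(local_t, counter) = LOCAL_INIT(0);

    static void count_event(void)
    {
            /* get_cpu_var() disables preemption around the update */
            local_inc(&get_cpu_var(counter));
            put_cpu_var(counter);
    }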
 
 | 
| /linux/Documentation/RCU/ | 
| H A D | NMI-RCU.rst | 45 The do_nmi() function processes each NMI.  It first disables preemption
 50 preemption is restored.
 95 CPUs complete any preemption-disabled segments of code that they were
 97 Since NMI handlers disable preemption, synchronize_rcu() is guaranteed
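A hedged sketch of the guarantee described: because NMI handlers run with preemption disabled, they act as a sched-RCU read-side section, so synchronize_rcu() cannot return while a handler still uses the old pointer. nmi_callback here is a hypothetical slot updated elsewhere with rcu_assign_pointer():

    #include <linux/rcupdate.h>

    static void (*nmi_callback)(void);  /* hypothetical callback slot */

    static void do_nmi_sketch(void)
    {
            void (*fn)(void) = rcu_dereference_sched(nmi_callback);

            if (fn)
                    fn();   /* safe until synchronize_rcu() completes */
    }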
 
 | 
| /linux/drivers/gpu/drm/xe/ | 
| H A D | xe_exec_queue_types.h | 117 		/** @sched_props.preempt_timeout_us: preemption timeout in micro-seconds */
 127 		/** @lr.pfence: preemption fence */
 129 		/** @lr.context: preemption fence context */
 131 		/** @lr.seqno: preemption fence seqno */
 197 	/** @set_preempt_timeout: Set preemption timeout for exec queue */
 
 | 
| H A D | xe_pcode.c | 169  * @timeout_base_ms: timeout for polling with preemption enabled
 174  * applying @reply_mask. Polling is first attempted with preemption enabled
 176  * preemption disabled.
 200 	 * the poll with preemption disabled to maximize the number of  in xe_pcode_request()
 207 		"PCODE timeout, retrying with preemption disabled\n");  in xe_pcode_request()
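A sketch of the two-phase polling these comments describe: poll first with preemption enabled (cheap, sleep-friendly), and only on timeout retry a short window with preemption disabled so completions are not missed. poll_once() is a hypothetical helper, not the driver's API:

    #include <linux/errno.h>
    #include <linux/preempt.h>
    #include <linux/types.h>

    /* Hypothetical: poll for reply bits until timeout; 'atomic' forbids sleeping */
    extern int poll_once(u32 reply_mask, int timeout_ms, bool atomic);

    static int request_with_retry(u32 reply_mask, int timeout_base_ms)
    {
            int ret = poll_once(reply_mask, timeout_base_ms, false);

            if (ret != -ETIMEDOUT)
                    return ret;

            preempt_disable();
            ret = poll_once(reply_mask, 50, true);  /* tight loop, no sleeping */
            preempt_enable();
            return ret;
    }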
 
 | 
| H A D | xe_hw_engine_types.h | 94 		/** @sched_props.preempt_timeout_us: preemption timeout in micro-seconds */
 96 		/** @sched_props.preempt_timeout_min: min preemption timeout in micro-seconds */
 98 		/** @sched_props.preempt_timeout_max: max preemption timeout in micro-seconds */
 
 | 
| /linux/fs/ | 
| H A D | stack.c | 18 	 * preemption (see include/linux/fs.h): we need nothing extra for  in fsstack_copy_inode_size()
 26 	 * i_blocks in sync despite SMP or PREEMPTION - though stat's  in fsstack_copy_inode_size()
 48 	 * i_blocks in sync despite SMP or PREEMPTION: use i_lock for that case  in fsstack_copy_inode_size()
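A sketch of the scheme those comments outline: on 64-bit, i_size and i_blocks can each be copied atomically and nothing extra is needed; on 32-bit SMP or PREEMPTION builds, i_lock keeps the pair consistent for concurrent readers such as stat. This is a simplified sketch, not the function itself:

    #include <linux/fs.h>

    static void copy_size_sketch(struct inode *dst, struct inode *src)
    {
            loff_t size = i_size_read(src);
            blkcnt_t blocks = src->i_blocks;

            if (sizeof(size) > sizeof(long))        /* 32-bit case */
                    spin_lock(&dst->i_lock);
            i_size_write(dst, size);
            dst->i_blocks = blocks;
            if (sizeof(size) > sizeof(long))
                    spin_unlock(&dst->i_lock);
    }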
 
 | 
| /linux/arch/arm64/ | 
| H A D | Kconfig | 63 	select ARCH_INLINE_READ_LOCK if !PREEMPTION
 64 	select ARCH_INLINE_READ_LOCK_BH if !PREEMPTION
 65 	select ARCH_INLINE_READ_LOCK_IRQ if !PREEMPTION
 66 	select ARCH_INLINE_READ_LOCK_IRQSAVE if !PREEMPTION
 67 	select ARCH_INLINE_READ_UNLOCK if !PREEMPTION
 68 	select ARCH_INLINE_READ_UNLOCK_BH if !PREEMPTION
 69 	select ARCH_INLINE_READ_UNLOCK_IRQ if !PREEMPTION
 70 	select ARCH_INLINE_READ_UNLOCK_IRQRESTORE if !PREEMPTION
 71 	select ARCH_INLINE_WRITE_LOCK if !PREEMPTION
 72 	select ARCH_INLINE_WRITE_LOCK_BH if !PREEMPTION
 [all …]
 
 | 
| /linux/kernel/trace/rv/monitors/opid/ | 
| H A D | Kconfig | 13 	  interrupts and preemption disabled or during IRQs, where preemption
 | 
| /linux/arch/arm64/include/asm/ | 
| H A D | percpu.h | 26 	 * Non-VHE hyp code runs with preemption disabled. No need to hazard  in __hyp_my_cpu_offset()
 140  * re-enabling preemption for preemptible kernels, but doing that in a way
 144  * Not to mention it'll break the actual preemption model for missing a
 145  * preemption point when TIF_NEED_RESCHED gets set while preemption is
 
 | 
| /linux/Documentation/mm/ | 
| H A D | highmem.rst | 66   CPU while the mapping is active. Although preemption is never disabled by
 73   As said, pagefaults and preemption are never disabled. There is no need to
 74   disable preemption because, when context switches to a different task, the
 110   effects of atomic mappings, i.e. disabling page faults or preemption, or both.
 141   restrictions on preemption or migration. It comes with an overhead as mapping
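A sketch of the local-mapping pattern the excerpt describes: the mapping is only usable by this task, but neither page faults nor preemption need to be disabled, since the mapping is preserved across a context switch:

    #include <linux/highmem.h>
    #include <linux/string.h>

    static void copy_from_page_sketch(struct page *page, void *dst, size_t len)
    {
            void *src = kmap_local_page(page);

            memcpy(dst, src, len);  /* may fault, may be preempted */
            kunmap_local(src);
    }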
 
 | 
| /linux/kernel/trace/ | 
| H A D | trace_syscalls.c | 309 	 * Syscall probe called with preemption enabled, but the ring  in ftrace_syscall_enter()
 310 	 * buffer and per-cpu data require preemption to be disabled.  in ftrace_syscall_enter()
 354 	 * Syscall probe called with preemption enabled, but the ring  in ftrace_syscall_exit()
 355 	 * buffer and per-cpu data require preemption to be disabled.  in ftrace_syscall_exit()
 606 	 * Syscall probe called with preemption enabled, but the ring  in perf_syscall_enter()
 607 	 * buffer and per-cpu data require preemption to be disabled.  in perf_syscall_enter()
 715 	 * Syscall probe called with preemption enabled, but the ring  in perf_syscall_exit()
 716 	 * buffer and per-cpu data require preemption to be disabled.  in perf_syscall_exit()
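The repeated comment above amounts to: the probe is entered with preemption enabled, so it must disable preemption itself before touching the ring buffer and per-CPU data. A sketch, with write_to_ring_buffer() a hypothetical stand-in for the buffer code:

    #include <linux/preempt.h>

    extern void write_to_ring_buffer(void);     /* hypothetical */

    static void syscall_probe_sketch(void)
    {
            preempt_disable_notrace();  /* _notrace avoids recursing into tracing */
            write_to_ring_buffer();
            preempt_enable_notrace();
    }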
 
 | 
| /linux/Documentation/virt/kvm/devices/ | 
| H A D | arm-vgic.rst | 99     maximum possible 128 preemption levels.  The semantics of the register
 100     indicate if any interrupts in a given preemption level are in the active
 103     Thus, preemption level X has one or more active interrupts if and only if:
 107     Bits for undefined preemption levels are RAZ/WI.
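The active-level test the text describes can be written out directly. Assuming four 32-bit GICH_APRn registers covering the 128 preemption levels, with apr[] a copy of those registers (the layout beyond the excerpt is an assumption):

    #include <linux/types.h>

    static bool level_has_active_irq(const u32 *apr, unsigned int level)
    {
            /* level X is active iff bit (X mod 32) of APR[X div 32] is set */
            return apr[level / 32] & (1U << (level % 32));
    }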
 
 | 
| /linux/arch/s390/include/asm/ | 
| H A D | kmsan.h | 43 	 * KMSAN. Therefore, disable preemption here, and re-enable preemption  in kmsan_virt_addr_valid()
 | 
| /linux/arch/riscv/include/asm/ | 
| H A D | simd.h | 30 	 * RISCV_KERNEL_MODE_V is only set while preemption is disabled,  in may_use_simd()
 31 	 * and is clear whenever preemption is enabled.  in may_use_simd()
 
 | 
| /linux/drivers/gpu/drm/amd/include/ivsrcid/vpe/ | 
| H A D | irqsrcs_vpe_6_1.h | 30 #define VPE_6_1_SRCID__VPE_PREEMPT                      4               // 0x4 Preemption
 38 #define VPE_6_1_SRCID__VPE_IB_PREEMPT                   12              // 0xC IB preemption
 
 |