/linux/arch/arm64/kvm/vmid.c
    in flush_context():
        63  * Unlike ASID allocator, we expect less frequent rollover in
        67  * invalidation over the inner shareable domain on rollover.
    in kvm_arm_vmid_update():
       151  * reserving the VMID space needlessly on rollover.
       153  * handle the sync with a concurrent rollover.
    in kvm_arm_vmid_alloc_init():
       180  * Expect allocation after rollover to fail if we don't have

/linux/arch/arm64/mm/context.c
    in flush_context():
       116  * rollover, but hasn't run another task in
    in new_context():
       168  * If our current ASID was active during a rollover, we
    in check_and_switch_context():
       230  * cmpxchg. Racing with a concurrent rollover means that either:
       233  * lock. Taking the lock synchronises with the rollover and so
    in arm64_mm_context_get():
       295  * We went through one or more rollover since that ASID was
    in asids_update_limit():
       384  * Expect allocation after rollover to fail if we don't have at least
       392  * There must always be an ASID available after rollover. Ensure that,

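The arm64, MIPS, RISC-V, csky, and 32-bit ARM entries in this listing all quote the same generation-based allocator pattern: a context ID carries a generation in its upper bits, and when the ASID space is exhausted the allocator "rolls over" by bumping the generation and forgetting earlier allocations. Below is a minimal single-threaded sketch of that pattern; the names and sizes are toy ones, and the kernel versions add per-CPU active tracking, cmpxchg/lock synchronisation, and TLB invalidation on top.

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define ASID_BITS      8
    #define NUM_ASIDS      (1u << ASID_BITS)
    #define GENERATION_INC NUM_ASIDS       /* generation lives above the ASID bits */

    static uint64_t gen = GENERATION_INC;  /* current generation */
    static uint8_t  in_use[NUM_ASIDS];     /* which ASIDs are live this cycle */
    static uint32_t cur_idx = 1;           /* ASID 0 is reserved */

    /* Rollover: bump the generation and forget all previous allocations.
     * (A real allocator would also trigger TLB invalidation here.) */
    static void flush_context(void)
    {
        gen += GENERATION_INC;
        memset(in_use, 0, sizeof(in_use));
        cur_idx = 1;
    }

    /* Return a context id = generation | asid, rolling over when exhausted. */
    static uint64_t new_context(void)
    {
        for (uint32_t a = cur_idx; a < NUM_ASIDS; a++) {
            if (!in_use[a]) {
                in_use[a] = 1;
                cur_idx = a + 1;
                return gen | a;
            }
        }
        flush_context();                   /* out of ASIDs: roll over */
        in_use[cur_idx] = 1;
        return gen | cur_idx++;
    }

    int main(void)
    {
        uint64_t first = new_context();
        for (int i = 0; i < NUM_ASIDS; i++)   /* force at least one rollover */
            new_context();
        uint64_t later = new_context();
        printf("gen(first)=%llu gen(later)=%llu\n",
               (unsigned long long)(first >> ASID_BITS),
               (unsigned long long)(later >> ASID_BITS));
        return 0;
    }

A task whose context ID still carries an old generation simply allocates a fresh one on its next switch, which is what the "active during a rollover" comments above are guarding.
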
/linux/drivers/input/misc/rotary_encoder.c
        41  bool rollover;    (struct member)
    in rotary_encoder_report_event():
        84  if (encoder->rollover)
        90  if (encoder->rollover || pos < encoder->steps)
        94  if (encoder->rollover)
    in rotary_encoder_probe():
       210  encoder->rollover =
       211          device_property_read_bool(dev, "rotary-encoder,rollover");

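From these snippets, the driver's rollover flag selects between wrapping the absolute position modulo the step count and discarding motion past the ends. A rough userspace sketch of that update (illustrative only, not the driver's exact code):

    #include <stdio.h>

    struct encoder {
        int pos;       /* current absolute position */
        int steps;     /* steps per full turn */
        int rollover;  /* from the "rotary-encoder,rollover" property */
    };

    static void report_event(struct encoder *e, int dir)
    {
        int pos = e->pos + dir;

        if (e->rollover) {
            pos %= e->steps;
            if (pos < 0)
                pos += e->steps;  /* keep result in [0, steps) */
        } else if (pos < 0 || pos >= e->steps) {
            return;               /* no rollover: drop out-of-range motion */
        }
        e->pos = pos;
    }

    int main(void)
    {
        struct encoder e = { .pos = 23, .steps = 24, .rollover = 1 };
        report_event(&e, +1);     /* 23 -> 0: wrapped around */
        printf("pos after rollover: %d\n", e.pos);
        return 0;
    }
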
/linux/arch/mips/mm/context.c
    in flush_context():
        80  * rollover, but hasn't run another task in
    in get_new_mmid():
       137  * If our current MMID was active during a rollover, we
    in check_switch_mmu_context():
       197  * asid_cache with a relaxed cmpxchg. Racing with a concurrent rollover
       202  * with the rollover and so we are forced to see the updated
    in mmid_init():
       273  * Expect allocation after rollover to fail if we don't have at least

/linux/arch/arc/include/asm/mmu_context.h
        36  * A new allocation cycle, post rollover, could potentially reassign an ASID
        40  * automagically increments the generation when lower 8 bits rollover.
    in get_new_mmu_context():
        79  /* move to new ASID and handle rollover */
        85  * Above check for rollover of 8 bit ASID in 32 bit container.

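Lines 40 and 85 describe a compact version of the same generation scheme: the 8-bit ASID lives in the low byte of a 32-bit cycle counter, so a carry out of the low byte increments the generation for free. A small sketch with made-up names (the real code also flushes the TLB when the low byte wraps):

    #include <stdint.h>
    #include <stdio.h>

    #define ASID_MASK 0xffu

    static uint32_t asid_cycle = 0xff;   /* low byte about to roll over */

    static uint32_t get_new_mmu_context(void)
    {
        ++asid_cycle;
        if (!(asid_cycle & ASID_MASK))   /* low 8 bits wrapped to 0 */
            ++asid_cycle;                /* skip ASID 0; generation already bumped */
        return asid_cycle;
    }

    int main(void)
    {
        uint32_t ctx = get_new_mmu_context();
        printf("asid=%u generation=%u\n", ctx & ASID_MASK, ctx >> 8);
        return 0;
    }
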
/linux/tools/testing/selftests/net/tcp_ao/seq-ext.c
    in server_fn():
       127  synchronize_threads(); /* 5: verify the connection during SEQ-number rollover */
       138  synchronize_threads(); /* 6: verify counters after SEQ-number rollover */
    in client_fn():
       218  synchronize_threads(); /* 5: verify the connection during SEQ-number rollover */
       224  synchronize_threads(); /* 5: verify counters after SEQ-number rollover */

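This selftest drives a TCP-AO connection across a 32-bit SEQ-number wrap. TCP survives the rollover because sequence comparison is modular, as in the kernel's before()/after() helpers; a sketch of the idea:

    #include <stdint.h>
    #include <stdio.h>

    /* Signed modular comparison, in the style of the kernel's before(). */
    static int before(uint32_t seq1, uint32_t seq2)
    {
        return (int32_t)(seq1 - seq2) < 0;
    }

    int main(void)
    {
        uint32_t pre_wrap  = 0xfffffff0u;  /* just before rollover */
        uint32_t post_wrap = 0x00000010u;  /* just after rollover */

        /* Despite post_wrap being numerically smaller, it is "later": */
        printf("before(pre, post) = %d\n", before(pre_wrap, post_wrap)); /* 1 */
        return 0;
    }
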
/linux/arch/riscv/mm/context.c
    in __flush_context():
        75  * If this CPU has already been through a rollover, but
    in __new_context():
       107  * If our current CONTEXT was active during a rollover, we
    in set_mm_asid():
       157  * Following is how we handle racing with a concurrent rollover:
       160  * lock. Taking the lock synchronises with the rollover and so

/linux/arch/csky/mm/asid.c
    in flush_context():
        36  * rollover, but hasn't run another task in
    in new_context():
        90  * If our current ASID was active during a rollover, we
    in asid_allocator_init():
       176  * Expect allocation after rollover to fail if we don't have at least

/linux/arch/csky/include/asm/asid.h
    in asid_check_context():
        54  * cmpxchg. Racing with a concurrent rollover means that either:
        57  * lock. Taking the lock synchronises with the rollover and so

/linux/Documentation/devicetree/bindings/input/rotary-encoder.yaml
        43  rotary-encoder,rollover:
        46      Automatic rollover when the rotary value becomes

/linux/drivers/rtc/interface.c
    in __rtc_read_alarm():
       302  /* For simplicity, only support date rollover for now */
       325  /* with luck, no rollover is needed */
       332  /* 24 hour rollover ... if it's now 10am Monday, an alarm that
       338  dev_dbg(&rtc->dev, "alarm rollover: %s\n", "day");
       343  /* Month rollover ... if it's the 31th, an alarm on the 3rd will
       349  dev_dbg(&rtc->dev, "alarm rollover: %s\n", "month");
       362  /* Year rollover ... easy except for leap years! */
       364  dev_dbg(&rtc->dev, "alarm rollover: %s\n", "year");
       372  dev_warn(&rtc->dev, "alarm rollover not handled\n");

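The __rtc_read_alarm() comments describe inferring the alarm date fields that many RTCs don't store: if the alarm's time-of-day has already passed today, it must mean tomorrow (day rollover), and analogously for month and year. A simplified illustration of just the day case:

    #include <stdio.h>
    #include <time.h>

    int main(void)
    {
        struct tm now   = { .tm_hour = 10, .tm_min = 0 };  /* it's 10:00 */
        struct tm alarm = { .tm_hour =  9, .tm_min = 0 };  /* alarm at 09:00 */

        int day_offset = 0;
        if (alarm.tm_hour <  now.tm_hour ||
            (alarm.tm_hour == now.tm_hour && alarm.tm_min <= now.tm_min))
            day_offset = 1;   /* already passed: 24-hour rollover to tomorrow */

        printf("alarm fires %s\n", day_offset ? "tomorrow" : "today");
        return 0;
    }

The real function chains the same reasoning through month and year, with the comment above noting that leap years make the year case the tricky one.
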
/linux/arch/arm64/include/asm/mmu.h
        41  * rollover event (see new_context() and flush_context()). In this case,
        53  * |  <rollover>

/linux/arch/arm/mm/context.c
        81  * any issues across a rollover).
    in flush_context():
       147  * rollover, but hasn't run another task in
    in new_context():
       199  * If our current ASID was active during a rollover, we

/linux/drivers/net/ethernet/ti/cpts.h
        82  CPTS_EV_ROLL,    /* Time Stamp Rollover Event */
        83  CPTS_EV_HALF,    /* Time Stamp Half Rollover Event */

/linux/net/packet/af_packet.c
    in __packet_rcv_has_room():
      1338  u32 *history = po->rollover->history;
    in fanout_flow_is_huge():
      1404  i = j = min_t(int, po->rollover->sock, num - 1);
    in fanout_demux_hash():
      1411  po->rollover->sock = i;
      1412  atomic_long_inc(&po->rollover->num);
      1414  atomic_long_inc(&po->rollover->num_huge);
    in fanout_demux_lb():
      1422  atomic_long_inc(&po->rollover->num_failed);
    in fanout_set_data():
      1678  struct packet_rollover *rollover = NULL;
    in fanout_find_new_id():
      1713  rollover = kzalloc(sizeof(*rollover), GFP_KERNEL);
      1714  if (!rollover)
    in fanout_add():
      1732  struct packet_rollover *rollover = NULL;    (local)
    [all …]

/linux/net/packet/internal.h
       118  struct packet_rollover *rollover;
       123  struct packet_rollover *rollover;    (struct member)

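Here "rollover" is the PACKET_FANOUT behaviour of moving a packet to another socket in the group when the chosen one has no receive-queue room, remembering the scan position in rollover->sock. A toy model of that scan (not the kernel's exact logic; the real code also tracks the num/num_huge/num_failed statistics seen above):

    #include <stdio.h>

    #define NUM_SOCKS 4

    static int has_room[NUM_SOCKS] = { 0, 0, 1, 1 };  /* queues 0 and 1 are full */
    static int rollover_sock;                          /* remembered start position */

    static int fanout_demux_rollover(int chosen)
    {
        if (has_room[chosen])
            return chosen;

        int i = rollover_sock;
        for (int n = 0; n < NUM_SOCKS; n++, i = (i + 1) % NUM_SOCKS) {
            if (i != chosen && has_room[i]) {
                rollover_sock = i;     /* resume from here next time */
                return i;
            }
        }
        return chosen;                 /* nowhere to roll over to */
    }

    int main(void)
    {
        printf("delivered to socket %d\n", fanout_demux_rollover(0)); /* 2 */
        return 0;
    }
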
/linux/Documentation/hwmon/fam15h_power.rst
       113  if (Jy < Jx) // Rollover has occurred

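Line 113 is the usual wrap-aware delta for a free-running accumulator: if the second sample is smaller than the first, the counter rolled over in between, so one full counter period is added back. Spelled out in C (JMAX is an assumed counter period chosen for illustration):

    #include <stdint.h>
    #include <stdio.h>

    #define JMAX (1ull << 32)   /* counter period: one past the max raw value */

    static uint64_t counter_delta(uint64_t jx, uint64_t jy)
    {
        if (jy < jx)                  /* rollover has occurred */
            return (JMAX - jx) + jy;
        return jy - jx;
    }

    int main(void)
    {
        /* sampled just before and just after a wrap: */
        printf("delta = %llu\n",
               (unsigned long long)counter_delta(JMAX - 5, 7)); /* 12 */
        return 0;
    }
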
/linux/sound/isa/gus/gus_pcm.c
    in snd_gf1_pcm_trigger_up():
       108  /* enable RAMP IRQ + rollover */
       112  ramp_ctrl &= ~0x04;    /* disable rollover */
    in snd_gf1_pcm_interrupt_wave():
       199  ramp_ctrl |= 0x04;     /* enable rollover */
    in snd_gf1_pcm_interrupt_volume():
       250  /* stop ramp, but leave rollover bit untouched */

/linux/samples/pfsm/pfsm-wakeup.c
    in main():
        62  /* Set RTC alarm to ALARM_DELTA_SEC sec in the future, and check for rollover */

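The comment at line 62 refers to normalising the seconds field after adding the alarm delta, so it doesn't run past 59. A sketch of just that arithmetic (ALARM_DELTA_SEC is the sample's own constant; the real sample programs the alarm through RTC ioctls):

    #include <stdio.h>
    #include <time.h>

    #define ALARM_DELTA_SEC 5

    int main(void)
    {
        struct tm t = { .tm_min = 42, .tm_sec = 58 };

        t.tm_sec += ALARM_DELTA_SEC;
        if (t.tm_sec >= 60) {                /* seconds rolled over */
            t.tm_sec -= 60;
            t.tm_min = (t.tm_min + 1) % 60;  /* minute->hour rollover ignored here */
        }
        printf("alarm at %02d:%02d\n", t.tm_min, t.tm_sec);  /* 43:03 */
        return 0;
    }
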
/linux/drivers/usb/dwc2/hcd.h
       603  * is done modulo FRLISTEN_64_SIZE. This accounts for the rollover of the
       616  * done modulo HFNUM_MAX_FRNUM. This accounts for the rollover of the
       626  * modulo HFNUM_MAX_FRNUM. This accounts for the rollover of the frame

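These comments all reduce to modulo arithmetic on a 14-bit frame counter (HFNUM_MAX_FRNUM). A sketch of wrap-safe increment and comparison in the style of the driver's dwc2_frame_num_inc()/dwc2_frame_num_gt() helpers (approximated here, not copied):

    #include <stdint.h>
    #include <stdio.h>

    #define HFNUM_MAX_FRNUM 0x3FFF   /* 14-bit frame number mask */

    static uint16_t frame_num_inc(uint16_t frame, uint16_t inc)
    {
        return (frame + inc) & HFNUM_MAX_FRNUM;
    }

    static int frame_num_gt(uint16_t a, uint16_t b)
    {
        /* "greater" within half the counter range, so it survives rollover */
        return a != b && ((a - b) & HFNUM_MAX_FRNUM) < (HFNUM_MAX_FRNUM >> 1);
    }

    int main(void)
    {
        uint16_t cur  = 0x3FFE;                 /* about to roll over */
        uint16_t next = frame_num_inc(cur, 8);  /* wraps to 0x0006 */

        printf("next=0x%04x gt=%d\n", next, frame_num_gt(next, cur));
        return 0;
    }
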
/linux/drivers/net/ethernet/sun/cassini.h
      1259  #define MAC_TX_COLL_NORMAL    0x0008  /* rollover of the normal
      1261  #define MAC_TX_COLL_EXCESS    0x0010  /* rollover of the excessive
      1263  #define MAC_TX_COLL_LATE      0x0020  /* rollover of the late
      1265  #define MAC_TX_COLL_FIRST     0x0040  /* rollover of the first
      1267  #define MAC_TX_DEFER_TIMER    0x0080  /* rollover of the defer
      1269  #define MAC_TX_PEAK_ATTEMPTS  0x0100  /* rollover of the peak
      1277  #define MAC_RX_FRAME_COUNT    0x0004  /* rollover of receive frame
      1279  #define MAC_RX_ALIGN_ERR      0x0008  /* rollover of alignment
      1281  #define MAC_RX_CRC_ERR        0x0010  /* rollover of crc error
      1283  #define MAC_RX_LEN_ERR        0x0020  /* rollover of length
    [all …]

/linux/Documentation/firmware-guide/acpi/apei/output_format.rst
       120  Bad TLP | Bad DLLP | RELAY_NUM Rollover | unknown | unknown | unknown | \

/linux/drivers/iio/temperature/max30208.c
       128  * Sets the rollover bit to '1' to enable overwriting FIFO during overflow.

/linux/drivers/gpu/drm/i915/display/intel_tv_regs.h
       356  /* Sets the rollover for the second subcarrier phase generation DDA */
       364  /* Sets the rollover for the third subcarrier phase generation DDA */

/linux/Documentation/ABI/testing/sysfs-bus-pci-devices-aer
        26  RELAY_NUM Rollover 0