/linux/arch/x86/lib/delay.c
     37  static void (*delay_halt_fn)(u64 start, u64 cycles) __ro_after_init;
     63  static void delay_tsc(u64 cycles)  in delay_tsc() argument
     73  if ((now - bclock) >= cycles)  in delay_tsc()
     91  cycles -= (now - bclock);  in delay_tsc()
    105  static void delay_halt_tpause(u64 start, u64 cycles)  in delay_halt_tpause() argument
    107  u64 until = start + cycles;  in delay_halt_tpause()
    125  static void delay_halt_mwaitx(u64 unused, u64 cycles)  in delay_halt_mwaitx() argument
    129  delay = min_t(u64, MWAITX_MAX_WAIT_CYCLES, cycles);  in delay_halt_mwaitx()
    151  u64 start, end, cycles = __cycles;  in delay_halt() local
    157  if (!cycles)  in delay_halt()
    [all …]

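The hits above are the x86 calibrated-delay helpers: each one reads a cycle
counter and then spins (or pauses/halts) until it has advanced by the requested
number of cycles. A minimal user-space sketch of that busy-wait pattern,
assuming an x86 toolchain that provides __rdtsc() from <x86intrin.h>; the helper
name and structure are illustrative, not the kernel's:

    #include <stdint.h>
    #include <x86intrin.h>

    /* Spin until the TSC has advanced by at least 'cycles'. */
    static void delay_cycles(uint64_t cycles)
    {
            uint64_t start = __rdtsc();

            /* Unsigned subtraction keeps the comparison valid across counter wrap. */
            while (__rdtsc() - start < cycles)
                    ;       /* busy-wait */
    }
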
/linux/tools/perf/dlfilters/dlfilter-show-cycles.c
     19  static __u64 cycles[MAX_CPU][MAX_ENTRY];  variable
     30  __u64 cycles[MAX_ENTRY];  member
     77  e->cycles[pos] += cnt;  in add_entry()
     92  cycles[cpu][pos] += sample->cyc_cnt;  in filter_event_early()
     98  static void print_vals(__u64 cycles, __u64 delta)  in print_vals() argument
    101  printf("%10llu %10llu ", (unsigned long long)cycles, (unsigned long long)delta);  in print_vals()
    103  printf("%10llu %10s ", (unsigned long long)cycles, "");  in print_vals()
    115  print_vals(cycles[cpu][pos], cycles[cpu][pos] - cycles_rpt[cpu][pos]);  in filter_event()
    116  cycles_rpt[cpu][pos] = cycles[cpu][pos];  in filter_event()
    124  print_vals(e->cycles[pos], e->cycles[pos] - e->cycles_rpt[pos]);  in filter_event()
    [all …]

/linux/drivers/memory/jz4780-nemc.c
    162  uint32_t smcr, val, cycles;  in jz4780_nemc_configure_bank() local
    208  cycles = jz4780_nemc_ns_to_cycles(nemc, val);  in jz4780_nemc_configure_bank()
    209  if (cycles > nemc->soc_info->tas_tah_cycles_max) {  in jz4780_nemc_configure_bank()
    211  val, cycles);  in jz4780_nemc_configure_bank()
    215  smcr |= cycles << NEMC_SMCR_TAS_SHIFT;  in jz4780_nemc_configure_bank()
    220  cycles = jz4780_nemc_ns_to_cycles(nemc, val);  in jz4780_nemc_configure_bank()
    221  if (cycles > nemc->soc_info->tas_tah_cycles_max) {  in jz4780_nemc_configure_bank()
    223  val, cycles);  in jz4780_nemc_configure_bank()
    227  smcr |= cycles << NEMC_SMCR_TAH_SHIFT;  in jz4780_nemc_configure_bank()
    232  cycles = jz4780_nemc_ns_to_cycles(nemc, val);  in jz4780_nemc_configure_bank()
    [all …]

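The configure-bank hits above convert each requested bank timing from
nanoseconds into memory-controller clock cycles and reject values that do not
fit the corresponding SMCR field. A sketch of that conversion under assumed
names (clk_hz, max_cycles); the driver's own helper is
jz4780_nemc_ns_to_cycles(), which is not reproduced here:

    #include <stdint.h>

    /* Round a time in ns up to whole controller clock cycles; fail if the
     * result exceeds the width of the timing field. */
    static int ns_to_cycles(uint32_t ns, uint64_t clk_hz, uint32_t max_cycles,
                            uint32_t *out)
    {
            uint64_t cycles = ((uint64_t)ns * clk_hz + 999999999ULL) / 1000000000ULL;

            if (cycles > max_cycles)
                    return -1;      /* timing cannot be programmed at this clock rate */

            *out = (uint32_t)cycles;
            return 0;
    }
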
/linux/drivers/gpu/drm/i915/gt/selftest_gt_pm.c
     56  u32 cycles[5];  in measure_clocks() local
     61  cycles[i] = -read_timestamp(engine);  in measure_clocks()
     66  cycles[i] += read_timestamp(engine);  in measure_clocks()
     72  sort(cycles, 5, sizeof(*cycles), cmp_u32, NULL);  in measure_clocks()
     73  *out_cycles = (cycles[1] + 2 * cycles[2] + cycles[3]) / 4;  in measure_clocks()
     99  u32 cycles;  in live_gt_clocks() local
    107  measure_clocks(engine, &cycles, &dt);  in live_gt_clocks()
    109  time = intel_gt_clock_interval_to_ns(engine->gt, cycles);  in live_gt_clocks()
    113  engine->name, cycles, time, dt, expected,  in live_gt_clocks()
    123  if (9 * expected < 8 * cycles || 8 * expected > 9 * cycles) {  in live_gt_clocks()

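measure_clocks() takes five timestamp deltas, sorts them, discards the smallest
and largest, and averages the remaining three with the median weighted twice;
live_gt_clocks() then accepts the result only if it falls within the 8/9..9/8
band around the expected value. A stand-alone sketch of that outlier-trimming
reduction (the sampling itself is omitted and the names are illustrative):

    #include <stdint.h>
    #include <stdlib.h>

    static int cmp_u32(const void *a, const void *b)
    {
            uint32_t x = *(const uint32_t *)a, y = *(const uint32_t *)b;

            return (x > y) - (x < y);
    }

    /* Sort five samples, drop the two extremes, weight the median twice.
     * The sum is widened to 64 bits so it cannot wrap. */
    static uint32_t trimmed_average(uint32_t s[5])
    {
            qsort(s, 5, sizeof(s[0]), cmp_u32);
            return (uint32_t)(((uint64_t)s[1] + 2ULL * s[2] + s[3]) / 4);
    }
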
/linux/tools/perf/Documentation/intel-hybrid.txt
     45  For example, count the 'cycles' event on core cpus.
     47  perf stat -e cpu_core/cycles/
     56  For hardware events, they have pre-defined configs (e.g. 0 for cycles).
     84  perf stat -e cycles -a (use system-wide in this example), two events
    115  The kernel creates 'cycles' (0x400000000) on cpu0-cpu15 (core cpus),
    116  and create 'cycles' (0x800000000) on cpu16-cpu23 (atom cpus).
    122  6,744,979      cpu_core/cycles/
    123  1,965,552      cpu_atom/cycles/
    125  The first 'cycles' is core event, the second 'cycles' is atom event.
    133  scaled value for core cycles is 160,444,092 and the percentage is 0.47%.
    [all …]

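The 0x400000000 and 0x800000000 configs quoted above come from the extended
hardware-event encoding used on hybrid systems: the PMU type id is stored in
the upper 32 bits of the event config, with the generic hardware event id
(0 for cycles) in the lower bits. A tiny sketch of that encoding; the type ids
4 (cpu_core) and 8 (cpu_atom) are taken from this example and are not
guaranteed values:

    #include <stdint.h>

    /* Extended hardware-event config: PMU type in bits 63:32, generic
     * hardware event id in bits 31:0. */
    static inline uint64_t hybrid_hw_config(uint32_t pmu_type, uint32_t event_id)
    {
            return ((uint64_t)pmu_type << 32) | event_id;
    }

    /* hybrid_hw_config(4, 0) == 0x400000000  ->  cpu_core/cycles/
     * hybrid_hw_config(8, 0) == 0x800000000  ->  cpu_atom/cycles/  */
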
/linux/tools/perf/Documentation/perf-daemon.txt
     32  …916507 916508 ... \_ perf record --control=fifo:control,ack -m 10M -e cycles --overwrite --switc…
    120  [session-cycles]
    121  run = -m 10M -e cycles --overwrite --switch-output -a
    136  [603350:cycles] perf record -m 10M -e cycles --overwrite --switch-output -a
    149  [603350:cycles] perf record -m 10M -e cycles --overwrite --switch-output -a
    150  base: /opt/perfdata/session-cycles
    151  output: /opt/perfdata/session-cycles/output
    152  control: /opt/perfdata/session-cycles/control
    153  ack: /opt/perfdata/session-cycles/ack
    173  OK   cycles
    [all …]

/linux/tools/perf/Documentation/perf.data-directory-format.txt
     51  Samples for 'cycles' event do not have CPU attribute set. Skipping 'cpu' field.
     55  … perf  15316 2060795.480902:        1 cycles:  ffffffffa2caa548 native_write_msr+0x8 (vmlinux)
     56  … perf  15316 2060795.480906:        1 cycles:  ffffffffa2caa548 native_write_msr+0x8 (vmlinux)
     57  … perf  15316 2060795.480908:        7 cycles:  ffffffffa2caa548 native_write_msr+0x8 (vmlinux)
     58  … perf  15316 2060795.480910:      119 cycles:  ffffffffa2caa54a native_write_msr+0xa (vmlinux)
     59  … perf  15316 2060795.480912:     2109 cycles:  ffffffffa2c9b7b0 native_apic_msr_write+0x0 (vmlinux)
     60  … perf  15316 2060795.480914:    37606 cycles:  ffffffffa2f121fe perf_event_addr_filters_exec+0x2e …
     61  … uname 15316 2060795.480924:   588287 cycles:  ffffffffa303a56d page_counter_try_charge+0x6d (vml…
     62  … uname 15316 2060795.481067:  2261945 cycles:  ffffffffa301438f kmem_cache_free+0x4f (vmlinux)
     63  … uname 15316 2060795.481643:  2172167 cycles:  7f1a48c393c0 _IO_un_link+0x0 (/lib/x86_64-linu…

/linux/Documentation/devicetree/bindings/mtd/fsmc-nand.txt
     15  byte 0 TCLR  : CLE to RE delay in number of AHB clock cycles, only 4 bits
     17  cycles.
     19  byte 2 THIZ  : number of HCLK clock cycles during which the data bus is
     21  Only valid for write transactions. Zero means zero cycles,
     22  255 means 255 cycles.
     23  byte 3 THOLD : number of HCLK clock cycles to hold the address (and data
     25  one cycle, 255 means 256 cycles.
     26  byte 4 TWAIT : number of HCLK clock cycles to assert the command to the
     28  255 means 256 cycles.
     29  byte 5 TSET  : number of HCLK clock cycles to assert the address before the
    [all …]

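Note the two encodings visible in the excerpt: THIZ stores the cycle count
directly (0 means zero cycles), while fields such as THOLD and TWAIT use an
off-by-one encoding where a programmed value of N means N+1 cycles. A small
illustrative helper for the latter encoding; the function name is hypothetical
and not part of the binding:

    #include <stdint.h>

    /* Encode a desired HCLK cycle count for a field where value N means
     * N+1 cycles (1..256 cycles representable in one byte). */
    static uint8_t encode_plus_one_cycles(uint32_t cycles)
    {
            if (cycles < 1)
                    cycles = 1;
            if (cycles > 256)
                    cycles = 256;
            return (uint8_t)(cycles - 1);
    }
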
/linux/drivers/net/ethernet/mellanox/mlx4/en_clock.c
     44  container_of(tc, struct mlx4_en_dev, cycles);  in mlx4_en_read_clock()
    139  mdev->cycles.mult = mult;  in mlx4_en_phc_adjfine()
    208  timecounter_init(&mdev->clock, &mdev->cycles, ns);  in mlx4_en_phc_settime()
    275  memset(&mdev->cycles, 0, sizeof(mdev->cycles));  in mlx4_en_init_timestamp()
    276  mdev->cycles.read = mlx4_en_read_clock;  in mlx4_en_init_timestamp()
    277  mdev->cycles.mask = CLOCKSOURCE_MASK(48);  in mlx4_en_init_timestamp()
    278  mdev->cycles.shift = freq_to_shift(dev->caps.hca_core_clock);  in mlx4_en_init_timestamp()
    279  mdev->cycles.mult =  in mlx4_en_init_timestamp()
    280  clocksource_khz2mult(1000 * dev->caps.hca_core_clock, mdev->cycles.shift);  in mlx4_en_init_timestamp()
    281  mdev->nominal_c_mult = mdev->cycles.mult;  in mlx4_en_init_timestamp()
    [all …]

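The fields initialised above describe a cyclecounter over the 48-bit HCA clock:
raw counter deltas are converted to nanoseconds as (delta * mult) >> shift,
with mult and shift derived from the core clock rate. A stand-alone sketch of
that conversion; the struct mirrors the kernel's cyclecounter layout but is
defined here only for illustration:

    #include <stdint.h>

    struct cyclecounter_sketch {
            uint64_t mask;          /* e.g. (1ULL << 48) - 1 for a 48-bit counter */
            uint32_t mult;
            uint32_t shift;
    };

    /* Nanoseconds elapsed between two raw counter readings. Assumes the
     * delta is small enough that delta * mult fits in 64 bits. */
    static uint64_t cycles_delta_to_ns(const struct cyclecounter_sketch *cc,
                                       uint64_t now, uint64_t last)
    {
            uint64_t delta = (now - last) & cc->mask;

            return (delta * cc->mult) >> cc->shift;
    }
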
/linux/drivers/net/wireless/ath/hw.c
    144  u32 cycles, busy, rx, tx;  in ath_hw_cycle_counters_update() local
    151  cycles = REG_READ(ah, AR_CCCNT);  in ath_hw_cycle_counters_update()
    166  common->cc_ani.cycles += cycles;  in ath_hw_cycle_counters_update()
    171  common->cc_survey.cycles += cycles;  in ath_hw_cycle_counters_update()
    183  listen_time = (cc->cycles - cc->rx_frame - cc->tx_frame) /  in ath_hw_get_listen_time()

/linux/drivers/pwm/pwm-berlin.c
     81  u64 cycles;  in berlin_pwm_config() local
     83  cycles = clk_get_rate(bpc->clk);  in berlin_pwm_config()
     84  cycles *= period_ns;  in berlin_pwm_config()
     85  do_div(cycles, NSEC_PER_SEC);  in berlin_pwm_config()
     87  if (cycles > BERLIN_PWM_MAX_TCNT) {  in berlin_pwm_config()
     89  cycles >>= 12; // Prescaled by 4096  in berlin_pwm_config()
     91  if (cycles > BERLIN_PWM_MAX_TCNT)  in berlin_pwm_config()
     95  period = cycles;  in berlin_pwm_config()
     96  cycles *= duty_ns;  in berlin_pwm_config()
     97  do_div(cycles, period_ns);  in berlin_pwm_config()
    [all …]

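berlin_pwm_config() turns the requested period into input-clock cycles, falls
back to the /4096 prescaler if the count would overflow TCNT, and then derives
the duty count at the same scale. A simplified sketch of that arithmetic;
max_tcnt stands in for BERLIN_PWM_MAX_TCNT and the function itself is
illustrative, not the driver's:

    #include <stdbool.h>
    #include <stdint.h>

    /* Assumes clk_hz * period_ns fits in 64 bits and period_ns != 0. */
    static int pwm_period_to_counts(uint64_t clk_hz, uint64_t period_ns,
                                    uint64_t duty_ns, uint64_t max_tcnt,
                                    uint64_t *period_out, uint64_t *duty_out,
                                    bool *use_div4096)
    {
            uint64_t cycles = clk_hz * period_ns / 1000000000ULL;

            *use_div4096 = false;
            if (cycles > max_tcnt) {
                    cycles >>= 12;                  /* prescale by 4096 */
                    *use_div4096 = true;
                    if (cycles > max_tcnt)
                            return -1;              /* period not representable */
            }

            *period_out = cycles;
            *duty_out = cycles * duty_ns / period_ns;  /* duty at the same scale */
            return 0;
    }
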
/linux/drivers/pwm/pwm-xilinx.c
     35  u64 cycles)  in xilinx_timer_tlr_cycles() argument
     37  WARN_ON(cycles < 2 || cycles - 2 > priv->max);  in xilinx_timer_tlr_cycles()
     40  return cycles - 2;  in xilinx_timer_tlr_cycles()
     41  return priv->max - cycles + 2;  in xilinx_timer_tlr_cycles()
     47  u64 cycles;  in xilinx_timer_get_period() local
     50  cycles = tlr + 2;  in xilinx_timer_get_period()
     52  cycles = (u64)priv->max - tlr + 2;  in xilinx_timer_get_period()
     55  return DIV64_U64_ROUND_UP(cycles * NSEC_PER_SEC,  in xilinx_timer_get_period()

/linux/drivers/pwm/pwm-atmel.c
    197  unsigned long long cycles = state->period;  in atmel_pwm_calculate_cprd_and_pres() local
    201  cycles *= clkrate;  in atmel_pwm_calculate_cprd_and_pres()
    202  do_div(cycles, NSEC_PER_SEC);  in atmel_pwm_calculate_cprd_and_pres()
    209  shift = fls(cycles) - atmel_pwm->data->cfg.period_bits;  in atmel_pwm_calculate_cprd_and_pres()
    216  cycles >>= *pres;  in atmel_pwm_calculate_cprd_and_pres()
    221  *cprd = cycles;  in atmel_pwm_calculate_cprd_and_pres()
    230  unsigned long long cycles = state->duty_cycle;  in atmel_pwm_calculate_cdty() local
    232  cycles *= clkrate;  in atmel_pwm_calculate_cdty()
    233  do_div(cycles, NSEC_PER_SEC);  in atmel_pwm_calculate_cdty()
    234  cycles >>= pres;  in atmel_pwm_calculate_cdty()
    [all …]

/linux/arch/arm64/lib/delay.c
     26  void __delay(unsigned long cycles)  in __delay() argument
     31  u64 end = start + cycles;  in __delay()
     38  while ((get_cycles() - start) < cycles)  in __delay()
     44  while ((get_cycles() - start + timer_evt_period) < cycles)  in __delay()
     48  while ((get_cycles() - start) < cycles)  in __delay()

/linux/Documentation/driver-api/mtd/spi-nor.rst
     83  mode cycles  0
     84  dummy cycles 0
     87  mode cycles  0
     88  dummy cycles 8
     91  mode cycles  0
     92  dummy cycles 8
     95  mode cycles  4
     96  dummy cycles 0
     99  mode cycles  0
    100  dummy cycles 8
    [all …]

/linux/tools/perf/util/parse-events.l
    326  cpu-cycles|cycles { return hw_term(yyscanner, PERF_COUNT_HW_CPU_CYCLES); }
    327  stalled-cycles-frontend|idle-cycles-frontend { return hw_term(yyscanner, PERF_COUNT_HW_STALLED_CYCLES_FRONTEND); }
    328  stalled-cycles-backend|idle-cycles-backend { return hw_term(yyscanner, PERF_COUNT_HW_STALLED_CYCLES_BACKEND); }
    334  bus-cycles { return hw_term(yyscanner, PERF_COUNT_HW_BUS_CYCLES); }
    335  ref-cycles { return hw_term(yyscanner, PERF_COUNT_HW_REF_CPU_CYCLES); }
    379  cpu-cycles|cycles { retur
    [all …]

/linux/tools/perf/util/block-info.c
    111  bi->cycles = ch->cycles;  in init_block_info()
    135  u64 cycles = 0;  in block_info__process_sym() local
    161  cycles += bi->cycles_aggr / bi->num_aggr;  in block_info__process_sym()
    173  *block_cycles_aggr += cycles;  in block_info__process_sym()
    244  static void cycles_string(u64 cycles, char *buf, int size)  in cycles_string() argument
    246  if (cycles >= 1000000)  in cycles_string()
    247  scnprintf(buf, size, "%.1fM", (double)cycles / 1000000.0);  in cycles_string()
    248  else if (cycles >= 1000)  in cycles_string()
    249  scnprintf(buf, size, "%.1fK", (double)cycles / 1000.0);  in cycles_string()
    251  scnprintf(buf, size, "%1d", cycles);  in cycles_string()
    [all …]

/linux/arch/xtensa/include/asm/delay.h
     40  unsigned long cycles = (usecs * (ccount_freq >> 15)) >> 5;  in __udelay() local
     43  while (((unsigned long)get_ccount()) - start < cycles)  in __udelay()
     61  unsigned long cycles = (nsec * (ccount_freq >> 15)) >> 15;  in __ndelay() local
     62  __delay(cycles);  in __ndelay()

/linux/tools/testing/selftests/kvm/include/riscv/arch_timer.h
     22  #define cycles_to_usec(cycles) \  argument
     23  ((uint64_t)(cycles) * 1000000 / (timer_freq))
     58  static inline void __delay(uint64_t cycles)  in __delay() argument
     62  while ((timer_get_cycles() - start) < cycles)  in __delay()

/linux/lib/vdso/gettimeofday.c
     39  static __always_inline u64 vdso_calc_ns(const struct vdso_data *vd, u64 cycles, u64 base)  in vdso_calc_ns() argument
     41  u64 delta = (cycles - vd->cycle_last) & VDSO_DELTA_MASK(vd);  in vdso_calc_ns()
     65  static inline bool vdso_cycles_ok(u64 cycles)  in vdso_cycles_ok() argument
     78  u64 cycles, ns;  in do_hres_timens() local
     96  cycles = __arch_get_hw_counter(vd->clock_mode, vd);  in do_hres_timens()
     97  if (unlikely(!vdso_cycles_ok(cycles)))  in do_hres_timens()
     99  ns = vdso_calc_ns(vd, cycles, vdso_ts->nsec);  in do_hres_timens()
    134  u64 cycles, sec, ns;  in do_hres() local
    164  cycles = __arch_get_hw_counter(vd->clock_mode, vd);  in do_hres()
    165  if (unlikely(!vdso_cycles_ok(cycles)))  in do_hres()
    [all …]

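In these paths the vDSO reads the architecture's hardware counter, checks that
the value is usable (vdso_cycles_ok), and converts it in vdso_calc_ns(): the
delta from the last timekeeper update is masked to the counter width, scaled by
mult, added to a base stored in shifted (fractional-nanosecond) units, and
shifted down. A condensed sketch with simplified stand-in names for the vDSO
data, not the real structures:

    #include <stdint.h>

    struct vdso_sketch {
            uint64_t cycle_last;    /* counter value at the last timekeeper update */
            uint64_t mask;          /* valid counter bits */
            uint32_t mult;
            uint32_t shift;
    };

    /* 'base' is the time recorded at cycle_last, kept in units of ns << shift
     * the way the generic vDSO stores it. */
    static uint64_t calc_ns(const struct vdso_sketch *vd, uint64_t cycles,
                            uint64_t base)
    {
            uint64_t delta = (cycles - vd->cycle_last) & vd->mask;

            return (delta * vd->mult + base) >> vd->shift;
    }
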
/linux/Documentation/arch/m68k/buddha-driver.rst
    147  497ns Select (7 clock cycles) , IOR/IOW after 172ns (2 clock cycles)
    152  639ns Select (9 clock cycles), IOR/IOW after 243ns (3 clock cycles)
    155  781ns Select (11 clock cycles), IOR/IOW after 314ns (4 clock cycles)
    158  355ns Select (5 clock cycles), IOR/IOW after 101ns (1 clock cycle)
    161  355ns Select (5 clock cycles), IOR/IOW after 172ns (2 clock cycles)
    164  355ns Select (5 clock cycles), IOR/IOW after 243ns (3 clock cycles)
    167  1065ns Select (15 clock cycles), IOR/IOW after 314ns (4 clock cycles)
    170  355ns Select, (5 clock cycles), IOR/IOW after 101ns (1 clock cycle)
    176  781ns select, IOR/IOW after 4 clock cycles (=314ns) active.
    180  system: Sometimes two more clock cycles are inserted by the
    [all …]

/linux/tools/perf/tests/shell/stat+shadow_stat.sh
     13  perf stat -a -e cycles sleep 1 2>&1 | grep -e cpu_core && exit 2
     17  perf stat -a --no-big-num -e cycles,instructions sleep 1 2>&1 | \
     18  grep -e cycles -e instructions | \
     56  perf stat -a -A --no-big-num -e cycles,instructions sleep 1 2>&1 | \

/linux/tools/virtio/ringtest/main.h
     21  static inline void wait_cycles(unsigned long long cycles)  in wait_cycles() argument
     26  while (__rdtsc() - t < cycles) {}  in wait_cycles()
     33  static inline void wait_cycles(unsigned long long cycles)  in wait_cycles() argument
     35  asm volatile("0: brctg %0,0b" : : "d" (cycles));  in wait_cycles()
     43  static inline void wait_cycles(unsigned long long cycles)  in wait_cycles() argument

/linux/tools/power/cpupower/bench/benchmark.c
    113  " for %lius\n", _round + 1, config->cycles,  in start_benchmark()
    125  for (cycle = 0; cycle < config->cycles; cycle++) {  in start_benchmark()
    139  performance_time / config->cycles);  in start_benchmark()
    151  for (cycle = 0; cycle < config->cycles; cycle++) {  in start_benchmark()
    169  powersave_time / config->cycles);  in start_benchmark()

/linux/drivers/char/hw_random/cavium-rng-vf.c
     85  u64 status, cycles;  in check_rng_health() local
     99  cycles = status >> 1;  in check_rng_health()
    100  if (!cycles)  in check_rng_health()
    109  cycles = cycles / 2;  in check_rng_health()
    110  cur_err = (cycles * 1000000000) / rng->clock_rate; /* In nanosec */  in check_rng_health()
