/linux/Documentation/block/bfq-iosched.rst
      14:  throughput high).
      17:  throughput. So, when needed for achieving a lower latency, BFQ builds
      18:  schedules that may lead to a lower throughput. If your main or only
      20:  throughput at all times, then do switch off all low-latency heuristics
      23:  latency and throughput, or on how to maximize throughput.
      31:  instrumentation, and using the throughput-sync.sh script of the S
      50:  support is enabled), then the sustainable throughput with BFQ
     117:  High throughput
     120:  On hard disks, BFQ achieves up to 30% higher throughput than CFQ, and
     121:  up to 150% higher throughput than DEADLINE and NOOP, with all the
    [all …]

/linux/net/batman-adv/bat_v_elp.c (matches in batadv_v_elp_get_throughput())
      74:  * batadv_v_elp_get_throughput() - get the throughput towards a neighbour
      75:  * @neigh: the neighbour for which the throughput has to be obtained
      76:  * @pthroughput: calculated throughput towards the given neighbour in multiples
      89:  u32 throughput;
      92:  /* don't query throughput when no longer associated with any
     101:  throughput = atomic_read(&hard_iface->bat_v.throughput_override);
     102:  if (throughput != 0) {
     103:  	*pthroughput = throughput;
     107:  /* if this is a wireless device, then ask its throughput through
     130:  * the throughput metric to 0.
    [all …]

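The matched lines outline a precedence chain for the neighbour throughput estimate: a non-zero per-interface throughput_override wins outright, otherwise a wireless interface is asked for a driver-level estimate, and a fallback (or 0 once the neighbour is gone) closes the chain. A minimal userspace sketch of that ordering, with hypothetical types and field names standing in for the batman-adv internals (the unit is left truncated above; batman-adv conventionally keeps this metric in 100 kbit/s steps):

    #include <stdbool.h>
    #include <stdio.h>

    /* Hypothetical stand-in for the per-hard-interface state. */
    struct iface_state {
        unsigned int throughput_override; /* 0 means "no override configured" */
        bool is_wifi;
        bool wifi_query_ok;               /* pretend outcome of the driver query */
        unsigned int wifi_throughput;     /* estimate the driver would report */
    };

    /* Mirrors the precedence visible in batadv_v_elp_get_throughput():
     * override first, then the wireless estimate, then a default. */
    static unsigned int get_throughput(const struct iface_state *hif,
                                       unsigned int fallback)
    {
        if (hif->throughput_override)
            return hif->throughput_override;
        if (hif->is_wifi && hif->wifi_query_ok)
            return hif->wifi_throughput;
        return fallback;
    }

    int main(void)
    {
        struct iface_state wifi = { 0, true, true, 5400 };
        printf("metric: %u\n", get_throughput(&wifi, 100)); /* 100 kbit/s units */
        return 0;
    }
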
/linux/net/batman-adv/bat_v_ogm.c
     337:  …"Sending own OGM2 packet (originator %pM, seqno %u, throughput %u, TTL %d) on interface %s [%pM]\n… (batadv_v_ogm_send_softif)
     339:  ntohl(ogm_packet->throughput), ogm_packet->ttl, (batadv_v_ogm_send_softif)
     453:  * batadv_v_forward_penalty() - apply a penalty to the throughput metric
     458:  * @throughput: the current throughput
     460:  * Apply a penalty on the current throughput metric value based on the
     463:  * Initially the per hardif hop penalty is applied to the throughput. After
     465:  * - throughput * 50% if the incoming and outgoing interface are the
     466:  *   same WiFi interface and the throughput is above
     468:  * - throughput if the outgoing interface is the default
     471:  * - throughput * node hop penalty otherwise
    [all …]

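The kernel-doc above spells out the forwarding-penalty rules in prose. A compilable sketch of those three cases follows; the 255-based hop-penalty scaling and the exact ordering of the checks are assumptions of this sketch, not quoted from the (truncated) matches:

    #include <stdbool.h>

    /* Sketch of the behaviour batadv_v_forward_penalty() documents.
     * Hop penalties are assumed to be 0..255 scaling factors. */
    static unsigned int forward_penalty(unsigned int throughput,
                                        unsigned int hardif_hop_penalty,
                                        unsigned int node_hop_penalty,
                                        bool same_wifi_iface,
                                        bool outgoing_is_default)
    {
        /* "Initially the per hardif hop penalty is applied" */
        throughput = throughput * (255 - hardif_hop_penalty) / 255;

        if (same_wifi_iface)
            return throughput / 2;   /* throughput * 50% */
        if (outgoing_is_default)
            return throughput;       /* kept for the local table, not forwarded */
        return throughput * (255 - node_hop_penalty) / 255; /* node hop penalty */
    }
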
/linux/net/batman-adv/bat_v.c
     115:  ewma_throughput_init(&hardif_neigh->bat_v.throughput); (batadv_v_hardif_neigh_init)
     133:  u32 throughput; (batadv_v_neigh_dump_neigh)
     136:  throughput = ewma_throughput_read(&hardif_neigh->bat_v.throughput); (batadv_v_neigh_dump_neigh)
     137:  throughput = throughput * 100; (batadv_v_neigh_dump_neigh)
     152:  nla_put_u32(msg, BATADV_ATTR_THROUGHPUT, throughput)) (batadv_v_neigh_dump_neigh)
     275:  u32 throughput; (batadv_v_orig_dump_subentry)
     282:  throughput = n_ifinfo->bat_v.throughput * 100; (batadv_v_orig_dump_subentry)
     304:  nla_put_u32(msg, BATADV_ATTR_THROUGHPUT, throughput) || (batadv_v_orig_dump_subentry)
     465:  ret = ifinfo1->bat_v.throughput - ifinfo2->bat_v.throughput; (batadv_v_neigh_cmp)
     491:  threshold = ifinfo1->bat_v.throughput / 4; (batadv_v_neigh_is_sob)
    [all …]

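Two details stand out in these matches: the internal metric is multiplied by 100 before being exported over netlink (consistent with 100 kbit/s internal units), and the "sob" (similar-or-better) check derives its threshold as a quarter of the first neighbour's throughput. A sketch; which side of the comparison gets the 25% slack is an assumption here:

    #include <stdbool.h>

    /* Internal metric (100 kbit/s steps, per the *100 export above) -> kbit/s. */
    static unsigned int metric_to_kbit(unsigned int throughput)
    {
        return throughput * 100;
    }

    /* neigh1 counts as "similar or better" than neigh2 if it comes within a
     * 25% window; the window size matches the match at line 491, but the
     * direction of the final comparison is an assumption of this sketch. */
    static bool similar_or_better(unsigned int tp1, unsigned int tp2)
    {
        unsigned int threshold = tp1 / 4;

        return tp1 + threshold >= tp2;
    }
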
/linux/drivers/net/wireless/intel/iwlwifi/dvm/led.c
      23:  /* Throughput		OFF time(ms)	ON time (ms)
      37:  { .throughput = 0, .blink_time = 334 },
      38:  { .throughput = 1 * 1024 - 1, .blink_time = 260 },
      39:  { .throughput = 5 * 1024 - 1, .blink_time = 220 },
      40:  { .throughput = 10 * 1024 - 1, .blink_time = 190 },
      41:  { .throughput = 20 * 1024 - 1, .blink_time = 170 },
      42:  { .throughput = 50 * 1024 - 1, .blink_time = 150 },
      43:  { .throughput = 70 * 1024 - 1, .blink_time = 130 },
      44:  { .throughput = 100 * 1024 - 1, .blink_time = 110 },
      45:  { .throughput = 200 * 1024 - 1, .blink_time = 80 },
    [all …]

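The table pairs throughput thresholds with blink periods: the faster the traffic, the shorter the blink. A sketch of the kind of lookup such a table implies; the scan direction and the kbit/s unit are assumptions (mac80211's tpt-trigger tables use kbit/s, but the unit is not visible in the match):

    #include <stddef.h>

    /* Entry shape from the match above: a throughput threshold paired
     * with a blink period in ms. Units assumed to be kbit/s. */
    struct tpt_blink {
        int throughput;
        unsigned int blink_time;
    };

    static const struct tpt_blink tbl[] = {
        { .throughput = 0,              .blink_time = 334 },
        { .throughput = 1 * 1024 - 1,   .blink_time = 260 },
        { .throughput = 5 * 1024 - 1,   .blink_time = 220 },
        { .throughput = 10 * 1024 - 1,  .blink_time = 190 },
        { .throughput = 200 * 1024 - 1, .blink_time = 80 },
    };

    /* Pick the period of the highest threshold not exceeding tpt
     * (the lookup itself is an assumption; only the table is quoted). */
    static unsigned int blink_time_for(int tpt)
    {
        unsigned int t = tbl[0].blink_time;

        for (size_t i = 0; i < sizeof(tbl) / sizeof(tbl[0]); i++)
            if (tpt >= tbl[i].throughput)
                t = tbl[i].blink_time;
        return t;
    }
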
/linux/drivers/net/wireless/intel/iwlwifi/dvm/rs.c
     150:  * The following tables contain the expected throughput metrics for all rates
     411:  * Static function to get the expected throughput from an iwl_scale_tbl_info
     441:  /* Get expected throughput */ (rs_collect_tx_data)
     489:  /* Calculate average throughput, if we have enough history. */ (rs_collect_tx_data)
    1035:  * Set frame tx success limits according to legacy vs. high-throughput,
    1062:  * Find correct throughput table for given mode of modulation
    1109:  * Find starting rate for new "search" high-throughput mode of modulation.
    1111:  * above the current measured throughput of "active" mode, to give new mode
    1117:  * to decrease to match "active" throughput. When moving from MIMO to SISO,
    1130:  /* expected "search" throughput */ (rs_get_best_rate)
    [all …]

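The comments around line 1109 describe how a new "search" modulation mode gets its starting rate: just above the throughput the "active" mode actually measured, so the probe only succeeds if the new mode can genuinely do better. A minimal sketch of that selection, assuming the expected-throughput entries are sorted by increasing rate:

    /* Pick the first rate whose expected throughput beats what the
     * active mode measured; falling back to the top rate when nothing
     * qualifies is an assumption of this sketch. */
    static int search_start_rate(const unsigned int *expected_tpt,
                                 int n_rates,
                                 unsigned int active_measured_tpt)
    {
        for (int i = 0; i < n_rates; i++)
            if (expected_tpt[i] > active_measured_tpt)
                return i;
        return n_rates - 1;
    }
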
/linux/net/x25/x25_facilities.c
     103:  facilities->throughput = p[1]; (x25_parse_facilities)
     210:  if (facilities->throughput && (facil_mask & X25_MASK_THROUGHPUT)) { (x25_create_facilities)
     212:  *p++ = facilities->throughput; (x25_create_facilities)
     291:  if (theirs.throughput) { (x25_negotiate_facilities)
     292:  int theirs_in = theirs.throughput & 0x0f; (x25_negotiate_facilities)
     293:  int theirs_out = theirs.throughput & 0xf0; (x25_negotiate_facilities)
     294:  int ours_in = ours->throughput & 0x0f; (x25_negotiate_facilities)
     295:  int ours_out = ours->throughput & 0xf0; (x25_negotiate_facilities)
     297:  net_dbg_ratelimited("X.25: inbound throughput negotiated\n"); (x25_negotiate_facilities)
     298:  new->throughput = (new->throughput & 0xf0) | theirs_in; (x25_negotiate_facilities)
    [all …]

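The masks in these matches show the X.25 throughput facility packing two 4-bit classes into one byte: inbound in the low nibble, outbound in the high nibble. A sketch of one negotiation step; the "accept the peer's class only when it is lower" rule is inferred from the truncated matches, not quoted:

    #include <stdio.h>

    static unsigned char negotiate_throughput(unsigned char ours,
                                              unsigned char theirs)
    {
        unsigned char in = theirs & 0x0f, out = theirs & 0xf0;

        if ((ours & 0x0f) < in)
            in = ours & 0x0f;    /* keep the lower inbound class */
        if ((ours & 0xf0) < out)
            out = ours & 0xf0;   /* keep the lower outbound class */
        return out | in;
    }

    int main(void)
    {
        /* e.g. ours 0xa9 vs theirs 0xbb -> 0xa9 */
        printf("0x%02x\n", negotiate_throughput(0xa9, 0xbb));
        return 0;
    }
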
/linux/tools/testing/selftests/bpf/benchs/run_common.sh
      38:  echo -n "throughput: "
      39:  echo -n "$*" | sed -E "s/.*throughput\s+([0-9]+\.[0-9]+ ± [0-9]+\.[0-9]+\sM\sops\/s).*/\1/"
      46:  echo -n "hits throughput: "
      47:  echo -n "$*" | sed -E "s/.* hits throughput\s+([0-9]+\.[0-9]+ ± [0-9]+\.[0-9]+\sM\sops\/s).*/\1/"
      50:  echo -n ", important_hits throughput: "
      51:  …echo "$*" | sed -E "s/.*important_hits throughput\s+([0-9]+\.[0-9]+ ± [0-9]+\.[0-9]+\sM\sops\/s).*…

/linux/tools/testing/selftests/amd-pstate/tbench.sh
       5:  # power consumption and throughput etc. when this script triggers tbench
      10:  # 4) Get power consumption and throughput by amd_pstate_trace.py.
      98:  grep Throughput $OUTFILE_TBENCH-perf-$1-$2.log | awk '{print $2}' > $OUTFILE_TBENCH-throughput-$1-…
      99:  tp_sum=$(awk 'BEGIN {sum=0};{sum += $1};END {print sum}' $OUTFILE_TBENCH-throughput-$1-$2.log)
     100:  printf "Tbench-$1-#$2 throughput(MB/s): $tp_sum\n" | tee -a $OUTFILE_TBENCH.result
     106:  # Performance is throughput per unit time, denoted T/t, where T is throughput rendered in t seconds.
     150:  …bench-$1-#" $OUTFILE_TBENCH.result | grep "throughput(MB/s):" | awk '{print $NF}' > $OUTFILE_TBENC…
     151:  tp_sum=$(awk 'BEGIN {sum=0};{sum += $1};END {print sum}' $OUTFILE_TBENCH-throughput-$1.log)
     152:  printf "Tbench-$1 total throughput(MB/s): $tp_sum\n" | tee -a $OUTFILE_TBENCH.result
     154:  …avg_tp=$(awk 'BEGIN {sum=0};{sum += $1};END {print sum/'$LOOP_TIMES'}' $OUTFILE_TBENCH-throughput-…
    [all …]

/linux/include/linux/dma/k3-psil.h
      16:  * enum udma_tp_level - Channel Throughput Levels
      18:  * @UDMA_TP_HIGH: High Throughput channel
      19:  * @UDMA_TP_ULTRAHIGH: Ultra High Throughput channel
      45:  * @channel_tpl: Desired throughput level for the channel

/linux/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c (matches in dsc_throughput_from_dpcd())
     229:  static bool dsc_throughput_from_dpcd(int dpcd_throughput, int *throughput)
     233:  *throughput = 0;
     236:  *throughput = 170;
     239:  *throughput = 340;
     242:  *throughput = 400;
     245:  *throughput = 450;
     248:  *throughput = 500;
     251:  *throughput = 550;
     254:  *throughput = 600;
     257:  *throughput = 650;
    [all …]

/linux/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c (matches in amdgpu_benchmark_log_results())
      65:  s64 throughput = (n * (size >> 10));
      67:  throughput = div64_s64(throughput, time_ms);
      70:  " %d to %d in %lld ms, throughput: %lld Mb/s or %lld MB/s\n",
      72:  throughput * 8, throughput);

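The arithmetic in these matches is worth spelling out: n copies of size bytes are moved in time_ms milliseconds, size >> 10 converts bytes to KiB, and KiB/ms is numerically the same as MiB/s, which the log line rounds to "MB/s" (times 8 for Mb/s). A standalone sketch of the same formula:

    #include <stdint.h>
    #include <stdio.h>

    /* Same formula as the matches: (n * KiB per copy) / elapsed ms. */
    static int64_t bench_mb_per_s(int64_t n, int64_t size_bytes, int64_t time_ms)
    {
        return (n * (size_bytes >> 10)) / time_ms;
    }

    int main(void)
    {
        /* 1024 copies of 1 MiB in 512 ms -> 2048 MB/s, 16384 Mb/s */
        int64_t mbs = bench_mb_per_s(1024, 1 << 20, 512);

        printf("%lld Mb/s or %lld MB/s\n",
               (long long)(mbs * 8), (long long)mbs);
        return 0;
    }
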
/linux/drivers/atm/Kconfig
      68:  In order to obtain good throughput, the ENI NIC can transfer
      81:  resulting throughput is lower than when using only the largest
      84:  Also, sometimes larger bursts lead to lower throughput, e.g. on an
     108:  may or may not improve throughput.
     116:  are also set may or may not improve throughput.
     139:  improve throughput.
     147:  8W are also set may or may not improve throughput.

/linux/drivers/md/dm-ps-service-time.c
       9:  * Throughput oriented path selector.
     126:  * <relative_throughput>: The relative throughput value of (st_add_path)
     224:  * Case 1: Both have same throughput value. Choose less loaded path. (st_compare_load)
     230:  * Case 2a: Both have same load. Choose higher throughput path. (st_compare_load)
     231:  * Case 2b: One path has no throughput value. Choose the other one. (st_compare_load)
     272:  * Case 4: Service time is equal. Choose higher throughput path. (st_compare_load)
     363:  MODULE_DESCRIPTION(DM_NAME " throughput oriented path selector");

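The numbered cases describe the selector's tie-breaking. The underlying quantity is an estimated service time, queued I/O size divided by relative throughput. The sketch below reconstructs that logic from the case comments; comparing by cross-multiplication to avoid the division is an assumption:

    #include <stdint.h>

    /* Returns <0 if path 1 should be chosen, >0 for path 2, 0 for a tie.
     * szN = bytes in flight on path N, tpN = its relative throughput. */
    static int compare_paths(uint64_t sz1, unsigned int tp1,
                             uint64_t sz2, unsigned int tp2)
    {
        /* Case 2b: a path with no throughput value loses. */
        if (!tp1 || !tp2)
            return !tp1 - !tp2;

        /* Service time ~ sz/tp; compare sz1/tp1 vs sz2/tp2 without
         * dividing. Case 1 (equal tp -> less loaded wins) and Case 2a
         * (equal load -> higher tp wins) both fall out of this. */
        uint64_t st1 = sz1 * tp2;
        uint64_t st2 = sz2 * tp1;

        if (st1 != st2)
            return st1 < st2 ? -1 : 1;

        /* Case 4: equal service time, higher throughput wins. */
        return (int)tp2 - (int)tp1;
    }
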
/linux/Documentation/admin-guide/device-mapper/dm-service-time.rst
      11:  The performance value is a relative throughput value among all paths
      25:  The relative throughput value of the path
      42:  The relative throughput value of the path
      79:  and sda has an average throughput 1GB/s and sdb has 4GB/s,

/linux/net/core/ieee8021q_helpers.c (matches in ietf_dscp_to_ieee8021q_tt())
     174:  * [RFC4594], Section 4.8, recommends High-Throughput Data be marked
     178:  * By default (as described in Section 2.3), High-Throughput Data will
     183:  * Throughput Data service class within the constrained 4 Access
     184:  * Category [IEEE.802.11-2016] model. If the High-Throughput Data
     195:  * is generally RECOMMENDED to map High-Throughput Data to UP 0, thereby

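The cited RFC text recommends dropping High-Throughput Data (the AF1x class) to UP 0 rather than UP 1/2 under the four-access-category 802.11 model. A sketch of what such a mapping looks like; the AF1x DSCP codepoints are standard, but this helper and its default rule are hypothetical and not the kernel's actual table:

    /* Hypothetical DSCP -> User Priority mapper in the spirit of the
     * RFC 8325 recommendation quoted above. */
    static unsigned int dscp_to_up(unsigned int dscp)
    {
        switch (dscp) {
        case 10: case 12: case 14: /* AF11/AF12/AF13: High-Throughput Data */
            return 0;              /* map to UP 0 (best effort) */
        default:
            return dscp >> 3;      /* coarse 3-MSB default (assumption) */
        }
    }
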
/linux/drivers/gpu/drm/loongson/lsdc_benchmark.c (matches in lsdc_benchmark_copy())
      76:  unsigned int throughput;
      93:  throughput = (n * (size >> 10)) / time;
     100:  time, throughput);

/linux/block/bfq-iosched.c
      28:  * to distribute the device throughput among processes as desired,
      29:  * without any distortion due to throughput fluctuations, or to device
      34:  * guarantees that each queue receives a fraction of the throughput
      37:  * processes issuing sequential requests (to boost the throughput),
      76:  * preserving both a low latency and a high throughput on NCQ-capable,
      81:  * the maximum-possible throughput at all times, then do switch off
     190:  * writes to steal I/O throughput to reads.
     240:  * because it is characterized by limited throughput and apparently
     320:  * a) unjustly steal throughput to applications that may actually need
     323:  * in loss of device throughput with most flash-based storage, and may
    [all …]

/linux/drivers/gpu/drm/radeon/radeon_benchmark.c (matches in radeon_benchmark_log_results())
      80:  unsigned int throughput = (n * (size >> 10)) / time;
      82:  " %d to %d in %u ms, throughput: %u Mb/s or %u MB/s\n",
      84:  throughput * 8, throughput);

/linux/drivers/net/wireless/intel/iwlegacy/4965-rs.c
     146:  * The following tables contain the expected throughput metrics for all rates
     385:  * Static function to get the expected throughput from an il_scale_tbl_info
     417:  /* Get expected throughput */ (il4965_rs_collect_tx_data)
     465:  /* Calculate average throughput, if we have enough history. */ (il4965_rs_collect_tx_data)
     980:  * Set frame tx success limits according to legacy vs. high-throughput,
    1008:  * Find correct throughput table for given mode of modulation
    1052:  * Find starting rate for new "search" high-throughput mode of modulation.
    1054:  * above the current measured throughput of "active" mode, to give new mode
    1060:  * to decrease to match "active" throughput. When moving from MIMO to SISO,
    1074:  /* expected "search" throughput */ (il4965_rs_get_best_rate)
    [all …]

/linux/drivers/net/wireless/ath/ath9k/htc_drv_init.c
      51:  { .throughput = 0 * 1024, .blink_time = 334 },
      52:  { .throughput = 1 * 1024, .blink_time = 260 },
      53:  { .throughput = 5 * 1024, .blink_time = 220 },
      54:  { .throughput = 10 * 1024, .blink_time = 190 },
      55:  { .throughput = 20 * 1024, .blink_time = 170 },
      56:  { .throughput = 50 * 1024, .blink_time = 150 },
      57:  { .throughput = 70 * 1024, .blink_time = 130 },
      58:  { .throughput = 100 * 1024, .blink_time = 110 },
      59:  { .throughput = 200 * 1024, .blink_time = 80 },
      60:  { .throughput = 300 * 1024, .blink_time = 50 },

/linux/kernel/Kconfig.preempt
      27:  throughput. It will still provide good latencies most of the
      46:  at the cost of slightly lower throughput.
      68:  system is under load, at the cost of slightly lower throughput

/linux/Documentation/ABI/testing/sysfs-driver-xdata
      17:  The user can read the current PCIe link throughput generated
      42:  The user can read the current PCIe link throughput generated

/linux/Documentation/networking/device_drivers/ethernet/intel/e1000.rst
     148:  for bulk throughput traffic.
     172:  the overall throughput, we recommend that you load the driver as
     215:  extra latency to frame reception and can end up decreasing the throughput
     240:  along with RxIntDelay, may improve traffic throughput in specific network
     297:  along with TxIntDelay, may improve traffic throughput in specific
     413:  Degradation in throughput performance may be observed in some Jumbo frames

/linux/Documentation/arch/arm/omap/omap_pm.rst
       7:  throughput constraints to the kernel power management code.
      20:  latency and throughput, rather than units which are specific to OMAP
      46:  4. Set the minimum bus throughput needed by a device::

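Match 46 stops right where the document would show the constraint call itself. For orientation, a sketch of how a driver might express such a constraint; every name below (the ops struct, set_min_bus_tput, the agent id, the KiB/s unit and the 656000 value) is hypothetical, patterned on the doc's description rather than quoted from it:

    /* Forward declaration so the sketch is self-contained. */
    struct device;

    /* Hypothetical platform-data hook in the style omap_pm.rst describes. */
    struct omap_pm_ops {
        void (*set_min_bus_tput)(struct device *dev, unsigned char agent_id,
                                 unsigned long kib_per_s);
    };

    static void dma_burst_with_constraint(struct omap_pm_ops *pm,
                                          struct device *dev,
                                          unsigned char agent_id)
    {
        pm->set_min_bus_tput(dev, agent_id, 656000); /* claim the bandwidth */
        /* ... DMA-heavy work runs while the interconnect is kept fast ... */
        pm->set_min_bus_tput(dev, agent_id, 0);      /* release the constraint */
    }
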