/linux/Documentation/arch/x86/topology.rst
  100: AMD's nomenclature for a CMT core is "Compute Unit". The kernel always uses
  108: AMD's nomenclature for CMT threads is "Compute Unit Core". The kernel always
  177: [node 0] -> [Compute Unit 0] -> [Compute Unit Core 0] -> Linux CPU 0
  178:                              -> [Compute Unit Core 1] -> Linux CPU 1
  179:          -> [Compute Unit 1] -> [Compute Unit Core 0] -> Linux CPU 2
  180:                              -> [Compute Unit Core 1] -> Linux CPU 3
  218: [node 0] -> [Compute Unit 0] -> [Compute Unit Core 0] -> Linux CPU 0
  219:                              -> [Compute Unit Core 1] -> Linux CPU 1
  220:          -> [Compute Unit 1] -> [Compute Unit Core 0] -> Linux CPU 2
  221:                              -> [Compute Unit Core 1] -> Linux CPU 3
  [more matches omitted]

/linux/scripts/coccinelle/misc/array_size_dup.cocci
    5: /// 1. An opencoded expression is used before array_size() to compute the same size
    6: /// 2. An opencoded expression is used after array_size() to compute the same size
   43: msg = "WARNING: array_size is used later (line %s) to compute the same size" % (p2[0].line)
   51: msg = "WARNING: array_size is used later (line %s) to compute the same size" % (p2[0].line)
   72: msg = "WARNING: array_size is already used (line %s) to compute the same size" % (p1[0].line)
   80: msg = "WARNING: array_size is already used (line %s) to compute the same size" % (p1[0].line)
  108: msg = "WARNING: array3_size is used later (line %s) to compute the same size" % (p2[0].line)
  116: msg = "WARNING: array3_size is used later (line %s) to compute the same size" % (p2[0].line)
  138: msg = "WARNING: array3_size is already used (line %s) to compute the same size" % (p1[0].line)
  146: msg = "WARNING: array3_size is already used (line %s) to compute the same size" % (p1[0].line)
  [more matches omitted]

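The warnings above flag places where the same element-count times element-size product is computed twice, once open-coded and once via array_size(). A minimal sketch of the flagged pattern, using hypothetical driver code (example_alloc and the u32 buffer are illustrative, not from the tree):

    #include <linux/overflow.h>
    #include <linux/slab.h>
    #include <linux/string.h>

    /* Hypothetical example of the duplication array_size_dup.cocci warns about. */
    static int example_alloc(size_t n)
    {
    	u32 *buf;

    	buf = kmalloc(array_size(n, sizeof(*buf)), GFP_KERNEL);
    	if (!buf)
    		return -ENOMEM;

    	/* Open-coded duplicate of the size computed above: triggers the warning. */
    	memset(buf, 0, n * sizeof(*buf));

    	/* Preferred: reuse array_size(), which saturates instead of overflowing. */
    	memset(buf, 0, array_size(n, sizeof(*buf)));

    	kfree(buf);
    	return 0;
    }
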
/linux/Documentation/devicetree/bindings/misc/qcom,fastrpc.yaml
   68: "(compute-)?cb@[0-9]*$":
   72: Each subnode of the Fastrpc represents compute context banks available on the dsp.
   76: const: qcom,fastrpc-compute-cb
  129: compute-cb@1 {
  130: compatible = "qcom,fastrpc-compute-cb";
  135: compute-cb@2 {
  136: compatible = "qcom,fastrpc-compute-cb";
  141: compute-cb@3 {
  142: compatible = "qcom,fastrpc-compute-cb";

/linux/tools/perf/pmu-events/arch/s390/cf_z16/pai_crypto.json
  727: "BriefDescription": "PCC COMPUTE LAST BLOCK CMAC USING DEA",
  728: "PublicDescription": "PCC-Compute-Last-Block-CMAC-Using-DEA function ending with CC=0"
  734: "BriefDescription": "PCC COMPUTE LAST BLOCK CMAC USING TDEA 128",
  735: "PublicDescription": "PCC-Compute-Last-Block-CMAC-Using-TDEA-128 function ending with CC=0"
  741: "BriefDescription": "PCC COMPUTE LAST BLOCK CMAC USING TDEA 192",
  742: "PublicDescription": "PCC-Compute-Last-Block-CMAC-Using-TDEA-192 function ending with CC=0"
  748: "BriefDescription": "PCC COMPUTE LAST BLOCK CMAC USING ENCRYPTED DEA",
  749: "PublicDescription": "PCC-Compute-Last-Block-CMAC-Using-Encrypted-DEA function ending with CC=0"
  755: "BriefDescription": "PCC COMPUTE LAST BLOCK CMAC USING ENCRYPTED TDEA 128",
  756: …"PublicDescription": "PCC-Compute-Last-Block-CMAC-Using-Encrypted-TDEA-128 function ending with C…
  [more matches omitted]

/linux/Documentation/gpu/amdgpu/driver-core.rst
   66: GC (Graphics and Compute)
   67: This is the graphics and compute engine, i.e., the block that
   79: Graphics and Compute Microcontrollers
   84: GFX/Compute pipeline. Consists mainly of a bunch of microcontrollers
   86: provides the driver interface to interact with the GFX/Compute engine.
   88: MEC (MicroEngine Compute)
   89: This is the microcontroller that controls the compute queues on the
   90: GFX/compute engine.
   96: This is another microcontroller in the GFX/Compute engine. It handles
   97: power management related functionality within the GFX/Compute engine.
  [more matches omitted]

/linux/Documentation/gpu/amdgpu/amdgpu-glossary.rst
   22: Compute Unit
   43: Graphics and Compute
   75: Kernel Compute Queue
   84: MicroEngine Compute

/linux/drivers/gpu/drm/xe/xe_vm_doc.h
  185: * the VM's DMA_RESV_USAGE_KERNEL slot (blocks future jobs / resume compute mode
  187: * to execs and compute mode rebind worker. To accomplish this, hold the
  213: * until all pending users (jobs or compute mode engines) of the userptr are
  219: * Either the next exec (non-compute) or rebind worker (compute mode) will
  221: * after the VM dma-resv wait if the VM is in compute mode.
  223: * Compute mode
  226: * A VM in compute mode enables long running workloads and ultra low latency
  231: * are not used when a VM is in compute mode. User fences (TODO: link user fence
  422: * evictions, and compute mode rebind worker) in XE.
  431: * bind path also acquires this lock in write while the exec / compute mode
  [more matches omitted]

/linux/drivers/gpu/drm/xe/xe_gt_types.h
  105: * for implementing the graphics, compute, and/or media IP. It encapsulates
  110: * A GPU/tile may have a single GT that supplies all graphics, compute, and
  111: * media functionality, or the graphics/compute and media may be split into
  216: * @ccs_mode: Number of compute engines enabled.
  217: * Allows fixed mapping of available compute slices to compute engines.
  218: * By default only the first available compute engine is enabled and all
  219: * available compute slices are allocated to it.
  257: * of compute resources available.
  353: /** @fuse_topo.c_dss_mask: dual-subslices usable by compute */

/linux/drivers/accel/Kconfig
    3: # Compute Acceleration device configuration
    5: # This framework provides support for compute acceleration devices, such
   12: bool "Compute Acceleration Framework"
   14: Framework for device drivers of compute acceleration devices, such
   19: This framework is integrated with the DRM subsystem as compute

/linux/drivers/iio/common/inv_sensors/inv_sensors_timestamp.c
   13: /* compute jitter, min and max following jitter in per mille */
   31: /* compute the mean of all stored values, use 0 as empty slot */  [in inv_update_acc()]
   46: /* save chip parameters and compute min and max clock period */  [in inv_sensors_timestamp_init()]
  146: /* update interrupt timestamp and compute chip and sensor periods */  [in inv_sensors_timestamp_interrupt()]
  152: /* compute period: delta time divided by number of samples */  [in inv_sensors_timestamp_interrupt()]
  157: /* no previous data, compute theoretical value from interrupt */  [in inv_sensors_timestamp_interrupt()]
  188: * undetermined (depends on when the change occurs). So we compute the  [in inv_sensors_timestamp_apply_odr()]
  193: /* compute measured fifo period */  [in inv_sensors_timestamp_apply_odr()]

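The comment at line 152 describes the core of the period estimate: the elapsed time between two interrupts divided by the number of samples the FIFO delivered over that window. A minimal sketch of that arithmetic (illustrative names, not the driver's own helpers):

    #include <linux/math64.h>
    #include <linux/types.h>

    /* Sketch only: estimate the sample period from an interrupt time delta. */
    static inline s64 example_fifo_period(s64 delta_ns, u32 nb_samples)
    {
    	/* delta time divided by number of samples, guarding against zero */
    	return nb_samples ? div_s64(delta_ns, nb_samples) : 0;
    }
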
/linux/Documentation/driver-api/dma-buf.rst
  255: * Long-running compute command buffers, while still using traditional end of
  257: fences which get reattached when the compute job is rescheduled.
  304: userspace is allowed to use userspace fencing or long running compute
  327: faults on GPUs are limited to pure compute workloads.
  330: compute side, like compute units or command submission engines. If both a 3D
  331: job with a DMA fence and a compute workload using recoverable page faults are
  334: - The 3D workload might need to wait for the compute job to finish and release
  337: - The compute workload might be stuck in a page fault, because the memory
  343: - Compute workloads can always be preempted, even when a page fault is pending
  348: achieved e.g. through dedicated engines and minimal compute unit
  [more matches omitted]

/linux/arch/x86/events/amd/power.c
   25: * The ratio of compute unit power accumulator sample period to the
   30: /* Maximum accumulated power of a compute unit. */
   36: * Accumulated power represents the sum of each compute unit's (CU) power
  227: * Find a new CPU on the same compute unit, if one was set in cpumask  [in power_cpu_exit()]
  228: * and some CPUs are still on the compute unit. Then migrate event and  [in power_cpu_exit()]
  244: * 1) If any CPU is set at cpu_mask in the same compute unit, do  [in power_cpu_init()]
  246: * 2) If no CPU is set at cpu_mask in the same compute unit,  [in power_cpu_init()]
  276: pr_err("Failed to read max compute unit power accumulator MSR\n");  [in amd_power_pmu_init()]

/linux/Documentation/hwmon/fam15h_power.rst
   67: compute unit power accumulator sample period
   76: the ratio of compute unit power accumulator sample period to the
   80: max compute unit accumulated power which is indicated by
   84: compute unit accumulated power which is indicated by
  110: v. Calculate the average power consumption for a compute unit over

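Step v above combines the quantities listed before it. As a heavily simplified sketch of that calculation (the function name, parameter names, and the microwatt scaling factor are assumptions for illustration; the real arithmetic lives in drivers/hwmon/fam15h_power.c), the average is the accumulated-power delta, scaled by the sample-period ratio, divided by the PTSC ticks elapsed over the same interval:

    #include <linux/math64.h>
    #include <linux/types.h>

    /*
     * Sketch only, not the driver's code:
     *   acc_delta  - delta of the compute unit power accumulator
     *   ratio      - accumulator sample period / PTSC period ratio
     *   ptsc_delta - PTSC counter ticks elapsed over the same interval
     * The 1000000 factor is an assumed microwatt scaling.
     */
    static u64 example_avg_power_uw(u64 acc_delta, u64 ratio, u64 ptsc_delta)
    {
    	if (!ptsc_delta)
    		return 0;
    	return div64_u64(acc_delta * ratio * 1000000ULL, ptsc_delta);
    }
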
/linux/lib/siphash.c
  112: * siphash_1u64 - compute 64-bit siphash PRF value of a u64
  128: * siphash_2u64 - compute 64-bit siphash PRF value of 2 u64
  149: * siphash_3u64 - compute 64-bit siphash PRF value of 3 u64
  176: * siphash_4u64 - compute 64-bit siphash PRF value of 4 u64
  312: * hsiphash_1u32 - compute 32-bit hsiphash PRF value of a u32
  325: * hsiphash_2u32 - compute 32-bit hsiphash PRF value of 2 u32
  342: * hsiphash_3u32 - compute 32-bit hsiphash PRF value of 3 u32
  362: * hsiphash_4u32 - compute 32-bit hsiphash PRF value of 4 u32
  454: * hsiphash_1u32 - compute 32-bit hsiphash PRF value of a u32
  469: * hsiphash_2u32 - compute 32-bit hsiphash PRF value of 2 u32
  [more matches omitted]

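All of these helpers share the same calling convention: the input words plus a secret 128-bit siphash_key_t. A minimal usage sketch (the example_* names are illustrative; the key must be generated once, kept secret, and reused, since hashes are only comparable under the same key):

    #include <linux/siphash.h>
    #include <linux/random.h>

    static siphash_key_t example_key;	/* illustrative key storage */

    static void example_hash_init(void)
    {
    	/* generate the secret key once */
    	get_random_bytes(&example_key, sizeof(example_key));
    }

    static u64 example_hash(u64 value)
    {
    	/* 64-bit SipHash PRF value of a single u64 under example_key */
    	return siphash_1u64(value, &example_key);
    }
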
/linux/lib/bch.c
   30: * Call bch_encode to compute and store ecc parity bytes to a given buffer.
  109: /* given its degree, compute a polynomial size in bytes */
  368: * compute 2t syndromes of ecc polynomial, i.e. ecc(a^j) for j=1..2t
  386: /* compute v(a^j) for j=1 .. 2t-1 */  [in compute_syndromes()]
  441: /* compute l[i+1] = max(l[i]->c[l[p]+2*(i-p]) */  [in compute_error_locator_polynomial()]
  523: /* compute unique solution */  [in solve_linear_system()]
  573: * compute root r of a degree 1 polynomial over GF(2^m) (returned as log(1/r))
  588: * compute roots of a degree 2 polynomial over GF(2^m)
  605: * let u = sum(li.a^i) i=0..m-1; then compute r = sum(li.xi):  [in find_poly_deg2_roots()]
  619: /* reverse z=a/bX transformation and compute log(1/r) */  [in find_poly_deg2_roots()]
  [more matches omitted]

/linux/tools/perf/Documentation/perf-diff.txt
   92: --compute::
   95: diff.compute config option. See COMPARISON METHODS section for
  113: Specify compute sorting column number. 0 means sorting by baseline
  211: baseline/A compute/B compute/C samples
  222: baseline/B compute/A compute/C samples
  233: baseline/C compute/B compute/A samples

/linux/Documentation/devicetree/bindings/arm/bcm/bcm2835.yaml
   22: - raspberrypi,4-compute-module
   41: - raspberrypi,compute-module
   58: - raspberrypi,3-compute-module
   59: - raspberrypi,3-compute-module-lite

/linux/drivers/hwmon/fam15h_power.c
   55: /* maximum accumulated power of a compute unit */
   57: /* accumulated power of the compute units */
   61: /* online/offline status of current compute unit */
  142: * is compute unit id.  [in do_read_registers_on_cu()]
  171: * Choose the first online core of each compute unit, and then  [in read_registers()]
  173: * because the MSR value of CPU core represents the compute  [in read_registers()]
  186: /* get any CPU on this compute unit */  [in read_registers()]
  210: * compute unit number.  [in power1_average_show()]
  232: /* check if current compute unit is online */  [in power1_average_show()]
  421: * determine the ratio of the compute unit power accumulator  [in fam15h_power_init_data()]
  [more matches omitted]

/linux/arch/x86/include/asm/checksum_64.h
   41: * ip_fast_csum - Compute the IPv4 header checksum efficiently.
   76: * csum_tcpudp_nofold - Compute an IPv4 pseudo header checksum.
  102: * csum_tcpudp_magic - Compute an IPv4 pseudo header checksum.
  120: * csum_partial - Compute an internet checksum.
  139: * ip_compute_csum - Compute a 16-bit IP checksum.
  149: * csum_ipv6_magic - Compute checksum of an IPv6 pseudo header.

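As a short usage sketch for the first helper (the function name is illustrative): after editing IPv4 header fields, the checksum field is zeroed and recomputed over the header, whose length is passed in 32-bit words (iph->ihl):

    #include <linux/ip.h>
    #include <net/checksum.h>

    /* Sketch only: refresh an IPv4 header checksum after modifying the header. */
    static void example_refresh_ipv4_csum(struct iphdr *iph)
    {
    	iph->check = 0;
    	iph->check = ip_fast_csum(iph, iph->ihl);
    }
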
/linux/fs/xfs/libxfs/xfs_rtbitmap.c
  284: * Compute and read in starting bitmap block for starting block.  [in xfs_rtfind_back()]
  298: * Compute match value, based on the bit at start: if 1 (free)  [in xfs_rtfind_back()]
  354: * Compute difference between actual and desired value.  [in xfs_rtfind_back()]
  393: * Compute difference between actual and desired value.  [in xfs_rtfind_back()]
  440: * Compute and read in starting bitmap block for starting block.  [in xfs_rtfind_forw()]
  454: * Compute match value, based on the bit at start: if 1 (free)  [in xfs_rtfind_forw()]
  509: * Compute difference between actual and desired value.  [in xfs_rtfind_forw()]
  546: * Compute difference between actual and desired value.  [in xfs_rtfind_forw()]
  677: * Compute starting bitmap block number.  [in xfs_rtmodify_range()]
  688: * Compute the starting word's address, and starting bit.  [in xfs_rtmodify_range()]
  [more matches omitted]

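The "starting word's address, and starting bit" comments describe ordinary bitmap index arithmetic: a bit number splits into the word that holds it and the offset within that word. A generic sketch with illustrative names (not the XFS helpers, which additionally handle bitmap block boundaries):

    #include <linux/types.h>

    #define EXAMPLE_BITS_PER_WORD	32	/* assumed word size for illustration */

    static inline u64 example_word_index(u64 bitno)
    {
    	return bitno / EXAMPLE_BITS_PER_WORD;	/* which word holds the bit */
    }

    static inline unsigned int example_bit_offset(u64 bitno)
    {
    	return bitno % EXAMPLE_BITS_PER_WORD;	/* bit position inside that word */
    }
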
/linux/drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell.h
  121: /* Compute + GFX: 0~255 */
  176: /* 8 compute rings per GC. Max to 0x1CE */
  188: /* Compute + GFX: 0~255 */
  244: …* All compute related doorbells: kiq, hiq, diq, traditional compute queue, user queue, should loca…
  246: * Compute related doorbells are allocated from 0x00 to 0x8a
  257: /* Compute engines */
  328: /* Compute: 0x08 ~ 0x20 */

/linux/drivers/clk/ti/dpll44xx.c
   79: * omap4_dpll_lpmode_recalc - compute DPLL low-power setting
  104: * omap4_dpll_regm4xen_recalc - compute DPLL rate, considering REGM4XEN bit
  105: * @hw: pointer to the clock to compute the rate for
  108: * Compute the output rate for the OMAP4 DPLL represented by @clk.
  142: * Compute the rate that would be programmed into the DPLL hardware
  165: * First try to compute the DPLL configuration for  [in omap4_dpll_regm4xen_round_rate()]

/linux/tools/perf/builtin-diff.c
  112: COMPUTE_STREAM, /* After COMPUTE_MAX to avoid use current compute arrays */
  123: static int compute = COMPUTE_DELTA_ABS;  [variable]
  204: pr_debug("compute wdiff w1(%" PRId64 ") w2(%" PRId64 ")\n",  [in setup_compute_opt_wdiff()]
  218: if (compute == COMPUTE_WEIGHTED_DIFF)  [in setup_compute_opt()]
  351: switch (compute) {  [in formula_fprintf()]
  425: switch (compute) {  [in diff__process_sample_event()]
  691: if (compute == COMPUTE_CYCLES) {  [in hists__precompute()]
  702: switch (compute) {  [in hists__precompute()]
  939: if (compute == COMPUTE_CYCLES)  [in hists__process()]
  1210: if (compute  [in __cmd_diff()]
  [more matches omitted]

/linux/Documentation/devicetree/bindings/interconnect/qcom,rpmh.yaml
   30: - qcom,sc7180-compute-noc
   42: - qcom,sc8180x-compute-noc
   75: - qcom,sm8150-compute-noc
   84: - qcom,sm8250-compute-noc
  101: - qcom,sm8350-compute-noc

/linux/arch/xtensa/lib/strncpy_user.S
   94: sub a2, a11, a2	# compute strlen
  149: sub a2, a11, a2	# compute strlen
  156: sub a2, a11, a2	# compute strlen
  164: sub a2, a11, a2	# compute strlen
  174: sub a2, a11, a2	# compute strlen
  200: sub a2, a11, a2	# compute strlen

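The repeated "sub a2, a11, a2  # compute strlen" computes the copied length by pointer subtraction: the distance between the position where copying stopped and the start of the destination. The same idea in C (illustrative sketch, not a translation of the xtensa routine):

    /* Sketch only: string length as end-pointer minus start-pointer. */
    static unsigned long example_strlen(const char *s)
    {
    	const char *p = s;

    	while (*p)
    		p++;
    	return p - s;	/* analogous to "sub a2, a11, a2" */
    }
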