/freebsd/contrib/llvm-project/llvm/lib/Target/AMDGPU/
  MIMGInstructions.td
    427: R128A16:$r128, TFE:$tfe, LWE:$lwe, DA:$da),
    429: let AsmString = asm#" $vdata, $vaddr, $srsrc$dmask$unorm$cpol$r128$tfe$lwe$da"
    452: R128A16:$r128, A16:$a16, TFE:$tfe, LWE:$lwe),
    454: let AsmString = opcode#" $vdata, $vaddr0, $srsrc$dmask$dim$unorm$cpol$r128$a16$tfe$lwe"
    465: R128A16:$r128, A16:$a16, TFE:$tfe, LWE:$lwe),
    467: let AsmString = opcode#" $vdata, "#AddrAsm#", $srsrc$dmask$dim$unorm$cpol$r128$a16$tfe$lwe"
    477: R128A16:$r128, A16:$a16, TFE:$tfe, LWE:$lwe),
    479: let AsmString = opcode#" $vdata, $vaddr0, $srsrc$dmask$dim$unorm$cpol$r128$a16$tfe$lwe"
    490: R128A16:$r128, A16:$a16, TFE:$tfe, LWE:$lwe),
    492: let AsmString = opcode#" $vdata, "#AddrAsm#", $srsrc$dmask$dim$unorm$cpol$r128$a16$tfe$lwe"
    [all …]
  SIInstrFormats.td
    344: bits<1> tfe;
    370: let Inst{16} = tfe;
    397: let Inst{16} = tfe;
    409: bits<1> tfe;
    434: let Inst{53} = tfe;
    441: bits<1> tfe;
    475: let Inst{3} = tfe;
    486: let Inst{55} = tfe;
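Every hit above is the same pattern: a one-bit tfe flag declared as bits<1> and wired into a different position of the encoded instruction word (bit 16 for MIMG, bit 53 or 55 for the buffer formats, bit 3 elsewhere). As a rough standalone illustration only, not the TableGen machinery itself, inserting and reading such a single-bit field looks like this:

    #include <cstdint>

    // Place a one-bit flag such as tfe at an arbitrary bit of a 64-bit encoding.
    static uint64_t insertBit(uint64_t Inst, unsigned Bit, bool Flag) {
      return (Inst & ~(uint64_t(1) << Bit)) | (uint64_t(Flag) << Bit);
    }

    // Read it back, e.g. extractBit(Inst, 16) for the MIMG tfe bit shown above.
    static bool extractBit(uint64_t Inst, unsigned Bit) {
      return (Inst >> Bit) & 1;
    }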
  BUFInstructions.td
    89:   bits<1> tfe = 0;
    155:  // Bit supersedes tfe.
    367:  // Bit supersedes tfe.
    455:  string TFE = !if(isTFE, " tfe", "");
    466:  string ret = Vdata # MainArgs # Offset # OtherArgs # Lds # TFE;
    511:  let tfe = isTFE;
    617:  let tfe = isTFE;
    2348: let Inst{53} = ps.tfe;
    2364: let Inst{55} = ps.tfe;
    2455: let Inst{22} = ps.tfe;
    [all …]
  SIShrinkInstructions.cpp
    363: // Further check for implicit tied operands - this may be present if TFE is  (in shrinkMIMG())
    365: int TFEIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::tfe);  (in shrinkMIMG())
    371: // TFE/LWE is enabled so we need to deal with an implicit tied operand  (in shrinkMIMG())
  AMDGPUInstructionSelector.cpp
    1787: static bool parseTexFail(uint64_t TexFailCtrl, bool &TFE, bool &LWE,  (in parseTexFail(), argument)
    1792: TFE = (TexFailCtrl & 0x1) ? true : false;  (in parseTexFail())
    1828: bool TFE;  (in selectImageIntrinsic(), local)
    1832: TFE, LWE, IsTexFail))  (in selectImageIntrinsic())
    2021: MIB.addImm(TFE); // tfe  (in selectImageIntrinsic())
    2022: } else if (TFE) {  (in selectImageIntrinsic())
    2023: LLVM_DEBUG(dbgs() << "TFE is not supported on this GPU\n");  (in selectImageIntrinsic())
    5231: addZeroImm, // tfe  (in selectMUBUFAddr64())
    5259: addZeroImm, // tfe  (in selectMUBUFOffset())
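Both the GlobalISel selector here and the SelectionDAG path in SIISelLowering.cpp decode the image intrinsic's texfailctrl immediate the same way: bit 0 enables TFE and bit 1 enables LWE, and any other set bit is rejected. A minimal standalone sketch of that decoding (the helper name and error handling are illustrative, not the in-tree signature):

    #include <cstdint>

    // Decode a texfailctrl immediate: bit 0 = TFE, bit 1 = LWE.
    static bool parseTexFailCtrl(uint64_t TexFailCtrl, bool &TFE, bool &LWE) {
      TFE = (TexFailCtrl & 0x1) != 0;
      LWE = (TexFailCtrl & 0x2) != 0;
      TexFailCtrl &= ~uint64_t(0x3);
      return TexFailCtrl == 0; // any remaining bit means an invalid texfailctrl value
    }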
  AMDGPULegalizerInfo.cpp
    5976: // TODO: Support TFE for typed and narrow loads.  (in legalizeBufferLoad())
    6325: // that use the unpacked register layout, or need to repack the TFE result.  (in legalizeImageIntrinsic())
    6515: if (BaseOpcode->NoReturn) { // No TFE for stores?  (in legalizeImageIntrinsic())
    6557: // S32 vector to cover all data, plus TFE result element.  (in legalizeImageIntrinsic())
    6588: // TODO: For TFE with d16, if we used a TFE type that was a multiple of <2 x  (in legalizeImageIntrinsic())
    6597: // In the IR, TFE is supposed to be used with a 2 element struct return  (in legalizeImageIntrinsic())
    6607: // TODO: Make sure the TFE operand bit is set.  (in legalizeImageIntrinsic())
    6632: // Drop the final TFE element to get the data part. The TFE result is  (in legalizeImageIntrinsic())
    6654: // For packed D16 results with TFE enabled, all the data components are  (in legalizeImageIntrinsic())
    6658: // cast for the TFE result if a multiple of v2s16 was used.  (in legalizeImageIntrinsic())
  SIISelLowering.cpp
    1132: // Peek through TFE struct returns to only use the data size.
    1140: // TFE intrinsics return an aggregate type.  (in memVTFromLoadIntrReturn())
    7849: static bool parseTexFail(SDValue TexFailCtrl, SelectionDAG &DAG, SDValue *TFE,  (in parseTexFail(), argument)
    7859: *TFE = DAG.getTargetConstant((Value & 0x1) ? 1 : 0, DL, MVT::i32);  (in parseTexFail())
    8122: SDValue TFE;  (in lowerImage(), local)
    8126: if (!parseTexFail(TexFail, DAG, &TFE, &LWE, IsTexFail))  (in lowerImage())
    8198: Ops.push_back(TFE); //tfe  (in lowerImage())
    8199: } else if (TFE->getAsZExtVal()) {  (in lowerImage())
    8200: report_fatal_error("TFE is not supported on this GPU");  (in lowerImage())
    9355: // dwordx4 if on SI and handle TFE loads.
    [all …]
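The "peek through TFE struct returns" comment refers to the IR contract also noted in AMDGPULegalizerInfo.cpp: with tfe/lwe enabled, the image intrinsic returns a two-element aggregate of the data plus an i32 status word, and only the data member determines the memory type. A hedged sketch of that unwrapping with LLVM's IR types (hypothetical helper, not the in-tree code):

    #include "llvm/IR/DerivedTypes.h"
    #include "llvm/Support/Casting.h"

    using namespace llvm;

    // If the result is the two-element TFE aggregate { data, i32 status },
    // return just the data type; otherwise return the type unchanged.
    static Type *stripTFEResult(Type *RetTy) {
      if (auto *ST = dyn_cast<StructType>(RetTy))
        if (ST->getNumElements() == 2 && ST->getElementType(1)->isIntegerTy(32))
          return ST->getElementType(0);
      return RetTy;
    }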
  FLATInstructions.td
    128: // We don't use tfe right now, and it was removed in gfx9.
    129: bits<1> tfe = 0;
    145: let Inst{55} = acc; // nv on GFX9+, TFE before. AccVGPR for data on GFX90A.
  SIInstrInfo.cpp
    4873: // being used TFE/LWE require an extra result register.  (in verifyInstruction())
    4879: const MachineOperand *TFE = getNamedOperand(MI, AMDGPU::OpName::tfe);  (in verifyInstruction(), local)
    4887: // Adjust if using LWE or TFE  (in verifyInstruction())
    4888: if ((LWE && LWE->getImm()) || (TFE && TFE->getImm()))  (in verifyInstruction())
    6884: if (const MachineOperand *TFE =  (in legalizeOperands(), local)
    6885: getNamedOperand(MI, AMDGPU::OpName::tfe)) {  (in legalizeOperands())
    6886: MIB.addImm(TFE->getImm());  (in legalizeOperands())
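The verifier hits encode a simple sizing rule: the destination of an image load must cover the dmask-selected components (packed two per dword for d16) plus one extra dword when tfe or lwe is enabled, since the hardware writes the fail/LOD-warning status there. A sketch of that arithmetic, as a hypothetical helper under those assumptions:

    // Destination dwords needed for a given dmask population count,
    // D16 packing, and the optional tfe/lwe status dword.
    static unsigned expectedVDataDwords(unsigned DMaskPopCount, bool IsD16,
                                        bool TFE, bool LWE) {
      unsigned Dwords = IsD16 ? (DMaskPopCount + 1) / 2 : DMaskPopCount;
      if (TFE || LWE)
        ++Dwords; // extra dword receives the texture-fail / LOD-warning status
      return Dwords;
    }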
  SILoadStoreOptimizer.cpp
    926: // Ignore instructions with tfe/lwe set.  (in dmasksCanBeCombined())
    927: const auto *TFEOp = TII.getNamedOperand(*CI.I, AMDGPU::OpName::tfe);  (in dmasksCanBeCombined())
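The optimizer refuses to merge two image loads when either one has tfe or lwe set, because the implicit status write cannot be combined. Roughly, as a standalone sketch rather than the in-tree dmasksCanBeCombined():

    #include "SIInstrInfo.h"                    // in-tree AMDGPU target header
    #include "llvm/CodeGen/MachineInstr.h"

    using namespace llvm;

    // True only if MI has neither tfe nor lwe enabled, i.e. it may be merged.
    static bool tfeLweClear(const SIInstrInfo &TII, const MachineInstr &MI) {
      const MachineOperand *TFEOp = TII.getNamedOperand(MI, AMDGPU::OpName::tfe);
      const MachineOperand *LWEOp = TII.getNamedOperand(MI, AMDGPU::OpName::lwe);
      return !(TFEOp && TFEOp->getImm()) && !(LWEOp && LWEOp->getImm());
    }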
  AMDGPUInstCombineIntrinsic.cpp
    1283: /// Note: This only supports non-TFE/LWE image intrinsic calls; those have
/freebsd/sbin/ipfw/
  tables.c
    1191: struct tflow_entry *tfe;  (in tentry_fill_key_type(), local)
    1274: tfe = &tentry->k.flow;  (in tentry_fill_key_type())
    1287: memcpy(&tfe->a.a4.sip, &tmp, 4);  (in tentry_fill_key_type())
    1293: memcpy(&tfe->a.a6.sip6, &tmp, 16);  (in tentry_fill_key_type())
    1318: tfe->proto = key;  (in tentry_fill_key_type())
    1337: tfe->sport = port;  (in tentry_fill_key_type())
    1353: memcpy(&tfe->a.a4.dip, &tmp, 4);  (in tentry_fill_key_type())
    1359: memcpy(&tfe->a.a6.dip6, &tmp, 16);  (in tentry_fill_key_type())
    1379: tfe->dport = port;  (in tentry_fill_key_type())
    1383: tfe->af = af;  (in tentry_fill_key_type())
    [all …]
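Here tfe is an ipfw flow-table key being filled from a parsed rule entry: address family, protocol, ports in network byte order, and the v4/v6 address pair. A minimal sketch of building an IPv4 key; the struct below is an illustrative stand-in for struct tflow_entry from ip_fw.h, limited to the fields touched above:

    #include <sys/socket.h>
    #include <netinet/in.h>
    #include <arpa/inet.h>
    #include <cstdint>

    // Illustrative stand-in for the parts of struct tflow_entry used above.
    struct flow_key {
      uint8_t  af;
      uint8_t  proto;
      uint16_t sport, dport;                  // kept in network byte order
      struct { struct in_addr sip, dip; } a4; // IPv4 half of the address union
    };

    // Fill an IPv4 5-tuple key much like tentry_fill_key_type() does.
    static void fill_ipv4_flow(flow_key &key, const char *src, const char *dst,
                               uint8_t proto, uint16_t sport, uint16_t dport) {
      key.af = AF_INET;
      key.proto = proto;
      key.sport = htons(sport);
      key.dport = htons(dport);
      inet_pton(AF_INET, src, &key.a4.sip);
      inet_pton(AF_INET, dst, &key.a4.dip);
    }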
/freebsd/sys/contrib/device-tree/Bindings/media/
  nvidia,tegra-vde.txt
    17: - tfe
    54: "tfe", "ppb", "vdma", "frameid";
  nvidia,tegra-vde.yaml
    38:  - const: tfe
    107: "tfe", "ppb", "vdma", "frameid";
/freebsd/sys/netpfil/ipfw/
  ip_fw_table_algo.c
    3356: struct tflow_entry *tfe;  (in ta_dump_fhash_tentry(), local)
    3359: tfe = &tent->k.flow;  (in ta_dump_fhash_tentry())
    3361: tfe->af = ent->af;  (in ta_dump_fhash_tentry())
    3362: tfe->proto = ent->proto;  (in ta_dump_fhash_tentry())
    3363: tfe->dport = htons(ent->dport);  (in ta_dump_fhash_tentry())
    3364: tfe->sport = htons(ent->sport);  (in ta_dump_fhash_tentry())
    3370: tfe->a.a4.sip.s_addr = htonl(fe4->sip.s_addr);  (in ta_dump_fhash_tentry())
    3371: tfe->a.a4.dip.s_addr = htonl(fe4->dip.s_addr);  (in ta_dump_fhash_tentry())
    3376: tfe->a.a6.sip6 = fe6->sip6;  (in ta_dump_fhash_tentry())
    3377: tfe->a.a6.dip6 = fe6->dip6;  (in ta_dump_fhash_tentry())
    [all …]
/freebsd/sys/dts/arm/
  imx53x.dtsi
    620: /* 68 SIM intr composed of tc, etc, tfe, and rdrf */
  imx51x.dtsi
    539: /* 68 SIM intr composed of tc, etc, tfe, and rdrf */
/freebsd/sys/net/
  if_vxlan.c
    608: struct vxlan_ftable_entry *fe, *tfe;  (in vxlan_ftable_flush(), local)
    612: LIST_FOREACH_SAFE(fe, &sc->vxl_ftable[i], vxlfe_hash, tfe) {  (in vxlan_ftable_flush())
    622: struct vxlan_ftable_entry *fe, *tfe;  (in vxlan_ftable_expire(), local)
    628: LIST_FOREACH_SAFE(fe, &sc->vxl_ftable[i], vxlfe_hash, tfe) {  (in vxlan_ftable_expire())
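In the vxlan code tfe is only the scratch pointer of FreeBSD's LIST_FOREACH_SAFE() macro, which caches the next element so the current forwarding-table entry can be unlinked and freed mid-walk. A small sketch of that idiom with an assumed, malloc-backed entry type:

    #include <sys/queue.h>
    #include <cstdlib>

    // Illustrative stand-in for a hash-chained forwarding-table entry.
    struct fte {
      LIST_ENTRY(fte) hash;
    };
    LIST_HEAD(fte_head, fte);

    // Remove and free every entry; tfe holds the successor across the removal.
    static void flush_chain(struct fte_head *head) {
      struct fte *fe, *tfe;
      LIST_FOREACH_SAFE(fe, head, hash, tfe) {
        LIST_REMOVE(fe, hash);
        free(fe);
      }
    }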
/freebsd/contrib/llvm-project/llvm/lib/Target/AMDGPU/Disassembler/
  AMDGPUDisassembler.cpp
    669: // GFX90A lost TFE, its place is occupied by ACC.  (in getInstruction())
    671: AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::tfe);  (in getInstruction())
    962: AMDGPU::OpName::tfe);  (in convertMIMGInst())
/freebsd/sys/contrib/device-tree/src/arm/nvidia/
  tegra114.dtsi
    279: "tfe", "ppb", "vdma", "frameid";
  tegra20.dtsi
    346: "tfe", "ppb", "vdma", "frameid";
  tegra30.dtsi
    525: "tfe", "ppb", "vdma", "frameid";
/freebsd/sys/dev/axgbe/
  xgbe-dev.c
    504: XGMAC_SET_BITS(reg_val, MAC_Q0TFCR, TFE, 0);  (in xgbe_disable_tx_flow_control())
    544: XGMAC_SET_BITS(reg_val, MAC_Q0TFCR, TFE, 1);  (in xgbe_enable_tx_flow_control())
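In the axgbe driver TFE is the Transmit Flow-control Enable field of the queue-0 flow-control register (MAC_Q0TFCR), and XGMAC_SET_BITS() inserts a value into that field of a register copy before it is written back. A rough sketch of the read-modify-write pattern; the position and width constants below are placeholders, not values taken from the driver headers:

    #include <cstdint>

    // Generic register field update: clear the field, then insert the new value.
    static uint32_t set_field(uint32_t reg, unsigned pos, unsigned width,
                              uint32_t val) {
      const uint32_t mask = ((1u << width) - 1u) << pos;
      return (reg & ~mask) | ((val << pos) & mask);
    }

    // Placeholder field parameters; the real TFE location lives in the driver headers.
    static constexpr unsigned TFE_POS = 1, TFE_WIDTH = 1;

    static uint32_t enable_tx_flow_control(uint32_t q0tfcr) {
      return set_field(q0tfcr, TFE_POS, TFE_WIDTH, 1); // TFE = 1, as at line 544 above
    }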
/freebsd/contrib/llvm-project/llvm/lib/Target/AMDGPU/Utils/
  AMDGPUBaseInfo.cpp
    330: bool tfe;  (member)
    489: return Info ? Info->tfe : false;  (in getMUBUFTfe())
/freebsd/contrib/llvm-project/llvm/lib/Target/AMDGPU/AsmParser/
  AMDGPUAsmParser.cpp
    1082: case ImmTyTFE: OS << "TFE"; break;  (in printImmTy())
    3869: int TFEIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::tfe);  (in validateMIMGDataSize())
    3900: Modifiers = IsPackedD16 ? "dmask, d16 and tfe" : "dmask and tfe";  (in validateMIMGDataSize())
    5044: Error(Loc, "TFE modifier has no meaning for store instructions");  (in validateTFE())
    9588: return parseNamedBit("tfe", Operands, AMDGPUOperand::ImmTyTFE);  (in parseCustomOperand())