//===-- ARMInstrMVE.td - MVE support for ARM ---------------*- tablegen -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file describes the ARM MVE instruction set.
//
//===----------------------------------------------------------------------===//

// VPT condition mask
def vpt_mask : Operand<i32> {
  let PrintMethod = "printVPTMask";
  let ParserMatchClass = it_mask_asmoperand;
  let EncoderMethod = "getVPTMaskOpValue";
  let DecoderMethod = "DecodeVPTMaskOperand";
}

// VPT/VCMP restricted predicate for sign invariant types
def pred_restricted_i_asmoperand : AsmOperandClass {
  let Name = "CondCodeRestrictedI";
  let RenderMethod = "addITCondCodeOperands";
  let PredicateMethod = "isITCondCodeRestrictedI";
  let ParserMethod = "parseITCondCode";
  let DiagnosticString = "condition code for sign-independent integer "#
                         "comparison must be EQ or NE";
}

// VPT/VCMP restricted predicate for signed types
def pred_restricted_s_asmoperand : AsmOperandClass {
  let Name = "CondCodeRestrictedS";
  let RenderMethod = "addITCondCodeOperands";
  let PredicateMethod = "isITCondCodeRestrictedS";
  let ParserMethod = "parseITCondCode";
  let DiagnosticString = "condition code for signed integer "#
                         "comparison must be EQ, NE, LT, GT, LE or GE";
}

// VPT/VCMP restricted predicate for unsigned types
def pred_restricted_u_asmoperand : AsmOperandClass {
  let Name = "CondCodeRestrictedU";
  let RenderMethod = "addITCondCodeOperands";
  let PredicateMethod = "isITCondCodeRestrictedU";
  let ParserMethod = "parseITCondCode";
  let DiagnosticString = "condition code for unsigned integer "#
                         "comparison must be EQ, NE, HS or HI";
}

// VPT/VCMP restricted predicate for floating point
def pred_restricted_fp_asmoperand : AsmOperandClass {
  let Name = "CondCodeRestrictedFP";
  let RenderMethod = "addITCondCodeOperands";
  let PredicateMethod = "isITCondCodeRestrictedFP";
  let ParserMethod = "parseITCondCode";
  let DiagnosticString = "condition code for floating-point "#
                         "comparison must be EQ, NE, LT, GT, LE or GE";
}

class VCMPPredicateOperand : Operand<i32>;

def pred_basic_i : VCMPPredicateOperand {
  let PrintMethod = "printMandatoryRestrictedPredicateOperand";
  let ParserMatchClass = pred_restricted_i_asmoperand;
  let DecoderMethod = "DecodeRestrictedIPredicateOperand";
  let EncoderMethod = "getRestrictedCondCodeOpValue";
}

def pred_basic_u : VCMPPredicateOperand {
  let PrintMethod = "printMandatoryRestrictedPredicateOperand";
  let ParserMatchClass = pred_restricted_u_asmoperand;
  let DecoderMethod = "DecodeRestrictedUPredicateOperand";
  let EncoderMethod = "getRestrictedCondCodeOpValue";
}

def pred_basic_s : VCMPPredicateOperand {
  let PrintMethod = "printMandatoryRestrictedPredicateOperand";
  let ParserMatchClass = pred_restricted_s_asmoperand;
  let DecoderMethod = "DecodeRestrictedSPredicateOperand";
  let EncoderMethod = "getRestrictedCondCodeOpValue";
}

def pred_basic_fp : VCMPPredicateOperand {
  let PrintMethod = "printMandatoryRestrictedPredicateOperand";
  let ParserMatchClass = pred_restricted_fp_asmoperand;
  let DecoderMethod = "DecodeRestrictedFPPredicateOperand";
  let EncoderMethod = "getRestrictedCondCodeOpValue";
}

// Register list operands for interleaving load/stores
def VecList2QAsmOperand : AsmOperandClass {
  let Name = "VecListTwoMQ";
  let ParserMethod = "parseVectorList";
  let RenderMethod = "addMVEVecListOperands";
  let DiagnosticString = "operand must be a list of two consecutive "#
                         "q-registers in range [q0,q7]";
}

def VecList2Q : RegisterOperand<QQPR, "printMVEVectorListTwoQ"> {
  let ParserMatchClass = VecList2QAsmOperand;
  let PrintMethod = "printMVEVectorList<2>";
}

def VecList4QAsmOperand : AsmOperandClass {
  let Name = "VecListFourMQ";
  let ParserMethod = "parseVectorList";
  let RenderMethod = "addMVEVecListOperands";
  let DiagnosticString = "operand must be a list of four consecutive "#
                         "q-registers in range [q0,q7]";
}

def VecList4Q : RegisterOperand<QQQQPR, "printMVEVectorListFourQ"> {
  let ParserMatchClass = VecList4QAsmOperand;
  let PrintMethod = "printMVEVectorList<4>";
}

// taddrmode_imm7 := reg[r0-r7] +/- (imm7 << shift)
class TMemImm7ShiftOffsetAsmOperand<int shift> : AsmOperandClass {
  let Name = "TMemImm7Shift"#shift#"Offset";
  let PredicateMethod = "isMemImm7ShiftedOffset<"#shift#",ARM::tGPRRegClassID>";
  let RenderMethod = "addMemImmOffsetOperands";
}

class taddrmode_imm7<int shift> : MemOperand,
    ComplexPattern<i32, 2, "SelectTAddrModeImm7<"#shift#">", []> {
  let ParserMatchClass = TMemImm7ShiftOffsetAsmOperand<shift>;
  // They are printed the same way as the T2 imm8 version
  let PrintMethod = "printT2AddrModeImm8Operand<false>";
  // This can also be the same as the T2 version.
  let EncoderMethod = "getT2AddrModeImmOpValue<7,"#shift#">";
  let DecoderMethod = "DecodeTAddrModeImm7<"#shift#">";
  let MIOperandInfo = (ops tGPR:$base, i32imm:$offsimm);
}

// t2addrmode_imm7 := reg +/- (imm7)
class MemImm7ShiftOffsetAsmOperand<int shift> : AsmOperandClass {
  let Name = "MemImm7Shift"#shift#"Offset";
  let PredicateMethod = "isMemImm7ShiftedOffset<" # shift #
                        ",ARM::GPRnopcRegClassID>";
  let RenderMethod = "addMemImmOffsetOperands";
}

def MemImm7Shift0OffsetAsmOperand : MemImm7ShiftOffsetAsmOperand<0>;
def MemImm7Shift1OffsetAsmOperand : MemImm7ShiftOffsetAsmOperand<1>;
def MemImm7Shift2OffsetAsmOperand : MemImm7ShiftOffsetAsmOperand<2>;

class T2AddrMode_Imm7<int shift> : MemOperand,
      ComplexPattern<i32, 2, "SelectT2AddrModeImm7<"#shift#">", []> {
  let EncoderMethod = "getT2AddrModeImmOpValue<7,"#shift#">";
  let DecoderMethod = "DecodeT2AddrModeImm7<"#shift#", 0>";
  let ParserMatchClass =
    !cast<AsmOperandClass>("MemImm7Shift"#shift#"OffsetAsmOperand");
  let MIOperandInfo = (ops GPRnopc:$base, i32imm:$offsimm);
}

class t2addrmode_imm7<int shift> : T2AddrMode_Imm7<shift> {
  // They are printed the same way as the imm8 version
  let PrintMethod = "printT2AddrModeImm8Operand<false>";
}

class MemImm7ShiftOffsetWBAsmOperand<int shift> : AsmOperandClass {
  let Name = "MemImm7Shift"#shift#"OffsetWB";
  let PredicateMethod = "isMemImm7ShiftedOffset<" # shift #
                        ",ARM::rGPRRegClassID>";
  let RenderMethod = "addMemImmOffsetOperands";
}

def MemImm7Shift0OffsetWBAsmOperand : MemImm7ShiftOffsetWBAsmOperand<0>;
def MemImm7Shift1OffsetWBAsmOperand : MemImm7ShiftOffsetWBAsmOperand<1>;
def MemImm7Shift2OffsetWBAsmOperand : MemImm7ShiftOffsetWBAsmOperand<2>;

class t2addrmode_imm7_pre<int shift> : T2AddrMode_Imm7<shift> {
  // They are printed the same way as the imm8 version
  let PrintMethod = "printT2AddrModeImm8Operand<true>";
  let ParserMatchClass =
    !cast<AsmOperandClass>("MemImm7Shift"#shift#"OffsetWBAsmOperand");
  let DecoderMethod = "DecodeT2AddrModeImm7<"#shift#", 1>";
  // Operand name normalized to $offsimm for consistency with the other
  // imm7 address modes above (was $offsim; the name is only used locally).
  let MIOperandInfo = (ops rGPR:$base, i32imm:$offsimm);
}

class t2am_imm7shiftOffsetAsmOperand<int shift>
  : AsmOperandClass { let Name = "Imm7Shift"#shift; }
def t2am_imm7shift0OffsetAsmOperand : t2am_imm7shiftOffsetAsmOperand<0>;
def t2am_imm7shift1OffsetAsmOperand : t2am_imm7shiftOffsetAsmOperand<1>;
def t2am_imm7shift2OffsetAsmOperand : t2am_imm7shiftOffsetAsmOperand<2>;

class t2am_imm7_offset<int shift> : MemOperand,
      ComplexPattern<i32, 1, "SelectT2AddrModeImm7Offset<"#shift#">",
                     [], [SDNPWantRoot]> {
  // They are printed the same way as the imm8 version
  let PrintMethod = "printT2AddrModeImm8OffsetOperand";
  let ParserMatchClass =
    !cast<AsmOperandClass>("t2am_imm7shift"#shift#"OffsetAsmOperand");
  let EncoderMethod = "getT2ScaledImmOpValue<7,"#shift#">";
  let DecoderMethod = "DecodeT2Imm7<"#shift#">";
}

// Operands for gather/scatter loads of the form [Rbase, Qoffsets]
class MemRegRQOffsetAsmOperand<int shift> : AsmOperandClass {
  let Name = "MemRegRQS"#shift#"Offset";
  let PredicateMethod = "isMemRegRQOffset<"#shift#">";
  let RenderMethod = "addMemRegRQOffsetOperands";
}

def MemRegRQS0OffsetAsmOperand : MemRegRQOffsetAsmOperand<0>;
def MemRegRQS1OffsetAsmOperand : MemRegRQOffsetAsmOperand<1>;
def MemRegRQS2OffsetAsmOperand : MemRegRQOffsetAsmOperand<2>;
def MemRegRQS3OffsetAsmOperand : MemRegRQOffsetAsmOperand<3>;

// mve_addr_rq_shift := reg + vreg{ << UXTW #shift}
class mve_addr_rq_shift<int shift> : MemOperand {
  let EncoderMethod = "getMveAddrModeRQOpValue";
  let PrintMethod = "printMveAddrModeRQOperand<"#shift#">";
  let ParserMatchClass =
    !cast<AsmOperandClass>("MemRegRQS"#shift#"OffsetAsmOperand");
  let DecoderMethod = "DecodeMveAddrModeRQ";
  let MIOperandInfo = (ops GPRnopc:$base, MQPR:$offsreg);
}

class MemRegQOffsetAsmOperand<int shift> : AsmOperandClass {
  let Name = "MemRegQS"#shift#"Offset";
  let PredicateMethod = "isMemRegQOffset<"#shift#">";
  let RenderMethod = "addMemImmOffsetOperands";
}

def MemRegQS2OffsetAsmOperand : MemRegQOffsetAsmOperand<2>;
def MemRegQS3OffsetAsmOperand : MemRegQOffsetAsmOperand<3>;

// mve_addr_q_shift := vreg {+ #imm7s2/4}
class mve_addr_q_shift<int shift> : MemOperand {
  let EncoderMethod = "getMveAddrModeQOpValue<"#shift#">";
  // Can be printed same way as other reg + imm operands
  let PrintMethod = "printT2AddrModeImm8Operand<false>";
  let ParserMatchClass =
    !cast<AsmOperandClass>("MemRegQS"#shift#"OffsetAsmOperand");
  let DecoderMethod = "DecodeMveAddrModeQ<"#shift#">";
  let MIOperandInfo = (ops MQPR:$base, i32imm:$imm);
}

// A family of classes wrapping up information about the vector types
// used by MVE.
class MVEVectorVTInfo<ValueType vec, ValueType dblvec,
                      ValueType pred, ValueType dblpred,
                      bits<2> size, string suffixletter, bit unsigned> {
  // The LLVM ValueType representing the vector, so we can use it in
  // ISel patterns.
  ValueType Vec = vec;

  // The LLVM ValueType representing a vector with elements double the size
  // of those in Vec, so we can use it in ISel patterns. It is up to the
  // invoker of this class to ensure that this is a correct choice.
  ValueType DblVec = dblvec;

  // An LLVM ValueType representing a corresponding vector of
  // predicate bits, for use in ISel patterns that handle an IR
  // intrinsic describing the predicated form of the instruction.
  //
  // Usually, for a vector of N things, this will be vNi1. But for
  // vectors of 2 values, we make an exception, and use v4i1 instead
  // of v2i1. Rationale: MVE codegen doesn't support doing all the
  // auxiliary operations on v2i1 (vector shuffles etc), and also,
  // there's no MVE compare instruction that will _generate_ v2i1
  // directly.
  ValueType Pred = pred;

  // Same as Pred but for DblVec rather than Vec.
  ValueType DblPred = dblpred;

  // The most common representation of the vector element size in MVE
  // instruction encodings: a 2-bit value V representing an (8<<V)-bit
  // vector element.
  bits<2> Size = size;

  // For vectors explicitly mentioning a signedness of integers: 0 for
  // signed and 1 for unsigned. For anything else, undefined.
  bit Unsigned = unsigned;

  // The number of bits in a vector element, in integer form.
  int LaneBits = !shl(8, Size);

  // The suffix used in assembly language on an instruction operating
  // on this lane if it only cares about number of bits.
  string BitsSuffix = !if(!eq(suffixletter, "p"),
                          !if(!eq(unsigned, 0b0), "8", "16"),
                          !cast<string>(LaneBits));

  // The suffix used on an instruction that mentions the whole type.
  string Suffix = suffixletter # BitsSuffix;

  // The letter part of the suffix only.
  string SuffixLetter = suffixletter;
}

// Integer vector types that don't treat signed and unsigned differently.
def MVE_v16i8 : MVEVectorVTInfo<v16i8, v8i16, v16i1, v8i1, 0b00, "i", ?>;
def MVE_v8i16 : MVEVectorVTInfo<v8i16, v4i32, v8i1,  v4i1, 0b01, "i", ?>;
def MVE_v4i32 : MVEVectorVTInfo<v4i32, v2i64, v4i1,  v4i1, 0b10, "i", ?>;
def MVE_v2i64 : MVEVectorVTInfo<v2i64, ?,     v4i1,  ?,    0b11, "i", ?>;

// Explicitly signed and unsigned integer vectors. They map to the
// same set of LLVM ValueTypes as above, but are represented
// differently in assembly and instruction encodings.
303def MVE_v16s8 : MVEVectorVTInfo<v16i8, v8i16, v16i1, v8i1, 0b00, "s", 0b0>; 304def MVE_v8s16 : MVEVectorVTInfo<v8i16, v4i32, v8i1, v4i1, 0b01, "s", 0b0>; 305def MVE_v4s32 : MVEVectorVTInfo<v4i32, v2i64, v4i1, v4i1, 0b10, "s", 0b0>; 306def MVE_v2s64 : MVEVectorVTInfo<v2i64, ?, v4i1, ?, 0b11, "s", 0b0>; 307def MVE_v16u8 : MVEVectorVTInfo<v16i8, v8i16, v16i1, v8i1, 0b00, "u", 0b1>; 308def MVE_v8u16 : MVEVectorVTInfo<v8i16, v4i32, v8i1, v4i1, 0b01, "u", 0b1>; 309def MVE_v4u32 : MVEVectorVTInfo<v4i32, v2i64, v4i1, v4i1, 0b10, "u", 0b1>; 310def MVE_v2u64 : MVEVectorVTInfo<v2i64, ?, v4i1, ?, 0b11, "u", 0b1>; 311 312// FP vector types. 313def MVE_v8f16 : MVEVectorVTInfo<v8f16, v4f32, v8i1, v4i1, 0b01, "f", ?>; 314def MVE_v4f32 : MVEVectorVTInfo<v4f32, v2f64, v4i1, v4i1, 0b10, "f", ?>; 315def MVE_v2f64 : MVEVectorVTInfo<v2f64, ?, v4i1, ?, 0b11, "f", ?>; 316 317// Polynomial vector types. 318def MVE_v16p8 : MVEVectorVTInfo<v16i8, v8i16, v16i1, v8i1, 0b11, "p", 0b0>; 319def MVE_v8p16 : MVEVectorVTInfo<v8i16, v4i32, v8i1, v4i1, 0b11, "p", 0b1>; 320 321multiclass MVE_TwoOpPattern<MVEVectorVTInfo VTI, SDPatternOperator Op, Intrinsic PredInt, 322 dag PredOperands, Instruction Inst, 323 SDPatternOperator IdentityVec = null_frag> { 324 // Unpredicated 325 def : Pat<(VTI.Vec (Op (VTI.Vec MQPR:$Qm), (VTI.Vec MQPR:$Qn))), 326 (VTI.Vec (Inst (VTI.Vec MQPR:$Qm), (VTI.Vec MQPR:$Qn)))>; 327 328 // Predicated with select 329 if !ne(VTI.Size, 0b11) then { 330 def : Pat<(VTI.Vec (vselect (VTI.Pred VCCR:$mask), 331 (VTI.Vec (Op (VTI.Vec MQPR:$Qm), 332 (VTI.Vec MQPR:$Qn))), 333 (VTI.Vec MQPR:$inactive))), 334 (VTI.Vec (Inst (VTI.Vec MQPR:$Qm), (VTI.Vec MQPR:$Qn), 335 ARMVCCThen, (VTI.Pred VCCR:$mask), 336 (VTI.Vec MQPR:$inactive)))>; 337 338 // Optionally with the select folded through the op 339 def : Pat<(VTI.Vec (Op (VTI.Vec MQPR:$Qm), 340 (VTI.Vec (vselect (VTI.Pred VCCR:$mask), 341 (VTI.Vec MQPR:$Qn), 342 (VTI.Vec IdentityVec))))), 343 (VTI.Vec (Inst (VTI.Vec MQPR:$Qm), (VTI.Vec 
MQPR:$Qn), 344 ARMVCCThen, (VTI.Pred VCCR:$mask), 345 (VTI.Vec MQPR:$Qm)))>; 346 } 347 348 // Predicated with intrinsic 349 def : Pat<(VTI.Vec !con((PredInt (VTI.Vec MQPR:$Qm), (VTI.Vec MQPR:$Qn)), 350 PredOperands, 351 (? (VTI.Pred VCCR:$mask), (VTI.Vec MQPR:$inactive)))), 352 (VTI.Vec (Inst (VTI.Vec MQPR:$Qm), (VTI.Vec MQPR:$Qn), 353 ARMVCCThen, (VTI.Pred VCCR:$mask), 354 (VTI.Vec MQPR:$inactive)))>; 355} 356 357multiclass MVE_TwoOpPatternDup<MVEVectorVTInfo VTI, SDPatternOperator Op, Intrinsic PredInt, 358 dag PredOperands, Instruction Inst, 359 SDPatternOperator IdentityVec = null_frag> { 360 // Unpredicated 361 def : Pat<(VTI.Vec (Op (VTI.Vec MQPR:$Qm), (VTI.Vec (ARMvdup rGPR:$Rn)))), 362 (VTI.Vec (Inst (VTI.Vec MQPR:$Qm), rGPR:$Rn))>; 363 364 // Predicated with select 365 if !ne(VTI.Size, 0b11) then { 366 def : Pat<(VTI.Vec (vselect (VTI.Pred VCCR:$mask), 367 (VTI.Vec (Op (VTI.Vec MQPR:$Qm), 368 (VTI.Vec (ARMvdup rGPR:$Rn)))), 369 (VTI.Vec MQPR:$inactive))), 370 (VTI.Vec (Inst (VTI.Vec MQPR:$Qm), rGPR:$Rn, 371 ARMVCCThen, (VTI.Pred VCCR:$mask), 372 (VTI.Vec MQPR:$inactive)))>; 373 374 // Optionally with the select folded through the op 375 def : Pat<(VTI.Vec (Op (VTI.Vec MQPR:$Qm), 376 (VTI.Vec (vselect (VTI.Pred VCCR:$mask), 377 (ARMvdup rGPR:$Rn), 378 (VTI.Vec IdentityVec))))), 379 (VTI.Vec (Inst (VTI.Vec MQPR:$Qm), rGPR:$Rn, 380 ARMVCCThen, (VTI.Pred VCCR:$mask), 381 (VTI.Vec MQPR:$Qm)))>; 382 } 383 384 // Predicated with intrinsic 385 def : Pat<(VTI.Vec !con((PredInt (VTI.Vec MQPR:$Qm), (VTI.Vec (ARMvdup rGPR:$Rn))), 386 PredOperands, 387 (? 
(VTI.Pred VCCR:$mask), (VTI.Vec MQPR:$inactive)))), 388 (VTI.Vec (Inst (VTI.Vec MQPR:$Qm), rGPR:$Rn, 389 ARMVCCThen, (VTI.Pred VCCR:$mask), 390 (VTI.Vec MQPR:$inactive)))>; 391} 392 393// --------- Start of base classes for the instructions themselves 394 395class MVE_MI<dag oops, dag iops, InstrItinClass itin, string asm, 396 string ops, string cstr, list<dag> pattern> 397 : Thumb2XI<oops, iops, AddrModeNone, 4, itin, !strconcat(asm, "\t", ops), cstr, 398 pattern>, 399 Requires<[HasMVEInt]> { 400 let D = MVEDomain; 401 let DecoderNamespace = "MVE"; 402} 403 404// MVE_p is used for most predicated instructions, to add the cluster 405// of input operands that provides the VPT suffix (none, T or E) and 406// the input predicate register. 407class MVE_p<dag oops, dag iops, InstrItinClass itin, string iname, 408 string suffix, string ops, vpred_ops vpred, string cstr, 409 list<dag> pattern=[]> 410 : MVE_MI<oops, !con(iops, (ins vpred:$vp)), itin, 411 // If the instruction has a suffix, like vadd.f32, then the 412 // VPT predication suffix goes before the dot, so the full 413 // name has to be "vadd${vp}.f32". 
414 !strconcat(iname, "${vp}", 415 !if(!eq(suffix, ""), "", !strconcat(".", suffix))), 416 ops, !strconcat(cstr, vpred.vpred_constraint), pattern> { 417 let Inst{31-29} = 0b111; 418 let Inst{27-26} = 0b11; 419} 420 421class MVE_f<dag oops, dag iops, InstrItinClass itin, string iname, 422 string suffix, string ops, vpred_ops vpred, string cstr, 423 list<dag> pattern=[]> 424 : MVE_p<oops, iops, itin, iname, suffix, ops, vpred, cstr, pattern> { 425 let Predicates = [HasMVEFloat]; 426} 427 428class MVE_MI_with_pred<dag oops, dag iops, InstrItinClass itin, string asm, 429 string ops, string cstr, list<dag> pattern> 430 : Thumb2I<oops, iops, AddrModeNone, 4, itin, asm, !strconcat("\t", ops), cstr, 431 pattern>, 432 Requires<[HasV8_1MMainline, HasMVEInt]> { 433 let D = MVEDomain; 434 let DecoderNamespace = "MVE"; 435} 436 437class MVE_VMOV_lane_base<dag oops, dag iops, InstrItinClass itin, string asm, 438 string suffix, string ops, string cstr, 439 list<dag> pattern> 440 : Thumb2I<oops, iops, AddrModeNone, 4, itin, asm, 441 !if(!eq(suffix, ""), "", "." 
# suffix) # "\t" # ops, 442 cstr, pattern>, 443 Requires<[HasV8_1MMainline, HasMVEInt]> { 444 let D = MVEDomain; 445 let DecoderNamespace = "MVE"; 446} 447 448class MVE_ScalarShift<string iname, dag oops, dag iops, string asm, string cstr, 449 list<dag> pattern=[]> 450 : MVE_MI_with_pred<oops, iops, NoItinerary, iname, asm, cstr, pattern> { 451 let Inst{31-20} = 0b111010100101; 452 let Inst{8} = 0b1; 453 let validForTailPredication=1; 454} 455 456class MVE_ScalarShiftSingleReg<string iname, dag iops, string asm, string cstr, 457 list<dag> pattern=[]> 458 : MVE_ScalarShift<iname, (outs rGPR:$RdaDest), iops, asm, cstr, pattern> { 459 bits<4> RdaDest; 460 461 let Inst{19-16} = RdaDest{3-0}; 462} 463 464class MVE_ScalarShiftSRegImm<string iname, bits<2> op5_4> 465 : MVE_ScalarShiftSingleReg<iname, (ins rGPR:$RdaSrc, long_shift:$imm), 466 "$RdaSrc, $imm", "$RdaDest = $RdaSrc", 467 [(set rGPR:$RdaDest, 468 (i32 (!cast<Intrinsic>("int_arm_mve_" # iname) 469 (i32 rGPR:$RdaSrc), (i32 imm:$imm))))]> { 470 bits<5> imm; 471 472 let Inst{15} = 0b0; 473 let Inst{14-12} = imm{4-2}; 474 let Inst{11-8} = 0b1111; 475 let Inst{7-6} = imm{1-0}; 476 let Inst{5-4} = op5_4{1-0}; 477 let Inst{3-0} = 0b1111; 478} 479 480def MVE_SQSHL : MVE_ScalarShiftSRegImm<"sqshl", 0b11>; 481def MVE_SRSHR : MVE_ScalarShiftSRegImm<"srshr", 0b10>; 482def MVE_UQSHL : MVE_ScalarShiftSRegImm<"uqshl", 0b00>; 483def MVE_URSHR : MVE_ScalarShiftSRegImm<"urshr", 0b01>; 484 485class MVE_ScalarShiftSRegReg<string iname, bits<2> op5_4> 486 : MVE_ScalarShiftSingleReg<iname, (ins rGPR:$RdaSrc, rGPR:$Rm), 487 "$RdaSrc, $Rm", "$RdaDest = $RdaSrc", 488 [(set rGPR:$RdaDest, 489 (i32 (!cast<Intrinsic>("int_arm_mve_" # iname) 490 (i32 rGPR:$RdaSrc), (i32 rGPR:$Rm))))]> { 491 bits<4> Rm; 492 493 let Inst{15-12} = Rm{3-0}; 494 let Inst{11-8} = 0b1111; 495 let Inst{7-6} = 0b00; 496 let Inst{5-4} = op5_4{1-0}; 497 let Inst{3-0} = 0b1101; 498 499 let Unpredictable{8-6} = 0b111; 500} 501 502def MVE_SQRSHR : 
MVE_ScalarShiftSRegReg<"sqrshr", 0b10>; 503def MVE_UQRSHL : MVE_ScalarShiftSRegReg<"uqrshl", 0b00>; 504 505class MVE_ScalarShiftDoubleReg<string iname, dag iops, string asm, 506 string cstr, list<dag> pattern=[]> 507 : MVE_ScalarShift<iname, (outs tGPREven:$RdaLo, tGPROdd:$RdaHi), 508 iops, asm, cstr, pattern> { 509 bits<4> RdaLo; 510 bits<4> RdaHi; 511 512 let Inst{19-17} = RdaLo{3-1}; 513 let Inst{11-9} = RdaHi{3-1}; 514 515 let hasSideEffects = 0; 516} 517 518class MVE_ScalarShiftDRegImm<string iname, bits<2> op5_4, bit op16, 519 list<dag> pattern=[]> 520 : MVE_ScalarShiftDoubleReg< 521 iname, (ins tGPREven:$RdaLo_src, tGPROdd:$RdaHi_src, long_shift:$imm), 522 "$RdaLo, $RdaHi, $imm", "$RdaLo = $RdaLo_src,$RdaHi = $RdaHi_src", 523 pattern> { 524 bits<5> imm; 525 526 let Inst{16} = op16; 527 let Inst{15} = 0b0; 528 let Inst{14-12} = imm{4-2}; 529 let Inst{7-6} = imm{1-0}; 530 let Inst{5-4} = op5_4{1-0}; 531 let Inst{3-0} = 0b1111; 532} 533 534class MVE_ScalarShiftDRegRegBase<string iname, dag iops, string asm, 535 bit op5, bit op16, list<dag> pattern=[]> 536 : MVE_ScalarShiftDoubleReg< 537 iname, iops, asm, "@earlyclobber $RdaHi,@earlyclobber $RdaLo," 538 "$RdaLo = $RdaLo_src,$RdaHi = $RdaHi_src", 539 pattern> { 540 bits<4> Rm; 541 542 let Inst{16} = op16; 543 let Inst{15-12} = Rm{3-0}; 544 let Inst{6} = 0b0; 545 let Inst{5} = op5; 546 let Inst{4} = 0b0; 547 let Inst{3-0} = 0b1101; 548 549 // Custom decoder method because of the following overlapping encodings: 550 // ASRL and SQRSHR 551 // LSLL and UQRSHL 552 // SQRSHRL and SQRSHR 553 // UQRSHLL and UQRSHL 554 let DecoderMethod = "DecodeMVEOverlappingLongShift"; 555} 556 557class MVE_ScalarShiftDRegReg<string iname, bit op5, list<dag> pattern=[]> 558 : MVE_ScalarShiftDRegRegBase< 559 iname, (ins tGPREven:$RdaLo_src, tGPROdd:$RdaHi_src, rGPR:$Rm), 560 "$RdaLo, $RdaHi, $Rm", op5, 0b0, pattern> { 561 562 let Inst{7} = 0b0; 563} 564 565class MVE_ScalarShiftDRegRegWithSat<string iname, bit op5, list<dag> pattern=[]> 
566 : MVE_ScalarShiftDRegRegBase< 567 iname, (ins tGPREven:$RdaLo_src, tGPROdd:$RdaHi_src, rGPR:$Rm, saturateop:$sat), 568 "$RdaLo, $RdaHi, $sat, $Rm", op5, 0b1, pattern> { 569 bit sat; 570 571 let Inst{7} = sat; 572} 573 574def MVE_ASRLr : MVE_ScalarShiftDRegReg<"asrl", 0b1, [(set tGPREven:$RdaLo, tGPROdd:$RdaHi, 575 (ARMasrl tGPREven:$RdaLo_src, 576 tGPROdd:$RdaHi_src, rGPR:$Rm))]>; 577def MVE_ASRLi : MVE_ScalarShiftDRegImm<"asrl", 0b10, ?, [(set tGPREven:$RdaLo, tGPROdd:$RdaHi, 578 (ARMasrl tGPREven:$RdaLo_src, 579 tGPROdd:$RdaHi_src, (i32 long_shift:$imm)))]>; 580def MVE_LSLLr : MVE_ScalarShiftDRegReg<"lsll", 0b0, [(set tGPREven:$RdaLo, tGPROdd:$RdaHi, 581 (ARMlsll tGPREven:$RdaLo_src, 582 tGPROdd:$RdaHi_src, rGPR:$Rm))]>; 583def MVE_LSLLi : MVE_ScalarShiftDRegImm<"lsll", 0b00, ?, [(set tGPREven:$RdaLo, tGPROdd:$RdaHi, 584 (ARMlsll tGPREven:$RdaLo_src, 585 tGPROdd:$RdaHi_src, (i32 long_shift:$imm)))]>; 586def MVE_LSRL : MVE_ScalarShiftDRegImm<"lsrl", 0b01, ?, [(set tGPREven:$RdaLo, tGPROdd:$RdaHi, 587 (ARMlsrl tGPREven:$RdaLo_src, 588 tGPROdd:$RdaHi_src, (i32 long_shift:$imm)))]>; 589 590def MVE_SQRSHRL : MVE_ScalarShiftDRegRegWithSat<"sqrshrl", 0b1>; 591def MVE_SQSHLL : MVE_ScalarShiftDRegImm<"sqshll", 0b11, 0b1>; 592def MVE_SRSHRL : MVE_ScalarShiftDRegImm<"srshrl", 0b10, 0b1>; 593 594def MVE_UQRSHLL : MVE_ScalarShiftDRegRegWithSat<"uqrshll", 0b0>; 595def MVE_UQSHLL : MVE_ScalarShiftDRegImm<"uqshll", 0b00, 0b1>; 596def MVE_URSHRL : MVE_ScalarShiftDRegImm<"urshrl", 0b01, 0b1>; 597 598// start of mve_rDest instructions 599 600class MVE_rDest<dag oops, dag iops, InstrItinClass itin, 601 string iname, string suffix, 602 string ops, string cstr, list<dag> pattern=[]> 603// Always use vpred_n and not vpred_r: with the output register being 604// a GPR and not a vector register, there can't be any question of 605// what to put in its inactive lanes. 
606 : MVE_p<oops, iops, itin, iname, suffix, ops, vpred_n, cstr, pattern> { 607 608 let Inst{25-23} = 0b101; 609 let Inst{11-9} = 0b111; 610 let Inst{4} = 0b0; 611} 612 613class MVE_VABAV<string suffix, bit U, bits<2> size> 614 : MVE_rDest<(outs rGPR:$Rda), (ins rGPR:$Rda_src, MQPR:$Qn, MQPR:$Qm), 615 NoItinerary, "vabav", suffix, "$Rda, $Qn, $Qm", "$Rda = $Rda_src", 616 []> { 617 bits<4> Qm; 618 bits<4> Qn; 619 bits<4> Rda; 620 621 let Inst{28} = U; 622 let Inst{22} = 0b0; 623 let Inst{21-20} = size{1-0}; 624 let Inst{19-17} = Qn{2-0}; 625 let Inst{16} = 0b0; 626 let Inst{15-12} = Rda{3-0}; 627 let Inst{8} = 0b1; 628 let Inst{7} = Qn{3}; 629 let Inst{6} = 0b0; 630 let Inst{5} = Qm{3}; 631 let Inst{3-1} = Qm{2-0}; 632 let Inst{0} = 0b1; 633 let horizontalReduction = 1; 634} 635 636multiclass MVE_VABAV_m<MVEVectorVTInfo VTI> { 637 def "" : MVE_VABAV<VTI.Suffix, VTI.Unsigned, VTI.Size>; 638 defvar Inst = !cast<Instruction>(NAME); 639 640 let Predicates = [HasMVEInt] in { 641 def : Pat<(i32 (int_arm_mve_vabav 642 (i32 VTI.Unsigned), 643 (i32 rGPR:$Rda_src), 644 (VTI.Vec MQPR:$Qn), (VTI.Vec MQPR:$Qm))), 645 (i32 (Inst (i32 rGPR:$Rda_src), 646 (VTI.Vec MQPR:$Qn), (VTI.Vec MQPR:$Qm)))>; 647 648 def : Pat<(i32 (int_arm_mve_vabav_predicated 649 (i32 VTI.Unsigned), 650 (i32 rGPR:$Rda_src), 651 (VTI.Vec MQPR:$Qn), (VTI.Vec MQPR:$Qm), 652 (VTI.Pred VCCR:$mask))), 653 (i32 (Inst (i32 rGPR:$Rda_src), 654 (VTI.Vec MQPR:$Qn), (VTI.Vec MQPR:$Qm), 655 ARMVCCThen, (VTI.Pred VCCR:$mask)))>; 656 } 657} 658 659defm MVE_VABAVs8 : MVE_VABAV_m<MVE_v16s8>; 660defm MVE_VABAVs16 : MVE_VABAV_m<MVE_v8s16>; 661defm MVE_VABAVs32 : MVE_VABAV_m<MVE_v4s32>; 662defm MVE_VABAVu8 : MVE_VABAV_m<MVE_v16u8>; 663defm MVE_VABAVu16 : MVE_VABAV_m<MVE_v8u16>; 664defm MVE_VABAVu32 : MVE_VABAV_m<MVE_v4u32>; 665 666class MVE_VADDV<string iname, string suffix, dag iops, string cstr, 667 bit A, bit U, bits<2> size, list<dag> pattern=[]> 668 : MVE_rDest<(outs tGPREven:$Rda), iops, NoItinerary, 669 iname, suffix, 
"$Rda, $Qm", cstr, pattern> { 670 bits<3> Qm; 671 bits<4> Rda; 672 673 let Inst{28} = U; 674 let Inst{22-20} = 0b111; 675 let Inst{19-18} = size{1-0}; 676 let Inst{17-16} = 0b01; 677 let Inst{15-13} = Rda{3-1}; 678 let Inst{12} = 0b0; 679 let Inst{8-6} = 0b100; 680 let Inst{5} = A; 681 let Inst{3-1} = Qm{2-0}; 682 let Inst{0} = 0b0; 683 let horizontalReduction = 1; 684 let validForTailPredication = 1; 685} 686 687def SDTVecReduceP : SDTypeProfile<1, 2, [ // VADDLVp 688 SDTCisInt<0>, SDTCisVec<1>, SDTCisVec<2> 689]>; 690def ARMVADDVs : SDNode<"ARMISD::VADDVs", SDTVecReduce>; 691def ARMVADDVu : SDNode<"ARMISD::VADDVu", SDTVecReduce>; 692def ARMVADDVps : SDNode<"ARMISD::VADDVps", SDTVecReduceP>; 693def ARMVADDVpu : SDNode<"ARMISD::VADDVpu", SDTVecReduceP>; 694 695multiclass MVE_VADDV_A<MVEVectorVTInfo VTI> { 696 def acc : MVE_VADDV<"vaddva", VTI.Suffix, 697 (ins tGPREven:$Rda_src, MQPR:$Qm), "$Rda = $Rda_src", 698 0b1, VTI.Unsigned, VTI.Size>; 699 def no_acc : MVE_VADDV<"vaddv", VTI.Suffix, 700 (ins MQPR:$Qm), "", 701 0b0, VTI.Unsigned, VTI.Size>; 702 703 defvar InstA = !cast<Instruction>(NAME # "acc"); 704 defvar InstN = !cast<Instruction>(NAME # "no_acc"); 705 706 let Predicates = [HasMVEInt] in { 707 if VTI.Unsigned then { 708 def : Pat<(i32 (vecreduce_add (VTI.Vec MQPR:$vec))), 709 (i32 (InstN $vec))>; 710 def : Pat<(i32 (vecreduce_add (VTI.Vec (vselect (VTI.Pred VCCR:$pred), 711 (VTI.Vec MQPR:$vec), 712 (VTI.Vec ARMimmAllZerosV))))), 713 (i32 (InstN $vec, ARMVCCThen, $pred))>; 714 def : Pat<(i32 (ARMVADDVu (VTI.Vec MQPR:$vec))), 715 (i32 (InstN $vec))>; 716 def : Pat<(i32 (ARMVADDVpu (VTI.Vec MQPR:$vec), (VTI.Pred VCCR:$pred))), 717 (i32 (InstN $vec, ARMVCCThen, $pred))>; 718 def : Pat<(i32 (add (i32 (vecreduce_add (VTI.Vec MQPR:$vec))), 719 (i32 tGPREven:$acc))), 720 (i32 (InstA $acc, $vec))>; 721 def : Pat<(i32 (add (i32 (vecreduce_add (VTI.Vec (vselect (VTI.Pred VCCR:$pred), 722 (VTI.Vec MQPR:$vec), 723 (VTI.Vec ARMimmAllZerosV))))), 724 (i32 tGPREven:$acc))), 
725 (i32 (InstA $acc, $vec, ARMVCCThen, $pred))>; 726 def : Pat<(i32 (add (i32 (ARMVADDVu (VTI.Vec MQPR:$vec))), 727 (i32 tGPREven:$acc))), 728 (i32 (InstA $acc, $vec))>; 729 def : Pat<(i32 (add (i32 (ARMVADDVpu (VTI.Vec MQPR:$vec), (VTI.Pred VCCR:$pred))), 730 (i32 tGPREven:$acc))), 731 (i32 (InstA $acc, $vec, ARMVCCThen, $pred))>; 732 } else { 733 def : Pat<(i32 (ARMVADDVs (VTI.Vec MQPR:$vec))), 734 (i32 (InstN $vec))>; 735 def : Pat<(i32 (add (i32 (ARMVADDVs (VTI.Vec MQPR:$vec))), 736 (i32 tGPREven:$acc))), 737 (i32 (InstA $acc, $vec))>; 738 def : Pat<(i32 (ARMVADDVps (VTI.Vec MQPR:$vec), (VTI.Pred VCCR:$pred))), 739 (i32 (InstN $vec, ARMVCCThen, $pred))>; 740 def : Pat<(i32 (add (i32 (ARMVADDVps (VTI.Vec MQPR:$vec), (VTI.Pred VCCR:$pred))), 741 (i32 tGPREven:$acc))), 742 (i32 (InstA $acc, $vec, ARMVCCThen, $pred))>; 743 } 744 745 def : Pat<(i32 (int_arm_mve_addv_predicated (VTI.Vec MQPR:$vec), 746 (i32 VTI.Unsigned), 747 (VTI.Pred VCCR:$pred))), 748 (i32 (InstN $vec, ARMVCCThen, $pred))>; 749 def : Pat<(i32 (add (int_arm_mve_addv_predicated (VTI.Vec MQPR:$vec), 750 (i32 VTI.Unsigned), 751 (VTI.Pred VCCR:$pred)), 752 (i32 tGPREven:$acc))), 753 (i32 (InstA $acc, $vec, ARMVCCThen, $pred))>; 754 } 755} 756 757defm MVE_VADDVs8 : MVE_VADDV_A<MVE_v16s8>; 758defm MVE_VADDVs16 : MVE_VADDV_A<MVE_v8s16>; 759defm MVE_VADDVs32 : MVE_VADDV_A<MVE_v4s32>; 760defm MVE_VADDVu8 : MVE_VADDV_A<MVE_v16u8>; 761defm MVE_VADDVu16 : MVE_VADDV_A<MVE_v8u16>; 762defm MVE_VADDVu32 : MVE_VADDV_A<MVE_v4u32>; 763 764class MVE_VADDLV<string iname, string suffix, dag iops, string cstr, 765 bit A, bit U, list<dag> pattern=[]> 766 : MVE_rDest<(outs tGPREven:$RdaLo, tGPROdd:$RdaHi), iops, NoItinerary, iname, 767 suffix, "$RdaLo, $RdaHi, $Qm", cstr, pattern> { 768 bits<3> Qm; 769 bits<4> RdaLo; 770 bits<4> RdaHi; 771 772 let Inst{28} = U; 773 let Inst{22-20} = RdaHi{3-1}; 774 let Inst{19-18} = 0b10; 775 let Inst{17-16} = 0b01; 776 let Inst{15-13} = RdaLo{3-1}; 777 let Inst{12} = 0b0; 778 let 
Inst{8-6} = 0b100; 779 let Inst{5} = A; 780 let Inst{3-1} = Qm{2-0}; 781 let Inst{0} = 0b0; 782 let horizontalReduction = 1; 783} 784 785def SDTVecReduceL : SDTypeProfile<2, 1, [ // VADDLV 786 SDTCisInt<0>, SDTCisInt<1>, SDTCisVec<2> 787]>; 788def SDTVecReduceLA : SDTypeProfile<2, 3, [ // VADDLVA 789 SDTCisInt<0>, SDTCisInt<1>, SDTCisInt<2>, SDTCisInt<3>, 790 SDTCisVec<4> 791]>; 792def SDTVecReduceLP : SDTypeProfile<2, 2, [ // VADDLVp 793 SDTCisInt<0>, SDTCisInt<1>, SDTCisVec<2>, SDTCisVec<2> 794]>; 795def SDTVecReduceLPA : SDTypeProfile<2, 4, [ // VADDLVAp 796 SDTCisInt<0>, SDTCisInt<1>, SDTCisInt<2>, SDTCisInt<3>, 797 SDTCisVec<4>, SDTCisVec<5> 798]>; 799 800multiclass MVE_VADDLV_A<MVEVectorVTInfo VTI> { 801 def acc : MVE_VADDLV<"vaddlva", VTI.Suffix, 802 (ins tGPREven:$RdaLo_src, tGPROdd:$RdaHi_src, MQPR:$Qm), 803 "$RdaLo = $RdaLo_src,$RdaHi = $RdaHi_src", 804 0b1, VTI.Unsigned>; 805 def no_acc : MVE_VADDLV<"vaddlv", VTI.Suffix, 806 (ins MQPR:$Qm), "", 807 0b0, VTI.Unsigned>; 808 809 defvar InstA = !cast<Instruction>(NAME # "acc"); 810 defvar InstN = !cast<Instruction>(NAME # "no_acc"); 811 812 defvar letter = VTI.SuffixLetter; 813 defvar ARMVADDLV = SDNode<"ARMISD::VADDLV" # letter, SDTVecReduceL>; 814 defvar ARMVADDLVA = SDNode<"ARMISD::VADDLVA" # letter, SDTVecReduceLA>; 815 defvar ARMVADDLVp = SDNode<"ARMISD::VADDLVp" # letter, SDTVecReduceLP>; 816 defvar ARMVADDLVAp = SDNode<"ARMISD::VADDLVAp" # letter, SDTVecReduceLPA>; 817 818 let Predicates = [HasMVEInt] in { 819 def : Pat<(ARMVADDLV (v4i32 MQPR:$vec)), 820 (InstN (v4i32 MQPR:$vec))>; 821 def : Pat<(ARMVADDLVA tGPREven:$acclo, tGPROdd:$acchi, (v4i32 MQPR:$vec)), 822 (InstA tGPREven:$acclo, tGPROdd:$acchi, (v4i32 MQPR:$vec))>; 823 def : Pat<(ARMVADDLVp (v4i32 MQPR:$vec), (VTI.Pred VCCR:$pred)), 824 (InstN (v4i32 MQPR:$vec), ARMVCCThen, (VTI.Pred VCCR:$pred))>; 825 def : Pat<(ARMVADDLVAp tGPREven:$acclo, tGPROdd:$acchi, (v4i32 MQPR:$vec), 826 (VTI.Pred VCCR:$pred)), 827 (InstA tGPREven:$acclo, 
// Tail of the MVE_VADDLV_A predicated-accumulate pattern and the s32/u32
// instantiations; then MVE_VMINMAXNMV — the float across-vector min/max
// (vminnmv/vmaxnmv and absolute vminnmav/vmaxnmav) encoding class — and its
// pattern multiclasses. Scalar results live in SPR/HPR, so the patterns wrap
// the rGPR-based instruction in COPY_TO_REGCLASS on both input and output.
tGPROdd:$acchi, (v4i32 MQPR:$vec), 828 ARMVCCThen, (VTI.Pred VCCR:$pred))>; 829 } 830} 831 832defm MVE_VADDLVs32 : MVE_VADDLV_A<MVE_v4s32>; 833defm MVE_VADDLVu32 : MVE_VADDLV_A<MVE_v4u32>; 834 835class MVE_VMINMAXNMV<string iname, string suffix, bit sz, 836 bit bit_17, bit bit_7, list<dag> pattern=[]> 837 : MVE_rDest<(outs rGPR:$RdaDest), (ins rGPR:$RdaSrc, MQPR:$Qm), 838 NoItinerary, iname, suffix, "$RdaSrc, $Qm", 839 "$RdaDest = $RdaSrc", pattern> { 840 bits<3> Qm; 841 bits<4> RdaDest; 842 843 let Inst{28} = sz; 844 let Inst{22-20} = 0b110; 845 let Inst{19-18} = 0b11; 846 let Inst{17} = bit_17; 847 let Inst{16} = 0b0; 848 let Inst{15-12} = RdaDest{3-0}; 849 let Inst{8} = 0b1; 850 let Inst{7} = bit_7; 851 let Inst{6-5} = 0b00; 852 let Inst{3-1} = Qm{2-0}; 853 let Inst{0} = 0b0; 854 let horizontalReduction = 1; 855 856 let Predicates = [HasMVEFloat]; 857 let hasSideEffects = 0; 858} 859 860multiclass MVE_VMINMAXNMV_p<string iname, bit notAbs, bit isMin, 861 MVEVectorVTInfo VTI, string intrBaseName, 862 ValueType Scalar, RegisterClass ScalarReg> { 863 def "": MVE_VMINMAXNMV<iname, VTI.Suffix, VTI.Size{0}, notAbs, isMin>; 864 defvar Inst = !cast<Instruction>(NAME); 865 defvar unpred_intr = !cast<Intrinsic>(intrBaseName); 866 defvar pred_intr = !cast<Intrinsic>(intrBaseName#"_predicated"); 867 868 let Predicates = [HasMVEFloat] in { 869 def : Pat<(Scalar (unpred_intr (Scalar ScalarReg:$prev), 870 (VTI.Vec MQPR:$vec))), 871 (COPY_TO_REGCLASS (Inst (COPY_TO_REGCLASS ScalarReg:$prev, rGPR), 872 (VTI.Vec MQPR:$vec)), 873 ScalarReg)>; 874 def : Pat<(Scalar (pred_intr (Scalar ScalarReg:$prev), 875 (VTI.Vec MQPR:$vec), 876 (VTI.Pred VCCR:$pred))), 877 (COPY_TO_REGCLASS (Inst (COPY_TO_REGCLASS ScalarReg:$prev, rGPR), 878 (VTI.Vec MQPR:$vec), 879 ARMVCCThen, (VTI.Pred VCCR:$pred)), 880 ScalarReg)>; 881 } 882} 883 884multiclass MVE_VMINMAXNMV_fty<string iname, bit notAbs, bit isMin, 885 string intrBase> { 886 defm f32 : MVE_VMINMAXNMV_p<iname, notAbs, isMin, MVE_v4f32, 
intrBase, 887 f32, SPR>; 888 defm f16 : MVE_VMINMAXNMV_p<iname, notAbs, isMin, MVE_v8f16, intrBase, 889 f16, HPR>; 890} 891 892defm MVE_VMINNMV : MVE_VMINMAXNMV_fty<"vminnmv", 1, 1, "int_arm_mve_minnmv">; 893defm MVE_VMAXNMV : MVE_VMINMAXNMV_fty<"vmaxnmv", 1, 0, "int_arm_mve_maxnmv">; 894defm MVE_VMINNMAV: MVE_VMINMAXNMV_fty<"vminnmav", 0, 1, "int_arm_mve_minnmav">; 895defm MVE_VMAXNMAV: MVE_VMINMAXNMV_fty<"vmaxnmav", 0, 0, "int_arm_mve_maxnmav">; 896 897class MVE_VMINMAXV<string iname, string suffix, bit U, bits<2> size, 898 bit bit_17, bit bit_7, list<dag> pattern=[]> 899 : MVE_rDest<(outs rGPR:$RdaDest), (ins rGPR:$RdaSrc, MQPR:$Qm), NoItinerary, 900 iname, suffix, "$RdaSrc, $Qm", "$RdaDest = $RdaSrc", pattern> { 901 bits<3> Qm; 902 bits<4> RdaDest; 903 904 let Inst{28} = U; 905 let Inst{22-20} = 0b110; 906 let Inst{19-18} = size{1-0}; 907 let Inst{17} = bit_17; 908 let Inst{16} = 0b0; 909 let Inst{15-12} = RdaDest{3-0}; 910 let Inst{8} = 0b1; 911 let Inst{7} = bit_7; 912 let Inst{6-5} = 0b00; 913 let Inst{3-1} = Qm{2-0}; 914 let Inst{0} = 0b0; 915 let horizontalReduction = 1; 916} 917 918multiclass MVE_VMINMAXV_p<string iname, bit notAbs, bit isMin, 919 MVEVectorVTInfo VTI, string intrBaseName> { 920 def "": MVE_VMINMAXV<iname, VTI.Suffix, VTI.Unsigned, VTI.Size, 921 notAbs, isMin>; 922 defvar Inst = !cast<Instruction>(NAME); 923 defvar unpred_intr = !cast<Intrinsic>(intrBaseName); 924 defvar pred_intr = !cast<Intrinsic>(intrBaseName#"_predicated"); 925 defvar base_args = (? (i32 rGPR:$prev), (VTI.Vec MQPR:$vec)); 926 defvar args = !if(notAbs, !con(base_args, (? 
// MVE_VMINMAXV_p intrinsic patterns, assembled by !con-catenating the shared
// argument prefix (prev accumulator + vector, plus the Unsigned flag for the
// non-absolute forms) with the intrinsic-specific suffix; then the VMINV/VMAXV
// SDNodes and vecreduce_(s|u)(min|max) patterns. Each vecreduce pattern seeds
// the accumulator register with the type's identity element, e.g.
// t2MVNi 127 == -128 for smax.s8 and t2MOVi16 32767 for smin.s16;
// then VMINAV/VMAXAV (signed-only absolute forms) and the start of the
// MVE_VMLAMLSDAV dot-product-accumulate encoding class.
(i32 VTI.Unsigned))), 927 base_args); 928 929 let Predicates = [HasMVEInt] in { 930 def : Pat<(i32 !con(args, (unpred_intr))), 931 (i32 (Inst (i32 rGPR:$prev), (VTI.Vec MQPR:$vec)))>; 932 def : Pat<(i32 !con(args, (pred_intr (VTI.Pred VCCR:$pred)))), 933 (i32 (Inst (i32 rGPR:$prev), (VTI.Vec MQPR:$vec), 934 ARMVCCThen, (VTI.Pred VCCR:$pred)))>; 935 } 936} 937 938multiclass MVE_VMINMAXV_ty<string iname, bit isMin, string intrBaseName> { 939 defm s8 : MVE_VMINMAXV_p<iname, 1, isMin, MVE_v16s8, intrBaseName>; 940 defm s16: MVE_VMINMAXV_p<iname, 1, isMin, MVE_v8s16, intrBaseName>; 941 defm s32: MVE_VMINMAXV_p<iname, 1, isMin, MVE_v4s32, intrBaseName>; 942 defm u8 : MVE_VMINMAXV_p<iname, 1, isMin, MVE_v16u8, intrBaseName>; 943 defm u16: MVE_VMINMAXV_p<iname, 1, isMin, MVE_v8u16, intrBaseName>; 944 defm u32: MVE_VMINMAXV_p<iname, 1, isMin, MVE_v4u32, intrBaseName>; 945} 946 947def SDTVecReduceR : SDTypeProfile<1, 2, [ // Reduction of an integer and vector into an integer 948 SDTCisInt<0>, SDTCisInt<1>, SDTCisVec<2> 949]>; 950def ARMVMINVu : SDNode<"ARMISD::VMINVu", SDTVecReduceR>; 951def ARMVMINVs : SDNode<"ARMISD::VMINVs", SDTVecReduceR>; 952def ARMVMAXVu : SDNode<"ARMISD::VMAXVu", SDTVecReduceR>; 953def ARMVMAXVs : SDNode<"ARMISD::VMAXVs", SDTVecReduceR>; 954 955defm MVE_VMINV : MVE_VMINMAXV_ty<"vminv", 1, "int_arm_mve_minv">; 956defm MVE_VMAXV : MVE_VMINMAXV_ty<"vmaxv", 0, "int_arm_mve_maxv">; 957 958let Predicates = [HasMVEInt] in { 959 def : Pat<(i32 (vecreduce_smax (v16i8 MQPR:$src))), 960 (i32 (MVE_VMAXVs8 (t2MVNi (i32 127)), $src))>; 961 def : Pat<(i32 (vecreduce_smax (v8i16 MQPR:$src))), 962 (i32 (MVE_VMAXVs16 (t2MOVi32imm (i32 -32768)), $src))>; 963 def : Pat<(i32 (vecreduce_smax (v4i32 MQPR:$src))), 964 (i32 (MVE_VMAXVs32 (t2MOVi (i32 -2147483648)), $src))>; 965 def : Pat<(i32 (vecreduce_umax (v16i8 MQPR:$src))), 966 (i32 (MVE_VMAXVu8 (t2MOVi (i32 0)), $src))>; 967 def : Pat<(i32 (vecreduce_umax (v8i16 MQPR:$src))), 968 (i32 (MVE_VMAXVu16 (t2MOVi (i32 0)), 
$src))>; 969 def : Pat<(i32 (vecreduce_umax (v4i32 MQPR:$src))), 970 (i32 (MVE_VMAXVu32 (t2MOVi (i32 0)), $src))>; 971 972 def : Pat<(i32 (vecreduce_smin (v16i8 MQPR:$src))), 973 (i32 (MVE_VMINVs8 (t2MOVi (i32 127)), $src))>; 974 def : Pat<(i32 (vecreduce_smin (v8i16 MQPR:$src))), 975 (i32 (MVE_VMINVs16 (t2MOVi16 (i32 32767)), $src))>; 976 def : Pat<(i32 (vecreduce_smin (v4i32 MQPR:$src))), 977 (i32 (MVE_VMINVs32 (t2MVNi (i32 -2147483648)), $src))>; 978 def : Pat<(i32 (vecreduce_umin (v16i8 MQPR:$src))), 979 (i32 (MVE_VMINVu8 (t2MOVi (i32 255)), $src))>; 980 def : Pat<(i32 (vecreduce_umin (v8i16 MQPR:$src))), 981 (i32 (MVE_VMINVu16 (t2MOVi16 (i32 65535)), $src))>; 982 def : Pat<(i32 (vecreduce_umin (v4i32 MQPR:$src))), 983 (i32 (MVE_VMINVu32 (t2MOVi (i32 4294967295)), $src))>; 984 985 def : Pat<(i32 (ARMVMINVu (i32 rGPR:$x), (v16i8 MQPR:$src))), 986 (i32 (MVE_VMINVu8 $x, $src))>; 987 def : Pat<(i32 (ARMVMINVu (i32 rGPR:$x), (v8i16 MQPR:$src))), 988 (i32 (MVE_VMINVu16 $x, $src))>; 989 def : Pat<(i32 (ARMVMINVu (i32 rGPR:$x), (v4i32 MQPR:$src))), 990 (i32 (MVE_VMINVu32 $x, $src))>; 991 def : Pat<(i32 (ARMVMINVs (i32 rGPR:$x), (v16i8 MQPR:$src))), 992 (i32 (MVE_VMINVs8 $x, $src))>; 993 def : Pat<(i32 (ARMVMINVs (i32 rGPR:$x), (v8i16 MQPR:$src))), 994 (i32 (MVE_VMINVs16 $x, $src))>; 995 def : Pat<(i32 (ARMVMINVs (i32 rGPR:$x), (v4i32 MQPR:$src))), 996 (i32 (MVE_VMINVs32 $x, $src))>; 997 998 def : Pat<(i32 (ARMVMAXVu (i32 rGPR:$x), (v16i8 MQPR:$src))), 999 (i32 (MVE_VMAXVu8 $x, $src))>; 1000 def : Pat<(i32 (ARMVMAXVu (i32 rGPR:$x), (v8i16 MQPR:$src))), 1001 (i32 (MVE_VMAXVu16 $x, $src))>; 1002 def : Pat<(i32 (ARMVMAXVu (i32 rGPR:$x), (v4i32 MQPR:$src))), 1003 (i32 (MVE_VMAXVu32 $x, $src))>; 1004 def : Pat<(i32 (ARMVMAXVs (i32 rGPR:$x), (v16i8 MQPR:$src))), 1005 (i32 (MVE_VMAXVs8 $x, $src))>; 1006 def : Pat<(i32 (ARMVMAXVs (i32 rGPR:$x), (v8i16 MQPR:$src))), 1007 (i32 (MVE_VMAXVs16 $x, $src))>; 1008 def : Pat<(i32 (ARMVMAXVs (i32 rGPR:$x), (v4i32 MQPR:$src))), 1009 (i32 
(MVE_VMAXVs32 $x, $src))>; 1010 1011} 1012 1013multiclass MVE_VMINMAXAV_ty<string iname, bit isMin, string intrBaseName> { 1014 defm s8 : MVE_VMINMAXV_p<iname, 0, isMin, MVE_v16s8, intrBaseName>; 1015 defm s16: MVE_VMINMAXV_p<iname, 0, isMin, MVE_v8s16, intrBaseName>; 1016 defm s32: MVE_VMINMAXV_p<iname, 0, isMin, MVE_v4s32, intrBaseName>; 1017} 1018 1019defm MVE_VMINAV : MVE_VMINMAXAV_ty<"vminav", 1, "int_arm_mve_minav">; 1020defm MVE_VMAXAV : MVE_VMINMAXAV_ty<"vmaxav", 0, "int_arm_mve_maxav">; 1021 1022class MVE_VMLAMLSDAV<string iname, string suffix, dag iops, string cstr, 1023 bit sz, bit bit_28, bit A, bit X, bit bit_8, bit bit_0> 1024 : MVE_rDest<(outs tGPREven:$RdaDest), iops, NoItinerary, iname, suffix, 1025 "$RdaDest, $Qn, $Qm", cstr, []> { 1026 bits<4> RdaDest; 1027 bits<3> Qm; 1028 bits<3> Qn; 1029 1030 let Inst{28} = bit_28; 1031 let Inst{22-20} = 0b111; 1032 let Inst{19-17} = Qn{2-0}; 1033 let Inst{16} = sz; 1034 let Inst{15-13} = RdaDest{3-1}; 1035 let Inst{12} = X; 1036 let Inst{8} = bit_8; 1037 let Inst{7-6} = 0b00; 1038 let Inst{5} = A; 1039 let Inst{3-1} = Qm{2-0}; 1040 let Inst{0} = bit_0; 1041 let horizontalReduction = 1; 1042 // Allow tail predication for non-exchanging versions. As this is also a 1043 // horizontalReduction, ARMLowOverheadLoops will also have to check that 1044 // the vector operands contain zeros in their false lanes for the instruction 1045 // to be properly valid. 
// VMLADAV/VMLSDAV multiclasses: MVE_VMLAMLSDAV_A instantiates the plain and
// accumulating ("a") variants and maps int_arm_mve_vmldava(_predicated) onto
// them, matching (i32 0) for the accumulator-free form; _AX adds the
// exchanging ("x") variant; then the SDTVecReduce2* SDTypeProfiles and the
// ARMVMLAV*/ARMVMLALV* SDNodes, and the unpredicated VMLAV/VMLADAV patterns
// (vecreduce_add-of-mul folds to a single dot-product instruction).
1046 let validForTailPredication = !eq(X, 0); 1047} 1048 1049multiclass MVE_VMLAMLSDAV_A<string iname, string x, MVEVectorVTInfo VTI, 1050 bit sz, bit bit_28, bit X, bit bit_8, bit bit_0> { 1051 def ""#x#VTI.Suffix : MVE_VMLAMLSDAV<iname # x, VTI.Suffix, 1052 (ins MQPR:$Qn, MQPR:$Qm), "", 1053 sz, bit_28, 0b0, X, bit_8, bit_0>; 1054 def "a"#x#VTI.Suffix : MVE_VMLAMLSDAV<iname # "a" # x, VTI.Suffix, 1055 (ins tGPREven:$RdaSrc, MQPR:$Qn, MQPR:$Qm), 1056 "$RdaDest = $RdaSrc", 1057 sz, bit_28, 0b1, X, bit_8, bit_0>; 1058 let Predicates = [HasMVEInt] in { 1059 def : Pat<(i32 (int_arm_mve_vmldava 1060 (i32 VTI.Unsigned), 1061 (i32 bit_0) /* subtract */, 1062 (i32 X) /* exchange */, 1063 (i32 0) /* accumulator */, 1064 (VTI.Vec MQPR:$Qn), (VTI.Vec MQPR:$Qm))), 1065 (i32 (!cast<Instruction>(NAME # x # VTI.Suffix) 1066 (VTI.Vec MQPR:$Qn), (VTI.Vec MQPR:$Qm)))>; 1067 1068 def : Pat<(i32 (int_arm_mve_vmldava_predicated 1069 (i32 VTI.Unsigned), 1070 (i32 bit_0) /* subtract */, 1071 (i32 X) /* exchange */, 1072 (i32 0) /* accumulator */, 1073 (VTI.Vec MQPR:$Qn), (VTI.Vec MQPR:$Qm), 1074 (VTI.Pred VCCR:$mask))), 1075 (i32 (!cast<Instruction>(NAME # x # VTI.Suffix) 1076 (VTI.Vec MQPR:$Qn), (VTI.Vec MQPR:$Qm), 1077 ARMVCCThen, (VTI.Pred VCCR:$mask)))>; 1078 1079 def : Pat<(i32 (int_arm_mve_vmldava 1080 (i32 VTI.Unsigned), 1081 (i32 bit_0) /* subtract */, 1082 (i32 X) /* exchange */, 1083 (i32 tGPREven:$RdaSrc), 1084 (VTI.Vec MQPR:$Qn), (VTI.Vec MQPR:$Qm))), 1085 (i32 (!cast<Instruction>(NAME # "a" # x # VTI.Suffix) 1086 (i32 tGPREven:$RdaSrc), 1087 (VTI.Vec MQPR:$Qn), (VTI.Vec MQPR:$Qm)))>; 1088 1089 def : Pat<(i32 (int_arm_mve_vmldava_predicated 1090 (i32 VTI.Unsigned), 1091 (i32 bit_0) /* subtract */, 1092 (i32 X) /* exchange */, 1093 (i32 tGPREven:$RdaSrc), 1094 (VTI.Vec MQPR:$Qn), (VTI.Vec MQPR:$Qm), 1095 (VTI.Pred VCCR:$mask))), 1096 (i32 (!cast<Instruction>(NAME # "a" # x # VTI.Suffix) 1097 (i32 tGPREven:$RdaSrc), 1098 (VTI.Vec MQPR:$Qn), (VTI.Vec MQPR:$Qm), 1099 ARMVCCThen, 
(VTI.Pred VCCR:$mask)))>; 1100 } 1101} 1102 1103multiclass MVE_VMLAMLSDAV_AX<string iname, MVEVectorVTInfo VTI, bit sz, 1104 bit bit_28, bit bit_8, bit bit_0> { 1105 defm "" : MVE_VMLAMLSDAV_A<iname, "", VTI, sz, bit_28, 1106 0b0, bit_8, bit_0>; 1107 defm "" : MVE_VMLAMLSDAV_A<iname, "x", VTI, sz, bit_28, 1108 0b1, bit_8, bit_0>; 1109} 1110 1111multiclass MVE_VMLADAV_multi<MVEVectorVTInfo SVTI, MVEVectorVTInfo UVTI, 1112 bit sz, bit bit_8> { 1113 defm "" : MVE_VMLAMLSDAV_AX<"vmladav", SVTI, 1114 sz, 0b0, bit_8, 0b0>; 1115 defm "" : MVE_VMLAMLSDAV_A<"vmladav", "", UVTI, 1116 sz, 0b1, 0b0, bit_8, 0b0>; 1117} 1118 1119multiclass MVE_VMLSDAV_multi<MVEVectorVTInfo VTI, bit sz, bit bit_28> { 1120 defm "" : MVE_VMLAMLSDAV_AX<"vmlsdav", VTI, 1121 sz, bit_28, 0b0, 0b1>; 1122} 1123 1124defm MVE_VMLADAV : MVE_VMLADAV_multi<MVE_v16s8, MVE_v16u8, 0b0, 0b1>; 1125defm MVE_VMLADAV : MVE_VMLADAV_multi<MVE_v8s16, MVE_v8u16, 0b0, 0b0>; 1126defm MVE_VMLADAV : MVE_VMLADAV_multi<MVE_v4s32, MVE_v4u32, 0b1, 0b0>; 1127 1128defm MVE_VMLSDAV : MVE_VMLSDAV_multi<MVE_v16s8, 0b0, 0b1>; 1129defm MVE_VMLSDAV : MVE_VMLSDAV_multi<MVE_v8s16, 0b0, 0b0>; 1130defm MVE_VMLSDAV : MVE_VMLSDAV_multi<MVE_v4s32, 0b1, 0b0>; 1131 1132def SDTVecReduce2 : SDTypeProfile<1, 2, [ // VMLAV 1133 SDTCisInt<0>, SDTCisVec<1>, SDTCisVec<2> 1134]>; 1135def SDTVecReduce2L : SDTypeProfile<2, 2, [ // VMLALV 1136 SDTCisInt<0>, SDTCisInt<1>, SDTCisVec<2>, SDTCisVec<3> 1137]>; 1138def SDTVecReduce2LA : SDTypeProfile<2, 4, [ // VMLALVA 1139 SDTCisInt<0>, SDTCisInt<1>, SDTCisInt<2>, SDTCisInt<3>, 1140 SDTCisVec<4>, SDTCisVec<5> 1141]>; 1142def SDTVecReduce2P : SDTypeProfile<1, 3, [ // VMLAVp 1143 SDTCisInt<0>, SDTCisVec<1>, SDTCisVec<2>, SDTCisVec<3> 1144]>; 1145def SDTVecReduce2LP : SDTypeProfile<2, 3, [ // VMLALVp 1146 SDTCisInt<0>, SDTCisInt<1>, SDTCisVec<2>, SDTCisVec<3>, SDTCisVec<4> 1147]>; 1148def SDTVecReduce2LAP : SDTypeProfile<2, 5, [ // VMLALVAp 1149 SDTCisInt<0>, SDTCisInt<1>, SDTCisInt<2>, SDTCisInt<3>, 1150 
SDTCisVec<4>, SDTCisVec<5>, SDTCisVec<6> 1151]>; 1152def ARMVMLAVs : SDNode<"ARMISD::VMLAVs", SDTVecReduce2>; 1153def ARMVMLAVu : SDNode<"ARMISD::VMLAVu", SDTVecReduce2>; 1154def ARMVMLALVs : SDNode<"ARMISD::VMLALVs", SDTVecReduce2L>; 1155def ARMVMLALVu : SDNode<"ARMISD::VMLALVu", SDTVecReduce2L>; 1156def ARMVMLALVAs : SDNode<"ARMISD::VMLALVAs", SDTVecReduce2LA>; 1157def ARMVMLALVAu : SDNode<"ARMISD::VMLALVAu", SDTVecReduce2LA>; 1158def ARMVMLAVps : SDNode<"ARMISD::VMLAVps", SDTVecReduce2P>; 1159def ARMVMLAVpu : SDNode<"ARMISD::VMLAVpu", SDTVecReduce2P>; 1160def ARMVMLALVps : SDNode<"ARMISD::VMLALVps", SDTVecReduce2LP>; 1161def ARMVMLALVpu : SDNode<"ARMISD::VMLALVpu", SDTVecReduce2LP>; 1162def ARMVMLALVAps : SDNode<"ARMISD::VMLALVAps", SDTVecReduce2LAP>; 1163def ARMVMLALVApu : SDNode<"ARMISD::VMLALVApu", SDTVecReduce2LAP>; 1164 1165let Predicates = [HasMVEInt] in { 1166 def : Pat<(i32 (vecreduce_add (mul (v4i32 MQPR:$src1), (v4i32 MQPR:$src2)))), 1167 (i32 (MVE_VMLADAVu32 $src1, $src2))>; 1168 def : Pat<(i32 (vecreduce_add (mul (v8i16 MQPR:$src1), (v8i16 MQPR:$src2)))), 1169 (i32 (MVE_VMLADAVu16 $src1, $src2))>; 1170 def : Pat<(i32 (ARMVMLAVs (v8i16 MQPR:$val1), (v8i16 MQPR:$val2))), 1171 (i32 (MVE_VMLADAVs16 (v8i16 MQPR:$val1), (v8i16 MQPR:$val2)))>; 1172 def : Pat<(i32 (ARMVMLAVu (v8i16 MQPR:$val1), (v8i16 MQPR:$val2))), 1173 (i32 (MVE_VMLADAVu16 (v8i16 MQPR:$val1), (v8i16 MQPR:$val2)))>; 1174 def : Pat<(i32 (vecreduce_add (mul (v16i8 MQPR:$src1), (v16i8 MQPR:$src2)))), 1175 (i32 (MVE_VMLADAVu8 $src1, $src2))>; 1176 def : Pat<(i32 (ARMVMLAVs (v16i8 MQPR:$val1), (v16i8 MQPR:$val2))), 1177 (i32 (MVE_VMLADAVs8 (v16i8 MQPR:$val1), (v16i8 MQPR:$val2)))>; 1178 def : Pat<(i32 (ARMVMLAVu (v16i8 MQPR:$val1), (v16i8 MQPR:$val2))), 1179 (i32 (MVE_VMLADAVu8 (v16i8 MQPR:$val1), (v16i8 MQPR:$val2)))>; 1180 1181 def : Pat<(i32 (add (i32 (vecreduce_add (mul (v4i32 MQPR:$src1), (v4i32 MQPR:$src2)))), 1182 (i32 tGPREven:$src3))), 1183 (i32 (MVE_VMLADAVau32 $src3, $src1, $src2))>; 
// Accumulating and predicated VMLADAV patterns: add-of-dot-product folds into
// the accumulating "a" instruction variants, and vecreduce_add of a
// vselect-masked mul (false lanes forced to ARMimmAllZerosV) folds into the
// predicated (ARMVCCThen) forms. Followed by the vmlav->vmladav assembly
// aliases and the start of MVE_VMLALDAVBase, the shared encoding class for
// VMLALDAV/VMLSLDAV/VRMLALDAVH/VRMLSLDAVH (64-bit results in RdaLo/RdaHi).
1184 def : Pat<(i32 (add (i32 (vecreduce_add (mul (v8i16 MQPR:$src1), (v8i16 MQPR:$src2)))), 1185 (i32 tGPREven:$src3))), 1186 (i32 (MVE_VMLADAVau16 $src3, $src1, $src2))>; 1187 def : Pat<(i32 (add (ARMVMLAVs (v8i16 MQPR:$val1), (v8i16 MQPR:$val2)), tGPREven:$Rd)), 1188 (i32 (MVE_VMLADAVas16 tGPREven:$Rd, (v8i16 MQPR:$val1), (v8i16 MQPR:$val2)))>; 1189 def : Pat<(i32 (add (ARMVMLAVu (v8i16 MQPR:$val1), (v8i16 MQPR:$val2)), tGPREven:$Rd)), 1190 (i32 (MVE_VMLADAVau16 tGPREven:$Rd, (v8i16 MQPR:$val1), (v8i16 MQPR:$val2)))>; 1191 def : Pat<(i32 (add (i32 (vecreduce_add (mul (v16i8 MQPR:$src1), (v16i8 MQPR:$src2)))), 1192 (i32 tGPREven:$src3))), 1193 (i32 (MVE_VMLADAVau8 $src3, $src1, $src2))>; 1194 def : Pat<(i32 (add (ARMVMLAVs (v16i8 MQPR:$val1), (v16i8 MQPR:$val2)), tGPREven:$Rd)), 1195 (i32 (MVE_VMLADAVas8 tGPREven:$Rd, (v16i8 MQPR:$val1), (v16i8 MQPR:$val2)))>; 1196 def : Pat<(i32 (add (ARMVMLAVu (v16i8 MQPR:$val1), (v16i8 MQPR:$val2)), tGPREven:$Rd)), 1197 (i32 (MVE_VMLADAVau8 tGPREven:$Rd, (v16i8 MQPR:$val1), (v16i8 MQPR:$val2)))>; 1198 1199 // Predicated 1200 def : Pat<(i32 (vecreduce_add (vselect (v4i1 VCCR:$pred), 1201 (mul (v4i32 MQPR:$src1), (v4i32 MQPR:$src2)), 1202 (v4i32 ARMimmAllZerosV)))), 1203 (i32 (MVE_VMLADAVu32 $src1, $src2, ARMVCCThen, $pred))>; 1204 def : Pat<(i32 (vecreduce_add (vselect (v8i1 VCCR:$pred), 1205 (mul (v8i16 MQPR:$src1), (v8i16 MQPR:$src2)), 1206 (v8i16 ARMimmAllZerosV)))), 1207 (i32 (MVE_VMLADAVu16 $src1, $src2, ARMVCCThen, $pred))>; 1208 def : Pat<(i32 (ARMVMLAVps (v8i16 MQPR:$val1), (v8i16 MQPR:$val2), (v8i1 VCCR:$pred))), 1209 (i32 (MVE_VMLADAVs16 (v8i16 MQPR:$val1), (v8i16 MQPR:$val2), ARMVCCThen, $pred))>; 1210 def : Pat<(i32 (ARMVMLAVpu (v8i16 MQPR:$val1), (v8i16 MQPR:$val2), (v8i1 VCCR:$pred))), 1211 (i32 (MVE_VMLADAVu16 (v8i16 MQPR:$val1), (v8i16 MQPR:$val2), ARMVCCThen, $pred))>; 1212 def : Pat<(i32 (vecreduce_add (vselect (v16i1 VCCR:$pred), 1213 (mul (v16i8 MQPR:$src1), (v16i8 MQPR:$src2)), 1214 (v16i8 
ARMimmAllZerosV)))), 1215 (i32 (MVE_VMLADAVu8 $src1, $src2, ARMVCCThen, $pred))>; 1216 def : Pat<(i32 (ARMVMLAVps (v16i8 MQPR:$val1), (v16i8 MQPR:$val2), (v16i1 VCCR:$pred))), 1217 (i32 (MVE_VMLADAVs8 (v16i8 MQPR:$val1), (v16i8 MQPR:$val2), ARMVCCThen, $pred))>; 1218 def : Pat<(i32 (ARMVMLAVpu (v16i8 MQPR:$val1), (v16i8 MQPR:$val2), (v16i1 VCCR:$pred))), 1219 (i32 (MVE_VMLADAVu8 (v16i8 MQPR:$val1), (v16i8 MQPR:$val2), ARMVCCThen, $pred))>; 1220 1221 def : Pat<(i32 (add (i32 (vecreduce_add (vselect (v4i1 VCCR:$pred), 1222 (mul (v4i32 MQPR:$src1), (v4i32 MQPR:$src2)), 1223 (v4i32 ARMimmAllZerosV)))), 1224 (i32 tGPREven:$src3))), 1225 (i32 (MVE_VMLADAVau32 $src3, $src1, $src2, ARMVCCThen, $pred))>; 1226 def : Pat<(i32 (add (i32 (vecreduce_add (vselect (v8i1 VCCR:$pred), 1227 (mul (v8i16 MQPR:$src1), (v8i16 MQPR:$src2)), 1228 (v8i16 ARMimmAllZerosV)))), 1229 (i32 tGPREven:$src3))), 1230 (i32 (MVE_VMLADAVau16 $src3, $src1, $src2, ARMVCCThen, $pred))>; 1231 def : Pat<(i32 (add (ARMVMLAVps (v8i16 MQPR:$val1), (v8i16 MQPR:$val2), (v8i1 VCCR:$pred)), tGPREven:$Rd)), 1232 (i32 (MVE_VMLADAVas16 tGPREven:$Rd, (v8i16 MQPR:$val1), (v8i16 MQPR:$val2), ARMVCCThen, $pred))>; 1233 def : Pat<(i32 (add (ARMVMLAVpu (v8i16 MQPR:$val1), (v8i16 MQPR:$val2), (v8i1 VCCR:$pred)), tGPREven:$Rd)), 1234 (i32 (MVE_VMLADAVau16 tGPREven:$Rd, (v8i16 MQPR:$val1), (v8i16 MQPR:$val2), ARMVCCThen, $pred))>; 1235 def : Pat<(i32 (add (i32 (vecreduce_add (vselect (v16i1 VCCR:$pred), 1236 (mul (v16i8 MQPR:$src1), (v16i8 MQPR:$src2)), 1237 (v16i8 ARMimmAllZerosV)))), 1238 (i32 tGPREven:$src3))), 1239 (i32 (MVE_VMLADAVau8 $src3, $src1, $src2, ARMVCCThen, $pred))>; 1240 def : Pat<(i32 (add (ARMVMLAVps (v16i8 MQPR:$val1), (v16i8 MQPR:$val2), (v16i1 VCCR:$pred)), tGPREven:$Rd)), 1241 (i32 (MVE_VMLADAVas8 tGPREven:$Rd, (v16i8 MQPR:$val1), (v16i8 MQPR:$val2), ARMVCCThen, $pred))>; 1242 def : Pat<(i32 (add (ARMVMLAVpu (v16i8 MQPR:$val1), (v16i8 MQPR:$val2), (v16i1 VCCR:$pred)), tGPREven:$Rd)), 1243 (i32 
(MVE_VMLADAVau8 tGPREven:$Rd, (v16i8 MQPR:$val1), (v16i8 MQPR:$val2), ARMVCCThen, $pred))>; 1244} 1245 1246// vmlav aliases vmladav 1247foreach acc = ["", "a"] in { 1248 foreach suffix = ["s8", "s16", "s32", "u8", "u16", "u32"] in { 1249 def : MVEInstAlias<"vmlav"#acc#"${vp}."#suffix#"\t$RdaDest, $Qn, $Qm", 1250 (!cast<Instruction>("MVE_VMLADAV"#acc#suffix) 1251 tGPREven:$RdaDest, MQPR:$Qn, MQPR:$Qm, vpred_n:$vp)>; 1252 } 1253} 1254 1255// Base class for VMLALDAV and VMLSLDAV, VRMLALDAVH, VRMLSLDAVH 1256class MVE_VMLALDAVBase<string iname, string suffix, dag iops, string cstr, 1257 bit sz, bit bit_28, bit A, bit X, bit bit_8, bit bit_0, 1258 list<dag> pattern=[]> 1259 : MVE_rDest<(outs tGPREven:$RdaLoDest, tGPROdd:$RdaHiDest), iops, NoItinerary, 1260 iname, suffix, "$RdaLoDest, $RdaHiDest, $Qn, $Qm", cstr, pattern> { 1261 bits<4> RdaLoDest; 1262 bits<4> RdaHiDest; 1263 bits<3> Qm; 1264 bits<3> Qn; 1265 1266 let Inst{28} = bit_28; 1267 let Inst{22-20} = RdaHiDest{3-1}; 1268 let Inst{19-17} = Qn{2-0}; 1269 let Inst{16} = sz; 1270 let Inst{15-13} = RdaLoDest{3-1}; 1271 let Inst{12} = X; 1272 let Inst{8} = bit_8; 1273 let Inst{7-6} = 0b00; 1274 let Inst{5} = A; 1275 let Inst{3-1} = Qm{2-0}; 1276 let Inst{0} = bit_0; 1277 let horizontalReduction = 1; 1278 // Allow tail predication for non-exchanging versions. As this is also a 1279 // horizontalReduction, ARMLowOverheadLoops will also have to check that 1280 // the vector operands contain zeros in their false lanes for the instruction 1281 // to be properly valid. 
// VMLALDAV multiclass plumbing: _A instantiates plain + accumulating ("a")
// variants, _AX additionally the exchanging ("x") variant (signed only);
// VRMLALDAVH (rounding, high-half) instantiations and their vrmlalvh aliases;
// then VMLALDAV (16/32-bit, widening to a 64-bit RdaLo/RdaHi pair) and the
// (predicated) ARMVMLALV*/ARMVMLALVA* selection patterns.
1282 let validForTailPredication = !eq(X, 0); 1283 1284 let hasSideEffects = 0; 1285} 1286 1287multiclass MVE_VMLALDAVBase_A<string iname, string x, string suffix, 1288 bit sz, bit bit_28, bit X, bit bit_8, bit bit_0, 1289 list<dag> pattern=[]> { 1290 def ""#x#suffix : MVE_VMLALDAVBase< 1291 iname # x, suffix, (ins MQPR:$Qn, MQPR:$Qm), "", 1292 sz, bit_28, 0b0, X, bit_8, bit_0, pattern>; 1293 def "a"#x#suffix : MVE_VMLALDAVBase< 1294 iname # "a" # x, suffix, 1295 (ins tGPREven:$RdaLoSrc, tGPROdd:$RdaHiSrc, MQPR:$Qn, MQPR:$Qm), 1296 "$RdaLoDest = $RdaLoSrc,$RdaHiDest = $RdaHiSrc", 1297 sz, bit_28, 0b1, X, bit_8, bit_0, pattern>; 1298} 1299 1300 1301multiclass MVE_VMLALDAVBase_AX<string iname, string suffix, bit sz, bit bit_28, 1302 bit bit_8, bit bit_0, list<dag> pattern=[]> { 1303 defm "" : MVE_VMLALDAVBase_A<iname, "", suffix, sz, 1304 bit_28, 0b0, bit_8, bit_0, pattern>; 1305 defm "" : MVE_VMLALDAVBase_A<iname, "x", suffix, sz, 1306 bit_28, 0b1, bit_8, bit_0, pattern>; 1307} 1308 1309multiclass MVE_VRMLALDAVH_multi<string suffix, list<dag> pattern=[]> { 1310 defm "" : MVE_VMLALDAVBase_AX<"vrmlaldavh", "s"#suffix, 1311 0b0, 0b0, 0b1, 0b0, pattern>; 1312 defm "" : MVE_VMLALDAVBase_A<"vrmlaldavh", "", "u"#suffix, 1313 0b0, 0b1, 0b0, 0b1, 0b0, pattern>; 1314} 1315 1316defm MVE_VRMLALDAVH : MVE_VRMLALDAVH_multi<"32">; 1317 1318// vrmlalvh aliases for vrmlaldavh 1319def : MVEInstAlias<"vrmlalvh${vp}.s32\t$RdaLo, $RdaHi, $Qn, $Qm", 1320 (MVE_VRMLALDAVHs32 1321 tGPREven:$RdaLo, tGPROdd:$RdaHi, 1322 MQPR:$Qn, MQPR:$Qm, vpred_n:$vp)>; 1323def : MVEInstAlias<"vrmlalvha${vp}.s32\t$RdaLo, $RdaHi, $Qn, $Qm", 1324 (MVE_VRMLALDAVHas32 1325 tGPREven:$RdaLo, tGPROdd:$RdaHi, 1326 MQPR:$Qn, MQPR:$Qm, vpred_n:$vp)>; 1327def : MVEInstAlias<"vrmlalvh${vp}.u32\t$RdaLo, $RdaHi, $Qn, $Qm", 1328 (MVE_VRMLALDAVHu32 1329 tGPREven:$RdaLo, tGPROdd:$RdaHi, 1330 MQPR:$Qn, MQPR:$Qm, vpred_n:$vp)>; 1331def : MVEInstAlias<"vrmlalvha${vp}.u32\t$RdaLo, $RdaHi, $Qn, $Qm", 1332 (MVE_VRMLALDAVHau32 1333 
tGPREven:$RdaLo, tGPROdd:$RdaHi, 1334 MQPR:$Qn, MQPR:$Qm, vpred_n:$vp)>; 1335 1336multiclass MVE_VMLALDAV_multi<string suffix, bit sz, list<dag> pattern=[]> { 1337 defm "" : MVE_VMLALDAVBase_AX<"vmlaldav", "s"#suffix, sz, 0b0, 0b0, 0b0, pattern>; 1338 defm "" : MVE_VMLALDAVBase_A<"vmlaldav", "", "u"#suffix, 1339 sz, 0b1, 0b0, 0b0, 0b0, pattern>; 1340} 1341 1342defm MVE_VMLALDAV : MVE_VMLALDAV_multi<"16", 0b0>; 1343defm MVE_VMLALDAV : MVE_VMLALDAV_multi<"32", 0b1>; 1344 1345let Predicates = [HasMVEInt] in { 1346 def : Pat<(ARMVMLALVs (v4i32 MQPR:$val1), (v4i32 MQPR:$val2)), 1347 (MVE_VMLALDAVs32 (v4i32 MQPR:$val1), (v4i32 MQPR:$val2))>; 1348 def : Pat<(ARMVMLALVu (v4i32 MQPR:$val1), (v4i32 MQPR:$val2)), 1349 (MVE_VMLALDAVu32 (v4i32 MQPR:$val1), (v4i32 MQPR:$val2))>; 1350 def : Pat<(ARMVMLALVs (v8i16 MQPR:$val1), (v8i16 MQPR:$val2)), 1351 (MVE_VMLALDAVs16 (v8i16 MQPR:$val1), (v8i16 MQPR:$val2))>; 1352 def : Pat<(ARMVMLALVu (v8i16 MQPR:$val1), (v8i16 MQPR:$val2)), 1353 (MVE_VMLALDAVu16 (v8i16 MQPR:$val1), (v8i16 MQPR:$val2))>; 1354 1355 def : Pat<(ARMVMLALVAs tGPREven:$Rda, tGPROdd:$Rdb, (v4i32 MQPR:$val1), (v4i32 MQPR:$val2)), 1356 (MVE_VMLALDAVas32 tGPREven:$Rda, tGPROdd:$Rdb, (v4i32 MQPR:$val1), (v4i32 MQPR:$val2))>; 1357 def : Pat<(ARMVMLALVAu tGPREven:$Rda, tGPROdd:$Rdb, (v4i32 MQPR:$val1), (v4i32 MQPR:$val2)), 1358 (MVE_VMLALDAVau32 tGPREven:$Rda, tGPROdd:$Rdb, (v4i32 MQPR:$val1), (v4i32 MQPR:$val2))>; 1359 def : Pat<(ARMVMLALVAs tGPREven:$Rda, tGPROdd:$Rdb, (v8i16 MQPR:$val1), (v8i16 MQPR:$val2)), 1360 (MVE_VMLALDAVas16 tGPREven:$Rda, tGPROdd:$Rdb, (v8i16 MQPR:$val1), (v8i16 MQPR:$val2))>; 1361 def : Pat<(ARMVMLALVAu tGPREven:$Rda, tGPROdd:$Rdb, (v8i16 MQPR:$val1), (v8i16 MQPR:$val2)), 1362 (MVE_VMLALDAVau16 tGPREven:$Rda, tGPROdd:$Rdb, (v8i16 MQPR:$val1), (v8i16 MQPR:$val2))>; 1363 1364 // Predicated 1365 def : Pat<(ARMVMLALVps (v4i32 MQPR:$val1), (v4i32 MQPR:$val2), (v4i1 VCCR:$pred)), 1366 (MVE_VMLALDAVs32 (v4i32 MQPR:$val1), (v4i32 MQPR:$val2), ARMVCCThen, 
$pred)>; 1367 def : Pat<(ARMVMLALVpu (v4i32 MQPR:$val1), (v4i32 MQPR:$val2), (v4i1 VCCR:$pred)), 1368 (MVE_VMLALDAVu32 (v4i32 MQPR:$val1), (v4i32 MQPR:$val2), ARMVCCThen, $pred)>; 1369 def : Pat<(ARMVMLALVps (v8i16 MQPR:$val1), (v8i16 MQPR:$val2), (v8i1 VCCR:$pred)), 1370 (MVE_VMLALDAVs16 (v8i16 MQPR:$val1), (v8i16 MQPR:$val2), ARMVCCThen, $pred)>; 1371 def : Pat<(ARMVMLALVpu (v8i16 MQPR:$val1), (v8i16 MQPR:$val2), (v8i1 VCCR:$pred)), 1372 (MVE_VMLALDAVu16 (v8i16 MQPR:$val1), (v8i16 MQPR:$val2), ARMVCCThen, $pred)>; 1373 1374 def : Pat<(ARMVMLALVAps tGPREven:$Rda, tGPROdd:$Rdb, (v4i32 MQPR:$val1), (v4i32 MQPR:$val2), (v4i1 VCCR:$pred)), 1375 (MVE_VMLALDAVas32 tGPREven:$Rda, tGPROdd:$Rdb, (v4i32 MQPR:$val1), (v4i32 MQPR:$val2), ARMVCCThen, $pred)>; 1376 def : Pat<(ARMVMLALVApu tGPREven:$Rda, tGPROdd:$Rdb, (v4i32 MQPR:$val1), (v4i32 MQPR:$val2), (v4i1 VCCR:$pred)), 1377 (MVE_VMLALDAVau32 tGPREven:$Rda, tGPROdd:$Rdb, (v4i32 MQPR:$val1), (v4i32 MQPR:$val2), ARMVCCThen, $pred)>; 1378 def : Pat<(ARMVMLALVAps tGPREven:$Rda, tGPROdd:$Rdb, (v8i16 MQPR:$val1), (v8i16 MQPR:$val2), (v8i1 VCCR:$pred)), 1379 (MVE_VMLALDAVas16 tGPREven:$Rda, tGPROdd:$Rdb, (v8i16 MQPR:$val1), (v8i16 MQPR:$val2), ARMVCCThen, $pred)>; 1380 def : Pat<(ARMVMLALVApu tGPREven:$Rda, tGPROdd:$Rdb, (v8i16 MQPR:$val1), (v8i16 MQPR:$val2), (v8i1 VCCR:$pred)), 1381 (MVE_VMLALDAVau16 tGPREven:$Rda, tGPROdd:$Rdb, (v8i16 MQPR:$val1), (v8i16 MQPR:$val2), ARMVCCThen, $pred)>; 1382} 1383 1384// vmlalv aliases vmlaldav 1385foreach acc = ["", "a"] in { 1386 foreach suffix = ["s16", "s32", "u16", "u32"] in { 1387 def : MVEInstAlias<"vmlalv" # acc # "${vp}." 
// vmlalv->vmlaldav aliases; VMLSLDAV/VRMLSLDAVH (subtracting long dot
// products, signed only); end of the mve_rDest section. Then the mve_comp
// section: the MVE_comp base encoding class (Qd = op(Qn, Qm)),
// MVE_VMINMAXNM (float, NaN-propagation-free min/max via fminnum/fmaxnum)
// and MVE_VMINMAX (integer min/max via umin/smin/umax/smax), both wired
// through MVE_TwoOpPattern with their predicated intrinsics.
# suffix # 1388 "\t$RdaLoDest, $RdaHiDest, $Qn, $Qm", 1389 (!cast<Instruction>("MVE_VMLALDAV"#acc#suffix) 1390 tGPREven:$RdaLoDest, tGPROdd:$RdaHiDest, 1391 MQPR:$Qn, MQPR:$Qm, vpred_n:$vp)>; 1392 } 1393} 1394 1395multiclass MVE_VMLSLDAV_multi<string iname, string suffix, bit sz, 1396 bit bit_28, list<dag> pattern=[]> { 1397 defm "" : MVE_VMLALDAVBase_AX<iname, suffix, sz, bit_28, 0b0, 0b1, pattern>; 1398} 1399 1400defm MVE_VMLSLDAV : MVE_VMLSLDAV_multi<"vmlsldav", "s16", 0b0, 0b0>; 1401defm MVE_VMLSLDAV : MVE_VMLSLDAV_multi<"vmlsldav", "s32", 0b1, 0b0>; 1402defm MVE_VRMLSLDAVH : MVE_VMLSLDAV_multi<"vrmlsldavh", "s32", 0b0, 0b1>; 1403 1404// end of mve_rDest instructions 1405 1406// start of mve_comp instructions 1407 1408class MVE_comp<InstrItinClass itin, string iname, string suffix, 1409 string cstr, list<dag> pattern=[]> 1410 : MVE_p<(outs MQPR:$Qd), (ins MQPR:$Qn, MQPR:$Qm), itin, iname, suffix, 1411 "$Qd, $Qn, $Qm", vpred_r, cstr, pattern> { 1412 bits<4> Qd; 1413 bits<4> Qn; 1414 bits<4> Qm; 1415 1416 let Inst{22} = Qd{3}; 1417 let Inst{19-17} = Qn{2-0}; 1418 let Inst{16} = 0b0; 1419 let Inst{15-13} = Qd{2-0}; 1420 let Inst{12} = 0b0; 1421 let Inst{10-9} = 0b11; 1422 let Inst{7} = Qn{3}; 1423 let Inst{5} = Qm{3}; 1424 let Inst{3-1} = Qm{2-0}; 1425 let Inst{0} = 0b0; 1426} 1427 1428class MVE_VMINMAXNM<string iname, string suffix, bit sz, bit bit_21, 1429 list<dag> pattern=[]> 1430 : MVE_comp<NoItinerary, iname, suffix, "", pattern> { 1431 1432 let Inst{28} = 0b1; 1433 let Inst{25-24} = 0b11; 1434 let Inst{23} = 0b0; 1435 let Inst{21} = bit_21; 1436 let Inst{20} = sz; 1437 let Inst{11} = 0b1; 1438 let Inst{8} = 0b1; 1439 let Inst{6} = 0b1; 1440 let Inst{4} = 0b1; 1441 1442 let Predicates = [HasMVEFloat]; 1443} 1444 1445multiclass MVE_VMINMAXNM_m<string iname, bit bit_4, MVEVectorVTInfo VTI, SDNode Op, Intrinsic PredInt> { 1446 def "" : MVE_VMINMAXNM<iname, VTI.Suffix, VTI.Size{0}, bit_4>; 1447 1448 let Predicates = [HasMVEFloat] in { 1449 defm : 
MVE_TwoOpPattern<VTI, Op, PredInt, (? (i32 0)), !cast<Instruction>(NAME)>; 1450 } 1451} 1452 1453defm MVE_VMAXNMf32 : MVE_VMINMAXNM_m<"vmaxnm", 0b0, MVE_v4f32, fmaxnum, int_arm_mve_max_predicated>; 1454defm MVE_VMAXNMf16 : MVE_VMINMAXNM_m<"vmaxnm", 0b0, MVE_v8f16, fmaxnum, int_arm_mve_max_predicated>; 1455defm MVE_VMINNMf32 : MVE_VMINMAXNM_m<"vminnm", 0b1, MVE_v4f32, fminnum, int_arm_mve_min_predicated>; 1456defm MVE_VMINNMf16 : MVE_VMINMAXNM_m<"vminnm", 0b1, MVE_v8f16, fminnum, int_arm_mve_min_predicated>; 1457 1458 1459class MVE_VMINMAX<string iname, string suffix, bit U, bits<2> size, 1460 bit bit_4, list<dag> pattern=[]> 1461 : MVE_comp<NoItinerary, iname, suffix, "", pattern> { 1462 1463 let Inst{28} = U; 1464 let Inst{25-24} = 0b11; 1465 let Inst{23} = 0b0; 1466 let Inst{21-20} = size{1-0}; 1467 let Inst{11} = 0b0; 1468 let Inst{8} = 0b0; 1469 let Inst{6} = 0b1; 1470 let Inst{4} = bit_4; 1471 let validForTailPredication = 1; 1472} 1473 1474multiclass MVE_VMINMAX_m<string iname, bit bit_4, MVEVectorVTInfo VTI, 1475 SDNode Op, Intrinsic PredInt> { 1476 def "" : MVE_VMINMAX<iname, VTI.Suffix, VTI.Unsigned, VTI.Size, bit_4>; 1477 1478 let Predicates = [HasMVEInt] in { 1479 defm : MVE_TwoOpPattern<VTI, Op, PredInt, (? 
(i32 VTI.Unsigned)), !cast<Instruction>(NAME)>; 1480 } 1481} 1482 1483multiclass MVE_VMAX<MVEVectorVTInfo VTI> 1484 : MVE_VMINMAX_m<"vmax", 0b0, VTI, !if(VTI.Unsigned, umax, smax), int_arm_mve_max_predicated>; 1485multiclass MVE_VMIN<MVEVectorVTInfo VTI> 1486 : MVE_VMINMAX_m<"vmin", 0b1, VTI, !if(VTI.Unsigned, umin, smin), int_arm_mve_min_predicated>; 1487 1488defm MVE_VMINs8 : MVE_VMIN<MVE_v16s8>; 1489defm MVE_VMINs16 : MVE_VMIN<MVE_v8s16>; 1490defm MVE_VMINs32 : MVE_VMIN<MVE_v4s32>; 1491defm MVE_VMINu8 : MVE_VMIN<MVE_v16u8>; 1492defm MVE_VMINu16 : MVE_VMIN<MVE_v8u16>; 1493defm MVE_VMINu32 : MVE_VMIN<MVE_v4u32>; 1494 1495defm MVE_VMAXs8 : MVE_VMAX<MVE_v16s8>; 1496defm MVE_VMAXs16 : MVE_VMAX<MVE_v8s16>; 1497defm MVE_VMAXs32 : MVE_VMAX<MVE_v4s32>; 1498defm MVE_VMAXu8 : MVE_VMAX<MVE_v16u8>; 1499defm MVE_VMAXu16 : MVE_VMAX<MVE_v8u16>; 1500defm MVE_VMAXu32 : MVE_VMAX<MVE_v4u32>; 1501 1502// end of mve_comp instructions 1503 1504// start of mve_bit instructions 1505 1506class MVE_bit_arith<dag oops, dag iops, string iname, string suffix, 1507 string ops, string cstr, list<dag> pattern=[]> 1508 : MVE_p<oops, iops, NoItinerary, iname, suffix, ops, vpred_r, cstr, pattern> { 1509 bits<4> Qd; 1510 bits<4> Qm; 1511 1512 let Inst{22} = Qd{3}; 1513 let Inst{15-13} = Qd{2-0}; 1514 let Inst{5} = Qm{3}; 1515 let Inst{3-1} = Qm{2-0}; 1516} 1517 1518def MVE_VBIC : MVE_bit_arith<(outs MQPR:$Qd), (ins MQPR:$Qn, MQPR:$Qm), 1519 "vbic", "", "$Qd, $Qn, $Qm", ""> { 1520 bits<4> Qn; 1521 1522 let Inst{28} = 0b0; 1523 let Inst{25-23} = 0b110; 1524 let Inst{21-20} = 0b01; 1525 let Inst{19-17} = Qn{2-0}; 1526 let Inst{16} = 0b0; 1527 let Inst{12-8} = 0b00001; 1528 let Inst{7} = Qn{3}; 1529 let Inst{6} = 0b1; 1530 let Inst{4} = 0b1; 1531 let Inst{0} = 0b0; 1532 let validForTailPredication = 1; 1533} 1534 1535class MVE_VREV<string iname, string suffix, bits<2> size, bits<2> bit_8_7, string cstr=""> 1536 : MVE_bit_arith<(outs MQPR:$Qd), (ins MQPR:$Qm), iname, 1537 suffix, "$Qd, $Qm", cstr> { 
1538 1539 let Inst{28} = 0b1; 1540 let Inst{25-23} = 0b111; 1541 let Inst{21-20} = 0b11; 1542 let Inst{19-18} = size; 1543 let Inst{17-16} = 0b00; 1544 let Inst{12-9} = 0b0000; 1545 let Inst{8-7} = bit_8_7; 1546 let Inst{6} = 0b1; 1547 let Inst{4} = 0b0; 1548 let Inst{0} = 0b0; 1549} 1550 1551def MVE_VREV64_8 : MVE_VREV<"vrev64", "8", 0b00, 0b00, "@earlyclobber $Qd">; 1552def MVE_VREV64_16 : MVE_VREV<"vrev64", "16", 0b01, 0b00, "@earlyclobber $Qd">; 1553def MVE_VREV64_32 : MVE_VREV<"vrev64", "32", 0b10, 0b00, "@earlyclobber $Qd">; 1554 1555def MVE_VREV32_8 : MVE_VREV<"vrev32", "8", 0b00, 0b01>; 1556def MVE_VREV32_16 : MVE_VREV<"vrev32", "16", 0b01, 0b01>; 1557 1558def MVE_VREV16_8 : MVE_VREV<"vrev16", "8", 0b00, 0b10>; 1559 1560let Predicates = [HasMVEInt] in { 1561 def : Pat<(v8i16 (bswap (v8i16 MQPR:$src))), 1562 (v8i16 (MVE_VREV16_8 (v8i16 MQPR:$src)))>; 1563 def : Pat<(v4i32 (bswap (v4i32 MQPR:$src))), 1564 (v4i32 (MVE_VREV32_8 (v4i32 MQPR:$src)))>; 1565} 1566 1567multiclass MVE_VREV_basic_patterns<int revbits, list<MVEVectorVTInfo> VTIs, 1568 Instruction Inst> { 1569 defvar unpred_op = !cast<SDNode>("ARMvrev" # revbits); 1570 1571 foreach VTI = VTIs in { 1572 def : Pat<(VTI.Vec (unpred_op (VTI.Vec MQPR:$src))), 1573 (VTI.Vec (Inst (VTI.Vec MQPR:$src)))>; 1574 def : Pat<(VTI.Vec (int_arm_mve_vrev_predicated (VTI.Vec MQPR:$src), 1575 revbits, (VTI.Pred VCCR:$pred), (VTI.Vec MQPR:$inactive))), 1576 (VTI.Vec (Inst (VTI.Vec MQPR:$src), ARMVCCThen, 1577 (VTI.Pred VCCR:$pred), (VTI.Vec MQPR:$inactive)))>; 1578 } 1579} 1580 1581let Predicates = [HasMVEInt] in { 1582 defm: MVE_VREV_basic_patterns<64, [MVE_v4i32, MVE_v4f32], MVE_VREV64_32>; 1583 defm: MVE_VREV_basic_patterns<64, [MVE_v8i16, MVE_v8f16], MVE_VREV64_16>; 1584 defm: MVE_VREV_basic_patterns<64, [MVE_v16i8 ], MVE_VREV64_8>; 1585 1586 defm: MVE_VREV_basic_patterns<32, [MVE_v8i16, MVE_v8f16], MVE_VREV32_16>; 1587 defm: MVE_VREV_basic_patterns<32, [MVE_v16i8 ], MVE_VREV32_8>; 1588 1589 defm: 
MVE_VREV_basic_patterns<16, [MVE_v16i8           ], MVE_VREV16_8>;
}

// VMVN (register): bitwise NOT of a whole vector.
def MVE_VMVN : MVE_bit_arith<(outs MQPR:$Qd), (ins MQPR:$Qm),
                             "vmvn", "", "$Qd, $Qm", ""> {
  let Inst{28} = 0b1;
  let Inst{25-23} = 0b111;
  let Inst{21-16} = 0b110000;
  let Inst{12-6} = 0b0010111;
  let Inst{4} = 0b0;
  let Inst{0} = 0b0;
  let validForTailPredication = 1;
}

// Unpredicated (vnotq) and predicated (intrinsic) selection for VMVN across
// all the 128-bit integer vector types.
let Predicates = [HasMVEInt] in {
  foreach VTI = [ MVE_v16i8, MVE_v8i16, MVE_v4i32, MVE_v2i64 ] in {
    def : Pat<(VTI.Vec (vnotq (VTI.Vec MQPR:$val1))),
              (VTI.Vec (MVE_VMVN (VTI.Vec MQPR:$val1)))>;
    def : Pat<(VTI.Vec (int_arm_mve_mvn_predicated (VTI.Vec MQPR:$val1),
                            (VTI.Pred VCCR:$pred), (VTI.Vec MQPR:$inactive))),
              (VTI.Vec (MVE_VMVN (VTI.Vec MQPR:$val1), ARMVCCThen,
                            (VTI.Pred VCCR:$pred), (VTI.Vec MQPR:$inactive)))>;
  }
}

// Two-register bitwise ops (VEOR/VORN/VORR/VAND): the specific operation is
// selected by bits {21:20} and bit 28.
class MVE_bit_ops<string iname, bits<2> bit_21_20, bit bit_28>
  : MVE_bit_arith<(outs MQPR:$Qd), (ins MQPR:$Qn, MQPR:$Qm),
                  iname, "", "$Qd, $Qn, $Qm", ""> {
  bits<4> Qn;

  let Inst{28} = bit_28;
  let Inst{25-23} = 0b110;
  let Inst{21-20} = bit_21_20;
  let Inst{19-17} = Qn{2-0};
  let Inst{16} = 0b0;
  let Inst{12-8} = 0b00001;
  let Inst{7} = Qn{3};
  let Inst{6} = 0b1;
  let Inst{4} = 0b1;
  let Inst{0} = 0b0;
  let validForTailPredication = 1;
}

def MVE_VEOR : MVE_bit_ops<"veor", 0b00, 0b1>;
def MVE_VORN : MVE_bit_ops<"vorn", 0b11, 0b0>;
def MVE_VORR : MVE_bit_ops<"vorr", 0b10, 0b0>;
def MVE_VAND : MVE_bit_ops<"vand", 0b00, 0b0>;

// add ignored suffixes as aliases: the bitwise ops accept (and ignore) any
// data-type suffix in assembly.

foreach s=["s8", "s16", "s32", "u8", "u16", "u32", "i8", "i16", "i32", "f16", "f32"] in {
  def : MVEInstAlias<"vbic${vp}." # s # "\t$QdSrc, $QnSrc, $QmSrc",
        (MVE_VBIC MQPR:$QdSrc, MQPR:$QnSrc, MQPR:$QmSrc, vpred_r:$vp)>;
  def : MVEInstAlias<"veor${vp}." # s # "\t$QdSrc, $QnSrc, $QmSrc",
        (MVE_VEOR MQPR:$QdSrc, MQPR:$QnSrc, MQPR:$QmSrc, vpred_r:$vp)>;
  def : MVEInstAlias<"vorn${vp}." # s # "\t$QdSrc, $QnSrc, $QmSrc",
        (MVE_VORN MQPR:$QdSrc, MQPR:$QnSrc, MQPR:$QmSrc, vpred_r:$vp)>;
  def : MVEInstAlias<"vorr${vp}." # s # "\t$QdSrc, $QnSrc, $QmSrc",
        (MVE_VORR MQPR:$QdSrc, MQPR:$QnSrc, MQPR:$QmSrc, vpred_r:$vp)>;
  def : MVEInstAlias<"vand${vp}." # s # "\t$QdSrc, $QnSrc, $QmSrc",
        (MVE_VAND MQPR:$QdSrc, MQPR:$QnSrc, MQPR:$QmSrc, vpred_r:$vp)>;
}

// Unpredicated/predicated selection for the plain bitwise ops; the trailing
// ARMimm* operand is the identity value used by MVE_TwoOpPattern.
let Predicates = [HasMVEInt] in {
  defm : MVE_TwoOpPattern<MVE_v16i8, and, int_arm_mve_and_predicated, (? ), MVE_VAND, ARMimmAllOnesV>;
  defm : MVE_TwoOpPattern<MVE_v8i16, and, int_arm_mve_and_predicated, (? ), MVE_VAND, ARMimmAllOnesV>;
  defm : MVE_TwoOpPattern<MVE_v4i32, and, int_arm_mve_and_predicated, (? ), MVE_VAND, ARMimmAllOnesV>;
  defm : MVE_TwoOpPattern<MVE_v2i64, and, int_arm_mve_and_predicated, (? ), MVE_VAND, ARMimmAllOnesV>;

  defm : MVE_TwoOpPattern<MVE_v16i8, or, int_arm_mve_orr_predicated, (? ), MVE_VORR, ARMimmAllZerosV>;
  defm : MVE_TwoOpPattern<MVE_v8i16, or, int_arm_mve_orr_predicated, (? ), MVE_VORR, ARMimmAllZerosV>;
  defm : MVE_TwoOpPattern<MVE_v4i32, or, int_arm_mve_orr_predicated, (? ), MVE_VORR, ARMimmAllZerosV>;
  defm : MVE_TwoOpPattern<MVE_v2i64, or, int_arm_mve_orr_predicated, (? ), MVE_VORR, ARMimmAllZerosV>;

  defm : MVE_TwoOpPattern<MVE_v16i8, xor, int_arm_mve_eor_predicated, (? ), MVE_VEOR, ARMimmAllZerosV>;
  defm : MVE_TwoOpPattern<MVE_v8i16, xor, int_arm_mve_eor_predicated, (? ), MVE_VEOR, ARMimmAllZerosV>;
  defm : MVE_TwoOpPattern<MVE_v4i32, xor, int_arm_mve_eor_predicated, (? ), MVE_VEOR, ARMimmAllZerosV>;
  defm : MVE_TwoOpPattern<MVE_v2i64, xor, int_arm_mve_eor_predicated, (? ), MVE_VEOR, ARMimmAllZerosV>;

  defm : MVE_TwoOpPattern<MVE_v16i8, BinOpFrag<(and node:$LHS, (vnotq node:$RHS))>,
                          int_arm_mve_bic_predicated, (?
), MVE_VBIC>;
  defm : MVE_TwoOpPattern<MVE_v8i16, BinOpFrag<(and node:$LHS, (vnotq node:$RHS))>,
                          int_arm_mve_bic_predicated, (? ), MVE_VBIC>;
  defm : MVE_TwoOpPattern<MVE_v4i32, BinOpFrag<(and node:$LHS, (vnotq node:$RHS))>,
                          int_arm_mve_bic_predicated, (? ), MVE_VBIC>;
  defm : MVE_TwoOpPattern<MVE_v2i64, BinOpFrag<(and node:$LHS, (vnotq node:$RHS))>,
                          int_arm_mve_bic_predicated, (? ), MVE_VBIC>;

  // VORN = OR with inverted second operand.
  defm : MVE_TwoOpPattern<MVE_v16i8, BinOpFrag<(or node:$LHS, (vnotq node:$RHS))>,
                          int_arm_mve_orn_predicated, (? ), MVE_VORN>;
  defm : MVE_TwoOpPattern<MVE_v8i16, BinOpFrag<(or node:$LHS, (vnotq node:$RHS))>,
                          int_arm_mve_orn_predicated, (? ), MVE_VORN>;
  defm : MVE_TwoOpPattern<MVE_v4i32, BinOpFrag<(or node:$LHS, (vnotq node:$RHS))>,
                          int_arm_mve_orn_predicated, (? ), MVE_VORN>;
  defm : MVE_TwoOpPattern<MVE_v2i64, BinOpFrag<(or node:$LHS, (vnotq node:$RHS))>,
                          int_arm_mve_orn_predicated, (? ), MVE_VORN>;
}

// VORR/VBIC with an encoded ("cmode") immediate. These read and modify $Qd in
// place, hence the "$Qd = $Qd_src" constraint.
class MVE_bit_cmode<string iname, string suffix, bit halfword, dag inOps>
  : MVE_p<(outs MQPR:$Qd), inOps, NoItinerary,
          iname, suffix, "$Qd, $imm", vpred_n, "$Qd = $Qd_src"> {
  bits<12> imm;
  bits<4> Qd;

  let Inst{28} = imm{7};
  let Inst{27-23} = 0b11111;
  let Inst{22} = Qd{3};
  let Inst{21-19} = 0b000;
  let Inst{18-16} = imm{6-4};
  let Inst{15-13} = Qd{2-0};
  let Inst{12} = 0b0;
  let Inst{11} = halfword;
  let Inst{10} = !if(halfword, 0, imm{10});
  let Inst{9} = imm{9};
  let Inst{8} = 0b1;
  let Inst{7-6} = 0b01;
  let Inst{4} = 0b1;
  let Inst{3-0} = imm{3-0};
}

// Instruction plus unpredicated and vselect-predicated isel patterns for one
// immediate-form bitwise op.
multiclass MVE_bit_cmode_p<string iname, bit opcode,
                           MVEVectorVTInfo VTI, Operand imm_type, SDNode op> {
  def "" : MVE_bit_cmode<iname, VTI.Suffix, VTI.Size{0},
                         (ins MQPR:$Qd_src, imm_type:$imm)> {
    let Inst{5} = opcode;
    let validForTailPredication = 1;
  }

  defvar Inst = !cast<Instruction>(NAME);
  defvar UnpredPat = (VTI.Vec (op (VTI.Vec MQPR:$src), timm:$simm));

  let Predicates = [HasMVEInt] in {
    def : Pat<UnpredPat,
              (VTI.Vec (Inst (VTI.Vec MQPR:$src), imm_type:$simm))>;
    def : Pat<(VTI.Vec (vselect (VTI.Pred VCCR:$pred),
                                UnpredPat, (VTI.Vec MQPR:$src))),
              (VTI.Vec (Inst (VTI.Vec MQPR:$src), imm_type:$simm,
                             ARMVCCThen, (VTI.Pred VCCR:$pred)))>;
  }
}

multiclass MVE_VORRimm<MVEVectorVTInfo VTI, Operand imm_type> {
  defm "": MVE_bit_cmode_p<"vorr", 0, VTI, imm_type, ARMvorrImm>;
}
multiclass MVE_VBICimm<MVEVectorVTInfo VTI, Operand imm_type> {
  defm "": MVE_bit_cmode_p<"vbic", 1, VTI, imm_type, ARMvbicImm>;
}

defm MVE_VORRimmi16 : MVE_VORRimm<MVE_v8i16, nImmSplatI16>;
defm MVE_VORRimmi32 : MVE_VORRimm<MVE_v4i32, nImmSplatI32>;
defm MVE_VBICimmi16 : MVE_VBICimm<MVE_v8i16, nImmSplatI16>;
defm MVE_VBICimmi32 : MVE_VBICimm<MVE_v4i32, nImmSplatI32>;

// VORN/VAND with immediate are assembly aliases for VORR/VBIC with the
// bitwise-inverted immediate.
def MVE_VORNimmi16 : MVEInstAlias<"vorn${vp}.i16\t$Qd, $imm",
    (MVE_VORRimmi16 MQPR:$Qd, nImmSplatNotI16:$imm, vpred_n:$vp), 0>;
def MVE_VORNimmi32 : MVEInstAlias<"vorn${vp}.i32\t$Qd, $imm",
    (MVE_VORRimmi32 MQPR:$Qd, nImmSplatNotI32:$imm, vpred_n:$vp), 0>;

def MVE_VANDimmi16 : MVEInstAlias<"vand${vp}.i16\t$Qd, $imm",
    (MVE_VBICimmi16 MQPR:$Qd, nImmSplatNotI16:$imm, vpred_n:$vp), 0>;
def MVE_VANDimmi32 : MVEInstAlias<"vand${vp}.i32\t$Qd, $imm",
    (MVE_VBICimmi32 MQPR:$Qd, nImmSplatNotI32:$imm, vpred_n:$vp), 0>;

// Vector register move spelled as VORR with both sources equal.
def MVE_VMOV : MVEInstAlias<"vmov${vp}\t$Qd, $Qm",
    (MVE_VORR MQPR:$Qd, MQPR:$Qm, MQPR:$Qm, vpred_r:$vp)>;

// Describes the direction-dependent pieces of a lane move (GPR<->vector lane):
// operand lists, asm string and tie constraint.
class MVE_VMOV_lane_direction {
  bit bit_20;
  dag oops;
  dag iops;
  string ops;
  string cstr;
}
def MVE_VMOV_from_lane : MVE_VMOV_lane_direction {
  let bit_20 = 0b1;
  let oops = (outs rGPR:$Rt);
  let iops = (ins MQPR:$Qd);
  let ops = "$Rt, $Qd$Idx";
  let cstr = "";
}
def MVE_VMOV_to_lane :
MVE_VMOV_lane_direction {
  let bit_20 = 0b0;
  let oops = (outs MQPR:$Qd);
  let iops = (ins MQPR:$Qd_src, rGPR:$Rt);
  let ops = "$Qd$Idx, $Rt";
  let cstr = "$Qd = $Qd_src";
}

// VMOV between a GPR and one vector lane; the direction record supplies the
// operands, and the subclasses below encode the lane index per element size.
class MVE_VMOV_lane<string suffix, bit U, dag indexop,
                    MVE_VMOV_lane_direction dir>
  : MVE_VMOV_lane_base<dir.oops, !con(dir.iops, indexop), NoItinerary,
                       "vmov", suffix, dir.ops, dir.cstr, []> {
  bits<4> Qd;
  bits<4> Rt;

  let Inst{31-24} = 0b11101110;
  let Inst{23} = U;
  let Inst{20} = dir.bit_20;
  let Inst{19-17} = Qd{2-0};
  let Inst{15-12} = Rt{3-0};
  let Inst{11-8} = 0b1011;
  let Inst{7} = Qd{3};
  let Inst{4-0} = 0b10000;

  let hasSideEffects = 0;
}

// 32-bit lanes: 2-bit index.
class MVE_VMOV_lane_32<MVE_VMOV_lane_direction dir>
    : MVE_VMOV_lane<"32", 0b0, (ins MVEVectorIndex<4>:$Idx), dir> {
  bits<2> Idx;
  let Inst{22} = 0b0;
  let Inst{6-5} = 0b00;
  let Inst{16} = Idx{1};
  let Inst{21} = Idx{0};

  let Predicates = [HasFPRegsV8_1M];
}

// 16-bit lanes: 3-bit index.
class MVE_VMOV_lane_16<string suffix, bit U, MVE_VMOV_lane_direction dir>
  : MVE_VMOV_lane<suffix, U, (ins MVEVectorIndex<8>:$Idx), dir> {
  bits<3> Idx;
  let Inst{22} = 0b0;
  let Inst{5} = 0b1;
  let Inst{16} = Idx{2};
  let Inst{21} = Idx{1};
  let Inst{6} = Idx{0};
}

// 8-bit lanes: 4-bit index.
class MVE_VMOV_lane_8<string suffix, bit U, MVE_VMOV_lane_direction dir>
  : MVE_VMOV_lane<suffix, U, (ins MVEVectorIndex<16>:$Idx), dir> {
  bits<4> Idx;
  let Inst{22} = 0b1;
  let Inst{16} = Idx{3};
  let Inst{21} = Idx{2};
  let Inst{6} = Idx{1};
  let Inst{5} = Idx{0};
}

def MVE_VMOV_from_lane_32 : MVE_VMOV_lane_32< MVE_VMOV_from_lane>;
def MVE_VMOV_from_lane_s16 : MVE_VMOV_lane_16<"s16", 0b0, MVE_VMOV_from_lane>;
def MVE_VMOV_from_lane_u16 : MVE_VMOV_lane_16<"u16", 0b1, MVE_VMOV_from_lane>;
def MVE_VMOV_from_lane_s8 : MVE_VMOV_lane_8 < "s8", 0b0, MVE_VMOV_from_lane>;
def MVE_VMOV_from_lane_u8 : MVE_VMOV_lane_8 < "u8", 0b1, MVE_VMOV_from_lane>;
let isInsertSubreg = 1 in
def MVE_VMOV_to_lane_32 : MVE_VMOV_lane_32< MVE_VMOV_to_lane>;
def MVE_VMOV_to_lane_16 : MVE_VMOV_lane_16< "16", 0b0, MVE_VMOV_to_lane>;
def MVE_VMOV_to_lane_8 : MVE_VMOV_lane_8 < "8", 0b0, MVE_VMOV_to_lane>;

// This is the same as insertelt but allows the inserted value to be an i32 as
// will be used when it is the only legal type.
def ARMVecInsert : SDTypeProfile<1, 3, [
  SDTCisVT<2, i32>, SDTCisSameAs<0, 1>, SDTCisPtrTy<3>
]>;
def ARMinsertelt : SDNode<"ISD::INSERT_VECTOR_ELT", ARMVecInsert>;

let Predicates = [HasMVEInt] in {
  // f64 lanes go via D-subregister copies rather than lane-move instructions.
  def : Pat<(extractelt (v2f64 MQPR:$src), imm:$lane),
            (f64 (EXTRACT_SUBREG MQPR:$src, (DSubReg_f64_reg imm:$lane)))>;
  def : Pat<(insertelt (v2f64 MQPR:$src1), DPR:$src2, imm:$lane),
            (INSERT_SUBREG (v2f64 (COPY_TO_REGCLASS MQPR:$src1, MQPR)), DPR:$src2, (DSubReg_f64_reg imm:$lane))>;

  def : Pat<(extractelt (v4i32 MQPR:$src), imm:$lane),
            (COPY_TO_REGCLASS
              (i32 (EXTRACT_SUBREG MQPR:$src, (SSubReg_f32_reg imm:$lane))), rGPR)>;
  def : Pat<(insertelt (v4i32 MQPR:$src1), rGPR:$src2, imm:$lane),
            (MVE_VMOV_to_lane_32 MQPR:$src1, rGPR:$src2, imm:$lane)>;
  // This tries to copy from one lane to another, without going via GPR regs
  def : Pat<(insertelt (v4i32 MQPR:$src1), (extractelt (v4i32 MQPR:$src2), imm:$extlane), imm:$inslane),
            (v4i32 (COPY_TO_REGCLASS
              (INSERT_SUBREG (v4f32 (COPY_TO_REGCLASS (v4i32 MQPR:$src1), MQPR)),
                (f32 (EXTRACT_SUBREG (v4f32 (COPY_TO_REGCLASS (v4i32 MQPR:$src2), MQPR)),
                  (SSubReg_f32_reg imm:$extlane))),
                (SSubReg_f32_reg imm:$inslane)),
              MQPR))>;

  def : Pat<(vector_insert (v16i8 MQPR:$src1), rGPR:$src2, imm:$lane),
            (MVE_VMOV_to_lane_8 MQPR:$src1, rGPR:$src2, imm:$lane)>;
  def : Pat<(vector_insert (v8i16 MQPR:$src1), rGPR:$src2, imm:$lane),
            (MVE_VMOV_to_lane_16
MQPR:$src1, rGPR:$src2, imm:$lane)>;

  // Sign/zero-extending lane extraction maps directly onto the signed and
  // unsigned from-lane moves.
  def : Pat<(ARMvgetlanes (v16i8 MQPR:$src), imm:$lane),
            (MVE_VMOV_from_lane_s8 MQPR:$src, imm:$lane)>;
  def : Pat<(ARMvgetlanes (v8i16 MQPR:$src), imm:$lane),
            (MVE_VMOV_from_lane_s16 MQPR:$src, imm:$lane)>;
  def : Pat<(ARMvgetlanes (v8f16 MQPR:$src), imm:$lane),
            (MVE_VMOV_from_lane_s16 MQPR:$src, imm:$lane)>;
  def : Pat<(ARMvgetlaneu (v16i8 MQPR:$src), imm:$lane),
            (MVE_VMOV_from_lane_u8 MQPR:$src, imm:$lane)>;
  def : Pat<(ARMvgetlaneu (v8i16 MQPR:$src), imm:$lane),
            (MVE_VMOV_from_lane_u16 MQPR:$src, imm:$lane)>;
  def : Pat<(ARMvgetlaneu (v8f16 MQPR:$src), imm:$lane),
            (MVE_VMOV_from_lane_u16 MQPR:$src, imm:$lane)>;
  // For i16's inserts being extracted from low lanes, then may use VINS.
  def : Pat<(ARMinsertelt (v8i16 MQPR:$src1),
                          (ARMvgetlaneu (v8i16 MQPR:$src2), imm_even:$extlane),
                          imm_odd:$inslane),
            (COPY_TO_REGCLASS (INSERT_SUBREG (v4f32 (COPY_TO_REGCLASS MQPR:$src1, MQPR)),
                (VINSH (EXTRACT_SUBREG MQPR:$src1, (SSubReg_f16_reg imm_odd:$inslane)),
                       (EXTRACT_SUBREG MQPR:$src2, (SSubReg_f16_reg imm_even:$extlane))),
                (SSubReg_f16_reg imm_odd:$inslane)), MQPR)>;

  // scalar_to_vector: insert into lane 0 of an undefined vector.
  def : Pat<(v16i8 (scalar_to_vector GPR:$src)),
            (MVE_VMOV_to_lane_8 (v16i8 (IMPLICIT_DEF)), rGPR:$src, (i32 0))>;
  def : Pat<(v8i16 (scalar_to_vector GPR:$src)),
            (MVE_VMOV_to_lane_16 (v8i16 (IMPLICIT_DEF)), rGPR:$src, (i32 0))>;
  def : Pat<(v4i32 (scalar_to_vector GPR:$src)),
            (MVE_VMOV_to_lane_32 (v4i32 (IMPLICIT_DEF)), rGPR:$src, (i32 0))>;

  // Floating point patterns, still enabled under HasMVEInt
  def : Pat<(extractelt (v4f32 MQPR:$src), imm:$lane),
            (COPY_TO_REGCLASS (f32 (EXTRACT_SUBREG MQPR:$src, (SSubReg_f32_reg imm:$lane))), SPR)>;
  def : Pat<(insertelt (v4f32 MQPR:$src1), (f32 SPR:$src2), imm:$lane),
            (INSERT_SUBREG (v4f32 (COPY_TO_REGCLASS MQPR:$src1, MQPR)), SPR:$src2, (SSubReg_f32_reg imm:$lane))>;

  // f16 lanes: even lanes can use the lane-move via GPR; odd lanes use VINS on
  // the containing S register.
  def : Pat<(insertelt (v8f16 MQPR:$src1), (f16 HPR:$src2), imm_even:$lane),
            (MVE_VMOV_to_lane_16 MQPR:$src1, (COPY_TO_REGCLASS (f16 HPR:$src2), rGPR), imm:$lane)>;
  def : Pat<(insertelt (v8f16 MQPR:$src1), (f16 HPR:$src2), imm_odd:$lane),
            (COPY_TO_REGCLASS (INSERT_SUBREG (v4f32 (COPY_TO_REGCLASS MQPR:$src1, MQPR)),
                (VINSH (EXTRACT_SUBREG MQPR:$src1, (SSubReg_f16_reg imm_odd:$lane)),
                       (COPY_TO_REGCLASS HPR:$src2, SPR)),
                (SSubReg_f16_reg imm_odd:$lane)), MQPR)>;
  def : Pat<(extractelt (v8f16 MQPR:$src), imm_even:$lane),
            (EXTRACT_SUBREG MQPR:$src, (SSubReg_f16_reg imm_even:$lane))>;
  def : Pat<(extractelt (v8f16 MQPR:$src), imm_odd:$lane),
            (COPY_TO_REGCLASS
              (VMOVH (EXTRACT_SUBREG MQPR:$src, (SSubReg_f16_reg imm_odd:$lane))),
              HPR)>;

  def : Pat<(v2f64 (scalar_to_vector (f64 DPR:$src))),
            (INSERT_SUBREG (v2f64 (IMPLICIT_DEF)), DPR:$src, dsub_0)>;
  def : Pat<(v4f32 (scalar_to_vector SPR:$src)),
            (INSERT_SUBREG (v4f32 (IMPLICIT_DEF)), SPR:$src, ssub_0)>;
  def : Pat<(v4f32 (scalar_to_vector GPR:$src)),
            (MVE_VMOV_to_lane_32 (v4f32 (IMPLICIT_DEF)), rGPR:$src, (i32 0))>;
  def : Pat<(v8f16 (scalar_to_vector (f16 HPR:$src))),
            (INSERT_SUBREG (v8f16 (IMPLICIT_DEF)), (f16 HPR:$src), ssub_0)>;
  def : Pat<(v8f16 (scalar_to_vector GPR:$src)),
            (MVE_VMOV_to_lane_16 (v8f16 (IMPLICIT_DEF)), rGPR:$src, (i32 0))>;
}

// end of mve_bit instructions

// start of MVE Integer instructions

// Common base for three-register MVE integer ops: encodes Qd, Qn, Qm and the
// element-size field.
class MVE_int<string iname, string suffix, bits<2> size, list<dag> pattern=[]>
  : MVE_p<(outs MQPR:$Qd), (ins MQPR:$Qn, MQPR:$Qm), NoItinerary,
          iname, suffix, "$Qd, $Qn, $Qm", vpred_r, "", pattern> {
  bits<4> Qd;
  bits<4> Qn;
  bits<4> Qm;

  let Inst{22} = Qd{3};
  let Inst{21-20} = size;
  let Inst{19-17} = Qn{2-0};
  let Inst{15-13} = Qd{2-0};
  let Inst{7} = Qn{3};
  let Inst{6} = 0b1;
  let Inst{5} = Qm{3};
  let Inst{3-1} =
Qm{2-0};
}

// VMUL (integer, vector).
class MVE_VMULt1<string iname, string suffix, bits<2> size,
                 list<dag> pattern=[]>
  : MVE_int<iname, suffix, size, pattern> {

  let Inst{28} = 0b0;
  let Inst{25-23} = 0b110;
  let Inst{16} = 0b0;
  let Inst{12-8} = 0b01001;
  let Inst{4} = 0b1;
  let Inst{0} = 0b0;
  let validForTailPredication = 1;
}

multiclass MVE_VMUL_m<MVEVectorVTInfo VTI> {
  def "" : MVE_VMULt1<"vmul", VTI.Suffix, VTI.Size>;

  let Predicates = [HasMVEInt] in {
    defm : MVE_TwoOpPattern<VTI, mul, int_arm_mve_mul_predicated, (? ),
                            !cast<Instruction>(NAME), ARMimmOneV>;
  }
}

defm MVE_VMULi8 : MVE_VMUL_m<MVE_v16i8>;
defm MVE_VMULi16 : MVE_VMUL_m<MVE_v8i16>;
defm MVE_VMULi32 : MVE_VMUL_m<MVE_v4i32>;

// VQDMULH / VQRDMULH: saturating doubling multiply returning high half; the
// rounding bit distinguishes the two.
class MVE_VQxDMULH_Base<string iname, string suffix, bits<2> size, bit rounding,
                        list<dag> pattern=[]>
  : MVE_int<iname, suffix, size, pattern> {

  let Inst{28} = rounding;
  let Inst{25-23} = 0b110;
  let Inst{16} = 0b0;
  let Inst{12-8} = 0b01011;
  let Inst{4} = 0b0;
  let Inst{0} = 0b0;
  let validForTailPredication = 1;
}

def MVEvqdmulh : SDNode<"ARMISD::VQDMULH", SDTIntBinOp>;

multiclass MVE_VQxDMULH_m<string iname, MVEVectorVTInfo VTI,
                          SDNode Op, Intrinsic unpred_int, Intrinsic pred_int,
                          bit rounding> {
  def "" : MVE_VQxDMULH_Base<iname, VTI.Suffix, VTI.Size, rounding>;
  defvar Inst = !cast<Instruction>(NAME);

  let Predicates = [HasMVEInt] in {
    defm : MVE_TwoOpPattern<VTI, Op, pred_int, (? ), Inst>;

    // Extra unpredicated multiply intrinsic patterns
    def : Pat<(VTI.Vec (unpred_int (VTI.Vec MQPR:$Qm), (VTI.Vec MQPR:$Qn))),
              (VTI.Vec (Inst (VTI.Vec MQPR:$Qm), (VTI.Vec MQPR:$Qn)))>;
  }
}

// Only the non-rounding form has a generic SDNode (MVEvqdmulh); the rounding
// form is matched through intrinsics alone.
multiclass MVE_VQxDMULH<string iname, MVEVectorVTInfo VTI, bit rounding>
  : MVE_VQxDMULH_m<iname, VTI, !if(rounding, null_frag,
                                             MVEvqdmulh),
                               !if(rounding, int_arm_mve_vqrdmulh,
                                             int_arm_mve_vqdmulh),
                               !if(rounding, int_arm_mve_qrdmulh_predicated,
                                             int_arm_mve_qdmulh_predicated),
                   rounding>;

defm MVE_VQDMULHi8 : MVE_VQxDMULH<"vqdmulh", MVE_v16s8, 0b0>;
defm MVE_VQDMULHi16 : MVE_VQxDMULH<"vqdmulh", MVE_v8s16, 0b0>;
defm MVE_VQDMULHi32 : MVE_VQxDMULH<"vqdmulh", MVE_v4s32, 0b0>;

defm MVE_VQRDMULHi8 : MVE_VQxDMULH<"vqrdmulh", MVE_v16s8, 0b1>;
defm MVE_VQRDMULHi16 : MVE_VQxDMULH<"vqrdmulh", MVE_v8s16, 0b1>;
defm MVE_VQRDMULHi32 : MVE_VQxDMULH<"vqrdmulh", MVE_v4s32, 0b1>;

// VADD/VSUB (integer, vector); bit 28 selects subtract.
class MVE_VADDSUB<string iname, string suffix, bits<2> size, bit subtract,
                  list<dag> pattern=[]>
  : MVE_int<iname, suffix, size, pattern> {

  let Inst{28} = subtract;
  let Inst{25-23} = 0b110;
  let Inst{16} = 0b0;
  let Inst{12-8} = 0b01000;
  let Inst{4} = 0b0;
  let Inst{0} = 0b0;
  let validForTailPredication = 1;
}

multiclass MVE_VADDSUB_m<string iname, MVEVectorVTInfo VTI, bit subtract,
                         SDNode Op, Intrinsic PredInt> {
  def "" : MVE_VADDSUB<iname, VTI.Suffix, VTI.Size, subtract>;
  defvar Inst = !cast<Instruction>(NAME);

  let Predicates = [HasMVEInt] in {
    defm : MVE_TwoOpPattern<VTI, Op, PredInt, (?
), !cast<Instruction>(NAME), ARMimmAllZerosV>;
  }
}

multiclass MVE_VADD<MVEVectorVTInfo VTI>
  : MVE_VADDSUB_m<"vadd", VTI, 0b0, add, int_arm_mve_add_predicated>;
multiclass MVE_VSUB<MVEVectorVTInfo VTI>
  : MVE_VADDSUB_m<"vsub", VTI, 0b1, sub, int_arm_mve_sub_predicated>;

defm MVE_VADDi8 : MVE_VADD<MVE_v16i8>;
defm MVE_VADDi16 : MVE_VADD<MVE_v8i16>;
defm MVE_VADDi32 : MVE_VADD<MVE_v4i32>;

defm MVE_VSUBi8 : MVE_VSUB<MVE_v16i8>;
defm MVE_VSUBi16 : MVE_VSUB<MVE_v8i16>;
defm MVE_VSUBi32 : MVE_VSUB<MVE_v4i32>;

// VQADD/VQSUB: saturating add/subtract; U is the signedness bit and bit 9
// selects subtract.
class MVE_VQADDSUB<string iname, string suffix, bit U, bit subtract,
                   bits<2> size>
  : MVE_int<iname, suffix, size, []> {

  let Inst{28} = U;
  let Inst{25-23} = 0b110;
  let Inst{16} = 0b0;
  let Inst{12-10} = 0b000;
  let Inst{9} = subtract;
  let Inst{8} = 0b0;
  let Inst{4} = 0b1;
  let Inst{0} = 0b0;
  let validForTailPredication = 1;
}

class MVE_VQADD_<string suffix, bit U, bits<2> size>
  : MVE_VQADDSUB<"vqadd", suffix, U, 0b0, size>;
class MVE_VQSUB_<string suffix, bit U, bits<2> size>
  : MVE_VQADDSUB<"vqsub", suffix, U, 0b1, size>;

multiclass MVE_VQADD_m<MVEVectorVTInfo VTI,
                      SDNode Op, Intrinsic PredInt> {
  def "" : MVE_VQADD_<VTI.Suffix, VTI.Unsigned, VTI.Size>;
  defvar Inst = !cast<Instruction>(NAME);

  let Predicates = [HasMVEInt] in {
    // The predicated intrinsic carries an extra unsigned-flag operand.
    defm : MVE_TwoOpPattern<VTI, Op, PredInt, (? (i32 VTI.Unsigned)),
                            !cast<Instruction>(NAME)>;
  }
}

multiclass MVE_VQADD<MVEVectorVTInfo VTI, SDNode unpred_op>
  : MVE_VQADD_m<VTI, unpred_op, int_arm_mve_qadd_predicated>;

defm MVE_VQADDs8 : MVE_VQADD<MVE_v16s8, saddsat>;
defm MVE_VQADDs16 : MVE_VQADD<MVE_v8s16, saddsat>;
defm MVE_VQADDs32 : MVE_VQADD<MVE_v4s32, saddsat>;
defm MVE_VQADDu8 : MVE_VQADD<MVE_v16u8, uaddsat>;
defm MVE_VQADDu16 : MVE_VQADD<MVE_v8u16, uaddsat>;
defm MVE_VQADDu32 : MVE_VQADD<MVE_v4u32, uaddsat>;

multiclass MVE_VQSUB_m<MVEVectorVTInfo VTI,
                      SDNode Op, Intrinsic PredInt> {
  def "" : MVE_VQSUB_<VTI.Suffix, VTI.Unsigned, VTI.Size>;
  defvar Inst = !cast<Instruction>(NAME);

  let Predicates = [HasMVEInt] in {
    defm : MVE_TwoOpPattern<VTI, Op, PredInt, (? (i32 VTI.Unsigned)),
                            !cast<Instruction>(NAME)>;
  }
}

multiclass MVE_VQSUB<MVEVectorVTInfo VTI, SDNode unpred_op>
  : MVE_VQSUB_m<VTI, unpred_op, int_arm_mve_qsub_predicated>;

defm MVE_VQSUBs8 : MVE_VQSUB<MVE_v16s8, ssubsat>;
defm MVE_VQSUBs16 : MVE_VQSUB<MVE_v8s16, ssubsat>;
defm MVE_VQSUBs32 : MVE_VQSUB<MVE_v4s32, ssubsat>;
defm MVE_VQSUBu8 : MVE_VQSUB<MVE_v16u8, usubsat>;
defm MVE_VQSUBu16 : MVE_VQSUB<MVE_v8u16, usubsat>;
defm MVE_VQSUBu32 : MVE_VQSUB<MVE_v4u32, usubsat>;

// VABD (integer): absolute difference; U is the signedness bit.
class MVE_VABD_int<string suffix, bit U, bits<2> size,
                   list<dag> pattern=[]>
  : MVE_int<"vabd", suffix, size, pattern> {

  let Inst{28} = U;
  let Inst{25-23} = 0b110;
  let Inst{16} = 0b0;
  let Inst{12-8} = 0b00111;
  let Inst{4} = 0b0;
  let Inst{0} = 0b0;
  let validForTailPredication = 1;
}

multiclass MVE_VABD_m<MVEVectorVTInfo VTI, SDNode Op,
                      Intrinsic unpred_int, Intrinsic PredInt> {
  def "" : MVE_VABD_int<VTI.Suffix, VTI.Unsigned, VTI.Size>;
  defvar Inst = !cast<Instruction>(NAME);

  let Predicates = [HasMVEInt] in {
    defm : MVE_TwoOpPattern<VTI,
Op, PredInt, (? (i32 VTI.Unsigned)),
                            !cast<Instruction>(NAME)>;

    // Unpredicated absolute difference
    def : Pat<(VTI.Vec (unpred_int (VTI.Vec MQPR:$Qm), (VTI.Vec MQPR:$Qn),
                            (i32 VTI.Unsigned))),
              (VTI.Vec (Inst (VTI.Vec MQPR:$Qm), (VTI.Vec MQPR:$Qn)))>;
  }
}

multiclass MVE_VABD<MVEVectorVTInfo VTI, SDNode Op>
  : MVE_VABD_m<VTI, Op, int_arm_mve_vabd, int_arm_mve_abd_predicated>;

defm MVE_VABDs8 : MVE_VABD<MVE_v16s8, abds>;
defm MVE_VABDs16 : MVE_VABD<MVE_v8s16, abds>;
defm MVE_VABDs32 : MVE_VABD<MVE_v4s32, abds>;
defm MVE_VABDu8 : MVE_VABD<MVE_v16u8, abdu>;
defm MVE_VABDu16 : MVE_VABD<MVE_v8u16, abdu>;
defm MVE_VABDu32 : MVE_VABD<MVE_v4u32, abdu>;

// VRHADD: rounding halving add, (Qm + Qn + 1) >> 1 per element.
class MVE_VRHADD_Base<string suffix, bit U, bits<2> size, list<dag> pattern=[]>
  : MVE_int<"vrhadd", suffix, size, pattern> {

  let Inst{28} = U;
  let Inst{25-23} = 0b110;
  let Inst{16} = 0b0;
  let Inst{12-8} = 0b00001;
  let Inst{4} = 0b0;
  let Inst{0} = 0b0;
  let validForTailPredication = 1;
}

// add/sub PatFrags restricted to nodes carrying the no-unsigned-wrap or
// no-signed-wrap flag; used below to justify halving-add/sub selection.
def addnuw : PatFrag<(ops node:$lhs, node:$rhs),
                     (add node:$lhs, node:$rhs), [{
  return N->getFlags().hasNoUnsignedWrap();
}]>;

def addnsw : PatFrag<(ops node:$lhs, node:$rhs),
                     (add node:$lhs, node:$rhs), [{
  return N->getFlags().hasNoSignedWrap();
}]>;

def subnuw : PatFrag<(ops node:$lhs, node:$rhs),
                     (sub node:$lhs, node:$rhs), [{
  return N->getFlags().hasNoUnsignedWrap();
}]>;

def subnsw : PatFrag<(ops node:$lhs, node:$rhs),
                     (sub node:$lhs, node:$rhs), [{
  return N->getFlags().hasNoSignedWrap();
}]>;

multiclass MVE_VRHADD_m<MVEVectorVTInfo VTI,
                      SDNode unpred_op, Intrinsic pred_int> {
  def "" : MVE_VRHADD_Base<VTI.Suffix, VTI.Unsigned, VTI.Size>;
  defvar Inst = !cast<Instruction>(NAME);

  let Predicates = [HasMVEInt] in {
    // Unpredicated rounding add-with-divide-by-two
    def : Pat<(VTI.Vec (unpred_op (VTI.Vec MQPR:$Qm), (VTI.Vec MQPR:$Qn),
                            (i32 VTI.Unsigned))),
              (VTI.Vec (Inst (VTI.Vec MQPR:$Qm), (VTI.Vec MQPR:$Qn)))>;

    // Predicated add-with-divide-by-two
    def : Pat<(VTI.Vec (pred_int (VTI.Vec MQPR:$Qm), (VTI.Vec MQPR:$Qn),
                            (i32 VTI.Unsigned), (VTI.Pred VCCR:$mask),
                            (VTI.Vec MQPR:$inactive))),
              (VTI.Vec (Inst (VTI.Vec MQPR:$Qm), (VTI.Vec MQPR:$Qn),
                            ARMVCCThen, (VTI.Pred VCCR:$mask),
                            (VTI.Vec MQPR:$inactive)))>;
  }
}

multiclass MVE_VRHADD<MVEVectorVTInfo VTI>
  : MVE_VRHADD_m<VTI, int_arm_mve_vrhadd, int_arm_mve_rhadd_predicated>;

defm MVE_VRHADDs8 : MVE_VRHADD<MVE_v16s8>;
defm MVE_VRHADDs16 : MVE_VRHADD<MVE_v8s16>;
defm MVE_VRHADDs32 : MVE_VRHADD<MVE_v4s32>;
defm MVE_VRHADDu8 : MVE_VRHADD<MVE_v16u8>;
defm MVE_VRHADDu16 : MVE_VRHADD<MVE_v8u16>;
defm MVE_VRHADDu32 : MVE_VRHADD<MVE_v4u32>;

// Rounding Halving Add performs the arithmetic operation with an extra bit of
// precision, before performing the shift, to avoid clipping errors. We're not
// modelling that here with these patterns, but we're using no wrap forms of
// add to ensure that the extra bit of information is not needed for the
// arithmetic or the rounding.
let Predicates = [HasMVEInt] in {
  // Match ((Qm + Qn) + splat(1)) >> 1 (no-signed/unsigned-wrap adds) as
  // VRHADD. The ARMvmovImm encodings (3585/2049/1) are the per-element-size
  // splat-of-one immediates.
  def : Pat<(v16i8 (ARMvshrsImm (addnsw (addnsw (v16i8 MQPR:$Qm), (v16i8 MQPR:$Qn)),
                                    (v16i8 (ARMvmovImm (i32 3585)))),
                       (i32 1))),
            (MVE_VRHADDs8 MQPR:$Qm, MQPR:$Qn)>;
  def : Pat<(v8i16 (ARMvshrsImm (addnsw (addnsw (v8i16 MQPR:$Qm), (v8i16 MQPR:$Qn)),
                                    (v8i16 (ARMvmovImm (i32 2049)))),
                       (i32 1))),
            (MVE_VRHADDs16 MQPR:$Qm, MQPR:$Qn)>;
  def : Pat<(v4i32 (ARMvshrsImm (addnsw (addnsw (v4i32 MQPR:$Qm), (v4i32 MQPR:$Qn)),
                                    (v4i32 (ARMvmovImm (i32 1)))),
                       (i32 1))),
            (MVE_VRHADDs32 MQPR:$Qm, MQPR:$Qn)>;
  def : Pat<(v16i8 (ARMvshruImm (addnuw (addnuw (v16i8 MQPR:$Qm), (v16i8 MQPR:$Qn)),
                                    (v16i8 (ARMvmovImm (i32 3585)))),
                       (i32 1))),
            (MVE_VRHADDu8 MQPR:$Qm, MQPR:$Qn)>;
  def : Pat<(v8i16 (ARMvshruImm (addnuw (addnuw (v8i16 MQPR:$Qm), (v8i16 MQPR:$Qn)),
                                    (v8i16 (ARMvmovImm (i32 2049)))),
                       (i32 1))),
            (MVE_VRHADDu16 MQPR:$Qm, MQPR:$Qn)>;
  def : Pat<(v4i32 (ARMvshruImm (addnuw (addnuw (v4i32 MQPR:$Qm), (v4i32 MQPR:$Qn)),
                                    (v4i32 (ARMvmovImm (i32 1)))),
                       (i32 1))),
            (MVE_VRHADDu32 MQPR:$Qm, MQPR:$Qn)>;
}


// VHADD/VHSUB: halving add/subtract, (Qm op Qn) >> 1 per element; U is the
// signedness bit and bit 9 selects subtract.
class MVE_VHADDSUB<string iname, string suffix, bit U, bit subtract,
                   bits<2> size, list<dag> pattern=[]>
  : MVE_int<iname, suffix, size, pattern> {

  let Inst{28} = U;
  let Inst{25-23} = 0b110;
  let Inst{16} = 0b0;
  let Inst{12-10} = 0b000;
  let Inst{9} = subtract;
  let Inst{8} = 0b0;
  let Inst{4} = 0b0;
  let Inst{0} = 0b0;
  let validForTailPredication = 1;
}

class MVE_VHADD_<string suffix, bit U, bits<2> size,
                 list<dag> pattern=[]>
  : MVE_VHADDSUB<"vhadd", suffix, U, 0b0, size, pattern>;
class MVE_VHSUB_<string suffix, bit U, bits<2> size,
                 list<dag> pattern=[]>
  : MVE_VHADDSUB<"vhsub", suffix, U, 0b1, size, pattern>;

multiclass MVE_VHADD_m<MVEVectorVTInfo VTI,
                      SDNode unpred_op, Intrinsic pred_int, PatFrag add_op,
                      SDNode shift_op> {
  def "" : MVE_VHADD_<VTI.Suffix, VTI.Unsigned, VTI.Size>;
  defvar Inst = !cast<Instruction>(NAME);

  let Predicates = [HasMVEInt] in {
    // Unpredicated add-and-divide-by-two
    def : Pat<(VTI.Vec (unpred_op (VTI.Vec MQPR:$Qm), (VTI.Vec MQPR:$Qn), (i32 VTI.Unsigned))),
              (VTI.Vec (Inst (VTI.Vec MQPR:$Qm), (VTI.Vec MQPR:$Qn)))>;

    // Generic DAG form: no-wrap add followed by shift-right-by-one.
    def : Pat<(VTI.Vec (shift_op (add_op (VTI.Vec MQPR:$Qm), (VTI.Vec MQPR:$Qn)), (i32 1))),
              (Inst MQPR:$Qm, MQPR:$Qn)>;

    // Predicated add-and-divide-by-two
    def : Pat<(VTI.Vec (pred_int (VTI.Vec MQPR:$Qm), (VTI.Vec MQPR:$Qn), (i32 VTI.Unsigned),
                            (VTI.Pred VCCR:$mask), (VTI.Vec MQPR:$inactive))),
              (VTI.Vec (Inst (VTI.Vec MQPR:$Qm), (VTI.Vec MQPR:$Qn),
                            ARMVCCThen, (VTI.Pred VCCR:$mask),
                            (VTI.Vec MQPR:$inactive)))>;
  }
}

multiclass MVE_VHADD<MVEVectorVTInfo VTI, PatFrag add_op, SDNode shift_op>
  : MVE_VHADD_m<VTI, int_arm_mve_vhadd, int_arm_mve_hadd_predicated, add_op,
                shift_op>;

// Halving add/sub perform the arithmetic operation with an extra bit of
// precision, before performing the shift, to avoid clipping errors. We're not
// modelling that here with these patterns, but we're using no wrap forms of
// add/sub to ensure that the extra bit of information is not needed.
defm MVE_VHADDs8 : MVE_VHADD<MVE_v16s8, addnsw, ARMvshrsImm>;
defm MVE_VHADDs16 : MVE_VHADD<MVE_v8s16, addnsw, ARMvshrsImm>;
defm MVE_VHADDs32 : MVE_VHADD<MVE_v4s32, addnsw, ARMvshrsImm>;
defm MVE_VHADDu8 : MVE_VHADD<MVE_v16u8, addnuw, ARMvshruImm>;
defm MVE_VHADDu16 : MVE_VHADD<MVE_v8u16, addnuw, ARMvshruImm>;
defm MVE_VHADDu32 : MVE_VHADD<MVE_v4u32, addnuw, ARMvshruImm>;

multiclass MVE_VHSUB_m<MVEVectorVTInfo VTI,
                      SDNode unpred_op, Intrinsic pred_int, PatFrag sub_op,
                      SDNode shift_op> {
  def "" : MVE_VHSUB_<VTI.Suffix, VTI.Unsigned, VTI.Size>;
  defvar Inst = !cast<Instruction>(NAME);

  let Predicates = [HasMVEInt] in {
    // Unpredicated subtract-and-divide-by-two
    def : Pat<(VTI.Vec (unpred_op (VTI.Vec MQPR:$Qm), (VTI.Vec MQPR:$Qn),
                            (i32 VTI.Unsigned))),
              (VTI.Vec (Inst (VTI.Vec MQPR:$Qm), (VTI.Vec MQPR:$Qn)))>;

    // Generic DAG form: no-wrap subtract followed by shift-right-by-one.
    def : Pat<(VTI.Vec (shift_op (sub_op (VTI.Vec MQPR:$Qm), (VTI.Vec MQPR:$Qn)), (i32 1))),
              (Inst MQPR:$Qm, MQPR:$Qn)>;


    // Predicated subtract-and-divide-by-two
    def : Pat<(VTI.Vec (pred_int (VTI.Vec MQPR:$Qm), (VTI.Vec MQPR:$Qn),
                            (i32 VTI.Unsigned), (VTI.Pred VCCR:$mask),
                            (VTI.Vec MQPR:$inactive))),
              (VTI.Vec (Inst (VTI.Vec MQPR:$Qm), (VTI.Vec MQPR:$Qn),
                            ARMVCCThen, (VTI.Pred VCCR:$mask),
                            (VTI.Vec MQPR:$inactive)))>;
  }
}

multiclass MVE_VHSUB<MVEVectorVTInfo VTI, PatFrag sub_op, SDNode shift_op>
  : MVE_VHSUB_m<VTI, int_arm_mve_vhsub, int_arm_mve_hsub_predicated, sub_op,
                shift_op>;

defm MVE_VHSUBs8 : MVE_VHSUB<MVE_v16s8, subnsw, ARMvshrsImm>;
defm MVE_VHSUBs16 : MVE_VHSUB<MVE_v8s16, subnsw, ARMvshrsImm>;
defm MVE_VHSUBs32 : MVE_VHSUB<MVE_v4s32, subnsw, ARMvshrsImm>;
defm MVE_VHSUBu8 : MVE_VHSUB<MVE_v16u8, subnuw, ARMvshruImm>;
defm MVE_VHSUBu16 : MVE_VHSUB<MVE_v8u16, subnuw, ARMvshruImm>;
defm MVE_VHSUBu32 : MVE_VHSUB<MVE_v4u32, subnuw, ARMvshruImm>;

// VDUP (from GPR): splat Rt into every lane; B and E together select the
// element size.
class MVE_VDUP<string suffix, bit B, bit E, list<dag> pattern=[]>
  : MVE_p<(outs MQPR:$Qd), (ins rGPR:$Rt), NoItinerary,
          "vdup", suffix, "$Qd, $Rt", vpred_r, "", pattern> {
  bits<4> Qd;
  bits<4> Rt;

  let Inst{28} = 0b0;
  let Inst{25-23} = 0b101;
  let Inst{22} = B;
  let Inst{21-20} = 0b10;
  let Inst{19-17} = Qd{2-0};
  let Inst{16} = 0b0;
  let Inst{15-12} = Rt;
  let Inst{11-8} = 0b1011;
  let Inst{7} = Qd{3};
  let Inst{6} = 0b0;
  let Inst{5} = E;
  let Inst{4-0} = 0b10000;
  let validForTailPredication = 1;
}

def MVE_VDUP32 : MVE_VDUP<"32", 0b0, 0b0>;
def MVE_VDUP16 : MVE_VDUP<"16", 0b0, 0b1>;
def MVE_VDUP8 : MVE_VDUP<"8", 0b1, 0b0>;

let Predicates = [HasMVEInt] in {
  def : Pat<(v16i8 (ARMvdup (i32 rGPR:$elem))),
            (MVE_VDUP8 rGPR:$elem)>;
  def : Pat<(v8i16 (ARMvdup (i32 rGPR:$elem))),
            (MVE_VDUP16 rGPR:$elem)>;
  def : Pat<(v4i32 (ARMvdup (i32 rGPR:$elem))),
            (MVE_VDUP32 rGPR:$elem)>;

  def : Pat<(v8f16 (ARMvdup (i32 rGPR:$elem))),
            (MVE_VDUP16 rGPR:$elem)>;
  def : Pat<(v4f32 (ARMvdup (i32 rGPR:$elem))),
            (MVE_VDUP32 rGPR:$elem)>;

  // Match a vselect with an ARMvdup as a predicated MVE_VDUP
  def : Pat<(v16i8 (vselect (v16i1 VCCR:$pred),
                            (v16i8 (ARMvdup (i32 rGPR:$elem))),
                            (v16i8 MQPR:$inactive))),
            (MVE_VDUP8 rGPR:$elem, ARMVCCThen, (v16i1 VCCR:$pred),
                            (v16i8 MQPR:$inactive))>;
  def : Pat<(v8i16 (vselect (v8i1 VCCR:$pred),
                            (v8i16 (ARMvdup (i32 rGPR:$elem))),
                            (v8i16 MQPR:$inactive))),
            (MVE_VDUP16 rGPR:$elem, ARMVCCThen, (v8i1 VCCR:$pred),
                            (v8i16 MQPR:$inactive))>;
  def : Pat<(v4i32 (vselect (v4i1 VCCR:$pred),
                            (v4i32 (ARMvdup (i32 rGPR:$elem))),
                            (v4i32 MQPR:$inactive))),
            (MVE_VDUP32 rGPR:$elem, ARMVCCThen, (v4i1 VCCR:$pred),
                            (v4i32 MQPR:$inactive))>;
  def : Pat<(v4f32 (vselect (v4i1 VCCR:$pred),
                            (v4f32 (ARMvdup (i32 rGPR:$elem))),
                            (v4f32 MQPR:$inactive))),
            (MVE_VDUP32 rGPR:$elem, ARMVCCThen, (v4i1 VCCR:$pred),
                            (v4f32 MQPR:$inactive))>;
  def : Pat<(v8f16 (vselect (v8i1 VCCR:$pred),
                            (v8f16 (ARMvdup (i32 rGPR:$elem))),
                            (v8f16 MQPR:$inactive))),
            (MVE_VDUP16 rGPR:$elem, ARMVCCThen, (v8i1 VCCR:$pred),
                            (v8f16 MQPR:$inactive))>;
}


// Common base for one-source integer ops (VCLS/VCLZ/VABS/VNEG etc.).
class MVEIntSingleSrc<string iname, string suffix, bits<2> size,
                      list<dag> pattern=[]>
  : MVE_p<(outs MQPR:$Qd), (ins MQPR:$Qm), NoItinerary,
          iname, suffix, "$Qd, $Qm", vpred_r, "", pattern> {
  bits<4> Qd;
  bits<4> Qm;

  let Inst{22} = Qd{3};
  let Inst{19-18} = size{1-0};
  let Inst{15-13} = Qd{2-0};
  let Inst{5} = Qm{3};
  let Inst{3-1} = Qm{2-0};
}

// VCLS/VCLZ: count leading sign bits / leading zeroes per element; bit 7
// selects the zero-counting form.
class MVE_VCLSCLZ<string iname, string suffix, bits<2> size,
                   bit count_zeroes, list<dag> pattern=[]>
  : MVEIntSingleSrc<iname, suffix, size, pattern> {

  let Inst{28} = 0b1;
  let Inst{25-23} = 0b111;
  let Inst{21-20} = 0b11;
  let Inst{17-16} = 0b00;
  let Inst{12-8} = 0b00100;
  let Inst{7} = count_zeroes;
  let Inst{6} = 0b1;
  let Inst{4} = 0b0;
  let Inst{0} = 0b0;
  let validForTailPredication = 1;
}

multiclass MVE_VCLSCLZ_p<string opname, bit opcode, MVEVectorVTInfo VTI,
                         SDPatternOperator unpred_op> {
  def "": MVE_VCLSCLZ<"v"#opname, VTI.Suffix, VTI.Size, opcode>;

  defvar Inst     = !cast<Instruction>(NAME);
  defvar pred_int = !cast<Intrinsic>("int_arm_mve_"#opname#"_predicated");

  let Predicates = [HasMVEInt] in {
    def : Pat<(VTI.Vec (unpred_op (VTI.Vec MQPR:$val))),
              (VTI.Vec (Inst (VTI.Vec MQPR:$val)))>;
    def : Pat<(VTI.Vec (pred_int (VTI.Vec MQPR:$val), (VTI.Pred VCCR:$pred),
                            (VTI.Vec MQPR:$inactive))),
              (VTI.Vec (Inst (VTI.Vec MQPR:$val), ARMVCCThen,
                            (VTI.Pred VCCR:$pred), (VTI.Vec MQPR:$inactive)))>;
  }
}

defm MVE_VCLSs8 : MVE_VCLSCLZ_p<"cls", 0, MVE_v16s8, int_arm_mve_vcls>;
defm MVE_VCLSs16 : MVE_VCLSCLZ_p<"cls", 0, MVE_v8s16, int_arm_mve_vcls>;
defm MVE_VCLSs32 : MVE_VCLSCLZ_p<"cls", 0, MVE_v4s32, int_arm_mve_vcls>;

defm MVE_VCLZs8 : MVE_VCLSCLZ_p<"clz", 1, MVE_v16i8, ctlz>;
defm MVE_VCLZs16 : MVE_VCLSCLZ_p<"clz", 1, MVE_v8i16, ctlz>;
defm MVE_VCLZs32 : MVE_VCLSCLZ_p<"clz", 1, MVE_v4i32, ctlz>;

// VABS/VNEG/VQABS/VQNEG: negate selects neg over abs, saturate selects the
// saturating (VQ*) forms.
class MVE_VABSNEG_int<string iname, string suffix, bits<2> size, bit negate,
                      bit saturate, list<dag> pattern=[]>
  : MVEIntSingleSrc<iname, suffix, size, pattern> {

  let Inst{28} = 0b1;
  let Inst{25-23} = 0b111;
  let Inst{21-20} = 0b11;
  let Inst{17} = 0b0;
  let Inst{16} = !eq(saturate, 0);
  let Inst{12-11} = 0b00;
  let Inst{10} = saturate;
  let Inst{9-8} = 0b11;
  let Inst{7} = negate;
  let Inst{6} = 0b1;
  let Inst{4} = 0b0;
  let Inst{0} = 0b0;
  let validForTailPredication = 1;
}

multiclass MVE_VABSNEG_int_m<string iname, bit negate, bit saturate,
                             SDPatternOperator unpred_op, Intrinsic pred_int,
                             MVEVectorVTInfo VTI> {
  def "" : MVE_VABSNEG_int<iname, VTI.Suffix, VTI.Size, negate, saturate>;
  defvar Inst = !cast<Instruction>(NAME);

  let Predicates = [HasMVEInt] in {
    // VQABS and VQNEG have more difficult isel patterns defined elsewhere
    if !not(saturate) then {
      def : Pat<(VTI.Vec (unpred_op (VTI.Vec MQPR:$v))),
                (VTI.Vec (Inst $v))>;
    }

    def : Pat<(VTI.Vec (pred_int (VTI.Vec MQPR:$v), (VTI.Pred VCCR:$mask),
                            (VTI.Vec MQPR:$inactive))),
              (VTI.Vec (Inst $v, ARMVCCThen, $mask, $inactive))>;
  }
}

foreach VTI = [ MVE_v16s8, MVE_v8s16, MVE_v4s32 ] in {
  defm "MVE_VABS" # VTI.Suffix : MVE_VABSNEG_int_m<
     "vabs", 0, 0, abs, int_arm_mve_abs_predicated, VTI>;
  defm "MVE_VQABS" # VTI.Suffix : MVE_VABSNEG_int_m<
     "vqabs", 0, 1, ?, int_arm_mve_qabs_predicated, VTI>;
  defm "MVE_VNEG" # VTI.Suffix : MVE_VABSNEG_int_m<
     "vneg", 1, 0, vnegq,
int_arm_mve_neg_predicated, VTI>; 2520 defm "MVE_VQNEG" # VTI.Suffix : MVE_VABSNEG_int_m< 2521 "vqneg", 1, 1, ?, int_arm_mve_qneg_predicated, VTI>; 2522} 2523 2524// int_min/int_max: vector containing INT_MIN/INT_MAX VTI.Size times 2525// zero_vec: v4i32-initialized zero vector, potentially wrapped in a bitconvert 2526multiclass vqabsneg_pattern<MVEVectorVTInfo VTI, dag int_min, dag int_max, 2527 dag zero_vec, MVE_VABSNEG_int vqabs_instruction, 2528 MVE_VABSNEG_int vqneg_instruction> { 2529 let Predicates = [HasMVEInt] in { 2530 // The below tree can be replaced by a vqabs instruction, as it represents 2531 // the following vectorized expression (r being the value in $reg): 2532 // r > 0 ? r : (r == INT_MIN ? INT_MAX : -r) 2533 def : Pat<(VTI.Vec (vselect 2534 (VTI.Pred (ARMvcmpz (VTI.Vec MQPR:$reg), ARMCCgt)), 2535 (VTI.Vec MQPR:$reg), 2536 (VTI.Vec (vselect 2537 (VTI.Pred (ARMvcmp (VTI.Vec MQPR:$reg), int_min, ARMCCeq)), 2538 int_max, 2539 (sub (VTI.Vec zero_vec), (VTI.Vec MQPR:$reg)))))), 2540 (VTI.Vec (vqabs_instruction (VTI.Vec MQPR:$reg)))>; 2541 // Similarly, this tree represents vqneg, i.e. the following vectorized expression: 2542 // r == INT_MIN ? 
INT_MAX : -r 2543 def : Pat<(VTI.Vec (vselect 2544 (VTI.Pred (ARMvcmp (VTI.Vec MQPR:$reg), int_min, ARMCCeq)), 2545 int_max, 2546 (sub (VTI.Vec zero_vec), (VTI.Vec MQPR:$reg)))), 2547 (VTI.Vec (vqneg_instruction (VTI.Vec MQPR:$reg)))>; 2548 } 2549} 2550 2551defm MVE_VQABSNEG_Ps8 : vqabsneg_pattern<MVE_v16i8, 2552 (v16i8 (ARMvmovImm (i32 3712))), 2553 (v16i8 (ARMvmovImm (i32 3711))), 2554 (bitconvert (v4i32 (ARMvmovImm (i32 0)))), 2555 MVE_VQABSs8, MVE_VQNEGs8>; 2556defm MVE_VQABSNEG_Ps16 : vqabsneg_pattern<MVE_v8i16, 2557 (v8i16 (ARMvmovImm (i32 2688))), 2558 (v8i16 (ARMvmvnImm (i32 2688))), 2559 (bitconvert (v4i32 (ARMvmovImm (i32 0)))), 2560 MVE_VQABSs16, MVE_VQNEGs16>; 2561defm MVE_VQABSNEG_Ps32 : vqabsneg_pattern<MVE_v4i32, 2562 (v4i32 (ARMvmovImm (i32 1664))), 2563 (v4i32 (ARMvmvnImm (i32 1664))), 2564 (ARMvmovImm (i32 0)), 2565 MVE_VQABSs32, MVE_VQNEGs32>; 2566 2567class MVE_mod_imm<string iname, string suffix, bits<4> cmode, bit op, 2568 dag iops, list<dag> pattern=[]> 2569 : MVE_p<(outs MQPR:$Qd), iops, NoItinerary, iname, suffix, "$Qd, $imm", 2570 vpred_r, "", pattern> { 2571 bits<13> imm; 2572 bits<4> Qd; 2573 2574 let Inst{28} = imm{7}; 2575 let Inst{25-23} = 0b111; 2576 let Inst{22} = Qd{3}; 2577 let Inst{21-19} = 0b000; 2578 let Inst{18-16} = imm{6-4}; 2579 let Inst{15-13} = Qd{2-0}; 2580 let Inst{12} = 0b0; 2581 let Inst{11-8} = cmode{3-0}; 2582 let Inst{7-6} = 0b01; 2583 let Inst{5} = op; 2584 let Inst{4} = 0b1; 2585 let Inst{3-0} = imm{3-0}; 2586 2587 let DecoderMethod = "DecodeMVEModImmInstruction"; 2588 let validForTailPredication = 1; 2589} 2590 2591let isReMaterializable = 1 in { 2592let isAsCheapAsAMove = 1 in { 2593def MVE_VMOVimmi8 : MVE_mod_imm<"vmov", "i8", {1,1,1,0}, 0b0, (ins nImmSplatI8:$imm)>; 2594def MVE_VMOVimmi16 : MVE_mod_imm<"vmov", "i16", {1,0,?,0}, 0b0, (ins nImmSplatI16:$imm)> { 2595 let Inst{9} = imm{9}; 2596} 2597def MVE_VMOVimmi32 : MVE_mod_imm<"vmov", "i32", {?,?,?,?}, 0b0, (ins nImmVMOVI32:$imm)> { 2598 let Inst{11-8} = 
imm{11-8}; 2599} 2600def MVE_VMOVimmi64 : MVE_mod_imm<"vmov", "i64", {1,1,1,0}, 0b1, (ins nImmSplatI64:$imm)>; 2601def MVE_VMOVimmf32 : MVE_mod_imm<"vmov", "f32", {1,1,1,1}, 0b0, (ins nImmVMOVF32:$imm)>; 2602} // let isAsCheapAsAMove = 1 2603 2604def MVE_VMVNimmi16 : MVE_mod_imm<"vmvn", "i16", {1,0,?,0}, 0b1, (ins nImmSplatI16:$imm)> { 2605 let Inst{9} = imm{9}; 2606} 2607def MVE_VMVNimmi32 : MVE_mod_imm<"vmvn", "i32", {?,?,?,?}, 0b1, (ins nImmVMOVI32:$imm)> { 2608 let Inst{11-8} = imm{11-8}; 2609} 2610} // let isReMaterializable = 1 2611 2612let Predicates = [HasMVEInt] in { 2613 def : Pat<(v16i8 (ARMvmovImm timm:$simm)), 2614 (v16i8 (MVE_VMOVimmi8 nImmSplatI8:$simm))>; 2615 def : Pat<(v8i16 (ARMvmovImm timm:$simm)), 2616 (v8i16 (MVE_VMOVimmi16 nImmSplatI16:$simm))>; 2617 def : Pat<(v4i32 (ARMvmovImm timm:$simm)), 2618 (v4i32 (MVE_VMOVimmi32 nImmVMOVI32:$simm))>; 2619 def : Pat<(v2i64 (ARMvmovImm timm:$simm)), 2620 (v2i64 (MVE_VMOVimmi64 nImmSplatI64:$simm))>; 2621 2622 def : Pat<(v8i16 (ARMvmvnImm timm:$simm)), 2623 (v8i16 (MVE_VMVNimmi16 nImmSplatI16:$simm))>; 2624 def : Pat<(v4i32 (ARMvmvnImm timm:$simm)), 2625 (v4i32 (MVE_VMVNimmi32 nImmVMOVI32:$simm))>; 2626 2627 def : Pat<(v4f32 (ARMvmovFPImm timm:$simm)), 2628 (v4f32 (MVE_VMOVimmf32 nImmVMOVF32:$simm))>; 2629 2630 def : Pat<(v8i16 (vselect (v8i1 VCCR:$pred), (ARMvmvnImm timm:$simm), 2631 MQPR:$inactive)), 2632 (v8i16 (MVE_VMVNimmi16 nImmSplatI16:$simm, 2633 ARMVCCThen, VCCR:$pred, MQPR:$inactive))>; 2634 def : Pat<(v4i32 (vselect (v4i1 VCCR:$pred), (ARMvmvnImm timm:$simm), 2635 MQPR:$inactive)), 2636 (v4i32 (MVE_VMVNimmi32 nImmSplatI32:$simm, 2637 ARMVCCThen, VCCR:$pred, MQPR:$inactive))>; 2638} 2639 2640class MVE_VMINMAXA<string iname, string suffix, bits<2> size, 2641 bit bit_12, list<dag> pattern=[]> 2642 : MVE_p<(outs MQPR:$Qd), (ins MQPR:$Qd_src, MQPR:$Qm), 2643 NoItinerary, iname, suffix, "$Qd, $Qm", vpred_n, "$Qd = $Qd_src", 2644 pattern> { 2645 bits<4> Qd; 2646 bits<4> Qm; 2647 2648 let Inst{28} = 
0b0; 2649 let Inst{25-23} = 0b100; 2650 let Inst{22} = Qd{3}; 2651 let Inst{21-20} = 0b11; 2652 let Inst{19-18} = size; 2653 let Inst{17-16} = 0b11; 2654 let Inst{15-13} = Qd{2-0}; 2655 let Inst{12} = bit_12; 2656 let Inst{11-6} = 0b111010; 2657 let Inst{5} = Qm{3}; 2658 let Inst{4} = 0b0; 2659 let Inst{3-1} = Qm{2-0}; 2660 let Inst{0} = 0b1; 2661 let validForTailPredication = 1; 2662} 2663 2664multiclass MVE_VMINMAXA_m<string iname, MVEVectorVTInfo VTI, 2665 SDNode unpred_op, Intrinsic pred_int, bit bit_12> { 2666 def "" : MVE_VMINMAXA<iname, VTI.Suffix, VTI.Size, bit_12>; 2667 defvar Inst = !cast<Instruction>(NAME); 2668 2669 let Predicates = [HasMVEInt] in { 2670 // Unpredicated v(min|max)a 2671 def : Pat<(VTI.Vec (unpred_op (VTI.Vec MQPR:$Qd), (abs (VTI.Vec MQPR:$Qm)))), 2672 (VTI.Vec (Inst (VTI.Vec MQPR:$Qd), (VTI.Vec MQPR:$Qm)))>; 2673 2674 // Predicated v(min|max)a 2675 def : Pat<(VTI.Vec (pred_int (VTI.Vec MQPR:$Qd), (VTI.Vec MQPR:$Qm), 2676 (VTI.Pred VCCR:$mask))), 2677 (VTI.Vec (Inst (VTI.Vec MQPR:$Qd), (VTI.Vec MQPR:$Qm), 2678 ARMVCCThen, (VTI.Pred VCCR:$mask)))>; 2679 } 2680} 2681 2682multiclass MVE_VMINA<MVEVectorVTInfo VTI> 2683 : MVE_VMINMAXA_m<"vmina", VTI, umin, int_arm_mve_vmina_predicated, 0b1>; 2684 2685defm MVE_VMINAs8 : MVE_VMINA<MVE_v16s8>; 2686defm MVE_VMINAs16 : MVE_VMINA<MVE_v8s16>; 2687defm MVE_VMINAs32 : MVE_VMINA<MVE_v4s32>; 2688 2689multiclass MVE_VMAXA<MVEVectorVTInfo VTI> 2690 : MVE_VMINMAXA_m<"vmaxa", VTI, umax, int_arm_mve_vmaxa_predicated, 0b0>; 2691 2692defm MVE_VMAXAs8 : MVE_VMAXA<MVE_v16s8>; 2693defm MVE_VMAXAs16 : MVE_VMAXA<MVE_v8s16>; 2694defm MVE_VMAXAs32 : MVE_VMAXA<MVE_v4s32>; 2695 2696// end of MVE Integer instructions 2697 2698// start of mve_imm_shift instructions 2699 2700def MVE_VSHLC : MVE_p<(outs rGPR:$RdmDest, MQPR:$Qd), 2701 (ins MQPR:$QdSrc, rGPR:$RdmSrc, long_shift:$imm), 2702 NoItinerary, "vshlc", "", "$QdSrc, $RdmSrc, $imm", 2703 vpred_n, "$RdmDest = $RdmSrc,$Qd = $QdSrc"> { 2704 bits<5> imm; 2705 bits<4> Qd; 
2706 bits<4> RdmDest; 2707 2708 let Inst{28} = 0b0; 2709 let Inst{25-23} = 0b101; 2710 let Inst{22} = Qd{3}; 2711 let Inst{21} = 0b1; 2712 let Inst{20-16} = imm{4-0}; 2713 let Inst{15-13} = Qd{2-0}; 2714 let Inst{12-4} = 0b011111100; 2715 let Inst{3-0} = RdmDest{3-0}; 2716} 2717 2718class MVE_shift_imm<dag oops, dag iops, string iname, string suffix, 2719 string ops, vpred_ops vpred, string cstr, 2720 list<dag> pattern=[]> 2721 : MVE_p<oops, iops, NoItinerary, iname, suffix, ops, vpred, cstr, pattern> { 2722 bits<4> Qd; 2723 bits<4> Qm; 2724 2725 let Inst{22} = Qd{3}; 2726 let Inst{15-13} = Qd{2-0}; 2727 let Inst{5} = Qm{3}; 2728 let Inst{3-1} = Qm{2-0}; 2729} 2730 2731class MVE_VMOVL<string iname, string suffix, bits<2> sz, bit U, bit top, 2732 list<dag> pattern=[]> 2733 : MVE_shift_imm<(outs MQPR:$Qd), (ins MQPR:$Qm), 2734 iname, suffix, "$Qd, $Qm", vpred_r, "", 2735 pattern> { 2736 let Inst{28} = U; 2737 let Inst{25-23} = 0b101; 2738 let Inst{21} = 0b1; 2739 let Inst{20-19} = sz{1-0}; 2740 let Inst{18-16} = 0b000; 2741 let Inst{12} = top; 2742 let Inst{11-6} = 0b111101; 2743 let Inst{4} = 0b0; 2744 let Inst{0} = 0b0; 2745 let doubleWidthResult = 1; 2746} 2747 2748multiclass MVE_VMOVL_m<bit top, string chr, MVEVectorVTInfo OutVTI, 2749 MVEVectorVTInfo InVTI> { 2750 def "": MVE_VMOVL<"vmovl" # chr, InVTI.Suffix, OutVTI.Size, 2751 InVTI.Unsigned, top>; 2752 defvar Inst = !cast<Instruction>(NAME); 2753 2754 def : Pat<(OutVTI.Vec (int_arm_mve_vmovl_predicated (InVTI.Vec MQPR:$src), 2755 (i32 InVTI.Unsigned), (i32 top), 2756 (OutVTI.Pred VCCR:$pred), 2757 (OutVTI.Vec MQPR:$inactive))), 2758 (OutVTI.Vec (Inst (InVTI.Vec MQPR:$src), ARMVCCThen, 2759 (OutVTI.Pred VCCR:$pred), 2760 (OutVTI.Vec MQPR:$inactive)))>; 2761} 2762 2763defm MVE_VMOVLs8bh : MVE_VMOVL_m<0, "b", MVE_v8s16, MVE_v16s8>; 2764defm MVE_VMOVLs8th : MVE_VMOVL_m<1, "t", MVE_v8s16, MVE_v16s8>; 2765defm MVE_VMOVLu8bh : MVE_VMOVL_m<0, "b", MVE_v8u16, MVE_v16u8>; 2766defm MVE_VMOVLu8th : MVE_VMOVL_m<1, "t", 
MVE_v8u16, MVE_v16u8>; 2767defm MVE_VMOVLs16bh : MVE_VMOVL_m<0, "b", MVE_v4s32, MVE_v8s16>; 2768defm MVE_VMOVLs16th : MVE_VMOVL_m<1, "t", MVE_v4s32, MVE_v8s16>; 2769defm MVE_VMOVLu16bh : MVE_VMOVL_m<0, "b", MVE_v4s32, MVE_v8u16>; 2770defm MVE_VMOVLu16th : MVE_VMOVL_m<1, "t", MVE_v4s32, MVE_v8u16>; 2771 2772let Predicates = [HasMVEInt] in { 2773 def : Pat<(sext_inreg (v4i32 MQPR:$src), v4i16), 2774 (MVE_VMOVLs16bh MQPR:$src)>; 2775 def : Pat<(sext_inreg (v8i16 MQPR:$src), v8i8), 2776 (MVE_VMOVLs8bh MQPR:$src)>; 2777 def : Pat<(sext_inreg (v4i32 MQPR:$src), v4i8), 2778 (MVE_VMOVLs16bh (MVE_VMOVLs8bh MQPR:$src))>; 2779 2780 def : Pat<(sext_inreg (v8i16 (ARMVectorRegCast (ARMvrev16 (v16i8 MQPR:$src)))), v8i8), 2781 (MVE_VMOVLs8th MQPR:$src)>; 2782 def : Pat<(sext_inreg (v4i32 (ARMVectorRegCast (ARMvrev32 (v8i16 MQPR:$src)))), v4i16), 2783 (MVE_VMOVLs16th MQPR:$src)>; 2784 2785 // zext_inreg 8 -> 16 2786 def : Pat<(ARMvbicImm (v8i16 MQPR:$src), (i32 0xAFF)), 2787 (MVE_VMOVLu8bh MQPR:$src)>; 2788 // zext_inreg 16 -> 32 2789 def : Pat<(and (v4i32 MQPR:$src), (v4i32 (ARMvmovImm (i32 0xCFF)))), 2790 (MVE_VMOVLu16bh MQPR:$src)>; 2791 // Same zext_inreg with vrevs, picking the top half 2792 def : Pat<(ARMvbicImm (v8i16 (ARMVectorRegCast (ARMvrev16 (v16i8 MQPR:$src)))), (i32 0xAFF)), 2793 (MVE_VMOVLu8th MQPR:$src)>; 2794 def : Pat<(and (v4i32 (ARMVectorRegCast (ARMvrev32 (v8i16 MQPR:$src)))), 2795 (v4i32 (ARMvmovImm (i32 0xCFF)))), 2796 (MVE_VMOVLu16th MQPR:$src)>; 2797} 2798 2799 2800class MVE_VSHLL_imm<string iname, string suffix, bit U, bit th, 2801 Operand immtype, list<dag> pattern=[]> 2802 : MVE_shift_imm<(outs MQPR:$Qd), (ins MQPR:$Qm, immtype:$imm), 2803 iname, suffix, "$Qd, $Qm, $imm", vpred_r, "", pattern> { 2804 let Inst{28} = U; 2805 let Inst{25-23} = 0b101; 2806 let Inst{21} = 0b1; 2807 let Inst{12} = th; 2808 let Inst{11-6} = 0b111101; 2809 let Inst{4} = 0b0; 2810 let Inst{0} = 0b0; 2811 2812 // For the MVE_VSHLL_patterns multiclass to refer to 2813 Operand 
immediateType = immtype; 2814 2815 let doubleWidthResult = 1; 2816} 2817 2818// The immediate VSHLL instructions accept shift counts from 1 up to 2819// the lane width (8 or 16), but the full-width shifts have an 2820// entirely separate encoding, given below with 'lw' in the name. 2821 2822class MVE_VSHLL_imm8<string iname, string suffix, 2823 bit U, bit th, list<dag> pattern=[]> 2824 : MVE_VSHLL_imm<iname, suffix, U, th, mve_shift_imm1_7, pattern> { 2825 bits<3> imm; 2826 let Inst{20-19} = 0b01; 2827 let Inst{18-16} = imm; 2828} 2829 2830class MVE_VSHLL_imm16<string iname, string suffix, 2831 bit U, bit th, list<dag> pattern=[]> 2832 : MVE_VSHLL_imm<iname, suffix, U, th, mve_shift_imm1_15, pattern> { 2833 bits<4> imm; 2834 let Inst{20} = 0b1; 2835 let Inst{19-16} = imm; 2836} 2837 2838def MVE_VSHLL_imms8bh : MVE_VSHLL_imm8 <"vshllb", "s8", 0b0, 0b0>; 2839def MVE_VSHLL_imms8th : MVE_VSHLL_imm8 <"vshllt", "s8", 0b0, 0b1>; 2840def MVE_VSHLL_immu8bh : MVE_VSHLL_imm8 <"vshllb", "u8", 0b1, 0b0>; 2841def MVE_VSHLL_immu8th : MVE_VSHLL_imm8 <"vshllt", "u8", 0b1, 0b1>; 2842def MVE_VSHLL_imms16bh : MVE_VSHLL_imm16<"vshllb", "s16", 0b0, 0b0>; 2843def MVE_VSHLL_imms16th : MVE_VSHLL_imm16<"vshllt", "s16", 0b0, 0b1>; 2844def MVE_VSHLL_immu16bh : MVE_VSHLL_imm16<"vshllb", "u16", 0b1, 0b0>; 2845def MVE_VSHLL_immu16th : MVE_VSHLL_imm16<"vshllt", "u16", 0b1, 0b1>; 2846 2847class MVE_VSHLL_by_lane_width<string iname, string suffix, bits<2> size, 2848 bit U, string ops, list<dag> pattern=[]> 2849 : MVE_shift_imm<(outs MQPR:$Qd), (ins MQPR:$Qm), 2850 iname, suffix, ops, vpred_r, "", pattern> { 2851 let Inst{28} = U; 2852 let Inst{25-23} = 0b100; 2853 let Inst{21-20} = 0b11; 2854 let Inst{19-18} = size{1-0}; 2855 let Inst{17-16} = 0b01; 2856 let Inst{11-6} = 0b111000; 2857 let Inst{4} = 0b0; 2858 let Inst{0} = 0b1; 2859 let doubleWidthResult = 1; 2860} 2861 2862multiclass MVE_VSHLL_lw<string iname, string suffix, bits<2> sz, bit U, 2863 string ops, list<dag> pattern=[]> { 2864 def bh : 
MVE_VSHLL_by_lane_width<iname#"b", suffix, sz, U, ops, pattern> { 2865 let Inst{12} = 0b0; 2866 } 2867 def th : MVE_VSHLL_by_lane_width<iname#"t", suffix, sz, U, ops, pattern> { 2868 let Inst{12} = 0b1; 2869 } 2870} 2871 2872defm MVE_VSHLL_lws8 : MVE_VSHLL_lw<"vshll", "s8", 0b00, 0b0, "$Qd, $Qm, #8">; 2873defm MVE_VSHLL_lws16 : MVE_VSHLL_lw<"vshll", "s16", 0b01, 0b0, "$Qd, $Qm, #16">; 2874defm MVE_VSHLL_lwu8 : MVE_VSHLL_lw<"vshll", "u8", 0b00, 0b1, "$Qd, $Qm, #8">; 2875defm MVE_VSHLL_lwu16 : MVE_VSHLL_lw<"vshll", "u16", 0b01, 0b1, "$Qd, $Qm, #16">; 2876 2877multiclass MVE_VSHLL_patterns<MVEVectorVTInfo VTI, int top> { 2878 defvar suffix = !strconcat(VTI.Suffix, !if(top, "th", "bh")); 2879 defvar inst_imm = !cast<MVE_VSHLL_imm>("MVE_VSHLL_imm" # suffix); 2880 defvar inst_lw = !cast<MVE_VSHLL_by_lane_width>("MVE_VSHLL_lw" # suffix); 2881 defvar unpred_int = int_arm_mve_vshll_imm; 2882 defvar pred_int = int_arm_mve_vshll_imm_predicated; 2883 defvar imm = inst_imm.immediateType; 2884 2885 def : Pat<(VTI.DblVec (unpred_int (VTI.Vec MQPR:$src), imm:$imm, 2886 (i32 VTI.Unsigned), (i32 top))), 2887 (VTI.DblVec (inst_imm (VTI.Vec MQPR:$src), imm:$imm))>; 2888 def : Pat<(VTI.DblVec (unpred_int (VTI.Vec MQPR:$src), (i32 VTI.LaneBits), 2889 (i32 VTI.Unsigned), (i32 top))), 2890 (VTI.DblVec (inst_lw (VTI.Vec MQPR:$src)))>; 2891 2892 def : Pat<(VTI.DblVec (pred_int (VTI.Vec MQPR:$src), imm:$imm, 2893 (i32 VTI.Unsigned), (i32 top), 2894 (VTI.DblPred VCCR:$mask), 2895 (VTI.DblVec MQPR:$inactive))), 2896 (VTI.DblVec (inst_imm (VTI.Vec MQPR:$src), imm:$imm, 2897 ARMVCCThen, (VTI.DblPred VCCR:$mask), 2898 (VTI.DblVec MQPR:$inactive)))>; 2899 def : Pat<(VTI.DblVec (pred_int (VTI.Vec MQPR:$src), (i32 VTI.LaneBits), 2900 (i32 VTI.Unsigned), (i32 top), 2901 (VTI.DblPred VCCR:$mask), 2902 (VTI.DblVec MQPR:$inactive))), 2903 (VTI.DblVec (inst_lw (VTI.Vec MQPR:$src), ARMVCCThen, 2904 (VTI.DblPred VCCR:$mask), 2905 (VTI.DblVec MQPR:$inactive)))>; 2906} 2907 2908foreach VTI = [MVE_v16s8, 
MVE_v8s16, MVE_v16u8, MVE_v8u16] in 2909 foreach top = [0, 1] in 2910 defm : MVE_VSHLL_patterns<VTI, top>; 2911 2912class MVE_shift_imm_partial<Operand imm, string iname, string suffix> 2913 : MVE_shift_imm<(outs MQPR:$Qd), (ins MQPR:$QdSrc, MQPR:$Qm, imm:$imm), 2914 iname, suffix, "$Qd, $Qm, $imm", vpred_n, "$Qd = $QdSrc"> { 2915 Operand immediateType = imm; 2916} 2917 2918class MVE_VxSHRN<string iname, string suffix, bit bit_12, bit bit_28, 2919 Operand imm, list<dag> pattern=[]> 2920 : MVE_shift_imm_partial<imm, iname, suffix> { 2921 bits<5> imm; 2922 2923 let Inst{28} = bit_28; 2924 let Inst{25-23} = 0b101; 2925 let Inst{21} = 0b0; 2926 let Inst{20-16} = imm{4-0}; 2927 let Inst{12} = bit_12; 2928 let Inst{11-6} = 0b111111; 2929 let Inst{4} = 0b0; 2930 let Inst{0} = 0b1; 2931 let validForTailPredication = 1; 2932 let retainsPreviousHalfElement = 1; 2933} 2934 2935def MVE_VRSHRNi16bh : MVE_VxSHRN<"vrshrnb", "i16", 0b0, 0b1, shr_imm8> { 2936 let Inst{20-19} = 0b01; 2937} 2938def MVE_VRSHRNi16th : MVE_VxSHRN<"vrshrnt", "i16", 0b1, 0b1, shr_imm8> { 2939 let Inst{20-19} = 0b01; 2940} 2941def MVE_VRSHRNi32bh : MVE_VxSHRN<"vrshrnb", "i32", 0b0, 0b1, shr_imm16> { 2942 let Inst{20} = 0b1; 2943} 2944def MVE_VRSHRNi32th : MVE_VxSHRN<"vrshrnt", "i32", 0b1, 0b1, shr_imm16> { 2945 let Inst{20} = 0b1; 2946} 2947 2948def MVE_VSHRNi16bh : MVE_VxSHRN<"vshrnb", "i16", 0b0, 0b0, shr_imm8> { 2949 let Inst{20-19} = 0b01; 2950} 2951def MVE_VSHRNi16th : MVE_VxSHRN<"vshrnt", "i16", 0b1, 0b0, shr_imm8> { 2952 let Inst{20-19} = 0b01; 2953} 2954def MVE_VSHRNi32bh : MVE_VxSHRN<"vshrnb", "i32", 0b0, 0b0, shr_imm16> { 2955 let Inst{20} = 0b1; 2956} 2957def MVE_VSHRNi32th : MVE_VxSHRN<"vshrnt", "i32", 0b1, 0b0, shr_imm16> { 2958 let Inst{20} = 0b1; 2959} 2960 2961class MVE_VxQRSHRUN<string iname, string suffix, bit bit_28, bit bit_12, 2962 Operand imm, list<dag> pattern=[]> 2963 : MVE_shift_imm_partial<imm, iname, suffix> { 2964 bits<5> imm; 2965 2966 let Inst{28} = bit_28; 2967 let 
Inst{25-23} = 0b101; 2968 let Inst{21} = 0b0; 2969 let Inst{20-16} = imm{4-0}; 2970 let Inst{12} = bit_12; 2971 let Inst{11-6} = 0b111111; 2972 let Inst{4} = 0b0; 2973 let Inst{0} = 0b0; 2974 let validForTailPredication = 1; 2975 let retainsPreviousHalfElement = 1; 2976} 2977 2978def MVE_VQRSHRUNs16bh : MVE_VxQRSHRUN< 2979 "vqrshrunb", "s16", 0b1, 0b0, shr_imm8> { 2980 let Inst{20-19} = 0b01; 2981} 2982def MVE_VQRSHRUNs16th : MVE_VxQRSHRUN< 2983 "vqrshrunt", "s16", 0b1, 0b1, shr_imm8> { 2984 let Inst{20-19} = 0b01; 2985} 2986def MVE_VQRSHRUNs32bh : MVE_VxQRSHRUN< 2987 "vqrshrunb", "s32", 0b1, 0b0, shr_imm16> { 2988 let Inst{20} = 0b1; 2989} 2990def MVE_VQRSHRUNs32th : MVE_VxQRSHRUN< 2991 "vqrshrunt", "s32", 0b1, 0b1, shr_imm16> { 2992 let Inst{20} = 0b1; 2993} 2994 2995def MVE_VQSHRUNs16bh : MVE_VxQRSHRUN< 2996 "vqshrunb", "s16", 0b0, 0b0, shr_imm8> { 2997 let Inst{20-19} = 0b01; 2998} 2999def MVE_VQSHRUNs16th : MVE_VxQRSHRUN< 3000 "vqshrunt", "s16", 0b0, 0b1, shr_imm8> { 3001 let Inst{20-19} = 0b01; 3002} 3003def MVE_VQSHRUNs32bh : MVE_VxQRSHRUN< 3004 "vqshrunb", "s32", 0b0, 0b0, shr_imm16> { 3005 let Inst{20} = 0b1; 3006} 3007def MVE_VQSHRUNs32th : MVE_VxQRSHRUN< 3008 "vqshrunt", "s32", 0b0, 0b1, shr_imm16> { 3009 let Inst{20} = 0b1; 3010} 3011 3012class MVE_VxQRSHRN<string iname, string suffix, bit bit_0, bit bit_12, 3013 Operand imm, list<dag> pattern=[]> 3014 : MVE_shift_imm_partial<imm, iname, suffix> { 3015 bits<5> imm; 3016 3017 let Inst{25-23} = 0b101; 3018 let Inst{21} = 0b0; 3019 let Inst{20-16} = imm{4-0}; 3020 let Inst{12} = bit_12; 3021 let Inst{11-6} = 0b111101; 3022 let Inst{4} = 0b0; 3023 let Inst{0} = bit_0; 3024 let validForTailPredication = 1; 3025 let retainsPreviousHalfElement = 1; 3026} 3027 3028multiclass MVE_VxQRSHRN_types<string iname, bit bit_0, bit bit_12> { 3029 def s16 : MVE_VxQRSHRN<iname, "s16", bit_0, bit_12, shr_imm8> { 3030 let Inst{28} = 0b0; 3031 let Inst{20-19} = 0b01; 3032 } 3033 def u16 : MVE_VxQRSHRN<iname, "u16", bit_0, 
bit_12, shr_imm8> { 3034 let Inst{28} = 0b1; 3035 let Inst{20-19} = 0b01; 3036 } 3037 def s32 : MVE_VxQRSHRN<iname, "s32", bit_0, bit_12, shr_imm16> { 3038 let Inst{28} = 0b0; 3039 let Inst{20} = 0b1; 3040 } 3041 def u32 : MVE_VxQRSHRN<iname, "u32", bit_0, bit_12, shr_imm16> { 3042 let Inst{28} = 0b1; 3043 let Inst{20} = 0b1; 3044 } 3045} 3046 3047defm MVE_VQRSHRNbh : MVE_VxQRSHRN_types<"vqrshrnb", 0b1, 0b0>; 3048defm MVE_VQRSHRNth : MVE_VxQRSHRN_types<"vqrshrnt", 0b1, 0b1>; 3049defm MVE_VQSHRNbh : MVE_VxQRSHRN_types<"vqshrnb", 0b0, 0b0>; 3050defm MVE_VQSHRNth : MVE_VxQRSHRN_types<"vqshrnt", 0b0, 0b1>; 3051 3052multiclass MVE_VSHRN_patterns<MVE_shift_imm_partial inst, 3053 MVEVectorVTInfo OutVTI, MVEVectorVTInfo InVTI, 3054 bit q, bit r, bit top> { 3055 defvar inparams = (? (OutVTI.Vec MQPR:$QdSrc), (InVTI.Vec MQPR:$Qm), 3056 (inst.immediateType:$imm), (i32 q), (i32 r), 3057 (i32 OutVTI.Unsigned), (i32 InVTI.Unsigned), (i32 top)); 3058 defvar outparams = (inst (OutVTI.Vec MQPR:$QdSrc), (InVTI.Vec MQPR:$Qm), 3059 (imm:$imm)); 3060 3061 def : Pat<(OutVTI.Vec !setdagop(inparams, int_arm_mve_vshrn)), 3062 (OutVTI.Vec outparams)>; 3063 def : Pat<(OutVTI.Vec !con(inparams, (int_arm_mve_vshrn_predicated 3064 (InVTI.Pred VCCR:$pred)))), 3065 (OutVTI.Vec !con(outparams, (? 
ARMVCCThen, VCCR:$pred)))>; 3066} 3067 3068defm : MVE_VSHRN_patterns<MVE_VSHRNi16bh, MVE_v16s8, MVE_v8s16, 0,0,0>; 3069defm : MVE_VSHRN_patterns<MVE_VSHRNi16th, MVE_v16s8, MVE_v8s16, 0,0,1>; 3070defm : MVE_VSHRN_patterns<MVE_VSHRNi32bh, MVE_v8s16, MVE_v4s32, 0,0,0>; 3071defm : MVE_VSHRN_patterns<MVE_VSHRNi32th, MVE_v8s16, MVE_v4s32, 0,0,1>; 3072defm : MVE_VSHRN_patterns<MVE_VSHRNi16bh, MVE_v16u8, MVE_v8u16, 0,0,0>; 3073defm : MVE_VSHRN_patterns<MVE_VSHRNi16th, MVE_v16u8, MVE_v8u16, 0,0,1>; 3074defm : MVE_VSHRN_patterns<MVE_VSHRNi32bh, MVE_v8u16, MVE_v4u32, 0,0,0>; 3075defm : MVE_VSHRN_patterns<MVE_VSHRNi32th, MVE_v8u16, MVE_v4u32, 0,0,1>; 3076defm : MVE_VSHRN_patterns<MVE_VRSHRNi16bh, MVE_v16s8, MVE_v8s16, 0,1,0>; 3077defm : MVE_VSHRN_patterns<MVE_VRSHRNi16th, MVE_v16s8, MVE_v8s16, 0,1,1>; 3078defm : MVE_VSHRN_patterns<MVE_VRSHRNi32bh, MVE_v8s16, MVE_v4s32, 0,1,0>; 3079defm : MVE_VSHRN_patterns<MVE_VRSHRNi32th, MVE_v8s16, MVE_v4s32, 0,1,1>; 3080defm : MVE_VSHRN_patterns<MVE_VRSHRNi16bh, MVE_v16u8, MVE_v8u16, 0,1,0>; 3081defm : MVE_VSHRN_patterns<MVE_VRSHRNi16th, MVE_v16u8, MVE_v8u16, 0,1,1>; 3082defm : MVE_VSHRN_patterns<MVE_VRSHRNi32bh, MVE_v8u16, MVE_v4u32, 0,1,0>; 3083defm : MVE_VSHRN_patterns<MVE_VRSHRNi32th, MVE_v8u16, MVE_v4u32, 0,1,1>; 3084defm : MVE_VSHRN_patterns<MVE_VQSHRNbhs16, MVE_v16s8, MVE_v8s16, 1,0,0>; 3085defm : MVE_VSHRN_patterns<MVE_VQSHRNths16, MVE_v16s8, MVE_v8s16, 1,0,1>; 3086defm : MVE_VSHRN_patterns<MVE_VQSHRNbhs32, MVE_v8s16, MVE_v4s32, 1,0,0>; 3087defm : MVE_VSHRN_patterns<MVE_VQSHRNths32, MVE_v8s16, MVE_v4s32, 1,0,1>; 3088defm : MVE_VSHRN_patterns<MVE_VQSHRNbhu16, MVE_v16u8, MVE_v8u16, 1,0,0>; 3089defm : MVE_VSHRN_patterns<MVE_VQSHRNthu16, MVE_v16u8, MVE_v8u16, 1,0,1>; 3090defm : MVE_VSHRN_patterns<MVE_VQSHRNbhu32, MVE_v8u16, MVE_v4u32, 1,0,0>; 3091defm : MVE_VSHRN_patterns<MVE_VQSHRNthu32, MVE_v8u16, MVE_v4u32, 1,0,1>; 3092defm : MVE_VSHRN_patterns<MVE_VQRSHRNbhs16, MVE_v16s8, MVE_v8s16, 1,1,0>; 3093defm : 
MVE_VSHRN_patterns<MVE_VQRSHRNths16, MVE_v16s8, MVE_v8s16, 1,1,1>; 3094defm : MVE_VSHRN_patterns<MVE_VQRSHRNbhs32, MVE_v8s16, MVE_v4s32, 1,1,0>; 3095defm : MVE_VSHRN_patterns<MVE_VQRSHRNths32, MVE_v8s16, MVE_v4s32, 1,1,1>; 3096defm : MVE_VSHRN_patterns<MVE_VQRSHRNbhu16, MVE_v16u8, MVE_v8u16, 1,1,0>; 3097defm : MVE_VSHRN_patterns<MVE_VQRSHRNthu16, MVE_v16u8, MVE_v8u16, 1,1,1>; 3098defm : MVE_VSHRN_patterns<MVE_VQRSHRNbhu32, MVE_v8u16, MVE_v4u32, 1,1,0>; 3099defm : MVE_VSHRN_patterns<MVE_VQRSHRNthu32, MVE_v8u16, MVE_v4u32, 1,1,1>; 3100defm : MVE_VSHRN_patterns<MVE_VQSHRUNs16bh, MVE_v16u8, MVE_v8s16, 1,0,0>; 3101defm : MVE_VSHRN_patterns<MVE_VQSHRUNs16th, MVE_v16u8, MVE_v8s16, 1,0,1>; 3102defm : MVE_VSHRN_patterns<MVE_VQSHRUNs32bh, MVE_v8u16, MVE_v4s32, 1,0,0>; 3103defm : MVE_VSHRN_patterns<MVE_VQSHRUNs32th, MVE_v8u16, MVE_v4s32, 1,0,1>; 3104defm : MVE_VSHRN_patterns<MVE_VQRSHRUNs16bh, MVE_v16u8, MVE_v8s16, 1,1,0>; 3105defm : MVE_VSHRN_patterns<MVE_VQRSHRUNs16th, MVE_v16u8, MVE_v8s16, 1,1,1>; 3106defm : MVE_VSHRN_patterns<MVE_VQRSHRUNs32bh, MVE_v8u16, MVE_v4s32, 1,1,0>; 3107defm : MVE_VSHRN_patterns<MVE_VQRSHRUNs32th, MVE_v8u16, MVE_v4s32, 1,1,1>; 3108 3109// end of mve_imm_shift instructions 3110 3111// start of mve_shift instructions 3112 3113class MVE_shift_by_vec<string iname, string suffix, bit U, 3114 bits<2> size, bit bit_4, bit bit_8> 3115 : MVE_p<(outs MQPR:$Qd), (ins MQPR:$Qm, MQPR:$Qn), NoItinerary, 3116 iname, suffix, "$Qd, $Qm, $Qn", vpred_r, "", []> { 3117 // Shift instructions which take a vector of shift counts 3118 bits<4> Qd; 3119 bits<4> Qm; 3120 bits<4> Qn; 3121 3122 let Inst{28} = U; 3123 let Inst{25-24} = 0b11; 3124 let Inst{23} = 0b0; 3125 let Inst{22} = Qd{3}; 3126 let Inst{21-20} = size; 3127 let Inst{19-17} = Qn{2-0}; 3128 let Inst{16} = 0b0; 3129 let Inst{15-13} = Qd{2-0}; 3130 let Inst{12-9} = 0b0010; 3131 let Inst{8} = bit_8; 3132 let Inst{7} = Qn{3}; 3133 let Inst{6} = 0b1; 3134 let Inst{5} = Qm{3}; 3135 let Inst{4} = bit_4; 3136 let 
Inst{3-1} = Qm{2-0};
  let Inst{0} = 0b0;
  let validForTailPredication = 1;
}

// Instantiate one shift-by-vector instruction for the given vector type,
// plus ISel patterns selecting the unpredicated and predicated MVE
// shift-by-vector intrinsics onto it.
multiclass MVE_shift_by_vec_p<string iname, MVEVectorVTInfo VTI, bit q, bit r> {
  def "" : MVE_shift_by_vec<iname, VTI.Suffix, VTI.Unsigned, VTI.Size, q, r>;
  defvar Inst = !cast<Instruction>(NAME);

  def : Pat<(VTI.Vec (int_arm_mve_vshl_vector
                         (VTI.Vec MQPR:$in), (VTI.Vec MQPR:$sh),
                         (i32 q), (i32 r), (i32 VTI.Unsigned))),
            (VTI.Vec (Inst (VTI.Vec MQPR:$in), (VTI.Vec MQPR:$sh)))>;

  def : Pat<(VTI.Vec (int_arm_mve_vshl_vector_predicated
                         (VTI.Vec MQPR:$in), (VTI.Vec MQPR:$sh),
                         (i32 q), (i32 r), (i32 VTI.Unsigned),
                         (VTI.Pred VCCR:$mask), (VTI.Vec MQPR:$inactive))),
            (VTI.Vec (Inst (VTI.Vec MQPR:$in), (VTI.Vec MQPR:$sh),
                           ARMVCCThen, (VTI.Pred VCCR:$mask),
                           (VTI.Vec MQPR:$inactive)))>;
}

// All six element-type variants (s8/s16/s32/u8/u16/u32) of one
// shift-by-vector mnemonic.
multiclass mve_shift_by_vec_multi<string iname, bit bit_4, bit bit_8> {
  defm s8  : MVE_shift_by_vec_p<iname, MVE_v16s8, bit_4, bit_8>;
  defm s16 : MVE_shift_by_vec_p<iname, MVE_v8s16, bit_4, bit_8>;
  defm s32 : MVE_shift_by_vec_p<iname, MVE_v4s32, bit_4, bit_8>;
  defm u8  : MVE_shift_by_vec_p<iname, MVE_v16u8, bit_4, bit_8>;
  defm u16 : MVE_shift_by_vec_p<iname, MVE_v8u16, bit_4, bit_8>;
  defm u32 : MVE_shift_by_vec_p<iname, MVE_v4u32, bit_4, bit_8>;
}

defm MVE_VSHL_by_vec   : mve_shift_by_vec_multi<"vshl",   0b0, 0b0>;
defm MVE_VQSHL_by_vec  : mve_shift_by_vec_multi<"vqshl",  0b1, 0b0>;
defm MVE_VQRSHL_by_vec : mve_shift_by_vec_multi<"vqrshl", 0b1, 0b1>;
defm MVE_VRSHL_by_vec  : mve_shift_by_vec_multi<"vrshl",  0b0, 0b1>;

// Plain ISD shift-by-vector nodes select the VSHL (non-saturating,
// non-rounding) forms.
let Predicates = [HasMVEInt] in {
  def : Pat<(v4i32 (ARMvshlu (v4i32 MQPR:$Qm), (v4i32 MQPR:$Qn))),
            (v4i32 (MVE_VSHL_by_vecu32 (v4i32 MQPR:$Qm), (v4i32 MQPR:$Qn)))>;
  def : Pat<(v8i16 (ARMvshlu (v8i16 MQPR:$Qm), (v8i16 MQPR:$Qn))),
            (v8i16 (MVE_VSHL_by_vecu16 (v8i16 MQPR:$Qm), (v8i16 MQPR:$Qn)))>;
  def : Pat<(v16i8 (ARMvshlu (v16i8 MQPR:$Qm), (v16i8 MQPR:$Qn))),
            (v16i8 (MVE_VSHL_by_vecu8 (v16i8 MQPR:$Qm), (v16i8 MQPR:$Qn)))>;

  def : Pat<(v4i32 (ARMvshls (v4i32 MQPR:$Qm), (v4i32 MQPR:$Qn))),
            (v4i32 (MVE_VSHL_by_vecs32 (v4i32 MQPR:$Qm), (v4i32 MQPR:$Qn)))>;
  def : Pat<(v8i16 (ARMvshls (v8i16 MQPR:$Qm), (v8i16 MQPR:$Qn))),
            (v8i16 (MVE_VSHL_by_vecs16 (v8i16 MQPR:$Qm), (v8i16 MQPR:$Qn)))>;
  def : Pat<(v16i8 (ARMvshls (v16i8 MQPR:$Qm), (v16i8 MQPR:$Qn))),
            (v16i8 (MVE_VSHL_by_vecs8 (v16i8 MQPR:$Qm), (v16i8 MQPR:$Qn)))>;
}

// Base class for MVE vector shifts by an immediate amount. Carries the
// common Qd/Qm encoding bits, plus extra fields (VTI, immediateType,
// unpred_int, pred_int, unsignedFlag) that MVE_shift_imm_patterns below
// reads off the instruction record.
class MVE_shift_with_imm<string iname, string suffix, dag oops, dag iops,
                         string ops, vpred_ops vpred, string cstr,
                         list<dag> pattern=[]>
  : MVE_p<oops, iops, NoItinerary, iname, suffix, ops, vpred, cstr, pattern> {
  bits<4> Qd;
  bits<4> Qm;

  let Inst{23} = 0b1;
  let Inst{22} = Qd{3};
  let Inst{15-13} = Qd{2-0};
  let Inst{12-11} = 0b00;
  let Inst{7-6} = 0b01;
  let Inst{5} = Qm{3};
  let Inst{4} = 0b1;
  let Inst{3-1} = Qm{2-0};
  let Inst{0} = 0b0;
  let validForTailPredication = 1;

  // For the MVE_shift_imm_patterns multiclass to refer to
  MVEVectorVTInfo VTI;
  Operand immediateType;
  Intrinsic unpred_int;
  Intrinsic pred_int;
  dag unsignedFlag = (?);
}

// VSRI/VSLI: shift-and-insert, so $Qd is both read and written
// ("$Qd = $Qd_src" tied operand).
class MVE_VSxI_imm<string iname, string suffix, bit bit_8, Operand immType>
  : MVE_shift_with_imm<iname, suffix, (outs MQPR:$Qd),
                       (ins MQPR:$Qd_src, MQPR:$Qm, immType:$imm),
                       "$Qd, $Qm, $imm", vpred_n, "$Qd = $Qd_src"> {
  bits<6> imm;
  let Inst{28} = 0b1;
  let Inst{25-24} = 0b11;
  let Inst{21-16} = imm;
  let Inst{10-9} = 0b10;
  let Inst{8} = bit_8;
  let validForTailPredication = 1;

  Operand immediateType = immType;
}

// Per-size defs override the top of the imm field to encode the lane size.
def MVE_VSRIimm8 : MVE_VSxI_imm<"vsri", "8", 0b0, shr_imm8> {
  let Inst{21-19} = 0b001;
}

def MVE_VSRIimm16 : MVE_VSxI_imm<"vsri", "16", 0b0, shr_imm16> {
  let Inst{21-20} = 0b01;
}

def MVE_VSRIimm32 : MVE_VSxI_imm<"vsri", "32", 0b0, shr_imm32> {
  let Inst{21} = 0b1;
}

def MVE_VSLIimm8 : MVE_VSxI_imm<"vsli", "8", 0b1, imm0_7> {
  let Inst{21-19} = 0b001;
}

def MVE_VSLIimm16 : MVE_VSxI_imm<"vsli", "16", 0b1, imm0_15> {
  let Inst{21-20} = 0b01;
}

def MVE_VSLIimm32 : MVE_VSxI_imm<"vsli", "32", 0b1, imm0_31> {
  let Inst{21} = 0b1;
}

// Map the vsli/vsri intrinsics (unpredicated and predicated) onto the
// corresponding shift-and-insert instruction.
multiclass MVE_VSxI_patterns<MVE_VSxI_imm inst, string name,
                             MVEVectorVTInfo VTI> {
  defvar inparams = (? (VTI.Vec MQPR:$QdSrc), (VTI.Vec MQPR:$Qm),
                       (inst.immediateType:$imm));
  defvar outparams = (inst (VTI.Vec MQPR:$QdSrc), (VTI.Vec MQPR:$Qm),
                           (inst.immediateType:$imm));
  defvar unpred_int = !cast<Intrinsic>("int_arm_mve_" # name);
  defvar pred_int = !cast<Intrinsic>("int_arm_mve_" # name # "_predicated");

  def : Pat<(VTI.Vec !setdagop(inparams, unpred_int)),
            (VTI.Vec outparams)>;
  def : Pat<(VTI.Vec !con(inparams, (pred_int (VTI.Pred VCCR:$pred)))),
            (VTI.Vec !con(outparams, (? ARMVCCThen, VCCR:$pred)))>;
}

defm : MVE_VSxI_patterns<MVE_VSLIimm8,  "vsli", MVE_v16i8>;
defm : MVE_VSxI_patterns<MVE_VSLIimm16, "vsli", MVE_v8i16>;
defm : MVE_VSxI_patterns<MVE_VSLIimm32, "vsli", MVE_v4i32>;
defm : MVE_VSxI_patterns<MVE_VSRIimm8,  "vsri", MVE_v16i8>;
defm : MVE_VSxI_patterns<MVE_VSRIimm16, "vsri", MVE_v8i16>;
defm : MVE_VSxI_patterns<MVE_VSRIimm32, "vsri", MVE_v4i32>;

// VQSHL with an immediate shift amount. Inst{28} carries signedness;
// unsignedFlag feeds the extra (i32 Unsigned) intrinsic operand into
// MVE_shift_imm_patterns.
class MVE_VQSHL_imm<MVEVectorVTInfo VTI_, Operand immType>
  : MVE_shift_with_imm<"vqshl", VTI_.Suffix, (outs MQPR:$Qd),
                       (ins MQPR:$Qm, immType:$imm), "$Qd, $Qm, $imm",
                       vpred_r, ""> {
  bits<6> imm;

  let Inst{28} = VTI_.Unsigned;
  let Inst{25-24} = 0b11;
  let Inst{21-16} = imm;
  let Inst{10-8} = 0b111;

  let VTI = VTI_;
  let immediateType = immType;
  let unsignedFlag = (? (i32 VTI.Unsigned));
}

let unpred_int = int_arm_mve_vqshl_imm,
    pred_int = int_arm_mve_vqshl_imm_predicated in {
  def MVE_VQSHLimms8 : MVE_VQSHL_imm<MVE_v16s8, imm0_7> {
    let Inst{21-19} = 0b001;
  }
  def MVE_VQSHLimmu8 : MVE_VQSHL_imm<MVE_v16u8, imm0_7> {
    let Inst{21-19} = 0b001;
  }

  def MVE_VQSHLimms16 : MVE_VQSHL_imm<MVE_v8s16, imm0_15> {
    let Inst{21-20} = 0b01;
  }
  def MVE_VQSHLimmu16 : MVE_VQSHL_imm<MVE_v8u16, imm0_15> {
    let Inst{21-20} = 0b01;
  }

  def MVE_VQSHLimms32 : MVE_VQSHL_imm<MVE_v4s32, imm0_31> {
    let Inst{21} = 0b1;
  }
  def MVE_VQSHLimmu32 : MVE_VQSHL_imm<MVE_v4u32, imm0_31> {
    let Inst{21} = 0b1;
  }
}

// VQSHLU: signed input, unsigned saturated result, so only signed
// source types exist and Inst{28} is fixed.
class MVE_VQSHLU_imm<MVEVectorVTInfo VTI_, Operand immType>
  : MVE_shift_with_imm<"vqshlu", VTI_.Suffix, (outs MQPR:$Qd),
                       (ins MQPR:$Qm, immType:$imm), "$Qd, $Qm, $imm",
                       vpred_r, ""> {
  bits<6> imm;

  let Inst{28} = 0b1;
  let Inst{25-24} = 0b11;
  let Inst{21-16} = imm;
  let Inst{10-8} = 0b110;

  let VTI = VTI_;
  let immediateType = immType;
}

let unpred_int = int_arm_mve_vqshlu_imm,
    pred_int = int_arm_mve_vqshlu_imm_predicated in {
  def MVE_VQSHLU_imms8 : MVE_VQSHLU_imm<MVE_v16s8, imm0_7> {
    let Inst{21-19} = 0b001;
  }

  def MVE_VQSHLU_imms16 : MVE_VQSHLU_imm<MVE_v8s16, imm0_15> {
    let Inst{21-20} = 0b01;
  }

  def MVE_VQSHLU_imms32 : MVE_VQSHLU_imm<MVE_v4s32, imm0_31> {
    let Inst{21} = 0b1;
  }
}

// VRSHR: rounding right shift by immediate.
class MVE_VRSHR_imm<MVEVectorVTInfo VTI_, Operand immType>
  : MVE_shift_with_imm<"vrshr", VTI_.Suffix, (outs MQPR:$Qd),
                       (ins MQPR:$Qm, immType:$imm), "$Qd, $Qm, $imm",
                       vpred_r, ""> {
  bits<6> imm;

  let Inst{28} = VTI_.Unsigned;
  let Inst{25-24} = 0b11;
  let Inst{21-16} = imm;
  let Inst{10-8} = 0b010;

  let VTI = VTI_;
  let immediateType = immType;
  let unsignedFlag = (? (i32 VTI.Unsigned));
}

let unpred_int = int_arm_mve_vrshr_imm,
    pred_int = int_arm_mve_vrshr_imm_predicated in {
  def MVE_VRSHR_imms8 : MVE_VRSHR_imm<MVE_v16s8, shr_imm8> {
    let Inst{21-19} = 0b001;
  }

  def MVE_VRSHR_immu8 : MVE_VRSHR_imm<MVE_v16u8, shr_imm8> {
    let Inst{21-19} = 0b001;
  }

  def MVE_VRSHR_imms16 : MVE_VRSHR_imm<MVE_v8s16, shr_imm16> {
    let Inst{21-20} = 0b01;
  }

  def MVE_VRSHR_immu16 : MVE_VRSHR_imm<MVE_v8u16, shr_imm16> {
    let Inst{21-20} = 0b01;
  }

  def MVE_VRSHR_imms32 : MVE_VRSHR_imm<MVE_v4s32, shr_imm32> {
    let Inst{21} = 0b1;
  }

  def MVE_VRSHR_immu32 : MVE_VRSHR_imm<MVE_v4u32, shr_imm32> {
    let Inst{21} = 0b1;
  }
}

// Generic intrinsic-selection patterns for the shift-by-immediate
// instructions above, driven entirely by the VTI / immediateType /
// unpred_int / pred_int / unsignedFlag fields stored on each record.
multiclass MVE_shift_imm_patterns<MVE_shift_with_imm inst> {
  def : Pat<(inst.VTI.Vec !con((inst.unpred_int (inst.VTI.Vec MQPR:$src),
                                                inst.immediateType:$imm),
                               inst.unsignedFlag)),
            (inst.VTI.Vec (inst (inst.VTI.Vec MQPR:$src),
                                inst.immediateType:$imm))>;

  def : Pat<(inst.VTI.Vec !con((inst.pred_int (inst.VTI.Vec MQPR:$src),
                                              inst.immediateType:$imm),
                               inst.unsignedFlag,
                               (? (inst.VTI.Pred VCCR:$mask),
                                  (inst.VTI.Vec MQPR:$inactive)))),
            (inst.VTI.Vec (inst (inst.VTI.Vec MQPR:$src),
                                inst.immediateType:$imm,
                                ARMVCCThen, (inst.VTI.Pred VCCR:$mask),
                                (inst.VTI.Vec MQPR:$inactive)))>;
}

defm : MVE_shift_imm_patterns<MVE_VQSHLimms8>;
defm : MVE_shift_imm_patterns<MVE_VQSHLimmu8>;
defm : MVE_shift_imm_patterns<MVE_VQSHLimms16>;
defm : MVE_shift_imm_patterns<MVE_VQSHLimmu16>;
defm : MVE_shift_imm_patterns<MVE_VQSHLimms32>;
defm : MVE_shift_imm_patterns<MVE_VQSHLimmu32>;
defm : MVE_shift_imm_patterns<MVE_VQSHLU_imms8>;
defm : MVE_shift_imm_patterns<MVE_VQSHLU_imms16>;
defm : MVE_shift_imm_patterns<MVE_VQSHLU_imms32>;
defm : MVE_shift_imm_patterns<MVE_VRSHR_imms8>;
defm : MVE_shift_imm_patterns<MVE_VRSHR_immu8>;
defm : MVE_shift_imm_patterns<MVE_VRSHR_imms16>;
defm : MVE_shift_imm_patterns<MVE_VRSHR_immu16>;
defm : MVE_shift_imm_patterns<MVE_VRSHR_imms32>;
defm : MVE_shift_imm_patterns<MVE_VRSHR_immu32>;

// VSHR: plain right shift by immediate.
class MVE_VSHR_imm<string suffix, dag imm>
  : MVE_shift_with_imm<"vshr", suffix, (outs MQPR:$Qd),
                       !con((ins MQPR:$Qm), imm), "$Qd, $Qm, $imm",
                       vpred_r, ""> {
  bits<6> imm;

  let Inst{25-24} = 0b11;
  let Inst{21-16} = imm;
  let Inst{10-8} = 0b000;
}

def MVE_VSHR_imms8 : MVE_VSHR_imm<"s8", (ins shr_imm8:$imm)> {
  let Inst{28} = 0b0;
  let Inst{21-19} = 0b001;
}

def MVE_VSHR_immu8 : MVE_VSHR_imm<"u8", (ins shr_imm8:$imm)> {
  let Inst{28} = 0b1;
  let Inst{21-19} = 0b001;
}

def MVE_VSHR_imms16 : MVE_VSHR_imm<"s16", (ins shr_imm16:$imm)> {
  let Inst{28} = 0b0;
  let Inst{21-20} = 0b01;
}

def MVE_VSHR_immu16 : MVE_VSHR_imm<"u16", (ins shr_imm16:$imm)> {
  let Inst{28} = 0b1;
  let Inst{21-20} = 0b01;
}

def MVE_VSHR_imms32 : MVE_VSHR_imm<"s32", (ins shr_imm32:$imm)> {
  let Inst{28} = 0b0;
  let Inst{21} = 0b1;
}
def MVE_VSHR_immu32 : MVE_VSHR_imm<"u32", (ins shr_imm32:$imm)> {
  let Inst{28} = 0b1;
  let Inst{21} = 0b1;
}

// VSHL by immediate; only "i" (sign-independent) forms exist, so
// Inst{28} is fixed.
class MVE_VSHL_imm<string suffix, dag imm>
  : MVE_shift_with_imm<"vshl", suffix, (outs MQPR:$Qd),
                       !con((ins MQPR:$Qm), imm), "$Qd, $Qm, $imm",
                       vpred_r, ""> {
  bits<6> imm;

  let Inst{28} = 0b0;
  let Inst{25-24} = 0b11;
  let Inst{21-16} = imm;
  let Inst{10-8} = 0b101;
}

def MVE_VSHL_immi8 : MVE_VSHL_imm<"i8", (ins imm0_7:$imm)> {
  let Inst{21-19} = 0b001;
}

def MVE_VSHL_immi16 : MVE_VSHL_imm<"i16", (ins imm0_15:$imm)> {
  let Inst{21-20} = 0b01;
}

def MVE_VSHL_immi32 : MVE_VSHL_imm<"i32", (ins imm0_31:$imm)> {
  let Inst{21} = 0b1;
}

// Select plain SDNode shifts-by-immediate and the predicated shift
// intrinsics onto VSHL/VSHR. unsignedFlag optionally inserts an extra
// (i32 0/1) operand into the predicated intrinsic's expected dag.
multiclass MVE_immediate_shift_patterns_inner<
    MVEVectorVTInfo VTI, Operand imm_operand_type, SDNode unpred_op,
    Intrinsic pred_int, Instruction inst, list<int> unsignedFlag = []> {

  def : Pat<(VTI.Vec (unpred_op (VTI.Vec MQPR:$src), imm_operand_type:$imm)),
            (VTI.Vec (inst (VTI.Vec MQPR:$src), imm_operand_type:$imm))>;

  def : Pat<(VTI.Vec !con((pred_int (VTI.Vec MQPR:$src), imm_operand_type:$imm),
                          !dag(pred_int, unsignedFlag, ?),
                          (pred_int (VTI.Pred VCCR:$mask),
                                    (VTI.Vec MQPR:$inactive)))),
            (VTI.Vec (inst (VTI.Vec MQPR:$src), imm_operand_type:$imm,
                           ARMVCCThen, (VTI.Pred VCCR:$mask),
                           (VTI.Vec MQPR:$inactive)))>;
}

multiclass MVE_immediate_shift_patterns<MVEVectorVTInfo VTI,
                                        Operand imm_operand_type> {
  defm : MVE_immediate_shift_patterns_inner<VTI, imm_operand_type,
                   ARMvshlImm, int_arm_mve_shl_imm_predicated,
                   !cast<Instruction>("MVE_VSHL_immi" # VTI.BitsSuffix)>;
  defm : MVE_immediate_shift_patterns_inner<VTI, imm_operand_type,
                   ARMvshruImm, int_arm_mve_shr_imm_predicated,
                   !cast<Instruction>("MVE_VSHR_immu" # VTI.BitsSuffix), [1]>;
  defm : MVE_immediate_shift_patterns_inner<VTI, imm_operand_type,
                   ARMvshrsImm, int_arm_mve_shr_imm_predicated,
                   !cast<Instruction>("MVE_VSHR_imms" # VTI.BitsSuffix), [0]>;
}

let Predicates = [HasMVEInt] in {
  defm : MVE_immediate_shift_patterns<MVE_v16i8, imm0_7>;
  defm : MVE_immediate_shift_patterns<MVE_v8i16, imm0_15>;
  defm : MVE_immediate_shift_patterns<MVE_v4i32, imm0_31>;
}

// end of mve_shift instructions

// start of MVE Floating Point instructions

// Common Qm encoding bits for the MVE floating-point instructions.
class MVE_float<string iname, string suffix, dag oops, dag iops, string ops,
                vpred_ops vpred, string cstr, list<dag> pattern=[]>
  : MVE_f<oops, iops, NoItinerary, iname, suffix, ops, vpred, cstr, pattern> {
  bits<4> Qm;

  let Inst{12} = 0b0;
  let Inst{6} = 0b1;
  let Inst{5} = Qm{3};
  let Inst{3-1} = Qm{2-0};
  let Inst{0} = 0b0;
}

// VRINT with a rounding-mode suffix (vrintn/x/a/z/m/p), encoded in op.
class MVE_VRINT<string rmode, bits<3> op, string suffix, bits<2> size,
                list<dag> pattern=[]>
  : MVE_float<!strconcat("vrint", rmode), suffix, (outs MQPR:$Qd),
              (ins MQPR:$Qm), "$Qd, $Qm", vpred_r, "", pattern> {
  bits<4> Qd;

  let Inst{28} = 0b1;
  let Inst{25-23} = 0b111;
  let Inst{22} = Qd{3};
  let Inst{21-20} = 0b11;
  let Inst{19-18} = size;
  let Inst{17-16} = 0b10;
  let Inst{15-13} = Qd{2-0};
  let Inst{11-10} = 0b01;
  let Inst{9-7} = op{2-0};
  let Inst{4} = 0b0;
  let validForTailPredication = 1;
}

// One VRINT variant plus its unpredicated SDNode pattern and
// predicated-intrinsic pattern.
multiclass MVE_VRINT_m<MVEVectorVTInfo VTI, string suffix, bits<3> opcode,
                       SDPatternOperator unpred_op> {
  def "": MVE_VRINT<suffix, opcode, VTI.Suffix, VTI.Size>;
  defvar Inst = !cast<Instruction>(NAME);
  defvar pred_int = !cast<Intrinsic>("int_arm_mve_vrint"#suffix#"_predicated");

  let Predicates = [HasMVEFloat] in {
    def : Pat<(VTI.Vec (unpred_op (VTI.Vec MQPR:$val))),
              (VTI.Vec (Inst (VTI.Vec MQPR:$val)))>;
    def : Pat<(VTI.Vec (pred_int (VTI.Vec MQPR:$val), (VTI.Pred VCCR:$pred),
                                 (VTI.Vec MQPR:$inactive))),
              (VTI.Vec (Inst (VTI.Vec MQPR:$val), ARMVCCThen,
                             (VTI.Pred VCCR:$pred), (VTI.Vec MQPR:$inactive)))>;
  }
}

// All six rounding modes for one vector type.
multiclass MVE_VRINT_ops<MVEVectorVTInfo VTI> {
  defm N : MVE_VRINT_m<VTI, "n", 0b000, int_arm_mve_vrintn>;
  defm X : MVE_VRINT_m<VTI, "x", 0b001, frint>;
  defm A : MVE_VRINT_m<VTI, "a", 0b010, fround>;
  defm Z : MVE_VRINT_m<VTI, "z", 0b011, ftrunc>;
  defm M : MVE_VRINT_m<VTI, "m", 0b101, ffloor>;
  defm P : MVE_VRINT_m<VTI, "p", 0b111, fceil>;
}

defm MVE_VRINTf16 : MVE_VRINT_ops<MVE_v8f16>;
defm MVE_VRINTf32 : MVE_VRINT_ops<MVE_v4f32>;

class MVEFloatArithNeon<string iname, string suffix, bit size,
                        dag oops, dag iops, string ops,
                        vpred_ops vpred, string cstr, list<dag> pattern=[]>
  : MVE_float<iname, suffix, oops, iops, ops, vpred, cstr, pattern> {
  let Inst{20} = size;
  let Inst{16} = 0b0;
}

class MVE_VMUL_fp<string iname, string suffix, bit size, list<dag> pattern=[]>
  : MVEFloatArithNeon<iname, suffix, size, (outs MQPR:$Qd),
                      (ins MQPR:$Qn, MQPR:$Qm), "$Qd, $Qn, $Qm", vpred_r, "",
                      pattern> {
  bits<4> Qd;
  bits<4> Qn;

  let Inst{28} = 0b1;
  let Inst{25-23} = 0b110;
  let Inst{22} = Qd{3};
  let Inst{21} = 0b0;
  let Inst{19-17} = Qn{2-0};
  let Inst{15-13} = Qd{2-0};
  let Inst{12-8} = 0b01101;
  let Inst{7} = Qn{3};
  let Inst{4} = 0b1;
  let validForTailPredication = 1;
}

multiclass MVE_VMULT_fp_m<string iname, bit bit_21, MVEVectorVTInfo VTI,
                          SDNode Op, Intrinsic PredInt> {
  def "" : MVE_VMUL_fp<iname, VTI.Suffix, VTI.Size{0}>;
  defvar Inst = !cast<Instruction>(NAME);

  let Predicates = [HasMVEFloat] in {
    defm : MVE_TwoOpPattern<VTI, Op, PredInt, (? ), !cast<Instruction>(NAME)>;
  }
}

multiclass MVE_VMUL_fp_m<MVEVectorVTInfo VTI>
  : MVE_VMULT_fp_m<"vmul", 0, VTI, fmul, int_arm_mve_mul_predicated>;

defm MVE_VMULf32 : MVE_VMUL_fp_m<MVE_v4f32>;
defm MVE_VMULf16 : MVE_VMUL_fp_m<MVE_v8f16>;

// VCMLA: complex multiply-accumulate with a rotation operand; $Qd is
// accumulated into, hence the tied $Qd_src.
class MVE_VCMLA<string suffix, bit size>
  : MVEFloatArithNeon<"vcmla", suffix, size, (outs MQPR:$Qd),
                      (ins MQPR:$Qd_src, MQPR:$Qn, MQPR:$Qm, complexrotateop:$rot),
                      "$Qd, $Qn, $Qm, $rot", vpred_n, "$Qd = $Qd_src", []> {
  bits<4> Qd;
  bits<4> Qn;
  bits<2> rot;

  let Inst{28} = 0b1;
  let Inst{25} = 0b0;
  let Inst{24-23} = rot;
  let Inst{22} = Qd{3};
  let Inst{21} = 0b1;
  let Inst{19-17} = Qn{2-0};
  let Inst{15-13} = Qd{2-0};
  let Inst{12-8} = 0b01000;
  let Inst{7} = Qn{3};
  let Inst{4} = 0b0;
}

multiclass MVE_VCMLA_m<MVEVectorVTInfo VTI, bit size> {
  def "" : MVE_VCMLA<VTI.Suffix, size>;
  defvar Inst = !cast<Instruction>(NAME);

  let Predicates = [HasMVEFloat] in {
    def : Pat<(VTI.Vec (int_arm_mve_vcmlaq
                            imm:$rot, (VTI.Vec MQPR:$Qd_src),
                            (VTI.Vec MQPR:$Qn), (VTI.Vec MQPR:$Qm))),
              (VTI.Vec (Inst (VTI.Vec MQPR:$Qd_src),
                             (VTI.Vec MQPR:$Qn), (VTI.Vec MQPR:$Qm),
                             imm:$rot))>;

    def : Pat<(VTI.Vec (int_arm_mve_vcmlaq_predicated
                            imm:$rot, (VTI.Vec MQPR:$Qd_src),
                            (VTI.Vec MQPR:$Qn), (VTI.Vec MQPR:$Qm),
                            (VTI.Pred VCCR:$mask))),
              (VTI.Vec (Inst (VTI.Vec MQPR:$Qd_src), (VTI.Vec MQPR:$Qn),
                             (VTI.Vec MQPR:$Qm), imm:$rot,
                             ARMVCCThen, (VTI.Pred VCCR:$mask)))>;
  }
}

defm MVE_VCMLAf16 : MVE_VCMLA_m<MVE_v8f16, 0b0>;
defm MVE_VCMLAf32 : MVE_VCMLA_m<MVE_v4f32, 0b1>;

// Shared encoding for VADD/VSUB/VFMA/VFMS (fp): bits 21/8/4 select the
// operation, and FMA-style users pass an extra tied $Qd_src input.
class MVE_VADDSUBFMA_fp<string iname, string suffix, bit size, bit bit_4,
                        bit bit_8, bit bit_21, dag iops=(ins),
                        vpred_ops vpred=vpred_r, string cstr="",
                        list<dag> pattern=[]>
  : MVEFloatArithNeon<iname, suffix, size, (outs MQPR:$Qd),
                      !con(iops, (ins MQPR:$Qn, MQPR:$Qm)), "$Qd, $Qn, $Qm",
                      vpred, cstr, pattern> {
  bits<4> Qd;
  bits<4> Qn;

  let Inst{28} = 0b0;
  let Inst{25-23} = 0b110;
  let Inst{22} = Qd{3};
  let Inst{21} = bit_21;
  let Inst{19-17} = Qn{2-0};
  let Inst{15-13} = Qd{2-0};
  let Inst{11-9} = 0b110;
  let Inst{8} = bit_8;
  let Inst{7} = Qn{3};
  let Inst{4} = bit_4;
  let validForTailPredication = 1;
}

// VFMA/VFMS patterns: fms selects the fused-multiply-subtract variants,
// which match an fneg on either multiplicand.
multiclass MVE_VFMA_fp_multi<string iname, bit fms, MVEVectorVTInfo VTI> {
  def "" : MVE_VADDSUBFMA_fp<iname, VTI.Suffix, VTI.Size{0}, 0b1, 0b0, fms,
                             (ins MQPR:$Qd_src), vpred_n, "$Qd = $Qd_src">;
  defvar Inst = !cast<Instruction>(NAME);
  defvar pred_int = int_arm_mve_fma_predicated;
  defvar m1 = (VTI.Vec MQPR:$m1);
  defvar m2 = (VTI.Vec MQPR:$m2);
  defvar add = (VTI.Vec MQPR:$add);
  defvar pred = (VTI.Pred VCCR:$pred);

  let Predicates = [HasMVEFloat] in {
    if fms then {
      def : Pat<(VTI.Vec (fma (fneg m1), m2, add)),
                (Inst $add, $m1, $m2)>;
      def : Pat<(VTI.Vec (vselect (VTI.Pred VCCR:$pred),
                                  (VTI.Vec (fma (fneg m1), m2, add)),
                                  add)),
                (Inst $add, $m1, $m2, ARMVCCThen, $pred)>;
      def : Pat<(VTI.Vec (pred_int (fneg m1), m2, add, pred)),
                (Inst $add, $m1, $m2, ARMVCCThen, $pred)>;
      def : Pat<(VTI.Vec (pred_int m1, (fneg m2), add, pred)),
                (Inst $add, $m1, $m2, ARMVCCThen, $pred)>;
    } else {
      def : Pat<(VTI.Vec (fma m1, m2, add)),
                (Inst $add, $m1, $m2)>;
      def : Pat<(VTI.Vec (vselect (VTI.Pred VCCR:$pred),
                                  (VTI.Vec (fma m1, m2, add)),
                                  add)),
                (Inst $add, $m1, $m2, ARMVCCThen, $pred)>;
      def : Pat<(VTI.Vec (pred_int m1, m2, add, pred)),
                (Inst $add, $m1, $m2, ARMVCCThen, $pred)>;
    }
  }
}

defm MVE_VFMAf32 : MVE_VFMA_fp_multi<"vfma", 0, MVE_v4f32>;
defm MVE_VFMAf16 : MVE_VFMA_fp_multi<"vfma", 0, MVE_v8f16>;
defm MVE_VFMSf32 : MVE_VFMA_fp_multi<"vfms", 1, MVE_v4f32>;
defm MVE_VFMSf16 : MVE_VFMA_fp_multi<"vfms", 1, MVE_v8f16>;

multiclass MVE_VADDSUB_fp_m<string iname, bit bit_21, MVEVectorVTInfo VTI,
                            SDNode Op, Intrinsic PredInt> {
  def "" : MVE_VADDSUBFMA_fp<iname, VTI.Suffix, VTI.Size{0}, 0, 1, bit_21> {
    let validForTailPredication = 1;
  }
  defvar Inst = !cast<Instruction>(NAME);

  let Predicates = [HasMVEFloat] in {
    defm : MVE_TwoOpPattern<VTI, Op, PredInt, (? ), !cast<Instruction>(NAME)>;
  }
}

multiclass MVE_VADD_fp_m<MVEVectorVTInfo VTI>
  : MVE_VADDSUB_fp_m<"vadd", 0, VTI, fadd, int_arm_mve_add_predicated>;
multiclass MVE_VSUB_fp_m<MVEVectorVTInfo VTI>
  : MVE_VADDSUB_fp_m<"vsub", 1, VTI, fsub, int_arm_mve_sub_predicated>;

defm MVE_VADDf32 : MVE_VADD_fp_m<MVE_v4f32>;
defm MVE_VADDf16 : MVE_VADD_fp_m<MVE_v8f16>;

defm MVE_VSUBf32 : MVE_VSUB_fp_m<MVE_v4f32>;
defm MVE_VSUBf16 : MVE_VSUB_fp_m<MVE_v8f16>;

// VCADD: complex add with a rotation operand (90/270), encoded in rot.
class MVE_VCADD<string suffix, bit size, string cstr="">
  : MVEFloatArithNeon<"vcadd", suffix, size, (outs MQPR:$Qd),
                      (ins MQPR:$Qn, MQPR:$Qm, complexrotateopodd:$rot),
                      "$Qd, $Qn, $Qm, $rot", vpred_r, cstr, []> {
  bits<4> Qd;
  bits<4> Qn;
  bit rot;

  let Inst{28} = 0b1;
  let Inst{25} = 0b0;
  let Inst{24} = rot;
  let Inst{23} = 0b1;
  let Inst{22} = Qd{3};
  let Inst{21} = 0b0;
  let Inst{19-17} = Qn{2-0};
  let Inst{15-13} = Qd{2-0};
  let Inst{12-8} = 0b01000;
  let Inst{7} = Qn{3};
  let Inst{4} = 0b0;
}

multiclass MVE_VCADD_m<MVEVectorVTInfo VTI, bit size, string cstr=""> {
  def "" : MVE_VCADD<VTI.Suffix, size, cstr>;
  defvar Inst = !cast<Instruction>(NAME);

  let Predicates = [HasMVEFloat] in {
    def : Pat<(VTI.Vec (int_arm_mve_vcaddq (i32 1),
                            imm:$rot, (VTI.Vec MQPR:$Qn), (VTI.Vec MQPR:$Qm))),
              (VTI.Vec (Inst (VTI.Vec MQPR:$Qn), (VTI.Vec MQPR:$Qm),
                             imm:$rot))>;

    def : Pat<(VTI.Vec (int_arm_mve_vcaddq_predicated (i32 1),
                            imm:$rot, (VTI.Vec MQPR:$inactive),
                            (VTI.Vec MQPR:$Qn), (VTI.Vec MQPR:$Qm),
                            (VTI.Pred VCCR:$mask))),
              (VTI.Vec (Inst (VTI.Vec MQPR:$Qn), (VTI.Vec MQPR:$Qm),
                             imm:$rot, ARMVCCThen, (VTI.Pred VCCR:$mask),
                             (VTI.Vec MQPR:$inactive)))>;
  }
}

defm MVE_VCADDf16 : MVE_VCADD_m<MVE_v8f16, 0b0>;
defm MVE_VCADDf32 : MVE_VCADD_m<MVE_v4f32, 0b1, "@earlyclobber $Qd">;

class MVE_VABD_fp<string suffix, bit size>
  : MVE_float<"vabd", suffix, (outs MQPR:$Qd), (ins MQPR:$Qn, MQPR:$Qm),
              "$Qd, $Qn, $Qm", vpred_r, ""> {
  bits<4> Qd;
  bits<4> Qn;

  let Inst{28} = 0b1;
  let Inst{25-23} = 0b110;
  let Inst{22} = Qd{3};
  let Inst{21} = 0b1;
  let Inst{20} = size;
  let Inst{19-17} = Qn{2-0};
  let Inst{16} = 0b0;
  let Inst{15-13} = Qd{2-0};
  let Inst{11-8} = 0b1101;
  let Inst{7} = Qn{3};
  let Inst{4} = 0b0;
  let validForTailPredication = 1;
}

multiclass MVE_VABDT_fp_m<MVEVectorVTInfo VTI,
                          Intrinsic unpred_int, Intrinsic pred_int> {
  def "" : MVE_VABD_fp<VTI.Suffix, VTI.Size{0}>;
  defvar Inst = !cast<Instruction>(NAME);

  let Predicates = [HasMVEFloat] in {
    def : Pat<(VTI.Vec (unpred_int (VTI.Vec MQPR:$Qm), (VTI.Vec MQPR:$Qn),
                                   (i32 0))),
              (VTI.Vec (Inst (VTI.Vec MQPR:$Qm), (VTI.Vec MQPR:$Qn)))>;
    def : Pat<(VTI.Vec (pred_int (VTI.Vec MQPR:$Qm), (VTI.Vec MQPR:$Qn),
                                 (i32 0), (VTI.Pred VCCR:$mask),
                                 (VTI.Vec MQPR:$inactive))),
              (VTI.Vec (Inst (VTI.Vec MQPR:$Qm), (VTI.Vec MQPR:$Qn),
                             ARMVCCThen, (VTI.Pred VCCR:$mask),
                             (VTI.Vec MQPR:$inactive)))>;
  }
}

multiclass MVE_VABD_fp_m<MVEVectorVTInfo VTI>
  : MVE_VABDT_fp_m<VTI, int_arm_mve_vabd, int_arm_mve_abd_predicated>;

defm MVE_VABDf32 : MVE_VABD_fp_m<MVE_v4f32>;
defm MVE_VABDf16 : MVE_VABD_fp_m<MVE_v8f16>;

// fabs(fsub(a, b)) folds to a single VABD.
let Predicates = [HasMVEFloat] in {
  def : Pat<(v8f16 (fabs (fsub (v8f16 MQPR:$Qm), (v8f16 MQPR:$Qn)))),
            (MVE_VABDf16 MQPR:$Qm, MQPR:$Qn)>;
  def : Pat<(v4f32 (fabs (fsub (v4f32 MQPR:$Qm), (v4f32 MQPR:$Qn)))),
            (MVE_VABDf32 MQPR:$Qm, MQPR:$Qn)>;
}

// VCVT between floating point and fixed point, with an immediate number
// of fractional bits ($imm6).
class MVE_VCVT_fix<string suffix, bit fsi, bit U, bit op,
                   Operand imm_operand_type>
  : MVE_float<"vcvt", suffix,
              (outs MQPR:$Qd), (ins MQPR:$Qm, imm_operand_type:$imm6),
              "$Qd, $Qm, $imm6", vpred_r, "", []> {
  bits<4> Qd;
  bits<6> imm6;

  let Inst{28} = U;
  let Inst{25-23} = 0b111;
  let Inst{22} = Qd{3};
  let Inst{21} = 0b1;
  let Inst{19-16} = imm6{3-0};
  let Inst{15-13} = Qd{2-0};
  let Inst{11-10} = 0b11;
  let Inst{9} = fsi;
  let Inst{8} = op;
  let Inst{7} = 0b0;
  let Inst{4} = 0b1;

  let DecoderMethod = "DecodeMVEVCVTt1fp";
  let validForTailPredication = 1;
}

// Assembler operand class for the fixed-point fraction count: valid
// values are 1..Bits.
class MVE_VCVT_imm_asmop<int Bits> : AsmOperandClass {
  let PredicateMethod = "isImmediate<1," # Bits # ">";
  let DiagnosticString =
      "MVE fixed-point immediate operand must be between 1 and " # Bits;
  let Name = "MVEVcvtImm" # Bits;
  let RenderMethod = "addImmOperands";
}
class MVE_VCVT_imm<int Bits>: Operand<i32> {
  let ParserMatchClass = MVE_VCVT_imm_asmop<Bits>;
  let EncoderMethod = "getNEONVcvtImm32OpValue";
  let DecoderMethod = "DecodeVCVTImmOperand";
}

class MVE_VCVT_fix_f32<string suffix, bit U, bit op>
  : MVE_VCVT_fix<suffix, 0b1, U, op, MVE_VCVT_imm<32>> {
  let Inst{20} = imm6{4};
}
class MVE_VCVT_fix_f16<string suffix, bit U, bit op>
  : MVE_VCVT_fix<suffix, 0b0, U, op, MVE_VCVT_imm<16>> {
  let Inst{20} = 0b1;
}

multiclass MVE_VCVT_fix_patterns<Instruction Inst, bit U, MVEVectorVTInfo DestVTI,
                                 MVEVectorVTInfo SrcVTI> {
  let Predicates = [HasMVEFloat] in {
    def : Pat<(DestVTI.Vec (int_arm_mve_vcvt_fix
                               (i32 U), (SrcVTI.Vec MQPR:$Qm), imm:$scale)),
              (DestVTI.Vec (Inst (SrcVTI.Vec MQPR:$Qm), imm:$scale))>;
    def : Pat<(DestVTI.Vec (int_arm_mve_vcvt_fix_predicated (i32 U),
                               (DestVTI.Vec MQPR:$inactive),
                               (SrcVTI.Vec MQPR:$Qm),
                               imm:$scale,
                               (DestVTI.Pred VCCR:$mask))),
              (DestVTI.Vec (Inst (SrcVTI.Vec MQPR:$Qm), imm:$scale,
                                 ARMVCCThen, (DestVTI.Pred VCCR:$mask),
                                 (DestVTI.Vec MQPR:$inactive)))>;
  }
}

multiclass MVE_VCVT_fix_f32_m<bit U, bit op,
                              MVEVectorVTInfo DestVTI, MVEVectorVTInfo SrcVTI> {
  def "" : MVE_VCVT_fix_f32<DestVTI.Suffix#"."#SrcVTI.Suffix, U, op>;
  defm : MVE_VCVT_fix_patterns<!cast<Instruction>(NAME), U, DestVTI, SrcVTI>;
}

multiclass MVE_VCVT_fix_f16_m<bit U, bit op,
                              MVEVectorVTInfo DestVTI, MVEVectorVTInfo SrcVTI> {
  def "" : MVE_VCVT_fix_f16<DestVTI.Suffix#"."#SrcVTI.Suffix, U, op>;
  defm : MVE_VCVT_fix_patterns<!cast<Instruction>(NAME), U, DestVTI, SrcVTI>;
}

defm MVE_VCVTf16s16_fix : MVE_VCVT_fix_f16_m<0b0, 0b0, MVE_v8f16, MVE_v8s16>;
defm MVE_VCVTs16f16_fix : MVE_VCVT_fix_f16_m<0b0, 0b1, MVE_v8s16, MVE_v8f16>;
defm MVE_VCVTf16u16_fix : MVE_VCVT_fix_f16_m<0b1, 0b0, MVE_v8f16, MVE_v8u16>;
defm MVE_VCVTu16f16_fix : MVE_VCVT_fix_f16_m<0b1, 0b1, MVE_v8u16, MVE_v8f16>;
defm MVE_VCVTf32s32_fix : MVE_VCVT_fix_f32_m<0b0, 0b0, MVE_v4f32, MVE_v4s32>;
defm MVE_VCVTs32f32_fix : MVE_VCVT_fix_f32_m<0b0, 0b1, MVE_v4s32, MVE_v4f32>;
defm MVE_VCVTf32u32_fix : MVE_VCVT_fix_f32_m<0b1, 0b0, MVE_v4f32, MVE_v4u32>;
defm MVE_VCVTu32f32_fix : MVE_VCVT_fix_f32_m<0b1, 0b1, MVE_v4u32, MVE_v4f32>;

// VCVT float->int with an explicit rounding-mode suffix (a/n/p/m),
// encoded in rm.
class MVE_VCVT_fp_int_anpm<string suffix, bits<2> size, bit op, string anpm,
                           bits<2> rm, list<dag> pattern=[]>
  : MVE_float<!strconcat("vcvt", anpm), suffix, (outs MQPR:$Qd),
              (ins MQPR:$Qm), "$Qd, $Qm", vpred_r, "", pattern> {
  bits<4> Qd;

  let Inst{28} = 0b1;
  let Inst{25-23} = 0b111;
  let Inst{22} = Qd{3};
  let Inst{21-20} = 0b11;
  let Inst{19-18} = size;
  let Inst{17-16} = 0b11;
  let Inst{15-13} = Qd{2-0};
  let Inst{12-10} = 0b000;
  let Inst{9-8} = rm;
  let Inst{7} = op;
  let Inst{4} = 0b0;
  let validForTailPredication = 1;
}

multiclass MVE_VCVT_fp_int_anpm_inner<MVEVectorVTInfo Int, MVEVectorVTInfo Flt,
                                      string anpm, bits<2> rm> {
  def "": MVE_VCVT_fp_int_anpm<Int.Suffix # "." # Flt.Suffix, Int.Size,
                               Int.Unsigned, anpm, rm>;

  defvar Inst = !cast<Instruction>(NAME);
  defvar IntrBaseName = "int_arm_mve_vcvt" # anpm;
  defvar UnpredIntr = !cast<Intrinsic>(IntrBaseName);
  defvar PredIntr = !cast<Intrinsic>(IntrBaseName # "_predicated");

  let Predicates = [HasMVEFloat] in {
    def : Pat<(Int.Vec (UnpredIntr (i32 Int.Unsigned), (Flt.Vec MQPR:$in))),
              (Int.Vec (Inst (Flt.Vec MQPR:$in)))>;

    def : Pat<(Int.Vec (PredIntr (i32 Int.Unsigned), (Int.Vec MQPR:$inactive),
                                 (Flt.Vec MQPR:$in), (Flt.Pred VCCR:$pred))),
              (Int.Vec (Inst (Flt.Vec MQPR:$in), ARMVCCThen,
                             (Flt.Pred VCCR:$pred), (Int.Vec MQPR:$inactive)))>;
  }
}

multiclass MVE_VCVT_fp_int_anpm_outer<MVEVectorVTInfo Int,
                                      MVEVectorVTInfo Flt> {
  defm a : MVE_VCVT_fp_int_anpm_inner<Int, Flt, "a", 0b00>;
  defm n : MVE_VCVT_fp_int_anpm_inner<Int, Flt, "n", 0b01>;
  defm p : MVE_VCVT_fp_int_anpm_inner<Int, Flt, "p", 0b10>;
  defm m : MVE_VCVT_fp_int_anpm_inner<Int, Flt, "m", 0b11>;
}

// This defines instructions such as MVE_VCVTu16f16a, with an explicit
// rounding-mode suffix on the mnemonic. The class below will define
// the bare MVE_VCVTu16f16 (with implied rounding toward zero).
defm MVE_VCVTs16f16 : MVE_VCVT_fp_int_anpm_outer<MVE_v8s16, MVE_v8f16>;
defm MVE_VCVTu16f16 : MVE_VCVT_fp_int_anpm_outer<MVE_v8u16, MVE_v8f16>;
defm MVE_VCVTs32f32 : MVE_VCVT_fp_int_anpm_outer<MVE_v4s32, MVE_v4f32>;
defm MVE_VCVTu32f32 : MVE_VCVT_fp_int_anpm_outer<MVE_v4u32, MVE_v4f32>;

// Bare VCVT between float and integer; direction and signedness are
// encoded in bits 8 and 7.
class MVE_VCVT_fp_int<string suffix, bits<2> size, bit toint, bit unsigned,
                      list<dag> pattern=[]>
  : MVE_float<"vcvt", suffix, (outs MQPR:$Qd),
              (ins MQPR:$Qm), "$Qd, $Qm", vpred_r, "", pattern> {
  bits<4> Qd;

  let Inst{28} = 0b1;
  let Inst{25-23} = 0b111;
  let Inst{22} = Qd{3};
  let Inst{21-20} = 0b11;
  let Inst{19-18} = size;
  let Inst{17-16} = 0b11;
  let Inst{15-13} = Qd{2-0};
  let Inst{12-9} = 0b0011;
  let Inst{8} = toint;
  let Inst{7} = unsigned;
  let Inst{4} = 0b0;
  let validForTailPredication = 1;
}

// Derives direction (ToInt) and signedness (Unsigned) from the suffix
// letters of the source and destination type records.
multiclass MVE_VCVT_fp_int_m<MVEVectorVTInfo Dest, MVEVectorVTInfo Src,
                             SDNode unpred_op> {
  defvar Unsigned = !or(!eq(Dest.SuffixLetter,"u"), !eq(Src.SuffixLetter,"u"));
  defvar ToInt = !eq(Src.SuffixLetter,"f");

  def "" : MVE_VCVT_fp_int<Dest.Suffix # "." # Src.Suffix, Dest.Size,
                           ToInt, Unsigned>;
  defvar Inst = !cast<Instruction>(NAME);

  let Predicates = [HasMVEFloat] in {
    def : Pat<(Dest.Vec (unpred_op (Src.Vec MQPR:$src))),
              (Dest.Vec (Inst (Src.Vec MQPR:$src)))>;
    def : Pat<(Dest.Vec (int_arm_mve_vcvt_fp_int_predicated
                             (Src.Vec MQPR:$src), (i32 Unsigned),
                             (Src.Pred VCCR:$mask), (Dest.Vec MQPR:$inactive))),
              (Dest.Vec (Inst (Src.Vec MQPR:$src), ARMVCCThen,
                              (Src.Pred VCCR:$mask),
                              (Dest.Vec MQPR:$inactive)))>;
  }
}
// The unsuffixed VCVT for float->int implicitly rounds toward zero,
// which I reflect here in the llvm instruction names
defm MVE_VCVTs16f16z : MVE_VCVT_fp_int_m<MVE_v8s16, MVE_v8f16, fp_to_sint>;
defm MVE_VCVTu16f16z : MVE_VCVT_fp_int_m<MVE_v8u16, MVE_v8f16, fp_to_uint>;
defm MVE_VCVTs32f32z : MVE_VCVT_fp_int_m<MVE_v4s32, MVE_v4f32, fp_to_sint>;
defm MVE_VCVTu32f32z : MVE_VCVT_fp_int_m<MVE_v4u32, MVE_v4f32, fp_to_uint>;
// Whereas VCVT for int->float rounds to nearest
defm MVE_VCVTf16s16n : MVE_VCVT_fp_int_m<MVE_v8f16, MVE_v8s16, sint_to_fp>;
defm MVE_VCVTf16u16n : MVE_VCVT_fp_int_m<MVE_v8f16, MVE_v8u16, uint_to_fp>;
defm MVE_VCVTf32s32n : MVE_VCVT_fp_int_m<MVE_v4f32, MVE_v4s32, sint_to_fp>;
defm MVE_VCVTf32u32n : MVE_VCVT_fp_int_m<MVE_v4f32, MVE_v4u32, uint_to_fp>;

// VABS/VNEG (fp); the negate bit distinguishes the two.
class MVE_VABSNEG_fp<string iname, string suffix, bits<2> size, bit negate,
                     list<dag> pattern=[]>
  : MVE_float<iname, suffix, (outs MQPR:$Qd),
              (ins MQPR:$Qm), "$Qd, $Qm", vpred_r, "", pattern> {
  bits<4> Qd;

  let Inst{28} = 0b1;
  let Inst{25-23} = 0b111;
  let Inst{22} = Qd{3};
  let Inst{21-20} = 0b11;
  let Inst{19-18} = size;
  let Inst{17-16} = 0b01;
  let Inst{15-13} = Qd{2-0};
  let Inst{11-8} = 0b0111;
  let Inst{7} = negate;
  let Inst{4} = 0b0;
  let validForTailPredication = 1;
}

multiclass MVE_VABSNEG_fp_m<string iname, SDNode unpred_op, Intrinsic pred_int,
                            MVEVectorVTInfo VTI, bit opcode> {
  def "" : MVE_VABSNEG_fp<iname, VTI.Suffix, VTI.Size, opcode>;
  defvar Inst = !cast<Instruction>(NAME);

  let Predicates = [HasMVEInt] in {
    def : Pat<(VTI.Vec (unpred_op (VTI.Vec MQPR:$v))),
              (VTI.Vec (Inst $v))>;
    def : Pat<(VTI.Vec (pred_int (VTI.Vec MQPR:$v), (VTI.Pred VCCR:$mask),
                                 (VTI.Vec MQPR:$inactive))),
              (VTI.Vec (Inst $v, ARMVCCThen, $mask, $inactive))>;
  }
}

defm MVE_VABSf16 : MVE_VABSNEG_fp_m<"vabs", fabs, int_arm_mve_abs_predicated,
                                    MVE_v8f16, 0>;
defm MVE_VABSf32 : MVE_VABSNEG_fp_m<"vabs", fabs, int_arm_mve_abs_predicated,
                                    MVE_v4f32, 0>;
defm MVE_VNEGf16 : MVE_VABSNEG_fp_m<"vneg", fneg, int_arm_mve_neg_predicated,
                                    MVE_v8f16, 1>;
defm MVE_VNEGf32 : MVE_VABSNEG_fp_m<"vneg", fneg, int_arm_mve_neg_predicated,
                                    MVE_v4f32, 1>;

// VMAXNMA/VMINNMA: max/min of absolute values, accumulating into the
// tied $Qd operand. bit_12 distinguishes the two operations.
class MVE_VMAXMINNMA<string iname, string suffix, bit size, bit bit_12,
                     list<dag> pattern=[]>
  : MVE_f<(outs MQPR:$Qd), (ins MQPR:$Qd_src, MQPR:$Qm),
          NoItinerary, iname, suffix, "$Qd, $Qm", vpred_n, "$Qd = $Qd_src",
          pattern> {
  bits<4> Qd;
  bits<4> Qm;

  let Inst{28} = size;
  let Inst{25-23} = 0b100;
  let Inst{22} = Qd{3};
  let Inst{21-16} = 0b111111;
  let Inst{15-13} = Qd{2-0};
  let Inst{12} = bit_12;
  let Inst{11-6} = 0b111010;
  let Inst{5} = Qm{3};
  let Inst{4} = 0b0;
  let Inst{3-1} = Qm{2-0};
  let Inst{0} = 0b1;

  let isCommutable = 1;
}

multiclass MVE_VMAXMINNMA_m<string iname, MVEVectorVTInfo VTI,
                            SDNode unpred_op, Intrinsic pred_int,
                            bit bit_12> {
  def "" : MVE_VMAXMINNMA<iname, VTI.Suffix, VTI.Size{0}, bit_12>;
  defvar Inst = !cast<Instruction>(NAME);

  let Predicates = [HasMVEInt] in {
    // Unpredicated v(max|min)nma
    def : Pat<(VTI.Vec (unpred_op (fabs (VTI.Vec MQPR:$Qd)),
                                  (fabs (VTI.Vec MQPR:$Qm)))),
              (VTI.Vec (Inst (VTI.Vec MQPR:$Qd), (VTI.Vec MQPR:$Qm)))>;

    // Predicated v(max|min)nma
    def : Pat<(VTI.Vec (pred_int (VTI.Vec MQPR:$Qd), (VTI.Vec MQPR:$Qm),
                                 (VTI.Pred VCCR:$mask))),
              (VTI.Vec (Inst (VTI.Vec MQPR:$Qd), (VTI.Vec MQPR:$Qm),
                             ARMVCCThen, (VTI.Pred VCCR:$mask)))>;
  }
}

multiclass MVE_VMAXNMA<MVEVectorVTInfo VTI, bit bit_12>
  : MVE_VMAXMINNMA_m<"vmaxnma", VTI, fmaxnum, int_arm_mve_vmaxnma_predicated, bit_12>;

defm MVE_VMAXNMAf32 : MVE_VMAXNMA<MVE_v4f32, 0b0>;
defm MVE_VMAXNMAf16 : MVE_VMAXNMA<MVE_v8f16, 0b0>;

multiclass MVE_VMINNMA<MVEVectorVTInfo VTI, bit bit_12>
  : MVE_VMAXMINNMA_m<"vminnma", VTI, fminnum, int_arm_mve_vminnma_predicated, bit_12>;

defm MVE_VMINNMAf32 : MVE_VMINNMA<MVE_v4f32, 0b1>;
defm MVE_VMINNMAf16 : MVE_VMINNMA<MVE_v8f16, 0b1>;

// end of MVE Floating Point instructions

// start of MVE compares

class MVE_VCMPqq<string suffix, bit bit_28, bits<2> bits_21_20,
                 VCMPPredicateOperand predtype, list<dag> pattern=[]>
  : MVE_p<(outs VCCR:$P0), (ins MQPR:$Qn, MQPR:$Qm, predtype:$fc),
          NoItinerary, "vcmp", suffix, "$fc, $Qn, $Qm", vpred_n, "", pattern> {
  // Base class for comparing two vector registers
  bits<3> fc;
  bits<4> Qn;
  bits<4> Qm;

  let Inst{28} = bit_28;
  let Inst{25-22} = 0b1000;
  let Inst{21-20} = bits_21_20;
  let Inst{19-17} = Qn{2-0};
  let Inst{16-13} = 0b1000;
  let Inst{12} = fc{2};
  let Inst{11-8} = 0b1111;
  let Inst{7} = fc{0};
  let Inst{6} = 0b0;
  let Inst{5} = Qm{3};
  let Inst{4} = 0b0;
  let Inst{3-1} = Qm{2-0};
  let Inst{0} = fc{1};

  let Constraints = "";

  // We need a custom decoder method for these instructions because of
  // the output VCCR operand, which isn't encoded in the instruction
  // bits anywhere (there is only one choice for it) but has to be
  // included in the MC operands so that codegen will be able to track
  // its data flow between instructions, spill/reload it when
  // necessary, etc. There seems to be no way to get the Tablegen
  // decoder to emit an operand that isn't affected by any instruction
  // bit.
  let DecoderMethod = "DecodeMVEVCMP<false," # predtype.DecoderMethod # ">";
  let validForTailPredication = 1;
}

// Subclasses restrict the condition-code operand per operand category:
// fp, sign-independent integer, unsigned, signed.
class MVE_VCMPqqf<string suffix, bit size>
    : MVE_VCMPqq<suffix, size, 0b11, pred_basic_fp> {
  let Predicates = [HasMVEFloat];
}

class MVE_VCMPqqi<string suffix, bits<2> size>
    : MVE_VCMPqq<suffix, 0b1, size, pred_basic_i> {
  let Inst{12} = 0b0;
  let Inst{0} = 0b0;
}

class MVE_VCMPqqu<string suffix, bits<2> size>
    : MVE_VCMPqq<suffix, 0b1, size, pred_basic_u> {
  let Inst{12} = 0b0;
  let Inst{0} = 0b1;
}

class MVE_VCMPqqs<string suffix, bits<2> size>
    : MVE_VCMPqq<suffix, 0b1, size, pred_basic_s> {
  let Inst{12} = 0b1;
}

def MVE_VCMPf32 : MVE_VCMPqqf<"f32", 0b0>;
def MVE_VCMPf16 : MVE_VCMPqqf<"f16", 0b1>;

def MVE_VCMPi8  : MVE_VCMPqqi<"i8",  0b00>;
def MVE_VCMPi16 : MVE_VCMPqqi<"i16", 0b01>;
def MVE_VCMPi32 : MVE_VCMPqqi<"i32", 0b10>;

def MVE_VCMPu8  : MVE_VCMPqqu<"u8",  0b00>;
def MVE_VCMPu16 : MVE_VCMPqqu<"u16", 0b01>;
def MVE_VCMPu32 : MVE_VCMPqqu<"u32", 0b10>;

def MVE_VCMPs8  : MVE_VCMPqqs<"s8",  0b00>;
def MVE_VCMPs16 : MVE_VCMPqqs<"s16", 0b01>;
def MVE_VCMPs32 : MVE_VCMPqqs<"s32", 0b10>;

class MVE_VCMPqr<string suffix, bit bit_28, bits<2> bits_21_20,
                 VCMPPredicateOperand predtype, list<dag> pattern=[]>
  : MVE_p<(outs VCCR:$P0), (ins MQPR:$Qn, GPRwithZR:$Rm, predtype:$fc),
          NoItinerary, "vcmp", suffix, "$fc, $Qn, $Rm", vpred_n, "", pattern> {
  // Base class for comparing a vector register with a scalar
  bits<3> fc;
  bits<4> Qn;
  bits<4> Rm;

  let Inst{28} = bit_28;
  let Inst{25-22} = 0b1000;
  let Inst{21-20} = bits_21_20;
  let
Inst{19-17} = Qn{2-0};
  let Inst{16-13} = 0b1000;
  let Inst{12} = fc{2};
  let Inst{11-8} = 0b1111;
  let Inst{7} = fc{0};
  let Inst{6} = 0b1;
  let Inst{5} = fc{1};
  let Inst{4} = 0b0;
  let Inst{3-0} = Rm{3-0};

  let Constraints = "";
  // Custom decoder method, for the same reason as MVE_VCMPqq
  let DecoderMethod = "DecodeMVEVCMP<true," # predtype.DecoderMethod # ">";
  let validForTailPredication = 1;
}

class MVE_VCMPqrf<string suffix, bit size>
  : MVE_VCMPqr<suffix, size, 0b11, pred_basic_fp> {
  let Predicates = [HasMVEFloat];
}

class MVE_VCMPqri<string suffix, bits<2> size>
  : MVE_VCMPqr<suffix, 0b1, size, pred_basic_i> {
  let Inst{12} = 0b0;
  let Inst{5} = 0b0;
}

class MVE_VCMPqru<string suffix, bits<2> size>
  : MVE_VCMPqr<suffix, 0b1, size, pred_basic_u> {
  let Inst{12} = 0b0;
  let Inst{5} = 0b1;
}

class MVE_VCMPqrs<string suffix, bits<2> size>
  : MVE_VCMPqr<suffix, 0b1, size, pred_basic_s> {
  let Inst{12} = 0b1;
}

def MVE_VCMPf32r : MVE_VCMPqrf<"f32", 0b0>;
def MVE_VCMPf16r : MVE_VCMPqrf<"f16", 0b1>;

def MVE_VCMPi8r  : MVE_VCMPqri<"i8",  0b00>;
def MVE_VCMPi16r : MVE_VCMPqri<"i16", 0b01>;
def MVE_VCMPi32r : MVE_VCMPqri<"i32", 0b10>;

def MVE_VCMPu8r  : MVE_VCMPqru<"u8",  0b00>;
def MVE_VCMPu16r : MVE_VCMPqru<"u16", 0b01>;
def MVE_VCMPu32r : MVE_VCMPqru<"u32", 0b10>;

def MVE_VCMPs8r  : MVE_VCMPqrs<"s8",  0b00>;
def MVE_VCMPs16r : MVE_VCMPqrs<"s16", 0b01>;
def MVE_VCMPs32r : MVE_VCMPqrs<"s32", 0b10>;

// Select an unpredicated compare-against-zero into the vector/scalar VCMP
// form, using ZR as the scalar operand.
multiclass unpred_vcmp_z<string suffix, PatLeaf fc> {
  def i8 : Pat<(v16i1 (ARMvcmpz (v16i8 MQPR:$v1), fc)),
               (v16i1 (!cast<Instruction>("MVE_VCMP"#suffix#"8r") (v16i8 MQPR:$v1), ZR, fc))>;
  def i16 : Pat<(v8i1 (ARMvcmpz (v8i16 MQPR:$v1), fc)),
                (v8i1 (!cast<Instruction>("MVE_VCMP"#suffix#"16r") (v8i16 MQPR:$v1), ZR, fc))>;
  def i32 :
Pat<(v4i1 (ARMvcmpz (v4i32 MQPR:$v1), fc)),
                (v4i1 (!cast<Instruction>("MVE_VCMP"#suffix#"32r") (v4i32 MQPR:$v1), ZR, fc))>;

  // A compare ANDed with an existing predicate folds into a predicated VCMP.
  def : Pat<(v16i1 (and (v16i1 VCCR:$p1), (v16i1 (ARMvcmpz (v16i8 MQPR:$v1), fc)))),
            (v16i1 (!cast<Instruction>("MVE_VCMP"#suffix#"8r") (v16i8 MQPR:$v1), ZR, fc, ARMVCCThen, VCCR:$p1))>;
  def : Pat<(v8i1 (and (v8i1 VCCR:$p1), (v8i1 (ARMvcmpz (v8i16 MQPR:$v1), fc)))),
            (v8i1 (!cast<Instruction>("MVE_VCMP"#suffix#"16r") (v8i16 MQPR:$v1), ZR, fc, ARMVCCThen, VCCR:$p1))>;
  def : Pat<(v4i1 (and (v4i1 VCCR:$p1), (v4i1 (ARMvcmpz (v4i32 MQPR:$v1), fc)))),
            (v4i1 (!cast<Instruction>("MVE_VCMP"#suffix#"32r") (v4i32 MQPR:$v1), ZR, fc, ARMVCCThen, VCCR:$p1))>;
}

// Select vector/vector compares, and vector/scalar compares where the
// scalar operand appears as a splat (ARMvdup).
multiclass unpred_vcmp_r<string suffix, PatLeaf fc> {
  def i8 : Pat<(v16i1 (ARMvcmp (v16i8 MQPR:$v1), (v16i8 MQPR:$v2), fc)),
               (v16i1 (!cast<Instruction>("MVE_VCMP"#suffix#"8") (v16i8 MQPR:$v1), (v16i8 MQPR:$v2), fc))>;
  def i16 : Pat<(v8i1 (ARMvcmp (v8i16 MQPR:$v1), (v8i16 MQPR:$v2), fc)),
                (v8i1 (!cast<Instruction>("MVE_VCMP"#suffix#"16") (v8i16 MQPR:$v1), (v8i16 MQPR:$v2), fc))>;
  def i32 : Pat<(v4i1 (ARMvcmp (v4i32 MQPR:$v1), (v4i32 MQPR:$v2), fc)),
                (v4i1 (!cast<Instruction>("MVE_VCMP"#suffix#"32") (v4i32 MQPR:$v1), (v4i32 MQPR:$v2), fc))>;

  def i8r : Pat<(v16i1 (ARMvcmp (v16i8 MQPR:$v1), (v16i8 (ARMvdup rGPR:$v2)), fc)),
                (v16i1 (!cast<Instruction>("MVE_VCMP"#suffix#"8r") (v16i8 MQPR:$v1), (i32 rGPR:$v2), fc))>;
  def i16r : Pat<(v8i1 (ARMvcmp (v8i16 MQPR:$v1), (v8i16 (ARMvdup rGPR:$v2)), fc)),
                 (v8i1 (!cast<Instruction>("MVE_VCMP"#suffix#"16r") (v8i16 MQPR:$v1), (i32 rGPR:$v2), fc))>;
  def i32r : Pat<(v4i1 (ARMvcmp (v4i32 MQPR:$v1), (v4i32 (ARMvdup rGPR:$v2)), fc)),
                 (v4i1 (!cast<Instruction>("MVE_VCMP"#suffix#"32r") (v4i32 MQPR:$v1), (i32 rGPR:$v2), fc))>;

  def : Pat<(v16i1 (and (v16i1 VCCR:$p1), (v16i1 (ARMvcmp (v16i8 MQPR:$v1), (v16i8 MQPR:$v2), fc)))),
            (v16i1
(!cast<Instruction>("MVE_VCMP"#suffix#"8") (v16i8 MQPR:$v1), (v16i8 MQPR:$v2), fc, ARMVCCThen, VCCR:$p1))>;
  def : Pat<(v8i1 (and (v8i1 VCCR:$p1), (v8i1 (ARMvcmp (v8i16 MQPR:$v1), (v8i16 MQPR:$v2), fc)))),
            (v8i1 (!cast<Instruction>("MVE_VCMP"#suffix#"16") (v8i16 MQPR:$v1), (v8i16 MQPR:$v2), fc, ARMVCCThen, VCCR:$p1))>;
  def : Pat<(v4i1 (and (v4i1 VCCR:$p1), (v4i1 (ARMvcmp (v4i32 MQPR:$v1), (v4i32 MQPR:$v2), fc)))),
            (v4i1 (!cast<Instruction>("MVE_VCMP"#suffix#"32") (v4i32 MQPR:$v1), (v4i32 MQPR:$v2), fc, ARMVCCThen, VCCR:$p1))>;

  // AND with an existing predicate, vector/scalar (splat) form.
  def : Pat<(v16i1 (and (v16i1 VCCR:$p1), (v16i1 (ARMvcmp (v16i8 MQPR:$v1), (v16i8 (ARMvdup rGPR:$v2)), fc)))),
            (v16i1 (!cast<Instruction>("MVE_VCMP"#suffix#"8r") (v16i8 MQPR:$v1), (i32 rGPR:$v2), fc, ARMVCCThen, VCCR:$p1))>;
  def : Pat<(v8i1 (and (v8i1 VCCR:$p1), (v8i1 (ARMvcmp (v8i16 MQPR:$v1), (v8i16 (ARMvdup rGPR:$v2)), fc)))),
            (v8i1 (!cast<Instruction>("MVE_VCMP"#suffix#"16r") (v8i16 MQPR:$v1), (i32 rGPR:$v2), fc, ARMVCCThen, VCCR:$p1))>;
  def : Pat<(v4i1 (and (v4i1 VCCR:$p1), (v4i1 (ARMvcmp (v4i32 MQPR:$v1), (v4i32 (ARMvdup rGPR:$v2)), fc)))),
            (v4i1 (!cast<Instruction>("MVE_VCMP"#suffix#"32r") (v4i32 MQPR:$v1), (i32 rGPR:$v2), fc, ARMVCCThen, VCCR:$p1))>;
}

// Floating-point compare against zero, f16 and f32.
multiclass unpred_vcmpf_z<PatLeaf fc> {
  def f16 : Pat<(v8i1 (ARMvcmpz (v8f16 MQPR:$v1), fc)),
                (v8i1 (MVE_VCMPf16r (v8f16 MQPR:$v1), ZR, fc))>;
  def f32 : Pat<(v4i1 (ARMvcmpz (v4f32 MQPR:$v1), fc)),
                (v4i1 (MVE_VCMPf32r (v4f32 MQPR:$v1), ZR, fc))>;

  def : Pat<(v8i1 (and (v8i1 VCCR:$p1), (v8i1 (ARMvcmpz (v8f16 MQPR:$v1), fc)))),
            (v8i1 (MVE_VCMPf16r (v8f16 MQPR:$v1), ZR, fc, ARMVCCThen, VCCR:$p1))>;
  def : Pat<(v4i1 (and (v4i1 VCCR:$p1), (v4i1 (ARMvcmpz (v4f32 MQPR:$v1), fc)))),
            (v4i1 (MVE_VCMPf32r (v4f32 MQPR:$v1), ZR, fc, ARMVCCThen, VCCR:$p1))>;
}

// Floating-point vector/vector and vector/scalar (splat) compares.
multiclass unpred_vcmpf_r<PatLeaf fc> {
  def : Pat<(v8i1 (ARMvcmp (v8f16 MQPR:$v1), (v8f16 MQPR:$v2), fc)),
            (v8i1
(MVE_VCMPf16 (v8f16 MQPR:$v1), (v8f16 MQPR:$v2), fc))>;
  def : Pat<(v4i1 (ARMvcmp (v4f32 MQPR:$v1), (v4f32 MQPR:$v2), fc)),
            (v4i1 (MVE_VCMPf32 (v4f32 MQPR:$v1), (v4f32 MQPR:$v2), fc))>;

  def : Pat<(v8i1 (ARMvcmp (v8f16 MQPR:$v1), (v8f16 (ARMvdup rGPR:$v2)), fc)),
            (v8i1 (MVE_VCMPf16r (v8f16 MQPR:$v1), (i32 rGPR:$v2), fc))>;
  def : Pat<(v4i1 (ARMvcmp (v4f32 MQPR:$v1), (v4f32 (ARMvdup rGPR:$v2)), fc)),
            (v4i1 (MVE_VCMPf32r (v4f32 MQPR:$v1), (i32 rGPR:$v2), fc))>;

  def : Pat<(v8i1 (and (v8i1 VCCR:$p1), (v8i1 (ARMvcmp (v8f16 MQPR:$v1), (v8f16 MQPR:$v2), fc)))),
            (v8i1 (MVE_VCMPf16 (v8f16 MQPR:$v1), (v8f16 MQPR:$v2), fc, ARMVCCThen, VCCR:$p1))>;
  def : Pat<(v4i1 (and (v4i1 VCCR:$p1), (v4i1 (ARMvcmp (v4f32 MQPR:$v1), (v4f32 MQPR:$v2), fc)))),
            (v4i1 (MVE_VCMPf32 (v4f32 MQPR:$v1), (v4f32 MQPR:$v2), fc, ARMVCCThen, VCCR:$p1))>;

  def : Pat<(v8i1 (and (v8i1 VCCR:$p1), (v8i1 (ARMvcmp (v8f16 MQPR:$v1), (v8f16 (ARMvdup rGPR:$v2)), fc)))),
            (v8i1 (MVE_VCMPf16r (v8f16 MQPR:$v1), (i32 rGPR:$v2), fc, ARMVCCThen, VCCR:$p1))>;
  def : Pat<(v4i1 (and (v4i1 VCCR:$p1), (v4i1 (ARMvcmp (v4f32 MQPR:$v1), (v4f32 (ARMvdup rGPR:$v2)), fc)))),
            (v4i1 (MVE_VCMPf32r (v4f32 MQPR:$v1), (i32 rGPR:$v2), fc, ARMVCCThen, VCCR:$p1))>;
}

// Instantiate the integer compare patterns for every condition code.
let Predicates = [HasMVEInt] in {
  defm MVE_VCEQZ  : unpred_vcmp_z<"i", ARMCCeq>;
  defm MVE_VCNEZ  : unpred_vcmp_z<"i", ARMCCne>;
  defm MVE_VCGEZ  : unpred_vcmp_z<"s", ARMCCge>;
  defm MVE_VCLTZ  : unpred_vcmp_z<"s", ARMCClt>;
  defm MVE_VCGTZ  : unpred_vcmp_z<"s", ARMCCgt>;
  defm MVE_VCLEZ  : unpred_vcmp_z<"s", ARMCCle>;
  defm MVE_VCGTUZ : unpred_vcmp_z<"u", ARMCChi>;
  defm MVE_VCGEUZ : unpred_vcmp_z<"u", ARMCChs>;

  defm MVE_VCEQ  : unpred_vcmp_r<"i", ARMCCeq>;
  defm MVE_VCNE  : unpred_vcmp_r<"i", ARMCCne>;
  defm MVE_VCGE  : unpred_vcmp_r<"s", ARMCCge>;
  defm MVE_VCLT  : unpred_vcmp_r<"s", ARMCClt>;
  defm MVE_VCGT  : unpred_vcmp_r<"s",
ARMCCgt>;
  defm MVE_VCLE  : unpred_vcmp_r<"s", ARMCCle>;
  defm MVE_VCGTU : unpred_vcmp_r<"u", ARMCChi>;
  defm MVE_VCGEU : unpred_vcmp_r<"u", ARMCChs>;
}

// Instantiate the floating-point compare patterns for every condition code.
let Predicates = [HasMVEFloat] in {
  defm MVE_VFCEQZ : unpred_vcmpf_z<ARMCCeq>;
  defm MVE_VFCNEZ : unpred_vcmpf_z<ARMCCne>;
  defm MVE_VFCGEZ : unpred_vcmpf_z<ARMCCge>;
  defm MVE_VFCLTZ : unpred_vcmpf_z<ARMCClt>;
  defm MVE_VFCGTZ : unpred_vcmpf_z<ARMCCgt>;
  defm MVE_VFCLEZ : unpred_vcmpf_z<ARMCCle>;

  defm MVE_VFCEQ : unpred_vcmpf_r<ARMCCeq>;
  defm MVE_VFCNE : unpred_vcmpf_r<ARMCCne>;
  defm MVE_VFCGE : unpred_vcmpf_r<ARMCCge>;
  defm MVE_VFCLT : unpred_vcmpf_r<ARMCClt>;
  defm MVE_VFCGT : unpred_vcmpf_r<ARMCCgt>;
  defm MVE_VFCLE : unpred_vcmpf_r<ARMCCle>;
}


// Extra "worst case" and/or/xor patterns, going into and out of GRP
multiclass two_predops<SDPatternOperator opnode, Instruction insn> {
  def v16i1 : Pat<(v16i1 (opnode (v16i1 VCCR:$p1), (v16i1 VCCR:$p2))),
                  (v16i1 (COPY_TO_REGCLASS
                           (insn (i32 (COPY_TO_REGCLASS (v16i1 VCCR:$p1), rGPR)),
                                 (i32 (COPY_TO_REGCLASS (v16i1 VCCR:$p2), rGPR))),
                           VCCR))>;
  def v8i1 : Pat<(v8i1 (opnode (v8i1 VCCR:$p1), (v8i1 VCCR:$p2))),
                 (v8i1 (COPY_TO_REGCLASS
                         (insn (i32 (COPY_TO_REGCLASS (v8i1 VCCR:$p1), rGPR)),
                               (i32 (COPY_TO_REGCLASS (v8i1 VCCR:$p2), rGPR))),
                         VCCR))>;
  def v4i1 : Pat<(v4i1 (opnode (v4i1 VCCR:$p1), (v4i1 VCCR:$p2))),
                 (v4i1 (COPY_TO_REGCLASS
                         (insn (i32 (COPY_TO_REGCLASS (v4i1 VCCR:$p1), rGPR)),
                               (i32 (COPY_TO_REGCLASS (v4i1 VCCR:$p2), rGPR))),
                         VCCR))>;
}

let Predicates = [HasMVEInt] in {
  defm POR  : two_predops<or,  t2ORRrr>;
  defm PAND : two_predops<and, t2ANDrr>;
  defm PEOR : two_predops<xor, t2EORrr>;
}

// Occasionally we need to cast between a i32 and a boolean vector, for
// example when moving between rGPR and VPR.P0 as part of predicate vector
// shuffles. We also sometimes need to cast between different predicate
// vector types (v4i1<>v8i1, etc.) also as part of lowering vector shuffles.
def predicate_cast : SDNode<"ARMISD::PREDICATE_CAST", SDTUnaryOp>;

// Loads known to be at least 4-byte aligned, eligible to become a direct
// predicate-register load below.
def load_align4 : PatFrag<(ops node:$ptr), (load node:$ptr), [{
  return cast<LoadSDNode>(N)->getAlignment() >= 4;
}]>;

let Predicates = [HasMVEInt] in {
  foreach VT = [ v4i1, v8i1, v16i1 ] in {
    def : Pat<(i32 (predicate_cast (VT VCCR:$src))),
              (i32 (COPY_TO_REGCLASS (VT VCCR:$src), VCCR))>;
    def : Pat<(VT (predicate_cast (i32 VCCR:$src))),
              (VT (COPY_TO_REGCLASS (i32 VCCR:$src), VCCR))>;

    foreach VT2 = [ v4i1, v8i1, v16i1 ] in
      def : Pat<(VT (predicate_cast (VT2 VCCR:$src))),
                (VT (COPY_TO_REGCLASS (VT2 VCCR:$src), VCCR))>;
  }

  // If we happen to be casting from a load we can convert that straight
  // into a predicate load, so long as the load is of the correct type.
  foreach VT = [ v4i1, v8i1, v16i1 ] in {
    def : Pat<(VT (predicate_cast (i32 (load_align4 taddrmode_imm7<2>:$addr)))),
              (VT (VLDR_P0_off taddrmode_imm7<2>:$addr))>;
  }

  // Here we match the specific SDNode type 'ARMVectorRegCastImpl'
  // rather than the more general 'ARMVectorRegCast' which would also
  // match some bitconverts. If we use the latter in cases where the
  // input and output types are the same, the bitconvert gets elided
  // and we end up generating a nonsense match of nothing.

  foreach VT = [ v16i8, v8i16, v8f16, v4i32, v4f32, v2i64, v2f64 ] in
    foreach VT2 = [ v16i8, v8i16, v8f16, v4i32, v4f32, v2i64, v2f64 ] in
      def : Pat<(VT (ARMVectorRegCastImpl (VT2 MQPR:$src))),
                (VT MQPR:$src)>;
}

// end of MVE compares

// start of MVE_qDest_qSrc

// Shared encoding skeleton for instructions with a vector destination Qd
// and a vector source Qm; derived classes fill in the remaining bits.
class MVE_qDest_qSrc<string iname, string suffix, dag oops, dag iops,
                     string ops, vpred_ops vpred, string cstr,
                     list<dag> pattern=[]>
  : MVE_p<oops, iops, NoItinerary, iname, suffix,
          ops, vpred, cstr, pattern> {
  bits<4> Qd;
  bits<4> Qm;

  let Inst{25-23} = 0b100;
  let Inst{22} = Qd{3};
  let Inst{15-13} = Qd{2-0};
  let Inst{11-9} = 0b111;
  let Inst{6} = 0b0;
  let Inst{5} = Qm{3};
  let Inst{4} = 0b0;
  let Inst{3-1} = Qm{2-0};
}

class MVE_VQxDMLxDH<string iname, bit exch, bit round, bit subtract,
                    string suffix, bits<2> size, string cstr="", list<dag> pattern=[]>
  : MVE_qDest_qSrc<iname, suffix, (outs MQPR:$Qd),
                   (ins MQPR:$Qd_src, MQPR:$Qn, MQPR:$Qm), "$Qd, $Qn, $Qm",
                   vpred_n, "$Qd = $Qd_src"#cstr, pattern> {
  bits<4> Qn;

  let Inst{28} = subtract;
  let Inst{21-20} = size;
  let Inst{19-17} = Qn{2-0};
  let Inst{16} = 0b0;
  let Inst{12} = exch;
  let Inst{8} = 0b0;
  let Inst{7} = Qn{3};
  let Inst{0} = round;
}

multiclass MVE_VQxDMLxDH_p<string iname, bit exch, bit round, bit subtract,
                           MVEVectorVTInfo VTI> {
  // 32-bit lanes need @earlyclobber on Qd (see cstr argument below).
  def "": MVE_VQxDMLxDH<iname, exch, round, subtract, VTI.Suffix, VTI.Size,
                        !if(!eq(VTI.LaneBits, 32), ",@earlyclobber $Qd", "")>;
  defvar Inst = !cast<Instruction>(NAME);
  defvar ConstParams = (?
(i32 exch), (i32 round), (i32 subtract));
  defvar unpred_intr = int_arm_mve_vqdmlad;
  defvar pred_intr = int_arm_mve_vqdmlad_predicated;

  // Unpredicated intrinsic, with the exch/round/subtract flags appended.
  def : Pat<(VTI.Vec !con((unpred_intr (VTI.Vec MQPR:$a), (VTI.Vec MQPR:$b),
                           (VTI.Vec MQPR:$c)), ConstParams)),
            (VTI.Vec (Inst (VTI.Vec MQPR:$a), (VTI.Vec MQPR:$b),
                           (VTI.Vec MQPR:$c)))>;
  // Predicated intrinsic: same flags, plus the trailing predicate operand.
  def : Pat<(VTI.Vec !con((pred_intr (VTI.Vec MQPR:$a), (VTI.Vec MQPR:$b),
                           (VTI.Vec MQPR:$c)), ConstParams,
                          (? (VTI.Pred VCCR:$pred)))),
            (VTI.Vec (Inst (VTI.Vec MQPR:$a), (VTI.Vec MQPR:$b),
                           (VTI.Vec MQPR:$c),
                           ARMVCCThen, (VTI.Pred VCCR:$pred)))>;
}

multiclass MVE_VQxDMLxDH_multi<string iname, bit exch,
                               bit round, bit subtract> {
  defm s8  : MVE_VQxDMLxDH_p<iname, exch, round, subtract, MVE_v16s8>;
  defm s16 : MVE_VQxDMLxDH_p<iname, exch, round, subtract, MVE_v8s16>;
  defm s32 : MVE_VQxDMLxDH_p<iname, exch, round, subtract, MVE_v4s32>;
}

defm MVE_VQDMLADH   : MVE_VQxDMLxDH_multi<"vqdmladh",   0b0, 0b0, 0b0>;
defm MVE_VQDMLADHX  : MVE_VQxDMLxDH_multi<"vqdmladhx",  0b1, 0b0, 0b0>;
defm MVE_VQRDMLADH  : MVE_VQxDMLxDH_multi<"vqrdmladh",  0b0, 0b1, 0b0>;
defm MVE_VQRDMLADHX : MVE_VQxDMLxDH_multi<"vqrdmladhx", 0b1, 0b1, 0b0>;
defm MVE_VQDMLSDH   : MVE_VQxDMLxDH_multi<"vqdmlsdh",   0b0, 0b0, 0b1>;
defm MVE_VQDMLSDHX  : MVE_VQxDMLxDH_multi<"vqdmlsdhx",  0b1, 0b0, 0b1>;
defm MVE_VQRDMLSDH  : MVE_VQxDMLxDH_multi<"vqrdmlsdh",  0b0, 0b1, 0b1>;
defm MVE_VQRDMLSDHX : MVE_VQxDMLxDH_multi<"vqrdmlsdhx", 0b1, 0b1, 0b1>;

// Complex multiply with rotation selected by the $rot operand.
class MVE_VCMUL<string iname, string suffix, bit size, string cstr="">
  : MVE_qDest_qSrc<iname, suffix, (outs MQPR:$Qd),
                   (ins MQPR:$Qn, MQPR:$Qm, complexrotateop:$rot),
                   "$Qd, $Qn, $Qm, $rot", vpred_r, cstr, []> {
  bits<4> Qn;
  bits<2> rot;

  let Inst{28} = size;
  let Inst{21-20} = 0b11;
  let Inst{19-17} = Qn{2-0};
  let Inst{16} = 0b0;
  let Inst{12} = rot{1};
  let Inst{8} = 0b0;
let Inst{7} = Qn{3};
  let Inst{0} = rot{0};

  let Predicates = [HasMVEFloat];
}

multiclass MVE_VCMUL_m<string iname, MVEVectorVTInfo VTI,
                       bit size, string cstr=""> {
  def "" : MVE_VCMUL<iname, VTI.Suffix, size, cstr>;
  defvar Inst = !cast<Instruction>(NAME);

  let Predicates = [HasMVEFloat] in {
    // Unpredicated vcmul intrinsic.
    def : Pat<(VTI.Vec (int_arm_mve_vcmulq
                            imm:$rot, (VTI.Vec MQPR:$Qn), (VTI.Vec MQPR:$Qm))),
              (VTI.Vec (Inst (VTI.Vec MQPR:$Qn), (VTI.Vec MQPR:$Qm),
                             imm:$rot))>;

    // Predicated vcmul: disabled lanes come from $inactive.
    def : Pat<(VTI.Vec (int_arm_mve_vcmulq_predicated
                            imm:$rot, (VTI.Vec MQPR:$inactive),
                            (VTI.Vec MQPR:$Qn), (VTI.Vec MQPR:$Qm),
                            (VTI.Pred VCCR:$mask))),
              (VTI.Vec (Inst (VTI.Vec MQPR:$Qn), (VTI.Vec MQPR:$Qm),
                             imm:$rot, ARMVCCThen, (VTI.Pred VCCR:$mask),
                             (VTI.Vec MQPR:$inactive)))>;

  }
}

defm MVE_VCMULf16 : MVE_VCMUL_m<"vcmul", MVE_v8f16, 0b0>;
defm MVE_VCMULf32 : MVE_VCMUL_m<"vcmul", MVE_v4f32, 0b1, "@earlyclobber $Qd">;

// Widening multiply: the result lanes are twice the width of the inputs
// (hence doubleWidthResult below).
class MVE_VMULL<string iname, string suffix, bit bit_28, bits<2> bits_21_20,
                bit T, string cstr, list<dag> pattern=[]>
  : MVE_qDest_qSrc<iname, suffix, (outs MQPR:$Qd),
                   (ins MQPR:$Qn, MQPR:$Qm), "$Qd, $Qn, $Qm",
                   vpred_r, cstr, pattern> {
  bits<4> Qd;
  bits<4> Qn;
  bits<4> Qm;

  let Inst{28} = bit_28;
  let Inst{21-20} = bits_21_20;
  let Inst{19-17} = Qn{2-0};
  let Inst{16} = 0b1;
  let Inst{12} = T;
  let Inst{8} = 0b0;
  let Inst{7} = Qn{3};
  let Inst{0} = 0b0;
  let validForTailPredication = 1;
  let doubleWidthResult = 1;
}

multiclass MVE_VMULL_m<MVEVectorVTInfo VTI,
                       SDPatternOperator unpred_op, Intrinsic pred_int,
                       bit Top, string cstr=""> {
  def "" : MVE_VMULL<"vmull" # !if(Top, "t", "b"), VTI.Suffix, VTI.Unsigned,
                     VTI.Size, Top, cstr>;
  defvar Inst = !cast<Instruction>(NAME);

  let Predicates = [HasMVEInt] in {
    defvar uflag =
!if(!eq(VTI.SuffixLetter, "p"), (?), (? (i32 VTI.Unsigned)));

    // Polynomial intrinsics carry no unsigned-ness flag, so uflag is
    // empty for the "p" suffix and (i32 VTI.Unsigned) otherwise.

    // Unpredicated multiply
    def : Pat<(VTI.DblVec !con((unpred_op (VTI.Vec MQPR:$Qm),
                                          (VTI.Vec MQPR:$Qn)),
                               uflag, (? (i32 Top)))),
              (VTI.DblVec (Inst (VTI.Vec MQPR:$Qm), (VTI.Vec MQPR:$Qn)))>;

    // Predicated multiply
    def : Pat<(VTI.DblVec !con((pred_int (VTI.Vec MQPR:$Qm),
                                         (VTI.Vec MQPR:$Qn)),
                               uflag, (? (i32 Top), (VTI.DblPred VCCR:$mask),
                                         (VTI.DblVec MQPR:$inactive)))),
              (VTI.DblVec (Inst (VTI.Vec MQPR:$Qm), (VTI.Vec MQPR:$Qn),
                                ARMVCCThen, (VTI.DblPred VCCR:$mask),
                                (VTI.DblVec MQPR:$inactive)))>;
  }
}

// For polynomial multiplies, the size bits take the unused value 0b11, and
// the unsigned bit switches to encoding the size.

defm MVE_VMULLBs8  : MVE_VMULL_m<MVE_v16s8, int_arm_mve_vmull,
                                 int_arm_mve_mull_int_predicated, 0b0>;
defm MVE_VMULLTs8  : MVE_VMULL_m<MVE_v16s8, int_arm_mve_vmull,
                                 int_arm_mve_mull_int_predicated, 0b1>;
defm MVE_VMULLBs16 : MVE_VMULL_m<MVE_v8s16, int_arm_mve_vmull,
                                 int_arm_mve_mull_int_predicated, 0b0>;
defm MVE_VMULLTs16 : MVE_VMULL_m<MVE_v8s16, int_arm_mve_vmull,
                                 int_arm_mve_mull_int_predicated, 0b1>;
defm MVE_VMULLBs32 : MVE_VMULL_m<MVE_v4s32, int_arm_mve_vmull,
                                 int_arm_mve_mull_int_predicated, 0b0,
                                 "@earlyclobber $Qd">;
defm MVE_VMULLTs32 : MVE_VMULL_m<MVE_v4s32, int_arm_mve_vmull,
                                 int_arm_mve_mull_int_predicated, 0b1,
                                 "@earlyclobber $Qd">;

defm MVE_VMULLBu8  : MVE_VMULL_m<MVE_v16u8, int_arm_mve_vmull,
                                 int_arm_mve_mull_int_predicated, 0b0>;
defm MVE_VMULLTu8  : MVE_VMULL_m<MVE_v16u8, int_arm_mve_vmull,
                                 int_arm_mve_mull_int_predicated, 0b1>;
defm MVE_VMULLBu16 : MVE_VMULL_m<MVE_v8u16, int_arm_mve_vmull,
                                 int_arm_mve_mull_int_predicated, 0b0>;
defm MVE_VMULLTu16 : MVE_VMULL_m<MVE_v8u16, int_arm_mve_vmull,
                                 int_arm_mve_mull_int_predicated, 0b1>;
defm MVE_VMULLBu32 :
MVE_VMULL_m<MVE_v4u32, int_arm_mve_vmull,
                                 int_arm_mve_mull_int_predicated, 0b0,
                                 "@earlyclobber $Qd">;
defm MVE_VMULLTu32 : MVE_VMULL_m<MVE_v4u32, int_arm_mve_vmull,
                                 int_arm_mve_mull_int_predicated, 0b1,
                                 "@earlyclobber $Qd">;

defm MVE_VMULLBp8  : MVE_VMULL_m<MVE_v16p8, int_arm_mve_vmull_poly,
                                 int_arm_mve_mull_poly_predicated, 0b0>;
defm MVE_VMULLTp8  : MVE_VMULL_m<MVE_v16p8, int_arm_mve_vmull_poly,
                                 int_arm_mve_mull_poly_predicated, 0b1>;
defm MVE_VMULLBp16 : MVE_VMULL_m<MVE_v8p16, int_arm_mve_vmull_poly,
                                 int_arm_mve_mull_poly_predicated, 0b0>;
defm MVE_VMULLTp16 : MVE_VMULL_m<MVE_v8p16, int_arm_mve_vmull_poly,
                                 int_arm_mve_mull_poly_predicated, 0b1>;

// Select VMULLB/VMULLT from generic widening-multiply DAG idioms: the T
// (top) forms match when both inputs are lane-reversed or sign/zero-extended
// from the odd lanes.
let Predicates = [HasMVEInt] in {
  def : Pat<(v2i64 (ARMvmulls (v4i32 MQPR:$src1), (v4i32 MQPR:$src2))),
            (MVE_VMULLBs32 MQPR:$src1, MQPR:$src2)>;
  def : Pat<(v2i64 (ARMvmulls (v4i32 (ARMvrev64 (v4i32 MQPR:$src1))),
                              (v4i32 (ARMvrev64 (v4i32 MQPR:$src2))))),
            (MVE_VMULLTs32 MQPR:$src1, MQPR:$src2)>;

  def : Pat<(mul (sext_inreg (v4i32 MQPR:$src1), v4i16),
                 (sext_inreg (v4i32 MQPR:$src2), v4i16)),
            (MVE_VMULLBs16 MQPR:$src1, MQPR:$src2)>;
  def : Pat<(mul (sext_inreg (v4i32 (ARMVectorRegCast (ARMvrev32 (v8i16 MQPR:$src1)))), v4i16),
                 (sext_inreg (v4i32 (ARMVectorRegCast (ARMvrev32 (v8i16 MQPR:$src2)))), v4i16)),
            (MVE_VMULLTs16 MQPR:$src1, MQPR:$src2)>;

  def : Pat<(mul (sext_inreg (v8i16 MQPR:$src1), v8i8),
                 (sext_inreg (v8i16 MQPR:$src2), v8i8)),
            (MVE_VMULLBs8 MQPR:$src1, MQPR:$src2)>;
  def : Pat<(mul (sext_inreg (v8i16 (ARMVectorRegCast (ARMvrev16 (v16i8 MQPR:$src1)))), v8i8),
                 (sext_inreg (v8i16 (ARMVectorRegCast (ARMvrev16 (v16i8 MQPR:$src2)))), v8i8)),
            (MVE_VMULLTs8 MQPR:$src1, MQPR:$src2)>;

  def : Pat<(v2i64 (ARMvmullu (v4i32 MQPR:$src1), (v4i32 MQPR:$src2))),
            (MVE_VMULLBu32 MQPR:$src1, MQPR:$src2)>;
  def : Pat<(v2i64 (ARMvmullu (v4i32 (ARMvrev64
(v4i32 MQPR:$src1))),
                              (v4i32 (ARMvrev64 (v4i32 MQPR:$src2))))),
            (MVE_VMULLTu32 MQPR:$src1, MQPR:$src2)>;

  // Zero-extension from even lanes is matched as an AND with the
  // 0x0000FFFF splat (vmov immediate encoding 0xCFF).
  def : Pat<(mul (and (v4i32 MQPR:$src1), (v4i32 (ARMvmovImm (i32 0xCFF)))),
                 (and (v4i32 MQPR:$src2), (v4i32 (ARMvmovImm (i32 0xCFF))))),
            (MVE_VMULLBu16 MQPR:$src1, MQPR:$src2)>;
  def : Pat<(mul (and (v4i32 (ARMVectorRegCast (ARMvrev32 (v8i16 MQPR:$src1)))),
                      (v4i32 (ARMvmovImm (i32 0xCFF)))),
                 (and (v4i32 (ARMVectorRegCast (ARMvrev32 (v8i16 MQPR:$src2)))),
                      (v4i32 (ARMvmovImm (i32 0xCFF))))),
            (MVE_VMULLTu16 MQPR:$src1, MQPR:$src2)>;

  def : Pat<(mul (ARMvbicImm (v8i16 MQPR:$src1), (i32 0xAFF)),
                 (ARMvbicImm (v8i16 MQPR:$src2), (i32 0xAFF))),
            (MVE_VMULLBu8 MQPR:$src1, MQPR:$src2)>;
  def : Pat<(mul (ARMvbicImm (v8i16 (ARMVectorRegCast (ARMvrev16 (v16i8 MQPR:$src1)))), (i32 0xAFF)),
                 (ARMvbicImm (v8i16 (ARMVectorRegCast (ARMvrev16 (v16i8 MQPR:$src2)))), (i32 0xAFF))),
            (MVE_VMULLTu8 MQPR:$src1, MQPR:$src2)>;
}

// Multiply returning the high half of each double-width product;
// round selects the rounding variant (vrmulh).
class MVE_VxMULH<string iname, string suffix, bit U, bits<2> size, bit round,
                 list<dag> pattern=[]>
  : MVE_qDest_qSrc<iname, suffix, (outs MQPR:$Qd),
                   (ins MQPR:$Qn, MQPR:$Qm), "$Qd, $Qn, $Qm",
                   vpred_r, "", pattern> {
  bits<4> Qn;

  let Inst{28} = U;
  let Inst{21-20} = size;
  let Inst{19-17} = Qn{2-0};
  let Inst{16} = 0b1;
  let Inst{12} = round;
  let Inst{8} = 0b0;
  let Inst{7} = Qn{3};
  let Inst{0} = 0b1;
  let validForTailPredication = 1;
}

multiclass MVE_VxMULH_m<string iname, MVEVectorVTInfo VTI, SDNode unpred_op,
                        Intrinsic PredInt, bit round> {
  def "" : MVE_VxMULH<iname, VTI.Suffix, VTI.Unsigned, VTI.Size, round>;
  defvar Inst = !cast<Instruction>(NAME);

  let Predicates = [HasMVEInt] in {
    // Non-rounding forms map onto the generic mulhu/mulhs DAG nodes.
    if !eq(round, 0b0) then {
      defvar mulh = !if(VTI.Unsigned, mulhu, mulhs);
      defm : MVE_TwoOpPattern<VTI, mulh, PredInt, (?
(i32 VTI.Unsigned)),
                              !cast<Instruction>(NAME)>;
    } else {
      // Predicated multiply returning high bits
      def : Pat<(VTI.Vec (PredInt (VTI.Vec MQPR:$Qm), (VTI.Vec MQPR:$Qn),
                                  (i32 VTI.Unsigned), (VTI.Pred VCCR:$mask),
                                  (VTI.Vec MQPR:$inactive))),
                (VTI.Vec (Inst (VTI.Vec MQPR:$Qm), (VTI.Vec MQPR:$Qn),
                               ARMVCCThen, (VTI.Pred VCCR:$mask),
                               (VTI.Vec MQPR:$inactive)))>;
    }

    // Unpredicated intrinsic
    def : Pat<(VTI.Vec (unpred_op (VTI.Vec MQPR:$Qm), (VTI.Vec MQPR:$Qn),
                                  (i32 VTI.Unsigned))),
              (VTI.Vec (Inst (VTI.Vec MQPR:$Qm), (VTI.Vec MQPR:$Qn)))>;
  }
}

multiclass MVE_VMULT<string iname, MVEVectorVTInfo VTI, bit round>
  : MVE_VxMULH_m<iname, VTI, !if(round, int_arm_mve_vrmulh, int_arm_mve_vmulh),
                 !if(round, int_arm_mve_rmulh_predicated,
                            int_arm_mve_mulh_predicated),
                 round>;

defm MVE_VMULHs8   : MVE_VMULT<"vmulh",  MVE_v16s8, 0b0>;
defm MVE_VMULHs16  : MVE_VMULT<"vmulh",  MVE_v8s16, 0b0>;
defm MVE_VMULHs32  : MVE_VMULT<"vmulh",  MVE_v4s32, 0b0>;
defm MVE_VMULHu8   : MVE_VMULT<"vmulh",  MVE_v16u8, 0b0>;
defm MVE_VMULHu16  : MVE_VMULT<"vmulh",  MVE_v8u16, 0b0>;
defm MVE_VMULHu32  : MVE_VMULT<"vmulh",  MVE_v4u32, 0b0>;

defm MVE_VRMULHs8  : MVE_VMULT<"vrmulh", MVE_v16s8, 0b1>;
defm MVE_VRMULHs16 : MVE_VMULT<"vrmulh", MVE_v8s16, 0b1>;
defm MVE_VRMULHs32 : MVE_VMULT<"vrmulh", MVE_v4s32, 0b1>;
defm MVE_VRMULHu8  : MVE_VMULT<"vrmulh", MVE_v16u8, 0b1>;
defm MVE_VRMULHu16 : MVE_VMULT<"vrmulh", MVE_v8u16, 0b1>;
defm MVE_VRMULHu32 : MVE_VMULT<"vrmulh", MVE_v4u32, 0b1>;

// Narrowing moves (vmovn/vqmovn/vqmovun): write only the bottom (T=0) or
// top (T=1) half-width lanes of Qd, preserving the other half from
// $Qd_src (hence retainsPreviousHalfElement).
class MVE_VxMOVxN<string iname, string suffix, bit bit_28, bit bit_17,
                  bits<2> size, bit T, list<dag> pattern=[]>
  : MVE_qDest_qSrc<iname, suffix, (outs MQPR:$Qd),
                   (ins MQPR:$Qd_src, MQPR:$Qm), "$Qd, $Qm",
                   vpred_n, "$Qd = $Qd_src", pattern> {

  let Inst{28} = bit_28;
  let Inst{21-20} = 0b11;
  let Inst{19-18} = size;
  let Inst{17} =
bit_17;
  let Inst{16} = 0b1;
  let Inst{12} = T;
  let Inst{8} = 0b0;
  let Inst{7} = !not(bit_17);
  let Inst{0} = 0b1;
  let validForTailPredication = 1;
  let retainsPreviousHalfElement = 1;
}

// Define both the bottom-half ("b") and top-half ("t") variants together.
multiclass MVE_VxMOVxN_halves<string iname, string suffix,
                              bit bit_28, bit bit_17, bits<2> size> {
  def bh : MVE_VxMOVxN<iname # "b", suffix, bit_28, bit_17, size, 0b0>;
  def th : MVE_VxMOVxN<iname # "t", suffix, bit_28, bit_17, size, 0b1>;
}

defm MVE_VMOVNi16   : MVE_VxMOVxN_halves<"vmovn",   "i16", 0b1, 0b0, 0b00>;
defm MVE_VMOVNi32   : MVE_VxMOVxN_halves<"vmovn",   "i32", 0b1, 0b0, 0b01>;
defm MVE_VQMOVNs16  : MVE_VxMOVxN_halves<"vqmovn",  "s16", 0b0, 0b1, 0b00>;
defm MVE_VQMOVNs32  : MVE_VxMOVxN_halves<"vqmovn",  "s32", 0b0, 0b1, 0b01>;
defm MVE_VQMOVNu16  : MVE_VxMOVxN_halves<"vqmovn",  "u16", 0b1, 0b1, 0b00>;
defm MVE_VQMOVNu32  : MVE_VxMOVxN_halves<"vqmovn",  "u32", 0b1, 0b1, 0b01>;
defm MVE_VQMOVUNs16 : MVE_VxMOVxN_halves<"vqmovun", "s16", 0b0, 0b0, 0b00>;
defm MVE_VQMOVUNs32 : MVE_VxMOVxN_halves<"vqmovun", "s32", 0b0, 0b0, 0b01>;

def MVEvmovn : SDNode<"ARMISD::VMOVN", SDTARMVEXT>;

multiclass MVE_VMOVN_p<Instruction Inst, bit top,
                       MVEVectorVTInfo VTI, MVEVectorVTInfo InVTI> {
  // Match the most obvious MVEvmovn(a,b,t), which overwrites the odd or even
  // lanes of a (depending on t) with the even lanes of b.
  def : Pat<(VTI.Vec (MVEvmovn (VTI.Vec MQPR:$Qd_src),
                               (VTI.Vec MQPR:$Qm), (i32 top))),
            (VTI.Vec (Inst (VTI.Vec MQPR:$Qd_src), (VTI.Vec MQPR:$Qm)))>;

  if !not(top) then {
    // If we see MVEvmovn(a,ARMvrev(b),1), that wants to overwrite the odd
    // lanes of a with the odd lanes of b. In other words, the lanes we're
    // _keeping_ from a are the even ones. So we can flip it round and say that
    // this is the same as overwriting the even lanes of b with the even lanes
    // of a, i.e. it's a VMOVNB with the operands reversed.
    defvar vrev = !cast<SDNode>("ARMvrev" # InVTI.LaneBits);
    def : Pat<(VTI.Vec (MVEvmovn (VTI.Vec MQPR:$Qm),
                                 (VTI.Vec (vrev MQPR:$Qd_src)), (i32 1))),
              (VTI.Vec (Inst (VTI.Vec MQPR:$Qd_src), (VTI.Vec MQPR:$Qm)))>;
  }

  // Match the IR intrinsic for a predicated VMOVN. This regards the Qm input
  // as having wider lanes that we're narrowing, instead of already-narrow
  // lanes that we're taking every other one of.
  def : Pat<(VTI.Vec (int_arm_mve_vmovn_predicated (VTI.Vec MQPR:$Qd_src),
                                  (InVTI.Vec MQPR:$Qm), (i32 top),
                                  (InVTI.Pred VCCR:$pred))),
            (VTI.Vec (Inst (VTI.Vec MQPR:$Qd_src),
                           (InVTI.Vec MQPR:$Qm),
                           ARMVCCThen, (InVTI.Pred VCCR:$pred)))>;
}

defm : MVE_VMOVN_p<MVE_VMOVNi32bh, 0, MVE_v8i16, MVE_v4i32>;
defm : MVE_VMOVN_p<MVE_VMOVNi32th, 1, MVE_v8i16, MVE_v4i32>;
defm : MVE_VMOVN_p<MVE_VMOVNi16bh, 0, MVE_v16i8, MVE_v8i16>;
defm : MVE_VMOVN_p<MVE_VMOVNi16th, 1, MVE_v16i8, MVE_v8i16>;

// Saturating narrowing moves, selected only via the vqmovn IR intrinsics.
multiclass MVE_VQMOVN_p<Instruction Inst, bit outU, bit inU, bit top,
                        MVEVectorVTInfo VTI, MVEVectorVTInfo InVTI> {
  def : Pat<(VTI.Vec (int_arm_mve_vqmovn (VTI.Vec MQPR:$Qd_src),
                                  (InVTI.Vec MQPR:$Qm),
                                  (i32 outU), (i32 inU), (i32 top))),
            (VTI.Vec (Inst (VTI.Vec MQPR:$Qd_src),
                           (InVTI.Vec MQPR:$Qm)))>;

  def : Pat<(VTI.Vec (int_arm_mve_vqmovn_predicated (VTI.Vec MQPR:$Qd_src),
                                  (InVTI.Vec MQPR:$Qm),
                                  (i32 outU), (i32 inU), (i32 top),
                                  (InVTI.Pred VCCR:$pred))),
            (VTI.Vec (Inst (VTI.Vec MQPR:$Qd_src),
                           (InVTI.Vec MQPR:$Qm),
                           ARMVCCThen, (InVTI.Pred VCCR:$pred)))>;
}

defm : MVE_VQMOVN_p<MVE_VQMOVNs32bh, 0, 0, 0, MVE_v8i16, MVE_v4i32>;
defm : MVE_VQMOVN_p<MVE_VQMOVNs32th, 0, 0, 1, MVE_v8i16, MVE_v4i32>;
defm : MVE_VQMOVN_p<MVE_VQMOVNs16bh, 0, 0, 0, MVE_v16i8, MVE_v8i16>;
defm : MVE_VQMOVN_p<MVE_VQMOVNs16th, 0, 0, 1, MVE_v16i8, MVE_v8i16>;
defm :
MVE_VQMOVN_p<MVE_VQMOVNu32bh,  1, 1, 0, MVE_v8i16, MVE_v4i32>;
defm : MVE_VQMOVN_p<MVE_VQMOVNu32th,  1, 1, 1, MVE_v8i16, MVE_v4i32>;
defm : MVE_VQMOVN_p<MVE_VQMOVNu16bh,  1, 1, 0, MVE_v16i8, MVE_v8i16>;
defm : MVE_VQMOVN_p<MVE_VQMOVNu16th,  1, 1, 1, MVE_v16i8, MVE_v8i16>;
defm : MVE_VQMOVN_p<MVE_VQMOVUNs32bh, 1, 0, 0, MVE_v8i16, MVE_v4i32>;
defm : MVE_VQMOVN_p<MVE_VQMOVUNs32th, 1, 0, 1, MVE_v8i16, MVE_v4i32>;
defm : MVE_VQMOVN_p<MVE_VQMOVUNs16bh, 1, 0, 0, MVE_v16i8, MVE_v8i16>;
defm : MVE_VQMOVN_p<MVE_VQMOVUNs16th, 1, 0, 1, MVE_v16i8, MVE_v8i16>;

def SDTARMVMOVNQ : SDTypeProfile<1, 3, [SDTCisVec<0>, SDTCisSameAs<0, 1>,
                                        SDTCisVec<2>, SDTCisVT<3, i32>]>;
def MVEvqmovns : SDNode<"ARMISD::VQMOVNs", SDTARMVMOVNQ>;
def MVEvqmovnu : SDNode<"ARMISD::VQMOVNu", SDTARMVMOVNQ>;

// Select the VQMOVNs/VQMOVNu target nodes; the final i32 operand selects
// the bottom (0) or top (1) half.
let Predicates = [HasMVEInt] in {
  def : Pat<(v8i16 (MVEvqmovns (v8i16 MQPR:$Qd_src), (v4i32 MQPR:$Qm), (i32 0))),
            (v8i16 (MVE_VQMOVNs32bh (v8i16 MQPR:$Qd_src), (v4i32 MQPR:$Qm)))>;
  def : Pat<(v8i16 (MVEvqmovns (v8i16 MQPR:$Qd_src), (v4i32 MQPR:$Qm), (i32 1))),
            (v8i16 (MVE_VQMOVNs32th (v8i16 MQPR:$Qd_src), (v4i32 MQPR:$Qm)))>;
  def : Pat<(v16i8 (MVEvqmovns (v16i8 MQPR:$Qd_src), (v8i16 MQPR:$Qm), (i32 0))),
            (v16i8 (MVE_VQMOVNs16bh (v16i8 MQPR:$Qd_src), (v8i16 MQPR:$Qm)))>;
  def : Pat<(v16i8 (MVEvqmovns (v16i8 MQPR:$Qd_src), (v8i16 MQPR:$Qm), (i32 1))),
            (v16i8 (MVE_VQMOVNs16th (v16i8 MQPR:$Qd_src), (v8i16 MQPR:$Qm)))>;

  def : Pat<(v8i16 (MVEvqmovnu (v8i16 MQPR:$Qd_src), (v4i32 MQPR:$Qm), (i32 0))),
            (v8i16 (MVE_VQMOVNu32bh (v8i16 MQPR:$Qd_src), (v4i32 MQPR:$Qm)))>;
  def : Pat<(v8i16 (MVEvqmovnu (v8i16 MQPR:$Qd_src), (v4i32 MQPR:$Qm), (i32 1))),
            (v8i16 (MVE_VQMOVNu32th (v8i16 MQPR:$Qd_src), (v4i32 MQPR:$Qm)))>;
  def : Pat<(v16i8 (MVEvqmovnu (v16i8 MQPR:$Qd_src), (v8i16 MQPR:$Qm), (i32 0))),
            (v16i8 (MVE_VQMOVNu16bh (v16i8 MQPR:$Qd_src), (v8i16 MQPR:$Qm)))>;
  def : Pat<(v16i8
(MVEvqmovnu (v16i8 MQPR:$Qd_src), (v8i16 MQPR:$Qm), (i32 1))),
            (v16i8 (MVE_VQMOVNu16th (v16i8 MQPR:$Qd_src), (v8i16 MQPR:$Qm)))>;

  // Fold a right-shift-by-immediate feeding a saturating narrow into a
  // single VQSHRN of the matching half (bh/th) and signedness.
  def : Pat<(v8i16 (MVEvqmovns (v8i16 MQPR:$Qd_src), (v4i32 (ARMvshrsImm (v4i32 MQPR:$Qm), imm0_31:$imm)), (i32 0))),
            (v8i16 (MVE_VQSHRNbhs32 (v8i16 MQPR:$Qd_src), (v4i32 MQPR:$Qm), imm0_31:$imm))>;
  def : Pat<(v16i8 (MVEvqmovns (v16i8 MQPR:$Qd_src), (v8i16 (ARMvshrsImm (v8i16 MQPR:$Qm), imm0_15:$imm)), (i32 0))),
            (v16i8 (MVE_VQSHRNbhs16 (v16i8 MQPR:$Qd_src), (v8i16 MQPR:$Qm), imm0_15:$imm))>;
  def : Pat<(v8i16 (MVEvqmovns (v8i16 MQPR:$Qd_src), (v4i32 (ARMvshrsImm (v4i32 MQPR:$Qm), imm0_31:$imm)), (i32 1))),
            (v8i16 (MVE_VQSHRNths32 (v8i16 MQPR:$Qd_src), (v4i32 MQPR:$Qm), imm0_31:$imm))>;
  def : Pat<(v16i8 (MVEvqmovns (v16i8 MQPR:$Qd_src), (v8i16 (ARMvshrsImm (v8i16 MQPR:$Qm), imm0_15:$imm)), (i32 1))),
            (v16i8 (MVE_VQSHRNths16 (v16i8 MQPR:$Qd_src), (v8i16 MQPR:$Qm), imm0_15:$imm))>;

  def : Pat<(v8i16 (MVEvqmovnu (v8i16 MQPR:$Qd_src), (v4i32 (ARMvshruImm (v4i32 MQPR:$Qm), imm0_31:$imm)), (i32 0))),
            (v8i16 (MVE_VQSHRNbhu32 (v8i16 MQPR:$Qd_src), (v4i32 MQPR:$Qm), imm0_31:$imm))>;
  def : Pat<(v16i8 (MVEvqmovnu (v16i8 MQPR:$Qd_src), (v8i16 (ARMvshruImm (v8i16 MQPR:$Qm), imm0_15:$imm)), (i32 0))),
            (v16i8 (MVE_VQSHRNbhu16 (v16i8 MQPR:$Qd_src), (v8i16 MQPR:$Qm), imm0_15:$imm))>;
  def : Pat<(v8i16 (MVEvqmovnu (v8i16 MQPR:$Qd_src), (v4i32 (ARMvshruImm (v4i32 MQPR:$Qm), imm0_31:$imm)), (i32 1))),
            (v8i16 (MVE_VQSHRNthu32 (v8i16 MQPR:$Qd_src), (v4i32 MQPR:$Qm), imm0_31:$imm))>;
  def : Pat<(v16i8 (MVEvqmovnu (v16i8 MQPR:$Qd_src), (v8i16 (ARMvshruImm (v8i16 MQPR:$Qm), imm0_15:$imm)), (i32 1))),
            (v16i8 (MVE_VQSHRNthu16 (v16i8 MQPR:$Qd_src), (v8i16 MQPR:$Qm), imm0_15:$imm))>;
}

// Half <-> single precision conversions (vcvtb/vcvtt); T selects the
// top or bottom half lanes of the f16-sized vector, and the untouched
// half of the output is retained from the input.
class MVE_VCVT_ff<string iname, string suffix, bit op, bit T,
                  dag iops_extra, vpred_ops vpred, string cstr>
  : MVE_qDest_qSrc<iname, suffix, (outs MQPR:$Qd),
                   !con(iops_extra, (ins MQPR:$Qm)), "$Qd, $Qm",
                   vpred, cstr, []> {
  let Inst{28} = op;
  let Inst{21-16} = 0b111111;
  let Inst{12} = T;
  let Inst{8-7} = 0b00;
  let Inst{0} = 0b1;

  let Predicates = [HasMVEFloat];
  let retainsPreviousHalfElement = 1;
}

def SDTARMVCVTL : SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisVec<1>,
                                       SDTCisVT<2, i32>]>;
def MVEvcvtn : SDNode<"ARMISD::VCVTN", SDTARMVMOVNQ>;
def MVEvcvtl : SDNode<"ARMISD::VCVTL", SDTARMVCVTL>;

// f32 -> f16 narrowing conversion, writing the half selected by 'half'.
multiclass MVE_VCVT_f2h_m<string iname, int half> {
  def "": MVE_VCVT_ff<iname, "f16.f32", 0b0, half,
                      (ins MQPR:$Qd_src), vpred_n, "$Qd = $Qd_src">;
  defvar Inst = !cast<Instruction>(NAME);

  let Predicates = [HasMVEFloat] in {
    def : Pat<(v8f16 (int_arm_mve_vcvt_narrow
                         (v8f16 MQPR:$Qd_src), (v4f32 MQPR:$Qm), (i32 half))),
              (v8f16 (Inst (v8f16 MQPR:$Qd_src), (v4f32 MQPR:$Qm)))>;
    def : Pat<(v8f16 (int_arm_mve_vcvt_narrow_predicated
                         (v8f16 MQPR:$Qd_src), (v4f32 MQPR:$Qm), (i32 half),
                         (v4i1 VCCR:$mask))),
              (v8f16 (Inst (v8f16 MQPR:$Qd_src), (v4f32 MQPR:$Qm),
                           ARMVCCThen, (v4i1 VCCR:$mask)))>;

    def : Pat<(v8f16 (MVEvcvtn (v8f16 MQPR:$Qd_src), (v4f32 MQPR:$Qm), (i32 half))),
              (v8f16 (Inst (v8f16 MQPR:$Qd_src), (v4f32 MQPR:$Qm)))>;
  }
}

// f16 -> f32 widening conversion, reading the half selected by 'half'.
multiclass MVE_VCVT_h2f_m<string iname, int half> {
  def "": MVE_VCVT_ff<iname, "f32.f16", 0b1, half, (ins), vpred_r, "">;
  defvar Inst = !cast<Instruction>(NAME);

  let Predicates = [HasMVEFloat] in {
    def : Pat<(v4f32 (int_arm_mve_vcvt_widen (v8f16 MQPR:$Qm), (i32 half))),
              (v4f32 (Inst (v8f16 MQPR:$Qm)))>;
    def : Pat<(v4f32 (int_arm_mve_vcvt_widen_predicated
                         (v4f32 MQPR:$inactive), (v8f16 MQPR:$Qm), (i32 half),
                         (v4i1 VCCR:$mask))),
              (v4f32 (Inst (v8f16 MQPR:$Qm), ARMVCCThen,
                           (v4i1 VCCR:$mask), (v4f32 MQPR:$inactive)))>;

    def : Pat<(v4f32 (MVEvcvtl (v8f16 MQPR:$Qm), (i32 half))),
              (v4f32 (Inst (v8f16 MQPR:$Qm)))>;
  }
}

defm MVE_VCVTf16f32bh : MVE_VCVT_f2h_m<"vcvtb", 0b0>;
defm MVE_VCVTf16f32th : MVE_VCVT_f2h_m<"vcvtt", 0b1>;
defm MVE_VCVTf32f16bh : MVE_VCVT_h2f_m<"vcvtb", 0b0>;
defm MVE_VCVTf32f16th : MVE_VCVT_h2f_m<"vcvtt", 0b1>;

// Complex add (vcadd) and halving complex add (vhcadd); 'halve' is encoded
// in bit 28 and $rot carries the rotation operand.
class MVE_VxCADD<string iname, string suffix, bits<2> size, bit halve,
                 string cstr="">
  : MVE_qDest_qSrc<iname, suffix, (outs MQPR:$Qd),
                   (ins MQPR:$Qn, MQPR:$Qm, complexrotateopodd:$rot),
                   "$Qd, $Qn, $Qm, $rot", vpred_r, cstr, []> {
  bits<4> Qn;
  bit rot;

  let Inst{28} = halve;
  let Inst{21-20} = size;
  let Inst{19-17} = Qn{2-0};
  let Inst{16} = 0b0;
  let Inst{12} = rot;
  let Inst{8} = 0b1;
  let Inst{7} = Qn{3};
  let Inst{0} = 0b0;
}

multiclass MVE_VxCADD_m<string iname, MVEVectorVTInfo VTI,
                        bit halve, string cstr=""> {
  def "" : MVE_VxCADD<iname, VTI.Suffix, VTI.Size, halve, cstr>;
  defvar Inst = !cast<Instruction>(NAME);

  let Predicates = [HasMVEInt] in {
    def : Pat<(VTI.Vec (int_arm_mve_vcaddq halve,
                            imm:$rot, (VTI.Vec MQPR:$Qn), (VTI.Vec MQPR:$Qm))),
              (VTI.Vec (Inst (VTI.Vec MQPR:$Qn), (VTI.Vec MQPR:$Qm),
                             imm:$rot))>;

    def : Pat<(VTI.Vec (int_arm_mve_vcaddq_predicated halve,
                            imm:$rot, (VTI.Vec MQPR:$inactive),
                            (VTI.Vec MQPR:$Qn), (VTI.Vec MQPR:$Qm),
                            (VTI.Pred VCCR:$mask))),
              (VTI.Vec (Inst (VTI.Vec MQPR:$Qn), (VTI.Vec MQPR:$Qm),
                             imm:$rot, ARMVCCThen, (VTI.Pred VCCR:$mask),
                             (VTI.Vec MQPR:$inactive)))>;

  }
}

defm MVE_VCADDi8 : MVE_VxCADD_m<"vcadd", MVE_v16i8, 0b1>;
defm MVE_VCADDi16 : MVE_VxCADD_m<"vcadd", MVE_v8i16, 0b1>;
defm MVE_VCADDi32 : MVE_VxCADD_m<"vcadd", MVE_v4i32, 0b1, "@earlyclobber $Qd">;

defm MVE_VHCADDs8 : MVE_VxCADD_m<"vhcadd", MVE_v16s8, 0b0>;
defm MVE_VHCADDs16 : MVE_VxCADD_m<"vhcadd", MVE_v8s16, 0b0>;
defm MVE_VHCADDs32 : MVE_VxCADD_m<"vhcadd", MVE_v4s32, 0b0,
"@earlyclobber $Qd">;

// Add/subtract with carry; I distinguishes the carry-initializing (vadci /
// vsbci) forms, and the carry flows through the FPSCR_NZCV operands.
class MVE_VADCSBC<string iname, bit I, bit subtract,
                  dag carryin, list<dag> pattern=[]>
  : MVE_qDest_qSrc<iname, "i32", (outs MQPR:$Qd, cl_FPSCR_NZCV:$carryout),
                   !con((ins MQPR:$Qn, MQPR:$Qm), carryin),
                   "$Qd, $Qn, $Qm", vpred_r, "", pattern> {
  bits<4> Qn;

  let Inst{28} = subtract;
  let Inst{21-20} = 0b11;
  let Inst{19-17} = Qn{2-0};
  let Inst{16} = 0b0;
  let Inst{12} = I;
  let Inst{8} = 0b1;
  let Inst{7} = Qn{3};
  let Inst{0} = 0b0;

  // Custom decoder method in order to add the FPSCR operand(s), which
  // Tablegen won't do right
  let DecoderMethod = "DecodeMVEVADCInstruction";
}

def MVE_VADC : MVE_VADCSBC<"vadc", 0b0, 0b0, (ins cl_FPSCR_NZCV:$carryin)>;
def MVE_VADCI : MVE_VADCSBC<"vadci", 0b1, 0b0, (ins)>;

def MVE_VSBC : MVE_VADCSBC<"vsbc", 0b0, 0b1, (ins cl_FPSCR_NZCV:$carryin)>;
def MVE_VSBCI : MVE_VADCSBC<"vsbci", 0b1, 0b1, (ins)>;

// Saturating doubling multiply producing a double-width result; T selects
// the bottom (0) or top (1) input half lanes.
class MVE_VQDMULL<string iname, string suffix, bit size, bit T,
                  string cstr="", list<dag> pattern=[]>
  : MVE_qDest_qSrc<iname, suffix, (outs MQPR:$Qd),
                   (ins MQPR:$Qn, MQPR:$Qm), "$Qd, $Qn, $Qm",
                   vpred_r, cstr, pattern> {
  bits<4> Qn;

  let Inst{28} = size;
  let Inst{21-20} = 0b11;
  let Inst{19-17} = Qn{2-0};
  let Inst{16} = 0b0;
  let Inst{12} = T;
  let Inst{8} = 0b1;
  let Inst{7} = Qn{3};
  let Inst{0} = 0b1;
  let validForTailPredication = 1;
  let doubleWidthResult = 1;
}

multiclass MVE_VQDMULL_m<string iname, MVEVectorVTInfo VTI, bit size, bit T,
                         string cstr> {
  def "" : MVE_VQDMULL<iname, VTI.Suffix, size, T, cstr>;
  defvar Inst = !cast<Instruction>(NAME);

  let Predicates = [HasMVEInt] in {
    // Unpredicated saturating multiply
    def : Pat<(VTI.DblVec (int_arm_mve_vqdmull (VTI.Vec MQPR:$Qm),
                                               (VTI.Vec MQPR:$Qn), (i32 T))),
              (VTI.DblVec (Inst (VTI.Vec MQPR:$Qm), (VTI.Vec MQPR:$Qn)))>;
    // Predicated saturating multiply
    def : Pat<(VTI.DblVec (int_arm_mve_vqdmull_predicated
                                  (VTI.Vec MQPR:$Qm), (VTI.Vec MQPR:$Qn),
                                  (i32 T), (VTI.DblPred VCCR:$mask),
                                  (VTI.DblVec MQPR:$inactive))),
              (VTI.DblVec (Inst (VTI.Vec MQPR:$Qm), (VTI.Vec MQPR:$Qn),
                                ARMVCCThen, (VTI.DblPred VCCR:$mask),
                                (VTI.DblVec MQPR:$inactive)))>;
  }
}

multiclass MVE_VQDMULL_halves<MVEVectorVTInfo VTI, bit size, string cstr=""> {
  defm bh : MVE_VQDMULL_m<"vqdmullb", VTI, size, 0b0, cstr>;
  defm th : MVE_VQDMULL_m<"vqdmullt", VTI, size, 0b1, cstr>;
}

defm MVE_VQDMULLs16 : MVE_VQDMULL_halves<MVE_v8s16, 0b0>;
defm MVE_VQDMULLs32 : MVE_VQDMULL_halves<MVE_v4s32, 0b1, "@earlyclobber $Qd">;

// end of mve_qDest_qSrc

// start of mve_qDest_rSrc

// Base encoding shared by the vector-by-scalar instructions: Qd destination,
// Qn vector operand and Rm scalar GPR operand.
class MVE_qr_base<dag oops, dag iops, InstrItinClass itin, string iname,
                  string suffix, string ops, vpred_ops vpred, string cstr,
                  list<dag> pattern=[]>
  : MVE_p<oops, iops, NoItinerary, iname, suffix, ops, vpred, cstr, pattern> {
  bits<4> Qd;
  bits<4> Qn;
  bits<4> Rm;

  let Inst{25-23} = 0b100;
  let Inst{22} = Qd{3};
  let Inst{19-17} = Qn{2-0};
  let Inst{15-13} = Qd{2-0};
  let Inst{11-9} = 0b111;
  let Inst{7} = Qn{3};
  let Inst{6} = 0b1;
  let Inst{4} = 0b0;
  let Inst{3-0} = Rm{3-0};
}

class MVE_qDest_rSrc<string iname, string suffix, string cstr="", list<dag> pattern=[]>
  : MVE_qr_base<(outs MQPR:$Qd), (ins MQPR:$Qn, rGPR:$Rm),
          NoItinerary, iname, suffix, "$Qd, $Qn, $Rm", vpred_r, cstr,
          pattern>;

class MVE_qDestSrc_rSrc<string iname, string suffix, list<dag> pattern=[]>
  : MVE_qr_base<(outs MQPR:$Qd), (ins MQPR:$Qd_src, MQPR:$Qn, rGPR:$Rm),
          NoItinerary, iname, suffix, "$Qd, $Qn, $Rm", vpred_n, "$Qd = $Qd_src",
          pattern>;

class MVE_qDest_single_rSrc<string iname, string suffix, list<dag> pattern=[]>
  : MVE_p<(outs
MQPR:$Qd), (ins MQPR:$Qd_src, rGPR:$Rm), NoItinerary, iname,
          suffix, "$Qd, $Rm", vpred_n, "$Qd = $Qd_src", pattern> {
  bits<4> Qd;
  bits<4> Rm;

  let Inst{22} = Qd{3};
  let Inst{15-13} = Qd{2-0};
  let Inst{3-0} = Rm{3-0};
}

// Patterns for vector-scalar instructions with integer operands
multiclass MVE_vec_scalar_int_pat_m<Instruction inst, MVEVectorVTInfo VTI,
                                    SDPatternOperator unpred_op,
                                    SDPatternOperator pred_op,
                                    bit unpred_has_sign = 0,
                                    bit pred_has_sign = 0> {
  // Optional trailing signedness operand for the intrinsics that carry one.
  defvar UnpredSign = !if(unpred_has_sign, (? (i32 VTI.Unsigned)), (?));
  defvar PredSign = !if(pred_has_sign, (? (i32 VTI.Unsigned)), (?));

  let Predicates = [HasMVEInt] in {
    // Unpredicated version
    def : Pat<(VTI.Vec !con((unpred_op (VTI.Vec MQPR:$Qm),
                                       (VTI.Vec (ARMvdup rGPR:$val))),
                            UnpredSign)),
              (VTI.Vec (inst (VTI.Vec MQPR:$Qm), (i32 rGPR:$val)))>;
    // Predicated version
    def : Pat<(VTI.Vec !con((pred_op (VTI.Vec MQPR:$Qm),
                                     (VTI.Vec (ARMvdup rGPR:$val))),
                            PredSign,
                            (pred_op (VTI.Pred VCCR:$mask),
                                     (VTI.Vec MQPR:$inactive)))),
              (VTI.Vec (inst (VTI.Vec MQPR:$Qm), (i32 rGPR:$val),
                             ARMVCCThen, (VTI.Pred VCCR:$mask),
                             (VTI.Vec MQPR:$inactive)))>;
  }
}

class MVE_VADDSUB_qr<string iname, string suffix, bits<2> size,
                     bit bit_5, bit bit_12, bit bit_16, bit bit_28>
  : MVE_qDest_rSrc<iname, suffix, ""> {

  let Inst{28} = bit_28;
  let Inst{21-20} = size;
  let Inst{16} = bit_16;
  let Inst{12} = bit_12;
  let Inst{8} = 0b1;
  let Inst{5} = bit_5;
  let validForTailPredication = 1;
}

// Vector-scalar add/sub
multiclass MVE_VADDSUB_qr_m<string iname, MVEVectorVTInfo VTI, bit subtract,
                            SDNode Op, Intrinsic PredInt> {
  def "" : MVE_VADDSUB_qr<iname, VTI.Suffix, VTI.Size, 0b0, subtract, 0b1, 0b0>;
  let Predicates = [HasMVEInt] in {
    defm : MVE_TwoOpPatternDup<VTI, Op, PredInt, (? ),
                               !cast<Instruction>(NAME), ARMimmAllZerosV>;
  }
}

multiclass MVE_VADD_qr_m<MVEVectorVTInfo VTI>
  : MVE_VADDSUB_qr_m<"vadd", VTI, 0b0, add, int_arm_mve_add_predicated>;

multiclass MVE_VSUB_qr_m<MVEVectorVTInfo VTI>
  : MVE_VADDSUB_qr_m<"vsub", VTI, 0b1, sub, int_arm_mve_sub_predicated>;

defm MVE_VADD_qr_i8 : MVE_VADD_qr_m<MVE_v16i8>;
defm MVE_VADD_qr_i16 : MVE_VADD_qr_m<MVE_v8i16>;
defm MVE_VADD_qr_i32 : MVE_VADD_qr_m<MVE_v4i32>;

defm MVE_VSUB_qr_i8 : MVE_VSUB_qr_m<MVE_v16i8>;
defm MVE_VSUB_qr_i16 : MVE_VSUB_qr_m<MVE_v8i16>;
defm MVE_VSUB_qr_i32 : MVE_VSUB_qr_m<MVE_v4i32>;

// Vector-scalar saturating add/sub
multiclass MVE_VQADDSUB_qr_m<string iname, MVEVectorVTInfo VTI, bit subtract,
                             SDNode Op, Intrinsic PredInt> {
  def "" : MVE_VADDSUB_qr<iname, VTI.Suffix, VTI.Size, 0b1, subtract,
                          0b0, VTI.Unsigned>;

  let Predicates = [HasMVEInt] in {
    defm : MVE_TwoOpPatternDup<VTI, Op, PredInt, (?
(i32 VTI.Unsigned)),
                               !cast<Instruction>(NAME)>;
  }
}

multiclass MVE_VQADD_qr_m<MVEVectorVTInfo VTI, SDNode Op>
  : MVE_VQADDSUB_qr_m<"vqadd", VTI, 0b0, Op, int_arm_mve_qadd_predicated>;

multiclass MVE_VQSUB_qr_m<MVEVectorVTInfo VTI, SDNode Op>
  : MVE_VQADDSUB_qr_m<"vqsub", VTI, 0b1, Op, int_arm_mve_qsub_predicated>;

defm MVE_VQADD_qr_s8 : MVE_VQADD_qr_m<MVE_v16s8, saddsat>;
defm MVE_VQADD_qr_s16 : MVE_VQADD_qr_m<MVE_v8s16, saddsat>;
defm MVE_VQADD_qr_s32 : MVE_VQADD_qr_m<MVE_v4s32, saddsat>;
defm MVE_VQADD_qr_u8 : MVE_VQADD_qr_m<MVE_v16u8, uaddsat>;
defm MVE_VQADD_qr_u16 : MVE_VQADD_qr_m<MVE_v8u16, uaddsat>;
defm MVE_VQADD_qr_u32 : MVE_VQADD_qr_m<MVE_v4u32, uaddsat>;

defm MVE_VQSUB_qr_s8 : MVE_VQSUB_qr_m<MVE_v16s8, ssubsat>;
defm MVE_VQSUB_qr_s16 : MVE_VQSUB_qr_m<MVE_v8s16, ssubsat>;
defm MVE_VQSUB_qr_s32 : MVE_VQSUB_qr_m<MVE_v4s32, ssubsat>;
defm MVE_VQSUB_qr_u8 : MVE_VQSUB_qr_m<MVE_v16u8, usubsat>;
defm MVE_VQSUB_qr_u16 : MVE_VQSUB_qr_m<MVE_v8u16, usubsat>;
defm MVE_VQSUB_qr_u32 : MVE_VQSUB_qr_m<MVE_v4u32, usubsat>;

// Vector-by-scalar saturating doubling multiply with a double-width result;
// T selects the bottom (0) or top (1) input half lanes.
class MVE_VQDMULL_qr<string iname, string suffix, bit size,
                     bit T, string cstr="", list<dag> pattern=[]>
  : MVE_qDest_rSrc<iname, suffix, cstr, pattern> {

  let Inst{28} = size;
  let Inst{21-20} = 0b11;
  let Inst{16} = 0b0;
  let Inst{12} = T;
  let Inst{8} = 0b1;
  let Inst{5} = 0b1;
  let validForTailPredication = 1;
  let doubleWidthResult = 1;
}

multiclass MVE_VQDMULL_qr_m<string iname, MVEVectorVTInfo VTI, bit size,
                            bit T, string cstr> {
  def "" : MVE_VQDMULL_qr<iname, VTI.Suffix, size, T, cstr>;
  defvar Inst = !cast<Instruction>(NAME);

  let Predicates = [HasMVEInt] in {
    // Unpredicated saturating multiply
    def : Pat<(VTI.DblVec (int_arm_mve_vqdmull (VTI.Vec MQPR:$Qm),
                                               (VTI.Vec (ARMvdup rGPR:$val)),
                                               (i32 T))),
              (VTI.DblVec (Inst (VTI.Vec MQPR:$Qm), (i32 rGPR:$val)))>;
    // Predicated saturating multiply
    def : Pat<(VTI.DblVec (int_arm_mve_vqdmull_predicated
                                  (VTI.Vec MQPR:$Qm),
                                  (VTI.Vec (ARMvdup rGPR:$val)),
                                  (i32 T),
                                  (VTI.DblPred VCCR:$mask),
                                  (VTI.DblVec MQPR:$inactive))),
              (VTI.DblVec (Inst (VTI.Vec MQPR:$Qm), (i32 rGPR:$val),
                                ARMVCCThen, (VTI.DblPred VCCR:$mask),
                                (VTI.DblVec MQPR:$inactive)))>;
  }
}

multiclass MVE_VQDMULL_qr_halves<MVEVectorVTInfo VTI, bit size, string cstr=""> {
  defm bh : MVE_VQDMULL_qr_m<"vqdmullb", VTI, size, 0b0, cstr>;
  defm th : MVE_VQDMULL_qr_m<"vqdmullt", VTI, size, 0b1, cstr>;
}

defm MVE_VQDMULL_qr_s16 : MVE_VQDMULL_qr_halves<MVE_v8s16, 0b0>;
defm MVE_VQDMULL_qr_s32 : MVE_VQDMULL_qr_halves<MVE_v4s32, 0b1, "@earlyclobber $Qd">;

class MVE_VxADDSUB_qr<string iname, string suffix,
                      bit bit_28, bits<2> bits_21_20, bit subtract,
                      list<dag> pattern=[]>
  : MVE_qDest_rSrc<iname, suffix, "", pattern> {

  let Inst{28} = bit_28;
  let Inst{21-20} = bits_21_20;
  let Inst{16} = 0b0;
  let Inst{12} = subtract;
  let Inst{8} = 0b1;
  let Inst{5} = 0b0;
  let validForTailPredication = 1;
}

// Vector-scalar halving add/sub.
multiclass MVE_VHADDSUB_qr_m<string iname, MVEVectorVTInfo VTI, bit subtract,
                             Intrinsic unpred_int, Intrinsic pred_int> {
  def "" : MVE_VxADDSUB_qr<iname, VTI.Suffix, VTI.Unsigned, VTI.Size, subtract>;
  defm : MVE_vec_scalar_int_pat_m<!cast<Instruction>(NAME),
                                  VTI, unpred_int, pred_int, 1, 1>;
}

multiclass MVE_VHADD_qr_m<MVEVectorVTInfo VTI> :
  MVE_VHADDSUB_qr_m<"vhadd", VTI, 0b0, int_arm_mve_vhadd,
                    int_arm_mve_hadd_predicated>;

multiclass MVE_VHSUB_qr_m<MVEVectorVTInfo VTI> :
  MVE_VHADDSUB_qr_m<"vhsub", VTI, 0b1, int_arm_mve_vhsub,
                    int_arm_mve_hsub_predicated>;

defm MVE_VHADD_qr_s8 : MVE_VHADD_qr_m<MVE_v16s8>;
defm MVE_VHADD_qr_s16 : MVE_VHADD_qr_m<MVE_v8s16>;
defm MVE_VHADD_qr_s32 : MVE_VHADD_qr_m<MVE_v4s32>;
defm MVE_VHADD_qr_u8 : MVE_VHADD_qr_m<MVE_v16u8>;
defm MVE_VHADD_qr_u16 : MVE_VHADD_qr_m<MVE_v8u16>;
defm MVE_VHADD_qr_u32 : MVE_VHADD_qr_m<MVE_v4u32>;

defm MVE_VHSUB_qr_s8 : MVE_VHSUB_qr_m<MVE_v16s8>;
defm MVE_VHSUB_qr_s16 : MVE_VHSUB_qr_m<MVE_v8s16>;
defm MVE_VHSUB_qr_s32 : MVE_VHSUB_qr_m<MVE_v4s32>;
defm MVE_VHSUB_qr_u8 : MVE_VHSUB_qr_m<MVE_v16u8>;
defm MVE_VHSUB_qr_u16 : MVE_VHSUB_qr_m<MVE_v8u16>;
defm MVE_VHSUB_qr_u32 : MVE_VHSUB_qr_m<MVE_v4u32>;

// Vector-scalar floating-point add/sub.
multiclass MVE_VADDSUB_qr_f<string iname, MVEVectorVTInfo VTI, bit subtract,
                            SDNode Op, Intrinsic PredInt> {
  def "" : MVE_VxADDSUB_qr<iname, VTI.Suffix, VTI.Size{0}, 0b11, subtract>;
  defm : MVE_TwoOpPatternDup<VTI, Op, PredInt, (? ),
                             !cast<Instruction>(NAME)>;
}

let Predicates = [HasMVEFloat] in {
  defm MVE_VADD_qr_f32 : MVE_VADDSUB_qr_f<"vadd", MVE_v4f32, 0b0, fadd,
                                          int_arm_mve_add_predicated>;
  defm MVE_VADD_qr_f16 : MVE_VADDSUB_qr_f<"vadd", MVE_v8f16, 0b0, fadd,
                                          int_arm_mve_add_predicated>;

  defm MVE_VSUB_qr_f32 : MVE_VADDSUB_qr_f<"vsub", MVE_v4f32, 0b1, fsub,
                                          int_arm_mve_sub_predicated>;
  defm MVE_VSUB_qr_f16 : MVE_VADDSUB_qr_f<"vsub", MVE_v8f16, 0b1, fsub,
                                          int_arm_mve_sub_predicated>;
}

// Whole-vector shift by a scalar register (vshl/vrshl/vqshl/vqrshl).
class MVE_VxSHL_qr<string iname, string suffix, bit U, bits<2> size,
                   bit bit_7, bit bit_17, list<dag> pattern=[]>
  : MVE_qDest_single_rSrc<iname, suffix, pattern> {

  let Inst{28} = U;
  let Inst{25-23} = 0b100;
  let Inst{21-20} = 0b11;
  let Inst{19-18} = size;
  let Inst{17} = bit_17;
  let Inst{16} = 0b1;
  let Inst{12-8} = 0b11110;
  let Inst{7} = bit_7;
  let Inst{6-4} = 0b110;
  let validForTailPredication = 1;
}

multiclass MVE_VxSHL_qr_p<string iname, MVEVectorVTInfo VTI, bit q, bit r> {
  def "" : MVE_VxSHL_qr<iname, VTI.Suffix, VTI.Unsigned, VTI.Size, q, r>;
  defvar Inst =
!cast<Instruction>(NAME);

  def : Pat<(VTI.Vec (int_arm_mve_vshl_scalar
                         (VTI.Vec MQPR:$in), (i32 rGPR:$sh),
                         (i32 q), (i32 r), (i32 VTI.Unsigned))),
            (VTI.Vec (Inst (VTI.Vec MQPR:$in), (i32 rGPR:$sh)))>;

  def : Pat<(VTI.Vec (int_arm_mve_vshl_scalar_predicated
                         (VTI.Vec MQPR:$in), (i32 rGPR:$sh),
                         (i32 q), (i32 r), (i32 VTI.Unsigned),
                         (VTI.Pred VCCR:$mask))),
            (VTI.Vec (Inst (VTI.Vec MQPR:$in), (i32 rGPR:$sh),
                           ARMVCCThen, (VTI.Pred VCCR:$mask)))>;
}

multiclass MVE_VxSHL_qr_types<string iname, bit bit_7, bit bit_17> {
  defm s8 : MVE_VxSHL_qr_p<iname, MVE_v16s8, bit_7, bit_17>;
  defm s16 : MVE_VxSHL_qr_p<iname, MVE_v8s16, bit_7, bit_17>;
  defm s32 : MVE_VxSHL_qr_p<iname, MVE_v4s32, bit_7, bit_17>;
  defm u8 : MVE_VxSHL_qr_p<iname, MVE_v16u8, bit_7, bit_17>;
  defm u16 : MVE_VxSHL_qr_p<iname, MVE_v8u16, bit_7, bit_17>;
  defm u32 : MVE_VxSHL_qr_p<iname, MVE_v4u32, bit_7, bit_17>;
}

defm MVE_VSHL_qr : MVE_VxSHL_qr_types<"vshl", 0b0, 0b0>;
defm MVE_VRSHL_qr : MVE_VxSHL_qr_types<"vrshl", 0b0, 0b1>;
defm MVE_VQSHL_qr : MVE_VxSHL_qr_types<"vqshl", 0b1, 0b0>;
defm MVE_VQRSHL_qr : MVE_VxSHL_qr_types<"vqrshl", 0b1, 0b1>;

// A shift by a duplicated scalar can use the register-shift form directly.
let Predicates = [HasMVEInt] in {
  def : Pat<(v4i32 (ARMvshlu (v4i32 MQPR:$Qm), (v4i32 (ARMvdup rGPR:$Rm)))),
            (v4i32 (MVE_VSHL_qru32 (v4i32 MQPR:$Qm), rGPR:$Rm))>;
  def : Pat<(v8i16 (ARMvshlu (v8i16 MQPR:$Qm), (v8i16 (ARMvdup rGPR:$Rm)))),
            (v8i16 (MVE_VSHL_qru16 (v8i16 MQPR:$Qm), rGPR:$Rm))>;
  def : Pat<(v16i8 (ARMvshlu (v16i8 MQPR:$Qm), (v16i8 (ARMvdup rGPR:$Rm)))),
            (v16i8 (MVE_VSHL_qru8 (v16i8 MQPR:$Qm), rGPR:$Rm))>;

  def : Pat<(v4i32 (ARMvshls (v4i32 MQPR:$Qm), (v4i32 (ARMvdup rGPR:$Rm)))),
            (v4i32 (MVE_VSHL_qrs32 (v4i32 MQPR:$Qm), rGPR:$Rm))>;
  def : Pat<(v8i16 (ARMvshls (v8i16 MQPR:$Qm), (v8i16 (ARMvdup rGPR:$Rm)))),
            (v8i16 (MVE_VSHL_qrs16 (v8i16 MQPR:$Qm), rGPR:$Rm))>;
  def : Pat<(v16i8 (ARMvshls (v16i8 MQPR:$Qm), (v16i8 (ARMvdup rGPR:$Rm)))),
            (v16i8 (MVE_VSHL_qrs8 (v16i8 MQPR:$Qm), rGPR:$Rm))>;
}

// Bit-reverse-and-shift-right by a scalar register (vbrsr).
class MVE_VBRSR<string iname, string suffix, bits<2> size, list<dag> pattern=[]>
  : MVE_qDest_rSrc<iname, suffix, "", pattern> {

  let Inst{28} = 0b1;
  let Inst{21-20} = size;
  let Inst{16} = 0b1;
  let Inst{12} = 0b1;
  let Inst{8} = 0b0;
  let Inst{5} = 0b1;
  let validForTailPredication = 1;
}

def MVE_VBRSR8 : MVE_VBRSR<"vbrsr", "8", 0b00>;
def MVE_VBRSR16 : MVE_VBRSR<"vbrsr", "16", 0b01>;
def MVE_VBRSR32 : MVE_VBRSR<"vbrsr", "32", 0b10>;

multiclass MVE_VBRSR_pat_m<MVEVectorVTInfo VTI, Instruction Inst> {
  // Unpredicated
  def : Pat<(VTI.Vec (int_arm_mve_vbrsr (VTI.Vec MQPR:$Qn), (i32 rGPR:$Rm))),
            (VTI.Vec (Inst (VTI.Vec MQPR:$Qn), (i32 rGPR:$Rm)))>;
  // Predicated
  def : Pat<(VTI.Vec (int_arm_mve_vbrsr_predicated
                          (VTI.Vec MQPR:$inactive),
                          (VTI.Vec MQPR:$Qn), (i32 rGPR:$Rm),
                          (VTI.Pred VCCR:$mask))),
            (VTI.Vec (Inst (VTI.Vec MQPR:$Qn), (i32 rGPR:$Rm),
                           ARMVCCThen, (VTI.Pred VCCR:$mask),
                           (VTI.Vec MQPR:$inactive)))>;
}

// A full-lane bitreverse is a VBRSR with a shift amount of the lane width.
let Predicates = [HasMVEInt] in {
  def : Pat<(v16i8 ( bitreverse (v16i8 MQPR:$val1))),
            (v16i8 ( MVE_VBRSR8 (v16i8 MQPR:$val1), (t2MOVi (i32 8)) ))>;

  def : Pat<(v4i32 ( bitreverse (v4i32 MQPR:$val1))),
            (v4i32 ( MVE_VBRSR32 (v4i32 MQPR:$val1), (t2MOVi (i32 32)) ))>;

  def : Pat<(v8i16 ( bitreverse (v8i16 MQPR:$val1))),
            (v8i16 ( MVE_VBRSR16 (v8i16 MQPR:$val1), (t2MOVi (i32 16)) ))>;

  defm : MVE_VBRSR_pat_m<MVE_v16i8, MVE_VBRSR8>;
  defm : MVE_VBRSR_pat_m<MVE_v8i16, MVE_VBRSR16>;
  defm : MVE_VBRSR_pat_m<MVE_v4i32, MVE_VBRSR32>;
}

let Predicates = [HasMVEFloat] in {
  defm : MVE_VBRSR_pat_m<MVE_v8f16, MVE_VBRSR16>;
  defm : MVE_VBRSR_pat_m<MVE_v4f32, MVE_VBRSR32>;
}

class MVE_VMUL_qr_int<string iname, string
suffix, bits<2> size>
  : MVE_qDest_rSrc<iname, suffix, ""> {

  let Inst{28} = 0b0;
  let Inst{21-20} = size;
  let Inst{16} = 0b1;
  let Inst{12} = 0b1;
  let Inst{8} = 0b0;
  let Inst{5} = 0b1;
  let validForTailPredication = 1;
}

multiclass MVE_VMUL_qr_int_m<MVEVectorVTInfo VTI> {
  def "" : MVE_VMUL_qr_int<"vmul", VTI.Suffix, VTI.Size>;
  let Predicates = [HasMVEInt] in {
    defm : MVE_TwoOpPatternDup<VTI, mul, int_arm_mve_mul_predicated, (? ),
                               !cast<Instruction>(NAME), ARMimmOneV>;
  }
}

defm MVE_VMUL_qr_i8 : MVE_VMUL_qr_int_m<MVE_v16i8>;
defm MVE_VMUL_qr_i16 : MVE_VMUL_qr_int_m<MVE_v8i16>;
defm MVE_VMUL_qr_i32 : MVE_VMUL_qr_int_m<MVE_v4i32>;

// Shared encoding for the vector-by-scalar multiplies (vqdmulh/vqrdmulh
// and the float vmul below); variants differ in bit 28 and bits 21-20.
class MVE_VxxMUL_qr<string iname, string suffix,
                    bit bit_28, bits<2> bits_21_20, list<dag> pattern=[]>
  : MVE_qDest_rSrc<iname, suffix, "", pattern> {

  let Inst{28} = bit_28;
  let Inst{21-20} = bits_21_20;
  let Inst{16} = 0b1;
  let Inst{12} = 0b0;
  let Inst{8} = 0b0;
  let Inst{5} = 0b1;
  let validForTailPredication = 1;
}

multiclass MVE_VxxMUL_qr_m<string iname, MVEVectorVTInfo VTI, bit bit_28,
                           PatFrag Op, Intrinsic int_unpred, Intrinsic int_pred> {
  def "" : MVE_VxxMUL_qr<iname, VTI.Suffix, bit_28, VTI.Size>;

  let Predicates = [HasMVEInt] in {
    defm : MVE_TwoOpPatternDup<VTI, Op, int_pred, (? ),
                               !cast<Instruction>(NAME)>;
  }
  defm : MVE_vec_scalar_int_pat_m<!cast<Instruction>(NAME), VTI, int_unpred, int_pred>;
}

multiclass MVE_VQDMULH_qr_m<MVEVectorVTInfo VTI> :
  MVE_VxxMUL_qr_m<"vqdmulh", VTI, 0b0, MVEvqdmulh,
                  int_arm_mve_vqdmulh, int_arm_mve_qdmulh_predicated>;

multiclass MVE_VQRDMULH_qr_m<MVEVectorVTInfo VTI> :
  MVE_VxxMUL_qr_m<"vqrdmulh", VTI, 0b1, null_frag,
                  int_arm_mve_vqrdmulh, int_arm_mve_qrdmulh_predicated>;

defm MVE_VQDMULH_qr_s8 : MVE_VQDMULH_qr_m<MVE_v16s8>;
defm MVE_VQDMULH_qr_s16 : MVE_VQDMULH_qr_m<MVE_v8s16>;
defm MVE_VQDMULH_qr_s32 : MVE_VQDMULH_qr_m<MVE_v4s32>;

defm MVE_VQRDMULH_qr_s8 : MVE_VQRDMULH_qr_m<MVE_v16s8>;
defm MVE_VQRDMULH_qr_s16 : MVE_VQRDMULH_qr_m<MVE_v8s16>;
defm MVE_VQRDMULH_qr_s32 : MVE_VQRDMULH_qr_m<MVE_v4s32>;

// Vector-by-scalar floating-point multiply.
multiclass MVE_VxxMUL_qr_f_m<MVEVectorVTInfo VTI> {
  let validForTailPredication = 1 in
  def "" : MVE_VxxMUL_qr<"vmul", VTI.Suffix, VTI.Size{0}, 0b11>;
  defm : MVE_TwoOpPatternDup<VTI, fmul, int_arm_mve_mul_predicated, (? ),
                             !cast<Instruction>(NAME)>;
}

let Predicates = [HasMVEFloat] in {
  defm MVE_VMUL_qr_f16 : MVE_VxxMUL_qr_f_m<MVE_v8f16>;
  defm MVE_VMUL_qr_f32 : MVE_VxxMUL_qr_f_m<MVE_v4f32>;
}

// Multiply-accumulate with a scalar operand (vmla/vmlas/vfma/vfmas); S is
// encoded in bit 12 and distinguishes the scalar-addend form.
class MVE_VFMAMLA_qr<string iname, string suffix,
                     bit bit_28, bits<2> bits_21_20, bit S,
                     list<dag> pattern=[]>
  : MVE_qDestSrc_rSrc<iname, suffix, pattern> {

  let Inst{28} = bit_28;
  let Inst{21-20} = bits_21_20;
  let Inst{16} = 0b1;
  let Inst{12} = S;
  let Inst{8} = 0b0;
  let Inst{5} = 0b0;
  let validForTailPredication = 1;
  let hasSideEffects = 0;
}

multiclass MVE_VMLA_qr_multi<string iname, MVEVectorVTInfo VTI,
                             bit scalar_addend> {
  def "": MVE_VFMAMLA_qr<iname, VTI.Suffix, VTI.Unsigned, VTI.Size,
                         scalar_addend>;
  defvar Inst = !cast<Instruction>(NAME);
  defvar pred_int = !cast<Intrinsic>("int_arm_mve_" # iname # "_n_predicated");
  defvar v1 = (VTI.Vec MQPR:$v1);
  defvar v2 = (VTI.Vec MQPR:$v2);
  defvar vs = (VTI.Vec (ARMvdup rGPR:$s));
  defvar s = (i32 rGPR:$s);
  defvar pred = (VTI.Pred VCCR:$pred);

  // The signed and unsigned variants of this instruction have different
  // encodings, but they're functionally identical. For the sake of
  // determinism, we generate only the unsigned variant.
if VTI.Unsigned then let Predicates = [HasMVEInt] in {
    if scalar_addend then {
      def : Pat<(VTI.Vec (add (mul v1, v2), vs)),
                (VTI.Vec (Inst v1, v2, s))>;
    } else {
      def : Pat<(VTI.Vec (add (mul v2, vs), v1)),
                (VTI.Vec (Inst v1, v2, s))>;
    }

    def : Pat<(VTI.Vec (pred_int v1, v2, s, pred)),
              (VTI.Vec (Inst v1, v2, s, ARMVCCThen, pred))>;
  }
}

defm MVE_VMLA_qr_s8 : MVE_VMLA_qr_multi<"vmla", MVE_v16s8, 0b0>;
defm MVE_VMLA_qr_s16 : MVE_VMLA_qr_multi<"vmla", MVE_v8s16, 0b0>;
defm MVE_VMLA_qr_s32 : MVE_VMLA_qr_multi<"vmla", MVE_v4s32, 0b0>;
defm MVE_VMLA_qr_u8 : MVE_VMLA_qr_multi<"vmla", MVE_v16u8, 0b0>;
defm MVE_VMLA_qr_u16 : MVE_VMLA_qr_multi<"vmla", MVE_v8u16, 0b0>;
defm MVE_VMLA_qr_u32 : MVE_VMLA_qr_multi<"vmla", MVE_v4u32, 0b0>;

defm MVE_VMLAS_qr_s8 : MVE_VMLA_qr_multi<"vmlas", MVE_v16s8, 0b1>;
defm MVE_VMLAS_qr_s16 : MVE_VMLA_qr_multi<"vmlas", MVE_v8s16, 0b1>;
defm MVE_VMLAS_qr_s32 : MVE_VMLA_qr_multi<"vmlas", MVE_v4s32, 0b1>;
defm MVE_VMLAS_qr_u8 : MVE_VMLA_qr_multi<"vmlas", MVE_v16u8, 0b1>;
defm MVE_VMLAS_qr_u16 : MVE_VMLA_qr_multi<"vmlas", MVE_v8u16, 0b1>;
defm MVE_VMLAS_qr_u32 : MVE_VMLA_qr_multi<"vmlas", MVE_v4u32, 0b1>;

// Float fused multiply-accumulate with a scalar operand. Since fma is
// commutative in its first two operands, both operand orders of the
// scalar are matched.
multiclass MVE_VFMA_qr_multi<string iname, MVEVectorVTInfo VTI,
                             bit scalar_addend> {
  def "": MVE_VFMAMLA_qr<iname, VTI.Suffix, VTI.Size{0}, 0b11, scalar_addend>;
  defvar Inst = !cast<Instruction>(NAME);
  defvar pred_int = int_arm_mve_fma_predicated;
  defvar v1 = (VTI.Vec MQPR:$v1);
  defvar v2 = (VTI.Vec MQPR:$v2);
  defvar vs = (VTI.Vec (ARMvdup (i32 rGPR:$s)));
  defvar is = (i32 rGPR:$s);
  defvar pred = (VTI.Pred VCCR:$pred);

  let Predicates = [HasMVEFloat] in {
    if scalar_addend then {
      def : Pat<(VTI.Vec (fma v1, v2, vs)),
                (VTI.Vec (Inst v1, v2, is))>;
      def : Pat<(VTI.Vec (vselect (VTI.Pred VCCR:$pred),
                                  (VTI.Vec (fma v1, v2, vs)),
                                  v1)),
                (VTI.Vec (Inst v1, v2, is, ARMVCCThen, $pred))>;
      def : Pat<(VTI.Vec (pred_int v1, v2, vs, pred)),
                (VTI.Vec (Inst v1, v2, is, ARMVCCThen, pred))>;
    } else {
      def : Pat<(VTI.Vec (fma v1, vs, v2)),
                (VTI.Vec (Inst v2, v1, is))>;
      def : Pat<(VTI.Vec (fma vs, v1, v2)),
                (VTI.Vec (Inst v2, v1, is))>;
      def : Pat<(VTI.Vec (vselect (VTI.Pred VCCR:$pred),
                                  (VTI.Vec (fma vs, v2, v1)),
                                  v1)),
                (VTI.Vec (Inst v1, v2, is, ARMVCCThen, $pred))>;
      def : Pat<(VTI.Vec (vselect (VTI.Pred VCCR:$pred),
                                  (VTI.Vec (fma v2, vs, v1)),
                                  v1)),
                (VTI.Vec (Inst v1, v2, is, ARMVCCThen, $pred))>;
      def : Pat<(VTI.Vec (pred_int v1, vs, v2, pred)),
                (VTI.Vec (Inst v2, v1, is, ARMVCCThen, pred))>;
      def : Pat<(VTI.Vec (pred_int vs, v1, v2, pred)),
                (VTI.Vec (Inst v2, v1, is, ARMVCCThen, pred))>;
    }
  }
}

let Predicates = [HasMVEFloat] in {
  defm MVE_VFMA_qr_f16 : MVE_VFMA_qr_multi<"vfma", MVE_v8f16, 0>;
  defm MVE_VFMA_qr_f32 : MVE_VFMA_qr_multi<"vfma", MVE_v4f32, 0>;
  defm MVE_VFMA_qr_Sf16 : MVE_VFMA_qr_multi<"vfmas", MVE_v8f16, 1>;
  defm MVE_VFMA_qr_Sf32 : MVE_VFMA_qr_multi<"vfmas", MVE_v4f32, 1>;
}

// Saturating doubling multiply-accumulate with a scalar (vqdmlah family).
class MVE_VQDMLAH_qr<string iname, string suffix, bit U, bits<2> size,
                     bit bit_5, bit bit_12, list<dag> pattern=[]>
  : MVE_qDestSrc_rSrc<iname, suffix, pattern> {

  let Inst{28} = U;
  let Inst{21-20} = size;
  let Inst{16} = 0b0;
  let Inst{12} = bit_12;
  let Inst{8} = 0b0;
  let Inst{5} = bit_5;
}

multiclass MVE_VQDMLAH_qr_multi<string iname, MVEVectorVTInfo VTI,
                                bit bit_5, bit bit_12> {
  def "": MVE_VQDMLAH_qr<iname, VTI.Suffix, 0b0, VTI.Size, bit_5, bit_12>;
  defvar Inst = !cast<Instruction>(NAME);
  defvar unpred_int = !cast<Intrinsic>("int_arm_mve_" # iname);
  defvar pred_int = !cast<Intrinsic>("int_arm_mve_" # iname # "_predicated");

  let Predicates = [HasMVEInt] in {
    def : Pat<(VTI.Vec
(unpred_int (VTI.Vec MQPR:$v1), (VTI.Vec MQPR:$v2),
                           (i32 rGPR:$s))),
              (VTI.Vec (Inst (VTI.Vec MQPR:$v1), (VTI.Vec MQPR:$v2),
                             (i32 rGPR:$s)))>;
    def : Pat<(VTI.Vec (pred_int (VTI.Vec MQPR:$v1), (VTI.Vec MQPR:$v2),
                                 (i32 rGPR:$s), (VTI.Pred VCCR:$pred))),
              (VTI.Vec (Inst (VTI.Vec MQPR:$v1), (VTI.Vec MQPR:$v2),
                             (i32 rGPR:$s), ARMVCCThen,
                             (VTI.Pred VCCR:$pred)))>;
  }
}

multiclass MVE_VQDMLAH_qr_types<string iname, bit bit_5, bit bit_12> {
  defm s8 : MVE_VQDMLAH_qr_multi<iname, MVE_v16s8, bit_5, bit_12>;
  defm s16 : MVE_VQDMLAH_qr_multi<iname, MVE_v8s16, bit_5, bit_12>;
  defm s32 : MVE_VQDMLAH_qr_multi<iname, MVE_v4s32, bit_5, bit_12>;
}

defm MVE_VQDMLAH_qr : MVE_VQDMLAH_qr_types<"vqdmlah", 0b1, 0b0>;
defm MVE_VQRDMLAH_qr : MVE_VQDMLAH_qr_types<"vqrdmlah", 0b0, 0b0>;
defm MVE_VQDMLASH_qr : MVE_VQDMLAH_qr_types<"vqdmlash", 0b1, 0b1>;
defm MVE_VQRDMLASH_qr : MVE_VQDMLAH_qr_types<"vqrdmlash", 0b0, 0b1>;

// Incrementing/decrementing dup (vidup/vddup): fills Qd from the scalar Rn,
// stepping by $imm, and also writes the updated scalar back to Rn.
class MVE_VxDUP<string iname, string suffix, bits<2> size, bit bit_12,
                ValueType VT, SDPatternOperator vxdup>
  : MVE_p<(outs MQPR:$Qd, tGPREven:$Rn),
          (ins tGPREven:$Rn_src, MVE_VIDUP_imm:$imm), NoItinerary,
          iname, suffix, "$Qd, $Rn, $imm", vpred_r, "$Rn = $Rn_src",
          [(set (VT MQPR:$Qd), (i32 tGPREven:$Rn),
              (vxdup (i32 tGPREven:$Rn_src), (i32 imm:$imm)))]> {
  bits<4> Qd;
  bits<4> Rn;
  bits<2> imm;

  let Inst{28} = 0b0;
  let Inst{25-23} = 0b100;
  let Inst{22} = Qd{3};
  let Inst{21-20} = size;
  let Inst{19-17} = Rn{3-1};
  let Inst{16} = 0b1;
  let Inst{15-13} = Qd{2-0};
  let Inst{12} = bit_12;
  let Inst{11-8} = 0b1111;
  let Inst{7} = imm{1};
  let Inst{6-1} = 0b110111;
  let Inst{0} = imm{0};
  let validForTailPredication = 1;
  let hasSideEffects = 0;
}

def MVE_VIDUPu8 : MVE_VxDUP<"vidup", "u8", 0b00, 0b0, v16i8, ARMvidup>;
def MVE_VIDUPu16 : MVE_VxDUP<"vidup", "u16", 0b01, 0b0, v8i16, ARMvidup>;
def MVE_VIDUPu32 : MVE_VxDUP<"vidup", "u32", 0b10, 0b0, v4i32, ARMvidup>;

def MVE_VDDUPu8 : MVE_VxDUP<"vddup", "u8", 0b00, 0b1, v16i8, null_frag>;
def MVE_VDDUPu16 : MVE_VxDUP<"vddup", "u16", 0b01, 0b1, v8i16, null_frag>;
def MVE_VDDUPu32 : MVE_VxDUP<"vddup", "u32", 0b10, 0b1, v4i32, null_frag>;

// Wrapping variant (viwdup/vdwdup): as above, with an extra odd GPR Rm.
class MVE_VxWDUP<string iname, string suffix, bits<2> size, bit bit_12,
                 list<dag> pattern=[]>
  : MVE_p<(outs MQPR:$Qd, tGPREven:$Rn),
          (ins tGPREven:$Rn_src, tGPROdd:$Rm, MVE_VIDUP_imm:$imm), NoItinerary,
          iname, suffix, "$Qd, $Rn, $Rm, $imm", vpred_r, "$Rn = $Rn_src",
          pattern> {
  bits<4> Qd;
  bits<4> Rm;
  bits<4> Rn;
  bits<2> imm;

  let Inst{28} = 0b0;
  let Inst{25-23} = 0b100;
  let Inst{22} = Qd{3};
  let Inst{21-20} = size;
  let Inst{19-17} = Rn{3-1};
  let Inst{16} = 0b1;
  let Inst{15-13} = Qd{2-0};
  let Inst{12} = bit_12;
  let Inst{11-8} = 0b1111;
  let Inst{7} = imm{1};
  let Inst{6-4} = 0b110;
  let Inst{3-1} = Rm{3-1};
  let Inst{0} = imm{0};
  let validForTailPredication = 1;
  let hasSideEffects = 0;
}

def MVE_VIWDUPu8 : MVE_VxWDUP<"viwdup", "u8", 0b00, 0b0>;
def MVE_VIWDUPu16 : MVE_VxWDUP<"viwdup", "u16", 0b01, 0b0>;
def MVE_VIWDUPu32 : MVE_VxWDUP<"viwdup", "u32", 0b10, 0b0>;

def MVE_VDWDUPu8 : MVE_VxWDUP<"vdwdup", "u8", 0b00, 0b1>;
def MVE_VDWDUPu16 : MVE_VxWDUP<"vdwdup", "u16", 0b01, 0b1>;
def MVE_VDWDUPu32 : MVE_VxWDUP<"vdwdup", "u32", 0b10, 0b1>;

// Create a tail predicate from a scalar element count (vctp).
let isReMaterializable = 1 in
class MVE_VCTPInst<string suffix, bits<2> size, list<dag> pattern=[]>
  : MVE_p<(outs VCCR:$P0), (ins rGPR:$Rn), NoItinerary, "vctp", suffix,
          "$Rn", vpred_n, "", pattern> {
  bits<4> Rn;

  let Inst{28-27} = 0b10;
  let Inst{26-22} = 0b00000;
  let Inst{21-20} = size;
  let Inst{19-16} = Rn{3-0};
  let Inst{15-11} = 0b11101;
  let Inst{10-0} = 0b00000000001;
  let Unpredictable{10-0} = 0b11111111111;

  let Constraints = "";
  let DecoderMethod = "DecodeMveVCTP";
  let validForTailPredication = 1;
}

multiclass MVE_VCTP<MVEVectorVTInfo VTI, Intrinsic intr> {
  def "": MVE_VCTPInst<VTI.BitsSuffix, VTI.Size>;
  defvar Inst = !cast<Instruction>(NAME);

  let Predicates = [HasMVEInt] in {
    def : Pat<(intr rGPR:$Rn),
              (VTI.Pred (Inst rGPR:$Rn))>;
    // A vctp ANDed with another predicate folds into a predicated vctp.
    def : Pat<(and (intr rGPR:$Rn), (VTI.Pred VCCR:$mask)),
              (VTI.Pred (Inst rGPR:$Rn, ARMVCCThen, VCCR:$mask))>;
  }
}

defm MVE_VCTP8 : MVE_VCTP<MVE_v16i8, int_arm_mve_vctp8>;
defm MVE_VCTP16 : MVE_VCTP<MVE_v8i16, int_arm_mve_vctp16>;
defm MVE_VCTP32 : MVE_VCTP<MVE_v4i32, int_arm_mve_vctp32>;
defm MVE_VCTP64 : MVE_VCTP<MVE_v2i64, int_arm_mve_vctp64>;

// end of mve_qDest_rSrc

// start of coproc mov

// Move two 32-bit lanes between a Q register and a GPR pair; to_qreg gives
// the transfer direction (bit 20).
class MVE_VMOV_64bit<dag oops, dag iops, bit to_qreg, string ops, string cstr>
  : MVE_VMOV_lane_base<oops, !con(iops, (ins MVEPairVectorIndex2:$idx,
                                             MVEPairVectorIndex0:$idx2)),
                       NoItinerary, "vmov", "", ops, cstr, []> {
  bits<5> Rt;
  bits<5> Rt2;
  bits<4> Qd;
  bit idx;
  bit idx2;

  let Inst{31-23} = 0b111011000;
  let Inst{22} = Qd{3};
  let Inst{21} = 0b0;
  let Inst{20} = to_qreg;
  let Inst{19-16} = Rt2{3-0};
  let Inst{15-13} = Qd{2-0};
  let Inst{12-5} = 0b01111000;
  let Inst{4} = idx2;
  let Inst{3-0} = Rt{3-0};

  let hasSideEffects = 0;
}

// The assembly syntax for these instructions mentions the vector
// register name twice, e.g.
//
//    vmov q2[2], q2[0], r0, r1
//    vmov r0, r1, q2[2], q2[0]
//
// which needs a bit of juggling with MC operand handling.
//
// For the move _into_ a vector register, the MC operand list also has
// to mention the register name twice: once as the output, and once as
// an extra input to represent where the unchanged half of the output
// register comes from (when this instruction is used in code
// generation). So we arrange that the first mention of the vector reg
// in the instruction is considered by the AsmMatcher to be the output
// ($Qd), and the second one is the input ($QdSrc). Binding them
// together with the existing 'tie' constraint is enough to enforce at
// register allocation time that they have to be the same register.
//
// For the move _from_ a vector register, there's no way to get round
// the fact that both instances of that register name have to be
// inputs. They have to be the same register again, but this time, we
// can't use a tie constraint, because that has to be between an
// output and an input operand. So this time, we have to arrange that
// the q-reg appears just once in the MC operand list, in spite of
// being mentioned twice in the asm syntax - which needs a custom
// AsmMatchConverter.

// GPR pair -> two vector lanes.
def MVE_VMOV_q_rr : MVE_VMOV_64bit<(outs MQPR:$Qd),
                                   (ins MQPR:$QdSrc, rGPR:$Rt, rGPR:$Rt2),
                                   0b1, "$Qd$idx, $QdSrc$idx2, $Rt, $Rt2",
                                   "$Qd = $QdSrc"> {
  let DecoderMethod = "DecodeMVEVMOVDRegtoQ";
}

// Two vector lanes -> GPR pair.
def MVE_VMOV_rr_q : MVE_VMOV_64bit<(outs rGPR:$Rt, rGPR:$Rt2), (ins MQPR:$Qd),
                                   0b0, "$Rt, $Rt2, $Qd$idx, $Qd$idx2", ""> {
  let DecoderMethod = "DecodeMVEVMOVQtoDReg";
  let AsmMatchConverter = "cvtMVEVMOVQtoDReg";
}

let Predicates = [HasMVEInt] in {
  // Double lane moves. There are a number of patterns here. We know that the
  // insertelt's will be in descending order by index, and need to match the 5
  // patterns that might contain 2-0 or 3-1 pairs. These are:
  // 3 2 1 0    -> vmovqrr 31; vmovqrr 20
  // 3 2 1      -> vmovqrr 31; vmov 2
  // 3 1        -> vmovqrr 31
  // 2 1 0      -> vmovqrr 20; vmov 1
  // 2 0        -> vmovqrr 20
  // The other potential patterns will be handled by single lane inserts.
  def : Pat<(insertelt (insertelt (insertelt (insertelt (v4i32 MQPR:$src1),
                                                        rGPR:$srcA, (i32 0)),
                                             rGPR:$srcB, (i32 1)),
                                  rGPR:$srcC, (i32 2)),
                       rGPR:$srcD, (i32 3)),
            (MVE_VMOV_q_rr (MVE_VMOV_q_rr MQPR:$src1, rGPR:$srcA, rGPR:$srcC, (i32 2), (i32 0)),
                           rGPR:$srcB, rGPR:$srcD, (i32 3), (i32 1))>;
  def : Pat<(insertelt (insertelt (insertelt (v4i32 MQPR:$src1),
                                             rGPR:$srcB, (i32 1)),
                                  rGPR:$srcC, (i32 2)),
                       rGPR:$srcD, (i32 3)),
            (MVE_VMOV_q_rr (MVE_VMOV_to_lane_32 MQPR:$src1, rGPR:$srcC, (i32 2)),
                           rGPR:$srcB, rGPR:$srcD, (i32 3), (i32 1))>;
  def : Pat<(insertelt (insertelt (v4i32 MQPR:$src1), rGPR:$srcA, (i32 1)), rGPR:$srcB, (i32 3)),
            (MVE_VMOV_q_rr MQPR:$src1, rGPR:$srcA, rGPR:$srcB, (i32 3), (i32 1))>;
  def : Pat<(insertelt (insertelt (insertelt (v4i32 MQPR:$src1),
                                             rGPR:$srcB, (i32 0)),
                                  rGPR:$srcC, (i32 1)),
                       rGPR:$srcD, (i32 2)),
            (MVE_VMOV_q_rr (MVE_VMOV_to_lane_32 MQPR:$src1, rGPR:$srcC, (i32 1)),
                           rGPR:$srcB, rGPR:$srcD, (i32 2), (i32 0))>;
  def : Pat<(insertelt (insertelt (v4i32 MQPR:$src1), rGPR:$srcA, (i32 0)), rGPR:$srcB, (i32 2)),
            (MVE_VMOV_q_rr MQPR:$src1, rGPR:$srcA, rGPR:$srcB, (i32 2), (i32 0))>;
}

// end of coproc mov

// start of MVE interleaving load/store

// Base class for the family of interleaving/deinterleaving
// load/stores with names like VLD20.8 and VST43.32.
// Shared encoding for all VLD2x/VLD4x/VST2x/VST4x instructions.
// 'stage' selects which of the 2 or 4 passes this instruction performs,
// and 'fourregs' distinguishes the 4-register family from the 2-register one.
class MVE_vldst24_base<bit writeback, bit fourregs, bits<2> stage, bits<2> size,
                       bit load, dag Oops, dag loadIops, dag wbIops,
                       string iname, string ops,
                       string cstr, list<dag> pattern=[]>
  : MVE_MI<Oops, !con(loadIops, wbIops), NoItinerary, iname, ops, cstr, pattern> {
  bits<4> VQd;
  bits<4> Rn;

  let Inst{31-22} = 0b1111110010;
  let Inst{21} = writeback;
  let Inst{20} = load;
  let Inst{19-16} = Rn;
  let Inst{15-13} = VQd{2-0};
  let Inst{12-9} = 0b1111;
  let Inst{8-7} = size;
  let Inst{6-5} = stage;
  let Inst{4-1} = 0b0000;
  let Inst{0} = fourregs;

  let mayLoad = load;
  let mayStore = !eq(load,0);
  let hasSideEffects = 0;
  let validForTailPredication = load;
}

// A parameter class used to encapsulate all the ways the writeback
// variants of VLD20 and friends differ from the non-writeback ones.
class MVE_vldst24_writeback<bit b, dag Oo, dag Io,
                            string sy="", string c="", string n=""> {
  bit writeback = b;
  dag Oops = Oo;
  dag Iops = Io;
  string syntax = sy;      // assembly suffix ("!" for writeback)
  string cstr = c;         // extra register constraint string
  string id_suffix = n;    // suffix for the generated def name ("_wb")
}

// Another parameter class that encapsulates the differences between VLD2x
// and VLD4x.
class MVE_vldst24_nvecs<int n, list<int> s, bit b, RegisterOperand vl> {
  int nvecs = n;
  list<int> stages = s;
  bit bit0 = b;
  RegisterOperand VecList = vl;
}

// A third parameter class that distinguishes VLDnn.8 from .16 from .32.
class MVE_vldst24_lanesize<int i, bits<2> b> {
  int lanesize = i;
  bits<2> sizebits = b;
}

// A base class for each direction of transfer: one for load, one for
// store. I can't make these a fourth independent parametric tuple
// class, because they have to take the nvecs tuple class as a
// parameter, in order to find the right VecList operand type.

class MVE_vld24_base<MVE_vldst24_nvecs n, bits<2> pat, bits<2> size,
                     MVE_vldst24_writeback wb, string iname,
                     list<dag> pattern=[]>
  : MVE_vldst24_base<wb.writeback, n.bit0, pat, size, 1,
                     !con((outs n.VecList:$VQd), wb.Oops),
                     (ins n.VecList:$VQdSrc), wb.Iops,
                     iname, "$VQd, $Rn" # wb.syntax,
                     wb.cstr # ",$VQdSrc = $VQd", pattern>;

class MVE_vst24_base<MVE_vldst24_nvecs n, bits<2> pat, bits<2> size,
                     MVE_vldst24_writeback wb, string iname,
                     list<dag> pattern=[]>
  : MVE_vldst24_base<wb.writeback, n.bit0, pat, size, 0,
                     wb.Oops, (ins n.VecList:$VQd), wb.Iops,
                     iname, "$VQd, $Rn" # wb.syntax,
                     wb.cstr, pattern>;

// Actually define all the interleaving loads and stores, by a series
// of nested foreaches over number of vectors (VLD2/VLD4); stage
// within one of those series (VLDx0/VLDx1/VLDx2/VLDx3); size of
// vector lane; writeback or no writeback.
foreach n = [MVE_vldst24_nvecs<2, [0,1], 0, VecList2Q>,
             MVE_vldst24_nvecs<4, [0,1,2,3], 1, VecList4Q>] in
foreach stage = n.stages in
foreach s = [MVE_vldst24_lanesize< 8, 0b00>,
             MVE_vldst24_lanesize<16, 0b01>,
             MVE_vldst24_lanesize<32, 0b10>] in
foreach wb = [MVE_vldst24_writeback<
                 1, (outs rGPR:$wb), (ins t2_nosp_addr_offset_none:$Rn),
                 "!", "$Rn.base = $wb", "_wb">,
              MVE_vldst24_writeback<0, (outs), (ins t2_addr_offset_none:$Rn)>] in {

  // For each case within all of those foreaches, define the actual
  // instructions. The def names are made by gluing together pieces
  // from all the parameter classes, and will end up being things like
  // MVE_VLD20_8 and MVE_VST43_16_wb.

  def "MVE_VLD" # n.nvecs # stage # "_" # s.lanesize # wb.id_suffix
    : MVE_vld24_base<n, stage, s.sizebits, wb,
                     "vld" # n.nvecs # stage # "."
# s.lanesize>;

  def "MVE_VST" # n.nvecs # stage # "_" # s.lanesize # wb.id_suffix
    : MVE_vst24_base<n, stage, s.sizebits, wb,
                     "vst" # n.nvecs # stage # "." # s.lanesize>;
}

// SelectionDAG node profiles for the writeback (post-incremented)
// VST2/VST4 nodes: result 0 is the updated base pointer.
def SDTARMVST2 : SDTypeProfile<1, 5, [SDTCisPtrTy<0>, SDTCisPtrTy<1>, SDTCisVT<2, i32>, SDTCisVec<3>,
                                      SDTCisSameAs<3, 4>, SDTCisVT<5, i32>]>;
def SDTARMVST4 : SDTypeProfile<1, 7, [SDTCisPtrTy<0>, SDTCisPtrTy<1>, SDTCisVT<2, i32>, SDTCisVec<3>,
                                      SDTCisSameAs<3, 4>, SDTCisSameAs<3, 5>,
                                      SDTCisSameAs<3, 6>, SDTCisVT<7, i32>]>;
def MVEVST2UPD : SDNode<"ARMISD::VST2_UPD", SDTARMVST2, [SDNPHasChain, SDNPMemOperand]>;
def MVEVST4UPD : SDNode<"ARMISD::VST4_UPD", SDTARMVST4, [SDNPHasChain, SDNPMemOperand]>;

// Selection patterns mapping the vst2q/vst4q intrinsics (and their
// writeback ISD nodes) onto the interleaving store instructions defined
// above, looked up by name via !cast.
multiclass MVE_vst24_patterns<int lanesize, ValueType VT> {
  foreach stage = [0,1] in
    def : Pat<(int_arm_mve_vst2q i32:$addr,
                  (VT MQPR:$v0), (VT MQPR:$v1), (i32 stage)),
              (!cast<Instruction>("MVE_VST2"#stage#"_"#lanesize)
                  (REG_SEQUENCE QQPR, VT:$v0, qsub_0, VT:$v1, qsub_1),
                  t2_addr_offset_none:$addr)>;
  foreach stage = [0,1] in
    // Writeback form: note "_wb" must be a quoted string operand of the
    // paste operator (a bare _wb identifier is not valid TableGen).
    def : Pat<(i32 (MVEVST2UPD i32:$addr, (i32 32),
                  (VT MQPR:$v0), (VT MQPR:$v1), (i32 stage))),
              (i32 (!cast<Instruction>("MVE_VST2"#stage#"_"#lanesize#"_wb")
                  (REG_SEQUENCE QQPR, VT:$v0, qsub_0, VT:$v1, qsub_1),
                  t2_addr_offset_none:$addr))>;

  foreach stage = [0,1,2,3] in
    def : Pat<(int_arm_mve_vst4q i32:$addr,
                  (VT MQPR:$v0), (VT MQPR:$v1),
                  (VT MQPR:$v2), (VT MQPR:$v3), (i32 stage)),
              (!cast<Instruction>("MVE_VST4"#stage#"_"#lanesize)
                  (REG_SEQUENCE QQQQPR, VT:$v0, qsub_0, VT:$v1, qsub_1,
                                        VT:$v2, qsub_2, VT:$v3, qsub_3),
                  t2_addr_offset_none:$addr)>;
  foreach stage = [0,1,2,3] in
    def : Pat<(i32 (MVEVST4UPD i32:$addr, (i32 64),
                  (VT MQPR:$v0), (VT MQPR:$v1),
                  (VT MQPR:$v2), (VT MQPR:$v3), (i32 stage))),
              (i32 (!cast<Instruction>("MVE_VST4"#stage#"_"#lanesize#"_wb")
                  (REG_SEQUENCE QQQQPR, VT:$v0, qsub_0, VT:$v1, qsub_1,
                                        VT:$v2, qsub_2, VT:$v3, qsub_3),
                  t2_addr_offset_none:$addr))>;
}
defm : MVE_vst24_patterns<8, v16i8>;
defm : MVE_vst24_patterns<16, v8i16>;
defm : MVE_vst24_patterns<32, v4i32>;
defm : MVE_vst24_patterns<16, v8f16>;
defm : MVE_vst24_patterns<32, v4f32>;

// end of MVE interleaving load/store

// start of MVE predicable load/store

// A parameter class for the direction of transfer.
class MVE_ldst_direction<bit b, dag Oo, dag Io, string c=""> {
  bit load = b;
  dag Oops = Oo;
  dag Iops = Io;
  string cstr = c;
}
def MVE_ld: MVE_ldst_direction<1, (outs MQPR:$Qd), (ins), ",@earlyclobber $Qd">;
def MVE_st: MVE_ldst_direction<0, (outs), (ins MQPR:$Qd)>;

// A parameter class for the size of memory access in a load.
class MVE_memsz<bits<2> e, int s, AddrMode m, string mn, list<string> types> {
  bits<2> encoding = e;         // opcode bit(s) for encoding
  int shift = s;                // shift applied to immediate load offset
  AddrMode AM = m;

  // For instruction aliases: define the complete list of type
  // suffixes at this size, and the canonical ones for loads and
  // stores.
  string MnemonicLetter = mn;
  int TypeBits = !shl(8, s);
  string CanonLoadSuffix = ".u" # TypeBits;
  string CanonStoreSuffix = "." # TypeBits;
  list<string> suffixes = !foreach(letter, types, "." # letter # TypeBits);
}

// Instances of MVE_memsz.
//
// (memD doesn't need an AddrMode, because those are only for
// contiguous loads, and memD is only used by gather/scatters.)
def MVE_memB: MVE_memsz<0b00, 0, AddrModeT2_i7,   "b", ["", "u", "s"]>;
def MVE_memH: MVE_memsz<0b01, 1, AddrModeT2_i7s2, "h", ["", "u", "s", "f"]>;
def MVE_memW: MVE_memsz<0b10, 2, AddrModeT2_i7s4, "w", ["", "u", "s", "f"]>;
def MVE_memD: MVE_memsz<0b11, 3, ?,               "d", ["", "u", "s", "f"]>;

// This is the base class for all the MVE loads and stores other than
// the interleaving ones. All the non-interleaving loads/stores share
// the characteristic that they operate on just one vector register,
// so they are VPT-predicable.
//
// The predication operand is vpred_n, for both loads and stores. For
// store instructions, the reason is obvious: if there is no output
// register, there can't be a need for an input parameter giving the
// output register's previous value. Load instructions also don't need
// that input parameter, because unlike MVE data processing
// instructions, predicated loads are defined to set the inactive
// lanes of the output register to zero, instead of preserving their
// input values.
class MVE_VLDRSTR_base<MVE_ldst_direction dir, bit U, bit P, bit W, bit opc,
                       dag oops, dag iops, string asm, string suffix,
                       string ops, string cstr, list<dag> pattern=[]>
  : MVE_p<oops, iops, NoItinerary, asm, suffix, ops, vpred_n, cstr, pattern> {
  bits<3> Qd;

  let Inst{28} = U;
  let Inst{25} = 0b0;
  let Inst{24} = P;
  let Inst{22} = 0b0;
  let Inst{21} = W;
  let Inst{20} = dir.load;
  let Inst{15-13} = Qd{2-0};
  let Inst{12} = opc;
  let Inst{11-9} = 0b111;

  let mayLoad = dir.load;
  let mayStore = !eq(dir.load,0);
  let hasSideEffects = 0;
  let validForTailPredication = 1;
}

// Contiguous load and store instructions. These come in two main
// categories: same-size loads/stores in which 128 bits of vector
// register is transferred to or from 128 bits of memory in the most
// obvious way, and widening loads / narrowing stores, in which the
// size of memory accessed is less than the size of a vector register,
// so the load instructions sign- or zero-extend each memory value
// into a wider vector lane, and the store instructions truncate
// correspondingly.
//
// The instruction mnemonics for these two classes look reasonably
// similar, but the actual encodings are different enough to need two
// separate base classes.

// Contiguous, same size
class MVE_VLDRSTR_cs<MVE_ldst_direction dir, MVE_memsz memsz, bit P, bit W,
                     dag oops, dag iops, string asm, string suffix,
                     IndexMode im, string ops, string cstr>
  : MVE_VLDRSTR_base<dir, 0, P, W, 1, oops, iops, asm, suffix, ops, cstr> {
  bits<12> addr;
  let Inst{23} = addr{7};
  let Inst{19-16} = addr{11-8};
  let Inst{8-7} = memsz.encoding;
  let Inst{6-0} = addr{6-0};
}

// Contiguous, widening/narrowing
class MVE_VLDRSTR_cw<MVE_ldst_direction dir, MVE_memsz memsz, bit U,
                     bit P, bit W, bits<2> size, dag oops, dag iops,
                     string asm, string suffix, IndexMode im,
                     string ops, string cstr>
  : MVE_VLDRSTR_base<dir, U, P, W, 0, oops, iops, asm, suffix, ops, cstr> {
  bits<11> addr;
  let Inst{23} = addr{7};
  let Inst{19} = memsz.encoding{0}; // enough to tell 16- from 32-bit
  let Inst{18-16} = addr{10-8};
  let Inst{8-7} = size;
  let Inst{6-0} = addr{6-0};

  let IM = im;
}

// Multiclass wrapper on each of the _cw and _cs base classes, to
// generate three writeback modes (none, preindex, postindex).

// Widening/narrowing contiguous load/store: non-writeback, pre-indexed
// and post-indexed variants.
multiclass MVE_VLDRSTR_cw_m<MVE_ldst_direction dir, MVE_memsz memsz,
                            string asm, string suffix, bit U, bits<2> size> {
  let AM = memsz.AM in {
    def "" : MVE_VLDRSTR_cw<
        dir, memsz, U, 1, 0, size,
        dir.Oops, !con(dir.Iops, (ins taddrmode_imm7<memsz.shift>:$addr)),
        asm, suffix, IndexModeNone, "$Qd, $addr", "">;

    def _pre : MVE_VLDRSTR_cw<
        dir, memsz, U, 1, 1, size,
        !con((outs tGPR:$wb), dir.Oops),
        !con(dir.Iops, (ins taddrmode_imm7<memsz.shift>:$addr)),
        asm, suffix, IndexModePre, "$Qd, $addr!", "$addr.base = $wb"> {
      let DecoderMethod = "DecodeMVE_MEM_1_pre<"#memsz.shift#">";
    }

    def _post : MVE_VLDRSTR_cw<
        dir, memsz, U, 0, 1, size,
        !con((outs tGPR:$wb), dir.Oops),
        !con(dir.Iops, (ins t_addr_offset_none:$Rn,
                            t2am_imm7_offset<memsz.shift>:$addr)),
        asm, suffix, IndexModePost, "$Qd, $Rn$addr", "$Rn.base = $wb"> {
      bits<4> Rn;
      let Inst{18-16} = Rn{2-0};
    }
  }
}

// Same-size contiguous load/store: non-writeback, pre-indexed and
// post-indexed variants.
multiclass MVE_VLDRSTR_cs_m<MVE_ldst_direction dir, MVE_memsz memsz,
                            string asm, string suffix> {
  let AM = memsz.AM in {
    def "" : MVE_VLDRSTR_cs<
        dir, memsz, 1, 0,
        dir.Oops, !con(dir.Iops, (ins t2addrmode_imm7<memsz.shift>:$addr)),
        asm, suffix, IndexModeNone, "$Qd, $addr", "">;

    def _pre : MVE_VLDRSTR_cs<
        dir, memsz, 1, 1,
        !con((outs rGPR:$wb), dir.Oops),
        !con(dir.Iops, (ins t2addrmode_imm7_pre<memsz.shift>:$addr)),
        asm, suffix, IndexModePre, "$Qd, $addr!", "$addr.base = $wb"> {
      let DecoderMethod = "DecodeMVE_MEM_2_pre<"#memsz.shift#">";
    }

    def _post : MVE_VLDRSTR_cs<
        dir, memsz, 0, 1,
        !con((outs rGPR:$wb), dir.Oops),
        !con(dir.Iops, (ins t2_nosp_addr_offset_none:$Rn,
                            t2am_imm7_offset<memsz.shift>:$addr)),
        asm, suffix, IndexModePost, "$Qd, $Rn$addr", "$Rn.base = $wb"> {
      bits<4> Rn;
      let Inst{19-16} = Rn{3-0};
    }
  }
}

// Now actually declare all the contiguous load/stores, via those
// multiclasses. The instruction ids coming out of this are the bare
// names shown in the defm, with _pre or _post appended for writeback,
// e.g. MVE_VLDRBS16, MVE_VSTRB16_pre, MVE_VSTRHU16_post.

defm MVE_VLDRBS16: MVE_VLDRSTR_cw_m<MVE_ld, MVE_memB, "vldrb", "s16", 0, 0b01>;
defm MVE_VLDRBS32: MVE_VLDRSTR_cw_m<MVE_ld, MVE_memB, "vldrb", "s32", 0, 0b10>;
defm MVE_VLDRBU16: MVE_VLDRSTR_cw_m<MVE_ld, MVE_memB, "vldrb", "u16", 1, 0b01>;
defm MVE_VLDRBU32: MVE_VLDRSTR_cw_m<MVE_ld, MVE_memB, "vldrb", "u32", 1, 0b10>;
defm MVE_VLDRHS32: MVE_VLDRSTR_cw_m<MVE_ld, MVE_memH, "vldrh", "s32", 0, 0b10>;
defm MVE_VLDRHU32: MVE_VLDRSTR_cw_m<MVE_ld, MVE_memH, "vldrh", "u32", 1, 0b10>;

defm MVE_VLDRBU8:  MVE_VLDRSTR_cs_m<MVE_ld, MVE_memB, "vldrb", "u8">;
defm MVE_VLDRHU16: MVE_VLDRSTR_cs_m<MVE_ld, MVE_memH, "vldrh", "u16">;
defm MVE_VLDRWU32: MVE_VLDRSTR_cs_m<MVE_ld, MVE_memW, "vldrw", "u32">;

defm MVE_VSTRB16:  MVE_VLDRSTR_cw_m<MVE_st, MVE_memB, "vstrb", "16", 0, 0b01>;
defm MVE_VSTRB32:  MVE_VLDRSTR_cw_m<MVE_st, MVE_memB, "vstrb", "32", 0, 0b10>;
defm MVE_VSTRH32:  MVE_VLDRSTR_cw_m<MVE_st, MVE_memH, "vstrh", "32", 0, 0b10>;

defm MVE_VSTRBU8 : MVE_VLDRSTR_cs_m<MVE_st, MVE_memB, "vstrb", "8">;
defm MVE_VSTRHU16: MVE_VLDRSTR_cs_m<MVE_st, MVE_memH, "vstrh", "16">;
defm MVE_VSTRWU32: MVE_VLDRSTR_cs_m<MVE_st, MVE_memW, "vstrw", "32">;

// Gather loads / scatter stores whose address operand is of the form
// [Rn,Qm], i.e. a single GPR as the common base address, plus a
// vector of offset from it. ('Load/store this sequence of elements of
// the same array.')
//
// Like the contiguous family, these loads and stores can widen the
// loaded values / truncate the stored ones, or they can just
// load/store the same size of memory and vector lane.
// But unlike the
// contiguous family, there's no particular difference in encoding
// between those two cases.
//
// This family also comes with the option to scale the offset values
// in Qm by the size of the loaded memory (i.e. to treat them as array
// indices), or not to scale them (to treat them as plain byte offsets
// in memory, so that perhaps the loaded values are unaligned). The
// scaled instructions' address operand in assembly looks like
// [Rn,Qm,UXTW #2] or similar.

// Base class.
class MVE_VLDRSTR_rq<MVE_ldst_direction dir, MVE_memsz memsz, bit U,
                     bits<2> size, bit os, string asm, string suffix, int shift>
  : MVE_VLDRSTR_base<dir, U, 0b0, 0b0, 0, dir.Oops,
                     !con(dir.Iops, (ins mve_addr_rq_shift<shift>:$addr)),
                     asm, suffix, "$Qd, $addr", dir.cstr> {
  bits<7> addr;
  let Inst{23} = 0b1;
  let Inst{19-16} = addr{6-3};
  let Inst{8-7} = size;
  let Inst{6} = memsz.encoding{1};
  let Inst{5} = 0;
  let Inst{4} = memsz.encoding{0};
  let Inst{3-1} = addr{2-0};
  let Inst{0} = os;              // offset-scaled bit
}

// Multiclass that defines the scaled and unscaled versions of an
// instruction, when the memory size is wider than a byte. The scaled
// version gets the default name like MVE_VLDRBU16_rq; the unscaled /
// potentially unaligned version gets a "_u" suffix, e.g.
// MVE_VLDRBU16_rq_u.
multiclass MVE_VLDRSTR_rq_w<MVE_ldst_direction dir, MVE_memsz memsz,
                            string asm, string suffix, bit U, bits<2> size> {
  def _u : MVE_VLDRSTR_rq<dir, memsz, U, size, 0, asm, suffix, 0>;
  def "" : MVE_VLDRSTR_rq<dir, memsz, U, size, 1, asm, suffix, memsz.shift>;
}

// Subclass of MVE_VLDRSTR_rq with the same API as that multiclass,
// for use when the memory size is one byte, so there's no 'scaled'
// version of the instruction at all. (This is encoded as if it were
// unscaled, but named in the default way with no _u suffix.)
class MVE_VLDRSTR_rq_b<MVE_ldst_direction dir, MVE_memsz memsz,
                       string asm, string suffix, bit U, bits<2> size>
  : MVE_VLDRSTR_rq<dir, memsz, U, size, 0, asm, suffix, 0>;

// Multiclasses wrapping that to add ISel patterns for intrinsics.
multiclass MVE_VLDR_rq_w<MVE_memsz memsz, list<MVEVectorVTInfo> VTIs> {
  defm "": MVE_VLDRSTR_rq_w<MVE_ld, memsz, "vldr" # memsz.MnemonicLetter,
                            VTIs[0].Suffix, VTIs[0].Unsigned, VTIs[0].Size>;
  defvar Inst = !cast<Instruction>(NAME);
  defvar InstU = !cast<Instruction>(NAME # "_u");

  // When lane size equals memory size, no extension happens, so both
  // signedness flags on the intrinsic can select the same instruction.
  foreach VTI = VTIs in
  foreach UnsignedFlag = !if(!eq(VTI.Size, memsz.encoding),
                             [0,1], [VTI.Unsigned]) in {
    def : Pat<(VTI.Vec (int_arm_mve_vldr_gather_offset GPR:$base, (VTIs[0].Vec MQPR:$offsets), memsz.TypeBits, 0, UnsignedFlag)),
              (VTI.Vec (InstU GPR:$base, MQPR:$offsets))>;
    def : Pat<(VTI.Vec (int_arm_mve_vldr_gather_offset GPR:$base, (VTIs[0].Vec MQPR:$offsets), memsz.TypeBits, memsz.shift, UnsignedFlag)),
              (VTI.Vec (Inst GPR:$base, MQPR:$offsets))>;
    def : Pat<(VTI.Vec (int_arm_mve_vldr_gather_offset_predicated GPR:$base, (VTIs[0].Vec MQPR:$offsets), memsz.TypeBits, 0, UnsignedFlag, (VTI.Pred VCCR:$pred))),
              (VTI.Vec (InstU GPR:$base, MQPR:$offsets, ARMVCCThen, VCCR:$pred))>;
    def : Pat<(VTI.Vec (int_arm_mve_vldr_gather_offset_predicated GPR:$base, (VTIs[0].Vec MQPR:$offsets), memsz.TypeBits, memsz.shift, UnsignedFlag, (VTI.Pred VCCR:$pred))),
              (VTI.Vec (Inst GPR:$base, MQPR:$offsets, ARMVCCThen, VCCR:$pred))>;
  }
}
multiclass MVE_VLDR_rq_b<list<MVEVectorVTInfo> VTIs> {
  def "": MVE_VLDRSTR_rq_b<MVE_ld, MVE_memB, "vldrb",
                           VTIs[0].Suffix, VTIs[0].Unsigned, VTIs[0].Size>;
  defvar Inst = !cast<Instruction>(NAME);

  foreach VTI = VTIs in {
    def : Pat<(VTI.Vec (int_arm_mve_vldr_gather_offset GPR:$base, (VTIs[0].Vec MQPR:$offsets), 8, 0, VTI.Unsigned)),
              (VTI.Vec (Inst GPR:$base, MQPR:$offsets))>;
    def :
Pat<(VTI.Vec (int_arm_mve_vldr_gather_offset_predicated GPR:$base, (VTIs[0].Vec MQPR:$offsets), 8, 0, VTI.Unsigned, (VTI.Pred VCCR:$pred))),
              (VTI.Vec (Inst GPR:$base, MQPR:$offsets, ARMVCCThen, VCCR:$pred))>;
  }
}
// Scatter-store counterpart of MVE_VLDR_rq_w: scaled and unscaled
// instructions plus intrinsic selection patterns.
multiclass MVE_VSTR_rq_w<MVE_memsz memsz, list<MVEVectorVTInfo> VTIs> {
  defm "": MVE_VLDRSTR_rq_w<MVE_st, memsz, "vstr" # memsz.MnemonicLetter,
                            VTIs[0].BitsSuffix, 0, VTIs[0].Size>;
  defvar Inst = !cast<Instruction>(NAME);
  defvar InstU = !cast<Instruction>(NAME # "_u");

  foreach VTI = VTIs in {
    def : Pat<(int_arm_mve_vstr_scatter_offset GPR:$base, (VTIs[0].Vec MQPR:$offsets), (VTI.Vec MQPR:$data), memsz.TypeBits, 0),
              (InstU MQPR:$data, GPR:$base, MQPR:$offsets)>;
    def : Pat<(int_arm_mve_vstr_scatter_offset GPR:$base, (VTIs[0].Vec MQPR:$offsets), (VTI.Vec MQPR:$data), memsz.TypeBits, memsz.shift),
              (Inst MQPR:$data, GPR:$base, MQPR:$offsets)>;
    def : Pat<(int_arm_mve_vstr_scatter_offset_predicated GPR:$base, (VTIs[0].Vec MQPR:$offsets), (VTI.Vec MQPR:$data), memsz.TypeBits, 0, (VTI.Pred VCCR:$pred)),
              (InstU MQPR:$data, GPR:$base, MQPR:$offsets, ARMVCCThen, VCCR:$pred)>;
    def : Pat<(int_arm_mve_vstr_scatter_offset_predicated GPR:$base, (VTIs[0].Vec MQPR:$offsets), (VTI.Vec MQPR:$data), memsz.TypeBits, memsz.shift, (VTI.Pred VCCR:$pred)),
              (Inst MQPR:$data, GPR:$base, MQPR:$offsets, ARMVCCThen, VCCR:$pred)>;
  }
}
// Byte-sized scatter store: only the unscaled encoding exists.
multiclass MVE_VSTR_rq_b<list<MVEVectorVTInfo> VTIs> {
  def "": MVE_VLDRSTR_rq_b<MVE_st, MVE_memB, "vstrb",
                           VTIs[0].BitsSuffix, 0, VTIs[0].Size>;
  defvar Inst = !cast<Instruction>(NAME);

  foreach VTI = VTIs in {
    def : Pat<(int_arm_mve_vstr_scatter_offset GPR:$base, (VTIs[0].Vec MQPR:$offsets), (VTI.Vec MQPR:$data), 8, 0),
              (Inst MQPR:$data, GPR:$base, MQPR:$offsets)>;
    def : Pat<(int_arm_mve_vstr_scatter_offset_predicated GPR:$base, (VTIs[0].Vec MQPR:$offsets), (VTI.Vec MQPR:$data), 8, 0, (VTI.Pred VCCR:$pred)),
              (Inst MQPR:$data, GPR:$base, MQPR:$offsets, ARMVCCThen, VCCR:$pred)>;
  }
}

// Actually define all the loads and stores in this family.

defm MVE_VLDRBU8_rq : MVE_VLDR_rq_b<[MVE_v16u8,MVE_v16s8]>;
defm MVE_VLDRBU16_rq: MVE_VLDR_rq_b<[MVE_v8u16]>;
defm MVE_VLDRBS16_rq: MVE_VLDR_rq_b<[MVE_v8s16]>;
defm MVE_VLDRBU32_rq: MVE_VLDR_rq_b<[MVE_v4u32]>;
defm MVE_VLDRBS32_rq: MVE_VLDR_rq_b<[MVE_v4s32]>;

defm MVE_VLDRHU16_rq: MVE_VLDR_rq_w<MVE_memH, [MVE_v8u16,MVE_v8s16,MVE_v8f16]>;
defm MVE_VLDRHU32_rq: MVE_VLDR_rq_w<MVE_memH, [MVE_v4u32]>;
defm MVE_VLDRHS32_rq: MVE_VLDR_rq_w<MVE_memH, [MVE_v4s32]>;
defm MVE_VLDRWU32_rq: MVE_VLDR_rq_w<MVE_memW, [MVE_v4u32,MVE_v4s32,MVE_v4f32]>;
defm MVE_VLDRDU64_rq: MVE_VLDR_rq_w<MVE_memD, [MVE_v2u64,MVE_v2s64]>;

defm MVE_VSTRB8_rq  : MVE_VSTR_rq_b<[MVE_v16i8]>;
defm MVE_VSTRB16_rq : MVE_VSTR_rq_b<[MVE_v8i16]>;
defm MVE_VSTRB32_rq : MVE_VSTR_rq_b<[MVE_v4i32]>;

defm MVE_VSTRH16_rq : MVE_VSTR_rq_w<MVE_memH, [MVE_v8i16,MVE_v8f16]>;
defm MVE_VSTRH32_rq : MVE_VSTR_rq_w<MVE_memH, [MVE_v4i32]>;
defm MVE_VSTRW32_rq : MVE_VSTR_rq_w<MVE_memW, [MVE_v4i32,MVE_v4f32]>;
defm MVE_VSTRD64_rq : MVE_VSTR_rq_w<MVE_memD, [MVE_v2i64]>;

// Gather loads / scatter stores whose address operand is of the form
// [Qm,#imm], i.e. a vector containing a full base address for each
// loaded item, plus an immediate offset applied consistently to all
// of them. ('Load/store the same field from this vector of pointers
// to a structure type.')
//
// This family requires the vector lane size to be at least 32 bits
// (so there's room for an address in each lane at all). It has no
// widening/narrowing variants. But it does support preindex
// writeback, in which the address vector is updated to hold the
// addresses actually loaded from.

// Base class.
// Vector-base-plus-immediate gather/scatter encoding.
class MVE_VLDRSTR_qi<MVE_ldst_direction dir, MVE_memsz memsz, bit W, dag wbops,
                     string asm, string wbAsm, string suffix, string cstr = "">
  : MVE_VLDRSTR_base<dir, 1, 1, W, 1, !con(wbops, dir.Oops),
                     !con(dir.Iops, (ins mve_addr_q_shift<memsz.shift>:$addr)),
                     asm, suffix, "$Qd, $addr" # wbAsm, cstr # dir.cstr> {
  bits<11> addr;
  let Inst{23} = addr{7};
  let Inst{19-17} = addr{10-8};
  let Inst{16} = 0;
  let Inst{8} = memsz.encoding{0}; // enough to distinguish 32- from 64-bit
  let Inst{7} = 0;
  let Inst{6-0} = addr{6-0};
}

// Multiclass that generates the non-writeback and writeback variants.
multiclass MVE_VLDRSTR_qi_m<MVE_ldst_direction dir, MVE_memsz memsz,
                            string asm, string suffix> {
  def ""   : MVE_VLDRSTR_qi<dir, memsz, 0, (outs), asm, "", suffix>;
  def _pre : MVE_VLDRSTR_qi<dir, memsz, 1, (outs MQPR:$wb), asm, "!", suffix,
                            "$addr.base = $wb"> {
    let DecoderMethod="DecodeMVE_MEM_3_pre<"#memsz.shift#">";
  }
}

// Multiclasses wrapping that one, adding selection patterns for the
// non-writeback loads and all the stores. (The writeback loads must
// deliver multiple output values, so they have to be selected by C++
// code.)
multiclass MVE_VLDR_qi<MVE_memsz memsz, MVEVectorVTInfo AVTI,
                       list<MVEVectorVTInfo> DVTIs> {
  defm "" : MVE_VLDRSTR_qi_m<MVE_ld, memsz, "vldr" # memsz.MnemonicLetter,
                             "u" # memsz.TypeBits>;
  defvar Inst = !cast<Instruction>(NAME);

  foreach DVTI = DVTIs in {
    def : Pat<(DVTI.Vec (int_arm_mve_vldr_gather_base
                             (AVTI.Vec MQPR:$addr), (i32 imm:$offset))),
              (DVTI.Vec (Inst (AVTI.Vec MQPR:$addr), (i32 imm:$offset)))>;
    def : Pat<(DVTI.Vec (int_arm_mve_vldr_gather_base_predicated
                             (AVTI.Vec MQPR:$addr), (i32 imm:$offset), (AVTI.Pred VCCR:$pred))),
              (DVTI.Vec (Inst (AVTI.Vec MQPR:$addr), (i32 imm:$offset),
                              ARMVCCThen, VCCR:$pred))>;
  }
}
multiclass MVE_VSTR_qi<MVE_memsz memsz, MVEVectorVTInfo AVTI,
                       list<MVEVectorVTInfo> DVTIs> {
  defm "" : MVE_VLDRSTR_qi_m<MVE_st, memsz, "vstr" # memsz.MnemonicLetter,
                             !cast<string>(memsz.TypeBits)>;
  defvar Inst = !cast<Instruction>(NAME);
  defvar InstPre = !cast<Instruction>(NAME # "_pre");

  foreach DVTI = DVTIs in {
    def : Pat<(int_arm_mve_vstr_scatter_base
                   (AVTI.Vec MQPR:$addr), (i32 imm:$offset), (DVTI.Vec MQPR:$data)),
              (Inst (DVTI.Vec MQPR:$data), (AVTI.Vec MQPR:$addr),
                    (i32 imm:$offset))>;
    def : Pat<(int_arm_mve_vstr_scatter_base_predicated
                   (AVTI.Vec MQPR:$addr), (i32 imm:$offset), (DVTI.Vec MQPR:$data), (AVTI.Pred VCCR:$pred)),
              (Inst (DVTI.Vec MQPR:$data), (AVTI.Vec MQPR:$addr),
                    (i32 imm:$offset), ARMVCCThen, VCCR:$pred)>;
    // The writeback scatter stores return the updated address vector,
    // so they are selected onto the _pre instruction.
    def : Pat<(AVTI.Vec (int_arm_mve_vstr_scatter_base_wb
                   (AVTI.Vec MQPR:$addr), (i32 imm:$offset), (DVTI.Vec MQPR:$data))),
              (AVTI.Vec (InstPre (DVTI.Vec MQPR:$data), (AVTI.Vec MQPR:$addr),
                                 (i32 imm:$offset)))>;
    def : Pat<(AVTI.Vec (int_arm_mve_vstr_scatter_base_wb_predicated
                   (AVTI.Vec MQPR:$addr), (i32 imm:$offset), (DVTI.Vec MQPR:$data), (AVTI.Pred VCCR:$pred))),
              (AVTI.Vec (InstPre (DVTI.Vec MQPR:$data), (AVTI.Vec MQPR:$addr),
                                 (i32
imm:$offset), ARMVCCThen, VCCR:$pred))>; 6490 } 6491} 6492 6493// Actual instruction definitions. 6494defm MVE_VLDRWU32_qi: MVE_VLDR_qi<MVE_memW, MVE_v4i32, [MVE_v4i32,MVE_v4f32]>; 6495defm MVE_VLDRDU64_qi: MVE_VLDR_qi<MVE_memD, MVE_v2i64, [MVE_v2i64,MVE_v2f64]>; 6496defm MVE_VSTRW32_qi: MVE_VSTR_qi<MVE_memW, MVE_v4i32, [MVE_v4i32,MVE_v4f32]>; 6497defm MVE_VSTRD64_qi: MVE_VSTR_qi<MVE_memD, MVE_v2i64, [MVE_v2i64,MVE_v2f64]>; 6498 6499// Define aliases for all the instructions where memory size and 6500// vector lane size are the same. These are mnemonic aliases, so they 6501// apply consistently across all of the above families - contiguous 6502// loads, and both the rq and qi types of gather/scatter. 6503// 6504// Rationale: As long as you're loading (for example) 16-bit memory 6505// values into 16-bit vector lanes, you can think of them as signed or 6506// unsigned integers, fp16 or just raw 16-bit blobs and it makes no 6507// difference. So we permit all of vldrh.16, vldrh.u16, vldrh.s16, 6508// vldrh.f16 and treat them all as equivalent to the canonical 6509// spelling (which happens to be .u16 for loads, and just .16 for 6510// stores). 6511 6512foreach vpt_cond = ["", "t", "e"] in 6513foreach memsz = [MVE_memB, MVE_memH, MVE_memW, MVE_memD] in 6514foreach suffix = memsz.suffixes in { 6515 // Define an alias with every suffix in the list, except for the one 6516 // used by the real Instruction record (i.e. the one that all the 6517 // rest are aliases *for*). 
  // Loads canonicalize on memsz.CanonLoadSuffix; skip the self-alias.
  if !ne(suffix, memsz.CanonLoadSuffix) then {
    def : MnemonicAlias<
      "vldr" # memsz.MnemonicLetter # vpt_cond # suffix,
      "vldr" # memsz.MnemonicLetter # vpt_cond # memsz.CanonLoadSuffix>;
  }

  // Stores canonicalize on memsz.CanonStoreSuffix; skip the self-alias.
  if !ne(suffix, memsz.CanonStoreSuffix) then {
    def : MnemonicAlias<
      "vstr" # memsz.MnemonicLetter # vpt_cond # suffix,
      "vstr" # memsz.MnemonicLetter # vpt_cond # memsz.CanonStoreSuffix>;
  }
}

// end of MVE predicable load/store

// VPT (integer): start a predicated block, setting VPR from a vector compare.
// $Mk is the t/e mask, $fc the (restricted) condition code; fc bits are
// scattered across Inst{12}, Inst{7} and (in subclasses) Inst{0}.
class MVE_VPT<string suffix, bits<2> size, dag iops, string asm, list<dag> pattern=[]>
  : MVE_MI<(outs ), iops, NoItinerary, !strconcat("vpt", "${Mk}", ".", suffix), asm, "", pattern> {
  bits<3> fc;
  bits<4> Mk;
  bits<3> Qn;

  let Inst{31-23} = 0b111111100;
  let Inst{22} = Mk{3};
  let Inst{21-20} = size;
  let Inst{19-17} = Qn{2-0};
  let Inst{16} = 0b1;
  let Inst{15-13} = Mk{2-0};
  let Inst{12} = fc{2};
  let Inst{11-8} = 0b1111;
  let Inst{7} = fc{0};
  let Inst{4} = 0b0;

  // Writes the vector predication register; usable under tail predication.
  let Defs = [VPR];
  let validForTailPredication=1;
}

// T1 form: compare two vector registers ($Qn vs $Qm).
class MVE_VPTt1<string suffix, bits<2> size, dag iops>
  : MVE_VPT<suffix, size, iops, "$fc, $Qn, $Qm"> {
  bits<4> Qm;
  bits<4> Mk; // NOTE(review): re-declares the parent's Mk; harmless but redundant.

  let Inst{6} = 0b0;
  let Inst{5} = Qm{3};
  let Inst{3-1} = Qm{2-0};
  let Inst{0} = fc{1};
}

// Sign-independent integer compare (EQ/NE only): fc{2} and fc{1} forced to 0.
class MVE_VPTt1i<string suffix, bits<2> size>
  : MVE_VPTt1<suffix, size,
           (ins vpt_mask:$Mk, MQPR:$Qn, MQPR:$Qm, pred_basic_i:$fc)> {
  let Inst{12} = 0b0;
  let Inst{0} = 0b0;
}

def MVE_VPTv4i32 : MVE_VPTt1i<"i32", 0b10>;
def MVE_VPTv8i16 : MVE_VPTt1i<"i16", 0b01>;
def MVE_VPTv16i8 : MVE_VPTt1i<"i8", 0b00>;

// Unsigned integer compare: fc{2}=0, fc{1} forced to 1.
class MVE_VPTt1u<string suffix, bits<2> size>
  : MVE_VPTt1<suffix, size,
           (ins vpt_mask:$Mk, MQPR:$Qn, MQPR:$Qm, pred_basic_u:$fc)> {
  let Inst{12} = 0b0;
  let Inst{0} = 0b1;
}

def MVE_VPTv4u32 : MVE_VPTt1u<"u32", 0b10>;
def MVE_VPTv8u16 : MVE_VPTt1u<"u16", 0b01>;
def MVE_VPTv16u8 : MVE_VPTt1u<"u8", 0b00>;

// Signed integer compare: fc{2}=1, fc{1} comes from the operand.
class MVE_VPTt1s<string suffix, bits<2> size>
  : MVE_VPTt1<suffix, size,
           (ins vpt_mask:$Mk, MQPR:$Qn, MQPR:$Qm, pred_basic_s:$fc)> {
  let Inst{12} = 0b1;
}

def MVE_VPTv4s32 : MVE_VPTt1s<"s32", 0b10>;
def MVE_VPTv8s16 : MVE_VPTt1s<"s16", 0b01>;
def MVE_VPTv16s8 : MVE_VPTt1s<"s8", 0b00>;

// T2 form: compare a vector against a scalar GPR ($Rm, ZR allowed).
class MVE_VPTt2<string suffix, bits<2> size, dag iops>
  : MVE_VPT<suffix, size, iops,
          "$fc, $Qn, $Rm"> {
  bits<4> Rm;
  bits<3> fc;
  bits<4> Mk;

  let Inst{6} = 0b1;
  let Inst{5} = fc{1};
  let Inst{3-0} = Rm{3-0};
}

// Scalar-operand variants mirror the t1 i/u/s fc-bit conventions above.
class MVE_VPTt2i<string suffix, bits<2> size>
  : MVE_VPTt2<suffix, size,
          (ins vpt_mask:$Mk, MQPR:$Qn, GPRwithZR:$Rm, pred_basic_i:$fc)> {
  let Inst{12} = 0b0;
  let Inst{5} = 0b0;
}

def MVE_VPTv4i32r : MVE_VPTt2i<"i32", 0b10>;
def MVE_VPTv8i16r : MVE_VPTt2i<"i16", 0b01>;
def MVE_VPTv16i8r : MVE_VPTt2i<"i8", 0b00>;

class MVE_VPTt2u<string suffix, bits<2> size>
  : MVE_VPTt2<suffix, size,
          (ins vpt_mask:$Mk, MQPR:$Qn, GPRwithZR:$Rm, pred_basic_u:$fc)> {
  let Inst{12} = 0b0;
  let Inst{5} = 0b1;
}

def MVE_VPTv4u32r : MVE_VPTt2u<"u32", 0b10>;
def MVE_VPTv8u16r : MVE_VPTt2u<"u16", 0b01>;
def MVE_VPTv16u8r : MVE_VPTt2u<"u8", 0b00>;

class MVE_VPTt2s<string suffix, bits<2> size>
  : MVE_VPTt2<suffix, size,
          (ins vpt_mask:$Mk, MQPR:$Qn, GPRwithZR:$Rm, pred_basic_s:$fc)> {
  let Inst{12} = 0b1;
}

def MVE_VPTv4s32r : MVE_VPTt2s<"s32", 0b10>;
def MVE_VPTv8s16r : MVE_VPTt2s<"s16", 0b01>;
def MVE_VPTv16s8r : MVE_VPTt2s<"s8", 0b00>;


// VPT (floating point): same structure as MVE_VPT but with a 1-bit size
// field (f32 vs f16) in Inst{28}; only available with the MVE FP extension.
class MVE_VPTf<string suffix, bit size, dag iops, string asm, list<dag> pattern=[]>
  : MVE_MI<(outs ), iops, NoItinerary, !strconcat("vpt", "${Mk}", ".", suffix), asm,
            "", pattern> {
  bits<3> fc;
  bits<4> Mk;
  bits<3> Qn;

  let Inst{31-29} = 0b111;
  let Inst{28} = size;
  let Inst{27-23} = 0b11100;
  let Inst{22} = Mk{3};
  let Inst{21-20} = 0b11;
  let Inst{19-17} = Qn{2-0};
  let Inst{16} = 0b1;
  let Inst{15-13} = Mk{2-0};
  let Inst{12} = fc{2};
  let Inst{11-8} = 0b1111;
  let Inst{7} = fc{0};
  let Inst{4} = 0b0;

  let Defs = [VPR];
  let Predicates = [HasMVEFloat];
  let validForTailPredication=1;
}

// FP T1 form: vector/vector compare.
class MVE_VPTft1<string suffix, bit size>
  : MVE_VPTf<suffix, size, (ins vpt_mask:$Mk, MQPR:$Qn, MQPR:$Qm, pred_basic_fp:$fc),
          "$fc, $Qn, $Qm"> {
  bits<3> fc;
  bits<4> Qm;

  let Inst{6} = 0b0;
  let Inst{5} = Qm{3};
  let Inst{3-1} = Qm{2-0};
  let Inst{0} = fc{1};
}

def MVE_VPTv4f32 : MVE_VPTft1<"f32", 0b0>;
def MVE_VPTv8f16 : MVE_VPTft1<"f16", 0b1>;

// FP T2 form: vector/scalar compare (ZR allowed as the scalar).
class MVE_VPTft2<string suffix, bit size>
  : MVE_VPTf<suffix, size, (ins vpt_mask:$Mk, MQPR:$Qn, GPRwithZR:$Rm, pred_basic_fp:$fc),
          "$fc, $Qn, $Rm"> {
  bits<3> fc;
  bits<4> Rm;

  let Inst{6} = 0b1;
  let Inst{5} = fc{1};
  let Inst{3-0} = Rm{3-0};
}

def MVE_VPTv4f32r : MVE_VPTft2<"f32", 0b0>;
def MVE_VPTv8f16r : MVE_VPTft2<"f16", 0b1>;

// VPST: start a predicated block using the existing VPR contents (no compare).
def MVE_VPST : MVE_MI<(outs ), (ins vpt_mask:$Mk), NoItinerary,
       !strconcat("vpst", "${Mk}"), "", "", []> {
  bits<4> Mk;

  let Inst{31-23} = 0b111111100;
  let Inst{22} = Mk{3};
  let Inst{21-16} = 0b110001;
  let Inst{15-13} = Mk{2-0};
  let Inst{12-0} = 0b0111101001101;
  let Unpredictable{12} = 0b1;
  let Unpredictable{7} = 0b1;
  let Unpredictable{5} = 0b1;

  // Reads (rather than defines) the vector predication register.
  let Uses = [VPR];
  let validForTailPredication = 1;
}

// VPSEL: per-lane select between $Qn and $Qm under the VPR predicate
// (supplied via the vpred_n predication operand).
def MVE_VPSEL : MVE_p<(outs MQPR:$Qd), (ins MQPR:$Qn, MQPR:$Qm), NoItinerary,
                      "vpsel", "", "$Qd, $Qn, $Qm", vpred_n, "", []> {
  bits<4> Qn;
  bits<4> Qd;
  bits<4> Qm;

  let Inst{28} = 0b1;
  let Inst{25-23} = 0b100;
  let Inst{22} = Qd{3};
  let Inst{21-20} = 0b11;
  let Inst{19-17} = Qn{2-0};
  let Inst{16} = 0b1;
  let Inst{15-13} = Qd{2-0};
  let Inst{12-9} = 0b0111;
  let Inst{8} = 0b1;
  let Inst{7} = Qn{3};
  let Inst{6} = 0b0;
  let Inst{5} = Qm{3};
  let Inst{4} = 0b0;
  let Inst{3-1} = Qm{2-0};
  let Inst{0} = 0b1;
}

// VPSEL is lane-size agnostic, so accept any type suffix in assembly.
foreach suffix = ["s8", "s16", "s32", "u8", "u16", "u32",
                  "i8", "i16", "i32", "f16", "f32"] in
def : MVEInstAlias<"vpsel${vp}." # suffix # "\t$Qd, $Qn, $Qm",
                   (MVE_VPSEL MQPR:$Qd, MQPR:$Qn, MQPR:$Qm, vpred_n:$vp)>;

let Predicates = [HasMVEInt] in {
  // vselect on a v*i1 predicate maps directly onto VPSEL.
  def : Pat<(v16i8 (vselect (v16i1 VCCR:$pred), (v16i8 MQPR:$v1), (v16i8 MQPR:$v2))),
            (v16i8 (MVE_VPSEL MQPR:$v1, MQPR:$v2, ARMVCCNone, VCCR:$pred))>;
  def : Pat<(v8i16 (vselect (v8i1 VCCR:$pred), (v8i16 MQPR:$v1), (v8i16 MQPR:$v2))),
            (v8i16 (MVE_VPSEL MQPR:$v1, MQPR:$v2, ARMVCCNone, VCCR:$pred))>;
  def : Pat<(v4i32 (vselect (v4i1 VCCR:$pred), (v4i32 MQPR:$v1), (v4i32 MQPR:$v2))),
            (v4i32 (MVE_VPSEL MQPR:$v1, MQPR:$v2, ARMVCCNone, VCCR:$pred))>;

  def : Pat<(v8f16 (vselect (v8i1 VCCR:$pred), (v8f16 MQPR:$v1), (v8f16 MQPR:$v2))),
            (v8f16 (MVE_VPSEL MQPR:$v1, MQPR:$v2, ARMVCCNone, VCCR:$pred))>;
  def : Pat<(v4f32 (vselect (v4i1 VCCR:$pred), (v4f32 MQPR:$v1), (v4f32 MQPR:$v2))),
            (v4f32 (MVE_VPSEL MQPR:$v1, MQPR:$v2, ARMVCCNone, VCCR:$pred))>;

  // vselect on an integer-vector condition: materialize the predicate first
  // with a VCMP ... != 0 against a zero immediate, then VPSEL.
  def : Pat<(v16i8 (vselect (v16i8 MQPR:$pred), (v16i8 MQPR:$v1), (v16i8 MQPR:$v2))),
            (v16i8 (MVE_VPSEL MQPR:$v1, MQPR:$v2, ARMVCCNone,
                    (MVE_VCMPi8 (v16i8 MQPR:$pred), (MVE_VMOVimmi8 0), ARMCCne)))>;
  def : Pat<(v8i16 (vselect (v8i16 MQPR:$pred), (v8i16 MQPR:$v1), (v8i16 MQPR:$v2))),
            (v8i16 (MVE_VPSEL MQPR:$v1, MQPR:$v2, ARMVCCNone,
                    (MVE_VCMPi16 (v8i16 MQPR:$pred), (MVE_VMOVimmi16 0), ARMCCne)))>;
  def : Pat<(v4i32 (vselect (v4i32 MQPR:$pred), (v4i32 MQPR:$v1), (v4i32 MQPR:$v2))),
            (v4i32 (MVE_VPSEL MQPR:$v1, MQPR:$v2, ARMVCCNone,
                    (MVE_VCMPi32 (v4i32 MQPR:$pred), (MVE_VMOVimmi32 0), ARMCCne)))>;

  def : Pat<(v8f16 (vselect (v8i16 MQPR:$pred), (v8f16 MQPR:$v1), (v8f16 MQPR:$v2))),
            (v8f16 (MVE_VPSEL MQPR:$v1, MQPR:$v2, ARMVCCNone,
                    (MVE_VCMPi16 (v8i16 MQPR:$pred), (MVE_VMOVimmi16 0), ARMCCne)))>;
  def : Pat<(v4f32 (vselect (v4i32 MQPR:$pred), (v4f32 MQPR:$v1), (v4f32 MQPR:$v2))),
            (v4f32 (MVE_VPSEL MQPR:$v1, MQPR:$v2, ARMVCCNone,
                    (MVE_VCMPi32 (v4i32 MQPR:$pred), (MVE_VMOVimmi32 0), ARMCCne)))>;

  // Pred <-> Int
  // zext of a predicate: select between splat-1 and splat-0.
  def : Pat<(v16i8 (zext  (v16i1 VCCR:$pred))),
            (v16i8 (MVE_VPSEL (MVE_VMOVimmi8 1), (MVE_VMOVimmi8 0), ARMVCCNone, VCCR:$pred))>;
  def : Pat<(v8i16 (zext  (v8i1  VCCR:$pred))),
            (v8i16 (MVE_VPSEL (MVE_VMOVimmi16 1), (MVE_VMOVimmi16 0), ARMVCCNone, VCCR:$pred))>;
  def : Pat<(v4i32 (zext  (v4i1  VCCR:$pred))),
            (v4i32 (MVE_VPSEL (MVE_VMOVimmi32 1), (MVE_VMOVimmi32 0), ARMVCCNone, VCCR:$pred))>;

  // sext of a predicate: select between all-ones and zero. NOTE(review): the
  // i8 255 splat is used for all lane widths — a 255-byte splat is all-ones
  // regardless of lane size, so this appears intentional; confirm upstream.
  def : Pat<(v16i8 (sext  (v16i1 VCCR:$pred))),
            (v16i8 (MVE_VPSEL (MVE_VMOVimmi8 255), (MVE_VMOVimmi8 0), ARMVCCNone, VCCR:$pred))>;
  def : Pat<(v8i16 (sext  (v8i1  VCCR:$pred))),
            (v8i16 (MVE_VPSEL (MVE_VMOVimmi8 255), (MVE_VMOVimmi16 0), ARMVCCNone, VCCR:$pred))>;
  def : Pat<(v4i32 (sext  (v4i1  VCCR:$pred))),
            (v4i32 (MVE_VPSEL (MVE_VMOVimmi8 255), (MVE_VMOVimmi32 0), ARMVCCNone, VCCR:$pred))>;

  // anyext: any value works, so reuse the cheap zext form.
  def : Pat<(v16i8 (anyext (v16i1 VCCR:$pred))),
            (v16i8 (MVE_VPSEL (MVE_VMOVimmi8 1), (MVE_VMOVimmi8 0), ARMVCCNone, VCCR:$pred))>;
  def : Pat<(v8i16 (anyext (v8i1  VCCR:$pred))),
            (v8i16 (MVE_VPSEL (MVE_VMOVimmi16 1), (MVE_VMOVimmi16 0), ARMVCCNone, VCCR:$pred))>;
  def : Pat<(v4i32 (anyext (v4i1  VCCR:$pred))),
            (v4i32 (MVE_VPSEL (MVE_VMOVimmi32 1), (MVE_VMOVimmi32 0), ARMVCCNone, VCCR:$pred))>;
}

let Predicates = [HasMVEFloat] in {
  // Pred <-> Float
  // The integer constants below are VMOV-immediate encodings, not raw values.
  // 112 is 1.0 in float
  def : Pat<(v4f32 (uint_to_fp (v4i1 VCCR:$pred))),
            (v4f32 (MVE_VPSEL (v4f32 (MVE_VMOVimmf32 112)), (v4f32 (MVE_VMOVimmi32 0)), ARMVCCNone, VCCR:$pred))>;
  // 2620 is 1.0 in half
  def : Pat<(v8f16 (uint_to_fp (v8i1 VCCR:$pred))),
            (v8f16 (MVE_VPSEL (v8f16 (MVE_VMOVimmi16 2620)), (v8f16 (MVE_VMOVimmi16 0)), ARMVCCNone, VCCR:$pred))>;
  // 240 is -1.0 in float
  def : Pat<(v4f32 (sint_to_fp (v4i1 VCCR:$pred))),
            (v4f32 (MVE_VPSEL (v4f32 (MVE_VMOVimmf32 240)), (v4f32 (MVE_VMOVimmi32 0)), ARMVCCNone, VCCR:$pred))>;
  // 2748 is -1.0 in half
  def : Pat<(v8f16 (sint_to_fp (v8i1 VCCR:$pred))),
            (v8f16 (MVE_VPSEL (v8f16 (MVE_VMOVimmi16 2748)), (v8f16 (MVE_VMOVimmi16 0)), ARMVCCNone, VCCR:$pred))>;

  // Float -> predicate: compare against zero (ZR scalar), lane != 0 sets lane.
  def : Pat<(v4i1 (fp_to_uint (v4f32 MQPR:$v1))),
            (v4i1 (MVE_VCMPf32r (v4f32 MQPR:$v1), ZR, ARMCCne))>;
  def : Pat<(v8i1 (fp_to_uint (v8f16 MQPR:$v1))),
            (v8i1 (MVE_VCMPf16r (v8f16 MQPR:$v1), ZR, ARMCCne))>;
  def : Pat<(v4i1 (fp_to_sint (v4f32 MQPR:$v1))),
            (v4i1 (MVE_VCMPf32r (v4f32 MQPR:$v1), ZR, ARMCCne))>;
  def : Pat<(v8i1 (fp_to_sint (v8f16 MQPR:$v1))),
            (v8i1 (MVE_VCMPf16r (v8f16 MQPR:$v1), ZR, ARMCCne))>;
}

// VPNOT: invert the vector predicate. Modelled with an explicit in/out VCCR
// pair (fixed 32-bit encoding; custom decoder).
def MVE_VPNOT : MVE_p<(outs VCCR:$P0), (ins VCCR:$P0_in), NoItinerary,
                      "vpnot", "", "", vpred_n, "", []> {
  let Inst{31-0} = 0b11111110001100010000111101001101;
  let Unpredictable{19-17} = 0b111;
  let Unpredictable{12} = 0b1;
  let Unpredictable{7} = 0b1;
  let Unpredictable{5} = 0b1;

  let Constraints = "";
  let DecoderMethod = "DecodeMVEVPNOT";
}

let Predicates = [HasMVEInt] in {
  // xor with the all-ones predicate (predicate_cast of 0xffff) is VPNOT.
  def : Pat<(v4i1 (xor (v4i1 VCCR:$pred), (v4i1 (predicate_cast (i32 65535))))),
            (v4i1 (MVE_VPNOT (v4i1 VCCR:$pred)))>;
  def : Pat<(v8i1 (xor (v8i1 VCCR:$pred), (v8i1 (predicate_cast (i32 65535))))),
            (v8i1 (MVE_VPNOT (v8i1 VCCR:$pred)))>;
  def : Pat<(v16i1 (xor (v16i1 VCCR:$pred), (v16i1 (predicate_cast (i32 65535))))),
            (v16i1 (MVE_VPNOT (v16i1 VCCR:$pred)))>;
}


// Tail-predicated low-overhead loop start (DLSTP/WLSTP base class).
class MVE_loltp_start<dag iops,
                      string asm, string ops, bits<2> size>
  : t2LOL<(outs GPRlr:$LR), iops, asm, ops> {
  bits<4> Rn; // element count register
  let Predicates = [HasMVEInt];
  let Inst{22} = 0b0;
  let Inst{21-20} = size; // lane size for the tail-predicated loop
  let Inst{19-16} = Rn{3-0};
  let Inst{12} = 0b0;
}

// DLSTP: do-loop start, no branch.
class MVE_DLSTP<string asm, bits<2> size>
  : MVE_loltp_start<(ins rGPR:$Rn), asm, "$LR, $Rn", size> {
  let Inst{13} = 0b1;
  let Inst{11-1} = 0b00000000000;
  let Unpredictable{10-1} = 0b1111111111;
}

// WLSTP: while-loop start; branches past the loop if the count is zero.
class MVE_WLSTP<string asm, bits<2> size>
  : MVE_loltp_start<(ins rGPR:$Rn, wlslabel_u11:$label),
                    asm, "$LR, $Rn, $label", size> {
  bits<11> label;
  let Inst{13} = 0b0;
  let Inst{11} = label{0};
  let Inst{10-1} = label{10-1};
  let isBranch = 1;
  let isTerminator = 1;
}

// Pseudo expanded by custom inserter into an MVE memcpy loop.
def SDT_MVEMEMCPYLOOPNODE
    : SDTypeProfile<0, 3, [SDTCisPtrTy<0>, SDTCisPtrTy<1>, SDTCisVT<2, i32>]>;
def MVE_MEMCPYLOOPNODE : SDNode<"ARMISD::MEMCPYLOOP", SDT_MVEMEMCPYLOOPNODE,
                                [SDNPHasChain, SDNPMayStore, SDNPMayLoad]>;

let usesCustomInserter = 1, hasNoSchedulingInfo = 1, Defs = [CPSR] in {
  def MVE_MEMCPYLOOPINST : PseudoInst<(outs),
        (ins rGPR:$dst, rGPR:$src, rGPR:$sz),
        NoItinerary,
        [(MVE_MEMCPYLOOPNODE rGPR:$dst, rGPR:$src, rGPR:$sz)]>;
}

// Pseudo expanded by custom inserter into an MVE memset loop; the fill value
// is a v16i8 splat vector rather than a scalar.
def SDT_MVEMEMSETLOOPNODE
    : SDTypeProfile<0, 3, [SDTCisPtrTy<0>, SDTCisVT<1, v16i8>, SDTCisVT<2, i32>]>;
def MVE_MEMSETLOOPNODE : SDNode<"ARMISD::MEMSETLOOP", SDT_MVEMEMSETLOOPNODE,
                                [SDNPHasChain, SDNPMayStore, SDNPMayLoad]>;

let usesCustomInserter = 1, hasNoSchedulingInfo = 1, Defs = [CPSR] in {
  def MVE_MEMSETLOOPINST : PseudoInst<(outs),
        (ins rGPR:$dst, MQPR:$src, rGPR:$sz),
        NoItinerary,
        [(MVE_MEMSETLOOPNODE rGPR:$dst, MQPR:$src, rGPR:$sz)]>;
}

def MVE_DLSTP_8  : MVE_DLSTP<"dlstp.8",  0b00>;
def MVE_DLSTP_16 : MVE_DLSTP<"dlstp.16", 0b01>;
def MVE_DLSTP_32 : MVE_DLSTP<"dlstp.32", 0b10>;
def MVE_DLSTP_64 : MVE_DLSTP<"dlstp.64", 0b11>;

def MVE_WLSTP_8  : MVE_WLSTP<"wlstp.8",  0b00>;
def MVE_WLSTP_16 : MVE_WLSTP<"wlstp.16", 0b01>;
def MVE_WLSTP_32 : MVE_WLSTP<"wlstp.32", 0b10>;
def MVE_WLSTP_64 : MVE_WLSTP<"wlstp.64", 0b11>;

// Tail-predicated loop end (LETP/LCTP base class).
class MVE_loltp_end<dag oops, dag iops, string asm, string ops>
  : t2LOL<oops, iops, asm, ops> {
  let Predicates = [HasMVEInt];
  let Inst{22-21} = 0b00;
  let Inst{19-16} = 0b1111;
  let Inst{12} = 0b0;
}

// LETP: loop-end branch back to the loop head, updating LR.
def MVE_LETP : MVE_loltp_end<(outs GPRlr:$LRout),
                             (ins GPRlr:$LRin, lelabel_u11:$label),
                             "letp", "$LRin, $label"> {
  bits<11> label;
  let Inst{20} = 0b1;
  let Inst{13} = 0b0;
  let Inst{11} = label{0};
  let Inst{10-1} = label{10-1};
  let isBranch = 1;
  let isTerminator = 1;
}

// LCTP: leave tail-predicated loop state (predicable, no branch).
def MVE_LCTP : MVE_loltp_end<(outs), (ins pred:$p), "lctp${p}", ""> {
  let Inst{20} = 0b0;
  let Inst{13} = 0b1;
  let Inst{11-1} = 0b00000000000;
  let Unpredictable{21-20} = 0b11;
  let Unpredictable{11-1} = 0b11111111111;
}


//===----------------------------------------------------------------------===//
// Patterns
//===----------------------------------------------------------------------===//

// PatFrags for loads and stores. Often trying to keep semi-consistent names.
// Pre/post-indexed stores with a minimum alignment guarantee.
def aligned32_pre_store : PatFrag<(ops node:$val, node:$ptr, node:$offset),
                                  (pre_store node:$val, node:$ptr, node:$offset), [{
  return cast<StoreSDNode>(N)->getAlignment() >= 4;
}]>;
def aligned32_post_store : PatFrag<(ops node:$val, node:$ptr, node:$offset),
                                   (post_store node:$val, node:$ptr, node:$offset), [{
  return cast<StoreSDNode>(N)->getAlignment() >= 4;
}]>;
def aligned16_pre_store : PatFrag<(ops node:$val, node:$ptr, node:$offset),
                                  (pre_store node:$val, node:$ptr, node:$offset), [{
  return cast<StoreSDNode>(N)->getAlignment() >= 2;
}]>;
def aligned16_post_store : PatFrag<(ops node:$val, node:$ptr, node:$offset),
                                   (post_store node:$val, node:$ptr, node:$offset), [{
  return cast<StoreSDNode>(N)->getAlignment() >= 2;
}]>;


// Masked loads keyed by memory scalar type (i8 needs no alignment check).
def aligned_maskedloadvi8 : PatFrag<(ops node:$ptr, node:$pred, node:$passthru),
                                    (masked_ld node:$ptr, undef, node:$pred, node:$passthru), [{
  auto *Ld = cast<MaskedLoadSDNode>(N);
  return Ld->getMemoryVT().getScalarType() == MVT::i8;
}]>;
// Sign/zero/any-extending refinements of the i8 masked load.
def aligned_sextmaskedloadvi8 : PatFrag<(ops node:$ptr, node:$pred, node:$passthru),
                                        (aligned_maskedloadvi8 node:$ptr, node:$pred, node:$passthru), [{
  return cast<MaskedLoadSDNode>(N)->getExtensionType() == ISD::SEXTLOAD;
}]>;
def aligned_zextmaskedloadvi8 : PatFrag<(ops node:$ptr, node:$pred, node:$passthru),
                                        (aligned_maskedloadvi8 node:$ptr, node:$pred, node:$passthru), [{
  return cast<MaskedLoadSDNode>(N)->getExtensionType() == ISD::ZEXTLOAD;
}]>;
def aligned_extmaskedloadvi8 : PatFrag<(ops node:$ptr, node:$pred, node:$passthru),
                                       (aligned_maskedloadvi8 node:$ptr, node:$pred, node:$passthru), [{
  auto *Ld = cast<MaskedLoadSDNode>(N);
  EVT ScalarVT = Ld->getMemoryVT().getScalarType();
  return ScalarVT.isInteger() && Ld->getExtensionType() == ISD::EXTLOAD;
}]>;
// 16-bit masked load: i16 or f16 scalar, at least 2-byte aligned.
def aligned_maskedloadvi16: PatFrag<(ops node:$ptr, node:$pred, node:$passthru),
                                    (masked_ld node:$ptr, undef, node:$pred, node:$passthru), [{
  auto *Ld = cast<MaskedLoadSDNode>(N);
  EVT ScalarVT = Ld->getMemoryVT().getScalarType();
  return (ScalarVT == MVT::i16 || ScalarVT == MVT::f16) && Ld->getAlignment() >= 2;
}]>;
def aligned_sextmaskedloadvi16 : PatFrag<(ops node:$ptr, node:$pred, node:$passthru),
                                         (aligned_maskedloadvi16 node:$ptr, node:$pred, node:$passthru), [{
  return cast<MaskedLoadSDNode>(N)->getExtensionType() == ISD::SEXTLOAD;
}]>;
def aligned_zextmaskedloadvi16 : PatFrag<(ops node:$ptr, node:$pred, node:$passthru),
                                         (aligned_maskedloadvi16 node:$ptr, node:$pred, node:$passthru), [{
  return cast<MaskedLoadSDNode>(N)->getExtensionType() == ISD::ZEXTLOAD;
}]>;
def aligned_extmaskedloadvi16 : PatFrag<(ops node:$ptr, node:$pred, node:$passthru),
                                        (aligned_maskedloadvi16 node:$ptr, node:$pred, node:$passthru), [{
  auto *Ld = cast<MaskedLoadSDNode>(N);
  EVT ScalarVT = Ld->getMemoryVT().getScalarType();
  return ScalarVT.isInteger() && Ld->getExtensionType() == ISD::EXTLOAD;
}]>;
// 32-bit masked load: i32 or f32 scalar, at least 4-byte aligned.
def aligned_maskedloadvi32: PatFrag<(ops node:$ptr, node:$pred, node:$passthru),
                                    (masked_ld node:$ptr, undef, node:$pred, node:$passthru), [{
  auto *Ld = cast<MaskedLoadSDNode>(N);
  EVT ScalarVT = Ld->getMemoryVT().getScalarType();
  return (ScalarVT == MVT::i32 || ScalarVT == MVT::f32) && Ld->getAlignment() >= 4;
}]>;

// Masked stores keyed by memory scalar type, mirroring the loads above.
def aligned_maskedstvi8 : PatFrag<(ops node:$val, node:$ptr, node:$pred),
                                  (masked_st node:$val, node:$ptr, undef, node:$pred), [{
  return cast<MaskedStoreSDNode>(N)->getMemoryVT().getScalarType() == MVT::i8;
}]>;
def aligned_maskedstvi16 : PatFrag<(ops node:$val, node:$ptr, node:$pred),
                                   (masked_st node:$val, node:$ptr, undef, node:$pred), [{
  auto *St = cast<MaskedStoreSDNode>(N);
  EVT ScalarVT = St->getMemoryVT().getScalarType();
  return (ScalarVT == MVT::i16 || ScalarVT == MVT::f16) && St->getAlignment()
>= 2;
}]>;
def aligned_maskedstvi32 : PatFrag<(ops node:$val, node:$ptr, node:$pred),
                                   (masked_st node:$val, node:$ptr, undef, node:$pred), [{
  auto *St = cast<MaskedStoreSDNode>(N);
  EVT ScalarVT = St->getMemoryVT().getScalarType();
  return (ScalarVT == MVT::i32 || ScalarVT == MVT::f32) && St->getAlignment() >= 4;
}]>;

// Pre/post-indexed masked stores, classified by addressing mode.
def pre_maskedstore : PatFrag<(ops node:$val, node:$base, node:$offset, node:$mask),
                              (masked_st node:$val, node:$base, node:$offset, node:$mask), [{
  ISD::MemIndexedMode AM = cast<MaskedStoreSDNode>(N)->getAddressingMode();
  return AM == ISD::PRE_INC || AM == ISD::PRE_DEC;
}]>;
def post_maskedstore : PatFrag<(ops node:$val, node:$base, node:$offset, node:$mask),
                               (masked_st node:$val, node:$base, node:$offset, node:$mask), [{
  ISD::MemIndexedMode AM = cast<MaskedStoreSDNode>(N)->getAddressingMode();
  return AM == ISD::POST_INC || AM == ISD::POST_DEC;
}]>;
// Indexed masked stores with the per-size type/alignment checks layered on.
def aligned_pre_maskedstorevi8 : PatFrag<(ops node:$val, node:$ptr, node:$offset, node:$mask),
                                         (pre_maskedstore node:$val, node:$ptr, node:$offset, node:$mask), [{
  return cast<MaskedStoreSDNode>(N)->getMemoryVT().getScalarType() == MVT::i8;
}]>;
def aligned_post_maskedstorevi8 : PatFrag<(ops node:$val, node:$ptr, node:$offset, node:$mask),
                                          (post_maskedstore node:$val, node:$ptr, node:$offset, node:$mask), [{
  return cast<MaskedStoreSDNode>(N)->getMemoryVT().getScalarType() == MVT::i8;
}]>;
def aligned_pre_maskedstorevi16 : PatFrag<(ops node:$val, node:$ptr, node:$offset, node:$mask),
                                          (pre_maskedstore node:$val, node:$ptr, node:$offset, node:$mask), [{
  auto *St = cast<MaskedStoreSDNode>(N);
  EVT ScalarVT = St->getMemoryVT().getScalarType();
  return (ScalarVT == MVT::i16 || ScalarVT == MVT::f16) && St->getAlignment() >= 2;
}]>;
def aligned_post_maskedstorevi16 : PatFrag<(ops node:$val, node:$ptr, node:$offset, node:$mask),
                                           (post_maskedstore node:$val, node:$ptr, node:$offset, node:$mask), [{
  auto *St = cast<MaskedStoreSDNode>(N);
  EVT ScalarVT = St->getMemoryVT().getScalarType();
  return (ScalarVT == MVT::i16 || ScalarVT == MVT::f16) && St->getAlignment() >= 2;
}]>;
def aligned_pre_maskedstorevi32 : PatFrag<(ops node:$val, node:$ptr, node:$offset, node:$mask),
                                          (pre_maskedstore node:$val, node:$ptr, node:$offset, node:$mask), [{
  auto *St = cast<MaskedStoreSDNode>(N);
  EVT ScalarVT = St->getMemoryVT().getScalarType();
  return (ScalarVT == MVT::i32 || ScalarVT == MVT::f32) && St->getAlignment() >= 4;
}]>;
def aligned_post_maskedstorevi32 : PatFrag<(ops node:$val, node:$ptr, node:$offset, node:$mask),
                                           (post_maskedstore node:$val, node:$ptr, node:$offset, node:$mask), [{
  auto *St = cast<MaskedStoreSDNode>(N);
  EVT ScalarVT = St->getMemoryVT().getScalarType();
  return (ScalarVT == MVT::i32 || ScalarVT == MVT::f32) && St->getAlignment() >= 4;
}]>;


// PatFrags for "Aligned" extending / truncating

// i8 accesses are always sufficiently aligned; no MinAlignment needed.
def aligned_extloadvi8  : PatFrag<(ops node:$ptr), (extloadvi8 node:$ptr)>;
def aligned_sextloadvi8 : PatFrag<(ops node:$ptr), (sextloadvi8 node:$ptr)>;
def aligned_zextloadvi8 : PatFrag<(ops node:$ptr), (zextloadvi8 node:$ptr)>;

def aligned_truncstvi8 : PatFrag<(ops node:$val, node:$ptr),
                                 (truncstorevi8 node:$val, node:$ptr)>;
def aligned_post_truncstvi8 : PatFrag<(ops node:$val, node:$base, node:$offset),
                                      (post_truncstvi8 node:$val, node:$base, node:$offset)>;
def aligned_pre_truncstvi8 : PatFrag<(ops node:$val, node:$base, node:$offset),
                                     (pre_truncstvi8 node:$val, node:$base, node:$offset)>;

// 16-bit forms require 2-byte alignment, expressed via MinAlignment.
let MinAlignment = 2 in {
  def aligned_extloadvi16  : PatFrag<(ops node:$ptr), (extloadvi16 node:$ptr)>;
  def aligned_sextloadvi16 : PatFrag<(ops node:$ptr), (sextloadvi16 node:$ptr)>;
  def aligned_zextloadvi16 : PatFrag<(ops node:$ptr), (zextloadvi16 node:$ptr)>;

  def
aligned_truncstvi16 : PatFrag<(ops node:$val, node:$ptr),
                              (truncstorevi16 node:$val, node:$ptr)>;
  def aligned_post_truncstvi16 : PatFrag<(ops node:$val, node:$base, node:$offset),
                                         (post_truncstvi16 node:$val, node:$base, node:$offset)>;
  def aligned_pre_truncstvi16 : PatFrag<(ops node:$val, node:$base, node:$offset),
                                        (pre_truncstvi16 node:$val, node:$base, node:$offset)>;
}

// Truncating masked stores, with per-size type/alignment refinements.
def truncmaskedst : PatFrag<(ops node:$val, node:$base, node:$pred),
                            (masked_st node:$val, node:$base, undef, node:$pred), [{
  return cast<MaskedStoreSDNode>(N)->isTruncatingStore();
}]>;
def aligned_truncmaskedstvi8 : PatFrag<(ops node:$val, node:$base, node:$pred),
                                       (truncmaskedst node:$val, node:$base, node:$pred), [{
  return cast<MaskedStoreSDNode>(N)->getMemoryVT().getScalarType() == MVT::i8;
}]>;
def aligned_truncmaskedstvi16 : PatFrag<(ops node:$val, node:$base, node:$pred),
                                        (truncmaskedst node:$val, node:$base, node:$pred), [{
  auto *St = cast<MaskedStoreSDNode>(N);
  EVT ScalarVT = St->getMemoryVT().getScalarType();
  return (ScalarVT == MVT::i16 || ScalarVT == MVT::f16) && St->getAlignment() >= 2;
}]>;
// Pre-indexed truncating masked store.
def pre_truncmaskedst : PatFrag<(ops node:$val, node:$base, node:$offset, node:$pred),
                                (masked_st node:$val, node:$base, node:$offset, node:$pred), [{
  ISD::MemIndexedMode AM = cast<MaskedStoreSDNode>(N)->getAddressingMode();
  return cast<MaskedStoreSDNode>(N)->isTruncatingStore() && (AM == ISD::PRE_INC || AM == ISD::PRE_DEC);
}]>;
def aligned_pre_truncmaskedstvi8 : PatFrag<(ops node:$val, node:$base, node:$offset, node:$pred),
                                           (pre_truncmaskedst node:$val, node:$base, node:$offset, node:$pred), [{
  return cast<MaskedStoreSDNode>(N)->getMemoryVT().getScalarType() == MVT::i8;
}]>;
def aligned_pre_truncmaskedstvi16 : PatFrag<(ops node:$val, node:$base, node:$offset, node:$pred),
                                            (pre_truncmaskedst node:$val, node:$base, node:$offset, node:$pred), [{
  auto *St = cast<MaskedStoreSDNode>(N);
  EVT ScalarVT = St->getMemoryVT().getScalarType();
  return (ScalarVT == MVT::i16 || ScalarVT == MVT::f16) && St->getAlignment() >= 2;
}]>;
// Post-indexed truncating masked store.
def post_truncmaskedst : PatFrag<(ops node:$val, node:$base, node:$offset, node:$postd),
                                 (masked_st node:$val, node:$base, node:$offset, node:$postd), [{
  ISD::MemIndexedMode AM = cast<MaskedStoreSDNode>(N)->getAddressingMode();
  return cast<MaskedStoreSDNode>(N)->isTruncatingStore() && (AM == ISD::POST_INC || AM == ISD::POST_DEC);
}]>;
def aligned_post_truncmaskedstvi8 : PatFrag<(ops node:$val, node:$base, node:$offset, node:$postd),
                                            (post_truncmaskedst node:$val, node:$base, node:$offset, node:$postd), [{
  return cast<MaskedStoreSDNode>(N)->getMemoryVT().getScalarType() == MVT::i8;
}]>;
def aligned_post_truncmaskedstvi16 : PatFrag<(ops node:$val, node:$base, node:$offset, node:$postd),
                                             (post_truncmaskedst node:$val, node:$base, node:$offset, node:$postd), [{
  auto *St = cast<MaskedStoreSDNode>(N);
  EVT ScalarVT = St->getMemoryVT().getScalarType();
  return (ScalarVT == MVT::i16 || ScalarVT == MVT::f16) && St->getAlignment() >= 2;
}]>;

// Load/store patterns

// Map a (possibly masked) store of type Ty through a given StoreKind PatFrag
// onto the register+imm7 contiguous store instruction.
class MVE_vector_store_typed<ValueType Ty, Instruction RegImmInst,
                             PatFrag StoreKind, int shift>
  : Pat<(StoreKind (Ty MQPR:$val), t2addrmode_imm7<shift>:$addr),
        (RegImmInst (Ty MQPR:$val), t2addrmode_imm7<shift>:$addr)>;

class MVE_vector_maskedstore_typed<ValueType Ty, Instruction RegImmInst,
                                   PatFrag StoreKind, int shift>
  : Pat<(StoreKind (Ty MQPR:$val), t2addrmode_imm7<shift>:$addr, VCCR:$pred),
        (RegImmInst (Ty MQPR:$val), t2addrmode_imm7<shift>:$addr, ARMVCCThen, VCCR:$pred)>;

// Instantiate the store pattern for every 128-bit vector type.
multiclass MVE_vector_store<Instruction RegImmInst, PatFrag StoreKind,
                            int shift> {
  def : MVE_vector_store_typed<v16i8, RegImmInst, StoreKind, shift>;
  def : MVE_vector_store_typed<v8i16, RegImmInst,
StoreKind, shift>;
  def : MVE_vector_store_typed<v8f16, RegImmInst, StoreKind, shift>;
  def : MVE_vector_store_typed<v4i32, RegImmInst, StoreKind, shift>;
  def : MVE_vector_store_typed<v4f32, RegImmInst, StoreKind, shift>;
  def : MVE_vector_store_typed<v2i64, RegImmInst, StoreKind, shift>;
  def : MVE_vector_store_typed<v2f64, RegImmInst, StoreKind, shift>;
}

// Map a load of vector type Ty matching LoadKind onto instruction
// RegImmInst with a 7-bit immediate offset shifted by `shift`.
class MVE_vector_load_typed<ValueType Ty, Instruction RegImmInst,
                            PatFrag LoadKind, int shift>
  : Pat<(Ty (LoadKind t2addrmode_imm7<shift>:$addr)),
        (Ty (RegImmInst t2addrmode_imm7<shift>:$addr))>;

// Masked load with a zero passthru (ARMvmovImm 0), selected to the
// "then"-predicated load form.
class MVE_vector_maskedload_typed<ValueType Ty, Instruction RegImmInst,
                                  PatFrag LoadKind, int shift>
  : Pat<(Ty (LoadKind t2addrmode_imm7<shift>:$addr, VCCR:$pred, (Ty (ARMvmovImm (i32 0))))),
        (Ty (RegImmInst t2addrmode_imm7<shift>:$addr, ARMVCCThen, VCCR:$pred))>;

// Instantiate the load pattern for every 128-bit vector type.
multiclass MVE_vector_load<Instruction RegImmInst, PatFrag LoadKind,
                           int shift> {
  def : MVE_vector_load_typed<v16i8, RegImmInst, LoadKind, shift>;
  def : MVE_vector_load_typed<v8i16, RegImmInst, LoadKind, shift>;
  def : MVE_vector_load_typed<v8f16, RegImmInst, LoadKind, shift>;
  def : MVE_vector_load_typed<v4i32, RegImmInst, LoadKind, shift>;
  def : MVE_vector_load_typed<v4f32, RegImmInst, LoadKind, shift>;
  def : MVE_vector_load_typed<v2i64, RegImmInst, LoadKind, shift>;
  def : MVE_vector_load_typed<v2f64, RegImmInst, LoadKind, shift>;
}

// Pre/post-indexed store: base register in tGPR plus a 7-bit offset.
class MVE_vector_offset_store_typed<ValueType Ty, Instruction Opcode,
                                    PatFrag StoreKind, int shift>
  : Pat<(StoreKind (Ty MQPR:$Rt), tGPR:$Rn, t2am_imm7_offset<shift>:$addr),
        (Opcode MQPR:$Rt, tGPR:$Rn, t2am_imm7_offset<shift>:$addr)>;

// Masked variant of the pre/post-indexed store pattern.
class MVE_vector_offset_maskedstore_typed<ValueType Ty, Instruction Opcode,
                                          PatFrag StoreKind, int shift>
  : Pat<(StoreKind (Ty MQPR:$Rt), tGPR:$Rn, t2am_imm7_offset<shift>:$addr, VCCR:$pred),
        (Opcode MQPR:$Rt, tGPR:$Rn,
t2am_imm7_offset<shift>:$addr, ARMVCCThen, VCCR:$pred)>;

// Instantiate the pre/post-indexed store pattern for every vector type.
multiclass MVE_vector_offset_store<Instruction RegImmInst, PatFrag StoreKind,
                                   int shift> {
  def : MVE_vector_offset_store_typed<v16i8, RegImmInst, StoreKind, shift>;
  def : MVE_vector_offset_store_typed<v8i16, RegImmInst, StoreKind, shift>;
  def : MVE_vector_offset_store_typed<v8f16, RegImmInst, StoreKind, shift>;
  def : MVE_vector_offset_store_typed<v4i32, RegImmInst, StoreKind, shift>;
  def : MVE_vector_offset_store_typed<v4f32, RegImmInst, StoreKind, shift>;
  def : MVE_vector_offset_store_typed<v2i64, RegImmInst, StoreKind, shift>;
  def : MVE_vector_offset_store_typed<v2f64, RegImmInst, StoreKind, shift>;
}


// Little-endian: any sufficiently-aligned access can use the natural
// element size directly.
let Predicates = [HasMVEInt, IsLE] in {
  // Stores
  defm : MVE_vector_store<MVE_VSTRBU8, byte_alignedstore, 0>;
  defm : MVE_vector_store<MVE_VSTRHU16, hword_alignedstore, 1>;
  defm : MVE_vector_store<MVE_VSTRWU32, alignedstore32, 2>;

  // Loads
  defm : MVE_vector_load<MVE_VLDRBU8, byte_alignedload, 0>;
  defm : MVE_vector_load<MVE_VLDRHU16, hword_alignedload, 1>;
  defm : MVE_vector_load<MVE_VLDRWU32, alignedload32, 2>;

  // Pre/post inc stores
  defm : MVE_vector_offset_store<MVE_VSTRBU8_pre, pre_store, 0>;
  defm : MVE_vector_offset_store<MVE_VSTRBU8_post, post_store, 0>;
  defm : MVE_vector_offset_store<MVE_VSTRHU16_pre, aligned16_pre_store, 1>;
  defm : MVE_vector_offset_store<MVE_VSTRHU16_post, aligned16_post_store, 1>;
  defm : MVE_vector_offset_store<MVE_VSTRWU32_pre, aligned32_pre_store, 2>;
  defm : MVE_vector_offset_store<MVE_VSTRWU32_post, aligned32_post_store, 2>;
}

// Big-endian: only accesses whose element size matches their alignment
// can be selected directly (per-type defs below, not the multiclasses).
let Predicates = [HasMVEInt, IsBE] in {
  // Aligned Stores
  def : MVE_vector_store_typed<v16i8, MVE_VSTRBU8, store, 0>;
  def : MVE_vector_store_typed<v8i16, MVE_VSTRHU16, alignedstore16, 1>;
  def : MVE_vector_store_typed<v8f16, MVE_VSTRHU16, alignedstore16, 1>;
  def :
MVE_vector_store_typed<v4i32, MVE_VSTRWU32, alignedstore32, 2>;
  def : MVE_vector_store_typed<v4f32, MVE_VSTRWU32, alignedstore32, 2>;

  // Aligned Loads
  def : MVE_vector_load_typed<v16i8, MVE_VLDRBU8, load, 0>;
  def : MVE_vector_load_typed<v8i16, MVE_VLDRHU16, alignedload16, 1>;
  def : MVE_vector_load_typed<v8f16, MVE_VLDRHU16, alignedload16, 1>;
  def : MVE_vector_load_typed<v4i32, MVE_VLDRWU32, alignedload32, 2>;
  def : MVE_vector_load_typed<v4f32, MVE_VLDRWU32, alignedload32, 2>;

  // Other unaligned loads/stores need to go through a VREV: do the byte
  // access and then byte-reverse within each element to restore lane order.
  def : Pat<(v2f64 (load t2addrmode_imm7<0>:$addr)),
            (v2f64 (MVE_VREV64_8 (MVE_VLDRBU8 t2addrmode_imm7<0>:$addr)))>;
  def : Pat<(v2i64 (load t2addrmode_imm7<0>:$addr)),
            (v2i64 (MVE_VREV64_8 (MVE_VLDRBU8 t2addrmode_imm7<0>:$addr)))>;
  def : Pat<(v4i32 (load t2addrmode_imm7<0>:$addr)),
            (v4i32 (MVE_VREV32_8 (MVE_VLDRBU8 t2addrmode_imm7<0>:$addr)))>;
  def : Pat<(v4f32 (load t2addrmode_imm7<0>:$addr)),
            (v4f32 (MVE_VREV32_8 (MVE_VLDRBU8 t2addrmode_imm7<0>:$addr)))>;
  def : Pat<(v8i16 (load t2addrmode_imm7<0>:$addr)),
            (v8i16 (MVE_VREV16_8 (MVE_VLDRBU8 t2addrmode_imm7<0>:$addr)))>;
  def : Pat<(v8f16 (load t2addrmode_imm7<0>:$addr)),
            (v8f16 (MVE_VREV16_8 (MVE_VLDRBU8 t2addrmode_imm7<0>:$addr)))>;
  def : Pat<(store (v2f64 MQPR:$val), t2addrmode_imm7<0>:$addr),
            (MVE_VSTRBU8 (MVE_VREV64_8 MQPR:$val), t2addrmode_imm7<0>:$addr)>;
  def : Pat<(store (v2i64 MQPR:$val), t2addrmode_imm7<0>:$addr),
            (MVE_VSTRBU8 (MVE_VREV64_8 MQPR:$val), t2addrmode_imm7<0>:$addr)>;
  def : Pat<(store (v4i32 MQPR:$val), t2addrmode_imm7<0>:$addr),
            (MVE_VSTRBU8 (MVE_VREV32_8 MQPR:$val), t2addrmode_imm7<0>:$addr)>;
  def : Pat<(store (v4f32 MQPR:$val), t2addrmode_imm7<0>:$addr),
            (MVE_VSTRBU8 (MVE_VREV32_8 MQPR:$val), t2addrmode_imm7<0>:$addr)>;
  def : Pat<(store (v8i16 MQPR:$val), t2addrmode_imm7<0>:$addr),
            (MVE_VSTRBU8 (MVE_VREV16_8
MQPR:$val), t2addrmode_imm7<0>:$addr)>;
  def : Pat<(store (v8f16 MQPR:$val), t2addrmode_imm7<0>:$addr),
            (MVE_VSTRBU8 (MVE_VREV16_8 MQPR:$val), t2addrmode_imm7<0>:$addr)>;

  // Pre/Post inc stores
  def : MVE_vector_offset_store_typed<v16i8, MVE_VSTRBU8_pre, pre_store, 0>;
  def : MVE_vector_offset_store_typed<v16i8, MVE_VSTRBU8_post, post_store, 0>;
  def : MVE_vector_offset_store_typed<v8i16, MVE_VSTRHU16_pre, aligned16_pre_store, 1>;
  def : MVE_vector_offset_store_typed<v8i16, MVE_VSTRHU16_post, aligned16_post_store, 1>;
  def : MVE_vector_offset_store_typed<v8f16, MVE_VSTRHU16_pre, aligned16_pre_store, 1>;
  def : MVE_vector_offset_store_typed<v8f16, MVE_VSTRHU16_post, aligned16_post_store, 1>;
  def : MVE_vector_offset_store_typed<v4i32, MVE_VSTRWU32_pre, aligned32_pre_store, 2>;
  def : MVE_vector_offset_store_typed<v4i32, MVE_VSTRWU32_post, aligned32_post_store, 2>;
  def : MVE_vector_offset_store_typed<v4f32, MVE_VSTRWU32_pre, aligned32_pre_store, 2>;
  def : MVE_vector_offset_store_typed<v4f32, MVE_VSTRWU32_post, aligned32_post_store, 2>;
}

let Predicates = [HasMVEInt] in {
  // Aligned masked store, shared between LE and BE
  def : MVE_vector_maskedstore_typed<v16i8, MVE_VSTRBU8, aligned_maskedstvi8, 0>;
  def : MVE_vector_maskedstore_typed<v8i16, MVE_VSTRHU16, aligned_maskedstvi16, 1>;
  def : MVE_vector_maskedstore_typed<v8f16, MVE_VSTRHU16, aligned_maskedstvi16, 1>;
  def : MVE_vector_maskedstore_typed<v4i32, MVE_VSTRWU32, aligned_maskedstvi32, 2>;
  def : MVE_vector_maskedstore_typed<v4f32, MVE_VSTRWU32, aligned_maskedstvi32, 2>;

  // Pre/Post inc masked stores
  def : MVE_vector_offset_maskedstore_typed<v16i8, MVE_VSTRBU8_pre, aligned_pre_maskedstorevi8, 0>;
  def : MVE_vector_offset_maskedstore_typed<v16i8, MVE_VSTRBU8_post, aligned_post_maskedstorevi8, 0>;
  def : MVE_vector_offset_maskedstore_typed<v8i16, MVE_VSTRHU16_pre, aligned_pre_maskedstorevi16,
1>;
  def : MVE_vector_offset_maskedstore_typed<v8i16, MVE_VSTRHU16_post, aligned_post_maskedstorevi16, 1>;
  def : MVE_vector_offset_maskedstore_typed<v8f16, MVE_VSTRHU16_pre, aligned_pre_maskedstorevi16, 1>;
  def : MVE_vector_offset_maskedstore_typed<v8f16, MVE_VSTRHU16_post, aligned_post_maskedstorevi16, 1>;
  def : MVE_vector_offset_maskedstore_typed<v4i32, MVE_VSTRWU32_pre, aligned_pre_maskedstorevi32, 2>;
  def : MVE_vector_offset_maskedstore_typed<v4i32, MVE_VSTRWU32_post, aligned_post_maskedstorevi32, 2>;
  def : MVE_vector_offset_maskedstore_typed<v4f32, MVE_VSTRWU32_pre, aligned_pre_maskedstorevi32, 2>;
  def : MVE_vector_offset_maskedstore_typed<v4f32, MVE_VSTRWU32_post, aligned_post_maskedstorevi32, 2>;

  // Aligned masked loads
  def : MVE_vector_maskedload_typed<v16i8, MVE_VLDRBU8, aligned_maskedloadvi8, 0>;
  def : MVE_vector_maskedload_typed<v8i16, MVE_VLDRHU16, aligned_maskedloadvi16, 1>;
  def : MVE_vector_maskedload_typed<v8f16, MVE_VLDRHU16, aligned_maskedloadvi16, 1>;
  def : MVE_vector_maskedload_typed<v4i32, MVE_VLDRWU32, aligned_maskedloadvi32, 2>;
  def : MVE_vector_maskedload_typed<v4f32, MVE_VLDRWU32, aligned_maskedloadvi32, 2>;
}

// Widening/Narrowing Loads/Stores

// For a narrow memory type (suffix `Amble`, e.g. "vi8") and a wide vector
// type VT, instantiate truncating-store and extending-load patterns, in
// plain, pre/post-indexed and masked forms. `StoreInst` is a name prefix;
// "_pre"/"_post" variants are looked up via !cast.
multiclass MVEExtLoadStore<Instruction LoadSInst, Instruction LoadUInst, string StoreInst,
                           string Amble, ValueType VT, int Shift> {
  // Trunc stores
  def : Pat<(!cast<PatFrag>("aligned_truncst"#Amble) (VT MQPR:$val), taddrmode_imm7<Shift>:$addr),
            (!cast<Instruction>(StoreInst) MQPR:$val, taddrmode_imm7<Shift>:$addr)>;
  def : Pat<(!cast<PatFrag>("aligned_post_truncst"#Amble) (VT MQPR:$Rt), tGPR:$Rn, t2am_imm7_offset<Shift>:$addr),
            (!cast<Instruction>(StoreInst#"_post") MQPR:$Rt, tGPR:$Rn, t2am_imm7_offset<Shift>:$addr)>;
  def : Pat<(!cast<PatFrag>("aligned_pre_truncst"#Amble) (VT MQPR:$Rt), tGPR:$Rn, t2am_imm7_offset<Shift>:$addr),
            (!cast<Instruction>(StoreInst#"_pre")
MQPR:$Rt, tGPR:$Rn, t2am_imm7_offset<Shift>:$addr)>;

  // Masked trunc stores
  def : Pat<(!cast<PatFrag>("aligned_truncmaskedst"#Amble) (VT MQPR:$val), taddrmode_imm7<Shift>:$addr, VCCR:$pred),
            (!cast<Instruction>(StoreInst) MQPR:$val, taddrmode_imm7<Shift>:$addr, ARMVCCThen, VCCR:$pred)>;
  def : Pat<(!cast<PatFrag>("aligned_post_truncmaskedst"#Amble) (VT MQPR:$Rt), tGPR:$Rn, t2am_imm7_offset<Shift>:$addr, VCCR:$pred),
            (!cast<Instruction>(StoreInst#"_post") MQPR:$Rt, tGPR:$Rn, t2am_imm7_offset<Shift>:$addr, ARMVCCThen, VCCR:$pred)>;
  def : Pat<(!cast<PatFrag>("aligned_pre_truncmaskedst"#Amble) (VT MQPR:$Rt), tGPR:$Rn, t2am_imm7_offset<Shift>:$addr, VCCR:$pred),
            (!cast<Instruction>(StoreInst#"_pre") MQPR:$Rt, tGPR:$Rn, t2am_imm7_offset<Shift>:$addr, ARMVCCThen, VCCR:$pred)>;

  // Ext loads: anyext and zext both select the unsigned load.
  def : Pat<(VT (!cast<PatFrag>("aligned_extload"#Amble) taddrmode_imm7<Shift>:$addr)),
            (VT (LoadUInst taddrmode_imm7<Shift>:$addr))>;
  def : Pat<(VT (!cast<PatFrag>("aligned_sextload"#Amble) taddrmode_imm7<Shift>:$addr)),
            (VT (LoadSInst taddrmode_imm7<Shift>:$addr))>;
  def : Pat<(VT (!cast<PatFrag>("aligned_zextload"#Amble) taddrmode_imm7<Shift>:$addr)),
            (VT (LoadUInst taddrmode_imm7<Shift>:$addr))>;

  // Masked ext loads
  def : Pat<(VT (!cast<PatFrag>("aligned_extmaskedload"#Amble) taddrmode_imm7<Shift>:$addr, VCCR:$pred, (VT (ARMvmovImm (i32 0))))),
            (VT (LoadUInst taddrmode_imm7<Shift>:$addr, ARMVCCThen, VCCR:$pred))>;
  def : Pat<(VT (!cast<PatFrag>("aligned_sextmaskedload"#Amble) taddrmode_imm7<Shift>:$addr, VCCR:$pred, (VT (ARMvmovImm (i32 0))))),
            (VT (LoadSInst taddrmode_imm7<Shift>:$addr, ARMVCCThen, VCCR:$pred))>;
  def : Pat<(VT (!cast<PatFrag>("aligned_zextmaskedload"#Amble) taddrmode_imm7<Shift>:$addr, VCCR:$pred, (VT (ARMvmovImm (i32 0))))),
            (VT (LoadUInst taddrmode_imm7<Shift>:$addr, ARMVCCThen, VCCR:$pred))>;
}

let Predicates = [HasMVEInt] in {
defm : MVEExtLoadStore<MVE_VLDRBS16, MVE_VLDRBU16, "MVE_VSTRB16", "vi8", v8i16, 0>; 7342 defm : MVEExtLoadStore<MVE_VLDRBS32, MVE_VLDRBU32, "MVE_VSTRB32", "vi8", v4i32, 0>; 7343 defm : MVEExtLoadStore<MVE_VLDRHS32, MVE_VLDRHU32, "MVE_VSTRH32", "vi16", v4i32, 1>; 7344} 7345 7346 7347// Bit convert patterns 7348 7349let Predicates = [HasMVEInt] in { 7350 def : Pat<(v2f64 (bitconvert (v2i64 MQPR:$src))), (v2f64 MQPR:$src)>; 7351 def : Pat<(v2i64 (bitconvert (v2f64 MQPR:$src))), (v2i64 MQPR:$src)>; 7352 7353 def : Pat<(v4i32 (bitconvert (v4f32 MQPR:$src))), (v4i32 MQPR:$src)>; 7354 def : Pat<(v4f32 (bitconvert (v4i32 MQPR:$src))), (v4f32 MQPR:$src)>; 7355 7356 def : Pat<(v8i16 (bitconvert (v8f16 MQPR:$src))), (v8i16 MQPR:$src)>; 7357 def : Pat<(v8f16 (bitconvert (v8i16 MQPR:$src))), (v8f16 MQPR:$src)>; 7358} 7359 7360let Predicates = [IsLE,HasMVEInt] in { 7361 def : Pat<(v2f64 (bitconvert (v4f32 MQPR:$src))), (v2f64 MQPR:$src)>; 7362 def : Pat<(v2f64 (bitconvert (v4i32 MQPR:$src))), (v2f64 MQPR:$src)>; 7363 def : Pat<(v2f64 (bitconvert (v8f16 MQPR:$src))), (v2f64 MQPR:$src)>; 7364 def : Pat<(v2f64 (bitconvert (v8i16 MQPR:$src))), (v2f64 MQPR:$src)>; 7365 def : Pat<(v2f64 (bitconvert (v16i8 MQPR:$src))), (v2f64 MQPR:$src)>; 7366 7367 def : Pat<(v2i64 (bitconvert (v4f32 MQPR:$src))), (v2i64 MQPR:$src)>; 7368 def : Pat<(v2i64 (bitconvert (v4i32 MQPR:$src))), (v2i64 MQPR:$src)>; 7369 def : Pat<(v2i64 (bitconvert (v8f16 MQPR:$src))), (v2i64 MQPR:$src)>; 7370 def : Pat<(v2i64 (bitconvert (v8i16 MQPR:$src))), (v2i64 MQPR:$src)>; 7371 def : Pat<(v2i64 (bitconvert (v16i8 MQPR:$src))), (v2i64 MQPR:$src)>; 7372 7373 def : Pat<(v4f32 (bitconvert (v2f64 MQPR:$src))), (v4f32 MQPR:$src)>; 7374 def : Pat<(v4f32 (bitconvert (v2i64 MQPR:$src))), (v4f32 MQPR:$src)>; 7375 def : Pat<(v4f32 (bitconvert (v8f16 MQPR:$src))), (v4f32 MQPR:$src)>; 7376 def : Pat<(v4f32 (bitconvert (v8i16 MQPR:$src))), (v4f32 MQPR:$src)>; 7377 def : Pat<(v4f32 (bitconvert (v16i8 MQPR:$src))), (v4f32 MQPR:$src)>; 
7378 7379 def : Pat<(v4i32 (bitconvert (v2f64 MQPR:$src))), (v4i32 MQPR:$src)>; 7380 def : Pat<(v4i32 (bitconvert (v2i64 MQPR:$src))), (v4i32 MQPR:$src)>; 7381 def : Pat<(v4i32 (bitconvert (v8f16 MQPR:$src))), (v4i32 MQPR:$src)>; 7382 def : Pat<(v4i32 (bitconvert (v8i16 MQPR:$src))), (v4i32 MQPR:$src)>; 7383 def : Pat<(v4i32 (bitconvert (v16i8 MQPR:$src))), (v4i32 MQPR:$src)>; 7384 7385 def : Pat<(v8f16 (bitconvert (v2f64 MQPR:$src))), (v8f16 MQPR:$src)>; 7386 def : Pat<(v8f16 (bitconvert (v2i64 MQPR:$src))), (v8f16 MQPR:$src)>; 7387 def : Pat<(v8f16 (bitconvert (v4f32 MQPR:$src))), (v8f16 MQPR:$src)>; 7388 def : Pat<(v8f16 (bitconvert (v4i32 MQPR:$src))), (v8f16 MQPR:$src)>; 7389 def : Pat<(v8f16 (bitconvert (v16i8 MQPR:$src))), (v8f16 MQPR:$src)>; 7390 7391 def : Pat<(v8i16 (bitconvert (v2f64 MQPR:$src))), (v8i16 MQPR:$src)>; 7392 def : Pat<(v8i16 (bitconvert (v2i64 MQPR:$src))), (v8i16 MQPR:$src)>; 7393 def : Pat<(v8i16 (bitconvert (v4f32 MQPR:$src))), (v8i16 MQPR:$src)>; 7394 def : Pat<(v8i16 (bitconvert (v4i32 MQPR:$src))), (v8i16 MQPR:$src)>; 7395 def : Pat<(v8i16 (bitconvert (v16i8 MQPR:$src))), (v8i16 MQPR:$src)>; 7396 7397 def : Pat<(v16i8 (bitconvert (v2f64 MQPR:$src))), (v16i8 MQPR:$src)>; 7398 def : Pat<(v16i8 (bitconvert (v2i64 MQPR:$src))), (v16i8 MQPR:$src)>; 7399 def : Pat<(v16i8 (bitconvert (v4f32 MQPR:$src))), (v16i8 MQPR:$src)>; 7400 def : Pat<(v16i8 (bitconvert (v4i32 MQPR:$src))), (v16i8 MQPR:$src)>; 7401 def : Pat<(v16i8 (bitconvert (v8f16 MQPR:$src))), (v16i8 MQPR:$src)>; 7402 def : Pat<(v16i8 (bitconvert (v8i16 MQPR:$src))), (v16i8 MQPR:$src)>; 7403} 7404 7405let Predicates = [IsBE,HasMVEInt] in { 7406 def : Pat<(v2f64 (bitconvert (v4f32 MQPR:$src))), (v2f64 (MVE_VREV64_32 MQPR:$src))>; 7407 def : Pat<(v2f64 (bitconvert (v4i32 MQPR:$src))), (v2f64 (MVE_VREV64_32 MQPR:$src))>; 7408 def : Pat<(v2f64 (bitconvert (v8f16 MQPR:$src))), (v2f64 (MVE_VREV64_16 MQPR:$src))>; 7409 def : Pat<(v2f64 (bitconvert (v8i16 MQPR:$src))), (v2f64 (MVE_VREV64_16 
MQPR:$src))>; 7410 def : Pat<(v2f64 (bitconvert (v16i8 MQPR:$src))), (v2f64 (MVE_VREV64_8 MQPR:$src))>; 7411 7412 def : Pat<(v2i64 (bitconvert (v4f32 MQPR:$src))), (v2i64 (MVE_VREV64_32 MQPR:$src))>; 7413 def : Pat<(v2i64 (bitconvert (v4i32 MQPR:$src))), (v2i64 (MVE_VREV64_32 MQPR:$src))>; 7414 def : Pat<(v2i64 (bitconvert (v8f16 MQPR:$src))), (v2i64 (MVE_VREV64_16 MQPR:$src))>; 7415 def : Pat<(v2i64 (bitconvert (v8i16 MQPR:$src))), (v2i64 (MVE_VREV64_16 MQPR:$src))>; 7416 def : Pat<(v2i64 (bitconvert (v16i8 MQPR:$src))), (v2i64 (MVE_VREV64_8 MQPR:$src))>; 7417 7418 def : Pat<(v4f32 (bitconvert (v2f64 MQPR:$src))), (v4f32 (MVE_VREV64_32 MQPR:$src))>; 7419 def : Pat<(v4f32 (bitconvert (v2i64 MQPR:$src))), (v4f32 (MVE_VREV64_32 MQPR:$src))>; 7420 def : Pat<(v4f32 (bitconvert (v8f16 MQPR:$src))), (v4f32 (MVE_VREV32_16 MQPR:$src))>; 7421 def : Pat<(v4f32 (bitconvert (v8i16 MQPR:$src))), (v4f32 (MVE_VREV32_16 MQPR:$src))>; 7422 def : Pat<(v4f32 (bitconvert (v16i8 MQPR:$src))), (v4f32 (MVE_VREV32_8 MQPR:$src))>; 7423 7424 def : Pat<(v4i32 (bitconvert (v2f64 MQPR:$src))), (v4i32 (MVE_VREV64_32 MQPR:$src))>; 7425 def : Pat<(v4i32 (bitconvert (v2i64 MQPR:$src))), (v4i32 (MVE_VREV64_32 MQPR:$src))>; 7426 def : Pat<(v4i32 (bitconvert (v8f16 MQPR:$src))), (v4i32 (MVE_VREV32_16 MQPR:$src))>; 7427 def : Pat<(v4i32 (bitconvert (v8i16 MQPR:$src))), (v4i32 (MVE_VREV32_16 MQPR:$src))>; 7428 def : Pat<(v4i32 (bitconvert (v16i8 MQPR:$src))), (v4i32 (MVE_VREV32_8 MQPR:$src))>; 7429 7430 def : Pat<(v8f16 (bitconvert (v2f64 MQPR:$src))), (v8f16 (MVE_VREV64_16 MQPR:$src))>; 7431 def : Pat<(v8f16 (bitconvert (v2i64 MQPR:$src))), (v8f16 (MVE_VREV64_16 MQPR:$src))>; 7432 def : Pat<(v8f16 (bitconvert (v4f32 MQPR:$src))), (v8f16 (MVE_VREV32_16 MQPR:$src))>; 7433 def : Pat<(v8f16 (bitconvert (v4i32 MQPR:$src))), (v8f16 (MVE_VREV32_16 MQPR:$src))>; 7434 def : Pat<(v8f16 (bitconvert (v16i8 MQPR:$src))), (v8f16 (MVE_VREV16_8 MQPR:$src))>; 7435 7436 def : Pat<(v8i16 (bitconvert (v2f64 MQPR:$src))), 
(v8i16 (MVE_VREV64_16 MQPR:$src))>; 7437 def : Pat<(v8i16 (bitconvert (v2i64 MQPR:$src))), (v8i16 (MVE_VREV64_16 MQPR:$src))>; 7438 def : Pat<(v8i16 (bitconvert (v4f32 MQPR:$src))), (v8i16 (MVE_VREV32_16 MQPR:$src))>; 7439 def : Pat<(v8i16 (bitconvert (v4i32 MQPR:$src))), (v8i16 (MVE_VREV32_16 MQPR:$src))>; 7440 def : Pat<(v8i16 (bitconvert (v16i8 MQPR:$src))), (v8i16 (MVE_VREV16_8 MQPR:$src))>; 7441 7442 def : Pat<(v16i8 (bitconvert (v2f64 MQPR:$src))), (v16i8 (MVE_VREV64_8 MQPR:$src))>; 7443 def : Pat<(v16i8 (bitconvert (v2i64 MQPR:$src))), (v16i8 (MVE_VREV64_8 MQPR:$src))>; 7444 def : Pat<(v16i8 (bitconvert (v4f32 MQPR:$src))), (v16i8 (MVE_VREV32_8 MQPR:$src))>; 7445 def : Pat<(v16i8 (bitconvert (v4i32 MQPR:$src))), (v16i8 (MVE_VREV32_8 MQPR:$src))>; 7446 def : Pat<(v16i8 (bitconvert (v8f16 MQPR:$src))), (v16i8 (MVE_VREV16_8 MQPR:$src))>; 7447 def : Pat<(v16i8 (bitconvert (v8i16 MQPR:$src))), (v16i8 (MVE_VREV16_8 MQPR:$src))>; 7448} 7449