//===-- VEInstrPatternsVec.td - VEC_-type SDNodes and isel for VE Target --===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file describes the VEC_* prefixed intermediate SDNodes and their
// isel patterns.
//
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// Instruction format superclass
//===----------------------------------------------------------------------===//

// Sub-register replication for packed broadcast: form an i64 whose upper and
// lower 32 bits both hold the scalar payload.  repl_f32 shifts the f32 bits
// down from the upper half of the register, repl_i32 shifts the i32 bits up
// from the lower half, and the OR leaves the value in both halves.
def: Pat<(i64 (repl_f32 f32:$val)),
         (ORrr
           (SRLri (f2l $val), 32),
           (zero_i32 (f2l $val)))>;
def: Pat<(i64 (repl_i32 i32:$val)),
         (ORrr
           (zero_f32 (i2l $val)),
           (SLLri (i2l $val), 32))>;

///// Mask Load & Store /////

// Loads and stores for v256i1 and v512i1 are implemented in two ways.  The
// LDVM/STVM and LDVM512/STVM512 pseudo instructions below are used for
// frame-index related loads and stores; all other loads and stores are
// handled by custom lowering.

def : Pat<(v256i1 (load ADDRrii:$addr)),
          (LDVMrii ADDRrii:$addr)>;
def : Pat<(v512i1 (load ADDRrii:$addr)),
          (LDVM512rii ADDRrii:$addr)>;
def : Pat<(store v256i1:$vx, ADDRrii:$addr),
          (STVMrii ADDRrii:$addr, $vx)>;
def : Pat<(store v512i1:$vx, ADDRrii:$addr),
          (STVM512rii ADDRrii:$addr, $vx)>;

multiclass vbrd_elem32<ValueType v32, ValueType s32, SDPatternOperator ImmOp,
                       SDNodeXForm ImmCast, OutPatFrag SuperRegCast> {
  // VBRDil
  def : Pat<(v32 (vec_broadcast (s32 ImmOp:$sy), i32:$vl)),
            (VBRDil (ImmCast $sy), i32:$vl)>;

  // VBRDrl
  def : Pat<(v32 (vec_broadcast s32:$sy, i32:$vl)),
            (VBRDrl (SuperRegCast $sy), i32:$vl)>;
}

multiclass vbrd_elem64<ValueType v64, ValueType s64,
                       SDPatternOperator ImmOp, SDNodeXForm ImmCast> {
  // VBRDil
  def : Pat<(v64 (vec_broadcast (s64 ImmOp:$sy), i32:$vl)),
            (VBRDil (ImmCast $sy), i32:$vl)>;

  // VBRDrl
  def : Pat<(v64 (vec_broadcast s64:$sy, i32:$vl)),
            (VBRDrl s64:$sy, i32:$vl)>;
}

multiclass extract_insert_elem32<ValueType v32, ValueType s32,
                                 OutPatFrag SubRegCast,
                                 OutPatFrag SuperRegCast> {
  // LVSvi
  def: Pat<(s32 (extractelt v32:$vec, uimm7:$idx)),
           (SubRegCast (LVSvi v32:$vec, (ULO7 $idx)))>;
  // LVSvr
  def: Pat<(s32 (extractelt v32:$vec, i64:$idx)),
           (SubRegCast (LVSvr v32:$vec, $idx))>;

  // LSVir
  def: Pat<(v32 (insertelt v32:$vec, s32:$val, uimm7:$idx)),
           (LSVir_v (ULO7 $idx), (SuperRegCast $val), $vec)>;
  // LSVrr
  def: Pat<(v32 (insertelt v32:$vec, s32:$val, i64:$idx)),
           (LSVrr_v $idx, (SuperRegCast $val), $vec)>;
}

multiclass extract_insert_elem64<ValueType v64, ValueType s64> {
  // LVSvi
  def: Pat<(s64 (extractelt v64:$vec, uimm7:$idx)),
           (LVSvi v64:$vec, (ULO7 $idx))>;
  // LVSvr
  def: Pat<(s64 (extractelt v64:$vec, i64:$idx)),
           (LVSvr v64:$vec, $idx)>;

  // LSVir
  def: Pat<(v64 (insertelt v64:$vec, s64:$val, uimm7:$idx)),
           (LSVir_v (ULO7 $idx), $val, $vec)>;
  // LSVrr
  def: Pat<(v64 (insertelt v64:$vec, s64:$val, i64:$idx)),
           (LSVrr_v $idx, $val, $vec)>;
}

multiclass patterns_elem32<ValueType v32, ValueType s32,
                           SDPatternOperator ImmOp, SDNodeXForm ImmCast,
                           OutPatFrag SubRegCast, OutPatFrag SuperRegCast> {
  defm : vbrd_elem32<v32, s32, ImmOp, ImmCast, SuperRegCast>;
  defm : extract_insert_elem32<v32, s32, SubRegCast, SuperRegCast>;
}

multiclass patterns_elem64<ValueType v64, ValueType s64,
                           SDPatternOperator ImmOp, SDNodeXForm ImmCast> {
  defm : vbrd_elem64<v64, s64, ImmOp, ImmCast>;
  defm : extract_insert_elem64<v64, s64>;
}

defm : patterns_elem32<v256i32, i32, simm7, LO7, l2i, i2l>;
defm : patterns_elem32<v256f32, f32, simm7fp, LO7FP, l2f, f2l>;

defm : patterns_elem64<v256i64, i64, simm7, LO7>;
defm : patterns_elem64<v256f64, f64, simm7fp, LO7FP>;

defm : vbrd_elem64<v512i32, i64, simm7, LO7>;
defm : vbrd_elem64<v512f32, i64, simm7, LO7>;
defm : vbrd_elem64<v512i32, f64, simm7fp, LO7FP>;
defm : vbrd_elem64<v512f32, f64, simm7fp, LO7FP>;

class Mask_Binary<ValueType MaskVT, SDPatternOperator MaskOp, string InstName> :
    Pat<(MaskVT (MaskOp MaskVT:$ma, MaskVT:$mb)),
        (!cast<Instruction>(InstName#"mm") $ma, $mb)>;

def: Mask_Binary<v256i1, and, "ANDM">;
def: Mask_Binary<v256i1, or, "ORM">;
def: Mask_Binary<v256i1, xor, "XORM">;

///// Packing support /////

// v256i1 <> v512i1
def : Pat<(v256i1 (vec_unpack_lo v512i1:$vm, (i32 srcvalue))),
          (EXTRACT_SUBREG $vm, sub_vm_odd)>;
def : Pat<(v256i1 (vec_unpack_hi v512i1:$vm, (i32 srcvalue))),
          (EXTRACT_SUBREG $vm, sub_vm_even)>;
def : Pat<(v512i1 (vec_pack v256i1:$vlo, v256i1:$vhi, (i32 srcvalue))),
          (INSERT_SUBREG (INSERT_SUBREG
                             (v512i1 (IMPLICIT_DEF)),
                             $vlo, sub_vm_odd),
                         $vhi, sub_vm_even)>;

// v256.32 <> v512.32
multiclass Packing<ValueType PackVT> {
  // no-op unpacks
  def : Pat<(v256i32 (vec_unpack_lo PackVT:$vp, (i32 srcvalue))),
            (COPY_TO_REGCLASS $vp, V64)>;
  def : Pat<(v256f32 (vec_unpack_hi PackVT:$vp, (i32 srcvalue))),
            (COPY_TO_REGCLASS $vp, V64)>;

  // shuffle unpacks
  def : Pat<(v256f32 (vec_unpack_lo PackVT:$vp, i32:$avl)),
            (VSHFvvil $vp, $vp, 4, $avl)>; // always pick lo
  def : Pat<(v256i32 (vec_unpack_hi PackVT:$vp, i32:$avl)),
            (VSHFvvil $vp, $vp, 0, $avl)>; // always pick hi
}

defm : Packing<v512i32>;
defm : Packing<v512f32>;

def : Pat<(v512i32 (vec_pack v256i32:$vlo, v256i32:$vhi, i32:$avl)),
          (VSHFvvil $vlo, $vhi, 13, $avl)>;
def : Pat<(v512f32 (vec_pack v256f32:$vlo, v256f32:$vhi, i32:$avl)),
          (VSHFvvil $vlo, $vhi, 8, $avl)>;