//===-- X86SelectionDAGInfo.cpp - X86 SelectionDAG Info -------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the X86SelectionDAGInfo class.
//
//===----------------------------------------------------------------------===//

#include "X86SelectionDAGInfo.h"
#include "X86ISelLowering.h"
#include "X86InstrInfo.h"
#include "X86RegisterInfo.h"
#include "X86Subtarget.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/Support/CommandLine.h"

using namespace llvm;

#define DEBUG_TYPE "x86-selectiondag-info"

static cl::opt<bool>
    UseFSRMForMemcpy("x86-use-fsrm-for-memcpy", cl::Hidden, cl::init(false),
                     cl::desc("Use fast short rep mov in memcpy lowering"));

bool X86SelectionDAGInfo::isBaseRegConflictPossible(
    SelectionDAG &DAG, ArrayRef<MCPhysReg> ClobberSet) const {
  // We cannot use TRI->hasBasePointer() until *after* we select all basic
  // blocks. Legalization may introduce new stack temporaries with large
  // alignment requirements. Fall back to generic code if there are any
  // dynamic stack adjustments (hopefully rare) and the base pointer would
  // conflict if we had to use it.
  MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
  if (!MFI.hasVarSizedObjects() && !MFI.hasOpaqueSPAdjustment())
    return false;

  const X86RegisterInfo *TRI = static_cast<const X86RegisterInfo *>(
      DAG.getSubtarget().getRegisterInfo());
  return llvm::is_contained(ClobberSet, TRI->getBaseRegister());
}

SDValue X86SelectionDAGInfo::EmitTargetCodeForMemset(
    SelectionDAG &DAG, const SDLoc &dl, SDValue Chain, SDValue Dst, SDValue Val,
    SDValue Size, Align Alignment, bool isVolatile, bool AlwaysInline,
    MachinePointerInfo DstPtrInfo) const {
  ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size);
  const X86Subtarget &Subtarget =
      DAG.getMachineFunction().getSubtarget<X86Subtarget>();

#ifndef NDEBUG
  // If the base register might conflict with our physical registers, bail out.
  const MCPhysReg ClobberSet[] = {X86::RCX, X86::RAX, X86::RDI,
                                  X86::ECX, X86::EAX, X86::EDI};
  assert(!isBaseRegConflictPossible(DAG, ClobberSet));
#endif

  // If the destination is in a segment-relative address space, use the default
  // lowering.
  if (DstPtrInfo.getAddrSpace() >= 256)
    return SDValue();

  // If not DWORD aligned, or if the size exceeds the inlining threshold, call
  // the library. The libc version is likely to be faster for these cases: it
  // can use the address value and run-time information about the CPU.
  if (Alignment < Align(4) || !ConstantSize ||
      ConstantSize->getZExtValue() > Subtarget.getMaxInlineSizeThreshold())
    return SDValue();

  uint64_t SizeVal = ConstantSize->getZExtValue();
  SDValue InFlag;
  EVT AVT;
  SDValue Count;
  ConstantSDNode *ValC = dyn_cast<ConstantSDNode>(Val);
  unsigned BytesLeft = 0;
  if (ValC) {
    unsigned ValReg;
    uint64_t Val = ValC->getZExtValue() & 255;

    // If the value is a constant, then we can potentially use larger sets.
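    // For example, a constant fill byte of 0xAB is widened below to 0xABAB
    // (WORD), 0xABABABAB (DWORD), or 0xABABABABABABABAB (QWORD), so each
    // REP STOS iteration stores one AVT-sized chunk of the replicated byte.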
    if (Alignment > Align(2)) {
      // DWORD aligned
      AVT = MVT::i32;
      ValReg = X86::EAX;
      Val = (Val << 8) | Val;
      Val = (Val << 16) | Val;
      if (Subtarget.is64Bit() && Alignment > Align(8)) { // QWORD aligned
        AVT = MVT::i64;
        ValReg = X86::RAX;
        Val = (Val << 32) | Val;
      }
    } else if (Alignment == Align(2)) {
      // WORD aligned
      AVT = MVT::i16;
      ValReg = X86::AX;
      Val = (Val << 8) | Val;
    } else {
      // Byte aligned
      AVT = MVT::i8;
      ValReg = X86::AL;
      Count = DAG.getIntPtrConstant(SizeVal, dl);
    }

    if (AVT.bitsGT(MVT::i8)) {
      unsigned UBytes = AVT.getSizeInBits() / 8;
      Count = DAG.getIntPtrConstant(SizeVal / UBytes, dl);
      BytesLeft = SizeVal % UBytes;
    }

    Chain = DAG.getCopyToReg(Chain, dl, ValReg, DAG.getConstant(Val, dl, AVT),
                             InFlag);
    InFlag = Chain.getValue(1);
  } else {
    AVT = MVT::i8;
    Count = DAG.getIntPtrConstant(SizeVal, dl);
    Chain = DAG.getCopyToReg(Chain, dl, X86::AL, Val, InFlag);
    InFlag = Chain.getValue(1);
  }

  bool Use64BitRegs = Subtarget.isTarget64BitLP64();
  Chain = DAG.getCopyToReg(Chain, dl, Use64BitRegs ? X86::RCX : X86::ECX,
                           Count, InFlag);
  InFlag = Chain.getValue(1);
  Chain = DAG.getCopyToReg(Chain, dl, Use64BitRegs ? X86::RDI : X86::EDI,
                           Dst, InFlag);
  InFlag = Chain.getValue(1);

  SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue);
  SDValue Ops[] = {Chain, DAG.getValueType(AVT), InFlag};
  Chain = DAG.getNode(X86ISD::REP_STOS, dl, Tys, Ops);

  if (BytesLeft) {
    // Handle the last 1 - 7 bytes.
    unsigned Offset = SizeVal - BytesLeft;
    EVT AddrVT = Dst.getValueType();
    EVT SizeVT = Size.getValueType();

    Chain =
        DAG.getMemset(Chain, dl,
                      DAG.getNode(ISD::ADD, dl, AddrVT, Dst,
                                  DAG.getConstant(Offset, dl, AddrVT)),
                      Val, DAG.getConstant(BytesLeft, dl, SizeVT), Alignment,
                      isVolatile, AlwaysInline,
                      /* isTailCall */ false, DstPtrInfo.getWithOffset(Offset));
  }

  // TODO: Use a TokenFactor, as in memcpy, instead of a single chain.
  return Chain;
}

/// Emit a single REP MOVS{B,W,D,Q} instruction.
static SDValue emitRepmovs(const X86Subtarget &Subtarget, SelectionDAG &DAG,
                           const SDLoc &dl, SDValue Chain, SDValue Dst,
                           SDValue Src, SDValue Size, MVT AVT) {
  const bool Use64BitRegs = Subtarget.isTarget64BitLP64();
  const unsigned CX = Use64BitRegs ? X86::RCX : X86::ECX;
  const unsigned DI = Use64BitRegs ? X86::RDI : X86::EDI;
  const unsigned SI = Use64BitRegs ? X86::RSI : X86::ESI;

  SDValue InFlag;
  Chain = DAG.getCopyToReg(Chain, dl, CX, Size, InFlag);
  InFlag = Chain.getValue(1);
  Chain = DAG.getCopyToReg(Chain, dl, DI, Dst, InFlag);
  InFlag = Chain.getValue(1);
  Chain = DAG.getCopyToReg(Chain, dl, SI, Src, InFlag);
  InFlag = Chain.getValue(1);

  SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue);
  SDValue Ops[] = {Chain, DAG.getValueType(AVT), InFlag};
  return DAG.getNode(X86ISD::REP_MOVS, dl, Tys, Ops);
}

/// Emit a single REP MOVSB instruction for a particular constant size.
static SDValue emitRepmovsB(const X86Subtarget &Subtarget, SelectionDAG &DAG,
                            const SDLoc &dl, SDValue Chain, SDValue Dst,
                            SDValue Src, uint64_t Size) {
  return emitRepmovs(Subtarget, DAG, dl, Chain, Dst, Src,
                     DAG.getIntPtrConstant(Size, dl), MVT::i8);
}

/// Returns the best type to use with repmovs depending on alignment.
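/// For example, an 8- or 16-byte-aligned copy selects MVT::i64 on 64-bit
/// targets (lowered to REP MOVSQ) but MVT::i32 on 32-bit targets, while a
/// 2-byte-aligned copy selects MVT::i16 (REP MOVSW).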
static MVT getOptimalRepmovsType(const X86Subtarget &Subtarget,
                                 uint64_t Align) {
  assert((Align != 0) && "Align is normalized");
  assert(isPowerOf2_64(Align) && "Align is a power of 2");
  switch (Align) {
  case 1:
    return MVT::i8;
  case 2:
    return MVT::i16;
  case 4:
    return MVT::i32;
  default:
    return Subtarget.is64Bit() ? MVT::i64 : MVT::i32;
  }
}

/// Returns a REP MOVS instruction, possibly with a few load/stores to
/// implement a constant-size memory copy. In some cases where we know REP
/// MOVS is inefficient we return an empty SDValue so the calling code can
/// either generate a load/store sequence or call the runtime memcpy function.
static SDValue emitConstantSizeRepmov(
    SelectionDAG &DAG, const X86Subtarget &Subtarget, const SDLoc &dl,
    SDValue Chain, SDValue Dst, SDValue Src, uint64_t Size, EVT SizeVT,
    unsigned Align, bool isVolatile, bool AlwaysInline,
    MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo) {

  // TODO: Revisit next line: big copies with ERMSB on march >= haswell are
  // very efficient.
  if (!AlwaysInline && Size > Subtarget.getMaxInlineSizeThreshold())
    return SDValue();

  // If we have enhanced repmovs, use it.
  if (Subtarget.hasERMSB())
    return emitRepmovsB(Subtarget, DAG, dl, Chain, Dst, Src, Size);

  assert(!Subtarget.hasERMSB() && "No efficient RepMovs");
  // We assume the runtime memcpy will do a better job for unaligned copies
  // when ERMS is not present.
  if (!AlwaysInline && (Align & 3) != 0)
    return SDValue();

  const MVT BlockType = getOptimalRepmovsType(Subtarget, Align);
  const uint64_t BlockBytes = BlockType.getSizeInBits() / 8;
  const uint64_t BlockCount = Size / BlockBytes;
  const uint64_t BytesLeft = Size % BlockBytes;
  SDValue RepMovs =
      emitRepmovs(Subtarget, DAG, dl, Chain, Dst, Src,
                  DAG.getIntPtrConstant(BlockCount, dl), BlockType);

  // RepMovs can process the whole length.
  if (BytesLeft == 0)
    return RepMovs;

  assert(BytesLeft && "We have leftover at this point");

  // When optimizing for size we use repmovsb, even if it is less efficient,
  // so we can save the loads/stores of the leftover.
  if (DAG.getMachineFunction().getFunction().hasMinSize())
    return emitRepmovsB(Subtarget, DAG, dl, Chain, Dst, Src, Size);

  // Handle the last 1 - 7 bytes.
  SmallVector<SDValue, 4> Results;
  Results.push_back(RepMovs);
  unsigned Offset = Size - BytesLeft;
  EVT DstVT = Dst.getValueType();
  EVT SrcVT = Src.getValueType();
  Results.push_back(DAG.getMemcpy(
      Chain, dl,
      DAG.getNode(ISD::ADD, dl, DstVT, Dst, DAG.getConstant(Offset, dl, DstVT)),
      DAG.getNode(ISD::ADD, dl, SrcVT, Src, DAG.getConstant(Offset, dl, SrcVT)),
      DAG.getConstant(BytesLeft, dl, SizeVT), llvm::Align(Align), isVolatile,
      /*AlwaysInline*/ true, /*isTailCall*/ false,
      DstPtrInfo.getWithOffset(Offset), SrcPtrInfo.getWithOffset(Offset)));
  return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Results);
}

SDValue X86SelectionDAGInfo::EmitTargetCodeForMemcpy(
    SelectionDAG &DAG, const SDLoc &dl, SDValue Chain, SDValue Dst, SDValue Src,
    SDValue Size, Align Alignment, bool isVolatile, bool AlwaysInline,
    MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo) const {
  // If either operand is in a segment-relative address space, use the default
  // lowering.
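  // (In the X86 backend, address spaces 256, 257 and 258 denote GS-, FS- and
  // SS-relative pointers; the REP MOVS expansion below implicitly addresses
  // through DS:(E/R)SI and ES:(E/R)DI, so it cannot honor those overrides.)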
  if (DstPtrInfo.getAddrSpace() >= 256 || SrcPtrInfo.getAddrSpace() >= 256)
    return SDValue();

  // If the base register might conflict with the physical registers we
  // clobber, use the default lowering.
  const MCPhysReg ClobberSet[] = {X86::RCX, X86::RSI, X86::RDI,
                                  X86::ECX, X86::ESI, X86::EDI};
  if (isBaseRegConflictPossible(DAG, ClobberSet))
    return SDValue();

  const X86Subtarget &Subtarget =
      DAG.getMachineFunction().getSubtarget<X86Subtarget>();

  // If enabled and available, use fast short rep mov.
  if (UseFSRMForMemcpy && Subtarget.hasFSRM())
    return emitRepmovs(Subtarget, DAG, dl, Chain, Dst, Src, Size, MVT::i8);

  // Handle constant sizes.
  if (ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size))
    return emitConstantSizeRepmov(
        DAG, Subtarget, dl, Chain, Dst, Src, ConstantSize->getZExtValue(),
        Size.getValueType(), Alignment.value(), isVolatile, AlwaysInline,
        DstPtrInfo, SrcPtrInfo);

  return SDValue();
}