Matching lines from llvm/lib/Target/X86/X86SelectionDAGInfo.cpp (search terms: "power", "efficient"):
//===-- X86SelectionDAGInfo.cpp - X86 SelectionDAG Info -------------------===//
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "x86-selectiondag-info"

    UseFSRMForMemcpy("x86-use-fsrm-for-memcpy", cl::Hidden, cl::init(false),

// In isBaseRegConflictPossible():
  // We cannot use TRI->hasBasePointer() until *after* we select all basic
  return llvm::is_contained(ClobberSet, TRI->getBaseRegister());

// In EmitTargetCodeForMemset():
  // If to a segment-relative address space, use the default lowering.
  ConstantSize->getZExtValue() > Subtarget.getMaxInlineSizeThreshold())
  uint64_t SizeVal = ConstantSize->getZExtValue();
  uint64_t Val = ValC->getZExtValue() & 255;
  // Handle the last 1 - 7 bytes.
  unsigned Offset = SizeVal - BytesLeft;
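
// ---- Illustrative aside, not part of the file ------------------------------
// The `ValC->getZExtValue() & 255` line above reduces the memset value to a
// single byte; to store 2, 4 or 8 bytes per iteration that byte has to be
// replicated across a wider register. A minimal standalone sketch of that
// byte-splat step (splatByte is a made-up name, not an LLVM helper):

#include <cstdint>

// Replicate the low byte of Val across Bytes bytes.
static uint64_t splatByte(uint64_t Val, unsigned Bytes) {
  Val &= 255;
  uint64_t Splat = 0;
  for (unsigned I = 0; I != Bytes; ++I)
    Splat = (Splat << 8) | Val;
  return Splat;
}

// splatByte(0xAB, 4) == 0xABABABAB: the 32-bit pattern one wide store writes.
// -----------------------------------------------------------------------------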

// In getOptimalRepmovsType():
  assert(isPowerOf2_64(Align) && "Align is a power of 2");
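
// ---- Illustrative aside, not part of the file ------------------------------
// The assert above relies on the alignment having been normalized to a power
// of two; the helper then picks the widest block the alignment allows for the
// rep string instruction. A simplified standalone version of that choice
// (blockBytesForAlign is a made-up name; the real helper returns an MVT):

#include <algorithm>
#include <cstdint>

static unsigned blockBytesForAlign(uint64_t Align, bool Is64Bit) {
  // Align is assumed to be a non-zero power of two.
  unsigned Max = Is64Bit ? 8 : 4; // widest general-purpose store
  return static_cast<unsigned>(std::min<uint64_t>(Align, Max));
}

// blockBytesForAlign(16, true) == 8 (rep movsq);
// blockBytesForAlign(2, true) == 2 (rep movsw).
// -----------------------------------------------------------------------------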

// In emitConstantSizeRepmov():
/// efficient.
  assert(!Subtarget.hasERMSB() && "No efficient RepMovs");
  /// In case we optimize for size we use repmovsb even if it's less efficient
  // Handle the last 1 - 7 bytes.
  unsigned Offset = Size - BytesLeft;
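
// ---- Illustrative aside, not part of the file ------------------------------
// Both the memset and the repmovs paths above split a constant size into a
// number of wide rep iterations plus a 1-7 byte tail that starts at
// Size - BytesLeft. A standalone model of that arithmetic (RepSplit and
// splitForRep are made-up names):

#include <cstdint>

struct RepSplit {
  uint64_t Count;     // rep iterations at the wide block width
  uint64_t BytesLeft; // trailing bytes handled with narrower stores
  uint64_t Offset;    // byte offset where the tail begins
};

static RepSplit splitForRep(uint64_t Size, unsigned BlockBytes) {
  return {Size / BlockBytes, Size % BlockBytes, Size - Size % BlockBytes};
}

// splitForRep(29, 8) -> {Count=3, BytesLeft=5, Offset=24}: three quadword
// iterations cover bytes 0-23, then the remaining 5 bytes are copied with a
// 4-byte and a 1-byte access starting at offset 24.
// -----------------------------------------------------------------------------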

// In EmitTargetCodeForMemcpy():
  // If to a segment-relative address space, use the default lowering.
  ConstantSize->getZExtValue(),