//===-- IntrinsicInst.cpp - Intrinsic Instruction Wrappers ---------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements methods that make it really easy to deal with intrinsic
// functions.
//
// All intrinsic function calls are instances of the call instruction, so these
// are all subclasses of the CallInst class. Note that none of these classes
// has state or virtual methods, which is an important part of this gross/neat
// hack working.
//
// In some cases, arguments to intrinsics need to be generic and are defined as
// type pointer to empty struct { }*. To access the real item of interest the
// cast instruction needs to be stripped away.
//
//===----------------------------------------------------------------------===//

#include "llvm/IR/IntrinsicInst.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Statepoint.h"

#include "llvm/Support/raw_ostream.h"
using namespace llvm;

//===----------------------------------------------------------------------===//
/// DbgVariableIntrinsic - This is the common base class for debug info
/// intrinsics for variables.
///

iterator_range<DbgVariableIntrinsic::location_op_iterator>
DbgVariableIntrinsic::location_ops() const {
  auto *MD = getRawLocation();
  assert(MD && "First operand of DbgVariableIntrinsic should be non-null.");

  // If operand is ValueAsMetadata, return a range over just that operand.
  if (auto *VAM = dyn_cast<ValueAsMetadata>(MD)) {
    return {location_op_iterator(VAM), location_op_iterator(VAM + 1)};
  }
  // If operand is DIArgList, return a range over its args.
  if (auto *AL = dyn_cast<DIArgList>(MD))
    return {location_op_iterator(AL->args_begin()),
            location_op_iterator(AL->args_end())};
  // Operand must be an empty metadata tuple, so return an empty iterator.
  return {location_op_iterator(static_cast<ValueAsMetadata *>(nullptr)),
          location_op_iterator(static_cast<ValueAsMetadata *>(nullptr))};
}

Value *DbgVariableIntrinsic::getVariableLocationOp(unsigned OpIdx) const {
  auto *MD = getRawLocation();
  assert(MD && "First operand of DbgVariableIntrinsic should be non-null.");
  if (auto *AL = dyn_cast<DIArgList>(MD))
    return AL->getArgs()[OpIdx]->getValue();
  if (isa<MDNode>(MD))
    return nullptr;
  assert(
      isa<ValueAsMetadata>(MD) &&
      "Attempted to get location operand from DbgVariableIntrinsic with none.");
  auto *V = cast<ValueAsMetadata>(MD);
  assert(OpIdx == 0 && "Operand Index must be 0 for a debug intrinsic with a "
                       "single location operand.");
  return V->getValue();
}
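
// For illustration only (not exercised by this file): the two location
// encodings handled above, using placeholder values %a, %b and a placeholder
// variable reference !var.
//
//   ; Single location operand, wrapped directly as ValueAsMetadata:
//   call void @llvm.dbg.value(metadata i32 %a, metadata !var,
//                             metadata !DIExpression())
//
//   ; Several location operands, wrapped in a DIArgList and referenced from
//   ; the expression via DW_OP_LLVM_arg:
//   call void @llvm.dbg.value(metadata !DIArgList(i32 %a, i32 %b),
//                             metadata !var,
//                             metadata !DIExpression(DW_OP_LLVM_arg, 0,
//                                                    DW_OP_LLVM_arg, 1,
//                                                    DW_OP_plus,
//                                                    DW_OP_stack_value))
//
// A plain MDNode (e.g. an empty tuple) in the location position means the
// intrinsic currently has no location operands, which is why location_ops()
// returns an empty range and getVariableLocationOp() returns nullptr for it.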

static ValueAsMetadata *getAsMetadata(Value *V) {
  return isa<MetadataAsValue>(V) ? dyn_cast<ValueAsMetadata>(
                                       cast<MetadataAsValue>(V)->getMetadata())
                                 : ValueAsMetadata::get(V);
}

void DbgVariableIntrinsic::replaceVariableLocationOp(Value *OldValue,
                                                     Value *NewValue) {
  assert(NewValue && "Values must be non-null");
  auto Locations = location_ops();
  auto OldIt = find(Locations, OldValue);
  assert(OldIt != Locations.end() && "OldValue must be a current location");
  if (!hasArgList()) {
    Value *NewOperand = isa<MetadataAsValue>(NewValue)
                            ? NewValue
                            : MetadataAsValue::get(
                                  getContext(), ValueAsMetadata::get(NewValue));
    return setArgOperand(0, NewOperand);
  }
  SmallVector<ValueAsMetadata *, 4> MDs;
  ValueAsMetadata *NewOperand = getAsMetadata(NewValue);
  for (auto *VMD : Locations)
    MDs.push_back(VMD == *OldIt ? NewOperand : getAsMetadata(VMD));
  setArgOperand(
      0, MetadataAsValue::get(getContext(), DIArgList::get(getContext(), MDs)));
}

void DbgVariableIntrinsic::replaceVariableLocationOp(unsigned OpIdx,
                                                     Value *NewValue) {
  assert(OpIdx < getNumVariableLocationOps() && "Invalid Operand Index");
  if (!hasArgList()) {
    Value *NewOperand = isa<MetadataAsValue>(NewValue)
                            ? NewValue
                            : MetadataAsValue::get(
                                  getContext(), ValueAsMetadata::get(NewValue));
    return setArgOperand(0, NewOperand);
  }
  SmallVector<ValueAsMetadata *, 4> MDs;
  ValueAsMetadata *NewOperand = getAsMetadata(NewValue);
  for (unsigned Idx = 0; Idx < getNumVariableLocationOps(); ++Idx)
    MDs.push_back(Idx == OpIdx ? NewOperand
                               : getAsMetadata(getVariableLocationOp(Idx)));
  setArgOperand(
      0, MetadataAsValue::get(getContext(), DIArgList::get(getContext(), MDs)));
}

void DbgVariableIntrinsic::addVariableLocationOps(ArrayRef<Value *> NewValues,
                                                  DIExpression *NewExpr) {
  assert(NewExpr->hasAllLocationOps(getNumVariableLocationOps() +
                                    NewValues.size()) &&
         "NewExpr for debug variable intrinsic does not reference every "
         "location operand.");
  assert(!is_contained(NewValues, nullptr) && "New values must be non-null");
  setArgOperand(2, MetadataAsValue::get(getContext(), NewExpr));
  SmallVector<ValueAsMetadata *, 4> MDs;
  for (auto *VMD : location_ops())
    MDs.push_back(getAsMetadata(VMD));
  for (auto *VMD : NewValues)
    MDs.push_back(getAsMetadata(VMD));
  setArgOperand(
      0, MetadataAsValue::get(getContext(), DIArgList::get(getContext(), MDs)));
}

Optional<uint64_t> DbgVariableIntrinsic::getFragmentSizeInBits() const {
  if (auto Fragment = getExpression()->getFragmentInfo())
    return Fragment->SizeInBits;
  return getVariable()->getSizeInBits();
}

int llvm::Intrinsic::lookupLLVMIntrinsicByName(ArrayRef<const char *> NameTable,
                                               StringRef Name) {
  assert(Name.startswith("llvm."));

  // Do successive binary searches of the dotted name components. For
  // "llvm.gc.experimental.statepoint.p1i8.p1i32", we will find the range of
  // intrinsics starting with "llvm.gc", then "llvm.gc.experimental", then
  // "llvm.gc.experimental.statepoint", and then we will stop as the range is
  // size 1. During the search, we can skip the prefix that we already know is
  // identical. By using strncmp we consider names with differing suffixes to
  // be part of the equal range.
  size_t CmpEnd = 4; // Skip the "llvm" component.
  const char *const *Low = NameTable.begin();
  const char *const *High = NameTable.end();
  const char *const *LastLow = Low;
  while (CmpEnd < Name.size() && High - Low > 0) {
    size_t CmpStart = CmpEnd;
    CmpEnd = Name.find('.', CmpStart + 1);
    CmpEnd = CmpEnd == StringRef::npos ? Name.size() : CmpEnd;
    auto Cmp = [CmpStart, CmpEnd](const char *LHS, const char *RHS) {
      return strncmp(LHS + CmpStart, RHS + CmpStart, CmpEnd - CmpStart) < 0;
    };
    LastLow = Low;
    std::tie(Low, High) = std::equal_range(Low, High, Name.data(), Cmp);
  }
  if (High - Low > 0)
    LastLow = Low;

  if (LastLow == NameTable.end())
    return -1;
  StringRef NameFound = *LastLow;
  if (Name == NameFound ||
      (Name.startswith(NameFound) && Name[NameFound.size()] == '.'))
    return LastLow - NameTable.begin();
  return -1;
}

Value *InstrProfIncrementInst::getStep() const {
  if (InstrProfIncrementInstStep::classof(this)) {
    return const_cast<Value *>(getArgOperand(4));
  }
  const Module *M = getModule();
  LLVMContext &Context = M->getContext();
  return ConstantInt::get(Type::getInt64Ty(Context), 1);
}

Optional<RoundingMode> ConstrainedFPIntrinsic::getRoundingMode() const {
  unsigned NumOperands = getNumArgOperands();
  Metadata *MD = nullptr;
  auto *MAV = dyn_cast<MetadataAsValue>(getArgOperand(NumOperands - 2));
  if (MAV)
    MD = MAV->getMetadata();
  if (!MD || !isa<MDString>(MD))
    return None;
  return StrToRoundingMode(cast<MDString>(MD)->getString());
}

Optional<fp::ExceptionBehavior>
ConstrainedFPIntrinsic::getExceptionBehavior() const {
  unsigned NumOperands = getNumArgOperands();
  Metadata *MD = nullptr;
  auto *MAV = dyn_cast<MetadataAsValue>(getArgOperand(NumOperands - 1));
  if (MAV)
    MD = MAV->getMetadata();
  if (!MD || !isa<MDString>(MD))
    return None;
  return StrToExceptionBehavior(cast<MDString>(MD)->getString());
}
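
// Illustrative constrained-FP call (placeholder values; not part of this file)
// showing where the metadata operands read above live:
//
//   %r = call double @llvm.experimental.constrained.fadd.f64(
//            double %a, double %b,
//            metadata !"round.dynamic", metadata !"fpexcept.strict")
//
// For intrinsics that take them, the rounding-mode string is the
// second-to-last operand and the exception-behavior string is the last one;
// the accessors above return None when the operand at that position is not an
// MDString.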

bool ConstrainedFPIntrinsic::isDefaultFPEnvironment() const {
  Optional<fp::ExceptionBehavior> Except = getExceptionBehavior();
  if (Except) {
    if (Except.getValue() != fp::ebIgnore)
      return false;
  }

  Optional<RoundingMode> Rounding = getRoundingMode();
  if (Rounding) {
    if (Rounding.getValue() != RoundingMode::NearestTiesToEven)
      return false;
  }

  return true;
}

FCmpInst::Predicate ConstrainedFPCmpIntrinsic::getPredicate() const {
  Metadata *MD = cast<MetadataAsValue>(getArgOperand(2))->getMetadata();
  if (!MD || !isa<MDString>(MD))
    return FCmpInst::BAD_FCMP_PREDICATE;
  return StringSwitch<FCmpInst::Predicate>(cast<MDString>(MD)->getString())
      .Case("oeq", FCmpInst::FCMP_OEQ)
      .Case("ogt", FCmpInst::FCMP_OGT)
      .Case("oge", FCmpInst::FCMP_OGE)
      .Case("olt", FCmpInst::FCMP_OLT)
      .Case("ole", FCmpInst::FCMP_OLE)
      .Case("one", FCmpInst::FCMP_ONE)
      .Case("ord", FCmpInst::FCMP_ORD)
      .Case("uno", FCmpInst::FCMP_UNO)
      .Case("ueq", FCmpInst::FCMP_UEQ)
      .Case("ugt", FCmpInst::FCMP_UGT)
      .Case("uge", FCmpInst::FCMP_UGE)
      .Case("ult", FCmpInst::FCMP_ULT)
      .Case("ule", FCmpInst::FCMP_ULE)
      .Case("une", FCmpInst::FCMP_UNE)
      .Default(FCmpInst::BAD_FCMP_PREDICATE);
}

bool ConstrainedFPIntrinsic::isUnaryOp() const {
  switch (getIntrinsicID()) {
  default:
    return false;
#define INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC)                         \
  case Intrinsic::INTRINSIC:                                                   \
    return NARG == 1;
#include "llvm/IR/ConstrainedOps.def"
  }
}

bool ConstrainedFPIntrinsic::isTernaryOp() const {
  switch (getIntrinsicID()) {
  default:
    return false;
#define INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC)                         \
  case Intrinsic::INTRINSIC:                                                   \
    return NARG == 3;
#include "llvm/IR/ConstrainedOps.def"
  }
}

bool ConstrainedFPIntrinsic::classof(const IntrinsicInst *I) {
  switch (I->getIntrinsicID()) {
#define INSTRUCTION(NAME, NARGS, ROUND_MODE, INTRINSIC)                        \
  case Intrinsic::INTRINSIC:
#include "llvm/IR/ConstrainedOps.def"
    return true;
  default:
    return false;
  }
}
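
// Illustrative VP (vector-predicated) call with placeholder values, showing
// the trailing mask and explicit-vector-length operands that the accessors
// below locate via VPIntrinsics.def:
//
//   %r = call <8 x i32> @llvm.vp.add.v8i32(<8 x i32> %x, <8 x i32> %y,
//                                          <8 x i1> %mask, i32 %evl)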

ElementCount VPIntrinsic::getStaticVectorLength() const {
  auto GetVectorLengthOfType = [](const Type *T) -> ElementCount {
    const auto *VT = cast<VectorType>(T);
    auto ElemCount = VT->getElementCount();
    return ElemCount;
  };

  Value *VPMask = getMaskParam();
  assert(VPMask && "No mask param?");
  return GetVectorLengthOfType(VPMask->getType());
}

Value *VPIntrinsic::getMaskParam() const {
  if (auto MaskPos = getMaskParamPos(getIntrinsicID()))
    return getArgOperand(MaskPos.getValue());
  return nullptr;
}

void VPIntrinsic::setMaskParam(Value *NewMask) {
  auto MaskPos = getMaskParamPos(getIntrinsicID());
  setArgOperand(*MaskPos, NewMask);
}

Value *VPIntrinsic::getVectorLengthParam() const {
  if (auto EVLPos = getVectorLengthParamPos(getIntrinsicID()))
    return getArgOperand(EVLPos.getValue());
  return nullptr;
}

void VPIntrinsic::setVectorLengthParam(Value *NewEVL) {
  auto EVLPos = getVectorLengthParamPos(getIntrinsicID());
  setArgOperand(*EVLPos, NewEVL);
}

Optional<unsigned> VPIntrinsic::getMaskParamPos(Intrinsic::ID IntrinsicID) {
  switch (IntrinsicID) {
  default:
    return None;

#define BEGIN_REGISTER_VP_INTRINSIC(VPID, MASKPOS, VLENPOS)                    \
  case Intrinsic::VPID:                                                        \
    return MASKPOS;
#include "llvm/IR/VPIntrinsics.def"
  }
}

Optional<unsigned>
VPIntrinsic::getVectorLengthParamPos(Intrinsic::ID IntrinsicID) {
  switch (IntrinsicID) {
  default:
    return None;

#define BEGIN_REGISTER_VP_INTRINSIC(VPID, MASKPOS, VLENPOS)                    \
  case Intrinsic::VPID:                                                        \
    return VLENPOS;
#include "llvm/IR/VPIntrinsics.def"
  }
}

/// \return The alignment of the pointer used by this load, store, gather or
/// scatter.
MaybeAlign VPIntrinsic::getPointerAlignment() const {
  Optional<unsigned> PtrParamOpt = getMemoryPointerParamPos(getIntrinsicID());
  assert(PtrParamOpt.hasValue() && "no pointer argument!");
  return getParamAlign(PtrParamOpt.getValue());
}

/// \return The pointer operand of this load, store, gather or scatter.
Value *VPIntrinsic::getMemoryPointerParam() const {
  if (auto PtrParamOpt = getMemoryPointerParamPos(getIntrinsicID()))
    return getArgOperand(PtrParamOpt.getValue());
  return nullptr;
}

Optional<unsigned> VPIntrinsic::getMemoryPointerParamPos(Intrinsic::ID VPID) {
  switch (VPID) {
  default:
    return None;

#define HANDLE_VP_IS_MEMOP(VPID, POINTERPOS, DATAPOS)                          \
  case Intrinsic::VPID:                                                        \
    return POINTERPOS;
#include "llvm/IR/VPIntrinsics.def"
  }
}

/// \return The data (payload) operand of this store or scatter.
Value *VPIntrinsic::getMemoryDataParam() const {
  auto DataParamOpt = getMemoryDataParamPos(getIntrinsicID());
  if (!DataParamOpt.hasValue())
    return nullptr;
  return getArgOperand(DataParamOpt.getValue());
}

Optional<unsigned> VPIntrinsic::getMemoryDataParamPos(Intrinsic::ID VPID) {
  switch (VPID) {
  default:
    return None;

#define HANDLE_VP_IS_MEMOP(VPID, POINTERPOS, DATAPOS)                          \
  case Intrinsic::VPID:                                                        \
    return DATAPOS;
#include "llvm/IR/VPIntrinsics.def"
  }
}

bool VPIntrinsic::isVPIntrinsic(Intrinsic::ID ID) {
  switch (ID) {
  default:
    return false;

#define BEGIN_REGISTER_VP_INTRINSIC(VPID, MASKPOS, VLENPOS)                    \
  case Intrinsic::VPID:                                                        \
    break;
#include "llvm/IR/VPIntrinsics.def"
  }
  return true;
}

// Equivalent non-predicated opcode.
Optional<unsigned> VPIntrinsic::getFunctionalOpcodeForVP(Intrinsic::ID ID) {
  Optional<unsigned> FunctionalOC;
  switch (ID) {
  default:
    break;
#define BEGIN_REGISTER_VP_INTRINSIC(VPID, ...) case Intrinsic::VPID:
#define HANDLE_VP_TO_OPC(OPC) FunctionalOC = Instruction::OPC;
#define END_REGISTER_VP_INTRINSIC(...) break;
#include "llvm/IR/VPIntrinsics.def"
  }

  return FunctionalOC;
}

Intrinsic::ID VPIntrinsic::getForOpcode(unsigned IROPC) {
  switch (IROPC) {
  default:
    return Intrinsic::not_intrinsic;

#define HANDLE_VP_TO_OPC(OPC) case Instruction::OPC:
#define END_REGISTER_VP_INTRINSIC(VPID) return Intrinsic::VPID;
#include "llvm/IR/VPIntrinsics.def"
  }
}
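
// For example (informal): an EVL of 8 on a <8 x i32> operation leaves no lane
// disabled by the EVL, and on a <vscale x 2 x i32> operation an EVL matching
// "mul (vscale, 2)" is likewise a full-length EVL. The function below detects
// exactly these statically provable cases.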

bool VPIntrinsic::canIgnoreVectorLengthParam() const {
  using namespace PatternMatch;

  ElementCount EC = getStaticVectorLength();

  // No vlen param - no lanes are masked off by it.
  auto *VLParam = getVectorLengthParam();
  if (!VLParam)
    return true;

  // Note that the VP intrinsic causes undefined behavior if the Explicit
  // Vector Length parameter is strictly greater than the number of vector
  // elements of the operation. This function returns true when this is
  // detected statically in the IR.

  // Check whether "W == vscale * EC.getKnownMinValue()".
  if (EC.isScalable()) {
    // Dig out the DataLayout.
    const auto *ParMod = this->getModule();
    if (!ParMod)
      return false;
    const auto &DL = ParMod->getDataLayout();

    // Compare against the vscale-based patterns.
    uint64_t VScaleFactor;
    if (match(VLParam, m_c_Mul(m_ConstantInt(VScaleFactor), m_VScale(DL))))
      return VScaleFactor >= EC.getKnownMinValue();
    return (EC.getKnownMinValue() == 1) && match(VLParam, m_VScale(DL));
  }

  // Fixed-width SIMD operation.
  const auto *VLConst = dyn_cast<ConstantInt>(VLParam);
  if (!VLConst)
    return false;

  uint64_t VLNum = VLConst->getZExtValue();
  if (VLNum >= EC.getKnownMinValue())
    return true;

  return false;
}

Function *VPIntrinsic::getDeclarationForParams(Module *M, Intrinsic::ID VPID,
                                               ArrayRef<Value *> Params) {
  assert(isVPIntrinsic(VPID) && "not a VP intrinsic");
  Function *VPFunc;
  switch (VPID) {
  default:
    VPFunc = Intrinsic::getDeclaration(M, VPID, Params[0]->getType());
    break;
  case Intrinsic::vp_load:
    VPFunc = Intrinsic::getDeclaration(
        M, VPID,
        {Params[0]->getType()->getPointerElementType(), Params[0]->getType()});
    break;
  case Intrinsic::vp_gather:
    VPFunc = Intrinsic::getDeclaration(
        M, VPID,
        {VectorType::get(cast<VectorType>(Params[0]->getType())
                             ->getElementType()
                             ->getPointerElementType(),
                         cast<VectorType>(Params[0]->getType())),
         Params[0]->getType()});
    break;
  case Intrinsic::vp_store:
    VPFunc = Intrinsic::getDeclaration(
        M, VPID,
        {Params[1]->getType()->getPointerElementType(), Params[1]->getType()});
    break;
  case Intrinsic::vp_scatter:
    VPFunc = Intrinsic::getDeclaration(
        M, VPID, {Params[0]->getType(), Params[1]->getType()});
    break;
  }
  assert(VPFunc && "Could not declare VP intrinsic");
  return VPFunc;
}
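
// Illustrative calls (placeholder values) for the arithmetic intrinsics that
// the BinaryOpIntrinsic helpers below classify:
//
//   %s = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %a, i32 %b)
//   %u = call i32 @llvm.uadd.sat.i32(i32 %a, i32 %b)
//
// getBinaryOp() maps these onto Instruction::Add/Sub/Mul, isSigned()
// distinguishes the signed variants, and getNoWrapKind() picks the matching
// no-wrap flag.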

Instruction::BinaryOps BinaryOpIntrinsic::getBinaryOp() const {
  switch (getIntrinsicID()) {
  case Intrinsic::uadd_with_overflow:
  case Intrinsic::sadd_with_overflow:
  case Intrinsic::uadd_sat:
  case Intrinsic::sadd_sat:
    return Instruction::Add;
  case Intrinsic::usub_with_overflow:
  case Intrinsic::ssub_with_overflow:
  case Intrinsic::usub_sat:
  case Intrinsic::ssub_sat:
    return Instruction::Sub;
  case Intrinsic::umul_with_overflow:
  case Intrinsic::smul_with_overflow:
    return Instruction::Mul;
  default:
    llvm_unreachable("Invalid intrinsic");
  }
}

bool BinaryOpIntrinsic::isSigned() const {
  switch (getIntrinsicID()) {
  case Intrinsic::sadd_with_overflow:
  case Intrinsic::ssub_with_overflow:
  case Intrinsic::smul_with_overflow:
  case Intrinsic::sadd_sat:
  case Intrinsic::ssub_sat:
    return true;
  default:
    return false;
  }
}

unsigned BinaryOpIntrinsic::getNoWrapKind() const {
  if (isSigned())
    return OverflowingBinaryOperator::NoSignedWrap;
  else
    return OverflowingBinaryOperator::NoUnsignedWrap;
}

const GCStatepointInst *GCProjectionInst::getStatepoint() const {
  const Value *Token = getArgOperand(0);

  // This handles both relocates for call statepoints and relocates on the
  // normal path of an invoke statepoint.
  if (!isa<LandingPadInst>(Token))
    return cast<GCStatepointInst>(Token);

  // This relocate is on the exceptional path of an invoke statepoint.
  const BasicBlock *InvokeBB =
      cast<Instruction>(Token)->getParent()->getUniquePredecessor();

  assert(InvokeBB && "safepoints should have unique landingpads");
  assert(InvokeBB->getTerminator() &&
         "safepoint block should be well formed");

  return cast<GCStatepointInst>(InvokeBB->getTerminator());
}

Value *GCRelocateInst::getBasePtr() const {
  if (auto Opt = getStatepoint()->getOperandBundle(LLVMContext::OB_gc_live))
    return *(Opt->Inputs.begin() + getBasePtrIndex());
  return *(getStatepoint()->arg_begin() + getBasePtrIndex());
}

Value *GCRelocateInst::getDerivedPtr() const {
  if (auto Opt = getStatepoint()->getOperandBundle(LLVMContext::OB_gc_live))
    return *(Opt->Inputs.begin() + getDerivedPtrIndex());
  return *(getStatepoint()->arg_begin() + getDerivedPtrIndex());
}