#if SMP_DEBUG_CONTROLFLOW
SMP_msg("SMPFunction::Analyze: set stack frame info.\n");
#endif
if (!(this->HasSharedChunks())) {
this->SetStackFrameInfo();
} // end if not shared chunks
else { // has shared chunks; still want to compute stack frame info
#ifdef SMP_DEBUG_FUNC
SMP_msg(" %s has shared chunks \n", this->GetFuncName());
#endif
// Figure out the stack frame and related info.
this->SetStackFrameInfo();
}
#if SMP_COUNT_MEMORY_ALLOCATIONS
SMPInstCount += ((unsigned long) this->Instrs.size());
SMPBlockCount += ((unsigned long) this->Blocks.size());
SMPLocalVarCount += ((unsigned long) this->LocalVarTable.size());
#endif
} // end of SMPFunction::AdvancedAnalysis()
// Count call targets that have not been processed.
size_t SMPFunction::UnprocessedCalleesCount(void) {
size_t UnprocessedTargetsCount = 0;
size_t TargetIndex;
for (TargetIndex = 0; TargetIndex < this->AllCallTargets.size(); ++TargetIndex) {
SMPFunction *CurrTarget = this->GetProg()->FindFunction(this->AllCallTargets.at(TargetIndex));
if (NULL == CurrTarget) {
#if 0
// Bad call targets are removed in AdvancedAnalysis(), which comes later.
SMP_msg("ERROR: NULL CallTarget in UnprocessedCalleesCount() at TargetIndex %zu \n", TargetIndex);
#endif
}
else if (!(CurrTarget->IsFuncProcessed())) {
++UnprocessedTargetsCount;
}
}
return UnprocessedTargetsCount;
} // end of SMPFunction::UnprocessedCalleesCount()
ea_t SMPFunction::GetFirstUnprocessedCallee(void) {
ea_t CalleeAddr = BADADDR;
size_t TargetIndex;
for (TargetIndex = 0; TargetIndex < this->AllCallTargets.size(); ++TargetIndex) {
ea_t TargetAddr = this->AllCallTargets.at(TargetIndex);
SMPFunction *CurrTarget = this->GetProg()->FindFunction(TargetAddr);
if ((NULL != CurrTarget) && (!(CurrTarget->IsFuncProcessed()))) {
CalleeAddr = TargetAddr;
break;
}
}
return CalleeAddr;
} // end of SMPFunction::GetFirstUnprocessedCallee()
// Is the code starting at TargetAddr a non-shared chunk that jumps back into our function?
// If so, it can be incorporated into our function rather than treated as a separate function.
// This method is called only when we see a jump outside our function, and it is looking for
// code fragments that are not really functions (i.e. don't have a stack frame, jump straight back
// into our function after executing a few instructions, not a chunk shared among other functions).
// These code fragments are found in the locking and unlocking code of the gcc stdlib, for example.
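// Illustrative sketch (hypothetical addresses and registers) of a fragment this
// method is meant to accept:
//     frag:   lock cmpxchg [ebx], ecx   ; a few instructions, no frame allocation
//             jmp  loc_8048A10          ; unconditional jump back into our function
// A fragment that instead contains a return or a stack frame allocation (e.g. "sub esp, N")
// is rejected by the checks below.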
bool SMPFunction::FindDistantCodeFragment(ea_t TargetAddr) {
bool PrivateFragment = false;
func_t *TargetFunc = get_func(TargetAddr);
if (TargetFunc) {
// Determine if we are dealing with shared chunks.
size_t ChunkCounter = 0;
func_tail_iterator_t FuncTail(TargetFunc);
for (bool ChunkOK = FuncTail.main(); ChunkOK; ChunkOK = FuncTail.next()) {
++ChunkCounter;
}
if (1 < ChunkCounter) {
SMP_msg("INFO: Code fragment at %x is shared chunk.\n", TargetAddr);
}
else {
bool JumpsBackIntoCurrentFunc = false;
bool HasReturnInstruction = false;
bool AllocatesStackFrame = false;
for (bool ChunkOK = FuncTail.main(); ChunkOK; ChunkOK = FuncTail.next()) {
const area_t &CurrChunk = FuncTail.chunk();
++ChunkCounter;
// Build the instruction and block lists for the function.
for (ea_t addr = CurrChunk.startEA; addr < CurrChunk.endEA; addr = get_item_end(addr)) {
flags_t InstrFlags = getFlags(addr);
if (isHead(InstrFlags) && isCode(InstrFlags)) {
SMPInstr *CurrInst = new SMPInstr(addr);
// Fill in the instruction data members.
CurrInst->Analyze();
// Search for two negative indicators (stack allocations and returns)
// and one positive indicator (jump back into this function).
if (CurrInst->MDIsReturnInstr()) {
HasReturnInstruction = true;
break;
}
else if (CurrInst->MDIsFrameAllocInstr()) {
AllocatesStackFrame = true;
break;
}
else {
SMPitype FlowType = CurrInst->GetDataFlowType();
if ((JUMP == FlowType) || (INDIR_JUMP == FlowType)) {
if (CurrInst->BuildRTL()) {
ea_t FragmentJumpTarget = CurrInst->GetJumpTarget();
if ((FragmentJumpTarget >= this->FirstEA) && (FragmentJumpTarget <= this->FuncInfo.endEA)) {
JumpsBackIntoCurrentFunc = true;
}
}
}
}
} // end if isHead() and isCode()
} // end for all addrs in chunk
} // end for all chunks (there will only be one)
PrivateFragment = (JumpsBackIntoCurrentFunc && (!HasReturnInstruction) && (!AllocatesStackFrame));
} // end if (1 < ChunkCounter) ... else ...
} // end if (TargetFunc)
return PrivateFragment;
} // end of SMPFunction::FindDistantCodeFragment()
// Free memory that is no longer needed after loop 2 of SMPProgram::Analyze().
void SMPFunction::FreeUnusedMemory2(void) {
size_t UnusedElements;
size_t CurrSize;
// Go through the vector containers and shrink excess capacity, if the vector
// has been fully computed by the time SMPProgram::Analyze() loop 2 completes.
CurrSize = this->DirectCallTargets.size();
UnusedElements = this->DirectCallTargets.capacity() - CurrSize;
if (0 < UnusedElements) {
UnusedIntCount += (unsigned long) UnusedElements;
#if SMP_SHRINK_TO_FIT
std::vector<ea_t>(this->DirectCallTargets).swap(this->DirectCallTargets);
#else
this->DirectCallTargets.resize(CurrSize);
#endif
}
CurrSize = this->IndirectCallTargets.size();
UnusedElements = this->IndirectCallTargets.capacity() - CurrSize;
if (0 < UnusedElements) {
UnusedIntCount += (unsigned long) UnusedElements;
#if SMP_SHRINK_TO_FIT
std::vector<ea_t>(this->IndirectCallTargets).swap(this->IndirectCallTargets);
#else
this->IndirectCallTargets.resize(CurrSize);
#endif
}
CurrSize = this->AllCallTargets.size();
UnusedElements = this->AllCallTargets.capacity() - CurrSize;
if (0 < UnusedElements) {
UnusedIntCount += (unsigned long) UnusedElements;
#if SMP_SHRINK_TO_FIT
std::vector<ea_t>(this->AllCallTargets).swap(this->AllCallTargets);
#else
this->AllCallTargets.resize(CurrSize);
#endif
}
CurrSize = this->SavedRegLoc.size();
UnusedElements = this->SavedRegLoc.capacity() - CurrSize;
if (0 < UnusedElements) {
UnusedIntCount += (unsigned long) UnusedElements;
#if SMP_SHRINK_TO_FIT
std::vector<int>(this->SavedRegLoc).swap(this->SavedRegLoc);
#else
this->SavedRegLoc.resize(CurrSize);
#endif
}
CurrSize = this->RPOBlocks.size();
UnusedElements = this->RPOBlocks.capacity() - CurrSize;
if (0 < UnusedElements) {
SMPBasicBlock *DummyBlock = NULL;
UnusedIntCount += (unsigned long) UnusedElements;
#if SMP_SHRINK_TO_FIT
std::vector<SMPBasicBlock *>(this->RPOBlocks).swap(this->RPOBlocks);
#else
this->RPOBlocks.resize(CurrSize, DummyBlock);
#endif
}
CurrSize = this->LocalVarTable.size();
UnusedElements = this->LocalVarTable.capacity() - CurrSize;
if (0 < UnusedElements) {
struct LocalVar DummyVar;
DummyVar.offset = 0;
DummyVar.size = 0;
UnusedStructCount += (unsigned long) UnusedElements;
#if SMP_SHRINK_TO_FIT
std::vector<struct LocalVar>(this->LocalVarTable).swap(this->LocalVarTable);
#else
this->LocalVarTable.resize(CurrSize, DummyVar);
#endif
}
CurrSize = this->StackFrameMap.size();
UnusedElements = this->StackFrameMap.capacity() - CurrSize;
if (0 < UnusedElements) {
struct StackFrameEntry DummyEntry;
DummyEntry.offset = 0;
DummyEntry.VarPtr = NULL;
UnusedStructCount += (unsigned long) UnusedElements;
#if SMP_SHRINK_TO_FIT
std::vector<struct StackFrameEntry>(this->StackFrameMap).swap(this->StackFrameMap);
#else
this->StackFrameMap.resize(CurrSize, DummyEntry);
#endif
}
CurrSize = this->FineGrainedStackTable.size();
UnusedElements = this->FineGrainedStackTable.capacity() - CurrSize;
if (0 < UnusedElements) {
struct FineGrainedInfo DummyFG;
DummyFG.SignMiscInfo = 0;
DummyFG.SizeInfo = 0;
UnusedStructCount += (unsigned long) UnusedElements;
#if SMP_SHRINK_TO_FIT
std::vector<struct FineGrainedInfo>(this->FineGrainedStackTable).swap(this->FineGrainedStackTable);
#else
this->FineGrainedStackTable.resize(CurrSize, DummyFG);
#endif
}
return;
} // end of SMPFunction::FreeUnusedMemory2()
// Free memory that is no longer needed after loop 3 of SMPProgram::Analyze().
void SMPFunction::FreeUnusedMemory3(void) {
size_t UnusedElements;
size_t CurrSize;
// Go through the vector containers and shrink excess capacity, if the vector
// has been fully computed by the time SMPProgram::Analyze() loop 3 completes.
CurrSize = this->ReturnRegTypes.size();
UnusedElements = this->ReturnRegTypes.capacity() - CurrSize;
if (0 < UnusedElements) {
UnusedIntCount += (unsigned long) UnusedElements;
#if SMP_SHRINK_TO_FIT
std::vector<SMPOperandType>(this->ReturnRegTypes).swap(this->ReturnRegTypes);
#else
this->ReturnRegTypes.resize(CurrSize);
#endif
}
return;
} // end of SMPFunction::FreeUnusedMemory3()
// Free memory that is no longer needed after type inference (loop 4 of SMPProgram::Analyze()).
void SMPFunction::FreeUnusedMemory4(void) {
this->KillSet.clear();
this->LiveOutSet.clear();
this->LiveInSet.clear();
this->StackFrameMap.clear();
this->BlocksDefinedIn.clear();
#if SMP_SHRINK_TO_FIT
std::set<op_t, LessOp>(this->KillSet).swap(this->KillSet);
std::set<op_t, LessOp>(this->LiveOutSet).swap(this->LiveOutSet);
std::set<op_t, LessOp>(this->LiveInSet).swap(this->LiveInSet);
#endif
list<SMPBasicBlock *>::iterator BlockIter;
for (BlockIter = this->Blocks.begin(); BlockIter != this->Blocks.end(); ++BlockIter) {
(*BlockIter)->FreeUnusedMemory4();
}
return;
} // end of SMPFunction::FreeUnusedMemory4()
// Free SSA data structures that are no longer needed when all SSA numbers have
// been recorded in DEFs and USEs.
void SMPFunction::FreeSSAMemory(void) {
this->IDom.clear();
this->DomTree.clear();
this->BlocksDefinedIn.clear();
this->SSACounter.clear();
this->SSAStack.clear();
#if SMP_SHRINK_TO_FIT
vector<int>(this->IDom).swap(this->IDom);
vector<pair<int, list<int> > >(this->DomTree).swap(this->DomTree);
vector<list<int> >(this->BlocksDefinedIn).swap(this->BlocksDefinedIn);
vector<int>(this->SSACounter).swap(this->SSACounter);
vector<list<int> >(this->SSAStack).swap(this->SSAStack);
#endif
list<SMPBasicBlock *>::iterator BlockIter;
for (BlockIter = this->Blocks.begin(); BlockIter != this->Blocks.end(); ++BlockIter) {
(*BlockIter)->FreeSSAMemory();
}
return;
} // end of SMPFunction::FreeSSAMemory()
// For each instruction, mark the non-flags-reg DEFs as having live
// metadata (mmStrata needs to fetch and track this metadata for this
// instruction) or dead metadata (won't be used as addressing reg, won't
// be stored to memory, won't be returned to caller).
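// Example (illustrative only): in the sequence "mov eax, [ebx]; add eax, 4; mov [ecx], eax"
// the DEF of eax has live metadata because eax is later stored to memory, whereas a DEF
// that is only consumed by a compare-and-branch and never escapes would have dead metadata.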
void SMPFunction::AnalyzeMetadataLiveness(void) {
bool changed;
int BaseReg;
int IndexReg;
ushort ScaleFactor;
ea_t offset;
op_t BaseOp = InitOp, IndexOp = InitOp, ReturnOp = InitOp;
BaseOp.type = o_reg;
IndexOp.type = o_reg;
ReturnOp.type = o_reg;
list<SMPInstr *>::iterator InstIter;
list<SMPInstr *>::reverse_iterator RevInstIter;
set<DefOrUse, LessDefUse>::iterator CurrDef;
set<DefOrUse, LessDefUse>::iterator CurrUse;
set<DefOrUse, LessDefUse>::iterator NextUse;
bool UseFP = this->UsesFramePointer();
int IterationCount = 0;
bool DebugFlag = false;
#if SMP_DEBUG_DATAFLOW
if (0 == strcmp("uw_frame_state_for", this->GetFuncName())) {
DebugFlag = true;
}
#endif
do {
changed = false;
++IterationCount;
bool SafeMemDest;
if (DebugFlag) {
SMP_msg("AnalyzeMetadataLiveness iteration count: %d \n", IterationCount);
}
for (RevInstIter = this->Instrs.rbegin(); RevInstIter != this->Instrs.rend(); ++RevInstIter) {
SMPInstr *CurrInst = (*RevInstIter);
ea_t InstAddr = CurrInst->GetAddr();
SafeMemDest = false; // true for some SafeFunc instructions
// Skip the SSA marker instruction.
if (NN_fnop == CurrInst->GetCmd().itype)
continue;
if (DebugFlag) {
SMP_msg("Inst addr: %x \n", CurrInst->GetAddr());
}
CurrDef = CurrInst->GetFirstDef();
while (CurrDef != CurrInst->GetLastDef()) {
if (DEF_METADATA_UNANALYZED == CurrDef->GetMetadataStatus()) {
op_t DefOp = CurrDef->GetOp();
// Handle special registers never used as address regs.
if (DefOp.is_reg(X86_FLAGS_REG)
|| ((o_trreg <= DefOp.type) && (o_xmmreg >= DefOp.type))) {
CurrDef = CurrInst->SetDefMetadata(DefOp,
DEF_METADATA_UNUSED);
changed = true;
}
else if (MDIsStackOrFramePointerReg(DefOp, UseFP)) {
// Stack pointer register DEFs always have live
// metadata, but we don't need to propagate back
// through particular DEF-USE chains.
CurrDef = CurrInst->SetDefMetadata(DefOp, DEF_METADATA_USED);
changed = true;
}
else if ((o_mem <= DefOp.type) && (o_displ >= DefOp.type)) {
// DEF is a memory operand. The addressing registers
// therefore have live metadata, and the memory metadata is live.
// EXCEPTION: If the function is Safe, then direct stack writes
// to local variables (above the outgoing args area of the frame)
// are not live metadata, and there will be no indirect local frame
// writes, by definition of "safe." So, for safe funcs, only
// the o_mem (globals) and indirect writes are live metadata.
if (this->SafeFunc && MDIsStackAccessOpnd(DefOp, this->UseFP)
&& (!this->WritesAboveLocalFrame(DefOp, CurrInst->AreDefsNormalized()))
&& (!this->IsInOutgoingArgsRegion(DefOp))) {
++CurrDef;
SafeMemDest = true;
continue;
}
CurrDef = CurrInst->SetDefMetadata(DefOp, DEF_METADATA_USED);
changed = true;
MDExtractAddressFields(DefOp, BaseReg, IndexReg,
ScaleFactor, offset);
if (R_none != BaseReg) {
BaseOp.reg = MDCanonicalizeSubReg((ushort) BaseReg);
BaseOp.dtyp = dt_dword; // canonical 32-bit width
if (MDIsStackOrFramePointerReg(BaseOp, UseFP)) {
; // do nothing; DEF handled by case above
}
else {
CurrUse = CurrInst->FindUse(BaseOp);
if (CurrUse == CurrInst->GetLastUse()) {
SMP_msg("FATAL ERROR: BaseReg %d not in USE list at %x for %s\n",
BaseOp.reg, CurrInst->GetAddr(),
CurrInst->GetDisasm());
}
assert(CurrUse != CurrInst->GetLastUse());
if (this->IsGlobalName(BaseOp)) {
changed |= this->PropagateGlobalMetadata(BaseOp,
DEF_METADATA_USED, CurrUse->GetSSANum(), InstAddr);
}
else {
changed |= CurrInst->GetBlock()->PropagateLocalMetadata(BaseOp,
DEF_METADATA_USED, CurrUse->GetSSANum(), InstAddr);
}
}
} // end if R_none != BaseReg
if (R_none != IndexReg) {
IndexOp.reg = MDCanonicalizeSubReg((ushort) IndexReg);
IndexOp.dtyp = dt_dword; // canonical 32-bit width
if (MDIsStackOrFramePointerReg(IndexOp, UseFP)) {
; // do nothing; DEF handled by case above
}
else {
CurrUse = CurrInst->FindUse(IndexOp);
if (CurrUse == CurrInst->GetLastUse()) {
SMP_msg("FATAL ERROR: IndexReg %d not in USE list at %x for %s\n",
IndexOp.reg, CurrInst->GetAddr(),
CurrInst->GetDisasm());
}
assert(CurrUse != CurrInst->GetLastUse());
if (0 != ScaleFactor) {
; // mmStrata knows scaled reg is NUMERIC
// ... its metadata is not fetched
}
else if (this->IsGlobalName(IndexOp)) {
changed |= this->PropagateGlobalMetadata(IndexOp,
DEF_METADATA_USED, CurrUse->GetSSANum(), InstAddr);
}
else {
changed |= CurrInst->GetBlock()->PropagateLocalMetadata(IndexOp,
DEF_METADATA_USED, CurrUse->GetSSANum(), InstAddr);
}
}
} // end if R_none != IndexReg
} // end if X86_FLAGS_REG .. else if stack ptr ...
} // end if unanalyzed metadata usage
++CurrDef;
} // end while processing DEFs
if ((RETURN == CurrInst->GetDataFlowType())
|| (CALL == CurrInst->GetDataFlowType())
|| (INDIR_CALL == CurrInst->GetDataFlowType())) {
// The EAX and EDX registers can be returned to the caller,
// which might use their metadata. They show up as USEs
// of the return instruction. Some library functions
// pass return values in non-standard ways, e.g. through
// EBX or EDI, so we treat all return regs the same.
// For CALL instructions, values can be passed in caller-saved
// registers, unfortunately, so the metadata is live-in.
CurrUse = CurrInst->GetFirstUse();
while (CurrUse != CurrInst->GetLastUse()) {
NextUse = CurrUse;
++NextUse;
ReturnOp = CurrUse->GetOp();
if (DebugFlag) {
SMP_msg("ReturnOp: ");
PrintOperand(ReturnOp);
SMP_msg("\n");
}
if ((o_reg == ReturnOp.type) &&
(!MDIsStackOrFramePointerReg(ReturnOp, UseFP)) &&
(!ReturnOp.is_reg(X86_FLAGS_REG))) {
if (this->IsGlobalName(ReturnOp)) {
changed |= this->PropagateGlobalMetadata(ReturnOp,
DEF_METADATA_USED, CurrUse->GetSSANum(), InstAddr);
}
else {
changed |= CurrInst->GetBlock()->PropagateLocalMetadata(ReturnOp,
DEF_METADATA_USED, CurrUse->GetSSANum(), InstAddr);
}
}
CurrUse = NextUse;
} // end while all USEs
} // end if return or call
else if (CurrInst->HasDestMemoryOperand()) {
// Memory writes cause a lot of metadata usage.
// Addressing registers in the memory destination
// have live metadata used in bounds checking. The
// register being stored to memory could end up being
// used in some other bounds checking, unless we
// have precise memory tracking and know that it
// won't.
// We handled the addressing registers above, so we
// handle the register written to memory here.
// The same exception applies as above: If the destination
// memory operand is not a stack write, then safe functions
// do not need to track the metadata.
// If we push a register and have callees, the metadata could
// be live, if the callee gets its incoming args from our push
// instructions.
if (SafeMemDest && !(CurrInst->MDIsPushInstr() && !this->IsLeaf())) {
continue; // go to next instruction
}
CurrUse = CurrInst->GetFirstUse();
while (CurrUse != CurrInst->GetLastUse()) {
NextUse = CurrUse;
++NextUse;
op_t UseOp = CurrUse->GetOp();
// NOTE: **!!** To be less conservative, we
// should propagate less for exchange category
// instructions.
if ((UseOp.type == o_reg) && (!MDIsStackOrFramePointerReg(UseOp, UseFP)) && (!UseOp.is_reg(X86_FLAGS_REG))) {
if (this->IsGlobalName(UseOp)) {
changed |= this->PropagateGlobalMetadata(UseOp,
DEF_METADATA_USED, CurrUse->GetSSANum(), InstAddr);
}
else {
changed |= CurrInst->GetBlock()->PropagateLocalMetadata(UseOp,
DEF_METADATA_USED, CurrUse->GetSSANum(), InstAddr);
}
} // end if register
CurrUse = NextUse;
} // end while all USEs
} // end if call or return else if memdest ...
} // end for all instructions
} while (changed);
// All DEFs that still have status DEF_METADATA_UNANALYZED can now
// be marked as DEF_METADATA_UNUSED.
for (InstIter = this->Instrs.begin(); InstIter != this->Instrs.end(); ++InstIter) {
SMPInstr *CurrInst = (*InstIter);
if (NN_fnop == CurrInst->GetCmd().itype)
continue;
CurrDef = CurrInst->GetFirstDef();
while (CurrDef != CurrInst->GetLastDef()) {
if (DEF_METADATA_UNANALYZED == CurrDef->GetMetadataStatus()) {
CurrDef = CurrInst->SetDefMetadata(CurrDef->GetOp(),
DEF_METADATA_UNUSED);
assert(CurrDef != CurrInst->GetLastDef());
}
++CurrDef;
}
}
return;
} // end of SMPFunction::AnalyzeMetadataLiveness()
// Propagate the metadata Status for UseOp/SSANum to its global DEF.
// Return true if successful.
bool SMPFunction::PropagateGlobalMetadata(op_t UseOp, SMPMetadataType Status, int SSANum, ea_t UseAddr) {
bool changed = false;
bool UseFP = this->UsesFramePointer();
if ((0 > SSANum) || (o_void == UseOp.type) || (!MDIsDataFlowOpnd(UseOp, UseFP)) || MDIsIndirectMemoryOpnd(UseOp, UseFP))
return false;
// Find the DEF of UseOp with SSANum.
ea_t DefAddr;
SMPBasicBlock *UseBlock;
SMPBasicBlock *DefBlock;
if (UseAddr > this->GetNumBlocks()) { // UseAddr is an inst addr
UseBlock = this->GetBlockFromInstAddr(UseAddr);
}
else { // UseAddr is a block number
UseBlock = this->GetBlockByNum((size_t) UseAddr);
}
DefAddr = UseBlock->GetUltimateDefAddr(UseOp, UseAddr, SSANum, false);
if (BADADDR == DefAddr) {
return changed;
}
if (DefAddr > this->GetNumBlocks()) { // found a DEF inst.
SMPInstr *CurrInst = this->GetInstFromAddr(DefAddr);
ea_t InstAddr = DefAddr;
DefBlock = this->GetBlockFromInstAddr(DefAddr);
set<DefOrUse, LessDefUse>::iterator CurrDef;
set<DefOrUse, LessDefUse>::iterator CurrUse;
CurrDef = CurrInst->FindDef(UseOp);
assert(CurrDef != CurrInst->GetLastDef());
assert(SSANum == CurrDef->GetSSANum());
if (Status != CurrDef->GetMetadataStatus()) {
CurrDef = CurrInst->SetDefMetadata(UseOp, Status);
changed = (CurrDef != CurrInst->GetLastDef());
bool PropThroughUses = changed;
// If source operand was memory, we have two cases.
// (1) The instruction could be a load, in which
// case we should simply terminate the
// propagation, because the prior DEF of a memory
// location is always considered live metadata
// already, and we do not want to propagate liveness
// to the address regs in the USE list.
// EXCEPTION: For safe funcs, we propagate liveness
// for stack locations.
// (2) We could have an arithmetic operation such
// as reg := reg arithop memsrc. In this case, we
// still do not want to propagate through the memsrc,
// (with the same safe func EXCEPTION),
// but the register is both DEF and USE and we need
// to propagate through the register.
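// Hypothetical x86 examples of the two cases above:
//   (1) mov eax, [ebp-12]   ; pure load: stop propagating at the memory USE
//   (2) add eax, [ebp-12]   ; eax is both DEF and USE: propagate through eax,
//                           ; but not through the memory source.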
if (CurrInst->HasSourceMemoryOperand()) {
if (this->SafeFunc) {
op_t MemSrcOp = CurrInst->MDGetMemUseOp();
assert(o_void != MemSrcOp.type);
if (MDIsDirectStackAccessOpnd(MemSrcOp, this->UseFP)) {
// We have a SafeFunc stack access. This is
// the EXCEPTION case where we want to
// propagate metadata liveness for a memory
// location.
CurrUse = CurrInst->FindUse(MemSrcOp);
assert(CurrUse != CurrInst->GetLastUse());
if (this->IsGlobalName(MemSrcOp)) {
changed |= this->PropagateGlobalMetadata(MemSrcOp,
Status, CurrUse->GetSSANum(), InstAddr);
}
else {
changed |= CurrInst->GetBlock()->PropagateLocalMetadata(MemSrcOp,
Status, CurrUse->GetSSANum(), InstAddr);
}
} // end if stack access operand
} // end if SafeFunc
if (3 == CurrInst->GetOptType()) { // move inst
PropThroughUses = false; // load address regs are not live metadata
}
else if ((5 == CurrInst->GetOptType())
|| (NN_and == CurrInst->GetCmd().itype)
|| (NN_or == CurrInst->GetCmd().itype)
|| (NN_xor == CurrInst->GetCmd().itype)) {
// add, subtract, and, or with memsrc
// Find the DEF reg in the USE list.
CurrUse = CurrInst->FindUse(UseOp);
assert(CurrUse != CurrInst->GetLastUse());
changed |= this->PropagateGlobalMetadata(UseOp,
Status, CurrUse->GetSSANum(), InstAddr);
PropThroughUses = false;
}
} // end if memory source
// Now, propagate the metadata status to all the
// non-memory, non-flags-reg, non-special-reg
// (i.e. regular registers) USEs.
if (PropThroughUses) {
CurrUse = CurrInst->GetFirstUse();
while (CurrUse != CurrInst->GetLastUse()) {
op_t CurrUseOp = CurrUse->GetOp();
// NOTE: **!!** To be less conservative, we
// should propagate less for exchange category
// instructions.
if ((CurrUseOp.type == o_reg) && (!MDIsStackOrFramePointerReg(CurrUseOp, UseFP))
&& (!CurrUseOp.is_reg(X86_FLAGS_REG))) {
if (this->IsGlobalName(CurrUseOp)) {
changed |= this->PropagateGlobalMetadata(CurrUseOp,
Status, CurrUse->GetSSANum(), InstAddr);
}
else {
changed |= CurrInst->GetBlock()->PropagateLocalMetadata(CurrUseOp,
Status, CurrUse->GetSSANum(), InstAddr);
}
}
++CurrUse;
} // end while all USEs
} // end if (PropThroughUses)
} // end if metadata Status changed
} // end if DefAddr is an instruction address
else { // Found a DEF block number in DefAddr.
DefBlock = this->GetBlockByNum((size_t) DefAddr);
set<SMPPhiFunction, LessPhi>::iterator DefPhi;
DefPhi = DefBlock->FindPhi(UseOp);
assert(DefPhi != DefBlock->GetLastPhi());
assert(SSANum == DefPhi->GetDefSSANum());
if (Status != DefPhi->GetDefMetadata()) {
DefPhi = DefBlock->SetPhiDefMetadata(UseOp, Status);
changed = true;
// If the Phi DEF has live metadata, then the Phi
// USEs each have live metadata. Propagate.
int UseSSANum;
for (size_t index = 0; index < DefPhi->GetPhiListSize(); ++index) {
UseSSANum = DefPhi->GetUseSSANum(index);
// UseSSANum can be -1 in some cases because
// we conservatively make EAX and EDX be USEs
// of all return instructions, when the function
// might have a void return type, making it
// appear as if an uninitialized EAX or EDX
// could make it to the return block.
if (0 <= UseSSANum) {
changed |= this->PropagateGlobalMetadata(UseOp, Status, UseSSANum, DefAddr);
}
} // end for all Phi USEs
} // end if Phi DEF metadata Status changed
} // end if (DefAddr is inst addr) else ... [DefAddr is block number]
return changed;
} // end of SMPFunction::PropagateGlobalMetadata()
// Find consecutive DEFs of the same type and mark the second one redundant.
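// Example (illustrative): if two consecutive instructions DEF the same stack location
// with the same inferred type, the second DEF's metadata update is redundant and its
// annotation can be skipped by mmStrata.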
void SMPFunction::FindRedundantMetadata(void) {
list<SMPBasicBlock *>::iterator BlockIter;
SMPBasicBlock *CurrBlock;
bool changed = false;
for (BlockIter = this->Blocks.begin(); BlockIter != this->Blocks.end(); ++BlockIter) {
CurrBlock = (*BlockIter);
changed |= CurrBlock->FindRedundantLocalMetadata(this->SafeFunc);
}
return;
} // end of SMPFunction::FindRedundantMetadata()
// Perform SCCP to find constant values for DEFs, store in this->ConstantDefs
void SMPFunction::SparseConditionalConstantPropagation(void) {
// We perform the SCCP (Sparse Conditional Constant Propagation) algorithm
// as found in Cooper & Torczon, "Engineering a Compiler."
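// Sketch of the standard SCCP worklist scheme (a summary of the textbook algorithm,
// for reference):
//   1. Initialize each SSA DEF's lattice cell to TOP and mark all CFG edges unexecutable.
//   2. Seed the edge worklist with the entry block's out-edges; evaluate reachable
//      instructions, lowering cells from TOP to a constant, or to BOTTOM on conflict.
//   3. For branches with constant predicates, mark only the taken edge executable;
//      re-evaluate Phi functions and USEs whose inputs changed until both worklists empty.
//   4. Record DEFs whose cells settle at a constant in this->ConstantDefs.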
return;
} // end of SMPFunction::SparseConditionalConstantPropagation()
// Do we not care if DEF underflowed, due to how it is used?
bool SMPFunction::IsBenignUnderflowDEF(op_t DefOp, int DefSSANum, size_t DefAddr, int &IdiomCode) {
bool benign = false;
list<SMPInstr *>::iterator InstIter;
set<DefOrUse, LessDefUse>::iterator DefIter, UseIter;
int UseSSANum;
SMPOperandType DefType;
// We are looking to suppress overflow and underflow warnings on the following
// code sequence: PTR1-PTR2+1 gets a loop invariant code motion optimization
// that pulls temp := 1-PTR2 out of the loop, and leaves temp2 := PTR1+temp
// inside the loop. The hoisted subtraction could underflow, and the addition
// that is not hoisted could overflow. The net effect of these two instructions
// is benign, however, so we want to suppress underflow and overflow checks on
// both of them, but only if we can match the pair of instructions.
// We know that DefOp/DefAddr/DefSSANum refer to a subtraction instruction that
// produces a NEGATEDPTR result. We only need to find the paired addition instruction
// that USEs the same SSA name to produce a PTROFFSET result to prove that we have
// a case of benign underflow and overflow. If we find such a pair, we will mark
// both of their DEF results as benign overflows to suppress overflow checks.
// PAINFUL: Linear search of instructions. Need to address this in the future.
// Perhaps we should have a map of UseHashValue to InstAddr, but that is more
// memory consumption. Sure would be useful, though.
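// Hypothetical instruction pair matched below (registers chosen only for illustration):
//     mov ecx, 1
//     sub ecx, esi        ; hoisted out of the loop: ecx := 1 - PTR2, NEGATEDPTR, may underflow
//     ...
//     add edx, ecx        ; inside the loop: edx := PTR1 + ecx, PTROFFSET, may overflow
// The two wrap-arounds cancel, so both DEFs are marked with SetDefNoOverflow(true).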
for (InstIter = this->Instrs.begin(); InstIter != this->Instrs.end(); ++InstIter) {
SMPInstr *CurrInst = (*InstIter);
UseIter = CurrInst->FindUse(DefOp);
if (UseIter == CurrInst->GetLastUse())
continue; // DefOp is not a USE in CurrInst
UseSSANum = UseIter->GetSSANum();
if (UseSSANum == DefSSANum) {
// Only remaining question: Do we produce a PTROFFSET in CurrInst? (If we do,
// that implies we had an addition, so we don't need to check that.)
DefIter = CurrInst->GetFirstNonFlagsDef();
DefType = DefIter->GetType();
// NOTE: Make this more general. What if we just move the NEGATEDPTR into a register
// and then the next instruction, with different SSA name, produces the PTROFFSET?
// !!!!!*****!!!!!
if (IsEqType(DefType, PTROFFSET)) {
// Found a pair. Mark both DEFs as benign and return true.
benign = true;
IdiomCode = 4;
// Note that we have two possibilities for the addition. The NEGATEDPTR could be
// both the DEF and a USE, e.g. add negptr,ptr1; or the NEGATEDPTR could be
// just a USE, e.g. add reg,negptr, so that reg is overwritten and becomes a
// PTROFFSET. It really does not matter. The point is that we want to ignore
// overflow on this addition, and also on the subtraction that produced the
// NEGATEDPTR, so we mark the DEF in each instruction as benignly overflowing.
op_t UseInstDefOp = DefIter->GetOp();
CurrInst->SetDefNoOverflow(UseInstDefOp, true);
SMPInstr *DefInst = this->GetInstFromAddr(DefAddr);
DefInst->SetDefNoOverflow(DefOp, true);
break;
}
}
}
return benign;
} // end of SMPFunction::IsBenignUnderflowDEF()
bool SMPFunction::HasIntErrorCallSink(op_t DefOp, int DefSSANum, size_t DefAddr, std::string &SinkString) {
bool FoundSink = false;
this->ResetProcessedBlocks(); // prepare for recursion through blocks
SinkString.clear();
SMPBasicBlock *CurrBlock = this->GetBlockFromInstAddr(DefAddr);
assert(CurrBlock != NULL);
FoundSink = CurrBlock->IsCriticalSink(DefOp, DefSSANum, SinkString);
return FoundSink;
} // end of SMPFunction::HasIntErrorCallSink()
// Compute SSA form data structures across the function.
void SMPFunction::ComputeSSA(void) {
bool DumpFlag = false;
bool DebugFlag = false;
#if SMP_DEBUG_DATAFLOW
DumpFlag |= (0 == strcmp("uw_frame_state_for", this->GetFuncName()));
DebugFlag |= (0 == strcmp("uw_frame_state_for", this->GetFuncName()));
#endif
#if 1
if (DumpFlag)
this->Dump();
#endif
if (DebugFlag) SMP_msg("Computing IDoms.\n");
this->ComputeIDoms();
if (DebugFlag) SMP_msg("Computing Dom frontiers.\n");
this->ComputeDomFrontiers();
if (DebugFlag) SMP_msg("Computing global names.\n");
this->ComputeGlobalNames();
if (DebugFlag) SMP_msg("Computing blocks defined in.\n");
this->ComputeBlocksDefinedIn();
if (DebugFlag) SMP_msg("Inserting Phi functions.\n");
this->InsertPhiFunctions();
if (DebugFlag) SMP_msg("Building dominator tree.\n");
this->BuildDominatorTree();
if (DebugFlag) SMP_msg("Computing SSA renumbering.\n");
list<SMPBasicBlock *>::iterator BlockIter;
SMPBasicBlock *CurrBlock;
for (BlockIter = this->Blocks.begin(); BlockIter != this->Blocks.end(); ++BlockIter) {
CurrBlock = (*BlockIter);
if (CurrBlock->FindLoopHeadsAndTails()) {
++this->LoopCount;
}
if (DumpFlag) CurrBlock->Dump();
if (DebugFlag) SMP_msg("Computing local names.\n");
CurrBlock->SetLocalNames();
if (DebugFlag) SMP_msg("Computing local SSA renumbering.\n");
CurrBlock->SSALocalRenumber();
if (DumpFlag) CurrBlock->Dump();
#if STARS_BUILD_DEF_USE_CHAINS
if (DebugFlag) SMP_msg("Computing global chains.\n");
CurrBlock->CreateGlobalChains();
#if 1
if (DebugFlag) SMP_msg("Marking dead registers.\n");
CurrBlock->MarkDeadRegs();
#endif
#endif
}
#if SMP_DEBUG_DATAFLOW
if (DumpFlag)
this->Dump();
#endif
// Once SSA numbers have been set into all DEFs, USES, and DU-chains, then
// the SSA numbering data structures will no longer be used and can be
// de-allocated.
this->FreeSSAMemory();
return;
} // end of SMPFunction::ComputeSSA()
// Detect which blocks are in which loops and populate FuncLoopsByBlock data structure.
void SMPFunction::DetectLoops(void) {
list<SMPBasicBlock *>::iterator BlockIter;
size_t LoopNumber = 0;
for (BlockIter = this->Blocks.begin(); BlockIter != this->Blocks.end(); ++BlockIter) {
SMPBasicBlock *CurrBlock = (*BlockIter);
if (CurrBlock->IsLoopTailBlock()) {
// For each loop tail block, get its loop header block number (the target
// block for its back edge). Then traverse the CFG upwards, recording all
// of the basic block numbers that lie between the loop tail and loop head,
// along with the tail and head.
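// Illustrative example (hypothetical block numbers): for a back edge from tail
// block 7 to head block 2, the worklist walk below sets bit LoopNumber in
// FuncLoopsByBlock for block 7, then its unprocessed predecessors, and so on
// up to and including block 2.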
this->ResetProcessedBlocks();
int TailBlockNum = CurrBlock->GetNumber();
int HeadBlockNum = CurrBlock->GetLoopHeaderNumber();
assert((TailBlockNum != SMP_BLOCKNUM_UNINIT) && (HeadBlockNum != SMP_BLOCKNUM_UNINIT));
list<list<SMPBasicBlock *>::iterator> BlockWorkList;
BlockWorkList.push_back(BlockIter);
SMPBasicBlock *WorkBlock;
SMPBasicBlock *PredBlock;
list<SMPBasicBlock *>::iterator WorkIter;
list<SMPBasicBlock *>::iterator PredIter;
do {
WorkIter = BlockWorkList.front();
WorkBlock = (*WorkIter);
BlockWorkList.pop_front();
int WorkBlockNum = WorkBlock->GetNumber();
assert(WorkBlockNum != SMP_BLOCKNUM_UNINIT);
if (!(WorkBlock->IsProcessed())) {
this->FuncLoopsByBlock[(size_t) WorkBlockNum].SetBit(LoopNumber);
WorkBlock->SetProcessed(true);
// Add unprocessed predecessors to the work list until we reach the loop head.
if (WorkBlockNum != HeadBlockNum) {
for (PredIter = WorkBlock->GetFirstPred(); PredIter != WorkBlock->GetLastPred(); ++PredIter) {
PredBlock = (*PredIter);
bool AlreadyProcessed = PredBlock->IsProcessed();
if (!AlreadyProcessed) {
BlockWorkList.push_back(PredIter);
}
}
}
}
} while (!BlockWorkList.empty());
++LoopNumber;
}
}
return;
} // end of SMPFunction::DetectLoops()
// Find memory writes (DEFs) with possible aliases
void SMPFunction::AliasAnalysis(void) {
// First task: Mark which memory DEFs MIGHT be aliased because an
// indirect memory write occurs somewhere in the DEF-USE chain.
// Memory DEF-USE chains with no possible aliasing can be subjected
// to type inference and type-based optimizing annotations, e.g. a
// register spill to memory followed by retrieval from spill memory
// followed by NUMERIC USEs should be typed as a continuous NUMERIC
// chain if there is no possibility of aliasing.
// Preparatory step: For each indirect write, mark all def-use chains
// (maintained at the basic block level) that include the indirect
// write instruction. If there are no indirect writes in the function,
// leave all DEFs marked as unaliased and exit.
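// Example (illustrative only) of an unaliased spill chain:
//     mov [esp+12], eax    ; spill DEF
//     ...                  ; no indirect memory writes in between
//     mov eax, [esp+12]    ; reload, followed by NUMERIC USEs
// One intervening indirect write such as "mov [edx], ecx" forces the spill DEF
// to be treated as possibly aliased.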
if (!(this->HasIndirectWrites))
return;
list<SMPBasicBlock *>::iterator BlockIter;
SMPBasicBlock *CurrBlock;
vector<SMPInstr *>::iterator BlkInstIter;
for (BlockIter = this->Blocks.begin(); BlockIter != this->Blocks.end(); ++BlockIter) {
CurrBlock = (*BlockIter);
for (BlkInstIter = CurrBlock->GetFirstInst(); BlkInstIter != CurrBlock->GetLastInst(); ++BlkInstIter) {
SMPInstr *CurrInst = (*BlkInstIter);
if (CurrInst->HasIndirectMemoryWrite()) {
#if STARS_BUILD_DEF_USE_CHAINS
CurrBlock->MarkIndWriteChains(CurrInst->GetAddr());
#endif
// Until we get true aliasing analysis, any indirect write
// is classified as may-be-aliased.
CurrBlock->SetMaybeAliased(true);
}
} // end for all insts in block
} // end for all blocks in function
// Step one: Find only the memory DEFs to start with.
bool FoundIndWrite = false;
list<SMPInstr *>::iterator InstIter;
for (InstIter = this->Instrs.begin(); InstIter != this->Instrs.end(); ++InstIter) {
SMPInstr *CurrInst = (*InstIter);
if (CurrInst->HasDestMemoryOperand()) {
// Starting with the DEF instruction, traverse the control flow
// until we run into (A) the re-definition of the operand, including
// a re-definition of any of its addressing registers, or (B) an
// indirect write. Return false if condition A terminates the
// search, and true if condition B terminates the search.
this->ResetProcessedBlocks();
op_t MemDefOp = CurrInst->MDGetMemDefOp();
assert(o_void != MemDefOp.type);
set<DefOrUse, LessDefUse>::iterator CurrMemDef = CurrInst->FindDef(MemDefOp);
assert(CurrMemDef != CurrInst->GetLastDef());
int SSANum = CurrMemDef->GetSSANum();
FoundIndWrite = this->FindPossibleChainAlias(CurrInst, MemDefOp, SSANum);
if (FoundIndWrite) {
// Mark the DEF as aliased.
CurrMemDef = CurrInst->SetDefIndWrite(CurrMemDef->GetOp(), true);
}
} // end if inst has dest memory operand
} // end for all instructions
return;
} // end of SMPFunction::AliasAnalysis()
// Does the DefOp DEF_USE chain have an indirect mem write starting at CurrInst?
bool SMPFunction::FindPossibleChainAlias(SMPInstr *CurrInst, op_t DefOp, int SSANum) {
#if 0
bool DebugFlag = false;
if (0 == strcmp("sdissect", this->GetFuncName())) {
// Next line is just a good place to set a break point.
DebugFlag = true;
}
#endif
// Starting with the DEF instruction, traverse the control flow
// until we run into (A) the re-definition of the operand, including
// a re-definition of any of its addressing registers, or (B) an
// indirect write. Return false if condition A terminates the
// search, and true if condition B terminates the search.
SMPBasicBlock *CurrBlock = CurrInst->GetBlock();
if (!(CurrBlock->IsProcessed())) {
CurrBlock->SetProcessed(true);
}
else
return false; // block already processed
// Proceed by cases:
ea_t DefAddr = CurrInst->GetAddr();
set<DefOrUse, LessDefUse>::iterator UseIter;
vector<SMPInstr *>::iterator InstIter = CurrBlock->GetInstIterFromAddr(DefAddr);
bool IndWriteFound, ReDef;
++InstIter;
// Case 1: Local name. Return the IndWrite flag for the local Def-Use
// chain begun by CurrInst.
bool UseAfterIndirectWrite = CurrBlock->HasUseAfterIndWrite(DefOp, InstIter, IndWriteFound, ReDef);
bool LiveOutFlag = CurrBlock->IsLiveOut(DefOp);
if (CurrBlock->IsLocalName(DefOp)) {