	long SignedOffset;

	MDExtractAddressFields(DestOp, BaseReg, IndexReg, ScaleFactor, offset);
	SignedOffset = (long) offset;
	bool ESPrelative = (BaseReg == R_sp) || (IndexReg == R_sp);
	bool EBPrelative = this->UseFP && ((BaseReg == R_bp) || (IndexReg == R_bp));
	if (!(ESPrelative || EBPrelative))
		return false;
	if (((IndexReg != R_none) && (BaseReg != R_none))
		|| (0 < ScaleFactor)) {
		msg("WARNING: WritesAboveLocalFrame called with indexed write.\n");
	}
	InArgWrite = (ESPrelative && (SignedOffset > ((long) this->LocalVarsSize)))
		|| (EBPrelative && (SignedOffset > 0));
	return InArgWrite;
} // end of SMPFunction::WritesAboveLocalFrame()

// Is DestOp an indexed write above the local vars frame?
bool SMPFunction::IndexedWritesAboveLocalFrame(op_t DestOp) {
	bool InArgWrite = false;
	int BaseReg, IndexReg;
	ushort ScaleFactor;
	ea_t offset;

	MDExtractAddressFields(DestOp, BaseReg, IndexReg, ScaleFactor, offset);
	bool ESPrelative = (BaseReg == R_sp) || (IndexReg == R_sp);
	bool EBPrelative = this->UseFP && ((BaseReg == R_bp) || (IndexReg == R_bp));
	if (!(ESPrelative || EBPrelative))
		return false;

	int SignedOffset = (int) offset;
	InArgWrite = (ESPrelative && (SignedOffset > ((int) this->LocalVarsSize)))
		|| (EBPrelative && (SignedOffset > 0));
	return InArgWrite;
} // end of SMPFunction::IndexedWritesAboveLocalFrame()
// Find evidence of calls to alloca(), which appear as stack space allocations (i.e.
//  subtractions from the stack pointer) AFTER the local frame allocation instruction
//  for this function.
// Return true if such an allocation is found and false otherwise.
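// Illustrative example (hypothetical instructions): after the prologue's frame
//  allocation such as "sub esp, 0x58", a later "sub esp, eax" that allocates a
//  variable-sized region is the kind of instruction this method looks for.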
bool SMPFunction::FindAlloca(void) {
	list<SMPInstr>::iterator CurrInst = this->Instrs.begin();
#if SMP_USE_SSA_FNOP_MARKER
	++CurrInst;  // skip marker instruction
#endif
	for ( ; CurrInst != this->Instrs.end(); ++CurrInst) {
		if ((CurrInst->GetAddr() > this->LocalVarsAllocInstr) && CurrInst->MDIsFrameAllocInstr()) {
			return true;
		}
	}
	return false;
} // end of SMPFunction::FindAlloca()

// Emit the annotations describing the regions of the stack frame.
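// For reference, the annotation lines emitted below follow the qfprintf() format
//  strings in this method; e.g. (hypothetical address, sizes, and referent IDs):
//     804a3b0     32 DATAREF STACK 17 esp + 0 PARENT LocalFrame LOCALFRAME
//     804a3b0     16 DATAREF STACK 18 esp + 0 CHILDOF 17 OFFSET 0 OutArgsRegion OUTARGS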
void SMPFunction::EmitStackFrameAnnotations(FILE *AnnotFile, list<SMPInstr>::iterator Instr) {
	ea_t addr = Instr->GetAddr();

#if 0
	if (0 < IncomingArgsSize) {
		qfprintf(AnnotFile, "%10x %6d INARGS STACK esp + %d %s \n",
				addr, IncomingArgsSize,
				(LocalVarsSize + CalleeSavedRegsSize + RetAddrSize),
				Instr->GetDisasm());
	}
#endif
		qfprintf(AnnotFile, "%10x %6d MEMORYHOLE STACK esp + %d ReturnAddress \n",
				addr, RetAddrSize, (this->LocalVarsSize + this->CalleeSavedRegsSize));
	if (0 < this->CalleeSavedRegsSize) {
		qfprintf(AnnotFile, "%10x %6u MEMORYHOLE STACK esp + %d CalleeSavedRegs \n",
				addr, this->CalleeSavedRegsSize, this->LocalVarsSize);
	}
	if ((0 < this->LocalVarsSize) && this->GoodLocalVarTable) {
		unsigned long ParentReferentID = DataReferentID++;
		qfprintf(AnnotFile, "%10x %6u DATAREF STACK %ld esp + %d PARENT LocalFrame LOCALFRAME\n",
				addr, this->LocalVarsSize, ParentReferentID, 0);
#if SMP_COMPUTE_STACK_GRANULARITY
		if (this->AnalyzedSP && !this->CallsAlloca && (BADADDR != this->LocalVarsAllocInstr)) {
			// We can only fine-grain the stack frame if we were able to analyze the stack
			if (this->OutgoingArgsSize > 0) {
				qfprintf(AnnotFile, "%10x %6u DATAREF STACK %ld esp + %d CHILDOF %ld OFFSET %d OutArgsRegion OUTARGS\n",
					addr, this->OutgoingArgsSize, DataReferentID, 0, ParentReferentID, 0);
				++DataReferentID;
			}
#if SMP_DEBUG_STACK_GRANULARITY
			msg("LocalVarTable of size %d for function %s\n", this->LocalVarTable.size(),
				this->GetFuncName());
#endif
			for (size_t i = 0; i < this->LocalVarTable.size(); ++i) {
#if SMP_DEBUG_STACK_GRANULARITY
				msg("Entry %d offset %ld size %d name %s\n", i, this->LocalVarTable[i].offset,
					this->LocalVarTable[i].size, this->LocalVarTable[i].VarName);
#endif
				// Don't emit annotations for incoming or outgoing args or anything else
				//  above or below the current local frame.
				if ((this->LocalVarTable[i].offset >= (long) this->FuncInfo.frsize)
					|| (this->LocalVarTable[i].offset < (long) this->OutgoingArgsSize))
					continue;
				qfprintf(AnnotFile, "%10x %6u DATAREF STACK %ld esp + %ld CHILDOF %ld OFFSET %ld LOCALVAR %s \n",
					addr, this->LocalVarTable[i].size, DataReferentID,
					this->LocalVarTable[i].offset, ParentReferentID,
					this->LocalVarTable[i].offset, this->LocalVarTable[i].VarName);
				++DataReferentID;
			} // end for all local vars
		} // end if (this->AnalyzedSP and not Alloca .... )
#endif
	} // end if (0 < LocalVarsSize)
	return;
} // end of SMPFunction::EmitStackFrameAnnotations() 

// Main data flow analysis driver. Goes through the function and
//  fills all objects for instructions, basic blocks, and the function
//  itself.
void SMPFunction::Analyze(void) {
	list<SMPInstr>::iterator FirstInBlock = this->Instrs.end();
	   // For starting a basic block
	list<SMPInstr>::iterator LastInBlock = this->Instrs.end();
	   // Terminating a basic block
	bool FoundAllCallers = false;   // set to true once callers are recorded at the first instruction

#if SMP_DEBUG_CONTROLFLOW
	msg("Entering SMPFunction::Analyze.\n");
#endif

	// Get some basic info from the FuncInfo structure.
	this->Size = this->FuncInfo.endEA - this->FuncInfo.startEA;
	this->UseFP = (0 != (this->FuncInfo.flags & (FUNC_FRAME | FUNC_BOTTOMBP)));
	this->StaticFunc = (0 != (this->FuncInfo.flags & FUNC_STATIC));
	this->LibFunc = (0 != (this->FuncInfo.flags & FUNC_LIB));
	this->BlockCount = 0;
	this->AnalyzedSP = this->FuncInfo.analyzed_sp();

#if SMP_DEBUG_CONTROLFLOW
	msg("SMPFunction::Analyze: got basic info.\n");
#endif

	// Cycle through all chunks that belong to the function.
	func_tail_iterator_t FuncTail(this->GetFuncInfo());
	size_t ChunkCounter = 0;
	for (bool ChunkOK = FuncTail.main(); ChunkOK; ChunkOK = FuncTail.next()) {
		const area_t &CurrChunk = FuncTail.chunk();
		++ChunkCounter;
		if (1 < ChunkCounter) {
			this->SharedChunks = true;
#if SMP_DEBUG_CHUNKS
			msg("Found tail chunk for %s at %x\n", this->GetFuncName(), CurrChunk.startEA);
#endif
		}
		// Build the instruction and block lists for the function.
		for (ea_t addr = CurrChunk.startEA; addr < CurrChunk.endEA;
			addr = get_item_end(addr)) {
			flags_t InstrFlags = getFlags(addr);
			if (isHead(InstrFlags) && isCode(InstrFlags)) {
				SMPInstr CurrInst = SMPInstr(addr);
				// Fill in the instruction data members.
#if SMP_DEBUG_CONTROLFLOW
				msg("SMPFunction::Analyze: calling CurrInst::Analyze.\n");
#endif
				CurrInst.Analyze();
				if (SMPBinaryDebug) {
					msg("Disasm:  %s \n", CurrInst.GetDisasm());
				}
#if SMP_COUNT_MEMORY_ALLOCATIONS
				SMPInstBytes += sizeof(CurrInst);
#endif

#if SMP_USE_SSA_FNOP_MARKER
				if (this->Instrs.empty()) {
					// First instruction in function. We want to create a pseudo-instruction
					//  at the top of the function that can hold SSA DEFs for LiveIn names
					//  to the function. We use a floating point no-op as the pseudo-inst.
					//  The code address is one less than the start address of the function.
					SMPInstr MarkerInst = SMPInstr(addr - 1);
					MarkerInst.AnalyzeMarker();
					assert(FirstInBlock == this->Instrs.end());
					this->Instrs.push_back(MarkerInst);
#if SMP_COUNT_MEMORY_ALLOCATIONS
					SMPInstBytes += sizeof(MarkerInst);
#endif
				} // end if first instruction in function
#endif  // SMP_USE_SSA_FNOP_MARKER
				if (this->AnalyzedSP) {
					// Audit the IDA SP analysis.
					sval_t sp_delta = get_spd(this->GetFuncInfo(), addr);
					// sp_delta is difference between current value of stack pointer
					//  and value of the stack pointer coming into the function. It
					//  is updated AFTER each instruction. Thus, it should not get back
					//  above zero (e.g. to +4) until after a return instruction.
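					//  For example (hypothetical): after a "push ebp" the delta would be -4,
					//  and it should only climb back toward zero as the epilogue pops saved
					//  registers and deallocates the local frame.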
					if (sp_delta > 0) {
						// Stack pointer has underflowed, according to IDA's analysis,
						//  which is probably incorrect.
						this->AnalyzedSP = false;
						msg("Resetting AnalyzedSP to false for %s\n", this->GetFuncName());
						msg("Underflowing instruction: %s sp_delta: %d\n", CurrInst.GetDisasm(),
							sp_delta);
					}
					else if (sp_delta == 0) {
						// Search for tail calls.
						if (CurrInst.IsBranchToFarChunk()) {
							// After the stack has been restored to the point at which
							//  we are ready to return, we instead find a jump to a
							//  far chunk. This is the classic tail call optimization:
							//  the return statement has been replaced with a jump to
							//  another function, which will return not to this function,
							//  but to the caller of this function.
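							// Illustrative example (hypothetical): a thin wrapper that ends in
							//  "jmp strcmp" rather than "call strcmp" followed by "ret" would
							//  be detected here.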
							CurrInst.SetTailCall();
							msg("Found tail call at %x from %s: %s\n", addr, this->GetFuncName(),
								CurrInst.GetDisasm());
						}
					}
				} // end if (this->AnalyzedSP)
				// Find all functions that call the current function.
				xrefblk_t CurrXrefs;
				if (!FoundAllCallers) {
					for (bool ok = CurrXrefs.first_to(CurrInst.GetAddr(), XREF_ALL);
						ok;
						ok = CurrXrefs.next_to()) {
						if ((CurrXrefs.from != 0) && (CurrXrefs.iscode)) {
							// Make sure it is not a fall-through. Must be a
							//  control-flow instruction of some sort, including
							//  direct or indirect calls or tail calls.
							SMPInstr CallInst(CurrXrefs.from);
							CallInst.Analyze();
							SMPitype CallType = CallInst.GetDataFlowType();
							if ((COND_BRANCH <= CallType) && (RETURN >= CallType)) {
								// Found a caller, with its call address in CurrXrefs.from
								this->AddCallSource(CurrXrefs.from);
							}
						}
					}
					FoundAllCallers = true; // only do this for first inst
				}

				SMPitype DataFlowType = CurrInst.GetDataFlowType();
				if ((DataFlowType == INDIR_CALL) || (DataFlowType == CALL)) {
					// See if IDA has determined the target of the call.
					ea_t TargetAddr = CurrInst.GetCallTarget();
					bool LinkedToTarget = (BADADDR != TargetAddr);
					if (LinkedToTarget) {
						if (0 == TargetAddr) {
							msg("WARNING: Ignoring NULL call target (unreachable) at %x\n", CurrInst.GetAddr());
						}
						else {
							this->AllCallTargets.push_back(TargetAddr);
							if (INDIR_CALL == DataFlowType) {
								this->IndirectCallTargets.push_back(TargetAddr);
							}
							else {
								this->DirectCallTargets.push_back(TargetAddr);
							}
						}
					} // end if (LinkedToTarget)
					if (DataFlowType == INDIR_CALL) {
						this->IndirectCalls = true;
						this->UnresolvedIndirectCalls = (!LinkedToTarget);
					}
				} // end if INDIR_CALL or CALL
				else if (DataFlowType == INDIR_JUMP)
					this->IndirectJumps = true;

				// Before we insert the instruction into the instruction
				//  list, determine if it is a jump target that does not
				//  follow a basic block terminator. This is the special case
				//  of a CASE in a SWITCH that falls through into another
				//  CASE, for example. The first sequence of statements
				//  was not terminated by a C "break;" statement, so it
				//  looks like straight line code, but there is an entry
				//  point at the beginning of the second CASE sequence and
				//  we have to split basic blocks at the entry point.
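				// Illustrative example (hypothetical code):
				//     case1:  mov eax, [ebp-8]
				//             add eax, 1          ; no "break;", so no jump here
				//     case2:  mov [ebp-8], eax    ; also a jump-table target => new block starts here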
				if ((FirstInBlock != this->Instrs.end())
					&& CurrInst.IsJumpTarget()) {
#if SMP_DEBUG_CONTROLFLOW
					msg("SMPFunction::Analyze: hit special jump target case.\n");
#endif
					LastInBlock = --(this->Instrs.end());
					SMPBasicBlock CurrBlock = SMPBasicBlock(this, FirstInBlock,
						LastInBlock);
					// If not the first chunk in the function, it is a shared
					//  tail chunk.
					if (ChunkCounter > 1) {
						CurrBlock.SetShared();
					}
					FirstInBlock = this->Instrs.end();
					LastInBlock = this->Instrs.end();
					this->Blocks.push_back(CurrBlock);
					this->BlockCount += 1;
				}

#if SMP_DEBUG_CONTROLFLOW
		msg("SMPFunction::Analyze: putting CurrInst on list.\n");
#endif
				// Insert instruction at end of list.
				this->Instrs.push_back(CurrInst);

				// Find basic block leaders and terminators.
				if (FirstInBlock == this->Instrs.end()) {
#if SMP_DEBUG_CONTROLFLOW
					msg("SMPFunction::Analyze: setting FirstInBlock.\n");
#if SMP_USE_SSA_FNOP_MARKER
					if (2 == this->Instrs.size()) {
						// Just pushed first real instruction, after the fnop marker.
						FirstInBlock = this->Instrs.begin();
					}
					else {
						FirstInBlock = --(this->Instrs.end());
					}
#else
					FirstInBlock = --(this->Instrs.end());
#endif
				}
				if (CurrInst.IsBasicBlockTerminator()) {
#if SMP_DEBUG_CONTROLFLOW
		msg("SMPFunction::Analyze: found block terminator.\n");
#endif
					LastInBlock = --(this->Instrs.end());
					SMPBasicBlock CurrBlock = SMPBasicBlock(this, FirstInBlock, LastInBlock);
					// If not the first chunk in the function, it is a shared
					//  tail chunk.
					if (ChunkCounter > 1) {
						CurrBlock.SetShared();
					}
					FirstInBlock = this->Instrs.end();
					LastInBlock = this->Instrs.end();
					this->Blocks.push_back(CurrBlock);
					this->BlockCount += 1;

					// Is the instruction a branch to a target outside the function? If
					//  so, this function has shared tail chunks.
					if (CurrInst.IsBranchToFarChunk() && (!CurrInst.IsTailCall())) {
						this->SharedChunks = true;
					}
				}
			} // end if (isHead(InstrFlags) && isCode(InstrFlags)
		} // end for (ea_t addr = CurrChunk.startEA; ... )

		// Handle the special case in which a function does not terminate
		//  with a return instruction or any other basic block terminator.
		//  Sometimes IDA Pro sees a call to a NORET function and decides
		//  to not include the dead code after it in the function. That
		//  dead code includes the return instruction, so the function no
		//  longer includes a return instruction and terminates with a CALL.
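		//  For example (hypothetical), a function whose final instruction is
		//  "call abort" or "call __assert_fail" can end without any terminator.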
		if (FirstInBlock != this->Instrs.end()) {
			LastInBlock = --(this->Instrs.end());
			SMPBasicBlock CurrBlock = SMPBasicBlock(this, FirstInBlock, LastInBlock);
			// If not the first chunk in the function, it is a shared
			//  tail chunk.
			if (ChunkCounter > 1) {
				CurrBlock.SetShared();
			}
			FirstInBlock = this->Instrs.end();
			LastInBlock = this->Instrs.end();
			this->Blocks.push_back(CurrBlock);
			this->BlockCount += 1;
		}
	} // end for (bool ChunkOK = ...)

	// Now that we have all instructions and basic blocks, link each instruction
	//  to its basic block. Note that the instruction has to be linked to the copy
	//  of the basic block in this->Blocks(), not to the original SMPBasicBlock
	//  object that was constructed and destructed on the stack above. (Ouch!
	//  Very painful memory corruption debugging lesson.)
	list<SMPBasicBlock>::iterator CurrBlock;
	list<list<SMPInstr>::iterator>::iterator InstIter;
	for (CurrBlock = this->Blocks.begin(); CurrBlock != this->Blocks.end(); ++CurrBlock) {
		for (InstIter = CurrBlock->GetFirstInstr(); InstIter != CurrBlock->GetLastInstr(); ++InstIter) {
			(*InstIter)->SetBlock(CurrBlock->GetThisBlock());
		}
	}

#if KLUDGE_VFPRINTF_FAMILY
	if (0 != strstr(this->GetFuncName(), "printf")) {
		this->SharedChunks = true;
		msg("Kludging function %s\n", this->GetFuncName());
	}
#endif

#if SMP_IDAPRO52_WORKAROUND
	if (0 == strcmp(this->GetFuncName(), "error_for_asm")) {
		this->SharedChunks = true;
		msg("Kludging function %s\n", this->GetFuncName());
	}
#endif

	// Set up basic block links and map of instructions to blocks.
	if (!(this->HasSharedChunks())) {
		this->SetLinks();
		this->RPONumberBlocks();

		// Figure out the stack frame and related info.
		this->SetStackFrameInfo();

		list<SMPInstr>::iterator CurrInst;
		bool GoodRTL;
		this->BuiltRTLs = true;
		for (CurrInst = this->Instrs.begin(); CurrInst != this->Instrs.end(); ++CurrInst) {
			// Build tree RTLs for the instruction.
			GoodRTL = CurrInst->BuildRTL();
			this->BuiltRTLs = (this->BuiltRTLs && GoodRTL);
#if SMP_DEBUG_BUILD_RTL
			if (!GoodRTL) {
				msg("ERROR: Cannot build RTL at %x for %s\n", CurrInst->GetAddr(), 
					CurrInst->GetDisasm());
			}
#endif
			// Detect indirect memory references.
			CurrInst->AnalyzeIndirectRefs(this->UseFP);
		} // end for all instructions
	} // end if not shared chunks
	else { // has shared chunks; still want to compute stack frame info
#if SMP_DEBUG_CONTROLFLOW
		msg("SMPFunction::Analyze: set stack frame info.\n");
#ifdef SMP_DEBUG_FUNC
		msg(" %s has shared chunks \n", this->GetFuncName());
#endif
		// Figure out the stack frame and related info.
		this->SetStackFrameInfo();
	}
	list<SMPBasicBlock>::iterator BlockIter;
	for (BlockIter = this->Blocks.begin(); BlockIter != this->Blocks.end(); ++BlockIter) {
		BlockIter->Analyze();
	}
	// Audit the call instructions and call targets.
	if ((!this->AllCallTargets.empty()) || this->UnresolvedIndirectCalls) {
		bool FoundBadCallTarget = false;
		vector<ea_t>::iterator CurrTarget = this->AllCallTargets.begin();
		while (CurrTarget != this->AllCallTargets.end()) {
			if ((this->FirstEA <= *CurrTarget) && (this->FuncInfo.endEA >= *CurrTarget)) {
				// Found a call target that is within the function.
				FoundBadCallTarget = true;
				if (this->FirstEA == *CurrTarget) { // Direct recursion, not a pseudo-jump
					this->DirectlyRecursive = true;
				}
				CurrTarget = this->AllCallTargets.erase(CurrTarget);
			}
			else {
				++CurrTarget;
			}
		}
		if (FoundBadCallTarget) {
			// We have to mark the pseudo-call instructions and audit the direct and
			//  indirect call target vectors.

			// Audit direct call targets.
			CurrTarget = this->DirectCallTargets.begin();
			while (CurrTarget != this->DirectCallTargets.end()) {
				if ((this->FirstEA <= *CurrTarget) && (this->FuncInfo.endEA >= *CurrTarget)) {
					// Found a call target that is within the function.
					CurrTarget = this->DirectCallTargets.erase(CurrTarget);
				}
				else {
					++CurrTarget;
				}
			}
			// Audit indirect call targets.
			CurrTarget = this->IndirectCallTargets.begin();
			while (CurrTarget != this->IndirectCallTargets.end()) {
				if ((this->FirstEA <= *CurrTarget) && (this->FuncInfo.endEA >= *CurrTarget)) {
					// Found a call target that is within the function.
					CurrTarget = this->IndirectCallTargets.erase(CurrTarget);
				}
				else {
					++CurrTarget;
				}
			}
			// Find calls used as jumps.
			list<SMPInstr>::iterator InstIter = this->Instrs.begin();
			while (InstIter != this->Instrs.end()) {
				SMPitype InstFlow = InstIter->GetDataFlowType();
				if ((CALL == InstFlow) || (INDIR_CALL == InstFlow)) {
					InstIter->AnalyzeCallInst(this->FirstEA, this->FuncInfo.endEA);
				}
				++InstIter;
			}
		} // end if (FoundBadCallTarget)
	}

	this->MarkFunctionSafe();

#if SMP_COUNT_MEMORY_ALLOCATIONS
	SMPInstCount += ((unsigned long) this->Instrs.size());
	SMPBlockCount += ((unsigned long) this->Blocks.size());
	SMPLocalVarCount += ((unsigned long) this->LocalVarTable.size());
#endif

} // end of SMPFunction::Analyze()


// Free memory that is no longer needed after loop 2 of SMPProgram::Analyze().
void SMPFunction::FreeUnusedMemory2(void) {
	size_t UnusedElements;
	size_t CurrSize;

	// Go through the vector containers and trim each one down to its current size
	//  (releasing unused capacity), if the vector has been fully computed by the
	//  time SMPProgram::Analyze() loop 2 completes.
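	// The copy-and-swap expression used below, std::vector<T>(v).swap(v), is the
	//  usual pre-C++11 idiom for trimming a vector's capacity down to its size
	//  (what shrink_to_fit() does in C++11 and later); plain resize() does not
	//  release capacity.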
	CurrSize = this->DirectCallTargets.size();
	UnusedElements = this->DirectCallTargets.capacity() - CurrSize;
	if (0 < UnusedElements) {
		UnusedIntCount += (unsigned long) UnusedElements;
#if SMP_SHRINK_TO_FIT
		std::vector<ea_t>(this->DirectCallTargets).swap(this->DirectCallTargets);
#else
		this->DirectCallTargets.resize(CurrSize);
#endif
	}

	CurrSize = this->IndirectCallTargets.size();
	UnusedElements = this->IndirectCallTargets.capacity() - CurrSize;
	if (0 < UnusedElements) {
		UnusedIntCount += (unsigned long) UnusedElements;
#if SMP_SHRINK_TO_FIT
		std::vector<ea_t>(this->IndirectCallTargets).swap(this->IndirectCallTargets);
#else
		this->IndirectCallTargets.resize(CurrSize);
#endif
	}

	CurrSize = this->AllCallTargets.size();
	UnusedElements = this->AllCallTargets.capacity() - CurrSize;
	if (0 < UnusedElements) {
		UnusedIntCount += (unsigned long) UnusedElements;
#if SMP_SHRINK_TO_FIT
		std::vector<ea_t>(this->AllCallTargets).swap(this->AllCallTargets);
#else
		this->AllCallTargets.resize(CurrSize);
#endif
	}

	CurrSize = this->SavedRegLoc.size();
	UnusedElements = this->SavedRegLoc.capacity() - CurrSize;
	if (0 < UnusedElements) {
		UnusedIntCount += (unsigned long) UnusedElements;
#if SMP_SHRINK_TO_FIT
		std::vector<int>(this->SavedRegLoc).swap(this->SavedRegLoc);
#else
		this->SavedRegLoc.resize(CurrSize);
#endif
	}

	CurrSize = this->RPOBlocks.size();
	UnusedElements = this->RPOBlocks.capacity() - CurrSize;
	if (0 < UnusedElements) {
		list<SMPBasicBlock>::iterator DummyIter = this->Blocks.end();
		UnusedIntCount += (unsigned long) UnusedElements;
#if SMP_SHRINK_TO_FIT
		std::vector<list<SMPBasicBlock>::iterator>(this->RPOBlocks).swap(this->RPOBlocks);
#else
		this->RPOBlocks.resize(CurrSize, DummyIter);
#endif
	}

	CurrSize = this->LocalVarTable.size();
	UnusedElements = this->LocalVarTable.capacity() - CurrSize;
	if (0 < UnusedElements) {
		struct LocalVar DummyVar;
		DummyVar.offset = 0;
		DummyVar.size = 0;
		UnusedStructCount += (unsigned long) UnusedElements;
#if SMP_SHRINK_TO_FIT
		std::vector<struct LocalVar>(this->LocalVarTable).swap(this->LocalVarTable);
#else
		this->LocalVarTable.resize(CurrSize, DummyVar);
#endif
	}

	CurrSize = this->StackFrameMap.size();
	UnusedElements = this->StackFrameMap.capacity() - CurrSize;
	if (0 < UnusedElements) {
		struct StackFrameEntry DummyEntry;
		DummyEntry.offset = 0;
		DummyEntry.VarPtr = NULL;
		UnusedStructCount += (unsigned long) UnusedElements;
#if SMP_SHRINK_TO_FIT
		std::vector<struct StackFrameEntry>(this->StackFrameMap).swap(this->StackFrameMap);
#else
		this->StackFrameMap.resize(CurrSize, DummyEntry);
#endif
	}

	CurrSize = this->FineGrainedStackTable.size();
	UnusedElements = this->FineGrainedStackTable.capacity() - CurrSize;
	if (0 < UnusedElements) {
		struct FineGrainedInfo DummyFG;
		DummyFG.SignMiscInfo = 0;
		DummyFG.SizeInfo = 0;
		UnusedStructCount += (unsigned long) UnusedElements;
#if SMP_SHRINK_TO_FIT
		std::vector<struct FineGrainedInfo>(this->FineGrainedStackTable).swap(this->FineGrainedStackTable);
#else
		this->FineGrainedStackTable.resize(CurrSize, DummyFG);
#endif
	}

	return;
} // end of SMPFunction::FreeUnusedMemory2()

// Free memory that is no longer needed after loop 3 of SMPProgram::Analyze().
void SMPFunction::FreeUnusedMemory3(void) {
	size_t UnusedElements;
	size_t CurrSize;

	// Go through the vector containers and trim each one down to its current size
	//  (releasing unused capacity), if the vector has been fully computed by the
	//  time SMPProgram::Analyze() loop 3 completes.
	CurrSize = this->ReturnRegTypes.size();
	UnusedElements = this->ReturnRegTypes.capacity() - CurrSize;
	if (0 < UnusedElements) {
		UnusedIntCount += (unsigned long) UnusedElements;
#if SMP_SHRINK_TO_FIT
		std::vector<SMPOperandType>(this->ReturnRegTypes).swap(this->ReturnRegTypes);
#else
		this->ReturnRegTypes.resize(CurrSize);
#endif
	}

	return;
} // end of SMPFunction::FreeUnusedMemory3()

// Free memory that is no longer needed after type inference (loop 4 of SMPProgram::Analyze()).
void SMPFunction::FreeUnusedMemory4(void) {
	this->KillSet.clear();
	this->LiveOutSet.clear();
	this->LiveInSet.clear();
	this->StackFrameMap.clear();
	this->BlocksDefinedIn.clear();

#if SMP_SHRINK_TO_FIT
	std::set<op_t, LessOp>(this->KillSet).swap(this->KillSet);
	std::set<op_t, LessOp>(this->LiveOutSet).swap(this->LiveOutSet);
	std::set<op_t, LessOp>(this->LiveInSet).swap(this->LiveInSet);
#endif

	list<SMPBasicBlock>::iterator BlockIter;
	for (BlockIter = this->Blocks.begin(); BlockIter != this->Blocks.end(); ++BlockIter) {
		BlockIter->FreeUnusedMemory4();
	}
	return;
} // end of SMPFunction::FreeUnusedMemory4()

// Free SSA data structures that are no longer needed when all SSA numbers have
//  been recorded in DEFs and USEs.
void SMPFunction::FreeSSAMemory(void) {
	this->IDom.clear();
	this->DomTree.clear();
	this->BlocksDefinedIn.clear();
	this->SSACounter.clear();
	this->SSAStack.clear();

#if SMP_SHRINK_TO_FIT
	vector<int>(this->IDom).swap(this->IDom);
	vector<pair<int, list<int> > >(this->DomTree).swap(this->DomTree);
	vector<list<int> >(this->BlocksDefinedIn).swap(this->BlocksDefinedIn);
	vector<int>(this->SSACounter).swap(this->SSACounter);
	vector<list<int> >(this->SSAStack).swap(this->SSAStack);
#endif

	list<SMPBasicBlock>::iterator BlockIter;
	for (BlockIter = this->Blocks.begin(); BlockIter != this->Blocks.end(); ++BlockIter) {
		BlockIter->FreeSSAMemory();
	}
	return;
} // end of SMPFunction::FreeSSAMemory()

// For each instruction, mark the non-flags-reg DEFs as having live
//  metadata (mmStrata needs to fetch and track this metadata for this
//  instruction) or dead metadata (won't be used as addressing reg, won't
//  be stored to memory, won't be returned to caller).
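// The analysis below is a fixed-point iteration: each pass marks DEFs whose values
//  reach an address computation, a memory store, or a return/call register as having
//  live metadata, propagating that status backwards along DEF-USE chains, and the
//  do-while loop repeats until a pass makes no further changes.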
void SMPFunction::AnalyzeMetadataLiveness(void) {
	bool changed;
	int BaseReg;
	int IndexReg;
	ushort ScaleFactor;
	ea_t offset;
	op_t BaseOp, IndexOp, ReturnOp, DefOp, UseOp;
	BaseOp.type = o_reg;
	IndexOp.type = o_reg;
	ReturnOp.type = o_reg;
	list<SMPInstr>::iterator CurrInst;
	set<DefOrUse, LessDefUse>::iterator CurrDef;
	set<DefOrUse, LessDefUse>::iterator CurrUse;
	set<DefOrUse, LessDefUse>::iterator NextUse;
	bool DebugFlag = false;
	bool SafeMemDest = false;  // true for some SafeFunc memory destinations
	int IterationCount = 0;    // number of passes over the instruction list
#if SMP_DEBUG_DATAFLOW
	if (0 == strcmp("uw_frame_state_for", this->GetFuncName())) {
		DebugFlag = true;
	}
#endif

	do {
		changed = false;
		++IterationCount;
		if (DebugFlag) {
			msg("AnalyzeMetadataLiveness iteration count: %d \n", IterationCount);
		}
		for (CurrInst = this->Instrs.begin(); CurrInst != this->Instrs.end(); ++CurrInst) {
			SafeMemDest = false;  // true for some SafeFunc instructions
			// Skip the SSA marker instruction.
			if (NN_fnop == CurrInst->GetCmd().itype)
				continue;

			if (DebugFlag) {
				msg("Inst addr: %x \n", CurrInst->GetAddr());
			}
			CurrDef = CurrInst->GetFirstDef();
			while (CurrDef != CurrInst->GetLastDef()) {
				if (DEF_METADATA_UNANALYZED == CurrDef->GetMetadataStatus()) {
					DefOp = CurrDef->GetOp();
					// Handle special registers never used as address regs.
					if (DefOp.is_reg(X86_FLAGS_REG)
						|| ((o_trreg <= DefOp.type) && (o_xmmreg >= DefOp.type))) {
						CurrDef = CurrInst->SetDefMetadata(DefOp,
							DEF_METADATA_UNUSED);
						changed = true;
					}
					else if (DefOp.is_reg(R_sp) 
						|| (this->UseFP && DefOp.is_reg(R_bp))) {
						// Stack pointer register DEFs always have live
						//  metadata, but we don't need to propagate back
						//  through particular DEF-USE chains.
						CurrDef = CurrInst->SetDefMetadata(DefOp, DEF_METADATA_USED);
						changed = true;
					}
					else if ((o_mem <= DefOp.type) && (o_displ >= DefOp.type)) {
						// DEF is a memory operand. The addressing registers
						//  therefore have live metadata, and the memory metadata is live.
						// EXCEPTION: If the function is Safe, then direct stack writes
						//  to local variables (above the outgoing args area of the frame)
						//  are not live metadata, and there will be no indirect local frame
						//  writes, by definition of "safe." So, for safe funcs, only
						//  the o_mem (globals) and indirect writes are live metadata.
						if (this->SafeFunc && MDIsStackAccessOpnd(DefOp, this->UseFP)
							&& (!this->WritesAboveLocalFrame(DefOp))
							&& (!this->IsInOutgoingArgsRegion(DefOp))) {
							++CurrDef;
							SafeMemDest = true;
							continue;
						}
						CurrDef = CurrInst->SetDefMetadata(DefOp, DEF_METADATA_USED);
						changed = true;
						MDExtractAddressFields(DefOp, BaseReg, IndexReg,
							ScaleFactor, offset);
						if (R_none != BaseReg) {
							BaseOp.reg = MDCanonicalizeSubReg((ushort) BaseReg);
							if (BaseOp.is_reg(R_sp) 
								|| (this->UseFP && BaseOp.is_reg(R_bp))) {
								; // do nothing; DEF handled by case above
							}
							else {
								CurrUse = CurrInst->FindUse(BaseOp);
								if (CurrUse == CurrInst->GetLastUse()) {
									msg("ERROR: BaseReg %d not in USE list at %x for %s\n",
										BaseOp.reg, CurrInst->GetAddr(),
										CurrInst->GetDisasm());
								}
								assert(CurrUse != CurrInst->GetLastUse());
								if (this->IsGlobalName(BaseOp)) {
									changed |= this->PropagateGlobalMetadata(BaseOp,
										DEF_METADATA_USED, CurrUse->GetSSANum());
								}
								else {
									changed |= CurrInst->GetBlock()->PropagateLocalMetadata(BaseOp,
										DEF_METADATA_USED, CurrUse->GetSSANum());
								}
							}
						} // end if R_none != BaseReg
						if (R_none != IndexReg) {
							IndexOp.reg = MDCanonicalizeSubReg((ushort) IndexReg);
							if (IndexOp.is_reg(R_sp) 
								|| (this->UseFP && IndexOp.is_reg(R_bp))) {
								; // do nothing; DEF handled by case above
							}
							else {
								CurrUse = CurrInst->FindUse(IndexOp);
								if (CurrUse == CurrInst->GetLastUse()) {
									msg("ERROR: IndexReg %d not in USE list at %x for %s\n",
										IndexOp.reg, CurrInst->GetAddr(),
										CurrInst->GetDisasm());
								}
								assert(CurrUse != CurrInst->GetLastUse());
								if (0 != ScaleFactor) {
									; // mmStrata knows scaled reg is NUMERIC
									// ... its metadata is not fetched
								}
								else if (this->IsGlobalName(IndexOp)) {
									changed |= this->PropagateGlobalMetadata(IndexOp,
										DEF_METADATA_USED, CurrUse->GetSSANum());
								}
								else {
									changed |= CurrInst->GetBlock()->PropagateLocalMetadata(IndexOp,
										DEF_METADATA_USED, CurrUse->GetSSANum());
								}
							}
						} // end if R_none != IndexReg
					} // end if X86_FLAGS_REG .. else if stack ptr ... 
				} // end if unanalyzed metadata usage
				++CurrDef;
			} // end while processing DEFs
			if ((RETURN == CurrInst->GetDataFlowType())
				|| (CurrInst->IsTailCall())   // quasi-return
				|| (CALL == CurrInst->GetDataFlowType())
				|| (INDIR_CALL == CurrInst->GetDataFlowType())) {
				// The EAX and EDX registers can be returned to the caller,
				//  which might use their metadata. They show up as USEs
				//  of the return instruction. Some library functions
				//  pass return values in non-standard ways. e.g. through
				//  EBX or EDI, so we treat all return regs the same.
				// For CALL instructions, values can be passed in caller-saved
				//  registers, unfortunately, so the metadata is live-in.
				CurrUse = CurrInst->GetFirstUse();
				while (CurrUse != CurrInst->GetLastUse()) {
					NextUse = CurrUse;
					++NextUse;
					ReturnOp = CurrUse->GetOp();
					if (DebugFlag) {
						msg("ReturnOp: ");
						PrintOperand(ReturnOp);
						msg("\n");
					}
					if ((o_reg == ReturnOp.type) &&
						(!ReturnOp.is_reg(X86_FLAGS_REG))) {
						if (this->IsGlobalName(ReturnOp)) {
							changed |= this->PropagateGlobalMetadata(ReturnOp,
									DEF_METADATA_USED, CurrUse->GetSSANum());
						}
						else {
							changed |= CurrInst->GetBlock()->PropagateLocalMetadata(ReturnOp,
									DEF_METADATA_USED, CurrUse->GetSSANum());
						}
					}
					CurrUse = NextUse;
				} // end while all USEs
			} // end if return or call
			else if (CurrInst->HasDestMemoryOperand() 
				|| CurrInst->MDIsPushInstr()) {
				// Memory writes cause a lot of metadata usage.
				//  Addressing registers in the memory destination
				//  have live metadata used in bounds checking. The
				//  register being stored to memory could end up being
				//  used in some other bounds checking, unless we 
				//  have precise memory tracking and know that it
				//  won't.
				// We handled the addressing registers above, so we
				//  handle the register written to memory here.
				// The same exception applies as above: If the destination
				//  memory operand is not a stack write, then safe functions
				//  do not need to track the metadata.
				// If we push a register and have callees, the metadata could
				//  be live, if the callee gets its incoming args from our push
				//  instructions.
				if (SafeMemDest && !(CurrInst->MDIsPushInstr() && !this->IsLeaf())) {
					continue;  // go to next instruction
				}
				CurrUse = CurrInst->GetFirstUse();
				while (CurrUse != CurrInst->GetLastUse()) {
					NextUse = CurrUse;
					++NextUse;
					UseOp = CurrUse->GetOp();
					// NOTE: **!!** To be less conservative, we
					//  should propagate less for exchange category
					//  instructions.
					if ((UseOp.type == o_reg) && (!UseOp.is_reg(R_sp))
						&& (!(this->UseFP && UseOp.is_reg(R_bp)))
						&& (!UseOp.is_reg(X86_FLAGS_REG))) {

						if (this->IsGlobalName(UseOp)) {
							changed |= this->PropagateGlobalMetadata(UseOp,
									DEF_METADATA_USED, CurrUse->GetSSANum());
						}
						else {
							changed |= CurrInst->GetBlock()->PropagateLocalMetadata(UseOp,
									DEF_METADATA_USED, CurrUse->GetSSANum());
						}
					} // end if register
					CurrUse = NextUse;
				} // end while all USEs
			} // end if call or return else if memdest ...
		} // end for all instructions
	} while (changed);

	// All DEFs that still have status DEF_METADATA_UNANALYZED can now
	//  be marked as DEF_METADATA_UNUSED.
	for (CurrInst = this->Instrs.begin(); CurrInst != this->Instrs.end(); ++CurrInst) {
		if (NN_fnop == CurrInst->GetCmd().itype)
			continue;
		CurrDef = CurrInst->GetFirstDef();
		while (CurrDef != CurrInst->GetLastDef()) {
			if (DEF_METADATA_UNANALYZED == CurrDef->GetMetadataStatus()) {
				CurrDef = CurrInst->SetDefMetadata(CurrDef->GetOp(),
					DEF_METADATA_UNUSED);
				assert(CurrDef != CurrInst->GetLastDef());
			}
			++CurrDef;
		}
	}

	return;
} // end of SMPFunction::AnalyzeMetadataLiveness() 

// Propagate the metadata Status for UseOp/SSANum to its global DEF.
// Return true if successful.
bool SMPFunction::PropagateGlobalMetadata(op_t UseOp, SMPMetadataType Status, int SSANum) {
	bool changed = false;

	if ((0 > SSANum) || (o_void == UseOp.type))
		return false;

	// Find the DEF of UseOp with SSANum.
	bool FoundDef = false;
	list<SMPInstr>::iterator CurrInst;

	for (CurrInst = this->Instrs.begin(); CurrInst != this->Instrs.end(); ++CurrInst) {
		set<DefOrUse, LessDefUse>::iterator CurrDef;
		set<DefOrUse, LessDefUse>::iterator CurrUse;
		CurrDef = CurrInst->FindDef(UseOp);
		if (CurrDef != CurrInst->GetLastDef()) {
			if (SSANum == CurrDef->GetSSANum()) {
				if (Status != CurrDef->GetMetadataStatus()) {
					CurrDef = CurrInst->SetDefMetadata(UseOp, Status);
					changed = (CurrDef != CurrInst->GetLastDef());

					// If source operand was memory, we have two cases.
					//  (1) The instruction could be a load, in which
					//  case we should simply terminate the
					//  propagation, because the prior DEF of a memory
					//  location is always considered live metadata
					//  already, and we do not want to propagate liveness
					//  to the address regs in the USE list.
					//  EXCEPTION: For safe funcs, we propagate liveness
					//   for stack locations.
					//  (2) We could have an arithmetic operation such
					//  as reg := reg arithop memsrc. In this case, we
					//  still do not want to propagate through the memsrc,
					//  (with the same safe func EXCEPTION),
					//  but the register is both DEF and USE and we need
					//  to propagate through the register.
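					//  Illustrative example (hypothetical instruction): for
					//  "add eax, [ebp-12]", eax is both DEF and USE, so the status
					//  propagates through the eax USE (case 2) but not through the
					//  memory source or its address registers, except that safe
					//  functions also propagate through stack locations.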
					if (CurrInst->HasSourceMemoryOperand()) {
						if (this->SafeFunc) {
							op_t MemSrcOp = CurrInst->MDGetMemUseOp();
							assert(o_void != MemSrcOp.type);
							if (MDIsStackAccessOpnd(MemSrcOp, this->UseFP)) {
								// We have a SafeFunc stack access. This is
								//  the EXCEPTION case where we want to
								//  propagate metadata liveness for a memory
								//  location.
								CurrUse = CurrInst->FindUse(MemSrcOp);
								assert(CurrUse != CurrInst->GetLastUse());
								if (this->IsGlobalName(MemSrcOp)) {
									changed |= this->PropagateGlobalMetadata(MemSrcOp,
										Status, CurrUse->GetSSANum());
								}
								else {
									changed |= CurrInst->GetBlock()->PropagateLocalMetadata(MemSrcOp,
										Status, CurrUse->GetSSANum());
								}
							} // end if stack access operand
						} // end if SafeFunc
						if (3 == CurrInst->GetOptType()) { // move inst
							break; // load address regs are not live metadata
						}
						else if ((5 == CurrInst->GetOptType())
							|| (NN_and == CurrInst->GetCmd().itype)
							|| (NN_or == CurrInst->GetCmd().itype)
							|| (NN_xor == CurrInst->GetCmd().itype)) {
							// add, subtract, and, or with memsrc
							// Find the DEF reg in the USE list.
							CurrUse = CurrInst->FindUse(UseOp);
							assert(CurrUse != CurrInst->GetLastUse());
							changed |= this->PropagateGlobalMetadata(UseOp,
								Status, CurrUse->GetSSANum());
							break;
						}
					} // end if memory source

					// Now, propagate the metadata status to all the
					//  non-memory, non-flags-reg, non-special-reg 
					//  (i.e. regular registers) USEs.
					CurrUse = CurrInst->GetFirstUse();
					while (CurrUse != CurrInst->GetLastUse()) {
						op_t UseOp = CurrUse->GetOp();
						// NOTE: **!!** To be less conservative, we
						//  should propagate less for exchange category
						//  instructions.
						if ((UseOp.type == o_reg) && (!UseOp.is_reg(R_sp))