From 15d61fe505927c82f605c89657bc6cd3db941e99 Mon Sep 17 00:00:00 2001
From: clc5q <clc5q@git.zephyr-software.com>
Date: Tue, 21 Jul 2015 20:53:10 +0000
Subject: [PATCH] Partial commit of new stack frame access vector computations
 (not yet called).

Former-commit-id: 0292cbf7c3b1bf90f2329a9677d948dd9e98aca3
---
 include/base/SMPFunction.h |  19 +-
 src/base/SMPFunction.cpp   | 515 ++++++++++++++++++++++++++++++++++---
 2 files changed, 493 insertions(+), 41 deletions(-)

diff --git a/include/base/SMPFunction.h b/include/base/SMPFunction.h
index 382544e2..32ea9ac6 100644
--- a/include/base/SMPFunction.h
+++ b/include/base/SMPFunction.h
@@ -80,10 +80,8 @@ class STARS_IRDB_Function_t;
 #define STARS_AUDIT_JUMP_XREFS 0
 #define STARS_AUDIT_INDIR_JUMP_XREFS 1
 
-#define SMP_ANALYZE_STACK_POINTER 1
-
-	// We can decide if conservative analysis of memory writes will cause us to avoid fast returns,
-	//  or merely shadow the return address.
+// We can decide if conservative analysis of memory writes will cause us to avoid fast returns,
+//  or merely shadow the return address.
 #define STARS_CONSERVATIVE_FAST_RETURNS 0
 
 struct LocalVar {
@@ -95,6 +93,15 @@ struct LocalVar {
 // Comparison function for sorting.
 bool LocalVarCompare(const LocalVar &LV1, const LocalVar &LV2);
 
+enum StackAccessType {
+	STARS_STACK_UNKNOWN = 0,
+	STARS_STACK_INARG = 1,
+	STARS_STACK_RETURN_ADDRESS = 2,
+	STARS_STACK_CALLEE_SAVED_REG = 3,
+	STARS_STACK_LOCAL_FRAME = 4,
+	STARS_STACK_OUTARG = 5    // could be part of LOCAL_FRAME initially, then changed when we detect usage as OUTARG
+};
+
 // Entry for each byte address in the stack frame
 struct StackFrameEntry {
 	struct LocalVar *VarPtr;  // LocalVar that includes this offset
@@ -105,6 +112,7 @@ struct StackFrameEntry {
 	bool ESPRelativeAccess; // ever accessed by ESP+const?
 	bool EBPRelativeAccess; // ever accessed by EBP-const? (only if UseFP)
 	bool IndexedAccess;  // index reg of unknown value added to the base address
+	StackAccessType EntryType; // inference based on location and accessing instructions
 };
 
 enum FuncType { 
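
The EntryType field added above is initialized purely from frame geometry in BuildStackAccessTables(): negative offsets start out unknown, bytes from offset zero up to the return-address size are the return address itself, and everything above is an incoming argument. A minimal sketch of that rule (illustrative only, not part of the patch; RetAddrSize stands in for MD_DEFAULT_RETURN_ADDRESS_SIZE, e.g. 4 on x86-32, 8 on x86-64):

    // Sketch: geometric classification of one stack byte, mirroring the
    // map-building loop in SMPFunction::BuildStackAccessTables() below.
    StackAccessType ClassifyStackByte(long Offset, long RetAddrSize) {
        if (Offset < 0)
            return STARS_STACK_UNKNOWN;        // refined later: saved reg, local, or outarg
        else if (Offset < RetAddrSize)
            return STARS_STACK_RETURN_ADDRESS; // bytes of the return address itself
        else
            return STARS_STACK_INARG;          // above the return address: incoming args
    }
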
@@ -502,6 +510,8 @@ private:
 	std::vector<struct StackFrameEntry> StackFrameMap; // memory map of every byte on stack frame
 	std::vector<struct StackFrameEntry> PositiveOffsetStackFrameMap; // memory map of every byte on stack frame, return address and up (inargs, etc.)
 	std::vector<struct StackFrameEntry> NegativeOffsetStackFrameMap; // memory map of every byte on stack frame, below return address (saved reg, locals, etc.)
+	std::vector<struct FineGrainedInfo> PositiveOffsetFineGrainedStackTable; // Inferences from instruction accesses at nonnegative stack offsets (return address, inargs)
+	std::vector<struct FineGrainedInfo> NegativeOffsetFineGrainedStackTable; // Inferences from instruction accesses at negative stack offsets (saved regs, locals, outargs)
 	std::vector<struct FineGrainedInfo> FineGrainedStackTable; // built using opcode analysis, not IDA stack info
 	std::vector<int> SavedRegLoc; // indexed by reg #; offset from return address of callee-saved reg
 	std::vector<SMPOperandType> ReturnRegTypes; // indexed by reg #; inferred types upon return
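
Each FineGrainedInfo entry accumulates two bit sets per stack byte: SizeInfo collects operand-width masks and SignMiscInfo collects access-kind flags. A hedged sketch of how one observed access folds into an entry, assuming the FG_MASK_* flags and FineGrainedInfo fields used in the SMPFunction.cpp changes below (the helper function itself is illustrative):

    // Sketch: fold one observed stack access into a fine-grained table entry.
    void RecordStackAccess(struct FineGrainedInfo &Entry, unsigned short BitWidthMask,
                           bool IsWrite, bool SPRelative, bool Indexed) {
        Entry.SizeInfo |= BitWidthMask; // operand widths ever used at this byte
        Entry.SignMiscInfo |= (IsWrite ? FG_MASK_WRITTEN : FG_MASK_READ);
        Entry.SignMiscInfo |= (SPRelative ? FG_MASK_SP_RELATIVE : FG_MASK_FP_RELATIVE);
        if (Indexed)
            Entry.SignMiscInfo |= FG_MASK_INDEXED_ACCESS; // base+index: offset is only a lower bound
    }
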
@@ -559,6 +569,7 @@ private:
 	bool MDFixFrameInfo(void); // Redefine stack regions for our needs
 	bool MDFixUseFP(void);  // Fix IDA errors affecting UseFP
 	void BuildLocalVarTable(void); // Determine local variable boundaries on the stack
+	void BuildStackAccessTables(void); // Build tables to characterize stack accesses.
 	void SemiNaiveLocalVarID(void); // Semi-naive algorithm for local var boundaries ID
 	void UpdateMinMaxStackOffsets(SMPInstr *CurrInst, const STARSOpndTypePtr &TempOp); // Update MinStackAccessOffset and MaxStackAccessLimit if TempOp is stack access
 	bool AuditLocalVarTable(void); // Check and correct IDA Pro listing of local frame members.
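
The positive/negative split keys off the return address at offset zero: MinStackAccessOffset (<= 0) and MaxStackAccessLimit (> 0) bound the observed accesses, so the negative-offset tables hold -MinStackAccessOffset bytes and the positive-offset tables hold MaxStackAccessLimit bytes. A sketch of that sizing under those assumptions (the standalone function is illustrative):

    #include <cstddef>

    // Sketch: sizes of the per-byte maps, offsets relative to the return address.
    void ComputeMapSizes(long MinStackAccessOffset, long MaxStackAccessLimit,
                         std::size_t &NegMapSize, std::size_t &PosMapSize) {
        NegMapSize = (std::size_t)(-MinStackAccessOffset); // saved regs, locals, outargs
        PosMapSize = (std::size_t)(MaxStackAccessLimit);   // return address and inargs
    }
    // Note: an x86-64 leaf function can store into the 128-byte red zone,
    //  e.g. mov %eax, -8(%rsp), with no frame allocation at all, so
    //  MinStackAccessOffset can lie below MinStackDelta; the .cpp code below
    //  records that case via SetStackFrameExtendsPastStackTop().
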
diff --git a/src/base/SMPFunction.cpp b/src/base/SMPFunction.cpp
index 2d677d98..752c3b36 100644
--- a/src/base/SMPFunction.cpp
+++ b/src/base/SMPFunction.cpp
@@ -83,16 +83,13 @@ using namespace std;
 // Compute LVA/SSA or not? Turn it off for NICECAP demo on 31-JAN-2008
 #define SMP_COMPUTE_LVA_SSA 1
 
-// Compute fine-grained stack boundaries?
-#define SMP_COMPUTE_STACK_GRANULARITY 1
-
 // Use conditional type propagation on phi functions
 #define SMP_CONDITIONAL_TYPE_PROPAGATION 1
 
 // Kludges to fix IDA Pro 5.2 errors in cc1.ncexe
 #define SMP_IDAPRO52_WORKAROUND 0
 
-// Basic block number 0 is the top of the CFG lattice.
+// Basic block number 0 is the top of the CFG.
 #define SMP_TOP_BLOCK 0 
 
 // Set SharedTailChunks to TRUE for entire printf family
@@ -2013,10 +2010,8 @@ void SMPFunction::FindFramePointerDelta(void) {
 //  the function epilogue.
 void SMPFunction::SetStackFrameInfo(void) {
 
-#if SMP_COMPUTE_STACK_GRANULARITY
-	// Now, find the boundaries between local variables.
+	// Now, find the boundaries between stack frame objects.
 	this->BuildLocalVarTable();
-#endif
 
 	// Get callee-saved regs info for remediation use.
 	if ((STARS_BADADDR != this->GetFirstFrameAllocInstAddr()) && this->StackPtrAnalysisSucceeded()) {
@@ -2546,7 +2541,7 @@ void SMPFunction::MDFindIncomingTypes(void) {
 	return;
 } // end of SMPFunction::MDFindIncomingTypes()
 
-// Determine local variable boundaries in the stack frame.
+// Determine object boundaries in the stack frame.
 void SMPFunction::BuildLocalVarTable(void) {
 	// Currently we just use the info that IDA Pro has inferred from the direct
 	//  addressing of stack locations.
@@ -2554,13 +2549,8 @@ void SMPFunction::BuildLocalVarTable(void) {
 	return;
 } // end of SMPFunction::BuildLocalVarTable()
 
-// Use the local variable offset list from IDA's stack frame structure to compute
-//  the table of local variable boundaries.
-void SMPFunction::SemiNaiveLocalVarID(void) {
-	// NOTE: We use IDA Pro's offsets from this->FuncInfo (e.g. frsize) and NOT
-	//  our own corrected values in our private data members. The offsets we
-	//  read from the stack frame structure returned by get_frame() are consistent
-	//  with other IDA Pro values, not with our corrected values.
+// Build tables to characterize stack accesses.
+void SMPFunction::BuildStackAccessTables(void) {
 	list<SMPInstr *>::iterator InstIter;
 	this->SetLocalVarOffsetLimit(-20000);
 
@@ -2570,9 +2560,6 @@ void SMPFunction::SemiNaiveLocalVarID(void) {
 	}
 	assert(NULL != FuncPtr);
 
-	// We only get stack frame info from IDA Pro, not IRDB
-	FuncPtr->FillInLocalVarTable(this);
-
 	// If AnalyzedSP is false, that is all we can do.
 	if (!this->AnalyzedSP) {
 		this->OutgoingArgsSize = 0;
@@ -2581,33 +2568,469 @@ void SMPFunction::SemiNaiveLocalVarID(void) {
 		return;
 	}
 
-	// Calculate min and max stack point deltas.
-	this->MinStackDelta = 20000; // Final value should be negative or zero
-	this->MaxStackDelta = -1000; // Final value should be zero.
+	// Calculate min and max stack operand offsets accessed.
 	InstIter = this->Instrs.begin();
 #if SMP_USE_SSA_FNOP_MARKER
 	if ((*InstIter)->IsMarkerInst())
 		++InstIter;  // skip marker instruction
 #endif
-	for ( ; InstIter != this->Instrs.end(); ++InstIter) {
+	for (; InstIter != this->Instrs.end(); ++InstIter) {
 		SMPInstr *CurrInst = (*InstIter);
 		STARS_ea_t addr = CurrInst->GetAddr();
-		STARS_sval_t sp_delta = CurrInst->GetStackPtrOffset();
-		if (sp_delta < this->MinStackDelta)
-			this->MinStackDelta = sp_delta;
-		if (sp_delta > this->MaxStackDelta)
-			this->MaxStackDelta = sp_delta;
-		if (addr == this->LocalVarsAllocInstr) {
-			// Total stack pointer delta is sp_delta for the next instruction,
-			//  because IDA updates the sp delta AFTER each instruction.
-			list<SMPInstr *>::iterator NextInstIter = InstIter;
-			++NextInstIter;
-			if (NextInstIter != this->Instrs.end()) {
-				sp_delta = (*NextInstIter)->GetStackPtrOffset();
-				this->AllocPointDelta = sp_delta;
+		// Find the min and max stack offsets in DEFs and USEs.
+		if (CurrInst->HasDestMemoryOperand() || CurrInst->MDIsPushInstr() || CurrInst->MDIsEnterInstr()) {
+			set<DefOrUse, LessDefUse>::iterator CurrDef;
+			for (CurrDef = CurrInst->GetFirstDef(); CurrDef != CurrInst->GetLastDef(); ++CurrDef) {
+				STARSOpndTypePtr TempOp = CurrDef->GetOp();
+				if ((!TempOp->IsMemNoDisplacementOp()) && (!TempOp->IsMemDisplacementOp()))
+					continue;
+				this->UpdateMinMaxStackOffsets(CurrInst, TempOp);
+			} // end for all DEFs
+		}
+		if (CurrInst->HasSourceMemoryOperand() || CurrInst->MDIsPopInstr() || CurrInst->MDIsLeaveInstr() || CurrInst->MDIsLoadEffectiveAddressInstr()) {
+			if (CurrInst->MDIsLoadEffectiveAddressInstr()) {
+				STARSOpndTypePtr TempOp = CurrInst->GetLeaMemUseOp();
+				if (nullptr != TempOp) {
+					if (((TempOp->IsMemNoDisplacementOp()) || (TempOp->IsMemDisplacementOp())) && (!(CurrInst->IsRegClearIdiom() || CurrInst->IsNop())))  {
+						this->UpdateMinMaxStackOffsets(CurrInst, TempOp);
+					}
+				}
+			}
+			else {
+				set<DefOrUse, LessDefUse>::iterator CurrUse;
+				for (CurrUse = CurrInst->GetFirstUse(); CurrUse != CurrInst->GetLastUse(); ++CurrUse) {
+					STARSOpndTypePtr TempOp = CurrUse->GetOp();
+					if ((!TempOp->IsMemNoDisplacementOp()) && (!TempOp->IsMemDisplacementOp()))
+						continue;
+					this->UpdateMinMaxStackOffsets(CurrInst, TempOp);
+				} // end for all USEs
 			}
 		}
 	} // end for all instructions
+	if (0 == this->MaxStackAccessLimit) {
+		// Never accessed any incoming args. However, we know the return address is on the stack,
+		//  and it is almost never accessed, so we want to record its presence.
+		this->MaxStackAccessLimit = MD_DEFAULT_RETURN_ADDRESS_SIZE;
+	}
+	if (this->MinStackAccessOffset > this->MinStackDelta) {
+		// Some functions allocate space that is never visibly accessed. We still want to size
+		//  our stack frame maps to cover the full allocation, and MinStackDelta is used for normalizing offsets.
+		this->MinStackAccessOffset = this->MinStackDelta;
+	}
+	else if (this->MinStackAccessOffset < this->MinStackDelta) {
+		// x86-64 leaf functions are often optimized by not allocating a stack frame. Instead,
+		//  negative displacements from the stack pointer are used to access locals. So the
+		//  stack pointer (reflected in MinStackDelta) never goes down as far as the bottom of
+		//  the frame (reflected by MinStackAccessOffset). We need to record that such a function
+		//  has been detected so that we don't fail assertions unnecessarily later.
+		this->SetStackFrameExtendsPastStackTop();
+	}
+
+	if (!this->AnalyzedSP || this->CallsAlloca || (STARS_BADADDR == this->LocalVarsAllocInstr)) {
+		SMP_msg("FindOutgoingArgsSize not called for %s ", this->GetFuncName());
+		SMP_msg("AnalyzedSP: %d CallsAlloca: %d LocalVarsAllocInstr: %lx \n",
+			this->AnalyzedSP, this->CallsAlloca, (unsigned long) this->LocalVarsAllocInstr);
+		return;
+	}
+
+	// Now, do the mapping of instruction stack accesses to the table entries.
+	unsigned short BitWidthMask;
+	bool DebugFlag = false;
+	int SignedOffset;
+
+	if ((0 <= this->MinStackDelta) || (0 <= this->AllocPointDelta)) {
+		// No allocations; sometimes happens in library functions.
+		this->OutgoingArgsSize = 0;
+		this->AllocPointDelta = 0;
+		if ((this->MinStackDelta > this->MaxStackDelta) || (0 < this->MinStackDelta)) {
+			this->MinStackDelta = 0;
+		}
+	}
+	assert(0 >= this->MinStackDelta);
+
+	// Allocate vectors of stack frame entries, one for each byte of the stack frame.
+	// One vector will be for negative stack offsets (e.g. saved regs, local vars) and
+	//  the other vector will be for nonnegative offsets (e.g. return address, inargs).
+	// Two more vectors will hold fine-grained stack access info.
+	//  These will be our memory maps for analyzing stack usage.
+	for (int i = this->MinStackAccessOffset; i < this->MaxStackAccessLimit; ++i) {
+		struct StackFrameEntry TempEntry;
+		TempEntry.VarPtr = NULL;
+		TempEntry.offset = (long)i;
+		TempEntry.Read = false;
+		TempEntry.Written = false;
+		TempEntry.AddressTaken = false;
+		TempEntry.ESPRelativeAccess = false;
+		TempEntry.EBPRelativeAccess = false;
+		TempEntry.IndexedAccess = false;
+		struct FineGrainedInfo TempFineGrained;
+		TempFineGrained.SignMiscInfo = 0;
+		TempFineGrained.SizeInfo = 0;
+		if (i < 0) {
+			TempEntry.EntryType = STARS_STACK_UNKNOWN;
+			this->NegativeOffsetStackFrameMap.push_back(TempEntry);
+			this->NegativeOffsetFineGrainedStackTable.push_back(TempFineGrained);
+		}
+		else {
+			if (i < MD_DEFAULT_RETURN_ADDRESS_SIZE)
+				TempEntry.EntryType = STARS_STACK_RETURN_ADDRESS;
+			else
+				TempEntry.EntryType = STARS_STACK_INARG;
+			this->PositiveOffsetStackFrameMap.push_back(TempEntry);
+			this->PositiveOffsetFineGrainedStackTable.push_back(TempFineGrained);
+		}
+	}
+
+	if (0 < this->AllocPointDelta) {
+		SMP_msg("FATAL ERROR: AllocPointDelta = %ld in %s\n", (long) this->AllocPointDelta, this->GetFuncName());
+	}
+	assert(0 >= this->AllocPointDelta);
+
+	// Iterate through all instructions and record stack frame accesses in the stack frame maps.
+	InstIter = this->Instrs.begin();
+#if SMP_USE_SSA_FNOP_MARKER
+	if ((*InstIter)->IsMarkerInst())
+		++InstIter;  // skip marker instruction
+#endif
+	for (; InstIter != this->Instrs.end(); ++InstIter) {
+		SMPInstr *CurrInst = (*InstIter);
+		STARS_ea_t InstAddr = CurrInst->GetAddr();
+		STARS_sval_t sp_delta = CurrInst->GetStackPtrOffset();
+		if (0 < sp_delta) {
+			// Stack underflow.
+			SMP_msg("ERROR: Stack underflow at %lx %s sp_delta: %ld\n", (unsigned long) InstAddr,
+				CurrInst->GetDisasm(), (long)sp_delta);
+			this->OutgoingArgsComputed = false;
+			this->OutgoingArgsSize = 0;
+			return;
+		}
+		assert(0 >= sp_delta);
+		STARS_ea_t offset;
+		std::size_t DataSize;
+		bool UsedFramePointer;
+		bool IndexedAccess;
+		bool SignedMove;
+		bool UnsignedMove;
+		if (CurrInst->HasDestMemoryOperand()) {
+			set<DefOrUse, LessDefUse>::iterator CurrDef;
+			for (CurrDef = CurrInst->GetFirstDef(); CurrDef != CurrInst->GetLastDef(); ++CurrDef) {
+				STARSOpndTypePtr TempOp = CurrDef->GetOp();
+				if ((!TempOp->IsMemNoDisplacementOp()) && (!TempOp->IsMemDisplacementOp()))
+					continue;
+				if (this->MDGetStackOffsetAndSize(CurrInst, TempOp, this->MinStackAccessOffset, offset, DataSize, UsedFramePointer,
+					IndexedAccess, SignedMove, UnsignedMove)) {
+					SignedOffset = (int) offset;
+					if (IndexedAccess && ((0 > SignedOffset) || ((SignedOffset + DataSize) > (std::size_t)(this->MaxStackAccessLimit - this->MinStackAccessOffset)))) {
+						continue; // Indexed expressions can be within frame even when offset is outside frame
+					}
+					assert(0 <= SignedOffset);
+					SignedOffset += this->MinStackAccessOffset;
+#if 0
+					if ((offset + DataSize) > this->StackFrameMap.size()) {
+						SMP_msg("ERROR: offset = %lu DataSize = %zu FrameMapSize = %zu\n",
+							(unsigned long)offset, DataSize, this->StackFrameMap.size());
+						continue;
+					}
+					assert((offset + DataSize) <= this->StackFrameMap.size());
+#endif
+					bool ESPRelative = (!(UsedFramePointer || CurrInst->HasFPNormalizedToSP()));
+					if (SignedOffset < 0) {
+						for (int j = 0; j < (int) DataSize; ++j) {
+							this->NegativeOffsetStackFrameMap[-SignedOffset + j].Written = true;
+							this->NegativeOffsetStackFrameMap[-SignedOffset + j].IndexedAccess = IndexedAccess;
+							if (ESPRelative) {
+								this->NegativeOffsetStackFrameMap[-SignedOffset + j].ESPRelativeAccess = true;
+							}
+							else {
+								this->NegativeOffsetStackFrameMap[-SignedOffset + j].EBPRelativeAccess = true;
+							}
+						}
+					}
+					else {
+						for (int j = 0; j < (int) DataSize; ++j) {
+							this->PositiveOffsetStackFrameMap[SignedOffset + j].Written = true;
+							this->PositiveOffsetStackFrameMap[SignedOffset + j].IndexedAccess = IndexedAccess;
+							if (ESPRelative) {
+								this->PositiveOffsetStackFrameMap[SignedOffset + j].ESPRelativeAccess = true;
+							}
+							else {
+								this->PositiveOffsetStackFrameMap[SignedOffset + j].EBPRelativeAccess = true;
+							}
+						}
+					}
+					struct FineGrainedInfo StackDefFG;
+					BitWidthMask = ComputeOperandBitWidthMask(TempOp, DataSize);
+					StackDefFG.SizeInfo = BitWidthMask;
+					StackDefFG.SignMiscInfo = FG_MASK_WRITTEN;
+					if (SignedOffset < 0) {
+						this->NegativeOffsetFineGrainedStackTable.at(-SignedOffset).SizeInfo |= BitWidthMask;
+						this->NegativeOffsetFineGrainedStackTable.at(-SignedOffset).SignMiscInfo |= FG_MASK_WRITTEN;
+						if (IndexedAccess) {
+							this->NegativeOffsetFineGrainedStackTable.at(-SignedOffset).SignMiscInfo |= FG_MASK_INDEXED_ACCESS;
+							StackDefFG.SignMiscInfo |= FG_MASK_INDEXED_ACCESS;
+						}
+						if (ESPRelative) {
+							this->NegativeOffsetFineGrainedStackTable.at(-SignedOffset).SignMiscInfo |= FG_MASK_SP_RELATIVE;
+							StackDefFG.SignMiscInfo |= FG_MASK_SP_RELATIVE;
+						}
+						else {
+							this->NegativeOffsetFineGrainedStackTable.at(-SignedOffset).SignMiscInfo |= FG_MASK_FP_RELATIVE;
+							StackDefFG.SignMiscInfo |= FG_MASK_FP_RELATIVE;
+						}
+					}
+					else { // SignedOffset >= 0
+						this->PositiveOffsetFineGrainedStackTable.at(SignedOffset).SizeInfo |= BitWidthMask;
+						this->PositiveOffsetFineGrainedStackTable.at(SignedOffset).SignMiscInfo |= FG_MASK_WRITTEN;
+						if (IndexedAccess) {
+							this->PositiveOffsetFineGrainedStackTable.at(SignedOffset).SignMiscInfo |= FG_MASK_INDEXED_ACCESS;
+							StackDefFG.SignMiscInfo |= FG_MASK_INDEXED_ACCESS;
+						}
+						if (ESPRelative) {
+							this->PositiveOffsetFineGrainedStackTable.at(SignedOffset).SignMiscInfo |= FG_MASK_SP_RELATIVE;
+							StackDefFG.SignMiscInfo |= FG_MASK_SP_RELATIVE;
+						}
+						else {
+							this->PositiveOffsetFineGrainedStackTable.at(SignedOffset).SignMiscInfo |= FG_MASK_FP_RELATIVE;
+							StackDefFG.SignMiscInfo |= FG_MASK_FP_RELATIVE;
+						}
+					}
+					// We will process the signedness of stores later, so that loads can take precedence
+					//  over stores in determining signedness in the table. We go ahead and process
+					//  signedness for the separate DEF and USE maps by InstAddr.
+					if (SignedMove) {
+						StackDefFG.SignMiscInfo |= FG_MASK_SIGNED;
+					}
+					else if (UnsignedMove) {
+						StackDefFG.SignMiscInfo |= FG_MASK_UNSIGNED;
+					}
+					// Insert the StackDefFG into the map of InstAddr to DEF FG info.
+					pair<map<STARS_ea_t, struct FineGrainedInfo>::iterator, bool> InsertResult;
+					pair<STARS_ea_t, struct FineGrainedInfo> InsertValue(InstAddr, StackDefFG);
+					InsertResult = this->StackDefFGInfo.insert(InsertValue);
+					assert(InsertResult.second);
+				} // end if MDGetStackOffsetAndSize()
+			} // end for all DEFs
+		} // end if DestMemoryOperand
+
+		if (CurrInst->HasSourceMemoryOperand()) {
+			set<DefOrUse, LessDefUse>::iterator CurrUse;
+			for (CurrUse = CurrInst->GetFirstUse(); CurrUse != CurrInst->GetLastUse(); ++CurrUse) {
+				STARSOpndTypePtr TempOp = CurrUse->GetOp();
+				if ((!TempOp->IsMemNoDisplacementOp()) && (!TempOp->IsMemDisplacementOp()))
+					continue;
+				if (this->MDGetStackOffsetAndSize(CurrInst, TempOp, this->MinStackAccessOffset, offset, DataSize, UsedFramePointer,
+					IndexedAccess, SignedMove, UnsignedMove)) {
+					SignedOffset = (int) offset;
+					if (IndexedAccess && ((0 > SignedOffset) || ((SignedOffset + DataSize) > (std::size_t)(this->MaxStackAccessLimit - this->MinStackAccessOffset)))) {
+						continue; // Indexed expressions can be within frame even when offset is outside frame
+					}
+					assert(0 <= SignedOffset);
+					SignedOffset += this->MinStackAccessOffset;
+#if 0
+					if ((SignedOffset + DataSize) > this->StackFrameMap.size()) {
+						SMP_msg("ERROR: offset = %lu DataSize = %zu FrameMapSize = %zu\n",
+							(unsigned long)offset, DataSize, this->StackFrameMap.size());
+						continue;
+					}
+					assert((SignedOffset + DataSize) <= this->StackFrameMap.size());
+#endif
+					bool ESPRelative = (!(UsedFramePointer || CurrInst->HasFPNormalizedToSP()));
+					if (SignedOffset < 0) {
+						for (int j = 0; j < (int) DataSize; ++j) {
+							this->NegativeOffsetStackFrameMap[-SignedOffset + j].Read = true;
+							this->NegativeOffsetStackFrameMap[-SignedOffset + j].IndexedAccess = IndexedAccess;
+							if (ESPRelative) {
+								this->NegativeOffsetStackFrameMap[-SignedOffset + j].ESPRelativeAccess = true;
+							}
+							else {
+								this->NegativeOffsetStackFrameMap[-SignedOffset + j].EBPRelativeAccess = true;
+							}
+						}
+					}
+					else {
+						for (int j = 0; j < (int)DataSize; ++j) {
+							this->PositiveOffsetStackFrameMap[SignedOffset + j].Read = true;
+							this->PositiveOffsetStackFrameMap[SignedOffset + j].IndexedAccess = IndexedAccess;
+							if (ESPRelative) {
+								this->PositiveOffsetStackFrameMap[SignedOffset + j].ESPRelativeAccess = true;
+							}
+							else {
+								this->PositiveOffsetStackFrameMap[SignedOffset + j].EBPRelativeAccess = true;
+							}
+						}
+					}
+					struct FineGrainedInfo StackUseFG;
+					BitWidthMask = ComputeOperandBitWidthMask(TempOp, DataSize);
+					StackUseFG.SizeInfo = BitWidthMask;
+					StackUseFG.SignMiscInfo = FG_MASK_READ;
+					if (SignedOffset < 0) {
+						this->NegativeOffsetFineGrainedStackTable.at(-SignedOffset).SizeInfo |= BitWidthMask;
+						this->NegativeOffsetFineGrainedStackTable.at(-SignedOffset).SignMiscInfo |= FG_MASK_READ;
+						if (IndexedAccess) {
+							this->NegativeOffsetFineGrainedStackTable.at(-SignedOffset).SignMiscInfo |= FG_MASK_INDEXED_ACCESS;
+							StackUseFG.SignMiscInfo |= FG_MASK_INDEXED_ACCESS;
+						}
+						if (ESPRelative) {
+							this->NegativeOffsetFineGrainedStackTable.at(-SignedOffset).SignMiscInfo |= FG_MASK_SP_RELATIVE;
+							StackUseFG.SignMiscInfo |= FG_MASK_SP_RELATIVE;
+						}
+						else {
+							this->NegativeOffsetFineGrainedStackTable.at(-SignedOffset).SignMiscInfo |= FG_MASK_FP_RELATIVE;
+							StackUseFG.SignMiscInfo |= FG_MASK_FP_RELATIVE;
+						}
+					}
+					else { // SignedOffset >= 0
+						this->PositiveOffsetFineGrainedStackTable.at(SignedOffset).SizeInfo |= BitWidthMask;
+						this->PositiveOffsetFineGrainedStackTable.at(SignedOffset).SignMiscInfo |= FG_MASK_READ;
+						if (IndexedAccess) {
+							this->PositiveOffsetFineGrainedStackTable.at(SignedOffset).SignMiscInfo |= FG_MASK_INDEXED_ACCESS;
+							StackUseFG.SignMiscInfo |= FG_MASK_INDEXED_ACCESS;
+						}
+						if (ESPRelative) {
+							this->PositiveOffsetFineGrainedStackTable.at(SignedOffset).SignMiscInfo |= FG_MASK_SP_RELATIVE;
+							StackUseFG.SignMiscInfo |= FG_MASK_SP_RELATIVE;
+						}
+						else {
+							this->PositiveOffsetFineGrainedStackTable.at(SignedOffset).SignMiscInfo |= FG_MASK_FP_RELATIVE;
+							StackUseFG.SignMiscInfo |= FG_MASK_FP_RELATIVE;
+						}
+					}
+					if (SignedMove) {
+						if (SignedOffset < 0)
+							this->NegativeOffsetFineGrainedStackTable.at(-SignedOffset).SignMiscInfo |= FG_MASK_SIGNED;
+						else
+							this->PositiveOffsetFineGrainedStackTable.at(SignedOffset).SignMiscInfo |= FG_MASK_SIGNED;
+						StackUseFG.SignMiscInfo |= FG_MASK_SIGNED;
+					}
+					else if (UnsignedMove) {
+						if (SignedOffset < 0)
+							this->NegativeOffsetFineGrainedStackTable.at(-SignedOffset).SignMiscInfo |= FG_MASK_UNSIGNED;
+						else
+							this->PositiveOffsetFineGrainedStackTable.at(SignedOffset).SignMiscInfo |= FG_MASK_UNSIGNED;
+						StackUseFG.SignMiscInfo |= FG_MASK_UNSIGNED;
+					}
+					// Insert the StackUseFG into the map of InstAddr to USE FG info.
+					pair<map<STARS_ea_t, struct FineGrainedInfo>::iterator, bool> InsertResult;
+					pair<STARS_ea_t, struct FineGrainedInfo> InsertValue(InstAddr, StackUseFG);
+					InsertResult = this->StackUseFGInfo.insert(InsertValue);
+					assert(InsertResult.second);
+				} // end if MDGetStackOffsetAndSize()
+			} // end for all USEs
+		} // end if SourceMemoryOperand
+		// NOTE: Detect taking the address of stack locations. **!!**
+	} // end for all instructions
+
+	// If function is a leaf function, set OutgoingArgsSize to zero and return.
+	// If function has no local frame allocation, ditto.
+	if ((this->IsLeaf() && !(this->IsDirectlyRecursive())) || (this->AllocPointDelta == 0)) {
+		this->OutgoingArgsSize = 0;
+		return;
+	}
+
+	// For non-leaf functions, set the OutgoingArgsSize to the write-only, ESP-relative
+	//  region at the bottom of the NegativeOffsetStackFrameMap.
+	bool OutgoingArgsRegionFinished = false;
+	bool IndexedOutgoingArgs = false; // Any indexed accesses to outgoing args?
+	std::size_t FramePadSize = 0;
+	std::size_t AlignmentPadSize = 0; // bottom of frame, unused space below outargs
+	for (std::size_t MapIndex = 0; MapIndex < this->NegativeOffsetStackFrameMap.size(); ++MapIndex) {
+		// Some of the bottom of the stack frame might be below the local frame allocation.
+		//  These are pushes that happened after allocation, etc. We skip over these
+		//  locations and define the outgoing args region to start strictly at the bottom
+		//  of the local frame allocation.
+		struct StackFrameEntry TempEntry = this->NegativeOffsetStackFrameMap.at(MapIndex);
+#if 0
+		if (DebugFlag) {
+			SMP_msg("StackFrameMap entry %zu: offset: %ld Read: %d Written: %d ESP: %d EBP: %d\n",
+				MapIndex, TempEntry.offset, TempEntry.Read, TempEntry.Written,
+				TempEntry.ESPRelativeAccess, TempEntry.EBPRelativeAccess);
+		}
+#endif
+		if (TempEntry.offset < this->AllocPointDelta)
+			continue;
+#if 0
+		if ((NULL != TempEntry.VarPtr) && ((0 == strcmp(" s", TempEntry.VarPtr->VarName)) || (0 == strcmp(" r", TempEntry.VarPtr->VarName)))) {
+			// We have reached saved regs or the return address.
+			break;
+		}
+#endif
+
+		if (OutgoingArgsRegionFinished) {
+			// We are just processing the stack frame padding.
+			if (!TempEntry.Read && !TempEntry.Written) {
+				// Could be stack frame padding.
+				++FramePadSize;
+			}
+			else {
+				break; // No more padding region
+			}
+		}
+		else if ((this->OutgoingArgsSize == 0) && (!TempEntry.Read) && (!TempEntry.Written)) {
+			// We have not started accumulating outgoing args bytes, we have reached the
+			//  AllocPointDelta, yet we find space that is neither written nor read. This
+			//  empty space at the bottom of the stack frame could just be for stack alignment
+			//  purposes, especially in the new x86-64 ABI, so it should not prevent us from
+			//  finding outgoing args space above it.
+			++AlignmentPadSize;
+		}
+		else if (TempEntry.Read || TempEntry.EBPRelativeAccess || !TempEntry.Written
+			|| !TempEntry.ESPRelativeAccess) {
+			OutgoingArgsRegionFinished = true;
+			if (!TempEntry.Read && !TempEntry.Written) {
+				// Could be stack frame padding.
+				++FramePadSize;
+			}
+			else {
+				break; // No padding region
+			}
+		}
+		else {
+			this->OutgoingArgsSize++;
+			if (TempEntry.IndexedAccess) {
+				IndexedOutgoingArgs = true;
+			}
+		}
+	}
+
+	// Add in the alignment padding below the written outargs region.
+	if (this->OutgoingArgsSize > 0) {
+		this->OutgoingArgsSize += AlignmentPadSize;
+	}
+
+	// If any outgoing arg was accessed using an index register, then we don't know how high
+	//  the index register value went. It could potentially consume the so-called padding
+	//  region, which might be just the region we did not detect direct accesses to because
+	//  the accesses were indirect. To be safe, we expand the outgoing args region to fill
+	//  the padding region above it in this indexed access case.
+	if (IndexedOutgoingArgs) {
+		this->OutgoingArgsSize += FramePadSize;
+	}
+
+	return;
+
+} // end of SMPFunction::BuildStackAccessTables()
+
+// Use the local variable offset list from IDA's stack frame structure to compute
+//  the table of local variable boundaries.
+void SMPFunction::SemiNaiveLocalVarID(void) {
+	// NOTE: We use IDA Pro's offsets from this->FuncInfo (e.g. frsize) and NOT
+	//  our own corrected values in our private data members. The offsets we
+	//  read from the stack frame structure returned by get_frame() are consistent
+	//  with other IDA Pro values, not with our corrected values.
+	list<SMPInstr *>::iterator InstIter;
+	this->SetLocalVarOffsetLimit(-20000);
+
+	STARS_Function_t *FuncPtr = SMP_get_func(this->GetStartAddr());
+	if (NULL == FuncPtr) {
+		SMP_msg("FATAL ERROR in SMPFunction::SemiNaiveLocalVarID; no func ptr\n");
+	}
+	assert(NULL != FuncPtr);
+
+	// We only get stack frame info from IDA Pro, not IRDB
+	FuncPtr->FillInLocalVarTable(this);
+
+	// If AnalyzedSP is false, that is all we can do.
+	if (!this->AnalyzedSP) {
+		this->OutgoingArgsSize = 0;
+		this->MinStackDelta = 0;
+		this->AllocPointDelta = 0;
+		return;
+	}
 
 	// Calculate min and max stack operand offsets accessed.
 	InstIter = this->Instrs.begin();
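
The outgoing-args detection in the new BuildStackAccessTables() above is a three-state walk up from the bottom of the frame: skip bytes below the allocation point, count untouched bytes as alignment padding until the first access, accumulate write-only ESP-relative bytes as outgoing args, then treat any further untouched bytes as frame padding. A condensed sketch over a plain byte map (field names as in StackFrameEntry; the standalone function is illustrative, not the patch's code):

    #include <cstddef>
    #include <vector>

    // Sketch: condensed form of the outgoing-args scan, assuming the
    // StackFrameEntry fields declared in SMPFunction.h above.
    std::size_t ComputeOutArgsBytes(const std::vector<struct StackFrameEntry> &Map,
                                    long AllocPointDelta) {
        std::size_t OutArgs = 0, AlignPad = 0, FramePad = 0;
        bool RegionFinished = false, Indexed = false;
        for (std::size_t k = 0; k < Map.size(); ++k) {
            const struct StackFrameEntry &E = Map[k];
            if (E.offset < AllocPointDelta)
                continue; // pushes below the local frame allocation
            bool Untouched = (!E.Read && !E.Written);
            if (RegionFinished) {
                if (Untouched) ++FramePad; else break;  // padding above outargs region
            }
            else if ((0 == OutArgs) && Untouched) {
                ++AlignPad;                             // alignment space below outargs
            }
            else if (E.Read || E.EBPRelativeAccess || !E.Written || !E.ESPRelativeAccess) {
                RegionFinished = true;
                if (Untouched) ++FramePad; else break;
            }
            else {
                ++OutArgs;                              // write-only, ESP-relative byte
                Indexed = Indexed || E.IndexedAccess;
            }
        }
        if (OutArgs > 0) OutArgs += AlignPad;           // include alignment pad below
        if (Indexed) OutArgs += FramePad;               // indexed stores may reach into the pad
        return OutArgs;
    }
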
@@ -3848,7 +4271,6 @@ void SMPFunction::EmitStackFrameAnnotations(FILE *AnnotFile, SMPInstr *Instr) {
 		global_STARS_program->IncrementDataReferentID();
 		SMP_fprintf(AnnotFile, "%18llx %6lu DATAREF STACK %lu esp + %d PARENT LocalFrame LOCALFRAME\n",
 				(unsigned long long) addr, (unsigned long) this->LocalVarsSize, ParentReferentID, 0);
-#if SMP_COMPUTE_STACK_GRANULARITY
 		if (this->AnalyzedSP && !this->CallsAlloca && (STARS_BADADDR != this->LocalVarsAllocInstr)) {
 			// We can only fine-grain the stack frame if we were able to analyze the stack
 			if (this->OutgoingArgsSize > 0) {
@@ -3877,7 +4299,6 @@ void SMPFunction::EmitStackFrameAnnotations(FILE *AnnotFile, SMPInstr *Instr) {
 				global_STARS_program->IncrementDataReferentID();
 			}
 		} // end if (this->AnalyzedSP and not Alloca .... )
-#endif
 	} // end if (0 < LocalVarsSize)
 	return;
 } // end of SMPFunction::EmitStackFrameAnnotations() 
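
For reference, the LOCALFRAME annotation format in the hunk above emits one line per function frame; with hypothetical values (address, frame size, and referent ID invented) it renders as sketched here:

    #include <cstdio>

    int main() {
        // Hypothetical values, for illustration of the format string only.
        unsigned long long addr = 0x804f2a0ULL;
        unsigned long LocalVarsSize = 64;
        unsigned long ParentReferentID = 5;
        printf("%18llx %6lu DATAREF STACK %lu esp + %d PARENT LocalFrame LOCALFRAME\n",
            addr, LocalVarsSize, ParentReferentID, 0);
        return 0;
    }
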
@@ -4164,13 +4585,33 @@ void SMPFunction::AdvancedAnalysis(void) {
 		&& (!this->HasUnresolvedIndirectJumps()) && (!this->HasSharedChunks())) {
 		this->LiveVariableAnalysis(false);
 	}
+
+	// Calculate min and max stack pointer deltas and AllocPointDelta, and fix up DEF and USE lists for call instructions.
+	this->MinStackDelta = 20000; // Final value should be negative or zero
+	this->MaxStackDelta = -1000; // Final value should be zero.
 	InstIter = this->Instrs.begin();
 	if ((*InstIter)->IsMarkerInst()) {
 		++InstIter; // skip marker inst
 	}
 	for ( ; InstIter != this->Instrs.end(); ++InstIter) {
 		CurrInst = (*InstIter);
+		STARS_ea_t addr = CurrInst->GetAddr();
 		CurrInst->MDFixupCallDefUseLists();
+		STARS_sval_t sp_delta = CurrInst->GetStackPtrOffset();
+		if (sp_delta < this->MinStackDelta)
+			this->MinStackDelta = sp_delta;
+		if (sp_delta > this->MaxStackDelta)
+			this->MaxStackDelta = sp_delta;
+		if (addr == this->LocalVarsAllocInstr) {
+			// Total stack pointer delta is sp_delta for the next instruction,
+			//  because IDA updates the sp delta AFTER each instruction.
+			list<SMPInstr *>::iterator NextInstIter = InstIter;
+			++NextInstIter;
+			if (NextInstIter != this->Instrs.end()) {
+				sp_delta = (*NextInstIter)->GetStackPtrOffset();
+				this->AllocPointDelta = sp_delta;
+			}
+		}
 	}
 
 #if SMP_DEBUG_CONTROLFLOW
-- 
GitLab