RegIndex = (int) PushedReg.reg;
if (RegIndex > R_di) {
SMP_msg("WARNING: Skipping save of register %d\n", RegIndex);
continue;
}
if (this->SavedRegLoc.at((size_t) RegIndex) == 0) {
this->SavedRegLoc[(size_t) RegIndex] = CurrOffset - 4;
}
else {
SMP_msg("WARNING: Multiple saves of register %d\n", RegIndex);
}
} // end if register push operand
} // end if PUSH instruction
else if (NN_pusha == CurrInst->GetCmd().itype) {
// **!!** Handle pushes of all regs.
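// PUSHA/PUSHAD pushes EAX, ECX, EDX, EBX, the original ESP, EBP, ESI, and EDI in that
// order, which is why the saved locations below descend in 4-byte steps from CurrOffset - 4.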
this->SavedRegLoc[(size_t) R_ax] = CurrOffset - 4;
this->SavedRegLoc[(size_t) R_cx] = CurrOffset - 8;
this->SavedRegLoc[(size_t) R_dx] = CurrOffset - 12;
this->SavedRegLoc[(size_t) R_bx] = CurrOffset - 16;
this->SavedRegLoc[(size_t) R_sp] = CurrOffset - 20;
this->SavedRegLoc[(size_t) R_bp] = CurrOffset - 24;
this->SavedRegLoc[(size_t) R_si] = CurrOffset - 28;
this->SavedRegLoc[(size_t) R_di] = CurrOffset - 32;
break; // all regs accounted for
}
else if (CurrInst->MDIsEnterInstr()) {
this->SavedRegLoc[(size_t) R_bp] = CurrOffset - 4;
}
} // end for all instructions
return;
} // end of SMPFunction::MDFindSavedRegs()
// Compute the ReturnRegTypes[] as the meet over all register types
// at all return instructions.
void SMPFunction::MDFindReturnTypes(void) {
list<SMPBasicBlock *>::iterator BlockIter;
SMPBasicBlock *CurrBlock;
list<SMPInstr *>::iterator InstIter;
vector<SMPOperandType> RegTypes;
SMPInstr *CurrInst;
for (BlockIter = this->Blocks.begin(); BlockIter != this->Blocks.end(); ++BlockIter) {
CurrBlock = (*BlockIter);
if (CurrBlock->HasReturn()) {
// Get the types of all registers at the RETURN point.
// Calculate the meet function over them.
InstIter = CurrBlock->GetLastInstr();
--InstIter;
CurrInst = (*InstIter); // the RETURN instruction is the last one in the block
assert(RETURN == CurrInst->GetDataFlowType());
set<DefOrUse, LessDefUse>::iterator CurrUse;
for (CurrUse = CurrInst->GetFirstUse();
CurrUse != CurrInst->GetLastUse();
++CurrUse) {
op_t UseOp = CurrUse->GetOp();
if ((o_reg != UseOp.type) || (R_di < UseOp.reg))
continue;
this->ReturnRegTypes[UseOp.reg]
= SMPTypeMeet(this->ReturnRegTypes.at(UseOp.reg),
CurrUse->GetType());
} // for all USEs in the RETURN instruction
} // end if current block has a RETURN
} // end for all blocks
return;
} // end of SMPFunction::MDFindReturnTypes()
// Determine local variable boundaries in the stack frame.
void SMPFunction::BuildLocalVarTable(void) {
// Currently we just use the info that IDA Pro has inferred from the direct
// addressing of stack locations.
this->SemiNaiveLocalVarID();
return;
} // end of SMPFunction::BuildLocalVarTable()
// Limit damage from garbage stack offset values produced by IDA Pro.
#define IDAPRO_KLUDGE_STACK_FRAME_SIZE_LIMIT 5000000
// Use the local variable offset list from IDA's stack frame structure to compute
// the table of local variable boundaries.
void SMPFunction::SemiNaiveLocalVarID(void) {
// NOTE: We use IDA Pro's offsets from this->FuncInfo (e.g. frsize) and NOT
// our own corrected values in our private data members. The offsets we
// read from the stack frame structure returned by get_frame() are consistent
// with other IDA Pro values, not with our corrected values.
list<SMPInstr *>::iterator InstIter;
bool DebugFlag = false;
bool FoundReturnAddress = false;
this->LocalVarOffsetLimit = -20000;
#if SMP_DEBUG_STACK_GRANULARITY
DebugFlag |= (0 == strcmp("qSort3", this->GetFuncName()));
#endif
func_t *FuncPtr = SMP_get_func(this->FuncInfo.startEA);
if (NULL == FuncPtr) {
SMP_msg("ERROR in SMPFunction::SemiNaiveLocalVarID; no func ptr\n");
}
assert(NULL != FuncPtr);
struc_t *StackFrame = get_frame(FuncPtr);
if (NULL == StackFrame) {
SMP_msg("WARNING: No stack frame info from get_frame for %s\n", this->GetFuncName());
return;
}
member_t *Member = StackFrame->members;
for (size_t i = 0; i < StackFrame->memqty; ++i, ++Member) {
long offset;
char MemberName[MAXSMPVARSTR]; // declaration assumed; buffer filled by get_member_name() below
if (NULL == Member) {
SMP_msg("NULL stack frame member pointer in %s\n", this->GetFuncName());
break;
}
get_member_name(Member->id, MemberName, MAXSMPVARSTR - 1);
if (MemberName == NULL) {
#if SMP_DEBUG_STACK_GRANULARITY
SMP_msg("NULL stack frame member in %s\n", this->GetFuncName());
#endif
continue;
}
if (Member->unimem()) {
// Not a separate variable; name for member of a union.
// The union itself should have a separate entry, so we skip this.
SMP_msg("STACK INFO: Skipping union member %s frame member %zu in stack frame for %s\n",
MemberName, i, this->GetFuncName());
continue;
}
offset = (long) Member->get_soff(); // Would be 0 for union member, so we skipped them above.
if (DebugFlag) {
SMP_msg("%s local var %s at offset %ld\n", this->GetFuncName(), MemberName, offset);
}
if (offset > IDAPRO_KLUDGE_STACK_FRAME_SIZE_LIMIT) {
SMP_msg("ERROR: Rejected enormous stack offset %ld for var %s in func %s\n", offset, MemberName, this->GetFuncName());
continue;
}
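// IDA Pro names the saved return address member of the frame structure " r" (and the saved
// registers region " s"), so a two-character " r" name identifies the return address slot.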
if (!FoundReturnAddress && (2 == strlen(MemberName)) && (0 == strncmp(" r", MemberName, 2))) {
FoundReturnAddress = true;
this->IDAReturnAddressOffset = offset;
}
struct LocalVar TempLocal;
TempLocal.offset = offset;
TempLocal.size = Member->eoff - Member->soff; // audit later
SMP_strncpy(TempLocal.VarName, MemberName, sizeof(TempLocal.VarName) - 1);
this->LocalVarTable.push_back(TempLocal);
if ((offset + (long) TempLocal.size) >= this->LocalVarOffsetLimit) {
this->LocalVarOffsetLimit = (long) (TempLocal.offset + TempLocal.size);
}
} // end for all stack frame members
// If AnalyzedSP is false, that is all we can do.
if (!this->AnalyzedSP) {
this->OutgoingArgsSize = 0;
this->MinStackDelta = 0;
this->AllocPointDelta = 0;
return;
}
// Calculate min and max stack point deltas.
this->MinStackDelta = 20000; // Final value should be negative or zero
this->MaxStackDelta = -1000; // Final value should be zero.
InstIter = this->Instrs.begin();
if ((*InstIter)->IsFloatNop())
++InstIter; // skip marker instruction
for ( ; InstIter != this->Instrs.end(); ++InstIter) {
SMPInstr *CurrInst = (*InstIter);
ea_t addr = CurrInst->GetAddr();
sval_t sp_delta = CurrInst->GetStackPtrOffset();
if (sp_delta < this->MinStackDelta)
this->MinStackDelta = sp_delta;
if (sp_delta > this->MaxStackDelta)
this->MaxStackDelta = sp_delta;
if (addr == this->LocalVarsAllocInstr) {
// Total stack pointer delta is sp_delta for the next instruction,
// because IDA updates the sp delta AFTER each instruction.
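// Hypothetical example: if the frame is allocated by "sub esp, 0x20" and the delta before
// that instruction is -4 (from "push ebp"), IDA reports -4 at the sub itself and -0x24 at
// the following instruction; the latter is the allocation-point delta we want to record.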
list<SMPInstr *>::iterator NextInstIter = InstIter;
++NextInstIter;
sp_delta = (*NextInstIter)->GetStackPtrOffset();
this->AllocPointDelta = sp_delta;
}
}
// Calculate min and max stack operand offsets accessed.
InstIter = this->Instrs.begin();
#if SMP_USE_SSA_FNOP_MARKER
if ((*InstIter)->IsFloatNop())
++InstIter; // skip marker instruction
#endif
for ( ; InstIter != this->Instrs.end(); ++InstIter) {
SMPInstr *CurrInst = (*InstIter);
ea_t addr = CurrInst->GetAddr();
// Find the min and max stack offsets in DEFs and USEs.
op_t TempOp;
if (CurrInst->HasDestMemoryOperand() || CurrInst->MDIsPushInstr() || CurrInst->MDIsEnterInstr()) {
set<DefOrUse, LessDefUse>::iterator CurrDef;
for (CurrDef = CurrInst->GetFirstDef(); CurrDef != CurrInst->GetLastDef(); ++CurrDef) {
TempOp = CurrDef->GetOp();
if (TempOp.type != o_phrase && TempOp.type != o_displ)
continue;
this->UpdateMinMaxStackOffsets(CurrInst, TempOp);
} // end for all DEFs
}
if (CurrInst->HasSourceMemoryOperand() || CurrInst->MDIsPopInstr() || CurrInst->MDIsLeaveInstr() || CurrInst->MDIsLoadEffectiveAddressInstr()) {
if (CurrInst->MDIsLoadEffectiveAddressInstr()) {
TempOp = CurrInst->GetLeaMemUseOp();
if ((TempOp.type == o_phrase) || (TempOp.type == o_displ)) {
this->UpdateMinMaxStackOffsets(CurrInst, TempOp);
}
}
else {
set<DefOrUse, LessDefUse>::iterator CurrUse;
for (CurrUse = CurrInst->GetFirstUse(); CurrUse != CurrInst->GetLastUse(); ++CurrUse) {
TempOp = CurrUse->GetOp();
if ((TempOp.type != o_phrase) && (TempOp.type != o_displ))
continue;
this->UpdateMinMaxStackOffsets(CurrInst, TempOp);
} // end for all USEs
}
}
}
if (0 == this->MaxStackAccessLimit) {
// Never accessed any incoming args. However, we know the return address is on the stack,
// and it is almost never accessed, so we want to record its presence.
this->MaxStackAccessLimit = MD_DEFAULT_RETURN_ADDRESS_SIZE;
}
if (this->MinStackAccessOffset > this->MinStackDelta) {
// Some functions allocate space that is not visibly accessed. We still want to make
// our stack frame maps of maximum size, and MinStackDelta is used for normalizing offsets.
this->MinStackAccessOffset = this->MinStackDelta;
}
// IDA Pro sometimes fails to add stack frame members for all incoming args, etc.
// Find and correct these omissions by examining stack accesses in instructions
// and extend the LocalVarTable to cover whatever is out of range.
if (!this->AuditLocalVarTable()) {
// Catastrophic error must have occurred, probably due to errors in IDA's
// stack pointer analysis, despite AnalyzedSP being true.
return;
}
if (!(this->LocalVarTable.empty())) {
this->GoodLocalVarTable = true;
// Sort the LocalVarTable so that we do not depend on IDA Pro
// presenting the stack frame members in order.
std::sort(this->LocalVarTable.begin(), this->LocalVarTable.end(), LocalVarCompare);
}
#if SMP_DEBUG_STACK_GRANULARITY
SMP_msg("Computing %zu local var sizes\n", this->LocalVarTable.size());
#endif
// Now we want to audit the size field for each local
if (this->GoodLocalVarTable) {
size_t VarLimit = this->LocalVarTable.size() - 1;
assert(this->LocalVarTable.size() > 0);
for (size_t VarIndex = 0; VarIndex < VarLimit; ++VarIndex) {
struct LocalVar TempLocEntry = this->LocalVarTable[VarIndex];
bool AboveLocalsRegion = (TempLocEntry.offset >= this->LocalVarsSize);
size_t TempSize = this->LocalVarTable[VarIndex + 1].offset - TempLocEntry.offset;
int DiffSize = ((int) TempSize) - ((int) TempLocEntry.size);
// We don't have IDA Pro stack frame members for callee saved registers. This
// omission can make it seem that there is a gap between the uppermost local
// variable and the return address or saved frame pointer. Avoid expanding the
// last local variable into the callee saved registers region.
if (DiffSize > 0) { // We are expanding the size.
if (!AboveLocalsRegion && ((TempLocEntry.offset + TempLocEntry.size + DiffSize) > this->LocalVarsSize)) {
// Current local does not start above the locals region, but its new size will
// carry it above the locals region.
if ((TempLocEntry.offset + TempLocEntry.size) > this->LocalVarsSize) {
// Weird. It already overlapped the callee saved regs region.
SMP_msg("WARNING: Local var at offset %ld size %zu in %s extends above local vars region.\n",
TempLocEntry.offset, TempLocEntry.size, this->GetFuncName());
}
// Limit DiffSize to avoid overlapping callee saved regs.
DiffSize = this->LocalVarsSize - (TempLocEntry.offset + TempLocEntry.size);
if (DiffSize < 0)
DiffSize = 0; // started out positive, cap it at zero.
}
}
if (DiffSize < 0)
DiffSize = 0; // should not happen with sorted LocalVarTable unless duplicate entries.
if (DiffSize != 0) {
#if SMP_DEBUG_STACK_GRANULARITY
SMP_msg("STACK INFO: Adjusted size for stack frame member at %ld in %s\n",
TempLocEntry.offset, this->GetFuncName());
#endif
this->LocalVarTable[VarIndex].size += DiffSize;
}
} // end for all local vars
} // end if GoodLocalVarTable
#if 0 // Using Member->eoff seems to be working for all members, including the last one.
#if SMP_DEBUG_STACK_GRANULARITY
SMP_msg("Computing last local var size for frsize %d\n", this->FuncInfo.frsize);
#endif
// Size of last local is total frsize minus savedregs in frame minus offset of last local
size_t SavedRegsSpace = 0; // portion of frsize that is saved regs, not locals.
if (this->CalleeSavedRegsSize > this->FuncInfo.frregs) {
// IDA Pro counts the save of EBP in frregs, but then EBP gets its new
// value and callee saved regs other than the old EBP push get counted
// in frsize rather than frregs. CalleeSavedRegsSize includes all saved
// regs on the stack, both above and below the current EBP offset.
// NOTE: For windows, this has to be done differently, as callee saved regs
// happen at the bottom of the local frame, not the top.
#if 0
SavedRegsSpace = this->CalleeSavedRegsSize - this->FuncInfo.frregs;
#else
SavedRegsSpace = this->FuncInfo.frsize - this->LocalVarsSize;
#endif
}
this->LocalVarTable.back().size = this->FuncInfo.frsize
- SavedRegsSpace - this->LocalVarTable.back().offset;
this->LocalVarOffsetLimit = this->LocalVarTable.back().offset
+ (adiff_t) this->LocalVarTable.back().size;
#endif
#if 0 // AboveLocalsSize is not a reliable number.
// IDA Pro can have difficulty with some irregular functions such as are found
// in the C startup code. The frsize value might be bogus. Just punt on the
// local variable ID if that is the case.
if ((this->LocalVarOffsetLimit - AboveLocalsSize) > (adiff_t) this->FuncInfo.frsize) {
this->LocalVarTable.clear();
this->GoodLocalVarTable = false;
SMP_msg("WARNING: Bad frsize %d for %s OffsetLimit: %d AboveLocalsSize: %d LocalVarsSize: %d ; abandoning SemiNaiveLocalVarID.\n",
this->FuncInfo.frsize, this->GetFuncName(), this->LocalVarOffsetLimit, AboveLocalsSize, this->LocalVarsSize);
return;
}
assert((this->LocalVarOffsetLimit - AboveLocalsSize) <= (adiff_t) this->FuncInfo.frsize);
#endif
// Find out how many of the locals are really outgoing args.
if (this->AnalyzedSP && !this->CallsAlloca && (BADADDR != this->LocalVarsAllocInstr)) {
this->FindOutgoingArgsSize();
}
else {
SMP_msg("FindOutgoingArgsSize not called for %s ", this->GetFuncName());
SMP_msg("AnalyzedSP: %d CallsAlloca: %d LocalVarsAllocInstr: %x \n",
this->AnalyzedSP, this->CallsAlloca, this->LocalVarsAllocInstr);
}
return;
} // end of SMPFunction::SemiNaiveLocalVarID()
// Update MinStackAccessOffset and MaxStackAccessLimit if TempOp is stack access
void SMPFunction::UpdateMinMaxStackOffsets(SMPInstr *CurrInst, op_t TempOp) {
ea_t offset;
int SignedOffset;
size_t DataSize;
bool UsedFramePointer;
bool IndexedAccess;
bool SignedMove;
bool UnsignedMove;
if (this->MDGetStackOffsetAndSize(CurrInst, TempOp, this->MinStackDelta, offset, DataSize, UsedFramePointer,
IndexedAccess, SignedMove, UnsignedMove)) {
SignedOffset = (int) offset + (int) this->MinStackDelta; // Don't want zero-based for min/max finding
if (((sval_t) SignedOffset) < this->MinStackAccessOffset) {
this->MinStackAccessOffset = (sval_t) SignedOffset;
}
if (((sval_t)(SignedOffset + (int) DataSize)) > this->MaxStackAccessLimit) {
this->MaxStackAccessLimit = (sval_t)(SignedOffset + (int) DataSize);
}
}
return;
} // end of SMPFunction::UpdateMinMaxStackOffsets()
// Check and correct the LocalVarTable derived from IDA Pro stack frame members.
// Examine each instruction and see if any stack accesses are beyond the LocalVarTable
// and create new entries in the LocalVarTable if so.
bool SMPFunction::AuditLocalVarTable(void) {
list<SMPInstr *>::iterator InstIter;
// For some functions, IDA Pro does not base its stack frame at the MinStackDelta. This
// is detected by noting that the offset field for the saved return address is not
// the negation of the MinStackDelta, e.g. offset is 12 and MinStackDelta is -16
// for a function such as call_gmon_start, which has a temporary 4-byte decrease
// in the stack delta for an internal thunk call that IDA Pro excludes from the
// stack frame analysis, because it does not represent any local variable:
// call next_instruction
// pop ebx
// Instead, IDA Pro typically bases its stack frame at the AllocPointDelta. For many
// functions, the MinStackDelta and the AllocPointDelta are the same. For some, they
// are not the same, and for some functions, IDA Pro has a stack frame that is not
// even based at the AllocPointDelta, because IDA Pro makes a mistake in its analyses
// when the first basic block is interrupted by odd code such as a function call
// before it reaches the frame allocation instruction.
// So, we need to align the local var table so that the base of the table is at the
// AllocPointDelta and the saved return address falls at normalized address zero, i.e.
// if AllocPointDelta is -28, then the LocalVarTable will start at offset zero as IDA
// computes offsets, and the saved return address will fall at offset 28, the negation
// of the AllocPointDelta. If the LocalVarTable does not conform to this pattern, we will
// need to add 4-byte entries at the bottom of the table and adjust offsets until the return address
// falls at the correct offset.
long IDAFrameAdjustment = (0 - this->IDAReturnAddressOffset - this->AllocPointDelta);
if (IDAFrameAdjustment != 0) {
SMP_msg("WARNING: %ld bytes IDAFrameAdjustment needed: Func at: %x\n",
IDAFrameAdjustment, this->FirstEA);
// We need to subtract (IDAReturnAddressOffset + this->AllocPointDelta) from the local var table offsets.
// this->AllocPointDelta is negative, e.g. -44 for libc_csu_init in toy.exe, and IDAReturnAddressOffset
// should be its negation (44 in that example), but is a smaller number (20 in the toy.exe example),
// so we are subtracting (20 + -44) from each offset, meaning we are adding 24. We also add 24 to the
// value of this->LocalVarOffsetLimit, and create an entry at the bottom of the frame with a size of
// 24 in this example.
long LocalVarIncrement = (0 - (this->IDAReturnAddressOffset + this->AllocPointDelta));
if (LocalVarIncrement <= 0) {
SMP_msg("SERIOUS WARNING: Unexpected non-positive value for LocalVarIncrement: %ld Func at: %x\n",
LocalVarIncrement, this->FirstEA);
}
else {
for (size_t i = 0; i < this->LocalVarTable.size(); ++i) {
this->LocalVarTable[i].offset += LocalVarIncrement;
}
// Add dummy placeholders at bottom of LocalVarTable, four bytes each.
size_t TotalFillerSize = 0;
do {
struct LocalVar TempLocal;
char TempStr[20];
TempLocal.offset = (long) TotalFillerSize;
TempLocal.size = 4;
if (((long)(TempLocal.size + TotalFillerSize)) > LocalVarIncrement) {
TempLocal.size = (size_t)(LocalVarIncrement - (long) TotalFillerSize);
}
TotalFillerSize += TempLocal.size;
SMP_strncpy(TempLocal.VarName, "SMP_IDA_FixVar", sizeof(TempLocal.VarName) - 1);
(void) SMP_snprintf(TempStr, 18, "%ld", TempLocal.offset);
SMP_strncat(TempLocal.VarName, TempStr, sizeof(TempLocal.VarName) - 1);
this->LocalVarTable.push_back(TempLocal);
} while (((long)TotalFillerSize) < LocalVarIncrement);
this->LocalVarOffsetLimit += LocalVarIncrement;
this->FuncInfo.frsize += (asize_t) LocalVarIncrement;
}
}
// We cannot depend on IDA Pro making Member
// entries for everything that is accessed on the stack.
// When an incoming arg is accessed but no Member is
// created, then LocalVarOffsetLimit will be too small
// and we will get ERROR messages. We already looped through the
// instructions to find the MaxStackAccessLimit. If LocalVarOffsetLimit
// is not big enough to reach from AllocPointDelta to MaxStackAccessLimit,
// then add 4-byte incoming arg entries until it reaches.
while (this->LocalVarOffsetLimit < (long) this->MaxStackAccessLimit) {
// Extend LocalVarTable.
struct LocalVar TempLocal;
char TempStr[20];
TempLocal.offset = this->LocalVarOffsetLimit;
TempLocal.size = 4;
if ((TempLocal.size + TempLocal.offset) > ((long) this->MaxStackAccessLimit)) {
TempLocal.size = ((long) this->MaxStackAccessLimit) - TempLocal.offset;
}
SMP_strncpy(TempLocal.VarName, "SMP_InArg", sizeof(TempLocal.VarName) - 1);
(void) SMP_snprintf(TempStr, 18, "%ld", TempLocal.offset);
SMP_strncat(TempLocal.VarName, TempStr, sizeof(TempLocal.VarName) - 1);
this->LocalVarTable.push_back(TempLocal);
this->LocalVarOffsetLimit += TempLocal.size;
} // end while LocalVarOffsetLimit < MaxStackAccessLimit
// Fill in the gaps with new variables as well. SHOULD WE? WHY?
return true;
} // end of SMPFunction::AuditLocalVarTable()
// Determine how many bytes at the bottom of the stack frame (i.e. at bottom of
// this->LocalVarsSize) are used for outgoing args. This is the case when the cdecl
// calling convention is used, e.g. gcc/linux allocates local var space + out args space
// in a single allocation and then writes outarg values directly to ESP+0, ESP+4, etc.
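// Hypothetical example: gcc might emit "sub esp, 0x58" once in the prologue and then write
// arguments with "mov [esp], eax" and "mov [esp+4], edx" before each call; the bytes at the
// bottom of the frame that are only written ESP-relative (never read, never EBP-relative)
// are what we count below as the outgoing args region.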
void SMPFunction::FindOutgoingArgsSize(void) {
// Compute the lowest value reached by the stack pointer.
list<SMPInstr *>::iterator InstIter;
unsigned short BitWidthMask;
bool DebugFlag = false; // declaration assumed; DebugFlag is tested below even when the debug macro is off
#if SMP_DEBUG_STACK_GRANULARITY
DebugFlag = (0 == strcmp("BZ2_blockSort", this->GetFuncName()));
SMP_msg("DEBUG: Entered FindOutgoingArgsSize for %s\n", this->GetFuncName());
#endif
this->OutgoingArgsComputed = true;
#if SMP_IDAPRO52_WORKAROUND
this->OutgoingArgsSize = 16;
return;
#endif
#if SMP_DEBUG_STACK_GRANULARITY
SMP_msg("AllocPointDelta: %d MinStackDelta: %d\n", this->AllocPointDelta, this->MinStackDelta);
#endif
if ((0 <= this->MinStackDelta) || (0 <= this->AllocPointDelta)) {
// No allocations; sometimes happens in library functions.
this->OutgoingArgsSize = 0;
this->AllocPointDelta = 0;
if ((this->MinStackDelta > this->MaxStackDelta) || (0 < this->MinStackDelta)) {
this->MinStackDelta = 0;
}
}
assert(0 >= this->MinStackDelta);
// Allocate a vector of stack frame entries, one for each byte of the stack frame.
// This will be our memory map for analyzing stack usage.
for (int i = this->MinStackAccessOffset; i < this->MaxStackAccessLimit; ++i) {
struct StackFrameEntry TempEntry;
TempEntry.VarPtr = NULL;
TempEntry.offset = (long) i;
TempEntry.Read = false;
TempEntry.Written = false;
TempEntry.AddressTaken = false;
TempEntry.ESPRelativeAccess = false;
TempEntry.EBPRelativeAccess = false;
TempEntry.IndexedAccess = false;
this->StackFrameMap.push_back(TempEntry);
struct FineGrainedInfo TempFineGrained;
TempFineGrained.SignMiscInfo = 0;
TempFineGrained.SizeInfo = 0;
this->FineGrainedStackTable.push_back(TempFineGrained);
} // end for all bytes from MinStackAccessOffset to MaxStackAccessLimit
#if 0
for (int i = 0; i < this->LocalVarOffsetLimit; ++i) {
struct FineGrainedInfo TempFineGrained;
TempFineGrained.SignMiscInfo = 0;
TempFineGrained.SizeInfo = 0;
this->FineGrainedStackTable.push_back(TempFineGrained);
}
#endif
// Fill in the VarPtr fields for each StackFrameMap entry.
if (0 < this->AllocPointDelta) {
SMP_msg("FATAL ERROR: AllocPointDelta = %d in %s\n", this->AllocPointDelta, this->GetFuncName());
}
assert(0 >= this->AllocPointDelta);
// We were not able to adjust the LocalVarTable for a negative IDAFrameAdjustment back
// in AuditLocalVarTable(), but we can use the negative adjustment value in this loop
// to properly match the StackFrameMap entries to the LocalVarTable entries and avoid
// an out of range error.
long IDAFrameAdjustment = (0 - this->IDAReturnAddressOffset - this->AllocPointDelta);
if (0 < IDAFrameAdjustment) {
IDAFrameAdjustment = 0; // only handling the negative case; positive was handled in AuditLocalVarTable()
}
for (size_t i = 0; i < this->LocalVarTable.size(); ++i) {
assert(this->LocalVarTable.at(i).offset >= 0);
// Picture that AllocPointDelta is -200, MinStackAccessOffset is -210, and
// the LocalVarTable[i].offset is +8 (i.e. 8 bytes above alloc point).
// Then base = 8 + (-200 - -210) = 8 + 10 = 18, the proper offset into
// the StackFrameMap.
size_t base = (size_t) (this->LocalVarTable.at(i).offset
+ (this->AllocPointDelta - this->MinStackAccessOffset) + IDAFrameAdjustment);
size_t limit = base + this->LocalVarTable.at(i).size;
if (limit > this->StackFrameMap.size()) {
SMP_msg("ERROR: FindOutArgsSize: base = %zu limit = %zu StackFrameMap size = %zu in %s\n",
base, limit, this->StackFrameMap.size(), this->GetFuncName());
this->OutgoingArgsComputed = false;
this->OutgoingArgsSize = 0;
return;
}
assert(limit <= this->StackFrameMap.size());
for (size_t MapIndex = base; MapIndex < limit; ++MapIndex) {
this->StackFrameMap[MapIndex].VarPtr = &(this->LocalVarTable.at(i));
}
}
// Iterate through all instructions and record stack frame accesses in the StackFrameMap.
InstIter = this->Instrs.begin();
#if SMP_USE_SSA_FNOP_MARKER
if ((*InstIter)->IsFloatNop())
++InstIter; // skip marker instruction
#endif
for ( ; InstIter != this->Instrs.end(); ++InstIter) {
SMPInstr *CurrInst = (*InstIter);
ea_t InstAddr = CurrInst->GetAddr();
sval_t sp_delta = CurrInst->GetStackPtrOffset();
if (0 < sp_delta) {
// Stack underflow; about to assert
SMP_msg("ERROR: Stack underflow at %x %s sp_delta: %d\n", InstAddr,
CurrInst->GetDisasm(), sp_delta);
this->OutgoingArgsComputed = false;
this->OutgoingArgsSize = 0;
return;
}
assert(0 >= sp_delta);
ea_t offset;
int SignedOffset; // declaration assumed; needed for the signed range checks below
size_t DataSize;
bool UsedFramePointer;
bool IndexedAccess;
bool SignedMove;
bool UnsignedMove;
if (CurrInst->HasDestMemoryOperand()) {
set<DefOrUse, LessDefUse>::iterator CurrDef;
for (CurrDef = CurrInst->GetFirstDef(); CurrDef != CurrInst->GetLastDef(); ++CurrDef) {
op_t TempOp = CurrDef->GetOp();
if (TempOp.type != o_phrase && TempOp.type != o_displ)
continue;
if (this->MDGetStackOffsetAndSize(CurrInst, TempOp, this->MinStackAccessOffset, offset, DataSize, UsedFramePointer,
IndexedAccess, SignedMove, UnsignedMove)) {
SignedOffset = (int) offset;
if (IndexedAccess && ((0 > SignedOffset) || ((SignedOffset + DataSize) > this->StackFrameMap.size()))) {
continue; // Indexed expressions can be within frame even when offset is outside frame
}
assert(0 <= SignedOffset);
#if 0
if (offset >= this->FuncInfo.frsize)
continue; // limit processing to outgoing args and locals
#endif
if ((offset + DataSize) > this->StackFrameMap.size()) {
SMP_msg("ERROR: offset = %u DataSize = %zu FrameMapSize = %zu\n",
offset, DataSize, this->StackFrameMap.size());
}
assert((offset + DataSize) <= this->StackFrameMap.size());
for (int j = 0; j < (int) DataSize; ++j) {
this->StackFrameMap[offset + j].Written = true;
this->StackFrameMap[offset + j].IndexedAccess = IndexedAccess;
if (!UsedFramePointer) {
this->StackFrameMap[offset + j].ESPRelativeAccess = true;
}
else {
this->StackFrameMap[offset + j].EBPRelativeAccess = true;
}
} // end for all bytes in DataSize
struct FineGrainedInfo StackDefFG;
BitWidthMask = ComputeOperandBitWidthMask(TempOp, DataSize);
this->FineGrainedStackTable.at(offset).SizeInfo |= BitWidthMask;
StackDefFG.SizeInfo = BitWidthMask;
this->FineGrainedStackTable.at(offset).SignMiscInfo |= FG_MASK_WRITTEN;
StackDefFG.SignMiscInfo = FG_MASK_WRITTEN;
if (IndexedAccess) {
this->FineGrainedStackTable.at(offset).SignMiscInfo |= FG_MASK_INDEXED_ACCESS;
StackDefFG.SignMiscInfo |= FG_MASK_INDEXED_ACCESS;
}
if (!UsedFramePointer) {
this->FineGrainedStackTable.at(offset).SignMiscInfo |= FG_MASK_SP_RELATIVE;
StackDefFG.SignMiscInfo |= FG_MASK_SP_RELATIVE;
}
else {
this->FineGrainedStackTable.at(offset).SignMiscInfo |= FG_MASK_FP_RELATIVE;
StackDefFG.SignMiscInfo |= FG_MASK_FP_RELATIVE;
}
// We will process the signedness of stores later, so that loads can take precedence
// over stores in determining signedness in the table. We go ahead and process
// signedness for the separate DEF and USE maps by InstAddr.
if (SignedMove) {
StackDefFG.SignMiscInfo |= FG_MASK_SIGNED;
}
else if (UnsignedMove) {
StackDefFG.SignMiscInfo |= FG_MASK_UNSIGNED;
}
// Insert the StackDefFG into the map of InstAddr to DEF FG info.
pair<map<ea_t, struct FineGrainedInfo>::iterator, bool> InsertResult;
pair<ea_t, struct FineGrainedInfo> InsertValue(InstAddr, StackDefFG);
InsertResult = this->StackDefFGInfo.insert(InsertValue);
assert(InsertResult.second);
} // end if MDGetStackOffsetAndSize()
} // end for all DEFs
} // end if DestMemoryOperand
if (CurrInst->HasSourceMemoryOperand()) {
set<DefOrUse, LessDefUse>::iterator CurrUse;
for (CurrUse = CurrInst->GetFirstUse(); CurrUse != CurrInst->GetLastUse(); ++CurrUse) {
op_t TempOp = CurrUse->GetOp();
if (TempOp.type != o_phrase && TempOp.type != o_displ)
continue;
if (this->MDGetStackOffsetAndSize(CurrInst, TempOp, this->MinStackAccessOffset, offset, DataSize, UsedFramePointer,
IndexedAccess, SignedMove, UnsignedMove)) {
SignedOffset = (int) offset;
if (IndexedAccess && ((0 > SignedOffset) || ((SignedOffset + DataSize) > this->StackFrameMap.size()))) {
continue; // Indexed expressions can be within frame but offset is outside frame
}
assert(0 <= SignedOffset);
#if 0
if (offset >= this->FuncInfo.frsize)
continue; // limit processing to outgoing args and locals
#endif
if ((SignedOffset + DataSize) > this->StackFrameMap.size()) {
SMP_msg("ERROR: offset = %u DataSize = %zu FrameMapSize = %zu\n",
offset, DataSize, this->StackFrameMap.size());
}
assert((SignedOffset + DataSize) <= this->StackFrameMap.size());
for (int j = 0; j < (int) DataSize; ++j) {
this->StackFrameMap[offset + j].Read = true;
this->StackFrameMap[offset + j].IndexedAccess |= IndexedAccess;
if (!UsedFramePointer)
this->StackFrameMap[offset + j].ESPRelativeAccess = true;
else
this->StackFrameMap[offset + j].EBPRelativeAccess = true;
}
struct FineGrainedInfo StackUseFG;
BitWidthMask = ComputeOperandBitWidthMask(TempOp, DataSize);
this->FineGrainedStackTable.at(offset).SizeInfo |= BitWidthMask;
StackUseFG.SizeInfo = BitWidthMask;
this->FineGrainedStackTable.at(offset).SignMiscInfo |= FG_MASK_READ;
StackUseFG.SignMiscInfo = FG_MASK_READ;
if (IndexedAccess) {
this->FineGrainedStackTable.at(offset).SignMiscInfo |= FG_MASK_INDEXED_ACCESS;
StackUseFG.SignMiscInfo |= FG_MASK_INDEXED_ACCESS;
}
if (!UsedFramePointer) {
this->FineGrainedStackTable.at(offset).SignMiscInfo |= FG_MASK_SP_RELATIVE;
StackUseFG.SignMiscInfo |= FG_MASK_SP_RELATIVE;
}
else {
this->FineGrainedStackTable.at(offset).SignMiscInfo |= FG_MASK_FP_RELATIVE;
StackUseFG.SignMiscInfo |= FG_MASK_FP_RELATIVE;
}
if (SignedMove) {
this->FineGrainedStackTable.at(offset).SignMiscInfo |= FG_MASK_SIGNED;
StackUseFG.SignMiscInfo |= FG_MASK_SIGNED;
}
else if (UnsignedMove) {
this->FineGrainedStackTable.at(offset).SignMiscInfo |= FG_MASK_UNSIGNED;
StackUseFG.SignMiscInfo |= FG_MASK_UNSIGNED;
}
// Insert the StackUseFG into the map of InstAddr to USE FG info.
pair<map<ea_t, struct FineGrainedInfo>::iterator, bool> InsertResult;
pair<ea_t, struct FineGrainedInfo> InsertValue(InstAddr, StackUseFG);
InsertResult = this->StackUseFGInfo.insert(InsertValue);
assert(InsertResult.second);
} // end if MDGetStackOffsetAndSize()
} // end for all USEs
} // end if SourceMemoryOperand
// NOTE: Detect taking the address of stack locations. **!!**
} // end for all instructions
// If function is a leaf function, set OutgoingArgsSize to zero and return.
// If function has no local frame allocation, ditto.
if ((this->IsLeaf() && !(this->IsDirectlyRecursive()))
|| (this->AllocPointDelta == 0)) {
this->OutgoingArgsSize = 0;
return;
}
// For non-leaf functions, set the OutgoingArgsSize to the write-only, ESP-relative
// region of the bottom of the StackFrameMap.
bool OutgoingArgsRegionFinished = false;
bool IndexedOutgoingArgs = false; // Any indexed accesses to outgoing args?
size_t FramePadSize = 0;
for (size_t MapIndex = 0; MapIndex < this->StackFrameMap.size(); ++MapIndex) {
// Some of the bottom of the stack frame might be below the local frame allocation.
// These are pushes that happened after allocation, etc. We skip over these
// locations and define the outgoing args region to start strictly at the bottom
// of the local frame allocation.
struct StackFrameEntry TempEntry = this->StackFrameMap.at(MapIndex);
if (DebugFlag) {
SMP_msg("StackFrameMap entry %zu: offset: %ld Read: %d Written: %d ESP: %d EBP: %d\n",
MapIndex, TempEntry.offset, TempEntry.Read, TempEntry.Written,
TempEntry.ESPRelativeAccess, TempEntry.EBPRelativeAccess);
}
if (TempEntry.offset < this->AllocPointDelta)
continue;
if (OutgoingArgsRegionFinished) {
// We are just processing the stack frame padding.
if (!TempEntry.Read && !TempEntry.Written) {
// Could be stack frame padding.
++FramePadSize;
}
else {
break; // No more padding region
}
}
else if (TempEntry.Read || TempEntry.EBPRelativeAccess || !TempEntry.Written
|| !TempEntry.ESPRelativeAccess) {
OutgoingArgsRegionFinished = true;
if (!TempEntry.Read && !TempEntry.Written) {
// Could be stack frame padding.
++FramePadSize;
}
else {
break; // No padding region
}
}
else {
this->OutgoingArgsSize++;
if (TempEntry.IndexedAccess) {
IndexedOutgoingArgs = true;
}
}
}
// If any outgoing arg was accessed using an index register, then we don't know how high
// the index register value went. It could potentially consume the so-called padding
// region, which might be just the region we did not detect direct accesses to because
// the accesses were indirect. To be safe, we expand the outgoing args region to fill
// the padding region above it in this indexed access case.
if (IndexedOutgoingArgs) {
this->OutgoingArgsSize += FramePadSize;
}
// Sometimes we encounter unused stack space above the outgoing args. Lump this space
// in with the outgoing args. We detect this by noting when the outgoing args space
// has only partially used the space assigned to a local var.
// NOTE: This is usually just stack padding to maintain stack alignment. It could
// also be the case that the lowest local variable is accessed indirectly and we missed
// seeing its address taken, in which case it would be unsound to lump it into the
// outgoing args region. We might want to create a local var called STACKPAD
// to occupy this space.
if ((0 < this->OutgoingArgsSize) && (this->OutgoingArgsSize < this->FuncInfo.frsize)) {
long MapIndex = (this->AllocPointDelta - this->MinStackDelta);
assert(0 <= MapIndex);
MapIndex += (((long) this->OutgoingArgsSize) - 1);
struct StackFrameEntry TempEntry = this->StackFrameMap.at((size_t) MapIndex);
if (NULL == TempEntry.VarPtr) { // Gap in stack frame; IDA 6.0
SMP_msg("Gap in stack frame: %s\n", this->GetFuncName());
}
else if (this->OutgoingArgsSize < (TempEntry.VarPtr->offset + TempEntry.VarPtr->size)) {
#if SMP_DEBUG_FRAMEFIXUP
SMP_msg("OutGoingArgsSize = %d", this->OutgoingArgsSize);
#endif
this->OutgoingArgsSize = TempEntry.VarPtr->offset + TempEntry.VarPtr->size;
#if SMP_DEBUG_FRAMEFIXUP
SMP_msg(" adjusted to %d\n", this->OutgoingArgsSize);
#endif
} // end else if outgoing args only partially used the last local var space
} // end if OutgoingArgsSize > 0 and < frsize
return;
} // end of SMPFunction::FindOutgoingArgsSize()
// If TempOp reads or writes to a stack location, return the offset (relative to the initial
// stack pointer value) and the size in bytes of the data access. Also return whether the
// access was frame-pointer-relative, and whether signedness can be inferred due to a load
// from the stack being zero-extended or sign-extended.
// NOTE: This function assumes that offsets are already normalized. i.e. the TempOp argument
// should always come from a DEF or USE that has been normalized to the stack delta at function entry.
// NOTE: TempOp must be of type o_displ or o_phrase, as no other operand type could be a
// stack memory access.
// BaseValue is either this->MinStackAccessOffset, or this->MinStackDelta (when this->MinStackAccessOffset is still
// being computed).
// Return true if a stack memory access was found in TempOp, false otherwise.
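// Illustrative (hypothetical) numbers: with MinStackAccessOffset == -0x30 passed as BaseValue, a
// normalized 4-byte access at [esp-0x28] returns offset == 8 (an index usable into
// StackFrameMap/FineGrainedStackTable), DataSize == 4, and FP == false.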
bool SMPFunction::MDGetStackOffsetAndSize(SMPInstr *Instr, op_t TempOp, sval_t BaseValue, ea_t &offset, size_t &DataSize, bool &FP,
bool &Indexed, bool &Signed, bool &Unsigned) {
int BaseReg;
int IndexReg;
ushort ScaleFactor;
int SignedOffset;
sval_t sp_delta = Instr->GetStackPtrOffset();
ea_t InstAddr = Instr->GetAddr(); // helps debugging
assert((o_displ == TempOp.type) || (o_phrase == TempOp.type));
MDExtractAddressFields(TempOp, BaseReg, IndexReg, ScaleFactor, offset);
if (TempOp.type == o_phrase) {
assert(offset == 0); // implicit zero, as in [esp] ==> [esp+0]
}
SignedOffset = (int) offset; // avoid sign errors during adjustment arithmetic
if ((BaseReg == R_sp) || (IndexReg == R_sp)) {
// ESP-relative constant offset
if (!Instr->AreDefsNormalized()) {
SignedOffset += sp_delta; // base offsets from entry ESP value
}
SignedOffset -= BaseValue; // convert to StackFrameMap index
offset = (ea_t) SignedOffset; // write back to outgoing argument
// Get size of data written
DataSize = GetOpDataSize(TempOp);
FP = false;
Indexed = ((BaseReg != R_none) && (IndexReg != R_none)); // two regs used
unsigned short opcode = Instr->GetCmd().itype;
Unsigned = (opcode == NN_movzx);
Signed = (opcode == NN_movsx);
if ((0 > SignedOffset) && (!Indexed) && (BaseValue == this->MinStackAccessOffset)) {
SMP_msg("ERROR: Negative offset in MDGetStackOffsetAndSize for inst dump: \n");
}
return true;
}
else if (this->UseFP && ((BaseReg == R_bp) || (IndexReg == R_bp))) {
SignedOffset -= this->FuncInfo.frregs; // base offsets from entry ESP value
SignedOffset -= BaseValue; // convert to StackFrameMap index
offset = (ea_t) SignedOffset;
DataSize = GetOpDataSize(TempOp);
FP = true;
Indexed = ((BaseReg != R_none) && (IndexReg != R_none)); // two regs used
assert(Indexed || (!this->StackPtrAnalysisSucceeded()) || !this->HasSTARSStackPtrAnalysisCompleted()); // Else we should never get here with unnormalized stack operands
unsigned short opcode = Instr->GetCmd().itype;
Unsigned = (opcode == NN_movzx);
Signed = (opcode == NN_movsx);
if ((0 > SignedOffset) && (!Indexed) && (BaseValue == this->MinStackAccessOffset)) {
SMP_msg("ERROR: Negative offset %d in MDGetStackOffsetAndSize: frregs: %d MinStackDelta: %d Inst dump: \n",
SignedOffset, this->FuncInfo.frregs, this->MinStackDelta);
}
return true;
}
else {
return false;
}
} // end of SMPFunction::MDGetStackOffsetAndSize()
// Return fine grained stack entry for stack op TempOp from instruction at InstAddr
bool SMPFunction::MDGetFGStackLocInfo(ea_t InstAddr, op_t TempOp, struct FineGrainedInfo &FGEntry) {
int BaseReg;
int IndexReg;
ushort ScaleFactor;
ea_t offset;
int SignedOffset;
assert((o_displ == TempOp.type) || (o_phrase == TempOp.type));
MDExtractAddressFields(TempOp, BaseReg, IndexReg, ScaleFactor, offset);
SignedOffset = (int) offset;
if (TempOp.type == o_phrase) {
assert(SignedOffset == 0); // implicit zero, as in [esp] ==> [esp+0]
}
if ((BaseReg == R_sp) || (IndexReg == R_sp)) {
// ESP-relative constant offset
SignedOffset -= this->MinStackAccessOffset; // convert to StackFrameMap index
}
else if (this->UseFP && ((BaseReg == R_bp) || (IndexReg == R_bp))) {
assert(false); // should never get here with unnormalized stack operand
SignedOffset -= this->FuncInfo.frregs; // base offsets from entry ESP value
SignedOffset -= this->MinStackAccessOffset; // convert to StackFrameMap index
}
else {
return false;
}
// We did not return false, so we should have a good offset. Use it to
// pass back the fine grained stack table entry for that offset.
if ((0 > SignedOffset) || (SignedOffset >= (int) this->FineGrainedStackTable.size())) {
SMP_msg("ERROR: FG stack table index out of range in MDGetFGStackLocInfo at %x\n", InstAddr);
FGEntry.SignMiscInfo = 0; // We cannot figure out signedness info without an FG info stack table.
FGEntry.SizeInfo = ComputeOperandBitWidthMask(TempOp, 0); // IDA can figure out width, anyway.
}
else {
FGEntry = this->FineGrainedStackTable.at((size_t) SignedOffset);
}
return true;
} // end of SMPFunction::MDGetFGStackLocInfo()
// Return true if we update fine grained stack entry for stack op TempOp from instruction at InstAddr
bool SMPFunction::MDUpdateFGStackLocInfo(ea_t InstAddr, op_t TempOp, struct FineGrainedInfo NewFG) {
int BaseReg;
int IndexReg;
ushort ScaleFactor;
ea_t offset;
int SignedOffset;
struct FineGrainedInfo OldFG, UnionFG;
assert((o_displ == TempOp.type) || (o_phrase == TempOp.type));
MDExtractAddressFields(TempOp, BaseReg, IndexReg, ScaleFactor, offset);
SignedOffset = (int) offset;
if (TempOp.type == o_phrase) {
assert(SignedOffset == 0); // implicit zero, as in [esp] ==> [esp+0]
}
if ((BaseReg == R_sp) || (IndexReg == R_sp)) {
// ESP-relative constant offset
SignedOffset -= this->MinStackAccessOffset; // convert to StackFrameMap index
}
else if (this->UseFP && ((BaseReg == R_bp) || (IndexReg == R_bp))) {
assert(false); // should never get here with unnormalized stack operands
SignedOffset -= this->FuncInfo.frregs; // base offsets from entry ESP value
SignedOffset -= this->MinStackAccessOffset; // convert to StackFrameMap index
}
else {
return false;
}
// We did not return false, so we should have a good offset. Use it to
// retrieve the fine grained stack table entry for that offset.
if ((0 > SignedOffset) || (SignedOffset >= (int) this->FineGrainedStackTable.size())) {
if (this->OutgoingArgsComputed) {
SMP_msg("ERROR: FG stack table index out of range in MDUpdateFGStackLocInfo at %x\n", InstAddr);
}
return false;
}
else if (this->OutgoingArgsComputed && (((size_t)SignedOffset) < this->OutgoingArgsSize)) {
// We don't want to update the outgoing args region, as it will not be consistent
// over multiple function calls. NOTE: We could fine tune this by seeing if we
// call multiple target functions or not; if only one, then outgoing args region
// would be consistent in the absence of varargs targets.
return false;
}
else {
OldFG = this->FineGrainedStackTable.at((size_t) SignedOffset);
UnionFG.SignMiscInfo = OldFG.SignMiscInfo | NewFG.SignMiscInfo;
UnionFG.SizeInfo = OldFG.SizeInfo | NewFG.SizeInfo;
if ((OldFG.SignMiscInfo != UnionFG.SignMiscInfo) || (OldFG.SizeInfo != UnionFG.SizeInfo)) {
// The signs they are a-changin'. Or maybe the sizes.
this->FineGrainedStackTable.at(SignedOffset).SignMiscInfo |= NewFG.SignMiscInfo;
this->FineGrainedStackTable.at(SignedOffset).SizeInfo |= NewFG.SizeInfo;
}
}
return true;
} // end of SMPFunction::MDUpdateFGStackLocInfo()
// retrieve DEF addr from GlobalDefAddrBySSA or return BADADDR
ea_t SMPFunction::GetGlobalDefAddr(op_t DefOp, int SSANum) {
map<int, ea_t>::iterator DefAddrMapIter;
map<int, ea_t>::iterator MapResult;