// (removed: repository-viewer navigation artifact)
SMPInstr *CurrInst = (*InstIter);
ea_t InstAddr = CurrInst->GetAddr();
sval_t sp_delta = get_spd(this->GetFuncInfo(), InstAddr);
if (0 < sp_delta) {
// Stack underflow; about to assert
// (removed: repository-viewer scrape artifact)
SMP_msg("Stack underflow at %x %s sp_delta: %d\n", CurrInst->GetAddr(),
CurrInst->GetDisasm(), sp_delta);
// (removed: repository-viewer line-number gutter artifact)
}
assert(0 >= sp_delta);
ea_t offset;
size_t DataSize;
bool UsedFramePointer;
bool IndexedAccess;
bool SignedMove;
bool UnsignedMove;
if (CurrInst->HasDestMemoryOperand()) {
// NOTE: We need to catch stack pushes here also (callee-saved regs). !!!!!*******!!!!!!!!
set<DefOrUse, LessDefUse>::iterator CurrDef;
for (CurrDef = CurrInst->GetFirstDef(); CurrDef != CurrInst->GetLastDef(); ++CurrDef) {
op_t TempOp = CurrDef->GetOp();
if (TempOp.type != o_phrase && TempOp.type != o_displ)
continue;
if (this->MDGetStackOffsetAndSize(CurrInst, TempOp, sp_delta, offset, DataSize, UsedFramePointer,
IndexedAccess, SignedMove, UnsignedMove)) {
SignedOffset = (int) offset;
if (IndexedAccess && ((0 > SignedOffset) || ((offset + DataSize) > this->StackFrameMap.size()))) {
continue; // Indexed expressions can be within frame but offset is outside frame
}
#if 0 // ls_O3.exe has IDA trouble on chunked function get_funky_string().
assert(0 <= SignedOffset);
#else
if (0 > SignedOffset) { // negative offset but not Indexed; very bad
// (removed: repository-viewer scrape artifact)
SMP_msg("ERROR: Negative stack offset at %x in %s. Abandoning LocalVar ID.\n", CurrInst->GetAddr(), this->GetFuncName());
return false;
}
#endif
if ((SignedOffset + (long) DataSize) > this->LocalVarOffsetLimit) {
// Going out of range. Extend LocalVarTable.
struct LocalVar TempLocal;
char TempStr[20];
TempLocal.offset = (long) SignedOffset;
TempLocal.size = DataSize;
// (removed: repository-viewer scrape artifact)
SMP_strncpy(TempLocal.VarName, "SMP_InArg", sizeof(TempLocal.VarName) - 1);
(void) SMP_snprintf(TempStr, 18, "%d", offset);
SMP_strncat(TempLocal.VarName, TempStr, sizeof(TempLocal.VarName) - 1);
this->LocalVarTable.push_back(TempLocal);
this->LocalVarOffsetLimit = (long) (SignedOffset + (long) DataSize);
}
}
}
}
if (CurrInst->HasSourceMemoryOperand()) {
set<DefOrUse, LessDefUse>::iterator CurrUse;
for (CurrUse = CurrInst->GetFirstUse(); CurrUse != CurrInst->GetLastUse(); ++CurrUse) {
op_t TempOp = CurrUse->GetOp();
// (removed: repository-viewer scrape artifact)
if ((TempOp.type != o_phrase) && (TempOp.type != o_displ))
continue;
if (this->MDGetStackOffsetAndSize(CurrInst, TempOp, sp_delta, offset, DataSize, UsedFramePointer,
IndexedAccess, SignedMove, UnsignedMove)) {
SignedOffset = (int) offset;
if (IndexedAccess && ((0 > SignedOffset) || ((offset + DataSize) > this->StackFrameMap.size()))) {
continue; // Indexed expressions can be within frame but offset is outside frame
}
assert(0 <= SignedOffset);
if ((SignedOffset + (long) DataSize) > this->LocalVarOffsetLimit) {
// Going out of range. Extend LocalVarTable.
struct LocalVar TempLocal;
char TempStr[20];
TempLocal.offset = (long) SignedOffset;
TempLocal.size = DataSize;
// (removed: repository-viewer scrape artifact)
SMP_strncpy(TempLocal.VarName, "SMP_InArg", sizeof(TempLocal.VarName) - 1);
(void) SMP_snprintf(TempStr, 18, "%d", offset);
SMP_strncat(TempLocal.VarName, TempStr, sizeof(TempLocal.VarName) - 1);
this->LocalVarTable.push_back(TempLocal);
this->LocalVarOffsetLimit = (long) (SignedOffset + (long) DataSize);
}
}
}
}
} // end for all instructions
// Fill in the gaps with new variables as well. SHOULD WE? WHY?
return true;
} // end of SMPFunction::AuditLocalVarTable()
// Determine how many bytes at the bottom of the stack frame (i.e. at bottom of
// this->LocalVarsSize) are used for outgoing args. This is the case when the cdecl
// calling convention is used, e.g. gcc/linux allocates local var space + out args space
// in a single allocation and then writes outarg values directly to ESP+0, ESP+4, etc.
void SMPFunction::FindOutgoingArgsSize(void) {
// Compute the lowest value reached by the stack pointer.
list<SMPInstr *>::iterator InstIter;
unsigned short BitWidthMask;
#if SMP_DEBUG_STACK_GRANULARITY
DebugFlag = (0 == strcmp("BZ2_blockSort", this->GetFuncName()));
this->OutgoingArgsComputed = true;
clc5q
committed
SMP_msg("DEBUG: Entered FindOutgoingArgsSize for %s\n", this->GetFuncName());
#if SMP_IDAPRO52_WORKAROUND
this->OutgoingArgsSize = 16;
return;
#if SMP_DEBUG_STACK_GRANULARITY
clc5q
committed
SMP_msg("AllocPointDelta: %d MinStackDelta: %d\n", this->AllocPointDelta, this->MinStackDelta);
if ((0 <= this->MinStackDelta) || (0 <= this->AllocPointDelta)) {
// No allocations; sometimes happens in library functions.
this->OutgoingArgsSize = 0;
this->MinStackDelta = 0;
this->AllocPointDelta = 0;
return;
}
assert(0 > this->MinStackDelta);
// Allocate a vector of stack frame entries, one for each byte of the stack frame.
// This will be our memory map for analyzing stack usage.
int limit = 0;
#if 1
if (this->LocalVarOffsetLimit > 0) {
if (limit < (this->LocalVarOffsetLimit + this->MinStackDelta)) {
// Make room for incoming args, other stuff above local vars.
limit = this->LocalVarOffsetLimit + this->MinStackDelta;
if (this->MinStackDelta < this->AllocPointDelta) {
// Also have stuff below alloc point to make room for.
limit += (this->AllocPointDelta - this->MinStackDelta);
}
#endif
for (int i = this->MinStackDelta; i < limit; ++i) {
struct StackFrameEntry TempEntry;
TempEntry.VarPtr = NULL;
TempEntry.offset = (long) i;
TempEntry.Read = false;
TempEntry.Written = false;
TempEntry.AddressTaken = false;
TempEntry.ESPRelativeAccess = false;
TempEntry.EBPRelativeAccess = false;
TempEntry.IndexedAccess = false;
this->StackFrameMap.push_back(TempEntry);
struct FineGrainedInfo TempFineGrained;
TempFineGrained.SignMiscInfo = 0;
TempFineGrained.SizeInfo = 0;
this->FineGrainedStackTable.push_back(TempFineGrained);
#if 0
for (int i = 0; i < this->LocalVarOffsetLimit; ++i) {
struct FineGrainedInfo TempFineGrained;
TempFineGrained.SignMiscInfo = 0;
TempFineGrained.SizeInfo = 0;
this->FineGrainedStackTable.push_back(TempFineGrained);
#endif
// Fill in the VarPtr fields for each StackFrameMap entry.
if (0 <= this->AllocPointDelta) {
clc5q
committed
SMP_msg("FATAL ERROR: AllocPointDelta = %d in %s\n", this->AllocPointDelta, this->GetFuncName());
assert(0 > this->AllocPointDelta);
for (size_t i = 0; i < this->LocalVarTable.size(); ++i) {
assert(this->LocalVarTable.at(i).offset >= 0);
// Picture that AllocPointDelta is -200, MinStackDelta is -210, and
// the LocalVarTable[i].offset is +8 (i.e. 8 bytes above alloc point).
// Then base = 8 + (-200 - -210) = 8 + 10 = 18, the proper offset into
// the StackFrameMap.
size_t base = (size_t) (this->LocalVarTable.at(i).offset
+ (this->AllocPointDelta - this->MinStackDelta));
size_t limit = base + this->LocalVarTable.at(i).size;
if (limit > this->StackFrameMap.size()) {
clc5q
committed
SMP_msg("ERROR: FindOutArgsSize: base = %zu limit = %zu StackFrameMap size = %zu in %s\n",
base, limit, this->StackFrameMap.size(), this->GetFuncName());
this->OutgoingArgsComputed = false;
this->OutgoingArgsSize = 0;
return;
}
assert(limit <= this->StackFrameMap.size());
for (size_t MapIndex = base; MapIndex < limit; ++MapIndex) {
this->StackFrameMap[MapIndex].VarPtr = &(this->LocalVarTable.at(i));
}
}
// Iterate through all instructions and record stack frame accesses in the StackFrameMap.
InstIter = this->Instrs.begin();
#if SMP_USE_SSA_FNOP_MARKER
if ((*InstIter)->IsFloatNop())
++InstIter; // skip marker instruction
for ( ; InstIter != this->Instrs.end(); ++InstIter) {
SMPInstr *CurrInst = (*InstIter);
ea_t InstAddr = CurrInst->GetAddr();
sval_t sp_delta = get_spd(this->GetFuncInfo(), InstAddr);
if (0 < sp_delta) {
// Stack underflow; about to assert
SMP_msg("FATAL ERROR: Stack underflow at %x %s sp_delta: %d\n", InstAddr,
CurrInst->GetDisasm(), sp_delta);
}
assert(0 >= sp_delta);
ea_t offset;
size_t DataSize;
bool UsedFramePointer;
bool IndexedAccess;
bool SignedMove;
bool UnsignedMove;
if (CurrInst->HasDestMemoryOperand()) {
set<DefOrUse, LessDefUse>::iterator CurrDef;
for (CurrDef = CurrInst->GetFirstDef(); CurrDef != CurrInst->GetLastDef(); ++CurrDef) {
op_t TempOp = CurrDef->GetOp();
if (TempOp.type != o_phrase && TempOp.type != o_displ)
continue;
if (this->MDGetStackOffsetAndSize(CurrInst, TempOp, sp_delta, offset, DataSize, UsedFramePointer,
IndexedAccess, SignedMove, UnsignedMove)) {
SignedOffset = (int) offset;
if (IndexedAccess && ((0 > SignedOffset) || ((offset + DataSize) > this->StackFrameMap.size()))) {
continue; // Indexed expressions can be within frame even when offset is outside frame
}
assert(0 <= SignedOffset);
#if 0
if (offset >= this->FuncInfo.frsize)
continue; // limit processing to outgoing args and locals
if ((offset + DataSize) > this->StackFrameMap.size()) {
clc5q
committed
SMP_msg("ERROR: offset = %u DataSize = %zu FrameMapSize = %zu\n",
offset, DataSize, this->StackFrameMap.size());
}
assert((offset + DataSize) <= this->StackFrameMap.size());
for (int j = 0; j < (int) DataSize; ++j) {
this->StackFrameMap[offset + j].Written = true;
this->StackFrameMap[offset + j].IndexedAccess = IndexedAccess;
if (!UsedFramePointer) {
this->StackFrameMap[offset + j].ESPRelativeAccess = true;
}
else {
this->StackFrameMap[offset + j].EBPRelativeAccess = true;
struct FineGrainedInfo StackDefFG;
BitWidthMask = ComputeOperandBitWidthMask(TempOp, DataSize);
this->FineGrainedStackTable.at(offset).SizeInfo |= BitWidthMask;
StackDefFG.SizeInfo = BitWidthMask;
this->FineGrainedStackTable.at(offset).SignMiscInfo |= FG_MASK_WRITTEN;
StackDefFG.SignMiscInfo = FG_MASK_WRITTEN;
if (IndexedAccess) {
this->FineGrainedStackTable.at(offset).SignMiscInfo |= FG_MASK_INDEXED_ACCESS;
StackDefFG.SignMiscInfo |= FG_MASK_INDEXED_ACCESS;
if (!UsedFramePointer) {
this->FineGrainedStackTable.at(offset).SignMiscInfo |= FG_MASK_SP_RELATIVE;
StackDefFG.SignMiscInfo |= FG_MASK_SP_RELATIVE;
}
else {
this->FineGrainedStackTable.at(offset).SignMiscInfo |= FG_MASK_FP_RELATIVE;
StackDefFG.SignMiscInfo |= FG_MASK_FP_RELATIVE;
}
// We will process the signedness of stores later, so that loads can take precedence
// over stores in determining signedness in the table. We go ahead and process
// signedness for the separate DEF and USE maps by InstAddr.
if (SignedMove) {
StackDefFG.SignMiscInfo |= FG_MASK_SIGNED;
}
else if (UnsignedMove) {
StackDefFG.SignMiscInfo |= FG_MASK_UNSIGNED;
}
// Insert the StackDefFG into the map of InstAddr to DEF FG info.
pair<map<ea_t, struct FineGrainedInfo>::iterator, bool> InsertResult;
pair<ea_t, struct FineGrainedInfo> InsertValue(InstAddr, StackDefFG);
InsertResult = this->StackDefFGInfo.insert(InsertValue);
assert(InsertResult.second);
} // end if MDGetStackOffsetAndSize()
} // end for all DEFs
} // end if DestMemoryOperand
if (CurrInst->HasSourceMemoryOperand()) {
set<DefOrUse, LessDefUse>::iterator CurrUse;
for (CurrUse = CurrInst->GetFirstUse(); CurrUse != CurrInst->GetLastUse(); ++CurrUse) {
op_t TempOp = CurrUse->GetOp();
if (TempOp.type != o_phrase && TempOp.type != o_displ)
continue;
if (this->MDGetStackOffsetAndSize(CurrInst, TempOp, sp_delta, offset, DataSize, UsedFramePointer,
IndexedAccess, SignedMove, UnsignedMove)) {
SignedOffset = (int) offset;
if (IndexedAccess && ((0 > SignedOffset) || ((SignedOffset + DataSize) > this->StackFrameMap.size()))) {
continue; // Indexed expressions can be within frame but offset is outside frame
}
assert(0 <= SignedOffset);
#if 0
if (offset >= this->FuncInfo.frsize)
continue; // limit processing to outgoing args and locals
#endif
if ((SignedOffset + DataSize) > this->StackFrameMap.size()) {
clc5q
committed
SMP_msg("ERROR: offset = %u DataSize = %zu FrameMapSize = %zu\n",
offset, DataSize, this->StackFrameMap.size());
assert((SignedOffset + DataSize) <= this->StackFrameMap.size());
for (int j = 0; j < (int) DataSize; ++j) {
this->StackFrameMap[offset + j].Read = true;
this->StackFrameMap[offset + j].IndexedAccess |= IndexedAccess;
if (!UsedFramePointer)
this->StackFrameMap[offset + j].ESPRelativeAccess = true;
else
this->StackFrameMap[offset + j].EBPRelativeAccess = true;
}
struct FineGrainedInfo StackUseFG;
BitWidthMask = ComputeOperandBitWidthMask(TempOp, DataSize);
this->FineGrainedStackTable.at(offset).SizeInfo |= BitWidthMask;
StackUseFG.SizeInfo = BitWidthMask;
this->FineGrainedStackTable.at(offset).SignMiscInfo |= FG_MASK_READ;
StackUseFG.SignMiscInfo = FG_MASK_READ;
if (IndexedAccess) {
this->FineGrainedStackTable.at(offset).SignMiscInfo |= FG_MASK_INDEXED_ACCESS;
StackUseFG.SignMiscInfo |= FG_MASK_INDEXED_ACCESS;
if (!UsedFramePointer) {
this->FineGrainedStackTable.at(offset).SignMiscInfo |= FG_MASK_SP_RELATIVE;
StackUseFG.SignMiscInfo |= FG_MASK_SP_RELATIVE;
}
else {
this->FineGrainedStackTable.at(offset).SignMiscInfo |= FG_MASK_FP_RELATIVE;
StackUseFG.SignMiscInfo |= FG_MASK_FP_RELATIVE;
}
if (SignedMove) {
this->FineGrainedStackTable.at(offset).SignMiscInfo |= FG_MASK_SIGNED;
StackUseFG.SignMiscInfo |= FG_MASK_SIGNED;
}
else if (UnsignedMove) {
this->FineGrainedStackTable.at(offset).SignMiscInfo |= FG_MASK_UNSIGNED;
StackUseFG.SignMiscInfo |= FG_MASK_UNSIGNED;
// Insert the StackUseFG into the map of InstAddr to USE FG info.
pair<map<ea_t, struct FineGrainedInfo>::iterator, bool> InsertResult;
pair<ea_t, struct FineGrainedInfo> InsertValue(InstAddr, StackUseFG);
InsertResult = this->StackUseFGInfo.insert(InsertValue);
assert(InsertResult.second);
} // end if MDGetStackOffsetAndSize()
} // end if SourceMemoryOperand
// NOTE: Detect taking the address of stack locations. **!!**
} // end for all instructions
// If function is a leaf function, set OutgoingArgsSize to zero and return.
clc5q
committed
if (this->IsLeaf() && !(this->IsDirectlyRecursive())) {
this->OutgoingArgsSize = 0;
return;
}
// For non-leaf functions, set the OutgoingArgsSize to the write-only, ESP-relative
// region of the bottom of the StackFrameMap.
bool OutgoingArgsRegionFinished = false;
bool IndexedOutgoingArgs = false; // Any indexed accesses to outgoing args?
size_t FramePadSize = 0;
for (size_t MapIndex = 0; MapIndex < this->StackFrameMap.size(); ++MapIndex) {
// Some of the bottom of the stack frame might be below the local frame allocation.
// These are pushes that happened after allocation, etc. We skip over these
// locations and define the outgoing args region to start strictly at the bottom
// of the local frame allocation.
struct StackFrameEntry TempEntry = this->StackFrameMap.at(MapIndex);
if (DebugFlag) {
clc5q
committed
SMP_msg("StackFrameMap entry %zu: offset: %ld Read: %d Written: %d ESP: %d EBP: %d\n",
MapIndex, TempEntry.offset, TempEntry.Read, TempEntry.Written,
TempEntry.ESPRelativeAccess, TempEntry.EBPRelativeAccess);
}
if (TempEntry.offset < this->AllocPointDelta)
continue;
if (OutgoingArgsRegionFinished) {
// We are just processing the stack frame padding.
if (!TempEntry.Read && !TempEntry.Written) {
// Could be stack frame padding.
++FramePadSize;
}
else {
break; // No more padding region
}
}
clc5q
committed
else if (TempEntry.Read || TempEntry.EBPRelativeAccess || !TempEntry.Written
2386
2387
2388
2389
2390
2391
2392
2393
2394
2395
2396
2397
2398
2399
2400
2401
2402
2403
2404
2405
2406
2407
2408
2409
2410
|| !TempEntry.ESPRelativeAccess) {
OutgoingArgsRegionFinished = true;
if (!TempEntry.Read && !TempEntry.Written) {
// Could be stack frame padding.
++FramePadSize;
}
else {
break; // No padding region
}
}
else {
this->OutgoingArgsSize++;
if (TempEntry.IndexedAccess) {
IndexedOutgoingArgs = true;
}
}
}
// If any outgoing arg was accessed using an index register, then we don't know how high
// the index register value went. It could potentially consume the so-called padding
// region, which might be just the region we did not detect direct accesses to because
// the accesses were indirect. To be safe, we expand the outgoing args region to fill
// the padding region above it in this indexed access case.
if (IndexedOutgoingArgs) {
this->OutgoingArgsSize += FramePadSize;
// Sometimes we encounter unused stack space above the outgoing args. Lump this space
// in with the outgoing args. We detect this by noting when the outgoing args space
// has only partially used the space assigned to a local var.
// NOTE: This is usually just stack padding to maintain stack alignment. It could
// also be the case that the lowest local variable is accessed indirectly and we missed
// seeing its address taken, in which case it would be unsound to lump it into the
// outgoing args region. We might want to create a local var called STACKPAD
// to occupy this space.
if ((0 < this->OutgoingArgsSize) && (this->OutgoingArgsSize < this->FuncInfo.frsize)) {
long MapIndex = (this->AllocPointDelta - this->MinStackDelta);
assert(0 <= MapIndex);
MapIndex += (((long) this->OutgoingArgsSize) - 1);
struct StackFrameEntry TempEntry = this->StackFrameMap.at((size_t) MapIndex);
clc5q
committed
if (NULL == TempEntry.VarPtr) { // Gap in stack frame; IDA 6.0
clc5q
committed
SMP_msg("Gap in stack frame: %s\n", this->GetFuncName());
clc5q
committed
}
else if (this->OutgoingArgsSize < (TempEntry.VarPtr->offset + TempEntry.VarPtr->size)) {
clc5q
committed
#if SMP_DEBUG_FRAMEFIXUP
clc5q
committed
SMP_msg("OutGoingArgsSize = %d", this->OutgoingArgsSize);
clc5q
committed
#endif
this->OutgoingArgsSize = TempEntry.VarPtr->offset + TempEntry.VarPtr->size;
clc5q
committed
#if SMP_DEBUG_FRAMEFIXUP
clc5q
committed
SMP_msg(" adjusted to %d\n", this->OutgoingArgsSize);
clc5q
committed
#endif
return;
} // end of SMPFunction::FindOutgoingArgsSize()
// If TempOp reads or writes to a stack location, return the offset (relative to the initial
//  stack pointer value) and the size in bytes of the data access. Also return whether the
//  access was frame-pointer-relative, and whether signedness can be inferred due to a load
//  from the stack being zero-extended or sign-extended.
// NOTE: TempOp must be of type o_displ or o_phrase, as no other operand type could be a
//  stack memory access.
// sp_delta is the stack pointer delta of the current instruction, relative to the initial
//  stack pointer value for the function.
// Return true if a stack memory access was found in TempOp, false otherwise.
// NOTE(review): closing braces dropped by a diff-viewer scrape were restored; the
//  offset-adjustment arithmetic mirrors the intact sibling MDGetFGStackLocInfo().
bool SMPFunction::MDGetStackOffsetAndSize(SMPInstr *Instr, op_t TempOp, sval_t sp_delta, ea_t &offset, size_t &DataSize, bool &FP,
    bool &Indexed, bool &Signed, bool &Unsigned) {
    int BaseReg;
    int IndexReg;
    ushort ScaleFactor;
    int SignedOffset;

    assert((o_displ == TempOp.type) || (o_phrase == TempOp.type));
    MDExtractAddressFields(TempOp, BaseReg, IndexReg, ScaleFactor, offset);
    if (TempOp.type == o_phrase) {
        assert(offset == 0); // implicit zero, as in [esp] ==> [esp+0]
    }
    SignedOffset = (int) offset; // avoid sign errors during adjustment arithmetic
    if ((BaseReg == R_sp) || (IndexReg == R_sp)) {
        // ESP-relative constant offset
        SignedOffset += sp_delta; // base offsets from entry ESP value
        SignedOffset -= this->MinStackDelta; // convert to StackFrameMap index
        offset = (ea_t) SignedOffset;
        // Get size of data written
        DataSize = GetOpDataSize(TempOp);
        FP = false;
        Indexed = ((BaseReg != R_none) && (IndexReg != R_none)); // two regs used
        unsigned short opcode = Instr->GetCmd().itype;
        Unsigned = (opcode == NN_movzx);
        Signed = (opcode == NN_movsx);
        if ((0 > SignedOffset) && (!Indexed)) {
            // Consider asserting here.
            SMP_msg("ERROR: Negative offset in MDGetStackOffsetAndSize for inst dump: \n");
        }
        return true;
    }
    else if (this->UseFP && ((BaseReg == R_bp) || (IndexReg == R_bp))) {
        // EBP-relative access; rebase just like MDGetFGStackLocInfo() does.
        SignedOffset -= this->FuncInfo.frregs; // base offsets from entry ESP value
        SignedOffset -= this->MinStackDelta; // convert to StackFrameMap index
        offset = (ea_t) SignedOffset;
        DataSize = GetOpDataSize(TempOp);
        FP = true;
        Indexed = ((BaseReg != R_none) && (IndexReg != R_none)); // two regs used
        unsigned short opcode = Instr->GetCmd().itype;
        Unsigned = (opcode == NN_movzx);
        Signed = (opcode == NN_movsx);
        if ((0 > SignedOffset) && (!Indexed)) {
            // Consider asserting here.
            SMP_msg("ERROR: Negative offset in MDGetStackOffsetAndSize for inst dump: \n");
        }
        return true;
    }
    else {
        return false; // not a stack memory access
    }
} // end of SMPFunction::MDGetStackOffsetAndSize()
// Return fine grained stack entry for stack op TempOp from instruction at InstAddr.
// Converts the operand's displacement to a FineGrainedStackTable index (ESP-relative
//  via sp_delta, or EBP-relative via frregs) and copies out that entry.
// Returns false if TempOp is not a stack access at all.
bool SMPFunction::MDGetFGStackLocInfo(ea_t InstAddr, op_t TempOp, struct FineGrainedInfo &FGEntry) {
    int BaseReg;
    int IndexReg;
    ushort ScaleFactor;
    ea_t offset;
    int SignedOffset;

    assert((o_displ == TempOp.type) || (o_phrase == TempOp.type));
    MDExtractAddressFields(TempOp, BaseReg, IndexReg, ScaleFactor, offset);
    sval_t sp_delta = get_spd(this->GetFuncInfo(), InstAddr);
    SignedOffset = (int) offset;
    if (TempOp.type == o_phrase) {
        assert(SignedOffset == 0); // implicit zero, as in [esp] ==> [esp+0]
    }
    if ((BaseReg == R_sp) || (IndexReg == R_sp)) {
        // ESP-relative constant offset
        SignedOffset += sp_delta; // base offsets from entry ESP value
        SignedOffset -= this->MinStackDelta; // convert to StackFrameMap index
    }
    else if (this->UseFP && ((BaseReg == R_bp) || (IndexReg == R_bp))) {
        SignedOffset -= this->FuncInfo.frregs; // base offsets from entry ESP value
        SignedOffset -= this->MinStackDelta; // convert to StackFrameMap index
    }
    else {
        return false; // not a stack access
    }
    // We did not return false, so we should have a good offset. Use it to
    //  pass back the fine grained stack table entry for that offset.
    if ((0 > SignedOffset) || (SignedOffset >= (int) this->FineGrainedStackTable.size())) {
        SMP_msg("ERROR: FG stack table index out of range in MDGetFGStackLocInfo at %x\n", InstAddr);
        FGEntry.SignMiscInfo = 0; // We cannot figure out signedness info without an FG info stack table.
        FGEntry.SizeInfo = ComputeOperandBitWidthMask(TempOp, 0); // IDA can figure out width, anyway.
    }
    else {
        FGEntry = this->FineGrainedStackTable.at((size_t) SignedOffset);
    }
    return true;
} // end of SMPFunction::MDGetFGStackLocInfo()
// (removed: repository-viewer line-number gutter artifact)
// Return true if we update fine grained stack entry for stack op TempOp from instruction at InstAddr.
// Converts TempOp's displacement to a FineGrainedStackTable index (same rebasing as
//  MDGetFGStackLocInfo) and ORs NewFG's sign/size bits into the table entry.
// Returns false if TempOp is not a stack access, is out of table range, or lies in the
//  outgoing-args region (which is deliberately not tracked).
bool SMPFunction::MDUpdateFGStackLocInfo(ea_t InstAddr, op_t TempOp, struct FineGrainedInfo NewFG) {
    int BaseReg;
    int IndexReg;
    ushort ScaleFactor;
    ea_t offset;
    int SignedOffset;
    struct FineGrainedInfo OldFG, UnionFG;

    assert((o_displ == TempOp.type) || (o_phrase == TempOp.type));
    MDExtractAddressFields(TempOp, BaseReg, IndexReg, ScaleFactor, offset);
    sval_t sp_delta = get_spd(this->GetFuncInfo(), InstAddr);
    SignedOffset = (int) offset;
    if (TempOp.type == o_phrase) {
        assert(SignedOffset == 0); // implicit zero, as in [esp] ==> [esp+0]
    }
    if ((BaseReg == R_sp) || (IndexReg == R_sp)) {
        // ESP-relative constant offset
        SignedOffset += sp_delta; // base offsets from entry ESP value
        SignedOffset -= this->MinStackDelta; // convert to StackFrameMap index
    }
    else if (this->UseFP && ((BaseReg == R_bp) || (IndexReg == R_bp))) {
        SignedOffset -= this->FuncInfo.frregs; // base offsets from entry ESP value
        SignedOffset -= this->MinStackDelta; // convert to StackFrameMap index
    }
    else {
        return false; // not a stack access
    }
    // We did not return false, so we should have a good offset. Use it to
    //  retrieve the fine grained stack table entry for that offset.
    if ((0 > SignedOffset) || (SignedOffset >= (int) this->FineGrainedStackTable.size())) {
        if (this->OutgoingArgsComputed) {
            // BUG FIX: message previously named MDGetFGStackLocInfo (copy-paste error).
            SMP_msg("ERROR: FG stack table index out of range in MDUpdateFGStackLocInfo at %x\n", InstAddr);
        }
        return false;
    }
    else if (this->OutgoingArgsComputed && (((size_t) SignedOffset) < this->OutgoingArgsSize)) {
        // We don't want to update the outgoing args region, as it will not be consistent
        // over multiple function calls. NOTE: We could fine tune this by seeing if we
        // call mutliple target functions or not; if only one, then outgoing args region
        // would be consistent in the absence of varargs targets.
        return false;
    }
    else {
        OldFG = this->FineGrainedStackTable.at((size_t) SignedOffset);
        UnionFG.SignMiscInfo = OldFG.SignMiscInfo | NewFG.SignMiscInfo;
        UnionFG.SizeInfo = OldFG.SizeInfo | NewFG.SizeInfo;
        if ((OldFG.SignMiscInfo != UnionFG.SignMiscInfo) || (OldFG.SizeInfo != UnionFG.SizeInfo)) {
            // The signs they are a-changin'. Or maybe the sizes.
            this->FineGrainedStackTable.at(SignedOffset).SignMiscInfo |= NewFG.SignMiscInfo;
            this->FineGrainedStackTable.at(SignedOffset).SizeInfo |= NewFG.SizeInfo;
        }
    }
    return true;
} // end of SMPFunction::MDUpdateFGStackLocInfo()
// Retrieve the DEF address recorded for (DefOp, SSANum) in GlobalDefAddrBySSA,
// or BADADDR if no DEF was recorded for that name/SSA pair.
ea_t SMPFunction::GetGlobalDefAddr(op_t DefOp, int SSANum) {
    int HashedName = HashGlobalNameAndSSA(DefOp, SSANum);
    map<int, ea_t>::iterator FoundIter = this->GlobalDefAddrBySSA.find(HashedName);
    if (FoundIter == this->GlobalDefAddrBySSA.end()) {
        return BADADDR; // not found
    }
    return (ea_t) FoundIter->second;
} // end of SMPFunction::GetGlobalDefAddr()
// Look up the basic block containing InstAddr in InstBlockMap; assert on failure.
SMPBasicBlock *SMPFunction::GetBlockFromInstAddr(ea_t InstAddr) {
    map<ea_t, SMPBasicBlock *>::iterator FoundBlock = this->InstBlockMap.find(InstAddr);
    assert(FoundBlock != this->InstBlockMap.end());
    return FoundBlock->second;
}
// (removed: repository-viewer scrape artifact)
// Retrieve the instruction at InstAddr; asserts (inside GetBlockFromInstAddr)
//  if no block contains that address.
SMPInstr *SMPFunction::GetInstFromAddr(ea_t InstAddr) {
    SMPBasicBlock *CurrBlock = this->GetBlockFromInstAddr(InstAddr);
    SMPInstr *CurrInst = CurrBlock->FindInstr(InstAddr);
    return CurrInst;
}
// Given a block number and a Phi DEF operand (with SSANum), return the Phi
// iterator from that block, asserting if the Phi function is not found.
set<SMPPhiFunction, LessPhi>::iterator SMPFunction::GetPhiIterForPhiDef(size_t BlockNumber, op_t DefOp, int SSANum) {
    SMPBasicBlock *OwningBlock = this->RPOBlocks.at(BlockNumber);
    set<SMPPhiFunction, LessPhi>::iterator PhiIter = OwningBlock->FindPhi(DefOp);
    assert(PhiIter != OwningBlock->GetLastPhi());
    return PhiIter;
}
// Is DestOp within the outgoing args area? Assume it must be an ESP-relative
//  DEF operand in order to be a write to the outgoing args area.
// Conservatively returns true when OutgoingArgsSize has not been computed yet.
bool SMPFunction::IsInOutgoingArgsRegion(op_t DestOp) {
    bool OutArgWrite = false;
    int BaseReg, IndexReg;
    ushort ScaleFactor;
    ea_t offset;

    // Leaf functions have no outgoing args region.
    if (this->IsLeaf())
        return false;
    MDExtractAddressFields(DestOp, BaseReg, IndexReg, ScaleFactor, offset);
    if ((BaseReg != R_sp) && (IndexReg != R_sp))
        return false;
    // Indexed/scaled accesses cannot be classified by constant offset; warn and bail.
    if (((BaseReg == R_sp) && (IndexReg != R_none))
        || ((IndexReg == R_sp) && (BaseReg != R_none))
        || (0 < ScaleFactor)) {
        SMP_msg("WARNING: WritesToOutgoingArgs called with indexed write.");
        PrintOperand(DestOp);
        return false;
    }
    if (!this->OutgoingArgsComputed) {
        OutArgWrite = true; // be conservative
    }
    else {
        OutArgWrite = (offset < this->OutgoingArgsSize);
    }
    return OutArgWrite;
} // end of SMPFunction::IsInOutgoingArgsRegion()
// Is DestOp a direct memory access above the local vars frame?
//  (ESP-relative with offset above LocalVarsSize, or EBP-relative with positive offset.)
bool SMPFunction::WritesAboveLocalFrame(op_t DestOp) {
    bool InArgWrite = false;
    int BaseReg, IndexReg;
    ushort ScaleFactor;
    ea_t offset;

    MDExtractAddressFields(DestOp, BaseReg, IndexReg, ScaleFactor, offset);
    bool ESPrelative = (BaseReg == R_sp) || (IndexReg == R_sp);
    bool EBPrelative = this->UseFP && ((BaseReg == R_bp) || (IndexReg == R_bp));
    if (!(ESPrelative || EBPrelative))
        return false;
    // Indexed/scaled accesses cannot be classified by constant offset; warn and bail.
    if (((IndexReg != R_none) && (BaseReg != R_none))
        || (0 < ScaleFactor)) {
        SMP_msg("WARNING: WritesAboveLocalFrame called with indexed write.");
        PrintOperand(DestOp);
        return false;
    }
    // BUG FIX: SignedOffset was used without ever being declared or derived
    // from the extracted offset; derive it here (mirrors IndexedWritesAboveLocalFrame).
    long SignedOffset = (long) offset;
    InArgWrite = (ESPrelative && (SignedOffset > ((long) this->LocalVarsSize)))
        || (EBPrelative && (SignedOffset > 0));
    return InArgWrite;
} // end of SMPFunction::WritesAboveLocalFrame()
// Is DestOp an indexed write above the local vars frame?
clc5q
committed
bool SMPFunction::IndexedWritesAboveLocalFrame(op_t DestOp) {
bool InArgWrite = false;
int BaseReg, IndexReg;
ushort ScaleFactor;
ea_t offset;
int SignedOffset;
MDExtractAddressFields(DestOp, BaseReg, IndexReg, ScaleFactor, offset);
bool ESPrelative = (BaseReg == R_sp) || (IndexReg == R_sp);
bool EBPrelative = this->UseFP && ((BaseReg == R_bp) || (IndexReg == R_bp));
if (!(ESPrelative || EBPrelative))
return false;
SignedOffset = (int) offset;
InArgWrite = (ESPrelative && (SignedOffset > this->LocalVarsSize))
|| (EBPrelative && (SignedOffset > 0));
} // end of SMPFunction::IndexedWritesAboveLocalFrame
// (removed: repository-viewer line-number gutter artifact)
// Is CurrOp found anywhere in the StackPtrCopySet, regardless of which address
//  and stack delta values are associated with it?
// The set holds (operand, (addr, delta)) triples ordered by operand, so we scan
//  until we either match the operand or pass the position where it would sort.
bool SMPFunction::IsInStackPtrCopySet(op_t CurrOp) {
    set<pair<op_t, pair<ea_t, sval_t> >, LessStackDeltaCopy>::iterator CopyIter;
    for (CopyIter = this->StackPtrCopySet.begin(); CopyIter != this->StackPtrCopySet.end(); ++CopyIter) {
        pair<op_t, pair<ea_t, sval_t> > CandidateCopy = *CopyIter;
        op_t CandidateOp = CandidateCopy.first;
        if (IsEqOp(CandidateOp, CurrOp)) {
            return true; // found a matching operand
        }
        if (CandidateOp.type > CurrOp.type) {
            return false; // ordered set: already moved past its spot
        }
    }
    return false; // exhausted the set without a match
} // end of SMPFunction::IsInStackPtrCopySet()
// Find evidence of calls to alloca(), which appear as stack space allocations (i.e.
// subtractions from the stack pointer) AFTER the local frame allocation instruction
// for this function.
// Return true if such an allocation is found and false otherwise.
bool SMPFunction::FindAlloca(void) {
list<SMPInstr *>::iterator CurrInst = this->Instrs.begin();
#if SMP_USE_SSA_FNOP_MARKER
++CurrInst; // skip marker instruction
for ( ; CurrInst != this->Instrs.end(); ++CurrInst) {
if (((*CurrInst)->GetAddr() > this->LocalVarsAllocInstr) && (*CurrInst)->MDIsFrameAllocInstr()) {
return true;
}
}
return false;
} // end of SMPFunction::FindAlloca()
// Emit the annotations describing the regions of the stack frame.
void SMPFunction::EmitStackFrameAnnotations(FILE *AnnotFile, SMPInstr *Instr) {
ea_t addr = Instr->GetAddr();
#if 0
if (0 < IncomingArgsSize) {
clc5q
committed
SMP_fprintf(AnnotFile, "%10x %6d INARGS STACK esp + %d %s \n",
addr, IncomingArgsSize,
(LocalVarsSize + CalleeSavedRegsSize + RetAddrSize),
Instr->GetDisasm());
}
#endif
if (0 < this->RetAddrSize) {
clc5q
committed
SMP_fprintf(AnnotFile, "%10x %6d MEMORYHOLE STACK esp + %d ReturnAddress \n",
addr, RetAddrSize, (this->LocalVarsSize + this->CalleeSavedRegsSize));
if (0 < this->CalleeSavedRegsSize) {
clc5q
committed
SMP_fprintf(AnnotFile, "%10x %6u MEMORYHOLE STACK esp + %d CalleeSavedRegs \n",
addr, this->CalleeSavedRegsSize, this->LocalVarsSize);
if ((0 < this->LocalVarsSize) && this->GoodLocalVarTable) {
unsigned long ParentReferentID = DataReferentID++;
clc5q
committed
SMP_fprintf(AnnotFile, "%10x %6u DATAREF STACK %ld esp + %d PARENT LocalFrame LOCALFRAME\n",
addr, this->LocalVarsSize, ParentReferentID, 0);
#if SMP_COMPUTE_STACK_GRANULARITY
if (this->AnalyzedSP && !this->CallsAlloca && (BADADDR != this->LocalVarsAllocInstr)) {
// We can only fine-grain the stack frame if we were able to analyze the stack
if (this->OutgoingArgsSize > 0) {
clc5q
committed
SMP_fprintf(AnnotFile, "%10x %6zu DATAREF STACK %ld esp + %d CHILDOF %ld OFFSET %d OutArgsRegion OUTARGS\n",
addr, this->OutgoingArgsSize, DataReferentID, 0, ParentReferentID, 0);
++DataReferentID;
#if SMP_DEBUG_STACK_GRANULARITY
clc5q
committed
SMP_msg("LocalVarTable of size %d for function %s\n", this->LocalVarTable.size(),
for (size_t i = 0; i < this->LocalVarTable.size(); ++i) {
#if SMP_DEBUG_STACK_GRANULARITY
clc5q
committed
SMP_msg("Entry %d offset %ld size %d name %s\n", i, this->LocalVarTable[i].offset,
this->LocalVarTable[i].size, this->LocalVarTable[i].VarName);
// Don't emit annotations for incoming or outgoing args or anything else
// above or below the current local frame.
if ((this->LocalVarTable[i].offset >= (long) this->FuncInfo.frsize)
|| (this->LocalVarTable[i].offset < (long) this->OutgoingArgsSize))
continue;
clc5q
committed
SMP_fprintf(AnnotFile, "%10x %6zu DATAREF STACK %ld esp + %ld CHILDOF %ld OFFSET %ld LOCALVAR %s \n",
addr, this->LocalVarTable[i].size, DataReferentID,
this->LocalVarTable[i].offset, ParentReferentID,
this->LocalVarTable[i].offset, this->LocalVarTable[i].VarName);
++DataReferentID;
} // end if (this->AnalyzedSP and not Alloca .... )
} // end if (0 < LocalVarsSize)
return;
} // end of SMPFunction::EmitStackFrameAnnotations()
// Main data flow analysis driver. Goes through the function and
// fills all objects for instructions, basic blocks, and the function
// itself.
void SMPFunction::Analyze(void) {
clc5q
committed
bool FoundAllCallers = false;
list<SMPInstr *>::iterator FirstInBlock = this->Instrs.end();
// For starting a basic block
list<SMPInstr *>::iterator LastInBlock = this->Instrs.end();
// Terminating a basic block
sval_t CurrStackPointerOffset = 0;
set<ea_t> FragmentWorkList; // Distant code fragments that belong to this function and need processing
ea_t InstAddr; // grab address to help in debugging, conditional breakpoints, etc.
#if SMP_DEBUG_CONTROLFLOW
clc5q
committed
SMP_msg("Entering SMPFunction::Analyze.\n");
#endif
// Get some basic info from the FuncInfo structure.
this->Size = this->FuncInfo.endEA - this->FuncInfo.startEA;
this->UseFP = (0 != (this->FuncInfo.flags & (FUNC_FRAME | FUNC_BOTTOMBP)));
this->StaticFunc = (0 != (this->FuncInfo.flags & FUNC_STATIC));
this->LibFunc = (0 != (this->FuncInfo.flags & FUNC_LIB));
this->AnalyzedSP = this->FuncInfo.analyzed_sp();
#if SMP_DEBUG_CONTROLFLOW
clc5q
committed
SMP_msg("SMPFunction::Analyze: got basic info.\n");
// Determine if we are dealing with shared chunks.
size_t ChunkCounter = 0;
func_tail_iterator_t FuncTail(this->GetFuncInfo());
for (bool ChunkOK = FuncTail.main(); ChunkOK; ChunkOK = FuncTail.next()) {
const area_t &CurrChunk = FuncTail.chunk();
if (1 == ChunkCounter) { // head chunk
FuncHeadLastAddr = CurrChunk.endEA;
}
else { // a tail chunk
#if STARS_FIND_UNSHARED_CHUNKS
if (this->GetProg()->IsChunkUnshared(CurrChunk.startEA, this->FirstEA, FuncHeadLastAddr)) {
this->UnsharedChunks = true;
#if SMP_DEBUG_CHUNKS
SMP_msg("INFO: Found unshared tail chunk for %s at %x\n", this->GetFuncName(), CurrChunk.startEA);
#endif
}
else {
#endif // STARS_FIND_UNSHARED_CHUNKS
this->SharedChunks = true;
SMP_msg("INFO: Found tail chunk for %s at %x\n", this->GetFuncName(), CurrChunk.startEA);
#if STARS_FIND_UNSHARED_CHUNKS
}
#endif // STARS_FIND_UNSHARED_CHUNKS
}
// Cycle through all chunks that belong to the function.
ChunkCounter = 0;
bool GoodRTL;
for (bool ChunkOK = FuncTail.main(); ChunkOK; ChunkOK = FuncTail.next()) {
const area_t &CurrChunk = FuncTail.chunk();
++ChunkCounter;
// Build the instruction and block lists for the function.
for (ea_t addr = CurrChunk.startEA; addr < CurrChunk.endEA;
addr = get_item_end(addr)) {
flags_t InstrFlags = getFlags(addr);
if (isHead(InstrFlags) && isCode(InstrFlags)) {
SMPInstr *CurrInst = new SMPInstr(addr);
// Fill in the instruction data members.
#if SMP_DEBUG_CONTROLFLOW
clc5q
committed
SMP_msg("SMPFunction::Analyze: calling CurrInst::Analyze.\n");
CurrInst->Analyze();
clc5q
committed
SMP_msg("Disasm: %s \n", CurrInst->GetDisasm());
#if SMP_COUNT_MEMORY_ALLOCATIONS
SMPInstBytes += sizeof(*CurrInst);
#endif
#if SMP_USE_SSA_FNOP_MARKER
if (this->Instrs.empty()) {
// First instruction in function. We want to create a pseudo-instruction
// at the top of the function that can hold SSA DEFs for LiveIn names
// to the function. We use a floating point no-op as the pseudo-inst.
// The code address is one less than the start address of the function.
SMPInstr *MarkerInst = new SMPInstr(addr - 1);
MarkerInst->AnalyzeMarker();
GoodRTL = MarkerInst->BuildRTL();
this->BuiltRTLs = (this->BuiltRTLs && GoodRTL);
if (GoodRTL) {
MarkerInst->SetGoodRTL();
}
assert(FirstInBlock == this->Instrs.end());
this->Instrs.push_back(MarkerInst);
#if SMP_COUNT_MEMORY_ALLOCATIONS
SMPInstBytes += sizeof(*MarkerInst);
clc5q
committed
// Find all functions that call the current function.
clc5q
committed
SMP_xref_t CurrXrefs;
clc5q
committed
if (!FoundAllCallers) {
clc5q
committed
for (bool ok = CurrXrefs.SMP_first_to(CurrInst->GetAddr(), XREF_ALL);
clc5q
committed
ok;
clc5q
committed
ok = CurrXrefs.SMP_next_to()) {
ea_t FromAddr = CurrXrefs.GetFrom();
if ((FromAddr != 0) && (CurrXrefs.GetIscode())) {
clc5q
committed
// Make sure it is not a fall-through. Must be a
// control-flow instruction of some sort, including
// direct or indirect calls or tail calls.
clc5q
committed
CallInst.Analyze();
SMPitype CallType = CallInst.GetDataFlowType();
if ((COND_BRANCH <= CallType) && (RETURN >= CallType)) {
// Found a caller, with its call address in CurrXrefs.from
clc5q
committed
}
}
}
FoundAllCallers = true; // only do this for first inst
}
SMPitype DataFlowType = CurrInst->GetDataFlowType();
if ((DataFlowType == INDIR_CALL) || (DataFlowType == CALL)) {
// See if IDA has determined the target of the call.
#if 0
CurrInst->AnalyzeCallInst(this->FirstEA, this->FuncInfo.endEA);
#endif
ea_t TargetAddr = CurrInst->GetCallTarget();
bool LinkedToTarget = (BADADDR != TargetAddr);
if (LinkedToTarget) {
if (0 == TargetAddr) {
clc5q
committed
SMP_msg("WARNING: Ignoring NULL call target (unreachable) at %x\n", CurrInst->GetAddr());
}
else {
this->AllCallTargets.push_back(TargetAddr);
if (INDIR_CALL == DataFlowType) {
this->IndirectCallTargets.push_back(TargetAddr);
}
else {
this->DirectCallTargets.push_back(TargetAddr);
}
if (DataFlowType == INDIR_CALL) {
this->IndirectCalls = true;
this->UnresolvedIndirectCalls = (!LinkedToTarget);
}
} // end if INDIR_CALL or CALL
else if (DataFlowType == INDIR_JUMP)
this->IndirectJumps = true;
// Add call targets for tail call jumps.
else if (CurrInst->IsBranchToFarChunk()) {
ea_t FarTargetAddr = CurrInst->GetFarBranchTarget();
if (BADADDR != FarTargetAddr) {
assert((RETURN == DataFlowType) || (JUMP == DataFlowType) || (COND_BRANCH == DataFlowType));
// Optimized tail calls, where the stack frame is down to zero at the call point,
// get RETURN as their DataFlowType. Might have to revisit that idea at some point. !!!!****!!!!
if (this->FindDistantCodeFragment(FarTargetAddr)) {
if (this->GetProg()->InsertUnsharedFragment(FarTargetAddr)) {
// Fragment address was inserted in SMPProgram set, was not already there.
pair<set<ea_t>::iterator, bool> InsertResult;
InsertResult = FragmentWorkList.insert(FarTargetAddr);
if (InsertResult.second) {