/*
* SMPFunction.cpp - <see below>.
*
* Copyright (c) 2000, 2001, 2010 - University of Virginia
*
* This file is part of the Memory Error Detection System (MEDS) infrastructure.
* This file may be used and modified for non-commercial purposes as long as
* all copyright, permission, and nonwarranty notices are preserved.
* Redistribution is prohibited without prior written consent from the University
* of Virginia.
*
* Please contact the authors for restrictions applying to commercial use.
*
* THIS SOURCE IS PROVIDED "AS IS" AND WITHOUT ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
*
* Author: University of Virginia
* e-mail: jwd@virginia.com
* URL : http://www.cs.virginia.edu/
*
* Additional copyrights 2010, 2011 by Zephyr Software LLC
* e-mail: {clc,jwd}@zephyr-software.com
* URL : http://www.zephyr-software.com/
 */
//
// SMPFunction.cpp
//
// This module performs the fundamental data flow analyses needed for the
// SMP project (Software Memory Protection) at the function level.
//
#include <utility>
#include <list>
#include <set>
#include <vector>
#include <algorithm>
#include <cstring>
#include <cstdlib>
#include <pro.h>
#include <assert.h>
#include <ida.hpp>
#include <ua.hpp>
#include <idp.hpp>
#include <auto.hpp>
#include <bytes.hpp>
#include <funcs.hpp>
#include <allins.hpp>
#include <intel.hpp>
#include <name.hpp>
#include <struct.hpp>
#include "SMPDBInterface.h"
#include "SMPDataFlowAnalysis.h"
#include "SMPStaticAnalyzer.h"
#include "SMPFunction.h"
#include "SMPBasicBlock.h"
#include "SMPInstr.h"
#include "SMPProgram.h"
// Set to 1 for debugging output
#define SMP_DEBUG 1
#define SMP_DEBUG2 0 // verbose
#define SMP_DEBUG3 0 // verbose
#define SMP_DEBUG_CONTROLFLOW 0 // tells what processing stage is entered
#define SMP_DEBUG_XOR 0
#define SMP_DEBUG_CHUNKS 1 // tracking down tail chunks for functions
#define SMP_DEBUG_FRAMEFIXUP 1
#define SMP_DEBUG_FRAMEFIXUP_VERBOSE 0
#define SMP_DEBUG_DATAFLOW 0
#define SMP_DEBUG_DATAFLOW_VERBOSE 0
#define SMP_DEBUG_TYPE_INFERENCE 0
#define SMP_DEBUG_STACK_GRANULARITY 0
#define SMP_DEBUG_BUILD_RTL 1 // leave this on; serious errors reported
#define SMP_DEBUG_UNINITIALIZED_SSA_NAMES 1
#define SMP_OPTIMIZE_BLOCK_PROFILING 0
#define SMP_DECLARE_INDIRECT_TARGETS_UNSAFE 1
#define SMP_AUDIT_STACK_POINTER_DELTAS 0
#define SMP_COMPARE_IDA_STARS_STACK_POINTER_DELTAS 1
// Compute LVA/SSA or not? Turn it off for NICECAP demo on 31-JAN-2008
#define SMP_COMPUTE_LVA_SSA 1
// Compute fine-grained stack boundaries?
#define SMP_COMPUTE_STACK_GRANULARITY 1
// Use conditional type propagation on phi functions
#define SMP_CONDITIONAL_TYPE_PROPAGATION 0
// Kludges to fix IDA Pro 5.2 errors in cc1.ncexe
#define SMP_IDAPRO52_WORKAROUND 0
// Basic block number 0 is the top of the CFG lattice.
#define SMP_TOP_BLOCK 0
// Set SharedTailChunks to TRUE for entire printf family
// After we restructure the parent/tail structure of the database, this
// will go away.
#define KLUDGE_VFPRINTF_FAMILY 1
// Used for binary search by function number in SMPStaticAnalyzer.cpp
// to trigger debugging output and find which instruction in which
// function is causing a crash.
bool SMPBinaryDebug = false;
using namespace std;
// helper function to determine if an object is in a vector
template <class T>
bool vector_exists(const T &item, const vector<T> &vec) {
for (size_t i = 0; i < vec.size(); ++i) {
if (vec[i] == item)
return true;
}
return false;
}
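// Illustrative usage sketch (not from the original source): vector_exists() is a
// simple linear search, intended for small vectors, e.g.:
//
//   vector<int> BlockNums;
//   BlockNums.push_back(3);
//   bool Found = vector_exists(3, BlockNums); // true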
// Comparison function for sorting.
bool LocalVarCompare(const LocalVar &LV1, const LocalVar &LV2) {
return (LV1.offset < LV2.offset);
}
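// Illustrative sketch (not from the original source): LocalVarCompare is the kind of
// comparator that can be handed to std::sort to order a local variable table by
// ascending stack offset, e.g.:
//
//   std::sort(this->LocalVarTable.begin(), this->LocalVarTable.end(), LocalVarCompare);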
// *****************************************************************
// Class SMPFunction
// *****************************************************************
// Constructor
SMPFunction::SMPFunction(func_t *Info, SMPProgram* pgm) {
this->Program = pgm;
this->FuncInfo = *Info;
this->FirstEA = this->FuncInfo.startEA;
this->FuncName[0] = '\0';
this->BlockCount = 0;
this->FuncProcessed = false;
this->UseFP = false;
this->StaticFunc = false;
this->LibFunc = false;
this->HasReturnInst = false;
this->IndirectCalls = false;
this->UnresolvedIndirectCalls = false;
this->IndirectJumps = false;
this->UnresolvedIndirectJumps = false;
this->DirectlyRecursive = false;
this->SharedChunks = false;
this->PushAfterLocalVarAlloc = false;
this->AnalyzedSP = false;
this->STARSStackPtrAnalysisPerformed = false;
this->StackAdjustmentComputed = false;
#if 1 // default to unsafe
this->SafeFunc = false;
#else // default to safe
this->SafeFunc = true;
this->SpecSafeFunc = true;
this->SafeCallee = true;
this->SpecSafeCallee = true;
#endif
this->WritesAboveRA = false;
this->HasIndirectWrites = false;
this->PossibleIndirectCallTarget = false;
this->PossibleTailCallTarget = false;
this->OutgoingArgsComputed = false;
this->GoodLocalVarTable = false;
this->TypedDefs = 0;
this->UntypedDefs = 0;
this->TypedPhiDefs = 0;
this->UntypedPhiDefs = 0;
this->SafeBlocks = 0;
this->UnsafeBlocks = 0;
this->Size = 0;
this->LocalVarsSize = 0;
this->CalleeSavedRegsSize = 0;
this->RetAddrSize = 0;
this->IncomingArgsSize = 0;
this->OutgoingArgsSize = 0;
this->LocalVarsAllocInstr = BADADDR;
this->LocalVarsDeallocInstr = BADADDR;
this->AllocPointDelta = 0;
this->MinStackDelta = 0;
this->MaxStackDelta = 0;
this->MinStackAccessOffset = 0;
this->MaxStackAccessLimit = 0;
this->NetStackDelta = CALLING_CONVENTION_DEFAULT_FUNCTION_STACK_DELTA;
this->PreAllocStackDelta = CALLING_CONVENTION_DEFAULT_PREFRAMEALLOC_STACK_DELTA;
this->FramePointerStackDelta = 0;
this->GlobalStackAdjustment = 0;
this->LocalVarOffsetLimit = 0;
this->IDAReturnAddressOffset = 0;
this->ReturnAddrStatus = FUNC_UNKNOWN;
this->SetIsSpeculative(false);
this->Blocks.clear();
this->DirectCallTargets.clear();
this->IndirectCallTargets.clear();
this->AllCallTargets.clear();
this->AllCallSources.clear();
this->InstBlockMap.clear();
this->RPOBlocks.clear();
this->IDom.clear();
this->DomTree.clear();
this->BlocksDefinedIn.clear();
this->SSACounter.clear();
this->SSAStack.clear();
this->LocalVarTable.clear();
this->StackFrameMap.clear();
this->FineGrainedStackTable.clear();
this->SavedRegLoc.clear();
this->ReturnRegTypes.clear();
this->LiveInSet.clear();
this->LiveOutSet.clear();
this->KillSet.clear();
this->GlobalDefAddrBySSA.clear();
for (int RegIndex = R_ax; RegIndex <= R_di; ++RegIndex) {
this->SavedRegLoc.push_back(0); // zero offset means reg not saved
this->ReturnRegTypes.push_back(UNINIT);
}
// The sizes of the three regions of the stack frame other than the
// return address are stored in the function structure.
this->LocalVarsSize = this->FuncInfo.frsize;
this->CalleeSavedRegsSize = this->FuncInfo.frregs;
this->IncomingArgsSize = this->FuncInfo.argsize;
// The return address size can be obtained in a machine independent
// way by calling get_frame_retsize().
this->RetAddrSize = get_frame_retsize(this->GetFuncInfo());
} // end of SMPFunction() constructor
SMPFunction::~SMPFunction() {
list<SMPInstr *>::iterator InstIter;
for (InstIter = this->Instrs.begin(); InstIter != this->Instrs.end(); ++InstIter) {
SMPInstr *CurrInst = (*InstIter);
delete CurrInst;
}
list<SMPBasicBlock *>::iterator BlockIter;
for (BlockIter = this->Blocks.begin(); BlockIter != this->Blocks.end(); ++BlockIter) {
SMPBasicBlock *CurrBlock = (*BlockIter);
delete CurrBlock;
}
} // end of SMPFunction::~SMPFunction() destructor
// Get a non-stale pointer to the func_t info for the current function.
func_t *SMPFunction::GetFuncInfo(void) {
func_t *myPtr = SMP_get_func(this->FirstEA);
assert(NULL != myPtr);
return myPtr;
}
// Reset the Processed flags in all blocks to false.
void SMPFunction::ResetProcessedBlocks(void) {
list<SMPBasicBlock *>::iterator CurrBlock;
for (CurrBlock = this->Blocks.begin(); CurrBlock != this->Blocks.end(); ++CurrBlock) {
(*CurrBlock)->SetProcessed(false);
}
return;
} // end of SMPFunction::ResetProcessedBlocks()
// Return an iterator for the beginning of the LiveInSet.
set<op_t, LessOp>::iterator SMPFunction::GetFirstLiveIn(void) {
return this->LiveInSet.begin();
} // end of SMPFunction::GetFirstLiveIn()
// Get termination iterator marker for the LiveIn set, for use by predecessors.
set<op_t, LessOp>::iterator SMPFunction::GetLastLiveIn(void) {
return this->LiveInSet.end();
}
// Get iterator for the start of the LiveOut set.
set<op_t, LessOp>::iterator SMPFunction::GetFirstLiveOut(void) {
return this->LiveOutSet.begin();
}
// Get termination iterator marker for the LiveOut set.
set<op_t, LessOp>::iterator SMPFunction::GetLastLiveOut(void) {
return this->LiveOutSet.end();
}
// Get iterator for the start of the VarKill set.
set<op_t, LessOp>::iterator SMPFunction::GetFirstVarKill(void) {
return this->KillSet.begin();
}
// Get termination iterator marker for the VarKill set.
set<op_t, LessOp>::iterator SMPFunction::GetLastVarKill(void) {
return this->KillSet.end();
}
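// Illustrative usage sketch (not from the original source): callers can traverse the
// function-level data flow sets with the iterator pairs above, e.g.:
//
//   set<op_t, LessOp>::iterator LiveInIter;
//   for (LiveInIter = CurrFunc->GetFirstLiveIn(); LiveInIter != CurrFunc->GetLastLiveIn(); ++LiveInIter) {
//       op_t LiveInOp = (*LiveInIter); // operand that is live on entry to the function
//   }
//
// CurrFunc is assumed here to be a valid SMPFunction pointer.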
// Four methods to get values from the maps of global reg/SSA to FG info.
// For local names, see corresponding methods in SMPBasicBlock.
unsigned short SMPFunction::GetDefSignMiscInfo(int DefHashValue) {
map<int, struct FineGrainedInfo>::iterator MapIter;
pair<map<int, struct FineGrainedInfo>::iterator, bool> MapResult;
MapIter = this->GlobalDefFGInfoBySSA.find(DefHashValue);
if (MapIter != this->GlobalDefFGInfoBySSA.end())
return MapIter->second.SignMiscInfo;
else
return 0;
} // end of SMPFunction::GetDefSignMiscInfo()
unsigned short SMPFunction::GetUseSignMiscInfo(int UseHashValue) {
map<int, struct FineGrainedInfo>::iterator MapIter;
pair<map<int, struct FineGrainedInfo>::iterator, bool> MapResult;
MapIter = this->GlobalUseFGInfoBySSA.find(UseHashValue);
if (MapIter != this->GlobalUseFGInfoBySSA.end())
return MapIter->second.SignMiscInfo;
else
return 0;
} // end of SMPFunction::GetUseSignMiscInfo()
unsigned short SMPFunction::GetDefWidthTypeInfo(int DefHashValue) {
map<int, struct FineGrainedInfo>::iterator MapIter;
pair<map<int, struct FineGrainedInfo>::iterator, bool> MapResult;
MapIter = this->GlobalDefFGInfoBySSA.find(DefHashValue);
if (MapIter != this->GlobalDefFGInfoBySSA.end())
return MapIter->second.SizeInfo;
else
return 0;
} // end of SMPFunction::GetDefWidthTypeInfo()
unsigned short SMPFunction::GetUseWidthTypeInfo(int UseHashValue) {
map<int, struct FineGrainedInfo>::iterator MapIter;
pair<map<int, struct FineGrainedInfo>::iterator, bool> MapResult;
MapIter = this->GlobalUseFGInfoBySSA.find(UseHashValue);
if (MapIter != this->GlobalUseFGInfoBySSA.end())
return MapIter->second.SizeInfo;
else
return 0;
} // end of SMPFunction::GetUseWidthTypeInfo()
struct FineGrainedInfo SMPFunction::GetDefFGInfo(int DefHashValue) {
map<int, struct FineGrainedInfo>::iterator MapIter;
pair<map<int, struct FineGrainedInfo>::iterator, bool> MapResult;
MapIter = this->GlobalDefFGInfoBySSA.find(DefHashValue);
if (MapIter != this->GlobalDefFGInfoBySSA.end())
return MapIter->second;
else {
struct FineGrainedInfo EmptyFG;
EmptyFG.SignMiscInfo = 0;
EmptyFG.SizeInfo = 0;
return EmptyFG;
}
} // end of SMPFunction::GetDefFGInfo()
struct FineGrainedInfo SMPFunction::GetUseFGInfo(int UseHashValue) {
map<int, struct FineGrainedInfo>::iterator MapIter;
pair<map<int, struct FineGrainedInfo>::iterator, bool> MapResult;
MapIter = this->GlobalUseFGInfoBySSA.find(UseHashValue);
if (MapIter != this->GlobalUseFGInfoBySSA.end())
return MapIter->second;
else {
struct FineGrainedInfo EmptyFG;
EmptyFG.SignMiscInfo = 0;
EmptyFG.SizeInfo = 0;
return EmptyFG;
}
} // end of SMPFunction::GetUseFGInfo()
// Add a caller to the list of all callers of this function.
void SMPFunction::AddCallSource(ea_t addr) {
// Convert call instruction address to beginning address of the caller.
func_t *FuncInfo = SMP_get_func(addr);
if (NULL == FuncInfo) {
SMP_msg("SERIOUS WARNING: Call location %x not in a function.\n", addr);
return;
}
ea_t FirstAddr = FuncInfo->startEA;
assert(BADADDR != FirstAddr);
this->AllCallSources.insert(FirstAddr);
this->AllCallSites.insert(addr);
return;
} // end of SMPFunction::AddCallSource()
// Add input arguments to the NormalizedStackOpsMap.
void SMPFunction::AddNormalizedStackOperand(op_t OldOp, ea_t InstAddr, op_t NormalizedOp) {
bool DuplicateCase = false; // e.g. inc [esp+8] will have [esp+8] as a DEF and a USE and maps will see [esp+8] twice
pair<map<pair<op_t, ea_t>, op_t, LessDefinition>::iterator, bool> InsertResult;
pair<map<pair<op_t, ea_t>, map<pair<op_t, ea_t>, op_t, LessDefinition>::iterator, LessDefinition>::iterator, bool> InverseInsertResult;
pair<op_t, ea_t> OldValue(OldOp, InstAddr);
pair<op_t, ea_t> InverseValue(OldOp, InstAddr); // OldOp was NormalizedOp when it was inserted previously
pair<pair<op_t, ea_t>, op_t> InsertValue(OldValue, NormalizedOp);
pair<op_t, ea_t> InverseInsertValue(NormalizedOp, InstAddr);
map<pair<op_t, ea_t>, op_t, LessDefinition>::iterator OldIter = this->NormalizedStackOpsMap.begin();
pair<pair<op_t, ea_t>, map<pair<op_t, ea_t>, op_t, LessDefinition>::iterator> InverseInsertTriple(InverseInsertValue, OldIter);
map<pair<op_t, ea_t>, map<pair<op_t, ea_t>, op_t, LessDefinition>::iterator>::iterator InverseIter;
// If this function calls alloca(), stack operands could be normalized more than once.
// When that happens, we need to update the existing entry instead of inserting a new one.
if (this->CallsAlloca || this->HasPushAfterFrameAlloc()) {
InverseIter = this->InverseNormalizedStackOpsMap.find(InverseValue);
if (InverseIter != this->InverseNormalizedStackOpsMap.end()) {
// We have our alloca() update case. We formerly mapped <A, InstAddr> to B.
// Now B is being normalized to C. All we want to do is change the original
// map entry so that we map <A, InstAddr> to C. In this manner, A is always the
// original un-normalized stack op, available for lookup from an RTL.
OldIter = InverseIter->second; // OldIter points at map of <A, InstAddr> to B.
OldIter->second = NormalizedOp; // Change B to C
// Now we want to erase the Inverse map entry and insert a new one that maps
// <C, InstAddr> to OldIter instead of mapping <B, InstAddr> to OldIter.
(void) this->InverseNormalizedStackOpsMap.erase(InverseIter);
InverseInsertTriple.second = OldIter;
InverseInsertResult = this->InverseNormalizedStackOpsMap.insert(InverseInsertTriple);
assert(InverseInsertResult.second);
return;
}
else {
// We might have the final difficult case: We have a combination of CallsAlloca and the
// DuplicateCase described below (e.g. an increment of a stack location produces a DEF
// and a USE of the same location, causing duplicate mappings to be attempted). We need
// to detect the duplicate case here. What will happen is that, on the first call to this
// method, we will map <A, InstAddr> to B, and reverse-map <B, InstAddr> to A. On the second
// call to this method, we will detect the duplicate case and exit. On the third call, caused
// by CallsAlloca, we are asked to map <B, InstAddr> to C, and we will correctly hit the code
// just above, in the if-clause, to fix the A->B mapping to be an A->C mapping, and we will
// erase the reverse mapping of B->A and replace it with the C->A reverse mapping. On the
// fourth call to this method, we will not find a reverse mapping B->A any more, so the if-clause
// does not execute. We can only detect this case by finding an existing C->A reverse mapping
// and an existing A->C mapping to confirm our inference.
pair<op_t, ea_t> TestInverseValue(NormalizedOp, InstAddr);
InverseIter = this->InverseNormalizedStackOpsMap.find(TestInverseValue);
if (InverseIter != this->InverseNormalizedStackOpsMap.end()) {
// Found existing C->A inverse mapping. Is there an A->C mapping to confirm
// our interpretation of the situation?
pair<op_t, ea_t> TestOldValue(InverseIter->second->first.first, InstAddr);
map<pair<op_t, ea_t>, op_t, LessDefinition>::iterator TestOldIter;
TestOldIter = this->NormalizedStackOpsMap.find(TestOldValue);
if (TestOldIter != this->NormalizedStackOpsMap.end()) {
// We found a mapping from <A, InstAddr>.
if (IsEqOp(NormalizedOp, TestOldIter->second)) {
// The mapping is A->C as suspected.
return; // duplication; nothing to do in either map.
}
}
}
}
}
// At this point, we have no inverse map entry to worry about, because we are
// normalizing this operand for the first time.
InsertResult = this->NormalizedStackOpsMap.insert(InsertValue);
OldIter = InsertResult.first;
if (!(InsertResult.second)) {
// Already had an entry. That should mean a rare case such as "inc [esp+8]" which
// produces a USE and a DEF of the same address. We can confirm that the map has
// the same normalized operand we were trying to insert. Otherwise, the collision
// is fatal.
op_t OldOldOp = InsertResult.first->first.first;
op_t OldNormalizedOp = InsertResult.first->second;
assert(IsEqOp(OldOldOp, OldOp) && IsEqOp(OldNormalizedOp, NormalizedOp));
DuplicateCase = true;
}
if (this->CallsAlloca || this->HasPushAfterFrameAlloc()) {
// We need to add an entry to the inverse map.
InverseInsertTriple.second = OldIter;
InverseInsertResult = this->InverseNormalizedStackOpsMap.insert(InverseInsertTriple);
assert(InverseInsertResult.second || DuplicateCase);
}
return;
} // SMPFunction::AddNormalizedStackOperand()
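// Worked example of the mappings above (hypothetical operands and address, added for
// illustration): suppose at InstAddr we first normalize A = [ebp-8] to B = [esp+16].
// Call 1: map <A, InstAddr> -> B and reverse-map <B, InstAddr> -> A.
// Call 2 (same inst has A as both USE and DEF): the duplicate is detected; nothing changes.
// Call 3 (alloca case renormalizes B to C = [esp+24]): the reverse map finds <B, InstAddr>,
//   so the original entry is updated to <A, InstAddr> -> C and the reverse entry is
//   replaced with <C, InstAddr> -> A.
// Call 4 (duplicate of call 3): the C -> A reverse mapping plus the existing A -> C
//   mapping confirm the duplicate, so the method returns without changes.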
// Return RTLop if not stack opnd; return normalized RTLop otherwise.
op_t SMPFunction::GetNormalizedOperand(ea_t InstAddr, op_t RTLop) {
op_t NormOp;
if (MDIsStackAccessOpnd(RTLop, this->UsesFramePointer())) {
pair<op_t, ea_t> OldDefn(RTLop, InstAddr);
map<pair<op_t, ea_t>, op_t, LessDefinition>::iterator FindIter = this->NormalizedStackOpsMap.find(OldDefn);
assert(this->NormalizedStackOpsMap.end() != FindIter);
NormOp = FindIter->second;
}
else {
NormOp = RTLop;
}
return NormOp;
} // end of SMPFunction::GetNormalizedOperand()
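// Illustrative usage sketch (not from the original source): given an RTL memory operand
// from an instruction in this function, a caller might obtain its normalized (esp-relative)
// form like this, assuming CurrInst and RTLMemOp are valid and the operand was previously
// entered into NormalizedStackOpsMap (otherwise the assertion above fires):
//
//   op_t NormOp = this->GetNormalizedOperand(CurrInst->GetAddr(), RTLMemOp);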
// Six methods to set values into the maps of global reg/SSA to FG info.
// For local names, see corresponding methods in SMPBasicBlock.
void SMPFunction::UpdateDefSignMiscInfo(int DefHashValue, unsigned short NewInfo) {
map<int, struct FineGrainedInfo>::iterator MapIter;
pair<map<int, struct FineGrainedInfo>::iterator, bool> MapResult;
MapIter = this->GlobalDefFGInfoBySSA.find(DefHashValue);
if (MapIter == this->GlobalDefFGInfoBySSA.end()) {
// Not found; insert first.
struct FineGrainedInfo NewFGInfo;
NewFGInfo.SignMiscInfo = NewInfo;
NewFGInfo.SizeInfo = 0;
pair<int, struct FineGrainedInfo> MapItem(DefHashValue, NewFGInfo);
MapResult = this->GlobalDefFGInfoBySSA.insert(MapItem);
assert(MapResult.second); // Was not previously found, insertion must work.
}
else { // found; just OR in the new bits.
MapIter->second.SignMiscInfo |= NewInfo;
}
return;
} // end of SMPFunction::UpdateDefSignMiscInfo()
void SMPFunction::UpdateUseSignMiscInfo(int UseHashValue, unsigned short NewInfo) {
map<int, struct FineGrainedInfo>::iterator MapIter;
pair<map<int, struct FineGrainedInfo>::iterator, bool> MapResult;
MapIter = this->GlobalUseFGInfoBySSA.find(UseHashValue);
if (MapIter == this->GlobalUseFGInfoBySSA.end()) {
// Not found; insert first.
struct FineGrainedInfo NewFGInfo;
NewFGInfo.SignMiscInfo = NewInfo;
NewFGInfo.SizeInfo = 0;
pair<int, struct FineGrainedInfo> MapItem(UseHashValue, NewFGInfo);
MapResult = this->GlobalUseFGInfoBySSA.insert(MapItem);
assert(MapResult.second); // Was not previously found, insertion must work.
}
else { // found; just OR in the new bits.
MapIter->second.SignMiscInfo |= NewInfo;
}
return;
} // end of SMPFunction::UpdateUseSignMiscInfo()
void SMPFunction::UpdateDefWidthTypeInfo(int DefHashValue, unsigned short NewInfo) {
map<int, struct FineGrainedInfo>::iterator MapIter;
pair<map<int, struct FineGrainedInfo>::iterator, bool> MapResult;
MapIter = this->GlobalDefFGInfoBySSA.find(DefHashValue);
if (MapIter == this->GlobalDefFGInfoBySSA.end()) {
// Not found; insert first.
struct FineGrainedInfo NewFGInfo;
NewFGInfo.SignMiscInfo = 0;
NewFGInfo.SizeInfo = NewInfo;
pair<int, struct FineGrainedInfo> MapItem(DefHashValue, NewFGInfo);
MapResult = this->GlobalDefFGInfoBySSA.insert(MapItem);
assert(MapResult.second); // Was not previously found, insertion must work.
}
else { // found; just OR in the new bits.
MapIter->second.SizeInfo |= NewInfo;
}
return;
} // end of SMPFunction::UpdateDefWidthTypeInfo()
void SMPFunction::UpdateUseWidthTypeInfo(int UseHashValue, unsigned short NewInfo) {
map<int, struct FineGrainedInfo>::iterator MapIter;
pair<map<int, struct FineGrainedInfo>::iterator, bool> MapResult;
MapIter = this->GlobalUseFGInfoBySSA.find(UseHashValue);
if (MapIter == this->GlobalUseFGInfoBySSA.end()) {
// Not found; insert first.
struct FineGrainedInfo NewFGInfo;
NewFGInfo.SignMiscInfo = 0;
NewFGInfo.SizeInfo = NewInfo;
pair<int, struct FineGrainedInfo> MapItem(UseHashValue, NewFGInfo);
MapResult = this->GlobalUseFGInfoBySSA.insert(MapItem);
assert(MapResult.second); // Was not previously found, insertion must work.
}
else { // found; just OR in the new bits.
MapIter->second.SizeInfo |= NewInfo;
}
return;
} // end of SMPFunction::UpdateUseWidthTypeInfo()
void SMPFunction::UpdateDefFGInfo(int DefHashValue, struct FineGrainedInfo NewFG) {
map<int, struct FineGrainedInfo>::iterator MapIter;
pair<map<int, struct FineGrainedInfo>::iterator, bool> MapResult;
MapIter = this->GlobalDefFGInfoBySSA.find(DefHashValue);
if (MapIter == this->GlobalDefFGInfoBySSA.end()) {
// Not found; insert it.
pair<int, struct FineGrainedInfo> MapItem(DefHashValue, NewFG);
MapResult = this->GlobalDefFGInfoBySSA.insert(MapItem);
assert(MapResult.second); // Was not previously found, insertion must work.
}
else { // found; just OR in the new bits.
MapIter->second.SignMiscInfo |= NewFG.SignMiscInfo;
MapIter->second.SizeInfo |= NewFG.SizeInfo;
}
return;
} // end of SMPFunction::UpdateDefFGInfo()
void SMPFunction::UpdateUseFGInfo(int UseHashValue, struct FineGrainedInfo NewFG) {
map<int, struct FineGrainedInfo>::iterator MapIter;
pair<map<int, struct FineGrainedInfo>::iterator, bool> MapResult;
MapIter = this->GlobalUseFGInfoBySSA.find(UseHashValue);
if (MapIter == this->GlobalUseFGInfoBySSA.end()) {
// Not found; insert it.
pair<int, struct FineGrainedInfo> MapItem(UseHashValue, NewFG);
MapResult = this->GlobalUseFGInfoBySSA.insert(MapItem);
assert(MapResult.second); // Was not previously found, insertion must work.
}
else { // found; just OR in the new bits.
MapIter->second.SignMiscInfo |= NewFG.SignMiscInfo;
MapIter->second.SizeInfo |= NewFG.SizeInfo;
}
return;
} // end of SMPFunction::UpdateUseFGInfo()
// Erase a range of instructions from the Instrs list, usually corresponding
// to the range of a basic block.
void SMPFunction::EraseInstRange(ea_t FirstAddr, ea_t LastAddr) {
list<SMPInstr *>::iterator InstIter = this->Instrs.begin();
SMPInstr *CurrInst;
ea_t InstAddr;
while (InstIter != this->Instrs.end()) {
CurrInst = (*InstIter);
InstAddr = CurrInst->GetAddr();
if ((InstAddr >= FirstAddr) && (InstAddr <= LastAddr)) {
InstIter = this->Instrs.erase(InstIter);
}
else {
++InstIter;
}
}
} // end of SMPFunction::EraseInstRange()
// For instruction address UseAddr, compute the reaching defs for operand TempOp,
// placing them into the TempReachingDefs set.
void SMPFunction::ComputeTempReachingDefs(op_t TempOp, ea_t UseAddr) {
this->TempReachingDefs.clear();
SMPBasicBlock *CurrBlock = this->GetBlockFromInstAddr(UseAddr);
assert(NULL != CurrBlock);
set<pair<op_t, ea_t>, LessDefinition>::iterator ReachesInIter;
pair<set<ea_t, LessAddr>::iterator, bool> InsertResult;
// Start with the matching members of the ReachesIn set for the current basic block.
for (ReachesInIter = CurrBlock->GetFirstReachesIn(); ReachesInIter != CurrBlock->GetLastReachesIn(); ++ReachesInIter) {
pair<op_t, ea_t> ReachesInDef = *ReachesInIter;
if (IsEqOp(TempOp, ReachesInDef.first)) {
InsertResult = this->TempReachingDefs.insert(ReachesInDef.second);
assert(InsertResult.second);
}
}
// Now, see if any def in the block hides the ReachesIn defs before we get to UseAddr.
list<SMPInstr *>::iterator InstIter;
for (InstIter = CurrBlock->GetFirstInstr(); InstIter != CurrBlock->GetLastInstr(); ++InstIter) {
SMPInstr *CurrInst = *InstIter;
ea_t InstAddr = CurrInst->GetAddr();
if (InstAddr >= UseAddr)
break;
set<DefOrUse, LessDefUse>::iterator DefIter = CurrInst->FindDef(TempOp);
if (DefIter != CurrInst->GetLastDef()) {
// Found a def. All previous defs are hidden from UseAddr by this def.
this->TempReachingDefs.clear();
InsertResult = this->TempReachingDefs.insert(InstAddr);
assert(InsertResult.second);
}
}
return;
} // end of SMPFunction::ComputeTempReachingDefs()
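// Worked example (hypothetical addresses, added for illustration): if TempOp is EBX, the
// ReachesIn set of the block holds defs of EBX at 0x8048100 and 0x8048200, and the block
// itself defines EBX at 0x8048350 before UseAddr = 0x8048400, then the loop over ReachesIn
// first collects {0x8048100, 0x8048200}, and the later in-block def at 0x8048350 clears
// that set and leaves TempReachingDefs = {0x8048350}.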
// Find all the saved stack deltas (if any) for the def addrs in the TempReachingDefs set for TempOp.
// Put the entries matching TempOp into TempStackDeltaReachesList.
void SMPFunction::ComputeTempStackDeltaReachesList(op_t TempOp) {
bool FoundOperand = false;
set<pair<op_t, pair<ea_t, sval_t> >, LessStackDeltaCopy>::iterator CopyIter;
this->TempStackDeltaReachesList.clear();
for (CopyIter = this->StackPtrCopySet.begin(); CopyIter != this->StackPtrCopySet.end(); ++CopyIter) {
pair<op_t, pair<ea_t, sval_t> > CopyEntry = *CopyIter;
if (IsEqOp(TempOp, CopyEntry.first)) {
set<ea_t, LessAddr>::iterator FindReachDefIter;
FoundOperand = true; // help us save time later by exiting loop
// Match address at which stack ptr copy was made to a reaching def address for TempOp.
FindReachDefIter = this->TempReachingDefs.find(CopyEntry.second.first);
if (FindReachDefIter != this->TempReachingDefs.end()) {
// Found a StackPtrCopySet entry for TempOp, AND we found the DefAddr
// in the TempReachingDefs set.
this->TempStackDeltaReachesList.push_back(CopyEntry.second); // push back a pair<ea_t, sval_t>
}
}
else if (FoundOperand) {
// We have found the operand, but have now moved past it in the iteration of StackPtrCopySet.
// Save time by exiting the loop.
break;
}
}
return;
} // end of SMPFunction::ComputeTempStackDeltaReachesList()
// Find the largest stack delta in the TempStackDeltaReachesList.
// Return true if only one value was found in the list.
bool SMPFunction::FindReachingStackDelta(sval_t &StackDelta) {
bool UniqueDelta = true;
if (this->TempStackDeltaReachesList.empty()) {
StackDelta = 0;
return false;
}
else {
StackDelta = this->TempStackDeltaReachesList.front().second;
}
list<pair<ea_t, sval_t> >::iterator DeltaIter;
for (DeltaIter = this->TempStackDeltaReachesList.begin(); DeltaIter != this->TempStackDeltaReachesList.end(); ++DeltaIter) {
sval_t NewDelta = DeltaIter->second;
if (NewDelta != StackDelta) {
UniqueDelta = false;
if (NewDelta > StackDelta) {
StackDelta = NewDelta;
}
}
}
return UniqueDelta;
} // end of SMPFunction::FindReachingStackDelta()
// Find any apparent stack adjustment after the call instruction at CallAddr,
// confining our search to the basic block containing CallAddr.
sval_t SMPFunction::GetStackAdjustmentForCallee(ea_t CallAddr) {
bool success = false;
sval_t CalleeAdjustment = 0;
SMPBasicBlock *CallBlock = this->GetBlockFromInstAddr(CallAddr);
assert(NULL != CallBlock);
sval_t BlockAnalysisDelta = CallBlock->ComputeStackAdjustmentAfterCall(CallAddr);
if (0 != BlockAnalysisDelta) {
CalleeAdjustment = BlockAnalysisDelta;
SMP_msg("INFO: Block analysis produced callee adjustment of %d bytes after %x\n", CalleeAdjustment, CallAddr);
}
return CalleeAdjustment;
} // end of SMPFunction::GetStackAdjustmentForCallee()
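// Illustrative example (hypothetical code sequence, added for illustration): the block
// analysis above looks for stack pointer compensation right after the call site, e.g.
//
//   call  foo
//   add   esp, 4   ; apparent adjustment attributed to foo at this call site
//
// Such a sequence would typically cause ComputeStackAdjustmentAfterCall() to report a
// non-zero apparent adjustment for this call site.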
// Get stack delta from a callee function that is unable to provide the info from
// its own analyses (e.g. analyses failed or have not been performed yet, due to
// a mutually recursive clique in the call graph). We have three approaches in
// this case: Use a default value, consult IDA Pro's analyses, or see if we can
// detect a stack adjustment after the call instruction, from which we could infer
// the stack delta of the callee. We choose the latter approach, and find the smallest
// adjustment among all call sites for the callee.
sval_t SMPFunction::GetStackDeltaForCallee(ea_t CallTargetAddr) {
bool success = false;
sval_t CalleeDelta = CALLING_CONVENTION_DEFAULT_FUNCTION_STACK_DELTA;
SMPFunction *CalleeFunc = this->GetProg()->FindFunction(CallTargetAddr);
if (NULL != CalleeFunc) {
sval_t GlobalAdjustment = CalleeFunc->ComputeGlobalStackAdjustment();
if (0 != GlobalAdjustment) {
CalleeDelta -= GlobalAdjustment;
SMP_msg("INFO: Global stack adjustment analysis produced callee delta of %d bytes after %x\n", CalleeDelta, CallTargetAddr);
}
}
return CalleeDelta;
} // end of SMPFunction::GetStackDeltaForCallee()
// Compute a consistent (or smallest) stack adjustment seen program-wide after all calls to the current function.
// Do not return a non-zero value unless more than one call site can be used as evidence.
sval_t SMPFunction::ComputeGlobalStackAdjustment(void) {
bool FoundZeroAdjustment = false;
sval_t GlobalAdjustment = 0;
sval_t NegativeAdjustment = -10000; // record negative adjustments detected
sval_t PositiveAdjustment = 10000; // record positive adjustments detected
size_t NumCallSites = this->AllCallSites.size();
// Use cached value if already computed.
if (this->StackAdjustmentComputed) {
return this->GlobalStackAdjustment;
}
if (1 < NumCallSites) { // if only one call site, it is dangerous to draw conclusions about seeming "adjustments."
set<ea_t>::iterator CallSiteIter;
for (CallSiteIter = this->AllCallSites.begin(); CallSiteIter != this->AllCallSites.end(); ++CallSiteIter) {
ea_t CallSiteAddr = (*CallSiteIter);
func_t *CurrFunc = SMP_get_func(CallSiteAddr);
assert(NULL != CurrFunc);
ea_t CallerFirstAddr = CurrFunc->startEA;
SMPFunction *CallerFunc = this->GetProg()->FindFunction(CallerFirstAddr);
assert(NULL != CallerFunc);
sval_t CurrentAdjustment = CallerFunc->GetStackAdjustmentForCallee(CallSiteAddr);
// See if CurrentAdjustment is a new, lowest positive value for GlobalAdjustment.
if ((0 < CurrentAdjustment) && (CurrentAdjustment < PositiveAdjustment)) {
PositiveAdjustment = CurrentAdjustment;
}
else if ((0 > CurrentAdjustment) && (CurrentAdjustment > NegativeAdjustment)) {
NegativeAdjustment = CurrentAdjustment;
}
else if (0 == CurrentAdjustment) {
FoundZeroAdjustment = true;
break; // Any zero adjustment found invalidates non-zero inferences
}
}
}
// See if we consistently had positive or negative adjustments
if (FoundZeroAdjustment) {
GlobalAdjustment = 0; // cannot be a clear non-zero indication if we found any zeroes
}
else if (PositiveAdjustment < 10000) { // found at least one positive adjustment
if (NegativeAdjustment > -10000) { // found at least one negative adjustment; bad
GlobalAdjustment = 0; // inconsistent; reset to zero
}
else {
GlobalAdjustment = PositiveAdjustment;
}
}
else if (NegativeAdjustment > -10000) { // found negative but no positive adjustments
GlobalAdjustment = NegativeAdjustment;
}
else { // did not find negative or positive adjustments
GlobalAdjustment = 0;
}
this->StackAdjustmentComputed = true; // signal caching of the value for future speed
this->GlobalStackAdjustment = GlobalAdjustment; // cache the value
return GlobalAdjustment;
} // end of SMPFunction::ComputeGlobalStackAdjustment()
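// Worked example (hypothetical call sites, added for illustration): if three call sites
// report apparent adjustments of +8, +4, and +4, GlobalAdjustment becomes +4 (the smallest
// positive value seen). If the sites report a mix of positive and negative adjustments, or
// if any site reports zero, GlobalAdjustment is reset to zero because the evidence is
// inconsistent. A single call site is never treated as sufficient evidence.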
// Use IDA Pro stack pointer deltas instead of doing our own analysis.
bool SMPFunction::UseIDAStackPointerDeltas(void) {
list<SMPInstr *>::iterator InstIter;
SMPInstr *CurrInst;
#if SMP_COMPARE_IDA_STARS_STACK_POINTER_DELTAS
bool IDATraceFlag = (0 == strcmp("do_length", this->GetFuncName()));
#endif
InstIter = this->Instrs.begin();
#if SMP_USE_SSA_FNOP_MARKER
++InstIter; // skip marker pseudo-instruction
#endif
while (InstIter != this->Instrs.end()) {
CurrInst = *InstIter;
sval_t IDAProDelta = get_spd(this->GetFuncInfo(), CurrInst->GetAddr());
CurrInst->SetStackPtrOffset(IDAProDelta);
++InstIter;
#if SMP_COMPARE_IDA_STARS_STACK_POINTER_DELTAS
if (IDATraceFlag) {
SMP_msg("INFO: IDA Pro stack delta trace: %d at %x\n", IDAProDelta, CurrInst->GetAddr());
}
#endif
}
return true;
} // end of SMPFunction::UseIDAStackPointerDeltas()
// Analyze changes to the stack pointer over all instructions.
bool SMPFunction::AnalyzeStackPointerDeltas(void) {
list<pair<SMPBasicBlock *, sval_t> > WorkList;
list<SMPInstr *>::iterator InstIter;
SMPInstr *CurrInst;
sval_t DeltaIncrement = 0; // change when reprocessing a block in alloca()-calling function
bool ConsistentNetDelta = true; // Net change to stack pointer is consistent at all RETURN locations
bool ConflictingValuesSeen = false; // At least one block was entered with multiple deltas
bool StackPointerRestoreSeen = false; // Stack pointer restored; must become true if ConflictingValuesSeen
bool ReturnSeen = false;
bool IDAProSucceeded = this->AnalyzedSP;
bool FirstBlockProcessed = false;
bool FPSaved = false; // found save of frame pointer; prelude to initializing it
bool SPintoFP = false; // found move of stack pointer into frame pointer (FP init)
#if SMP_COMPARE_IDA_STARS_STACK_POINTER_DELTAS
bool DebugFlag = (0 == strcmp("qsort", this->GetFuncName()));
bool TraceFlag = (0 == strcmp("qsort", this->GetFuncName()));
#endif
#if 1
// Temporarily pull the functions that call alloca() out of the stack pointer delta computations, so
// that we can focus on solving other problems.
if (this->CallsAlloca || this->HasPushAfterFrameAlloc()) {
if (!this->AnalyzedSP) {
(void) this->UseIDAStackPointerDeltas();
return false; // leave it unsolved
}
else {
SMP_msg("INFO: Using IDA Pro stack pointer deltas for alloca-calling function %s .\n", this->GetFuncName());
return this->UseIDAStackPointerDeltas();
}
}
#endif
// In order to precisely track stack deltas, we need to deal with instruction sequences that save the stack pointer
// and then restore it later. This requires a reaching definitions data flow analysis that includes, at a minimum,
// all stack definitions (normalized by stack delta, so that we do not confuse [esp+20] and [esp+20] where the values
// of esp are not the same). We also need to keep track of stack pointer saves in both registers and in stack locations.
// In order for the information about saved stack pointer copies to be available as soon as we need them in the stack
// delta analysis, we have to perform both stack delta analysis and reaching definitions analysis at the same time. Luckily,
// both analyses are well suited to being performed as forward analyses starting from the entry basic block.
//
// Data structures for the reaching definitions analysis include a ReachesIn and a ReachesOut set for each basic block, a
// VarKill set for each block, and a DownExposedDefs set for each block. The VarKill set is shared with the later Live Variable
// Analysis (LVA), so we compute the VarKill and the UpExposed sets (UpExposed is only used by LVA) on the first pass through
// each block. The VarKill and all other LVA sets are sets of operands. The ReachesIn, ReachesOut, and DownExposedDefs sets
// are sets of definitions, where a definition is a pair<operand, instruction address>. The StackPtrCopySet is a triple of
// <operand, instruction address, stack delta>, arranged as a pair of pairs <operand, <addr, delta> >
//
// Algorithm: We maintain a WorkList of pairs <basic block pointer, incoming stack delta to that block>
//
// All sets are empty at the beginning.
// Add the entry basic block to the WorkList, with IncomingDelta of zero.
// while (WorkList is not empty) do
// de-queue first block from WorkList, obtain IncomingDelta
// Compute ReachesIn as the union of the ReachesOut of all predecessor blocks
// if (block has not already been processed) then
// mark block as processed
// for each inst in block (forward iteration) do
// for each USE in inst do
// if USE operand not in VarKill set for block then
// add USE operand to UpExposed set for block
// endif
// if USE operand is a stack pointer value AND it will produce DEF that is a stack pointer value then
// if DEF is stack pointer register then { a stack pointer value that was saved is being restored }
// retrieve new stack pointer delta from saved value in StackPtrCopySet, looking it up in
// that set using the reaching definitions for the USE operand. If inconsistent ******
// else { stack pointer value is being saved somewhere besides the stack pointer register }
// add stack delta to StackPtrCopySet for DEF that is receiving it in current inst
// endif
// endif
// endfor { each USE }
// for each DEF in inst do
// if register or stack operand then
// add to VarKill set
// update DownExposedDefs set (insert, or replace current def for this operand)
// endif
// endfor { each DEF }
// Store IncomingDelta for current instruction
// Get change in delta for current instruction
// Add current change to IncomingDelta
// endfor { each inst }
// At end of block, make ReachesOut set be (ReachesIn minus VarKill) union DownExposedDefs
// For each successor block, add pairs <block pointer, IncomingDelta> to end of WorkList
// else { block has already been processed at least once}
// if IncomingDelta from WorkList is inconsistent with old IncomingDelta then
// if function calls alloca() then
// if new IncomingDelta makes stack frame look larger than old IncomingDelta then
// ignore new IncomingDelta and just process reaching definitions sets below
// else
// use new IncomingDelta and re-process deltas in this block to converge to
// smallest stack frame, which means we are basically ignoring alloca()'s as much as possible.
// endif
// else
// Set AnalyzedSP to false, emit error message, clear WorkList and bail out of this function.
// endif
// endif { inconsistent IncomingDelta values }
// Recompute ReachesIn as union of ReachesOut of all predecessor blocks
// if ReachesIn set changed then
// recompute ReachesOut without examining instructions unless alloca() case requires iterating through instructions
// endif
// if any change in deltas or reaching definitions sets, then add block to end of WorkList along with all successor blocks.
// endif
// end while
// Mark all blocks as unprocessed
this->ResetProcessedBlocks();
this->AnalyzedSP = true;
// Put the entry block on the work list.
assert(0 < this->Blocks.size());
pair<SMPBasicBlock *, sval_t> WorkingPair (this->Blocks.front(), CurrentDelta);
WorkList.push_back(WorkingPair);
// While blocks exist on the work list
// if block already processed, confirm that we are re-entering
// the block with the same stack pointer delta as previously,
// and pop it off the work list
// otherwise declare the stack pointer to be unanalyzable;
// else
// iterate through all instructions in the block, analyzing
// the stack pointer delta of each inst and accumulating current delta
// At the end of the block, put the successor blocks on the work list.
// For both cases, maintain and update reaching definitions sets, and the
// UpExposed and VarKill sets that are used by LVA as well as reaching defs analysis.
bool ReprocessingAllocaBlocks = false;
bool ReachesInChanged;
bool ReachesOutChanged = false;